From 15551aef674d58969a55f3f7bb341e7f53b21ff7 Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Wed, 1 May 2024 20:14:05 +0200 Subject: [PATCH 001/251] clean state --- examples/browser_summarizer/pubspec.lock | 10 +- examples/docs_examples/pubspec.lock | 171 ++++++++++++++++-- examples/hello_world_backend/pubspec.lock | 8 +- examples/hello_world_cli/pubspec.lock | 8 +- examples/hello_world_flutter/pubspec.lock | 8 +- .../lib/src/chat_models/types.dart | 75 ++++++++ .../lib/src/stores/firestore.dart | 169 +++++++++++++++++ packages/langchain_google/pubspec.yaml | 2 + 8 files changed, 423 insertions(+), 28 deletions(-) create mode 100644 packages/langchain_google/lib/src/stores/firestore.dart diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index e9a63872..c5f12de6 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -225,28 +225,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.5.0+1" + version: "0.6.0+1" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.1.0" + version: "0.1.0+2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.1.0" + version: "0.2.0+1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.5.0+1" + version: "0.5.1+1" langchain_tiktoken: dependency: transitive description: @@ -309,7 +309,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.2.1" + version: "0.2.2" path: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index f0384ca9..39732225 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -33,6 +33,14 @@ packages: url: "https://pub.dev" source: hosted version: "0.3.0" + boolean_selector: + dependency: transitive + description: + name: boolean_selector + sha256: "6cfb5af12253eaf2b368f07bacc5a80d1301a071c73360d746b7f2e32d762c66" + url: "https://pub.dev" + source: hosted + version: "2.1.1" characters: dependency: transitive description: @@ -48,6 +56,14 @@ packages: relative: true source: path version: "0.2.0" + clock: + dependency: transitive + description: + name: clock + sha256: cb6d7f03e1de671e34607e909a7213e31d7752be4fb66a86d29fe1eb14bfb5cf + url: "https://pub.dev" + source: hosted + version: "1.1.1" collection: dependency: transitive description: @@ -88,6 +104,14 @@ packages: url: "https://pub.dev" source: hosted version: "6.0.0" + fake_async: + dependency: transitive + description: + name: fake_async + sha256: "511392330127add0b769b75a987850d136345d9227c6b94c96a04cf4a391bf78" + url: "https://pub.dev" + source: hosted + version: "1.3.1" fetch_api: dependency: transitive description: @@ -104,6 +128,30 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.2" + firebase_core: + dependency: transitive + description: + name: firebase_core + sha256: "4f1d7c13a909e82ff026679c9b8493cdeb35a9c76bc46c42bf9e2240c9e57e80" + url: "https://pub.dev" + source: hosted + version: "1.24.0" + firebase_core_platform_interface: + dependency: transitive + description: + name: firebase_core_platform_interface + sha256: b63e3be6c96ef5c33bdec1aab23c91eb00696f6452f0519401d640938c94cba2 + url: "https://pub.dev" + source: hosted 
+ version: "4.8.0" + firebase_core_web: + dependency: transitive + description: + name: firebase_core_web + sha256: "839f1b48032a61962792cea1225fae030d4f27163867f181d6d2072dd40acbee" + url: "https://pub.dev" + source: hosted + version: "1.7.3" fixnum: dependency: transitive description: @@ -112,6 +160,21 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.0" + flutter: + dependency: transitive + description: flutter + source: sdk + version: "0.0.0" + flutter_test: + dependency: transitive + description: flutter + source: sdk + version: "0.0.0" + flutter_web_plugins: + dependency: transitive + description: flutter + source: sdk + version: "0.0.0" freezed_annotation: dependency: transitive description: @@ -221,56 +284,56 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.5.0+1" + version: "0.6.0+1" langchain_chroma: dependency: "direct main" description: path: "../../packages/langchain_chroma" relative: true source: path - version: "0.2.0" + version: "0.2.0+2" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.1.0" + version: "0.1.0+2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.1.0" + version: "0.2.0+1" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.3.0" + version: "0.3.0+2" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.1.0" + version: "0.1.0+2" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.1.0" + version: "0.1.0+2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.5.0+1" + version: "0.5.1+1" langchain_tiktoken: dependency: transitive description: @@ -279,6 +342,46 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.1" + leak_tracker: + dependency: transitive + description: + name: leak_tracker + sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" + url: "https://pub.dev" + source: hosted + version: "10.0.0" + leak_tracker_flutter_testing: + dependency: transitive + description: + name: leak_tracker_flutter_testing + sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 + url: "https://pub.dev" + source: hosted + version: "2.0.1" + leak_tracker_testing: + dependency: transitive + description: + name: leak_tracker_testing + sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 + url: "https://pub.dev" + source: hosted + version: "2.0.1" + matcher: + dependency: transitive + description: + name: matcher + sha256: d2323aa2060500f906aa31a895b4030b6da3ebdcc5619d14ce1aada65cd161cb + url: "https://pub.dev" + source: hosted + version: "0.12.16+1" + material_color_utilities: + dependency: transitive + description: + name: material_color_utilities + sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" + url: "https://pub.dev" + source: hosted + version: "0.8.0" math_expressions: dependency: transitive description: @@ -299,10 +402,10 @@ packages: dependency: transitive description: name: meta - sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" + sha256: 
d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 url: "https://pub.dev" source: hosted - version: "1.12.0" + version: "1.11.0" mistralai_dart: dependency: "direct overridden" description: @@ -323,7 +426,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.2.1" + version: "0.2.2" path: dependency: transitive description: @@ -340,6 +443,14 @@ packages: url: "https://pub.dev" source: hosted version: "6.0.2" + plugin_platform_interface: + dependency: transitive + description: + name: plugin_platform_interface + sha256: "4820fbfdb9478b1ebae27888254d445073732dae3d6ea81f0b7e06d5dedc3f02" + url: "https://pub.dev" + source: hosted + version: "2.1.8" retry: dependency: transitive description: @@ -364,6 +475,11 @@ packages: url: "https://pub.dev" source: hosted version: "0.27.7" + sky_engine: + dependency: transitive + description: flutter + source: sdk + version: "0.0.99" source_span: dependency: transitive description: @@ -380,6 +496,22 @@ packages: url: "https://pub.dev" source: hosted version: "7.0.0" + stack_trace: + dependency: transitive + description: + name: stack_trace + sha256: "73713990125a6d93122541237550ee3352a2d84baad52d375a4cad2eb9b7ce0b" + url: "https://pub.dev" + source: hosted + version: "1.11.1" + stream_channel: + dependency: transitive + description: + name: stream_channel + sha256: ba2aa5d8cc609d96bbb2899c28934f9e1af5cddbd60a827822ea467161eb54e7 + url: "https://pub.dev" + source: hosted + version: "2.1.2" string_scanner: dependency: transitive description: @@ -396,6 +528,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.2.1" + test_api: + dependency: transitive + description: + name: test_api + sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" + url: "https://pub.dev" + source: hosted + version: "0.6.1" typed_data: dependency: transitive description: @@ -427,6 +567,14 @@ packages: relative: true source: path version: "0.1.0" + vm_service: + dependency: transitive + description: + name: vm_service + sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 + url: "https://pub.dev" + source: hosted + version: "13.0.0" web: dependency: transitive description: @@ -437,3 +585,4 @@ packages: version: "0.5.1" sdks: dart: ">=3.3.0 <4.0.0" + flutter: ">=3.3.0" diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index 286dcebf..8350944e 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -119,21 +119,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.5.0+1" + version: "0.6.0+1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.1.0" + version: "0.2.0+1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.5.0+1" + version: "0.5.1+1" langchain_tiktoken: dependency: transitive description: @@ -156,7 +156,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.2.1" + version: "0.2.2" path: dependency: transitive description: diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index c5718392..a0919a79 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -111,21 +111,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: 
"0.5.0+1" + version: "0.6.0+1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.1.0" + version: "0.2.0+1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.5.0+1" + version: "0.5.1+1" langchain_tiktoken: dependency: transitive description: @@ -148,7 +148,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.2.1" + version: "0.2.2" path: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index a99021aa..8bcbd6cb 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -140,21 +140,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.5.0+1" + version: "0.6.0+1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.1.0" + version: "0.2.0+1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.5.0+1" + version: "0.5.1+1" langchain_tiktoken: dependency: transitive description: @@ -193,7 +193,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.2.1" + version: "0.2.2" path: dependency: transitive description: diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index 3cb51076..566e9157 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -135,6 +135,9 @@ sealed class ChatMessage { /// Merges this message with another by concatenating the content. ChatMessage concat(final ChatMessage other); + + ///Converts ChatMessage to json string + Map toJson(); } /// {@template system_chat_message} @@ -175,6 +178,15 @@ SystemChatMessage{ content: $content, }'''; } + + @override + Map toJson() => { + 'type': defaultPrefix, + 'content': content, + }; + + factory SystemChatMessage.fromJson(final Map json) => + SystemChatMessage(content: json['content']); } /// {@template human_chat_message} @@ -267,6 +279,18 @@ HumanChatMessage{ content: $content, }'''; } + + @override + Map toJson() => { + 'type': defaultPrefix, + 'content': content, + }; + + factory HumanChatMessage.fromJson(final Map json) { + return HumanChatMessage( + content: json['content'], + ); + } } /// {@template ai_chat_message} @@ -328,6 +352,22 @@ AIChatMessage{ functionCall: $functionCall, }'''; } + + @override + Map toJson() => { + 'type': defaultPrefix, + 'content': content, + 'functionCall': functionCall?.toJson(), + }; + + factory AIChatMessage.fromJson(final Map json) { + return AIChatMessage( + content: json['content'], + functionCall: json['functionCall'] != null + ? 
AIChatMessageFunctionCall.fromJson(json['functionCall'])
+          : null,
+    );
+  }
 }
 
 /// {@template ai_chat_message_function_call}
@@ -382,6 +422,22 @@ AIChatMessageFunctionCall{
   arguments: $arguments,
 }''';
   }
+
+  ///The toJson function will convert the ChatMessage to a json object
+  Map<String, dynamic> toJson() => {
+        'name': name,
+        'argumentsRaw': argumentsRaw,
+        'arguments': arguments,
+      };
+
+  ///The fromJson will create a AI
+  factory AIChatMessageFunctionCall.fromJson(final Map<String, dynamic> json) {
+    return AIChatMessageFunctionCall(
+      name: json['name'],
+      argumentsRaw: json['argumentsRaw'],
+      arguments: json['arguments'],
+    );
+  }
 }
 
 /// {@template function_chat_message}
@@ -431,6 +487,16 @@ FunctionChatMessage{
   content: $content,
 }''';
   }
+
+  @override
+  Map<String, dynamic> toJson() => {
+        'type': defaultPrefix,
+        'name': name,
+        'content': content,
+      };
+
+  factory FunctionChatMessage.fromJson(final Map<String, dynamic> json) =>
+      FunctionChatMessage(content: json['content'], name: json['name']);
 }
 
 /// {@template custom_chat_message}
@@ -476,6 +542,15 @@ CustomChatMessage{
   role: $role,
 }''';
   }
+
+  @override
+  Map<String, dynamic> toJson() => {
+        'type': 'Custom',
+        'content': content,
+        'role': role,
+      };
+  factory CustomChatMessage.fromJson(final Map<String, dynamic> json) =>
+      CustomChatMessage(content: json['content'], role: json['role']);
 }
 
 /// Role of a chat message
diff --git a/packages/langchain_google/lib/src/stores/firestore.dart b/packages/langchain_google/lib/src/stores/firestore.dart
new file mode 100644
index 00000000..3beffb97
--- /dev/null
+++ b/packages/langchain_google/lib/src/stores/firestore.dart
@@ -0,0 +1,169 @@
+import 'package:cloud_firestore/cloud_firestore.dart';
+import 'package:firebase_core/firebase_core.dart';
+import 'package:langchain_core/chat_history.dart';
+import 'package:langchain_core/chat_models.dart';
+
+///Can be used to store ChatMessages in Firestore
+final class FirestoreChatMessageHistory extends BaseChatMessageHistory {
+  ///Constructor
+  FirestoreChatMessageHistory({
+    required this.collections,
+    required this.options,
+  });
+
+  ///Function to initialize firebaseinstance
+  Future<void> initFirestore() async {
+    await Firebase.initializeApp(
+      options: options,
+    );
+    collectionReference = FirebaseFirestore.instance.collection(collections);
+  }
+
+  ///Path of the Firestore collection where the messages are stored
+  String collections;
+
+  ///Firebase Options
+  FirebaseOptions options;
+
+  ///Firestore instance
+  CollectionReference<Map<String, dynamic>>? collectionReference;
+
+  @override
+  Future<void> addChatMessage(final ChatMessage message) async {
+    final FirestoreChatMessageField messageField =
+        FirestoreChatMessageField(message: message);
+    if (collectionReference == null) {
+      throw Exception('Remember to initialize firestore');
+    } else {
+      await collectionReference!.doc().set(messageField.toJson());
+    }
+  }
+
+  @override
+  Future<void> clear() async {
+    if (collectionReference == null) {
+      throw Exception('Remember to initialize firestore');
+    } else {
+      final snapshot = await collectionReference!.get();
+
+      //Delete all docs in firestore
+      for (final doc in snapshot.docs) {
+        await doc.reference.delete();
+      }
+    }
+  }
+
+  @override
+  Future<List<ChatMessage>> getChatMessages() async {
+    //Get chat messages in ascending order so the newest message is the last in the list
+    final snapshot =
+        await collectionReference!.orderBy('created', descending: false).get();
+
+    //Take each doc and add it to the conversation list.
+    final List<ChatMessage> conversation = snapshot.docs.map((final doc) {
+      return FirestoreChatMessageField.fromJson(doc.data()).message;
+    }).toList();
+
+    return conversation;
+  }
+
+  /// Removes and returns the first (oldest) element of the history.
+  ///
+  /// The history must not be empty when this method is called.
+  @override
+  Future<ChatMessage> removeFirst() async {
+    final snapshot = await collectionReference!
+        .orderBy('created', descending: false)
+        .limit(1)
+        .get();
+
+    //get oldest document
+    final oldest = snapshot.docs.first;
+
+    //Delete doc in firestore
+    await oldest.reference.delete();
+
+    //Create FirestoreChatMessageField and return ChatMessage
+    return FirestoreChatMessageField.fromJson(oldest.data()).message;
+  }
+
+  /// Removes and returns the last (newest) element of the history.
+  ///
+  /// The history must not be empty when this method is called.
+  @override
+  Future<ChatMessage> removeLast() async {
+    final snapshot = await collectionReference!
+        .orderBy('created', descending: true)
+        .limit(1)
+        .get();
+
+    //get newest document
+    final newest = snapshot.docs.first;
+
+    //Delete doc in firestore
+    await newest.reference.delete();
+
+    //Create FirestoreChatMessageField and return ChatMessage
+    return FirestoreChatMessageField.fromJson(newest.data()).message;
+  }
+}
+
+///This class makes sure that every chat message on firestore has a timestamp
+///This will enable fetching document with orderBy(created).
+class FirestoreChatMessageField {
+  ///Will contain the chat message
+  final ChatMessage message;
+
+  ///Timestamp to keep messages in order.
+  Timestamp created = Timestamp.now();
+
+  ///Constructor
+  FirestoreChatMessageField({required this.message, final Timestamp? created}) {
+    if (created == null) {
+      this.created = Timestamp.now();
+    } else {
+      this.created = created;
+    }
+  }
+
+  ///FromJson will convert a json map to a FirestoreChatMessageField
+  factory FirestoreChatMessageField.fromJson(final Map<String, dynamic> json) {
+    // ignore: avoid_dynamic_calls
+    switch (json['message']['type']) {
+      case SystemChatMessage.defaultPrefix:
+        return FirestoreChatMessageField(
+          message: SystemChatMessage.fromJson(json['message']),
+          created: json['created'],
+        );
+      case HumanChatMessage.defaultPrefix:
+        return FirestoreChatMessageField(
+          message: HumanChatMessage.fromJson(json['message']),
+          created: json['created'],
+        );
+
+      case AIChatMessage.defaultPrefix:
+        return FirestoreChatMessageField(
+          message: AIChatMessage.fromJson(json['message']),
+          created: json['created'],
+        );
+
+      case FunctionChatMessage.defaultPrefix:
+        return FirestoreChatMessageField(
+          message: FunctionChatMessage.fromJson(json['message']),
+          created: json['created'],
+        );
+
+      case 'Custom':
+        return FirestoreChatMessageField(
+          message: CustomChatMessage.fromJson(json['message']),
+          created: json['created'],
+        );
+      default:
+        // ignore: avoid_dynamic_calls
+        throw FormatException("INVALID JSON FILE = ${json['message']['type']}");
+    }
+  }
+
+  Map<String, dynamic> toJson() =>
+      {'message': message.toJson(), 'created': created};
+}
diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml
index 1ce2f589..9a61e495 100644
--- a/packages/langchain_google/pubspec.yaml
+++ b/packages/langchain_google/pubspec.yaml
@@ -17,7 +17,9 @@ environment:
   sdk: ">=3.0.0 <4.0.0"
 
 dependencies:
+  cloud_firestore: ^3.2.0
   collection: '>=1.17.0 <1.19.0'
+  firebase_core: ^1.19.0
   gcloud: ^0.8.12
   googleai_dart: ^0.0.4
   googleapis: ^12.0.0

From 4e9b5e27c72be12ef830b1ed14be1a43e1c018e2 Mon Sep 17 00:00:00 2001
From: Kenneth Knudsen
Date: Wed, 1 May
2024 22:03:23 +0200 Subject: [PATCH 002/251] fix to json --- packages/langchain/pubspec.yaml | 5 +++-- packages/langchain_chroma/pubspec.yaml | 3 ++- packages/langchain_community/pubspec.yaml | 3 ++- .../lib/src/chat_models/types.dart | 21 ++++++++++++++++++- .../example/firestore_memory.dart | 19 +++++++++++++++++ .../lib/langchain_google.dart | 1 + packages/langchain_google/pubspec.yaml | 11 +++++----- packages/langchain_mistralai/pubspec.yaml | 5 +++-- packages/langchain_ollama/pubspec.yaml | 5 +++-- packages/langchain_openai/pubspec.yaml | 5 +++-- packages/langchain_pinecone/pubspec.yaml | 3 ++- packages/langchain_supabase/pubspec.yaml | 3 ++- 12 files changed, 66 insertions(+), 18 deletions(-) create mode 100644 packages/langchain_google/example/firestore_memory.dart diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index c5bc30c5..76c9d104 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -17,9 +17,10 @@ environment: dependencies: characters: ^1.3.0 - collection: '>=1.17.0 <1.19.0' + collection: ">=1.17.0 <1.19.0" crypto: ^3.0.3 - langchain_core: ^0.2.0+1 + langchain_core: + path: ../langchain_core meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 53cddc9b..f28c3a54 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -19,7 +19,8 @@ environment: dependencies: chromadb: ^0.2.0 http: ^1.1.0 - langchain_core: ^0.2.0+1 + langchain_core: + path: ../langchain_core meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 236c3d0d..fcfdffb3 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -21,7 +21,8 @@ dependencies: csv: ^6.0.0 http: ^1.1.0 json_path: ^0.7.1 - langchain_core: ^0.2.0+1 + langchain_core: + path: ../langchain_core math_expressions: ^2.4.0 meta: ^1.11.0 diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index 566e9157..3b18f72e 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -283,7 +283,7 @@ HumanChatMessage{ @override Map toJson() => { 'type': defaultPrefix, - 'content': content, + 'content': content.toJson(), }; factory HumanChatMessage.fromJson(final Map json) { @@ -602,6 +602,8 @@ sealed class ChatMessageContent { final List parts, ) => ChatMessageContentMultiModal(parts: parts); + + Map toJson(); } /// {@template chat_message_content_text} @@ -630,6 +632,11 @@ ChatMessageContentText{ text: $text, }'''; } + + @override + Map toJson() => { + 'text': text, + }; } /// {@template chat_message_content_image} @@ -679,6 +686,12 @@ ChatMessageContentImage{ imageDetail: $detail, }'''; } + + @override + Map toJson() { + // TODO: implement toJson + throw UnimplementedError(); + } } /// {@template chat_message_content_multi_modal} @@ -711,6 +724,12 @@ ChatMessageContentMultiModal{ parts: $parts, }'''; } + + @override + Map toJson() { + // TODO: implement toJson + throw UnimplementedError(); + } } /// Specifies the detail level of the image. 
diff --git a/packages/langchain_google/example/firestore_memory.dart b/packages/langchain_google/example/firestore_memory.dart new file mode 100644 index 00000000..fddcc1c7 --- /dev/null +++ b/packages/langchain_google/example/firestore_memory.dart @@ -0,0 +1,19 @@ +import 'package:langchain_google/langchain_google.dart'; + +Future main() async { + // final db = FakeFirebaseFirestore(); + + // await db.collection('Users').doc('a').collection('Langchain').add({ + // 'username': 'Bob', + // }); + + // final FirestoreChatMessageHistory history = FirestoreChatMessageHistory( + // db.collection('Users').doc('a').collection('Langchain'), + // ); + + // await history.addHumanChatMessage('hi!'); + // await history.addAIChatMessage('whats up?'); + + // print(await history.getChatMessages()); + // FirestoreChatMessageHistory his = FirestoreChatMessageHistory(collections: 'Users/', options: options) +} diff --git a/packages/langchain_google/lib/langchain_google.dart b/packages/langchain_google/lib/langchain_google.dart index 371e45ad..f2e5374d 100644 --- a/packages/langchain_google/lib/langchain_google.dart +++ b/packages/langchain_google/lib/langchain_google.dart @@ -4,4 +4,5 @@ library; export 'src/chat_models/chat_models.dart'; export 'src/embeddings/embeddings.dart'; export 'src/llms/llms.dart'; +export 'src/stores/firestore.dart'; export 'src/vector_stores/vector_stores.dart'; diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 9a61e495..fc5e926f 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -17,18 +17,19 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - cloud_firestore: ^3.2.0 - collection: '>=1.17.0 <1.19.0' - firebase_core: ^1.19.0 + collection: ">=1.17.0 <1.19.0" gcloud: ^0.8.12 googleai_dart: ^0.0.4 googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 - langchain_core: ^0.2.0+1 + langchain_core: + path: ../langchain_core meta: ^1.11.0 uuid: ^4.3.3 vertex_ai: ^0.1.0 + firebase_core: ^2.25.5 + cloud_firestore: ^4.17.0 dev_dependencies: - test: ^1.25.2 + test: ^1.24.9 diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index fcd79f17..2138ac65 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -17,9 +17,10 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - collection: '>=1.17.0 <1.19.0' + collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.2.0+1 + langchain_core: + path: ../langchain_core langchain_tiktoken: ^1.0.1 meta: ^1.11.0 mistralai_dart: ^0.0.3+1 diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 4841c526..d425438f 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -17,9 +17,10 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - collection: '>=1.17.0 <1.19.0' + collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.2.0+1 + langchain_core: + path: ../langchain_core langchain_tiktoken: ^1.0.1 meta: ^1.11.0 ollama_dart: ^0.0.3+1 diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index eeb0c9de..5f0b0dc2 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -17,9 +17,10 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - collection: '>=1.17.0 <1.19.0' + collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.2.0+1 + langchain_core: + path: ../langchain_core 
langchain_tiktoken: ^1.0.1 meta: ^1.11.0 openai_dart: ^0.2.2 diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index 9e546eb8..a6abfdf9 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -18,7 +18,8 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.2.0+1 + langchain_core: + path: ../langchain_core meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.3.3 diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index 09dc32ce..d8b30b92 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -18,7 +18,8 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.2.0+1 + langchain_core: + path: ../langchain_core meta: ^1.11.0 supabase: ^2.0.8 From ae048275c1a3f2c4818a582e41548449be9ac556 Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Thu, 2 May 2024 19:38:38 +0200 Subject: [PATCH 003/251] fix overrides --- packages/langchain/pubspec.yaml | 3 +-- packages/langchain_chroma/pubspec.yaml | 3 +-- packages/langchain_community/pubspec.yaml | 3 +-- packages/langchain_google/pubspec.yaml | 3 +-- packages/langchain_mistralai/pubspec.yaml | 3 +-- packages/langchain_ollama/pubspec.yaml | 3 +-- packages/langchain_openai/pubspec.yaml | 3 +-- packages/langchain_pinecone/pubspec.yaml | 3 +-- packages/langchain_supabase/pubspec.yaml | 3 +-- 9 files changed, 9 insertions(+), 18 deletions(-) diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 76c9d104..e4a1804e 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -19,8 +19,7 @@ dependencies: characters: ^1.3.0 collection: ">=1.17.0 <1.19.0" crypto: ^3.0.3 - langchain_core: - path: ../langchain_core + langchain_core: ^0.2.0+1 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index f28c3a54..53cddc9b 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -19,8 +19,7 @@ environment: dependencies: chromadb: ^0.2.0 http: ^1.1.0 - langchain_core: - path: ../langchain_core + langchain_core: ^0.2.0+1 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index fcfdffb3..236c3d0d 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -21,8 +21,7 @@ dependencies: csv: ^6.0.0 http: ^1.1.0 json_path: ^0.7.1 - langchain_core: - path: ../langchain_core + langchain_core: ^0.2.0+1 math_expressions: ^2.4.0 meta: ^1.11.0 diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index fc5e926f..c671b070 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -23,8 +23,7 @@ dependencies: googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 - langchain_core: - path: ../langchain_core + langchain_core: ^0.2.0+1 meta: ^1.11.0 uuid: ^4.3.3 vertex_ai: ^0.1.0 diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 2138ac65..ddedc096 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -19,8 +19,7 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: - path: ../langchain_core + langchain_core: ^0.2.0+1 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 mistralai_dart: ^0.0.3+1 diff --git 
a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index d425438f..f993c5e0 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -19,8 +19,7 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: - path: ../langchain_core + langchain_core: ^0.2.0+1 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 ollama_dart: ^0.0.3+1 diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 5f0b0dc2..4e8bd8a1 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -19,8 +19,7 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: - path: ../langchain_core + langchain_core: ^0.2.0+1 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 openai_dart: ^0.2.2 diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index a6abfdf9..9e546eb8 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -18,8 +18,7 @@ environment: dependencies: http: ^1.1.0 - langchain_core: - path: ../langchain_core + langchain_core: ^0.2.0+1 meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.3.3 diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index d8b30b92..09dc32ce 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -18,8 +18,7 @@ environment: dependencies: http: ^1.1.0 - langchain_core: - path: ../langchain_core + langchain_core: ^0.2.0+1 meta: ^1.11.0 supabase: ^2.0.8 From a45201bf811d749e0fd6309b98934686e22213d6 Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Fri, 3 May 2024 18:18:39 +0200 Subject: [PATCH 004/251] impl tojson --- examples/docs_examples/pubspec.lock | 44 ++++++++++++++++--- .../lib/src/chat_models/types.dart | 23 ++++++---- 2 files changed, 52 insertions(+), 15 deletions(-) diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 39732225..d06f8e68 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -9,6 +9,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.6" + _flutterfire_internals: + dependency: transitive + description: + name: _flutterfire_internals + sha256: "3dee3db3468c5f4640a4e8aa9c1e22561c298976d8c39ed2fdd456a9a3db26e1" + url: "https://pub.dev" + source: hosted + version: "1.3.32" args: dependency: transitive description: @@ -64,6 +72,30 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.1" + cloud_firestore: + dependency: transitive + description: + name: cloud_firestore + sha256: "58ea77912e355cfd4e49f858e01d5ab4f99c7181ebd545b3fbeae25d6677dff6" + url: "https://pub.dev" + source: hosted + version: "4.17.2" + cloud_firestore_platform_interface: + dependency: transitive + description: + name: cloud_firestore_platform_interface + sha256: "9cb7d350c08c3d4987d34f418037aca3882649cdad5158440b4b932232edcf82" + url: "https://pub.dev" + source: hosted + version: "6.2.2" + cloud_firestore_web: + dependency: transitive + description: + name: cloud_firestore_web + sha256: "28db029962ebdcb8caf80f0c109f359ebcefa983b85fb62e5a220b1a577d84b5" + url: "https://pub.dev" + source: hosted + version: "3.12.2" collection: dependency: transitive description: @@ -132,26 +164,26 @@ packages: dependency: transitive description: name: firebase_core - sha256: "4f1d7c13a909e82ff026679c9b8493cdeb35a9c76bc46c42bf9e2240c9e57e80" + 
sha256: "4aef2a23d0f3265545807d68fbc2f76a6b994ca3c778d88453b99325abd63284" url: "https://pub.dev" source: hosted - version: "1.24.0" + version: "2.30.1" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: b63e3be6c96ef5c33bdec1aab23c91eb00696f6452f0519401d640938c94cba2 + sha256: c437ae5d17e6b5cc7981cf6fd458a5db4d12979905f9aafd1fea930428a9fe63 url: "https://pub.dev" source: hosted - version: "4.8.0" + version: "5.0.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: "839f1b48032a61962792cea1225fae030d4f27163867f181d6d2072dd40acbee" + sha256: "67f2fcc600fc78c2f731c370a3a5e6c87ee862e3a2fba6f951eca6d5dafe5c29" url: "https://pub.dev" source: hosted - version: "1.7.3" + version: "2.16.0" fixnum: dependency: transitive description: diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index 3b18f72e..b2827dfa 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -288,7 +288,7 @@ HumanChatMessage{ factory HumanChatMessage.fromJson(final Map json) { return HumanChatMessage( - content: json['content'], + content: ChatMessageContentText.fromJson(json['content']), ); } } @@ -603,6 +603,7 @@ sealed class ChatMessageContent { ) => ChatMessageContentMultiModal(parts: parts); + /// Converts the class to a json object Map toJson(); } @@ -637,6 +638,10 @@ ChatMessageContentText{ Map toJson() => { 'text': text, }; + + /// Takes a json object and returns a new ChatMessageContentText + factory ChatMessageContentText.fromJson(final Map json) => + ChatMessageContentText(text: json['text']); } /// {@template chat_message_content_image} @@ -688,10 +693,11 @@ ChatMessageContentImage{ } @override - Map toJson() { - // TODO: implement toJson - throw UnimplementedError(); - } + Map toJson() => { + 'data': data, + 'mimeType': mimeType, + 'imageDetail': detail, + }; } /// {@template chat_message_content_multi_modal} @@ -726,10 +732,9 @@ ChatMessageContentMultiModal{ } @override - Map toJson() { - // TODO: implement toJson - throw UnimplementedError(); - } + Map toJson() => { + 'parts': parts.map((part) => part.toJson()).toList(), + }; } /// Specifies the detail level of the image. From ca2e27f2ea0dd72267e7334e5e6228f890abfb5e Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Fri, 3 May 2024 18:55:14 +0200 Subject: [PATCH 005/251] Add createdby field --- .../lib/src/stores/firestore.dart | 122 +++++++++++------- 1 file changed, 78 insertions(+), 44 deletions(-) diff --git a/packages/langchain_google/lib/src/stores/firestore.dart b/packages/langchain_google/lib/src/stores/firestore.dart index 3beffb97..ac3813a7 100644 --- a/packages/langchain_google/lib/src/stores/firestore.dart +++ b/packages/langchain_google/lib/src/stores/firestore.dart @@ -9,10 +9,11 @@ final class FirestoreChatMessageHistory extends BaseChatMessageHistory { FirestoreChatMessageHistory({ required this.collections, required this.options, + required this.userId, }); ///Function to initialize firebaseinstance - Future initFirestore() async { + Future ensureFirestore() async { await Firebase.initializeApp( options: options, ); @@ -25,13 +26,16 @@ final class FirestoreChatMessageHistory extends BaseChatMessageHistory { ///Firebase Options FirebaseOptions options; + /// Used to identify the sender of each message + String userId; + ///Firestore instance CollectionReference>? 
collectionReference; @override Future addChatMessage(final ChatMessage message) async { final FirestoreChatMessageField messageField = - FirestoreChatMessageField(message: message); + FirestoreChatMessageField(message: message, createdBy: userId); if (collectionReference == null) { throw Exception('Remember to initialize firestore'); } else { @@ -55,16 +59,22 @@ final class FirestoreChatMessageHistory extends BaseChatMessageHistory { @override Future> getChatMessages() async { - //Get chat messages in ascending order so the newest message is the last in the list - final snapshot = - await collectionReference!.orderBy('created', descending: false).get(); - - //Take each doc and add it to the conversation list. - final List conversation = snapshot.docs.map((final doc) { - return FirestoreChatMessageField.fromJson(doc.data()).message; - }).toList(); - - return conversation; + if (collectionReference == null) { + throw Exception('Remember to initialize firestore'); + } else { + //Get chat messages in ascending order so the newest message is the last in the list + final snapshot = await collectionReference! + .where('createdBy', isEqualTo: userId) + .orderBy('createdAt', descending: false) + .get(); + + //Take each doc and add it to the conversation list. + final List conversation = snapshot.docs.map((final doc) { + return FirestoreChatMessageField.fromJson(doc.data()).message; + }).toList(); + + return conversation; + } } /// Removes and returns the first (oldest) element of the history. @@ -72,19 +82,23 @@ final class FirestoreChatMessageHistory extends BaseChatMessageHistory { /// The history must not be empty when this method is called. @override Future removeFirst() async { - final snapshot = await collectionReference! - .orderBy('created', descending: false) - .limit(1) - .get(); + if (collectionReference == null) { + throw Exception('Remember to initialize firestore'); + } else { + final snapshot = await collectionReference! + .orderBy('createdAt', descending: false) + .limit(1) + .get(); - //get oldest document - final oldest = snapshot.docs.first; + //get oldest document + final oldest = snapshot.docs.first; - //Delete doc in firestore - await oldest.reference.delete(); + //Delete doc in firestore + await oldest.reference.delete(); - //Create FirestoreChatMessageField and return ChatMessage - return FirestoreChatMessageField.fromJson(oldest.data()).message; + //Create FirestoreChatMessageField and return ChatMessage + return FirestoreChatMessageField.fromJson(oldest.data()).message; + } } /// Removes and returns the last (newest) element of the history. @@ -92,19 +106,23 @@ final class FirestoreChatMessageHistory extends BaseChatMessageHistory { /// The history must not be empty when this method is called. @override Future removeLast() async { - final snapshot = await collectionReference! - .orderBy('created', descending: true) - .limit(1) - .get(); + if (collectionReference == null) { + throw Exception('Remember to initialize firestore'); + } else { + final snapshot = await collectionReference! 
+ .orderBy('createdAt', descending: true) + .limit(1) + .get(); - //get newest document - final newest = snapshot.docs.first; + //get newest document + final newest = snapshot.docs.first; - //Delete doc in firestore - await newest.reference.delete(); + //Delete doc in firestore + await newest.reference.delete(); - //Create FirestoreChatMessageField and return ChatMessage - return FirestoreChatMessageField.fromJson(newest.data()).message; + //Create FirestoreChatMessageField and return ChatMessage + return FirestoreChatMessageField.fromJson(newest.data()).message; + } } } @@ -115,14 +133,21 @@ class FirestoreChatMessageField { final ChatMessage message; ///Timestamp to keep messages in order. - Timestamp created = Timestamp.now(); + Timestamp createdAt = Timestamp.now(); + + /// Used to identify the sender of each message + final String createdBy; ///Constructor - FirestoreChatMessageField({required this.message, final Timestamp? created}) { - if (created == null) { - this.created = Timestamp.now(); + FirestoreChatMessageField({ + required this.message, + final Timestamp? createdAt, + required this.createdBy, + }) { + if (createdAt == null) { + this.createdAt = Timestamp.now(); } else { - this.created = created; + this.createdAt = createdAt; } } @@ -133,30 +158,35 @@ class FirestoreChatMessageField { case SystemChatMessage.defaultPrefix: return FirestoreChatMessageField( message: SystemChatMessage.fromJson(json['message']), - created: json['created'], + createdAt: json['createdAt'], + createdBy: json['createdBy'], ); case HumanChatMessage.defaultPrefix: return FirestoreChatMessageField( message: HumanChatMessage.fromJson(json['message']), - created: json['created'], + createdAt: json['createdAt'], + createdBy: json['createdBy'], ); case AIChatMessage.defaultPrefix: return FirestoreChatMessageField( message: AIChatMessage.fromJson(json['message']), - created: json['created'], + createdAt: json['createdAt'], + createdBy: json['createdBy'], ); case FunctionChatMessage.defaultPrefix: return FirestoreChatMessageField( message: FunctionChatMessage.fromJson(json['message']), - created: json['created'], + createdAt: json['createdAt'], + createdBy: json['createdBy'], ); case 'Custom': return FirestoreChatMessageField( message: CustomChatMessage.fromJson(json['message']), - created: json['created'], + createdAt: json['createdAt'], + createdBy: json['createdBy'], ); default: // ignore: avoid_dynamic_calls @@ -164,6 +194,10 @@ class FirestoreChatMessageField { } } - Map toJson() => - {'message': message.toJson(), 'created': created}; + /// Will be used to transform the class into a json object + Map toJson() => { + 'message': message.toJson(), + 'createdAt': createdAt, + 'createdBy': createdBy, + }; } From 279cbbe36121f5e0c923d7c67d87f75ae724cabb Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Sat, 4 May 2024 12:03:26 +0200 Subject: [PATCH 006/251] Remove userId from firestoreChatMessageField --- .../lib/src/stores/firestore.dart | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/packages/langchain_google/lib/src/stores/firestore.dart b/packages/langchain_google/lib/src/stores/firestore.dart index ac3813a7..98004a7d 100644 --- a/packages/langchain_google/lib/src/stores/firestore.dart +++ b/packages/langchain_google/lib/src/stores/firestore.dart @@ -35,11 +35,11 @@ final class FirestoreChatMessageHistory extends BaseChatMessageHistory { @override Future addChatMessage(final ChatMessage message) async { final FirestoreChatMessageField messageField = - 
FirestoreChatMessageField(message: message, createdBy: userId); + FirestoreChatMessageField(message: message); if (collectionReference == null) { throw Exception('Remember to initialize firestore'); } else { - await collectionReference!.doc().set(messageField.toJson()); + await collectionReference!.doc().set(messageField.toJson(userId)); } } @@ -86,6 +86,7 @@ final class FirestoreChatMessageHistory extends BaseChatMessageHistory { throw Exception('Remember to initialize firestore'); } else { final snapshot = await collectionReference! + .where('createdBy', isEqualTo: userId) .orderBy('createdAt', descending: false) .limit(1) .get(); @@ -110,6 +111,7 @@ final class FirestoreChatMessageHistory extends BaseChatMessageHistory { throw Exception('Remember to initialize firestore'); } else { final snapshot = await collectionReference! + .where('createdBy', isEqualTo: userId) .orderBy('createdAt', descending: true) .limit(1) .get(); @@ -135,14 +137,10 @@ class FirestoreChatMessageField { ///Timestamp to keep messages in order. Timestamp createdAt = Timestamp.now(); - /// Used to identify the sender of each message - final String createdBy; - ///Constructor FirestoreChatMessageField({ required this.message, final Timestamp? createdAt, - required this.createdBy, }) { if (createdAt == null) { this.createdAt = Timestamp.now(); @@ -159,34 +157,29 @@ class FirestoreChatMessageField { return FirestoreChatMessageField( message: SystemChatMessage.fromJson(json['message']), createdAt: json['createdAt'], - createdBy: json['createdBy'], ); case HumanChatMessage.defaultPrefix: return FirestoreChatMessageField( message: HumanChatMessage.fromJson(json['message']), createdAt: json['createdAt'], - createdBy: json['createdBy'], ); case AIChatMessage.defaultPrefix: return FirestoreChatMessageField( message: AIChatMessage.fromJson(json['message']), createdAt: json['createdAt'], - createdBy: json['createdBy'], ); case FunctionChatMessage.defaultPrefix: return FirestoreChatMessageField( message: FunctionChatMessage.fromJson(json['message']), createdAt: json['createdAt'], - createdBy: json['createdBy'], ); case 'Custom': return FirestoreChatMessageField( message: CustomChatMessage.fromJson(json['message']), createdAt: json['createdAt'], - createdBy: json['createdBy'], ); default: // ignore: avoid_dynamic_calls @@ -195,7 +188,7 @@ class FirestoreChatMessageField { } /// Will be used to transform the class into a json object - Map toJson() => { + Map toJson(String createdBy) => { 'message': message.toJson(), 'createdAt': createdAt, 'createdBy': createdBy, From a30d10980c705387927402ed16e0aaca93ed7773 Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Sat, 4 May 2024 15:45:16 +0200 Subject: [PATCH 007/251] Create example --- .../example/firestore_memory.dart | 83 ++++++++++++++++--- packages/langchain_google/pubspec.yaml | 2 + .../langchain_google/pubspec_overrides.yaml | 2 + 3 files changed, 76 insertions(+), 11 deletions(-) diff --git a/packages/langchain_google/example/firestore_memory.dart b/packages/langchain_google/example/firestore_memory.dart index fddcc1c7..858a829f 100644 --- a/packages/langchain_google/example/firestore_memory.dart +++ b/packages/langchain_google/example/firestore_memory.dart @@ -1,19 +1,80 @@ +import 'package:fake_cloud_firestore/fake_cloud_firestore.dart'; import 'package:langchain_google/langchain_google.dart'; +import 'package:langchain/langchain.dart'; Future main() async { - // final db = FakeFirebaseFirestore(); + await _example1(); +} + +Future _example1() async { + final db = 
FakeFirebaseFirestore(); + const String userId = '1234'; + + final FirestoreChatMessageHistory history = FirestoreChatMessageHistory( + collections: 'Langchain', + options: db.app.options, + userId: userId, + ); + //Very important to initialize firestore + await history.ensureFirestore(); + + await history.addHumanChatMessage('hi!'); + await history.addAIChatMessage('whats up?'); + + print(await history.getChatMessages()); +} + +//Nested firestore collection +Future _example2() async { + final db = FakeFirebaseFirestore(); + const String userId = '1234'; + + final FirestoreChatMessageHistory history = FirestoreChatMessageHistory( + collections: 'Users/$userId/Messages', + options: db.app.options, + userId: userId, + ); + + //Very important to initialize firestore + await history.ensureFirestore(); + + await history.addHumanChatMessage('hi!'); + await history.addAIChatMessage('whats up?'); + + print(await history.getChatMessages()); + // [HumanChatMessage(content='hi!', example=false), + // AIMessage(content='whats up?', example=false)] +} + +//Nested firestore collection +Future _example3() async { + final db = FakeFirebaseFirestore(); + const String userId = '1234'; + + //Create firestore history to give to ConversationBufferMemory + final FirestoreChatMessageHistory history = FirestoreChatMessageHistory( + collections: 'Users/$userId/Messages', + options: db.app.options, + userId: userId, + ); + + //Very important to initialize firestore + await history.ensureFirestore(); + + //Create ConversationBufferMemory instance to give to Chain + final ConversationBufferMemory memory = + ConversationBufferMemory(chatHistory: history); - // await db.collection('Users').doc('a').collection('Langchain').add({ - // 'username': 'Bob', - // }); + //Create llm instanse + final llm = FakeLLM(responses: ['Hey there what can I help you with?']); - // final FirestoreChatMessageHistory history = FirestoreChatMessageHistory( - // db.collection('Users').doc('a').collection('Langchain'), - // ); + //Create chain with memory + final ConversationChain chain = ConversationChain(llm: llm, memory: memory); - // await history.addHumanChatMessage('hi!'); - // await history.addAIChatMessage('whats up?'); + //Call llm + await chain.run('Hi there!'); - // print(await history.getChatMessages()); - // FirestoreChatMessageHistory his = FirestoreChatMessageHistory(collections: 'Users/', options: options) + print(await history.getChatMessages()); + // [HumanChatMessage(content='Hi there!', example=false), + // AIMessage(content='Hey there what can I help you with?', example=false)] } diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index c671b070..375d5cb4 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -32,3 +32,5 @@ dependencies: dev_dependencies: test: ^1.24.9 + fake_cloud_firestore: ^2.5.1 + langchain: ^0.6.0+1 diff --git a/packages/langchain_google/pubspec_overrides.yaml b/packages/langchain_google/pubspec_overrides.yaml index b0513992..29a2262a 100644 --- a/packages/langchain_google/pubspec_overrides.yaml +++ b/packages/langchain_google/pubspec_overrides.yaml @@ -1,5 +1,7 @@ # melos_managed_dependency_overrides: googleai_dart,vertex_ai,langchain_core dependency_overrides: + langchain: + path: ../langchain googleai_dart: path: ../googleai_dart langchain_core: From e5c11ffad16a6ab5c92c9486ed5bacde95762b9a Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Sat, 4 May 2024 15:52:37 +0200 Subject: [PATCH 008/251] Renaming some 
examples --- .../example/firestore_memory.dart | 34 +++++++++++-------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/packages/langchain_google/example/firestore_memory.dart b/packages/langchain_google/example/firestore_memory.dart index 858a829f..828cf9c2 100644 --- a/packages/langchain_google/example/firestore_memory.dart +++ b/packages/langchain_google/example/firestore_memory.dart @@ -3,10 +3,12 @@ import 'package:langchain_google/langchain_google.dart'; import 'package:langchain/langchain.dart'; Future main() async { - await _example1(); + await _history(); + // await _historyWithNestedCollection(); + // await _chainWithFirestoreHistory(); } -Future _example1() async { +Future _history() async { final db = FakeFirebaseFirestore(); const String userId = '1234'; @@ -25,7 +27,7 @@ Future _example1() async { } //Nested firestore collection -Future _example2() async { +Future _historyWithNestedCollection() async { final db = FakeFirebaseFirestore(); const String userId = '1234'; @@ -46,8 +48,8 @@ Future _example2() async { // AIMessage(content='whats up?', example=false)] } -//Nested firestore collection -Future _example3() async { +//Using in a chain +Future _chainWithFirestoreHistory() async { final db = FakeFirebaseFirestore(); const String userId = '1234'; @@ -61,20 +63,22 @@ Future _example3() async { //Very important to initialize firestore await history.ensureFirestore(); - //Create ConversationBufferMemory instance to give to Chain - final ConversationBufferMemory memory = - ConversationBufferMemory(chatHistory: history); - //Create llm instanse - final llm = FakeLLM(responses: ['Hey there what can I help you with?']); + final llm = FakeLLM( + responses: [ + "Hi there! It's nice to meet you. How can I help you today?", + ], + ); //Create chain with memory - final ConversationChain chain = ConversationChain(llm: llm, memory: memory); + final ConversationChain conversation = ConversationChain( + llm: llm, + memory: ConversationBufferMemory(chatHistory: history), + ); //Call llm - await chain.run('Hi there!'); + final output1 = await conversation.run('Hi there!'); - print(await history.getChatMessages()); - // [HumanChatMessage(content='Hi there!', example=false), - // AIMessage(content='Hey there what can I help you with?', example=false)] + print(output1); + // -> 'Hi there! It's nice to meet you. How can I help you today?' } From a30b41026d25d5efd7e2f30cb8e1013b78239417 Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Sun, 19 May 2024 13:07:56 +0200 Subject: [PATCH 009/251] migrate functioncall to toolcall --- .../lib/src/chat_models/types.dart | 28 +++++++++++-------- .../lib/src/stores/firestore.dart | 4 +-- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index 42c60c6a..79d97106 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -393,17 +393,17 @@ AIChatMessage{ @override Map toJson() => { - 'type': defaultPrefix, 'content': content, - 'functionCall': functionCall?.toJson(), + 'toolCalls': toolCalls.map((toolCall) => toolCall.toJson()).toList(), }; + /// Will be used to get an AIChatMessage from a json format factory AIChatMessage.fromJson(final Map json) { return AIChatMessage( content: json['content'], - functionCall: json['functionCall'] != null - ? 
AIChatMessageFunctionCall.fromJson(json['functionCall']) - : null, + toolCalls: (json['toolCalls'] as List) + .map((toolCallJson) => AIChatMessageToolCall.fromJson(toolCallJson)) + .toList(), ); } } @@ -475,16 +475,17 @@ AIChatMessageToolCall{ }'''; } - ///The toJson function will convert the ChatMessage to a json object + /// The toJson function will convert the AIChatMessageToolCall to a json object Map toJson() => { 'name': name, 'argumentsRaw': argumentsRaw, 'arguments': arguments, }; - ///The fromJson will create a AI - factory AIChatMessageFunctionCall.fromJson(final Map json) { - return AIChatMessageFunctionCall( + /// Will be used to get an AIChatMessageToolCall from a json format + factory AIChatMessageToolCall.fromJson(final Map json) { + return AIChatMessageToolCall( + id: json['id'], name: json['name'], argumentsRaw: json['argumentsRaw'], arguments: json['arguments'], @@ -544,12 +545,13 @@ ToolChatMessage{ @override Map toJson() => { 'type': defaultPrefix, - 'name': name, + 'toolCallId': toolCallId, 'content': content, }; - factory FunctionChatMessage.fromJson(final Map json) => - FunctionChatMessage(content: json['content'], name: json['name']); + /// Will be used to get an ToolChatMessage from a json format + factory ToolChatMessage.fromJson(final Map json) => + ToolChatMessage(content: json['content'], toolCallId: json['toolCallId']); } /// {@template custom_chat_message} @@ -602,6 +604,8 @@ CustomChatMessage{ 'content': content, 'role': role, }; + + /// Will be used to get an ToolChatMessage from a json format factory CustomChatMessage.fromJson(final Map json) => CustomChatMessage(content: json['content'], role: json['role']); } diff --git a/packages/langchain_google/lib/src/stores/firestore.dart b/packages/langchain_google/lib/src/stores/firestore.dart index 98004a7d..c27ff1ec 100644 --- a/packages/langchain_google/lib/src/stores/firestore.dart +++ b/packages/langchain_google/lib/src/stores/firestore.dart @@ -170,9 +170,9 @@ class FirestoreChatMessageField { createdAt: json['createdAt'], ); - case FunctionChatMessage.defaultPrefix: + case ToolChatMessage.defaultPrefix: return FirestoreChatMessageField( - message: FunctionChatMessage.fromJson(json['message']), + message: ToolChatMessage.fromJson(json['message']), createdAt: json['createdAt'], ); From 1fada5151cf45c1efd85a8fa435eeb4f0206256b Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Sun, 19 May 2024 13:21:53 +0200 Subject: [PATCH 010/251] move firstoreChatMessageHistory to langchain_firebase --- examples/docs_examples/pubspec.lock | 181 ------------------ .../lib/langchain_firebase.dart | 1 + .../lib/src/stores/firestore.dart | 0 packages/langchain_firebase/pubspec.lock | 24 +++ packages/langchain_firebase/pubspec.yaml | 3 +- .../example/firestore_memory.dart | 2 +- .../lib/langchain_google.dart | 1 - packages/langchain_google/pubspec.yaml | 3 +- .../langchain_google/pubspec_overrides.yaml | 2 + 9 files changed, 31 insertions(+), 186 deletions(-) rename packages/{langchain_google => langchain_firebase}/lib/src/stores/firestore.dart (100%) diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index c8d655ab..6234c279 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -9,14 +9,6 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.6" - _flutterfire_internals: - dependency: transitive - description: - name: _flutterfire_internals - sha256: "3dee3db3468c5f4640a4e8aa9c1e22561c298976d8c39ed2fdd456a9a3db26e1" - url: "https://pub.dev" 
- source: hosted - version: "1.3.32" args: dependency: transitive description: @@ -41,14 +33,6 @@ packages: url: "https://pub.dev" source: hosted version: "0.3.0" - boolean_selector: - dependency: transitive - description: - name: boolean_selector - sha256: "6cfb5af12253eaf2b368f07bacc5a80d1301a071c73360d746b7f2e32d762c66" - url: "https://pub.dev" - source: hosted - version: "2.1.1" characters: dependency: transitive description: @@ -64,38 +48,6 @@ packages: relative: true source: path version: "0.2.0" - clock: - dependency: transitive - description: - name: clock - sha256: cb6d7f03e1de671e34607e909a7213e31d7752be4fb66a86d29fe1eb14bfb5cf - url: "https://pub.dev" - source: hosted - version: "1.1.1" - cloud_firestore: - dependency: transitive - description: - name: cloud_firestore - sha256: "58ea77912e355cfd4e49f858e01d5ab4f99c7181ebd545b3fbeae25d6677dff6" - url: "https://pub.dev" - source: hosted - version: "4.17.2" - cloud_firestore_platform_interface: - dependency: transitive - description: - name: cloud_firestore_platform_interface - sha256: "9cb7d350c08c3d4987d34f418037aca3882649cdad5158440b4b932232edcf82" - url: "https://pub.dev" - source: hosted - version: "6.2.2" - cloud_firestore_web: - dependency: transitive - description: - name: cloud_firestore_web - sha256: "28db029962ebdcb8caf80f0c109f359ebcefa983b85fb62e5a220b1a577d84b5" - url: "https://pub.dev" - source: hosted - version: "3.12.2" collection: dependency: transitive description: @@ -136,14 +88,6 @@ packages: url: "https://pub.dev" source: hosted version: "6.0.0" - fake_async: - dependency: transitive - description: - name: fake_async - sha256: "511392330127add0b769b75a987850d136345d9227c6b94c96a04cf4a391bf78" - url: "https://pub.dev" - source: hosted - version: "1.3.1" fetch_api: dependency: transitive description: @@ -160,30 +104,6 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.2" - firebase_core: - dependency: transitive - description: - name: firebase_core - sha256: "4aef2a23d0f3265545807d68fbc2f76a6b994ca3c778d88453b99325abd63284" - url: "https://pub.dev" - source: hosted - version: "2.30.1" - firebase_core_platform_interface: - dependency: transitive - description: - name: firebase_core_platform_interface - sha256: c437ae5d17e6b5cc7981cf6fd458a5db4d12979905f9aafd1fea930428a9fe63 - url: "https://pub.dev" - source: hosted - version: "5.0.0" - firebase_core_web: - dependency: transitive - description: - name: firebase_core_web - sha256: "67f2fcc600fc78c2f731c370a3a5e6c87ee862e3a2fba6f951eca6d5dafe5c29" - url: "https://pub.dev" - source: hosted - version: "2.16.0" fixnum: dependency: transitive description: @@ -192,21 +112,6 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.0" - flutter: - dependency: transitive - description: flutter - source: sdk - version: "0.0.0" - flutter_test: - dependency: transitive - description: flutter - source: sdk - version: "0.0.0" - flutter_web_plugins: - dependency: transitive - description: flutter - source: sdk - version: "0.0.0" freezed_annotation: dependency: transitive description: @@ -375,46 +280,6 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.1" - leak_tracker: - dependency: transitive - description: - name: leak_tracker - sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" - url: "https://pub.dev" - source: hosted - version: "10.0.0" - leak_tracker_flutter_testing: - dependency: transitive - description: - name: leak_tracker_flutter_testing - sha256: 
b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 - url: "https://pub.dev" - source: hosted - version: "2.0.1" - leak_tracker_testing: - dependency: transitive - description: - name: leak_tracker_testing - sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 - url: "https://pub.dev" - source: hosted - version: "2.0.1" - matcher: - dependency: transitive - description: - name: matcher - sha256: d2323aa2060500f906aa31a895b4030b6da3ebdcc5619d14ce1aada65cd161cb - url: "https://pub.dev" - source: hosted - version: "0.12.16+1" - material_color_utilities: - dependency: transitive - description: - name: material_color_utilities - sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" - url: "https://pub.dev" - source: hosted - version: "0.8.0" math_expressions: dependency: transitive description: @@ -476,14 +341,6 @@ packages: url: "https://pub.dev" source: hosted version: "6.0.2" - plugin_platform_interface: - dependency: transitive - description: - name: plugin_platform_interface - sha256: "4820fbfdb9478b1ebae27888254d445073732dae3d6ea81f0b7e06d5dedc3f02" - url: "https://pub.dev" - source: hosted - version: "2.1.8" retry: dependency: transitive description: @@ -508,11 +365,6 @@ packages: url: "https://pub.dev" source: hosted version: "0.27.7" - sky_engine: - dependency: transitive - description: flutter - source: sdk - version: "0.0.99" source_span: dependency: transitive description: @@ -529,22 +381,6 @@ packages: url: "https://pub.dev" source: hosted version: "7.0.0" - stack_trace: - dependency: transitive - description: - name: stack_trace - sha256: "73713990125a6d93122541237550ee3352a2d84baad52d375a4cad2eb9b7ce0b" - url: "https://pub.dev" - source: hosted - version: "1.11.1" - stream_channel: - dependency: transitive - description: - name: stream_channel - sha256: ba2aa5d8cc609d96bbb2899c28934f9e1af5cddbd60a827822ea467161eb54e7 - url: "https://pub.dev" - source: hosted - version: "2.1.2" string_scanner: dependency: transitive description: @@ -561,14 +397,6 @@ packages: url: "https://pub.dev" source: hosted version: "1.2.1" - test_api: - dependency: transitive - description: - name: test_api - sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" - url: "https://pub.dev" - source: hosted - version: "0.6.1" typed_data: dependency: transitive description: @@ -600,14 +428,6 @@ packages: relative: true source: path version: "0.1.0" - vm_service: - dependency: transitive - description: - name: vm_service - sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 - url: "https://pub.dev" - source: hosted - version: "13.0.0" web: dependency: transitive description: @@ -618,4 +438,3 @@ packages: version: "0.5.1" sdks: dart: ">=3.3.0 <4.0.0" - flutter: ">=3.3.0" diff --git a/packages/langchain_firebase/lib/langchain_firebase.dart b/packages/langchain_firebase/lib/langchain_firebase.dart index 243b71ac..0b76e587 100644 --- a/packages/langchain_firebase/lib/langchain_firebase.dart +++ b/packages/langchain_firebase/lib/langchain_firebase.dart @@ -2,3 +2,4 @@ library; export 'src/chat_models/chat_models.dart'; +export 'src/stores/firestore.dart'; diff --git a/packages/langchain_google/lib/src/stores/firestore.dart b/packages/langchain_firebase/lib/src/stores/firestore.dart similarity index 100% rename from packages/langchain_google/lib/src/stores/firestore.dart rename to packages/langchain_firebase/lib/src/stores/firestore.dart diff --git a/packages/langchain_firebase/pubspec.lock 
b/packages/langchain_firebase/pubspec.lock index 52f48436..c13007f5 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -41,6 +41,30 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.1" + cloud_firestore: + dependency: "direct main" + description: + name: cloud_firestore + sha256: e461ea9ab23959102a780efcbccfe33c2ac46269928bc57093bbc0b526afc801 + url: "https://pub.dev" + source: hosted + version: "4.17.3" + cloud_firestore_platform_interface: + dependency: transitive + description: + name: cloud_firestore_platform_interface + sha256: "2e0b8db9a759ffc71086019f1bd27237e5e888ab1e99c507067ff8616acdfa24" + url: "https://pub.dev" + source: hosted + version: "6.2.3" + cloud_firestore_web: + dependency: transitive + description: + name: cloud_firestore_web + sha256: "37b6974bef5b0a7ecd31037ffb7d7bfe6bb9d2ac6c064fbea395411ef0a64d55" + url: "https://pub.dev" + source: hosted + version: "3.12.3" collection: dependency: "direct main" description: diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 279bc37a..acb0baa0 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -18,9 +18,10 @@ environment: flutter: ">=3.19.0" dependencies: - collection: '>=1.17.0 <1.19.0' + collection: ">=1.17.0 <1.19.0" firebase_app_check: ^0.2.2+5 firebase_core: ^2.31.0 + cloud_firestore: ^4.17.0 firebase_vertexai: ^0.1.0 langchain_core: ^0.3.1 meta: ^1.11.0 diff --git a/packages/langchain_google/example/firestore_memory.dart b/packages/langchain_google/example/firestore_memory.dart index 828cf9c2..82d9349c 100644 --- a/packages/langchain_google/example/firestore_memory.dart +++ b/packages/langchain_google/example/firestore_memory.dart @@ -1,6 +1,6 @@ import 'package:fake_cloud_firestore/fake_cloud_firestore.dart'; -import 'package:langchain_google/langchain_google.dart'; import 'package:langchain/langchain.dart'; +import 'package:langchain_firebase/langchain_firebase.dart'; Future main() async { await _history(); diff --git a/packages/langchain_google/lib/langchain_google.dart b/packages/langchain_google/lib/langchain_google.dart index f2e5374d..371e45ad 100644 --- a/packages/langchain_google/lib/langchain_google.dart +++ b/packages/langchain_google/lib/langchain_google.dart @@ -4,5 +4,4 @@ library; export 'src/chat_models/chat_models.dart'; export 'src/embeddings/embeddings.dart'; export 'src/llms/llms.dart'; -export 'src/stores/firestore.dart'; export 'src/vector_stores/vector_stores.dart'; diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index bc2c3e75..3070406c 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -28,8 +28,7 @@ dependencies: meta: ^1.11.0 uuid: ^4.3.3 vertex_ai: ^0.1.0 - firebase_core: ^2.25.5 - cloud_firestore: ^4.17.0 + langchain_firebase: ^0.1.0 dev_dependencies: test: ^1.24.9 diff --git a/packages/langchain_google/pubspec_overrides.yaml b/packages/langchain_google/pubspec_overrides.yaml index 50319fbe..1fabfd3c 100644 --- a/packages/langchain_google/pubspec_overrides.yaml +++ b/packages/langchain_google/pubspec_overrides.yaml @@ -4,3 +4,5 @@ dependency_overrides: path: ../langchain_core vertex_ai: path: ../vertex_ai + langchain_firebase: + path: ../langchain_firebase From 7bab052e164ac4ac898acef443db3526ce34ffeb Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Sun, 19 May 2024 13:58:10 +0200 Subject: [PATCH 011/251] Make user handle firebase 
app initialize --- .../Flutter/GeneratedPluginRegistrant.swift | 2 + .../langchain_firebase/example/pubspec.lock | 24 ++++ .../lib/src/stores/firestore.dart | 109 +++++++----------- .../example/firestore_memory.dart | 18 +-- packages/langchain_google/pubspec.yaml | 1 + 5 files changed, 79 insertions(+), 75 deletions(-) diff --git a/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift b/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift index 7bade716..2884d031 100644 --- a/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift +++ b/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift @@ -5,10 +5,12 @@ import FlutterMacOS import Foundation +import cloud_firestore import firebase_app_check import firebase_core func RegisterGeneratedPlugins(registry: FlutterPluginRegistry) { + FLTFirebaseFirestorePlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseFirestorePlugin")) FLTFirebaseAppCheckPlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseAppCheckPlugin")) FLTFirebaseCorePlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseCorePlugin")) } diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index c3e14ba2..6ea344fc 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -49,6 +49,30 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.1" + cloud_firestore: + dependency: transitive + description: + name: cloud_firestore + sha256: e461ea9ab23959102a780efcbccfe33c2ac46269928bc57093bbc0b526afc801 + url: "https://pub.dev" + source: hosted + version: "4.17.3" + cloud_firestore_platform_interface: + dependency: transitive + description: + name: cloud_firestore_platform_interface + sha256: "2e0b8db9a759ffc71086019f1bd27237e5e888ab1e99c507067ff8616acdfa24" + url: "https://pub.dev" + source: hosted + version: "6.2.3" + cloud_firestore_web: + dependency: transitive + description: + name: cloud_firestore_web + sha256: "37b6974bef5b0a7ecd31037ffb7d7bfe6bb9d2ac6c064fbea395411ef0a64d55" + url: "https://pub.dev" + source: hosted + version: "3.12.3" collection: dependency: transitive description: diff --git a/packages/langchain_firebase/lib/src/stores/firestore.dart b/packages/langchain_firebase/lib/src/stores/firestore.dart index c27ff1ec..e2db4685 100644 --- a/packages/langchain_firebase/lib/src/stores/firestore.dart +++ b/packages/langchain_firebase/lib/src/stores/firestore.dart @@ -10,13 +10,7 @@ final class FirestoreChatMessageHistory extends BaseChatMessageHistory { required this.collections, required this.options, required this.userId, - }); - - ///Function to initialize firebaseinstance - Future ensureFirestore() async { - await Firebase.initializeApp( - options: options, - ); + }) { collectionReference = FirebaseFirestore.instance.collection(collections); } @@ -30,51 +24,40 @@ final class FirestoreChatMessageHistory extends BaseChatMessageHistory { String userId; ///Firestore instance - CollectionReference>? 
collectionReference; + late CollectionReference> collectionReference; @override Future addChatMessage(final ChatMessage message) async { final FirestoreChatMessageField messageField = FirestoreChatMessageField(message: message); - if (collectionReference == null) { - throw Exception('Remember to initialize firestore'); - } else { - await collectionReference!.doc().set(messageField.toJson(userId)); - } + + await collectionReference.doc().set(messageField.toJson(userId)); } @override Future clear() async { - if (collectionReference == null) { - throw Exception('Remember to initialize firestore'); - } else { - final snapshot = await collectionReference!.get(); + final snapshot = await collectionReference.get(); - //Delete all docs in firestore - for (final doc in snapshot.docs) { - await doc.reference.delete(); - } + //Delete all docs in firestore + for (final doc in snapshot.docs) { + await doc.reference.delete(); } } @override Future> getChatMessages() async { - if (collectionReference == null) { - throw Exception('Remember to initialize firestore'); - } else { - //Get chat messages in ascending order so the newest message is the last in the list - final snapshot = await collectionReference! - .where('createdBy', isEqualTo: userId) - .orderBy('createdAt', descending: false) - .get(); - - //Take each doc and add it to the conversation list. - final List conversation = snapshot.docs.map((final doc) { - return FirestoreChatMessageField.fromJson(doc.data()).message; - }).toList(); - - return conversation; - } + //Get chat messages in ascending order so the newest message is the last in the list + final snapshot = await collectionReference + .where('createdBy', isEqualTo: userId) + .orderBy('createdAt', descending: false) + .get(); + + //Take each doc and add it to the conversation list. + final List conversation = snapshot.docs.map((final doc) { + return FirestoreChatMessageField.fromJson(doc.data()).message; + }).toList(); + + return conversation; } /// Removes and returns the first (oldest) element of the history. @@ -82,24 +65,20 @@ final class FirestoreChatMessageHistory extends BaseChatMessageHistory { /// The history must not be empty when this method is called. @override Future removeFirst() async { - if (collectionReference == null) { - throw Exception('Remember to initialize firestore'); - } else { - final snapshot = await collectionReference! - .where('createdBy', isEqualTo: userId) - .orderBy('createdAt', descending: false) - .limit(1) - .get(); + final snapshot = await collectionReference + .where('createdBy', isEqualTo: userId) + .orderBy('createdAt', descending: false) + .limit(1) + .get(); - //get oldest document - final oldest = snapshot.docs.first; + //get oldest document + final oldest = snapshot.docs.first; - //Delete doc in firestore - await oldest.reference.delete(); + //Delete doc in firestore + await oldest.reference.delete(); - //Create FirestoreChatMessageField and return ChatMessage - return FirestoreChatMessageField.fromJson(oldest.data()).message; - } + //Create FirestoreChatMessageField and return ChatMessage + return FirestoreChatMessageField.fromJson(oldest.data()).message; } /// Removes and returns the last (newest) element of the history. @@ -107,24 +86,20 @@ final class FirestoreChatMessageHistory extends BaseChatMessageHistory { /// The history must not be empty when this method is called. 
@override Future removeLast() async { - if (collectionReference == null) { - throw Exception('Remember to initialize firestore'); - } else { - final snapshot = await collectionReference! - .where('createdBy', isEqualTo: userId) - .orderBy('createdAt', descending: true) - .limit(1) - .get(); + final snapshot = await collectionReference + .where('createdBy', isEqualTo: userId) + .orderBy('createdAt', descending: true) + .limit(1) + .get(); - //get newest document - final newest = snapshot.docs.first; + //get newest document + final newest = snapshot.docs.first; - //Delete doc in firestore - await newest.reference.delete(); + //Delete doc in firestore + await newest.reference.delete(); - //Create FirestoreChatMessageField and return ChatMessage - return FirestoreChatMessageField.fromJson(newest.data()).message; - } + //Create FirestoreChatMessageField and return ChatMessage + return FirestoreChatMessageField.fromJson(newest.data()).message; } } diff --git a/packages/langchain_google/example/firestore_memory.dart b/packages/langchain_google/example/firestore_memory.dart index 82d9349c..11c08946 100644 --- a/packages/langchain_google/example/firestore_memory.dart +++ b/packages/langchain_google/example/firestore_memory.dart @@ -1,4 +1,5 @@ import 'package:fake_cloud_firestore/fake_cloud_firestore.dart'; +import 'package:firebase_core/firebase_core.dart'; import 'package:langchain/langchain.dart'; import 'package:langchain_firebase/langchain_firebase.dart'; @@ -12,13 +13,14 @@ Future _history() async { final db = FakeFirebaseFirestore(); const String userId = '1234'; + // Initialize your firebase app + await Firebase.initializeApp(name: db.app.name, options: db.app.options); + final FirestoreChatMessageHistory history = FirestoreChatMessageHistory( collections: 'Langchain', options: db.app.options, userId: userId, ); - //Very important to initialize firestore - await history.ensureFirestore(); await history.addHumanChatMessage('hi!'); await history.addAIChatMessage('whats up?'); @@ -31,15 +33,15 @@ Future _historyWithNestedCollection() async { final db = FakeFirebaseFirestore(); const String userId = '1234'; + // Initialize your firebase app + await Firebase.initializeApp(name: db.app.name, options: db.app.options); + final FirestoreChatMessageHistory history = FirestoreChatMessageHistory( collections: 'Users/$userId/Messages', options: db.app.options, userId: userId, ); - //Very important to initialize firestore - await history.ensureFirestore(); - await history.addHumanChatMessage('hi!'); await history.addAIChatMessage('whats up?'); @@ -53,6 +55,9 @@ Future _chainWithFirestoreHistory() async { final db = FakeFirebaseFirestore(); const String userId = '1234'; + // Initialize your firebase app + await Firebase.initializeApp(name: db.app.name, options: db.app.options); + //Create firestore history to give to ConversationBufferMemory final FirestoreChatMessageHistory history = FirestoreChatMessageHistory( collections: 'Users/$userId/Messages', @@ -60,9 +65,6 @@ Future _chainWithFirestoreHistory() async { userId: userId, ); - //Very important to initialize firestore - await history.ensureFirestore(); - //Create llm instanse final llm = FakeLLM( responses: [ diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 3070406c..7b441954 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -29,6 +29,7 @@ dependencies: uuid: ^4.3.3 vertex_ai: ^0.1.0 langchain_firebase: ^0.1.0 + firebase_core: ^2.31.0 dev_dependencies: 
  test: ^1.24.9

From 85a2580481f97a51ade85f6220d94e9e7dc216dc Mon Sep 17 00:00:00 2001
From: Kenneth Knudsen
Date: Sun, 19 May 2024 15:42:55 +0200
Subject: [PATCH 012/251] Inserted missing JSON fields for AIChatMessage and AIChatMessageToolCall

---
 packages/langchain_core/lib/src/chat_models/types.dart | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart
index 79d97106..1ada3bbe 100644
--- a/packages/langchain_core/lib/src/chat_models/types.dart
+++ b/packages/langchain_core/lib/src/chat_models/types.dart
@@ -393,11 +393,12 @@ AIChatMessage{
   @override
   Map toJson() => {
+        'type': defaultPrefix,
         'content': content,
         'toolCalls': toolCalls.map((toolCall) => toolCall.toJson()).toList(),
       };

-  /// Will be used to get an AIChatMessage from a json format
+  /// Will be used to get an [AIChatMessage] from a json format
   factory AIChatMessage.fromJson(final Map json) {
     return AIChatMessage(
       content: json['content'],
@@ -475,8 +476,9 @@ AIChatMessageToolCall{
 }''';
   }

-  /// The toJson function will convert the AIChatMessageToolCall to a json object
+  /// The toJson function will convert the [AIChatMessageToolCall] to a json object
   Map toJson() => {
+        'id': id,
         'name': name,
         'argumentsRaw': argumentsRaw,
         'arguments': arguments,
@@ -549,7 +551,7 @@ ToolChatMessage{
         'content': content,
       };

-  /// Will be used to get an ToolChatMessage from a json format
+  /// Will be used to get a [ToolChatMessage] from a json format
   factory ToolChatMessage.fromJson(final Map json) =>
       ToolChatMessage(content: json['content'], toolCallId: json['toolCallId']);
 }
@@ -605,7 +607,7 @@ CustomChatMessage{
         'content': content,
         'role': role,
       };
-  /// Will be used to get an ToolChatMessage from a json format
+  /// Will be used to get a [CustomChatMessage] from a json format
   factory CustomChatMessage.fromJson(final Map json) =>
       CustomChatMessage(content: json['content'], role: json['role']);
 }

From 2a6f3793f7b9b47956780a0a0e2b06333d40a778 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 20 May 2024 13:45:47 +0200
Subject: [PATCH 013/251] build(deps): bump actions/checkout from 4.1.5 to 4.1.6 (#427)

Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.5 to 4.1.6.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/44c2b7a8a4ea60a981eaca3cf939b5f4305c123b...a5ac7e51b41094c92402da3b24376905380afc29)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
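A minimal round-trip sketch of the `AIChatMessage` serialization extended in the patch above (not part of the patch series; it assumes a `langchain_core` import that exposes these classes, and the tool-call values are illustrative):

```dart
// Round-trip an AIChatMessage through the JSON representation that now
// includes the message 'type' and the tool call 'id'.
// Assumption: the import path and the sample values below are illustrative.
import 'package:langchain_core/chat_models.dart';

void main() {
  final message = AIChatMessage(
    content: 'The answer is 4.',
    toolCalls: [
      AIChatMessageToolCall(
        id: 'call_1', // serialized since this patch
        name: 'calculator',
        argumentsRaw: '{"input": "2+2"}',
        arguments: {'input': '2+2'},
      ),
    ],
  );

  // toJson() now writes 'type', 'content' and the tool calls (with 'id').
  final json = message.toJson();

  // fromJson() restores the message, including the tool call.
  final restored = AIChatMessage.fromJson(json);
  print(restored.toolCalls.first.id); // call_1
}
```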
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docs.yaml | 2 +- .github/workflows/test.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 59926dbf..5520d768 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -27,7 +27,7 @@ jobs: timeout-minutes: 30 steps: - name: Checkout repository - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 - name: Setup Pages uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 114e4fab..2b4ff0c5 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 with: ref: "${{ github.event.pull_request.base.sha }}" # Required for pull_request_target fetch-depth: 0 From ccb03a2a91407c31d6518777e985b7e030c6bc9f Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Mon, 20 May 2024 16:53:41 +0200 Subject: [PATCH 014/251] fix: Make quote nullable in MessageContentTextAnnotationsFileCitation (#428) --- ...ontent_text_annotations_file_citation.dart | 2 +- .../src/generated/schema/schema.freezed.dart | 34 ++++++++++++------- .../lib/src/generated/schema/schema.g.dart | 21 ++++++++---- packages/openai_dart/oas/openapi_curated.yaml | 2 +- 4 files changed, 38 insertions(+), 21 deletions(-) diff --git a/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart b/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart index 5317431b..1e6807c9 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart @@ -20,7 +20,7 @@ class MessageContentTextAnnotationsFileCitation @JsonKey(name: 'file_id') required String fileId, /// The specific quote in the file. - required String quote, + @JsonKey(includeIfNull: false) String? quote, }) = _MessageContentTextAnnotationsFileCitation; /// Object construction from a JSON representation diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 472cae5b..6417d1c3 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -40379,7 +40379,8 @@ mixin _$MessageContentTextAnnotationsFileCitation { String get fileId => throw _privateConstructorUsedError; /// The specific quote in the file. - String get quote => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? 
get quote => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) @@ -40396,7 +40397,9 @@ abstract class $MessageContentTextAnnotationsFileCitationCopyWith<$Res> { _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, MessageContentTextAnnotationsFileCitation>; @useResult - $Res call({@JsonKey(name: 'file_id') String fileId, String quote}); + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(includeIfNull: false) String? quote}); } /// @nodoc @@ -40415,17 +40418,17 @@ class _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, @override $Res call({ Object? fileId = null, - Object? quote = null, + Object? quote = freezed, }) { return _then(_value.copyWith( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, - quote: null == quote + quote: freezed == quote ? _value.quote : quote // ignore: cast_nullable_to_non_nullable - as String, + as String?, ) as $Val); } } @@ -40439,7 +40442,9 @@ abstract class _$$MessageContentTextAnnotationsFileCitationImplCopyWith<$Res> __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(name: 'file_id') String fileId, String quote}); + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(includeIfNull: false) String? quote}); } /// @nodoc @@ -40456,17 +40461,17 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> @override $Res call({ Object? fileId = null, - Object? quote = null, + Object? quote = freezed, }) { return _then(_$MessageContentTextAnnotationsFileCitationImpl( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, - quote: null == quote + quote: freezed == quote ? _value.quote : quote // ignore: cast_nullable_to_non_nullable - as String, + as String?, )); } } @@ -40476,7 +40481,8 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> class _$MessageContentTextAnnotationsFileCitationImpl extends _MessageContentTextAnnotationsFileCitation { const _$MessageContentTextAnnotationsFileCitationImpl( - {@JsonKey(name: 'file_id') required this.fileId, required this.quote}) + {@JsonKey(name: 'file_id') required this.fileId, + @JsonKey(includeIfNull: false) this.quote}) : super._(); factory _$MessageContentTextAnnotationsFileCitationImpl.fromJson( @@ -40490,7 +40496,8 @@ class _$MessageContentTextAnnotationsFileCitationImpl /// The specific quote in the file. @override - final String quote; + @JsonKey(includeIfNull: false) + final String? quote; @override String toString() { @@ -40532,7 +40539,7 @@ abstract class _MessageContentTextAnnotationsFileCitation extends MessageContentTextAnnotationsFileCitation { const factory _MessageContentTextAnnotationsFileCitation( {@JsonKey(name: 'file_id') required final String fileId, - required final String quote}) = + @JsonKey(includeIfNull: false) final String? quote}) = _$MessageContentTextAnnotationsFileCitationImpl; const _MessageContentTextAnnotationsFileCitation._() : super._(); @@ -40548,7 +40555,8 @@ abstract class _MessageContentTextAnnotationsFileCitation @override /// The specific quote in the file. - String get quote; + @JsonKey(includeIfNull: false) + String? 
get quote; @override @JsonKey(ignore: true) _$$MessageContentTextAnnotationsFileCitationImplCopyWith< diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 03a49b59..eeb6a84e 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -3906,15 +3906,24 @@ _$MessageContentTextAnnotationsFileCitationImpl Map json) => _$MessageContentTextAnnotationsFileCitationImpl( fileId: json['file_id'] as String, - quote: json['quote'] as String, + quote: json['quote'] as String?, ); Map _$$MessageContentTextAnnotationsFileCitationImplToJson( - _$MessageContentTextAnnotationsFileCitationImpl instance) => - { - 'file_id': instance.fileId, - 'quote': instance.quote, - }; + _$MessageContentTextAnnotationsFileCitationImpl instance) { + final val = { + 'file_id': instance.fileId, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('quote', instance.quote); + return val; +} _$MessageDeltaContentImageUrlObjectImpl _$$MessageDeltaContentImageUrlObjectImplFromJson( diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 9490261d..46201dd4 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -4890,7 +4890,7 @@ components: type: string required: - file_id - - quote + # - quote # https://github.com/openai/openai-openapi/issues/263 MessageContentTextAnnotationsFilePathObject: type: object description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. From 4f9cd7cea4ef5d941a4c9321d379ac510f092200 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Mon, 20 May 2024 17:05:58 +0200 Subject: [PATCH 015/251] fix: Rename CreateRunRequestModel factories names (#429) --- .../generated/schema/create_run_request.dart | 4 +- .../src/generated/schema/schema.freezed.dart | 119 +++++++++--------- packages/openai_dart/oas/main.dart | 74 +++++------ .../test/openai_client_assistants_test.dart | 10 +- 4 files changed, 104 insertions(+), 103 deletions(-) diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index edd89f09..95ad74a8 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -221,12 +221,12 @@ sealed class CreateRunRequestModel with _$CreateRunRequestModel { const CreateRunRequestModel._(); /// Available models. Mind that the list may not be exhaustive nor up-to-date. - const factory CreateRunRequestModel.enumeration( + const factory CreateRunRequestModel.model( RunModels value, ) = CreateRunRequestModelEnumeration; /// The ID of the model to use for this request. 
- const factory CreateRunRequestModel.string( + const factory CreateRunRequestModel.modelId( String value, ) = CreateRunRequestModelString; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 6417d1c3..abfb4fc9 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -30240,9 +30240,9 @@ abstract class _CreateRunRequest extends CreateRunRequest { CreateRunRequestModel _$CreateRunRequestModelFromJson( Map json) { switch (json['runtimeType']) { - case 'enumeration': + case 'model': return CreateRunRequestModelEnumeration.fromJson(json); - case 'string': + case 'modelId': return CreateRunRequestModelString.fromJson(json); default: @@ -30259,40 +30259,39 @@ mixin _$CreateRunRequestModel { Object get value => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function(RunModels value) enumeration, - required TResult Function(String value) string, + required TResult Function(RunModels value) model, + required TResult Function(String value) modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(RunModels value)? enumeration, - TResult? Function(String value)? string, + TResult? Function(RunModels value)? model, + TResult? Function(String value)? modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(RunModels value)? enumeration, - TResult Function(String value)? string, + TResult Function(RunModels value)? model, + TResult Function(String value)? modelId, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(CreateRunRequestModelEnumeration value) - enumeration, - required TResult Function(CreateRunRequestModelString value) string, + required TResult Function(CreateRunRequestModelEnumeration value) model, + required TResult Function(CreateRunRequestModelString value) modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult? Function(CreateRunRequestModelString value)? string, + TResult? Function(CreateRunRequestModelEnumeration value)? model, + TResult? Function(CreateRunRequestModelString value)? modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult Function(CreateRunRequestModelString value)? string, + TResult Function(CreateRunRequestModelEnumeration value)? model, + TResult Function(CreateRunRequestModelString value)? modelId, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -30358,7 +30357,7 @@ class _$CreateRunRequestModelEnumerationImpl extends CreateRunRequestModelEnumeration { const _$CreateRunRequestModelEnumerationImpl(this.value, {final String? $type}) - : $type = $type ?? 'enumeration', + : $type = $type ?? 
'model', super._(); factory _$CreateRunRequestModelEnumerationImpl.fromJson( @@ -30373,7 +30372,7 @@ class _$CreateRunRequestModelEnumerationImpl @override String toString() { - return 'CreateRunRequestModel.enumeration(value: $value)'; + return 'CreateRunRequestModel.model(value: $value)'; } @override @@ -30399,30 +30398,30 @@ class _$CreateRunRequestModelEnumerationImpl @override @optionalTypeArgs TResult when({ - required TResult Function(RunModels value) enumeration, - required TResult Function(String value) string, + required TResult Function(RunModels value) model, + required TResult Function(String value) modelId, }) { - return enumeration(value); + return model(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(RunModels value)? enumeration, - TResult? Function(String value)? string, + TResult? Function(RunModels value)? model, + TResult? Function(String value)? modelId, }) { - return enumeration?.call(value); + return model?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(RunModels value)? enumeration, - TResult Function(String value)? string, + TResult Function(RunModels value)? model, + TResult Function(String value)? modelId, required TResult orElse(), }) { - if (enumeration != null) { - return enumeration(value); + if (model != null) { + return model(value); } return orElse(); } @@ -30430,31 +30429,30 @@ class _$CreateRunRequestModelEnumerationImpl @override @optionalTypeArgs TResult map({ - required TResult Function(CreateRunRequestModelEnumeration value) - enumeration, - required TResult Function(CreateRunRequestModelString value) string, + required TResult Function(CreateRunRequestModelEnumeration value) model, + required TResult Function(CreateRunRequestModelString value) modelId, }) { - return enumeration(this); + return model(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult? Function(CreateRunRequestModelString value)? string, + TResult? Function(CreateRunRequestModelEnumeration value)? model, + TResult? Function(CreateRunRequestModelString value)? modelId, }) { - return enumeration?.call(this); + return model?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult Function(CreateRunRequestModelString value)? string, + TResult Function(CreateRunRequestModelEnumeration value)? model, + TResult Function(CreateRunRequestModelString value)? modelId, required TResult orElse(), }) { - if (enumeration != null) { - return enumeration(this); + if (model != null) { + return model(this); } return orElse(); } @@ -30521,7 +30519,7 @@ class __$$CreateRunRequestModelStringImplCopyWithImpl<$Res> @JsonSerializable() class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { const _$CreateRunRequestModelStringImpl(this.value, {final String? $type}) - : $type = $type ?? 'string', + : $type = $type ?? 
'modelId', super._(); factory _$CreateRunRequestModelStringImpl.fromJson( @@ -30536,7 +30534,7 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { @override String toString() { - return 'CreateRunRequestModel.string(value: $value)'; + return 'CreateRunRequestModel.modelId(value: $value)'; } @override @@ -30561,30 +30559,30 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { @override @optionalTypeArgs TResult when({ - required TResult Function(RunModels value) enumeration, - required TResult Function(String value) string, + required TResult Function(RunModels value) model, + required TResult Function(String value) modelId, }) { - return string(value); + return modelId(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(RunModels value)? enumeration, - TResult? Function(String value)? string, + TResult? Function(RunModels value)? model, + TResult? Function(String value)? modelId, }) { - return string?.call(value); + return modelId?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(RunModels value)? enumeration, - TResult Function(String value)? string, + TResult Function(RunModels value)? model, + TResult Function(String value)? modelId, required TResult orElse(), }) { - if (string != null) { - return string(value); + if (modelId != null) { + return modelId(value); } return orElse(); } @@ -30592,31 +30590,30 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { @override @optionalTypeArgs TResult map({ - required TResult Function(CreateRunRequestModelEnumeration value) - enumeration, - required TResult Function(CreateRunRequestModelString value) string, + required TResult Function(CreateRunRequestModelEnumeration value) model, + required TResult Function(CreateRunRequestModelString value) modelId, }) { - return string(this); + return modelId(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult? Function(CreateRunRequestModelString value)? string, + TResult? Function(CreateRunRequestModelEnumeration value)? model, + TResult? Function(CreateRunRequestModelString value)? modelId, }) { - return string?.call(this); + return modelId?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult Function(CreateRunRequestModelString value)? string, + TResult Function(CreateRunRequestModelEnumeration value)? model, + TResult Function(CreateRunRequestModelString value)? modelId, required TResult orElse(), }) { - if (string != null) { - return string(this); + if (modelId != null) { + return modelId(this); } return orElse(); } diff --git a/packages/openai_dart/oas/main.dart b/packages/openai_dart/oas/main.dart index 1f2fe406..f1fbee08 100644 --- a/packages/openai_dart/oas/main.dart +++ b/packages/openai_dart/oas/main.dart @@ -77,53 +77,55 @@ String? 
_onSchemaUnionFactoryName( // Assistant 'AssistantModelEnumeration' => 'model', 'AssistantModelString' => 'modelId', + 'CreateAssistantRequestResponseFormatAssistantsResponseFormat' => + 'format', + 'CreateAssistantRequestResponseFormatEnumeration' => 'mode', + 'CreateMessageRequestContentListMessageContent' => 'parts', + 'CreateMessageRequestContentString' => 'text', + 'CreateRunRequestModelEnumeration' => 'model', + 'CreateRunRequestModelString' => 'modelId', + 'CreateRunRequestResponseFormatAssistantsResponseFormat' => 'format', + 'CreateRunRequestResponseFormatEnumeration' => 'mode', + 'CreateRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', + 'CreateRunRequestToolChoiceEnumeration' => 'mode', + 'CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat' => + 'format', + 'CreateThreadAndRunRequestResponseFormatEnumeration' => 'mode', + 'CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', + 'CreateThreadAndRunRequestToolChoiceEnumeration' => 'mode', 'MessageContentImageFileObject' => 'imageFile', - 'MessageDeltaContentImageFileObject' => 'imageFile', - 'MessageContentTextObject' => 'text', - 'MessageDeltaContentTextObject' => 'text', 'MessageContentImageUrlObject' => 'imageUrl', 'MessageContentTextAnnotationsFileCitationObject' => 'fileCitation', - 'MessageDeltaContentTextAnnotationsFileCitationObject' => 'fileCitation', 'MessageContentTextAnnotationsFilePathObject' => 'filePath', + 'MessageContentTextObject' => 'text', + 'MessageDeltaContentImageFileObject' => 'imageFile', + 'MessageDeltaContentTextAnnotationsFileCitationObject' => 'fileCitation', 'MessageDeltaContentTextAnnotationsFilePathObject' => 'filePath', + 'MessageDeltaContentTextObject' => 'text', + 'ModifyAssistantRequestResponseFormatAssistantsResponseFormat' => + 'format', + 'ModifyAssistantRequestResponseFormatEnumeration' => 'mode', 'RunModelEnumeration' => 'model', 'RunModelString' => 'modelId', - 'ThreadAndRunModelEnumeration' => 'model', - 'ThreadAndRunModelString' => 'modelId', - 'RunStepDetailsToolCallsCodeObject' => 'codeInterpreter', + 'RunObjectResponseFormatAssistantsResponseFormat' => 'format', + 'RunObjectResponseFormatEnumeration' => 'mode', + 'RunObjectToolChoiceAssistantsNamedToolChoice' => 'tool', + 'RunObjectToolChoiceEnumeration' => 'mode', + 'RunStepDeltaStepDetailsMessageCreationObject' => 'messageCreation', 'RunStepDeltaStepDetailsToolCallsCodeObject' => 'codeInterpreter', - 'RunStepDetailsToolCallsFileSearchObject' => 'fileSearch', + 'RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' => 'image', + 'RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' => 'logs', 'RunStepDeltaStepDetailsToolCallsFileSearchObject' => 'fileSearch', - 'RunStepDetailsToolCallsFunctionObject' => 'function', 'RunStepDeltaStepDetailsToolCallsFunctionObject' => 'function', - 'RunStepDetailsToolCallsCodeOutputLogsObject' => 'logs', - 'RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' => 'logs', - 'RunStepDetailsToolCallsCodeOutputImageObject' => 'image', - 'RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' => 'image', + 'RunStepDeltaStepDetailsToolCallsObject' => 'toolCalls', 'RunStepDetailsMessageCreationObject' => 'messageCreation', - 'RunStepDeltaStepDetailsMessageCreationObject' => 'messageCreation', + 'RunStepDetailsToolCallsCodeObject' => 'codeInterpreter', + 'RunStepDetailsToolCallsCodeOutputImageObject' => 'image', + 'RunStepDetailsToolCallsCodeOutputLogsObject' => 'logs', + 'RunStepDetailsToolCallsFileSearchObject' => 'fileSearch', + 
'RunStepDetailsToolCallsFunctionObject' => 'function', 'RunStepDetailsToolCallsObject' => 'toolCalls', - 'RunStepDeltaStepDetailsToolCallsObject' => 'toolCalls', - 'CreateRunRequestResponseFormatEnumeration' => 'mode', - 'CreateThreadAndRunRequestResponseFormatEnumeration' => 'mode', - 'RunObjectResponseFormatEnumeration' => 'mode', - 'CreateAssistantRequestResponseFormatEnumeration' => 'mode', - 'ModifyAssistantRequestResponseFormatEnumeration' => 'mode', - 'CreateRunRequestResponseFormatAssistantsResponseFormat' => 'format', - 'CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat' => - 'format', - 'RunObjectResponseFormatAssistantsResponseFormat' => 'format', - 'CreateAssistantRequestResponseFormatAssistantsResponseFormat' => - 'format', - 'ModifyAssistantRequestResponseFormatAssistantsResponseFormat' => - 'format', - 'CreateRunRequestToolChoiceEnumeration' => 'mode', - 'CreateThreadAndRunRequestToolChoiceEnumeration' => 'mode', - 'RunObjectToolChoiceEnumeration' => 'mode', - 'CreateRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', - 'CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', - 'RunObjectToolChoiceAssistantsNamedToolChoice' => 'tool', - 'CreateMessageRequestContentString' => 'text', - 'CreateMessageRequestContentListMessageContent' => 'parts', + 'ThreadAndRunModelEnumeration' => 'model', + 'ThreadAndRunModelString' => 'modelId', _ => null, }; diff --git a/packages/openai_dart/test/openai_client_assistants_test.dart b/packages/openai_dart/test/openai_client_assistants_test.dart index cf622c2c..61479182 100644 --- a/packages/openai_dart/test/openai_client_assistants_test.dart +++ b/packages/openai_dart/test/openai_client_assistants_test.dart @@ -8,6 +8,8 @@ import 'package:test/test.dart'; // https://platform.openai.com/docs/assistants/overview void main() { + const defaultModel = 'gpt-4o'; + group('OpenAI Assistants API tests', timeout: const Timeout(Duration(minutes: 5)), () { late OpenAIClient client; @@ -23,14 +25,13 @@ void main() { }); Future createAssistant() async { - const model = 'gpt-4'; const name = 'Math Tutor'; const description = 'Help students with math homework'; const instructions = 'You are a personal math tutor. Write and run code to answer math questions.'; final res = await client.createAssistant( request: const CreateAssistantRequest( - model: AssistantModel.modelId(model), + model: AssistantModel.modelId(defaultModel), name: name, description: description, instructions: instructions, @@ -42,7 +43,7 @@ void main() { expect(res.createdAt, greaterThan(0)); expect(res.name, name); expect(res.description, description); - expect(res.model, model); + expect(res.model, startsWith(defaultModel)); expect(res.instructions, instructions); expect(res.tools, hasLength(1)); final tool = res.tools.first; @@ -114,6 +115,7 @@ void main() { assistantId: assistantId, instructions: 'Please address the user as Jane Doe. 
The user has a premium account.', + model: const CreateRunRequestModel.modelId(defaultModel), ), ); expect(res.id, isNotNull); @@ -129,7 +131,7 @@ void main() { expect(res.cancelledAt, isNull); expect(res.failedAt, isNull); expect(res.completedAt, isNull); - expect(res.model, startsWith('gpt-4')); + expect(res.model, startsWith(defaultModel)); expect(res.instructions, isNotEmpty); expect(res.tools, hasLength(1)); expect(res.metadata, isEmpty); From 697740635d1caddbf96645d5090a143a50716088 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Mon, 20 May 2024 17:21:17 +0200 Subject: [PATCH 016/251] fix: digest path param in Ollama blob endpoints (#430) --- packages/ollama_dart/README.md | 4 ++-- .../ollama_dart/lib/src/generated/client.dart | 22 ++++++++----------- packages/ollama_dart/oas/ollama-curated.yaml | 11 +++++----- 3 files changed, 17 insertions(+), 20 deletions(-) diff --git a/packages/ollama_dart/README.md b/packages/ollama_dart/README.md index 0aaf5a97..27895b5b 100644 --- a/packages/ollama_dart/README.md +++ b/packages/ollama_dart/README.md @@ -251,11 +251,11 @@ await for (final res in stream) { #### Check if a Blob Exists -Check if a blob is known to the server. +Ensures that the file blob used for a FROM or ADAPTER field exists on the server. This is checking your Ollama server and not Ollama.ai. ```dart await client.checkBlob( - name: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', + digest: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', ); ``` diff --git a/packages/ollama_dart/lib/src/generated/client.dart b/packages/ollama_dart/lib/src/generated/client.dart index 3ab44797..a22d8729 100644 --- a/packages/ollama_dart/lib/src/generated/client.dart +++ b/packages/ollama_dart/lib/src/generated/client.dart @@ -605,26 +605,23 @@ class OllamaClient { /// Create a blob from a file. Returns the server file path. /// - /// `name`: the SHA256 digest of the blob + /// `digest`: the SHA256 digest of the blob /// /// `request`: No description /// /// `POST` `http://localhost:11434/api/blobs/{digest}` Future createBlob({ - required String name, + required String digest, String? request, }) async { final _ = await makeRequest( baseUrl: 'http://localhost:11434/api', - path: '/blobs/{digest}', + path: '/blobs/$digest', method: HttpMethod.post, isMultipart: false, requestType: 'application/octet-stream', responseType: '', body: request, - queryParams: { - 'name': name, - }, ); } @@ -632,24 +629,23 @@ class OllamaClient { // METHOD: checkBlob // ------------------------------------------ - /// Check to see if a blob exists on the Ollama server which is useful when creating models. + /// Ensures that the file blob used for a FROM or ADAPTER field exists on the server. /// - /// `name`: the SHA256 digest of the blob + /// This is checking your Ollama server and not Ollama.ai. 
+ /// + /// `digest`: the SHA256 digest of the blob /// /// `HEAD` `http://localhost:11434/api/blobs/{digest}` Future checkBlob({ - required String name, + required String digest, }) async { final _ = await makeRequest( baseUrl: 'http://localhost:11434/api', - path: '/blobs/{digest}', + path: '/blobs/$digest', method: HttpMethod.head, isMultipart: false, requestType: '', responseType: '', - queryParams: { - 'name': name, - }, ); } } diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index b63d0c21..876bab50 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -198,10 +198,11 @@ paths: operationId: checkBlob tags: - Models - summary: Check to see if a blob exists on the Ollama server which is useful when creating models. + summary: Ensures that the file blob used for a FROM or ADAPTER field exists on the server. + description: This is checking your Ollama server and not Ollama.ai. parameters: - - in: query - name: name + - in: path + name: digest schema: type: string required: true @@ -218,8 +219,8 @@ paths: - Models summary: Create a blob from a file. Returns the server file path. parameters: - - in: query - name: name + - in: path + name: digest schema: type: string required: true From 4de9cfd87d57a294c4ffef9d9beeac77a5ddfae6 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Mon, 20 May 2024 17:27:24 +0200 Subject: [PATCH 017/251] docs: Fix lint issues in langchain_firebase example --- examples/browser_summarizer/pubspec.lock | 4 ++-- examples/hello_world_flutter/pubspec.lock | 4 ++-- .../langchain_firebase/example/lib/main.dart | 2 +- .../langchain_firebase/example/pubspec.lock | 24 +++++++++---------- packages/langchain_firebase/pubspec.lock | 24 +++++++++---------- 5 files changed, 29 insertions(+), 29 deletions(-) diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index 5050c14b..cc499c81 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -291,10 +291,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" nested: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 05dca7e4..a12c6037 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -175,10 +175,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" nested: dependency: transitive description: diff --git a/packages/langchain_firebase/example/lib/main.dart b/packages/langchain_firebase/example/lib/main.dart index 7cbb5e8e..44e019e9 100644 --- a/packages/langchain_firebase/example/lib/main.dart +++ b/packages/langchain_firebase/example/lib/main.dart @@ -580,7 +580,7 @@ class MessageWidget extends StatelessWidget { decoration: BoxDecoration( color: isFromUser ? 
Theme.of(context).colorScheme.primaryContainer - : Theme.of(context).colorScheme.surfaceVariant, + : Theme.of(context).colorScheme.surfaceContainerHighest, borderRadius: BorderRadius.circular(18), ), padding: const EdgeInsets.symmetric( diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 6ea344fc..0dd384a2 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -257,26 +257,26 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" + sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" url: "https://pub.dev" source: hosted - version: "10.0.0" + version: "10.0.4" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 + sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.3" leak_tracker_testing: dependency: transitive description: name: leak_tracker_testing - sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 + sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.1" lints: dependency: transitive description: @@ -313,10 +313,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" path: dependency: transitive description: @@ -398,10 +398,10 @@ packages: dependency: transitive description: name: test_api - sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" + sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" url: "https://pub.dev" source: hosted - version: "0.6.1" + version: "0.7.0" typed_data: dependency: transitive description: @@ -430,10 +430,10 @@ packages: dependency: transitive description: name: vm_service - sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 + sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" url: "https://pub.dev" source: hosted - version: "13.0.0" + version: "14.2.1" web: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index c13007f5..89e38672 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -211,26 +211,26 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" + sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" url: "https://pub.dev" source: hosted - version: "10.0.0" + version: "10.0.4" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 + sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.3" leak_tracker_testing: dependency: transitive description: name: leak_tracker_testing - sha256: 
a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 + sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.1" matcher: dependency: transitive description: @@ -251,10 +251,10 @@ packages: dependency: "direct main" description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" path: dependency: transitive description: @@ -336,10 +336,10 @@ packages: dependency: transitive description: name: test_api - sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" + sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" url: "https://pub.dev" source: hosted - version: "0.6.1" + version: "0.7.0" typed_data: dependency: transitive description: @@ -368,10 +368,10 @@ packages: dependency: transitive description: name: vm_service - sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 + sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" url: "https://pub.dev" source: hosted - version: "13.0.0" + version: "14.2.1" web: dependency: transitive description: From 001dc5c08d415242d963e4402c78dac257e0bd3d Mon Sep 17 00:00:00 2001 From: David Miguel Date: Mon, 20 May 2024 17:28:01 +0200 Subject: [PATCH 018/251] chore(release): publish packages - langchain_firebase@0.1.0+1 - ollama_dart@0.1.0+1 - openai_dart@0.3.2+1 - langchain_ollama@0.2.1+1 - langchain_openai@0.6.1+1 --- CHANGELOG.md | 49 +++++++++++++++++++ examples/browser_summarizer/pubspec.yaml | 2 +- examples/docs_examples/pubspec.yaml | 4 +- examples/hello_world_backend/pubspec.yaml | 2 +- examples/hello_world_cli/pubspec.yaml | 2 +- examples/hello_world_flutter/pubspec.yaml | 2 +- packages/langchain_chroma/pubspec.yaml | 2 +- packages/langchain_firebase/CHANGELOG.md | 4 ++ .../langchain_firebase/example/pubspec.yaml | 2 +- packages/langchain_firebase/pubspec.yaml | 2 +- packages/langchain_ollama/CHANGELOG.md | 4 ++ packages/langchain_ollama/pubspec.yaml | 4 +- packages/langchain_openai/CHANGELOG.md | 4 ++ packages/langchain_openai/pubspec.yaml | 4 +- packages/langchain_pinecone/pubspec.yaml | 2 +- packages/langchain_supabase/pubspec.yaml | 2 +- packages/ollama_dart/CHANGELOG.md | 4 ++ packages/ollama_dart/pubspec.yaml | 2 +- packages/openai_dart/CHANGELOG.md | 5 ++ packages/openai_dart/pubspec.yaml | 2 +- 20 files changed, 87 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a668f597..079c8450 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,46 @@ Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. +## 2024-05-20 + +### Changes + +--- + +Packages with breaking changes: + +- There are no breaking changes in this release. + +Packages with other changes: + +- [`langchain_firebase` - `v0.1.0+1`](#langchain_firebase---v0101) +- [`ollama_dart` - `v0.1.0+1`](#ollama_dart---v0101) +- [`openai_dart` - `v0.3.2+1`](#openai_dart---v0321) +- [`langchain_ollama` - `v0.2.1+1`](#langchain_ollama---v0211) +- [`langchain_openai` - `v0.6.1+1`](#langchain_openai---v0611) + +Packages with dependency updates only: + +> Packages listed below depend on other packages in this workspace that have had changes. 
Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project. + +- `langchain_ollama` - `v0.2.1+1` +- `langchain_openai` - `v0.6.1+1` + +--- + +#### `openai_dart` - `v0.3.2+1` + +- **FIX**: Rename CreateRunRequestModel factories names ([#429](https://github.com/davidmigloz/langchain_dart/issues/429)). ([fd15793b](https://github.com/davidmigloz/langchain_dart/commit/fd15793b3c4ac94dfc90567b4a709e1458f4e0e8)) +- **FIX**: Make quote nullable in MessageContentTextAnnotationsFileCitation ([#428](https://github.com/davidmigloz/langchain_dart/issues/428)). ([75b95645](https://github.com/davidmigloz/langchain_dart/commit/75b95645a58d51b369a01e261393e17f7463e1f5)) + +#### `ollama_dart` - `v0.1.0+1` + +- **FIX**: digest path param in Ollama blob endpoints ([#430](https://github.com/davidmigloz/langchain_dart/issues/430)). ([2e9e935a](https://github.com/davidmigloz/langchain_dart/commit/2e9e935aefd74e5e9e09a23188a6c77ce535661d)) + +#### `langchain_firebase` - `v0.1.0+1` + +- **DOCS**: Fix lint issues in langchain_firebase example. ([f85a6ad7](https://github.com/davidmigloz/langchain_dart/commit/f85a6ad755e00c513bd4349663e33d40be8a696c)) + ## 2024-05-14 ### Changes @@ -2387,6 +2427,15 @@ Packages with changes: ### Changes +#### `langchain` - `v0.0.1` + + - Initial public release. +t/commit/e22f22c89f188a019b96a7c0003dbd26471bebb7)) + +## 2023-07-02 + +### Changes + #### `langchain` - `v0.0.1` - Initial public release. diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 9a2c4936..2ab1aff4 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -15,7 +15,7 @@ dependencies: js: ^0.7.1 langchain: ^0.7.1 langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 shared_preferences: ^2.2.2 flutter: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 0b57edbc..37662d4c 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -12,5 +12,5 @@ dependencies: langchain_community: 0.2.0+1 langchain_google: ^0.5.0 langchain_mistralai: ^0.2.0+1 - langchain_ollama: ^0.2.1 - langchain_openai: ^0.6.1 + langchain_ollama: ^0.2.1+1 + langchain_openai: ^0.6.1+1 diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index fa43a6d8..4c7f0059 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -8,6 +8,6 @@ environment: dependencies: langchain: ^0.7.1 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 shelf: ^1.4.1 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index 581a3927..d814f7c4 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -8,4 +8,4 @@ environment: dependencies: langchain: ^0.7.1 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index c000d972..9fc3a925 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -12,7 +12,7 @@ dependencies: equatable: ^2.0.5 flutter_bloc: ^8.1.5 langchain: ^0.7.1 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 flutter: uses-material-design: true diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 
3da841ef..197d0776 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -27,4 +27,4 @@ dev_dependencies: test: ^1.25.2 langchain: ^0.7.1 langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index 862156b6..a7350a9b 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+1 + + - **DOCS**: Fix lint issues in langchain_firebase example. ([f85a6ad7](https://github.com/davidmigloz/langchain_dart/commit/f85a6ad755e00c513bd4349663e33d40be8a696c)) + ## 0.1.0 - **FEAT**: Add support for ChatFirebaseVertexAI ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f)) diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index 76900f0d..a4857f0d 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -14,7 +14,7 @@ dependencies: sdk: flutter flutter_markdown: ^0.6.22 langchain: 0.7.1 - langchain_firebase: 0.1.0 + langchain_firebase: 0.1.0+1 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index acb0baa0..7397caf0 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). -version: 0.1.0 +version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index 6f38a23b..81bb56d2 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.1+1 + + - Update a dependency to the latest release. + ## 0.2.1 - **FEAT**: Handle finish reason in ChatOllama ([#416](https://github.com/davidmigloz/langchain_dart/issues/416)). ([a5e1af13](https://github.com/davidmigloz/langchain_dart/commit/a5e1af13ef4d2db690ab599dbf5e42f28659a059)) diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 9b4736df..adfa39d4 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_ollama description: LangChain.dart integration module for Ollama (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). 
-version: 0.2.1 +version: 0.2.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart @@ -22,7 +22,7 @@ dependencies: langchain_core: ^0.3.1 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.1.0 + ollama_dart: ^0.1.0+1 uuid: ^4.3.3 dev_dependencies: diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index 2d1e113a..d1a0368a 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.6.1+1 + + - Update a dependency to the latest release. + ## 0.6.1 - **FEAT**: Add GPT-4o to model catalog ([#420](https://github.com/davidmigloz/langchain_dart/issues/420)). ([96214307](https://github.com/davidmigloz/langchain_dart/commit/96214307ec8ae045dade687d4c623bd4dc1be896)) diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index b70412db..ca76313e 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_openai description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). -version: 0.6.1 +version: 0.6.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart @@ -22,7 +22,7 @@ dependencies: langchain_core: ^0.3.1 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.3.2 + openai_dart: ^0.3.2+1 uuid: ^4.3.3 dev_dependencies: diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index 141a96f4..c64dda9f 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -25,4 +25,4 @@ dependencies: dev_dependencies: test: ^1.25.2 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index c480cdc7..5450773b 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -26,4 +26,4 @@ dev_dependencies: test: ^1.25.2 langchain: ^0.7.1 langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index 4b5ff033..f7c943f9 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+1 + + - **FIX**: digest path param in Ollama blob endpoints ([#430](https://github.com/davidmigloz/langchain_dart/issues/430)). ([2e9e935a](https://github.com/davidmigloz/langchain_dart/commit/2e9e935aefd74e5e9e09a23188a6c77ce535661d)) + ## 0.1.0 > Note: This release has breaking changes. diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 30f792f5..e2eee5ca 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: ollama_dart description: Dart Client for the Ollama API (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). 
-version: 0.1.0 +version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index 632fa141..6e366631 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.3.2+1 + + - **FIX**: Rename CreateRunRequestModel factories names ([#429](https://github.com/davidmigloz/langchain_dart/issues/429)). ([fd15793b](https://github.com/davidmigloz/langchain_dart/commit/fd15793b3c4ac94dfc90567b4a709e1458f4e0e8)) + - **FIX**: Make quote nullable in MessageContentTextAnnotationsFileCitation ([#428](https://github.com/davidmigloz/langchain_dart/issues/428)). ([75b95645](https://github.com/davidmigloz/langchain_dart/commit/75b95645a58d51b369a01e261393e17f7463e1f5)) + ## 0.3.2 - **FEAT**: Add GPT-4o to model catalog ([#420](https://github.com/davidmigloz/langchain_dart/issues/420)). ([96214307](https://github.com/davidmigloz/langchain_dart/commit/96214307ec8ae045dade687d4c623bd4dc1be896)) diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index a6fd761e..30fee90f 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: openai_dart description: Dart Client for the OpenAI API (completions, chat, embeddings, etc.). -version: 0.3.2 +version: 0.3.2+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart From 0091fc1d3986bbb1cdcf028afafa35b75f6d2a0b Mon Sep 17 00:00:00 2001 From: David Miguel Date: Tue, 21 May 2024 15:31:10 +0200 Subject: [PATCH 019/251] test: Fix ollama_dart tests --- packages/ollama_dart/example/ollama_dart_example.dart | 2 +- packages/ollama_dart/test/ollama_dart_models_test.dart | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/ollama_dart/example/ollama_dart_example.dart b/packages/ollama_dart/example/ollama_dart_example.dart index 15ef53d9..fab5f712 100644 --- a/packages/ollama_dart/example/ollama_dart_example.dart +++ b/packages/ollama_dart/example/ollama_dart_example.dart @@ -217,7 +217,7 @@ Future _pushModelStream(final OllamaClient client) async { Future _checkBlob(final OllamaClient client) async { await client.checkBlob( - name: + digest: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', ); } diff --git a/packages/ollama_dart/test/ollama_dart_models_test.dart b/packages/ollama_dart/test/ollama_dart_models_test.dart index f77a9d32..abb3cef3 100644 --- a/packages/ollama_dart/test/ollama_dart_models_test.dart +++ b/packages/ollama_dart/test/ollama_dart_models_test.dart @@ -149,14 +149,14 @@ void main() { test('Test check blob', skip: true, () async { await client.checkBlob( - name: + digest: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', ); }); test('Test create blob', skip: true, () async { await client.createBlob( - name: + digest: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', request: 'file contents', ); From a89f89a1fb45dd7c2f305a824d07b557855f5c72 Mon Sep 17 00:00:00 2001 From: Alfredo Bautista Date: Thu, 23 May 2024 10:21:01 +0200 Subject: [PATCH 020/251] feat: Implement anthropic_sdk_dart, a 
Dart client for Anthropic API (#433) Co-authored-by: David Miguel --- packages/anthropic_sdk_dart/CHANGELOG.md | 3 + packages/anthropic_sdk_dart/LICENSE | 21 + packages/anthropic_sdk_dart/README.md | 172 + .../anthropic_sdk_dart/analysis_options.yaml | 1 + packages/anthropic_sdk_dart/build.yaml | 13 + .../lib/anthropic_sdk_dart.dart | 7 + .../anthropic_sdk_dart/lib/src/client.dart | 101 + .../lib/src/extensions.dart | 13 + .../lib/src/generated/client.dart | 395 ++ .../lib/src/generated/schema/block.dart | 55 + .../schema/create_message_request.dart | 293 + .../create_message_request_metadata.dart | 44 + .../generated/schema/image_block_source.dart | 74 + .../lib/src/generated/schema/message.dart | 162 + .../src/generated/schema/message_delta.dart | 61 + .../generated/schema/message_delta_usage.dart | 51 + .../src/generated/schema/message_role.dart | 17 + .../schema/message_stream_event.dart | 124 + .../schema/message_stream_event_type.dart | 27 + .../lib/src/generated/schema/schema.dart | 25 + .../src/generated/schema/schema.freezed.dart | 5620 +++++++++++++++++ .../lib/src/generated/schema/schema.g.dart | 404 ++ .../lib/src/generated/schema/stop_reason.dart | 28 + .../generated/schema/text_block_delta.dart | 44 + .../lib/src/generated/schema/usage.dart | 54 + .../lib/src/http_client/http_client.dart | 4 + .../lib/src/http_client/http_client_html.dart | 18 + .../lib/src/http_client/http_client_io.dart | 12 + .../lib/src/http_client/http_client_stub.dart | 10 + .../oas/anthropic_openapi_curated.yaml | 562 ++ packages/anthropic_sdk_dart/oas/main.dart | 45 + packages/anthropic_sdk_dart/pubspec.lock | 627 ++ packages/anthropic_sdk_dart/pubspec.yaml | 34 + .../test/messages_test.dart | 150 + 34 files changed, 9271 insertions(+) create mode 100644 packages/anthropic_sdk_dart/CHANGELOG.md create mode 100644 packages/anthropic_sdk_dart/LICENSE create mode 100644 packages/anthropic_sdk_dart/README.md create mode 100644 packages/anthropic_sdk_dart/analysis_options.yaml create mode 100644 packages/anthropic_sdk_dart/build.yaml create mode 100644 packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/client.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/extensions.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/client.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart create mode 100644 
packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart create mode 100644 packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml create mode 100644 packages/anthropic_sdk_dart/oas/main.dart create mode 100644 packages/anthropic_sdk_dart/pubspec.lock create mode 100644 packages/anthropic_sdk_dart/pubspec.yaml create mode 100644 packages/anthropic_sdk_dart/test/messages_test.dart diff --git a/packages/anthropic_sdk_dart/CHANGELOG.md b/packages/anthropic_sdk_dart/CHANGELOG.md new file mode 100644 index 00000000..90f8e244 --- /dev/null +++ b/packages/anthropic_sdk_dart/CHANGELOG.md @@ -0,0 +1,3 @@ +## 0.0.1-dev.1 + +- Bootstrap package. diff --git a/packages/anthropic_sdk_dart/LICENSE b/packages/anthropic_sdk_dart/LICENSE new file mode 100644 index 00000000..f407ffdd --- /dev/null +++ b/packages/anthropic_sdk_dart/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 David Miguel Lozano + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/packages/anthropic_sdk_dart/README.md b/packages/anthropic_sdk_dart/README.md new file mode 100644 index 00000000..6ccb7d3a --- /dev/null +++ b/packages/anthropic_sdk_dart/README.md @@ -0,0 +1,172 @@ +# Anthropic Dart Client + +[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) +[![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) +[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) +[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) + +Unofficial Dart client for [Anthropic](https://docs.anthropic.com/en/api) API (aka Claude API). 
+ +## Features + +- Fully type-safe, [documented](https://pub.dev/documentation/anthropic_sdk_dart/latest) and tested +- All platforms supported (including streaming on web) +- Custom base URL, headers and query params support (e.g. HTTP proxies) +- Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) + +**Supported endpoints:** + +- Messages (with streaming support) + +## Table of contents + +- [Usage](#usage) + * [Authentication](#authentication) + * [Messages](#messages) +- [Advance Usage](#advance-usage) + * [Default HTTP client](#default-http-client) + * [Custom HTTP client](#custom-http-client) + * [Using a proxy](#using-a-proxy) + + [HTTP proxy](#http-proxy) + + [SOCKS5 proxy](#socks5-proxy) +- [Acknowledgements](#acknowledgements) +- [License](#license) + +## Usage + +Refer to the [documentation](https://docs.anthropic.com) for more information about the API. + +### Authentication + +The Anthropic API uses API keys for authentication. Visit the [Anthropic console](https://console.anthropic.com/settings/keys) to retrieve the API key you'll use in your requests. + +> **Remember that your API key is a secret!** +> Do not share it with others or expose it in any client-side code (browsers, apps). Production requests must be routed through your own backend server where your API key can be securely loaded from an environment variable or key management service. + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; +final client = AnthropicClient(apiKey: apiKey); +``` + +### Messages + +Send a structured list of input messages with text and/or image content, and the model will generate the next message in the conversation. + +**Create a Message:** + +```dart +final res = await client.createMessage( + request: CreateMessageRequest( + model: Model.model(Models.claude3Opus20240229), + maxTokens: 1024, + messages: [ + Message( + role: MessageRole.user, + content: 'Hello, Claude', + ), + ], + ), +); +print(res.content.text); +// Hi there! How can I help you today? +``` + +`Model` is a sealed class that offers two ways to specify the model: +- `Model.modelId('model-id')`: the model ID as string (e.g. `'claude-3-haiku-20240307'`). +- `Model.model(Models.claude3Opus20240229)`: a value from `Models` enum which lists all the available models. + +Mind that this list may not be up-to-date. Refer to the [documentation](https://docs.anthropic.com/en/docs/models-overview) for the updated list. + +**Streaming messages:** + +```dart +final stream = await client.createMessageStream( + request: CreateMessageRequest( + model: Model.model(Models.claude3Opus20240229), + maxTokens: 1024, + messages: [ + Message( + role: MessageRole.user, + content: 'Hello, Claude', + ), + ], + ), +); +String text = ''; +await for (final res in stream) { + res.map( + messageStart: (e) {}, + messageDelta: (e) {}, + messageStop: (e) {}, + contentBlockStart: (e) {}, + contentBlockDelta: (e) { + text += e.delta.text; + }, + contentBlockStop: (e) {}, + ping: (e) {}, + ); +} +print(text); +// Hi there! How can I help you today? 
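+
+// If only the text deltas are needed, the event handling above can be
+// collapsed with the generated `maybeMap` helper (a sketch, assuming the
+// default freezed `maybeMap` generation for this union type):
+//
+//   text += res.maybeMap(
+//     contentBlockDelta: (e) => e.delta.text,
+//     orElse: () => '',
+//   );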
+``` + +## Advance Usage + +### Default HTTP client + +By default, the client uses `https://api.anthropic.com/v1` as the `baseUrl` and the following implementations of `http.Client`: + +- Non-web: [`IOClient`](https://pub.dev/documentation/http/latest/io_client/IOClient-class.html) +- Web: [`FetchClient`](https://pub.dev/documentation/fetch_client/latest/fetch_client/FetchClient-class.html) (to support streaming on web) + +### Custom HTTP client + +You can always provide your own implementation of `http.Client` for further customization: + +```dart +final client = AnthropicClient( + apiKey: 'MISTRAL_API_KEY', + client: MyHttpClient(), +); +``` + +### Using a proxy + +#### HTTP proxy + +You can use your own HTTP proxy by overriding the `baseUrl` and providing your required `headers`: + +```dart +final client = AnthropicClient( + baseUrl: 'https://my-proxy.com', + headers: { + 'x-my-proxy-header': 'value', + }, +); +``` + +If you need further customization, you can always provide your own `http.Client`. + +#### SOCKS5 proxy + +To use a SOCKS5 proxy, you can use the [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package: + +```dart +final baseHttpClient = HttpClient(); +SocksTCPClient.assignToHttpClient(baseHttpClient, [ + ProxySettings(InternetAddress.loopbackIPv4, 1080), +]); +final httpClient = IOClient(baseClient); + +final client = AnthropicClient( + client: httpClient, +); +``` + +## Acknowledgements + +The generation of this client was made possible by the [openapi_spec](https://github.com/tazatechnology/openapi_spec) package. + +## License + +Anthropic Dart Client is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). diff --git a/packages/anthropic_sdk_dart/analysis_options.yaml b/packages/anthropic_sdk_dart/analysis_options.yaml new file mode 100644 index 00000000..f04c6cf0 --- /dev/null +++ b/packages/anthropic_sdk_dart/analysis_options.yaml @@ -0,0 +1 @@ +include: ../../analysis_options.yaml diff --git a/packages/anthropic_sdk_dart/build.yaml b/packages/anthropic_sdk_dart/build.yaml new file mode 100644 index 00000000..dee719ac --- /dev/null +++ b/packages/anthropic_sdk_dart/build.yaml @@ -0,0 +1,13 @@ +targets: + $default: + builders: + source_gen|combining_builder: + options: + ignore_for_file: + - prefer_final_parameters + - require_trailing_commas + - non_constant_identifier_names + - unnecessary_null_checks + json_serializable: + options: + explicit_to_json: true diff --git a/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart new file mode 100644 index 00000000..65378d70 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart @@ -0,0 +1,7 @@ +/// Dart Client for the Anthropic API (Claude 3 Opus, Sonnet, Haiku, etc.). +library anthropic_sdk_dart; + +export 'src/client.dart'; +export 'src/extensions.dart'; +export 'src/generated/client.dart' show AnthropicClientException; +export 'src/generated/schema/schema.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/client.dart b/packages/anthropic_sdk_dart/lib/src/client.dart new file mode 100644 index 00000000..17c4e2a1 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/client.dart @@ -0,0 +1,101 @@ +// ignore_for_file: use_super_parameters +import 'dart:async'; +import 'dart:convert'; + +import 'package:http/http.dart' as http; + +import 'generated/client.dart' as g; +import 'generated/schema/schema.dart'; +import 'http_client/http_client.dart'; + +/// Client for Anthropic API. 
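+///
+/// A minimal construction sketch (assuming the API key is provided via an
+/// environment variable, as in the package README):
+///
+/// ```dart
+/// final client = AnthropicClient(
+///   apiKey: Platform.environment['ANTHROPIC_API_KEY'],
+/// );
+/// ```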
+/// +/// Please see https://docs.anthropic.com/en/api for more details. +class AnthropicClient extends g.AnthropicClient { + /// Create a new Anthropic API client. + /// + /// Main configuration options: + /// - `apiKey`: your Anthropic API key. You can find your API key in the + /// [Anthropic console](https://console.anthropic.com/settings/keys). + /// + /// Advance configuration options: + /// - `baseUrl`: the base URL to use. Defaults to `https://api.anthropic.com/v1`. + /// You can override this to use a different API URL, or to use a proxy. + /// - `headers`: global headers to send with every request. You can use + /// this to set custom headers, or to override the default headers. + /// - `queryParams`: global query parameters to send with every request. You + /// can use this to set custom query parameters. + /// - `client`: the HTTP client to use. You can set your own HTTP client if + /// you need further customization (e.g. to use a Socks5 proxy). + AnthropicClient({ + final String? apiKey, + final String? baseUrl, + final Map? headers, + final Map? queryParams, + final http.Client? client, + }) : super( + apiKey: apiKey ?? '', + baseUrl: baseUrl, + headers: { + 'anthropic-version': '2023-06-01', + ...?headers, + }, + queryParams: queryParams ?? const {}, + client: client ?? createDefaultHttpClient(), + ); + + // ------------------------------------------ + // METHOD: createMessageStream + // ------------------------------------------ + + /// Create a Message + /// + /// Send a structured list of input messages with text and/or image content, and the + /// model will generate the next message in the conversation. + /// + /// The Messages API can be used for either single queries or stateless multi-turn + /// conversations. + /// + /// `request`: The request parameters for creating a message. + /// + /// `POST` `https://api.anthropic.com/v1/messages` + Stream createMessageStream({ + required final CreateMessageRequest request, + }) async* { + final r = await makeRequestStream( + baseUrl: 'https://api.anthropic.com/v1', + path: '/messages', + method: g.HttpMethod.post, + requestType: 'application/json', + responseType: 'application/json', + body: request.copyWith(stream: true), + headerParams: { + if (apiKey.isNotEmpty) 'x-api-key': apiKey, + }, + ); + yield* r.stream + .transform(const _AnthropicStreamTransformer()) // + .map( + (final d) => MessageStreamEvent.fromJson(json.decode(d)), + ); + } + + @override + Future onRequest(final http.BaseRequest request) { + return onRequestHandler(request); + } +} + +class _AnthropicStreamTransformer + extends StreamTransformerBase, String> { + const _AnthropicStreamTransformer(); + + @override + Stream bind(final Stream> stream) { + return stream // + .transform(utf8.decoder) // + .transform(const LineSplitter()) // + .where((final i) => i.startsWith('data: ')) + .map((final item) => item.substring(6)); + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/extensions.dart b/packages/anthropic_sdk_dart/lib/src/extensions.dart new file mode 100644 index 00000000..749979e5 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/extensions.dart @@ -0,0 +1,13 @@ +import 'generated/schema/schema.dart'; + +/// Extension methods for [MessageContent]. +extension MessageContentX on MessageContent { + /// Returns the text content of the message. 
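+  ///
+  /// A minimal usage sketch (assuming `res` is the [Message] returned by
+  /// `createMessage`):
+  ///
+  /// ```dart
+  /// final text = res.content.text;
+  /// ```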
+ String get text { + return map( + text: (text) => text.value, + blocks: (blocks) => + blocks.value.whereType().map((t) => t.text).join('\n'), + ); + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/client.dart b/packages/anthropic_sdk_dart/lib/src/generated/client.dart new file mode 100644 index 00000000..0f3e82a8 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/client.dart @@ -0,0 +1,395 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target, unused_import + +import 'dart:convert'; +import 'dart:typed_data'; + +import 'package:http/http.dart' as http; +import 'package:http/retry.dart'; +import 'package:meta/meta.dart'; + +import 'schema/schema.dart'; + +/// Enum of HTTP methods +enum HttpMethod { get, put, post, delete, options, head, patch, trace } + +// ========================================== +// CLASS: AnthropicClientException +// ========================================== + +/// HTTP exception handler for AnthropicClient +class AnthropicClientException implements Exception { + AnthropicClientException({ + required this.message, + required this.uri, + required this.method, + this.code, + this.body, + }); + + final String message; + final Uri uri; + final HttpMethod method; + final int? code; + final Object? body; + + @override + String toString() { + Object? data; + try { + data = body is String ? jsonDecode(body as String) : body.toString(); + } catch (e) { + data = body.toString(); + } + final s = JsonEncoder.withIndent(' ').convert({ + 'uri': uri.toString(), + 'method': method.name.toUpperCase(), + 'code': code, + 'message': message, + 'body': data, + }); + return 'AnthropicClientException($s)'; + } +} + +// ========================================== +// CLASS: AnthropicClient +// ========================================== + +/// Client for Anthropic API (v.1) +/// +/// API Spec for Anthropic API. Please see https://docs.anthropic.com/en/api for more details. +class AnthropicClient { + /// Creates a new AnthropicClient instance. + /// + /// - [AnthropicClient.baseUrl] Override base URL (default: server url defined in spec) + /// - [AnthropicClient.headers] Global headers to be sent with every request + /// - [AnthropicClient.queryParams] Global query parameters to be sent with every request + /// - [AnthropicClient.client] Override HTTP client to use for requests + AnthropicClient({ + this.apiKey = '', + this.baseUrl, + this.headers = const {}, + this.queryParams = const {}, + http.Client? client, + }) : assert( + baseUrl == null || baseUrl.startsWith('http'), + 'baseUrl must start with http', + ), + assert( + baseUrl == null || !baseUrl.endsWith('/'), + 'baseUrl must not end with /', + ), + client = RetryClient(client ?? http.Client()); + + /// Override base URL (default: server url defined in spec) + final String? 
baseUrl; + + /// Global headers to be sent with every request + final Map headers; + + /// Global query parameters to be sent with every request + final Map queryParams; + + /// HTTP client for requests + final http.Client client; + + /// Authentication related variables + final String apiKey; + + // ------------------------------------------ + // METHOD: endSession + // ------------------------------------------ + + /// Close the HTTP client and end session + void endSession() => client.close(); + + // ------------------------------------------ + // METHOD: onRequest + // ------------------------------------------ + + /// Middleware for HTTP requests (user can override) + /// + /// The request can be of type [http.Request] or [http.MultipartRequest] + Future onRequest(http.BaseRequest request) { + return Future.value(request); + } + + // ------------------------------------------ + // METHOD: onStreamedResponse + // ------------------------------------------ + + /// Middleware for HTTP streamed responses (user can override) + Future onStreamedResponse( + final http.StreamedResponse response, + ) { + return Future.value(response); + } + + // ------------------------------------------ + // METHOD: onResponse + // ------------------------------------------ + + /// Middleware for HTTP responses (user can override) + Future onResponse(http.Response response) { + return Future.value(response); + } + + // ------------------------------------------ + // METHOD: _jsonDecode + // ------------------------------------------ + + dynamic _jsonDecode(http.Response r) { + return json.decode(utf8.decode(r.bodyBytes)); + } + + // ------------------------------------------ + // METHOD: _request + // ------------------------------------------ + + /// Reusable request method + @protected + Future _request({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + // Override with the user provided baseUrl + baseUrl = this.baseUrl ?? 
baseUrl; + + // Ensure a baseUrl is provided + assert( + baseUrl.isNotEmpty, + 'baseUrl is required, but none defined in spec or provided by user', + ); + + // Add global query parameters + queryParams = {...queryParams, ...this.queryParams}; + + // Ensure query parameters are strings or iterable of strings + queryParams = queryParams.map((key, value) { + if (value is Iterable) { + return MapEntry(key, value.map((v) => v.toString())); + } else { + return MapEntry(key, value.toString()); + } + }); + + // Build the request URI + Uri uri = Uri.parse(baseUrl + path); + if (queryParams.isNotEmpty) { + uri = uri.replace(queryParameters: queryParams); + } + + // Build the headers + Map headers = {...headerParams}; + + // Define the request type being sent to server + if (requestType.isNotEmpty) { + headers['content-type'] = requestType; + } + + // Define the response type expected to receive from server + if (responseType.isNotEmpty) { + headers['accept'] = responseType; + } + + // Add global headers + headers.addAll(this.headers); + + // Build the request object + http.BaseRequest request; + if (isMultipart) { + // Handle multipart request + request = http.MultipartRequest(method.name, uri); + request = request as http.MultipartRequest; + if (body is List) { + request.files.addAll(body); + } else { + request.files.add(body as http.MultipartFile); + } + } else { + // Handle normal request + request = http.Request(method.name, uri); + request = request as http.Request; + try { + if (body != null) { + request.body = json.encode(body); + } + } catch (e) { + // Handle request encoding error + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Could not encode: ${body.runtimeType}', + body: e, + ); + } + } + + // Add request headers + request.headers.addAll(headers); + + // Handle user request middleware + request = await onRequest(request); + + // Submit request + return await client.send(request); + } + + // ------------------------------------------ + // METHOD: makeRequestStream + // ------------------------------------------ + + /// Reusable request stream method + @protected + Future makeRequestStream({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + final uri = Uri.parse((this.baseUrl ?? 
baseUrl) + path); + late http.StreamedResponse response; + try { + response = await _request( + baseUrl: baseUrl, + path: path, + method: method, + queryParams: queryParams, + headerParams: headerParams, + requestType: requestType, + responseType: responseType, + body: body, + ); + // Handle user response middleware + response = await onStreamedResponse(response); + } catch (e) { + // Handle request and response errors + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Response error', + body: e, + ); + } + + // Check for successful response + if ((response.statusCode ~/ 100) == 2) { + return response; + } + + // Handle unsuccessful response + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Unsuccessful response', + code: response.statusCode, + body: (await http.Response.fromStream(response)).body, + ); + } + + // ------------------------------------------ + // METHOD: makeRequest + // ------------------------------------------ + + /// Reusable request method + @protected + Future makeRequest({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + final uri = Uri.parse((this.baseUrl ?? baseUrl) + path); + late http.Response response; + try { + final streamedResponse = await _request( + baseUrl: baseUrl, + path: path, + method: method, + queryParams: queryParams, + headerParams: headerParams, + requestType: requestType, + responseType: responseType, + body: body, + ); + response = await http.Response.fromStream(streamedResponse); + // Handle user response middleware + response = await onResponse(response); + } catch (e) { + // Handle request and response errors + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Response error', + body: e, + ); + } + + // Check for successful response + if ((response.statusCode ~/ 100) == 2) { + return response; + } + + // Handle unsuccessful response + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Unsuccessful response', + code: response.statusCode, + body: response.body, + ); + } + + // ------------------------------------------ + // METHOD: createMessage + // ------------------------------------------ + + /// Create a Message + /// + /// Send a structured list of input messages with text and/or image content, and the + /// model will generate the next message in the conversation. + /// + /// The Messages API can be used for either single queries or stateless multi-turn + /// conversations. + /// + /// `request`: The request parameters for creating a message. 
+ /// + /// `POST` `https://api.anthropic.com/v1/messages` + Future createMessage({ + required CreateMessageRequest request, + }) async { + final r = await makeRequest( + baseUrl: 'https://api.anthropic.com/v1', + path: '/messages', + method: HttpMethod.post, + isMultipart: false, + requestType: 'application/json', + responseType: 'application/json', + body: request, + headerParams: { + if (apiKey.isNotEmpty) 'x-api-key': apiKey, + }, + ); + return Message.fromJson(_jsonDecode(r)); + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart new file mode 100644 index 00000000..36fcbaae --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart @@ -0,0 +1,55 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: Block +// ========================================== + +/// A block of content in a message. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class Block with _$Block { + const Block._(); + + // ------------------------------------------ + // UNION: TextBlock + // ------------------------------------------ + + /// A block of text content. + const factory Block.text({ + /// The text content. + required String text, + + /// The type of content block. + @Default('text') String type, + }) = TextBlock; + + // ------------------------------------------ + // UNION: ImageBlock + // ------------------------------------------ + + /// A block of image content. + const factory Block.image({ + /// The source of an image block. + required ImageBlockSource source, + + /// The type of content block. + @Default('image') String type, + }) = ImageBlock; + + /// Object construction from a JSON representation + factory Block.fromJson(Map json) => _$BlockFromJson(json); +} + +// ========================================== +// ENUM: BlockEnumType +// ========================================== + +enum BlockEnumType { + @JsonValue('text') + text, + @JsonValue('image') + image, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart new file mode 100644 index 00000000..2f06233e --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart @@ -0,0 +1,293 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: CreateMessageRequest +// ========================================== + +/// The request parameters for creating a message. +@freezed +class CreateMessageRequest with _$CreateMessageRequest { + const CreateMessageRequest._(); + + /// Factory constructor for CreateMessageRequest + const factory CreateMessageRequest({ + /// The model that will complete your prompt. + /// + /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + /// details and options. + @_ModelConverter() required Model model, + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. 
When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. The first message must always use the `user` role. + /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + required List messages, + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + @JsonKey(name: 'max_tokens') required int maxTokens, + + /// An object describing metadata about the request. + @JsonKey(includeIfNull: false) CreateMessageRequestMetadata? metadata, + + /// Custom text sequences that will cause the model to stop generating. 
+ /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. + /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? stopSequences, + + /// System prompt. + /// + /// A system prompt is a way of providing context and instructions to Claude, such + /// as specifying a particular goal or role. See our + /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + @JsonKey(includeIfNull: false) String? system, + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + @JsonKey(includeIfNull: false) double? temperature, + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_k', includeIfNull: false) int? topK, + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + + /// Whether to incrementally stream the response using server-sent events. + /// + /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + /// details. + @Default(false) bool stream, + }) = _CreateMessageRequest; + + /// Object construction from a JSON representation + factory CreateMessageRequest.fromJson(Map json) => + _$CreateMessageRequestFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'model', + 'messages', + 'max_tokens', + 'metadata', + 'stop_sequences', + 'system', + 'temperature', + 'top_k', + 'top_p', + 'stream' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'model': model, + 'messages': messages, + 'max_tokens': maxTokens, + 'metadata': metadata, + 'stop_sequences': stopSequences, + 'system': system, + 'temperature': temperature, + 'top_k': topK, + 'top_p': topP, + 'stream': stream, + }; + } +} + +// ========================================== +// ENUM: Models +// ========================================== + +/// Available models. Mind that the list may not be exhaustive nor up-to-date. 
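+///
+/// Used through the [Model] union: `Model.model(Models.claude3Opus20240229)`
+/// picks a value from this enum, while `Model.modelId('claude-3-haiku-20240307')`
+/// passes a raw model ID string (useful for models not yet listed here).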
+enum Models { + @JsonValue('claude-3-opus-20240229') + claude3Opus20240229, + @JsonValue('claude-3-sonnet-20240229') + claude3Sonnet20240229, + @JsonValue('claude-3-haiku-20240307') + claude3Haiku20240307, + @JsonValue('claude-2.1') + claude21, + @JsonValue('claude-2.0') + claude20, + @JsonValue('claude-instant-1.2') + claudeInstant12, +} + +// ========================================== +// CLASS: Model +// ========================================== + +/// The model that will complete your prompt. +/// +/// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional +/// details and options. +@freezed +sealed class Model with _$Model { + const Model._(); + + /// Available models. Mind that the list may not be exhaustive nor up-to-date. + const factory Model.model( + Models value, + ) = ModelEnumeration; + + /// The ID of the model to use for this request. + const factory Model.modelId( + String value, + ) = ModelString; + + /// Object construction from a JSON representation + factory Model.fromJson(Map json) => _$ModelFromJson(json); +} + +/// Custom JSON converter for [Model] +class _ModelConverter implements JsonConverter { + const _ModelConverter(); + + @override + Model fromJson(Object? data) { + if (data is String && _$ModelsEnumMap.values.contains(data)) { + return ModelEnumeration( + _$ModelsEnumMap.keys.elementAt( + _$ModelsEnumMap.values.toList().indexOf(data), + ), + ); + } + if (data is String) { + return ModelString(data); + } + throw Exception( + 'Unexpected value for Model: $data', + ); + } + + @override + Object? toJson(Model data) { + return switch (data) { + ModelEnumeration(value: final v) => _$ModelsEnumMap[v]!, + ModelString(value: final v) => v, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart new file mode 100644 index 00000000..bf588756 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart @@ -0,0 +1,44 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: CreateMessageRequestMetadata +// ========================================== + +/// An object describing metadata about the request. +@freezed +class CreateMessageRequestMetadata with _$CreateMessageRequestMetadata { + const CreateMessageRequestMetadata._(); + + /// Factory constructor for CreateMessageRequestMetadata + const factory CreateMessageRequestMetadata({ + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + @JsonKey(name: 'user_id', includeIfNull: false) String? userId, + }) = _CreateMessageRequestMetadata; + + /// Object construction from a JSON representation + factory CreateMessageRequestMetadata.fromJson(Map json) => + _$CreateMessageRequestMetadataFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['user_id']; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'user_id': userId, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart new file mode 100644 index 00000000..e0a89687 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart @@ -0,0 +1,74 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: ImageBlockSource +// ========================================== + +/// The source of an image block. +@freezed +class ImageBlockSource with _$ImageBlockSource { + const ImageBlockSource._(); + + /// Factory constructor for ImageBlockSource + const factory ImageBlockSource({ + /// The base64-encoded image data. + required String data, + + /// The media type of the image. + @JsonKey(name: 'media_type') required ImageBlockSourceMediaType mediaType, + + /// The type of image source. + required ImageBlockSourceType type, + }) = _ImageBlockSource; + + /// Object construction from a JSON representation + factory ImageBlockSource.fromJson(Map json) => + _$ImageBlockSourceFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['data', 'media_type', 'type']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'data': data, + 'media_type': mediaType, + 'type': type, + }; + } +} + +// ========================================== +// ENUM: ImageBlockSourceMediaType +// ========================================== + +/// The media type of the image. +enum ImageBlockSourceMediaType { + @JsonValue('image/jpeg') + imageJpeg, + @JsonValue('image/png') + imagePng, + @JsonValue('image/gif') + imageGif, + @JsonValue('image/webp') + imageWebp, +} + +// ========================================== +// ENUM: ImageBlockSourceType +// ========================================== + +/// The type of image source. +enum ImageBlockSourceType { + @JsonValue('base64') + base64, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart new file mode 100644 index 00000000..e8e0b298 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart @@ -0,0 +1,162 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: Message +// ========================================== + +/// A message in a chat conversation. +@freezed +class Message with _$Message { + const Message._(); + + /// Factory constructor for Message + const factory Message({ + /// Unique object identifier. + /// + /// The format and length of IDs may change over time. + @JsonKey(includeIfNull: false) String? id, + + /// The content of the message. + @_MessageContentConverter() required MessageContent content, + + /// The role of the messages author. + required MessageRole role, + + /// The model that handled the request. + @JsonKey(includeIfNull: false) String? model, + + /// The reason that we stopped. 
+ /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + StopReason? stopReason, + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) String? stopSequence, + + /// Object type. + /// + /// For Messages, this is always `"message"`. + @JsonKey(includeIfNull: false) String? type, + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + @JsonKey(includeIfNull: false) Usage? usage, + }) = _Message; + + /// Object construction from a JSON representation + factory Message.fromJson(Map json) => + _$MessageFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'id', + 'content', + 'role', + 'model', + 'stop_reason', + 'stop_sequence', + 'type', + 'usage' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'id': id, + 'content': content, + 'role': role, + 'model': model, + 'stop_reason': stopReason, + 'stop_sequence': stopSequence, + 'type': type, + 'usage': usage, + }; + } +} + +// ========================================== +// CLASS: MessageContent +// ========================================== + +/// The content of the message. +@freezed +sealed class MessageContent with _$MessageContent { + const MessageContent._(); + + /// An array of content blocks. + const factory MessageContent.blocks( + List value, + ) = MessageContentListBlock; + + /// A single text block. + const factory MessageContent.text( + String value, + ) = MessageContentString; + + /// Object construction from a JSON representation + factory MessageContent.fromJson(Map json) => + _$MessageContentFromJson(json); +} + +/// Custom JSON converter for [MessageContent] +class _MessageContentConverter + implements JsonConverter { + const _MessageContentConverter(); + + @override + MessageContent fromJson(Object? data) { + if (data is List && data.every((item) => item is Map)) { + return MessageContentListBlock(data + .map((i) => Block.fromJson(i as Map)) + .toList(growable: false)); + } + if (data is String) { + return MessageContentString(data); + } + throw Exception( + 'Unexpected value for MessageContent: $data', + ); + } + + @override + Object? 
toJson(MessageContent data) { + return switch (data) { + MessageContentListBlock(value: final v) => v, + MessageContentString(value: final v) => v, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart new file mode 100644 index 00000000..aa23db40 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart @@ -0,0 +1,61 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: MessageDelta +// ========================================== + +/// A delta in a streaming message. +@freezed +class MessageDelta with _$MessageDelta { + const MessageDelta._(); + + /// Factory constructor for MessageDelta + const factory MessageDelta({ + /// The reason that we stopped. + /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + StopReason? stopReason, + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) String? stopSequence, + }) = _MessageDelta; + + /// Object construction from a JSON representation + factory MessageDelta.fromJson(Map json) => + _$MessageDeltaFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['stop_reason', 'stop_sequence']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'stop_reason': stopReason, + 'stop_sequence': stopSequence, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart new file mode 100644 index 00000000..3ce710cc --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart @@ -0,0 +1,51 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: MessageDeltaUsage +// ========================================== + +/// Billing and rate-limit usage. +/// +/// Anthropic's API bills and rate-limits by token counts, as tokens represent the +/// underlying cost to our systems. +/// +/// Under the hood, the API transforms requests into a format suitable for the +/// model. The model's output then goes through a parsing stage before becoming an +/// API response. As a result, the token counts in `usage` will not match one-to-one +/// with the exact visible content of an API request or response. 
+/// +/// For example, `output_tokens` will be non-zero, even for an empty string response +/// from Claude. +@freezed +class MessageDeltaUsage with _$MessageDeltaUsage { + const MessageDeltaUsage._(); + + /// Factory constructor for MessageDeltaUsage + const factory MessageDeltaUsage({ + /// The cumulative number of output tokens which were used. + @JsonKey(name: 'output_tokens') required int outputTokens, + }) = _MessageDeltaUsage; + + /// Object construction from a JSON representation + factory MessageDeltaUsage.fromJson(Map json) => + _$MessageDeltaUsageFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['output_tokens']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'output_tokens': outputTokens, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart new file mode 100644 index 00000000..e502789a --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart @@ -0,0 +1,17 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// ENUM: MessageRole +// ========================================== + +/// The role of the messages author. +enum MessageRole { + @JsonValue('user') + user, + @JsonValue('assistant') + assistant, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart new file mode 100644 index 00000000..73dac3c3 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart @@ -0,0 +1,124 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: MessageStreamEvent +// ========================================== + +/// A event in a streaming conversation. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class MessageStreamEvent with _$MessageStreamEvent { + const MessageStreamEvent._(); + + // ------------------------------------------ + // UNION: MessageStartEvent + // ------------------------------------------ + + /// A start event in a streaming conversation. + const factory MessageStreamEvent.messageStart({ + /// A message in a chat conversation. + required Message message, + + /// The type of a streaming event. + required MessageStreamEventType type, + }) = MessageStartEvent; + + // ------------------------------------------ + // UNION: MessageDeltaEvent + // ------------------------------------------ + + /// A delta event in a streaming conversation. + const factory MessageStreamEvent.messageDelta({ + /// A delta in a streaming message. + required MessageDelta delta, + + /// The type of a streaming event. + required MessageStreamEventType type, + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. 
The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + required MessageDeltaUsage usage, + }) = MessageDeltaEvent; + + // ------------------------------------------ + // UNION: MessageStopEvent + // ------------------------------------------ + + /// A stop event in a streaming conversation. + const factory MessageStreamEvent.messageStop({ + /// The type of a streaming event. + required MessageStreamEventType type, + }) = MessageStopEvent; + + // ------------------------------------------ + // UNION: ContentBlockStartEvent + // ------------------------------------------ + + /// A start event in a streaming content block. + const factory MessageStreamEvent.contentBlockStart({ + /// A block of text content. + @JsonKey(name: 'content_block') required TextBlock contentBlock, + + /// The index of the content block. + required int index, + + /// The type of a streaming event. + required MessageStreamEventType type, + }) = ContentBlockStartEvent; + + // ------------------------------------------ + // UNION: ContentBlockDeltaEvent + // ------------------------------------------ + + /// A delta event in a streaming content block. + const factory MessageStreamEvent.contentBlockDelta({ + /// A delta in a streaming text block. + required TextBlockDelta delta, + + /// The index of the content block. + required int index, + + /// The type of a streaming event. + required MessageStreamEventType type, + }) = ContentBlockDeltaEvent; + + // ------------------------------------------ + // UNION: ContentBlockStopEvent + // ------------------------------------------ + + /// A stop event in a streaming content block. + const factory MessageStreamEvent.contentBlockStop({ + /// The index of the content block. + required int index, + + /// The type of a streaming event. + required MessageStreamEventType type, + }) = ContentBlockStopEvent; + + // ------------------------------------------ + // UNION: PingEvent + // ------------------------------------------ + + /// A ping event in a streaming conversation. + const factory MessageStreamEvent.ping({ + /// The type of a streaming event. + required MessageStreamEventType type, + }) = PingEvent; + + /// Object construction from a JSON representation + factory MessageStreamEvent.fromJson(Map json) => + _$MessageStreamEventFromJson(json); +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart new file mode 100644 index 00000000..0e6aa425 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart @@ -0,0 +1,27 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// ENUM: MessageStreamEventType +// ========================================== + +/// The type of a streaming event. 
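+///
+/// As a rough sketch of how these events are typically consumed (not generated
+/// code; it assumes `TextBlockDelta` exposes a `text` field, as declared elsewhere
+/// in this schema), a client can switch on the [MessageStreamEvent] subtype and
+/// accumulate the streamed text, however the events were obtained:
+///
+/// ```dart
+/// String collectText(Iterable<MessageStreamEvent> events) {
+///   final buffer = StringBuffer();
+///   for (final event in events) {
+///     switch (event) {
+///       case ContentBlockDeltaEvent(delta: final delta):
+///         buffer.write(delta.text); // incremental text chunk
+///         break;
+///       case MessageStopEvent():
+///         break; // generation finished
+///       default:
+///         break; // message_start, ping, etc. carry no text to accumulate
+///     }
+///   }
+///   return buffer.toString();
+/// }
+/// ```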
+enum MessageStreamEventType { + @JsonValue('message_start') + messageStart, + @JsonValue('message_delta') + messageDelta, + @JsonValue('message_stop') + messageStop, + @JsonValue('content_block_start') + contentBlockStart, + @JsonValue('content_block_delta') + contentBlockDelta, + @JsonValue('content_block_stop') + contentBlockStop, + @JsonValue('ping') + ping, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart new file mode 100644 index 00000000..1953d0e4 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart @@ -0,0 +1,25 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target + +library anthropic_schema; + +import 'package:freezed_annotation/freezed_annotation.dart'; + +part 'schema.g.dart'; +part 'schema.freezed.dart'; + +part 'create_message_request.dart'; +part 'create_message_request_metadata.dart'; +part 'message.dart'; +part 'message_role.dart'; +part 'image_block_source.dart'; +part 'stop_reason.dart'; +part 'usage.dart'; +part 'message_stream_event_type.dart'; +part 'message_delta.dart'; +part 'message_delta_usage.dart'; +part 'text_block_delta.dart'; +part 'block.dart'; +part 'message_stream_event.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart new file mode 100644 index 00000000..a014e0e8 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart @@ -0,0 +1,5620 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: unused_element, deprecated_member_use, deprecated_member_use_from_same_package, use_function_type_syntax_for_parameters, unnecessary_const, avoid_init_to_null, invalid_override_different_default_values_named, prefer_expression_function_bodies, annotate_overrides, invalid_annotation_target, unnecessary_question_mark + +part of 'schema.dart'; + +// ************************************************************************** +// FreezedGenerator +// ************************************************************************** + +T _$identity(T value) => value; + +final _privateConstructorUsedError = UnsupportedError( + 'It seems like you constructed your class using `MyClass._()`. This constructor is only meant to be used by freezed and you are not supposed to need it nor use it.\nPlease check the documentation here for more information: https://github.com/rrousselGit/freezed#adding-getters-and-methods-to-our-models'); + +CreateMessageRequest _$CreateMessageRequestFromJson(Map json) { + return _CreateMessageRequest.fromJson(json); +} + +/// @nodoc +mixin _$CreateMessageRequest { + /// The model that will complete your prompt. + /// + /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + /// details and options. + @_ModelConverter() + Model get model => throw _privateConstructorUsedError; + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. 
You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. The first message must always use the `user` role. + /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + List get messages => throw _privateConstructorUsedError; + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + @JsonKey(name: 'max_tokens') + int get maxTokens => throw _privateConstructorUsedError; + + /// An object describing metadata about the request. + @JsonKey(includeIfNull: false) + CreateMessageRequestMetadata? get metadata => + throw _privateConstructorUsedError; + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. 
+ /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? get stopSequences => throw _privateConstructorUsedError; + + /// System prompt. + /// + /// A system prompt is a way of providing context and instructions to Claude, such + /// as specifying a particular goal or role. See our + /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + @JsonKey(includeIfNull: false) + String? get system => throw _privateConstructorUsedError; + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + @JsonKey(includeIfNull: false) + double? get temperature => throw _privateConstructorUsedError; + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_k', includeIfNull: false) + int? get topK => throw _privateConstructorUsedError; + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP => throw _privateConstructorUsedError; + + /// Whether to incrementally stream the response using server-sent events. + /// + /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + /// details. + bool get stream => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateMessageRequestCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateMessageRequestCopyWith<$Res> { + factory $CreateMessageRequestCopyWith(CreateMessageRequest value, + $Res Function(CreateMessageRequest) then) = + _$CreateMessageRequestCopyWithImpl<$Res, CreateMessageRequest>; + @useResult + $Res call( + {@_ModelConverter() Model model, + List messages, + @JsonKey(name: 'max_tokens') int maxTokens, + @JsonKey(includeIfNull: false) CreateMessageRequestMetadata? metadata, + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? stopSequences, + @JsonKey(includeIfNull: false) String? system, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_k', includeIfNull: false) int? topK, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + bool stream}); + + $ModelCopyWith<$Res> get model; + $CreateMessageRequestMetadataCopyWith<$Res>? 
get metadata; +} + +/// @nodoc +class _$CreateMessageRequestCopyWithImpl<$Res, + $Val extends CreateMessageRequest> + implements $CreateMessageRequestCopyWith<$Res> { + _$CreateMessageRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? messages = null, + Object? maxTokens = null, + Object? metadata = freezed, + Object? stopSequences = freezed, + Object? system = freezed, + Object? temperature = freezed, + Object? topK = freezed, + Object? topP = freezed, + Object? stream = null, + }) { + return _then(_value.copyWith( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as Model, + messages: null == messages + ? _value.messages + : messages // ignore: cast_nullable_to_non_nullable + as List, + maxTokens: null == maxTokens + ? _value.maxTokens + : maxTokens // ignore: cast_nullable_to_non_nullable + as int, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as CreateMessageRequestMetadata?, + stopSequences: freezed == stopSequences + ? _value.stopSequences + : stopSequences // ignore: cast_nullable_to_non_nullable + as List?, + system: freezed == system + ? _value.system + : system // ignore: cast_nullable_to_non_nullable + as String?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + topK: freezed == topK + ? _value.topK + : topK // ignore: cast_nullable_to_non_nullable + as int?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + stream: null == stream + ? _value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ModelCopyWith<$Res> get model { + return $ModelCopyWith<$Res>(_value.model, (value) { + return _then(_value.copyWith(model: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $CreateMessageRequestMetadataCopyWith<$Res>? get metadata { + if (_value.metadata == null) { + return null; + } + + return $CreateMessageRequestMetadataCopyWith<$Res>(_value.metadata!, + (value) { + return _then(_value.copyWith(metadata: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$CreateMessageRequestImplCopyWith<$Res> + implements $CreateMessageRequestCopyWith<$Res> { + factory _$$CreateMessageRequestImplCopyWith(_$CreateMessageRequestImpl value, + $Res Function(_$CreateMessageRequestImpl) then) = + __$$CreateMessageRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@_ModelConverter() Model model, + List messages, + @JsonKey(name: 'max_tokens') int maxTokens, + @JsonKey(includeIfNull: false) CreateMessageRequestMetadata? metadata, + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? stopSequences, + @JsonKey(includeIfNull: false) String? system, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_k', includeIfNull: false) int? topK, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + bool stream}); + + @override + $ModelCopyWith<$Res> get model; + @override + $CreateMessageRequestMetadataCopyWith<$Res>? 
get metadata; +} + +/// @nodoc +class __$$CreateMessageRequestImplCopyWithImpl<$Res> + extends _$CreateMessageRequestCopyWithImpl<$Res, _$CreateMessageRequestImpl> + implements _$$CreateMessageRequestImplCopyWith<$Res> { + __$$CreateMessageRequestImplCopyWithImpl(_$CreateMessageRequestImpl _value, + $Res Function(_$CreateMessageRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? messages = null, + Object? maxTokens = null, + Object? metadata = freezed, + Object? stopSequences = freezed, + Object? system = freezed, + Object? temperature = freezed, + Object? topK = freezed, + Object? topP = freezed, + Object? stream = null, + }) { + return _then(_$CreateMessageRequestImpl( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as Model, + messages: null == messages + ? _value._messages + : messages // ignore: cast_nullable_to_non_nullable + as List, + maxTokens: null == maxTokens + ? _value.maxTokens + : maxTokens // ignore: cast_nullable_to_non_nullable + as int, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as CreateMessageRequestMetadata?, + stopSequences: freezed == stopSequences + ? _value._stopSequences + : stopSequences // ignore: cast_nullable_to_non_nullable + as List?, + system: freezed == system + ? _value.system + : system // ignore: cast_nullable_to_non_nullable + as String?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + topK: freezed == topK + ? _value.topK + : topK // ignore: cast_nullable_to_non_nullable + as int?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + stream: null == stream + ? _value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateMessageRequestImpl extends _CreateMessageRequest { + const _$CreateMessageRequestImpl( + {@_ModelConverter() required this.model, + required final List messages, + @JsonKey(name: 'max_tokens') required this.maxTokens, + @JsonKey(includeIfNull: false) this.metadata, + @JsonKey(name: 'stop_sequences', includeIfNull: false) + final List? stopSequences, + @JsonKey(includeIfNull: false) this.system, + @JsonKey(includeIfNull: false) this.temperature, + @JsonKey(name: 'top_k', includeIfNull: false) this.topK, + @JsonKey(name: 'top_p', includeIfNull: false) this.topP, + this.stream = false}) + : _messages = messages, + _stopSequences = stopSequences, + super._(); + + factory _$CreateMessageRequestImpl.fromJson(Map json) => + _$$CreateMessageRequestImplFromJson(json); + + /// The model that will complete your prompt. + /// + /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + /// details and options. + @override + @_ModelConverter() + final Model model; + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. 
The first message must always use the `user` role. + /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + final List _messages; + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. The first message must always use the `user` role. + /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. 
How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + @override + List get messages { + if (_messages is EqualUnmodifiableListView) return _messages; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_messages); + } + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + @override + @JsonKey(name: 'max_tokens') + final int maxTokens; + + /// An object describing metadata about the request. + @override + @JsonKey(includeIfNull: false) + final CreateMessageRequestMetadata? metadata; + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. + /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + final List? _stopSequences; + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. 
+ /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + @override + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? get stopSequences { + final value = _stopSequences; + if (value == null) return null; + if (_stopSequences is EqualUnmodifiableListView) return _stopSequences; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// System prompt. + /// + /// A system prompt is a way of providing context and instructions to Claude, such + /// as specifying a particular goal or role. See our + /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + @override + @JsonKey(includeIfNull: false) + final String? system; + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + @override + @JsonKey(includeIfNull: false) + final double? temperature; + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @override + @JsonKey(name: 'top_k', includeIfNull: false) + final int? topK; + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @override + @JsonKey(name: 'top_p', includeIfNull: false) + final double? topP; + + /// Whether to incrementally stream the response using server-sent events. + /// + /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + /// details. 
+ @override + @JsonKey() + final bool stream; + + @override + String toString() { + return 'CreateMessageRequest(model: $model, messages: $messages, maxTokens: $maxTokens, metadata: $metadata, stopSequences: $stopSequences, system: $system, temperature: $temperature, topK: $topK, topP: $topP, stream: $stream)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateMessageRequestImpl && + (identical(other.model, model) || other.model == model) && + const DeepCollectionEquality().equals(other._messages, _messages) && + (identical(other.maxTokens, maxTokens) || + other.maxTokens == maxTokens) && + (identical(other.metadata, metadata) || + other.metadata == metadata) && + const DeepCollectionEquality() + .equals(other._stopSequences, _stopSequences) && + (identical(other.system, system) || other.system == system) && + (identical(other.temperature, temperature) || + other.temperature == temperature) && + (identical(other.topK, topK) || other.topK == topK) && + (identical(other.topP, topP) || other.topP == topP) && + (identical(other.stream, stream) || other.stream == stream)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + model, + const DeepCollectionEquality().hash(_messages), + maxTokens, + metadata, + const DeepCollectionEquality().hash(_stopSequences), + system, + temperature, + topK, + topP, + stream); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> + get copyWith => + __$$CreateMessageRequestImplCopyWithImpl<_$CreateMessageRequestImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$CreateMessageRequestImplToJson( + this, + ); + } +} + +abstract class _CreateMessageRequest extends CreateMessageRequest { + const factory _CreateMessageRequest( + {@_ModelConverter() required final Model model, + required final List messages, + @JsonKey(name: 'max_tokens') required final int maxTokens, + @JsonKey(includeIfNull: false) + final CreateMessageRequestMetadata? metadata, + @JsonKey(name: 'stop_sequences', includeIfNull: false) + final List? stopSequences, + @JsonKey(includeIfNull: false) final String? system, + @JsonKey(includeIfNull: false) final double? temperature, + @JsonKey(name: 'top_k', includeIfNull: false) final int? topK, + @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, + final bool stream}) = _$CreateMessageRequestImpl; + const _CreateMessageRequest._() : super._(); + + factory _CreateMessageRequest.fromJson(Map json) = + _$CreateMessageRequestImpl.fromJson; + + @override + + /// The model that will complete your prompt. + /// + /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + /// details and options. + @_ModelConverter() + Model get model; + @override + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. The first message must always use the `user` role. 
+ /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + List get messages; + @override + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + @JsonKey(name: 'max_tokens') + int get maxTokens; + @override + + /// An object describing metadata about the request. + @JsonKey(includeIfNull: false) + CreateMessageRequestMetadata? get metadata; + @override + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. + /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. 
If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? get stopSequences; + @override + + /// System prompt. + /// + /// A system prompt is a way of providing context and instructions to Claude, such + /// as specifying a particular goal or role. See our + /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + @JsonKey(includeIfNull: false) + String? get system; + @override + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + @JsonKey(includeIfNull: false) + double? get temperature; + @override + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_k', includeIfNull: false) + int? get topK; + @override + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP; + @override + + /// Whether to incrementally stream the response using server-sent events. + /// + /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + /// details. + bool get stream; + @override + @JsonKey(ignore: true) + _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> + get copyWith => throw _privateConstructorUsedError; +} + +Model _$ModelFromJson(Map json) { + switch (json['runtimeType']) { + case 'model': + return ModelEnumeration.fromJson(json); + case 'modelId': + return ModelString.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'Model', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$Model { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(Models value) model, + required TResult Function(String value) modelId, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Models value)? model, + TResult? Function(String value)? modelId, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Models value)? model, + TResult Function(String value)? 
modelId, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(ModelEnumeration value) model, + required TResult Function(ModelString value) modelId, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ModelEnumeration value)? model, + TResult? Function(ModelString value)? modelId, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ModelEnumeration value)? model, + TResult Function(ModelString value)? modelId, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModelCopyWith<$Res> { + factory $ModelCopyWith(Model value, $Res Function(Model) then) = + _$ModelCopyWithImpl<$Res, Model>; +} + +/// @nodoc +class _$ModelCopyWithImpl<$Res, $Val extends Model> + implements $ModelCopyWith<$Res> { + _$ModelCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$ModelEnumerationImplCopyWith<$Res> { + factory _$$ModelEnumerationImplCopyWith(_$ModelEnumerationImpl value, + $Res Function(_$ModelEnumerationImpl) then) = + __$$ModelEnumerationImplCopyWithImpl<$Res>; + @useResult + $Res call({Models value}); +} + +/// @nodoc +class __$$ModelEnumerationImplCopyWithImpl<$Res> + extends _$ModelCopyWithImpl<$Res, _$ModelEnumerationImpl> + implements _$$ModelEnumerationImplCopyWith<$Res> { + __$$ModelEnumerationImplCopyWithImpl(_$ModelEnumerationImpl _value, + $Res Function(_$ModelEnumerationImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ModelEnumerationImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as Models, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModelEnumerationImpl extends ModelEnumeration { + const _$ModelEnumerationImpl(this.value, {final String? $type}) + : $type = $type ?? 'model', + super._(); + + factory _$ModelEnumerationImpl.fromJson(Map json) => + _$$ModelEnumerationImplFromJson(json); + + @override + final Models value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'Model.model(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModelEnumerationImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModelEnumerationImplCopyWith<_$ModelEnumerationImpl> get copyWith => + __$$ModelEnumerationImplCopyWithImpl<_$ModelEnumerationImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Models value) model, + required TResult Function(String value) modelId, + }) { + return model(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Models value)? model, + TResult? Function(String value)? modelId, + }) { + return model?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Models value)? model, + TResult Function(String value)? 
modelId, + required TResult orElse(), + }) { + if (model != null) { + return model(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ModelEnumeration value) model, + required TResult Function(ModelString value) modelId, + }) { + return model(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ModelEnumeration value)? model, + TResult? Function(ModelString value)? modelId, + }) { + return model?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ModelEnumeration value)? model, + TResult Function(ModelString value)? modelId, + required TResult orElse(), + }) { + if (model != null) { + return model(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ModelEnumerationImplToJson( + this, + ); + } +} + +abstract class ModelEnumeration extends Model { + const factory ModelEnumeration(final Models value) = _$ModelEnumerationImpl; + const ModelEnumeration._() : super._(); + + factory ModelEnumeration.fromJson(Map json) = + _$ModelEnumerationImpl.fromJson; + + @override + Models get value; + @JsonKey(ignore: true) + _$$ModelEnumerationImplCopyWith<_$ModelEnumerationImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ModelStringImplCopyWith<$Res> { + factory _$$ModelStringImplCopyWith( + _$ModelStringImpl value, $Res Function(_$ModelStringImpl) then) = + __$$ModelStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$ModelStringImplCopyWithImpl<$Res> + extends _$ModelCopyWithImpl<$Res, _$ModelStringImpl> + implements _$$ModelStringImplCopyWith<$Res> { + __$$ModelStringImplCopyWithImpl( + _$ModelStringImpl _value, $Res Function(_$ModelStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ModelStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModelStringImpl extends ModelString { + const _$ModelStringImpl(this.value, {final String? $type}) + : $type = $type ?? 'modelId', + super._(); + + factory _$ModelStringImpl.fromJson(Map json) => + _$$ModelStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'Model.modelId(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModelStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModelStringImplCopyWith<_$ModelStringImpl> get copyWith => + __$$ModelStringImplCopyWithImpl<_$ModelStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Models value) model, + required TResult Function(String value) modelId, + }) { + return modelId(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Models value)? model, + TResult? Function(String value)? modelId, + }) { + return modelId?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Models value)? model, + TResult Function(String value)? 
modelId, + required TResult orElse(), + }) { + if (modelId != null) { + return modelId(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ModelEnumeration value) model, + required TResult Function(ModelString value) modelId, + }) { + return modelId(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ModelEnumeration value)? model, + TResult? Function(ModelString value)? modelId, + }) { + return modelId?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ModelEnumeration value)? model, + TResult Function(ModelString value)? modelId, + required TResult orElse(), + }) { + if (modelId != null) { + return modelId(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ModelStringImplToJson( + this, + ); + } +} + +abstract class ModelString extends Model { + const factory ModelString(final String value) = _$ModelStringImpl; + const ModelString._() : super._(); + + factory ModelString.fromJson(Map json) = + _$ModelStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$ModelStringImplCopyWith<_$ModelStringImpl> get copyWith => + throw _privateConstructorUsedError; +} + +CreateMessageRequestMetadata _$CreateMessageRequestMetadataFromJson( + Map json) { + return _CreateMessageRequestMetadata.fromJson(json); +} + +/// @nodoc +mixin _$CreateMessageRequestMetadata { + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + @JsonKey(name: 'user_id', includeIfNull: false) + String? get userId => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateMessageRequestMetadataCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateMessageRequestMetadataCopyWith<$Res> { + factory $CreateMessageRequestMetadataCopyWith( + CreateMessageRequestMetadata value, + $Res Function(CreateMessageRequestMetadata) then) = + _$CreateMessageRequestMetadataCopyWithImpl<$Res, + CreateMessageRequestMetadata>; + @useResult + $Res call({@JsonKey(name: 'user_id', includeIfNull: false) String? userId}); +} + +/// @nodoc +class _$CreateMessageRequestMetadataCopyWithImpl<$Res, + $Val extends CreateMessageRequestMetadata> + implements $CreateMessageRequestMetadataCopyWith<$Res> { + _$CreateMessageRequestMetadataCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? userId = freezed, + }) { + return _then(_value.copyWith( + userId: freezed == userId + ? _value.userId + : userId // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CreateMessageRequestMetadataImplCopyWith<$Res> + implements $CreateMessageRequestMetadataCopyWith<$Res> { + factory _$$CreateMessageRequestMetadataImplCopyWith( + _$CreateMessageRequestMetadataImpl value, + $Res Function(_$CreateMessageRequestMetadataImpl) then) = + __$$CreateMessageRequestMetadataImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(name: 'user_id', includeIfNull: false) String? 
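// Usage sketch (annotation, not part of the diff): the Model union above wraps
// either a member of the Models enum or a raw model-id string, and `when`
// dispatches on whichever case was constructed. The constructors used here are
// the ones generated above; the specific model-id string is only an example.
final Model byEnum = ModelEnumeration(Models.values.first);
final Model byId = ModelString('claude-3-opus-20240229');
final String wireValue = byId.when(
  model: (value) => value.name, // enum case -> enum member name
  modelId: (value) => value,    // string case -> the id itself
);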
userId}); +} + +/// @nodoc +class __$$CreateMessageRequestMetadataImplCopyWithImpl<$Res> + extends _$CreateMessageRequestMetadataCopyWithImpl<$Res, + _$CreateMessageRequestMetadataImpl> + implements _$$CreateMessageRequestMetadataImplCopyWith<$Res> { + __$$CreateMessageRequestMetadataImplCopyWithImpl( + _$CreateMessageRequestMetadataImpl _value, + $Res Function(_$CreateMessageRequestMetadataImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? userId = freezed, + }) { + return _then(_$CreateMessageRequestMetadataImpl( + userId: freezed == userId + ? _value.userId + : userId // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateMessageRequestMetadataImpl extends _CreateMessageRequestMetadata { + const _$CreateMessageRequestMetadataImpl( + {@JsonKey(name: 'user_id', includeIfNull: false) this.userId}) + : super._(); + + factory _$CreateMessageRequestMetadataImpl.fromJson( + Map json) => + _$$CreateMessageRequestMetadataImplFromJson(json); + + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + @override + @JsonKey(name: 'user_id', includeIfNull: false) + final String? userId; + + @override + String toString() { + return 'CreateMessageRequestMetadata(userId: $userId)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateMessageRequestMetadataImpl && + (identical(other.userId, userId) || other.userId == userId)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, userId); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateMessageRequestMetadataImplCopyWith< + _$CreateMessageRequestMetadataImpl> + get copyWith => __$$CreateMessageRequestMetadataImplCopyWithImpl< + _$CreateMessageRequestMetadataImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CreateMessageRequestMetadataImplToJson( + this, + ); + } +} + +abstract class _CreateMessageRequestMetadata + extends CreateMessageRequestMetadata { + const factory _CreateMessageRequestMetadata( + {@JsonKey(name: 'user_id', includeIfNull: false) + final String? userId}) = _$CreateMessageRequestMetadataImpl; + const _CreateMessageRequestMetadata._() : super._(); + + factory _CreateMessageRequestMetadata.fromJson(Map json) = + _$CreateMessageRequestMetadataImpl.fromJson; + + @override + + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + @JsonKey(name: 'user_id', includeIfNull: false) + String? get userId; + @override + @JsonKey(ignore: true) + _$$CreateMessageRequestMetadataImplCopyWith< + _$CreateMessageRequestMetadataImpl> + get copyWith => throw _privateConstructorUsedError; +} + +Message _$MessageFromJson(Map json) { + return _Message.fromJson(json); +} + +/// @nodoc +mixin _$Message { + /// Unique object identifier. + /// + /// The format and length of IDs may change over time. + @JsonKey(includeIfNull: false) + String? 
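// Usage sketch (annotation, not part of the diff): request metadata carries a
// single opaque user identifier; per the doc comment above it should be a
// uuid or hash, never an email address or other personal data.
final metadata = CreateMessageRequestMetadata(
  userId: 'user-7f9c2b4e', // opaque id, hypothetical value
);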
get id => throw _privateConstructorUsedError; + + /// The content of the message. + @_MessageContentConverter() + MessageContent get content => throw _privateConstructorUsedError; + + /// The role of the messages author. + MessageRole get role => throw _privateConstructorUsedError; + + /// The model that handled the request. + @JsonKey(includeIfNull: false) + String? get model => throw _privateConstructorUsedError; + + /// The reason that we stopped. + /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? get stopReason => throw _privateConstructorUsedError; + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? get stopSequence => throw _privateConstructorUsedError; + + /// Object type. + /// + /// For Messages, this is always `"message"`. + @JsonKey(includeIfNull: false) + String? get type => throw _privateConstructorUsedError; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + @JsonKey(includeIfNull: false) + Usage? get usage => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageCopyWith<$Res> { + factory $MessageCopyWith(Message value, $Res Function(Message) then) = + _$MessageCopyWithImpl<$Res, Message>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? id, + @_MessageContentConverter() MessageContent content, + MessageRole role, + @JsonKey(includeIfNull: false) String? model, + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? stopSequence, + @JsonKey(includeIfNull: false) String? type, + @JsonKey(includeIfNull: false) Usage? usage}); + + $MessageContentCopyWith<$Res> get content; + $UsageCopyWith<$Res>? get usage; +} + +/// @nodoc +class _$MessageCopyWithImpl<$Res, $Val extends Message> + implements $MessageCopyWith<$Res> { + _$MessageCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = freezed, + Object? content = null, + Object? 
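// Usage sketch (annotation, not part of the diff): interpreting the stop
// information documented above on a returned Message. The StopReason member
// names (endTurn/maxTokens/stopSequence) are assumed camelCase forms of the
// wire values listed in the doc comment.
String describeStop(Message message) {
  switch (message.stopReason) {
    case StopReason.endTurn:
      return 'natural stopping point';
    case StopReason.maxTokens:
      return 'hit the max_tokens limit';
    case StopReason.stopSequence:
      return 'matched stop sequence "${message.stopSequence}"';
    default:
      return 'no stop reason yet (streaming message_start)';
  }
}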
role = null, + Object? model = freezed, + Object? stopReason = freezed, + Object? stopSequence = freezed, + Object? type = freezed, + Object? usage = freezed, + }) { + return _then(_value.copyWith( + id: freezed == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String?, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as MessageContent, + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as MessageRole, + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + stopReason: freezed == stopReason + ? _value.stopReason + : stopReason // ignore: cast_nullable_to_non_nullable + as StopReason?, + stopSequence: freezed == stopSequence + ? _value.stopSequence + : stopSequence // ignore: cast_nullable_to_non_nullable + as String?, + type: freezed == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String?, + usage: freezed == usage + ? _value.usage + : usage // ignore: cast_nullable_to_non_nullable + as Usage?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $MessageContentCopyWith<$Res> get content { + return $MessageContentCopyWith<$Res>(_value.content, (value) { + return _then(_value.copyWith(content: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $UsageCopyWith<$Res>? get usage { + if (_value.usage == null) { + return null; + } + + return $UsageCopyWith<$Res>(_value.usage!, (value) { + return _then(_value.copyWith(usage: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$MessageImplCopyWith<$Res> implements $MessageCopyWith<$Res> { + factory _$$MessageImplCopyWith( + _$MessageImpl value, $Res Function(_$MessageImpl) then) = + __$$MessageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? id, + @_MessageContentConverter() MessageContent content, + MessageRole role, + @JsonKey(includeIfNull: false) String? model, + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? stopSequence, + @JsonKey(includeIfNull: false) String? type, + @JsonKey(includeIfNull: false) Usage? usage}); + + @override + $MessageContentCopyWith<$Res> get content; + @override + $UsageCopyWith<$Res>? get usage; +} + +/// @nodoc +class __$$MessageImplCopyWithImpl<$Res> + extends _$MessageCopyWithImpl<$Res, _$MessageImpl> + implements _$$MessageImplCopyWith<$Res> { + __$$MessageImplCopyWithImpl( + _$MessageImpl _value, $Res Function(_$MessageImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = freezed, + Object? content = null, + Object? role = null, + Object? model = freezed, + Object? stopReason = freezed, + Object? stopSequence = freezed, + Object? type = freezed, + Object? usage = freezed, + }) { + return _then(_$MessageImpl( + id: freezed == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String?, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as MessageContent, + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as MessageRole, + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + stopReason: freezed == stopReason + ? 
_value.stopReason + : stopReason // ignore: cast_nullable_to_non_nullable + as StopReason?, + stopSequence: freezed == stopSequence + ? _value.stopSequence + : stopSequence // ignore: cast_nullable_to_non_nullable + as String?, + type: freezed == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String?, + usage: freezed == usage + ? _value.usage + : usage // ignore: cast_nullable_to_non_nullable + as Usage?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageImpl extends _Message { + const _$MessageImpl( + {@JsonKey(includeIfNull: false) this.id, + @_MessageContentConverter() required this.content, + required this.role, + @JsonKey(includeIfNull: false) this.model, + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) this.stopSequence, + @JsonKey(includeIfNull: false) this.type, + @JsonKey(includeIfNull: false) this.usage}) + : super._(); + + factory _$MessageImpl.fromJson(Map json) => + _$$MessageImplFromJson(json); + + /// Unique object identifier. + /// + /// The format and length of IDs may change over time. + @override + @JsonKey(includeIfNull: false) + final String? id; + + /// The content of the message. + @override + @_MessageContentConverter() + final MessageContent content; + + /// The role of the messages author. + @override + final MessageRole role; + + /// The model that handled the request. + @override + @JsonKey(includeIfNull: false) + final String? model; + + /// The reason that we stopped. + /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @override + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final StopReason? stopReason; + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @override + @JsonKey(name: 'stop_sequence', includeIfNull: false) + final String? stopSequence; + + /// Object type. + /// + /// For Messages, this is always `"message"`. + @override + @JsonKey(includeIfNull: false) + final String? type; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + @override + @JsonKey(includeIfNull: false) + final Usage? 
usage; + + @override + String toString() { + return 'Message(id: $id, content: $content, role: $role, model: $model, stopReason: $stopReason, stopSequence: $stopSequence, type: $type, usage: $usage)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.content, content) || other.content == content) && + (identical(other.role, role) || other.role == role) && + (identical(other.model, model) || other.model == model) && + (identical(other.stopReason, stopReason) || + other.stopReason == stopReason) && + (identical(other.stopSequence, stopSequence) || + other.stopSequence == stopSequence) && + (identical(other.type, type) || other.type == type) && + (identical(other.usage, usage) || other.usage == usage)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, id, content, role, model, + stopReason, stopSequence, type, usage); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageImplCopyWith<_$MessageImpl> get copyWith => + __$$MessageImplCopyWithImpl<_$MessageImpl>(this, _$identity); + + @override + Map toJson() { + return _$$MessageImplToJson( + this, + ); + } +} + +abstract class _Message extends Message { + const factory _Message( + {@JsonKey(includeIfNull: false) final String? id, + @_MessageContentConverter() required final MessageContent content, + required final MessageRole role, + @JsonKey(includeIfNull: false) final String? model, + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final StopReason? stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) + final String? stopSequence, + @JsonKey(includeIfNull: false) final String? type, + @JsonKey(includeIfNull: false) final Usage? usage}) = _$MessageImpl; + const _Message._() : super._(); + + factory _Message.fromJson(Map json) = _$MessageImpl.fromJson; + + @override + + /// Unique object identifier. + /// + /// The format and length of IDs may change over time. + @JsonKey(includeIfNull: false) + String? get id; + @override + + /// The content of the message. + @_MessageContentConverter() + MessageContent get content; + @override + + /// The role of the messages author. + MessageRole get role; + @override + + /// The model that handled the request. + @JsonKey(includeIfNull: false) + String? get model; + @override + + /// The reason that we stopped. + /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? get stopReason; + @override + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? get stopSequence; + @override + + /// Object type. + /// + /// For Messages, this is always `"message"`. + @JsonKey(includeIfNull: false) + String? 
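// Usage sketch (annotation, not part of the diff): the copyWith machinery
// generated above derives a modified copy without mutating the original;
// fields left out keep their current values, and nullable fields can be
// explicitly cleared.
Message withoutUsage(Message message) => message.copyWith(usage: null);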
get type; + @override + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + @JsonKey(includeIfNull: false) + Usage? get usage; + @override + @JsonKey(ignore: true) + _$$MessageImplCopyWith<_$MessageImpl> get copyWith => + throw _privateConstructorUsedError; +} + +MessageContent _$MessageContentFromJson(Map json) { + switch (json['runtimeType']) { + case 'blocks': + return MessageContentListBlock.fromJson(json); + case 'text': + return MessageContentString.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'MessageContent', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$MessageContent { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(MessageContentListBlock value) blocks, + required TResult Function(MessageContentString value) text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageContentListBlock value)? blocks, + TResult? Function(MessageContentString value)? text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageContentListBlock value)? blocks, + TResult Function(MessageContentString value)? 
text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageContentCopyWith<$Res> { + factory $MessageContentCopyWith( + MessageContent value, $Res Function(MessageContent) then) = + _$MessageContentCopyWithImpl<$Res, MessageContent>; +} + +/// @nodoc +class _$MessageContentCopyWithImpl<$Res, $Val extends MessageContent> + implements $MessageContentCopyWith<$Res> { + _$MessageContentCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$MessageContentListBlockImplCopyWith<$Res> { + factory _$$MessageContentListBlockImplCopyWith( + _$MessageContentListBlockImpl value, + $Res Function(_$MessageContentListBlockImpl) then) = + __$$MessageContentListBlockImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class __$$MessageContentListBlockImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, _$MessageContentListBlockImpl> + implements _$$MessageContentListBlockImplCopyWith<$Res> { + __$$MessageContentListBlockImplCopyWithImpl( + _$MessageContentListBlockImpl _value, + $Res Function(_$MessageContentListBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$MessageContentListBlockImpl( + null == value + ? _value._value + : value // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageContentListBlockImpl extends MessageContentListBlock { + const _$MessageContentListBlockImpl(final List value, + {final String? $type}) + : _value = value, + $type = $type ?? 'blocks', + super._(); + + factory _$MessageContentListBlockImpl.fromJson(Map json) => + _$$MessageContentListBlockImplFromJson(json); + + final List _value; + @override + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'MessageContent.blocks(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageContentListBlockImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageContentListBlockImplCopyWith<_$MessageContentListBlockImpl> + get copyWith => __$$MessageContentListBlockImplCopyWithImpl< + _$MessageContentListBlockImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) { + return blocks(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) { + return blocks?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? 
text, + required TResult orElse(), + }) { + if (blocks != null) { + return blocks(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageContentListBlock value) blocks, + required TResult Function(MessageContentString value) text, + }) { + return blocks(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageContentListBlock value)? blocks, + TResult? Function(MessageContentString value)? text, + }) { + return blocks?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageContentListBlock value)? blocks, + TResult Function(MessageContentString value)? text, + required TResult orElse(), + }) { + if (blocks != null) { + return blocks(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageContentListBlockImplToJson( + this, + ); + } +} + +abstract class MessageContentListBlock extends MessageContent { + const factory MessageContentListBlock(final List value) = + _$MessageContentListBlockImpl; + const MessageContentListBlock._() : super._(); + + factory MessageContentListBlock.fromJson(Map json) = + _$MessageContentListBlockImpl.fromJson; + + @override + List get value; + @JsonKey(ignore: true) + _$$MessageContentListBlockImplCopyWith<_$MessageContentListBlockImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$MessageContentStringImplCopyWith<$Res> { + factory _$$MessageContentStringImplCopyWith(_$MessageContentStringImpl value, + $Res Function(_$MessageContentStringImpl) then) = + __$$MessageContentStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$MessageContentStringImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, _$MessageContentStringImpl> + implements _$$MessageContentStringImplCopyWith<$Res> { + __$$MessageContentStringImplCopyWithImpl(_$MessageContentStringImpl _value, + $Res Function(_$MessageContentStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$MessageContentStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageContentStringImpl extends MessageContentString { + const _$MessageContentStringImpl(this.value, {final String? $type}) + : $type = $type ?? 
'text', + super._(); + + factory _$MessageContentStringImpl.fromJson(Map json) => + _$$MessageContentStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'MessageContent.text(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageContentStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageContentStringImplCopyWith<_$MessageContentStringImpl> + get copyWith => + __$$MessageContentStringImplCopyWithImpl<_$MessageContentStringImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) { + return text(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) { + return text?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? text, + required TResult orElse(), + }) { + if (text != null) { + return text(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageContentListBlock value) blocks, + required TResult Function(MessageContentString value) text, + }) { + return text(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageContentListBlock value)? blocks, + TResult? Function(MessageContentString value)? text, + }) { + return text?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageContentListBlock value)? blocks, + TResult Function(MessageContentString value)? text, + required TResult orElse(), + }) { + if (text != null) { + return text(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageContentStringImplToJson( + this, + ); + } +} + +abstract class MessageContentString extends MessageContent { + const factory MessageContentString(final String value) = + _$MessageContentStringImpl; + const MessageContentString._() : super._(); + + factory MessageContentString.fromJson(Map json) = + _$MessageContentStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$MessageContentStringImplCopyWith<_$MessageContentStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ImageBlockSource _$ImageBlockSourceFromJson(Map json) { + return _ImageBlockSource.fromJson(json); +} + +/// @nodoc +mixin _$ImageBlockSource { + /// The base64-encoded image data. + String get data => throw _privateConstructorUsedError; + + /// The media type of the image. + @JsonKey(name: 'media_type') + ImageBlockSourceMediaType get mediaType => throw _privateConstructorUsedError; + + /// The type of image source. 
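// Usage sketch (annotation, not part of the diff): collapsing the
// MessageContent union above to plain text. The `blocks` case holds content
// blocks (see the Block union later in this file); treating non-text blocks as
// empty strings here is only a simplification.
String contentAsText(MessageContent content) => content.when(
      text: (value) => value,
      blocks: (value) => value
          .map((block) => block.maybeWhen(text: (text, type) => text, orElse: () => ''))
          .join(),
    );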
+ ImageBlockSourceType get type => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ImageBlockSourceCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ImageBlockSourceCopyWith<$Res> { + factory $ImageBlockSourceCopyWith( + ImageBlockSource value, $Res Function(ImageBlockSource) then) = + _$ImageBlockSourceCopyWithImpl<$Res, ImageBlockSource>; + @useResult + $Res call( + {String data, + @JsonKey(name: 'media_type') ImageBlockSourceMediaType mediaType, + ImageBlockSourceType type}); +} + +/// @nodoc +class _$ImageBlockSourceCopyWithImpl<$Res, $Val extends ImageBlockSource> + implements $ImageBlockSourceCopyWith<$Res> { + _$ImageBlockSourceCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? mediaType = null, + Object? type = null, + }) { + return _then(_value.copyWith( + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as String, + mediaType: null == mediaType + ? _value.mediaType + : mediaType // ignore: cast_nullable_to_non_nullable + as ImageBlockSourceMediaType, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ImageBlockSourceType, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ImageBlockSourceImplCopyWith<$Res> + implements $ImageBlockSourceCopyWith<$Res> { + factory _$$ImageBlockSourceImplCopyWith(_$ImageBlockSourceImpl value, + $Res Function(_$ImageBlockSourceImpl) then) = + __$$ImageBlockSourceImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String data, + @JsonKey(name: 'media_type') ImageBlockSourceMediaType mediaType, + ImageBlockSourceType type}); +} + +/// @nodoc +class __$$ImageBlockSourceImplCopyWithImpl<$Res> + extends _$ImageBlockSourceCopyWithImpl<$Res, _$ImageBlockSourceImpl> + implements _$$ImageBlockSourceImplCopyWith<$Res> { + __$$ImageBlockSourceImplCopyWithImpl(_$ImageBlockSourceImpl _value, + $Res Function(_$ImageBlockSourceImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? mediaType = null, + Object? type = null, + }) { + return _then(_$ImageBlockSourceImpl( + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as String, + mediaType: null == mediaType + ? _value.mediaType + : mediaType // ignore: cast_nullable_to_non_nullable + as ImageBlockSourceMediaType, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ImageBlockSourceType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ImageBlockSourceImpl extends _ImageBlockSource { + const _$ImageBlockSourceImpl( + {required this.data, + @JsonKey(name: 'media_type') required this.mediaType, + required this.type}) + : super._(); + + factory _$ImageBlockSourceImpl.fromJson(Map json) => + _$$ImageBlockSourceImplFromJson(json); + + /// The base64-encoded image data. + @override + final String data; + + /// The media type of the image. + @override + @JsonKey(name: 'media_type') + final ImageBlockSourceMediaType mediaType; + + /// The type of image source. 
+ @override + final ImageBlockSourceType type; + + @override + String toString() { + return 'ImageBlockSource(data: $data, mediaType: $mediaType, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ImageBlockSourceImpl && + (identical(other.data, data) || other.data == data) && + (identical(other.mediaType, mediaType) || + other.mediaType == mediaType) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, data, mediaType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ImageBlockSourceImplCopyWith<_$ImageBlockSourceImpl> get copyWith => + __$$ImageBlockSourceImplCopyWithImpl<_$ImageBlockSourceImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ImageBlockSourceImplToJson( + this, + ); + } +} + +abstract class _ImageBlockSource extends ImageBlockSource { + const factory _ImageBlockSource( + {required final String data, + @JsonKey(name: 'media_type') + required final ImageBlockSourceMediaType mediaType, + required final ImageBlockSourceType type}) = _$ImageBlockSourceImpl; + const _ImageBlockSource._() : super._(); + + factory _ImageBlockSource.fromJson(Map json) = + _$ImageBlockSourceImpl.fromJson; + + @override + + /// The base64-encoded image data. + String get data; + @override + + /// The media type of the image. + @JsonKey(name: 'media_type') + ImageBlockSourceMediaType get mediaType; + @override + + /// The type of image source. + ImageBlockSourceType get type; + @override + @JsonKey(ignore: true) + _$$ImageBlockSourceImplCopyWith<_$ImageBlockSourceImpl> get copyWith => + throw _privateConstructorUsedError; +} + +Usage _$UsageFromJson(Map json) { + return _Usage.fromJson(json); +} + +/// @nodoc +mixin _$Usage { + /// The number of input tokens which were used. + @JsonKey(name: 'input_tokens') + int get inputTokens => throw _privateConstructorUsedError; + + /// The number of output tokens which were used. + @JsonKey(name: 'output_tokens') + int get outputTokens => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $UsageCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $UsageCopyWith<$Res> { + factory $UsageCopyWith(Usage value, $Res Function(Usage) then) = + _$UsageCopyWithImpl<$Res, Usage>; + @useResult + $Res call( + {@JsonKey(name: 'input_tokens') int inputTokens, + @JsonKey(name: 'output_tokens') int outputTokens}); +} + +/// @nodoc +class _$UsageCopyWithImpl<$Res, $Val extends Usage> + implements $UsageCopyWith<$Res> { + _$UsageCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? inputTokens = null, + Object? outputTokens = null, + }) { + return _then(_value.copyWith( + inputTokens: null == inputTokens + ? _value.inputTokens + : inputTokens // ignore: cast_nullable_to_non_nullable + as int, + outputTokens: null == outputTokens + ? 
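// Usage sketch (annotation, not part of the diff): building the image source
// described above from raw PNG bytes. base64Encode comes from dart:convert;
// the exact enum member names for 'image/png' and 'base64' are assumptions.
ImageBlockSource pngSource(List<int> bytes) => ImageBlockSource(
      data: base64Encode(bytes),                     // base64-encoded payload
      mediaType: ImageBlockSourceMediaType.imagePng, // assumed member name
      type: ImageBlockSourceType.base64,             // assumed member name
    );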
_value.outputTokens + : outputTokens // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$UsageImplCopyWith<$Res> implements $UsageCopyWith<$Res> { + factory _$$UsageImplCopyWith( + _$UsageImpl value, $Res Function(_$UsageImpl) then) = + __$$UsageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'input_tokens') int inputTokens, + @JsonKey(name: 'output_tokens') int outputTokens}); +} + +/// @nodoc +class __$$UsageImplCopyWithImpl<$Res> + extends _$UsageCopyWithImpl<$Res, _$UsageImpl> + implements _$$UsageImplCopyWith<$Res> { + __$$UsageImplCopyWithImpl( + _$UsageImpl _value, $Res Function(_$UsageImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? inputTokens = null, + Object? outputTokens = null, + }) { + return _then(_$UsageImpl( + inputTokens: null == inputTokens + ? _value.inputTokens + : inputTokens // ignore: cast_nullable_to_non_nullable + as int, + outputTokens: null == outputTokens + ? _value.outputTokens + : outputTokens // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UsageImpl extends _Usage { + const _$UsageImpl( + {@JsonKey(name: 'input_tokens') required this.inputTokens, + @JsonKey(name: 'output_tokens') required this.outputTokens}) + : super._(); + + factory _$UsageImpl.fromJson(Map json) => + _$$UsageImplFromJson(json); + + /// The number of input tokens which were used. + @override + @JsonKey(name: 'input_tokens') + final int inputTokens; + + /// The number of output tokens which were used. + @override + @JsonKey(name: 'output_tokens') + final int outputTokens; + + @override + String toString() { + return 'Usage(inputTokens: $inputTokens, outputTokens: $outputTokens)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UsageImpl && + (identical(other.inputTokens, inputTokens) || + other.inputTokens == inputTokens) && + (identical(other.outputTokens, outputTokens) || + other.outputTokens == outputTokens)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, inputTokens, outputTokens); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UsageImplCopyWith<_$UsageImpl> get copyWith => + __$$UsageImplCopyWithImpl<_$UsageImpl>(this, _$identity); + + @override + Map toJson() { + return _$$UsageImplToJson( + this, + ); + } +} + +abstract class _Usage extends Usage { + const factory _Usage( + {@JsonKey(name: 'input_tokens') required final int inputTokens, + @JsonKey(name: 'output_tokens') required final int outputTokens}) = + _$UsageImpl; + const _Usage._() : super._(); + + factory _Usage.fromJson(Map json) = _$UsageImpl.fromJson; + + @override + + /// The number of input tokens which were used. + @JsonKey(name: 'input_tokens') + int get inputTokens; + @override + + /// The number of output tokens which were used. + @JsonKey(name: 'output_tokens') + int get outputTokens; + @override + @JsonKey(ignore: true) + _$$UsageImplCopyWith<_$UsageImpl> get copyWith => + throw _privateConstructorUsedError; +} + +MessageDelta _$MessageDeltaFromJson(Map json) { + return _MessageDelta.fromJson(json); +} + +/// @nodoc +mixin _$MessageDelta { + /// The reason that we stopped. 
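// Usage sketch (annotation, not part of the diff): the Usage type above
// reports billing-side token counts, so a simple total is just the sum of the
// two fields. As the doc comment notes, these counts will not map one-to-one
// onto the visible text of a request or response.
int totalTokens(Usage usage) => usage.inputTokens + usage.outputTokens;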
+ /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? get stopReason => throw _privateConstructorUsedError; + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? get stopSequence => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageDeltaCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageDeltaCopyWith<$Res> { + factory $MessageDeltaCopyWith( + MessageDelta value, $Res Function(MessageDelta) then) = + _$MessageDeltaCopyWithImpl<$Res, MessageDelta>; + @useResult + $Res call( + {@JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? stopSequence}); +} + +/// @nodoc +class _$MessageDeltaCopyWithImpl<$Res, $Val extends MessageDelta> + implements $MessageDeltaCopyWith<$Res> { + _$MessageDeltaCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? stopReason = freezed, + Object? stopSequence = freezed, + }) { + return _then(_value.copyWith( + stopReason: freezed == stopReason + ? _value.stopReason + : stopReason // ignore: cast_nullable_to_non_nullable + as StopReason?, + stopSequence: freezed == stopSequence + ? _value.stopSequence + : stopSequence // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$MessageDeltaImplCopyWith<$Res> + implements $MessageDeltaCopyWith<$Res> { + factory _$$MessageDeltaImplCopyWith( + _$MessageDeltaImpl value, $Res Function(_$MessageDeltaImpl) then) = + __$$MessageDeltaImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? stopSequence}); +} + +/// @nodoc +class __$$MessageDeltaImplCopyWithImpl<$Res> + extends _$MessageDeltaCopyWithImpl<$Res, _$MessageDeltaImpl> + implements _$$MessageDeltaImplCopyWith<$Res> { + __$$MessageDeltaImplCopyWithImpl( + _$MessageDeltaImpl _value, $Res Function(_$MessageDeltaImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? stopReason = freezed, + Object? stopSequence = freezed, + }) { + return _then(_$MessageDeltaImpl( + stopReason: freezed == stopReason + ? _value.stopReason + : stopReason // ignore: cast_nullable_to_non_nullable + as StopReason?, + stopSequence: freezed == stopSequence + ? 
_value.stopSequence + : stopSequence // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaImpl extends _MessageDelta { + const _$MessageDeltaImpl( + {@JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) this.stopSequence}) + : super._(); + + factory _$MessageDeltaImpl.fromJson(Map json) => + _$$MessageDeltaImplFromJson(json); + + /// The reason that we stopped. + /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @override + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final StopReason? stopReason; + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @override + @JsonKey(name: 'stop_sequence', includeIfNull: false) + final String? stopSequence; + + @override + String toString() { + return 'MessageDelta(stopReason: $stopReason, stopSequence: $stopSequence)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaImpl && + (identical(other.stopReason, stopReason) || + other.stopReason == stopReason) && + (identical(other.stopSequence, stopSequence) || + other.stopSequence == stopSequence)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, stopReason, stopSequence); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => + __$$MessageDeltaImplCopyWithImpl<_$MessageDeltaImpl>(this, _$identity); + + @override + Map toJson() { + return _$$MessageDeltaImplToJson( + this, + ); + } +} + +abstract class _MessageDelta extends MessageDelta { + const factory _MessageDelta( + {@JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final StopReason? stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) + final String? stopSequence}) = _$MessageDeltaImpl; + const _MessageDelta._() : super._(); + + factory _MessageDelta.fromJson(Map json) = + _$MessageDeltaImpl.fromJson; + + @override + + /// The reason that we stopped. + /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? get stopReason; + @override + + /// Which custom stop sequence was generated, if any. 
+ /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? get stopSequence; + @override + @JsonKey(ignore: true) + _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => + throw _privateConstructorUsedError; +} + +MessageDeltaUsage _$MessageDeltaUsageFromJson(Map json) { + return _MessageDeltaUsage.fromJson(json); +} + +/// @nodoc +mixin _$MessageDeltaUsage { + /// The cumulative number of output tokens which were used. + @JsonKey(name: 'output_tokens') + int get outputTokens => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageDeltaUsageCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageDeltaUsageCopyWith<$Res> { + factory $MessageDeltaUsageCopyWith( + MessageDeltaUsage value, $Res Function(MessageDeltaUsage) then) = + _$MessageDeltaUsageCopyWithImpl<$Res, MessageDeltaUsage>; + @useResult + $Res call({@JsonKey(name: 'output_tokens') int outputTokens}); +} + +/// @nodoc +class _$MessageDeltaUsageCopyWithImpl<$Res, $Val extends MessageDeltaUsage> + implements $MessageDeltaUsageCopyWith<$Res> { + _$MessageDeltaUsageCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? outputTokens = null, + }) { + return _then(_value.copyWith( + outputTokens: null == outputTokens + ? _value.outputTokens + : outputTokens // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$MessageDeltaUsageImplCopyWith<$Res> + implements $MessageDeltaUsageCopyWith<$Res> { + factory _$$MessageDeltaUsageImplCopyWith(_$MessageDeltaUsageImpl value, + $Res Function(_$MessageDeltaUsageImpl) then) = + __$$MessageDeltaUsageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(name: 'output_tokens') int outputTokens}); +} + +/// @nodoc +class __$$MessageDeltaUsageImplCopyWithImpl<$Res> + extends _$MessageDeltaUsageCopyWithImpl<$Res, _$MessageDeltaUsageImpl> + implements _$$MessageDeltaUsageImplCopyWith<$Res> { + __$$MessageDeltaUsageImplCopyWithImpl(_$MessageDeltaUsageImpl _value, + $Res Function(_$MessageDeltaUsageImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? outputTokens = null, + }) { + return _then(_$MessageDeltaUsageImpl( + outputTokens: null == outputTokens + ? _value.outputTokens + : outputTokens // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaUsageImpl extends _MessageDeltaUsage { + const _$MessageDeltaUsageImpl( + {@JsonKey(name: 'output_tokens') required this.outputTokens}) + : super._(); + + factory _$MessageDeltaUsageImpl.fromJson(Map json) => + _$$MessageDeltaUsageImplFromJson(json); + + /// The cumulative number of output tokens which were used. 
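// Usage sketch (annotation, not part of the diff): during streaming,
// MessageDelta carries the eventual stop information while MessageDeltaUsage
// (below) reports a cumulative output-token count, so each delta's value
// replaces, rather than adds to, the previous one.
void logDelta(MessageDelta delta, MessageDeltaUsage usage) {
  print('stop_reason=${delta.stopReason} '
      'stop_sequence=${delta.stopSequence} '
      'output_tokens=${usage.outputTokens}');
}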
+ @override + @JsonKey(name: 'output_tokens') + final int outputTokens; + + @override + String toString() { + return 'MessageDeltaUsage(outputTokens: $outputTokens)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaUsageImpl && + (identical(other.outputTokens, outputTokens) || + other.outputTokens == outputTokens)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, outputTokens); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaUsageImplCopyWith<_$MessageDeltaUsageImpl> get copyWith => + __$$MessageDeltaUsageImplCopyWithImpl<_$MessageDeltaUsageImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$MessageDeltaUsageImplToJson( + this, + ); + } +} + +abstract class _MessageDeltaUsage extends MessageDeltaUsage { + const factory _MessageDeltaUsage( + {@JsonKey(name: 'output_tokens') required final int outputTokens}) = + _$MessageDeltaUsageImpl; + const _MessageDeltaUsage._() : super._(); + + factory _MessageDeltaUsage.fromJson(Map json) = + _$MessageDeltaUsageImpl.fromJson; + + @override + + /// The cumulative number of output tokens which were used. + @JsonKey(name: 'output_tokens') + int get outputTokens; + @override + @JsonKey(ignore: true) + _$$MessageDeltaUsageImplCopyWith<_$MessageDeltaUsageImpl> get copyWith => + throw _privateConstructorUsedError; +} + +TextBlockDelta _$TextBlockDeltaFromJson(Map json) { + return _TextBlockDelta.fromJson(json); +} + +/// @nodoc +mixin _$TextBlockDelta { + /// The text delta. + String get text => throw _privateConstructorUsedError; + + /// The type of content block. + String get type => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $TextBlockDeltaCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $TextBlockDeltaCopyWith<$Res> { + factory $TextBlockDeltaCopyWith( + TextBlockDelta value, $Res Function(TextBlockDelta) then) = + _$TextBlockDeltaCopyWithImpl<$Res, TextBlockDelta>; + @useResult + $Res call({String text, String type}); +} + +/// @nodoc +class _$TextBlockDeltaCopyWithImpl<$Res, $Val extends TextBlockDelta> + implements $TextBlockDeltaCopyWith<$Res> { + _$TextBlockDeltaCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? text = null, + Object? type = null, + }) { + return _then(_value.copyWith( + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$TextBlockDeltaImplCopyWith<$Res> + implements $TextBlockDeltaCopyWith<$Res> { + factory _$$TextBlockDeltaImplCopyWith(_$TextBlockDeltaImpl value, + $Res Function(_$TextBlockDeltaImpl) then) = + __$$TextBlockDeltaImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String text, String type}); +} + +/// @nodoc +class __$$TextBlockDeltaImplCopyWithImpl<$Res> + extends _$TextBlockDeltaCopyWithImpl<$Res, _$TextBlockDeltaImpl> + implements _$$TextBlockDeltaImplCopyWith<$Res> { + __$$TextBlockDeltaImplCopyWithImpl( + _$TextBlockDeltaImpl _value, $Res Function(_$TextBlockDeltaImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? text = null, + Object? type = null, + }) { + return _then(_$TextBlockDeltaImpl( + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$TextBlockDeltaImpl extends _TextBlockDelta { + const _$TextBlockDeltaImpl({required this.text, required this.type}) + : super._(); + + factory _$TextBlockDeltaImpl.fromJson(Map json) => + _$$TextBlockDeltaImplFromJson(json); + + /// The text delta. + @override + final String text; + + /// The type of content block. + @override + final String type; + + @override + String toString() { + return 'TextBlockDelta(text: $text, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$TextBlockDeltaImpl && + (identical(other.text, text) || other.text == text) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, text, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => + __$$TextBlockDeltaImplCopyWithImpl<_$TextBlockDeltaImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$TextBlockDeltaImplToJson( + this, + ); + } +} + +abstract class _TextBlockDelta extends TextBlockDelta { + const factory _TextBlockDelta( + {required final String text, + required final String type}) = _$TextBlockDeltaImpl; + const _TextBlockDelta._() : super._(); + + factory _TextBlockDelta.fromJson(Map json) = + _$TextBlockDeltaImpl.fromJson; + + @override + + /// The text delta. + String get text; + @override + + /// The type of content block. + String get type; + @override + @JsonKey(ignore: true) + _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => + throw _privateConstructorUsedError; +} + +Block _$BlockFromJson(Map json) { + switch (json['type']) { + case 'text': + return TextBlock.fromJson(json); + case 'image': + return ImageBlock.fromJson(json); + + default: + throw CheckedFromJsonException( + json, 'type', 'Block', 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$Block { + /// The type of content block. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? 
Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $BlockCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $BlockCopyWith<$Res> { + factory $BlockCopyWith(Block value, $Res Function(Block) then) = + _$BlockCopyWithImpl<$Res, Block>; + @useResult + $Res call({String type}); +} + +/// @nodoc +class _$BlockCopyWithImpl<$Res, $Val extends Block> + implements $BlockCopyWith<$Res> { + _$BlockCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$TextBlockImplCopyWith<$Res> implements $BlockCopyWith<$Res> { + factory _$$TextBlockImplCopyWith( + _$TextBlockImpl value, $Res Function(_$TextBlockImpl) then) = + __$$TextBlockImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String text, String type}); +} + +/// @nodoc +class __$$TextBlockImplCopyWithImpl<$Res> + extends _$BlockCopyWithImpl<$Res, _$TextBlockImpl> + implements _$$TextBlockImplCopyWith<$Res> { + __$$TextBlockImplCopyWithImpl( + _$TextBlockImpl _value, $Res Function(_$TextBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? text = null, + Object? type = null, + }) { + return _then(_$TextBlockImpl( + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$TextBlockImpl extends TextBlock { + const _$TextBlockImpl({required this.text, this.type = 'text'}) : super._(); + + factory _$TextBlockImpl.fromJson(Map json) => + _$$TextBlockImplFromJson(json); + + /// The text content. + @override + final String text; + + /// The type of content block. 
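+ ///
+ /// Illustrative sketch (not generated output) of constructing a text block
+ /// and matching on the `Block` union defined above:
+ ///
+ /// ```dart
+ /// const Block block = TextBlock(text: 'Hello!'); // `type` defaults to 'text'
+ /// final label = block.when(
+ ///   text: (text, type) => 'text block: $text',
+ ///   image: (source, type) => 'image block',
+ /// );
+ /// ```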
+ @override + @JsonKey() + final String type; + + @override + String toString() { + return 'Block.text(text: $text, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$TextBlockImpl && + (identical(other.text, text) || other.text == text) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, text, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$TextBlockImplCopyWith<_$TextBlockImpl> get copyWith => + __$$TextBlockImplCopyWithImpl<_$TextBlockImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + }) { + return text(this.text, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + }) { + return text?.call(this.text, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + required TResult orElse(), + }) { + if (text != null) { + return text(this.text, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + }) { + return text(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + }) { + return text?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + required TResult orElse(), + }) { + if (text != null) { + return text(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$TextBlockImplToJson( + this, + ); + } +} + +abstract class TextBlock extends Block { + const factory TextBlock({required final String text, final String type}) = + _$TextBlockImpl; + const TextBlock._() : super._(); + + factory TextBlock.fromJson(Map json) = + _$TextBlockImpl.fromJson; + + /// The text content. + String get text; + @override + + /// The type of content block. + String get type; + @override + @JsonKey(ignore: true) + _$$TextBlockImplCopyWith<_$TextBlockImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ImageBlockImplCopyWith<$Res> implements $BlockCopyWith<$Res> { + factory _$$ImageBlockImplCopyWith( + _$ImageBlockImpl value, $Res Function(_$ImageBlockImpl) then) = + __$$ImageBlockImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ImageBlockSource source, String type}); + + $ImageBlockSourceCopyWith<$Res> get source; +} + +/// @nodoc +class __$$ImageBlockImplCopyWithImpl<$Res> + extends _$BlockCopyWithImpl<$Res, _$ImageBlockImpl> + implements _$$ImageBlockImplCopyWith<$Res> { + __$$ImageBlockImplCopyWithImpl( + _$ImageBlockImpl _value, $Res Function(_$ImageBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? source = null, + Object? type = null, + }) { + return _then(_$ImageBlockImpl( + source: null == source + ? 
_value.source + : source // ignore: cast_nullable_to_non_nullable + as ImageBlockSource, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } + + @override + @pragma('vm:prefer-inline') + $ImageBlockSourceCopyWith<$Res> get source { + return $ImageBlockSourceCopyWith<$Res>(_value.source, (value) { + return _then(_value.copyWith(source: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ImageBlockImpl extends ImageBlock { + const _$ImageBlockImpl({required this.source, this.type = 'image'}) + : super._(); + + factory _$ImageBlockImpl.fromJson(Map json) => + _$$ImageBlockImplFromJson(json); + + /// The source of an image block. + @override + final ImageBlockSource source; + + /// The type of content block. + @override + @JsonKey() + final String type; + + @override + String toString() { + return 'Block.image(source: $source, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ImageBlockImpl && + (identical(other.source, source) || other.source == source) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, source, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ImageBlockImplCopyWith<_$ImageBlockImpl> get copyWith => + __$$ImageBlockImplCopyWithImpl<_$ImageBlockImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + }) { + return image(source, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + }) { + return image?.call(source, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + required TResult orElse(), + }) { + if (image != null) { + return image(source, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + }) { + return image(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + }) { + return image?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + required TResult orElse(), + }) { + if (image != null) { + return image(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ImageBlockImplToJson( + this, + ); + } +} + +abstract class ImageBlock extends Block { + const factory ImageBlock( + {required final ImageBlockSource source, + final String type}) = _$ImageBlockImpl; + const ImageBlock._() : super._(); + + factory ImageBlock.fromJson(Map json) = + _$ImageBlockImpl.fromJson; + + /// The source of an image block. + ImageBlockSource get source; + @override + + /// The type of content block. 
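+ ///
+ /// When decoding JSON, the union is discriminated on this `type` field
+ /// ('text' or 'image'). Illustrative sketch, assuming the companion class
+ /// exposes the usual freezed `Block.fromJson` factory:
+ ///
+ /// ```dart
+ /// final block = Block.fromJson({'type': 'text', 'text': 'Hi'});
+ /// assert(block is TextBlock);
+ /// ```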
+ String get type; + @override + @JsonKey(ignore: true) + _$$ImageBlockImplCopyWith<_$ImageBlockImpl> get copyWith => + throw _privateConstructorUsedError; +} + +MessageStreamEvent _$MessageStreamEventFromJson(Map json) { + switch (json['type']) { + case 'message_start': + return MessageStartEvent.fromJson(json); + case 'message_delta': + return MessageDeltaEvent.fromJson(json); + case 'message_stop': + return MessageStopEvent.fromJson(json); + case 'content_block_start': + return ContentBlockStartEvent.fromJson(json); + case 'content_block_delta': + return ContentBlockDeltaEvent.fromJson(json); + case 'content_block_stop': + return ContentBlockStopEvent.fromJson(json); + case 'ping': + return PingEvent.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'type', 'MessageStreamEvent', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$MessageStreamEvent { + /// The type of a streaming event. + MessageStreamEventType get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? 
ping, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageStreamEventCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageStreamEventCopyWith<$Res> { + factory $MessageStreamEventCopyWith( + MessageStreamEvent value, $Res Function(MessageStreamEvent) then) = + _$MessageStreamEventCopyWithImpl<$Res, MessageStreamEvent>; + @useResult + $Res call({MessageStreamEventType type}); +} + +/// @nodoc +class _$MessageStreamEventCopyWithImpl<$Res, $Val extends MessageStreamEvent> + implements $MessageStreamEventCopyWith<$Res> { + _$MessageStreamEventCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$MessageStartEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$MessageStartEventImplCopyWith(_$MessageStartEventImpl value, + $Res Function(_$MessageStartEventImpl) then) = + __$$MessageStartEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({Message message, MessageStreamEventType type}); + + $MessageCopyWith<$Res> get message; +} + +/// @nodoc +class __$$MessageStartEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStartEventImpl> + implements _$$MessageStartEventImplCopyWith<$Res> { + __$$MessageStartEventImplCopyWithImpl(_$MessageStartEventImpl _value, + $Res Function(_$MessageStartEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? message = null, + Object? type = null, + }) { + return _then(_$MessageStartEventImpl( + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as Message, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } + + @override + @pragma('vm:prefer-inline') + $MessageCopyWith<$Res> get message { + return $MessageCopyWith<$Res>(_value.message, (value) { + return _then(_value.copyWith(message: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageStartEventImpl extends MessageStartEvent { + const _$MessageStartEventImpl({required this.message, required this.type}) + : super._(); + + factory _$MessageStartEventImpl.fromJson(Map json) => + _$$MessageStartEventImplFromJson(json); + + /// A message in a chat conversation. + @override + final Message message; + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.messageStart(message: $message, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageStartEventImpl && + (identical(other.message, message) || other.message == message) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, message, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => + __$$MessageStartEventImplCopyWithImpl<_$MessageStartEventImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return messageStart(message, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? 
Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return messageStart?.call(message, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) { + if (messageStart != null) { + return messageStart(message, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return messageStart(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return messageStart?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? 
ping, + required TResult orElse(), + }) { + if (messageStart != null) { + return messageStart(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageStartEventImplToJson( + this, + ); + } +} + +abstract class MessageStartEvent extends MessageStreamEvent { + const factory MessageStartEvent( + {required final Message message, + required final MessageStreamEventType type}) = _$MessageStartEventImpl; + const MessageStartEvent._() : super._(); + + factory MessageStartEvent.fromJson(Map json) = + _$MessageStartEventImpl.fromJson; + + /// A message in a chat conversation. + Message get message; + @override + + /// The type of a streaming event. + MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$MessageDeltaEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$MessageDeltaEventImplCopyWith(_$MessageDeltaEventImpl value, + $Res Function(_$MessageDeltaEventImpl) then) = + __$$MessageDeltaEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {MessageDelta delta, + MessageStreamEventType type, + MessageDeltaUsage usage}); + + $MessageDeltaCopyWith<$Res> get delta; + $MessageDeltaUsageCopyWith<$Res> get usage; +} + +/// @nodoc +class __$$MessageDeltaEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageDeltaEventImpl> + implements _$$MessageDeltaEventImplCopyWith<$Res> { + __$$MessageDeltaEventImplCopyWithImpl(_$MessageDeltaEventImpl _value, + $Res Function(_$MessageDeltaEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? delta = null, + Object? type = null, + Object? usage = null, + }) { + return _then(_$MessageDeltaEventImpl( + delta: null == delta + ? _value.delta + : delta // ignore: cast_nullable_to_non_nullable + as MessageDelta, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + usage: null == usage + ? _value.usage + : usage // ignore: cast_nullable_to_non_nullable + as MessageDeltaUsage, + )); + } + + @override + @pragma('vm:prefer-inline') + $MessageDeltaCopyWith<$Res> get delta { + return $MessageDeltaCopyWith<$Res>(_value.delta, (value) { + return _then(_value.copyWith(delta: value)); + }); + } + + @override + @pragma('vm:prefer-inline') + $MessageDeltaUsageCopyWith<$Res> get usage { + return $MessageDeltaUsageCopyWith<$Res>(_value.usage, (value) { + return _then(_value.copyWith(usage: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaEventImpl extends MessageDeltaEvent { + const _$MessageDeltaEventImpl( + {required this.delta, required this.type, required this.usage}) + : super._(); + + factory _$MessageDeltaEventImpl.fromJson(Map json) => + _$$MessageDeltaEventImplFromJson(json); + + /// A delta in a streaming message. + @override + final MessageDelta delta; + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. 
As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + @override + final MessageDeltaUsage usage; + + @override + String toString() { + return 'MessageStreamEvent.messageDelta(delta: $delta, type: $type, usage: $usage)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaEventImpl && + (identical(other.delta, delta) || other.delta == delta) && + (identical(other.type, type) || other.type == type) && + (identical(other.usage, usage) || other.usage == usage)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, delta, type, usage); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => + __$$MessageDeltaEventImplCopyWithImpl<_$MessageDeltaEventImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return messageDelta(delta, type, usage); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return messageDelta?.call(delta, type, usage); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? 
ping, + required TResult orElse(), + }) { + if (messageDelta != null) { + return messageDelta(delta, type, usage); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return messageDelta(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return messageDelta?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (messageDelta != null) { + return messageDelta(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageDeltaEventImplToJson( + this, + ); + } +} + +abstract class MessageDeltaEvent extends MessageStreamEvent { + const factory MessageDeltaEvent( + {required final MessageDelta delta, + required final MessageStreamEventType type, + required final MessageDeltaUsage usage}) = _$MessageDeltaEventImpl; + const MessageDeltaEvent._() : super._(); + + factory MessageDeltaEvent.fromJson(Map json) = + _$MessageDeltaEventImpl.fromJson; + + /// A delta in a streaming message. + MessageDelta get delta; + @override + + /// The type of a streaming event. + MessageStreamEventType get type; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. 
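+ ///
+ /// Illustrative sketch of tracking the cumulative token count while
+ /// streaming; `events` is an assumed `Stream<MessageStreamEvent>` obtained
+ /// from the client:
+ ///
+ /// ```dart
+ /// var outputTokens = 0;
+ /// await for (final event in events) { // inside an async function
+ ///   event.mapOrNull(
+ ///     messageDelta: (e) {
+ ///       outputTokens = e.usage.outputTokens;
+ ///     },
+ ///   );
+ /// }
+ /// ```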
+ MessageDeltaUsage get usage; + @override + @JsonKey(ignore: true) + _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$MessageStopEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$MessageStopEventImplCopyWith(_$MessageStopEventImpl value, + $Res Function(_$MessageStopEventImpl) then) = + __$$MessageStopEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({MessageStreamEventType type}); +} + +/// @nodoc +class __$$MessageStopEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStopEventImpl> + implements _$$MessageStopEventImplCopyWith<$Res> { + __$$MessageStopEventImplCopyWithImpl(_$MessageStopEventImpl _value, + $Res Function(_$MessageStopEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$MessageStopEventImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageStopEventImpl extends MessageStopEvent { + const _$MessageStopEventImpl({required this.type}) : super._(); + + factory _$MessageStopEventImpl.fromJson(Map json) => + _$$MessageStopEventImplFromJson(json); + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.messageStop(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageStopEventImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => + __$$MessageStopEventImplCopyWithImpl<_$MessageStopEventImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return messageStop(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? 
Function(MessageStreamEventType type)? ping, + }) { + return messageStop?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) { + if (messageStop != null) { + return messageStop(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return messageStop(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return messageStop?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (messageStop != null) { + return messageStop(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageStopEventImplToJson( + this, + ); + } +} + +abstract class MessageStopEvent extends MessageStreamEvent { + const factory MessageStopEvent({required final MessageStreamEventType type}) = + _$MessageStopEventImpl; + const MessageStopEvent._() : super._(); + + factory MessageStopEvent.fromJson(Map json) = + _$MessageStopEventImpl.fromJson; + + @override + + /// The type of a streaming event. 
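+ ///
+ /// Illustrative sketch of exhaustively handling every variant of the
+ /// `MessageStreamEvent` union this event belongs to; `event` is an assumed
+ /// `MessageStreamEvent` instance:
+ ///
+ /// ```dart
+ /// final description = event.when(
+ ///   messageStart: (message, type) => 'message started',
+ ///   messageDelta: (delta, type, usage) => 'message delta',
+ ///   messageStop: (type) => 'message finished',
+ ///   contentBlockStart: (contentBlock, index, type) => 'block $index started',
+ ///   contentBlockDelta: (delta, index, type) => 'block $index: ${delta.text}',
+ ///   contentBlockStop: (index, type) => 'block $index finished',
+ ///   ping: (type) => 'ping',
+ /// );
+ /// ```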
+ MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ContentBlockStartEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$ContentBlockStartEventImplCopyWith( + _$ContentBlockStartEventImpl value, + $Res Function(_$ContentBlockStartEventImpl) then) = + __$$ContentBlockStartEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type}); +} + +/// @nodoc +class __$$ContentBlockStartEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStartEventImpl> + implements _$$ContentBlockStartEventImplCopyWith<$Res> { + __$$ContentBlockStartEventImplCopyWithImpl( + _$ContentBlockStartEventImpl _value, + $Res Function(_$ContentBlockStartEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? contentBlock = freezed, + Object? index = null, + Object? type = null, + }) { + return _then(_$ContentBlockStartEventImpl( + contentBlock: freezed == contentBlock + ? _value.contentBlock + : contentBlock // ignore: cast_nullable_to_non_nullable + as TextBlock, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { + const _$ContentBlockStartEventImpl( + {@JsonKey(name: 'content_block') required this.contentBlock, + required this.index, + required this.type}) + : super._(); + + factory _$ContentBlockStartEventImpl.fromJson(Map json) => + _$$ContentBlockStartEventImplFromJson(json); + + /// A block of text content. + @override + @JsonKey(name: 'content_block') + final TextBlock contentBlock; + + /// The index of the content block. + @override + final int index; + + /// The type of a streaming event. 
+ @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.contentBlockStart(contentBlock: $contentBlock, index: $index, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ContentBlockStartEventImpl && + const DeepCollectionEquality() + .equals(other.contentBlock, contentBlock) && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, + const DeepCollectionEquality().hash(contentBlock), index, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> + get copyWith => __$$ContentBlockStartEventImplCopyWithImpl< + _$ContentBlockStartEventImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return contentBlockStart(contentBlock, index, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return contentBlockStart?.call(contentBlock, index, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? 
ping, + required TResult orElse(), + }) { + if (contentBlockStart != null) { + return contentBlockStart(contentBlock, index, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return contentBlockStart(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return contentBlockStart?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (contentBlockStart != null) { + return contentBlockStart(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ContentBlockStartEventImplToJson( + this, + ); + } +} + +abstract class ContentBlockStartEvent extends MessageStreamEvent { + const factory ContentBlockStartEvent( + {@JsonKey(name: 'content_block') required final TextBlock contentBlock, + required final int index, + required final MessageStreamEventType + type}) = _$ContentBlockStartEventImpl; + const ContentBlockStartEvent._() : super._(); + + factory ContentBlockStartEvent.fromJson(Map json) = + _$ContentBlockStartEventImpl.fromJson; + + /// A block of text content. + @JsonKey(name: 'content_block') + TextBlock get contentBlock; + + /// The index of the content block. + int get index; + @override + + /// The type of a streaming event. 
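+ ///
+ /// The generated `copyWith` returns an updated copy, and `==` compares
+ /// field values. Illustrative sketch; `event` is an assumed
+ /// `ContentBlockStartEvent` instance:
+ ///
+ /// ```dart
+ /// final moved = event.copyWith(index: event.index + 1);
+ /// assert(moved != event && moved.index == event.index + 1);
+ /// ```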
+ MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ContentBlockDeltaEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$ContentBlockDeltaEventImplCopyWith( + _$ContentBlockDeltaEventImpl value, + $Res Function(_$ContentBlockDeltaEventImpl) then) = + __$$ContentBlockDeltaEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({TextBlockDelta delta, int index, MessageStreamEventType type}); + + $TextBlockDeltaCopyWith<$Res> get delta; +} + +/// @nodoc +class __$$ContentBlockDeltaEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockDeltaEventImpl> + implements _$$ContentBlockDeltaEventImplCopyWith<$Res> { + __$$ContentBlockDeltaEventImplCopyWithImpl( + _$ContentBlockDeltaEventImpl _value, + $Res Function(_$ContentBlockDeltaEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? delta = null, + Object? index = null, + Object? type = null, + }) { + return _then(_$ContentBlockDeltaEventImpl( + delta: null == delta + ? _value.delta + : delta // ignore: cast_nullable_to_non_nullable + as TextBlockDelta, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } + + @override + @pragma('vm:prefer-inline') + $TextBlockDeltaCopyWith<$Res> get delta { + return $TextBlockDeltaCopyWith<$Res>(_value.delta, (value) { + return _then(_value.copyWith(delta: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { + const _$ContentBlockDeltaEventImpl( + {required this.delta, required this.index, required this.type}) + : super._(); + + factory _$ContentBlockDeltaEventImpl.fromJson(Map json) => + _$$ContentBlockDeltaEventImplFromJson(json); + + /// A delta in a streaming text block. + @override + final TextBlockDelta delta; + + /// The index of the content block. + @override + final int index; + + /// The type of a streaming event. 
+ @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.contentBlockDelta(delta: $delta, index: $index, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ContentBlockDeltaEventImpl && + (identical(other.delta, delta) || other.delta == delta) && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, delta, index, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> + get copyWith => __$$ContentBlockDeltaEventImplCopyWithImpl< + _$ContentBlockDeltaEventImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return contentBlockDelta(delta, index, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return contentBlockDelta?.call(delta, index, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? 
ping, + required TResult orElse(), + }) { + if (contentBlockDelta != null) { + return contentBlockDelta(delta, index, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return contentBlockDelta(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return contentBlockDelta?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (contentBlockDelta != null) { + return contentBlockDelta(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ContentBlockDeltaEventImplToJson( + this, + ); + } +} + +abstract class ContentBlockDeltaEvent extends MessageStreamEvent { + const factory ContentBlockDeltaEvent( + {required final TextBlockDelta delta, + required final int index, + required final MessageStreamEventType type}) = + _$ContentBlockDeltaEventImpl; + const ContentBlockDeltaEvent._() : super._(); + + factory ContentBlockDeltaEvent.fromJson(Map json) = + _$ContentBlockDeltaEventImpl.fromJson; + + /// A delta in a streaming text block. + TextBlockDelta get delta; + + /// The index of the content block. + int get index; + @override + + /// The type of a streaming event. 
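+ ///
+ /// Illustrative sketch of concatenating the streamed text deltas; `events`
+ /// is an assumed `Stream<MessageStreamEvent>` obtained from the client:
+ ///
+ /// ```dart
+ /// final buffer = StringBuffer();
+ /// await for (final event in events) { // inside an async function
+ ///   event.mapOrNull(
+ ///     contentBlockDelta: (e) {
+ ///       buffer.write(e.delta.text);
+ ///     },
+ ///   );
+ /// }
+ /// print(buffer.toString());
+ /// ```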
+ MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ContentBlockStopEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$ContentBlockStopEventImplCopyWith( + _$ContentBlockStopEventImpl value, + $Res Function(_$ContentBlockStopEventImpl) then) = + __$$ContentBlockStopEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({int index, MessageStreamEventType type}); +} + +/// @nodoc +class __$$ContentBlockStopEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStopEventImpl> + implements _$$ContentBlockStopEventImplCopyWith<$Res> { + __$$ContentBlockStopEventImplCopyWithImpl(_$ContentBlockStopEventImpl _value, + $Res Function(_$ContentBlockStopEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? index = null, + Object? type = null, + }) { + return _then(_$ContentBlockStopEventImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ContentBlockStopEventImpl extends ContentBlockStopEvent { + const _$ContentBlockStopEventImpl({required this.index, required this.type}) + : super._(); + + factory _$ContentBlockStopEventImpl.fromJson(Map json) => + _$$ContentBlockStopEventImplFromJson(json); + + /// The index of the content block. + @override + final int index; + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.contentBlockStop(index: $index, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ContentBlockStopEventImpl && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, index, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> + get copyWith => __$$ContentBlockStopEventImplCopyWithImpl< + _$ContentBlockStopEventImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return contentBlockStop(index, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? 
Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return contentBlockStop?.call(index, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) { + if (contentBlockStop != null) { + return contentBlockStop(index, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return contentBlockStop(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return contentBlockStop?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? 
ping, + required TResult orElse(), + }) { + if (contentBlockStop != null) { + return contentBlockStop(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ContentBlockStopEventImplToJson( + this, + ); + } +} + +abstract class ContentBlockStopEvent extends MessageStreamEvent { + const factory ContentBlockStopEvent( + {required final int index, + required final MessageStreamEventType type}) = + _$ContentBlockStopEventImpl; + const ContentBlockStopEvent._() : super._(); + + factory ContentBlockStopEvent.fromJson(Map json) = + _$ContentBlockStopEventImpl.fromJson; + + /// The index of the content block. + int get index; + @override + + /// The type of a streaming event. + MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$PingEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$PingEventImplCopyWith( + _$PingEventImpl value, $Res Function(_$PingEventImpl) then) = + __$$PingEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({MessageStreamEventType type}); +} + +/// @nodoc +class __$$PingEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$PingEventImpl> + implements _$$PingEventImplCopyWith<$Res> { + __$$PingEventImplCopyWithImpl( + _$PingEventImpl _value, $Res Function(_$PingEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$PingEventImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$PingEventImpl extends PingEvent { + const _$PingEventImpl({required this.type}) : super._(); + + factory _$PingEventImpl.fromJson(Map json) => + _$$PingEventImplFromJson(json); + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.ping(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$PingEventImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => + __$$PingEventImplCopyWithImpl<_$PingEventImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return ping(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? 
Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return ping?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) { + if (ping != null) { + return ping(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return ping(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return ping?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (ping != null) { + return ping(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$PingEventImplToJson( + this, + ); + } +} + +abstract class PingEvent extends MessageStreamEvent { + const factory PingEvent({required final MessageStreamEventType type}) = + _$PingEventImpl; + const PingEvent._() : super._(); + + factory PingEvent.fromJson(Map json) = + _$PingEventImpl.fromJson; + + @override + + /// The type of a streaming event. 
+ MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => + throw _privateConstructorUsedError; +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart new file mode 100644 index 00000000..b08b072f --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart @@ -0,0 +1,404 @@ +// GENERATED CODE - DO NOT MODIFY BY HAND + +// ignore_for_file: prefer_final_parameters, require_trailing_commas, non_constant_identifier_names, unnecessary_null_checks + +part of 'schema.dart'; + +// ************************************************************************** +// JsonSerializableGenerator +// ************************************************************************** + +_$CreateMessageRequestImpl _$$CreateMessageRequestImplFromJson( + Map json) => + _$CreateMessageRequestImpl( + model: const _ModelConverter().fromJson(json['model']), + messages: (json['messages'] as List) + .map((e) => Message.fromJson(e as Map)) + .toList(), + maxTokens: (json['max_tokens'] as num).toInt(), + metadata: json['metadata'] == null + ? null + : CreateMessageRequestMetadata.fromJson( + json['metadata'] as Map), + stopSequences: (json['stop_sequences'] as List?) + ?.map((e) => e as String) + .toList(), + system: json['system'] as String?, + temperature: (json['temperature'] as num?)?.toDouble(), + topK: (json['top_k'] as num?)?.toInt(), + topP: (json['top_p'] as num?)?.toDouble(), + stream: json['stream'] as bool? ?? false, + ); + +Map _$$CreateMessageRequestImplToJson( + _$CreateMessageRequestImpl instance) { + final val = { + 'model': const _ModelConverter().toJson(instance.model), + 'messages': instance.messages.map((e) => e.toJson()).toList(), + 'max_tokens': instance.maxTokens, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('metadata', instance.metadata?.toJson()); + writeNotNull('stop_sequences', instance.stopSequences); + writeNotNull('system', instance.system); + writeNotNull('temperature', instance.temperature); + writeNotNull('top_k', instance.topK); + writeNotNull('top_p', instance.topP); + val['stream'] = instance.stream; + return val; +} + +_$ModelEnumerationImpl _$$ModelEnumerationImplFromJson( + Map json) => + _$ModelEnumerationImpl( + $enumDecode(_$ModelsEnumMap, json['value']), + $type: json['runtimeType'] as String?, + ); + +Map _$$ModelEnumerationImplToJson( + _$ModelEnumerationImpl instance) => + { + 'value': _$ModelsEnumMap[instance.value]!, + 'runtimeType': instance.$type, + }; + +const _$ModelsEnumMap = { + Models.claude3Opus20240229: 'claude-3-opus-20240229', + Models.claude3Sonnet20240229: 'claude-3-sonnet-20240229', + Models.claude3Haiku20240307: 'claude-3-haiku-20240307', + Models.claude21: 'claude-2.1', + Models.claude20: 'claude-2.0', + Models.claudeInstant12: 'claude-instant-1.2', +}; + +_$ModelStringImpl _$$ModelStringImplFromJson(Map json) => + _$ModelStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$ModelStringImplToJson(_$ModelStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$CreateMessageRequestMetadataImpl _$$CreateMessageRequestMetadataImplFromJson( + Map json) => + _$CreateMessageRequestMetadataImpl( + userId: json['user_id'] as String?, + ); + +Map _$$CreateMessageRequestMetadataImplToJson( + _$CreateMessageRequestMetadataImpl 
instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('user_id', instance.userId); + return val; +} + +_$MessageImpl _$$MessageImplFromJson(Map json) => + _$MessageImpl( + id: json['id'] as String?, + content: const _MessageContentConverter().fromJson(json['content']), + role: $enumDecode(_$MessageRoleEnumMap, json['role']), + model: json['model'] as String?, + stopReason: $enumDecodeNullable(_$StopReasonEnumMap, json['stop_reason'], + unknownValue: JsonKey.nullForUndefinedEnumValue), + stopSequence: json['stop_sequence'] as String?, + type: json['type'] as String?, + usage: json['usage'] == null + ? null + : Usage.fromJson(json['usage'] as Map), + ); + +Map _$$MessageImplToJson(_$MessageImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('id', instance.id); + val['content'] = const _MessageContentConverter().toJson(instance.content); + val['role'] = _$MessageRoleEnumMap[instance.role]!; + writeNotNull('model', instance.model); + writeNotNull('stop_reason', _$StopReasonEnumMap[instance.stopReason]); + writeNotNull('stop_sequence', instance.stopSequence); + writeNotNull('type', instance.type); + writeNotNull('usage', instance.usage?.toJson()); + return val; +} + +const _$MessageRoleEnumMap = { + MessageRole.user: 'user', + MessageRole.assistant: 'assistant', +}; + +const _$StopReasonEnumMap = { + StopReason.endTurn: 'end_turn', + StopReason.maxTokens: 'max_tokens', + StopReason.stopSequence: 'stop_sequence', +}; + +_$MessageContentListBlockImpl _$$MessageContentListBlockImplFromJson( + Map json) => + _$MessageContentListBlockImpl( + (json['value'] as List) + .map((e) => Block.fromJson(e as Map)) + .toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$MessageContentListBlockImplToJson( + _$MessageContentListBlockImpl instance) => + { + 'value': instance.value.map((e) => e.toJson()).toList(), + 'runtimeType': instance.$type, + }; + +_$MessageContentStringImpl _$$MessageContentStringImplFromJson( + Map json) => + _$MessageContentStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$MessageContentStringImplToJson( + _$MessageContentStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$ImageBlockSourceImpl _$$ImageBlockSourceImplFromJson( + Map json) => + _$ImageBlockSourceImpl( + data: json['data'] as String, + mediaType: + $enumDecode(_$ImageBlockSourceMediaTypeEnumMap, json['media_type']), + type: $enumDecode(_$ImageBlockSourceTypeEnumMap, json['type']), + ); + +Map _$$ImageBlockSourceImplToJson( + _$ImageBlockSourceImpl instance) => + { + 'data': instance.data, + 'media_type': _$ImageBlockSourceMediaTypeEnumMap[instance.mediaType]!, + 'type': _$ImageBlockSourceTypeEnumMap[instance.type]!, + }; + +const _$ImageBlockSourceMediaTypeEnumMap = { + ImageBlockSourceMediaType.imageJpeg: 'image/jpeg', + ImageBlockSourceMediaType.imagePng: 'image/png', + ImageBlockSourceMediaType.imageGif: 'image/gif', + ImageBlockSourceMediaType.imageWebp: 'image/webp', +}; + +const _$ImageBlockSourceTypeEnumMap = { + ImageBlockSourceType.base64: 'base64', +}; + +_$UsageImpl _$$UsageImplFromJson(Map json) => _$UsageImpl( + inputTokens: (json['input_tokens'] as num).toInt(), + outputTokens: (json['output_tokens'] as num).toInt(), + ); + +Map _$$UsageImplToJson(_$UsageImpl instance) => + { + 'input_tokens': instance.inputTokens, 
+ 'output_tokens': instance.outputTokens, + }; + +_$MessageDeltaImpl _$$MessageDeltaImplFromJson(Map json) => + _$MessageDeltaImpl( + stopReason: $enumDecodeNullable(_$StopReasonEnumMap, json['stop_reason'], + unknownValue: JsonKey.nullForUndefinedEnumValue), + stopSequence: json['stop_sequence'] as String?, + ); + +Map _$$MessageDeltaImplToJson(_$MessageDeltaImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('stop_reason', _$StopReasonEnumMap[instance.stopReason]); + writeNotNull('stop_sequence', instance.stopSequence); + return val; +} + +_$MessageDeltaUsageImpl _$$MessageDeltaUsageImplFromJson( + Map json) => + _$MessageDeltaUsageImpl( + outputTokens: (json['output_tokens'] as num).toInt(), + ); + +Map _$$MessageDeltaUsageImplToJson( + _$MessageDeltaUsageImpl instance) => + { + 'output_tokens': instance.outputTokens, + }; + +_$TextBlockDeltaImpl _$$TextBlockDeltaImplFromJson(Map json) => + _$TextBlockDeltaImpl( + text: json['text'] as String, + type: json['type'] as String, + ); + +Map _$$TextBlockDeltaImplToJson( + _$TextBlockDeltaImpl instance) => + { + 'text': instance.text, + 'type': instance.type, + }; + +_$TextBlockImpl _$$TextBlockImplFromJson(Map json) => + _$TextBlockImpl( + text: json['text'] as String, + type: json['type'] as String? ?? 'text', + ); + +Map _$$TextBlockImplToJson(_$TextBlockImpl instance) => + { + 'text': instance.text, + 'type': instance.type, + }; + +_$ImageBlockImpl _$$ImageBlockImplFromJson(Map json) => + _$ImageBlockImpl( + source: ImageBlockSource.fromJson(json['source'] as Map), + type: json['type'] as String? ?? 'image', + ); + +Map _$$ImageBlockImplToJson(_$ImageBlockImpl instance) => + { + 'source': instance.source.toJson(), + 'type': instance.type, + }; + +_$MessageStartEventImpl _$$MessageStartEventImplFromJson( + Map json) => + _$MessageStartEventImpl( + message: Message.fromJson(json['message'] as Map), + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + ); + +Map _$$MessageStartEventImplToJson( + _$MessageStartEventImpl instance) => + { + 'message': instance.message.toJson(), + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + }; + +const _$MessageStreamEventTypeEnumMap = { + MessageStreamEventType.messageStart: 'message_start', + MessageStreamEventType.messageDelta: 'message_delta', + MessageStreamEventType.messageStop: 'message_stop', + MessageStreamEventType.contentBlockStart: 'content_block_start', + MessageStreamEventType.contentBlockDelta: 'content_block_delta', + MessageStreamEventType.contentBlockStop: 'content_block_stop', + MessageStreamEventType.ping: 'ping', +}; + +_$MessageDeltaEventImpl _$$MessageDeltaEventImplFromJson( + Map json) => + _$MessageDeltaEventImpl( + delta: MessageDelta.fromJson(json['delta'] as Map), + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + usage: MessageDeltaUsage.fromJson(json['usage'] as Map), + ); + +Map _$$MessageDeltaEventImplToJson( + _$MessageDeltaEventImpl instance) => + { + 'delta': instance.delta.toJson(), + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + 'usage': instance.usage.toJson(), + }; + +_$MessageStopEventImpl _$$MessageStopEventImplFromJson( + Map json) => + _$MessageStopEventImpl( + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + ); + +Map _$$MessageStopEventImplToJson( + _$MessageStopEventImpl instance) => + { + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + }; + +_$ContentBlockStartEventImpl 
_$$ContentBlockStartEventImplFromJson(
+        Map<String, dynamic> json) =>
+    _$ContentBlockStartEventImpl(
+      contentBlock:
+          TextBlock.fromJson(json['content_block'] as Map<String, dynamic>),
+      index: (json['index'] as num).toInt(),
+      type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']),
+    );
+
+Map<String, dynamic> _$$ContentBlockStartEventImplToJson(
+        _$ContentBlockStartEventImpl instance) =>
+    <String, dynamic>{
+      'content_block': instance.contentBlock.toJson(),
+      'index': instance.index,
+      'type': _$MessageStreamEventTypeEnumMap[instance.type]!,
+    };
+
+_$ContentBlockDeltaEventImpl _$$ContentBlockDeltaEventImplFromJson(
+        Map<String, dynamic> json) =>
+    _$ContentBlockDeltaEventImpl(
+      delta: TextBlockDelta.fromJson(json['delta'] as Map<String, dynamic>),
+      index: (json['index'] as num).toInt(),
+      type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']),
+    );
+
+Map<String, dynamic> _$$ContentBlockDeltaEventImplToJson(
+        _$ContentBlockDeltaEventImpl instance) =>
+    <String, dynamic>{
+      'delta': instance.delta.toJson(),
+      'index': instance.index,
+      'type': _$MessageStreamEventTypeEnumMap[instance.type]!,
+    };
+
+_$ContentBlockStopEventImpl _$$ContentBlockStopEventImplFromJson(
+        Map<String, dynamic> json) =>
+    _$ContentBlockStopEventImpl(
+      index: (json['index'] as num).toInt(),
+      type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']),
+    );
+
+Map<String, dynamic> _$$ContentBlockStopEventImplToJson(
+        _$ContentBlockStopEventImpl instance) =>
+    <String, dynamic>{
+      'index': instance.index,
+      'type': _$MessageStreamEventTypeEnumMap[instance.type]!,
+    };
+
+_$PingEventImpl _$$PingEventImplFromJson(Map<String, dynamic> json) =>
+    _$PingEventImpl(
+      type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']),
+    );
+
+Map<String, dynamic> _$$PingEventImplToJson(_$PingEventImpl instance) =>
+    <String, dynamic>{
+      'type': _$MessageStreamEventTypeEnumMap[instance.type]!,
+    };
diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart
new file mode 100644
index 00000000..331c6207
--- /dev/null
+++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart
@@ -0,0 +1,28 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of anthropic_schema;
+
+// ==========================================
+// ENUM: StopReason
+// ==========================================
+
+/// The reason that we stopped.
+///
+/// This may be one of the following values:
+///
+/// - `"end_turn"`: the model reached a natural stopping point
+/// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum
+/// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated
+///
+/// In non-streaming mode this value is always non-null. In streaming mode, it is
+/// null in the `message_start` event and non-null otherwise.
+enum StopReason {
+  @JsonValue('end_turn')
+  endTurn,
+  @JsonValue('max_tokens')
+  maxTokens,
+  @JsonValue('stop_sequence')
+  stopSequence,
+}
diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart
new file mode 100644
index 00000000..fa05ffce
--- /dev/null
+++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart
@@ -0,0 +1,44 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of anthropic_schema;
+
+// ==========================================
+// CLASS: TextBlockDelta
+// ==========================================
+
+/// A delta in a streaming text block.
+@freezed
+class TextBlockDelta with _$TextBlockDelta {
+  const TextBlockDelta._();
+
+  /// Factory constructor for TextBlockDelta
+  const factory TextBlockDelta({
+    /// The text delta.
+    required String text,
+
+    /// The type of content block.
+    required String type,
+  }) = _TextBlockDelta;
+
+  /// Object construction from a JSON representation
+  factory TextBlockDelta.fromJson(Map<String, dynamic> json) =>
+      _$TextBlockDeltaFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = ['text', 'type'];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'text': text,
+      'type': type,
+    };
+  }
+}
diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart
new file mode 100644
index 00000000..37f3d39d
--- /dev/null
+++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart
@@ -0,0 +1,54 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of anthropic_schema;
+
+// ==========================================
+// CLASS: Usage
+// ==========================================
+
+/// Billing and rate-limit usage.
+///
+/// Anthropic's API bills and rate-limits by token counts, as tokens represent the
+/// underlying cost to our systems.
+///
+/// Under the hood, the API transforms requests into a format suitable for the
+/// model. The model's output then goes through a parsing stage before becoming an
+/// API response. As a result, the token counts in `usage` will not match one-to-one
+/// with the exact visible content of an API request or response.
+///
+/// For example, `output_tokens` will be non-zero, even for an empty string response
+/// from Claude.
+@freezed
+class Usage with _$Usage {
+  const Usage._();
+
+  /// Factory constructor for Usage
+  const factory Usage({
+    /// The number of input tokens which were used.
+    @JsonKey(name: 'input_tokens') required int inputTokens,
+
+    /// The number of output tokens which were used.
+    @JsonKey(name: 'output_tokens') required int outputTokens,
+  }) = _Usage;
+
+  /// Object construction from a JSON representation
+  factory Usage.fromJson(Map<String, dynamic> json) => _$UsageFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = ['input_tokens', 'output_tokens'];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'input_tokens': inputTokens,
+      'output_tokens': outputTokens,
+    };
+  }
+}
diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart
new file mode 100644
index 00000000..99555ca4
--- /dev/null
+++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart
@@ -0,0 +1,4 @@
+export 'http_client_stub.dart'
+    if (dart.library.io) 'http_client_io.dart'
+    if (dart.library.js) 'http_client_html.dart'
+    if (dart.library.html) 'http_client_html.dart';
diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart
new file mode 100644
index 00000000..59abc229
--- /dev/null
+++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart
@@ -0,0 +1,18 @@
+import 'package:fetch_client/fetch_client.dart' as fetch;
+import 'package:http/http.dart' as http;
+import 'package:http/retry.dart';
+
+/// Creates a FetchClient with a retry policy (web platform).
+http.Client createDefaultHttpClient() {
+  return RetryClient(fetch.FetchClient(mode: fetch.RequestMode.cors));
+}
+
+/// Middleware for HTTP requests.
+Future<http.BaseRequest> onRequestHandler(final http.BaseRequest request) {
+  // If the request is bigger than 60KiB, set persistentConnection to false
+  // Ref: https://github.com/Zekfad/fetch_client#large-payload
+  if ((request.contentLength ?? 0) > 61440) {
+    request.persistentConnection = false;
+  }
+  return Future.value(request);
+}
diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart
new file mode 100644
index 00000000..0b24e7db
--- /dev/null
+++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart
@@ -0,0 +1,12 @@
+import 'package:http/http.dart' as http;
+import 'package:http/retry.dart';
+
+/// Creates an IOClient with a retry policy.
+http.Client createDefaultHttpClient() {
+  return RetryClient(http.Client());
+}
+
+/// Middleware for HTTP requests.
+Future<http.BaseRequest> onRequestHandler(final http.BaseRequest request) {
+  return Future.value(request);
+}
diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart
new file mode 100644
index 00000000..2668d1ac
--- /dev/null
+++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart
@@ -0,0 +1,10 @@
+import 'package:http/http.dart' as http;
+
+/// Creates a default HTTP client for the current platform.
+http.Client createDefaultHttpClient() => throw UnsupportedError(
+      'Cannot create a client without dart:html or dart:io.',
+    );
+
+/// Middleware for HTTP requests.
+Future<http.BaseRequest> onRequestHandler(final http.BaseRequest request) =>
+    throw UnsupportedError('stub');
diff --git a/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml b/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml
new file mode 100644
index 00000000..a3f60e70
--- /dev/null
+++ b/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml
@@ -0,0 +1,562 @@
+openapi: 3.0.3
+
+info:
+  title: Anthropic API
+  description: API Spec for Anthropic API. Please see https://docs.anthropic.com/en/api for more details.
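The conditional export in `http_client.dart` above picks the `dart:io`-backed client on the VM and the fetch-based client in the browser, in both cases wrapped in a `RetryClient`. A minimal sketch of how code downstream of this patch might obtain and use it; the import path, API key placeholder, and request details are illustrative assumptions, not part of the patch:

```dart
import 'package:http/http.dart' as http;

// Assumed import path for illustration; the package may re-export these
// helpers from a different location.
import 'package:anthropic_sdk_dart/src/http_client/http_client.dart';

Future<void> main() async {
  // On the VM this is RetryClient(http.Client()); in the browser it wraps a
  // CORS-enabled FetchClient (see http_client_io.dart / http_client_html.dart).
  final http.Client client = createDefaultHttpClient();
  try {
    final request = http.Request(
      'POST',
      Uri.parse('https://api.anthropic.com/v1/messages'),
    )..headers['x-api-key'] = 'YOUR_API_KEY'; // a real call also needs a JSON body
    // The platform hook may adjust the request; the web variant disables
    // persistent connections for payloads over 60 KiB.
    final prepared = await onRequestHandler(request);
    final response = await client.send(prepared);
    print(response.statusCode);
  } finally {
    client.close();
  }
}
```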
+ version: "1" + +servers: + - url: https://api.anthropic.com/v1 + +tags: + - name: Messages + description: Send a structured list of input messages with text and/or image content, and the model will generate the next message in the conversation. + +paths: + /messages: + post: + operationId: createMessage + tags: + - Messages + summary: Create a Message + description: | + Send a structured list of input messages with text and/or image content, and the + model will generate the next message in the conversation. + + The Messages API can be used for either single queries or stateless multi-turn + conversations. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateMessageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Message" +components: + securitySchemes: + ApiKeyAuth: + type: apiKey + in: header + name: x-api-key + + schemas: + CreateMessageRequest: + type: object + description: The request parameters for creating a message. + properties: + model: + title: Model + description: | + The model that will complete your prompt. + + See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + details and options. + example: "claude-3-opus-20240229" + anyOf: + - type: string + description: The ID of the model to use for this request. + - type: string + title: Models + description: | + Available models. Mind that the list may not be exhaustive nor up-to-date. + enum: + - claude-3-opus-20240229 + - claude-3-sonnet-20240229 + - claude-3-haiku-20240307 + - claude-2.1 + - claude-2.0 + - claude-instant-1.2 + messages: + type: array + description: | + Input messages. + + Our models are trained to operate on alternating `user` and `assistant` + conversational turns. When creating a new `Message`, you specify the prior + conversational turns with the `messages` parameter, and the model then generates + the next `Message` in the conversation. + + Each input message must be an object with a `role` and `content`. You can + specify a single `user`-role message, or you can include multiple `user` and + `assistant` messages. The first message must always use the `user` role. + + If the final message uses the `assistant` role, the response content will + continue immediately from the content in that message. This can be used to + constrain part of the model's response. + + Example with a single `user` message: + + ```json + [{ "role": "user", "content": "Hello, Claude" }] + ``` + + Example with multiple conversational turns: + + ```json + [ + { "role": "user", "content": "Hello there." }, + { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + { "role": "user", "content": "Can you explain LLMs in plain English?" } + ] + ``` + + Example with a partially-filled response from Claude: + + ```json + [ + { + "role": "user", + "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + }, + { "role": "assistant", "content": "The best answer is (" } + ] + ``` + + Each input message `content` may be either a single `string` or an array of + content blocks, where each block has a specific `type`. Using a `string` for + `content` is shorthand for an array of one content block of type `"text"`. 
The + following input messages are equivalent: + + ```json + { "role": "user", "content": "Hello, Claude" } + ``` + + ```json + { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + ``` + + Starting with Claude 3 models, you can also send image content blocks: + + ```json + { + "role": "user", + "content": [ + { + "type": "image", + "source": { + "type": "base64", + "media_type": "image/jpeg", + "data": "/9j/4AAQSkZJRg..." + } + }, + { "type": "text", "text": "What is in this image?" } + ] + } + ``` + + We currently support the `base64` source type for images, and the `image/jpeg`, + `image/png`, `image/gif`, and `image/webp` media types. + + See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + input examples. + + Note that if you want to include a + [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + the top-level `system` parameter — there is no `"system"` role for input + messages in the Messages API. + minItems: 1 + items: + $ref: '#/components/schemas/Message' + max_tokens: + type: integer + description: | + The maximum number of tokens to generate before stopping. + + Note that our models may stop _before_ reaching this maximum. This parameter + only specifies the absolute maximum number of tokens to generate. + + Different models have different maximum values for this parameter. See + [models](https://docs.anthropic.com/en/docs/models-overview) for details. + metadata: + $ref: '#/components/schemas/CreateMessageRequestMetadata' + stop_sequences: + type: array + description: | + Custom text sequences that will cause the model to stop generating. + + Our models will normally stop when they have naturally completed their turn, + which will result in a response `stop_reason` of `"end_turn"`. + + If you want the model to stop generating when it encounters custom strings of + text, you can use the `stop_sequences` parameter. If the model encounters one of + the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + and the response `stop_sequence` value will contain the matched stop sequence. + items: + type: string + system: + type: string + description: | + System prompt. + + A system prompt is a way of providing context and instructions to Claude, such + as specifying a particular goal or role. See our + [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + temperature: + type: number + description: | + Amount of randomness injected into the response. + + Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + for analytical / multiple choice, and closer to `1.0` for creative and + generative tasks. + + Note that even with `temperature` of `0.0`, the results will not be fully + deterministic. + top_k: + type: integer + description: | + Only sample from the top K options for each subsequent token. + + Used to remove "long tail" low probability responses. + [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + + Recommended for advanced use cases only. You usually only need to use + `temperature`. + top_p: + type: number + description: | + Use nucleus sampling. + + In nucleus sampling, we compute the cumulative distribution over all the options + for each subsequent token in decreasing probability order and cut it off once it + reaches a particular probability specified by `top_p`. You should either alter + `temperature` or `top_p`, but not both. 
+ + Recommended for advanced use cases only. You usually only need to use + `temperature`. + stream: + type: boolean + default: false + description: | + Whether to incrementally stream the response using server-sent events. + + See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + details. + required: + - model + - messages + - max_tokens + CreateMessageRequestMetadata: + type: object + description: An object describing metadata about the request. + properties: + user_id: + type: string + description: | + An external identifier for the user who is associated with the request. + + This should be a uuid, hash value, or other opaque identifier. Anthropic may use + this id to help detect abuse. Do not include any identifying information such as + name, email address, or phone number. + Message: + type: object + description: A message in a chat conversation. + properties: + id: + type: string + description: | + Unique object identifier. + + The format and length of IDs may change over time. + content: + description: The content of the message. + oneOf: + - type: string + description: A single text block. + - type: array + description: An array of content blocks. + items: + $ref: "#/components/schemas/Block" + role: + $ref: "#/components/schemas/MessageRole" + model: + type: string + description: The model that handled the request. + stop_reason: + $ref: "#/components/schemas/StopReason" + stop_sequence: + type: string + description: | + Which custom stop sequence was generated, if any. + + This value will be a non-null string if one of your custom stop sequences was + generated. + type: + type: string + description: | + Object type. + + For Messages, this is always `"message"`. + usage: + $ref: "#/components/schemas/Usage" + required: + - content + - role + MessageRole: + type: string + description: The role of the messages author. + enum: + - user + - assistant + Block: + description: A block of content in a message. + oneOf: + - $ref: "#/components/schemas/TextBlock" + - $ref: "#/components/schemas/ImageBlock" + discriminator: + propertyName: type + TextBlock: + type: object + description: A block of text content. + properties: + text: + type: string + description: The text content. + type: + type: string + description: The type of content block. + default: text + required: + - text + ImageBlock: + type: object + description: A block of image content. + properties: + source: + $ref: "#/components/schemas/ImageBlockSource" + type: + type: string + description: The type of content block. + default: image + required: + - source + ImageBlockSource: + type: object + description: The source of an image block. + properties: + data: + type: string + description: The base64-encoded image data. + media_type: + type: string + description: The media type of the image. + enum: + - image/jpeg + - image/png + - image/gif + - image/webp + type: + type: string + description: The type of image source. + enum: + - base64 + required: + - data + - media_type + - type + StopReason: + type: string + description: | + The reason that we stopped. + + This may be one the following values: + + - `"end_turn"`: the model reached a natural stopping point + - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + + In non-streaming mode this value is always non-null. In streaming mode, it is + null in the `message_start` event and non-null otherwise. 
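Together with the generated classes in `schema.g.dart` above, the `CreateMessageRequest` schema maps onto a plain freezed object on the Dart side. A minimal construction sketch; the library entry point and the union factory names (`Model.model`, `MessageContent.text`) follow the mapping configured in `oas/main.dart` further below and are otherwise assumptions:

```dart
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; // assumed entry point

CreateMessageRequest buildHelloRequest() {
  return CreateMessageRequest(
    // ModelEnumeration union case; Model.modelId('...') would take a raw string.
    model: const Model.model(Models.claude3Haiku20240307),
    messages: const [
      Message(
        role: MessageRole.user,
        // MessageContentString union case; a list of content blocks would use
        // MessageContent.blocks([...]) instead.
        content: MessageContent.text('Hello, Claude'),
      ),
    ],
    maxTokens: 1024, // required: hard cap on generated tokens
    temperature: 0.5, // optional sampling knobs mirror the spec above
    stream: false,
  );
}
```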
+ enum: + - end_turn + - max_tokens + - stop_sequence + Usage: + type: object + description: | + Billing and rate-limit usage. + + Anthropic's API bills and rate-limits by token counts, as tokens represent the + underlying cost to our systems. + + Under the hood, the API transforms requests into a format suitable for the + model. The model's output then goes through a parsing stage before becoming an + API response. As a result, the token counts in `usage` will not match one-to-one + with the exact visible content of an API request or response. + + For example, `output_tokens` will be non-zero, even for an empty string response + from Claude. + properties: + input_tokens: + type: integer + description: The number of input tokens which were used. + output_tokens: + type: integer + description: The number of output tokens which were used. + required: + - input_tokens + - output_tokens + MessageStreamEvent: + type: object + description: A event in a streaming conversation. + oneOf: + - $ref: "#/components/schemas/MessageStartEvent" + - $ref: "#/components/schemas/MessageDeltaEvent" + - $ref: "#/components/schemas/MessageStopEvent" + - $ref: "#/components/schemas/ContentBlockStartEvent" + - $ref: "#/components/schemas/ContentBlockDeltaEvent" + - $ref: "#/components/schemas/ContentBlockStopEvent" + - $ref: "#/components/schemas/PingEvent" + discriminator: + propertyName: type + MessageStreamEventType: + type: string + description: The type of a streaming event. + enum: + - message_start + - message_delta + - message_stop + - content_block_start + - content_block_delta + - content_block_stop + - ping + MessageStartEvent: + type: object + description: A start event in a streaming conversation. + properties: + message: + $ref: "#/components/schemas/Message" + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - message + - type + MessageDeltaEvent: + type: object + description: A delta event in a streaming conversation. + properties: + delta: + $ref: "#/components/schemas/MessageDelta" + type: + $ref: "#/components/schemas/MessageStreamEventType" + usage: + $ref: "#/components/schemas/MessageDeltaUsage" + required: + - delta + - type + - usage + MessageDelta: + type: object + description: A delta in a streaming message. + properties: + stop_reason: + $ref: "#/components/schemas/StopReason" + stop_sequence: + type: string + description: | + Which custom stop sequence was generated, if any. + + This value will be a non-null string if one of your custom stop sequences was + generated. + MessageDeltaUsage: + type: object + description: | + Billing and rate-limit usage. + + Anthropic's API bills and rate-limits by token counts, as tokens represent the + underlying cost to our systems. + + Under the hood, the API transforms requests into a format suitable for the + model. The model's output then goes through a parsing stage before becoming an + API response. As a result, the token counts in `usage` will not match one-to-one + with the exact visible content of an API request or response. + + For example, `output_tokens` will be non-zero, even for an empty string response + from Claude. + properties: + output_tokens: + type: integer + description: The cumulative number of output tokens which were used. + required: + - output_tokens + MessageStopEvent: + type: object + description: A stop event in a streaming conversation. 
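The streaming schemas above (`message_start`, the `content_block_*` events, `message_delta`, `message_stop`, and `ping`) surface in Dart as the freezed `MessageStreamEvent` union generated earlier in this patch. A sketch of folding such a stream into the final text, assuming the stream itself is obtained elsewhere (for example from the SDK client); only the generated `mapOrNull` pattern matching is relied on here:

```dart
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; // assumed entry point

/// Accumulates content_block_delta text and reports the final stop reason.
Future<String> collectStreamedText(Stream<MessageStreamEvent> events) async {
  final buffer = StringBuffer();
  StopReason? stopReason;
  await for (final event in events) {
    event.mapOrNull(
      // Only content_block_delta events carry text in this version of the API.
      contentBlockDelta: (e) {
        buffer.write(e.delta.text);
      },
      // message_delta carries the final stop_reason and cumulative output tokens.
      messageDelta: (e) {
        stopReason = e.delta.stopReason;
      },
    );
  }
  print('stop_reason: $stopReason');
  return buffer.toString();
}
```

Using the generated `map`/`when` instead of `mapOrNull` would force every event type to be handled explicitly.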
+ properties: + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - type + ContentBlockStartEvent: + type: object + description: A start event in a streaming content block. + properties: + content_block: + $ref: "#/components/schemas/TextBlock" + index: + type: integer + description: The index of the content block. + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - content_block + - index + - type + ContentBlockDeltaEvent: + type: object + description: A delta event in a streaming content block. + properties: + delta: + $ref: "#/components/schemas/TextBlockDelta" + index: + type: integer + description: The index of the content block. + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - delta + - index + - type + TextBlockDelta: + type: object + description: A delta in a streaming text block. + properties: + text: + type: string + description: The text delta. + type: + type: string + description: The type of content block. + default: text_delta + required: + - text + - type + ContentBlockStopEvent: + type: object + description: A stop event in a streaming content block. + properties: + index: + type: integer + description: The index of the content block. + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - index + - type + PingEvent: + type: object + description: A ping event in a streaming conversation. + properties: + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - type + +security: + - ApiKeyAuth: [ ] diff --git a/packages/anthropic_sdk_dart/oas/main.dart b/packages/anthropic_sdk_dart/oas/main.dart new file mode 100644 index 00000000..cdeaa32c --- /dev/null +++ b/packages/anthropic_sdk_dart/oas/main.dart @@ -0,0 +1,45 @@ +import 'dart:io'; + +import 'package:openapi_spec/openapi_spec.dart'; + +/// Generates Anthropic API client Dart code from the OpenAPI spec. +/// https://docs.anthropic.com/en/api +void main() async { + final spec = OpenApi.fromFile(source: 'oas/anthropic_openapi_curated.yaml'); + + await spec.generate( + package: 'Anthropic', + destination: 'lib/src/generated/', + replace: true, + schemaOptions: const SchemaGeneratorOptions( + onSchemaUnionFactoryName: _onSchemaUnionFactoryName, + ), + clientOptions: const ClientGeneratorOptions( + enabled: true, + ), + ); + + await Process.run( + 'dart', + ['run', 'build_runner', 'build', 'lib', '--delete-conflicting-outputs'], + ); +} + +String? 
_onSchemaUnionFactoryName( + final String union, + final String unionSubclass, +) => + switch (unionSubclass) { + 'ModelEnumeration' => 'model', + 'ModelString' => 'modelId', + 'MessageContentListBlock' => 'blocks', + 'MessageContentString' => 'text', + 'MessageStartEvent' => 'messageStart', + 'MessageDeltaEvent' => 'messageDelta', + 'MessageStopEvent' => 'messageStop', + 'ContentBlockStartEvent' => 'contentBlockStart', + 'ContentBlockDeltaEvent' => 'contentBlockDelta', + 'ContentBlockStopEvent' => 'contentBlockStop', + 'PingEvent' => 'ping', + _ => null, + }; diff --git a/packages/anthropic_sdk_dart/pubspec.lock b/packages/anthropic_sdk_dart/pubspec.lock new file mode 100644 index 00000000..1849898b --- /dev/null +++ b/packages/anthropic_sdk_dart/pubspec.lock @@ -0,0 +1,627 @@ +# Generated by pub +# See https://dart.dev/tools/pub/glossary#lockfile +packages: + _fe_analyzer_shared: + dependency: transitive + description: + name: _fe_analyzer_shared + sha256: "5aaf60d96c4cd00fe7f21594b5ad6a1b699c80a27420f8a837f4d68473ef09e3" + url: "https://pub.dev" + source: hosted + version: "68.0.0" + _macros: + dependency: transitive + description: dart + source: sdk + version: "0.1.0" + analyzer: + dependency: transitive + description: + name: analyzer + sha256: "21f1d3720fd1c70316399d5e2bccaebb415c434592d778cce8acb967b8578808" + url: "https://pub.dev" + source: hosted + version: "6.5.0" + args: + dependency: transitive + description: + name: args + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" + url: "https://pub.dev" + source: hosted + version: "2.5.0" + async: + dependency: transitive + description: + name: async + sha256: "947bfcf187f74dbc5e146c9eb9c0f10c9f8b30743e341481c1e2ed3ecc18c20c" + url: "https://pub.dev" + source: hosted + version: "2.11.0" + boolean_selector: + dependency: transitive + description: + name: boolean_selector + sha256: "6cfb5af12253eaf2b368f07bacc5a80d1301a071c73360d746b7f2e32d762c66" + url: "https://pub.dev" + source: hosted + version: "2.1.1" + build: + dependency: transitive + description: + name: build + sha256: "80184af8b6cb3e5c1c4ec6d8544d27711700bc3e6d2efad04238c7b5290889f0" + url: "https://pub.dev" + source: hosted + version: "2.4.1" + build_config: + dependency: transitive + description: + name: build_config + sha256: bf80fcfb46a29945b423bd9aad884590fb1dc69b330a4d4700cac476af1708d1 + url: "https://pub.dev" + source: hosted + version: "1.1.1" + build_daemon: + dependency: transitive + description: + name: build_daemon + sha256: "79b2aef6ac2ed00046867ed354c88778c9c0f029df8a20fe10b5436826721ef9" + url: "https://pub.dev" + source: hosted + version: "4.0.2" + build_resolvers: + dependency: transitive + description: + name: build_resolvers + sha256: "339086358431fa15d7eca8b6a36e5d783728cf025e559b834f4609a1fcfb7b0a" + url: "https://pub.dev" + source: hosted + version: "2.4.2" + build_runner: + dependency: "direct dev" + description: + name: build_runner + sha256: "1414d6d733a85d8ad2f1dfcb3ea7945759e35a123cb99ccfac75d0758f75edfa" + url: "https://pub.dev" + source: hosted + version: "2.4.10" + build_runner_core: + dependency: transitive + description: + name: build_runner_core + sha256: "4ae8ffe5ac758da294ecf1802f2aff01558d8b1b00616aa7538ea9a8a5d50799" + url: "https://pub.dev" + source: hosted + version: "7.3.0" + built_collection: + dependency: transitive + description: + name: built_collection + sha256: "376e3dd27b51ea877c28d525560790aee2e6fbb5f20e2f85d5081027d94e2100" + url: "https://pub.dev" + source: hosted + version: "5.1.1" + built_value: + 
dependency: transitive + description: + name: built_value + sha256: c7913a9737ee4007efedaffc968c049fd0f3d0e49109e778edc10de9426005cb + url: "https://pub.dev" + source: hosted + version: "8.9.2" + checked_yaml: + dependency: transitive + description: + name: checked_yaml + sha256: feb6bed21949061731a7a75fc5d2aa727cf160b91af9a3e464c5e3a32e28b5ff + url: "https://pub.dev" + source: hosted + version: "2.0.3" + clock: + dependency: transitive + description: + name: clock + sha256: cb6d7f03e1de671e34607e909a7213e31d7752be4fb66a86d29fe1eb14bfb5cf + url: "https://pub.dev" + source: hosted + version: "1.1.1" + code_builder: + dependency: transitive + description: + name: code_builder + sha256: f692079e25e7869c14132d39f223f8eec9830eb76131925143b2129c4bb01b37 + url: "https://pub.dev" + source: hosted + version: "4.10.0" + collection: + dependency: transitive + description: + name: collection + sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + url: "https://pub.dev" + source: hosted + version: "1.18.0" + convert: + dependency: transitive + description: + name: convert + sha256: "0f08b14755d163f6e2134cb58222dd25ea2a2ee8a195e53983d57c075324d592" + url: "https://pub.dev" + source: hosted + version: "3.1.1" + coverage: + dependency: transitive + description: + name: coverage + sha256: "3945034e86ea203af7a056d98e98e42a5518fff200d6e8e6647e1886b07e936e" + url: "https://pub.dev" + source: hosted + version: "1.8.0" + crypto: + dependency: transitive + description: + name: crypto + sha256: ff625774173754681d66daaf4a448684fb04b78f902da9cb3d308c19cc5e8bab + url: "https://pub.dev" + source: hosted + version: "3.0.3" + dart_style: + dependency: transitive + description: + name: dart_style + sha256: "99e066ce75c89d6b29903d788a7bb9369cf754f7b24bf70bf4b6d6d6b26853b9" + url: "https://pub.dev" + source: hosted + version: "2.3.6" + fetch_api: + dependency: transitive + description: + name: fetch_api + sha256: c0a76bfd84d4bc5a0733ab8b9fcee268d5069228790a6dd71fc2a6d1049223cc + url: "https://pub.dev" + source: hosted + version: "2.1.0" + fetch_client: + dependency: "direct main" + description: + name: fetch_client + sha256: "0b935eff9dfa84fb56bddadaf020c9aa61f02cbd6fa8dad914d6d343a838936d" + url: "https://pub.dev" + source: hosted + version: "1.1.1" + file: + dependency: transitive + description: + name: file + sha256: "5fc22d7c25582e38ad9a8515372cd9a93834027aacf1801cf01164dac0ffa08c" + url: "https://pub.dev" + source: hosted + version: "7.0.0" + fixnum: + dependency: transitive + description: + name: fixnum + sha256: "25517a4deb0c03aa0f32fd12db525856438902d9c16536311e76cdc57b31d7d1" + url: "https://pub.dev" + source: hosted + version: "1.1.0" + freezed: + dependency: "direct dev" + description: + name: freezed + sha256: "5606fb95d54f3bb241b3e12dcfb65ba7494c775c4cb458334eccceb07334a3d9" + url: "https://pub.dev" + source: hosted + version: "2.5.3" + freezed_annotation: + dependency: "direct main" + description: + name: freezed_annotation + sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + url: "https://pub.dev" + source: hosted + version: "2.4.1" + frontend_server_client: + dependency: transitive + description: + name: frontend_server_client + sha256: f64a0333a82f30b0cca061bc3d143813a486dc086b574bfb233b7c1372427694 + url: "https://pub.dev" + source: hosted + version: "4.0.0" + glob: + dependency: transitive + description: + name: glob + sha256: "0e7014b3b7d4dac1ca4d6114f82bf1782ee86745b9b42a92c9289c23d8a0ab63" + url: "https://pub.dev" + source: hosted + version: "2.1.2" + 
graphs: + dependency: transitive + description: + name: graphs + sha256: aedc5a15e78fc65a6e23bcd927f24c64dd995062bcd1ca6eda65a3cff92a4d19 + url: "https://pub.dev" + source: hosted + version: "2.3.1" + http: + dependency: "direct main" + description: + name: http + sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + url: "https://pub.dev" + source: hosted + version: "1.2.1" + http_multi_server: + dependency: transitive + description: + name: http_multi_server + sha256: "97486f20f9c2f7be8f514851703d0119c3596d14ea63227af6f7a481ef2b2f8b" + url: "https://pub.dev" + source: hosted + version: "3.2.1" + http_parser: + dependency: transitive + description: + name: http_parser + sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + url: "https://pub.dev" + source: hosted + version: "4.0.2" + intl: + dependency: transitive + description: + name: intl + sha256: d6f56758b7d3014a48af9701c085700aac781a92a87a62b1333b46d8879661cf + url: "https://pub.dev" + source: hosted + version: "0.19.0" + io: + dependency: transitive + description: + name: io + sha256: "2ec25704aba361659e10e3e5f5d672068d332fc8ac516421d483a11e5cbd061e" + url: "https://pub.dev" + source: hosted + version: "1.0.4" + js: + dependency: transitive + description: + name: js + sha256: c1b2e9b5ea78c45e1a0788d29606ba27dc5f71f019f32ca5140f61ef071838cf + url: "https://pub.dev" + source: hosted + version: "0.7.1" + json_annotation: + dependency: "direct main" + description: + name: json_annotation + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" + url: "https://pub.dev" + source: hosted + version: "4.9.0" + json_serializable: + dependency: "direct dev" + description: + name: json_serializable + sha256: ea1432d167339ea9b5bb153f0571d0039607a873d6e04e0117af043f14a1fd4b + url: "https://pub.dev" + source: hosted + version: "6.8.0" + logging: + dependency: transitive + description: + name: logging + sha256: "623a88c9594aa774443aa3eb2d41807a48486b5613e67599fb4c41c0ad47c340" + url: "https://pub.dev" + source: hosted + version: "1.2.0" + macros: + dependency: transitive + description: + name: macros + sha256: "12e8a9842b5a7390de7a781ec63d793527582398d16ea26c60fed58833c9ae79" + url: "https://pub.dev" + source: hosted + version: "0.1.0-main.0" + matcher: + dependency: transitive + description: + name: matcher + sha256: d2323aa2060500f906aa31a895b4030b6da3ebdcc5619d14ce1aada65cd161cb + url: "https://pub.dev" + source: hosted + version: "0.12.16+1" + meta: + dependency: "direct main" + description: + name: meta + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + url: "https://pub.dev" + source: hosted + version: "1.15.0" + mime: + dependency: transitive + description: + name: mime + sha256: "2e123074287cc9fd6c09de8336dae606d1ddb88d9ac47358826db698c176a1f2" + url: "https://pub.dev" + source: hosted + version: "1.0.5" + node_preamble: + dependency: transitive + description: + name: node_preamble + sha256: "6e7eac89047ab8a8d26cf16127b5ed26de65209847630400f9aefd7cd5c730db" + url: "https://pub.dev" + source: hosted + version: "2.0.2" + openapi_spec: + dependency: "direct dev" + description: + path: "." 
+ ref: "280ae0d41806eda25e923203d67bd6f4992a81e9" + resolved-ref: "280ae0d41806eda25e923203d67bd6f4992a81e9" + url: "https://github.com/davidmigloz/openapi_spec.git" + source: git + version: "0.7.8" + package_config: + dependency: transitive + description: + name: package_config + sha256: "1c5b77ccc91e4823a5af61ee74e6b972db1ef98c2ff5a18d3161c982a55448bd" + url: "https://pub.dev" + source: hosted + version: "2.1.0" + path: + dependency: transitive + description: + name: path + sha256: "087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" + url: "https://pub.dev" + source: hosted + version: "1.9.0" + pool: + dependency: transitive + description: + name: pool + sha256: "20fe868b6314b322ea036ba325e6fc0711a22948856475e2c2b6306e8ab39c2a" + url: "https://pub.dev" + source: hosted + version: "1.5.1" + pub_semver: + dependency: transitive + description: + name: pub_semver + sha256: "40d3ab1bbd474c4c2328c91e3a7df8c6dd629b79ece4c4bd04bee496a224fb0c" + url: "https://pub.dev" + source: hosted + version: "2.1.4" + pubspec_parse: + dependency: transitive + description: + name: pubspec_parse + sha256: c63b2876e58e194e4b0828fcb080ad0e06d051cb607a6be51a9e084f47cb9367 + url: "https://pub.dev" + source: hosted + version: "1.2.3" + recase: + dependency: transitive + description: + name: recase + sha256: e4eb4ec2dcdee52dcf99cb4ceabaffc631d7424ee55e56f280bc039737f89213 + url: "https://pub.dev" + source: hosted + version: "4.1.0" + shelf: + dependency: transitive + description: + name: shelf + sha256: ad29c505aee705f41a4d8963641f91ac4cee3c8fad5947e033390a7bd8180fa4 + url: "https://pub.dev" + source: hosted + version: "1.4.1" + shelf_packages_handler: + dependency: transitive + description: + name: shelf_packages_handler + sha256: "89f967eca29607c933ba9571d838be31d67f53f6e4ee15147d5dc2934fee1b1e" + url: "https://pub.dev" + source: hosted + version: "3.0.2" + shelf_static: + dependency: transitive + description: + name: shelf_static + sha256: a41d3f53c4adf0f57480578c1d61d90342cd617de7fc8077b1304643c2d85c1e + url: "https://pub.dev" + source: hosted + version: "1.1.2" + shelf_web_socket: + dependency: transitive + description: + name: shelf_web_socket + sha256: "073c147238594ecd0d193f3456a5fe91c4b0abbcc68bf5cd95b36c4e194ac611" + url: "https://pub.dev" + source: hosted + version: "2.0.0" + source_gen: + dependency: transitive + description: + name: source_gen + sha256: "14658ba5f669685cd3d63701d01b31ea748310f7ab854e471962670abcf57832" + url: "https://pub.dev" + source: hosted + version: "1.5.0" + source_helper: + dependency: transitive + description: + name: source_helper + sha256: "6adebc0006c37dd63fe05bca0a929b99f06402fc95aa35bf36d67f5c06de01fd" + url: "https://pub.dev" + source: hosted + version: "1.3.4" + source_map_stack_trace: + dependency: transitive + description: + name: source_map_stack_trace + sha256: "84cf769ad83aa6bb61e0aa5a18e53aea683395f196a6f39c4c881fb90ed4f7ae" + url: "https://pub.dev" + source: hosted + version: "2.1.1" + source_maps: + dependency: transitive + description: + name: source_maps + sha256: "708b3f6b97248e5781f493b765c3337db11c5d2c81c3094f10904bfa8004c703" + url: "https://pub.dev" + source: hosted + version: "0.10.12" + source_span: + dependency: transitive + description: + name: source_span + sha256: "53e943d4206a5e30df338fd4c6e7a077e02254531b138a15aec3bd143c1a8b3c" + url: "https://pub.dev" + source: hosted + version: "1.10.0" + stack_trace: + dependency: transitive + description: + name: stack_trace + sha256: 
"73713990125a6d93122541237550ee3352a2d84baad52d375a4cad2eb9b7ce0b" + url: "https://pub.dev" + source: hosted + version: "1.11.1" + stream_channel: + dependency: transitive + description: + name: stream_channel + sha256: ba2aa5d8cc609d96bbb2899c28934f9e1af5cddbd60a827822ea467161eb54e7 + url: "https://pub.dev" + source: hosted + version: "2.1.2" + stream_transform: + dependency: transitive + description: + name: stream_transform + sha256: "14a00e794c7c11aa145a170587321aedce29769c08d7f58b1d141da75e3b1c6f" + url: "https://pub.dev" + source: hosted + version: "2.1.0" + string_scanner: + dependency: transitive + description: + name: string_scanner + sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + url: "https://pub.dev" + source: hosted + version: "1.2.0" + term_glyph: + dependency: transitive + description: + name: term_glyph + sha256: a29248a84fbb7c79282b40b8c72a1209db169a2e0542bce341da992fe1bc7e84 + url: "https://pub.dev" + source: hosted + version: "1.2.1" + test: + dependency: "direct dev" + description: + name: test + sha256: d11b55850c68c1f6c0cf00eabded4e66c4043feaf6c0d7ce4a36785137df6331 + url: "https://pub.dev" + source: hosted + version: "1.25.5" + test_api: + dependency: transitive + description: + name: test_api + sha256: "2419f20b0c8677b2d67c8ac4d1ac7372d862dc6c460cdbb052b40155408cd794" + url: "https://pub.dev" + source: hosted + version: "0.7.1" + test_core: + dependency: transitive + description: + name: test_core + sha256: "4d070a6bc36c1c4e89f20d353bfd71dc30cdf2bd0e14349090af360a029ab292" + url: "https://pub.dev" + source: hosted + version: "0.6.2" + timing: + dependency: transitive + description: + name: timing + sha256: "70a3b636575d4163c477e6de42f247a23b315ae20e86442bebe32d3cabf61c32" + url: "https://pub.dev" + source: hosted + version: "1.0.1" + typed_data: + dependency: transitive + description: + name: typed_data + sha256: facc8d6582f16042dd49f2463ff1bd6e2c9ef9f3d5da3d9b087e244a7b564b3c + url: "https://pub.dev" + source: hosted + version: "1.3.2" + vm_service: + dependency: transitive + description: + name: vm_service + sha256: "7475cb4dd713d57b6f7464c0e13f06da0d535d8b2067e188962a59bac2cf280b" + url: "https://pub.dev" + source: hosted + version: "14.2.2" + watcher: + dependency: transitive + description: + name: watcher + sha256: "3d2ad6751b3c16cf07c7fca317a1413b3f26530319181b37e3b9039b84fc01d8" + url: "https://pub.dev" + source: hosted + version: "1.1.0" + web: + dependency: transitive + description: + name: web + sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + url: "https://pub.dev" + source: hosted + version: "0.5.1" + web_socket: + dependency: transitive + description: + name: web_socket + sha256: "217f49b5213796cb508d6a942a5dc604ce1cb6a0a6b3d8cb3f0c314f0ecea712" + url: "https://pub.dev" + source: hosted + version: "0.1.4" + web_socket_channel: + dependency: transitive + description: + name: web_socket_channel + sha256: a2d56211ee4d35d9b344d9d4ce60f362e4f5d1aafb988302906bd732bc731276 + url: "https://pub.dev" + source: hosted + version: "3.0.0" + webkit_inspection_protocol: + dependency: transitive + description: + name: webkit_inspection_protocol + sha256: "87d3f2333bb240704cd3f1c6b5b7acd8a10e7f0bc28c28dcf14e782014f4a572" + url: "https://pub.dev" + source: hosted + version: "1.2.1" + yaml: + dependency: transitive + description: + name: yaml + sha256: "75769501ea3489fca56601ff33454fe45507ea3bfb014161abc3b43ae25989d5" + url: "https://pub.dev" + source: hosted + version: "3.1.2" +sdks: + dart: ">=3.4.0 <4.0.0" diff 
--git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml new file mode 100644 index 00000000..2fa2b082 --- /dev/null +++ b/packages/anthropic_sdk_dart/pubspec.yaml @@ -0,0 +1,34 @@ +name: anthropic_sdk_dart +description: Dart Client for the Anthropic API (Claude 3 Opus, Sonnet, Haiku, etc.). +version: 0.0.1-dev.1 +repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/anthropic_sdk_dart +issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:anthropic_sdk_dart +homepage: https://github.com/davidmigloz/langchain_dart +documentation: https://langchaindart.com + +topics: + - ai + - nlp + - llms + - anthropic + +environment: + sdk: ">=3.0.0 <4.0.0" + +dependencies: + fetch_client: ^1.0.2 + freezed_annotation: ^2.4.1 + http: ^1.1.0 + json_annotation: ^4.8.1 + meta: ^1.11.0 + +dev_dependencies: + build_runner: ^2.4.9 + freezed: ^2.4.7 + json_serializable: ^6.7.1 + # openapi_spec: ^0.7.8 + openapi_spec: + git: + url: https://github.com/davidmigloz/openapi_spec.git + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 + test: ^1.25.2 diff --git a/packages/anthropic_sdk_dart/test/messages_test.dart b/packages/anthropic_sdk_dart/test/messages_test.dart new file mode 100644 index 00000000..63bbb01e --- /dev/null +++ b/packages/anthropic_sdk_dart/test/messages_test.dart @@ -0,0 +1,150 @@ +@TestOn('vm') +library; // Uses dart:io + +import 'dart:io'; + +import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; +import 'package:test/test.dart'; + +void main() { + group('Anthropic Messages API tests', () { + late AnthropicClient client; + + setUp(() async { + client = AnthropicClient( + apiKey: Platform.environment['ANTHROPIC_API_KEY'], + ); + }); + + tearDown(() { + client.endSession(); + }); + + test('Test call messages API', () async { + const models = Models.values; + for (final model in models) { + final res = await client.createMessage( + request: CreateMessageRequest( + model: Model.model(model), + temperature: 0, + maxTokens: 1024, + system: + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces, commas or additional explanations.', + messages: const [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'List the numbers from 1 to 9 in order.', + ), + ), + ], + ), + ); + expect(res.id, isNotEmpty); + expect( + res.content.text.replaceAll(RegExp(r'[\s\n]'), ''), + contains('123456789'), + ); + expect(res.role, MessageRole.assistant); + expect( + res.model?.replaceAll(RegExp(r'[-.]'), ''), + model.name.toLowerCase(), + ); + expect(res.stopReason, StopReason.endTurn); + expect(res.stopSequence, isNull); + expect(res.type, 'message'); + expect(res.usage?.inputTokens, greaterThan(0)); + expect(res.usage?.outputTokens, greaterThan(0)); + await Future.delayed( + const Duration(seconds: 5), + ); // To avoid rate limit + } + }); + + test('Test call messages streaming API', () async { + final stream = client.createMessageStream( + request: const CreateMessageRequest( + model: Model.model(Models.claudeInstant12), + temperature: 0, + maxTokens: 1024, + system: 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces, commas or additional explanations.', + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'List the numbers from 1 to 9 in order.', + ), + ), + ], + ), + ); + String text = ''; + await for (final res in stream) { + res.map( + messageStart: (v) { + expect(res.type, 
MessageStreamEventType.messageStart); + expect(v.message.id, isNotEmpty); + expect(v.message.role, MessageRole.assistant); + expect( + v.message.model?.replaceAll(RegExp(r'[-.]'), ''), + Models.claudeInstant12.name.toLowerCase(), + ); + expect(v.message.stopReason, isNull); + expect(v.message.stopSequence, isNull); + expect(v.message.usage?.inputTokens, greaterThan(0)); + expect(v.message.usage?.outputTokens, greaterThan(0)); + }, + messageDelta: (v) { + expect(res.type, MessageStreamEventType.messageDelta); + expect(v.delta.stopReason, StopReason.endTurn); + expect(v.usage.outputTokens, greaterThan(0)); + }, + messageStop: (v) { + expect(res.type, MessageStreamEventType.messageStop); + }, + contentBlockStart: (v) { + expect(res.type, MessageStreamEventType.contentBlockStart); + expect(v.index, 0); + expect(v.contentBlock.text, isEmpty); + expect(v.contentBlock.type, 'text'); + }, + contentBlockDelta: (v) { + expect(res.type, MessageStreamEventType.contentBlockDelta); + expect(v.index, greaterThanOrEqualTo(0)); + expect(v.delta.text, isNotEmpty); + expect(v.delta.type, 'text_delta'); + text += v.delta.text.replaceAll(RegExp(r'[\s\n]'), ''); + }, + contentBlockStop: (v) { + expect(res.type, MessageStreamEventType.contentBlockStop); + expect(v.index, greaterThanOrEqualTo(0)); + }, + ping: (v) { + expect(res.type, MessageStreamEventType.ping); + }, + ); + } + expect(text, contains('123456789')); + }); + + test('Test response max tokens', () async { + const request = CreateMessageRequest( + model: Model.model(Models.claudeInstant12), + maxTokens: 1, + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'Tell me a joke.', + ), + ), + ], + ); + + final res = await client.createMessage(request: request); + expect(res.stopReason, StopReason.maxTokens); + }); + }); +} From 9b55b80221f7ea8c7e9823f434656d7a2cfde115 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 24 May 2024 22:48:42 +0200 Subject: [PATCH 021/251] refactor: Migrate to langchaindart.dev domain (#434) --- docs/CNAME | 2 +- docs/README.md | 2 +- docs/index.html | 13 +++++-------- .../chat_models/integrations/firebase_vertex_ai.md | 2 +- .../models/chat_models/integrations/googleai.md | 2 +- examples/browser_summarizer/pubspec.lock | 4 ++-- examples/docs_examples/README.md | 2 +- examples/docs_examples/pubspec.lock | 8 ++++---- examples/docs_examples/pubspec.yaml | 2 +- examples/hello_world_backend/README.md | 2 +- examples/hello_world_backend/pubspec.lock | 4 ++-- examples/hello_world_cli/README.md | 2 +- examples/hello_world_cli/pubspec.lock | 4 ++-- examples/hello_world_flutter/README.md | 2 +- examples/hello_world_flutter/pubspec.lock | 4 ++-- packages/anthropic_sdk_dart/pubspec.yaml | 2 +- packages/chromadb/pubspec.yaml | 2 +- packages/googleai_dart/pubspec.yaml | 2 +- packages/langchain/CHANGELOG.md | 6 +++--- packages/langchain/README.md | 4 ++-- packages/langchain/pubspec.yaml | 2 +- packages/langchain_amazon/pubspec.yaml | 2 +- packages/langchain_anthropic/pubspec.yaml | 2 +- packages/langchain_chroma/pubspec.yaml | 2 +- packages/langchain_cohere/pubspec.yaml | 2 +- packages/langchain_community/pubspec.yaml | 2 +- packages/langchain_core/pubspec.yaml | 2 +- packages/langchain_firebase/example/pubspec.lock | 2 +- .../vertex_ai/chat_firebase_vertex_ai.dart | 2 +- packages/langchain_firebase/pubspec.yaml | 2 +- .../google_ai/chat_google_generative_ai.dart | 2 +- packages/langchain_google/pubspec.yaml | 2 +- packages/langchain_huggingface/pubspec.yaml | 2 +- 
packages/langchain_microsoft/pubspec.yaml | 2 +- packages/langchain_mistralai/pubspec.yaml | 2 +- packages/langchain_ollama/pubspec.yaml | 2 +- packages/langchain_openai/CHANGELOG.md | 4 ++-- packages/langchain_openai/pubspec.yaml | 2 +- packages/langchain_pinecone/pubspec.yaml | 2 +- .../lib/src/vector_stores/supabase.dart | 2 +- packages/langchain_supabase/pubspec.yaml | 2 +- packages/langchain_weaviate/pubspec.yaml | 2 +- packages/langchain_wikipedia/pubspec.yaml | 2 +- packages/langchain_wolfram/pubspec.yaml | 2 +- packages/mistralai_dart/pubspec.yaml | 2 +- packages/ollama_dart/pubspec.yaml | 2 +- packages/openai_dart/pubspec.yaml | 2 +- packages/vertex_ai/pubspec.yaml | 2 +- 48 files changed, 63 insertions(+), 66 deletions(-) diff --git a/docs/CNAME b/docs/CNAME index 17960576..6217d1b3 100644 --- a/docs/CNAME +++ b/docs/CNAME @@ -1 +1 @@ -langchaindart.com \ No newline at end of file +langchaindart.dev diff --git a/docs/README.md b/docs/README.md index 8f9a3f2f..12785c34 100644 --- a/docs/README.md +++ b/docs/README.md @@ -38,7 +38,7 @@ LCEL is a declarative way to compose chains. LCEL was designed from day 1 to sup - [Overview](/expression_language/expression_language): LCEL and its benefits - [Interface](/expression_language/interface): The standard interface for LCEL objects -- [Cookbook](https://langchaindart.com/#/expression_language/cookbook/prompt_llm_parser): Example code for accomplishing common tasks +- [Cookbook](https://langchaindart.dev/#/expression_language/cookbook/prompt_llm_parser): Example code for accomplishing common tasks ## Modules diff --git a/docs/index.html b/docs/index.html index eab7ac39..6d4f395b 100644 --- a/docs/index.html +++ b/docs/index.html @@ -2,16 +2,13 @@ - + @@ -41,7 +38,7 @@ - + diff --git a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md index 8dc05345..ef8e03d0 100644 --- a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md +++ b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md @@ -140,7 +140,7 @@ await stream.forEach(print); `ChatGoogleGenerativeAI` supports tool calling. -Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. +Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. Example: ```dart diff --git a/docs/modules/model_io/models/chat_models/integrations/googleai.md b/docs/modules/model_io/models/chat_models/integrations/googleai.md index 6eca8777..87a43755 100644 --- a/docs/modules/model_io/models/chat_models/integrations/googleai.md +++ b/docs/modules/model_io/models/chat_models/integrations/googleai.md @@ -118,7 +118,7 @@ await stream.forEach(print); `ChatGoogleGenerativeAI` supports tool calling. -Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. +Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. 
Example: ```dart diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index cc499c81..b3733733 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -246,7 +246,7 @@ packages: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.6.1+1" langchain_tiktoken: dependency: transitive description: @@ -309,7 +309,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.3.2+1" path: dependency: transitive description: diff --git a/examples/docs_examples/README.md b/examples/docs_examples/README.md index a2dc3095..6ec73e85 100644 --- a/examples/docs_examples/README.md +++ b/examples/docs_examples/README.md @@ -1,3 +1,3 @@ # Docs examples -Examples used in https://langchaindart.com documentation. +Examples used in https://langchaindart.dev documentation. diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 6234c279..014430f6 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -264,14 +264,14 @@ packages: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.1" + version: "0.2.1+1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.6.1+1" langchain_tiktoken: dependency: transitive description: @@ -317,14 +317,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.0" + version: "0.1.0+1" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.3.2+1" path: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 37662d4c..716c7270 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -1,5 +1,5 @@ name: docs_examples -description: Examples used in langchaindart.com documentation. +description: Examples used in langchaindart.dev documentation. version: 1.0.0 publish_to: none diff --git a/examples/hello_world_backend/README.md b/examples/hello_world_backend/README.md index 4f00582c..70208b7a 100644 --- a/examples/hello_world_backend/README.md +++ b/examples/hello_world_backend/README.md @@ -7,7 +7,7 @@ It exposes a REST API that given a list of topics, generates a sonnet about them The HTTP server is implemented using [package:shelf](https://pub.dev/packages/shelf). -You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.com/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) +You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) blog post. 
![Hello world backend](hello_world_backend.gif) diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index 3ef992b7..9c8a5ba4 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -133,7 +133,7 @@ packages: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.6.1+1" langchain_tiktoken: dependency: transitive description: @@ -156,7 +156,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.3.2+1" path: dependency: transitive description: diff --git a/examples/hello_world_cli/README.md b/examples/hello_world_cli/README.md index 608daeb6..3ab0ed81 100644 --- a/examples/hello_world_cli/README.md +++ b/examples/hello_world_cli/README.md @@ -2,7 +2,7 @@ This sample app demonstrates how to call an LLM from a CLI application using LangChain.dart. -You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.com/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) +You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) blog post. ## Usage diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 42f90c1a..df156ea2 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -125,7 +125,7 @@ packages: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.6.1+1" langchain_tiktoken: dependency: transitive description: @@ -148,7 +148,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.3.2+1" path: dependency: transitive description: diff --git a/examples/hello_world_flutter/README.md b/examples/hello_world_flutter/README.md index 80a111af..6b7c3871 100644 --- a/examples/hello_world_flutter/README.md +++ b/examples/hello_world_flutter/README.md @@ -2,7 +2,7 @@ This sample app demonstrates how to call an LLM from a Flutter application using LangChain.dart. -You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.com/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) +You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) blog post. 
## Usage diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index a12c6037..ecb15bcc 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -154,7 +154,7 @@ packages: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.6.1+1" langchain_tiktoken: dependency: transitive description: @@ -193,7 +193,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.3.2+1" path: dependency: transitive description: diff --git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml index 2fa2b082..5beab57e 100644 --- a/packages/anthropic_sdk_dart/pubspec.yaml +++ b/packages/anthropic_sdk_dart/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/anthropic_sdk_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:anthropic_sdk_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/chromadb/pubspec.yaml b/packages/chromadb/pubspec.yaml index 7a218b9c..d992b91c 100644 --- a/packages/chromadb/pubspec.yaml +++ b/packages/chromadb/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.2.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/chromadb issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/googleai_dart/pubspec.yaml b/packages/googleai_dart/pubspec.yaml index ca8f0f00..7ccb5df6 100644 --- a/packages/googleai_dart/pubspec.yaml +++ b/packages/googleai_dart/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/googleai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:googleai_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index 47e5a89d..9d255b21 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -153,7 +153,7 @@ ## 0.0.13 -> Check out the [LangChain Expression Language documentation](https://langchaindart.com/#/expression_language/interface) for more details +> Check out the [LangChain Expression Language documentation](https://langchaindart.dev/#/expression_language/interface) for more details - **FEAT**: Add support for JsonOutputFunctionsParser ([#165](https://github.com/davidmigloz/langchain_dart/issues/165)). ([66c8e644](https://github.com/davidmigloz/langchain_dart/commit/66c8e64410d1dbf8b75e5734cb0cbb0e43dc0615)) - **FEAT**: Add support for StringOutputParser ([#164](https://github.com/davidmigloz/langchain_dart/issues/164)). ([ee29e99a](https://github.com/davidmigloz/langchain_dart/commit/ee29e99a410c3cc6a7ae263fea1cde283f904edf)) @@ -274,7 +274,7 @@ - Initial public release. 
Check out the announcement post for all the details: -https://blog.langchaindart.com/introducing-langchain-dart-6b1d34fc41ef +https://blog.langchaindart.dev/introducing-langchain-dart-6b1d34fc41ef ## 0.0.1-dev.7 @@ -322,7 +322,7 @@ https://blog.langchaindart.com/introducing-langchain-dart-6b1d34fc41ef - Add support for LLMs - `BaseLLM` class (#14). - Add support for Chat models - `BaseChatModel` class (#10). - Add support for prompt templates - `PromptTemplate` class (#7). -- Publish LangChain.dart documentation on http://langchaindart.com. +- Publish LangChain.dart documentation on http://langchaindart.dev. ## 0.0.1-dev.1 diff --git a/packages/langchain/README.md b/packages/langchain/README.md index bef19382..561f7d7d 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -156,9 +156,9 @@ print(res); ## Documentation -- [LangChain.dart documentation](https://langchaindart.com) +- [LangChain.dart documentation](https://langchaindart.dev) - [Sample apps](https://github.com/davidmigloz/langchain_dart/tree/main/examples) -- [LangChain.dart blog](https://blog.langchaindart.com) +- [LangChain.dart blog](https://blog.langchaindart.dev) - [Project board](https://github.com/users/davidmigloz/projects/2/views/1) ## Community diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 4326a8fb..1483d1f5 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.7.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_amazon/pubspec.yaml b/packages/langchain_amazon/pubspec.yaml index 41af11b0..abbcb58c 100644 --- a/packages/langchain_amazon/pubspec.yaml +++ b/packages/langchain_amazon/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_amazon issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_amazon homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index de768b22..6ed5624f 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 197d0776..84e24303 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.2.0+4 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: 
https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_cohere/pubspec.yaml b/packages/langchain_cohere/pubspec.yaml index bcb53a98..8ace6cf2 100644 --- a/packages/langchain_cohere/pubspec.yaml +++ b/packages/langchain_cohere/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_cohere issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_cohere homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 29fbdb15..8cacd96c 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.2.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index 65650ce8..d6f04b41 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.3.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 0dd384a2..3b051b3a 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -252,7 +252,7 @@ packages: path: ".." relative: true source: path - version: "0.1.0" + version: "0.1.0+1" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index f8c3870d..1a3863b4 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -111,7 +111,7 @@ import 'types.dart'; /// /// [ChatFirebaseVertexAI] supports tool calling. /// -/// Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) +/// Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) /// for more information on how to use tools. 
/// /// Example: diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 7397caf0..413e85c3 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart index 02fde0bb..30a27cdd 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart @@ -118,7 +118,7 @@ import 'types.dart'; /// /// [ChatGoogleGenerativeAI] supports tool calling. /// -/// Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) +/// Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) /// for more information on how to use tools. /// /// Example: diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 7b441954..67a75cff 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.5.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_huggingface/pubspec.yaml b/packages/langchain_huggingface/pubspec.yaml index 576a8f6f..7c1f00d4 100644 --- a/packages/langchain_huggingface/pubspec.yaml +++ b/packages/langchain_huggingface/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_huggingface issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_huggingface homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_microsoft/pubspec.yaml b/packages/langchain_microsoft/pubspec.yaml index 685287b7..3bd05e6a 100644 --- a/packages/langchain_microsoft/pubspec.yaml +++ b/packages/langchain_microsoft/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_microsoft issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_microsoft homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 90b027e9..964397d3 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ 
b/packages/langchain_mistralai/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.2.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_mistralai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_mistralai homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index adfa39d4..aea5e9ee 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.2.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index d1a0368a..ae115e6d 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -77,7 +77,7 @@ ## 0.3.2 - - **FEAT**: Support OpenRouter API in ChatOpenAI wrapper ([#292](https://github.com/davidmigloz/langchain_dart/issues/292)). ([c6e7e5be](https://github.com/davidmigloz/langchain_dart/commit/c6e7e5beeb03c32a93b062aab874cae3da0a52d9)) ([docs](https://langchaindart.com/#/modules/model_io/models/chat_models/integrations/open_router)) + - **FEAT**: Support OpenRouter API in ChatOpenAI wrapper ([#292](https://github.com/davidmigloz/langchain_dart/issues/292)). ([c6e7e5be](https://github.com/davidmigloz/langchain_dart/commit/c6e7e5beeb03c32a93b062aab874cae3da0a52d9)) ([docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/open_router)) - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e)) - **REFACTOR**: Migrate tokenizer to langchain_tiktoken package ([#285](https://github.com/davidmigloz/langchain_dart/issues/285)). ([6a3b6466](https://github.com/davidmigloz/langchain_dart/commit/6a3b6466e3e4cfddda2f506adbf2eb563814d02f)) - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f)) @@ -257,7 +257,7 @@ - Initial public release. 
Check out the announcement post for all the details: -https://blog.langchaindart.com/introducing-langchain-dart-6b1d34fc41ef +https://blog.langchaindart.dev/introducing-langchain-dart-6b1d34fc41ef ## 0.0.1-dev.7 diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index ca76313e..efab060a 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.6.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index c64dda9f..479b441e 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.1.0+4 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_supabase/lib/src/vector_stores/supabase.dart b/packages/langchain_supabase/lib/src/vector_stores/supabase.dart index f6d1e11e..0c777f01 100644 --- a/packages/langchain_supabase/lib/src/vector_stores/supabase.dart +++ b/packages/langchain_supabase/lib/src/vector_stores/supabase.dart @@ -54,7 +54,7 @@ import 'package:supabase/supabase.dart'; /// ``` /// /// See documentation for more details: -/// - [LangChain.dart Supabase docs](https://langchaindart.com/#/modules/retrieval/vector_stores/integrations/supabase) +/// - [LangChain.dart Supabase docs](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/supabase) /// - [Supabase Vector docs](https://supabase.com/docs/guides/ai) /// {@endtemplate} class Supabase extends VectorStore { diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index 5450773b..91340307 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.1.0+4 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_weaviate/pubspec.yaml b/packages/langchain_weaviate/pubspec.yaml index f5f5de33..fb6e6ce4 100644 --- a/packages/langchain_weaviate/pubspec.yaml +++ b/packages/langchain_weaviate/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_weaviate issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_weaviate homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_wikipedia/pubspec.yaml 
b/packages/langchain_wikipedia/pubspec.yaml index e1377267..d8f713b5 100644 --- a/packages/langchain_wikipedia/pubspec.yaml +++ b/packages/langchain_wikipedia/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_wikipedia issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_wikipedia homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_wolfram/pubspec.yaml b/packages/langchain_wolfram/pubspec.yaml index b64e02a0..950db4e1 100644 --- a/packages/langchain_wolfram/pubspec.yaml +++ b/packages/langchain_wolfram/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_wolfram issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_wolfram homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/mistralai_dart/pubspec.yaml b/packages/mistralai_dart/pubspec.yaml index a7aa8347..d4af6eaa 100644 --- a/packages/mistralai_dart/pubspec.yaml +++ b/packages/mistralai_dart/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.3+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/mistralai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:mistralai_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index e2eee5ca..c9dd9706 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index 30fee90f..fe366c39 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.3.2+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index 1edc8121..703fb145 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/vertex_ai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:vertex_ai homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai From 
8ccbe38fead747e050c268afaf14efdeb5bb462f Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 25 May 2024 16:04:35 +0200 Subject: [PATCH 022/251] fix: Fix deserialization of sealed classes (#435) --- melos.yaml | 2 +- packages/chromadb/pubspec.yaml | 2 +- packages/googleai_dart/pubspec.yaml | 2 +- packages/mistralai_dart/pubspec.yaml | 2 +- packages/ollama_dart/pubspec.yaml | 2 +- .../lib/src/generated/schema/chat_completion_message.dart | 8 +++++--- .../src/generated/schema/create_completion_request.dart | 2 +- .../src/generated/schema/create_embedding_request.dart | 2 +- .../lib/src/generated/schema/create_message_request.dart | 6 ++++-- packages/openai_dart/pubspec.yaml | 2 +- 10 files changed, 17 insertions(+), 13 deletions(-) diff --git a/melos.yaml b/melos.yaml index 51805716..d4792fff 100644 --- a/melos.yaml +++ b/melos.yaml @@ -62,7 +62,7 @@ command: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 scripts: diff --git a/packages/chromadb/pubspec.yaml b/packages/chromadb/pubspec.yaml index d992b91c..40252b6b 100644 --- a/packages/chromadb/pubspec.yaml +++ b/packages/chromadb/pubspec.yaml @@ -27,5 +27,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 diff --git a/packages/googleai_dart/pubspec.yaml b/packages/googleai_dart/pubspec.yaml index 7ccb5df6..2ed4d004 100644 --- a/packages/googleai_dart/pubspec.yaml +++ b/packages/googleai_dart/pubspec.yaml @@ -31,5 +31,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 diff --git a/packages/mistralai_dart/pubspec.yaml b/packages/mistralai_dart/pubspec.yaml index d4af6eaa..27b81ed4 100644 --- a/packages/mistralai_dart/pubspec.yaml +++ b/packages/mistralai_dart/pubspec.yaml @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index c9dd9706..ab538c0d 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart index ae4d6e9c..65e9b1d8 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart @@ -158,9 +158,11 @@ class _ChatCompletionUserMessageContentConverter @override ChatCompletionUserMessageContent fromJson(Object? 
data) { - if (data is List && - data.every((item) => item is ChatCompletionMessageContentPart)) { - return ChatCompletionMessageContentParts(data.cast()); + if (data is List && data.every((item) => item is Map)) { + return ChatCompletionMessageContentParts(data + .map((i) => ChatCompletionMessageContentPart.fromJson( + i as Map)) + .toList(growable: false)); } if (data is String) { return ChatCompletionUserMessageContentString(data); diff --git a/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart index 31bb714a..ff66b86c 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart @@ -347,7 +347,7 @@ class _CompletionPromptConverter @override CompletionPrompt fromJson(Object? data) { - if (data is List && data.every((item) => item is List)) { + if (data is List && data.every((item) => item is List)) { return CompletionPromptListListInt(data.cast()); } if (data is List && data.every((item) => item is int)) { diff --git a/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart b/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart index fec9f621..10c24925 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart @@ -179,7 +179,7 @@ class _EmbeddingInputConverter @override EmbeddingInput fromJson(Object? data) { - if (data is List && data.every((item) => item is List)) { + if (data is List && data.every((item) => item is List)) { return EmbeddingInputListListInt(data.cast()); } if (data is List && data.every((item) => item is int)) { diff --git a/packages/openai_dart/lib/src/generated/schema/create_message_request.dart b/packages/openai_dart/lib/src/generated/schema/create_message_request.dart index bad29bc1..7837049f 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_message_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_message_request.dart @@ -88,8 +88,10 @@ class _CreateMessageRequestContentConverter @override CreateMessageRequestContent fromJson(Object? 
data) { - if (data is List && data.every((item) => item is MessageContent)) { - return CreateMessageRequestContentListMessageContent(data.cast()); + if (data is List && data.every((item) => item is Map)) { + return CreateMessageRequestContentListMessageContent(data + .map((i) => MessageContent.fromJson(i as Map)) + .toList(growable: false)); } if (data is String) { return CreateMessageRequestContentString(data); diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index fe366c39..e34a047d 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 From 8ac0e94c85ebdd029f83d04592256141e6a664c0 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 25 May 2024 16:32:51 +0200 Subject: [PATCH 023/251] fix: Make vector store name optional in openai_dart (#436) --- packages/openai_dart/lib/openai_dart.dart | 2 +- .../schema/create_vector_store_request.dart | 10 +-- .../src/generated/schema/schema.freezed.dart | 67 ++++++++++--------- .../lib/src/generated/schema/schema.g.dart | 4 +- packages/openai_dart/oas/openapi_curated.yaml | 8 +-- packages/openai_dart/pubspec.yaml | 2 +- 6 files changed, 47 insertions(+), 46 deletions(-) diff --git a/packages/openai_dart/lib/openai_dart.dart b/packages/openai_dart/lib/openai_dart.dart index 87830981..7600ced2 100644 --- a/packages/openai_dart/lib/openai_dart.dart +++ b/packages/openai_dart/lib/openai_dart.dart @@ -1,4 +1,4 @@ -/// Dart Client for the OpenAI API (completions, chat, embeddings, etc.). +/// Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. library; export 'src/client.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart index cce0ccd3..bb9e83d7 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart @@ -15,12 +15,12 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { /// Factory constructor for CreateVectorStoreRequest const factory CreateVectorStoreRequest({ + /// The name of the vector store. + @JsonKey(includeIfNull: false) String? name, + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - /// The name of the vector store. - required String name, - /// The expiration policy for a vector store. @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? 
expiresAfter, @@ -35,8 +35,8 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { /// List of all property names of schema static const List propertyNames = [ - 'file_ids', 'name', + 'file_ids', 'expires_after', 'metadata' ]; @@ -49,8 +49,8 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { /// Map representation of object (not serialized) Map toMap() { return { - 'file_ids': fileIds, 'name': name, + 'file_ids': fileIds, 'expires_after': expiresAfter, 'metadata': metadata, }; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index abfb4fc9..1395bc5a 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -44925,13 +44925,14 @@ CreateVectorStoreRequest _$CreateVectorStoreRequestFromJson( /// @nodoc mixin _$CreateVectorStoreRequest { + /// The name of the vector store. + @JsonKey(includeIfNull: false) + String? get name => throw _privateConstructorUsedError; + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_ids', includeIfNull: false) List? get fileIds => throw _privateConstructorUsedError; - /// The name of the vector store. - String get name => throw _privateConstructorUsedError; - /// The expiration policy for a vector store. @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? get expiresAfter => @@ -44954,8 +44955,8 @@ abstract class $CreateVectorStoreRequestCopyWith<$Res> { _$CreateVectorStoreRequestCopyWithImpl<$Res, CreateVectorStoreRequest>; @useResult $Res call( - {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - String name, + {@JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, @JsonKey(includeIfNull: false) dynamic metadata}); @@ -44977,20 +44978,20 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, @pragma('vm:prefer-inline') @override $Res call({ + Object? name = freezed, Object? fileIds = freezed, - Object? name = null, Object? expiresAfter = freezed, Object? metadata = freezed, }) { return _then(_value.copyWith( + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, fileIds: freezed == fileIds ? _value.fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List?, - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, expiresAfter: freezed == expiresAfter ? _value.expiresAfter : expiresAfter // ignore: cast_nullable_to_non_nullable @@ -45026,8 +45027,8 @@ abstract class _$$CreateVectorStoreRequestImplCopyWith<$Res> @override @useResult $Res call( - {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - String name, + {@JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, @JsonKey(includeIfNull: false) dynamic metadata}); @@ -45049,20 +45050,20 @@ class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> @pragma('vm:prefer-inline') @override $Res call({ + Object? name = freezed, Object? fileIds = freezed, - Object? name = null, Object? 
expiresAfter = freezed, Object? metadata = freezed, }) { return _then(_$CreateVectorStoreRequestImpl( + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, fileIds: freezed == fileIds ? _value._fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List?, - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, expiresAfter: freezed == expiresAfter ? _value.expiresAfter : expiresAfter // ignore: cast_nullable_to_non_nullable @@ -45079,9 +45080,9 @@ class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> @JsonSerializable() class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { const _$CreateVectorStoreRequestImpl( - {@JsonKey(name: 'file_ids', includeIfNull: false) + {@JsonKey(includeIfNull: false) this.name, + @JsonKey(name: 'file_ids', includeIfNull: false) final List? fileIds, - required this.name, @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, @JsonKey(includeIfNull: false) this.metadata}) : _fileIds = fileIds, @@ -45090,6 +45091,11 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { factory _$CreateVectorStoreRequestImpl.fromJson(Map json) => _$$CreateVectorStoreRequestImplFromJson(json); + /// The name of the vector store. + @override + @JsonKey(includeIfNull: false) + final String? name; + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. final List? _fileIds; @@ -45104,10 +45110,6 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { return EqualUnmodifiableListView(value); } - /// The name of the vector store. - @override - final String name; - /// The expiration policy for a vector store. @override @JsonKey(name: 'expires_after', includeIfNull: false) @@ -45120,7 +45122,7 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { @override String toString() { - return 'CreateVectorStoreRequest(fileIds: $fileIds, name: $name, expiresAfter: $expiresAfter, metadata: $metadata)'; + return 'CreateVectorStoreRequest(name: $name, fileIds: $fileIds, expiresAfter: $expiresAfter, metadata: $metadata)'; } @override @@ -45128,8 +45130,8 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { return identical(this, other) || (other.runtimeType == runtimeType && other is _$CreateVectorStoreRequestImpl && - const DeepCollectionEquality().equals(other._fileIds, _fileIds) && (identical(other.name, name) || other.name == name) && + const DeepCollectionEquality().equals(other._fileIds, _fileIds) && (identical(other.expiresAfter, expiresAfter) || other.expiresAfter == expiresAfter) && const DeepCollectionEquality().equals(other.metadata, metadata)); @@ -45139,8 +45141,8 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { @override int get hashCode => Object.hash( runtimeType, - const DeepCollectionEquality().hash(_fileIds), name, + const DeepCollectionEquality().hash(_fileIds), expiresAfter, const DeepCollectionEquality().hash(metadata)); @@ -45161,9 +45163,9 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { const factory _CreateVectorStoreRequest( - {@JsonKey(name: 'file_ids', includeIfNull: false) + {@JsonKey(includeIfNull: false) final String? name, + @JsonKey(name: 'file_ids', includeIfNull: false) final List? 
fileIds, - required final String name, @JsonKey(name: 'expires_after', includeIfNull: false) final VectorStoreExpirationAfter? expiresAfter, @JsonKey(includeIfNull: false) final dynamic metadata}) = @@ -45175,15 +45177,16 @@ abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { @override + /// The name of the vector store. + @JsonKey(includeIfNull: false) + String? get name; + @override + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_ids', includeIfNull: false) List? get fileIds; @override - /// The name of the vector store. - String get name; - @override - /// The expiration policy for a vector store. @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? get expiresAfter; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index eeb6a84e..4062dc95 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -4373,10 +4373,10 @@ Map _$$VectorStoreObjectFileCountsImplToJson( _$CreateVectorStoreRequestImpl _$$CreateVectorStoreRequestImplFromJson( Map json) => _$CreateVectorStoreRequestImpl( + name: json['name'] as String?, fileIds: (json['file_ids'] as List?) ?.map((e) => e as String) .toList(), - name: json['name'] as String, expiresAfter: json['expires_after'] == null ? null : VectorStoreExpirationAfter.fromJson( @@ -4394,8 +4394,8 @@ Map _$$CreateVectorStoreRequestImplToJson( } } + writeNotNull('name', instance.name); writeNotNull('file_ids', instance.fileIds); - val['name'] = instance.name; writeNotNull('expires_after', instance.expiresAfter?.toJson()); writeNotNull('metadata', instance.metadata); return val; diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 46201dd4..b1a945bc 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -5669,23 +5669,21 @@ components: description: Request object for the Create assistant file endpoint. additionalProperties: false properties: + name: + description: The name of the vector store. + type: string file_ids: description: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. type: array maxItems: 500 items: type: string - name: - description: The name of the vector store. - type: string expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" metadata: description: *metadata_description type: object nullable: true - required: - - name UpdateVectorStoreRequest: type: object description: Request object for the Update vector store endpoint. diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index e34a047d..ee8442e2 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,5 +1,5 @@ name: openai_dart -description: Dart Client for the OpenAI API (completions, chat, embeddings, etc.). +description: Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. 
version: 0.3.2+1
repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart
issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart

From 50e69e01574a975f830bfe968998151f2018bc56 Mon Sep 17 00:00:00 2001
From: David Miguel Lozano
Date: Sat, 25 May 2024 16:55:04 +0200
Subject: [PATCH 024/251] docs: Document tool calling with OpenRouter (#437)

---
 .../chat_models/integrations/open_router.md   | 60 ++++++++++++++++
 .../chat_models/integrations/open_router.dart | 54 +++++++++++++++
 .../test/chat_models/open_router_test.dart    | 69 +++++++++++++++++++
 3 files changed, 183 insertions(+)

diff --git a/docs/modules/model_io/models/chat_models/integrations/open_router.md b/docs/modules/model_io/models/chat_models/integrations/open_router.md
index e747ca5f..c2d63555 100644
--- a/docs/modules/model_io/models/chat_models/integrations/open_router.md
+++ b/docs/modules/model_io/models/chat_models/integrations/open_router.md
@@ -95,3 +95,63 @@ await stream.forEach(print);
 // 123
 // 456789
 ```
+
+## Tool calling
+
+OpenRouter supports [tool calling](https://openrouter.ai/docs#tool-calls).
+
+Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools.
+
+In the following example we use the `joke` tool to generate jokes. We stream the joke generation using the `ToolsOutputParser` which tries to "auto-complete" the partial JSON from each chunk into a valid state.
+
+```dart
+final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];
+const tool = ToolSpec(
+  name: 'joke',
+  description: 'A joke',
+  inputJsonSchema: {
+    'type': 'object',
+    'properties': {
+      'setup': {
+        'type': 'string',
+        'description': 'The setup for the joke',
+      },
+      'punchline': {
+        'type': 'string',
+        'description': 'The punchline to the joke',
+      },
+    },
+    'required': ['setup', 'punchline'],
+  },
+);
+final promptTemplate = ChatPromptTemplate.fromTemplate(
+  'tell me a long joke about {foo}',
+);
+final chat = ChatOpenAI(
+  apiKey: openRouterApiKey,
+  baseUrl: 'https://openrouter.ai/api/v1',
+  defaultOptions: ChatOpenAIOptions(
+    model: 'gpt-4o',
+    tools: [tool],
+    toolChoice: ChatToolChoice.forced(name: 'joke'),
+  ),
+);
+final outputParser = ToolsOutputParser();
+
+final chain = promptTemplate.pipe(chat).pipe(outputParser);
+
+final stream = chain.stream({'foo': 'bears'});
+await for (final chunk in stream) {
+  final args = chunk.first.arguments;
+  print(args);
+}
+// {}
+// {setup: }
+// {setup: Why don't}
+// {setup: Why don't bears}
+// {setup: Why don't bears like fast food}
+// {setup: Why don't bears like fast food?, punchline: }
+// {setup: Why don't bears like fast food?, punchline: Because}
+// {setup: Why don't bears like fast food?, punchline: Because they can't}
+// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!}
+```
diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart
index 439943c5..f552e60b 100644
--- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart
+++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart
@@ -7,6 +7,7 @@ import 'package:langchain_openai/langchain_openai.dart';
 void main(final List arguments) async {
   await _openRouter();
   await _openRouterStreaming();
+  await _openRouterStreamingTools();
 }

 Future _openRouter() async {
@@ -66,3 +67,56 @@ Future _openRouterStreaming() async {
   // 123
   // 456789
 }
+
+Future _openRouterStreamingTools() async {
+  final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];
+
+  const tool = ToolSpec(
+    name: 'joke',
+    description: 'A joke',
+    inputJsonSchema: {
+      'type': 'object',
+      'properties': {
+        'setup': {
+          'type': 'string',
+          'description': 'The setup for the joke',
+        },
+        'punchline': {
+          'type': 'string',
+          'description': 'The punchline to the joke',
+        },
+      },
+      'required': ['setup', 'punchline'],
+    },
+  );
+  final promptTemplate = ChatPromptTemplate.fromTemplate(
+    'tell me a long joke about {foo}',
+  );
+  final chat = ChatOpenAI(
+    apiKey: openRouterApiKey,
+    baseUrl: 'https://openrouter.ai/api/v1',
+    defaultOptions: ChatOpenAIOptions(
+      model: 'gpt-4o',
+      tools: const [tool],
+      toolChoice: ChatToolChoice.forced(name: 'joke'),
+    ),
+  );
+  final outputParser = ToolsOutputParser();
+
+  final chain = promptTemplate.pipe(chat).pipe(outputParser);
+
+  final stream = chain.stream({'foo': 'bears'});
+  await for (final chunk in stream) {
+    final args = chunk.first.arguments;
+    print(args);
+  }
+  // {}
+  // {setup: }
+  // {setup: Why don't}
+  // {setup: Why don't bears}
+  // {setup: Why don't bears like fast food}
+  // {setup: Why don't bears like fast food?, punchline: }
+  // {setup: Why don't bears like fast food?, punchline: Because}
+  // {setup: Why don't bears like fast food?, punchline: Because they can't}
+  // {setup: Why don't bears like fast food?, punchline: Because they can't catch it!}
+}
diff --git a/packages/langchain_openai/test/chat_models/open_router_test.dart b/packages/langchain_openai/test/chat_models/open_router_test.dart
index 396f8ac4..4587b56b 100644
--- a/packages/langchain_openai/test/chat_models/open_router_test.dart
+++ b/packages/langchain_openai/test/chat_models/open_router_test.dart
@@ -1,10 +1,12 @@
 @TestOn('vm')
 library; // Uses dart:io

+import 'dart:convert';
 import 'dart:io';

 import 'package:langchain_core/chat_models.dart';
 import 'package:langchain_core/prompts.dart';
+import 'package:langchain_core/tools.dart';
 import 'package:langchain_openai/langchain_openai.dart';
 import 'package:test/test.dart';
@@ -104,5 +106,72 @@ void main() {
         expect(numTokens, 13, reason: model);
       }
     });
+
+    test('Test tool calling',
+        timeout: const Timeout(Duration(minutes: 1)), () async {
+      const tool = ToolSpec(
+        name: 'get_current_weather',
+        description: 'Get the current weather in a given location',
+        inputJsonSchema: {
+          'type': 'object',
+          'properties': {
+            'location': {
+              'type': 'string',
+              'description': 'The city and state, e.g.
San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + final humanMessage = ChatMessage.humanText( + 'What’s the weather like in Boston right now?', + ); + final res1 = await chatModel.invoke( + PromptValue.chat([humanMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage1 = res1.output; + + expect(aiMessage1.content, isEmpty); + expect(aiMessage1.toolCalls, isNotEmpty); + final toolCall = aiMessage1.toolCalls.first; + + expect(toolCall.name, tool.name); + expect(toolCall.arguments.containsKey('location'), isTrue); + expect(toolCall.arguments['location'], contains('Boston')); + + final functionResult = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + final functionMessage = ChatMessage.tool( + toolCallId: toolCall.id, + content: json.encode(functionResult), + ); + + final res2 = await chatModel.invoke( + PromptValue.chat([humanMessage, aiMessage1, functionMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage2 = res2.output; + + expect(aiMessage2.toolCalls, isEmpty); + expect(aiMessage2.content, contains('22')); + }); }); } From 23d63cea20a4738dc601829e4876329eb3b1ecb9 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Tue, 28 May 2024 23:24:19 +0200 Subject: [PATCH 025/251] feat: Add support for ObjectBoxVectorStore (#438) --- docs/_sidebar.md | 1 + .../integrations/img/objectbox.png | Bin 0 -> 51968 bytes .../vector_stores/integrations/memory.md | 8 +- .../vector_stores/integrations/objectbox.md | 258 ++++++++++++++++++ examples/browser_summarizer/pubspec.lock | 16 ++ .../vector_stores/integrations/objectbox.dart | 108 ++++++++ examples/docs_examples/pubspec.lock | 24 ++ melos.yaml | 2 + .../lib/src/vector_stores/memory.dart | 4 +- .../lib/langchain_community.dart | 1 + .../objectbox/base_objectbox.dart | 120 ++++++++ .../objectbox/objectbox-model.json | 56 ++++ .../vector_stores/objectbox/objectbox.dart | 196 +++++++++++++ .../vector_stores/objectbox/objectbox.g.dart | 193 +++++++++++++ .../src/vector_stores/objectbox/types.dart | 29 ++ .../lib/src/vector_stores/vector_stores.dart | 4 + packages/langchain_community/pubspec.yaml | 8 + .../pubspec_overrides.yaml | 6 +- .../objectbox/objectbox_test.dart | 159 +++++++++++ 19 files changed, 1187 insertions(+), 6 deletions(-) create mode 100644 docs/modules/retrieval/vector_stores/integrations/img/objectbox.png create mode 100644 docs/modules/retrieval/vector_stores/integrations/objectbox.md create mode 100644 examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/types.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/vector_stores.dart create mode 100644 packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart diff --git a/docs/_sidebar.md b/docs/_sidebar.md index 532c82b8..6ce757ba 100644 --- 
a/docs/_sidebar.md
+++ b/docs/_sidebar.md
@@ -94,6 +94,7 @@
   - [Vector stores](/modules/retrieval/vector_stores/vector_stores.md)
     - Integrations
       - [Memory](/modules/retrieval/vector_stores/integrations/memory.md)
+      - [ObjectBox](/modules/retrieval/vector_stores/integrations/objectbox.md)
       - [Chroma](/modules/retrieval/vector_stores/integrations/chroma.md)
       - [Pinecone](/modules/retrieval/vector_stores/integrations/pinecone.md)
       - [Supabase](/modules/retrieval/vector_stores/integrations/supabase.md)
diff --git a/docs/modules/retrieval/vector_stores/integrations/img/objectbox.png b/docs/modules/retrieval/vector_stores/integrations/img/objectbox.png
new file mode 100644
index 0000000000000000000000000000000000000000..6d88c06fd27d87ec228ae1bdf5a1b803b0af140d
GIT binary patch
literal 51968
zo#a1(=rn?O8*lbZnHY-L+WO8vUxkc`MA(I7p{YwjG@udJNYi>=@?V2P80mH>C70ul z=Odp_o}T7dmdVGx|DhM_(p9a$liSFOOc7?px*38G4QDrF1ZCo%?S(zIit2KIet{G9 zDO{|Q*DQh&_QbTgc{G0YUov&_MBDhgA2>Tl>?c~>->1laWO4F{Jl@;2H+E%3Y? z0YwcPV$X!37sM$o?LB=~1nwJ+LS1W-J*H?s^NCTIrSxl*7(zy2=hK$Rnu*M8C|4Jv z1PF&8SU1Uv>IxDuQ0ldlXH2vtM2F#VNlZ98Olv0{MT+JXYUN@lWGP3bvScYI*Z~ka zOO<{{8rB&#T5SwY<*rX*jXcd}d_E>#$Kz_3oG?#sd#AK1Ti!>jr|HpYSb{og-FEmK zCGHPZduYFGH@K^euC;@lE2`$jxzANb%%VZR*5b^Vwq^`REkv3F1Q%yXM@fX*Gpl~b zMMcYyzCMbyr`nVzDvDVn(lLsmsSsGL#rgYcosp^a*U_$UCB}YdBUmq-^uI}%kv&2F zmp)kSPg8ou8KkzEp?9*aJX~kZ=T)RZ+-@c&Wx*Y2WQFzo>foJlaO%e;srt3aYMRt# zV1)s|dsMaoil_XK{qtGo2*Ojvx#-u`2=>bv8f*X1*CY2@GN?n+@Sc!O>}Y6M)EkCt z6r%K%?*Wlgo>bM`)%(WrKiEc1Rm2E1eo!Y5C8MYJ>W^2}!Mh8tVRcN(oA#$FHEv!0 zLm$WP^6FG#PVk&KH~_kAdAtXpcwZ&tO#6GkFU-ve-MFQ;U+z5pY0TmEg>XMo18M}@ z<)dL1tubdAFr;WyoOmg&?y?Qn+N{IFb;$D8Q!Py_>3Sl}qI;KmDKcGkTA2xw zEETdLW>Mb^A!iitzUktc4^;+(?<6eYGOvvs?CRWK*XVdKbQ?TDjjQ+fku#Ou3f z9&e11bCR^E0GIyMx|xC%+L4z{}MI|8`U?geJ zj948x`A1W22;>vY&0(YL+&`u#_sHemFx#Y4HdZz>cQaeWLMudi!iiryNMY{3*Ygp< z&`pmT^*O6X^AA55c;Z{7BcfzJ_>Pe&UaecD@DDRXJ#%X$Ilxtlkt(;h3S+^)??--C zX1mWppQK%c1+ zznHin_3lhOu&Qy=pYBp3aD%^v%Yu!%VnXS`)IW*BxPLXfbZ+c#@IN^xe%BnBZ5 zBO{ooTCK_3lJV-Xy~O5}zm)17GT&EC(DeP#;~8dt#<8du*OG-6BenrriAba|mFnqQEO+(wacqa0B$HzKt z018FX%_F!w4`IQCz*8CIun=K9XUu&(#qd)AH-X6Er+S5Cgx08n8HT_(TYJ8g7#Lyl zod>yOoZNB5rY{cml;Ry`K2*}PIyTVg))yXu06Pqz(^(~Xly3+mtF|Zig5xPjkTX~h zS)(N|VNmCLr;KK(b9wm$?k^yExUcFW;lGle)sYz81Hq07b|qT9hHR+v*q7jwT;)mC zNG0(gNH|dj9zXqtn224o$fk1KJRrX4c?1^x_+|U4G|l! zI4!G>LhTn*5vLUmS8ul=y4pH#N8N9~79HQcpK}_%H@}}9C*85c*gH}>9E~Yh`P}p- zWCsx^ z*GqpI@pDXfyXUb79WKjayN|~+U-$c}bkA$+__(RDWX-)UHqb#R+d@Kqe zAt3VZ%1AeNqXb$w;7r0;O^5s<&nx{}wzyQsa==2V5e1Ul+8M@L9Vwa>8o)V86y!{q zPeqQMYaosA16na_mMU7?BHHdT8iuBtBDi9)r3(ct14w1C^G`i3Z4v$bku&m50`RO^ z&!|E09J34{aCy!ex)AqG80`Jb=I>8n1GaPO<2*MYLB-tB^><0qJE~F@ zOCnHKOUtv`)A!6=*@df_XoiI&B=FINdfVmVs`!xS`@~$(#mgNd+#uTd^z@fCpYFEX z2Wz9ZsXMPr)U_?o?QdPvCwBcJf}#Q@?rF%uf9CbV4w;%lt(*=_ej>;k%f}{?<~sva zLi4Hk4X{VzY82>we9#|N4{(iX|8haIMl@i+Hjg?&`yC}fFu;XYHdYJm6P<|HKvh)d zSq4NWd2W4k+CH>j+Jn$+l-saA&9vyj&L?dcls{xgVx>R*2wG)jhbBp@nAv0v&qkc9 z*{*E~arS}lp{F)1y2-TK9V5lZ`>c-*&ZCmQ7?x_9L4wT3uB1wjXwdyc|ID$LN!ys< zXque!!XEGS<@Q=hO0MHMMd)d{rmpX`q@fqmKx^qTrM=FuemzsPj1vFaHC?M^<_BT@ zoIwykqIJbkPS-S;6$&#i?Gl1avau`v^tBNmAH$bR+I1)Ont3qB0dF}X>38+0h0+gG z7g5&!tIC<8l%GE=_&_j<;ao)qJsyv4P++=X6EUwad$`$ix%OO?)MoSf3HMe=R^IWa zC^=fA8Q(F8SU7JkfeFi=wpNg{joc%05XO!93mc8*@3@JGd@~H8WZ#VzlRiJ$>@|`0 zl~16N?S)(enjqASZgO|@Yb<^EK0N2IH-XL$r%L0NTj^0&6U|G>s(er5-`y|LP~KiA ztOme*t+vN=+Fg$xN$y8`HPXFrOGHF}-pg`&KY^3-nXz*8!wg0#Zw;ZMKE>tpYkCPF zb118zf44mEk&B$&2}FG6fl?g9q)132IgAnT$X&_LMSir8^E?VKNrrJ&*%l-x$ei@2 z()?-dBNz<@PbQfuQ zspXu#)DJgv4}&zT4i7p|RUVS8CYix6W(GUqiX{J>5yf^3<^W9ootkYz5bmqC2nOOL zz;e%}T|*tUGPjl7BZq$#HDQD&?^UYew1VY(J?ZL|_IdZ4L*d)g8JsQO%?++wT3gqz z5UcwwhoimmnvUzC)0&R@E=Y)`?ZYkg?Pm~JoYp_wT-!d-H9Soc)~{LLZt3gqb;XHb zWQ!#qkSk7A&e;@b$R7s+6(Z~Q@-g~zcuqt$Z3 zRnw9KH77;)GpBtfg+{SrxMCB6SUi+$KVk$qlv6``B~wgaW?f!D^rk3*c+lh3XpE;< z$WG2#x@1K(wL3f+wwbaL*F%h`JSP^o4*=8LTJ~3Qf!mB&*`D7vwclCUdf)Yj+^6zp z^0U0GZ~3FjVqtrP8b9+;3vxvhcc{wU#r}+$!`Xei32=5eFFI)0%88mwwbfGdM$pYA z9EP}T4mqxw3QlXaJzTzI-AH+IY&{)`oxk<>yS!a9^X>fU#4>~90?1($Cc`K%$5qmY zVUmgJ&sk{}5?$dTLwyTG$=#rm`F(D>7)knw)7ZcflbcQ=HNjeW3?0k;xljEV?i^XL-!oI0t_0|-R?`{08I|Hp-G zewTIj9WEmtcma2B0Pm3xaCbp^Tt)YnI zVPhSB0lySjo^JK0dSTsyztL9yyMk@%{%GxVXCpPe->#0+qCB;JG)wt_f~{xSIP9Mb zn5>#Zf<`OENNkXRv+~y#Vi|*etFk7Q#G4jZitP9d=f(Z@O^p$9%%SuFTSHefgaczc zR_uE~x*wBw-yYucv^^nLONi6e*Fog~kho1yIvl({Y)F51FiWS&gZ&#=d)2)fhoDjIEDg^C=T<|HP~(ROOe`l#HZHw^Hm*XJUm zFHsW%2>OjM3*U)d> 
z@2wI)b1*PuNjn=oi(PnLM&LfW9=AqF-P5gf@jsi=^itj!=_(~Z1_;64)tNH=;QQ!> z@i0*S$d;Z#QYZG1*}RE!;<&P*s0p*)3T)jk{R~SjXo$O09wm$P71qhsoG48rZHn6l z``_p8_vkXEGxYU=q<7OJ2D=|(`ApF%&1q@Dv(e-B?Fy4T|0HNO?32^nIN;Pw4ha;* z0b`SP{eI}uxk>Kv-CLjOUcQ|?KC`i!cNez9_y^-gFn&?|QwLs66zj+1@HkbIfHDv1 z)_MAOby7>x-F0)lw>S}7sBP&ufe%l@SX$ues`^JEw&W(c4J!(=z`_s8!=TUUe!5<} zco|D{eHxA-?1X6!Bm;xULnf>zB5gLQM$lj@e&>v!8ac2TW|5n0udMAv=ggg{us$x_4A>2w{iJSZ79M&aDbB|ydE(qvAgp)t`;~TR&D;F z8VbRDxs3#(r6rG=(R|iHH~yyM93sBPLEyNJ08>cd%MSuZ1hWyLV+@o;{Zk+wSK^uI z>OP38&C}$x6$3$w7j{nCV;lMhdKexG6OXG*JkmGqPZpBv?Cf_KE(TKsmjU2y~v*iaTaB@(LjKf3T>cxaWp_w&?Cs6-M z)#lTc{ECLw|1wlvxln#l zpUzG9+3kc6x^Ty*sH|*Um1#AXI5m_+mqGuBLpAszDwX)B^M*905nFg`0FW%{3L*}t z^eNVvA9U)TZ>TKVE8Eh=SkbVng!@ArxrJ0U72!Xr-Vz)opOILSoyaO0yHm6wW-F`{ zI>I8K3khN&ZGT*ND8$7%&ym!Do>OukhThagR6?N4;R-uRxkL}ShVbY|%E86;A)16p z91r~H6>R^3ux&G+9R3X7USD)Pg{<~TOrY0e;N!hYr( zlku3x6$nPm56%rP`vTzS(|-fZGXg^@QS$sOkT z{vr@|L_nqYHl}dgv}RoT?xEh2%MyN*LYvqR0I&2nmP z*hY?!Xrg}_R+iPf@J3G9<@KnG%luq+JSpk8ofY$ELNcw^^q%q6-L#D0T1LvQaMaKM zUhR{1WZjIw@*)G&K#Y+ljBmdT?T)#NUuMC#YpCC;FhPDSRH$O7(#h%mv4|8D9G@fD z|KZ3zXEE4Y=?q?3m11GK}+io=!EZ7tq zbDTuD@!wK~Glc%0w7KtBei8)d<}M0B+XLvs*D*+$X7CjjIKG;%!4enHI{^A^QVuj` zBKF0PpeYxQle33#!Hon#UOSoE>Twmn=Pc&w|Mo3R=~?6$FM|j9az@6SO2zmHr@yB| zJz4>#3Kt`aB)!~ zyC2B(Gj4sKj1-}<3=~4WlYS5jNuiCisz>Ck9f?!V!=H$Ma1Dnpu*RmvauyF z0a;Q(rY$~?SB7cupmz@^9^LGY2L%8P|67ggT6I=|v#inqXC;S1ZQFObnX-cJLg(pa zwpO0p$sQv8ZVjkh-q z-@OIH=Z|=$(JpVgpBzji;=9^WlJgx((^gSQj5kj^QQ^JSgTe?jg^}4n0>)9VI>^!w zTtL!tXDwPy0t5kYlS zQ6+U!C^pR|Rwpv(YmIe_V6RX7(RLhV8~#*`$cY5Q6gQt>7PTDEId}}#IxTI zO0!B5Q27J*Z2;V+r8xY19g_=FBWS4hty3Y{C;xc z#m&$walb6~!l-{lNFj65Tz7)B%ty_x9aS4yyH!z+7_Fd|V3Y7gc{EB8 z#vW&kO$tOODeo*)6b5x3bbjbxtYts$U}je&2@loa;|IqS#uN6>xK-O(ii`?h8Qj?g zA6V+(H|*T{J;-ZfA^S59Qw{m9B{qqVtNUxzX{NTWuKKs<#bjTJ`=wH^Wuox+snAuq z_12fM#j6*$BT&)A`ly2Eu0rj~K0Tw1CJuBDTc(|`k67y|hlI&xfve$JpRs>ii zXoR{2<%^l`W?%k7>q7ki!w;Z|nrurC(Zlm^ilc7u$nXmIBNM>H8-FJbE0c{?&)3u& zSa@sb_tjRK0h>7;MAhhf;pm8KW=QIFe3rYrl#5p z3eIm>_O!<0C|gaxCXF9(&2RZ>_MFxZ>nXR9|Kh8Bpwd7-COENwHJsd9x-cth5JN1{ z0U`ZK4Vqph%j?RltzZE6i_BH)9N>O5q!Sy*n6@!dXD& z+6QbNt6b>K7$VtM3p?t5u zxb)Zq{}dAGdux6P8RTvn_(+DtmB}IKwl&YM#LN=sw1>ez?%LImRM`)@iWA9Nyezs) z9xnDMS$gf$1Ao6|YkAqe1h^i}Y1VXSBI)RIHQF&Meq?xi`F>s+O&o+=Eh_FiVlSHl9C8*asiP2I8>w;`4 zOs|68SOq-CbxZik=(|6vXUIT+&;`${rY-xf{47GrwiG#^YQtWG3s=3j*HXe6C1I(W z#rsg29IKl7K(D)7E{hn3i(USa%P%FokmbrePfzvY9t#E!o)Bk^nR0` zTJhmHm{{$L-2>sY6o4F`HxZRxfj+0wKrC9MGgAf=6&+N(+t8#N}Er& zsLE+9923{Vi7J?-rQB=RosnwimV4o|>T?LNMCQYz@0Qg@}M2rfz_a9@9VIq9>t=CCP(mN5zzGhKLgw()_ke%XkJp{WIRLmHxkyuiS{yxLEst+u);Z26LQT2zWaPk+!B?pw~;T7vJeuOx`_YD0^7~4 z%JZ;T1eCQAQA|D~m?#}nOQcLiWueLm&wrykF=pc9FzsVOd1$NIw&*0*^z`S@{d}b6 z{q8J|dx7g~`E7%eP>{;=JzuMD(pMycl;qae%9&8l#_--qkO19R1Ag#dEebSXJ>7KC z;?ziMYo#;um)+GEg?jr;I;7Zi-NYD!+&(2P3sw2lI{LA>F7Vm(M&~kr^sR+(JHk`V{!59pOcQ0$p9w#BW3OfkxA%m3i zk_-v`6qO$MH{$RK-dJYtT596m@o>_I)Yf{UE{ig|6R`H)*p!jwNb8})B@ zK}!U!6NvNF#NgjvLw(Y;_n$!8U4ML6wU3uysLqvNWan*H33D^1b^cdpddyYYwgq&{-DSOd zeF|d9F<3P_PO8-#kS79b8h8B7Qa5NEh2APx_*$URJ3M!1wqp8=((6+eYg&4x{Y zl|L=JGSpAUzLTMOf3e%f9?AG@*hW_`I{LWemVt2QZ~d{bgKeP(E`F!Yo7%}p zaM&p}ohYLT2W{~^*L@!^mSr{fI0+f_r-jDa=v&PrKkDT_9`*24Jf z>7UB9T_ArGY1x@P!Y0C6uz%3osL2i62MB-XjBYn0bg4rN6KC?9`FaLaOXd7oTR(>+ z{LM1fu*Io||M1JF^rn;=5Qnao6IWLMx9%-EB%sg3Dv@N=w6|K?{8Zv}bU=<*=Pnh- zOTe;Um91Y2G*OsE={x*`kDjNRHX_icxp+0uSRZRi{xBio*4LETpUfyB_Ke2t=wKpNpGdaDABQK4Fz;2VbIV|jn*`)$QBXz1J+8&NA}|4 z1#5n(G_q+a_kjrDR(N^!GRj3kV473>K|T+|d`WmdZ}ut-KAl^S<2mmdf8Si+GB6Ru zW*(tZz3h$NuHL)|PR7P0lkog=y{x@T4(^#JqaGvjAi4mKJT)w?nzd7cOWaxhCXkE) 
zPJ)2I5(E65%z1F%0!W7MV&*`dGp10JS5)v(s~=X&h(Uhsb6O;b{!73&XU#baxi%4n z#7bdl3w>>VupbWmCDMpKmnu+mO4qZda*@FYAorW&=?jOX5ZLE!?X^2)X_?PK<(cnI z{qLJ+BM4JN%&0*Q@_T_1Qztlc`N^t@I>Xw0*Kas))AD{yVaDxHf_VYi%XvnVbH;>O zOEsnN;~ysQ8D4mr6S1Xj{8?(JyhU@)AN<{G{WOREHEtzG%51~BjqN5hvhVjAzbq*C zQ#Yexh3DW9&d9>h3%z}#zSo27gDHEE77GK%hbTtYdl<7`ZCWb`uB~3YYxz`YEEwtE zic4RS`nwlAeCmBnYL>-nqIizmSk~s}KW-L}lG8A-#`9h$;1YEF8Pd3A!(UXIi*XfC7 zgH0-{1^%dC1x-%gmHw_Al)ghsZB<8gMOu;%NmuojG|8z6v@RlyvG`65^$7anJOjW@DC~>y<7K;R=2M)laO+aXA=w3{CKV&-7c@^qh@g&i9I8(2csh^BLV zL^ViA&YZ~pi-NeHysc?eHB1hZ#+Wkgt)*jOTLI{r&%G{7Pz{-k zVu$-bq~u5o6q@Zvhc~K#IThr!-Ph5On1~_>(c*$3-0XhCh0*+GmeK+~`<11c%St3> z_Lg%0`c=cg4=1oBj!~ti{28bEr(NyV}0=R6l?C}ju zGI>$c1ORWhBy%Z)uQ>>Zzx^+cS&yU74IE-I<-iAQo7-lriY1-L1iZ;Si)C{*B#nj5 zv+8++(k21ZO+dSXVW0GMrf7t|iA(^&AYMBd$pMw6C`G@N0MYHbp@DY&3f1s-fA$}o z-s6ZAr+KYnZ4j9rY_eEjeVh2VNHuC$$~$_p<9s_#Vue7$AtV5=2(CC<3buBz{k6E( zk!806z@j_-ZK*B}RO|LDUKXjGCKp^IiFwQ8R0|@IGQ>A*f_R*MWDH;m*L^WXa4xB8 znKG&}9nj|$n*h?k5FK7gEP;o_Ld=>=>%XnZX0p(=+mO*`;G6exI2L8BPLQ0411J}o zr2t%#N2$Tk)>?|v@yX_Va~Kv(L}NJGBt-mqTN#UZ$smZ~Hbl$RF@owon}T7M8afLb z)}jrNh(uTJ5kdOeF|f-?K2|}7?MC)67=1s+XsJ3q;lTqIO7qCazwyB0BAVd0#?N?O ztp4JDKRxpBAGLg7Q}Ev}X=%hgZB0-{mdSX%IxABIO3>QXi@f}l%NXV z&ZBd4Q5#qjrltWV34Lb81ujR3N*5!;c0QJO9twtX6UO?o!2vZlu!x;IBV;Og(*Pb; zV&H44V-$tSv3Q% z@Vver253J4jd&st9!}vdE=iout=!&56*ZMFPmjh}l>YN%-Q^pSdz+X`z<+Ma3hrE`tui>#$!cxPU!D7)oKF+F`wKTj2zP6wv zYO`dZcN`W9EOL~u3>{-Y^QFPMD+Hv`Y>q=**?qcCxDpl!pfJ*qW~p ztI+ncHDXPWjN#MPc-p9dI}_9T6Bw&9MII@zNO5G~dFGE;D(GgJ(?_z8B*^H*In@{psVF3#$XH4|qAf*^ zjsH_&DVL}jx)xlv58|dE5nLA}j4C20X8tO$`}K714p!nPS=fNY9P02L8e^#aw=g9V zDRO&7#@d$6@YCh5b7(Gc)rA9>A58XHi$2Aavel$h_0adcEj+_9wfiXBSC*Z3 zKKr=1_2kWI`}p{HawXBn^X}L~E+^d^>$lNh4Bouds%;s>dwt5esg$7K9wVsf!tGF} zf{1&-#i{_HCBb&jZSlTRR>6%~*>iE1)5{LdA^P#O6|h(H zr2mTjf_^F#(yzDXJr!e9mY2OoWM3x4qW4ne$IMX{V<`KEa!c&rt5Q1^!_kVO@Tnbr)+^j zdiho0l$nse^9Hcga1CkniYx^p8w=2mO zNA=kLXe?O->nWkET3%H7e$k|%J<^vcQ>8$|_9aHg7l{*=9j{2*KvNE=Jl3;ZzC+h@ zY7u7T+}Vym=~-W@?YlX0 z9HdaYW#RKak>v8Q9T~CZ2`(DfT~Pvd*RJgjdN6i-!r45zx)7{*KOT78JJ&!`qPwMl zK933Sm&ql%uE*qRWDVTF_o<(X9yPo1LWx=~+F(-0kqiql%;JVAP_`J@_`zrK023X* z%9&i9E}1M+yi}0&h=I|_OhEZ$G)Yl;=Eh*2c2Sw+taaA4STHodQHy>ll8d^{y#o-@ z5H5ZqYdttdZIr-a5vHvClZ~-GU-H0}T6)GF$y)>5%U3?vQ5b+C^HrTM6mzUOELtx@!@T24QKI z68Lz(e{k>IGv}F^d+vF3z;gVq9a^4FQydHe3F%k}AWBD}1OcR3hf1JZ-=8!EGiij| z`<2PSuYzAbHn5p4o^`}P1#Gcb#}Fziok&crRkhtV(KO!&MvZ1$O{9GCU9Ndz`Q+Koi zG{s`w1QnQQ@Dw`Y3yx+j>XTkgaI2(RLo!9BYLxq+u8oLr0Q|@`w9}T@P;&R$-qq!N zUI}v{{aVc*mSe^^q1#cIHRq>{*sdDX^=_Gk`U?=&TLBCPB{xXsbCOIB1w$#)= zXsg_49Msuh)rHfxiL>R1MlQ-P8+)+BhohTttEL(hz{>absf39jPkt-IZTcMeyIuP| zsj@>*JAvLT2L;&r1GT_lUk%mM^=u@U2HJ|u;pX^X(Aj2x@W0{M1!jkG`jlfW$Ai7G zg_QIPdzqr+VCGYipq?ehq@BjnD&!o#55%vIJvL^sM0zJTrPo4BaUo_DS)D$ z?W{xhYB+n8eei3dYa>H29AFC z=a`F02kFanIWy%IE;Cvie|7<{FE|EH{=VQJwRLr6T^nu`^G={BmyDNJ4-SCjCH<-L zQW8Aj8{1AONKbnj1s7$gF(vjvrtknH*%{@Aw|#pH=(<$-8}-22(;6yno)71?SRXCl zRp;T+Xa}UQx1&3;x75SzUegEoaHOELEc3zm(`i<4jR;c~3WFaPC4xM47J~(Rf|=|3 z{huC>wQQ|!(py$$Hu?h}=1a`_{Pur0@m0voUZ!NP9iI0Ex+{YFgG*s!I!}6j`a+8G z(3I)Fh%EcJT;(lzujf*uul^Cyc@`C@m+RcJz0bcxHz7C*ReAGBUodc+kje3CO{Ori zsAan=9UMbHFdcX{X-Q8RxJKf2%GCYTn%Wm3nkH7wr?gR*x3|Tsz>p~vD`?JTqb%i% z%;HOcumTweQA$lYc0}YIF6Sz#u683~Oy)Hh#@C;+mX`GG#Q`fJyu{!T8-3obR0y6P z>jGeYs3bN6ds`r|6oNOHb9lpcqT2VBiT&LQh%o9%v`ReR`uUjQ(i5z^zZxjqkl9u( z4&(;h?E)o-+}>Vu!u;VVdBKvRGSEAV;9D4TLqTqm+DxvP=f5>}60VIv3eLA%gQ^w> zji}j<;QHf!;Hd2tiYU8* zT5}N{+=#`30Q|7Na}wU*-)m+;HMwjT8$wwIV{Kl`xcPyuC;Exd@TQ7U=e?)yc}a%fIY`n~dNyi9Vhf<@0wJtQDbz}U! zj&BC!)8fm58GpeHSJeA7Iot?ZG<9`dLY+O2Vm>v-?A-i98P2sr|)X%th^1#c? 
zgMx@cUY&xf3W(%DwSnnCMu>zQzujX(o%G(zuhT>!^p{vUkX4wgm?cv8_b%YhTa-oN zqv;~2(Fbg#via2|Y~0j(nf9lY(C4vIa9=}e^{_Q^K zj6cj57YqQ1c>O)aDCl{*Dy?}|g8nvXsDqzmn)(B)3~~^8V3G@Gne~y_fz#Nd^?MzC zCFghfE$ABKZn6=9M7u~Rzl3T{;^E>Gnc!lqU|-fz1kBBkHcD!MMSj-cz+A*OZ8A~E zV)@FO<4#D@%ouIyW%sewuGSWvR}-apdb}>?HbpQzgy&DO{MO#f; zJqw{*BPC6Sg!~*>U;X?vw&Yf!6IRu_6P6vP=qmO%^#s5ruWnliW0=ES_~}J3q&rQg zF@Gt-XuDLdUU|(h=NN0YW)!RkWw6QcdEKwSr$7T`6Nb|d1(-!^og{@D%seZ7 z=1@hz=qZx=^Lr`^yPe%ip{pL@smv@EXM_9PhqK%{B0USV7yG2 z76e{FuV-a`lU)ppbvxA>>Gb`{w(T+zKwoOr$mHdfZ_Dy$PhZj&YBPsos+1cfaO*u0 z5J|>CVTgkYqwTiGik)r^&r-6WGT=Lc+#<+a7oIMLC%Iy2R#ACETnuv%n7-3gx$B(0 zNlPFtO4~CNj>$-YZT}Yo{-3-o@|I$FU01^SF%HBf>Ttjeqjw+VgR8R2(?VHm5rm4E z?|8njK8Q6Zp(1^(*5w^AuF-x}FHJkQg=8dLAPbRfko%lq9L@uPC;daw0jn1yL%X}e|P=t#1w3Ey*4 zUObJ^k_87|Ru+4=416_J@>_W)zF-)@ql**M+2h(A8%9CvmxwO3-@D|P_48A;JgcFo3?zzFJWv!^)P!QAzV4Ryws93)gq)G zOnSzx`|+hc$p8{_pTk6yZ`!W1&`R5Zz^ZM|_c6vvPXHV%Rk>ONUb19(-}Q+>=7-`;E`=tOxX<|>wJQQw2pchUARpanV$@p9*y@! z+!i5zBbv%2FhH0t`Smur6uGbO$^|FFv}o4Ro5w$&-*=?fiAUc`p$`)_QKXgyRJw1k5z{H-wHsT^1t zpSQHBg|Q@EW1v$P%r`OXQ?1Rx8mc)*WK))x?og+) z>=s*l2UZC_0HO=g*P%Y2?b^$Qd7lD_`o=yW(?>gJvcRp~sRtEg6M`)hCMV_W>s`Cu zyAF$~Na?rQl8ON(*aWhE=?lPMfY6hwV#+}#iga*vkeU~yejW0GW z21>DpsY+yVgM}Og&41GySi8L0U+jWkxsrFd%a-13pE@RLCsWrbW=|*3;bM-ud+l!v zrkc)^cRmV|S-<}9uAocRHFQ4{%{-Jz{k5f@0Fe}a=Kp90I9EYC3O_^^gHTseZ6jeJ zvaotww6p)YxY^Nyl2S{-S!?D%oMVSoJZCCJJVQjs#tffu)v6kNyKsIpdJM0JrZl_5 zzD3=xl=)v+72#r^c0=C!!&8QsXVuJPMVZtYi#}m&&H{0=cfA+c6a+=E!-}ndti??y z_v*Y7vL9QgH78-(wKW^HD?~_Y-yq5PiE&v!!hsGSqUo(D9V6Z;d?B#Ly)RU!gG*t= zRHLNK`@(DKMtAZwruXEgcxMWw$r0yyyRGIH68Ap+yG0#vcHq~6kFGw`b8)Pk@^t&F zfQAXYQUb*SfA{^$9ZAp=AgLpYron2_y;7fy_M3I{kNT^?VW6vA#|H&rZge?b zaz<30&o^{T5+;zjGzq-`_Vr;OdQ_&wQ1o;VG6FKiADGc>hp{r=B- zwy=hf%L&WptCa$(Oh`lizqcm!v?}{}VTQ)%Pp}Q=)!+~zSm-Oio+$pnQx*#vhwL_p z>_OY)B1~GwK8`Qk>gvBRDTOKx+1bsSCex8&o|&o%67yWJD6Nf!YtwGu_xw3Wi?Hd| z6>V0BN~aDCmP$9VCv!@JjU6!GWp7CmKm7d?bxQK*u~aB{>9D=O_ahV7QuMh`Xe;pA z#-W1V0sG_r%h%mhNijKqJIlM!Lhg;g^_6;ah&Lc>UjAlKR|y~9XUP369WRv(LGx>AUYXp(16C_p)Q z!M(mE50x75osp-R6Hh9!>R+Pv6Y)C!Q*ABr`rBo1k1yo527c?|_A5_S(Ai``|GS5& zr|Ag${Kx3hJvNNHyr|enNP`28Fhl5SKm`}28Rr3{Y8LeJnAuN$wKy|<-l2GY1h~6? z6AO2-IT?O=y#a7^$}p60jjJ_aOe?J3jjb4F_K zE)w7D#+1rj&Rq zSLSHZg^0uTezTyr%R3&as6TZa_hmTim`dnmi(P|3E+ z4zV))gVl~aJ#KiogE#P$_y9Lz-HZ$m3H1qPT z@C&>1BC$fz|6!;9bv>TzWg`AivGRHA0a=z3#$IuvX} zo=u{-ek98z?90kT>^my>TN3h5y$6@Dhf7>?sPplglg>{!#b?sZJ}#RDzXLO@bxFIu z8r_wnFIfm{DK$^NaO;b#oZWV4T8;p=5CDpL7F3xCcSVdt!Qv#3<~kpULJ7j7`(zXH zj;Q_p10I`8VfnIdH>+P?7y*_XCB*sxBfo|-b~Q<6HJ8^V{#B4!OGD{LmL!57_H@Ch z>kb-J)Ons&YS43`+*`h=77`W{IOy%8*Z15gF$$n<|(u#L*aS(Cf z;=h<;AnIfCC5V1-sXD0nuDSjBDYBq$K4^bup)x3NXYU$tM_3FaI7A0Dcziyk8tUC} zX#K16B-oN@j>P-PdHyL+R?>J?Kt&;G z@&M`K9iz2O?p3@Rxr%1&w(wDWg;f@pOrEFvi*dDy^7CMWP~lyZ{#`1h80d3G*!VxDh7- z2bAWm0&yuxDZOsZ{?f8_G1>F~)e+TJJu~bHh8*!zWzR^=ghQStsyFa)T>|xDyuaA; zs>F{W(S&YS(*D$$of$5iLakGR;yLctIkC~*TPu_9hG4DF-W$9K1EB(cO}sU;22~~*6HH6m?}VxpnuBH+xdSzFLI4~vnjp?{?h;0;$7xG z>3JDe;_X-rIvtLb#jd*eJjQeUuzfnPIyqcswXXwC#(ujpRL2SE3($auNQn9ryI-h9 zF2Z|P*VLRh8jA9*t_uX;bM2ZKzDiE{Bw|{876fFnHErD`Oqsm|S9%8)gZ%FGK$+v? 
zl~&0MoVbBw!$F#wggBM1aCg$U_CoO zGK`UDQM_J$<$xriw!oIYGhet<;N!bBHMvX(Hv|HLmuoRm|{Cu7ru`DAp zaIIG>asMFvkS+yM561Q(QGD6%z_l_?+9!~ZR=QS#@{rmSyjPV|Dxr|CsVp~pSvdtT z<`uC0Mt;cd^Z(ve3`?T14>{K3IyHj4QTzGSNXYZdvX1Di+U?|K-@2Wm#<|aj%9z=X z-@1|4)h5TtUGg2}zw^?rHM=!c`+`_ws5xexKtzqsOwhwwKkwG@cN_UW$h~gv`>GfV zPYuM69PGVkNrmiap}?!Z8RADG%;LuH*yc3L!Rug??wcB)1W>_jm^YOhXjHgR5Ztes zLsVNreV>@^(g9;J&7ewAUh@gj~BoX!=#X4&&>W7(*bTOi1j?Uw0Rg&H7an0P1k(f>7D5{{j_xW0Ts8 z>uc3HXF~pwUL8a_F}UB_zFjc&15QhspU1945*LaM*z4#=qZoS-$AC8fBf^HQAN7V* z0qL=BSn-@J|5vNHXIrKq{l1T#)hNx|9ce>@Z%Sv z5jQ-$i~rJ#V)FqGdUmgT#g=5jhp~Ts*B^RLXQzIGuF6); zHOVCm>!lSeUz@uYEy(wm`fk^90jP2nASLwQqrE1LMYwr`8VHp}P_7fxejTP6ixd9B zPgZ4}!CS)Lk_!Q4O~oa+ zyjCx-`7v~JtWXOz_xxVPTzatkwe&Y91Ca568bkzd!O{3%ijuFyzO1*18wywr4FI;< zW(HVzuxr1$SPY&fQ;R)hAJ|2?x>k>KDb_2v>-~aaC7y?pVAld`fe}AN(>jA{*wF;h zYk}%SPU5uvYbQSC&TB+|dI6h1CNNiKb0@e|>mgS6MycYIA`27MdQYx5NlPzuHB_E% zm{e9*2BC@YHB>ziz1Od&YgzvDJ{;Y2D>L8k?Xu$6w3@}f+21=@t$|A+Cs*t#0kRFE zm7x&H_s(ozL_88LIfQm$Q+03aC_E<3>J2(cP{rV;D28R8B=%=^3;o&q(mXtiW zpC69QV7Se~^*7kZ6q#sMG5Y?=Jy;(0*QelDK|0TIb*RZ~?m`CHho-Tmq$O+%^rWKC zDNsjh28GXH_XZ}i&g;|ZDCaeOZuc(D%d(=Pf*s?2M_Kig%I|q)tZ_C3%I`Iz!aVAFlT=3-zMvA!%E{t)9Le5h|CyPr_E(fwaE>TuBc^K;QrS zZz8?x>dB;UN6_8O_izgwjQ5U$xnVE3qdKg33LHGrq1tZKsL$Ftx3S70VO;MvVgxzr z)WCx*Ae=o1TUkaea!iA6>o*JzZi}{LZ}Q}R)i!~~fTn&TdL69Y9UC~q6mia1_yV0= zWD=wwNL2_I@JJ8StohmgIbOaik~6v~L*$;mM-E9iP?aGAcs=N+)GSj-rW-|`9OT_ZW_J_3G;xhwZFD@VpVdfQ=`o_dvpC_n=_eF#6bplCbJ^q01baE zL`s6V1^7WGvaKJw)@iVK!Ki&z9fv@^el{-wkqwgO1DnZd5^8<9{T8=j zhEp&Ghl_U!J}hda|HS7BxE$>JW;Z-3XNC4iCP<5hDglFAG6KGRy^5 zzD@mV{Dc;Yvy&>?g`5Okh3*^NB#swIcIfQi z--kwNLwdvSlZR0&?eK3TIt&f!Wg&wM{0i`+m9>T8bt(MmUl;~zk5)1y38D-~~ zq&Um(Kj?))a+6);l#_Zo)fA}g^4Rr{{FlB!8ZUS(P`4xwS=H#RG>OM0g3@QJH6Y$e zvsD5cH66av&$%&9;WRt%Zeu+tL^NSVi9ZO0DqyEvm9F=@xdbCyHA3`DfJoap3PCAn z8ll=M(1yRZq>$jyIe}vFO6QV&$GpH2NWtEfGo7Joj-BU|gYA??=pexG?Y1hDcdk~nN*Gsn))qgC%a#s~s zGH`}NsWEqbBU|P<|K50;xBQfXiUM-YK2HS?7lve+<6LWgOA4`FCnrB(NwSRCCLYlo zf}3g}pSGh_tw;>gO(GU7fb5brM+BDG~M?{yMD_#Z5}-%%t*ByHp;q~nnM|kxmIP- zE}v?AK5;~1_@_u`a(8je1Ac$E#OY)!sqbOU-nHLw{x8lJU_@&j^xu3!qHJ&q_Fw}2 zt|;DorF|c)(dLu=#|IO8<+rJinNFjpwj?3_JAN{be6$!XOdkFSx|=c#oAqaJ4w4j3 zyr|d`R3~cjf}MNI!tb-TjkhT^OcWCk_=ni~K7{|EQv)3Ctjb?o2WjrqCj17Vical; zuc%}N$m4L3P3>NHHE+@?^L8F$aNA>)oz-HzSxN1RWfD|m*s(@Bg$Et5&^*6emM(B- zT*$_&6IS6Sm?B%!A(ez+LB3aCpD?UQwSDDbyavY(ON+iK6qm5ko?^SR)sh1~^;&SF zabHzZoqFbse|43dJ*fRYQ~&+p+qLVA95hfZ<_a?}218pAVb^gWRDez^(yUvvRoPO9 zHA@MDm3>rcGnv`NIyB5W%L2dkWQyisWrDMJyM>q9aDK@h@r{)9YpCXg1!$4a7A(e$ zBWz4K+ik$-{Nza)%FtHqDMw4mmHgj_-xQ!~I!X)hN7oodVGvfyn^?`IeEcCnmPcAO zZXU@9Z4sszi2|^Z>aE21w_?EviZ4Byx3(sV-M#D$ z0vdJya5HxuY0DD@1z%`?$popKu9m+?L#PR2tp3$?b<_5$j`&&p>Gf0a)?62(wkQSh zd#Wfr&fY?xAe~$%sZId;2U&GyWQ@G9ccu`*!|1k3+!6x4ACGU1{-G|N6ZI&n_bL(3 zduvi@WC+US8Qb(&y~WX#aPP)|{DK+n-MAb5V`^9&bc%3$d^OndkS(G%VlI9>kI)8ia5! 
zc4R#@Y0kee==DqWl2^(~zCK1`ORbAk53p&CT5&xHr6;oZQ>Hx#0&q)oGY+=v{^u z$ssQV8>+B>FkytiqzOdi$HHqd+TzW-M=4PF5`*BAQSXqbsjvKHWP_7u&SP&pn-bas z3fECNA7pQO@>d52d<#YiEJ=-!4Zj2zISKjBB~TuT*s2wkQ#lKn{XbY3d%d@GAIlIM zFVnB#)$)sE8mNRcuVp}6;baz91xYLXeHOV5VSt%ZGly&)tI}R#3MSU<)4;2-AI@=e zmdSs~K9jnx(#J7C%cq%+3@d?^F#gQ|?>yb7gYmoJG`5CYQ~`6x^11UmzxcZHwUpE; zDe$2{5jux5)#+(Yv8}FmMX%-HMj#ZRzywwrT|gh^S>X*_i&?<&I)%_@!3gH6 z&yoA;CMVg;54cREV)xH;1;|D(w?jA~uTPp`vFUyX85*YMx}T+~^`3-He1Cltf#7&nv}%5(R7&J#af z{7V{z)SirO{7rp>`2J7$x>KmAX|y8h`a4WfqJL_cFTS$MWO`3zYyyB_{j)#x)>}n{ z0|K86-zt4Ky$X9<-Lioy%9(`sU?&Xi_gYXbXxWz8!_?28b*3jL-`o3c@~br@=+;;iW{V-Y9p91x4VK&Alut2|HXmilOz9>hqWZ!+M{q;%a86qmy1sF zmG1zW?-~k0iofBUZFuy$l~Sj9GoCUg31o?%B+77g0|><5@Bs7{D8baPR9{Y+B&j=4 z$!fKP`wtsD?!Y@(CnXbO58Nt$a{N}9`csO?GpE#<*LC!UTy-B!5ff-CNJvQfgPGa5TEs2p} zQY6VNa3^8)a|VLAPC*OA{MVI48L914o_}YmTEUpTZH7#8PJU{Wx~O#*OVa=%`jkl{jt z4*z=7-0xjd3WSxS|Db6p7c5WWak&iTb8Z3AgbAZF;iFnR7o<+akVFTr*M|W}c#4l? zJ2~aRI65lxPh)-s94#xw^j{Oa_>EjT+n_Ux^pN;4;Qw<-nP3MbpeBwvp zea#@Dri>3u=!wGW+E_>|(}L0=V;Kc#=<;eVntuD$TA7=?k=`5LTyASC&ntewi8W-C zn`~ytNA_`)VdQYM`7L#)cpP8VDxQCQMEMUb7DmXmSP^;C#~;<=OjCSvali4ZYk0Yl z@ILT$w>XVqz8>FM_4JC+R6k>eZ}8?$WK7l&uM;xLC?J}h^$}WTVD`xk%NYEsx{P%r zlMWzZ8|INkqW-x{u7Ds;oU*tqGuhh_wdU=-P9v~!qAL5}rn)z$K+v$65uw6coMZ`Q zv=cSvxq29coT6g@yKP=QfAi0qhffjYO*V1xLb-S*^>;oU_KEB8Ldvf|Bs%`6!L7TM{*lAm2=|T8fP36u>w&m~fqr7| zeNNIBma)x!7fxL3qG&iwrS7QzrSt{>>BSUB3L#~c{4vj0sgpL2hn>NJTm33J;hS!N zEh6-fX|zSLKMg@<9V?QG(2qM`^53MEGxFxf*k+sfwgONfB=+v1pwOrUp;Jq-M29OZh~V zK#+&32Y==uB@I%fTTuBzdCeSu|E57!g$@zXe!IT0G>_+G`R!?~tqjvq6kujR%b6@d zEIM&;THwf*Bc;?JD10wXvQ4G-?Cw#vM$T${yTDZKXAD?C%gYbJIo=qSMpiI;Z~#O8 z-T|5v8x;0mteC^~V>l>qfb+pjmh+G)$xj^p5j-W9@-I+|itbHP=@%^M#*g13w9_&l zxEXWMxIWNQLafWz%BB}!PzZ5Ik%h^f4m^HgL zOg!5Am9n;R{}-# z^I0Vu&&lFGcDF)QnNB{2;ye7xAm2})iri@|31~`SSAfxTRVZO6|K;&MeYQV8)VA!< zllks%4dG&a`O*yw&-iRE()!{hO}64}vE=D{eZnXoRcs(zRMc2lT3G0XLmH?Mb2!sR`qAUM z&^#N!#jg9V;L0?_+`@=SBakBQ%m%JcZA6!e)!~6NO(t8V4`NpYkP@Np`VHd|%|IeK z^VQPh#Z^ud4Y%d*k?p#WCz(^_#7}yD?i|<&s>)LVoG08_!tkosN+*N0J^NKC4a9> z##!v}Wrg6!MSx`uM>ajK7w%n?6B?EMe6|Mg zP_N~#cWxkeGWE+^Lh))qw>YZxIlU53S_~auBX8?#{p5n#LsQT(UiEy>7VM$gZJUW$GN_dNePEBoL-#qYUs@DZ%% z{u%4Sb)^d4Z{hZH^hl%a_ihQOqsi6!RQI2?-#YzvJFQc0+(rlWnuEImOCFWb_qvs% zf2)b0`mKft(Bj-omO!JT4)*ti=?h;c>CJ(kb;2mI#jzdFxf{tbN2_pV-Ja|E!B@A@ z{Wo>)Vnc=B#Z(6zl8PMrfFKgyv)A~?@qzmCR~_#Pk6jAar?bsYROs-OqtUnh6mQDq z(VKVD8t$_?EyTpb&qm5~5al|ep>MnI$ykYW$Hfl9RVTcDJj17Zo|uTiZ%K>Eo-;sr zpbJ#{G@UmXqoNMI>oxP1I&-eM1?N;S|7f>KPzm|3huyxBe2VJ}m+}=;`;e_@w)D^2 z(Tb4oXYg0xW9R1=MstvO&zWv-$Jv{Dsq4w7QQP+s!b)0z@OOw$sommxAGVUvfGNm5+kv+}MxgmRTAZ^HF$eHIbUmQw!V7g=7Cdej^;`pjSMebId zNBZj{##1BJhx)tljP!8oL z%=UE0mBqgKwfnK=6PqskxREg-PK)z{vWvAl+?>z51~w0Nyv#{df%sYf26~lVbY}iV zs^UG|0b$~5o@x%_(fa!D^yl(N1Z=`F`AB<5+eY0@;!DJpIu$G-Q{Ms#EjI(Jq)+_sy@x#krSGFY%|*}wnWf0(zNtYAZvMORffE>#ux&cx%@>Ik>Zn|H(%szQaYjhC5Jq~ zBTWItX`{Qo_<5z@8kur|l)MoU5>z=8Ac2iY@f~J|yV711M4O2_?1C|5pPTk)@o;W4 zH#S$1=eY^2gf#f=V+($)*dTzL!P%9mxxAv0k4zq4j*t0sf{c=HL6)U6!--k|QM*B_sjSIH1~!Z{;LkW)cYI7nH1JLErcs z$vdO}7qIj}T;v!V6xegxJUXp_UsJ8~wsBt-B`*jx&faV7?XjFPGm-;_0QFcxfE-+6 z+QeQ)3T{! z^ep9A<(av>D7D{7%|~fI|BG|-o=_ly1BHlI%*(}s<{1q(!U~4JJv(HrmSvbp|4;cq z_--h0j!ZMcS_LCX-Cu^aaohIs`SqAi#IrEG`a zY*c)k5NQ*~5ny6grIkawj9;`z9_xA}rK2=vI$N(iArm$VU`h|mr3Vp1$u|Z$ZG>H3 zZB18F&b7xWvU+__{DeJW1a-+7r*9uAQ-o#^|CLdso<0#2efpC)^q zmgUEOBXgUNb?&FTPT$@ut7EB@5YJk8-;Ic3%G1+pE?}zDAK?5pwq(|xddh!}XGg9j z@rI@k*hT)70xL`Z0>D8ZnrY{}-#Kfb?|dvbPK-xDli4P(9idk}^8FDs5PKHaq(%)D zST58}X=YG3*Hl7KQllPVEd>6BJBJ7LCy@N$;nenV$9hWP9((6zNkKBq4{mWBy?Bns zA)jCkI{hek8qjKT^sqx=cMl_AruncYt$vskT+h)=bMkv3OkYD^-&}MJ9}{Jv=)zc? 
zn+#`Xs{W#bDa)(P@vrrCgBKpp<`3JU@l{)~O*MI&aoT-sd9qCc4IJ85f>2;sVpNOR zVOsI;YId?FuY4*GXuz~7*CkSvoF^I-X=)p3~ZTVFtxFsuMt=q6XtE=C>;1{+X zQw!30S3W!MyUJ|Tj5Li`RaZN&co@`xIf*1xyGHah2#Amkr)($J!saW{o6-_YPJUDk z8?L#)CXHzqK9A?cr0mlDhXDyPL=PvktIup7(DV1lOP(oq#S+b5dS`Y?EB`>qzS%p4 zmDhe*cRw-qi_pa!x%#)>?!E>4_5~;~{e043L#u#y?MTuRo+a^)RCBp0e1kMZZg*|*V3XrGc`1L~+@TEyg z^e6P}DDdKoKeMQPdy${#&5y`b?OmpvhD|g=i@V8ZB&ID4m)Ey{ZeqAFiykpvM`Dmn zV$mc#>n?o%uv%m|DO+7v)Bgq+yGE7jw^%q3I`EPa#}|uBa39AxveX$ki>GVoN_~ic z8KKOm$j|0?h!KAUqCkDo!b5JB5Fi3bv9Q9lkC+|s=TNvps@K1UoC}rrRI@M3Q(kk_ z58RM_{zyD+xEn?yXoo5858*oEV0bTnL^aDno}YELhHb^-gxJxtX({8l3A!m6JeW40 z{7u5I0a!Zsp(qp19BlF0Ci2}xqk25a>CUL0>e19$s?R=#D;{{QcPPZ=iis99r}E|( zk5SC0vzQ!k4f?J99cYEopI*Z32lRs~r@7A7-8UpN{fp~r>VloR$tkgdH!@0S3hVif zzWs|5{yvC9a^Jcr4hS`+^+khp6vqAye1+|gADup3woRvhWeU(=G78u<`X^iN?Zyd}9nlX(F8RpvY~nH`SIpcnY~XVWV8x3Z9tA`~u6F z3Z3+NPh0YMY#s`m_}qF}A?%%|n%jrq$-O!1rIXB+owO8NR6H>JA|mz0 z9YZ-;<~9^Pa$lg4m{{l7jFT~@UVu0R6Ooq|!I~Gx>ghU95}i<1REzvkobyI)wLH3q z!2bH*j~)G|uMeLb7bV=Ae7PQ@+*hH7Bth{Tg8W0yxm~>CyRz36QE{W|?=wwupfh0N z>(fHrBowJOJAdUn$ne+L;GvtNql{^f4g2p6mVYU-+t&P_Rc}{Z+R{-R*=@Rc&HMM< zvpRT}`b#I0&r*waj~u9#U`gU8wbkR*e<7&5yp=J|hPHoZOn6mRe%cD-r4zl>>ZTde z((W>Ov(AmVVEkJ2OwBmjVUGd=?%LmKfCV^+0E6NshxZ+};{4}U9IK9~J!6E|XUm6S zg-Fbb_o{`x+nf9VgCJGD@i0JJp}1gGu{ok+rmZm2EB=}i`m99@GSrzAXsljxuuycx zCKHf>An_9(N~zEV4^4|7MaMRmi5~+P026=hQg3EXR%n;C`X0_Dc~wEp@7yof7alSi zK0<%A>W6plZ=4<6js<*a`3 zZ@Ffha_p~nt2RA%mY8cIuElo)x&IJ+E0Y)nuef+eF)jRN-MIOEyfAB8?ryttvv@So zRmE=k5&H2sWGe8gb*OKVU+5MRF*1u)(peP{e)$5UzvBw|_j@$ShuRutHL6k+#8tbbCnehOCeez8i_i5zGMtfE>KyJY*2hR2KKs%o$`I3l{S%2*6><1e5z7}%rZ4+4)`5rv zYI?+}xR9YxVA?Pb)~qB?%%{qG9^S@lN7Of~cAwMdo9F3n2?vfynDN3S_x+{CsD~8* z-2`Lcgf+cklxNp8qK{XW@6XAHO^byD_f^s?4~~2LDL#dkIZ-hR77J#Z9^aFyL)E;9 zE<-~){kYjaey9N=8;3E`4fue5gp%;`JtE$a8 zkQX7R`nq6E8!+hfWdj+zpeAt1Mqj@b5i9lgnCeY1E73MCr1NWIOSY4-J*vS@TCJK+ zczBN}qeEurVE_EQ6AzDuDOX$Lhlicf4qxQiz8{?ta?^nQ&&}43%mw0RQliZVlo(d1 zoC~1MFgj8bdC}=+es55l?*XJE!%&#gekMm+Mgx%bsLs z9JPc5Gw#E=KF9DCCV@oqtadKW$-&tjinfG=d<(RX3JQvfoTMbo3X?(zjO*u>TrNVPydtt9QDKjktXXJDL3PHB${mBG1g3c!+C8z@Q2Zo zE}I9o3VGBL5JxDd!H{ZK1yfGJhjwS4hCQtq)>-)1T?GQbGo4y zQ3VS*b+DEz6G2y-71QvKR?>7hn~gnpJx|46A}}X`4Ro#1=A4@NQJhhgs%X|?Imp-4 zE5#gj8B-AvDeH0Xw@bgGd?=}$BK@Slm-0g@!!qN?v7=6!ng1z|w5UZu>Gjb}Q1c!u zC3F+t5ogr}<2v;AtjGBd^1)-OBzNqDSFl?{$Q`j&7I)N|&n$cPxctwz`ru8M zg*A?ii#k#t+*G0U_NLzcU5P`_z_x~6EAb?KeJA}n&e+e&6za8C1#<9M^2Xm5g(Ig^ zj-o_lLq-2mFx(=F%h9jx$1-{^mTA59I&)G-ftU0nco7HTFe^}CpD%DrS96x|^I_iI zR*+-^Xv_GJC8}7Z@VWkpv=!@Qb@UT*YNcdo>5J{09c7Vu+M*!(m@-E)kT**YaW9R~ zl4;gO8-<CPie2K z)e|x!rOj=a)q3gH`W!6qT^ogH{uPnzQ{r|J#X%2+-6Cyc@k+nXsP zH;$xf)F`TNQg?~+?(H`A5V73=PeG5sNBQ&1x$OC8+n49=hJP24IO)$f+w`+|wHf4U z8&NN;j(;!G+6PRtZw3;Y&o-ZrQ5vbI~2P6=aIDW}zmFC_~*XDh>O2sqcjdB?S!- z$?!E#F(}ltv1(_eAGdr|5ThU-k=tu#E@dOq970O4qE{xXYyauQ+oW~0@=o2AHzUTV zNqx=}%r16}T`>=b%(jd7Y0hs$L-2Nk={Y?b;t)?f9@yNyU_=OIn#iqVh7;E9-uRS% zX(2Gh^_zR$wR|p;y2b8?H5t0v(5wqO>EWra_w3vTuh6G2{M93Z+o9olssO*Y;`wN8 z9CoNdE!GxL)vA$Abx|k9wIz{0Vow(I*$LnS3GHip$6H2kc#0h@Eo9ehI*IEqw02tl zQ1-~~O(57U;-&V~?rI_exdrRITWs_G9{}V)8^3MOew4|MlQc~c_|H1pt7s1?wJck! zh!sOS{2)j2u3a5a96NxknNymjX}y-h*0+Kk`Swq##gY8B;>Wws;V>T8^THS>uxSVC*pt8NG``L$f`wRdo>!hrEHA*&gE}Fx%KO zJM8nK(Tl#)@-EDN@z&{0w!d_&s}(kzmX_{RL>p%?!lm4>O^x92dZZO{H@ z3jbL~dll{K_M8x0D(2d?Pj+l+>U-zy+izP5g5g<~#0i?isJv(Y@sHns|DXTqZS?Y! 
z1ZwT8p|+y^?%nY{hYRQMJ+PPoNB_COTv!mloxf4|q+aith!Spjqm>Gg%1wP>)QL1H}UoVB=2b~(IKp_v$N@=3wHL2 z#txJD0+QL(_4NZ(O%?6cvr)?ywGRlriT3lb&vcz;_*XywE8o6I(j4)Q3if~fmw$Qd z{eS-Z_f+4$j?sg}W~}}0>cXxkTMwyJKX(o-wU5LeC^G0lY31gRz_HgQMewa1?I*Sc zWnOH5CE%;DyhE>i_rKMhsVDY!wL_S_vT8VVvWR8>+R@H-1BHD=MYJ?^A+8r>{y+B4 zJgTXyZS;Ge`4B)vAOZ<+jvxY}-22Y+NEp6(2;y2VB2-NmSbCAtS3#Ql8BvyjiXl)% z@5uj4z4K`MjS`a3fwJ&>4eh1KkPwK3Bx1+=oDf1loGYd^zdu-e?{jj_euw`0tY<%G z?|h|zBWjPR{qZYg{KAdCd-u~j98tW-sOvi3Npnj<`xpOBr~6-DeF0{d_!GWmZ!`9* zy?)$dsh1bzH7<$0-S&sK0~In4gPb0fHalisV6_86%8s1GwSoKh^5h$?b}8XrGnn+F z@BbL~7JxMS2BXL?xVJLqm)W^lX(W0^Cr76$Z?Gd#<XQ+8+Yp?w{+2qL~+4?TC#lDBb>}Rh2S-@*U||)Q#uHm64j=Tl4efP`tR& zY}=K$w)*kUf4ehpOD0AyC|I_}?7-^{DZ=y3$wbs{hC1v?zu5atCdXR2&!rn2E4O+N zOq-vd+9^eDK<#Oi^cgBkNNH^S@d`=p%`6M~7>T?8>ys~5R#I8*@9KCC)D8r_K4|{v z_tEqqdlbn~IhuUGeHLLyR4p`;f zcs6Y>{a34#F~8cCyFHCUFDy*zJ!S{D*KTIe3VcaRlNuqUZ)wr+kDx4lNlT0BOcZ)@ zB5I$NiG2%_(Ou)B%)VCbPO5KiMeXy{fzjuAHfm$TN?kifEP;SCtp{~FW7QbT1l0Z& zETgV)M-&(?b8Fz2kcfGE(EN^H?7%+-_K*n&!e8wG-j-9E|HBZq%kwUO@I1KjlbZJS z_F<@Q*?6)_SCz>8#m`DhPvg}w;+UzI`$kQ(Zk@2Dm(Uc zRJAr?^u$EeK1Cza=YmE*i8#xH)oyvdY7ZX8q1YrK6i_=JyUhA9IZ#JQ)A)AUosT~HIOz3{KmN$p7*YFUBc?*e{kwk_PV>YR4^Yy6URa7uvcjE!q{OylmV^|)NXT5(42n2WP?K|7>Y3fA=SaHN%Q=Eif<*r83F!8(jXFnzKE)u|Rh2?l=a$&=5(l1&GC> zGW7t-l!o2J2ibEDIGOg#EiB70LKwg=n&e&B|eXv!n-rH5ZSB=&oqISlfp3y();T8;> z1y0}?m@>~U>gT6?&@f8>!=-) zG4Gh$!R#Swk7DPYPOs>1Mw^^YC4ue(Pti2-;EDRIPQbKa7$evDwVqgWyR@@1{moH3 zKyA(`-T9qA{?qU0B;DB9iuSAR?bkfL+7%Y-2itdS+0g38oq2yQh3o~ed@4q^d5m4o z$JB1yeB`y){!I*yU(U84k*%wCRqOeRs=Y5&cU{nzwRM-DJyFqhTmv=C6{pHiBcP|v zc(?^2#2-4ZJFa>|*H#Q_H=b|q1pRlMZRZGBq4V8mn^aZZUCkJ+M?~!l zcGf$WF)(|8v$!nM_c=EBYaQ2HZT@9clc1kpSnv?jQ(>cal9bN0Rug{BaF`S_MNts` z1l%4dlDT`=@Al6`L1f7gwS(0mU=N7Bu{I=Xmy%MGZt01fWmH;uK=(mqvD;_RgHSCO z8cmwqYn9C!?DLDnH(l)r3yC{&vw!sC(x1G)k*lXkgM-7$0qrUVe91%Yg{d1Cxo_H; z^XGr@>!0Q3>`o(51Vuye8&v7A^X)P^d3m5QW?i*69XFmjs_Ln%Y;D<-@Le$>VVeDvDojh25sio4`b-b*&_k?a=QBj+*Lxt8Q zqIQ#0=C+7F^NQN%IiD#bp!WU+k<6T!7$2W^gp1k@6h(Iu;<00TX_zAo^5iWUEW>=Z zoMit50Jnes$BZb7EKP-|9lSo+Ug{6nuO^46{S2IAZL?E>wu0waO_1Lj0UHQ=qUEQa7gdU{! 
zV~34Zr;i;yY&=m^X*^fNBkoYe!B-%3Z{^XtMATkC>(gjdCi6IFnVhE#lW=?-H|?Cz zo0imW5-9uJ+?;c2GHldNVpX$B*3r=-Wk z)c(;s0j~qKyHXB>9(#>sPAC9n5yIn86J%l#v?z@rF8flgku`z%2~`Xn(cu+3vGnRD zX$;_-WCFv9B8Mb8LG2Dt^a1;nbfkuVC4dlx6lL+^|d;`ywE zZ5tD{=a=T>WNj8O0wN@an*DFKuzmkFSmoXjcfVZwsfvn=9fvf{ZBiDd{JiesX;Ayo zfT8Qkb%(Y0CS;1=s(>s^d3iU)?x6ObD8v_?tFEl7hMU(XvW!}9_NwrN%Ur}+%qBr* za#{NryK9>B%s~*}H#arG`5dl(`;n@*h}i*ZS7Jm`?tl5kirPQ> z8tHEBT1W{eaxORoKw0>DUirdgoKcN!G52@ z%H+Ra5q+rIb!|lmVW?|=Q>`vJcKoIDx>K#7_QO?g0LPm{)UG*UtT?vxp?UY|5Vare zGL}nspS@7Gq;}MFw)@mU<4byUA0kov_5MD>ZkE(;oApsLw~TT-%nPD^+%58bb7rsD zIqvS0nZl_;CJINfoq7U7a+a0JOB=J&G0!pewFN)@BB1t9@6aqmV!zv^sJ*nbG5Aet zR=wx;ty`THNh`O?VxrPC7rsoz~BMvq*=PtD$=Iw0K5kGnI>)iu@)@7aMA^xNmC!jEhd*) zYCGYzjD6PZe}#vKgpqU7-}X^qqIMv8NB!2C_Q9>MKFhSs`!~Kb)ZRX%Skyj{UJop> z*2v?Yso2xDz*OGORib4gC>vHjKur;4Ng<{$J%E-u)1f>3mKtnrtUjb^ee-ll)6t6R zvqe?9b7dM$Q&CH2h}xy%$4gC6{0PgbTGb_8`AJ@X0unOK<+`dmji#hnQxZ@+_ezD) z*xgD*)V?0B^w@ql%sIB-X12t5Z8nERCde#vHrVQLQ8?g@wM@ZkV2mKlcwOd&n0Z_H z6hE>QD+nw_@MMNkkxnx-&82|BcmLqCKmPRY-4E_yOfu2$-ogaUg4%=RWG5=eLF{YQ zj%08mo|Qmj*6X$f7NMCb5a}!g*>hr4A6y!ph2!uX9^b+!95AJbQTwKB*C(ax4mZh8 zfv@4eOE-@Im3K+)h?Ne$7S!<9W$ib^WAz>rJjCgZrloUOozii9b`sPs9ltjTukf;Z z{{<&J)~hEVH#TZrn1w`937+YP>;CDCPpUb?<>=kbyZht4s{t%oG#wmc1RVXcGTOZ>w2SgZ(Dcwakc(}uHw?^y$5yY zo1reItFo-DvaPHspmqXmuY*=Z?d#${{&UnU{ec$zoty=O0Jh25N!z3}28x3w+&T#t z!b9yC6M{@e69nNeM2Fy=CNPT4Yp6{WAi2)yc?8_K`^l#)%TPhJ=UhvOyDfQr9kuI0 zu@07)n1!_l5^-LTwr|Yff*6~Eni-c*7>6rfIvO-?=LiA9_t}%8beO40kkoE=dhIeC z8nq*p)DW5+le2bKu!7)O=?gI1JWY&AV;W%ru4^QcX=8-e9~%vJ^kQN3AV z)ai7_^M?p^S-J82xh~_Wlena^!dP8xtk@65kJUl7qvBI=FYrbQ;-xlJvqRL*H}C1z zbyve9N-DdTgbt3^UFt*;wLc>^UhSBm1eqwZ1Wr$lCNmkDr5T392*<`71V-u-cv5g)xTX=HYtJ(lRRI z-TusclT4=JxM|n*$qY&D{V}wvDCU3$es|r=kd(!I-718Q+6%I$wLs5=GG(}C@Y>gF zci2!sK2lSY>@_U*Y9AGtO2V%8p=(L0Uu)>TIq~Yq%^|tMzl@uZy9EV^fN?@#*|b4K zQ4|XZwE?v&$&mXrhfb7VEUzk4Bcv~@g64^1B!c4<_FcTV??RLMt&Wahbvh<`E88xf z>gwKC%pv_N`!1GOzPYEW7paS2_~MDT^qLEMDoZ4_>kn0Q9X%6K`?p5z21=BP9L5w& zMJr_)TVp6JW!h zT6H>tYNrwIb6hu1tJN|K+&l$upOF$WHkZ}w86(j+yg`}{#Qy!6VWak4pmsmKi*DuB zo9%f|*M@4^M^dK8d$f;5&!R_}RdW-jvhC|CiAgMksSMZqW|pRsQksGy z1W9WQm^?N;J^cWX))3hGF&c+jNUhQKv~>E1h)!tg{Y|cEo$!g`H{3%Oa}(o?ke zqOq+PA$3WQRu7jYgX>Frdo(=a^_otQbQIBayrQj+kEs3IqV}A)nPLr(0I)WYu_RTV zV_EUi`6?|Zhyiu{b)-p(U2oPE1c; zpY4NZPe^JPSE*gItacGrO`T(e$6+&1x<;>us9o)Y>Ue2vMeR|N+AXxZKXAc3gBXj$ zpDmm7oSB#kPA9C?ZV0VXQH;P=yMtFR-+2H1r&~Au!IfcnQq5Mwfc6Q1skrM$Zo-D{ z$=g1By5Rl84>u)d*Rb zq6_8S`!y({_HT{am9JFxEXiC*m$4L&BrriSqHv66_(=hpIgS5tF^{e(eS6PwP>Ao#ui5S5;PYx1A&-YX24`Wa^HV5SZs^ z){qd*F*JuU!BI3#@l@IV-)St)J{!Xru#H~^P?~>bHW=IH6oBBsuE|p@Yg0n;rHLp)5KM1Avd$?>N8+fbe{|G?)qkkt?9omXQTw-O8I=c*(+o{< z(QyJIrL~)~qZ-NLNZq8N*=PZ-VXS)phCtICPqL}&t373n+6k}!xT$FZ+7#&AVTsQ4 zSnVt=Woir=G5xktyM92biHQ~jla#5+G|Wj~oZhSU+39JE6+&+>i$Luzi)_Lg%(Z(n zL<5Y?kHJ_#?XKy}7?bU~Bl>^~DrjUgCMnm>I7esg0_y?I8(65BnVrwb^elK~d>EzeKa|7ND|}Dw^JpwOmseSz(vnyEitRXbo5)OUy)gDoMd}**k zhGbZgAu(tj!v#fBeaG1rsQDC<1Xnu*u^LWOn|Ok z4#%v`6-#6|Z8q23EV$Rrvr>(_!_N5N`s|#`W}EVH_p06Inw*6D7BYD>Fbg;KJIs(Y zmm$FfhMzL@xumfv7@N)T*R{i&%uv_Pj7>p%&f(9sb9S@M<+RShYi0aIzx3Lm79eL_n%5c?f1AnbxqvI53%Zq&DAvjNt=|sSHC)mzLEoG7NhkweN;!DG0QJ z-U>=xV3uCz1K9$j3j*Ri;8$SELLmZ5-9hA?v}H;fvpJ?|v_|b-|IJ`xy8usDTV|nS zhNqd+5tKDTN2#B8NjJ=a&SNFDJ0_)DtQox8;BUDg;E%*W7FeZr0Qcf?byx9IUAc4A+3w74F3z?NeF(AyWarubv9bB|AG? 
znn~zX1`87{6N|m~pGxoA3YiE56Iaj>V)lmZF%)`U@M>;-REG|?>O=YjMqxoj?L^wn zVDTe?p-<2ZfeD6HG&UTG1EZ4|JkK&DQBnk188uelx;@6SF@|VneYMxa#sm3js|vJ= z6$)i~V1yi#Hg-`=&{%3)yiTXGkAiXf*n-n)P$Q~O2@OjohY)8`C=L)P0Czwkp*%K5 zVFc*My-w5gxKg1gLQ$O_uXUj~iasEX^(o zc4ao~CX+0p)kH-_g_dVw9PF)AwneP;q6oa|AplzQ)4&Li@$vv(8VkJ{dVyaA6;7fM z;7T~D-SDX8h`l`ni!&2$x5wu*06ve$Jux%0IMCCp-DrBGsf@cl9+(N>^TAYLCXYOo zgnPb5jevQAHKb$}jugt{V`+?@7esSMweC?l-^!tIAfk36E({AAajYOx&o#&1^oCLQ=$tC%Q5X_kSdO*DSyqP)H%JkjA1w)V=@-h z4&<&<`)#*!D?CRs`%_IhqIUUCrS`=mD>n{>B@y_t3f*I1tm;JJKt$~bi<`GgWipLB zuhyr@C?21=b?Xe~SP}G225telOEvF2l8|lpNA3A5YQOEVB<%JJKWF>qr<-v^?Fo4b z?F+}i_S`fSmPFu>KAzfN(x9*)qIQJo*aUx4rZH#N&$h&6NM~*Epmr7-XbFFp3-l-@ z++CY24=kh3#3~cE=jZx$pPiDE z_K4a6LWoP+1#gF(U{53$wU#pAyIqBTR~o_GKN{y9#K1$DPz^al0#C6 zN!eA~;HS3sNPb~*{I=K(fn_-y8_zNnc?S+zy5;|#iDPJ5l*uxrYM3KCkK_c7&}%IC z|Lnb6OdIFE_y2olIQTJyMF{7ZlMCe(LaTk*<}Ub<`|&|4HjaZw%C<*h?fp-TPm?;> zo_ZLtA=uckbJ2ZKX)C#ilE2iepsiQh-7hQ64MJ$k?L~#?VWq3pV$+JtcVDPink)-y^?&r)Zsr`tl z9hWz+4%XF;wVcxK?9xk?wYSs{?4EQgZYJ7$ZfLNfuDSkQ%}nc+8S!M)nNwoNa?4nA zb6s86pj2vqPIp5~XDNX|QTx8B>aE*mPa40T8z=(>rPi*fegDKYkC#LydsfyOt`JXm z@U5tBJb&q5_MlI$xt#l#J)dRYN{$oD45n2wO4cxZn7s0O>ZM8 zD5+gh`@TVBS>?kenOt2bdhrPmrP}t^n&zgVfph2n{LiA~W&TC}5s^wYfBy5i?xCjI zv#pid5)6ezQM>lt^IPmmqq(mcjxI`SzdSk`b&EfvZuj$ndv`Dz6@T2L;_vLCrglZ` zWJg=|*uASenZ2%^fLB0>DC?*^HKVDmt7~X%Y`WJp*fn@>@LpqMV|`uS+0)fkZ52Dc zkq8vE7nJg*o*DLpv0UHx4jfq&wcoP2(lJlWla}#_!#xKgp<9QQ+7-1URaskg?O;!C zWi=<7{qd`2M;%p_Dug6yBTTAFOUtz#ogEdO+D>hSw!Bv|uZIJMuq z)CLL#15=4af~Qcy)IM#*`QbVwwJT~Tv{y9E4_9(44{KXm?=3C$G{2hJi9ZdFY4bl< zS5bRmDLLDx$+9PljlsSOP$&!ppY>(}BLS;x+^9l@Qaj|wp{QL^J1T3_)DQJcK7QC- z-KJHMnn`g6m4KHaT6*d`;E+(%PMq#-UOs8PkMH%6pil^~ZaiR)&dv@G&(2%I8U6m{ zjuo{ZZMCDa%9dKm>qE6Om0J0K@b;D33N;-xHFx9T8y`rmsgFR7f*Iudy zh5bXsqBlk6W|QG?*c_GP`z^CSe*99qqIN`_Y^4X7d2^$N@9;rr?YfI=hM5w^S8v$VLfrYQoEvdRH7B@>)pAWSYNO1%xga7 z@N4ag+Q~D0W9*4ybEvxl6!r_j*rXkIg*1(y-UwzaqT7cTS#uM*Arbs#! 
z2SD_A#FL_c)a&(WJp};q6qEHvygCF4J!9~&m|$a(7%RTZ8rTW(TRh`&nG%4EOwj8} zX9*QEHJb60#jbXQ%dU*wTe~{>xJ%R8Uape7uBg4})DEZn8dL1aVSQk*6AFRS_&Dq( z;3ZTpF}B+nR6MC2GY-zi+0qCF?b5)DbPfw=`G}YQ(}WWQE*hc{BXAaOoR|+H#4rmvwK_K5 zq0tP>+Al}2J{gUqcpqoe)8poFP#w5zPx&&VqY+~E=T7VKuqTX^{S~-=9@Q1P4VJM z?ew%+PY`;O&M1t~1nHR{rv-rp89F4S0D?(Dr%#UPfQSrx5m2sVj10^h5a?}TA7F3P zL%5O#417TROX;McXlw0=EN!cKxH37}^KQ+RG6eesA989})DCrh&FqOG)jd!J`$d7a zi(3rX33wT`(IK`(rm5md?R+HZNHa8{_PYgZCbKx}rG;=1Wa#N|7Jv#zje5??12sJ_ zeapaQ_`tj$fDs%tV6Qtuxhw_@ajzv-ylN*(wUwHNm8IOu;F&5ddUe}fr65y$YOm_+ z+GbA*zg~K`1NMu8ZbVG&p}o}Z6xAN1iYK)rIvq0G7X9&wS&Ku$|BBi}ZgxlQPPw5* zZ~iT{Cp}bgs+}mSs1m(CnOnP8JJUgejH33US9@9a`Cr+S!q}xc684J%FBN3Mi9HH3 zoLG<P5BN`~=~TF7mS4ae6qzBb}8SCtgsyNm6?< zMj_+0z!2$$aa>Gl$EEGnV-2fUdxjd$R(F;_p(t-{ovF=rwY0WgA>pu4)c#Ii-ASp) zZ1wb4fWp4P-i4btFGmP?3Enz)^Olf8M_uiVV0MkiLdi7N1-OhMCI~bi;YLj3BM#q- zYM-~8ybkv`memeEhs8CKvDuC4ms7jj88ZZu)&v^2*iDIIQ9Dsu(JJ+?=gv3ORCSi1 zg6=50w$>vS?$_38RB%WrYKP8&p-uKcc(S>Nksp&rwF0fEB4L8-9l z@URcPbX#u`mn;S7Bfn&c@=pn7ArwmhkqQWcu;?d{FA_GJ9bP_RwbIYjzA){unO(e^ zorB&DaS4~weuBd_+d`u;~*_8@rw<4-=hQ@ink&o|kpY}{#FX5Tj3 zO_%-z1wbJY5GykC5gz^H-KjaT9^<8s{(kKw&G_|3DNqtR#-R5R0ElP(`XB{Zr^CC` zsD8$f<)zoRqZq92g;xTD5(7wai;-`iGb%I$|d@KE)-N3!+z}uWA#fT_3o|b9Wn^3U;5S_ zu69`r1V&H)#vir~mNz%IlZUBxQd`x$Dy~cFu5alqS0Q)?GDfU{m?-N!;jPJ}D*<~_ z*1ROKQV9o3MbpxM{n!7Kz!h`sByD_LeG<+Xa1%b+GEslIc15Sc4i?Du#0<*nPXgDzqE3ulT;sy+7VfL zW$fYArKO$*O~*f$*RkI=JUTk-NMXnizgtMNzwt78s#^~2CGh(oETP_q+W+&5!{PrF zgWAg*E>#~)Y8TbcF5kGgeD~wJ3pdVgG~c+ewf*UX4{l%I+PHJ=?uR!%TVKBa$p@d_ zSbuW&LG9fe8utBb7e2qCS#G@l;oWNmcl(ouzE&s*3Md;(N9aw?bV&}(Cra((-4s$ET#R-6*+Fq65JhL%&J*Act`N?EvA zFdN~fjcPz@>3#V*ic?trfZ_y}@2J%vfjm(!M)C`U`q>-s46XdH{G&=cZ^(#53^W0G zgDHFz)c!wfMWA+6)7!)z1pmLJcJ`ACPcF8s|N7~LWvL+ZBLd`0@ql>7oA%Qfd`!wakxeDkKm=KNk|G<3M#K@%dOarbN*GgF@A$66R5NQ! zlg(wY2Z+G%6nbRXVzY6{=~UisM?sHKN$vSk`Ck9lfu;7;{R`}`AKtxtV{lupwLiLe z_wKd#dkFoSw91#imGsbH10Rd{9 z9`<6Sr1tz!)nBSTs9HO__3``bx9>Ec{kNUezVYC*GiR>X$!f2DQ1k1JyJH_*`zLn2 z=ECg<8qM|FpMU=OMfQoN@%H_iLf2yk2dWSh2n9s?L+-hoFEFeO2uv!@8=j?ucz+%Ev!=aeo z#07Y@-(uwffMV>Ut`zSVoZ~^S;K=g!VY}B)!SmQY)U4gRmd9n{yJ|NxlI*8BJv}ku z31rnegDa3RBmlGC36C{nFnXreH)_P|=d#)#U4O)GU4LKvC>LbTT$^EMuK&~4h3e(4f4k87@rT>nn;YAow}|h4 z#uk=Gy^UHZ913wZBFx{~6}vm+r4byK+DqH2iiDZ2C5e^#>I(8L6=arHCLcent!~q* z&~FQ9VG1OIf}q<~J27SPQ>5M&P))hVF&G^-AI@0D5lFZ!2H-Q?I01abl1|v842Bob zo<2pwUZkSdm()%K77R?7Gl%tjL~xD><}?vcC&Ttd&Sn=D9hMOzVYN6z)0{9K%m}xH zj89Q}o~YjK)f_~v{n~>EAAdZP-FR^0;-}X>+upo?r+V}HC;xH(UmG{>h_#m+w>M_4 z-MMh%U(asddC+|EYoEXW;KKIp`|n@8_90tX8v8W}6bcAVFp+6)es*|x)@>e1 zvs8ZiR-r-`WQq+WPou8X%~Yt~+BE9Y(2m!U8h%gh2t>YlQSBSK)QEAsX<}3H)5`*yl8-^fsF}#j|FI-sgAP zg7L^`fHg)&qb4I`xA}>{Y=#YbxPZ~T0%YYCgO1*{>|C`Q$U?Lf`SO%$QZoow8@{Mc?@}RwL|q5n;##alJffR!`d9O zfKuiV|6J`hcSyvUQORmo>$yPIVx?YGyWkV=GiIx-_CJL1IKt-3(Qr6J@2EYRF_~OW zcZS#70y=GMTiw5^8C)WKah<1^`kEb}C}=0t7iH0aTjvifT^*@Gj1P zUrX&uYDZ`L>YuR3nXQ3yr$C{wAWTpRpV4Ua>3GrZC_lA}Gl;924&sESb|AE+=m?&& zggvtX3c$F?F^IQ?c;H2~qrid*0}du&NA1Wo?-uwM)K0jfUP7w4n-Y7ey<@FsNK;t~ zyC6cPVxobBibOkV=S)VUQAYsyCAH(Jr)qcdlG@QLYPT4$lGUPK4z(IIzW+_dP< zB!F66aOpG2C`wm{P3LI zATzhqA~QEUA9ZHNg9!p;h?&~DJ-D|dlN;1@l1C@CD{9Adz{XidEnXfF6XrOl!~& zy_s`|UH+X@{Z-99x!hy%_pyArDqXJTa=E1)ax0orCAA+NwF6S63!xC_~#ua-4RfGv7~m;*7w!0$Ck~m{#H;Z z1Vk`4y>NM_o&DCF*!MW<77;rjFVEfFxjD>@jME5!I8}4@>J0Qu|rjZFk9PJ6uMxq|P_ZNQ~T-q+%y=SPtrnRC3Fu!ZML^@-H z9ayWE2f)G1v?G)WBEk@00BEa=fVkhv5CL~Ud=T-eAz=+q1(m+`{6T-}={~lxJ*DYw zEC+=GpfXHHcN{)E$_b%>)fGvn)1FAiI^r-}W}nt(I4eT|V3HuD-<=B~Uog=suf(jc z?!*T$jfy&|YpV%(6BM<}5Iz(_eDlJF{u z+6#a$?Fk}yGl;VG*0F}QT+eFbnW_%*b-yi5qwbT&4`9g>F8@=*%H&e+pwzCY9rKiWKVL7a 
zs2XcnS<0<7)X1&<`GYgzpt4{4n?f}7)h!>-(B9osjbPs>;BkwzOS>h*B;x2dUZH4% zeO}7-j?~_-bhMSL)CaK9Kv`?U<4I|Hwz;C?2%{}2R<#!ZHBqXqthpy{8Q(ovbEO19 zz98C1D5?F8sq8;LlRb{1y}3`Rv=;(obj*p)4o~n1eh0PAlay*Qt_17_LI<%uSV>jG z)ukPBJx7_D5*U!i00$DBz0!sTq`IPD74QxSqy&w z@+ZZkQ^P6r_V#Npndy3__Q~84W^%GtTMTMXcw98R9uzM^VaNkqdTQ)pZgNRn%~V-N zq61dDlG>qjwdc|Ci|yMDmugF3|0wVovCWUbAz^MXrA4 z=<=wcNYw6IbOa$ENIC&B$m0lwLe>eIfP9E+XKII5#GaY@>NagDQd0YYLZ0rscl=^| zOV5B76be9ML_8!f%)=j}PH|#ICPuy0vr$VbHC>OFj`lQa+1PPYI~($V3=y~-3M@Kp zRwK&SHx**P`${f1P_Jq0P$4C?A0QQ7y&CqIvC-6b8Wakq_Wb^DYBY<->3LLdFSW~% zm704iN4oj3{K%>uC3HF_h}Be3&*;!owG%$Q-bVm0C2=*civ#vCdX`rMq2qOoK1kuj zgxgNXK_ng+){{JI`kS<%P%yQJDEMR0p{QM?v|Ur*@&;;GQoHISt2LZa>Acf| zU}dqacEVs6gn(b=52OHiugggJN5VqLLj!B|Si|N;Kb;N@TLM`SAw;zUpp+xbs@KZlDkl}S9}wmBeGS{}abolQfU;Vt2-IGv zJ!Eicdj~$kslEBAs+|wdIy2*mm@pEtChZAH?Z|I-e3S`?QhrMYLohVrW5VH&BGWc+ z+_1n+xvaLu)cDA(`J>=-wF93?`Uy5HxIUUT$5hB`4tSN+fhxjH zApJrBWQb>KPkMEnjRv4H7Jt?~MFPzI!^Vze558$_0rUl|*k zT$=1@&~%g_CAAlp%JTy)DfW1<-q6>elw*|Co)1c`T~>S2>euV_3@xjD)Tx&+5{VFJ z;35R}CLQ{e$+CyqcT;;Jz%jn0GXa2IQG;GqyPCjiBH_21-7X&f0HVCA`JuQ#p}W4i zqLf5RYA=s!^{95 zNTvWWwmsEON~s<5|q;~buP3>wd1ACy<+QDcy$Bf2tt5H%rA8;>jEm@faV53?1G-RH z2SE58wne+m?xO>XL6i+yLW|)LgMpZ`m@r^}m#Zr`=)p@Q@b>W4^OmwENZEP<@Md@*Yz9+6%I9 zssG$D*aC8StgovR6bgp=Sg4&5IHyw(M)YxIHR_8{29rvPQ~gJo$hc`D;q#^dFl#a*Lhs6?Km_?cBI7J2jcIQt6Jb;UL@JWWcxePWlgsKF zr`3=@@zVBcaW&Iq&%=66m6lZ0zJE$;FAZ*Gi@xu%zgsEC6cY8(U$T@3dW%EvH(8U5 zLG@vz_9I)60Zf1dit{`n*R9k5K!_jEP900WAd083_z1}*EiB!{6px?&0fLaEGo^)hk^?Rpe`{{j2Z3`+8^a?RVdPC6B)T?z^AAdiw0U zzZ~~zR1wsA?%c0MJ%w7|H>iRlL|L+A_L%Zx@lDi@WVNe}i=3bExUDR+XfwpE^J$|Y z98MX{v#ykXIvI-_g`_DsA%q!Zy(}2AnPi5iZ8p6zA`XmUn<412SQAQWe`_F;*UpR$ z<(4Mb8frR9N;Hp`&R3JKtM>1%RQ#{3@2~&f@q;Y+zx?uz_WPfu(NF)b-Tv_eq;@Dd z+uyyJE#fKEex;+mI8*!0TbJ|X=FK-t?O}t#WFHOqXu+n#@d=wtx0noK%8+4=W~U0U zo*SVf^DzXJN0hvEb^?Gg+kL_0B9G}vI^h|O5rl8V!78c!t%7h_YwfCZ@?L#Q!_v}H zS0(Y9YX2)+_REgizbF5`qxK&#&|iwN{i|O{C>ICEJ++Hy>-q+_igyaNr(4a4SRfRteO z(T-%m8%z+Sb$%+7u?jXnQd0Zd1tC@YO!Mm1++=P^&4m}bEXWA4N7XCn;*Sgpwuo;qaxPnWN>OhU?phEr!)F9W=HC2Y7g6k0e4DD z_(CSJt9IVVv*S~i3>|P!up&m4Qfog95JD9VOFOJ;y7sGfa$`gLCE}-+cBPzsuIB$Gc?fwxzfm7i{V?=ruHwMY#>}xN6Q03~< z4!PB}{i;2_`AGZy*I&0+{Lp!F)K0W^_tzAi+p}9;eNClsKqzW=I~*YqhxGH?gYazO z)PBo0p(2dag;YBVP78XKCpzslWTyS}h{a_{hr^7~JZ~M3IXF|?IL)~X5h0XCyJ}~+ zaLn)GtUA^qK8x91Iz{b=16iG0y4th$u%WhQY-ryFnT=o8EB^Ll>E@S`+K>5?rCmx| z28z$^8;!jeDnQ|&YV9}YZq13m^3U$zm3wnH3#azEX&(Se7f9_)I0FEbDeU1B5wlGY zGDadbVi9akFK;v_LxRm5(E(+6#=j|Cm{`<-BRzq zzx%fO#b-)tKV)88?fefvfBWsXfBE`*;`{H!>$kGn|F%vLGhctHJwd5m1dDFh6p!22 zn=W;?gTldpNPWbWH!&jt1!{hr}F%{`8Ft86tF>5c>Y|r^H50m2?IL5`?V`-|L#e- z%;w+zzB5?<>o3}^@4oo5V*A^#eypfC5gQFWr%cn+UsD|Q_N~U=F2(FcjZv7pnLkl4 zR?bFM)SfS2ef`sa|0MI(cR%g$m!E(7>(^g=B@Vv$_NSk|KHl4dy+WBdm)iWK7|cE> znjJwAqd=L=!?$kc$K0qj0m|8^irVw$zfSm39IsGP{pbF=b@r$;^IqSDb^?kT1>Rr{ zh4)5SxNHf(z_f>kVngX`FC5BI90j22TwlWxcYF3x_oc25D0UREE|^Vam-qN(0;AT{ zbENLY|Hs}PgD8##U>tu*qT`@uooterBylyW$$Lpm`aVti^2)20yTZQM9=5WWV)`DY zhuz}f3JKRN$M%SDU?(D$w{r(q?Si|f&qTi8Ho`LPpO5#VsRyJTX|F0lnNh3Kuhcub z0X6Q(4TIKCzx;Z(n&UtQX~%)g7gE<^dAX`?*%x8R;TORGX~#7x($3);m7n{eQOw<{ zJ}9#@qTm5#N7}L4k#@Wr^?UgajkRqhkJ*(d^e!a%XgKrUCfzPn`3rB+OgX4TzfT1-6;#5RaKshF(c~uBB?Dv+Aj}j$8+tc z@HNV&`^SphpUwvYsVTsL4C&H8F7GYUj?5Bejv2@TN^#)k&fYn|(Ba;@h))y|*Xn?}?u*L>r?+PQzyL~oP&1FUwyYOkRCk(<)c^tSJ~dE75@ z7Mk_{k`my#cEDGbDodKUGa6bRuQT`cB99{T)kq}u2UzWZ=h`bu5TubC6wwDRZ|T_B z38j|O6i7`0&f1Z7q`fxo)P%9jJ6^QzPph3C7c{N^moEUM)&0ag$0)S z*KYfxc|UIUrJaT>p7$RnA_2IOw39~X=ktMHj;}27Bl?PJ#-EiPoNx|az2h;ZU$5Vg!Yw2pwc@h;gGd!g&6M%1} z9RO+PPY~qhsO$EVqF^)%Qe)vA)~jzeeY@$Ynw5nSWd+UjUdL~U1ORCVJp5QiV)M;d 
zPlJduRxlc7sb$!z>v?6lJd|bWx$Z(Ur?Ywd^8bv+S>GA7TLb`U2Yh9z;v~P*Q~xP= zki-#Xl>LXk^gK+0EVXp!L1|;F1JVvy?KL8Z%~3}g^d3}aVcVLf>$;}duN~DL_mu9? z$9*Xv?SR!@V}c|}vfOAiWVt1YC^{hR@Z)L!{W^ZT%xVWf+5y_;$M;wASwY$XkaoC) z=2*KHb%L}5{M}AnR3Q)r003NVS&Zt+ClC8 z!#OZ8-#n-ts88)nc%Z2x;I6t(!UJ&x00000000000000000000000000000000000 n0000000000000000PvHpcVFuk;7*Vv00000NkvXXu0mjfew@iP literal 0 HcmV?d00001 diff --git a/docs/modules/retrieval/vector_stores/integrations/memory.md b/docs/modules/retrieval/vector_stores/integrations/memory.md index 58f3aacb..7acb37cf 100644 --- a/docs/modules/retrieval/vector_stores/integrations/memory.md +++ b/docs/modules/retrieval/vector_stores/integrations/memory.md @@ -1,8 +1,8 @@ # MemoryVectorStore -`MemoryVectorStore` is an in-memory, ephemeral vector store that stores -embeddings in-memory and does an exact, linear search for the most similar -embeddings. The default similarity metric is cosine similarity. +`MemoryVectorStore` is an in-memory, ephemeral vector store that stores embeddings in-memory and does an exact, linear search for the most similar embeddings. The default similarity metric is cosine similarity. + +This class is useful for testing and prototyping, but it is not recommended for production use cases. See other vector store integrations for production use cases. ```dart const filePath = './test/chains/assets/state_of_the_union.txt'; @@ -30,7 +30,7 @@ final docSearch = await MemoryVectorStore.fromDocuments( ); final llm = ChatOpenAI( apiKey: openAiKey, - defaultOptions: const ChatOpenAIOptions(temperature: 0), + defaultOptions: ChatOpenAIOptions(temperature: 0), ); final qaChain = OpenAIQAWithSourcesChain(llm: llm); final docPrompt = PromptTemplate.fromTemplate( diff --git a/docs/modules/retrieval/vector_stores/integrations/objectbox.md b/docs/modules/retrieval/vector_stores/integrations/objectbox.md new file mode 100644 index 00000000..af4bb6c6 --- /dev/null +++ b/docs/modules/retrieval/vector_stores/integrations/objectbox.md @@ -0,0 +1,258 @@ +# ObjectBox + +Vector store for the [ObjectBox](https://objectbox.io/) on-device database. + +ObjectBox features: +- Embedded Database that runs inside your application without latency +- Vector search based is state-of-the-art HNSW algorithm that scales very well with growing data volume +- HNSW is tightly integrated within ObjectBox's internal database. Vector Search doesn’t just run “on top of database persistence” +- With this deep integration ObjectBox does not need to keep all vectors in memory +- Multi-layered caching: if a vector is not in-memory, ObjectBox fetches it from disk +- Not just a vector database: you can store any data in ObjectBox, not just vectors. You won’t need a second database +- Low minimum hardware requirements: e.g. an old Raspberry Pi comfortably runs ObjectBox smoothly +- Low memory footprint: ObjectBox itself just takes a few MB of memory. 
The entire binary is only about 3 MB (compressed around 1 MB) +- Scales with hardware: efficient resource usage is also an advantage when running on more capable devices like the latest phones, desktops and servers + +- [ObjectBox Vector Search docs](https://docs.objectbox.io/ann-vector-search) +- [The first On-Device Vector Database: ObjectBox 4.0](https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0) +- [On-device Vector Database for Dart/Flutter](https://objectbox.io/on-device-vector-database-for-dart-flutter) + +## Setup + +LangChain.dart offers two classes for working with ObjectBox: +- `ObjectBoxVectorStore`: This vector stores creates a `Store` with an `ObjectBoxDocument` entity that persists LangChain `Document`s along with their embeddings. +- `BaseObjectBoxVectorStore`: If you need more control over the entity (e.g. if you need to persist custom fields), you can use this class instead. + +### 1. Add ObjectBox to your project + +See the [ObjectBox documentation](https://docs.objectbox.io/getting-started) to learn how to add ObjectBox to your project. + +Note that the integration differs depending on whether you are building a Flutter application or a pure Dart application. + +### 2. Add the LangChain.dart Community package + +Add the `langchain_community` package to your `pubspec.yaml` file. + +```yaml +dependencies: + langchain: {version} + langchain_community: {version} +``` + +### 3. Instantiate the ObjectBox vector store + +```dart +final embeddings = OllamaEmbeddings(model: 'mxbai-embed-large:335m'); +final vectorStore = ObjectBoxVectorStore( + embeddings: embeddings, + dimensions: 1024, +); +``` + +The dimensions parameter specifies the number of dimensions of the embeddings. It will depend on the embeddings model you are using. In this example, we are using the `mxbai-embed-large:335m` model, which has 1024 dimensions. + +The `ObjectBoxVectorStore` constructor allows you to customize the ObjectBox store that is created under the hood. For example, you can change the directory where the database is stored: + +```dart +final vectorStore = ObjectBoxVectorStore( + embeddings: embeddings, + dimensions: 1024, + directory: 'path/to/db', +); +``` + +## Usage + +### Storing vectors + +```dart +final res = await vectorStore.addDocuments( + documents: [ + Document( + pageContent: 'The cat sat on the mat', + metadata: {'cat': 'animal'}, + ), + Document( + pageContent: 'The dog chased the ball.', + metadata: {'cat': 'animal'}, + ), + ], +); +``` + +### Querying vectors + +```dart +final res = await vectorStore.similaritySearch( + query: 'Where is the cat?', + config: const ObjectBoxSimilaritySearch(k: 1), +); +``` + +You can change the minimum similarity score threshold by setting the `scoreThreshold` parameter in the `ObjectBoxSimilaritySearch` config object. + +#### Filtering + +You can use the `ObjectBoxSimilaritySearch` class to pass ObjectBox-specific filtering options. + +`ObjectBoxVectorStore` supports filtering queries by id, content or metadata using ObjectBox's `Condition`. You can define the filter condition in the `ObjectBoxSimilaritySearch.filterCondition` parameter. Use the `ObjectBoxDocumentProps` class to reference the entity fields to use in the query. 
+ +For example: +```dart +final res = await vectorStore.similaritySearch( + query: 'What should I feed my cat?', + config: ObjectBoxSimilaritySearch( + k: 5, + scoreThreshold: 0.8, + filterCondition: ObjectBoxDocumentProps.id.equals('my-id') + .or(ObjectBoxDocumentProps.metadata.contains('some-text')), + ), +); +``` + +### Deleting vectors + +To delete documents, you can use the `delete` method passing the ids of the documents you want to delete. + +```dart +await vectorStore.delete(ids: ['9999']); +``` + +## Example: Building a Fully Local RAG App with ObjectBox and Ollama + +This example demonstrates how to build a fully local RAG (Retrieval-Augmented Generation) app using ObjectBox and Ollama. The app retrieves blog posts, splits them into chunks, and stores them in an ObjectBox vector store. It then uses the stored information to generate responses to user questions. + +![RAG Pipeline](img/objectbox.png) + +#### Prerequisites + +Before running the example, make sure you have the following: + +- Ollama installed (see the [Ollama documentation](https://ollama.com/) for installation instructions). +- [mxbai-embed-large:335m](https://ollama.com/library/mxbai-embed-large:335m) and [`llama3:8b`](https://ollama.com/library/llama3:8b) models downloaded. + +#### Steps + +**Step 1: Retrieving and Storing Documents** + +1. Retrieve several posts from the ObjectBox blog using a `WebBaseLoader` document loader. +2. Split the retrieved posts into chunks using a `RecursiveCharacterTextSplitter`. +3. Create embeddings from the document chunks using the `mxbai-embed-large:335m` embeddings model via `OllamaEmbeddings`. +4. Add the document chunks and their corresponding embeddings to the `ObjectBoxVectorStore`. + +> Note: this step only needs to be executed once (unless the documents change). The stored documents can be used for multiple queries. + +**Step 2: Constructing the RAG Pipeline** + +1. Set up a retrieval pipeline that takes a user question as input and retrieves the most relevant documents from the ObjectBox vector store. +2. Format the retrieved documents into a single string containing the source, title, and content of each document. +3. Pass the formatted string to the Llama 3 model to generate a response to the user question. + +```dart +// 1. Instantiate vector store +final vectorStore = ObjectBoxVectorStore( + embeddings: OllamaEmbeddings(model: 'mxbai-embed-large:335m'), + dimensions: 1024, +); + +// 2. Load documents +const loader = WebBaseLoader([ + 'https://objectbox.io/on-device-vector-databases-and-edge-ai/', + 'https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0/', + 'https://objectbox.io/on-device-vector-database-for-dart-flutter/', + 'https://objectbox.io/evolution-of-search-traditional-vs-vector-search//', +]); +final List docs = await loader.load(); + +// 3. Split docs into chunks +const splitter = RecursiveCharacterTextSplitter( + chunkSize: 500, + chunkOverlap: 0, +); +final List chunkedDocs = await splitter.invoke(docs); + +// 4. Add documents to vector store +await vectorStore.addDocuments(documents: chunkedDocs); + +// 5. Construct a RAG prompt template +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, + ''' +You are an assistant for question-answering tasks. + +Use the following pieces of retrieved context to answer the user question. + +Context: +{context} + +If you don't know the answer, just say that you don't know. +Use three sentences maximum and keep the answer concise. 
+Cite the source you used to answer the question. + +Example: +""" +One sentence [1]. Another sentence [2]. + +Sources: +[1] https://example.com/1 +[2] https://example.com/2 +""" +''' + ), + (ChatMessageType.human, '{question}'), +]); + +// 6. Define the model to use and the vector store retriever +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions(model: 'llama3:8b'), +); +final retriever = vectorStore.asRetriever(); + +// 7. Create a Runnable that combines the retrieved documents into a single formatted string +final docCombiner = Runnable.mapInput, String>((docs) { + return docs.map((d) => ''' +Source: ${d.metadata['source']} +Title: ${d.metadata['title']} +Content: ${d.pageContent} +--- +''').join('\n'); +}); + +// 8. Define the RAG pipeline +final chain = Runnable.fromMap({ + 'context': retriever.pipe(docCombiner), + 'question': Runnable.passthrough(), +}).pipe(promptTemplate).pipe(chatModel).pipe(StringOutputParser()); + +// 9. Run the pipeline +final stream = chain.stream( + 'Which algorithm does ObjectBox Vector Search use? Can I use it in Flutter apps?', +); +await stream.forEach(stdout.write); +// According to the sources provided, ObjectBox Vector Search uses the HNSW +// (Hierarchical Navigable Small World) algorithm [1]. +// +// And yes, you can use it in Flutter apps. The article specifically mentions +// that ObjectBox 4.0 introduces an on-device vector database for the +// Dart/Flutter platform [2]. +// +// Sources: +// [1] https://objectbox.io/first-on-device-vector-database-objectbox-4-0/ +// [2] https://objectbox.io/on-device-vector-database-for-dart-flutter/ +``` + +## Advance + +### BaseObjectBoxVectorStore + +If you need more control over the entity (e.g. if you need to persist custom fields), you can use the `BaseObjectBoxVectorStore` class instead of `ObjectBoxVectorStore`. + +`BaseObjectBoxVectorStore` requires the following parameters: +- `embeddings`: The embeddings model to use. +- `box`: The ObjectBox `Box` instance to use. +- `createEntity`: A function that creates an entity from the given data. +- `createDocument`: A function that creates a LangChain's `Document` from the given entity. +- `getIdProperty`: A function that returns the ID property of the entity. +- `getEmbeddingProperty`: A function that returns the embedding property of the entity. + +You can check how `ObjectBoxVectorStore` is implemented to see how to use `BaseObjectBoxVectorStore`. 
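+For illustration, below is a minimal sketch of how `BaseObjectBoxVectorStore` could be wired up with a custom entity that persists an extra field. It is not a definitive implementation: `CustomDocument` and its `author` field are made up for this example, `CustomDocument_` and `getObjectBoxModel()` would be generated by `objectbox_generator` for your own model, and the exact generics may differ slightly from what is shown.
+
+```dart
+import 'dart:convert';
+
+import 'package:langchain_community/langchain_community.dart';
+import 'package:langchain_core/documents.dart';
+import 'package:langchain_ollama/langchain_ollama.dart';
+import 'package:objectbox/objectbox.dart';
+
+import 'objectbox.g.dart'; // hypothetical: generated by objectbox_generator
+
+@Entity()
+class CustomDocument {
+  CustomDocument(
+    this.internalId,
+    this.id,
+    this.content,
+    this.metadata,
+    this.embedding,
+    this.author,
+  );
+
+  @Id()
+  int internalId = 0;
+
+  @Unique(onConflict: ConflictStrategy.replace)
+  String id;
+
+  String content;
+  String metadata;
+
+  // Custom field that the pre-configured ObjectBoxVectorStore does not persist.
+  String author;
+
+  // Dimensions must match the embeddings model (1024 for mxbai-embed-large).
+  @HnswIndex(dimensions: 1024)
+  @Property(type: PropertyType.floatVector)
+  List<double> embedding;
+}
+
+// Open a Store with the generated model and wire the base vector store to it.
+final store = Store(getObjectBoxModel(), directory: 'custom_docs_db');
+
+final vectorStore = BaseObjectBoxVectorStore<CustomDocument>(
+  embeddings: OllamaEmbeddings(model: 'mxbai-embed-large:335m'),
+  box: store.box<CustomDocument>(),
+  createEntity: (id, content, metadata, embedding) =>
+      CustomDocument(0, id, content, metadata, embedding, 'unknown'),
+  createDocument: (entity) => Document(
+    id: entity.id,
+    pageContent: entity.content,
+    metadata: jsonDecode(entity.metadata) as Map<String, dynamic>,
+  ),
+  getIdProperty: () => CustomDocument_.id,
+  getEmbeddingProperty: () => CustomDocument_.embedding,
+);
+```
+
+Compared to `ObjectBoxVectorStore`, this approach lets you persist and query additional fields (such as `author` above) while still reusing the add, search and delete logic of the base class.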
diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index b3733733..ca9c5503 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -129,6 +129,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.0" + flat_buffers: + dependency: transitive + description: + name: flat_buffers + sha256: "380bdcba5664a718bfd4ea20a45d39e13684f5318fcd8883066a55e21f37f4c3" + url: "https://pub.dev" + source: hosted + version: "23.5.26" flutter: dependency: "direct main" description: flutter @@ -303,6 +311,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.0" + objectbox: + dependency: transitive + description: + name: objectbox + sha256: "70ff2a7538f6f8bb56136734d574f5bdc1cf29c50cd7207a14ea0c641ecb88ca" + url: "https://pub.dev" + source: hosted + version: "4.0.1" openai_dart: dependency: "direct overridden" description: diff --git a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart new file mode 100644 index 00000000..4a8950b7 --- /dev/null +++ b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart @@ -0,0 +1,108 @@ +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +void main() async { + await _rag(); +} + +Future _rag() async { + // 1. Instantiate vector store + final vectorStore = ObjectBoxVectorStore( + embeddings: OllamaEmbeddings(model: 'mxbai-embed-large:335m'), + dimensions: 1024, + directory: 'bin/modules/retrieval/vector_stores/integrations', + ); + + // 2. Load documents + const loader = WebBaseLoader([ + 'https://objectbox.io/on-device-vector-databases-and-edge-ai/', + 'https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0/', + 'https://objectbox.io/on-device-vector-database-for-dart-flutter/', + 'https://objectbox.io/evolution-of-search-traditional-vs-vector-search//', + ]); + final List docs = await loader.load(); + + // 3. Split docs into chunks + const splitter = RecursiveCharacterTextSplitter( + chunkSize: 500, + chunkOverlap: 0, + ); + final List chunkedDocs = await splitter.invoke(docs); + + // 4. Add documents to vector store + await vectorStore.addDocuments(documents: chunkedDocs); + + // 5. Construct a RAG prompt template + final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + ''' +You are an assistant for question-answering tasks. + +Use the following pieces of retrieved context to answer the user question. + +Context: +{context} + +If you don't know the answer, just say that you don't know. +Use three sentences maximum and keep the answer concise. +Cite the source you used to answer the question. + +Example: +""" +One sentence [1]. Another sentence [2]. + +Sources: +[1] https://example.com/1 +[2] https://example.com/2 +""" +''' + ), + (ChatMessageType.human, '{question}'), + ]); + + // 6. Define the model to use and the vector store retriever + final chatModel = ChatOllama( + defaultOptions: const ChatOllamaOptions(model: 'llama3'), + ); + final retriever = vectorStore.asRetriever(); + + // 7. 
Create a Runnable that combines the retrieved documents into a single string + final docCombiner = Runnable.mapInput, String>((docs) { + return docs + .map( + (final d) => ''' +Source: ${d.metadata['source']} +Title: ${d.metadata['title']} +Content: ${d.pageContent} +--- +''', + ) + .join('\n'); + }); + + // 8. Define the RAG pipeline + final chain = Runnable.fromMap({ + 'context': retriever.pipe(docCombiner), + 'question': Runnable.passthrough(), + }).pipe(promptTemplate).pipe(chatModel).pipe(const StringOutputParser()); + + // 9. Run the pipeline + final stream = chain.stream( + 'Which algorithm does ObjectBox Vector Search use? Can I use it in Flutter apps?', + ); + await stream.forEach(stdout.write); + // According to the sources provided, ObjectBox Vector Search uses the HNSW + // (Hierarchical Navigable Small World) algorithm [1]. + // + // And yes, you can use it in Flutter apps. The article specifically mentions + // that ObjectBox 4.0 introduces an on-device vector database for the + // Dart/Flutter platform [2]. + // + // Sources: + // [1] https://objectbox.io/first-on-device-vector-database-objectbox-4-0/ + // [2] https://objectbox.io/on-device-vector-database-for-dart-flutter/ +} diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 014430f6..bc3d8b13 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -104,6 +104,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.2" + ffi: + dependency: transitive + description: + name: ffi + sha256: "493f37e7df1804778ff3a53bd691d8692ddf69702cf4c1c1096a2e41b4779e21" + url: "https://pub.dev" + source: hosted + version: "2.1.2" fixnum: dependency: transitive description: @@ -112,6 +120,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.0" + flat_buffers: + dependency: transitive + description: + name: flat_buffers + sha256: "380bdcba5664a718bfd4ea20a45d39e13684f5318fcd8883066a55e21f37f4c3" + url: "https://pub.dev" + source: hosted + version: "23.5.26" freezed_annotation: dependency: transitive description: @@ -311,6 +327,14 @@ packages: relative: true source: path version: "0.0.3+1" + objectbox: + dependency: transitive + description: + name: objectbox + sha256: "70ff2a7538f6f8bb56136734d574f5bdc1cf29c50cd7207a14ea0c641ecb88ca" + url: "https://pub.dev" + source: hosted + version: "4.0.1" ollama_dart: dependency: "direct overridden" description: diff --git a/melos.yaml b/melos.yaml index d4792fff..b39bb2a1 100644 --- a/melos.yaml +++ b/melos.yaml @@ -49,6 +49,7 @@ command: langchain_tiktoken: ^1.0.1 math_expressions: ^2.4.0 meta: ^1.11.0 + objectbox: ^4.0.1 pinecone: ^0.7.2 shared_preferences: ^2.2.2 shelf: ^1.4.1 @@ -59,6 +60,7 @@ command: build_runner: ^2.4.9 freezed: ^2.4.7 json_serializable: ^6.7.1 + objectbox_generator: ^4.0.0 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git diff --git a/packages/langchain/lib/src/vector_stores/memory.dart b/packages/langchain/lib/src/vector_stores/memory.dart index e812d275..a439e1cf 100644 --- a/packages/langchain/lib/src/vector_stores/memory.dart +++ b/packages/langchain/lib/src/vector_stores/memory.dart @@ -14,7 +14,9 @@ import 'package:uuid/uuid.dart'; /// This is not efficient for large vector stores as it has a time complexity /// of O(vector_dimensionality * num_vectors). /// -/// For more efficient vector stores, see [VertexAIMatchingEngine](https://pub.dev/documentation/langchain_google/latest/langchain_google/VertexAIMatchingEngine-class.html). 
+/// This class is useful for testing and prototyping, but it is not recommended +/// for production use cases. See other vector store integrations for +/// production use cases. /// /// ### Filtering /// diff --git a/packages/langchain_community/lib/langchain_community.dart b/packages/langchain_community/lib/langchain_community.dart index 3aee4cf9..b91a968a 100644 --- a/packages/langchain_community/lib/langchain_community.dart +++ b/packages/langchain_community/lib/langchain_community.dart @@ -3,3 +3,4 @@ library; export 'src/document_loaders/document_loaders.dart'; export 'src/tools/tools.dart'; +export 'src/vector_stores/vector_stores.dart'; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart new file mode 100644 index 00000000..7e065c4a --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart @@ -0,0 +1,120 @@ +import 'dart:convert'; + +import 'package:langchain_core/documents.dart'; +import 'package:langchain_core/vector_stores.dart'; +import 'package:objectbox/objectbox.dart' + show + Box, + Condition, + ObjectWithScore, + QueryHnswProperty, + QueryStringProperty; +import 'package:uuid/uuid.dart'; + +/// {@template base_object_box_vector_store} +/// Base class for ObjectBox vector store. +/// +/// Use this class if you need more control over the ObjectBox store. +/// Otherwise, use [ObjectBoxVectorStore] which is a pre-configured version. +/// {@endtemplate} +class BaseObjectBoxVectorStore extends VectorStore { + /// {@macro base_object_box_vector_store} + BaseObjectBoxVectorStore({ + required super.embeddings, + required final Box box, + required final T Function( + String id, + String content, + String metadata, + List embedding, + ) createEntity, + required final Document Function(T) createDocument, + required final QueryStringProperty Function() getIdProperty, + required final QueryHnswProperty Function() getEmbeddingProperty, + }) : _box = box, + _createEntity = createEntity, + _createDocument = createDocument, + _getIdProperty = getIdProperty, + _getEmbeddingProperty = getEmbeddingProperty; + + /// The [Box] to store the entities in. + final Box _box; + + /// The function to create an entity [T] from the given data. + final T Function( + String id, + String content, + String metadata, + List embedding, + ) _createEntity; + + /// The function to create a [Document] from the given entity [T]. + final Document Function(T) _createDocument; + + /// A getter for the ID query property. + final QueryStringProperty Function() _getIdProperty; + + /// A getter for the embedding query property. + final QueryHnswProperty Function() _getEmbeddingProperty; + + /// UUID generator. + final Uuid _uuid = const Uuid(); + + @override + Future> addVectors({ + required final List> vectors, + required final List documents, + }) async { + assert(vectors.length == documents.length); + + final List ids = []; + final List records = []; + for (var i = 0; i < documents.length; i++) { + final doc = documents[i]; + final id = doc.id ?? 
_uuid.v4(); + final entity = _createEntity( + id, + doc.pageContent, + jsonEncode(doc.metadata), + vectors[i], + ); + ids.add(id); + records.add(entity); + } + + _box.putMany(records); + return ids; + } + + @override + Future delete({required final List ids}) async { + _box.query(_getIdProperty().oneOf(ids)).build().remove(); + } + + @override + Future> similaritySearchByVectorWithScores({ + required final List embedding, + final VectorStoreSimilaritySearch config = + const VectorStoreSimilaritySearch(), + }) async { + var filter = + _getEmbeddingProperty().nearestNeighborsF32(embedding, config.k); + + final filterCondition = config.filter?.values.firstOrNull; + if (filterCondition != null && filterCondition is Condition) { + filter = filter.and(filterCondition); + } + + final query = _box.query(filter).build(); + + Iterable> results = query.findWithScores(); + + if (config.scoreThreshold != null) { + results = results.where((final r) => r.score >= config.scoreThreshold!); + } + + return results + .map((r) => (_createDocument(r.object), r.score)) + .toList(growable: false); + } +} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json new file mode 100644 index 00000000..32251c2e --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json @@ -0,0 +1,56 @@ +{ + "_note1": "KEEP THIS FILE! Check it into a version control system (VCS) like git.", + "_note2": "ObjectBox manages crucial IDs for your object model. See docs for details.", + "_note3": "If you have VCS merge conflicts, you must resolve them according to ObjectBox docs.", + "entities": [ + { + "id": "1:4662034750769022750", + "lastPropertyId": "5:5762998900965066008", + "name": "ObjectBoxDocument", + "properties": [ + { + "id": "1:328437667364158177", + "name": "internalId", + "type": 6, + "flags": 1 + }, + { + "id": "2:3766173764062654800", + "name": "id", + "type": 9, + "flags": 34848, + "indexId": "1:8818474670164842374" + }, + { + "id": "3:7972539540824041325", + "name": "content", + "type": 9 + }, + { + "id": "4:866532944790310363", + "name": "metadata", + "type": 9 + }, + { + "id": "5:5762998900965066008", + "name": "embedding", + "type": 28, + "flags": 8, + "indexId": "2:3016727589204567263" + } + ], + "relations": [] + } + ], + "lastEntityId": "1:4662034750769022750", + "lastIndexId": "2:3016727589204567263", + "lastRelationId": "0:0", + "lastSequenceId": "0:0", + "modelVersion": 5, + "modelVersionParserMinimum": 5, + "retiredEntityUids": [], + "retiredIndexUids": [], + "retiredPropertyUids": [], + "retiredRelationUids": [], + "version": 1 +} \ No newline at end of file diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart new file mode 100644 index 00000000..0a3ac27b --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart @@ -0,0 +1,196 @@ +import 'dart:convert'; + +import 'package:langchain_core/documents.dart'; +import 'package:objectbox/objectbox.dart' + show + Condition, + ConflictStrategy, + Entity, + HnswIndex, + Id, + Property, + PropertyType, + Store, + Unique; + +import 'base_objectbox.dart'; +import 'objectbox.g.dart' as obxg; +import 'types.dart'; + +/// Vector store for the [ObjectBox](https://objectbox.io/) on-device database. 
+/// +/// ```dart +/// final embeddings = OllamaEmbeddings(model: 'llama3'); +/// final vectorStore = ObjectBoxVectorStore(embeddings: embeddings); +/// ``` +/// +/// This vector stores creates a [Store] with an [ObjectBoxDocument] entity +/// that persists LangChain [Document]s along with their embeddings. If you +/// need more control over the entity, you can use the +/// [BaseObjectBoxVectorStore] class instead. +/// +/// See documentation for more details: +/// - [LangChain.dart ObjectBox docs](https://langchaindart.com/#/modules/retrieval/vector_stores/integrations/objectbox) +/// - [ObjectBox Vector Search docs](https://docs.objectbox.io/ann-vector-search) +/// +/// ### Filtering +/// +/// You can use the [ObjectBoxSimilaritySearch] class to pass ObjectBox-specific +/// filtering options. +/// +/// [ObjectBoxVectorStore] supports filtering queries by id, content or metadata +/// using ObjectBox's [Condition]. You can define the filter condition in the +/// [ObjectBoxSimilaritySearch] `filterCondition` parameter. Use the +/// [ObjectBoxDocumentProps] class to reference the entity fields to use in the +/// query. +/// +/// For example: +/// ```dart +/// final vectorStore = ObjectBoxVectorStore(...); +/// final res = await vectorStore.similaritySearch( +/// query: 'What should I feed my cat?', +/// config: ObjectBoxSimilaritySearch( +/// k: 5, +/// scoreThreshold: 0.8, +/// filterCondition: ObjectBoxDocumentProps.id.equals('my-id') +/// .or(ObjectBoxDocumentProps.metadata.contains('some-text')), +/// ), +/// ); +/// ``` +class ObjectBoxVectorStore extends BaseObjectBoxVectorStore { + /// Creates an [ObjectBoxVectorStore] instance. + /// + /// Main configuration options: + /// - [embeddings] The embeddings model to use. + /// - [dimensions] The number of dimensions of the embeddings (vector size). + /// + /// ObjectBox-specific options: + /// - Check the ObjectBox's [Store] documentation for more details on the + /// different options. + ObjectBoxVectorStore({ + required super.embeddings, + required final int dimensions, + final String? directory, + final int? maxDBSizeInKB, + final int? maxDataSizeInKB, + final int? fileMode, + final int? maxReaders, + final bool queriesCaseSensitiveDefault = true, + final String? macosApplicationGroup, + }) : super( + box: _openStore( + dimensions: dimensions, + directory: directory, + maxDBSizeInKB: maxDBSizeInKB, + maxDataSizeInKB: maxDataSizeInKB, + fileMode: fileMode, + maxReaders: maxReaders, + queriesCaseSensitiveDefault: queriesCaseSensitiveDefault, + macosApplicationGroup: macosApplicationGroup, + ).box(), + createEntity: _createObjectBoxDocument, + createDocument: _createDoc, + getIdProperty: () => obxg.ObjectBoxDocument_.id, + getEmbeddingProperty: () => obxg.ObjectBoxDocument_.embedding, + ); + + /// The ObjectBox store. + static Store? _store; + + /// Opens the ObjectBox store. + static Store _openStore({ + required final int dimensions, + final String? directory, + final int? maxDBSizeInKB, + final int? maxDataSizeInKB, + final int? fileMode, + final int? maxReaders, + final bool queriesCaseSensitiveDefault = true, + final String? macosApplicationGroup, + }) { + return _store ??= obxg.openStore( + dimensions: dimensions, + directory: directory, + maxDBSizeInKB: maxDBSizeInKB, + maxDataSizeInKB: maxDataSizeInKB, + fileMode: fileMode, + maxReaders: maxReaders, + queriesCaseSensitiveDefault: queriesCaseSensitiveDefault, + macosApplicationGroup: macosApplicationGroup, + ); + } + + /// Creates an [ObjectBoxDocument] entity. 
+ static ObjectBoxDocument _createObjectBoxDocument( + String id, + String content, + String metadata, + List embedding, + ) => + ObjectBoxDocument(0, id, content, metadata, embedding); + + /// Creates a [Document] from an [ObjectBoxDocument] entity. + static Document _createDoc(ObjectBoxDocument entity) { + Map metadata = const {}; + try { + metadata = jsonDecode(entity.metadata); + } catch (_) {} + return Document( + id: entity.id, + pageContent: entity.content, + metadata: metadata, + ); + } + + /// Closes the ObjectBox store; + /// + /// Don't try to call any other methods after the store is closed. + void close() { + _store?.close(); + _store = null; + } +} + +/// {@template objectbox_document} +/// The ObjectBox entity representing a LangChain [Document]. +/// {@endtemplate} +@Entity() +class ObjectBoxDocument { + /// {@macro objectbox_document} + ObjectBoxDocument( + this.internalId, + this.id, + this.content, + this.metadata, + this.embedding, + ); + + /// The internal ID used by ObjectBox. + @Id() + int internalId = 0; + + /// The ID of the document. + @Unique(onConflict: ConflictStrategy.replace) + String id; + + /// The content of the document. + String content; + + /// The metadata of the document. + String metadata; + + /// The embedding of the document. + @HnswIndex(dimensions: 0) // Set dynamically in the ObjectBoxVectorStore + @Property(type: PropertyType.floatVector) + List embedding; +} + +/// [ObjectBoxDocument] entity fields to define ObjectBox queries. +/// +/// Example: +/// ```dart +/// final filterCondition = ObjectBoxDocumentProps.metadata +/// .contains('animal') +/// .or(ObjectBoxDocumentProps.metadata.contains('natural'); +/// ``` +typedef ObjectBoxDocumentProps = obxg.ObjectBoxDocument_; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart new file mode 100644 index 00000000..4eed33be --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart @@ -0,0 +1,193 @@ +// GENERATED CODE - DO NOT MODIFY BY HAND +// This code was generated by ObjectBox. To update it run the generator again +// with `dart run build_runner build`. +// See also https://docs.objectbox.io/getting-started#generate-objectbox-code + +// ignore_for_file: camel_case_types, depend_on_referenced_packages, avoid_js_rounded_ints, require_trailing_commas, cascade_invocations, strict_raw_type +// coverage:ignore-file + +import 'dart:typed_data'; + +import 'package:flat_buffers/flat_buffers.dart' as fb; +import 'package:objectbox/internal.dart' + as obx_int; // generated code can access "internal" functionality +import 'package:objectbox/objectbox.dart' as obx; + +import '../../../src/vector_stores/objectbox/objectbox.dart'; + +export 'package:objectbox/objectbox.dart'; // so that callers only have to import this file + +List? 
_entities; + +List _getEntities(int dimensions) { + if (_entities != null) { + final objectBoxDocumentEntity = _entities![0]; + final embeddingProperty = objectBoxDocumentEntity.properties[4]; + + if (embeddingProperty.hnswParams?.dimensions != dimensions) { + _entities = null; + } else { + return _entities!; + } + } + + return _entities ??= [ + obx_int.ModelEntity( + id: const obx_int.IdUid(1, 4662034750769022750), + name: 'ObjectBoxDocument', + lastPropertyId: const obx_int.IdUid(5, 5762998900965066008), + flags: 0, + properties: [ + obx_int.ModelProperty( + id: const obx_int.IdUid(1, 328437667364158177), + name: 'internalId', + type: 6, + flags: 1), + obx_int.ModelProperty( + id: const obx_int.IdUid(2, 3766173764062654800), + name: 'id', + type: 9, + flags: 34848, + indexId: const obx_int.IdUid(1, 8818474670164842374)), + obx_int.ModelProperty( + id: const obx_int.IdUid(3, 7972539540824041325), + name: 'content', + type: 9, + flags: 0), + obx_int.ModelProperty( + id: const obx_int.IdUid(4, 866532944790310363), + name: 'metadata', + type: 9, + flags: 0), + obx_int.ModelProperty( + id: const obx_int.IdUid(5, 5762998900965066008), + name: 'embedding', + type: 28, + flags: 8, + indexId: const obx_int.IdUid(2, 3016727589204567263), + hnswParams: obx_int.ModelHnswParams( + dimensions: dimensions, + )) + ], + relations: [], + backlinks: []) + ]; +} + +/// Shortcut for [obx.Store.new] that passes [getObjectBoxModel] and for Flutter +/// apps by default a [directory] using `defaultStoreDirectory()` from the +/// ObjectBox Flutter library. +/// +/// Note: for desktop apps it is recommended to specify a unique [directory]. +/// +/// See [obx.Store.new] for an explanation of all parameters. +/// +/// For Flutter apps, also calls `loadObjectBoxLibraryAndroidCompat()` from +/// the ObjectBox Flutter library to fix loading the native ObjectBox library +/// on Android 6 and older. +obx.Store openStore( + {required int dimensions, + String? directory, + int? maxDBSizeInKB, + int? maxDataSizeInKB, + int? fileMode, + int? maxReaders, + bool queriesCaseSensitiveDefault = true, + String? macosApplicationGroup}) { + return obx.Store(getObjectBoxModel(dimensions), + directory: directory, + maxDBSizeInKB: maxDBSizeInKB, + maxDataSizeInKB: maxDataSizeInKB, + fileMode: fileMode, + maxReaders: maxReaders, + queriesCaseSensitiveDefault: queriesCaseSensitiveDefault, + macosApplicationGroup: macosApplicationGroup); +} + +/// Returns the ObjectBox model definition for this project for use with +/// [obx.Store.new]. 
+obx_int.ModelDefinition getObjectBoxModel(int dimensions) { + final entities = _getEntities(dimensions); + final model = obx_int.ModelInfo( + entities: _getEntities(dimensions), + lastEntityId: const obx_int.IdUid(1, 4662034750769022750), + lastIndexId: const obx_int.IdUid(2, 3016727589204567263), + lastRelationId: const obx_int.IdUid(0, 0), + lastSequenceId: const obx_int.IdUid(0, 0), + retiredEntityUids: const [], + retiredIndexUids: const [], + retiredPropertyUids: const [], + retiredRelationUids: const [], + modelVersion: 5, + modelVersionParserMinimum: 5, + version: 1); + + final bindings = { + ObjectBoxDocument: obx_int.EntityDefinition( + model: entities[0], + toOneRelations: (ObjectBoxDocument object) => [], + toManyRelations: (ObjectBoxDocument object) => {}, + getId: (ObjectBoxDocument object) => object.internalId, + setId: (ObjectBoxDocument object, int id) { + object.internalId = id; + }, + objectToFB: (ObjectBoxDocument object, fb.Builder fbb) { + final idOffset = fbb.writeString(object.id); + final contentOffset = fbb.writeString(object.content); + final metadataOffset = fbb.writeString(object.metadata); + final embeddingOffset = fbb.writeListFloat32(object.embedding); + fbb.startTable(6); + fbb.addInt64(0, object.internalId); + fbb.addOffset(1, idOffset); + fbb.addOffset(2, contentOffset); + fbb.addOffset(3, metadataOffset); + fbb.addOffset(4, embeddingOffset); + fbb.finish(fbb.endTable()); + return object.internalId; + }, + objectFromFB: (obx.Store store, ByteData fbData) { + final buffer = fb.BufferContext(fbData); + final rootOffset = buffer.derefObject(0); + final internalIdParam = + const fb.Int64Reader().vTableGet(buffer, rootOffset, 4, 0); + final idParam = const fb.StringReader(asciiOptimization: true) + .vTableGet(buffer, rootOffset, 6, ''); + final contentParam = const fb.StringReader(asciiOptimization: true) + .vTableGet(buffer, rootOffset, 8, ''); + final metadataParam = const fb.StringReader(asciiOptimization: true) + .vTableGet(buffer, rootOffset, 10, ''); + final embeddingParam = + const fb.ListReader(fb.Float32Reader(), lazy: false) + .vTableGet(buffer, rootOffset, 12, []); + final object = ObjectBoxDocument(internalIdParam, idParam, + contentParam, metadataParam, embeddingParam); + + return object; + }) + }; + + return obx_int.ModelDefinition(model, bindings); +} + +/// [ObjectBoxDocument] entity fields to define ObjectBox queries. +class ObjectBoxDocument_ { + /// See [ObjectBoxDocument.internalId]. + static final internalId = + obx.QueryIntegerProperty(_entities![0].properties[0]); + + /// See [ObjectBoxDocument.id]. + static final id = + obx.QueryStringProperty(_entities![0].properties[1]); + + /// See [ObjectBoxDocument.content]. + static final content = + obx.QueryStringProperty(_entities![0].properties[2]); + + /// See [ObjectBoxDocument.metadata]. + static final metadata = + obx.QueryStringProperty(_entities![0].properties[3]); + + /// See [ObjectBoxDocument.embedding]. 
+ static final embedding = + obx.QueryHnswProperty(_entities![0].properties[4]); +} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/types.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/types.dart new file mode 100644 index 00000000..aaa08078 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/types.dart @@ -0,0 +1,29 @@ +import 'package:langchain_core/vector_stores.dart'; +import 'package:objectbox/objectbox.dart' show Condition; + +/// {@template objectbox_similarity_search} +/// ObjectBox similarity search config. +/// +/// ObjectBox supports filtering queries by id, content or metadata using +/// [Condition]. You can define the filter condition in the [filterCondition] +/// parameter. +/// +/// Example: +/// ```dart +/// ObjectBoxSimilaritySearch( +/// k: 10, +/// scoreThreshold: 1.3, +/// filterCondition: ObjectBoxDocumentProps.metadata.contains('cat'), +/// ); +/// ``` +/// {@endtemplate} +class ObjectBoxSimilaritySearch extends VectorStoreSimilaritySearch { + /// {@macro objectbox_similarity_search} + ObjectBoxSimilaritySearch({ + super.k = 4, + super.scoreThreshold, + final Condition? filterCondition, + }) : super( + filter: filterCondition != null ? {'filter': filterCondition} : null, + ); +} diff --git a/packages/langchain_community/lib/src/vector_stores/vector_stores.dart b/packages/langchain_community/lib/src/vector_stores/vector_stores.dart new file mode 100644 index 00000000..753d8168 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/vector_stores.dart @@ -0,0 +1,4 @@ +export 'objectbox/base_objectbox.dart' show BaseObjectBoxVectorStore; +export 'objectbox/objectbox.dart' + show ObjectBoxDocument, ObjectBoxDocumentProps, ObjectBoxVectorStore; +export 'objectbox/types.dart' show ObjectBoxSimilaritySearch; diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 8cacd96c..caa994db 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -24,6 +24,14 @@ dependencies: langchain_core: ^0.3.1 math_expressions: ^2.4.0 meta: ^1.11.0 + objectbox: ^4.0.1 + uuid: ^4.3.3 dev_dependencies: + build_runner: ^2.4.9 + langchain_openai: ^0.6.1+1 + objectbox_generator: ^4.0.0 test: ^1.25.2 + +objectbox: + output_dir: src/vector_stores/objectbox diff --git a/packages/langchain_community/pubspec_overrides.yaml b/packages/langchain_community/pubspec_overrides.yaml index 3508ed77..de62cfcc 100644 --- a/packages/langchain_community/pubspec_overrides.yaml +++ b/packages/langchain_community/pubspec_overrides.yaml @@ -1,4 +1,8 @@ -# melos_managed_dependency_overrides: langchain_core +# melos_managed_dependency_overrides: langchain_core,langchain_openai,openai_dart dependency_overrides: langchain_core: path: ../langchain_core + langchain_openai: + path: ../langchain_openai + openai_dart: + path: ../openai_dart diff --git a/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart b/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart new file mode 100644 index 00000000..740a06d7 --- /dev/null +++ b/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart @@ -0,0 +1,159 @@ +import 'dart:io'; + +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_core/documents.dart'; +import 'package:langchain_openai/langchain_openai.dart'; +import 'package:objectbox/objectbox.dart'; +import 'package:test/test.dart'; + +void 
main() async { + late final OpenAIEmbeddings embeddings; + late final ObjectBoxVectorStore vectorStore; + + setUpAll(() async { + embeddings = OpenAIEmbeddings( + apiKey: Platform.environment['OPENAI_API_KEY'], + ); + vectorStore = ObjectBoxVectorStore( + embeddings: embeddings, + dimensions: 1536, + directory: 'test/vector_stores/objectbox', + ); + }); + + group('ObjectBoxVectorStore tests', () { + test('Test add new vectors', () async { + final res = await vectorStore.addDocuments( + documents: [ + const Document( + id: '1', + pageContent: 'The cat sat on the mat', + metadata: {'cat': 'animal'}, + ), + const Document( + id: '2', + pageContent: 'The dog chased the ball.', + metadata: {'cat': 'animal'}, + ), + const Document( + id: '3', + pageContent: 'The boy ate the apple.', + metadata: {'cat': 'person'}, + ), + const Document( + id: '4', + pageContent: 'The girl drank the milk.', + metadata: {'cat': 'person'}, + ), + const Document( + id: '5', + pageContent: 'The sun is shining.', + metadata: {'cat': 'natural'}, + ), + ], + ); + + expect(res.length, 5); + }); + + test('Test query return 1 result', () async { + final res = await vectorStore.similaritySearch( + query: 'Is it raining?', + config: ObjectBoxSimilaritySearch(k: 1), + ); + expect(res.length, 1); + expect( + res.first.id, + '5', + ); + }); + + test('Test query with scoreThreshold', () async { + final res = await vectorStore.similaritySearchWithScores( + query: 'Is it raining?', + config: ObjectBoxSimilaritySearch(scoreThreshold: 0.3), + ); + for (final (_, score) in res) { + expect(score, greaterThan(0.3)); + } + }); + + test('Test query with equality filter', () async { + final res = await vectorStore.similaritySearch( + query: 'What are they eating?', + config: ObjectBoxSimilaritySearch( + k: 10, + scoreThreshold: 1.3, + filterCondition: ObjectBoxDocumentProps.metadata.contains('person'), + ), + ); + for (final doc in res) { + expect(doc.metadata['cat'], 'person'); + } + }); + + test('Test query with filter with multiple operators', () async { + final res = await vectorStore.similaritySearch( + query: 'What are they eating?', + config: ObjectBoxSimilaritySearch( + k: 10, + filterCondition: ObjectBoxDocumentProps.metadata + .contains('animal') + .or(ObjectBoxDocumentProps.metadata.contains('natural')), + ), + ); + for (final doc in res) { + expect(doc.metadata['cat'], isNot('person')); + } + }); + + test('Test delete document', () async { + await vectorStore.addDocuments( + documents: [ + const Document( + id: '9999', + pageContent: 'This document will be deleted', + metadata: {'cat': 'xxx'}, + ), + ], + ); + final res1 = await vectorStore.similaritySearch( + query: 'Deleted doc', + config: ObjectBoxSimilaritySearch( + filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), + ), + ); + expect(res1.length, 1); + expect(res1.first.id, '9999'); + + await vectorStore.delete(ids: ['9999']); + final res2 = await vectorStore.similaritySearch( + query: 'Deleted doc', + config: ObjectBoxSimilaritySearch( + filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), + ), + ); + expect(res2.length, 0); + }); + }); + + group('ObjectBoxSimilaritySearch', () { + test('ObjectBoxSimilaritySearch fields', () { + final config = ObjectBoxSimilaritySearch( + k: 5, + scoreThreshold: 0.8, + filterCondition: ObjectBoxDocumentProps.metadata.contains('style1'), + ); + expect(config.k, 5); + expect(config.scoreThreshold, 0.8); + expect(config.filter?['filter'], isA>()); + }); + }); + + tearDownAll(() async { + embeddings.close(); + 
vectorStore.close(); + await File('test/vector_stores/objectbox/data.mdb').delete(); + await File('test/vector_stores/objectbox/lock.mdb').delete(); + }); +} From 554b1c6228cc3abb9729e76a4488c0b414b49427 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Tue, 28 May 2024 23:30:54 +0200 Subject: [PATCH 026/251] feat: Add Runnable.close() to close any resources associated with it (#439) --- packages/langchain_core/lib/src/runnables/binding.dart | 5 +++++ packages/langchain_core/lib/src/runnables/map.dart | 7 +++++++ .../langchain_core/lib/src/runnables/runnable.dart | 10 ++++++++++ .../langchain_core/lib/src/runnables/sequence.dart | 7 +++++++ .../google_ai/chat_google_generative_ai.dart | 2 +- .../lib/src/chat_models/chat_mistralai.dart | 2 +- .../lib/src/chat_models/chat_ollama.dart | 2 +- packages/langchain_ollama/lib/src/llms/ollama.dart | 2 +- .../lib/src/chat_models/chat_openai.dart | 2 +- packages/langchain_openai/lib/src/llms/openai.dart | 2 +- packages/langchain_openai/lib/src/tools/dall_e.dart | 2 +- .../test/chat_models/open_router_test.dart | 4 ++-- 12 files changed, 38 insertions(+), 9 deletions(-) diff --git a/packages/langchain_core/lib/src/runnables/binding.dart b/packages/langchain_core/lib/src/runnables/binding.dart index 75e6084f..a1b9f907 100644 --- a/packages/langchain_core/lib/src/runnables/binding.dart +++ b/packages/langchain_core/lib/src/runnables/binding.dart @@ -70,4 +70,9 @@ class RunnableBinding }), ).asBroadcastStream(); } + + @override + void close() { + for (final step in steps.values) { + step.close(); + } + } } diff --git a/packages/langchain_core/lib/src/runnables/runnable.dart b/packages/langchain_core/lib/src/runnables/runnable.dart index 792bc80a..71021af6 100644 --- a/packages/langchain_core/lib/src/runnables/runnable.dart +++ b/packages/langchain_core/lib/src/runnables/runnable.dart @@ -289,4 +289,14 @@ abstract class Runnable { return encoding.encode(promptValue.toString()); } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart index a62962e4..2e8fe5f6 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart @@ -278,7 +278,7 @@ class ChatOllama extends BaseChatModel { return encoding.encode(promptValue.toString()); } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_ollama/lib/src/llms/ollama.dart b/packages/langchain_ollama/lib/src/llms/ollama.dart index 7eeb7e7c..e61c6e27 100644 --- a/packages/langchain_ollama/lib/src/llms/ollama.dart +++ b/packages/langchain_ollama/lib/src/llms/ollama.dart @@ -276,7 +276,7 @@ class Ollama extends BaseLLM { return encoding.encode(promptValue.toString()); } - /// Closes the client and cleans up any resources associated with it. 
+ @override void close() { _client.endSession(); } diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index 053bf481..83bb8cd5 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -399,7 +399,7 @@ class ChatOpenAI extends BaseChatModel { : getEncoding('cl100k_base'); } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_openai/lib/src/llms/openai.dart b/packages/langchain_openai/lib/src/llms/openai.dart index 086b8b8a..9471acfc 100644 --- a/packages/langchain_openai/lib/src/llms/openai.dart +++ b/packages/langchain_openai/lib/src/llms/openai.dart @@ -345,7 +345,7 @@ class OpenAI extends BaseLLM { return encoding.encode(promptValue.toString()); } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_openai/lib/src/tools/dall_e.dart b/packages/langchain_openai/lib/src/tools/dall_e.dart index 3137dcfa..3ce66fd9 100644 --- a/packages/langchain_openai/lib/src/tools/dall_e.dart +++ b/packages/langchain_openai/lib/src/tools/dall_e.dart @@ -111,7 +111,7 @@ final class OpenAIDallETool extends StringTool { } } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_openai/test/chat_models/open_router_test.dart b/packages/langchain_openai/test/chat_models/open_router_test.dart index 4587b56b..f108db8b 100644 --- a/packages/langchain_openai/test/chat_models/open_router_test.dart +++ b/packages/langchain_openai/test/chat_models/open_router_test.dart @@ -107,8 +107,8 @@ void main() { } }); - test('Test tool calling', - timeout: const Timeout(Duration(minutes: 1)), () async { + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { const tool = ToolSpec( name: 'get_current_weather', description: 'Get the current weather in a given location', From b12a343a13ee3953fc61b08ccd4f586d598e6cd0 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 29 May 2024 00:02:23 +0200 Subject: [PATCH 027/251] fix: Stream errors are not propagated by StringOutputParser (#440) --- packages/langchain_core/lib/src/output_parsers/string.dart | 4 +--- packages/langchain_core/lib/src/runnables/sequence.dart | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/packages/langchain_core/lib/src/output_parsers/string.dart b/packages/langchain_core/lib/src/output_parsers/string.dart index f5ea11a8..9dd4722a 100644 --- a/packages/langchain_core/lib/src/output_parsers/string.dart +++ b/packages/langchain_core/lib/src/output_parsers/string.dart @@ -68,9 +68,7 @@ class StringOutputParser if (reduceOutputStream) { yield await inputStream.map(_parse).reduce((final a, final b) => '$a$b'); } else { - await for (final input in inputStream) { - yield _parse(input); - } + yield* inputStream.map(_parse); } } diff --git a/packages/langchain_core/lib/src/runnables/sequence.dart b/packages/langchain_core/lib/src/runnables/sequence.dart index e69cf0bc..4be296b9 100644 --- a/packages/langchain_core/lib/src/runnables/sequence.dart +++ b/packages/langchain_core/lib/src/runnables/sequence.dart @@ -129,7 +129,7 @@ class RunnableSequence Stream streamFromInputStream( final Stream inputStream, { final RunnableOptions? 
options, - }) { + }) async* { Stream nextStepStream; try { nextStepStream = first.streamFromInputStream( @@ -152,7 +152,7 @@ class RunnableSequence } try { - return last.streamFromInputStream( + yield* last.streamFromInputStream( nextStepStream, options: last.getCompatibleOptions(options), ); From d827d2fb14da6a8d1a9a329519718879c586ccd4 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 29 May 2024 00:05:54 +0200 Subject: [PATCH 028/251] docs: Update hello_world_flutter example with Ollama, GoogleAI and Mistral suport (#441) --- examples/hello_world_flutter/README.md | 21 +- .../android/app/build.gradle | 30 +- .../hello_world_flutter/android/build.gradle | 13 - .../android/gradle.properties | 2 +- .../gradle/wrapper/gradle-wrapper.properties | 2 +- .../android/settings.gradle | 30 +- .../hello_world_flutter/devtools_options.yaml | 3 + .../hello_world_flutter.gif | Bin 0 -> 235564 bytes .../hello_world_flutter_local.gif | Bin 144881 -> 0 bytes .../hello_world_flutter_openai.gif | Bin 119360 -> 0 bytes .../ios/Flutter/AppFrameworkInfo.plist | 2 +- .../ios/Runner.xcodeproj/project.pbxproj | 8 +- .../xcshareddata/xcschemes/Runner.xcscheme | 2 +- examples/hello_world_flutter/lib/app.dart | 1 + .../lib/home/bloc/home_screen_cubit.dart | 179 ++++++--- .../lib/home/bloc/home_screen_state.dart | 54 +-- .../lib/home/bloc/providers.dart | 40 ++ .../lib/home/home_screen.dart | 355 ++++++++++++------ examples/hello_world_flutter/pubspec.lock | 124 +++++- examples/hello_world_flutter/pubspec.yaml | 4 + .../pubspec_overrides.yaml | 14 +- .../web/flutter_bootstrap.js | 12 + examples/hello_world_flutter/web/index.html | 62 +-- .../hello_world_flutter/web/manifest.json | 4 +- 24 files changed, 647 insertions(+), 315 deletions(-) create mode 100644 examples/hello_world_flutter/devtools_options.yaml create mode 100644 examples/hello_world_flutter/hello_world_flutter.gif delete mode 100644 examples/hello_world_flutter/hello_world_flutter_local.gif delete mode 100644 examples/hello_world_flutter/hello_world_flutter_openai.gif create mode 100644 examples/hello_world_flutter/lib/home/bloc/providers.dart create mode 100644 examples/hello_world_flutter/web/flutter_bootstrap.js diff --git a/examples/hello_world_flutter/README.md b/examples/hello_world_flutter/README.md index 6b7c3871..eb983d97 100644 --- a/examples/hello_world_flutter/README.md +++ b/examples/hello_world_flutter/README.md @@ -1,9 +1,8 @@ -# Hello world Flutter +# Hello World Flutter -This sample app demonstrates how to call an LLM from a Flutter application using LangChain.dart. +This sample application demonstrates how to call various remote and local LLMs from a Flutter application using LangChain.dart. -You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) -blog post. +![Hello World Flutter](hello_world_flutter.gif) ## Usage @@ -11,15 +10,5 @@ blog post. flutter run ``` -### Using OpenAI API - -You can get your OpenAI API key [here](https://platform.openai.com/account/api-keys). - -![OpenAI](hello_world_flutter_openai.gif) - -### Local model - -You can easily run local models using [Prem app](https://www.premai.io/#PremApp). It creates a local -server that exposes a REST API with the same interface as the OpenAI API. - -![Local](hello_world_flutter_local.gif) +- To use the remote providers you need to provide your API key. 
+- To use local models you need to have the [Ollama](https://ollama.ai/) app running and the model downloaded. diff --git a/examples/hello_world_flutter/android/app/build.gradle b/examples/hello_world_flutter/android/app/build.gradle index 48e93274..2c711c95 100644 --- a/examples/hello_world_flutter/android/app/build.gradle +++ b/examples/hello_world_flutter/android/app/build.gradle @@ -1,3 +1,9 @@ +plugins { + id 'com.android.application' + id 'kotlin-android' + id 'dev.flutter.flutter-gradle-plugin' +} + def localProperties = new Properties() def localPropertiesFile = rootProject.file('local.properties') if (localPropertiesFile.exists()) { @@ -6,11 +12,6 @@ if (localPropertiesFile.exists()) { } } -def flutterRoot = localProperties.getProperty('flutter.sdk') -if (flutterRoot == null) { - throw new GradleException("Flutter SDK not found. Define location with flutter.sdk in the local.properties file.") -} - def flutterVersionCode = localProperties.getProperty('flutter.versionCode') if (flutterVersionCode == null) { flutterVersionCode = '1' @@ -21,22 +22,18 @@ if (flutterVersionName == null) { flutterVersionName = '1.0' } -apply plugin: 'com.android.application' -apply plugin: 'kotlin-android' -apply from: "$flutterRoot/packages/flutter_tools/gradle/flutter.gradle" - android { namespace "com.example.hello_world_flutter" compileSdkVersion flutter.compileSdkVersion ndkVersion flutter.ndkVersion compileOptions { - sourceCompatibility JavaVersion.VERSION_1_8 - targetCompatibility JavaVersion.VERSION_1_8 + sourceCompatibility JavaVersion.VERSION_17 + targetCompatibility JavaVersion.VERSION_17 } kotlinOptions { - jvmTarget = '1.8' + jvmTarget = '17' } sourceSets { @@ -44,10 +41,7 @@ android { } defaultConfig { - // TODO: Specify your own unique Application ID (https://developer.android.com/studio/build/application-id.html). applicationId "com.example.hello_world_flutter" - // You can update the following values to match your application needs. - // For more information, see: https://docs.flutter.dev/deployment/android#reviewing-the-gradle-build-configuration. minSdkVersion flutter.minSdkVersion targetSdkVersion flutter.targetSdkVersion versionCode flutterVersionCode.toInteger() @@ -56,8 +50,6 @@ android { buildTypes { release { - // TODO: Add your own signing config for the release build. - // Signing with the debug keys for now, so `flutter run --release` works. signingConfig signingConfigs.debug } } @@ -66,7 +58,3 @@ android { flutter { source '../..' 
} - -dependencies { - implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version" -} diff --git a/examples/hello_world_flutter/android/build.gradle b/examples/hello_world_flutter/android/build.gradle index f7eb7f63..bc157bd1 100644 --- a/examples/hello_world_flutter/android/build.gradle +++ b/examples/hello_world_flutter/android/build.gradle @@ -1,16 +1,3 @@ -buildscript { - ext.kotlin_version = '1.7.10' - repositories { - google() - mavenCentral() - } - - dependencies { - classpath 'com.android.tools.build:gradle:7.3.0' - classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version" - } -} - allprojects { repositories { google() diff --git a/examples/hello_world_flutter/android/gradle.properties b/examples/hello_world_flutter/android/gradle.properties index 94adc3a3..a199917a 100644 --- a/examples/hello_world_flutter/android/gradle.properties +++ b/examples/hello_world_flutter/android/gradle.properties @@ -1,3 +1,3 @@ -org.gradle.jvmargs=-Xmx1536M +org.gradle.jvmargs=-Xmx8g -XX:+HeapDumpOnOutOfMemoryError -XX:+UseParallelGC -Dfile.encoding=UTF-8 android.useAndroidX=true android.enableJetifier=true diff --git a/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties b/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties index 3c472b99..11fce01a 100644 --- a/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties +++ b/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties @@ -2,4 +2,4 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-bin.zip diff --git a/examples/hello_world_flutter/android/settings.gradle b/examples/hello_world_flutter/android/settings.gradle index 44e62bcf..fd7c1580 100644 --- a/examples/hello_world_flutter/android/settings.gradle +++ b/examples/hello_world_flutter/android/settings.gradle @@ -1,11 +1,25 @@ -include ':app' +pluginManagement { + def flutterSdkPath = { + def properties = new Properties() + file("local.properties").withInputStream { properties.load(it) } + def flutterSdkPath = properties.getProperty("flutter.sdk") + assert flutterSdkPath != null, "flutter.sdk not set in local.properties" + return flutterSdkPath + }() -def localPropertiesFile = new File(rootProject.projectDir, "local.properties") -def properties = new Properties() + includeBuild("$flutterSdkPath/packages/flutter_tools/gradle") -assert localPropertiesFile.exists() -localPropertiesFile.withReader("UTF-8") { reader -> properties.load(reader) } + repositories { + google() + mavenCentral() + gradlePluginPortal() + } +} -def flutterSdkPath = properties.getProperty("flutter.sdk") -assert flutterSdkPath != null, "flutter.sdk not set in local.properties" -apply from: "$flutterSdkPath/packages/flutter_tools/gradle/app_plugin_loader.gradle" +plugins { + id "dev.flutter.flutter-plugin-loader" version "1.0.0" + id "com.android.application" version "8.2.2" apply false + id "org.jetbrains.kotlin.android" version "1.9.23" apply false +} + +include ":app" diff --git a/examples/hello_world_flutter/devtools_options.yaml b/examples/hello_world_flutter/devtools_options.yaml new file mode 100644 index 00000000..fa0b357c --- /dev/null +++ b/examples/hello_world_flutter/devtools_options.yaml @@ -0,0 +1,3 @@ +description: This file stores settings for Dart & Flutter DevTools. 
+documentation: https://docs.flutter.dev/tools/devtools/extensions#configure-extension-enablement-states +extensions: diff --git a/examples/hello_world_flutter/hello_world_flutter.gif b/examples/hello_world_flutter/hello_world_flutter.gif new file mode 100644 index 0000000000000000000000000000000000000000..25058c3853c765f2244c53b6e10bfd69bd67ef44 GIT binary patch literal 235564 zcmWh!Wmwa17yhj`SkaB6K|(-8q#WHi(vgl71#~JRO1Qx&35O!0gd;^n93dj1qd~%f z2uL{)vB3Wn1C@AvpO4R{=Q`K9&$-WipU2L@*63i+NyrrV7x4dc3jn}^fFK465(Pv= z5z;akh#VldM+Bn?QdAOCQIi(b1omplY3V4+?1RehL*WeoB?H0z25N?fv<{hQ?KQ*8 zm;+|!`udhq7FLE9N3Vk(UJhp7 z1V^&KIkLo2AD22G9b-R?v;RSNf1;nin{}X4K(L!fhiVi+sjpVFHzx|5%9{l*oqoA8vvsL!`D zZ{B7yi<52_=N6Y-*Scd(zvGv6=iD9E4c9X3vWgq=m8lh#MHi}!@zr((cj;H|1>L<@ z+Ix?BueP-0em4CYd*Dw4fDN+j_I5^+9@@>67;Q>mBTY4y%sNrl_YU^PfF?%6U}TeYvZ<Y}IJPgVFTn(V{1#B^{%+ zoY4m(qdk>l&)dh6+Q%yDUevH(WH!7=YkqOP<%R#~i=q46mU?b}8<*P4ZEP6NY#dK* z9#?oV-hY2$@WI5v`iU#86W7Nl23V769h1e~ukKI28qS-FzA;65I+f0xW(-e1c{RQMUnal&_wm24AOHLI?(4VrAAU~# z__?&x2a+|S=%|NFD_>(AQnKVLp?aer*R-`f86ZFg~ZclTfNE9?ro+IV_cS&(e> zb#x)1|6l!v!VthNp!I)n{2wL&au>L;R`%-o)WZ~{$X@?Jc3Dq4Udgf8zM;G?OLG6o z`N4*YfvW_oE3yual|$FnJ!<`j8mop=lWA*iQ?hyH;`pfc)qQf)$%W_REDt~UlPhvgEp@M(B3{-8v(I5w*yQir&8x9jqipF_eU;{JZXCWf@PH@ z@6y(MVvMp-8aUea=<~D?#{KUB6Rowm)^nf7>LNxY&6~1E8@I=&s$H@T8_u@f+WhjK zThSwT&dMeotF^_vEq3jOqBXB;&+f@T+~2Mn2(Mjd?at1>zkGjJvd6&jB8SJWh;6if zG%2@AmkdTKynWPo%4@l$v^GwPq=ifqw8g834l z=Lpb2<#Q_6{3hawJYJ9E}p9N>{=h2eG@lp2UHnQd9{pC4gw~HXYBRrS-nYpHe8#{5gtN6cnDdl?-r}O_$I0+;A=yl?oM9QaF0{ zc;mRvG*Xcsx~uPe2kG`XdEZLs*4yOC!R%IM_R0I}SDX7joJB$hK^SeQ#!Dn*O;zN( z+H=6A=w@k|6G4c52@#y{*Pg$= z=v;@Gn9-J7H%$?({Z074A%uq43WA) zqFw3uNgT2WD7}fDc={(tA$C8%?@@7Ax}-lV$&2i|#lH)+V9-Ekd2$e8CaqMA8b?Gc zhVPzQLfbGDE=HbXsMnssgzHvF*k&O$``Kwhp5TK`&C`=KNj9ES%420!d2;AtQ|^_wtZrlqfP zg&<-k1gAxy2^rXEAb0?7k8b(_v;q_rN5_RX>MIy;+{*6CQ~bt6*pbg=PDj}8Kbx2J z-mLO)c&pXHdUBeb(Xh>xVh`0&i$-1B=+y7AJP;_Ucwrv^Wk# zF(})wyg}qSA4Z6~$ha(E0XWd$ibBa2)$C<3RNX=?unc`rnFBu1Wh02Bq+R-Bi3Z!> z@F2dxNAnVYgW24d0hKYB4A8X*wx{99P8+Yf$=H&`Q0>#tb*_z zys|+o0Wfxc%XCSp_I_L|K~V>b6t=306j@|9u7a<)g=S=a!DpN@U4C#?m(dj1aL}6T zAU+eHXMzKc6b_b&HT_l7SNhs?c(zNzxvURBBnw_#Ut&JjwUSFLL#rSG!3s*4B3+yg zRkoC>r^2-LnSkwVGVUC{@}v}n?z)v*)zVxiu7e=&{nsf3%bHxaf{C<4#8@n}=AG&a zC*$=vSo&N|g+Aq`L&e1Mg`q(ZXr7lb<*S!-#&Y{>Wn0~Tr|eVJUJ5ez ztySm$AbnPbd@kT2v`@Wb5!(*gOCen6_ms39f$Qv6$u-T}*Ro~s6xAXMfC$v1pq~dd z-sp@9&+I9E0m4fphAQi1m!XgFr_p448nk@x1jL}bpuSQk26YR1n-P{El))jS%kCA+KrmTtN4Lso#k{5&bl6I3p| z?arIB3BHSzjIfM2S!ClJJQGfB%jhFyy;$Sy=oLmdqI4q8lp#qihy3kGL2Gb3LCv8J zj;BXs?a@*3P;!s-D^lv-MyarGw;wAk9uf*{7%8{Q@4}xt-z_>j?8?!ACe%SaCAh8W z_A$xTr%u6&-^o5R)_gmrH@Kf$^Oa(SbqCA|#h3oPMU4sM#6H^$QKEePAY;%jl1JfW zJbd@z=ARbAWMRzGUO8~Ino6u$VTqcqL#k~|vUp_Snx?JX3D^nDtGdufy=wJ0f=wY) zuO#osRhWjImR~U$*c+>DtT3%ncMxy0+Zx#5ksFy=LID@%lNgjThz7+ zkNeGcOP>9QV?a(|BU_t}@x z!D0M;aDx*z`i5N*mpn@Hy$AkOKJ+T8hA0k_sj(wA&X5{CC7UD7{6qJrV{-F>c)mc>FS`v@cN8`F z>Z;$&wFC!B$XOOrRU}4=NG^!6H3J0mYyfp)@L4>_Ow|*?w6vW{08`)@u*8>-Q%uRi zf7~uizqg*1Kr>nJi@I9M;3Njym0NDMJAN>ITCMl&F=G|beiGy$6+&dCmREY*Vt}78 z+-Ss$1?k#j#85yK0_Fn;saD^TVcE4+1dtA4pEwcFVW9;NreJP*=%#}6D^twKSE^fu z4o4);+)R{_K%Zp6&(lyn`r2(%YF6FIH$icl2UEkuK~XIcC=Rm40HFkc#JP}vpxVQ6 zR_6d=5qKhj4)h8-FhMvbD3Tg$7%n)(L0VEkI0{Hd7je;BL?R3{bsjI~nKnd$tK)O+ zxF`o*a4g&NT(7-U3~-9+_uCfmBzQ_NAs^BNpG)SFxkwZnvNsf6N`>+^LBV*G4iCao zLZXQ&oD@M%T_<0r-`&kTf|RhY`-K;zYjtFAn;Q5TJmS1=RtKMPizGPREzr8T=etd~ z`|b*=gpV?PeH~#(Y38FI@&wr&%0rImi=X~N-2&^1p+mYRmM9FF>X)aga6~HK>V&^j z#q}3r;&nWfW2RuCQoi+X0X8e&8AUv%;p?<<;{iZyp&gn5i-#P_f5^EWgu3xFG~e+f 
zhKX`xqt#SV2~(!$B6IRGi{`3}?r{n)A_^Ya9Okhh8H2Q7Cd%8LG1Pk1vytJoZU0=G z);J3)35f7rfy`kTOIXZzLZ+r})E4Q}I1*@Nthfau3KBG(pyXLDL47NbITSz=1yvUh z%ddueL%>_##U$a7v+TXEXjC0c-~arZ&^bJK=W2ZD(d( z^Uu{;E_)W~dY2h?=~!k$p@dU0nbBVwKx&9$XA>s_6QB(%T99y{n}U+EqlUN0=3E+DrDXwN>CJe=3K4D;4IES*zo-B60yit;%CpTl_y_=2q? zgiTiYltAkNP!5;~mXp@gOF#h(DtjZNUs+|{lDd(8&}Fkxc#d-8X5)>~5R5@JQnP$_ z05<%sx7T0{1k7|+G~6cu^xoCwYBdNrBbb)AbPo`Sy2SrFV>ct>e0_Ew(6ZK&0C-dV zulG+>v4$%jJ79L@xE8I#9&!%6=ABLfQvqk$s8Yc^s|L%V?M1K*XShZ!eC%#5EYCO? z+#VSB0KuvKWF#rsSi49z>!sbU9YsMZ0)%>CO`Fvl)}=2adZG|8A?Wf2}fP$xX8VF)qS3)Rme1jp`r zi~(MO0xXyKC^X^6LXXUOu=#j#eEakb-^av9S8S1q;vJ5U4F4=WF5{FYg& z#M~cx(JBjD=?#H>XhVH?;6KC$zl%mm?QwsX%fwBYPODRUw}B*opgma5T-WR(hr&5r z!|pue8(v;HL=ikhYVuq1q`d)k%{($lKJ?Ko z-4d}E{vwCZ!!k5Y%y=X6=4ylp_()s^_V8z$i(eI0xsxc#zUEv)fSO^gQCiePxtSRt6{> zan1%Dx5HaJ9eOq0KFZZR!A6S>wE|h5de8365H2Q_BMYk`+7A=YsT3&AHu(drkftI zkLOy-_VD5-^uI}UI}6s$ufoc(mVCTa@B7sC?oRjJ8QjBN!XDk7fnGICFy9X#d{;lB zO8dLX9p@VWn;2_pHiQ)dHo8DU+4SbRm!rk2gOV>FA|=VBds|l^JxEEC=>o6)8gA_e z;u*i~;n@d%zW{1oDU@KNZP>mE=MiG*P+>&znhE8Vl@~Z%VC7M^IO*lP9Vq%3pf$$S z^(mnCh1JPClU=teq<$2EfUw;uK&@A&qY@geT3cV9h&}rFvfLZm@nEn*1#;{40V8R> zI!U)dwU8_l=y<}Ox~-@(Hu`uZC@J08_PX&Ndo8T zbJ;E0`kAfP9L`!k?A+_qBQ9Sa54o29cy&Q5>tJ4BI?1MS>(<>pVF6mQruH`uK11f+ z(k+$bRUn|%@agIim7BxO&Y;_-xXSAXUh5!5-(=KwkG%LcQu)F6o5B0icA|^`?e<#} z$=6oj3rsAuMzCs+^RrEWn|le_#&`YApCgT6o9D~T$6fq~ryYYc)dFtahxh8zAIk6n2r*>|%!^Qn}Napq4`!@v-!AGTMs12ilu!6d*i=3j(dL8+hU>n`2OG3QzVly?XN$gn zY5G3*@%#79?`-+`MT_}mpZT9x<~c?4YfbZW78o}Q#Hktr!m}Yq+0ZC9yqGQcj4eFM zM)BAfg#}Ukg80#eEl~@S#S7BU7Gy^kgbWt*eRwo%LW#mjr1Ejx`a@8vDKD*V`o z|KWc0$8NlP>D3?J&wd;p{o%v=K~(tZhyNLH^k-ny&!FO;Aj{ZE!`$(-koOnFv$|<`h#)_{W%!~k1V}o{KG5Vy{ z+~U>zXRC#yt3|xk!g-J?|J`|><2|vchg;RHUeg7v-Q%rQ&#xKd){SxC(%-{XOF|*a{2dqHM(Y3rN@Jsy0tD_rlqBe$#H-?{W~UIaj=)pAOb0K_<|uY5;ET=q9I_w{~h{p0}}t#_%90 z6;IZfWi&!!XTT6^+&W%Fb#J-nz{KukktKwRqQ5ON-Owgg#p~siL)Nwf_W~l7?UIwV zkM)&%4Nf~38lU?<^=@#+g<%Ou~w|J~4k0N zpSn9_`oqY|sj=o5|DQRXZ5{O|S97Euo z96C4t`SowdM?Y??$8k613>Ea_n8G_PhD73H20lkslTI)cyVj4V7(=8%I#df&zcd(3 zw7RKcm^N=Etow>&jcRO(OK6f^npRLctl=9gSG#UkvtB0?Bu$MVl@z9+h9|+)K$!&v ztv&2J+eS>pPunXc=`D8BehP}{ZHI!UibY+6ri#-}SI!iRKo(1)skK}?DEcQyU390= z%NCHfBO}eK!q3P`L1i-Q%wChP$)QD2KAp~Y9|Uq>($qF)#-6P<^hFom)7zi4{G}wq%G@%=tpC%`qC= z@I30Wz}CPKzkM2FTv*2yoedHD?e{d#c?A}nd*gMblYy?2#pf#We~{dlF(M(DPWoBK z@`8dxgLv%0qYCHl?W0-@y%8|Tt0fg5|60G|bIGu+qZnWD=?qruJx9GzN1*gmv4!B9 z2??qbT)p3iw<__H20J$y?|&%qZie*I4~~Djb%k(ol&e?m;4S!e?Sr(2o><0KkA`yt zS5(efN@+YFtBF>&4K0gy=e2kRlqMOh&EB0DIG}CuY|s}bwZ3RD$dfZu`YNChV1E`W z*SXgDRr&(fG1U14p=OSJkakP1d?aE$tRc81PQla(F1>w7Z}+-;kYPssAkrW%aPLs>Q@yWxanWH@Wmm- z5&n`l@FDX1;}xHV#ut&PpTBe;{d(aPX>{}A&yn*{I#=3??~#|j9!EZodKncJwYh$C zNAcx3Do*PD(G3eaO?u8!;p@^-upEbQdrDd16N!!ifOA2^^E7InHE(S#91V|kjaHsZ zfD8^Ge5C|6WAm-m?cW+*&2Wv?Eic*qQ_=E$eVN>sF<Ie9VUH8O%_-38BzuZsaE_t_Rpj(pXCBLmtIlFPOZpr>C{!+bUTfGXmloJ8|G9Ng$ zM(u8?XUhHMzLD+BrrpvmPWdZraO?=sed%1 zwIUsy0{3MWOa-W$uR6HK?#n7uIkwF?a*un(zH4^^j_o+Iy2rD9U-td-V_IR6j)$lB znw()9k~^{M0dja3&4)SUrV z4YEXDaE(@TFA+KxWG%Pm8gJ`fDt04ix7G!>=4)>0vF_!{s=*G<7xraU zxZl`uEZFhL+P>U&_X^z`!Ome9_7_gO-!%Oc>~eW+KLdK8(pojdE#-oHx!UU_yz3XY z9jsRc?d(i4ZTtJm;hNil2kspD6mszC+JWlW16BU2C%k$uc+|-!+~#@SQFc&!UDtl# zUUp;KVdqCcn%TKbBs-Y!kKGpvP|x3eq5j-`iO2u;-}k)#ofG44<{iQr-BAl|!L7EL z6%Y7LoB!};-Zgx|$FIQS{_m2HKf&ey-QCxys*DH2s5bFeud=cB(=XZSpL)9fkOH;T zu^v|C{yV?RPWhm`?xLq2GM2*)zw~s!WT#B}&DXK>3Krv?nLGZFeB4iviQD^>56a`~ z=}zt3AWsCRpP=#1Uz848{Wd%JOHXG$^F8r-g6@vdZ#wlWAO76TmrBfh@Esnb+C>fz z4cJAF57@lXF$g+6Odi%;a?zN{&@Up7Qy=ofVxT{AVx<9W#R?>v*QS$iP{Lhm4PLyw zKig<_D8j*LFna&d7#>Mx`<6KZrl(S@5C 
zYt$W|*^YO6AB~jv5k8%7aG9UYMCQCdo3NFn308^Ki&1t^N(#v63>uHV3YC%!$+TCB z3JrguMP7$xL|*y~;%A)jyLp3G6AIt4V-uXnT4pW_V?kPf_0v{>p^UY%mP+JrD~ zv0uhsGknIWJmquDRwWfBl&a5FptZ)V&A{y%8?tYx=7jykCo#Nh{^wuda;=jE(iOdE zzOWKz8i5<=z)z@v=mv9bi~SlMhdWbaM7waEk9mh_y)g}mOYP_hrdW)`9BHvKj8rYQ zTYQeO1ZLj&q1cBOIN{%_kapz6!gI;vpnf4kw?_sAN zgT~qYN-Gn2U&Sl}MFeuIHm$9iT>=!v#7_0twb6Ji3OI@EUbXN`Pgj3LR_uY6{1A%* zyLGd42`ZxV&h~}I4%h8;HTF*B9=(UDs%l~u-cATdYjZq56&^piJhY~H(Y zOqHui%%$j$P^e8{-t=Cd-*&AgDkmJo1l^Rv5M7{&4##edkU4+a|Z|Gu+V)8LbjLd2c0-{)n^nDL; z&+@~<=LeboWDFUUTq1m&EC-t2CSrr7I_{Yf6~!ouSo`afZS&O^N?d-HQIc?K^hlSYmRmVP^N)l}= z_a-0A#8@n5GK5vP2oO81qn zw{30*B8`AmdCT5ga=YARXQn>kL$RocwpmB?()*83H1?hGEZIFl)-)Lkzb&_oC1}G6 zIBMtU-MUe)83P^wKfW;3M%(5z7$1X^t%j`f|Ain#9oU_r_QnfTgv1xUo1xLqk%g!yrg23wj_B16k|jLkvm(7n zHpMqN<`z+?>fxnLGh7(76PvES;m;Eohl>>)cyA0kK}SiFDDW{7t@nU^r#PD+^PpsM zm&_Wg8L@O^dq7KG8!c?e^;+VYMMvAo08*&3GorXWC1`9)J9DRAshxAku*QV}Tg@cV zYd1r->0}h_Ei9{abpGo7+w`j(ppWSBu&+Mr-uexF!oNj1&-vX{8|v13P@9t?8=$-X zj`b$(ewp*P5V~>Go4yAR*1-x#FRYJ@p&mALIDbD~u>O8_@54uLd%s6KUjM*OefVU? zdH%x4`bQ4mnX=M5e`S4rbfeuie6t26TQWHYcWG$Z+Q*JH<&I<08rszNE+ih}PDu7Q zv>Wy0zHSN)>n|puUKD6=0v;WaRS#?a*lt1>~z8gaw zdw4l`c=e9PD3v z^LX<|b=uRp<1XvNBbz@P`k#J3-@pEGeRGA0Zequ|a3^GVoKBaf#hiZbv?*_umDaR; z(`Dn!A>P_hf78#0{Ttse^47=D&v={;m(9fj9(UH|+1lIw&7Y5X8|<`a+!+_%+6Zrx z)BkLWKcoyP-6Qb0x&h$jU~roeM4f(;bmAqt8^!AMd?ji}@fhs#h zmFG~gk~AeFny^O1k}GL3gSNeara460$)VvS={iPqJx{s;nQoLzH))`o4bd$)bb@5G zl~J^fXS6Lj+CDdWPeZiRQ1sqh`Wz0ui>l({8RJ2Y@yw0!ZiqQN6yw8*Axg&j@$ZN_ zX?996pc<6jzSz*A*pr-Cl4RUzqqwu4aS`OWogCyG7PCr;yTXa1NXFBQ;-fv|W6ANm zI^*IS;**EsQ+b^DbjgIP@o`7r#$}Qda&r^%8xjhK64E^riX;Jqog{|r0B{-=S~#6I%$E43Y|@QAer1^l-%Z-d@w%ADHk44oqQY}5F0!!&MTwofzf%b@;mU1lZCmhYxP*|^wYa!^&`#FIS>73lO>7jJgYC0w%VYV`Q zdn=laLpK@0ODM2ltgv>wQWP6G%QDSWQlMiIVNA{KM(_X#R|RgFPg|Rol*QyCVTSs*H&g z7cyX@kJw2c$%8t0>F5w)9t>a^1*KOFH`t=WX6$>l-*21%@gQxt-VNPBfsHeE4l=#; z24h+K_71P`l2JIMawW?!IqeifOPc5 z&g{X?+^)vLo_B@Xd@169YT!XXeR~KAb3E>I{(}<45&*_0@JwL5GkKp?=h)z+YBx}1 zpj|0akDxgn?5~3pTv5v7vHX6)y>Yu{NrA)F7CbhRpU)n}<{mLbI}%|{8ai|7g?;ZB zs5OQ&;yU?U1amjt1_%F8L0T?t;8nwAbXi|; zt{P%_n<1Q7er4?@MLJRR!r?&JVQp#p%NJlB7IccE5B;RP}5FV$Nv)=1uo!5oSZ?f%U?K1IPX8hbJN zvPtc4o;6YhwcCZ__FRal*DsdurgP)zL-8nWo_4fuQZ##vW_*!8ra{+`p_>aOs6DQ= z6)Mq^$@gs)OR2xND@eJz^Bzf+ub?k?g(n*{3e2t158SEV-5`7@J?m#>vgvG+%S65Y zY2~0p^f|(V9HTlrnTLJ1%Ed9-gp57fcW9f&yJ*I6AtvH|7wQlJXt9X4XQN)`r)-DU z+AYEglyh&2s_ovFBGH|=eF$KaUef!hVXzZb^1M+vBvUZIVFHJ?VE_^f9=(fC2{Xl5 z-EWv<+fJ!sIsip?gpzuPtAaEXl)RO%uQ!edHM*GAI{VeO6vXZBZs>=nq&zm>VgF<_ zx3J^l_{4m!WRaNn-XPEb%U@mfj%_NGX3tVCvO(Y?>v^flVf%q7-jVThE zrqXj?ud4cjee3KyPu!DM_G2KI*s6QDbz6hi>eto{9i~2Li)-jpcc(;Fq}r?75r+y= zJ|RG3w~$I>k~#x`05)oZ0Q6uWeF)GV8Y(BfU4KFixJ}rN0TgFKa#$sUIJgHvTfYT0 z#D;nh?6o>k$$kPCZk0Ikn5b!tkPBPGgM?rj!PD#8;%l#Zu%PIQG;JlssZf}UC~Aof z5vM5Eu<(o8$V2Jb7bH9Bn(0w&bb&tF01GmxircO)I8Q_^VU#be?+hjiM5-usj|p^F z!#x097sb%Rs$v;W7_)>IJB%t&_lu9}q+NMWJNZPwhjC+DF0|0KCp5HmmeP}1`(T!U z@M869%5IyhilbK>2O9U(xOSZHLCAdad|!3rLMI|3vG=g7Pyn_EKDR%y_Q8QdRv!HN z4vp*TY!(JNu(RfQlrT()P0Yao4~YqBd+cgz0PYG$EZ`0!&LBXzT_-v%Byd0@0%C~( zg9E@FyCE{X7*v@nP=<9)4|lhWpVMGn%L?#w#G!9N4v!wXCjY3J#{dTIzz{x^-qU!* z!?g3Nf#nOR^7o##I)aNOI~y(x8eiA@S-E|V05ZU&H_RRC2>=NAqb5{6rfQ3KmH;fZ zOsDoqJEXj|*~4`F;CU)tZ+*|;;P&f024M1p-zfuE^KA;5!C-7}|AiBp;F4xlG20ys z0EvQPV*_|3kV%%nXDi5#6mU+p*C9Ozfl0rIsu&`;Ux)zmdp$gd+u2bs0Kdy>*tnBz zM8}JuZtBzwI~m6?(K)IpNj92`s{xe=g?%oW1%TwcPyy{UE&-zMnl@LhA3#AZ;nZ|^ z+fXuW@DkxwN#olKvqNq{io#1z#hC#qpSGjlyqNAqX}{CZu4u@xb-h^EkDIBgIQc zM&D^Qxy^pSH0g&n#>sCW_VMDiM?y+$7R@b-h} z48-rl7Mx+4mBnsDx zE?ayuM**vhjnL#IeemmtzLbPEay2tzjx-8yt5Edoj@p{PS6HG918 
zXRURwy`R0_b-W)r4!?mq{Lkw=f3=o1`rMEQxbvKAwlRi{ZCvoEI|=WVM@QZ@dS^=1 z7rj$T*(o1xvG!!U5cxgmlWB(iAXHV{v1@Bbi4cmJ_WrgFv04-0r^KR(b`C^$l!zF? z6j@;Mz5!4PWnXl-*-^-ce7T=r?^&LC3F>ln1Ze#thzCSNK{eFm*Vm?FZ@&_}i+lL< zYZ%)Wk?`&5DZGF#$t5_x)f(`gKE5sn7^(i_trI5#N%51QGb0}N25k@ zA|r-q`Mi4rwom^a$oQrk3$t>rY>mg8*~pEA-360}19q(N2?T`3_46Bqf!jz8f9Nm8 zAA{r&vYpTmL|tZiKd7xRKt(xBepoEACt@cYfGd)|>&D-(2Hwv+EfwML4cnjpYznoO z!y*sD?8%3zp#*P-KP^GX2794d zDR|rQqQwL!$ylSKhsj44hZjA%XdIp%v%VlOJ36u6(tUFLf{5L(fy)2$aTd?0*(cNb zV4d{6OBpW$XB_*=jJF%wUFGpx?H8dfU+3ubBEt4q^VRicJX>F!MppbNVDKI*(w?P% zzTO+Kp;Y?&^cRa9BUDM$qD*_aiw{fRz!C+aS7etZ8>HkD&(ZetC8DN`hEcy0rlA+4 zJxz}zai4zRVyAZe#RO$DkiuiJuxHlfJYVOqyzt{6?92V|YAh>DxC2geE#j>@HK+ix zWUxh7Jh!{=7e=ITd44by|BUE|ut?k*b(bC%gRzr-&uhw6yuI&gw!cafCUQgY2)jsK z^Oa_2>S|0p^|gYK5>pYy)qD1i0IW`S&}kGw&wZ@YL@vYkRb)Uhd6H;Ij!hTp77d%F z%#Q*(za9RoAoC91PB@l)LK&djPwqQWaBHrmN6OSs^mZ(pJZ)MG1o1dGfsKpZHum)r zd5}nPGQVv(&+2DIM`YciIuXEoV}F}^cZ*Dvb#<{)fPLGK`Ab0ofjlRVU*t0HfP=)6 z8!{4@!`HFP&fNirJW&tmDS+Q69~@dPD(%1u#>9Fu3#kaH6k!@KQ9h(?D!~PXu*a6wH~q@5F(@?6Uz^ecl_KKUGHq=R zpz=yLe4ansShh}sXP!Ae$G>0uP5S|U)KPC3(7s$JN=Ip^10jmP&f&f!Z&182#d#xE zQ`9UX=8bqW=L#L_6RLD4bJ3b9FKUDAL1ZM0O7_#(`TkE1Yp)DgeUVSyO%e82gLCkP zGe=swu(T3g;hUNMwh2NlXSPZ8a}A?e=K4u8L`!YHg#7&dJ^J1f=MfF46tSoIcLAdk z=H6YH_Xvsp(cX7dPo!>>`}a@B5O7UR^UGR%%8OGckh}uKn}|_G6JTZRo#KJKtMN*R z*F~-)iQh*r9SJGDth}C+D7DY&HqAfg*Y=KdB?EXzO@Ug8Mg>k36#{GM~*y!~BBV&G)uK;O-b-1-HULK|zhD znkVOa_5c4I4RhGJnI%bR_==psPKzCtIH;JTu!1%m;S4!q)47} z*~A;U{Fyhoyaj#l6e+FUt|1k-=OMMW9Vl9SSr7lp@+g>^sd_{CZ}xTbFnqYnfqT;3 z@Wq())N>l~BvkTaP9Xiz6II+QQ)iY0Uph?%3(VL*Ju8*$HP0%F9CF zNRZk1W8=o$uWpWaFU+Q5jGKyi+?}<9%;(CCo2wn%T_0YUFAf^FG?u!%y9ZgUY#Fz9 zes%W@xv*FxF=^}L@$gO#vi!np(mv+k;q&^!a_50b$84#`)7qeidyh>zm%n;E>%Mr1 zi81LibD**rle0Q1GwJ^B;2HGk!s<+)4iJ5pfS#A`(dCxOyPD$&yIn8T6^SXDu*~hA z)zKnT5|ZZ(;)P(lwC>G;HQa$?WFf&el#bbLKjO+`?q1q}`=rQ@NxkE>gEz_qwHnP~ z-iZ(Q)>YKb25y&mCsQmxnk4TTl-t@5#9Bu(9hnX*^FDc=9Bj{hfi+^pPW3pxd~VOL zdZ%qf;N^?j;IB73Ih%aYPqMlxmjfh7o7B&qyqZX|r7kd=aDD5Q^YQYrjHcB1Ek>Wb zAHmy$E=*09gFXdUtczdLPT0eEeG3oCsg(K5rxPFh7PEbt7gR8x$!KxEC%p2J9d16G z+u{;?=gJwLyD*zAsa&BQ@-4IGCYcy|4?Gv)Vv0Ctv9A**kAbI{Zs*MYIE#(sON=)8 zZoZh~p;CtRMhtLWaNMzu%8k9+dvM#7Er_vK0iL$#dTI@7s~ft9y!xTqil7fd_A2-a zo3a_$fNJ5ftvy#Bs<-`b9aqH4?}c~qg?dHZemY0y z=-kLxj|FzKOhq7o#be_$+GP~bobgjE!OhOO}>B;?a(JL$}PVNIR=b9ynd2B zWVv;_JYd2-)F*G-a$9aY;C;xo4-)WjN0~2hIyuz0gzw?5reomj>ucW%)ra2<$^++X zL!Z_-KKyR69r&U9`Y9^*;hr5|(8q~TzoznsKU^JymOowlwGBP|=~Eu`=|||ZuI-1v zg0_QJudbh=0ah3UU+_9rn14T?)qbL5@FpA9e^}M(Afr6^t4LVDxTDo!?so9@U2MQq ztkqF5U&yX@Sm0c_)p50B$oGfXz{Me}lg9FpAMRm6E8AA5o!c=Xze2DC!g5NNW4lMj9{div{1*_}5U&Us~p-Lhye>(CJo;N2 z@87O3|HrB)@Z%N5(0|i1|0{UZ_jd%+Yqg*AKZjbn+km+(oqy%7{zM@EUG8dg;KaS| zUufE_Vf#H&IgwqvqiJ`xw9naKtR0v9#i41b;YrbN%l|^tp1m&}DL8a+8*V8rVgEwY zU43i?I5hoqbEMF9^-l!y$IKhthMVqN`Q^RUztD8|24-t>tU&h>SK#R%Y5Waa=oJ=^ zgmE>bpwgI0P9tQ~RA#55j0!=XfPlzttPpdC{YjWa68uS+4BR;TC|!TKIe?p;Enc%- z6($@K*Rsj5b+DRi8YU8st7uv03 zU13On}0rfudMavnU1nAm+ zze4qECjce<3+`!#THu=VYoU2dMFlJ+Ct*UIJI#3))qgUyi`#IsNGtch$|#reD{U^a z-cUOL0p+tz*LmvWXf#nS)6Y{&yc@`MIb>wwT#Kx(mHQPm-*Rr13@o_5@yr&)B}|mW zbLj;efl>XqK{giC_Fk{w@40qf;AU15tkT_CmQWcR&A9g=>v;2++hsDp$eUCUb`62Q zM)(UdZ@m+4)hZ}I6l~He3>|P*mf7e(jH1&h)hnbdY&z5C$3k<8q{g%|OuTyIXl}cj zkW2jH-$1@Rm^amIX&AG7=Kd*n15dt30gRoEAtk1xWON;KQa<7!5D|d$&m8wmLNr5i z)$;tau{FuKC?;Y;L4NHll}!iR*RNptB(iD7OjY!btwoUg@2`z)OvfU)NAZT27_$t>6vdb@TY?qG31lOtkT*mKO_~H74e(EN)p5i;Krsm*XEi z-$hU3w9Lm`4G)xO2{GF8pOoHl1L-4 z{mN{j1bQAxC>hoSPCJWJd}mr4 zki@OUHNVIr+_9eyWwpJLDwE93HQ|nRDw;vIwZ6qnlB+C_NuR_qc(bNI*+0BF(;Go0 zWNDj_Ls`*5M>$F(Tc*?*iz};? 
zjSM$f5AL((q?!Bm-XIt1?}hmz7hh2N@Oe}IB$Kfo+R6N))(0(hqLnSSDjC5t+%;Q& z6)-Z(#n{}0B2%Ghs~IOq>FMCohd~T2Ru$;cJVHH*M!?a`Lrkn2LeTMbcL#uAchyX)1k(o;B7fvxidBZiurpwq*4*&pL4K)F3HhUqg z*l>^r6@VL_sQ;^Xu9BHtk*b5@-Z?$+mBw~i$OV8v1y!Vc2L@@K8&klMQ|fp^Pc>vz z9&|~#g?Y-ewv+lCY*=AbR;mLRpO#ffc=Mc@KJJQT7~kwZtdb3!Rr{sCSM)}-l;IH- z^bA0?15Py3#v>~$QeozPT2667?de}XOY>sJfRfe|5-P~WH0Q_QjU~NFrFYhMbKph- zO|t}Ul3nSZE;3odbp3(BMlC}PBn{nygYBdoQe9pI5o?O9vrY)!ne-wI@)oTwV|3mO z^T#3)bS**sY%zVry%BK@hR`6|2NtcwLU3CF?xjd5$qix|N)O4za6$#L>V`h%v*=kn zL)w}TJbPsJlGWy;)`MUXW_fAHYfy4tIxJlhN9~DF4w!`#I19;^D`fvD@$HgFbm#5kCD86_MgL zEgWp&u|UsjokIJX3H_lL{j{=U@A2x(s!tD-D~->ATYia7T3!w(NMlF2@4O`eI@0Cn z82RVK0T3=m3l(iXaEzWL1%ZIAd0`oqZ%IYb1{#eJLS%V4LF_c{C`QX_tCm&q%O9dy zUd=suXk54%WGm8N@<^MLC2cJ&)qJoxVJIa+%*ne3o4%Vd@Vv-pfj%-y$X+O;n>nJU zsaE=0_33gu^T_8Fy!Z5~PrF?he7I{Nq^41K%6fp24G1zC{?>MHgvV2h*V<4D38lw= z&=C!>k$Z1>>fi7Da!tJ1Xu!QeBSGLQ z{JBI*^4X8^b?-_Tz3bSE{c_72;%P1Ww(zWjeSqfnOUcBnr|)i+F*gre&(McpECk@w ztv>O!hVy99R)h~lU4RGqMaQZ;-r0wQOAeL_IF@mH40w-X-q}~}XfyfYxP9@aijZ{! zOuUBP1|}r$oION;eFGa|pA8uXNESk=LNK86SUi1h0l1+UobTxy6YEBXi1S`y!(-uJ z8Aga6=0~beYMyHl_5djlJbkzX&dR6?9`jT3kVWtreAsqY;v=Blu=a;M`h^V8dMfb? zqwlC9#l~PXpsbfFPAq0~2=&Uet&SwpMG-8N(n7!p z9&tn%Jle$RA?1liY9!N0Wze7 z?mfQi#v`x}wMBp(Tl<-Py~v!c@$A!Nd<;}w9vI22;Xjv-ZkFUcfrnFa0i&TpSx}Rj zzC{0UymvBSIDtvx8nGh}*^XCWwhYw2NJeu#@JWT_)3)TCC$Y&kuF;10T1Gw;5OA!1 zz%LAlJdGHF0sR=ZmPwQG8vydnm{{r?PVsuFfvK!d%(>83)e!)PG9O=Xf^Im_rJi76 zmc$NmoBU=9X+Q1wF^F-E_*0*4@B^(KjOg=iyxL5L_1Px_(xhA%cp!1Q@F}PRMU_MB zivN;?*2~z}iy#^eESCm*Vd0#q>9HS4gI3NmTsM+a@Zpvls_-+=G&Hz|@TNWi&w{Np zM+Jj>qk;RTEAcb6OVfL{_P|dW5>?ac^anx{;?B1b4)*O-!`pmm#mpl#K1dnQK%Tx3!C@~so8K1oY$ha4-b!| zdI8RYHQx*4W(!`I{zkx0W9W53qzH_pmo3SzqE%0|*9h9ThBb)2z~tb6_33ovtX9u1 zyAc%Io;$?(qTPnik>`~GmyZl0`Pm4=3PvEZC*xb;y*Y7@HZ#+%JFl_pxow4zY^K-o z&tEUSQl9($`q1X}9jtHeXuR5qZC>KX*Uw1u_wKN7ROVwwlsZ1b90 zBm~mpPO~af&MVTGE7N5wUzk^B23BV0ROWP4<}Fp`pI0K8tBPc+O3bUu0;?)=s;WAw zYL=?%&Z|(&)eW-Mc{$)i6i_Fly5lc2{r0>X&0NzfThnh|GZ>UeXx^yd2fFElNOBCixt{%^9ukDoJqJ;+H1GyB@Z->QXM^zf`kNLFA}o#Ka*YxejZ)a4#)M^#gg3;){==KCzo`f|;N7R_8iO{=n`Ek)HK;e0y9 zu)7wVGHX1Kb5ipWH zo(_8$nG~3ul!@Fv9PrJDC#;$*5ycmEKt(l6!n`Gcv)$v@h!yPs_ERF&v=q*G0C;Zu zaBkOVXBV)uedV0U3PZSgNOYPHtFC@0SL;l>j<~5t*fdG{i_TFd{!| z1DQ0^prdzE!F@7$eGfDnRy&{w7!hWzPX!8UBaU?8B&&xJI&=1H4)M1Vbuv=|^k95r z#F2ha3H+@=dTYeA2SoWoKs9)>6q+!}R{FfU&na)v#jOv&rdGvTNGe=dS^04LVO7H~eRJY8u%GLQDb?7Wej z@h8u~41ISA51`#RGQKiCDctU{?oG81Bx1h5~~-*@*$34)A}n z4YIc*mV!i$fo+sX-ZEDB^t-r5YZg9vW9k9Jhz_>A?WBy+lH#RA$gYE3dBi zmQy4lQ^G70nz???+ilu%bux#-sfiCe64HX@>UOLdQZ%PFHYEnOCDsijDaxPS_|epB zC`S5vMzDK^wR%eDrkoner%oNrhdWK7?3$%bDsXebpdPk7ARm{;a0y-F(WX+uE>v{8sO`;P-xQQuAQZqOO=>8E=LCd_;>jjrj z4diSkokwZ``jI0%O5G4_)7lHepb;9-SZJUN0t4)jaHm zMvE-yC@)~G7Z9&W>-l*tz>|xk8hQ&&ge2>%t6N;Zx10wp2et}s@Y^HVR(fma?@|F6F>u)3*}@}Pki*>Db1K~P z02im~V%r8D2zYMt;o&p}JM~G&;2{DgLAcib?C#3W>(7&WGj%FSD7p_68EvH1w`rlv zp#q$uw!|Ufw_6ec2o$CB8gUlH0MjYmJ_;;`zAKfxm1S*kh>-=>ex|;+K0f|gPhX3Q zt!C~YHqC4hHlE__tA%b z9_(0W(~+195m7Vc${z#%;)>dVKT{j#`+5rUgn|yIqzl*DR*}P_S(~Q$U$0%)r@w8U zXn&&H6kCGgH@gw?K@1Mj#;LA?20A>?Yy>$n>2VigVw2P6q1*me?Uuk=tDoz(kwolp z{EeO1y)|!AGTQ+xKN%^Qd@CGCcLK7TU8?gn3_B#0%HPh;-XTJqg49(K6p*FITbG|D%@4BQk`La$Wk|D|RA4`@2z_~+N<55FFv zBZxt~mWvph{&>1uk5?A=?S}tL%aruoTKRbBI8}H*mG|M&k@H-o(VNGgmX2K)>mA;g zVIMA^xUaN7{q-MOCWy~!<;?rb$je)fpI3hS>`a&HmRo%~f4cXv*>n5zr;BHp^?_8r zN1rbPj{c=(2A%DHeP8})^*ZG8_wldo)m3a59wDt4jDSJ{_a7bZL#!eRi!Vlz^yUjM zC32xA%)hiuxg(4+See#ag<4Z$Uxn7d(EC2Uh421-M!Of@513t>#0LDI%z3M_2OaIJ zLJ<44aJ<$~HJUYs3kFL=%X#Q?0cv!^S74S zbf_bLIOn5#@8alC7e+|u3s<6$Jc46YXpDUIr>li$>M8;Z8sWZ{;WP{JQDKkaRV0WjBY)f1PfSN`6K*f~+by+J 
zxKO1iOT|;i+?V<-8eGe~VV`+Nqr|?N%Q@h2o2^>p<2=rUBSuEnLGqkDgcWH)7zzFP z&MF)hxEpy8Z!096oypAclJ{sLfxPln*^)~QYLXfE`}bSHZ*RuxG@Kg|yh~2CCm;FT zbDk}HBSEfD2C)0p%PMhnA~Kn(wa@A0D;cBUj+i_e0sPHMSw(p#u?N{2yP@nuOzRaX zDEXb>rNXL%3B&?dO!+7o+pby^#1i@78vn|7(OLiQ$(u*N0TdGkGU#^1*r+tU!(#^X8d2I!oofGk3=VX*fCj#ljyWN zTYgM0VVeJv=rZ4U{zWRhH{1Uv75!^Nf8}k@KYf{ObX~1umHpzpX0wW=TMbwe$Jh;6 z;3T>$$GsICsqkFvv~%A$NSuvfPzZCowmD1=U5%nc2H|4sYgU-)ZCq@9)Skm`DM@Ki z!J_NNSlD942hM zRa=26Htz7f>3w>BdKY$Nq=7Kj>mV8C?!TY-^Sbu^*lq zl!l+TfA83vTo6KG2EXT_Y;Ig=^Lt(@_mNAqF^x*Us4aCC?q$r0lc|92gQ&4i2dUgr zmuZ6%uH@W(h2FrUa@a;oz0&hH8%sZjxr0vXy7kMd@sg}v4Gh2F8Gn>Czqc4)b)Q?r1=t=P-t2tJ0>=tD;Yvr!E$2 zVc!Athd9OCi4H2!ha(+o!r->4%14pdwFMH@4j$HW70u`JUkY;c9x&P|E)~w*rypf4 z7Vc{*<$U>DZA#@y`qWAeDP6i;WEz@K&FY_>pPxv*+>u#i7Y*+hep!h3Am2TJR6jLfYX+g|iz1iQ@+#Nv{!m!; zL^4(9>w5{; zK5zB9U;kx7(gQDnPaklD7^m_J71l%gXfWY4s9>1+RN{aT&438yG#h z2pHd|`uHk_yC$vtZV@lVZf|x9^*+gCUgPLQ_nOF?(sv@{Bg6^ytY$*fY9^|lL10V0 z_b-kl$YpRrWrG(|P#A&nU2U5At09_`Mnkt64>vE^_Xb{6L|AZ}xcUVn!CW*mx1kn< zg&)RjuVsEC`)7j0RjhNt%c_t)GfxirvCJ%bN6AR_2B~rT6ix? zO^Dp;@plWcN~&nKJ6HDj2f~N|_56~q6Tal;2WhA*Nq*L9NTSNz9sxieGvtXZ! z(HP*DD+F8k1;F{kTx>$TQNYsKQbzh|s<4ExLR8;R1zW)sy$Vxj!d+v0Q9Z~eumt>@ zNpB{B8>qZ*(!`8xfFW_B<669}O~6vvRstc&(yT5=*7M##&#GidK)Xs?UBmX0TWX6j zq2hrd$JF!N5i*oyZM4JGUX>`#^*is$wi-LW&Nq8SZP;?bdKXEkrAnPogw;Qn!fH?j znjXdCA0#tn4!-h5=FO~J=DAkH6PNN~iwOd+RB4~^ma42*cDULuKy-FpKICjFVZn_B z>4_X4MQoXqu~MY?cU0;*7&|N2G+LT`4QIom76Q|*Oq)tf4kaMSw*>K#W;8rNopnY0 ztcqq=Dp5I~0t<;itJXQTe(@}ah}XpXF#6sP_b0d9x3J-ncbcAmcpJajA;EH%!0%(? zYb);gUHgn?*&4_%GOb_qBa`5*ydZu+0>t5$3@N{X*v)u!pogWN3?9#t)WplCQSDTj zq*khI(3yX+ftcKr2V%B*4%*cBCupQNq!aYAy0G=!iu7r|$CJk#6l#XE@fW@oEunf$k!_wXA14y~iFKL36HR#UL$JGAAa{6=2^uldh=q*`92oR{_(Ci| zQ$4sBE{}DHBSw}p=`u2xpXR{Qa8F7vANxczH6%y~;lP}^7ZguyHE9b zdYJ9kILigI2a!~`U$5<+k&$4CaKJ$qi5CVY-+-Bmr;#Rxhs)?$oOyH39H_Y4LYB*^ z#`VEBX>Gz(!M-=aTpx7cB~+v`;1@_Lb;1CX8RdaEGYus%DHOgFqmMin*>^`83 zhoFQQ@Sq6)$VB@3%L8DzVfcY@qZ%FLqcfK{Rc@oL?reza%#&-!fDJCa+t>hM16wQb z1F#g7{G}uN`79m_7ZAE;_i|r5>VY}J8W?jx#BdY5jH2Aa5HYU#{*1FeXnWbi6(7h+ zsU9ygu%X+k#Ko%id^D;4H3@uF3B)4a(RdIvMQ`OSoW-bXq zUbepOof}*!QHswV!%`P5o>#&u>Pcgc{I*N2ddv*p$LIX2$f;qRzWFjaBGsvfbE^{RIsKYGCkNP3C`4GubY5uHk7SiC65B6;1QrQ!7jbtK3ELF$ zpBFKe6bZ`~^F;u+y@me=QlZgNqP0|_b6x@;EndXSmoP6ii2#1L{)<%PuwBE*b9K!TT*de zyT)9%AzSzVM=FlY-<$-#Im>x-{{JNvK`7E(6!~8g-9JgiABpatq=Kb^ORj<2;vb}9 zxk2=z0s2o;A(PuE+u10;+<5OlNrgqzgPyy7pMNq4MZfjs?Yw&Vw=tXNd zOIxH|Ta-mxOi)`~Q0vo|phFCaGo~$#r9EA){l(v;BDX!KvpsLQJ^!K|$uj>nt3?wY*lnqujmk?Wqb=w1lwUd-)Y>g--w?*4qyy@q9ZyCL`Xi^bcmptn1@Z@+cE z-CKV9^WrUr<=uhYyCaKtCqeJda^Ic*O)4(l;jy9#a6fjIXp&$wPPQfQLQ}4wftP47 zYY(-253OYneQ*zBUJr9u59>+~`(+P=wU1 zY7+tnfQD;vBo;N zAwT)Wa&jwpawl){Ti4{?%H+?>Net`z1NrwymhVr3-=F2ZKks^P&RTJK`5x>F0@#3l z;Z0F&y(f7+MczF{`DqGxH3epyroKBJ9x+ABrtR-D{Tz`(S^t>Xo%)}nM<6^EJRQQY z|JAbxZq)~Z{Ys(U@NaiB{`u^|<6QdRSAE#oH2&I{`}6Ga)Ir8~GY{GT5gauUP-~&_aJOI<9jh||BRgrcUi@k zbS? zeM8+bQfI4jJ3pA7sMP5YE-)o8c|el|EBT5TQZSi3d{3j;fEW@-xOtPsrIad?BuDTT zX)`-J5ppTmFQWUYFXuj@UmqRseyc{wV$b-TM9A%MUX!GNJ!<`N1ow8^`L=N<=2u)eK;bu3u^$EnGuM02 z+WpzhP>zE*UqZxsoG)P(yiREHF*79A>2Q(+feOY!UD75ZYE6GJ5Ie;8XrXdI6?V{9Kd1m74i?*5eF6py@lNqCsxsF*66io`5n~{yMXQ z%rj-_h(RgM&UJF^Bei}macE3lC1+vgu*#P>v=^t#%Nylt1M{5IF#aUlZ&kwsqRc-M z4#|q>H8Wtha`uz72{su&INhW#+E4b9f6n|-ZRq_Au@wLD;z04+L$c@lDS?zFkoG`is@Y>Vr`F# zAyk!Wsv5X<&>_pv#?HXnx?TouHv^hnspS}`{?e+ZP-3D|=h^6N{)3}@DINx$5RJJa z=Hm)AIshC&UR}t0T#0K3847e5mUWC)g0EtXEtYia0)4B^=_pLfH5M9j=7o)#1q)E; zaAk24Bs;aHB~FLY&Ofg67jZS8ui|TAH)PB?z`a9=W!8H%qwMHnrFM|XDB(;L0)ns? 
zST~geOTEdSaML>-HR1dH63PFNXm<&s;O5j&SDx(ZGD4kY7e&m1g%Qj7gGI!#X(?>8 zaU!M?W6PgMUo^gUm~jLo=&N7iv8QQ9#Y!lW$Ye_iBQU+#e(H~!ds4z zxi+^`qEAm5XIh_V>E!6{Ic`A#d^=2lehjUnl#b@+T8ap}|se%kOF=c+q(&mI!D zF%tpsvy3F+U?Bnx+OB;lI^$3aS7vd-1NeEJ_D};G((pCb$#ZvlnLld@uc3%o(BbA@ zJ09t?`YU0=$}EOOL~<$nMBLDIj$-RrUN0Dt%-mzN(UOr&|IE?{`gklGT_ho8{u<{` z;O|NQpN`LyFOd|*v@BL?aSW(+M?W8|CB zS^DcMOq<-n7}I|8e$FJCqXgW&vh8I_OuJgyz2Z}G|GVdEXE znbBUkhBZ7HxYGj{?|lWYa6;x1Mb;f_D3wVA00+d>>Figfr)D#nksz69c;=ni(T}!?1`j8`I^r1Hw)S5 zb~Cv(Sqifb2H5H4A2??(Z@{E)4c<;EcwaPkU&=J*j%8Gom(xvFy7N_WKS@$<0InS* zy)zZ>QE);q0y(bwpxA(G2kB;i@0DyF<~ZO<$i=mTPPZ4P-?G1pkqSaC`?9;o-IiCSQjCVMS3A+77+^TDbHnT6U>#lD_NTp$lv zy@@hls=5bI%IJ6;$u=(buDhd(-U0;Gs6t`c10zt9XBo z{nl}V18BluV1~QA#(97Uw&4iNXuFSGV9{28j>T|}VKNK6Fp;mEFJMF}F#0_(h1HZ? z=P8KF%dG1G0UaD)0!#smK~(X%-#szn*n!8E+AKZ5cl8U~Ee%sW@p)tdwuuEhM*s~Q zNJDOztfqk|V}w@GAQ=??WjK(ZhwC=CNmaCdSUA4+nKvRWB8$%?1V7^Q6PJ%t5W9W; zynRAGG*cjHFa{iF7h!2R47#QUu|1C2n*tDE3-P#Nc2Fojt{a}WX2U-(X57M5)J&8G zAtRWM(x`~fDvjDG_06FLMVYcZ_`a zSqr#r5s$%B{H9F#@00mL5_PeriNFSL+zIPr(kEP}bZs;U6MlXo8*|*r7QsGAFO2AA z9ATDi0D;~uUIuz*Oe_g0FBkPaD(RFBn1uxL1&Ys;9VLWagYQ^dKrtW^G|{1Hs=zTw z2#Uw)w}XFW>p=aR zJ}MGG3Z{4&4`fHD^kH;%V^!C_KS20$2=A_yw^Nq30(L7urCBRJ;hj+Xad|4m9Ee4tbgJw-YC&JJpxH z+VJAfyWuob;qHlT5zV{dY77O{lK`?RUv9use})4UW`zVwUo-YH6^7%L2MDu(VU=i*>87&1Kyy1{A+QPa4ksg+JCFX}Z-{w=Q3L{md)_nEsiLA6$oV+}#2NiR z!N6ZrVntlw6iLv015=u<*C7~qyOp2pj6V9QFrcG6RUloiDtgBDbz55|86kayEC^9m zD1`OO2NIXfOOcX7k_(Y|cLfFMHga^)Jc2HI!b?T{dQRbGImGw`R!Ay#22V5o^z}pF z8&cl5(QMT!ggC0CvxSrvDSOFAe=mv}O2?nDhXE4YAh#BC-|TqAA5DJWws5rU z=B{iK`IHb+;ua-r8_P1DHgY&d2EWCQD0!2<0)iCteunz&0Q*b3qL*aHkqKsA zo}$Z*8;ix^EF!q8vMB=IB8L)zV?ZJDQ9K?fl8bDH3vbq}m_Z_Ccg*>?fi$wHjUT~3rUlw06SqJ1a|}7 zjU%DXYbCB#Z>zWQM2sW$6Y({XRLd^JEaCWk5Hj{iU>LH5FB~9*u4cg_#-}8V)2NLf zWHvhQw^=8aZNNWW2&c0p-h;Uq)D+%mFVC97Bd;C53(nA+YUmDyA=9s$BJu3I9V+4|3`2K`iD<@Tz z^Lm!5$zq^mTj!;8{D=exCBLr&fbXVxnIRk2VNWE-*|#{YHRx9#(~nx3VzxtPFP#S) zY8SK$(msy+SF88$0H>42D{DrpSJ-Ql^!A_?E-VyOAH%bt6ED6P)lnJ%+vvxvz1;M= zx4VjVYbNE~^paZR6+UF2J_~V(0AjwiU@i3~Sf<|8Pcj~lf|8S&zPd8}ctc&wPTM^V z5%IY3X*z3vnlogkLTZNlAro)%jHrk`?ou;jdgd1P;p`6GtT-DJEvd2mr`dZ~voN+f zrMq(~59c2Ib)c!yJ*V|)PUmV4&Ni=qcU~oV_D(GWhz6w9G;jH7-s)=Jnr*@M?t=Zp zg~uTae-1Re7u-HAcw8-bv3+9MG@u^5)|S9iy~BEPu7 zSwFdrziVtsogN1Yn%OK9xpyb>O($M^PZVWLys4WgnVu*;nNeDw?bGYbOe3ux9wL5W2nvqj8v0TiJcrhdMp$-1m9v8-dZtmm@~6f}+MmrZ7t&CZw2SyvuNtXP??JoH(y z&0Kj@zhXbL;&{H|%)0tSV%3#(#n9YXbcEQme${IRAONjmS=am|)&k7df_&CO2$^eP z^=lC`YfG}WKn}K89{3fwEWVZQ#_GX;UY@eTRgV^x@cW*}4Prk1|`DcF$5PtsOy%~ByK{M-r z?9KR31$B(gmd>`=2=8Rx{A8p0 zYj}zRDS%X=9{-c@lqy%)g+(STg7nG12v61g7RJ`6I{zXa*2*BvO`CN6ZQZUqZqfWt zCHe?}_2d4S`_FZ|hkL2qNB70laUW`_j%Xr~04f>AzuI$?N3O5CqU9so?u@ua)`yr~YiW`o&-csUIBxmu#c zmb4BufKxCIC3-k^3HOL7$5O$`@bvPS5W&NRQd}AC2WKFdKcy2rJgXqOa2OIdAH)*H zyBrUI*GFV_PjAEsY{wVoViwA5P!fWqRtV7tbon4D!r9G6Nr7Yc5kqf->dYW!^`*=N zuV3q{S!1x(9E|CRfhm}hl_;9@j^BZm)ahwCR(Si=pG5jJ8JNP8*mWW@wIF=oi}It4 zI9|y#VnMaB-j=6&z7v8Si# zMl>(e9#OD!&y@cS`4UIHU!z3~E#x3^_49`C2EI8wAvyjdDc4cI;4F9kCEwkU(X{81 zm=w!&H(zt-DzfoTbPIpg#s`xN6s|udjfd=adj&}`#PzU@))x0$bd?e6E9|w6lJ2FN z6JiUtU3pzO_5AxoI?rgI%5FDsr@tn?Ca58@?e*oBl*y{TyHq=i@&1H;RmQNa4HIYi zW7xph&Btzb@0e>G4+xtFvmB=I6wQm%#%5|SAb@l@;yR~?a&TJfv&DFQ^`k`|izw}d z#AjR2hf{yF9x+i!$#%oRQLMEc z2_PNr;CX-gv*-5q=`K#;`q|!)zSG(M*zfRk>e=?$!Cdh5b09o*IzL(~1hz{XRomw$ z_;#Sivp4EQIQv67e0s1AAb%;dK_rR|5Jo(fR5cq2PaB9u@xG{tY$6`TMsg#(A3e|+ zyWP=<06JsrlsTjdicJ?%fzFs}4mq%Hr>g+LB49OSnt{t=%3j4x9Cm*J_nHQn5BXHvEwrIW4y|06u@c`aPk+54ccBK)V+LOwPI!cu0N#1e}t#X zgQ*jHuSNb5o*n~bNcbDg;D3atYHzfYx`tkh{u!PYdbVFfg}+hTYofCX8s_$?cxw)S zYta5XJl!v`ReNhR+BNd2X8*Tz*ko0Cv}OVbPkY~*0pRueF 
zBg#V*R2MF!9k%eQmjlw_1-dVXt>~!o2piQ!#>vArk-qY%XFZFoCx`7AAUq9LUE*Lm z>QGew@LM{}Eq2tY8TBE)P<5Hl_^9i*bhxT#S>VM{w>hFBxm|VTw{+N6y&`qAXGQqS z(bZn(sNd4zRnf_#zO5H@nFl?q5+_IffEF{G`GUv_=65pZ}N}xlkxZM;Xs~Z9G~gGou9Wv zuczbw=v==+vwnSV{tr6LAD!!yTWpf9CX2x3<3+2Mm-xfw$?m_=VUF-S)cRHBi|sK~ zo^pSROAZ90dVtHvlDL}y9VYeXNzRsLkpAcG3&6-*lK8uFYZPH;xMD*cjYT ztC621`X76Z?XK*%l4#VgFE7J*=k!+LK^ku1q%AFUlBD^)-n-?y0T^_)w5i0g#oDa+ z6ibLj(2L$hBo)D(9?W9-KauMA`YQ4CNXmw+MN7`>;E`hOWI;$Wo|fFmi-eX1 zq{QGfX{PI-U7}T7>Wxngg6BGgy^>dsZIV3%+}8^f(dPvQ{7~@~N(8G4g zBh`awTCTpEA;RGvR9)`jb+K*!r=%4jB2EpL`rOZJ8=I*$bULGj@3Go3B;D70bVc4` zYgK~-V!gv2!XfOT_L?&D};{OA3C_HIqh z={y7FH&X=w9Y*W5mxFwO+r^(;3+x0+yoLzMN5Su+ZjGCD7@=gnpM$)C1X4)A?L)_MbW=x&SGVlBqY?dSUPtR25Ouz#%9FHt;{9R*r4kvgUhpv{Uh> z<#0v+hY*3iagEvvf3t(k;p{t7jNs7#U_e6_qw9_6+5kGt3$mMyC7D-#rI?v2m3uiR z@)pvnF76^*&(XqglZc~=Qb}+0SpumOH(@QXP4<(^M~KctwXqRX9*OJ*NAG);ZnxzH zjv{3roFti{OW3U#2@%=_Bm~>b2so#K!cW1K7>m|-Tt55#X;)P}7A2ia!uRt*e5GH) zBd-2hku;0Bbc$~o1Yze2eqO#5_~`*Jgc{z3z2;^p8|jbUXabFphhtbM+=fGC8U-T? z=*_mi6tb}|rcO_pNi$M5Mc?%sAj!%*#5Qye-XFI_dT(oLYvm`f9b$voc5p~Bj4S{C zoig!_K}xp;sYY^b6swu*n$HlgCNL7oc93IS-dmVzD+4Ru3 z3cj&rc*88)|4!X91wMP%XTCaZ;K*2%EB{&QxaWE`NsH*X^35>t zT$Q%wabwqg2Rv838QmV?%54W` zi@OI=#aVwIeP@s;$D&_wa__@E7yN@uM^zYqT8FSC#40Z9F)m_;@0?QInX!h}ho_u6{me zsYWOQP23s)!t59vO?THk z*in`qz0@ken}_Ap^WBD>{%621+C*d_ja1!idYZWx^{qT|n8ea4KTtAQ@#js5n~h#V#tRpWcLv(b{5LkfiyI-?*md=NdRp zZ55#xSj|DHcQ!n*3cS=yy6n-UX8MNF{yPnQ7AxjO7GMI69SuM_+Ocx={*WgXcsf@O zvSpgPq{&3!{EkJ_o!{)RND=|u;Dk9B1K+KsBP0E>eGw!BA)bfNd?H|>L9$(rK)06t zZT)p)!HTnIrywb6r-iz@Jnzy2Y!i4U;q-J7M!Ah*R4(%BEH>AkrMdwYd&@dJ&#?^; z$)QvnlVN;z&k^4qN6UYq8$y_}O)F!^{E9B0KdoqtmTE2T~oifcB^2oq&61?D>FwW5rp}a)UNgYmTvV{#rQX+@98eAgVPxY z^Otvf&gQ9GC7aA&uzuo6M=A50oyvsTF_oaY1<@@heaOLUH=iW;moI@kcvNfox7y6*xLh==UcI+gDv`JKzZ0_ubVx?atbU(3L^ z3yZz_)I*eG67i(+s>WLuu~#LSfhC9qR>7Sx9ZDs(oqeT`IJ^hS_{p}d;V*xYNt~?p`KE}6yT^jv4di|y0UAg({v(sof zL$f<%f4-M|u;PJ!?zm0&pWaI-WJ75okbi!?|C?|AfBRPoAZHNk9}k6h{1k=n{(LCt zh^Qt1iQ*HXnrsTEzZew0f1O#><kXaOtZxp>lYt^XbF~ zJB&X$@K9L8S&KgB<2urq`kJni7k2s(cqqIX%U5wtfci+hu1#Dcl$gL zt4+Gz+h)X;$DuvH{?$&SkP)EtrJx-GyubI>&@bIix%8AzkNkMUQv7vRvhcMh6}awl zAM+y21DvUSN*-I%)=g1|m}o-i;URJwKYi;(JK^l2#^9hcex{ts%y`N)2S@NVa8)|Y zb{?Ga=Zv6PGR>b|Yl(WM{`?@Gd&8>&!AD3YnX&Qh9lj`*B);r>XDwiN$a8ixcvNs9 z-zDzF1`6C{YG*6mPThBuuf}U=kfS6O-f$y~*39*QMx1~v%k!fYt=0{&DTk&L69S~O z9hhg5QbpR1_TZG_A#o|f;rys@VH$J1Kb+h^OOu0U%)D9Bzbx)DCAUOqAMxvTtJD~+ z(A#lQ&E7(^!36e{i5D0pA|X2 zxrlLECvgdpZ?6f8A2MG^Uc0%B417>*o$E{!_ApT7lzoeOA|%;ZfA$iJBDixz>AU*t z+~XmKy^*((zT-zph9Q?9H6t#t2NZx=S3;<}B$HbFu&-PEXf{3*KnF-_m1N=m#?8!q zM-PJY(6kt#=km`Ej&|rZg;rDHNYY)?2e4EN3+E=Tob;GMVPTyzMP3bd->A=HaGMRu zHtmMh$}NFzrYsGE`Mp8Sr*G(8Cpd!}?_Cq9XhfnlouzqU*Zf%cQt0wOflpvYWwVMZF8SJB@YNIWX!Z@Q(&}N?}AKU!yU(N6ny!?c$I~5 zhUKShj6H^o`grx#m$xJ9nRdD;6^tQ``mU>kDiBuaZ@&P-ivF~F4?KaeLKwC}b+nfE za!`%Pdg&iw1%)@Y3g(~RaX3gG=w&AVPol*?j&SOS41euqs>B(eH^$O@_!FG@@6jTh zL2j@}^UqhK^=~Snxyl)T^fI+B+CAy|cy&-qXuQ{7;nQE8j@ILL6oEV|xW9li1N*`S z$r;}?|1n23*x@OOxoo*?Rb9T^9h&2P92}kX*IwqmxXj=#mAOBhj=&t%VO&cj=V$Am zPDetbYOP1EMpN8XDE#m{5 z1=ZQE{yaSNeR*Lw{R`xj%ZD{%fC;W&`SLwPyo$wG`)l&d308 zCOfeRQgVZtbUu{#b{h?ec(LX0e(-BRpw zok!l2tR92KK$W&6OYH2vZ9&5Bp}#WSF8lg&l6lY-ZbfDJ&h$8qZ#FAW?mYRC&sXNOWY z%YSEp$sSr3y3IVN(-6;aC+=rjID`)8=Nz~9Gc6T-->jMoN*jHnCA}L&k8AMM%pxK zE;^JUBFkR6xB9_tXemEy1d?f!9!`7rZJ?{>& zytbYWdEpBdV>P%D3C%H?$Y?{m9v71sA71W*INy|l7M(gTz*sjt=(;(^@}=5EPx%)D zx#K*8$Qg!?ujO8xr56CA#k{b?fUp-a*PZC}_Cv{T&dCl;WMd^2n;;A^jTDp^qQP5p zun5rv49zBciQ{jpVXzxM-w_ zWs#2!fmKS;3&k}ob7P1tA~-If4xwZO-CJ@wnt`rA0c{@|1THa`9kEM;Xu4eS<&uV; zVuF#rya;YgHydM_zvMX@?BCi9QW2qN)+CGO3RpD18UmRDNNaY|9Cz 
zD1BnuyBFNno>OdV@4Cn{$E!eY-}od{6U%gL4r1tNAmLENa#m*hwkt|QjZr~pFLvmY z0Dm$LXHHJ8VuIM)qdsrLi6tH+ow7^BhoIN81$=Lx)!}1&DXBgDCD+Oz8lmh|88rdK z@=MX0;*}7}n<<~dihEuG(PH%gJTTjZ2yC09drQpH`IH3XP?Ll8r19s@{RR@mHG;09 z3n^b=$gb&tT(ht?!a|q8}V`!KzYBCW6`Urb<+{l_YBdyJ?s1rB}VO^U;7bS`sc=mwXOt0LBuJi$e%y+^xY@3QgtEWg?FbF|jEUEY*n2He} zU`NpTn3WhW+n9clF~9;u)@8&>Wi==M63b5UC~|V2P755+m<>NKgOYIvBB`em*q$#3 zqbWhr{#2KeeE0js=aR;LM~iHR{FWqT)n}L9@Ua0h#I=|Jq862C*6e0BWoI3=h6b?H z8Wia@N5QSbPE}J3daN?%@2qOcNPHZOz7PxYNkqM^WikK@lt&nN95%C=5fg({nw^v3 zOx^6#u(5}@@3)+2h!q2bCqN)U)%a4}P6Hd>2<+q4400k>U?T>ZT?@0M*g-LS{*D%J z%hvUZ`>7_~OHed$fv~bSKctod!$cdP-q}QJUKn5UkU4naK9*P^)>=)-obV_zE&zJW zPDW;s{OZeORPL9E8EaPh`YWa~P1 zhdQTU7a9=BBYi=W$L{_iHqy8;cd?lX)BqB_gDq~8W;!ivcN1-nwvnR{-8k&4*b+wC zUaFiL@Fl^wJ-O}G;Qx4LfTK!e@Vn=vSo!;?Z&rmIWH_Zji0I}s-Ba)MS$5;GdJEyE z>*+RIOYnCh7{8X-ImDOslp5z6W7q0y+IRg!?DZ6%SZu5Pe0BNetQO(bzOF<}U`E?_ zO_3&aX6nB4%dxz-<6Y0)e#%nE0Zzw#na?Tg3JH(Rto6~s(PV^Q3>jkHqJ|T%GO&Qt z@r&x3%zLeGJe7NeFIZ$H!yt5IkM}>MP5baoi~6uqljX79)qiyVOV3yJXGoysymt9y z9B?|`usG9{38($GC{XZu^bB9yt>BxBA+z5<+lfJ1PLO4>nJ`4^ITwnL!lMuSP89Er zMK&2IupR+UM_K?HmN0j&P1&JZ4h=$?5nK z#?Nsn$W8pm-<^()1i)Cf^nK~Zp5d{aI}KWIAt_wq5Sj|SlkJ7J{0;jo-=_~?DMW7p?8`+UW7O}&m;T(pMJNI7E8b)y*A^ky+i^bvB zI}thNFdk=0cDsO5f)Si@46jtk614B2 z)EHiAC?)9MiM@9xR)rx!?@7BK_-NalAp@bHanGHPJf+5oih_}4&@47_xUYhk~OuBaGx24tBc>UnVzg8^zW zIi-|0iqMd}7lQac5)IkW+p|sCbxtZNipV>Pn8hL%J(6Dy&~_pOG4Z*@;rQ&N zJaz~*8cb{+H%%W=f=iYW)sRAkF{lL`7n&V4 zNVt;e$&iuyJTp-VRc4p+Z9Fs4Fmovd=wxR?%QFYoGY>j5x9?;vgk~~@W-X9qXI;s_ zAElg+XYF6fUc8fi%#Z;KODFEi+JBf_jv+QIBkeDOJEO=$JHr6U)bGAB2&rfeeHMy% zRR%M|u$ER+nw3!kU0jcBc$E^+j7^DpPR7Sbo}WUjSPm3)DbEa|lkiA8=XC5HQfFYn zZw{mPjMzYvTg~Oa=S93MBV{oPe*z{Ed=pU;f*5Nk@Xn6F7L(a!!_`DdOGQZa%t?ga zL^h$p;^Zlibr?WHOhUoB(WIm?(XC_w;hOo>DWplx5ih#rc5cG`vGHSsIrDJO#3n3> zB3W`ChGf-)?9oJY5{7J=EZkQ)A=F%T0!?bdiOLHrQaCEwxlHMghTDXa`#VSFl#wob zkjWn*vqX#9_TV|9s6x-g7ox=%M&8&kP?U?JDvaJ%rM`tUmelgS$@feQ<0~oo{$>aI z_Ji_UXn8S_#ap8JlDbE41{ft9$%|V&-?j6hI>SmP_)s&Vs5$a?MMkA>l}k9=5gyqf zP7stF4|QdrfT5^&LP+G7!q{p^*+Mhm=1@*7XzDT|M-#+e_MW^9JjjmN!4Q#S^MENk zIS9&w1;rZ10=;h@5R@7RO2;Q{;E5@H5NHhSWhlTZ1ffAtVoZEi zG0@0Mz0XKNivY_Q4~uy7~B2@Sdmf{J`0lAQw!X1_P%1{z-(N{90$xfOeVpQ&$ISh&whe#1Yakfhy5a=AZm$cgN1UZ-uhN3b% z?|`9LqLr9Wa5Tuw9I?S}M2rQurx3e7uJQ;M_dJ3^vEZh%s+_C97#tq(7QW(K9eRQU zB!m$^s@I&!C}JuWc#WVSl7*trZfQvwDy+mQRkjp0KH)XKS2;`GR+mjyub{tt`~j%+9f^_?&`1po zSlX{@Y8SzJl2Segaz+b!Xo8r-5Ik7ul0hV(t@LG(c;l9-W`q?8 zq=?UWg$DV9G62(H&5tm8bJ*otaOe?y0{hbW}$>UhQ85n$i(rl)HI)aiTD z1wnOdp}RkNwVyoh&Z_M$Q0*$J?RdM<(Yn#Kf$8i#P3hifU9g4q^dN90-8oa;gTK0m z#e2r=dtTM{mZ^SrNA-W81XZf`^)dIZigy*Nb`PE@L7DfetEj;wP(7AtT1HI`JFjp^?tn zAR)duhPzP{SWwMLdN>;V(>4Z{*7l4n*-<6=WtTs;Sc{`UZVgRJ1A;1sBAn4+$uc-( z9)`dZK`aoSy`zvFBI+38=(lhY_7>02lxR(`e|Az42*Tt{7X5YK8S>>_l*$|^;0!s@0Hp?zgucpZY*MREXJE`f@QLT@DAK_ zHu70*G&5tgj%Cc>bS(JYM03V;b0oO6ZaOYwY9VsmwQfA?+BD!F%s;Ds8cI&!f@qXc zvUNtzqL6x;m4IX~4-JMpBzrhR=rLf5GEf!+ximb!wr=1K^EcZbBC>9Ta9JGh8PPT< zMsA`>YafZgEl`w^{wgD4KTG%OlKVvpWkQ1ySP*G=i@bMJcuXS;U)13nP~35)wAwIn zOms!s@Iu*e%}Lt_7;+wyl+!Q>Y!vb|M+nX%PrL%wP?F~?E8nlgu4LChT~^K=-aJOc z!$j+es1T14F_x_H7eW!wqE_lBR^~Ocy;)Z+1y&rCR`%^yJo{E$2v89+-wjdWi1i4wHQ4;RL*FBLHJ@@U3H#jK+w}s# z_&Jpa-$ud65y|}1#N}mhXY+Z3cSGCeFlrQnZF62Qf@qO6S@I;7eK=aZ?gRHVKs9JS zSq7I72L~B00s|d5hG^8COuP@VqB%tVd4#kfXQBh)jsMhNOm^$t(oqLuVWvr=Z+Hu{ zNsVjLRZn7?gPDOKZx|%aGDkLnk;)BZIrcZ~_mCYyj3XW(OAgfpJMkgt8@hLzp{y8= z(->k>G+fXQ1VDXruRwslx&-C7pyxFN+yF5jCyo&QO~o9!fJN$MgU0X($(X@y_NXRo zIg45JEeWL1S;j9EIj6?fiU4BOIccH{WQP;pjUm>;Z4zTicAat{L={hlj~Fnbo~StOkYNtjwiZxtwR!9ApMz$$Luq`^NyYv z2yn^jm4TX0k;G+Se-yb&*{8BAV-)q2lK5_lO%S@Qm>m}fDFGqK}L`Q$hj;Jk8g6_l>Q 
z8bhJZ&XAlrL{1q6{ZAfnH)tcH&F%ht&ST{$x+p z7dtu|xl$*X8&aOxL1bMFD?U_MKyyZ%HqJ#F-ncCOE2ras24^}x&$Pa%o^~gdsmk+C zABm+6myee)ea*2f?0wo2Nn0`&M@UZGm;4rPnsz4dX{pm; zYpNpDO@Di7f%GGnYUGK%Q>0;eto(!K46B8OrohGYnyRIR(EkX|tnIBJP|*I^_@Lr^ zE0@CBTx+S0+hZZ4zW#Zi05t2WfcU4>MOpf)8h@w1fiqi5DX8G$nLF~#Pk)0mXO+>VPGv_bX>@K@Xz3%*{|=mqZl-yvD%kzM24~7M zx$S8wUQTe;{@;Q#-So{~?Eg=}nZJykn`xh!0N~666W2lAXQu9xZU?3wiwV!nJb!#V zF!S0Ses&SV~71n6fX^X-dRa}Dv(&=$jMbTiTh>+yCh&agk zB``5e*kC2=wz?*k==z5zo?B!a_y>MTSvV6n!FtchCo|U8Jmk=gnjlY}ivL5aFb#!{>hP1>}I$NTmKD&+<%j;jOEbG0E*Wl~D9tLPh5(B%y7sWH8^H zme9aBO=+^=3^^K7>&zbx%S32)O=)4g|8}T`@8Vz^ODyTSStkyqTQdDwy~VFTz3f za>xdn30~vp3u{H5B;h~QCy zJp5pCc4vUZbR(Kg6MO5O76@*tC?_5j_7aL|;+jMIGBKJ%{YhId5aZ-rRAs=jq;d?! z!qLwT^8$1$vY_V9p()p=ccc)+|HCBp_E<5EVe*?L2GKd(D6Lv|?g9j|C zyZVgW>^A^IKW>mxqOoA+_~p1}<^&Og7NjV(x zwJg0ZXnIAe9|S=VW2J>Z)92N1l&1a2*AeN+R)Tw6lI51B65A|(TO@Mt));8^?%AEU zKIiv<@Yhxf?x`8Tt&>AxZ7aiatQjJ&lS^G-D|gLPE8?L}9@Da|!rkNllhZNMQzxHW z_rGyE7Uhi29w_^lQ^0qohv|?uRacNYl|UfiJ?gQceXbW=Deh z^%In`#{=Q_)2zn6d|S6;gCKi!f>H^s$6O!$Q#QtQ^wMILc+2dH?GsRGV(H}Y4aGOk4mrTH^fR+m*~&^SES7dDujaQ~2(@y?DZ&8RI`_|yB3%aiVpRnl5N zR+O?ZJ-O4Jy}*5-SOR;O;PKY5i|nAPM$N(Q+SSX0iCWd|@ayIkUkqj+1Xtz-GQ7C) zVxmxM%eFso<5>Zd+2+cpYvY@a-UBaY2tK!K*mE7-3Ia?Q-q1D1+?{Yc=$f5}Bv(zk zl{n@YPcn2qYdSl7@6R;vq*AC?6xJ)ZwbH`bu>WXJQd*8-#d zNF6=G<(WR^;>E|wtt^RUkMzZ@h^hz|s~e`B10-t~Cm?s)@41(SycA!&x?|A6TE89p z-piEjMM@`wyFvW_u1=DG#?er}fl?&<7ZCHCl>C*wrOo26W5+ajYUKvVj#K51`> zquu!Z^vs9nO#CZaStso~`?rf#-pxL+WNtce-+u#^m`?gyGtfVBSY_eh;`8*^=XbjA zpHzjL^$7E>(PVy(kcZE#;Z5oi)kN<@)#eT!8mz0R3pm=PtsJF#O>K;n+JDa6*9>M^ zec0vZHNrCaIsQz}=Hl_hq*=oWW`(g7RosOrg_X#JfF9~M=K${W+X2wAHF;H;~ktzmef+4oYkN{4{2n<>JZ>J+g zErzlO1E0bmHZaIj42n{UidpIczZA8&6pgABt%($!z0^f7Df$R0hIFYrog_@PjgcKB zjHiiA8;z_}QVu&(ms+IQYZEVtH*r)ZUS^iA#7XhkQ?Q%{^NLgOq&Ho$m%ifFR9Pl< zWuxh8zVvky3IWw7jG;7lOCw*Uw7{tpflpTYqjNKVK(h!<6ZfeU2WPXmMbjN288MTl zyF<;wOENbpWkq`$rCMaf2V|s7WaX`zCGA^o1jtItwuouTD%HxWn#f3b$tvZyDEG*! 
z^~kCd$!W*SXhg(m^S5ekH0#i`-YaR;BWl%+Xi|%i!+5pcE|*gkYEdeY6SbE!k(DzA z08~pdW_!&RwawT6ELp9MYQ~1;pJ{m46~D z^Mq32A-}8xbDO1Ai(7!Kt7`jWF9lOhSx4q}v-B1Zduj!7c{8GR(J2}GDQV6P1*}l} zbJ>pTQ|-63+A#Jqk4!rFHk!2ZI|#sQ{q6XUtF`h9wQ@lLG|{IWmSjq?{B*DQl^&!h z#G54EFi}eKQc8~KR59%MV%y17t|Ze#kv@eAE>RK)l})9QX(Uzt%&vUphH}H05|dU} zoq1Pki&8#ObD>vr&6rXle?(D4_}h48zth%|fbe%qN|8gI?*pPUE5pmYE@bm7N18Oh z;qNYOQRWv?speE+RZ@8up!ii&zP6=XGNSu)Sy#;l#n%Aoynwa{aoJkUPDUj~f2-~$ zD`cBjSK}1bJu8ZO9fdNl9%iAQ3QpA)O4a7%o}AO3s*s)mA+;LP)-F}mK-aEL8dWz= zwHq4>eVmjNl&XWOGPg?P8dOyYzC$hPJsqO8GV_#*e*EgT_T8iQN@)Sgz4>iR{C%G* z71w3^CQQ`Zth$8k+pl}|{TS-*jY!XTDOAqMXwo-du$c9?8m+YQb|Eg3Km zPj5RD!jUd_k$uE163HaQYXnp(oQZ3+yS4}Ss9Txzl?rLmv}*Y7DPSwxDHGJQH#*jY z`Yt5k=9dQAX}moEZ_>j$;=P(zbX8?cwGEkd z(eOTn{XWfW?RU-;vmviqJ>!;vhml$iEF(^vBahVDtTu-XOh%kd zbsuI7yA6-{@{gKx=_|_V-BKHU>OG=!D*s|xS2s{!bAMEJdQ=Z);QO&fWmETw+}P7r zoK%6HIkUd|M_s2SJ%Ua8$n%Ur`)gxQ(sk^o^&-7>V@wTf@(n_->4tKRNAJsd2j~U9 zQ+TdB9`n(__1e&r48!PxaZk>%6DgW-o^o#k46%#cBmnlU1B zGQDo1Dq%ACu5o(rXkp-_wYNfT-FRuFLACc-E{n}(UcorG3^e5jm*%Rc`p`oxT)3!OK7uc`g+jmMij%+TGcxp?;o#nfZb)^pI9qyavfg6Y=YN`dmiRI`?l?_H)BD zh+*2KzXs+jJGQ4gC|NC1PX|Le=V>CL<#w@I1Ho& zEQ-sj9k)>3iHet#Kwf^WDsz583Sm`EqpBulCDFJ98;Mo&iC1up24^aZhFYcYTMu7e z&gaB6ld7otEY>kc=$%u9Zd}mui8hk&2Ca1JmM$C1$D4i9>|`Fi&$`&V7o#rGYeBqZ zDxt|Ax=be3D^J&T`O`ve{*u*}<`*Jbl%ui@BI4yTh0Ug=HC?|2-KyovoLSVv5~~H5 z5jDh)C0uJ2z13Y*FgVO_GkR(*I-=&4c>!-fC?!90S-@PH?%T(LF6wKGHxSATn?pev zS|ZCT1Yz!#>%Y2?^@@J=GZ-#)8VT)(0+w9+^M`I7KbNMBZ+)Y6oMii@M^&(br(BNN zGe4&>ubR@h(Wf0NeE%Tb=BZ9!$jmUrbT+TcX3>&$ zT3G_NTJQ%va5IC1nUcOXF9|)^*7sw3i}uQO3$Di*J_|b;1Eq)=wBw?Gsn(0W!OCCX zFUgsUdp~mTwFwc{6zF}F`cP-JckcY|I$UmceYyAW-4EJjZKhVE55lbq8IKY(hpRtr za1H3rs@eC-8y#O8*|F~H)U}`J9ojgXEmE5eWtlwUTEF*k7RNe+aGwIb8;?EvLD6?l zY+s(x(Pw;KV19pdW7JgRE|=M)gYB%N{^>1;+8}#%_tv)I+0QVe=(~<})1#^0CS5So z_ioce*BmzF9VX6=vmb7(t!zwkZ#(r-^>69pea3ekwpDs3Et$$XcP4Db%i(MG9huyn=4Xc9Pj3}knitu(OIWTrdj5_Ih@3Ss$KQUEs*3mm(9-` zgk7Qbr!B*FpZoNsU;ga&9#vd*qMO})t>&`TW;(*^O2($hchU9h`K0pwVVUc$aZxh? znH{h7+=r(xlhTkeN>(rZEOxQLobXB)D)1P(Q;?zAW=xdYL(Q{IF%osP? 
zo?2#cdzj@sTe{Yi>EIvW81BAtbM>i(+Hi-v1MSR2<%;`#o$l*rYkqojDfPBI@{g$9 zEyK)g>9_|hZue8^TC)kvbC=plM!Ma|m^a$KPs2U%Tz`O?Rp*o0#%JO6x2)#L?wbTJ z9-FsI4@l(}n{_w5kaO`f&*miNKk1EsPd~i+t~XY4QKx^P-0ML0*E*co0xesDE2H2K9KOSR`NOS3G$>=vM6Vxc~=#qCT#tQM6yKV`>?C|XCgIC#}j*E zo9f%E0%ogqsxRK{ojBXuSgm;8bn?u4_OLN{x#fCOh7*o!WGx^{@xb*7czGRv>%<}G zS8OJ3>dfZtiq{<7Mnmv}P0GjgAD!hj9D@$Dt&naD>JB!A1A6@qkXxrLUb}v#-UN-W z+iz!GAG0}+1pUybakXD{#n?Pm(r_~2JeT5eXGz-9{_0WjPJZR9b5h-=$VG-|i=8S3IBo2mAG( zTs_HxHs4C@cDeh~MW6lLSl{KrWlQsrdns1VDTc{jHWN1zTJMNayCW)Vp#5OrjYb6r zS7+?{V34My`*i9pJA*vacE_oS0lujSnbmLa9v=wI)OsHL-28EH-8_dqvX6#ATm*0a zJ_vc!apUmDQpYQvd!-R$kJNh-w*}m5j&82uauwnPfU5bo%G+Nve|`Ry|9#|x&0wj+ ziLm3Vzs_ZDfsmlb5UKw`p|*N}{_k5up#IL)@&Nwwj8SB89AgYMq-{}hUita}4Ts#c z4d*G#5uiF&{kN@QhO#cZW{^txg$*6t>TnP%jMbW<^6~J$`CSG}3{6q?)-hYx^AXR< zmx`ZlD5ZN&le4N;16zInQ6bkr>gx}Dz0P4ie{KzD$%n{dZz^ZS!L*GTe$h4FN%`$} zsc|#vhsx-v{@MnqaFss&0r*{R%Z!uDFfr%EHg*w_+^5X{@deoG<1{{D=50~asmw-mPXGu$71~!5tDS&H6j(`CvqMr|BJstrvkBu(L7|J(G{i*ByQx-|{-WH)ORp%^st3H%?k-uyz%Pls;()ST ziMg)ozRnf5GtQ~`g3#g|n+SJ7gD-IA|3OYlvUw}A-}N`Zf;g@)&}zX6tafr1Nk!qhKW#C*A@gY)EPXD4rA$UJc>%RPRD0~pp{LTNgt%zQN5iEkB(WM=YZcmSrny4r?dZl)U!Bw1L+FT+B8F!0 zck)0sBdfZHEjPO<*z)ZapiZ{AqCeCu=5XQ(P^XwN zg0wmM1a*UTIHoH2Wjy8~MEmd&V?06xcwTpu)|F-i3Pp_Zm z107Wt4tBC3KSHKugbQQF=P$vD8k@<^80kpLkK|$W z5Sd^hZFUXPQ0hoxxq!|H6;b-Y);^I|VofZ2DmsKI+gHv{b;pVZ1!ZAG*JPC15ch?XSxOM0Wtoy%d@+h>l!#D1ilE53L*z$f=V%ot zwA4alfD;X8@VN4lbc9`}zvk0R2lqP8b3gP3*}7g*FzFya z*Vm50##s{-{W?XJ`r>tCJ#VnUxQ6UN?&JZ~WsjTLVw&ZstNF=U59fg97L2URocQu( zKeDZ4TvaUk%3*`8G*$#9(fjrlDey`V!i##pvKRwfUz8HhPP8_Xckzp3xL7_%VL;=Y zp2cg^o64j7vK|bHRa}qyxHk@LOqin(;{+x3e0gLfuZ$A& zqG97cgw|FbBT4px13{{!Mm!I+0?<1gVHIgTWBIWAgb^BFlVcLD%EM?R=`XfCyqw!c zOvsIgP{yYJkG%K(YN~Cxbwd(TfHZn1(iKERK$;MGm7+)oLlsaUDj-cws0lUnE&(hE zmQV$;(2Jln3u2>56Euh-Jtz8>?|JsO&pz)sd+eX~U%-Gd7Hi$}p7Xk@u#m1_lpYSl zG96mG8HJc!?blRpPHHzOXruPCe-~tqi(WceeBVww9tm6Z4dftNWp#Dj<+gi&+ySJOgwMAe3+Ui=~H|mn8Rm(5;dZ&GCW+juS_$aii0`_85SQJOpQ@J zy>XHU_q+x-SOZrq8yv}+?8>UmBga2h@h#mmk_@*vqI#q|j6pOrKlvE_;qka@;_E&g zq7IteqjjLcA9ePc^|KV73 z(4pt&&eZ&R6tnxKXEi$H9i+Jpn!a*wyZFCrpxB}`lb*zw5)yNs`&l`~of&seV&z1x}7c)BU)^x|U4wVin!O3QLu|3`($ z?t+=k!_oW~KW=J2UNp#f`0^I3Q0vM9oen*G{ouvVd(pdJ@Tk`DUKZ%Icz4;~rgh@Y zi}j{f7U(pi_5Hl{uZQn;zeWtTeqL!~$H~ zMa07Jed+QH+V@_xzT$9ZL|45yewhwLT%j4g$!3GE5q4yB(8-bK*xF}OLgc%xbZFb)?P0027U*`2AdX{woV!Ews<%dDc6#cU*&4rF!b zusG5H09Z?fSPc^$nw9QBp@IO=`DQjtEV9uQJYpI-JsD-Zf$)__hB0AKBWlt$Q(y<3 zYXE!oUJ}>?2X^+JmuTfhqXdMnU$}H)Qe;N-s?5eiAfo199UGbQuSaqow!wM^RpqSw`di3DA_u z)G8_@0L@uHpQ<;UFojFou1a5#S0bCG<;+L<_fg4Cnb}9t7z#2c3F+I1tO9_>+o*_u zRMdVYN+0#!Lb9}T#G3-MxZbS0(ejG{e9I3k7v_w_3$Ptq5nVFD&yGw7eX4 zzEac^gg9UP25$748Cs6J?t#QqfNP^JZi#N+c6l0x-flVKp@r5~$4n5BU;E2MJgXEe zFcq69L!axp)l$=}(w){#z2u7K$P;G{V08M>waMsrR5T9<-yQXuyMg;kwJ;qPw4S4P z-qqD`cveVO|G${%|Bw1zQZ7AwTicQqjQzLYXKUYFoP3QEcYt+i&UB^#Is`PCd9;4U3?@ zfwkUcS{8nsU10muJ*Z&@KS47`GWpt36j2z0QZ5VaiAhJ z>M9n91tj+LfzQLB6kbkc97J-G9r~6WN&qTQ+4XRc6;n_I*1?s`(PIWazX9z^LZ;wA z=f@Muu}N4w$ifF?ltcCbfE0*e=uD&;86-&rOBA;wR-dTQSoK-NcpG^Foyr2Yj^vV^ zG+?P42vr=omJSoaWxSOKMJRKu!qNqa&<+5U7aKiA0N$o>U>Rv+1fU(OcppJyvmhsg zV7Wq+*>Wby2vH=l1-KNz4in@wqXWknQGz&dJ^_xUvmaUL79KZJ8MVWJsJ9`V`bd@(qz;;Wtu0eylAVD@n#nsPx?K(efL13599S@D zEL@nt^+*HusSl~ca#eAFLX}Z(tKbY=fiwWRhJV&eB>51ls&~-eYNEcWa_B_#q~Ho3 zoFihfa8YxJI5qm2sh5*r-nE51x#3P?KWI=CsW9A$z1YXW^7jn!PJRlwZqHh9xa3o zqdBtW7|@kpDCCDh4B|QOE?Aig@xVR`wu&sFLR4u{z}mr7$I-TeQOmYbCB&G@v9Z%` z(dvXT#i9hQ-O**+l!uCj-|(nM;{`@y*L3jg$2yU2y9k+sgGo9lM)4`PXpnTZqh2fEM0$h*AR@sKM+5lIfIrMB_P?Xt1S!;^%gasCU8qHaF=b9Y>ScK-Z zqyxtpQF&rxCAn|G&yYcMw);EJB$=9hpBEzK7#ZSp(|1B2lo{X-BGMK180raCAaoAP 
zJM`kflGssYya#jg9ji+34Dec@!c>Ppy_U|osz zdxEOm?!@;#(RdOz9Kj_qBKAdHuzcwGG6skr*AEinF zJ!K(>rK9&SKPTrC9^K=9Mn}2$p)X%U$9kasrX?XGsLX2%xlhn)Z7|>z0>lQpMOqX- zzW}L49bqg$Gm*^U`E6Sy*DAK+V<2pwz!qAHIgEst<7z!H#~9cjM?ji%BXIdAh^IYp zp8#}qf%xPLbV`=1e2HtsVlW*oZ;O64wG7fneo$Kx^^|*mk6XH^hIUUBT#K69T|tb1 z)NZ1+>FD0ySN?63`!A52-+`VD5cVGg zdVRH0CT{;t*qn73#=q@&$M1OD(bG%+UdW3@jqpD&R!6;%{;xpqzsyB=g*cPee+ruq zb;in=9mgs(5dOf!Y7UWJNQ+nf10H57D>dHP@f1ViQScaSKHO=--YtK`kD50#P;lhz z>%f-D$>%h~6!F~)kj1gz@Gx_HqKYF_h->-L!={yYRloSJ$NpZ(SDY5RzMG-JnCZ)| zg&#zC6J)7i~Xlg(8^DlXH(*3wfDKG?uXW@%C@{X4#)WZ)5Gt zr$2$-?}a=O%_=42>D_^b}owxeGzW_{Bfe`USjymVuHQ%Z*P z{kBs@nNeLrd6LRQ|B6hUa6m;i#O3ylW0J`MUa7&)7Spdmif)%xq!)SLzH)8iOm+1~ zW?yy1z3=d|cO*X>PTjfc@qsKu;?`rIx_#pw`gUq zlDi8&zdh9Sl3SZf;08$EGkCTX|6EUaXRR~0iWht^ZuErF!H2-d`)q;1M7j5R^XUq< zZ4em%Lyid)y>nCN*y@l9uy$rh56R`j3OhiWAx0vdj`xjv(h1*izc{4O5lkHm+qRBl zMMe*Rj|nOrOdST~S-;AiY%ZYho$oj#vvb+N3qq080tMmXaWD8c4jc8Qr=rUygNoXp zFyA-!;<#b)-8f$IFPjeLOZYO#`A};0NqtmMH(Uf3pVZ3i+Koq8DGr8cfrfH@?lG4s5=P-KGYnre6S9YNY^zl+ z?U4>$b)Y5Sw4I8d_uu;Tet1c-QGB2)LEI?1Cjf1)b4^-tl)7w;+(n7^j`h~O1fsy# zW_Gk;W%$~p1>ZDRxN@37)31zs2x+$BI_IUECi|Pz;w3f#?O2aqB@&j+bn#^0H`P(y zZ_6Q)@4U#hRSCyP#B?mAxCdvln!zEMLSiLPC{{-IrqC*OrUq}rR0-FJO=VyQr=hT@ za5xDDVuO^IrC|9FnybyiQaSPTy(v=cMu!VH((}GN{aBZeH?QiBDack`pN+##4(Zy{ zDxHXy=kG#FqFE&Og2R6M0Z6svcQ^xow1wb8yzNoSNEcR{uRENzX{QQ z((cfN?7Hi(#;cXge>h@1_uewC2G@W|Z}x(Vm5)#2r1c>x5p1Y02ECH}q;$KzI59gq zNPF@Cy3t|1?#eN4?K~nI`Es0?6_HJBln544^cJ_n-|*Gvgocwr_Zb^WE~`b(Qq1mY zsG0xDnKCNm%Ir_5&!j2;9p$&=V{FJZa<@nW33;w z6?ue%3e|x+ktRue$^alwiWG<3Mmx#@kZ?(Q8ahBFr9I`GM$atNI|JHL1Qr4f4{CVK zm#v}O%07KX>ah*kKsbWoA{(JEIsogv)VLZ4;n^HC=^0V6ZiAoqpJ9q<&_TL+lkDM- zBA+e$Gjb(b!Pc_lWQW{v_Fwm^n$khbF;Kz88Xe-#CD*Mf{)C7oXqFAXrt_e(LL$ zc=0aK@oA9>Rw=wLe`SFrU)=Y}48&zH<)SJf^0sL&sSchMMAF%A9eHBp-S}bo>`>MF zcS>e0kKKLz=5ycns2@$8{ty(bo%ilV_?^bP2Sa>nqH|XL9_W8vS&Ch2{p8G{(NTMy zk>GlJ3fcSY@$CwS0sgLOOp@)@(i5tW{dV+pE}E4#Tu99jkmm<73yJ&LFU0aW`3^A! zJy3A&1V}-No_zj5ScnHrC)pPW#w_dM+tzl;FJ?f8i4Mwb7IVRM3v62?*kHSW5HMKM zK1nMq0+(m|?4p#j9(f^xf)09Q_@!u*+rZlP@a@z()~%;`|308u!x`CSsB?uqBOOD2 zx2v%*-eNFZyLyu$w$GUJ{ip4V5vkefF|g`f&;rl zjA3z;HxV8i3C%a-?yJQcIfZzQ$N9)7HbJ~zCHf6P;x5zKq_IFr0#F>!-j7io-$gjJrkPAVowD0t2x? 
za~o&4k24?%e(B@r^c3njVlE`04bCK_djLSHILOEi_|@U$T87)(1t$s}Hi3r*Fha%r zQ>TZD5*0jPpavthU|0H7i?Z($r{8O^~u4YICZD%$)8F1D(jyko$KBWgftcMO*vT* z2VW*~_}`AGCxhkaO4CHJG+N`kcQ&@o{}UE8pX(+v3|*#k2@~CJG_j5Fq4ZeCV1|ni zmNH+(_AM+&aMT0n^^0~~Gwv{FXgXEt(q z_JM5B5zE7cWw{V6Aeez?H@fAa*M{`lgkaH;LjI{LnyJ%7WYr{F)S_QgKXL&F7snYc zv+O1WAQ#8cFn$hzi`%kEX{tlnkWi_nY55uhD$F|Z&s^oMuJ>u4>z5H0KhRCVvKNoYO+j(me2po zeSprUxj%b-(zDfCDYcBkxnLEftn&Y`hYF(_3u zwp4yj!C`1ZGQuV?XB>^%B;L-#$`)#BC~i5&5!p!3SX)o<#h>7B-<{VNb9oCY1+nn* zHh770`hqftFfIRa6Wev?+dEYbW`E{>1T2t^Khyf5!!i-dI}Gu3Ng^FVSoFZ-a3H=m z|NU{SlQ+jz94ijV|S=bvHyQGhJXPsjmu4ge&H2L%y; zAGc5;0FcgN+Al#)Vxe~tI)ca!lBU7a!!1IAB!q)o-un4bVG%&pN!TFIUV)Wl6x*7mmfElp&4_c847d4~* zowxc|jx~lgH+-U%EV`&%)@dnyvicur#orw3MRZ=DqpGR3ce4RjH=9YE6a5!&HEmx30>A%WBU(|wR7gMBDZFA<`*0q8YIoTjz+%w18cM70U8uD z6?Fy~-w>Y?rO_O_w@BDbc_vP|z1FB*pCjqW*^h&T-;(0vp|a$cg%!Ew*>L(dC%KO8&ruY{U@#k)Gw;K-@sFt>vp7Nnf2qXuA72}0;&MF zLl&_V)Dx!E#+HOQx#n)o@1tl(12WreBR^03^aD`6ppl1iS|rI)&2jkfS3cHJG%}57 z3x_<%VKA}hZ>H&U!hXba0lQ!K=sOQ!zmbv_sIcBv#dEImSTctuMojyQkyv>H?@T)tkR1;p+?3j<%+`Y^W zL*8fQch@H)Mg)rNbxQU}Qw(y9vp0)C1l>s$sz|sZDXcpo9BR18)gMc zC!7h!hn)EQ4(C6cfH2|_b7cVBMb{gWMsa$qyL2vpX{0dKOjI2k`@n!i(%p$WCy9q@ zbzwNsX0c+@(IRKVi5DGQe~~%Y?~u$?MBZaAfOI z#V=fSL%pGzWZ4CJSSz@qqBA@`p*36C-QIJW+3qS#=8{<#HS#v23Ziw`?tvVFo7IO) zjUw7^wfI6qJn;#aavZ>N8aH4L%7{=!7Eq5);`1u^48N}bcstcN`jb8!9dA&Ew0tkA zqs*}zOnbWL3nQA&-CN_`Xafrwg@Se@;GEK;Sb7RBe0fabZ1V&0*KJs>{pfI5C0dUG zklUN`63h;BqSm3y~|o!6Y#Ga zLdO&6frDy6^x1Z8JKDl3edQ;JB!fv#l%SxJwrx2nrmhnyNCX{lISvUrX#jeJyq%{O zb2cG03yXCEx*1P8fBa4h1@Eucm?Ve@wbi+^zJB$aE{jO}?3(~m*fBUjoazq1?Gvt7 zauyh;xEz1Y)n|di#h}|nDd~du5 zWwVQ_5?ek%p529)sWRsfl3`UBk@?-VQN!k5Ka!*?ZbABe-$;1XYlW05rl9o?4_V9B z*I(ZM*HUW}3-nF^?E9a7c*HvF*-c-+UD2H)c;w%pcLy#R>X`g#`M-X6nDcy=Clv+$ z$-r%L=rJ}?_*HZOYI$jDsZwZBwn z|2Vh*No>>n>4oc0bXU+$)5q~nm%ZiE((;?|%}@Lakjo0AdfN9F#h)k&3)8O3TA$rx zps3%jhDaN(u?Iy3Exi`;KJ6MtE44dy`6~05DBq8_BfXZ(ep)VrPu<7r5iv4zEoa9w zYtfYW?;CoGPSV`t{!Y#GBJan$kdF>?BWglXz?BQ50hPN_ao#{NIT|~2nHGnWH@;O2 z4DqS#)>GQBWh0-mqbG=EluLhP2Sw>xYC*nz}=eyq0%!(%Z|GVBt@_MXhzYlY}oCCdv&T54QS6 z0goU;<2i3q4g=FNDRv+9j`$e9D1l#*^K(F4nH#_`{q0Jx7Bwu&x(Ol!gt2JY#!!9{ z?4^kW|GlQ7Ltk(F$e@+MW;<~d9FqqQPM+J~OLz5@hom|>2YN>%Z0A#AOcIw&)Q96= zB}pGo%tq7=lUGUAUW)tCNR6GbHtx6a0W~Mmu0RIl&FKyScAp@NqL^WQo@#iev{!6E zWgng-=DBOA*!VVL5~SGyGkig*jL^R6!qovpT%W&b?-2;f9quu2U2al(xLtA~0VJ%#KT^EdNj=C}aAYL=|rC!*&uX>0A+gmla zQuAYKg%?ok_BCN~mg2#_32tXEIoN04ef#lr3jg{ivRt~vhgeou*E3a5Y11{J@a4vA zfu2l`V9t2zk9k^>($>QD{NSy{s$2K8Mp2Dlx4zJO@1b>=ZQk1}e*{#Ietl&LsGx@) ztu%J>{N>WAfz1?f;hzxok=>d2z?}9t7|#K>K6?2dh`Op^%4l8$(-t1r^Iv9#-^Mph z@v$HGKUd}aG1iRD+)`9JyCl7M_LdFxm|o+~ukW|)(vM|qacMPFjg~TLGM6o~1FWmn zGMt99f(|$oI8d$}*L-D7i4xxm&0T8rbFCYW_c*omC8+OuSouofR0c9Fq+>aqPw4e+ zyLPeD>&qN9-q$&HZjuDAs~1e6ja%x}?0}=Ld;>UVC7V5HkDnOf z*DW&sSK#6*A|An1AuQL`Rbc{)o}ez2}BW)-xNQLBS^O zU_0eRjHi3#T>MwzCw87s#-Yx(=ciqfg`|`+Q6YysUE#U4LKy)&-y3X&NHPydnW7bbNoaWR7vv6ys1Ans(Pp;JXNn_r*blX zPktc-kwBed({z=m!Za&)yi1h_-Tg@-vj>H;$-S8NQ|_|X{nt&Ub~Z&Sd=zX2s zh|@G{_BO1*lCg&Nt`XQlOR002$JKS0(bn27kLa1DYou^DZYH_*cFv()$`q%r6*K#d z2mtL_c}%$=TY-zBMF%2{VD;TlvCH&s)z+QLBGf_itmBd^mm_Xv-}~tMSr33+JVV|w z2c%s!x1DU0QU4nF0=_p{OH!YXfx6eaviQK3=F<9IdM?y8n}%0F-M|0tU&B-=nYE^w{1_rl}(}MKZgM7ShfzZAf9Ef7&zx=&+}4ZYV*N~ z$_UnXlH51|Dz2_mPWg1oc<%@P$m8!QnS3`Djsr#hu^gXY>~e0gM%~EG1yV}-)>7#0 z7h7Mn;mGY}TaJWGlZJU+p35aHX2S0Wj%BP7_+xx~Dx&y=>#tIiCf1{O{-2MYX}9p- z582=Z_pM)t9Gbk}{#|M!ZE~79SFUAJ`!`vXtKYWVoJ4i0W*kQlJg91fW_;RiC$i!8 z^9t+A4y%W}RWEDa#`wlXojP!;%%N55xPkq-m-nrnb0icUJTaihOg5NV%267jBmm!5l-8Nq5OFdO31ec#^EIPG_B@l4Sv zlc+N@ULTw3ozZT)1RhjpyAjgjU@l`wZl`v`D?X~xN!ob&)rPd)XR>3VmIjHA(3+(h 
zPFUj_rA}&C8wkq8dtQFySlG*!WqjNw{(IIb*9LD{t*M=pDC04+tkU8n1OMWRvwLyZ z+3)yPQn*wj0Vi3y_5`4q7sai`uL}KI!`|*AZL{q!)Qp1Cs<$?SYJEJYLw$uujv+tkeO$7@mK&;1Nn9)ID^Lz^H4na4-r z7e{LQrY*}J??Ds^0gK3U^atP)j!2%#X>If z{`)HoU8EoT=cT|XYmY-nS%_}Xr@KO)|(57%EFjlD7VQ~-$ZO94;usB-egESu3}-kso72{ofvzNl(D zM-i8Pwr<1OLI~zwerD4C{K;i6tvsm|^c|Tn#?pcft$Er(B5%j^jl9uxa3B1LdGr+d*kL_~)BfM5*5iF{M*|VC|56w< zvL4vmZ1_Kamb&iB4nk=O73Ug ze=f>`Jq7d|{<`_1SMPR*=kE4(ov5GN8*AQ=bPIO71wJ&Yb?8xID!TQibH`o~zP~(r zN}KaqZb|IQ_1yGBh`}>%THi-Z3xc;2Q-sbqbY6tIB;3B3&aSw4eGP>*q)eqauejwHaj$e8J)ICo(wNS%KhPtAX93U*G^eT}_@j@A zjz~vSc0dSCrVs0|=2a!-1JPY3;*YuR95LjUDA>=ZDY0zlmpmH^yK(sxf1tdV<3qKn ze7Qi)+@l`XWTg2QUgo_}SS#POIrj5W`5E@;+0G$;nV0oRz$Y)WkAsMc5hI1nN6Zmx zn$4Ang}cokbLCwP?%8?f$qs@SU9%lrUfd{j!1vyHi(MGdGu<)lGM;T1mo5s%6?9>? zmsv0UdXB)e_h$3Hyu5c74sYXUp|H+fPfc_V$fG}d<_5`(shG`jvDfaf`kYE#vE@?? z=+Jx8AQhZ5r@lo#vygRd}xfAPpJw&-Vs8KLPy#YD_7m##^mcq)JHKvce!|* zCoS_oqV3wDN6b1JIaLc8J=GLy_VMH)3*d2(POI3jxsaj9d9$%VL&_5ib7%pKFVfi7txOxj9|R-15nwvvbrO;eUN{U7b9d_N$HNZxktygAp0l?Cws zEj#Ln!babJVn=o2&7#jp)C~N?j{3h$#Tkr7-04#r>VFab zaIKUP>Z^6E?nT5OyW70R{99&b)YrO#S3bRBPcZ`oT4lk6vb=uxKHG4zVK&FBTSjsg zBe97*1)s>>dXE?=jh#5!y`3JFNqXul&(6wMKVmHeR?|XCepK?M-L^WzVR7X`S6Qo$ ztk5VS(^oN+2J>Ao!cf zSYV}Bdxv4?V`jKTe+C#FHW+@j+s>IISgo#-Y=2Yhl&IbYROjnPM(lgO8X`hbr?DDy zFY!WH&5r|!JW`saMq{ONTPM$+r&am{8^l)miyn1yM2{=?o+aO+`=L6=UN3JU>8fhH zNG2+1NZeCyh+k~B#`*IALaSv1P!lD7t7Io5@Q7`j*V;?x|4u% zTH}|KH-4h7J@Kr*8gw`Ofx`1QUeZlPccc%fr(mRaKzt1{HyoM!Qg7~gV-|+xF?obH zmK#lo34D@iWHja+z`oKF0X@}y_-=~g6wUsI;w6DWyl2#drv5k5KnL?ZD zT*D`5-<&E}fhCwd#Ux*yeN{Q()T655e0)9Gu%p*#$(XLqaXudO0Ku}O`fg5M?Q#&E zDQ(kR*v?QV6vhE@-aBb2I-bJg8&H&sR-FJd z4$kyHAa~-?v{>sV^Lfp3t*37)#wKi>gNJ3`Rj#wQOOE4a#Yy)26<7s_Ny!{mh!isb z%6oFOT5o(s_G#@IP!d1J~Qu16rjDETd#YAICJuNB1N zW<^X&&^&HFUOUe4ZEek-mOsf`wc=s@Vr-*@=Ti+<5!#J8F7zpp*75trAaH zCG(`88`({J%+(b0=r2%a`nm~}Yhhq`SUabSRUVsv0FRpWzCJ2YuCC6iEbg5ASWv?7 zVBye%hV3U95M>5R6$h0kI=O-Tka`aFDTK=cBKoVmBO$q`mWPeV>`yyV7B- zk&-~I@Asou-`ZbA=Q`R^1FahaH|3Do38q)(&#`Twutbb*w;hXGu9NG?#DmVke19X% zN9$|_^h0!PX z=Ohz{*QQ_?fgI=0UVnzHj0en}ikBM>6MGZFGs&hKMq(1#m{^}$yp>KEP=VlM#sG;> ziIQy*o2TUb)=?50XTVoYuA=4RBLVrkGGFZHxo#}>U%En5wAsH zkUvo{Y|6N@gHaz9MnBV&L(Jpl$Tv@ao0p}a933tcBgkA$R+=mE&BbI<3aTT#4R7Lm zf7tTU!I!}4MZK6cV!=9;P;ZfCy%_bfxI?)wxICrtIytE>!5pQ ztcqxWJ2zb@y_Y*QOGbQhr(}>cKMw&<4>d?+p8%2H(P}W#>WroXmSM{kiJ27$lUlGX zr=PxX$v#}!A?8%}G!fdO!9mpYzOf(4Yv#O6yY^unAxyoR1}{si^~j&hnL=}}n8~YUE+1-BgcFkO2VJp$C@CLPW zlliWOE{SsjzK8Q2m+`h|;SOU&fFlb^%rXG53K~z%seNYW8IEiYOX^O*C}rcaw%S4=}||*t=t8? z2k3zOz{?qnD={z!WpAJ!;;!F!g0b36yQU5=O+jWputc5ZCh#~|`J{4kCSSql3Dy|3lgRjy$0r{gDX6sO99%&^kgLK9N zPIM;;uX?&p-1*i`3m7gD4YQA7I(RN%bA%@DKL2sI_Ij#~X5PI^n2s&GB{K9I1I0mQ zkMqx~PrAya2AteN`LrR0!}6H)E0`kNDO7T;_&XbmJgfE4>VnT{tGoi%ZoSh?EGcRs~$U-7*smfP@eDka@}jQ#Z~51wr73| zAUAXx?Kj7;tubV?QARzKMZUwg8s2UV=rJ!mYc-{ez`@TJr#QKpuO)U>_MTjZlCkkw&-8wF6Jh>R*Eo^0R>%?|a;KFeS>)4$8fuZoT0#y%8n7QIC3K-uGVF=_QKx z#p?9MyY(f;^pQ*YlKCI?rM~YAEn%16K%qj7bKJySH&K7R$NFnjP^mbU_&-JkTuDo1 zf4j%}YgBN;r#bwS*S|&ZzoUXPgW(-6zulz&5EdV!tcbMSu+7anDe%uNhx^12iI%^M z;I+eQ{zL@{6F(N;w+h~#{_uOtaW{nk(c$Qk~n0N9GQhH^i@$f`wO2^)7=R3{Bho*cXz#7-Y?xb$!ugCx@5laY^=s~aA)?HHGgNwPVz{8 zx6$OgS4Ws`*K|5os%9iSmv9&DwCly1-lHD^VkKyLt+BWCSbNP*|)x07FJ{V3z znGEktxZuiKj@XcIp?-|(HGw%=ysX+ylY<;^335PqgJ1E|BWG7>{>U{ONr=)WD%E~! 
z(;n^)vB=6#$P29YvQ7&e5&gA)fiM44H`8ghV#yvrcI*zQunVF^@l}ROjNDqf-Nmmk zTD~tcZmvd*Pe6wCD>;d4@#HV^xO4TE8Z%ZUN1VUT(IERFj?0T{^L|3lQ#1TW)>@m)Lhz= zic5~(@Ef_Cs>`0n?wmX@*)GphOz+UEJcoWGjX%^3UD>7fKE{zL`^(mA9`WYmWq}vtpU~XF8`BB<_Gr_orZ4hm zkOoSdpD*mMLHd_m`nvg-d#rNj^vxw#F~jtu^j@Xy<%T!G|GLL=O&0s|*I51j&iBEi z-**190?p8b&ez`i&KeUhLzuf8^Y@v1JHNiMTEtxc5EctFh%hS}P~acJ;-A~5IylMn z0JGUx!9g12XkR-riV=t9DuKhb7tn!eF>z-2YrY0wK@sbMS~xSE3J zsub)UM=M>3PgW^O6n?y*f9NYgc))f>f84A`i-iYPRGl#yh8YHtXELU-eb8NbGXr&Y z>L+>fzSX`y6RitzQ7>T|4jC zBrj{s$$z_z@TB6(mGAwqMtV#hX<5{FFZHUdfoK)JLyyWpxtn)D^vpU{`vNdR0Q*vb_FKl)6N!AV`s+&U zH9m5d{mx%&^JrYXhnt2o4~j~O7O26DzCd54a-HItFq4zhwXi;C_9O9mYWLCPt&9iE zy&2RR%sA-5!*^SD>krxx<*a$rYwJYA2mDht#yI+$lz3nRg0yfruN}@kNh+C`%XAvAso`8* zG*>f$oQsdEP+rKB?h~@lnj4i|sKNs{Wjl305aK&Mt%nI%27@-wa;8#r#g7E|^Jy_4?491Kja zFNy5)J6tKvOSo2l?N^B@CVDLUlOWh6|2$V`pk&h6WW2G*SB5IL`noqMKqx8n4gu+)A_7K03B5`aktWhqP!Lc= zK$Jk}HS~`3j)vYb6e-ezpwg70AR=lgB0V>{)?RDvea_u?oiWZi;~nGu&IdkB{>=H5 z-*X-UHvq1AtVlj@%%M>dFSm9jd=vOa<%R-Ez#;^D;tVerqPhTjcOo5om)BO$5Cvle zSWMFz74W%YIk+mLLu8w!H9bXU8XQv!5A3h(fKs*luJvSQlFin|jFYy$5o6(dm3Gf&3DX?vu_k05jb zCTwpZu$}6OQ{?gdeix^OCs8DD_Yb!iZS}Yk9rkrF%8N--%UT6vZW($mPvFq^t7IzRtbzrF zsu=_&~J83 zYZzNH+V8^z?bJ2}69Zes>4lK-K`kNv??em+Wem9Sn02`@6h4rn^K!6=f!{0RY%eA( zN!vxlfQ!bsa0|{+M7KtO`PVc0S%mBNT~@UZVY+DbtOP1{Nj2cInb5rU@ujFAUiUP^ zLj#-+|}83SSpB{$LjR<3msfiYd}O5||7jhBGCn zMXcmrRid`%45dmZzfNE!>L>4*FtJj1Pk2mm-k;$#e~U8Lzy8RQ%AI=wV2N~!6AeV3 zjWo7{O_RcxL|~houteR&8e#6 zaZfyy>3%b#leiJbSPF*;M4&l{BlHHXHxrMz^E4*mtX$+bh?Tg40e$*KnH@Nm0NbnA zbtd0*^%MZ+~lGs<=umKTdfJ|r6_&tQ^K z!Z6k3i;Fn(OgYEf7OIaq-A~;|grq&F)J!AWQPtPGjFeCXnMc6GU|zfTDbqt59`|~h zDeN%tWiqWvS0RA2n4!ha7M8A_E)MHwGZAf0D-HK#_Y6LJ?_!q|?eo06{-ldMbo;5sF|~w>Ta| z?!Bz?{E|Qm?x8V2N#3W~60)ORN_O%)Uv5)i2HC>esMG@rCrr5muc}ZRuMjR5Oysa8 zGp!dwhuCzY2%y0&xVatEDNYu4)d!q{FuR-PWqQRD*FBs{0xH5iLtf;%)0bRfRbRj- z9E1VPDG#cQ9=tTbqzi^muPC{1(cO1tMn=yT17lOAvpH*^BNG+*q3T%%hLb67kpvdk zu|9j&ol`KE9RPY!gkj*s${0V`j06-U>G>2xy)Xb65=dEMDlCEOTZBVN4}Qp9C|)c4Qedup5cgQ9hZoZr9!jo*EEDx7>lwRpb=;bG7f4W4h5IxypIg+`)+NOL8>~2aOn6o6;AU?w~FlN7UjDWx-`wavc(UKE;~8XU)|M6lIBr| zTd7U5-u9Zl)$Yr*b?nmh6sGM>qwspjd?=G?gnf)|$u$iln$%RIGfh|qJvZy?6XqJp zGrGYHOyAY%c9$D{&07ML)B}~&4{qEEDSxW=-HYy&)1bXM)a0g5cru@v3b#y*%p28e z>#Ks`%Px~G7q(hplJ)9Kjjf-Jd4w5H@-W?+@qaPXGSnI>0B(n=Hoa@R)dCLJT*z#f zU8-5m>=CRJKBFn#Y+Gx6TmhE8h-*L zi$LE-V4Nku4hZ{E&Cy7<(aGx3YwIzX?J+#) zG3I@4s{Gu{=69(;HH-QV=U;MiZGb|mTmYEj{RdHz53TzQ|L={MpE)@OkujuN&Ci_N zKLt~3i-$8&cYpK#6IId<-uDPcCI53U^*8VTS1`5W4dsJg@_P`xaOyAKKbac*XCwhe zb&tvzbmxzx#=j}*R6BOWA=Uq?Gt~T3hUo6{=lp1l1PNucNqSKGbDdij1qV8dBB3ilhUf9eeSIR;a! 
[GIT binary patch payload (base85-encoded binary data); not human-readable, omitted here]
zE^P}ikRR3e5h&@+__P(z)|K_PP0aNzlJJrbp|PS@acJ&olk8Vo@13OWCDSjcqwZ}m?3=Cc=L_z`dMv4hZwr_A zrJn6Or61@)9^mrr(1dT^rEWWX-E+}A^nXWl18Z*Eq;DCouZVaYMtU3uece$VKeSul zGbK4n4nH7oN089mUm@O2DBmY)KgvhlXPi7@e%}cR-_JQe^m*Sa;5+%TzLSZ%Ym0jL zE9@vw`lP*mr=H{h@BFygV&{tZWQ1qEqy3ba{eq-ndG?X z>sj)zGsp0=?eR^1Cd5s*ld|)5U;4wja@w8u^Mx~jr1tqr_({Cx>CE}w`3K&lJC zdH4B2cKF3c`Nf;ZWp*2kY}k2E`8neF$#vTW0{@k1*(C({)bRWgqlht;McXmG3cLm|Uap-_os3_}f*GzpYu&uIUL%uUPL^M_SX@_0-+U*%9Ur~s z%DAE;y#a!6ggS0_q;BylW_c`6;U;e7Cq|?z4pBUB(JpR?weB=0hLvQFeqQW;LOam_ z-x*a5=}a7}$lQxf+_7fd+f)piPVBOw-l$pLTk}7-X${z0?#ou(5oX*PZanx;^mm3#%EJtt_rOv}8jg29dR7q5#KFM}IzyIOCf5qDKuPwZvS9a@ij;Pm8Z6UgyMg* z*j=335sf7O5l*Et{V#)!rn;QCcAj*a+*t09lZ7!u&Ze{7{PBYcrw@Y-%JU74j0I=L z+N*-CL2+W7I;y(L+_6HbN}a{t^4y71rA}YC#?tlQ(SmPDbU#dHX?{0etriOx#}>^b z{Vu?uT3NWz>IA`}QL8UrZgm>uW#<`V7D%Vt?qH;;u^(ut%G@FChAiF~PG+xnOrli2kM)?25@aD=o*FvA?$RV7b}lhlZP)Y)w}ekx^$)s8+8wB57_o zp5n@AeYymRHo3m`;&_P>R`K)v5qD*9EN#S3`sLbgY~C@TK#TNKyT3I?cg=^v2Hf5+ zIl^nZb4MU!4L*8bZkzV6YQQda#ZnS2QaC+{Go*$O995I`{l75T=&XQskk4-gI zOK#lc-;%m}RmqgQTLpeU>~GV$pZgrtZAOUysC_3!nJWNp(cAOc?!-B2VojOL6QkwC zv57hz-92b|_0&AfSY*X1sCsk(oOJvN%Q41wJnV54eFD8KIUJL!c(5+i?QZ{B=T ze}7if60Ra06IcIY*{RUys?jXA@4DZKsl+qW_2cX6Nl%xio6YczS?bBa1Ig{;F!w|1 z@JP;2*W>n*_gA}F1px8a^p13H`$fGDEBoPXhcw$I+Xao^f$nWKY6(9UxX-2?+25T& zu1Yb#y&G`&Y`4)xJzS1KIqrq?l%8!a=PhVw&j-0bo1HF3*uhMfA=MhLs}qjkx5{a- z)$LVY0NCSIMNzXydgfcor9;(){u8A(qzpyJskwz0oW3?Rw|D2==SfS{Ty0nyD8Z|t zGIyL^Z7@8xx8Sp}6^Mi?3to_eXL_l^mwtB>F)Kl6s7lr!+fE1KlTkDN{-wt^wpA4S zM`7q|pba04F1ll52QD+3KG-%DrJzob@@JHb(CjLvDD96g1QEegAv!p1g+Hhy08U(b z8Zc!XU5xqY_G-Rc_p^`v6IE3n8oqkO^faRA*;C=JuY@F)jz9R|s?9yHGD%>a2P@H| z0|WJ`RC^lYua~(la+c~%#bj`n zP%f8>*$w&2!1-Mgds!|4qURH_$d6?Ky@VxKOxt;zsQE=yVwy+QnS(u|Y> z8zor$mzl|M^3pDKdHvEatL)>HEvkdOeidmug5%U9{eralC26O0`Zx)ICcdc3plSb~ z3|^N5zPG0wkM}>BUyu$3P+YRTN#wJzBo2j$pELaVA`fMYc0=N%(!oQXn>kV1(;-4_93y($0WS)O+J61_4c#UU5Ps6^~C zQ+F~Y;iLv!KJ28A+Ti$Vvs`1dO^p2>y~OOYQfK{Pxzv~`+W*1cdww;&w(Ht~gwR7M zBGN%Xlq$UwdhbY)UZwY51BBi}M+F5zMUf`GcSL&cy(mcUAV}Ug=Tqid&sux!XODO6 zPx~JrV^CK^cs|K)j)sd{ z&~ULA8ZMrGiiV5tRDYn^aWH)OWV)s1V?~CuE*dT_K*Po2Xt>yZ&S<%VtgfAZ+bxz& zd!2T=;Omo8c{E%cFHy^|0pFK+%a%=`dl5f9St?y02o>|1Zip{=uO8c{gJMPR$IsS$ zQQ?qt<bDfVS!BmZW<%@yyeN{dN)j8X&#BS5HveR;Ob?n5#+!ns_xkN8jx*=k zEXrQo{ls~8tk`I5dlhb2Wa-2He6-1!U-Y2Zne)d>!2nxPAn~yO)at7%F&RnJ^T;rA zFI-$SbtiBmFeXXg=r0+%k!Cg<|k4nnFotB0Us+-Zx2Y=P8xyoL2 zIuNItTSz;vdVl$Yr6-_lise~#eXy8oXrS+^yfqPlrAr_vYr$L9H)z^f*}wOE(e1Qd zI)EDz+m-iu#^r4dO`?IPjPDXEh`mD!Zy(C!<+Cy@CS-p#Xcwvz@Ja0OprbojB;IVn z^WO9&4V}byrR&e@4{wED%0MPl?xw9j)L-Z`q!r3)ZAxa=Kl3-_@U-cn{_Zt?HAJIy zayYv%Be85V9Lp(CKu6=Kq~9=^z2x;yoaV$6b7<;y4f z%%v4Wa_K6eBg0=>_Nq0czlBO_t=>iD_nO^G=hFT1(?;tTzy1;kSpEfZLf!Q1hFO>TB`!n;%)nq3d1Ob30A`JDPe>l zR)SBBkiu3XvS#Q+IWee>gs=TqBR}?O>xfJ{Sr;87q5aNtDN0phswOFfNITAi)U);W zjg$7{uTl)3pp5JtxWdv*2~cKZ=@`t8Ezgd9d+CQ&M2|j6$AwGBc1pXAORud!+M11eY<;ac|$*RZ_!Ukk#+GW+#Kd9Ywi92`{{sO_Pm^Fj<7a$jgw3Z>y6y26FDof z9`mLK3q%Wts(hi0yqz(XagKa>k-Xz6p3{K5UW&ZS0F&!Y&%0T9_bxmSc7Q{t;Ql=PA zR?ye*7^UZhU(trH%&xEL6G~|tUsFz%kf6RKGUY_3zIZ<6IN81!UFB%&z6dYna6}(G zT{*O{FStrMsH-nvLfL<#&-+x_3)JsHrsB@j@5-m*BHQnztKw+gZ||jIhv>IXSFtMW zH>*-H?dmt4P%+x*H#}7_01fDosXk#+)#4k_kX2RJ9Zik_+pg9Zi32KkxP`1sVgWz{%!)!3~EA9<-gM5sMTA7n06W2zct>{4Tx zP@~&Wqd6T!QGwJc$%ZJHhRFDaNMwhgx90_(=1*X5)!w9Z;*9 zbhPrm)@R<)av7~MozW7j(GQ+lMG>R#(zME=3dVfM_%4yt zF-MGXR7{14ud$-vt~OsaIlm3;R;5~pZ#*ht+~&CsJB^M~r;gC1j?JphV-7ivW*y5Q zc`3DVW|Ijibcu}|xpZ|kJF||W>I7H9gu`=P3#*=xc(QGJDYcC8=<8^e62>2z6IvM) zzdqm!Jn8l0p47?B&>ft3V)I1Da}qpp&rnWRH+Rfv@s9N5qzFfLc=qI++vJblL@~lC z-HNen147FucrrF`t?uv^#@w^XO|$i$(h1XBb|=usxK~%FXEv$V6r=Y6N7wb&ly~A( 
z4|JL_?1|@DtCQN)pvAP$AQ^M>nBNmd59YXl#IeBOWM}?Nb)2r?Nir5FRwxIN|6(Sb zCJtq~sGTZo(5yHUxu_dWs1w7XCtN(k%HOFyI+I813=~Img+i2*d1b!xwVoY%zvC^m^nQPg1;Rw{Rw_+Cp4?%(jClEW+$dtwV9+T0YEoEf=KU`tU9CuFiiM9QEwOjvB3O0Q*XKkv z4ZiTFG0y-4YowSxqKr7ptU6 zi0Yu3Ua4Qud17q!0il$Q$zr##&`sJ~9j>%xlFa{fMQ(0v(&%YoGM7J)p*&nX8?^ZZ z#|y{oh{o)g!|dcA4K~hZ7r|ziiDp;1Waj-CaX zoyA=j3(^>id#@}gOD+DJ1{=%!5|&KrmdvJ>EH0K0LM&O6EFZqId<0&*8?t(5XpQt; z82WD#PhmjGf5)&nW`J^myL&h){05izZ+~ytyNmARfnwAmi|+Njg5|kjjp=Q z6H^;K7aRQ$8-pYp!=4TK(oGJ<4Nf6z9(rpjQy{+`P%mU9v9OYb72MA)yC!rnQ#lw3h9$ku%*eqmv_(N!QcIc>=KvklJ@PAZ|#r}`xM%3dP5X2f>oi)2H=GUy5)Osx4sT5z-nlru4{<0;a`^Dd;g1Fz zhfhNeB})#a`wnHd4&@;~@*u#BbNj52ZMZ4y`73k0QhT9ylaWLAvNQJ4Ex^}$_WTq( zX!%{szGLgHV;jV&oz|&?)2UO!sY`vgWNIg!(=k(GM>5YU45A-i`X1r;sxCFXF4M7L zf4i~Av4_^No7Q=Z(|KINc|zTJ($sm1c5ejY&}3>~Ib_w4x%>5%@n(@8&l4Qf%b|tA zS61jQ9>r*da@kvQ+240LxOF*% zxPD7=c0S+EjB?6u+4C#et!de*JCb?XWsa>Jl= z!{l-UD(qip+MP(a&Mj@9>g~Konxp%wp82>6^yf5d+3gOlT)jKMO5Oqyxf9d5lW@6{ zO1hJ2xZg2zCwFzf8|qGx?0)aHJ7t+WRjWJoushANJMF>I@$KR2yPe(|=VcSe8W-2v z5Lf#n65*)rdh-3%(gUmu_eW(OY^@&b!yX*V9-IdrT&Um8F?UgJPD#fo(V{(UuH%ot zXkt0IqS|)Dn^z*b35CJ?4-R%&ADj@~^^`d9ltg*Lh`gleyrj8KIj2vs1U(t(j;r(> zkC7|)FFrT25H5iTEkSD%PRd+;iFX`s9`iM+MwytTQ!btJuYHN2mgdF#1)>xX(9 zBzqga_I_ICZPe;*JnSticmh^9;Y*%U5PVuWIEDMtOUUf$Kpmk-t%=&Qm-_d!=qYde z*FFwqK8~$EPQyOV%RVj#KCUPqt>|-lGp`5ww)|$E6!xBig6H_jp2Dv^&4*8I&Ai0! z`UbZ81`YcLFZ+fZ_=ci<;Y5C6bbjGnei4#>2o1kTGruTTzvxguR7|p8tcG8JuiJwx zU-M))eg0&Ni`R=jI0Nl&4%MfL$!_M)&TW0q<3i74UH!8{{j-z(pTG8hQRe@$)jwz0 zKlj09#`_Bo!3#zrAB)$Affc$jLO#|S=U_vfqzkQz=ziXAIk!YTLV4~2b3%a zlpX|>p#sW@0xReOKXV0EN(NSG1Xh~`)}Z_uU+*eJ8zepW`6f9&2ItBtGR;^2+@{R) z%IEt1v%q$`pboB}PRXDyji7F`pdQyCG*A8+`-U(2dfmWP<^A;&o}Y~h0h)_}Es|I7 zP=R0Rg2%Xm$0dU&G=eA1f~Q=Ar$d8hl7nYo2hWuS&$k9I3 z$=8Xm0|#EmBQtQCe0_(_0^VH&u40F5zYh6P7P8YCvO65Iw;Zy65ORRJoxdBBAbEvj z=DJQd%{&&g`Tb_7)+_Scb&J0K(d*Fn*rC_0p*O>!zm`L94?Z)QBoDL`X0{o7;y(y@6mE{LmmwKmr!alpV^a2hr2jGg`|;cUP@ZRFpOi?y zsbsQiyBEU4?f2Rx4rh<1KK--7=1VGvZ^Xe7w<{`aq}D>5Avu zz>6=OdI^3779Acyl=G}F*N^AHzt9jdMd@niYf|4*=WBueg$Cb~{k?0!!ZY;zB`TZ`Ao3;0S0be4UOl(lhij@-hTVYro5BH=u-u=MIdLx_Qys} zbc4+`XVX!iC|C2@Tp(A=)sIH5)?Yu1UFw;g`<-iFg{VBsU4tZ;xBdEc&D}+;BKElZ zj#1F#o_mf>?%jlf_2qT%t^&C0Rv$H$e|cD()x^`!(Rjl%@VH-$caVQRh<8YMr-^r1 z{Q8D>L<(Qrs~?}#xV)VS=y?29K24c#LQ6%QfAWb@F#nXHV>AD>$@H$5G6Qyh?pSLQ zhf}Zlvi;b&Y2z<}1^0e&!A0-+;Mz&?y^lUKYp+bs+n+@@kt|12g$S+1vb6}UCyL$* zZKSA32ybQ>g$RFp?${#yJy%lra+wJm^v{$$Bj5pm8CZo*$^TmzRXnRY`M}$M!Kfa7 zzB~LH7J)~jTvaq$pqM5h_NMB?_tJYApJzk_!Y&zIb{hzYhd%7eC z%WV<(bm~oYN9(<5e_~X>v)Z91_6=t{GYoxwL(PpB2P>WNC_0Vb7}eHzvDt7-^Yz8y zdjD&U)|Ow_zcH%e)>bsc0%l!-gUIbx!f;rMR>BDcwpJpb@~o=}GQGcMwWH~ySl42h zGVRu4S>F|{#i9S>*W$VVz^JC|))R%k6|EZH3oszn5AJIVl z`H!8lHZa?6c^A3kZbdIk@&B0BhBOi?eJ4rZmz=L^EQK9pHZqECB*=0}UEFp;M7B1r ziG+81=idmX#z?C-?NO3(H3Czt;_vi9Wy{-vO44}3Lpb5sw1LRA!Pde{mczIW^ROP# zqtBf~Ux5DW2a=rFO@zYr3vH^x+nGT;LfmX&(V{g2aJhSW3wd884_5NKMD7uoVq!?ah9k~jo6J`aiHUz)3o8DuZ= z#W9bg@&yMvSQ!u`?!$v$3AO2y&Q@dD7zRZ6dDDhwS+Y(D=L4F8WjQy~3kaaiTM^M@ znfMcf+eM@kgG-bWdl!P6H%4IcWw=hyolGi|At8OSV+a$nNhgW)xq^i7q(KvHBiZwL z4_VfM4ecJnx9J7ZWGih+KhHmw4T(g3H5%d-5=3aLi_}vSi$nVg(}o1s+mhmGy{Ii> z#Cvo~ZzQ$Fw3AL0+`@@Uk?~#z8h8h6U(V|;h`=Gq(l$6@3>#XFp+f86BDdEZ@U{d~ zYN>n6a1-IFd~EuWcIY{q0;iB)Q)r|s^@@SQfvz8dV5}W?yBCa0Y|x1Hb0mx?2q}o- zEDBfjN}}E{h$h=QO!5`O#M!2ZCStN6+EKEkfx%@ZH}WHK*E(#|c6tP6v^w0=(S8*i zj7NR#1S5Mx*tcxhw8sL{J~xNaQVAo_+z}v0Ap@a}rV&>V5-f5FAlV;}AlgEL=}s+a zY=kiHTfwmX3A8;{V7WY{^hz>gQV~gfEBAGg@xEIOAR9$yigfn|@X~0YzZX)l4Fg2S z_lDRj56EQM8*BW63$ckKcW%S1$FGWr=!?4bdcHcRUKQK+N682NbbIBA zpD%udo8*jhK(%A{M!KE2(X)dm_eY_P=ojMFxz&MJ6i-$JK4)npc<>eAEiB3J84iQL 
z@%m8Ee1P5iF%m7!C$*DfC+;8BFvL;r$>5wf%QLH;^CBrwuzQ=v#4kHRfqOzinvg~?`_b!cV&#WkzIN<($2qjQ>EGFm{Mx06ht^2;tS^{As?zs2}zu+t_x;SV>JpgvX2N{ zujl&rGiyJ4_Yeoc4OhUp)CP(&?uqz?yUfUX=?+$}XtKubrxy(}3_C`({fqkn7mX`~ zWqw~&mX2RsG;OJs1x)uZoi|)GA0{5qG=~wT#*7wR4wi*n^e^B3yl4fWL3nJ{6-=tj zHXQZx@Vf&mVA0EVXi_=if$A!r(Pan4P&wkjJvjwBgi8;ph*3~oBYAn*#j0Krr$4Yp z-iWq9Bvm9lQ(dQ=zw8kjsz~x3Sf{P_0=#OZIi@S+mfdGITEwvDcM0y0=ZZpMJ81hK2Y06OB2SjhpOHQ4sNRl z{v1a)&KD`D{m_2-bD~JS`lJ5f552~pljTX(pPrdGK(Y{>bwkyqzJoia*FUG*AT{OD zYP*(H*E7B9HJ`HvcWp(lXGfB1s@|*ZIT&5f%?#Dl)Dk+!g8uTj<^z-ffWW8!^0-!X zqw0)Cw;$#xR=nwsr{Hy1=&dODl1wiXN3Ha^@M{{Yj?d}0!~#JdaI-g6ug5z<5241v zn=i-9>KT7|Tt71DtJ;>b=o3h-Wd5)sV6oyPLi%nzQ{QN1N?7-u>ihA(Jg%RoR)&A4 zsSD>hs$rV(q!Lw9{*0IN5n(9owtH@1hz;paWK++{d!zs%r0jA3=i{1fBEtVr(#8Ti z)$^TGu0f9Vv{oh)mhqH}jw^+H0=7!Mt2TWimBRO!&%WkcH@6QP{)SdTDC1u(Z{DhP zdu-wrcA^d!P;RL`QzAxsE(>`l&NO?jzV7p5xk;QAq`PLw=xFHgKMwbQuhPqfNo1Lz;{LNS`uOVJ&R7q#Jm zNR_xb8LEj&AeLb|7h3}bg{J^xiPQod@k90A-5n(X@loQ@BzxgnaC+U4x{J(;rGnX$Cm1!jlG_@`MbHkqsKUe$Aa23Bb%6rccAj)J z15v;BawYB`)U%Fnm(W#?uyNp~waAd9@7K(2_ztVMu}CX(Lz;{d8{uoGW!NCyr(*<% z#wQY%BrT1m-_$d1)9>vdZ#azPPRJ*ChNhn94x@wy@*%#xowPp>qhSPZ2v8+!3D~oH zF^Z~hNV0mnSVWFuH51;Dy;tmh^z@`V#U7p)M+tVPYmzIE zq4a=NoZo2Gcf~Ivb4N*j0|m4fy1;);1{cbe(p8BwF0|->07YZWJWJpVc5}>RD;~o8&$QntJwR z-4Oflspr@~sAm9S5e!>-B%Jaz4@a#?`flGy)NkrJu}Jp8pVTwOV3E9F-)K_6X#qXq z2So+t{{!mzk#@@azfsQ)Vi)v(r=Eeld6T6s=mt`TD-aeo00m&@2b2O(m~8*@>&%Lz z?u=NX{a^e#qn#9~^dB^_YjJnqsm@A1$9!JB@U`+kfUlx&IwNdohC*|0!+;M+319g3 zl?u5ZA1If4@AR#>XIK66GG$aKfb!{OxbxtM6t#YVG`I z^mnsTV|_iM^j{jUX~#fUNh&7tE5x0sE#rGxLq0XFEq}jE9c$bzZN#)U5n0F1l@Tnl z7BO!^CAsa1G&Ryrt>ixV+hs~&^vIO;;Frz|sbO)P4AS+>Ga?GF4eze|(+Q8G#88E4 zJ)t;%yG&V1C*SabY0hu%$1;ItFNW{igA|iEV2I-+T|wm5oMfj(mVEs2P2Jj`mnmD5 zd?e5D;aVq*k9^_{gZ{=@9<9PBpGdOvD|nxm7j=6snKmAXG~KP|;rAS`B_& zDL)yC*vt|6QC!|r^~ljC+Z41QRXy|{E>kE!QsO@^Q$*gLY=45Uyy7bV1Nho_*u!?v zbUNjD(0uu=_@L$HeCMDQfX#l`2Dte5kdp5X$w|RcXy@A+aQQ*ts z8!LBd>{9`+K-R5?7me0wQZy63?ENiK5PJN;!V*7#)I@m+O136>HCZFccJ%%FBIxnauko7O@VKC`BZRDx?C}CBN2J&coCs zgrOn|V-)$m^K}tmU|){Vn71FbIlk}`s|*I^{(@UX7R6g%z{6c9(r~g}_?8-@9{Xw( zhqwAyT#CZonnb}}Ch4AR)l``wT05uvy*T!Bg%q6X)>l^W;i@50o50=iTHu&@kHh|;Ss4-aZh(r>o^}`kvmC$ zkOg2{Ljk~?E`N)+fV$No0cB-k03a67pTr{wfWkSF6-a}+WQr`YXDhIlV zfm9kt9gmMoWVbhd6{d|R8wPD_0QLYn@^~X~Dlt_dqQUC-%G0>DAG+q4;~hGdOSkcs zi30IukGk&QZ$P@Y5L=1pZ{2ocUK6HycjOytC}wTn0J>8fxI6lpgN(qf6R?_^c7nl6 zV6MW5oMV0X7KpU$CeQ;twHs$)mx|+2=62(z-C(`n&6HXc8^z1%YNZnU0&fLez9F_t zDCm<_7Y*fAM777}QLN>&Py|URs52^~ha1&~YpajT$ynQQA%b;}i(t_YN)b#N0qX>m z4y96r!Zqv+HBf8RrB9}wL0po+A^T%E&nMb8+=m!AgKStD*xa$gcseTyRGHEx`v}7Z zDk+d4sI5^n(3%?t=8j<;Ai(A}@1We8VFX}dXCpBHg4s@30Cf2kVHqZJ>@zRWY1CtB zBS)is6r&K80|m$e_<&)I*c1TzPauL7*WGIa9M(4YJp=%ttw|y}1%Lw=RseuDncSLH zUVaR7@KSjwhG#b~2bAHW@d zrsALAv6PcIVCpia5$x4zmb<`M$bLn5VXiC5l3qzBjCg3QcM=?Haa7>(TeA6yXYw6xefo`u+vhxDBS^;$!eHDT5+LNEXU9H@ni&E}9}j!X4y5jXC=ODZ=Z zncgX@XNuk;ofc)n;`#|0qD-cmMwE0Y;q`<%`Aj78HxtBz&eY}xcNxc@gl^V@a6aew zct+m=t>D{94?j6G1%_vLJ({!s@*!l`+#=`EgQjh^Gt~3ZsE|T=#+t(k zW26$uoH z?lEDOW2M+n>F?2F(Kc)oG2!jeEt_l$aCR5sXu@A_9kTp{^G^$`wU-VtA1RFTo3zqK znPfD46TlR%idoCA+-Spy2(^g^;=pwr{eH}kR7Bq7uhZS5nDQQJGc5jC@jjUVa1Ycj zDHR|?A!pP{+vja*9R-61#=vxU4CkFMhd<7ID4}#_!C?oqp>l0OfcGQ};UBfMz8FEf zBfG8yJytLPj587RPqV_JI4`;e7bA-Kn)J!zPs?OgVOe?pfGfgBy-_)LAIL%h59@%> zjSa#9AS4F21CYiVHuv6BS@2cI9++P2+b9?+iSUR=$5Yw>kQK1{etR64J^~jpGABqQ z3>rcd&_n3)JdF)=s!YQ*`(v@e1Z{w~F3*Jp7Pm8%!e*79k^#NM0n{@cvlN!mtl{r0 z^dW$R0B>OD7;_h{+%u}an4!pY;z9fhv>p?No&t%99F#E7)|7XDnCuhKmw?D%w_(PH zcGW;rl%jTC-g!;}QPBN3OkPzW-x?~*IDAGWlZIj;7Df=qM=723)@<$a*2^7|Y> zT-A<|!lvQduil@zsy3{+w8jTbJt~X6ZS60{W?v7Ty>C3;N__)$Q8q=|nh1|Dy~>Em 
zElYKupu^Hr_pPnGjUU1_Bf3bJ4|jHZn@L z!uvFf{sbp-r1W>oiunj%BqYgN6)*Uqu2+a!qb|Q_+#bI~-E3*J{+u4ZJ#R$)I!tc8 z`L2QbIgh%%9B#e67)IS*qc;&0a7-aM$N&!ZgX3hw@#^3NGjPZy97+*JA{0hu5Jv77 zMv)yxSrod8QFTq1>swJzLg70iAa?jl9V9Ye}l300YfeiDq} zPKf~hRiR{u0R!*?WY9-5^u6Y9jy=%JUykpf`RHjAvA}%+B&_R?Y10<>tYqBiEyKE^ zHa%BF8)FjthsT>REOskQPhqH@YUWq5r42h`2Dz|mABl99DpqL%`S^Y1rykp_mc;Sf zmH~&^ZRWZRc$md^|L;pOQiH*QbLw$gl|Gbj@-#p3w zpS>0@fExe;p8OBz-~T+)Q;P5USCOIcyMh07Y5cFwzqy8%zimms&%Zvl?>_&pH*iT0 zUH)oI`Y+DECO`52*uY^bkXncR%LeXGThf2Ef&05H$#MDrZQ#D)CJRn<{UI{^-`>Ef zd=fk)0}=7}39awdu0*lz*KK4v?$>XmOXD9R!#ln^Jbw`xqI?2}VRWP=r(>#u zd#B@?Do%W3T7MH6@{j7^r=L#!O=P&|I*kf)KA&@{MT-m#3Q7e2CNj)o_x-!baHr9C z1@U{N7fO`$hse-x0j=Ub>*(_`a@|~ey3g@#NO$P!+Y8CxBfa?&p}sz+al*!Vr%L~A zw8*e=_C3%kp5sRdBw%yijZv|#W8=-$Uha2kyoVhT;~Ynozejq?)35xuHnlZMNgTZj zuFms(TgAWFtpJ9;%`19DaOe60!4!)#9-Y^)g|@Gk%60kTr)ZI3=)l-@F^KYFCuhfr zA>qi|k*b#0e21hc!KF?LZ=KYf(H7nU&dev+dl|-*Zh_Cf0K2zxX$H7ZK#L3yEr0Dy zqi$D)-Q1*c4>}_-`ne;h%-hLVF0l!oX-0t?q;UJzbFWa3W)kshh;Z_I86H@}Wp*nL z5x+-zhUCH@pZ9yBM|$5Us-*`Wu7mZ;v6!UkW4-e%z&R^jgl#a0n7I@MdKt$w69&c! zC=DW0HM@5k2BUYvjS8@c!E`Uu_9414?NZJYE`igePwJ3R!q6%fNHOPU-_^hfYz z#8-$f|7zfa`q6Z&3vMSpFTDNewqh&{eSs`vze~UsgC$ku3bun=aJCW0LCy0C7-nRM z!(iACHR&_V-K3$+u~E=;3_`&=Oo+2oj}ts1N397%hcs+FckZmTAdU9mmN+bQB@DhC zXWfB#tlW~CU%E~LqTGT-^9<umYLrQ@S)^9j}(+*vU`foqg?< z9`&$JYlb7D9uYl%zr+$JqWAjG(AG>kM|cJ|Q~UhVF?sNkK?H0>zts7rfV!8^5@Lwk z;er_R+`MEFV(ss9eXHJJu-jwMC^hRsFW8WgQ{wlz_}s1%)Za*Wf0^RT7p04wpZH$ z7%Gpdt9;lwp_<6G}ou5K$P@z?K`74aC(LswHV4_IN_uicjPw5vgM`B zSLyD}04-_0o8#+~bdn}jr24*)b~OykLqWrEMqZi|15WA8v#Aba@VRYpWIX$-II@m5 zZTcgH%`H3WWQnlYBQXN>Fbv@vIbscFBV;XZ%xxLez{$X=N#_i2-fRo+qDU zE$X1QKOz=cPnky<)P0-C@z`@un16eV4H>9#LzFaaWbRCL%zipaoWAoS=~kh`Ehj8| z)^bGfGMdW#%#_MCf-*<34R$W>7HZUt0A!U)83kHN?{;|Rqo+DHcDk7mEl zwFerSgPae;k#7<~t6lK>dx`HfuDUwAqygt`?yg%5c^Y(Y@0f2^PY_za#e{wyx|Iqm zq=S=A%*dl8QSK4ILfS6O_A2f?RUa2;;E8O>1tm$q{N>BGd-e3a7P;;b0L3@nR<&5G zS>_d%`1No|Vy)K%62Arxu~NWMorYWIx!yztlK1MgdD(^QxM8Mv9M`%PM?6(-bx0uDDK^!wrx&_f`XopEgMx^DNSHU9o`I0}l8e=l-c~d`wWIw=7^KqNgXIle&xq<-FoSs#<`I$xTR9y4s+}3p zof8~a7es+b98jI}Vy24_Jo#q~BwG--@X`q{2GT)~^Qg|1>8m-el^InnK?e*H4h8tZ z5}vW9^b7c0ObOq;ilTa+Sy-8HGL~RtXN1cg+#F%FcgUer5;-vfNN8n*AJHjDLD|UsjSj_h9A$pKeTgHeU(LT5GsbYFgo;aCXI(0$v20TEo zEqeo|&Git^g`QvwKxB21G31sdBxmrlAf}>@P}x4ai5C~M+{@}Az@|QfbSBxsFh_PO z#qQj3$j*rBL5%WU{LT%F-E&jMa_qyqX0-2LZai>}{T8zzO&D>WfQiBY|6s)$y@<@N z)pkVd%xbU~SUk^WoICPjp3kK2+Hzw6(wQ;@=HLO7v-TKwLX@6ir6IWo&qL?wk*;}3 z#4GqvLkY|05<>PV6!i{0{`pmrIVR^Rc%DYl!q5r@!bE+S?h7J4ZV)g9=gi(M90n=m z23SLrVB~C5Sprz&UMx?!KSWpoN5QyvqA)A47|WmM$$H<*1Dy%`WXluQj5uNDgSP4j z59~u?4+)M0-l)K%PxTyF*t^5J8+$l>i#e1>&4ljf#DY+3Do}kmm$5D=$ z8w52g=<3I`EjO{gJi>l+;kx??oc6ww-iU0OmzW4xF?YZ*Dno=t5idAS__(Hs8rwHl z=Dm-@hpdkUtIzOlFT5%|Jo?y+BB6Rtbwcl{XV7*Ooh znJJgXZC1b?jssqRhCr2^m!J`=f(~XMG_g*+)*0F6?|;IwD&E8_ndm7dw=E$%7b&GQ z&40u>7FWCyAlT5CFd648Z~OkSu!S-`)`EvavR)~ffZIf+T;ES|d0TO%ZSAWd~oCp{Ols=RQny(GHu7y+8g^Si9jOwBS>tbHk#WmI?{N0w6 zDq5dzRG%4GpZ&7_MPq%=eEqBI`s8?^U>;~6Udz%@dYsf*CB1C<(b%#(-?D$*a!A#BEZTZ%)OsG+dfDIND+;}I zBf7nAMNd&N#o9o|ZQ!6boSZhirZ$3wHptCyTM~&_JDG7ic~Cn=PCI2&JM}_4?M*v9 zbqAwy`$T{1RbU5ePRFCB4)%o(&YKQy>P{Z9PCnz#AM=ldaymttI>i<`C2l%l)LqhI zU9!eq@#;WOfseg4 zap;j;BitD2*=U1EMUy#y>N#H}tZB2d?~1@_?-6?ZOYzIG6vU>xmZcnf7uf?u zwL?-ht?uUcl1o7X%dxX&ly>NQPud~o^jHSzGMG{jeC)ouSUu2Vdsp%=Za&Zt0}w)s zKDVacpwqq$IF>;<b3tJDGuMB=V>tCrLd=Z2-;O01C{6rss8^Dc8-i@mb03{>wDeqtg0B|J$ zQoC?$GJ32-GLzQ{*kpvb`rMdwHuyY;+PhHo74FeHdq9KoF+Xk~k{(wAr3v1F;^`wt z2Hdm+ei88@u#;DS_5jE|H4FoKT$?=1BJ+O0FBCTrW*k^vhY)u%JQo_g$8fxTw<-Hvh{TAd+9J({7gqBKejOaYuMLt901X@x 
zZjL-*Q^VFjBwXZE(O&_nLxIRVq5wCn@#?8~R;|svId$YrJ#4x=Xu3F8uas~;E*Qw* zhLxN*w*$wrM?#j{#(x5EdGiQ9mJmol=L6{V#v_$gIiN!*e(2B{nwXgF3*+v|1+OC^ zU%kiQG>^S(8q+#c>*EG`!SQk;@dnJN=5l3-Y#3M~`chHl1!bK!Ka7h=^M-QB7UKTXhT0Ybyl7ljGu5C0^<9K2{;s$S<3~a(6 z`Fm=%ASqkK^kzAsJ`%iXPSgi`YO6NCq6tB1uIZOg=C_UPl&^l!g@h79)Xm4mO0fqt zA-sE#UvRKp^MW?qxSwOX9yYq;w!$znUHMCGa|INDoM)z8a+Fwm@QQkAWkcK?_*8QP zcMp;Z#ndr%gYk{(!|~!uFg0i~)zR+-2qcAS`Nqw;WdHz??M*6~fVJs?FeLiT#z(vY zBT5N&R=~tbPwX@Y0`f4mk@y>VAQDrEAMA(r;nofeJ7(X+7)#sPbcgx&TMzMf^$}p= z2u{M2Wnd|`i}!dqy-6wzyIFG}XT;!Z^H@%s+UCf&oe}(mNql`6cKt8H=Vv(Ra_;5B zg}S!gLX&}fbHkxuYDgG1$)Mq~*LWts2{+cB?yFq}m!0aQo%&KW1#ZX{JrSq+oI|e6 z#-Z9G46*|?VGBMqEtvzq-WpRMXw02oIfUS~LO$jm_MT0Sszb=Qjv#UaY+nalP z!Z>syUUsT7d?M_6BGh{9ymZW2cEF=Cx5RPwG<3&N0y=uO2WdUClsvbVJkzD4=At{1 zFgv%=SX;6=)t20FlDx1AJ-2f`TQ5KN(70eTg9f24;GsJ{C@OodlK{HQ`<#~{$(K)C zFA^k|B8I6VUtgG*U7bs8B)z_L34ywY9(*{2+Zle#DnnPB*lOLEZ-T0qwPnJDfUiuYyd9*qF-fa0c_~z>D;5*uu6bb;6 zxxp~;n+`+~E$a|AFqPM#6b{;!)R9CkrYJ?gJK-h+5j{PWfy$7%%aW-4-Ihddg> z)8WU5qIVyEeEd}N<9FEU-C47LN-b;XG$S^{{-Fc(Y|ERzz59+w#q}dO|;L!Z?h>gK(fTxiiNruWEs2mm{ zy7U?sMBN7s25R0%4)Ag}t8t5aUEZXPv`)81$TeK;ThY3a>Ct+UE;e z?#7ehHHTq(g84-RVI>aA?Qd{#0IwzT0$;d->!mvkzqCoyuAc&n$`Kv zKmx{@1pt0_9pEQ6p&h-^mvaDEkoC|u2LPmC8s%Zbu_VeljAA-R3M4*^pbSSor$UVg zG`|z>BzL6m763_sc?!!TSO$_8npO!%_?_b@Iw`(4jq=nxug44Sn1N;F<%Vn?#{-X| z$Uhi#C+%^>-F(f1Z3ul492#ZtVv3>p0RD1wS|06PxK0Pe?L`nA)87-bru&akROP3~ zso4Yns{)>L+<;PJ&EDHE$jR)#Tfk$>I~$e^ezf|(Tfl?*XB5>zgS~B??qPcVza2#l zPba|s6)kD|?-%f>?7f2>w*N~3k6Q85X|(0P;jF|hvDo;#@^k}wZtXW%Y;GW($6%UM<0cZ7CwGz z?Cr)EF3*ehuwJKHzQh)iw#7H{i|`Aqlj1y9fjmP>Q|vvFpYy1Ay61k>hN3VXKQ67xsVD4aj0?302hv0~s)MRbW!IxQ^sQLs~Tc1jXRVT&Nv zXmXJ4_s1Aw@6Ws$)TdAoX>rFAZ@PaMw$&{X(!_W;gfPj>AtAx@ih_rMFB8VbA&*XR$;yy+*j<|sQ}49~LEv7H8p_?fGD|Ciz2fq#dNKE73r59yDrhz>;(bS# zCbpV7t2^Ao)TfjtKUeig*<2;JJF{jzj8vwOt?+%oC(GI|g)Okd4s6lMnbK|wm@Ihi zUH%11;;F90_yuVVQlD9^M>bKs8IAGYL=ku$^-%h;Q;Y>N&mdEkDU<_og^n`Gw}eB8 zN!jD|WjcUr>Pkz&?f0F&wGqUNoPy0i;Xu?oh?@BbxL)0HFSnl?$w?zd10Rk?s?sn~ ztzq_ZFM_?*&E?*DZN7eeY3Z{$hVOwgKCFGv3fGOjxb(t^{5>=<5lDQ5Pd;ct=(tC? zlSbZvB=8!&|EY*H0r;r7>hU7-QMnz-H{al$Vp0UivFRNooBr`lEZpK$3TYpAOw*pX z4m++Vm*TP7cJsrQdbGXg(k&rwRYcL|c8?>~njGxN*hCKEwORy)s7BsZ@pD%!xofrV zc2sY3rAg*}u{+Nu4OdU70-^JXiuqkWE9~G<^^&_XQsRv1BwmY->gjhs8IsR}!ds~$ z77=$B-NbA*-Ox}g@5oSA9FK=h{@m$dxAtAgeZvh2t-J9e?HR1K(VK3gJl?)j_8>6u z6;5R8lzkMT$DO7K?jx&3wMadzaPEZQ+>)q#2Ub*iboo?t?`Bg*-q8o^w|EU*ukmT;iK45hPXFUaTCUIlYVhi>2Whn zadT5~3rBH_4De+s_^L5{-4DK*4*xI}hpQ)Ra|GXGi2p1V|HU}|&@cWdJ^rLA{%k7# z;wT=#h`^Rc;F=)t{Sky22;yc0=`@1;7y-;cpxr30;p5WzCop6rFf}KzOee4%Cx93e zIiwT0OcHtg6ZtX{1)38DrxS&b6Ga%4M5U9&O_C)2lcXjxk|0kLwcT)y_Q+}cape4Q zl}(aW{gc%*k~N!?uTLjyA1CWFrszwj+%!os^iMI)NHJ|rF`rJkbDUzqn2M3$w=qez z^G|h%N-;uGphqdrkrao(G!N-CFOxJM|1`ghw1DQchtp|~kJEw}(_zx-p(g3!{^^k! 
z>Cw&UvD4}B<8%aLMxt~^vPnkxQ;KsDpi6T`_H;(>aR!nx6C=%EXp&jvpIMTTS=yXg zKAl;4oLR+~RU@5MXOh+6pVgF+)zX~RHl6iCI_sNALiZF?2jfp}Y2Sf9&MyM}<%?mfBd8bW(*X5rsJ{}=4M{)Z>xzeG^^Pl5jaYk%jEtB7VNQWW!Lf3Z<37Telt0s}Q#3rQ zt%33^f}+RN%r=L{(ZKWgH#cSKa7PuT$UNTy!i7frCDDSTBWCFb@gNfK8weqLioym= zNe)(d@g;*yg1i$x8QE`fX}aT$dU=y(K$M(J*R`u9F$(Xudg%ctO1%u zze?I4#mgnel3FOFg2MhYr_H~(IU`0>HdiDpeZM&aq9fE_abWDce(mPW zLFEa?=i1?V0n@wQZpoJYxL>_F``}of5W9~r&7(nH{&6({je(hg}9C@~Hj z6G(|m>I4tg`e4q2<;7FcBi`j9EU2aqaL0Ze?6Fer)(*bhyWnB(9eAdjKdyZLKHQ^R ziOQHSggP4oC^0k|ZS^jxNvtSla}?=TT^P-#TZnG2C+NP&Q?|nPSdZk6jBNl8+oT>m z5rXyP!(0eeB?8}Wb(vUg(LwqRFxulG4uz5sj#yh19z-3($pQ2TvCq)CCbN^pkA|?h z?UQTt0;pE_x@euGhn@46@tcoSnKm)DTSr0U(`P-h(+QExi!esVJuI;cc0BdHI?{mA z9A=#$Fk)+IDn!6N(0j|oD@O#Zn;;5*qyjMtipL7yi!+x(6}>-FllF+3^6f34hgzsYh+EBH zeFz~p!kp$~G%{SLunE1+Rl(9rq0WzSnm(zAunA)o7}}JFi)71ZL?`FYe*ERl4z}(-4f8QrDO zkX$gD=1|v7`U^V|Z=p4dI9rduZLl@#!Ia>&(a!tar|{__B{r-fA+B8rxvjb-Y51O- zv>~EH&u`mQ~K4m1Z$J{hAv#SN~WH=O8WN z6~k}}zx~C_x>7wd7h;AkZlS8;oj|WDS7ELMOEcux4g|6*Fat$UBH1~ z%`p@D2%oFHh2*Bg!KJsuKF*%iW+vHYP0!VPI6d4g%z|d-0>YIgU$eTp9hpp7OLt0r zz6xKsFv=l#War_3PirEz;N`NATGyr0s}rIH1dG=;yEY!UI3$0)r(1DPq8^{UM6^&27(z$0fpO9udI0yV zm~7iryRtEIpZrUD+8Q(YyXw4pZJCN~sqH)O-NC7s-OB@6ckJIoFuH7}&q!=`u24L9 zXX)~_?CA78dB>?@_v3*I&qtuZK$&?Gs-u?ss-MDj+Lq}@j=E(o_7U-ItGqhL{bm=R z(?{CY!6V1R4=xUn6wfzv>d6e@7hfPV`2DfNCzGugU#sGu@7U{{&dgpMHjO;r^&L4~ zIKBARPVr(dLg#Fm8GY2N^Wt;b$k{r@-Q+#KUdke!^ABd|lc|vxhm9lWyAOU4w3Ysv zpuK^Pp#GwO;6z@CXFTg-f|aUvV*8fsf<+uh!hqmUc?L zyKB0hS=;TuQTm8Hz|lE=_-LvqPuTZEY;A7VY~=vmPrZ>mCa?DY<-F1e`zPdm>R-t# zO(E2Oc7q_tQeB(f1Ovbi|=`riODO$f9KWzwtUwAv^V+4 zzn|nhC%FTKHB32Ur08V)1}hro+nv$Yz&Zco+u--D2`g|gUNw%+q8IfrvvfX!opc#z zDIszWCQ~^pxm%6};N3YA0^;#lEHte*xx*s>`JvZz38m6sfIl?4SE>Q>5w%0R=yu`A zg^=-U%o{GaP-59l#E<4*!)zV9vS{o2*maWwmf3s}O%|`wq)91O;lljxK^}9@;gAzNg3GqXV-;U(zYy|4RAIXRP z*r>kyW7%5kpdqR8Ph|SQ?6AOSI>@G0QaPvvJBM9(2i~cWR-tNTs^570~&%q~^ex&bZD1 zwM5k>(m-Ndo4%rc;`wfM&pV@t^WLeB@Wr9od0mVarj_e?8KGGFJekZ6;vxhNR(f~C z=NV7i`5_y4&>~D|*T%W=83)ZSnY1kc+ChrN0*W2FFTZ=4bOG6=0nkU{#PeYpCqyo` zlLK00Y;ZwQBGrV}iueyoMvoU;RoPl-GcJM70F;X;nOR@@T{tZ^7f1g=Az>}ScAM!( z8OQ?wi16B8yI3FB_@_IMMi$OEo5dJ$q2VAy021=G6{lNlZf-|owax?ThahL zycf!5qsA8mB&O!M0mD63g1a^&KIl5Dk@xyr zm(_O1C&XXgzjYpdNGwhb&4gxjPh``68~<#@YElMOgt2FcqOIT;7-WZ*Q& zK6R=v>_Rj}XuR>P92&u`h7mF-`VJjCY_tk;s z!n`ZGtVH$XEUyE-`qw`1e;kWcTWha4P1l|-@Z#a@J6%uU81mQ##YKw4i*C3<-!Dy1 z*Yb3FsJD90cDv}1=Dz2Vy!lN@RfS#?%-9#)z)k1H8E3`?{alC3?=Ypif&W1DVH0xRs+H2{!_fTx zFuH}vW;bq&ZI$?`>j4v3P;u?;v^z64$AFi|6Lk9%E&F7Sx0~-DK9-$%eBru?{+Sl^ z->YxmUl|18{Ex6yuL9L#URgl-$Qg5KjArstO4nMHio_oezWmO4R$d%esrSSeZv|?% z*6wYl0Y_5PChVx3*!QkVxd|9)(=bG7$EdFeXPy-&6buI+vYnrSE9XYzAV$Pz4ca4b z+{v!0#t8sfWMnym9(O(UE}ICPo%$HXPM59WYUk^s)M!|4kTg3;oNY$GeLGfv%6VQ3 z^U6syZpZ_C@DNxK9evHsX*gttq1gt4Eo;1M?PJ*i{EfLx#GvAUPO~FmEwlOgc z=eJ9-p``h#{rT_x*#E!(FDRf200FrC7Cq9ZYE?)DSUc`zeXOeVtU|X-kXI%#m z=bF6kwWfSyrpRT@rA%lu(3!1xB<;3181J{rYJPP1aV0O1%QgGa;gNSO8RYcqw@ap2 z#OjL=@fRZA2jQI*(@D?DHh}`Ew436<#HxOaCNxuu+7E!D>FlOhgu&!d^pfdxY8zzay5I9de#&a%!Ck+ZO*c-TFw+XJzKF!t|v# z*-DNXL)Bb{*Ecr1awB((-nipys#_Ih;Jz3=o|B%a`G%3yHBBlhi^YcE28_+_bwsMF zM-H_Nj{|)6rSmfav}dF9p-u2Yn?!bKWs}PcQwmxucee8FDA^N(Z~bs;pve@!5))pU z?zSQ9HXfBk;A7CuVEtM8hv?I<+kyY{`J@}>k(qybZ=BNVO zIVG6DX0o-Tx;d;u-3tB<7`mDDuH;G`@1dgvbQ}jvhu+CO2O8WSxvshp5-P;alQN|GKHF^= zVr9c>NM@btgeEs)-RIhW-PXn|ZHA3|qYMHPgfZ)5sdLX%1}qx8GBz1s#nK^yOA<5@ zS?M$#rxH?@2gid*LxYrLA8dB#JP1Q^MgedTFt`XQAc)`^QPE^Ncv#s21_lwk2MO)j zl(2*t`74o0fC6gc#1cPt+_hWq8ehhwA!k9j_=}AjR-0UnNT}};-m-jv&?H+z(7ETT z2?{{j9~Eelf`Gm+^qf#3YW7}}*AQ}imxkl+|0Kolb~Bk3KHg6Xv-+4$lN|F8Kq!sT 
z{5Ug0b%qjQlaR9*11&p=N!TwQQ~^=1!Wi&Wdw)}y6;phL^VzC5iAKBh=9Y+!7M$T{ zG~j`$>uf*Qb!0~{^v9Q(y~CTFllXelI;#Nhdk}H5@0ung3Oh$uAZcWg^Smzze3m%E?&+efoz-%IF}T4|T!SGkLcJm=p&-fiI29gZ8ItImGwV%(Z? z+W7I36iCdkm{}s zXf`H^1TLuph?}#gQXD$XECGh2&D1m&amj2A%shaC?CSut%{Dz|i#30*aBQ9oBu~HM#12S0tHSZVBFc@#r!p@!Nt=*tmzlE-Xzo z7-sitl3kbgFYe*NIC;VPx2qmdU%oR~JNwcF!Jd}Y9EYiS;U9->z7HD^37jP5ZM@-2 zuDVGP}Xy8I}>b9=VQ^<}thK*QFv22YYpFZ>!;re55qzB|nFsbO@s;dSZ_ zX1{~&DSsIICVbAbcc-NQZQWCOGPAQe-pKWOS6W8T*p82K)g_*I?Zd$~uj2r7t1=c> zkP<|-r%&JW-CSi%*7V~iCqf)li)6ULz& zWSyvGm%%2D);)-)-SndF#Bw1NRRVxH7_(EIsC?U@6D*^6q4_oV83W7X}nQWZJHLFs2EVGI-LxT@}^gGkW(GPZ_-{Q3_{y&8RTl zZKp9qnaL-UcFcwrGd|n@k|N1p5I%k$YQ`eSaGvf_$a6G4)#+cZ?5l~8<117Yrc5{! zjkq?KeQywFe-SYPQHoI>?WA?rhyK|rBLFIZa_s3p0=nwAruQ!cS`^ik$Y=1!sX~9Y z#^0SP{9$tO52p&X_vTAeaDSKB`R67VN?Jhk;J-Jy_*WK`<+q9qs%FDGqU<=dtRY1n(HLDC1(p! zA$8#0P#{bOT|O@ho-bItA45f|3kqd6@@kV@ENBkKxM8rg2NBtrk9EC|4M|mav6*gx z4OuvP?zf&tdqHiO36ytz4C$7I10pE@TIllY8>v#^nphZr0Xr9MOY_U75W+ zy&#CR6fOXtSDPhX)?T0Q6Pa{&Bru3dQ(gYK-OS(%GK`2Q=itZp_ftQem?e$3eA~La zJ~Vm2J1?`oZ@}X-mlmJ<^1|wrfUX-<`~I9FNS&Hc>P2v^2aB$l1~Xv?%TfCvmm6pe zX@Pe&sojOph8Z-AbP(oK9H=V~O(OcW;TQsN1fT}+{P8i+wP3M=Wsklk{id8++Hj(}-Cd~fRwOm(_hDXA6g=gA>n3U!@tXzs#pr!y%b z>wAhPX5lm)?$_4Lnba<99(MmrrR8GdWd#qlumlggx0pM#h0&`o%0{!+LNMZZZ4Z;n zti3j40}Pu~XiUT7m^F#!oA)K{@$rZYe5U!=V5vH@ld?d^@j%x!)5DX*>iCykQTJaZ zViZUU7GG#w`KDXY?s19Tu&$)$M0Ixt%tPqQmt)}*{B!C5i`E>00ro6E4Z;xL-wn@Y zHnj?cfd@pU9oWN51u&o(G3L3t6WrGrAS~&W%iZux9k;F%Cff8xvbaJS(_Gei1Fu;6 zWSblmwP*9%zNwR@+I*QV8wHs=6m`(glh=Z<5`WDTb7hL(EhN6h&>TX7zB)Hk%Ps>m zqNq_)8)lWM(PSn^hNqZwaR0z!t4~L8X4?`xsX?KMxHyWp_a7J}gUe|YksK zYKdJRK6V4Z4TBxC>j`BeA*;$^gc!3$w&bE1MX$^E;s^l_$u`3Ml<|CiM{TU>?rYZ5 zmi|tsV6PoXgzNk9e_)M&ZPUnXUw|RwN_c_1H z*xz4I|HK*)BXFy1w1z<1hDHtgrTtb=O-3P*&U*Bw_N_1cd_bx9EJi7zI8SNlJ%~~c zK%v6iG^slA!Gz1;_fXnjo2wu(KLh2D{%Yd8Cg0IBv&P`r21;||QDkGFVO0=L&nQyG zb=U>KsOFARiC_d5ZAtW_QlgJj}1dc<#<{@r&@@NS9?EF88l3 z9LSTQcahN)=aVji(kEJh#RU|wvU`Qzns^~WUSOD z)kUT%w+dpeZxr3*Z5l+_gdxM0Gl4vW_YUl&c9nwH7d-Ha5sMAANsOW{<&DK!_G%At zyN@YZQpGQa1V|oH%r&}}#<4lcC7|?Xn@q$=e=UVkf8WU8{XG;#*gHcANHC#T$*vFy zBna^I!Rc-Cpu@8oJaS!$<6LRKRVO8Cr2*#~uW*{8S{g`Nu?a8dALUBU7GrJL>Dw>X zPE~ScA~Qu4>my~7PC}!Y1gz7Mv0}8&d>^Tz07Vt5saSdtL;+vI!Wg z-~!yAbLIpq1)104b+LV+>kI{ZG~gR2yulGs=$IWvQ36TW*s!vb#ji0j|z8q8nMa zng|sdC_a!9+Pj@THA?U`vAi0twT8QPLlfgZPJU1Q>K6*B_piPtgrbwJm%^y@zv}!1 zP`|Ht;Q;a<>)p#Y(?yoz&fKI%6(QA=E|QRcuYpe1?^R|57kU|;;^*Yu;-GC{_HV#7c3ts;iF-RbCtCAiN$qDv6me2D7bIF_^-Icc%)7<9iRt z;L0z0SJ(uUMTfy_q|=27x|$kKQk1P2LsDFH=66Y2hVSf9@@D3s4s@ays+5Lf7MfNza`ODgA=n<1H5LQtpt#X%>coI6!)$ndF(SB9Yw&|7+eTDNRI-tpbJ!dmhQiTAY7l zN}DE-2ICCBcEo?KD~%C?9F^qF+e~;v{PS?>73M2~DRaMLFPbT5WV`b$>BV+;m1W2G z;w83Og#z1ygJLhIxmmt;p?76rByh>^iDpN|YT1A0RF4%fp*lGaUNFAFj&`! zka*NWI^$uc6gnS}$<+f#fqhozoLec!xn4Q1I$18&*@ed^Tni&bhHL8s@QJPiut?xD zFS%(NP%Gk~P!U%SZ(K0dZ#-FceiINI2-Y|hWN*FpgCAusCTxF&c@y4 zg2kgKs9OxbC=20k)~zl4f!jUTfT+$-i)4@SPU-Gg{PYEQiFp~OAK%xN-=uaIU%JfW zyUf~Ep%g;r+sOabQ261M?^1n6=E)a%H9NbzFXur$0KV}2vMVZ zPRCUC+L@A%`f+E(o@nNS#{(_(RWoh zu0|i(e-$u&!tuN>E?q*XyVhm9)4PORa1zPi`ifZ{g@ia8ZDISQ2P2VPQSY_oPK`a+PSIk=waYrSTR zoxeI{9>V~2`k#P0rqZ}BkP4Vby8Q;!y)Z?n(|MNfqRe;)DB9=7m!htk8YZ#74XweOrl;_1kL+yrUhynrCn)L4GP?~0pBS3JBsL7z5EIc1bKowLMj!Ldn@wO&?s z>3ScH)HMDV5);wI?g)_8$nhxOuPAwb3w z-Jc?C67!r)KR@U92*W(56p;OQ>#%*4QrIFNN86Drv$Qr?v0|Xttw1Aqc65mmWj_y& z;ki;YN4=ug?7F7eeSJ!AEmG}scf!r4B>VmpXxN9?s5ka+Q1~&DDoix@6rRdG6}TPw zb#tUtOULfKC1tzYc@`4zs0GL>owPI8!r4PkWm1Og`TS$Ic48u<7FY9lbQ)5`b!QJ? 
z5fp+Qq>;EQJa^zq0dciR)(zQ+gCUd?i3FG#f?`{fcT1+Dxv|5MnHG$RjvF^7VsV;q z7N-he6;6H=d?M{)QOShVFvZI>(4vJ>fZtiixKDe%N0`kb1|(RHuu%dWcHEk;jb^n= z?6z^|i(n`lEK-T5bW)ih3D<=|$69XE`SbmQ3|^q(k^lIRDx#S6yKhFnnz%~uehqoY-$Rp`|5$n5+MVty_KpzzY6~oa-Mp@FAl?t;D&;mSY!!+zK5g<^ zR2CiFBx6Bv@*0%dY*DSNrBF~bJe1yDzn&=Zpr1w~saPi;8b|)zc*0I4f}K|2jQ8u^ z(fA#vL~BgTB<+QM5$#B0qJ_* zE|I(LP6Dc6yXn)jrAV)*9l2S&NjBN4{YE}&TCS`yp*O|^@fEfbDR&VBU zo_R_{8Tqcths^A7^G$J>C~&_#XLs_M^$7m1IP`;0c0wykRtD;fLSJ9K7Q`|VY+Q-Q zs<%jUwjzITq0QMK_0IF!!`Tw|ww(&I(=SZ>5!~A}_6q0B{A-sqhFO+Eba51M$>|&| zz@zGnCSQj_*BEduLuqc&EQMp=O4rk7=6QvB%#8oj4&yJnSR|kwU<)w+?FTqF;Bm9~ zdA5CtO0Y@jPXj%*Fj59>s?GI+z9e!LBEAx36gXMLTAyaK+)^V$N{*^fwE`KL4RwcX zA74=%E#Tqh84RoASM${V?sxmvWC&JQaxcu!`@7n<1jK;ppN|6nR<-1>3A-c66`$<} zVD`#b0e|9(7v}_IJW_9fu89mY{|=aZq$S8K;q3Lm3bRBZYgk%%ihldzTzx@mB`hJy z(B*;h!A5lg+HRKxV!1V2;fo$wkU8`I6w+a^0Y9%gSzDfMReN4xb^c+4qV<3$i&e%0 zUEReN5S+44Aze^W^omr|ZFx8-%DmP(gz(X!~6 zU*`y-J!&_g;16A_Uh$L5R~n`Q2(tGwVV6&FSmmd$kVzPmf_TX%2y2Qghn>3ez&m`@ zF(M1?T^xyF{?>f=Fj=bYjF@B0@tKg&-1J3sg884+^7m+CHpkdJ{(ZDx$fVvsG#Ive z!Hqjy8h4|3^l3#4ezfi*^v~Xke-Sl*@uzQ?jAr~JqgkY?m^(22IA-PkEUPO5pTMdE zo1CQ!24wv{7QsxB=&nc(mLwGPxNj+na+y$JGMev}u#MwrmR34Q<;|J>!CF%`z3<^G z{sT~7D6#8>peArx(sg$Z#aTSaJDNh?SY2ChN5W=_E*Ev4i8B`_#v;c>0{e;d5_TF= za@e6X%^En0O}7O;Uut%lEo@R~9GZ{AwL0bDxW0P@qK9L*^SnhwDb!^FkVGvW1S{#Y z#&f~sc($Q7_Vu6}5BJ-HG&W0`Yq8r?Wo*F&&xlsEbW4|i;H5?v2b{Jq* zTOs8XqO|HMS|+}M?@2sVDBGN&s=Asyg3#sCQ-7aPQuNFh+zIOapR}T z^%t-IfB#S7fII*e&g0)cI73Ps6~C~1_|37?N*Vkq4>K>k`478?%iUg4!giw=gY7?d z5BWRcyLlZh#+cnh|9@Nv{H4Km1-K0Fl=u&W?ca9~@36xe|HfeZ;r?yBKMl46syEv! zezUjwINNmaqA0ty@zW23?OZ?b?*`kljr;2ugYEA!Nedg3wcq9aWjuEle)EKrwclI+ zVX!S{{w1ri`MbgPqk+ATojzJ{RwED74%OixfL;CjM?Va8@wE1z<4gbix`9L^xP19O+SQ5;L;NOA(n?XQg7aAWta&~(?lkdc%~Ewmbt*e zhYUA)g=iHb4$|1JWNxk|>1swSNAnLeOx|@7kD?9gbTR-%_VX1KrmaE=kum|Rk#FI0 z7foak3#TRRgd``CA;b1(N5N8 zVPX?bZA^aVlILk{`OLj|*$6#mmz=3n92BCEUslw^;O(l2-9hB+vek$oXnnSfi7C_~APjnX^@89=g7mWjG%U_#uwV!Z5OFci?qwQSd31WZM-aX6Ubxvnj`v7Ml;|S+Y;*WY)(LUz#5&wHDjIZkY#fWs z1L~FDrS(1$*r$w_1bcXyhmeLAE1v!(dBCW~Cc9ddhaS)xtI~nnE9&)&nrb?x*J01@ zSz4I#T{82wwU&58TyCMPH1i1K*qJ?Yh7RrE@1GVWu?6AlMlq>SJx|$NK~XrY^Poq$EPEr6;Kr~C2`#; zp=Vk^m`lZ`Q1qy)D?$iKl~2_O1?jOZ_}_TMawySS*Q>mc>e5Nu2^SNPPIGn>O?o74 z(dN@D-1d?0jv2~=-KekMMesUMD=3(VK|(5xOOGh3iQ&@yRdR6=fV;210rm4SN%1cq z@LSCXvLaQYr_Ys$Szc)=BsPYAQK4qd2BsIZrx1#kBd$MyhKM9i_j?2YRdA;ZA!`N0 zH+(oV!Cnz{-9$VfD*&&Z5d!(u(OmzC0HD}Vu!IM_u^!70)LU(hAWclO z(WRHI>eaGtUXu8z$bHMO2mTf-D#naGI&R&Bma;wk-khebo?1pqt6qgQ71`T@P(Xa* z^oY)UbgALoQUx*oS3gjx-Ys)|gjZO*R=+w2xs@t{^Mccc1_CHhwEHW+kVQICTbP6-t?ViQq9)!Q7QWcuG zN!D%=^DvQ<(yOae-8uPIG+KZ?>!Cn;uEi+@xB9ZyFcfFCguQcEnyx1E3=wB8D(&p_ z@lrojEncd+L=F+_*Py<@HOQ{xeLMPDAlau=b3A(HHF1juw;smZ9K;-=_sBM-mIZIa zXnl;a05G75GG6L%pcBeJXgVq3f->l=f5XG`Esr!TJOp>=o_GB`J{Kc*n0qy+hZ;73 z?F>i1bEuu!=XOY<8wl{0-B5i&q$eN1Nov;4d3zGCT4WJbrZI7gaSteb7sa>0#u?k@ zTAkpA?zo*|7t7k+sWjQ9M@x+iVdjXv-6#1iJ`uG9kbG>gaW4$6amM*F&|N@sn(1zu zRKacZVz+4u@+!Ym2by`$A&XX>&N6Lkisf0Bd6lXm7k5k7fR({BDkFPrUYtl4!>Re5 zDR)(dvo74u#c%~anp*k}T-Fw=tqiw1Ymbwo@k#6&v%xZDm~PigTn+ks8Y2W>tqJqG z!z|T{ubd&k8`is^5I{HMdPkz7&0ag@v-JDq&XEWjr~pU;-n$AJf!gf0D*>u_#+2(q zn07}~K#SGO8oC9nyQ8WK#Lx3Dy?({W?;VA8QGMI2nloz;<`5^5>cna=GL9T>L@LkI z=^va-nDb?nZha*4pnpY&15%O^rIotv;;6reH*~Pph!FCHih+15QM~UE9;dwVb)vM_C>pL~GwT_r%{`gttD{fBY zMhlTL0a^_o_iEIwCgmaP_odzXFUl-*yln2kugaDr5DIK4YDm5FzYe|k!G{YQFgZWf zDrfIl3d&hfw1dRw4IlHW+|FoDWqC^gbkjtOi_~&4Mt(hU_T!2t#qBFMt*&}{m3gX` zYs0|k-G}8~Z)avrKa$lEZ2dJ;bp6qO(k zh%n)ReX~mX_$?Va2xkRG?f?S3*n=)0l<~&wZQMjX4X?C@aH=hHbZ`*bi+hYo>divM z`M^GbSb)2#4-Cg_<9P_9);v~4i~zYJ=TU6K?nSosH)L02G 
z;YT#wv9Z8b4DvmDOU5p8|9a8oaf2&f1;+IDPLL)S$y6%?bs)d3r)A`wZw;LJB7j?V zUg>B_fJv%Tn$^{;h|pByOPYRx{Xw*NDAIa-;Rz7wauM%WMe`0S3fBBEBX<)b_+>J{9P}O<^&81Q9N?p}tbmL?SPL0+2CjNdf+#`Vwg$tpwUO zUcf}f5If0FIq{BrlFd?*s7Z35Vp2eL63tT%f8|&*N4O|9{8BS*E}gNJLb7T`3J)yV z6eE+g#b|z6aI?p{<)hOxgVNyccyc9k8}4J^FB-GeKE;9^v^rW$UAAyJDbkC zIL-qwA+aGyTvOx*qd0yhlDGv)I)fxXK?0djR1g%6DT*!t#gK_&YC*Bgpx91OAf|i{ zNIsWoK2JbCUuM2QOTOStzVJ!D2vdP5q(I!XKr)~}DzgC6QXo51aP6c3%2cQbDO5Hs zR1GLp&n(nzDZD;YsC`nX%k)Ga^5mxJ6T^Tf#+gq{Tb`KDJh^l7#Db~F3Q}ZaT4Wbc z^0?3Y;_&{F(xrugwmaS&4p3{nzmS`r>m5}8>N z-BJ>Zo+*K!lpvU%CPJPjn?6kqc$%L1G_&Pt_RQ1Vlcz|g(tJp1p=oJRKxs*4X=zJo z`Ali$Nof^RSq-GD&a|u{psXpgtfi%_ZKmwSNm)Bnc_*a&m1%i*KzVOwd4Eg!;7s}O zN%<&K#hX$ji$uj_Kn0+bcB-XfPBYJZtYVR=a(kL~*|c)KpKmpw@&j?@R!ik>K*h(C z%Fo0V`;cdc{pIfio}HAIt39bWJ$cqctOJm#dU4N))FRSEp^7-GtcXfPx`&yBxw@4Y zbL!>tR-l^Uewoy$7jb$un@ri~ch#3(-?~lz z?Zaok+-yaHDpDt6=&yyVO{56`G; z-?=co!Jg?Y8a$j8VaEaI9PW68-@7()dqWOw&TIheMe14huzPGf^S;NMwib7YVSZ6O z3+MfL9xnlS3qWJL|Hzm5b7stS-_PSOaT9J@J2tShMt%V5rW2EZ+afmw#eD zZjgtbT+Gn?aaa8h`SOqEi}$`E=LY_c8S2{BgV_S~{Jn_vKfS9~y8PXI95bEq++O(i zUG=ZuRc8qNk9AQhEdr?F%8IZTSZtbHId1u(clQr&*$Ail5ha6@Y(?RteLrLNPG0F8mqV-o)d=7rH3RR|e#(dp*ZVRTKI;ar1Yp(65u`z8}B7 z``s$k`Ec5yxZ-!KP`4gANk+|g&fn(!zRwaLeyjcX?R^k|@X>Epp^?0mKdeHJRuF2! z$E(RkF2`%>_Lax$*}l8S8^{P@j8$lw%gI(raplQ}^2XhhZH!gu=}z4{m(!0;8Hmi3RTsM8;04+QAN z_iN`LbgvL{PDoNHzSehNWzjl5jSN*-Vfoxqaehxv5+Cc#k$ug(=ka1Th1nXOmXyK9 zvZ~2nM%0G#pwx;j-#nIb85NQQ>h~j2pkTFR&GWwRd5mquLj$Y7-(P~a@5lP!4RE>^ zN{zamMVUvN=d{8j%h_0|WTyV{!+FH@OS~Bma7ZQxT=y1UhOc}$u_tk{s%yRQe_;NO z!?wnL=k~&&Nonq|5%d1|8O5?Z7cxm-V`{+WL*)^h9GY|;0#m4ejC8XgK3nBJCs-K) z4OGd;EMMvobJlc16VIFvK0Mc2yUP_0 zeyBLFW&FvgjTn#qPPUPdBKUe9euZLlv{Jv~)s2r@1neWIR6LHc>rKRo|;AHu1Pro&v}5IE5ZweMBZr6hi= zhS58Ayh^T+GY!(^iX5U`mVXXd3WYkVOX>>AuUmz%Tf7zl>MRV=WL~>$5{XN*tmmId z13lwgfC>P`3!&F9sFecIP>uJtczxiTcOg_?R^+b@O7li0#D0yJTMLpJz2OCmD#~QR zKcL{3tM)D+yDGbegeH=aa%Y6AIz}{?9x^yxddYXoi8U{@|9O5Yf$Ty=Dk5#iozL14}MbD5mAG1)A|vz8+BnLjs)!2F+ddm zj@p%+caHu1#RIzU1nng(Z!R8s>826e#Mj;32s_Mm!pk{)_cQe#g*j5n!Y2Epxc>qa z?W1JsOZsmgDZTCc!w3AIG81CB2y>hM9*SbF?tU4H{;??hHx&J_S@7#n^nb}u#XA7A z=fpZ2AC})$uOoXE#y z*dFjpan}|029_m{$)vHo5rQ*Kwn2U^?hl@WY_!HeGZNUKR$d$3;@7_x_rXX!$lEY= z8R zWkxY9%+t;Q`A`t2ROZAFK^n2)WFVApso+OqQ7l4hiU$=v28+SMR=OvpQ`}{M>*fe7 zc?nO?i4&q)Y~ zAVJ|pl+aWN|9*I-CR<5d`FGh$mb8{5yv%k8mf{MSWc(Mxe$us56wy=o(~t{ioX~T! 
zw^*>IhuB&05BExiV?q|yt7|?yxIKN>VW$demqti_u`GQHP-0LMhz}!N+y_`heaQ?{!}R#Ij|VpBUV(&%#o3`*&i7R-lp=+ns$Yj^8S z#N6gz<6|7S>prL~4bk+mg4uOk>m{Bqo$+QvIOzq0;=^vfL)4&Nj=ES$K{AKix3p#8 z$gR;UPfpaYY#fSA7@ab5dNHz`@GO0b_t27}OKb9_s||1Z9=OhvK!a0AQSs#QD>nyE zL0)W%m0vz@?Rjp3z5P%k=M}X#3vgqO6S@&+mX={9pRBK7T(JB2YF85v15aA!K)=Md zg`&EqW{$M9DJhZ-OoCz^p4K%d^~VxC14xm{$V5!dN(#3~Yn17fMDvUSku$w5h};79 z)NPf*m9U-FHra=M%E6y~4}5*dQ)0f3kkf8Gr*U(u%g^Rf-k|DSaIzdox_ne4yV_$w z&-XRn`&Z8^Ev8LI6v2t#=}k_FU+5ZW6@f9k_a6gbeNmT#<*zTWWB~#55C2Zre3n-_ zJPPvxKI3((Ao*E9S*Qa z)dRn^j_Cwgax!f2CR=}IjwOc%HiPM}I0Gzd24QY2ZySCnO&8t|0I>0fSZ_%?JS*M{ z>QoIRG|N2)rpCT-{M^`|^F`xNx+Zx22Fa&7Yq@jhz@WX4zjVP_ZhS{ir&pImxuphL z9DS&>DmIBOZTqTWYjuIXcs94k?_G?0Cf)Og?JM|VxssKW!SnJHKjkX5B!0sTE%)8L7rrieCqamd6=qrmXiGY)30*CEnge ziXZ)yyUy#!%Oz;32k}^#Cc@cOyaLB0biq;@vvKCW=_iBOB+c8%;L~bbOwK%P%|}Qf z0MX|*0YF)RMI`dZ*|}W5+Z_Uu0sMdtT-t8mG@@wM_k-OxPWiX6ueAh<4#ebp_KoYp zD1jV*#XAcn$=YJ?^t+w_<{ep7P!zW(qK^CG=`-m)`QtM>XC1!;C>}q1lM^tHn@Ik= z@LT5Cx!6x#7Nqj1UEJOCvkOk7^I^i3;G<9X_Rqs!Z0Ub_*rokt=|YED*^t_CWkazhdZSL;!uXN460EU?6Ctag+AIsVbHdhS_th4!xuK5Xo zgS&Wj=s+sSP|2a}C|UK{J@p{5`%B-uw0r}S^bRY9Fr6G-QzczK!l4%O@|$30a0NXu zSj*H%XY}Z6q`BJV7aD14lv|u?MvNNyjQhyL!PHOdLtgb?R?)uM3;>*bTQdDqK>N=c z`3nH1{{+BRGi(3z0Qg+wefx>B{~iGT0ow%6w5|0!U))_E@~z(87`ggejXbnRo5{ZT z&j5I#Fzb$>^TaZp&csv1#DQ*N5u-)}t&3Rpk>RT+5z?DY9ERJ)#Q%O2fed%C4rzhy zTO3@K2%^&&)+vlAf+iJN{%mraqW($(h9Rb@K)>Gax{~P9RU-V1{Q6+{N|HN6Ow*2j zqmj4rYZL)XYmCG^x?TTcnv5mBRB8pqo(seuQhe941?E@476lOF?`6k{2q>J9|DLv# z*9LQ9AxHU5{w;H?OJ3f*<@ zwy@FkekA)rl|@acq~~IcX8x#Zh2dMg@Oc|N7huxcgr}I!eR{t`9&4C_P$iRP0#8r@ z@F>WzscGLeleXez$xCsqRPc_7Y5i~yRs@9sidL8&(!<0k z3UC1w{0>zYv4IpaO8XpD&{x9?($XEF1PsEYk^;oKtw|5k;>?}|5>76(fajrT$KJrV zT3(zLqlWu!pXp28(lE_^vA}^y4P;x0&w`!}RY=|r;=L_pDn(ytm+Kz}xpZNn1lqi! z_>{wg?aS&r#qqX~d4be4nyff9gRB+r{i84_g3#u`wG zM^W<5OJ4AM3XwdJkrA9bR1^J_kzkZXYV@Q ze>-*`!2S&Kt|eR`{bVe9vYsAlDxt++pnd%G#ug$sYY!_yicxQV+8ryC_VwrA}SQp!BdctUI@|8IXQpoJ{aCWTY z3DA#Wal0u4*Yy4t`O6Z4v+Zze&aW3v06yTEpFL|oS&U2n1pO7@YBJbsRz@b`@3RlT z7jF(t>q-oQL1P2|xp-fKw*BhDA(l@nE^f`x_%kq6;yeq(!?yBWDcYf+ZELx?k;mev z^!|xI`nJ>W>@|w()K6QPUjXijTqKa|GWF^2?DeSljf3Nj(X8AICG6pLMSdY($ADh| zu6F`>*KMpTY~e;v5fb!z|4);~N{2r@=%eJC{_M4!@S%bzrWG%6+|o7;YKv23I@i&j zh8jvR{-bz{_*703B(5w>#z6_nQw$|6dP-ICc4djT@`Ht` zq{DBNr<1h?{HD>oBbC#so*m7<74I36sqopEG_;~9)WA}Ek)eb&Q<=?lbUHhm<>FIS zZbb`N^d_C;$G2o(gsj?XbdaiUr~2`gHRT2V>`emla(y)EZpirpsR0j6o{5r7G3W*}t69$gsC0)wU?!UHYwfKcbi&`1-it>3?}( z=>J>x`u^H`#(d*{hrRx%;{AV;z5cy;pRPN)ZBx=3>DK}ljB*N(H?h%KK0UxIEHe^_ zIY2mcO^ zb!X;=$^-QTl}*m*vdYZRCSU_fV6i(CalxIq!wp!t1s*OkxLTnnxGR zu9pb~ULmz|g~Q`?T6Et;Q7OF6jx8{B&}&h&N2>qQ8?g(A%}%sofKD`N(ZIU4CS?R? zXke?D?qsSF#vSx2-V1!+oz0ho6Ho-d)-H2r+l(#Zq0^vyl-f9OV2iLNO-2tI&v%Ig zkxEC;?!Q05XNRAV{_zQCz^~4F0#9X+i8fcGW33e04g>5l1khJ!izf6WyHit)p!~ZI z@q4Y3`8in4krkpw4NlUl4k0VgOaR}z$ll^^4rZI8TWQtdBufrs<$`KB3_O^5sZ2P- z#k0Lp4e0{1UhjFD5UJuco1X1bcuG@!R9B^;?f;rs63pI7C7{?=_#`II zr0A_z?%vk7Pyfu8K_$uR`E|T^7<0cxhu#*Bok+c=ixZe3tAK+Sj8)*ZH&O*#VBc!3 zbVgWtbptsJ#}eW8a67ImN9VQWf!&dr%@5w^H*4psN(an+9XdIToSoXts4@u|W$`mT*PtI zZ>kPa9cu$B5a?bwSMG7h511g|(B6|8Jt=2sgpT1bNl5@k^QhaoGwr=gjcM|Pu9MK0 z^&awGH5KbinkxVvc!i=$^lgPH;}|H3@Y&^v<)wHWz{gaZ4YC2p?8~hx4HcoU$=k2e0eb2G%n!zYokJnl45@4lh+1gOvD=4r*7#4_Ih?tDcwt zd_vlSZ{Pj(TIaT}GIp9HCt@}p@4Nl&N^kwx=BsCFhqjiZK@AfewzL-UJ!<@aog-v4 zyO@~z{^s6VrR~el&b|M$IYP$Eqgdj>BO*K$;;Xfaf65V}|A#q3QL7ouQnVZnseuW? 
z-Veq-KqQ()HyYp)%^n&J%!%fzi{=@M=A%V}MX>z3SV0%8a41$J2P;;Gl^DWG(XddF z82&oO-$v0VK*~8W`|DyID5q#tl7e2viL}BrC2fYBsy+jLM3kA&v^42$TKd(!= zpqtpSAaFetv`YbP7lGc#Cx0Ld492Gn52XarQZyj(_t2@A{8A4E#fOqpRVP!!fW-YW z#AP&5;S6yzo+wvL{6Qu{Cy8_*NkTn^<#;S$3&L?cj&l*hZ4t*i4DqKWf-GVo&ml3o ziNeq0_EkZI^x`CQA=1xNWx^oRRcRMp(nZwMRjSetbfh;dq$_e}Tv5&tMr0hJpUn`$ zW<03O(CNtVS;&BKW_l`TYFcET@yj$%$UIq@d8i}v#6qSlXV!7$EGdgDC%-JVge<$t zEWVDcQ=0P3`t|%KX87(+W`E*`9b*pFeF7LN?80pK=+4vpTdetvL{%o>*l{i zU;Km(e+|(6)g1n}0lE~l|Nn*FC?;Mef5Ha8sRZRz5tBsKrwrKO`;~b*x%)hFI;CYe zemd1~<9E()hIBFv`4cu&&A@q7|AYa8tn#+|c5jK{f#o3&QgfLQ;*X^)u zy)$a%{_mAEep&hrHblwpXySn9pbF&25?r+eofgJ8U@n9)d?5?4d!+E3 zHTSI=UPH_)7M{O@i-VgeJ7DUbk5Fx&a1CpK^;w?3!_?9FDd zUetHkWFe`El3t|AiLj$P z5a3P$c8sAvFdjL)^bkq<#4>p_@GU)`{*?!D<;HcA-e;~?>(~|l?l~R;Nb8w3%k2BP zQRt_cI&l&mXZtiSBS4Rw(O?KRbgxuJKZ$*$LK=wU2C|BVa<|nD2$Gu1pRw0kQQoo# z@7WsAq`U%W+8V7B7p~=$Rx1bOL4jZH)3eE9{q$Dq4BLIXI`LZiqqr9@_Ib(e`*cbb zBkm3?;mx+K{?a{sfbRKL%*#4`rPbKHdV=C1)}fI7Ezta(Q+6#?A>gt%GEVPt?JFkX zZfg*}2tlcRb@=sB=)V5bNfDdcnxZ5b>3(R^&SoPx@o({?U(QPN_@Z0IK2rCe%lX7x ztMx>3n;O@k!VRXc;8N?nNirG@WC%U&{&{sTF4o0UHcjq&w*c<(q~jjQ2k~hL|B^x2W24uch?f>)We^OuUtFj?b;vn&hH^M=MSCby5iWYsKJ{N>e4vcZsfRB_iFGW;&hk zQp49up}bXSbIbSXd240LN2@HIx9zYL5bxoXDr;ZMzPyiXw{^R#Y_4|p6>P6nz*7cinmKy0lvkJj`3a z=Xdn3>tN@=(--Ua15@t0&07w>__+QcwEOO<&CbDB+eaR($X1fRA%$>khqRI%2eHJ!t;5~S8bdoskkFKLp&Aa#!ZqpvWbxH9zW(Xq7_P*oG1;{V$ncSi__`l_Xg@3m+P#|h zeN8H@^BID?8)qnM2asCO*@xd<#>IRhMviz&Y|dg2(uzU!{WQd8V2#lmFlXdYY0JI> z@R@al$8}xZHrbvUoLce}P@&G@@8yfe#Kda-SNWP~6jx-}uKX>V;2+JDOB|=(-L$X} zICH`!)v%nriC|^Z;(`PV1vB11wM~rjb*|?ETLqtLAITe8%kAsPGx_9C zMgG;hnb@lwojdWuR;}giRmZFypZ(FYvd+$6fNmL`0DrJKl8oZjKN!y^d*;m_$xUZ* zqxu)!O12AYqB4cXPSnke`(SUd4VZB`LF@aPeq+7ry2t1m=5S=7vZ<=qUez(+#iG?% zb9$P_g0uUVfj57!USY%&JTEL7orBlnLVJE^y>2yfenX|^Lutul-DpI-L9Pay?p%}>y@HA$6&qwDqnxFUR5*c*8h(6n!Vw6WpOs! zouD$8`E95^^NTr`P{iT*-pL-9s#Gqh2AEEAk&FGjyzTq*iWa@woUpV(5{pGf2 z5|8;Br4>e42>#ZvFebnS)&{mA#daxQE~YK|Oq)Ugt5h^>cJFQ=kv4*})pMp|0@XGh zpHW{Mo4C~oFUlvcwChZ*9tE-KyeGbfR#=y|77t+n{HSDHO{Z4xP_NLzRd7ewk|9YEf@Q|E?&RDd0Xs^MBU{CKjR%vX)Xm^42>1d+4 z&A3q}@}M62*YD;9Ln8@cQu^b&X^XS{sm$l(HziXGO=9-Aw6!WJg*qX$Z$}~)K)3i0 z>?-QZFYvxi28U?bw9WS(jh8bDh!3IlO5Tvd05Cfx7e=8^YIE)WQ?~K}R6JS#u-iVf{N?oN$rmsNPvCwr{3UqtTC1 zcWqTnqMo{c&+)8YK54I!O7&~&rJ8CbgqcpY_bwx@T1Gs6X*T>2-lQ2JsFzV)Gap5* zp<%jmPQ!Zl_!SP?&$xLG_2VV%42Q?QG)nn84Et7p9}l$m0S>(GeRev@h5uQKSFcu! zh0c{tzvjA{zTPn(CS{0W#p83m30hVcB414!sknL_y9k4f=w2VQm0qf_9bYcm1XPa0 zetmgG%!io|Fe(3a=rCR$U`2*;=uE;Rz;u-mbgfKVlE49ft@xFxH$)Z#55SgPnR&X9 z!8VTLkep=_|H`ObIe^%eEx583Oz(?`{ylZ_UH{@2ey)Sat1g!1 zt&wTL((SBEMFx}?d{@2Go3&yVrat=G~59G4-VLy&EBc@r?`?2!;#*xAPytHGxpE~^W|@xV}Q{J{-=?mb=3E{K^Sz?0es$v3w)`Ex zl4{7EtYVUxr4RDg1&?5Q$Yjf0{?~#-5fJvj8%X}+9Lv9m2YuRqH;{bv#~e$7?{6T< zX6g1%14*M?lYdjt{t1$}viAScTbT`Y|Fg<4CIe#nzkMFH3hHP)AHm9fPy_*;XpWyw z_gqk`^?a5@M@9Z=AjzJ1;s26+KoxOAvxkpkp%x`GM5rRUsNG;wOD`SP(#6yUQKP)G z&2oxic)FetE`F`M9i0F%4j`ydGBhOKAt8g*R$`uT?XaS;(>AV$m*E?Xl;0UzokfK! ze)LJ-ljpDC^_}q~8pp{pbP{G`rFc-xXM4Z$8>c!e4}1I9_LhYb?o_7jS@SD_QX=S# z*-7fc@iJH_nTZWlaCA?dRVi)Ly;i;8c@{0 zqfahG&Jd;41$nqzphw=9!HO$uzDSIYor6fp00p~XD`We4Dh&P7K$ViBJ0O_FDFY=? 
z{o3?bvyzYJJUzxnNU&S09$1vL!MS$vBP z;tB}Qr^5OA?&=tQZ8e^kbr{?#6uwhx?K`HW*a@?ok<^$5ob<%-?AJZt#>*~rRC|GI zSb(!1BJ_@|doNg!^DAq&+Yh+^sZ}!E7f}7=AbCa(Lw>O=1)%^Gsv8hJn*C4UzkY&+ z%&N?)Ob7n@1dCu%Y(GCi(nRyM9{%DpgEb{6EXPaEhb;2=M7PavCne5;B@4tgq9yb^ z6Cmvy2jl{L~4BuW~dFYzugqc-u&!=0?Qk1^g(ndmDmA?{V$9Cw?q# z^>eJMv{q_H)0FMy)19ulKG$ErgDK}#o2RWjY8RSweadRV8}T;tXP=Ypg}@YU!tDuG z{2Rn&X8d_nB(suYD1ZymmT^FPEPHsWp$Abls!XnyUg$fu0tvz8L0=8loTj(>majJr z)h(YnvqLMKsi^a8&9JW{>-JZJrmkEqJuDPfZaXb7(0!6TNDWYG2rB348uWh5J?(e} z_c?2UMyUkHcRfvx(tZ!dYt^K|KiqoPK?;E;SS}6Zy2ud$hL7Ae)GZ_q$ z*Dkt(Mr}?oLMT}g5KH&q$?5j%tW^W-9_tQ^4&NfHh&?R%EPKJ#q57RO-dJ~-v zeHFF94_Rt{=$$O3zBVF$JiW{yOEUt9e_x7*NEs)=Yh~`9Zp@RE%G*O7KR-4B&4AdA>T^S8o! zhkf$=1hUW=Jw~bl8<^#+dQPC?ZibaV-7hWGhr07fFo4*lOVJT5Mx_X*> zim?j4W0gO41K$HPQDsAM)4>+$##+Mt+(h0i3yiF#pjZ19mk+dT-+Ul0)np7K*C`ez z+z-q~ueT;Sv6cG(uGs8NA9rVc{(R8$!qJ&wI9ob+YX}5xb<+ zrrvw*P{Z8h@kB?Ycen)GG;YDh(De! zDP7!q1pSb$cf6IzlcoC@Qra$r9tDgsjxk9otT~Uc%us1OuQX|U_e4i}(S3qpB6Lf~ z!5yQlUA;Cn>@bmx)dpxMt}Tx|k3oe3)I3(-8Qcc#eia>>9?y^Z!Tw!S*>*rw3SIY6 zrku}#d5toJM=q0qPgDBvd@rEH;|<-n z?^!Nah44M>f_wEJA8Xa1hHShZNeHLYdjjjJL3Fc>sGLK);RM<}`t-_JG0ccwv{UL@ zSX_opS&PFUS|sdl2(Qa%xg(#EDkgB8obndO=|gASu7D5HKnAaNSIJkYUUg7oqG{IY zD+{VJXacBwI$uz8!ZbIA{Wa;Nu04++@=(fIU2tSi*edaozof7X;xfu)94@_qC(6=k zR9ldtmPfDa&VyLzWm_FC&QFWhO5}yDY-_!Iwb!*O_cw>HA3ms$)8)JHEI)Y!5=y+eIqGcm(z__ zQ`#P-&Q5UMOp`H0DA0!&f=qk$qg3dett~Y`q5Wu*pel>TT%#!~V=UDdJ=hqu)pP3y z$GOxSF*3_rJ6CTPy&|g1ZY5`4EHa9*7Z+A(X~i_wu(9PTKL?*jNO^{t>YpI>f2?Bf zJBzh)8Zg4MW=$N4uNe_~==x^yi%#9NQAGIs%yc_3XgY55+so;Si$Oyc8V7Y62CGAc zVvS9SJ%*&b&&{V#>x>9Y#V@5g?XA~JVx`oyYBG#ey-pJBp?`zP2-uQyNxfk*fjT&*lDb44B3x@E*=x%JKegcU;qN=`io0x#?lt)%_tfP($Z*O(4k&9!e8f<23aJ5j_LTHCt$a1LgtbzMaDJCO@fg4r9U?fX1#R7eN#nyLiyHAk>q z+?C^c0yWfPO|VZ`>pgW;isgJ+7>7VghaBrBA{JGH5=%?)X%Y&3aCf|rg-F%a{8kMQ z^W-s>9CC0!(@GcdAMQjZHN0kB<W--I7|zvkh9(%c5j7v2g;Hdhxlr* z7)uq#EWmsUO;Sbhn`S|Dz>l3l(?z0kjUBpMbzAD3Ui4pxe(fWy0V1j>+?M&p8J%_N8AtVWi#9R^7GlX zrNCQ6XT^jM);#O-9uMrjOews~q##V>E5=b!Gw!RE;_<>rmj*}+PKqA(=kE{rL@%C=q zU#rlR>ps`#`E*|zUgCxAx7Q*?i;ndXjknhx-AoeQKhT?zv{;-D2Mb$J2*Z%aUKzjy zXS^5e$XSa0Lu(Q*Nvl zleHRDGf~5I{Ae)w9n?4$a9wpR&5XZ3Rbr(IujIu}ielX9AbzFo5TkFHgN{}2$E8G# z0(Pd=NnHFm^#?=S9Ck_$1+<;v^p!)r_s+4l@AhT+6czreCzPe&7<; z&8={V83$^Aj-#EO@8uc3@ohx#a&tpHA1r?pR3? 
zJ;aT2@`cPXA|0{2nW3l8?~z#CL8zrxP{Ph@wqkkb#}puJ<8e-D>c=bwYp8R<$Kzsj zH`}Ps_${4LqcY~FH#0vT?*K{Up@?pXB&Dg<>)(Z_Xi)d7p=d>RU*XSAeYtO0 z26V&=Z$%t{7`j;dA4DZM8hnFwKlns~^^XL$K@3f0tC4#}r2t0S-%ZDfO%9A~t#&}aLG`#xhA@5CD(=wO?zB6g}CV(`+t z_aq)6!W4hqz%Q6J7~PSdr_KA{Qoumc#xX#Mm=+^Cs3ne<%{X7AcEXeZIdt%z---t+ zg+pAbpA7M`NS8f&+jJ4*ZFPfhw*#-te@fPVY~p+}y1Ok|+&hF2Kwgw4iG*yg-(9#^ zlyYKwEl9#;W&~CRN#ZhGC62#OGzVHriluexMGC#-gbZUkd9aQfmyZsGxD%sZ$ZnV3 zHf1N($L&|l3z`9DR-LnE(qnK{`4Jlw*)=80H)*nlvMWh$wbtBfWNoWdm(-rR?(J7* z4OeXoU-oa_9&_jnzti+lf|cbjql~A_2xcKh2nZ9ftG8Spnkubq6kX0i|*tfDMc=^;D?B0%$AwSXyc9X_6xAunf zEl-T+n?+Vv$^e6ai;!4Ur${xHBe)N8GJE+$u!Bpxee;xYsVePIzq28!<*+mR!}FhK z5er>ze1pc$`ep~f(vJLC`LLN?-gIr3e^ARhP<=Z3ug8AKY{e|V#LIYW!f%g_fxcoq zHb3LB&zKOvD|(p=hh$A-_Vy-&ec$H3CeY`K1#SRS(!t^1)_*MY=JLzvc#(CdrY)aG zZmMp7x-uQ#D`%AWc<95$Sya^01;A{xV^VR@cCxHssBL0~W=hQN+fCcTSNRTAuO9gl z3y(i!RZw&s?!D_A$F|&&#qIoS&dhyg8)kkcF2Rajv+T|VF3?p5x=Yr=DBKAz36+x9ry3KaV0*2>ZS0_vJ8Q+(jPykr zwQ1TdYYPXD#!DM(>(dsi<1Zu8SNV^rtc*35?+2Oe?gRg|9%and%zVH7=;@z7s?)($ zAGHbCFStPbm;$yR7`3Wn^b!@|sY8_XZ&I%k$L^6A?Bvs5M$+sfC1@b~B=iog*zq~P z%>J;QwXm|Pz!Kx@yI1bG=?2MmXL7Siq4FFT-`(a4+vENvzOIPbl$noifgAxONp8X_|+;wCiRt!5~=Zy>BVK4hKb2H z$u;9yvisW?O2bGQDYCvjiy866GiB%v8t==v>dNNG-8HrIpB*P&Uc=_{_(?RQiPIiXFbQ?gn4=+Cm;^34T@QjlD|Jk-M3-s5gidS07d4 zIR(h)1iP}cV;T95Yqx?0?!tU!!tR-ht)EX5t``CI_nBCg25*a9LB9UJ2#WwNFN$|V z)|#(X+D)I341aQNfa7$%newNQr9ob{9$}GRmii|)mt~S+{HMQL$ zjWJ+3?KK@+kqRUCvXQ#tG9EFDmI;f@P|68eRk&B>Li5A&6cOVGXpgHGXW&r_)R7$K znfP8QR18-v%i}l@dec8${)v2BudHDSJ1R#0j-#@6==zt6es@^`BkXtO_X`70f~I?J z)w7@DbChyjTAb#3H!f9R9!}K_J}1GTWm&ktwO<^0pq{q3arfH3cw$l7(#j+Aux|rz zvz1m$Phh}9a@vpce;l}a?tK~EX8e=>voG*7hS%{MjFTAs0X%tT<+$nE%z!qIE+9Bn zICid4k9%K_oXe%bx91BwoDH_oR{@uK@fTj!Z!(6At~x)HHKrMEj&5zo7*v_sH53}d7&bHbFnCNJ+EqAZ891e?cr`Rfn6y0#t3i!>m? z7L=LsX4Daf9-$lGi%@!G#S?eov!}e2G>vAs&oy4%p6=#d-4jSKyF&25)wq%c?$(!p zy+G!>69)pGjZb;LhMgUog2=050*KEZ*$3n9qw?jxnk|QRwd(Eb%>!m@cR}u(hnID+ zs_xDho&%}n?10{iZvCcZIm`juhAzn&Z${%R#WKTZh??eWO|7CEj@>H8e&eORT@qV; z3<4|Tymraiafiq6#GeP_h8)Rhnwz)$2Cv&b5pSM0ZAQ_~UnXL8Y(o2%mhN91PFh_E zosit}v>LK^F#a-2^qc`H$w)LKZWr`?ZO{aMfHS#;{E~MI8v*W6gF2v4m5@M z&OCbf`J0m#NZ8!;J~wP<;$(x_9xxaQywSO{xhi?NG>*3tL^MKgCTsG2u#W0Y8{J)) zZrBn(`|$z%UxDfVUs)f%OIwkLaU?>bi% zM{Npo9WV%#Xntk<_N!y!XX%z0`#XDYoeu|1k~EABtSg>FjYCXK;;#Tz*lh}EFBXGd zhEE#A5Wz}qS3w`1e7vBbBT8>guNn0FUnkS@&jxqbf6G6 ziI!=F7*PXxVffL7lvKm-U{2CDT(J;Fm6yeXP(5deMiw7|`7pIhF(_-4wuX1sVllE) z%o5M!4MI^T%8hp1vd3Ix@MO?6pY>c@14jlPBFjEG1WSMaWqTBlZ$;U$L1}(n7+*?wX`fQiE}!y zf66NG`@}WTEpJsf)(tyXE~1%%D3s|PS+1T`qIuKw<~V+Y2#~Dex`q|lJ5FN z-0lM&ylVt?1)t6Kxn(CpCNnCSc5HmXdO7!NSuc2sBP#18>cLDOZv1v+X-)3XCnZ4z zL z^OzEUMY|ivp(J{qJP8vQ4k?BQ&#ZzH#J1#;IW=?<=E_ftSV4wk7*;zILfsrlN_E{G z$7MX?-o}gR1jiuiDSiU_I+YCEifZa_%|0y!aj7ce4Y47(ztFa1E!cMB^?) 
zjdoGT8H-rS6u_!&zUbalSJ=)_MsM-fP%H6DQu?zzW0ILz)%8tB|Elr_zc!Rg{fUr`Y1~u)fqdA z83;-rKzONOa^TYJRTvc)Q%3v3kZo!;u7}(x$11*SkbP*pF^KNId%%_Y&WHqBQR6Ld zdDDpgvA5!2P*d0x^bjBm5s`cEOz>D8D_tdzH9cWj<~%5pP;~q{-)iE^!RkWeCO|78 z08&+pc%3Awbkb!;>{J&{hr2-oV}x;w}%#bZ-!#Yb9907D8?pvp}O`yI3aqbSKcz@kwf6rlcyx8CK-y-0SaNy1KJ%HVJtp^z!q z>st97LqZMO0lC0gHsV9o(zcPmz1Z&JxshD@LBbap@mJGjZB3ahdu?|fP}Jf3m-1dU$+{nnTAKucD}01OuK^;NdrM^bzm>F zoo2oSR<;J+EUWzh&R>_VV3I9UFPX4Hj5~@&1HLov5V^K7a74=iB!}?BVAkHR+ZE+S zDYtCH%oxi!4!h3!Zts!v+jC-1Y}+?*pFjn}F;-c7^BA}l0QSADt}<-Ov) z$X`3%4(OA_TIVh_mS$dmB}ZF0J=w*xyv2PakPk*t9o9pr)g|)|#HhDk@Ov8vH+Uqv z{e@lr*~R{EkymHbsL3xGz`py(*L$0dnyizY0%h{q^TQ>UdpA983t@0YAN({0=@fbx zyN4guea~ouMHZ?|->mSOc!J$5TZ?X)wmUvh#?vo!DWR>YFo@cG#a_OhcP&)_CHQVU zs^+Y^4WljPfx}~x1qwa;yD0B)5XN;xUz+gR<~rbr3+AADrY_V=wje|r8Q$iHN#qH0 zz{pFE3Rs+C`8or#qVCAy&p!)ubm3;d-^XwY^3%Z>`|9}|HvKc1aB!aO zCbeV`eq$7hx%e@SGxi{{yfw?VTIe0nJND}(_B4)_!|kwrEm!11SQu{B1?Mk8+Z^UodxFGwmR7|D4f%9{coA+Hp+;oQAwA|jqe z1(zfp!gqjXC?b|2m5mQFB{p5#FCE!d(H?h0Ry=qXPg@x^xHjFhTc0)8Mah?5UccFy zX46=~t}yoL#S<9}R$KN=_kQ*H(pqGYzGKqUpDMs$hFnGaf@Q7qSCge9)MTTV(O*CE zjHEdbbMS>@xcQ!wCtD)v#g9AyWTERsf!6v-2JwAuSL4}w`!@nZeo{tLX7P4gXag)~ zE!yEZ4qXpBLtm{hXA_iq5y(1cN-Bg~yb9COI4E}iW*j`6O4H#x{OW(O_a077wf(oZ zgqBcDLT}QMCLN?mNJ0k{0-_+GbOAx6NtfPRf>f2>ZGeiRAkwQyS3m)&5~LV_x=3lJu~M$^Uj=^^A}_%d*|BM^)edG8RSNUwj+wBkq8BOm$f~i@{|B(|&c*PD$Trgf-)<2V>gY zpv~c}Xyyn?FMVreJcDqr515Gsf(%=1H}|J}bb<=4prYM@6xVRd_v4{p6|j!20jFKs znz($8tKw*W6q&}afJtPw|FocWNbtYxBQab0Ii1l&{Le1d<3H$`6B2)(oI;2+rxlM4 zAW>kUxh>}ppBv*i6iDguuzVONnZI$X;`stPf#voIVB?k8opM^qdwWSY1}a9G*PNV? zCZu5(R5dOpU(;0*svhk9u5LYzEmCe+_%42HS|_-Mj_ZMhk6oKS8RxymnI%viM(9j^ zC3@vq=WT@S!NZl;GdkSl0I>V70X}h~)gNpNk}KbIBK9)6PiQVp&DIk!9ZJWO5+OwD zc(8TdS9mPxbdQRC8}5B*?x%an0zndc=CwwwR^}pYJkgglofh$uW5E=kUf`ZkJQRfxJ^Y5$t%W z`B(d(n7h*P(2ESkbu)WdVFBT z-WuRl<`|7nR?o|1H&dzF#um!3^;}j>Q!j&G*LlZ#zvZya zDVvk6T~r(T=d1azr{GV=AUcvrICU%;wM0Ws8Ro{r1ho@6_$()Lp3gU>@E~qbK}>~p z?hy}*=$8gcweLw}TD+~O5vb4NMXRODYjul*YY)VB=SR2 zb=1ef{^no{n(xe&{yVQDoL3~2H{0se+ z)-JFY*=<}{8sXD9&&jb|KnUwiwa!w$&X1i~q&A#YyKV-KjAAT}bDBOjidsUx3QVpT`Z%u;*z9B0h(PL`U8>QSa)T~aq3}oSMnD6qT57d{jNM$g%jM0}$ zm^{;t@uyVMEYXT3$o}?;_M=F`2Do#@3}%!1@kc_XsI%>!F}zDh^B=P)7q5In%2mqd z`Sc$NeTxOcPwe$v32`PuUoNs$Xj|4UO)14q_;u!kmO(SwEG^?{jt9+uUybE~Kl#5E zLGbMl`4K?Mvsrw#!21};QBj(JmT^-sOyQ*OIA$IhmT-edb$D*5F_c%p+czg|l!0H* zJ(;rt>eL%8sjA5${~|ui>u}rdU4pFWhh&|^+ zl}WoOBYC!1E|u;&J!i^+afK=jpHEC_B>CJ{NYS}7oIxQ0Jw8IB1^NnaYHM-?~?f00p~3UjN`TJ zG-2rR=Fh(e%tTcbs=oDR37kJTHrygY4?O0cis8Q*a9j3oP=2{B_JNmNdpPfGTyU|1 zbxK^|%}*O3L!Xsn z;|+O}%LA-yo9328JqJ{BPfvx-3z8r91LV?L z({r3iKU&_WJ^uH-fu99Q|0<}7(2=L@ytpr9HrGKmc;WT%ZL`OxDClHkuBy|==_Y2s zcGk9A0UMMQ3T}bCr74(;Ql=*d?gfcOlinAwUwiHfk10~w!YUIA=Rby~U-N}JO2N!{ zZIrDy23XfuyG+$glzgKN_X4_x4L2PtG2Umd#$5`@X#tKWL@ib<(l$_bz3=H}CwI>u7zz7FWjR?WfaCzT)6*`ZuyyTOmKWu1|iNwC(&? 
z=Y~_py0qJL-~W-~t56_mA)v&7bSTuvTe>R7q(LrBk7re`#OWgUz>s_VhJ7<_aI^BZ z(sZ7QVr{V5E`|y!)je#CwD17z(RznlWo#*{@~Il;Sy$7L+KlaO$TUEHM|z$Wch(8c zRmDkzCLEy13kUm4+TgPN0VOgAopW)LX*3QB%DXg(NW4M^D5S8`M-@Cfo}!O91AQoA zmEOX1=-t}-AhAkgLeOvMSq7c3Ji6;_+YI=;Y^C-|T`F zJbm8Ho&N4wudJ2(tv-o@^FANHO{>57s8-$gvVV6sSH2JQ&$l?&34Y*l#{X8-YJMAk z#1*?Sg-v02cZg=xv6AYxfZM15WvHa5Sh4uzfqP zE<5yi4l{C62NJ|I%i>Svsh{g_(;Dn-w(9Or`5+2?gbuK}akyo&@GaMBp5_Pl6ZvJ7 zA}aW>#MINhsg8>i5MH&{ntT}NcSmn3pXmbJw3Wp3L_Up>h3&&A)12AaE-bIoS)zpl zGW75?dr0z6Uh9vss(hd0F)v+)3XPF8cQHzCpWwL_;Pt#E8gm_Eq_ z-C81!y@s@jW#QW5 zeCW-Sygbv`6#j9&>;V3KCOYO6uTidr^gK40Pnk{PzFd1R-=7-uQ{XGG{T2r!0dcTR z4`bUB0b-U=)UUDA3Txdxj9R)qi;CkoT7yt%_X>HkGTXOVlun&M_RpvSVM5SPCuun1kgyX%;zDw-)V5X(-z z%k5@-6Isp1{)6Wn+pRCPYLEWxuzv+y7SoB7z+>eJ{IdVWKT4d5ubFL)VT5W}<7MXB zV_Ly#xpg>pl(8tkL= zghLMwKKWYo_m~7r2ywmh|9PtEiD8*h5Gc zOV&;^no&q1SO z!4xyzm=hVfSJE_Dtgw(4Wsl3xyg5 z-3b=pqZ5CBboC4>f|{B4LN|=BHI`05LKt6N+>}5R_7Ze*6MV-l?>On{OyE-!LIqhr zANx6t40|2UBQD>WuaQksiBV}Sfj2#r8S0+lJXTQg8AN=`{Fi}Xk~X81_vwO**;am= zV!hRi2qUK;mYzMc!lvN2f)gu0HorC29rd1!{<=BS^7?O=_ksdR!3ng^|6AmoL4|;i zNBXiwfWLrRO`8Kg*B;GEFSzY5=g{JbKlEzHd(Gr?2e`L1=sg!kvufl_t?bnZi$)-7 z$4B%=77=GOf*3}-UX&sYL%*!7uT-M3Zyt%+b; z*974HuydNsDnwdoMo;2Q?n~T|YB=;hOAPFsZOlnhxS z_j+QwT$JFoboN|6VDj;5&+ASH@dejm#c3gpe};ao7a!6bdC^hC=y8RLx*EMR@U>OuM+CTVvYX*Gi*;mn#abz|E>zGJvF&7c(|l4u znb@<&L-Uc(K;KlwHSd@s100}6rkZqXD1+;PES&zvs}`IZc+YlMjFeT`;z6no@G~~? zB8W_9$3IAZl3XV3?|`96$x#Q}Q&tHCljJtb_mIgx+=Gm$wAG`Bk7{hVc=gA3rMJW6JOp8Crvd+lDNDLRrLHi-eUuVW$fqU^Q~VGehk-IO8L^HMAqbY zcbqEmx^7wfa&daTJ4I62gF}7OfkKUv|mD`IH+Ur)avyE--~o<)aCJrGUP0 ze8Cktv6^cNrE-}6aZ`>rd>8&Vz^UzjgHzYy|8m=D{spJ1{WoyxaK?YZsjvQoQ>}0M zZT%<%G7MffhEuX9^AoCP+B|#YwqxIHSay*4&;6I%j;#HExZ4gdWJV%xm7FJG98z#v zq}}7BPg=DDo6IGEx;{2gQ3IKL}w{*#Zu8*JGTrd_iXYJ`GuH-P6XiOB8=r8Jy7@B|V_qu%>;5ICwgDFg17xVKH-6yR_>u zH@soJRkB)V71*@0hV1%O>zLxL+Ep@0ZfVU+EbcYh`kEWiGY4lbqzOxaN8{UBAU;%` zL_@hjDW3Df22jw1-2js49&OzFFH=gH~PdB|C9K=DK^zp64bXrI2BuvhSz zQ#UjG*vIc0H=qK&ajW_6@<=TeP+t}QViTJITyQ$)ez!6H z9n#t$i0fV3hD}Y`G8U9C9*_x{@a}o8&`4t8&{t%`p&mwz@}u1hYU+et_BHzN=DN42 zbOG#ZCU0osE@6MXG09*kmM4CgF&tHdP;UQ|8uiDtxx?=I*AWGeMVN75y3Xqv4^MGh?H^ISL;+G32O z-u-)4ZpX9o86?KlE-rFuyi%~d^O+dTf$|PpzEk?l2O!7L(jW3Kz#JaSzX1fk zcOJQfj23SlTh549(!N%AO$l&tVG>^+*RoZP*i)0(e(k&fkH7%_bc^;*IAZm%%*8DGW#rcr4;!8*8HaD+qM(@E^MwG3`lZBplZIDOeES0acXNfSnw?p^uL|S|xKit(2 z(~u<>5>QtPMAgM9Q%3lVl^KKw!cR>_QD2YQimGHVQ|~?0P(NTm7u!S>Oi=Bz2~!1jwCvD)jP9JVx5%s6+>=@kM`7S?pC# z&eexIKtRixn=!^2>P`U3uEV%6pvT9kxR^o4Jh~SWv|vvU(8}|SO2q*$hqCP-k3wbm zm=->>Qj?e?g^7UgIai`1b50Y%L$g?r-;$maPOj0#l}Pbqvlli03z2eUE2nG5)1 z5b2=_C5c6mS8eRcDlV8uB|#9E|9B?;17P}oYz?t%3G62owQNE^FpArf3`}>SMF58y zA-lRU(1k?IPz6!F#a$DO66WlLy{Qs9AJv-CEo-sB#}uK?@x9()kgrV_G?4eJj^#5_ ziux8x@L=E@FEhx2ZPXlB&}eD$h}3pdUJM;d^@}rE`1Ru!H^!ew=_@tVCh526HK~`O zdG~|@8HYv0umbt8h^xBwv@%{_1=VX={4UgfW97z8JT5!j$Zx{P@w+{*yT7I9XP#Ke zJ9fKfIo4Np3xJbeA=N4GH1ZNvX;^rDh|B3oHp zDfnPi;D7g7oy`4rV*|mu^c)j((!O9XO+yFKo^jqyIc7Nd4zL7_tPk<|9}aQX=h z%#T82)7X`GagiD7sr1?AA{Ccok}c~EmBAqQsq7+an$YNhc%eDHxX|flN@#)z?~)yZ z!jVrD(-h8y*_qhwps%IZJ@O!>TtFWK*O+dbb9)7k&S2BQqzzzEx?P`8^SB$18JnN{ zoRX+`4gQH8{_So7Mjis`FEliQ1f;U;4y3RO=mM*HF+doFLbcee@fR~P!{j+SiT(?G zrT%aojSB(u1<(k+LYC3}>aHe98K<+tF);$zFf(2?6S3ODgh(Ay%Ym{q->0zpVcC$; z4DmJ}2K0~gJ$?F?OwHMd`z@O`Y5G)a@OS2e)1y@&YQ)YyfDgye^YJpeCP~@66J+Mv zK25dJCMi!1Ib*u;U>*QVxIhLlJ~a`k=hvkJp!ZP1z~KLaA|~n65KRf`>t$Fz6Q9Ox zt91O$nfsB`9z-!fTbP`p>|tE63*@heh+5syGQwcnV4x_I>_8~}`Wgdvn>@2&W$Pd1 z`{>sSjpO0{d1Q+MAS0=>1|E=x*!4yDko{kWocTicRz>;HE3dku*r-TAs9sdh5?1QX zhHV_;pfv8Y6zxG<0s}yzfRTr#{``?1l`wnR!*Q)_S>?BP1bK~ES}5g(hwtR0YJ2<# 
[base85-encoded image data omitted]

literal 0
HcmV?d00001

diff --git a/examples/hello_world_flutter/hello_world_flutter_openai.gif b/examples/hello_world_flutter/hello_world_flutter_openai.gif
new file mode 100644
index 0000000000000000000000000000000000000000..d0547b0897f8dbcb7fac1ab12dcb3ece0533b5d3
GIT binary patch
literal 119360

[base85-encoded image data omitted]
z`ukqmPr>Q+etV|>EI4)P$8NuW7n}y{KV7&GoL(Qiuzvi{2~MATcF3}lmwmb>F#6bj z%SK16Oe@?e!MCB7RR__FL#{lU@~M^wBBZ8E@$yEdZNW_&a5qeWWb`Q$?WNh{P>O0# zeBP|Z0)@;CJ#B;RA$SK1{Cjs|bpW{X&Nnk$|4)ieTF8pg4=k=bcsf@pjY`>&r8eI- z4c28JkE0h^Cy6wm=Nt+7sCK#@U?PRz3sWD#0-9D^Ch3JbvZ`^KJ5g7M(?rLpd{9eD zZx$nDf0@{^Zg+#S=M*(6tEz%mozPZ>XhxI~va~BYn zED&-X=!3kVd9ACcFdEfZYxqtmQCtlnw`vVB9IZsS$Vd2SzryBRHlqZC1O~T277Wn~ zOverC)O>Gj1+~p6;%xE2KsyTRmX0>%@JtGr(zGs^1M|F1jZjYK4Fyz!quM4vRmutPn)i%7vCT63*I)$WR-OZVL8bje>6It@KUTDuRq>S6HQet`hY_$lw0|A; zLfp^8ReOp9dfgfs{c3B*`NWnU&DYI8$Fh2(^Hb+x&8sq8gLuIXB@zeUop{2kW$&&0 zu}QNRG;?0U@MAN=4}gQhr*0csKNj#qAU*DkaUh`^e$=|da50`Y6oJF}lYsYx5sRBx z_ZQd5UG+$sP(ctsGNbFbrd#^XZoDJp6I%$NU2>Ck#aobpwHwmTzqM-k4{EmdY!})s9kTBohM=@Tb@I3g zB{<@>lf`Mu$-3ncLPhOa@~)~w7z{wxxmGNz(6DItMYq0^GlocK(YcWJm1@(QgsQEC z*PMTVylM(q86(cDrwlAcM&H#1;NNxr7EAtV-+}%vosfH<0Z1YZ@h$(}Re9$M)BZ8K z{Vj0R=g4Odd^&TUTdX&2*i4571uirDlUHG!qbZWf_9ah7*;}B6^DDdiKoA4$+*+l= zafC#54F%q|o_rS6|Gb5zA}{DLl4b?lgId2ieRT7_#v>JcQ$o#vYGVh$F@=~$;O8`( zPQqYSwLsh}E)_VYE4*MfRPd*?;F?QDJR$zrfL?Z?iZ$+9;Ve0rh^KD9f#>Ngd0M`jm*ML{KC!B!*4c(TcW~Y zyAoI)hCMXG(JaErFTy!J;!Z<^8!E!%IKq=5(i<8HH;eT3i}Xv63}}c9L`6P2jtpjq z3WY|&(OPCt0;AHSVj802P*K=oF$&2L{TLdZY!;pB7oDCSo!Jnb^%LWq%MgWx&S2$90;;z4wd5 z!kK#;;`&i>gU4~h4DqAT_;Is1RS5nmfOTCBZw3`VcO1XKkgxQEhryzGY*w&eTQUQlBx!5Zkvj zPcbP=J{pd(<_kAx{EdV6%f0mt?8V*0y}8tdLlLOs&@as4+D zfS$!8?|1!${=om z)5;(EA1%Yoi0I|}Yux950q9OD80s<3wlZmqeJzycq}J8i4iN5;7~bM)<^vnqaRNrn z(<`FX?jcHOlRsdQGLPx_LKa*Q&oNUnb#(iR#&lyv5%Ky`vkgEE2GFl`hHY64XTy8K z`XIhiYHqx=T!)ZblUW)JpB7IyfYc@5vYstu?EHd7em`JSGLwj87cGlNGCv};;vuIi z`tXtI5g%lC$nJ(MrLhv2?x)1tdFesdB@HmE024RLZdoar7J;$!9fqUGRW0N<9A7S4 zG-mK!z#^|h$h{WRvTQNV3ntP^mPufDpQ)q->ZV2s{V0N64P?Cf1z^~5Q<5z`mJ*)M zI*~*>t!Qv>neYK_cKC1*S(n*bP9PQ*seA9E@q|C8;Fzwo%9RW{yV0;xh9^73odbb4 zO=9R&T<~A?7YF%nZj|IYC8ydFHkE>lsiuheONgr|H!EtefZ@-IPd&aHR$*phT$L&b z2sV^RtC}_|e}hH-pQJM!e*W6>Q{s(B|K9@8ReC$%7d?T=iFOO__e)G-0mIPs3m^4Q ziTBT9n%@0jM3cAbU{ptX`(R98L+IPM5xo_d3g-Ba0J{GYFnp*maOM-i@ZSRr-%VKW z{U1wb_}>-N7&F)JM?*KH*NAdu8Vae;Uw5#aYv&rdwJBPlTp&1jMsfu9gw{2PAd-Gt zt)^XkR?R$O)nHuN&OA0BXgejfD)}P_yfm^F2@&d4mv&PkWIXR6erl@$b!<=u-r0$y zzS2#LIS5X0#GWf$G=y>qd8%IN%cM4Zgv0|c;v8xmAX<+0SUXGHtr`^;C7c4qRzEf@ zc9>o13YQ$p4y1<;j$R^)Wa-#dyX?&sw^hn1$A{l}?PeI|55I1?-Zqe?0gM(9>AbiwJTK50ffA0MT#Xz9(x)iB`7n5GGG{2WC16dfn|xw z2H8%O7u9*=IGBD>6qjYrE|#s8gK_ehkYPZHh;@gP(Fv_2=HVA*{78x76dEqqA%lTg zH&C9`VyEML&^yog8wa-ZhAUj+(sz*i5^ZFVt6g%S zMLhT$2YTcwAV|Px&M85cQuB!k$q7^E>QFegtbxF(zk<69p}i&u%MeN@ z$3PJS$3PY!<#lVw zOX6lIzX@)$F~2Bh$p;7!dL=f&i<^dh(ahDtR1O>n?xcX9bqi-9VSJcR%dGjekyooj z44#6FSzdIi%k}Jp-xoA0J@y20ZS7L7!)-3iul>88byC=tha<>-6hqbksqGbrT@g!e&P|Y$c zmtZ7h;O0l~l?+2AN=LEa8b{gl@k+C~efyMB=j^%0TEB_1oS*=GuLXY=EkSpz0c?Vc z%Dm*m3_tNlCXI!1pM(!OPBJDe-43ld(ZGxX7LBXjLCLtxURmEtRaiI8hHl1`oA7#m z>~|rTI$#eGihRlY+QbQinCdq z=A1L5t0-Nko2HgiX_}S?zyT#c=D3uk-yIEnee~nUJbyq6x_O?Ys+454W3(SYqeewt;EN@`vBU^?iXU|%5kGNG~m zHf%q0`g9FC)Hry5a6jkA>AE6C({Lp0AfNdRt*O^Enl^Y)EO)k{pV%~>^bY9<4kI!f zYMQ7Y{8s(o?6WOJGpZYQ`10x5ma|^-r^&&?y5_TO&&1}Lb=XnU^w}4`q2{@x!J|6% zFe0J54OhQ=B?h%38beH>K0lI<-^=B^4yX7n?d5-5`}i0C5DLJ-p#!jE+lv7|HJ-yW zEsA*50xUfPGigDGbXup~RZA&6o(p_}ye~MV^+LcfBXrVia}w#*7}9R4OG=5feO_>W zaIr?Fq`P;=bpu|#Y&kFB*~SfLgG`n9PuxvL{5r*Y*>lM^DL0J@HLQ5LJ|(*vmCO5B z-(osk?aaMB!bA3d{L_^H02~$oW1kv)R zN2{p?VgfATerVPLdQT>6W|ti+ZFOOW#aBI}9P;YCj@EtH+#_fX@WpGnWa|0BAXLpq zLB3?zNWs+W%ub-LXsNF1Z zrLHRyhummxj)(!^!EtuKisvEV0eMq}_r`(fAE~W1-DwYyGoI2QXBpacQNO z7EeO9BD}#=5ucb!pob0IITZWx+2RN#lHaO+epbEkU3}VW51gVvTzU`Y`-{tOF*p=6 zZ~Q!ZM@sriVSXHSa5xFKYML-u&3$Q>l)dAydEteJb+!s#4%j;Ivvx)?A}dJ1Z|IEU zV*;|`iJ|gtmJ$)nqF6teTT&KXKW9rU+JQI)0V2z^2O;3X)uX6 
zo;=|2o45+&?5IE00=Ly09o%BD{lhoR@>hTmoLGFaZ~{4zIFanYY>yZK9-5K{smL4X z5b=D`j!}`1x>%~>Vyr4s4Qz`SUSI%SoiQ6WC%AQwR{gp#&81Wzzdu+CMMm@6<>xP6=D+vI0H6q{ z!CCli3j8v-{j=Lt-W7-5WJYSf%7PC+Q>lJs-O)ZW;;A(4fBHujhFhfP%cd|kzo{q}AhOaJeQ$l1E<6`3-im)(B zO==7nrxQG%J65F}VtE557LeYi*{F#3_)8N>9v4jJ>l-dGSP}iC8hXsbJ=nGwic~^@4ofyyFoKx_#q zFWQf9MMl9$i?V<90ZLK0Pmg=NB{sN0<_MBPwW`#PNH1?8Ms*d=_WP8MX5z=xY_R3K zW_pL@xMI-R5bkKF!?zJ9_F#bMo%;iIQ+0)JeaCGcC~J<{)7(^)nox?oS~uzBFfuUV z2-&Hh0EtSjZZ-sY#8Rx0R0u;lsaw3H*yiYKjes!>;tdtwiWB9d;#R^0c9%EYCwG z&r_KF!gHb4y4Bg7OVQa4L7wN=DT&rupR+A8WZNFVSeg-&!>kTruv?J8#cNcmX|HCQ zRd_wtxEX{?pk6y@ysol+_ImHt2Coimb_{7AHL+Iq&2IvOmB42A)zm?O34}#jhWL+^ z1%$&|O?0}lvkSBag9nJUSk#<-1?)PVj;#u%R<`~MqR~N#+P7jD_OE97Ej9KacojUf zYp&et0badp!UFYK9LmB6fvqxf=Nc%GaqoXn2uqQ}El4|k<1FeuJkQV|Il3IrVTZDp zmxM(9&6{iM4Zjr@m~4hPG)AjLR`cI11I7lBX~^yHb*#mv@L<}&d!4Z19-6}8+T5prwN zNU_ycMNdn-_WJ`1a&fDMpJ<#DlVCYt$zn(aB$iq{w+>iXyLZ^Ti2zXcj*F|B78ASXWcvbNM8xg|CNyhb9yd6O5k~;9zij+<&1Yi}VD?0q zl^bA6^zqtNQSW+23aXYOgSLa^@r{=*>3QVbnZG1K*K#_CC9~d313g3q2E8*bF+iJ)kP#zD7jdH)& zm77J`$4WG)IZxk==AVeaH7mhmr$zVp7S_aCyGm!-admCUYTGYgMMw}dPNTe(i!e}e zysAo#gmJ(htEg~6>AR!zTGF%Scw z6pY+?hP_<%Yv#S&u*r=fzqj+f8;uVZPy{Mf60I9Oxr6R)^S66%EQG|&UBU(K&vMYG zoj+_gF|cfK@O+o&pwS(!RYcpE@-)W7QcjjPCnC02_l}I0sh(*2eO;Ex>X}x1R*OS7 zQ@4$Y$M~^eY@(|DdebU_sfR__T20>e(|UKQ*3grsTduwk5=349?6>LT++fHC4}EnY zIHcAihTpVP43O^F9(P%mKbTD7&RZh(IJElHX?qmWS)um2Mwo!|Q>C0fg1aOD+ee{X zIvUei6qLJLd|!Q$%xJx3NJ+y(s z7uyXwPoAGEcmejL^-b}&c_ASrX9#jB4Bo%?VjQqT%<#ur>iT1dF;Ngfv?HV`D1c6@ zb3H8SZDgG9tSCY+U+JnUu+6_MDv1@(qk%PLUrc+-^82bRs2?0A_?8Zq+gWDRXyx}Cc-}(RH5nuwhvQCkGE%Ehaa6kItM1jSF1k+p zj~etH+j%Sv=k3eFZBZc_Wu}#X-D2_6rxU^!Z|)M?Pis$#EVSYh?pHma-%)UjUZwLO zlggHeV-PzxiBwx1b*f)rx+E_)J;uC&Zxg}qD*-IPFEs)*U^j8M)wN+uVu+y4GFsJtMRrlPD42JP@e_vDFSGWg>VcF&c;>m*OvW&Je3c@ibb4RSP6<_1fdQ61!^;{U#J1^&Z6gLa@j=Xz4DRhvnohNQ?chk;kfd{S{OvLS7L)YYrhP83{39Ys~CfvMa?v&=DzW3H8!^67c|q*HfQ{*LQtYJ;kCi2z4GkwwTPT9%9_#&D;?8c?@eg^eqsm zv6!lM#)Nm(#ceCpJB@S|#bEEJc->8}A3jKhNV`33Ap%U0?<%whe64fuCZ9{g>0Zdt zp8R5FUAuNJ!%zEF8^`#Dla9f+ulq`XaxDrZq%AU-q&$<3Cn}#=UJOuNh*;-sW#sjw zc*H2I9$W>VEu>8k4Xkg)Nf>OS77zA<W-()YiPcJQ(@aA((U&o--xj2c`QTY2zWtS)P++CoDP7t;N^zYZ(%heWxFb@b#m1VzqmmmlkOtB4B& z&n}mNS!FFS3uJHVh+#UWbi@fiB84p*yuZ#-p3x_;7o;C^XP)v_mm7yH^|OY4-?mrs zz%)d&T6|%z^M%8(^?1^~sZW@jb1awAF0$-Ig!-t`pC#0yU^XJaGe} z3t^}w`wpkNU7OAj!gg_bbGPP!jZd$evrx8WzB$@f8D57=y2@?AuO8Et(88gM@-WXP zd@x2rrB+`fk@$?a3Y7yd8KtNIC0&LcpPW(uYjqhC`}Y2 zE*x@R;Vf9rwMKp`S)Zwq9AKK(>KdGPJm^M>Z$*s6orx;FCy!PGXg?eB!dh>B%Sd3Z z^Vm_13O*94R8*L4nv&hiRYpo0R;DsH@MH_I6M_4|)amQCAxwM4Dj}R!v#)^&2r~6P zUW~`+7D;kzC?KkSk(Ez2+>a04eEYO^}-jYL242l4JI4> z?6jwi9<9OAgWB25UT$MmrX>Qh7565sU)eJ^Q@!6+Dg$;|U3T>>Q^_YzVG1k-pDDQU z{)?vz65HgX{n_Nh0HRNsHIcSZj~OES4wD)?l`vwARU;=XO9hTAeI^%GGcCp{TW!wW z(~}2{VpDpgBE2`?9I*`ZJuN}>^rn&vrK(&X3o6rizC&R57# zb|chtG0FuG$Z+~pM(btaJ;&=J_^Ji4uudbuGElbCfL1L`tePd*{)y+9wfixki?P{} zt<#uvoTc8Q)#zceT)jno#|9m>FZ%e3^oIxh7BCW8w>J1jgcw20r7@?99Acp7<3axQ z5yc`I^{6JV)sEL8JXhsk?OWzWrcrKOImy%aGVQnQ+tNoXcr6ZxUzR@N+g`_F8$~p& z9t=m}(w_rKe`_{oVTXn4k72oqi`5O~XjH^yTvAUJ#&4Hk*jv*HLESkV583DLjZujOSx&^~ z>*gz9Z~#u#w(UVA57%gVYHge0YYRnVcZ_b{@Ci%kPtV_EAFm>+xhG!F6CA~bPvG}B z|G=iqL-%r&CjC$LHSKgIyhm6V6ILMXFdlQV7UAo4m_OOB@Ku=rka; zF1#UN{mz5PX-9XUin)#icLLf@wLp3&?KN}eja3F5;FZxpWd9WTs=aMpVbK#4#@dgK zvCRfwoA>$e=ZV#`vExU7(@IZW<&kCO^T_gxSec3xm-Wh1d~&EX-SS-o@@Gdd7dvLu zKgaCDMZXw|WUdcE)lgcR4d=FAHtmdd;2X~E^5}!38+c$GDzOAt?!1ZcSkgdpMh9NL zPRU*Gfq|Dg`tRBIXYkNBh4K5`f`!W6{q7cR$fujF_JHkDln_5GW-SX;gc>X!eMUMU zxq6bE{|s&sZ_kRV+R}bOrdG*6y}e}|gP91vH=U9f=fly(W=WLWXIXuN@ug5~DyKzL zNCq3ho8(;c&iKpumgjf@ZGhQo-tnSG?n{@g)Wr*{rNhqvS%gKn?q&B 
zY1ZLU+eNJoTJaO|&eg22khT8s_)kdYI^VDal-SY4NYZQ5d1as{0h^SMIKcvK^uDb$ zC0|pWVN5dukKZcDKo+Gh0ED z<@%v!hzQEVntnR@^U726srsRwAM+`qSNqPmZM<_ET=())k#=izT{*XV%|2UCZI+TbSL{oyq9vS46_O9#KBYAzvD6h>hctPsmz5;0fw2?)1 z1S|fd+Wk<$0`8thzYPo5vPISo`d-J{2(jBW|4f~EmnM!>wE7z$Db+feNrEXd6 z)aO5YyvV9N)v!7dA&Dhk-X?4>C65t zXFmDz5`mZz$~^Bfz5+v=#Ew7)cM9OLGyL{!MnF6YMGk&25zYaBukMS%+QMSiRgtN>sdBT zuhxJHt(x<{%u?d7oxgpu&>r;r@BWKb{TGjhv7`03dIoz4^;{g>80<*-)z~UUm{vzo zM-Jz8h0LaI412}ftHe^^JIx%EXKRE&RL(#1zzCBb^J0o5_+GARKVet0uO${=@5&T@ zK&8_t&NrAR8!{Is^9pVFYzAv}I!;_SNCpkO7AMqmm5bL)s(U4__A!Og)Xn@BlVmnE zfJzljq~B)pq{i$;Q3p^_N7fGxL?^w3FZPB8l#oH|s-9K>ZL$_prhK(>h(O$#m~t#~>cfhl73Zz*iKY`3SRq+PnjNbx+9t2+8;@iHw) zQ=aam^nE$>xcNjgaqgY5$y@uio9>k$${2c5>}^CQqK?u2DZ?wyfROe18;_pv*eL#a zVE@H1{(Fz6u!H~Wi!tV+0{FSx;&pLYIuAqId~@J__9bAujtJZrMFl>}L(QIKBC@9? zyThdF?0x1m{>YX0)Nwmj6;R|Y?RXZ4K}&u^2rOO9g`{?4zX+CxZ%#MBn7rOqqLqZvZB~l`Q(XVJDuX~k#RQP>etpCFxWB0*aC2VQRxbnVc zu?=xC!;hZlbwvW3g~sxhnbczG+3&1iXE+oIQi~Z=DqOjqjA#53PAtCG=5g5~voe}; zy+2#@!CE1sQSo3JU-S&34ZbWFe<%s->H; zi;wNvh!+DY1cZa1RoMWHz*m65iEqUL{kcC9)usJz19YyZfrtTuanFhPKCYl!C{YVZ zl(~19RRiYpUkL` zXTFSpaESq6-19|w3q{W-7=4y&h*oGe@P*|MGo=L=8DH$;v9owDbhXZ5`OJcaz=aZ`?&rKVji~5uhr$~NX_c~5~DGn-@mhy_F0oCrjQkL`S3-uM4;>0Jpc75S+_m& z8$#q*Q2{!vz)+KmH7p(q7FI=u0iz3O!^t_5|8a`{d%MqH{1pcKuE2jU125K$q8o|~ zo8LUVkaBsc^UOih;MaAIIx^nAE0(b(k9z%uowS#1R1{Wuyzn9UvPF|>nRj7-kcjKc z!YEOWfpoGL_Yj04*^LgO?!s7Z!bFA$Jorpoz$;J2m=*{D>$lyY&fb>Ie0l|PLs|o0 zHPGs0h(+2BHc{9?Fr1j6*^!>g(4|$x8LLYM_EZqilyw7XXRv zZ0Y&s9lmS{zZ(ElXrxhzfW>H&o%ni`&Yd_}_o?dxU~$cCj0BO@Lp%WMsF4fcY5%|s z4bG$B&+_-HpZa{P%j&Q`Iju3y8q>IT6qUU>5UUyh;1J^1$DYBH>U1mC*RpOn|d-A`L+@>R~xA4Ye#^^tY zwx20peovDw%+sIyC;X~FOY(!ztO8=@<)5N$EK=`p$!*wx*?Q0A-;>+wpHF+3l79c2 z1M+Xk4X#%Mk;XrfdaiT7lG}b5(y{zR>iv`%NG^|vTKy?A2(S8;+(ycN-XwOB+=i7I zutQcuojYwjYKTkl=q?VYu;3Y(#K4=oP@Uf8{oS8#iHbiDt0UV-k> zoX2)8wT0zu`YT!&D-MPyGGw4qi1b=*$9x!#Nl#AD{fvDI?gT*xh~+GcL!U_>V4qgB z6)tWg-I{D4D})M+%14x=>=xspYtF0@_wM<;Cjy?S^hBETM-3pr-nRznF@gp5vc9Lw zv{G1=U%rez-bo@mrTvxRB$U~=p(J(Mjobw1;<7b2`QC6{IUr^J5+haVXhPr`%W|H{ z8sHVL^qv>4PTbxLJ|z2nW&TxcED|SDXzk4j1=h7lT_muhv#Ta+Tc^^KPQd1n{EWc< zRh&sk^+j@<^~9Q`>?sC!C=?TjlgO(i8r`m7V#w&2{s`|%xMGf~z#8M52*z7F7Hua( zG`BrTN*M6kTr}iWKTamFUt2jDV%ip@=4Rba&c`jBVcES^ae>sceIDx9uaGdq$n$ao zip}|cbh=C|-xG5@^xEM2qN@_qmmi5jvwSz5boR^l%3{XeKS>ZeXn!K4Gd(K$)tK1f z2FcqEv?7+y6&3uYS+IU<&*I$Ne8EJFnwX>Amhq-w?gsh_vY@-r!MJva3K-qAqdGNE zrCV3ft55`2!DZd3eN}1OD;4dy4)VmB!67YtNS_+G1-t5-&U??#+t3mJ?VGtIn>*AKxDVTcZJ{S$hta%S18RD z$kJ0WwXX^(T(gvVlE%DyUwnem9K1-gA&Y(?q;Z@VFx#;HMJ!;=Hn6LJ?EAS~;akjd zCoNy)jIo@KFfK(e(=p}HQbyO`R;ZlPQ6Fq*h1rblu&wd8$=^__MTR)mle zD(hSlx!cg{?@ylrZ(Nq2McYOmmg}p9REt*9Y*yq1ZFdN^v>I^dk$XgZm3u4e3#P0u9L~yB4r9Wz;5KKOjs=!-Io$R zA9;834YX+$2HgeGbQ28G)Zt8Mbx24$!_`cDoU5q5lMhicpp`0M6#XQHkHB&E$^qjt zx_KoqqyP~KeER&S`Vb3L9&-kfPMnxsB+t8D$RpI0K1&h^#HcB{<{8FyPL#*wjs#sH zDPYjDtMvC@^3}pRAa^@dEy%RB`I-t9i;>qE6-8qS%QJ^_@Q7&y<01WGg!1ad97a-= zifYmEGBIM>VMg)h#@CGmuT$qneiTzLb?e?I8jVk4ZG%3+A~(7_LGEr*X0VJz~W94O0EWd8fdQgKVfjduM zApv6I_$%$mv*6hZvo4CYG(^{PafS_+t`9}XO33UanTK2(&y98}$`^m|6S+}yd^;OL z&ade;HS#!fJnx(s`4hR3gGFvw@dTzW5-VLGH{O=VB^mS`EXp4?{qiQH&1e*CB?t5lZPY)q!;rSEDMTE4>kRilYV`L`;! 
z@Cu7p{1Z(%AFJVSE38*z%IgkOp3fe*+TuK&XkhZKUYo%pD|=C0vQm|yLW1@cn`{_I zRl&C_T23xJ9GC8;zOstecUI`*H5%=&Es9^ijD-i{Mkl;}eou(cVY+e(GniUyK2>P~ ze`+z~ji?9jRXJak`?MrL{F+|PblFta}uL&DvnKjfrKk6%Ft*%s^+h1``R`Z$9B&G_e6q41Z=+p zJ1BZTQ-?+ymEmjOX9NjTg{72mDvKI6p=v}CSnyw!qH^NRlbV4~eRJQ~dO8C$@5XRY z+7OlbwHy|S1mT{}pHHN}$Bh^cp?>4Q$)Da%?c|BPQfbF|AIifrm><>n(vboI<>3=H zOt$K`|Nbt$SE%~=BT`0(Q{RkMX(C zSxFCmxv#L}_Fk^fFw{{XM-fPlE{M{G0Io6T6FRm9*}k|I{|IivscXOs{tN@ZtZfl> z2kAa&x8Cj4w941M;ZCPoMeYG|Et~PNtOzN&qB9FR+Ao_lrXZ`W?zZyr!*0aVVbx_V<3 z)UcI%)Re)WV^c$CH-&4{yAX5rVb`sun(iY<$r5`$yqH%KAd*_}THCo=wy?wuZRbC9 zq7akUCvT{J1Q}%pm0iQA05Te$de2JDid_q7f9;U=!>OmV-m8HlA@*lSENB7m2Q%R?&-|g;FI4ya{MW7Y z#HNc3fc~nCIG98Y&vK-jMqnL48aE_snXk_IRDo^Cubb+QPc4U#2TXI`*&a6n;5de8?BtMLmC9 z&UfC#=Iq~!AH6pNh0|eU;5K~Teb1Tqky3vC@C&=$j|xmAW|M=aajc`0<$2^dHo5H* z(TkSDKDN`krFkcga^>@O%)`YLKe zmr5h=6LCC1g5NiejC$N-ooE*Q#1E3H8J!jt zWxX1jj_uX$MrTGruqK$oo~R;JR7w)KT$8DiA-W?e9Yf(0Tbmwh)f3&& z5M3S>-AWYO4vj(du!fs4Xugai>xv^DjT=6W8|}F{q8V>(V2vte7<7)GIgXy~iJo`n z8AHdf5G549h+pMPSWJ&y^NX8Giu;0fK&G?pKrbAS(*}vh7Y<04?SO`#tR2z!j&XCtON2o!wC|``R$)NARh~#JQihq@}~n5^LGa% zv7sw9#gO=4Iw1WolG|=JrdVQ=+fGt!8B?*zZI0%tPGu<=2!UBJ{*B@PI=L;(JUzny zKPI<@e&8G>9`Z1|Lwm>ZW}kxp775`Wn_P9%$}M0 zH|hz!B6?Z-}6P8{>|hzlYk<#%%Xpu+?H8DJ+0DrK-nV=rv1$4(E%VkhUeeEKM&0Woozk`Skwe+U3zahLnmFM%Vnpi%E~=$BR2R!7b(05Q~>iH?4V|zI2SK6qtT#*;^^h{K|@{ zO3dPwIknbHsi&_@wQ6LiUm5hix>iyCd(TV~fco>Ec<+bCg}(2CxAM>A=>GsomiFc% z5-IwRkYw>?#w&^l64pQD=zoJG|1II$?57<4ZwcQMn`z?vTf^oTAd>%dPsD;q{!IA( zeheV6aT*)y(Bk6VDnc}PHAyD_MTX``G>i; zvUcLm=ca`L*!0S{Hf!b-ezoqhK<-)AsUa5xXdd`x1S_W3IGKsjcQiS;FxbpCnSSC@-{65Mk%rdP^}DO$YZ5Rlj&h+_M-gyV?|ola0y@3ZPK<2*#_ut=D6q zDUp|U)Bp1p4AiXr`_hsEd;(yAem`3 zAHRb|qotm7QC_~DM^};&As~={B@P=x_50GgCh+;srDd40_0N{p4|*w8=;iI&aTC|= z*C?lo?YbG?&+h4Vp3MK6@+%xi49DRoIP2u6ClP7;4*Na-*G7&2aS8v>Sc_3)r^M3k zqjq{|#gGYJM#sROo+o_LUz()7)qB%K89^uamt%UOkoKE1jXT2%sifNt*pDJ;a&@w^ zhAo5IvcUopZJBH>V_Rgona$E=P2P&hCm|&Er&`$AR(Vp4)hl4rr0P7G_g)ooRa=F; zKK76Cs()Va?*7pkzT16~#k$7xshbZ_mDTU>3n}yb@b$LzNS)gJ5NAAc?E#SET)e$; z<4IVvNAh`QV$$YEIa1=8v|-7u>|za)*aeh0T3n<2-cPvK;=|2{BO zb${sgVJnBeaI?8rp9t{!vAQI1`!FJWZfajo(&?CfS^hZk!8R;j89I z2L}c)x}bnQ5M5VDTjFQ4TxsKT$O$}CQ>Bi>(^M^wKVBU!<-4Ur%!1ho*z zZAps$FZSNEt?6&w7N!IU5J+eO(n1pur7J}s2?4=E5fD(3uAnGgI>Zo4KuYKcNN;vf z6r}f}ND-tcy#}R(UXz^Qe-+l+``Y_i&v|=3!EYvW&by3p&oOu_&2yd?jVx7C9*ejp+Jlq1TZM0=hp`TtC_UW=D3q0t$576rlW&D0) zWPqjU%}$r7XKsreu+K7k3<9Do*2E?}UobtTiux zB{r-%%x3!B35Ax4bh5#u$zy&CNwQWZuxh=j=|wEcoB1|fvS*fmX|eZDSSsFAY#C6C zyaFA6O@BzSB_zl3N7-2VMR!bif?>Oaov&NNj&F(Eqdu#|ok_42Q8P~(X7rd?K#?V0 zs#ql*5ZYx?`}0GtWa50qyea&joXTHU_W0Y_zg41;sWQ~w$v>KnI+(kpt!m$1ruH}} z2m2nwnWHPqeDUhv(k1DO*!!X{F6TXq3i97e=(%W8D$&!PrWLdz_1g#&v|+C9LR~P;B7M)A(FmI|{G?Mv&Ro$N1S-JJ$SS6Z>I*91Ws*Qml)_>NGRsC@i1s*XL zeoy|jmN=lIrM>kRopUsm{f{!;=K23DXnCI3LS|0elP>NsK%%?w>)&6E{)uOF=}SKl z4`@fELZ7}Xf)~-{vi{R%_47ILdW;%VxIl^MQW@HsHGvH%A*@~q_{6xGJM&romOd6# zBhvevqRpJ?^$$8n^Gsz97B#hhlK((>q@=oh>(3c(+BR`}l#p7?1ZLF@FdV5n4QxVtHh< zs)?X!Porz)xuO5C%yTdvu(xH3=!A35z`9~gdW8XHyu4ZD0aNj`ziNq#p_vqsoA!Z+ zbYVjY8FXdlx3cMCI9Po=nUofu(%*;@3!iqliT_ikV}{|M1@efn$i_FcXf~7- zLNi47%pV8!BSFhoYi;~KOKa`Wh%@Up12GTQJ0*)>t$#kEbH4n4*WekOtTr}(_`$F5 z&VQo8Q%pXKd+%QRVrlX}(cqaQk0OmY{xc082B8z`G^%XGl?%Yj`t3Pb#$bxi*xw)m|5B1gSa95)62|~o( zdGSY2zZz__Wspl+VNH|dC|xbjltqw}-w!D^X(dnS%2Yo>Me4ahlxL=|n1m{q6x~@S z0C0}8$%WO5J+S!@&sX#s*1@)$!9y0(i2n3gK8H^HVr31#My!Tq+E z2I3u20EL0uWVb=Co!wV(ZRg={R`Ln%F-6W1x41e_aEck=5injvN0UE6wPD#_R^rO& zukDt~Y9+n+bhoGx5j`mCv>tf8zH(HcC%R*G8u%`n?L|vK`&X`osh4}7#aVo+Z zH{k|Ebw|_^yFs&;*5c>Y9K{TFbwJU}U5+s=iy1wj@*Fym2u(gU(ccHUIPg~|U{I?d 
zZrd$iVvJAQI9>U8Iz2}bVW6cwOHBp*t3CKp3Sq`=FaFr?R=B;qAM?k4mm+Cb z>+5}Fze_>A0+a4phg1OnhI#=Ow6gx7UN+(!XP$g|e%GK}gDrz7Z2|2Ulre6_%Z~dP!2P0f zJO6u-@~}-mi`;OUXfKz!kpWKiCo8B+0r*sL4@S$Jw7)Pm%Y0RNL`#H5Zq)64*VIq~ zYrqQay}Dg@#Be-VosiZaD}=C=Zw9Tr5pDt@`1@G}B)cX7a?6JtEu50@Z{5GFu{jtX z#IRjATX4C3H>ZC=Kg-sj#Q4{fU)o=O27O0*XbLa8dA6+&DUNLiKBiG9U-4;GZDPZh z(koPcVdW~NJzq45`Jr#i^i3{8!f2B50lzKN9gaqBlDz8Y!e&+xLg?98xeJ~U8G(5> z8s6zCAge#cj6pICXez9Hp#cL?EY+DKd)Y+z=Jawj0GrsZO3+C7UI7$#@X>^M0)#Ak z8F7yp1OjbXeSPid+aE{3zxA=o;~ztCz=;ocsiM6G44@05wCdlkF>z8g^D9YOf;!#e zWHZ-(%^lF$W1z z*lE3kEqj>W-^g7=2vf9~34R-3KOF*RXeNqcWy&%8Z{(V#N)nPCaapH}O4SVu@{^KG z9MqhQt~?mFqc6!oaB@8D)ZZ`Sd!`g|V4!lDrMn@C+#swz2&eTOwdc)}V>km=X+!a& zeI;w`AtZM&ch;1kBQQj@?VvRNUbk*vLqb%Y&y1>wAhNpwi2WTteh% z`5+dRxnj7H4a38a0K*TscnSRFdDL`#Bie?ig=UKl!q(1t*pKlCiw$h z@=L6lo1gl^gWkpyV>vHR7wBCdE64b@cxJ3g4zVZ;$_NmgZY$5@HC?JC@`m}S#XWkvR%IZNr>4=*%A7ee5 zb)M;Ux^n?N^3W$d+kMVc%$|L^W&{s*+lOqq zaS>N%MfmnQ^bFo5q;}1oy1;8VsqrSM`0Cv0TYH}^y5FT#bj`^G?0vCeeV^8TbzUK3 zugl5cJw=ah{(SWw`Kw#(`^<&tyDpizGf zvHz?6uJ;?qv^c+ETbBPH_PZX6IMy|)53ue3T+)t{Tn>i^#U*4M&PtLEKYcduUQek$ zoRf?DUnTAKyLB|z4gV%-S8PD>BqUOV`uJ9|VHYRr_+GpatENxk0 zsV=IosPF!it6e7f-UV4v0i*~sf)1|!GhDR4Di1LE+R!GJriQp+8`4q+`0B#k3Rta$ z3aCEWD1NzTd*HXuicv(n{CPGk$~1qo*zf=be|S(eY4vLdAEB}B?pMJ*vD+TuzkYxq zPF4{2Tp~#WhB7`&Z*>kPv#ZEEt?Zbbh|_dmtKgwV~7AY#I-m?v?Js!KI9TLW=H~~sE0Xl#OR?h_lq&I9hkcd z7#J|rP9juVFVxyGR011%yEv4!BlOlns4;bzxkMO1FHFcO%nTj2R2*it8}{GpL#sGg z9@mHNz|h7Y(}$Lf2vEU>`(pp55AF0QEMxee`p`Sd5NPUuuMf?p3>kp3yc&*rvm5mv z>qDzV59CDkS;P$b#tf&${HTmMt`FUy5<`QI`geWk%19cvW4Ql$szWL&$}_8f)~j$% zM-Trb)CMIr|+J8;`MtW3z11Hh-cR2fR zKFpgFN$5qO%q=`-3j2b_`C_Z&dm9|;mh=jL_50@|?~g>o{=nJd%ZC>dJ8coVLtHOrz>k$PmNF zFAwC#VKtyKDEZRDv&r;8njSLqI5C?;jDCd7M~)Mz=>JJq_olnPp3;2NcYJdIxF@oT ziQ1e(8_)z53L#${dWP|^_$?qI&gvGu&XS;0xp40X$(Kw8n4G2~C5uYz9y<``b@jb~ zJ~t5xa|QgolWen7aElr|hc)L3hcCQ{5a3mB=~K^V6Rw``G*dhGs9E34#Ko!C!i+b` zz~%bgR*od_+>pNMsMm-=D%mTtDDjV&aB#w7lYcQ{!mGF zPwzxs=g33| zyh9In#(WoE?~DiTOfBzBJU;R>`FHq-FTH0LujBHmDammq4wmX;;5MJwjsz-a2Dnt! 
z=N!ynfoi?-Ym22b%>HV+_+DCw_R@NfM%j*m?A(hdZU+$LXhVla&A}V>n%q>8R+bj< znilj>tp0H8*IgGws5${&!|Vo)qredcGDfgYqbVXpUG7`l)@-}bipFYwzUG?p1ChGJ zLkwS!1|HY6(^MK4yt(>#TuV?5+d!)`ng19VJ^9hMA`-r2FZ4YzhyMK|aIg&_h!14@ z0Y3zWL=i&!Lxh;7*pJ=%Dr$MkvvIM%-TJOn!z0j_f&ENc=~Pn~3x{TCNAHw2$_o#P z-d)Xw!6fbQpW5AXV1|-j5I?pOq4u!R%uTBG)r+06dxvF-b{j%*x5?ABUFO|T@abK> zlrC=84xF$$Cta=IYHy72#qgZp#Yy%sDMLTYUkeE{wSm=EZ~^VK$0>rQ(*~gw6{*=H@mSW>o-7@AA|?oZh@N-^oC9e;<9r zQycZ=OnsSSmFsgJ?L1ccAROpH+50O+7ZNIOuJk{(0rO&3D{OshsI7lg#JN~&iC{r~ zd2#0+wj;2T6GPknMZAiyO$8PfKE72^l<{!)u}YYb2^jo6p3(F6-1Y58bOx-)#0g$d zpQ9F~k~kJ88&P}gf|eZwo2CdRzRPETYq|A-9Ewb#_@3-?>`A_xp%r(-ivxGG3^t2T z>qQIdk3UaJ>&XX;XuHT&rFzUarQ@uqcHZRH|xa^F?U0w7W{{s~H2~vTyG4 zudl`oVy3ZycRC)PX)d72PSfbHUv83jp`+p8^T{bjTaE1>th+A0tb=%QSILL3qjmMlow;4NM-GO-AAUh4-z41}z2-~!wWxkOQrI6Dr@XtgoaJoL zEA)(4?J-q6O?%>NueIq%iSr%ZY-}2q0X=84)Em7Jlga}StMP6QhE;yDSO{#U z%`xs}g%fNzTf`3|33!3k&_+`N;4>fbd0Zkt?7@*-CtFNl+kwqk zsD1QW3x@{N`jvCLY3J=d&!rFdBoLSKe;* zMfM5h@+xZcSNX)-x6<;y%5a&!h^+V6O#C9gxStv0Uo&)Mu!~|9**hT>r*3?Sh{OQMjg4n+ps&AdS_P zOQV&&LZ!TF*viNKhNA6>fVU7~g-|BV;8WK;3V1L;obdPULCq3wl1 z+cw+kyBiCz92z3q3qN~MU8;C0(}T}kzv=^Y{W-seDZLylzWt1}!Q+AD3cBQXU;p<& zc7D1#XL9m|ip82D6266}+Hmm(W~>9}t~Ed26JXvleWuI&cNwp|Roc{$thKs?M!0%k zMbDBgdW#2j=pyeV7BdH;;1-cjlXwzdNZRQHM4kOn0$yA1{p{ zl(gNbY`CKaxEb?eA$DMWBSf_qeIi#K* z&0JWV_KuP&?rs(jfHR3c*`pP)z$F>)e1XZUnb{aW)u~WIq>Ywv7Bkf|+|S*ii4;5K z5GJ#`G-v{qWVtqPyyFzwBMxAlopqxL63ukp#us<+q-pwC#WB=MjwYWKnO(eb@{^f> z_7~pc^0&Fva+I?m{r9s#;T>Tpx}&KQ++<_G7hwFoq0&4ZG(+{?(>!Dgv(?VZZT@?5 z0s=zv*peEy5^GqznC`wwn0Dx8DAJ`8_&!dkD~*e|hsebM4Fr+nqx*v!61`!hCCItY z_FZR#cKz1__L6J|c-8ztydF|7H3Bp~c|Ww+4Cg9xtf=#7-j*+)vZ8Mfje$D8FBjX7G^dWO_{*yqm#abwrM~(b6+$vTA zk}-41%=}+M@V{>|97QwJdI1m4ZH#~j&2tQY?N66AFVOq69(MKl9im87(ypvPt~4#W z#L|ye4=m;XIVN)tESM{*XiUS~^uGABu#CZ2wJt~AYf*WB&}<`SAyMLDwA{n{bA0tU z9mclziHSMMs826~nj=c48R0RP*8IFC9nwY~G%T28mrx zS8^QC@UsnJhG9O#qLgtFmDv9_TNWa?f-?L@&geneZG8Q?)t(LW2RRJ;(tKOc^I8{I z=~LJV3bWB90V^k9%0Sam??ULcs4y~I;RnnEYd*#$vPlS2*<5BY?nl7U_p^86?fXvH}r>x68-mMqv$-N;pwxel|Wt zR9c&%8POEk3UfR;+gwQQIyag_`xMBni8fU;2{&wbPIjaSNs=WX%xc`5z7InO`vVtBqp;| zp!+iuM}0thoRCeJJZbM0eU;60aH%MGU{7|aZjhei3(1k8Vs)hc)|LL;A53*sjTh8J zu(GOV3beKBKbI{hQ-A5K-ZmvJpZ5-1%Pe2s+PR$mOlHFvgT5>9eMYNN@c@2`2VUmori7Uf0T zx-AyW7)F3nQoGqq8ZdT&ymRwELGo#9-5P@IUL7oe+ljVOpi7=DX4;hvdUhPAW<%iW z57*<60O_CTdJK60<>gzKtsF~nnYs?Dr8 zSN`AA!@r#X-;E<&f?!>T+DP#e@tlj|6fz+7?fEQa!B8wn)~h?K9WXXfWZGKGa(+kH z+GUccf*TetNP`B}tQj&q^Oj|ysm|-2ua_XZk=`?EFVx@O2~pd$?~Tf{oB&Ms@AQ1< z%cr%L+vmPskj4YZwq&n#oqz`u0eLU46LfMag%`Va861EHZypq$I>+%1%v4A7lISUI zF$t;nSvz6+XjrNawSAI|ln85|5&piCEO5_Q5h)etXH=$qGv`?*`CYnTn(Phh4&u!n zPT$4#2k4#khgDNP(Y2#A0z?sA;K6!E0AK=b;FP zGvB|5(mj*FiSV$OgY?N3eN*raxk|GjCUz}LY@KZySY4mz(IuGV`E4Vk93n^mgS}1P z87*VTX1A;6b_Wjj5t^10qqE-u5tTi(Z-E(y&fr{F3i~PIQ*-@syy{o)^;^L1KF^fc z1DHD?(LCi>RCSAx9c+p%y)75=UUwEDl>F?js9o@$>AbkcX3HJpe_orJi)qUS2aIKP z2$P=U-TQ`%;o1dTrZBbX@rc~$`w9P)%xh}t^66Uch*e3rpCX-}AInqde2o>?;9#9bJcq zK367fmq?dC>8(UfY(_tcc-&@y6Ue{+WenLONyvbI`8wul2s_~MN#oo+r!?skaV|b5 zgoeFk0wXhL(g#!T(#=KsADZ-sYt&MUAjj~~UJg+iub$911-=jg*-RyRPZ_3Ju=2U{ z<(ICR<rBMYs+{qFXH{w~P!rK`uUvvM8()ciIVf#Cht5El>)Cfe~jo)h&VG%A> zXdBHFY9sYUPYfdWVwXA^W*0j$AjAAruNNc>PcT$>zt77qDkP(Q6FL#e%cq8p?!z&g z4l6#(&22T|v5Kyd56`2i&Fz5o4L5^*ZL6^9@VL*BW2y9LH8Di6;QxFe8Ug_`#0|$% z3jGd9Ro(Z{$%zvsZywTmgY(+M7lGHKsaqzQLkK*fmKMnTi%Ck2O`qSvLlgx2O7#f<&zJVhhjB z-rw+Y+U4uwc!pLO^y*8EuHwYW+G4h!v#u!gDbfC&y)+#6FZ8 zl)9CjhhkzZ+CXS4Y8PE3916!Rf`GJG(H(zly z&A}AFDYlPdSlK)sPOvtA!Z+Mu$NAn%S>>KMgTS>yOPE#gy#d(tsL6arP_5Y)+b+fd zzy-H--UVkgP`npnYm-*|^lfffkGLyvmI9gKl<`^VQaonoQbVulo4c@H&ePalnwiW8 
zULNWxP9FsRy7uoS#J`3HIEp;Y*!AZm&}~p3=q`L=6Ec2dDLixUsTLN>r^dcv*uso~ z(`(srD>$fM&?L3;q^(cjLXa+$kziKaE11IA7U+5$J_e-@JSo4cb@^QYjA8#h#%@v{ z{pmj2t8K$;&B$wH{SQ&sh3bOn_J}4glP6gei8tp(6=$sD_{^AoV6cPBhFXspeg*ih z3L-;@y<8xGM_yyCEx9Π#pwEoMSmxOwxEeV}N8I z>(ZQG#@8g;(J{$2P~pK{v(VGuiE`a%N;>7HHy(694E$iICASzIvMrS=L%7l$=v+ye z4CEw^C(m29Uf@^kt?`c-rGF=+w-?m>i<|}V(s%o?@1=$<%7(8EIA)u+Lxj`0cC{A- zjH~(aU^%9BXoSWgq_~|~YEHyf3b}^69ZptJz?qYGCB{totK1m9jHo?_(6o=5nMG|) zY3;SF8{EGr2RxUwf*lOMH-x=x?KgxSI;8d3PLI8xO(4xFiYFNUFt@O6ez@n5qI7X{ zF3l+X(tH{@iT7^0qfGg!49DQTQ5< z^sa@>K>y}Me-6QSG#AYHhRTq1Q=AMS>vr1E zYx6Wb(~6zNuCqzpD$uy|;w6OKx%xD+U(H%iM}3O{U&JXDi<~!~{mi-5 z&$)HoMyK52cF-kcEHqUyHzA(clOt}|GzXf2w@5I4~NwD`g|+K!+@L&QRyggivPg=crz#LSP{ zK$(t>bo*V8CUH|d+l7=QLKBL_Bh9Sa&_xrcj4N|GG`CH$&ycHf>t7?J&zGCS+@AA@ zB6l`Tgt5vp<=lwl7w$Esp@;qhA??z2vq6OH0Voxln{X3FD(`$|iCkVQmXp_NR8l@4 z5zDU;!v7Q}()omRn5^W-o(uB`U3oo#T@%$tjlHQ+@mjTNd&(lT|Na~@B9>b)QLSsJ z`w|$6Ou9@=?XmtOX!4$YR7~!^S)nGE!v1NGRISqv8cm6Lr8j>>Kjy4r=h$5^jwWNYYC=CT;Y9oCU7}029(rhAb7iB15Iv|35+t1s^ma$HP zQL(gb54$6uDE~-QJ`v0fv>p$BM7uNF4kpeA5Jyofs`Ai(4y0F8a4_~i$mGz`iyBEe zYfZ|Fnv+UL@OuOcgJvdA*`#(jm#oYyr!^7X7zR33F*AW9h+!ueXG9!c&p4i@c zIUJKC63O!Owr~jr4Gv%=5q9;>;^(aiBu%nl?~op7p#A^bJDZxS+&D4 z2%v&mk0RN{Nx7bRe}6rMq2oBE-eHuBM1%M=`FZ50wJ}_523<~T!n%>Hun<$cp+jB7 zC~)imFDH;IDxyAj>xO$yea51?0I!~yB`p0Uowv!OQ&_AZ?QP5HAyO>t%nqxnIW4a9|pGtUEiw6&LU;IA1^4`bY1{NH!EWa#G z7@r7+t+L4kzFnJs{Ld|VM{Pu5NI;ZjKl-z5HwxBB$8kd4*T_~Y97GoO0+!h|qt+C> z6YqYp(?P{L@UKl#PO7&c?>! z_rkSF4c~3TrB=sPArma!>elb}x6-g2Hm+~{(geH4V=H-0Z9>t}Vx+9(HWit8g}Wyk zfu`A*LrNNTv8feSN@Z3W<Ic5CF=y6ZeAvd$Rp(iRv%bW-1c$ z8hkr>Y8xx!PIW|)SGImS#$|ub28I=>M=3pTPd|TLcJ~GKHEI?rX37^SW-P>wg(G%| zR^ZM_EagKkigzugwq&z)FeH3e?^=l7r{JA+-mBc&fOz5P-8E*vH6djUc**2XhHrLX z0w!K8&zScYH3aPJuQ@DkNOVM>^|AHa% z1pTZqyi=F0EHFOf*dx)K!+OB05k<)Qny<&872}VSYI%V;{#$dYbts?apnN*!=+g@n zNE%H#Y)$kgvhIih42I@B1-|ChI+G$d9+<9Sn%5!1px{Z&^Qz~(->fGyx#l$ z?JNQnK`GSb?^=J!7{!)k?rjtaK2v`+>FI}i#>s7FIpX}Zhf>TJPEP(@cz^%;OXBVb zr9R6ZNj>*GUBmg&%#RP86HN~#cbrE*anijb{%d|yqO1@~S>eXfNmP-`f`tIdd+$Bz zteViwjxD?2-dZiuZoE3dOYcL4JNfIfyqHs0=vo0GC}dSu%h&iZNVrD!t(8O+rb*v-^@*L} z_qJ1F8+Jgjc{)d(QIwghmQA3ag`cFH)mP;T!qbashNIOL5*u^*&)y2b@~SxIt93Jd z-KMG%g@j5hRe#S{3!QhQxC%8Ly9Y;kUg95maz}JdJ%)lMa`^UIA7P1tnGDg8XR_w8 z{1hxPy7&l7R2u#YW)~2#qPo$!-ugt)cr%HQw@*l23tnUfA&%1gV`>O91sm{Vsiy};# z8VcjJK6hW-p4Y&|Vo`oCg(C&-WtQ56j|Ji(Y0m2UGnorN7*_-rm(Y7}mJWSh)}hef zdUwE&$cWia5KXP^4fM)uc_S1kHnoSFW(_hGMr$XTz(K>38yBwhS{LUDJpqX0e5p}p z6A6h_>{gx_P85DZH|H7W2nb1wiz#2$6AivV&pR6jXId1Er{R4HP5|(BI>hU;AAc74 zR~7azZ>&%o6#v0Wx$K5JSsj*p60H%5C@;YHMtf;~qgQCGua`#@pKcszhIm1NVg-tZ z1IDTHXvL8>94xG=l^fPqGw5eVR7)he5kX{2;_pkizWM-Gjrjxi{7Lb^gKe%Q3CQi& zA+j{?I+u#YDP(i>=^94@dn4c7rAJ-L1H}#ZJz4Vju;+X0%Rnd^4ZS;y6w+v=OrXoc zMpY_;0?JgRO;xnfZHQlZvTa=dnc=Nncg-UGP3c`tjxMDtdsxS_>)_09`RAIEvIakC z)d{aOUw>q5!hD^{c%UUh8rzjn_C;uX@~iw+tTqGVruF-|{Z|LJy;f=;AUi5WkqPA3 ztcdQ|k>74x$Uwzedv4JX-TT)xH3tHtu1W9(6O`7*ICeXfg&s%ggjuW83#Lq>m}RgN zFj88x5V%CjVk(e(-~I}CqIN19E~JL%_Yr|KVnn9TZcl~rBuw4DDgVt02OHaki-5n+ zmdyA+ZGpEy>}}3X;()iO1i=*!h0kJ1RQX6y1o6CJBAt7FJB!>3{mE3rAk6m^2X?Qx z$5)E>lI)U?J9Hdv;_2gX3bfIeX5&vO4MUOFsXev--ZP2HWobn~8P(YPAy~K+Za55Z zuf4sVhdu(BwCCEvA|=h6qNafBsOKa59>FP2`shv%rci9tWCrR3eegl*mH6ZkW!AY( zN5{4S6XMGJJvsPIeRP+iK&sbSj~ zyKr6KyD@;I&Jt*E{^3`^&hFDK{nFcs=-L>e)uGjK-FfmP6;Z;x%4dn0>nVQAg}(-V zO9P;(PaIQ6NIP{_Gu#N)e26xjSDroZ79AY^g@TaqBoHfl{ps1u!-1LNX*bnysyxOI z=uan&!|L@UA;R*zUdf>(uY6iI?aG{7*u16uG{kIICpUlu9~OqFo4)UPG9QL-iB`7^ ziPJcoOtDuF1@zIfDb=mnseU(NI_zuksmD>Y2jy|q8hc^1aY;@>xqG9samUhSsJrw9w>dD&D7K0P@x^Ae#}W=h#lAs+h!hEKnracPoGXIl@I%D}4#)10YCF;l-Z31ZLw-R#*fyM~ 
zzXfBgv*A<}c#1l6n|0K&G`j;XNTf!h9|7Ym0iT$N3yH#7s4Tm~{5-FRx^Dq&zce`M zvu}ssOCYBHt~pk)U?CWeLf^Tb2pTj#UV~PR0=IL~P=|85CDB|} zTWg_=zQ?{Q%dpU`p=yDsk5d8I)OIH^tZFS#b41nBjLp^ghCU&%`U}aqB$$RU-@Tn2 zPe!i0Ym;|Ut)`Pk6JoX$pK450uD9`-f!11hjtCXR%!d;v?+_&;d$IynOr~9YPM^Z( z42Rvt=f)hr-+yhfG^LdI`{R@bU*Mnnom-FEd(5`KGlvCBnUmtBKD^4+4UcW%2*LjX8viZ5#apOA_hQ^;rSN+0%raxe6iTHEj!<$>B#Nl5xX6BP)0Xx6y zM5Wz^+N-Y~oJ{&T_<9T4g)F6LE(AOO}WcZx<~+LcB) z*f)w_I7Hd#8mrCI#>9AJ;h(KDiZ??)x-I`rH)0fS5`mWlOzV!RG(OXP+&I+~3FrS7 zr*)aG%*soyiIxGZ#Nv+OtWMfUc7811M2k}zgNF)c4J#O_eVZII3ZNixJu;?Ls=dsz zOS?UWpzp@qHmd;kAD0o2PQ;~5R5tM=F7-vu!iZfvalH1RbC#Zd!Hz~~BH-rDW4+c< z{FXjWn;k(Rf39$DL*U-wqmB3T6I>FSnLu@~u3TLVMD)czcfKwHDodiMG_y{%hYO2( zZy+2f06CJhr7{4X{t_ZD+_n+#q1R2pyl8IhC))M~78(Q{6uIWeWYUAr(Vgs{&bP?>~HZ^4(2i|-et(s*!=*l3VW16ALN%h`TV0hDmW;@6pAZFf3>QX!m&-Z zz?8=xXZkMBqEaQv+Lce_xdPHrDO@VEKkI99`GR@zr?;*cN|70VEc}>xmriaQ@W#jS zt;py2$Zif9M8Ku#hZx9d!DulTkO{BsbwGoVpwU+(4|z5CQBp8*p4) zb7Ktouts2c2zWRR1)Jq4T3e1Cnq-AzJL$mTU=otYVVN`3(2c>*qxqU`xznK6@7fkU z#E|+xk`lQ~&ggE=v*A$)(j|rQ63wR^AeLMS;DnJaFGBo3^FGf>pbgV~dK3*n%H)DX zyHk!v_aL#6`r$ueZD%)I&f>;)5O33aWp4P*P%neyZwMIr$F zj7In)IOWZU*225TA(+34uw(bzrVc~1Q7HXKn}W=Gc~jzSM?A`(pZW=~EFT%kKzyE# zn4Nfnj_C5URE0QbQbI3;S_=YXQz2Nq0Wg{{#0vu=K%WqysRnV-)wO_~)dUe~&=!9f zeto<)nD$ZFUc0>w^py#3RhMIOPA$riejUj9G51QWbmK>j%VE&B0c-Q`x+vyby(rV( z#b+mDp+6hdRt4Cd9I!Smx@3iY3GR2z0ee?(vP$bfci^~CRC zcQUcQYtuJxeEaAROj0C_CqISvZdLD`%!ozr0`^ygH~C=G1(b{4f53#B@40j^<9#Uf zXwKjbnCx!F`_2nJK^{BsAS%|c7x-}m%7;cs2miWs;bck4BxTNUbLR)l>(Im)(SRTP z?)>!jfRWXW+LTD3IN~}^LW!{)e9yWC+AChm*zy%kzDhr46YGml)wH`L-s_t}XFCZC z$!vvOc*e3hY@^5-DumeCwZ9Vncv{MDDyhJ#g0^} zjWx_7CXR-KZ-mXkWz(iET_{XW>4#TcmpXNY$bYJrMPiY<6E?QF*K^U^5^>U$`Ti*! z>lm9?4%hcddwYw!zS`QCly~pkxz#)68OenTy95E7%{JxTvAN!i74eA9J&ov-158*F zam*uqFEPa7zDV%#`2W}WkUAFs8w2-9!dIP+*bzGJ%i4G_W!RfZLd0^0!Ad6@%6Na1 z;lV|aR0b&s6nV|QPFBN<%CFCX8=1l_zW>`>8vudQ@=CQlH4&+~Z49h?+#Kh0Et1T0 zL)fY9-j&cb(wL&8*URfql-I(9^?tt`c~0pD1+d*Bow5xzZ@AKA=U{?n1g=e8y--%E z!ub<5!i0oD^v;`2==!v2eXu7-3eA1D;y6L%`ucB)p)(F>WF5=qf)qWQpzG4Z6R!x& zQ>ZFD=@thqrFb~leRP+&`t=to3JrRm5>PR6N1J{HlgZ_YpQ|CeJM+jc)=TW>yzgds z8`&U1JGD5no28-VvpU3Z_pHQEQ zbDne|G|AgNEX3#Mdxd48JUv{UI${&>ve%9$glW-^afpZ-dhJ`Us2_D|vNW3XUbtI0 z13>HpH+gUG`5NcKZ5y1vb1H&DCy=*lPq7;){xAn#w5BALCs5UCBvKy%0Q1w`h!EAH zotqIxu-&`^Wpi8d=z^^|h{uRJbXnkRwZGf-vLx-v&A~Meks}<)o8rkjFrvLpV8|9j zJlbu&O(fG$>9i3luk3h1mrEJ)JwtW+_-^q0T6_tIF|RDxXAmf_SSr2|_B<`r$GdpEJAPavfuFc{WvCBYBdU!Z1=_!z zFw6lHALVm|W?Z3wCM{3}3N{*in3ZryVI`t@MK-YlX)-`IsJhIubCsEW-8*?ND(aAx z$w6Uz!pO{!>s&*ixj)sO`EP6W5M5H^*Kqb?r%+`98b4D} zyi-mN>VWk99LO<88ogr^CHc(Vt5vp!m*#hwUui=}BU_WR2!RnH)R}+95$oz-=SBN$ z=Df?sD=+6?I;oLhG+E&w#H353wjnK2j_`JuZ5(1HYKG+=o3EFR5t!exysr!G^b=zwy=s`Qj8I=Hz}wBWK` zFY{?8hr%a-8}8GWHScRrBow;0LBRH-BIC*|cXxW6`_4#CJR80AuRxF@Wj`hVzMm?+ zLX@Fx%213&FPj_&BzjpL-vf>kT_TNTbhzijE=W>_RzqCu+Y{-QyL$WVfjEbqt9eU$ zPU7)=W}C}15}xyyGS!y1e)}n8Ukf9BW8qKm%+|=#B=bIdafCC#eN8<-yO#7kx7iR_ znn$)bzR|uAPIslZ8xcs;pJyp;W>Y7vjXkX9Iww3J`hJCMmWI4tTf&h6y~>Zgmo4FK7nQsH#v#z#7lB zTvAv0I0>5oi1gu71MPb>lO8&xN#BjOorBqT78JVM5~xlr zH4(tLM;C$lPSjc~s6|3<;tqN(6ByXEx6u$_zR$YX=K6ZE|1GoG6MeZA>mP;mOnF3C zUN*G7FRG?7V@mVKD7J;z-XUt$!#q{9>*(-lG;lbV+hIZFf_M0*?0{=KZ*YB^=xO!) 
zZ&m6v8n@igODDh7)gDB3V*O9K){}RlJ}vd9qX4nGY@(?uh1O`gn*(5&oxEO-j`Uf{ z+@Lc-Ubd)<4}G&u1@B5&VL1OiwVj|kp4#d$8%(Fg(pD#GU9vg?BP&X6n!Q3vi;%UD zj_QD&(@ded_zsweBzkoCyR3uGEnT}!h)QV0*DHtnXS)_0FCwH}U4oh`42v64@=hea z4MU-8!K$~}%V!chVo;w5jO7)wFSN9-GydQkHvR>_F3EXcz{!jP#GleR|4L6c6fQWS zeR|2v?X^{iCRrxhhi$8pONW8_#VEydCdu6INPS$YDFJNbE>=*3VwCl%BcAbN4!(feEdl5$xBLUd@*RIZbCeY6;vO-g09 z6QZ+i)uxp8@&_z%*Q1H@KX-vB7~cGTg)0;mtj6*!a?NnA{tC#Nl`;mQ^F8g>|6%Vu zqnh5`lAt9kiFQG_R10n`Q2)&3H1OdSk0g4aW|P^2jk5a}Xf zLqr8ZqzFwQa%stPqY588_6cC%fW>^y8I@-mQJf z&t0;$h>K2jfBtv(z|O5W2vYL5tc9hVupsW=ch>)$wXl;gDj`V~HT_c*+g}n!uSyBr zbw)*3^!^cp=&mLI4>5?kQF@KQ{jxtru~8mHKPF1bKC9kg*9!fw{WW3KRw49(@9Itz zTSL`{R^GntzeN}Q#;#3v|G$Yrd=RtiWkQ?IO(!;BN2hAPTuIp9fkZM?peTJ@NT0kk zBZTunk3?7(dFDg-s>15AeJxNTIT8#iId`;eNsb;NYvUTL+Cd+oQ~QXkPor@D?Gy1c z{*jYZnLeb|Ui2Q5aT3?I-OwJap$S<@%V_pJ0sO}GWrA6rb7AtS0OL=`)zu0dqqsDd z6Oxv$#Gb^^1Uonw|4wu9aow7`rLb#UcJ9&p5J+M#b{Lvu??|j;gc2<(G3t;fMBt zCralkhYfXc%2g?Sbi&Ewp>qaxD`gkGhA!?_(3jDI&ey!K+2SsL<>HYAzP!79pxJTuclPFbs$pNADPdbt5&6c&FU-f@_u=X$6wSN{}^l#aUh&}&~U7HAd zP9~nYy62iDCE?QA8`tN%u8sc2uGx7g@%?z~b6xHFJHHz;@uV+P?0?3t8SmYgO1Aaf z_>_8~a$`F4`r^iCM#SFDnS#5Xo3kawm78<_wdf+_eZRiFu)Vw!UDVe3L}!HW{;jpq zbba!{cbzX<^gk*kbFKHKoK4z;z0}sC$SI~z-u$x9eO_lja%x-evq;a6rR&@h$f+CX z4VvIe@g6rds31z00>Jc;(LeVEqQ6*M>=jH1^MVA6G*Q5?8tpJ2CmQSu7^T{i&%<8; z=3RbdG=7~F#USGsD3W)_j3dHX`gnR{Bqv*`rh|rO(lp0zLxhfZT>Vxh8Ba06$*UUB zD!y_;n$Gd~hlT|yFH7)HBE_~<3{$2$gB;XS;d}E8gEKFqcurD;zdGrFwnM1kb|>Av zsZq3Ku+*hY>8i5z1a5Sv=%=msSRtv7NVOWV1rr(YlV@F`G^S)6pG-jg^^RbZ$r}RD ziQRYfw2xLs+$_9lDr=c!X4SlMhZ$R_e8IHeesm?JaiDPT_1^w7n=5x&=psUd>42l~ zYHH7piQ;bWfQ#m8+E8qfTCwR9cl*`!k%1!hhTbQh0jn9~=wf2G>7aM|YUYf2vF7XE z!E4Q{Sxd3STAxjy28^y|uMZS!|LA2u4cuJK0Wc*bgxL^H_!|Rxv_ww;RV{v8hK}B4 zA;j^Ke5?o(uW<^Akw%GmRvbfsXB~AH+n;@kU6Cl@iIiSzNRm#jQ+M(~LJ>7)QQwve zJ5S136tA``Z%-D;2G3X=eBQSX@+~nrkUj?jVfB#g30*XkRe-1EHN|MvZ6eLXmF&&3SYx94#*v@MZ_P3a$- z*8I^l@C)HneB|An-VdZL9wHNeCE*R%5Bb_BoqRrcngB5IaOP-L!0Z0^LCha5OL1Rs zJn;xW?epV--ZFwvL z9301*;JUH?2+vg=X>9(HC&-U=5a^IxR5^!w+9`1@HJD3PnZo;ay^A<_B@%WMV#FMI zthyW_;XD>1Jioq^wV)aqF*F%iC)lIBY^)G@(Ruey)&fx+jI~&HlCSENm_^dKqP3vP zJ6Q`d(;(c^v-A7%JJ}LGOPB9FGykl)ipN=8Tg`Yd0TFgcLr~SPFcQ$N;zRz(53aS@ z#vieIos1r{>2}`tOK>H$eP?tdG`D*x?x5i2aAK>0&StgiZF3Ooypbvv6`XKu$w}hA zV26&u0OZz}N2t@7x{HIR_!HnarkY+zEF*Fx@3|X(fk*@+I}fb1N9u`; zB10pV%{gyB{U;GfcJ5`G=d3|^#X5FdKfTR(>`xhpdV4CCI2hIQyfrO9HK*YoG6SuDUN}Q42 zoe&&;PYWsl1n)$ObszG?Mc%O6hNlJv4`z^{A3ua}lg1;6CSNC?Z{7Ot<)_`s)^@p9 zocX@K?J(PDEY5wmvA%@0SMb*xT%jNLe|-P4q+w+X6v5lb;Q#g&q%)EW5g^dE*Ilq9 z#kT@~V5UFaawz5ulM9^e#%MBcl*#(OW+8*=t`8{Je^IcpB+e}+_ADOk-i?hQb8anh z?XM5|CFfpj4BaA7W2u)b>p6~7sl=kI-Cb1vD_C0~*mN4THH59I=VZg6u}dNAWXu`` zzK_Kz@*4Y;|C#|6`mxS*&@}t{^3$|lox^H0N5m0tf z1h@+VD~d?6bGp+*x$AmfQy?(Xl{>%-66)#*n?48cI#=ily=oL)n&MHK>OJUkG5mED zuAM8!?|iP^1)DW!eOFYC9rptux=}Fpg8JnxAf_dfI~IuTXpAlEa$j2t>GtDRPKj;k ziG3A$DH`viBN+2+9rwJ%>$PA!9`1UV5qEGTZdbcw@6Z*Be0&Ge)roQG@b#S=>G&Uj z#}_--#YmU;>hK1Z&s)0$;dV!hNUzHT_%Zr#CJOW$I@gYgLMV|ZjqbDrzstBJHsk(H ze&=DLfBxD<1A!zdjienTo~(b8ylIlydaPA# zob8PqK=|(_ice8F$Yut5l)<&hzzOH_Xy)?S=dP79_|tQRn{!1+bHz7v@xpoFxjQoU zd9nd{^67bs&3XUSM4@>2{F{735L!>OfQ82_kDn%u@-8m{u%QBrXh4_+GfpqisxE+0 z0iGqmJ^%m%fIO4S#H>(-8;ijMf#cXk9Cm)3)6@$9@bGiOr>>OYC>-1!pK!jLxa)m5 z|54nH%_L_O5K3@3X9@x1K~}=2*mSNJJFlR!l6QE_Ja6$b0b8>R`;Jh`sp2q;FJlYJISRXr0)eQoj>3zo{qUOm@LM8Hvvjxz6H{Y&-$tbA z9ln-6j~yo0N;nkXJOY87s}siG-^ODlIg1M%YF*yCI#N+JcWMv}RH6f7LlG19Hp-?9 zq{;xR0$>hGEycdlwy){j8yG+^j7zDy3e=t3D0LWWpeW+Ljbn%2f_5@f5MVlx+28WK zylR!zAaMlS{+(%xcbH`r=#D>pedzu(r4$MuP;H-JV^$9)SC5y$;zwIP#sGF+EQB&# z6%R-g%4ZoM_*;-Y3CTKA>FCu~o?e4wHC@2h5SJV&d{lmXZhP^gGeV=#Y3 
z1G--2IRWit{Jk=-!hvzDfGG4v*~OVYjw+81`LT9oE!+iw2`UG<#MJwhcm8T_)H)R& z?2e?o!C>)#8nrrbs3DVzD#Q1busG7FsGPpm0`?K4;}A$)sZ1cVOFpAY^h>+^L-d2_ z2fNXLoOz||&0yeoZL9Y7=1OL?tk6N zuQBfz%-~XXFo>})id8C(-`O%pkQZ1F+IM4b+$^%{?+n-~#ST2FRRWTal(6yDa)%sV zik`~(;(38!BshP+>_%%Xwfco3{8WFN@`KtOO~f9N@|1vf1RXw$$Ide0em9ym0D!~G zsy|@ot1SPn+;Nj!+K-1g$2`cfXCm1I#GO%$8L8gyVMFH4E@0_N{7{wSkS=>1S&J@? zQEU)h;<#^!mfiyx{VdaX*aqRoN{`o03S&z%tAwclh}qG8AC1KUFalVW@CsnSLepE+ zm`^ZE_t;z2?RU8F6ef1)<<1zYdrR}-qm_#2uG$mK2s4N`Cm=>{{Zz(;;Xypq1{mpC zRT(H@&%JnNxxiNGG8z6Mpiqp0s1<&tO9hSw0MewVwrId^sZOe*vgq(@N1$q46I>l# z5N`I;SQ)S)mAq(v6O%P6uU!8ipim8XD#~$+^PEGuFm{;A`3|TnN4e3ilP9b%WmAeD zxX+cn?k`@ym4C+kgQGAXJL@fIFW)-`>h!5@7#HuamT|7_ah&)B&msZE)prfWzm~-> zWKnsoY@DZhLUj4P_+l|${G$u;qfzjOtzTnG;veKIi}wY7tll-LX!+4h^L>mamTLlW zDue49wHC#V4XrC722R2fxeP6koeTVZYm=CXNzxhIG6i81dxM47mz_ZLc}rpAWNjXL!#|#)dBA>i&W-eo-aSY5L- z{+=*t{IX>w`E=u^@2r+G=Dfqq=p_V<^;wh~>k{2DiwE8WeCFZCT6q;pJ%r2XHp8ed z)_(#nM~XG@kQxi#KKOGEMrEo+AC|teBVh6A9OIi+BWHjgr*Q3q)!v=Oj+8;&MeGGC z;L-^9Fwjl4s4QN@wq^?uZ05Bmu&Zajnol7tPw;HhAsU217=HFUr;z|B8aXuNjq1Bb z07%+c#L}#Ce8=~<<4eE#Gs;lrqF;r9rt&gyPwU5}cT4`lU4lvcJxi*k{XRk2O--xX%2~SrNl{R-(f*nXJ7ux2R_ZHIzH_R^=>*ChdZvsh+ z2onPA;0M5yaaLN(!73hVN2$=Wna_*|_;hgb1eg`s<{ASP>IGFTK$$RM2iYGgovBa{ zY8{J&7{+_mjr4o>|a z+Wb5UHdY(V(L4v|xi;rzWs#B;kLh2lau`mYLm~*S0uw}PszN=I7Hd5G%J|T-Y>@+R zXM!S>^Vc&2Z&@*Y3IZKJCHQnO@c;`~;^@l9krKxKNlY(s$8?`39Nb*JO({2)@d(_` zS9o#^m&2Q-cbGVY6YM?k4JhQ{kq3Z*t4vIvEquy-5#L>S$x=n0Bupu6XDP1)7K)-hk7UIR@mS< zY`(O6%09(Q8Vtw7+Z=gHOq7Je1WyDc#pP|hn(+{-hMT#{j}%eNG)@$y=4r23FGDJZ ziD0E2dv6_#qwJNvOVNSs=xDsd`+x+Z%#2+`ex-fV0I%NhG;mDr&pb>FYdh*|fs(bN zO3_-K@M0ZXdukb<-rPQ?=td{uk-;)oWmMy-8v3%?OhInRuyKcFlO!5>PqmMU6Ot34 z3FcCB#VM1j&~TI^AVbFCdBcColZpm$Oed|NBG=pDY~Uvm35L2N^#CC?laIlOE-AM( zuuSSl5GGIxO68_8K!9QIkz%SjQWqEsG8&?h`It*bLcm3S&+%?3S1McvFri7hag_j3 z+&DUg0e(fjd@P6v@{Qg}PWhMm_-@!bfu9=%COu=l7TcKAa3) zU7?3!x-$ zY@+y-h#)8mOa@+1LVcwmR8gtJB(jrt)8=g@7{L(Hic0fWADamG<}}C?0)4 zo)>7IEutd)zV8~lkiRhWP60}OqQ=LZo?!_55<17(=B-5G51N{t0g9$1t!lh5%eJbk z$UAUQ;Q>Div^!+@8#V6r9hN09IAaBGQww*@Sau}=VG6hrfmHK@FG<;+Cg+ihtXBXP zE(Fj+$rJv&NZ+4_=1{{e5w|iOE^F|d0rQD3%ORlyoOu4ASD;)Q3>Q*48^VJZ8Rl9| zNhybl-mUn`T{J|ZuDaddHR#*DXKl;}q~yr^K-AKeRT{Eq??EsjQb&7N!ucV*{dw1H zAh4!|V;y#OZT|FU*yso-4<1~A$minI)&h4e&Oz|$ZAI3)eVl}|v0 zaE})B(>_f#GFqC5M3PzDDkDtk5~-7&KHJ(=y0#8 zJdrvVZoBPXi5a(u)6=0|fYUh4vx0DT0#c;`j6jHDiQ?e0X|P3e@#TA1qVPVvyXP)VND=U%S(VA>`RZt z)%&|OrxgjO6D3UfAEDduB;R*PCxr9)7%v^H_?kcxTm0TjyGeVDgX7ZRk5*j3jKemhK?9~hrlX=4U0S3ugup+G zbdIH92tN~S`}=X3Jua!=?(efgN&>fFy|j=@Ave1MiD7)5@%cGCy%=*5RA zA|&|_{h*}U)S5+sKX*n2=RUXg&W&`9fy{Jb%cXiv2vLvi?(eQWdeZaiFUcrKL5TQ+ z%10~1|5cI=fTsaR%?+!2Bc1n(__14^;I7)dL zCAoU}LDos;}bdc)Q!jc_{3hYr_?pt?dJ8LmCQtI7Z9uE$L5h zml2p|N(8>~g#b^9oPXMiD-@Y7efHpUV+pZPD3ZzS0kx% z!pMpkVgheynN3NL#AWf`5DF+0u|of(-UnqI5Ryc3$ZN~^@0ygBv5*IH_N%t}?I>Fg zj8uovj~r5FDGj7G$@fe4jx$p<4u4BKd|$u%oMyUJ4Mts<H)Oa}oZ~5zU5%NyUg+IEdO+ULNm+F3ll_g@U6MrJ~)+j4(Y7 z?n%_nNB26AMf47u(p9CUStO`LZrL?DhhFiP^Wn4zh{A-qu~;lpZ4@i7yoobHBT`#U z2g(#xj(nt@t6vF#gzz%#JRwjNHGjU}U?GIdYGBurMnortYoS&S#AM4A%*>L2JVGX_=F%$48jSSq=|%DHah$(uFnQiAnLIc&TuFh^t7w@D~9j85|1UE#4{m z>Kt-cPs0I5=T*Kwsc#4OL;E`$E4M_rgvk9Xl#)|!47E(Xqx<9h$i^5Z@t`TXen7Q9 zE#I_{7}Nu5S5WPr4R78s?v9o`pZLJi08EM0on^{l$n|5IK&nID{+aL!XeCs+9E$9G zDjlWZ3a}l3a7$40+YstR3H8;K>HVK*Jt5G^URmQMBp|r&>uqN1NT$&6E= z+!Gr3@orx_|BlP*y%TE8^5_Xy>Sdwgi8SQ2-_!M}+hs`{(~WeQGHvLR?AaNSE;sU7 zKXTM_0K@IIsPMGBPq$s|g?DAXb>lW3@vIfHlsd7F4Ph40a*k z$(xG2aXi@CZ%Epn3)D#GJRQ&TC>o3l@d1?2a|rAqaAw$-3po%&$U9HxVAft%Wi`DU zmcgB*;F}qbW}I(JUVN~iNeC+^uREuB5+Nf$BP`p&XOZdO-ILvVLgB8mX)FA(9lgf) 
z(N2cp8p^HQf*@xT7}v;viUAFIpA!X*4ptH1dI21)08RmQeoq8?EzxZ5IW&j-u3kn6 zwlG+RSiqgiTRGnCN3`pmfd>Xq_tJr8zfi0yt&yHkjQWfY?=Uu50hvTe?njEC+`K?d{iK-wJqakc8^U;1* z63XEv`gq*FGc_C`C{@5)WIX@24VJQExsL-^l+s?nEoc>MD6?BL$&XouH3alVwV~t8 zOUq$VOR8-PVEENgxYtRk%|{cwtSmo3dsZy=`_cGXRkrqq-Rcb>=Ee}#kk*_O`~#%i zITH?N*faK+QmFPPc$Hj+q%BJI9w5O58EK-r!OK?rn^8OB!mzJ0Wt&zO;%lnjF0Xo*>fynXcKz@1<&xdGkF?-3rl1gp<7`ex!XZpd( zT0?5qFD2$-WVJ88re$2BOco&89#hH%lY2Gh-fp_R43ZB?v*M!ksMC`wuy;zvNP-+J z+UtSFOqzM`5_g$3UHdR;y%8UekLiz!&_0Y|UmsT%k{oz_hy_VTN3V12x52puOe%+) z@6UAV$sEa=;q<5H9DJo|-q_GLF$^9vbzVqSy zYn-twoV}3<7rd&b4VOO!{+^BwAWo&cQ0TRbFkMt9Tssd5mkuGE?-&vgq$=37U8A^+Gs!avt>LP?wDs(2aa>glbssloW}$PIqw|bv%(>b41qI zOBO3}(m7QNF?hT-D>dxun&Yz?r%&`LMiJQt1pNwB8A57CC!sk>jnx-1eRf@Z34Z1U@=W^f*?lT4b zk{+emqKSQRev${S&BZV9QvGDH_ZDYAk*y2s=O0+i-QRYvmz{5kpKm?mUX?Q6zBZrH z=`rZ+-aGWTj{i&PUXT3k`4&%)!o~RMtF?_bT-~y_hcM6c>piM0E&qnw#KIZ}O2=%Y#B|Oiw zrFVau^<_@VUH%+sgOk7T^}?A2jT&$EBbVjYBIKcUZ>B6~>GBm~!d2W4d^02O{h5WK zH3#7WGKhXkm_?la{r*>;YVgfzjUZn;+vODW*R7Cc`+f6itQd`Se$ zCnK)B7{|Ip()3;LK z-scuWV?uVP&Uj;DIFG|nS1@`+S`U)GTOpQnBXXF8iKq_SBNUl?*fxj^a-1z4o3lUi z^In_Wt%NEar4J*V16|Radp-2=_fyRH~+tfuj;T$E#omE{;42c%_o&z}q z{3=P-ORIc;ozLXiPI7T10z15MXy-NsBLsX|UyTS2*Kq)d2fjfV8Erwr>KfQq~kR))_pRxLOFyfOApY)U= zAM0v0AR?2!9yv8(Cj|d@2EL^1_A;)7V8gVzm3Hi$lCxE};+&i=23>Zlm7W>nEJa zp1=wqN|qATkVD99!{@!(PBt{r=kR@uH}f8z#mZ^(xSctMej^2hdJOgC z*~2O5U!MgG)Af) zgs`2-gEuC61sKu@`SpZi%soj^`L%o1aQ?dBVyMSwQ!3DUB}$T&%!6dQ-s3sm^Gx`_ zGn|2@$fXC5^Y7rX*G!?!P_2@B5g@XI^PwBOx+u?!eX4L5ax_8etj1q@Fd za5e_BolO%j@lckJY4X$=Cz;+4zF-WxfZU@7RykjzXso%Ni1 z)RjHP1JLc^BB?-okCLHXEZ*w}dOxXLH}3Y$mRJ&JJ^!$O>uSX@AFe~1vxOOfnscRJ zg)KqPPn+ZUVSE~3@YqqpQXR-q*Y+O9H(wX5R`Tmo&-4=& z{XFnq%R28m_aln7BZEo8=b4FSmu_91>gKZ}<%S&^Z&3z}{G8EWQ*?X$(h*K*}K#er#OziR02>uA;7L*kGKaFfz_YS z;`X>w&kAnj*zep$LGZvX2@$*OdXTQZ7D@L(`H6%4kvqv#5Kzpd$)prm*ZBZ5Zp`vA z=%Z9MxAiB$2JHlu>*3ZLdVHa5OBxSR=+Wh=Irv$D6tt}y=~>{Uf|(G%u10;}^~)TX zON86JA_I-Sl+bY+794M!a^cn@$n+Eh>a8b#w?bdsKPj@Dy&?^|=k<8^fk#rv{j{*# z-=?HND7`(ubiLFYBu6AXFsB^hO#G^+FV&AbJ0E8tTFX zr#_M@C7K-f!Dq%y;IT75*z2e>{tlO>UMNOYuwBKelV?Ug41O)dimRGG>Cl}^(q5=H z&j@+qL8SIm%hZg0P*N_tSO4(*3`t#t4$s*Lfs<#Zw_aGa&z4Y3X61o|R*sS_8Sc~4 zT934UO~64Vu3$4)q$-N?INtkt24mStGDPu3F3ANz(JEo{8j)}Qh4R@ zm=+L2_NGe|5_Axm?0oXl?;IbU6P#35&KZMzRD4KB&#PB z@GJ{Oi1sL+fjH>O66)W*Gy-c-j8L2mDIr%t@GT?QL~$nE2j7qe0SoXngJKb}Lx#es zur(I1#Qp;^VpzJa%~z59Zc}L_(DOO^JqP|J5iFRM%T4I~7*q8)(L{R5P?28q-1}LB z%}s-QmisFvFjpH(%5uxqP01uLx!KX(g=Cq4sc zL&cyAS=91Z1aN#o2;4dpcyf_fmPd!=LviQ!RD)Z`6->~O^yLxJ;3~)|(>**r<(EFH zgmKC;bQ}o@G5p`twc{15=I6j*6hIp>2ZhtQ7ZU44?*@s^YtFA5!{)rQ;abxdco_GL zoQKZyi>DclEk8O?d;3XsX&!j}IE_}M@)!}R3-<9g5a4h*z@^({Q|3p)q?JffhPXh| zL|$_VDs{hC0Vmbl0OlOW45w~_WYt_@U%I#{taU(DI{{r13ER2L5R{jv9-m5tD%PJ+ zx34^B@=@;##Du|qZu4-pghRH9>-K}OY3(0XgU{nIrRU$dgPv$gUd@OlB#{idkcVq_ zUmhAW`ao}guisvaxjt#M2bpfqy2_j2C!i%hOn&-svK=kH`e0&I#I#j6|EvG{R&G5{ zuO^a9=+@WIvBKE>509uH2q|)&Xc1ZO`;}EeyMQIlR9x2rJjp>{Ma{l75__xpGEzDx zKRV0!ui*z1qxDRKWAd+GJ{u?%j!+gGu{xM{qziq=7&&v_JK&Oh{9^U(mM<=^<67Pc zFz zP2gHp+u_BlRa@^kk9$Tkq0o^#p_eo`2>A=m3kL%YS2Q{+Y*A)4Ir# zaW|+#Gx{Icnj`csM;dw~DX%(MGVI-7*&5V>hB+P*(n8{&TCcJlE@GQ=yf9pvJ(jLt z>;AjPG`ZPZXczJbqc(CdkfSi|gax`u%}9#iv1g26+m z4j*4vawYh2xb%^1bAS0d8VdRltjf>)T2Zq-Q+G-I2&_+vu*N4;zyyKj|&?_%JB;u{-EO!a?>| zi`Iw!5q3|@&pr+GU0Pe3?EkTK#(8sNeRY0cBj-fbR;h0N2P(Ad{wZBUjGDM%U0C-p z4JvkgJe1GIbtkOHrDP&p^xDis1U_8;W28*VA6}@B(L27Ak1>QU`N>$dKeXT_lkr+> zGm{AXj3rc<3$+@{l9i%O@{ z|L{U}DeRiGr5tnWixrP>T@&s|Ctwx@xSjeweeWE zKjKop@S7HVhph?!T?_89_?yRcM+<(f7o+#C?#QGH{IL@5;Mcx>DfLY*H{&){9`F)t zNL-f?Z};K`Gd^2c%c+$sr>Cmrr_oqm3;#c8!T%4jHK(pGe1A=e*t7P#7wU~`@x8U* 
z*_!{^3-#>(K3j94>et%X^{>BvjQ^npzr4LMQ(U#Z`K96O|B@D5Uxo?OTnfgSQ?%j7 zgH;iT} zj_$1TUt@p%0LOoi1;I(1Wt#lwjz69~cBJ1Oe}um|sjtqB{e_c6iJASzN$S4FpYE&s zi<8=&kMkcnNwFipA)mq;jK4Ukf3M}0?;h+psek7re|J)wH!>&w>iGLV!%5zBxs#Ztm3bL*n{NEkC!`dEY*L+Q$Cy zYa1p{IaQ2?j#H7ce*AK5(US2HT;M0lf2`%whrv6wd{D;mH^<-q)mq*>8cO4zv-wl( z&%gL_>H_@0IGBHD=@Tii3|6=Lot-@srY6|~?$krA;$sq15{Ym=n zr+?NvMn7cyb?MFSyB{@!DB1l@NB*n$+|JVfP4DRS=cPaN4E%?VT>0g`-)qb>wo`{gYG~b&X5A|e>h$EW~%;OiTR5`mf+-g%H)p{BlYdav7&z{F^Z@A)SQoH zk@L+Szp48(g>P0IEtbLirxH`=JlVIfQUBM|<@)?s+j*toETyXd)zd}j=@Vq5uU^bS ztN!ex4|Y0*#;fyxDKYy#gf28UQo`DAE1eyD*tGh)LALeA#PUaP1*zhl)8)t4@kcRi ziDPG)^%h1RTbPfm=S+Uv=@fE~wYC1*+UXQNJpIie3oCu~-hu~2BnKmQPiF_C)n|52 z7bE$JKTa1;ML!&E`<}q~x#Rbw|0|t>MuByt(9}erD3^1p?rz7O+`zwe3Za&P@(D@Y zvke8LV|&;6(#{&4c>niK!O^71-zELL_{h{vCmL}`IcZ4+Arksu>J+jy5fVuQdh{Ih z0-NJQw!i9r1RwYrW4`j?+T45<>oY&z-e@@~14q&`U`naGGg_?>Wf8@MQQo$Grj8&uTQ2776Q}|CPd=I%H zDoWRR#XU{>Ld5v%0cfxuzt z2B_&AIYjgu1%7a(9g@BjdOd{1Ia++826HuY7ZhTNyzu)@<_-*Faee@T*<_|^^hUv8h6mz zWFES&q@5tU9A{(>K$97v)w55}J3&DF-gH2SA{p-TNc!!PNlyxFm>yB*1femYoW#oW zY%+-7nW8H&Yza@;%Ex17wFI@3;2K`}V%)vG!X3-WCC-_mC&&>$jt&xL&Yd+-b2fz6 zoP+WR;*ldWkN0oW@48qAzwKFnysu~_6-dtKq0s?OpQS|H5NrSS5~y|=1*PJX2`A#s z?(4F^PFqDHdvIO5y;+IzTP~2iH&juzn)5hDjyR}EeV6tA*w?WW4?9fRgXZBiAYO_L z5-jxOLM3nJ7lR@_0W+cojfMI8z|pb5gg4rRYyeS!AiPTrFu-bzupom|XYMh5^lAQM zXsLow-|)D?exydK-hu8ekmla!U^dXsSFzC@R5Dcb&Dv3$Eg7QFgD)O22QU}YL#p?@ z%<}aqmVVkWkP1H$@w!d{DkG$Cq=!!m8c-C2m>K79=NI{)^-6_rA?_Iksjd(@C z#i|pc`EJD-{&IeO>1RG#Y+QcyTH0}^$~b1&OZ;10hFzoPtHP@lMThUOg5g8l(9cdq z7XHGIQl|lLEBQPw4FXJ9246IwTn3j(gWx)ola8DKV?q^P_OORZaG!_nc)6t)g5C!} zK0g(0gGNW;5H?*|J_5ljYq#br}#FW=B z2P9saK5^0KwTI?;#ffq?Njr5K-@kCSeC{!3wha{tAGLz1i>x;^8b;fZz~ z(Szfw%a*0-r$mk`yfHos7pPEqQA+EcK2cDg5)t<;pJv3ThS z?ACB3D({(R`kt1KiVFeH9ysxCja=RLSdO2d^t=w<-uml~43cZi`tu2^g2lSe8(-eM z7;kO>8SeQ=4N`i9e0!|8Xu%u3Z{Q_|NL8z9otyn(VS66-I{S&Nfr*Q0)3vg9U|DOm7c|A8}Z0pe94$$^1%h+EeB559cbK zZfi-nJbih8G3S!j@N#;czx>fJwTGf-+D+6t_p})(+x~LiP;+j(i|>VM zCKw$;f3fb~l$iJgWbG@`bMo?`vBgf=<9DdNP=YGr&UxpC`4W$dB3jfoi)+koK>Dt!e&$!9nz+}~~J zG^vd}{;Zh$o3Pr}aC_+0Cy@X#&xTlI}!pE5qIdS8`C zyn4Fbr~S)Q@S+Z4Wovu&t;9|5MYu}I-Yc`m*n4FdO&u8`TH@;ftU&mk7;q2yGWd_7Fp zmAZE%NLuxxp{u_@u>S$O9V{Z)+=OOn7w+O0?shOtS3cCv&zEg{i%X7bVnRLWM;$GO zNfSV>jS)9TB3`S5@T4#nD}3jv#+&@CcF+ELd1D6_ zIwUgqQX^M#gZAl}uz5ykd=Gk;3B+MIc5pp*NHA`g8219XYd>xmHx9yP#K{AKnoB!L z1(-%sWVm36(M+%@GQ4*L_HrbCaXo%XFk$B*|4J}~UClY}hn_^nhM9y$ng|%D#?KKs zTgk!S{OHKE-zj`k$p$7)mLSlK=Xar_>q(KnX2M0*69mu2steMfZgkAwDEv3Emnn&Y zvk5Qma*LP7$J^1f=}CX3@MmKm6C?+huuA@kQ>kH>O)e5lL%NYWvu;TTnr{Dv!uLZs%np_CqttQRMO8;efmxI^6wNrStI8XF?J?6=YzuS zy4f6LIs?^52htP4WnxgXh(wJ{PWxQ`fZUfOjLNiZCU@=`p>RZWu9Rk;OthqMbhZd6 z{k=ji-Yj?bW*$K}AMnom-kZBSSWvk+O;RymYcyZmUQ}(9F+Y(@f0H-7k#CY-V3y9S zJDTxhHWjRqYusF5ZC_|(mjAssOHr7vp_pp3Sx8AQu$wEG<0w4coPRF8$i0lwLBiD# zik?K_*M$m=nv1Rl6eCBAz{^E_z3HEuvMQ#Mw_P@Z64$=a+!W-$32;oV+Uy>pe))m4acCZ9t!a2(n{&dfkn zZ=$No+^hdb07w_N=pY-i|ADcCXss(Du_BwYDtjL=Pz1qHviKUS<65xsy0SL^d$Yyi zvM~Fw`5F=^i?cu*v}B>P0_&P5>#@Hov`V|QIYG20+o?#~tV}z#R7(&}8?)CMu~Dn7 zRok^ed$l|(v+qi?AM3SdE3{y{u{{eCKYO-ri?eBKs%!fZZTq%%o3e4+u5;@Vb$hpd zTd{dNurnK+wEDM(OR+8cvPBE1y}Gshdbp5#t|5@PJFB*0tF>gCw2`~H^BTBD3$>XG zmXN@?q$}=JzKGfd z-HWwHo4nXdzU-T(=4-s@3%b-hq3hee_FJXz8@(LsvyNN8_xry;+70>Zy|UZC0DQnm zD!}I(wpq)&^V_2Si@*|Gq5~Yi$os&B8o?91!53P=44l3ge7zhz!XxUzmfOJmtG*<> z!WL@6x{JbWYrrf#!wBladb_(R9KzyD!#up7HY~VftHa9M!$ka^Km55DOs5%K#7r!n zM~u1!tinw^#nk!4(mS|HEW%X0#l~sH`y0bj%*A4Sn_j%P9kH((JjQBVnPrT_U@XLG zyvA}|m~9NhbSuMioX3+{#~_@=JFLfmOqhHuzkWQ#fqck#Imk)>e5Qts$c}uLiu}EB z{K%ACmXRFAlU&J~ER~l$y@eddn;gm~*~wVU$e=9BsN9gG9K@fT%C4M}tBkps{K~XE 
zlCkW-tz65xypXq?!@0c6z)X+5{Kmf=%*OnV!+ghKe9X*DjmgZnc-+j=42{ox#?d^@ z*sLYh%n@ju&EBld3w*+TY|7tU&Q`L`8_~_?yv`Vc&T~r6>>SS&;?8DDs9G$~`1~OB z?5MMx&;AS``}~&u{Lciv9{~-V#az$|eIEzSrS|;C3mwt)(a?ihq)RN(7%d+Z{cs7L z(H;#L8!Zv){Lv)+7a=`M*Id#nJrySn5hK0QFdY>w{SYtz9n&^_6Ep1)HGR`Otq?ig z5Ifz|LR}C)?GHgc)JR>_M~&1%t<)*a)I;sm%M8^?9nLR2&JW$lR9)5Z`@w_^&s)va z`fJAA{M2D>(^5UqXMNLY-O+0u(`^mbZ_UDP*_(2NbKj{VpK9ogkN*_7?imQBBztwpMBAx?a`w> z*rhGerw!4m9nq_;)vdkIuZ`KUUCy&z+p2BYNG!>TjobKq+rACix(&siEZ4!U*~5Ls ztZdxJ{oBd?#mcPK%^lCcJ>B_S-PSG7*qz<%yxrXYZO-2P-QOJE;+@UpUEb4t-ss)T z>b>5_+}`dT%;nC`kl)B-QS}8-vGYJ0zTlCT;K-&$OxX`h`ito z9>@;<;CdY45J-r_9$ z;xJCaGCt!RT;n!A!8o4d2)yGw4!}PCHWItoIbCf{^{u&>Y{$Gq+aUWdg`bSt*XB2&f4m(F08N~>$p1Wv~H`me(S87 z>$)zgyx!}e`s=_xslq<&iCXN&eyGTv?035C%)X}1{_O7i++>~3W-aZT8|~QsrP{vj z_)G0>9NoR`?S|X!zsL}O@owwy9PghV|MC18 z@**FfBwzAIobkNP+$gWKC*Shp`SLLTAD%Kl^VV7OHebamkICbn^NYUo#l7)9AGA0> z^j!?}r7ZMDU+74`+)BUnh~D&w&GJz1=u(f%RA2Rxe)Ybb^;%EqTu;ni|Miz1_ToPF zWUuLFkJr+j_6l3{Zhx9^ANQp>_jGTWc7OMjnfH2sn0(*&biDQh-}ZnnuYX_od3pGV zf0v5C_;uO%jt|L#U)BU4`P~ZnmQR+LpZQ|B`JB(mlW*;n|M{;T`tSVoq|fT6U(Xd! z+o>O_p8xtg8T+!IleAy^H+lQG56i31&pp5UzTW!+{rkWl?81-GXkYx4s{6`MlFZ-y zs(kzo-TKg9>(XD*ja~h>`uy7epOD<&{RjE|;6IPzKmPGp{^pO&*U!<(pZ?R{@K;Uo z+3)_#zy2b9?eq`r_K(t|pZ{tK5M6xD^5u&evuz32EnFwhp*(*PB~GMR(c;91@))j5 zxb0v;f?fWNBw5nrNt7v7u4LKLN`s(4|)vssY-u-*O;dM85Sbq9%)z;I)=ilG|e*gna zNxt3cv(G&Is`E%V%LH`rK?oz1Fg60|gXqAi&eN~K2|M)gLl8p@t3v1u#L%t;(c@4= z7F%@jMHmOvYBBLlJFy}S7rZV;9((lh$Nxwi&%_%eb7(Rh_XBcCCYyA!I3aB;QlcCy zi~Mz1WRq1^r($nBc3Ehn zl~zWkMw_ohJV%Xm%v-1R_FG|_J@Hv^%QY8QaWN$KTz1?2G~EN&b$4ER6NT44dF$18 z-!$(nFJFEG7Whejr2}|igcFXj;NA{a_+f}WWcW0PBewWr04MhCNZ<~26=RS?=1=3t zDi--KJIXxg|C)2q zgZ5l-#zQxK=+QIh_H@>xPTg&nHE(@((d*9b?ng0){pZzb_dVy=k(B*+;!h6#?cR%5 zKIG$LPkwpmBW|8|q@%YU;pwj~{d(@*&3^Ij!{_(=$;CH+Uh;=OfBkRMf1Z8#rFFmh z`0G!0{1(hBp|8;xV-`<@KOg>G9e0Gh$RNTP=<4g;S6nfrW)Q*hg!J__OxjrgJ>9#M%QO5zfoIH4v!QHq#Sp@m9V!6{}@LQ&LW7vIB0FNRS)U=(8+ zw?jrVrg1rFRAU>B!$vp8aW-(2V;x;XM?2<`GkDZvA1}j4KL)Z(Rm9d6JD5K~CbBt% zBq0m2s7OdIvRb#Zq9YIKzer|sFo4u#C%MK*DRFRj+Vf;7HwnrO`s$IUWF=cT*-BUr z<(09drB7yAOI+sUmbv65O?KH!V7}y+!6c?ihFMHxmgJbpOy)9~Sxjd>GnmnoW-qD# zSedmoZ&R(ILYZrbDmR^=~QPV+1XA-z7w8;l;=GCSWkQE5uf?A z<39N*M}Pj)jR6&?8VOoZGaeM7VpQltz1UEPY7wFlo#I3(`b3Lfbcq?&=n*;E(II{m zq(2ntNOxFLlim=eDV^a;S^7ekz7&Nql_>~mTGI>O6sHc<=}r~cQ=b+Ps6hqbP>H%f zqaM|MNmXk6nA%kKJr%0wlj>B(S5>Qa535+Rg4YZ*}9V*!*O5weav=}w*1XX#-0j}1Fqm?c2T5HHr zg0Hqxr0s5H)?44E47kBH-fbns9Rl#T^G9w)9!Y$ z$6Rf}7J=UVV0XzIF!P>wU+GnEz1Z7c^no{T;)QQZ<%?S6d9c1Yz3*$0tG@m6cajkd zrGHba-vBr8y$LSYf*IVt{u;}<4_0u5oi$+!C(ywej@E`byetoY_AisQ9n7CY6&_=0haqdMb%)!4>V#j(M3yyL6(xM4pAvQ~vmF(McLxvNLs*nd^4 z8|El8xnxO3b6VBRGAARr&CtX0 zoI6!#J6j6Rd8X8!`MfAU|2c$XUSV_3ENHO4*=B4;H0T1|Xg@s~(sYvaq~%m;OZQ9B zJ&QA#GcDIm8|~1aW^1UO#%NL(4%4aT(yCd_LsM6+)0lkqUSsVwRM%Ruw=UbLb?s?7;cj*~W%;;$|J&Y4few*=|&|BX{lbavQSV*4?ma zE$+sWyM^e^+P2v(B6q(VMDdol#MGVZdW-hn($4p|30+A;gS+4V+TM4|NICGw5i%pL zHM7BWi}21Vyv!HI&%q(?@3&A=f()m)!;^dQ$x1wx4Tqh@K^`^|EgZKcfB4BQEAlRr z{LANddD2oo@e)_O1xGV zn2)(d{()qlQE&2~w=OdL4*0+eUhD^+1v8+|`s_;M^^DK|yyj=0-`ba98@Qj@8FWu0 z(wE-!zjxkS%0T?$3&(C2`nls#-)Ux)VHVI&88FfyeZb><`D#a=UYIclPH-P(nXrTT zr{DC)Y@YL5*AX)e!3Xu9%nyWce@Cu`_V_zJ^7}GWxV`+_7cM}BjEkVyqY9E>KI$=u zF4(>Uyck;$18G=5E>k|udqBA>yn^5aEvUeY!GcUchNcKV@tX(_96$w05@uk7IUqrZ zu>&{AKx7!bLJL3v{J>5th%%T#8bla7I0Ik+1`SNU>gz!HJGE^Qj)GtVEug?5+!riZ zgCop173@70Y(Y5dysXd)Nf3iB@B>Z&hAKQ4V8{gjKM(^ka0FjK!X#9}(~(gk_H#gh!+gQUz`PEJjP^PMrw2fimb^0iZld_%t(#gNRI4CkNilG3`vn3Ns=r{ zlRQb3Oi7hoNtSF$mwZW>j7gcCNt&!lo4iS!%t@Wx$(yuDpQOfR+(%XLM}Zv3%Tq`I zwY+q_NO%+1+)nQ7PVf9q@C;A!98dBrPxCxa^h{6ngwB90O3wt&w(Jd*fXmV} 
z&C^s)U|diA+)w`OPyhT+01Z$99Z&)-Py;8;&fHKH_06?(P|ys`;UrEcOc0mJLktCp70po{ z-BBLxQ6K$LAPrI>9a17KQX@T5Bu!E!T~a1(QYU>;SA5M^ece}n?N@*OSAY#zfgMND13Wr=yE*JVXmiJe&gimh0Sy;zLRSdHCSj_p{F{aBC|UqAgmZJz9i~*%wX9ctutf zTe#Fy+Q>s%s;ye9y;`izTCLq$uI*Z{tyrm5TBw~_selQfgW9l7TeV$VwryLteOtJV zTe+QEx~*Hgy<5D^TfN;|zU^DT{ae5dT)`b&!Yy3GP1~_0TbgwVl0)0WeO$y%Cs= z&0g)@UheH)@BLoz4PWscU-B(q^F3emO<(oJ-aM&^m0MrZVHR#-7k*(Fj$s*|VH&Ps71jwG&S4$iVIJ;bAO2w= z4q_o5Vj>1&o+x4@PGTiqVkT~4Cw^imj$#=02`R2(E52eZ&SEXzVlM7txT9h(4r4JM wV=^vdGd^P@G%nkpNMkl`V>f`ZMJkDc1-eW12iGTnAI}E}iX8-^I literal 0 HcmV?d00001 diff --git a/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist b/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist index 7c569640..9625e105 100644 --- a/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist +++ b/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist @@ -21,6 +21,6 @@ CFBundleVersion 1.0 MinimumOSVersion - 12.0 + 11.0 diff --git a/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj b/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj index a50a737e..d030af56 100644 --- a/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj +++ b/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj @@ -168,7 +168,7 @@ 97C146E61CF9000F007C117D /* Project object */ = { isa = PBXProject; attributes = { - LastUpgradeCheck = 1510; + LastUpgradeCheck = 1300; ORGANIZATIONNAME = ""; TargetAttributes = { 331C8080294A63A400263BE5 = { @@ -344,7 +344,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 12.0; + IPHONEOS_DEPLOYMENT_TARGET = 11.0; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = iphoneos; SUPPORTED_PLATFORMS = iphoneos; @@ -472,7 +472,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 12.0; + IPHONEOS_DEPLOYMENT_TARGET = 11.0; MTL_ENABLE_DEBUG_INFO = YES; ONLY_ACTIVE_ARCH = YES; SDKROOT = iphoneos; @@ -521,7 +521,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 12.0; + IPHONEOS_DEPLOYMENT_TARGET = 11.0; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = iphoneos; SUPPORTED_PLATFORMS = iphoneos; diff --git a/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme b/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme index 8e3ca5df..e42adcb3 100644 --- a/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme +++ b/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme @@ -1,6 +1,6 @@ { - HomeScreenCubit() : super(const HomeScreenState()) { - _updateChain(); - } - - RunnableSequence? 
+  HomeScreenCubit() : super(const HomeScreenState());
 
-  void onProviderChanged(final Provider provider) {
-    emit(
-      state.copyWith(
-        status: HomeScreenStatus.idle,
-        provider: provider,
-        response: '',
-      ),
-    );
-    _updateChain();
+  void onClientTypeChanged(final ClientType clientType) {
+    emit(state.copyWith(clientType: clientType, response: ''));
   }
 
-  void onModelChanged(final String model) {
-    final newModel = {
-      ...state.model,
-      state.provider: model,
-    };
-    emit(state.copyWith(model: newModel));
-    _updateChain();
+  void onOpenAIKeyChanged(final String openAIKey) {
+    emit(state.copyWith(openAIKey: openAIKey));
   }
 
-  void onApiKeyChanged(final String apiKey) {
-    final newApiKey = {
-      ...state.apiKey,
-      state.provider: apiKey,
-    };
-    emit(state.copyWith(apiKey: newApiKey));
-    _updateChain();
-  }
-
-  void onBaseUrlChanged(final String baseUrl) {
-    final newBaseUrl = {
-      ...state.baseUrl,
-      state.provider: baseUrl,
-    };
-    emit(state.copyWith(baseUrl: newBaseUrl));
-    _updateChain();
+  void onLocalUrlChanged(final String localUrl) {
+    emit(state.copyWith(localUrl: localUrl));
   }
 
   void onQueryChanged(final String query) {
@@ -62,106 +27,68 @@ class HomeScreenCubit extends Cubit {
   }
 
   Future onSubmitPressed() async {
-    if (!_validateInput()) return;
-    emit(state.copyWith(status: HomeScreenStatus.generating, response: ''));
-
-    assert(chain != null);
-    final stream = chain!.stream(state.query).handleError(_onErrorGenerating);
-    await for (final result in stream) {
-      emit(
-        state.copyWith(
-          status: HomeScreenStatus.idle,
-          response: (state.response) + result,
-        ),
-      );
+    final config = _getClientConfig();
+    if (config == null) {
+      return;
     }
-  }
+    final (apiKey, baseUrl) = config;
 
-  bool _validateInput() {
-    final provider = state.provider;
-    if (provider.isRemote && (state.apiKey[provider] ?? '').isEmpty) {
-      emit(
-        state.copyWith(
-          status: HomeScreenStatus.idle,
-          error: HomeScreenError.apiKeyEmpty,
-        ),
-      );
-      return false;
-    }
-
-    if (state.query.isEmpty) {
+    final query = state.query;
+    if (query == null || query.isEmpty) {
       emit(
         state.copyWith(
          status: HomeScreenStatus.idle,
          error: HomeScreenError.queryEmpty,
        ),
      );
-      return false;
+      return;
     }
-    return true;
-  }
-
-  void _updateChain() {
-    try {
-      final provider = state.provider;
-      final model = state.model;
-      final apiKey = state.apiKey;
-
-      final chatModel = switch (provider) {
-        Provider.googleAI => ChatGoogleGenerativeAI(
-            apiKey: apiKey[provider] ?? '',
-            baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl,
-            defaultOptions: ChatGoogleGenerativeAIOptions(
-              model: model[provider] ?? provider.defaultModel,
-            ),
-          ),
-        Provider.mistral => ChatMistralAI(
-            apiKey: apiKey[provider] ?? '',
-            baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl,
-            defaultOptions: ChatMistralAIOptions(
-              model: model[provider] ?? provider.defaultModel,
-            ),
-          ),
-        Provider.openAI => ChatOpenAI(
-            apiKey: apiKey[provider] ?? '',
-            baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl,
-            defaultOptions: ChatOpenAIOptions(
-              model: model[provider] ?? provider.defaultModel,
-            ),
-          ),
-        Provider.ollama => ChatOllama(
-            baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl,
-            defaultOptions: ChatOllamaOptions(
-              model: model[provider] ?? provider.defaultModel,
-            ),
-          ),
-      } as BaseChatModel;
+    emit(state.copyWith(status: HomeScreenStatus.generating, response: ''));
 
-      chain?.close();
-      chain = Runnable.getMapFromInput('query')
-          .pipe(
-            ChatPromptTemplate.fromTemplates(const [
-              (
-                ChatMessageType.system,
-                'Your are a helpful assistant. Reply to the user using Markdown.',
-              ),
-              (ChatMessageType.human, '{query}'),
-            ]),
-          )
-          .pipe(chatModel)
-          .pipe(const StringOutputParser());
-    } catch (_) {
-      // Ignore invalid base URL exceptions
-    }
-  }
+    final llm = ChatOpenAI(
+      apiKey: apiKey,
+      baseUrl: baseUrl ?? '',
+    );
 
-  void _onErrorGenerating(final Object error) {
+    final result = await llm([ChatMessage.humanText(query)]);
     emit(
       state.copyWith(
         status: HomeScreenStatus.idle,
-        error: HomeScreenError.generationError,
+        response: result.content.trim(),
       ),
     );
   }
+
+  (String? apiKey, String? baseUrl)? _getClientConfig() {
+    final clientType = state.clientType;
+
+    if (clientType == ClientType.openAI) {
+      final openAIKey = state.openAIKey;
+      if (openAIKey == null || openAIKey.isEmpty) {
+        emit(
+          state.copyWith(
+            status: HomeScreenStatus.idle,
+            error: HomeScreenError.openAIKeyEmpty,
+          ),
+        );
+        return null;
+      }
+
+      return (openAIKey, null);
+    } else {
+      final localUrl = state.localUrl;
+      if (localUrl == null || localUrl.isEmpty) {
+        emit(
+          state.copyWith(
+            status: HomeScreenStatus.idle,
+            error: HomeScreenError.localUrlEmpty,
+          ),
+        );
+        return null;
+      }
+
+      return (null, localUrl);
+    }
+  }
 }
diff --git a/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart b/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart
index c5a95466..d76e34dd 100644
--- a/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart
+++ b/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart
@@ -6,40 +6,36 @@ class HomeScreenState extends Equatable {
   const HomeScreenState({
     this.status = HomeScreenStatus.idle,
     this.error,
-    this.provider = Provider.ollama,
-    this.model = const {},
-    this.apiKey = const {},
-    this.baseUrl = const {},
-    this.query = '',
-    this.response = '',
+    this.clientType = ClientType.openAI,
+    this.openAIKey,
+    this.localUrl,
+    this.query,
+    this.response,
   });
 
   final HomeScreenStatus status;
   final HomeScreenError? error;
-  final Provider provider;
-  final Map model;
-  final Map apiKey;
-  final Map baseUrl;
-  final String query;
-  final String response;
+  final ClientType clientType;
+  final String? openAIKey;
+  final String? localUrl;
+  final String? query;
+  final String? response;
 
   HomeScreenState copyWith({
     final HomeScreenStatus? status,
     final HomeScreenError? error,
-    final Provider? provider,
-    final Map? model,
-    final Map? apiKey,
-    final Map? baseUrl,
+    final ClientType? clientType,
+    final String? openAIKey,
+    final String? localUrl,
     final String? query,
     final String? response,
   }) {
     return HomeScreenState(
       status: status ?? this.status,
       error: error,
-      provider: provider ?? this.provider,
-      model: model ?? this.model,
-      apiKey: apiKey ?? this.apiKey,
-      baseUrl: baseUrl ?? this.baseUrl,
+      clientType: clientType ?? this.clientType,
+      openAIKey: openAIKey ?? this.openAIKey,
+      localUrl: localUrl ?? this.localUrl,
       query: query ?? this.query,
       response: response ?? this.response,
this.response, ); @@ -49,10 +45,9 @@ class HomeScreenState extends Equatable { List get props => [ status, error, - provider, - model, - apiKey, - baseUrl, + clientType, + openAIKey, + localUrl, query, response, ]; @@ -64,9 +59,12 @@ enum HomeScreenStatus { } enum HomeScreenError { - modelEmpty, - apiKeyEmpty, - baseUrlEmpty, + openAIKeyEmpty, + localUrlEmpty, queryEmpty, - generationError, +} + +enum ClientType { + openAI, + local, } diff --git a/examples/hello_world_flutter/lib/home/bloc/providers.dart b/examples/hello_world_flutter/lib/home/bloc/providers.dart deleted file mode 100644 index 4d9c364b..00000000 --- a/examples/hello_world_flutter/lib/home/bloc/providers.dart +++ /dev/null @@ -1,40 +0,0 @@ -// ignore_for_file: public_member_api_docs - -enum Provider { - googleAI( - name: 'GoogleAI', - defaultModel: 'gemini-1.5-pro', - defaultBaseUrl: 'https://generativelanguage.googleapis.com/v1beta', - isRemote: true, - ), - mistral( - name: 'Mistral', - defaultModel: 'mistral-small', - defaultBaseUrl: 'https://api.mistral.ai/v1', - isRemote: true, - ), - openAI( - name: 'OpenAI', - defaultModel: 'gpt-4o', - defaultBaseUrl: 'https://api.openai.com/v1', - isRemote: true, - ), - ollama( - name: 'Ollama', - defaultModel: 'llama3.2', - defaultBaseUrl: 'http://localhost:11434/api', - isRemote: false, - ); - - const Provider({ - required this.name, - required this.defaultModel, - required this.defaultBaseUrl, - required this.isRemote, - }); - - final String name; - final String defaultModel; - final String defaultBaseUrl; - final bool isRemote; -} diff --git a/examples/hello_world_flutter/lib/home/home_screen.dart b/examples/hello_world_flutter/lib/home/home_screen.dart index 5b117845..2b46a017 100644 --- a/examples/hello_world_flutter/lib/home/home_screen.dart +++ b/examples/hello_world_flutter/lib/home/home_screen.dart @@ -1,10 +1,8 @@ // ignore_for_file: public_member_api_docs import 'package:flutter/material.dart'; import 'package:flutter_bloc/flutter_bloc.dart'; -import 'package:flutter_markdown/flutter_markdown.dart'; import 'bloc/home_screen_cubit.dart'; -import 'bloc/providers.dart'; class HomeScreen extends StatelessWidget { const HomeScreen({super.key}); @@ -29,7 +27,10 @@ class _Scaffold extends StatelessWidget { backgroundColor: theme.colorScheme.inversePrimary, title: const Text('🦜️🔗 LangChain.dart'), ), - body: const _Body(), + body: const Padding( + padding: EdgeInsets.all(16), + child: _Body(), + ), ); } } @@ -39,201 +40,144 @@ class _Body extends StatelessWidget { @override Widget build(final BuildContext context) { - final cubit = context.read(); - return BlocListener( - listenWhen: (final previous, final current) => - previous.error != current.error, - listener: (final context, final state) { - if (state.error == HomeScreenError.generationError) { - ScaffoldMessenger.of(context).showSnackBar( - const SnackBar( - content: Text( - 'An error occurred while generating the response', - ), - ), - ); - } + return BlocBuilder( + buildWhen: (final previous, final current) => + previous.clientType != current.clientType, + builder: (final context, final state) { + return Column( + mainAxisSize: MainAxisSize.min, + crossAxisAlignment: CrossAxisAlignment.start, + children: [ + _ClientTypeSelector(state.clientType), + const SizedBox(height: 16), + if (state.clientType == ClientType.openAI) + const _OpenAIKeyTextField() + else + const _LocalUrlTextField(), + const SizedBox(height: 16), + const _QueryTextField(), + const SizedBox(height: 16), + const _SubmitButton(), + const SizedBox(height: 
12), + const Divider(), + const SizedBox(height: 16), + const _Response(), + ], + ); }, - child: SingleChildScrollView( - child: Padding( - padding: const EdgeInsets.all(16), - child: Column( - mainAxisSize: MainAxisSize.min, - crossAxisAlignment: CrossAxisAlignment.start, - children: [ - const _ProviderSelector(), - const SizedBox(height: 16), - Row( - crossAxisAlignment: CrossAxisAlignment.start, - children: [ - Expanded(child: _ApiKeyTextField(cubit)), - const SizedBox(width: 16), - Expanded(child: _BaseUrlTextField(cubit)), - ], - ), - const SizedBox(height: 16), - _ModelTextField(cubit), - const SizedBox(height: 16), - _QueryTextField(cubit), - const SizedBox(height: 16), - const _SubmitButton(), - const SizedBox(height: 12), - const Divider(), - const SizedBox(height: 16), - const _Response(), - ], + ); + } +} + +class _ClientTypeSelector extends StatelessWidget { + const _ClientTypeSelector(this.selected); + + final ClientType selected; + + @override + Widget build(final BuildContext context) { + final cubit = context.read(); + return Center( + child: SegmentedButton( + segments: const >[ + ButtonSegment( + value: ClientType.openAI, + label: Text('OpenAI'), + icon: Icon(Icons.cloud_outlined), + ), + ButtonSegment( + value: ClientType.local, + label: Text('Local'), + icon: Icon(Icons.install_desktop_outlined), ), - ), + ], + selected: {selected}, + onSelectionChanged: (final Set newSelection) { + cubit.onClientTypeChanged(newSelection.first); + }, ), ); } } -class _ProviderSelector extends StatelessWidget { - const _ProviderSelector(); +class _OpenAIKeyTextField extends StatelessWidget { + const _OpenAIKeyTextField(); @override Widget build(final BuildContext context) { final cubit = context.read(); return BlocBuilder( buildWhen: (final previous, final current) => - previous.provider != current.provider, + previous.error != current.error, builder: (final context, final state) { - return Center( - child: SegmentedButton( - segments: Provider.values - .map( - (final provider) => ButtonSegment( - value: provider, - label: Text(provider.name), - icon: Icon( - provider.isRemote - ? Icons.cloud_outlined - : Icons.install_desktop_outlined, - ), - ), - ) - .toList(), - selected: {state.provider}, - onSelectionChanged: (final Set newSelection) { - cubit.onProviderChanged(newSelection.first); - }, + return TextField( + controller: TextEditingController(text: state.openAIKey), + decoration: InputDecoration( + prefixIcon: const Icon(Icons.password), + labelText: 'OpenAI API key', + filled: true, + errorText: state.error == HomeScreenError.openAIKeyEmpty + ? 'OpenAI API key cannot be empty' + : null, ), + obscureText: true, + onChanged: cubit.onOpenAIKeyChanged, ); }, ); } } -class _ModelTextField extends _BaseTextField { - const _ModelTextField(this.cubit); - - final HomeScreenCubit cubit; - - @override - String get labelText => 'Model name'; - - @override - bool get obscureText => false; - - @override - IconData get prefixIcon => Icons.link; - - @override - HomeScreenError get errorType => HomeScreenError.modelEmpty; - - @override - String get errorText => 'Model name cannot be empty'; - - @override - String onProviderChanged(final HomeScreenState state) => - state.model[state.provider] ?? 
state.provider.defaultModel; - - @override - void onTextChanged(final String value) => cubit.onModelChanged(value); -} - -class _ApiKeyTextField extends _BaseTextField { - const _ApiKeyTextField(this.cubit); - - final HomeScreenCubit cubit; - - @override - String get labelText => 'API key'; - - @override - bool get obscureText => true; - - @override - IconData get prefixIcon => Icons.password; - - @override - HomeScreenError get errorType => HomeScreenError.apiKeyEmpty; - - @override - String get errorText => 'Api API key cannot be empty'; - - @override - String onProviderChanged(final HomeScreenState state) => - state.apiKey[state.provider] ?? ''; - - @override - void onTextChanged(final String value) => cubit.onApiKeyChanged(value); -} - -class _BaseUrlTextField extends _BaseTextField { - const _BaseUrlTextField(this.cubit); - - final HomeScreenCubit cubit; - - @override - String get labelText => 'Base URL'; - - @override - bool get obscureText => false; - - @override - IconData get prefixIcon => Icons.language; - - @override - HomeScreenError get errorType => HomeScreenError.baseUrlEmpty; - - @override - String get errorText => 'Base URL cannot be empty'; - - @override - String onProviderChanged(final HomeScreenState state) => - state.baseUrl[state.provider] ?? state.provider.defaultBaseUrl; +class _LocalUrlTextField extends StatelessWidget { + const _LocalUrlTextField(); @override - void onTextChanged(final String value) => cubit.onBaseUrlChanged(value); + Widget build(final BuildContext context) { + final cubit = context.read(); + return BlocBuilder( + buildWhen: (final previous, final current) => + previous.error != current.error, + builder: (final context, final state) { + return TextField( + controller: TextEditingController(text: state.localUrl), + decoration: InputDecoration( + prefixIcon: const Icon(Icons.link), + labelText: 'Local URL', + filled: true, + errorText: state.error == HomeScreenError.localUrlEmpty + ? 'Local URL cannot be empty' + : null, + ), + onChanged: cubit.onLocalUrlChanged, + ); + }, + ); + } } -class _QueryTextField extends _BaseTextField { - const _QueryTextField(this.cubit); - - final HomeScreenCubit cubit; - - @override - String get labelText => 'Enter question'; - - @override - bool get obscureText => false; - - @override - IconData get prefixIcon => Icons.question_answer; +class _QueryTextField extends StatelessWidget { + const _QueryTextField(); @override - HomeScreenError get errorType => HomeScreenError.queryEmpty; - - @override - String get errorText => 'Question cannot be empty'; - - @override - String onProviderChanged(final HomeScreenState state) => ''; - - @override - void onTextChanged(final String value) => cubit.onQueryChanged(value); + Widget build(final BuildContext context) { + final cubit = context.read(); + return BlocBuilder( + buildWhen: (final previous, final current) => + previous.error != current.error, + builder: (final context, final state) { + return TextField( + decoration: InputDecoration( + labelText: 'Enter question', + filled: true, + errorText: state.error == HomeScreenError.queryEmpty + ? 
'Question cannot be empty' + : null, + ), + onChanged: cubit.onQueryChanged, + ); + }, + ); + } } class _SubmitButton extends StatelessWidget { @@ -267,7 +211,7 @@ class _Response extends StatelessWidget { return BlocBuilder( builder: (final context, final state) { final response = state.response; - if (response.isEmpty) { + if (response == null || response.isEmpty) { return const SizedBox.shrink(); } @@ -280,10 +224,8 @@ class _Response extends StatelessWidget { 'Response', style: theme.textTheme.headlineSmall, ), - Markdown( - data: state.response, - shrinkWrap: true, - padding: EdgeInsets.zero, + SelectableText( + state.response ?? '', ), ], ); @@ -291,64 +233,3 @@ class _Response extends StatelessWidget { ); } } - -abstract class _BaseTextField extends StatefulWidget { - const _BaseTextField(); - - String get labelText; - - bool get obscureText; - - IconData get prefixIcon; - - HomeScreenError get errorType; - - String get errorText; - - String onProviderChanged(final HomeScreenState state); - - void onTextChanged(final String value); - - @override - _BaseTextFieldState createState() => _BaseTextFieldState(); -} - -class _BaseTextFieldState extends State<_BaseTextField> { - late TextEditingController _controller; - - @override - void initState() { - super.initState(); - _controller = TextEditingController(); - } - - @override - Widget build(BuildContext context) { - return BlocBuilder( - buildWhen: (previous, current) => - previous.provider != current.provider || - previous.error != current.error, - builder: (context, state) { - _controller.text = widget.onProviderChanged(state); - return TextField( - controller: _controller, - obscureText: widget.obscureText, - decoration: InputDecoration( - prefixIcon: Icon(widget.prefixIcon), - labelText: widget.labelText, - filled: true, - errorText: - state.error == widget.errorType ? 
widget.errorText : null, - ), - onChanged: widget.onTextChanged, - ); - }, - ); - } - - @override - void dispose() { - _controller.dispose(); - super.dispose(); - } -} diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index feb099a5..05dca7e4 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -1,22 +1,6 @@ # Generated by pub # See https://dart.dev/tools/pub/glossary#lockfile packages: - _discoveryapis_commons: - dependency: transitive - description: - name: _discoveryapis_commons - sha256: "113c4100b90a5b70a983541782431b82168b3cae166ab130649c36eb3559d498" - url: "https://pub.dev" - source: hosted - version: "1.0.7" - args: - dependency: transitive - description: - name: args - sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" - url: "https://pub.dev" - source: hosted - version: "2.5.0" async: dependency: transitive description: @@ -29,10 +13,10 @@ packages: dependency: transitive description: name: bloc - sha256: "106842ad6569f0b60297619e9e0b1885c2fb9bf84812935490e6c5275777804e" + sha256: "3820f15f502372d979121de1f6b97bfcf1630ebff8fe1d52fb2b0bfa49be5b49" url: "https://pub.dev" source: hosted - version: "8.1.4" + version: "8.1.2" characters: dependency: transitive description: @@ -53,10 +37,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" + sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" url: "https://pub.dev" source: hosted - version: "0.3.4+2" + version: "0.3.4+1" crypto: dependency: transitive description: @@ -77,18 +61,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" + sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" url: "https://pub.dev" source: hosted - version: "2.2.0" + version: "1.0.3" fetch_client: dependency: transitive description: name: fetch_client - sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" + sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" url: "https://pub.dev" source: hosted - version: "1.1.2" + version: "1.0.2" fixnum: dependency: transitive description: @@ -106,74 +90,26 @@ packages: dependency: "direct main" description: name: flutter_bloc - sha256: b594505eac31a0518bdcb4b5b79573b8d9117b193cc80cc12e17d639b10aa27a + sha256: f0ecf6e6eb955193ca60af2d5ca39565a86b8a142452c5b24d96fb477428f4d2 url: "https://pub.dev" source: hosted - version: "8.1.6" - flutter_markdown: - dependency: "direct main" - description: - name: flutter_markdown - sha256: a23c41ee57573e62fc2190a1f36a0480c4d90bde3a8a8d7126e5d5992fb53fb7 - url: "https://pub.dev" - source: hosted - version: "0.7.3+1" + version: "8.1.5" freezed_annotation: dependency: transitive description: name: freezed_annotation - sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 + sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d url: "https://pub.dev" source: hosted - version: "2.4.4" - gcloud: - dependency: transitive - description: - name: gcloud - sha256: b8fbff52ff1cfdb2bb3c53eb039c0ee3745618632969b60ec25d55b31fbb36dd - url: "https://pub.dev" - source: hosted - version: "0.8.13" - google_generative_ai: - dependency: transitive - description: - name: google_generative_ai - sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be - url: 
"https://pub.dev" - source: hosted - version: "0.4.4" - google_identity_services_web: - dependency: transitive - description: - name: google_identity_services_web - sha256: "5be191523702ba8d7a01ca97c17fca096822ccf246b0a9f11923a6ded06199b6" - url: "https://pub.dev" - source: hosted - version: "0.3.1+4" - googleapis: - dependency: transitive - description: - name: googleapis - sha256: "864f222aed3f2ff00b816c675edf00a39e2aaf373d728d8abec30b37bee1a81c" - url: "https://pub.dev" - source: hosted - version: "13.2.0" - googleapis_auth: - dependency: transitive - description: - name: googleapis_auth - sha256: befd71383a955535060acde8792e7efc11d2fccd03dd1d3ec434e85b68775938 - url: "https://pub.dev" - source: hosted - version: "1.6.0" + version: "2.4.1" http: dependency: transitive description: name: http - sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 + sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" url: "https://pub.dev" source: hosted - version: "1.2.2" + version: "1.2.1" http_parser: dependency: transitive description: @@ -182,56 +118,43 @@ packages: url: "https://pub.dev" source: hosted version: "4.0.2" + js: + dependency: transitive + description: + name: js + sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 + url: "https://pub.dev" + source: hosted + version: "0.6.7" json_annotation: dependency: transitive description: name: json_annotation - sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" + sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 url: "https://pub.dev" source: hosted - version: "4.9.0" + version: "4.8.1" langchain: dependency: "direct main" description: path: "../../packages/langchain" relative: true source: path - version: "0.7.6" + version: "0.7.1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.6" - langchain_google: - dependency: "direct main" - description: - path: "../../packages/langchain_google" - relative: true - source: path - version: "0.6.3+1" - langchain_mistralai: - dependency: "direct main" - description: - path: "../../packages/langchain_mistralai" - relative: true - source: path - version: "0.2.3+1" - langchain_ollama: - dependency: "direct main" - description: - path: "../../packages/langchain_ollama" - relative: true - source: path - version: "0.3.2" + version: "0.3.1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.2" + version: "0.6.1" langchain_tiktoken: dependency: transitive description: @@ -240,37 +163,22 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.1" - markdown: - dependency: transitive - description: - name: markdown - sha256: ef2a1298144e3f985cc736b22e0ccdaf188b5b3970648f2d9dc13efd1d9df051 - url: "https://pub.dev" - source: hosted - version: "7.2.2" material_color_utilities: dependency: transitive description: name: material_color_utilities - sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec + sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" url: "https://pub.dev" source: hosted - version: "0.11.1" + version: "0.8.0" meta: dependency: transitive description: name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 url: "https://pub.dev" source: hosted - version: "1.15.0" - 
mistralai_dart: - dependency: "direct overridden" - description: - path: "../../packages/mistralai_dart" - relative: true - source: path - version: "0.0.3+3" + version: "1.11.0" nested: dependency: transitive description: @@ -279,20 +187,13 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.0" - ollama_dart: - dependency: "direct overridden" - description: - path: "../../packages/ollama_dart" - relative: true - source: path - version: "0.2.2" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.2" + version: "0.3.2" path: dependency: transitive description: @@ -305,26 +206,18 @@ packages: dependency: transitive description: name: provider - sha256: c8a055ee5ce3fd98d6fc872478b03823ffdb448699c6ebdbbc71d59b596fd48c - url: "https://pub.dev" - source: hosted - version: "6.1.2" - retry: - dependency: transitive - description: - name: retry - sha256: "822e118d5b3aafed083109c72d5f484c6dc66707885e07c0fbcb8b986bba7efc" + sha256: "9a96a0a19b594dbc5bf0f1f27d2bc67d5f95957359b461cd9feb44ed6ae75096" url: "https://pub.dev" source: hosted - version: "3.1.2" + version: "6.1.1" rxdart: dependency: transitive description: name: rxdart - sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" + sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" url: "https://pub.dev" source: hosted - version: "0.28.0" + version: "0.27.7" sky_engine: dependency: transitive description: flutter @@ -350,10 +243,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" + sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" url: "https://pub.dev" source: hosted - version: "1.3.0" + version: "1.2.0" term_glyph: dependency: transitive description: @@ -374,10 +267,10 @@ packages: dependency: transitive description: name: uuid - sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" + sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 url: "https://pub.dev" source: hosted - version: "4.4.2" + version: "4.3.3" vector_math: dependency: transitive description: @@ -386,21 +279,14 @@ packages: url: "https://pub.dev" source: hosted version: "2.1.4" - vertex_ai: - dependency: "direct overridden" - description: - path: "../../packages/vertex_ai" - relative: true - source: path - version: "0.1.0+2" web: dependency: transitive description: name: web - sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 + sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" url: "https://pub.dev" source: hosted - version: "1.0.0" + version: "0.5.1" sdks: - dart: ">=3.4.0 <4.0.0" - flutter: ">=3.19.0" + dart: ">=3.3.0 <4.0.0" + flutter: ">=1.16.0" diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index f9fe1384..c000d972 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -4,19 +4,15 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: flutter: sdk: flutter equatable: ^2.0.5 - flutter_bloc: ^8.1.6 - flutter_markdown: ^0.7.3 - langchain: ^0.7.6 - langchain_google: ^0.6.3+1 - langchain_mistralai: ^0.2.3+1 - langchain_ollama: ^0.3.2 - langchain_openai: ^0.7.2 + flutter_bloc: ^8.1.5 + langchain: ^0.7.1 + langchain_openai: ^0.6.1 flutter: uses-material-design: true diff --git 
a/examples/hello_world_flutter/pubspec_overrides.yaml b/examples/hello_world_flutter/pubspec_overrides.yaml index 5c8d37f9..93b5421a 100644 --- a/examples/hello_world_flutter/pubspec_overrides.yaml +++ b/examples/hello_world_flutter/pubspec_overrides.yaml @@ -1,22 +1,10 @@ -# melos_managed_dependency_overrides: langchain,langchain_core,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai +# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core dependency_overrides: langchain: path: ../../packages/langchain langchain_core: path: ../../packages/langchain_core - langchain_google: - path: ../../packages/langchain_google - langchain_mistralai: - path: ../../packages/langchain_mistralai - langchain_ollama: - path: ../../packages/langchain_ollama langchain_openai: path: ../../packages/langchain_openai - mistralai_dart: - path: ../../packages/mistralai_dart - ollama_dart: - path: ../../packages/ollama_dart openai_dart: path: ../../packages/openai_dart - vertex_ai: - path: ../../packages/vertex_ai diff --git a/examples/hello_world_flutter/web/flutter_bootstrap.js b/examples/hello_world_flutter/web/flutter_bootstrap.js deleted file mode 100644 index 8ce49d8a..00000000 --- a/examples/hello_world_flutter/web/flutter_bootstrap.js +++ /dev/null @@ -1,12 +0,0 @@ -{{flutter_js}} -{{flutter_build_config}} - -_flutter.loader.load({ - serviceWorkerSettings: { - serviceWorkerVersion: {{flutter_service_worker_version}}, - }, - onEntrypointLoaded: async function(engineInitializer) { - const appRunner = await engineInitializer.initializeEngine({useColorEmoji: true}); - await appRunner.runApp(); - }, -}); diff --git a/examples/hello_world_flutter/web/index.html b/examples/hello_world_flutter/web/index.html index 68ffe01a..add98e6a 100644 --- a/examples/hello_world_flutter/web/index.html +++ b/examples/hello_world_flutter/web/index.html @@ -1,25 +1,59 @@ - + - - - - + For more details: + * https://developer.mozilla.org/en-US/docs/Web/HTML/Element/base - - + This is a placeholder for base href that will be replaced by the value of + the `--base-href` argument provided to `flutter build`. 
+ --> + - Hello World Flutter - + + + + + + + + + + + + + + hello_world_flutter + + + + + - + diff --git a/examples/hello_world_flutter/web/manifest.json b/examples/hello_world_flutter/web/manifest.json index 2332c807..ab44f4f1 100644 --- a/examples/hello_world_flutter/web/manifest.json +++ b/examples/hello_world_flutter/web/manifest.json @@ -1,11 +1,11 @@ { "name": "hello_world_flutter", - "short_name": "Hello World Flutter", + "short_name": "hello_world_flutter", "start_url": ".", "display": "standalone", "background_color": "#0175C2", "theme_color": "#0175C2", - "description": "A sample Flutter app integrating LangChain.", + "description": "A new Flutter project.", "orientation": "portrait-primary", "prefer_related_applications": false, "icons": [ diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.lock b/examples/vertex_ai_matching_engine_setup/pubspec.lock index b3a0f0ae..a29715a0 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.lock +++ b/examples/vertex_ai_matching_engine_setup/pubspec.lock @@ -5,18 +5,18 @@ packages: dependency: transitive description: name: _discoveryapis_commons - sha256: "113c4100b90a5b70a983541782431b82168b3cae166ab130649c36eb3559d498" + sha256: f8bb1fdbd77f3d5c1d62b5b0eca75fbf1e41bf4f6c62628f880582e2182ae45d url: "https://pub.dev" source: hosted - version: "1.0.7" + version: "1.0.6" args: dependency: transitive description: name: args - sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" + sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596 url: "https://pub.dev" source: hosted - version: "2.5.0" + version: "2.4.2" async: dependency: transitive description: @@ -29,10 +29,10 @@ packages: dependency: transitive description: name: collection - sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf + sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a url: "https://pub.dev" source: hosted - version: "1.19.0" + version: "1.18.0" crypto: dependency: transitive description: @@ -45,58 +45,58 @@ packages: dependency: "direct main" description: name: gcloud - sha256: b8fbff52ff1cfdb2bb3c53eb039c0ee3745618632969b60ec25d55b31fbb36dd + sha256: e9501083036d5f94027ce5afddd8ddae9b04121cf2fc6036b2cdd5663b52fca7 url: "https://pub.dev" source: hosted - version: "0.8.13" + version: "0.8.12" google_identity_services_web: dependency: transitive description: name: google_identity_services_web - sha256: "5be191523702ba8d7a01ca97c17fca096822ccf246b0a9f11923a6ded06199b6" + sha256: "0c56c2c5d60d6dfaf9725f5ad4699f04749fb196ee5a70487a46ef184837ccf6" url: "https://pub.dev" source: hosted - version: "0.3.1+4" + version: "0.3.0+2" googleapis: dependency: transitive description: name: googleapis - sha256: "864f222aed3f2ff00b816c675edf00a39e2aaf373d728d8abec30b37bee1a81c" + sha256: "8a8c311723162af077ca73f94b823b97ff68770d966e29614d20baca9fdb490a" url: "https://pub.dev" source: hosted - version: "13.2.0" + version: "12.0.0" googleapis_auth: dependency: "direct main" description: name: googleapis_auth - sha256: befd71383a955535060acde8792e7efc11d2fccd03dd1d3ec434e85b68775938 + sha256: "1401a9e55f9e0f565d3eebb18d990290f53a12d38a5f7f0230b112895778a85b" url: "https://pub.dev" source: hosted - version: "1.6.0" + version: "1.5.1" http: dependency: "direct main" description: name: http - sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 + sha256: d4872660c46d929f6b8a9ef4e7a7eff7e49bbf0c4ec3f385ee32df5119175139 url: "https://pub.dev" source: hosted - version: "1.2.2" + 
version: "1.1.2" http_parser: dependency: transitive description: name: http_parser - sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" + sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" url: "https://pub.dev" source: hosted - version: "4.1.0" + version: "4.0.2" meta: dependency: transitive description: name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 url: "https://pub.dev" source: hosted - version: "1.15.0" + version: "1.11.0" path: dependency: transitive description: @@ -125,10 +125,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" + sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" url: "https://pub.dev" source: hosted - version: "1.3.0" + version: "1.2.0" term_glyph: dependency: transitive description: @@ -151,14 +151,14 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0+2" + version: "0.1.0" web: dependency: transitive description: name: web - sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 + sha256: edc8a9573dd8c5a83a183dae1af2b6fd4131377404706ca4e5420474784906fa url: "https://pub.dev" source: hosted - version: "1.0.0" + version: "0.4.0" sdks: - dart: ">=3.4.0 <4.0.0" + dart: ">=3.2.0 <4.0.0" diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.yaml b/examples/vertex_ai_matching_engine_setup/pubspec.yaml index c37f6c30..34b972bf 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.yaml +++ b/examples/vertex_ai_matching_engine_setup/pubspec.yaml @@ -4,10 +4,10 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - gcloud: ^0.8.13 - googleapis_auth: ^1.6.0 - http: ^1.2.2 - vertex_ai: ^0.1.0+2 + gcloud: ^0.8.12 + googleapis_auth: ^1.5.1 + http: ^1.1.0 + vertex_ai: ^0.1.0 diff --git a/examples/wikivoyage_eu/.gitignore b/examples/wikivoyage_eu/.gitignore deleted file mode 100644 index 3a857904..00000000 --- a/examples/wikivoyage_eu/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -# https://dart.dev/guides/libraries/private-files -# Created by `dart pub` -.dart_tool/ diff --git a/examples/wikivoyage_eu/README.md b/examples/wikivoyage_eu/README.md deleted file mode 100644 index 74bbd8e2..00000000 --- a/examples/wikivoyage_eu/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# Wikivoyage EU - -This example demonstrates how to build a fully local Retrieval Augmented Generation (RAG) pipeline with Llama 3 and ObjectBox using LangChain.dart and Ollama. - -> This example is adapted from [Ashmi Banerjee](https://ashmibanerjee.com)'s workshop "[Building a RAG using Google Gemma and MongoDB](https://colab.research.google.com/drive/1CviSVwnwl73ph-AhTB0Z8vYcOQrjityk)". - -![RAG Pipeline](rag.png) -*Figure 1: RAG Architecture (source: [Ashmi Banerjee](https://colab.research.google.com/drive/1CviSVwnwl73ph-AhTB0Z8vYcOQrjityk))* - -## Setup - -### 1. Install Ollama - -- Go to the [Ollama](https://ollama.ai/) website and download the latest version of the Ollama app. - -### 2. 
Download models - -- For this example we will be using the following models: - * Embedding model: [`jina/jina-embeddings-v2-small-en`](https://ollama.com/jina/jina-embeddings-v2-small-en) - * LLM: [`llama3.2`](https://ollama.com/library/llama3.2) -- Open your terminal and run: -```bash -ollama pull jina/jina-embeddings-v2-small-en -ollama run llama3.2 -``` - -### 3. Setup ObjectBox - -- We will be using [ObjectBox](https://objectbox.io) for our vector store. -- In order to use ObjectBox, we need to download the ObjectBox C library. You can find more information on how to do this [here](https://docs.objectbox.io/getting-started). -```bash -bash <(curl -s https://raw.githubusercontent.com/objectbox/objectbox-dart/main/install.sh) -``` - -### 4. Get dependencies - -```bash -dart pub get -``` - -## How it works - -The example has two scripts: -1. `injestion.dart`: This script reads the Wikivoyage dataset, creates embeddings from the data and stores it in the ObjectBox database. -2. `wikivoyage_eu.dart`: This script implements the chatbot implementing the RAG pipeline. - -### Ingestion - -We will be using data from [Wikivoyage](https://wikivoyage.org), a freely accessible online travel guide authored by volunteers. - -The `wikivoyage_eu_dataset.csv` file contains data from 160 European cities, including the city name, country, coordinates, population and a brief description: - -| city | country | lat | lng | population | abstract | -|-----------|-------------|---------|--------|------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Amsterdam | Netherlands | 52.3728 | 4.8936 | 1459402.0 | Amsterdam is the capital of the Netherlands. It is known for the canals that cross the city, its impressive architecture, museums and art gallerie, its notorious red light district, and more than 1,500 bridges. | - -The script does the following: -1. It uses LangChain.dart's `CsvLoader` to load the `wikivoyage_eu_dataset.csv` dataset. -2. It uses the `jina/jina-embeddings-v2-small-en` model to create embeddings for each city's data. The generated embeddings have 1024 dimensions. - + *As the data for each city is not very large, we won't be chunking it into smaller parts, but you could easily do that using the `RecursiveCharacterTextSplitter` class.* -3. It stores the embeddings in the ObjectBox vector database. - -You can run the script using: -```bash -$ dart run bin/injestion.dart -Added 160 documents to the vector store. -``` - -### Chatbot - -The chatbot script implements the RAG pipeline. It does the following: -1. Takes a user query as input. -2. Uses the `jina/jina-embeddings-v2-small-en` model to create an embedding for the query. -3. Retrieves the 5 most similar documents from the ObjectBox database. -4. Builds a prompt using the retrieved documents and the query. -5. Uses the `llama3.2` model to generate a response to the prompt. - -You can run the script using: -```bash -$ dart run bin/wikivoyage_eu.dart -``` - -![Wikivoyage EU](wikivoyage_eu.gif) - -## Conclusion - -This example demonstrates how to build a simple RAG pipeline that can run locally on your machine. You can easily extend this example to build more complex RAG pipelines with more advance retrieval and generation techniques. Check out the [LangChain.dart](https://langchaindart.dev/) documentation for more information. 
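
As one concrete example of such an extension, the Ingestion section above notes that larger documents could be split with the `RecursiveCharacterTextSplitter` class before embedding. Below is a minimal sketch of what that optional chunking step might look like, assuming the same `CsvLoader` / `OllamaEmbeddings` / `ObjectBoxVectorStore` setup used in `bin/injestion.dart`; the `chunkSize` and `chunkOverlap` values are illustrative assumptions, not values taken from this repository.

```dart
// ignore_for_file: avoid_print
// Sketch only: optional chunking before ingestion, reusing the same
// setup as bin/injestion.dart. Chunk sizes below are illustrative assumptions.
import 'package:langchain/langchain.dart';
import 'package:langchain_community/langchain_community.dart';
import 'package:langchain_ollama/langchain_ollama.dart';

void main() async {
  // Load one document per city from the dataset.
  final docs = await CsvLoader('bin/wikivoyage_eu_dataset.csv').load();

  // Optional step: split long documents into overlapping chunks
  // before embedding them (assumed parameters).
  const splitter = RecursiveCharacterTextSplitter(
    chunkSize: 500,
    chunkOverlap: 50,
  );
  final chunks = splitter.splitDocuments(docs);

  // Same embedding model and vector store as the ingestion script.
  final embeddings = OllamaEmbeddings(
    model: 'jina/jina-embeddings-v2-small-en',
  );
  final vectorStore = ObjectBoxVectorStore(
    embeddings: embeddings,
    dimensions: 512,
  );

  final ids = await vectorStore.addDocuments(documents: chunks);
  print('Added ${ids.length} chunks to the vector store.');

  embeddings.close();
}
```

The retrieval side of the pipeline would stay as shown in `bin/wikivoyage_eu.dart`; only the number and granularity of stored documents changes.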
- -For simplicity, this example is a CLI application. However, you can easily adapt this code to work in a Flutter app. To get started with ObjectBox in Flutter, refer to the [ObjectBox documentation](https://docs.objectbox.io/getting-started). diff --git a/examples/wikivoyage_eu/analysis_options.yaml b/examples/wikivoyage_eu/analysis_options.yaml deleted file mode 100644 index f04c6cf0..00000000 --- a/examples/wikivoyage_eu/analysis_options.yaml +++ /dev/null @@ -1 +0,0 @@ -include: ../../analysis_options.yaml diff --git a/examples/wikivoyage_eu/bin/injestion.dart b/examples/wikivoyage_eu/bin/injestion.dart deleted file mode 100644 index 6aa7eaa3..00000000 --- a/examples/wikivoyage_eu/bin/injestion.dart +++ /dev/null @@ -1,21 +0,0 @@ -// ignore_for_file: avoid_print -import 'package:langchain_community/langchain_community.dart'; -import 'package:langchain_ollama/langchain_ollama.dart'; - -void main() async { - final loader = CsvLoader('bin/wikivoyage_eu_dataset.csv'); - final docs = await loader.load(); - - final embeddings = OllamaEmbeddings( - model: 'jina/jina-embeddings-v2-small-en', - ); - final vectorStore = ObjectBoxVectorStore( - embeddings: embeddings, - dimensions: 512, - ); - - final ids = await vectorStore.addDocuments(documents: docs); - print('Added ${ids.length} documents to the vector store.'); - - embeddings.close(); -} diff --git a/examples/wikivoyage_eu/bin/wikivoyage_eu.dart b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart deleted file mode 100644 index 9fb076eb..00000000 --- a/examples/wikivoyage_eu/bin/wikivoyage_eu.dart +++ /dev/null @@ -1,82 +0,0 @@ -import 'dart:io'; - -import 'package:langchain/langchain.dart'; -import 'package:langchain_community/langchain_community.dart'; -import 'package:langchain_ollama/langchain_ollama.dart'; - -void main() async { - final vectorStore = ObjectBoxVectorStore( - embeddings: OllamaEmbeddings( - model: 'jina/jina-embeddings-v2-small-en', - ), - dimensions: 512, - ); - - final retriever = vectorStore.asRetriever( - defaultOptions: VectorStoreRetrieverOptions( - searchType: ObjectBoxSimilaritySearch(k: 5), - ), - ); - final setupAndRetrieval = Runnable.fromMap({ - 'context': retriever.pipe( - Runnable.mapInput( - (docs) => docs.map((d) => d.pageContent).join('\n---\n'), - ), - ), - 'question': Runnable.passthrough(), - }); - - final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - ( - ChatMessageType.system, - ''' -Here is some data from Wikivoyage about travel destinations in Europe: - - -{context} - - -Please read the Wikivoyage data carefully and consider how you can best answer the user's question using only the information provided. - -Use ANSI escape codes instead of Markdown to format your answer. -For example, `\x1B[1m\x1B[0m` will make "" bold. - -If the user's question is not about Europe, just respond with: -"I can only help you with vacation planning in Europe." -Do not provide any other suggestion if the question is not about Europe. -''' - ), - (ChatMessageType.human, '{question}'), - ]); - - final model = ChatOllama( - defaultOptions: const ChatOllamaOptions( - model: 'llama3.2', - ), - ); - const outputParser = StringOutputParser(); - final chain = setupAndRetrieval // - .pipe(promptTemplate) - .pipe(model) - .pipe(outputParser); - - stdout.writeln( - 'Hello! Ask me anything about your vacation plans in Europe, ' - 'and I will provide you with the best itinerary.', - ); - - while (true) { - stdout.write('> '); - final query = stdin.readLineSync() ?? 
''; - - if (query.toLowerCase() == 'q') { - break; - } - - final stream = chain.stream(query); - await stream.forEach(stdout.write); - stdout.write('\n\n'); - } - - chain.close(); -} diff --git a/examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv b/examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv deleted file mode 100644 index 0e775870..00000000 --- a/examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv +++ /dev/null @@ -1,161 +0,0 @@ -city,country,lat,lng,population,abstract -Aalborg,Denmark,57.05,9.9167,143598.0,"Aalborg is the largest city in North Jutland, Denmark. Its population, as of 2016, is 134,672, making it the fourth largest city in Denmark." -Adana,Turkey,37.0,35.3213,1765981.0,"Adana is a city on the Cilician Plains of central Turkey, on the Seyhan River about 50 km from the Mediterranean coast. It's industrial and mostly modern but with several places of interest in its historic centre." -Amsterdam,Netherlands,52.3728,4.8936,1459402.0,"Amsterdam is the capital of the Netherlands. It is known for the canals that cross the city, its impressive architecture, museums and art gallerie, its notorious red light district, and more than 1,500 bridges." -Ancona,Italy,43.6169,13.5167,100924.0,Ancona is the capital of the Italian region called the Marches and an important port city on the coast of the Adriatic Sea. -Ankara,Turkey,39.93,32.85,5503985.0,"Ankara is the capital of Turkey, central within the country on the plateau of Central Anatolia. It's a sprawling modern place around an ancient citadel, and in 2022 had a population of almost 5." -Antalya,Turkey,36.8874,30.7075,2426356.0,"Antalya is a city in Pamphylia on the Turkish Mediterranean coast, and the chief resort of the ""Turkish Riviera"". It's a metropolis with a population of 2." -Arad,Romania,46.175,21.3125,159074.0,There is more than one place in the world called Arad. You might be looking for: -Arkhangelsk,Russia,64.55,40.5333,351488.0,"Arkhangelsk (population 350,000 in 2018) is a regional center in Northwestern Russia, located on both banks of Northern Dvina river near its mouth on the White Sea, about 1250 km by road to the north of Moscow and about 1100 km northeast of Saint Petersburg. It is part of the Silver Ring of cultural and historical centers of Northwestern Russia." -Astrakhan,Russia,46.35,48.035,532504.0,Astrakhan (Russian: А́страхань AH-struh-khun) is a city in Russia. -Baia Mare,Romania,47.6667,23.5833,123738.0,Baia Mare is a city in north-western Romania. -Baku,Azerbaijan,40.3667,49.8352,2300500.0,Baku (Azeri: Bakı) is the capital of Azerbaijan and is the largest city in the Caucasus. Baku's Old Town has UNESCO World Heritage status. -Barcelona,Spain,41.3825,2.1769,4800000.0,"Barcelona is Spain's second largest city, with a population of nearly two million people, and the capital of Catalonia. A major port on the northeastern Mediterranean coast of Spain, Barcelona has a wide variety of attractions that bring in tourists from around the globe." -Bari,Italy,41.1253,16.8667,323370.0,"Bari (Bari dialect: Bare) is the capital of the Apulia region of Italy, on the Adriatic Sea. With a population of 317,000 (in 2019), it's the second largest city in Southern Italy after Naples." -Batman,Turkey,37.887,41.132,447106.0,"Batman (pronounced as baat-maan, not like the name of the superhero; Kurdish: Iluh) is a city in southeastern Anatolia. It is the capital of an important oil producing province." 
-Belgrade,Serbia,44.82,20.46,1378682.0,"Belgrade (Serbian: Београд, Beograd) is the capital of the Republic of Serbia and the country's largest city. Belgrade has been re-emerging as a tourist destination in the past years." -Bergen,Norway,60.3894,5.33,267117.0,"Bergen is Norway's second largest city and the most popular gateway to the fjords of West Norway. The city is renowned for its great location amidst mountains, fjords, and the ocean." -Berlin,Germany,52.52,13.405,4473101.0,"Berlin is Germany's capital and biggest city. Within the city limits, Berlin in 2022 had a population of 3." -Bologna,Italy,44.4939,11.3428,392564.0,"Bologna (Emilian: Bulåggna) is a beautiful and historic city in the Emilia-Romagna region of Northeast Italy. It has the oldest university in the Western world, a lively student population, excellent food, a striking brick terracotta-roofed cityscape, and lots to see and do." -Bordeaux,France,44.84,-0.58,260958.0,"Bordeaux is a city in the Gironde region of southwest France, standing on the River Garonne. It's the country's fifth largest city, with a population of 259,809 in 2020, and another million living in its associated towns." -Braga,Portugal,41.5503,-8.42,181494.0,"Braga is one of the five largest cities of Portugal, situated in the Minho region in the North of the country. It is known for its abundance of churches and thus called the ""city of archbishops""." -Bratislava,Slovakia,48.1439,17.1097,475503.0,"Bratislava (Hungarian: Pozsony, German: Pressburg, known as Prešporok before 1919), is the capital and largest city of Slovakia. It has a population of more than 475,000 (2021), and is the administrative, cultural and economic centre of the country." -Bremen,Germany,53.0833,8.8,566573.0,"The Free Hanseatic City of Bremen is a city in northern Germany with a major port on the River Weser. The population is 567,000 (2020)." -Brest,Belarus,52.1347,23.6569,340723.0,There is more than one place called Brest: -Brno,Czechia,49.1925,16.6083,382405.0,"Brno (pronounced Bruhno) (German: Brünn, Štatl in the local dialect) is the major city of Moravia (a historical region in the Czech Republic). It is the largest city in Moravia and the second-largest city in the Czech Republic by population and area." -Brussels,Belgium,50.8467,4.3525,1743000.0,"Brussels (French: Bruxelles, Dutch: Brussel) is the capital of Belgium and one of the three administrative regions within the country, together with Flanders and Wallonia. Apart from its role within its country, it is also an internationally important city, hosting numerous international institutions, and in particular the headquarters of NATO and the core institutions of the European Union." -Budapest,Hungary,47.4925,19.0514,2997958.0,"Budapest is the capital city of Hungary. With a unique, youthful atmosphere, world-class classical music scene, a pulsating nightlife increasingly appreciated among European youth, and last but not least, an exceptional offer of natural thermal baths, Budapest is one of Europe's most delightful and enjoyable cities." -Burgas,Bulgaria,42.503,27.4702,210813.0,Burgas (also Bourgas) is a city on the Black Sea coast of Bulgaria. It is a large industrial centre with many tourist attractions in the region. -Bursa,Turkey,40.1833,29.05,2901396.0,"Bursa is a large city in the Southern Marmara region of Turkey, 20 km inland from the Marmara coast. It's the country's fourth-largest city, with a population of 2,161,990 in 2021, and with another million living in the wider metro area." 
-Bydgoszcz,Poland,53.1219,18.0003,346739.0,"Bydgoszcz (German: Bromberg) is a major city of 360,000 in Poland and with suburban area the agglomeration has nearly 500,000. It has well preserved 19th-century architecture and was known as Little Berlin before the world wars." -Cagliari,Italy,39.2278,9.1111,154106.0,"Cagliari (Sardinian: Casteddu, ""castle""; Latin: Caralis) is the capital city of the Italian island of Sardinia." -Cheboksary,Russia,56.1333,47.25,489498.0,"Cheboksary (Russian: Чебокса́ры chee-bahk-SAH-ree) is the capital of Chuvashia in the Volga Region of the Russian Federation. About 600,000 people live here and in the nearby satellite city Novocheboksarsk." -Chelyabinsk,Russia,55.15,61.4,1202371.0,"Chelyabinsk (Russian: Челя́бинск cheel-YAH-beensk) is a big city, with more than a million inhabitants, the capital of Chelyabinsk Oblast in the European part of Russia." -Cluj-Napoca,Romania,46.7667,23.5833,324576.0,"Cluj-Napoca (Romanian), Kolozsvár (Hungarian) or Klausenburg (German) is the capital of Cluj county and the unofficial capital of the historical region of Transylvania. The city, with about 320,000 people (2016), is very pleasant, and it is a great experience for those who want to see urban Transylvanian life at its best." -Coimbra,Portugal,40.2111,-8.4292,143396.0,"Coimbra is the traditional capital city of Central Portugal's historic Beira Litoral region. With over 140,000 inhabitants (2021), it is the largest municipality there and one of Portugal's four largest metropolises." -Copenhagen,Denmark,55.6761,12.5683,1366301.0,"Copenhagen (Danish: København) is the capital city of Denmark and forms the moderate conurbation that one million Danes call home. It is big enough to form a small Danish metropolis, with shopping, culture and nightlife par excellence, yet small enough still to feel intimate and be safe." -Cork,Ireland,51.8972,-8.47,222333.0,"Cork is the principal city of County Cork in southwest Ireland. It was already the second-largest city in Ireland when in 2019 its boundaries were extended, to have a population of 210,000." -Craiova,Romania,44.3333,23.8167,269506.0,"Craiova with 306,000 inhabitants (2016), is one of the five largest cities of Romania. Craiova is in the southwestern region of the country and hosts the administrative buildings of the Dolj County and of the Oltenia district." -Debrecen,Hungary,47.53,21.6392,328642.0,[a Nagytemplom télen.jpg|thumb|400px|The Great Church of Debrecen in winter] -Denizli,Turkey,37.7667,29.0833,1027782.0,"Denizli is a city in the Southern Aegean region of Turkey, which most visitors simply transit to reach Pamukkale 20 km north. It's a typical modern Turkish city, far from picturesque, but does have enough sights of its own if your schedule allows." -Dijon,France,47.3167,5.0167,158002.0,"Dijon is the largest city in the eastern French region of Bourgogne-Franche-Comté. Dijon is best known for its mustard (named after the town), which is no longer produced in its metropolitan area, but it is still one of the most beautiful cities in France, and its historic buildings and byways were not heavily damaged by bombing in World War II and are largely intact." -Donetsk,Ukraine,48.0028,37.8053,929063.0,"Donetsk (Ukrainian: Донецьк, Russian: Доне́цк) is a city in the Donetsk People's Republic, on the banks of the River Kalmius." -Dresden,Germany,51.05,13.74,561922.0,"Dresden is the capital of Saxony (Sachsen). 
It's often referred to locally as Elbflorenz, or ""Florence on the Elbe"", reflecting its location on the Elbe river and its historical role as a centre for the arts and beautiful architecture - much like Florence in Italy." -Dublin,Ireland,53.35,-6.2603,1173179.0,"Dublin (Irish: Baile Átha Cliath, ""Town of the Hurdled Ford"") is the capital city of Ireland. Its vibrancy, nightlife and tourist attractions are world renowned and it's the most popular entry point for international visitors to Ireland." -Erfurt,Germany,50.9833,11.0333,213835.0,Erfurt is the capital of the German state of Thuringia (Thüringen). The city is the largest one in that province and likewise a major transportation hub. -Erzincan,Turkey,39.7464,39.4914,157452.0,"Erzincan is a city in Eastern Anatolia. It's modern, on a grid pattern, as its predecessor was destroyed by an earthquake in 1939." -Erzurum,Turkey,39.9086,41.2769,767848.0,"Erzurum is a city in Eastern Anatolia, and is the hub for visiting eastern Turkey." -Gaziantep,Turkey,37.0628,37.3792,2028563.0,"Gaziantep is a city in Southeastern Anatolia. Although it is a major city in Turkey (counting almost 2 million inhabitants) and known as the Turkish capital of gastronomy, it counts very few international tourists." -Geneva,Switzerland,46.2017,6.1469,201818.0,thumb|200px|right|The old town of Geneva in the winter -Hamburg,Germany,53.55,10.0,2484800.0,"The Free and Hanseatic City of Hamburg (Freie und Hansestadt Hamburg) is Germany's second-largest city and, at the same time, one of Germany's 16 federal states or Bundesländer. Prior to the formation of the modern German state, Hamburg for centuries enjoyed a status as de facto independent city state and regional power and trade hub in the North Sea." -Helsinki,Finland,60.1708,24.9375,1268296.0,Helsinki (Helsingfors in Swedish) is Finland's capital and largest city. Helsinki combines modern and historic architectural styles with beautiful open spaces. -Innsbruck,Austria,47.2683,11.3933,132493.0,"Innsbruck is the fifth-largest city in Austria and the provincial capital of Tyrol, as well as one of the largest cities in the Alps. It is in a valley of the river Inn between mountain ranges of above 2000 m above sea level, halfway between Bavaria and northern Italy, and is a hub of a region popular for skiing and other mountain-related activities and a busy tourist destination." -Ioannina,Greece,39.6636,20.8522,113094.0,"Ioannina (Ιωάννινα) (population: 112,486 (2011)) is a beautiful city in Northern Greece whose old town is surrounded by tall defensive walls." -Isparta,Turkey,37.7647,30.5567,258375.0,"Isparta (Greek: Σπάρτη, Baris) is a city of 220,000 inhabitants in the Lakes District of Mediterranean Turkey." -Istanbul,Turkey,41.0136,28.955,16079000.0,"Istanbul (Turkish: İstanbul) is a very large city of fantastic history, culture and beauty. Called Byzantium in ancient times, the city's name was changed to Constantinople in 324 CE when it was rebuilt by the first Christian Roman Emperor, Constantine." -Ivano-Frankivsk,Ukraine,48.9228,24.7106,238196.0,"Ivano-Frankivsk (Ukrainian: Івано-Франківськ, also transliterated Ivano-Frankovsk from Russian: Ивано-Франковск) (formerly in Polish: Stanisławów, German: Stanislau) is a city in the Ukrainian part of East Galicia." 
-Izmir,Turkey,38.42,27.14,4320519.0,"thumb|270px|Clock tower in Konak Square, iconic symbol of the city" -Kahramanmaras,Turkey,37.5833,36.9333,443575.0,"Kahramanmaraş, which used to be known as Maraş, is a city in Turkey, located on the crossroad of southern, eastern and southeastern Turkey." -Kaliningrad,Russia,54.7003,20.4531,475056.0,"Kaliningrad (Russian: Калинингра́д kuh-leen-een-GRAHD) , also known by its original German name, Königsberg, is the capital city of Kaliningrad Oblast in Russia. It has about 475,000 inhabitants (2018)." -Kars,Turkey,40.6078,43.0958,115891.0,"Kars is a city in Eastern Anatolia. It is most frequently visited as a jumping off point for travelers going to Ani, but it is a viable destination in its own right for its 19th-century Russian imperial buildings, and, of course, its role as the setting for Orhan Pamuk's famous novel Snow." -Kaunas,Lithuania,54.8972,23.8861,381007.0,"Kaunas is the second-largest city in Lithuania, with a population of some 288,000 people. The main reason to visit is its charming Old Town, connected to the 19th century New Town ranged along Laisvės alėja." -Kayseri,Turkey,38.7225,35.4875,1389680.0,"Kayseri is a city in Central Anatolia, 350 km southeast of Ankara. In 2021 the population was 1." -Kazan,Russia,55.7964,49.1089,1243500.0,Kazan (Russian: Каза́нь kuh-ZAHN) is the capital of Russia's republic of Tatarstan and the center of the world Tatar culture. -Kharkiv,Ukraine,49.9925,36.2311,1446107.0,"Kharkiv (Ukrainian: Харків, also transliterated Kharkov from Russian: Харьков) is a major city in the Kharkiv region of Ukraine and is the second largest city in Ukraine with a population of over 1.5 million inhabitants." -Kiel,Germany,54.3233,10.1394,246601.0,"Kiel is the capital city of the German state of Schleswig-Holstein and has a population of roughly 248,000 (2018). It is located on the Baltic Sea at the end of the ""Kieler Förde""." -Kirov,Russia,58.6,49.65,501468.0,"Kirov (Russian: Ки́ров KEE-ruhf) is the capital city of Kirov Oblast, Russia." -Klagenfurt,Austria,46.6167,14.3,101403.0,Klagenfurt (Slovenian: Celovec) is the capital of Carinthia in Austria. It was one of the eight host cities in the 2008 European Football Championships. -Konya,Turkey,37.8667,32.4833,2232374.0,"Konya is a city in Central Anatolia in Turkey, known as the city of ""whirling dervishes"" and for its outstanding Seljuk architecture. In 2021 Konya metropolis had a population of 2,277,017, the sixth largest in Turkey, but the area of most interest is compact." -Krasnodar,Russia,45.0333,38.9667,948827.0,"Krasnodar is the capital of Krasnodar Krai in southern Russia, with a popolulation in 2018 of just under 900,000. Its main industries are based on agriculture and food." -Kutaisi,Georgia,42.25,42.7,147900.0,"Kutaisi is a city in the Rioni Region of Georgia. The city itself is very cinematographic and charming, and a visit to Kutaisi is almost mandatory to see the Bagrati Cathedral and Gelati Monastery, which are UNESCO World Heritage sites and offer views from the mountain slopes over the city and the Rioni River." -Lille,France,50.6278,3.0583,234475.0,"Lille (Dutch: Rijsel) is the capital of the Hauts-de-France region in northern France and the core of one of the largest metropolitan agglomerations in the country. Historically, it has also been the capital of Flanders, and later an industrial powerhouse, thanks to which it now boasts a large and handsome historic centre." 
-Ljubljana,Slovenia,46.0514,14.5061,286745.0,"Ljubljana (""lee-oo-blee-AH-nuh"") is the small but delightful capital of Slovenia. While the city's population had grown to 295,500 in 2020, the sights and amenities are concentrated in the charming old centre." -London,United Kingdom,51.5072,-0.1275,11262000.0,"Noisy, vibrant and truly multicultural, London is a megalopolis of people, ideas and frenetic energy. The capital and largest city of the United Kingdom sits on the River Thames in South-East England, Greater London has a population of a little over 9 million." -Luxembourg,Luxembourg,49.6117,6.1319,132780.0,"The Grand Duchy of Luxembourg (Luxembourgish: Groussherzogtum Lëtzebuerg, French: Grand-Duché de Luxembourg, German: Großherzogtum Luxemburg), is a landlocked Benelux country at the crossroads of Germanic and Latin cultures." -Lviv,Ukraine,49.8425,24.0322,724314.0,"Lviv (also spelled L'viv; Ukrainian: Львів; Polish: Lwów, German: Lemberg, Russian: Львов), formerly known as Lvov after its Russian name, is in Western Ukraine and used to be the capital of East Galicia. It's the biggest city of the region and a major Ukrainian cultural centre on the UNESCO World Heritage List." -Lyon,France,45.76,4.84,522969.0,"Lyon is the capital of the French administrative region of Auvergne-Rhône-Alpes. A city of half a million, Lyon alone is the country's third-largest city, but its metropolitan area is only second in population to Paris." -Maastricht,Netherlands,50.85,5.6833,277721.0,"By many considered to be the most beautiful city of the country, Maastricht is the southernmost city in the Netherlands. It's the capital of the province of Limburg and famous for what the Dutch call the ""Burgundian"" way of life." -Madrid,Spain,40.4169,-3.7033,6211000.0,"Madrid is Spain's capital and largest city. A city that has been marked by Spain's varied and tumultuous history, Madrid has some of Europe's most impressive cultural and architectural heritage, which includes grand avenues, plazas, buildings and monuments, world-class art galleries and museums, highly popular football teams, and cultural events of international fame for everyone." -Magdeburg,Germany,52.1317,11.6392,236188.0,"Magdeburg is the capital city of the Bundesland of Saxony-Anhalt, Germany, with a population of 240,000 (2018). Magdeburg has become a modern city with numerous interesting sights of high importance and uniqueness, as well as many parks, which make Magdeburg the third greenest city in Germany." -Malatya,Turkey,38.3486,38.3194,426381.0,thumb|350px|New Mosque at the central square -Milan,Italy,45.4669,9.19,1366180.0,"Milan (Italian: Milano; Milanese: Milan) is financially the most important city in Italy, and home to the Borsa Italiana stock exchange. It is the second most populous city proper in the country, but sits at the centre of Italy's largest urban and metropolitan area." -Minsk,Belarus,53.9,27.5667,2009786.0,"Minsk (Belarusian: Мінск, Russian: Минск) is the capital and largest city of the Republic of Belarus. Its population is about two million people in 2021." -Miskolc,Hungary,48.0833,20.6667,150695.0,"Miskolc, with population of about 157,000 (2017), is the third largest city in Hungary, located in the north-east of the country, east of Bükk mountains." -Moscow,Russia,55.7558,37.6178,17332000.0,"Since its founding in 1147, Moscow (Russian: Москва, Moskva) has been at the crossroads of history as the capital of empires and a frequent target for invaders. 
As the capital of the Russian Empire, the Soviet Union, and, today, the Russian Federation, it has played a central role in the development of the largest country in the world." -Munich,Germany,48.1375,11.575,2606021.0,"Munich (German: München, Bavarian: Minga) is the capital of the federal state of Bavaria in the south of Germany. Within the city limits, Munich in 2021 had a population of just under 1." -Murcia,Spain,37.9861,-1.1303,672773.0,You could be looking for: -Murmansk,Russia,68.9706,33.075,298096.0,"Murmansk (Russian: Му́рманск) is a city in the extreme northwest of Russia and the world's largest city north of the Arctic Circle. It lies in the Kola Bay on the Kola Peninsula, by the Barents Sea." -Mykolaiv,Ukraine,46.975,31.995,498748.0,"Mykolaiv (Ukrainian: Миколаїв, also transliterated Nikolaev or Nikolayev from Russian: Николаев) is a city in Southern Ukraine. It is an important shipbuilding centre and transportation hub for Ukraine, and has a large military presence." -Nalchik,Russia,43.4833,43.6167,265162.0,"Nalchik is the capital city of Kabardino-Balkaria, a republic located in the very south of the Russian Federation." -Nantes,France,47.2181,-1.5528,318808.0,"Nantes (Breton: Naoned) is the capital of Pays de la Loire region in northwest France. Historically it was part of Brittany, whose dukes built up its castle and made the town their capital." -Naples,Italy,40.8333,14.25,966144.0,"Naples (Italian: Napoli; Neapolitan: Napule) in Italy, an ancient port on the Mediterranean sea. With just short of a million citizens, is the third most populous municipality." -Nevsehir,Turkey,38.6264,34.7139,153117.0,"Nevşehir is one of the major cities in Cappadoccia Region, which displays a beautiful combination of nature and history. The traditional main sources of income of the city, carpet weaving and viticulture, have been overtaken by tourism, because of its proximity to the underground shelters, the fairy chimneys, monasteries, caravanserais and the famous rock-hewn churches of Göreme." -Nicosia,Cyprus,35.1725,33.365,330000.0,Nicosia (Greek: Λευκωσία; Turkish: Lefkoşa) is the capital of Cyprus and is the largest city by far. -Novi Sad,Serbia,45.2542,19.8425,380000.0,thumb|right|350px|Freedom square (Trg Slobode) -Oradea,Romania,47.0722,21.9211,196367.0,"Oradea is one the few undiscovered gems of Romania's tourism. Despite being one of the largest and most important cities in Transylvania, and having a high degree of administrative, economic and commercial importance, it is often overlooked by tourists in favor of other Transylvanian cities such as Brasov, Sibiu, Sighisoara or Cluj-Napoca." -Orenburg,Russia,51.7667,55.1,564773.0,"Orenburg (Russian: Оренб'ург, Uh-rehn-BOORK) is the capital of Orenburg Oblast. Every citizen will point you the sign at the bridge across the Ural river, supposedly landmarking the geographical border between Europe and Asia (the actual boundary is further east)." -Pamplona,Spain,42.8167,-1.65,203418.0,"Pamplona (Basque: Iruña) is a city in Navarra, Spain. It is most famous world-wide for its San Fermín festival, held each year from July 6-14." -Paris,France,48.8567,2.3522,11060000.0,thumb|300px|The Eiffel Tower and the river Seine -Penza,Russia,53.2,45.0,523726.0,There's more than one place called Penza: -Perm,Russia,58.0139,56.2489,1048005.0,"Perm (Russian: Пермь p`yehr`m`) is a city in Perm Krai, Russia." -Perugia,Italy,43.1122,12.3889,165683.0,"Perugia is a city in the Italian region of Umbria. 
It has an important university that attracts many foreign students, is a major center of medieval art, has a stunningly beautiful central area and is home of the Umbria Jazz Festival." -Petrozavodsk,Russia,61.7833,34.35,278551.0,thumb|350 px|Old and New Petrozavodsk -Plovdiv,Bulgaria,42.15,24.75,383540.0,thumb|Old Plovdiv -Podgorica,Montenegro,42.4413,19.2629,150977.0,"Podgorica (Montenegrin: Подгорица) is the capital of Montenegro. While not a typical European eye candy, the city is definitely worth visiting, owing to its interesting mix of old and new, its café culture and nightlife, and its laid back Mediterranean atmosphere." -Porto,Portugal,41.1621,-8.622,1278210.0,"Porto is Portugal's second largest city and the capital of the Northern region, and a busy industrial and commercial centre. The city isn't very populous (about 238,000 inhabitants in 2024), but the Porto metropolitan area has some 1." -Prague,Czechia,50.0875,14.4214,1335084.0,"Prague (Czech: Praha) is the capital and largest city of the Czech Republic. The city's historic buildings and narrow, winding streets are testament to its centuries-old role as capital of the historic region of Bohemia." -Pristina,Kosovo,42.6633,21.1622,161751.0,"Pristina (Albanian: Prishtinë, Serbian: Priština), the capital city of Kosovo, is not beautiful: it is messy, with centuries-old Ottoman heritage competing with communist designs and post-communist architectural monstrosities. However, there is a powerful draw to this city of 162,000 people (2011), offering much to passing visitors." -Pskov,Russia,57.8167,28.3333,209840.0,"Pskov is the largest city and administrative capital of Pskov Oblast. One of the oldest cities in the country, it has preserved many unique architectural monuments of the 12th-16th centuries." -Rennes,France,48.1147,-1.6794,220488.0,"Rennes is the chief city of Brittany in northwest France. It's mostly modern and industrial, but has many grand 18th and 19th century buildings, and survivors of earlier times." -Riga,Latvia,56.9489,24.1064,920643.0,"Riga is the financial, creative, and cultural centre of Latvia. It is the capital and the largest city in Latvia, it is also the largest city in the Baltic States." -Rijeka,Croatia,45.3272,14.4411,191293.0,"Rijeka (literally ""River"" in Croatian language) is a city in Kvarner Bay, a northern inlet of the Adriatic Sea in Croatia. It is the principal seaport of the country." -Rivne,Ukraine,50.6192,26.2519,246574.0,"Rivne (Ukrainian: Рівне, also transliterated Rovno from Russian: Ровно) (Polish: Równe) is a city in Western Ukraine." -Rome,Italy,41.8931,12.4828,2872800.0,"Rome (Italian and Latin: Roma), the 'Eternal City', is the capital and largest city of Italy and of the Lazio region. It's the famed city of the Roman Empire, the Seven Hills, La Dolce Vita, the Vatican City and Three Coins in the Fountain." -Rouen,France,49.4428,1.0886,112321.0,"Rouen is the capital of the French region of Upper Normandy on the River Seine, 135 km (approximately 90 minutes drive) northwest from the centre of Paris. The city has a population of 110,000 and its metropolitan area includes some 666,000 inhabitants (2017)." -Saint Petersburg,Russia,59.95,30.3167,5384342.0,"Saint Petersburg (Russian: Са́нкт-Петербу́рг Sankt-Peterburg), known as Petrograd (Петроград) in 1914-1924 and Leningrad (Ленинград) in 1924-1991, is the second largest city of Russia, with 5.6 million inhabitants (2021), and the former capital of the Russian Empire." 
-Salzburg,Austria,47.8,13.045,155021.0,"Salzburg is a city in Austria, near the border with Germany's Bavaria state, with a population of 157,000 (2020). It was the setting for the 1965 movie The Sound of Music, so you may think you know all there is to see in Salzburg if you have seen the movie." -Samara,Russia,53.2028,50.1408,1169719.0,thumb|300px|Iversky Convent -Samsun,Turkey,41.2903,36.3336,1335716.0,"Samsun, in the Central Karadeniz region of Turkey, is the largest city on the Turkish Black Sea coast." -Santander,Spain,43.4628,-3.805,172221.0,"Santander is the capital and largest city of the province of Cantabria in Spain. It's on the north coast, with many beaches, ferries from Britain, and a small historic centre." -Sarajevo,Bosnia and Herzegovina,43.8564,18.4131,419957.0,"Sarajevo is the capital of Bosnia and Herzegovina, and its largest city, with 420,000 citizens in its urban area (2013). Sarajevo metropolitan area that has a population of 555,000 also includes some neighbourhoods of ""East Sarajevo"" that are a part of Republika Srpska." -Saratov,Russia,51.5333,46.0167,845300.0,Saratov (Russian: Сара́тов suh-RAH-tuhf) is a city in the Volga region of Russia. -Satu Mare,Romania,47.79,22.89,102411.0,"Satu Mare is a city in the Maramureș region of Romania. As of 2021, it had a population of 91,520." -Sibiu,Romania,45.7928,24.1519,147245.0,"Sibiu is a town in southern Transylvania, Romania, 280 km by road from Bucharest. The old town centre is very attractive." -Siirt,Turkey,37.925,41.9458,166332.0,Siirt is a city in Southeastern Anatolia. -Simferopol,Ukraine,44.9484,34.1,341799.0,"Simferopol (Russian: Симферополь, Ukrainian: Сімферополь) is the capital city of the Crimea." -Sivas,Turkey,39.75,37.0167,377561.0,"Sivas is a city in Central Anatolia, with a population in 2020 of 335,570. By road it's 450 km east of Ankara, and stands at 1278 m elevation." -Skopje,Macedonia,41.9961,21.4317,640000.0,"Skopje (Macedonian: Скопје, Albanian: Shkup, Turkish: Üsküp) is the capital and largest city of the Republic of North Macedonia. Skopje is city of many cultures and many centuries." -Sofia,Bulgaria,42.7,23.33,1547779.0,Sofia (София) is the capital of Bulgaria. It is also the biggest city in the country with about 2 million citizens (including suburbs). -Stavanger,Norway,58.97,5.7314,237369.0,"Stavanger is Norway's fourth largest city, at 145,000 citizens (2021). It is the largest city in, and the administrative centre of, Rogaland county in West Norway." -Stavropol,Russia,45.05,41.9833,450680.0,Stravropol (Ставрополь) is a city in Russia. -Stockholm,Sweden,59.3294,18.0686,1611776.0,"Stockholm is Sweden's capital and largest city, with nearly a million inhabitants in the city, and 2.4 million within Stockholm County (as of 2021)." -Strasbourg,France,48.5833,7.7458,290576.0,"thumb|300px|Strasbourg railway station, known for the sky dome" -Stuttgart,Germany,48.7775,9.18,2787724.0,"Stuttgart is the capital of the Bundesland of Baden-Württemberg in Germany. With a population of approximately 632,000 in the immediate city (2017) and more than 5." -Syktyvkar,Russia,61.6667,50.8167,245313.0,thumb|300px|Street scene in Syktyvkar. -Szczecin,Poland,53.4325,14.5481,403833.0,"Szczecin, (pronounced Shchetsin, German: Stettin, Latin: Stetinum) is a maritime port city and the capital of Zachodniopomorskie in Poland. The city has a population of over 400,000, with almost 780,000 living in its metro area (2019)." -Tallinn,Estonia,59.4372,24.7453,438341.0,"Tallinn is Estonia's capital and largest city. 
Tallinn is an important port of the Baltic Sea, with the busy passenger section of the port reaching the foothill of the picturesque medieval Old Town, which has been astonishingly well preserved and was inscribed on the UNESCO World Heritage List in 1997." -Tampere,Finland,61.4981,23.76,334112.0,thumb|350px|View to Näsinneula tower in Tampere -Tbilisi,Georgia,41.7225,44.7925,1118035.0,"Tbilisi (Georgian: , Russian: ), is the capital city of the country of Georgia, lying on the banks of the Mtkvari River. The metropolitan area covers 726 km² (280 mi²) and has a population of approximately 1." -Thessaloniki,Greece,40.6403,22.9347,824676.0,"Thessaloniki (Greek: Θεσσαλονίκη, Albanian, Turkish: Selanik, Serbian, Bulgarian, Macedonian: Солун, Solun, Judaeo-Spanish: סאלוניקו / Saloniko, Romanian: Salonic, Aromanian: Sãrunã, French: Salonique) is the capital of the administrative region of Central Macedonia and the whole historical region of Macedonia, Greece, and is, at about one million inhabitants (2011), the second largest city in the country. More importantly, it is a city with a continuous 3,000-year history, preserving relics of its Roman, Byzantine and Ottoman past and of its formerly dominant Jewish population." -Tirana,Albania,41.3289,19.8178,418495.0,"Tirana (Albanian: Tiranë) is the bustling and relatively modernised capital of Albania. It is the most important economic, financial, political and trade centre in the country." -Toulouse,France,43.6045,1.444,493465.0,"Toulouse is the chief city of Haute-Garonne in the Occitanie region of France. It stands north of the Pyrenees on the River Garonne, halfway between the Atlantic and the Mediterranean." -Trabzon,Turkey,41.005,39.7225,426882.0,"Trabzon (formerly Trebizond) is the largest city in the Eastern Karadeniz region of Turkey. Trabzon functioned as an independent state or empire during several periods in its long history, ruling over a vast area from Sinop in the west to Georgia in the east, even including territory in Crimea." -Turku,Finland,60.45,22.2667,252468.0,"Turku (Swedish: Åbo) is Finland's oldest city and the biggest one until the mid 1800s. Believed to have been founded in the early 13th century, it is the cradle of modern Finnish culture and has extensively influenced Finnish history." -Ufa,Russia,54.7261,55.9475,1115560.0,"Ufa (Russian: Уфа́ oo-FAH, Bashkirː ӨФӨ oe-FOE), the capital of Bashkortostan, is a large, interesting, and rapidly developing city, with a population of over 1.1 million in 2018." -Uzhhorod,Ukraine,48.6239,22.295,114897.0,"Uzhhorod (Ukrainian: Ужгород, also transliterated Uzhgorod from Russian: Ужгород; Hungarian: Ungvár, German: Uschhorod) is a city in Western Ukraine, the administrative center of Zakarpatska Oblast (Transcarpthian Region). The population of Uzhhorod is multiethnic." -Valencia,Spain,39.47,-0.3764,792492.0,"Valencia (València in Catalan/Valencian) is a charming old city and the capital of the Valencian Community. With just over 830,000 inhabitants in 2023, it is Spain’s third-largest city and, after Barcelona, the most significant cultural centre along the Spanish Mediterranean coast." -Valladolid,Spain,41.6528,-4.7236,297775.0,You may be looking for: -Van,Turkey,38.4942,43.38,353419.0,"Van (pronounced vahn in Turkish, wahn in Kurdish) is a city in Eastern Anatolia, Turkey. For Turks from the other regions of Turkey, it has a surprising beach resort feel in an area where their country is farthest from the sea." 
-Varna,Bulgaria,43.2167,27.9167,348668.0,"Varna (Варна) is a large city on the Black Sea coast in the northeast of Bulgaria. It's the larger of the country's two major sea ports (the other one is Burgas), and a gateway to the seaside resorts on the northern part of the coast." -Vienna,Austria,48.2083,16.3725,1973403.0,"Vienna (German: Wien; Austro-Bavarian: Wean) is the capital of Austria and by far its most populous city, with an urban population of 2 million and a metropolitan population of 2.9 million (2023)." -Vilnius,Lithuania,54.6872,25.28,708203.0,"Vilnius is the capital and largest city of Lithuania. It has a beautiful baroque Old Town, listed as a , and excellent tourist facilities in all price ranges." -Vinnytsia,Ukraine,49.2333,28.4833,371855.0,"Vinnytsia (Ukrainian: Вінниця, also transliterated Vinnitsa from Russian: Винница) is a city in Central Ukraine, the administrative center of the Vinnytsia region. 267 km southwest of Kyiv, it has been known since the Middle Ages, and is home to a former Soviet Cold War airbase." -Vitoria-Gasteiz,Spain,42.85,-2.6833,253672.0,"Vitoria-Gasteiz (Spanish: Vitoria, Basque: Gasteiz) is in the heart of the Basque Country in Spain. The old town has some of the best preserved medieval streets and plazas in the region and it is one of very few cities with two cathedrals." -Vladikavkaz,Russia,43.04,44.6775,306978.0,Vladikavkaz is the capital city of North Ossetia and a major transit hub for the North Caucasus region. Its position on the Georgian Military Highway makes it a staging post for journeys to both Georgia and South Ossetia. -Volgograd,Russia,48.7086,44.5147,1015586.0,"Volgograd (Russian: Волгогра́д vuhl-gah-GRAHD) is a large city along the west bank of the Volga River in Southern Russia. It used to be known as Stalingrad, a name which the city is still known as on several war-related dates each year (according to local legislation)." -Voronezh,Russia,51.6717,39.2106,1050602.0,[of the Annunciation] -Warsaw,Poland,52.23,21.0111,1860281.0,Warsaw (Polish: Warszawa) is Poland's capital and largest city. Warsaw is a bustling metropolis and one of the European Union's fastest-developing capitals and the Union's ninth most populous urban centre. -Zagreb,Croatia,45.8167,15.9833,809268.0,thumb|350px|right|Ban Jelačić Square -Zaporizhzhia,Ukraine,47.85,35.1175,741717.0,"Zaporizhzhia (Ukrainian: Запоріжжя, also transliterated Zaporozhye from Russian: Запорожье) is a city in Ukraine." -Zaragoza,Spain,41.65,-0.8833,675301.0,"Zaragoza is the capital and largest city of Aragon in Spain, and one of Spain's five largest cities, but it is one of the least known outside of Spain. Founded on the river Ebro during the Roman Empire as Cesaraugusta, Zaragoza now holds a large cultural and architectural heritage attesting to 2,000 years of affluence and importance." -Zurich,Switzerland,47.3744,8.5411,436332.0,"Zurich (German: Zürich, Swiss German: Züri) is the largest city in Switzerland, with a population of some 435,000 (2018) in the city, and 1.3 million (2009) in the metro area." 
diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock deleted file mode 100644 index f242c95d..00000000 --- a/examples/wikivoyage_eu/pubspec.lock +++ /dev/null @@ -1,343 +0,0 @@ -# Generated by pub -# See https://dart.dev/tools/pub/glossary#lockfile -packages: - async: - dependency: transitive - description: - name: async - sha256: "947bfcf187f74dbc5e146c9eb9c0f10c9f8b30743e341481c1e2ed3ecc18c20c" - url: "https://pub.dev" - source: hosted - version: "2.11.0" - beautiful_soup_dart: - dependency: transitive - description: - name: beautiful_soup_dart - sha256: "57e23946c85776dd9515a4e9a14263fff37dbedbd559bc4412bf565886e12b10" - url: "https://pub.dev" - source: hosted - version: "0.3.0" - characters: - dependency: transitive - description: - name: characters - sha256: "04a925763edad70e8443c99234dc3328f442e811f1d8fd1a72f1c8ad0f69a605" - url: "https://pub.dev" - source: hosted - version: "1.3.0" - collection: - dependency: transitive - description: - name: collection - sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf - url: "https://pub.dev" - source: hosted - version: "1.19.0" - cross_file: - dependency: transitive - description: - name: cross_file - sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" - url: "https://pub.dev" - source: hosted - version: "0.3.4+2" - crypto: - dependency: transitive - description: - name: crypto - sha256: ff625774173754681d66daaf4a448684fb04b78f902da9cb3d308c19cc5e8bab - url: "https://pub.dev" - source: hosted - version: "3.0.3" - csslib: - dependency: transitive - description: - name: csslib - sha256: "706b5707578e0c1b4b7550f64078f0a0f19dec3f50a178ffae7006b0a9ca58fb" - url: "https://pub.dev" - source: hosted - version: "1.0.0" - csv: - dependency: transitive - description: - name: csv - sha256: c6aa2679b2a18cb57652920f674488d89712efaf4d3fdf2e537215b35fc19d6c - url: "https://pub.dev" - source: hosted - version: "6.0.0" - fetch_api: - dependency: transitive - description: - name: fetch_api - sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" - url: "https://pub.dev" - source: hosted - version: "2.2.0" - fetch_client: - dependency: transitive - description: - name: fetch_client - sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" - url: "https://pub.dev" - source: hosted - version: "1.1.2" - ffi: - dependency: transitive - description: - name: ffi - sha256: "16ed7b077ef01ad6170a3d0c57caa4a112a38d7a2ed5602e0aca9ca6f3d98da6" - url: "https://pub.dev" - source: hosted - version: "2.1.3" - fixnum: - dependency: transitive - description: - name: fixnum - sha256: "25517a4deb0c03aa0f32fd12db525856438902d9c16536311e76cdc57b31d7d1" - url: "https://pub.dev" - source: hosted - version: "1.1.0" - flat_buffers: - dependency: transitive - description: - name: flat_buffers - sha256: "380bdcba5664a718bfd4ea20a45d39e13684f5318fcd8883066a55e21f37f4c3" - url: "https://pub.dev" - source: hosted - version: "23.5.26" - freezed_annotation: - dependency: transitive - description: - name: freezed_annotation - sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 - url: "https://pub.dev" - source: hosted - version: "2.4.4" - html: - dependency: transitive - description: - name: html - sha256: "3a7812d5bcd2894edf53dfaf8cd640876cf6cef50a8f238745c8b8120ea74d3a" - url: "https://pub.dev" - source: hosted - version: "0.15.4" - http: - dependency: transitive - description: - name: http - sha256: 
b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 - url: "https://pub.dev" - source: hosted - version: "1.2.2" - http_parser: - dependency: transitive - description: - name: http_parser - sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" - url: "https://pub.dev" - source: hosted - version: "4.1.0" - iregexp: - dependency: transitive - description: - name: iregexp - sha256: "143859dcaeecf6f683102786762d70a47ef8441a0d2287a158172d32d38799cf" - url: "https://pub.dev" - source: hosted - version: "0.1.2" - json_annotation: - dependency: transitive - description: - name: json_annotation - sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" - url: "https://pub.dev" - source: hosted - version: "4.9.0" - json_path: - dependency: transitive - description: - name: json_path - sha256: "7a06bbb1cfad390b20fb7a2ca5e67d9ba59633879c6d71142b80fbf61c3b66f6" - url: "https://pub.dev" - source: hosted - version: "0.7.4" - langchain: - dependency: "direct main" - description: - path: "../../packages/langchain" - relative: true - source: path - version: "0.7.6" - langchain_community: - dependency: "direct main" - description: - path: "../../packages/langchain_community" - relative: true - source: path - version: "0.3.2" - langchain_core: - dependency: "direct overridden" - description: - path: "../../packages/langchain_core" - relative: true - source: path - version: "0.3.6" - langchain_ollama: - dependency: "direct main" - description: - path: "../../packages/langchain_ollama" - relative: true - source: path - version: "0.3.2" - langchain_tiktoken: - dependency: transitive - description: - name: langchain_tiktoken - sha256: c1804f4b3e56574ca67e562305d9f11e3eabe3c8aa87fea8635992f7efc66674 - url: "https://pub.dev" - source: hosted - version: "1.0.1" - math_expressions: - dependency: transitive - description: - name: math_expressions - sha256: e32d803d758ace61cc6c4bdfed1226ff60a6a23646b35685670d28b5616139f8 - url: "https://pub.dev" - source: hosted - version: "2.6.0" - maybe_just_nothing: - dependency: transitive - description: - name: maybe_just_nothing - sha256: "0c06326e26d08f6ed43247404376366dc4d756cef23a4f1db765f546224c35e0" - url: "https://pub.dev" - source: hosted - version: "0.5.3" - meta: - dependency: transitive - description: - name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 - url: "https://pub.dev" - source: hosted - version: "1.15.0" - objectbox: - dependency: transitive - description: - name: objectbox - sha256: "70ff2a7538f6f8bb56136734d574f5bdc1cf29c50cd7207a14ea0c641ecb88ca" - url: "https://pub.dev" - source: hosted - version: "4.0.1" - ollama_dart: - dependency: "direct overridden" - description: - path: "../../packages/ollama_dart" - relative: true - source: path - version: "0.2.2" - path: - dependency: transitive - description: - name: path - sha256: "087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" - url: "https://pub.dev" - source: hosted - version: "1.9.0" - petitparser: - dependency: transitive - description: - name: petitparser - sha256: c15605cd28af66339f8eb6fbe0e541bfe2d1b72d5825efc6598f3e0a31b9ad27 - url: "https://pub.dev" - source: hosted - version: "6.0.2" - rfc_6901: - dependency: transitive - description: - name: rfc_6901 - sha256: df1bbfa3d023009598f19636d6114c6ac1e0b7bb7bf6a260f0e6e6ce91416820 - url: "https://pub.dev" - source: hosted - version: "0.2.0" - rxdart: - dependency: transitive - description: - name: rxdart - sha256: 
"5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" - url: "https://pub.dev" - source: hosted - version: "0.28.0" - source_span: - dependency: transitive - description: - name: source_span - sha256: "53e943d4206a5e30df338fd4c6e7a077e02254531b138a15aec3bd143c1a8b3c" - url: "https://pub.dev" - source: hosted - version: "1.10.0" - sprintf: - dependency: transitive - description: - name: sprintf - sha256: "1fc9ffe69d4df602376b52949af107d8f5703b77cda567c4d7d86a0693120f23" - url: "https://pub.dev" - source: hosted - version: "7.0.0" - string_scanner: - dependency: transitive - description: - name: string_scanner - sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" - url: "https://pub.dev" - source: hosted - version: "1.3.0" - tavily_dart: - dependency: "direct overridden" - description: - path: "../../packages/tavily_dart" - relative: true - source: path - version: "0.1.0" - term_glyph: - dependency: transitive - description: - name: term_glyph - sha256: a29248a84fbb7c79282b40b8c72a1209db169a2e0542bce341da992fe1bc7e84 - url: "https://pub.dev" - source: hosted - version: "1.2.1" - typed_data: - dependency: transitive - description: - name: typed_data - sha256: facc8d6582f16042dd49f2463ff1bd6e2c9ef9f3d5da3d9b087e244a7b564b3c - url: "https://pub.dev" - source: hosted - version: "1.3.2" - uuid: - dependency: transitive - description: - name: uuid - sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" - url: "https://pub.dev" - source: hosted - version: "4.4.2" - vector_math: - dependency: transitive - description: - name: vector_math - sha256: "80b3257d1492ce4d091729e3a67a60407d227c27241d6927be0130c98e741803" - url: "https://pub.dev" - source: hosted - version: "2.1.4" - web: - dependency: transitive - description: - name: web - sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 - url: "https://pub.dev" - source: hosted - version: "1.0.0" -sdks: - dart: ">=3.4.0 <4.0.0" diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml deleted file mode 100644 index 5782a40f..00000000 --- a/examples/wikivoyage_eu/pubspec.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: wikivoyage_eu -description: Wikivoyage EU chatbot using llama3.2 and ObjectBox. 
-version: 1.0.0 -publish_to: none - -environment: - sdk: ">=3.4.0 <4.0.0" - -dependencies: - langchain: ^0.7.6 - langchain_ollama: ^0.3.2 - langchain_community: 0.3.2 diff --git a/examples/wikivoyage_eu/pubspec_overrides.yaml b/examples/wikivoyage_eu/pubspec_overrides.yaml deleted file mode 100644 index 5814891d..00000000 --- a/examples/wikivoyage_eu/pubspec_overrides.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,langchain_ollama,ollama_dart,tavily_dart -dependency_overrides: - langchain: - path: ../../packages/langchain - langchain_community: - path: ../../packages/langchain_community - langchain_core: - path: ../../packages/langchain_core - langchain_ollama: - path: ../../packages/langchain_ollama - ollama_dart: - path: ../../packages/ollama_dart - tavily_dart: - path: ../../packages/tavily_dart diff --git a/examples/wikivoyage_eu/rag.png b/examples/wikivoyage_eu/rag.png deleted file mode 100644 index ca46092d1d31d894f03a4eeb8070787bd5c66e28..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 18434 zcmbTd1yozX*Dsm`cXulUDK5pKIKiRN;ufq}af(wyixk)5THLj`1SwjixD*Hl3KT6~ z^o9Pv`+ax4_10VK-khxD%szYe{ASP2%sIb3iFl!@gojOy4FCY}RFoBT000y?005%J zL_tdMKZvLx1%MaouM{7UulxJofB$~$T7$K&9$uZsey5M&$3r1z^ey@$i$ z!NI|yp`mqkb!TU1@7}#b8X+bjiI0z8US7`2%R4wYI6XaWZEcm6leM+A&B@6b8ymBC za44^+xc>Y1$&)91e0*zbYi4F<{{H^k+uP30&eYV@M@L7Fj*ga=mh0>5(lRntRaG-H zGs8oJ$;ru>n3$51k|QG{Sy@?JTwEX!NLN>nfPkQ(pB&h-ana1#NmfAy;&A!v@xj3H!R*dhTw%e@{e5A{ z=cCJmiLFsr#H{1mEMWeJ%=#8=?-2fb+o&U!v(%w{X2R=R<>2)tCN;4dh#R_6Eh6}aO0vX=i=Jv?(VU*{T3X&-P?P-vojVEQ6nangOA_$=FNOw z-fB(F;p%F?i%aXvm!ByqPZk$%&(1D)celsJ7K@8-4i3f<{mp~5exY8u)frur%?Y*D z#Un#+W1)G|$-*nnS6;W1)WTHw z5O?dlK9SyH=C6S`X)b7zgE4_O`)UP_gnaM*IM5JEVwQds>09{tqzA+SCB$QrRg5s$ z#Z*wnbT}LSHF_jdsdd@h`f}nt(d?J>IW0PRcu025Os%UjoVa$M286!y@oIw&jSD!+ z49$wfd7(5WNA)aUNGkv1FqVj9uH5KG_P}=Fy8i3e+i3)-WYR{zdp~1d1+%!6Y7tf2 ztFP96^hsT}@nmUgV+E?g>s^3p`b^_~Os_?_P{A~LthW1M^F6SLMSu}|-Ehi64z0h9 znff5>b+09s4wlDKEqK3{tD`;8-hx_z~t`7uipHf z%G>PhdleiESIlGXu=@lqR6whWL3rS~l2yU?xuG{B3~g(Nlvkd&62KG{I3!_-lw+mx z=hxuL={98^liF|7X%4`xC`?rCI`T#HGmY6qGOFLUhM`y)Dy3r3A7>o44R}o%}Kpw ze&J>Ft8%rQ0cY+JHHHZpg!(7TN=^-Fz-PH;Rv%-#=Qk3t?U3BedweLIWA%;)wie|D z$daqM`H@N3cS7s+h=Z%d$2#0 zW^ypmf6kMOD*XIw$oFfGIrk(I)rZ^t!!?ww2o370uLeFFjEGk+RCrHSP4uMAJ{Fmy zyXq0iC3C}Oxk3!S$aYUQhm_`TQTRD$kdV<@QdJRsZ}t7mNnq$!&#mCal%_PwaUTzR zSW|)DMSeyNW34}P$Hn6J5Q^1v5PGUdJ`miDRpm%*W3?AR$=Lt46>s&a zRHL3gutMOatbRJ_=kqXgwAx`>XM*sXHVszhX1TPQOxmJ7aY(i*`AGofY)AhVAkT z@J4~ofB|lQ;RP!fodhtf)N?DfI7-|S9#0n19HR{mMW$d?5k#>XP>^+@BWh;N6@0#IX`ZrHRjXqea*US=n;kh2-JHgsNS7X3%de8!!twZ)>3_E>r!2HVuZ*gzU zZ!J~e#I7101^R`!jy8-SGPc})jS6Z6sffl{+D_B2p4OIQz;RX!yNcecT{0}FQ6m`m zkdCaw7qxw+TK)OcS{9j;sV;44rzQhP>WXR3`cMji8r}kNH1`bX&AY%(AY(f#$G)pS zR)GgO6W5}4JTsIZWsI(^Mqv%%Wx=)EJ&{EH;g)`CHx|{U(4wNcpT9+%o!Ah%{pS;uwqOtDe;f3TRY?^lH3UBbT z>fkaIEjuqrb78yCJ9@r>JcC47ZX&9rH~$rgYd-JU_3s%88LNz-c3+X8?l)mU&FV{k zT%3X*PL+Vs+G(8)oSeM*zU-fEo%pfyYZvHO$iR}IXx=B^mdzRn%^GnnPu>c8F^eZJ zF$Iv5$roe5oa$0ELGM(FkN1tTH;5CxRu&JA)4M+y;pnVR^))HVZNDh~yC+=n zcn|?nn=0A8EurVvjSXj@1&8WO z{cJw{wfY6oJI{s{r?YJ#_y>QnpBF?Jf5&`DjNH1{ih<5&B~NtIrS!}x0%^*=@uiP= zV*L6&5i4(dG&iJQocW>x{fyPLASbl-{o1J$ASnFgc9fM|S`CgIjOv*lvJ(XwFMN-5 zUA$T;!V{V*FrObZ#zf0U4T#fKTGJuv-s9@&t7`Al`_>AMr*49Kq+Xs|T`7Ow>g6v* z+RBsi78-dS(_XB~d9XHX?3H?8f43igXKgq~h+Q*W5itU}elYU8Z8_Xk^@ST-LnbST!r*Lhzq2_5v zq#xfrwcR4LJwdqq_l!Vvcw8CFtCee69@QfJKOfXzp}kSN4mJ6T&l~p3L<*t%iJKx` z+Lbwb1R#zPrd5g7WvAim=F`(^jM)KOqpW4LOMvAjqe=?nez>?0S8`eM1Y#JKP(xYE 
z->R*(hfO^9sffGYynrTNCL5oESv+>wy%uA<2we`n0oJ=7bR470xOMn)z*fQbsIY5V znm2>DuiaMA6dhvb&e|KCWC3LZ+^NU--rJ zt$q->-txrp`1zx6|LoN-sij05|7ru+#jF&%Okr27udck5ZAmX&T$^B zu*w*;VYuh`6R+TSmi^2A_3bIh_y|fWJrvoDh zP>v6?^D;o|UT!5ttwOis0!rJ~EVt>jv{K0Jbh3+KPYhR*z*8~D#`@c%jBE2HfHCUouanWJSL)ja2RvU}*foDu`J6VJV&4^#5H z6|14ODRN7pZ-u{x#@Dttm!dBCs0KPj4mQSJFy@+LrXL{osQj>RvUhuN$=5Yd$zl~G zyM6^`?G8-K%6E657p=5^e$zGGSh}QK86Gp}iiKF;{?)lv41Z|!IF_Nfec$+lJ+{pb z13z@Z8NLSEWw@2sDM$tRRXdx8&Y#49l~!M0$Fl6c{Yc&8pjV4?OEqKhH^hb9TY9GY$JmFKiiUK( zP9_^LRNpjH-?+?9m#I0E%}D|UinMW4%8gWnd6 zQZ}hoj+nXbYrjMhX|^e^0eFimbmON9mrRVBaLtNW0LiGYRt~!EtP6AaMSxWrSB4mHdCXSiKyeqakiqY+C z{z)9mvaIGKu?J3W3f5_AzbGmQc|m;!-*kbKxTodgmc$)(_6N7PnP;2~JiK}=aM#Lr zNc&gX@rO0Tf~)EllRX0H^KWf9&YL3qID7wphHU=WXg7iXao@6C{#pD_8*NfO0j4A+ zDJ~NkvS1C?XtQ0QAB3Kz3ilTP|FL#0@5B-h*yy(+u$XNb{|Gky&yS@Twz}J}q_2gV z2X=~(G_hJJN@||J!wSRnJdfGs|FO~T8MwF>KGu&d6t^|}S-W0)%c`fw;ZfFMpOfKk zN-NM2)DpEJ6MmH!^yQ=eD3+Jl-I22)hTS)Kl=+qY(5xKsW*q*4(@SsMGBCK$Xm|4CG__St2%ko2R^&*@vF zOPB1n!QGi|AUlBB`Gy$J& zlpJ%?+h!C|Xo}EO|9Bc9Sgb(cv{#{F_&?ts;yBO*I>k|ZG}n)I@mTbjh0Y|m$9N@+wF!8 z7Vj{6-1<@16FTv8RBPdqFA@>*aqiiqTEZ(9^1_+f9^={NXSY4}WE4J?-%HTi#0BGb zAbpNj+QlxHZoKv)cz)}gFP?}{eo^%cRL=rv`NyUgu z<+^qC1b9HPqtZEOd<^nkz{VzJk!43FxePe4v9(q0rN~j|OZG}Ssdd?%4c5xgij=B} z*}4fGAlonAOjhAv`f_|DT<|m4MES{Iqr?o8alOgdvTT@F)kf_lwuWmxusV1|qcboa z0#@ljTSVkWWmI^->GC+ujnS6|(zJdLb^PYqFy+4I_}T|d5~ z;qf?o7-7Oa-~=XUGPbyKZMLxmV%@$g!I;t#h&R{=lxXL6df!B=k!*nE1w#yFi|7U{ z3MU-R1>efSrD`mc4{o}ioB7+PF zz$#&w{Igt{Bk&q3a~y~ht#fLQNJDF?puz7i?aHTj|F=*(aG=wx>uMUSvBW#wS<-|5eu! zLl1WX5W?2YH?Ljrg7Sc%@8sSplI2;lRm<}Neuljx22Sp?$q!mL51sL<(lP+#Q)))= z5<)s;4d5Fild*kSl%x?oUQ$hkpmB&|^YI^cVUkl8x$7D)g&hj&Y_cZ51fe(8+UV6f zwK~72@)DXy5Qq{8M=4i~rU%RHbk+1V47Oh_?G!}QgWLeh50lvz*nGINNH3q&&jyN; zJY7a-GcP#X#f|qBiZy zvn06ln*yce8hzQ?rB`R{0wLcFBSy^z!ks2V6f6z4KAP$R|9$@p?!#9A)upe$M1WJN zUBiM-n=5PC=un%xl@*;viV}`W(hq2;h0thpHUrEo*V)m9OnKqL-_FnnP>fC+dHz4u zZ#+Vfoy-RTvQd^xkS~DGXe$UL+h>v8hC`Yw5GZDr%e2PRqwEwtEYzKxBRe=nX@NRC zZZ_=p@4@YoEne)L1tZ?U05VnQ?)|nu#%gLm4UHgyU}Hdjh|~ao3ju)JsfytKA1`m8 zYs;kF1}NGiP(UM-ykGQXk3sODG_aE>yK5)drR!n29mE_=zR7@_HdM-nIiHGyCy_Zo zri=e1V#1vqfnETg;%05DmQI)$1X1xXh za;rq*a^%{X?ff<~lM)C@u3_KqhA~JQ;h|~Ytm3cxG;IN68eNUaXEKMcD16wWYy6L^XB-1Z9O{2;hh&=t8o5*&)2jeD0 zhAGc&7krbequlQPe`tI2uoxS^|NES+nYMFgYTCDHUop`#DoiyklA5WcD5ga#nHDOd znbv7hQ$n(hHc6NgvQLVVXd)zCOp6rKOeJw)o-^0IeDB|V-1qSu$Mbvs)*mx+9?s8u zdA)U{Nq)T@FG7L+pQE?$M|1Uc*KF^Sn!&k>AMPQ3jG6%klaithO>FYnoF8@PPTx>& z?f9rY2`A1IIm@j~)kXy#P(m+$K42<$pAueCB39@WB|hp$_3d_ zuo@EfHAx{}CN0YzADlw#6K;XRl=d<#DHqr$PLpLE&`a}8d$9ZWF}KM)Uu`D*lqvSL zID3#Eze0V7qbJ_vrHq7T40*THTo9VT9u&%Hi2w*1%|4oH^d2)yVJ__XGfBwTE@5Qo zK?AWO!V`Xmx@yurCPK_d^m{A~P{Lm(eg6vM@uTLU8BRft4<`4b2)LwnaO{}ku0_bp zIk37Pqhp8Flcp*Ux@8`p10DCkK^7p-1?DyZ$uV>?Mb3w!G?|NmXmm>%Ux*-w17aJt z)1!a{PdOwJNz4O$N!X@9{~3ln3;<4>W@US*>%UTX^kEC0D)-@!;uCSb=uS{ZaAbPD z8lAw+2Dv~z2Lp|ZKKc+V%gFc3qlJo+@V15gs9oJ(kvO18Op$YYx$>6cyu?AdDZ%D6nRMt9aDA|buzg1sBQ?BArL%wxb4uHw)8FnQ6=R3_S&PxD_u&hWuOA^3%c zl-eEHv_3sx?*T~hiFlRXMn=1QP4JMF6O+(LRwOkI7fFI-J?t?O+PFY*N{EUVf;VV@ zdqKq<-=DS~rWUhh1Oig3=ZT^+q(rd59QrVvc#h&d9GB?{q8PiqRlq4C1Ak4%0~dBR zx3cgxn2`qmM-uzWM57oSD*#*I3HFe&sWjv@*6~f7fT0PbhuR*WuJZjgV1*!djV)Pi1*2; zaWOPeEa!PO50Y9qB4|YT%;I{`coAE}+&;r&e!+*$a6wBE5CP?6DelQaFmpAoi^QHL zMK)5P1=?+)QMyuA%Cv@}J#!y|fa7o`s6rRX~3^xeJN3 zE|%}?_3~#^WK$ef77v5uk*g?3qJ%%oMNIU^#w8+xc}E8bFfz1Lyu%BPhgnL+NW$3- zr?25wH=m<@C*Y3k3GQr2yP31E=iI=2fF*Ckfl3s?9&bs}GUcCZtU3?UFke2Lz-)Y3zd=v16}<;uBG;gRKYvd1{G*X`y{a_})~KTmO}@N3r4aziIM?00 zpe@*1=B4BzMA?!;5NE-Aet?C>sYE$D#R2uBKU`Pjgl&0v$vaYOWs1x6&{p)>1|_;% 
z5=!314qGESCDBy&O}V_OQ}g4N%W@;3>PNN3n;YKt;QB3;zBb|yO86#c8wQLt#cG=A z_zQ}E^Y_h#J{kdJRNSU^(8so1+8^OEo#m1ou+r+5i${yfT+BBT8-B!FWZQ`bDOTj{q?aN@rUwOm;QqBy7@mSZ~q*pOOfRz%G>qJ z@9zCWdEsJRcBquniPIK_ph z4oeL<(tAG7+1Z$Pjs`)@(*`KyRKLB=itR+ykEv!43hA$Nv%i7Hz|cej!P;Au@Y6(y z#y7G1tigfD6^-bEvSwmsx%`q2P5-b!4G=+E(| z!0>6P9#VhzVYixm&B8YaW!;!>dChmaCZpvEUO*keN}#i!{Bw4L<4El{UD(kiDI`gs zG?{OQ9~zkJwI<88QZBZoe+1Itk^1L>`h()9W&o~m2#yrn*EBYjsKYdf&s@FE`~{;t zas&kqxYoS#MJ&8O>$>3bj3hM{hE#C;0Chb7_C3|IS5l(Oq8W7Vu_YRG>$r(b;_=5Y zD<|aS177EvBoC;vfq20xR0V;1)`K4mJUh`8+$-rc*4OtVjN*)*5DR2Phk zoKp=Z%Tp1ehV^LkynFOf$?69>_CR=xWPWz_DlgdfZB@C%du`HejKLkHo}E{(i8~!W4b;fNJ@xvZI*#8y zh=h0CHUu5Mvfw+Ashx*W6&TveQsv2lY{fc;DzDIKwAR0L+1pcVZ#r=xtcJrUj&7ee zvPr*BbKA{TaxhaeshIr0jr+i{=GF6_{r79KA@C!S+OEl=Uhy%k7}gh(W-0kjC3q8U z?wAf7iGmFYOZEtA*q_Mv*9n_T5D#k<@o^8GkQ`9os5@iQ@@eeL#Ot>Y@PK!5Xs4GW zB{8H1m04VGx{q=PbwY5sQ2v%(`_^p zDC^K!$}}^biwy=49HZ30NTeD{1+3rFkwaI@PBH2r4Y2iU?LF>4}o6i$)j}) z38>Q8q0`|h(45fcz=y+h_hAHP;}RsY6|QTK>((4?mT`@X(HSHe8Q^I-|L$^6Vjkg? zB6Vs7CoXF93ibw-Is0%8fdrt`9Y>UZQV9oMQc71-MLJo+4vpu|g@BErjx3bAOz1bs zAqs-Md(W+%9+lo$E-qhZ)OjM6ALND{&eHzy4B=ErV?}6R1`efl$%1tc-)+ReZp3)N z-Sz2WRsqoH*>z*@>s(~cn3^nwMPz<2-5XO4fQz|gC5p_peUHjaV@#a??M+#n28Z4K z3PU^T>%?=o&Ix;pQg(`vrW6YG@vCPWkRtWhB;SqqcrVhp`mhIy=H5H@nQ4==2(=Kv zN?iXui{f83s1fJrS>evNHK@MM>ccO0E)48+#evK&h(=YC$;~DJvXM zXBmPkF3>@%dKf)p)p~NPLz&k zm25!8Iuw2!DnA|IZ@DbUky6IObO#&}d&Wi29DVM2ke5+XnNQ~Em+-)=HQHo$5_I|p z5mfCPrzd4Y_PtMnggJUGf~zqkG{E&>^|WPxxfIV* z{%OnKM{Ek4S;)~c;sM*Y!gvWZ5;mCv@W}2H-rk?K*wBW^9>>W1cZ{kVq5zP4T+ICokZtPR@jo~Ptsqu-a- zR-`q8O=)rEw#jB%~Ic`3JDb+6GrGJn&vYdQ=yTl_vaiX=rUQ~=%2 z@qjL9(3#56$#&aLcGm}zI;DBI+8!okK{(qF3!xR)ZXz$b#} zu9n^RerM-i+A4^srd=gETjM?-$s~mD3JnOBhx+j2{mCTcLs7?a)Z0`s7eU}@RJhIGSl zmfVQW8jdma4(mp;IwL_gqgezvOL>ouF9~HzVdF)}If~o%`!J9q1Dz87mG)w_yzoJ# zv{ji1iB3$VZi|c8GzrV21;@qcEwI6U;Xyr=d=^zpX;kHvzIIQN{R2G5lcSxbtd(z& z&zZ!1Z`Yq>Yi39%=|~scur()x9M5j!WF6L%aZb{a(e;d3ezaRBvB8`*8|6A4q~{8= zXDRmMyy*6MeDxwdy6Kl@nF=9hJ5HM;M#@#L)rlZ=jBv8JC~u`=MM)t+V%?1?;O(EJn}~c5OI&?uM4iLqlYxR!=-gGAqqBkfn$0GO+w|AGc_;g1{(vGg$Ja`w)Vz zseabvF@{0kY|s@{I%9TdX0WtXM(1~>YLz+{dE6HW<}#Y$k=a}YsJ*J8tla{iC1M-v zOR?!?>c`8jIm2ve*agZe)$?TrtIO3N>FWezRo0hV+$gs)$DMp%W+fss_sT%w2#z?q0SimU{vSQ%Uqbo|K4 zxz)!gvYm5SGJx@6t&|#&EmX6ekl?6uLFMW1$B#I} zN|*}UNd}xT+Zry|C)jZt8EAwAY_7vWAUWstabEg~w)N`Td|($54k=Hn-X8mIQcR!#~%?u|gbVJs64H0(I}=zaL1JWSg?D$qN1GQs@R z$2^E!@$S0 zB+VKxXW6~a%}Ub>MlrgUqG~ORnzcV_NNj}V{nATO_y+H*0OJxj1~8a_k9j zhq?d@6$d%<)5QOaJ57TOH*Ek9-89Wmp!{c77_VJHL%|{>M#x6y zVx+SD67E^IjWY6by?f(a&_!}+de4!aj7}_ZSzx4R-s7$cgsmAta(T45l4RQ*62vt2 zW;>t@zn{zEJ!BipMENz$P4;Dm{D!SkAKm`EV&&gI<-mCiGJytWBZmcG0(7+feS|8@zWRGpC7ZRBnD@2N@(kzYWfr0AJeSbi1xDg+ zkfV5|-3q1Ws2^m)`S82}J9Z%B!s~V6(rb06H#&ZP+=bddzZhM2ZRVY;t@OEIq?&>^ zU|DWI=-)vf>8;st?okWxX_s1k{5zj#bsL6Ep zf|byOCSs|RC5<**5#W>aJ~Mh{Mg96MaYoOo(Cyz%P__dReTv~0ZWCqL^n7EqUIx*W zdQk32S*o$@kckU{EX48bVEdwwaqNJ}nyi-BJJx*7IgUpWXphdh;>kF6IU-TSLS(OP z{dnGamozf2-cX;T_?)x7Rw`b+*&E=C$faEMs{?QF52s#v3&nj*726UUu;v{Nwc5 z5A|CUZhf~HzwDz&d**G^XfzLl^FBYvz@#@TX3zvD7^m7?rMT?D3fYg6>)H`bk~``U zMSIPN*>O7WXQ30R-L2%$O{^c~5^@RfS~W;hV&7Y}KYo4#T5I6Sspm;#Fyz8Qz!3S-GGOBm3$eGgvH15Hhz7px>#AbV@Lw5r}=G_Tquhj3`y4!TsT6N*)z* ztJI4I6O(gl}eDa>FDPHI!2 zgew9{^a{Q*8TT-Ir@+)0D&9(wI==f+iMhKB?a%1Sbc6zp?Dz<7dPvTg0yW?lo@W+d#QGJ$xUIr>bo zd!&Tfi6F=awL9)?Zd@&l0FeL{a<-%;dmz?zBnRzqUsAG1B+~0mF|jds23A7vpre&t z)?8a0dNNsOIwl|ShQ*0#s=-%~s-1OZI9V?dI6;cQ`mH{gB1912L4P@jn!!}6>#jzw zhuvHJA8iflhV)!<=?V6lM8I^9+Q#Abde013Oj#}LEJevN0$Xy%J6-d=^_QjV1po0 z1TLME+E83u`X{gLf=o!1g^?*boo%Wx;S?VZ0T=aXI!*}`U~BQR7UWM4hXg!;If|0y_DT3a-aZnJkl`vLR)vKz0^@ese%+!9_r~LpbKdfB}Ay{wk6-7~Cvz3!o6u 
zD!Yvt_xSy?#O5AY{E|Gt2wZ~{bV!lS9P=`HG*Z;bP7-Jj?Yo$RtI?TZc{sd<6}2n< zW2E`ekPy0DX`IY<)T9G@Xs>Q|cmmG|e`C`W?l;fIuSX_KG-&pQNm1Iu&u>b~6bDZG zA4kBBysmH9@^XtmR$Q4CaQRzKN)rrbibI;amz6|k>}^Z?tqq@j$#HKD*Nr!01z&st z1`$LuUlwyUu9Sgji0704)XGeb&(5s81}ry+go!1K3TEFc;)DBWAKWT%K3Kf3wZ!O0o73ge&}2`qbQ2nKV{lv-@Hi6fAIx}#ryFTYXCPJ z^)RLC zUcy+XUA+J5w*)zAXT_qEjv*0GBMYs3;0Mum_t}S{oJO`x6La^z;)W$YMX zacQlvtDl0dUXS@bmucgoJzbd}pHlb9y{SiaQhczfy>CdX=!eI|eLMoo* zaLIlc7G@sm^f1FKQ4wMQdr=f5WhqW^|9E5E&{-0?7@EqAw-D?!8u;EJl#!5l;?2|WsdhM4GI#@Op(B%g@gjswJ;mayJaa(^JPMD;4CpwB6ijt1WPDD?ZcvJ<`$zpWgsPgqaiN6GD=ID2){=w z`b0u|CnE2nv0bEn8gxV_YqQGgGU(AeM6~4t*xM=b3l9qlP;wH1{|Hk7$HiOn{caCz zlaW+VHf3le!bgbvVGBrSbI_LsgyPCyWr0<=ZKR{%h8yJ`Tt%8JwhHGcBVpYA26nN6 z52=ANW5CfP^l2RS9)od-4IJWu%pk-QZlt{mES0%k-M#V?bHlrpA)`P#zawpTJwE9jI>NO=+F3vi@@v9ShMB}+noA*JtQZf`hWg|d-*IuC9?W`d_E zbcxJfxypRxMMCbK(mDDe9&?Ih9T!}*Gu@j-fq+PdpGuD%t_~;QX-Hq5-Q*nBm$%BX zW69071oA`Yu=b=gvw5fE=2q5h#^G5|g?ZV$tc5A-jAsr4I-JkX!sfqQe%9dKLPW!z zcfzf0d}m=#C0GwG%3hSK*-mhyA-}J(pPwSlrdPiW&wlsp%<5QPgDuJpx7Jdy0`6e1 zqzQxYM0X6%qNp&`kfk0CF`Is7U38K|m94_ia{HX}s$IE$*4P-?ryp&iTDYr;pSlxW3K4G-h zQ0~l`79JdCp($F{XJEQPV(s2;@4>cm+!3MUWw@4e^IYd_z3vaC27_}6Nfwz(yF>iW zpBTdB{J`aYtcCYy>}fN7{VdU|!|F_jRgnSya?h%!p~hYK3t-rVo_1qf1~%2DKC--V zql@mEIb+*&l^Ug{2|A+ks!?~3?u~y9J}+fCCjr`jxxA3JG-Bo1Fd4R{4JBl8#V7^D z{MWAXIHd@ov$n~s%((L?T)Fb!yUKDQ>A10bSOv7ZaQz?jtDB!wTRFV%tklK$6f}zD z!!x8Ogd8pxJ!3y=g0h?o4S{7!zhRAJ{+8F*nPZxY_wn_^x6%?Fa8UV#?uIl~Xw~Ou zIlH7QTh^yPYE-!{^}^lemkq7M8L7qxo_x9+k9IqYi`6VKkvq3{i*5U=w$EtZd2(dH_F**CRqmQv`2Xwjg1XA%T&im9c$w!?SJ~TcYWy&5 zsjD0ixO&{{%j*wsGwhOH>+QiNa^Dwid%1bSN6L$;3knP0V{%NJVOyh&c(huNrYG2I zazgD_HsHx$Gap^dTSgM{4#?!b^7D-$OnAiAle^bVV^X>(yCMgIs?rX*&|p&S2<}PD z`<22iq`)mSOGKEf&|w^NcEHVf?)D~2_d{&=XO*OfbHj?M!%QI!iJ>-9_!yh*PKR!K z7BuRyCXP`Ks(h-tba3yc&koX^Ts-`mLdw*-m*v4UJ85LLC){#istSz@I4R!mDD2{9mW_?EAeZ$jKj#(h`?e7qSDJ=&nnv3;1mzH*J>6_ijl%zRT&`DpD`^ zzj`(q``61}-K%qg{+Uu9xyC0ntnclirdTMPym-~zxjE{?N89cZ*SF`vzy&w?lwEiB z1vLh9OnH~(k6=pR?XJp;VTx^Fi}tL!sgV z+tVq=UrlvWrA^GBaHb0X6a{0|#>*?+AreGN)y;66t99znc3s8`3mtDL8u;Sz85Cz2 zN<9c_WaIgOvpi0CHuKG2o)kuKt}2@wy)2j&6-H-KV3+_q6?k=Qt^n?@_T5izCVs+uqpgB;4aGv!7*8iH>GKRYQcCclT+Zy(~!ZK9E9@> z-ll8a6xEQXdW1gPcj?mkn9nX&_dNjfsSG}gI786a04r!KA4C$k&JQ(tpEZNEs(_@d z%G9rWT@9;x!<11z!`2UsImPl-yh9@4`JE^At7Q!=LvG%OHV){|bYnDBM6}EkHxxHHv942CnX9nRI0~g^C zG+=O&#C=#|<2Sd?esTn9P%*HxrswC|tvHxzXEGXqWt_J*UMqjDi@*wbhPr`~n?+VY z2tgP~-CCtW^ckS$-QO1p!#xKZliAhsoYK z@7=6U*Aj>6pdV_{l@WJ+o{+h7pu;RHOMpLhJsVstNpWToZ(e?W>si?gT!vGYGIN82 z)QXqy<<@L~@e)kWIO`7b zbZ7Q$h61Vxo%#*=IC1`sU+1%FQ|q5Aa%4)kD%{7r2{$@5y>din5Orp7U5?|{`I;Iz z@X_wP$a)f%isL2Cc;-9d63?W@jIMuz@i+we|F_dH;kpX~oJ)fKiEJE;=<9isu)2l3 z)V%FbcektF#M;j5fB`YN4*OOW$E@aQ%q(`{$6**?7|=aLp~##<{pk|daJm#REKYha z8#vbBp{C6%*@^^Uqw4PP+42*Py%CB4`9A3`!);cdq9O@`fuqfN8@OZ0FELcu%P>!> z#q0_U(@4W$^?KM~b%H!+SQ#TO`s&8h_{66>l#~J5tu-1IhoSw0MbHC6zRm^J8OhJH z4FnN5JlQ$NjD${8Xfe8l+~QE>%(CjuCZN*pJDDZ+Mz*w2qF_&y2b#gO*6h@Hrois% zYX#+cgqBKNm$i40lsimx(e)>?EQXu2mS01l!3N`up~M1hJVI;xO;h1ZwxZG9TuM+(HDCm`TUZbB#8BeJ zVc>4sK?2RU*4>#WMLHZah?T`IK@s;VVnhNDgfO=;&IM<>_{`aJwAa~*5`FB66+FSY zqzTGL5VqZXVC|Kx`ty#f3dSrl|Gq>XJt?ER8yeGE3vEecoY!eP1Xxvv(XkqlI?{$C zTa}ftdBiTfk>cezLDq?fYZ5L&^>dq{=4U%`V=@@-RJ`B*{IpJz*6Wy@IQdbQdoRR< zrHxpH)vp34ccP}-qV4$oh1PBi#lfI1r)Tt>$|zT2{AAp`S>niEn^C^uDX+EE>$_AAoQfdT6{x&la;)O z?wx0_x?QgDwal%G$JHM^oAr&*b3F3+qdnYm0!TTqFY8P~^tH*LY^Txh*wI-}DNc-Q zNm9nKCo~&$YRp(RyGi!Hm1=9k`ZvLqTr?%nh{yOYy!n8HjTEfD^LjBCP@=$M9Vqoi zUN1J5it3aJmCmGMcNlX(oczsi#NCi(V_+tY{! 
zaWYX?FEr01(`j&kd58skd^{D_xkr7P^~B-e+32OEwHM>9WqT&#&^i9PC6l+f|tv$-&y!wjGDmSRW zR|{{s^x+ZNeq0xN`6)%Ejqm?)g(bb__n3DgC*cz^KTF^fTpi!S zO2ZhrEAT{DNFjWZqJ9=f~@Q^#bWs&2~3}y<)I4O z_rM`LMd8Z;2zj`Te0Ec^f<3SUx`n^7!772xJ%NK0+2rCikVV?T8$nvkgI*ZDNg?gV zH%%1N&Bjm3lzY&e;D%=+M|cOjSSqHtylWmRdglmNvh4XIdb^ts&eDgY74T4qq0-C{z2${VM zhgBLU+Vs)Z%Xn@2AaqvplNsfP{X8(<1+Wzc_(w$s;R^iQH9|?w8S z&l%=MjC?~!bkn2CBn4+*LRk{hTH}|hmeI;Q z7M&~aqVR?961gLITC?m*ZSP^~1ZF1||ygsQJHdjhLWh)oqh1*SEN~m)ZVn)Aplyw9HCl`R_O8 zo3eWon=0e}f|g(Z__{~eB%`+Y-LNL41Pe0W3KpA8QA zCtoiQt&2ise*a+~6@^pw-~G$*t7X;bpB3IU4mr+BZdX)Wy_PmO_z`H7#FtuhIUz^EGdLn-O)qk^Z%Y@95m6 z(yO*(*&nMf`>6S#2ct#JT1W4v|LFrirI6Lfqc&}v!>ZKx4**v1O_c84{)$UhpC2ZN zVqHP<&yz0JZp($>SO$rB?`YZ{n^;d8((u~kXi;JFhj;5Lr>Tc!J}b&BS$$@zDDv;@ z2(IQN`FZi$rGt|S%cXPwz_9`XW8eRQV;v3IJN5n;1jm|KLds4YkEvgRV_ln?IKg~q z{^4ZG5*#aS;{1ox84G`qvJf0AM>THpEXSmAvRc_7DaUyq$zp0h$f^|7R_3Xlciyg0 zfhx6&vS^I)RUmw9XsFl3quJ*_HeNVaC51|GtuNy~U2L0Z{B-H|!pBdS1u~YO`JJlq z5K>l`aKTf5`H9b0R+I(A7IoUBlwA=?#KQE6< zxb<%v9Pu-E7AG#uKpPyNX6^xKE3p8nwo}}WF>MmxSFr!gEFaX29Pc2g*(SQEZEmur z2iOuM4~h1VaM$&-ue#^RuK()$use?mGgCU`M{9C?v6KGD<5b~M3JNpdd>M~p#5K&g zP`zBoN4*2oS}tw1=)n`{xFW19Dft;<${KCpWmpGwbIJN#hZAcYeVxB}+ejXP)_>Xh z`(3})Pr>K>n4@&vUZ(oE`{@B^?z@%P$mK6&f9-+yto(5C8-2!zx~KnP5@UHf{OpyB zJqn@Y-J@P3!pn~NzSF{CI#^<~>JT?;kc(Twc3~t?kNNA?xD( z&w_LE$Kn4{0{)M6!xM%&?vao~oKr+JjpOjk;<~cjekbnm3}bW`5PB@$n+e;4jwzy_ z5*FJ(J^w#))HHqOd(Pm*`-idAO6Uwu4NZ>TKaayz6ATGMV= z%2_Wx$53oQ#f+`jgLE~A)oF7Be(xv5aQ_?zYcm|S3o6PH*ul-o{n-KGYT6&C7O*<-*ZG68 zy^6IjGybyucgr5T;D<9)!=xy<=rG?OqP(El3B#;8#-7fdDpc3J{dWk(^2qn$IE$_Z zmyv(B?7Nj$N^2o%m=00w%hAWo=OoMEKhSoUDjK=C1g5N#@%N6irlFO123$F*nLW;S z?Y}+8^>Hfd$SiNEx*zu>Ja$UUBL+u(VMT19WP#EYgC0AC^{lVn`U89*nIRX6c0NKj zp<#rW@eZ3bjLqY!lY5~p9bR6XuzoZ5dnp?pOybIEsvqad5GYdf(PL8YFfQhd(~A-nd zNjhbYaN5_w4GB{o$oCtBHiF+{?<=IZAHK@HA%_EyHj%{!v8V?wTukHdIrszeho-rw zj(6WQQFOfb(#ltGosOI~s}69HU8N9k-;Wo2Jm-1q`vrI+f7*d(&=ySSnrM)#FEa1;$BC4yVw-axm)8z?dwc{Ww9gd)GyyZ@a}02ZQ$!}yXhRertAB9-F=N4^nSS%&Yc0!o|FcF&%$`~;N}Q{ zXqlLeJGmW!P;Zyp!atx~D?-_KJ4<%ZvUL>BV$OeaMwf_l%p$kR`gh+~b=aM|y!fm_ zS@eC4A#q;V1${8JyF>SfIG>_Wjg#hkb{c3(IG*0sO2@i8O&lZzwBl;QInM_cTO@@+ zz16DMyB{q70g+i0YSa_q?rWx)#W4yIh$rNBo16WIG7>#|kKQ7~Q4C91`C+U+yW)?MvFe%s#dwX!)UKei^w>d_Rj0n)0Vr}7aWDwhF9stm|c{1LNRpvAbz$e@B&j~9@=c)BEpb5^Qt6AShK8fUVH1VucaviHLFxM@*HJWqR|6&S^=JtzSB$<|yO!ZjTNh43kDt7;s&N_@(or?4k!;r*q-BhB$_+x+W`{e*( z8uIB4xX`Q)90o;ujpd-X&y*3hcue0a+w}GnlF{Xx2vh6d*1CUdOi|@DIEaKq1#aYk z6yLel#u97c6aJq60o%HCpe6y<4BLHby;jA30iVf?S<@XmmB{(<~eCO5Jvc7-JZ zdMDm2-Sls=6L$kqL^||c0P(cxS7;Or!mf#pcPdjBqF{qBmZy$1%XDn3y!|s|#eO>l4LTEAP02a!F94tPQHvDY`VQne!#Q`?*))4j{Ey%PYyO2+9PR) zN29Z%_~|!o>n*yo;yA(I2UBu;?wDlzJ{sAdly>bUPj{gW79#s@{z_;b+-hw>N^iCB zgvYR{iE-i93|F?wK#gwJIVYjoWg>Iv&U_?o;lZ8P{^-cH`m5ZMdw1VwuTV%a3r@4N z-jKO3$#mP-+ZBo*1{(`E~$L3Ezz8zltDu9Ez8CI_jY!@SqVXK~e zmCtsEO)(X^odIZ^$Ah;`TLhqJfRc&{O6+Hn$VTE6`zZiMoQTBx%~Aa1S?T>a5W`~w z?tpEs5>gDS;L9){1g_`~x2fAEO7u&x+tctx6V#MS{p*%wad#DxpjqEHKQip zORNyelTBta2_#fVE@EvE&Nmm6#Y4MMXvKDzgVI49EDuw`MTNA(_KA_uaz+Kmheg6{ zdB5*C88e1V_nyo#P=ooC(DGcxMi#)LZNcn`5DPH?E7z9>WQoB~6fBDc6s|-|_~0ZB z%NMA0h{1MGpAHc?!z&UC<&IwuExZbfg0&?yE-S>QY%*TT*n=Rq4gx)$|%MK5-Z zy9JJl!6^#%I#JbE5Hv-^a`=EL7o5g{pNQCG7GO#NKhYpZU5g-=uQA4sFp=~897Igv zNb9bj$G}lqa4In~&l7|gc6=`Q*d6MnqVKi=U0e_Y5mXuSCW-J@Dq*kA?buo$s>2TH zc@TnU7N=JR>q^VO>(Fa35$phB(AGXvJUQE-CAuDDzE@E~k=$T=5pdiV|4Cf_i4rpA z0qduP=s_k33H##J5grb!@Hz*a^3oHdu9L75lAJG%{N#n)RTJK-Rs=_d6CM*3FTQ?g&mmisdKy;4uo2Pyp=X?}or> zcN^4wxDnAZa+5-Eo*{3X2uI-})$y=Carp(qqAXFBT0fO=f2v5-Cl`p zYzW_(i-1O=KGE23+yJ<0l-Fzpgc_HKHpO0xs!sO{PN$nR5E-gK$Otf80Qeb4%&;&s 
z8az`dXU+l}0f3B>pA`C$1xG=yViyg{aGj7c&Y*CxE|wyv8PHEtP_(U*(2y#1vioQ! z>e6_xT0@gXm#<%IxZwYooFbP0y%*Uf22HHdk=$6QrX2=5qL*H9IGS^4 zA*QUp+N-Bp%!nK0g2B*$6;u9Q6vs``q|0KU-tj0S$X#=Z z76_?IsY02h#0Uwk%|y!TNLQPq?v+*U&oC6p11FLho6(>a*lMX-?xuN~xGAM&mV zbAl2N6Z5cdGP|F3s`!YJru;y0AmZEJ%pJ|}-dv@}sNK5O5h=~7Z``(zH3#0cj`${X z7~Cg}wWJjgaEy!zygf6@s;GJ0{@qr4;K8EfU_1|RnUZ!?)#nZKmV~XIpI8!>+H_^V zlE+3|Jm-8w>P{8d;SV_a$F>NUZ4B)U?t5xmhT>pN~T& zk;&YV_8fVw<+V6T*n<*fJ_A^jfVq=knaJ4V)j>XcR-=%KXQ zFrj*Z)Oio9L_Ek}XmL@GMVGn#@W=|*ZGv3|xuy%MH3$9_&>4@iHQlPMx+S$eOSbmO^{e#V z(YDaJt&!Ni4vF->sZFjpC&0QNwg~O9Im>02{N91D^z(myA;=NlvxSLUlFbfgY#wJI z*|Xi}l^+N0yqy(|DpWQza-|<1AA)MlsUw%~vYLG~(#}oaWKY8_w|UR0w#VRcq|@H3 zjN30J9xAt}+@=1$kEefk6=!%k78W5O2JCTw{51OnJmV#@ztOvwMwISrQzcBX^u=J!W8hQ1y+7?OAV$gvFv zXb4t#{rTrdcD6ivjz$*W?=wU4&QF{oU41=q0@?7a)MC%5d5rhIo9hW^PB~Jkx?tgZ zG)$;yVeXj=x5uIIR6dl7-J26zPY_H+TXKEkK6+?>KCXmr)K2zZUNRu%>v`x6SPZyp z>AS^W)FboVS9NH+_Q;l$Yfhc-c4&=tKfmwzGxX%Zb4c`~Cf`3;rjLVpfC1l15g3{P z5B){u2#B+{{Y%N6C0pWJ#A{o#+Mdc8K|4dkG^)+QFg`)`5OK8gS}Fz}s+B!}YnNrw z%9+TkFU3A~(+d$hZ>z5|da5|Tbe z{8i|WUF|aX4dyfwkdgS1sz=2dqe8|d=+WEHF=o@f&mYK&ZyR?ar!^S27#xb}tG_+dhF`WrXCD=TrdC-NUU-Dvu2UJ*r8+Uq#Z6bW>u z!n3c8SNMmpZgs_hD?r4>5lW+OGSC968g`9Vo1-G89CAHqRL7a#RdPO02Cm2etYJjY z_|7vpz?}K*%l;OhpNFvqr;=MO9A+|TNT=^f+^0_WY$8fy@4Ri9RepCfcMS(tm@)`? zs+0=yB>vD=UbfzG$L+U8{c@Y|##&PlwV7cJwRste=|vtkJ=M;Xff|kYB7ySkLP?YW zL69LkBPkecS~nsq@i3#M9twxFJz8nX>~qhHgGlTuXQSWz=z<5(u~Z|L5pMtetDG1m z^iGisCh_q)#D@3_wMKJV+kuKT$Eb{zgP z=KXrTp3ldlC|Ylq7VB8?RKJp&U(p5a)REHTGBN3Dw8o->kA2J)2Ms+H=ZntB>~My| z|E>R8|6+eg1X#c_jQ&6WhbChk!Tu+l2Bdxrc(?JdoK8W7!L`}}j=aNi^}%a@<#cfB zeX(^z$DH+Ug&h1#l3}TMxUYD|q1LbZ^T>J^egHmj{c53ZgP>D?(~&H%`z>#tO!pL! zAs+r>OaG$JquT>z$a}Vz$8_Oohh(KBYfr?Ne|>MUSK@rO;?TWM`0B$-k$llj`FGnM zNL;acJ2ITL%3iUNRdGi((1+36=-uYz&1ux}M5fLZ7&%UdZyi;P1b)dALg3B5OdZu! 
zva%5_-ekkBR+TBYqneiy)tNsO$N*!(ZfQ_S6VJE@w)EE(d5N}f76;8ZqP zd2izOZLziZ$k-)`IPT^6>8FlQiE{jk7xe0p6~o!#svI{Pq=gQz03C*1;mDL@ysqH< zoKT<-8;1@_vB&QY;RrbYZczG$g$DiTgYruLZs*VwI~WR0!}}!NfzKGHVx%rO8JgUw z1o>%?4O~@`>;b7n*ZA!UztwVob8-LQ+yTUY+aJ!a`)(9OcG`FK z8oBM{)y@n1`}fI~r{=Nx4E*lHP=1CXI>_*BQx3}bn;e@2wWg`Kf2 zEp#$N{5gP3(ZUFu4vVfm4r!H`_dd4TqWASdhS|k5DQhglug+`bIUYmqsb?$kwoGil zF*k+y5$)y2tSipzhFkUNBs&T*k8630V|9LU=vW?;_aDSi`NU0@J#uG z_clU_rTg=J;e#fg)@<+**>2&1?NYZNW0#>52ceUVmm3~XV;4KPOfL|6Nl zS_F40H6F?^*iRRs7-QiYdszTlHOOMzITQv5IP2WPj$gE(!3~cBus$Hy5VaGA6oIxc z!|dPf_R)QM$ZeOzt%*mEn8B!E@;FkBb?1$8oQ`#5J!yyh+EddAE3S+EV^)VX6HapE zf-~N1*(BzyCSCy3HZVbtpCb_qtmGoKX((6f#F4q+r|9=0jC}=lCz6cHQjgT!)q<5W zS@2PAcEmbj+M4$8lU<^nio@e($y4cO`K+gQFNdU*H6l=(J_eibVZp&jM~!hILi-FT zfj!p}kUkKzt@?`cp^k{=Py^b&&N>rChN*upceQzAf&=oZG^T?hZEHbxn8-=vQv=m9 zYW!L}wD$Q!X1kGL5t%(384YG^0B+IWj^X&*(yH1_qV7NixrjIlL7 zCb`y7Y zN0TBZ@#&&nLHMWKgXY?5jY5WfUnqOc$Iq=@RA6PhG(aB>znT$+1Rm1}n?Ir;WR&ZU zMBBn}A_T1P+8wzbe*RQN=sH7Eupc<2cXns!;oM7?nQBE_%~N3-VobIb+t9IYk6)hO zIak{%lObD3aW-q9j^CwRm{6mmJ`tIfAc^h^90|u`sI1i#CR^LQ4ZyO;lcm|*V{r&EkLMTm8cA-Evn;K=tI2pt3usNJNCcD9J11uY~04rKI6^9m!t-slV9OxD^(3W z<*jQapx`SUlwW(|+?9{vNe`Q7t#rj?X5>DGx80C#E4Cc6+Mcc_K}07kRX z*+}lBx+76QJqao8%|`*eK_}@KgrJBB7W~R|-ga@W_P8A9&rj^y%kvA!4I(EZFb%gC zv~Ppi!Zm@^6}qoK6L1(o22@@|+@KXkLRF#<;(EeTh};%>&2j1AqeM)jl$!K}=F{C- z_SzOa-@YcVZos)_udSQ*D;$j9=4x(E#~+gEg6Xpa#;Ue?n;9&P%aJI_N8+195!DI{ zPpCwi=9nJuY9Exd&u-o+NN-ePmIYk*IK47yyZP;w@ zghmEUFuP8evyJ1V0pTxnsCI>$L#waJt*~@EDPOE1KZR6bqjDZLF)lj1R+b36Q&*W6 z?qY&5H&f7~5DUWS0YYPd)qCuEFlQ&TL7f66V5W0u{NE z-z;}G+hixgqQXS^Xrrlt8xdmnU4g4sJT!TvNI5r=&hpE+;i>Cju=ff;gL0e z#kz^0kr{&tE6)0!`KVK5X1^*Rp`!ih74g5lu)tu+ZG z?$q3qeTGfS=x2?3pQr(sG}lG^R6P;ue>ZTAw6VuH_wufxH&(#?Z%6s^(3gtJ@l5#6 ziXuy$dgE6e&ha6?Y_{#;)4~)M&NmL zA2m1?+~|TKiZymO4Mg{^`DB$Y*twuUOgkVga2900s#KzLgM67_vg6a{2@YIt8cdd1 zh(D8|GXF^%jSu2h>fosY^-ZrUGag{oO3?vGLm5Y`imq>il**P!Q}p@-`^xn?R=hqT zrEL0YSpvRo5(B{U5<;Lr_F}Frms;hD+#;xkV4N8G1Ui`=)4R21-dW)A-n_9AjN9ON zRGze43k9Adhf#;g^=z(=HxSdk$pWe&E9L*{TA^3gM2DTqXjeI3&b(0TO3J57He~F@ zj|sV@zH%VFlHM}xlZgE@HsToGmyNLQyoYc{k|>BmtErJ|`;dR7nddx77ahC$GQ912 zODx=U=4CS8%6}w%q{EOch(Vk83@5r$XVz~f7SG(%?x`Tw^YE+F;)2R!&Val9Y&&95 zRl0%)A+p-!2Px7i#C*Fcz8OIqDPkAcj`vk#QTQ>^pw0A1tTb%?4CO=YZ7qOnHxWsB zfWt>DJoxitlN`(wZfRQmhd_>T&tZx>Qyzd~%MFqodt#m7UG07lcdMz`*QOQ8hEkBa zKq(@s58(!j?KkUnn66(cu$^hsaf6d>`0H);Q&{j*OFW6AfV{+%6Hpf8V9*T7m0EHj zW9qebwnD=>VwN~5K+l9O4dUkQGCtew(lITh!txd$U=f|MPO#pZNf6+Ub#`DACXFBO zlO7{|eVb0C_Lwm;!Ibwk8%>>FGy{SAPB6gS-`G52i) z?ig@kg!=9hP0<1qiMbMN_VRf0Nhh|>SY>NM#A>zCNM{dRL$|_M zgL)aUAKefHuuIPYWeqQ0-XIpPh0LsTeP?y1((?C#3-~g5{^W!5qR8$^mF`G1qSXqA zn4U*sl#!?dkdKNOsw{?VNDV`I`fWb)(~)TV{n1XeXZxd9jUepK5Yyz_oMKA;#*s>- z(5duTHY~3EI7eoV>L^GHB#Nb@jo83m1B#ralCc6OL)v{~{3|i~2ERQ+4#UMMZMMF1 z6SY1$eoxoyVGMimr;fdTXziv{6`(CHEH1M1rMW9f@M(I#EKJxn4X5R?$W++-`+f4Y zfxw3Da1>NqH=8t-tW#W<1{4?q)>Y8wyu2P$M=1x~6YZjK9F2ALBhamwdLLSk{^ln& zq*kyuF+rs zHxOxgv7r^)7I>J$Ba+Q>>RFCL(5dF zwD0S-iTr|$7EA_Lgt#GzLj|qA+kK|pUQNjPWi)o`sP5a7y?-6Mzys3ndiX0FwMQy} z5`l+C8I>)DdhRUBB&_~Vj;4G@h6g)6nafu*5-k)YP`4WggS`(#j4CyAr?5e`A@$`# zUSgk94*qL8l!Fz-Rm2=%vo_jJbQD%;MW9remD7#PAr3<%kn>|06+h<_H?2i!ZSXXAG}>j|LX^Q^{2kAIc$t!a#7FoZ*xp;)oc3Z@4EBj z`W_GbNN4gU^bfCCq^HErY{;&$lpX@y_Paty>Le{SljizfN1|-qcg>bF7EaqQ&u#so zs`rmy(7)+4Ww4b0*yLEz5cG5@g2sq|UiqKylsmb~|D$o0(+&}19I#|lU2;+cHIu7q zp7$RWLMRFLaMC?Z(fc#S{jUmP@hUy;^h1XosBNi4gNbd3G?TKwCps zwUqENnEK~Xh47yGrg-}E>wgh96RX^Fb-&{@Jlkl)XC>XsmvVv_M-{SkJkEFqSJR6} zw0r6xc+1;+hDARwfena}Bdb@ZKuNIptxh7TdvRVjTcTP|1fNJ*L44LhY|W6?^nNHs zSH|xmZhO#f-P0`5M%H3pK)TcBUH5-Isob9UPSkZsQp?{0!CN}RkGuYV1aJB0tLcB1 
z=+s@yS&`-E)x-u-GBs~bNk-ZNmi_`XMDyxl#l7mNs7z z%CyZ(HK+r{7|wtLp3J)Iz@?AWf(y$|GUrNHm$N}U*_;QV%_I@RstY@<`sw;)@%aig z)DXLjl$&K($>qa~Y|zePpIX5xWqQ??XP+RrKfIQL9>HUVb06t4RM} z-{g=tQYufY{b@bA4Sqr8;vHo3ZXf%{7p|}DlYb3|uo6^phdNauZR07FL2it5|1YOS zr?OuNT-a^QM?<>NtB@h`oJ<6P-n%TH4+3K|!r-l!>8sC31bG*RNKY3F?f2HJlBi#n zB=MsN3hw^kdO))?!Yr1YlsInuh(Y)m4` z@U0+f#msMreL2B#bk^DG_w3+Rqr}#k$kZW!spcl%*7b)&R?`o~9!~02VT3L@P})Ru+30U7;)II3zPcAU8?HH`zIB8u zUJmX0t!k+cF2}2$*>aPdhBzxR!02|j{6)iE7tcGsr}w1dUF9ilrEsM`xyBEyO;`m2 zysjMd*}NXJb-MwK&|llwu;Wjp)OYRay3p*w-zm-c;ST}}Z$o1L-Q;-Av%gftEIN=j zBXwi{?mYg!+zjHGH|nL2YtvW!TF<}J{yU|)9&Q)}ugt|`jfN$^5^ruw;OlzU9ZC9~ zw)T1SC*oxHcKN~=l6 zB8L!SiBlGow%+b06&6`^RBgS95T#}v#KR+Y$2{j!?(WF7kT919&EtjBlZB78EXcg% z-TbExChZ_p@4d1@*Aw{V=ZL>Gn@6tcbQ9OBSZ^kk6qDm?CyVH=9ZPiRoN#LHD zbLBfP&eZ-rrMYUhuIBij*$Z{G7iTZlx6IC7YUuf`BL3%T>;JP`R`SMu3wM-v)i3l= z65lQKQnQT~`*h1HjQIIB&r9!K`hIlZCZpm_MF=z1Cg7S%oG+vQvi3P<)t@5Ar4whqf$zz`tj~`ZB{5lW2FDs`X;Fv0O^_bBH&=9J z;1In+)}Kv9mwvtk1x0;@VS>Sf&q?a)>*Ut%dzAy~NZKb0Korl>`(s!t|2hNi3w{$S z9rT{V&aKh?G$Lv9URSBomM}H#>v3Q56OiFUDNO-f71AX_DNUt5fs_rD(#+!{_1&P9 zrZ`=>pD$(E*MUzKXHcP(rmI`0+({^<=~;=}2&FV{h%;$WO4D`fmP%@U*4nAjs+h>e zqhER1x=Z7DNk*qWb;5&Nkh~uG;Czv^!Vvd`qc(oMBTPT#oDMl?LhhBE;l-3t{myT_ zN=4^vK3CS{opXE&Una_?)px*L?!HnYS-9Rj>>HmLqLO1JPK)bDp#3IwG#wgT(M^>q z14%t*RYjQ?Vq@f%=n3Uav%42ajRk4lQ=UfpRpYV|sU3?`^xAUMz#RarQp<-<1p6S1 z41nUYw^2X<$rP-qYd?`|z;0*C6Jvx8e{%_@ZN88x$-JvRoAg-Wg0q5a!!ZO2K-o_%6<8SgDlCGK z*um(BqG*VTVk%v?c5Qk9>q>GZQG|KlC}A2WK~Skg4Y2Re zkVBPs*&#`6-}Xra07X#ugAO)FkJfOSIAky4+lKREKgPHA=ys;bi$V z=Rea~Yj$?=$g|;RL zvV6ctNlhIv{UEeKX#Hb2vq~KgvR~Ey$UCAdxM8<0Ylj01jvhY0p5Q zK=be%Hp0f%QRAwM8*<_%9<3V++roBLlM}<7&625me`Z@ex;%r#fVWORjvJ6sDO{On zrhb_kZ+8Y{%SR~Rn}F)CL@$L;_Yw^{jT2Dnk*ef80*XsHVvoX{dXu!%y6eQ0i6WJQzGH2BvbRfSRtzAINi*JwH<9nK> zFOMW7<@p3(-Hch1+Af_Yh|+ax~g~(ppz*kaG1@h7U!gypBY2!FU zgr8Gh4xj+sLOnOi^i@P~hm=Bn7$HC~@?c3@3K z0L7~CA>Q$$ME>OWy9%D;Qd7&lq#NvCT)_d9cpFUX^%|n1*%ZRiiY^<-%-C_Bx9^C| zA}_&^eB4hV?p$xyWaIixquT2u<@RH;W7@Lfl#P8NiNoDWP_1*giJ^ZrJ30vP`kFl~ zX=Efg!XnDtAO{Is;GX=^P07@iOv4SaWNtH&jzR0Uo4ci&xaAmR=TCD7sxqO7Ds6{0 zgydZ!%H9@B_p;E^0;SdIKqJHKk0!vZ65FIt$e&UIrMx9fPYluHl&&{foR!;_Q(tEU z26#?{N)H|P#*0y69%^zbmjYn+3gt}$B2-IJCu!&y5)9rTC8nW*6qWX}u=;vRng9k0 zq?Qr)nppTj7pas!?jwvbXn~4t0G{%3>6w9C(_EIK$L%`YMzQz9fd~C6Sio^SGXOziA_AT9m&#DmY{aw(pPz}Trsbm~-;5~K$WK)KjHq~tzw2VL zR|G(K%G}q>-aw|}$64r4Y)nhZ7H1xCU5MW@4#d)qY0V&y=x@Cj02qq#UE(84&}tjR zeLw+aJbWv=*!r8%{|IfewHR2!02tm%4D@NhL-D?XDd1$SR|Y^R$td0Q4JJ*Ca8`u5 z1_BlUfy=fW2$e<>clbTVy@!M_{G0<=W2K|`ITjYlE+b+Klq}+(h7!_6$(*D1cy?G` zS}ALVi?n$6gDzNAAYcg)^1=NTZvd+&d&YUv1ES-V*NyV9xGxQbUwFJ7C1r|=FlB{E zz8F6!#Iwn;UMPJC1z08E#@RRJ$OhY}h68Vp9zcpuXGJxAe$PNR{KE~abiwKOKo}ZIDW4A&d^j(^sAl zV9%yyVLs9lda3v*Ec8J$YOmW5cenXalN_i=m@btaCq^bx2^k=iB`#hR;$zu6$n4{n zEYV0l`sf$@MqdXgbvm_z(n3qJPApz^qv|onAR+z8LfH|BV9Y}uN%$nADMX#l%KKm$ zBE@ARijJgX(kuYrYw)?i9Ane+bzwF8hp70J%5>B&%W3}%G>p@zn;&Cy{@HeIydw;) z>ZZK9DBHWwmE+FKN?v{FyWD;&+%kyEo6ao0=T~uq6z-OPdJ`?w>LOz4LanOdKdv|a z#dP&Ae&hxXURM8Y@cOT(2~rpbqk`Yr7xcHhUe13Rym~S4Fx9c?#@d137@EJ;;}lMi zr+~jNMgC*(f;1$}dq#|qD}(-Cb1j6E>N<`KLSA%U{7tld-io#MKV`v^gSbi%(e}CD zG`KSbAq_1>#GNXL!^=OyAPq?`@Ap#VdgBgWM7>s=SwlCJ*K^7}UdMk7I>Fa3KcxMx z$5mP+W&K`?+*98__j2IJYIK#v>hs@fu6@OZ&{E|3UyjVP-v3#T`}5}Sn(Mzsv~7DO zk1o;@r5!gGE5I>pt)8k&dksB83#=oFv@;A}e zraZh5Y)tZWVniuV6_Hi2$B!W{Q;C3^OE4(|eYfUlNzl#z(!3;_~Sp0 zHQO@priVS;ucK^PO2362cQ0uc9~M1uz}Wrpv4e+AD%4ghGl_ev%jxY`SRZVpu6WdVlFV)Ya6I3ufvc)8?n5PK&deEKv{NAA?d^|2yh&A0!fB zAF=9JG{;X?Hz{?v{L5PNMu)>;23IG1E}0Im>I1S9S;IMZ09eQDX~QiN{+97&-Gai< zD|QQ@EB&NYAxp-HmS8q~6Sw2ik!}wF8J!L)C-7VRaSkyt$xw%@)%Yh=6r8DD&qLz@1QKPt$p+N 
zn1g@Qg5uy}xr4XbUwbpJ%k8{%=Y(b28=v$GIjMD>KfuG)-~8Pyf<+k^F@ z8<7=^V_Zc4b>1tUSr{rBG`7_qNX+5^kM=pkbe~G>qEy#!R9NmcFmy~ixIusoF5g90 zB*G5irl9YaS~`VoaV%~egZsGxrP%NwFYss}g^Ix2sgiPbW}a5*+U4-^uFsoj@D%bJ zfI0(UaCe(9x7`dfw||Fiot>3CG`Etwj#qFnk8EZCVt1z%D|-zSyfEg!r*og`s{M+e z>Q2n=Zmqg^y)KaJIL4TDc-&xoVMTo1*3v%uoEF|SGvJbw*DU-aJ3k!iX5qnAtcu+a zL#FAuq#(q)f}$rETuL{GzSgVlQ|%-`Q)L&?5o1^qn8A!a_akrX)?%zS4q(fDU;!|L z@2KP^I)LG`&n3A4Ug>?XAq*OhYXC$N1{+9@M7~u7*&}i&7}71<7!U|wF+*Eqy@HU+ z&z`4Q@quI(M=yMM^P?MbZGRlw4a4!qwi=fKcek*CDpLN-b<#JVZjXees1=>Db|h>X z7q}$edHJef?WS0(o;9=M7-ZbVHVJLzNZ(gHlgvi+)O4#(sHMfPwe-8Wy>2gRg1vCu zvKa$(+Z8akc&H~fIovSvsQ!Y=Ti$RznAp8$4C4Guk6-;DL{JU1IPYg6Ehz6pA()uA zD~>K7%kozXSzi6^3_v^?iNx0?p&511%yrt4zLAD;=g|$(fBvn17GlZ*m74TK#V-GH`-t>)fLL`%N$yYg7Nw7pzRtAP#>xs8~P0`jFx^O50Dp=;P6c_ z0)-5amf8@o)xL$R8z`Tz6gJtprLlGToX(a)M$vu%U;{w47_TV~{=kde)|`~sYPZDRxTaYP`bB@!O|HV!ICb-;f2u*6#W5Yg6(2pc8hjT597*@%0?@pw>X)g&+|meH)@hH*3V zc`O)I@?#;fEci4P^Q&xStDG#NE-smuV}vH;6M@@8ZVKY)CQ_OdKOX9FDVG1$;1Qxi zP4iHpS-NdLUn8t7l?k0nvYZ`JrsYIw@zF{ykW3`FvU2mOKo<|6FT`ACaiWrZzI7)J z>acxiQG)8jCB3k+FPx}`9KWE>LXG&Sud0{nnzTW4o}XktOL)ZIQy2r{gd1zcx3LS`yWIa#Qb3lFlH2XJ@F zX=fxs8UFq`zse!C=4P=7DXZt7@zC2;k4*w|#bV@}^xPYL$0VX-E?M!HlpW_;*wC1? z-ts)7-aLJ=nl2a6C+0)Lb8}FBZ-b#{5a-6Cj71XsWm}2YQw)+OBc>J)5DT3~3!KaC zjLJAgZx7Fl0Q;pNF|Sawp%DFzfD*(c_J*Sl^1jKz&RUdXi7-oQi4>$d1#tt@S(k}` zW;y`zk*ngfxA3wtGe&2d@#IV(0>b&yHkAMv^^H+2ke(zHQ3K7JJXxq480xs*;_jBM zbs>$T<-M60#fRnEk;mx=PeGc{-X!u`b2GyK6N>fK1G%0H)_WC@#4Ek0qA3H(UJ`iNyd ziIGSUx5(cy3a$@&>*^neZ*%IQVh!Y0JzVBG-vktcdb&f zOyD9P|Lt4;iziZxP?J!EpAj6FX4NFqP@NElR26^>s$3RQ5^QtmXz<~qcJpk^Mdzri zO9Ts{OtO+}x$C}79Q>9q1oVZ(2L7s3$}|XA@i_rLLra{8i1GlY3r+YyBn&ZabWY$u ze%`GKNQkk}MR>u$I*N95hym^PPSb;A@T zr4O;GHw}KrH?X(O^UJ7mu-*oIF5CthqTfSfY%4rgj2};eL(?l(09&5@L-w2@yW#St zqy(+({oZo0z_0jT2_Ihu&EG_VdT!LDC2op$7J?|FMxrjW!hw@q62IXC?J%5=W;`#= z5N&G#uFMND>iXN?PuuB>fQkSj3bkIL6@GfdL2?V+Pmt9RNz_)hHAk#BB(_EvGA(%! 
zym*^Q2Q2#e%JLcH4M(yj%Lbv99%I{Dv51VjK`j4Xon8?=j~4GYO@}rxkLf>4pUxn9t?{tITii zy>DD~9_jN?y5;#?Agsk7r?suRY2%HFKbJV}!_v8rZEryUt`EX(kAgi$p9jxL|0%rb z-OnJG$lo6ExZN#2umqT$bR;9&Z)-fcP1xjhs<*vo(&r<`TVdRrF~Ot`yQ=mZQ`-G; zOE4d58+D;$zC=fP>@TPlYnOIf$Nf`YG?a#cg@UwzueD4ZD>>KQ)I6wD9}eHRy3#}P z#AC>5Lw|-xYGW7sADPHhsG7ou+5MNJ>3^HuwX$>mBhZkhYw7Ra-7DRC%RSj;-6_9x#0;*VRd*M4vW8M+S7|I0x|s zbhJbfV{Y3L<=q@ASuoFGrEP3YTEbWCAqsEXN(`pe`x}QG+!1fJO_LHuqS52FYRwQ=;&V_7KIH!D{ZJr~_Cac^q)Nlm_{+Q{F-ytH0ydd3 zGkD*@ZNEGHO9gh8q`ZG!Vt21>T<#I%o{utgLkxM4g?jwvrNb7P5^u)~_B8<+$~GIH zWvb*H7kR5^l@n1l3c3}beomxn6g;}_e{Q4se;#P~_Yqa9$8&Sme&_9@TCdve@>v%7 zqNe;8Vhl`|VLWxn+qr6%URiLlE)02nZRLg9E4{Oq>N`k(9ZlW$UTD$YBOhgyqxcVv z(|@u2`QQE{Di8~7f&X(qQpJBSOC>X?jWu`EDN?n|Wm;kPR{!0P*Tzqd(2vJ+qRIa+poDqK!DdRfj<@9VerA73z)#oayDmMOha z-lhc~zTE0UI=+4jm8Dk2IUUr!>vR{qRTLPy^-XYGX9m@oIN-3Q-h`JRi!%>fOV0XA zwU-3xLw+R3DWp>w&s=DG>YTx3_(kKjOz*PYR+9llCI|4Jm>*>x!hdQWCONKH`PlvR zN{G@OcKETM)P7&yhGL(c2mRd7MqhY+Y}@}DG%auK?Ek4VKEb9lz-Lb36Tzlmd%baz zL30_bXv5=fdJAY>`K(kzWD;SqGKM3`bB8^K+HJyi6H6)N+T%X-dvSdFUIugWYV0Fh z-mTB884=!PGNm2rcS%$8>SW3?DPi?$y8wIi2^XI2;L9PFvv|~%XXBO-e}b+n6|y0c zG#vNzHsZyxW1aUpbqGiG9pa?vS4bbb!KnUlW+EetaW+Z)rT<3yhWeke>+|4u zDIfO@rkrCwg0K8p$hOOvYWDgu<1cK}j5Dggi`5biKhrmJ$pfXPCFLC)6vJ!VlLDU3 zm{(Y)%OiK}KGa~EjGE!|CxjDL#I=VBWUZEVt>#B|!B#8_*F7kK$bU-f(;!DO?e=X> zLWY%t-FH?94fL+8Wwu{r+55XCPn$fw=ZAd@c$TfaHQ$)jmC77~cU;@%t!wzQW>K$b znef2qQUAs#7T(^kIn7V-?{cD>OInsN7tY9);?J#R$`&1Hf=3(HZ*&l#obUQws}A2L zfyeTWGq{MbA!RqRG6+kE(6=q}MOr$adt)t5-}aC#MeFNWySo88dnen zem7;s`ZRq5G7-{F!7KRa&B95=SY;&s!B>>KmA3SB9!N5{Dg}-p$SA%*mgX+?2s|gf zsN7>AEoaFXb>edQu7;h9X|`651h)Blm0pUJLUPHi^pS97eXanFZ-QsMv{1F^8%2vq`16hy$2Y{kbT~~U zMAy@a^G0y$yM=%XiAFV$(Uwhg4I{WBMRMOlx@clpjv8h{@l)F!bz)kEJak@%I4*YG z(ZuKtof^GCh_&xwIlx$_Mlwra2JWKuxgQTPbD2wOP7=WjQ>F45WWWtnp5E22+2-8lZ1X+`18b_%(BJPA(-re-SW(oZ z{pOE91EmSk=8i-kE@(eDj%jdd0UDsf+b|s;Jtmg>OynTESx8DsD6C+5zFZQGi0RLe zP(~rI5`$tH09*$W8_=G2NQAh0C(1;dQxMfd24go&EtWY?tv$uo9IQOB)Q2h>9~#k_ zv%KqGs~9jjlqcA&2up{=RRAmdK91d*h!J4VG-SZp!9?uz6E2DOW|ag1yVi^Xehn2^ z>)Zg^osj^A9s&>5(XgTlWX}lF7t6oEUgoOlJsy$@Xho=FMNu477$NBE8{`M!F2F5% z#`8#2^AguZ3OTbaNieB6HGLU)!z_#o>EsJXy6vUHfP}Nbgijh$COKeM@?LFK&N;KX z`80e`eV0ZJ5J5m0EG&h|c*?)0>c4MmbdPS^tYPu$!$4X8 zAMzCfC-fLDB)AvwBdITc>V;*~(*b zg{c*LcYmEfqKK9L=Q~1UtJ_xmqP~Jff&-q(#p=nsJU14(X{X6 zN~XP1^a(oMSy;Q({$D?+)9DiM=b^RC*AAPNNm?5o`BLv6=|wt@8f>6ak4j0cE`R7N zxTEczEXQ#5eb^o6j=d84m50c_0M~T+c|Kk%^D-Rq|GF4YgD!`m|K)NhkbzsXXnhUi z4zj8g_y2tBRrG01IHs$bQpen&x87f+roT@gJ^qmrt+4(M_yAJL65M`cl-Ui&Su z_#N3Ci9E$Yo&UPey?IqnruQ$D!gVpi_tM`Yn??`02z4?sNnU%?HCFLUT|IH6F5@eW_Xpbcprte1HSoQmIc&Aw^@Q0H2_(rUx z&YnQY=te=n`rntsGrKQ)AvO}oyNOS4jG6WD0HQXZA9^05@U9Ae8i zZ<^H|F5U9~ySLuIRgB+EDVNznJlFp9&797k!`g9c{-6*){c+3CMR87GubpDVLmW28 z(a8pOLX)f37kJ&rCoHmyH+ijC*h;&r6W)7Yn?Z(4?e#CD?^T!roscQ_{^*q$(Rc%P z#vGHY)Z5F0ddyOCEYi|^yO6@Q((OLC0XsDc`Z2ewpmhCS_kmQkQ;i^4=E8hzbl6vD za~3W)W@~a{sHj=~#;SOh3Fs^0RQ&v1Xki*)<29nfEv7NDn@f z*aZ+DsGR11zEAM;9G|Gx2BqF4Ja>FBewh9~rpxAn7xSgm(U)`RI<=1nJ;r6auh&KO z+Y_Iy^GSF%BW1rlY@_+^k@F<{f_#)tq+V=l*qStOm_Kq{SbaX16surFJ9Nt zp>PFC!wZDCuk)H+@{fK+Sl+zTj`w{0!}b35Mb44XN%n244#&lvg<*#UCbsw8q_kt< z7Y|Qu-7K$ws^6HQa9SkAxUl(dNEH!RUWibMaFdZ(>}b0$%ssI3gjUD#e+t%2UdRoF zz?!{oJ*FoY@*+K}RiE_rti7?2&&sQ&ymjlfdA3lH)L*UfrLTA0_k}{XObr#izRyv0 zk;e_m>%$F8>F%#Na#SXesIz;u>;CRzB}Rd$0Br@gog0G6mvx)gh1#R-oJz*w8uXKN zu#`e9?a)5uapleWpznKjTixtZUv#wj^TK>uOPu1Pz6JM%M;=r(@^{^;;bh^IUNq2w4;F~MG<8vOKNFO+_ zEWP#d&e?~fDO7mu*G&B{jalcBYv5aydxn#6{y9vL(97+ieV*_;4Ueytt=`?W)yn^> z&B}9zmp6H=OlQl!*A4yYsSaK08e3dKVPGX2Ac-9Rlmm8QyA5Ht&hs#2_7%)#3|#6I 
zZ&4+Xjb#A9Y7$`s&KDTs_Jn@IaSu|$uGuVI`X=Le&!{hh($(iY2yQ-!5|W{R60Jce^Q0Q0;m&oe0)CgYwssMmHA9l)N?7#I(tBUx;GVneX}?)~rNiDA-7Ayp*qsE1kV z*iMQSRrLetYTKwq2q1%)DNM5+7*^arTPi2E>VT)nL};xEtuuLJoG(3L+rl$gVUq7< zyPw2HHaW^;lG5!AeAyQLlX5U3(k@5DmT#{n!HL}_26j0~V{Cw1-m8vb9ZinwfX!0Z z!n;lKkc^e>1JM1W;&3*=bJPen01ex>u(#D`um&Ps&tCmp2$fD^Fl}9SRrM?o2qk~` z3IGNR1+q`bund-~t!Stqfj5~o5QzYC8XzD=GiE`Ol>Qb2;11fsU_Wlr;b%HjkX94w zd(;xHZjL@`{vn~~<}cTE8prd);hWo(dU|Z#hPXPMCRuTxSf7LNhG>~)W{Sx2P>)e{++DwbE) zco%d43Cn^J_Hhtu1FVoQ8bjV~s?}U}T1|7b7Zb*(#zU%93jA`xsJ5s+J@a1#Ik}=a&}20h(;88A?7p4ZQ4#TGP;9Mp(}UV%ol}DjH^Fn>b z<965xU4{{GFg+a1g(s+QPe2?3d=M!)Mq=$b1nU=KB|nZFNbo&y;RFEQOhz@~&w0E0 z1>y6?hzfPdG7=EW$D}>OB!Xy9X7DT@lZ_2-UCK|c!D`UN=c`%MVWL%0V%mZOWh@CO zL`=ZR3kkz(@Q#CIM8(gNauvDnL{x8!L`tR;^2Viq8De{k5l$SMSWS>~7<7g!iU@N9 zk0i#lXp{+pT?6%*Gt~Pg>BRK$^uRhZcjS>bUW%~nEj@Em3nv;Grd=&tHf>0%3-S=e zp zUKDopZ-F(3DtTo~XtBiFO@FHg*<3Do^gve}F?i;SbL-GbnDm^Z@ik_M(ATK`^E$?> z2dov}B&94rnW+R}ryG-(*XD&m^DFw0OM|Chc48qo-O^y?$z?R$zBg^jPkb z>VQY*B~I?Lk`^`?ez3W_vi;a@aWBr2ZrUz(LS)4^8IkUS&Qt%H(<+{}?tI9b-lK<8 zKdtyC4-n%!tt3zG`ahW9G!2_XiqgLyAx)kIhb|tVjk|4g} zuUY}ZwxH2ux!vWXDMWDlNd*IIV`-}Y+BZ4=SFHdTSo?qT_5#*}{NI&sr&wPR*l~^erJ2^cnk3FX zkzM??P3LvX<}6kRgwA(rUcNftrE}}U{3E?~orP}0XQ2x{ziS1kUq3AL(U5CD^BJ=H zKKI*cRDFKD$@t^v0jBNR#X-02`xc+L@2OgR>K*lQrB-0=m!ZJZ`@TF6x?J_;Md+=M zUtWf_ul+h4`E1|USJ4+DnXer`314mPj1#{5x^49!Hf-dlVp+qO%TXFna~b3uj;7tZ zwKN9#^}b=v&-(Q-Zv{TRkP2FHAjt!6j+$E1yKmuqY?6M32yO=yC+ z0!eG$E1RGu{ZE_=ZP$~kuv@cx1h%c!-|&M=&K|q@zlo&70In@+w)O@x$B0a_J8>ye*Y_IA!J{qe2#*2tneO ztU<$9_Mz6&!iAtV3y>0ZDC`7DnfQ=ugOhYuWyRw7EhYosB;%#(6+g1}Am5tw?L6GC z)ah+yo`PdJe&|oAUSmmZ&1C_G0ZLEu+YMx1@KX=>vFGho+=Sb?>t zeq4S|wIU+_9c$N3n9MXS7%&Xap_70kMqm?wO?YNBu^zS1@Jtl!8@zC-R`IJglupyn z3Mx1#SLhyZHbT$vYQU{>Ik_exKefq`x2RZ2iATMoyFIoBnf3)pll$M`#73)P4HD8c z&H;-iT3trOwEYAn&fPjdZQes0(?XnklZb5$GJO=cjdij))I}F+9NmN559CV|XW>OX zuhGyikdf~)4b{y}Ax_x%ZS_xYJisX;k{(y&ArLB*qK@{kp{kkMbq)NUp||4D%FL73 z$iqL--NZH~PfMqdlQc{OH@7$XXcZlu&3yAn^V5N1obj9vp`Q(cH8QtX0c-T`A z&-+KtI@j~8?&t6QbazOa6#S~24c$C|uqDUF(P=+Sl|I_Yo zg9(;6HZ*KxdD_qN*Um&}wd1$lf2U%gZW;@KqE};q!l+hL#Pw@9f)K2sOo)MsiRh+Pb7}CKC>6@% z#DeDU&}U7qn$^E*ni22@4Gs*-z-J$t{W);q$oH+b=V_}yv~xoM<~$oVMZf&}v!%Q# zgvpimArt{=>e(Z_^GdT<CsIP1Qp8cs->p_4f~$W1^)|RQIfpXaXn=sQT2DAp=9B6y;eR%u`oGo} zq^-24>Vzej&Lk~3QTLI+u5y#h8$jTWSZJyfWiCje2{+SdjLKp zxscr~qYV$K2|#tTUKG6Lp&Re=%Bz1p_1aIBMaw2ccTGbtk7;3a~An!kNgJ;mE^Tjz` zK-$8yXdklV=}nU!+i5Rc&82M^U5d%1S~Gdq+C&g97H4_Lb;}Ndq@Hy?=gmutp%o;fGnI)zMI-Qli9j!<4{r`Ez~Pc;IemxZEhO?E(F7}j8rt*Iny&UVy_XeY8x$=9Hmrf?!ThF5|@!>j=~PV#G03vOdsD)|z&39lNa zl-L0^;g8ltUJa0lV70H6Vs)5s$i#gOFvMqpp1evZ`b7rUEbFaV|5frT=cxqKlKxAG zJ&-z zF*>?&jRJ+nQ+(0ia9F&VRm}p^&NV467p;i%LG8rXhlLf9)b}VvfmPWH`qjf%eKDVc zhD}9AfpN~5wV=FcR>1%j+yqnfE6r1XqnzV{^+kL#B+*0tdJh{~k~Byu)Mp^Z@X3hW zfRyiHfS$86^u&6Np|lxnJC1P2Nsq_lStukjkq= z4)I(2bGB(NHOckrrQ9uPl1O=`(U&bq`ca%A7P8c;dFfMxM8bBnjVWsh?htvIVf@Ga zg0-UZHOaw6V@f~VFHh#`DyPq71u)0pZ#prWUq;iQBkG`K-!9n6YL|M|vo6=(Ig41b z!kVM%hAlPoyHT`JMd4C+(CgBU`vUlf!Gpp}i=;NaF`UGp6c@#%WO<2kis?37Kzu#% z82biGU`jF%Jjx7s8laer?V!Ex$3!&_kf2l7y49WPE*baSu(qWUD?>DQD;wU98X*B8 zX+99pk3j1l6|v~Ts~PsEm{^a9=5yg^nRmfxKD1LUINYjfvvxGF$>Un?)eLh3!R{TMeSp@mlqMkmt;1h!(JxKs}VLdBWDBQsp-Z9B|uT!)>^)P z&K}iP|6KGoZB6MMpIv)h3Bq=QijB+Yrko#d^`3F^0wVfAeG_x5dF*a!R6>-YhociK zRY`_*C7_H9+1QvI!dK?u(-Mi8u!M&=esUs$CC*m`p3Y)fE)B3?8BYb3{Z1^$!ItPk zd5-~SZf~MmsGqi(0-&4A_Npfi>-XJ#t+5clZrERGPA*x-s4gPLtR#z$kCn&iWyB08 zNcreJZgh@HI-0UR7Dq$s6kTQS1VI4X9C_;=_oqF3s0+wgj^L+EUL?fnMs6zfe)-zQ z98!Jlb5)q_h9$qrdp~}CAZfSsMG1dU(6pnxRUo0VEYmkN zsu*rSz8LURhh1{elOHi?r!73cV}Y?7tBbhbW{O;h3u)b1O& 
z&a+VaI1-=2VCVIbpdq(R54p1%(7PdG3%H&lNPGkwa>f8qGL$S6HMUhEa pk9WD zi&{20mL9wJ){$3+bYLP*>4;@}oh7HWnS_0bq{ zUgfu{#dA6^?zC$ngV%a6F(|FoJ&sSH%LW^H6zuY>%H8&{ z&11_KNeSy5$#_qAle_A?QlO2$u11ddbI#7(tbS>~Rq+an1=`INO zdkAVahYi!1e_X8Vqr*#^v$g+Ov$@tS&SSiwn*V>c1c@jn?tZYe~J# ztZN$#3+K9JdHyl{KCcTJxRjKRmL5b2HO~c^Ebipj(Gz2r1@x+&@p=z-Yks@+yV6yA zhqoRSnh};0baYu~`T>e?=T~+EAL5;kkqdc3qWcD2x3vCU1sU>;RU3D-R*>S;;1Dy} zRw43UG0{m?SSs+mED^tRo0HZbSy!=?6DUXD>EneCGs+?-3ZJ6&D0PIoxhsl9G=a2Y>tXWY%UxH;J6 zmTU42p=wrqvaof+dS$O##NcwxD~IhgUOKAZ8F_PuYt z@ptP)n#diM+}tZKOgZ||aqvv?H5=`jppFhsw(mC(LRmC*~MpIiGk%eZhnN<_rq>)WMoeW=awSu9xhNGF6-(_wK)r@w0# zLOP^u-`*DNx%O?c=yw%ls_yrxveWy&PnTc523FzT`t*IKx?T6jhx%vx ze|&8CT?P5|(~sZ7ub@@-zmauawN?0Y>D^A#gzsOEPr1exKdbznVDJDdSo4egimIw& z$S`6WBFe>J`10E-EBQQmY2gks4V}l9gL)uA-Uv-HVZYo}l~md!2@72wTdj@FR@!8D z#|C70_jK5ZsK`|Xi6==RHa!XuYDyJi9&^G4<#qSkubLV?daukL!lkZ zX2;g+aGjJ?Hys~?52@v`*s`S5Tw57oKl?Zw!0slX%$w~@XjpiXeWMk8WXqW#6Besd zFU#y21*@bUPLeMRm`JE41|L;d(W3)|J}IljO@m-JHYyZsdw3OY7jT7SB;8$&j9#kvz5eizP;h+n3(+xH$UTqq)Boc&98R&*{*yxwnRts z%jz-BDcZ#M9J3B106|Z9u@{id{bg0QkF0_ zDAQ17SD<@eKFkDqNYoLFbdL(LMFp;!Sjy3;o)g(|V>>fz@3!r9+1jf4t27204FEXR zTi8{OZ<<_PkEy4BSF;PM*(NXe@|KgGcKx2}@EWMi1e|3WzfWc!zI1H_UKD=7L2uLR zt49ysgN5tJ7?Aw07l^0$d+mRssZ(|t;0NLJN7T)0?aNY<+N`WT>VHqLf$pzOL&StX z+aRUU%f*K}11e(EZSIlp#Y1qNBa91IZ)vAW>)=wHct^z^2B5C0{(=#b^BPyLR)yiG{`xlNnoVFRK^F8OWDknc$7o-06YHvEZQ z8+nzz<~p{d0UJ7i2i2#kLLqNySzwqbd8PoKX2gi)vOd!kjNke|&M}grYD(^Gl}2 zXlwc|+eTdK1r20}Cp=ot-&-Y2k*SiOA7dtOSN80wG#pK3uWry?zBAz87yHGXp+?!N zia=si@;BKjH!~Uz-qi&o2Kyg2_~Ygmf2_OCIIqpGRO~AL$jcNq+w5A7zsjpVX+d$v zS3Nf{7rXBd#wMGKcAmoLVH_dtHN9NSwO-RBd*1Sp1{Q5P@??+}%uNvz z3)pJ`R_dx$rG$cEkZjCiB|)!GN##EVRWhbsJyg1w3Rr{GwWy2*y>Q8ilpgWpsN)ed_yoMvyJ3H z_xIlNr(l7GnNDWYMKmpYkIMC-)c3^PB3MhBl@Hv*@T`-0RihEIVPY`A(Wk6Ck!`vt zE>##REW-9yFECtTP?mTmn>u-|zt>ctL;Q3OWfU-5-DExPRQJBoN)%d=eyhz?d|hKN zPROluqRey{q1 zZi-q(VD*~{e3E$UW@t|~nY}m`*8EH(e_Il@j1-ncts;drL*MSW36R>0z&fh=VJ1(1 z8YF(G++qly{MNeARo*?u)&{Ns*aH=X4Qr?7gFF!P{-1R=)R501ikO~y2o1MjSn>rq zH9STeLcfiQne@4yxcR}1p(_hNkAm_f1Zn%vbPxF$^(nl3o5doHk)+s^sPYJ_RdZCF ztZ6lY38SVF1kMK46cY9c+?PN%yd%S05Ebp7>opZy?ZSKO5%k7d)}?0OCmJqJze}@~ zw*ZGMzOb~HeNjp?d`-`=A^MqBrt@&R^o(t-phq5nQ`sCAo@<2-Q?{aZa9FE8szNDY zE4GUqNB11)RCV&AdJ-UUkYoCh&wvnVAQ##fBapKuE6lOI2)uv7xM3}{}n&bbd}+i%&mn@z@}K6Z-u%-SLK3zc)|4Ce9C+neEr4JjyJ zmkmjlZ}jDYo3H18iflL|sYVu@lq}k7)81hN`2>2|1IJHh(@2LBG&uwNxMnbqc*=|T zv~9cqIh60~Un(5-HY_WHXx3qBg9EeMbp{9&Z%L2@AXRBc2J?{eD3UFWlee!jNet0} z%Z7qTZ)^Tz2`MJ9g=gjEJi>~p)_*We0AyyDYyl^l@Sg3Lbkjixvhh8Y`*#prK!4e< z(-Q}L{HDeAA+fwSiDsZK&Zo>#pvDhUI$J+Gky?VF(9(WW`+{EFqrCIpf}rUROFdIlsVVYk&u>(h+|M*mXh&OMRiCNclU0b&HDQ ziXoKdtv7D4L#f4L*Aj$O%*7bGfW09ULw(v=Qg)JDE==|>roiI|2avox0^8=e(*+aOQJyt4FoeJSonNk(+& z92?(VeE7xP7Slp&bbOJoV!;yB zy@9;0Y+CoObe7eg=}nStSKagyn!3uCp6ou>6LVL#YayrHYIjF#SJ#&CZmZ=uyrsrN zK8arMNVqq0iK=Gis$a1~eRJ?d(xk?@U72OtJ=0F_%0!rR_2stb^36_sA1uGtwe=rQ zMQ}hLNP_14x%YOHR^Yn-{Gt69N1s3|%)dDL{Ka^pB_?E5)BbX5*H?50IQ@0>(HHgq zn^$24t_$wH!d%DSjHj@in;DL`e#3RaPR8F(?ca>2Klk1hufku9r?-y^3_||!Du9e9 zrOkKhr&qiRAov<&Jgs;Y{%~qXfQ+Z#UWFC#bz{Ta`2WFpI&O=7{q5kryHT8K@S*wc zv~l`wvJW$4bHjZwl_wZ?&Sr+0AN{tpAjU}NJoqI9ME;{r#$N+F{^nKiv&49Vd((f- ztKg~OpZ8=}b=jU@ou5@t;H{?%wr$(u;d?48oE9H!eb4aXsnX)lZ)t?|CCG94IHp__ z^61EJdyurBLq-`+z|TN?f=B(H|7^K%0eNv82U!HKkHzNJ)4+w8Ukh*1P=r9UI1Un4 zK}Q80dSh#e>KZzuj@XN>h*NQEp&L^+SeFo`g#u@4=8aO0*_tXcvrJgD9a|!tDy*b8 zqus9VY)~+iG@T;m>s-QL)ZpWVOcg#g7Eqr-_9!sA9Db=)^F`x!AK_ol!i9<29{xrC zbZr?ylRGWs8+6}Mm)6%|6ap&P+r_`gGX+OHel*?1%-!MZeWDu3*Xt`c&fdsE!<_>@ zZ&97K5TR~(b^*0HUe`{RtUA;BX~&hJHt4rCdDMLb!!ExeIX7PWGo|=S+Xz*HL>IK5 
z%p_ZC)b9-l^iJAL>Cucl=<6)T2pOCWT3>5BzZ@`Q(L#7zO?xlyxS!raU{pTp__BsW zMpZFuY>eW#PlmQ5ZGuHETyxOltFIM4fB!nx$8Ne`{-cg&Yg{-j=q%Kpb;&N}>*>8B zp`T7O+oH2YDR2IV_g;p@sw9(5zx?dwictkWoV!mQfAG`AZ^P$u=d*7X-`<>kCN}fk z-KXLF18O+;B#I=M<7c#1Sq_~l5NB4B3M^MawNBh9H{VUgi#tNcqVSuS zt^LJZc8cUkmDecRvRY8>sC&)vKd^z+>}pcjomPy}W0%Yo%Z{;f3n-OR z3HKF_sd*UTihb`aUO&F{NO3{W-*vr>aS>xWKDoFL-%h9h~MG8(KACw3UpjCgD2v?p;!|V=s%vyU1Zdfu_MJ7K9u9x zqXOUG1)s`2xaG574p=rl#KtIFxR1X%8{aI{y3h+gl~DFY? zV4#>T<`3Gsj`~bJrM0S|MpID5C@|ELx0vU>c0FM{%kXK?|DvtyzsMo?#AV(89_VJn z6A8I$8@=yFa80|2bdy6tGZKdoQhIG+Mn-Ri=_y@qwJ3Nae z(W_y$5CUP2UPWZ^N(L=eKej1Xg)yN7Tv!3!6Ni3!6Lw=_U6%7W}uyv{q#Kx`N4dlKR8;Fdhi`c`3>gxn5j zNcU>wd4pp$!D4ZtoGEzG>+?^qiBy@=>rNk9z`xE_f_S&M2%0+O^N-zDmwpwtO&5O8 zV3zj&lE{8bikzXOIkG1rQ}F6rXeD)G+3Fsx5=C&{!0I1B^wb+!aA1J#&X0@`-bSC= z?-*hZjqmb__Nz3R%KQP%eM2YC z?YSf2wpOC~C!uEVWOVIay_@&(u|t1mSrRm8$B-ot;jx^C!6C7d36|ySld4tZrwVUogFG5|Vf0EoO4};Hy_!ivQIpixsxT`q-E#>=N&qn2Zm4cs{6)*Hm8zQ*y{%xfnn~2 z1N;G~;pfYzMfVzvHQjP)#&9MqwxMMO#8jhhpEW9Fu?VX%o0TKyd&+&Oy+Nxmy z8$wIM!s#jZe@R!L6n$?n<^l%@bftbfn(kCQI~FwhS-JyS@98R z5wTj_Z@8D{DRo9hX@ZnWsf9?|j&POweBYP`x+g{Q$x4fyc14-$gaE#k(&vJ8h+2Jc z?(-#l3xdf{&aq>-Fr`@<(jpH1ig!N=+QUf=Ne!3W0_Vwm32IX)@sngQNW^~4=G_l- z0oovWRR|@;JOyNV4_mdhW3{1cCZFiIuTxxLJ)|ncC&n0-`c1i~!m#0z?hIdvAvpv@ zBwR8fdrT=?lV&&;4u`Uug-Rh)mga_YnNh>VE#xmD>6?onguVc!iR?BBabVmGpkWsq z;NWQ}eH+g*1{0KIt_fio4JLL}Ri_reWafD3uqc}^*=p}s#xPu;ci}*I_ETKE*&tc* z0f4xJL&N~y{g*@*UoW4`AbBHv(~<^NkLv6 z5!G&hn1CsqP##$^_ganryZGtYZU`i#z*bk+funBw4I*qvG*_sBX$!5M%#iq+FZ3mk z@Drt=oP(x=xo=!3zCHUy9-6-(?eV@fF4|^uv$j9MYWFx7(Ei4%uju&%1)7y^vV6^W zYo}cR-jfy~*q}XmH0TIV1+rHgqm5bTT2vAz^gqgp9h((Iu z2CnS@Gtz#NG~RIOrK(h&ppC&W{!zKU=p$qSF54{YtGq~cq^YjH8v{PlJ=Ty+eI1@? zG}JQc%rkg$Wyk3&U(d+RQlQ=@)xcJeK=47PW9^u}WGL5)2;XZ>9n;S`JW-;?)%#-B z_~iO%TdDNZ!bB<&j0%OWx@*nip^r7i7DYjzn2h_zwLT31=k)T)62As(1fNG4PQ@qhHs;^UJpc@(kuagZt_ETMFg(0)mG1zf`b_A zAvVCbR~(Mqu;5G)z%@AG1BBzvPten4d)~^b3^8mG$`(5gt>5uvokM`3r|Wt|kioe` ze6Y2p>IAT8?X2xb*j~4}EG)kBRQztb6VXoUv#Qi=y%E6*=uJ-Cb}Rn*pzO=R&2J(T z83l>9Q*w`6lQtJ5cH}Z$DKdPx@jqR9A$ou$_yeFoqtnk)8a+`YUIej**dXw2P8PCh ze`J$|3BKNT3I3O}l?#08FNJw>v$3x7!Y%@=HfL^!!uRc__-YlDR0Tc2FtI#jb={@@aC z^h4`yIV}pX_m;m;)^Xtu@0ZR`yc#fCq_7bZ^dSnRyTzS8+1iJ)U%RE_YE1lV)KLwo z!$-ziQfm%sB<@YgmYTX9DcZd8M?C1HjJ#Z_DChMtG=4Rf_2OMQZ}3j_t^w&d=B~MI&A3>#D`rwWr)Z0QJ)~tiA7M4abkR-Ty$IuRx z^@dT4+%u_u@gy6jG9*`Y5TgS}*~K$qD%AlIaNgtIMBMxmIrF1peverxgX}De*>r;j zS*sEw1HG>3&Pxf+6+w}fv?f?U9~A}wZPwubEQ}kKLt+T@PPITLmM)X9Jcdz8o0{vB zNF1&%RdR--ItXdrTQtGaW@rb|m}S?b5Exn9Nj$T;QdBZmmULZzw|<`@5XNVF zS@wjdD@`8N>%dz;^ODaU+)3tjOO+`&NU+oaW%UF?SbH5*&jJCd z3~NUdPiLa+oQPE&FGVwW#^Zr3lULtt4XfvJK#&Ba#SW>m(%BKohVGL?XqLp^K8|Hc zeWYZ*%c8GhC_ z@I|c-l7ghP^mxHjVG#DYFfi4vT<3(1NO9-@C_=)(m zDez!tYk<;C6pNhHaQd*fivKJV0UZSSCIuQvTwV&rx0rXlpclCOT--`V zAg+p5g+5>*H#Em*?*2FJE8 zZ%5P52B|g0Itu+*1~r3ww>nz_>0{Tv?wXdc6|Hbk{}!!XL1q#_XZb5tZfaMRj!tqs zKPj@?5rtIY+wcy~l3gf&LpWG(ZF#$?R-9zDMeZ^w#G~B; z4zz162S8ZSwx)%}o3K7}Vh~Nh6im(9@W$bY5uTZ5{Ka)?TZz2sAV4mO4bN-KtKtQU zl+fAyd=02ThZ%;YOEQRl2XnVf5uS3`QtGT4b^}a)O>{Mxm7o-*$3wdp+Aah(`OJ|X z67TaEXj7*&QNY@~6^Dry5@C$gL#HSkN$k0`IZEsTsHk&T^D(rIVik%fJ5`K^M*4we zmCe5l!?y}@OO!G)Y%HEwvSU$W0V4W+Xt3~6gW0?vUXVcDKbrKO-bwARZdW6ALOQSc zDNb-zgV{iii=3~nhxQdItHW8UFQ7{JkX8$oFl0jdxcr3sUJ15x!=6o%82HR}no~7< zR;e9wb85TU3tWz0D?129gKHy7kX!3V)7bhbgcG0br|K)GJqhSwhplBQqATu_ANt$P zu{9__%xs#ip~~XG=PI%SN+~IU$PtUG25yb+3QK7+=(lNT@xONFkzj)E%rS}%6bv44 z2hGkk3|}F;eGp@*JY^qIiKyqhh1dYBp5403ZWQg{RsZU znWwl85ve{Yi23Qs4)j%ru+M-6WL+y2y*~3?^UR*QW%6_AxBLr6TE#)eKUQm#km;}H zb|l$(JwIUj=+=X^6>VIFrxs`o={>I3L3{3b`G%aj(Lu!Q{W}}j%DP_SWSJ{h!Dnwj 
z3Qi{o1Tx-+X<Lh2>_D1Nbi+OKHl;2=t9CY7hk60_aRG8fL?_IbYrcV01xWxp*0RU?MuZIRuaH4j^ zx3vCKDdj))FFZP}2YS-v=TLv-2ygd~HA|`+2>vRi{AI&D=UzqCI#?F;4z_Z)W-PtP9l(h+vBj-45Ka~I4J_IXDLIxQSN!O@5KZ+C73 zh$g*M{ul;2T` z;yUjLNA4YVe(>wz)W*yq<2z$*>BniosppuhuU5Z_N1Z})EjL=99!bR7XpScRZ{`RY z#J~|(Z`v8-6V}o{$GM60?^;-HRBzcHI#Gw0&lPGK50u_fJ3Vy~F%8*yXu}QLCZh2u zi$lsMRw!Cp1}6kNCP}Ln(n=nG9eio=dFl}8LcWB@P?0^i);K*JbyJ|1VUw<~lhB5J z7Ab3g(G+7#vabFYzRMg{$p@)L2o z2oUpLLDW}-MgCYD`H6NLYxrqX<0&=f2I%VN&uK5eoRu;Wb}UF97RnXj%|-5cPTVRq z=|01>raf%B3r~KQ2Ln3}bn=LUH2!hu$r=5~HMYJxx`btowT_c-J+yNBMR)BG!F-?^ z*fHbL)BUKe1kC}BU8JYQ%sq>~hzSLOe4-79{%=uW+*#i=ZvD?CarM4DY}$p~#Zx?y63m6TjSl*!ei3+_+zHvmJjYNsFIMo^gYj_cRUym- z%h2AwSX7rOqb2gZd8%y5r;}rD-(|l`cK4R93ugXO8BEIg$#>f&ij@aFD(x?M@bMQn z2!n!ikG1WcscM+4oVnKc<^9Zco{08`>QcYCW_6_HH#LPjA2x0axhv>J&o=7a zJIlHEdGgBaee~$v=ZBY-rmycNFtK`7U837zl}M!4%NEhQZ4iWZ2U*g8euFo;Rq{&Y zrn9+R$S}Dbw_Rjg+e?qGc-Rqt>G=+|vyJ4gFEXK=UeV2sEEpo<7Eht1i`axT z6boEn!@@sc5tZIjP5Jq}E&Ma9i*B=&hKK{*3b8(lO~gf()E8t>sl=%l9%7L%+qYl& z=<#BoYtmxZ%i^Qh7X)wN<1XsrF!%FglP}LByTViikGzFUPOrY0T{5_A#f|WY?aP)q zEJ0M9{R`u^Xn4>%wsw949yiu5wtc zgegnxy#DCAJbL_q_)#!LG)Em}VP{Fru@Y%P^AC3su(T$#?8slva35wu2mbUqJ(NnC zxQZPPah;V$wgwKiLVddX8?XtqP4KW0l*6Xc^~3_$^2q7rr`CZE#z9HylfHWBtMTGK z1tZfsDD--9GI60gb-p0AG{){@n8cX^>_~qafGUZ_@F1qcV839{SWs~c@?EVAggF?X zILm5R5MslUBI~UbKbEI{al&SUl37d|K+^?TNAM}Sk~58me_DeH2UR)S%r>*9xtJpp zP{<>^*I$?yLK;v1wK66$Um`l6+iuFA!-OVKkeq>`B#$uU(K?($|l?sOdUT`!diq(!s3B)2PU|Nl7OWzol zfT~wf@^u`y*jNqFh~scTE}6Q2gyq**ibBbWC<|-YeiSb1qo5Rpl(kc^E>+ZsmgAMA zvb=v;5pRdFV8>QCCu0YE@0RUT{{o?N{0Y<1ZBwD~LTe;=MG#QpVf%p`%{#Rjv7EQi zS{!ESt)-H++-?ZGm|+;ZI|jGZtZ75fiXs*R?P)e}G(Rn>Xar`e0|uOx*%k-u&aSsa zQdYOsCEKkl4hcf4xGiLE%mCRXtS4&tumo_mR_Y_!m=)@G_dS7OF%$?C$dF4=Q31et z|B-*Nd1#;kafp;MLgXjM)cZqb_6|=I=GWVHSP@&XskUN#@ldyZoVvA^nrX|H(`fu9 zjWxj(kw4}i0 ztf}AS*tRX!LXC9ulz8b1+%@ZwN%R7Sy<5986F!j75~hf5z_@u<365Smny_12c z>pkV?7{q=-2)^ZrH8Js~lUu}rk;=PdshdN0CE0@KhSW8(dqUdC2Vss|s1BO4`er8vy;exzezXw7RuA43 z*_X*`5V7Ve-cj2%!n%#oVPWrzAEoWPg%=ISlALb`aFnph%~)RGXxR3f*&X%QEsX@3 z{iZXTpL@4_q$Fk2wUTV%ytAeRN!_ z#a;onGqd!K17J;Li5QT9ed|c$A_|xP`h1MwDHAVs9 zuUDm)7EOfhipReZsgf|UOpw|K0 z9AV+*T@o*(w9PMFUUg*He*+>^>A5egA(?V5vV3~0;W2fCWodh?5MWuN!z^Sc%Ok*6 zGI~MsS?mj}t!)DYEetR)4YC%Cv``h7I#D3EdT(d?rmPaeWxI!KJm)gpyGvwlMQdsv z#MwA5`)6X+ZRBoT%Q9L|RB%jU8%+kK34@APStUs_uZDs!!+p16`Z zLDw_sS}K@7Q6AT&=A~66?p>Qdl96_!?4g5?O33Ne;WV=BdPmP*br^vQ=(?l>=s@i= z%A>SKPj#U^e)mI zpWS$T96JKbceh$I;{ia291B&{f}NGS3IiAcC8POIiMqu|40A*z0W59~g`gNH2&6sQ zM#fMddCbNay;APAqZwkp34$%0vUJI`2DV&nfk1-brcS=j>#yFZ`uM(CO>-0;^$Nh+ z(%@=IBi9s2mRPV$`2G7BxI@qw=A*-mU!DDv#~gCvNcUTMin3(na6Z(D*>3uJlzm88 zA0PpjYcp#FZ@Dc0%d9k{$zVTQi!uV6jz*fiR?aaY8Y{2X1T-i7hT%mlZu`(j)Az;c z=MqYk-h&{PFvpZ=E}1xMmx`4Uz%!m$NgHO-WWo4wINvG}$6tHk=-rOTc{YAM?z`_0 zt5af7B|sF2=wo}qqs3LA&w6IfGFU9X`j#ux4b}jb#P%Zrw^w=5%?jiBNzq$T(*^AY z*nVwMS!KxWAlvKv>FEH4rr54YlbwXT(dur&84~xHdeW_+-TlYqbmG)teV zIlgru>;K~J&Eui`_y7NEHnSMl%-G2?_O%+ajHS%j$ug2cDoGM8Bg&F$1|f+dB5h-- zkZOvMwy~5YGPJ2wV@V`6woBzUO>zzvXZK!MI-6>+yU%@Ar$V z9v5T6Ay>7qW^a)FIVfIbFN?)*cjN+rY8Pw{KzcRW4)LA|zzL?pl$4TQ2B!jtsPBvE zX#XcLlw!+qEz$3{&2$NP12Nki8L0~Gbfa^vLX{yA@-#F<&-<2*$@|(F`1)O*kMEYM z5z~gOz9~B|;g%>Yy|Xnx?1Y%jT78i9WQ^}@!Rx_Mvzvgr%fD6kj- z9LuehC*vecoNtZ(wq2*4@@BScXbBG-CA-*+L^TCG3w%;)Aq6kl^tS4NuvbK-!>A(A z9Xb9amdLs}f9q=Wn{TS=yvfS7=xuH`DeyWn3QlAu$B~nOE(|*O^p9*+Xhj9)Uz;`1 zlZs5Fy&MdTvvwr`v8ph%q&T+4_8JX{z7W_q1|A(4CJBw6O=G9(56J|ZWG|uaKz*X# zSDs-akj5vU=B3vmcsW^1s)|+Xs5@;mJ#Drv=F86#TVxJsWb3VtQ%fF&?@|?|cqnWl zPQ~3XIT`t?LPm&5D0Hw%Ie&bwLO2OMb%&*u%2w~7NnKD!|B+Wmshw`G};{?xw&v>O#ham=deWdJUT$(WZ 
zYItl_<8%=Q&>S4d+FOHA8x^vM*EW5N?dyRxa}%|U33I*uf&3*_jWYErKjf-_0KNJ* zXzMlI2W{B_l=Y3`H))bEcl-TiUINsTY7pJoaNS{TaMk$b7vJXIeXh&m98#`Y z{2g)5w97^Ld|BYwssj}@?&vReHux8rHe3qKbsgyX%QsRzN-Bp zU@>ZSh1pWidTUvTWGF~gzUKoI`GB*j5%t#AsPJ>XOS)20wzMtUum>4S!6XQgMIz)! zdl66F)#Cj584~62AFggon zVA3n&7FE@6c`kq?#svCf{0;zc5p9NX(bFQ7kcwX~l9sZ-^nHe5Dfm$#00?knWX}da zBq+cu`0h;tAlI|xL#Lxp{1;XAfwDxYB0(^;R7VwJ%4j&qhYaV3_c5_78l-ClHth|5 zmLxSO2&*F#ocOT=nwZ8|#78p7C1dQhIS98!sIP<`0OtY_yjh4^@g95^nZW%*h-68P zlL=$o0I?_{iM2z5BsC$->0{#U%_6~bUh75p_xyx2af^S<=H4M4!vq|25@4p8h(%Aq zA_m|DN#a<*mUoyh6407Fll0q2*1Th0*YeG7gf{?_AcmDOS4+_I0u59?IY;%;j%gY! zKpS0XxPCc+#m{tt6k-(29xNdy4S}L8z#qL?Qp21aNUgWqfng&d>BI@%0^xgT*gJk9 z$zK_mz3Rj9tG$4hF46A-52L zLL!NY2%zOX*DQRar1B&spIz;)UQTS+re1x->UKPUdoL|+SE+yGJ)@n3@gWY@B)|U3 zy#lkT3=P3i?b=FUgz{qTqUARJB~vCP`WhJTpb_ zIFF8G<$KexM`;1FGzsL8pEnC*Q)HH7SaD)<&2Jkz2Ef<@NxhB|A_0o#6z;4`7!2N6 zE0Xq>-|$IbLLvs$J^^!LvC{&7St2-0A=nyTsQ;$?>GS$QZi2yiw4}&80SBqiw|G5Y zYqS=)L?kRvh$ImsByzy4Xv7$8pScLEqyfvGU?zhJ9OcW^ldz3U$!Q|wi9=qIfwjEA z*HQ3wOoX$%IfjXzFWtmLj2fEmxgDJ~Y^ z;)WUZ6VOM!35Mj7^Ig7;t5lVhz&e%wwtqtbVjwqw z$A3B1`l}n@FEwhd19N=NupOxJ_n5Jf2Y8e!K8Qv<*9B$re_W?^oc#-R{)!okSa}-z zD&QYCP9ABtxQlx4o{LzZW7b^`s+Xic|6QXt#VS6QtpBxf;^3k8P@}ZU^{Ynx zz`{9w+4_+MtN59W>5WHw7B)^)d-eWj8z+Bvoqne7edpHK>Cs0}je3EODH?ik*8A=P zX<>SNp+*fko&U>?lfTR4{ihTEpkt!%BoAiXym5c5sfwXWwkDckWM(J7Jn%`FOv9|@mzuW*j(~h}qId3JK9Z@?r za+2GKcz0^qq~p8OaU&J)&Ln<*eo7Z{Lnr-g%8C?Par%MbH6@uVQbz3l_HX?!Ml%bk z3K5R|m8vK(6zBf?(agN-0HQHR|L;Ere}$y}8qNIiG5G&ts#3cwH2u~=avCdDAnSS!!h~697_qODQ~p8rmTna(|KRdkSFBoOf$+9Dv91* zgUkxOJ@3$DXCse2q00?e=1jjj@5|2H(-T587pr{DPAd1D&c>NA+ZBw9jU9C(_vp`* z73~~j5KRN;6i0SclO5izb2KfpkL;XBjL?GvC*Q5xrujI02i-WL03+8P{sNU7_Dvc+ zxs9rO0UcjC76{iZ9NMPBq>hv`&s7>;hqnXk37e6+9s2%@!Q*__x(*kYo z)9ty1(aikk@lSUFw6TbfRM;+R#2hQ+MPFHh52{#XZG5Vd=w2m{CT{dIv0t>VDt|wU z9K5DkE%x4EGRBE>b?wsQL2@WPg)%QXH8cN>-MH*9_kn*;wLnHw9Npx%*l4~RCpPot zKeY8Wzh~E%d7ZC}f0dMl@UWDUYZuEhB-k?O|t%<25vmtwE*0>BK|r5WkD{F z=K^R~K3rME!gvI)ojT7y8_JK8%EO{16zB!q33aVXp~q+{$10qon>>d`rE6p#EH70< z1$aZ(lE&lZ$?V0a+D(2&RmaGd?UL|Whxh5tOvX$!Qc~pdn1{C=o{;I4tjTp3WU3t{arz z_Q2yQ@{@n%>eDo$OT9K)lX=)DXev*g^VVLzLVO?MAIg4bBH=4&-up(Pk5^ci)29qp zt{L&Q|CsVlXHk;dn)h8NXM+clDJP)bVUxT}C?|zUNiRAPD%%6u(=Qun9HVNKRu60G z7qO+>9m&Pt`rn~@O|px0%*ry4gxi_~2=-Ot%6*5mmIMUXY1|cklJ4VvvDa zo)v|6apY@b$Sj1)+O^BYR5*R#uu*7hC6SNYdve#?%C+y5*1X;@bNq8~gmq3niWZD# zwpOvp`Y^&e08X-}@lDS3pgcPpS4{s_MD*&0f;*_XN+U$J78y>A=4hi;n3-W5nCu%l z4W%}o#C%pXSVT+FHeeuWDvhM0<#gmu%B%Mq)?)5IIjMFA5E>7^L$DNERQu#LR%j+k zdjTmj7%ncxc0l^!Q5p$J&xp;mC#ILP5lEgyoX4ixf=*GYloc*cz|DlN^%W5?KSsbW5pn49>z_WqVk?pY`@OsvL5Dot4+RVn!qfpi*rA+70b^BR^Na|rtziJZV4F-t~39b)u*xC<2SJe@4z66ktS@ z#JgMF9ek0@H5;ddiGnXkkO%)1ykE>sN}}w?L+%Cum$3)`oaw8}mg)kcM$!(vZ#@7A z52$m1Sb(s72$-ppxFV+^NkNN*{QVOy#NbTJh9M(lX7&PR~R*3o_ zmM83>>|gKC_H=|9QZbJ#HoBz)l(vLr{>hg5Nhe*xp5?M8dc2Nf2(d&Ud~?E5#pF*c zOqIUbckQ$Vh8_j|NMcP%EE+B%%;ZafknE5V-V(b^g#qM|36{iAnYlzJ^^jlOe#82V zmwCy@6k%@ZbR=ihwL?HOiv?rGBr*4m@nJhLX?$0ScvnDSZ(5JuIs|*|>q7Nu+4PWf z;4aTkf}d38>X29mf*e3nfD#ELu>3<@+F=vFqi~M8!(2v{!jbB5k0P|2cH!dZTy>v4 zL7$lTNG95n2P{bk*2uH^IA8!zVuC4o#lmTjzx93|>bd+ce4khdG*bx49Mo-z%r0W; z`4s2}AmlNSWx)i7B4JfM{E43FYD{h{1F>!fHo@b(Py~hN|1g$wyiE>Eon4a!T@*(Z z527ZKQ%uPn6m)W3Kpjsafr+r>=WqCWe0=0!^;^hLxL_i0=7CfBM0E-}l#w1w$v>ID zdyN}gRZGLitpMT`>bR}fKLM`i=PprVmbx>) z5*uk}qU%87`J=vDiM0S>d@4itO10(kLIa+^DX+HJ1%Zu7#Ec~$V91#kVY*3 zr3pkt6!O_jiBX4!QE<+cdnK92%21oi+$u{oukD$kpe2cAE*%=gcV$*FaE}g{RY!@( zyAt^TqCz1G)<@i+e7>*-j+c%Cub#8aIA_ve;mvnDVU{HJU#wqLqHxY>sD^o^=0f*s zv%#%qPFfnt-!wD#2OgF!){Q8sjmXx0*!U#4_|aQR#xs zsmO@PE=kWxv0l9Y{Ypjp%B${9*Q}+dXOMXU%q9amS4sL@jf;6OA?I=BqgAfkhvlbb 
z-Fa4IIrb7Bh&_*i3lVc9;4=9Q z)PWQl39~>#!UZ=ftEyxMG4FwEQIvdatr$`cmq$XziE`>fo=e<&DlyNL90RW3*Fk~g zU>BjpM}Rd!#JYi>Kt@?>+MP?f@o6vWvvhH_bT}8lh2w!-5-x*-=qKXdEZ$laE4c%xBJEW0YbM88NAa=jLcBB^t1*C`25_+gK!$`E z6JqB`7z6`)AHdDvan0Ofgp zLiyiu&n2jWp~^=^mAO};o<7|U?_wV&bib^=Lgtx%`%Vo`gT?JImSMWB;GIkvcf*VA z{^fkcnwQDar{PEUxoYA~Y^sgdj5pzbNq%6vY`24bl+iB~`_+ATxXn(txSbXCvMj%w7<;4z&KvE!b!^jjN_sA1wT+(-Di;m zKoQdm=_yltNAwyn;y>we|1~P>*Ek-$3Hlkw{~uva zW-GhgyYtg?3f=_W{r1zP{U@H&;4Zt=3.4.0 <4.0.0" - flutter: ">=3.22.0" + sdk: ">=3.0.0 <4.0.0" + flutter: ">=3.19.0" dependencies: async: ^2.11.0 beautiful_soup_dart: ^0.3.0 characters: ^1.3.0 - collection: ^1.18.0 - cross_file: ^0.3.4+2 + collection: '>=1.17.0 <1.19.0' + cross_file: ^0.3.4+1 crypto: ^3.0.3 csv: ^6.0.0 equatable: ^2.0.5 - fetch_client: ^1.1.2 - firebase_app_check: ^0.3.0 - firebase_auth: ^5.1.0 - firebase_core: ^3.3.0 - firebase_vertexai: ^0.2.2 - flat_buffers: ^23.5.26 - flutter_bloc: ^8.1.6 - flutter_markdown: ^0.7.3 - freezed_annotation: ^2.4.2 - gcloud: ^0.8.13 - google_generative_ai: 0.4.4 - googleapis: ^13.0.0 - googleapis_auth: ^1.6.0 - http: ^1.2.2 + fetch_client: ^1.0.2 + firebase_app_check: ^0.2.2+5 + firebase_core: ^2.31.0 + firebase_vertexai: ^0.1.0 + flutter_bloc: ^8.1.5 + flutter_markdown: ^0.6.22 + freezed_annotation: ^2.4.1 + gcloud: ^0.8.12 + google_generative_ai: 0.4.0 + googleapis: ^12.0.0 + googleapis_auth: ^1.5.1 + http: ^1.1.0 js: ^0.7.1 - json_annotation: ^4.9.0 - json_path: ^0.7.4 + json_annotation: ^4.8.1 + json_path: ^0.7.1 langchain_tiktoken: ^1.0.1 - math_expressions: ^2.6.0 + math_expressions: ^2.4.0 meta: ^1.11.0 - objectbox: ^4.0.1 pinecone: ^0.7.2 - rxdart: ">=0.27.7 <0.29.0" - shared_preferences: ^2.3.0 - shelf: ^1.4.2 + shared_preferences: ^2.2.2 + shelf: ^1.4.1 shelf_router: ^1.1.4 - supabase: ^2.2.7 - uuid: ^4.4.2 + supabase: ^2.0.8 + uuid: ^4.3.3 dev_dependencies: - build_runner: ^2.4.11 - freezed: ^2.5.7 - json_serializable: ^6.8.0 - objectbox_generator: ^4.0.1 + build_runner: ^2.4.9 + freezed: ^2.4.7 + json_serializable: ^6.7.1 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 - test: ^1.25.8 + ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + test: ^1.25.2 scripts: lint: diff --git a/packages/anthropic_sdk_dart/CHANGELOG.md b/packages/anthropic_sdk_dart/CHANGELOG.md deleted file mode 100644 index c9710913..00000000 --- a/packages/anthropic_sdk_dart/CHANGELOG.md +++ /dev/null @@ -1,18 +0,0 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.1.0 - - - **FEAT**: Add support for tool use in anthropic_sdk_dart client ([#469](https://github.com/davidmigloz/langchain_dart/issues/469)). ([81896cfd](https://github.com/davidmigloz/langchain_dart/commit/81896cfdfce116b010dd51391994251d2a836333)) - - **FEAT**: Add extensions on ToolResultBlockContent in anthropic_sdk_dart ([#476](https://github.com/davidmigloz/langchain_dart/issues/476)). ([8d92d9b0](https://github.com/davidmigloz/langchain_dart/commit/8d92d9b008755ff9b9ca3545eb26fc49a296a909)) - - **REFACTOR**: Improve schemas names in anthropic_sdk_dart ([#475](https://github.com/davidmigloz/langchain_dart/issues/475)). 
([8ebeacde](https://github.com/davidmigloz/langchain_dart/commit/8ebeacded02ab92885354c9447b1a55e024b56d1)) - - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) - -## 0.0.1 - - - **FEAT**: Implement anthropic_sdk_dart, a Dart client for Anthropic API ([#433](https://github.com/davidmigloz/langchain_dart/issues/433)). ([e5412b](https://github.com/davidmigloz/langchain_dart/commit/e5412bdedc7de911f7de88eb51e9d41cd85ab4ae)) - -## 0.0.1-dev.1 - - - Bootstrap package. diff --git a/packages/anthropic_sdk_dart/LICENSE b/packages/anthropic_sdk_dart/LICENSE deleted file mode 100644 index f407ffdd..00000000 --- a/packages/anthropic_sdk_dart/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 David Miguel Lozano - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/packages/anthropic_sdk_dart/README.md b/packages/anthropic_sdk_dart/README.md deleted file mode 100644 index dc51d776..00000000 --- a/packages/anthropic_sdk_dart/README.md +++ /dev/null @@ -1,304 +0,0 @@ -# Anthropic Dart Client - -[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) -[![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) -[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) -[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) - -Unofficial Dart client for [Anthropic](https://docs.anthropic.com/en/api) API (aka Claude API). - -## Features - -- Fully type-safe, [documented](https://pub.dev/documentation/anthropic_sdk_dart/latest) and tested -- All platforms supported (including streaming on web) -- Custom base URL, headers and query params support (e.g. HTTP proxies) -- Custom HTTP client support (e.g. 
SOCKS5 proxies or advanced use cases) - -**Supported endpoints:** - -- Messages (with tools and streaming support) - -## Table of contents - -- [Usage](#usage) - * [Authentication](#authentication) - * [Messages](#messages) - * [Tool use](#tool-use) -- [Advance Usage](#advance-usage) - * [Default HTTP client](#default-http-client) - * [Custom HTTP client](#custom-http-client) - * [Using a proxy](#using-a-proxy) - + [HTTP proxy](#http-proxy) - + [SOCKS5 proxy](#socks5-proxy) -- [Acknowledgements](#acknowledgements) -- [License](#license) - -## Usage - -Refer to the [documentation](https://docs.anthropic.com) for more information about the API. - -### Authentication - -The Anthropic API uses API keys for authentication. Visit the [Anthropic console](https://console.anthropic.com/settings/keys) to retrieve the API key you'll use in your requests. - -> **Remember that your API key is a secret!** -> Do not share it with others or expose it in any client-side code (browsers, apps). Production requests must be routed through your own backend server where your API key can be securely loaded from an environment variable or key management service. - -```dart -final apiKey = Platform.environment['ANTHROPIC_API_KEY']; -final client = AnthropicClient(apiKey: apiKey); -``` - -### Messages - -Send a structured list of input messages with text and/or image content, and the model will generate the next message in the conversation. - -**Create a Message:** - -```dart -final res = await client.createMessage( - request: CreateMessageRequest( - model: Model.model(Models.claude35Sonnet20240620), - maxTokens: 1024, - messages: [ - Message( - role: MessageRole.user, - content: MessageContent.text('Hello, Claude'), - ), - ], - ), -); -print(res.content.text); -// Hello! It's nice to meet you. How are you doing today? -``` - -`Model` is a sealed class that offers two ways to specify the model: -- `Model.modelId('model-id')`: the model ID as string (e.g. `'claude-instant-1.2'`). -- `Model.model(Models.claude35Sonnet20240620)`: a value from `Models` enum which lists all the available models. - -Mind that this list may not be up-to-date. Refer to the [documentation](https://docs.anthropic.com/en/docs/models-overview) for the updated list. - -**Streaming messages:** - -```dart -final stream = client.createMessageStream( - request: CreateMessageRequest( - model: Model.model(Models.claude35Sonnet20240620), - maxTokens: 1024, - messages: [ - Message( - role: MessageRole.user, - content: MessageContent.text('Hello, Claude'), - ), - ], - ), -); -await for (final res in stream) { - res.map( - messageStart: (MessageStartEvent e) {}, - messageDelta: (MessageDeltaEvent e) {}, - messageStop: (MessageStopEvent e) {}, - contentBlockStart: (ContentBlockStartEvent e) {}, - contentBlockDelta: (ContentBlockDeltaEvent e) { - stdout.write(e.delta.text); - }, - contentBlockStop: (ContentBlockStopEvent e) {}, - ping: (PingEvent e) {}, - ); -} -// Hello! It's nice to meet you. How are you doing today? -``` - -### Tool use - -Claude is capable of interacting with external client-side tools and functions, allowing you to equip Claude with your own custom tools to perform a wider variety of tasks. - -Refer to the [official documentation](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) for more information. 
- -In the following example, we want the model to be able to use our function that return the current weather in a given city: - -```dart -Map _getCurrentWeather( - final String location, - final String unit, -) { - const temperature = 22; - const weather = 'Sunny'; - return { - 'temperature': unit == 'celsius' ? temperature : (temperature * 9 / 5) + 32, - 'unit': unit, - 'description': weather, - }; -} -``` - -To do that, we need to provide the definition of the tool: -```dart -const tool = Tool( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, -); -``` - -Then we can use the tool in the message request: -```dart -final request1 = CreateMessageRequest( - model: Model.model(Models.claude35Sonnet20240620), - messages: [ - Message( - role: MessageRole.user, - content: MessageContent.text( - 'What’s the weather like in Boston right now?', - ), - ), - ], - tools: [tool], - toolChoice: ToolChoice( - type: ToolChoiceType.tool, - name: tool.name, - ), - maxTokens: 1024, -); -final aiMessage1 = await client.createMessage(request: request1); - -final toolUse = aiMessage1.content.blocks.firstOrNull; -if (toolUse == null || toolUse is! ToolUseBlock) { - return; -} - -// Call your tool here with the given input -final toolResult = _getCurrentWeather( - toolUse.input['location'], - toolUse.input['unit'], -); - -final request2 = CreateMessageRequest( - model: Model.model(Models.claude35Sonnet20240620), - messages: [ - Message( - role: MessageRole.user, - content: MessageContent.text( - 'What’s the weather like in Boston right now in Fahrenheit?', - ), - ), - Message( - role: MessageRole.assistant, - content: aiMessage1.content, - ), - Message( - role: MessageRole.user, - content: MessageContent.blocks([ - Block.toolResult( - toolUseId: toolUse.id, - content: ToolResultBlockContent.text(json.encode(toolResult)), - ), - ]), - ), - ], - tools: [tool], - maxTokens: 1024, -); -final aiMessage2 = await client.createMessage(request: request2); - -print(aiMessage2.content.text); -// Based on the current weather information for Boston, here's what it's like right now: -// -// The temperature in Boston is 71.6°F (Fahrenheit). -// The weather conditions are described as sunny. 
-```
-
-You can also stream the input for a tool:
-
-```dart
-final stream = client.createMessageStream(request: request);
-await for (final res in stream) {
-  res.map(
-    messageStart: (MessageStartEvent v) {},
-    messageDelta: (MessageDeltaEvent v) {},
-    messageStop: (MessageStopEvent v) {},
-    contentBlockStart: (ContentBlockStartEvent v) {},
-    contentBlockDelta: (ContentBlockDeltaEvent v) {
-      stdout.write(v.delta.inputJson);
-    },
-    contentBlockStop: (ContentBlockStopEvent v) {},
-    ping: (PingEvent v) {},
-  );
-}
-// {"location": "Boston, MA", "unit": "fahrenheit"}
-```
-
-## Advance Usage
-
-### Default HTTP client
-
-By default, the client uses `https://api.anthropic.com/v1` as the `baseUrl` and the following implementations of `http.Client`:
-
-- Non-web: [`IOClient`](https://pub.dev/documentation/http/latest/io_client/IOClient-class.html)
-- Web: [`FetchClient`](https://pub.dev/documentation/fetch_client/latest/fetch_client/FetchClient-class.html) (to support streaming on web)
-
-### Custom HTTP client
-
-You can always provide your own implementation of `http.Client` for further customization:
-
-```dart
-final client = AnthropicClient(
-  apiKey: 'ANTHROPIC_API_KEY',
-  client: MyHttpClient(),
-);
-```
-
-### Using a proxy
-
-#### HTTP proxy
-
-You can use your own HTTP proxy by overriding the `baseUrl` and providing your required `headers`:
-
-```dart
-final client = AnthropicClient(
-  baseUrl: 'https://my-proxy.com',
-  headers: {
-    'x-my-proxy-header': 'value',
-  },
-);
-```
-
-If you need further customization, you can always provide your own `http.Client`.
-
-#### SOCKS5 proxy
-
-To use a SOCKS5 proxy, you can use the [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package:
-
-```dart
-final baseHttpClient = HttpClient();
-SocksTCPClient.assignToHttpClient(baseHttpClient, [
-  ProxySettings(InternetAddress.loopbackIPv4, 1080),
-]);
-final httpClient = IOClient(baseHttpClient);
-
-final client = AnthropicClient(
-  client: httpClient,
-);
-```
-
-## Acknowledgements
-
-The generation of this client was made possible by the [openapi_spec](https://github.com/tazatechnology/openapi_spec) package.
-
-## License
-
-Anthropic Dart Client is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).
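The "Custom HTTP client" example in the removed README passes a `MyHttpClient()` that is never defined. As a minimal sketch only (not part of the original package; the class name comes from the README placeholder and the header name is purely illustrative), such a client can be built on `package:http`'s `BaseClient` by wrapping an inner client and adjusting each request before delegating to it:

```dart
import 'package:http/http.dart' as http;

/// Hypothetical custom HTTP client: injects an extra header into every
/// request and delegates the actual work to an inner client.
class MyHttpClient extends http.BaseClient {
  MyHttpClient({http.Client? inner}) : _inner = inner ?? http.Client();

  final http.Client _inner;

  @override
  Future<http.StreamedResponse> send(http.BaseRequest request) {
    // 'x-my-header' is an illustrative name, not an Anthropic API header.
    request.headers['x-my-header'] = 'value';
    return _inner.send(request);
  }

  @override
  void close() {
    _inner.close();
    super.close();
  }
}
```

Any `http.Client` built this way can then be passed as the `client` argument of `AnthropicClient`, as the README snippet above shows.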
diff --git a/packages/anthropic_sdk_dart/analysis_options.yaml b/packages/anthropic_sdk_dart/analysis_options.yaml deleted file mode 100644 index f04c6cf0..00000000 --- a/packages/anthropic_sdk_dart/analysis_options.yaml +++ /dev/null @@ -1 +0,0 @@ -include: ../../analysis_options.yaml diff --git a/packages/anthropic_sdk_dart/build.yaml b/packages/anthropic_sdk_dart/build.yaml deleted file mode 100644 index dee719ac..00000000 --- a/packages/anthropic_sdk_dart/build.yaml +++ /dev/null @@ -1,13 +0,0 @@ -targets: - $default: - builders: - source_gen|combining_builder: - options: - ignore_for_file: - - prefer_final_parameters - - require_trailing_commas - - non_constant_identifier_names - - unnecessary_null_checks - json_serializable: - options: - explicit_to_json: true diff --git a/packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart b/packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart deleted file mode 100644 index 0a576196..00000000 --- a/packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart +++ /dev/null @@ -1,200 +0,0 @@ -// ignore_for_file: avoid_print -import 'dart:async'; -import 'dart:convert'; -import 'dart:io'; - -import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; - -Future main() async { - final client = AnthropicClient( - apiKey: Platform.environment['ANTHROPIC_API_KEY'], - ); - - await _createMessage(client); - await _createMessageStream(client); - await _toolUse(client); - await _toolUseStreaming(client); - - client.endSession(); -} - -Future _createMessage(final AnthropicClient client) async { - final res = await client.createMessage( - request: const CreateMessageRequest( - model: Model.model(Models.claude35Sonnet20240620), - maxTokens: 1024, - messages: [ - Message( - role: MessageRole.user, - content: MessageContent.text('Hello, Claude'), - ), - ], - ), - ); - print(res.content.text); - // Hello! It's nice to meet you. How are you doing today? -} - -Future _createMessageStream(final AnthropicClient client) async { - final stream = client.createMessageStream( - request: const CreateMessageRequest( - model: Model.model(Models.claude35Sonnet20240620), - maxTokens: 1024, - messages: [ - Message( - role: MessageRole.user, - content: MessageContent.text('Hello, Claude'), - ), - ], - ), - ); - await for (final res in stream) { - res.map( - messageStart: (MessageStartEvent e) {}, - messageDelta: (MessageDeltaEvent e) {}, - messageStop: (MessageStopEvent e) {}, - contentBlockStart: (ContentBlockStartEvent e) {}, - contentBlockDelta: (ContentBlockDeltaEvent e) { - stdout.write(e.delta.text); - }, - contentBlockStop: (ContentBlockStopEvent e) {}, - ping: (PingEvent e) {}, - ); - } - // Hello! It's nice to meet you. How are you doing today? -} - -Future _toolUse(final AnthropicClient client) async { - final request1 = CreateMessageRequest( - model: const Model.model(Models.claude35Sonnet20240620), - messages: [ - const Message( - role: MessageRole.user, - content: MessageContent.text( - 'What’s the weather like in Boston right now?', - ), - ), - ], - tools: [tool], - toolChoice: ToolChoice( - type: ToolChoiceType.tool, - name: tool.name, - ), - maxTokens: 1024, - ); - - final aiMessage1 = await client.createMessage(request: request1); - - final toolUse = aiMessage1.content.blocks.firstOrNull; - if (toolUse == null || toolUse is! 
ToolUseBlock) { - return; - } - - // Call your tool here with the given input - final toolResult = _getCurrentWeather( - toolUse.input['location'], - toolUse.input['unit'], - ); - - final request2 = CreateMessageRequest( - model: const Model.model(Models.claude35Sonnet20240620), - messages: [ - const Message( - role: MessageRole.user, - content: MessageContent.text( - 'What’s the weather like in Boston right now in Fahrenheit?', - ), - ), - Message( - role: MessageRole.assistant, - content: aiMessage1.content, - ), - Message( - role: MessageRole.user, - content: MessageContent.blocks([ - Block.toolResult( - toolUseId: toolUse.id, - content: ToolResultBlockContent.text(json.encode(toolResult)), - ), - ]), - ), - ], - tools: [tool], - maxTokens: 1024, - ); - final aiMessage2 = await client.createMessage(request: request2); - print(aiMessage2.content.text); - // Based on the current weather information for Boston, here's what it's like right now: - // - // The temperature in Boston is 71.6°F (Fahrenheit). - // The weather conditions are described as sunny. -} - -Future _toolUseStreaming(final AnthropicClient client) async { - final request = CreateMessageRequest( - model: const Model.model(Models.claude35Sonnet20240620), - messages: [ - const Message( - role: MessageRole.user, - content: MessageContent.text( - 'What’s the weather like in Boston right now in Fahrenheit?', - ), - ), - ], - tools: [tool], - toolChoice: ToolChoice( - type: ToolChoiceType.tool, - name: tool.name, - ), - maxTokens: 1024, - ); - - final stream = client.createMessageStream(request: request); - await for (final res in stream) { - res.map( - messageStart: (MessageStartEvent v) {}, - messageDelta: (MessageDeltaEvent v) {}, - messageStop: (MessageStopEvent v) {}, - contentBlockStart: (ContentBlockStartEvent v) {}, - contentBlockDelta: (ContentBlockDeltaEvent v) { - stdout.write(v.delta.inputJson); - }, - contentBlockStop: (ContentBlockStopEvent v) {}, - ping: (PingEvent v) {}, - ); - } - // {"location": "Boston, MA", "unit": "fahrenheit"} -} - -Map _getCurrentWeather( - final String location, - final String unit, -) { - const temperature = 22; - const weather = 'Sunny'; - return { - 'temperature': unit == 'celsius' ? temperature : (temperature * 9 / 5) + 32, - 'unit': unit, - 'description': weather, - }; -} - -const tool = Tool( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, -); diff --git a/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart deleted file mode 100644 index 4cc40a27..00000000 --- a/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart +++ /dev/null @@ -1,7 +0,0 @@ -/// Dart Client for the Anthropic API (Claude 3.5 Sonnet, Opus, Haiku, etc.). 
-library; - -export 'src/client.dart'; -export 'src/extensions.dart'; -export 'src/generated/client.dart' show AnthropicClientException; -export 'src/generated/schema/schema.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/client.dart b/packages/anthropic_sdk_dart/lib/src/client.dart deleted file mode 100644 index 3d02a34b..00000000 --- a/packages/anthropic_sdk_dart/lib/src/client.dart +++ /dev/null @@ -1,104 +0,0 @@ -// ignore_for_file: use_super_parameters -import 'dart:async'; -import 'dart:convert'; - -import 'package:http/http.dart' as http; - -import 'generated/client.dart' as g; -import 'generated/schema/schema.dart'; -import 'http_client/http_client.dart'; - -/// Client for Anthropic API. -/// -/// Please see https://docs.anthropic.com/en/api for more details. -class AnthropicClient extends g.AnthropicClient { - /// Create a new Anthropic API client. - /// - /// Main configuration options: - /// - `apiKey`: your Anthropic API key. You can find your API key in the - /// [Anthropic console](https://console.anthropic.com/settings/keys). - /// - /// Advance configuration options: - /// - `baseUrl`: the base URL to use. Defaults to `https://api.anthropic.com/v1`. - /// You can override this to use a different API URL, or to use a proxy. - /// - `headers`: global headers to send with every request. You can use - /// this to set custom headers, or to override the default headers. - /// - `queryParams`: global query parameters to send with every request. You - /// can use this to set custom query parameters. - /// - `client`: the HTTP client to use. You can set your own HTTP client if - /// you need further customization (e.g. to use a Socks5 proxy). - AnthropicClient({ - final String? apiKey, - final String? baseUrl, - final Map? headers, - final Map? queryParams, - final http.Client? client, - }) : super( - apiKey: apiKey ?? '', - baseUrl: baseUrl, - headers: { - 'anthropic-version': '2023-06-01', - ...?headers, - }, - queryParams: queryParams ?? const {}, - client: client ?? createDefaultHttpClient(), - ); - - // ------------------------------------------ - // METHOD: createMessageStream - // ------------------------------------------ - - /// Create a Message - /// - /// Send a structured list of input messages with text and/or image content, and the - /// model will generate the next message in the conversation. - /// - /// The Messages API can be used for either single queries or stateless multi-turn - /// conversations. - /// - /// `request`: The request parameters for creating a message. 
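  /// For example, the stream can be consumed like this (a sketch; it assumes a
  /// `CreateMessageRequest` named `request` has already been built and uses the
  /// `text` getter from the package's extensions):
  ///
  /// ```dart
  /// await for (final event in client.createMessageStream(request: request)) {
  ///   event.mapOrNull(
  ///     contentBlockDelta: (e) => stdout.write(e.delta.text),
  ///   );
  /// }
  /// ```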
- /// - /// `POST` `https://api.anthropic.com/v1/messages` - Stream createMessageStream({ - required final CreateMessageRequest request, - }) async* { - final r = await makeRequestStream( - baseUrl: 'https://api.anthropic.com/v1', - path: '/messages', - method: g.HttpMethod.post, - requestType: 'application/json', - responseType: 'application/json', - body: request.copyWith(stream: true), - headerParams: { - if (apiKey.isNotEmpty) 'x-api-key': apiKey, - }, - ); - yield* r.stream - .transform(const _AnthropicStreamTransformer()) // - .map( - (final d) { - final j = json.decode(d) as Map; - return MessageStreamEvent.fromJson(j); - }, - ); - } - - @override - Future onRequest(final http.BaseRequest request) { - return onRequestHandler(request); - } -} - -class _AnthropicStreamTransformer - extends StreamTransformerBase, String> { - const _AnthropicStreamTransformer(); - - @override - Stream bind(final Stream> stream) { - return stream // - .transform(utf8.decoder) // - .transform(const LineSplitter()) // - .where((final i) => i.startsWith('data: ')) - .map((final item) => item.substring(6)); - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/extensions.dart b/packages/anthropic_sdk_dart/lib/src/extensions.dart deleted file mode 100644 index ddafbab9..00000000 --- a/packages/anthropic_sdk_dart/lib/src/extensions.dart +++ /dev/null @@ -1,92 +0,0 @@ -import 'generated/schema/schema.dart'; - -/// Extension methods for [MessageContent]. -extension MessageContentX on MessageContent { - /// Returns the text content of the message. - String get text { - return map( - text: (text) => text.value, - blocks: (blocks) => - blocks.value.whereType().map((t) => t.text).join('\n'), - ); - } - - /// Returns the blocks of the message. - List get blocks { - return map( - text: (text) => [Block.text(text: text.value)], - blocks: (blocks) => blocks.value, - ); - } -} - -/// Extension methods for [ToolResultBlockContent]. -extension ToolResultBlockContentX on ToolResultBlockContent { - /// Returns the text content of the tool result block content. - String get text { - return map( - text: (ToolResultBlockContentText t) => t.value, - blocks: (b) => - b.value.whereType().map((t) => t.text).join('\n'), - ); - } - - /// Returns the blocks of the tool result block content. - List get blocks { - return map( - text: (t) => [Block.text(text: t.value)], - blocks: (b) => b.value, - ); - } -} - -/// Extension methods for [Block]. -extension BlockX on Block { - /// Returns the text content of the block. - String get text { - return mapOrNull( - text: (text) => text.text, - ) ?? - ''; - } - - /// Returns the image source of the block. - ImageBlock? get image { - return mapOrNull( - image: (image) => image, - ); - } - - /// Returns the tool use block. - ToolUseBlock? get toolUse { - return mapOrNull( - toolUse: (toolUse) => toolUse, - ); - } - - /// Returns the tool result block. - ToolResultBlock? get toolResult { - return mapOrNull( - toolResult: (toolResult) => toolResult, - ); - } -} - -/// Extension methods for [BlockDelta]. -extension BlockDeltaX on BlockDelta { - /// Returns the text content of the block delta. - String get text { - return map( - textDelta: (text) => text.text, - inputJsonDelta: (inputJson) => '', - ); - } - - /// Returns the type of the block delta. - String get inputJson { - return map( - textDelta: (text) => '', - inputJsonDelta: (inputJson) => inputJson.partialJson ?? 
'', - ); - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/client.dart b/packages/anthropic_sdk_dart/lib/src/generated/client.dart deleted file mode 100644 index 0f3e82a8..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/client.dart +++ /dev/null @@ -1,395 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target, unused_import - -import 'dart:convert'; -import 'dart:typed_data'; - -import 'package:http/http.dart' as http; -import 'package:http/retry.dart'; -import 'package:meta/meta.dart'; - -import 'schema/schema.dart'; - -/// Enum of HTTP methods -enum HttpMethod { get, put, post, delete, options, head, patch, trace } - -// ========================================== -// CLASS: AnthropicClientException -// ========================================== - -/// HTTP exception handler for AnthropicClient -class AnthropicClientException implements Exception { - AnthropicClientException({ - required this.message, - required this.uri, - required this.method, - this.code, - this.body, - }); - - final String message; - final Uri uri; - final HttpMethod method; - final int? code; - final Object? body; - - @override - String toString() { - Object? data; - try { - data = body is String ? jsonDecode(body as String) : body.toString(); - } catch (e) { - data = body.toString(); - } - final s = JsonEncoder.withIndent(' ').convert({ - 'uri': uri.toString(), - 'method': method.name.toUpperCase(), - 'code': code, - 'message': message, - 'body': data, - }); - return 'AnthropicClientException($s)'; - } -} - -// ========================================== -// CLASS: AnthropicClient -// ========================================== - -/// Client for Anthropic API (v.1) -/// -/// API Spec for Anthropic API. Please see https://docs.anthropic.com/en/api for more details. -class AnthropicClient { - /// Creates a new AnthropicClient instance. - /// - /// - [AnthropicClient.baseUrl] Override base URL (default: server url defined in spec) - /// - [AnthropicClient.headers] Global headers to be sent with every request - /// - [AnthropicClient.queryParams] Global query parameters to be sent with every request - /// - [AnthropicClient.client] Override HTTP client to use for requests - AnthropicClient({ - this.apiKey = '', - this.baseUrl, - this.headers = const {}, - this.queryParams = const {}, - http.Client? client, - }) : assert( - baseUrl == null || baseUrl.startsWith('http'), - 'baseUrl must start with http', - ), - assert( - baseUrl == null || !baseUrl.endsWith('/'), - 'baseUrl must not end with /', - ), - client = RetryClient(client ?? http.Client()); - - /// Override base URL (default: server url defined in spec) - final String? 
baseUrl; - - /// Global headers to be sent with every request - final Map headers; - - /// Global query parameters to be sent with every request - final Map queryParams; - - /// HTTP client for requests - final http.Client client; - - /// Authentication related variables - final String apiKey; - - // ------------------------------------------ - // METHOD: endSession - // ------------------------------------------ - - /// Close the HTTP client and end session - void endSession() => client.close(); - - // ------------------------------------------ - // METHOD: onRequest - // ------------------------------------------ - - /// Middleware for HTTP requests (user can override) - /// - /// The request can be of type [http.Request] or [http.MultipartRequest] - Future onRequest(http.BaseRequest request) { - return Future.value(request); - } - - // ------------------------------------------ - // METHOD: onStreamedResponse - // ------------------------------------------ - - /// Middleware for HTTP streamed responses (user can override) - Future onStreamedResponse( - final http.StreamedResponse response, - ) { - return Future.value(response); - } - - // ------------------------------------------ - // METHOD: onResponse - // ------------------------------------------ - - /// Middleware for HTTP responses (user can override) - Future onResponse(http.Response response) { - return Future.value(response); - } - - // ------------------------------------------ - // METHOD: _jsonDecode - // ------------------------------------------ - - dynamic _jsonDecode(http.Response r) { - return json.decode(utf8.decode(r.bodyBytes)); - } - - // ------------------------------------------ - // METHOD: _request - // ------------------------------------------ - - /// Reusable request method - @protected - Future _request({ - required String baseUrl, - required String path, - required HttpMethod method, - Map queryParams = const {}, - Map headerParams = const {}, - bool isMultipart = false, - String requestType = '', - String responseType = '', - Object? body, - }) async { - // Override with the user provided baseUrl - baseUrl = this.baseUrl ?? 
baseUrl; - - // Ensure a baseUrl is provided - assert( - baseUrl.isNotEmpty, - 'baseUrl is required, but none defined in spec or provided by user', - ); - - // Add global query parameters - queryParams = {...queryParams, ...this.queryParams}; - - // Ensure query parameters are strings or iterable of strings - queryParams = queryParams.map((key, value) { - if (value is Iterable) { - return MapEntry(key, value.map((v) => v.toString())); - } else { - return MapEntry(key, value.toString()); - } - }); - - // Build the request URI - Uri uri = Uri.parse(baseUrl + path); - if (queryParams.isNotEmpty) { - uri = uri.replace(queryParameters: queryParams); - } - - // Build the headers - Map headers = {...headerParams}; - - // Define the request type being sent to server - if (requestType.isNotEmpty) { - headers['content-type'] = requestType; - } - - // Define the response type expected to receive from server - if (responseType.isNotEmpty) { - headers['accept'] = responseType; - } - - // Add global headers - headers.addAll(this.headers); - - // Build the request object - http.BaseRequest request; - if (isMultipart) { - // Handle multipart request - request = http.MultipartRequest(method.name, uri); - request = request as http.MultipartRequest; - if (body is List) { - request.files.addAll(body); - } else { - request.files.add(body as http.MultipartFile); - } - } else { - // Handle normal request - request = http.Request(method.name, uri); - request = request as http.Request; - try { - if (body != null) { - request.body = json.encode(body); - } - } catch (e) { - // Handle request encoding error - throw AnthropicClientException( - uri: uri, - method: method, - message: 'Could not encode: ${body.runtimeType}', - body: e, - ); - } - } - - // Add request headers - request.headers.addAll(headers); - - // Handle user request middleware - request = await onRequest(request); - - // Submit request - return await client.send(request); - } - - // ------------------------------------------ - // METHOD: makeRequestStream - // ------------------------------------------ - - /// Reusable request stream method - @protected - Future makeRequestStream({ - required String baseUrl, - required String path, - required HttpMethod method, - Map queryParams = const {}, - Map headerParams = const {}, - bool isMultipart = false, - String requestType = '', - String responseType = '', - Object? body, - }) async { - final uri = Uri.parse((this.baseUrl ?? 
baseUrl) + path); - late http.StreamedResponse response; - try { - response = await _request( - baseUrl: baseUrl, - path: path, - method: method, - queryParams: queryParams, - headerParams: headerParams, - requestType: requestType, - responseType: responseType, - body: body, - ); - // Handle user response middleware - response = await onStreamedResponse(response); - } catch (e) { - // Handle request and response errors - throw AnthropicClientException( - uri: uri, - method: method, - message: 'Response error', - body: e, - ); - } - - // Check for successful response - if ((response.statusCode ~/ 100) == 2) { - return response; - } - - // Handle unsuccessful response - throw AnthropicClientException( - uri: uri, - method: method, - message: 'Unsuccessful response', - code: response.statusCode, - body: (await http.Response.fromStream(response)).body, - ); - } - - // ------------------------------------------ - // METHOD: makeRequest - // ------------------------------------------ - - /// Reusable request method - @protected - Future makeRequest({ - required String baseUrl, - required String path, - required HttpMethod method, - Map queryParams = const {}, - Map headerParams = const {}, - bool isMultipart = false, - String requestType = '', - String responseType = '', - Object? body, - }) async { - final uri = Uri.parse((this.baseUrl ?? baseUrl) + path); - late http.Response response; - try { - final streamedResponse = await _request( - baseUrl: baseUrl, - path: path, - method: method, - queryParams: queryParams, - headerParams: headerParams, - requestType: requestType, - responseType: responseType, - body: body, - ); - response = await http.Response.fromStream(streamedResponse); - // Handle user response middleware - response = await onResponse(response); - } catch (e) { - // Handle request and response errors - throw AnthropicClientException( - uri: uri, - method: method, - message: 'Response error', - body: e, - ); - } - - // Check for successful response - if ((response.statusCode ~/ 100) == 2) { - return response; - } - - // Handle unsuccessful response - throw AnthropicClientException( - uri: uri, - method: method, - message: 'Unsuccessful response', - code: response.statusCode, - body: response.body, - ); - } - - // ------------------------------------------ - // METHOD: createMessage - // ------------------------------------------ - - /// Create a Message - /// - /// Send a structured list of input messages with text and/or image content, and the - /// model will generate the next message in the conversation. - /// - /// The Messages API can be used for either single queries or stateless multi-turn - /// conversations. - /// - /// `request`: The request parameters for creating a message. 
- /// - /// `POST` `https://api.anthropic.com/v1/messages` - Future createMessage({ - required CreateMessageRequest request, - }) async { - final r = await makeRequest( - baseUrl: 'https://api.anthropic.com/v1', - path: '/messages', - method: HttpMethod.post, - isMultipart: false, - requestType: 'application/json', - responseType: 'application/json', - body: request, - headerParams: { - if (apiKey.isNotEmpty) 'x-api-key': apiKey, - }, - ); - return Message.fromJson(_jsonDecode(r)); - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart deleted file mode 100644 index e15126a3..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart +++ /dev/null @@ -1,155 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: Block -// ========================================== - -/// A block of content in a message. -@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) -sealed class Block with _$Block { - const Block._(); - - // ------------------------------------------ - // UNION: TextBlock - // ------------------------------------------ - - /// A block of text content. - const factory Block.text({ - /// The text content. - required String text, - - /// The type of content block. - @Default('text') String type, - }) = TextBlock; - - // ------------------------------------------ - // UNION: ImageBlock - // ------------------------------------------ - - /// A block of image content. - const factory Block.image({ - /// The source of an image block. - required ImageBlockSource source, - - /// The type of content block. - @Default('image') String type, - }) = ImageBlock; - - // ------------------------------------------ - // UNION: ToolUseBlock - // ------------------------------------------ - - /// The tool the model wants to use. - const factory Block.toolUse({ - /// A unique identifier for this particular tool use block. - /// This will be used to match up the tool results later. - required String id, - - /// The name of the tool being used. - required String name, - - /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. - required Map input, - - /// The type of content block. - @Default('tool_use') String type, - }) = ToolUseBlock; - - // ------------------------------------------ - // UNION: ToolResultBlock - // ------------------------------------------ - - /// The result of using a tool. - const factory Block.toolResult({ - /// The `id` of the tool use request this is a result for. - @JsonKey(name: 'tool_use_id') required String toolUseId, - - /// The result of the tool, as a string (e.g. `"content": "15 degrees"`) - /// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). - /// These content blocks can use the text or image types. - @_ToolResultBlockContentConverter() required ToolResultBlockContent content, - - /// Set to `true` if the tool execution resulted in an error. - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - - /// The type of content block. 
- @Default('tool_result') String type, - }) = ToolResultBlock; - - /// Object construction from a JSON representation - factory Block.fromJson(Map json) => _$BlockFromJson(json); -} - -// ========================================== -// ENUM: BlockEnumType -// ========================================== - -enum BlockEnumType { - @JsonValue('text') - text, - @JsonValue('image') - image, - @JsonValue('tool_use') - toolUse, - @JsonValue('tool_result') - toolResult, -} - -// ========================================== -// CLASS: ToolResultBlockContent -// ========================================== - -/// The result of the tool, as a string (e.g. `"content": "15 degrees"`) -/// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). -/// These content blocks can use the text or image types. -@freezed -sealed class ToolResultBlockContent with _$ToolResultBlockContent { - const ToolResultBlockContent._(); - - /// An array of content blocks. - const factory ToolResultBlockContent.blocks( - List value, - ) = ToolResultBlockContentBlocks; - - /// A single text block. - const factory ToolResultBlockContent.text( - String value, - ) = ToolResultBlockContentText; - - /// Object construction from a JSON representation - factory ToolResultBlockContent.fromJson(Map json) => - _$ToolResultBlockContentFromJson(json); -} - -/// Custom JSON converter for [ToolResultBlockContent] -class _ToolResultBlockContentConverter - implements JsonConverter { - const _ToolResultBlockContentConverter(); - - @override - ToolResultBlockContent fromJson(Object? data) { - if (data is List && data.every((item) => item is Map)) { - return ToolResultBlockContentBlocks(data - .map((i) => Block.fromJson(i as Map)) - .toList(growable: false)); - } - if (data is String) { - return ToolResultBlockContentText(data); - } - throw Exception( - 'Unexpected value for ToolResultBlockContent: $data', - ); - } - - @override - Object? toJson(ToolResultBlockContent data) { - return switch (data) { - ToolResultBlockContentBlocks(value: final v) => v, - ToolResultBlockContentText(value: final v) => v, - }; - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart deleted file mode 100644 index d107a864..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart +++ /dev/null @@ -1,56 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: BlockDelta -// ========================================== - -/// A delta in a streaming message. -@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) -sealed class BlockDelta with _$BlockDelta { - const BlockDelta._(); - - // ------------------------------------------ - // UNION: TextBlockDelta - // ------------------------------------------ - - /// A delta in a streaming text block. - const factory BlockDelta.textDelta({ - /// The text delta. - required String text, - - /// The type of content block. - required String type, - }) = TextBlockDelta; - - // ------------------------------------------ - // UNION: InputJsonBlockDelta - // ------------------------------------------ - - /// A delta in a streaming input JSON. - const factory BlockDelta.inputJsonDelta({ - /// The partial JSON delta. - @JsonKey(name: 'partial_json', includeIfNull: false) String? 
partialJson, - - /// The type of content block. - required String type, - }) = InputJsonBlockDelta; - - /// Object construction from a JSON representation - factory BlockDelta.fromJson(Map json) => - _$BlockDeltaFromJson(json); -} - -// ========================================== -// ENUM: BlockDeltaEnumType -// ========================================== - -enum BlockDeltaEnumType { - @JsonValue('text_delta') - textDelta, - @JsonValue('input_json_delta') - inputJsonDelta, -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart deleted file mode 100644 index e310adff..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart +++ /dev/null @@ -1,380 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: CreateMessageRequest -// ========================================== - -/// The request parameters for creating a message. -@freezed -class CreateMessageRequest with _$CreateMessageRequest { - const CreateMessageRequest._(); - - /// Factory constructor for CreateMessageRequest - const factory CreateMessageRequest({ - /// The model that will complete your prompt. - /// - /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional - /// details and options. - @_ModelConverter() required Model model, - - /// Input messages. - /// - /// Our models are trained to operate on alternating `user` and `assistant` - /// conversational turns. When creating a new `Message`, you specify the prior - /// conversational turns with the `messages` parameter, and the model then generates - /// the next `Message` in the conversation. - /// - /// Each input message must be an object with a `role` and `content`. You can - /// specify a single `user`-role message, or you can include multiple `user` and - /// `assistant` messages. The first message must always use the `user` role. - /// - /// If the final message uses the `assistant` role, the response content will - /// continue immediately from the content in that message. This can be used to - /// constrain part of the model's response. - /// - /// See [message content](https://docs.anthropic.com/en/api/messages-content) for - /// details on how to construct valid message objects. - /// - /// Example with a single `user` message: - /// - /// ```json - /// [{ "role": "user", "content": "Hello, Claude" }] - /// ``` - /// - /// Example with multiple conversational turns: - /// - /// ```json - /// [ - /// { "role": "user", "content": "Hello there." }, - /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, - /// { "role": "user", "content": "Can you explain LLMs in plain English?" } - /// ] - /// ``` - /// - /// Example with a partially-filled response from Claude: - /// - /// ```json - /// [ - /// { - /// "role": "user", - /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" - /// }, - /// { "role": "assistant", "content": "The best answer is (" } - /// ] - /// ``` - /// - /// Each input message `content` may be either a single `string` or an array of - /// content blocks, where each block has a specific `type`. Using a `string` for - /// `content` is shorthand for an array of one content block of type `"text"`. 
The - /// following input messages are equivalent: - /// - /// ```json - /// { "role": "user", "content": "Hello, Claude" } - /// ``` - /// - /// ```json - /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } - /// ``` - /// - /// Starting with Claude 3 models, you can also send image content blocks: - /// - /// ```json - /// { - /// "role": "user", - /// "content": [ - /// { - /// "type": "image", - /// "source": { - /// "type": "base64", - /// "media_type": "image/jpeg", - /// "data": "/9j/4AAQSkZJRg..." - /// } - /// }, - /// { "type": "text", "text": "What is in this image?" } - /// ] - /// } - /// ``` - /// - /// We currently support the `base64` source type for images, and the `image/jpeg`, - /// `image/png`, `image/gif`, and `image/webp` media types. - /// - /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more - /// input examples. - /// - /// Note that if you want to include a - /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use - /// the top-level `system` parameter — there is no `"system"` role for input - /// messages in the Messages API. - required List messages, - - /// The maximum number of tokens to generate before stopping. - /// - /// Note that our models may stop _before_ reaching this maximum. This parameter - /// only specifies the absolute maximum number of tokens to generate. - /// - /// Different models have different maximum values for this parameter. See - /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. - @JsonKey(name: 'max_tokens') required int maxTokens, - - /// An object describing metadata about the request. - @JsonKey(includeIfNull: false) CreateMessageRequestMetadata? metadata, - - /// Custom text sequences that will cause the model to stop generating. - /// - /// Our models will normally stop when they have naturally completed their turn, - /// which will result in a response `stop_reason` of `"end_turn"`. - /// - /// If you want the model to stop generating when it encounters custom strings of - /// text, you can use the `stop_sequences` parameter. If the model encounters one of - /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` - /// and the response `stop_sequence` value will contain the matched stop sequence. - @JsonKey(name: 'stop_sequences', includeIfNull: false) - List? stopSequences, - - /// System prompt. - /// - /// A system prompt is a way of providing context and instructions to Claude, such - /// as specifying a particular goal or role. See our - /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). - @JsonKey(includeIfNull: false) String? system, - - /// Amount of randomness injected into the response. - /// - /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` - /// for analytical / multiple choice, and closer to `1.0` for creative and - /// generative tasks. - /// - /// Note that even with `temperature` of `0.0`, the results will not be fully - /// deterministic. - @JsonKey(includeIfNull: false) double? temperature, - - /// How the model should use the provided tools. The model can use a specific tool, - /// any available tool, or decide by itself. - /// - /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. - /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. 
- /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. - @JsonKey(name: 'tool_choice', includeIfNull: false) ToolChoice? toolChoice, - - /// Definitions of tools that the model may use. - /// - /// If you include `tools` in your API request, the model may return `tool_use` - /// content blocks that represent the model's use of those tools. You can then run - /// those tools using the tool input generated by the model and then optionally - /// return results back to the model using `tool_result` content blocks. - /// - /// Each tool definition includes: - /// - /// - `name`: Name of the tool. - /// - `description`: Optional, but strongly-recommended description of the tool. - /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` - /// shape that the model will produce in `tool_use` output content blocks. - /// - /// For example, if you defined `tools` as: - /// - /// ```json - /// [ - /// { - /// "name": "get_stock_price", - /// "description": "Get the current stock price for a given ticker symbol.", - /// "input_schema": { - /// "type": "object", - /// "properties": { - /// "ticker": { - /// "type": "string", - /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." - /// } - /// }, - /// "required": ["ticker"] - /// } - /// } - /// ] - /// ``` - /// - /// And then asked the model "What's the S&P 500 at today?", the model might produce - /// `tool_use` content blocks in the response like this: - /// - /// ```json - /// [ - /// { - /// "type": "tool_use", - /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", - /// "name": "get_stock_price", - /// "input": { "ticker": "^GSPC" } - /// } - /// ] - /// ``` - /// - /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an - /// input, and return the following back to the model in a subsequent `user` - /// message: - /// - /// ```json - /// [ - /// { - /// "type": "tool_result", - /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", - /// "content": "259.75 USD" - /// } - /// ] - /// ``` - /// - /// Tools can be used for workflows that include running client-side tools and - /// functions, or more generally whenever you want the model to produce a particular - /// JSON structure of output. - /// - /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. - @JsonKey(includeIfNull: false) List? tools, - - /// Only sample from the top K options for each subsequent token. - /// - /// Used to remove "long tail" low probability responses. - /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). - /// - /// Recommended for advanced use cases only. You usually only need to use - /// `temperature`. - @JsonKey(name: 'top_k', includeIfNull: false) int? topK, - - /// Use nucleus sampling. - /// - /// In nucleus sampling, we compute the cumulative distribution over all the options - /// for each subsequent token in decreasing probability order and cut it off once it - /// reaches a particular probability specified by `top_p`. You should either alter - /// `temperature` or `top_p`, but not both. - /// - /// Recommended for advanced use cases only. You usually only need to use - /// `temperature`. - @JsonKey(name: 'top_p', includeIfNull: false) double? topP, - - /// Whether to incrementally stream the response using server-sent events. - /// - /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for - /// details. 
- @Default(false) bool stream, - }) = _CreateMessageRequest; - - /// Object construction from a JSON representation - factory CreateMessageRequest.fromJson(Map json) => - _$CreateMessageRequestFromJson(json); - - /// List of all property names of schema - static const List propertyNames = [ - 'model', - 'messages', - 'max_tokens', - 'metadata', - 'stop_sequences', - 'system', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - 'stream' - ]; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'model': model, - 'messages': messages, - 'max_tokens': maxTokens, - 'metadata': metadata, - 'stop_sequences': stopSequences, - 'system': system, - 'temperature': temperature, - 'tool_choice': toolChoice, - 'tools': tools, - 'top_k': topK, - 'top_p': topP, - 'stream': stream, - }; - } -} - -// ========================================== -// ENUM: Models -// ========================================== - -/// Available models. Mind that the list may not be exhaustive nor up-to-date. -enum Models { - @JsonValue('claude-3-5-sonnet-20240620') - claude35Sonnet20240620, - @JsonValue('claude-3-haiku-20240307') - claude3Haiku20240307, - @JsonValue('claude-3-opus-20240229') - claude3Opus20240229, - @JsonValue('claude-3-sonnet-20240229') - claude3Sonnet20240229, - @JsonValue('claude-2.0') - claude20, - @JsonValue('claude-2.1') - claude21, - @JsonValue('claude-instant-1.2') - claudeInstant12, -} - -// ========================================== -// CLASS: Model -// ========================================== - -/// The model that will complete your prompt. -/// -/// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional -/// details and options. -@freezed -sealed class Model with _$Model { - const Model._(); - - /// Available models. Mind that the list may not be exhaustive nor up-to-date. - const factory Model.model( - Models value, - ) = ModelCatalog; - - /// The ID of the model to use for this request. - const factory Model.modelId( - String value, - ) = ModelId; - - /// Object construction from a JSON representation - factory Model.fromJson(Map json) => _$ModelFromJson(json); -} - -/// Custom JSON converter for [Model] -class _ModelConverter implements JsonConverter { - const _ModelConverter(); - - @override - Model fromJson(Object? data) { - if (data is String && _$ModelsEnumMap.values.contains(data)) { - return ModelCatalog( - _$ModelsEnumMap.keys.elementAt( - _$ModelsEnumMap.values.toList().indexOf(data), - ), - ); - } - if (data is String) { - return ModelId(data); - } - throw Exception( - 'Unexpected value for Model: $data', - ); - } - - @override - Object? 
toJson(Model data) { - return switch (data) { - ModelCatalog(value: final v) => _$ModelsEnumMap[v]!, - ModelId(value: final v) => v, - }; - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart deleted file mode 100644 index bf588756..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart +++ /dev/null @@ -1,44 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: CreateMessageRequestMetadata -// ========================================== - -/// An object describing metadata about the request. -@freezed -class CreateMessageRequestMetadata with _$CreateMessageRequestMetadata { - const CreateMessageRequestMetadata._(); - - /// Factory constructor for CreateMessageRequestMetadata - const factory CreateMessageRequestMetadata({ - /// An external identifier for the user who is associated with the request. - /// - /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use - /// this id to help detect abuse. Do not include any identifying information such as - /// name, email address, or phone number. - @JsonKey(name: 'user_id', includeIfNull: false) String? userId, - }) = _CreateMessageRequestMetadata; - - /// Object construction from a JSON representation - factory CreateMessageRequestMetadata.fromJson(Map json) => - _$CreateMessageRequestMetadataFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['user_id']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'user_id': userId, - }; - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart deleted file mode 100644 index e0a89687..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart +++ /dev/null @@ -1,74 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: ImageBlockSource -// ========================================== - -/// The source of an image block. -@freezed -class ImageBlockSource with _$ImageBlockSource { - const ImageBlockSource._(); - - /// Factory constructor for ImageBlockSource - const factory ImageBlockSource({ - /// The base64-encoded image data. - required String data, - - /// The media type of the image. - @JsonKey(name: 'media_type') required ImageBlockSourceMediaType mediaType, - - /// The type of image source. - required ImageBlockSourceType type, - }) = _ImageBlockSource; - - /// Object construction from a JSON representation - factory ImageBlockSource.fromJson(Map json) => - _$ImageBlockSourceFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['data', 'media_type', 'type']; - - /// Perform validations on the schema property values - String? 
validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'data': data, - 'media_type': mediaType, - 'type': type, - }; - } -} - -// ========================================== -// ENUM: ImageBlockSourceMediaType -// ========================================== - -/// The media type of the image. -enum ImageBlockSourceMediaType { - @JsonValue('image/jpeg') - imageJpeg, - @JsonValue('image/png') - imagePng, - @JsonValue('image/gif') - imageGif, - @JsonValue('image/webp') - imageWebp, -} - -// ========================================== -// ENUM: ImageBlockSourceType -// ========================================== - -/// The type of image source. -enum ImageBlockSourceType { - @JsonValue('base64') - base64, -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart deleted file mode 100644 index 2444ac92..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart +++ /dev/null @@ -1,162 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: Message -// ========================================== - -/// A message in a chat conversation. -@freezed -class Message with _$Message { - const Message._(); - - /// Factory constructor for Message - const factory Message({ - /// Unique object identifier. - /// - /// The format and length of IDs may change over time. - @JsonKey(includeIfNull: false) String? id, - - /// The content of the message. - @_MessageContentConverter() required MessageContent content, - - /// The role of the messages author. - required MessageRole role, - - /// The model that handled the request. - @JsonKey(includeIfNull: false) String? model, - - /// The reason that we stopped. - /// - /// This may be one the following values: - /// - /// - `"end_turn"`: the model reached a natural stopping point - /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum - /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated - /// - /// In non-streaming mode this value is always non-null. In streaming mode, it is - /// null in the `message_start` event and non-null otherwise. - @JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue, - ) - StopReason? stopReason, - - /// Which custom stop sequence was generated, if any. - /// - /// This value will be a non-null string if one of your custom stop sequences was - /// generated. - @JsonKey(name: 'stop_sequence', includeIfNull: false) String? stopSequence, - - /// Object type. - /// - /// For Messages, this is always `"message"`. - @JsonKey(includeIfNull: false) String? type, - - /// Billing and rate-limit usage. - /// - /// Anthropic's API bills and rate-limits by token counts, as tokens represent the - /// underlying cost to our systems. - /// - /// Under the hood, the API transforms requests into a format suitable for the - /// model. The model's output then goes through a parsing stage before becoming an - /// API response. As a result, the token counts in `usage` will not match one-to-one - /// with the exact visible content of an API request or response. - /// - /// For example, `output_tokens` will be non-zero, even for an empty string response - /// from Claude. 
- @JsonKey(includeIfNull: false) Usage? usage, - }) = _Message; - - /// Object construction from a JSON representation - factory Message.fromJson(Map json) => - _$MessageFromJson(json); - - /// List of all property names of schema - static const List propertyNames = [ - 'id', - 'content', - 'role', - 'model', - 'stop_reason', - 'stop_sequence', - 'type', - 'usage' - ]; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'id': id, - 'content': content, - 'role': role, - 'model': model, - 'stop_reason': stopReason, - 'stop_sequence': stopSequence, - 'type': type, - 'usage': usage, - }; - } -} - -// ========================================== -// CLASS: MessageContent -// ========================================== - -/// The content of the message. -@freezed -sealed class MessageContent with _$MessageContent { - const MessageContent._(); - - /// An array of content blocks. - const factory MessageContent.blocks( - List value, - ) = MessageContentBlocks; - - /// A single text block. - const factory MessageContent.text( - String value, - ) = MessageContentText; - - /// Object construction from a JSON representation - factory MessageContent.fromJson(Map json) => - _$MessageContentFromJson(json); -} - -/// Custom JSON converter for [MessageContent] -class _MessageContentConverter - implements JsonConverter { - const _MessageContentConverter(); - - @override - MessageContent fromJson(Object? data) { - if (data is List && data.every((item) => item is Map)) { - return MessageContentBlocks(data - .map((i) => Block.fromJson(i as Map)) - .toList(growable: false)); - } - if (data is String) { - return MessageContentText(data); - } - throw Exception( - 'Unexpected value for MessageContent: $data', - ); - } - - @override - Object? toJson(MessageContent data) { - return switch (data) { - MessageContentBlocks(value: final v) => v, - MessageContentText(value: final v) => v, - }; - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart deleted file mode 100644 index aa23db40..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart +++ /dev/null @@ -1,61 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: MessageDelta -// ========================================== - -/// A delta in a streaming message. -@freezed -class MessageDelta with _$MessageDelta { - const MessageDelta._(); - - /// Factory constructor for MessageDelta - const factory MessageDelta({ - /// The reason that we stopped. - /// - /// This may be one the following values: - /// - /// - `"end_turn"`: the model reached a natural stopping point - /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum - /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated - /// - /// In non-streaming mode this value is always non-null. In streaming mode, it is - /// null in the `message_start` event and non-null otherwise. - @JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue, - ) - StopReason? stopReason, - - /// Which custom stop sequence was generated, if any. 
- /// - /// This value will be a non-null string if one of your custom stop sequences was - /// generated. - @JsonKey(name: 'stop_sequence', includeIfNull: false) String? stopSequence, - }) = _MessageDelta; - - /// Object construction from a JSON representation - factory MessageDelta.fromJson(Map json) => - _$MessageDeltaFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['stop_reason', 'stop_sequence']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'stop_reason': stopReason, - 'stop_sequence': stopSequence, - }; - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart deleted file mode 100644 index 3ce710cc..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart +++ /dev/null @@ -1,51 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: MessageDeltaUsage -// ========================================== - -/// Billing and rate-limit usage. -/// -/// Anthropic's API bills and rate-limits by token counts, as tokens represent the -/// underlying cost to our systems. -/// -/// Under the hood, the API transforms requests into a format suitable for the -/// model. The model's output then goes through a parsing stage before becoming an -/// API response. As a result, the token counts in `usage` will not match one-to-one -/// with the exact visible content of an API request or response. -/// -/// For example, `output_tokens` will be non-zero, even for an empty string response -/// from Claude. -@freezed -class MessageDeltaUsage with _$MessageDeltaUsage { - const MessageDeltaUsage._(); - - /// Factory constructor for MessageDeltaUsage - const factory MessageDeltaUsage({ - /// The cumulative number of output tokens which were used. - @JsonKey(name: 'output_tokens') required int outputTokens, - }) = _MessageDeltaUsage; - - /// Object construction from a JSON representation - factory MessageDeltaUsage.fromJson(Map json) => - _$MessageDeltaUsageFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['output_tokens']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'output_tokens': outputTokens, - }; - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart deleted file mode 100644 index e502789a..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart +++ /dev/null @@ -1,17 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// ENUM: MessageRole -// ========================================== - -/// The role of the messages author. 
-enum MessageRole { - @JsonValue('user') - user, - @JsonValue('assistant') - assistant, -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart deleted file mode 100644 index 46ef88ba..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart +++ /dev/null @@ -1,126 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: MessageStreamEvent -// ========================================== - -/// A event in a streaming conversation. -@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) -sealed class MessageStreamEvent with _$MessageStreamEvent { - const MessageStreamEvent._(); - - // ------------------------------------------ - // UNION: MessageStartEvent - // ------------------------------------------ - - /// A start event in a streaming conversation. - const factory MessageStreamEvent.messageStart({ - /// A message in a chat conversation. - required Message message, - - /// The type of a streaming event. - required MessageStreamEventType type, - }) = MessageStartEvent; - - // ------------------------------------------ - // UNION: MessageDeltaEvent - // ------------------------------------------ - - /// A delta event in a streaming conversation. - const factory MessageStreamEvent.messageDelta({ - /// A delta in a streaming message. - required MessageDelta delta, - - /// The type of a streaming event. - required MessageStreamEventType type, - - /// Billing and rate-limit usage. - /// - /// Anthropic's API bills and rate-limits by token counts, as tokens represent the - /// underlying cost to our systems. - /// - /// Under the hood, the API transforms requests into a format suitable for the - /// model. The model's output then goes through a parsing stage before becoming an - /// API response. As a result, the token counts in `usage` will not match one-to-one - /// with the exact visible content of an API request or response. - /// - /// For example, `output_tokens` will be non-zero, even for an empty string response - /// from Claude. - required MessageDeltaUsage usage, - }) = MessageDeltaEvent; - - // ------------------------------------------ - // UNION: MessageStopEvent - // ------------------------------------------ - - /// A stop event in a streaming conversation. - const factory MessageStreamEvent.messageStop({ - /// The type of a streaming event. - required MessageStreamEventType type, - }) = MessageStopEvent; - - // ------------------------------------------ - // UNION: ContentBlockStartEvent - // ------------------------------------------ - - /// A start event in a streaming content block. - const factory MessageStreamEvent.contentBlockStart({ - /// A block of content in a message. - /// Any of: [TextBlock], [ImageBlock], [ToolUseBlock], [ToolResultBlock] - @JsonKey(name: 'content_block') required Block contentBlock, - - /// The index of the content block. - required int index, - - /// The type of a streaming event. - required MessageStreamEventType type, - }) = ContentBlockStartEvent; - - // ------------------------------------------ - // UNION: ContentBlockDeltaEvent - // ------------------------------------------ - - /// A delta event in a streaming content block. 
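  /// Both text deltas and tool-input JSON deltas arrive through this event.
  /// A sketch of telling them apart, using the `text` and `inputJson` getters
  /// from the package's `extensions.dart` (requires `dart:io` for `stdout`):
  ///
  /// ```dart
  /// contentBlockDelta: (ContentBlockDeltaEvent e) {
  ///   stdout.write(e.delta.text);      // text for TextBlockDelta, '' otherwise
  ///   stdout.write(e.delta.inputJson); // partial JSON for InputJsonBlockDelta, '' otherwise
  /// },
  /// ```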
- const factory MessageStreamEvent.contentBlockDelta({ - /// A delta in a streaming message. - /// Any of: [TextBlockDelta], [InputJsonBlockDelta] - required BlockDelta delta, - - /// The index of the content block. - required int index, - - /// The type of a streaming event. - required MessageStreamEventType type, - }) = ContentBlockDeltaEvent; - - // ------------------------------------------ - // UNION: ContentBlockStopEvent - // ------------------------------------------ - - /// A stop event in a streaming content block. - const factory MessageStreamEvent.contentBlockStop({ - /// The index of the content block. - required int index, - - /// The type of a streaming event. - required MessageStreamEventType type, - }) = ContentBlockStopEvent; - - // ------------------------------------------ - // UNION: PingEvent - // ------------------------------------------ - - /// A ping event in a streaming conversation. - const factory MessageStreamEvent.ping({ - /// The type of a streaming event. - required MessageStreamEventType type, - }) = PingEvent; - - /// Object construction from a JSON representation - factory MessageStreamEvent.fromJson(Map json) => - _$MessageStreamEventFromJson(json); -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart deleted file mode 100644 index 0e6aa425..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart +++ /dev/null @@ -1,27 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// ENUM: MessageStreamEventType -// ========================================== - -/// The type of a streaming event. 
-enum MessageStreamEventType { - @JsonValue('message_start') - messageStart, - @JsonValue('message_delta') - messageDelta, - @JsonValue('message_stop') - messageStop, - @JsonValue('content_block_start') - contentBlockStart, - @JsonValue('content_block_delta') - contentBlockDelta, - @JsonValue('content_block_stop') - contentBlockStop, - @JsonValue('ping') - ping, -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart deleted file mode 100644 index b9d2ef26..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart +++ /dev/null @@ -1,28 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target - -library anthropic_schema; - -import 'package:freezed_annotation/freezed_annotation.dart'; - -part 'schema.g.dart'; -part 'schema.freezed.dart'; - -part 'create_message_request.dart'; -part 'create_message_request_metadata.dart'; -part 'tool_choice.dart'; -part 'tool_choice_type.dart'; -part 'message.dart'; -part 'message_role.dart'; -part 'tool.dart'; -part 'image_block_source.dart'; -part 'stop_reason.dart'; -part 'usage.dart'; -part 'message_stream_event_type.dart'; -part 'message_delta.dart'; -part 'message_delta_usage.dart'; -part 'block.dart'; -part 'message_stream_event.dart'; -part 'block_delta.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart deleted file mode 100644 index 4045606f..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart +++ /dev/null @@ -1,7758 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: unused_element, deprecated_member_use, deprecated_member_use_from_same_package, use_function_type_syntax_for_parameters, unnecessary_const, avoid_init_to_null, invalid_override_different_default_values_named, prefer_expression_function_bodies, annotate_overrides, invalid_annotation_target, unnecessary_question_mark - -part of 'schema.dart'; - -// ************************************************************************** -// FreezedGenerator -// ************************************************************************** - -T _$identity(T value) => value; - -final _privateConstructorUsedError = UnsupportedError( - 'It seems like you constructed your class using `MyClass._()`. This constructor is only meant to be used by freezed and you are not supposed to need it nor use it.\nPlease check the documentation here for more information: https://github.com/rrousselGit/freezed#adding-getters-and-methods-to-our-models'); - -CreateMessageRequest _$CreateMessageRequestFromJson(Map json) { - return _CreateMessageRequest.fromJson(json); -} - -/// @nodoc -mixin _$CreateMessageRequest { - /// The model that will complete your prompt. - /// - /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional - /// details and options. - @_ModelConverter() - Model get model => throw _privateConstructorUsedError; - - /// Input messages. - /// - /// Our models are trained to operate on alternating `user` and `assistant` - /// conversational turns. When creating a new `Message`, you specify the prior - /// conversational turns with the `messages` parameter, and the model then generates - /// the next `Message` in the conversation. 
- /// - /// Each input message must be an object with a `role` and `content`. You can - /// specify a single `user`-role message, or you can include multiple `user` and - /// `assistant` messages. The first message must always use the `user` role. - /// - /// If the final message uses the `assistant` role, the response content will - /// continue immediately from the content in that message. This can be used to - /// constrain part of the model's response. - /// - /// See [message content](https://docs.anthropic.com/en/api/messages-content) for - /// details on how to construct valid message objects. - /// - /// Example with a single `user` message: - /// - /// ```json - /// [{ "role": "user", "content": "Hello, Claude" }] - /// ``` - /// - /// Example with multiple conversational turns: - /// - /// ```json - /// [ - /// { "role": "user", "content": "Hello there." }, - /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, - /// { "role": "user", "content": "Can you explain LLMs in plain English?" } - /// ] - /// ``` - /// - /// Example with a partially-filled response from Claude: - /// - /// ```json - /// [ - /// { - /// "role": "user", - /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" - /// }, - /// { "role": "assistant", "content": "The best answer is (" } - /// ] - /// ``` - /// - /// Each input message `content` may be either a single `string` or an array of - /// content blocks, where each block has a specific `type`. Using a `string` for - /// `content` is shorthand for an array of one content block of type `"text"`. The - /// following input messages are equivalent: - /// - /// ```json - /// { "role": "user", "content": "Hello, Claude" } - /// ``` - /// - /// ```json - /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } - /// ``` - /// - /// Starting with Claude 3 models, you can also send image content blocks: - /// - /// ```json - /// { - /// "role": "user", - /// "content": [ - /// { - /// "type": "image", - /// "source": { - /// "type": "base64", - /// "media_type": "image/jpeg", - /// "data": "/9j/4AAQSkZJRg..." - /// } - /// }, - /// { "type": "text", "text": "What is in this image?" } - /// ] - /// } - /// ``` - /// - /// We currently support the `base64` source type for images, and the `image/jpeg`, - /// `image/png`, `image/gif`, and `image/webp` media types. - /// - /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more - /// input examples. - /// - /// Note that if you want to include a - /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use - /// the top-level `system` parameter — there is no `"system"` role for input - /// messages in the Messages API. - List get messages => throw _privateConstructorUsedError; - - /// The maximum number of tokens to generate before stopping. - /// - /// Note that our models may stop _before_ reaching this maximum. This parameter - /// only specifies the absolute maximum number of tokens to generate. - /// - /// Different models have different maximum values for this parameter. See - /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. - @JsonKey(name: 'max_tokens') - int get maxTokens => throw _privateConstructorUsedError; - - /// An object describing metadata about the request. - @JsonKey(includeIfNull: false) - CreateMessageRequestMetadata? get metadata => - throw _privateConstructorUsedError; - - /// Custom text sequences that will cause the model to stop generating. 
- /// - /// Our models will normally stop when they have naturally completed their turn, - /// which will result in a response `stop_reason` of `"end_turn"`. - /// - /// If you want the model to stop generating when it encounters custom strings of - /// text, you can use the `stop_sequences` parameter. If the model encounters one of - /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` - /// and the response `stop_sequence` value will contain the matched stop sequence. - @JsonKey(name: 'stop_sequences', includeIfNull: false) - List? get stopSequences => throw _privateConstructorUsedError; - - /// System prompt. - /// - /// A system prompt is a way of providing context and instructions to Claude, such - /// as specifying a particular goal or role. See our - /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). - @JsonKey(includeIfNull: false) - String? get system => throw _privateConstructorUsedError; - - /// Amount of randomness injected into the response. - /// - /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` - /// for analytical / multiple choice, and closer to `1.0` for creative and - /// generative tasks. - /// - /// Note that even with `temperature` of `0.0`, the results will not be fully - /// deterministic. - @JsonKey(includeIfNull: false) - double? get temperature => throw _privateConstructorUsedError; - - /// How the model should use the provided tools. The model can use a specific tool, - /// any available tool, or decide by itself. - /// - /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. - /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. - /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. - @JsonKey(name: 'tool_choice', includeIfNull: false) - ToolChoice? get toolChoice => throw _privateConstructorUsedError; - - /// Definitions of tools that the model may use. - /// - /// If you include `tools` in your API request, the model may return `tool_use` - /// content blocks that represent the model's use of those tools. You can then run - /// those tools using the tool input generated by the model and then optionally - /// return results back to the model using `tool_result` content blocks. - /// - /// Each tool definition includes: - /// - /// - `name`: Name of the tool. - /// - `description`: Optional, but strongly-recommended description of the tool. - /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` - /// shape that the model will produce in `tool_use` output content blocks. - /// - /// For example, if you defined `tools` as: - /// - /// ```json - /// [ - /// { - /// "name": "get_stock_price", - /// "description": "Get the current stock price for a given ticker symbol.", - /// "input_schema": { - /// "type": "object", - /// "properties": { - /// "ticker": { - /// "type": "string", - /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." 
- /// } - /// }, - /// "required": ["ticker"] - /// } - /// } - /// ] - /// ``` - /// - /// And then asked the model "What's the S&P 500 at today?", the model might produce - /// `tool_use` content blocks in the response like this: - /// - /// ```json - /// [ - /// { - /// "type": "tool_use", - /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", - /// "name": "get_stock_price", - /// "input": { "ticker": "^GSPC" } - /// } - /// ] - /// ``` - /// - /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an - /// input, and return the following back to the model in a subsequent `user` - /// message: - /// - /// ```json - /// [ - /// { - /// "type": "tool_result", - /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", - /// "content": "259.75 USD" - /// } - /// ] - /// ``` - /// - /// Tools can be used for workflows that include running client-side tools and - /// functions, or more generally whenever you want the model to produce a particular - /// JSON structure of output. - /// - /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. - @JsonKey(includeIfNull: false) - List? get tools => throw _privateConstructorUsedError; - - /// Only sample from the top K options for each subsequent token. - /// - /// Used to remove "long tail" low probability responses. - /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). - /// - /// Recommended for advanced use cases only. You usually only need to use - /// `temperature`. - @JsonKey(name: 'top_k', includeIfNull: false) - int? get topK => throw _privateConstructorUsedError; - - /// Use nucleus sampling. - /// - /// In nucleus sampling, we compute the cumulative distribution over all the options - /// for each subsequent token in decreasing probability order and cut it off once it - /// reaches a particular probability specified by `top_p`. You should either alter - /// `temperature` or `top_p`, but not both. - /// - /// Recommended for advanced use cases only. You usually only need to use - /// `temperature`. - @JsonKey(name: 'top_p', includeIfNull: false) - double? get topP => throw _privateConstructorUsedError; - - /// Whether to incrementally stream the response using server-sent events. - /// - /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for - /// details. - bool get stream => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $CreateMessageRequestCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $CreateMessageRequestCopyWith<$Res> { - factory $CreateMessageRequestCopyWith(CreateMessageRequest value, - $Res Function(CreateMessageRequest) then) = - _$CreateMessageRequestCopyWithImpl<$Res, CreateMessageRequest>; - @useResult - $Res call( - {@_ModelConverter() Model model, - List messages, - @JsonKey(name: 'max_tokens') int maxTokens, - @JsonKey(includeIfNull: false) CreateMessageRequestMetadata? metadata, - @JsonKey(name: 'stop_sequences', includeIfNull: false) - List? stopSequences, - @JsonKey(includeIfNull: false) String? system, - @JsonKey(includeIfNull: false) double? temperature, - @JsonKey(name: 'tool_choice', includeIfNull: false) - ToolChoice? toolChoice, - @JsonKey(includeIfNull: false) List? tools, - @JsonKey(name: 'top_k', includeIfNull: false) int? topK, - @JsonKey(name: 'top_p', includeIfNull: false) double? 
topP, - bool stream}); - - $ModelCopyWith<$Res> get model; - $CreateMessageRequestMetadataCopyWith<$Res>? get metadata; - $ToolChoiceCopyWith<$Res>? get toolChoice; -} - -/// @nodoc -class _$CreateMessageRequestCopyWithImpl<$Res, - $Val extends CreateMessageRequest> - implements $CreateMessageRequestCopyWith<$Res> { - _$CreateMessageRequestCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? model = null, - Object? messages = null, - Object? maxTokens = null, - Object? metadata = freezed, - Object? stopSequences = freezed, - Object? system = freezed, - Object? temperature = freezed, - Object? toolChoice = freezed, - Object? tools = freezed, - Object? topK = freezed, - Object? topP = freezed, - Object? stream = null, - }) { - return _then(_value.copyWith( - model: null == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as Model, - messages: null == messages - ? _value.messages - : messages // ignore: cast_nullable_to_non_nullable - as List, - maxTokens: null == maxTokens - ? _value.maxTokens - : maxTokens // ignore: cast_nullable_to_non_nullable - as int, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as CreateMessageRequestMetadata?, - stopSequences: freezed == stopSequences - ? _value.stopSequences - : stopSequences // ignore: cast_nullable_to_non_nullable - as List?, - system: freezed == system - ? _value.system - : system // ignore: cast_nullable_to_non_nullable - as String?, - temperature: freezed == temperature - ? _value.temperature - : temperature // ignore: cast_nullable_to_non_nullable - as double?, - toolChoice: freezed == toolChoice - ? _value.toolChoice - : toolChoice // ignore: cast_nullable_to_non_nullable - as ToolChoice?, - tools: freezed == tools - ? _value.tools - : tools // ignore: cast_nullable_to_non_nullable - as List?, - topK: freezed == topK - ? _value.topK - : topK // ignore: cast_nullable_to_non_nullable - as int?, - topP: freezed == topP - ? _value.topP - : topP // ignore: cast_nullable_to_non_nullable - as double?, - stream: null == stream - ? _value.stream - : stream // ignore: cast_nullable_to_non_nullable - as bool, - ) as $Val); - } - - @override - @pragma('vm:prefer-inline') - $ModelCopyWith<$Res> get model { - return $ModelCopyWith<$Res>(_value.model, (value) { - return _then(_value.copyWith(model: value) as $Val); - }); - } - - @override - @pragma('vm:prefer-inline') - $CreateMessageRequestMetadataCopyWith<$Res>? get metadata { - if (_value.metadata == null) { - return null; - } - - return $CreateMessageRequestMetadataCopyWith<$Res>(_value.metadata!, - (value) { - return _then(_value.copyWith(metadata: value) as $Val); - }); - } - - @override - @pragma('vm:prefer-inline') - $ToolChoiceCopyWith<$Res>? 
get toolChoice { - if (_value.toolChoice == null) { - return null; - } - - return $ToolChoiceCopyWith<$Res>(_value.toolChoice!, (value) { - return _then(_value.copyWith(toolChoice: value) as $Val); - }); - } -} - -/// @nodoc -abstract class _$$CreateMessageRequestImplCopyWith<$Res> - implements $CreateMessageRequestCopyWith<$Res> { - factory _$$CreateMessageRequestImplCopyWith(_$CreateMessageRequestImpl value, - $Res Function(_$CreateMessageRequestImpl) then) = - __$$CreateMessageRequestImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@_ModelConverter() Model model, - List messages, - @JsonKey(name: 'max_tokens') int maxTokens, - @JsonKey(includeIfNull: false) CreateMessageRequestMetadata? metadata, - @JsonKey(name: 'stop_sequences', includeIfNull: false) - List? stopSequences, - @JsonKey(includeIfNull: false) String? system, - @JsonKey(includeIfNull: false) double? temperature, - @JsonKey(name: 'tool_choice', includeIfNull: false) - ToolChoice? toolChoice, - @JsonKey(includeIfNull: false) List? tools, - @JsonKey(name: 'top_k', includeIfNull: false) int? topK, - @JsonKey(name: 'top_p', includeIfNull: false) double? topP, - bool stream}); - - @override - $ModelCopyWith<$Res> get model; - @override - $CreateMessageRequestMetadataCopyWith<$Res>? get metadata; - @override - $ToolChoiceCopyWith<$Res>? get toolChoice; -} - -/// @nodoc -class __$$CreateMessageRequestImplCopyWithImpl<$Res> - extends _$CreateMessageRequestCopyWithImpl<$Res, _$CreateMessageRequestImpl> - implements _$$CreateMessageRequestImplCopyWith<$Res> { - __$$CreateMessageRequestImplCopyWithImpl(_$CreateMessageRequestImpl _value, - $Res Function(_$CreateMessageRequestImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? model = null, - Object? messages = null, - Object? maxTokens = null, - Object? metadata = freezed, - Object? stopSequences = freezed, - Object? system = freezed, - Object? temperature = freezed, - Object? toolChoice = freezed, - Object? tools = freezed, - Object? topK = freezed, - Object? topP = freezed, - Object? stream = null, - }) { - return _then(_$CreateMessageRequestImpl( - model: null == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as Model, - messages: null == messages - ? _value._messages - : messages // ignore: cast_nullable_to_non_nullable - as List, - maxTokens: null == maxTokens - ? _value.maxTokens - : maxTokens // ignore: cast_nullable_to_non_nullable - as int, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as CreateMessageRequestMetadata?, - stopSequences: freezed == stopSequences - ? _value._stopSequences - : stopSequences // ignore: cast_nullable_to_non_nullable - as List?, - system: freezed == system - ? _value.system - : system // ignore: cast_nullable_to_non_nullable - as String?, - temperature: freezed == temperature - ? _value.temperature - : temperature // ignore: cast_nullable_to_non_nullable - as double?, - toolChoice: freezed == toolChoice - ? _value.toolChoice - : toolChoice // ignore: cast_nullable_to_non_nullable - as ToolChoice?, - tools: freezed == tools - ? _value._tools - : tools // ignore: cast_nullable_to_non_nullable - as List?, - topK: freezed == topK - ? _value.topK - : topK // ignore: cast_nullable_to_non_nullable - as int?, - topP: freezed == topP - ? _value.topP - : topP // ignore: cast_nullable_to_non_nullable - as double?, - stream: null == stream - ? 
_value.stream - : stream // ignore: cast_nullable_to_non_nullable - as bool, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$CreateMessageRequestImpl extends _CreateMessageRequest { - const _$CreateMessageRequestImpl( - {@_ModelConverter() required this.model, - required final List messages, - @JsonKey(name: 'max_tokens') required this.maxTokens, - @JsonKey(includeIfNull: false) this.metadata, - @JsonKey(name: 'stop_sequences', includeIfNull: false) - final List? stopSequences, - @JsonKey(includeIfNull: false) this.system, - @JsonKey(includeIfNull: false) this.temperature, - @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, - @JsonKey(includeIfNull: false) final List? tools, - @JsonKey(name: 'top_k', includeIfNull: false) this.topK, - @JsonKey(name: 'top_p', includeIfNull: false) this.topP, - this.stream = false}) - : _messages = messages, - _stopSequences = stopSequences, - _tools = tools, - super._(); - - factory _$CreateMessageRequestImpl.fromJson(Map json) => - _$$CreateMessageRequestImplFromJson(json); - - /// The model that will complete your prompt. - /// - /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional - /// details and options. - @override - @_ModelConverter() - final Model model; - - /// Input messages. - /// - /// Our models are trained to operate on alternating `user` and `assistant` - /// conversational turns. When creating a new `Message`, you specify the prior - /// conversational turns with the `messages` parameter, and the model then generates - /// the next `Message` in the conversation. - /// - /// Each input message must be an object with a `role` and `content`. You can - /// specify a single `user`-role message, or you can include multiple `user` and - /// `assistant` messages. The first message must always use the `user` role. - /// - /// If the final message uses the `assistant` role, the response content will - /// continue immediately from the content in that message. This can be used to - /// constrain part of the model's response. - /// - /// See [message content](https://docs.anthropic.com/en/api/messages-content) for - /// details on how to construct valid message objects. - /// - /// Example with a single `user` message: - /// - /// ```json - /// [{ "role": "user", "content": "Hello, Claude" }] - /// ``` - /// - /// Example with multiple conversational turns: - /// - /// ```json - /// [ - /// { "role": "user", "content": "Hello there." }, - /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, - /// { "role": "user", "content": "Can you explain LLMs in plain English?" } - /// ] - /// ``` - /// - /// Example with a partially-filled response from Claude: - /// - /// ```json - /// [ - /// { - /// "role": "user", - /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" - /// }, - /// { "role": "assistant", "content": "The best answer is (" } - /// ] - /// ``` - /// - /// Each input message `content` may be either a single `string` or an array of - /// content blocks, where each block has a specific `type`. Using a `string` for - /// `content` is shorthand for an array of one content block of type `"text"`. 
The - /// following input messages are equivalent: - /// - /// ```json - /// { "role": "user", "content": "Hello, Claude" } - /// ``` - /// - /// ```json - /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } - /// ``` - /// - /// Starting with Claude 3 models, you can also send image content blocks: - /// - /// ```json - /// { - /// "role": "user", - /// "content": [ - /// { - /// "type": "image", - /// "source": { - /// "type": "base64", - /// "media_type": "image/jpeg", - /// "data": "/9j/4AAQSkZJRg..." - /// } - /// }, - /// { "type": "text", "text": "What is in this image?" } - /// ] - /// } - /// ``` - /// - /// We currently support the `base64` source type for images, and the `image/jpeg`, - /// `image/png`, `image/gif`, and `image/webp` media types. - /// - /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more - /// input examples. - /// - /// Note that if you want to include a - /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use - /// the top-level `system` parameter — there is no `"system"` role for input - /// messages in the Messages API. - final List _messages; - - /// Input messages. - /// - /// Our models are trained to operate on alternating `user` and `assistant` - /// conversational turns. When creating a new `Message`, you specify the prior - /// conversational turns with the `messages` parameter, and the model then generates - /// the next `Message` in the conversation. - /// - /// Each input message must be an object with a `role` and `content`. You can - /// specify a single `user`-role message, or you can include multiple `user` and - /// `assistant` messages. The first message must always use the `user` role. - /// - /// If the final message uses the `assistant` role, the response content will - /// continue immediately from the content in that message. This can be used to - /// constrain part of the model's response. - /// - /// See [message content](https://docs.anthropic.com/en/api/messages-content) for - /// details on how to construct valid message objects. - /// - /// Example with a single `user` message: - /// - /// ```json - /// [{ "role": "user", "content": "Hello, Claude" }] - /// ``` - /// - /// Example with multiple conversational turns: - /// - /// ```json - /// [ - /// { "role": "user", "content": "Hello there." }, - /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, - /// { "role": "user", "content": "Can you explain LLMs in plain English?" } - /// ] - /// ``` - /// - /// Example with a partially-filled response from Claude: - /// - /// ```json - /// [ - /// { - /// "role": "user", - /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" - /// }, - /// { "role": "assistant", "content": "The best answer is (" } - /// ] - /// ``` - /// - /// Each input message `content` may be either a single `string` or an array of - /// content blocks, where each block has a specific `type`. Using a `string` for - /// `content` is shorthand for an array of one content block of type `"text"`. 
The - /// following input messages are equivalent: - /// - /// ```json - /// { "role": "user", "content": "Hello, Claude" } - /// ``` - /// - /// ```json - /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } - /// ``` - /// - /// Starting with Claude 3 models, you can also send image content blocks: - /// - /// ```json - /// { - /// "role": "user", - /// "content": [ - /// { - /// "type": "image", - /// "source": { - /// "type": "base64", - /// "media_type": "image/jpeg", - /// "data": "/9j/4AAQSkZJRg..." - /// } - /// }, - /// { "type": "text", "text": "What is in this image?" } - /// ] - /// } - /// ``` - /// - /// We currently support the `base64` source type for images, and the `image/jpeg`, - /// `image/png`, `image/gif`, and `image/webp` media types. - /// - /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more - /// input examples. - /// - /// Note that if you want to include a - /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use - /// the top-level `system` parameter — there is no `"system"` role for input - /// messages in the Messages API. - @override - List get messages { - if (_messages is EqualUnmodifiableListView) return _messages; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_messages); - } - - /// The maximum number of tokens to generate before stopping. - /// - /// Note that our models may stop _before_ reaching this maximum. This parameter - /// only specifies the absolute maximum number of tokens to generate. - /// - /// Different models have different maximum values for this parameter. See - /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. - @override - @JsonKey(name: 'max_tokens') - final int maxTokens; - - /// An object describing metadata about the request. - @override - @JsonKey(includeIfNull: false) - final CreateMessageRequestMetadata? metadata; - - /// Custom text sequences that will cause the model to stop generating. - /// - /// Our models will normally stop when they have naturally completed their turn, - /// which will result in a response `stop_reason` of `"end_turn"`. - /// - /// If you want the model to stop generating when it encounters custom strings of - /// text, you can use the `stop_sequences` parameter. If the model encounters one of - /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` - /// and the response `stop_sequence` value will contain the matched stop sequence. - final List? _stopSequences; - - /// Custom text sequences that will cause the model to stop generating. - /// - /// Our models will normally stop when they have naturally completed their turn, - /// which will result in a response `stop_reason` of `"end_turn"`. - /// - /// If you want the model to stop generating when it encounters custom strings of - /// text, you can use the `stop_sequences` parameter. If the model encounters one of - /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` - /// and the response `stop_sequence` value will contain the matched stop sequence. - @override - @JsonKey(name: 'stop_sequences', includeIfNull: false) - List? get stopSequences { - final value = _stopSequences; - if (value == null) return null; - if (_stopSequences is EqualUnmodifiableListView) return _stopSequences; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - - /// System prompt. 
- /// - /// A system prompt is a way of providing context and instructions to Claude, such - /// as specifying a particular goal or role. See our - /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). - @override - @JsonKey(includeIfNull: false) - final String? system; - - /// Amount of randomness injected into the response. - /// - /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` - /// for analytical / multiple choice, and closer to `1.0` for creative and - /// generative tasks. - /// - /// Note that even with `temperature` of `0.0`, the results will not be fully - /// deterministic. - @override - @JsonKey(includeIfNull: false) - final double? temperature; - - /// How the model should use the provided tools. The model can use a specific tool, - /// any available tool, or decide by itself. - /// - /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. - /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. - /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. - @override - @JsonKey(name: 'tool_choice', includeIfNull: false) - final ToolChoice? toolChoice; - - /// Definitions of tools that the model may use. - /// - /// If you include `tools` in your API request, the model may return `tool_use` - /// content blocks that represent the model's use of those tools. You can then run - /// those tools using the tool input generated by the model and then optionally - /// return results back to the model using `tool_result` content blocks. - /// - /// Each tool definition includes: - /// - /// - `name`: Name of the tool. - /// - `description`: Optional, but strongly-recommended description of the tool. - /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` - /// shape that the model will produce in `tool_use` output content blocks. - /// - /// For example, if you defined `tools` as: - /// - /// ```json - /// [ - /// { - /// "name": "get_stock_price", - /// "description": "Get the current stock price for a given ticker symbol.", - /// "input_schema": { - /// "type": "object", - /// "properties": { - /// "ticker": { - /// "type": "string", - /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." - /// } - /// }, - /// "required": ["ticker"] - /// } - /// } - /// ] - /// ``` - /// - /// And then asked the model "What's the S&P 500 at today?", the model might produce - /// `tool_use` content blocks in the response like this: - /// - /// ```json - /// [ - /// { - /// "type": "tool_use", - /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", - /// "name": "get_stock_price", - /// "input": { "ticker": "^GSPC" } - /// } - /// ] - /// ``` - /// - /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an - /// input, and return the following back to the model in a subsequent `user` - /// message: - /// - /// ```json - /// [ - /// { - /// "type": "tool_result", - /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", - /// "content": "259.75 USD" - /// } - /// ] - /// ``` - /// - /// Tools can be used for workflows that include running client-side tools and - /// functions, or more generally whenever you want the model to produce a particular - /// JSON structure of output. - /// - /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. - final List? 
_tools; - - /// Definitions of tools that the model may use. - /// - /// If you include `tools` in your API request, the model may return `tool_use` - /// content blocks that represent the model's use of those tools. You can then run - /// those tools using the tool input generated by the model and then optionally - /// return results back to the model using `tool_result` content blocks. - /// - /// Each tool definition includes: - /// - /// - `name`: Name of the tool. - /// - `description`: Optional, but strongly-recommended description of the tool. - /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` - /// shape that the model will produce in `tool_use` output content blocks. - /// - /// For example, if you defined `tools` as: - /// - /// ```json - /// [ - /// { - /// "name": "get_stock_price", - /// "description": "Get the current stock price for a given ticker symbol.", - /// "input_schema": { - /// "type": "object", - /// "properties": { - /// "ticker": { - /// "type": "string", - /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." - /// } - /// }, - /// "required": ["ticker"] - /// } - /// } - /// ] - /// ``` - /// - /// And then asked the model "What's the S&P 500 at today?", the model might produce - /// `tool_use` content blocks in the response like this: - /// - /// ```json - /// [ - /// { - /// "type": "tool_use", - /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", - /// "name": "get_stock_price", - /// "input": { "ticker": "^GSPC" } - /// } - /// ] - /// ``` - /// - /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an - /// input, and return the following back to the model in a subsequent `user` - /// message: - /// - /// ```json - /// [ - /// { - /// "type": "tool_result", - /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", - /// "content": "259.75 USD" - /// } - /// ] - /// ``` - /// - /// Tools can be used for workflows that include running client-side tools and - /// functions, or more generally whenever you want the model to produce a particular - /// JSON structure of output. - /// - /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. - @override - @JsonKey(includeIfNull: false) - List? get tools { - final value = _tools; - if (value == null) return null; - if (_tools is EqualUnmodifiableListView) return _tools; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - - /// Only sample from the top K options for each subsequent token. - /// - /// Used to remove "long tail" low probability responses. - /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). - /// - /// Recommended for advanced use cases only. You usually only need to use - /// `temperature`. - @override - @JsonKey(name: 'top_k', includeIfNull: false) - final int? topK; - - /// Use nucleus sampling. - /// - /// In nucleus sampling, we compute the cumulative distribution over all the options - /// for each subsequent token in decreasing probability order and cut it off once it - /// reaches a particular probability specified by `top_p`. You should either alter - /// `temperature` or `top_p`, but not both. - /// - /// Recommended for advanced use cases only. You usually only need to use - /// `temperature`. - @override - @JsonKey(name: 'top_p', includeIfNull: false) - final double? topP; - - /// Whether to incrementally stream the response using server-sent events. 
- /// - /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for - /// details. - @override - @JsonKey() - final bool stream; - - @override - String toString() { - return 'CreateMessageRequest(model: $model, messages: $messages, maxTokens: $maxTokens, metadata: $metadata, stopSequences: $stopSequences, system: $system, temperature: $temperature, toolChoice: $toolChoice, tools: $tools, topK: $topK, topP: $topP, stream: $stream)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$CreateMessageRequestImpl && - (identical(other.model, model) || other.model == model) && - const DeepCollectionEquality().equals(other._messages, _messages) && - (identical(other.maxTokens, maxTokens) || - other.maxTokens == maxTokens) && - (identical(other.metadata, metadata) || - other.metadata == metadata) && - const DeepCollectionEquality() - .equals(other._stopSequences, _stopSequences) && - (identical(other.system, system) || other.system == system) && - (identical(other.temperature, temperature) || - other.temperature == temperature) && - (identical(other.toolChoice, toolChoice) || - other.toolChoice == toolChoice) && - const DeepCollectionEquality().equals(other._tools, _tools) && - (identical(other.topK, topK) || other.topK == topK) && - (identical(other.topP, topP) || other.topP == topP) && - (identical(other.stream, stream) || other.stream == stream)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash( - runtimeType, - model, - const DeepCollectionEquality().hash(_messages), - maxTokens, - metadata, - const DeepCollectionEquality().hash(_stopSequences), - system, - temperature, - toolChoice, - const DeepCollectionEquality().hash(_tools), - topK, - topP, - stream); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> - get copyWith => - __$$CreateMessageRequestImplCopyWithImpl<_$CreateMessageRequestImpl>( - this, _$identity); - - @override - Map toJson() { - return _$$CreateMessageRequestImplToJson( - this, - ); - } -} - -abstract class _CreateMessageRequest extends CreateMessageRequest { - const factory _CreateMessageRequest( - {@_ModelConverter() required final Model model, - required final List messages, - @JsonKey(name: 'max_tokens') required final int maxTokens, - @JsonKey(includeIfNull: false) - final CreateMessageRequestMetadata? metadata, - @JsonKey(name: 'stop_sequences', includeIfNull: false) - final List? stopSequences, - @JsonKey(includeIfNull: false) final String? system, - @JsonKey(includeIfNull: false) final double? temperature, - @JsonKey(name: 'tool_choice', includeIfNull: false) - final ToolChoice? toolChoice, - @JsonKey(includeIfNull: false) final List? tools, - @JsonKey(name: 'top_k', includeIfNull: false) final int? topK, - @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, - final bool stream}) = _$CreateMessageRequestImpl; - const _CreateMessageRequest._() : super._(); - - factory _CreateMessageRequest.fromJson(Map json) = - _$CreateMessageRequestImpl.fromJson; - - @override - - /// The model that will complete your prompt. - /// - /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional - /// details and options. - @_ModelConverter() - Model get model; - @override - - /// Input messages. - /// - /// Our models are trained to operate on alternating `user` and `assistant` - /// conversational turns. 
When creating a new `Message`, you specify the prior - /// conversational turns with the `messages` parameter, and the model then generates - /// the next `Message` in the conversation. - /// - /// Each input message must be an object with a `role` and `content`. You can - /// specify a single `user`-role message, or you can include multiple `user` and - /// `assistant` messages. The first message must always use the `user` role. - /// - /// If the final message uses the `assistant` role, the response content will - /// continue immediately from the content in that message. This can be used to - /// constrain part of the model's response. - /// - /// See [message content](https://docs.anthropic.com/en/api/messages-content) for - /// details on how to construct valid message objects. - /// - /// Example with a single `user` message: - /// - /// ```json - /// [{ "role": "user", "content": "Hello, Claude" }] - /// ``` - /// - /// Example with multiple conversational turns: - /// - /// ```json - /// [ - /// { "role": "user", "content": "Hello there." }, - /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, - /// { "role": "user", "content": "Can you explain LLMs in plain English?" } - /// ] - /// ``` - /// - /// Example with a partially-filled response from Claude: - /// - /// ```json - /// [ - /// { - /// "role": "user", - /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" - /// }, - /// { "role": "assistant", "content": "The best answer is (" } - /// ] - /// ``` - /// - /// Each input message `content` may be either a single `string` or an array of - /// content blocks, where each block has a specific `type`. Using a `string` for - /// `content` is shorthand for an array of one content block of type `"text"`. The - /// following input messages are equivalent: - /// - /// ```json - /// { "role": "user", "content": "Hello, Claude" } - /// ``` - /// - /// ```json - /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } - /// ``` - /// - /// Starting with Claude 3 models, you can also send image content blocks: - /// - /// ```json - /// { - /// "role": "user", - /// "content": [ - /// { - /// "type": "image", - /// "source": { - /// "type": "base64", - /// "media_type": "image/jpeg", - /// "data": "/9j/4AAQSkZJRg..." - /// } - /// }, - /// { "type": "text", "text": "What is in this image?" } - /// ] - /// } - /// ``` - /// - /// We currently support the `base64` source type for images, and the `image/jpeg`, - /// `image/png`, `image/gif`, and `image/webp` media types. - /// - /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more - /// input examples. - /// - /// Note that if you want to include a - /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use - /// the top-level `system` parameter — there is no `"system"` role for input - /// messages in the Messages API. - List get messages; - @override - - /// The maximum number of tokens to generate before stopping. - /// - /// Note that our models may stop _before_ reaching this maximum. This parameter - /// only specifies the absolute maximum number of tokens to generate. - /// - /// Different models have different maximum values for this parameter. See - /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. - @JsonKey(name: 'max_tokens') - int get maxTokens; - @override - - /// An object describing metadata about the request. - @JsonKey(includeIfNull: false) - CreateMessageRequestMetadata? 
get metadata; - @override - - /// Custom text sequences that will cause the model to stop generating. - /// - /// Our models will normally stop when they have naturally completed their turn, - /// which will result in a response `stop_reason` of `"end_turn"`. - /// - /// If you want the model to stop generating when it encounters custom strings of - /// text, you can use the `stop_sequences` parameter. If the model encounters one of - /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` - /// and the response `stop_sequence` value will contain the matched stop sequence. - @JsonKey(name: 'stop_sequences', includeIfNull: false) - List? get stopSequences; - @override - - /// System prompt. - /// - /// A system prompt is a way of providing context and instructions to Claude, such - /// as specifying a particular goal or role. See our - /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). - @JsonKey(includeIfNull: false) - String? get system; - @override - - /// Amount of randomness injected into the response. - /// - /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` - /// for analytical / multiple choice, and closer to `1.0` for creative and - /// generative tasks. - /// - /// Note that even with `temperature` of `0.0`, the results will not be fully - /// deterministic. - @JsonKey(includeIfNull: false) - double? get temperature; - @override - - /// How the model should use the provided tools. The model can use a specific tool, - /// any available tool, or decide by itself. - /// - /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. - /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. - /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. - @JsonKey(name: 'tool_choice', includeIfNull: false) - ToolChoice? get toolChoice; - @override - - /// Definitions of tools that the model may use. - /// - /// If you include `tools` in your API request, the model may return `tool_use` - /// content blocks that represent the model's use of those tools. You can then run - /// those tools using the tool input generated by the model and then optionally - /// return results back to the model using `tool_result` content blocks. - /// - /// Each tool definition includes: - /// - /// - `name`: Name of the tool. - /// - `description`: Optional, but strongly-recommended description of the tool. - /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` - /// shape that the model will produce in `tool_use` output content blocks. - /// - /// For example, if you defined `tools` as: - /// - /// ```json - /// [ - /// { - /// "name": "get_stock_price", - /// "description": "Get the current stock price for a given ticker symbol.", - /// "input_schema": { - /// "type": "object", - /// "properties": { - /// "ticker": { - /// "type": "string", - /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." 
- /// } - /// }, - /// "required": ["ticker"] - /// } - /// } - /// ] - /// ``` - /// - /// And then asked the model "What's the S&P 500 at today?", the model might produce - /// `tool_use` content blocks in the response like this: - /// - /// ```json - /// [ - /// { - /// "type": "tool_use", - /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", - /// "name": "get_stock_price", - /// "input": { "ticker": "^GSPC" } - /// } - /// ] - /// ``` - /// - /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an - /// input, and return the following back to the model in a subsequent `user` - /// message: - /// - /// ```json - /// [ - /// { - /// "type": "tool_result", - /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", - /// "content": "259.75 USD" - /// } - /// ] - /// ``` - /// - /// Tools can be used for workflows that include running client-side tools and - /// functions, or more generally whenever you want the model to produce a particular - /// JSON structure of output. - /// - /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. - @JsonKey(includeIfNull: false) - List? get tools; - @override - - /// Only sample from the top K options for each subsequent token. - /// - /// Used to remove "long tail" low probability responses. - /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). - /// - /// Recommended for advanced use cases only. You usually only need to use - /// `temperature`. - @JsonKey(name: 'top_k', includeIfNull: false) - int? get topK; - @override - - /// Use nucleus sampling. - /// - /// In nucleus sampling, we compute the cumulative distribution over all the options - /// for each subsequent token in decreasing probability order and cut it off once it - /// reaches a particular probability specified by `top_p`. You should either alter - /// `temperature` or `top_p`, but not both. - /// - /// Recommended for advanced use cases only. You usually only need to use - /// `temperature`. - @JsonKey(name: 'top_p', includeIfNull: false) - double? get topP; - @override - - /// Whether to incrementally stream the response using server-sent events. - /// - /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for - /// details. - bool get stream; - @override - @JsonKey(ignore: true) - _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> - get copyWith => throw _privateConstructorUsedError; -} - -Model _$ModelFromJson(Map json) { - switch (json['runtimeType']) { - case 'model': - return ModelCatalog.fromJson(json); - case 'modelId': - return ModelId.fromJson(json); - - default: - throw CheckedFromJsonException(json, 'runtimeType', 'Model', - 'Invalid union type "${json['runtimeType']}"!'); - } -} - -/// @nodoc -mixin _$Model { - Object get value => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(Models value) model, - required TResult Function(String value) modelId, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(Models value)? model, - TResult? Function(String value)? modelId, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(Models value)? model, - TResult Function(String value)? 
modelId, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(ModelCatalog value) model, - required TResult Function(ModelId value) modelId, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ModelCatalog value)? model, - TResult? Function(ModelId value)? modelId, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ModelCatalog value)? model, - TResult Function(ModelId value)? modelId, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ModelCopyWith<$Res> { - factory $ModelCopyWith(Model value, $Res Function(Model) then) = - _$ModelCopyWithImpl<$Res, Model>; -} - -/// @nodoc -class _$ModelCopyWithImpl<$Res, $Val extends Model> - implements $ModelCopyWith<$Res> { - _$ModelCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; -} - -/// @nodoc -abstract class _$$ModelCatalogImplCopyWith<$Res> { - factory _$$ModelCatalogImplCopyWith( - _$ModelCatalogImpl value, $Res Function(_$ModelCatalogImpl) then) = - __$$ModelCatalogImplCopyWithImpl<$Res>; - @useResult - $Res call({Models value}); -} - -/// @nodoc -class __$$ModelCatalogImplCopyWithImpl<$Res> - extends _$ModelCopyWithImpl<$Res, _$ModelCatalogImpl> - implements _$$ModelCatalogImplCopyWith<$Res> { - __$$ModelCatalogImplCopyWithImpl( - _$ModelCatalogImpl _value, $Res Function(_$ModelCatalogImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? value = null, - }) { - return _then(_$ModelCatalogImpl( - null == value - ? _value.value - : value // ignore: cast_nullable_to_non_nullable - as Models, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ModelCatalogImpl extends ModelCatalog { - const _$ModelCatalogImpl(this.value, {final String? $type}) - : $type = $type ?? 'model', - super._(); - - factory _$ModelCatalogImpl.fromJson(Map json) => - _$$ModelCatalogImplFromJson(json); - - @override - final Models value; - - @JsonKey(name: 'runtimeType') - final String $type; - - @override - String toString() { - return 'Model.model(value: $value)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ModelCatalogImpl && - (identical(other.value, value) || other.value == value)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, value); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ModelCatalogImplCopyWith<_$ModelCatalogImpl> get copyWith => - __$$ModelCatalogImplCopyWithImpl<_$ModelCatalogImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(Models value) model, - required TResult Function(String value) modelId, - }) { - return model(value); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(Models value)? model, - TResult? Function(String value)? modelId, - }) { - return model?.call(value); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(Models value)? model, - TResult Function(String value)? 
modelId, - required TResult orElse(), - }) { - if (model != null) { - return model(value); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(ModelCatalog value) model, - required TResult Function(ModelId value) modelId, - }) { - return model(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ModelCatalog value)? model, - TResult? Function(ModelId value)? modelId, - }) { - return model?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ModelCatalog value)? model, - TResult Function(ModelId value)? modelId, - required TResult orElse(), - }) { - if (model != null) { - return model(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ModelCatalogImplToJson( - this, - ); - } -} - -abstract class ModelCatalog extends Model { - const factory ModelCatalog(final Models value) = _$ModelCatalogImpl; - const ModelCatalog._() : super._(); - - factory ModelCatalog.fromJson(Map json) = - _$ModelCatalogImpl.fromJson; - - @override - Models get value; - @JsonKey(ignore: true) - _$$ModelCatalogImplCopyWith<_$ModelCatalogImpl> get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$ModelIdImplCopyWith<$Res> { - factory _$$ModelIdImplCopyWith( - _$ModelIdImpl value, $Res Function(_$ModelIdImpl) then) = - __$$ModelIdImplCopyWithImpl<$Res>; - @useResult - $Res call({String value}); -} - -/// @nodoc -class __$$ModelIdImplCopyWithImpl<$Res> - extends _$ModelCopyWithImpl<$Res, _$ModelIdImpl> - implements _$$ModelIdImplCopyWith<$Res> { - __$$ModelIdImplCopyWithImpl( - _$ModelIdImpl _value, $Res Function(_$ModelIdImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? value = null, - }) { - return _then(_$ModelIdImpl( - null == value - ? _value.value - : value // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ModelIdImpl extends ModelId { - const _$ModelIdImpl(this.value, {final String? $type}) - : $type = $type ?? 'modelId', - super._(); - - factory _$ModelIdImpl.fromJson(Map json) => - _$$ModelIdImplFromJson(json); - - @override - final String value; - - @JsonKey(name: 'runtimeType') - final String $type; - - @override - String toString() { - return 'Model.modelId(value: $value)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ModelIdImpl && - (identical(other.value, value) || other.value == value)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, value); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ModelIdImplCopyWith<_$ModelIdImpl> get copyWith => - __$$ModelIdImplCopyWithImpl<_$ModelIdImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(Models value) model, - required TResult Function(String value) modelId, - }) { - return modelId(value); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(Models value)? model, - TResult? Function(String value)? modelId, - }) { - return modelId?.call(value); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(Models value)? model, - TResult Function(String value)? 
modelId, - required TResult orElse(), - }) { - if (modelId != null) { - return modelId(value); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(ModelCatalog value) model, - required TResult Function(ModelId value) modelId, - }) { - return modelId(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ModelCatalog value)? model, - TResult? Function(ModelId value)? modelId, - }) { - return modelId?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ModelCatalog value)? model, - TResult Function(ModelId value)? modelId, - required TResult orElse(), - }) { - if (modelId != null) { - return modelId(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ModelIdImplToJson( - this, - ); - } -} - -abstract class ModelId extends Model { - const factory ModelId(final String value) = _$ModelIdImpl; - const ModelId._() : super._(); - - factory ModelId.fromJson(Map json) = _$ModelIdImpl.fromJson; - - @override - String get value; - @JsonKey(ignore: true) - _$$ModelIdImplCopyWith<_$ModelIdImpl> get copyWith => - throw _privateConstructorUsedError; -} - -CreateMessageRequestMetadata _$CreateMessageRequestMetadataFromJson( - Map json) { - return _CreateMessageRequestMetadata.fromJson(json); -} - -/// @nodoc -mixin _$CreateMessageRequestMetadata { - /// An external identifier for the user who is associated with the request. - /// - /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use - /// this id to help detect abuse. Do not include any identifying information such as - /// name, email address, or phone number. - @JsonKey(name: 'user_id', includeIfNull: false) - String? get userId => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $CreateMessageRequestMetadataCopyWith - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $CreateMessageRequestMetadataCopyWith<$Res> { - factory $CreateMessageRequestMetadataCopyWith( - CreateMessageRequestMetadata value, - $Res Function(CreateMessageRequestMetadata) then) = - _$CreateMessageRequestMetadataCopyWithImpl<$Res, - CreateMessageRequestMetadata>; - @useResult - $Res call({@JsonKey(name: 'user_id', includeIfNull: false) String? userId}); -} - -/// @nodoc -class _$CreateMessageRequestMetadataCopyWithImpl<$Res, - $Val extends CreateMessageRequestMetadata> - implements $CreateMessageRequestMetadataCopyWith<$Res> { - _$CreateMessageRequestMetadataCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? userId = freezed, - }) { - return _then(_value.copyWith( - userId: freezed == userId - ? _value.userId - : userId // ignore: cast_nullable_to_non_nullable - as String?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$CreateMessageRequestMetadataImplCopyWith<$Res> - implements $CreateMessageRequestMetadataCopyWith<$Res> { - factory _$$CreateMessageRequestMetadataImplCopyWith( - _$CreateMessageRequestMetadataImpl value, - $Res Function(_$CreateMessageRequestMetadataImpl) then) = - __$$CreateMessageRequestMetadataImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({@JsonKey(name: 'user_id', includeIfNull: false) String? 
userId}); -} - -/// @nodoc -class __$$CreateMessageRequestMetadataImplCopyWithImpl<$Res> - extends _$CreateMessageRequestMetadataCopyWithImpl<$Res, - _$CreateMessageRequestMetadataImpl> - implements _$$CreateMessageRequestMetadataImplCopyWith<$Res> { - __$$CreateMessageRequestMetadataImplCopyWithImpl( - _$CreateMessageRequestMetadataImpl _value, - $Res Function(_$CreateMessageRequestMetadataImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? userId = freezed, - }) { - return _then(_$CreateMessageRequestMetadataImpl( - userId: freezed == userId - ? _value.userId - : userId // ignore: cast_nullable_to_non_nullable - as String?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$CreateMessageRequestMetadataImpl extends _CreateMessageRequestMetadata { - const _$CreateMessageRequestMetadataImpl( - {@JsonKey(name: 'user_id', includeIfNull: false) this.userId}) - : super._(); - - factory _$CreateMessageRequestMetadataImpl.fromJson( - Map json) => - _$$CreateMessageRequestMetadataImplFromJson(json); - - /// An external identifier for the user who is associated with the request. - /// - /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use - /// this id to help detect abuse. Do not include any identifying information such as - /// name, email address, or phone number. - @override - @JsonKey(name: 'user_id', includeIfNull: false) - final String? userId; - - @override - String toString() { - return 'CreateMessageRequestMetadata(userId: $userId)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$CreateMessageRequestMetadataImpl && - (identical(other.userId, userId) || other.userId == userId)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, userId); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$CreateMessageRequestMetadataImplCopyWith< - _$CreateMessageRequestMetadataImpl> - get copyWith => __$$CreateMessageRequestMetadataImplCopyWithImpl< - _$CreateMessageRequestMetadataImpl>(this, _$identity); - - @override - Map toJson() { - return _$$CreateMessageRequestMetadataImplToJson( - this, - ); - } -} - -abstract class _CreateMessageRequestMetadata - extends CreateMessageRequestMetadata { - const factory _CreateMessageRequestMetadata( - {@JsonKey(name: 'user_id', includeIfNull: false) - final String? userId}) = _$CreateMessageRequestMetadataImpl; - const _CreateMessageRequestMetadata._() : super._(); - - factory _CreateMessageRequestMetadata.fromJson(Map json) = - _$CreateMessageRequestMetadataImpl.fromJson; - - @override - - /// An external identifier for the user who is associated with the request. - /// - /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use - /// this id to help detect abuse. Do not include any identifying information such as - /// name, email address, or phone number. - @JsonKey(name: 'user_id', includeIfNull: false) - String? get userId; - @override - @JsonKey(ignore: true) - _$$CreateMessageRequestMetadataImplCopyWith< - _$CreateMessageRequestMetadataImpl> - get copyWith => throw _privateConstructorUsedError; -} - -ToolChoice _$ToolChoiceFromJson(Map json) { - return _ToolChoice.fromJson(json); -} - -/// @nodoc -mixin _$ToolChoice { - /// How the model should use the provided tools. The model can use a specific tool, - /// any available tool, or decide by itself. 
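// [Editor's sketch, not part of the original patch.] A minimal ToolChoice
// usage example, assuming the public ToolChoice factory generated in this
// file and a ToolChoiceType enum exposing the documented `auto` / `any` /
// `tool` values; the tool name below is hypothetical and would have to match
// a declared Tool.name.
final ToolChoice forcedToolChoice = ToolChoice(
  type: ToolChoiceType.tool, // `auto` lets the model decide; `any` requires some tool
  name: 'get_weather',
);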
- /// - /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. - /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. - /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. - ToolChoiceType get type => throw _privateConstructorUsedError; - - /// The name of the tool to use. - @JsonKey(includeIfNull: false) - String? get name => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ToolChoiceCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ToolChoiceCopyWith<$Res> { - factory $ToolChoiceCopyWith( - ToolChoice value, $Res Function(ToolChoice) then) = - _$ToolChoiceCopyWithImpl<$Res, ToolChoice>; - @useResult - $Res call({ToolChoiceType type, @JsonKey(includeIfNull: false) String? name}); -} - -/// @nodoc -class _$ToolChoiceCopyWithImpl<$Res, $Val extends ToolChoice> - implements $ToolChoiceCopyWith<$Res> { - _$ToolChoiceCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - Object? name = freezed, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ToolChoiceType, - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$ToolChoiceImplCopyWith<$Res> - implements $ToolChoiceCopyWith<$Res> { - factory _$$ToolChoiceImplCopyWith( - _$ToolChoiceImpl value, $Res Function(_$ToolChoiceImpl) then) = - __$$ToolChoiceImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({ToolChoiceType type, @JsonKey(includeIfNull: false) String? name}); -} - -/// @nodoc -class __$$ToolChoiceImplCopyWithImpl<$Res> - extends _$ToolChoiceCopyWithImpl<$Res, _$ToolChoiceImpl> - implements _$$ToolChoiceImplCopyWith<$Res> { - __$$ToolChoiceImplCopyWithImpl( - _$ToolChoiceImpl _value, $Res Function(_$ToolChoiceImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - Object? name = freezed, - }) { - return _then(_$ToolChoiceImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ToolChoiceType, - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ToolChoiceImpl extends _ToolChoice { - const _$ToolChoiceImpl( - {required this.type, @JsonKey(includeIfNull: false) this.name}) - : super._(); - - factory _$ToolChoiceImpl.fromJson(Map json) => - _$$ToolChoiceImplFromJson(json); - - /// How the model should use the provided tools. The model can use a specific tool, - /// any available tool, or decide by itself. - /// - /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. - /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. - /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. - @override - final ToolChoiceType type; - - /// The name of the tool to use. - @override - @JsonKey(includeIfNull: false) - final String? 
name; - - @override - String toString() { - return 'ToolChoice(type: $type, name: $name)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ToolChoiceImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.name, name) || other.name == name)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, type, name); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ToolChoiceImplCopyWith<_$ToolChoiceImpl> get copyWith => - __$$ToolChoiceImplCopyWithImpl<_$ToolChoiceImpl>(this, _$identity); - - @override - Map toJson() { - return _$$ToolChoiceImplToJson( - this, - ); - } -} - -abstract class _ToolChoice extends ToolChoice { - const factory _ToolChoice( - {required final ToolChoiceType type, - @JsonKey(includeIfNull: false) final String? name}) = _$ToolChoiceImpl; - const _ToolChoice._() : super._(); - - factory _ToolChoice.fromJson(Map json) = - _$ToolChoiceImpl.fromJson; - - @override - - /// How the model should use the provided tools. The model can use a specific tool, - /// any available tool, or decide by itself. - /// - /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. - /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. - /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. - ToolChoiceType get type; - @override - - /// The name of the tool to use. - @JsonKey(includeIfNull: false) - String? get name; - @override - @JsonKey(ignore: true) - _$$ToolChoiceImplCopyWith<_$ToolChoiceImpl> get copyWith => - throw _privateConstructorUsedError; -} - -Message _$MessageFromJson(Map json) { - return _Message.fromJson(json); -} - -/// @nodoc -mixin _$Message { - /// Unique object identifier. - /// - /// The format and length of IDs may change over time. - @JsonKey(includeIfNull: false) - String? get id => throw _privateConstructorUsedError; - - /// The content of the message. - @_MessageContentConverter() - MessageContent get content => throw _privateConstructorUsedError; - - /// The role of the messages author. - MessageRole get role => throw _privateConstructorUsedError; - - /// The model that handled the request. - @JsonKey(includeIfNull: false) - String? get model => throw _privateConstructorUsedError; - - /// The reason that we stopped. - /// - /// This may be one the following values: - /// - /// - `"end_turn"`: the model reached a natural stopping point - /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum - /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated - /// - /// In non-streaming mode this value is always non-null. In streaming mode, it is - /// null in the `message_start` event and non-null otherwise. - @JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - StopReason? get stopReason => throw _privateConstructorUsedError; - - /// Which custom stop sequence was generated, if any. - /// - /// This value will be a non-null string if one of your custom stop sequences was - /// generated. - @JsonKey(name: 'stop_sequence', includeIfNull: false) - String? get stopSequence => throw _privateConstructorUsedError; - - /// Object type. - /// - /// For Messages, this is always `"message"`. - @JsonKey(includeIfNull: false) - String? 
get type => throw _privateConstructorUsedError; - - /// Billing and rate-limit usage. - /// - /// Anthropic's API bills and rate-limits by token counts, as tokens represent the - /// underlying cost to our systems. - /// - /// Under the hood, the API transforms requests into a format suitable for the - /// model. The model's output then goes through a parsing stage before becoming an - /// API response. As a result, the token counts in `usage` will not match one-to-one - /// with the exact visible content of an API request or response. - /// - /// For example, `output_tokens` will be non-zero, even for an empty string response - /// from Claude. - @JsonKey(includeIfNull: false) - Usage? get usage => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $MessageCopyWith get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $MessageCopyWith<$Res> { - factory $MessageCopyWith(Message value, $Res Function(Message) then) = - _$MessageCopyWithImpl<$Res, Message>; - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? id, - @_MessageContentConverter() MessageContent content, - MessageRole role, - @JsonKey(includeIfNull: false) String? model, - @JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - StopReason? stopReason, - @JsonKey(name: 'stop_sequence', includeIfNull: false) - String? stopSequence, - @JsonKey(includeIfNull: false) String? type, - @JsonKey(includeIfNull: false) Usage? usage}); - - $MessageContentCopyWith<$Res> get content; - $UsageCopyWith<$Res>? get usage; -} - -/// @nodoc -class _$MessageCopyWithImpl<$Res, $Val extends Message> - implements $MessageCopyWith<$Res> { - _$MessageCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? id = freezed, - Object? content = null, - Object? role = null, - Object? model = freezed, - Object? stopReason = freezed, - Object? stopSequence = freezed, - Object? type = freezed, - Object? usage = freezed, - }) { - return _then(_value.copyWith( - id: freezed == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String?, - content: null == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as MessageContent, - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as MessageRole, - model: freezed == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String?, - stopReason: freezed == stopReason - ? _value.stopReason - : stopReason // ignore: cast_nullable_to_non_nullable - as StopReason?, - stopSequence: freezed == stopSequence - ? _value.stopSequence - : stopSequence // ignore: cast_nullable_to_non_nullable - as String?, - type: freezed == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String?, - usage: freezed == usage - ? _value.usage - : usage // ignore: cast_nullable_to_non_nullable - as Usage?, - ) as $Val); - } - - @override - @pragma('vm:prefer-inline') - $MessageContentCopyWith<$Res> get content { - return $MessageContentCopyWith<$Res>(_value.content, (value) { - return _then(_value.copyWith(content: value) as $Val); - }); - } - - @override - @pragma('vm:prefer-inline') - $UsageCopyWith<$Res>? 
get usage { - if (_value.usage == null) { - return null; - } - - return $UsageCopyWith<$Res>(_value.usage!, (value) { - return _then(_value.copyWith(usage: value) as $Val); - }); - } -} - -/// @nodoc -abstract class _$$MessageImplCopyWith<$Res> implements $MessageCopyWith<$Res> { - factory _$$MessageImplCopyWith( - _$MessageImpl value, $Res Function(_$MessageImpl) then) = - __$$MessageImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? id, - @_MessageContentConverter() MessageContent content, - MessageRole role, - @JsonKey(includeIfNull: false) String? model, - @JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - StopReason? stopReason, - @JsonKey(name: 'stop_sequence', includeIfNull: false) - String? stopSequence, - @JsonKey(includeIfNull: false) String? type, - @JsonKey(includeIfNull: false) Usage? usage}); - - @override - $MessageContentCopyWith<$Res> get content; - @override - $UsageCopyWith<$Res>? get usage; -} - -/// @nodoc -class __$$MessageImplCopyWithImpl<$Res> - extends _$MessageCopyWithImpl<$Res, _$MessageImpl> - implements _$$MessageImplCopyWith<$Res> { - __$$MessageImplCopyWithImpl( - _$MessageImpl _value, $Res Function(_$MessageImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? id = freezed, - Object? content = null, - Object? role = null, - Object? model = freezed, - Object? stopReason = freezed, - Object? stopSequence = freezed, - Object? type = freezed, - Object? usage = freezed, - }) { - return _then(_$MessageImpl( - id: freezed == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String?, - content: null == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as MessageContent, - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as MessageRole, - model: freezed == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String?, - stopReason: freezed == stopReason - ? _value.stopReason - : stopReason // ignore: cast_nullable_to_non_nullable - as StopReason?, - stopSequence: freezed == stopSequence - ? _value.stopSequence - : stopSequence // ignore: cast_nullable_to_non_nullable - as String?, - type: freezed == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String?, - usage: freezed == usage - ? _value.usage - : usage // ignore: cast_nullable_to_non_nullable - as Usage?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$MessageImpl extends _Message { - const _$MessageImpl( - {@JsonKey(includeIfNull: false) this.id, - @_MessageContentConverter() required this.content, - required this.role, - @JsonKey(includeIfNull: false) this.model, - @JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - this.stopReason, - @JsonKey(name: 'stop_sequence', includeIfNull: false) this.stopSequence, - @JsonKey(includeIfNull: false) this.type, - @JsonKey(includeIfNull: false) this.usage}) - : super._(); - - factory _$MessageImpl.fromJson(Map json) => - _$$MessageImplFromJson(json); - - /// Unique object identifier. - /// - /// The format and length of IDs may change over time. - @override - @JsonKey(includeIfNull: false) - final String? id; - - /// The content of the message. - @override - @_MessageContentConverter() - final MessageContent content; - - /// The role of the messages author. 
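// [Editor's sketch, not part of the original patch.] Reading a generated
// Message, assuming the Message / MessageContent / Block classes defined in
// this file; that TextBlock exposes a `text` getter is an assumption based on
// the union cases registered near the end of the file.
String messageText(Message message) {
  // MessageContent is a union of a block list and plain text.
  return message.content.when(
    blocks: (blocks) =>
        blocks.whereType<TextBlock>().map((block) => block.text).join('\n'),
    text: (value) => value,
  );
}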
- @override - final MessageRole role; - - /// The model that handled the request. - @override - @JsonKey(includeIfNull: false) - final String? model; - - /// The reason that we stopped. - /// - /// This may be one the following values: - /// - /// - `"end_turn"`: the model reached a natural stopping point - /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum - /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated - /// - /// In non-streaming mode this value is always non-null. In streaming mode, it is - /// null in the `message_start` event and non-null otherwise. - @override - @JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final StopReason? stopReason; - - /// Which custom stop sequence was generated, if any. - /// - /// This value will be a non-null string if one of your custom stop sequences was - /// generated. - @override - @JsonKey(name: 'stop_sequence', includeIfNull: false) - final String? stopSequence; - - /// Object type. - /// - /// For Messages, this is always `"message"`. - @override - @JsonKey(includeIfNull: false) - final String? type; - - /// Billing and rate-limit usage. - /// - /// Anthropic's API bills and rate-limits by token counts, as tokens represent the - /// underlying cost to our systems. - /// - /// Under the hood, the API transforms requests into a format suitable for the - /// model. The model's output then goes through a parsing stage before becoming an - /// API response. As a result, the token counts in `usage` will not match one-to-one - /// with the exact visible content of an API request or response. - /// - /// For example, `output_tokens` will be non-zero, even for an empty string response - /// from Claude. - @override - @JsonKey(includeIfNull: false) - final Usage? usage; - - @override - String toString() { - return 'Message(id: $id, content: $content, role: $role, model: $model, stopReason: $stopReason, stopSequence: $stopSequence, type: $type, usage: $usage)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$MessageImpl && - (identical(other.id, id) || other.id == id) && - (identical(other.content, content) || other.content == content) && - (identical(other.role, role) || other.role == role) && - (identical(other.model, model) || other.model == model) && - (identical(other.stopReason, stopReason) || - other.stopReason == stopReason) && - (identical(other.stopSequence, stopSequence) || - other.stopSequence == stopSequence) && - (identical(other.type, type) || other.type == type) && - (identical(other.usage, usage) || other.usage == usage)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, id, content, role, model, - stopReason, stopSequence, type, usage); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$MessageImplCopyWith<_$MessageImpl> get copyWith => - __$$MessageImplCopyWithImpl<_$MessageImpl>(this, _$identity); - - @override - Map toJson() { - return _$$MessageImplToJson( - this, - ); - } -} - -abstract class _Message extends Message { - const factory _Message( - {@JsonKey(includeIfNull: false) final String? id, - @_MessageContentConverter() required final MessageContent content, - required final MessageRole role, - @JsonKey(includeIfNull: false) final String? 
model, - @JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final StopReason? stopReason, - @JsonKey(name: 'stop_sequence', includeIfNull: false) - final String? stopSequence, - @JsonKey(includeIfNull: false) final String? type, - @JsonKey(includeIfNull: false) final Usage? usage}) = _$MessageImpl; - const _Message._() : super._(); - - factory _Message.fromJson(Map json) = _$MessageImpl.fromJson; - - @override - - /// Unique object identifier. - /// - /// The format and length of IDs may change over time. - @JsonKey(includeIfNull: false) - String? get id; - @override - - /// The content of the message. - @_MessageContentConverter() - MessageContent get content; - @override - - /// The role of the messages author. - MessageRole get role; - @override - - /// The model that handled the request. - @JsonKey(includeIfNull: false) - String? get model; - @override - - /// The reason that we stopped. - /// - /// This may be one the following values: - /// - /// - `"end_turn"`: the model reached a natural stopping point - /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum - /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated - /// - /// In non-streaming mode this value is always non-null. In streaming mode, it is - /// null in the `message_start` event and non-null otherwise. - @JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - StopReason? get stopReason; - @override - - /// Which custom stop sequence was generated, if any. - /// - /// This value will be a non-null string if one of your custom stop sequences was - /// generated. - @JsonKey(name: 'stop_sequence', includeIfNull: false) - String? get stopSequence; - @override - - /// Object type. - /// - /// For Messages, this is always `"message"`. - @JsonKey(includeIfNull: false) - String? get type; - @override - - /// Billing and rate-limit usage. - /// - /// Anthropic's API bills and rate-limits by token counts, as tokens represent the - /// underlying cost to our systems. - /// - /// Under the hood, the API transforms requests into a format suitable for the - /// model. The model's output then goes through a parsing stage before becoming an - /// API response. As a result, the token counts in `usage` will not match one-to-one - /// with the exact visible content of an API request or response. - /// - /// For example, `output_tokens` will be non-zero, even for an empty string response - /// from Claude. - @JsonKey(includeIfNull: false) - Usage? get usage; - @override - @JsonKey(ignore: true) - _$$MessageImplCopyWith<_$MessageImpl> get copyWith => - throw _privateConstructorUsedError; -} - -MessageContent _$MessageContentFromJson(Map json) { - switch (json['runtimeType']) { - case 'blocks': - return MessageContentBlocks.fromJson(json); - case 'text': - return MessageContentText.fromJson(json); - - default: - throw CheckedFromJsonException(json, 'runtimeType', 'MessageContent', - 'Invalid union type "${json['runtimeType']}"!'); - } -} - -/// @nodoc -mixin _$MessageContent { - Object get value => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(List value) blocks, - required TResult Function(String value) text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(List value)? blocks, - TResult? Function(String value)? 
text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(List value)? blocks, - TResult Function(String value)? text, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(MessageContentBlocks value) blocks, - required TResult Function(MessageContentText value) text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageContentBlocks value)? blocks, - TResult? Function(MessageContentText value)? text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageContentBlocks value)? blocks, - TResult Function(MessageContentText value)? text, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $MessageContentCopyWith<$Res> { - factory $MessageContentCopyWith( - MessageContent value, $Res Function(MessageContent) then) = - _$MessageContentCopyWithImpl<$Res, MessageContent>; -} - -/// @nodoc -class _$MessageContentCopyWithImpl<$Res, $Val extends MessageContent> - implements $MessageContentCopyWith<$Res> { - _$MessageContentCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; -} - -/// @nodoc -abstract class _$$MessageContentBlocksImplCopyWith<$Res> { - factory _$$MessageContentBlocksImplCopyWith(_$MessageContentBlocksImpl value, - $Res Function(_$MessageContentBlocksImpl) then) = - __$$MessageContentBlocksImplCopyWithImpl<$Res>; - @useResult - $Res call({List value}); -} - -/// @nodoc -class __$$MessageContentBlocksImplCopyWithImpl<$Res> - extends _$MessageContentCopyWithImpl<$Res, _$MessageContentBlocksImpl> - implements _$$MessageContentBlocksImplCopyWith<$Res> { - __$$MessageContentBlocksImplCopyWithImpl(_$MessageContentBlocksImpl _value, - $Res Function(_$MessageContentBlocksImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? value = null, - }) { - return _then(_$MessageContentBlocksImpl( - null == value - ? _value._value - : value // ignore: cast_nullable_to_non_nullable - as List, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$MessageContentBlocksImpl extends MessageContentBlocks { - const _$MessageContentBlocksImpl(final List value, - {final String? $type}) - : _value = value, - $type = $type ?? 
'blocks', - super._(); - - factory _$MessageContentBlocksImpl.fromJson(Map json) => - _$$MessageContentBlocksImplFromJson(json); - - final List _value; - @override - List get value { - if (_value is EqualUnmodifiableListView) return _value; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_value); - } - - @JsonKey(name: 'runtimeType') - final String $type; - - @override - String toString() { - return 'MessageContent.blocks(value: $value)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$MessageContentBlocksImpl && - const DeepCollectionEquality().equals(other._value, _value)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$MessageContentBlocksImplCopyWith<_$MessageContentBlocksImpl> - get copyWith => - __$$MessageContentBlocksImplCopyWithImpl<_$MessageContentBlocksImpl>( - this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(List value) blocks, - required TResult Function(String value) text, - }) { - return blocks(value); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(List value)? blocks, - TResult? Function(String value)? text, - }) { - return blocks?.call(value); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(List value)? blocks, - TResult Function(String value)? text, - required TResult orElse(), - }) { - if (blocks != null) { - return blocks(value); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(MessageContentBlocks value) blocks, - required TResult Function(MessageContentText value) text, - }) { - return blocks(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageContentBlocks value)? blocks, - TResult? Function(MessageContentText value)? text, - }) { - return blocks?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageContentBlocks value)? blocks, - TResult Function(MessageContentText value)? 
text, - required TResult orElse(), - }) { - if (blocks != null) { - return blocks(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$MessageContentBlocksImplToJson( - this, - ); - } -} - -abstract class MessageContentBlocks extends MessageContent { - const factory MessageContentBlocks(final List value) = - _$MessageContentBlocksImpl; - const MessageContentBlocks._() : super._(); - - factory MessageContentBlocks.fromJson(Map json) = - _$MessageContentBlocksImpl.fromJson; - - @override - List get value; - @JsonKey(ignore: true) - _$$MessageContentBlocksImplCopyWith<_$MessageContentBlocksImpl> - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$MessageContentTextImplCopyWith<$Res> { - factory _$$MessageContentTextImplCopyWith(_$MessageContentTextImpl value, - $Res Function(_$MessageContentTextImpl) then) = - __$$MessageContentTextImplCopyWithImpl<$Res>; - @useResult - $Res call({String value}); -} - -/// @nodoc -class __$$MessageContentTextImplCopyWithImpl<$Res> - extends _$MessageContentCopyWithImpl<$Res, _$MessageContentTextImpl> - implements _$$MessageContentTextImplCopyWith<$Res> { - __$$MessageContentTextImplCopyWithImpl(_$MessageContentTextImpl _value, - $Res Function(_$MessageContentTextImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? value = null, - }) { - return _then(_$MessageContentTextImpl( - null == value - ? _value.value - : value // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$MessageContentTextImpl extends MessageContentText { - const _$MessageContentTextImpl(this.value, {final String? $type}) - : $type = $type ?? 'text', - super._(); - - factory _$MessageContentTextImpl.fromJson(Map json) => - _$$MessageContentTextImplFromJson(json); - - @override - final String value; - - @JsonKey(name: 'runtimeType') - final String $type; - - @override - String toString() { - return 'MessageContent.text(value: $value)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$MessageContentTextImpl && - (identical(other.value, value) || other.value == value)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, value); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => - __$$MessageContentTextImplCopyWithImpl<_$MessageContentTextImpl>( - this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(List value) blocks, - required TResult Function(String value) text, - }) { - return text(value); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(List value)? blocks, - TResult? Function(String value)? text, - }) { - return text?.call(value); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(List value)? blocks, - TResult Function(String value)? text, - required TResult orElse(), - }) { - if (text != null) { - return text(value); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(MessageContentBlocks value) blocks, - required TResult Function(MessageContentText value) text, - }) { - return text(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageContentBlocks value)? blocks, - TResult? 
Function(MessageContentText value)? text, - }) { - return text?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageContentBlocks value)? blocks, - TResult Function(MessageContentText value)? text, - required TResult orElse(), - }) { - if (text != null) { - return text(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$MessageContentTextImplToJson( - this, - ); - } -} - -abstract class MessageContentText extends MessageContent { - const factory MessageContentText(final String value) = - _$MessageContentTextImpl; - const MessageContentText._() : super._(); - - factory MessageContentText.fromJson(Map json) = - _$MessageContentTextImpl.fromJson; - - @override - String get value; - @JsonKey(ignore: true) - _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => - throw _privateConstructorUsedError; -} - -Tool _$ToolFromJson(Map json) { - return _Tool.fromJson(json); -} - -/// @nodoc -mixin _$Tool { - /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. - String get name => throw _privateConstructorUsedError; - - /// Description of what this tool does. - /// - /// Tool descriptions should be as detailed as possible. The more information that - /// the model has about what the tool is and how to use it, the better it will - /// perform. You can use natural language descriptions to reinforce important - /// aspects of the tool input JSON schema. - @JsonKey(includeIfNull: false) - String? get description => throw _privateConstructorUsedError; - - /// [JSON schema](https://json-schema.org/) for this tool's input. - /// - /// This defines the shape of the `input` that your tool accepts and that the model - /// will produce. - @JsonKey(name: 'input_schema') - Map get inputSchema => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ToolCopyWith get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ToolCopyWith<$Res> { - factory $ToolCopyWith(Tool value, $Res Function(Tool) then) = - _$ToolCopyWithImpl<$Res, Tool>; - @useResult - $Res call( - {String name, - @JsonKey(includeIfNull: false) String? description, - @JsonKey(name: 'input_schema') Map inputSchema}); -} - -/// @nodoc -class _$ToolCopyWithImpl<$Res, $Val extends Tool> - implements $ToolCopyWith<$Res> { - _$ToolCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? name = null, - Object? description = freezed, - Object? inputSchema = null, - }) { - return _then(_value.copyWith( - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, - description: freezed == description - ? _value.description - : description // ignore: cast_nullable_to_non_nullable - as String?, - inputSchema: null == inputSchema - ? _value.inputSchema - : inputSchema // ignore: cast_nullable_to_non_nullable - as Map, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$ToolImplCopyWith<$Res> implements $ToolCopyWith<$Res> { - factory _$$ToolImplCopyWith( - _$ToolImpl value, $Res Function(_$ToolImpl) then) = - __$$ToolImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {String name, - @JsonKey(includeIfNull: false) String? 
description, - @JsonKey(name: 'input_schema') Map inputSchema}); -} - -/// @nodoc -class __$$ToolImplCopyWithImpl<$Res> - extends _$ToolCopyWithImpl<$Res, _$ToolImpl> - implements _$$ToolImplCopyWith<$Res> { - __$$ToolImplCopyWithImpl(_$ToolImpl _value, $Res Function(_$ToolImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? name = null, - Object? description = freezed, - Object? inputSchema = null, - }) { - return _then(_$ToolImpl( - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, - description: freezed == description - ? _value.description - : description // ignore: cast_nullable_to_non_nullable - as String?, - inputSchema: null == inputSchema - ? _value._inputSchema - : inputSchema // ignore: cast_nullable_to_non_nullable - as Map, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ToolImpl extends _Tool { - const _$ToolImpl( - {required this.name, - @JsonKey(includeIfNull: false) this.description, - @JsonKey(name: 'input_schema') - required final Map inputSchema}) - : _inputSchema = inputSchema, - super._(); - - factory _$ToolImpl.fromJson(Map json) => - _$$ToolImplFromJson(json); - - /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. - @override - final String name; - - /// Description of what this tool does. - /// - /// Tool descriptions should be as detailed as possible. The more information that - /// the model has about what the tool is and how to use it, the better it will - /// perform. You can use natural language descriptions to reinforce important - /// aspects of the tool input JSON schema. - @override - @JsonKey(includeIfNull: false) - final String? description; - - /// [JSON schema](https://json-schema.org/) for this tool's input. - /// - /// This defines the shape of the `input` that your tool accepts and that the model - /// will produce. - final Map _inputSchema; - - /// [JSON schema](https://json-schema.org/) for this tool's input. - /// - /// This defines the shape of the `input` that your tool accepts and that the model - /// will produce. - @override - @JsonKey(name: 'input_schema') - Map get inputSchema { - if (_inputSchema is EqualUnmodifiableMapView) return _inputSchema; - // ignore: implicit_dynamic_type - return EqualUnmodifiableMapView(_inputSchema); - } - - @override - String toString() { - return 'Tool(name: $name, description: $description, inputSchema: $inputSchema)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ToolImpl && - (identical(other.name, name) || other.name == name) && - (identical(other.description, description) || - other.description == description) && - const DeepCollectionEquality() - .equals(other._inputSchema, _inputSchema)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, name, description, - const DeepCollectionEquality().hash(_inputSchema)); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ToolImplCopyWith<_$ToolImpl> get copyWith => - __$$ToolImplCopyWithImpl<_$ToolImpl>(this, _$identity); - - @override - Map toJson() { - return _$$ToolImplToJson( - this, - ); - } -} - -abstract class _Tool extends Tool { - const factory _Tool( - {required final String name, - @JsonKey(includeIfNull: false) final String? 
description, - @JsonKey(name: 'input_schema') - required final Map inputSchema}) = _$ToolImpl; - const _Tool._() : super._(); - - factory _Tool.fromJson(Map json) = _$ToolImpl.fromJson; - - @override - - /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. - String get name; - @override - - /// Description of what this tool does. - /// - /// Tool descriptions should be as detailed as possible. The more information that - /// the model has about what the tool is and how to use it, the better it will - /// perform. You can use natural language descriptions to reinforce important - /// aspects of the tool input JSON schema. - @JsonKey(includeIfNull: false) - String? get description; - @override - - /// [JSON schema](https://json-schema.org/) for this tool's input. - /// - /// This defines the shape of the `input` that your tool accepts and that the model - /// will produce. - @JsonKey(name: 'input_schema') - Map get inputSchema; - @override - @JsonKey(ignore: true) - _$$ToolImplCopyWith<_$ToolImpl> get copyWith => - throw _privateConstructorUsedError; -} - -ImageBlockSource _$ImageBlockSourceFromJson(Map json) { - return _ImageBlockSource.fromJson(json); -} - -/// @nodoc -mixin _$ImageBlockSource { - /// The base64-encoded image data. - String get data => throw _privateConstructorUsedError; - - /// The media type of the image. - @JsonKey(name: 'media_type') - ImageBlockSourceMediaType get mediaType => throw _privateConstructorUsedError; - - /// The type of image source. - ImageBlockSourceType get type => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ImageBlockSourceCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ImageBlockSourceCopyWith<$Res> { - factory $ImageBlockSourceCopyWith( - ImageBlockSource value, $Res Function(ImageBlockSource) then) = - _$ImageBlockSourceCopyWithImpl<$Res, ImageBlockSource>; - @useResult - $Res call( - {String data, - @JsonKey(name: 'media_type') ImageBlockSourceMediaType mediaType, - ImageBlockSourceType type}); -} - -/// @nodoc -class _$ImageBlockSourceCopyWithImpl<$Res, $Val extends ImageBlockSource> - implements $ImageBlockSourceCopyWith<$Res> { - _$ImageBlockSourceCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? data = null, - Object? mediaType = null, - Object? type = null, - }) { - return _then(_value.copyWith( - data: null == data - ? _value.data - : data // ignore: cast_nullable_to_non_nullable - as String, - mediaType: null == mediaType - ? _value.mediaType - : mediaType // ignore: cast_nullable_to_non_nullable - as ImageBlockSourceMediaType, - type: null == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as ImageBlockSourceType, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$ImageBlockSourceImplCopyWith<$Res> - implements $ImageBlockSourceCopyWith<$Res> { - factory _$$ImageBlockSourceImplCopyWith(_$ImageBlockSourceImpl value, - $Res Function(_$ImageBlockSourceImpl) then) = - __$$ImageBlockSourceImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {String data, - @JsonKey(name: 'media_type') ImageBlockSourceMediaType mediaType, - ImageBlockSourceType type}); -} - -/// @nodoc -class __$$ImageBlockSourceImplCopyWithImpl<$Res> - extends _$ImageBlockSourceCopyWithImpl<$Res, _$ImageBlockSourceImpl> - implements _$$ImageBlockSourceImplCopyWith<$Res> { - __$$ImageBlockSourceImplCopyWithImpl(_$ImageBlockSourceImpl _value, - $Res Function(_$ImageBlockSourceImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? data = null, - Object? mediaType = null, - Object? type = null, - }) { - return _then(_$ImageBlockSourceImpl( - data: null == data - ? _value.data - : data // ignore: cast_nullable_to_non_nullable - as String, - mediaType: null == mediaType - ? _value.mediaType - : mediaType // ignore: cast_nullable_to_non_nullable - as ImageBlockSourceMediaType, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ImageBlockSourceType, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ImageBlockSourceImpl extends _ImageBlockSource { - const _$ImageBlockSourceImpl( - {required this.data, - @JsonKey(name: 'media_type') required this.mediaType, - required this.type}) - : super._(); - - factory _$ImageBlockSourceImpl.fromJson(Map json) => - _$$ImageBlockSourceImplFromJson(json); - - /// The base64-encoded image data. - @override - final String data; - - /// The media type of the image. - @override - @JsonKey(name: 'media_type') - final ImageBlockSourceMediaType mediaType; - - /// The type of image source. - @override - final ImageBlockSourceType type; - - @override - String toString() { - return 'ImageBlockSource(data: $data, mediaType: $mediaType, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ImageBlockSourceImpl && - (identical(other.data, data) || other.data == data) && - (identical(other.mediaType, mediaType) || - other.mediaType == mediaType) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, data, mediaType, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ImageBlockSourceImplCopyWith<_$ImageBlockSourceImpl> get copyWith => - __$$ImageBlockSourceImplCopyWithImpl<_$ImageBlockSourceImpl>( - this, _$identity); - - @override - Map toJson() { - return _$$ImageBlockSourceImplToJson( - this, - ); - } -} - -abstract class _ImageBlockSource extends ImageBlockSource { - const factory _ImageBlockSource( - {required final String data, - @JsonKey(name: 'media_type') - required final ImageBlockSourceMediaType mediaType, - required final ImageBlockSourceType type}) = _$ImageBlockSourceImpl; - const _ImageBlockSource._() : super._(); - - factory _ImageBlockSource.fromJson(Map json) = - _$ImageBlockSourceImpl.fromJson; - - @override - - /// The base64-encoded image data. - String get data; - @override - - /// The media type of the image. 
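// [Editor's sketch, not part of the original patch.] Building an
// ImageBlockSource from raw bytes, assuming the generated factory above; the
// enum member names (ImageBlockSourceMediaType.imageJpeg,
// ImageBlockSourceType.base64) are assumptions inferred from the JSON values
// they serialize to.
import 'dart:convert' show base64Encode;

ImageBlockSource jpegSource(List<int> bytes) => ImageBlockSource(
      data: base64Encode(bytes), // base64-encoded image data, per the doc above
      mediaType: ImageBlockSourceMediaType.imageJpeg,
      type: ImageBlockSourceType.base64,
    );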
- @JsonKey(name: 'media_type') - ImageBlockSourceMediaType get mediaType; - @override - - /// The type of image source. - ImageBlockSourceType get type; - @override - @JsonKey(ignore: true) - _$$ImageBlockSourceImplCopyWith<_$ImageBlockSourceImpl> get copyWith => - throw _privateConstructorUsedError; -} - -Usage _$UsageFromJson(Map json) { - return _Usage.fromJson(json); -} - -/// @nodoc -mixin _$Usage { - /// The number of input tokens which were used. - @JsonKey(name: 'input_tokens') - int get inputTokens => throw _privateConstructorUsedError; - - /// The number of output tokens which were used. - @JsonKey(name: 'output_tokens') - int get outputTokens => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $UsageCopyWith get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $UsageCopyWith<$Res> { - factory $UsageCopyWith(Usage value, $Res Function(Usage) then) = - _$UsageCopyWithImpl<$Res, Usage>; - @useResult - $Res call( - {@JsonKey(name: 'input_tokens') int inputTokens, - @JsonKey(name: 'output_tokens') int outputTokens}); -} - -/// @nodoc -class _$UsageCopyWithImpl<$Res, $Val extends Usage> - implements $UsageCopyWith<$Res> { - _$UsageCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? inputTokens = null, - Object? outputTokens = null, - }) { - return _then(_value.copyWith( - inputTokens: null == inputTokens - ? _value.inputTokens - : inputTokens // ignore: cast_nullable_to_non_nullable - as int, - outputTokens: null == outputTokens - ? _value.outputTokens - : outputTokens // ignore: cast_nullable_to_non_nullable - as int, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$UsageImplCopyWith<$Res> implements $UsageCopyWith<$Res> { - factory _$$UsageImplCopyWith( - _$UsageImpl value, $Res Function(_$UsageImpl) then) = - __$$UsageImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(name: 'input_tokens') int inputTokens, - @JsonKey(name: 'output_tokens') int outputTokens}); -} - -/// @nodoc -class __$$UsageImplCopyWithImpl<$Res> - extends _$UsageCopyWithImpl<$Res, _$UsageImpl> - implements _$$UsageImplCopyWith<$Res> { - __$$UsageImplCopyWithImpl( - _$UsageImpl _value, $Res Function(_$UsageImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? inputTokens = null, - Object? outputTokens = null, - }) { - return _then(_$UsageImpl( - inputTokens: null == inputTokens - ? _value.inputTokens - : inputTokens // ignore: cast_nullable_to_non_nullable - as int, - outputTokens: null == outputTokens - ? _value.outputTokens - : outputTokens // ignore: cast_nullable_to_non_nullable - as int, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$UsageImpl extends _Usage { - const _$UsageImpl( - {@JsonKey(name: 'input_tokens') required this.inputTokens, - @JsonKey(name: 'output_tokens') required this.outputTokens}) - : super._(); - - factory _$UsageImpl.fromJson(Map json) => - _$$UsageImplFromJson(json); - - /// The number of input tokens which were used. - @override - @JsonKey(name: 'input_tokens') - final int inputTokens; - - /// The number of output tokens which were used. 
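// [Editor's sketch, not part of the original patch.] Aggregating the generated
// Usage values above, e.g. to total the tokens a conversation was billed for.
int totalBilledTokens(Iterable<Usage> usages) => usages.fold(
    0, (total, usage) => total + usage.inputTokens + usage.outputTokens);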
- @override - @JsonKey(name: 'output_tokens') - final int outputTokens; - - @override - String toString() { - return 'Usage(inputTokens: $inputTokens, outputTokens: $outputTokens)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$UsageImpl && - (identical(other.inputTokens, inputTokens) || - other.inputTokens == inputTokens) && - (identical(other.outputTokens, outputTokens) || - other.outputTokens == outputTokens)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, inputTokens, outputTokens); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$UsageImplCopyWith<_$UsageImpl> get copyWith => - __$$UsageImplCopyWithImpl<_$UsageImpl>(this, _$identity); - - @override - Map toJson() { - return _$$UsageImplToJson( - this, - ); - } -} - -abstract class _Usage extends Usage { - const factory _Usage( - {@JsonKey(name: 'input_tokens') required final int inputTokens, - @JsonKey(name: 'output_tokens') required final int outputTokens}) = - _$UsageImpl; - const _Usage._() : super._(); - - factory _Usage.fromJson(Map json) = _$UsageImpl.fromJson; - - @override - - /// The number of input tokens which were used. - @JsonKey(name: 'input_tokens') - int get inputTokens; - @override - - /// The number of output tokens which were used. - @JsonKey(name: 'output_tokens') - int get outputTokens; - @override - @JsonKey(ignore: true) - _$$UsageImplCopyWith<_$UsageImpl> get copyWith => - throw _privateConstructorUsedError; -} - -MessageDelta _$MessageDeltaFromJson(Map json) { - return _MessageDelta.fromJson(json); -} - -/// @nodoc -mixin _$MessageDelta { - /// The reason that we stopped. - /// - /// This may be one the following values: - /// - /// - `"end_turn"`: the model reached a natural stopping point - /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum - /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated - /// - /// In non-streaming mode this value is always non-null. In streaming mode, it is - /// null in the `message_start` event and non-null otherwise. - @JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - StopReason? get stopReason => throw _privateConstructorUsedError; - - /// Which custom stop sequence was generated, if any. - /// - /// This value will be a non-null string if one of your custom stop sequences was - /// generated. - @JsonKey(name: 'stop_sequence', includeIfNull: false) - String? get stopSequence => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $MessageDeltaCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $MessageDeltaCopyWith<$Res> { - factory $MessageDeltaCopyWith( - MessageDelta value, $Res Function(MessageDelta) then) = - _$MessageDeltaCopyWithImpl<$Res, MessageDelta>; - @useResult - $Res call( - {@JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - StopReason? stopReason, - @JsonKey(name: 'stop_sequence', includeIfNull: false) - String? 
stopSequence}); -} - -/// @nodoc -class _$MessageDeltaCopyWithImpl<$Res, $Val extends MessageDelta> - implements $MessageDeltaCopyWith<$Res> { - _$MessageDeltaCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? stopReason = freezed, - Object? stopSequence = freezed, - }) { - return _then(_value.copyWith( - stopReason: freezed == stopReason - ? _value.stopReason - : stopReason // ignore: cast_nullable_to_non_nullable - as StopReason?, - stopSequence: freezed == stopSequence - ? _value.stopSequence - : stopSequence // ignore: cast_nullable_to_non_nullable - as String?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$MessageDeltaImplCopyWith<$Res> - implements $MessageDeltaCopyWith<$Res> { - factory _$$MessageDeltaImplCopyWith( - _$MessageDeltaImpl value, $Res Function(_$MessageDeltaImpl) then) = - __$$MessageDeltaImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - StopReason? stopReason, - @JsonKey(name: 'stop_sequence', includeIfNull: false) - String? stopSequence}); -} - -/// @nodoc -class __$$MessageDeltaImplCopyWithImpl<$Res> - extends _$MessageDeltaCopyWithImpl<$Res, _$MessageDeltaImpl> - implements _$$MessageDeltaImplCopyWith<$Res> { - __$$MessageDeltaImplCopyWithImpl( - _$MessageDeltaImpl _value, $Res Function(_$MessageDeltaImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? stopReason = freezed, - Object? stopSequence = freezed, - }) { - return _then(_$MessageDeltaImpl( - stopReason: freezed == stopReason - ? _value.stopReason - : stopReason // ignore: cast_nullable_to_non_nullable - as StopReason?, - stopSequence: freezed == stopSequence - ? _value.stopSequence - : stopSequence // ignore: cast_nullable_to_non_nullable - as String?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$MessageDeltaImpl extends _MessageDelta { - const _$MessageDeltaImpl( - {@JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - this.stopReason, - @JsonKey(name: 'stop_sequence', includeIfNull: false) this.stopSequence}) - : super._(); - - factory _$MessageDeltaImpl.fromJson(Map json) => - _$$MessageDeltaImplFromJson(json); - - /// The reason that we stopped. - /// - /// This may be one the following values: - /// - /// - `"end_turn"`: the model reached a natural stopping point - /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum - /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated - /// - /// In non-streaming mode this value is always non-null. In streaming mode, it is - /// null in the `message_start` event and non-null otherwise. - @override - @JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final StopReason? stopReason; - - /// Which custom stop sequence was generated, if any. - /// - /// This value will be a non-null string if one of your custom stop sequences was - /// generated. - @override - @JsonKey(name: 'stop_sequence', includeIfNull: false) - final String? 
stopSequence; - - @override - String toString() { - return 'MessageDelta(stopReason: $stopReason, stopSequence: $stopSequence)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$MessageDeltaImpl && - (identical(other.stopReason, stopReason) || - other.stopReason == stopReason) && - (identical(other.stopSequence, stopSequence) || - other.stopSequence == stopSequence)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, stopReason, stopSequence); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => - __$$MessageDeltaImplCopyWithImpl<_$MessageDeltaImpl>(this, _$identity); - - @override - Map toJson() { - return _$$MessageDeltaImplToJson( - this, - ); - } -} - -abstract class _MessageDelta extends MessageDelta { - const factory _MessageDelta( - {@JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final StopReason? stopReason, - @JsonKey(name: 'stop_sequence', includeIfNull: false) - final String? stopSequence}) = _$MessageDeltaImpl; - const _MessageDelta._() : super._(); - - factory _MessageDelta.fromJson(Map json) = - _$MessageDeltaImpl.fromJson; - - @override - - /// The reason that we stopped. - /// - /// This may be one the following values: - /// - /// - `"end_turn"`: the model reached a natural stopping point - /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum - /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated - /// - /// In non-streaming mode this value is always non-null. In streaming mode, it is - /// null in the `message_start` event and non-null otherwise. - @JsonKey( - name: 'stop_reason', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - StopReason? get stopReason; - @override - - /// Which custom stop sequence was generated, if any. - /// - /// This value will be a non-null string if one of your custom stop sequences was - /// generated. - @JsonKey(name: 'stop_sequence', includeIfNull: false) - String? get stopSequence; - @override - @JsonKey(ignore: true) - _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => - throw _privateConstructorUsedError; -} - -MessageDeltaUsage _$MessageDeltaUsageFromJson(Map json) { - return _MessageDeltaUsage.fromJson(json); -} - -/// @nodoc -mixin _$MessageDeltaUsage { - /// The cumulative number of output tokens which were used. - @JsonKey(name: 'output_tokens') - int get outputTokens => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $MessageDeltaUsageCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $MessageDeltaUsageCopyWith<$Res> { - factory $MessageDeltaUsageCopyWith( - MessageDeltaUsage value, $Res Function(MessageDeltaUsage) then) = - _$MessageDeltaUsageCopyWithImpl<$Res, MessageDeltaUsage>; - @useResult - $Res call({@JsonKey(name: 'output_tokens') int outputTokens}); -} - -/// @nodoc -class _$MessageDeltaUsageCopyWithImpl<$Res, $Val extends MessageDeltaUsage> - implements $MessageDeltaUsageCopyWith<$Res> { - _$MessageDeltaUsageCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? 
outputTokens = null, - }) { - return _then(_value.copyWith( - outputTokens: null == outputTokens - ? _value.outputTokens - : outputTokens // ignore: cast_nullable_to_non_nullable - as int, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$MessageDeltaUsageImplCopyWith<$Res> - implements $MessageDeltaUsageCopyWith<$Res> { - factory _$$MessageDeltaUsageImplCopyWith(_$MessageDeltaUsageImpl value, - $Res Function(_$MessageDeltaUsageImpl) then) = - __$$MessageDeltaUsageImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({@JsonKey(name: 'output_tokens') int outputTokens}); -} - -/// @nodoc -class __$$MessageDeltaUsageImplCopyWithImpl<$Res> - extends _$MessageDeltaUsageCopyWithImpl<$Res, _$MessageDeltaUsageImpl> - implements _$$MessageDeltaUsageImplCopyWith<$Res> { - __$$MessageDeltaUsageImplCopyWithImpl(_$MessageDeltaUsageImpl _value, - $Res Function(_$MessageDeltaUsageImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? outputTokens = null, - }) { - return _then(_$MessageDeltaUsageImpl( - outputTokens: null == outputTokens - ? _value.outputTokens - : outputTokens // ignore: cast_nullable_to_non_nullable - as int, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$MessageDeltaUsageImpl extends _MessageDeltaUsage { - const _$MessageDeltaUsageImpl( - {@JsonKey(name: 'output_tokens') required this.outputTokens}) - : super._(); - - factory _$MessageDeltaUsageImpl.fromJson(Map json) => - _$$MessageDeltaUsageImplFromJson(json); - - /// The cumulative number of output tokens which were used. - @override - @JsonKey(name: 'output_tokens') - final int outputTokens; - - @override - String toString() { - return 'MessageDeltaUsage(outputTokens: $outputTokens)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$MessageDeltaUsageImpl && - (identical(other.outputTokens, outputTokens) || - other.outputTokens == outputTokens)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, outputTokens); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$MessageDeltaUsageImplCopyWith<_$MessageDeltaUsageImpl> get copyWith => - __$$MessageDeltaUsageImplCopyWithImpl<_$MessageDeltaUsageImpl>( - this, _$identity); - - @override - Map toJson() { - return _$$MessageDeltaUsageImplToJson( - this, - ); - } -} - -abstract class _MessageDeltaUsage extends MessageDeltaUsage { - const factory _MessageDeltaUsage( - {@JsonKey(name: 'output_tokens') required final int outputTokens}) = - _$MessageDeltaUsageImpl; - const _MessageDeltaUsage._() : super._(); - - factory _MessageDeltaUsage.fromJson(Map json) = - _$MessageDeltaUsageImpl.fromJson; - - @override - - /// The cumulative number of output tokens which were used. - @JsonKey(name: 'output_tokens') - int get outputTokens; - @override - @JsonKey(ignore: true) - _$$MessageDeltaUsageImplCopyWith<_$MessageDeltaUsageImpl> get copyWith => - throw _privateConstructorUsedError; -} - -Block _$BlockFromJson(Map json) { - switch (json['type']) { - case 'text': - return TextBlock.fromJson(json); - case 'image': - return ImageBlock.fromJson(json); - case 'tool_use': - return ToolUseBlock.fromJson(json); - case 'tool_result': - return ToolResultBlock.fromJson(json); - - default: - throw CheckedFromJsonException( - json, 'type', 'Block', 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$Block { - /// The type of content block. 
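// ---------------------------------------------------------------------------
// Editor's sketch (not part of this patch): the _$BlockFromJson switch above
// dispatches on the JSON `type` discriminator ('text', 'image', 'tool_use',
// 'tool_result'). Assuming the public Block union in the package's
// messages.dart exposes the usual freezed fromJson factory (not shown in this
// hunk), a caller could round-trip a content block like this. The function
// name is hypothetical.
void blockJsonRoundTripSketch() {
  final block = Block.fromJson({'type': 'text', 'text': 'Hi there!'});
  assert(block is TextBlock); // 'text' maps to the TextBlock variant
  // Serializing keeps the discriminator, so the map can be sent back as-is.
  final json = block.toJson(); // {'type': 'text', 'text': 'Hi there!'}
  print(json);
}
// ---------------------------------------------------------------------------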
- String get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(String text, String type) text, - required TResult Function(ImageBlockSource source, String type) image, - required TResult Function( - String id, String name, Map input, String type) - toolUse, - required TResult Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type) - toolResult, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String text, String type)? text, - TResult? Function(ImageBlockSource source, String type)? image, - TResult? Function( - String id, String name, Map input, String type)? - toolUse, - TResult? Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type)? - toolResult, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String text, String type)? text, - TResult Function(ImageBlockSource source, String type)? image, - TResult Function( - String id, String name, Map input, String type)? - toolUse, - TResult Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type)? - toolResult, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(TextBlock value) text, - required TResult Function(ImageBlock value) image, - required TResult Function(ToolUseBlock value) toolUse, - required TResult Function(ToolResultBlock value) toolResult, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(TextBlock value)? text, - TResult? Function(ImageBlock value)? image, - TResult? Function(ToolUseBlock value)? toolUse, - TResult? Function(ToolResultBlock value)? toolResult, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(TextBlock value)? text, - TResult Function(ImageBlock value)? image, - TResult Function(ToolUseBlock value)? toolUse, - TResult Function(ToolResultBlock value)? toolResult, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $BlockCopyWith get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $BlockCopyWith<$Res> { - factory $BlockCopyWith(Block value, $Res Function(Block) then) = - _$BlockCopyWithImpl<$Res, Block>; - @useResult - $Res call({String type}); -} - -/// @nodoc -class _$BlockCopyWithImpl<$Res, $Val extends Block> - implements $BlockCopyWith<$Res> { - _$BlockCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$TextBlockImplCopyWith<$Res> implements $BlockCopyWith<$Res> { - factory _$$TextBlockImplCopyWith( - _$TextBlockImpl value, $Res Function(_$TextBlockImpl) then) = - __$$TextBlockImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({String text, String type}); -} - -/// @nodoc -class __$$TextBlockImplCopyWithImpl<$Res> - extends _$BlockCopyWithImpl<$Res, _$TextBlockImpl> - implements _$$TextBlockImplCopyWith<$Res> { - __$$TextBlockImplCopyWithImpl( - _$TextBlockImpl _value, $Res Function(_$TextBlockImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? text = null, - Object? type = null, - }) { - return _then(_$TextBlockImpl( - text: null == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$TextBlockImpl extends TextBlock { - const _$TextBlockImpl({required this.text, this.type = 'text'}) : super._(); - - factory _$TextBlockImpl.fromJson(Map json) => - _$$TextBlockImplFromJson(json); - - /// The text content. - @override - final String text; - - /// The type of content block. - @override - @JsonKey() - final String type; - - @override - String toString() { - return 'Block.text(text: $text, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$TextBlockImpl && - (identical(other.text, text) || other.text == text) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, text, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$TextBlockImplCopyWith<_$TextBlockImpl> get copyWith => - __$$TextBlockImplCopyWithImpl<_$TextBlockImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(String text, String type) text, - required TResult Function(ImageBlockSource source, String type) image, - required TResult Function( - String id, String name, Map input, String type) - toolUse, - required TResult Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type) - toolResult, - }) { - return text(this.text, type); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String text, String type)? text, - TResult? Function(ImageBlockSource source, String type)? image, - TResult? Function( - String id, String name, Map input, String type)? - toolUse, - TResult? Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type)? - toolResult, - }) { - return text?.call(this.text, type); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String text, String type)? text, - TResult Function(ImageBlockSource source, String type)? image, - TResult Function( - String id, String name, Map input, String type)? 
- toolUse, - TResult Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type)? - toolResult, - required TResult orElse(), - }) { - if (text != null) { - return text(this.text, type); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(TextBlock value) text, - required TResult Function(ImageBlock value) image, - required TResult Function(ToolUseBlock value) toolUse, - required TResult Function(ToolResultBlock value) toolResult, - }) { - return text(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(TextBlock value)? text, - TResult? Function(ImageBlock value)? image, - TResult? Function(ToolUseBlock value)? toolUse, - TResult? Function(ToolResultBlock value)? toolResult, - }) { - return text?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(TextBlock value)? text, - TResult Function(ImageBlock value)? image, - TResult Function(ToolUseBlock value)? toolUse, - TResult Function(ToolResultBlock value)? toolResult, - required TResult orElse(), - }) { - if (text != null) { - return text(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$TextBlockImplToJson( - this, - ); - } -} - -abstract class TextBlock extends Block { - const factory TextBlock({required final String text, final String type}) = - _$TextBlockImpl; - const TextBlock._() : super._(); - - factory TextBlock.fromJson(Map json) = - _$TextBlockImpl.fromJson; - - /// The text content. - String get text; - @override - - /// The type of content block. - String get type; - @override - @JsonKey(ignore: true) - _$$TextBlockImplCopyWith<_$TextBlockImpl> get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$ImageBlockImplCopyWith<$Res> implements $BlockCopyWith<$Res> { - factory _$$ImageBlockImplCopyWith( - _$ImageBlockImpl value, $Res Function(_$ImageBlockImpl) then) = - __$$ImageBlockImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({ImageBlockSource source, String type}); - - $ImageBlockSourceCopyWith<$Res> get source; -} - -/// @nodoc -class __$$ImageBlockImplCopyWithImpl<$Res> - extends _$BlockCopyWithImpl<$Res, _$ImageBlockImpl> - implements _$$ImageBlockImplCopyWith<$Res> { - __$$ImageBlockImplCopyWithImpl( - _$ImageBlockImpl _value, $Res Function(_$ImageBlockImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? source = null, - Object? type = null, - }) { - return _then(_$ImageBlockImpl( - source: null == source - ? _value.source - : source // ignore: cast_nullable_to_non_nullable - as ImageBlockSource, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - )); - } - - @override - @pragma('vm:prefer-inline') - $ImageBlockSourceCopyWith<$Res> get source { - return $ImageBlockSourceCopyWith<$Res>(_value.source, (value) { - return _then(_value.copyWith(source: value)); - }); - } -} - -/// @nodoc -@JsonSerializable() -class _$ImageBlockImpl extends ImageBlock { - const _$ImageBlockImpl({required this.source, this.type = 'image'}) - : super._(); - - factory _$ImageBlockImpl.fromJson(Map json) => - _$$ImageBlockImplFromJson(json); - - /// The source of an image block. - @override - final ImageBlockSource source; - - /// The type of content block. 
- @override - @JsonKey() - final String type; - - @override - String toString() { - return 'Block.image(source: $source, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ImageBlockImpl && - (identical(other.source, source) || other.source == source) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, source, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ImageBlockImplCopyWith<_$ImageBlockImpl> get copyWith => - __$$ImageBlockImplCopyWithImpl<_$ImageBlockImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(String text, String type) text, - required TResult Function(ImageBlockSource source, String type) image, - required TResult Function( - String id, String name, Map input, String type) - toolUse, - required TResult Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type) - toolResult, - }) { - return image(source, type); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String text, String type)? text, - TResult? Function(ImageBlockSource source, String type)? image, - TResult? Function( - String id, String name, Map input, String type)? - toolUse, - TResult? Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type)? - toolResult, - }) { - return image?.call(source, type); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String text, String type)? text, - TResult Function(ImageBlockSource source, String type)? image, - TResult Function( - String id, String name, Map input, String type)? - toolUse, - TResult Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type)? - toolResult, - required TResult orElse(), - }) { - if (image != null) { - return image(source, type); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(TextBlock value) text, - required TResult Function(ImageBlock value) image, - required TResult Function(ToolUseBlock value) toolUse, - required TResult Function(ToolResultBlock value) toolResult, - }) { - return image(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(TextBlock value)? text, - TResult? Function(ImageBlock value)? image, - TResult? Function(ToolUseBlock value)? toolUse, - TResult? Function(ToolResultBlock value)? toolResult, - }) { - return image?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(TextBlock value)? text, - TResult Function(ImageBlock value)? image, - TResult Function(ToolUseBlock value)? toolUse, - TResult Function(ToolResultBlock value)? 
toolResult, - required TResult orElse(), - }) { - if (image != null) { - return image(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ImageBlockImplToJson( - this, - ); - } -} - -abstract class ImageBlock extends Block { - const factory ImageBlock( - {required final ImageBlockSource source, - final String type}) = _$ImageBlockImpl; - const ImageBlock._() : super._(); - - factory ImageBlock.fromJson(Map json) = - _$ImageBlockImpl.fromJson; - - /// The source of an image block. - ImageBlockSource get source; - @override - - /// The type of content block. - String get type; - @override - @JsonKey(ignore: true) - _$$ImageBlockImplCopyWith<_$ImageBlockImpl> get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$ToolUseBlockImplCopyWith<$Res> - implements $BlockCopyWith<$Res> { - factory _$$ToolUseBlockImplCopyWith( - _$ToolUseBlockImpl value, $Res Function(_$ToolUseBlockImpl) then) = - __$$ToolUseBlockImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({String id, String name, Map input, String type}); -} - -/// @nodoc -class __$$ToolUseBlockImplCopyWithImpl<$Res> - extends _$BlockCopyWithImpl<$Res, _$ToolUseBlockImpl> - implements _$$ToolUseBlockImplCopyWith<$Res> { - __$$ToolUseBlockImplCopyWithImpl( - _$ToolUseBlockImpl _value, $Res Function(_$ToolUseBlockImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? id = null, - Object? name = null, - Object? input = null, - Object? type = null, - }) { - return _then(_$ToolUseBlockImpl( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, - input: null == input - ? _value._input - : input // ignore: cast_nullable_to_non_nullable - as Map, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ToolUseBlockImpl extends ToolUseBlock { - const _$ToolUseBlockImpl( - {required this.id, - required this.name, - required final Map input, - this.type = 'tool_use'}) - : _input = input, - super._(); - - factory _$ToolUseBlockImpl.fromJson(Map json) => - _$$ToolUseBlockImplFromJson(json); - - /// A unique identifier for this particular tool use block. - /// This will be used to match up the tool results later. - @override - final String id; - - /// The name of the tool being used. - @override - final String name; - - /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. - final Map _input; - - /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. - @override - Map get input { - if (_input is EqualUnmodifiableMapView) return _input; - // ignore: implicit_dynamic_type - return EqualUnmodifiableMapView(_input); - } - - /// The type of content block. 
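// ---------------------------------------------------------------------------
// Editor's sketch (not part of this patch): the generated when/maybeWhen
// matchers above give a type-safe way to react to each Block variant. A
// hypothetical tool-dispatch helper could pick out tool_use blocks from a
// model response and ignore everything else; only the maybeWhen signature
// shown in this file is used, the function name is invented for illustration.
String? describeToolUseSketch(Block block) {
  return block.maybeWhen(
    toolUse: (id, name, input, type) =>
        'tool $name requested with input $input (id: $id)',
    orElse: () => null, // text, image and tool_result blocks are skipped here
  );
}
// ---------------------------------------------------------------------------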
- @override - @JsonKey() - final String type; - - @override - String toString() { - return 'Block.toolUse(id: $id, name: $name, input: $input, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ToolUseBlockImpl && - (identical(other.id, id) || other.id == id) && - (identical(other.name, name) || other.name == name) && - const DeepCollectionEquality().equals(other._input, _input) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash( - runtimeType, id, name, const DeepCollectionEquality().hash(_input), type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ToolUseBlockImplCopyWith<_$ToolUseBlockImpl> get copyWith => - __$$ToolUseBlockImplCopyWithImpl<_$ToolUseBlockImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(String text, String type) text, - required TResult Function(ImageBlockSource source, String type) image, - required TResult Function( - String id, String name, Map input, String type) - toolUse, - required TResult Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type) - toolResult, - }) { - return toolUse(id, name, input, type); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String text, String type)? text, - TResult? Function(ImageBlockSource source, String type)? image, - TResult? Function( - String id, String name, Map input, String type)? - toolUse, - TResult? Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type)? - toolResult, - }) { - return toolUse?.call(id, name, input, type); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String text, String type)? text, - TResult Function(ImageBlockSource source, String type)? image, - TResult Function( - String id, String name, Map input, String type)? - toolUse, - TResult Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type)? - toolResult, - required TResult orElse(), - }) { - if (toolUse != null) { - return toolUse(id, name, input, type); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(TextBlock value) text, - required TResult Function(ImageBlock value) image, - required TResult Function(ToolUseBlock value) toolUse, - required TResult Function(ToolResultBlock value) toolResult, - }) { - return toolUse(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(TextBlock value)? text, - TResult? Function(ImageBlock value)? image, - TResult? Function(ToolUseBlock value)? toolUse, - TResult? Function(ToolResultBlock value)? toolResult, - }) { - return toolUse?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(TextBlock value)? text, - TResult Function(ImageBlock value)? image, - TResult Function(ToolUseBlock value)? toolUse, - TResult Function(ToolResultBlock value)? 
toolResult, - required TResult orElse(), - }) { - if (toolUse != null) { - return toolUse(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ToolUseBlockImplToJson( - this, - ); - } -} - -abstract class ToolUseBlock extends Block { - const factory ToolUseBlock( - {required final String id, - required final String name, - required final Map input, - final String type}) = _$ToolUseBlockImpl; - const ToolUseBlock._() : super._(); - - factory ToolUseBlock.fromJson(Map json) = - _$ToolUseBlockImpl.fromJson; - - /// A unique identifier for this particular tool use block. - /// This will be used to match up the tool results later. - String get id; - - /// The name of the tool being used. - String get name; - - /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. - Map get input; - @override - - /// The type of content block. - String get type; - @override - @JsonKey(ignore: true) - _$$ToolUseBlockImplCopyWith<_$ToolUseBlockImpl> get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$ToolResultBlockImplCopyWith<$Res> - implements $BlockCopyWith<$Res> { - factory _$$ToolResultBlockImplCopyWith(_$ToolResultBlockImpl value, - $Res Function(_$ToolResultBlockImpl) then) = - __$$ToolResultBlockImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type}); - - $ToolResultBlockContentCopyWith<$Res> get content; -} - -/// @nodoc -class __$$ToolResultBlockImplCopyWithImpl<$Res> - extends _$BlockCopyWithImpl<$Res, _$ToolResultBlockImpl> - implements _$$ToolResultBlockImplCopyWith<$Res> { - __$$ToolResultBlockImplCopyWithImpl( - _$ToolResultBlockImpl _value, $Res Function(_$ToolResultBlockImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? toolUseId = null, - Object? content = null, - Object? isError = freezed, - Object? type = null, - }) { - return _then(_$ToolResultBlockImpl( - toolUseId: null == toolUseId - ? _value.toolUseId - : toolUseId // ignore: cast_nullable_to_non_nullable - as String, - content: null == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as ToolResultBlockContent, - isError: freezed == isError - ? _value.isError - : isError // ignore: cast_nullable_to_non_nullable - as bool?, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - )); - } - - @override - @pragma('vm:prefer-inline') - $ToolResultBlockContentCopyWith<$Res> get content { - return $ToolResultBlockContentCopyWith<$Res>(_value.content, (value) { - return _then(_value.copyWith(content: value)); - }); - } -} - -/// @nodoc -@JsonSerializable() -class _$ToolResultBlockImpl extends ToolResultBlock { - const _$ToolResultBlockImpl( - {@JsonKey(name: 'tool_use_id') required this.toolUseId, - @_ToolResultBlockContentConverter() required this.content, - @JsonKey(name: 'is_error', includeIfNull: false) this.isError, - this.type = 'tool_result'}) - : super._(); - - factory _$ToolResultBlockImpl.fromJson(Map json) => - _$$ToolResultBlockImplFromJson(json); - - /// The `id` of the tool use request this is a result for. - @override - @JsonKey(name: 'tool_use_id') - final String toolUseId; - - /// The result of the tool, as a string (e.g. 
`"content": "15 degrees"`) - /// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). - /// These content blocks can use the text or image types. - @override - @_ToolResultBlockContentConverter() - final ToolResultBlockContent content; - - /// Set to `true` if the tool execution resulted in an error. - @override - @JsonKey(name: 'is_error', includeIfNull: false) - final bool? isError; - - /// The type of content block. - @override - @JsonKey() - final String type; - - @override - String toString() { - return 'Block.toolResult(toolUseId: $toolUseId, content: $content, isError: $isError, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ToolResultBlockImpl && - (identical(other.toolUseId, toolUseId) || - other.toolUseId == toolUseId) && - (identical(other.content, content) || other.content == content) && - (identical(other.isError, isError) || other.isError == isError) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => - Object.hash(runtimeType, toolUseId, content, isError, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ToolResultBlockImplCopyWith<_$ToolResultBlockImpl> get copyWith => - __$$ToolResultBlockImplCopyWithImpl<_$ToolResultBlockImpl>( - this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(String text, String type) text, - required TResult Function(ImageBlockSource source, String type) image, - required TResult Function( - String id, String name, Map input, String type) - toolUse, - required TResult Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type) - toolResult, - }) { - return toolResult(toolUseId, content, isError, type); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String text, String type)? text, - TResult? Function(ImageBlockSource source, String type)? image, - TResult? Function( - String id, String name, Map input, String type)? - toolUse, - TResult? Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type)? - toolResult, - }) { - return toolResult?.call(toolUseId, content, isError, type); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String text, String type)? text, - TResult Function(ImageBlockSource source, String type)? image, - TResult Function( - String id, String name, Map input, String type)? - toolUse, - TResult Function( - @JsonKey(name: 'tool_use_id') String toolUseId, - @_ToolResultBlockContentConverter() ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, - String type)? 
- toolResult, - required TResult orElse(), - }) { - if (toolResult != null) { - return toolResult(toolUseId, content, isError, type); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(TextBlock value) text, - required TResult Function(ImageBlock value) image, - required TResult Function(ToolUseBlock value) toolUse, - required TResult Function(ToolResultBlock value) toolResult, - }) { - return toolResult(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(TextBlock value)? text, - TResult? Function(ImageBlock value)? image, - TResult? Function(ToolUseBlock value)? toolUse, - TResult? Function(ToolResultBlock value)? toolResult, - }) { - return toolResult?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(TextBlock value)? text, - TResult Function(ImageBlock value)? image, - TResult Function(ToolUseBlock value)? toolUse, - TResult Function(ToolResultBlock value)? toolResult, - required TResult orElse(), - }) { - if (toolResult != null) { - return toolResult(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ToolResultBlockImplToJson( - this, - ); - } -} - -abstract class ToolResultBlock extends Block { - const factory ToolResultBlock( - {@JsonKey(name: 'tool_use_id') required final String toolUseId, - @_ToolResultBlockContentConverter() - required final ToolResultBlockContent content, - @JsonKey(name: 'is_error', includeIfNull: false) final bool? isError, - final String type}) = _$ToolResultBlockImpl; - const ToolResultBlock._() : super._(); - - factory ToolResultBlock.fromJson(Map json) = - _$ToolResultBlockImpl.fromJson; - - /// The `id` of the tool use request this is a result for. - @JsonKey(name: 'tool_use_id') - String get toolUseId; - - /// The result of the tool, as a string (e.g. `"content": "15 degrees"`) - /// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). - /// These content blocks can use the text or image types. - @_ToolResultBlockContentConverter() - ToolResultBlockContent get content; - - /// Set to `true` if the tool execution resulted in an error. - @JsonKey(name: 'is_error', includeIfNull: false) - bool? get isError; - @override - - /// The type of content block. - String get type; - @override - @JsonKey(ignore: true) - _$$ToolResultBlockImplCopyWith<_$ToolResultBlockImpl> get copyWith => - throw _privateConstructorUsedError; -} - -ToolResultBlockContent _$ToolResultBlockContentFromJson( - Map json) { - switch (json['runtimeType']) { - case 'blocks': - return ToolResultBlockContentBlocks.fromJson(json); - case 'text': - return ToolResultBlockContentText.fromJson(json); - - default: - throw CheckedFromJsonException( - json, - 'runtimeType', - 'ToolResultBlockContent', - 'Invalid union type "${json['runtimeType']}"!'); - } -} - -/// @nodoc -mixin _$ToolResultBlockContent { - Object get value => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(List value) blocks, - required TResult Function(String value) text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(List value)? blocks, - TResult? Function(String value)? text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(List value)? blocks, - TResult Function(String value)? 
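// ---------------------------------------------------------------------------
// Editor's sketch (not part of this patch): per the doc comments above, a
// tool result can carry either a plain string or a list of nested content
// blocks. Using the ToolResultBlock and ToolResultBlockContent factories
// shown in this file, both shapes would look roughly like this; the ids and
// function names are hypothetical.
ToolResultBlock textToolResultSketch() => ToolResultBlock(
      toolUseId: 'toolu_abc123', // id taken from a prior tool_use block
      content: ToolResultBlockContentText('15 degrees'),
    );

ToolResultBlock nestedToolResultSketch() => ToolResultBlock(
      toolUseId: 'toolu_abc123', // hypothetical id
      content: ToolResultBlockContentBlocks([
        TextBlock(text: '15 degrees'),
      ]),
    );
// ---------------------------------------------------------------------------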
text, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(ToolResultBlockContentBlocks value) blocks, - required TResult Function(ToolResultBlockContentText value) text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ToolResultBlockContentBlocks value)? blocks, - TResult? Function(ToolResultBlockContentText value)? text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ToolResultBlockContentBlocks value)? blocks, - TResult Function(ToolResultBlockContentText value)? text, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ToolResultBlockContentCopyWith<$Res> { - factory $ToolResultBlockContentCopyWith(ToolResultBlockContent value, - $Res Function(ToolResultBlockContent) then) = - _$ToolResultBlockContentCopyWithImpl<$Res, ToolResultBlockContent>; -} - -/// @nodoc -class _$ToolResultBlockContentCopyWithImpl<$Res, - $Val extends ToolResultBlockContent> - implements $ToolResultBlockContentCopyWith<$Res> { - _$ToolResultBlockContentCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; -} - -/// @nodoc -abstract class _$$ToolResultBlockContentBlocksImplCopyWith<$Res> { - factory _$$ToolResultBlockContentBlocksImplCopyWith( - _$ToolResultBlockContentBlocksImpl value, - $Res Function(_$ToolResultBlockContentBlocksImpl) then) = - __$$ToolResultBlockContentBlocksImplCopyWithImpl<$Res>; - @useResult - $Res call({List value}); -} - -/// @nodoc -class __$$ToolResultBlockContentBlocksImplCopyWithImpl<$Res> - extends _$ToolResultBlockContentCopyWithImpl<$Res, - _$ToolResultBlockContentBlocksImpl> - implements _$$ToolResultBlockContentBlocksImplCopyWith<$Res> { - __$$ToolResultBlockContentBlocksImplCopyWithImpl( - _$ToolResultBlockContentBlocksImpl _value, - $Res Function(_$ToolResultBlockContentBlocksImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? value = null, - }) { - return _then(_$ToolResultBlockContentBlocksImpl( - null == value - ? _value._value - : value // ignore: cast_nullable_to_non_nullable - as List, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ToolResultBlockContentBlocksImpl extends ToolResultBlockContentBlocks { - const _$ToolResultBlockContentBlocksImpl(final List value, - {final String? $type}) - : _value = value, - $type = $type ?? 
'blocks', - super._(); - - factory _$ToolResultBlockContentBlocksImpl.fromJson( - Map json) => - _$$ToolResultBlockContentBlocksImplFromJson(json); - - final List _value; - @override - List get value { - if (_value is EqualUnmodifiableListView) return _value; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_value); - } - - @JsonKey(name: 'runtimeType') - final String $type; - - @override - String toString() { - return 'ToolResultBlockContent.blocks(value: $value)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ToolResultBlockContentBlocksImpl && - const DeepCollectionEquality().equals(other._value, _value)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ToolResultBlockContentBlocksImplCopyWith< - _$ToolResultBlockContentBlocksImpl> - get copyWith => __$$ToolResultBlockContentBlocksImplCopyWithImpl< - _$ToolResultBlockContentBlocksImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(List value) blocks, - required TResult Function(String value) text, - }) { - return blocks(value); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(List value)? blocks, - TResult? Function(String value)? text, - }) { - return blocks?.call(value); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(List value)? blocks, - TResult Function(String value)? text, - required TResult orElse(), - }) { - if (blocks != null) { - return blocks(value); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(ToolResultBlockContentBlocks value) blocks, - required TResult Function(ToolResultBlockContentText value) text, - }) { - return blocks(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ToolResultBlockContentBlocks value)? blocks, - TResult? Function(ToolResultBlockContentText value)? text, - }) { - return blocks?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ToolResultBlockContentBlocks value)? blocks, - TResult Function(ToolResultBlockContentText value)? 
text, - required TResult orElse(), - }) { - if (blocks != null) { - return blocks(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ToolResultBlockContentBlocksImplToJson( - this, - ); - } -} - -abstract class ToolResultBlockContentBlocks extends ToolResultBlockContent { - const factory ToolResultBlockContentBlocks(final List value) = - _$ToolResultBlockContentBlocksImpl; - const ToolResultBlockContentBlocks._() : super._(); - - factory ToolResultBlockContentBlocks.fromJson(Map json) = - _$ToolResultBlockContentBlocksImpl.fromJson; - - @override - List get value; - @JsonKey(ignore: true) - _$$ToolResultBlockContentBlocksImplCopyWith< - _$ToolResultBlockContentBlocksImpl> - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$ToolResultBlockContentTextImplCopyWith<$Res> { - factory _$$ToolResultBlockContentTextImplCopyWith( - _$ToolResultBlockContentTextImpl value, - $Res Function(_$ToolResultBlockContentTextImpl) then) = - __$$ToolResultBlockContentTextImplCopyWithImpl<$Res>; - @useResult - $Res call({String value}); -} - -/// @nodoc -class __$$ToolResultBlockContentTextImplCopyWithImpl<$Res> - extends _$ToolResultBlockContentCopyWithImpl<$Res, - _$ToolResultBlockContentTextImpl> - implements _$$ToolResultBlockContentTextImplCopyWith<$Res> { - __$$ToolResultBlockContentTextImplCopyWithImpl( - _$ToolResultBlockContentTextImpl _value, - $Res Function(_$ToolResultBlockContentTextImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? value = null, - }) { - return _then(_$ToolResultBlockContentTextImpl( - null == value - ? _value.value - : value // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ToolResultBlockContentTextImpl extends ToolResultBlockContentText { - const _$ToolResultBlockContentTextImpl(this.value, {final String? $type}) - : $type = $type ?? 'text', - super._(); - - factory _$ToolResultBlockContentTextImpl.fromJson( - Map json) => - _$$ToolResultBlockContentTextImplFromJson(json); - - @override - final String value; - - @JsonKey(name: 'runtimeType') - final String $type; - - @override - String toString() { - return 'ToolResultBlockContent.text(value: $value)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ToolResultBlockContentTextImpl && - (identical(other.value, value) || other.value == value)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, value); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ToolResultBlockContentTextImplCopyWith<_$ToolResultBlockContentTextImpl> - get copyWith => __$$ToolResultBlockContentTextImplCopyWithImpl< - _$ToolResultBlockContentTextImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(List value) blocks, - required TResult Function(String value) text, - }) { - return text(value); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(List value)? blocks, - TResult? Function(String value)? text, - }) { - return text?.call(value); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(List value)? blocks, - TResult Function(String value)? 
text, - required TResult orElse(), - }) { - if (text != null) { - return text(value); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(ToolResultBlockContentBlocks value) blocks, - required TResult Function(ToolResultBlockContentText value) text, - }) { - return text(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ToolResultBlockContentBlocks value)? blocks, - TResult? Function(ToolResultBlockContentText value)? text, - }) { - return text?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ToolResultBlockContentBlocks value)? blocks, - TResult Function(ToolResultBlockContentText value)? text, - required TResult orElse(), - }) { - if (text != null) { - return text(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ToolResultBlockContentTextImplToJson( - this, - ); - } -} - -abstract class ToolResultBlockContentText extends ToolResultBlockContent { - const factory ToolResultBlockContentText(final String value) = - _$ToolResultBlockContentTextImpl; - const ToolResultBlockContentText._() : super._(); - - factory ToolResultBlockContentText.fromJson(Map json) = - _$ToolResultBlockContentTextImpl.fromJson; - - @override - String get value; - @JsonKey(ignore: true) - _$$ToolResultBlockContentTextImplCopyWith<_$ToolResultBlockContentTextImpl> - get copyWith => throw _privateConstructorUsedError; -} - -MessageStreamEvent _$MessageStreamEventFromJson(Map json) { - switch (json['type']) { - case 'message_start': - return MessageStartEvent.fromJson(json); - case 'message_delta': - return MessageDeltaEvent.fromJson(json); - case 'message_stop': - return MessageStopEvent.fromJson(json); - case 'content_block_start': - return ContentBlockStartEvent.fromJson(json); - case 'content_block_delta': - return ContentBlockDeltaEvent.fromJson(json); - case 'content_block_stop': - return ContentBlockStopEvent.fromJson(json); - case 'ping': - return PingEvent.fromJson(json); - - default: - throw CheckedFromJsonException(json, 'type', 'MessageStreamEvent', - 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$MessageStreamEvent { - /// The type of a streaming event. - MessageStreamEventType get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') Block contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, - required TResult Function( - BlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, - required TResult Function(int index, MessageStreamEventType type) - contentBlockStop, - required TResult Function(MessageStreamEventType type) ping, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(Message message, MessageStreamEventType type)? - messageStart, - TResult? Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult? 
Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult? Function(MessageStreamEventType type)? ping, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(Message message, MessageStreamEventType type)? - messageStart, - TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult Function(MessageStreamEventType type)? ping, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(MessageStartEvent value) messageStart, - required TResult Function(MessageDeltaEvent value) messageDelta, - required TResult Function(MessageStopEvent value) messageStop, - required TResult Function(ContentBlockStartEvent value) contentBlockStart, - required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, - required TResult Function(ContentBlockStopEvent value) contentBlockStop, - required TResult Function(PingEvent value) ping, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageStartEvent value)? messageStart, - TResult? Function(MessageDeltaEvent value)? messageDelta, - TResult? Function(MessageStopEvent value)? messageStop, - TResult? Function(ContentBlockStartEvent value)? contentBlockStart, - TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult? Function(ContentBlockStopEvent value)? contentBlockStop, - TResult? Function(PingEvent value)? ping, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageStartEvent value)? messageStart, - TResult Function(MessageDeltaEvent value)? messageDelta, - TResult Function(MessageStopEvent value)? messageStop, - TResult Function(ContentBlockStartEvent value)? contentBlockStart, - TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult Function(ContentBlockStopEvent value)? contentBlockStop, - TResult Function(PingEvent value)? ping, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $MessageStreamEventCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $MessageStreamEventCopyWith<$Res> { - factory $MessageStreamEventCopyWith( - MessageStreamEvent value, $Res Function(MessageStreamEvent) then) = - _$MessageStreamEventCopyWithImpl<$Res, MessageStreamEvent>; - @useResult - $Res call({MessageStreamEventType type}); -} - -/// @nodoc -class _$MessageStreamEventCopyWithImpl<$Res, $Val extends MessageStreamEvent> - implements $MessageStreamEventCopyWith<$Res> { - _$MessageStreamEventCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$MessageStartEventImplCopyWith<$Res> - implements $MessageStreamEventCopyWith<$Res> { - factory _$$MessageStartEventImplCopyWith(_$MessageStartEventImpl value, - $Res Function(_$MessageStartEventImpl) then) = - __$$MessageStartEventImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({Message message, MessageStreamEventType type}); - - $MessageCopyWith<$Res> get message; -} - -/// @nodoc -class __$$MessageStartEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStartEventImpl> - implements _$$MessageStartEventImplCopyWith<$Res> { - __$$MessageStartEventImplCopyWithImpl(_$MessageStartEventImpl _value, - $Res Function(_$MessageStartEventImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? message = null, - Object? type = null, - }) { - return _then(_$MessageStartEventImpl( - message: null == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as Message, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, - )); - } - - @override - @pragma('vm:prefer-inline') - $MessageCopyWith<$Res> get message { - return $MessageCopyWith<$Res>(_value.message, (value) { - return _then(_value.copyWith(message: value)); - }); - } -} - -/// @nodoc -@JsonSerializable() -class _$MessageStartEventImpl extends MessageStartEvent { - const _$MessageStartEventImpl({required this.message, required this.type}) - : super._(); - - factory _$MessageStartEventImpl.fromJson(Map json) => - _$$MessageStartEventImplFromJson(json); - - /// A message in a chat conversation. - @override - final Message message; - - /// The type of a streaming event. - @override - final MessageStreamEventType type; - - @override - String toString() { - return 'MessageStreamEvent.messageStart(message: $message, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$MessageStartEventImpl && - (identical(other.message, message) || other.message == message) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, message, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => - __$$MessageStartEventImplCopyWithImpl<_$MessageStartEventImpl>( - this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') Block contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, - required TResult Function( - BlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, - required TResult Function(int index, MessageStreamEventType type) - contentBlockStop, - required TResult Function(MessageStreamEventType type) ping, - }) { - return messageStart(message, type); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? 
Function(Message message, MessageStreamEventType type)? - messageStart, - TResult? Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult? Function(MessageStreamEventType type)? ping, - }) { - return messageStart?.call(message, type); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(Message message, MessageStreamEventType type)? - messageStart, - TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult Function(MessageStreamEventType type)? ping, - required TResult orElse(), - }) { - if (messageStart != null) { - return messageStart(message, type); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(MessageStartEvent value) messageStart, - required TResult Function(MessageDeltaEvent value) messageDelta, - required TResult Function(MessageStopEvent value) messageStop, - required TResult Function(ContentBlockStartEvent value) contentBlockStart, - required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, - required TResult Function(ContentBlockStopEvent value) contentBlockStop, - required TResult Function(PingEvent value) ping, - }) { - return messageStart(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageStartEvent value)? messageStart, - TResult? Function(MessageDeltaEvent value)? messageDelta, - TResult? Function(MessageStopEvent value)? messageStop, - TResult? Function(ContentBlockStartEvent value)? contentBlockStart, - TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult? Function(ContentBlockStopEvent value)? contentBlockStop, - TResult? Function(PingEvent value)? ping, - }) { - return messageStart?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageStartEvent value)? messageStart, - TResult Function(MessageDeltaEvent value)? messageDelta, - TResult Function(MessageStopEvent value)? messageStop, - TResult Function(ContentBlockStartEvent value)? contentBlockStart, - TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult Function(ContentBlockStopEvent value)? contentBlockStop, - TResult Function(PingEvent value)? 
ping, - required TResult orElse(), - }) { - if (messageStart != null) { - return messageStart(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$MessageStartEventImplToJson( - this, - ); - } -} - -abstract class MessageStartEvent extends MessageStreamEvent { - const factory MessageStartEvent( - {required final Message message, - required final MessageStreamEventType type}) = _$MessageStartEventImpl; - const MessageStartEvent._() : super._(); - - factory MessageStartEvent.fromJson(Map json) = - _$MessageStartEventImpl.fromJson; - - /// A message in a chat conversation. - Message get message; - @override - - /// The type of a streaming event. - MessageStreamEventType get type; - @override - @JsonKey(ignore: true) - _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$MessageDeltaEventImplCopyWith<$Res> - implements $MessageStreamEventCopyWith<$Res> { - factory _$$MessageDeltaEventImplCopyWith(_$MessageDeltaEventImpl value, - $Res Function(_$MessageDeltaEventImpl) then) = - __$$MessageDeltaEventImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {MessageDelta delta, - MessageStreamEventType type, - MessageDeltaUsage usage}); - - $MessageDeltaCopyWith<$Res> get delta; - $MessageDeltaUsageCopyWith<$Res> get usage; -} - -/// @nodoc -class __$$MessageDeltaEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageDeltaEventImpl> - implements _$$MessageDeltaEventImplCopyWith<$Res> { - __$$MessageDeltaEventImplCopyWithImpl(_$MessageDeltaEventImpl _value, - $Res Function(_$MessageDeltaEventImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? delta = null, - Object? type = null, - Object? usage = null, - }) { - return _then(_$MessageDeltaEventImpl( - delta: null == delta - ? _value.delta - : delta // ignore: cast_nullable_to_non_nullable - as MessageDelta, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, - usage: null == usage - ? _value.usage - : usage // ignore: cast_nullable_to_non_nullable - as MessageDeltaUsage, - )); - } - - @override - @pragma('vm:prefer-inline') - $MessageDeltaCopyWith<$Res> get delta { - return $MessageDeltaCopyWith<$Res>(_value.delta, (value) { - return _then(_value.copyWith(delta: value)); - }); - } - - @override - @pragma('vm:prefer-inline') - $MessageDeltaUsageCopyWith<$Res> get usage { - return $MessageDeltaUsageCopyWith<$Res>(_value.usage, (value) { - return _then(_value.copyWith(usage: value)); - }); - } -} - -/// @nodoc -@JsonSerializable() -class _$MessageDeltaEventImpl extends MessageDeltaEvent { - const _$MessageDeltaEventImpl( - {required this.delta, required this.type, required this.usage}) - : super._(); - - factory _$MessageDeltaEventImpl.fromJson(Map json) => - _$$MessageDeltaEventImplFromJson(json); - - /// A delta in a streaming message. - @override - final MessageDelta delta; - - /// The type of a streaming event. - @override - final MessageStreamEventType type; - - /// Billing and rate-limit usage. - /// - /// Anthropic's API bills and rate-limits by token counts, as tokens represent the - /// underlying cost to our systems. - /// - /// Under the hood, the API transforms requests into a format suitable for the - /// model. The model's output then goes through a parsing stage before becoming an - /// API response. 
As a result, the token counts in `usage` will not match one-to-one - /// with the exact visible content of an API request or response. - /// - /// For example, `output_tokens` will be non-zero, even for an empty string response - /// from Claude. - @override - final MessageDeltaUsage usage; - - @override - String toString() { - return 'MessageStreamEvent.messageDelta(delta: $delta, type: $type, usage: $usage)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$MessageDeltaEventImpl && - (identical(other.delta, delta) || other.delta == delta) && - (identical(other.type, type) || other.type == type) && - (identical(other.usage, usage) || other.usage == usage)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, delta, type, usage); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => - __$$MessageDeltaEventImplCopyWithImpl<_$MessageDeltaEventImpl>( - this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') Block contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, - required TResult Function( - BlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, - required TResult Function(int index, MessageStreamEventType type) - contentBlockStop, - required TResult Function(MessageStreamEventType type) ping, - }) { - return messageDelta(delta, type, usage); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(Message message, MessageStreamEventType type)? - messageStart, - TResult? Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult? Function(MessageStreamEventType type)? ping, - }) { - return messageDelta?.call(delta, type, usage); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(Message message, MessageStreamEventType type)? - messageStart, - TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult Function(MessageStreamEventType type)? 
ping, - required TResult orElse(), - }) { - if (messageDelta != null) { - return messageDelta(delta, type, usage); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(MessageStartEvent value) messageStart, - required TResult Function(MessageDeltaEvent value) messageDelta, - required TResult Function(MessageStopEvent value) messageStop, - required TResult Function(ContentBlockStartEvent value) contentBlockStart, - required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, - required TResult Function(ContentBlockStopEvent value) contentBlockStop, - required TResult Function(PingEvent value) ping, - }) { - return messageDelta(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageStartEvent value)? messageStart, - TResult? Function(MessageDeltaEvent value)? messageDelta, - TResult? Function(MessageStopEvent value)? messageStop, - TResult? Function(ContentBlockStartEvent value)? contentBlockStart, - TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult? Function(ContentBlockStopEvent value)? contentBlockStop, - TResult? Function(PingEvent value)? ping, - }) { - return messageDelta?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageStartEvent value)? messageStart, - TResult Function(MessageDeltaEvent value)? messageDelta, - TResult Function(MessageStopEvent value)? messageStop, - TResult Function(ContentBlockStartEvent value)? contentBlockStart, - TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult Function(ContentBlockStopEvent value)? contentBlockStop, - TResult Function(PingEvent value)? ping, - required TResult orElse(), - }) { - if (messageDelta != null) { - return messageDelta(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$MessageDeltaEventImplToJson( - this, - ); - } -} - -abstract class MessageDeltaEvent extends MessageStreamEvent { - const factory MessageDeltaEvent( - {required final MessageDelta delta, - required final MessageStreamEventType type, - required final MessageDeltaUsage usage}) = _$MessageDeltaEventImpl; - const MessageDeltaEvent._() : super._(); - - factory MessageDeltaEvent.fromJson(Map json) = - _$MessageDeltaEventImpl.fromJson; - - /// A delta in a streaming message. - MessageDelta get delta; - @override - - /// The type of a streaming event. - MessageStreamEventType get type; - - /// Billing and rate-limit usage. - /// - /// Anthropic's API bills and rate-limits by token counts, as tokens represent the - /// underlying cost to our systems. - /// - /// Under the hood, the API transforms requests into a format suitable for the - /// model. The model's output then goes through a parsing stage before becoming an - /// API response. As a result, the token counts in `usage` will not match one-to-one - /// with the exact visible content of an API request or response. - /// - /// For example, `output_tokens` will be non-zero, even for an empty string response - /// from Claude. 
- MessageDeltaUsage get usage; - @override - @JsonKey(ignore: true) - _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$MessageStopEventImplCopyWith<$Res> - implements $MessageStreamEventCopyWith<$Res> { - factory _$$MessageStopEventImplCopyWith(_$MessageStopEventImpl value, - $Res Function(_$MessageStopEventImpl) then) = - __$$MessageStopEventImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({MessageStreamEventType type}); -} - -/// @nodoc -class __$$MessageStopEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStopEventImpl> - implements _$$MessageStopEventImplCopyWith<$Res> { - __$$MessageStopEventImplCopyWithImpl(_$MessageStopEventImpl _value, - $Res Function(_$MessageStopEventImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_$MessageStopEventImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$MessageStopEventImpl extends MessageStopEvent { - const _$MessageStopEventImpl({required this.type}) : super._(); - - factory _$MessageStopEventImpl.fromJson(Map json) => - _$$MessageStopEventImplFromJson(json); - - /// The type of a streaming event. - @override - final MessageStreamEventType type; - - @override - String toString() { - return 'MessageStreamEvent.messageStop(type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$MessageStopEventImpl && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => - __$$MessageStopEventImplCopyWithImpl<_$MessageStopEventImpl>( - this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') Block contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, - required TResult Function( - BlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, - required TResult Function(int index, MessageStreamEventType type) - contentBlockStop, - required TResult Function(MessageStreamEventType type) ping, - }) { - return messageStop(type); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(Message message, MessageStreamEventType type)? - messageStart, - TResult? Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult? 
Function(MessageStreamEventType type)? ping, - }) { - return messageStop?.call(type); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(Message message, MessageStreamEventType type)? - messageStart, - TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult Function(MessageStreamEventType type)? ping, - required TResult orElse(), - }) { - if (messageStop != null) { - return messageStop(type); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(MessageStartEvent value) messageStart, - required TResult Function(MessageDeltaEvent value) messageDelta, - required TResult Function(MessageStopEvent value) messageStop, - required TResult Function(ContentBlockStartEvent value) contentBlockStart, - required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, - required TResult Function(ContentBlockStopEvent value) contentBlockStop, - required TResult Function(PingEvent value) ping, - }) { - return messageStop(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageStartEvent value)? messageStart, - TResult? Function(MessageDeltaEvent value)? messageDelta, - TResult? Function(MessageStopEvent value)? messageStop, - TResult? Function(ContentBlockStartEvent value)? contentBlockStart, - TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult? Function(ContentBlockStopEvent value)? contentBlockStop, - TResult? Function(PingEvent value)? ping, - }) { - return messageStop?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageStartEvent value)? messageStart, - TResult Function(MessageDeltaEvent value)? messageDelta, - TResult Function(MessageStopEvent value)? messageStop, - TResult Function(ContentBlockStartEvent value)? contentBlockStart, - TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult Function(ContentBlockStopEvent value)? contentBlockStop, - TResult Function(PingEvent value)? ping, - required TResult orElse(), - }) { - if (messageStop != null) { - return messageStop(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$MessageStopEventImplToJson( - this, - ); - } -} - -abstract class MessageStopEvent extends MessageStreamEvent { - const factory MessageStopEvent({required final MessageStreamEventType type}) = - _$MessageStopEventImpl; - const MessageStopEvent._() : super._(); - - factory MessageStopEvent.fromJson(Map json) = - _$MessageStopEventImpl.fromJson; - - @override - - /// The type of a streaming event. 
- MessageStreamEventType get type; - @override - @JsonKey(ignore: true) - _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$ContentBlockStartEventImplCopyWith<$Res> - implements $MessageStreamEventCopyWith<$Res> { - factory _$$ContentBlockStartEventImplCopyWith( - _$ContentBlockStartEventImpl value, - $Res Function(_$ContentBlockStartEventImpl) then) = - __$$ContentBlockStartEventImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(name: 'content_block') Block contentBlock, - int index, - MessageStreamEventType type}); - - $BlockCopyWith<$Res> get contentBlock; -} - -/// @nodoc -class __$$ContentBlockStartEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStartEventImpl> - implements _$$ContentBlockStartEventImplCopyWith<$Res> { - __$$ContentBlockStartEventImplCopyWithImpl( - _$ContentBlockStartEventImpl _value, - $Res Function(_$ContentBlockStartEventImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? contentBlock = null, - Object? index = null, - Object? type = null, - }) { - return _then(_$ContentBlockStartEventImpl( - contentBlock: null == contentBlock - ? _value.contentBlock - : contentBlock // ignore: cast_nullable_to_non_nullable - as Block, - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, - )); - } - - @override - @pragma('vm:prefer-inline') - $BlockCopyWith<$Res> get contentBlock { - return $BlockCopyWith<$Res>(_value.contentBlock, (value) { - return _then(_value.copyWith(contentBlock: value)); - }); - } -} - -/// @nodoc -@JsonSerializable() -class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { - const _$ContentBlockStartEventImpl( - {@JsonKey(name: 'content_block') required this.contentBlock, - required this.index, - required this.type}) - : super._(); - - factory _$ContentBlockStartEventImpl.fromJson(Map json) => - _$$ContentBlockStartEventImplFromJson(json); - - /// A block of content in a message. - /// Any of: [TextBlock], [ImageBlock], [ToolUseBlock], [ToolResultBlock] - @override - @JsonKey(name: 'content_block') - final Block contentBlock; - - /// The index of the content block. - @override - final int index; - - /// The type of a streaming event. 
- @override - final MessageStreamEventType type; - - @override - String toString() { - return 'MessageStreamEvent.contentBlockStart(contentBlock: $contentBlock, index: $index, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ContentBlockStartEventImpl && - (identical(other.contentBlock, contentBlock) || - other.contentBlock == contentBlock) && - (identical(other.index, index) || other.index == index) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, contentBlock, index, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> - get copyWith => __$$ContentBlockStartEventImplCopyWithImpl< - _$ContentBlockStartEventImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') Block contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, - required TResult Function( - BlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, - required TResult Function(int index, MessageStreamEventType type) - contentBlockStop, - required TResult Function(MessageStreamEventType type) ping, - }) { - return contentBlockStart(contentBlock, index, type); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(Message message, MessageStreamEventType type)? - messageStart, - TResult? Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult? Function(MessageStreamEventType type)? ping, - }) { - return contentBlockStart?.call(contentBlock, index, type); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(Message message, MessageStreamEventType type)? - messageStart, - TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult Function(MessageStreamEventType type)? 
ping, - required TResult orElse(), - }) { - if (contentBlockStart != null) { - return contentBlockStart(contentBlock, index, type); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(MessageStartEvent value) messageStart, - required TResult Function(MessageDeltaEvent value) messageDelta, - required TResult Function(MessageStopEvent value) messageStop, - required TResult Function(ContentBlockStartEvent value) contentBlockStart, - required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, - required TResult Function(ContentBlockStopEvent value) contentBlockStop, - required TResult Function(PingEvent value) ping, - }) { - return contentBlockStart(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageStartEvent value)? messageStart, - TResult? Function(MessageDeltaEvent value)? messageDelta, - TResult? Function(MessageStopEvent value)? messageStop, - TResult? Function(ContentBlockStartEvent value)? contentBlockStart, - TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult? Function(ContentBlockStopEvent value)? contentBlockStop, - TResult? Function(PingEvent value)? ping, - }) { - return contentBlockStart?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageStartEvent value)? messageStart, - TResult Function(MessageDeltaEvent value)? messageDelta, - TResult Function(MessageStopEvent value)? messageStop, - TResult Function(ContentBlockStartEvent value)? contentBlockStart, - TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult Function(ContentBlockStopEvent value)? contentBlockStop, - TResult Function(PingEvent value)? ping, - required TResult orElse(), - }) { - if (contentBlockStart != null) { - return contentBlockStart(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ContentBlockStartEventImplToJson( - this, - ); - } -} - -abstract class ContentBlockStartEvent extends MessageStreamEvent { - const factory ContentBlockStartEvent( - {@JsonKey(name: 'content_block') required final Block contentBlock, - required final int index, - required final MessageStreamEventType type}) = - _$ContentBlockStartEventImpl; - const ContentBlockStartEvent._() : super._(); - - factory ContentBlockStartEvent.fromJson(Map json) = - _$ContentBlockStartEventImpl.fromJson; - - /// A block of content in a message. - /// Any of: [TextBlock], [ImageBlock], [ToolUseBlock], [ToolResultBlock] - @JsonKey(name: 'content_block') - Block get contentBlock; - - /// The index of the content block. - int get index; - @override - - /// The type of a streaming event. 
- MessageStreamEventType get type; - @override - @JsonKey(ignore: true) - _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$ContentBlockDeltaEventImplCopyWith<$Res> - implements $MessageStreamEventCopyWith<$Res> { - factory _$$ContentBlockDeltaEventImplCopyWith( - _$ContentBlockDeltaEventImpl value, - $Res Function(_$ContentBlockDeltaEventImpl) then) = - __$$ContentBlockDeltaEventImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({BlockDelta delta, int index, MessageStreamEventType type}); - - $BlockDeltaCopyWith<$Res> get delta; -} - -/// @nodoc -class __$$ContentBlockDeltaEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockDeltaEventImpl> - implements _$$ContentBlockDeltaEventImplCopyWith<$Res> { - __$$ContentBlockDeltaEventImplCopyWithImpl( - _$ContentBlockDeltaEventImpl _value, - $Res Function(_$ContentBlockDeltaEventImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? delta = null, - Object? index = null, - Object? type = null, - }) { - return _then(_$ContentBlockDeltaEventImpl( - delta: null == delta - ? _value.delta - : delta // ignore: cast_nullable_to_non_nullable - as BlockDelta, - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, - )); - } - - @override - @pragma('vm:prefer-inline') - $BlockDeltaCopyWith<$Res> get delta { - return $BlockDeltaCopyWith<$Res>(_value.delta, (value) { - return _then(_value.copyWith(delta: value)); - }); - } -} - -/// @nodoc -@JsonSerializable() -class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { - const _$ContentBlockDeltaEventImpl( - {required this.delta, required this.index, required this.type}) - : super._(); - - factory _$ContentBlockDeltaEventImpl.fromJson(Map json) => - _$$ContentBlockDeltaEventImplFromJson(json); - - /// A delta in a streaming message. - /// Any of: [TextBlockDelta], [InputJsonBlockDelta] - @override - final BlockDelta delta; - - /// The index of the content block. - @override - final int index; - - /// The type of a streaming event. 
- @override - final MessageStreamEventType type; - - @override - String toString() { - return 'MessageStreamEvent.contentBlockDelta(delta: $delta, index: $index, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ContentBlockDeltaEventImpl && - (identical(other.delta, delta) || other.delta == delta) && - (identical(other.index, index) || other.index == index) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, delta, index, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> - get copyWith => __$$ContentBlockDeltaEventImplCopyWithImpl< - _$ContentBlockDeltaEventImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') Block contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, - required TResult Function( - BlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, - required TResult Function(int index, MessageStreamEventType type) - contentBlockStop, - required TResult Function(MessageStreamEventType type) ping, - }) { - return contentBlockDelta(delta, index, type); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(Message message, MessageStreamEventType type)? - messageStart, - TResult? Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult? Function(MessageStreamEventType type)? ping, - }) { - return contentBlockDelta?.call(delta, index, type); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(Message message, MessageStreamEventType type)? - messageStart, - TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult Function(MessageStreamEventType type)? 
ping, - required TResult orElse(), - }) { - if (contentBlockDelta != null) { - return contentBlockDelta(delta, index, type); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(MessageStartEvent value) messageStart, - required TResult Function(MessageDeltaEvent value) messageDelta, - required TResult Function(MessageStopEvent value) messageStop, - required TResult Function(ContentBlockStartEvent value) contentBlockStart, - required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, - required TResult Function(ContentBlockStopEvent value) contentBlockStop, - required TResult Function(PingEvent value) ping, - }) { - return contentBlockDelta(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageStartEvent value)? messageStart, - TResult? Function(MessageDeltaEvent value)? messageDelta, - TResult? Function(MessageStopEvent value)? messageStop, - TResult? Function(ContentBlockStartEvent value)? contentBlockStart, - TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult? Function(ContentBlockStopEvent value)? contentBlockStop, - TResult? Function(PingEvent value)? ping, - }) { - return contentBlockDelta?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageStartEvent value)? messageStart, - TResult Function(MessageDeltaEvent value)? messageDelta, - TResult Function(MessageStopEvent value)? messageStop, - TResult Function(ContentBlockStartEvent value)? contentBlockStart, - TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult Function(ContentBlockStopEvent value)? contentBlockStop, - TResult Function(PingEvent value)? ping, - required TResult orElse(), - }) { - if (contentBlockDelta != null) { - return contentBlockDelta(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ContentBlockDeltaEventImplToJson( - this, - ); - } -} - -abstract class ContentBlockDeltaEvent extends MessageStreamEvent { - const factory ContentBlockDeltaEvent( - {required final BlockDelta delta, - required final int index, - required final MessageStreamEventType type}) = - _$ContentBlockDeltaEventImpl; - const ContentBlockDeltaEvent._() : super._(); - - factory ContentBlockDeltaEvent.fromJson(Map json) = - _$ContentBlockDeltaEventImpl.fromJson; - - /// A delta in a streaming message. - /// Any of: [TextBlockDelta], [InputJsonBlockDelta] - BlockDelta get delta; - - /// The index of the content block. - int get index; - @override - - /// The type of a streaming event. 
- MessageStreamEventType get type; - @override - @JsonKey(ignore: true) - _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$ContentBlockStopEventImplCopyWith<$Res> - implements $MessageStreamEventCopyWith<$Res> { - factory _$$ContentBlockStopEventImplCopyWith( - _$ContentBlockStopEventImpl value, - $Res Function(_$ContentBlockStopEventImpl) then) = - __$$ContentBlockStopEventImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({int index, MessageStreamEventType type}); -} - -/// @nodoc -class __$$ContentBlockStopEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStopEventImpl> - implements _$$ContentBlockStopEventImplCopyWith<$Res> { - __$$ContentBlockStopEventImplCopyWithImpl(_$ContentBlockStopEventImpl _value, - $Res Function(_$ContentBlockStopEventImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? index = null, - Object? type = null, - }) { - return _then(_$ContentBlockStopEventImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ContentBlockStopEventImpl extends ContentBlockStopEvent { - const _$ContentBlockStopEventImpl({required this.index, required this.type}) - : super._(); - - factory _$ContentBlockStopEventImpl.fromJson(Map json) => - _$$ContentBlockStopEventImplFromJson(json); - - /// The index of the content block. - @override - final int index; - - /// The type of a streaming event. - @override - final MessageStreamEventType type; - - @override - String toString() { - return 'MessageStreamEvent.contentBlockStop(index: $index, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ContentBlockStopEventImpl && - (identical(other.index, index) || other.index == index) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, index, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> - get copyWith => __$$ContentBlockStopEventImplCopyWithImpl< - _$ContentBlockStopEventImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') Block contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, - required TResult Function( - BlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, - required TResult Function(int index, MessageStreamEventType type) - contentBlockStop, - required TResult Function(MessageStreamEventType type) ping, - }) { - return contentBlockStop(index, type); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(Message message, MessageStreamEventType type)? - messageStart, - TResult? 
Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult? Function(MessageStreamEventType type)? ping, - }) { - return contentBlockStop?.call(index, type); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(Message message, MessageStreamEventType type)? - messageStart, - TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult Function(MessageStreamEventType type)? ping, - required TResult orElse(), - }) { - if (contentBlockStop != null) { - return contentBlockStop(index, type); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(MessageStartEvent value) messageStart, - required TResult Function(MessageDeltaEvent value) messageDelta, - required TResult Function(MessageStopEvent value) messageStop, - required TResult Function(ContentBlockStartEvent value) contentBlockStart, - required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, - required TResult Function(ContentBlockStopEvent value) contentBlockStop, - required TResult Function(PingEvent value) ping, - }) { - return contentBlockStop(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageStartEvent value)? messageStart, - TResult? Function(MessageDeltaEvent value)? messageDelta, - TResult? Function(MessageStopEvent value)? messageStop, - TResult? Function(ContentBlockStartEvent value)? contentBlockStart, - TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult? Function(ContentBlockStopEvent value)? contentBlockStop, - TResult? Function(PingEvent value)? ping, - }) { - return contentBlockStop?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageStartEvent value)? messageStart, - TResult Function(MessageDeltaEvent value)? messageDelta, - TResult Function(MessageStopEvent value)? messageStop, - TResult Function(ContentBlockStartEvent value)? contentBlockStart, - TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult Function(ContentBlockStopEvent value)? contentBlockStop, - TResult Function(PingEvent value)? 
ping, - required TResult orElse(), - }) { - if (contentBlockStop != null) { - return contentBlockStop(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ContentBlockStopEventImplToJson( - this, - ); - } -} - -abstract class ContentBlockStopEvent extends MessageStreamEvent { - const factory ContentBlockStopEvent( - {required final int index, - required final MessageStreamEventType type}) = - _$ContentBlockStopEventImpl; - const ContentBlockStopEvent._() : super._(); - - factory ContentBlockStopEvent.fromJson(Map json) = - _$ContentBlockStopEventImpl.fromJson; - - /// The index of the content block. - int get index; - @override - - /// The type of a streaming event. - MessageStreamEventType get type; - @override - @JsonKey(ignore: true) - _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$PingEventImplCopyWith<$Res> - implements $MessageStreamEventCopyWith<$Res> { - factory _$$PingEventImplCopyWith( - _$PingEventImpl value, $Res Function(_$PingEventImpl) then) = - __$$PingEventImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({MessageStreamEventType type}); -} - -/// @nodoc -class __$$PingEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$PingEventImpl> - implements _$$PingEventImplCopyWith<$Res> { - __$$PingEventImplCopyWithImpl( - _$PingEventImpl _value, $Res Function(_$PingEventImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_$PingEventImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$PingEventImpl extends PingEvent { - const _$PingEventImpl({required this.type}) : super._(); - - factory _$PingEventImpl.fromJson(Map json) => - _$$PingEventImplFromJson(json); - - /// The type of a streaming event. - @override - final MessageStreamEventType type; - - @override - String toString() { - return 'MessageStreamEvent.ping(type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$PingEventImpl && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => - __$$PingEventImplCopyWithImpl<_$PingEventImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') Block contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, - required TResult Function( - BlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, - required TResult Function(int index, MessageStreamEventType type) - contentBlockStop, - required TResult Function(MessageStreamEventType type) ping, - }) { - return ping(type); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? 
Function(Message message, MessageStreamEventType type)? - messageStart, - TResult? Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult? Function(MessageStreamEventType type)? ping, - }) { - return ping?.call(type); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(Message message, MessageStreamEventType type)? - messageStart, - TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') Block contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, - TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult Function(MessageStreamEventType type)? ping, - required TResult orElse(), - }) { - if (ping != null) { - return ping(type); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(MessageStartEvent value) messageStart, - required TResult Function(MessageDeltaEvent value) messageDelta, - required TResult Function(MessageStopEvent value) messageStop, - required TResult Function(ContentBlockStartEvent value) contentBlockStart, - required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, - required TResult Function(ContentBlockStopEvent value) contentBlockStop, - required TResult Function(PingEvent value) ping, - }) { - return ping(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageStartEvent value)? messageStart, - TResult? Function(MessageDeltaEvent value)? messageDelta, - TResult? Function(MessageStopEvent value)? messageStop, - TResult? Function(ContentBlockStartEvent value)? contentBlockStart, - TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult? Function(ContentBlockStopEvent value)? contentBlockStop, - TResult? Function(PingEvent value)? ping, - }) { - return ping?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageStartEvent value)? messageStart, - TResult Function(MessageDeltaEvent value)? messageDelta, - TResult Function(MessageStopEvent value)? messageStop, - TResult Function(ContentBlockStartEvent value)? contentBlockStart, - TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult Function(ContentBlockStopEvent value)? contentBlockStop, - TResult Function(PingEvent value)? ping, - required TResult orElse(), - }) { - if (ping != null) { - return ping(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$PingEventImplToJson( - this, - ); - } -} - -abstract class PingEvent extends MessageStreamEvent { - const factory PingEvent({required final MessageStreamEventType type}) = - _$PingEventImpl; - const PingEvent._() : super._(); - - factory PingEvent.fromJson(Map json) = - _$PingEventImpl.fromJson; - - @override - - /// The type of a streaming event. 
- MessageStreamEventType get type; - @override - @JsonKey(ignore: true) - _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => - throw _privateConstructorUsedError; -} - -BlockDelta _$BlockDeltaFromJson(Map json) { - switch (json['type']) { - case 'text_delta': - return TextBlockDelta.fromJson(json); - case 'input_json_delta': - return InputJsonBlockDelta.fromJson(json); - - default: - throw CheckedFromJsonException( - json, 'type', 'BlockDelta', 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$BlockDelta { - /// The type of content block. - String get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(String text, String type) textDelta, - required TResult Function( - @JsonKey(name: 'partial_json', includeIfNull: false) - String? partialJson, - String type) - inputJsonDelta, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String text, String type)? textDelta, - TResult? Function( - @JsonKey(name: 'partial_json', includeIfNull: false) - String? partialJson, - String type)? - inputJsonDelta, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String text, String type)? textDelta, - TResult Function( - @JsonKey(name: 'partial_json', includeIfNull: false) - String? partialJson, - String type)? - inputJsonDelta, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(TextBlockDelta value) textDelta, - required TResult Function(InputJsonBlockDelta value) inputJsonDelta, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(TextBlockDelta value)? textDelta, - TResult? Function(InputJsonBlockDelta value)? inputJsonDelta, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(TextBlockDelta value)? textDelta, - TResult Function(InputJsonBlockDelta value)? inputJsonDelta, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $BlockDeltaCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $BlockDeltaCopyWith<$Res> { - factory $BlockDeltaCopyWith( - BlockDelta value, $Res Function(BlockDelta) then) = - _$BlockDeltaCopyWithImpl<$Res, BlockDelta>; - @useResult - $Res call({String type}); -} - -/// @nodoc -class _$BlockDeltaCopyWithImpl<$Res, $Val extends BlockDelta> - implements $BlockDeltaCopyWith<$Res> { - _$BlockDeltaCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$TextBlockDeltaImplCopyWith<$Res> - implements $BlockDeltaCopyWith<$Res> { - factory _$$TextBlockDeltaImplCopyWith(_$TextBlockDeltaImpl value, - $Res Function(_$TextBlockDeltaImpl) then) = - __$$TextBlockDeltaImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({String text, String type}); -} - -/// @nodoc -class __$$TextBlockDeltaImplCopyWithImpl<$Res> - extends _$BlockDeltaCopyWithImpl<$Res, _$TextBlockDeltaImpl> - implements _$$TextBlockDeltaImplCopyWith<$Res> { - __$$TextBlockDeltaImplCopyWithImpl( - _$TextBlockDeltaImpl _value, $Res Function(_$TextBlockDeltaImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? text = null, - Object? type = null, - }) { - return _then(_$TextBlockDeltaImpl( - text: null == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$TextBlockDeltaImpl extends TextBlockDelta { - const _$TextBlockDeltaImpl({required this.text, required this.type}) - : super._(); - - factory _$TextBlockDeltaImpl.fromJson(Map json) => - _$$TextBlockDeltaImplFromJson(json); - - /// The text delta. - @override - final String text; - - /// The type of content block. - @override - final String type; - - @override - String toString() { - return 'BlockDelta.textDelta(text: $text, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$TextBlockDeltaImpl && - (identical(other.text, text) || other.text == text) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, text, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => - __$$TextBlockDeltaImplCopyWithImpl<_$TextBlockDeltaImpl>( - this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(String text, String type) textDelta, - required TResult Function( - @JsonKey(name: 'partial_json', includeIfNull: false) - String? partialJson, - String type) - inputJsonDelta, - }) { - return textDelta(text, type); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String text, String type)? textDelta, - TResult? Function( - @JsonKey(name: 'partial_json', includeIfNull: false) - String? partialJson, - String type)? - inputJsonDelta, - }) { - return textDelta?.call(text, type); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String text, String type)? textDelta, - TResult Function( - @JsonKey(name: 'partial_json', includeIfNull: false) - String? partialJson, - String type)? - inputJsonDelta, - required TResult orElse(), - }) { - if (textDelta != null) { - return textDelta(text, type); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(TextBlockDelta value) textDelta, - required TResult Function(InputJsonBlockDelta value) inputJsonDelta, - }) { - return textDelta(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(TextBlockDelta value)? textDelta, - TResult? Function(InputJsonBlockDelta value)? 
inputJsonDelta, - }) { - return textDelta?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(TextBlockDelta value)? textDelta, - TResult Function(InputJsonBlockDelta value)? inputJsonDelta, - required TResult orElse(), - }) { - if (textDelta != null) { - return textDelta(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$TextBlockDeltaImplToJson( - this, - ); - } -} - -abstract class TextBlockDelta extends BlockDelta { - const factory TextBlockDelta( - {required final String text, - required final String type}) = _$TextBlockDeltaImpl; - const TextBlockDelta._() : super._(); - - factory TextBlockDelta.fromJson(Map json) = - _$TextBlockDeltaImpl.fromJson; - - /// The text delta. - String get text; - @override - - /// The type of content block. - String get type; - @override - @JsonKey(ignore: true) - _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$InputJsonBlockDeltaImplCopyWith<$Res> - implements $BlockDeltaCopyWith<$Res> { - factory _$$InputJsonBlockDeltaImplCopyWith(_$InputJsonBlockDeltaImpl value, - $Res Function(_$InputJsonBlockDeltaImpl) then) = - __$$InputJsonBlockDeltaImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(name: 'partial_json', includeIfNull: false) String? partialJson, - String type}); -} - -/// @nodoc -class __$$InputJsonBlockDeltaImplCopyWithImpl<$Res> - extends _$BlockDeltaCopyWithImpl<$Res, _$InputJsonBlockDeltaImpl> - implements _$$InputJsonBlockDeltaImplCopyWith<$Res> { - __$$InputJsonBlockDeltaImplCopyWithImpl(_$InputJsonBlockDeltaImpl _value, - $Res Function(_$InputJsonBlockDeltaImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? partialJson = freezed, - Object? type = null, - }) { - return _then(_$InputJsonBlockDeltaImpl( - partialJson: freezed == partialJson - ? _value.partialJson - : partialJson // ignore: cast_nullable_to_non_nullable - as String?, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$InputJsonBlockDeltaImpl extends InputJsonBlockDelta { - const _$InputJsonBlockDeltaImpl( - {@JsonKey(name: 'partial_json', includeIfNull: false) this.partialJson, - required this.type}) - : super._(); - - factory _$InputJsonBlockDeltaImpl.fromJson(Map json) => - _$$InputJsonBlockDeltaImplFromJson(json); - - /// The partial JSON delta. - @override - @JsonKey(name: 'partial_json', includeIfNull: false) - final String? partialJson; - - /// The type of content block. 
- @override - final String type; - - @override - String toString() { - return 'BlockDelta.inputJsonDelta(partialJson: $partialJson, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$InputJsonBlockDeltaImpl && - (identical(other.partialJson, partialJson) || - other.partialJson == partialJson) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, partialJson, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$InputJsonBlockDeltaImplCopyWith<_$InputJsonBlockDeltaImpl> get copyWith => - __$$InputJsonBlockDeltaImplCopyWithImpl<_$InputJsonBlockDeltaImpl>( - this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(String text, String type) textDelta, - required TResult Function( - @JsonKey(name: 'partial_json', includeIfNull: false) - String? partialJson, - String type) - inputJsonDelta, - }) { - return inputJsonDelta(partialJson, type); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String text, String type)? textDelta, - TResult? Function( - @JsonKey(name: 'partial_json', includeIfNull: false) - String? partialJson, - String type)? - inputJsonDelta, - }) { - return inputJsonDelta?.call(partialJson, type); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String text, String type)? textDelta, - TResult Function( - @JsonKey(name: 'partial_json', includeIfNull: false) - String? partialJson, - String type)? - inputJsonDelta, - required TResult orElse(), - }) { - if (inputJsonDelta != null) { - return inputJsonDelta(partialJson, type); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(TextBlockDelta value) textDelta, - required TResult Function(InputJsonBlockDelta value) inputJsonDelta, - }) { - return inputJsonDelta(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(TextBlockDelta value)? textDelta, - TResult? Function(InputJsonBlockDelta value)? inputJsonDelta, - }) { - return inputJsonDelta?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(TextBlockDelta value)? textDelta, - TResult Function(InputJsonBlockDelta value)? inputJsonDelta, - required TResult orElse(), - }) { - if (inputJsonDelta != null) { - return inputJsonDelta(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$InputJsonBlockDeltaImplToJson( - this, - ); - } -} - -abstract class InputJsonBlockDelta extends BlockDelta { - const factory InputJsonBlockDelta( - {@JsonKey(name: 'partial_json', includeIfNull: false) - final String? partialJson, - required final String type}) = _$InputJsonBlockDeltaImpl; - const InputJsonBlockDelta._() : super._(); - - factory InputJsonBlockDelta.fromJson(Map json) = - _$InputJsonBlockDeltaImpl.fromJson; - - /// The partial JSON delta. - @JsonKey(name: 'partial_json', includeIfNull: false) - String? get partialJson; - @override - - /// The type of content block. 
- String get type; - @override - @JsonKey(ignore: true) - _$$InputJsonBlockDeltaImplCopyWith<_$InputJsonBlockDeltaImpl> get copyWith => - throw _privateConstructorUsedError; -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart deleted file mode 100644 index dc8d9833..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart +++ /dev/null @@ -1,558 +0,0 @@ -// GENERATED CODE - DO NOT MODIFY BY HAND - -// ignore_for_file: prefer_final_parameters, require_trailing_commas, non_constant_identifier_names, unnecessary_null_checks - -part of 'schema.dart'; - -// ************************************************************************** -// JsonSerializableGenerator -// ************************************************************************** - -_$CreateMessageRequestImpl _$$CreateMessageRequestImplFromJson( - Map json) => - _$CreateMessageRequestImpl( - model: const _ModelConverter().fromJson(json['model']), - messages: (json['messages'] as List) - .map((e) => Message.fromJson(e as Map)) - .toList(), - maxTokens: (json['max_tokens'] as num).toInt(), - metadata: json['metadata'] == null - ? null - : CreateMessageRequestMetadata.fromJson( - json['metadata'] as Map), - stopSequences: (json['stop_sequences'] as List?) - ?.map((e) => e as String) - .toList(), - system: json['system'] as String?, - temperature: (json['temperature'] as num?)?.toDouble(), - toolChoice: json['tool_choice'] == null - ? null - : ToolChoice.fromJson(json['tool_choice'] as Map), - tools: (json['tools'] as List?) - ?.map((e) => Tool.fromJson(e as Map)) - .toList(), - topK: (json['top_k'] as num?)?.toInt(), - topP: (json['top_p'] as num?)?.toDouble(), - stream: json['stream'] as bool? ?? 
false, - ); - -Map _$$CreateMessageRequestImplToJson( - _$CreateMessageRequestImpl instance) { - final val = { - 'model': const _ModelConverter().toJson(instance.model), - 'messages': instance.messages.map((e) => e.toJson()).toList(), - 'max_tokens': instance.maxTokens, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('metadata', instance.metadata?.toJson()); - writeNotNull('stop_sequences', instance.stopSequences); - writeNotNull('system', instance.system); - writeNotNull('temperature', instance.temperature); - writeNotNull('tool_choice', instance.toolChoice?.toJson()); - writeNotNull('tools', instance.tools?.map((e) => e.toJson()).toList()); - writeNotNull('top_k', instance.topK); - writeNotNull('top_p', instance.topP); - val['stream'] = instance.stream; - return val; -} - -_$ModelCatalogImpl _$$ModelCatalogImplFromJson(Map json) => - _$ModelCatalogImpl( - $enumDecode(_$ModelsEnumMap, json['value']), - $type: json['runtimeType'] as String?, - ); - -Map _$$ModelCatalogImplToJson(_$ModelCatalogImpl instance) => - { - 'value': _$ModelsEnumMap[instance.value]!, - 'runtimeType': instance.$type, - }; - -const _$ModelsEnumMap = { - Models.claude35Sonnet20240620: 'claude-3-5-sonnet-20240620', - Models.claude3Haiku20240307: 'claude-3-haiku-20240307', - Models.claude3Opus20240229: 'claude-3-opus-20240229', - Models.claude3Sonnet20240229: 'claude-3-sonnet-20240229', - Models.claude20: 'claude-2.0', - Models.claude21: 'claude-2.1', - Models.claudeInstant12: 'claude-instant-1.2', -}; - -_$ModelIdImpl _$$ModelIdImplFromJson(Map json) => - _$ModelIdImpl( - json['value'] as String, - $type: json['runtimeType'] as String?, - ); - -Map _$$ModelIdImplToJson(_$ModelIdImpl instance) => - { - 'value': instance.value, - 'runtimeType': instance.$type, - }; - -_$CreateMessageRequestMetadataImpl _$$CreateMessageRequestMetadataImplFromJson( - Map json) => - _$CreateMessageRequestMetadataImpl( - userId: json['user_id'] as String?, - ); - -Map _$$CreateMessageRequestMetadataImplToJson( - _$CreateMessageRequestMetadataImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('user_id', instance.userId); - return val; -} - -_$ToolChoiceImpl _$$ToolChoiceImplFromJson(Map json) => - _$ToolChoiceImpl( - type: $enumDecode(_$ToolChoiceTypeEnumMap, json['type']), - name: json['name'] as String?, - ); - -Map _$$ToolChoiceImplToJson(_$ToolChoiceImpl instance) { - final val = { - 'type': _$ToolChoiceTypeEnumMap[instance.type]!, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('name', instance.name); - return val; -} - -const _$ToolChoiceTypeEnumMap = { - ToolChoiceType.auto: 'auto', - ToolChoiceType.any: 'any', - ToolChoiceType.tool: 'tool', -}; - -_$MessageImpl _$$MessageImplFromJson(Map json) => - _$MessageImpl( - id: json['id'] as String?, - content: const _MessageContentConverter().fromJson(json['content']), - role: $enumDecode(_$MessageRoleEnumMap, json['role']), - model: json['model'] as String?, - stopReason: $enumDecodeNullable(_$StopReasonEnumMap, json['stop_reason'], - unknownValue: JsonKey.nullForUndefinedEnumValue), - stopSequence: json['stop_sequence'] as String?, - type: json['type'] as String?, - usage: json['usage'] == null - ? 
null - : Usage.fromJson(json['usage'] as Map), - ); - -Map _$$MessageImplToJson(_$MessageImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('id', instance.id); - val['content'] = const _MessageContentConverter().toJson(instance.content); - val['role'] = _$MessageRoleEnumMap[instance.role]!; - writeNotNull('model', instance.model); - writeNotNull('stop_reason', _$StopReasonEnumMap[instance.stopReason]); - writeNotNull('stop_sequence', instance.stopSequence); - writeNotNull('type', instance.type); - writeNotNull('usage', instance.usage?.toJson()); - return val; -} - -const _$MessageRoleEnumMap = { - MessageRole.user: 'user', - MessageRole.assistant: 'assistant', -}; - -const _$StopReasonEnumMap = { - StopReason.endTurn: 'end_turn', - StopReason.maxTokens: 'max_tokens', - StopReason.stopSequence: 'stop_sequence', - StopReason.toolUse: 'tool_use', -}; - -_$MessageContentBlocksImpl _$$MessageContentBlocksImplFromJson( - Map json) => - _$MessageContentBlocksImpl( - (json['value'] as List) - .map((e) => Block.fromJson(e as Map)) - .toList(), - $type: json['runtimeType'] as String?, - ); - -Map _$$MessageContentBlocksImplToJson( - _$MessageContentBlocksImpl instance) => - { - 'value': instance.value.map((e) => e.toJson()).toList(), - 'runtimeType': instance.$type, - }; - -_$MessageContentTextImpl _$$MessageContentTextImplFromJson( - Map json) => - _$MessageContentTextImpl( - json['value'] as String, - $type: json['runtimeType'] as String?, - ); - -Map _$$MessageContentTextImplToJson( - _$MessageContentTextImpl instance) => - { - 'value': instance.value, - 'runtimeType': instance.$type, - }; - -_$ToolImpl _$$ToolImplFromJson(Map json) => _$ToolImpl( - name: json['name'] as String, - description: json['description'] as String?, - inputSchema: json['input_schema'] as Map, - ); - -Map _$$ToolImplToJson(_$ToolImpl instance) { - final val = { - 'name': instance.name, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('description', instance.description); - val['input_schema'] = instance.inputSchema; - return val; -} - -_$ImageBlockSourceImpl _$$ImageBlockSourceImplFromJson( - Map json) => - _$ImageBlockSourceImpl( - data: json['data'] as String, - mediaType: - $enumDecode(_$ImageBlockSourceMediaTypeEnumMap, json['media_type']), - type: $enumDecode(_$ImageBlockSourceTypeEnumMap, json['type']), - ); - -Map _$$ImageBlockSourceImplToJson( - _$ImageBlockSourceImpl instance) => - { - 'data': instance.data, - 'media_type': _$ImageBlockSourceMediaTypeEnumMap[instance.mediaType]!, - 'type': _$ImageBlockSourceTypeEnumMap[instance.type]!, - }; - -const _$ImageBlockSourceMediaTypeEnumMap = { - ImageBlockSourceMediaType.imageJpeg: 'image/jpeg', - ImageBlockSourceMediaType.imagePng: 'image/png', - ImageBlockSourceMediaType.imageGif: 'image/gif', - ImageBlockSourceMediaType.imageWebp: 'image/webp', -}; - -const _$ImageBlockSourceTypeEnumMap = { - ImageBlockSourceType.base64: 'base64', -}; - -_$UsageImpl _$$UsageImplFromJson(Map json) => _$UsageImpl( - inputTokens: (json['input_tokens'] as num).toInt(), - outputTokens: (json['output_tokens'] as num).toInt(), - ); - -Map _$$UsageImplToJson(_$UsageImpl instance) => - { - 'input_tokens': instance.inputTokens, - 'output_tokens': instance.outputTokens, - }; - -_$MessageDeltaImpl _$$MessageDeltaImplFromJson(Map json) => - _$MessageDeltaImpl( - stopReason: $enumDecodeNullable(_$StopReasonEnumMap, 
json['stop_reason'], - unknownValue: JsonKey.nullForUndefinedEnumValue), - stopSequence: json['stop_sequence'] as String?, - ); - -Map _$$MessageDeltaImplToJson(_$MessageDeltaImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('stop_reason', _$StopReasonEnumMap[instance.stopReason]); - writeNotNull('stop_sequence', instance.stopSequence); - return val; -} - -_$MessageDeltaUsageImpl _$$MessageDeltaUsageImplFromJson( - Map json) => - _$MessageDeltaUsageImpl( - outputTokens: (json['output_tokens'] as num).toInt(), - ); - -Map _$$MessageDeltaUsageImplToJson( - _$MessageDeltaUsageImpl instance) => - { - 'output_tokens': instance.outputTokens, - }; - -_$TextBlockImpl _$$TextBlockImplFromJson(Map json) => - _$TextBlockImpl( - text: json['text'] as String, - type: json['type'] as String? ?? 'text', - ); - -Map _$$TextBlockImplToJson(_$TextBlockImpl instance) => - { - 'text': instance.text, - 'type': instance.type, - }; - -_$ImageBlockImpl _$$ImageBlockImplFromJson(Map json) => - _$ImageBlockImpl( - source: ImageBlockSource.fromJson(json['source'] as Map), - type: json['type'] as String? ?? 'image', - ); - -Map _$$ImageBlockImplToJson(_$ImageBlockImpl instance) => - { - 'source': instance.source.toJson(), - 'type': instance.type, - }; - -_$ToolUseBlockImpl _$$ToolUseBlockImplFromJson(Map json) => - _$ToolUseBlockImpl( - id: json['id'] as String, - name: json['name'] as String, - input: json['input'] as Map, - type: json['type'] as String? ?? 'tool_use', - ); - -Map _$$ToolUseBlockImplToJson(_$ToolUseBlockImpl instance) => - { - 'id': instance.id, - 'name': instance.name, - 'input': instance.input, - 'type': instance.type, - }; - -_$ToolResultBlockImpl _$$ToolResultBlockImplFromJson( - Map json) => - _$ToolResultBlockImpl( - toolUseId: json['tool_use_id'] as String, - content: - const _ToolResultBlockContentConverter().fromJson(json['content']), - isError: json['is_error'] as bool?, - type: json['type'] as String? ?? 
'tool_result', - ); - -Map _$$ToolResultBlockImplToJson( - _$ToolResultBlockImpl instance) { - final val = { - 'tool_use_id': instance.toolUseId, - 'content': - const _ToolResultBlockContentConverter().toJson(instance.content), - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('is_error', instance.isError); - val['type'] = instance.type; - return val; -} - -_$ToolResultBlockContentBlocksImpl _$$ToolResultBlockContentBlocksImplFromJson( - Map json) => - _$ToolResultBlockContentBlocksImpl( - (json['value'] as List) - .map((e) => Block.fromJson(e as Map)) - .toList(), - $type: json['runtimeType'] as String?, - ); - -Map _$$ToolResultBlockContentBlocksImplToJson( - _$ToolResultBlockContentBlocksImpl instance) => - { - 'value': instance.value.map((e) => e.toJson()).toList(), - 'runtimeType': instance.$type, - }; - -_$ToolResultBlockContentTextImpl _$$ToolResultBlockContentTextImplFromJson( - Map json) => - _$ToolResultBlockContentTextImpl( - json['value'] as String, - $type: json['runtimeType'] as String?, - ); - -Map _$$ToolResultBlockContentTextImplToJson( - _$ToolResultBlockContentTextImpl instance) => - { - 'value': instance.value, - 'runtimeType': instance.$type, - }; - -_$MessageStartEventImpl _$$MessageStartEventImplFromJson( - Map json) => - _$MessageStartEventImpl( - message: Message.fromJson(json['message'] as Map), - type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), - ); - -Map _$$MessageStartEventImplToJson( - _$MessageStartEventImpl instance) => - { - 'message': instance.message.toJson(), - 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, - }; - -const _$MessageStreamEventTypeEnumMap = { - MessageStreamEventType.messageStart: 'message_start', - MessageStreamEventType.messageDelta: 'message_delta', - MessageStreamEventType.messageStop: 'message_stop', - MessageStreamEventType.contentBlockStart: 'content_block_start', - MessageStreamEventType.contentBlockDelta: 'content_block_delta', - MessageStreamEventType.contentBlockStop: 'content_block_stop', - MessageStreamEventType.ping: 'ping', -}; - -_$MessageDeltaEventImpl _$$MessageDeltaEventImplFromJson( - Map json) => - _$MessageDeltaEventImpl( - delta: MessageDelta.fromJson(json['delta'] as Map), - type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), - usage: MessageDeltaUsage.fromJson(json['usage'] as Map), - ); - -Map _$$MessageDeltaEventImplToJson( - _$MessageDeltaEventImpl instance) => - { - 'delta': instance.delta.toJson(), - 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, - 'usage': instance.usage.toJson(), - }; - -_$MessageStopEventImpl _$$MessageStopEventImplFromJson( - Map json) => - _$MessageStopEventImpl( - type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), - ); - -Map _$$MessageStopEventImplToJson( - _$MessageStopEventImpl instance) => - { - 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, - }; - -_$ContentBlockStartEventImpl _$$ContentBlockStartEventImplFromJson( - Map json) => - _$ContentBlockStartEventImpl( - contentBlock: - Block.fromJson(json['content_block'] as Map), - index: (json['index'] as num).toInt(), - type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), - ); - -Map _$$ContentBlockStartEventImplToJson( - _$ContentBlockStartEventImpl instance) => - { - 'content_block': instance.contentBlock.toJson(), - 'index': instance.index, - 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, - }; - -_$ContentBlockDeltaEventImpl _$$ContentBlockDeltaEventImplFromJson( 
- Map json) => - _$ContentBlockDeltaEventImpl( - delta: BlockDelta.fromJson(json['delta'] as Map), - index: (json['index'] as num).toInt(), - type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), - ); - -Map _$$ContentBlockDeltaEventImplToJson( - _$ContentBlockDeltaEventImpl instance) => - { - 'delta': instance.delta.toJson(), - 'index': instance.index, - 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, - }; - -_$ContentBlockStopEventImpl _$$ContentBlockStopEventImplFromJson( - Map json) => - _$ContentBlockStopEventImpl( - index: (json['index'] as num).toInt(), - type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), - ); - -Map _$$ContentBlockStopEventImplToJson( - _$ContentBlockStopEventImpl instance) => - { - 'index': instance.index, - 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, - }; - -_$PingEventImpl _$$PingEventImplFromJson(Map json) => - _$PingEventImpl( - type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), - ); - -Map _$$PingEventImplToJson(_$PingEventImpl instance) => - { - 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, - }; - -_$TextBlockDeltaImpl _$$TextBlockDeltaImplFromJson(Map json) => - _$TextBlockDeltaImpl( - text: json['text'] as String, - type: json['type'] as String, - ); - -Map _$$TextBlockDeltaImplToJson( - _$TextBlockDeltaImpl instance) => - { - 'text': instance.text, - 'type': instance.type, - }; - -_$InputJsonBlockDeltaImpl _$$InputJsonBlockDeltaImplFromJson( - Map json) => - _$InputJsonBlockDeltaImpl( - partialJson: json['partial_json'] as String?, - type: json['type'] as String, - ); - -Map _$$InputJsonBlockDeltaImplToJson( - _$InputJsonBlockDeltaImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('partial_json', instance.partialJson); - val['type'] = instance.type; - return val; -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart deleted file mode 100644 index d1950d33..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart +++ /dev/null @@ -1,30 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// ENUM: StopReason -// ========================================== - -/// The reason that we stopped. -/// -/// This may be one the following values: -/// -/// - `"end_turn"`: the model reached a natural stopping point -/// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum -/// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated -/// -/// In non-streaming mode this value is always non-null. In streaming mode, it is -/// null in the `message_start` event and non-null otherwise. 
-enum StopReason { - @JsonValue('end_turn') - endTurn, - @JsonValue('max_tokens') - maxTokens, - @JsonValue('stop_sequence') - stopSequence, - @JsonValue('tool_use') - toolUse, -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart deleted file mode 100644 index 578701a9..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart +++ /dev/null @@ -1,59 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: Tool -// ========================================== - -/// A tool the model may use. -@freezed -class Tool with _$Tool { - const Tool._(); - - /// Factory constructor for Tool - const factory Tool({ - /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. - required String name, - - /// Description of what this tool does. - /// - /// Tool descriptions should be as detailed as possible. The more information that - /// the model has about what the tool is and how to use it, the better it will - /// perform. You can use natural language descriptions to reinforce important - /// aspects of the tool input JSON schema. - @JsonKey(includeIfNull: false) String? description, - - /// [JSON schema](https://json-schema.org/) for this tool's input. - /// - /// This defines the shape of the `input` that your tool accepts and that the model - /// will produce. - @JsonKey(name: 'input_schema') required Map inputSchema, - }) = _Tool; - - /// Object construction from a JSON representation - factory Tool.fromJson(Map json) => _$ToolFromJson(json); - - /// List of all property names of schema - static const List propertyNames = [ - 'name', - 'description', - 'input_schema' - ]; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'name': name, - 'description': description, - 'input_schema': inputSchema, - }; - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart deleted file mode 100644 index cb3d65eb..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart +++ /dev/null @@ -1,54 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: ToolChoice -// ========================================== - -/// How the model should use the provided tools. The model can use a specific tool, -/// any available tool, or decide by itself. -/// -/// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. -/// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. -/// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. -@freezed -class ToolChoice with _$ToolChoice { - const ToolChoice._(); - - /// Factory constructor for ToolChoice - const factory ToolChoice({ - /// How the model should use the provided tools. The model can use a specific tool, - /// any available tool, or decide by itself. 
- /// - /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. - /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. - /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. - required ToolChoiceType type, - - /// The name of the tool to use. - @JsonKey(includeIfNull: false) String? name, - }) = _ToolChoice; - - /// Object construction from a JSON representation - factory ToolChoice.fromJson(Map json) => - _$ToolChoiceFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['type', 'name']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'type': type, - 'name': name, - }; - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart deleted file mode 100644 index 22b88c4d..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart +++ /dev/null @@ -1,24 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// ENUM: ToolChoiceType -// ========================================== - -/// How the model should use the provided tools. The model can use a specific tool, -/// any available tool, or decide by itself. -/// -/// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. -/// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. -/// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. -enum ToolChoiceType { - @JsonValue('auto') - auto, - @JsonValue('any') - any, - @JsonValue('tool') - tool, -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart deleted file mode 100644 index 37f3d39d..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart +++ /dev/null @@ -1,54 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: Usage -// ========================================== - -/// Billing and rate-limit usage. -/// -/// Anthropic's API bills and rate-limits by token counts, as tokens represent the -/// underlying cost to our systems. -/// -/// Under the hood, the API transforms requests into a format suitable for the -/// model. The model's output then goes through a parsing stage before becoming an -/// API response. As a result, the token counts in `usage` will not match one-to-one -/// with the exact visible content of an API request or response. -/// -/// For example, `output_tokens` will be non-zero, even for an empty string response -/// from Claude. -@freezed -class Usage with _$Usage { - const Usage._(); - - /// Factory constructor for Usage - const factory Usage({ - /// The number of input tokens which were used. 
- @JsonKey(name: 'input_tokens') required int inputTokens, - - /// The number of output tokens which were used. - @JsonKey(name: 'output_tokens') required int outputTokens, - }) = _Usage; - - /// Object construction from a JSON representation - factory Usage.fromJson(Map json) => _$UsageFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['input_tokens', 'output_tokens']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'input_tokens': inputTokens, - 'output_tokens': outputTokens, - }; - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart deleted file mode 100644 index 0ad0b2fc..00000000 --- a/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart +++ /dev/null @@ -1,3 +0,0 @@ -export 'http_client_stub.dart' - if (dart.library.io) 'http_client_io.dart' - if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart deleted file mode 100644 index 59abc229..00000000 --- a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart +++ /dev/null @@ -1,18 +0,0 @@ -import 'package:fetch_client/fetch_client.dart' as fetch; -import 'package:http/http.dart' as http; -import 'package:http/retry.dart'; - -/// Creates an IOClient with a retry policy. -http.Client createDefaultHttpClient() { - return RetryClient(fetch.FetchClient(mode: fetch.RequestMode.cors)); -} - -/// Middleware for HTTP requests. -Future onRequestHandler(final http.BaseRequest request) { - // If the request if bigger than 60KiB set persistentConnection to false - // Ref: https://github.com/Zekfad/fetch_client#large-payload - if ((request.contentLength ?? 0) > 61440) { - request.persistentConnection = false; - } - return Future.value(request); -} diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart deleted file mode 100644 index 0b24e7db..00000000 --- a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart +++ /dev/null @@ -1,12 +0,0 @@ -import 'package:http/http.dart' as http; -import 'package:http/retry.dart'; - -/// Creates an IOClient with a retry policy. -http.Client createDefaultHttpClient() { - return RetryClient(http.Client()); -} - -/// Middleware for HTTP requests. -Future onRequestHandler(final http.BaseRequest request) { - return Future.value(request); -} diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart deleted file mode 100644 index 2668d1ac..00000000 --- a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart +++ /dev/null @@ -1,10 +0,0 @@ -import 'package:http/http.dart' as http; - -/// Creates a default HTTP client for the current platform. -http.Client createDefaultHttpClient() => throw UnsupportedError( - 'Cannot create a client without dart:html or dart:io.', - ); - -/// Middleware for HTTP requests. 
-Future onRequestHandler(final http.BaseRequest request) => - throw UnsupportedError('stub'); diff --git a/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml b/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml deleted file mode 100644 index 5ad1f3db..00000000 --- a/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml +++ /dev/null @@ -1,778 +0,0 @@ -openapi: 3.0.3 - -info: - title: Anthropic API - description: API Spec for Anthropic API. Please see https://docs.anthropic.com/en/api for more details. - version: "1" - -servers: - - url: https://api.anthropic.com/v1 - -tags: - - name: Messages - description: Send a structured list of input messages with text and/or image content, and the model will generate the next message in the conversation. - -paths: - /messages: - post: - operationId: createMessage - tags: - - Messages - summary: Create a Message - description: | - Send a structured list of input messages with text and/or image content, and the - model will generate the next message in the conversation. - - The Messages API can be used for either single queries or stateless multi-turn - conversations. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CreateMessageRequest" - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: "#/components/schemas/Message" -components: - securitySchemes: - ApiKeyAuth: - type: apiKey - in: header - name: x-api-key - - schemas: - CreateMessageRequest: - type: object - description: The request parameters for creating a message. - properties: - model: - title: Model - description: | - The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional - details and options. - example: "claude-3-5-sonnet-20240620" - anyOf: - - type: string - description: The ID of the model to use for this request. - - type: string - title: Models - description: | - Available models. Mind that the list may not be exhaustive nor up-to-date. - enum: - - claude-3-5-sonnet-20240620 - - claude-3-haiku-20240307 - - claude-3-opus-20240229 - - claude-3-sonnet-20240229 - - claude-2.0 - - claude-2.1 - - claude-instant-1.2 - messages: - type: array - description: | - Input messages. - - Our models are trained to operate on alternating `user` and `assistant` - conversational turns. When creating a new `Message`, you specify the prior - conversational turns with the `messages` parameter, and the model then generates - the next `Message` in the conversation. - - Each input message must be an object with a `role` and `content`. You can - specify a single `user`-role message, or you can include multiple `user` and - `assistant` messages. The first message must always use the `user` role. - - If the final message uses the `assistant` role, the response content will - continue immediately from the content in that message. This can be used to - constrain part of the model's response. - - See [message content](https://docs.anthropic.com/en/api/messages-content) for - details on how to construct valid message objects. - - Example with a single `user` message: - - ```json - [{ "role": "user", "content": "Hello, Claude" }] - ``` - - Example with multiple conversational turns: - - ```json - [ - { "role": "user", "content": "Hello there." }, - { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, - { "role": "user", "content": "Can you explain LLMs in plain English?" 
} - ] - ``` - - Example with a partially-filled response from Claude: - - ```json - [ - { - "role": "user", - "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" - }, - { "role": "assistant", "content": "The best answer is (" } - ] - ``` - - Each input message `content` may be either a single `string` or an array of - content blocks, where each block has a specific `type`. Using a `string` for - `content` is shorthand for an array of one content block of type `"text"`. The - following input messages are equivalent: - - ```json - { "role": "user", "content": "Hello, Claude" } - ``` - - ```json - { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } - ``` - - Starting with Claude 3 models, you can also send image content blocks: - - ```json - { - "role": "user", - "content": [ - { - "type": "image", - "source": { - "type": "base64", - "media_type": "image/jpeg", - "data": "/9j/4AAQSkZJRg..." - } - }, - { "type": "text", "text": "What is in this image?" } - ] - } - ``` - - We currently support the `base64` source type for images, and the `image/jpeg`, - `image/png`, `image/gif`, and `image/webp` media types. - - See [examples](https://docs.anthropic.com/en/api/messages-examples) for more - input examples. - - Note that if you want to include a - [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use - the top-level `system` parameter — there is no `"system"` role for input - messages in the Messages API. - minItems: 1 - items: - $ref: '#/components/schemas/Message' - max_tokens: - type: integer - description: | - The maximum number of tokens to generate before stopping. - - Note that our models may stop _before_ reaching this maximum. This parameter - only specifies the absolute maximum number of tokens to generate. - - Different models have different maximum values for this parameter. See - [models](https://docs.anthropic.com/en/docs/models-overview) for details. - metadata: - $ref: '#/components/schemas/CreateMessageRequestMetadata' - stop_sequences: - type: array - description: | - Custom text sequences that will cause the model to stop generating. - - Our models will normally stop when they have naturally completed their turn, - which will result in a response `stop_reason` of `"end_turn"`. - - If you want the model to stop generating when it encounters custom strings of - text, you can use the `stop_sequences` parameter. If the model encounters one of - the custom sequences, the response `stop_reason` value will be `"stop_sequence"` - and the response `stop_sequence` value will contain the matched stop sequence. - items: - type: string - system: - type: string - description: | - System prompt. - - A system prompt is a way of providing context and instructions to Claude, such - as specifying a particular goal or role. See our - [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). - temperature: - type: number - description: | - Amount of randomness injected into the response. - - Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` - for analytical / multiple choice, and closer to `1.0` for creative and - generative tasks. - - Note that even with `temperature` of `0.0`, the results will not be fully - deterministic. - tool_choice: - $ref: '#/components/schemas/ToolChoice' - tools: - type: array - description: | - Definitions of tools that the model may use. 
- - If you include `tools` in your API request, the model may return `tool_use` - content blocks that represent the model's use of those tools. You can then run - those tools using the tool input generated by the model and then optionally - return results back to the model using `tool_result` content blocks. - - Each tool definition includes: - - - `name`: Name of the tool. - - `description`: Optional, but strongly-recommended description of the tool. - - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` - shape that the model will produce in `tool_use` output content blocks. - - For example, if you defined `tools` as: - - ```json - [ - { - "name": "get_stock_price", - "description": "Get the current stock price for a given ticker symbol.", - "input_schema": { - "type": "object", - "properties": { - "ticker": { - "type": "string", - "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." - } - }, - "required": ["ticker"] - } - } - ] - ``` - - And then asked the model "What's the S&P 500 at today?", the model might produce - `tool_use` content blocks in the response like this: - - ```json - [ - { - "type": "tool_use", - "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", - "name": "get_stock_price", - "input": { "ticker": "^GSPC" } - } - ] - ``` - - You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an - input, and return the following back to the model in a subsequent `user` - message: - - ```json - [ - { - "type": "tool_result", - "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", - "content": "259.75 USD" - } - ] - ``` - - Tools can be used for workflows that include running client-side tools and - functions, or more generally whenever you want the model to produce a particular - JSON structure of output. - - See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. - items: - $ref: '#/components/schemas/Tool' - top_k: - type: integer - description: | - Only sample from the top K options for each subsequent token. - - Used to remove "long tail" low probability responses. - [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). - - Recommended for advanced use cases only. You usually only need to use - `temperature`. - top_p: - type: number - description: | - Use nucleus sampling. - - In nucleus sampling, we compute the cumulative distribution over all the options - for each subsequent token in decreasing probability order and cut it off once it - reaches a particular probability specified by `top_p`. You should either alter - `temperature` or `top_p`, but not both. - - Recommended for advanced use cases only. You usually only need to use - `temperature`. - stream: - type: boolean - default: false - description: | - Whether to incrementally stream the response using server-sent events. - - See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for - details. - required: - - model - - messages - - max_tokens - CreateMessageRequestMetadata: - type: object - description: An object describing metadata about the request. - properties: - user_id: - type: string - description: | - An external identifier for the user who is associated with the request. - - This should be a uuid, hash value, or other opaque identifier. Anthropic may use - this id to help detect abuse. Do not include any identifying information such as - name, email address, or phone number. - ToolChoice: - type: object - description: | - How the model should use the provided tools. 
The model can use a specific tool, - any available tool, or decide by itself. - - - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. - - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. - - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. - properties: - type: - $ref: "#/components/schemas/ToolChoiceType" - name: - type: string - description: The name of the tool to use. - required: - - type - ToolChoiceType: - type: string - description: | - How the model should use the provided tools. The model can use a specific tool, - any available tool, or decide by itself. - - - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. - - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. - - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. - enum: - - auto - - any - - tool - Message: - type: object - description: A message in a chat conversation. - properties: - id: - type: string - description: | - Unique object identifier. - - The format and length of IDs may change over time. - content: - description: The content of the message. - oneOf: - - type: string - description: A single text block. - - type: array - description: An array of content blocks. - items: - $ref: "#/components/schemas/Block" - role: - $ref: "#/components/schemas/MessageRole" - model: - type: string - description: The model that handled the request. - stop_reason: - $ref: "#/components/schemas/StopReason" - stop_sequence: - type: string - description: | - Which custom stop sequence was generated, if any. - - This value will be a non-null string if one of your custom stop sequences was - generated. - type: - type: string - description: | - Object type. - - For Messages, this is always `"message"`. - usage: - $ref: "#/components/schemas/Usage" - required: - - content - - role - MessageRole: - type: string - description: The role of the messages author. - enum: - - user - - assistant - Tool: - type: object - description: A tool the model may use. - properties: - name: - type: string - description: The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. - description: - type: string - description: | - Description of what this tool does. - - Tool descriptions should be as detailed as possible. The more information that - the model has about what the tool is and how to use it, the better it will - perform. You can use natural language descriptions to reinforce important - aspects of the tool input JSON schema. - input_schema: - type: object - description: | - [JSON schema](https://json-schema.org/) for this tool's input. - - This defines the shape of the `input` that your tool accepts and that the model - will produce. - additionalProperties: true - required: - - name - - input_schema - Block: - description: A block of content in a message. - oneOf: - - $ref: "#/components/schemas/TextBlock" - - $ref: "#/components/schemas/ImageBlock" - - $ref: "#/components/schemas/ToolUseBlock" - - $ref: "#/components/schemas/ToolResultBlock" - discriminator: - propertyName: type - TextBlock: - type: object - description: A block of text content. - properties: - text: - type: string - description: The text content. - type: - type: string - description: The type of content block. 
- default: text - required: - - text - ImageBlock: - type: object - description: A block of image content. - properties: - source: - $ref: "#/components/schemas/ImageBlockSource" - type: - type: string - description: The type of content block. - default: image - required: - - source - ImageBlockSource: - type: object - description: The source of an image block. - properties: - data: - type: string - description: The base64-encoded image data. - media_type: - type: string - description: The media type of the image. - enum: - - image/jpeg - - image/png - - image/gif - - image/webp - type: - type: string - description: The type of image source. - enum: - - base64 - required: - - data - - media_type - - type - ToolUseBlock: - type: object - description: The tool the model wants to use. - properties: - id: - type: string - description: | - A unique identifier for this particular tool use block. - This will be used to match up the tool results later. - example: toolu_01A09q90qw90lq917835lq9 - name: - type: string - description: The name of the tool being used. - example: get_weather - input: - type: object - description: An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. - additionalProperties: true - type: - type: string - description: The type of content block. - default: tool_use - required: - - id - - name - - input - ToolResultBlock: - type: object - description: The result of using a tool. - properties: - tool_use_id: - type: string - description: The `id` of the tool use request this is a result for. - content: - description: | - The result of the tool, as a string (e.g. `"content": "15 degrees"`) - or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). - These content blocks can use the text or image types. - oneOf: - - type: string - description: A single text block. - - type: array - description: An array of content blocks. - items: - $ref: "#/components/schemas/Block" - is_error: - type: boolean - description: Set to `true` if the tool execution resulted in an error. - type: - type: string - description: The type of content block. - default: tool_result - required: - - tool_use_id - - content - StopReason: - type: string - description: | - The reason that we stopped. - - This may be one the following values: - - - `"end_turn"`: the model reached a natural stopping point - - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum - - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated - - In non-streaming mode this value is always non-null. In streaming mode, it is - null in the `message_start` event and non-null otherwise. - nullable: true - enum: - - end_turn - - max_tokens - - stop_sequence - - tool_use - Usage: - type: object - description: | - Billing and rate-limit usage. - - Anthropic's API bills and rate-limits by token counts, as tokens represent the - underlying cost to our systems. - - Under the hood, the API transforms requests into a format suitable for the - model. The model's output then goes through a parsing stage before becoming an - API response. As a result, the token counts in `usage` will not match one-to-one - with the exact visible content of an API request or response. - - For example, `output_tokens` will be non-zero, even for an empty string response - from Claude. - properties: - input_tokens: - type: integer - description: The number of input tokens which were used. 
- output_tokens: - type: integer - description: The number of output tokens which were used. - required: - - input_tokens - - output_tokens - MessageStreamEvent: - type: object - description: A event in a streaming conversation. - oneOf: - - $ref: "#/components/schemas/MessageStartEvent" - - $ref: "#/components/schemas/MessageDeltaEvent" - - $ref: "#/components/schemas/MessageStopEvent" - - $ref: "#/components/schemas/ContentBlockStartEvent" - - $ref: "#/components/schemas/ContentBlockDeltaEvent" - - $ref: "#/components/schemas/ContentBlockStopEvent" - - $ref: "#/components/schemas/PingEvent" - discriminator: - propertyName: type - MessageStreamEventType: - type: string - description: The type of a streaming event. - enum: - - message_start - - message_delta - - message_stop - - content_block_start - - content_block_delta - - content_block_stop - - ping - MessageStartEvent: - type: object - description: A start event in a streaming conversation. - properties: - message: - $ref: "#/components/schemas/Message" - type: - $ref: "#/components/schemas/MessageStreamEventType" - required: - - message - - type - MessageDeltaEvent: - type: object - description: A delta event in a streaming conversation. - properties: - delta: - $ref: "#/components/schemas/MessageDelta" - type: - $ref: "#/components/schemas/MessageStreamEventType" - usage: - $ref: "#/components/schemas/MessageDeltaUsage" - required: - - delta - - type - - usage - MessageDelta: - type: object - description: A delta in a streaming message. - properties: - stop_reason: - $ref: "#/components/schemas/StopReason" - stop_sequence: - type: string - description: | - Which custom stop sequence was generated, if any. - - This value will be a non-null string if one of your custom stop sequences was - generated. - MessageDeltaUsage: - type: object - description: | - Billing and rate-limit usage. - - Anthropic's API bills and rate-limits by token counts, as tokens represent the - underlying cost to our systems. - - Under the hood, the API transforms requests into a format suitable for the - model. The model's output then goes through a parsing stage before becoming an - API response. As a result, the token counts in `usage` will not match one-to-one - with the exact visible content of an API request or response. - - For example, `output_tokens` will be non-zero, even for an empty string response - from Claude. - properties: - output_tokens: - type: integer - description: The cumulative number of output tokens which were used. - required: - - output_tokens - MessageStopEvent: - type: object - description: A stop event in a streaming conversation. - properties: - type: - $ref: "#/components/schemas/MessageStreamEventType" - required: - - type - ContentBlockStartEvent: - type: object - description: A start event in a streaming content block. - properties: - content_block: - $ref: "#/components/schemas/Block" - index: - type: integer - description: The index of the content block. - type: - $ref: "#/components/schemas/MessageStreamEventType" - required: - - content_block - - index - - type - ContentBlockDeltaEvent: - type: object - description: A delta event in a streaming content block. - properties: - delta: - $ref: "#/components/schemas/BlockDelta" - index: - type: integer - description: The index of the content block. - type: - $ref: "#/components/schemas/MessageStreamEventType" - required: - - delta - - index - - type - BlockDelta: - description: A delta in a streaming message. 
- oneOf: - - $ref: "#/components/schemas/TextBlockDelta" - - $ref: "#/components/schemas/InputJsonBlockDelta" - discriminator: - propertyName: type - TextBlockDelta: - type: object - description: A delta in a streaming text block. - properties: - text: - type: string - description: The text delta. - type: - type: string - description: The type of content block. - default: text_delta - required: - - text - - type - InputJsonBlockDelta: - type: object - description: A delta in a streaming input JSON. - properties: - partial_json: - type: string - description: The partial JSON delta. - type: - type: string - description: The type of content block. - default: input_json_delta - required: - - text - - type - ContentBlockStopEvent: - type: object - description: A stop event in a streaming content block. - properties: - index: - type: integer - description: The index of the content block. - type: - $ref: "#/components/schemas/MessageStreamEventType" - required: - - index - - type - PingEvent: - type: object - description: A ping event in a streaming conversation. - properties: - type: - $ref: "#/components/schemas/MessageStreamEventType" - required: - - type - -security: - - ApiKeyAuth: [ ] diff --git a/packages/anthropic_sdk_dart/oas/main.dart b/packages/anthropic_sdk_dart/oas/main.dart deleted file mode 100644 index 316cc26c..00000000 --- a/packages/anthropic_sdk_dart/oas/main.dart +++ /dev/null @@ -1,60 +0,0 @@ -import 'dart:io'; - -import 'package:openapi_spec/openapi_spec.dart'; - -/// Generates Anthropic API client Dart code from the OpenAPI spec. -/// https://docs.anthropic.com/en/api -void main() async { - final spec = OpenApi.fromFile(source: 'oas/anthropic_openapi_curated.yaml'); - - await spec.generate( - package: 'Anthropic', - destination: 'lib/src/generated/', - replace: true, - schemaOptions: const SchemaGeneratorOptions( - onSchemaName: _onSchemaName, - onSchemaUnionFactoryName: _onSchemaUnionFactoryName, - ), - clientOptions: const ClientGeneratorOptions( - enabled: true, - ), - ); - - await Process.run( - 'dart', - ['run', 'build_runner', 'build', 'lib', '--delete-conflicting-outputs'], - ); -} - -String? _onSchemaName(final String schemaName) => switch (schemaName) { - 'ModelEnumeration' => 'ModelCatalog', - 'ModelString' => 'ModelId', - 'MessageContentString' => 'MessageContentText', - 'MessageContentListBlock' => 'MessageContentBlocks', - 'ToolResultBlockContentListBlock' => 'ToolResultBlockContentBlocks', - 'ToolResultBlockContentString' => 'ToolResultBlockContentText', - _ => schemaName, - }; - -String? 
_onSchemaUnionFactoryName( - final String union, - final String unionSubclass, -) => - switch (unionSubclass) { - 'ModelCatalog' => 'model', - 'ModelId' => 'modelId', - 'MessageContentText' => 'text', - 'MessageContentBlocks' => 'blocks', - 'ToolResultBlockContentBlocks' => 'blocks', - 'ToolResultBlockContentText' => 'text', - 'TextBlockDelta' => 'textDelta', - 'InputJsonBlockDelta' => 'inputJsonDelta', - 'MessageStartEvent' => 'messageStart', - 'MessageDeltaEvent' => 'messageDelta', - 'MessageStopEvent' => 'messageStop', - 'ContentBlockStartEvent' => 'contentBlockStart', - 'ContentBlockDeltaEvent' => 'contentBlockDelta', - 'ContentBlockStopEvent' => 'contentBlockStop', - 'PingEvent' => 'ping', - _ => null, - }; diff --git a/packages/anthropic_sdk_dart/pubspec.lock b/packages/anthropic_sdk_dart/pubspec.lock deleted file mode 100644 index 6e950ea1..00000000 --- a/packages/anthropic_sdk_dart/pubspec.lock +++ /dev/null @@ -1,627 +0,0 @@ -# Generated by pub -# See https://dart.dev/tools/pub/glossary#lockfile -packages: - _fe_analyzer_shared: - dependency: transitive - description: - name: _fe_analyzer_shared - sha256: "45cfa8471b89fb6643fe9bf51bd7931a76b8f5ec2d65de4fb176dba8d4f22c77" - url: "https://pub.dev" - source: hosted - version: "73.0.0" - _macros: - dependency: transitive - description: dart - source: sdk - version: "0.3.2" - analyzer: - dependency: transitive - description: - name: analyzer - sha256: "4959fec185fe70cce007c57e9ab6983101dbe593d2bf8bbfb4453aaec0cf470a" - url: "https://pub.dev" - source: hosted - version: "6.8.0" - args: - dependency: transitive - description: - name: args - sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" - url: "https://pub.dev" - source: hosted - version: "2.5.0" - async: - dependency: transitive - description: - name: async - sha256: "947bfcf187f74dbc5e146c9eb9c0f10c9f8b30743e341481c1e2ed3ecc18c20c" - url: "https://pub.dev" - source: hosted - version: "2.11.0" - boolean_selector: - dependency: transitive - description: - name: boolean_selector - sha256: "6cfb5af12253eaf2b368f07bacc5a80d1301a071c73360d746b7f2e32d762c66" - url: "https://pub.dev" - source: hosted - version: "2.1.1" - build: - dependency: transitive - description: - name: build - sha256: "80184af8b6cb3e5c1c4ec6d8544d27711700bc3e6d2efad04238c7b5290889f0" - url: "https://pub.dev" - source: hosted - version: "2.4.1" - build_config: - dependency: transitive - description: - name: build_config - sha256: bf80fcfb46a29945b423bd9aad884590fb1dc69b330a4d4700cac476af1708d1 - url: "https://pub.dev" - source: hosted - version: "1.1.1" - build_daemon: - dependency: transitive - description: - name: build_daemon - sha256: "79b2aef6ac2ed00046867ed354c88778c9c0f029df8a20fe10b5436826721ef9" - url: "https://pub.dev" - source: hosted - version: "4.0.2" - build_resolvers: - dependency: transitive - description: - name: build_resolvers - sha256: "339086358431fa15d7eca8b6a36e5d783728cf025e559b834f4609a1fcfb7b0a" - url: "https://pub.dev" - source: hosted - version: "2.4.2" - build_runner: - dependency: "direct dev" - description: - name: build_runner - sha256: dd09dd4e2b078992f42aac7f1a622f01882a8492fef08486b27ddde929c19f04 - url: "https://pub.dev" - source: hosted - version: "2.4.12" - build_runner_core: - dependency: transitive - description: - name: build_runner_core - sha256: f8126682b87a7282a339b871298cc12009cb67109cfa1614d6436fb0289193e0 - url: "https://pub.dev" - source: hosted - version: "7.3.2" - built_collection: - dependency: transitive - description: - name: built_collection 
- sha256: "376e3dd27b51ea877c28d525560790aee2e6fbb5f20e2f85d5081027d94e2100" - url: "https://pub.dev" - source: hosted - version: "5.1.1" - built_value: - dependency: transitive - description: - name: built_value - sha256: c7913a9737ee4007efedaffc968c049fd0f3d0e49109e778edc10de9426005cb - url: "https://pub.dev" - source: hosted - version: "8.9.2" - checked_yaml: - dependency: transitive - description: - name: checked_yaml - sha256: feb6bed21949061731a7a75fc5d2aa727cf160b91af9a3e464c5e3a32e28b5ff - url: "https://pub.dev" - source: hosted - version: "2.0.3" - clock: - dependency: transitive - description: - name: clock - sha256: cb6d7f03e1de671e34607e909a7213e31d7752be4fb66a86d29fe1eb14bfb5cf - url: "https://pub.dev" - source: hosted - version: "1.1.1" - code_builder: - dependency: transitive - description: - name: code_builder - sha256: f692079e25e7869c14132d39f223f8eec9830eb76131925143b2129c4bb01b37 - url: "https://pub.dev" - source: hosted - version: "4.10.0" - collection: - dependency: transitive - description: - name: collection - sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf - url: "https://pub.dev" - source: hosted - version: "1.19.0" - convert: - dependency: transitive - description: - name: convert - sha256: "0f08b14755d163f6e2134cb58222dd25ea2a2ee8a195e53983d57c075324d592" - url: "https://pub.dev" - source: hosted - version: "3.1.1" - coverage: - dependency: transitive - description: - name: coverage - sha256: "576aaab8b1abdd452e0f656c3e73da9ead9d7880e15bdc494189d9c1a1baf0db" - url: "https://pub.dev" - source: hosted - version: "1.9.0" - crypto: - dependency: transitive - description: - name: crypto - sha256: ff625774173754681d66daaf4a448684fb04b78f902da9cb3d308c19cc5e8bab - url: "https://pub.dev" - source: hosted - version: "3.0.3" - dart_style: - dependency: transitive - description: - name: dart_style - sha256: "99e066ce75c89d6b29903d788a7bb9369cf754f7b24bf70bf4b6d6d6b26853b9" - url: "https://pub.dev" - source: hosted - version: "2.3.6" - fetch_api: - dependency: transitive - description: - name: fetch_api - sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" - url: "https://pub.dev" - source: hosted - version: "2.2.0" - fetch_client: - dependency: "direct main" - description: - name: fetch_client - sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" - url: "https://pub.dev" - source: hosted - version: "1.1.2" - file: - dependency: transitive - description: - name: file - sha256: "5fc22d7c25582e38ad9a8515372cd9a93834027aacf1801cf01164dac0ffa08c" - url: "https://pub.dev" - source: hosted - version: "7.0.0" - fixnum: - dependency: transitive - description: - name: fixnum - sha256: "25517a4deb0c03aa0f32fd12db525856438902d9c16536311e76cdc57b31d7d1" - url: "https://pub.dev" - source: hosted - version: "1.1.0" - freezed: - dependency: "direct dev" - description: - name: freezed - sha256: "44c19278dd9d89292cf46e97dc0c1e52ce03275f40a97c5a348e802a924bf40e" - url: "https://pub.dev" - source: hosted - version: "2.5.7" - freezed_annotation: - dependency: "direct main" - description: - name: freezed_annotation - sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 - url: "https://pub.dev" - source: hosted - version: "2.4.4" - frontend_server_client: - dependency: transitive - description: - name: frontend_server_client - sha256: f64a0333a82f30b0cca061bc3d143813a486dc086b574bfb233b7c1372427694 - url: "https://pub.dev" - source: hosted - version: "4.0.0" - glob: - dependency: transitive - description: 
- name: glob - sha256: "0e7014b3b7d4dac1ca4d6114f82bf1782ee86745b9b42a92c9289c23d8a0ab63" - url: "https://pub.dev" - source: hosted - version: "2.1.2" - graphs: - dependency: transitive - description: - name: graphs - sha256: "741bbf84165310a68ff28fe9e727332eef1407342fca52759cb21ad8177bb8d0" - url: "https://pub.dev" - source: hosted - version: "2.3.2" - http: - dependency: "direct main" - description: - name: http - sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 - url: "https://pub.dev" - source: hosted - version: "1.2.2" - http_multi_server: - dependency: transitive - description: - name: http_multi_server - sha256: "97486f20f9c2f7be8f514851703d0119c3596d14ea63227af6f7a481ef2b2f8b" - url: "https://pub.dev" - source: hosted - version: "3.2.1" - http_parser: - dependency: transitive - description: - name: http_parser - sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" - url: "https://pub.dev" - source: hosted - version: "4.1.0" - intl: - dependency: transitive - description: - name: intl - sha256: d6f56758b7d3014a48af9701c085700aac781a92a87a62b1333b46d8879661cf - url: "https://pub.dev" - source: hosted - version: "0.19.0" - io: - dependency: transitive - description: - name: io - sha256: "2ec25704aba361659e10e3e5f5d672068d332fc8ac516421d483a11e5cbd061e" - url: "https://pub.dev" - source: hosted - version: "1.0.4" - js: - dependency: transitive - description: - name: js - sha256: c1b2e9b5ea78c45e1a0788d29606ba27dc5f71f019f32ca5140f61ef071838cf - url: "https://pub.dev" - source: hosted - version: "0.7.1" - json_annotation: - dependency: "direct main" - description: - name: json_annotation - sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" - url: "https://pub.dev" - source: hosted - version: "4.9.0" - json_serializable: - dependency: "direct dev" - description: - name: json_serializable - sha256: ea1432d167339ea9b5bb153f0571d0039607a873d6e04e0117af043f14a1fd4b - url: "https://pub.dev" - source: hosted - version: "6.8.0" - logging: - dependency: transitive - description: - name: logging - sha256: "623a88c9594aa774443aa3eb2d41807a48486b5613e67599fb4c41c0ad47c340" - url: "https://pub.dev" - source: hosted - version: "1.2.0" - macros: - dependency: transitive - description: - name: macros - sha256: "0acaed5d6b7eab89f63350bccd82119e6c602df0f391260d0e32b5e23db79536" - url: "https://pub.dev" - source: hosted - version: "0.1.2-main.4" - matcher: - dependency: transitive - description: - name: matcher - sha256: d2323aa2060500f906aa31a895b4030b6da3ebdcc5619d14ce1aada65cd161cb - url: "https://pub.dev" - source: hosted - version: "0.12.16+1" - meta: - dependency: "direct main" - description: - name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 - url: "https://pub.dev" - source: hosted - version: "1.15.0" - mime: - dependency: transitive - description: - name: mime - sha256: "2e123074287cc9fd6c09de8336dae606d1ddb88d9ac47358826db698c176a1f2" - url: "https://pub.dev" - source: hosted - version: "1.0.5" - node_preamble: - dependency: transitive - description: - name: node_preamble - sha256: "6e7eac89047ab8a8d26cf16127b5ed26de65209847630400f9aefd7cd5c730db" - url: "https://pub.dev" - source: hosted - version: "2.0.2" - openapi_spec: - dependency: "direct dev" - description: - path: "." 
- ref: "93230a5e346b02789f0f727da8eecea9c7bdf118" - resolved-ref: "93230a5e346b02789f0f727da8eecea9c7bdf118" - url: "https://github.com/davidmigloz/openapi_spec.git" - source: git - version: "0.7.8" - package_config: - dependency: transitive - description: - name: package_config - sha256: "1c5b77ccc91e4823a5af61ee74e6b972db1ef98c2ff5a18d3161c982a55448bd" - url: "https://pub.dev" - source: hosted - version: "2.1.0" - path: - dependency: transitive - description: - name: path - sha256: "087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" - url: "https://pub.dev" - source: hosted - version: "1.9.0" - pool: - dependency: transitive - description: - name: pool - sha256: "20fe868b6314b322ea036ba325e6fc0711a22948856475e2c2b6306e8ab39c2a" - url: "https://pub.dev" - source: hosted - version: "1.5.1" - pub_semver: - dependency: transitive - description: - name: pub_semver - sha256: "40d3ab1bbd474c4c2328c91e3a7df8c6dd629b79ece4c4bd04bee496a224fb0c" - url: "https://pub.dev" - source: hosted - version: "2.1.4" - pubspec_parse: - dependency: transitive - description: - name: pubspec_parse - sha256: c799b721d79eb6ee6fa56f00c04b472dcd44a30d258fac2174a6ec57302678f8 - url: "https://pub.dev" - source: hosted - version: "1.3.0" - recase: - dependency: transitive - description: - name: recase - sha256: e4eb4ec2dcdee52dcf99cb4ceabaffc631d7424ee55e56f280bc039737f89213 - url: "https://pub.dev" - source: hosted - version: "4.1.0" - shelf: - dependency: transitive - description: - name: shelf - sha256: e7dd780a7ffb623c57850b33f43309312fc863fb6aa3d276a754bb299839ef12 - url: "https://pub.dev" - source: hosted - version: "1.4.2" - shelf_packages_handler: - dependency: transitive - description: - name: shelf_packages_handler - sha256: "89f967eca29607c933ba9571d838be31d67f53f6e4ee15147d5dc2934fee1b1e" - url: "https://pub.dev" - source: hosted - version: "3.0.2" - shelf_static: - dependency: transitive - description: - name: shelf_static - sha256: a41d3f53c4adf0f57480578c1d61d90342cd617de7fc8077b1304643c2d85c1e - url: "https://pub.dev" - source: hosted - version: "1.1.2" - shelf_web_socket: - dependency: transitive - description: - name: shelf_web_socket - sha256: "073c147238594ecd0d193f3456a5fe91c4b0abbcc68bf5cd95b36c4e194ac611" - url: "https://pub.dev" - source: hosted - version: "2.0.0" - source_gen: - dependency: transitive - description: - name: source_gen - sha256: "14658ba5f669685cd3d63701d01b31ea748310f7ab854e471962670abcf57832" - url: "https://pub.dev" - source: hosted - version: "1.5.0" - source_helper: - dependency: transitive - description: - name: source_helper - sha256: "6adebc0006c37dd63fe05bca0a929b99f06402fc95aa35bf36d67f5c06de01fd" - url: "https://pub.dev" - source: hosted - version: "1.3.4" - source_map_stack_trace: - dependency: transitive - description: - name: source_map_stack_trace - sha256: "84cf769ad83aa6bb61e0aa5a18e53aea683395f196a6f39c4c881fb90ed4f7ae" - url: "https://pub.dev" - source: hosted - version: "2.1.1" - source_maps: - dependency: transitive - description: - name: source_maps - sha256: "708b3f6b97248e5781f493b765c3337db11c5d2c81c3094f10904bfa8004c703" - url: "https://pub.dev" - source: hosted - version: "0.10.12" - source_span: - dependency: transitive - description: - name: source_span - sha256: "53e943d4206a5e30df338fd4c6e7a077e02254531b138a15aec3bd143c1a8b3c" - url: "https://pub.dev" - source: hosted - version: "1.10.0" - stack_trace: - dependency: transitive - description: - name: stack_trace - sha256: 
"73713990125a6d93122541237550ee3352a2d84baad52d375a4cad2eb9b7ce0b" - url: "https://pub.dev" - source: hosted - version: "1.11.1" - stream_channel: - dependency: transitive - description: - name: stream_channel - sha256: ba2aa5d8cc609d96bbb2899c28934f9e1af5cddbd60a827822ea467161eb54e7 - url: "https://pub.dev" - source: hosted - version: "2.1.2" - stream_transform: - dependency: transitive - description: - name: stream_transform - sha256: "14a00e794c7c11aa145a170587321aedce29769c08d7f58b1d141da75e3b1c6f" - url: "https://pub.dev" - source: hosted - version: "2.1.0" - string_scanner: - dependency: transitive - description: - name: string_scanner - sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" - url: "https://pub.dev" - source: hosted - version: "1.3.0" - term_glyph: - dependency: transitive - description: - name: term_glyph - sha256: a29248a84fbb7c79282b40b8c72a1209db169a2e0542bce341da992fe1bc7e84 - url: "https://pub.dev" - source: hosted - version: "1.2.1" - test: - dependency: "direct dev" - description: - name: test - sha256: "713a8789d62f3233c46b4a90b174737b2c04cb6ae4500f2aa8b1be8f03f5e67f" - url: "https://pub.dev" - source: hosted - version: "1.25.8" - test_api: - dependency: transitive - description: - name: test_api - sha256: "664d3a9a64782fcdeb83ce9c6b39e78fd2971d4e37827b9b06c3aa1edc5e760c" - url: "https://pub.dev" - source: hosted - version: "0.7.3" - test_core: - dependency: transitive - description: - name: test_core - sha256: "12391302411737c176b0b5d6491f466b0dd56d4763e347b6714efbaa74d7953d" - url: "https://pub.dev" - source: hosted - version: "0.6.5" - timing: - dependency: transitive - description: - name: timing - sha256: "70a3b636575d4163c477e6de42f247a23b315ae20e86442bebe32d3cabf61c32" - url: "https://pub.dev" - source: hosted - version: "1.0.1" - typed_data: - dependency: transitive - description: - name: typed_data - sha256: facc8d6582f16042dd49f2463ff1bd6e2c9ef9f3d5da3d9b087e244a7b564b3c - url: "https://pub.dev" - source: hosted - version: "1.3.2" - vm_service: - dependency: transitive - description: - name: vm_service - sha256: f652077d0bdf60abe4c1f6377448e8655008eef28f128bc023f7b5e8dfeb48fc - url: "https://pub.dev" - source: hosted - version: "14.2.4" - watcher: - dependency: transitive - description: - name: watcher - sha256: "3d2ad6751b3c16cf07c7fca317a1413b3f26530319181b37e3b9039b84fc01d8" - url: "https://pub.dev" - source: hosted - version: "1.1.0" - web: - dependency: transitive - description: - name: web - sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 - url: "https://pub.dev" - source: hosted - version: "1.0.0" - web_socket: - dependency: transitive - description: - name: web_socket - sha256: "3c12d96c0c9a4eec095246debcea7b86c0324f22df69893d538fcc6f1b8cce83" - url: "https://pub.dev" - source: hosted - version: "0.1.6" - web_socket_channel: - dependency: transitive - description: - name: web_socket_channel - sha256: "9f187088ed104edd8662ca07af4b124465893caf063ba29758f97af57e61da8f" - url: "https://pub.dev" - source: hosted - version: "3.0.1" - webkit_inspection_protocol: - dependency: transitive - description: - name: webkit_inspection_protocol - sha256: "87d3f2333bb240704cd3f1c6b5b7acd8a10e7f0bc28c28dcf14e782014f4a572" - url: "https://pub.dev" - source: hosted - version: "1.2.1" - yaml: - dependency: transitive - description: - name: yaml - sha256: "75769501ea3489fca56601ff33454fe45507ea3bfb014161abc3b43ae25989d5" - url: "https://pub.dev" - source: hosted - version: "3.1.2" -sdks: - dart: ">=3.5.0-259.0.dev 
<4.0.0" diff --git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml deleted file mode 100644 index 84376d27..00000000 --- a/packages/anthropic_sdk_dart/pubspec.yaml +++ /dev/null @@ -1,34 +0,0 @@ -name: anthropic_sdk_dart -description: Dart Client for the Anthropic API (Claude 3.5 Sonnet, Opus, Haiku, etc.). -version: 0.1.0 -repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/anthropic_sdk_dart -issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:anthropic_sdk_dart -homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev - -topics: - - ai - - nlp - - llms - - anthropic - -environment: - sdk: ">=3.4.0 <4.0.0" - -dependencies: - fetch_client: ^1.1.2 - freezed_annotation: ^2.4.2 - http: ^1.2.2 - json_annotation: ^4.9.0 - meta: ^1.11.0 - -dev_dependencies: - build_runner: ^2.4.11 - freezed: ^2.5.7 - json_serializable: ^6.8.0 - # openapi_spec: ^0.7.8 - openapi_spec: - git: - url: https://github.com/davidmigloz/openapi_spec.git - ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 - test: ^1.25.8 diff --git a/packages/anthropic_sdk_dart/test/messages_test.dart b/packages/anthropic_sdk_dart/test/messages_test.dart deleted file mode 100644 index 648ad7f4..00000000 --- a/packages/anthropic_sdk_dart/test/messages_test.dart +++ /dev/null @@ -1,320 +0,0 @@ -// ignore_for_file: avoid_print -@TestOn('vm') -library; // Uses dart:io - -import 'dart:convert'; -import 'dart:io'; - -import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; -import 'package:test/test.dart'; - -void main() { - group('Anthropic Messages API tests', () { - late AnthropicClient client; - - setUp(() async { - client = AnthropicClient( - apiKey: Platform.environment['ANTHROPIC_API_KEY'], - ); - }); - - tearDown(() { - client.endSession(); - }); - - test('Test call messages API', timeout: const Timeout(Duration(minutes: 5)), - () async { - const models = Models.values; - for (final model in models) { - print('Testing model: ${model.name}'); - final res = await client.createMessage( - request: CreateMessageRequest( - model: Model.model(model), - temperature: 0, - maxTokens: 1024, - system: - 'You are a helpful assistant that replies only with numbers ' - 'in order without any spaces, commas or additional explanations.', - messages: const [ - Message( - role: MessageRole.user, - content: MessageContent.text( - 'List the numbers from 1 to 9 in order.', - ), - ), - ], - ), - ); - expect(res.id, isNotEmpty); - expect( - res.content.text.replaceAll(RegExp(r'[\s\n]'), ''), - contains('123456789'), - ); - expect(res.role, MessageRole.assistant); - expect( - res.model?.replaceAll(RegExp(r'[-.]'), ''), - model.name.toLowerCase(), - ); - expect(res.stopReason, StopReason.endTurn); - expect(res.stopSequence, isNull); - expect(res.type, 'message'); - expect(res.usage?.inputTokens, greaterThan(0)); - expect(res.usage?.outputTokens, greaterThan(0)); - await Future.delayed( - const Duration(seconds: 5), - ); // To avoid rate limit - } - }); - - test('Test call messages streaming API', - timeout: const Timeout(Duration(minutes: 5)), () async { - final stream = client.createMessageStream( - request: const CreateMessageRequest( - model: Model.model(Models.claudeInstant12), - temperature: 0, - maxTokens: 1024, - system: 'You are a helpful assistant that replies only with numbers ' - 'in order without any spaces, commas or additional explanations.', - messages: [ - Message( - role: MessageRole.user, - content: 
MessageContent.text( - 'List the numbers from 1 to 9 in order.', - ), - ), - ], - ), - ); - String text = ''; - await for (final res in stream) { - res.map( - messageStart: (v) { - expect(res.type, MessageStreamEventType.messageStart); - expect(v.message.id, isNotEmpty); - expect(v.message.role, MessageRole.assistant); - expect( - v.message.model?.replaceAll(RegExp(r'[-.]'), ''), - Models.claudeInstant12.name.toLowerCase(), - ); - expect(v.message.stopReason, isNull); - expect(v.message.stopSequence, isNull); - expect(v.message.usage?.inputTokens, greaterThan(0)); - expect(v.message.usage?.outputTokens, greaterThan(0)); - }, - messageDelta: (v) { - expect(res.type, MessageStreamEventType.messageDelta); - expect(v.delta.stopReason, StopReason.endTurn); - expect(v.usage.outputTokens, greaterThan(0)); - }, - messageStop: (v) { - expect(res.type, MessageStreamEventType.messageStop); - }, - contentBlockStart: (v) { - expect(res.type, MessageStreamEventType.contentBlockStart); - expect(v.index, 0); - expect(v.contentBlock.text, isNotNull); - expect(v.contentBlock.type, 'text'); - }, - contentBlockDelta: (v) { - expect(res.type, MessageStreamEventType.contentBlockDelta); - expect(v.index, greaterThanOrEqualTo(0)); - expect(v.delta.text, isNotEmpty); - expect(v.delta.type, 'text_delta'); - text += v.delta - .mapOrNull(textDelta: (v) => v.text) - ?.replaceAll(RegExp(r'[\s\n]'), '') ?? - ''; - }, - contentBlockStop: (v) { - expect(res.type, MessageStreamEventType.contentBlockStop); - expect(v.index, greaterThanOrEqualTo(0)); - }, - ping: (PingEvent v) { - expect(res.type, MessageStreamEventType.ping); - }, - ); - } - expect(text, contains('123456789')); - }); - - test('Test response max tokens', () async { - const request = CreateMessageRequest( - model: Model.model(Models.claudeInstant12), - maxTokens: 1, - messages: [ - Message( - role: MessageRole.user, - content: MessageContent.text( - 'Tell me a joke.', - ), - ), - ], - ); - - final res = await client.createMessage(request: request); - expect(res.stopReason, StopReason.maxTokens); - }); - - const tool = Tool( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. 
San Francisco, CA', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - - test('Test tool use', () async { - final request1 = CreateMessageRequest( - model: const Model.model(Models.claude35Sonnet20240620), - messages: [ - const Message( - role: MessageRole.user, - content: MessageContent.text( - 'What’s the weather like in Boston right now?', - ), - ), - ], - tools: [tool], - toolChoice: ToolChoice( - type: ToolChoiceType.tool, - name: tool.name, - ), - maxTokens: 1024, - ); - final aiMessage1 = await client.createMessage(request: request1); - expect(aiMessage1.role, MessageRole.assistant); - - var toolUse = aiMessage1.content.blocks.first; - expect(toolUse, isA()); - toolUse = toolUse as ToolUseBlock; - - expect(toolUse.name, tool.name); - expect(toolUse.input, isNotEmpty); - expect(toolUse.input.containsKey('location'), isTrue); - expect(toolUse.input['location'], contains('Boston')); - - final toolResult = json.encode({ - 'temperature': '22', - 'unit': 'celsius', - 'description': 'Sunny', - }); - - final request2 = CreateMessageRequest( - model: const Model.model(Models.claude35Sonnet20240620), - messages: [ - const Message( - role: MessageRole.user, - content: MessageContent.text( - 'What’s the weather like in Boston right now?', - ), - ), - Message( - role: MessageRole.assistant, - content: aiMessage1.content, - ), - Message( - role: MessageRole.user, - content: MessageContent.blocks([ - Block.toolResult( - toolUseId: toolUse.id, - content: ToolResultBlockContent.text(toolResult), - ), - ]), - ), - ], - tools: [tool], - maxTokens: 1024, - ); - final aiMessage2 = await client.createMessage(request: request2); - - expect(aiMessage2.role, MessageRole.assistant); - expect(aiMessage2.content.text, contains('22')); - }); - - test('Test tool use streaming', - timeout: const Timeout(Duration(minutes: 5)), () async { - final request1 = CreateMessageRequest( - model: const Model.model(Models.claude35Sonnet20240620), - messages: [ - const Message( - role: MessageRole.user, - content: MessageContent.text( - 'What’s the weather like in Boston right now in Celsius?', - ), - ), - ], - tools: [tool], - toolChoice: ToolChoice( - type: ToolChoiceType.tool, - name: tool.name, - ), - maxTokens: 1024, - ); - final stream = client.createMessageStream( - request: request1, - ); - String inputJson = ''; - await for (final res in stream) { - res.map( - messageStart: (v) { - expect(res.type, MessageStreamEventType.messageStart); - expect(v.message.id, isNotEmpty); - expect(v.message.role, MessageRole.assistant); - expect( - v.message.model?.replaceAll(RegExp(r'[-.]'), ''), - Models.claude35Sonnet20240620.name.toLowerCase(), - ); - expect(v.message.stopReason, isNull); - expect(v.message.stopSequence, isNull); - expect(v.message.usage?.inputTokens, greaterThan(0)); - expect(v.message.usage?.outputTokens, greaterThan(0)); - }, - messageDelta: (v) { - expect(res.type, MessageStreamEventType.messageDelta); - expect(v.delta.stopReason, StopReason.toolUse); - expect(v.usage.outputTokens, greaterThan(0)); - }, - messageStop: (v) { - expect(res.type, MessageStreamEventType.messageStop); - }, - contentBlockStart: (v) { - expect(res.type, MessageStreamEventType.contentBlockStart); - expect(v.index, 0); - expect(v.contentBlock.type, 'tool_use'); - expect(v.contentBlock.toolUse, isNotNull); - expect(v.contentBlock.toolUse!.id, isNotEmpty); - expect(v.contentBlock.toolUse!.name, tool.name); - }, - 
contentBlockDelta: (v) { - expect(res.type, MessageStreamEventType.contentBlockDelta); - expect(v.index, greaterThanOrEqualTo(0)); - expect(v.delta.type, 'input_json_delta'); - inputJson += v.delta.inputJson; - }, - contentBlockStop: (v) { - expect(res.type, MessageStreamEventType.contentBlockStop); - expect(v.index, greaterThanOrEqualTo(0)); - }, - ping: (PingEvent v) { - expect(res.type, MessageStreamEventType.ping); - }, - ); - } - final input = json.decode(inputJson) as Map; - expect(input['location'], contains('Boston')); - expect(input['unit'], 'celsius'); - }); - }); -} diff --git a/packages/chromadb/CHANGELOG.md b/packages/chromadb/CHANGELOG.md index 70f441bd..899efe6f 100644 --- a/packages/chromadb/CHANGELOG.md +++ b/packages/chromadb/CHANGELOG.md @@ -1,11 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.2.0+1 - - - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) - ## 0.2.0 - **FIX**: Have the == implementation use Object instead of dynamic ([#334](https://github.com/davidmigloz/langchain_dart/issues/334)). ([89f7b0b9](https://github.com/davidmigloz/langchain_dart/commit/89f7b0b94144c216de19ec7244c48f3c34c2c635)) diff --git a/packages/chromadb/pubspec.yaml b/packages/chromadb/pubspec.yaml index eb6a6f29..7a218b9c 100644 --- a/packages/chromadb/pubspec.yaml +++ b/packages/chromadb/pubspec.yaml @@ -1,10 +1,10 @@ name: chromadb description: Dart Client for the Chroma open-source embedding database API. -version: 0.2.0+1 +version: 0.2.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/chromadb issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -12,20 +12,20 @@ topics: - vector-db environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - freezed_annotation: ^2.4.2 - http: ^1.2.2 - json_annotation: ^4.9.0 + freezed_annotation: ^2.4.1 + http: ^1.1.0 + json_annotation: ^4.8.1 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.11 - freezed: ^2.5.7 - json_serializable: ^6.8.0 + build_runner: ^2.4.9 + freezed: ^2.4.7 + json_serializable: ^6.7.1 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 - test: ^1.25.8 + ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + test: ^1.25.2 diff --git a/packages/googleai_dart/CHANGELOG.md b/packages/googleai_dart/CHANGELOG.md index 7bc6e29d..8277d0d5 100644 --- a/packages/googleai_dart/CHANGELOG.md +++ b/packages/googleai_dart/CHANGELOG.md @@ -1,15 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.1.0+2 - - - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). 
([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) - -## 0.1.0+1 - - - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) - ## 0.1.0 - **REFACTOR**: Minor changes ([#407](https://github.com/davidmigloz/langchain_dart/issues/407)). ([fa4b5c37](https://github.com/davidmigloz/langchain_dart/commit/fa4b5c376a191fea50c3f8b1d6b07cef0480a74e)) diff --git a/packages/googleai_dart/lib/googleai_dart.dart b/packages/googleai_dart/lib/googleai_dart.dart index f0807211..e673e9d9 100644 --- a/packages/googleai_dart/lib/googleai_dart.dart +++ b/packages/googleai_dart/lib/googleai_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Google AI API (Gemini Pro, Gemini Pro Vision, embeddings, etc.). -library; +library googleai_dart; export 'src/client.dart'; export 'src/generated/client.dart' show GoogleAIClientException; diff --git a/packages/googleai_dart/lib/src/http_client/http_client.dart b/packages/googleai_dart/lib/src/http_client/http_client.dart index 0ad0b2fc..99555ca4 100644 --- a/packages/googleai_dart/lib/src/http_client/http_client.dart +++ b/packages/googleai_dart/lib/src/http_client/http_client.dart @@ -1,3 +1,4 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js_interop) 'http_client_html.dart'; + if (dart.library.js) 'http_client_html.dart' + if (dart.library.html) 'http_client_html.dart'; diff --git a/packages/googleai_dart/pubspec.yaml b/packages/googleai_dart/pubspec.yaml index ee294296..ca8f0f00 100644 --- a/packages/googleai_dart/pubspec.yaml +++ b/packages/googleai_dart/pubspec.yaml @@ -1,10 +1,10 @@ name: googleai_dart description: Dart Client for the Google AI API (Gemini Pro, Gemini Pro Vision, embeddings, etc.). -version: 0.1.0+2 +version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/googleai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:googleai_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -14,22 +14,22 @@ topics: - gemini environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - fetch_client: ^1.1.2 - freezed_annotation: ^2.4.2 - http: ^1.2.2 - json_annotation: ^4.9.0 + fetch_client: ^1.0.2 + freezed_annotation: ^2.4.1 + http: ^1.1.0 + json_annotation: ^4.8.1 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.11 - freezed: ^2.5.7 - json_serializable: ^6.8.0 + build_runner: ^2.4.9 + freezed: ^2.4.7 + json_serializable: ^6.7.1 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 - test: ^1.25.8 + ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + test: ^1.25.2 diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index b5ee86a2..47e5a89d 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -1,43 +1,6 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. 
- ---- - -## 0.7.6 - - - **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) - -## 0.7.5 - - - **FEAT**: Add ToolsAgent for models with tool-calling support ([#530](https://github.com/davidmigloz/langchain_dart/issues/530)). ([f3ee5b44](https://github.com/davidmigloz/langchain_dart/commit/f3ee5b44c4ffa378343ec4ee1e08d8e594a6cb36)) - - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) - - **DOCS**: Add Code Assist AI in README and documentation ([#538](https://github.com/davidmigloz/langchain_dart/issues/538)). ([e752464c](https://github.com/davidmigloz/langchain_dart/commit/e752464c0d2fc7e0ccc878933b0ef934c9527567)) - -## 0.7.4 - - - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - - **DOCS**: Update README.md with Ollama tool call support. ([e016b0bd](https://github.com/davidmigloz/langchain_dart/commit/e016b0bd02065971faab2a3a48be625ff33a08cf)) - -## 0.7.3 - -> Note: Anthropic integration (`ChatAnthropic`) is now available in the new [`langchain_anthropic`](https://pub.dev/packages/langchain_anthropic) package. - -- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) -- **DOCS**: Document existing integrations in README.md. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) - -## 0.7.2 - -> Note: ObjectBox Vector DB integration (`ObjectBoxVectorStore`) is now available in the [`langchain_community`](https://pub.dev/packages/langchain_community) package. - - - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) - + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) - - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). 
([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) - ## 0.7.1 -> Note: VertexAI for Firebase (`ChatFirebaseVertexAI`) is now available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package. +> Note: VertexAI for Firebase (`ChatFirebaseVertexAI`) is available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package. - **DOCS**: Add docs for ChatFirebaseVertexAI ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f)) - **DOCS**: Update ChatOllama docs ([#417](https://github.com/davidmigloz/langchain_dart/issues/417)). ([9d30b1a1](https://github.com/davidmigloz/langchain_dart/commit/9d30b1a1c811d73cfa27110b8c3c10b10da1801e)) @@ -190,7 +153,7 @@ ## 0.0.13 -> Check out the [LangChain Expression Language documentation](https://langchaindart.dev/#/expression_language/interface) for more details +> Check out the [LangChain Expression Language documentation](https://langchaindart.com/#/expression_language/interface) for more details - **FEAT**: Add support for JsonOutputFunctionsParser ([#165](https://github.com/davidmigloz/langchain_dart/issues/165)). ([66c8e644](https://github.com/davidmigloz/langchain_dart/commit/66c8e64410d1dbf8b75e5734cb0cbb0e43dc0615)) - **FEAT**: Add support for StringOutputParser ([#164](https://github.com/davidmigloz/langchain_dart/issues/164)). ([ee29e99a](https://github.com/davidmigloz/langchain_dart/commit/ee29e99a410c3cc6a7ae263fea1cde283f904edf)) @@ -311,7 +274,7 @@ - Initial public release. Check out the announcement post for all the details: -https://blog.langchaindart.dev/introducing-langchain-dart-6b1d34fc41ef +https://blog.langchaindart.com/introducing-langchain-dart-6b1d34fc41ef ## 0.0.1-dev.7 @@ -359,7 +322,7 @@ https://blog.langchaindart.dev/introducing-langchain-dart-6b1d34fc41ef - Add support for LLMs - `BaseLLM` class (#14). - Add support for Chat models - `BaseChatModel` class (#10). - Add support for prompt templates - `PromptTemplate` class (#7). -- Publish LangChain.dart documentation on http://langchaindart.dev. +- Publish LangChain.dart documentation on http://langchaindart.com. ## 0.0.1-dev.1 diff --git a/packages/langchain/README.md b/packages/langchain/README.md index d01e9ccd..bef19382 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -5,15 +5,14 @@ [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) [![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) [![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) -[![Code Assist AI](https://img.shields.io/badge/AI-Code%20Assist-EB9FDA)](https://app.commanddash.io/agent?github=https://github.com/davidmigloz/langchain_dart) Build LLM-powered Dart/Flutter applications. ## What is LangChain.dart? -LangChain.dart is an unofficial Dart port of the popular [LangChain](https://github.com/hwchase17/langchain) Python framework created by [Harrison Chase](https://www.linkedin.com/in/harrison-chase-961287118). +LangChain.dart is a Dart port of the popular [LangChain](https://github.com/hwchase17/langchain) Python framework created by [Harrison Chase](https://www.linkedin.com/in/harrison-chase-961287118). 
-LangChain provides a set of ready-to-use components for working with language models and a standard interface for chaining them together to formulate more advanced use cases (e.g. chatbots, Q&A with RAG, agents, summarization, translation, extraction, recsys, etc.). +LangChain provides a set of ready-to-use components for working with language models and a standard interface for chaining them together to formulate more advanced use cases (e.g. chatbots, Q&A with RAG, agents, summarization, extraction, etc.). The components can be grouped into a few core modules: @@ -23,7 +22,7 @@ The components can be grouped into a few core modules: - 📚 **Retrieval:** assists in loading user data (via document loaders), transforming it (with text splitters), extracting its meaning (using embedding models), storing (in vector stores) and retrieving it (through retrievers) so that it can be used to ground the model's responses (i.e. Retrieval-Augmented Generation or RAG). - 🤖 **Agents:** "bots" that leverage LLMs to make informed decisions about which available tools (such as web search, calculators, database lookup, etc.) to use to accomplish the designated task. -The different components can be composed together using the [LangChain Expression Language (LCEL)](https://langchaindart.dev/#/expression_language/get_started). +The different components can be composed together using the LangChain Expression Language (LCEL). ## Motivation @@ -38,130 +37,66 @@ LangChain.dart aims to fill this gap by abstracting the intricacies of working w ## Packages LangChain.dart has a modular design that allows developers to import only the components they need. The ecosystem consists of several packages: - -### [`langchain_core`](https://pub.dev/packages/langchain_core) [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) - -Contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. - -> Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. - -### [`langchain`](https://pub.dev/packages/langchain) [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) - -Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. - -> Depend on this package to build LLM applications with LangChain.dart. -> -> This package exposes `langchain_core` so you don't need to depend on it explicitly. - -### [`langchain_community`](https://pub.dev/packages/langchain_community) [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) - -Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. - -> Depend on this package if you want to use any of the integrations or components it provides. - -### Integration-specific packages - -Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), [`langchain_ollama`](https://pub.dev/packages/langchain_ollama), etc.) are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package. - -> Depend on an integration-specific package if you want to use the specific integration. 
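The LCEL composition mentioned above can be sketched as a prompt template piped into a chat model and then into an output parser. The snippet below is a minimal sketch, assuming the `ChatPromptTemplate`, `ChatOpenAI` and `StringOutputParser` classes exported by `langchain`/`langchain_openai` and the `pipe` combinator from LCEL; exact constructors and generics may differ between versions, so treat it as illustrative rather than canonical.

```dart
import 'dart:io';

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  // Prompt template -> chat model -> string output parser, composed with LCEL.
  final promptTemplate = ChatPromptTemplate.fromTemplate(
    'Tell me a joke about {topic}',
  );
  final chatModel = ChatOpenAI(
    apiKey: Platform.environment['OPENAI_API_KEY'], // assumed env variable
  );
  final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser());

  final joke = await chain.invoke({'topic': 'llamas'});
  print(joke); // Parsed string output of the chat model.
}
```

The same wiring pattern extends to retrievers, tools and memory without changing how the pieces are composed.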
- -| Package | Version | Description | -|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | [![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic) | Anthopic integration (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.) | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | -| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | -| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | -| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, WizardLM-2, CodeGemma, Command R, LLaVA, DBRX, Qwen, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | -| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-4o, o1, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | -| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | -| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | +- [`langchain_core`](https://pub.dev/packages/langchain_core): contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. + > Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. +- [`langchain`](https://pub.dev/packages/langchain): contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. + > Depend on this package to build LLM applications with LangChain.dart. 
+ > This package exposes `langchain_core` so you don't need to depend on it explicitly.
+- [`langchain_community`](https://pub.dev/packages/langchain_community): contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API.
+ > Depend on this package if you want to use any of the integrations or components it provides.
+- Integration-specific packages (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), etc.): popular third-party integrations are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package.
+ > Depend on an integration-specific package if you want to use the specific integration.
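To make this layering concrete, the sketch below shows the imports a typical application would use. Only the package names are taken from the list above; the `Document` and `ChatOpenAI` references are illustrative placeholders.

```dart
// `langchain` re-exports the `langchain_core` abstractions, so an application
// normally depends on it plus the integration package(s) it actually needs.
import 'package:langchain/langchain.dart'; // chains, agents, retrieval + core types
import 'package:langchain_openai/langchain_openai.dart'; // OpenAI integration

void main() {
  // Core abstractions (here, Document) come in via `langchain`, while
  // provider-specific classes (here, ChatOpenAI) come from the integration package.
  final doc = Document(pageContent: 'LangChain.dart has a modular design.');
  final chatModel = ChatOpenAI(apiKey: 'sk-...'); // placeholder API key
  print('${doc.pageContent} (${chatModel.runtimeType})');
}
```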

    -### API clients packages +| Package | Version | Description | +|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | +| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | +| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4 Turbo, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | +| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | + +Functionality provided by each integration package: + +| Package | LLMs | Chat models | Embeddings | Vector stores | Chains | Agents | Tools | +|---------------------------------------------------------------------|------|-------------|------------|---------------|--------|--------|-------| +| [langchain_community](https://pub.dev/packages/langchain_community) | | | | | | | | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | | ✔ | ✔ | ✔ | +| [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | ✔ | | | | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | | ✔ | | | | | | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | | | | | +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | | ✔ | ✔ | | | | | +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | | | | ✔ | | | | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | | | | ✔ | | | | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | | | | ✔ | | | | The following packages are maintained (and used internally) by LangChain.dart, although they can also be used independently: -> Depend on an API client package if you just want to consume the API of a specific provider directly without using LangChain.dart abstractions. 
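For code that needs to call a provider API directly, without the LangChain.dart abstractions, the sketch below shows one of these clients in use. It reuses the `AnthropicClient`/`CreateMessageRequest` calls that appear in the `anthropic_sdk_dart` messages test removed earlier in this patch; the prompt and token limit are placeholders.

```dart
import 'dart:io';

import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

Future<void> main() async {
  // Client and request types mirror the usage in messages_test.dart above.
  final client = AnthropicClient(
    apiKey: Platform.environment['ANTHROPIC_API_KEY'],
  );

  final res = await client.createMessage(
    request: const CreateMessageRequest(
      model: Model.model(Models.claude35Sonnet20240620),
      maxTokens: 1024,
      messages: [
        Message(
          role: MessageRole.user,
          content: MessageContent.text('Hello, Claude!'),
        ),
      ],
    ),
  );
  print(res.content.text);

  client.endSession();
}
```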
- -| Package | Version | Description | -|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------| -| [anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | [Anthropic](https://docs.anthropic.com/en/api) API client | -| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | [Chroma DB](https://trychroma.com/) API client | -| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | [Google AI for Developers](https://ai.google.dev/) API client | -| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | [Mistral AI](https://docs.mistral.ai/api) API client | -| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | [Ollama](https://ollama.ai/) API client | -| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | [OpenAI](https://platform.openai.com/docs/api-reference) API client | -| [tavily_dart](https://pub.dev/packages/tavily_dart) | [![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) | [Tavily](https://tavily.com) API client | -| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | [GCP Vertex AI](https://cloud.google.com/vertex-ai) API client | - -## Integrations - -The following integrations are available in LangChain.dart: - -### Chat Models - -| Chat model | Package | Streaming | Multi-modal | Tool-call | Description | -|-------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|-----------|-------------|-----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [ChatAnthropic](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/anthropic) | [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | ✔ | ✔ | ✔ | [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API) | -| [ChatFirebaseVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/firebase_vertex_ai) | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | ✔ | ✔ | ✔ | [Vertex AI for Firebase API](https://firebase.google.com/docs/vertex-ai) (aka Gemini API) | -| 
[ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) | -| [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://ollama.ai) | -| [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) | -| [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([GitHub Models](https://github.com/marketplace/models), [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) | -| [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) | - -### LLMs - -_Note: Prefer using Chat Models over LLMs as many providers have deprecated them._ - -| LLM | Package | Streaming | Description | -|-------------------------------------------------------------------------------------------------|---------------------------------------------------------------|-----------|--------------------------------------------------------------------------------------| -| [Ollama](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | [Ollama Completions API](https://ollama.ai) | -| [OpenAI](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | [OpenAI Completions API](https://platform.openai.com/docs/api-reference/completions) | -| [VertexAI](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | [GCP Vertex AI Text API](https://cloud.google.com/vertex-ai) | - -### Embedding Models - -| Embedding model | Package | Description | -|---------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|------------------------------------------------------------------------------------| -| [GoogleGenerativeAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/google_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | [Google AI Embeddings API](https://ai.google.dev) | -| [MistralAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [Mistral Embeddings API](https://docs.mistral.ai) | -| 
[OllamaEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [Ollama Embeddings API](https://ollama.ai) | -| [OpenAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | [OpenAI Embeddings API](https://platform.openai.com/docs/api-reference/embeddings) | -| [VertexAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | [GCP Vertex AI Embeddings API](https://cloud.google.com/vertex-ai) | - -### Vector Stores - -| Vector store | Package | Description | -|--------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------| -| [Chroma](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/chroma) | [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [Chroma](https://trychroma.com/) integration | -| [MemoryVectorStore](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/memory) | [langchain](https://pub.dev/packages/langchain) | In-memory vector store for prototype and testing | -| [ObjectBoxVectorStore](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox) | [langchain_community](https://pub.dev/packages/langchain_community) | [ObjectBox](https://objectbox.io/) integration | -| [Pinecone](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/pinecone) | [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [Pinecone](https://pinecone.io/) integration | -| [Supabase](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/supabase) | [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [Supabase Vector](https://supabase.com/vector) integration | -| [VertexAIMatchingEngine](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | [Vertex AI Vector Search](https://cloud.google.com/vertex-ai/docs/vector-search/overview) (former Matching Engine) integration | - -### Tools - -| Tool | Package | Description | -|-----------------------------------------------------------------------------------|---------------------------------------------------------------------|--------------------------------------------------------------------------------------------| -| [CalculatorTool](https://langchaindart.dev/#/modules/agents/tools/calculator) | [langchain_community](https://pub.dev/packages/langchain_community) | To calculate math expressions | -| [OpenAIDallETool](https://langchaindart.dev/#/modules/agents/tools/openai_dall_e) | [langchain_openai](https://pub.dev/packages/langchain_openai) | [OpenAI's DALL-E Image Generator](https://platform.openai.com/docs/api-reference/images) | -| TavilyAnswerTool | [langchain_community](https://pub.dev/packages/langchain_community) | Returns an answer for a query using the [Tavily](https://tavily.com) search engine | -| TavilySearchResultsTool | [langchain_community](https://pub.dev/packages/langchain_community) | Returns a list of results for a query using the 
[Tavily](https://tavily.com) search engine | +| Package | Version | Description | +|-----------------------------------------------------------|---------------------------------------------------------------------------------------------------------------|-----------------------------------------------| +| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | Chroma DB API client | +| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | Google AI for Developers (Gemini API) client | +| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | Mistral AI API client | +| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | Ollama API client | +| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | OpenAI API client | +| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | GCP Vertex AI API client | ## Getting started -To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_community`, `langchain_openai`, `langchain_google`, etc.): +To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. Also, include the dependencies for the specific integrations you want to use (e.g. `langchain_openai` or `langchain_google`): ```yaml dependencies: langchain: {version} - langchain_community: {version} langchain_openai: {version} langchain_google: {version} ... 
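For orientation, below is a minimal sketch of what a first call could look like once those dependencies are installed. It is illustrative only (the README's own usage example is elided from this hunk) and assumes an `OPENAI_API_KEY` environment variable and the LCEL-style `Runnable` pipeline API used elsewhere in this repository:

```dart
import 'dart:io';

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  // Read the API key from the environment (assumed to be exported beforehand).
  final apiKey = Platform.environment['OPENAI_API_KEY'];

  // Build a simple prompt -> chat model -> string parser pipeline.
  final promptTemplate = ChatPromptTemplate.fromTemplate(
    'Tell me a joke about {topic}',
  );
  final chatModel = ChatOpenAI(apiKey: apiKey);
  final outputParser = StringOutputParser<ChatResult>();
  final chain = promptTemplate.pipe(chatModel).pipe(outputParser);

  // Invoke the chain with the template variables and print the generated text.
  final res = await chain.invoke({'topic': 'llamas'});
  print(res);
}
```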
@@ -221,10 +156,9 @@ print(res); ## Documentation -- [LangChain.dart documentation](https://langchaindart.dev) -- [Code Assist AI](https://app.commanddash.io/agent?github=https://github.com/davidmigloz/langchain_dart) (Chatbot for LangChain.dart documentation) +- [LangChain.dart documentation](https://langchaindart.com) - [Sample apps](https://github.com/davidmigloz/langchain_dart/tree/main/examples) -- [LangChain.dart blog](https://blog.langchaindart.dev) +- [LangChain.dart blog](https://blog.langchaindart.com) - [Project board](https://github.com/users/davidmigloz/projects/2/views/1) ## Community diff --git a/packages/langchain/lib/src/agents/agents.dart b/packages/langchain/lib/src/agents/agents.dart index cc12a558..ec89c95c 100644 --- a/packages/langchain/lib/src/agents/agents.dart +++ b/packages/langchain/lib/src/agents/agents.dart @@ -1,4 +1,3 @@ export 'package:langchain_core/agents.dart'; export 'executor.dart'; -export 'tools.dart'; diff --git a/packages/langchain/lib/src/agents/tools.dart b/packages/langchain/lib/src/agents/tools.dart deleted file mode 100644 index 02a16284..00000000 --- a/packages/langchain/lib/src/agents/tools.dart +++ /dev/null @@ -1,304 +0,0 @@ -import 'package:langchain_core/agents.dart'; -import 'package:langchain_core/chains.dart'; -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/exceptions.dart'; -import 'package:langchain_core/memory.dart'; -import 'package:langchain_core/output_parsers.dart'; -import 'package:langchain_core/prompts.dart'; -import 'package:langchain_core/tools.dart'; - -const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( - prompt: PromptTemplate( - inputVariables: {}, - template: 'You are a helpful AI assistant', - ), -); - -/// {@template tools_agent} -/// An agent powered by the tool calling API. -/// -/// Example: -/// ```dart -/// final llm = ChatOllama( -/// defaultOptions: ChatOllamaOptions( -/// model: 'llama3-groq-tool-use', -/// temperature: 0, -/// ), -/// ); -/// final tools = [CalculatorTool()]; -/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); -/// final executor = AgentExecutor(agent: agent); -/// final res = await executor.run('What is 40 raised to the 0.43 power? '); -/// ``` -/// -/// You can use any chat model that supports tools, like `ChatOpenAI`, -/// `ChatOllama`, `ChatAnthropic`, `ChatFirebaseVertexAI`, etc. Check the -/// [documentation](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) -/// for a complete list. -/// -/// You can easily add memory to the agent using the memory parameter from the -/// [ToolsAgent.fromLLMAndTools] constructor. Make sure you enable -/// [BaseChatMemory.returnMessages] on your memory, as the agent works with -/// [ChatMessage]s. The default prompt template already takes care of adding -/// the history to the prompt. For example: -/// ```dart -/// final memory = ConversationBufferMemory(returnMessages: true); -/// final agent = ToolsAgent.fromLLMAndTools( -/// llm: llm, -/// tools: tools, -/// memory: memory, -/// ); -/// ``` -/// -/// If you need to use your own [llmChain] make sure your prompt template -/// includes: -/// - `MessagePlaceholder(variableName: agentInputKey)`: the input to the agent. -/// - If you are using memory: -/// * `MessagesPlaceholder(variableName: '{memoryKey}')`: the history of chat -/// messages. 
-/// - If you are not using memory: -/// * `MessagesPlaceholder(variableName: BaseActionAgent.agentScratchpadInputKey)`: -/// the intermediary work of the agent (if you are using memory, the agent -/// uses the memory to store the intermediary work). -/// Example: -/// ```dart -/// ChatPromptTemplate.fromTemplates([ -/// (ChatMessageType.system, 'You are a helpful AI assistant'), -/// (ChatMessageType.messagesPlaceholder, 'history'), -/// (ChatMessageType.messagePlaceholder, 'input'), -/// ]); -/// ``` -/// -/// You can use [ToolsAgent.createPrompt] to build the prompt -/// template if you only need to customize the system message or add some -/// extra messages. -/// {@endtemplate} -class ToolsAgent extends BaseSingleActionAgent { - /// {@macro tools_agent} - ToolsAgent({ - required this.llmChain, - required super.tools, - }) : _parser = const ToolsAgentOutputParser(), - assert( - llmChain.memory != null || - llmChain.prompt.inputVariables - .contains(BaseActionAgent.agentScratchpadInputKey), - '`${BaseActionAgent.agentScratchpadInputKey}` should be one of the ' - 'variables in the prompt, got ${llmChain.prompt.inputVariables}', - ), - assert( - llmChain.memory == null || llmChain.memory!.returnMessages, - 'The memory must have `returnMessages` set to true', - ); - - /// Chain to use to call the LLM. - /// - /// If the chain does not have a memory, the prompt MUST include a variable - /// called [BaseActionAgent.agentScratchpadInputKey] where the agent can put - /// its intermediary work. - /// - /// If the chain has a memory, the agent will use the memory to store the - /// intermediary work. - /// - /// The memory must have [BaseChatMemory.returnMessages] set to true for - /// the agent to work properly. - final LLMChain llmChain; - - /// Parser to use to parse the output of the LLM. - final ToolsAgentOutputParser _parser; - - /// The key for the input to the agent. - static const agentInputKey = 'input'; - - @override - Set get inputKeys => {agentInputKey}; - - /// Construct an [ToolsAgent] from an [llm] and [tools]. - /// - /// - [llm] - The model to use for the agent. - /// - [tools] - The tools the agent has access to. You can omit this field if - /// you have already configured the tools in the [llm]. - /// - [memory] - The memory to use for the agent. - /// - [systemChatMessage] message to use as the system message that will be - /// the first in the prompt. Default: "You are a helpful AI assistant". - /// - [extraPromptMessages] prompt messages that will be placed between the - /// system message and the input from the agent. - factory ToolsAgent.fromLLMAndTools({ - required final BaseChatModel llm, - final List? tools, - final BaseChatMemory? memory, - final SystemChatMessagePromptTemplate systemChatMessage = - _systemChatMessagePromptTemplate, - final List? extraPromptMessages, - }) { - assert( - tools != null || llm.defaultOptions.tools != null, - 'Tools must be provided or configured in the llm', - ); - assert( - tools != null || llm.defaultOptions.tools!.every((tool) => tool is Tool), - 'All elements in `tools` must be of type `Tool` or its subclasses', - ); - - final actualTools = tools ?? 
llm.defaultOptions.tools!.cast(); - - return ToolsAgent( - llmChain: LLMChain( - llm: llm, - llmOptions: llm.defaultOptions.copyWith( - tools: actualTools, - ), - prompt: createPrompt( - systemChatMessage: systemChatMessage, - extraPromptMessages: extraPromptMessages, - memory: memory, - ), - memory: memory, - ), - tools: actualTools, - ); - } - - @override - Future> plan(final AgentPlanInput input) async { - final llmChainInputs = _constructLlmChainInputs( - input.intermediateSteps, - input.inputs, - ); - final ChainValues output = await llmChain.invoke(llmChainInputs); - final predictedMessage = output[LLMChain.defaultOutputKey] as AIChatMessage; - return _parser.parseChatMessage(predictedMessage); - } - - Map _constructLlmChainInputs( - final List intermediateSteps, - final InputValues inputs, - ) { - final dynamic agentInput; - - // If there is a memory, we pass the last agent step as a function message. - // Otherwise, we pass the input as a human message. - if (llmChain.memory != null && intermediateSteps.isNotEmpty) { - final lastStep = intermediateSteps.last; - final functionMsg = ChatMessage.tool( - toolCallId: lastStep.action.id, - content: lastStep.observation, - ); - agentInput = functionMsg; - } else { - agentInput = switch (inputs[agentInputKey]) { - final String inputStr => ChatMessage.humanText(inputStr), - final ChatMessage inputMsg => inputMsg, - final List inputMsgs => inputMsgs, - _ => throw LangChainException( - message: 'Agent expected a String or ChatMessage as input,' - ' got ${inputs[agentInputKey]}', - ), - }; - } - - return { - ...inputs, - agentInputKey: agentInput, - if (llmChain.memory == null) - BaseActionAgent.agentScratchpadInputKey: - _constructScratchPad(intermediateSteps), - }; - } - - List _constructScratchPad( - final List intermediateSteps, - ) { - return [ - ...intermediateSteps.map((final s) { - return s.action.messageLog + - [ - ChatMessage.tool( - toolCallId: s.action.id, - content: s.observation, - ), - ]; - }).expand((final m) => m), - ]; - } - - @override - String get agentType => 'tool-agent'; - - /// Creates prompt for this agent. - /// - /// It takes care of adding the necessary placeholders to handle the - /// intermediary work of the agent or the memory. - /// - /// - [systemChatMessage] message to use as the system message that will be - /// the first in the prompt. - /// - [extraPromptMessages] prompt messages that will be placed between the - /// system message and the new human input. - /// - [memory] optional memory to use for the agent. - static BasePromptTemplate createPrompt({ - final SystemChatMessagePromptTemplate systemChatMessage = - _systemChatMessagePromptTemplate, - final List? extraPromptMessages, - final BaseChatMemory? memory, - }) { - return ChatPromptTemplate.fromPromptMessages([ - systemChatMessage, - ...?extraPromptMessages, - for (final memoryKey in memory?.memoryKeys ?? {}) - MessagesPlaceholder(variableName: memoryKey), - const MessagePlaceholder(variableName: agentInputKey), - if (memory == null) - const MessagesPlaceholder( - variableName: BaseActionAgent.agentScratchpadInputKey, - ), - ]); - } -} - -/// {@template tools_agent_output_parser} -/// Parser for [ToolsAgent]. -/// -/// It parses the output of the LLM and returns the corresponding -/// [BaseAgentAction] to be executed. 
-/// {@endtemplate} -class ToolsAgentOutputParser extends BaseOutputParser> { - /// {@macro tools_agent_output_parser} - const ToolsAgentOutputParser() - : super(defaultOptions: const OutputParserOptions()); - - @override - Future> invoke( - final ChatResult input, { - final OutputParserOptions? options, - }) { - return parseChatMessage(input.output); - } - - /// Parses the [message] and returns the corresponding [BaseAgentAction]. - Future> parseChatMessage( - final AIChatMessage message, - ) async { - final toolCalls = message.toolCalls; - if (toolCalls.isNotEmpty) { - return toolCalls.map((final toolCall) { - return AgentAction( - id: toolCall.id, - tool: toolCall.name, - toolInput: toolCall.arguments, - log: 'Invoking: `${toolCall.name}` ' - 'with `${toolCall.arguments}`\n' - 'Responded: ${message.content}\n', - messageLog: [message], - ); - }).toList(growable: false); - } else { - return [ - AgentFinish( - returnValues: {'output': message.content}, - log: message.content, - ), - ]; - } - } -} diff --git a/packages/langchain/lib/src/embeddings/cache.dart b/packages/langchain/lib/src/embeddings/cache.dart index 3a67cd39..270ae124 100644 --- a/packages/langchain/lib/src/embeddings/cache.dart +++ b/packages/langchain/lib/src/embeddings/cache.dart @@ -135,7 +135,7 @@ class EmbeddingsByteStoreEncoder @override String encodeKey(final String key) { final keyHash = sha1.convert(utf8.encode(key)).toString(); - return uuid.v5(Namespace.URL, keyHash); + return uuid.v5(Uuid.NAMESPACE_URL, keyHash); } @override diff --git a/packages/langchain/lib/src/utils/utils.dart b/packages/langchain/lib/src/utils/utils.dart index 28748719..d41e35b9 100644 --- a/packages/langchain/lib/src/utils/utils.dart +++ b/packages/langchain/lib/src/utils/utils.dart @@ -1,6 +1,2 @@ export 'package:langchain_core/utils.dart' - show - RetryOptions, - calculateSimilarity, - cosineSimilarity, - getIndexesMostSimilarEmbeddings; + show calculateSimilarity, cosineSimilarity, getIndexesMostSimilarEmbeddings; diff --git a/packages/langchain/lib/src/vector_stores/memory.dart b/packages/langchain/lib/src/vector_stores/memory.dart index a439e1cf..e812d275 100644 --- a/packages/langchain/lib/src/vector_stores/memory.dart +++ b/packages/langchain/lib/src/vector_stores/memory.dart @@ -14,9 +14,7 @@ import 'package:uuid/uuid.dart'; /// This is not efficient for large vector stores as it has a time complexity /// of O(vector_dimensionality * num_vectors). /// -/// This class is useful for testing and prototyping, but it is not recommended -/// for production use cases. See other vector store integrations for -/// production use cases. +/// For more efficient vector stores, see [VertexAIMatchingEngine](https://pub.dev/documentation/langchain_google/latest/langchain_google/VertexAIMatchingEngine-class.html). /// /// ### Filtering /// diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 48657423..4326a8fb 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain description: Build powerful LLM-based Dart and Flutter applications with LangChain.dart. 
-version: 0.7.6 +version: 0.7.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -13,18 +13,15 @@ topics: - langchain environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: characters: ^1.3.0 - collection: ^1.18.0 + collection: ">=1.17.0 <1.19.0" crypto: ^3.0.3 - langchain_core: 0.3.6 + langchain_core: ^0.3.1 meta: ^1.11.0 - uuid: ^4.4.2 + uuid: ^4.3.3 dev_dependencies: - test: ^1.25.8 - langchain_community: ^0.3.2 - langchain_openai: ^0.7.2 - langchain_ollama: ^0.3.2 + test: ^1.25.2 diff --git a/packages/langchain/pubspec_overrides.yaml b/packages/langchain/pubspec_overrides.yaml index d9c6fc7e..3508ed77 100644 --- a/packages/langchain/pubspec_overrides.yaml +++ b/packages/langchain/pubspec_overrides.yaml @@ -1,16 +1,4 @@ -# melos_managed_dependency_overrides: langchain_community,langchain_core,langchain_ollama,langchain_openai,ollama_dart,openai_dart,tavily_dart +# melos_managed_dependency_overrides: langchain_core dependency_overrides: - langchain_community: - path: ../langchain_community langchain_core: path: ../langchain_core - langchain_ollama: - path: ../langchain_ollama - langchain_openai: - path: ../langchain_openai - ollama_dart: - path: ../ollama_dart - openai_dart: - path: ../openai_dart - tavily_dart: - path: ../tavily_dart diff --git a/packages/langchain/test/agents/assets/state_of_the_union.txt b/packages/langchain/test/agents/assets/state_of_the_union.txt deleted file mode 100644 index d50175de..00000000 --- a/packages/langchain/test/agents/assets/state_of_the_union.txt +++ /dev/null @@ -1,723 +0,0 @@ -Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. - -Last year COVID-19 kept us apart. This year we are finally together again. - -Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. - -With a duty to one another to the American people to the Constitution. - -And with an unwavering resolve that freedom will always triumph over tyranny. - -Six days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. - -He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. - -He met the Ukrainian people. - -From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. - -Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. - -In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight. - -Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. - -Please rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. - -Throughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos. - -They keep moving. - -And the costs and the threats to America and the world keep rising. 
- -That’s why the NATO Alliance was created to secure peace and stability in Europe after World War 2. - -The United States is a member along with 29 other nations. - -It matters. American diplomacy matters. American resolve matters. - -Putin’s latest attack on Ukraine was premeditated and unprovoked. - -He rejected repeated efforts at diplomacy. - -He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. Putin was wrong. We were ready. Here is what we did. - -We prepared extensively and carefully. - -We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin. - -I spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression. - -We countered Russia’s lies with truth. - -And now that he has acted the free world is holding him accountable. - -Along with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland. - -We are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever. - -Together with our allies –we are right now enforcing powerful economic sanctions. - -We are cutting off Russia’s largest banks from the international financial system. - -Preventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless. - -We are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come. - -Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. - -The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. - -We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. - -And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value. - -The Russian stock market has lost 40% of its value and trading remains suspended. Russia’s economy is reeling and Putin alone is to blame. - -Together with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance. - -We are giving more than $1 Billion in direct assistance to Ukraine. - -And we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering. - -Let me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine. - -Our forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies – in the event that Putin decides to keep moving west. - -For that purpose we’ve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia. - -As I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power. - -And we remain clear-eyed. 
The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them. - -Putin has unleashed violence and chaos. But while he may make gains on the battlefield – he will pay a continuing high price over the long run. - -And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. - -To all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. - -And I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. - -Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. - -America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. - -These steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. - -But I want you to know that we are going to be okay. - -When the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. - -While it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly. - -We see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine. - -In the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. - -This is a real test. It’s going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people. - -To our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you. - -Putin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people. - -He will never extinguish their love of freedom. He will never weaken the resolve of the free world. - -We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. - -The pandemic has been punishing. - -And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. - -I understand. - -I remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. - -That’s why one of the first things I did as President was fight to pass the American Rescue Plan. - -Because people were hurting. We needed to act, and we did. - -Few pieces of legislation have done more in a critical moment in our history to lift us out of crisis. - -It fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans. - -Helped put food on their table, keep a roof over their heads, and cut the cost of health insurance. - -And as my Dad used to say, it gave people a little breathing room. 
- -And unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind. - -And it worked. It created jobs. Lots of jobs. - -In fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year -than ever before in the history of America. - -Our economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasn’t worked for the working people of this nation for too long. - -For the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else. - -But that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. - -Vice President Harris and I ran for office with a new economic vision for America. - -Invest in America. Educate Americans. Grow the workforce. Build the economy from the bottom up -and the middle out, not from the top down. - -Because we know that when the middle class grows, the poor have a ladder up and the wealthy do very well. - -America used to have the best roads, bridges, and airports on Earth. - -Now our infrastructure is ranked 13th in the world. - -We won’t be able to compete for the jobs of the 21st Century if we don’t fix that. - -That’s why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history. - -This was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen. - -We’re done talking about infrastructure weeks. - -We’re going to have an infrastructure decade. - -It is going to transform America and put us on a path to win the economic competition of the 21st Century that we face with the rest of the world—particularly with China. - -As I’ve told Xi Jinping, it is never a good bet to bet against the American people. - -We’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. - -And we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice. - -We’ll build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities. - -4,000 projects have already been announced. - -And tonight, I’m announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair. - -When we use taxpayer dollars to rebuild America – we are going to Buy American: buy American products to support American jobs. - -The federal government spends about $600 Billion a year to keep the country safe and secure. - -There’s been a law on the books for almost a century -to make sure taxpayers’ dollars support American jobs and businesses. - -Every Administration says they’ll do it, but we are actually doing it. - -We will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America. - -But to compete for the best jobs of the future, we also need to level the playing field with China and other competitors. 
- -That’s why it is so important to pass the Bipartisan Innovation Act sitting in Congress that will make record investments in emerging technologies and American manufacturing. - -Let me give you one example of why it’s so important to pass it. - -If you travel 20 miles east of Columbus, Ohio, you’ll find 1,000 empty acres of land. - -It won’t look like much, but if you stop and look closely, you’ll see a “Field of dreams,” the ground on which America’s future will be built. - -This is where Intel, the American company that helped build Silicon Valley, is going to build its $20 billion semiconductor “mega site”. - -Up to eight state-of-the-art factories in one place. 10,000 new good-paying jobs. - -Some of the most sophisticated manufacturing in the world to make computer chips the size of a fingertip that power the world and our everyday lives. - -Smartphones. The Internet. Technology we have yet to invent. - -But that’s just the beginning. - -Intel’s CEO, Pat Gelsinger, who is here tonight, told me they are ready to increase their investment from -$20 billion to $100 billion. - -That would be one of the biggest investments in manufacturing in American history. - -And all they’re waiting for is for you to pass this bill. - -So let’s not wait any longer. Send it to my desk. I’ll sign it. - -And we will really take off. - -And Intel is not alone. - -There’s something happening in America. - -Just look around and you’ll see an amazing story. - -The rebirth of the pride that comes from stamping products “Made In America.” The revitalization of American manufacturing. - -Companies are choosing to build new factories here, when just a few years ago, they would have built them overseas. - -That’s what is happening. Ford is investing $11 billion to build electric vehicles, creating 11,000 jobs across the country. - -GM is making the largest investment in its history—$7 billion to build electric vehicles, creating 4,000 jobs in Michigan. - -All told, we created 369,000 new manufacturing jobs in America just last year. - -Powered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight. - -As Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” - -It’s time. - -But with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills. - -Inflation is robbing them of the gains they might otherwise feel. - -I get it. That’s why my top priority is getting prices under control. - -Look, our economy roared back faster than most predicted, but the pandemic meant that businesses had a hard time hiring enough workers to keep up production in their factories. - -The pandemic also disrupted global supply chains. - -When factories close, it takes longer to make goods and get them from the warehouse to the store, and prices go up. - -Look at cars. - -Last year, there weren’t enough semiconductors to make all the cars that people wanted to buy. - -And guess what, prices of automobiles went up. - -So—we have a choice. - -One way to fight inflation is to drive down wages and make Americans poorer. - -I have a better plan to fight inflation. - -Lower your costs, not your wages. - -Make more cars and semiconductors in America. - -More infrastructure and innovation in America. - -More goods moving faster and cheaper in America. - -More jobs where you can earn a good living in America. - -And instead of relying on foreign supply chains, let’s make it in America. 
- -Economists call it “increasing the productive capacity of our economy.” - -I call it building a better America. - -My plan to fight inflation will lower your costs and lower the deficit. - -17 Nobel laureates in economics say my plan will ease long-term inflationary pressures. Top business leaders and most Americans support my plan. And here’s the plan: - -First – cut the cost of prescription drugs. Just look at insulin. One in ten Americans has diabetes. In Virginia, I met a 13-year-old boy named Joshua Davis. - -He and his Dad both have Type 1 diabetes, which means they need insulin every day. Insulin costs about $10 a vial to make. - -But drug companies charge families like Joshua and his Dad up to 30 times more. I spoke with Joshua’s mom. - -Imagine what it’s like to look at your child who needs insulin and have no idea how you’re going to pay for it. - -What it does to your dignity, your ability to look your child in the eye, to be the parent you expect to be. - -Joshua is here with us tonight. Yesterday was his birthday. Happy birthday, buddy. - -For Joshua, and for the 200,000 other young people with Type 1 diabetes, let’s cap the cost of insulin at $35 a month so everyone can afford it. - -Drug companies will still do very well. And while we’re at it let Medicare negotiate lower prices for prescription drugs, like the VA already does. - -Look, the American Rescue Plan is helping millions of families on Affordable Care Act plans save $2,400 a year on their health care premiums. Let’s close the coverage gap and make those savings permanent. - -Second – cut energy costs for families an average of $500 a year by combatting climate change. - -Let’s provide investments and tax credits to weatherize your homes and businesses to be energy efficient and you get a tax credit; double America’s clean energy production in solar, wind, and so much more; lower the price of electric vehicles, saving you another $80 a month because you’ll never have to pay at the gas pump again. - -Third – cut the cost of child care. Many families pay up to $14,000 a year for child care per child. - -Middle-class and working families shouldn’t have to pay more than 7% of their income for care of young children. - -My plan will cut the cost in half for most families and help parents, including millions of women, who left the workforce during the pandemic because they couldn’t afford child care, to be able to get back to work. - -My plan doesn’t stop there. It also includes home and long-term care. More affordable housing. And Pre-K for every 3- and 4-year-old. - -All of these will lower costs. - -And under my plan, nobody earning less than $400,000 a year will pay an additional penny in new taxes. Nobody. - -The one thing all Americans agree on is that the tax system is not fair. We have to fix it. - -I’m not looking to punish anyone. But let’s make sure corporations and the wealthiest Americans start paying their fair share. - -Just last year, 55 Fortune 500 corporations earned $40 billion in profits and paid zero dollars in federal income tax. - -That’s simply not fair. That’s why I’ve proposed a 15% minimum tax rate for corporations. - -We got more than 130 countries to agree on a global minimum tax rate so companies can’t get out of paying their taxes at home by shipping jobs and factories overseas. - -That’s why I’ve proposed closing loopholes so the very wealthy don’t pay a lower tax rate than a teacher or a firefighter. - -So that’s my plan. It will grow the economy and lower costs for families. 
- -So what are we waiting for? Let’s get this done. And while you’re at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation. - -My plan will not only lower costs to give families a fair shot, it will lower the deficit. - -The previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted. - -But in my administration, the watchdogs have been welcomed back. - -We’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans. - -And tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. - -By the end of this year, the deficit will be down to less than half what it was before I took office. - -The only president ever to cut the deficit by more than one trillion dollars in a single year. - -Lowering your costs also means demanding more competition. - -I’m a capitalist, but capitalism without competition isn’t capitalism. - -It’s exploitation—and it drives up prices. - -When corporations don’t have to compete, their profits go up, your prices go up, and small businesses and family farmers and ranchers go under. - -We see it happening with ocean carriers moving goods in and out of America. - -During the pandemic, these foreign-owned companies raised prices by as much as 1,000% and made record profits. - -Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. - -And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. - -That ends on my watch. - -Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. - -We’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. - -Let’s pass the Paycheck Fairness Act and paid leave. - -Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. - -Let’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges. - -And let’s pass the PRO Act when a majority of workers want to form a union—they shouldn’t be stopped. - -When we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. - -For more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. - -And I know you’re tired, frustrated, and exhausted. - -But I also know this. - -Because of the progress we’ve made, because of your resilience and the tools we have, tonight I can say -we are moving forward safely, back to more normal routines. - -We’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July. - -Just a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines. - -Under these new guidelines, most Americans in most of the country can now be mask free. - -And based on the projections, more of the country will reach that point across the next couple of weeks. 
- -Thanks to the progress we have made this past year, COVID-19 need no longer control our lives. - -I know some are talking about “living with COVID-19”. Tonight – I say that we will never just accept living with COVID-19. - -We will continue to combat the virus as we do other diseases. And because this is a virus that mutates and spreads, we will stay on guard. - -Here are four common sense steps as we move forward safely. - -First, stay protected with vaccines and treatments. We know how incredibly effective vaccines are. If you’re vaccinated and boosted you have the highest degree of protection. - -We will never give up on vaccinating more Americans. Now, I know parents with kids under 5 are eager to see a vaccine authorized for their children. - -The scientists are working hard to get that done and we’ll be ready with plenty of vaccines when they do. - -We’re also ready with anti-viral treatments. If you get COVID-19, the Pfizer pill reduces your chances of ending up in the hospital by 90%. - -We’ve ordered more of these pills than anyone in the world. And Pfizer is working overtime to get us 1 Million pills this month and more than double that next month. - -And we’re launching the “Test to Treat” initiative so people can get tested at a pharmacy, and if they’re positive, receive antiviral pills on the spot at no cost. - -If you’re immunocompromised or have some other vulnerability, we have treatments and free high-quality masks. - -We’re leaving no one behind or ignoring anyone’s needs as we move forward. - -And on testing, we have made hundreds of millions of tests available for you to order for free. - -Even if you already ordered free tests tonight, I am announcing that you can order more from covidtests.gov starting next week. - -Second – we must prepare for new variants. Over the past year, we’ve gotten much better at detecting new variants. - -If necessary, we’ll be able to deploy new vaccines within 100 days instead of many more months or years. - -And, if Congress provides the funds we need, we’ll have new stockpiles of tests, masks, and pills ready if needed. - -I cannot promise a new variant won’t come. But I can promise you we’ll do everything within our power to be ready if it does. - -Third – we can end the shutdown of schools and businesses. We have the tools we need. - -It’s time for Americans to get back to work and fill our great downtowns again. People working from home can feel safe to begin to return to the office. - -We’re doing that here in the federal government. The vast majority of federal workers will once again work in person. - -Our schools are open. Let’s keep it that way. Our kids need to be in school. - -And with 75% of adult Americans fully vaccinated and hospitalizations down by 77%, most Americans can remove their masks, return to work, stay in the classroom, and move forward safely. - -We achieved this because we provided free vaccines, treatments, tests, and masks. - -Of course, continuing this costs money. - -I will soon send Congress a request. - -The vast majority of Americans have used these tools and may want to again, so I expect Congress to pass it quickly. - -Fourth, we will continue vaccinating the world. - -We’ve sent 475 Million vaccine doses to 112 countries, more than any other nation. - -And we won’t stop. - -We have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. - -Let’s use this moment to reset. 
Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. - -Let’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. - -We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. - -I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. - -They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. - -Officer Mora was 27 years old. - -Officer Rivera was 22. - -Both Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. - -I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. - -I’ve worked on these issues a long time. - -I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. - -So let’s not abandon our streets. Or choose between safety and equal justice. - -Let’s come together to protect our communities, restore trust, and hold law enforcement accountable. - -That’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. - -That’s why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope. - -We should all agree: The answer is not to Defund the police. The answer is to FUND the police with the resources and training they need to protect our communities. - -I ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe. - -And I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and can’t be traced. - -And I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon? - -Ban assault weapons and high-capacity magazines. - -Repeal the liability shield that makes gun manufacturers the only industry in America that can’t be sued. - -These laws don’t infringe on the Second Amendment. They save lives. - -The most fundamental right in America is the right to vote – and to have it counted. And it’s under assault. - -In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. - -We cannot let this happen. - -Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. - -Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. - -One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. 
- -And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. - -A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. - -And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. - -We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. - -We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. - -We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. - -We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders. - -We can do all this while keeping lit the torch of liberty that has led generations of immigrants to this land—my forefathers and so many of yours. - -Provide a pathway to citizenship for Dreamers, those on temporary status, farm workers, and essential workers. - -Revise our laws so businesses have the workers they need and families don’t wait decades to reunite. - -It’s not only the right thing to do—it’s the economically smart thing to do. - -That’s why immigration reform is supported by everyone from labor unions to religious leaders to the U.S. Chamber of Commerce. - -Let’s get it done once and for all. - -Advancing liberty and justice also requires protecting the rights of women. - -The constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before. - -If we want to go forward—not backward—we must protect access to health care. Preserve a woman’s right to choose. And let’s continue to advance maternal health care in America. - -And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. - -As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. - -While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. - -And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. - -So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. - -First, beat the opioid epidemic. - -There is so much we can do. Increase funding for prevention, treatment, harm reduction, and recovery. - -Get rid of outdated rules that stop doctors from prescribing treatments. And stop the flow of illicit drugs by working with state and local law enforcement to go after traffickers. - -If you’re suffering from addiction, know you are not alone. I believe in recovery, and I celebrate the 23 million Americans in recovery. - -Second, let’s take on mental health. 
Especially among our children, whose lives and education have been turned upside down. - -The American Rescue Plan gave schools money to hire teachers and help students make up for lost learning. - -I urge every parent to make sure your school does just that. And we can all play a part—sign up to be a tutor or a mentor. - -Children were also struggling before the pandemic. Bullying, violence, trauma, and the harms of social media. - -As Frances Haugen, who is here with us tonight, has shown, we must hold social media platforms accountable for the national experiment they’re conducting on our children for profit. - -It’s time to strengthen privacy protections, ban targeted advertising to children, demand tech companies stop collecting personal data on our children. - -And let’s get all Americans the mental health services they need. More people they can turn to for help, and full parity between physical and mental health care. - -Third, support our veterans. - -Veterans are the best of us. - -I’ve always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home. - -My administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free. - -Our troops in Iraq and Afghanistan faced many dangers. - -One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. - -When they came home, many of the world’s fittest and best trained warriors were never the same. - -Headaches. Numbness. Dizziness. - -A cancer that would put them in a flag-draped coffin. - -I know. - -One of those soldiers was my son Major Beau Biden. - -We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. - -But I’m committed to finding out everything we can. - -Committed to military families like Danielle Robinson from Ohio. - -The widow of Sergeant First Class Heath Robinson. - -He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. - -Stationed near Baghdad, just yards from burn pits the size of football fields. - -Heath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter. - -But cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. - -Danielle says Heath was a fighter to the very end. - -He didn’t know how to stop fighting, and neither did she. - -Through her pain she found purpose to demand we do better. - -Tonight, Danielle—we are. - -The VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits. - -And tonight, I’m announcing we’re expanding eligibility to veterans suffering from nine respiratory cancers. - -I’m also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve. - -And fourth, let’s end cancer as we know it. - -This is personal to me and Jill, to Kamala, and to so many of you. - -Cancer is the #2 cause of death in America–second only to heart disease. - -Last month, I announced our plan to supercharge -the Cancer Moonshot that President Obama asked me to lead six years ago. - -Our goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases. 
- -More support for patients and families. - -To get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. - -It’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. - -ARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. - -A unity agenda for the nation. - -We can do this. - -My fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. - -In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. - -We have fought for freedom, expanded liberty, defeated totalitarianism and terror. - -And built the strongest, freest, and most prosperous nation the world has ever known. - -Now is the hour. - -Our moment of responsibility. - -Our test of resolve and conscience, of history itself. - -It is in this moment that our character is formed. Our purpose is found. Our future is forged. - -Well I know this nation. - -We will meet the test. - -To protect freedom and liberty, to expand fairness and opportunity. - -We will save democracy. - -As hard as these times have been, I am more optimistic about America today than I have been my whole life. - -Because I see the future that is within our grasp. - -Because I know there is simply nothing beyond our capacity. - -We are the only nation on Earth that has always turned every crisis we have faced into an opportunity. - -The only nation that can be defined by a single word: possibilities. - -So on this night, in our 245th year as a nation, I have come to report on the State of the Union. - -And my report is this: the State of the Union is strong—because you, the American people, are strong. - -We are stronger today than we were a year ago. - -And we will be stronger a year from now than we are today. - -Now is our moment to meet and overcome the challenges of our time. - -And we will, as one people. - -One America. - -The United States of America. - -May God bless you all. May God protect our troops. 
\ No newline at end of file diff --git a/packages/langchain/test/agents/tools_agent_test.dart b/packages/langchain/test/agents/tools_agent_test.dart deleted file mode 100644 index e879ba88..00000000 --- a/packages/langchain/test/agents/tools_agent_test.dart +++ /dev/null @@ -1,226 +0,0 @@ -@TestOn('vm') -@Timeout(Duration(minutes: 50)) -library; // Uses dart:io - -import 'dart:io'; - -import 'package:langchain/langchain.dart'; -import 'package:langchain_community/langchain_community.dart'; -import 'package:langchain_ollama/langchain_ollama.dart'; -import 'package:langchain_openai/langchain_openai.dart'; -import 'package:meta/meta.dart'; -import 'package:test/test.dart'; - -void main() { - late BaseChatModel llm; - const defaultOllamaModel = 'llama3-groq-tool-use'; - const defaultOpenAIModel = 'gpt-4o-mini'; - - group('ChatToolsAgent using Ollama tests', - skip: Platform.environment.containsKey('CI'), () { - setUp(() async { - llm = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: defaultOllamaModel, - temperature: 0, - tools: [CalculatorTool(), searchTool], - keepAlive: 1, - ), - ); - }); - - test('Test ChatToolsAgent with calculator tool', () async { - await testAgentWithCalculator(llm); - }); - - test('Test ToolsAgent with messages memory', () async { - await testMemory(llm, returnMessages: true); - }); - - test('Test ToolsAgent with string memory throws error', () async { - expect( - () async => testMemory(llm, returnMessages: false), - throwsA(isA()), - ); - }); - - test('Test ToolsAgent LCEL equivalent using Ollama', () async { - final res = - await testLCDLEquivalent(llm: llm, tool: CalculatorTool()).invoke({ - 'input': 'What is 40 raised to the power of 0.43? ' - 'Return the result with 3 decimals.', - }); - expect(res['output'], contains('4.88')); - }); - }); - - group('ChatToolsAgent using OpenAi tests', () { - setUp(() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - model: defaultOpenAIModel, - tools: [CalculatorTool(), searchTool], - ), - ); - }); - - test('Test ChatToolsAgent with calculator tool', () async { - await testAgentWithCalculator(llm); - }); - - test('Test ToolsAgent with messages memory', () async { - await testMemory(llm, returnMessages: true); - }); - - test('Test ToolsAgent with string memory throws error', () async { - expect( - () async => testMemory(llm, returnMessages: false), - throwsA(isA()), - ); - }); - - test('Test ToolsAgent LCEL equivalent using OpenAi', () async { - final res = - await testLCDLEquivalent(llm: llm, tool: CalculatorTool()).invoke({ - 'input': 'What is 40 raised to the power of 0.43? ' - 'Return the result with 3 decimals.', - }); - expect(res['output'], contains('4.88')); - }); - }); -} - -Future testAgentWithCalculator( - BaseChatModel llm, -) async { - final agent = ToolsAgent.fromLLMAndTools( - llm: llm, - ); - final executor = AgentExecutor(agent: agent); - final res = await executor.run( - 'What is 40 raised to the power of 0.43? ' - 'Return the result with 3 decimals.', - ); - expect(res, contains('4.885')); -} - -Future testMemory( - BaseChatModel llm, { - required final bool returnMessages, -}) async { - final memory = ConversationBufferMemory(returnMessages: returnMessages); - final agent = ToolsAgent.fromLLMAndTools( - llm: llm, - memory: memory, - ); - - final executor = AgentExecutor(agent: agent); - - final res1 = await executor.run( - 'Search for cat names. 
Return only 3 results.', - ); - - expect(res1, contains('AAA')); - expect(res1, contains('BBB')); - expect(res1, contains('CCC')); - expect(res1, isNot(contains('DDD'))); - - final res2 = await executor.run( - 'How many results did the search return? Respond with a number.', - ); - expect(res2, contains('3')); - expect(res2, isNot(contains('1'))); - expect(res2, isNot(contains('2'))); - expect(res2, isNot(contains('4'))); - - final res3 = await executor.run('What was the last result?'); - expect(res3, contains('CCC')); -} - -AgentExecutor testLCDLEquivalent({ - required BaseChatModel llm, - required Tool tool, -}) { - final prompt = ChatPromptTemplate.fromTemplates(const [ - (ChatMessageType.system, 'You are a helpful assistant'), - (ChatMessageType.human, '{input}'), - (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), - ]); - - final agent = Agent.fromRunnable( - Runnable.mapInput( - (AgentPlanInput planInput) => { - 'input': planInput.inputs['input'], - 'agent_scratchpad': planInput.intermediateSteps - .map((s) { - return s.action.messageLog + - [ - ChatMessage.tool( - toolCallId: s.action.id, - content: s.observation, - ), - ]; - }) - .expand((m) => m) - .toList(growable: false), - }, - ).pipe(prompt).pipe(llm).pipe(const ToolsAgentOutputParser()), - tools: [tool], - ); - - return AgentExecutor(agent: agent); -} - -@immutable -class _SearchInput { - const _SearchInput({ - required this.query, - required this.n, - }); - - final String query; - final int n; - - _SearchInput.fromJson(final Map json) - : this( - query: json['query'] as String, - n: json['n'] as int, - ); - - @override - bool operator ==(covariant _SearchInput other) => - identical(this, other) || query == other.query && n == other.n; - - @override - int get hashCode => query.hashCode ^ n.hashCode; -} - -final searchTool = Tool.fromFunction<_SearchInput, String>( - name: 'search', - description: 'Tool for searching the web.', - inputJsonSchema: const { - 'type': 'object', - 'properties': { - 'query': { - 'type': 'string', - 'description': 'The query to search for', - }, - 'n': { - 'type': 'integer', - 'description': 'The number of results to return', - }, - }, - 'required': ['query'], - }, - func: (final _SearchInput toolInput) async { - final n = toolInput.n; - final res = List.generate( - n, - (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}', - ); - return 'Results:\n${res.join('\n')}'; - }, - getInputFromJson: _SearchInput.fromJson, -); diff --git a/packages/langchain_amazon/pubspec.yaml b/packages/langchain_amazon/pubspec.yaml index d948eb8c..41af11b0 100644 --- a/packages/langchain_amazon/pubspec.yaml +++ b/packages/langchain_amazon/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_amazon issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_amazon homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com publish_to: none # Remove when the package is ready to be published topics: @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" diff --git a/packages/langchain_anthropic/CHANGELOG.md b/packages/langchain_anthropic/CHANGELOG.md index 167d8c93..6df81faa 100644 --- a/packages/langchain_anthropic/CHANGELOG.md +++ b/packages/langchain_anthropic/CHANGELOG.md @@ -1,25 +1,3 @@ -📣 Check out the [releases 
page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.1.1+2 - - - Update a dependency to the latest release. - -## 0.1.1+1 - - - Update a dependency to the latest release. - -## 0.1.1 - - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -## 0.1.0 - -- **FEAT**: Add ChatAnthropic integration ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) - ## 0.0.1-dev.1 - Bootstrap project. diff --git a/packages/langchain_anthropic/README.md b/packages/langchain_anthropic/README.md index 85d07866..2d9f50a0 100644 --- a/packages/langchain_anthropic/README.md +++ b/packages/langchain_anthropic/README.md @@ -1,17 +1,6 @@ -# 🦜️🔗 LangChain.dart / Anthropic +# 🦜️🔗 LangChain.dart -[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) -[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment) -[![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic) -[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) -[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) - -[Anthropic](https://anthropic.com) module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart). - -## Features - -- Chat models: - * `ChatAnthropic`: wrapper around [Anthropic Messages](https://docs.anthropic.com/en/api/messages) API (Claude). +Anthropic module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart). 
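For context, the Anthropic package that this patch strips back to a bootstrap state exposed a `ChatAnthropic` chat model. A minimal usage sketch, adapted from the example file that is deleted further down in this diff (so it reflects the pre-removal API rather than the cleaned state), looks like this:

```dart
// Minimal ChatAnthropic call, mirroring the example removed below.
// Assumes the pre-removal langchain_anthropic / langchain_core APIs.
import 'dart:io';

import 'package:langchain_anthropic/langchain_anthropic.dart';
import 'package:langchain_core/prompts.dart';

void main() async {
  // Read the API key from the environment, as the removed example does.
  final apiKey = Platform.environment['ANTHROPIC_API_KEY'];
  final llm = ChatAnthropic(
    apiKey: apiKey,
    defaultOptions: const ChatAnthropicOptions(temperature: 1),
  );
  final res = await llm.invoke(PromptValue.string('Tell me a joke'));
  print(res);
}
```

The streaming variant shown in the removed example works the same way, with `llm.stream(...)` emitting `ChatResult` chunks instead of returning a single result.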
## License diff --git a/packages/langchain_anthropic/example/langchain_anthropic_example.dart b/packages/langchain_anthropic/example/langchain_anthropic_example.dart index fabef4bd..21f3e9f2 100644 --- a/packages/langchain_anthropic/example/langchain_anthropic_example.dart +++ b/packages/langchain_anthropic/example/langchain_anthropic_example.dart @@ -1,41 +1,3 @@ -// ignore_for_file: avoid_print, unused_element -import 'dart:io'; - -import 'package:langchain_anthropic/langchain_anthropic.dart'; -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/prompts.dart'; - -/// Check the docs for more examples: -/// https://langchaindart.dev -void main() async { - // Uncomment the example you want to run: - await _example1(); - // await _example2(); -} - -/// The most basic example of LangChain is calling a model on some input -Future<void> _example1() async { - final openAiApiKey = Platform.environment['ANTHROPIC_API_KEY']; - final llm = ChatAnthropic( - apiKey: openAiApiKey, - defaultOptions: const ChatAnthropicOptions(temperature: 1), - ); - final ChatResult res = await llm.invoke( - PromptValue.string('Tell me a joke'), - ); - print(res); -} - -/// Instead of waiting for the full response from the model, you can stream it -/// while it's being generated -Future<void> _example2() async { - final openAiApiKey = Platform.environment['ANTHROPIC_API_KEY']; - final llm = ChatAnthropic( - apiKey: openAiApiKey, - defaultOptions: const ChatAnthropicOptions(temperature: 1), - ); - final Stream<ChatResult> stream = llm.stream( - PromptValue.string('Tell me a joke'), - ); - await stream.forEach((final chunk) => stdout.write(chunk.output.content)); +void main() { + // TODO } diff --git a/packages/langchain_anthropic/lib/langchain_anthropic.dart b/packages/langchain_anthropic/lib/langchain_anthropic.dart index 78ee6803..d8becc4d 100644 --- a/packages/langchain_anthropic/lib/langchain_anthropic.dart +++ b/packages/langchain_anthropic/lib/langchain_anthropic.dart @@ -1,4 +1,2 @@ /// Anthropic module for LangChain.dart. library; - -export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart b/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart deleted file mode 100644 index 1c8360d4..00000000 --- a/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart +++ /dev/null @@ -1,243 +0,0 @@ -import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart' as a; -import 'package:http/http.dart' as http; -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/prompts.dart'; -import 'package:langchain_tiktoken/langchain_tiktoken.dart'; - -import 'mappers.dart'; -import 'types.dart'; - -/// Wrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) -/// (aka Claude API). -/// -/// Example: -/// ```dart -/// final chatModel = ChatAnthropic(apiKey: '...'); -/// final messages = [ -/// ChatMessage.system('You are a helpful assistant that translates English to French.'), -/// ChatMessage.humanText('I love programming.'), -/// ]; -/// final prompt = PromptValue.chat(messages); -/// final res = await chatModel.invoke(prompt); -/// ``` -/// -/// - Docs: https://docs.anthropic.com -/// -/// ### Authentication -/// -/// The Anthropic API uses API keys for authentication. Visit your -/// [API Keys](https://console.anthropic.com/settings/keys) page to retrieve -/// the API key you'll use in your requests.
-/// -/// ### Available models -/// -/// The following models are available: -/// - `claude-3-5-sonnet-20240620` -/// - `claude-3-haiku-20240307` -/// - `claude-3-opus-20240229` -/// - `claude-3-sonnet-20240229` -/// - `claude-2.0` -/// - `claude-2.1` -/// -/// Mind that the list may not be up-to-date. -/// See https://docs.anthropic.com/en/docs/about-claude/models for the updated list. -/// -/// ### Call options -/// -/// You can configure the parameters that will be used when calling the -/// chat completions API in several ways: -/// -/// **Default options:** -/// -/// Use the [defaultOptions] parameter to set the default options. These -/// options will be used unless you override them when generating completions. -/// -/// ```dart -/// final chatModel = ChatAnthropic( -/// apiKey: anthropicApiKey, -/// defaultOptions: const ChatAnthropicOptions( -/// temperature: 0.9, -/// maxTokens: 100, -/// ), -/// ); -/// ``` -/// -/// **Call options:** -/// -/// You can override the default options when invoking the model: -/// -/// ```dart -/// final res = await chatModel.invoke( -/// prompt, -/// options: const ChatAnthropicOptions(temperature: 0.5), -/// ); -/// ``` -/// -/// **Bind:** -/// -/// You can also change the options in a [Runnable] pipeline using the bind -/// method. -/// -/// In this example, we are using two totally different models for each -/// question: -/// -/// ```dart -/// final chatModel = ChatAnthropic(apiKey: anthropicApiKey); -/// const outputParser = StringOutputParser(); -/// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); -/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); -/// final chain = Runnable.fromMap({ -/// 'q1': prompt1 | chatModel.bind(const ChatAnthropicOptions(model: 'claude-3-5-sonnet-20240620')) | outputParser, -/// 'q2': prompt2 | chatModel.bind(const ChatAnthropicOptions(model: 'claude-3-sonnet-20240229')) | outputParser, -/// }); -/// final res = await chain.invoke({'name': 'David'}); -/// ``` -/// -/// ### Advanced -/// -/// #### Custom HTTP client -/// -/// You can always provide your own implementation of `http.Client` for further -/// customization: -/// -/// ```dart -/// final client = ChatAnthropic( -/// apiKey: 'ANTHROPIC_API_KEY', -/// client: MyHttpClient(), -/// ); -/// ``` -/// -/// #### Using a proxy -/// -/// ##### HTTP proxy -/// -/// You can use your own HTTP proxy by overriding the `baseUrl` and providing -/// your required `headers`: -/// -/// ```dart -/// final client = ChatAnthropic( -/// baseUrl: 'https://my-proxy.com', -/// headers: {'x-my-proxy-header': 'value'}, -/// ); -/// ``` -/// -/// If you need further customization, you can always provide your own -/// `http.Client`. -/// -/// ##### SOCKS5 proxy -/// -/// To use a SOCKS5 proxy, you can use the -/// [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package and a -/// custom `http.Client`. -class ChatAnthropic extends BaseChatModel<ChatAnthropicOptions> { - /// Create a new [ChatAnthropic] instance. - /// - /// Main configuration options: - /// - `apiKey`: your Anthropic API key. You can find your API key in the - /// [Anthropic dashboard](https://console.anthropic.com/settings/keys). - /// - [ChatAnthropic.encoding] - /// - [ChatAnthropic.defaultOptions] - /// - /// Advanced configuration options: - /// - `baseUrl`: the base URL to use. Defaults to Anthropic's API URL. You can - /// override this to use a different API URL, or to use a proxy. - /// - `headers`: global headers to send with every request.
You can use - /// this to set custom headers, or to override the default headers. - /// - `queryParams`: global query parameters to send with every request. You - /// can use this to set custom query parameters. - /// - `client`: the HTTP client to use. You can set your own HTTP client if - /// you need further customization (e.g. to use a Socks5 proxy). - ChatAnthropic({ - final String? apiKey, - final String baseUrl = 'https://api.anthropic.com/v1', - final Map<String, String>? headers, - final Map<String, dynamic>? queryParams, - final http.Client? client, - super.defaultOptions = const ChatAnthropicOptions( - model: defaultModel, - maxTokens: defaultMaxTokens, - ), - this.encoding = 'cl100k_base', - }) : _client = a.AnthropicClient( - apiKey: apiKey ?? '', - baseUrl: baseUrl, - headers: headers, - queryParams: queryParams, - client: client, - ); - - /// A client for interacting with Anthropic API. - final a.AnthropicClient _client; - - /// The encoding to use by tiktoken when [tokenize] is called. - /// - /// Anthropic does not provide any API to count tokens, so we use tiktoken - /// to get an estimation of the number of tokens in a prompt. - String encoding; - - @override - String get modelType => 'anthropic-chat'; - - /// The default model to use unless another is specified. - static const defaultModel = 'claude-3-5-sonnet-20240620'; - - /// The default max tokens to use unless another is specified. - static const defaultMaxTokens = 1024; - - @override - Future<ChatResult> invoke( - final PromptValue input, { - final ChatAnthropicOptions? options, - }) async { - final completion = await _client.createMessage( - request: createMessageRequest( - input.toChatMessages(), - options: options, - defaultOptions: defaultOptions, - ), - ); - return completion.toChatResult(); - } - - @override - Stream<ChatResult> stream( - final PromptValue input, { - final ChatAnthropicOptions? options, - }) { - return _client - .createMessageStream( - request: createMessageRequest( - input.toChatMessages(), - options: options, - defaultOptions: defaultOptions, - stream: true, - ), - ) - .transform(MessageStreamEventTransformer()); - } - - /// Tokenizes the given prompt using tiktoken. - /// - /// Currently Anthropic does not provide a tokenizer for the models it supports. - /// So we use tiktoken and the [encoding] model to get an approximation - /// for counting tokens. Mind that the actual tokens will be totally - /// different from the ones used by the Anthropic model. - /// - /// If an encoding model is specified in [encoding] field, that - /// encoding is used instead. - /// - /// - [promptValue] The prompt to tokenize. - @override - Future<List<int>> tokenize( - final PromptValue promptValue, { - final ChatAnthropicOptions?
options, - }) async { - final encoding = getEncoding(this.encoding); - return encoding.encode(promptValue.toString()); - } - - @override - void close() { - _client.endSession(); - } -} diff --git a/packages/langchain_anthropic/lib/src/chat_models/chat_models.dart b/packages/langchain_anthropic/lib/src/chat_models/chat_models.dart deleted file mode 100644 index 1a011d3c..00000000 --- a/packages/langchain_anthropic/lib/src/chat_models/chat_models.dart +++ /dev/null @@ -1,2 +0,0 @@ -export 'chat_anthropic.dart'; -export 'types.dart'; diff --git a/packages/langchain_anthropic/lib/src/chat_models/mappers.dart b/packages/langchain_anthropic/lib/src/chat_models/mappers.dart deleted file mode 100644 index 020ef844..00000000 --- a/packages/langchain_anthropic/lib/src/chat_models/mappers.dart +++ /dev/null @@ -1,433 +0,0 @@ -// ignore_for_file: public_member_api_docs -import 'dart:async'; -import 'dart:convert'; - -import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart' as a; -import 'package:collection/collection.dart' show IterableExtension; -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/language_models.dart'; -import 'package:langchain_core/tools.dart'; -import 'package:rxdart/rxdart.dart' show WhereNotNullExtension; - -import 'chat_anthropic.dart'; -import 'types.dart'; - -/// Creates a [CreateMessageRequest] from the given input. -a.CreateMessageRequest createMessageRequest( - final List messages, { - required final ChatAnthropicOptions? options, - required final ChatAnthropicOptions defaultOptions, - final bool stream = false, -}) { - final systemMsg = messages.firstOrNull is SystemChatMessage - ? messages.firstOrNull?.contentAsString - : null; - - final messagesDtos = messages.toMessages(); - final toolChoice = options?.toolChoice ?? defaultOptions.toolChoice; - final toolChoiceDto = toolChoice?.toToolChoice(); - final toolsDtos = - (options?.tools ?? defaultOptions.tools)?.toTool(toolChoice); - - return a.CreateMessageRequest( - model: a.Model.modelId( - options?.model ?? defaultOptions.model ?? ChatAnthropic.defaultModel, - ), - messages: messagesDtos, - maxTokens: options?.maxTokens ?? - defaultOptions.maxTokens ?? - ChatAnthropic.defaultMaxTokens, - stopSequences: options?.stopSequences ?? defaultOptions.stopSequences, - system: systemMsg, - temperature: options?.temperature ?? defaultOptions.temperature, - topK: options?.topK ?? defaultOptions.topK, - topP: options?.topP ?? defaultOptions.topP, - metadata: a.CreateMessageRequestMetadata( - userId: options?.userId ?? 
defaultOptions.userId, - ), - tools: toolsDtos, - toolChoice: toolChoiceDto, - stream: stream, - ); -} - -extension ChatMessageListMapper on List { - List toMessages() { - final List result = []; - final List consecutiveToolMessages = []; - - void flushToolMessages() { - if (consecutiveToolMessages.isNotEmpty) { - result.add(_mapToolChatMessages(consecutiveToolMessages)); - consecutiveToolMessages.clear(); - } - } - - for (final message in this) { - switch (message) { - case SystemChatMessage(): - flushToolMessages(); - continue; // System message set in request params - case final HumanChatMessage msg: - flushToolMessages(); - final res = _mapHumanChatMessage(msg); - result.add(res); - case final AIChatMessage msg: - flushToolMessages(); - final res = _mapAIChatMessage(msg); - result.add(res); - case final ToolChatMessage msg: - consecutiveToolMessages.add(msg); - case CustomChatMessage(): - throw UnsupportedError('Anthropic does not support custom messages'); - } - } - - flushToolMessages(); // Flush any remaining tool messages - return result; - } - - a.Message _mapHumanChatMessage(final HumanChatMessage msg) { - return a.Message( - role: a.MessageRole.user, - content: switch (msg.content) { - final ChatMessageContentText t => a.MessageContent.text(t.text), - final ChatMessageContentImage i => a.MessageContent.blocks([ - _mapHumanChatMessageContentImage(i), - ]), - final ChatMessageContentMultiModal mm => a.MessageContent.blocks( - mm.parts - .map( - (final part) => switch (part) { - final ChatMessageContentText t => - a.Block.text(text: t.text), - final ChatMessageContentImage i => - _mapHumanChatMessageContentImage(i), - ChatMessageContentMultiModal() => throw ArgumentError( - 'Cannot have multimodal content in multimodal content', - ), - }, - ) - .toList(growable: false), - ), - }, - ); - } - - a.Block _mapHumanChatMessageContentImage(ChatMessageContentImage i) { - return a.Block.image( - source: a.ImageBlockSource( - type: a.ImageBlockSourceType.base64, - mediaType: switch (i.mimeType) { - 'image/jpeg' => a.ImageBlockSourceMediaType.imageJpeg, - 'image/png' => a.ImageBlockSourceMediaType.imagePng, - 'image/gif' => a.ImageBlockSourceMediaType.imageGif, - 'image/webp' => a.ImageBlockSourceMediaType.imageWebp, - _ => - throw AssertionError('Unsupported image MIME type: ${i.mimeType}'), - }, - data: i.data.startsWith('http') - ? throw AssertionError( - 'Anthropic only supports base64-encoded images', - ) - : i.data, - ), - ); - } - - a.Message _mapAIChatMessage(final AIChatMessage msg) { - if (msg.toolCalls.isEmpty) { - return a.Message( - role: a.MessageRole.assistant, - content: a.MessageContent.text(msg.content), - ); - } else { - return a.Message( - role: a.MessageRole.assistant, - content: a.MessageContent.blocks( - msg.toolCalls - .map( - (final toolCall) => a.Block.toolUse( - id: toolCall.id, - name: toolCall.name, - input: toolCall.arguments, - ), - ) - .toList(growable: false), - ), - ); - } - } - - a.Message _mapToolChatMessages(final List msgs) { - return a.Message( - role: a.MessageRole.user, - content: a.MessageContent.blocks( - msgs - .map( - (msg) => a.Block.toolResult( - toolUseId: msg.toolCallId, - content: a.ToolResultBlockContent.text(msg.content), - ), - ) - .toList(growable: false), - ), - ); - } -} - -extension MessageMapper on a.Message { - ChatResult toChatResult() { - final (content, toolCalls) = _mapMessageContent(this.content); - return ChatResult( - id: id ?? 
'', - output: AIChatMessage( - content: content, - toolCalls: toolCalls, - ), - finishReason: _mapFinishReason(stopReason), - metadata: { - 'model': model, - 'stop_sequence': stopSequence, - }, - usage: _mapUsage(usage), - ); - } -} - -class MessageStreamEventTransformer - extends StreamTransformerBase { - MessageStreamEventTransformer(); - - String? lastMessageId; - String? lastToolCallId; - - @override - Stream bind(final Stream stream) { - return stream - .map( - (event) => switch (event) { - final a.MessageStartEvent e => _mapMessageStartEvent(e), - final a.MessageDeltaEvent e => _mapMessageDeltaEvent(e), - final a.ContentBlockStartEvent e => _mapContentBlockStartEvent(e), - final a.ContentBlockDeltaEvent e => _mapContentBlockDeltaEvent(e), - final a.ContentBlockStopEvent e => _mapContentBlockStopEvent(e), - final a.MessageStopEvent e => _mapMessageStopEvent(e), - a.PingEvent() => null, - }, - ) - .whereNotNull(); - } - - ChatResult _mapMessageStartEvent(final a.MessageStartEvent e) { - final msg = e.message; - - final msgId = msg.id ?? lastMessageId ?? ''; - lastMessageId = msgId; - final (content, toolCalls) = _mapMessageContent(e.message.content); - - return ChatResult( - id: msgId, - output: AIChatMessage( - content: content, - toolCalls: toolCalls, - ), - finishReason: _mapFinishReason(e.message.stopReason), - metadata: { - if (e.message.model != null) 'model': e.message.model, - if (e.message.stopSequence != null) - 'stop_sequence': e.message.stopSequence, - }, - usage: _mapUsage(e.message.usage), - streaming: true, - ); - } - - ChatResult _mapMessageDeltaEvent(final a.MessageDeltaEvent e) { - return ChatResult( - id: lastMessageId ?? '', - output: const AIChatMessage(content: ''), - finishReason: _mapFinishReason(e.delta.stopReason), - metadata: { - if (e.delta.stopSequence != null) 'stop_sequence': e.delta.stopSequence, - }, - usage: _mapMessageDeltaUsage(e.usage), - streaming: true, - ); - } - - ChatResult _mapContentBlockStartEvent(final a.ContentBlockStartEvent e) { - final (content, toolCall) = _mapContentBlock(e.contentBlock); - if (toolCall != null) { - lastToolCallId = toolCall.id; - } - - return ChatResult( - id: lastMessageId ?? '', - output: AIChatMessage( - content: content, - toolCalls: [if (toolCall != null) toolCall], - ), - finishReason: FinishReason.unspecified, - metadata: const {}, - usage: const LanguageModelUsage(), - streaming: true, - ); - } - - ChatResult _mapContentBlockDeltaEvent(final a.ContentBlockDeltaEvent e) { - final (content, toolCals) = _mapContentBlockDelta(lastToolCallId, e.delta); - return ChatResult( - id: lastMessageId ?? '', - output: AIChatMessage( - content: content, - toolCalls: toolCals, - ), - finishReason: FinishReason.unspecified, - metadata: { - 'index': e.index, - }, - usage: const LanguageModelUsage(), - streaming: true, - ); - } - - ChatResult? _mapContentBlockStopEvent(final a.ContentBlockStopEvent e) { - lastToolCallId = null; - return null; - } - - ChatResult? _mapMessageStopEvent(final a.MessageStopEvent e) { - lastMessageId = null; - return null; - } -} - -(String content, List toolCalls) _mapMessageContent( - final a.MessageContent content, -) => - switch (content) { - final a.MessageContentText t => ( - t.value, - const [] - ), - final a.MessageContentBlocks b => ( - b.text, - b.value - .whereType() - .map( - (toolUse) => AIChatMessageToolCall( - id: toolUse.id, - name: toolUse.name, - argumentsRaw: toolUse.input.isNotEmpty - ? 
json.encode(toolUse.input) - : '', - arguments: toolUse.input, - ), - ) - .toList(growable: false), - ), - }; - -(String content, AIChatMessageToolCall? toolCall) _mapContentBlock( - final a.Block contentBlock, -) => - switch (contentBlock) { - final a.TextBlock t => (t.text, null), - final a.ImageBlock i => (i.source.data, null), - final a.ToolUseBlock tu => ( - '', - AIChatMessageToolCall( - id: tu.id, - name: tu.name, - argumentsRaw: tu.input.isNotEmpty ? json.encode(tu.input) : '', - arguments: tu.input, - ), - ), - final a.ToolResultBlock tr => (tr.content.text, null), - }; - -(String content, List toolCalls) _mapContentBlockDelta( - final String? lastToolId, - final a.BlockDelta blockDelta, -) => - switch (blockDelta) { - final a.TextBlockDelta t => (t.text, const []), - final a.InputJsonBlockDelta jb => ( - '', - [ - AIChatMessageToolCall( - id: lastToolId ?? '', - name: '', - argumentsRaw: jb.partialJson ?? '', - arguments: const {}, - ), - ], - ), - }; - -extension ToolSpecListMapper on List { - List toTool(final ChatToolChoice? toolChoice) { - if (toolChoice is ChatToolChoiceNone) { - return const []; - } - - if (toolChoice is ChatToolChoiceForced) { - final tool = firstWhereOrNull((final t) => t.name == toolChoice.name); - return [if (tool != null) _mapTool(tool)]; - } - - return map(_mapTool).toList(growable: false); - } - - a.Tool _mapTool(final ToolSpec tool) { - return a.Tool( - name: tool.name, - description: tool.description, - inputSchema: tool.inputJsonSchema, - ); - } -} - -extension ChatToolChoiceMapper on ChatToolChoice { - a.ToolChoice toToolChoice() { - return switch (this) { - ChatToolChoiceNone _ => const a.ToolChoice(type: a.ToolChoiceType.auto), - ChatToolChoiceAuto _ => const a.ToolChoice(type: a.ToolChoiceType.auto), - ChatToolChoiceRequired _ => - const a.ToolChoice(type: a.ToolChoiceType.any), - final ChatToolChoiceForced t => a.ToolChoice( - type: a.ToolChoiceType.tool, - name: t.name, - ), - }; - } -} - -FinishReason _mapFinishReason( - final a.StopReason? reason, -) => - switch (reason) { - a.StopReason.endTurn => FinishReason.stop, - a.StopReason.maxTokens => FinishReason.length, - a.StopReason.stopSequence => FinishReason.stop, - a.StopReason.toolUse => FinishReason.toolCalls, - null => FinishReason.unspecified, - }; - -LanguageModelUsage _mapUsage(final a.Usage? usage) { - return LanguageModelUsage( - promptTokens: usage?.inputTokens, - responseTokens: usage?.outputTokens, - totalTokens: usage?.inputTokens != null && usage?.outputTokens != null - ? usage!.inputTokens + usage.outputTokens - : null, - ); -} - -LanguageModelUsage _mapMessageDeltaUsage(final a.MessageDeltaUsage? usage) { - return LanguageModelUsage( - responseTokens: usage?.outputTokens, - totalTokens: usage?.outputTokens, - ); -} diff --git a/packages/langchain_anthropic/lib/src/chat_models/types.dart b/packages/langchain_anthropic/lib/src/chat_models/types.dart deleted file mode 100644 index f91abdb3..00000000 --- a/packages/langchain_anthropic/lib/src/chat_models/types.dart +++ /dev/null @@ -1,160 +0,0 @@ -import 'package:collection/collection.dart'; -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/tools.dart'; -import 'package:meta/meta.dart'; - -/// {@template chat_anthropic_options} -/// Options to pass into the Anthropic Chat Model. 
-/// -/// Available models: -/// - `claude-3-5-sonnet-20240620` -/// - `claude-3-haiku-20240307` -/// - `claude-3-opus-20240229` -/// - `claude-3-sonnet-20240229` -/// - `claude-2.0` -/// - `claude-2.1` -/// -/// Mind that the list may be outdated. -/// See https://docs.anthropic.com/en/docs/about-claude/models for the latest list. -/// {@endtemplate} -@immutable -class ChatAnthropicOptions extends ChatModelOptions { - /// {@macro chat_anthropic_options} - const ChatAnthropicOptions({ - super.model, - this.maxTokens, - this.stopSequences, - this.temperature, - this.topK, - this.topP, - this.userId, - super.tools, - super.toolChoice, - super.concurrencyLimit, - }); - - /// The maximum number of tokens to generate before stopping. - /// - /// Note that our models may stop _before_ reaching this maximum. This parameter - /// only specifies the absolute maximum number of tokens to generate. - /// - /// Different models have different maximum values for this parameter. See - /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. - final int? maxTokens; - - /// Custom text sequences that will cause the model to stop generating. - /// - /// Anthropic models will normally stop when they have naturally completed - /// their turn. If you want the model to stop generating when it encounters - /// custom strings of text, you can use the `stopSequences` parameter. - final List? stopSequences; - - /// Amount of randomness injected into the response. - /// - /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` - /// for analytical / multiple choice, and closer to `1.0` for creative and - /// generative tasks. - /// - /// Note that even with `temperature` of `0.0`, the results will not be fully - /// deterministic. - final double? temperature; - - /// Only sample from the top K options for each subsequent token. - /// - /// Used to remove "long tail" low probability responses. - /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). - /// - /// Recommended for advanced use cases only. You usually only need to use - /// `temperature`. - final int? topK; - - /// Use nucleus sampling. - /// - /// In nucleus sampling, we compute the cumulative distribution over all the options - /// for each subsequent token in decreasing probability order and cut it off once it - /// reaches a particular probability specified by `top_p`. You should either alter - /// `temperature` or `top_p`, but not both. - /// - /// Recommended for advanced use cases only. You usually only need to use - /// `temperature`. - final double? topP; - - /// An external identifier for the user who is associated with the request. - /// - /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use - /// this id to help detect abuse. Do not include any identifying information such as - /// name, email address, or phone number. - final String? userId; - - @override - ChatAnthropicOptions copyWith({ - final String? model, - final int? maxTokens, - final List? stopSequences, - final double? temperature, - final int? topK, - final double? topP, - final String? userId, - final List? tools, - final ChatToolChoice? toolChoice, - final int? concurrencyLimit, - }) { - return ChatAnthropicOptions( - model: model ?? this.model, - maxTokens: maxTokens ?? this.maxTokens, - stopSequences: stopSequences ?? this.stopSequences, - temperature: temperature ?? this.temperature, - topK: topK ?? this.topK, - topP: topP ?? 
this.topP, - userId: userId ?? this.userId, - tools: tools ?? this.tools, - toolChoice: toolChoice ?? this.toolChoice, - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, - ); - } - - @override - ChatAnthropicOptions merge(covariant final ChatAnthropicOptions? other) { - return copyWith( - model: other?.model, - maxTokens: other?.maxTokens, - stopSequences: other?.stopSequences, - temperature: other?.temperature, - topK: other?.topK, - topP: other?.topP, - userId: other?.userId, - tools: other?.tools, - toolChoice: other?.toolChoice, - concurrencyLimit: other?.concurrencyLimit, - ); - } - - @override - bool operator ==(covariant final ChatAnthropicOptions other) { - return model == other.model && - maxTokens == other.maxTokens && - const ListEquality() - .equals(stopSequences, other.stopSequences) && - temperature == other.temperature && - topK == other.topK && - topP == other.topP && - userId == other.userId && - const ListEquality().equals(tools, other.tools) && - toolChoice == other.toolChoice && - concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return model.hashCode ^ - maxTokens.hashCode ^ - const ListEquality().hash(stopSequences) ^ - temperature.hashCode ^ - topK.hashCode ^ - topP.hashCode ^ - userId.hashCode ^ - const ListEquality().hash(tools) ^ - toolChoice.hashCode ^ - concurrencyLimit.hashCode; - } -} diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 7b23e44a..de768b22 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -1,10 +1,11 @@ name: langchain_anthropic -description: Anthropic module for LangChain.dart (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.). -version: 0.1.1+2 +description: Anthropic module for LangChain.dart. 
+version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com +publish_to: none # Remove when the package is ready to be published topics: - ai @@ -13,16 +14,4 @@ - langchain environment: - sdk: ">=3.4.0 <4.0.0" - -dependencies: - anthropic_sdk_dart: ^0.1.0 - collection: ^1.18.0 - http: ^1.2.2 - langchain_core: 0.3.6 - langchain_tiktoken: ^1.0.1 - meta: ^1.11.0 - rxdart: ">=0.27.7 <0.29.0" - -dev_dependencies: - test: ^1.25.8 + sdk: ">=3.0.0 <4.0.0" diff --git a/packages/langchain_anthropic/test/chat_models/assets/apple.jpeg b/packages/langchain_anthropic/test/chat_models/assets/apple.jpeg deleted file mode 100644 index 62d7ca92657957e3ab45fc3ec7332a9938465b68..0000000000000000000000000000000000000000 GIT binary patch [binary JPEG payload omitted]
z1A#2OQ>CCH^|C)w!UA*&B?fMSRzfe}w*mN%Fw3FG|=EkHdKR=8E% z>2HaZ&cbg{FLotm?M0_lquOzDNqmv{e*#?geaL(i=FoRXd4yl4$5)J2-JM~RYvwVR zKN+r$ge15F3l4VpPtU|i_AIq$F2$+XosCqhAa5>hC4%?dA@xpmCRQ>}X!Dl;Zm1@bzcFV%C3;t;uM7wGm!6aI$NHq(hWbuM&l@0`3NhCQL zeAqaR6hN}N4mcCrYF44WHz|!tv+^%R zmzRe+Rf^ag2~_7Tm%(f+%RR?R#s!75cqLq0|4^}hi!`@kW1f8HEz#7^-a#2A$<8O& z1~>&Zr@sxqKWmZzUE&dJin6b#?+sX3{yZJ@6bpYU@>xs1!@xedhAWLY;tRqhy}ZMf zZ4zZ(-B!|uzLTcohiciPr!NKA3~Bc-?rwu<>BJJe@l=+2O7?w(og`X3DZ+1xI!0kd zj@0WuV0~1sI~^dB@guDWNrD5PmupT8GNj~=JAao!a8Ru14&q_`=eHtC>R#Z12gJPe zAcz{gU4TjCZZ6$F#}S(-UGYQ2Bn&Ku&gz(`&4Q1x!_1E5d8aC)0`zm zu1aP)glD!HWeogWl9MR~wHH!Ce9Uz-r_tlx!M-!1kFd+xhY@O6(n?ShF-YhHc@W`q?c5?Whcc z8y|fCv0OZ|E_}X}sGaeR@_1%m$j}a#pi$q-`V#WFKUnrX?`{d`PdjgwuMV-UB~AO4 zjLO4m(ZGj3gZipYRlxUXXpDfYO%3l&$=2nWBYGc@UA~zb)}G@hQlLIMf<` zP;6a03=Hlh2)Ab7A3h{6+AXep4Y(mQ{gT*mry+KYlPeebh;ipvT24ji*L#k?rQWT$ zpB#!JeW*zA8_LKq|G5MEqUlz-1r8$Z_iu9^Jl{?p<{E*JRYtF9?B8tHe)(Oj*6usKWSwgVYFO8{|K3$HRFcA~*bq(DC=v4RR@{(UwN}L_6>AEX$xv7l0sX~8a%X|7!Gcy&z=k4o*|Mk!ZEJ$V5KF-zr!3h@Qtvad#KJ=Nm zi-jh+>3Q$wk&O#NQF<-=(X7*gC(EsuQ$@5hjIgx>go90X3wz8SUCE@XCSsuwXngeMfJa#+&yCkI~e$-Aj^8@Uh_B3+v>XR{&wM*a`x_OVPTN+gn zT=YxwZ=Hu~P}SX`U(NeIS%}ucTyS=zu54?byB5tknF&* z2*oTO&dfe&l8Ai9jsRyd^dVxW;S+eAeO6=oH z-P;L1%PqtY-s=0uph3@x<$GO1)r5DJ-=GxT@`vH z?+JYr#*j+T^l&$Sv6Wy77Y(6u{>k*b_04O*RP^4v!*zQPGn_u6k@8X5nqtbz_%>L2eoAV&lMf(;VyL0axtY1GrcWTZuy&1p&;|K6QY|1;cB;=*hRuN zAmTe2WnVX`nj3?|$@_hKq%mE3f`TWX=mOUmjIifYY^q_ST|Icm2JmB9ItDg<1C;)t zw@gR%fM&87BT#sWHkFYuP-uCPOn5kQ_((us8uy{VhM@tOdHP}Jz%6MP^Rb_rXO|BD zxgRRB!$4bFJARfH?f?185OhDZ;s!JN;VIYs4wEG}`PYLbJ*|I92j;4|TZ@21{A!=} zU!Mua8w1(f%Kjd_S(#$4giJeuPw0x*-!a$yn{z)++9&TtzxjHm8b~`&Yy3$9GC_u1YcmGX=7P#cZ9V&R>O< zp^mU&zmN@^w;7$L$Vj}%8wPxnmEehV?;Rp+tL@wFR&Qv1Y-ze{=9u_YQ7GyG-FOOD z{hOk8)-rXb)YPUa;o^tfD*&4}-}?t&iD7k$M~>opMqMF?H^8B0yKX*34r~i_iThId z?<2yaAx#$VJR>)WXfXiIIfspin5**P-79B-Y}_8f2N8wPky_GAiw!mMS@p{k1C(UI z(LGK}&+}Z5|GAsf>@DDzN}n73Xg~LI>==7Mvo}&VvF2UYo1sW4;s=yi=d}y}39vZd z9!xENzR{K&`4pCE0g@9)^WXwDPQ8#5AT$VWgBAWKqUm~Qu2$NPd8!6p_j|UN@MXik z-@RcjgS{4BDT#2)M4+v!gB53E)H2JApZVJjk*(h{)je~RP97BYq4Cs}fVJqtykNAV zS7r#o@}^~q_Qx`jil%Lds`l$N!`d2IPzh=W;>ucfx2n_<*3zE6Dgkv;*4Q%~4Blhn z37?8}gVr@hJwFonDSKZaY97}8dWJ-{!RWKWDbSJaXUM|cF6!|9Y46Z@IaICz6Z!gjz=&;=tTHX3o7qIV{pXe*YvxfF*3H<~3 zn=c3@&a|Hq3X}L>;wb75!Q6LXpupn9*MhODB2U_DP78F^_RH)L7yja1%8G!LB5g&j zZL1!v@jxC1U?jA&e=1|GQ4k@>+B($XcF0dETd&fMBxV}Z>2l->H*NP9liwqy;2cLvOWmafn zNEA;Hj^GGc2&y=OJ&d=~9vgfWa>c&DJ(uN0t36y%b01uBe&6`dk|kI6C%brd2Fet_ z0W{3&5p-zd)PsTIK`yh5@o#z>3%!LC90 z5h0~)K<&v5T$pSuwJTk3Mb$VP?7D?A-T)|<-dZDs9h%F7L`bMKa|l27@vNB}GTvEk z%IsuG0m^c|&VqlQx|%Px_6!X^8rYl{lXQ%!K~+(5XPB!uB;7OoI5KOe z(wZV2IbNI~+BIpqZ9TbH#qSrU3CP`ZMk$Sk3Wg$x0uKK9bsGTy2-?6uv=bOr=&80_ zdjTHa+jaU6xFGkryV)K+E1;?br>B&x7fwoWN|wBdcLR7B@=F{yAPihq|GpOWbdRtB z%ZV>hyr?5;tt}k{JF{QSftKL4S#$je!m-vAnnJuCkGQ zOa}&i|6mkg;U}`t73YivzgM1zCBE*HPx(9yUYBaG@G1PGSJAz5T$hWTd*#hD_u*?8yK)>|gL}E6?xQfH@gta389J z&9>yb+JZ7N_|E+QF`g{}HM7{pvxW>ZjRGEkhf%*07lhnxw<;Ee2mbv(_kagLDUck>aY{P?7B-o zN${7R%j};~xWk&wRQa5a9y+|6n|Gn~xqtQsqHfEwPt|>pqMJAUYxbW^@P#unlXHa( z%^(SR8P_E;IA1>3Ln-DXA6{qVRme3&&m2&wW%wqJR%9()AttMQcTy2k zLONbtzctVc=)x`fEBn87P9-}OY72l~tIMTP)!#xeX%t&Iaf%0y>+r@%?~Ol#>U3P% z2b-Yudw;Mfpj~xWHOOzNhn;CDVL{}xRj_Z-j>KdO>F0m_?7lG$2c0wjT_`^AnO2i( zDT;w{X8ll=ky&3CY5$Hv1hC*gFG6xS4#Cr+ir?}z-u4Ugj?Dc;A~^P2rt?S5FP)jU z@lSH{7gmSo`Q(2b)1usHFWKI!9_}i)uJ##Qi=ABohu{v3zKt?=c|#hu{qQ&0{WVRMDP7RZt?IH32sLDb0Zx|Alw+g_Q zCkaONl5{EH*_@lXQ3XjqSk2wAoTmtk#C?)%|Cj!?;{Inf%MFreiwp5Fh$(W0=C1gk z1Xbn2=ze)4Jr|?1?w?F$r`bx|x_hs}(Z9ve@{M=t^NobTs%!LYy@1Ij{l;&B5)O|w 
zd^Y>_fM!sDDxjxFCoVP5zKr;|y2+?(zAE~qL;fZbSyHwr?gG3oZJde_$SboFIO=`u z%f6FyhLTJk%=Kq1r6$>yd5y+o+(F1>^P#Byy%Kx`u} z{p|VM9njqTwIVUCC(0}xQCGf^g+B8=Y}8{xjB2jV{^w} z*6Tk{{!M|Z^I{{2B|vHKS-SR^yqk&u$x}d3HrJq=Jl}AT{AH3QZ&n|M_|rWtvn5<# zKP#-+4fb?RaP$nq{p)eWP4a~sr&9jHb$_&iuiZvJd@&*Yujb+g@IRes27@`N%-? z96(GfL-sr!wSJaoy5t1X2uMybFvDHmb3C~h{Hj+%^?^o}x_=(%@mUurUEZtUe#xP~ zHTG7WO?5Jxh*Q?+08 zp7xU=l}pjOKIp1t9Ug1~0reg;iY4z(r-v%vPUh^cYU)BOW{zCL`mwoV7b%dk&#(1o z$d-fl_S^UIjUrWpcQN9^j(RtX>%-z;=kD9y1*nZJYmxodZ{67ri$gllkFu~&K>IyM zv}SGp6Ckiz^`7Tuesn4+!}4v-e;{$rGh))GJTTd;aanQLsY=+|-(;h;gy6W9!B)h&_>?zh6l$~8CEUg>u< zh#QSNt|f>*vs->|0D-D=YZ_0%KA=_VrPvLX*43%j>$q2leM~8K*okZs+jHwEue3M- zXu$r$=4fLQ%B3v#y;!X%;=8D8tqO7^WPSv@8d0a*@#=j%rkqsj3&`=7(`SHQKLThy z%eg5RUSJHn@;4NbuDx)HME>3GL%(PE@IY?EH+tre2hqnYW4($5wBiE0QJ<*4wPB1*Q7pvBNv*OOZOR?=JIH+YaVUF}K9tjk0H?02>cI^%AN zfl~*+ncm9$s5{)Do0X9?k8^_y{V8M}?{w5f=G&d0!NTP(g@rXlsh?E;r{GCT+Aa6{ z#?-IEX5u%=A6&!pJmlfbZsMd>SR@+~+SiD2w35`EHaPEQkFX~`Jk|AeMk-!UJY@wq<%+PC+*B4fUGH z?E#7Pk-wtvlU}CumzPCgnCgBnH_Pxh(@y5Y{%JXsYKO>@DPFho?<<2c`?(gzI2nOjN@jRBA#}M=bG|mwID$(Db8`k5 zwubPI=)5``GNu%2$+6w@mR)&Uf*sEhz0b8r`tu)_{9W_-3ga8cTo!ptlmBAM^Z>Y1 zjF9jBgQaT^y(HzD-QFRZkWz42;yE{3x0L)$(2atl)_n_*Fx5K#QT>898?Z<1u{Xv0 z+7(abBl()?6gyjXL*FzW!2eTv(7Q>NmgyHSXbXU{J_HDH{ag!3gfzCqw7e8$c_2aS zrAHvDGgOjdrZF8vmHx580|K%H{G>3C1)n|T6>-Rtzza(b%jK$2uVY-rkTmVMvb6xY ze=kIFoy8tELo$1gJp$Pd+x1cG00A2lR#Vqzz{=gA7ai^0$gW5eZ#^J_Z=H|MHF$8$w} zK$u(Q^AhpYAK^;`4F~sfgPo!xsw1%M;hO_(CMQ|=apUF@w?*juA;ekF+r@JB?!HT& zrPTh_j%3ZM?QjNXKuxR@6ke-`)ip}mw$sZ7#)-Vn@^&`;p4hePmgB$LM}2P*y9LnN z!wl;RhQVe+?6b@qa*-7fo9HrG897ZM@KyVwM;(;em%a#<#{5XVgFX?AT6gix*`Eb$BHJIzo$O2F`Hf(k?n;IWv*=- zL|{owf5FM2>!$*><37r%q{Qq#ZQM!Gk$h9~yv#Eu|C~RX%nlB0yZ?-AQ!>9j1~s%2 zAx$#563zNg>9&R=Jri_^pQh)bAGnGe(GjNW3!RBSj5gRL$3yYzr}Fwf6V$pmh16-B zh@HAL+j(HLYk6CbBSDZE)xYQC#8O;Pjca(*xsL)ePjxBJh*_CmffAhnH}55$c?rn+ za|2QH{6&TElk-Fk~-BsmtK8MmV=LVUSDcCI#|&yOFJEw8Sc3Wik^ z)-!DFgLEy+RKes0Tk>aiX5%XDK*o$@D3=D-lsFBMx2ebEe8_FT`w8~<7TN=SdT!WvT*?Q&R-??9}8~|aE!^$Cd z`^7u0xr+);R9d+Paoda$!i~EBp!^q=Df^>oFEy#Pz=F+#*H70W*{^orQfBdqEwYs{ z{rv->KVKN7vTss^{gnNmwj4QPhxHvab@*64$-G%VknM#ufY)kPhiYKVXV57n2?qCJ zA7H12goUKzHVH%*m0U&Cejl>rJI36LrPr`B@G&GeaFekJ(rB{-qDuc1HXAdQIw4kA z+*`)a@&01i$~sOu64;(Dkdb}E>3@x<_3ON%`JB8Lyj~BDwMIQW`P9vpHRm1ut^`G1 zf*9$^#QjSOYkMhXX(Nw5TuSY!ObNWTkd4)w4xB^yd$;% z5i$@S{bWOOP(^D?%HIdX#PbR^oa5ECWd-W5%UK82{g`n}X&08%EVez_X=o&dJ<=8C zqoVXaZB~bn02GHD#jbgvh!8hP9b$bSOjekMI9q0clwh9?}N zogXkgAN{S#i*)S(xpMWcw!=m`xiDl)4{3UsZJNZ9W;sR&BvZp1svyzaxD*SClr_YZ zZ}$v;%Y1fL35ufwkevTrrhkAGfSe_(lUyYKRxoxpcGZ-Jih|rLu0cy>;2&U|5ul`Q z83VZ?FOj1Z*i4go%Z3O;!xxm&nb63Nogq)n&Qkg z{#ED%nnxF$b#yIQGZx%uIpD;huScM2OS3>rKIjL3Xl<^jzb*y&Uf$Ym`DQpLWK?vZ z;mb5Dx)gtICvjI$@}Ubx@@S9b#yAi>1TRzqqZHv1zJ1UGG6()qy^OogI(4I;6Q?43Z3+CpkOhLmOW-0DY6VRb=RQBu5P7u6eFf) zbC;uwJ@4wf-kp4F9Ipf|#!TaJleVpwDZR`n&FLZdF_ov+K2w#hTcyRkPn9gZ(Jzwp z&N#O>lgeq&$bRQo{Rg4Vt4{ppDO`sg(b#<-w~ia(ONqmcJy8CA3`_;>R|p+n*_=4o z-rj=`&}83)BAoo^Zqgo3Jw?>N-HO#XohE-Is8Q?7g2|0n?3|W%|M?#pN^O$|2RRtS zhA*8X;sRSe4h6$L%v}oT9cv=b@*S>$X}ned*UG(=tKRLIT6U|VQxT+Y{yl)evFBbZ za_O8Z)pI44YfR*cUx?Lw&`fx3Q?8Yfk;h8Faeufwp;2NQ*B6Vk9K( zE~?VvMB9^nR+`3o7$cP->+eI(psCf9UOQq-a0Qt*!S(x@$td z&W~f=VQNifyta`jgKbD5T>Ur3(N7G|Y(T8JHzhGLf5h_#3|pMb!E>Z+_es}imNZIr z8&LA1qsuXz_9ZZ{CNMycz7OG;_g2UOOhdy4j?%^PeGZ`qni~!JH_Yvq;19HEk8*L1 zcA?_}e)3XSIaQ{}P3s-5_kw2ses>X|C#viLILKUcD)!ev}88{XGyMs?igY-;JC zguMzLCUtp?p&R1yJbv&7mw&``aLc^$!HYudSWpi2jQhjg9ZA=&)Ex1?5gX0=Lp=r= zRy#5q=od^FQx7#a1L3~hn#Or+LO1#gl!kcIcKcozJmeq=a@GSS7Kcr>zq=*4Z*edO ztdzYO$E4`LrFtt2W4^ZNGJ}97Qsj)F36du>C^DKz2b$2Xk*G{jzSO8N6j#tNY66RJ 
z8pqyKz9Oy7s9isZ$Jv2|e5t2fyU9O=H4I|<+zy6^A{bB0Ph=@a<=zRL@dX z(gbo{QRf<`<(zw{RA&Lr72Y#a0a)jCv*8&X z>aY zY!};r{wrj-R-;uTxfUK@9JCSewz`#Pc}g}HO+Jj~lVgs`{E3Mjf0>U^4qmBg*{fPA zvC{g{ZP*2EOry5#o-B`>ThjK2chp=h84%`kyA?7+1+UJkT@X6bB(|X9AsHT?KWRp! zo{J1$?cGrDaA!obat2-+dgxY^7>bbWw5Mj!l~ShC-A`hQNtB_-SCkK&Iy-p>{Z^3U zF@n&g4iNk2BWt_hi^kKCEiSTy59_q8`$zm6@%hsHk&26d`x<5%o0YZLS4tA#M+h_- ztw4z#@oQ|sf}G4o2afD*_w$-~VT4U>CSNVHY1&L(nQm~~_mZA0E<^5l+c$s#j>p2b z*ddd9K zdfChnm|`!OrN4e@3Vy~iWx40Z#FmGvM`Ck_e3hYz!5%t74&-jFhb5BWiyR0*d+a!D zXc?E7M1#XuP_3D*?0n$c;%I%BrQ{r1x1@~~Xe%1ci*)Nw$dZ2!Zw`9$hf-;z{hN%M z3x0waGW~2r&L2WN$hh=+h=yB!)SI8%%`X}bIBTdNLU>6mumU(=Cw+;GhSh>J2=5SlZ31;jE~4PBYGM?rpku^wvAARw4$f zNj8eU)edS>=b$E?G!D8N6FvyXDWh=BT5slvA1to5 zh}Sp`XKl)btm$1PSm54a(T}mJKe~3)fjXrI>rzb~A zUqJnPQ-lu$NbksQ%}ignbC;CnoZ<`%ps^^%O9P1IsA2w%$#20G{|jmNhi9eQX-2i0 zUY9&0TV&=H{w64@qJ_ToZ-PGp>wI$l*v8K;sD`1$mRDf2yQ9{Z0NbS&oc9$J?9< zFI9T$w`GQS(-(tGRhC_re`tC=`~O3xbZ>T;P#&rzSHCionwOUXb;z2NxF73P<$ zWSKRxv-J)=BhJGw+5Rnhe*!O_pgPu)$kl(S7~>X4jnz$(v-KBNWqK&xH7E7vM}8hj z2%NZScTT9bN_$V^!ViQ2cX><~swo@;dfk(aI%>ev_CK#_R+rsTnG{5gb1k68Ye_vn zN@%U!>g0Ri5Zsi1&SIw-lA<9DA3V>Fd8akmiiX71jt%d--i#4~|&c zv=WuO`u<3Sh3K~dAp_O!D(nsh6VE>+htY?`&uTY%DmOJc1kWpfC|p)|6;lj3Of2KV zQNAgElHI;9Ww}iSYJR2^HS%%#?kVOG4O0+o!r{+la%b~-LMEg-n_Bq~F-lP!r$!U? zF!C{jO}4^UfC|%Iq?Of;&Pdmqp^dc!qr``D$Fwh~NM#YaN%1$Is_Ips*a?2{X(Tn( zl}LtVQ})*^qXIc)y2xqw8;IV&3q8T_WId)&9XbcyZ+QdEl#z9``$C zbaW90lDe4%m6)WSI+MX#6o_t&@t>H=T<#9JLmXJ^8c*oBPgE?o3F)6Ah7L#-2WnQ>CiTQ^u5f+myUAJiVjAF8+jgFM5F?fmn zM1w-zJ542Aj;E749MI3Z*R)ruo7C(PHy8!7px(&C~Ov+EisQ$|M1KB}LAl<9cDHZDG9{?~O zR>Ej;F_Wn{OpNtd9yHV`u&K(*aEYKfnv_#z{8RG+bt6HwK96@QY}iSr#H_qQ9Cr+0 zLosKwKTdI?{}Z6R_2ER5z9_{j-5%444Jn49qzoxy+vXbTH-%)j1ltfgaipkA#w%l* znXtm#*MoxE?)~Z;wW{w==x$x!O2lNa$8;iOjv+=!=QYX#KE7>2h7>u{`_4SR2(lG} zwyZqVH^3ii}9rhP~+5 zU!xbVs7^vY?V?EmYX%bLP82~LZ8k2P_GkJ`Revfb3bccpKB#BKZ9`48vNF#Z>!n5w z?{1mANI>vmzc*bnFQ~F`sLKfzzF$1a|A*Vh8uIJ&e>)M zz@=entHZ+=E@=&rG<~co8j|4y5~uSn^fcX7b!AL@>dEw-!3h}}xz+#H15k6goFxa! 
zsKI{%pYH|ci;hVWE@2c!p&(vO&m{W^2?|+Mc9BBNAMtkk04K)hAICyGJ8S>8IQa@- zur3`;p-Rdj-Fwh>dYH`(1anXR#v=BdIkS%zLv8fp)Dzrxh)4o%at?BZK_{;x> z!(HXH8b%gvD8TxaSB7LQOkwn531E3VOX<{T*=A1SmO~<9ZrwduuW7TwmNca z?YL5z*C1Uo?8zN}$X@P6qnLwqMCi&HzMi~Y8UpY3n_#{@sGlzq)U=FVHkN>5NzcQ#A z+nuFLd%B>;*u4r77w?B(q9~SJ4_`Hs)!Yu1nXT0%Org6grLo>DYO0NxNRVH9q z)5r)RBqY1x(3D$@| z`Rm#L2GnT|VlMz>Gy2NjMo0neVA5LTm`FR=jpm%3cDnj|!d3s8wfNhacTH%U5|<0_ zYydhL!6%NDhrf|55BgW6kWrqb`Ll!*(H-XX%c?<0^IngaMYJ z{Dt3>Kgy~}8XDM2Q?AKIXW0NlzPZS2r1p0z6Dy2HF-xEle*;3lcA}`TW<&#{PDBE7 z{P1O0N_ueqRf~o@cosPqFFW7vnWXgFW-(TFw{2TW+I^DS1DF2V@R_YLY-aQ~=K~q~ zG|^v-V&EtPC^u=eFd1*oJxHlkx?n|@Xm05FKTY2r&h-EO-$5cOM$V_r%pr$1GvpMu z*%+ddLx{~Oqe!AqIghcEFo(PyLegQINQx4(AsQ+}CsZm5A?JSke6Q>Gx~^UOZ->|G z`MjU^{dnB><1t%#^NPEpVF?9mbl}bbY!T?j-sCj&^IhQ#(Y~K3-G8ZB>29+m%Twm; z#wzV{*hAez`$!+{fCjX(A|)w@cE*V0l*zvxt=_@BxIyDx8+z&1dnUk<>E+ztmu~JU zy8rU6@AXUB);oNTkm;^iyXO;>4BI`cstXGbxtFiZ+T1ez!!9}c;cv)p#)x&ig$?OC zXw%=EZ-37Wt~OVO4q?$Ev0hK2^mobFi$N=eZF(yQTlHdh!*&fNgo5VX zh6JEUV{-JM=OV9m_HN-F`kv{7_)13H_0%MZp9d`uSV`=8>QM^#YaVj{m}}`y*5@`@ zsCw??bwMw)x=mYWoJ}-%OEoWi7IcAKC6!!3R zqUp7upvB|EpHtA@9~U;GY>)b7@m6doAwyT~X5UNXt1fFuan-HlrH3`dbdLknkgwvf+et+82h(qG)#(3EfZm|zMX)qANXQ>HGFmVelyS~G|nV~me_ z4xDSdve0H-5v!Kgz~dUGu`M%Rm+$8s%f7{@%0?TXA-Hvd_mX!47~ert>Yr%v^Fzkd ztC0KN#;?9ARO=j6{8HU2#yco(V_%35`M2Yzj>Ghd5niWeXNY3du&_8y2}n{ z`4?m#Yd90_l->HvM1ARX&&S-k<8Q%48|5GF*9#TGWaEK%X&?L+@}MMv&geeFYui9R z<_&3fWT*N}A7<~go&+kfaQZ$t=rudmN9}2TVSu@Ajl@c{nfUAS&#oLFXsB|(@-|(( z8~*SMXMS~pakbELx2~9K4Samb=ZH|^R}V8}=l8Qz@Bz}*qGQi%ir`9H;COB6JYHiD zWIBEPP6Q#KhWVxXRRDV(R z=m-UC?F*8L86S7OvQ%<4aBM&;a9L&Ef{H^~e^;Dp)I9qocd;<-CWcNr0U_bLK+yz= z*H&-^8+h?AzH3?;`Y7W>OV+N0epCp0MP2yr`6l7ojlOtCTbFUE*nzz1j=j7Zv{fN- zj%s)Vcr5$y8Z;=akt}&e`IkT23LJ0qg>)r8K~d=s?Hk>GPm#o+5bMasrPWf}zX}b&lP~i}E1O?Dd^~&fB5zcU7RHudD8dP)>9oG<6D^|ZO)_JP9N+UDn(rP5kq3{T!ztaXUFpOQTmQ_Y7ZUleSLC!miH+_<6LPXq?FHd8vJ9BUf> z8$s3y&IJ&AW( z*AWDxej{m<%iGwWkSCZce4g%m-ZhiPhY1-J;un$skdqzgq6^m(9Ci%9FlT8t!uUx5Ta9ed(R{ zH*p(yct+;bBX?@?F~%p}{E!UqbE~q2FD1%UD{j*ml_&;D*;K5Uy|yYFmnfDMMkUJK zk#mWS+t6cUNu|Zjf3{7v?4ow0_{u2;bxQ>aIfh4ts3Cjd2k^O+olyC%vc6F0D!?k- z7kkt>$a}v|f-g(G%tzv2-Zx#Tzotv#gS>L>E6TJV0Q`~6?kxO^%iL?t&lpfCD@IY9 zjS+kA_Grh&{3E;XSRfzHU~rqnFMsXM;&&^3t0`j3cm*baoPJ zd+pbZ#U1xEPG~dfXCB7Bsd$8c7-SuCZ%pBs4C7#*YKxrl9sZRt3-=wUTjQBBH!3vG zV(LKE5(Z!;D%FWS^{|qj?iIaqWcE`^bDW3;6@{-a9jeW-6C9;+JL>11Cg8=9Mbjl# z=LNskgTODdg`A(l#dDQs_w=R1rhZ9o>px{+YnZZsn;K++b^k4}_ul&&|98>sXj<4e1gvPRJ- zLxZB^pJ+3B72mNm+A9%JlnoIHuqVZI>i5>3mZ_Hc)PbIKg~iyr3zKe)fwtY|OY8%m zC1$oaT&jyhHbTebs511Y>}~IDkAX`f{~iU@U9@WV^OJ68vs8Pd)*;;}mr?8JfME~fuB1Q*v?o)em{d?%x-DWgw zIkd4`Jcx_LkvjLxr?VH^+|WbzM=cF_%j6drx%9MO!qUD4jGuB%15Tr_Im>(N3yY}~ z(h|Feo`1y|NM;}|?YnwTY2BSZ(;dCkc_{${r_EBV&YGJ`9u%sN*H-?u-Ox_6JahiV zg)_UgVA4d%i$CW1Qem;CVQOi~)GM;p(~}0+T7o_V#%?wK+9{{c89f=g0E)|xmE0LD zt9%RblDHMNSJk+2z)oak<>L0ae3ihdaT%UGvSaf)1Yv?0o@0X*VTs*&@D=&foxw@t zwtBDs=dpqkiR4LtpD}2MV|I$58%>9 z8L}b(J8s|ntr_$1E8;&y3b)P`+Jbz`Kw`NI*GPY~ht;q>^JWztAM$>Jsn(c(B5Sx|I<>duW=JxzO0a;~t~1Xwq|tg`TDeJ@JZ#-; zJ7Q-4m``#~e*rJaRQ~f;N%4o) z1vrn1>?fw#MEy3Epq5wZCIpH8=S4H%GmIyg{(n0*EH0$sU<02+jpbxVHZ#;U3A7tP zR@)#?J62IkhS50@gc82px%*o5Cm{WD*gXotf*;BQ*tR^-MFh;=IbyHDwI5jb?VT;- zM^lVa4xX8|W|}>&lE()>I-95Ab;`*}(zbGneoSd~KU3UlBj#koPm$o;AUW6W`NeO) zZ4W5Vv!0+rk!xXw>4~cG8vs)~szjmmuHtK*(IUJD4USP3jF!Cx%nWwA*Cc(+#sXdX ziN6{%cU+By$cU(d7eb(d1X`4=`IGR&%1paz}wbnQAP$ZmwESX;z*0DVrWE}_mb!7EO zdr;(RV)Qn@t}sTYQ+~?U(xy+mH`Fcj-E`q4t+Uu$?4Re)PBxokp65@W9q|5zG6nKK zM0Z=|e<9b|p%@G};Fzo+Rou?wXOPM<4=wxvZxY#R3(hZ9lKx`eq5W^i^Az(x$z5rlPtc5+z_6BweQzWTBuWCI{yTv>9;%!2r=|wz-7H_(fazfA?+8rj3sa@3Q 
zq~}MqTg93@%gAc})p?#=nwvN+y?1z_^0E6wYS{NkI>*!!WB{;;Fgcm>-(~Fkx9S(0 zW9IIEzjX1J!UfLR;q(LS!=kg{P5Ikn(9BjwOaM~%GUlQZpBWUCs^x3^s-=XxY8r5Q zM|0_D@uH}E1gSgSbRlDBulM!@ICa(`Q%=J7KJ3ujU9dKw8S%Z}<;}mF6;^*9tc|>X zrd!x&JL_aW!^?-eZ4~`Pn;p95WUX z&r~OaMG9X(Gu}MfB^VXg>fDs3np=;ae|XT4`f$-ElQt~*_pFsxDs(v*JG;*MUA;gu z!aA{lF?%%pEHEX>EgeB}qd$Gj2+LX%T3Z2j_n;+Q&YJK1^sH z_DCzmcWF18RPe-c!`}1pR}MI))CrCFp-er|$kB8K_FTTR4cHnOr_(BN%Xvm7Gk5fr zyur=~pN*;%Pdz&D^=_JXkH=hoVdhev*YU96vo#dLUg;!wu!$H;K4#e=|odMCi zBNLO~udc5yi)w)FOHFHX=Y3W;y`@733ZLgkC=J)j`~{3|9A+IPu53{?xqBql>APUF zGV4g^2?@moGafMOEezNgj)qGqNLe;Is-g!x2V8|80UC-TUj}t1l+5PkhNg611~)?}Rvmj_j0u zRd|E@?&Pz8Z;OG+D}q!&Ir(vRN=N*r>Q=sH^-+8B_I&;2+zUxfR_rT^ zZ=_ZTnxler>BE}B{+B`G9ox>2&Dqsa(nkt+lDCb;6ctC#e*3q>M*_6o6l{lV8y#kivXMza=m0LO0-8Xo#=s}54O*6aqi zMS`9z<%Pld#`l(l_y`G658mWxgu0vC&zjg*?DTP`#%K*wr@*2oeZ?ueIwMxS??fyaZ7{d!(MFuhANixAT!DdDOx$`sD~ z$5H7e&;3Ua?=1elcYz~(Fp0H1=WIJ*+gVO{H99fw^aIWkvO2*}8mQaf3a1YP8ENW2 zui8@{kNyEq5(c zUYod>L$htGY;M+MoMavQ&h2|VG44hC#3@+#zN6jF^PrG+;!o^*Zr3f&?d4~yM(q!( zH{&%Ies5?Ny^k8#tgzRPDcGXrGgUekuRKd@Q+~Mn2=k~0;eL(AQN*G4iqWz_`fj8k z^&(}$@JqXb@$O}1T}J)O6RWq9q!#0So;`Lq?t;w> z3JSE1_jfd=-EuwqC~ow_4%eMqy%%*9u%cuWSloV_QvS%@$U$7$>LoFh^LM%kid`u+ zT41zqYI+wYk{R5HonlZm)r7u}#6X6-)(88qP&XDs*&kt4mvYDDUSDX?S5}pBP^Sv< zY^*}f*$dYlB`-;Q`|;P1St!?meXjCv$Ex&ob5EHAv{l$1!>`t=MekO>t(s5$uoYVy zBxh}QsLtC9mmt451I}y4&!~Kp7#)|ASu@$o;NUpGt~WM+XXEwKk=VYcSq`ol*p{Ds>K8@?y{l}q2+ zggpDgjyRCPGZNJT30ZP~?ppk*^rjM@1e zsUIW)qdrcQbzVt}1Wp+lZk%QB*2-w{;;M}9WIeQz_|0der%LyBuXSEn_)^}KG8^#Y z%zr9Tj~r)3jM?3kXPjH;s|*gvxg0;`5$SGxhKxzIxd@YC@p0;XuYhLN_L2K9k#oij- zcDjTuY~NcgliBbIqUm3&!<(@YlxAbsH*BXx&pTI@T$>!k=DD4Gg&}^`_Jo93{7QoJ zuTjN=(RA2FwF0xSQ`)NbMW@e}j_!(e&=*Pb=I*@u;C=sFC+L!B!(ZzSb1%M0%6=I% z-!j+19-S{Rb#FV2IQiH|V^cNb(W(Mp}i{n25 zKH56qY&oB8Ghq580h$S%q1_`ju9!IvaPcdrKF*WQe~y=$c8Wh^`s1(Mza6D!Jn@i` zfU`%=`}xyxGuJ5~lepM7i?0z(nlTevDOC$`3%ya{S%t7x+3^rx`w!jDgDsDHZt?Ks|1Ng%iX z+o9@6a&)+X{7;W|GH~On8U;4%;J3zvy_8GX&|Ys2ft0m6eHZ@u5?k;kydxgk+w3$uJ1=Pe(M7mgi;n8CU=QI(7j^?#$4%!^((tzNmkI)P}Y=LW=aXoels5({k#0#jxTvfTi9DhJu zzhUrlrOB%hN@Qik52xNQ6>_UmLKc~jX^wkYe%p~wr1%mU%I-0Iuzx;o>uq~CGbaU; zUEHx0PzXT~;z*jBO?Id?BCGdsv&;00OMH`}!=IJ5{h~|cnwX9OWDMOlP;O?`ctMJI z1wV|;gzKWM79CDDap4|bPO2Mv3?-h!>uNoY+#*WxcCC#cSh&(}KTx<_DQ>{K!|(QH`|_lUKrwPYpX}F%FytzokvB!bM;bdfThXz-1p>gDmR^cBg4Ue@s41# z`X2heqG~S>rSDb9rhdyaNAYW(=NcDV2aTBUm7HDD5i?Bk2ZLNd;?GgK82di!*_ zJ%iLOLk{l`X3PE#-qkFkLL@eOCUCpriD5yaaO3H%SbT}v$Ccz*uKB+m!KJwTNFey% z4moh92d>ioUecv)Dc3>>Fx$6iHy`Iy%tjOm+w9fV?4cX3fBtS>pq6wvk{n6IyFyg^t?@0eeuh+G0_R_df0Oa05* ze2s`?%{Gyk4P9@++DAbkxqK~yyr70P3V29}Oyg6$c>O1yL41s{oyfCY^?Vvi`43|B z7tVXwF;7zI-QF8G&+v%cdndGIfd+5QV~k|WPLJsbzs@F}OzFA#ik9Z8G&uzGC%v3?Gk+m5 z)a;6Qqjbu~{v3jdU+#F$lsr+jB?SH;`&i?V-{MeC-QGOb65%to8Ed6U2SQMEx^w?p zQt>ztGze%wi+aJ)nA-Y+B=2%k(_k*sO@0IUUl#d7aQ1)vM~8@mv}V2^o-j=0mV|fS zZ^Xn9%REE%-atcIkc9kVro!pFZyxxWU<)DmB8^3GPYp0W=VX?zC!~=aQQ)j!Y3$;G zCN`T7R6n03={=coJ1$3jCDt|Jzkfa{!VGSFDqs7?2EM*}8h5jqpf8j@J*L>Rp{=Z~ z)>uR6oL)s%Rqoq3f?6C`uhM@abE~NOD*J7zs+@vA0X4QlGx92Ir=jFMJ|*N`I0_@M z-IfP#8)zTKeni_TcbkG`r5n^d^t*b8lN5qFz(JQKe$j5lg%$0k8T(9TgY;sMQ8tD- z`a;V!E9g<+HZDCPf(3;}v0||vA%)&u5Z@s=pg~H1QR7v;cnS~->(dDgJwpO{s^uCs zITHxKhL3!$ie-ndf6vIjVfLk^)TOT{Z2 zTWgqJ!*uJ)HV74IFGRZabMDK$VPZ82VtuM|=h<=2`nM!6;6G*Q%Ql(Ux1*X(rR;*= zFnT7(@MomAGc%i>$UJ#dTybcIA7}nEt1SDTr}c4x8~+j7XaA2(nv>1}z4;rxmRgO+ zp;Ob{2V`f~oCVZBR={8NzmA}bg+?QzMtij4kwa7_8>3a}J{NoSCIs~0_ z{BE~K{yzMfc`)3g7a-F_j=o(BrE84K-Y_w89yHK4nhr%nLDZjtgy3;&Nql&CgootG zZ1kII1=T9(z0JC9u|P1N?zXIaz_ps6GuyuRMLweVzk+kwDA_ani|#~7;nHlaKQ1Io ztQi?~Ak&SUSh1N|cMirQpSKO)u-GS11`veMyD1HzIQ=%(M3Hu09vp35alqC?`_Q4= 
z59mC=895}ir(syWH1z0#iWWW6pLOX@JW4XU)DbINk}Fc9OuAV8)#dj#?-}%9L@tK= zOIxE@a5Xhl$3w4~j~3`1r-6`7gFrk!0OVT}3PzP{C11H;h4 znQ%wVakU~$v{1m;SQ4DX`YgmII9K?Eb4(UPVUCbi6_p{qg=RDeDG8GFYVX33mS^9V zjLQxCI>=H4bd91=Gu{)EQ+I$>$ikxCS56`_fR>UChT&A^0{cwFGhPMZ5*Q1u>?&{u zfU@=Sr1Ao8k7m7Y%zJlNqdj+cNMmxDvL=nEeyxwPsg_O{3i-i>W98K1e+{7I_1?ix zQ>%R!7-|Or`(ISaxf)@3tuiElBa&tW)>}%rYn`BXx}M=nK)t>7@%F{0rVov2a==Y0 zAApbUGxRT>igl7Sf+W6X>o&t;z&4(G4Gz=UUCcOt(tcC<$4=qZdQ1gqWtE4HEP9kB zkf)=L(^ah9a>HZgo*@JBogxMIFCpW&P35xjj%yM0F~D@U%H{9TyjtyR%*5=Kw-`a_PU9xV&?u~&<7?ayuk2y)lFb!gTh_G{#4+$}ri54c$wZ-3wH<^|VSRBnx&;^^Kq^@GpW#Px_9#CjB$}SpwM$r>=ih)$^%IQ z{xBz;Dgfj2^40dkEnTXZNHA&){1^t5dN1WM48DuO%LXS&e%B0`A%=VW+hJs{t@4?! z`yC)#U)@z1IRK(xd4@p>z?gU)La=C@)Gzm3eIA^UXDex+qn)d`9|m~qy%^UeN|SfH zb(IXOM>fG>O{3M%yCB_mB9KAS49Wk+%L<(<#G(80H3o(NqwFIu<{-}ArG$G@a-JvBqkn7c@poknm^ul zh|^0`c&E46gb5D*LOq$i?y=!CcWdtr5`0~ceYX*|UYV6-{(`(sx;n>hDG_w5|nf=TDVJE-oET;kZ~eax6ro_;gnbru&+!trST=v zg3d&E>>+^Ct1N2JLcKAdLenG`oXNArw86LnC=?3!W?pd^2Cu0Xz?-RAcT9PnUOKJN z3t2$gFMj(T!LBtq`SzS4^dUUyps!xltxnY86>+tCpVHk#`vPNGG#F!LM6&e_#TIv^MSuB`gFj~h0 zoZS%cs6nqP6Na|0tOCpXU?sEjh)sUhptY^t%HLMfcQ5S=$|3m?&8Yf#}l> z&wJT6EmiCWS&;S{$WYjhZcFJIye$x~mgjV{p~(nraxzDWi{UXH9WgnyUM&<`Uw{H0 zOyk8O=T>YrA^JHbrjv7dC*DdU#l;^VM7kE*7)!z=&i!3+3kE~F`SNfZ5BcDX zCcWDYhlS68EVpIged$!47jaG|HWY1+)Oog~dK7CBx?1H5XyGeA(pKa5@Rb)%ggJze zo|_?i0>aRn@R1WG=!8Z0kcT_i)IQ+?Qpre1=CDvzG)q=@16=bXhM*%q#_eOc)6eB}jl2i!M z--i;_i0&@fGM|XU>SkAJ!4&Y9=dUvxQ8J_)ySz$(sdHl%#4yB*2@0oD>D5)XNGTV# zj~3+1kV0+a;#bO^=R<5P%B`7%nP3OgArkk*31K*?opA%%XMrAdo$%&0CZo zt??GRF2UdUL}pSZqEO80xUO`RPyT)u_?g$cDy2c{z2YkTwaHBAce*2XA1a@K*Ef~d za~>cOKwltwHJRLLzfZh1(xTk-4Dkhn8{U1>7Nyfzp^Uvit~gXDFm#1>=S9+xwI?j) z3ElbbGl8nwf!&lTLg0diz6jo>*6`NasS9qcpN@Yedry)VNbkfTZrDl*(J*w;7vIoR zeV|rgt$8gb#A6y#s%>yG%kroLA8uGNZUmDEIZ$F9-gmQ$4<9l$8+tPgO(NOfUJ725 z+@hAqA*&UIj49nASdy0=MQ16cuRSB%yZZ*)PPU`UPMXriZBwvts;|zdlSr;{Q4qmf zdh=cF6uqu89{i(vF+MaYvWr&|%&?xeZxQa{=h*(9=YZIU>FR(ul1r4pcv%N8ifZNi zS^VP3kA^wJ5ppGHCy09!VWHCL6^on6B%CS2t^V5q!ajhY&oQhACs4H(_FD(Ve70|a zUTDr7&sjGD3BG1g6P=jX@N2S2&d1S3mRo-j5;8(tsEdHb32q$CdM zq}(Q_D#tmg8si>;_8eU)<8|gWzRda%#FnpY1xqfr?XU3luap6OoU0lwg)SLs&lDlY zb@ZY$NnE$B(g-b+wr%XK5ussAR}t| z*Z>^<@Otucu#jBUvF-vwz;Xa9Mio(CR&z|5Yu6|WJ;1GoyVVb6DFMPb%=)BCj{f-vqq z?z_3G23vjce=#nXw+sKW`-JBYTTbJ85U)j?5>P(@Z6y6=I-sC~u!ZoL=YJ|wZHl?d zHR6afePf~aCK9O@^qhIY-PaBgz58n1i~%v{O2(ge?2-4>^Aowe4L@R|t}5a?OXe?l z#Mxaz8(KdtKEj1N=piT1LG^V7MVbn59pX!sY89;~S~)ZPqrttU*pe++$;F&G$^BB7 zSE;YJnmj8hTYJ>@X|iompXCBCtfX|2y&p8&dt~>F(%>N_U%CbNmlG5*{j8O1<*?b|P@s<)H7xyOOgZ?}2fYrU3d+mGI^l!E(Y)gFZ zGm)WDh98~ywCDZ)xSsFcKsT>$io&50PX)h1;O%I*vzWV5)U>dm#bQ;Y8S8L=U8lo0xjh*bN_NRw% zGY_teIsUD0UUw~CN~}92?liP$HjYfzX`iLk*l)6~xQVq9|Lu76{brmaDpdnoWih2G z>jdz`!u!XbUO0JM$gn*#o@W9j7HF<+AKy&{N2JYQwqLsu6B6Nmt{}4)?(; z;QcBV4kbCP;7X?`WIJE^D&OwUFPA98a-H?i)-%EXcCf0_(_4@}3MeW~tzfX>+$BxP zybnAQ@8Smy9H!^f^(^@T@Z_Cal`8$c!NS=O@7Vai5lg?p9n}YP#OQt~Z^nFczL^2x zzjvudU~>M+`oaW@3(=);Pre!_eXg6cU8ui^wwBFp%5JbefMkOsO=G&vuk9H>KkoPa zZu7ay1cr(}E$#e^rQ7Qkob(P{zW$RMUO1;iM!S0C+6&t7*MPGF07?OuPpEN=?|y<0 z$wo@MRI7j>5Ln7V(Rz5**m=mU#zi{m%jG`kNe0%8l4 zDTZJ=1It{RFJt5xgc>GR+ioh&#q}aCRT?%ImBv=y$BJkK%?F z#Dj;w^ql$c^+W52Q%D;X*TZN#r!LZZypj&~pbIHc!B7N>F3qO|*XOHw{7zMH8#_K9 zTw3G?G21(wqf_&Oc3CWJsW|>qB)l~SQ9t=RWX=IN-DO`*dnL=W&RubKX8zd!^!NGQ zRHp1~dU~UWZ3+HH*UP$#*95sLsaWZ*;wenG@zHEnr@*3V9Tt`kE_-T@RP*y^1_hHD zEdUup@SXi0y6D6um&5M7MMi(MDzQ)3ve}09xzBD&ykCP+5`oD0ZmB&_S(ilVVvB(} zw!wMU7aliY-hEEX7@85FO>`7Bnc5JPKisu9Q1><8PLjGb`!`AAO486AZAoYcQ>+zX zdekLd!KdtAN`1*CbhY%wM0DM7h*XT1+X3Se{S6svw#b-4O52u0fQ&Lxrhzr&cknx9 
zqR17x;s<&+JDtlpU>w`>DIMUPTg~7|Q_>7JXt|&QP;V~mJL?NY8FYM-5xDEzaR*8= z2^_1&AFMviF}#XLezsiA)vg?I*?P2%S;ti@mY_{shaFqp`aOs7SOcV4N%*yAGH=dS zkbq9Af#S)U(Z|tBA>83fz9S^W*@UfP8NBg?H5I#!TbEr0)>6JpN#hr3dk<-X_@o>U zRCjeX=sQb@%|AR{s5&2MW8*wV3B`H#KuR=bTnPH$iv;yz+McGm(?SNQ;9UR1oYHxl z`g?Z?&6sL&T|@^G%GEb8OfCqD6XfaJD?^JJ9x=dM6$%^L7ENXfo){L3jpJyev7Li= z#hl>sQ3A$D$R&X9=1qZ-8`dh8aB*;D@UF=?umYTfEjAl#v8aEw2KW4?v}jvKxKCx( zv!-&<-Z9jtG~B!n;ihbc%$fQ6==m(~i0t);@%CT(j&@4aaI0F~#;P9cV-vf%w z(V43D(A8jOwB!j3TUpsvIeL-8EGfRuI&|`Q4tD=+GsRWq45@)g$GLD$ zPAL8T^`gOIvg416QwKr1H^E^e%4V=&-Zu8@`ROl@_JN!*uMhX;?dlgf1H6#BMA(!e zjy%V3hjNi`1Y$x9&!(90&nshAmfrL*i6-l7dljCjMtE4q=cSO`jIr^g7u`)@ow*#2 zJO&7?Mi`%XqG5Z4UU|2mgCD@<_Jyz!0*YKIRwYSF?f53Z&%45rmJ4RMDsc>HfvhK2 zBcawOzG+JL+=)n3sPXA3+RzA1K7pw0U>JLi*7&X5@0KHdAFX>8lbox7aRUUX_{#=% z^-{lZ$@yIw0B8rqw#EWdUx>&bRQw?YE@OJ+ zRDeS=8@h6aS3;79eojB5U4?s~)!baQ-7}9+^0{0@SD_&M2MSDR=Xko93n!h`6V!Z=c!3fJeB}waK8ZS}>xr%d6!@B11X$-qr-Si}!Tu8NVPmid ztn$U8wGUh5?z{o4N7}yBfPoBEHS8JXe7DyNmaE#5b?9<4YytW1Y-mGupEp<#o;cR^2OlbK(*dfkYUlv9d${e2@d_j8V>xXFiPvR1X}VHI9Z zUjxu6PjGpVFuKdHc>(2Z+sz5EW4XUH$VMXlL5a`F>%xNrA zHc+zyoXn{ccs0JUAC#VK_qg99?Mt$^itMv78Cg7Al3p>k&T*>0g-L!1-X2!P7RDOY z;tH;^=w6M>hDj_$(G8L;pjz{VJlJT<_fnq8xYC%4%D9*@8lHV`jz;wBZtD<(h%ih@ z=53AH9t7c4cayO3?c)YdU30{^u*7?d&-6zzblQXovhrusE|RK5_)%BK^LdBI@1jbG zM^fu+&~Cgg46hwCj4i{*6XG_Yd@IL5h-IQtQ*Ycv}t$u0|xkuCE~-s9gy;q9?Y$pVVx%c1;Jv( z>GyI@AHYR3>8ih{yYnwav2BuMp4ITM_0IWGU_wcF#+oCKPKv<7(U_6oMAm=};*GX$ z^*I(Hw&47)Rhy#+_Q|+?zua_FCh!X6o{WDv#AQGz)88QYq4fA0!9&UcIz$L~i*O6& z=vuRdqI`{vR0nZaFkjJL-o@+#NN~o@?&8H*r+e_JJse@8XjW7{bRKi7FAk(%qI8CK zyN9c}ek4922RsC%+ks2(H)oidED+iHF`A2j4HOXOabgu3+@mm-XZ|f>3~e^rH5M{@ zrxGH#3BVPFH8}pxt^&#u$+VEXlCm-&#ITe&uN+J48f7GQMb&Hv?`IY8ATFJDxX+oipRqGTK7@D0XH(u8>1Y-$AEy%R#m z+aBoR#Z9h+pT--(jA9HK8@Y-%n1^8KcJqT<1!#R*cW~#0$WN7dC9_3G16Az^fuRT# z?dqjRbG&Ittm4Xm2Qt*K5PQ+p)FX||L?%^fb}`L_xTx5(P~`y6h)n|m|#A>)MgOVf^=4a$Ss$g{vL8}F_!vwl4di>gJRsr(z_oCloiYYh9#aiO60qzx$7!3d_ZP; zdO8_8CzDfSuXLLLgJROlp|;t5pJ{Q3*x;zj+V}gw`LjKn9;FR(cVHXF*{`%qHRCK; zYXpC!Bl(G{SO0QYy=PYkum4<^?&}&d9XCRl5_j_&UvYM%h0sLB8}a7G2}eA^m)-69 zhUR)y{%$p#7~Zu~0?yHMfG!F4M#zR*lb|jyl;iLfDQ-w5CZCo(cS#Vrgw?K++>~?6 z-N?eriVJx;Pas9WBw2DR#ADeJp6)M1 zSbECWd*ZOheGW7WQ4LE7oXBn`fjsmoC1ptY*LDTBXZJn7y;9cZP+H^Q>=2hodlQGA zxg3$ly5E(d^NFFdL5k~rqAyQn&XYTKwNvhVR%;)=%Q1DTcm8&09eJ8?R13;Ej8!4u zPYn3`>`+sZECG}4pftLbClGf)4a+%1aLKFhDpQw z_}2`a0+~ACOP?6V+SbLP635!V;!b0tDK`-H(KF{64wne({iP9bofT8l-_<@zYchl+ zSyk207IW$7ryWtU(L6gL*YHy_X{UsPs$OMfUT(9VO=H?Du1vm)aKm0^)$S2xLiuU(mxkVSg?GVBEZ{TP->wzu>f@r0y z)-$h7gq|sf;v<%jBnW|y<{Z8i`28wzGJVEJsn$O`S578l_U7xZMhY(1RqlHkwWOL2 zST*e&m#f7Wf#DYUy6V)Yg(zyMr6iRqBn`XLiX6(v)||jT5bnmkN&^&q4IkA-5lMkR zEgqx8juN!JJeUU1k6>>9(0Ao>&?N&_mkSGEW@*uh9A~hs{=%Bw!$~VZ;@degjzDle z%@(o7yfs~o;~c{T-tgcPnK$wl`5@~kfMI%uwrUd)D^O05Thlh~OX{(kVe{bnao9@! zqmzN7neRO^%>dV4X&gXqI5>lE|Me&ZaZCtL$(1Zl;L%)1MfS7$05OoQgarO)sCS8f zUafc4kG?~u>fsqBhTbN+cwhN-M5kh~8j6f+ft%Cg~4@U-|?ZJ=}4 zds(lZPmD9P%s?Vj6A`;;9v~IqWTi&VYt=wE<6*DBk-7sBDO3?=1ds)QT`Z*;SIsy? 
z9GNfT%vKyuIyf{^9~6Y9;av|ljY%}t+gt&zlI=ER+j`wGmUc2eDOoT`srrM1we?qHp7s3Edz8%f&{QNCll9^G1WStSo~EZ zhRi)0Qa{e1Abc?AK@}h-{%ju~81D(r%n1?kGfBUBy~C{7r6+EW&7vJAHNwTmfn@l zR3K#OF|~$|giH=`k-1>dT?*g zQLo^QHo{R+d}8Wrdz7P%7>t6_l;v56>^IRFNbhD(OK2Vf&ZX=jWuMG$<~37GwYuI^ zd!)xqzA~<^mMyrO_?LQ2AaYwRgP9@nr zT_{c3EcE7PHiyTk98S@dyZ)(%d?8FSCHwm~V1(x{&zYAL&2#!bN*%4xN%EOI1Zsj9 zohhJ))*Vd`OlI))o5weKY2N`?O~A->B$E0mV<%FSk5Om6&|;6f0XJvqAu?-6Xs6L& zw|y7F2Wum1N%-()^3gl!#-0Xp_B3u6y%Es-Atw#JiWVsY6K*?(mWV?siSIV4Hj zC{biFwqt@uIGEM4)CD^fjz={5W}{D!BKuOaCZ1PG3r{c5@ITPuByyS-St05qJo0}d zl$ph!PEW#QaLxQOI(dJ^kQ6$%rZBXY%r&6N^PEJy7{&qx(EwK^sF^kBB;Q1K$N5LV8hw(lU0~dlH22Ti& z$d8eW!W?7YNR(sWNRmcGyCy#*ghR;)#=IgikLeMNZFGo6H{ow=w|h&&AM<7p{A|?! a03jIsq5l9AA*g-~@9twCwR;f7xBuDQJ7seK diff --git a/packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart b/packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart deleted file mode 100644 index 6023d581..00000000 --- a/packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart +++ /dev/null @@ -1,293 +0,0 @@ -// ignore_for_file: avoid_redundant_argument_values, avoid_print -@TestOn('vm') -library; // Uses dart:io - -import 'dart:convert'; -import 'dart:io'; - -import 'package:langchain_anthropic/langchain_anthropic.dart'; -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/language_models.dart'; -import 'package:langchain_core/output_parsers.dart'; -import 'package:langchain_core/prompts.dart'; -import 'package:langchain_core/tools.dart'; -import 'package:test/test.dart'; - -void main() { - group('ChatAnthropic tests', () { - const defaultModel = 'claude-3-5-sonnet-20240620'; - - late ChatAnthropic chatModel; - - setUp(() async { - chatModel = ChatAnthropic( - apiKey: Platform.environment['ANTHROPIC_API_KEY'], - defaultOptions: const ChatAnthropicOptions( - model: defaultModel, - ), - ); - }); - - tearDown(() { - chatModel.close(); - }); - - test('Test Text-only input with different models', () async { - final models = [ - 'claude-3-5-sonnet-20240620', - 'claude-3-haiku-20240307', - 'claude-3-opus-20240229', - 'claude-3-sonnet-20240229', - ]; - for (final model in models) { - print('Testing model: $model'); - final res = await chatModel.invoke( - PromptValue.string( - 'List the numbers from 1 to 9 in order ' - 'without any spaces, commas or additional explanations.', - ), - options: ChatAnthropicOptions( - model: model, - temperature: 0, - ), - ); - expect(res.id, isNotEmpty); - expect(res.finishReason, isNot(FinishReason.unspecified)); - expect(res.metadata['model'], contains(model.toLowerCase())); - expect( - res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), - contains('123456789'), - ); - await Future.delayed(const Duration(seconds: 5)); - } - }); - - test('Text-and-image input', () async { - final res = await chatModel.invoke( - PromptValue.chat([ - ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - mimeType: 'image/jpeg', - data: base64.encode( - await File('./test/chat_models/assets/apple.jpeg') - .readAsBytes(), - ), - ), - ]), - ), - ]), - ); - - expect(res.output.content.toLowerCase(), contains('apple')); - }); - - test('Test stop sequence', () async { - final res = await chatModel.invoke( - PromptValue.string( - 'List the numbers from 1 to 9 in order ' - 'without any spaces, commas or additional explanations.', - ), - options: const ChatAnthropicOptions( - model: defaultModel, - stopSequences: ['4'], - ), - ); - final text = 
res.output.content; - expect(text, contains('123')); - expect(text, isNot(contains('456789'))); - expect(res.finishReason, FinishReason.stop); - }); - - test('Test max tokens', () async { - final res = await chatModel.invoke( - PromptValue.string('Tell me a joke'), - options: const ChatAnthropicOptions( - model: defaultModel, - maxTokens: 10, - ), - ); - expect(res.output.content.length, lessThan(50)); - expect(res.finishReason, FinishReason.length); - }); - - test('Test Multi-turn conversations', () async { - final prompt = PromptValue.chat([ - ChatMessage.humanText( - 'List the numbers from 1 to 9 in order ' - 'without any spaces, commas or additional explanations.', - ), - ChatMessage.ai('123456789'), - ChatMessage.humanText( - 'Remove the number 4 from the list', - ), - ]); - final res = await chatModel.invoke( - prompt, - options: const ChatAnthropicOptions( - model: defaultModel, - temperature: 0, - ), - ); - expect( - res.output.content, - contains('12356789'), - ); - }); - - test('Test streaming', () async { - final stream = chatModel.stream( - PromptValue.string( - 'List the numbers from 1 to 100 in order ' - 'without any spaces, commas or additional explanations.', - ), - ); - - String content = ''; - int count = 0; - await for (final res in stream) { - content += res.output.content; - count++; - } - expect(count, greaterThan(1)); - expect(content, contains('123456789')); - }); - - test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), - () async { - const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - final model = chatModel.bind( - const ChatAnthropicOptions( - model: defaultModel, - tools: [tool], - ), - ); - - final humanMessage = ChatMessage.humanText( - "What's the weather like in Boston and Madrid right now in celsius?", - ); - final res1 = await model.invoke(PromptValue.chat([humanMessage])); - - final aiMessage1 = res1.output; - expect(aiMessage1.toolCalls, hasLength(2)); - - final toolCall1 = aiMessage1.toolCalls.first; - expect(toolCall1.name, tool.name); - expect(toolCall1.arguments.containsKey('location'), isTrue); - expect(toolCall1.arguments['location'], contains('Boston')); - expect(toolCall1.arguments['unit'], 'celsius'); - - final toolCall2 = aiMessage1.toolCalls.last; - expect(toolCall2.name, tool.name); - expect(toolCall2.arguments.containsKey('location'), isTrue); - expect(toolCall2.arguments['location'], contains('Madrid')); - expect(toolCall2.arguments['unit'], 'celsius'); - - final functionResult1 = { - 'temperature': '22', - 'unit': 'celsius', - 'description': 'Sunny', - }; - final functionMessage1 = ChatMessage.tool( - toolCallId: toolCall1.id, - content: json.encode(functionResult1), - ); - - final functionResult2 = { - 'temperature': '25', - 'unit': 'celsius', - 'description': 'Cloudy', - }; - final functionMessage2 = ChatMessage.tool( - toolCallId: toolCall2.id, - content: json.encode(functionResult2), - ); - - final res2 = await model.invoke( - PromptValue.chat([ - humanMessage, - aiMessage1, - functionMessage1, - functionMessage2, - ]), - ); - - final aiMessage2 = res2.output; - - expect(aiMessage2.toolCalls, isEmpty); - 
expect(aiMessage2.content, contains('22')); - expect(aiMessage2.content, contains('25')); - }); - - test('Test streaming with tools', - timeout: const Timeout(Duration(minutes: 5)), () async { - const tool = ToolSpec( - name: 'joke', - description: 'A joke', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'setup': { - 'type': 'string', - 'description': 'The setup for the joke', - }, - 'punchline': { - 'type': 'string', - 'description': 'The punchline to the joke', - }, - }, - 'required': ['location', 'punchline'], - }, - ); - - final promptTemplate = ChatPromptTemplate.fromTemplate( - 'tell me a long joke about {foo}', - ); - final chat = chatModel.bind( - ChatAnthropicOptions( - model: defaultModel, - tools: const [tool], - toolChoice: ChatToolChoice.forced(name: 'joke'), - ), - ); - final jsonOutputParser = ToolsOutputParser(); - - final chain = promptTemplate.pipe(chat).pipe(jsonOutputParser); - - final stream = chain.stream({'foo': 'bears'}); - - List lastResult = []; - int count = 0; - await for (final res in stream) { - print(res); - lastResult = res; - count++; - } - - expect(count, greaterThan(1)); - expect(lastResult, hasLength(1)); - final toolCall = lastResult.first; - expect(toolCall.arguments['setup'], isNotEmpty); - expect(toolCall.arguments['punchline'], isNotEmpty); - }); - }); -} diff --git a/packages/langchain_chroma/CHANGELOG.md b/packages/langchain_chroma/CHANGELOG.md index 7e458f37..266080ac 100644 --- a/packages/langchain_chroma/CHANGELOG.md +++ b/packages/langchain_chroma/CHANGELOG.md @@ -1,27 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.2.1+3 - - - Update a dependency to the latest release. - -## 0.2.1+2 - - - Update a dependency to the latest release. - -## 0.2.1+1 - - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -## 0.2.1 - - - Update a dependency to the latest release. - -## 0.2.0+5 - - - Update a dependency to the latest release. - ## 0.2.0+4 - Update a dependency to the latest release. diff --git a/packages/langchain_chroma/lib/src/vector_stores/chroma.dart b/packages/langchain_chroma/lib/src/vector_stores/chroma.dart index dc170896..9ed252ba 100644 --- a/packages/langchain_chroma/lib/src/vector_stores/chroma.dart +++ b/packages/langchain_chroma/lib/src/vector_stores/chroma.dart @@ -42,7 +42,7 @@ import 'types.dart'; /// If you are interacting with Chroma server from a web browser, /// you may need to configure the CORS policy. You can do this by /// passing the following environment variable: -/// ```sh +/// ``` /// docker run -p 8000:8000 -e 'CHROMA_SERVER_CORS_ALLOW_ORIGINS=["*"]' chromadb/chroma /// ``` /// The previous command will allow all origins to access the Chroma server diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index e216f998..3da841ef 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_chroma description: LangChain.dart integration module for Chroma open-source embedding database. 
-version: 0.2.1+3 +version: 0.2.0+4 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -14,17 +14,17 @@ topics: - vector-db environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - chromadb: ^0.2.0+1 - http: ^1.2.2 - langchain_core: 0.3.6 + chromadb: ^0.2.0 + http: ^1.1.0 + langchain_core: ^0.3.1 meta: ^1.11.0 - uuid: ^4.4.2 + uuid: ^4.3.3 dev_dependencies: - test: ^1.25.8 - langchain: ^0.7.6 - langchain_community: 0.3.2 - langchain_openai: ^0.7.2 + test: ^1.25.2 + langchain: ^0.7.1 + langchain_community: 0.2.0+1 + langchain_openai: ^0.6.1 diff --git a/packages/langchain_chroma/pubspec_overrides.yaml b/packages/langchain_chroma/pubspec_overrides.yaml index d53c4efe..4583d481 100644 --- a/packages/langchain_chroma/pubspec_overrides.yaml +++ b/packages/langchain_chroma/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: chromadb,langchain,langchain_community,langchain_core,langchain_openai,openai_dart,tavily_dart +# melos_managed_dependency_overrides: chromadb,langchain_openai,openai_dart,langchain_core,langchain_community,langchain dependency_overrides: chromadb: path: ../chromadb @@ -12,5 +12,3 @@ dependency_overrides: path: ../langchain_openai openai_dart: path: ../openai_dart - tavily_dart: - path: ../tavily_dart diff --git a/packages/langchain_cohere/pubspec.yaml b/packages/langchain_cohere/pubspec.yaml index ed26abe5..bcb53a98 100644 --- a/packages/langchain_cohere/pubspec.yaml +++ b/packages/langchain_cohere/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_cohere issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_cohere homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com publish_to: none # Remove when the package is ready to be published topics: @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 0336c13e..7f48bd87 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -1,34 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.3.2 - - - **FEAT**: Add support for deleteWhere in ObjectBoxVectorStore ([#552](https://github.com/davidmigloz/langchain_dart/issues/552)). ([90918bba](https://github.com/davidmigloz/langchain_dart/commit/90918bbac411ccfe4823ae195de6a50a46575573)) - - **REFACTOR**: Add stubs for ObjectBox on web platform ([#553](https://github.com/davidmigloz/langchain_dart/issues/553)). ([41caed92](https://github.com/davidmigloz/langchain_dart/commit/41caed924bf24382567758be4590d5ddff31e839)) - -## 0.3.1 - - - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). 
([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) - -## 0.3.0 - -- **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) -- **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -## 0.2.2 - - - **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) - -## 0.2.1+1 - - - **FIX**: Add missing dependency in langchain_community package ([#448](https://github.com/davidmigloz/langchain_dart/issues/448)). ([70ffd027](https://github.com/davidmigloz/langchain_dart/commit/70ffd027cb41c5c5058bb266966734894f773330)) - -## 0.2.1 - - - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) - + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) - ## 0.2.0+1 - Update a dependency to the latest release. diff --git a/packages/langchain_community/README.md b/packages/langchain_community/README.md index 1dcb80e3..b76ee3c3 100644 --- a/packages/langchain_community/README.md +++ b/packages/langchain_community/README.md @@ -27,10 +27,6 @@ The most popular third-party integrations have their own packages (e.g. [langcha * `WebBaseLoader`: for web pages. - Tools: * `CalculatorTool`: to calculate math expressions. - * `TavilySearchResultsTool`: returns a list of results for a query using the [Tavily](https://tavily.com) search engine. - * `TavilyAnswerTool`: returns an answer for a query using the [Tavily](https://tavily.com) search engine. -- Vector stores: - * `ObjectBoxVectorStore`: [ObjectBox](https://objectbox.io/) on-device vector database. Check out the [API reference](https://pub.dev/documentation/langchain_community/latest) for more details. diff --git a/packages/langchain_community/lib/langchain_community.dart b/packages/langchain_community/lib/langchain_community.dart index b91a968a..3aee4cf9 100644 --- a/packages/langchain_community/lib/langchain_community.dart +++ b/packages/langchain_community/lib/langchain_community.dart @@ -3,4 +3,3 @@ library; export 'src/document_loaders/document_loaders.dart'; export 'src/tools/tools.dart'; -export 'src/vector_stores/vector_stores.dart'; diff --git a/packages/langchain_community/lib/src/document_loaders/csv.dart b/packages/langchain_community/lib/src/document_loaders/csv.dart index 2a4a4872..155e520d 100644 --- a/packages/langchain_community/lib/src/document_loaders/csv.dart +++ b/packages/langchain_community/lib/src/document_loaders/csv.dart @@ -17,7 +17,7 @@ import 'package:langchain_core/documents.dart'; /// and [eol]. /// /// The fields are added to the page content in the following format: -/// ```txt +/// ``` /// {field1Name}: {field1Value} /// {field2Name}: {field2Value} /// ... 
@@ -56,6 +56,7 @@ class CsvLoader extends BaseDocumentLoader { /// the page content of the document. /// /// If not provided, all row fields are extracted. + /// ``` final List? fields; /// Optional field to override the field names from the CSV file. diff --git a/packages/langchain_community/lib/src/tools/calculator.dart b/packages/langchain_community/lib/src/tools/calculator.dart index 26becb93..9f41a130 100644 --- a/packages/langchain_community/lib/src/tools/calculator.dart +++ b/packages/langchain_community/lib/src/tools/calculator.dart @@ -14,7 +14,7 @@ import 'package:math_expressions/math_expressions.dart'; /// temperature: 0, /// ); /// final tool = CalculatorTool(); -/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); +/// final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); /// final executor = AgentExecutor(agent: agent); /// final res = await executor.run('What is 40 raised to the 0.43 power? '); /// print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' diff --git a/packages/langchain_community/lib/src/tools/tavily/mappers.dart b/packages/langchain_community/lib/src/tools/tavily/mappers.dart deleted file mode 100644 index 21e907e5..00000000 --- a/packages/langchain_community/lib/src/tools/tavily/mappers.dart +++ /dev/null @@ -1,21 +0,0 @@ -// ignore_for_file: public_member_api_docs -import 'package:tavily_dart/tavily_dart.dart'; - -import 'types.dart'; - -extension TavilySearchDepthX on TavilySearchDepth { - SearchRequestSearchDepth toSearchRequestSearchDepth() => switch (this) { - TavilySearchDepth.basic => SearchRequestSearchDepth.basic, - TavilySearchDepth.advanced => SearchRequestSearchDepth.advanced, - }; -} - -extension TavilySearchResultX on SearchResult { - TavilySearchResult toTavilySearchResult() => TavilySearchResult( - title: title, - url: url, - content: content, - rawContent: rawContent, - score: score, - ); -} diff --git a/packages/langchain_community/lib/src/tools/tavily/tavily.dart b/packages/langchain_community/lib/src/tools/tavily/tavily.dart deleted file mode 100644 index 64f26c5d..00000000 --- a/packages/langchain_community/lib/src/tools/tavily/tavily.dart +++ /dev/null @@ -1,3 +0,0 @@ -export 'tavily_answer.dart'; -export 'tavily_search_results.dart'; -export 'types.dart'; diff --git a/packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart b/packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart deleted file mode 100644 index a5ad637f..00000000 --- a/packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart +++ /dev/null @@ -1,102 +0,0 @@ -import 'dart:async'; - -import 'package:http/http.dart' as http; -import 'package:langchain_core/tools.dart'; -import 'package:tavily_dart/tavily_dart.dart'; - -import 'mappers.dart'; -import 'tavily_search_results.dart'; -import 'types.dart'; - -/// Tool that queries the [Tavily Search API](https://tavily.com) and -/// gets an answer to the search query. -/// -/// The Tavily API uses API keys for authentication. Visit the -/// [Tavily console](https://app.tavily.com/) to retrieve the API key you'll -/// use in your requests. -/// -/// If you want to get a list of search results instead, use the -/// [TavilySearchResultsTool] instead. 
-/// -/// Example: -/// ```dart -/// final tool = TavilyAnswerTool( -/// apiKey: Platform.environment['TAVILY_API_KEY']!, -/// ); -/// final res = await tool.invoke('What is the weather like in New York?'); -/// print(res); -/// // The current weather in New York is clear with a temperature of 22.8°C (73.0°F)... -/// ``` -final class TavilyAnswerTool extends StringTool { - /// Creates a [TavilyAnswerTool] instance. - /// - /// Main configuration options: - /// - `apiKey`: your Tavily API key. You can find your API key in the - /// [Tavily console](https://app.tavily.com/). - /// - /// Advance configuration options: - /// - `baseUrl`: the base URL to use. Defaults to Tavily's API URL. You can - /// override this to use a different API URL, or to use a proxy. - /// - `headers`: global headers to send with every request. You can use - /// this to set custom headers, or to override the default headers. - /// - `queryParams`: global query parameters to send with every request. You - /// can use this to set custom query parameters (e.g. Azure OpenAI API - /// required to attach a `version` query parameter to every request). - /// - `client`: the HTTP client to use. You can set your own HTTP client if - /// you need further customization (e.g. to use a Socks5 proxy). - TavilyAnswerTool({ - required this.apiKey, - final String? baseUrl, - final Map headers = const {}, - final Map queryParams = const {}, - final http.Client? client, - super.defaultOptions = const TavilyAnswerToolOptions(), - }) : _client = TavilyClient( - baseUrl: baseUrl, - headers: headers, - queryParams: queryParams, - client: client, - ), - super( - name: 'tavily_answer', - description: - 'A search engine optimized for comprehensive, accurate, and trusted answers. ' - 'Useful for when you need to answer questions about current events. ' - 'The tool returns an answer to the search query - not the search results.', - inputDescription: 'The search query to get an answer to. ' - 'Eg: "What is the weather like in New York?"', - ); - - /// A client for interacting with Tavily API. - final TavilyClient _client; - - /// Your Tavily API key. - String apiKey; - - @override - Future invokeInternal( - final String toolInput, { - final TavilyAnswerToolOptions? options, - }) async { - final res = await _client.search( - request: SearchRequest( - apiKey: apiKey, - query: toolInput, - includeAnswer: true, - searchDepth: (options?.searchDepth ?? defaultOptions.searchDepth) - .toSearchRequestSearchDepth(), - maxResults: options?.maxResults ?? defaultOptions.maxResults, - includeDomains: - options?.includeDomains ?? defaultOptions.includeDomains, - excludeDomains: - options?.excludeDomains ?? defaultOptions.excludeDomains, - ), - ); - return res.answer ?? ''; - } - - @override - void close() { - _client.endSession(); - } -} diff --git a/packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart b/packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart deleted file mode 100644 index 7e5693c7..00000000 --- a/packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart +++ /dev/null @@ -1,130 +0,0 @@ -import 'dart:async'; - -import 'package:http/http.dart' as http; -import 'package:langchain_core/tools.dart'; -import 'package:tavily_dart/tavily_dart.dart'; - -import 'mappers.dart'; -import 'tavily_answer.dart'; -import 'types.dart'; - -/// Tool that queries the [Tavily Search API](https://tavily.com) and -/// gets back a list of search results. 
-/// -/// The Tavily API uses API keys for authentication. Visit the -/// [Tavily console](https://app.tavily.com/) to retrieve the API key you'll -/// use in your requests. -/// -/// If you want to get directly an answer to a search query, use the -/// [TavilyAnswerTool] instead. -/// -/// Example: -/// ```dart -/// final tool = TavilySearchResultsTool( -/// apiKey: Platform.environment['TAVILY_API_KEY']!, -/// ); -/// final res = await tool.invoke('What is the weather like in New York?'); -/// print(res); -/// // [ -/// // { -/// // "title": "Weather in New York", -/// // "url": "https://www.weatherapi.com/", -/// // "content": "{'location': {'lat': 40.71, 'lon': -74.01}, 'current': {'last_updated': '2024-06-20 17:00', 'temp_c': 31.1, 'condition': {'text': 'Sunny', 'icon': '//cdn.weatherapi.com/weather/64x64/day/113.png'}, 'wind_mph': 2.2, 'wind_kph': 3.6, 'wind_degree': 161, 'wind_dir': 'SSE', 'pressure_mb': 1025.0, 'pressure_in': 30.26, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 48, 'cloud': 0, 'feelslike_c': 33.1, 'feelslike_f': 91.6, 'windchill_c': 29.5, 'windchill_f': 85.0, 'heatindex_c': 30.6, 'heatindex_f': 87.0, 'dewpoint_c': 17.7, 'dewpoint_f': 63.8, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 7.0, 'gust_mph': 16.4, 'gust_kph': 26.4}}", -/// // "score": 0.98855 -/// // }, -/// // ... -/// // ] -/// ``` -final class TavilySearchResultsTool - extends Tool { - /// Creates a [TavilySearchResultsTool] instance. - /// - /// Main configuration options: - /// - `apiKey`: your Tavily API key. You can find your API key in the - /// [Tavily console](https://app.tavily.com/). - /// - /// Advance configuration options: - /// - `baseUrl`: the base URL to use. Defaults to Tavily's API URL. You can - /// override this to use a different API URL, or to use a proxy. - /// - `headers`: global headers to send with every request. You can use - /// this to set custom headers, or to override the default headers. - /// - `queryParams`: global query parameters to send with every request. You - /// can use this to set custom query parameters (e.g. Azure OpenAI API - /// required to attach a `version` query parameter to every request). - /// - `client`: the HTTP client to use. You can set your own HTTP client if - /// you need further customization (e.g. to use a Socks5 proxy). - TavilySearchResultsTool({ - required this.apiKey, - final String? baseUrl, - final Map headers = const {}, - final Map queryParams = const {}, - final http.Client? client, - super.defaultOptions = const TavilySearchResultsToolOptions(), - }) : _client = TavilyClient( - baseUrl: baseUrl, - headers: headers, - queryParams: queryParams, - client: client, - ), - super( - name: 'tavily_search_results', - description: - 'A search engine optimized for comprehensive, accurate, and trusted results. ' - 'Useful for when you need to answer questions about current events. ' - 'The tool returns a JSON object with search results.', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'query': { - 'type': 'string', - 'description': 'The search query to look up. ' - 'Eg: "What is the weather like in New York?"', - }, - }, - 'required': ['query'], - }, - ); - - /// A client for interacting with Tavily API. - final TavilyClient _client; - - /// Your Tavily API key. - String apiKey; - - @override - Future invokeInternal( - final String input, { - final TavilySearchResultsToolOptions? options, - }) async { - final res = await _client.search( - request: SearchRequest( - apiKey: apiKey, - query: input, - searchDepth: (options?.searchDepth ?? 
defaultOptions.searchDepth) - .toSearchRequestSearchDepth(), - maxResults: options?.maxResults ?? defaultOptions.maxResults, - includeRawContent: - options?.includeRawContent ?? defaultOptions.includeRawContent, - includeDomains: - options?.includeDomains ?? defaultOptions.includeDomains, - excludeDomains: - options?.excludeDomains ?? defaultOptions.excludeDomains, - ), - ); - return TavilySearchResults( - results: res.results - .map((r) => r.toTavilySearchResult()) - .toList(growable: false), - ); - } - - @override - String getInputFromJson(final Map json) { - return json['query'] as String; - } - - @override - void close() { - _client.endSession(); - } -} diff --git a/packages/langchain_community/lib/src/tools/tavily/types.dart b/packages/langchain_community/lib/src/tools/tavily/types.dart deleted file mode 100644 index 872723cf..00000000 --- a/packages/langchain_community/lib/src/tools/tavily/types.dart +++ /dev/null @@ -1,181 +0,0 @@ -import 'dart:convert'; - -import 'package:langchain_core/tools.dart'; -import 'package:meta/meta.dart'; - -import 'tavily_answer.dart'; -import 'tavily_search_results.dart'; - -/// The depth of the search. -enum TavilySearchDepth { - /// Basic search depth. - basic, - - /// Advanced search depth. - advanced, -} - -/// {@template tavily_search_results} -/// A search results from the Tavily search engine. -/// {@endtemplate} -@immutable -class TavilySearchResults { - /// {@macro tavily_search_results} - const TavilySearchResults({ - required this.results, - }); - - /// The search results. - final List results; - - @override - String toString() { - return json.encode( - results - .map( - (result) => { - 'title': result.title, - 'url': result.url, - 'content': result.content, - 'rawContent': result.rawContent, - 'score': result.score, - }, - ) - .toList(growable: false), - ); - } -} - -/// {@template tavily_search_result} -/// A search result from the Tavily search engine. -/// {@endtemplate} -@immutable -class TavilySearchResult { - /// {@macro tavily_search_result} - const TavilySearchResult({ - required this.title, - required this.url, - required this.content, - this.rawContent, - required this.score, - }); - - /// The title of the search result url. - final String title; - - /// The url of the search result. - final String url; - - /// The most query related content from the scraped url. - final String content; - - /// The parsed and cleaned HTML of the site. For now includes parsed text only. - final String? rawContent; - - /// The relevance score of the search result. - final double score; -} - -/// {@template tavily_search_results_tool_options} -/// Generation options to pass into the [TavilySearchResultsTool]. -/// {@endtemplate} -class TavilySearchResultsToolOptions extends ToolOptions { - /// {@macro tavily_search_results_tool_options} - const TavilySearchResultsToolOptions({ - this.maxResults = 5, - this.searchDepth = TavilySearchDepth.basic, - this.includeRawContent = false, - this.includeDomains, - this.excludeDomains, - }); - - /// The number of maximum search results to return. - final int maxResults; - - /// The depth of the search. - final TavilySearchDepth searchDepth; - - /// Include raw content in the search results. - final bool includeRawContent; - - /// A list of domains to specifically include in the search results. - final List? includeDomains; - - /// A list of domains to specifically exclude from the search results. - final List? 
excludeDomains; -} - -/// {@template tavily_answer_tool_options} -/// Generation options to pass into the [TavilyAnswerTool]. -/// {@endtemplate} -@immutable -class TavilyAnswerToolOptions extends ToolOptions { - /// {@macro tavily_answer_tool_options} - const TavilyAnswerToolOptions({ - this.maxResults = 5, - this.searchDepth = TavilySearchDepth.basic, - this.includeDomains, - this.excludeDomains, - super.concurrencyLimit, - }); - - /// The number of maximum search results to return. - final int maxResults; - - /// The depth of the search. - final TavilySearchDepth searchDepth; - - /// A list of domains to specifically include in the search results. - final List? includeDomains; - - /// A list of domains to specifically exclude from the search results. - final List? excludeDomains; - - @override - TavilyAnswerToolOptions copyWith({ - final int? maxResults, - final TavilySearchDepth? searchDepth, - final List? includeDomains, - final List? excludeDomains, - final int? concurrencyLimit, - }) { - return TavilyAnswerToolOptions( - maxResults: maxResults ?? this.maxResults, - searchDepth: searchDepth ?? this.searchDepth, - includeDomains: includeDomains ?? this.includeDomains, - excludeDomains: excludeDomains ?? this.excludeDomains, - concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, - ); - } - - @override - TavilyAnswerToolOptions merge( - covariant final TavilyAnswerToolOptions? other, - ) { - return copyWith( - maxResults: other?.maxResults, - searchDepth: other?.searchDepth, - includeDomains: other?.includeDomains, - excludeDomains: other?.excludeDomains, - concurrencyLimit: other?.concurrencyLimit, - ); - } - - @override - bool operator ==(covariant final TavilyAnswerToolOptions other) { - return maxResults == other.maxResults && - searchDepth == other.searchDepth && - includeDomains == other.includeDomains && - excludeDomains == other.excludeDomains && - concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return maxResults.hashCode ^ - searchDepth.hashCode ^ - includeDomains.hashCode ^ - excludeDomains.hashCode ^ - concurrencyLimit.hashCode; - } -} diff --git a/packages/langchain_community/lib/src/tools/tools.dart b/packages/langchain_community/lib/src/tools/tools.dart index 4aa306f8..9601880a 100644 --- a/packages/langchain_community/lib/src/tools/tools.dart +++ b/packages/langchain_community/lib/src/tools/tools.dart @@ -1,2 +1 @@ export 'calculator.dart'; -export 'tavily/tavily.dart'; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart deleted file mode 100644 index 84658107..00000000 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart +++ /dev/null @@ -1,220 +0,0 @@ -import 'dart:convert'; - -import 'package:langchain_core/documents.dart'; -import 'package:langchain_core/vector_stores.dart'; -import 'package:objectbox/objectbox.dart' - show - Box, - Condition, - ObjectWithScore, - QueryHnswProperty, - QueryStringProperty; -import 'package:uuid/uuid.dart'; - -/// {@template base_object_box_vector_store} -/// Base class for ObjectBox vector store. -/// -/// The [ObjectBoxVectorStore] class is a pre-configured version of this class, -/// but it can only be used if you don't use ObjectBox for anything else. -/// -/// If you need more control over the ObjectBox store, use this class instead. 
-/// For example, if you are using ObjectBox to store other entities, or if you -/// need to customize the Document entity class. -/// -/// Here is an example of how to use this class: -/// -/// First, you can define our own Document entity class instead of using the -/// one provided by the [ObjectBoxVectorStore]. In this way, you can customize -/// the entity to your needs. You will need to define the mapping logic between -/// the entity and the LangChain [Document] model. -/// -/// ```dart -/// @Entity() -/// class MyDocumentEntity { -/// MyDocumentEntity({ -/// required this.id, -/// required this.content, -/// required this.metadata, -/// required this.embedding, -/// }); -/// -/// @Id() -/// int internalId = 0; -/// -/// @Unique(onConflict: ConflictStrategy.replace) -/// String id; -/// -/// String content; -/// -/// String metadata; -/// -/// @HnswIndex( -/// dimensions: 768, -/// distanceType: VectorDistanceType.cosine, -/// ) -/// @Property(type: PropertyType.floatVector) -/// List embedding; -/// -/// factory MyDocumentEntity.fromModel( -/// Document doc, List embedding, -/// ) => MyDocumentEntity( -/// id: doc.id ?? '', -/// content: doc.pageContent, -/// metadata: jsonEncode(doc.metadata), -/// embedding: embedding, -/// ); -/// -/// Document toModel() => Document( -/// id: id, -/// pageContent: content, -/// metadata: jsonDecode(metadata), -/// ); -/// } -/// ``` -/// -/// After defining the entity class, you will need to run the ObjectBox -/// generator: -/// -/// ```sh -/// dart run build_runner build --delete-conflicting-outputs -/// ``` -/// -/// Then, you just need to create your custom vector store class that -/// extends [BaseObjectBoxVectorStore] and wire everything up: -/// -/// ```dart -/// class MyCustomVectorStore extends BaseObjectBoxVectorStore { -/// MyCustomVectorStore({ -/// required super.embeddings, -/// required Store store, -/// }) : super( -/// box: store.box(), -/// createEntity: ( -/// String id, -/// String content, -/// String metadata, -/// List embedding, -/// ) => -/// MyDocumentEntity( -/// id: id, -/// content: content, -/// metadata: metadata, -/// embedding: embedding, -/// ), -/// createDocument: (MyDocumentEntity docDto) => docDto.toModel(), -/// getIdProperty: () => MyDocumentEntity_.id, -/// getEmbeddingProperty: () => MyDocumentEntity_.embedding, -/// ); -/// } -/// ``` -/// -/// Now you can use the [MyCustomVectorStore] class to store and search documents. -/// {@endtemplate} -class BaseObjectBoxVectorStore extends VectorStore { - /// {@macro base_object_box_vector_store} - BaseObjectBoxVectorStore({ - required super.embeddings, - required final Box box, - required final T Function( - String id, - String content, - String metadata, - List embedding, - ) createEntity, - required final Document Function(T) createDocument, - required final QueryStringProperty Function() getIdProperty, - required final QueryHnswProperty Function() getEmbeddingProperty, - }) : _box = box, - _createEntity = createEntity, - _createDocument = createDocument, - _getIdProperty = getIdProperty, - _getEmbeddingProperty = getEmbeddingProperty; - - /// The [Box] to store the entities in. - final Box _box; - - /// The function to create an entity [T] from the given data. - final T Function( - String id, - String content, - String metadata, - List embedding, - ) _createEntity; - - /// The function to create a [Document] from the given entity [T]. - final Document Function(T) _createDocument; - - /// A getter for the ID query property. 
- final QueryStringProperty Function() _getIdProperty; - - /// A getter for the embedding query property. - final QueryHnswProperty Function() _getEmbeddingProperty; - - /// UUID generator. - final Uuid _uuid = const Uuid(); - - @override - Future> addVectors({ - required final List> vectors, - required final List documents, - }) async { - assert(vectors.length == documents.length); - - final List ids = []; - final List records = []; - for (var i = 0; i < documents.length; i++) { - final doc = documents[i]; - final id = doc.id ?? _uuid.v4(); - final entity = _createEntity( - id, - doc.pageContent, - jsonEncode(doc.metadata), - vectors[i], - ); - ids.add(id); - records.add(entity); - } - - _box.putMany(records); - return ids; - } - - @override - Future delete({required final List ids}) { - return _box.query(_getIdProperty().oneOf(ids)).build().removeAsync(); - } - - /// Delete by condition. - /// - /// - [condition] is the condition to delete by. - Future deleteWhere(final Condition condition) { - return _box.query(condition).build().removeAsync(); - } - - @override - Future> similaritySearchByVectorWithScores({ - required final List embedding, - final VectorStoreSimilaritySearch config = - const VectorStoreSimilaritySearch(), - }) async { - var filter = - _getEmbeddingProperty().nearestNeighborsF32(embedding, config.k); - - final filterCondition = config.filter?.values.firstOrNull; - if (filterCondition != null && filterCondition is Condition) { - filter = filter.and(filterCondition); - } - - final query = _box.query(filter).build(); - - Iterable> results = query.findWithScores(); - - if (config.scoreThreshold != null) { - results = results.where((final r) => r.score >= config.scoreThreshold!); - } - - return results - .map((r) => (_createDocument(r.object), r.score)) - .toList(growable: false); - } -} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart deleted file mode 100644 index 308e7da0..00000000 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart +++ /dev/null @@ -1,40 +0,0 @@ -// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters -import 'package:langchain_core/documents.dart'; -import 'package:langchain_core/vector_stores.dart'; - -// This is a stub class -class BaseObjectBoxVectorStore extends VectorStore { - BaseObjectBoxVectorStore({ - required super.embeddings, - required final Object? box, - required final Object? createEntity, - required final Object? createDocument, - required final Object? getIdProperty, - required final Object? 
getEmbeddingProperty, - }); - - @override - Future> addVectors({ - required List> vectors, - required List documents, - }) { - throw UnsupportedError('ObjectBox is not supported on web platform.'); - } - - @override - Future delete({required List ids}) { - throw UnsupportedError('ObjectBox is not supported on web platform.'); - } - - Future deleteWhere(final Object condition) { - throw UnsupportedError('ObjectBox is not supported on web platform.'); - } - - @override - Future> similaritySearchByVectorWithScores({ - required List embedding, - VectorStoreSimilaritySearch config = const VectorStoreSimilaritySearch(), - }) { - throw UnsupportedError('ObjectBox is not supported on web platform.'); - } -} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart deleted file mode 100644 index 63b1f86d..00000000 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart +++ /dev/null @@ -1,7 +0,0 @@ -export 'ob_io.dart' if (dart.library.js_interop) 'ob_stub.dart' - show - BaseObjectBoxVectorStore, - ObjectBoxDocument, - ObjectBoxDocumentProps, - ObjectBoxSimilaritySearch, - ObjectBoxVectorStore; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart deleted file mode 100644 index db6546e3..00000000 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart +++ /dev/null @@ -1,3 +0,0 @@ -export 'base_objectbox.dart'; -export 'objectbox.dart'; -export 'types.dart'; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart deleted file mode 100644 index 87329806..00000000 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart +++ /dev/null @@ -1,3 +0,0 @@ -export 'base_objectbox_stub.dart'; -export 'objectbox_stub.dart'; -export 'types_stub.dart'; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json deleted file mode 100644 index 32251c2e..00000000 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "_note1": "KEEP THIS FILE! Check it into a version control system (VCS) like git.", - "_note2": "ObjectBox manages crucial IDs for your object model. 
See docs for details.", - "_note3": "If you have VCS merge conflicts, you must resolve them according to ObjectBox docs.", - "entities": [ - { - "id": "1:4662034750769022750", - "lastPropertyId": "5:5762998900965066008", - "name": "ObjectBoxDocument", - "properties": [ - { - "id": "1:328437667364158177", - "name": "internalId", - "type": 6, - "flags": 1 - }, - { - "id": "2:3766173764062654800", - "name": "id", - "type": 9, - "flags": 34848, - "indexId": "1:8818474670164842374" - }, - { - "id": "3:7972539540824041325", - "name": "content", - "type": 9 - }, - { - "id": "4:866532944790310363", - "name": "metadata", - "type": 9 - }, - { - "id": "5:5762998900965066008", - "name": "embedding", - "type": 28, - "flags": 8, - "indexId": "2:3016727589204567263" - } - ], - "relations": [] - } - ], - "lastEntityId": "1:4662034750769022750", - "lastIndexId": "2:3016727589204567263", - "lastRelationId": "0:0", - "lastSequenceId": "0:0", - "modelVersion": 5, - "modelVersionParserMinimum": 5, - "retiredEntityUids": [], - "retiredIndexUids": [], - "retiredPropertyUids": [], - "retiredRelationUids": [], - "version": 1 -} \ No newline at end of file diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart deleted file mode 100644 index 22ddeee4..00000000 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart +++ /dev/null @@ -1,196 +0,0 @@ -import 'dart:convert'; - -import 'package:langchain_core/documents.dart'; -import 'package:objectbox/objectbox.dart' - show - Condition, - ConflictStrategy, - Entity, - HnswIndex, - Id, - Property, - PropertyType, - Store, - Unique; - -import 'base_objectbox.dart'; -import 'objectbox.g.dart' as obxg; -import 'types.dart'; - -/// Vector store for the [ObjectBox](https://objectbox.io/) on-device database. -/// -/// ```dart -/// final embeddings = OllamaEmbeddings(model: 'llama3.2'); -/// final vectorStore = ObjectBoxVectorStore(embeddings: embeddings); -/// ``` -/// -/// This vector stores creates a [Store] with an [ObjectBoxDocument] entity -/// that persists LangChain [Document]s along with their embeddings. If you -/// need more control over the entity or the storeo, you can use the -/// [BaseObjectBoxVectorStore] class instead. -/// -/// See documentation for more details: -/// - [LangChain.dart ObjectBox docs](https://langchaindart.com/#/modules/retrieval/vector_stores/integrations/objectbox) -/// - [ObjectBox Vector Search docs](https://docs.objectbox.io/ann-vector-search) -/// -/// ### Filtering -/// -/// You can use the [ObjectBoxSimilaritySearch] class to pass ObjectBox-specific -/// filtering options. -/// -/// [ObjectBoxVectorStore] supports filtering queries by id, content or metadata -/// using ObjectBox's [Condition]. You can define the filter condition in the -/// [ObjectBoxSimilaritySearch] `filterCondition` parameter. Use the -/// [ObjectBoxDocumentProps] class to reference the entity fields to use in the -/// query. 
-/// -/// For example: -/// ```dart -/// final vectorStore = ObjectBoxVectorStore(...); -/// final res = await vectorStore.similaritySearch( -/// query: 'What should I feed my cat?', -/// config: ObjectBoxSimilaritySearch( -/// k: 5, -/// scoreThreshold: 0.8, -/// filterCondition: ObjectBoxDocumentProps.id.equals('my-id') -/// .or(ObjectBoxDocumentProps.metadata.contains('some-text')), -/// ), -/// ); -/// ``` -class ObjectBoxVectorStore extends BaseObjectBoxVectorStore { - /// Creates an [ObjectBoxVectorStore] instance. - /// - /// Main configuration options: - /// - [embeddings] The embeddings model to use. - /// - [dimensions] The number of dimensions of the embeddings (vector size). - /// - /// ObjectBox-specific options: - /// - Check the ObjectBox's [Store] documentation for more details on the - /// different options. - ObjectBoxVectorStore({ - required super.embeddings, - required final int dimensions, - final String? directory, - final int? maxDBSizeInKB, - final int? maxDataSizeInKB, - final int? fileMode, - final int? maxReaders, - final bool queriesCaseSensitiveDefault = true, - final String? macosApplicationGroup, - }) : super( - box: _openStore( - dimensions: dimensions, - directory: directory, - maxDBSizeInKB: maxDBSizeInKB, - maxDataSizeInKB: maxDataSizeInKB, - fileMode: fileMode, - maxReaders: maxReaders, - queriesCaseSensitiveDefault: queriesCaseSensitiveDefault, - macosApplicationGroup: macosApplicationGroup, - ).box(), - createEntity: _createObjectBoxDocument, - createDocument: _createDoc, - getIdProperty: () => obxg.ObjectBoxDocument_.id, - getEmbeddingProperty: () => obxg.ObjectBoxDocument_.embedding, - ); - - /// The ObjectBox store. - static Store? _store; - - /// Opens the ObjectBox store. - static Store _openStore({ - required final int dimensions, - final String? directory, - final int? maxDBSizeInKB, - final int? maxDataSizeInKB, - final int? fileMode, - final int? maxReaders, - final bool queriesCaseSensitiveDefault = true, - final String? macosApplicationGroup, - }) { - return _store ??= obxg.openStore( - dimensions: dimensions, - directory: directory, - maxDBSizeInKB: maxDBSizeInKB, - maxDataSizeInKB: maxDataSizeInKB, - fileMode: fileMode, - maxReaders: maxReaders, - queriesCaseSensitiveDefault: queriesCaseSensitiveDefault, - macosApplicationGroup: macosApplicationGroup, - ); - } - - /// Creates an [ObjectBoxDocument] entity. - static ObjectBoxDocument _createObjectBoxDocument( - String id, - String content, - String metadata, - List embedding, - ) => - ObjectBoxDocument(0, id, content, metadata, embedding); - - /// Creates a [Document] from an [ObjectBoxDocument] entity. - static Document _createDoc(ObjectBoxDocument entity) { - Map metadata = const {}; - try { - metadata = jsonDecode(entity.metadata); - } catch (_) {} - return Document( - id: entity.id, - pageContent: entity.content, - metadata: metadata, - ); - } - - /// Closes the ObjectBox store; - /// - /// Don't try to call any other methods after the store is closed. - void close() { - _store?.close(); - _store = null; - } -} - -/// {@template objectbox_document} -/// The ObjectBox entity representing a LangChain [Document]. -/// {@endtemplate} -@Entity() -class ObjectBoxDocument { - /// {@macro objectbox_document} - ObjectBoxDocument( - this.internalId, - this.id, - this.content, - this.metadata, - this.embedding, - ); - - /// The internal ID used by ObjectBox. - @Id() - int internalId = 0; - - /// The ID of the document. 
- @Unique(onConflict: ConflictStrategy.replace) - String id; - - /// The content of the document. - String content; - - /// The metadata of the document. - String metadata; - - /// The embedding of the document. - @HnswIndex(dimensions: 0) // Set dynamically in the ObjectBoxVectorStore - @Property(type: PropertyType.floatVector) - List embedding; -} - -/// [ObjectBoxDocument] entity fields to define ObjectBox queries. -/// -/// Example: -/// ```dart -/// final filterCondition = ObjectBoxDocumentProps.metadata -/// .contains('animal') -/// .or(ObjectBoxDocumentProps.metadata.contains('natural'); -/// ``` -typedef ObjectBoxDocumentProps = obxg.ObjectBoxDocument_; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart deleted file mode 100644 index 4eed33be..00000000 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart +++ /dev/null @@ -1,193 +0,0 @@ -// GENERATED CODE - DO NOT MODIFY BY HAND -// This code was generated by ObjectBox. To update it run the generator again -// with `dart run build_runner build`. -// See also https://docs.objectbox.io/getting-started#generate-objectbox-code - -// ignore_for_file: camel_case_types, depend_on_referenced_packages, avoid_js_rounded_ints, require_trailing_commas, cascade_invocations, strict_raw_type -// coverage:ignore-file - -import 'dart:typed_data'; - -import 'package:flat_buffers/flat_buffers.dart' as fb; -import 'package:objectbox/internal.dart' - as obx_int; // generated code can access "internal" functionality -import 'package:objectbox/objectbox.dart' as obx; - -import '../../../src/vector_stores/objectbox/objectbox.dart'; - -export 'package:objectbox/objectbox.dart'; // so that callers only have to import this file - -List? _entities; - -List _getEntities(int dimensions) { - if (_entities != null) { - final objectBoxDocumentEntity = _entities![0]; - final embeddingProperty = objectBoxDocumentEntity.properties[4]; - - if (embeddingProperty.hnswParams?.dimensions != dimensions) { - _entities = null; - } else { - return _entities!; - } - } - - return _entities ??= [ - obx_int.ModelEntity( - id: const obx_int.IdUid(1, 4662034750769022750), - name: 'ObjectBoxDocument', - lastPropertyId: const obx_int.IdUid(5, 5762998900965066008), - flags: 0, - properties: [ - obx_int.ModelProperty( - id: const obx_int.IdUid(1, 328437667364158177), - name: 'internalId', - type: 6, - flags: 1), - obx_int.ModelProperty( - id: const obx_int.IdUid(2, 3766173764062654800), - name: 'id', - type: 9, - flags: 34848, - indexId: const obx_int.IdUid(1, 8818474670164842374)), - obx_int.ModelProperty( - id: const obx_int.IdUid(3, 7972539540824041325), - name: 'content', - type: 9, - flags: 0), - obx_int.ModelProperty( - id: const obx_int.IdUid(4, 866532944790310363), - name: 'metadata', - type: 9, - flags: 0), - obx_int.ModelProperty( - id: const obx_int.IdUid(5, 5762998900965066008), - name: 'embedding', - type: 28, - flags: 8, - indexId: const obx_int.IdUid(2, 3016727589204567263), - hnswParams: obx_int.ModelHnswParams( - dimensions: dimensions, - )) - ], - relations: [], - backlinks: []) - ]; -} - -/// Shortcut for [obx.Store.new] that passes [getObjectBoxModel] and for Flutter -/// apps by default a [directory] using `defaultStoreDirectory()` from the -/// ObjectBox Flutter library. -/// -/// Note: for desktop apps it is recommended to specify a unique [directory]. 
-/// -/// See [obx.Store.new] for an explanation of all parameters. -/// -/// For Flutter apps, also calls `loadObjectBoxLibraryAndroidCompat()` from -/// the ObjectBox Flutter library to fix loading the native ObjectBox library -/// on Android 6 and older. -obx.Store openStore( - {required int dimensions, - String? directory, - int? maxDBSizeInKB, - int? maxDataSizeInKB, - int? fileMode, - int? maxReaders, - bool queriesCaseSensitiveDefault = true, - String? macosApplicationGroup}) { - return obx.Store(getObjectBoxModel(dimensions), - directory: directory, - maxDBSizeInKB: maxDBSizeInKB, - maxDataSizeInKB: maxDataSizeInKB, - fileMode: fileMode, - maxReaders: maxReaders, - queriesCaseSensitiveDefault: queriesCaseSensitiveDefault, - macosApplicationGroup: macosApplicationGroup); -} - -/// Returns the ObjectBox model definition for this project for use with -/// [obx.Store.new]. -obx_int.ModelDefinition getObjectBoxModel(int dimensions) { - final entities = _getEntities(dimensions); - final model = obx_int.ModelInfo( - entities: _getEntities(dimensions), - lastEntityId: const obx_int.IdUid(1, 4662034750769022750), - lastIndexId: const obx_int.IdUid(2, 3016727589204567263), - lastRelationId: const obx_int.IdUid(0, 0), - lastSequenceId: const obx_int.IdUid(0, 0), - retiredEntityUids: const [], - retiredIndexUids: const [], - retiredPropertyUids: const [], - retiredRelationUids: const [], - modelVersion: 5, - modelVersionParserMinimum: 5, - version: 1); - - final bindings = { - ObjectBoxDocument: obx_int.EntityDefinition( - model: entities[0], - toOneRelations: (ObjectBoxDocument object) => [], - toManyRelations: (ObjectBoxDocument object) => {}, - getId: (ObjectBoxDocument object) => object.internalId, - setId: (ObjectBoxDocument object, int id) { - object.internalId = id; - }, - objectToFB: (ObjectBoxDocument object, fb.Builder fbb) { - final idOffset = fbb.writeString(object.id); - final contentOffset = fbb.writeString(object.content); - final metadataOffset = fbb.writeString(object.metadata); - final embeddingOffset = fbb.writeListFloat32(object.embedding); - fbb.startTable(6); - fbb.addInt64(0, object.internalId); - fbb.addOffset(1, idOffset); - fbb.addOffset(2, contentOffset); - fbb.addOffset(3, metadataOffset); - fbb.addOffset(4, embeddingOffset); - fbb.finish(fbb.endTable()); - return object.internalId; - }, - objectFromFB: (obx.Store store, ByteData fbData) { - final buffer = fb.BufferContext(fbData); - final rootOffset = buffer.derefObject(0); - final internalIdParam = - const fb.Int64Reader().vTableGet(buffer, rootOffset, 4, 0); - final idParam = const fb.StringReader(asciiOptimization: true) - .vTableGet(buffer, rootOffset, 6, ''); - final contentParam = const fb.StringReader(asciiOptimization: true) - .vTableGet(buffer, rootOffset, 8, ''); - final metadataParam = const fb.StringReader(asciiOptimization: true) - .vTableGet(buffer, rootOffset, 10, ''); - final embeddingParam = - const fb.ListReader(fb.Float32Reader(), lazy: false) - .vTableGet(buffer, rootOffset, 12, []); - final object = ObjectBoxDocument(internalIdParam, idParam, - contentParam, metadataParam, embeddingParam); - - return object; - }) - }; - - return obx_int.ModelDefinition(model, bindings); -} - -/// [ObjectBoxDocument] entity fields to define ObjectBox queries. -class ObjectBoxDocument_ { - /// See [ObjectBoxDocument.internalId]. - static final internalId = - obx.QueryIntegerProperty(_entities![0].properties[0]); - - /// See [ObjectBoxDocument.id]. 
- static final id = - obx.QueryStringProperty(_entities![0].properties[1]); - - /// See [ObjectBoxDocument.content]. - static final content = - obx.QueryStringProperty(_entities![0].properties[2]); - - /// See [ObjectBoxDocument.metadata]. - static final metadata = - obx.QueryStringProperty(_entities![0].properties[3]); - - /// See [ObjectBoxDocument.embedding]. - static final embedding = - obx.QueryHnswProperty(_entities![0].properties[4]); -} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart deleted file mode 100644 index 7763f9cf..00000000 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart +++ /dev/null @@ -1,53 +0,0 @@ -// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters -import 'base_objectbox_stub.dart'; - -// This is a stub class -class ObjectBoxVectorStore extends BaseObjectBoxVectorStore { - ObjectBoxVectorStore({ - required super.embeddings, - required int dimensions, - final String? directory, - final int? maxDBSizeInKB, - final int? maxDataSizeInKB, - final int? fileMode, - final int? maxReaders, - final bool queriesCaseSensitiveDefault = true, - final String? macosApplicationGroup, - }) : super( - box: null, - createEntity: null, - createDocument: null, - getIdProperty: null, - getEmbeddingProperty: null, - ); - - void close() { - throw UnsupportedError('ObjectBox is not supported on web platform.'); - } -} - -// This is a stub class -class ObjectBoxDocument { - ObjectBoxDocument( - this.internalId, - this.id, - this.content, - this.metadata, - this.embedding, - ); - - int internalId = 0; - String id; - String content; - String metadata; - List embedding; -} - -// This is a stub class -class ObjectBoxDocumentProps { - static const internalId = null; - static const id = null; - static const content = null; - static const metadata = null; - static const embedding = null; -} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/types.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/types.dart deleted file mode 100644 index aaa08078..00000000 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/types.dart +++ /dev/null @@ -1,29 +0,0 @@ -import 'package:langchain_core/vector_stores.dart'; -import 'package:objectbox/objectbox.dart' show Condition; - -/// {@template objectbox_similarity_search} -/// ObjectBox similarity search config. -/// -/// ObjectBox supports filtering queries by id, content or metadata using -/// [Condition]. You can define the filter condition in the [filterCondition] -/// parameter. -/// -/// Example: -/// ```dart -/// ObjectBoxSimilaritySearch( -/// k: 10, -/// scoreThreshold: 1.3, -/// filterCondition: ObjectBoxDocumentProps.metadata.contains('cat'), -/// ); -/// ``` -/// {@endtemplate} -class ObjectBoxSimilaritySearch extends VectorStoreSimilaritySearch { - /// {@macro objectbox_similarity_search} - ObjectBoxSimilaritySearch({ - super.k = 4, - super.scoreThreshold, - final Condition? filterCondition, - }) : super( - filter: filterCondition != null ? 
{'filter': filterCondition} : null, - ); -} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart deleted file mode 100644 index 4b1aa144..00000000 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart +++ /dev/null @@ -1,11 +0,0 @@ -// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters -import 'package:langchain_core/vector_stores.dart'; - -// This is a stub class -class ObjectBoxSimilaritySearch extends VectorStoreSimilaritySearch { - ObjectBoxSimilaritySearch({ - super.k = 0, - super.scoreThreshold, - Object? filterCondition, - }) : super(filter: null); -} diff --git a/packages/langchain_community/lib/src/vector_stores/vector_stores.dart b/packages/langchain_community/lib/src/vector_stores/vector_stores.dart deleted file mode 100644 index d9da952b..00000000 --- a/packages/langchain_community/lib/src/vector_stores/vector_stores.dart +++ /dev/null @@ -1 +0,0 @@ -export 'objectbox/ob.dart'; diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 9fd5f428..29fbdb15 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. -version: 0.3.2 +version: 0.2.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -13,27 +13,17 @@ topics: - langchain environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: beautiful_soup_dart: ^0.3.0 - cross_file: ^0.3.4+2 + cross_file: ^0.3.4+1 csv: ^6.0.0 - flat_buffers: ^23.5.26 - http: ^1.2.2 - json_path: ^0.7.4 - langchain_core: 0.3.6 - math_expressions: ^2.6.0 + http: ^1.1.0 + json_path: ^0.7.1 + langchain_core: ^0.3.1 + math_expressions: ^2.4.0 meta: ^1.11.0 - objectbox: ^4.0.1 - tavily_dart: ^0.1.0 - uuid: ^4.4.2 dev_dependencies: - build_runner: ^2.4.11 - langchain_openai: ^0.7.2 - objectbox_generator: ^4.0.1 - test: ^1.25.8 - -objectbox: - output_dir: src/vector_stores/objectbox + test: ^1.25.2 diff --git a/packages/langchain_community/pubspec_overrides.yaml b/packages/langchain_community/pubspec_overrides.yaml index 19febce5..3508ed77 100644 --- a/packages/langchain_community/pubspec_overrides.yaml +++ b/packages/langchain_community/pubspec_overrides.yaml @@ -1,10 +1,4 @@ -# melos_managed_dependency_overrides: langchain_core,langchain_openai,openai_dart,tavily_dart +# melos_managed_dependency_overrides: langchain_core dependency_overrides: langchain_core: path: ../langchain_core - langchain_openai: - path: ../langchain_openai - openai_dart: - path: ../openai_dart - tavily_dart: - path: ../tavily_dart diff --git a/packages/langchain_community/test/tools/tavily_test.dart b/packages/langchain_community/test/tools/tavily_test.dart deleted file mode 100644 index 85214c6c..00000000 --- a/packages/langchain_community/test/tools/tavily_test.dart +++ /dev/null @@ -1,31 +0,0 @@ -import 'dart:convert'; -import 'dart:io'; - -import 'package:langchain_community/langchain_community.dart'; -import 'package:test/test.dart'; - -void main() { - 
group('TavilySearchResultsTool tests', () { - test('Calculate expressions', () async { - final tool = TavilySearchResultsTool( - apiKey: Platform.environment['TAVILY_API_KEY']!, - ); - final res = await tool.invoke('What is the weather like in New York?'); - expect(res.results, isNotEmpty); - final jsonString = res.toString(); - expect(() => json.decode(jsonString), returnsNormally); - tool.close(); - }); - }); - - group('TavilyAnswerTool tests', () { - test('Invoke TavilyAnswerTool', () async { - final tool = TavilyAnswerTool( - apiKey: Platform.environment['TAVILY_API_KEY']!, - ); - final res = await tool.invoke('What is the weather like in New York?'); - expect(res, isNotEmpty); - tool.close(); - }); - }); -} diff --git a/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart b/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart deleted file mode 100644 index fdce5a1b..00000000 --- a/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart +++ /dev/null @@ -1,190 +0,0 @@ -import 'dart:io'; - -import 'package:langchain_community/langchain_community.dart'; -import 'package:langchain_core/documents.dart'; -import 'package:langchain_openai/langchain_openai.dart'; -import 'package:objectbox/objectbox.dart'; -import 'package:test/test.dart'; - -void main() async { - late final OpenAIEmbeddings embeddings; - late final ObjectBoxVectorStore vectorStore; - - setUpAll(() async { - embeddings = OpenAIEmbeddings( - apiKey: Platform.environment['OPENAI_API_KEY'], - ); - vectorStore = ObjectBoxVectorStore( - embeddings: embeddings, - dimensions: 1536, - directory: 'test/vector_stores/objectbox', - ); - }); - - group('ObjectBoxVectorStore tests', () { - test('Test add new vectors', () async { - final res = await vectorStore.addDocuments( - documents: [ - const Document( - id: '1', - pageContent: 'The cat sat on the mat', - metadata: {'cat': 'animal'}, - ), - const Document( - id: '2', - pageContent: 'The dog chased the ball.', - metadata: {'cat': 'animal'}, - ), - const Document( - id: '3', - pageContent: 'The boy ate the apple.', - metadata: {'cat': 'person'}, - ), - const Document( - id: '4', - pageContent: 'The girl drank the milk.', - metadata: {'cat': 'person'}, - ), - const Document( - id: '5', - pageContent: 'The sun is shining.', - metadata: {'cat': 'natural'}, - ), - ], - ); - - expect(res.length, 5); - }); - - test('Test query return 1 result', () async { - final res = await vectorStore.similaritySearch( - query: 'Is it raining?', - config: ObjectBoxSimilaritySearch(k: 1), - ); - expect(res.length, 1); - expect( - res.first.id, - '5', - ); - }); - - test('Test query with scoreThreshold', () async { - final res = await vectorStore.similaritySearchWithScores( - query: 'Is it raining?', - config: ObjectBoxSimilaritySearch(scoreThreshold: 0.3), - ); - for (final (_, score) in res) { - expect(score, greaterThan(0.3)); - } - }); - - test('Test query with equality filter', () async { - final res = await vectorStore.similaritySearch( - query: 'What are they eating?', - config: ObjectBoxSimilaritySearch( - k: 10, - scoreThreshold: 1.3, - filterCondition: ObjectBoxDocumentProps.metadata.contains('person'), - ), - ); - for (final doc in res) { - expect(doc.metadata['cat'], 'person'); - } - }); - - test('Test query with filter with multiple operators', () async { - final res = await vectorStore.similaritySearch( - query: 'What are they eating?', - config: ObjectBoxSimilaritySearch( - k: 10, - filterCondition: 
ObjectBoxDocumentProps.metadata - .contains('animal') - .or(ObjectBoxDocumentProps.metadata.contains('natural')), - ), - ); - for (final doc in res) { - expect(doc.metadata['cat'], isNot('person')); - } - }); - - test('Test delete document', () async { - await vectorStore.addDocuments( - documents: [ - const Document( - id: '9999', - pageContent: 'This document will be deleted', - metadata: {'cat': 'xxx'}, - ), - ], - ); - final res1 = await vectorStore.similaritySearch( - query: 'Deleted doc', - config: ObjectBoxSimilaritySearch( - filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), - ), - ); - expect(res1.length, 1); - expect(res1.first.id, '9999'); - - await vectorStore.delete(ids: ['9999']); - final res2 = await vectorStore.similaritySearch( - query: 'Deleted doc', - config: ObjectBoxSimilaritySearch( - filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), - ), - ); - expect(res2.length, 0); - }); - - test('Test delete where', () async { - await vectorStore.addDocuments( - documents: [ - const Document( - id: '9999', - pageContent: 'This document will be deleted', - metadata: {'cat': 'xxx'}, - ), - ], - ); - final res1 = await vectorStore.similaritySearch( - query: 'Deleted doc', - config: ObjectBoxSimilaritySearch( - filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), - ), - ); - expect(res1.length, 1); - expect(res1.first.id, '9999'); - - await vectorStore.deleteWhere( - ObjectBoxDocumentProps.metadata.contains('xxx'), - ); - final res2 = await vectorStore.similaritySearch( - query: 'Deleted doc', - config: ObjectBoxSimilaritySearch( - filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), - ), - ); - expect(res2.length, 0); - }); - }); - - group('ObjectBoxSimilaritySearch', () { - test('ObjectBoxSimilaritySearch fields', () { - final config = ObjectBoxSimilaritySearch( - k: 5, - scoreThreshold: 0.8, - filterCondition: ObjectBoxDocumentProps.metadata.contains('style1'), - ); - expect(config.k, 5); - expect(config.scoreThreshold, 0.8); - expect(config.filter?['filter'], isA>()); - }); - }); - - tearDownAll(() async { - embeddings.close(); - vectorStore.close(); - await File('test/vector_stores/objectbox/data.mdb').delete(); - await File('test/vector_stores/objectbox/lock.mdb').delete(); - }); -} diff --git a/packages/langchain_core/CHANGELOG.md b/packages/langchain_core/CHANGELOG.md index 382d3dd3..25cf9ffd 100644 --- a/packages/langchain_core/CHANGELOG.md +++ b/packages/langchain_core/CHANGELOG.md @@ -1,34 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.3.6 - - - **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) - -## 0.3.5 - - - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) - - **FEAT**: Support OpenAI's strict mode for tool calling in ChatOpenAI ([#536](https://github.com/davidmigloz/langchain_dart/issues/536)). 
([71623f49](https://github.com/davidmigloz/langchain_dart/commit/71623f490289e63252165167305e00038d800be1)) - - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) - -## 0.3.4 - - - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - -## 0.3.3 - - - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) - - **FEAT**: Update ChatResult.id concat logic ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) - -## 0.3.2 - - - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). ([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) - - **FIX**: Stream errors are not propagated by StringOutputParser ([#440](https://github.com/davidmigloz/langchain_dart/issues/440)). ([496b11cc](https://github.com/davidmigloz/langchain_dart/commit/496b11cca9bbf9892c425e49138562537398bc70)) - - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) - ## 0.3.1 - **FEAT**: Add equals to ChatToolChoiceForced ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f)) diff --git a/packages/langchain_core/lib/agents.dart b/packages/langchain_core/lib/agents.dart index 97382b62..e99fdb9f 100644 --- a/packages/langchain_core/lib/agents.dart +++ b/packages/langchain_core/lib/agents.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to agents. -library; +library agents; export 'src/agents/agents.dart'; diff --git a/packages/langchain_core/lib/chains.dart b/packages/langchain_core/lib/chains.dart index a35484cd..3214cef2 100644 --- a/packages/langchain_core/lib/chains.dart +++ b/packages/langchain_core/lib/chains.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to chains. -library; +library chains; export 'src/chains/chains.dart'; diff --git a/packages/langchain_core/lib/chat_history.dart b/packages/langchain_core/lib/chat_history.dart index 726dbd3c..316cbccc 100644 --- a/packages/langchain_core/lib/chat_history.dart +++ b/packages/langchain_core/lib/chat_history.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to chat history. 
-library; +library chat_history; export 'src/chat_history/chat_history.dart'; diff --git a/packages/langchain_core/lib/chat_models.dart b/packages/langchain_core/lib/chat_models.dart index 259fa3c3..803668df 100644 --- a/packages/langchain_core/lib/chat_models.dart +++ b/packages/langchain_core/lib/chat_models.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to chat models. -library; +library chat_models; export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_core/lib/document_loaders.dart b/packages/langchain_core/lib/document_loaders.dart index b8340c67..51fdbead 100644 --- a/packages/langchain_core/lib/document_loaders.dart +++ b/packages/langchain_core/lib/document_loaders.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to document loaders. -library; +library document_loaders; export 'src/document_loaders/document_loaders.dart'; diff --git a/packages/langchain_core/lib/documents.dart b/packages/langchain_core/lib/documents.dart index a0f68ebd..24d340a4 100644 --- a/packages/langchain_core/lib/documents.dart +++ b/packages/langchain_core/lib/documents.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to documents. -library; +library documents; export 'src/documents/documents.dart'; diff --git a/packages/langchain_core/lib/embeddings.dart b/packages/langchain_core/lib/embeddings.dart index b6c2bc82..829de2c7 100644 --- a/packages/langchain_core/lib/embeddings.dart +++ b/packages/langchain_core/lib/embeddings.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to embeddings. -library; +library embeddings; export 'src/embeddings/embeddings.dart'; diff --git a/packages/langchain_core/lib/exceptions.dart b/packages/langchain_core/lib/exceptions.dart index 1e0d7fa0..4371a3a3 100644 --- a/packages/langchain_core/lib/exceptions.dart +++ b/packages/langchain_core/lib/exceptions.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to exceptions. -library; +library exceptions; export 'src/exceptions/exceptions.dart'; diff --git a/packages/langchain_core/lib/langchain.dart b/packages/langchain_core/lib/langchain.dart index cf5bb742..b30c4d14 100644 --- a/packages/langchain_core/lib/langchain.dart +++ b/packages/langchain_core/lib/langchain.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to LangChain. -library; +library langchain; export 'src/langchain/langchain.dart'; diff --git a/packages/langchain_core/lib/language_models.dart b/packages/langchain_core/lib/language_models.dart index 1fae54b5..7cabafc7 100644 --- a/packages/langchain_core/lib/language_models.dart +++ b/packages/langchain_core/lib/language_models.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to language models. -library; +library language_models; export 'src/language_models/language_models.dart'; diff --git a/packages/langchain_core/lib/llms.dart b/packages/langchain_core/lib/llms.dart index ed130b60..5b98240d 100644 --- a/packages/langchain_core/lib/llms.dart +++ b/packages/langchain_core/lib/llms.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to LLMs. -library; +library llms; export 'src/llms/llms.dart'; diff --git a/packages/langchain_core/lib/memory.dart b/packages/langchain_core/lib/memory.dart index 7193923f..b79467cf 100644 --- a/packages/langchain_core/lib/memory.dart +++ b/packages/langchain_core/lib/memory.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to memory. 
-library; +library memory; export 'src/memory/memory.dart'; diff --git a/packages/langchain_core/lib/output_parsers.dart b/packages/langchain_core/lib/output_parsers.dart index 2915a146..7f0d0d5f 100644 --- a/packages/langchain_core/lib/output_parsers.dart +++ b/packages/langchain_core/lib/output_parsers.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to output parsers. -library; +library output_parsers; export 'src/output_parsers/output_parsers.dart'; diff --git a/packages/langchain_core/lib/prompts.dart b/packages/langchain_core/lib/prompts.dart index b7873da5..dbb7ef5b 100644 --- a/packages/langchain_core/lib/prompts.dart +++ b/packages/langchain_core/lib/prompts.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to prompts. -library; +library prompts; export 'src/prompts/prompts.dart'; diff --git a/packages/langchain_core/lib/retrievers.dart b/packages/langchain_core/lib/retrievers.dart index 5d1278bf..5b1ec71e 100644 --- a/packages/langchain_core/lib/retrievers.dart +++ b/packages/langchain_core/lib/retrievers.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to retrievers. -library; +library retrievers; export 'src/retrievers/retrievers.dart'; diff --git a/packages/langchain_core/lib/runnables.dart b/packages/langchain_core/lib/runnables.dart index 72b67584..e111eb58 100644 --- a/packages/langchain_core/lib/runnables.dart +++ b/packages/langchain_core/lib/runnables.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to runnables. -library; +library runnables; export 'src/runnables/runnables.dart'; diff --git a/packages/langchain_core/lib/src/chains/types.dart b/packages/langchain_core/lib/src/chains/types.dart index e76d876c..f677381e 100644 --- a/packages/langchain_core/lib/src/chains/types.dart +++ b/packages/langchain_core/lib/src/chains/types.dart @@ -6,7 +6,7 @@ import '../langchain/types.dart'; typedef ChainValues = Map; /// {@template chain_options} -/// Options to pass to the chain. +/// Options to pass to a chain. /// {@endtemplate} @immutable class ChainOptions extends BaseLangChainOptions { diff --git a/packages/langchain_core/lib/src/chat_models/fake.dart b/packages/langchain_core/lib/src/chat_models/fake.dart index bda1d6e3..f465223d 100644 --- a/packages/langchain_core/lib/src/chat_models/fake.dart +++ b/packages/langchain_core/lib/src/chat_models/fake.dart @@ -1,8 +1,5 @@ -import 'package:collection/collection.dart'; - import '../../language_models.dart'; import '../prompts/types.dart'; -import '../tools/base.dart'; import 'base.dart'; import 'types.dart'; @@ -10,12 +7,11 @@ import 'types.dart'; /// Fake Chat Model for testing. /// You can pass in a list of responses to return in order when called. /// {@endtemplate} -class FakeChatModel extends BaseChatModel { +class FakeChatModel extends SimpleChatModel { /// {@macro fake_list_llm} FakeChatModel({ required this.responses, - super.defaultOptions = const FakeChatModelOptions(), - }); + }) : super(defaultOptions: const ChatModelOptions()); /// Responses to return in order when called. final List responses; @@ -26,28 +22,17 @@ class FakeChatModel extends BaseChatModel { String get modelType => 'fake-chat-model'; @override - Future invoke( - final PromptValue input, { - final FakeChatModelOptions? options, - }) async { - final text = responses[_i++ % responses.length]; - final message = AIChatMessage(content: text); - return ChatResult( - id: '1', - output: message, - finishReason: FinishReason.unspecified, - metadata: { - 'model': options?.model ?? 
defaultOptions.model, - ...?options?.metadata ?? defaultOptions.metadata, - }, - usage: const LanguageModelUsage(), - ); + Future callInternal( + final List messages, { + final ChatModelOptions? options, + }) { + return Future.value(responses[_i++ % responses.length]); } @override Stream stream( final PromptValue input, { - final FakeChatModelOptions? options, + final ChatModelOptions? options, }) { final res = responses[_i++ % responses.length].split(''); return Stream.fromIterable(res).map( @@ -55,10 +40,7 @@ class FakeChatModel extends BaseChatModel { id: 'fake-chat-model', output: AIChatMessage(content: char), finishReason: FinishReason.stop, - metadata: { - 'model': options?.model ?? defaultOptions.model, - ...?options?.metadata ?? defaultOptions.metadata, - }, + metadata: const {}, usage: const LanguageModelUsage(), streaming: true, ), @@ -78,133 +60,41 @@ class FakeChatModel extends BaseChatModel { } } -/// {@template fake_chat_model_options} -/// Fake Chat Model Options for testing. -/// {@endtemplate} -class FakeChatModelOptions extends ChatModelOptions { - /// {@macro fake_chat_model_options} - const FakeChatModelOptions({ - super.model, - this.metadata, - super.tools, - super.toolChoice, - super.concurrencyLimit, - }); - - /// Metadata. - final Map? metadata; - - @override - FakeChatModelOptions copyWith({ - final String? model, - final Map? metadata, - final List? tools, - final ChatToolChoice? toolChoice, - final int? concurrencyLimit, - }) { - return FakeChatModelOptions( - model: model ?? this.model, - metadata: metadata ?? this.metadata, - tools: tools ?? this.tools, - toolChoice: toolChoice ?? this.toolChoice, - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, - ); - } - - @override - FakeChatModelOptions merge( - covariant final FakeChatModelOptions? other, - ) { - return copyWith( - model: other?.model, - metadata: other?.metadata, - concurrencyLimit: other?.concurrencyLimit, - ); - } - - @override - bool operator ==(covariant final FakeChatModelOptions other) { - return model == other.model && - const MapEquality().equals(metadata, other.metadata) && - concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return model.hashCode ^ - const MapEquality().hash(metadata) ^ - concurrencyLimit.hashCode; - } -} - -/// {@template fake_echo_chat_model} +/// {@template fake_echo_llm} /// Fake Chat Model for testing. /// It just returns the content of the last message of the prompt /// or streams it char by char. /// {@endtemplate} -class FakeEchoChatModel extends BaseChatModel { - /// {@macro fake_echo_chat_model} - const FakeEchoChatModel({ - super.defaultOptions = const FakeEchoChatModelOptions(), - }); +class FakeEchoChatModel extends SimpleChatModel { + /// {@macro fake_echo_llm} + const FakeEchoChatModel() : super(defaultOptions: const ChatModelOptions()); @override String get modelType => 'fake-echo-chat-model'; @override - Future invoke( - final PromptValue input, { - final FakeEchoChatModelOptions? options, - }) async { - final throwError = - options?.throwRandomError ?? defaultOptions.throwRandomError; - if (throwError) { - throw Exception('Random error'); - } - - final text = input.toChatMessages().last.contentAsString; - final message = AIChatMessage(content: text); - return ChatResult( - id: '1', - output: message, - finishReason: FinishReason.unspecified, - metadata: { - 'model': options?.model ?? defaultOptions.model, - ...?options?.metadata ?? 
defaultOptions.metadata, - }, - usage: const LanguageModelUsage(), - ); + Future callInternal( + final List messages, { + final ChatModelOptions? options, + }) { + return Future.value(messages.last.contentAsString); } @override Stream stream( final PromptValue input, { - final FakeEchoChatModelOptions? options, + final ChatModelOptions? options, }) { final prompt = input.toChatMessages().first.contentAsString.split(''); - final throwError = - options?.throwRandomError ?? defaultOptions.throwRandomError; - - var index = 0; return Stream.fromIterable(prompt).map( - (final char) { - if (throwError && index == prompt.length ~/ 2) { - throw Exception('Random error'); - } - - return ChatResult( - id: 'fake-echo-chat-model', - output: AIChatMessage(content: char), - finishReason: FinishReason.stop, - metadata: { - 'model': options?.model ?? defaultOptions.model, - ...?options?.metadata ?? defaultOptions.metadata, - 'index': index++, - }, - usage: const LanguageModelUsage(), - streaming: true, - ); - }, + (final char) => ChatResult( + id: 'fake-echo-chat-model', + output: AIChatMessage(content: char), + finishReason: FinishReason.stop, + metadata: const {}, + usage: const LanguageModelUsage(), + streaming: true, + ), ); } @@ -220,71 +110,3 @@ class FakeEchoChatModel extends BaseChatModel { .toList(growable: false); } } - -/// {@template fake_chat_model_options} -/// Fake Echo Chat Model Options for testing. -/// {@endtemplate} -class FakeEchoChatModelOptions extends ChatModelOptions { - /// {@macro fake_chat_model_options} - const FakeEchoChatModelOptions({ - super.model, - this.metadata, - this.throwRandomError = false, - super.tools, - super.toolChoice, - super.concurrencyLimit, - }); - - /// Metadata. - final Map? metadata; - - /// If true, throws a random error. - final bool throwRandomError; - - @override - FakeEchoChatModelOptions copyWith({ - final String? model, - final Map? metadata, - final bool? throwRandomError, - final List? tools, - final ChatToolChoice? toolChoice, - final int? concurrencyLimit, - }) { - return FakeEchoChatModelOptions( - model: model ?? this.model, - metadata: metadata ?? this.metadata, - throwRandomError: throwRandomError ?? this.throwRandomError, - tools: tools ?? this.tools, - toolChoice: toolChoice ?? this.toolChoice, - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, - ); - } - - @override - FakeEchoChatModelOptions merge( - covariant final FakeEchoChatModelOptions? other, - ) { - return copyWith( - model: other?.model, - metadata: other?.metadata, - throwRandomError: other?.throwRandomError, - concurrencyLimit: other?.concurrencyLimit, - ); - } - - @override - bool operator ==(covariant final FakeEchoChatModelOptions other) { - return model == other.model && - const MapEquality().equals(metadata, other.metadata) && - throwRandomError == other.throwRandomError && - concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return model.hashCode ^ - const MapEquality().hash(metadata) ^ - throwRandomError.hashCode ^ - concurrencyLimit.hashCode; - } -} diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index f92fe4af..1ada3bbe 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -7,14 +7,12 @@ import '../tools/base.dart'; /// {@template chat_model_options} /// Generation options to pass into the Chat Model. 
/// {@endtemplate} -@immutable -abstract class ChatModelOptions extends LanguageModelOptions { +class ChatModelOptions extends LanguageModelOptions { /// {@macro chat_model_options} const ChatModelOptions({ - super.model, + super.concurrencyLimit, this.tools, this.toolChoice, - super.concurrencyLimit, }); /// A list of tools the model may call. @@ -22,14 +20,6 @@ abstract class ChatModelOptions extends LanguageModelOptions { /// Controls which (if any) tool is called by the model. final ChatToolChoice? toolChoice; - - @override - ChatModelOptions copyWith({ - final String? model, - final List? tools, - final ChatToolChoice? toolChoice, - final int? concurrencyLimit, - }); } /// {@template chat_result} @@ -54,7 +44,7 @@ class ChatResult extends LanguageModelResult { final LanguageModelResult other, ) { return ChatResult( - id: other.id.isNotEmpty ? other.id : id, + id: other.id, output: output.concat(other.output), finishReason: finishReason != FinishReason.unspecified && other.finishReason == FinishReason.unspecified @@ -433,8 +423,6 @@ class AIChatMessageToolCall { }); /// The id of the tool to call. - /// - /// This is used to match up the tool results later. final String id; /// The name of the tool to call. @@ -728,7 +716,7 @@ class ChatMessageContentImage extends ChatMessageContent { /// Depending on the model, this can be either: /// - The base64 encoded image data - /// - A URL of the image (only supported by some providers) + /// - A URL of the image. final String data; /// The IANA standard MIME type of the source data. @@ -830,12 +818,9 @@ sealed class ChatToolChoice { /// The model does not call a tool, and responds to the end-user. static const none = ChatToolChoiceNone(); - /// The model can pick between responding to the end-user or calling a tool. + /// The model can pick between an end-user or calling a tool. static const auto = ChatToolChoiceAuto(); - /// The model must call at least one tool, but doesn’t force a particular tool. - static const required = ChatToolChoiceRequired(); - /// The model is forced to to call the specified tool. factory ChatToolChoice.forced({required final String name}) => ChatToolChoiceForced(name: name); @@ -850,21 +835,13 @@ final class ChatToolChoiceNone extends ChatToolChoice { } /// {@template chat_tool_choice_auto} -/// The model can pick between responding to the end-user or calling a tool. +/// The model can pick between an end-user or calling a tool. /// {@endtemplate} final class ChatToolChoiceAuto extends ChatToolChoice { /// {@macro chat_tool_choice_auto} const ChatToolChoiceAuto(); } -/// {@template chat_tool_choice_required} -/// The model must call at least one tool, but doesn’t force a particular tool. -/// {@endtemplate} -final class ChatToolChoiceRequired extends ChatToolChoice { - /// {@macro chat_tool_choice_none} - const ChatToolChoiceRequired(); -} - /// {@template chat_tool_choice_forced} /// The model is forced to to call the specified tool. /// {@endtemplate} diff --git a/packages/langchain_core/lib/src/chat_models/utils.dart b/packages/langchain_core/lib/src/chat_models/utils.dart index ebfc011c..5c84a142 100644 --- a/packages/langchain_core/lib/src/chat_models/utils.dart +++ b/packages/langchain_core/lib/src/chat_models/utils.dart @@ -1,6 +1,6 @@ import 'types.dart'; -/// Extensions on `List`. +/// Extensions on [List]. extension ChatMessagesX on List { /// This function is to get a string representation of the chat messages /// based on the message content and role. 
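The chat_models/types.dart hunk above drops the `ChatToolChoiceRequired` variant, leaving `ChatToolChoice.none`, `ChatToolChoice.auto`, and the `ChatToolChoice.forced` factory on the reverted side of the API. A minimal sketch of how a caller might pick between the remaining variants, using only the `ChatToolChoice` API visible in the hunk (the `pickToolChoice` helper and the 'tavily_search_results' tool name are illustrative assumptions, not part of this patch):

```dart
import 'package:langchain_core/chat_models.dart';

/// Chooses a tool-choice strategy for a chat request.
/// Illustrative sketch only: the helper and the tool name are assumptions
/// for the example and are not part of this patch.
ChatToolChoice pickToolChoice({bool forceTool = false}) {
  if (forceTool) {
    // Force the model to call one specific tool by name.
    return ChatToolChoice.forced(name: 'tavily_search_results');
  }
  // Otherwise let the model decide between answering directly or calling a tool.
  return ChatToolChoice.auto;
}
```

With `ChatToolChoiceRequired` removed, callers that previously relied on `ChatToolChoice.required` to guarantee a tool call would fall back to `ChatToolChoice.forced` with an explicit tool name.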
diff --git a/packages/langchain_core/lib/src/langchain/types.dart b/packages/langchain_core/lib/src/langchain/types.dart index 091429d6..8dabca52 100644 --- a/packages/langchain_core/lib/src/langchain/types.dart +++ b/packages/langchain_core/lib/src/langchain/types.dart @@ -3,7 +3,7 @@ import 'package:meta/meta.dart'; import '../runnables/types.dart'; /// {@template base_lang_chain_options} -/// Base options class for LangChain components. +/// Base class for LangChain components' options. /// {@endtemplate} @immutable class BaseLangChainOptions extends RunnableOptions { diff --git a/packages/langchain_core/lib/src/language_models/base.dart b/packages/langchain_core/lib/src/language_models/base.dart index 3156cd74..33d3b002 100644 --- a/packages/langchain_core/lib/src/language_models/base.dart +++ b/packages/langchain_core/lib/src/language_models/base.dart @@ -1,3 +1,5 @@ +import 'package:meta/meta.dart'; + import '../langchain/base.dart'; import '../prompts/types.dart'; import 'types.dart'; @@ -56,4 +58,33 @@ abstract class BaseLanguageModel< @override String toString() => modelType; + + /// Throws an error if the model id is not specified. + @protected + Never throwNullModelError() { + throw ArgumentError(''' +Null model in $runtimeType. + +You need to specify the id of model to use either in `$runtimeType.defaultOptions` +or in the options passed when invoking the model. + +Example: +``` +// In defaultOptions +final model = $runtimeType( + defaultOptions: ${runtimeType}Options( + model: 'model-id', + ), +); + +// Or when invoking the model +final res = await model.invoke( + prompt, + options: ${runtimeType}Options( + model: 'model-id', + ), +); +``` +'''); + } } diff --git a/packages/langchain_core/lib/src/language_models/types.dart b/packages/langchain_core/lib/src/language_models/types.dart index 39e071bd..8112ab37 100644 --- a/packages/langchain_core/lib/src/language_models/types.dart +++ b/packages/langchain_core/lib/src/language_models/types.dart @@ -4,25 +4,14 @@ import 'package:meta/meta.dart'; import '../langchain/types.dart'; /// {@template language_model_options} -/// Options to pass into the language model. +/// Generation options to pass into the language model. /// {@endtemplate} @immutable abstract class LanguageModelOptions extends BaseLangChainOptions { /// {@macro language_model_options} const LanguageModelOptions({ - this.model, super.concurrencyLimit, }); - - /// ID of the language model to use. - /// Check the provider's documentation for available models. - final String? model; - - @override - LanguageModelOptions copyWith({ - final String? model, - final int? concurrencyLimit, - }); } /// {@template language_model} @@ -110,16 +99,12 @@ class LanguageModelUsage { }); /// The number of tokens in the prompt. - /// - /// Some providers call this "input_tokens". final int? promptTokens; /// The total number of billable characters in the prompt if applicable. final int? promptBillableCharacters; /// The number of tokens in the completion. - /// - /// Some providers call this "output_tokens". final int? responseTokens; /// The total number of billable characters in the completion if applicable. @@ -187,13 +172,9 @@ LanguageModelUsage{ /// The reason the model stopped generating tokens. enum FinishReason { /// The model hit a natural stop point or a provided stop sequence. - /// - /// Some providers call this "end_turn". stop, /// The maximum number of tokens specified in the request was reached. - /// - /// Some providers call this "max_tokens". 
length, /// The content was flagged for content filter reasons. @@ -203,8 +184,6 @@ enum FinishReason { recitation, /// The model called a tool. - /// - /// Some providers call this "tool_use". toolCalls, /// The finish reason is unspecified. diff --git a/packages/langchain_core/lib/src/llms/fake.dart b/packages/langchain_core/lib/src/llms/fake.dart index ffb64c00..0781e607 100644 --- a/packages/langchain_core/lib/src/llms/fake.dart +++ b/packages/langchain_core/lib/src/llms/fake.dart @@ -7,11 +7,11 @@ import 'types.dart'; /// Fake LLM for testing. /// You can pass in a list of responses to return in order when called. /// {@endtemplate} -class FakeLLM extends SimpleLLM { +class FakeLLM extends SimpleLLM { /// {@macro fake_list_llm} FakeLLM({ required this.responses, - }) : super(defaultOptions: const FakeLLMOptions()); + }) : super(defaultOptions: const LLMOptions()); /// Responses to return in order when called. final List responses; @@ -60,35 +60,13 @@ class FakeLLM extends SimpleLLM { } } -/// {@template fake_llm_options} -/// Fake LLM options for testing. -/// {@endtemplate} -class FakeLLMOptions extends LLMOptions { - /// {@macro fake_llm_options} - const FakeLLMOptions({ - super.model, - super.concurrencyLimit, - }); - - @override - FakeLLMOptions copyWith({ - final String? model, - final int? concurrencyLimit, - }) { - return FakeLLMOptions( - model: model ?? this.model, - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, - ); - } -} - /// {@template fake_echo_llm} /// Fake LLM for testing. /// It just returns the prompt or streams it char by char. /// {@endtemplate} -class FakeEchoLLM extends BaseLLM { +class FakeEchoLLM extends BaseLLM { /// {@macro fake_echo_llm} - const FakeEchoLLM() : super(defaultOptions: const FakeLLMOptions()); + const FakeEchoLLM() : super(defaultOptions: const LLMOptions()); @override String get modelType => 'fake-echo'; @@ -144,11 +122,11 @@ class FakeEchoLLM extends BaseLLM { /// Fake LLM for testing. /// It returns the string returned by the [handler] function. /// {@endtemplate} -class FakeHandlerLLM extends SimpleLLM { +class FakeHandlerLLM extends SimpleLLM { /// {@macro fake_handler_llm} FakeHandlerLLM({ required this.handler, - }) : super(defaultOptions: const FakeLLMOptions()); + }) : super(defaultOptions: const LLMOptions()); /// Function called to generate the response. final String Function( diff --git a/packages/langchain_core/lib/src/llms/types.dart b/packages/langchain_core/lib/src/llms/types.dart index 7a81a0ab..d6bed6f3 100644 --- a/packages/langchain_core/lib/src/llms/types.dart +++ b/packages/langchain_core/lib/src/llms/types.dart @@ -3,13 +3,12 @@ import 'package:meta/meta.dart'; import '../language_models/types.dart'; /// {@template llm_options} -/// Options to pass into the LLM. +/// Generation options to pass into the LLM. 
/// {@endtemplate} @immutable -abstract class LLMOptions extends LanguageModelOptions { +class LLMOptions extends LanguageModelOptions { /// {@macro llm_options} const LLMOptions({ - super.model, super.concurrencyLimit, }); } diff --git a/packages/langchain_core/lib/src/output_parsers/string.dart b/packages/langchain_core/lib/src/output_parsers/string.dart index 9dd4722a..f5ea11a8 100644 --- a/packages/langchain_core/lib/src/output_parsers/string.dart +++ b/packages/langchain_core/lib/src/output_parsers/string.dart @@ -68,7 +68,9 @@ class StringOutputParser if (reduceOutputStream) { yield await inputStream.map(_parse).reduce((final a, final b) => '$a$b'); } else { - yield* inputStream.map(_parse); + await for (final input in inputStream) { + yield _parse(input); + } } } diff --git a/packages/langchain_core/lib/src/output_parsers/types.dart b/packages/langchain_core/lib/src/output_parsers/types.dart index 9e8906b7..460840fa 100644 --- a/packages/langchain_core/lib/src/output_parsers/types.dart +++ b/packages/langchain_core/lib/src/output_parsers/types.dart @@ -60,9 +60,7 @@ class ParsedToolCall { } @override - int get hashCode { - return id.hashCode ^ name.hashCode ^ arguments.hashCode; - } + int get hashCode => id.hashCode ^ name.hashCode ^ arguments.hashCode; @override String toString() { diff --git a/packages/langchain_core/lib/src/prompts/types.dart b/packages/langchain_core/lib/src/prompts/types.dart index 3bd9756b..c2a9474b 100644 --- a/packages/langchain_core/lib/src/prompts/types.dart +++ b/packages/langchain_core/lib/src/prompts/types.dart @@ -90,7 +90,7 @@ class StringPromptValue implements PromptValue { /// /// When [toString] is called, it returns the string representation of the /// messages using the following format: -/// ```txt +/// ``` /// System: /// Human: /// AI: @@ -145,7 +145,7 @@ class ChatPromptValue implements PromptValue { return message.concat(otherMessage); } }) - .nonNulls + .whereNotNull() .toList(growable: false), ), }; diff --git a/packages/langchain_core/lib/src/retrievers/types.dart b/packages/langchain_core/lib/src/retrievers/types.dart index e3938296..4ed82147 100644 --- a/packages/langchain_core/lib/src/retrievers/types.dart +++ b/packages/langchain_core/lib/src/retrievers/types.dart @@ -9,9 +9,7 @@ import '../vector_stores/types.dart'; @immutable class RetrieverOptions extends BaseLangChainOptions { /// {@macro retriever_options} - const RetrieverOptions({ - super.concurrencyLimit, - }); + const RetrieverOptions(); } /// {@template vector_store_retriever_options} @@ -21,22 +19,10 @@ class VectorStoreRetrieverOptions extends RetrieverOptions { /// {@macro vector_store_retriever_options} const VectorStoreRetrieverOptions({ this.searchType = const VectorStoreSimilaritySearch(), - super.concurrencyLimit, }); /// The type of search to perform, either: /// - [VectorStoreSearchType.similarity] (default) /// - [VectorStoreSearchType.mmr] final VectorStoreSearchType searchType; - - @override - VectorStoreRetrieverOptions copyWith({ - final VectorStoreSearchType? searchType, - final int? concurrencyLimit, - }) { - return VectorStoreRetrieverOptions( - searchType: searchType ?? this.searchType, - concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, - ); - } } diff --git a/packages/langchain_core/lib/src/runnables/binding.dart b/packages/langchain_core/lib/src/runnables/binding.dart index 1bd1bee4..75e6084f 100644 --- a/packages/langchain_core/lib/src/runnables/binding.dart +++ b/packages/langchain_core/lib/src/runnables/binding.dart @@ -60,9 +60,7 @@ class RunnableBinding - extends Runnable { - /// {@macro runnable_fallback} - RunnableWithFallback({ - required this.mainRunnable, - required this.fallbacks, - }) : super(defaultOptions: const RunnableOptions()); - - /// The Runnable to run first. - final Runnable mainRunnable; - - /// A sequence of fallbacks to try if the [mainRunnable] fails. - final List> fallbacks; - - @override - Future invoke(RunInput input, {RunnableOptions? options}) async { - Object? firstError; - for (final runnable in [mainRunnable, ...fallbacks]) { - try { - return await runnable.invoke( - input, - options: firstError == null - ? options - : runnable.getCompatibleOptions(options), - ); - } catch (e) { - firstError ??= e; - } - } - throw Exception('All runnables failed. First error: $firstError'); - } - - @override - Future> batch( - List inputs, { - List? options, - }) async { - Object? firstError; - for (final runnable in [mainRunnable, ...fallbacks]) { - List? currentOptions; - if (firstError == null) { - currentOptions = options; - } else { - final compatibleOptions = - options?.map(runnable.getCompatibleOptions).toList(growable: false); - final hasNullOptions = - compatibleOptions?.any((o) => o == null) ?? false; - if (!hasNullOptions) { - currentOptions = compatibleOptions?.cast(); - } - } - - try { - return await runnable.batch( - inputs, - options: currentOptions, - ); - } catch (e) { - firstError ??= e; - } - } - throw Exception('All runnables failed. First error: $firstError'); - } - - @override - Stream stream( - RunInput input, { - RunnableOptions? options, - }) async* { - Object? firstError; - for (final runnable in [mainRunnable, ...fallbacks]) { - try { - final stream = runnable.stream( - input, - options: firstError == null - ? options - : runnable.getCompatibleOptions(options), - ); - await for (final output in stream) { - yield output; - } - return; - } catch (e) { - firstError ??= e; - } - } - throw Exception('All runnables failed. First error: $firstError'); - } -} diff --git a/packages/langchain_core/lib/src/runnables/function.dart b/packages/langchain_core/lib/src/runnables/function.dart index 7af32bcc..32843641 100644 --- a/packages/langchain_core/lib/src/runnables/function.dart +++ b/packages/langchain_core/lib/src/runnables/function.dart @@ -89,7 +89,7 @@ class RunnableFunction final RunnableOptions? options, }) async { if (_invokeFunc != null) { - return _invokeFunc(input, options); + return _invokeFunc!(input, options); } else { return stream(input, options: options).first; } @@ -113,7 +113,7 @@ class RunnableFunction final RunnableOptions? 
options, }) async* { if (_streamFunc != null) { - yield* _streamFunc(inputStream, options); + yield* _streamFunc!(inputStream, options); } else { yield* inputStream.asyncMap((final input) async { return invoke(input, options: options); diff --git a/packages/langchain_core/lib/src/runnables/map.dart b/packages/langchain_core/lib/src/runnables/map.dart index 0b3cb925..f9029da9 100644 --- a/packages/langchain_core/lib/src/runnables/map.dart +++ b/packages/langchain_core/lib/src/runnables/map.dart @@ -108,11 +108,4 @@ class RunnableMap }), ).asBroadcastStream(); } - - @override - void close() { - for (final step in steps.values) { - step.close(); - } - } } diff --git a/packages/langchain_core/lib/src/runnables/retry.dart b/packages/langchain_core/lib/src/runnables/retry.dart deleted file mode 100644 index e49c4d22..00000000 --- a/packages/langchain_core/lib/src/runnables/retry.dart +++ /dev/null @@ -1,63 +0,0 @@ -import 'dart:async'; -import '../utils/retry_client.dart'; -import 'runnables.dart'; - -/// {@template runnable_retry} -/// A [Runnable] that automatically retries the operation if it fails. -/// -/// You can create a [RunnableRetry] using [Runnable.withRetry], passing in the -/// [RetryOptions]. -/// -/// When [invoke] or [batch] is called on the runnable, if the initial attempt -/// fails, it will be retried according to the specified [RetryOptions]. -/// -/// Example usage: -/// ```dart -/// final model = ChatOpenAI(...); -/// final modelWithRetry = model.withRetry(maxRetries: 2); -/// final res = await modelWithRetry.invoke(...); -/// ``` -/// {@endtemplate} -class RunnableRetry - extends Runnable { - /// {@macro runnable_retry} - RunnableRetry({ - required this.runnable, - required super.defaultOptions, - required this.retryOptions, - }); - - /// Runnable that will be retried on error. - final Runnable runnable; - - /// Options to retry the runnable. - final RetryOptions retryOptions; - - @override - Future invoke( - RunInput input, { - RunnableOptions? options, - }) async { - return retryClient( - options: retryOptions, - fn: () => runnable.invoke( - input, - options: options, - ), - ); - } - - @override - Future> batch( - List inputs, { - List? options, - }) async { - return retryClient( - options: retryOptions, - fn: () => runnable.batch( - inputs, - options: options, - ), - ); - } -} diff --git a/packages/langchain_core/lib/src/runnables/runnable.dart b/packages/langchain_core/lib/src/runnables/runnable.dart index 05f828ca..792bc80a 100644 --- a/packages/langchain_core/lib/src/runnables/runnable.dart +++ b/packages/langchain_core/lib/src/runnables/runnable.dart @@ -2,13 +2,11 @@ import 'dart:async'; import '../../utils.dart'; import 'binding.dart'; -import 'fallbacks.dart'; import 'function.dart'; import 'input_map.dart'; import 'input_stream_map.dart'; import 'map.dart'; import 'passthrough.dart'; -import 'retry.dart'; import 'router.dart'; import 'sequence.dart'; import 'types.dart'; @@ -284,57 +282,6 @@ abstract class Runnable withFallbacks( - List> fallbacks, - ) { - return RunnableWithFallback( - mainRunnable: this, - fallbacks: fallbacks, - ); - } - - /// Adds retry logic to an existing runnable. - /// - /// This method create a [RunnableRetry] instance, if the current [Runnable] - /// throws an exception during invocation, it will be retried based on the - /// configuration provided. By default the runnable will be retried 3 times - /// with exponential delay between each retry. - /// - /// - [maxRetries] - max attempts to retry the runnable. 
- /// - [retryIf] - evaluator function to check whether to retry based the - /// exception thrown. - /// - [delayDurations] - by default runnable will be retried based on an - /// exponential backoff strategy with base delay as 1 second. But you can - /// override this behavior by providing an optional list of [Duration]s. - /// - [addJitter] - whether to add jitter to the delay. - RunnableRetry withRetry({ - final int maxRetries = 3, - final FutureOr Function(Object e)? retryIf, - final List? delayDurations, - final bool addJitter = false, - }) { - return RunnableRetry( - runnable: this, - defaultOptions: defaultOptions, - retryOptions: RetryOptions( - maxRetries: maxRetries, - retryIf: retryIf, - delayDurations: delayDurations, - addJitter: addJitter, - ), - ); - } - /// Returns the given [options] if they are compatible with the [Runnable], /// otherwise returns `null`. CallOptions? getCompatibleOptions( @@ -342,14 +289,4 @@ abstract class Runnable Stream streamFromInputStream( final Stream inputStream, { final RunnableOptions? options, - }) async* { + }) { Stream nextStepStream; try { nextStepStream = first.streamFromInputStream( @@ -152,7 +152,7 @@ class RunnableSequence } try { - yield* last.streamFromInputStream( + return last.streamFromInputStream( nextStepStream, options: last.getCompatibleOptions(options), ); @@ -205,11 +205,4 @@ Please ensure that the output of the previous runnable in the sequence matches t '''; throw ArgumentError(errorMessage); } - - @override - void close() { - for (final step in steps) { - step.close(); - } - } } diff --git a/packages/langchain_core/lib/src/runnables/types.dart b/packages/langchain_core/lib/src/runnables/types.dart index efec915e..0a70a4d3 100644 --- a/packages/langchain_core/lib/src/runnables/types.dart +++ b/packages/langchain_core/lib/src/runnables/types.dart @@ -13,31 +13,4 @@ class RunnableOptions { /// The maximum number of concurrent calls that the runnable can make. /// Defaults to 1000 (different Runnable types may have different defaults). final int concurrencyLimit; - - /// Creates a copy of this [RunnableOptions] with the given fields replaced - /// by the new values. - RunnableOptions copyWith({ - int? concurrencyLimit, - }) { - return RunnableOptions( - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, - ); - } - - /// Merges this [RunnableOptions] with another [RunnableOptions]. - RunnableOptions merge(RunnableOptions? other) { - return copyWith( - concurrencyLimit: other?.concurrencyLimit, - ); - } - - @override - bool operator ==(covariant final RunnableOptions other) { - return concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return concurrencyLimit.hashCode; - } } diff --git a/packages/langchain_core/lib/src/tools/base.dart b/packages/langchain_core/lib/src/tools/base.dart index 079dbab7..e676fc6b 100644 --- a/packages/langchain_core/lib/src/tools/base.dart +++ b/packages/langchain_core/lib/src/tools/base.dart @@ -12,13 +12,13 @@ import 'types.dart'; /// {@template tool_spec} /// The specification of a LangChain tool without the actual implementation. /// {@endtemplate} +@immutable class ToolSpec { /// {@macro tool_spec} const ToolSpec({ required this.name, required this.description, required this.inputJsonSchema, - this.strict = false, }); /// The unique name of the tool that clearly communicates its purpose. @@ -51,31 +51,18 @@ class ToolSpec { /// ``` final Map inputJsonSchema; - /// Whether to enable strict schema adherence when generating the tool call. 
- /// If set to true, the model will follow the exact schema defined in the - /// [inputJsonSchema] field. - /// - /// This is only supported by some providers (e.g. OpenAI). Mind that when - /// enabled, only a subset of JSON Schema may be supported. Check out the - /// provider's tool calling documentation for more information. - final bool strict; - @override bool operator ==(covariant final ToolSpec other) { final mapEquals = const DeepCollectionEquality().equals; return identical(this, other) || name == other.name && description == other.description && - mapEquals(inputJsonSchema, other.inputJsonSchema) && - strict == other.strict; + mapEquals(inputJsonSchema, other.inputJsonSchema); } @override int get hashCode => - name.hashCode ^ - description.hashCode ^ - inputJsonSchema.hashCode ^ - strict.hashCode; + name.hashCode ^ description.hashCode ^ inputJsonSchema.hashCode; @override String toString() { @@ -84,20 +71,9 @@ ToolSpec{ name: $name, description: $description, inputJsonSchema: $inputJsonSchema, - strict: $strict, } '''; } - - /// Converts the tool spec to a JSON-serializable map. - Map toJson() { - return { - 'name': name, - 'description': description, - 'inputJsonSchema': inputJsonSchema, - 'strict': strict, - }; - } } /// {@template tool} @@ -118,7 +94,6 @@ abstract base class Tool inputJsonSchema; - @override - final bool strict; - /// Whether to return the tool's output directly. /// Setting this to true means that after the tool is called, /// the AgentExecutor will stop looping. @@ -152,9 +124,7 @@ abstract base class Tool inputJsonSchema, - final bool strict = false, required final FutureOr Function(Input input) func, Input Function(Map json)? getInputFromJson, final bool returnDirect = false, @@ -180,7 +149,6 @@ abstract base class Tool json['input'] as Input, returnDirect: returnDirect, @@ -241,26 +209,12 @@ abstract base class Tool - name.hashCode ^ - description.hashCode ^ - inputJsonSchema.hashCode ^ - strict.hashCode; - - @override - Map toJson() { - return { - 'name': name, - 'description': description, - 'inputJsonSchema': inputJsonSchema, - 'strict': strict, - }; - } + name.hashCode ^ description.hashCode ^ inputJsonSchema.hashCode; } /// {@template tool_func} @@ -274,7 +228,6 @@ final class _ToolFunc required super.name, required super.description, required super.inputJsonSchema, - required super.strict, required FutureOr Function(Input input) function, required Input Function(Map json) getInputFromJson, super.returnDirect = false, diff --git a/packages/langchain_core/lib/src/tools/string.dart b/packages/langchain_core/lib/src/tools/string.dart index 43e1e2e7..3c9973d5 100644 --- a/packages/langchain_core/lib/src/tools/string.dart +++ b/packages/langchain_core/lib/src/tools/string.dart @@ -14,7 +14,6 @@ abstract base class StringTool required super.name, required super.description, final String inputDescription = 'The input to the tool', - super.strict = false, super.returnDirect = false, super.handleToolError, super.defaultOptions, @@ -37,8 +36,6 @@ abstract base class StringTool /// purpose. /// - [description] is used to tell the model how/when/why to use the tool. /// You can provide few-shot examples as a part of the description. - /// - [strict] whether to enable strict schema adherence when generating the - /// tool call (only supported by some providers). /// - [func] is the function that will be called when the tool is run. /// - [returnDirect] whether to return the tool's output directly. 
/// Setting this to true means that after the tool is called, @@ -49,7 +46,6 @@ abstract base class StringTool required final String name, required final String description, final String inputDescription = 'The input to the tool', - final bool strict = false, required final FutureOr Function(String input) func, final bool returnDirect = false, final String Function(ToolException)? handleToolError, @@ -58,7 +54,6 @@ abstract base class StringTool name: name, description: description, inputDescription: inputDescription, - strict: strict, func: func, returnDirect: returnDirect, handleToolError: handleToolError, @@ -89,7 +84,6 @@ final class _StringToolFunc required super.name, required super.description, super.inputDescription, - required super.strict, required FutureOr Function(String) func, super.returnDirect = false, super.handleToolError, diff --git a/packages/langchain_core/lib/src/tools/types.dart b/packages/langchain_core/lib/src/tools/types.dart index a51b3b49..e533b480 100644 --- a/packages/langchain_core/lib/src/tools/types.dart +++ b/packages/langchain_core/lib/src/tools/types.dart @@ -6,9 +6,7 @@ import '../langchain/types.dart'; /// {@endtemplate} class ToolOptions extends BaseLangChainOptions { /// {@macro tool_options} - const ToolOptions({ - super.concurrencyLimit, - }); + const ToolOptions(); } /// {@template tool_exception} diff --git a/packages/langchain_core/lib/src/utils/retry_client.dart b/packages/langchain_core/lib/src/utils/retry_client.dart deleted file mode 100644 index 9cd15317..00000000 --- a/packages/langchain_core/lib/src/utils/retry_client.dart +++ /dev/null @@ -1,92 +0,0 @@ -import 'dart:async'; -import 'dart:math'; - -/// {@template retry_options} -/// Options to pass into [retryClient] to control the retry behavior. -/// {@endtemplate} -class RetryOptions { - /// {@macro retry_options} - RetryOptions({ - required this.maxRetries, - required this.addJitter, - this.retryIf, - this.delayDurations, - }); - - /// The maximum number of attempts to retry. - final int maxRetries; - - /// An evaluator function that can be used to decide if the function should - /// be retried based on the exception it throws. - /// - /// If you decide not to retry on a particular exception, [retryIf] can return - /// `false` and the retry won't happen. By default [retryIf] is `true` and - /// all exceptions are retried. - final FutureOr Function(Object e)? retryIf; - - /// The function will be retried based on an exponential backoff strategy - /// with a base delay of 1 second. - /// - /// But you can override this behavior by providing an optional list of - /// [delayDurations]`. Each entry in the list corresponds to a specific - /// retry attempt, and the corresponding delay from the list will be used - /// instead of the default exponential delay. - /// - /// For example, if you provide a list of `[2, 4, 8]`, the delays between the - /// first three retries will be 2, 4, and 8 seconds, respectively. - final List? delayDurations; - - /// Whether to add jitter to the exponential backoff. - /// - /// Jitter is a random value added to the delay to prevent multiple clients - /// from retrying at the same time. - final bool addJitter; -} - -/// A client that handles retry logic for a given function. -/// -/// This client takes [RetryOptions] and a function to execute. If the -/// function fails, it will be retried according to the specified options. -/// If it succeeds, the result of the function will be returned. 
-FutureOr retryClient({ - required RetryOptions options, - required FutureOr Function() fn, -}) async { - const defaultDelay = Duration(seconds: 1); - - for (int attempt = 0; attempt < options.maxRetries; attempt++) { - try { - return await fn(); - } catch (e) { - final isLastAttempt = attempt == options.maxRetries - 1; - final shouldRetry = await options.retryIf?.call(e) ?? true; - - if (isLastAttempt || !shouldRetry) { - rethrow; - } - - final duration = - options.delayDurations?[attempt] ?? defaultDelay * pow(2, attempt); - await _delay(duration, attempt, options.addJitter); - } - } - - // This line should never be reached - throw StateError('Exhausted all retry attempts'); -} - -Future _delay( - final Duration duration, - final int attempt, - final bool addJitter, -) async { - final Duration delay; - if (addJitter) { - final random = Random(); - final jitter = random.nextInt(100); - delay = Duration(milliseconds: duration.inMilliseconds + jitter); - } else { - delay = duration; - } - await Future.delayed(delay); -} diff --git a/packages/langchain_core/lib/src/utils/utils.dart b/packages/langchain_core/lib/src/utils/utils.dart index 57924640..d439ed98 100644 --- a/packages/langchain_core/lib/src/utils/utils.dart +++ b/packages/langchain_core/lib/src/utils/utils.dart @@ -1,4 +1,3 @@ export 'chunk.dart'; export 'reduce.dart'; -export 'retry_client.dart'; export 'similarity.dart'; diff --git a/packages/langchain_core/lib/src/vector_stores/base.dart b/packages/langchain_core/lib/src/vector_stores/base.dart index 3a5ecb51..9ef54df3 100644 --- a/packages/langchain_core/lib/src/vector_stores/base.dart +++ b/packages/langchain_core/lib/src/vector_stores/base.dart @@ -45,6 +45,8 @@ abstract class VectorStore { /// Delete by vector ID. /// /// - [ids] is a list of ids to delete. + /// + /// Returns true if the delete was successful. Future delete({required final List ids}); /// Returns docs most similar to query using specified search type. diff --git a/packages/langchain_core/lib/stores.dart b/packages/langchain_core/lib/stores.dart index 96eb406a..2a234153 100644 --- a/packages/langchain_core/lib/stores.dart +++ b/packages/langchain_core/lib/stores.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to stores. -library; +library stores; export 'src/stores/stores.dart'; diff --git a/packages/langchain_core/lib/tools.dart b/packages/langchain_core/lib/tools.dart index d829f7d5..9d0b95aa 100644 --- a/packages/langchain_core/lib/tools.dart +++ b/packages/langchain_core/lib/tools.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to tools. -library; +library tools; export 'src/tools/tools.dart'; diff --git a/packages/langchain_core/lib/utils.dart b/packages/langchain_core/lib/utils.dart index 7ceacd01..cdcc6670 100644 --- a/packages/langchain_core/lib/utils.dart +++ b/packages/langchain_core/lib/utils.dart @@ -1,4 +1,4 @@ /// Contains core utilities. -library; +library utils; export 'src/utils/utils.dart'; diff --git a/packages/langchain_core/lib/vector_stores.dart b/packages/langchain_core/lib/vector_stores.dart index 129d296c..35174345 100644 --- a/packages/langchain_core/lib/vector_stores.dart +++ b/packages/langchain_core/lib/vector_stores.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to vector stores. 
-library; +library vector_stores; export 'src/vector_stores/vector_stores.dart'; diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index 69e8bac9..65650ce8 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_core description: Contains core abstractions of LangChain.dart and the LangChain Expression Language (LCEL). -version: 0.3.6 +version: 0.3.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -13,15 +13,15 @@ topics: - langchain environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: async: ^2.11.0 - collection: ^1.18.0 - cross_file: ^0.3.4+2 + collection: '>=1.17.0 <1.19.0' + cross_file: ^0.3.4+1 crypto: ^3.0.3 meta: ^1.11.0 - rxdart: ">=0.27.7 <0.29.0" + rxdart: ^0.27.7 dev_dependencies: - test: ^1.25.8 + test: ^1.25.2 diff --git a/packages/langchain_core/test/runnables/binding_test.dart b/packages/langchain_core/test/runnables/binding_test.dart index e64f0042..af192ed2 100644 --- a/packages/langchain_core/test/runnables/binding_test.dart +++ b/packages/langchain_core/test/runnables/binding_test.dart @@ -4,7 +4,6 @@ import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/output_parsers.dart'; import 'package:langchain_core/prompts.dart'; import 'package:langchain_core/runnables.dart'; -import 'package:langchain_core/tools.dart'; import 'package:test/test.dart'; void main() { @@ -21,32 +20,6 @@ void main() { expect(res, 'Hello '); }); - test('Chaining bind calls', () async { - final model = FakeChatModel( - responses: ['a', 'b'], - defaultOptions: const FakeChatModelOptions( - model: 'modelA', - metadata: {'foo': 'bar'}, - ), - ); - - final res1 = await model.invoke(PromptValue.string('1')); - expect(res1.metadata['model'], 'modelA'); - expect(res1.metadata['foo'], 'bar'); - - final chain2 = model.bind(const FakeChatModelOptions(model: 'modelB')); - final res2 = await chain2.invoke(PromptValue.string('2')); - expect(res2.metadata['model'], 'modelB'); - expect(res2.metadata['foo'], 'bar'); - - final chain3 = chain2.bind( - const FakeChatModelOptions(metadata: {'foo': 'baz'}), - ); - final res3 = await chain3.invoke(PromptValue.string('3')); - expect(res3.metadata['model'], 'modelB'); - expect(res3.metadata['foo'], 'baz'); - }); - test('Streaming RunnableBinding', () async { final prompt = PromptTemplate.fromTemplate('Hello {input}'); const model = _FakeOptionsChatModel(); @@ -124,14 +97,4 @@ class _FakeOptionsChatModelOptions extends ChatModelOptions { const _FakeOptionsChatModelOptions(this.stop); final String stop; - - @override - ChatModelOptions copyWith({ - final String? model, - final List? tools, - final ChatToolChoice? toolChoice, - final int? 
concurrencyLimit, - }) { - return _FakeOptionsChatModelOptions(stop); - } } diff --git a/packages/langchain_core/test/runnables/fallbacks_test.dart b/packages/langchain_core/test/runnables/fallbacks_test.dart deleted file mode 100644 index 7bc7a72d..00000000 --- a/packages/langchain_core/test/runnables/fallbacks_test.dart +++ /dev/null @@ -1,102 +0,0 @@ -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/output_parsers.dart'; -import 'package:langchain_core/prompts.dart'; -import 'package:test/test.dart'; - -void main() { - group('RunnableFallback tests', () { - late FakeEchoChatModel model; - late FakeChatModel fallbackModel; - final promptTemplate = - ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); - final input = PromptValue.string('why is the sky blue'); - - setUp(() { - model = const FakeEchoChatModel(); - fallbackModel = FakeChatModel(responses: ['fallback response']); - }); - - test('RunnableFallback should return main runnable output', () async { - final modelWithFallback = model.withFallbacks([fallbackModel]); - final res = await modelWithFallback.invoke(input); - expect(res.output.content, 'why is the sky blue'); - }); - - test('Should call fallback runnable if main runnable fails', () async { - final brokenModel = model.bind( - const FakeEchoChatModelOptions(throwRandomError: true), - ); - final modelWithFallback = brokenModel.withFallbacks([fallbackModel]); - final res = await modelWithFallback.invoke(input); - expect(res.output.content, 'fallback response'); - }); - - test('Test batch response of main runnable in RunnableFallback', () async { - const model = FakeEchoChatModel(); - const fallbackModel = FakeEchoChatModel(); - final fallbackChain = promptTemplate.pipe(fallbackModel); - final chainWithFallbacks = - promptTemplate.pipe(model).withFallbacks([fallbackChain]); - final res = await chainWithFallbacks.batch( - [ - {'topic': 'bears'}, - {'topic': 'cats'}, - ], - ); - expect(res[0].output.content, 'tell me a joke about bears'); - expect(res[1].output.content, 'tell me a joke about cats'); - }); - - test('Test fallbacks response in batch', () async { - final brokenModel = model.bind( - const FakeEchoChatModelOptions(throwRandomError: true), - ); - final fallbackChain = promptTemplate.pipe(fallbackModel); - final chainWithFallbacks = - promptTemplate.pipe(brokenModel).withFallbacks([fallbackChain]); - final res = await chainWithFallbacks.batch( - [ - {'topic': 'bears'}, - ], - ); - expect(res.first.output.content, 'fallback response'); - }); - - test('Should throw error if none of runnable returned output', () async { - final brokenModel1 = model.bind( - const FakeEchoChatModelOptions(throwRandomError: true), - ); - final brokenModel2 = model.bind( - const FakeEchoChatModelOptions(throwRandomError: true), - ); - final fallbackChain = promptTemplate.pipe(brokenModel2); - final chainWithFallbacks = - promptTemplate.pipe(brokenModel1).withFallbacks([fallbackChain]); - expect( - () async => chainWithFallbacks.batch( - [ - {'topic': 'bears'}, - ], - ), - throwsException, - ); - }); - - test('Test stream response of main runnable in RunnableFallback', () async { - final modelWithFallback = model.withFallbacks([fallbackModel]); - final chain = modelWithFallback.pipe(const StringOutputParser()); - final res = await chain.stream(input).toList(); - expect(res.join('|'), 'w|h|y| |i|s| |t|h|e| |s|k|y| |b|l|u|e'); - }); - - test('Test fallbacks response in stream', () async { - final brokenModel = model.bind( - const 
FakeEchoChatModelOptions(throwRandomError: true), - ); - final modelWithFallback = brokenModel.withFallbacks([fallbackModel]); - final chain = modelWithFallback.pipe(const StringOutputParser()); - final res = await chain.stream(input).toList(); - expect(res.join('|'), endsWith('f|a|l|l|b|a|c|k| |r|e|s|p|o|n|s|e')); - }); - }); -} diff --git a/packages/langchain_core/test/runnables/map_test.dart b/packages/langchain_core/test/runnables/map_test.dart index e65dc73a..98a4a3ff 100644 --- a/packages/langchain_core/test/runnables/map_test.dart +++ b/packages/langchain_core/test/runnables/map_test.dart @@ -1,4 +1,5 @@ // ignore_for_file: unused_element +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/llms.dart'; import 'package:langchain_core/output_parsers.dart'; @@ -42,11 +43,11 @@ void main() { final left = streamList .map((final it) => it['left']) // - .nonNulls + .whereNotNull() .join(); final right = streamList .map((final it) => it['right']) // - .nonNulls + .whereNotNull() .join(); expect(left, 'Hello world!'); diff --git a/packages/langchain_core/test/runnables/retry_test.dart b/packages/langchain_core/test/runnables/retry_test.dart deleted file mode 100644 index f1e8f625..00000000 --- a/packages/langchain_core/test/runnables/retry_test.dart +++ /dev/null @@ -1,87 +0,0 @@ -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/prompts.dart'; -import 'package:langchain_core/runnables.dart'; -import 'package:test/test.dart'; - -void main() { - group('Runnable Retry Test', () { - late FakeEchoChatModel model; - final input = PromptValue.string('why is the sky blue'); - final promptTemplate = - ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); - - setUp(() { - model = const FakeEchoChatModel(); - }); - - test('Runnable retry should return output for invoke', () async { - final modelWithRetry = model.withRetry(maxRetries: 2); - final res = await modelWithRetry.invoke(input); - expect(res.output.content, 'why is the sky blue'); - }); - - test('Runnable retry should return output for batch', () async { - final chain = promptTemplate.pipe(model); - final chainWithRetry = chain.withRetry(); - final res = await chainWithRetry.batch( - [ - {'topic': 'bears'}, - {'topic': 'cats'}, - ], - ); - expect(res[0].output.content, 'tell me a joke about bears'); - expect(res[1].output.content, 'tell me a joke about cats'); - }); - - test('Should retry based RetryOptions, maxRetries = 2', () async { - final modelWithRetry = model.withRetry(maxRetries: 2); - expect( - () async => modelWithRetry.invoke( - input, - options: const FakeEchoChatModelOptions(throwRandomError: true), - ), - throwsException, - ); - }); - - test('Should return the output after successful retry', () async { - int count = 0; - final modelWithRetry = model.pipe( - Runnable.fromFunction( - invoke: (input, opt) { - if (count++ < 1) { - throw Exception('Random error'); - } - return input; - }, - ), - ).withRetry(maxRetries: 2); - final res = await modelWithRetry.invoke(input); - expect(res.outputAsString, input.toString()); - expect(count, 2); - }); - - test('Should not retry if retryIf returned false', () async { - late String error; - final modelWithRetry = model.withRetry( - maxRetries: 3, - retryIf: (e) { - if (e.toString() == 'Exception: Random error') { - return false; - } else { - return true; - } - }, - ); - try { - await modelWithRetry.invoke( - input, - options: const FakeEchoChatModelOptions(throwRandomError: 
true), - ); - } catch (e) { - error = e.toString(); - } - expect(error, 'Exception: Random error'); - }); - }); -} diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index ab291e7e..862156b6 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,44 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.2.1+2 - - - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) - -## 0.2.1+1 - - - Update a dependency to the latest release. - -## 0.2.1 - - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -## 0.2.0 - -> Note: `ChatFirebaseVertexAI` now uses `gemini-1.5-flash` model by default. - - - **BREAKING** **FEAT**: Update ChatFirebaseVertexAI default model to gemini-1.5-flash ([#458](https://github.com/davidmigloz/langchain_dart/issues/458)). ([d3c96c52](https://github.com/davidmigloz/langchain_dart/commit/d3c96c52e95e889ba6955e3de80a83978b27618b)) - - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) - - **FEAT**: Support response MIME type in ChatFirebaseVertexAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)) ([#463](https://github.com/davidmigloz/langchain_dart/issues/463)). ([c3452721](https://github.com/davidmigloz/langchain_dart/commit/c3452721c78ba3071ed2510a243f9c824a291c34)) - - **FEAT**: Add support for Firebase Auth in ChatFirebaseVertexAI ([#460](https://github.com/davidmigloz/langchain_dart/issues/460)). ([6d137290](https://github.com/davidmigloz/langchain_dart/commit/6d137290ca0f56c9fcc725e6211e838a3e3c6d16)) - - **FEAT**: Add support for usage metadata in ChatFirebaseVertexAI ([#457](https://github.com/davidmigloz/langchain_dart/issues/457)). ([2587f9e2](https://github.com/davidmigloz/langchain_dart/commit/2587f9e2bcbcc2bf5e2295dce409e92a89bf3c44)) - - **REFACTOR**: Simplify how tools are passed to the internal Firebase client ([#459](https://github.com/davidmigloz/langchain_dart/issues/459)). ([7f772396](https://github.com/davidmigloz/langchain_dart/commit/7f77239601fb216a01ec9d25680ec4d3dc4b97c7)) - -## 0.1.0+3 - - - Update a dependency to the latest release. - -## 0.1.0+2 - - - Update a dependency to the latest release. 
- -## 0.1.0+1 - - - **DOCS**: Fix lint issues in langchain_firebase example. ([f85a6ad7](https://github.com/davidmigloz/langchain_dart/commit/f85a6ad755e00c513bd4349663e33d40be8a696c)) - ## 0.1.0 - **FEAT**: Add support for ChatFirebaseVertexAI ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f)) diff --git a/packages/langchain_firebase/example/lib/main.dart b/packages/langchain_firebase/example/lib/main.dart index f9d5db92..7cbb5e8e 100644 --- a/packages/langchain_firebase/example/lib/main.dart +++ b/packages/langchain_firebase/example/lib/main.dart @@ -155,7 +155,7 @@ class _ChatWidgetState extends State { _model = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro', + model: 'gemini-1.5-pro-preview-0514', tools: [exchangeRateTool], ), // location: 'us-central1', @@ -580,7 +580,7 @@ class MessageWidget extends StatelessWidget { decoration: BoxDecoration( color: isFromUser ? Theme.of(context).colorScheme.primaryContainer - : Theme.of(context).colorScheme.surfaceContainerHighest, + : Theme.of(context).colorScheme.surfaceVariant, borderRadius: BorderRadius.circular(18), ), padding: const EdgeInsets.symmetric( diff --git a/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift b/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift index a2fafff9..2884d031 100644 --- a/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift +++ b/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift @@ -7,12 +7,10 @@ import Foundation import cloud_firestore import firebase_app_check -import firebase_auth import firebase_core func RegisterGeneratedPlugins(registry: FlutterPluginRegistry) { FLTFirebaseFirestorePlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseFirestorePlugin")) FLTFirebaseAppCheckPlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseAppCheckPlugin")) - FLTFirebaseAuthPlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseAuthPlugin")) FLTFirebaseCorePlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseCorePlugin")) } diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index b08dde36..6ea344fc 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _flutterfire_internals - sha256: b1595874fbc8f7a50da90f5d8f327bb0bfd6a95dc906c390efe991540c3b54aa + sha256: "2350805d7afefb0efe7acd325cb19d3ae8ba4039b906eade3807ffb69938a01f" url: "https://pub.dev" source: hosted - version: "1.3.40" + version: "1.3.33" args: dependency: transitive description: @@ -85,10 +85,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" + sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" url: "https://pub.dev" source: hosted - version: "0.3.4+2" + version: "0.3.4+1" crypto: dependency: transitive description: @@ -117,82 +117,58 @@ packages: dependency: transitive description: name: firebase_app_check - sha256: "8314938830d6b47217e369664567f6d8a1e77603448e1dbdaf4f7d8c2111ff5c" + sha256: f66b67027de548b6f8b61c7aa752a24804104e7f463723d202ae9c6d9520fa6b url: "https://pub.dev" source: hosted - version: "0.3.0+4" + 
version: "0.2.2+5" firebase_app_check_platform_interface: dependency: transitive description: name: firebase_app_check_platform_interface - sha256: edefbd312d2f4c52ab6a62d4efca512012bcc580f152c856a5730bfabcf8a924 + sha256: c06fcb2381ff2566f4ebe1a1df2e1798de3d0fad531f673e8539368f33cf6b34 url: "https://pub.dev" source: hosted - version: "0.1.0+34" + version: "0.1.0+27" firebase_app_check_web: dependency: transitive description: name: firebase_app_check_web - sha256: "2c2377ecf922514c540c2d4a9c06e46830a0706fdfc3d59b7ade9b75843b81c5" + sha256: "88c8d792d429a43767461312f29baa5d3b76cd0453cf48dd008f8f94393221c1" url: "https://pub.dev" source: hosted - version: "0.1.2+12" - firebase_auth: - dependency: transitive - description: - name: firebase_auth - sha256: "2457ac6cbc152fa464aad3fb35f98039b0c4ab8e9bedf476672508b291bdbc3a" - url: "https://pub.dev" - source: hosted - version: "5.1.4" - firebase_auth_platform_interface: - dependency: transitive - description: - name: firebase_auth_platform_interface - sha256: "0408e2ed74b1afa0490a93aa041fe90d7573af7ffc59a641edc6c5b5c1b8d2a4" - url: "https://pub.dev" - source: hosted - version: "7.4.3" - firebase_auth_web: - dependency: transitive - description: - name: firebase_auth_web - sha256: "7e0c6d0fa8c5c1b2ae126a78f2d1a206a77a913f78d20f155487bf746162dccc" - url: "https://pub.dev" - source: hosted - version: "5.12.5" + version: "0.1.2+5" firebase_core: dependency: "direct main" description: name: firebase_core - sha256: "3187f4f8e49968573fd7403011dca67ba95aae419bc0d8131500fae160d94f92" + sha256: "372d94ced114b9c40cb85e18c50ac94a7e998c8eec630c50d7aec047847d27bf" url: "https://pub.dev" source: hosted - version: "3.3.0" + version: "2.31.0" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: "3c3a1e92d6f4916c32deea79c4a7587aa0e9dbbe5889c7a16afcf005a485ee02" + sha256: c437ae5d17e6b5cc7981cf6fd458a5db4d12979905f9aafd1fea930428a9fe63 url: "https://pub.dev" source: hosted - version: "5.2.0" + version: "5.0.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: e8d1e22de72cb21cdcfc5eed7acddab3e99cd83f3b317f54f7a96c32f25fd11e + sha256: "43d9e951ac52b87ae9cc38ecdcca1e8fa7b52a1dd26a96085ba41ce5108db8e9" url: "https://pub.dev" source: hosted - version: "2.17.4" + version: "2.17.0" firebase_vertexai: dependency: transitive description: name: firebase_vertexai - sha256: ad34f7a87d870949e92851f4c73b7e15f808fd4717ed899fa7b4813fffe74831 + sha256: "6e61f6717bee3ab563e8e506e0fed98761f98c181626c62d924d06598786e95e" url: "https://pub.dev" source: hosted - version: "0.2.2+4" + version: "0.1.0" fixnum: dependency: transitive description: @@ -218,10 +194,10 @@ packages: dependency: "direct main" description: name: flutter_markdown - sha256: a23c41ee57573e62fc2190a1f36a0480c4d90bde3a8a8d7126e5d5992fb53fb7 + sha256: "04c4722cc36ec5af38acc38ece70d22d3c2123c61305d555750a091517bbe504" url: "https://pub.dev" source: hosted - version: "0.7.3+1" + version: "0.6.23" flutter_test: dependency: "direct dev" description: flutter @@ -236,18 +212,18 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be + sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be url: "https://pub.dev" source: hosted - version: "0.4.4" + version: "0.4.0" http: dependency: transitive description: name: http - sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 + sha256: 
"761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" url: "https://pub.dev" source: hosted - version: "1.2.2" + version: "1.2.1" http_parser: dependency: transitive description: @@ -262,45 +238,45 @@ packages: path: "../../langchain" relative: true source: path - version: "0.7.6" + version: "0.7.1" langchain_core: dependency: "direct overridden" description: path: "../../langchain_core" relative: true source: path - version: "0.3.6" + version: "0.3.1" langchain_firebase: dependency: "direct main" description: path: ".." relative: true source: path - version: "0.2.1+2" + version: "0.1.0" leak_tracker: dependency: transitive description: name: leak_tracker - sha256: "3f87a60e8c63aecc975dda1ceedbc8f24de75f09e4856ea27daf8958f2f0ce05" + sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" url: "https://pub.dev" source: hosted - version: "10.0.5" + version: "10.0.0" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: "932549fb305594d82d7183ecd9fa93463e9914e1b67cacc34bc40906594a1806" + sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 url: "https://pub.dev" source: hosted - version: "3.0.5" + version: "2.0.1" leak_tracker_testing: dependency: transitive description: name: leak_tracker_testing - sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" + sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 url: "https://pub.dev" source: hosted - version: "3.0.1" + version: "2.0.1" lints: dependency: transitive description: @@ -329,18 +305,18 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec + sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" url: "https://pub.dev" source: hosted - version: "0.11.1" + version: "0.8.0" meta: dependency: transitive description: name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 url: "https://pub.dev" source: hosted - version: "1.15.0" + version: "1.11.0" path: dependency: transitive description: @@ -361,10 +337,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" + sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" url: "https://pub.dev" source: hosted - version: "0.28.0" + version: "0.27.7" sky_engine: dependency: transitive description: flutter @@ -422,10 +398,10 @@ packages: dependency: transitive description: name: test_api - sha256: "5b8a98dafc4d5c4c9c72d8b31ab2b23fc13422348d2997120294d3bac86b4ddb" + sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" url: "https://pub.dev" source: hosted - version: "0.7.2" + version: "0.6.1" typed_data: dependency: transitive description: @@ -438,10 +414,10 @@ packages: dependency: transitive description: name: uuid - sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" + sha256: "814e9e88f21a176ae1359149021870e87f7cddaf633ab678a5d2b0bff7fd1ba8" url: "https://pub.dev" source: hosted - version: "4.4.2" + version: "4.4.0" vector_math: dependency: transitive description: @@ -454,10 +430,10 @@ packages: dependency: transitive description: name: vm_service - sha256: "5c5f338a667b4c644744b661f309fb8080bb94b18a7e91ef1dbd343bed00ed6d" + sha256: 
b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 url: "https://pub.dev" source: hosted - version: "14.2.5" + version: "13.0.0" web: dependency: transitive description: @@ -467,5 +443,5 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.4.0 <4.0.0" - flutter: ">=3.22.0" + dart: ">=3.3.0 <4.0.0" + flutter: ">=3.19.0" diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index 2c34d324..76900f0d 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -4,17 +4,17 @@ version: 1.0.0+1 publish_to: 'none' environment: - sdk: ">=3.4.0 <4.0.0" - flutter: ">=3.22.0" + sdk: ">=3.0.0 <4.0.0" + flutter: ">=3.19.0" dependencies: cupertino_icons: ^1.0.6 - firebase_core: ^3.3.0 + firebase_core: ^2.31.0 flutter: sdk: flutter - flutter_markdown: ^0.7.3 - langchain: 0.7.6 - langchain_firebase: 0.2.1+2 + flutter_markdown: ^0.6.22 + langchain: 0.7.1 + langchain_firebase: 0.1.0 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/example/pubspec_overrides.yaml b/packages/langchain_firebase/example/pubspec_overrides.yaml index fb671352..35cb2da2 100644 --- a/packages/langchain_firebase/example/pubspec_overrides.yaml +++ b/packages/langchain_firebase/example/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_core,langchain_firebase +# melos_managed_dependency_overrides: langchain_core,langchain_firebase,langchain dependency_overrides: langchain: path: ../../langchain diff --git a/packages/langchain_firebase/example/web/flutter_bootstrap.js b/packages/langchain_firebase/example/web/flutter_bootstrap.js deleted file mode 100644 index 8ce49d8a..00000000 --- a/packages/langchain_firebase/example/web/flutter_bootstrap.js +++ /dev/null @@ -1,12 +0,0 @@ -{{flutter_js}} -{{flutter_build_config}} - -_flutter.loader.load({ - serviceWorkerSettings: { - serviceWorkerVersion: {{flutter_service_worker_version}}, - }, - onEntrypointLoaded: async function(engineInitializer) { - const appRunner = await engineInitializer.initializeEngine({useColorEmoji: true}); - await appRunner.runApp(); - }, -}); diff --git a/packages/langchain_firebase/example/web/index.html b/packages/langchain_firebase/example/web/index.html index cce674b5..27ef6265 100644 --- a/packages/langchain_firebase/example/web/index.html +++ b/packages/langchain_firebase/example/web/index.html @@ -1,25 +1,61 @@ + - + - - - - + + + + - + + + example + - VertexAI for Firebase in LangChain.dart - + + + - + diff --git a/packages/langchain_firebase/lib/langchain_firebase.dart b/packages/langchain_firebase/lib/langchain_firebase.dart index 45448a85..0b76e587 100644 --- a/packages/langchain_firebase/lib/langchain_firebase.dart +++ b/packages/langchain_firebase/lib/langchain_firebase.dart @@ -1,4 +1,4 @@ -/// LangChain.dart integration module for Firebase (Gemini, VertexAI for Firebase, Firestore, etc.). +/// LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). 
library; export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 83ac8d8c..f8c3870d 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -1,10 +1,10 @@ import 'package:collection/collection.dart'; import 'package:firebase_app_check/firebase_app_check.dart'; -import 'package:firebase_auth/firebase_auth.dart'; import 'package:firebase_core/firebase_core.dart'; import 'package:firebase_vertexai/firebase_vertexai.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; import 'package:uuid/uuid.dart'; import 'mappers.dart'; @@ -36,25 +36,25 @@ import 'types.dart'; /// ### Available models /// /// The following models are available: -/// - `gemini-1.5-flash`: +/// - `gemini-1.0-pro` +/// * text -> text model +/// * Max input token: 30720 +/// * Max output tokens: 2048 +/// - `gemini-1.0-pro-vision`: +/// * text / image -> text model +/// * Max input token: 12288 +/// * Max output tokens: 4096 +/// - `gemini-1.5-pro-preview-0514`: /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 -/// - `gemini-1.5-pro`: +/// - `gemini-1.5-flash-preview-0514`: /// * text / image / audio -> text model -/// * Max input token: 2097152 -/// * Max output tokens: 8192 -/// - `gemini-1.0-pro-vision`: -/// * text / image -> text model -/// * Max input token: 16384 -/// * Max output tokens: 2048 -/// - `gemini-1.0-pro` -/// * text -> text model -/// * Max input token: 32760 +/// * Max input token: 1048576 /// * Max output tokens: 8192 /// /// Mind that this list may not be up-to-date. -/// Refer to the [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models) +/// Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) /// for the updated list. /// /// ### Call options @@ -111,7 +111,7 @@ import 'types.dart'; /// /// [ChatFirebaseVertexAI] supports tool calling. /// -/// Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) +/// Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) /// for more information on how to use tools. /// /// Example: @@ -132,7 +132,7 @@ import 'types.dart'; /// ); /// final chatModel = ChatFirebaseVertexAI( /// defaultOptions: ChatFirebaseVertexAIOptions( -/// model: 'gemini-1.5-pro', +/// model: 'gemini-1.5-pro-preview-0514', /// temperature: 0, /// tools: [tool], /// ), @@ -154,11 +154,10 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// - [ChatFirebaseVertexAI.location] ChatFirebaseVertexAI({ super.defaultOptions = const ChatFirebaseVertexAIOptions( - model: defaultModel, + model: 'gemini-1.0-pro', ), this.app, this.appCheck, - this.auth, this.options, this.location, }) : _currentModel = defaultOptions.model ?? '' { @@ -173,9 +172,6 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// The optional [FirebaseAppCheck] to use to protect the project from abuse. final FirebaseAppCheck? appCheck; - /// The optional [FirebaseAuth] to use for authentication. - final FirebaseAuth? auth; - /// Configuration parameters for sending requests to Firebase. final RequestOptions? 
options; @@ -188,17 +184,20 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// A UUID generator. late final Uuid _uuid = const Uuid(); + @override + String get modelType => 'chat-firebase-vertex-ai'; + /// The current model set in [_firebaseClient]; String _currentModel; /// The current system instruction set in [_firebaseClient]; String? _currentSystemInstruction; - @override - String get modelType => 'chat-firebase-vertex-ai'; + /// The current tools set in [_firebaseClient]; + List? _currentTools; - /// The default model to use unless another is specified. - static const defaultModel = 'gemini-1.5-flash'; + /// The current tool choice set in [_firebaseClient]; + ChatToolChoice? _currentToolChoice; @override Future invoke( @@ -206,14 +205,12 @@ class ChatFirebaseVertexAI extends BaseChatModel { final ChatFirebaseVertexAIOptions? options, }) async { final id = _uuid.v4(); - final (model, prompt, safetySettings, generationConfig, tools, toolConfig) = + final (model, prompt, safetySettings, generationConfig) = _generateCompletionRequest(input.toChatMessages(), options: options); final completion = await _firebaseClient.generateContent( prompt, safetySettings: safetySettings, generationConfig: generationConfig, - tools: tools, - toolConfig: toolConfig, ); return completion.toChatResult(id, model); } @@ -224,15 +221,13 @@ class ChatFirebaseVertexAI extends BaseChatModel { final ChatFirebaseVertexAIOptions? options, }) { final id = _uuid.v4(); - final (model, prompt, safetySettings, generationConfig, tools, toolConfig) = + final (model, prompt, safetySettings, generationConfig) = _generateCompletionRequest(input.toChatMessages(), options: options); return _firebaseClient .generateContentStream( prompt, safetySettings: safetySettings, generationConfig: generationConfig, - tools: tools, - toolConfig: toolConfig, ) .map((final completion) => completion.toChatResult(id, model)); } @@ -243,8 +238,6 @@ class ChatFirebaseVertexAI extends BaseChatModel { Iterable prompt, List? safetySettings, GenerationConfig? generationConfig, - List? tools, - ToolConfig? toolConfig, ) _generateCompletionRequest( final List messages, { final ChatFirebaseVertexAIOptions? options, @@ -266,15 +259,7 @@ class ChatFirebaseVertexAI extends BaseChatModel { temperature: options?.temperature ?? defaultOptions.temperature, topP: options?.topP ?? defaultOptions.topP, topK: options?.topK ?? defaultOptions.topK, - responseMimeType: - options?.responseMimeType ?? defaultOptions.responseMimeType, - // responseSchema not supported yet - // responseSchema: - // (options?.responseSchema ?? defaultOptions.responseSchema) - // ?.toSchema(), ), - (options?.tools ?? defaultOptions.tools)?.toToolList(), - (options?.toolChoice ?? defaultOptions.toolChoice)?.toToolConfig(), ); } @@ -303,6 +288,8 @@ class ChatFirebaseVertexAI extends BaseChatModel { GenerativeModel _createFirebaseClient( final String model, { final String? systemInstruction, + final List? tools, + final ChatToolChoice? toolChoice, }) { return FirebaseVertexAI.instanceFor( app: app, @@ -313,6 +300,8 @@ class ChatFirebaseVertexAI extends BaseChatModel { model: model, systemInstruction: systemInstruction != null ? Content.system(systemInstruction) : null, + tools: tools?.toToolList(), + toolConfig: toolChoice?.toToolConfig(), ); } @@ -320,10 +309,14 @@ class ChatFirebaseVertexAI extends BaseChatModel { void _recreateFirebaseClient( final String model, final String? systemInstruction, + final List? tools, + final ChatToolChoice? 
toolChoice, ) { _firebaseClient = _createFirebaseClient( model, systemInstruction: systemInstruction, + tools: tools, + toolChoice: toolChoice, ); } @@ -332,12 +325,16 @@ class ChatFirebaseVertexAI extends BaseChatModel { final List messages, final ChatFirebaseVertexAIOptions? options, ) { - final model = options?.model ?? defaultOptions.model ?? defaultModel; + final model = + options?.model ?? defaultOptions.model ?? throwNullModelError(); final systemInstruction = messages.firstOrNull is SystemChatMessage ? messages.firstOrNull?.contentAsString : null; + final tools = options?.tools ?? defaultOptions.tools; + final toolChoice = options?.toolChoice ?? defaultOptions.toolChoice; + bool recreate = false; if (model != _currentModel) { _currentModel = model; @@ -347,9 +344,17 @@ class ChatFirebaseVertexAI extends BaseChatModel { _currentSystemInstruction = systemInstruction; recreate = true; } + if (!const ListEquality().equals(tools, _currentTools)) { + _currentTools = tools; + recreate = true; + } + if (toolChoice != _currentToolChoice) { + _currentToolChoice = toolChoice; + recreate = true; + } if (recreate) { - _recreateFirebaseClient(model, systemInstruction); + _recreateFirebaseClient(model, systemInstruction, tools, toolChoice); } } } diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart index 9c55d409..05840e8f 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart @@ -134,11 +134,11 @@ extension GenerateContentResponseMapper on f.GenerateContentResponse { .toList(growable: false), 'finish_message': candidate.finishMessage, }, - usage: LanguageModelUsage( - promptTokens: usageMetadata?.promptTokenCount, - responseTokens: usageMetadata?.candidatesTokenCount, - totalTokens: usageMetadata?.totalTokenCount, - ), + usage: const LanguageModelUsage( + // promptTokens: usageMetadata?.promptTokenCount, // not yet supported + // responseTokens: usageMetadata?.candidatesTokenCount, + // totalTokens: usageMetadata?.totalTokenCount, + ), ); } @@ -197,17 +197,14 @@ extension ChatToolListMapper on List { (tool) => f.FunctionDeclaration( tool.name, tool.description, - tool.inputJsonSchema.toSchema(), + _mapJsonSchemaToSchema(tool.inputJsonSchema), ), ).toList(growable: false), ), ]; } -} -extension SchemaMapper on Map { - f.Schema toSchema() { - final jsonSchema = this; + f.Schema _mapJsonSchemaToSchema(final Map jsonSchema) { final type = jsonSchema['type'] as String; final description = jsonSchema['description'] as String?; final nullable = jsonSchema['nullable'] as bool?; @@ -220,38 +217,45 @@ extension SchemaMapper on Map { switch (type) { case 'string': if (enumValues != null) { - return f.Schema.enumString( + return f.Schema( + f.SchemaType.string, enumValues: enumValues, description: description, nullable: nullable, + format: 'enum', ); } else { - return f.Schema.string( + return f.Schema( + f.SchemaType.string, description: description, nullable: nullable, ); } case 'number': - return f.Schema.number( + return f.Schema( + f.SchemaType.number, description: description, nullable: nullable, format: format, ); case 'integer': - return f.Schema.integer( + return f.Schema( + f.SchemaType.integer, description: description, nullable: nullable, format: format, ); case 'boolean': - return f.Schema.boolean( + return f.Schema( + f.SchemaType.boolean, description: description, nullable: nullable, ); 
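Editorial aside, not part of the patch: the generic `f.Schema(f.SchemaType..., ...)` constructor calls introduced in this hunk would, for a weather-tool input schema like the one used in the tests at the end of this patch, produce roughly the sketch below. The enum values and the required-property list are illustrative assumptions.

```dart
import 'package:firebase_vertexai/firebase_vertexai.dart' as f;

// Roughly what _mapJsonSchemaToSchema builds for a weather-tool input schema
// (enum values and required list are illustrative assumptions).
final weatherInputSchema = f.Schema(
  f.SchemaType.object,
  properties: {
    'location': f.Schema(
      f.SchemaType.string,
      description: 'The city and state, e.g. San Francisco, CA',
    ),
    'unit': f.Schema(
      f.SchemaType.string,
      enumValues: ['celsius', 'fahrenheit'],
      format: 'enum',
    ),
  },
  requiredProperties: ['location'],
);
```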
case 'array': if (items != null) { - final itemsSchema = items.toSchema(); - return f.Schema.array( + final itemsSchema = _mapJsonSchemaToSchema(items); + return f.Schema( + f.SchemaType.array, description: description, nullable: nullable, items: itemsSchema, @@ -261,12 +265,10 @@ extension SchemaMapper on Map { case 'object': if (properties != null) { final propertiesSchema = properties.map( - (key, value) => MapEntry( - key, - (value as Map).toSchema(), - ), + (key, value) => MapEntry(key, _mapJsonSchemaToSchema(value)), ); - return f.Schema.object( + return f.Schema( + f.SchemaType.object, properties: propertiesSchema, requiredProperties: requiredProperties, description: description, @@ -293,11 +295,6 @@ extension ChatToolChoiceMapper on ChatToolChoice { mode: f.FunctionCallingMode.auto, ), ), - ChatToolChoiceRequired() => f.ToolConfig( - functionCallingConfig: f.FunctionCallingConfig( - mode: f.FunctionCallingMode.any, - ), - ), final ChatToolChoiceForced t => f.ToolConfig( functionCallingConfig: f.FunctionCallingConfig( mode: f.FunctionCallingMode.any, diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart index fc28f66e..d41e4032 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart @@ -1,32 +1,30 @@ -import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/tools.dart'; -import 'package:meta/meta.dart'; /// {@template chat_firebase_vertex_ai_options} /// Options to pass into the Vertex AI for Firebase model. -/// -/// You can find a list of available models here: -/// https://firebase.google.com/docs/vertex-ai/gemini-models /// {@endtemplate} -@immutable class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// {@macro chat_firebase_vertex_ai_options} const ChatFirebaseVertexAIOptions({ - super.model, + this.model = 'gemini-1.0-pro', this.topP, this.topK, this.candidateCount, this.maxOutputTokens, this.temperature, this.stopSequences, - this.responseMimeType, this.safetySettings, super.tools, super.toolChoice, super.concurrencyLimit, }); + /// The LLM to use. + /// + /// You can find a list of available models here: + /// https://firebase.google.com/docs/vertex-ai/gemini-models + final String? model; + /// The maximum cumulative probability of tokens to consider when sampling. /// The model uses combined Top-k and nucleus sampling. Tokens are sorted /// based on their assigned probabilities so that only the most likely @@ -71,13 +69,6 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// The stop sequence will not be included as part of the response. final List? stopSequences; - /// Output response mimetype of the generated candidate text. - /// - /// Supported mimetype: - /// - `text/plain`: (default) Text output. - /// - `application/json`: JSON response in the candidates. - final String? responseMimeType; - /// A list of unique [ChatFirebaseVertexAISafetySetting] instances for blocking /// unsafe content. /// @@ -90,7 +81,8 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// the default safety setting for that category. final List? safetySettings; - @override + /// Creates a copy of this [ChatFirebaseVertexAIOptions] object with the given fields + /// replaced with the new values. ChatFirebaseVertexAIOptions copyWith({ final String? model, final double? 
topP, @@ -99,11 +91,7 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { final int? maxOutputTokens, final double? temperature, final List? stopSequences, - final String? responseMimeType, final List? safetySettings, - final List? tools, - final ChatToolChoice? toolChoice, - final int? concurrencyLimit, }) { return ChatFirebaseVertexAIOptions( model: model ?? this.model, @@ -113,68 +101,9 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { maxOutputTokens: maxOutputTokens ?? this.maxOutputTokens, temperature: temperature ?? this.temperature, stopSequences: stopSequences ?? this.stopSequences, - responseMimeType: responseMimeType ?? this.responseMimeType, safetySettings: safetySettings ?? this.safetySettings, - tools: tools ?? this.tools, - toolChoice: toolChoice ?? this.toolChoice, - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } - - @override - ChatFirebaseVertexAIOptions merge( - covariant final ChatFirebaseVertexAIOptions? other, - ) { - return copyWith( - model: other?.model, - topP: other?.topP, - topK: other?.topK, - candidateCount: other?.candidateCount, - maxOutputTokens: other?.maxOutputTokens, - temperature: other?.temperature, - stopSequences: other?.stopSequences, - responseMimeType: other?.responseMimeType, - safetySettings: other?.safetySettings, - tools: other?.tools, - toolChoice: other?.toolChoice, - concurrencyLimit: other?.concurrencyLimit, - ); - } - - @override - bool operator ==(covariant final ChatFirebaseVertexAIOptions other) { - return model == other.model && - topP == other.topP && - topK == other.topK && - candidateCount == other.candidateCount && - maxOutputTokens == other.maxOutputTokens && - temperature == other.temperature && - const ListEquality() - .equals(stopSequences, other.stopSequences) && - responseMimeType == other.responseMimeType && - const ListEquality() - .equals(safetySettings, other.safetySettings) && - const ListEquality().equals(tools, other.tools) && - toolChoice == other.toolChoice && - concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return model.hashCode ^ - topP.hashCode ^ - topK.hashCode ^ - candidateCount.hashCode ^ - maxOutputTokens.hashCode ^ - temperature.hashCode ^ - const ListEquality().hash(stopSequences) ^ - responseMimeType.hashCode ^ - const ListEquality() - .hash(safetySettings) ^ - const ListEquality().hash(tools) ^ - toolChoice.hashCode ^ - concurrencyLimit.hashCode; - } } /// {@template chat_google_generative_ai_safety_setting} @@ -182,7 +111,6 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// Passing a safety setting for a category changes the allowed probability that /// content is blocked. /// {@endtemplate} -@immutable class ChatFirebaseVertexAISafetySetting { /// {@macro chat_google_generative_ai_safety_setting} const ChatFirebaseVertexAISafetySetting({ @@ -195,28 +123,6 @@ class ChatFirebaseVertexAISafetySetting { /// Controls the probability threshold at which harm is blocked. final ChatFirebaseVertexAISafetySettingThreshold threshold; - - /// Creates a copy of this [ChatFirebaseVertexAISafetySetting] object with - /// the given fields replaced with the new values. - ChatFirebaseVertexAISafetySetting copyWith({ - final ChatFirebaseVertexAISafetySettingCategory? category, - final ChatFirebaseVertexAISafetySettingThreshold? threshold, - }) { - return ChatFirebaseVertexAISafetySetting( - category: category ?? this.category, - threshold: threshold ?? 
this.threshold, - ); - } - - @override - bool operator ==(covariant final ChatFirebaseVertexAISafetySetting other) { - return category == other.category && threshold == other.threshold; - } - - @override - int get hashCode { - return category.hashCode ^ threshold.hashCode; - } } /// Safety settings categorizes. diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index eb451c0d..c13007f5 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _flutterfire_internals - sha256: b1595874fbc8f7a50da90f5d8f327bb0bfd6a95dc906c390efe991540c3b54aa + sha256: "2350805d7afefb0efe7acd325cb19d3ae8ba4039b906eade3807ffb69938a01f" url: "https://pub.dev" source: hosted - version: "1.3.40" + version: "1.3.33" async: dependency: transitive description: @@ -77,10 +77,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" + sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" url: "https://pub.dev" source: hosted - version: "0.3.4+2" + version: "0.3.4+1" crypto: dependency: transitive description: @@ -101,82 +101,58 @@ packages: dependency: "direct main" description: name: firebase_app_check - sha256: "8314938830d6b47217e369664567f6d8a1e77603448e1dbdaf4f7d8c2111ff5c" + sha256: f66b67027de548b6f8b61c7aa752a24804104e7f463723d202ae9c6d9520fa6b url: "https://pub.dev" source: hosted - version: "0.3.0+4" + version: "0.2.2+5" firebase_app_check_platform_interface: dependency: transitive description: name: firebase_app_check_platform_interface - sha256: edefbd312d2f4c52ab6a62d4efca512012bcc580f152c856a5730bfabcf8a924 + sha256: c06fcb2381ff2566f4ebe1a1df2e1798de3d0fad531f673e8539368f33cf6b34 url: "https://pub.dev" source: hosted - version: "0.1.0+34" + version: "0.1.0+27" firebase_app_check_web: dependency: transitive description: name: firebase_app_check_web - sha256: "2c2377ecf922514c540c2d4a9c06e46830a0706fdfc3d59b7ade9b75843b81c5" + sha256: "88c8d792d429a43767461312f29baa5d3b76cd0453cf48dd008f8f94393221c1" url: "https://pub.dev" source: hosted - version: "0.1.2+12" - firebase_auth: - dependency: "direct main" - description: - name: firebase_auth - sha256: "2457ac6cbc152fa464aad3fb35f98039b0c4ab8e9bedf476672508b291bdbc3a" - url: "https://pub.dev" - source: hosted - version: "5.1.4" - firebase_auth_platform_interface: - dependency: transitive - description: - name: firebase_auth_platform_interface - sha256: "0408e2ed74b1afa0490a93aa041fe90d7573af7ffc59a641edc6c5b5c1b8d2a4" - url: "https://pub.dev" - source: hosted - version: "7.4.3" - firebase_auth_web: - dependency: transitive - description: - name: firebase_auth_web - sha256: "7e0c6d0fa8c5c1b2ae126a78f2d1a206a77a913f78d20f155487bf746162dccc" - url: "https://pub.dev" - source: hosted - version: "5.12.5" + version: "0.1.2+5" firebase_core: dependency: "direct main" description: name: firebase_core - sha256: "3187f4f8e49968573fd7403011dca67ba95aae419bc0d8131500fae160d94f92" + sha256: "372d94ced114b9c40cb85e18c50ac94a7e998c8eec630c50d7aec047847d27bf" url: "https://pub.dev" source: hosted - version: "3.3.0" + version: "2.31.0" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: "3c3a1e92d6f4916c32deea79c4a7587aa0e9dbbe5889c7a16afcf005a485ee02" + sha256: c437ae5d17e6b5cc7981cf6fd458a5db4d12979905f9aafd1fea930428a9fe63 url: 
"https://pub.dev" source: hosted - version: "5.2.0" + version: "5.0.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: e8d1e22de72cb21cdcfc5eed7acddab3e99cd83f3b317f54f7a96c32f25fd11e + sha256: "43d9e951ac52b87ae9cc38ecdcca1e8fa7b52a1dd26a96085ba41ce5108db8e9" url: "https://pub.dev" source: hosted - version: "2.17.4" + version: "2.17.0" firebase_vertexai: dependency: "direct main" description: name: firebase_vertexai - sha256: ad34f7a87d870949e92851f4c73b7e15f808fd4717ed899fa7b4813fffe74831 + sha256: "6e61f6717bee3ab563e8e506e0fed98761f98c181626c62d924d06598786e95e" url: "https://pub.dev" source: hosted - version: "0.2.2+4" + version: "0.1.0" fixnum: dependency: transitive description: @@ -204,18 +180,18 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be + sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be url: "https://pub.dev" source: hosted - version: "0.4.4" + version: "0.4.0" http: dependency: transitive description: name: http - sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 + sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" url: "https://pub.dev" source: hosted - version: "1.2.2" + version: "1.2.1" http_parser: dependency: transitive description: @@ -230,31 +206,31 @@ packages: path: "../langchain_core" relative: true source: path - version: "0.3.6" + version: "0.3.1" leak_tracker: dependency: transitive description: name: leak_tracker - sha256: "3f87a60e8c63aecc975dda1ceedbc8f24de75f09e4856ea27daf8958f2f0ce05" + sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" url: "https://pub.dev" source: hosted - version: "10.0.5" + version: "10.0.0" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: "932549fb305594d82d7183ecd9fa93463e9914e1b67cacc34bc40906594a1806" + sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 url: "https://pub.dev" source: hosted - version: "3.0.5" + version: "2.0.1" leak_tracker_testing: dependency: transitive description: name: leak_tracker_testing - sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" + sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 url: "https://pub.dev" source: hosted - version: "3.0.1" + version: "2.0.1" matcher: dependency: transitive description: @@ -267,18 +243,18 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec + sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" url: "https://pub.dev" source: hosted - version: "0.11.1" + version: "0.8.0" meta: dependency: "direct main" description: name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 url: "https://pub.dev" source: hosted - version: "1.15.0" + version: "1.11.0" path: dependency: transitive description: @@ -299,10 +275,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" + sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" url: "https://pub.dev" source: hosted - version: "0.28.0" + version: "0.27.7" sky_engine: dependency: transitive description: flutter @@ -360,10 +336,10 @@ packages: 
dependency: transitive description: name: test_api - sha256: "5b8a98dafc4d5c4c9c72d8b31ab2b23fc13422348d2997120294d3bac86b4ddb" + sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" url: "https://pub.dev" source: hosted - version: "0.7.2" + version: "0.6.1" typed_data: dependency: transitive description: @@ -376,10 +352,10 @@ packages: dependency: "direct main" description: name: uuid - sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" + sha256: "814e9e88f21a176ae1359149021870e87f7cddaf633ab678a5d2b0bff7fd1ba8" url: "https://pub.dev" source: hosted - version: "4.4.2" + version: "4.4.0" vector_math: dependency: transitive description: @@ -392,10 +368,10 @@ packages: dependency: transitive description: name: vm_service - sha256: "5c5f338a667b4c644744b661f309fb8080bb94b18a7e91ef1dbd343bed00ed6d" + sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 url: "https://pub.dev" source: hosted - version: "14.2.5" + version: "13.0.0" web: dependency: transitive description: @@ -405,5 +381,5 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.4.0 <4.0.0" - flutter: ">=3.22.0" + dart: ">=3.3.0 <4.0.0" + flutter: ">=3.19.0" diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index d71a826d..8c5e6995 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_firebase -description: LangChain.dart integration module for Firebase (Gemini, VertexAI for Firebase, Firestore, etc.). -version: 0.2.1+2 +description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). +version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -14,20 +14,19 @@ topics: - firebase environment: - sdk: ">=3.4.0 <4.0.0" - flutter: ">=3.22.0" + sdk: ">=3.0.0 <4.0.0" + flutter: ">=3.19.0" dependencies: - collection: ^1.18.0 - firebase_app_check: ^0.3.0 + collection: ">=1.17.0 <1.19.0" + firebase_app_check: ^0.2.2+5 + firebase_core: ^2.31.0 firebase_auth: ^5.1.0 - firebase_core: ^3.3.0 cloud_firestore: ^4.17.0 - - firebase_vertexai: ^0.2.2 - langchain_core: 0.3.6 + firebase_vertexai: ^0.1.0 + langchain_core: ^0.3.1 meta: ^1.11.0 - uuid: ^4.4.2 + uuid: ^4.3.3 dev_dependencies: flutter_test: diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index 36d0882e..b61c71d8 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -1,35 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.6.3+1 - - - **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). ([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981)) - - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). 
([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) - -## 0.6.2 - - - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) - -## 0.6.1 - - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -## 0.6.0 - -> Note: `ChatGoogleGenerativeAI` now uses `gemini-1.5-flash` model by default. - - - **BREAKING** **FEAT**: Update ChatGoogleGenerativeAI default model to gemini-1.5-flash ([#462](https://github.com/davidmigloz/langchain_dart/issues/462)). ([c8b30c90](https://github.com/davidmigloz/langchain_dart/commit/c8b30c906a17751547cc340f987b6670fbd67e69)) - - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) - - **FEAT**: Support response MIME type and schema in ChatGoogleGenerativeAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)). ([e258399e](https://github.com/davidmigloz/langchain_dart/commit/e258399e03437e8abe25417a14671dfb719cb273)) - - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) - -## 0.5.1 - - - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) - ## 0.5.0 > Note: `ChatGoogleGenerativeAI` and `GoogleGenerativeAIEmbeddings` now use the version `v1beta` of the Gemini API (instead of `v1`) which support the latest models (`gemini-1.5-pro-latest` and `gemini-1.5-flash-latest`). diff --git a/packages/langchain_google/lib/langchain_google.dart b/packages/langchain_google/lib/langchain_google.dart index a4dd4908..371e45ad 100644 --- a/packages/langchain_google/lib/langchain_google.dart +++ b/packages/langchain_google/lib/langchain_google.dart @@ -1,4 +1,4 @@ -/// LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.). +/// LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). 
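Editorial aside, not part of the patch: a minimal sketch of the `ChatGoogleGenerativeAI` usage implied by the module description above and the default `gemini-pro` model restored in the next hunks. The `apiKey` constructor parameter and the environment variable name are assumptions (the diff below only shows the `apiKey` getter); `close()` is the method documented later in this diff.

```dart
import 'dart:io';

import 'package:langchain_core/prompts.dart';
import 'package:langchain_google/langchain_google.dart';

Future<void> main() async {
  final chatModel = ChatGoogleGenerativeAI(
    // Assumed: API key read from an environment variable of your choosing.
    apiKey: Platform.environment['GOOGLEAI_API_KEY'],
    defaultOptions: const ChatGoogleGenerativeAIOptions(
      model: 'gemini-pro', // default restored by this patch
      temperature: 0,
    ),
  );

  final res = await chatModel.invoke(
    PromptValue.string('Name one Google Cloud service.'),
  );
  print(res.output.content);

  chatModel.close(); // releases the underlying HTTP client
}
```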
library; export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart index 5b41f34d..02fde0bb 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart @@ -31,26 +31,25 @@ import 'types.dart'; /// ### Available models /// /// The following models are available: -/// - `gemini-1.5-flash`: +/// - `gemini-1.0-pro` (or `gemini-pro`): +/// * text -> text model +/// * Max input token: 30720 +/// * Max output tokens: 2048 +/// - `gemini-pro-vision`: +/// * text / image -> text model +/// * Max input token: 12288 +/// * Max output tokens: 4096 +/// - `gemini-1.5-pro-latest`: text / image -> text model /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 -/// - `gemini-1.5-pro`: +/// - `gemini-1.5-flash-latest`: /// * text / image / audio -> text model -/// * Max input token: 2097152 -/// * Max output tokens: 8192 -/// - `gemini-1.0-pro` (or `gemini-pro`): -/// * text -> text model -/// * Max input token: 32760 +/// * Max input token: 1048576 /// * Max output tokens: 8192 -/// - `aqa`: -/// * text -> text model -/// * Max input token: 7168 -/// * Max output tokens: 1024 /// /// Mind that this list may not be up-to-date. -/// Refer to the [documentation](https://ai.google.dev/gemini-api/docs/models/gemini) -/// for the updated list. +/// Refer to the [documentation](https://ai.google.dev/models) for the updated list. /// /// #### Tuned models /// @@ -119,7 +118,7 @@ import 'types.dart'; /// /// [ChatGoogleGenerativeAI] supports tool calling. /// -/// Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) +/// Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) /// for more information on how to use tools. /// /// Example: @@ -212,7 +211,7 @@ class ChatGoogleGenerativeAI final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatGoogleGenerativeAIOptions( - model: defaultModel, + model: 'gemini-pro', ), }) : _currentModel = defaultOptions.model ?? '', _httpClient = createDefaultHttpClient( @@ -248,18 +247,15 @@ class ChatGoogleGenerativeAI /// Get the API key. String get apiKey => _httpClient.headers['x-goog-api-key'] ?? ''; + @override + String get modelType => 'chat-google-generative-ai'; + /// The current model set in [_googleAiClient]; String _currentModel; /// The current system instruction set in [_googleAiClient]; String? _currentSystemInstruction; - @override - String get modelType => 'chat-google-generative-ai'; - - /// The default model to use unless another is specified. - static const defaultModel = 'gemini-1.5-flash'; - @override Future invoke( final PromptValue input, { @@ -326,11 +322,6 @@ class ChatGoogleGenerativeAI temperature: options?.temperature ?? defaultOptions.temperature, topP: options?.topP ?? defaultOptions.topP, topK: options?.topK ?? defaultOptions.topK, - responseMimeType: - options?.responseMimeType ?? defaultOptions.responseMimeType, - responseSchema: - (options?.responseSchema ?? defaultOptions.responseSchema) - ?.toSchema(), ), (options?.tools ?? defaultOptions.tools)?.toToolList(), (options?.toolChoice ?? 
defaultOptions.toolChoice)?.toToolConfig(), @@ -358,7 +349,7 @@ class ChatGoogleGenerativeAI return tokens.totalTokens; } - @override + /// Closes the client and cleans up any resources associated with it. void close() { _httpClient.close(); } @@ -393,7 +384,8 @@ class ChatGoogleGenerativeAI final List messages, final ChatGoogleGenerativeAIOptions? options, ) { - final model = options?.model ?? defaultOptions.model ?? defaultModel; + final model = + options?.model ?? defaultOptions.model ?? throwNullModelError(); final systemInstruction = messages.firstOrNull is SystemChatMessage ? messages.firstOrNull?.contentAsString diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart index 521921ac..8623a2c1 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart @@ -1,6 +1,7 @@ // ignore_for_file: public_member_api_docs import 'dart:convert'; +import 'package:collection/collection.dart'; import 'package:google_generative_ai/google_generative_ai.dart' as g; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/language_models.dart'; @@ -95,7 +96,7 @@ extension GenerateContentResponseMapper on g.GenerateContentResponse { _ => throw AssertionError('Unknown part type: $p'), }, ) - .nonNulls + .whereNotNull() .join('\n'), toolCalls: candidate.content.parts .whereType() @@ -197,17 +198,14 @@ extension ChatToolListMapper on List { (tool) => g.FunctionDeclaration( tool.name, tool.description, - tool.inputJsonSchema.toSchema(), + _mapJsonSchemaToSchema(tool.inputJsonSchema), ), ).toList(growable: false), ), ]; } -} -extension SchemaMapper on Map { - g.Schema toSchema() { - final jsonSchema = this; + g.Schema _mapJsonSchemaToSchema(final Map jsonSchema) { final type = jsonSchema['type'] as String; final description = jsonSchema['description'] as String?; final nullable = jsonSchema['nullable'] as bool?; @@ -250,7 +248,7 @@ extension SchemaMapper on Map { ); case 'array': if (items != null) { - final itemsSchema = items.toSchema(); + final itemsSchema = _mapJsonSchemaToSchema(items); return g.Schema.array( items: itemsSchema, description: description, @@ -261,10 +259,7 @@ extension SchemaMapper on Map { case 'object': if (properties != null) { final propertiesSchema = properties.map( - (key, value) => MapEntry( - key, - (value as Map).toSchema(), - ), + (key, value) => MapEntry(key, _mapJsonSchemaToSchema(value)), ); return g.Schema.object( properties: propertiesSchema, @@ -293,11 +288,6 @@ extension ChatToolChoiceMapper on ChatToolChoice { mode: g.FunctionCallingMode.auto, ), ), - ChatToolChoiceRequired() => g.ToolConfig( - functionCallingConfig: g.FunctionCallingConfig( - mode: g.FunctionCallingMode.any, - ), - ), final ChatToolChoiceForced t => g.ToolConfig( functionCallingConfig: g.FunctionCallingConfig( mode: g.FunctionCallingMode.any, diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart index 4c2f4063..b3553cab 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart @@ -1,31 +1,29 @@ import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/tools.dart'; -import 'package:meta/meta.dart'; /// {@template chat_google_generative_ai_options} /// Options to pass into the 
Google Generative AI Chat Model. -/// -/// You can find a list of available models [here](https://ai.google.dev/models). /// {@endtemplate} -@immutable class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// {@macro chat_google_generative_ai_options} const ChatGoogleGenerativeAIOptions({ - super.model, + this.model = 'gemini-pro', this.topP, this.topK, this.candidateCount, this.maxOutputTokens, this.temperature, this.stopSequences, - this.responseMimeType, - this.responseSchema, this.safetySettings, super.tools, super.toolChoice, super.concurrencyLimit, }); + /// The LLM to use. + /// + /// You can find a list of available models here: https://ai.google.dev/models + final String? model; + /// The maximum cumulative probability of tokens to consider when sampling. /// The model uses combined Top-k and nucleus sampling. Tokens are sorted /// based on their assigned probabilities so that only the most likely @@ -70,39 +68,6 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// The stop sequence will not be included as part of the response. final List? stopSequences; - /// Output response mimetype of the generated candidate text. - /// - /// Supported mimetype: - /// - `text/plain`: (default) Text output. - /// - `application/json`: JSON response in the candidates. - final String? responseMimeType; - - /// Output response schema of the generated candidate text. - /// Following the [JSON Schema specification](https://json-schema.org). - /// - /// - Note: This only applies when the specified ``responseMIMEType`` supports - /// a schema; currently this is limited to `application/json`. - /// - /// Example: - /// ```json - /// { - /// 'type': 'object', - /// 'properties': { - /// 'answer': { - /// 'type': 'string', - /// 'description': 'The answer to the question being asked', - /// }, - /// 'sources': { - /// 'type': 'array', - /// 'items': {'type': 'string'}, - /// 'description': 'The sources used to answer the question', - /// }, - /// }, - /// 'required': ['answer', 'sources'], - /// }, - /// ``` - final Map? responseSchema; - /// A list of unique [ChatGoogleGenerativeAISafetySetting] instances for blocking /// unsafe content. /// @@ -115,7 +80,8 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// the default safety setting for that category. final List? safetySettings; - @override + /// Creates a copy of this [ChatGoogleGenerativeAIOptions] object with the given fields + /// replaced with the new values. ChatGoogleGenerativeAIOptions copyWith({ final String? model, final double? topP, @@ -125,9 +91,6 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { final double? temperature, final List? stopSequences, final List? safetySettings, - final List? tools, - final ChatToolChoice? toolChoice, - final int? concurrencyLimit, }) { return ChatGoogleGenerativeAIOptions( model: model ?? this.model, @@ -138,60 +101,8 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { temperature: temperature ?? this.temperature, stopSequences: stopSequences ?? this.stopSequences, safetySettings: safetySettings ?? this.safetySettings, - tools: tools ?? this.tools, - toolChoice: toolChoice ?? this.toolChoice, - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } - - @override - ChatGoogleGenerativeAIOptions merge( - covariant final ChatGoogleGenerativeAIOptions? 
other, - ) { - return copyWith( - model: other?.model, - topP: other?.topP, - topK: other?.topK, - candidateCount: other?.candidateCount, - maxOutputTokens: other?.maxOutputTokens, - temperature: other?.temperature, - stopSequences: other?.stopSequences, - safetySettings: other?.safetySettings, - tools: other?.tools, - toolChoice: other?.toolChoice, - concurrencyLimit: other?.concurrencyLimit, - ); - } - - @override - bool operator ==(covariant final ChatGoogleGenerativeAIOptions other) { - return model == other.model && - topP == other.topP && - topK == other.topK && - candidateCount == other.candidateCount && - maxOutputTokens == other.maxOutputTokens && - temperature == other.temperature && - stopSequences == other.stopSequences && - safetySettings == other.safetySettings && - tools == other.tools && - toolChoice == other.toolChoice && - concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return model.hashCode ^ - topP.hashCode ^ - topK.hashCode ^ - candidateCount.hashCode ^ - maxOutputTokens.hashCode ^ - temperature.hashCode ^ - stopSequences.hashCode ^ - safetySettings.hashCode ^ - tools.hashCode ^ - toolChoice.hashCode ^ - concurrencyLimit.hashCode; - } } /// {@template chat_google_generative_ai_safety_setting} diff --git a/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart b/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart index e79f00b4..4f668b40 100644 --- a/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart @@ -117,8 +117,8 @@ class ChatVertexAI extends BaseChatModel { final String location = 'us-central1', final String? rootUrl, super.defaultOptions = const ChatVertexAIOptions( - publisher: defaultPublisher, - model: defaultModel, + publisher: 'google', + model: 'chat-bison', ), }) : client = VertexAIGenAIClient( httpClient: httpClient, @@ -139,12 +139,6 @@ class ChatVertexAI extends BaseChatModel { @override String get modelType => 'vertex-ai-chat'; - /// The default publisher to use unless another is specified. - static const defaultPublisher = 'google'; - - /// The default model to use unless another is specified. - static const defaultModel = 'chat-bison'; - @override Future invoke( final PromptValue input, { @@ -164,15 +158,19 @@ class ChatVertexAI extends BaseChatModel { final examples = (options?.examples ?? defaultOptions.examples) ?.map((final e) => e.toVertexAIChatExample()) .toList(growable: false); - final publisher = - options?.publisher ?? defaultOptions.publisher ?? defaultPublisher; - final model = options?.model ?? defaultOptions.model ?? defaultModel; + final model = + options?.model ?? defaultOptions.model ?? throwNullModelError(); final result = await client.chat.predict( context: context, examples: examples, messages: vertexMessages, - publisher: publisher, + publisher: options?.publisher ?? + defaultOptions.publisher ?? + ArgumentError.checkNotNull( + defaultOptions.publisher, + 'VertexAIOptions.publisher', + ), model: model, parameters: VertexAITextChatModelRequestParams( maxOutputTokens: @@ -218,15 +216,18 @@ class ChatVertexAI extends BaseChatModel { final examples = (options?.examples ?? defaultOptions.examples) ?.map((final e) => e.toVertexAIChatExample()) .toList(growable: false); - final publisher = - options?.publisher ?? defaultOptions.publisher ?? defaultPublisher; - final model = options?.model ?? defaultOptions.model ?? 
defaultModel; + final model = + options?.model ?? defaultOptions.model ?? throwNullModelError(); final res = await client.chat.countTokens( context: context, examples: examples, messages: vertexMessages, - publisher: publisher, + publisher: options?.publisher ?? + ArgumentError.checkNotNull( + defaultOptions.publisher, + 'VertexAIOptions.publisher', + ), model: model, ); return res.totalTokens; diff --git a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart index 50249bf3..49316c4e 100644 --- a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart @@ -1,20 +1,13 @@ -import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/tools.dart'; -import 'package:meta/meta.dart'; /// {@template chat_vertex_ai_options} /// Options to pass into the Vertex AI Chat Model. -/// -/// You can find a list of available models here: -/// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models /// {@endtemplate} -@immutable class ChatVertexAIOptions extends ChatModelOptions { /// {@macro chat_vertex_ai_options} const ChatVertexAIOptions({ - this.publisher, - super.model, + this.publisher = 'google', + this.model = 'chat-bison', this.maxOutputTokens, this.temperature, this.topP, @@ -30,6 +23,17 @@ class ChatVertexAIOptions extends ChatModelOptions { /// Use `google` for first-party models. final String? publisher; + /// The text model to use. + /// + /// To use the latest model version, specify the model name without a version + /// number (e.g. `chat-bison`). + /// To use a stable model version, specify the model version number + /// (e.g. `chat-bison@001`). + /// + /// You can find a list of available models here: + /// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models + final String? model; + /// Maximum number of tokens that can be generated in the response. A token /// is approximately four characters. 100 tokens correspond to roughly /// 60-80 words. @@ -98,7 +102,8 @@ class ChatVertexAIOptions extends ChatModelOptions { /// List of messages to the model to learn how to respond to the conversation. final List? examples; - @override + /// Creates a copy of this [ChatVertexAIOptions] object with the given fields + /// replaced with the new values. ChatVertexAIOptions copyWith({ final String? publisher, final String? model, @@ -109,9 +114,6 @@ class ChatVertexAIOptions extends ChatModelOptions { final List? stopSequences, final int? candidateCount, final List? examples, - final List? tools, - final ChatToolChoice? toolChoice, - final int? concurrencyLimit, }) { return ChatVertexAIOptions( publisher: publisher ?? this.publisher, @@ -123,52 +125,6 @@ class ChatVertexAIOptions extends ChatModelOptions { stopSequences: stopSequences ?? this.stopSequences, candidateCount: candidateCount ?? this.candidateCount, examples: examples ?? this.examples, - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, - ); - } - - @override - ChatVertexAIOptions merge(covariant ChatVertexAIOptions? 
other) { - return copyWith( - publisher: other?.publisher, - model: other?.model, - maxOutputTokens: other?.maxOutputTokens, - temperature: other?.temperature, - topP: other?.topP, - topK: other?.topK, - stopSequences: other?.stopSequences, - candidateCount: other?.candidateCount, - examples: other?.examples, - concurrencyLimit: other?.concurrencyLimit, ); } - - @override - bool operator ==(covariant final ChatVertexAIOptions other) { - return publisher == other.publisher && - model == other.model && - maxOutputTokens == other.maxOutputTokens && - temperature == other.temperature && - topP == other.topP && - topK == other.topK && - const ListEquality() - .equals(stopSequences, other.stopSequences) && - candidateCount == other.candidateCount && - const ListEquality().equals(examples, other.examples) && - concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return publisher.hashCode ^ - model.hashCode ^ - maxOutputTokens.hashCode ^ - temperature.hashCode ^ - topP.hashCode ^ - topK.hashCode ^ - const ListEquality().hash(stopSequences) ^ - candidateCount.hashCode ^ - const ListEquality().hash(examples) ^ - concurrencyLimit.hashCode; - } } diff --git a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart index 263a2c44..b5996abd 100644 --- a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart +++ b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart @@ -1,3 +1,4 @@ +import 'package:collection/collection.dart' show IterableNullableExtension; import 'package:google_generative_ai/google_generative_ai.dart' show Content, EmbedContentRequest, GenerativeModel, TaskType; import 'package:http/http.dart' as http; @@ -24,6 +25,8 @@ import '../../utils/https_client/http_client.dart'; /// /// - `text-embedding-004` /// * Dimensions: 768 (with support for reduced dimensionality) +/// - `embedding-001` +/// * Dimensions: 768 /// /// The previous list of models may not be exhaustive or up-to-date. Check out /// the [Google AI documentation](https://ai.google.dev/models/gemini) @@ -136,6 +139,7 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { /// The number of dimensions the resulting output embeddings should have. /// Only supported in `text-embedding-004` and later models. + /// TODO https://github.com/google-gemini/generative-ai-dart/pull/149 int? dimensions; /// The maximum number of documents to embed in a single request. 
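Editorial aside, not part of the patch: a hedged sketch of the embeddings API touched by the surrounding hunks. The `apiKey` parameter and the environment variable name are assumptions; `dimensions` is effectively ignored for now, per the TODO above, until the referenced generative-ai-dart change lands.

```dart
import 'dart:io';

import 'package:langchain_core/documents.dart';
import 'package:langchain_google/langchain_google.dart';

Future<void> main() async {
  final embeddings = GoogleGenerativeAIEmbeddings(
    // Assumed: API key read from an environment variable of your choosing.
    apiKey: Platform.environment['GOOGLEAI_API_KEY'],
  );

  // Batch document embedding; `dimensions` is currently ignored (see TODO above).
  final vectors = await embeddings.embedDocuments([
    Document(pageContent: 'LangChain.dart integrates Google models.'),
  ]);

  // Single query embedding.
  final query = await embeddings.embedQuery('What does LangChain.dart do?');
  print('${vectors.first.length} / ${query.length} dimensions');
}
```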
@@ -165,13 +169,13 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { Content.text(doc.pageContent), taskType: TaskType.retrievalDocument, title: doc.metadata[docTitleKey], - outputDimensionality: dimensions, + // outputDimensionality: dimensions, TODO ); }).toList(growable: false), ); return data.embeddings .map((final p) => p.values) - .nonNulls + .whereNotNull() .toList(growable: false); }), ); @@ -184,7 +188,7 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { final data = await _googleAiClient.embedContent( Content.text(query), taskType: TaskType.retrievalQuery, - outputDimensionality: dimensions, + // outputDimensionality: dimensions, TODO ); return data.embedding.values; } diff --git a/packages/langchain_google/lib/src/llms/vertex_ai/types.dart b/packages/langchain_google/lib/src/llms/vertex_ai/types.dart index e11589c4..bf382c44 100644 --- a/packages/langchain_google/lib/src/llms/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/llms/vertex_ai/types.dart @@ -1,19 +1,13 @@ -import 'package:collection/collection.dart'; import 'package:langchain_core/llms.dart'; -import 'package:meta/meta.dart'; /// {@template vertex_ai_options} /// Options to pass into the Vertex AI LLM. -/// -/// You can find a list of available models here: -/// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models /// {@endtemplate} -@immutable class VertexAIOptions extends LLMOptions { /// {@macro vertex_ai_options} const VertexAIOptions({ - this.publisher, - super.model, + this.publisher = 'google', + this.model = 'text-bison', this.maxOutputTokens, this.temperature, this.topP, @@ -28,6 +22,17 @@ class VertexAIOptions extends LLMOptions { /// Use `google` for first-party models. final String? publisher; + /// The text model to use. + /// + /// To use the latest model version, specify the model name without a version + /// number (e.g. `text-bison`). + /// To use a stable model version, specify the model version number + /// (e.g. `text-bison@001`). + /// + /// You can find a list of available models here: + /// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models + final String? model; + /// Maximum number of tokens that can be generated in the response. A token /// is approximately four characters. 100 tokens correspond to roughly /// 60-80 words. @@ -93,7 +98,8 @@ class VertexAIOptions extends LLMOptions { /// Range: `[1–8]` final int? candidateCount; - @override + /// Creates a copy of this [VertexAIOptions] object with the given fields + /// replaced with the new values. VertexAIOptions copyWith({ final String? publisher, final String? model, @@ -103,7 +109,6 @@ class VertexAIOptions extends LLMOptions { final int? topK, final List? stopSequences, final int? candidateCount, - final int? concurrencyLimit, }) { return VertexAIOptions( publisher: publisher ?? this.publisher, @@ -114,49 +119,6 @@ class VertexAIOptions extends LLMOptions { topK: topK ?? this.topK, stopSequences: stopSequences ?? this.stopSequences, candidateCount: candidateCount ?? this.candidateCount, - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, - ); - } - - @override - VertexAIOptions merge(covariant final VertexAIOptions? 
other) { - return copyWith( - publisher: other?.publisher, - model: other?.model, - maxOutputTokens: other?.maxOutputTokens, - temperature: other?.temperature, - topP: other?.topP, - topK: other?.topK, - stopSequences: other?.stopSequences, - candidateCount: other?.candidateCount, - concurrencyLimit: other?.concurrencyLimit, ); } - - @override - bool operator ==(covariant final VertexAIOptions other) { - return publisher == other.publisher && - model == other.model && - maxOutputTokens == other.maxOutputTokens && - temperature == other.temperature && - topP == other.topP && - topK == other.topK && - const ListEquality() - .equals(stopSequences, other.stopSequences) && - candidateCount == other.candidateCount && - concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return publisher.hashCode ^ - model.hashCode ^ - maxOutputTokens.hashCode ^ - temperature.hashCode ^ - topP.hashCode ^ - topK.hashCode ^ - const ListEquality().hash(stopSequences) ^ - candidateCount.hashCode ^ - concurrencyLimit.hashCode; - } } diff --git a/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart b/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart index 955cc7ca..a0873fcc 100644 --- a/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart +++ b/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart @@ -123,8 +123,8 @@ class VertexAI extends BaseLLM { final String location = 'us-central1', final String? rootUrl, super.defaultOptions = const VertexAIOptions( - publisher: defaultPublisher, - model: defaultModel, + publisher: 'google', + model: 'text-bison', ), }) : client = VertexAIGenAIClient( httpClient: httpClient, @@ -145,24 +145,21 @@ class VertexAI extends BaseLLM { @override String get modelType => 'vertex-ai'; - /// The default publisher to use unless another is specified. - static const defaultPublisher = 'google'; - - /// The default model to use unless another is specified. - static const defaultModel = 'text-bison'; - @override Future invoke( final PromptValue input, { final VertexAIOptions? options, }) async { final id = _uuid.v4(); - final publisher = - options?.publisher ?? defaultOptions.publisher ?? defaultPublisher; - final model = options?.model ?? defaultOptions.model ?? defaultModel; + final model = + options?.model ?? defaultOptions.model ?? throwNullModelError(); final result = await client.text.predict( prompt: input.toString(), - publisher: publisher, + publisher: options?.publisher ?? + ArgumentError.checkNotNull( + defaultOptions.publisher, + 'VertexAIOptions.publisher', + ), model: model, parameters: VertexAITextModelRequestParams( maxOutputTokens: @@ -194,12 +191,15 @@ class VertexAI extends BaseLLM { final PromptValue promptValue, { final VertexAIOptions? options, }) async { - final publisher = - options?.publisher ?? defaultOptions.publisher ?? defaultPublisher; - final model = options?.model ?? defaultOptions.model ?? defaultModel; + final model = + options?.model ?? defaultOptions.model ?? throwNullModelError(); final res = await client.text.countTokens( prompt: promptValue.toString(), - publisher: publisher, + publisher: options?.publisher ?? 
+ ArgumentError.checkNotNull( + defaultOptions.publisher, + 'VertexAIOptions.publisher', + ), model: model, ); return res.totalTokens; diff --git a/packages/langchain_google/lib/src/utils/https_client/http_client.dart b/packages/langchain_google/lib/src/utils/https_client/http_client.dart index 6b9ed76c..479d2164 100644 --- a/packages/langchain_google/lib/src/utils/https_client/http_client.dart +++ b/packages/langchain_google/lib/src/utils/https_client/http_client.dart @@ -2,7 +2,8 @@ import 'package:http/http.dart' as http; export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js_interop) 'http_client_html.dart'; + if (dart.library.js) 'http_client_html.dart' + if (dart.library.html) 'http_client_html.dart'; /// {@template custom_http_client} /// Custom HTTP client that wraps the base HTTP client and allows to override diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 6bbf914b..7b441954 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_google -description: LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.). -version: 0.6.3+1 +description: LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). +version: 0.5.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -14,24 +14,24 @@ topics: - vertex-ai environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - collection: ^1.18.0 - fetch_client: ^1.1.2 - gcloud: ^0.8.13 - google_generative_ai: 0.4.4 - googleapis: ^13.0.0 - googleapis_auth: ^1.6.0 - http: ^1.2.2 - langchain_core: 0.3.6 + collection: ">=1.17.0 <1.19.0" + fetch_client: ^1.0.2 + gcloud: ^0.8.12 + google_generative_ai: 0.4.0 + googleapis: ^12.0.0 + googleapis_auth: ^1.5.1 + http: ^1.1.0 + langchain_core: ^0.3.1 meta: ^1.11.0 - uuid: ^4.4.2 - vertex_ai: ^0.1.0+2 + uuid: ^4.3.3 + vertex_ai: ^0.1.0 langchain_firebase: ^0.1.0 firebase_core: ^2.31.0 dev_dependencies: - test: ^1.25.8 + test: ^1.24.9 fake_cloud_firestore: ^2.5.1 langchain: ^0.6.0+1 diff --git a/packages/langchain_google/pubspec_overrides.yaml b/packages/langchain_google/pubspec_overrides.yaml index 9844d8a9..1fabfd3c 100644 --- a/packages/langchain_google/pubspec_overrides.yaml +++ b/packages/langchain_google/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_core,vertex_ai +# melos_managed_dependency_overrides: vertex_ai,langchain_core dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart b/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart index 6d692977..f6567f6d 100644 --- a/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart +++ b/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart @@ -14,7 +14,7 @@ import 'package:test/test.dart'; void main() { group('ChatGoogleGenerativeAI tests', () { - const defaultModel = 'gemini-1.5-pro'; + const defaultModel = 'gemini-1.5-pro-latest'; late ChatGoogleGenerativeAI chatModel; @@ -73,7 
+73,7 @@ void main() { expect(res.output.content, isNotEmpty); }); - test('Text-and-image input', () async { + test('Text-and-image input with gemini-pro-vision', () async { final res = await chatModel.invoke( PromptValue.chat([ ChatMessage.human( @@ -89,6 +89,9 @@ void main() { ]), ), ]), + options: const ChatGoogleGenerativeAIOptions( + model: 'gemini-pro-vision', + ), ); expect(res.output.content.toLowerCase(), contains('apple')); @@ -119,8 +122,7 @@ void main() { ), ); expect(res.output.content.length, lessThan(20)); - // It seems the gemini-1.5 doesn't return length reason anymore - // expect(res.finishReason, FinishReason.length); + expect(res.finishReason, FinishReason.length); }); test('Test Multi-turn conversations with gemini-pro', () async { @@ -175,7 +177,7 @@ void main() { 'properties': { 'location': { 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', + 'description': 'The city and state, e.g. San Francisco, CA', }, 'unit': { 'type': 'string', @@ -194,7 +196,7 @@ void main() { ); final humanMessage = ChatMessage.humanText( - 'What’s the weather like in Boston, US and Madrid, Spain in Celsius?', + 'What’s the weather like in Boston and Madrid right now in celsius?', ); final res1 = await model.invoke(PromptValue.chat([humanMessage])); diff --git a/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart b/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart index a2f88906..bc942e51 100644 --- a/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart +++ b/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart @@ -49,7 +49,8 @@ void main() { expect(res[1].length, 768); }); - test('Test shortening embeddings', () async { + // TODO https://github.com/google-gemini/generative-ai-dart/pull/149 + test('Test shortening embeddings', skip: true, () async { embeddings.dimensions = 256; final res = await embeddings.embedQuery('Hello world'); expect(res.length, 256); diff --git a/packages/langchain_huggingface/pubspec.yaml b/packages/langchain_huggingface/pubspec.yaml index 2f29e62b..576a8f6f 100644 --- a/packages/langchain_huggingface/pubspec.yaml +++ b/packages/langchain_huggingface/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_huggingface issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_huggingface homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com publish_to: none # Remove when the package is ready to be published topics: @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" diff --git a/packages/langchain_microsoft/pubspec.yaml b/packages/langchain_microsoft/pubspec.yaml index 11d0021c..685287b7 100644 --- a/packages/langchain_microsoft/pubspec.yaml +++ b/packages/langchain_microsoft/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_microsoft issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_microsoft homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com publish_to: none # Remove when the package is ready to be published topics: @@ -14,4 +14,4 @@ topics: - langchain 
environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" diff --git a/packages/langchain_mistralai/CHANGELOG.md b/packages/langchain_mistralai/CHANGELOG.md index 99b6c0e2..c87fd2db 100644 --- a/packages/langchain_mistralai/CHANGELOG.md +++ b/packages/langchain_mistralai/CHANGELOG.md @@ -1,29 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.2.3+1 - - - Update a dependency to the latest release. - -## 0.2.3 - - - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) - -## 0.2.2 - - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -## 0.2.1 - - - Update a dependency to the latest release. - -## 0.2.1 - - - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) - ## 0.2.0+1 - Update a dependency to the latest release. diff --git a/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart b/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart index 70f6bd4b..ae0877a0 100644 --- a/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart +++ b/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart @@ -156,7 +156,7 @@ class ChatMistralAI extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatMistralAIOptions( - model: defaultModel, + model: 'mistral-small', ), this.encoding = 'cl100k_base', }) : _client = MistralAIClient( @@ -179,9 +179,6 @@ class ChatMistralAI extends BaseChatModel { @override String get modelType => 'chat-mistralai'; - /// The default model to use unless another is specified. - static const defaultModel = 'mistral-small'; - @override Future invoke( final PromptValue input, { @@ -219,7 +216,7 @@ class ChatMistralAI extends BaseChatModel { }) { return ChatCompletionRequest( model: ChatCompletionModel.modelId( - options?.model ?? defaultOptions.model ?? defaultModel, + options?.model ?? defaultOptions.model ?? throwNullModelError(), ), messages: messages.toChatCompletionMessages(), temperature: options?.temperature ?? defaultOptions.temperature, @@ -251,7 +248,7 @@ class ChatMistralAI extends BaseChatModel { return encoding.encode(promptValue.toString()); } - @override + /// Closes the client and cleans up any resources associated with it. 
void close() { _client.endSession(); } diff --git a/packages/langchain_mistralai/lib/src/chat_models/types.dart b/packages/langchain_mistralai/lib/src/chat_models/types.dart index d9a75761..60158ea7 100644 --- a/packages/langchain_mistralai/lib/src/chat_models/types.dart +++ b/packages/langchain_mistralai/lib/src/chat_models/types.dart @@ -1,17 +1,12 @@ import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/tools.dart'; -import 'package:meta/meta.dart'; /// {@template chat_mistral_ai_options} /// Options to pass into ChatMistralAI. -/// -/// You can check the list of available models [here](https://docs.mistral.ai/models). /// {@endtemplate} -@immutable class ChatMistralAIOptions extends ChatModelOptions { /// {@macro chat_mistral_ai_options} const ChatMistralAIOptions({ - super.model, + this.model = 'mistral-small', this.temperature, this.topP, this.maxTokens, @@ -20,6 +15,11 @@ class ChatMistralAIOptions extends ChatModelOptions { super.concurrencyLimit, }); + /// ID of the model to use. You can use the [List Available Models](https://docs.mistral.ai/api#operation/listModels) + /// API to see all of your available models, or see our [Model overview](https://docs.mistral.ai/models) + /// for model descriptions. + final String? model; + /// What sampling temperature to use, between 0.0 and 2.0. Higher values like /// 0.8 will make the output more random, while lower values like 0.2 will /// make it more focused and deterministic. @@ -47,7 +47,8 @@ class ChatMistralAIOptions extends ChatModelOptions { /// If set, different calls will generate deterministic results. final int? randomSeed; - @override + /// Creates a copy of this [ChatMistralAIOptions] object with the given fields + /// replaced with the new values. ChatMistralAIOptions copyWith({ final String? model, final double? temperature, @@ -55,9 +56,6 @@ class ChatMistralAIOptions extends ChatModelOptions { final int? maxTokens, final bool? safePrompt, final int? randomSeed, - final List? tools, - final ChatToolChoice? toolChoice, - final int? concurrencyLimit, }) { return ChatMistralAIOptions( model: model ?? this.model, @@ -66,42 +64,6 @@ class ChatMistralAIOptions extends ChatModelOptions { maxTokens: maxTokens ?? this.maxTokens, safePrompt: safePrompt ?? this.safePrompt, randomSeed: randomSeed ?? this.randomSeed, - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, - ); - } - - @override - ChatMistralAIOptions merge(covariant ChatMistralAIOptions? 
other) { - return copyWith( - model: other?.model, - temperature: other?.temperature, - topP: other?.topP, - maxTokens: other?.maxTokens, - safePrompt: other?.safePrompt, - randomSeed: other?.randomSeed, - concurrencyLimit: other?.concurrencyLimit, ); } - - @override - bool operator ==(covariant final ChatMistralAIOptions other) { - return model == other.model && - temperature == other.temperature && - topP == other.topP && - maxTokens == other.maxTokens && - safePrompt == other.safePrompt && - randomSeed == other.randomSeed && - concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return model.hashCode ^ - temperature.hashCode ^ - topP.hashCode ^ - maxTokens.hashCode ^ - safePrompt.hashCode ^ - randomSeed.hashCode ^ - concurrencyLimit.hashCode; - } } diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 4b29f8a0..90b027e9 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_mistralai description: LangChain.dart integration module for Mistral AI (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.2.3+1 +version: 0.2.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_mistralai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_mistralai homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -14,15 +14,15 @@ topics: - mistral environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - collection: ^1.18.0 - http: ^1.2.2 - langchain_core: 0.3.6 + collection: ">=1.17.0 <1.19.0" + http: ^1.1.0 + langchain_core: ^0.3.1 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - mistralai_dart: ^0.0.3+3 + mistralai_dart: ^0.0.3+1 dev_dependencies: - test: ^1.25.8 + test: ^1.25.2 diff --git a/packages/langchain_mistralai/pubspec_overrides.yaml b/packages/langchain_mistralai/pubspec_overrides.yaml index 0bb3e94e..4a44a89b 100644 --- a/packages/langchain_mistralai/pubspec_overrides.yaml +++ b/packages/langchain_mistralai/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_core,mistralai_dart +# melos_managed_dependency_overrides: mistralai_dart,langchain_core dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index f83459af..6f38a23b 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -1,37 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.3.2 - - - **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). ([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) - -## 0.3.1 - - - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). 
([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) - - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) - -## 0.3.0 - - - **FEAT**: Add tool calling support in ChatOllama ([#505](https://github.com/davidmigloz/langchain_dart/issues/505)). ([6ffde204](https://github.com/davidmigloz/langchain_dart/commit/6ffde2043c1e865411c8b1096063619d6bcd80aa)) - - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) - -## 0.2.2+1 - - - **DOCS**: Update ChatOllama API docs. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) - -## 0.2.2 - - - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) - -## 0.2.1+1 - - - Update a dependency to the latest release. - ## 0.2.1 - **FEAT**: Handle finish reason in ChatOllama ([#416](https://github.com/davidmigloz/langchain_dart/issues/416)). 
([a5e1af13](https://github.com/davidmigloz/langchain_dart/commit/a5e1af13ef4d2db690ab599dbf5e42f28659a059)) diff --git a/packages/langchain_ollama/README.md b/packages/langchain_ollama/README.md index 885dbf9f..e6d6d884 100644 --- a/packages/langchain_ollama/README.md +++ b/packages/langchain_ollama/README.md @@ -2,7 +2,7 @@ [![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) [![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment) -[![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) +[![langchain_ollama](https://img.shields.io/pub/v/langchain_ollam.svg)](https://pub.dev/packages/langchain_ollama) [![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) [![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) @@ -13,7 +13,7 @@ - LLMs: * `Ollama`: wrapper around Ollama Completions API. - Chat models: - * `ChatOllama`: wrapper around Ollama Chat API in a chat-like fashion. + * `ChatOllama`: wrapper around Ollama Completions API in a chat-like fashion. - Embeddings: * `OllamaEmbeddings`: wrapper around Ollama Embeddings API. diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_models.dart b/packages/langchain_ollama/lib/src/chat_models/chat_models.dart index 0232e939..731f4e59 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_models.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_models.dart @@ -1,2 +1,2 @@ -export 'chat_ollama/chat_ollama.dart'; -export 'chat_ollama/types.dart'; +export 'chat_ollama.dart'; +export 'types.dart'; diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart similarity index 70% rename from packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart rename to packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart index 190170d6..a62962e4 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart @@ -5,14 +5,15 @@ import 'package:langchain_tiktoken/langchain_tiktoken.dart'; import 'package:ollama_dart/ollama_dart.dart'; import 'package:uuid/uuid.dart'; +import '../llms/mappers.dart'; import 'mappers.dart'; import 'types.dart'; -/// Wrapper around [Ollama](https://ollama.ai) Chat API that enables +/// Wrapper around [Ollama](https://ollama.ai) Completions API that enables /// to interact with the LLMs in a chat-like fashion. /// /// Ollama allows you to run open-source large language models, -/// such as Llama 3.2, Gemma 2 or LLaVA, locally. +/// such as Llama 3 or LLaVA, locally. /// /// For a complete list of supported models and model variants, see the /// [Ollama model library](https://ollama.ai/library). @@ -34,7 +35,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. 
Fetch a model via `ollama pull ` -/// * e.g., for Llama 3: `ollama pull llama3.2` +/// * e.g., for Llama 3: `ollama pull llama3` /// /// ### Ollama base URL /// @@ -55,7 +56,7 @@ import 'types.dart'; /// ```dart /// final chatModel = ChatOllama( /// defaultOptions: const ChatOllamaOptions( -/// model: 'llama3.2', +/// model: 'llama3', /// temperature: 0, /// format: 'json', /// ), @@ -87,7 +88,7 @@ import 'types.dart'; /// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ -/// 'q1': prompt1 | chatModel.bind(const ChatOllamaOptions(model: 'llama3.2')) | outputParser, +/// 'q1': prompt1 | chatModel.bind(const ChatOllamaOptions(model: 'llama3')) | outputParser, /// 'q2': prompt2| chatModel.bind(const ChatOllamaOptions(model: 'mistral')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); @@ -150,7 +151,7 @@ class ChatOllama extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatOllamaOptions( - model: defaultModel, + model: 'llama3', ), this.encoding = 'cl100k_base', }) : _client = OllamaClient( @@ -175,9 +176,6 @@ class ChatOllama extends BaseChatModel { @override String get modelType => 'chat-ollama'; - /// The default model to use unless another is specified. - static const defaultModel = 'llama3.2'; - @override Future invoke( final PromptValue input, { @@ -185,10 +183,9 @@ class ChatOllama extends BaseChatModel { }) async { final id = _uuid.v4(); final completion = await _client.generateChatCompletion( - request: generateChatCompletionRequest( + request: _generateCompletionRequest( input.toChatMessages(), options: options, - defaultOptions: defaultOptions, ), ); return completion.toChatResult(id); @@ -202,11 +199,9 @@ class ChatOllama extends BaseChatModel { final id = _uuid.v4(); return _client .generateChatCompletionStream( - request: generateChatCompletionRequest( + request: _generateCompletionRequest( input.toChatMessages(), options: options, - defaultOptions: defaultOptions, - stream: true, ), ) .map( @@ -214,6 +209,55 @@ class ChatOllama extends BaseChatModel { ); } + /// Creates a [GenerateChatCompletionRequest] from the given input. + GenerateChatCompletionRequest _generateCompletionRequest( + final List messages, { + final bool stream = false, + final ChatOllamaOptions? options, + }) { + return GenerateChatCompletionRequest( + model: options?.model ?? defaultOptions.model ?? throwNullModelError(), + messages: messages.toMessages(), + format: options?.format?.toResponseFormat(), + keepAlive: options?.keepAlive, + stream: stream, + options: RequestOptions( + numKeep: options?.numKeep ?? defaultOptions.numKeep, + seed: options?.seed ?? defaultOptions.seed, + numPredict: options?.numPredict ?? defaultOptions.numPredict, + topK: options?.topK ?? defaultOptions.topK, + topP: options?.topP ?? defaultOptions.topP, + tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, + typicalP: options?.typicalP ?? defaultOptions.typicalP, + repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, + temperature: options?.temperature ?? defaultOptions.temperature, + repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty, + presencePenalty: + options?.presencePenalty ?? defaultOptions.presencePenalty, + frequencyPenalty: + options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, + mirostat: options?.mirostat ?? defaultOptions.mirostat, + mirostatTau: options?.mirostatTau ?? 
defaultOptions.mirostatTau, + mirostatEta: options?.mirostatEta ?? defaultOptions.mirostatEta, + penalizeNewline: + options?.penalizeNewline ?? defaultOptions.penalizeNewline, + stop: options?.stop ?? defaultOptions.stop, + numa: options?.numa ?? defaultOptions.numa, + numCtx: options?.numCtx ?? defaultOptions.numCtx, + numBatch: options?.numBatch ?? defaultOptions.numBatch, + numGpu: options?.numGpu ?? defaultOptions.numGpu, + mainGpu: options?.mainGpu ?? defaultOptions.mainGpu, + lowVram: options?.lowVram ?? defaultOptions.lowVram, + f16Kv: options?.f16KV ?? defaultOptions.f16KV, + logitsAll: options?.logitsAll ?? defaultOptions.logitsAll, + vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly, + useMmap: options?.useMmap ?? defaultOptions.useMmap, + useMlock: options?.useMlock ?? defaultOptions.useMlock, + numThread: options?.numThread ?? defaultOptions.numThread, + ), + ); + } + /// Tokenizes the given prompt using tiktoken. /// /// Currently Ollama does not provide a tokenizer for the models it supports. @@ -234,7 +278,7 @@ class ChatOllama extends BaseChatModel { return encoding.encode(promptValue.toString()); } - @override + /// Closes the client and cleans up any resources associated with it. void close() { _client.endSession(); } diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart deleted file mode 100644 index ce12e70f..00000000 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart +++ /dev/null @@ -1,267 +0,0 @@ -// ignore_for_file: public_member_api_docs -import 'dart:convert'; - -import 'package:collection/collection.dart'; -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/language_models.dart'; -import 'package:langchain_core/tools.dart'; -import 'package:ollama_dart/ollama_dart.dart' as o; -import 'package:uuid/uuid.dart'; - -import '../../llms/mappers.dart' show OllamaResponseFormatMapper; -import 'chat_ollama.dart'; -import 'types.dart'; - -/// Creates a [GenerateChatCompletionRequest] from the given input. -o.GenerateChatCompletionRequest generateChatCompletionRequest( - final List messages, { - required final ChatOllamaOptions? options, - required final ChatOllamaOptions defaultOptions, - final bool stream = false, -}) { - return o.GenerateChatCompletionRequest( - model: options?.model ?? defaultOptions.model ?? ChatOllama.defaultModel, - messages: messages.toMessages(), - format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), - keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, - tools: _mapTools( - tools: options?.tools ?? defaultOptions.tools, - toolChoice: options?.toolChoice ?? defaultOptions.toolChoice, - ), - stream: stream, - options: o.RequestOptions( - numKeep: options?.numKeep ?? defaultOptions.numKeep, - seed: options?.seed ?? defaultOptions.seed, - numPredict: options?.numPredict ?? defaultOptions.numPredict, - topK: options?.topK ?? defaultOptions.topK, - topP: options?.topP ?? defaultOptions.topP, - minP: options?.minP ?? defaultOptions.minP, - tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, - typicalP: options?.typicalP ?? defaultOptions.typicalP, - repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, - temperature: options?.temperature ?? defaultOptions.temperature, - repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty, - presencePenalty: - options?.presencePenalty ?? 
defaultOptions.presencePenalty, - frequencyPenalty: - options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, - mirostat: options?.mirostat ?? defaultOptions.mirostat, - mirostatTau: options?.mirostatTau ?? defaultOptions.mirostatTau, - mirostatEta: options?.mirostatEta ?? defaultOptions.mirostatEta, - penalizeNewline: - options?.penalizeNewline ?? defaultOptions.penalizeNewline, - stop: options?.stop ?? defaultOptions.stop, - numa: options?.numa ?? defaultOptions.numa, - numCtx: options?.numCtx ?? defaultOptions.numCtx, - numBatch: options?.numBatch ?? defaultOptions.numBatch, - numGpu: options?.numGpu ?? defaultOptions.numGpu, - mainGpu: options?.mainGpu ?? defaultOptions.mainGpu, - lowVram: options?.lowVram ?? defaultOptions.lowVram, - f16Kv: options?.f16KV ?? defaultOptions.f16KV, - logitsAll: options?.logitsAll ?? defaultOptions.logitsAll, - vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly, - useMmap: options?.useMmap ?? defaultOptions.useMmap, - useMlock: options?.useMlock ?? defaultOptions.useMlock, - numThread: options?.numThread ?? defaultOptions.numThread, - ), - ); -} - -List? _mapTools({ - final List? tools, - final ChatToolChoice? toolChoice, -}) { - if (tools == null || tools.isEmpty) { - return null; - } - - return switch (toolChoice) { - ChatToolChoiceNone() => null, - ChatToolChoiceAuto() || - ChatToolChoiceRequired() || - null => - tools.map(_mapTool).toList(growable: false), - final ChatToolChoiceForced f => [ - _mapTool(tools.firstWhere((t) => t.name == f.name)), - ] - }; -} - -o.Tool _mapTool(final ToolSpec tool) { - return o.Tool( - function: o.ToolFunction( - name: tool.name, - description: tool.description, - parameters: tool.inputJsonSchema, - ), - ); -} - -extension OllamaChatMessagesMapper on List { - List toMessages() { - return map(_mapMessage).expand((final msg) => msg).toList(growable: false); - } - - List _mapMessage(final ChatMessage msg) { - return switch (msg) { - final SystemChatMessage msg => [ - o.Message( - role: o.MessageRole.system, - content: msg.content, - ), - ], - final HumanChatMessage msg => _mapHumanMessage(msg), - final AIChatMessage msg => _mapAIMessage(msg), - final ToolChatMessage msg => [ - o.Message( - role: o.MessageRole.tool, - content: msg.content, - ), - ], - CustomChatMessage() => - throw UnsupportedError('Ollama does not support custom messages'), - }; - } - - List _mapHumanMessage(final HumanChatMessage message) { - return switch (message.content) { - final ChatMessageContentText c => [ - o.Message( - role: o.MessageRole.user, - content: c.text, - ), - ], - final ChatMessageContentImage c => [ - o.Message( - role: o.MessageRole.user, - content: c.data, - ), - ], - final ChatMessageContentMultiModal c => _mapContentMultiModal(c), - }; - } - - List _mapContentMultiModal( - final ChatMessageContentMultiModal content, - ) { - final parts = content.parts.groupListsBy((final p) => p.runtimeType); - - if ((parts[ChatMessageContentMultiModal]?.length ?? 0) > 0) { - throw UnsupportedError( - 'Cannot have multimodal content in multimodal content', - ); - } - - // If there's only one text part and the rest are images, then we combine them in one message - if ((parts[ChatMessageContentText]?.length ?? 
0) == 1) { - return [ - o.Message( - role: o.MessageRole.user, - content: - (parts[ChatMessageContentText]!.first as ChatMessageContentText) - .text, - images: parts[ChatMessageContentImage] - ?.map((final p) => (p as ChatMessageContentImage).data) - .toList(growable: false), - ), - ]; - } - - // Otherwise, we return the parts as separate messages - return content.parts - .map( - (final p) => switch (p) { - final ChatMessageContentText c => o.Message( - role: o.MessageRole.user, - content: c.text, - ), - final ChatMessageContentImage c => o.Message( - role: o.MessageRole.user, - content: c.data, - ), - ChatMessageContentMultiModal() => throw UnsupportedError( - 'Cannot have multimodal content in multimodal content', - ), - }, - ) - .toList(growable: false); - } - - List _mapAIMessage(final AIChatMessage message) { - return [ - o.Message( - role: o.MessageRole.assistant, - content: message.content, - toolCalls: message.toolCalls.isNotEmpty - ? message.toolCalls.map(_mapToolCall).toList(growable: false) - : null, - ), - ]; - } - - o.ToolCall _mapToolCall(final AIChatMessageToolCall toolCall) { - return o.ToolCall( - function: o.ToolCallFunction( - name: toolCall.name, - arguments: toolCall.arguments, - ), - ); - } -} - -extension ChatResultMapper on o.GenerateChatCompletionResponse { - ChatResult toChatResult(final String id, {final bool streaming = false}) { - return ChatResult( - id: id, - output: AIChatMessage( - content: message.content, - toolCalls: - message.toolCalls?.map(_mapToolCall).toList(growable: false) ?? - const [], - ), - finishReason: _mapFinishReason(doneReason), - metadata: { - 'model': model, - 'created_at': createdAt, - 'done': done, - 'total_duration': totalDuration, - 'load_duration': loadDuration, - 'prompt_eval_count': promptEvalCount, - 'prompt_eval_duration': promptEvalDuration, - 'eval_count': evalCount, - 'eval_duration': evalDuration, - }, - usage: _mapUsage(), - streaming: streaming, - ); - } - - AIChatMessageToolCall _mapToolCall(final o.ToolCall toolCall) { - return AIChatMessageToolCall( - id: const Uuid().v4(), - name: toolCall.function?.name ?? '', - argumentsRaw: json.encode(toolCall.function?.arguments ?? const {}), - arguments: toolCall.function?.arguments ?? const {}, - ); - } - - LanguageModelUsage _mapUsage() { - return LanguageModelUsage( - promptTokens: promptEvalCount, - responseTokens: evalCount, - totalTokens: (promptEvalCount != null || evalCount != null) - ? (promptEvalCount ?? 0) + (evalCount ?? 0) - : null, - ); - } - - FinishReason _mapFinishReason( - final o.DoneReason? 
reason, - ) => - switch (reason) { - o.DoneReason.stop => FinishReason.stop, - o.DoneReason.length => FinishReason.length, - o.DoneReason.load => FinishReason.unspecified, - null => FinishReason.unspecified, - }; -} diff --git a/packages/langchain_ollama/lib/src/chat_models/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/mappers.dart new file mode 100644 index 00000000..0553fb88 --- /dev/null +++ b/packages/langchain_ollama/lib/src/chat_models/mappers.dart @@ -0,0 +1,142 @@ +// ignore_for_file: public_member_api_docs +import 'package:collection/collection.dart'; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/language_models.dart'; +import 'package:ollama_dart/ollama_dart.dart'; + +extension OllamaChatMessagesMapper on List { + List toMessages() { + return map(_mapMessage).expand((final msg) => msg).toList(growable: false); + } + + List _mapMessage(final ChatMessage msg) { + return switch (msg) { + final SystemChatMessage msg => [ + Message( + role: MessageRole.system, + content: msg.content, + ), + ], + final HumanChatMessage msg => _mapHumanMessage(msg), + final AIChatMessage msg => [ + Message( + role: MessageRole.assistant, + content: msg.content, + ), + ], + ToolChatMessage() => + throw UnsupportedError('Ollama does not support tool calls'), + CustomChatMessage() => + throw UnsupportedError('Ollama does not support custom messages'), + }; + } + + List _mapHumanMessage(final HumanChatMessage message) { + return switch (message.content) { + final ChatMessageContentText c => [ + Message( + role: MessageRole.user, + content: c.text, + ), + ], + final ChatMessageContentImage c => [ + Message( + role: MessageRole.user, + content: c.data, + ), + ], + final ChatMessageContentMultiModal c => _mapContentMultiModal(c), + }; + } + + List _mapContentMultiModal( + final ChatMessageContentMultiModal content, + ) { + final parts = content.parts.groupListsBy((final p) => p.runtimeType); + + if ((parts[ChatMessageContentMultiModal]?.length ?? 0) > 0) { + throw UnsupportedError( + 'Cannot have multimodal content in multimodal content', + ); + } + + // If there's only one text part and the rest are images, then we combine them in one message + if ((parts[ChatMessageContentText]?.length ?? 0) == 1) { + return [ + Message( + role: MessageRole.user, + content: + (parts[ChatMessageContentText]!.first as ChatMessageContentText) + .text, + images: parts[ChatMessageContentImage] + ?.map((final p) => (p as ChatMessageContentImage).data) + .toList(growable: false), + ), + ]; + } + + // Otherwise, we return the parts as separate messages + return content.parts + .map( + (final p) => switch (p) { + final ChatMessageContentText c => Message( + role: MessageRole.user, + content: c.text, + ), + final ChatMessageContentImage c => Message( + role: MessageRole.user, + content: c.data, + ), + ChatMessageContentMultiModal() => throw UnsupportedError( + 'Cannot have multimodal content in multimodal content', + ), + }, + ) + .toList(growable: false); + } +} + +extension ChatResultMapper on GenerateChatCompletionResponse { + ChatResult toChatResult(final String id, {final bool streaming = false}) { + return ChatResult( + id: id, + output: AIChatMessage( + content: message?.content ?? 
'', + ), + finishReason: _mapFinishReason(doneReason), + metadata: { + 'model': model, + 'created_at': createdAt, + 'done': done, + 'total_duration': totalDuration, + 'load_duration': loadDuration, + 'prompt_eval_count': promptEvalCount, + 'prompt_eval_duration': promptEvalDuration, + 'eval_count': evalCount, + 'eval_duration': evalDuration, + }, + usage: _mapUsage(), + streaming: streaming, + ); + } + + LanguageModelUsage _mapUsage() { + return LanguageModelUsage( + promptTokens: promptEvalCount, + responseTokens: evalCount, + totalTokens: (promptEvalCount != null || evalCount != null) + ? (promptEvalCount ?? 0) + (evalCount ?? 0) + : null, + ); + } + + FinishReason _mapFinishReason( + final DoneReason? reason, + ) => + switch (reason) { + DoneReason.stop => FinishReason.stop, + DoneReason.length => FinishReason.length, + DoneReason.load => FinishReason.unspecified, + null => FinishReason.unspecified, + }; +} diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart b/packages/langchain_ollama/lib/src/chat_models/types.dart similarity index 61% rename from packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart rename to packages/langchain_ollama/lib/src/chat_models/types.dart index cf02b00c..3f14d2a2 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/types.dart @@ -1,22 +1,14 @@ -import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/tools.dart'; -import 'package:meta/meta.dart'; -import '../../../langchain_ollama.dart'; -import '../../llms/types.dart'; +import '../llms/types.dart'; /// {@template chat_ollama_options} /// Options to pass into ChatOllama. -/// -/// For a complete list of supported models and model variants, see the -/// [Ollama model library](https://ollama.ai/library). /// {@endtemplate} -@immutable class ChatOllamaOptions extends ChatModelOptions { /// {@macro chat_ollama_options} const ChatOllamaOptions({ - super.model, + this.model = 'llama3', this.format, this.keepAlive, this.numKeep, @@ -24,7 +16,6 @@ class ChatOllamaOptions extends ChatModelOptions { this.numPredict, this.topK, this.topP, - this.minP, this.tfsZ, this.typicalP, this.repeatLastN, @@ -49,11 +40,12 @@ class ChatOllamaOptions extends ChatModelOptions { this.useMmap, this.useMlock, this.numThread, - super.tools, - super.toolChoice, super.concurrencyLimit, }); + /// The model used to generate completions + final String? model; + /// The format to return a response in. Currently the only accepted value is /// json. /// @@ -92,20 +84,12 @@ class ChatOllamaOptions extends ChatModelOptions { /// (Default: 40) final int? topK; - /// Works together with [topK]. A higher value (e.g., 0.95) will lead to more + /// Works together with top-k. A higher value (e.g., 0.95) will lead to more /// diverse text, while a lower value (e.g., 0.5) will generate more focused /// and conservative text. /// (Default: 0.9) final double? topP; - /// Alternative to the [topP], and aims to ensure a balance of quality and - /// variety. [minP] represents the minimum probability for a token to be - /// considered, relative to the probability of the most likely token. For - /// example, with min_p=0.05 and the most likely token having a probability - /// of 0.9, logits with a value less than 0.05*0.9=0.045 are filtered out. - /// (Default: 0.0) - final double? 
minP; - /// Tail free sampling is used to reduce the impact of less probable tokens /// from the output. A higher value (e.g., 2.0) will reduce the impact more, /// while a value of 1.0 disables this setting. @@ -157,7 +141,7 @@ class ChatOllamaOptions extends ChatModelOptions { final double? mirostatEta; /// Penalize newlines in the output. - /// (Default: true) + /// (Default: false) final bool? penalizeNewline; /// Sequences where the API will stop generating further tokens. The returned @@ -188,7 +172,7 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? lowVram; /// Enable f16 key/value. - /// (Default: true) + /// (Default: false) final bool? f16KV; /// Enable logits all. @@ -213,17 +197,16 @@ class ChatOllamaOptions extends ChatModelOptions { /// the logical number of cores). final int? numThread; - @override + /// Creates a copy of this [ChatOllamaOptions] object with the given fields + /// replaced with the new values. ChatOllamaOptions copyWith({ final String? model, final OllamaResponseFormat? format, - final int? keepAlive, final int? numKeep, final int? seed, final int? numPredict, final int? topK, final double? topP, - final double? minP, final double? tfsZ, final double? typicalP, final int? repeatLastN, @@ -239,6 +222,7 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? numa, final int? numCtx, final int? numBatch, + final int? numGqa, final int? numGpu, final int? mainGpu, final bool? lowVram, @@ -247,21 +231,19 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? vocabOnly, final bool? useMmap, final bool? useMlock, + final bool? embeddingOnly, + final double? ropeFrequencyBase, + final double? ropeFrequencyScale, final int? numThread, - final List? tools, - final ChatToolChoice? toolChoice, - final int? concurrencyLimit, }) { return ChatOllamaOptions( model: model ?? this.model, format: format ?? this.format, - keepAlive: keepAlive ?? this.keepAlive, numKeep: numKeep ?? this.numKeep, seed: seed ?? this.seed, numPredict: numPredict ?? this.numPredict, topK: topK ?? this.topK, topP: topP ?? this.topP, - minP: minP ?? this.minP, tfsZ: tfsZ ?? this.tfsZ, typicalP: typicalP ?? this.typicalP, repeatLastN: repeatLastN ?? this.repeatLastN, @@ -286,125 +268,6 @@ class ChatOllamaOptions extends ChatModelOptions { useMmap: useMmap ?? this.useMmap, useMlock: useMlock ?? this.useMlock, numThread: numThread ?? this.numThread, - tools: tools ?? this.tools, - toolChoice: toolChoice ?? this.toolChoice, - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } - - @override - ChatOllamaOptions merge(covariant final ChatOllamaOptions? 
other) { - return copyWith( - model: other?.model, - format: other?.format, - keepAlive: other?.keepAlive, - numKeep: other?.numKeep, - seed: other?.seed, - numPredict: other?.numPredict, - topK: other?.topK, - topP: other?.topP, - minP: other?.minP, - tfsZ: other?.tfsZ, - typicalP: other?.typicalP, - repeatLastN: other?.repeatLastN, - temperature: other?.temperature, - repeatPenalty: other?.repeatPenalty, - presencePenalty: other?.presencePenalty, - frequencyPenalty: other?.frequencyPenalty, - mirostat: other?.mirostat, - mirostatTau: other?.mirostatTau, - mirostatEta: other?.mirostatEta, - penalizeNewline: other?.penalizeNewline, - stop: other?.stop, - numa: other?.numa, - numCtx: other?.numCtx, - numBatch: other?.numBatch, - numGpu: other?.numGpu, - mainGpu: other?.mainGpu, - lowVram: other?.lowVram, - f16KV: other?.f16KV, - logitsAll: other?.logitsAll, - vocabOnly: other?.vocabOnly, - useMmap: other?.useMmap, - useMlock: other?.useMlock, - numThread: other?.numThread, - concurrencyLimit: other?.concurrencyLimit, - ); - } - - @override - bool operator ==(covariant final ChatOllamaOptions other) { - return model == other.model && - format == other.format && - keepAlive == other.keepAlive && - numKeep == other.numKeep && - seed == other.seed && - numPredict == other.numPredict && - topK == other.topK && - topP == other.topP && - minP == other.minP && - tfsZ == other.tfsZ && - typicalP == other.typicalP && - repeatLastN == other.repeatLastN && - temperature == other.temperature && - repeatPenalty == other.repeatPenalty && - presencePenalty == other.presencePenalty && - frequencyPenalty == other.frequencyPenalty && - mirostat == other.mirostat && - mirostatTau == other.mirostatTau && - mirostatEta == other.mirostatEta && - penalizeNewline == other.penalizeNewline && - const ListEquality().equals(stop, other.stop) && - numa == other.numa && - numCtx == other.numCtx && - numBatch == other.numBatch && - numGpu == other.numGpu && - mainGpu == other.mainGpu && - lowVram == other.lowVram && - f16KV == other.f16KV && - logitsAll == other.logitsAll && - vocabOnly == other.vocabOnly && - useMmap == other.useMmap && - useMlock == other.useMlock && - numThread == other.numThread && - concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return model.hashCode ^ - format.hashCode ^ - keepAlive.hashCode ^ - numKeep.hashCode ^ - seed.hashCode ^ - numPredict.hashCode ^ - topK.hashCode ^ - topP.hashCode ^ - minP.hashCode ^ - tfsZ.hashCode ^ - typicalP.hashCode ^ - repeatLastN.hashCode ^ - temperature.hashCode ^ - repeatPenalty.hashCode ^ - presencePenalty.hashCode ^ - frequencyPenalty.hashCode ^ - mirostat.hashCode ^ - mirostatTau.hashCode ^ - mirostatEta.hashCode ^ - penalizeNewline.hashCode ^ - const ListEquality().hash(stop) ^ - numa.hashCode ^ - numCtx.hashCode ^ - numBatch.hashCode ^ - numGpu.hashCode ^ - mainGpu.hashCode ^ - lowVram.hashCode ^ - f16KV.hashCode ^ - logitsAll.hashCode ^ - vocabOnly.hashCode ^ - useMmap.hashCode ^ - useMlock.hashCode ^ - numThread.hashCode ^ - concurrencyLimit.hashCode; - } } diff --git a/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart b/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart index ffef2882..66ac2edb 100644 --- a/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart +++ b/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart @@ -13,7 +13,7 @@ import 'package:ollama_dart/ollama_dart.dart'; /// /// Example: /// ```dart -/// final embeddings = OllamaEmbeddings(model: 
'llama3.2'); +/// final embeddings = OllamaEmbeddings(model: 'llama3'); /// final res = await embeddings.embedQuery('Hello world'); /// ``` /// @@ -23,7 +23,7 @@ import 'package:ollama_dart/ollama_dart.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for `Llama-7b`: `ollama pull llama3.2` +/// * e.g., for `Llama-7b`: `ollama pull llama3` /// /// ### Advance /// @@ -76,7 +76,7 @@ class OllamaEmbeddings implements Embeddings { /// - `client`: the HTTP client to use. You can set your own HTTP client if /// you need further customization (e.g. to use a Socks5 proxy). OllamaEmbeddings({ - this.model = 'llama3.2', + this.model = 'llama3', this.keepAlive, final String baseUrl = 'http://localhost:11434/api', final Map? headers, diff --git a/packages/langchain_ollama/lib/src/llms/ollama.dart b/packages/langchain_ollama/lib/src/llms/ollama.dart index 9be8ed12..7eeb7e7c 100644 --- a/packages/langchain_ollama/lib/src/llms/ollama.dart +++ b/packages/langchain_ollama/lib/src/llms/ollama.dart @@ -20,7 +20,7 @@ import 'types.dart'; /// ```dart /// final llm = Ollama( /// defaultOption: const OllamaOptions( -/// model: 'llama3.2', +/// model: 'llama3', /// temperature: 1, /// ), /// ); @@ -49,7 +49,7 @@ import 'types.dart'; /// ```dart /// final llm = Ollama( /// defaultOptions: const OllamaOptions( -/// model: 'llama3.2', +/// model: 'llama3', /// temperature: 0, /// format: 'json', /// ), @@ -83,7 +83,7 @@ import 'types.dart'; /// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ -/// 'q1': prompt1 | llm.bind(const OllamaOptions(model: 'llama3.2')) | outputParser, +/// 'q1': prompt1 | llm.bind(const OllamaOptions(model: 'llama3')) | outputParser, /// 'q2': prompt2| llm.bind(const OllamaOptions(model: 'mistral')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); @@ -93,7 +93,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for `Llama-7b`: `ollama pull llama3.2` +/// * e.g., for `Llama-7b`: `ollama pull llama3` /// /// ### Advance /// @@ -152,7 +152,7 @@ class Ollama extends BaseLLM { final Map? queryParams, final http.Client? client, super.defaultOptions = const OllamaOptions( - model: defaultModel, + model: 'llama3', ), this.encoding = 'cl100k_base', }) : _client = OllamaClient( @@ -177,9 +177,6 @@ class Ollama extends BaseLLM { @override String get modelType => 'ollama'; - /// The default model to use unless another is specified. - static const defaultModel = 'llama3.2'; - @override Future invoke( final PromptValue input, { @@ -213,15 +210,14 @@ class Ollama extends BaseLLM { final OllamaOptions? options, }) { return GenerateCompletionRequest( - model: options?.model ?? defaultOptions.model ?? defaultModel, + model: options?.model ?? defaultOptions.model ?? throwNullModelError(), prompt: prompt, - system: options?.system ?? defaultOptions.system, - suffix: options?.suffix ?? defaultOptions.suffix, - template: options?.template ?? defaultOptions.template, - context: options?.context ?? defaultOptions.context, - format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), - raw: options?.raw ?? defaultOptions.raw, - keepAlive: options?.keepAlive ?? 
defaultOptions.keepAlive, + system: options?.system, + template: options?.template, + context: options?.context, + format: options?.format?.toResponseFormat(), + raw: options?.raw, + keepAlive: options?.keepAlive, stream: stream, options: RequestOptions( numKeep: options?.numKeep ?? defaultOptions.numKeep, @@ -229,7 +225,6 @@ class Ollama extends BaseLLM { numPredict: options?.numPredict ?? defaultOptions.numPredict, topK: options?.topK ?? defaultOptions.topK, topP: options?.topP ?? defaultOptions.topP, - minP: options?.minP ?? defaultOptions.minP, tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, typicalP: options?.typicalP ?? defaultOptions.typicalP, repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, @@ -281,7 +276,7 @@ class Ollama extends BaseLLM { return encoding.encode(promptValue.toString()); } - @override + /// Closes the client and cleans up any resources associated with it. void close() { _client.endSession(); } diff --git a/packages/langchain_ollama/lib/src/llms/types.dart b/packages/langchain_ollama/lib/src/llms/types.dart index a8807248..dcbe7669 100644 --- a/packages/langchain_ollama/lib/src/llms/types.dart +++ b/packages/langchain_ollama/lib/src/llms/types.dart @@ -1,20 +1,13 @@ -import 'package:collection/collection.dart'; import 'package:langchain_core/llms.dart'; -import 'package:meta/meta.dart'; /// {@template ollama_options} /// Options to pass into the Ollama LLM. -/// -/// For a complete list of supported models and model variants, see the -/// [Ollama model library](https://ollama.ai/library). /// {@endtemplate} -@immutable class OllamaOptions extends LLMOptions { /// {@macro ollama_options} const OllamaOptions({ - super.model, + this.model = 'llama3', this.system, - this.suffix, this.template, this.context, this.format, @@ -25,7 +18,6 @@ class OllamaOptions extends LLMOptions { this.numPredict, this.topK, this.topP, - this.minP, this.tfsZ, this.typicalP, this.repeatLastN, @@ -53,12 +45,12 @@ class OllamaOptions extends LLMOptions { super.concurrencyLimit, }); + /// The model used to generate completions + final String? model; + /// The system prompt (Overrides what is defined in the Modelfile). final String? system; - /// The text that comes after the inserted text. - final String? suffix; - /// The full prompt or prompt template (overrides what is defined in the /// Modelfile). final String? template; @@ -114,20 +106,12 @@ class OllamaOptions extends LLMOptions { /// (Default: 40) final int? topK; - /// Works together with [topK]. A higher value (e.g., 0.95) will lead to more + /// Works together with top-k. A higher value (e.g., 0.95) will lead to more /// diverse text, while a lower value (e.g., 0.5) will generate more focused /// and conservative text. /// (Default: 0.9) final double? topP; - /// Alternative to the [topP], and aims to ensure a balance of quality and - /// variety. [minP] represents the minimum probability for a token to be - /// considered, relative to the probability of the most likely token. For - /// example, with min_p=0.05 and the most likely token having a probability - /// of 0.9, logits with a value less than 0.05*0.9=0.045 are filtered out. - /// (Default: 0.0) - final double? minP; - /// Tail free sampling is used to reduce the impact of less probable tokens /// from the output. A higher value (e.g., 2.0) will reduce the impact more, /// while a value of 1.0 disables this setting. @@ -235,22 +219,20 @@ class OllamaOptions extends LLMOptions { /// the logical number of cores). final int? 
numThread; - @override + /// Creates a copy of this [OllamaOptions] object with the given fields + /// replaced with the new values. OllamaOptions copyWith({ final String? model, final String? system, - final String? suffix, final String? template, final List? context, final OllamaResponseFormat? format, final bool? raw, - final int? keepAlive, final int? numKeep, final int? seed, final int? numPredict, final int? topK, final double? topP, - final double? minP, final double? tfsZ, final double? typicalP, final int? repeatLastN, @@ -266,6 +248,7 @@ class OllamaOptions extends LLMOptions { final bool? numa, final int? numCtx, final int? numBatch, + final int? numGqa, final int? numGpu, final int? mainGpu, final bool? lowVram, @@ -274,24 +257,23 @@ class OllamaOptions extends LLMOptions { final bool? vocabOnly, final bool? useMmap, final bool? useMlock, + final bool? embeddingOnly, + final double? ropeFrequencyBase, + final double? ropeFrequencyScale, final int? numThread, - final int? concurrencyLimit, }) { return OllamaOptions( model: model ?? this.model, system: system ?? this.system, - suffix: suffix ?? this.suffix, template: template ?? this.template, context: context ?? this.context, format: format ?? this.format, raw: raw ?? this.raw, - keepAlive: keepAlive ?? this.keepAlive, numKeep: numKeep ?? this.numKeep, seed: seed ?? this.seed, numPredict: numPredict ?? this.numPredict, topK: topK ?? this.topK, topP: topP ?? this.topP, - minP: minP ?? this.minP, tfsZ: tfsZ ?? this.tfsZ, typicalP: typicalP ?? this.typicalP, repeatLastN: repeatLastN ?? this.repeatLastN, @@ -316,142 +298,8 @@ class OllamaOptions extends LLMOptions { useMmap: useMmap ?? this.useMmap, useMlock: useMlock ?? this.useMlock, numThread: numThread ?? this.numThread, - concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, ); } - - @override - OllamaOptions merge(covariant final OllamaOptions? 
other) { - return copyWith( - model: other?.model, - system: other?.system, - suffix: other?.suffix, - template: other?.template, - context: other?.context, - format: other?.format, - raw: other?.raw, - keepAlive: other?.keepAlive, - numKeep: other?.numKeep, - seed: other?.seed, - numPredict: other?.numPredict, - topK: other?.topK, - topP: other?.topP, - minP: other?.minP, - tfsZ: other?.tfsZ, - typicalP: other?.typicalP, - repeatLastN: other?.repeatLastN, - temperature: other?.temperature, - repeatPenalty: other?.repeatPenalty, - presencePenalty: other?.presencePenalty, - frequencyPenalty: other?.frequencyPenalty, - mirostat: other?.mirostat, - mirostatTau: other?.mirostatTau, - mirostatEta: other?.mirostatEta, - penalizeNewline: other?.penalizeNewline, - stop: other?.stop, - numa: other?.numa, - numCtx: other?.numCtx, - numBatch: other?.numBatch, - numGpu: other?.numGpu, - mainGpu: other?.mainGpu, - lowVram: other?.lowVram, - f16KV: other?.f16KV, - logitsAll: other?.logitsAll, - vocabOnly: other?.vocabOnly, - useMmap: other?.useMmap, - useMlock: other?.useMlock, - numThread: other?.numThread, - concurrencyLimit: other?.concurrencyLimit, - ); - } - - @override - bool operator ==(covariant final OllamaOptions other) { - return identical(this, other) || - runtimeType == other.runtimeType && - model == other.model && - system == other.system && - suffix == other.suffix && - template == other.template && - const ListEquality().equals(context, other.context) && - format == other.format && - raw == other.raw && - keepAlive == other.keepAlive && - numKeep == other.numKeep && - seed == other.seed && - numPredict == other.numPredict && - topK == other.topK && - topP == other.topP && - minP == other.minP && - tfsZ == other.tfsZ && - typicalP == other.typicalP && - repeatLastN == other.repeatLastN && - temperature == other.temperature && - repeatPenalty == other.repeatPenalty && - presencePenalty == other.presencePenalty && - frequencyPenalty == other.frequencyPenalty && - mirostat == other.mirostat && - mirostatTau == other.mirostatTau && - mirostatEta == other.mirostatEta && - penalizeNewline == other.penalizeNewline && - const ListEquality().equals(stop, other.stop) && - numa == other.numa && - numCtx == other.numCtx && - numBatch == other.numBatch && - numGpu == other.numGpu && - mainGpu == other.mainGpu && - lowVram == other.lowVram && - f16KV == other.f16KV && - logitsAll == other.logitsAll && - vocabOnly == other.vocabOnly && - useMmap == other.useMmap && - useMlock == other.useMlock && - numThread == other.numThread && - concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return model.hashCode ^ - system.hashCode ^ - suffix.hashCode ^ - template.hashCode ^ - const ListEquality().hash(context) ^ - format.hashCode ^ - raw.hashCode ^ - keepAlive.hashCode ^ - numKeep.hashCode ^ - seed.hashCode ^ - numPredict.hashCode ^ - topK.hashCode ^ - topP.hashCode ^ - minP.hashCode ^ - tfsZ.hashCode ^ - typicalP.hashCode ^ - repeatLastN.hashCode ^ - temperature.hashCode ^ - repeatPenalty.hashCode ^ - presencePenalty.hashCode ^ - frequencyPenalty.hashCode ^ - mirostat.hashCode ^ - mirostatTau.hashCode ^ - mirostatEta.hashCode ^ - penalizeNewline.hashCode ^ - const ListEquality().hash(stop) ^ - numa.hashCode ^ - numCtx.hashCode ^ - numBatch.hashCode ^ - numGpu.hashCode ^ - mainGpu.hashCode ^ - lowVram.hashCode ^ - f16KV.hashCode ^ - logitsAll.hashCode ^ - vocabOnly.hashCode ^ - useMmap.hashCode ^ - useMlock.hashCode ^ - numThread.hashCode ^ - concurrencyLimit.hashCode; - } } 
/// The format to return a response in. diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index eb2c1fc8..9b4736df 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_ollama -description: LangChain.dart integration module for Ollama (run Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, Qwen2 and other models locally). -version: 0.3.2 +description: LangChain.dart integration module for Ollama (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). +version: 0.2.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -14,16 +14,16 @@ topics: - ollama environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - collection: ^1.18.0 - http: ^1.2.2 - langchain_core: 0.3.6 + collection: ">=1.17.0 <1.19.0" + http: ^1.1.0 + langchain_core: ^0.3.1 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.2.2 - uuid: ^4.4.2 + ollama_dart: ^0.1.0 + uuid: ^4.3.3 dev_dependencies: - test: ^1.25.8 + test: ^1.25.2 diff --git a/packages/langchain_ollama/pubspec_overrides.yaml b/packages/langchain_ollama/pubspec_overrides.yaml index 1cab36be..9090f50e 100644 --- a/packages/langchain_ollama/pubspec_overrides.yaml +++ b/packages/langchain_ollama/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_core,ollama_dart +# melos_managed_dependency_overrides: ollama_dart,langchain_core dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart index 9aac4640..0fa46c03 100644 --- a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart +++ b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart @@ -6,14 +6,13 @@ import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/output_parsers.dart'; import 'package:langchain_core/prompts.dart'; -import 'package:langchain_core/tools.dart'; import 'package:langchain_ollama/langchain_ollama.dart'; import 'package:test/test.dart'; void main() { group('ChatOllama tests', skip: Platform.environment.containsKey('CI'), () { late ChatOllama chatModel; - const defaultModel = 'llama3.2'; + const defaultModel = 'llama3:latest'; const visionModel = 'llava:latest'; setUp(() async { @@ -108,7 +107,7 @@ void main() { ]), ); expect( - res.output.content.replaceAll(RegExp(r'[\s\n-]'), ''), + res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), contains('123456789'), ); expect(res.finishReason, FinishReason.stop); @@ -210,10 +209,10 @@ void main() { test('Test Multi-turn conversations', () async { final prompt = PromptValue.chat([ - ChatMessage.humanText('List the numbers from 1 to 9 in order.'), + ChatMessage.humanText('List the numbers from 1 to 9 in order. '), ChatMessage.ai('123456789'), ChatMessage.humanText( - 'Remove the number "4" from the list. 
Output only the remaining numbers in ascending order.', + 'Remove the number "4" from the list', ), ]); final res = await chatModel.invoke( @@ -252,160 +251,5 @@ void main() { expect(res.output.content.toLowerCase(), contains('apple')); }); - - const tool1 = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - const tool2 = ToolSpec( - name: 'get_historic_weather', - description: 'Get the historic weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - - test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), - () async { - final model = chatModel.bind( - const ChatOllamaOptions( - model: defaultModel, - tools: [tool1], - ), - ); - - final humanMessage = ChatMessage.humanText( - "What's the weather like in Boston and Madrid right now in celsius?", - ); - final res1 = await model.invoke(PromptValue.chat([humanMessage])); - - final aiMessage1 = res1.output; - expect(aiMessage1.toolCalls, hasLength(2)); - - final toolCall1 = aiMessage1.toolCalls.first; - expect(toolCall1.name, tool1.name); - expect(toolCall1.arguments.containsKey('location'), isTrue); - expect(toolCall1.arguments['location'], contains('Boston')); - expect(toolCall1.arguments['unit'], 'celsius'); - - final toolCall2 = aiMessage1.toolCalls.last; - expect(toolCall2.name, tool1.name); - expect(toolCall2.arguments.containsKey('location'), isTrue); - expect(toolCall2.arguments['location'], contains('Madrid')); - expect(toolCall2.arguments['unit'], 'celsius'); - - final functionResult1 = { - 'temperature': '22', - 'unit': 'celsius', - 'description': 'Sunny', - }; - final functionMessage1 = ChatMessage.tool( - toolCallId: toolCall1.id, - content: json.encode(functionResult1), - ); - - final functionResult2 = { - 'temperature': '25', - 'unit': 'celsius', - 'description': 'Cloudy', - }; - final functionMessage2 = ChatMessage.tool( - toolCallId: toolCall2.id, - content: json.encode(functionResult2), - ); - - final res2 = await model.invoke( - PromptValue.chat([ - humanMessage, - aiMessage1, - functionMessage1, - functionMessage2, - ]), - ); - - final aiMessage2 = res2.output; - - expect(aiMessage2.toolCalls, isEmpty); - expect(aiMessage2.content, contains('22')); - expect(aiMessage2.content, contains('25')); - }); - - test('Test multi tool call', () async { - final res = await chatModel.invoke( - PromptValue.string( - "What's the weather in Vellore, India and in Barcelona, Spain?", - ), - options: const ChatOllamaOptions( - model: defaultModel, - tools: [tool1, tool2], - ), - ); - expect(res.output.toolCalls, hasLength(2)); - final toolCall1 = res.output.toolCalls.first; - expect(toolCall1.name, 'get_current_weather'); - expect(toolCall1.argumentsRaw, isNotEmpty); - expect(toolCall1.arguments, isNotEmpty); - expect(toolCall1.arguments['location'], 'Vellore, India'); - expect(toolCall1.arguments['unit'], 'celsius'); - final toolCall2 = res.output.toolCalls.last; - expect(toolCall2.name, 'get_current_weather'); - 
expect(toolCall2.argumentsRaw, isNotEmpty); - expect(toolCall2.arguments, isNotEmpty); - expect(toolCall2.arguments['location'], 'Barcelona, Spain'); - expect(toolCall2.arguments['unit'], 'celsius'); - expect(res.finishReason, FinishReason.stop); - }); - - test('Test ChatToolChoice.none', () async { - final res = await chatModel.invoke( - PromptValue.string("What's the weather in Vellore, India?"), - options: const ChatOllamaOptions( - model: defaultModel, - tools: [tool1], - toolChoice: ChatToolChoice.none, - ), - ); - expect(res.output.toolCalls, isEmpty); - expect(res.output.content, isNotEmpty); - }); - - test('Test ChatToolChoice.forced', () async { - final res = await chatModel.invoke( - PromptValue.string("What's the weather in Vellore, India?"), - options: ChatOllamaOptions( - model: defaultModel, - tools: const [tool1, tool2], - toolChoice: ChatToolChoice.forced(name: tool2.name), - ), - ); - expect(res.output.toolCalls, hasLength(1)); - final toolCall = res.output.toolCalls.first; - expect(toolCall.name, tool2.name); - }); }); } diff --git a/packages/langchain_ollama/test/embeddings/ollama_test.dart b/packages/langchain_ollama/test/embeddings/ollama_test.dart index 5363d47c..0f94ad0d 100644 --- a/packages/langchain_ollama/test/embeddings/ollama_test.dart +++ b/packages/langchain_ollama/test/embeddings/ollama_test.dart @@ -8,7 +8,7 @@ void main() { group('OllamaEmbeddings tests', skip: Platform.environment.containsKey('CI'), () { late OllamaEmbeddings embeddings; - const defaultModel = 'llama3.2'; + const defaultModel = 'llama3:latest'; setUp(() async { embeddings = OllamaEmbeddings( diff --git a/packages/langchain_ollama/test/llms/ollama_test.dart b/packages/langchain_ollama/test/llms/ollama_test.dart index 7426b0c6..e9a6ac55 100644 --- a/packages/langchain_ollama/test/llms/ollama_test.dart +++ b/packages/langchain_ollama/test/llms/ollama_test.dart @@ -10,7 +10,7 @@ import 'package:test/test.dart'; void main() { group('Ollama tests', skip: Platform.environment.containsKey('CI'), () { late Ollama llm; - const defaultModel = 'llama3.2'; + const defaultModel = 'llama3:latest'; setUp(() async { llm = Ollama( diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index a7a12549..2d1e113a 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -1,45 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.7.2 - - - **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) - - **REFACTOR**: Migrate ChatOpenAI to maxCompletionTokens ([#557](https://github.com/davidmigloz/langchain_dart/issues/557)). ([08057a5b](https://github.com/davidmigloz/langchain_dart/commit/08057a5b6e08ee2633c6be6144be1619e902bbc5)) - -## 0.7.1 - - - **FEAT**: Add support for Structured Outputs in ChatOpenAI ([#526](https://github.com/davidmigloz/langchain_dart/issues/526)). ([c5387b5d](https://github.com/davidmigloz/langchain_dart/commit/c5387b5dd87fe2aac511c4eca2d4a497065db61f)) - - **FEAT**: Handle refusal in OpenAI's Structured Outputs API ([#533](https://github.com/davidmigloz/langchain_dart/issues/533)). 
([f4c4ed99](https://github.com/davidmigloz/langchain_dart/commit/f4c4ed9902177560f13fa9f44b07f0a49c3fdf0a)) - - **FEAT**: Include logprobs in result metadata from ChatOpenAI ([#535](https://github.com/davidmigloz/langchain_dart/issues/535)). ([1834b3ad](https://github.com/davidmigloz/langchain_dart/commit/1834b3adb210b7d190a7e0574a304f069813486b)) - - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) - - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) - - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) - - **REFACTOR**: Don't send OpenAI-Beta header in ChatOpenAI ([#511](https://github.com/davidmigloz/langchain_dart/issues/511)). ([0e532bab](https://github.com/davidmigloz/langchain_dart/commit/0e532bab84483bf9d77a0d745f1a591eea2ff7c8)) - -## 0.7.0 - - - **BREAKING** **FEAT**: Update ChatOpenAI default model to gpt-4o-mini ([#507](https://github.com/davidmigloz/langchain_dart/issues/507)). ([c7b8ce91](https://github.com/davidmigloz/langchain_dart/commit/c7b8ce91ac5b4dbe6bed563fae124a9f5ad76a84)) - - **FEAT**: Add support for disabling parallel tool calls in ChatOpenAI ([#493](https://github.com/davidmigloz/langchain_dart/issues/493)). ([c46d676d](https://github.com/davidmigloz/langchain_dart/commit/c46d676dee836f1d17e0d1fd61a8f1f0ba5c2881)) - - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) - - **FEAT**: Add support for service tier in ChatOpenAI ([#495](https://github.com/davidmigloz/langchain_dart/issues/495)). ([af79a4ff](https://github.com/davidmigloz/langchain_dart/commit/af79a4ffcadb207bfc704365462edebfca1ed6c7)) - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -## 0.6.3 - - - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) - -## 0.6.2 - - - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). 
([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) - - **DOCS**: Document tool calling with OpenRouter ([#437](https://github.com/davidmigloz/langchain_dart/issues/437)). ([47986592](https://github.com/davidmigloz/langchain_dart/commit/47986592a674322fe2f69aff7166a3e594756ace)) - -## 0.6.1+1 - - - Update a dependency to the latest release. - ## 0.6.1 - **FEAT**: Add GPT-4o to model catalog ([#420](https://github.com/davidmigloz/langchain_dart/issues/420)). ([96214307](https://github.com/davidmigloz/langchain_dart/commit/96214307ec8ae045dade687d4c623bd4dc1be896)) @@ -115,7 +73,7 @@ ## 0.3.2 - - **FEAT**: Support OpenRouter API in ChatOpenAI wrapper ([#292](https://github.com/davidmigloz/langchain_dart/issues/292)). ([c6e7e5be](https://github.com/davidmigloz/langchain_dart/commit/c6e7e5beeb03c32a93b062aab874cae3da0a52d9)) ([docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/open_router)) + - **FEAT**: Support OpenRouter API in ChatOpenAI wrapper ([#292](https://github.com/davidmigloz/langchain_dart/issues/292)). ([c6e7e5be](https://github.com/davidmigloz/langchain_dart/commit/c6e7e5beeb03c32a93b062aab874cae3da0a52d9)) ([docs](https://langchaindart.com/#/modules/model_io/models/chat_models/integrations/open_router)) - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e)) - **REFACTOR**: Migrate tokenizer to langchain_tiktoken package ([#285](https://github.com/davidmigloz/langchain_dart/issues/285)). ([6a3b6466](https://github.com/davidmigloz/langchain_dart/commit/6a3b6466e3e4cfddda2f506adbf2eb563814d02f)) - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f)) @@ -295,7 +253,7 @@ - Initial public release. Check out the announcement post for all the details: -https://blog.langchaindart.dev/introducing-langchain-dart-6b1d34fc41ef +https://blog.langchaindart.com/introducing-langchain-dart-6b1d34fc41ef ## 0.0.1-dev.7 diff --git a/packages/langchain_openai/README.md b/packages/langchain_openai/README.md index 39073b3b..b7a080da 100644 --- a/packages/langchain_openai/README.md +++ b/packages/langchain_openai/README.md @@ -20,6 +20,8 @@ OpenAI module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart * `OpenAIQAWithStructureChain` a chain that answer questions in the specified structure. * `OpenAIQAWithSourcesChain`: a chain that answer questions providing sources. +- Agents: + * `OpenAIToolsAgent`: an agent driven by OpenAIs Tools powered API. - Tools: * `OpenAIDallETool`: a tool that uses DallE to generate images from text. 
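As a quick orientation for the Agents and Tools entries restored in the README above, a condensed sketch adapted from the doc comments that reappear later in this patch (illustrative only, not part of the patch): the `OPENAI_API_KEY` lookup and the `apiKey` parameter on `OpenAIDallETool` are assumptions made to keep the example self-contained, and `AgentExecutor` is taken from the `langchain` package.

```dart
import 'dart:io';

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  // Assumption: the API key is provided via an environment variable.
  final apiKey = Platform.environment['OPENAI_API_KEY'];

  final llm = ChatOpenAI(
    apiKey: apiKey,
    defaultOptions: const ChatOpenAIOptions(
      model: 'gpt-4-turbo',
      temperature: 0,
    ),
  );

  // Assumption: OpenAIDallETool accepts the API key directly, like the other
  // OpenAI wrappers in this package.
  final tools = [OpenAIDallETool(apiKey: apiKey)];

  final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools);
  final executor = AgentExecutor(agent: agent);

  final res = await executor.run(
    'Generate an image of a cat wearing a space suit and return its URL.',
  );
  print(res);

  llm.close();
}
```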
diff --git a/packages/langchain_openai/lib/fix_data/fix.yaml b/packages/langchain_openai/lib/fix_data/fix.yaml deleted file mode 100644 index 5db14fd0..00000000 --- a/packages/langchain_openai/lib/fix_data/fix.yaml +++ /dev/null @@ -1,19 +0,0 @@ -version: 1 - -transforms: - - title: "Migrate to 'ToolsAgent'" - date: 2024-08-21 - element: - uris: ['langchain_openai.dart', 'src/agents/tools.dart'] - class: 'OpenAIToolsAgent' - changes: - - kind: 'rename' - newName: 'ToolsAgent' - - title: "Migrate to 'ToolsAgentOutputParser'" - date: 2024-08-21 - element: - uris: ['langchain_openai.dart', 'src/agents/tools.dart'] - class: 'OpenAIToolsAgentOutputParser' - changes: - - kind: 'rename' - newName: 'ToolsAgentOutputParser' diff --git a/packages/langchain_openai/lib/langchain_openai.dart b/packages/langchain_openai/lib/langchain_openai.dart index 77e92aa5..d2730a6b 100644 --- a/packages/langchain_openai/lib/langchain_openai.dart +++ b/packages/langchain_openai/lib/langchain_openai.dart @@ -1,4 +1,4 @@ -/// LangChain.dart integration module for OpenAI (GPT-4o, Embeddings, DALL·E, etc.). +/// LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). library; export 'package:openai_dart/openai_dart.dart' show OpenAIClientException; diff --git a/packages/langchain_openai/lib/src/agents/tools.dart b/packages/langchain_openai/lib/src/agents/tools.dart index a1a13583..2867427d 100644 --- a/packages/langchain_openai/lib/src/agents/tools.dart +++ b/packages/langchain_openai/lib/src/agents/tools.dart @@ -1,4 +1,3 @@ -// ignore_for_file: deprecated_member_use_from_same_package import 'package:langchain_core/agents.dart'; import 'package:langchain_core/chains.dart'; import 'package:langchain_core/chat_models.dart'; @@ -18,11 +17,6 @@ const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( ); /// {@template openai_tools_agent} -/// > Note: This class is deprecated. Use `ToolsAgent` (from the `langchain` -/// > package instead). It works with the same API as this class, but can be -/// > used with any provider that supports tool calling. -/// > You can run `dart fix --apply` to automatically update your code. -/// /// An Agent driven by OpenAI's Tools powered API. /// /// Example: @@ -33,7 +27,7 @@ const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( /// temperature: 0, /// ); /// final tools = [CalculatorTool()]; -/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +/// final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); /// final executor = AgentExecutor(agent: agent); /// final res = await executor.run('What is 40 raised to the 0.43 power? '); /// ``` @@ -75,10 +69,8 @@ const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( /// template if you only need to customize the system message or add some /// extra messages. /// {@endtemplate} -@Deprecated('Use ToolsAgent instead') class OpenAIToolsAgent extends BaseSingleActionAgent { /// {@macro openai_functions_agent} - @Deprecated('Use ToolsAgent instead') OpenAIToolsAgent({ required this.llmChain, required super.tools, @@ -126,7 +118,6 @@ class OpenAIToolsAgent extends BaseSingleActionAgent { /// the first in the prompt. Default: "You are a helpful AI assistant". /// - [extraPromptMessages] prompt messages that will be placed between the /// system message and the input from the agent. 
- @Deprecated('Use ToolsAgent.fromLLMAndTools() instead') factory OpenAIToolsAgent.fromLLMAndTools({ required final ChatOpenAI llm, required final List tools, @@ -250,21 +241,14 @@ class OpenAIToolsAgent extends BaseSingleActionAgent { } /// {@template openai_tools_agent_output_parser} -/// > Note: This class is deprecated. Use `ToolsAgentOutputParser` (from the -/// > `langchain` package instead). It is equivalent to this class, but -/// > prepared to work with the `ToolsAgent`. -/// > You can run `dart fix --apply` to automatically update your code. -/// /// Parser for [OpenAIToolsAgent]. /// /// It parses the output of the LLM and returns the corresponding /// [BaseAgentAction] to be executed. /// {@endtemplate} -@Deprecated('Use ToolsAgentOutputParser instead') class OpenAIToolsAgentOutputParser extends BaseOutputParser> { /// {@macro openai_tools_agent_output_parser} - @Deprecated('Use ToolsAgentOutputParser instead') const OpenAIToolsAgentOutputParser() : super(defaultOptions: const OutputParserOptions()); diff --git a/packages/langchain_openai/lib/src/chains/qa_with_sources.dart b/packages/langchain_openai/lib/src/chains/qa_with_sources.dart index 7c812836..207577a1 100644 --- a/packages/langchain_openai/lib/src/chains/qa_with_sources.dart +++ b/packages/langchain_openai/lib/src/chains/qa_with_sources.dart @@ -12,7 +12,7 @@ import 'qa_with_structure.dart'; /// ```dart /// final llm = ChatOpenAI( /// apiKey: openaiApiKey, -/// model: 'gpt-4o-mini', +/// model: 'gpt-3.5-turbo-0613', /// temperature: 0, /// ); /// final qaChain = OpenAIQAWithSourcesChain(llm: llm); diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index c8a670f5..053bf481 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -25,10 +25,8 @@ import 'types.dart'; /// - [Completions API docs](https://platform.openai.com/docs/api-reference/chat) /// /// You can also use this wrapper to consume OpenAI-compatible APIs like -/// [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), -/// [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), -/// [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), -/// [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc. +/// [Anyscale](https://www.anyscale.com), [Together AI](https://www.together.ai), +/// [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), etc. /// /// ### Call options /// @@ -76,7 +74,7 @@ import 'types.dart'; /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ /// 'q1': prompt1 | chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4')) | outputParser, -/// 'q2': prompt2| chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4o-mini')) | outputParser, +/// 'q2': prompt2| chatModel.bind(const ChatOpenAIOptions(model: 'gpt-3.5-turbo')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); /// ``` @@ -174,7 +172,7 @@ class ChatOpenAI extends BaseChatModel { /// [OpenAI dashboard](https://platform.openai.com/account/api-keys). /// - `organization`: your OpenAI organization ID (if applicable). /// - [ChatOpenAI.encoding] - /// - [ChatOpenAI.defaultOptions] + /// - [OpenAI.defaultOptions] /// /// Advance configuration options: /// - `baseUrl`: the base URL to use. 
Defaults to OpenAI's API URL. You can @@ -194,13 +192,12 @@ class ChatOpenAI extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatOpenAIOptions( - model: defaultModel, + model: 'gpt-3.5-turbo', ), this.encoding, }) : _client = OpenAIClient( apiKey: apiKey ?? '', organization: organization, - beta: null, baseUrl: baseUrl, headers: headers, queryParams: queryParams, @@ -239,19 +236,15 @@ class ChatOpenAI extends BaseChatModel { @override String get modelType => 'openai-chat'; - /// The default model to use unless another is specified. - static const defaultModel = 'gpt-4o-mini'; - @override Future invoke( final PromptValue input, { final ChatOpenAIOptions? options, }) async { final completion = await _client.createChatCompletion( - request: createChatCompletionRequest( + request: _createChatCompletionRequest( input.toChatMessages(), options: options, - defaultOptions: defaultOptions, ), ); return completion.toChatResult(completion.id ?? _uuid.v4()); @@ -264,10 +257,9 @@ class ChatOpenAI extends BaseChatModel { }) { return _client .createChatCompletionStream( - request: createChatCompletionRequest( + request: _createChatCompletionRequest( input.toChatMessages(), options: options, - defaultOptions: defaultOptions, stream: true, ), ) @@ -277,6 +269,48 @@ class ChatOpenAI extends BaseChatModel { ); } + /// Creates a [CreateChatCompletionRequest] from the given input. + CreateChatCompletionRequest _createChatCompletionRequest( + final List messages, { + final ChatOpenAIOptions? options, + final bool stream = false, + }) { + final messagesDtos = messages.toChatCompletionMessages(); + final toolsDtos = options?.tools?.toChatCompletionTool() ?? + defaultOptions.tools?.toChatCompletionTool(); + final toolChoice = options?.toolChoice?.toChatCompletionToolChoice() ?? + defaultOptions.toolChoice?.toChatCompletionToolChoice(); + final responseFormat = + options?.responseFormat ?? defaultOptions.responseFormat; + final responseFormatDto = responseFormat?.toChatCompletionResponseFormat(); + + return CreateChatCompletionRequest( + model: ChatCompletionModel.modelId( + options?.model ?? defaultOptions.model ?? throwNullModelError(), + ), + messages: messagesDtos, + tools: toolsDtos, + toolChoice: toolChoice, + frequencyPenalty: + options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, + logitBias: options?.logitBias ?? defaultOptions.logitBias, + maxTokens: options?.maxTokens ?? defaultOptions.maxTokens, + n: options?.n ?? defaultOptions.n, + presencePenalty: + options?.presencePenalty ?? defaultOptions.presencePenalty, + responseFormat: responseFormatDto, + seed: options?.seed ?? defaultOptions.seed, + stop: (options?.stop ?? defaultOptions.stop) != null + ? ChatCompletionStop.listString(options?.stop ?? defaultOptions.stop!) + : null, + temperature: options?.temperature ?? defaultOptions.temperature, + topP: options?.topP ?? defaultOptions.topP, + user: options?.user ?? defaultOptions.user, + streamOptions: + stream ? const ChatCompletionStreamOptions(includeUsage: true) : null, + ); + } + /// Tokenizes the given prompt using tiktoken with the encoding used by the /// [model]. If an encoding model is specified in [encoding] field, that /// encoding is used instead. @@ -295,7 +329,8 @@ class ChatOpenAI extends BaseChatModel { final PromptValue promptValue, { final ChatOpenAIOptions? options, }) async { - final model = options?.model ?? defaultOptions.model ?? defaultModel; + final model = + options?.model ?? defaultOptions.model ?? 
throwNullModelError(); final tiktoken = _getTiktoken(); final messages = promptValue.toChatMessages(); @@ -304,6 +339,7 @@ class ChatOpenAI extends BaseChatModel { final int tokensPerName; switch (model) { + case 'gpt-3.5-turbo-0613': case 'gpt-3.5-turbo-16k-0613': case 'gpt-4-0314': case 'gpt-4-32k-0314': @@ -317,8 +353,8 @@ class ChatOpenAI extends BaseChatModel { // If there's a name, the role is omitted tokensPerName = -1; default: - if (model.startsWith('gpt-4o-mini') || model.startsWith('gpt-4')) { - // Returning num tokens assuming gpt-4 + if (model.startsWith('gpt-3.5-turbo') || model.startsWith('gpt-4')) { + // Returning num tokens assuming gpt-3.5-turbo-0613 tokensPerMessage = 3; tokensPerName = 1; } else { @@ -363,7 +399,7 @@ class ChatOpenAI extends BaseChatModel { : getEncoding('cl100k_base'); } - @override + /// Closes the client and cleans up any resources associated with it. void close() { _client.endSession(); } diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart index ad8bec0b..78054bed 100644 --- a/packages/langchain_openai/lib/src/chat_models/mappers.dart +++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart @@ -6,56 +6,8 @@ import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/tools.dart'; import 'package:openai_dart/openai_dart.dart'; -import 'chat_openai.dart'; import 'types.dart'; -/// Creates a [CreateChatCompletionRequest] from the given input. -CreateChatCompletionRequest createChatCompletionRequest( - final List messages, { - required final ChatOpenAIOptions? options, - required final ChatOpenAIOptions defaultOptions, - final bool stream = false, -}) { - final messagesDtos = messages.toChatCompletionMessages(); - final toolsDtos = - (options?.tools ?? defaultOptions.tools)?.toChatCompletionTool(); - final toolChoice = (options?.toolChoice ?? defaultOptions.toolChoice) - ?.toChatCompletionToolChoice(); - final responseFormatDto = - (options?.responseFormat ?? defaultOptions.responseFormat) - ?.toChatCompletionResponseFormat(); - final serviceTierDto = (options?.serviceTier ?? defaultOptions.serviceTier) - .toCreateChatCompletionRequestServiceTier(); - - return CreateChatCompletionRequest( - model: ChatCompletionModel.modelId( - options?.model ?? defaultOptions.model ?? ChatOpenAI.defaultModel, - ), - messages: messagesDtos, - tools: toolsDtos, - toolChoice: toolChoice, - frequencyPenalty: - options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, - logitBias: options?.logitBias ?? defaultOptions.logitBias, - maxCompletionTokens: options?.maxTokens ?? defaultOptions.maxTokens, - n: options?.n ?? defaultOptions.n, - presencePenalty: options?.presencePenalty ?? defaultOptions.presencePenalty, - responseFormat: responseFormatDto, - seed: options?.seed ?? defaultOptions.seed, - stop: (options?.stop ?? defaultOptions.stop) != null - ? ChatCompletionStop.listString(options?.stop ?? defaultOptions.stop!) - : null, - temperature: options?.temperature ?? defaultOptions.temperature, - topP: options?.topP ?? defaultOptions.topP, - parallelToolCalls: - options?.parallelToolCalls ?? defaultOptions.parallelToolCalls, - serviceTier: serviceTierDto, - user: options?.user ?? defaultOptions.user, - streamOptions: - stream ? 
const ChatCompletionStreamOptions(includeUsage: true) : null, - ); -} - extension ChatMessageListMapper on List { List toChatCompletionMessages() { return map(_mapMessage).toList(growable: false); @@ -63,36 +15,36 @@ extension ChatMessageListMapper on List { ChatCompletionMessage _mapMessage(final ChatMessage msg) { return switch (msg) { - final SystemChatMessage msg => _mapSystemMessage(msg), - final HumanChatMessage msg => _mapHumanMessage(msg), - final AIChatMessage msg => _mapAIMessage(msg), - final ToolChatMessage msg => _mapToolMessage(msg), + final SystemChatMessage systemChatMessage => ChatCompletionMessage.system( + content: systemChatMessage.content, + ), + final HumanChatMessage humanChatMessage => ChatCompletionMessage.user( + content: switch (humanChatMessage.content) { + final ChatMessageContentText c => _mapMessageContentString(c), + final ChatMessageContentImage c => + ChatCompletionUserMessageContent.parts( + [_mapMessageContentPartImage(c)], + ), + final ChatMessageContentMultiModal c => _mapMessageContentPart(c), + }, + ), + final AIChatMessage aiChatMessage => ChatCompletionMessage.assistant( + content: aiChatMessage.content, + toolCalls: aiChatMessage.toolCalls.isNotEmpty + ? aiChatMessage.toolCalls + .map(_mapMessageToolCall) + .toList(growable: false) + : null, + ), + final ToolChatMessage toolChatMessage => ChatCompletionMessage.tool( + toolCallId: toolChatMessage.toolCallId, + content: toolChatMessage.content, + ), CustomChatMessage() => throw UnsupportedError('OpenAI does not support custom messages'), }; } - ChatCompletionMessage _mapSystemMessage( - final SystemChatMessage systemChatMessage, - ) { - return ChatCompletionMessage.system(content: systemChatMessage.content); - } - - ChatCompletionMessage _mapHumanMessage( - final HumanChatMessage humanChatMessage, - ) { - return ChatCompletionMessage.user( - content: switch (humanChatMessage.content) { - final ChatMessageContentText c => _mapMessageContentString(c), - final ChatMessageContentImage c => - ChatCompletionUserMessageContent.parts( - [_mapMessageContentPartImage(c)], - ), - final ChatMessageContentMultiModal c => _mapMessageContentPart(c), - }, - ); - } - ChatCompletionUserMessageContentString _mapMessageContentString( final ChatMessageContentText c, ) { @@ -153,17 +105,6 @@ extension ChatMessageListMapper on List { return ChatCompletionMessageContentParts(partsList); } - ChatCompletionMessage _mapAIMessage(final AIChatMessage aiChatMessage) { - return ChatCompletionMessage.assistant( - content: aiChatMessage.content, - toolCalls: aiChatMessage.toolCalls.isNotEmpty - ? 
aiChatMessage.toolCalls - .map(_mapMessageToolCall) - .toList(growable: false) - : null, - ); - } - ChatCompletionMessageToolCall _mapMessageToolCall( final AIChatMessageToolCall toolCall, ) { @@ -176,26 +117,12 @@ extension ChatMessageListMapper on List { ), ); } - - ChatCompletionMessage _mapToolMessage( - final ToolChatMessage toolChatMessage, - ) { - return ChatCompletionMessage.tool( - toolCallId: toolChatMessage.toolCallId, - content: toolChatMessage.content, - ); - } } extension CreateChatCompletionResponseMapper on CreateChatCompletionResponse { ChatResult toChatResult(final String id) { final choice = choices.first; final msg = choice.message; - - if (msg.refusal != null && msg.refusal!.isNotEmpty) { - throw OpenAIRefusalException(msg.refusal!); - } - return ChatResult( id: id, output: AIChatMessage( @@ -209,7 +136,6 @@ extension CreateChatCompletionResponseMapper on CreateChatCompletionResponse { 'model': model, 'created': created, 'system_fingerprint': systemFingerprint, - 'logprobs': choice.logprobs?.toMap(), }, usage: _mapUsage(usage), ); @@ -267,9 +193,6 @@ extension ChatToolChoiceMapper on ChatToolChoice { ChatToolChoiceAuto _ => const ChatCompletionToolChoiceOption.mode( ChatCompletionToolChoiceMode.auto, ), - ChatToolChoiceRequired() => const ChatCompletionToolChoiceOption.mode( - ChatCompletionToolChoiceMode.required, - ), final ChatToolChoiceForced t => ChatCompletionToolChoiceOption.tool( ChatCompletionNamedToolChoice( type: ChatCompletionNamedToolChoiceType.function, @@ -285,11 +208,6 @@ extension CreateChatCompletionStreamResponseMapper ChatResult toChatResult(final String id) { final choice = choices.firstOrNull; final delta = choice?.delta; - - if (delta?.refusal != null && delta!.refusal!.isNotEmpty) { - throw OpenAIRefusalException(delta.refusal!); - } - return ChatResult( id: id, output: AIChatMessage( @@ -327,33 +245,18 @@ extension CreateChatCompletionStreamResponseMapper } extension ChatOpenAIResponseFormatMapper on ChatOpenAIResponseFormat { - ResponseFormat toChatCompletionResponseFormat() { - return switch (this) { - ChatOpenAIResponseFormatText() => const ResponseFormat.text(), - ChatOpenAIResponseFormatJsonObject() => const ResponseFormat.jsonObject(), - final ChatOpenAIResponseFormatJsonSchema res => ResponseFormat.jsonSchema( - jsonSchema: JsonSchemaObject( - name: res.jsonSchema.name, - description: res.jsonSchema.description, - schema: res.jsonSchema.schema, - strict: res.jsonSchema.strict, - ), - ), - }; + ChatCompletionResponseFormat toChatCompletionResponseFormat() { + return ChatCompletionResponseFormat( + type: switch (type) { + ChatOpenAIResponseFormatType.text => + ChatCompletionResponseFormatType.text, + ChatOpenAIResponseFormatType.jsonObject => + ChatCompletionResponseFormatType.jsonObject, + }, + ); } } -extension ChatOpenAIServiceTierX on ChatOpenAIServiceTier? { - CreateChatCompletionRequestServiceTier? - toCreateChatCompletionRequestServiceTier() => switch (this) { - ChatOpenAIServiceTier.auto => - CreateChatCompletionRequestServiceTier.auto, - ChatOpenAIServiceTier.vDefault => - CreateChatCompletionRequestServiceTier.vDefault, - null => null, - }; -} - FinishReason _mapFinishReason( final ChatCompletionFinishReason? 
reason, ) => diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 3173e293..a82ab9a1 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -1,51 +1,13 @@ -import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/tools.dart'; -import 'package:meta/meta.dart'; /// {@template chat_openai_options} /// Options to pass into the OpenAI Chat Model. -/// -/// Available [ChatOpenAIOptions.model]s: -/// - `chatgpt-4o-latest` -/// - `gpt-4` -/// - `gpt-4-32k` -/// - `gpt-4-32k-0314` -/// - `gpt-4-32k-0613` -/// - `gpt-4-0125-preview` -/// - `gpt-4-0314` -/// - `gpt-4-0613` -/// - `gpt-4-1106-preview` -/// - `gpt-4-turbo` -/// - `gpt-4-turbo-2024-04-09` -/// - `gpt-4-turbo-preview` -/// - `gpt-4-vision-preview` -/// - `gpt-4o` -/// - `gpt-4o-2024-05-13` -/// - `gpt-4o-2024-08-06` -/// - `gpt-4o-2024-08-06` -/// - `gpt-4o-mini` -/// - `gpt-4o-mini-2024-07-18` -/// - `gpt-3.5-turbo` -/// - `gpt-3.5-turbo-16k` -/// - `gpt-3.5-turbo-16k-0613` -/// - `gpt-3.5-turbo-0125` -/// - `gpt-3.5-turbo-0301` -/// - `gpt-3.5-turbo-0613` -/// - `gpt-3.5-turbo-1106` -/// - `o1-mini` -/// - `o1-mini-2024-09-12` -/// - `o1-preview` -/// - `o1-preview-2024-09-12` -/// -/// Mind that the list may be outdated. -/// See https://platform.openai.com/docs/models for the latest list. /// {@endtemplate} -@immutable class ChatOpenAIOptions extends ChatModelOptions { /// {@macro chat_openai_options} const ChatOpenAIOptions({ - super.model, + this.model = 'gpt-3.5-turbo', this.frequencyPenalty, this.logitBias, this.maxTokens, @@ -56,14 +18,38 @@ class ChatOpenAIOptions extends ChatModelOptions { this.stop, this.temperature, this.topP, + this.user, super.tools, super.toolChoice, - this.parallelToolCalls, - this.serviceTier, - this.user, super.concurrencyLimit, }); + /// ID of the model to use (e.g. 'gpt-3.5-turbo'). + /// + /// Available models: + /// - `gpt-4` + /// - `gpt-4-0314` + /// - `gpt-4-0613` + /// - `gpt-4-32k` + /// - `gpt-4-32k-0314` + /// - `gpt-4-32k-0613` + /// - `gpt-4-turbo-preview` + /// - `gpt-4-1106-preview` + /// - `gpt-4-0125-preview` + /// - `gpt-4-vision-preview` + /// - `gpt-4o` + /// - `gpt-4o-2024-05-13` + /// - `gpt-3.5-turbo` + /// - `gpt-3.5-turbo-16k` + /// - `gpt-3.5-turbo-0301` + /// - `gpt-3.5-turbo-0613` + /// - `gpt-3.5-turbo-1106` + /// - `gpt-3.5-turbo-16k-0613` + /// + /// Mind that the list may be outdated. + /// See https://platform.openai.com/docs/models for the latest list. + final String? model; + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on /// their existing frequency in the text so far, decreasing the model's /// likelihood to repeat the same line verbatim. @@ -137,24 +123,14 @@ class ChatOpenAIOptions extends ChatModelOptions { /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p final double? topP; - /// Whether to enable parallel tool calling during tool use. - /// By default, it is enabled. - /// - /// - /// Ref: https://platform.openai.com/docs/guides/function-calling/parallel-function-calling - final bool? parallelToolCalls; - - /// Specifies the latency tier to use for processing the request. - /// This is relevant for customers subscribed to the scale tier service. - final ChatOpenAIServiceTier? 
serviceTier; - /// A unique identifier representing your end-user, which can help OpenAI to /// monitor and detect abuse. /// /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids final String? user; - @override + /// Creates a copy of this [ChatOpenAIOptions] object with the given fields + /// replaced with the new values. ChatOpenAIOptions copyWith({ final String? model, final double? frequencyPenalty, @@ -167,12 +143,9 @@ class ChatOpenAIOptions extends ChatModelOptions { final List? stop, final double? temperature, final double? topP, + final String? user, final List? tools, final ChatToolChoice? toolChoice, - final bool? parallelToolCalls, - final ChatOpenAIServiceTier? serviceTier, - final String? user, - final int? concurrencyLimit, }) { return ChatOpenAIOptions( model: model ?? this.model, @@ -186,223 +159,32 @@ class ChatOpenAIOptions extends ChatModelOptions { stop: stop ?? this.stop, temperature: temperature ?? this.temperature, topP: topP ?? this.topP, + user: user ?? this.user, tools: tools ?? this.tools, toolChoice: toolChoice ?? this.toolChoice, - parallelToolCalls: parallelToolCalls ?? this.parallelToolCalls, - serviceTier: serviceTier ?? this.serviceTier, - user: user ?? this.user, - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } - - @override - ChatOpenAIOptions merge(covariant final ChatOpenAIOptions? other) { - return copyWith( - model: other?.model, - frequencyPenalty: other?.frequencyPenalty, - logitBias: other?.logitBias, - maxTokens: other?.maxTokens, - n: other?.n, - presencePenalty: other?.presencePenalty, - responseFormat: other?.responseFormat, - seed: other?.seed, - stop: other?.stop, - temperature: other?.temperature, - topP: other?.topP, - tools: other?.tools, - toolChoice: other?.toolChoice, - parallelToolCalls: other?.parallelToolCalls, - serviceTier: other?.serviceTier, - user: other?.user, - concurrencyLimit: other?.concurrencyLimit, - ); - } - - @override - bool operator ==(covariant final ChatOpenAIOptions other) { - return identical(this, other) || - runtimeType == other.runtimeType && - model == other.model && - frequencyPenalty == other.frequencyPenalty && - const MapEquality() - .equals(logitBias, other.logitBias) && - maxTokens == other.maxTokens && - n == other.n && - presencePenalty == other.presencePenalty && - responseFormat == other.responseFormat && - seed == other.seed && - const ListEquality().equals(stop, other.stop) && - temperature == other.temperature && - topP == other.topP && - const ListEquality().equals(tools, other.tools) && - toolChoice == other.toolChoice && - parallelToolCalls == other.parallelToolCalls && - serviceTier == other.serviceTier && - user == other.user && - concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return model.hashCode ^ - frequencyPenalty.hashCode ^ - const MapEquality().hash(logitBias) ^ - maxTokens.hashCode ^ - n.hashCode ^ - presencePenalty.hashCode ^ - responseFormat.hashCode ^ - seed.hashCode ^ - const ListEquality().hash(stop) ^ - temperature.hashCode ^ - topP.hashCode ^ - const ListEquality().hash(tools) ^ - toolChoice.hashCode ^ - parallelToolCalls.hashCode ^ - serviceTier.hashCode ^ - user.hashCode ^ - concurrencyLimit.hashCode; - } } /// {@template chat_openai_response_format} /// An object specifying the format that the model must output. /// {@endtemplate} -sealed class ChatOpenAIResponseFormat { - const ChatOpenAIResponseFormat(); - - /// The model will respond with text. 
- static const text = ChatOpenAIResponseFormatText(); - - /// The model will respond with a valid JSON object. - static const jsonObject = ChatOpenAIResponseFormatJsonObject(); - - /// The model will respond with a valid JSON object that adheres to the - /// specified schema. - factory ChatOpenAIResponseFormat.jsonSchema( - final ChatOpenAIJsonSchema jsonSchema, - ) => - ChatOpenAIResponseFormatJsonSchema(jsonSchema: jsonSchema); -} - -/// {@template chat_openai_response_format_text} -/// The model will respond with text. -/// {@endtemplate} -class ChatOpenAIResponseFormatText extends ChatOpenAIResponseFormat { - /// {@macro chat_openai_response_format_text} - const ChatOpenAIResponseFormatText(); -} - -/// {@template chat_openai_response_format_json_object} -/// The model will respond with a valid JSON object. -/// {@endtemplate} -class ChatOpenAIResponseFormatJsonObject extends ChatOpenAIResponseFormat { - /// {@macro chat_openai_response_format_json_object} - const ChatOpenAIResponseFormatJsonObject(); -} - -/// {@template chat_openai_response_format_json_schema} -/// The model will respond with a valid JSON object that adheres to the -/// specified schema. -/// {@endtemplate} -@immutable -class ChatOpenAIResponseFormatJsonSchema extends ChatOpenAIResponseFormat { - /// {@macro chat_openai_response_format_json_schema} - const ChatOpenAIResponseFormatJsonSchema({ - required this.jsonSchema, - }); - - /// The JSON schema that the model must adhere to. - final ChatOpenAIJsonSchema jsonSchema; - - @override - bool operator ==(covariant ChatOpenAIResponseFormatJsonSchema other) { - return identical(this, other) || - runtimeType == other.runtimeType && jsonSchema == other.jsonSchema; - } - - @override - int get hashCode => jsonSchema.hashCode; -} - -/// {@template chat_openai_json_schema} -/// Specifies the schema for the response format. -/// {@endtemplate} -@immutable -class ChatOpenAIJsonSchema { - /// {@macro chat_openai_json_schema} - const ChatOpenAIJsonSchema({ - required this.name, - required this.schema, - this.description, - this.strict = false, +class ChatOpenAIResponseFormat { + /// {@macro chat_openai_response_format} + const ChatOpenAIResponseFormat({ + required this.type, }); - /// The name of the response format. Must be a-z, A-Z, 0-9, or contain - /// underscores and dashes, with a maximum length of 64. - final String name; - - /// A description of what the response format is for, used by the model to - /// determine how to respond in the format. - final String? description; - - /// The schema for the response format, described as a JSON Schema object. - final Map schema; - - /// Whether to enable strict schema adherence when generating the output. - /// If set to true, the model will always follow the exact schema defined in - /// the `schema` field. Only a subset of JSON Schema is supported when - /// `strict` is `true`. To learn more, read the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - final bool strict; - - @override - bool operator ==(covariant ChatOpenAIJsonSchema other) { - return identical(this, other) || - runtimeType == other.runtimeType && - name == other.name && - description == other.description && - const MapEquality().equals(schema, other.schema) && - strict == other.strict; - } - - @override - int get hashCode { - return name.hashCode ^ - description.hashCode ^ - const MapEquality().hash(schema) ^ - strict.hashCode; - } + /// The format type. 
+ final ChatOpenAIResponseFormatType type; } -/// Specifies the latency tier to use for processing the request. -/// This is relevant for customers subscribed to the scale tier service. -enum ChatOpenAIServiceTier { - /// The system will utilize scale tier credits until they are exhausted. - auto, +/// Types of response formats. +enum ChatOpenAIResponseFormatType { + /// Standard text mode. + text, - /// The request will be processed using the default service tier with a lower - /// uptime SLA and no latency guarantee. - vDefault, -} - -/// {@template openai_refusal_exception} -/// Exception thrown when OpenAI Structured Outputs API returns a refusal. -/// -/// When using OpenAI's Structured Outputs API with user-generated input, the -/// model may occasionally refuse to fulfill the request for safety reasons. -/// -/// See here for more on refusals: -/// https://platform.openai.com/docs/guides/structured-outputs/refusals -/// {@endtemplate} -class OpenAIRefusalException implements Exception { - /// {@macro openai_refusal_exception} - const OpenAIRefusalException(this.message); - - /// The refusal message. - final String message; - - @override - String toString() { - return 'OpenAIRefusalException: $message'; - } + /// [ChatOpenAIResponseFormatType.jsonObject] enables JSON mode, which + /// guarantees the message the model generates is valid JSON. + jsonObject, } diff --git a/packages/langchain_openai/lib/src/llms/openai.dart b/packages/langchain_openai/lib/src/llms/openai.dart index aed0e9e9..086b8b8a 100644 --- a/packages/langchain_openai/lib/src/llms/openai.dart +++ b/packages/langchain_openai/lib/src/llms/openai.dart @@ -1,5 +1,3 @@ -import 'dart:math'; - import 'package:http/http.dart' as http; import 'package:langchain_core/llms.dart'; import 'package:langchain_core/prompts.dart'; @@ -188,9 +186,8 @@ class OpenAI extends BaseLLM { final Map? queryParams, final http.Client? client, super.defaultOptions = const OpenAIOptions( - model: defaultModel, - maxTokens: defaultMaxTokens, - concurrencyLimit: defaultConcurrencyLimit, + model: 'gpt-3.5-turbo-instruct', + maxTokens: 256, ), this.encoding, }) : _client = OpenAIClient( @@ -231,15 +228,6 @@ class OpenAI extends BaseLLM { @override String get modelType => 'openai'; - /// The default model to use unless another is specified. - static const defaultModel = 'gpt-3.5-turbo-instruct'; - - /// The default max tokens to use unless another is specified. - static const defaultMaxTokens = 256; - - /// The default concurrency limit to use unless another is specified. - static const defaultConcurrencyLimit = 20; - @override Future invoke( final PromptValue input, { @@ -271,8 +259,7 @@ class OpenAI extends BaseLLM { // Otherwise, we can batch the calls to the API final finalOptions = options?.first ?? defaultOptions; - final concurrencyLimit = - min(finalOptions.concurrencyLimit, defaultConcurrencyLimit); + final concurrencyLimit = finalOptions.concurrencyLimit; var index = 0; final results = []; @@ -315,7 +302,7 @@ class OpenAI extends BaseLLM { }) { return CreateCompletionRequest( model: CompletionModel.modelId( - options?.model ?? defaultOptions.model ?? defaultModel, + options?.model ?? defaultOptions.model ?? throwNullModelError(), ), prompt: CompletionPrompt.listString(prompts), bestOf: options?.bestOf ?? defaultOptions.bestOf, @@ -323,8 +310,7 @@ class OpenAI extends BaseLLM { options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, logitBias: options?.logitBias ?? defaultOptions.logitBias, logprobs: options?.logprobs ?? 
defaultOptions.logprobs, - maxTokens: - options?.maxTokens ?? defaultOptions.maxTokens ?? defaultMaxTokens, + maxTokens: options?.maxTokens ?? defaultOptions.maxTokens, n: options?.n ?? defaultOptions.n, presencePenalty: options?.presencePenalty ?? defaultOptions.presencePenalty, @@ -354,12 +340,12 @@ class OpenAI extends BaseLLM { final encoding = this.encoding != null ? getEncoding(this.encoding!) : encodingForModel( - options?.model ?? defaultOptions.model ?? defaultModel, + options?.model ?? defaultOptions.model ?? throwNullModelError(), ); return encoding.encode(promptValue.toString()); } - @override + /// Closes the client and cleans up any resources associated with it. void close() { _client.endSession(); } diff --git a/packages/langchain_openai/lib/src/llms/types.dart b/packages/langchain_openai/lib/src/llms/types.dart index a6bc2ee2..6869a4c4 100644 --- a/packages/langchain_openai/lib/src/llms/types.dart +++ b/packages/langchain_openai/lib/src/llms/types.dart @@ -4,24 +4,17 @@ import 'package:meta/meta.dart'; /// {@template openai_options} /// Options to pass into the OpenAI LLM. -/// -/// Available models: -/// - `gpt-3.5-turbo-instruct` -/// - `davinci-002` -/// - `babbage-002` -/// Mind that the list may be outdated. -/// See https://platform.openai.com/docs/models for the latest list. /// {@endtemplate} @immutable class OpenAIOptions extends LLMOptions { /// {@macro openai_options} const OpenAIOptions({ - super.model, + this.model = 'gpt-3.5-turbo-instruct', this.bestOf, this.frequencyPenalty, this.logitBias, this.logprobs, - this.maxTokens, + this.maxTokens = 256, this.n, this.presencePenalty, this.seed, @@ -30,9 +23,20 @@ class OpenAIOptions extends LLMOptions { this.temperature, this.topP, this.user, - super.concurrencyLimit, + super.concurrencyLimit = 20, }); + /// ID of the model to use (e.g. 'gpt-3.5-turbo-instruct'). + /// + /// Available models: + /// - `gpt-3.5-turbo-instruct` + /// - `davinci-002` + /// - `babbage-002` + /// + /// Mind that the list may be outdated. + /// See https://platform.openai.com/docs/models for the latest list. + final String? model; + /// Generates best_of completions server-side and returns the "best" /// (the one with the highest log probability per token). /// @@ -121,7 +125,8 @@ class OpenAIOptions extends LLMOptions { /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids final String? user; - @override + /// Creates a copy of this [OpenAIOptions] object with the given fields + /// replaced with the new values. OpenAIOptions copyWith({ final String? model, final int? bestOf, @@ -137,7 +142,6 @@ class OpenAIOptions extends LLMOptions { final double? temperature, final double? topP, final String? user, - final int? concurrencyLimit, }) { return OpenAIOptions( model: model ?? this.model, @@ -154,69 +158,42 @@ class OpenAIOptions extends LLMOptions { temperature: temperature ?? this.temperature, topP: topP ?? this.topP, user: user ?? this.user, - concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, - ); - } - - @override - OpenAIOptions merge(covariant final OpenAIOptions? 
other) { - return copyWith( - model: other?.model, - bestOf: other?.bestOf, - frequencyPenalty: other?.frequencyPenalty, - logitBias: other?.logitBias, - logprobs: other?.logprobs, - maxTokens: other?.maxTokens, - n: other?.n, - presencePenalty: other?.presencePenalty, - seed: other?.seed, - stop: other?.stop, - suffix: other?.suffix, - temperature: other?.temperature, - topP: other?.topP, - user: other?.user, - concurrencyLimit: other?.concurrencyLimit, ); } @override - bool operator ==(covariant final OpenAIOptions other) { - return identical(this, other) || - runtimeType == other.runtimeType && - model == other.model && - bestOf == other.bestOf && - frequencyPenalty == other.frequencyPenalty && - const MapEquality() - .equals(logitBias, other.logitBias) && - logprobs == other.logprobs && - maxTokens == other.maxTokens && - n == other.n && - presencePenalty == other.presencePenalty && - seed == other.seed && - const ListEquality().equals(stop, other.stop) && - suffix == other.suffix && - temperature == other.temperature && - topP == other.topP && - user == other.user && - concurrencyLimit == other.concurrencyLimit; - } + bool operator ==(covariant final OpenAIOptions other) => + identical(this, other) || + runtimeType == other.runtimeType && + model == other.model && + bestOf == other.bestOf && + frequencyPenalty == other.frequencyPenalty && + const MapEquality().equals(logitBias, other.logitBias) && + logprobs == other.logprobs && + maxTokens == other.maxTokens && + n == other.n && + presencePenalty == other.presencePenalty && + seed == other.seed && + stop == other.stop && + suffix == other.suffix && + temperature == other.temperature && + topP == other.topP && + user == other.user; @override - int get hashCode { - return model.hashCode ^ - bestOf.hashCode ^ - frequencyPenalty.hashCode ^ - const MapEquality().hash(logitBias) ^ - logprobs.hashCode ^ - maxTokens.hashCode ^ - n.hashCode ^ - presencePenalty.hashCode ^ - seed.hashCode ^ - const ListEquality().hash(stop) ^ - suffix.hashCode ^ - temperature.hashCode ^ - topP.hashCode ^ - user.hashCode ^ - concurrencyLimit.hashCode; - } + int get hashCode => + model.hashCode ^ + bestOf.hashCode ^ + frequencyPenalty.hashCode ^ + const MapEquality().hash(logitBias) ^ + logprobs.hashCode ^ + maxTokens.hashCode ^ + n.hashCode ^ + presencePenalty.hashCode ^ + seed.hashCode ^ + stop.hashCode ^ + suffix.hashCode ^ + temperature.hashCode ^ + topP.hashCode ^ + user.hashCode; } diff --git a/packages/langchain_openai/lib/src/tools/dall_e.dart b/packages/langchain_openai/lib/src/tools/dall_e.dart index aefba7b9..3137dcfa 100644 --- a/packages/langchain_openai/lib/src/tools/dall_e.dart +++ b/packages/langchain_openai/lib/src/tools/dall_e.dart @@ -34,7 +34,7 @@ export 'package:openai_dart/openai_dart.dart' /// ), /// ), /// ]; -/// final agent = ToolsAgent.fromLLMAndTools( +/// final agent = OpenAIToolsAgent.fromLLMAndTools( /// llm: llm, /// tools: tools, /// ); @@ -111,7 +111,7 @@ final class OpenAIDallETool extends StringTool { } } - @override + /// Closes the client and cleans up any resources associated with it. 
void close() { _client.endSession(); } diff --git a/packages/langchain_openai/lib/src/tools/types.dart b/packages/langchain_openai/lib/src/tools/types.dart index 086ba0f5..3b049dc6 100644 --- a/packages/langchain_openai/lib/src/tools/types.dart +++ b/packages/langchain_openai/lib/src/tools/types.dart @@ -1,12 +1,10 @@ import 'package:langchain_core/tools.dart'; -import 'package:meta/meta.dart'; import 'dall_e.dart'; /// {@template open_ai_dall_e_tool_options} /// Generation options to pass into the [OpenAIDallETool]. /// {@endtemplate} -@immutable class OpenAIDallEToolOptions extends ToolOptions { /// {@macro open_ai_dall_e_tool_options} const OpenAIDallEToolOptions({ @@ -16,7 +14,6 @@ class OpenAIDallEToolOptions extends ToolOptions { this.size = ImageSize.v1024x1024, this.style = ImageStyle.vivid, this.user, - super.concurrencyLimit, }); /// ID of the model to use (e.g. `dall-e-2` or 'dall-e-3'). @@ -66,60 +63,4 @@ class OpenAIDallEToolOptions extends ToolOptions { /// /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids final String? user; - - @override - OpenAIDallEToolOptions copyWith({ - final String? model, - final ImageQuality? quality, - final ImageResponseFormat? responseFormat, - final ImageSize? size, - final ImageStyle? style, - final String? user, - final int? concurrencyLimit, - }) { - return OpenAIDallEToolOptions( - model: model ?? this.model, - quality: quality ?? this.quality, - responseFormat: responseFormat ?? this.responseFormat, - size: size ?? this.size, - style: style ?? this.style, - user: user ?? this.user, - concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, - ); - } - - @override - OpenAIDallEToolOptions merge(covariant final OpenAIDallEToolOptions? other) { - return copyWith( - model: other?.model, - quality: other?.quality, - responseFormat: other?.responseFormat, - size: other?.size, - style: other?.style, - user: other?.user, - concurrencyLimit: other?.concurrencyLimit, - ); - } - - @override - bool operator ==(covariant final OpenAIDallEToolOptions other) { - return model == other.model && - quality == other.quality && - responseFormat == other.responseFormat && - size == other.size && - style == other.style && - user == other.user && - concurrencyLimit == other.concurrencyLimit; - } - - @override - int get hashCode { - return model.hashCode ^ - quality.hashCode ^ - responseFormat.hashCode ^ - size.hashCode ^ - style.hashCode ^ - user.hashCode ^ - concurrencyLimit.hashCode; - } } diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 1161ee71..b70412db 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_openai -description: LangChain.dart integration module for OpenAI (GPT-4o, o1, Embeddings, DALL·E, etc.). -version: 0.7.2 +description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). 
+version: 0.6.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -14,18 +14,18 @@ topics: - gpt environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - collection: ^1.18.0 - http: ^1.2.2 - langchain_core: 0.3.6 + collection: ">=1.17.0 <1.19.0" + http: ^1.1.0 + langchain_core: ^0.3.1 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.4.2 - uuid: ^4.4.2 + openai_dart: ^0.3.2 + uuid: ^4.3.3 dev_dependencies: - langchain: ^0.7.6 - langchain_community: 0.3.2 - test: ^1.25.8 + langchain: ^0.7.1 + langchain_community: 0.2.0+1 + test: ^1.25.2 diff --git a/packages/langchain_openai/pubspec_overrides.yaml b/packages/langchain_openai/pubspec_overrides.yaml index 92ad1620..18a3bcaa 100644 --- a/packages/langchain_openai/pubspec_overrides.yaml +++ b/packages/langchain_openai/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,openai_dart,tavily_dart +# melos_managed_dependency_overrides: openai_dart,langchain_core,langchain_community,langchain dependency_overrides: langchain: path: ../langchain @@ -8,5 +8,3 @@ dependency_overrides: path: ../langchain_core openai_dart: path: ../openai_dart - tavily_dart: - path: ../tavily_dart diff --git a/packages/langchain_openai/test/agents/tools_test.dart b/packages/langchain_openai/test/agents/tools_test.dart index 03b52dea..57342631 100644 --- a/packages/langchain_openai/test/agents/tools_test.dart +++ b/packages/langchain_openai/test/agents/tools_test.dart @@ -1,4 +1,3 @@ -// ignore_for_file: deprecated_member_use_from_same_package @TestOn('vm') library; // Uses dart:io @@ -24,6 +23,7 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( + model: 'gpt-4-turbo', temperature: 0, ), ); @@ -45,6 +45,7 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( + model: 'gpt-4-turbo', temperature: 0, ), ); @@ -133,6 +134,7 @@ void main() { final model = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( + model: 'gpt-4-turbo', temperature: 0, ), ).bind(ChatOpenAIOptions(tools: [tool])); diff --git a/packages/langchain_openai/test/chains/qa_with_sources_test.dart b/packages/langchain_openai/test/chains/qa_with_sources_test.dart index c655af98..b1080986 100644 --- a/packages/langchain_openai/test/chains/qa_with_sources_test.dart +++ b/packages/langchain_openai/test/chains/qa_with_sources_test.dart @@ -53,6 +53,7 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( + model: 'gpt-3.5-turbo-0613', temperature: 0, ), ); @@ -125,6 +126,7 @@ Question: {question} final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( + model: 'gpt-3.5-turbo', temperature: 0, ), ); diff --git a/packages/langchain_openai/test/chat_models/anyscale_test.dart b/packages/langchain_openai/test/chat_models/anyscale_test.dart new file mode 100644 index 00000000..1a2fdef1 --- /dev/null +++ b/packages/langchain_openai/test/chat_models/anyscale_test.dart @@ -0,0 +1,115 @@ +@TestOn('vm') +library; // Uses dart:io + +import 'dart:io'; + +import 'package:langchain_core/chat_models.dart'; +import 
'package:langchain_core/language_models.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_openai/langchain_openai.dart'; +import 'package:test/test.dart'; + +void main() { + group('Anyscale tests', () { + late ChatOpenAI chatModel; + + setUp(() async { + chatModel = ChatOpenAI( + apiKey: Platform.environment['ANYSCALE_API_KEY'], + baseUrl: 'https://api.endpoints.anyscale.com/v1', + ); + }); + + tearDown(() { + chatModel.close(); + }); + + test('Test invoke Anyscale API with different models', () async { + final models = [ + 'meta-llama/Llama-2-70b-chat-hf', + 'codellama/CodeLlama-34b-Instruct-hf', + 'mistralai/Mistral-7B-Instruct-v0.1', + 'mistralai/Mixtral-8x7B-Instruct-v0.1', + 'HuggingFaceH4/zephyr-7b-beta', + 'Open-Orca/Mistral-7B-OpenOrca', + ]; + for (final model in models) { + final res = await chatModel.invoke( + PromptValue.string( + 'List the numbers from 1 to 9 in order. ' + 'Output ONLY the numbers in one line without any spaces or commas. ' + 'NUMBERS:', + ), + options: ChatOpenAIOptions(model: model), + ); + + expect(res.id, isNotEmpty); + expect( + res.finishReason, + isNot(FinishReason.unspecified), + reason: model, + ); + expect( + res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), + contains('123456789'), + reason: model, + ); + expect(res.metadata, isNotNull, reason: model); + expect(res.metadata['created'], greaterThan(0), reason: model); + expect(res.metadata['model'], isNotEmpty, reason: model); + await Future.delayed(const Duration(seconds: 1)); // Rate limit + } + }); + + test('Test stream Anyscale API with different models', () async { + final models = [ + 'meta-llama/Llama-2-70b-chat-hf', + 'codellama/CodeLlama-34b-Instruct-hf', + 'mistralai/Mistral-7B-Instruct-v0.1', + 'mistralai/Mixtral-8x7B-Instruct-v0.1', + 'HuggingFaceH4/zephyr-7b-beta', + 'Open-Orca/Mistral-7B-OpenOrca', + ]; + for (final model in models) { + final stream = chatModel.stream( + PromptValue.string( + 'List the numbers from 1 to 9 in order. ' + 'Output ONLY the numbers in one line without any spaces or commas. 
' + 'NUMBERS:', + ), + options: ChatOpenAIOptions(model: model), + ); + + String content = ''; + int count = 0; + await for (final res in stream) { + content += res.output.content.replaceAll(RegExp(r'[\s\n,]'), ''); + count++; + } + expect(count, greaterThan(1), reason: model); + expect(content, contains('123456789'), reason: model); + await Future.delayed(const Duration(seconds: 1)); // Rate limit + } + }); + + test('Test countTokens', () async { + final models = [ + 'mistralai/Mixtral-8x7B-Instruct-v0.1', + 'mistralai/Mistral-7B-Instruct-v0.2', + 'NousResearch/Nous-Hermes-2-Yi-34B', + 'openchat/openchat-3.5-1210', + 'togethercomputer/llama-2-70b-chat', + 'togethercomputer/falcon-40b-instruct', + ]; + for (final model in models) { + const text = 'Hello, how are you?'; + + final numTokens = await chatModel.countTokens( + PromptValue.chat([ChatMessage.humanText(text)]), + options: ChatOpenAIOptions(model: model), + ); + expect(numTokens, 13, reason: model); + } + }); + }); +} diff --git a/packages/langchain_openai/test/chat_models/chat_openai_test.dart b/packages/langchain_openai/test/chat_models/chat_openai_test.dart index a0ea44fb..6268a77b 100644 --- a/packages/langchain_openai/test/chat_models/chat_openai_test.dart +++ b/packages/langchain_openai/test/chat_models/chat_openai_test.dart @@ -14,7 +14,7 @@ import 'package:test/test.dart'; void main() { group('ChatOpenAI tests', () { final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - const defaultModel = 'gpt-4o-mini'; + const defaultModel = 'gpt-3.5-turbo'; test('Test ChatOpenAI parameters', () async { final chat = ChatOpenAI( @@ -118,36 +118,36 @@ void main() { expect(res.content, isNotEmpty); }); - const getCurrentWeatherTool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - test('Test ChatOpenAI tool calling', timeout: const Timeout(Duration(minutes: 1)), () async { final chat = ChatOpenAI(apiKey: openaiApiKey); + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. 
San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + final humanMessage = ChatMessage.humanText( 'What’s the weather like in Boston right now?', ); final res1 = await chat.invoke( PromptValue.chat([humanMessage]), - options: const ChatOpenAIOptions(tools: [getCurrentWeatherTool]), + options: const ChatOpenAIOptions(tools: [tool]), ); final aiMessage1 = res1.output; @@ -156,7 +156,7 @@ void main() { expect(aiMessage1.toolCalls, isNotEmpty); final toolCall = aiMessage1.toolCalls.first; - expect(toolCall.name, getCurrentWeatherTool.name); + expect(toolCall.name, tool.name); expect(toolCall.arguments.containsKey('location'), isTrue); expect(toolCall.arguments['location'], contains('Boston')); @@ -172,7 +172,7 @@ void main() { final res2 = await chat.invoke( PromptValue.chat([humanMessage, aiMessage1, functionMessage]), - options: const ChatOpenAIOptions(tools: [getCurrentWeatherTool]), + options: const ChatOpenAIOptions(tools: [tool]), ); final aiMessage2 = res2.output; @@ -208,6 +208,9 @@ void main() { test('Test countTokens messages', () async { final models = [ + 'gpt-3.5-turbo-0301', + 'gpt-3.5-turbo-0613', + 'gpt-3.5-turbo-16k-0613', 'gpt-4-0314', 'gpt-4-0613', ]; @@ -267,26 +270,26 @@ void main() { expect(result.usage.totalTokens, greaterThan(0)); }); - const jokeTool = ToolSpec( - name: 'joke', - description: 'A joke', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'setup': { - 'type': 'string', - 'description': 'The setup for the joke', - }, - 'punchline': { - 'type': 'string', - 'description': 'The punchline to the joke', + test('Test ChatOpenAI streaming with functions', () async { + const tool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', + }, }, + 'required': ['location', 'punchline'], }, - 'required': ['location', 'punchline'], - }, - ); + ); - test('Test ChatOpenAI streaming with functions', () async { final promptTemplate = ChatPromptTemplate.fromTemplate( 'tell me a long joke about {foo}', ); @@ -298,7 +301,7 @@ void main() { ), ).bind( ChatOpenAIOptions( - tools: const [jokeTool], + tools: const [tool], toolChoice: ChatToolChoice.forced(name: 'joke'), ), ); @@ -327,7 +330,7 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: defaultModel, + model: 'gpt-4-turbo', temperature: 0, seed: 12345, ), @@ -356,68 +359,11 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: defaultModel, - temperature: 0, - seed: 9999, - responseFormat: ChatOpenAIResponseFormat.jsonObject, - ), - ); - - final res = await llm.invoke(prompt); - final outputMsg = res.output; - final outputJson = json.decode(outputMsg.content) as Map; - expect(outputJson['companies'], isNotNull); - final companies = outputJson['companies'] as List; - expect(companies, hasLength(2)); - final firstCompany = companies.first as Map; - expect(firstCompany['name'], 'Google'); - expect(firstCompany['origin'], 'USA'); - final secondCompany = companies.last as Map; - expect(secondCompany['name'], 'Deepmind'); - expect(secondCompany['origin'], 'UK'); - }); - - test('Test Structured Output', () async { - final prompt = 
PromptValue.chat([ - ChatMessage.system( - 'Extract the data of any companies mentioned in the ' - 'following statement. Return a JSON list.', - ), - ChatMessage.humanText( - 'Google was founded in the USA, while Deepmind was founded in the UK', - ), - ]); - final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - model: defaultModel, + model: 'gpt-4-1106-preview', temperature: 0, seed: 9999, - responseFormat: ChatOpenAIResponseFormat.jsonSchema( - const ChatOpenAIJsonSchema( - name: 'Companies', - description: 'A list of companies', - strict: true, - schema: { - 'type': 'object', - 'properties': { - 'companies': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': {'type': 'string'}, - 'origin': {'type': 'string'}, - }, - 'additionalProperties': false, - 'required': ['name', 'origin'], - }, - }, - }, - 'additionalProperties': false, - 'required': ['companies'], - }, - ), + responseFormat: ChatOpenAIResponseFormat( + type: ChatOpenAIResponseFormatType.jsonObject, ), ), ); @@ -454,7 +400,7 @@ void main() { final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: defaultModel, + model: 'gpt-4-turbo', ), ); @@ -481,70 +427,12 @@ void main() { final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: defaultModel, + model: 'gpt-4-turbo', ), ); final res = await chatModel.invoke(prompt); expect(res.output.content.toLowerCase(), contains('apple')); }); - - test('Test additive bind calls', () async { - final chatModel = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions( - model: defaultModel, - temperature: 0, - ), - ); - - final chatModelWithTools = chatModel.bind( - const ChatOpenAIOptions( - tools: [getCurrentWeatherTool, jokeTool], - ), - ); - - final res1 = await chatModelWithTools.invoke( - PromptValue.string( - 'Tell me the weather in Barcelona, Spain and a joke about bears', - ), - ); - expect( - res1.output.toolCalls.map((tc) => tc.name).toSet(), - {getCurrentWeatherTool.name, jokeTool.name}, - ); - - final chatModelForceWeatherTool = chatModelWithTools.bind( - ChatOpenAIOptions( - toolChoice: ChatToolChoice.forced(name: getCurrentWeatherTool.name), - ), - ); - - final res2 = await chatModelForceWeatherTool.invoke( - PromptValue.string( - 'Tell me the weather in Barcelona, Spain and a joke about bears', - ), - ); - expect( - res2.output.toolCalls.map((tc) => tc.name).toSet(), - {getCurrentWeatherTool.name}, - ); - - final chatModelForceJokeTool = chatModelWithTools.bind( - ChatOpenAIOptions( - toolChoice: ChatToolChoice.forced(name: jokeTool.name), - ), - ); - - final res3 = await chatModelForceJokeTool.invoke( - PromptValue.string( - 'Tell me the weather in Barcelona, Spain and a joke about bears', - ), - ); - expect( - res3.output.toolCalls.map((tc) => tc.name).toSet(), - {jokeTool.name}, - ); - }); }); } diff --git a/packages/langchain_openai/test/chat_models/github_models_test.dart b/packages/langchain_openai/test/chat_models/github_models_test.dart deleted file mode 100644 index 7eac34dd..00000000 --- a/packages/langchain_openai/test/chat_models/github_models_test.dart +++ /dev/null @@ -1,181 +0,0 @@ -// ignore_for_file: avoid_print -@TestOn('vm') -library; // Uses dart:io - -import 'dart:convert'; -import 'dart:io'; - -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/prompts.dart'; -import 'package:langchain_core/tools.dart'; -import 
'package:langchain_openai/langchain_openai.dart'; -import 'package:test/test.dart'; - -void main() { - group('GitHub Models tests', () { - late ChatOpenAI chatModel; - - setUp(() async { - chatModel = ChatOpenAI( - apiKey: Platform.environment['GITHUB_TOKEN'], - baseUrl: 'https://models.inference.ai.azure.com', - ); - }); - - tearDown(() { - chatModel.close(); - }); - - test('Test invoke GitHub Models API with different models', () async { - final models = [ - 'gpt-4o', - 'AI21-Jamba-Instruct', - 'meta-llama-3.1-405b-instruct', - 'Mistral-large', - 'Phi-3.5-mini-instruct', - ]; - for (final model in models) { - print('Testing model: $model'); - final res = await chatModel.invoke( - PromptValue.string( - 'List the numbers from 1 to 9 in order. ' - 'Output ONLY the numbers in one line without any spaces or commas. ' - 'NUMBERS:', - ), - options: ChatOpenAIOptions( - model: model, - temperature: 0, - ), - ); - - expect(res.id, isNotEmpty); - expect( - res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), - contains('123456789'), - ); - expect(res.metadata, isNotEmpty, reason: model); - expect(res.metadata['created'], greaterThan(0), reason: model); - expect(res.metadata['model'], isNotEmpty, reason: model); - } - }); - - test('Test stream GitHub Models API with different models', () async { - final models = [ - 'gpt-4o', - 'AI21-Jamba-Instruct', - 'meta-llama-3.1-405b-instruct', - 'Phi-3.5-mini-instruct', - ]; - for (final model in models) { - print('Testing model: $model'); - final stream = chatModel.stream( - PromptValue.string( - 'List the numbers from 1 to 9 in order. ' - 'Output ONLY the numbers in one line without any spaces or commas. ' - 'NUMBERS:', - ), - options: ChatOpenAIOptions( - model: model, - temperature: 0, - ), - ); - - String content = ''; - int count = 0; - await for (final res in stream) { - content += res.output.content.replaceAll(RegExp(r'[\s\n]'), ''); - count++; - } - expect(count, greaterThan(1), reason: model); - expect(content, contains('123456789'), reason: model); - } - }); - - test('Test countTokens', () async { - final models = [ - 'gpt-4o', - 'AI21-Jamba-Instruct', - 'meta-llama-3.1-405b-instruct', - 'Mistral-large', - 'Phi-3.5-mini-instruct', - ]; - for (final model in models) { - print('Testing model: $model'); - const text = 'Hello, how are you?'; - - final numTokens = await chatModel.countTokens( - PromptValue.chat([ChatMessage.humanText(text)]), - options: ChatOpenAIOptions(model: model), - ); - expect(numTokens, 13, reason: model); - } - }); - - test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), - () async { - const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. 
San Francisco, CA', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - - final humanMessage = ChatMessage.humanText( - 'What’s the weather like in Boston right now?', - ); - final res1 = await chatModel.invoke( - PromptValue.chat([humanMessage]), - options: const ChatOpenAIOptions( - model: 'gpt-4o', - tools: [tool], - ), - ); - - final aiMessage1 = res1.output; - - expect(aiMessage1.content, isEmpty); - expect(aiMessage1.toolCalls, isNotEmpty); - final toolCall = aiMessage1.toolCalls.first; - - expect(toolCall.name, tool.name); - expect(toolCall.arguments.containsKey('location'), isTrue); - expect(toolCall.arguments['location'], contains('Boston')); - - final functionResult = { - 'temperature': '22', - 'unit': 'celsius', - 'description': 'Sunny', - }; - final functionMessage = ChatMessage.tool( - toolCallId: toolCall.id, - content: json.encode(functionResult), - ); - - final res2 = await chatModel.invoke( - PromptValue.chat([humanMessage, aiMessage1, functionMessage]), - options: const ChatOpenAIOptions( - model: 'gpt-4o', - tools: [tool], - ), - ); - - final aiMessage2 = res2.output; - - expect(aiMessage2.toolCalls, isEmpty); - expect(aiMessage2.content, contains('22')); - }); - }); -} diff --git a/packages/langchain_openai/test/chat_models/open_router_test.dart b/packages/langchain_openai/test/chat_models/open_router_test.dart index d7c8fc9c..396f8ac4 100644 --- a/packages/langchain_openai/test/chat_models/open_router_test.dart +++ b/packages/langchain_openai/test/chat_models/open_router_test.dart @@ -1,12 +1,10 @@ @TestOn('vm') library; // Uses dart:io -import 'dart:convert'; import 'dart:io'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/prompts.dart'; -import 'package:langchain_core/tools.dart'; import 'package:langchain_openai/langchain_openai.dart'; import 'package:test/test.dart'; @@ -27,7 +25,7 @@ void main() { test('Test invoke OpenRouter API with different models', () async { final models = [ - 'gpt-4o-mini', + 'gpt-3.5-turbo', 'gpt-4', 'google/gemini-pro', 'anthropic/claude-2', @@ -57,7 +55,7 @@ void main() { test('Test stream OpenRouter API with different models', () async { final models = [ - 'gpt-4o-mini', + 'gpt-3.5-turbo', 'gpt-4', // 'google/gemini-pro', // Not supported 'anthropic/claude-2', @@ -88,7 +86,7 @@ void main() { test('Test countTokens', () async { final models = [ - 'gpt-4o-mini', + 'gpt-3.5-turbo', 'gpt-4', 'google/gemini-pro', 'anthropic/claude-2', @@ -106,72 +104,5 @@ void main() { expect(numTokens, 13, reason: model); } }); - - test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), - () async { - const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. 
San Francisco, CA', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - - final humanMessage = ChatMessage.humanText( - 'What’s the weather like in Boston right now?', - ); - final res1 = await chatModel.invoke( - PromptValue.chat([humanMessage]), - options: const ChatOpenAIOptions( - model: 'gpt-4o', - tools: [tool], - ), - ); - - final aiMessage1 = res1.output; - - expect(aiMessage1.content, isEmpty); - expect(aiMessage1.toolCalls, isNotEmpty); - final toolCall = aiMessage1.toolCalls.first; - - expect(toolCall.name, tool.name); - expect(toolCall.arguments.containsKey('location'), isTrue); - expect(toolCall.arguments['location'], contains('Boston')); - - final functionResult = { - 'temperature': '22', - 'unit': 'celsius', - 'description': 'Sunny', - }; - final functionMessage = ChatMessage.tool( - toolCallId: toolCall.id, - content: json.encode(functionResult), - ); - - final res2 = await chatModel.invoke( - PromptValue.chat([humanMessage, aiMessage1, functionMessage]), - options: const ChatOpenAIOptions( - model: 'gpt-4o', - tools: [tool], - ), - ); - - final aiMessage2 = res2.output; - - expect(aiMessage2.toolCalls, isEmpty); - expect(aiMessage2.content, contains('22')); - }); }); } diff --git a/packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart b/packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart new file mode 100644 index 00000000..988c7e4c --- /dev/null +++ b/packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart @@ -0,0 +1,36 @@ +@TestOn('vm') +library; // Uses dart:io + +import 'dart:io'; + +import 'package:langchain_openai/langchain_openai.dart'; +import 'package:test/test.dart'; + +void main() { + group('Anyscale AI Embeddings tests', () { + late OpenAIEmbeddings embeddings; + + setUp(() async { + embeddings = OpenAIEmbeddings( + apiKey: Platform.environment['ANYSCALE_API_KEY'], + baseUrl: 'https://api.endpoints.anyscale.com/v1', + ); + }); + + tearDown(() { + embeddings.close(); + }); + + test('Test Anyscale Embeddings models', () async { + final models = [ + 'thenlper/gte-large', + ]; + for (final model in models) { + embeddings.model = model; + final res = await embeddings.embedQuery('Hello world'); + expect(res.length, greaterThan(0)); + await Future.delayed(const Duration(seconds: 1)); // Rate limit + } + }); + }); +} diff --git a/packages/langchain_openai/test/tools/dall_e_test.dart b/packages/langchain_openai/test/tools/dall_e_test.dart index 7a8a8407..5a9aba09 100644 --- a/packages/langchain_openai/test/tools/dall_e_test.dart +++ b/packages/langchain_openai/test/tools/dall_e_test.dart @@ -4,7 +4,7 @@ library; // Uses dart:io import 'dart:io'; -import 'package:langchain/langchain.dart' show AgentExecutor, ToolsAgent; +import 'package:langchain/langchain.dart' show AgentExecutor; import 'package:langchain_community/langchain_community.dart'; import 'package:langchain_core/tools.dart'; import 'package:langchain_openai/langchain_openai.dart'; @@ -62,7 +62,7 @@ void main() { ), ]; - final agent = ToolsAgent.fromLLMAndTools( + final agent = OpenAIToolsAgent.fromLLMAndTools( llm: llm, tools: tools, ); diff --git a/packages/langchain_pinecone/CHANGELOG.md b/packages/langchain_pinecone/CHANGELOG.md index f616d549..276d2616 100644 --- a/packages/langchain_pinecone/CHANGELOG.md +++ b/packages/langchain_pinecone/CHANGELOG.md @@ -1,27 +1,3 @@ -📣 Check out the [releases 
page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.1.0+9 - - - Update a dependency to the latest release. - -## 0.1.0+8 - - - Update a dependency to the latest release. - -## 0.1.0+7 - - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -## 0.1.0+6 - - - Update a dependency to the latest release. - -## 0.1.0+5 - - - Update a dependency to the latest release. - ## 0.1.0+4 - Update a dependency to the latest release. diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index 82e39fa2..141a96f4 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_pinecone description: LangChain.dart integration module for Pinecone fully-managed vector database. -version: 0.1.0+9 +version: 0.1.0+4 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -14,15 +14,15 @@ topics: - vector-db environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - http: ^1.2.2 - langchain_core: 0.3.6 + http: ^1.1.0 + langchain_core: ^0.3.1 meta: ^1.11.0 pinecone: ^0.7.2 - uuid: ^4.4.2 + uuid: ^4.3.3 dev_dependencies: - test: ^1.25.8 - langchain_openai: ^0.7.2 + test: ^1.25.2 + langchain_openai: ^0.6.1 diff --git a/packages/langchain_pinecone/pubspec_overrides.yaml b/packages/langchain_pinecone/pubspec_overrides.yaml index de62cfcc..8dd8d545 100644 --- a/packages/langchain_pinecone/pubspec_overrides.yaml +++ b/packages/langchain_pinecone/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_core,langchain_openai,openai_dart +# melos_managed_dependency_overrides: langchain_openai,openai_dart,langchain_core dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_supabase/CHANGELOG.md b/packages/langchain_supabase/CHANGELOG.md index bd6956b4..d98b5fe3 100644 --- a/packages/langchain_supabase/CHANGELOG.md +++ b/packages/langchain_supabase/CHANGELOG.md @@ -1,27 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.1.1+3 - - - Update a dependency to the latest release. - -## 0.1.1+2 - - - Update a dependency to the latest release. - -## 0.1.1+1 - - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -## 0.1.1 - - - Update a dependency to the latest release. - -## 0.1.0+5 - - - Update a dependency to the latest release. - ## 0.1.0+4 - Update a dependency to the latest release. 
diff --git a/packages/langchain_supabase/lib/src/vector_stores/supabase.dart b/packages/langchain_supabase/lib/src/vector_stores/supabase.dart index 0c777f01..f6d1e11e 100644 --- a/packages/langchain_supabase/lib/src/vector_stores/supabase.dart +++ b/packages/langchain_supabase/lib/src/vector_stores/supabase.dart @@ -54,7 +54,7 @@ import 'package:supabase/supabase.dart'; /// ``` /// /// See documentation for more details: -/// - [LangChain.dart Supabase docs](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/supabase) +/// - [LangChain.dart Supabase docs](https://langchaindart.com/#/modules/retrieval/vector_stores/integrations/supabase) /// - [Supabase Vector docs](https://supabase.com/docs/guides/ai) /// {@endtemplate} class Supabase extends VectorStore { diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index 9b5530ad..c480cdc7 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_supabase description: LangChain.dart integration module for Supabase (e.g. Supabase Vector). -version: 0.1.1+3 +version: 0.1.0+4 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -14,16 +14,16 @@ topics: - vector-db environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - http: ^1.2.2 - langchain_core: 0.3.6 + http: ^1.1.0 + langchain_core: ^0.3.1 meta: ^1.11.0 - supabase: ^2.2.7 + supabase: ^2.0.8 dev_dependencies: - test: ^1.25.8 - langchain: ^0.7.6 - langchain_community: 0.3.2 - langchain_openai: ^0.7.2 + test: ^1.25.2 + langchain: ^0.7.1 + langchain_community: 0.2.0+1 + langchain_openai: ^0.6.1 diff --git a/packages/langchain_supabase/pubspec_overrides.yaml b/packages/langchain_supabase/pubspec_overrides.yaml index b03ffbc5..5eb34624 100644 --- a/packages/langchain_supabase/pubspec_overrides.yaml +++ b/packages/langchain_supabase/pubspec_overrides.yaml @@ -1,4 +1,5 @@ -# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,langchain_openai,openai_dart,tavily_dart +# melos_managed_dependency_overrides: langchain_openai,openai_dart,langchain_core,langchain_community +# melos_managed_dependency_overrides: langchain dependency_overrides: langchain: path: ../langchain @@ -10,5 +11,3 @@ dependency_overrides: path: ../langchain_openai openai_dart: path: ../openai_dart - tavily_dart: - path: ../tavily_dart diff --git a/packages/langchain_weaviate/pubspec.yaml b/packages/langchain_weaviate/pubspec.yaml index 3d9b8cd3..f5f5de33 100644 --- a/packages/langchain_weaviate/pubspec.yaml +++ b/packages/langchain_weaviate/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_weaviate issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_weaviate homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com publish_to: none # Remove when the package is ready to be published topics: @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" diff --git a/packages/langchain_wikipedia/pubspec.yaml 
b/packages/langchain_wikipedia/pubspec.yaml index 2dcc9e5c..e1377267 100644 --- a/packages/langchain_wikipedia/pubspec.yaml +++ b/packages/langchain_wikipedia/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_wikipedia issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_wikipedia homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com publish_to: none # Remove when the package is ready to be published topics: @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" diff --git a/packages/langchain_wolfram/pubspec.yaml b/packages/langchain_wolfram/pubspec.yaml index 14b30014..b64e02a0 100644 --- a/packages/langchain_wolfram/pubspec.yaml +++ b/packages/langchain_wolfram/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_wolfram issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_wolfram homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com publish_to: none # Remove when the package is ready to be published topics: @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" diff --git a/packages/langgraph/.gitignore b/packages/langgraph/.gitignore deleted file mode 100644 index 3cceda55..00000000 --- a/packages/langgraph/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -# https://dart.dev/guides/libraries/private-files -# Created by `dart pub` -.dart_tool/ - -# Avoid committing pubspec.lock for library packages; see -# https://dart.dev/guides/libraries/private-files#pubspeclock. -pubspec.lock diff --git a/packages/langgraph/CHANGELOG.md b/packages/langgraph/CHANGELOG.md deleted file mode 100644 index 90f8e244..00000000 --- a/packages/langgraph/CHANGELOG.md +++ /dev/null @@ -1,3 +0,0 @@ -## 0.0.1-dev.1 - -- Bootstrap package. diff --git a/packages/langgraph/LICENSE b/packages/langgraph/LICENSE deleted file mode 100644 index f407ffdd..00000000 --- a/packages/langgraph/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 David Miguel Lozano - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/packages/langgraph/README.md b/packages/langgraph/README.md deleted file mode 100644 index 70fc2aae..00000000 --- a/packages/langgraph/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# 🦜🕸️LangGraph - -[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) -[![langgraph](https://img.shields.io/pub/v/langgraph.svg)](https://pub.dev/packages/langgraph) -[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) -[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) - -⚡ Building language agents as graphs ⚡ - -## Overview - -TODO - -## License - -LangChain.dart is licensed under the -[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). diff --git a/packages/langgraph/analysis_options.yaml b/packages/langgraph/analysis_options.yaml deleted file mode 100644 index f04c6cf0..00000000 --- a/packages/langgraph/analysis_options.yaml +++ /dev/null @@ -1 +0,0 @@ -include: ../../analysis_options.yaml diff --git a/packages/langgraph/example/langgraph_example.dart b/packages/langgraph/example/langgraph_example.dart deleted file mode 100644 index 21f3e9f2..00000000 --- a/packages/langgraph/example/langgraph_example.dart +++ /dev/null @@ -1,3 +0,0 @@ -void main() { - // TODO -} diff --git a/packages/langgraph/lib/langgraph.dart b/packages/langgraph/lib/langgraph.dart deleted file mode 100644 index 790b457d..00000000 --- a/packages/langgraph/lib/langgraph.dart +++ /dev/null @@ -1,2 +0,0 @@ -/// Build resilient language agents as graphs. -library; diff --git a/packages/langgraph/pubspec.yaml b/packages/langgraph/pubspec.yaml deleted file mode 100644 index e6ef9c18..00000000 --- a/packages/langgraph/pubspec.yaml +++ /dev/null @@ -1,16 +0,0 @@ -name: langgraph -description: Build resilient language agents as graphs. -version: 0.0.1-dev.1 -repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langgraph -issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langgraph -homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev - -topics: - - ai - - nlp - - llms - - langchain - -environment: - sdk: ">=3.4.0 <4.0.0" diff --git a/packages/mistralai_dart/CHANGELOG.md b/packages/mistralai_dart/CHANGELOG.md index ec5979cc..fcb706a7 100644 --- a/packages/mistralai_dart/CHANGELOG.md +++ b/packages/mistralai_dart/CHANGELOG.md @@ -1,15 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.0.3+3 - - - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) - -## 0.0.3+2 - - - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). 
([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) - ## 0.0.3+1 - **FIX**: Have the == implementation use Object instead of dynamic ([#334](https://github.com/davidmigloz/langchain_dart/issues/334)). ([89f7b0b9](https://github.com/davidmigloz/langchain_dart/commit/89f7b0b94144c216de19ec7244c48f3c34c2c635)) diff --git a/packages/mistralai_dart/lib/mistralai_dart.dart b/packages/mistralai_dart/lib/mistralai_dart.dart index 05cfac61..31efab90 100644 --- a/packages/mistralai_dart/lib/mistralai_dart.dart +++ b/packages/mistralai_dart/lib/mistralai_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Mistral AI API (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -library; +library mistralai_dart; export 'src/client.dart'; export 'src/generated/client.dart' show MistralAIClientException; diff --git a/packages/mistralai_dart/lib/src/http_client/http_client.dart b/packages/mistralai_dart/lib/src/http_client/http_client.dart index 0ad0b2fc..99555ca4 100644 --- a/packages/mistralai_dart/lib/src/http_client/http_client.dart +++ b/packages/mistralai_dart/lib/src/http_client/http_client.dart @@ -1,3 +1,4 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js_interop) 'http_client_html.dart'; + if (dart.library.js) 'http_client_html.dart' + if (dart.library.html) 'http_client_html.dart'; diff --git a/packages/mistralai_dart/pubspec.yaml b/packages/mistralai_dart/pubspec.yaml index 406b7170..a7aa8347 100644 --- a/packages/mistralai_dart/pubspec.yaml +++ b/packages/mistralai_dart/pubspec.yaml @@ -1,10 +1,10 @@ name: mistralai_dart description: Dart Client for the Mistral AI API (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.0.3+3 +version: 0.0.3+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/mistralai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:mistralai_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -13,22 +13,22 @@ topics: - mistral environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - fetch_client: ^1.1.2 - freezed_annotation: ^2.4.2 - http: ^1.2.2 - json_annotation: ^4.9.0 + fetch_client: ^1.0.2 + freezed_annotation: ^2.4.1 + http: ^1.1.0 + json_annotation: ^4.8.1 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.11 - freezed: ^2.5.7 - json_serializable: ^6.8.0 + build_runner: ^2.4.9 + freezed: ^2.4.7 + json_serializable: ^6.7.1 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 - test: ^1.25.8 + ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + test: ^1.25.2 diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index d6f79865..4b5ff033 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -1,40 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.2.2 - - - **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). 
([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) - -## 0.2.1 - - - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) - -## 0.2.0 - -> Note: This release has breaking changes. - - - **FEAT**: Add tool calling support in ollama_dart ([#504](https://github.com/davidmigloz/langchain_dart/issues/504)). ([1ffdb41b](https://github.com/davidmigloz/langchain_dart/commit/1ffdb41b8f19941336c1cd911c73f0b3d46af975)) - - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) - - **FEAT**: Add support for Ollama version and model info ([#488](https://github.com/davidmigloz/langchain_dart/issues/488)). ([a110ecb7](https://github.com/davidmigloz/langchain_dart/commit/a110ecb7f10e7975bd2416aa65add98984c6efb8)) - - **FEAT**: Add suffix support in Ollama completions API in ollama_dart ([#503](https://github.com/davidmigloz/langchain_dart/issues/503)). ([30d05a69](https://github.com/davidmigloz/langchain_dart/commit/30d05a69b07f88f803b9abfdf2fded9348a73490)) - - **BREAKING** **REFACTOR**: Change Ollama push model status type from enum to String ([#489](https://github.com/davidmigloz/langchain_dart/issues/489)). ([90c9ccd9](https://github.com/davidmigloz/langchain_dart/commit/90c9ccd986c7b679ed30225d2380120e17dfec41)) - - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) - -## 0.1.2 - - - **FEAT**: Add support for listing running Ollama models ([#451](https://github.com/davidmigloz/langchain_dart/issues/451)). ([cfaa31fb](https://github.com/davidmigloz/langchain_dart/commit/cfaa31fb8ce1dc128570c95d403809f71e0199d9)) - - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) - -## 0.1.1 - - - **FEAT**: Support buffered stream responses in ollama_dart ([#445](https://github.com/davidmigloz/langchain_dart/issues/445)). ([ce2ef30c](https://github.com/davidmigloz/langchain_dart/commit/ce2ef30c9a9a0dfe8f3059988b7007c94c45b9bd)) - - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) - -## 0.1.0+1 - - - **FIX**: digest path param in Ollama blob endpoints ([#430](https://github.com/davidmigloz/langchain_dart/issues/430)). ([2e9e935a](https://github.com/davidmigloz/langchain_dart/commit/2e9e935aefd74e5e9e09a23188a6c77ce535661d)) - ## 0.1.0 > Note: This release has breaking changes. diff --git a/packages/ollama_dart/README.md b/packages/ollama_dart/README.md index dc637664..0aaf5a97 100644 --- a/packages/ollama_dart/README.md +++ b/packages/ollama_dart/README.md @@ -17,33 +17,24 @@ Unofficial Dart client for [Ollama](https://ollama.ai/) API. 
**Supported endpoints:** - Completions (with streaming support) -- Chat completions (with streaming and tool calling support) +- Chat completions - Embeddings - Models - Blobs -- Version ## Table of contents - [Usage](#usage) * [Completions](#completions) - + [Generate completion](#generate-completion) - + [Stream completion](#stream-completion) * [Chat completions](#chat-completions) - + [Generate chat completion](#generate-chat-completion) - + [Stream chat completion](#stream-chat-completion) - + [Tool calling](#tool-calling) * [Embeddings](#embeddings) - + [Generate embedding](#generate-embedding) * [Models](#models) + [Create model](#create-model) + [List models](#list-models) - + [List running models](#list-running-models) + [Show Model Information](#show-model-information) + [Pull a Model](#pull-a-model) + [Push a Model](#push-a-model) + [Check if a Blob Exists](#check-if-a-blob-exists) - * [Version](#version) - [Advance Usage](#advance-usage) * [Default HTTP client](#default-http-client) * [Custom HTTP client ](#custom-http-client) @@ -61,7 +52,7 @@ Refer to the [documentation](https://github.com/jmorganca/ollama/blob/main/docs/ Given a prompt, the model will generate a response. -#### Generate completion +**Generate completion:** ```dart final generated = await client.generateCompletion( @@ -74,7 +65,7 @@ print(generated.response); // The sky appears blue because of a phenomenon called Rayleigh scattering... ``` -#### Stream completion +**Stream completion:** ```dart final stream = client.generateCompletionStream( @@ -95,7 +86,7 @@ print(text); Given a prompt, the model will generate a response in a chat format. -#### Generate chat completion +**Generate chat completion:** ```dart final res = await client.generateChatCompletion( @@ -118,7 +109,7 @@ print(res); // Message(role: MessageRole.assistant, content: 123456789) ``` -#### Stream chat completion +**Stream chat completion:** ```dart final stream = client.generateChatCompletionStream( @@ -146,91 +137,11 @@ print(text); // 123456789 ``` -#### Tool calling - -Tool calling allows a model to respond to a given prompt by generating output that matches a user-defined schema, that you can then use to call the tools in your code and return the result back to the model to complete the conversation. - -**Notes:** -- Tool calling requires Ollama 0.2.8 or newer. -- Streaming tool calls is not supported at the moment. -- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.2`](https://ollama.com/library/llama3.2)). - -```dart -const tool = Tool( - function: ToolFunction( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - parameters: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. 
San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ), -); - -const userMsg = Message( - role: MessageRole.user, - content: 'What’s the weather like in Barcelona in celsius?', -); - -final res1 = await client.generateChatCompletion( - request: GenerateChatCompletionRequest( - model: 'llama3.2', - messages: [userMsg], - tools: [tool], - ), -); - -print(res1.message.toolCalls); -// [ -// ToolCall( -// function: -// ToolCallFunction( -// name: get_current_weather, -// arguments: { -// location: Barcelona, ES, -// unit: celsius -// } -// ) -// ) -// ] - -// Call your tool here. For this example, we'll just mock the response. -const toolResult = '{"location": "Barcelona, ES", "temperature": 20, "unit": "celsius"}'; - -// Submit the response of the tool call to the model -final res2 = await client.generateChatCompletion( - request: GenerateChatCompletionRequest( - model: 'llama3.2', - messages: [ - userMsg, - res1.message, - Message( - role: MessageRole.tool, - content: toolResult, - ), - ], - ), -); -print(res2.message.content); -// The current weather in Barcelona is 20°C. -``` - ### Embeddings Given a prompt, the model will generate an embedding representing the prompt. -#### Generate embedding +**Generate embedding:** ```dart final generated = await client.generateEmbedding( @@ -281,15 +192,6 @@ final res = await client.listModels(); print(res.models); ``` -#### List running models - -Lists models currently loaded and their memory footprint. - -```dart -final res = await client.listRunningModels(); -print(res.models); -``` - #### Show Model Information Show details about a model including modelfile, template, parameters, license, and system prompt. @@ -349,25 +251,16 @@ await for (final res in stream) { #### Check if a Blob Exists -Ensures that the file blob used for a FROM or ADAPTER field exists on the server. This is checking your Ollama server and not Ollama.ai. +Check if a blob is known to the server. ```dart await client.checkBlob( - digest: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', + name: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', ); ``` If the blob doesn't exist, an `OllamaClientException` exception will be thrown. -### Version - -Get the version of the Ollama server. 
- -```dart -final res = await client.getVersion(); -print(res.version); -``` - ## Advance Usage ### Default HTTP client diff --git a/packages/ollama_dart/example/ollama_dart_example.dart b/packages/ollama_dart/example/ollama_dart_example.dart index 53dc2abf..15ef53d9 100644 --- a/packages/ollama_dart/example/ollama_dart_example.dart +++ b/packages/ollama_dart/example/ollama_dart_example.dart @@ -11,7 +11,6 @@ Future main() async { await _generateChatCompletion(client); await _generateChatCompletionWithHistory(client); await _generateChatCompletionStream(client); - await _generateChatToolCalling(client); // Embeddings await _generateEmbedding(client); @@ -70,7 +69,7 @@ Future _generateCompletionStream(final OllamaClient client) async { Future _generateChatCompletion(final OllamaClient client) async { final generated = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3.2', + model: 'llama3:latest', messages: [ Message( role: MessageRole.system, @@ -87,7 +86,7 @@ Future _generateChatCompletion(final OllamaClient client) async { ], ), ); - print(generated.message.content); + print(generated.message?.content); } Future _generateChatCompletionWithHistory( @@ -95,7 +94,7 @@ Future _generateChatCompletionWithHistory( ) async { final generated = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3.2', + model: 'llama3:latest', messages: [ Message( role: MessageRole.user, @@ -112,13 +111,13 @@ Future _generateChatCompletionWithHistory( ], ), ); - print(generated.message.content); + print(generated.message?.content); } Future _generateChatCompletionStream(final OllamaClient client) async { final stream = client.generateChatCompletionStream( request: const GenerateChatCompletionRequest( - model: 'llama3.2', + model: 'llama3:latest', messages: [ Message( role: MessageRole.system, @@ -133,84 +132,11 @@ Future _generateChatCompletionStream(final OllamaClient client) async { ); String text = ''; await for (final res in stream) { - text += res.message.content.trim(); + text += (res.message?.content ?? '').trim(); } print(text); } -Future _generateChatToolCalling(final OllamaClient client) async { - const tool = Tool( - function: ToolFunction( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - parameters: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ), - ); - - const userMsg = Message( - role: MessageRole.user, - content: 'What’s the weather like in Barcelona in celsius?', - ); - - final res1 = await client.generateChatCompletion( - request: const GenerateChatCompletionRequest( - model: 'llama3.2', - messages: [userMsg], - tools: [tool], - keepAlive: 1, - ), - ); - - print(res1.message.toolCalls); - // [ - // ToolCall( - // function: - // ToolCallFunction( - // name: get_current_weather, - // arguments: { - // location: Barcelona, ES, - // unit: celsius - // } - // ) - // ) - // ] - - // Call your tool here. For this example, we'll just mock the response. 
- const toolResult = - '{"location": "Barcelona, ES", "temperature": 20, "unit": "celsius"}'; - - // Submit the response of the tool call to the model - final res2 = await client.generateChatCompletion( - request: GenerateChatCompletionRequest( - model: 'llama3.2', - messages: [ - userMsg, - res1.message, - const Message( - role: MessageRole.tool, - content: toolResult, - ), - ], - ), - ); - print(res2.message.content); - // The current weather in Barcelona is 20°C. -} - Future _generateEmbedding(final OllamaClient client) async { final generated = await client.generateEmbedding( request: const GenerateEmbeddingRequest( @@ -291,7 +217,7 @@ Future _pushModelStream(final OllamaClient client) async { Future _checkBlob(final OllamaClient client) async { await client.checkBlob( - digest: + name: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', ); } diff --git a/packages/ollama_dart/lib/ollama_dart.dart b/packages/ollama_dart/lib/ollama_dart.dart index 1195c10c..a62c32c4 100644 --- a/packages/ollama_dart/lib/ollama_dart.dart +++ b/packages/ollama_dart/lib/ollama_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Ollama API (run Llama 3, Code Llama, and other models locally). -library; +library ollama_dart; export 'src/client.dart'; export 'src/generated/client.dart' show OllamaClientException; diff --git a/packages/ollama_dart/lib/src/client.dart b/packages/ollama_dart/lib/src/client.dart index c5dded40..2bb5a7be 100644 --- a/packages/ollama_dart/lib/src/client.dart +++ b/packages/ollama_dart/lib/src/client.dart @@ -1,5 +1,4 @@ // ignore_for_file: use_super_parameters -import 'dart:async'; import 'dart:convert'; import 'package:http/http.dart' as http; @@ -57,9 +56,11 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream - .transform(const _OllamaStreamTransformer()) // - .map((d) => GenerateCompletionResponse.fromJson(json.decode(d))); + yield* r.stream.map( + (final d) => GenerateCompletionResponse.fromJson( + json.decode(const Utf8Decoder().convert(d)), + ), + ); } // ------------------------------------------ @@ -84,9 +85,11 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream - .transform(const _OllamaStreamTransformer()) // - .map((d) => GenerateChatCompletionResponse.fromJson(json.decode(d))); + yield* r.stream.map( + (final d) => GenerateChatCompletionResponse.fromJson( + json.decode(const Utf8Decoder().convert(d)), + ), + ); } // ------------------------------------------ @@ -111,9 +114,11 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream - .transform(const _OllamaStreamTransformer()) // - .map((d) => CreateModelResponse.fromJson(json.decode(d))); + yield* r.stream.map( + (final d) => CreateModelResponse.fromJson( + json.decode(const Utf8Decoder().convert(d)), + ), + ); } // ------------------------------------------ @@ -138,9 +143,11 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream - .transform(const _OllamaStreamTransformer()) // - .map((d) => PullModelResponse.fromJson(json.decode(d))); + yield* r.stream.map( + (final d) => PullModelResponse.fromJson( + json.decode(const Utf8Decoder().convert(d)), + ), + ); } // ------------------------------------------ @@ -165,9 +172,11 @@ class OllamaClient 
extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream - .transform(const _OllamaStreamTransformer()) // - .map((d) => PushModelResponse.fromJson(json.decode(d))); + yield* r.stream.map( + (final d) => PushModelResponse.fromJson( + json.decode(const Utf8Decoder().convert(d)), + ), + ); } @override @@ -175,15 +184,3 @@ class OllamaClient extends g.OllamaClient { return onRequestHandler(request); } } - -class _OllamaStreamTransformer - extends StreamTransformerBase, String> { - const _OllamaStreamTransformer(); - - @override - Stream bind(final Stream> stream) { - return stream // - .transform(utf8.decoder) // - .transform(const LineSplitter()); - } -} diff --git a/packages/ollama_dart/lib/src/generated/client.dart b/packages/ollama_dart/lib/src/generated/client.dart index 0a530915..3ab44797 100644 --- a/packages/ollama_dart/lib/src/generated/client.dart +++ b/packages/ollama_dart/lib/src/generated/client.dart @@ -356,27 +356,6 @@ class OllamaClient { ); } - // ------------------------------------------ - // METHOD: getVersion - // ------------------------------------------ - - /// Returns the version of the Ollama server. - /// - /// This endpoint returns the version of the Ollama server. - /// - /// `GET` `http://localhost:11434/api/version` - Future getVersion() async { - final r = await makeRequest( - baseUrl: 'http://localhost:11434/api', - path: '/version', - method: HttpMethod.get, - isMultipart: false, - requestType: '', - responseType: 'application/json', - ); - return VersionResponse.fromJson(_jsonDecode(r)); - } - // ------------------------------------------ // METHOD: generateCompletion // ------------------------------------------ @@ -498,25 +477,6 @@ class OllamaClient { return ModelsResponse.fromJson(_jsonDecode(r)); } - // ------------------------------------------ - // METHOD: listRunningModels - // ------------------------------------------ - - /// List models that are running. - /// - /// `GET` `http://localhost:11434/api/ps` - Future listRunningModels() async { - final r = await makeRequest( - baseUrl: 'http://localhost:11434/api', - path: '/ps', - method: HttpMethod.get, - isMultipart: false, - requestType: '', - responseType: 'application/json', - ); - return ProcessResponse.fromJson(_jsonDecode(r)); - } - // ------------------------------------------ // METHOD: showModelInfo // ------------------------------------------ @@ -607,7 +567,7 @@ class OllamaClient { method: HttpMethod.post, isMultipart: false, requestType: 'application/json', - responseType: 'application/x-ndjson', + responseType: 'application/json', body: request, ); return PullModelResponse.fromJson(_jsonDecode(r)); @@ -633,7 +593,7 @@ class OllamaClient { method: HttpMethod.post, isMultipart: false, requestType: 'application/json', - responseType: 'application/x-ndjson', + responseType: 'application/json', body: request, ); return PushModelResponse.fromJson(_jsonDecode(r)); @@ -645,23 +605,26 @@ class OllamaClient { /// Create a blob from a file. Returns the server file path. /// - /// `digest`: the SHA256 digest of the blob + /// `name`: the SHA256 digest of the blob /// /// `request`: No description /// /// `POST` `http://localhost:11434/api/blobs/{digest}` Future createBlob({ - required String digest, + required String name, String? 
request, }) async { final _ = await makeRequest( baseUrl: 'http://localhost:11434/api', - path: '/blobs/$digest', + path: '/blobs/{digest}', method: HttpMethod.post, isMultipart: false, requestType: 'application/octet-stream', responseType: '', body: request, + queryParams: { + 'name': name, + }, ); } @@ -669,23 +632,24 @@ class OllamaClient { // METHOD: checkBlob // ------------------------------------------ - /// Ensures that the file blob used for a FROM or ADAPTER field exists on the server. - /// - /// This is checking your Ollama server and not Ollama.ai. + /// Check to see if a blob exists on the Ollama server which is useful when creating models. /// - /// `digest`: the SHA256 digest of the blob + /// `name`: the SHA256 digest of the blob /// /// `HEAD` `http://localhost:11434/api/blobs/{digest}` Future checkBlob({ - required String digest, + required String name, }) async { final _ = await makeRequest( baseUrl: 'http://localhost:11434/api', - path: '/blobs/$digest', + path: '/blobs/{digest}', method: HttpMethod.head, isMultipart: false, requestType: '', responseType: '', + queryParams: { + 'name': name, + }, ); } } diff --git a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart index fe47da47..491efa66 100644 --- a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart +++ b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart @@ -47,9 +47,6 @@ class GenerateChatCompletionRequest with _$GenerateChatCompletionRequest { /// - If set to 0, the model will be unloaded immediately once finished. /// - If not set, the model will stay loaded for 5 minutes by default @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive, - - /// A list of tools the model may call. - @JsonKey(includeIfNull: false) List? tools, }) = _GenerateChatCompletionRequest; /// Object construction from a JSON representation @@ -63,8 +60,7 @@ class GenerateChatCompletionRequest with _$GenerateChatCompletionRequest { 'format', 'options', 'stream', - 'keep_alive', - 'tools' + 'keep_alive' ]; /// Perform validations on the schema property values @@ -81,7 +77,6 @@ class GenerateChatCompletionRequest with _$GenerateChatCompletionRequest { 'options': options, 'stream': stream, 'keep_alive': keepAlive, - 'tools': tools, }; } } diff --git a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart index d7857fd4..faf7462a 100644 --- a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart +++ b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart @@ -16,18 +16,18 @@ class GenerateChatCompletionResponse with _$GenerateChatCompletionResponse { /// Factory constructor for GenerateChatCompletionResponse const factory GenerateChatCompletionResponse({ /// A message in the chat endpoint - required Message message, + @JsonKey(includeIfNull: false) Message? message, /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - required String model, + @JsonKey(includeIfNull: false) String? model, /// Date on which a model was created. 
- @JsonKey(name: 'created_at') required String createdAt, + @JsonKey(name: 'created_at', includeIfNull: false) String? createdAt, /// Whether the response has completed. - required bool done, + @JsonKey(includeIfNull: false) bool? done, /// Reason why the model is done generating a response. @JsonKey( diff --git a/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart b/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart index 014e2654..1368ac7a 100644 --- a/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart +++ b/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart @@ -23,9 +23,6 @@ class GenerateCompletionRequest with _$GenerateCompletionRequest { /// The prompt to generate a response. required String prompt, - /// The text that comes after the inserted text. - @JsonKey(includeIfNull: false) String? suffix, - /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? images, @@ -77,7 +74,6 @@ class GenerateCompletionRequest with _$GenerateCompletionRequest { static const List propertyNames = [ 'model', 'prompt', - 'suffix', 'images', 'system', 'template', @@ -99,7 +95,6 @@ class GenerateCompletionRequest with _$GenerateCompletionRequest { return { 'model': model, 'prompt': prompt, - 'suffix': suffix, 'images': images, 'system': system, 'template': template, diff --git a/packages/ollama_dart/lib/src/generated/schema/message.dart b/packages/ollama_dart/lib/src/generated/schema/message.dart index add48dc2..362e2349 100644 --- a/packages/ollama_dart/lib/src/generated/schema/message.dart +++ b/packages/ollama_dart/lib/src/generated/schema/message.dart @@ -23,10 +23,6 @@ class Message with _$Message { /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? images, - - /// A list of tools the model wants to call. - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, }) = _Message; /// Object construction from a JSON representation @@ -34,12 +30,7 @@ class Message with _$Message { _$MessageFromJson(json); /// List of all property names of schema - static const List propertyNames = [ - 'role', - 'content', - 'images', - 'tool_calls' - ]; + static const List propertyNames = ['role', 'content', 'images']; /// Perform validations on the schema property values String? validateSchema() { @@ -52,7 +43,6 @@ class Message with _$Message { 'role': role, 'content': content, 'images': images, - 'tool_calls': toolCalls, }; } } @@ -69,6 +59,4 @@ enum MessageRole { user, @JsonValue('assistant') assistant, - @JsonValue('tool') - tool, } diff --git a/packages/ollama_dart/lib/src/generated/schema/model_info.dart b/packages/ollama_dart/lib/src/generated/schema/model_info.dart index 30c2a949..cb212131 100644 --- a/packages/ollama_dart/lib/src/generated/schema/model_info.dart +++ b/packages/ollama_dart/lib/src/generated/schema/model_info.dart @@ -33,10 +33,6 @@ class ModelInfo with _$ModelInfo { /// Details about a model. @JsonKey(includeIfNull: false) ModelDetails? details, - /// Details about a model. - @JsonKey(name: 'model_info', includeIfNull: false) - ModelInformation? modelInfo, - /// The default messages for the model. @JsonKey(includeIfNull: false) List? 
messages, }) = _ModelInfo; @@ -53,7 +49,6 @@ class ModelInfo with _$ModelInfo { 'template', 'system', 'details', - 'model_info', 'messages' ]; @@ -71,7 +66,6 @@ class ModelInfo with _$ModelInfo { 'template': template, 'system': system, 'details': details, - 'model_info': modelInfo, 'messages': messages, }; } diff --git a/packages/ollama_dart/lib/src/generated/schema/model_information.dart b/packages/ollama_dart/lib/src/generated/schema/model_information.dart deleted file mode 100644 index d10848f8..00000000 --- a/packages/ollama_dart/lib/src/generated/schema/model_information.dart +++ /dev/null @@ -1,61 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of ollama_schema; - -// ========================================== -// CLASS: ModelInformation -// ========================================== - -/// Details about a model. -@freezed -class ModelInformation with _$ModelInformation { - const ModelInformation._(); - - /// Factory constructor for ModelInformation - const factory ModelInformation({ - /// The architecture of the model. - @JsonKey(name: 'general.architecture', includeIfNull: false) - String? generalArchitecture, - - /// The file type of the model. - @JsonKey(name: 'general.file_type', includeIfNull: false) - int? generalFileType, - - /// The number of parameters in the model. - @JsonKey(name: 'general.parameter_count', includeIfNull: false) - int? generalParameterCount, - - /// The number of parameters in the model. - @JsonKey(name: 'general.quantization_version', includeIfNull: false) - int? generalQuantizationVersion, - }) = _ModelInformation; - - /// Object construction from a JSON representation - factory ModelInformation.fromJson(Map json) => - _$ModelInformationFromJson(json); - - /// List of all property names of schema - static const List propertyNames = [ - 'general.architecture', - 'general.file_type', - 'general.parameter_count', - 'general.quantization_version' - ]; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'general.architecture': generalArchitecture, - 'general.file_type': generalFileType, - 'general.parameter_count': generalParameterCount, - 'general.quantization_version': generalQuantizationVersion, - }; - } -} diff --git a/packages/ollama_dart/lib/src/generated/schema/process_model.dart b/packages/ollama_dart/lib/src/generated/schema/process_model.dart deleted file mode 100644 index dad453f0..00000000 --- a/packages/ollama_dart/lib/src/generated/schema/process_model.dart +++ /dev/null @@ -1,69 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of ollama_schema; - -// ========================================== -// CLASS: ProcessModel -// ========================================== - -/// A model that is currently loaded. -@freezed -class ProcessModel with _$ProcessModel { - const ProcessModel._(); - - /// Factory constructor for ProcessModel - const factory ProcessModel({ - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) String? model, - - /// Size of the model on disk. 
- @JsonKey(includeIfNull: false) int? size, - - /// The model's digest. - @JsonKey(includeIfNull: false) String? digest, - - /// Details about a model. - @JsonKey(includeIfNull: false) ModelDetails? details, - - /// No Description - @JsonKey(name: 'expires_at', includeIfNull: false) String? expiresAt, - - /// Size of the model on disk. - @JsonKey(name: 'size_vram', includeIfNull: false) int? sizeVram, - }) = _ProcessModel; - - /// Object construction from a JSON representation - factory ProcessModel.fromJson(Map json) => - _$ProcessModelFromJson(json); - - /// List of all property names of schema - static const List propertyNames = [ - 'model', - 'size', - 'digest', - 'details', - 'expires_at', - 'size_vram' - ]; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'model': model, - 'size': size, - 'digest': digest, - 'details': details, - 'expires_at': expiresAt, - 'size_vram': sizeVram, - }; - } -} diff --git a/packages/ollama_dart/lib/src/generated/schema/process_response.dart b/packages/ollama_dart/lib/src/generated/schema/process_response.dart deleted file mode 100644 index 6261a813..00000000 --- a/packages/ollama_dart/lib/src/generated/schema/process_response.dart +++ /dev/null @@ -1,40 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of ollama_schema; - -// ========================================== -// CLASS: ProcessResponse -// ========================================== - -/// Response class for the list running models endpoint. -@freezed -class ProcessResponse with _$ProcessResponse { - const ProcessResponse._(); - - /// Factory constructor for ProcessResponse - const factory ProcessResponse({ - /// List of running models. - @JsonKey(includeIfNull: false) List? models, - }) = _ProcessResponse; - - /// Object construction from a JSON representation - factory ProcessResponse.fromJson(Map json) => - _$ProcessResponseFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['models']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'models': models, - }; - } -} diff --git a/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart b/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart index d3bb5142..bdfb3574 100644 --- a/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart +++ b/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart @@ -16,7 +16,11 @@ class PushModelResponse with _$PushModelResponse { /// Factory constructor for PushModelResponse const factory PushModelResponse({ /// Status pushing the model. - @JsonKey(includeIfNull: false) String? status, + @JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + PushModelStatus? status, /// the model's digest @JsonKey(includeIfNull: false) String? 
digest, diff --git a/packages/ollama_dart/lib/src/generated/schema/push_model_status.dart b/packages/ollama_dart/lib/src/generated/schema/push_model_status.dart new file mode 100644 index 00000000..c043c843 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/push_model_status.dart @@ -0,0 +1,21 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// ENUM: PushModelStatus +// ========================================== + +/// Status pushing the model. +enum PushModelStatus { + @JsonValue('retrieving manifest') + retrievingManifest, + @JsonValue('starting upload') + startingUpload, + @JsonValue('pushing manifest') + pushingManifest, + @JsonValue('success') + success, +} diff --git a/packages/ollama_dart/lib/src/generated/schema/request_options.dart b/packages/ollama_dart/lib/src/generated/schema/request_options.dart index 940d09d4..a83df364 100644 --- a/packages/ollama_dart/lib/src/generated/schema/request_options.dart +++ b/packages/ollama_dart/lib/src/generated/schema/request_options.dart @@ -18,90 +18,68 @@ class RequestOptions with _$RequestOptions { /// Number of tokens to keep from the prompt. @JsonKey(name: 'num_keep', includeIfNull: false) int? numKeep, - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model - /// generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) @JsonKey(includeIfNull: false) int? seed, - /// Maximum number of tokens to predict when generating text. - /// (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) @JsonKey(name: 'num_predict', includeIfNull: false) int? numPredict, - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, - /// while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? topK, - /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value - /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? topP, - /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum - /// probability for a token to be considered, relative to the probability of the most likely token. For - /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less - /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) - @JsonKey(name: 'min_p', includeIfNull: false) double? minP, - - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value - /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, - /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) + /// Typical p is used to reduce the impact of less probable tokens from the output. @JsonKey(name: 'typical_p', includeIfNull: false) double? typicalP, - /// Sets how far back for the model to look back to prevent repetition. - /// (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? repeatLastN, - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. - /// (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) @JsonKey(includeIfNull: false) double? temperature, - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more - /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @JsonKey(name: 'repeat_penalty', includeIfNull: false) double? repeatPenalty, - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the - /// model's likelihood to talk about new topics. (Default: 0) + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the - /// model's likelihood to repeat the same line verbatim. (Default: 0) + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? frequencyPenalty, - /// Enable Mirostat sampling for controlling perplexity. - /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @JsonKey(includeIfNull: false) int? mirostat, - /// Controls the balance between coherence and diversity of the output. A lower value will result in more - /// focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) @JsonKey(name: 'mirostat_tau', includeIfNull: false) double? mirostatTau, - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate - /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. - /// (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) @JsonKey(name: 'mirostat_eta', includeIfNull: false) double? mirostatEta, - /// Penalize newlines in the output. (Default: true) + /// Penalize newlines in the output. (Default: false) @JsonKey(name: 'penalize_newline', includeIfNull: false) bool? penalizeNewline, - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop - /// sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. @JsonKey(includeIfNull: false) List? stop, /// Enable NUMA support. (Default: false) @JsonKey(includeIfNull: false) bool? numa, - /// Sets the size of the context window used to generate the next token. (Default: 2048) + /// Sets the size of the context window used to generate the next token. @JsonKey(name: 'num_ctx', includeIfNull: false) int? numCtx, - /// Sets the number of batches to use for generation. (Default: 512) + /// Sets the number of batches to use for generation. (Default: 1) @JsonKey(name: 'num_batch', includeIfNull: false) int? numBatch, - /// The number of layers to send to the GPU(s). - /// On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. @JsonKey(name: 'num_gpu', includeIfNull: false) int? numGpu, /// The GPU to use for the main model. Default is 0. @@ -110,7 +88,7 @@ class RequestOptions with _$RequestOptions { /// Enable low VRAM mode. (Default: false) @JsonKey(name: 'low_vram', includeIfNull: false) bool? lowVram, - /// Enable f16 key/value. (Default: true) + /// Enable f16 key/value. (Default: false) @JsonKey(name: 'f16_kv', includeIfNull: false) bool? f16Kv, /// Enable logits all. (Default: false) @@ -125,9 +103,7 @@ class RequestOptions with _$RequestOptions { /// Enable mlock. (Default: false) @JsonKey(name: 'use_mlock', includeIfNull: false) bool? useMlock, - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal - /// performance. It is recommended to set this value to the number of physical CPU cores your system has - /// (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). @JsonKey(name: 'num_thread', includeIfNull: false) int? 
numThread, }) = _RequestOptions; @@ -142,7 +118,6 @@ class RequestOptions with _$RequestOptions { 'num_predict', 'top_k', 'top_p', - 'min_p', 'tfs_z', 'typical_p', 'repeat_last_n', @@ -182,7 +157,6 @@ class RequestOptions with _$RequestOptions { 'num_predict': numPredict, 'top_k': topK, 'top_p': topP, - 'min_p': minP, 'tfs_z': tfsZ, 'typical_p': typicalP, 'repeat_last_n': repeatLastN, diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.dart b/packages/ollama_dart/lib/src/generated/schema/schema.dart index f951912a..5c8eb964 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.dart @@ -13,18 +13,11 @@ part 'schema.freezed.dart'; part 'generate_completion_request.dart'; part 'request_options.dart'; part 'response_format.dart'; -part 'version_response.dart'; part 'generate_completion_response.dart'; part 'generate_chat_completion_request.dart'; part 'generate_chat_completion_response.dart'; part 'done_reason.dart'; part 'message.dart'; -part 'tool.dart'; -part 'tool_function.dart'; -part 'tool_function_params.dart'; -part 'tool_call.dart'; -part 'tool_call_function.dart'; -part 'tool_call_function_args.dart'; part 'generate_embedding_request.dart'; part 'generate_embedding_response.dart'; part 'create_model_request.dart'; @@ -33,9 +26,6 @@ part 'create_model_status.dart'; part 'models_response.dart'; part 'model.dart'; part 'model_details.dart'; -part 'model_information.dart'; -part 'process_response.dart'; -part 'process_model.dart'; part 'model_info_request.dart'; part 'model_info.dart'; part 'copy_model_request.dart'; @@ -45,3 +35,4 @@ part 'pull_model_response.dart'; part 'pull_model_status.dart'; part 'push_model_request.dart'; part 'push_model_response.dart'; +part 'push_model_status.dart'; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index 1c77269c..ab02ac2b 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -29,10 +29,6 @@ mixin _$GenerateCompletionRequest { /// The prompt to generate a response. String get prompt => throw _privateConstructorUsedError; - /// The text that comes after the inserted text. - @JsonKey(includeIfNull: false) - String? get suffix => throw _privateConstructorUsedError; - /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? get images => throw _privateConstructorUsedError; @@ -95,7 +91,6 @@ abstract class $GenerateCompletionRequestCopyWith<$Res> { $Res call( {String model, String prompt, - @JsonKey(includeIfNull: false) String? suffix, @JsonKey(includeIfNull: false) List? images, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) String? template, @@ -128,7 +123,6 @@ class _$GenerateCompletionRequestCopyWithImpl<$Res, $Res call({ Object? model = null, Object? prompt = null, - Object? suffix = freezed, Object? images = freezed, Object? system = freezed, Object? template = freezed, @@ -148,10 +142,6 @@ class _$GenerateCompletionRequestCopyWithImpl<$Res, ? _value.prompt : prompt // ignore: cast_nullable_to_non_nullable as String, - suffix: freezed == suffix - ? _value.suffix - : suffix // ignore: cast_nullable_to_non_nullable - as String?, images: freezed == images ? 
_value.images : images // ignore: cast_nullable_to_non_nullable @@ -216,7 +206,6 @@ abstract class _$$GenerateCompletionRequestImplCopyWith<$Res> $Res call( {String model, String prompt, - @JsonKey(includeIfNull: false) String? suffix, @JsonKey(includeIfNull: false) List? images, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) String? template, @@ -249,7 +238,6 @@ class __$$GenerateCompletionRequestImplCopyWithImpl<$Res> $Res call({ Object? model = null, Object? prompt = null, - Object? suffix = freezed, Object? images = freezed, Object? system = freezed, Object? template = freezed, @@ -269,10 +257,6 @@ class __$$GenerateCompletionRequestImplCopyWithImpl<$Res> ? _value.prompt : prompt // ignore: cast_nullable_to_non_nullable as String, - suffix: freezed == suffix - ? _value.suffix - : suffix // ignore: cast_nullable_to_non_nullable - as String?, images: freezed == images ? _value._images : images // ignore: cast_nullable_to_non_nullable @@ -319,7 +303,6 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { const _$GenerateCompletionRequestImpl( {required this.model, required this.prompt, - @JsonKey(includeIfNull: false) this.suffix, @JsonKey(includeIfNull: false) final List? images, @JsonKey(includeIfNull: false) this.system, @JsonKey(includeIfNull: false) this.template, @@ -349,11 +332,6 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { @override final String prompt; - /// The text that comes after the inserted text. - @override - @JsonKey(includeIfNull: false) - final String? suffix; - /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) final List? _images; @@ -431,7 +409,7 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { @override String toString() { - return 'GenerateCompletionRequest(model: $model, prompt: $prompt, suffix: $suffix, images: $images, system: $system, template: $template, context: $context, options: $options, format: $format, raw: $raw, stream: $stream, keepAlive: $keepAlive)'; + return 'GenerateCompletionRequest(model: $model, prompt: $prompt, images: $images, system: $system, template: $template, context: $context, options: $options, format: $format, raw: $raw, stream: $stream, keepAlive: $keepAlive)'; } @override @@ -441,7 +419,6 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { other is _$GenerateCompletionRequestImpl && (identical(other.model, model) || other.model == model) && (identical(other.prompt, prompt) || other.prompt == prompt) && - (identical(other.suffix, suffix) || other.suffix == suffix) && const DeepCollectionEquality().equals(other._images, _images) && (identical(other.system, system) || other.system == system) && (identical(other.template, template) || @@ -461,7 +438,6 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { runtimeType, model, prompt, - suffix, const DeepCollectionEquality().hash(_images), system, template, @@ -491,7 +467,6 @@ abstract class _GenerateCompletionRequest extends GenerateCompletionRequest { const factory _GenerateCompletionRequest( {required final String model, required final String prompt, - @JsonKey(includeIfNull: false) final String? suffix, @JsonKey(includeIfNull: false) final List? images, @JsonKey(includeIfNull: false) final String? system, @JsonKey(includeIfNull: false) final String? 
template, @@ -522,11 +497,6 @@ abstract class _GenerateCompletionRequest extends GenerateCompletionRequest { String get prompt; @override - /// The text that comes after the inserted text. - @JsonKey(includeIfNull: false) - String? get suffix; - @override - /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? get images; @@ -597,89 +567,67 @@ mixin _$RequestOptions { @JsonKey(name: 'num_keep', includeIfNull: false) int? get numKeep => throw _privateConstructorUsedError; - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model - /// generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; - /// Maximum number of tokens to predict when generating text. - /// (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) @JsonKey(name: 'num_predict', includeIfNull: false) int? get numPredict => throw _privateConstructorUsedError; - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, - /// while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? get topK => throw _privateConstructorUsedError; - /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value - /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum - /// probability for a token to be considered, relative to the probability of the most likely token. For - /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less - /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) - @JsonKey(name: 'min_p', includeIfNull: false) - double? get minP => throw _privateConstructorUsedError; - - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value - /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? get tfsZ => throw _privateConstructorUsedError; - /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) + /// Typical p is used to reduce the impact of less probable tokens from the output. 
@JsonKey(name: 'typical_p', includeIfNull: false) double? get typicalP => throw _privateConstructorUsedError; - /// Sets how far back for the model to look back to prevent repetition. - /// (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? get repeatLastN => throw _privateConstructorUsedError; - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. - /// (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more - /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @JsonKey(name: 'repeat_penalty', includeIfNull: false) double? get repeatPenalty => throw _privateConstructorUsedError; - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the - /// model's likelihood to talk about new topics. (Default: 0) + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty => throw _privateConstructorUsedError; - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the - /// model's likelihood to repeat the same line verbatim. (Default: 0) + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty => throw _privateConstructorUsedError; - /// Enable Mirostat sampling for controlling perplexity. - /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @JsonKey(includeIfNull: false) int? get mirostat => throw _privateConstructorUsedError; - /// Controls the balance between coherence and diversity of the output. A lower value will result in more - /// focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) @JsonKey(name: 'mirostat_tau', includeIfNull: false) double? get mirostatTau => throw _privateConstructorUsedError; - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate - /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. - /// (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) @JsonKey(name: 'mirostat_eta', includeIfNull: false) double? 
get mirostatEta => throw _privateConstructorUsedError; - /// Penalize newlines in the output. (Default: true) + /// Penalize newlines in the output. (Default: false) @JsonKey(name: 'penalize_newline', includeIfNull: false) bool? get penalizeNewline => throw _privateConstructorUsedError; - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop - /// sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. @JsonKey(includeIfNull: false) List? get stop => throw _privateConstructorUsedError; @@ -687,16 +635,15 @@ mixin _$RequestOptions { @JsonKey(includeIfNull: false) bool? get numa => throw _privateConstructorUsedError; - /// Sets the size of the context window used to generate the next token. (Default: 2048) + /// Sets the size of the context window used to generate the next token. @JsonKey(name: 'num_ctx', includeIfNull: false) int? get numCtx => throw _privateConstructorUsedError; - /// Sets the number of batches to use for generation. (Default: 512) + /// Sets the number of batches to use for generation. (Default: 1) @JsonKey(name: 'num_batch', includeIfNull: false) int? get numBatch => throw _privateConstructorUsedError; - /// The number of layers to send to the GPU(s). - /// On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. @JsonKey(name: 'num_gpu', includeIfNull: false) int? get numGpu => throw _privateConstructorUsedError; @@ -708,7 +655,7 @@ mixin _$RequestOptions { @JsonKey(name: 'low_vram', includeIfNull: false) bool? get lowVram => throw _privateConstructorUsedError; - /// Enable f16 key/value. (Default: true) + /// Enable f16 key/value. (Default: false) @JsonKey(name: 'f16_kv', includeIfNull: false) bool? get f16Kv => throw _privateConstructorUsedError; @@ -728,9 +675,7 @@ mixin _$RequestOptions { @JsonKey(name: 'use_mlock', includeIfNull: false) bool? get useMlock => throw _privateConstructorUsedError; - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal - /// performance. It is recommended to set this value to the number of physical CPU cores your system has - /// (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). @JsonKey(name: 'num_thread', includeIfNull: false) int? get numThread => throw _privateConstructorUsedError; @@ -752,7 +697,6 @@ abstract class $RequestOptionsCopyWith<$Res> { @JsonKey(name: 'num_predict', includeIfNull: false) int? numPredict, @JsonKey(name: 'top_k', includeIfNull: false) int? topK, @JsonKey(name: 'top_p', includeIfNull: false) double? topP, - @JsonKey(name: 'min_p', includeIfNull: false) double? minP, @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) double? typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? repeatLastN, @@ -801,7 +745,6 @@ class _$RequestOptionsCopyWithImpl<$Res, $Val extends RequestOptions> Object? numPredict = freezed, Object? topK = freezed, Object? topP = freezed, - Object? minP = freezed, Object? tfsZ = freezed, Object? typicalP = freezed, Object? 
repeatLastN = freezed, @@ -848,10 +791,6 @@ class _$RequestOptionsCopyWithImpl<$Res, $Val extends RequestOptions> ? _value.topP : topP // ignore: cast_nullable_to_non_nullable as double?, - minP: freezed == minP - ? _value.minP - : minP // ignore: cast_nullable_to_non_nullable - as double?, tfsZ: freezed == tfsZ ? _value.tfsZ : tfsZ // ignore: cast_nullable_to_non_nullable @@ -966,7 +905,6 @@ abstract class _$$RequestOptionsImplCopyWith<$Res> @JsonKey(name: 'num_predict', includeIfNull: false) int? numPredict, @JsonKey(name: 'top_k', includeIfNull: false) int? topK, @JsonKey(name: 'top_p', includeIfNull: false) double? topP, - @JsonKey(name: 'min_p', includeIfNull: false) double? minP, @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) double? typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? repeatLastN, @@ -1013,7 +951,6 @@ class __$$RequestOptionsImplCopyWithImpl<$Res> Object? numPredict = freezed, Object? topK = freezed, Object? topP = freezed, - Object? minP = freezed, Object? tfsZ = freezed, Object? typicalP = freezed, Object? repeatLastN = freezed, @@ -1060,10 +997,6 @@ class __$$RequestOptionsImplCopyWithImpl<$Res> ? _value.topP : topP // ignore: cast_nullable_to_non_nullable as double?, - minP: freezed == minP - ? _value.minP - : minP // ignore: cast_nullable_to_non_nullable - as double?, tfsZ: freezed == tfsZ ? _value.tfsZ : tfsZ // ignore: cast_nullable_to_non_nullable @@ -1173,7 +1106,6 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'num_predict', includeIfNull: false) this.numPredict, @JsonKey(name: 'top_k', includeIfNull: false) this.topK, @JsonKey(name: 'top_p', includeIfNull: false) this.topP, - @JsonKey(name: 'min_p', includeIfNull: false) this.minP, @JsonKey(name: 'tfs_z', includeIfNull: false) this.tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) this.typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) this.repeatLastN, @@ -1212,109 +1144,85 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'num_keep', includeIfNull: false) final int? numKeep; - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model - /// generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) @override @JsonKey(includeIfNull: false) final int? seed; - /// Maximum number of tokens to predict when generating text. - /// (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) @override @JsonKey(name: 'num_predict', includeIfNull: false) final int? numPredict; - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, - /// while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) @override @JsonKey(name: 'top_k', includeIfNull: false) final int? topK; - /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value - /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum - /// probability for a token to be considered, relative to the probability of the most likely token. For - /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less - /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) - @override - @JsonKey(name: 'min_p', includeIfNull: false) - final double? minP; - - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value - /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @override @JsonKey(name: 'tfs_z', includeIfNull: false) final double? tfsZ; - /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) + /// Typical p is used to reduce the impact of less probable tokens from the output. @override @JsonKey(name: 'typical_p', includeIfNull: false) final double? typicalP; - /// Sets how far back for the model to look back to prevent repetition. - /// (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) @override @JsonKey(name: 'repeat_last_n', includeIfNull: false) final int? repeatLastN; - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. - /// (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) @override @JsonKey(includeIfNull: false) final double? temperature; - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more - /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @override @JsonKey(name: 'repeat_penalty', includeIfNull: false) final double? repeatPenalty; - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the - /// model's likelihood to talk about new topics. (Default: 0) + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. @override @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty; - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the - /// model's likelihood to repeat the same line verbatim. (Default: 0) + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @override @JsonKey(name: 'frequency_penalty', includeIfNull: false) final double? frequencyPenalty; - /// Enable Mirostat sampling for controlling perplexity. 
- /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @override @JsonKey(includeIfNull: false) final int? mirostat; - /// Controls the balance between coherence and diversity of the output. A lower value will result in more - /// focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) @override @JsonKey(name: 'mirostat_tau', includeIfNull: false) final double? mirostatTau; - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate - /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. - /// (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) @override @JsonKey(name: 'mirostat_eta', includeIfNull: false) final double? mirostatEta; - /// Penalize newlines in the output. (Default: true) + /// Penalize newlines in the output. (Default: false) @override @JsonKey(name: 'penalize_newline', includeIfNull: false) final bool? penalizeNewline; - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop - /// sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. final List? _stop; - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop - /// sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. @override @JsonKey(includeIfNull: false) List? get stop { @@ -1330,18 +1238,17 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(includeIfNull: false) final bool? numa; - /// Sets the size of the context window used to generate the next token. (Default: 2048) + /// Sets the size of the context window used to generate the next token. @override @JsonKey(name: 'num_ctx', includeIfNull: false) final int? numCtx; - /// Sets the number of batches to use for generation. (Default: 512) + /// Sets the number of batches to use for generation. (Default: 1) @override @JsonKey(name: 'num_batch', includeIfNull: false) final int? numBatch; - /// The number of layers to send to the GPU(s). - /// On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. @override @JsonKey(name: 'num_gpu', includeIfNull: false) final int? numGpu; @@ -1356,7 +1263,7 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'low_vram', includeIfNull: false) final bool? lowVram; - /// Enable f16 key/value. (Default: true) + /// Enable f16 key/value. (Default: false) @override @JsonKey(name: 'f16_kv', includeIfNull: false) final bool? f16Kv; @@ -1381,16 +1288,14 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'use_mlock', includeIfNull: false) final bool? useMlock; - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal - /// performance. 
It is recommended to set this value to the number of physical CPU cores your system has - /// (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). @override @JsonKey(name: 'num_thread', includeIfNull: false) final int? numThread; @override String toString() { - return 'RequestOptions(numKeep: $numKeep, seed: $seed, numPredict: $numPredict, topK: $topK, topP: $topP, minP: $minP, tfsZ: $tfsZ, typicalP: $typicalP, repeatLastN: $repeatLastN, temperature: $temperature, repeatPenalty: $repeatPenalty, presencePenalty: $presencePenalty, frequencyPenalty: $frequencyPenalty, mirostat: $mirostat, mirostatTau: $mirostatTau, mirostatEta: $mirostatEta, penalizeNewline: $penalizeNewline, stop: $stop, numa: $numa, numCtx: $numCtx, numBatch: $numBatch, numGpu: $numGpu, mainGpu: $mainGpu, lowVram: $lowVram, f16Kv: $f16Kv, logitsAll: $logitsAll, vocabOnly: $vocabOnly, useMmap: $useMmap, useMlock: $useMlock, numThread: $numThread)'; + return 'RequestOptions(numKeep: $numKeep, seed: $seed, numPredict: $numPredict, topK: $topK, topP: $topP, tfsZ: $tfsZ, typicalP: $typicalP, repeatLastN: $repeatLastN, temperature: $temperature, repeatPenalty: $repeatPenalty, presencePenalty: $presencePenalty, frequencyPenalty: $frequencyPenalty, mirostat: $mirostat, mirostatTau: $mirostatTau, mirostatEta: $mirostatEta, penalizeNewline: $penalizeNewline, stop: $stop, numa: $numa, numCtx: $numCtx, numBatch: $numBatch, numGpu: $numGpu, mainGpu: $mainGpu, lowVram: $lowVram, f16Kv: $f16Kv, logitsAll: $logitsAll, vocabOnly: $vocabOnly, useMmap: $useMmap, useMlock: $useMlock, numThread: $numThread)'; } @override @@ -1404,7 +1309,6 @@ class _$RequestOptionsImpl extends _RequestOptions { other.numPredict == numPredict) && (identical(other.topK, topK) || other.topK == topK) && (identical(other.topP, topP) || other.topP == topP) && - (identical(other.minP, minP) || other.minP == minP) && (identical(other.tfsZ, tfsZ) || other.tfsZ == tfsZ) && (identical(other.typicalP, typicalP) || other.typicalP == typicalP) && @@ -1455,7 +1359,6 @@ class _$RequestOptionsImpl extends _RequestOptions { numPredict, topK, topP, - minP, tfsZ, typicalP, repeatLastN, @@ -1504,7 +1407,6 @@ abstract class _RequestOptions extends RequestOptions { @JsonKey(name: 'num_predict', includeIfNull: false) final int? numPredict, @JsonKey(name: 'top_k', includeIfNull: false) final int? topK, @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, - @JsonKey(name: 'min_p', includeIfNull: false) final double? minP, @JsonKey(name: 'tfs_z', includeIfNull: false) final double? tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) final double? typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) @@ -1549,105 +1451,82 @@ abstract class _RequestOptions extends RequestOptions { int? get numKeep; @override - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model - /// generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) @JsonKey(includeIfNull: false) int? get seed; @override - /// Maximum number of tokens to predict when generating text. 
- /// (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) @JsonKey(name: 'num_predict', includeIfNull: false) int? get numPredict; @override - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, - /// while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? get topK; @override - /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value - /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; @override - /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum - /// probability for a token to be considered, relative to the probability of the most likely token. For - /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less - /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) - @JsonKey(name: 'min_p', includeIfNull: false) - double? get minP; - @override - - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value - /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? get tfsZ; @override - /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) + /// Typical p is used to reduce the impact of less probable tokens from the output. @JsonKey(name: 'typical_p', includeIfNull: false) double? get typicalP; @override - /// Sets how far back for the model to look back to prevent repetition. - /// (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? get repeatLastN; @override - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. - /// (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) @JsonKey(includeIfNull: false) double? get temperature; @override - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more - /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @JsonKey(name: 'repeat_penalty', includeIfNull: false) double? 
get repeatPenalty; @override - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the - /// model's likelihood to talk about new topics. (Default: 0) + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; @override - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the - /// model's likelihood to repeat the same line verbatim. (Default: 0) + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty; @override - /// Enable Mirostat sampling for controlling perplexity. - /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @JsonKey(includeIfNull: false) int? get mirostat; @override - /// Controls the balance between coherence and diversity of the output. A lower value will result in more - /// focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) @JsonKey(name: 'mirostat_tau', includeIfNull: false) double? get mirostatTau; @override - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate - /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. - /// (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) @JsonKey(name: 'mirostat_eta', includeIfNull: false) double? get mirostatEta; @override - /// Penalize newlines in the output. (Default: true) + /// Penalize newlines in the output. (Default: false) @JsonKey(name: 'penalize_newline', includeIfNull: false) bool? get penalizeNewline; @override - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop - /// sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. @JsonKey(includeIfNull: false) List? get stop; @override @@ -1657,18 +1536,17 @@ abstract class _RequestOptions extends RequestOptions { bool? get numa; @override - /// Sets the size of the context window used to generate the next token. (Default: 2048) + /// Sets the size of the context window used to generate the next token. @JsonKey(name: 'num_ctx', includeIfNull: false) int? get numCtx; @override - /// Sets the number of batches to use for generation. (Default: 512) + /// Sets the number of batches to use for generation. (Default: 1) @JsonKey(name: 'num_batch', includeIfNull: false) int? get numBatch; @override - /// The number of layers to send to the GPU(s). - /// On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. @JsonKey(name: 'num_gpu', includeIfNull: false) int? 
get numGpu; @override @@ -1683,7 +1561,7 @@ abstract class _RequestOptions extends RequestOptions { bool? get lowVram; @override - /// Enable f16 key/value. (Default: true) + /// Enable f16 key/value. (Default: false) @JsonKey(name: 'f16_kv', includeIfNull: false) bool? get f16Kv; @override @@ -1708,9 +1586,7 @@ abstract class _RequestOptions extends RequestOptions { bool? get useMlock; @override - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal - /// performance. It is recommended to set this value to the number of physical CPU cores your system has - /// (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). @JsonKey(name: 'num_thread', includeIfNull: false) int? get numThread; @override @@ -1719,154 +1595,6 @@ abstract class _RequestOptions extends RequestOptions { throw _privateConstructorUsedError; } -VersionResponse _$VersionResponseFromJson(Map json) { - return _VersionResponse.fromJson(json); -} - -/// @nodoc -mixin _$VersionResponse { - /// The version of the Ollama server. - @JsonKey(includeIfNull: false) - String? get version => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $VersionResponseCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $VersionResponseCopyWith<$Res> { - factory $VersionResponseCopyWith( - VersionResponse value, $Res Function(VersionResponse) then) = - _$VersionResponseCopyWithImpl<$Res, VersionResponse>; - @useResult - $Res call({@JsonKey(includeIfNull: false) String? version}); -} - -/// @nodoc -class _$VersionResponseCopyWithImpl<$Res, $Val extends VersionResponse> - implements $VersionResponseCopyWith<$Res> { - _$VersionResponseCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? version = freezed, - }) { - return _then(_value.copyWith( - version: freezed == version - ? _value.version - : version // ignore: cast_nullable_to_non_nullable - as String?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$VersionResponseImplCopyWith<$Res> - implements $VersionResponseCopyWith<$Res> { - factory _$$VersionResponseImplCopyWith(_$VersionResponseImpl value, - $Res Function(_$VersionResponseImpl) then) = - __$$VersionResponseImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({@JsonKey(includeIfNull: false) String? version}); -} - -/// @nodoc -class __$$VersionResponseImplCopyWithImpl<$Res> - extends _$VersionResponseCopyWithImpl<$Res, _$VersionResponseImpl> - implements _$$VersionResponseImplCopyWith<$Res> { - __$$VersionResponseImplCopyWithImpl( - _$VersionResponseImpl _value, $Res Function(_$VersionResponseImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? version = freezed, - }) { - return _then(_$VersionResponseImpl( - version: freezed == version - ? 
_value.version - : version // ignore: cast_nullable_to_non_nullable - as String?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$VersionResponseImpl extends _VersionResponse { - const _$VersionResponseImpl({@JsonKey(includeIfNull: false) this.version}) - : super._(); - - factory _$VersionResponseImpl.fromJson(Map json) => - _$$VersionResponseImplFromJson(json); - - /// The version of the Ollama server. - @override - @JsonKey(includeIfNull: false) - final String? version; - - @override - String toString() { - return 'VersionResponse(version: $version)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$VersionResponseImpl && - (identical(other.version, version) || other.version == version)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, version); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$VersionResponseImplCopyWith<_$VersionResponseImpl> get copyWith => - __$$VersionResponseImplCopyWithImpl<_$VersionResponseImpl>( - this, _$identity); - - @override - Map toJson() { - return _$$VersionResponseImplToJson( - this, - ); - } -} - -abstract class _VersionResponse extends VersionResponse { - const factory _VersionResponse( - {@JsonKey(includeIfNull: false) final String? version}) = - _$VersionResponseImpl; - const _VersionResponse._() : super._(); - - factory _VersionResponse.fromJson(Map json) = - _$VersionResponseImpl.fromJson; - - @override - - /// The version of the Ollama server. - @JsonKey(includeIfNull: false) - String? get version; - @override - @JsonKey(ignore: true) - _$$VersionResponseImplCopyWith<_$VersionResponseImpl> get copyWith => - throw _privateConstructorUsedError; -} - GenerateCompletionResponse _$GenerateCompletionResponseFromJson( Map json) { return _GenerateCompletionResponse.fromJson(json); @@ -2402,10 +2130,6 @@ mixin _$GenerateChatCompletionRequest { @JsonKey(name: 'keep_alive', includeIfNull: false) int? get keepAlive => throw _privateConstructorUsedError; - /// A list of tools the model may call. - @JsonKey(includeIfNull: false) - List? get tools => throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $GenerateChatCompletionRequestCopyWith @@ -2429,8 +2153,7 @@ abstract class $GenerateChatCompletionRequestCopyWith<$Res> { ResponseFormat? format, @JsonKey(includeIfNull: false) RequestOptions? options, bool stream, - @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive, - @JsonKey(includeIfNull: false) List? tools}); + @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); $RequestOptionsCopyWith<$Res>? get options; } @@ -2455,7 +2178,6 @@ class _$GenerateChatCompletionRequestCopyWithImpl<$Res, Object? options = freezed, Object? stream = null, Object? keepAlive = freezed, - Object? tools = freezed, }) { return _then(_value.copyWith( model: null == model @@ -2482,10 +2204,6 @@ class _$GenerateChatCompletionRequestCopyWithImpl<$Res, ? _value.keepAlive : keepAlive // ignore: cast_nullable_to_non_nullable as int?, - tools: freezed == tools - ? _value.tools - : tools // ignore: cast_nullable_to_non_nullable - as List?, ) as $Val); } @@ -2520,8 +2238,7 @@ abstract class _$$GenerateChatCompletionRequestImplCopyWith<$Res> ResponseFormat? format, @JsonKey(includeIfNull: false) RequestOptions? options, bool stream, - @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive, - @JsonKey(includeIfNull: false) List? 
tools}); + @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); @override $RequestOptionsCopyWith<$Res>? get options; @@ -2546,7 +2263,6 @@ class __$$GenerateChatCompletionRequestImplCopyWithImpl<$Res> Object? options = freezed, Object? stream = null, Object? keepAlive = freezed, - Object? tools = freezed, }) { return _then(_$GenerateChatCompletionRequestImpl( model: null == model @@ -2573,10 +2289,6 @@ class __$$GenerateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.keepAlive : keepAlive // ignore: cast_nullable_to_non_nullable as int?, - tools: freezed == tools - ? _value._tools - : tools // ignore: cast_nullable_to_non_nullable - as List?, )); } } @@ -2594,10 +2306,8 @@ class _$GenerateChatCompletionRequestImpl this.format, @JsonKey(includeIfNull: false) this.options, this.stream = false, - @JsonKey(name: 'keep_alive', includeIfNull: false) this.keepAlive, - @JsonKey(includeIfNull: false) final List? tools}) + @JsonKey(name: 'keep_alive', includeIfNull: false) this.keepAlive}) : _messages = messages, - _tools = tools, super._(); factory _$GenerateChatCompletionRequestImpl.fromJson( @@ -2651,23 +2361,9 @@ class _$GenerateChatCompletionRequestImpl @JsonKey(name: 'keep_alive', includeIfNull: false) final int? keepAlive; - /// A list of tools the model may call. - final List? _tools; - - /// A list of tools the model may call. - @override - @JsonKey(includeIfNull: false) - List? get tools { - final value = _tools; - if (value == null) return null; - if (_tools is EqualUnmodifiableListView) return _tools; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - @override String toString() { - return 'GenerateChatCompletionRequest(model: $model, messages: $messages, format: $format, options: $options, stream: $stream, keepAlive: $keepAlive, tools: $tools)'; + return 'GenerateChatCompletionRequest(model: $model, messages: $messages, format: $format, options: $options, stream: $stream, keepAlive: $keepAlive)'; } @override @@ -2681,8 +2377,7 @@ class _$GenerateChatCompletionRequestImpl (identical(other.options, options) || other.options == options) && (identical(other.stream, stream) || other.stream == stream) && (identical(other.keepAlive, keepAlive) || - other.keepAlive == keepAlive) && - const DeepCollectionEquality().equals(other._tools, _tools)); + other.keepAlive == keepAlive)); } @JsonKey(ignore: true) @@ -2694,8 +2389,7 @@ class _$GenerateChatCompletionRequestImpl format, options, stream, - keepAlive, - const DeepCollectionEquality().hash(_tools)); + keepAlive); @JsonKey(ignore: true) @override @@ -2724,9 +2418,8 @@ abstract class _GenerateChatCompletionRequest final ResponseFormat? format, @JsonKey(includeIfNull: false) final RequestOptions? options, final bool stream, - @JsonKey(name: 'keep_alive', includeIfNull: false) final int? keepAlive, - @JsonKey(includeIfNull: false) - final List? tools}) = _$GenerateChatCompletionRequestImpl; + @JsonKey(name: 'keep_alive', includeIfNull: false) + final int? keepAlive}) = _$GenerateChatCompletionRequestImpl; const _GenerateChatCompletionRequest._() : super._(); factory _GenerateChatCompletionRequest.fromJson(Map json) = @@ -2772,11 +2465,6 @@ abstract class _GenerateChatCompletionRequest @JsonKey(name: 'keep_alive', includeIfNull: false) int? get keepAlive; @override - - /// A list of tools the model may call. - @JsonKey(includeIfNull: false) - List? 
get tools; - @override @JsonKey(ignore: true) _$$GenerateChatCompletionRequestImplCopyWith< _$GenerateChatCompletionRequestImpl> @@ -2791,19 +2479,22 @@ GenerateChatCompletionResponse _$GenerateChatCompletionResponseFromJson( /// @nodoc mixin _$GenerateChatCompletionResponse { /// A message in the chat endpoint - Message get message => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + Message? get message => throw _privateConstructorUsedError; /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - String get model => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? get model => throw _privateConstructorUsedError; /// Date on which a model was created. - @JsonKey(name: 'created_at') - String get createdAt => throw _privateConstructorUsedError; + @JsonKey(name: 'created_at', includeIfNull: false) + String? get createdAt => throw _privateConstructorUsedError; /// Whether the response has completed. - bool get done => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + bool? get done => throw _privateConstructorUsedError; /// Reason why the model is done generating a response. @JsonKey( @@ -2851,10 +2542,10 @@ abstract class $GenerateChatCompletionResponseCopyWith<$Res> { GenerateChatCompletionResponse>; @useResult $Res call( - {Message message, - String model, - @JsonKey(name: 'created_at') String createdAt, - bool done, + {@JsonKey(includeIfNull: false) Message? message, + @JsonKey(includeIfNull: false) String? model, + @JsonKey(name: 'created_at', includeIfNull: false) String? createdAt, + @JsonKey(includeIfNull: false) bool? done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -2869,7 +2560,7 @@ abstract class $GenerateChatCompletionResponseCopyWith<$Res> { @JsonKey(name: 'eval_count', includeIfNull: false) int? evalCount, @JsonKey(name: 'eval_duration', includeIfNull: false) int? evalDuration}); - $MessageCopyWith<$Res> get message; + $MessageCopyWith<$Res>? get message; } /// @nodoc @@ -2886,10 +2577,10 @@ class _$GenerateChatCompletionResponseCopyWithImpl<$Res, @pragma('vm:prefer-inline') @override $Res call({ - Object? message = null, - Object? model = null, - Object? createdAt = null, - Object? done = null, + Object? message = freezed, + Object? model = freezed, + Object? createdAt = freezed, + Object? done = freezed, Object? doneReason = freezed, Object? totalDuration = freezed, Object? loadDuration = freezed, @@ -2899,22 +2590,22 @@ class _$GenerateChatCompletionResponseCopyWithImpl<$Res, Object? evalDuration = freezed, }) { return _then(_value.copyWith( - message: null == message + message: freezed == message ? _value.message : message // ignore: cast_nullable_to_non_nullable - as Message, - model: null == model + as Message?, + model: freezed == model ? _value.model : model // ignore: cast_nullable_to_non_nullable - as String, - createdAt: null == createdAt + as String?, + createdAt: freezed == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable - as String, - done: null == done + as String?, + done: freezed == done ? _value.done : done // ignore: cast_nullable_to_non_nullable - as bool, + as bool?, doneReason: freezed == doneReason ? 
_value.doneReason : doneReason // ignore: cast_nullable_to_non_nullable @@ -2948,8 +2639,12 @@ class _$GenerateChatCompletionResponseCopyWithImpl<$Res, @override @pragma('vm:prefer-inline') - $MessageCopyWith<$Res> get message { - return $MessageCopyWith<$Res>(_value.message, (value) { + $MessageCopyWith<$Res>? get message { + if (_value.message == null) { + return null; + } + + return $MessageCopyWith<$Res>(_value.message!, (value) { return _then(_value.copyWith(message: value) as $Val); }); } @@ -2965,10 +2660,10 @@ abstract class _$$GenerateChatCompletionResponseImplCopyWith<$Res> @override @useResult $Res call( - {Message message, - String model, - @JsonKey(name: 'created_at') String createdAt, - bool done, + {@JsonKey(includeIfNull: false) Message? message, + @JsonKey(includeIfNull: false) String? model, + @JsonKey(name: 'created_at', includeIfNull: false) String? createdAt, + @JsonKey(includeIfNull: false) bool? done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -2984,7 +2679,7 @@ abstract class _$$GenerateChatCompletionResponseImplCopyWith<$Res> @JsonKey(name: 'eval_duration', includeIfNull: false) int? evalDuration}); @override - $MessageCopyWith<$Res> get message; + $MessageCopyWith<$Res>? get message; } /// @nodoc @@ -3000,10 +2695,10 @@ class __$$GenerateChatCompletionResponseImplCopyWithImpl<$Res> @pragma('vm:prefer-inline') @override $Res call({ - Object? message = null, - Object? model = null, - Object? createdAt = null, - Object? done = null, + Object? message = freezed, + Object? model = freezed, + Object? createdAt = freezed, + Object? done = freezed, Object? doneReason = freezed, Object? totalDuration = freezed, Object? loadDuration = freezed, @@ -3013,22 +2708,22 @@ class __$$GenerateChatCompletionResponseImplCopyWithImpl<$Res> Object? evalDuration = freezed, }) { return _then(_$GenerateChatCompletionResponseImpl( - message: null == message + message: freezed == message ? _value.message : message // ignore: cast_nullable_to_non_nullable - as Message, - model: null == model + as Message?, + model: freezed == model ? _value.model : model // ignore: cast_nullable_to_non_nullable - as String, - createdAt: null == createdAt + as String?, + createdAt: freezed == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable - as String, - done: null == done + as String?, + done: freezed == done ? _value.done : done // ignore: cast_nullable_to_non_nullable - as bool, + as bool?, doneReason: freezed == doneReason ? _value.doneReason : doneReason // ignore: cast_nullable_to_non_nullable @@ -3066,10 +2761,10 @@ class __$$GenerateChatCompletionResponseImplCopyWithImpl<$Res> class _$GenerateChatCompletionResponseImpl extends _GenerateChatCompletionResponse { const _$GenerateChatCompletionResponseImpl( - {required this.message, - required this.model, - @JsonKey(name: 'created_at') required this.createdAt, - required this.done, + {@JsonKey(includeIfNull: false) this.message, + @JsonKey(includeIfNull: false) this.model, + @JsonKey(name: 'created_at', includeIfNull: false) this.createdAt, + @JsonKey(includeIfNull: false) this.done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -3091,22 +2786,25 @@ class _$GenerateChatCompletionResponseImpl /// A message in the chat endpoint @override - final Message message; + @JsonKey(includeIfNull: false) + final Message? message; /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. 
The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. @override - final String model; + @JsonKey(includeIfNull: false) + final String? model; /// Date on which a model was created. @override - @JsonKey(name: 'created_at') - final String createdAt; + @JsonKey(name: 'created_at', includeIfNull: false) + final String? createdAt; /// Whether the response has completed. @override - final bool done; + @JsonKey(includeIfNull: false) + final bool? done; /// Reason why the model is done generating a response. @override @@ -3212,10 +2910,11 @@ class _$GenerateChatCompletionResponseImpl abstract class _GenerateChatCompletionResponse extends GenerateChatCompletionResponse { const factory _GenerateChatCompletionResponse( - {required final Message message, - required final String model, - @JsonKey(name: 'created_at') required final String createdAt, - required final bool done, + {@JsonKey(includeIfNull: false) final Message? message, + @JsonKey(includeIfNull: false) final String? model, + @JsonKey(name: 'created_at', includeIfNull: false) + final String? createdAt, + @JsonKey(includeIfNull: false) final bool? done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -3240,22 +2939,25 @@ abstract class _GenerateChatCompletionResponse @override /// A message in the chat endpoint - Message get message; + @JsonKey(includeIfNull: false) + Message? get message; @override /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - String get model; + @JsonKey(includeIfNull: false) + String? get model; @override /// Date on which a model was created. - @JsonKey(name: 'created_at') - String get createdAt; + @JsonKey(name: 'created_at', includeIfNull: false) + String? get createdAt; @override /// Whether the response has completed. - bool get done; + @JsonKey(includeIfNull: false) + bool? get done; @override /// Reason why the model is done generating a response. @@ -3317,10 +3019,6 @@ mixin _$Message { @JsonKey(includeIfNull: false) List? get images => throw _privateConstructorUsedError; - /// A list of tools the model wants to call. - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? get toolCalls => throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $MessageCopyWith get copyWith => throw _privateConstructorUsedError; @@ -3334,9 +3032,7 @@ abstract class $MessageCopyWith<$Res> { $Res call( {MessageRole role, String content, - @JsonKey(includeIfNull: false) List? images, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls}); + @JsonKey(includeIfNull: false) List? images}); } /// @nodoc @@ -3355,7 +3051,6 @@ class _$MessageCopyWithImpl<$Res, $Val extends Message> Object? role = null, Object? content = null, Object? images = freezed, - Object? toolCalls = freezed, }) { return _then(_value.copyWith( role: null == role @@ -3370,10 +3065,6 @@ class _$MessageCopyWithImpl<$Res, $Val extends Message> ? _value.images : images // ignore: cast_nullable_to_non_nullable as List?, - toolCalls: freezed == toolCalls - ? 
_value.toolCalls - : toolCalls // ignore: cast_nullable_to_non_nullable - as List?, ) as $Val); } } @@ -3388,9 +3079,7 @@ abstract class _$$MessageImplCopyWith<$Res> implements $MessageCopyWith<$Res> { $Res call( {MessageRole role, String content, - @JsonKey(includeIfNull: false) List? images, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls}); + @JsonKey(includeIfNull: false) List? images}); } /// @nodoc @@ -3407,7 +3096,6 @@ class __$$MessageImplCopyWithImpl<$Res> Object? role = null, Object? content = null, Object? images = freezed, - Object? toolCalls = freezed, }) { return _then(_$MessageImpl( role: null == role @@ -3422,10 +3110,6 @@ class __$$MessageImplCopyWithImpl<$Res> ? _value._images : images // ignore: cast_nullable_to_non_nullable as List?, - toolCalls: freezed == toolCalls - ? _value._toolCalls - : toolCalls // ignore: cast_nullable_to_non_nullable - as List?, )); } } @@ -3436,11 +3120,8 @@ class _$MessageImpl extends _Message { const _$MessageImpl( {required this.role, required this.content, - @JsonKey(includeIfNull: false) final List? images, - @JsonKey(name: 'tool_calls', includeIfNull: false) - final List? toolCalls}) + @JsonKey(includeIfNull: false) final List? images}) : _images = images, - _toolCalls = toolCalls, super._(); factory _$MessageImpl.fromJson(Map json) => @@ -3468,23 +3149,9 @@ class _$MessageImpl extends _Message { return EqualUnmodifiableListView(value); } - /// A list of tools the model wants to call. - final List? _toolCalls; - - /// A list of tools the model wants to call. - @override - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? get toolCalls { - final value = _toolCalls; - if (value == null) return null; - if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - @override String toString() { - return 'Message(role: $role, content: $content, images: $images, toolCalls: $toolCalls)'; + return 'Message(role: $role, content: $content, images: $images)'; } @override @@ -3494,19 +3161,13 @@ class _$MessageImpl extends _Message { other is _$MessageImpl && (identical(other.role, role) || other.role == role) && (identical(other.content, content) || other.content == content) && - const DeepCollectionEquality().equals(other._images, _images) && - const DeepCollectionEquality() - .equals(other._toolCalls, _toolCalls)); + const DeepCollectionEquality().equals(other._images, _images)); } @JsonKey(ignore: true) @override int get hashCode => Object.hash( - runtimeType, - role, - content, - const DeepCollectionEquality().hash(_images), - const DeepCollectionEquality().hash(_toolCalls)); + runtimeType, role, content, const DeepCollectionEquality().hash(_images)); @JsonKey(ignore: true) @override @@ -3524,11 +3185,10 @@ class _$MessageImpl extends _Message { abstract class _Message extends Message { const factory _Message( - {required final MessageRole role, - required final String content, - @JsonKey(includeIfNull: false) final List? images, - @JsonKey(name: 'tool_calls', includeIfNull: false) - final List? toolCalls}) = _$MessageImpl; + {required final MessageRole role, + required final String content, + @JsonKey(includeIfNull: false) final List? images}) = + _$MessageImpl; const _Message._() : super._(); factory _Message.fromJson(Map json) = _$MessageImpl.fromJson; @@ -3547,49 +3207,65 @@ abstract class _Message extends Message { @JsonKey(includeIfNull: false) List? get images; @override - - /// A list of tools the model wants to call. 
- @JsonKey(name: 'tool_calls', includeIfNull: false) - List? get toolCalls; - @override @JsonKey(ignore: true) _$$MessageImplCopyWith<_$MessageImpl> get copyWith => throw _privateConstructorUsedError; } -Tool _$ToolFromJson(Map json) { - return _Tool.fromJson(json); +GenerateEmbeddingRequest _$GenerateEmbeddingRequestFromJson( + Map json) { + return _GenerateEmbeddingRequest.fromJson(json); } /// @nodoc -mixin _$Tool { - /// The type of tool. - ToolType get type => throw _privateConstructorUsedError; +mixin _$GenerateEmbeddingRequest { + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + String get model => throw _privateConstructorUsedError; + + /// Text to generate embeddings for. + String get prompt => throw _privateConstructorUsedError; - /// A function that the model may call. + /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`. @JsonKey(includeIfNull: false) - ToolFunction? get function => throw _privateConstructorUsedError; + RequestOptions? get options => throw _privateConstructorUsedError; + + /// How long (in minutes) to keep the model loaded in memory. + /// + /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. + /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely. + /// - If set to 0, the model will be unloaded immediately once finished. + /// - If not set, the model will stay loaded for 5 minutes by default + @JsonKey(name: 'keep_alive', includeIfNull: false) + int? get keepAlive => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $ToolCopyWith get copyWith => throw _privateConstructorUsedError; + $GenerateEmbeddingRequestCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $ToolCopyWith<$Res> { - factory $ToolCopyWith(Tool value, $Res Function(Tool) then) = - _$ToolCopyWithImpl<$Res, Tool>; +abstract class $GenerateEmbeddingRequestCopyWith<$Res> { + factory $GenerateEmbeddingRequestCopyWith(GenerateEmbeddingRequest value, + $Res Function(GenerateEmbeddingRequest) then) = + _$GenerateEmbeddingRequestCopyWithImpl<$Res, GenerateEmbeddingRequest>; @useResult $Res call( - {ToolType type, @JsonKey(includeIfNull: false) ToolFunction? function}); + {String model, + String prompt, + @JsonKey(includeIfNull: false) RequestOptions? options, + @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); - $ToolFunctionCopyWith<$Res>? get function; + $RequestOptionsCopyWith<$Res>? get options; } /// @nodoc -class _$ToolCopyWithImpl<$Res, $Val extends Tool> - implements $ToolCopyWith<$Res> { - _$ToolCopyWithImpl(this._value, this._then); +class _$GenerateEmbeddingRequestCopyWithImpl<$Res, + $Val extends GenerateEmbeddingRequest> + implements $GenerateEmbeddingRequestCopyWith<$Res> { + _$GenerateEmbeddingRequestCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -3599,186 +3275,251 @@ class _$ToolCopyWithImpl<$Res, $Val extends Tool> @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, - Object? function = freezed, + Object? model = null, + Object? prompt = null, + Object? options = freezed, + Object? keepAlive = freezed, }) { return _then(_value.copyWith( - type: null == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as ToolType, - function: freezed == function - ? _value.function - : function // ignore: cast_nullable_to_non_nullable - as ToolFunction?, - ) as $Val); - } - - @override - @pragma('vm:prefer-inline') - $ToolFunctionCopyWith<$Res>? get function { - if (_value.function == null) { - return null; + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + prompt: null == prompt + ? _value.prompt + : prompt // ignore: cast_nullable_to_non_nullable + as String, + options: freezed == options + ? _value.options + : options // ignore: cast_nullable_to_non_nullable + as RequestOptions?, + keepAlive: freezed == keepAlive + ? _value.keepAlive + : keepAlive // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $RequestOptionsCopyWith<$Res>? get options { + if (_value.options == null) { + return null; } - return $ToolFunctionCopyWith<$Res>(_value.function!, (value) { - return _then(_value.copyWith(function: value) as $Val); + return $RequestOptionsCopyWith<$Res>(_value.options!, (value) { + return _then(_value.copyWith(options: value) as $Val); }); } } /// @nodoc -abstract class _$$ToolImplCopyWith<$Res> implements $ToolCopyWith<$Res> { - factory _$$ToolImplCopyWith( - _$ToolImpl value, $Res Function(_$ToolImpl) then) = - __$$ToolImplCopyWithImpl<$Res>; +abstract class _$$GenerateEmbeddingRequestImplCopyWith<$Res> + implements $GenerateEmbeddingRequestCopyWith<$Res> { + factory _$$GenerateEmbeddingRequestImplCopyWith( + _$GenerateEmbeddingRequestImpl value, + $Res Function(_$GenerateEmbeddingRequestImpl) then) = + __$$GenerateEmbeddingRequestImplCopyWithImpl<$Res>; @override @useResult $Res call( - {ToolType type, @JsonKey(includeIfNull: false) ToolFunction? function}); + {String model, + String prompt, + @JsonKey(includeIfNull: false) RequestOptions? options, + @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); @override - $ToolFunctionCopyWith<$Res>? get function; + $RequestOptionsCopyWith<$Res>? get options; } /// @nodoc -class __$$ToolImplCopyWithImpl<$Res> - extends _$ToolCopyWithImpl<$Res, _$ToolImpl> - implements _$$ToolImplCopyWith<$Res> { - __$$ToolImplCopyWithImpl(_$ToolImpl _value, $Res Function(_$ToolImpl) _then) +class __$$GenerateEmbeddingRequestImplCopyWithImpl<$Res> + extends _$GenerateEmbeddingRequestCopyWithImpl<$Res, + _$GenerateEmbeddingRequestImpl> + implements _$$GenerateEmbeddingRequestImplCopyWith<$Res> { + __$$GenerateEmbeddingRequestImplCopyWithImpl( + _$GenerateEmbeddingRequestImpl _value, + $Res Function(_$GenerateEmbeddingRequestImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, - Object? function = freezed, + Object? model = null, + Object? prompt = null, + Object? options = freezed, + Object? keepAlive = freezed, }) { - return _then(_$ToolImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ToolType, - function: freezed == function - ? _value.function - : function // ignore: cast_nullable_to_non_nullable - as ToolFunction?, + return _then(_$GenerateEmbeddingRequestImpl( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + prompt: null == prompt + ? _value.prompt + : prompt // ignore: cast_nullable_to_non_nullable + as String, + options: freezed == options + ? 
_value.options + : options // ignore: cast_nullable_to_non_nullable + as RequestOptions?, + keepAlive: freezed == keepAlive + ? _value.keepAlive + : keepAlive // ignore: cast_nullable_to_non_nullable + as int?, )); } } /// @nodoc @JsonSerializable() -class _$ToolImpl extends _Tool { - const _$ToolImpl( - {this.type = ToolType.function, - @JsonKey(includeIfNull: false) this.function}) +class _$GenerateEmbeddingRequestImpl extends _GenerateEmbeddingRequest { + const _$GenerateEmbeddingRequestImpl( + {required this.model, + required this.prompt, + @JsonKey(includeIfNull: false) this.options, + @JsonKey(name: 'keep_alive', includeIfNull: false) this.keepAlive}) : super._(); - factory _$ToolImpl.fromJson(Map json) => - _$$ToolImplFromJson(json); + factory _$GenerateEmbeddingRequestImpl.fromJson(Map json) => + _$$GenerateEmbeddingRequestImplFromJson(json); - /// The type of tool. + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. @override - @JsonKey() - final ToolType type; + final String model; + + /// Text to generate embeddings for. + @override + final String prompt; - /// A function that the model may call. + /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`. @override @JsonKey(includeIfNull: false) - final ToolFunction? function; + final RequestOptions? options; + + /// How long (in minutes) to keep the model loaded in memory. + /// + /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. + /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely. + /// - If set to 0, the model will be unloaded immediately once finished. + /// - If not set, the model will stay loaded for 5 minutes by default + @override + @JsonKey(name: 'keep_alive', includeIfNull: false) + final int? 
keepAlive; @override String toString() { - return 'Tool(type: $type, function: $function)'; + return 'GenerateEmbeddingRequest(model: $model, prompt: $prompt, options: $options, keepAlive: $keepAlive)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ToolImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.function, function) || - other.function == function)); + other is _$GenerateEmbeddingRequestImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.prompt, prompt) || other.prompt == prompt) && + (identical(other.options, options) || other.options == options) && + (identical(other.keepAlive, keepAlive) || + other.keepAlive == keepAlive)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, function); + int get hashCode => + Object.hash(runtimeType, model, prompt, options, keepAlive); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ToolImplCopyWith<_$ToolImpl> get copyWith => - __$$ToolImplCopyWithImpl<_$ToolImpl>(this, _$identity); + _$$GenerateEmbeddingRequestImplCopyWith<_$GenerateEmbeddingRequestImpl> + get copyWith => __$$GenerateEmbeddingRequestImplCopyWithImpl< + _$GenerateEmbeddingRequestImpl>(this, _$identity); @override Map toJson() { - return _$$ToolImplToJson( + return _$$GenerateEmbeddingRequestImplToJson( this, ); } } -abstract class _Tool extends Tool { - const factory _Tool( - {final ToolType type, - @JsonKey(includeIfNull: false) final ToolFunction? function}) = - _$ToolImpl; - const _Tool._() : super._(); +abstract class _GenerateEmbeddingRequest extends GenerateEmbeddingRequest { + const factory _GenerateEmbeddingRequest( + {required final String model, + required final String prompt, + @JsonKey(includeIfNull: false) final RequestOptions? options, + @JsonKey(name: 'keep_alive', includeIfNull: false) + final int? keepAlive}) = _$GenerateEmbeddingRequestImpl; + const _GenerateEmbeddingRequest._() : super._(); + + factory _GenerateEmbeddingRequest.fromJson(Map json) = + _$GenerateEmbeddingRequestImpl.fromJson; - factory _Tool.fromJson(Map json) = _$ToolImpl.fromJson; + @override + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + String get model; @override - /// The type of tool. - ToolType get type; + /// Text to generate embeddings for. + String get prompt; @override - /// A function that the model may call. + /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`. @JsonKey(includeIfNull: false) - ToolFunction? get function; + RequestOptions? get options; + @override + + /// How long (in minutes) to keep the model loaded in memory. + /// + /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. + /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely. + /// - If set to 0, the model will be unloaded immediately once finished. + /// - If not set, the model will stay loaded for 5 minutes by default + @JsonKey(name: 'keep_alive', includeIfNull: false) + int? 
get keepAlive; @override @JsonKey(ignore: true) - _$$ToolImplCopyWith<_$ToolImpl> get copyWith => - throw _privateConstructorUsedError; + _$$GenerateEmbeddingRequestImplCopyWith<_$GenerateEmbeddingRequestImpl> + get copyWith => throw _privateConstructorUsedError; } -ToolFunction _$ToolFunctionFromJson(Map json) { - return _ToolFunction.fromJson(json); +GenerateEmbeddingResponse _$GenerateEmbeddingResponseFromJson( + Map json) { + return _GenerateEmbeddingResponse.fromJson(json); } /// @nodoc -mixin _$ToolFunction { - /// The name of the function to be called. - String get name => throw _privateConstructorUsedError; - - /// A description of what the function does, used by the model to choose when and how to call the function. - String get description => throw _privateConstructorUsedError; - - /// The parameters the functions accepts, described as a JSON Schema object. - Map get parameters => throw _privateConstructorUsedError; +mixin _$GenerateEmbeddingResponse { + /// The embedding for the prompt. + @JsonKey(includeIfNull: false) + List? get embedding => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $ToolFunctionCopyWith get copyWith => + $GenerateEmbeddingResponseCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ToolFunctionCopyWith<$Res> { - factory $ToolFunctionCopyWith( - ToolFunction value, $Res Function(ToolFunction) then) = - _$ToolFunctionCopyWithImpl<$Res, ToolFunction>; +abstract class $GenerateEmbeddingResponseCopyWith<$Res> { + factory $GenerateEmbeddingResponseCopyWith(GenerateEmbeddingResponse value, + $Res Function(GenerateEmbeddingResponse) then) = + _$GenerateEmbeddingResponseCopyWithImpl<$Res, GenerateEmbeddingResponse>; @useResult - $Res call({String name, String description, Map parameters}); + $Res call({@JsonKey(includeIfNull: false) List? embedding}); } /// @nodoc -class _$ToolFunctionCopyWithImpl<$Res, $Val extends ToolFunction> - implements $ToolFunctionCopyWith<$Res> { - _$ToolFunctionCopyWithImpl(this._value, this._then); +class _$GenerateEmbeddingResponseCopyWithImpl<$Res, + $Val extends GenerateEmbeddingResponse> + implements $GenerateEmbeddingResponseCopyWith<$Res> { + _$GenerateEmbeddingResponseCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -3788,196 +3529,181 @@ class _$ToolFunctionCopyWithImpl<$Res, $Val extends ToolFunction> @pragma('vm:prefer-inline') @override $Res call({ - Object? name = null, - Object? description = null, - Object? parameters = null, + Object? embedding = freezed, }) { return _then(_value.copyWith( - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, - description: null == description - ? _value.description - : description // ignore: cast_nullable_to_non_nullable - as String, - parameters: null == parameters - ? _value.parameters - : parameters // ignore: cast_nullable_to_non_nullable - as Map, + embedding: freezed == embedding + ? 
_value.embedding + : embedding // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } } /// @nodoc -abstract class _$$ToolFunctionImplCopyWith<$Res> - implements $ToolFunctionCopyWith<$Res> { - factory _$$ToolFunctionImplCopyWith( - _$ToolFunctionImpl value, $Res Function(_$ToolFunctionImpl) then) = - __$$ToolFunctionImplCopyWithImpl<$Res>; +abstract class _$$GenerateEmbeddingResponseImplCopyWith<$Res> + implements $GenerateEmbeddingResponseCopyWith<$Res> { + factory _$$GenerateEmbeddingResponseImplCopyWith( + _$GenerateEmbeddingResponseImpl value, + $Res Function(_$GenerateEmbeddingResponseImpl) then) = + __$$GenerateEmbeddingResponseImplCopyWithImpl<$Res>; @override @useResult - $Res call({String name, String description, Map parameters}); + $Res call({@JsonKey(includeIfNull: false) List? embedding}); } /// @nodoc -class __$$ToolFunctionImplCopyWithImpl<$Res> - extends _$ToolFunctionCopyWithImpl<$Res, _$ToolFunctionImpl> - implements _$$ToolFunctionImplCopyWith<$Res> { - __$$ToolFunctionImplCopyWithImpl( - _$ToolFunctionImpl _value, $Res Function(_$ToolFunctionImpl) _then) +class __$$GenerateEmbeddingResponseImplCopyWithImpl<$Res> + extends _$GenerateEmbeddingResponseCopyWithImpl<$Res, + _$GenerateEmbeddingResponseImpl> + implements _$$GenerateEmbeddingResponseImplCopyWith<$Res> { + __$$GenerateEmbeddingResponseImplCopyWithImpl( + _$GenerateEmbeddingResponseImpl _value, + $Res Function(_$GenerateEmbeddingResponseImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? name = null, - Object? description = null, - Object? parameters = null, + Object? embedding = freezed, }) { - return _then(_$ToolFunctionImpl( - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, - description: null == description - ? _value.description - : description // ignore: cast_nullable_to_non_nullable - as String, - parameters: null == parameters - ? _value._parameters - : parameters // ignore: cast_nullable_to_non_nullable - as Map, + return _then(_$GenerateEmbeddingResponseImpl( + embedding: freezed == embedding + ? _value._embedding + : embedding // ignore: cast_nullable_to_non_nullable + as List?, )); } } /// @nodoc @JsonSerializable() -class _$ToolFunctionImpl extends _ToolFunction { - const _$ToolFunctionImpl( - {required this.name, - required this.description, - required final Map parameters}) - : _parameters = parameters, +class _$GenerateEmbeddingResponseImpl extends _GenerateEmbeddingResponse { + const _$GenerateEmbeddingResponseImpl( + {@JsonKey(includeIfNull: false) final List? embedding}) + : _embedding = embedding, super._(); - factory _$ToolFunctionImpl.fromJson(Map json) => - _$$ToolFunctionImplFromJson(json); - - /// The name of the function to be called. - @override - final String name; - - /// A description of what the function does, used by the model to choose when and how to call the function. - @override - final String description; + factory _$GenerateEmbeddingResponseImpl.fromJson(Map json) => + _$$GenerateEmbeddingResponseImplFromJson(json); - /// The parameters the functions accepts, described as a JSON Schema object. - final Map _parameters; + /// The embedding for the prompt. + final List? _embedding; - /// The parameters the functions accepts, described as a JSON Schema object. + /// The embedding for the prompt. @override - Map get parameters { - if (_parameters is EqualUnmodifiableMapView) return _parameters; + @JsonKey(includeIfNull: false) + List? 
get embedding { + final value = _embedding; + if (value == null) return null; + if (_embedding is EqualUnmodifiableListView) return _embedding; // ignore: implicit_dynamic_type - return EqualUnmodifiableMapView(_parameters); + return EqualUnmodifiableListView(value); } @override String toString() { - return 'ToolFunction(name: $name, description: $description, parameters: $parameters)'; + return 'GenerateEmbeddingResponse(embedding: $embedding)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ToolFunctionImpl && - (identical(other.name, name) || other.name == name) && - (identical(other.description, description) || - other.description == description) && + other is _$GenerateEmbeddingResponseImpl && const DeepCollectionEquality() - .equals(other._parameters, _parameters)); + .equals(other._embedding, _embedding)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, name, description, - const DeepCollectionEquality().hash(_parameters)); + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_embedding)); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ToolFunctionImplCopyWith<_$ToolFunctionImpl> get copyWith => - __$$ToolFunctionImplCopyWithImpl<_$ToolFunctionImpl>(this, _$identity); + _$$GenerateEmbeddingResponseImplCopyWith<_$GenerateEmbeddingResponseImpl> + get copyWith => __$$GenerateEmbeddingResponseImplCopyWithImpl< + _$GenerateEmbeddingResponseImpl>(this, _$identity); @override Map toJson() { - return _$$ToolFunctionImplToJson( + return _$$GenerateEmbeddingResponseImplToJson( this, ); } } -abstract class _ToolFunction extends ToolFunction { - const factory _ToolFunction( - {required final String name, - required final String description, - required final Map parameters}) = _$ToolFunctionImpl; - const _ToolFunction._() : super._(); - - factory _ToolFunction.fromJson(Map json) = - _$ToolFunctionImpl.fromJson; - - @override +abstract class _GenerateEmbeddingResponse extends GenerateEmbeddingResponse { + const factory _GenerateEmbeddingResponse( + {@JsonKey(includeIfNull: false) final List? embedding}) = + _$GenerateEmbeddingResponseImpl; + const _GenerateEmbeddingResponse._() : super._(); - /// The name of the function to be called. - String get name; - @override + factory _GenerateEmbeddingResponse.fromJson(Map json) = + _$GenerateEmbeddingResponseImpl.fromJson; - /// A description of what the function does, used by the model to choose when and how to call the function. - String get description; @override - /// The parameters the functions accepts, described as a JSON Schema object. - Map get parameters; + /// The embedding for the prompt. + @JsonKey(includeIfNull: false) + List? get embedding; @override @JsonKey(ignore: true) - _$$ToolFunctionImplCopyWith<_$ToolFunctionImpl> get copyWith => - throw _privateConstructorUsedError; + _$$GenerateEmbeddingResponseImplCopyWith<_$GenerateEmbeddingResponseImpl> + get copyWith => throw _privateConstructorUsedError; } -ToolCall _$ToolCallFromJson(Map json) { - return _ToolCall.fromJson(json); +CreateModelRequest _$CreateModelRequestFromJson(Map json) { + return _CreateModelRequest.fromJson(json); } /// @nodoc -mixin _$ToolCall { - /// The function the model wants to call. +mixin _$CreateModelRequest { + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. 
The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + String get model => throw _privateConstructorUsedError; + + /// The contents of the Modelfile. + String get modelfile => throw _privateConstructorUsedError; + + /// Path to the Modelfile (optional) @JsonKey(includeIfNull: false) - ToolCallFunction? get function => throw _privateConstructorUsedError; + String? get path => throw _privateConstructorUsedError; + + /// The quantization level of the model. + @JsonKey(includeIfNull: false) + String? get quantize => throw _privateConstructorUsedError; + + /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. + bool get stream => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $ToolCallCopyWith get copyWith => + $CreateModelRequestCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ToolCallCopyWith<$Res> { - factory $ToolCallCopyWith(ToolCall value, $Res Function(ToolCall) then) = - _$ToolCallCopyWithImpl<$Res, ToolCall>; +abstract class $CreateModelRequestCopyWith<$Res> { + factory $CreateModelRequestCopyWith( + CreateModelRequest value, $Res Function(CreateModelRequest) then) = + _$CreateModelRequestCopyWithImpl<$Res, CreateModelRequest>; @useResult - $Res call({@JsonKey(includeIfNull: false) ToolCallFunction? function}); - - $ToolCallFunctionCopyWith<$Res>? get function; + $Res call( + {String model, + String modelfile, + @JsonKey(includeIfNull: false) String? path, + @JsonKey(includeIfNull: false) String? quantize, + bool stream}); } /// @nodoc -class _$ToolCallCopyWithImpl<$Res, $Val extends ToolCall> - implements $ToolCallCopyWith<$Res> { - _$ToolCallCopyWithImpl(this._value, this._then); +class _$CreateModelRequestCopyWithImpl<$Res, $Val extends CreateModelRequest> + implements $CreateModelRequestCopyWith<$Res> { + _$CreateModelRequestCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -3987,162 +3713,249 @@ class _$ToolCallCopyWithImpl<$Res, $Val extends ToolCall> @pragma('vm:prefer-inline') @override $Res call({ - Object? function = freezed, + Object? model = null, + Object? modelfile = null, + Object? path = freezed, + Object? quantize = freezed, + Object? stream = null, }) { return _then(_value.copyWith( - function: freezed == function - ? _value.function - : function // ignore: cast_nullable_to_non_nullable - as ToolCallFunction?, + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + modelfile: null == modelfile + ? _value.modelfile + : modelfile // ignore: cast_nullable_to_non_nullable + as String, + path: freezed == path + ? _value.path + : path // ignore: cast_nullable_to_non_nullable + as String?, + quantize: freezed == quantize + ? _value.quantize + : quantize // ignore: cast_nullable_to_non_nullable + as String?, + stream: null == stream + ? _value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool, ) as $Val); } - - @override - @pragma('vm:prefer-inline') - $ToolCallFunctionCopyWith<$Res>? 
get function { - if (_value.function == null) { - return null; - } - - return $ToolCallFunctionCopyWith<$Res>(_value.function!, (value) { - return _then(_value.copyWith(function: value) as $Val); - }); - } } /// @nodoc -abstract class _$$ToolCallImplCopyWith<$Res> - implements $ToolCallCopyWith<$Res> { - factory _$$ToolCallImplCopyWith( - _$ToolCallImpl value, $Res Function(_$ToolCallImpl) then) = - __$$ToolCallImplCopyWithImpl<$Res>; +abstract class _$$CreateModelRequestImplCopyWith<$Res> + implements $CreateModelRequestCopyWith<$Res> { + factory _$$CreateModelRequestImplCopyWith(_$CreateModelRequestImpl value, + $Res Function(_$CreateModelRequestImpl) then) = + __$$CreateModelRequestImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(includeIfNull: false) ToolCallFunction? function}); - - @override - $ToolCallFunctionCopyWith<$Res>? get function; + $Res call( + {String model, + String modelfile, + @JsonKey(includeIfNull: false) String? path, + @JsonKey(includeIfNull: false) String? quantize, + bool stream}); } /// @nodoc -class __$$ToolCallImplCopyWithImpl<$Res> - extends _$ToolCallCopyWithImpl<$Res, _$ToolCallImpl> - implements _$$ToolCallImplCopyWith<$Res> { - __$$ToolCallImplCopyWithImpl( - _$ToolCallImpl _value, $Res Function(_$ToolCallImpl) _then) +class __$$CreateModelRequestImplCopyWithImpl<$Res> + extends _$CreateModelRequestCopyWithImpl<$Res, _$CreateModelRequestImpl> + implements _$$CreateModelRequestImplCopyWith<$Res> { + __$$CreateModelRequestImplCopyWithImpl(_$CreateModelRequestImpl _value, + $Res Function(_$CreateModelRequestImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? function = freezed, + Object? model = null, + Object? modelfile = null, + Object? path = freezed, + Object? quantize = freezed, + Object? stream = null, }) { - return _then(_$ToolCallImpl( - function: freezed == function - ? _value.function - : function // ignore: cast_nullable_to_non_nullable - as ToolCallFunction?, + return _then(_$CreateModelRequestImpl( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + modelfile: null == modelfile + ? _value.modelfile + : modelfile // ignore: cast_nullable_to_non_nullable + as String, + path: freezed == path + ? _value.path + : path // ignore: cast_nullable_to_non_nullable + as String?, + quantize: freezed == quantize + ? _value.quantize + : quantize // ignore: cast_nullable_to_non_nullable + as String?, + stream: null == stream + ? _value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool, )); } } /// @nodoc @JsonSerializable() -class _$ToolCallImpl extends _ToolCall { - const _$ToolCallImpl({@JsonKey(includeIfNull: false) this.function}) +class _$CreateModelRequestImpl extends _CreateModelRequest { + const _$CreateModelRequestImpl( + {required this.model, + required this.modelfile, + @JsonKey(includeIfNull: false) this.path, + @JsonKey(includeIfNull: false) this.quantize, + this.stream = false}) : super._(); - factory _$ToolCallImpl.fromJson(Map json) => - _$$ToolCallImplFromJson(json); + factory _$CreateModelRequestImpl.fromJson(Map json) => + _$$CreateModelRequestImplFromJson(json); + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @override + final String model; + + /// The contents of the Modelfile. 
+ @override + final String modelfile; + + /// Path to the Modelfile (optional) + @override + @JsonKey(includeIfNull: false) + final String? path; - /// The function the model wants to call. + /// The quantization level of the model. @override @JsonKey(includeIfNull: false) - final ToolCallFunction? function; + final String? quantize; + + /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. + @override + @JsonKey() + final bool stream; @override String toString() { - return 'ToolCall(function: $function)'; + return 'CreateModelRequest(model: $model, modelfile: $modelfile, path: $path, quantize: $quantize, stream: $stream)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ToolCallImpl && - (identical(other.function, function) || - other.function == function)); + other is _$CreateModelRequestImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.modelfile, modelfile) || + other.modelfile == modelfile) && + (identical(other.path, path) || other.path == path) && + (identical(other.quantize, quantize) || + other.quantize == quantize) && + (identical(other.stream, stream) || other.stream == stream)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, function); + int get hashCode => + Object.hash(runtimeType, model, modelfile, path, quantize, stream); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ToolCallImplCopyWith<_$ToolCallImpl> get copyWith => - __$$ToolCallImplCopyWithImpl<_$ToolCallImpl>(this, _$identity); + _$$CreateModelRequestImplCopyWith<_$CreateModelRequestImpl> get copyWith => + __$$CreateModelRequestImplCopyWithImpl<_$CreateModelRequestImpl>( + this, _$identity); @override Map toJson() { - return _$$ToolCallImplToJson( + return _$$CreateModelRequestImplToJson( this, ); } } -abstract class _ToolCall extends ToolCall { - const factory _ToolCall( - {@JsonKey(includeIfNull: false) final ToolCallFunction? function}) = - _$ToolCallImpl; - const _ToolCall._() : super._(); +abstract class _CreateModelRequest extends CreateModelRequest { + const factory _CreateModelRequest( + {required final String model, + required final String modelfile, + @JsonKey(includeIfNull: false) final String? path, + @JsonKey(includeIfNull: false) final String? quantize, + final bool stream}) = _$CreateModelRequestImpl; + const _CreateModelRequest._() : super._(); + + factory _CreateModelRequest.fromJson(Map json) = + _$CreateModelRequestImpl.fromJson; + + @override + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + String get model; + @override - factory _ToolCall.fromJson(Map json) = - _$ToolCallImpl.fromJson; + /// The contents of the Modelfile. + String get modelfile; + @override + /// Path to the Modelfile (optional) + @JsonKey(includeIfNull: false) + String? get path; @override - /// The function the model wants to call. + /// The quantization level of the model. @JsonKey(includeIfNull: false) - ToolCallFunction? get function; + String? get quantize; + @override + + /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. 
+ bool get stream; @override @JsonKey(ignore: true) - _$$ToolCallImplCopyWith<_$ToolCallImpl> get copyWith => + _$$CreateModelRequestImplCopyWith<_$CreateModelRequestImpl> get copyWith => throw _privateConstructorUsedError; } -ToolCallFunction _$ToolCallFunctionFromJson(Map json) { - return _ToolCallFunction.fromJson(json); +CreateModelResponse _$CreateModelResponseFromJson(Map json) { + return _CreateModelResponse.fromJson(json); } /// @nodoc -mixin _$ToolCallFunction { - /// The name of the function to be called. - String get name => throw _privateConstructorUsedError; - - /// The arguments to pass to the function. - Map get arguments => throw _privateConstructorUsedError; +mixin _$CreateModelResponse { + /// Status creating the model + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateModelStatus? get status => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $ToolCallFunctionCopyWith get copyWith => + $CreateModelResponseCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ToolCallFunctionCopyWith<$Res> { - factory $ToolCallFunctionCopyWith( - ToolCallFunction value, $Res Function(ToolCallFunction) then) = - _$ToolCallFunctionCopyWithImpl<$Res, ToolCallFunction>; +abstract class $CreateModelResponseCopyWith<$Res> { + factory $CreateModelResponseCopyWith( + CreateModelResponse value, $Res Function(CreateModelResponse) then) = + _$CreateModelResponseCopyWithImpl<$Res, CreateModelResponse>; @useResult - $Res call({String name, Map arguments}); + $Res call( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateModelStatus? status}); } /// @nodoc -class _$ToolCallFunctionCopyWithImpl<$Res, $Val extends ToolCallFunction> - implements $ToolCallFunctionCopyWith<$Res> { - _$ToolCallFunctionCopyWithImpl(this._value, this._then); +class _$CreateModelResponseCopyWithImpl<$Res, $Val extends CreateModelResponse> + implements $CreateModelResponseCopyWith<$Res> { + _$CreateModelResponseCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -4152,1838 +3965,157 @@ class _$ToolCallFunctionCopyWithImpl<$Res, $Val extends ToolCallFunction> @pragma('vm:prefer-inline') @override $Res call({ - Object? name = null, - Object? arguments = null, + Object? status = freezed, }) { return _then(_value.copyWith( - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, - arguments: null == arguments - ? _value.arguments - : arguments // ignore: cast_nullable_to_non_nullable - as Map, + status: freezed == status + ? 
_value.status + : status // ignore: cast_nullable_to_non_nullable + as CreateModelStatus?, ) as $Val); } } /// @nodoc -abstract class _$$ToolCallFunctionImplCopyWith<$Res> - implements $ToolCallFunctionCopyWith<$Res> { - factory _$$ToolCallFunctionImplCopyWith(_$ToolCallFunctionImpl value, - $Res Function(_$ToolCallFunctionImpl) then) = - __$$ToolCallFunctionImplCopyWithImpl<$Res>; +abstract class _$$CreateModelResponseImplCopyWith<$Res> + implements $CreateModelResponseCopyWith<$Res> { + factory _$$CreateModelResponseImplCopyWith(_$CreateModelResponseImpl value, + $Res Function(_$CreateModelResponseImpl) then) = + __$$CreateModelResponseImplCopyWithImpl<$Res>; @override @useResult - $Res call({String name, Map arguments}); + $Res call( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateModelStatus? status}); } /// @nodoc -class __$$ToolCallFunctionImplCopyWithImpl<$Res> - extends _$ToolCallFunctionCopyWithImpl<$Res, _$ToolCallFunctionImpl> - implements _$$ToolCallFunctionImplCopyWith<$Res> { - __$$ToolCallFunctionImplCopyWithImpl(_$ToolCallFunctionImpl _value, - $Res Function(_$ToolCallFunctionImpl) _then) +class __$$CreateModelResponseImplCopyWithImpl<$Res> + extends _$CreateModelResponseCopyWithImpl<$Res, _$CreateModelResponseImpl> + implements _$$CreateModelResponseImplCopyWith<$Res> { + __$$CreateModelResponseImplCopyWithImpl(_$CreateModelResponseImpl _value, + $Res Function(_$CreateModelResponseImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? name = null, - Object? arguments = null, + Object? status = freezed, }) { - return _then(_$ToolCallFunctionImpl( - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, - arguments: null == arguments - ? _value._arguments - : arguments // ignore: cast_nullable_to_non_nullable - as Map, + return _then(_$CreateModelResponseImpl( + status: freezed == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as CreateModelStatus?, )); } } /// @nodoc @JsonSerializable() -class _$ToolCallFunctionImpl extends _ToolCallFunction { - const _$ToolCallFunctionImpl( - {required this.name, required final Map arguments}) - : _arguments = arguments, - super._(); - - factory _$ToolCallFunctionImpl.fromJson(Map json) => - _$$ToolCallFunctionImplFromJson(json); - - /// The name of the function to be called. - @override - final String name; +class _$CreateModelResponseImpl extends _CreateModelResponse { + const _$CreateModelResponseImpl( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.status}) + : super._(); - /// The arguments to pass to the function. - final Map _arguments; + factory _$CreateModelResponseImpl.fromJson(Map json) => + _$$CreateModelResponseImplFromJson(json); - /// The arguments to pass to the function. + /// Status creating the model @override - Map get arguments { - if (_arguments is EqualUnmodifiableMapView) return _arguments; - // ignore: implicit_dynamic_type - return EqualUnmodifiableMapView(_arguments); - } + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final CreateModelStatus? 
status; @override String toString() { - return 'ToolCallFunction(name: $name, arguments: $arguments)'; + return 'CreateModelResponse(status: $status)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ToolCallFunctionImpl && - (identical(other.name, name) || other.name == name) && - const DeepCollectionEquality() - .equals(other._arguments, _arguments)); + other is _$CreateModelResponseImpl && + (identical(other.status, status) || other.status == status)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash( - runtimeType, name, const DeepCollectionEquality().hash(_arguments)); + int get hashCode => Object.hash(runtimeType, status); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ToolCallFunctionImplCopyWith<_$ToolCallFunctionImpl> get copyWith => - __$$ToolCallFunctionImplCopyWithImpl<_$ToolCallFunctionImpl>( + _$$CreateModelResponseImplCopyWith<_$CreateModelResponseImpl> get copyWith => + __$$CreateModelResponseImplCopyWithImpl<_$CreateModelResponseImpl>( this, _$identity); @override Map toJson() { - return _$$ToolCallFunctionImplToJson( + return _$$CreateModelResponseImplToJson( this, ); } } -abstract class _ToolCallFunction extends ToolCallFunction { - const factory _ToolCallFunction( - {required final String name, - required final Map arguments}) = _$ToolCallFunctionImpl; - const _ToolCallFunction._() : super._(); +abstract class _CreateModelResponse extends CreateModelResponse { + const factory _CreateModelResponse( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final CreateModelStatus? status}) = _$CreateModelResponseImpl; + const _CreateModelResponse._() : super._(); - factory _ToolCallFunction.fromJson(Map json) = - _$ToolCallFunctionImpl.fromJson; + factory _CreateModelResponse.fromJson(Map json) = + _$CreateModelResponseImpl.fromJson; @override - /// The name of the function to be called. - String get name; - @override - - /// The arguments to pass to the function. - Map get arguments; - @override - @JsonKey(ignore: true) - _$$ToolCallFunctionImplCopyWith<_$ToolCallFunctionImpl> get copyWith => - throw _privateConstructorUsedError; -} - -GenerateEmbeddingRequest _$GenerateEmbeddingRequestFromJson( - Map json) { - return _GenerateEmbeddingRequest.fromJson(json); -} - -/// @nodoc -mixin _$GenerateEmbeddingRequest { - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - String get model => throw _privateConstructorUsedError; - - /// Text to generate embeddings for. - String get prompt => throw _privateConstructorUsedError; - - /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`. - @JsonKey(includeIfNull: false) - RequestOptions? get options => throw _privateConstructorUsedError; - - /// How long (in minutes) to keep the model loaded in memory. - /// - /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. - /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely. - /// - If set to 0, the model will be unloaded immediately once finished. - /// - If not set, the model will stay loaded for 5 minutes by default - @JsonKey(name: 'keep_alive', includeIfNull: false) - int? 
get keepAlive => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $GenerateEmbeddingRequestCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $GenerateEmbeddingRequestCopyWith<$Res> { - factory $GenerateEmbeddingRequestCopyWith(GenerateEmbeddingRequest value, - $Res Function(GenerateEmbeddingRequest) then) = - _$GenerateEmbeddingRequestCopyWithImpl<$Res, GenerateEmbeddingRequest>; - @useResult - $Res call( - {String model, - String prompt, - @JsonKey(includeIfNull: false) RequestOptions? options, - @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); - - $RequestOptionsCopyWith<$Res>? get options; -} - -/// @nodoc -class _$GenerateEmbeddingRequestCopyWithImpl<$Res, - $Val extends GenerateEmbeddingRequest> - implements $GenerateEmbeddingRequestCopyWith<$Res> { - _$GenerateEmbeddingRequestCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? model = null, - Object? prompt = null, - Object? options = freezed, - Object? keepAlive = freezed, - }) { - return _then(_value.copyWith( - model: null == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String, - prompt: null == prompt - ? _value.prompt - : prompt // ignore: cast_nullable_to_non_nullable - as String, - options: freezed == options - ? _value.options - : options // ignore: cast_nullable_to_non_nullable - as RequestOptions?, - keepAlive: freezed == keepAlive - ? _value.keepAlive - : keepAlive // ignore: cast_nullable_to_non_nullable - as int?, - ) as $Val); - } - - @override - @pragma('vm:prefer-inline') - $RequestOptionsCopyWith<$Res>? get options { - if (_value.options == null) { - return null; - } - - return $RequestOptionsCopyWith<$Res>(_value.options!, (value) { - return _then(_value.copyWith(options: value) as $Val); - }); - } -} - -/// @nodoc -abstract class _$$GenerateEmbeddingRequestImplCopyWith<$Res> - implements $GenerateEmbeddingRequestCopyWith<$Res> { - factory _$$GenerateEmbeddingRequestImplCopyWith( - _$GenerateEmbeddingRequestImpl value, - $Res Function(_$GenerateEmbeddingRequestImpl) then) = - __$$GenerateEmbeddingRequestImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {String model, - String prompt, - @JsonKey(includeIfNull: false) RequestOptions? options, - @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); - - @override - $RequestOptionsCopyWith<$Res>? get options; -} - -/// @nodoc -class __$$GenerateEmbeddingRequestImplCopyWithImpl<$Res> - extends _$GenerateEmbeddingRequestCopyWithImpl<$Res, - _$GenerateEmbeddingRequestImpl> - implements _$$GenerateEmbeddingRequestImplCopyWith<$Res> { - __$$GenerateEmbeddingRequestImplCopyWithImpl( - _$GenerateEmbeddingRequestImpl _value, - $Res Function(_$GenerateEmbeddingRequestImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? model = null, - Object? prompt = null, - Object? options = freezed, - Object? keepAlive = freezed, - }) { - return _then(_$GenerateEmbeddingRequestImpl( - model: null == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String, - prompt: null == prompt - ? _value.prompt - : prompt // ignore: cast_nullable_to_non_nullable - as String, - options: freezed == options - ? 
_value.options - : options // ignore: cast_nullable_to_non_nullable - as RequestOptions?, - keepAlive: freezed == keepAlive - ? _value.keepAlive - : keepAlive // ignore: cast_nullable_to_non_nullable - as int?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$GenerateEmbeddingRequestImpl extends _GenerateEmbeddingRequest { - const _$GenerateEmbeddingRequestImpl( - {required this.model, - required this.prompt, - @JsonKey(includeIfNull: false) this.options, - @JsonKey(name: 'keep_alive', includeIfNull: false) this.keepAlive}) - : super._(); - - factory _$GenerateEmbeddingRequestImpl.fromJson(Map json) => - _$$GenerateEmbeddingRequestImplFromJson(json); - - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @override - final String model; - - /// Text to generate embeddings for. - @override - final String prompt; - - /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`. - @override - @JsonKey(includeIfNull: false) - final RequestOptions? options; - - /// How long (in minutes) to keep the model loaded in memory. - /// - /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. - /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely. - /// - If set to 0, the model will be unloaded immediately once finished. - /// - If not set, the model will stay loaded for 5 minutes by default - @override - @JsonKey(name: 'keep_alive', includeIfNull: false) - final int? keepAlive; - - @override - String toString() { - return 'GenerateEmbeddingRequest(model: $model, prompt: $prompt, options: $options, keepAlive: $keepAlive)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$GenerateEmbeddingRequestImpl && - (identical(other.model, model) || other.model == model) && - (identical(other.prompt, prompt) || other.prompt == prompt) && - (identical(other.options, options) || other.options == options) && - (identical(other.keepAlive, keepAlive) || - other.keepAlive == keepAlive)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => - Object.hash(runtimeType, model, prompt, options, keepAlive); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$GenerateEmbeddingRequestImplCopyWith<_$GenerateEmbeddingRequestImpl> - get copyWith => __$$GenerateEmbeddingRequestImplCopyWithImpl< - _$GenerateEmbeddingRequestImpl>(this, _$identity); - - @override - Map toJson() { - return _$$GenerateEmbeddingRequestImplToJson( - this, - ); - } -} - -abstract class _GenerateEmbeddingRequest extends GenerateEmbeddingRequest { - const factory _GenerateEmbeddingRequest( - {required final String model, - required final String prompt, - @JsonKey(includeIfNull: false) final RequestOptions? options, - @JsonKey(name: 'keep_alive', includeIfNull: false) - final int? keepAlive}) = _$GenerateEmbeddingRequestImpl; - const _GenerateEmbeddingRequest._() : super._(); - - factory _GenerateEmbeddingRequest.fromJson(Map json) = - _$GenerateEmbeddingRequestImpl.fromJson; - - @override - - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. 
The tag is used to identify a specific version. - String get model; - @override - - /// Text to generate embeddings for. - String get prompt; - @override - - /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`. - @JsonKey(includeIfNull: false) - RequestOptions? get options; - @override - - /// How long (in minutes) to keep the model loaded in memory. - /// - /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. - /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely. - /// - If set to 0, the model will be unloaded immediately once finished. - /// - If not set, the model will stay loaded for 5 minutes by default - @JsonKey(name: 'keep_alive', includeIfNull: false) - int? get keepAlive; - @override - @JsonKey(ignore: true) - _$$GenerateEmbeddingRequestImplCopyWith<_$GenerateEmbeddingRequestImpl> - get copyWith => throw _privateConstructorUsedError; -} - -GenerateEmbeddingResponse _$GenerateEmbeddingResponseFromJson( - Map json) { - return _GenerateEmbeddingResponse.fromJson(json); -} - -/// @nodoc -mixin _$GenerateEmbeddingResponse { - /// The embedding for the prompt. - @JsonKey(includeIfNull: false) - List? get embedding => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $GenerateEmbeddingResponseCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $GenerateEmbeddingResponseCopyWith<$Res> { - factory $GenerateEmbeddingResponseCopyWith(GenerateEmbeddingResponse value, - $Res Function(GenerateEmbeddingResponse) then) = - _$GenerateEmbeddingResponseCopyWithImpl<$Res, GenerateEmbeddingResponse>; - @useResult - $Res call({@JsonKey(includeIfNull: false) List? embedding}); -} - -/// @nodoc -class _$GenerateEmbeddingResponseCopyWithImpl<$Res, - $Val extends GenerateEmbeddingResponse> - implements $GenerateEmbeddingResponseCopyWith<$Res> { - _$GenerateEmbeddingResponseCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? embedding = freezed, - }) { - return _then(_value.copyWith( - embedding: freezed == embedding - ? _value.embedding - : embedding // ignore: cast_nullable_to_non_nullable - as List?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$GenerateEmbeddingResponseImplCopyWith<$Res> - implements $GenerateEmbeddingResponseCopyWith<$Res> { - factory _$$GenerateEmbeddingResponseImplCopyWith( - _$GenerateEmbeddingResponseImpl value, - $Res Function(_$GenerateEmbeddingResponseImpl) then) = - __$$GenerateEmbeddingResponseImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({@JsonKey(includeIfNull: false) List? embedding}); -} - -/// @nodoc -class __$$GenerateEmbeddingResponseImplCopyWithImpl<$Res> - extends _$GenerateEmbeddingResponseCopyWithImpl<$Res, - _$GenerateEmbeddingResponseImpl> - implements _$$GenerateEmbeddingResponseImplCopyWith<$Res> { - __$$GenerateEmbeddingResponseImplCopyWithImpl( - _$GenerateEmbeddingResponseImpl _value, - $Res Function(_$GenerateEmbeddingResponseImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? embedding = freezed, - }) { - return _then(_$GenerateEmbeddingResponseImpl( - embedding: freezed == embedding - ? 
_value._embedding - : embedding // ignore: cast_nullable_to_non_nullable - as List?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$GenerateEmbeddingResponseImpl extends _GenerateEmbeddingResponse { - const _$GenerateEmbeddingResponseImpl( - {@JsonKey(includeIfNull: false) final List? embedding}) - : _embedding = embedding, - super._(); - - factory _$GenerateEmbeddingResponseImpl.fromJson(Map json) => - _$$GenerateEmbeddingResponseImplFromJson(json); - - /// The embedding for the prompt. - final List? _embedding; - - /// The embedding for the prompt. - @override - @JsonKey(includeIfNull: false) - List? get embedding { - final value = _embedding; - if (value == null) return null; - if (_embedding is EqualUnmodifiableListView) return _embedding; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - - @override - String toString() { - return 'GenerateEmbeddingResponse(embedding: $embedding)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$GenerateEmbeddingResponseImpl && - const DeepCollectionEquality() - .equals(other._embedding, _embedding)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_embedding)); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$GenerateEmbeddingResponseImplCopyWith<_$GenerateEmbeddingResponseImpl> - get copyWith => __$$GenerateEmbeddingResponseImplCopyWithImpl< - _$GenerateEmbeddingResponseImpl>(this, _$identity); - - @override - Map toJson() { - return _$$GenerateEmbeddingResponseImplToJson( - this, - ); - } -} - -abstract class _GenerateEmbeddingResponse extends GenerateEmbeddingResponse { - const factory _GenerateEmbeddingResponse( - {@JsonKey(includeIfNull: false) final List? embedding}) = - _$GenerateEmbeddingResponseImpl; - const _GenerateEmbeddingResponse._() : super._(); - - factory _GenerateEmbeddingResponse.fromJson(Map json) = - _$GenerateEmbeddingResponseImpl.fromJson; - - @override - - /// The embedding for the prompt. - @JsonKey(includeIfNull: false) - List? get embedding; - @override - @JsonKey(ignore: true) - _$$GenerateEmbeddingResponseImplCopyWith<_$GenerateEmbeddingResponseImpl> - get copyWith => throw _privateConstructorUsedError; -} - -CreateModelRequest _$CreateModelRequestFromJson(Map json) { - return _CreateModelRequest.fromJson(json); -} - -/// @nodoc -mixin _$CreateModelRequest { - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - String get model => throw _privateConstructorUsedError; - - /// The contents of the Modelfile. - String get modelfile => throw _privateConstructorUsedError; - - /// Path to the Modelfile (optional) - @JsonKey(includeIfNull: false) - String? get path => throw _privateConstructorUsedError; - - /// The quantization level of the model. - @JsonKey(includeIfNull: false) - String? get quantize => throw _privateConstructorUsedError; - - /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. 
- bool get stream => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $CreateModelRequestCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $CreateModelRequestCopyWith<$Res> { - factory $CreateModelRequestCopyWith( - CreateModelRequest value, $Res Function(CreateModelRequest) then) = - _$CreateModelRequestCopyWithImpl<$Res, CreateModelRequest>; - @useResult - $Res call( - {String model, - String modelfile, - @JsonKey(includeIfNull: false) String? path, - @JsonKey(includeIfNull: false) String? quantize, - bool stream}); -} - -/// @nodoc -class _$CreateModelRequestCopyWithImpl<$Res, $Val extends CreateModelRequest> - implements $CreateModelRequestCopyWith<$Res> { - _$CreateModelRequestCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? model = null, - Object? modelfile = null, - Object? path = freezed, - Object? quantize = freezed, - Object? stream = null, - }) { - return _then(_value.copyWith( - model: null == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String, - modelfile: null == modelfile - ? _value.modelfile - : modelfile // ignore: cast_nullable_to_non_nullable - as String, - path: freezed == path - ? _value.path - : path // ignore: cast_nullable_to_non_nullable - as String?, - quantize: freezed == quantize - ? _value.quantize - : quantize // ignore: cast_nullable_to_non_nullable - as String?, - stream: null == stream - ? _value.stream - : stream // ignore: cast_nullable_to_non_nullable - as bool, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$CreateModelRequestImplCopyWith<$Res> - implements $CreateModelRequestCopyWith<$Res> { - factory _$$CreateModelRequestImplCopyWith(_$CreateModelRequestImpl value, - $Res Function(_$CreateModelRequestImpl) then) = - __$$CreateModelRequestImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {String model, - String modelfile, - @JsonKey(includeIfNull: false) String? path, - @JsonKey(includeIfNull: false) String? quantize, - bool stream}); -} - -/// @nodoc -class __$$CreateModelRequestImplCopyWithImpl<$Res> - extends _$CreateModelRequestCopyWithImpl<$Res, _$CreateModelRequestImpl> - implements _$$CreateModelRequestImplCopyWith<$Res> { - __$$CreateModelRequestImplCopyWithImpl(_$CreateModelRequestImpl _value, - $Res Function(_$CreateModelRequestImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? model = null, - Object? modelfile = null, - Object? path = freezed, - Object? quantize = freezed, - Object? stream = null, - }) { - return _then(_$CreateModelRequestImpl( - model: null == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String, - modelfile: null == modelfile - ? _value.modelfile - : modelfile // ignore: cast_nullable_to_non_nullable - as String, - path: freezed == path - ? _value.path - : path // ignore: cast_nullable_to_non_nullable - as String?, - quantize: freezed == quantize - ? _value.quantize - : quantize // ignore: cast_nullable_to_non_nullable - as String?, - stream: null == stream - ? 
_value.stream - : stream // ignore: cast_nullable_to_non_nullable - as bool, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$CreateModelRequestImpl extends _CreateModelRequest { - const _$CreateModelRequestImpl( - {required this.model, - required this.modelfile, - @JsonKey(includeIfNull: false) this.path, - @JsonKey(includeIfNull: false) this.quantize, - this.stream = false}) - : super._(); - - factory _$CreateModelRequestImpl.fromJson(Map json) => - _$$CreateModelRequestImplFromJson(json); - - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @override - final String model; - - /// The contents of the Modelfile. - @override - final String modelfile; - - /// Path to the Modelfile (optional) - @override - @JsonKey(includeIfNull: false) - final String? path; - - /// The quantization level of the model. - @override - @JsonKey(includeIfNull: false) - final String? quantize; - - /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. - @override - @JsonKey() - final bool stream; - - @override - String toString() { - return 'CreateModelRequest(model: $model, modelfile: $modelfile, path: $path, quantize: $quantize, stream: $stream)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$CreateModelRequestImpl && - (identical(other.model, model) || other.model == model) && - (identical(other.modelfile, modelfile) || - other.modelfile == modelfile) && - (identical(other.path, path) || other.path == path) && - (identical(other.quantize, quantize) || - other.quantize == quantize) && - (identical(other.stream, stream) || other.stream == stream)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => - Object.hash(runtimeType, model, modelfile, path, quantize, stream); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$CreateModelRequestImplCopyWith<_$CreateModelRequestImpl> get copyWith => - __$$CreateModelRequestImplCopyWithImpl<_$CreateModelRequestImpl>( - this, _$identity); - - @override - Map toJson() { - return _$$CreateModelRequestImplToJson( - this, - ); - } -} - -abstract class _CreateModelRequest extends CreateModelRequest { - const factory _CreateModelRequest( - {required final String model, - required final String modelfile, - @JsonKey(includeIfNull: false) final String? path, - @JsonKey(includeIfNull: false) final String? quantize, - final bool stream}) = _$CreateModelRequestImpl; - const _CreateModelRequest._() : super._(); - - factory _CreateModelRequest.fromJson(Map json) = - _$CreateModelRequestImpl.fromJson; - - @override - - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - String get model; - @override - - /// The contents of the Modelfile. - String get modelfile; - @override - - /// Path to the Modelfile (optional) - @JsonKey(includeIfNull: false) - String? get path; - @override - - /// The quantization level of the model. - @JsonKey(includeIfNull: false) - String? 
get quantize; - @override - - /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. - bool get stream; - @override - @JsonKey(ignore: true) - _$$CreateModelRequestImplCopyWith<_$CreateModelRequestImpl> get copyWith => - throw _privateConstructorUsedError; -} - -CreateModelResponse _$CreateModelResponseFromJson(Map json) { - return _CreateModelResponse.fromJson(json); -} - -/// @nodoc -mixin _$CreateModelResponse { - /// Status creating the model - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - CreateModelStatus? get status => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $CreateModelResponseCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $CreateModelResponseCopyWith<$Res> { - factory $CreateModelResponseCopyWith( - CreateModelResponse value, $Res Function(CreateModelResponse) then) = - _$CreateModelResponseCopyWithImpl<$Res, CreateModelResponse>; - @useResult - $Res call( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - CreateModelStatus? status}); -} - -/// @nodoc -class _$CreateModelResponseCopyWithImpl<$Res, $Val extends CreateModelResponse> - implements $CreateModelResponseCopyWith<$Res> { - _$CreateModelResponseCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? status = freezed, - }) { - return _then(_value.copyWith( - status: freezed == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as CreateModelStatus?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$CreateModelResponseImplCopyWith<$Res> - implements $CreateModelResponseCopyWith<$Res> { - factory _$$CreateModelResponseImplCopyWith(_$CreateModelResponseImpl value, - $Res Function(_$CreateModelResponseImpl) then) = - __$$CreateModelResponseImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - CreateModelStatus? status}); -} - -/// @nodoc -class __$$CreateModelResponseImplCopyWithImpl<$Res> - extends _$CreateModelResponseCopyWithImpl<$Res, _$CreateModelResponseImpl> - implements _$$CreateModelResponseImplCopyWith<$Res> { - __$$CreateModelResponseImplCopyWithImpl(_$CreateModelResponseImpl _value, - $Res Function(_$CreateModelResponseImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? status = freezed, - }) { - return _then(_$CreateModelResponseImpl( - status: freezed == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as CreateModelStatus?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$CreateModelResponseImpl extends _CreateModelResponse { - const _$CreateModelResponseImpl( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - this.status}) - : super._(); - - factory _$CreateModelResponseImpl.fromJson(Map json) => - _$$CreateModelResponseImplFromJson(json); - - /// Status creating the model - @override - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final CreateModelStatus? 
status; - - @override - String toString() { - return 'CreateModelResponse(status: $status)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$CreateModelResponseImpl && - (identical(other.status, status) || other.status == status)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, status); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$CreateModelResponseImplCopyWith<_$CreateModelResponseImpl> get copyWith => - __$$CreateModelResponseImplCopyWithImpl<_$CreateModelResponseImpl>( - this, _$identity); - - @override - Map toJson() { - return _$$CreateModelResponseImplToJson( - this, - ); - } -} - -abstract class _CreateModelResponse extends CreateModelResponse { - const factory _CreateModelResponse( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final CreateModelStatus? status}) = _$CreateModelResponseImpl; - const _CreateModelResponse._() : super._(); - - factory _CreateModelResponse.fromJson(Map json) = - _$CreateModelResponseImpl.fromJson; - - @override - - /// Status creating the model - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - CreateModelStatus? get status; - @override - @JsonKey(ignore: true) - _$$CreateModelResponseImplCopyWith<_$CreateModelResponseImpl> get copyWith => - throw _privateConstructorUsedError; -} - -ModelsResponse _$ModelsResponseFromJson(Map json) { - return _ModelsResponse.fromJson(json); -} - -/// @nodoc -mixin _$ModelsResponse { - /// List of models available locally. - @JsonKey(includeIfNull: false) - List? get models => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ModelsResponseCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ModelsResponseCopyWith<$Res> { - factory $ModelsResponseCopyWith( - ModelsResponse value, $Res Function(ModelsResponse) then) = - _$ModelsResponseCopyWithImpl<$Res, ModelsResponse>; - @useResult - $Res call({@JsonKey(includeIfNull: false) List? models}); -} - -/// @nodoc -class _$ModelsResponseCopyWithImpl<$Res, $Val extends ModelsResponse> - implements $ModelsResponseCopyWith<$Res> { - _$ModelsResponseCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? models = freezed, - }) { - return _then(_value.copyWith( - models: freezed == models - ? _value.models - : models // ignore: cast_nullable_to_non_nullable - as List?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$ModelsResponseImplCopyWith<$Res> - implements $ModelsResponseCopyWith<$Res> { - factory _$$ModelsResponseImplCopyWith(_$ModelsResponseImpl value, - $Res Function(_$ModelsResponseImpl) then) = - __$$ModelsResponseImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({@JsonKey(includeIfNull: false) List? models}); -} - -/// @nodoc -class __$$ModelsResponseImplCopyWithImpl<$Res> - extends _$ModelsResponseCopyWithImpl<$Res, _$ModelsResponseImpl> - implements _$$ModelsResponseImplCopyWith<$Res> { - __$$ModelsResponseImplCopyWithImpl( - _$ModelsResponseImpl _value, $Res Function(_$ModelsResponseImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? 
models = freezed, - }) { - return _then(_$ModelsResponseImpl( - models: freezed == models - ? _value._models - : models // ignore: cast_nullable_to_non_nullable - as List?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ModelsResponseImpl extends _ModelsResponse { - const _$ModelsResponseImpl( - {@JsonKey(includeIfNull: false) final List? models}) - : _models = models, - super._(); - - factory _$ModelsResponseImpl.fromJson(Map json) => - _$$ModelsResponseImplFromJson(json); - - /// List of models available locally. - final List? _models; - - /// List of models available locally. - @override - @JsonKey(includeIfNull: false) - List? get models { - final value = _models; - if (value == null) return null; - if (_models is EqualUnmodifiableListView) return _models; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - - @override - String toString() { - return 'ModelsResponse(models: $models)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ModelsResponseImpl && - const DeepCollectionEquality().equals(other._models, _models)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_models)); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ModelsResponseImplCopyWith<_$ModelsResponseImpl> get copyWith => - __$$ModelsResponseImplCopyWithImpl<_$ModelsResponseImpl>( - this, _$identity); - - @override - Map toJson() { - return _$$ModelsResponseImplToJson( - this, - ); - } -} - -abstract class _ModelsResponse extends ModelsResponse { - const factory _ModelsResponse( - {@JsonKey(includeIfNull: false) final List? models}) = - _$ModelsResponseImpl; - const _ModelsResponse._() : super._(); - - factory _ModelsResponse.fromJson(Map json) = - _$ModelsResponseImpl.fromJson; - - @override - - /// List of models available locally. - @JsonKey(includeIfNull: false) - List? get models; - @override - @JsonKey(ignore: true) - _$$ModelsResponseImplCopyWith<_$ModelsResponseImpl> get copyWith => - throw _privateConstructorUsedError; -} - -Model _$ModelFromJson(Map json) { - return _Model.fromJson(json); -} - -/// @nodoc -mixin _$Model { - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) - String? get model => throw _privateConstructorUsedError; - - /// Model modification date. - @JsonKey(name: 'modified_at', includeIfNull: false) - String? get modifiedAt => throw _privateConstructorUsedError; - - /// Size of the model on disk. - @JsonKey(includeIfNull: false) - int? get size => throw _privateConstructorUsedError; - - /// The model's digest. - @JsonKey(includeIfNull: false) - String? get digest => throw _privateConstructorUsedError; - - /// Details about a model. - @JsonKey(includeIfNull: false) - ModelDetails? get details => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ModelCopyWith get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ModelCopyWith<$Res> { - factory $ModelCopyWith(Model value, $Res Function(Model) then) = - _$ModelCopyWithImpl<$Res, Model>; - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? 
model, - @JsonKey(name: 'modified_at', includeIfNull: false) String? modifiedAt, - @JsonKey(includeIfNull: false) int? size, - @JsonKey(includeIfNull: false) String? digest, - @JsonKey(includeIfNull: false) ModelDetails? details}); - - $ModelDetailsCopyWith<$Res>? get details; -} - -/// @nodoc -class _$ModelCopyWithImpl<$Res, $Val extends Model> - implements $ModelCopyWith<$Res> { - _$ModelCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? model = freezed, - Object? modifiedAt = freezed, - Object? size = freezed, - Object? digest = freezed, - Object? details = freezed, - }) { - return _then(_value.copyWith( - model: freezed == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String?, - modifiedAt: freezed == modifiedAt - ? _value.modifiedAt - : modifiedAt // ignore: cast_nullable_to_non_nullable - as String?, - size: freezed == size - ? _value.size - : size // ignore: cast_nullable_to_non_nullable - as int?, - digest: freezed == digest - ? _value.digest - : digest // ignore: cast_nullable_to_non_nullable - as String?, - details: freezed == details - ? _value.details - : details // ignore: cast_nullable_to_non_nullable - as ModelDetails?, - ) as $Val); - } - - @override - @pragma('vm:prefer-inline') - $ModelDetailsCopyWith<$Res>? get details { - if (_value.details == null) { - return null; - } - - return $ModelDetailsCopyWith<$Res>(_value.details!, (value) { - return _then(_value.copyWith(details: value) as $Val); - }); - } -} - -/// @nodoc -abstract class _$$ModelImplCopyWith<$Res> implements $ModelCopyWith<$Res> { - factory _$$ModelImplCopyWith( - _$ModelImpl value, $Res Function(_$ModelImpl) then) = - __$$ModelImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? model, - @JsonKey(name: 'modified_at', includeIfNull: false) String? modifiedAt, - @JsonKey(includeIfNull: false) int? size, - @JsonKey(includeIfNull: false) String? digest, - @JsonKey(includeIfNull: false) ModelDetails? details}); - - @override - $ModelDetailsCopyWith<$Res>? get details; -} - -/// @nodoc -class __$$ModelImplCopyWithImpl<$Res> - extends _$ModelCopyWithImpl<$Res, _$ModelImpl> - implements _$$ModelImplCopyWith<$Res> { - __$$ModelImplCopyWithImpl( - _$ModelImpl _value, $Res Function(_$ModelImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? model = freezed, - Object? modifiedAt = freezed, - Object? size = freezed, - Object? digest = freezed, - Object? details = freezed, - }) { - return _then(_$ModelImpl( - model: freezed == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String?, - modifiedAt: freezed == modifiedAt - ? _value.modifiedAt - : modifiedAt // ignore: cast_nullable_to_non_nullable - as String?, - size: freezed == size - ? _value.size - : size // ignore: cast_nullable_to_non_nullable - as int?, - digest: freezed == digest - ? _value.digest - : digest // ignore: cast_nullable_to_non_nullable - as String?, - details: freezed == details - ? 
_value.details - : details // ignore: cast_nullable_to_non_nullable - as ModelDetails?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ModelImpl extends _Model { - const _$ModelImpl( - {@JsonKey(includeIfNull: false) this.model, - @JsonKey(name: 'modified_at', includeIfNull: false) this.modifiedAt, - @JsonKey(includeIfNull: false) this.size, - @JsonKey(includeIfNull: false) this.digest, - @JsonKey(includeIfNull: false) this.details}) - : super._(); - - factory _$ModelImpl.fromJson(Map json) => - _$$ModelImplFromJson(json); - - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @override - @JsonKey(includeIfNull: false) - final String? model; - - /// Model modification date. - @override - @JsonKey(name: 'modified_at', includeIfNull: false) - final String? modifiedAt; - - /// Size of the model on disk. - @override - @JsonKey(includeIfNull: false) - final int? size; - - /// The model's digest. - @override - @JsonKey(includeIfNull: false) - final String? digest; - - /// Details about a model. - @override - @JsonKey(includeIfNull: false) - final ModelDetails? details; - - @override - String toString() { - return 'Model(model: $model, modifiedAt: $modifiedAt, size: $size, digest: $digest, details: $details)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ModelImpl && - (identical(other.model, model) || other.model == model) && - (identical(other.modifiedAt, modifiedAt) || - other.modifiedAt == modifiedAt) && - (identical(other.size, size) || other.size == size) && - (identical(other.digest, digest) || other.digest == digest) && - (identical(other.details, details) || other.details == details)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => - Object.hash(runtimeType, model, modifiedAt, size, digest, details); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ModelImplCopyWith<_$ModelImpl> get copyWith => - __$$ModelImplCopyWithImpl<_$ModelImpl>(this, _$identity); - - @override - Map toJson() { - return _$$ModelImplToJson( - this, - ); - } -} - -abstract class _Model extends Model { - const factory _Model( - {@JsonKey(includeIfNull: false) final String? model, - @JsonKey(name: 'modified_at', includeIfNull: false) - final String? modifiedAt, - @JsonKey(includeIfNull: false) final int? size, - @JsonKey(includeIfNull: false) final String? digest, - @JsonKey(includeIfNull: false) final ModelDetails? details}) = - _$ModelImpl; - const _Model._() : super._(); - - factory _Model.fromJson(Map json) = _$ModelImpl.fromJson; - - @override - - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) - String? get model; - @override - - /// Model modification date. - @JsonKey(name: 'modified_at', includeIfNull: false) - String? get modifiedAt; - @override - - /// Size of the model on disk. - @JsonKey(includeIfNull: false) - int? get size; - @override - - /// The model's digest. - @JsonKey(includeIfNull: false) - String? get digest; - @override - - /// Details about a model. - @JsonKey(includeIfNull: false) - ModelDetails? 
get details; - @override - @JsonKey(ignore: true) - _$$ModelImplCopyWith<_$ModelImpl> get copyWith => - throw _privateConstructorUsedError; -} - -ModelDetails _$ModelDetailsFromJson(Map json) { - return _ModelDetails.fromJson(json); -} - -/// @nodoc -mixin _$ModelDetails { - /// The parent model of the model. - @JsonKey(name: 'parent_model', includeIfNull: false) - String? get parentModel => throw _privateConstructorUsedError; - - /// The format of the model. - @JsonKey(includeIfNull: false) - String? get format => throw _privateConstructorUsedError; - - /// The family of the model. - @JsonKey(includeIfNull: false) - String? get family => throw _privateConstructorUsedError; - - /// The families of the model. - @JsonKey(includeIfNull: false) - List? get families => throw _privateConstructorUsedError; - - /// The size of the model's parameters. - @JsonKey(name: 'parameter_size', includeIfNull: false) - String? get parameterSize => throw _privateConstructorUsedError; - - /// The quantization level of the model. - @JsonKey(name: 'quantization_level', includeIfNull: false) - String? get quantizationLevel => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ModelDetailsCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ModelDetailsCopyWith<$Res> { - factory $ModelDetailsCopyWith( - ModelDetails value, $Res Function(ModelDetails) then) = - _$ModelDetailsCopyWithImpl<$Res, ModelDetails>; - @useResult - $Res call( - {@JsonKey(name: 'parent_model', includeIfNull: false) String? parentModel, - @JsonKey(includeIfNull: false) String? format, - @JsonKey(includeIfNull: false) String? family, - @JsonKey(includeIfNull: false) List? families, - @JsonKey(name: 'parameter_size', includeIfNull: false) - String? parameterSize, - @JsonKey(name: 'quantization_level', includeIfNull: false) - String? quantizationLevel}); -} - -/// @nodoc -class _$ModelDetailsCopyWithImpl<$Res, $Val extends ModelDetails> - implements $ModelDetailsCopyWith<$Res> { - _$ModelDetailsCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? parentModel = freezed, - Object? format = freezed, - Object? family = freezed, - Object? families = freezed, - Object? parameterSize = freezed, - Object? quantizationLevel = freezed, - }) { - return _then(_value.copyWith( - parentModel: freezed == parentModel - ? _value.parentModel - : parentModel // ignore: cast_nullable_to_non_nullable - as String?, - format: freezed == format - ? _value.format - : format // ignore: cast_nullable_to_non_nullable - as String?, - family: freezed == family - ? _value.family - : family // ignore: cast_nullable_to_non_nullable - as String?, - families: freezed == families - ? _value.families - : families // ignore: cast_nullable_to_non_nullable - as List?, - parameterSize: freezed == parameterSize - ? _value.parameterSize - : parameterSize // ignore: cast_nullable_to_non_nullable - as String?, - quantizationLevel: freezed == quantizationLevel - ? 
_value.quantizationLevel - : quantizationLevel // ignore: cast_nullable_to_non_nullable - as String?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$ModelDetailsImplCopyWith<$Res> - implements $ModelDetailsCopyWith<$Res> { - factory _$$ModelDetailsImplCopyWith( - _$ModelDetailsImpl value, $Res Function(_$ModelDetailsImpl) then) = - __$$ModelDetailsImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(name: 'parent_model', includeIfNull: false) String? parentModel, - @JsonKey(includeIfNull: false) String? format, - @JsonKey(includeIfNull: false) String? family, - @JsonKey(includeIfNull: false) List? families, - @JsonKey(name: 'parameter_size', includeIfNull: false) - String? parameterSize, - @JsonKey(name: 'quantization_level', includeIfNull: false) - String? quantizationLevel}); -} - -/// @nodoc -class __$$ModelDetailsImplCopyWithImpl<$Res> - extends _$ModelDetailsCopyWithImpl<$Res, _$ModelDetailsImpl> - implements _$$ModelDetailsImplCopyWith<$Res> { - __$$ModelDetailsImplCopyWithImpl( - _$ModelDetailsImpl _value, $Res Function(_$ModelDetailsImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? parentModel = freezed, - Object? format = freezed, - Object? family = freezed, - Object? families = freezed, - Object? parameterSize = freezed, - Object? quantizationLevel = freezed, - }) { - return _then(_$ModelDetailsImpl( - parentModel: freezed == parentModel - ? _value.parentModel - : parentModel // ignore: cast_nullable_to_non_nullable - as String?, - format: freezed == format - ? _value.format - : format // ignore: cast_nullable_to_non_nullable - as String?, - family: freezed == family - ? _value.family - : family // ignore: cast_nullable_to_non_nullable - as String?, - families: freezed == families - ? _value._families - : families // ignore: cast_nullable_to_non_nullable - as List?, - parameterSize: freezed == parameterSize - ? _value.parameterSize - : parameterSize // ignore: cast_nullable_to_non_nullable - as String?, - quantizationLevel: freezed == quantizationLevel - ? _value.quantizationLevel - : quantizationLevel // ignore: cast_nullable_to_non_nullable - as String?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ModelDetailsImpl extends _ModelDetails { - const _$ModelDetailsImpl( - {@JsonKey(name: 'parent_model', includeIfNull: false) this.parentModel, - @JsonKey(includeIfNull: false) this.format, - @JsonKey(includeIfNull: false) this.family, - @JsonKey(includeIfNull: false) final List? families, - @JsonKey(name: 'parameter_size', includeIfNull: false) this.parameterSize, - @JsonKey(name: 'quantization_level', includeIfNull: false) - this.quantizationLevel}) - : _families = families, - super._(); - - factory _$ModelDetailsImpl.fromJson(Map json) => - _$$ModelDetailsImplFromJson(json); - - /// The parent model of the model. - @override - @JsonKey(name: 'parent_model', includeIfNull: false) - final String? parentModel; - - /// The format of the model. - @override - @JsonKey(includeIfNull: false) - final String? format; - - /// The family of the model. - @override - @JsonKey(includeIfNull: false) - final String? family; - - /// The families of the model. - final List? _families; - - /// The families of the model. - @override - @JsonKey(includeIfNull: false) - List? 
get families { - final value = _families; - if (value == null) return null; - if (_families is EqualUnmodifiableListView) return _families; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - - /// The size of the model's parameters. - @override - @JsonKey(name: 'parameter_size', includeIfNull: false) - final String? parameterSize; - - /// The quantization level of the model. - @override - @JsonKey(name: 'quantization_level', includeIfNull: false) - final String? quantizationLevel; - - @override - String toString() { - return 'ModelDetails(parentModel: $parentModel, format: $format, family: $family, families: $families, parameterSize: $parameterSize, quantizationLevel: $quantizationLevel)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ModelDetailsImpl && - (identical(other.parentModel, parentModel) || - other.parentModel == parentModel) && - (identical(other.format, format) || other.format == format) && - (identical(other.family, family) || other.family == family) && - const DeepCollectionEquality().equals(other._families, _families) && - (identical(other.parameterSize, parameterSize) || - other.parameterSize == parameterSize) && - (identical(other.quantizationLevel, quantizationLevel) || - other.quantizationLevel == quantizationLevel)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash( - runtimeType, - parentModel, - format, - family, - const DeepCollectionEquality().hash(_families), - parameterSize, - quantizationLevel); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ModelDetailsImplCopyWith<_$ModelDetailsImpl> get copyWith => - __$$ModelDetailsImplCopyWithImpl<_$ModelDetailsImpl>(this, _$identity); - - @override - Map toJson() { - return _$$ModelDetailsImplToJson( - this, - ); - } -} - -abstract class _ModelDetails extends ModelDetails { - const factory _ModelDetails( - {@JsonKey(name: 'parent_model', includeIfNull: false) - final String? parentModel, - @JsonKey(includeIfNull: false) final String? format, - @JsonKey(includeIfNull: false) final String? family, - @JsonKey(includeIfNull: false) final List? families, - @JsonKey(name: 'parameter_size', includeIfNull: false) - final String? parameterSize, - @JsonKey(name: 'quantization_level', includeIfNull: false) - final String? quantizationLevel}) = _$ModelDetailsImpl; - const _ModelDetails._() : super._(); - - factory _ModelDetails.fromJson(Map json) = - _$ModelDetailsImpl.fromJson; - - @override - - /// The parent model of the model. - @JsonKey(name: 'parent_model', includeIfNull: false) - String? get parentModel; - @override - - /// The format of the model. - @JsonKey(includeIfNull: false) - String? get format; - @override - - /// The family of the model. - @JsonKey(includeIfNull: false) - String? get family; - @override - - /// The families of the model. - @JsonKey(includeIfNull: false) - List? get families; - @override - - /// The size of the model's parameters. - @JsonKey(name: 'parameter_size', includeIfNull: false) - String? get parameterSize; - @override - - /// The quantization level of the model. - @JsonKey(name: 'quantization_level', includeIfNull: false) - String? get quantizationLevel; + /// Status creating the model + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateModelStatus? 
get status; @override @JsonKey(ignore: true) - _$$ModelDetailsImplCopyWith<_$ModelDetailsImpl> get copyWith => + _$$CreateModelResponseImplCopyWith<_$CreateModelResponseImpl> get copyWith => throw _privateConstructorUsedError; } -ModelInformation _$ModelInformationFromJson(Map json) { - return _ModelInformation.fromJson(json); +ModelsResponse _$ModelsResponseFromJson(Map json) { + return _ModelsResponse.fromJson(json); } /// @nodoc -mixin _$ModelInformation { - /// The architecture of the model. - @JsonKey(name: 'general.architecture', includeIfNull: false) - String? get generalArchitecture => throw _privateConstructorUsedError; - - /// The file type of the model. - @JsonKey(name: 'general.file_type', includeIfNull: false) - int? get generalFileType => throw _privateConstructorUsedError; - - /// The number of parameters in the model. - @JsonKey(name: 'general.parameter_count', includeIfNull: false) - int? get generalParameterCount => throw _privateConstructorUsedError; - - /// The number of parameters in the model. - @JsonKey(name: 'general.quantization_version', includeIfNull: false) - int? get generalQuantizationVersion => throw _privateConstructorUsedError; +mixin _$ModelsResponse { + /// List of models available locally. + @JsonKey(includeIfNull: false) + List? get models => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $ModelInformationCopyWith get copyWith => + $ModelsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ModelInformationCopyWith<$Res> { - factory $ModelInformationCopyWith( - ModelInformation value, $Res Function(ModelInformation) then) = - _$ModelInformationCopyWithImpl<$Res, ModelInformation>; +abstract class $ModelsResponseCopyWith<$Res> { + factory $ModelsResponseCopyWith( + ModelsResponse value, $Res Function(ModelsResponse) then) = + _$ModelsResponseCopyWithImpl<$Res, ModelsResponse>; @useResult - $Res call( - {@JsonKey(name: 'general.architecture', includeIfNull: false) - String? generalArchitecture, - @JsonKey(name: 'general.file_type', includeIfNull: false) - int? generalFileType, - @JsonKey(name: 'general.parameter_count', includeIfNull: false) - int? generalParameterCount, - @JsonKey(name: 'general.quantization_version', includeIfNull: false) - int? generalQuantizationVersion}); + $Res call({@JsonKey(includeIfNull: false) List? models}); } /// @nodoc -class _$ModelInformationCopyWithImpl<$Res, $Val extends ModelInformation> - implements $ModelInformationCopyWith<$Res> { - _$ModelInformationCopyWithImpl(this._value, this._then); +class _$ModelsResponseCopyWithImpl<$Res, $Val extends ModelsResponse> + implements $ModelsResponseCopyWith<$Res> { + _$ModelsResponseCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -5993,427 +4125,473 @@ class _$ModelInformationCopyWithImpl<$Res, $Val extends ModelInformation> @pragma('vm:prefer-inline') @override $Res call({ - Object? generalArchitecture = freezed, - Object? generalFileType = freezed, - Object? generalParameterCount = freezed, - Object? generalQuantizationVersion = freezed, + Object? models = freezed, }) { return _then(_value.copyWith( - generalArchitecture: freezed == generalArchitecture - ? _value.generalArchitecture - : generalArchitecture // ignore: cast_nullable_to_non_nullable - as String?, - generalFileType: freezed == generalFileType - ? 
_value.generalFileType - : generalFileType // ignore: cast_nullable_to_non_nullable - as int?, - generalParameterCount: freezed == generalParameterCount - ? _value.generalParameterCount - : generalParameterCount // ignore: cast_nullable_to_non_nullable - as int?, - generalQuantizationVersion: freezed == generalQuantizationVersion - ? _value.generalQuantizationVersion - : generalQuantizationVersion // ignore: cast_nullable_to_non_nullable - as int?, + models: freezed == models + ? _value.models + : models // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } } /// @nodoc -abstract class _$$ModelInformationImplCopyWith<$Res> - implements $ModelInformationCopyWith<$Res> { - factory _$$ModelInformationImplCopyWith(_$ModelInformationImpl value, - $Res Function(_$ModelInformationImpl) then) = - __$$ModelInformationImplCopyWithImpl<$Res>; +abstract class _$$ModelsResponseImplCopyWith<$Res> + implements $ModelsResponseCopyWith<$Res> { + factory _$$ModelsResponseImplCopyWith(_$ModelsResponseImpl value, + $Res Function(_$ModelsResponseImpl) then) = + __$$ModelsResponseImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(name: 'general.architecture', includeIfNull: false) - String? generalArchitecture, - @JsonKey(name: 'general.file_type', includeIfNull: false) - int? generalFileType, - @JsonKey(name: 'general.parameter_count', includeIfNull: false) - int? generalParameterCount, - @JsonKey(name: 'general.quantization_version', includeIfNull: false) - int? generalQuantizationVersion}); + $Res call({@JsonKey(includeIfNull: false) List? models}); } /// @nodoc -class __$$ModelInformationImplCopyWithImpl<$Res> - extends _$ModelInformationCopyWithImpl<$Res, _$ModelInformationImpl> - implements _$$ModelInformationImplCopyWith<$Res> { - __$$ModelInformationImplCopyWithImpl(_$ModelInformationImpl _value, - $Res Function(_$ModelInformationImpl) _then) +class __$$ModelsResponseImplCopyWithImpl<$Res> + extends _$ModelsResponseCopyWithImpl<$Res, _$ModelsResponseImpl> + implements _$$ModelsResponseImplCopyWith<$Res> { + __$$ModelsResponseImplCopyWithImpl( + _$ModelsResponseImpl _value, $Res Function(_$ModelsResponseImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? generalArchitecture = freezed, - Object? generalFileType = freezed, - Object? generalParameterCount = freezed, - Object? generalQuantizationVersion = freezed, + Object? models = freezed, }) { - return _then(_$ModelInformationImpl( - generalArchitecture: freezed == generalArchitecture - ? _value.generalArchitecture - : generalArchitecture // ignore: cast_nullable_to_non_nullable - as String?, - generalFileType: freezed == generalFileType - ? _value.generalFileType - : generalFileType // ignore: cast_nullable_to_non_nullable - as int?, - generalParameterCount: freezed == generalParameterCount - ? _value.generalParameterCount - : generalParameterCount // ignore: cast_nullable_to_non_nullable - as int?, - generalQuantizationVersion: freezed == generalQuantizationVersion - ? _value.generalQuantizationVersion - : generalQuantizationVersion // ignore: cast_nullable_to_non_nullable - as int?, + return _then(_$ModelsResponseImpl( + models: freezed == models + ? 
_value._models + : models // ignore: cast_nullable_to_non_nullable + as List?, )); } } /// @nodoc @JsonSerializable() -class _$ModelInformationImpl extends _ModelInformation { - const _$ModelInformationImpl( - {@JsonKey(name: 'general.architecture', includeIfNull: false) - this.generalArchitecture, - @JsonKey(name: 'general.file_type', includeIfNull: false) - this.generalFileType, - @JsonKey(name: 'general.parameter_count', includeIfNull: false) - this.generalParameterCount, - @JsonKey(name: 'general.quantization_version', includeIfNull: false) - this.generalQuantizationVersion}) - : super._(); - - factory _$ModelInformationImpl.fromJson(Map json) => - _$$ModelInformationImplFromJson(json); - - /// The architecture of the model. - @override - @JsonKey(name: 'general.architecture', includeIfNull: false) - final String? generalArchitecture; +class _$ModelsResponseImpl extends _ModelsResponse { + const _$ModelsResponseImpl( + {@JsonKey(includeIfNull: false) final List? models}) + : _models = models, + super._(); - /// The file type of the model. - @override - @JsonKey(name: 'general.file_type', includeIfNull: false) - final int? generalFileType; + factory _$ModelsResponseImpl.fromJson(Map json) => + _$$ModelsResponseImplFromJson(json); - /// The number of parameters in the model. - @override - @JsonKey(name: 'general.parameter_count', includeIfNull: false) - final int? generalParameterCount; + /// List of models available locally. + final List? _models; - /// The number of parameters in the model. + /// List of models available locally. @override - @JsonKey(name: 'general.quantization_version', includeIfNull: false) - final int? generalQuantizationVersion; + @JsonKey(includeIfNull: false) + List? get models { + final value = _models; + if (value == null) return null; + if (_models is EqualUnmodifiableListView) return _models; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } @override String toString() { - return 'ModelInformation(generalArchitecture: $generalArchitecture, generalFileType: $generalFileType, generalParameterCount: $generalParameterCount, generalQuantizationVersion: $generalQuantizationVersion)'; + return 'ModelsResponse(models: $models)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ModelInformationImpl && - (identical(other.generalArchitecture, generalArchitecture) || - other.generalArchitecture == generalArchitecture) && - (identical(other.generalFileType, generalFileType) || - other.generalFileType == generalFileType) && - (identical(other.generalParameterCount, generalParameterCount) || - other.generalParameterCount == generalParameterCount) && - (identical(other.generalQuantizationVersion, - generalQuantizationVersion) || - other.generalQuantizationVersion == - generalQuantizationVersion)); + other is _$ModelsResponseImpl && + const DeepCollectionEquality().equals(other._models, _models)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, generalArchitecture, - generalFileType, generalParameterCount, generalQuantizationVersion); + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_models)); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ModelInformationImplCopyWith<_$ModelInformationImpl> get copyWith => - __$$ModelInformationImplCopyWithImpl<_$ModelInformationImpl>( + _$$ModelsResponseImplCopyWith<_$ModelsResponseImpl> get copyWith => + 
__$$ModelsResponseImplCopyWithImpl<_$ModelsResponseImpl>( this, _$identity); @override Map toJson() { - return _$$ModelInformationImplToJson( + return _$$ModelsResponseImplToJson( this, ); } } -abstract class _ModelInformation extends ModelInformation { - const factory _ModelInformation( - {@JsonKey(name: 'general.architecture', includeIfNull: false) - final String? generalArchitecture, - @JsonKey(name: 'general.file_type', includeIfNull: false) - final int? generalFileType, - @JsonKey(name: 'general.parameter_count', includeIfNull: false) - final int? generalParameterCount, - @JsonKey(name: 'general.quantization_version', includeIfNull: false) - final int? generalQuantizationVersion}) = _$ModelInformationImpl; - const _ModelInformation._() : super._(); - - factory _ModelInformation.fromJson(Map json) = - _$ModelInformationImpl.fromJson; - - @override - - /// The architecture of the model. - @JsonKey(name: 'general.architecture', includeIfNull: false) - String? get generalArchitecture; - @override +abstract class _ModelsResponse extends ModelsResponse { + const factory _ModelsResponse( + {@JsonKey(includeIfNull: false) final List? models}) = + _$ModelsResponseImpl; + const _ModelsResponse._() : super._(); - /// The file type of the model. - @JsonKey(name: 'general.file_type', includeIfNull: false) - int? get generalFileType; - @override + factory _ModelsResponse.fromJson(Map json) = + _$ModelsResponseImpl.fromJson; - /// The number of parameters in the model. - @JsonKey(name: 'general.parameter_count', includeIfNull: false) - int? get generalParameterCount; @override - /// The number of parameters in the model. - @JsonKey(name: 'general.quantization_version', includeIfNull: false) - int? get generalQuantizationVersion; + /// List of models available locally. + @JsonKey(includeIfNull: false) + List? get models; @override @JsonKey(ignore: true) - _$$ModelInformationImplCopyWith<_$ModelInformationImpl> get copyWith => + _$$ModelsResponseImplCopyWith<_$ModelsResponseImpl> get copyWith => throw _privateConstructorUsedError; } -ProcessResponse _$ProcessResponseFromJson(Map json) { - return _ProcessResponse.fromJson(json); +Model _$ModelFromJson(Map json) { + return _Model.fromJson(json); } /// @nodoc -mixin _$ProcessResponse { - /// List of running models. +mixin _$Model { + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @JsonKey(includeIfNull: false) + String? get model => throw _privateConstructorUsedError; + + /// Model modification date. + @JsonKey(name: 'modified_at', includeIfNull: false) + String? get modifiedAt => throw _privateConstructorUsedError; + + /// Size of the model on disk. + @JsonKey(includeIfNull: false) + int? get size => throw _privateConstructorUsedError; + + /// The model's digest. + @JsonKey(includeIfNull: false) + String? get digest => throw _privateConstructorUsedError; + + /// Details about a model. @JsonKey(includeIfNull: false) - List? get models => throw _privateConstructorUsedError; + ModelDetails? 
get details => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $ProcessResponseCopyWith get copyWith => - throw _privateConstructorUsedError; + $ModelCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ProcessResponseCopyWith<$Res> { - factory $ProcessResponseCopyWith( - ProcessResponse value, $Res Function(ProcessResponse) then) = - _$ProcessResponseCopyWithImpl<$Res, ProcessResponse>; +abstract class $ModelCopyWith<$Res> { + factory $ModelCopyWith(Model value, $Res Function(Model) then) = + _$ModelCopyWithImpl<$Res, Model>; @useResult - $Res call({@JsonKey(includeIfNull: false) List? models}); + $Res call( + {@JsonKey(includeIfNull: false) String? model, + @JsonKey(name: 'modified_at', includeIfNull: false) String? modifiedAt, + @JsonKey(includeIfNull: false) int? size, + @JsonKey(includeIfNull: false) String? digest, + @JsonKey(includeIfNull: false) ModelDetails? details}); + + $ModelDetailsCopyWith<$Res>? get details; } /// @nodoc -class _$ProcessResponseCopyWithImpl<$Res, $Val extends ProcessResponse> - implements $ProcessResponseCopyWith<$Res> { - _$ProcessResponseCopyWithImpl(this._value, this._then); +class _$ModelCopyWithImpl<$Res, $Val extends Model> + implements $ModelCopyWith<$Res> { + _$ModelCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - @pragma('vm:prefer-inline') + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = freezed, + Object? modifiedAt = freezed, + Object? size = freezed, + Object? digest = freezed, + Object? details = freezed, + }) { + return _then(_value.copyWith( + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + modifiedAt: freezed == modifiedAt + ? _value.modifiedAt + : modifiedAt // ignore: cast_nullable_to_non_nullable + as String?, + size: freezed == size + ? _value.size + : size // ignore: cast_nullable_to_non_nullable + as int?, + digest: freezed == digest + ? _value.digest + : digest // ignore: cast_nullable_to_non_nullable + as String?, + details: freezed == details + ? _value.details + : details // ignore: cast_nullable_to_non_nullable + as ModelDetails?, + ) as $Val); + } + @override - $Res call({ - Object? models = freezed, - }) { - return _then(_value.copyWith( - models: freezed == models - ? _value.models - : models // ignore: cast_nullable_to_non_nullable - as List?, - ) as $Val); + @pragma('vm:prefer-inline') + $ModelDetailsCopyWith<$Res>? get details { + if (_value.details == null) { + return null; + } + + return $ModelDetailsCopyWith<$Res>(_value.details!, (value) { + return _then(_value.copyWith(details: value) as $Val); + }); } } /// @nodoc -abstract class _$$ProcessResponseImplCopyWith<$Res> - implements $ProcessResponseCopyWith<$Res> { - factory _$$ProcessResponseImplCopyWith(_$ProcessResponseImpl value, - $Res Function(_$ProcessResponseImpl) then) = - __$$ProcessResponseImplCopyWithImpl<$Res>; +abstract class _$$ModelImplCopyWith<$Res> implements $ModelCopyWith<$Res> { + factory _$$ModelImplCopyWith( + _$ModelImpl value, $Res Function(_$ModelImpl) then) = + __$$ModelImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(includeIfNull: false) List? models}); + $Res call( + {@JsonKey(includeIfNull: false) String? model, + @JsonKey(name: 'modified_at', includeIfNull: false) String? modifiedAt, + @JsonKey(includeIfNull: false) int? 
size, + @JsonKey(includeIfNull: false) String? digest, + @JsonKey(includeIfNull: false) ModelDetails? details}); + + @override + $ModelDetailsCopyWith<$Res>? get details; } /// @nodoc -class __$$ProcessResponseImplCopyWithImpl<$Res> - extends _$ProcessResponseCopyWithImpl<$Res, _$ProcessResponseImpl> - implements _$$ProcessResponseImplCopyWith<$Res> { - __$$ProcessResponseImplCopyWithImpl( - _$ProcessResponseImpl _value, $Res Function(_$ProcessResponseImpl) _then) +class __$$ModelImplCopyWithImpl<$Res> + extends _$ModelCopyWithImpl<$Res, _$ModelImpl> + implements _$$ModelImplCopyWith<$Res> { + __$$ModelImplCopyWithImpl( + _$ModelImpl _value, $Res Function(_$ModelImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? models = freezed, + Object? model = freezed, + Object? modifiedAt = freezed, + Object? size = freezed, + Object? digest = freezed, + Object? details = freezed, }) { - return _then(_$ProcessResponseImpl( - models: freezed == models - ? _value._models - : models // ignore: cast_nullable_to_non_nullable - as List?, + return _then(_$ModelImpl( + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + modifiedAt: freezed == modifiedAt + ? _value.modifiedAt + : modifiedAt // ignore: cast_nullable_to_non_nullable + as String?, + size: freezed == size + ? _value.size + : size // ignore: cast_nullable_to_non_nullable + as int?, + digest: freezed == digest + ? _value.digest + : digest // ignore: cast_nullable_to_non_nullable + as String?, + details: freezed == details + ? _value.details + : details // ignore: cast_nullable_to_non_nullable + as ModelDetails?, )); } } /// @nodoc @JsonSerializable() -class _$ProcessResponseImpl extends _ProcessResponse { - const _$ProcessResponseImpl( - {@JsonKey(includeIfNull: false) final List? models}) - : _models = models, - super._(); +class _$ModelImpl extends _Model { + const _$ModelImpl( + {@JsonKey(includeIfNull: false) this.model, + @JsonKey(name: 'modified_at', includeIfNull: false) this.modifiedAt, + @JsonKey(includeIfNull: false) this.size, + @JsonKey(includeIfNull: false) this.digest, + @JsonKey(includeIfNull: false) this.details}) + : super._(); + + factory _$ModelImpl.fromJson(Map json) => + _$$ModelImplFromJson(json); + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @override + @JsonKey(includeIfNull: false) + final String? model; + + /// Model modification date. + @override + @JsonKey(name: 'modified_at', includeIfNull: false) + final String? modifiedAt; - factory _$ProcessResponseImpl.fromJson(Map json) => - _$$ProcessResponseImplFromJson(json); + /// Size of the model on disk. + @override + @JsonKey(includeIfNull: false) + final int? size; - /// List of running models. - final List? _models; + /// The model's digest. + @override + @JsonKey(includeIfNull: false) + final String? digest; - /// List of running models. + /// Details about a model. @override @JsonKey(includeIfNull: false) - List? get models { - final value = _models; - if (value == null) return null; - if (_models is EqualUnmodifiableListView) return _models; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } + final ModelDetails? 
details; @override String toString() { - return 'ProcessResponse(models: $models)'; + return 'Model(model: $model, modifiedAt: $modifiedAt, size: $size, digest: $digest, details: $details)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ProcessResponseImpl && - const DeepCollectionEquality().equals(other._models, _models)); + other is _$ModelImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.modifiedAt, modifiedAt) || + other.modifiedAt == modifiedAt) && + (identical(other.size, size) || other.size == size) && + (identical(other.digest, digest) || other.digest == digest) && + (identical(other.details, details) || other.details == details)); } @JsonKey(ignore: true) @override int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_models)); + Object.hash(runtimeType, model, modifiedAt, size, digest, details); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ProcessResponseImplCopyWith<_$ProcessResponseImpl> get copyWith => - __$$ProcessResponseImplCopyWithImpl<_$ProcessResponseImpl>( - this, _$identity); + _$$ModelImplCopyWith<_$ModelImpl> get copyWith => + __$$ModelImplCopyWithImpl<_$ModelImpl>(this, _$identity); @override Map toJson() { - return _$$ProcessResponseImplToJson( + return _$$ModelImplToJson( this, ); } } -abstract class _ProcessResponse extends ProcessResponse { - const factory _ProcessResponse( - {@JsonKey(includeIfNull: false) final List? models}) = - _$ProcessResponseImpl; - const _ProcessResponse._() : super._(); +abstract class _Model extends Model { + const factory _Model( + {@JsonKey(includeIfNull: false) final String? model, + @JsonKey(name: 'modified_at', includeIfNull: false) + final String? modifiedAt, + @JsonKey(includeIfNull: false) final int? size, + @JsonKey(includeIfNull: false) final String? digest, + @JsonKey(includeIfNull: false) final ModelDetails? details}) = + _$ModelImpl; + const _Model._() : super._(); + + factory _Model.fromJson(Map json) = _$ModelImpl.fromJson; + + @override + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @JsonKey(includeIfNull: false) + String? get model; + @override - factory _ProcessResponse.fromJson(Map json) = - _$ProcessResponseImpl.fromJson; + /// Model modification date. + @JsonKey(name: 'modified_at', includeIfNull: false) + String? get modifiedAt; + @override + + /// Size of the model on disk. + @JsonKey(includeIfNull: false) + int? get size; + @override + /// The model's digest. + @JsonKey(includeIfNull: false) + String? get digest; @override - /// List of running models. + /// Details about a model. @JsonKey(includeIfNull: false) - List? get models; + ModelDetails? get details; @override @JsonKey(ignore: true) - _$$ProcessResponseImplCopyWith<_$ProcessResponseImpl> get copyWith => + _$$ModelImplCopyWith<_$ModelImpl> get copyWith => throw _privateConstructorUsedError; } -ProcessModel _$ProcessModelFromJson(Map json) { - return _ProcessModel.fromJson(json); +ModelDetails _$ModelDetailsFromJson(Map json) { + return _ModelDetails.fromJson(json); } /// @nodoc -mixin _$ProcessModel { - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. 
The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) - String? get model => throw _privateConstructorUsedError; +mixin _$ModelDetails { + /// The parent model of the model. + @JsonKey(name: 'parent_model', includeIfNull: false) + String? get parentModel => throw _privateConstructorUsedError; - /// Size of the model on disk. + /// The format of the model. @JsonKey(includeIfNull: false) - int? get size => throw _privateConstructorUsedError; + String? get format => throw _privateConstructorUsedError; - /// The model's digest. + /// The family of the model. @JsonKey(includeIfNull: false) - String? get digest => throw _privateConstructorUsedError; + String? get family => throw _privateConstructorUsedError; - /// Details about a model. + /// The families of the model. @JsonKey(includeIfNull: false) - ModelDetails? get details => throw _privateConstructorUsedError; + List? get families => throw _privateConstructorUsedError; - /// No Description - @JsonKey(name: 'expires_at', includeIfNull: false) - String? get expiresAt => throw _privateConstructorUsedError; + /// The size of the model's parameters. + @JsonKey(name: 'parameter_size', includeIfNull: false) + String? get parameterSize => throw _privateConstructorUsedError; - /// Size of the model on disk. - @JsonKey(name: 'size_vram', includeIfNull: false) - int? get sizeVram => throw _privateConstructorUsedError; + /// The quantization level of the model. + @JsonKey(name: 'quantization_level', includeIfNull: false) + String? get quantizationLevel => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $ProcessModelCopyWith get copyWith => + $ModelDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ProcessModelCopyWith<$Res> { - factory $ProcessModelCopyWith( - ProcessModel value, $Res Function(ProcessModel) then) = - _$ProcessModelCopyWithImpl<$Res, ProcessModel>; +abstract class $ModelDetailsCopyWith<$Res> { + factory $ModelDetailsCopyWith( + ModelDetails value, $Res Function(ModelDetails) then) = + _$ModelDetailsCopyWithImpl<$Res, ModelDetails>; @useResult $Res call( - {@JsonKey(includeIfNull: false) String? model, - @JsonKey(includeIfNull: false) int? size, - @JsonKey(includeIfNull: false) String? digest, - @JsonKey(includeIfNull: false) ModelDetails? details, - @JsonKey(name: 'expires_at', includeIfNull: false) String? expiresAt, - @JsonKey(name: 'size_vram', includeIfNull: false) int? sizeVram}); - - $ModelDetailsCopyWith<$Res>? get details; + {@JsonKey(name: 'parent_model', includeIfNull: false) String? parentModel, + @JsonKey(includeIfNull: false) String? format, + @JsonKey(includeIfNull: false) String? family, + @JsonKey(includeIfNull: false) List? families, + @JsonKey(name: 'parameter_size', includeIfNull: false) + String? parameterSize, + @JsonKey(name: 'quantization_level', includeIfNull: false) + String? 
quantizationLevel}); } /// @nodoc -class _$ProcessModelCopyWithImpl<$Res, $Val extends ProcessModel> - implements $ProcessModelCopyWith<$Res> { - _$ProcessModelCopyWithImpl(this._value, this._then); +class _$ModelDetailsCopyWithImpl<$Res, $Val extends ModelDetails> + implements $ModelDetailsCopyWith<$Res> { + _$ModelDetailsCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -6423,257 +4601,259 @@ class _$ProcessModelCopyWithImpl<$Res, $Val extends ProcessModel> @pragma('vm:prefer-inline') @override $Res call({ - Object? model = freezed, - Object? size = freezed, - Object? digest = freezed, - Object? details = freezed, - Object? expiresAt = freezed, - Object? sizeVram = freezed, + Object? parentModel = freezed, + Object? format = freezed, + Object? family = freezed, + Object? families = freezed, + Object? parameterSize = freezed, + Object? quantizationLevel = freezed, }) { return _then(_value.copyWith( - model: freezed == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable + parentModel: freezed == parentModel + ? _value.parentModel + : parentModel // ignore: cast_nullable_to_non_nullable as String?, - size: freezed == size - ? _value.size - : size // ignore: cast_nullable_to_non_nullable - as int?, - digest: freezed == digest - ? _value.digest - : digest // ignore: cast_nullable_to_non_nullable + format: freezed == format + ? _value.format + : format // ignore: cast_nullable_to_non_nullable + as String?, + family: freezed == family + ? _value.family + : family // ignore: cast_nullable_to_non_nullable + as String?, + families: freezed == families + ? _value.families + : families // ignore: cast_nullable_to_non_nullable + as List?, + parameterSize: freezed == parameterSize + ? _value.parameterSize + : parameterSize // ignore: cast_nullable_to_non_nullable as String?, - details: freezed == details - ? _value.details - : details // ignore: cast_nullable_to_non_nullable - as ModelDetails?, - expiresAt: freezed == expiresAt - ? _value.expiresAt - : expiresAt // ignore: cast_nullable_to_non_nullable + quantizationLevel: freezed == quantizationLevel + ? _value.quantizationLevel + : quantizationLevel // ignore: cast_nullable_to_non_nullable as String?, - sizeVram: freezed == sizeVram - ? _value.sizeVram - : sizeVram // ignore: cast_nullable_to_non_nullable - as int?, ) as $Val); } - - @override - @pragma('vm:prefer-inline') - $ModelDetailsCopyWith<$Res>? get details { - if (_value.details == null) { - return null; - } - - return $ModelDetailsCopyWith<$Res>(_value.details!, (value) { - return _then(_value.copyWith(details: value) as $Val); - }); - } } /// @nodoc -abstract class _$$ProcessModelImplCopyWith<$Res> - implements $ProcessModelCopyWith<$Res> { - factory _$$ProcessModelImplCopyWith( - _$ProcessModelImpl value, $Res Function(_$ProcessModelImpl) then) = - __$$ProcessModelImplCopyWithImpl<$Res>; +abstract class _$$ModelDetailsImplCopyWith<$Res> + implements $ModelDetailsCopyWith<$Res> { + factory _$$ModelDetailsImplCopyWith( + _$ModelDetailsImpl value, $Res Function(_$ModelDetailsImpl) then) = + __$$ModelDetailsImplCopyWithImpl<$Res>; @override @useResult $Res call( - {@JsonKey(includeIfNull: false) String? model, - @JsonKey(includeIfNull: false) int? size, - @JsonKey(includeIfNull: false) String? digest, - @JsonKey(includeIfNull: false) ModelDetails? details, - @JsonKey(name: 'expires_at', includeIfNull: false) String? expiresAt, - @JsonKey(name: 'size_vram', includeIfNull: false) int? 
sizeVram}); - - @override - $ModelDetailsCopyWith<$Res>? get details; + {@JsonKey(name: 'parent_model', includeIfNull: false) String? parentModel, + @JsonKey(includeIfNull: false) String? format, + @JsonKey(includeIfNull: false) String? family, + @JsonKey(includeIfNull: false) List? families, + @JsonKey(name: 'parameter_size', includeIfNull: false) + String? parameterSize, + @JsonKey(name: 'quantization_level', includeIfNull: false) + String? quantizationLevel}); } /// @nodoc -class __$$ProcessModelImplCopyWithImpl<$Res> - extends _$ProcessModelCopyWithImpl<$Res, _$ProcessModelImpl> - implements _$$ProcessModelImplCopyWith<$Res> { - __$$ProcessModelImplCopyWithImpl( - _$ProcessModelImpl _value, $Res Function(_$ProcessModelImpl) _then) +class __$$ModelDetailsImplCopyWithImpl<$Res> + extends _$ModelDetailsCopyWithImpl<$Res, _$ModelDetailsImpl> + implements _$$ModelDetailsImplCopyWith<$Res> { + __$$ModelDetailsImplCopyWithImpl( + _$ModelDetailsImpl _value, $Res Function(_$ModelDetailsImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? model = freezed, - Object? size = freezed, - Object? digest = freezed, - Object? details = freezed, - Object? expiresAt = freezed, - Object? sizeVram = freezed, + Object? parentModel = freezed, + Object? format = freezed, + Object? family = freezed, + Object? families = freezed, + Object? parameterSize = freezed, + Object? quantizationLevel = freezed, }) { - return _then(_$ProcessModelImpl( - model: freezed == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable + return _then(_$ModelDetailsImpl( + parentModel: freezed == parentModel + ? _value.parentModel + : parentModel // ignore: cast_nullable_to_non_nullable as String?, - size: freezed == size - ? _value.size - : size // ignore: cast_nullable_to_non_nullable - as int?, - digest: freezed == digest - ? _value.digest - : digest // ignore: cast_nullable_to_non_nullable + format: freezed == format + ? _value.format + : format // ignore: cast_nullable_to_non_nullable as String?, - details: freezed == details - ? _value.details - : details // ignore: cast_nullable_to_non_nullable - as ModelDetails?, - expiresAt: freezed == expiresAt - ? _value.expiresAt - : expiresAt // ignore: cast_nullable_to_non_nullable + family: freezed == family + ? _value.family + : family // ignore: cast_nullable_to_non_nullable + as String?, + families: freezed == families + ? _value._families + : families // ignore: cast_nullable_to_non_nullable + as List?, + parameterSize: freezed == parameterSize + ? _value.parameterSize + : parameterSize // ignore: cast_nullable_to_non_nullable + as String?, + quantizationLevel: freezed == quantizationLevel + ? _value.quantizationLevel + : quantizationLevel // ignore: cast_nullable_to_non_nullable as String?, - sizeVram: freezed == sizeVram - ? 
_value.sizeVram - : sizeVram // ignore: cast_nullable_to_non_nullable - as int?, )); } } /// @nodoc @JsonSerializable() -class _$ProcessModelImpl extends _ProcessModel { - const _$ProcessModelImpl( - {@JsonKey(includeIfNull: false) this.model, - @JsonKey(includeIfNull: false) this.size, - @JsonKey(includeIfNull: false) this.digest, - @JsonKey(includeIfNull: false) this.details, - @JsonKey(name: 'expires_at', includeIfNull: false) this.expiresAt, - @JsonKey(name: 'size_vram', includeIfNull: false) this.sizeVram}) - : super._(); +class _$ModelDetailsImpl extends _ModelDetails { + const _$ModelDetailsImpl( + {@JsonKey(name: 'parent_model', includeIfNull: false) this.parentModel, + @JsonKey(includeIfNull: false) this.format, + @JsonKey(includeIfNull: false) this.family, + @JsonKey(includeIfNull: false) final List? families, + @JsonKey(name: 'parameter_size', includeIfNull: false) this.parameterSize, + @JsonKey(name: 'quantization_level', includeIfNull: false) + this.quantizationLevel}) + : _families = families, + super._(); - factory _$ProcessModelImpl.fromJson(Map json) => - _$$ProcessModelImplFromJson(json); + factory _$ModelDetailsImpl.fromJson(Map json) => + _$$ModelDetailsImplFromJson(json); - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + /// The parent model of the model. @override - @JsonKey(includeIfNull: false) - final String? model; + @JsonKey(name: 'parent_model', includeIfNull: false) + final String? parentModel; - /// Size of the model on disk. + /// The format of the model. @override @JsonKey(includeIfNull: false) - final int? size; + final String? format; - /// The model's digest. + /// The family of the model. @override @JsonKey(includeIfNull: false) - final String? digest; + final String? family; - /// Details about a model. + /// The families of the model. + final List? _families; + + /// The families of the model. @override @JsonKey(includeIfNull: false) - final ModelDetails? details; + List? get families { + final value = _families; + if (value == null) return null; + if (_families is EqualUnmodifiableListView) return _families; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } - /// No Description + /// The size of the model's parameters. @override - @JsonKey(name: 'expires_at', includeIfNull: false) - final String? expiresAt; + @JsonKey(name: 'parameter_size', includeIfNull: false) + final String? parameterSize; - /// Size of the model on disk. + /// The quantization level of the model. @override - @JsonKey(name: 'size_vram', includeIfNull: false) - final int? sizeVram; + @JsonKey(name: 'quantization_level', includeIfNull: false) + final String? 
quantizationLevel; @override String toString() { - return 'ProcessModel(model: $model, size: $size, digest: $digest, details: $details, expiresAt: $expiresAt, sizeVram: $sizeVram)'; + return 'ModelDetails(parentModel: $parentModel, format: $format, family: $family, families: $families, parameterSize: $parameterSize, quantizationLevel: $quantizationLevel)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ProcessModelImpl && - (identical(other.model, model) || other.model == model) && - (identical(other.size, size) || other.size == size) && - (identical(other.digest, digest) || other.digest == digest) && - (identical(other.details, details) || other.details == details) && - (identical(other.expiresAt, expiresAt) || - other.expiresAt == expiresAt) && - (identical(other.sizeVram, sizeVram) || - other.sizeVram == sizeVram)); + other is _$ModelDetailsImpl && + (identical(other.parentModel, parentModel) || + other.parentModel == parentModel) && + (identical(other.format, format) || other.format == format) && + (identical(other.family, family) || other.family == family) && + const DeepCollectionEquality().equals(other._families, _families) && + (identical(other.parameterSize, parameterSize) || + other.parameterSize == parameterSize) && + (identical(other.quantizationLevel, quantizationLevel) || + other.quantizationLevel == quantizationLevel)); } @JsonKey(ignore: true) @override int get hashCode => Object.hash( - runtimeType, model, size, digest, details, expiresAt, sizeVram); + runtimeType, + parentModel, + format, + family, + const DeepCollectionEquality().hash(_families), + parameterSize, + quantizationLevel); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ProcessModelImplCopyWith<_$ProcessModelImpl> get copyWith => - __$$ProcessModelImplCopyWithImpl<_$ProcessModelImpl>(this, _$identity); + _$$ModelDetailsImplCopyWith<_$ModelDetailsImpl> get copyWith => + __$$ModelDetailsImplCopyWithImpl<_$ModelDetailsImpl>(this, _$identity); @override Map toJson() { - return _$$ProcessModelImplToJson( + return _$$ModelDetailsImplToJson( this, ); } } -abstract class _ProcessModel extends ProcessModel { - const factory _ProcessModel( - {@JsonKey(includeIfNull: false) final String? model, - @JsonKey(includeIfNull: false) final int? size, - @JsonKey(includeIfNull: false) final String? digest, - @JsonKey(includeIfNull: false) final ModelDetails? details, - @JsonKey(name: 'expires_at', includeIfNull: false) - final String? expiresAt, - @JsonKey(name: 'size_vram', includeIfNull: false) - final int? sizeVram}) = _$ProcessModelImpl; - const _ProcessModel._() : super._(); +abstract class _ModelDetails extends ModelDetails { + const factory _ModelDetails( + {@JsonKey(name: 'parent_model', includeIfNull: false) + final String? parentModel, + @JsonKey(includeIfNull: false) final String? format, + @JsonKey(includeIfNull: false) final String? family, + @JsonKey(includeIfNull: false) final List? families, + @JsonKey(name: 'parameter_size', includeIfNull: false) + final String? parameterSize, + @JsonKey(name: 'quantization_level', includeIfNull: false) + final String? quantizationLevel}) = _$ModelDetailsImpl; + const _ModelDetails._() : super._(); - factory _ProcessModel.fromJson(Map json) = - _$ProcessModelImpl.fromJson; + factory _ModelDetails.fromJson(Map json) = + _$ModelDetailsImpl.fromJson; @override - /// The model name. - /// - /// Model names follow a `model:tag` format. 
Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) - String? get model; + /// The parent model of the model. + @JsonKey(name: 'parent_model', includeIfNull: false) + String? get parentModel; @override - /// Size of the model on disk. + /// The format of the model. @JsonKey(includeIfNull: false) - int? get size; + String? get format; @override - /// The model's digest. + /// The family of the model. @JsonKey(includeIfNull: false) - String? get digest; + String? get family; @override - /// Details about a model. + /// The families of the model. @JsonKey(includeIfNull: false) - ModelDetails? get details; + List? get families; @override - /// No Description - @JsonKey(name: 'expires_at', includeIfNull: false) - String? get expiresAt; + /// The size of the model's parameters. + @JsonKey(name: 'parameter_size', includeIfNull: false) + String? get parameterSize; @override - /// Size of the model on disk. - @JsonKey(name: 'size_vram', includeIfNull: false) - int? get sizeVram; + /// The quantization level of the model. + @JsonKey(name: 'quantization_level', includeIfNull: false) + String? get quantizationLevel; @override @JsonKey(ignore: true) - _$$ProcessModelImplCopyWith<_$ProcessModelImpl> get copyWith => + _$$ModelDetailsImplCopyWith<_$ModelDetailsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -6856,10 +5036,6 @@ mixin _$ModelInfo { @JsonKey(includeIfNull: false) ModelDetails? get details => throw _privateConstructorUsedError; - /// Details about a model. - @JsonKey(name: 'model_info', includeIfNull: false) - ModelInformation? get modelInfo => throw _privateConstructorUsedError; - /// The default messages for the model. @JsonKey(includeIfNull: false) List? get messages => throw _privateConstructorUsedError; @@ -6882,12 +5058,9 @@ abstract class $ModelInfoCopyWith<$Res> { @JsonKey(includeIfNull: false) String? template, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) ModelDetails? details, - @JsonKey(name: 'model_info', includeIfNull: false) - ModelInformation? modelInfo, @JsonKey(includeIfNull: false) List? messages}); $ModelDetailsCopyWith<$Res>? get details; - $ModelInformationCopyWith<$Res>? get modelInfo; } /// @nodoc @@ -6909,7 +5082,6 @@ class _$ModelInfoCopyWithImpl<$Res, $Val extends ModelInfo> Object? template = freezed, Object? system = freezed, Object? details = freezed, - Object? modelInfo = freezed, Object? messages = freezed, }) { return _then(_value.copyWith( @@ -6937,10 +5109,6 @@ class _$ModelInfoCopyWithImpl<$Res, $Val extends ModelInfo> ? _value.details : details // ignore: cast_nullable_to_non_nullable as ModelDetails?, - modelInfo: freezed == modelInfo - ? _value.modelInfo - : modelInfo // ignore: cast_nullable_to_non_nullable - as ModelInformation?, messages: freezed == messages ? _value.messages : messages // ignore: cast_nullable_to_non_nullable @@ -6959,18 +5127,6 @@ class _$ModelInfoCopyWithImpl<$Res, $Val extends ModelInfo> return _then(_value.copyWith(details: value) as $Val); }); } - - @override - @pragma('vm:prefer-inline') - $ModelInformationCopyWith<$Res>? 
get modelInfo { - if (_value.modelInfo == null) { - return null; - } - - return $ModelInformationCopyWith<$Res>(_value.modelInfo!, (value) { - return _then(_value.copyWith(modelInfo: value) as $Val); - }); - } } /// @nodoc @@ -6988,14 +5144,10 @@ abstract class _$$ModelInfoImplCopyWith<$Res> @JsonKey(includeIfNull: false) String? template, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) ModelDetails? details, - @JsonKey(name: 'model_info', includeIfNull: false) - ModelInformation? modelInfo, @JsonKey(includeIfNull: false) List? messages}); @override $ModelDetailsCopyWith<$Res>? get details; - @override - $ModelInformationCopyWith<$Res>? get modelInfo; } /// @nodoc @@ -7015,7 +5167,6 @@ class __$$ModelInfoImplCopyWithImpl<$Res> Object? template = freezed, Object? system = freezed, Object? details = freezed, - Object? modelInfo = freezed, Object? messages = freezed, }) { return _then(_$ModelInfoImpl( @@ -7043,10 +5194,6 @@ class __$$ModelInfoImplCopyWithImpl<$Res> ? _value.details : details // ignore: cast_nullable_to_non_nullable as ModelDetails?, - modelInfo: freezed == modelInfo - ? _value.modelInfo - : modelInfo // ignore: cast_nullable_to_non_nullable - as ModelInformation?, messages: freezed == messages ? _value._messages : messages // ignore: cast_nullable_to_non_nullable @@ -7065,7 +5212,6 @@ class _$ModelInfoImpl extends _ModelInfo { @JsonKey(includeIfNull: false) this.template, @JsonKey(includeIfNull: false) this.system, @JsonKey(includeIfNull: false) this.details, - @JsonKey(name: 'model_info', includeIfNull: false) this.modelInfo, @JsonKey(includeIfNull: false) final List? messages}) : _messages = messages, super._(); @@ -7103,11 +5249,6 @@ class _$ModelInfoImpl extends _ModelInfo { @JsonKey(includeIfNull: false) final ModelDetails? details; - /// Details about a model. - @override - @JsonKey(name: 'model_info', includeIfNull: false) - final ModelInformation? modelInfo; - /// The default messages for the model. final List? _messages; @@ -7124,7 +5265,7 @@ class _$ModelInfoImpl extends _ModelInfo { @override String toString() { - return 'ModelInfo(license: $license, modelfile: $modelfile, parameters: $parameters, template: $template, system: $system, details: $details, modelInfo: $modelInfo, messages: $messages)'; + return 'ModelInfo(license: $license, modelfile: $modelfile, parameters: $parameters, template: $template, system: $system, details: $details, messages: $messages)'; } @override @@ -7141,8 +5282,6 @@ class _$ModelInfoImpl extends _ModelInfo { other.template == template) && (identical(other.system, system) || other.system == system) && (identical(other.details, details) || other.details == details) && - (identical(other.modelInfo, modelInfo) || - other.modelInfo == modelInfo) && const DeepCollectionEquality().equals(other._messages, _messages)); } @@ -7156,7 +5295,6 @@ class _$ModelInfoImpl extends _ModelInfo { template, system, details, - modelInfo, const DeepCollectionEquality().hash(_messages)); @JsonKey(ignore: true) @@ -7181,8 +5319,6 @@ abstract class _ModelInfo extends ModelInfo { @JsonKey(includeIfNull: false) final String? template, @JsonKey(includeIfNull: false) final String? system, @JsonKey(includeIfNull: false) final ModelDetails? details, - @JsonKey(name: 'model_info', includeIfNull: false) - final ModelInformation? modelInfo, @JsonKey(includeIfNull: false) final List? messages}) = _$ModelInfoImpl; const _ModelInfo._() : super._(); @@ -7222,11 +5358,6 @@ abstract class _ModelInfo extends ModelInfo { ModelDetails? 
get details; @override - /// Details about a model. - @JsonKey(name: 'model_info', includeIfNull: false) - ModelInformation? get modelInfo; - @override - /// The default messages for the model. @JsonKey(includeIfNull: false) List? get messages; @@ -8360,8 +6491,9 @@ PushModelResponse _$PushModelResponseFromJson(Map json) { /// @nodoc mixin _$PushModelResponse { /// Status pushing the model. - @JsonKey(includeIfNull: false) - String? get status => throw _privateConstructorUsedError; + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + PushModelStatus? get status => throw _privateConstructorUsedError; /// the model's digest @JsonKey(includeIfNull: false) @@ -8388,7 +6520,10 @@ abstract class $PushModelResponseCopyWith<$Res> { _$PushModelResponseCopyWithImpl<$Res, PushModelResponse>; @useResult $Res call( - {@JsonKey(includeIfNull: false) String? status, + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + PushModelStatus? status, @JsonKey(includeIfNull: false) String? digest, @JsonKey(includeIfNull: false) int? total, @JsonKey(includeIfNull: false) int? completed}); @@ -8416,7 +6551,7 @@ class _$PushModelResponseCopyWithImpl<$Res, $Val extends PushModelResponse> status: freezed == status ? _value.status : status // ignore: cast_nullable_to_non_nullable - as String?, + as PushModelStatus?, digest: freezed == digest ? _value.digest : digest // ignore: cast_nullable_to_non_nullable @@ -8442,7 +6577,10 @@ abstract class _$$PushModelResponseImplCopyWith<$Res> @override @useResult $Res call( - {@JsonKey(includeIfNull: false) String? status, + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + PushModelStatus? status, @JsonKey(includeIfNull: false) String? digest, @JsonKey(includeIfNull: false) int? total, @JsonKey(includeIfNull: false) int? completed}); @@ -8468,7 +6606,7 @@ class __$$PushModelResponseImplCopyWithImpl<$Res> status: freezed == status ? _value.status : status // ignore: cast_nullable_to_non_nullable - as String?, + as PushModelStatus?, digest: freezed == digest ? _value.digest : digest // ignore: cast_nullable_to_non_nullable @@ -8489,7 +6627,10 @@ class __$$PushModelResponseImplCopyWithImpl<$Res> @JsonSerializable() class _$PushModelResponseImpl extends _PushModelResponse { const _$PushModelResponseImpl( - {@JsonKey(includeIfNull: false) this.status, + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.status, @JsonKey(includeIfNull: false) this.digest, @JsonKey(includeIfNull: false) this.total, @JsonKey(includeIfNull: false) this.completed}) @@ -8500,8 +6641,9 @@ class _$PushModelResponseImpl extends _PushModelResponse { /// Status pushing the model. @override - @JsonKey(includeIfNull: false) - final String? status; + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final PushModelStatus? status; /// the model's digest @override @@ -8557,7 +6699,10 @@ class _$PushModelResponseImpl extends _PushModelResponse { abstract class _PushModelResponse extends PushModelResponse { const factory _PushModelResponse( - {@JsonKey(includeIfNull: false) final String? status, + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final PushModelStatus? status, @JsonKey(includeIfNull: false) final String? digest, @JsonKey(includeIfNull: false) final int? total, @JsonKey(includeIfNull: false) final int? 
completed}) = @@ -8570,8 +6715,9 @@ abstract class _PushModelResponse extends PushModelResponse { @override /// Status pushing the model. - @JsonKey(includeIfNull: false) - String? get status; + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + PushModelStatus? get status; @override /// the model's digest diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index 473e7825..f5548646 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -13,7 +13,6 @@ _$GenerateCompletionRequestImpl _$$GenerateCompletionRequestImplFromJson( _$GenerateCompletionRequestImpl( model: json['model'] as String, prompt: json['prompt'] as String, - suffix: json['suffix'] as String?, images: (json['images'] as List?)?.map((e) => e as String).toList(), system: json['system'] as String?, @@ -43,7 +42,6 @@ Map _$$GenerateCompletionRequestImplToJson( } } - writeNotNull('suffix', instance.suffix); writeNotNull('images', instance.images); writeNotNull('system', instance.system); writeNotNull('template', instance.template); @@ -67,7 +65,6 @@ _$RequestOptionsImpl _$$RequestOptionsImplFromJson(Map json) => numPredict: json['num_predict'] as int?, topK: json['top_k'] as int?, topP: (json['top_p'] as num?)?.toDouble(), - minP: (json['min_p'] as num?)?.toDouble(), tfsZ: (json['tfs_z'] as num?)?.toDouble(), typicalP: (json['typical_p'] as num?)?.toDouble(), repeatLastN: json['repeat_last_n'] as int?, @@ -109,7 +106,6 @@ Map _$$RequestOptionsImplToJson( writeNotNull('num_predict', instance.numPredict); writeNotNull('top_k', instance.topK); writeNotNull('top_p', instance.topP); - writeNotNull('min_p', instance.minP); writeNotNull('tfs_z', instance.tfsZ); writeNotNull('typical_p', instance.typicalP); writeNotNull('repeat_last_n', instance.repeatLastN); @@ -137,26 +133,6 @@ Map _$$RequestOptionsImplToJson( return val; } -_$VersionResponseImpl _$$VersionResponseImplFromJson( - Map json) => - _$VersionResponseImpl( - version: json['version'] as String?, - ); - -Map _$$VersionResponseImplToJson( - _$VersionResponseImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('version', instance.version); - return val; -} - _$GenerateCompletionResponseImpl _$$GenerateCompletionResponseImplFromJson( Map json) => _$GenerateCompletionResponseImpl( @@ -213,9 +189,6 @@ _$GenerateChatCompletionRequestImpl json['options'] as Map), stream: json['stream'] as bool? ?? false, keepAlive: json['keep_alive'] as int?, - tools: (json['tools'] as List?) - ?.map((e) => Tool.fromJson(e as Map)) - .toList(), ); Map _$$GenerateChatCompletionRequestImplToJson( @@ -235,17 +208,18 @@ Map _$$GenerateChatCompletionRequestImplToJson( writeNotNull('options', instance.options?.toJson()); val['stream'] = instance.stream; writeNotNull('keep_alive', instance.keepAlive); - writeNotNull('tools', instance.tools?.map((e) => e.toJson()).toList()); return val; } _$GenerateChatCompletionResponseImpl _$$GenerateChatCompletionResponseImplFromJson(Map json) => _$GenerateChatCompletionResponseImpl( - message: Message.fromJson(json['message'] as Map), - model: json['model'] as String, - createdAt: json['created_at'] as String, - done: json['done'] as bool, + message: json['message'] == null + ? 
null + : Message.fromJson(json['message'] as Map), + model: json['model'] as String?, + createdAt: json['created_at'] as String?, + done: json['done'] as bool?, doneReason: $enumDecodeNullable( _$DoneReasonEnumMap, json['done_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), @@ -259,12 +233,7 @@ _$GenerateChatCompletionResponseImpl Map _$$GenerateChatCompletionResponseImplToJson( _$GenerateChatCompletionResponseImpl instance) { - final val = { - 'message': instance.message.toJson(), - 'model': instance.model, - 'created_at': instance.createdAt, - 'done': instance.done, - }; + final val = {}; void writeNotNull(String key, dynamic value) { if (value != null) { @@ -272,6 +241,10 @@ Map _$$GenerateChatCompletionResponseImplToJson( } } + writeNotNull('message', instance.message?.toJson()); + writeNotNull('model', instance.model); + writeNotNull('created_at', instance.createdAt); + writeNotNull('done', instance.done); writeNotNull('done_reason', _$DoneReasonEnumMap[instance.doneReason]); writeNotNull('total_duration', instance.totalDuration); writeNotNull('load_duration', instance.loadDuration); @@ -294,9 +267,6 @@ _$MessageImpl _$$MessageImplFromJson(Map json) => content: json['content'] as String, images: (json['images'] as List?)?.map((e) => e as String).toList(), - toolCalls: (json['tool_calls'] as List?) - ?.map((e) => ToolCall.fromJson(e as Map)) - .toList(), ); Map _$$MessageImplToJson(_$MessageImpl instance) { @@ -312,8 +282,6 @@ Map _$$MessageImplToJson(_$MessageImpl instance) { } writeNotNull('images', instance.images); - writeNotNull( - 'tool_calls', instance.toolCalls?.map((e) => e.toJson()).toList()); return val; } @@ -321,84 +289,8 @@ const _$MessageRoleEnumMap = { MessageRole.system: 'system', MessageRole.user: 'user', MessageRole.assistant: 'assistant', - MessageRole.tool: 'tool', }; -_$ToolImpl _$$ToolImplFromJson(Map json) => _$ToolImpl( - type: $enumDecodeNullable(_$ToolTypeEnumMap, json['type']) ?? - ToolType.function, - function: json['function'] == null - ? null - : ToolFunction.fromJson(json['function'] as Map), - ); - -Map _$$ToolImplToJson(_$ToolImpl instance) { - final val = { - 'type': _$ToolTypeEnumMap[instance.type]!, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('function', instance.function?.toJson()); - return val; -} - -const _$ToolTypeEnumMap = { - ToolType.function: 'function', -}; - -_$ToolFunctionImpl _$$ToolFunctionImplFromJson(Map json) => - _$ToolFunctionImpl( - name: json['name'] as String, - description: json['description'] as String, - parameters: json['parameters'] as Map, - ); - -Map _$$ToolFunctionImplToJson(_$ToolFunctionImpl instance) => - { - 'name': instance.name, - 'description': instance.description, - 'parameters': instance.parameters, - }; - -_$ToolCallImpl _$$ToolCallImplFromJson(Map json) => - _$ToolCallImpl( - function: json['function'] == null - ? 
null - : ToolCallFunction.fromJson(json['function'] as Map), - ); - -Map _$$ToolCallImplToJson(_$ToolCallImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('function', instance.function?.toJson()); - return val; -} - -_$ToolCallFunctionImpl _$$ToolCallFunctionImplFromJson( - Map json) => - _$ToolCallFunctionImpl( - name: json['name'] as String, - arguments: json['arguments'] as Map, - ); - -Map _$$ToolCallFunctionImplToJson( - _$ToolCallFunctionImpl instance) => - { - 'name': instance.name, - 'arguments': instance.arguments, - }; - _$GenerateEmbeddingRequestImpl _$$GenerateEmbeddingRequestImplFromJson( Map json) => _$GenerateEmbeddingRequestImpl( @@ -584,85 +476,6 @@ Map _$$ModelDetailsImplToJson(_$ModelDetailsImpl instance) { return val; } -_$ModelInformationImpl _$$ModelInformationImplFromJson( - Map json) => - _$ModelInformationImpl( - generalArchitecture: json['general.architecture'] as String?, - generalFileType: json['general.file_type'] as int?, - generalParameterCount: json['general.parameter_count'] as int?, - generalQuantizationVersion: json['general.quantization_version'] as int?, - ); - -Map _$$ModelInformationImplToJson( - _$ModelInformationImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('general.architecture', instance.generalArchitecture); - writeNotNull('general.file_type', instance.generalFileType); - writeNotNull('general.parameter_count', instance.generalParameterCount); - writeNotNull( - 'general.quantization_version', instance.generalQuantizationVersion); - return val; -} - -_$ProcessResponseImpl _$$ProcessResponseImplFromJson( - Map json) => - _$ProcessResponseImpl( - models: (json['models'] as List?) - ?.map((e) => ProcessModel.fromJson(e as Map)) - .toList(), - ); - -Map _$$ProcessResponseImplToJson( - _$ProcessResponseImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('models', instance.models?.map((e) => e.toJson()).toList()); - return val; -} - -_$ProcessModelImpl _$$ProcessModelImplFromJson(Map json) => - _$ProcessModelImpl( - model: json['model'] as String?, - size: json['size'] as int?, - digest: json['digest'] as String?, - details: json['details'] == null - ? null - : ModelDetails.fromJson(json['details'] as Map), - expiresAt: json['expires_at'] as String?, - sizeVram: json['size_vram'] as int?, - ); - -Map _$$ProcessModelImplToJson(_$ProcessModelImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('model', instance.model); - writeNotNull('size', instance.size); - writeNotNull('digest', instance.digest); - writeNotNull('details', instance.details?.toJson()); - writeNotNull('expires_at', instance.expiresAt); - writeNotNull('size_vram', instance.sizeVram); - return val; -} - _$ModelInfoRequestImpl _$$ModelInfoRequestImplFromJson( Map json) => _$ModelInfoRequestImpl( @@ -685,10 +498,6 @@ _$ModelInfoImpl _$$ModelInfoImplFromJson(Map json) => details: json['details'] == null ? null : ModelDetails.fromJson(json['details'] as Map), - modelInfo: json['model_info'] == null - ? null - : ModelInformation.fromJson( - json['model_info'] as Map), messages: (json['messages'] as List?) 
?.map((e) => Message.fromJson(e as Map)) .toList(), @@ -709,7 +518,6 @@ Map _$$ModelInfoImplToJson(_$ModelInfoImpl instance) { writeNotNull('template', instance.template); writeNotNull('system', instance.system); writeNotNull('details', instance.details?.toJson()); - writeNotNull('model_info', instance.modelInfo?.toJson()); writeNotNull('messages', instance.messages?.map((e) => e.toJson()).toList()); return val; } @@ -837,7 +645,8 @@ Map _$$PushModelRequestImplToJson( _$PushModelResponseImpl _$$PushModelResponseImplFromJson( Map json) => _$PushModelResponseImpl( - status: json['status'] as String?, + status: $enumDecodeNullable(_$PushModelStatusEnumMap, json['status'], + unknownValue: JsonKey.nullForUndefinedEnumValue), digest: json['digest'] as String?, total: json['total'] as int?, completed: json['completed'] as int?, @@ -853,9 +662,16 @@ Map _$$PushModelResponseImplToJson( } } - writeNotNull('status', instance.status); + writeNotNull('status', _$PushModelStatusEnumMap[instance.status]); writeNotNull('digest', instance.digest); writeNotNull('total', instance.total); writeNotNull('completed', instance.completed); return val; } + +const _$PushModelStatusEnumMap = { + PushModelStatus.retrievingManifest: 'retrieving manifest', + PushModelStatus.startingUpload: 'starting upload', + PushModelStatus.pushingManifest: 'pushing manifest', + PushModelStatus.success: 'success', +}; diff --git a/packages/ollama_dart/lib/src/generated/schema/tool.dart b/packages/ollama_dart/lib/src/generated/schema/tool.dart deleted file mode 100644 index 4a225d1a..00000000 --- a/packages/ollama_dart/lib/src/generated/schema/tool.dart +++ /dev/null @@ -1,53 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of ollama_schema; - -// ========================================== -// CLASS: Tool -// ========================================== - -/// A tool the model may call. -@freezed -class Tool with _$Tool { - const Tool._(); - - /// Factory constructor for Tool - const factory Tool({ - /// The type of tool. - @Default(ToolType.function) ToolType type, - - /// A function that the model may call. - @JsonKey(includeIfNull: false) ToolFunction? function, - }) = _Tool; - - /// Object construction from a JSON representation - factory Tool.fromJson(Map json) => _$ToolFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['type', 'function']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'type': type, - 'function': function, - }; - } -} - -// ========================================== -// ENUM: ToolType -// ========================================== - -/// The type of tool. -enum ToolType { - @JsonValue('function') - function, -} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_call.dart b/packages/ollama_dart/lib/src/generated/schema/tool_call.dart deleted file mode 100644 index ec1d82e0..00000000 --- a/packages/ollama_dart/lib/src/generated/schema/tool_call.dart +++ /dev/null @@ -1,40 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of ollama_schema; - -// ========================================== -// CLASS: ToolCall -// ========================================== - -/// The tool the model wants to call. 
-@freezed -class ToolCall with _$ToolCall { - const ToolCall._(); - - /// Factory constructor for ToolCall - const factory ToolCall({ - /// The function the model wants to call. - @JsonKey(includeIfNull: false) ToolCallFunction? function, - }) = _ToolCall; - - /// Object construction from a JSON representation - factory ToolCall.fromJson(Map json) => - _$ToolCallFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['function']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'function': function, - }; - } -} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart b/packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart deleted file mode 100644 index 4d5e969c..00000000 --- a/packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart +++ /dev/null @@ -1,44 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of ollama_schema; - -// ========================================== -// CLASS: ToolCallFunction -// ========================================== - -/// The function the model wants to call. -@freezed -class ToolCallFunction with _$ToolCallFunction { - const ToolCallFunction._(); - - /// Factory constructor for ToolCallFunction - const factory ToolCallFunction({ - /// The name of the function to be called. - required String name, - - /// The arguments to pass to the function. - required ToolCallFunctionArgs arguments, - }) = _ToolCallFunction; - - /// Object construction from a JSON representation - factory ToolCallFunction.fromJson(Map json) => - _$ToolCallFunctionFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['name', 'arguments']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'name': name, - 'arguments': arguments, - }; - } -} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart b/packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart deleted file mode 100644 index a1d7d7b8..00000000 --- a/packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart +++ /dev/null @@ -1,12 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of ollama_schema; - -// ========================================== -// TYPE: ToolCallFunctionArgs -// ========================================== - -/// The arguments to pass to the function. -typedef ToolCallFunctionArgs = Map; diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_function.dart b/packages/ollama_dart/lib/src/generated/schema/tool_function.dart deleted file mode 100644 index 35d5e8f1..00000000 --- a/packages/ollama_dart/lib/src/generated/schema/tool_function.dart +++ /dev/null @@ -1,52 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of ollama_schema; - -// ========================================== -// CLASS: ToolFunction -// ========================================== - -/// A function that the model may call. 
-@freezed -class ToolFunction with _$ToolFunction { - const ToolFunction._(); - - /// Factory constructor for ToolFunction - const factory ToolFunction({ - /// The name of the function to be called. - required String name, - - /// A description of what the function does, used by the model to choose when and how to call the function. - required String description, - - /// The parameters the functions accepts, described as a JSON Schema object. - required ToolFunctionParams parameters, - }) = _ToolFunction; - - /// Object construction from a JSON representation - factory ToolFunction.fromJson(Map json) => - _$ToolFunctionFromJson(json); - - /// List of all property names of schema - static const List propertyNames = [ - 'name', - 'description', - 'parameters' - ]; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'name': name, - 'description': description, - 'parameters': parameters, - }; - } -} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart b/packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart deleted file mode 100644 index 89fa74fb..00000000 --- a/packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart +++ /dev/null @@ -1,12 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of ollama_schema; - -// ========================================== -// TYPE: ToolFunctionParams -// ========================================== - -/// The parameters the functions accepts, described as a JSON Schema object. -typedef ToolFunctionParams = Map; diff --git a/packages/ollama_dart/lib/src/generated/schema/version_response.dart b/packages/ollama_dart/lib/src/generated/schema/version_response.dart deleted file mode 100644 index 21d3259e..00000000 --- a/packages/ollama_dart/lib/src/generated/schema/version_response.dart +++ /dev/null @@ -1,40 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of ollama_schema; - -// ========================================== -// CLASS: VersionResponse -// ========================================== - -/// The response class for the version endpoint. -@freezed -class VersionResponse with _$VersionResponse { - const VersionResponse._(); - - /// Factory constructor for VersionResponse - const factory VersionResponse({ - /// The version of the Ollama server. - @JsonKey(includeIfNull: false) String? version, - }) = _VersionResponse; - - /// Object construction from a JSON representation - factory VersionResponse.fromJson(Map json) => - _$VersionResponseFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['version']; - - /// Perform validations on the schema property values - String? 
validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'version': version, - }; - } -} diff --git a/packages/ollama_dart/lib/src/http_client/http_client.dart b/packages/ollama_dart/lib/src/http_client/http_client.dart index 0ad0b2fc..99555ca4 100644 --- a/packages/ollama_dart/lib/src/http_client/http_client.dart +++ b/packages/ollama_dart/lib/src/http_client/http_client.dart @@ -1,3 +1,4 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js_interop) 'http_client_html.dart'; + if (dart.library.js) 'http_client_html.dart' + if (dart.library.html) 'http_client_html.dart'; diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index 05b3f593..b63d0c21 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -20,18 +20,6 @@ tags: description: List and describe the various models available. paths: - /version: - get: - operationId: getVersion - summary: Returns the version of the Ollama server. - description: This endpoint returns the version of the Ollama server. - responses: - '200': - description: Successful operation. - content: - application/json: - schema: - $ref: '#/components/schemas/VersionResponse' /generate: post: operationId: generateCompletion @@ -121,19 +109,6 @@ paths: application/json: schema: $ref: '#/components/schemas/ModelsResponse' - /ps: - get: - operationId: listRunningModels - tags: - - Models - summary: List models that are running. - responses: - '200': - description: Successful operation. - content: - application/json: - schema: - $ref: '#/components/schemas/ProcessResponse' /show: post: operationId: showModelInfo @@ -196,7 +171,7 @@ paths: '200': description: Successful operation. content: - application/x-ndjson: + application/json: schema: $ref: '#/components/schemas/PullModelResponse' /push: @@ -215,7 +190,7 @@ paths: '200': description: Successful operation. content: - application/x-ndjson: + application/json: schema: $ref: '#/components/schemas/PushModelResponse' /blobs/{digest}: @@ -223,11 +198,10 @@ paths: operationId: checkBlob tags: - Models - summary: Ensures that the file blob used for a FROM or ADAPTER field exists on the server. - description: This is checking your Ollama server and not Ollama.ai. + summary: Check to see if a blob exists on the Ollama server which is useful when creating models. parameters: - - in: path - name: digest + - in: query + name: name schema: type: string required: true @@ -244,8 +218,8 @@ paths: - Models summary: Create a blob from a file. Returns the server file path. parameters: - - in: path - name: digest + - in: query + name: name schema: type: string required: true @@ -273,14 +247,11 @@ components: The model name. Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - example: llama3.2 + example: llama3:8b prompt: type: string description: The prompt to generate a response. example: Why is the sky blue? - suffix: - type: string - description: The text that comes after the inserted text. 
images: type: array description: (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @@ -315,10 +286,10 @@ components: description: &stream | If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. default: false - keep_alive: &keep_alive + keep_alive: type: integer nullable: true - description: | + description: &keep_alive | How long (in minutes) to keep the model loaded in memory. - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. @@ -341,115 +312,90 @@ components: type: integer nullable: true description: | - Sets the random number seed to use for generation. Setting this to a specific number will make the model - generate the same text for the same prompt. (Default: 0) + Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) num_predict: type: integer nullable: true description: | - Maximum number of tokens to predict when generating text. - (Default: 128, -1 = infinite generation, -2 = fill context) + Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) top_k: type: integer nullable: true description: | - Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, - while a lower value (e.g. 10) will be more conservative. (Default: 40) + Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) top_p: type: number format: float nullable: true description: | - Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value - (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) - min_p: - type: number - format: float - nullable: true - description: | - Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum - probability for a token to be considered, relative to the probability of the most likely token. For - example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less - than 0.05*0.9=0.045 are filtered out. (Default: 0.0) + Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) tfs_z: type: number format: float nullable: true description: | - Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value - (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) typical_p: type: number format: float nullable: true description: | - Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) + Typical p is used to reduce the impact of less probable tokens from the output. repeat_last_n: type: integer nullable: true description: | - Sets how far back for the model to look back to prevent repetition. 
- (Default: 64, 0 = disabled, -1 = num_ctx) + Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) temperature: type: number format: float nullable: true description: | - The temperature of the model. Increasing the temperature will make the model answer more creatively. - (Default: 0.8) + The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) repeat_penalty: type: number format: float nullable: true description: | - Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more - strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) presence_penalty: type: number format: float nullable: true description: | - Positive values penalize new tokens based on whether they appear in the text so far, increasing the - model's likelihood to talk about new topics. (Default: 0) + Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. frequency_penalty: type: number format: float nullable: true description: | - Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the - model's likelihood to repeat the same line verbatim. (Default: 0) + Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. mirostat: type: integer nullable: true description: | - Enable Mirostat sampling for controlling perplexity. - (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) mirostat_tau: type: number format: float nullable: true description: | - Controls the balance between coherence and diversity of the output. A lower value will result in more - focused and coherent text. (Default: 5.0) + Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) mirostat_eta: type: number format: float nullable: true description: | - Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate - will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. - (Default: 0.1) + Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) penalize_newline: type: boolean nullable: true description: | - Penalize newlines in the output. (Default: true) + Penalize newlines in the output. (Default: false) stop: type: array nullable: true - description: | - Sequences where the API will stop generating further tokens. The returned text will not contain the stop - sequence. + description: Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. items: type: string numa: @@ -461,18 +407,17 @@ components: type: integer nullable: true description: | - Sets the size of the context window used to generate the next token. 
(Default: 2048) + Sets the size of the context window used to generate the next token. num_batch: type: integer nullable: true description: | - Sets the number of batches to use for generation. (Default: 512) + Sets the number of batches to use for generation. (Default: 1) num_gpu: type: integer nullable: true description: | - The number of layers to send to the GPU(s). - On macOS it defaults to 1 to enable metal support, 0 to disable. + The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. main_gpu: type: integer nullable: true @@ -487,7 +432,7 @@ components: type: boolean nullable: true description: | - Enable f16 key/value. (Default: true) + Enable f16 key/value. (Default: false) logits_all: type: boolean nullable: true @@ -512,9 +457,7 @@ components: type: integer nullable: true description: | - Sets the number of threads to use during computation. By default, Ollama will detect this for optimal - performance. It is recommended to set this value to the number of physical CPU cores your system has - (as opposed to the logical number of cores). + Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). ResponseFormat: type: string description: | @@ -525,13 +468,6 @@ components: Note: it's important to instruct the model to use JSON in the prompt. Otherwise, the model may generate large amounts whitespace. enum: - json - VersionResponse: - type: object - description: The response class for the version endpoint. - properties: - version: - type: string - description: The version of the Ollama server. GenerateCompletionResponse: type: object description: The response class for the generate endpoint. @@ -539,7 +475,7 @@ components: model: type: string description: *model_name - example: llama3.2 + example: llama3:8b created_at: type: string format: date-time @@ -596,7 +532,7 @@ components: model: type: string description: *model_name - example: llama3.2 + example: llama3:8b messages: type: array description: The messages of the chat, this can be used to keep a chat memory @@ -610,12 +546,10 @@ components: type: boolean description: *stream default: false - keep_alive: *keep_alive - tools: - type: array - description: A list of tools the model may call. - items: - $ref: '#/components/schemas/Tool' + keep_alive: + type: integer + nullable: true + description: *keep_alive required: - model - messages @@ -628,7 +562,7 @@ components: model: type: string description: *model_name - example: llama3.2 + example: llama3:8b created_at: type: string format: date-time @@ -668,11 +602,6 @@ components: format: int64 description: Time in nanoseconds spent generating the response. example: 1325948000 - required: - - model - - created_at - - message - - done DoneReason: type: string description: Reason why the model is done generating a response. 
@@ -687,7 +616,7 @@ components: role: type: string description: The role of the message - enum: [ "system", "user", "assistant", "tool" ] + enum: [ "system", "user", "assistant" ] content: type: string description: The content of the message @@ -699,69 +628,9 @@ components: type: string description: Base64-encoded image (for multimodal models such as llava) example: iVBORw0KGgoAAAANSUhEUgAAAAkAAAANCAIAAAD0YtNRAAAABnRSTlMA/AD+APzoM1ogAAAAWklEQVR4AWP48+8PLkR7uUdzcMvtU8EhdykHKAciEXL3pvw5FQIURaBDJkARoDhY3zEXiCgCHbNBmAlUiyaBkENoxZSDWnOtBmoAQu7TnT+3WuDOA7KBIkAGAGwiNeqjusp/AAAAAElFTkSuQmCC - tool_calls: - type: array - description: A list of tools the model wants to call. - items: - $ref: '#/components/schemas/ToolCall' required: - role - content - Tool: - type: object - description: A tool the model may call. - properties: - type: - type: string - enum: - - function - default: function - description: The type of tool. - function: - $ref: '#/components/schemas/ToolFunction' - ToolFunction: - type: object - description: A function that the model may call. - properties: - name: - type: string - description: The name of the function to be called. - description: - type: string - description: | - A description of what the function does, used by the model to choose when and how to call the function. - parameters: - $ref: '#/components/schemas/ToolFunctionParams' - required: - - name - - description - - parameters - ToolFunctionParams: - type: object - description: The parameters the functions accepts, described as a JSON Schema object. - additionalProperties: true - ToolCall: - type: object - description: The tool the model wants to call. - properties: - function: - $ref: '#/components/schemas/ToolCallFunction' - ToolCallFunction: - type: object - description: The function the model wants to call. - properties: - name: - type: string - description: The name of the function to be called. - arguments: - $ref: '#/components/schemas/ToolCallFunctionArgs' - required: - - name - - arguments - ToolCallFunctionArgs: - type: object - description: The arguments to pass to the function. - additionalProperties: true GenerateEmbeddingRequest: description: Generate embeddings from a model. type: object @@ -769,14 +638,17 @@ components: model: type: string description: *model_name - example: llama3.2 + example: llama3:8b prompt: type: string description: Text to generate embeddings for. example: 'Here is an article about llamas...' options: $ref: '#/components/schemas/RequestOptions' - keep_alive: *keep_alive + keep_alive: + type: integer + nullable: true + description: *keep_alive required: - model - prompt @@ -846,7 +718,7 @@ components: model: type: string description: *model_name - example: llama3.2 + example: llama3:8b modified_at: type: string format: date-time @@ -887,63 +759,6 @@ components: quantization_level: type: string description: The quantization level of the model. - ModelInformation: - type: object - description: Details about a model. - properties: - general.architecture: - type: string - description: The architecture of the model. - general.file_type: - type: integer - nullable: true - description: The file type of the model. - general.parameter_count: - type: integer - format: int64 - nullable: true - description: The number of parameters in the model. - general.quantization_version: - type: integer - nullable: true - description: The number of parameters in the model. - ProcessResponse: - type: object - description: Response class for the list running models endpoint. 
- properties: - models: - type: array - description: List of running models. - items: - $ref: '#/components/schemas/ProcessModel' - ProcessModel: - type: object - description: A model that is currently loaded. - properties: - model: - type: string - description: *model_name - example: llama3.2 - size: - type: integer - format: int64 - description: Size of the model on disk. - example: 7323310500 - digest: - type: string - description: The model's digest. - example: 'sha256:bc07c81de745696fdf5afca05e065818a8149fb0c77266fb584d9b2cba3711a' - details: - $ref: '#/components/schemas/ModelDetails' - expires_at: - type: string - format: date-time - example: 2023-08-02T17:02:23.713454393-07:00 - size_vram: - type: integer - format: int64 - description: Size of the model on disk. - example: 7323310500 ModelInfoRequest: description: Request class for the show model info endpoint. type: object @@ -951,7 +766,7 @@ components: model: type: string description: *model_name - example: llama3.2 + example: llama3:8b required: - model ModelInfo: @@ -984,14 +799,12 @@ components: description: The system prompt for the model. details: $ref: '#/components/schemas/ModelDetails' - model_info: - $ref: '#/components/schemas/ModelInformation' messages: type: array nullable: true description: The default messages for the model. items: - $ref: '#/components/schemas/Message' + $ref: '#/components/schemas/Message' CopyModelRequest: description: Request class for copying a model. type: object @@ -999,7 +812,7 @@ components: source: type: string description: Name of the model to copy. - example: llama3.2 + example: llama3:8b destination: type: string description: Name of the new model. @@ -1024,7 +837,7 @@ components: model: type: string description: *model_name - example: llama3.2 + example: llama3:8b insecure: type: boolean description: | @@ -1112,8 +925,7 @@ components: description: Response class for pushing a model. properties: status: - type: string - description: Status pushing the model. + $ref: '#/components/schemas/PushModelStatus' digest: type: string description: the model's digest @@ -1128,3 +940,11 @@ components: format: int64 description: Total bytes transferred. example: 2142590208 + PushModelStatus: + type: string + description: Status pushing the model. + enum: + - retrieving manifest + - starting upload + - pushing manifest + - success diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 52b3b896..30f792f5 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,10 +1,10 @@ name: ollama_dart -description: Dart Client for the Ollama API (run Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, Qwen2 and other models locally). -version: 0.2.2 +description: Dart Client for the Ollama API (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). 
+version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -13,22 +13,22 @@ topics: - ollama environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - fetch_client: ^1.1.2 - freezed_annotation: ^2.4.2 - http: ^1.2.2 - json_annotation: ^4.9.0 + fetch_client: ^1.0.2 + freezed_annotation: ^2.4.1 + http: ^1.1.0 + json_annotation: ^4.8.1 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.11 - freezed: ^2.5.7 - json_serializable: ^6.8.0 + build_runner: ^2.4.9 + freezed: ^2.4.7 + json_serializable: ^6.7.1 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 - test: ^1.25.8 + ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + test: ^1.25.2 diff --git a/packages/ollama_dart/test/ollama_dart_chat_test.dart b/packages/ollama_dart/test/ollama_dart_chat_test.dart index 3e8afd82..af90c448 100644 --- a/packages/ollama_dart/test/ollama_dart_chat_test.dart +++ b/packages/ollama_dart/test/ollama_dart_chat_test.dart @@ -7,19 +7,19 @@ void main() { group('Ollama Generate Completions API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3.2'; - const visionModel = 'llava'; + const defaultModel = 'llama3:latest'; + const visionModel = 'llava:latest'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), + res.models?.firstWhere((final m) => m.model == defaultModel), isNotNull, ); expect( - res.models?.firstWhere((final m) => m.model!.startsWith(visionModel)), + res.models?.firstWhere((final m) => m.model == visionModel), isNotNull, ); }); @@ -48,7 +48,7 @@ void main() { expect(response.model, defaultModel); expect(response.createdAt, isNotNull); expect( - response.message.content, + response.message?.content, isNotEmpty, ); expect(response.done, isTrue); @@ -79,7 +79,7 @@ void main() { ); String text = ''; await for (final res in stream) { - text += res.message.content.trim(); + text += (res.message?.content ?? 
'').trim(); } expect(text, contains('123456789')); }); @@ -103,7 +103,7 @@ void main() { format: ResponseFormat.json, ), ); - final generation = res.message.content.replaceAll(RegExp(r'[\s\n]'), ''); + final generation = res.message?.content.replaceAll(RegExp(r'[\s\n]'), ''); expect(generation, contains('[1,2,3,4,5,6,7,8,9]')); }); @@ -125,7 +125,7 @@ void main() { options: RequestOptions(stop: ['4']), ), ); - final generation = res.message.content.replaceAll(RegExp(r'[\s\n]'), ''); + final generation = res.message?.content.replaceAll(RegExp(r'[\s\n]'), ''); expect(generation, contains('123')); expect(generation, isNot(contains('456789'))); expect(res.doneReason, DoneReason.stop); @@ -170,65 +170,8 @@ void main() { ); final res1 = await client.generateChatCompletion(request: request); - final text1 = res1.message.content; + final text1 = res1.message?.content; expect(text1, contains('star')); }); - - test('Test tool calling', () async { - const tool = Tool( - function: ToolFunction( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - parameters: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ), - ); - - final res = await client.generateChatCompletion( - request: const GenerateChatCompletionRequest( - model: defaultModel, - messages: [ - Message( - role: MessageRole.system, - content: 'You are a helpful assistant.', - ), - Message( - role: MessageRole.user, - content: - 'What’s the weather like in Boston and Barcelona in celsius?', - ), - ], - tools: [tool], - keepAlive: 1, - ), - ); - // https://github.com/ollama/ollama/issues/5796 - expect(res.doneReason, DoneReason.stop); - expect(res.message.role, MessageRole.assistant); - expect(res.message.content, isEmpty); - final toolCalls = res.message.toolCalls; - expect(toolCalls, hasLength(2)); - final toolCall1 = toolCalls?.first.function; - expect(toolCall1?.name, tool.function?.name); - expect(toolCall1?.arguments['location'], contains('Boston')); - expect(toolCall1?.arguments['unit'], 'celsius'); - final toolCall2 = toolCalls?.last.function; - expect(toolCall2?.name, tool.function?.name); - expect(toolCall2?.arguments['location'], contains('Barcelona')); - expect(toolCall2?.arguments['unit'], 'celsius'); - }); }); } diff --git a/packages/ollama_dart/test/ollama_dart_completions_test.dart b/packages/ollama_dart/test/ollama_dart_completions_test.dart index 5a134b37..5c4b2981 100644 --- a/packages/ollama_dart/test/ollama_dart_completions_test.dart +++ b/packages/ollama_dart/test/ollama_dart_completions_test.dart @@ -7,19 +7,20 @@ void main() { group('Ollama Generate Completions API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'gemma2'; - const visionModel = 'llava'; + const defaultModel = 'llama3:latest'; + const visionModel = 'llava:latest'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), + res.models?.firstWhere((final m) => m.model == defaultModel), isNotNull, ); + expect( - res.models?.firstWhere((final m) => m.model!.startsWith(visionModel)), + res.models?.firstWhere((final m) => m.model == visionModel), isNotNull, ); }); @@ 
-75,9 +76,9 @@ void main() { }); test('Test call completions API with raw mode', () async { - const testPrompt = 'List the numbers from 1 to 9 in order. ' + const testPrompt = '[INST] List the numbers from 1 to 9 in order. ' 'Output ONLY the numbers in one line without any spaces or commas. ' - 'NUMBERS:'; + 'NUMBERS: [/INST]'; final res = await client.generateCompletion( request: const GenerateCompletionRequest( diff --git a/packages/ollama_dart/test/ollama_dart_embeddings_test.dart b/packages/ollama_dart/test/ollama_dart_embeddings_test.dart index e6ff8b6f..c32701a8 100644 --- a/packages/ollama_dart/test/ollama_dart_embeddings_test.dart +++ b/packages/ollama_dart/test/ollama_dart_embeddings_test.dart @@ -7,14 +7,14 @@ void main() { group('Ollama Generate Embeddings API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'mxbai-embed-large:335m'; + const defaultModel = 'llama3:latest'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), + res.models?.firstWhere((final m) => m.model == defaultModel), isNotNull, ); }); diff --git a/packages/ollama_dart/test/ollama_dart_models_test.dart b/packages/ollama_dart/test/ollama_dart_models_test.dart index 03086e4b..f77a9d32 100644 --- a/packages/ollama_dart/test/ollama_dart_models_test.dart +++ b/packages/ollama_dart/test/ollama_dart_models_test.dart @@ -7,14 +7,14 @@ void main() { group('Ollama Models API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'gemma2'; + const defaultModel = 'llama3:latest'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), + res.models?.firstWhere((final m) => m.model == defaultModel), isNotNull, ); }); @@ -62,26 +62,7 @@ void main() { test('Test list models', () async { final res = await client.listModels(); - expect( - res.models?.any((final m) => m.model!.startsWith(defaultModel)), - isTrue, - ); - }); - - test('Test list running models', () async { - await client.generateCompletion( - request: const GenerateCompletionRequest( - model: defaultModel, - prompt: 'You are a llama', - options: RequestOptions(numPredict: 1), - ), - ); - - final res = await client.listRunningModels(); - expect( - res.models?.any((final m) => m.model!.startsWith(defaultModel)), - isTrue, - ); + expect(res.models?.any((final m) => m.model == defaultModel), isTrue); }); test('Test show model info', () async { @@ -90,17 +71,7 @@ void main() { ); expect(res.license, isNotEmpty); expect(res.modelfile, isNotEmpty); - expect(res.parameters, isNotEmpty); expect(res.template, isNotEmpty); - expect(res.details?.format, isNotEmpty); - expect(res.details?.family, isNotEmpty); - expect(res.details?.families, isNotEmpty); - expect(res.details?.parameterSize, isNotEmpty); - expect(res.details?.quantizationLevel, isNotEmpty); - expect(res.modelInfo?.generalArchitecture, isNotEmpty); - expect(res.modelInfo?.generalFileType, greaterThan(0)); - expect(res.modelInfo?.generalParameterCount, greaterThan(0)); - expect(res.modelInfo?.generalQuantizationVersion, greaterThan(0)); }); test('Test copy model', () async { @@ -158,7 +129,7 @@ void main() { request: const PushModelRequest(model: 'mattw/pygmalion:latest'), ); - expect(res.status, equals('success')); + 
expect(res.status, PushModelStatus.success); }); test('Test push model stream', skip: true, () async { @@ -167,25 +138,25 @@ void main() { ); int count = 0; - String? lastStatus; + PushModelStatus? lastStatus; await for (final res in stream) { lastStatus = res.status; count++; } expect(count, greaterThan(1)); - expect(lastStatus, equals('success')); + expect(lastStatus, equals(PushModelStatus.success)); }); test('Test check blob', skip: true, () async { await client.checkBlob( - digest: + name: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', ); }); test('Test create blob', skip: true, () async { await client.createBlob( - digest: + name: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', request: 'file contents', ); diff --git a/packages/ollama_dart/test/ollama_dart_version_test.dart b/packages/ollama_dart/test/ollama_dart_version_test.dart deleted file mode 100644 index 002f8167..00000000 --- a/packages/ollama_dart/test/ollama_dart_version_test.dart +++ /dev/null @@ -1,24 +0,0 @@ -import 'dart:io'; - -import 'package:ollama_dart/ollama_dart.dart'; -import 'package:test/test.dart'; - -void main() { - group('Ollama Version API tests', - skip: Platform.environment.containsKey('CI'), () { - late OllamaClient client; - - setUp(() async { - client = OllamaClient(); - }); - - tearDown(() { - client.endSession(); - }); - - test('Test get version', () async { - final res = await client.getVersion(); - expect(res.version, isNotEmpty); - }); - }); -} diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index aa3ac2cc..632fa141 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -1,44 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.4.2 - - - **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) - - **FEAT**: Add support for maxCompletionTokens and reasoningTokens in openai_dart ([#556](https://github.com/davidmigloz/langchain_dart/issues/556)). ([37d75b61](https://github.com/davidmigloz/langchain_dart/commit/37d75b612b0f42bbf8d092bdd81c554278716582)) - - **FEAT**: Option to include file search results in assistants API ([#543](https://github.com/davidmigloz/langchain_dart/issues/543)). ([e916ad3c](https://github.com/davidmigloz/langchain_dart/commit/e916ad3c0c4e322319cedac8b06b5908f1c31935)) - -## 0.4.1 - - - **FEAT**: Add support for Structured Outputs ([#525](https://github.com/davidmigloz/langchain_dart/issues/525)). ([c7574077](https://github.com/davidmigloz/langchain_dart/commit/c7574077195acfc96e9ca9d526cc050788c23c1d)) - - **FEAT**: Add log probabilities for refusal tokens ([#534](https://github.com/davidmigloz/langchain_dart/issues/534)). ([8470a24c](https://github.com/davidmigloz/langchain_dart/commit/8470a24cc42042e20ffffa4b67bc831e03efbc6c)) - - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). 
([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) - - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) - -## 0.4.0 - - - **FEAT**: Add support for disabling parallel tool calls in openai_dart ([#492](https://github.com/davidmigloz/langchain_dart/issues/492)). ([a91e0719](https://github.com/davidmigloz/langchain_dart/commit/a91e07196278ae4da5917d52395f3c246fc35bf2)) - - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) - - **FEAT**: Support chunking strategy in file_search tool in openai_dart ([#496](https://github.com/davidmigloz/langchain_dart/issues/496)). ([cfa974a9](https://github.com/davidmigloz/langchain_dart/commit/cfa974a9e2fc4b79e5b66765b22d76710575d5bc)) - - **FEAT**: Add support for overrides in the file search tool in openai_dart ([#491](https://github.com/davidmigloz/langchain_dart/issues/491)). ([89605638](https://github.com/davidmigloz/langchain_dart/commit/89605638c465be37c2738258d840c21d32fe9554)) - - **FEAT**: Allow to customize OpenAI-Beta header in openai_dart ([#502](https://github.com/davidmigloz/langchain_dart/issues/502)). ([5fed8dbb](https://github.com/davidmigloz/langchain_dart/commit/5fed8dbb8205ba7925ca59d6f07a4f5e052b52b1)) - - **FEAT**: Add support for service tier in openai_dart ([#494](https://github.com/davidmigloz/langchain_dart/issues/494)). ([0838e4b9](https://github.com/davidmigloz/langchain_dart/commit/0838e4b9f5bb25e29fbc163a0ff5cf3e64409d40)) - -## 0.3.3+1 - - - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) - -## 0.3.3 - - - **FEAT**: Support FastChat OpenAI-compatible API ([#444](https://github.com/davidmigloz/langchain_dart/issues/444)). ([ddaf1f69](https://github.com/davidmigloz/langchain_dart/commit/ddaf1f69d8262210637999367690bf362f2dc5c3)) - - **FIX**: Make vector store name optional in openai_dart ([#436](https://github.com/davidmigloz/langchain_dart/issues/436)). ([29a46c7f](https://github.com/davidmigloz/langchain_dart/commit/29a46c7fa645439e8f4acc10a16da904e7cf14ff)) - - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) - -## 0.3.2+1 - - - **FIX**: Rename CreateRunRequestModel factories names ([#429](https://github.com/davidmigloz/langchain_dart/issues/429)). ([fd15793b](https://github.com/davidmigloz/langchain_dart/commit/fd15793b3c4ac94dfc90567b4a709e1458f4e0e8)) - - **FIX**: Make quote nullable in MessageContentTextAnnotationsFileCitation ([#428](https://github.com/davidmigloz/langchain_dart/issues/428)). ([75b95645](https://github.com/davidmigloz/langchain_dart/commit/75b95645a58d51b369a01e261393e17f7463e1f5)) - ## 0.3.2 - **FEAT**: Add GPT-4o to model catalog ([#420](https://github.com/davidmigloz/langchain_dart/issues/420)). 
([96214307](https://github.com/davidmigloz/langchain_dart/commit/96214307ec8ae045dade687d4c623bd4dc1be896)) diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md index 68a26356..f020d128 100644 --- a/packages/openai_dart/README.md +++ b/packages/openai_dart/README.md @@ -16,11 +16,11 @@ Unofficial Dart client for [OpenAI](https://platform.openai.com/docs/api-referen - Custom base URL, headers and query params support (e.g. HTTP proxies) - Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) - Partial Azure OpenAI API support -- It can be used to consume OpenAI-compatible APIs like [GitHub Models](https://github.com/marketplace/models), [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc. +- It can be used to consume OpenAI-compatible APIs like [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), etc. **Supported endpoints:** -- Chat (with structured outputs, tools and streaming support) +- Chat (with tools and streaming support) - Completions (legacy) - Embeddings - Fine-tuning @@ -28,7 +28,7 @@ Unofficial Dart client for [OpenAI](https://platform.openai.com/docs/api-referen - Images - Models - Moderations -- Assistants v2 (with structured outputs, tools and streaming support) `beta` +- Assistants v2 (with tools and streaming support) `beta` * Threads * Messages * Runs @@ -97,14 +97,14 @@ final client = OpenAIClient( Given a list of messages comprising a conversation, the model will return a response. -Related guide: [Chat Completions](https://platform.openai.com/docs/guides/chat-completions) +Related guide: [Chat Completions](https://platform.openai.com/docs/guides/text-generation) **Create chat completion:** ```dart final res = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-4o'), + model: ChatCompletionModel.modelId('gpt-4'), messages: [ ChatCompletionMessage.system( content: 'You are a helpful assistant.', @@ -121,28 +121,28 @@ print(res.choices.first.message.content); ``` `ChatCompletionModel` is a sealed class that offers two ways to specify the model: -- `ChatCompletionModel.modelId('model-id')`: the model ID as string (e.g. `'gpt-4o'` or your fine-tuned model ID). -- `ChatCompletionModel.model(ChatCompletionModels.gpt4o)`: a value from `ChatCompletionModels` enum which lists all of the available models. +- `ChatCompletionModel.modelId('model-id')`: the model ID as string (e.g. `'gpt-4'` or your fine-tuned model ID). +- `ChatCompletionModel.model(ChatCompletionModels.gpt4)`: a value from `ChatCompletionModels` enum which lists all of the available models. `ChatCompletionMessage` is a sealed class that supports the following message types: - `ChatCompletionMessage.system()`: a system message. - `ChatCompletionMessage.user()`: a user message. - `ChatCompletionMessage.assistant()`: an assistant message. - `ChatCompletionMessage.tool()`: a tool message. -- `ChatCompletionMessage.function()`: a function message (deprecated in favor of tools). +- `ChatCompletionMessage.function()`: a function message. 
`ChatCompletionMessage.user()` takes a `ChatCompletionUserMessageContent` object that supports the following content types: - `ChatCompletionUserMessageContent.string('content')`: string content. - `ChatCompletionUserMessageContent.parts([...])`: multi-modal content (check the 'Multi-modal prompt' section below). * `ChatCompletionMessageContentPart.text('content')`: text content. - * `ChatCompletionMessageContentPart.image(...)`: image content (URL or base64-encoded image). + * `ChatCompletionMessageContentPart.image(imageUrl: ...)`: image content. **Stream chat completion:** ```dart final stream = client.createChatCompletionStream( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-4o'), + model: ChatCompletionModel.modelId('gpt-4-turbo'), messages: [ ChatCompletionMessage.system( content: @@ -165,9 +165,7 @@ await for (final res in stream) { // 789 ``` -**Multi-modal prompt:** ([docs](https://platform.openai.com/docs/guides/vision)) - -You can either provide the image URL: +**Multi-modal prompt:** ```dart final res = await client.createChatCompletion( @@ -200,76 +198,7 @@ print(res.choices.first.message.content); // The fruit in the image is an apple. ``` -Or provide the base64-encoded image: -```dart -//... -ChatCompletionMessage.user( - content: ChatCompletionUserMessageContent.parts( - [ - ChatCompletionMessageContentPart.text( - text: 'What fruit is this?', - ), - ChatCompletionMessageContentPart.image( - imageUrl: ChatCompletionMessageImageUrl( - url: '/9j/4AAQSkZJRgABAQAAAQABAAD/2wB...P3s/XHQ8cE/nmiupbL0+fz/r/MjnSbsr69/Rdu1j//2Q==', - detail: ChatCompletionMessageImageDetail.high, - ), - ), - ], - ), -), -//... -``` - -**Structured output: ([docs](https://platform.openai.com/docs/guides/structured-outputs))** - -Structured Outputs is a feature that ensures the model will always generate responses that adhere to your supplied JSON Schema. - -```dart -final res = await client.createChatCompletion( - request: CreateChatCompletionRequest( - model: ChatCompletionModel.model( - ChatCompletionModels.gpt4oMini, - ), - messages: [ - ChatCompletionMessage.system( - content: 'You are a helpful assistant. That extracts names from text.', - ), - ChatCompletionMessage.user( - content: ChatCompletionUserMessageContent.string( - 'John, Mary, and Peter.', - ), - ), - ], - temperature: 0, - responseFormat: ResponseFormat.jsonSchema( - jsonSchema: JsonSchemaObject( - name: 'Names', - description: 'A list of names', - strict: true, - schema: { - 'type': 'object', - 'properties': { - 'names': { - 'type': 'array', - 'items': { - 'type': 'string', - }, - }, - }, - 'additionalProperties': false, - 'required': ['names'], - }, - ), - ), - ), -); -// {"names":["John","Mary","Peter"]} -``` - -**JSON mode:** ([docs](https://platform.openai.com/docs/guides/structured-outputs/json-mode)) - -> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It us recommended to use Structured Outputs if it is supported for your use case. +**JSON mode:** ```dart final res = await client.createChatCompletion( @@ -298,9 +227,7 @@ final res = await client.createChatCompletion( // { "names": ["John", "Mary", "Peter"] } ``` -**Tools:** ([docs](https://platform.openai.com/docs/guides/function-calling)) - -Tool calling allows you to connect models to external tools and systems. 
+**Tools:** ```dart const function = FunctionObject( @@ -329,8 +256,8 @@ const tool = ChatCompletionTool( final res1 = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: ChatCompletionModel.model( - ChatCompletionModels.gpt4oMini, + model: const ChatCompletionModel.model( + ChatCompletionModels.gpt35Turbo, ), messages: [ ChatCompletionMessage.system( @@ -381,8 +308,6 @@ final answer = res2.choices.first.message.content; // The weather in Boston right now is sunny with a temperature of 22°C ``` -You can enable Structured Outputs for your tools by setting `strict: true` in your `FunctionObject` definition. Structured Outputs ensures that the arguments generated by the model for a tool call exactly match the JSON Schema you provided in the tool definition. - **Function calling:** (deprecated in favor of tools) ```dart @@ -408,7 +333,7 @@ const function = FunctionObject( final res1 = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-4o-mini'), + model: ChatCompletionModel.modelId('gpt-3.5-turbo'), messages: [ ChatCompletionMessage.system( content: 'You are a helpful assistant.', @@ -430,7 +355,7 @@ final functionResult = getCurrentWeather(arguments['location'], arguments['unit' final res2 = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-4o-mini'), + model: ChatCompletionModel.modelId('gpt-3.5-turbo'), messages: [ ChatCompletionMessage.system( content: 'You are a helpful assistant.', @@ -555,7 +480,7 @@ Related guide: [Fine-tune models](https://platform.openai.com/docs/guides/fine-t ```dart const request = CreateFineTuningJobRequest( - model: FineTuningModel.modelId('gpt-4o-mini'), + model: FineTuningModel.modelId('gpt-3.5-turbo'), trainingFile: 'file-abc123', validationFile: 'file-abc123', hyperparameters: FineTuningJobHyperparameters( @@ -843,7 +768,7 @@ final res = await client.createThreadMessage( ), MessageContent.imageUrl( imageUrl: MessageContentImageUrl( - url: 'https://example.com/image.jpg', // or base64-encoded image + url: 'https://example.com/image.jpg', ), ), ]), @@ -897,41 +822,6 @@ final res = await client.createThreadRun( ); ``` -You can also use Structured Outputs to ensure that the model-generated responses adhere to a specific JSON schema: - -```dart -final res = await client.createThreadRun( - threadId: threadId, - request: CreateRunRequest( - assistantId: assistantId, - instructions: 'You are a helpful assistant that extracts names from text.', - model: CreateRunRequestModel.modelId('gpt-4o'), - responseFormat: CreateRunRequestResponseFormat.responseFormat( - ResponseFormat.jsonSchema( - jsonSchema: JsonSchemaObject( - name: 'Names', - description: 'A list of names', - strict: true, - schema: { - 'type': 'object', - 'properties': { - 'names': { - 'type': 'array', - 'items': { - 'type': 'string', - }, - }, - }, - 'additionalProperties': false, - 'required': ['names'], - }, - ), - ) - ) - ), -); -``` - **Create run: (streaming)** ```dart @@ -1197,21 +1087,21 @@ final client = OpenAIClient( This client can be used to consume APIs that are compatible with the OpenAI API spec. 
-[GitHub Models](https://github.com/marketplace/models): +[TogetherAI](https://www.together.ai/): ```dart final client = OpenAIClient( - baseUrl: 'https://models.inference.ai.azure.com', - headers: { 'api-key': 'YOUR_GITHUB_TOKEN' }, + baseUrl: 'https://api.together.xyz/v1', + headers: { 'api-key': 'YOUR_TOGETHER_AI_API_KEY' }, ); ``` -[TogetherAI](https://www.together.ai/): +[Anyscale](https://www.anyscale.com/): ```dart final client = OpenAIClient( - baseUrl: 'https://api.together.xyz/v1', - headers: { 'api-key': 'YOUR_TOGETHER_AI_API_KEY' }, + baseUrl: 'https://api.endpoints.anyscale.com/v1', + headers: { 'api-key': 'YOUR_ANYSCALE_API_KEY' }, ); ``` diff --git a/packages/openai_dart/lib/openai_dart.dart b/packages/openai_dart/lib/openai_dart.dart index 57003125..87830981 100644 --- a/packages/openai_dart/lib/openai_dart.dart +++ b/packages/openai_dart/lib/openai_dart.dart @@ -1,4 +1,4 @@ -/// Dart client for the OpenAI API. Supports chat (GPT-4o, etc.), completions, embeddings, images (DALL·E 3), assistants (threads, runs, vector stores, etc.), batch, fine-tuning, etc. +/// Dart Client for the OpenAI API (completions, chat, embeddings, etc.). library; export 'src/client.dart'; diff --git a/packages/openai_dart/lib/src/client.dart b/packages/openai_dart/lib/src/client.dart index b01a1594..098a4cf2 100644 --- a/packages/openai_dart/lib/src/client.dart +++ b/packages/openai_dart/lib/src/client.dart @@ -18,8 +18,6 @@ class OpenAIClient extends g.OpenAIClient { /// - `apiKey`: your OpenAI API key. You can find your API key in the /// [OpenAI dashboard](https://platform.openai.com/account/api-keys). /// - `organization`: your OpenAI organization ID (if applicable). - /// - `beta`: the content to use for the `OpenAI-Beta` header which can be - /// used to enable beta features. /// /// Advance configuration options: /// - `baseUrl`: the base URL to use. Defaults to OpenAI's API URL. You can @@ -34,7 +32,6 @@ class OpenAIClient extends g.OpenAIClient { OpenAIClient({ final String? apiKey, final String? organization, - final String? beta = 'assistants=v2', final String? baseUrl, final Map? headers, final Map? queryParams, @@ -44,7 +41,7 @@ class OpenAIClient extends g.OpenAIClient { baseUrl: baseUrl, headers: { if (organization != null) 'OpenAI-Organization': organization, - if (beta != null) 'OpenAI-Beta': beta, + 'OpenAI-Beta': 'assistants=v2', ...?headers, }, queryParams: queryParams ?? const {}, diff --git a/packages/openai_dart/lib/src/generated/client.dart b/packages/openai_dart/lib/src/generated/client.dart index b58d7e15..aca8f85f 100644 --- a/packages/openai_dart/lib/src/generated/client.dart +++ b/packages/openai_dart/lib/src/generated/client.dart @@ -58,7 +58,7 @@ class OpenAIClientException implements Exception { // CLASS: OpenAIClient // ========================================== -/// Client for OpenAI API (v.2.3.0) +/// Client for OpenAI API (v.2.0.0) /// /// The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. class OpenAIClient { @@ -1175,14 +1175,11 @@ class OpenAIClient { /// /// `threadId`: The ID of the thread to run. /// - /// `include`: A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. 
- /// /// `request`: Request object for the Create run endpoint. /// /// `POST` `https://api.openai.com/v1/threads/{thread_id}/runs` Future createThreadRun({ required String threadId, - String? include, required CreateRunRequest request, }) async { final r = await makeRequest( @@ -1193,9 +1190,6 @@ class OpenAIClient { requestType: 'application/json', responseType: 'application/json', body: request, - queryParams: { - if (include != null) 'include': include, - }, ); return RunObject.fromJson(_jsonDecode(r)); } @@ -1330,8 +1324,6 @@ class OpenAIClient { /// /// `before`: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// `include`: A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. - /// /// `GET` `https://api.openai.com/v1/threads/{thread_id}/runs/{run_id}/steps` Future listThreadRunSteps({ required String threadId, @@ -1340,7 +1332,6 @@ class OpenAIClient { String order = 'desc', String? after, String? before, - String? include, }) async { final r = await makeRequest( baseUrl: 'https://api.openai.com/v1', @@ -1354,7 +1345,6 @@ class OpenAIClient { 'order': order, if (after != null) 'after': after, if (before != null) 'before': before, - if (include != null) 'include': include, }, ); return ListRunStepsResponse.fromJson(_jsonDecode(r)); @@ -1372,14 +1362,11 @@ class OpenAIClient { /// /// `stepId`: The ID of the run step to retrieve. /// - /// `include`: A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. - /// /// `GET` `https://api.openai.com/v1/threads/{thread_id}/runs/{run_id}/steps/{step_id}` Future getThreadRunStep({ required String threadId, required String runId, required String stepId, - String? include, }) async { final r = await makeRequest( baseUrl: 'https://api.openai.com/v1', @@ -1388,9 +1375,6 @@ class OpenAIClient { isMultipart: false, requestType: '', responseType: 'application/json', - queryParams: { - if (include != null) 'include': include, - }, ); return RunStepObject.fromJson(_jsonDecode(r)); } @@ -1862,7 +1846,7 @@ class OpenAIClient { // METHOD: cancelBatch // ------------------------------------------ - /// Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file. + /// Cancels an in-progress batch. /// /// `batchId`: The ID of the batch to cancel. 
/// diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart index 4c7ba8df..59bac618 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart @@ -36,47 +36,29 @@ class AssistantObject with _$AssistantObject { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. required String? instructions, - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. required List tools, /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. required Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
/// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) AssistantObjectResponseFormat? responseFormat, @@ -178,6 +160,8 @@ enum AssistantObjectObject { /// `auto` is the default value enum AssistantResponseFormatMode { + @JsonValue('none') + none, @JsonValue('auto') auto, } @@ -186,37 +170,25 @@ enum AssistantResponseFormatMode { // CLASS: AssistantObjectResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with -/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), -/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-3.5-turbo-1106`. -/// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures -/// the model will match your supplied JSON schema. Learn more in the -/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates -/// is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a -/// system or user message. Without this, the model may generate an unending stream of whitespace until the -/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note -/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the -/// generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class AssistantObjectResponseFormat with _$AssistantObjectResponseFormat { const AssistantObjectResponseFormat._(); /// `auto` is the default value - const factory AssistantObjectResponseFormat.mode( + const factory AssistantObjectResponseFormat.enumeration( AssistantResponseFormatMode value, ) = AssistantObjectResponseFormatEnumeration; /// No Description - const factory AssistantObjectResponseFormat.responseFormat( - ResponseFormat value, - ) = AssistantObjectResponseFormatResponseFormat; + const factory AssistantObjectResponseFormat.assistantsResponseFormat( + AssistantsResponseFormat value, + ) = AssistantObjectResponseFormatAssistantsResponseFormat; /// Object construction from a JSON representation factory AssistantObjectResponseFormat.fromJson(Map json) => @@ -243,8 +215,8 @@ class _AssistantObjectResponseFormatConverter } if (data is Map) { try { - return AssistantObjectResponseFormatResponseFormat( - ResponseFormat.fromJson(data), + return AssistantObjectResponseFormatAssistantsResponseFormat( + AssistantsResponseFormat.fromJson(data), ); } catch (e) {} } @@ -258,7 +230,8 @@ class _AssistantObjectResponseFormatConverter return switch (data) { AssistantObjectResponseFormatEnumeration(value: final v) => _$AssistantResponseFormatModeEnumMap[v]!, - AssistantObjectResponseFormatResponseFormat(value: final v) => v.toJson(), + AssistantObjectResponseFormatAssistantsResponseFormat(value: final v) => + v.toJson(), null => null, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart b/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart index 0686da7b..348155db 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart @@ -61,7 +61,7 @@ sealed class AssistantStreamEvent with _$AssistantStreamEvent { // UNION: RunStepStreamEvent // ------------------------------------------ - /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. + /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. const factory AssistantStreamEvent.runStepStreamEvent({ /// The type of the event. required EventType event, @@ -74,7 +74,7 @@ sealed class AssistantStreamEvent with _$AssistantStreamEvent { // UNION: RunStepStreamDeltaEvent // ------------------------------------------ - /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. + /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. const factory AssistantStreamEvent.runStepStreamDeltaEvent({ /// The type of the event. 
required EventType event, diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart index 30a5cacc..6e45f715 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart @@ -30,11 +30,7 @@ sealed class AssistantTools with _$AssistantTools { /// FileSearch tool const factory AssistantTools.fileSearch({ /// The type of tool being defined: `file_search` - required String type, - - /// Overrides for the file search tool. - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch, + @Default('file_search') String type, }) = AssistantToolsFileSearch; // ------------------------------------------ @@ -67,67 +63,3 @@ enum AssistantToolsEnumType { @JsonValue('function') function, } - -// ========================================== -// CLASS: AssistantToolsFileSearchFileSearch -// ========================================== - -/// Overrides for the file search tool. -@freezed -class AssistantToolsFileSearchFileSearch - with _$AssistantToolsFileSearchFileSearch { - const AssistantToolsFileSearchFileSearch._(); - - /// Factory constructor for AssistantToolsFileSearchFileSearch - const factory AssistantToolsFileSearchFileSearch({ - /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models - /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. - /// - /// Note that the file search tool may output fewer than `max_num_results` results. See the - /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) - /// for more information. - @JsonKey(name: 'max_num_results', includeIfNull: false) int? maxNumResults, - - /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and - /// a score_threshold of 0. - /// - /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) - /// for more information. - @JsonKey(name: 'ranking_options', includeIfNull: false) - FileSearchRankingOptions? rankingOptions, - }) = _AssistantToolsFileSearchFileSearch; - - /// Object construction from a JSON representation - factory AssistantToolsFileSearchFileSearch.fromJson( - Map json) => - _$AssistantToolsFileSearchFileSearchFromJson(json); - - /// List of all property names of schema - static const List propertyNames = [ - 'max_num_results', - 'ranking_options' - ]; - - /// Validation constants - static const maxNumResultsMinValue = 1; - static const maxNumResultsMaxValue = 50; - - /// Perform validations on the schema property values - String? validateSchema() { - if (maxNumResults != null && maxNumResults! < maxNumResultsMinValue) { - return "The value of 'maxNumResults' cannot be < $maxNumResultsMinValue"; - } - if (maxNumResults != null && maxNumResults! 
> maxNumResultsMaxValue) { - return "The value of 'maxNumResults' cannot be > $maxNumResultsMaxValue"; - } - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'max_num_results': maxNumResults, - 'ranking_options': rankingOptions, - }; - } -} diff --git a/packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart b/packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart new file mode 100644 index 00000000..bc5f9c8b --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart @@ -0,0 +1,53 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: AssistantsResponseFormat +// ========================================== + +/// An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. +@freezed +class AssistantsResponseFormat with _$AssistantsResponseFormat { + const AssistantsResponseFormat._(); + + /// Factory constructor for AssistantsResponseFormat + const factory AssistantsResponseFormat({ + /// Must be one of `text` or `json_object`. + @Default(AssistantsResponseFormatType.text) + AssistantsResponseFormatType type, + }) = _AssistantsResponseFormat; + + /// Object construction from a JSON representation + factory AssistantsResponseFormat.fromJson(Map json) => + _$AssistantsResponseFormatFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['type']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'type': type, + }; + } +} + +// ========================================== +// ENUM: AssistantsResponseFormatType +// ========================================== + +/// Must be one of `text` or `json_object`. +enum AssistantsResponseFormatType { + @JsonValue('text') + text, + @JsonValue('json_object') + jsonObject, +} diff --git a/packages/openai_dart/lib/src/generated/schema/batch.dart b/packages/openai_dart/lib/src/generated/schema/batch.dart index 471ac112..94cc6080 100644 --- a/packages/openai_dart/lib/src/generated/schema/batch.dart +++ b/packages/openai_dart/lib/src/generated/schema/batch.dart @@ -74,9 +74,7 @@ class Batch with _$Batch { @JsonKey(name: 'request_counts', includeIfNull: false) BatchRequestCounts? requestCounts, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
@JsonKey(includeIfNull: false) dynamic metadata, }) = _Batch; diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart index 4b4adc2c..8678903a 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart @@ -16,10 +16,7 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { /// Factory constructor for ChatCompletionLogprobs const factory ChatCompletionLogprobs({ /// A list of message content tokens with log probability information. - @JsonKey(includeIfNull: false) List? content, - - /// A list of message refusal tokens with log probability information. - @JsonKey(includeIfNull: false) List? refusal, + required List? content, }) = _ChatCompletionLogprobs; /// Object construction from a JSON representation @@ -27,7 +24,7 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { _$ChatCompletionLogprobsFromJson(json); /// List of all property names of schema - static const List propertyNames = ['content', 'refusal']; + static const List propertyNames = ['content']; /// Perform validations on the schema property values String? validateSchema() { @@ -38,7 +35,6 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { Map toMap() { return { 'content': content, - 'refusal': refusal, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart index 93afcd9b..ae4d6e9c 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart @@ -59,9 +59,6 @@ sealed class ChatCompletionMessage with _$ChatCompletionMessage { /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. @JsonKey(includeIfNull: false) String? content, - /// The refusal message by the assistant. - @JsonKey(includeIfNull: false) String? refusal, - /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. @JsonKey(includeIfNull: false) String? name, @@ -138,12 +135,12 @@ sealed class ChatCompletionUserMessageContent with _$ChatCompletionUserMessageContent { const ChatCompletionUserMessageContent._(); - /// An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. + /// An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-vision-preview` model. const factory ChatCompletionUserMessageContent.parts( List value, ) = ChatCompletionMessageContentParts; - /// The text contents of the user message. + /// The text contents of the message. const factory ChatCompletionUserMessageContent.string( String value, ) = ChatCompletionUserMessageContentString; @@ -161,11 +158,9 @@ class _ChatCompletionUserMessageContentConverter @override ChatCompletionUserMessageContent fromJson(Object? 
data) { - if (data is List && data.every((item) => item is Map)) { - return ChatCompletionMessageContentParts(data - .map((i) => ChatCompletionMessageContentPart.fromJson( - i as Map)) - .toList(growable: false)); + if (data is List && + data.every((item) => item is ChatCompletionMessageContentPart)) { + return ChatCompletionMessageContentParts(data.cast()); } if (data is String) { return ChatCompletionUserMessageContentString(data); diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart index 6e38e239..e96bf346 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart @@ -18,7 +18,7 @@ sealed class ChatCompletionMessageContentPart // UNION: ChatCompletionMessageContentPartText // ------------------------------------------ - /// A text content part of a message. + /// A text content part of a user message. const factory ChatCompletionMessageContentPart.text({ /// The type of the content part, in this case `text`. @Default(ChatCompletionMessageContentPartType.text) @@ -32,7 +32,8 @@ sealed class ChatCompletionMessageContentPart // UNION: ChatCompletionMessageContentPartImage // ------------------------------------------ - /// An image content part of a user message. + /// Union constructor for [ChatCompletionMessageContentPartImage] + @FreezedUnionValue('image_url') const factory ChatCompletionMessageContentPart.image({ /// The type of the content part, in this case `image_url`. @Default(ChatCompletionMessageContentPartType.imageUrl) @@ -42,20 +43,6 @@ sealed class ChatCompletionMessageContentPart @JsonKey(name: 'image_url') required ChatCompletionMessageImageUrl imageUrl, }) = ChatCompletionMessageContentPartImage; - // ------------------------------------------ - // UNION: ChatCompletionMessageContentPartRefusal - // ------------------------------------------ - - /// A refusal content part of a message. - const factory ChatCompletionMessageContentPart.refusal({ - /// The type of the content part, in this case `refusal`. - @Default(ChatCompletionMessageContentPartType.refusal) - ChatCompletionMessageContentPartType type, - - /// The refusal message generated by the model. 
- required String refusal, - }) = ChatCompletionMessageContentPartRefusal; - /// Object construction from a JSON representation factory ChatCompletionMessageContentPart.fromJson( Map json) => @@ -71,8 +58,6 @@ enum ChatCompletionMessageContentPartEnumType { text, @JsonValue('image_url') imageUrl, - @JsonValue('refusal') - refusal, } // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart index 1aeebe14..0b4409fb 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart @@ -14,6 +14,4 @@ enum ChatCompletionMessageContentPartType { text, @JsonValue('image_url') imageUrl, - @JsonValue('refusal') - refusal, } diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart index 8d81379d..1b4f0705 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart @@ -79,10 +79,7 @@ class ChatCompletionStreamResponseChoiceLogprobs /// Factory constructor for ChatCompletionStreamResponseChoiceLogprobs const factory ChatCompletionStreamResponseChoiceLogprobs({ /// A list of message content tokens with log probability information. - @JsonKey(includeIfNull: false) List? content, - - /// A list of message refusal tokens with log probability information. - @JsonKey(includeIfNull: false) List? refusal, + required List? content, }) = _ChatCompletionStreamResponseChoiceLogprobs; /// Object construction from a JSON representation @@ -91,7 +88,7 @@ class ChatCompletionStreamResponseChoiceLogprobs _$ChatCompletionStreamResponseChoiceLogprobsFromJson(json); /// List of all property names of schema - static const List propertyNames = ['content', 'refusal']; + static const List propertyNames = ['content']; /// Perform validations on the schema property values String? validateSchema() { @@ -102,7 +99,6 @@ class ChatCompletionStreamResponseChoiceLogprobs Map toMap() { return { 'content': content, - 'refusal': refusal, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart index 5cc5fa0d..e676c18c 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart @@ -19,9 +19,6 @@ class ChatCompletionStreamResponseDelta /// The contents of the chunk message. @JsonKey(includeIfNull: false) String? content, - /// The refusal message generated by the model. - @JsonKey(includeIfNull: false) String? refusal, - /// The name and arguments of a function that should be called, as generated by the model. @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? 
functionCall, @@ -46,7 +43,6 @@ class ChatCompletionStreamResponseDelta /// List of all property names of schema static const List propertyNames = [ 'content', - 'refusal', 'function_call', 'tool_calls', 'role' @@ -61,7 +57,6 @@ class ChatCompletionStreamResponseDelta Map toMap() { return { 'content': content, - 'refusal': refusal, 'function_call': functionCall, 'tool_calls': toolCalls, 'role': role, diff --git a/packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart deleted file mode 100644 index a8f0c03d..00000000 --- a/packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart +++ /dev/null @@ -1,54 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: ChunkingStrategyRequestParam -// ========================================== - -/// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. -@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) -sealed class ChunkingStrategyRequestParam with _$ChunkingStrategyRequestParam { - const ChunkingStrategyRequestParam._(); - - // ------------------------------------------ - // UNION: AutoChunkingStrategyRequestParam - // ------------------------------------------ - - /// Auto Chunking Strategy, the default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` - /// and `chunk_overlap_tokens` of `400`. - const factory ChunkingStrategyRequestParam.auto({ - /// Always `auto`. - required String type, - }) = AutoChunkingStrategyRequestParam; - - // ------------------------------------------ - // UNION: StaticChunkingStrategyRequestParam - // ------------------------------------------ - - /// Static chunking strategy - const factory ChunkingStrategyRequestParam.static({ - /// Always `static`. - required String type, - - /// Static chunking strategy - required StaticChunkingStrategy static, - }) = StaticChunkingStrategyRequestParam; - - /// Object construction from a JSON representation - factory ChunkingStrategyRequestParam.fromJson(Map json) => - _$ChunkingStrategyRequestParamFromJson(json); -} - -// ========================================== -// ENUM: ChunkingStrategyRequestParamEnumType -// ========================================== - -enum ChunkingStrategyRequestParamEnumType { - @JsonValue('auto') - auto, - @JsonValue('static') - static, -} diff --git a/packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart deleted file mode 100644 index c706df60..00000000 --- a/packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart +++ /dev/null @@ -1,55 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: ChunkingStrategyResponseParam -// ========================================== - -/// The chunking strategy used to chunk the file(s). 
-@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) -sealed class ChunkingStrategyResponseParam - with _$ChunkingStrategyResponseParam { - const ChunkingStrategyResponseParam._(); - - // ------------------------------------------ - // UNION: StaticChunkingStrategyResponseParam - // ------------------------------------------ - - /// Static Chunking Strategy. - const factory ChunkingStrategyResponseParam.static({ - /// Always `static`. - required String type, - - /// Static chunking strategy - required StaticChunkingStrategy static, - }) = StaticChunkingStrategyResponseParam; - - // ------------------------------------------ - // UNION: OtherChunkingStrategyResponseParam - // ------------------------------------------ - - /// Other Chunking Strategy. This is returned when the chunking strategy is unknown. Typically, this is because - /// the file was indexed before the `chunking_strategy` concept was introduced in the API. - const factory ChunkingStrategyResponseParam.other({ - /// Always `other`. - required String type, - }) = OtherChunkingStrategyResponseParam; - - /// Object construction from a JSON representation - factory ChunkingStrategyResponseParam.fromJson(Map json) => - _$ChunkingStrategyResponseParamFromJson(json); -} - -// ========================================== -// ENUM: ChunkingStrategyResponseParamEnumType -// ========================================== - -enum ChunkingStrategyResponseParamEnumType { - @JsonValue('static') - static, - @JsonValue('other') - other, -} diff --git a/packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart b/packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart deleted file mode 100644 index 14fe08a8..00000000 --- a/packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart +++ /dev/null @@ -1,41 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: CompletionTokensDetails -// ========================================== - -/// Breakdown of tokens used in a completion. -@freezed -class CompletionTokensDetails with _$CompletionTokensDetails { - const CompletionTokensDetails._(); - - /// Factory constructor for CompletionTokensDetails - const factory CompletionTokensDetails({ - /// Tokens generated by the model for reasoning. - @JsonKey(name: 'reasoning_tokens', includeIfNull: false) - int? reasoningTokens, - }) = _CompletionTokensDetails; - - /// Object construction from a JSON representation - factory CompletionTokensDetails.fromJson(Map json) => - _$CompletionTokensDetailsFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['reasoning_tokens']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'reasoning_tokens': reasoningTokens, - }; - } -} diff --git a/packages/openai_dart/lib/src/generated/schema/completion_usage.dart b/packages/openai_dart/lib/src/generated/schema/completion_usage.dart index 86877b8e..17826175 100644 --- a/packages/openai_dart/lib/src/generated/schema/completion_usage.dart +++ b/packages/openai_dart/lib/src/generated/schema/completion_usage.dart @@ -23,10 +23,6 @@ class CompletionUsage with _$CompletionUsage { /// Total number of tokens used in the request (prompt + completion). 
@JsonKey(name: 'total_tokens') required int totalTokens, - - /// Breakdown of tokens used in a completion. - @JsonKey(name: 'completion_tokens_details', includeIfNull: false) - CompletionTokensDetails? completionTokensDetails, }) = _CompletionUsage; /// Object construction from a JSON representation @@ -37,8 +33,7 @@ class CompletionUsage with _$CompletionUsage { static const List propertyNames = [ 'completion_tokens', 'prompt_tokens', - 'total_tokens', - 'completion_tokens_details' + 'total_tokens' ]; /// Perform validations on the schema property values @@ -52,7 +47,6 @@ class CompletionUsage with _$CompletionUsage { 'completion_tokens': completionTokens, 'prompt_tokens': promptTokens, 'total_tokens': totalTokens, - 'completion_tokens_details': completionTokensDetails, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart index 312d8f5c..16db2e01 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart @@ -27,47 +27,29 @@ class CreateAssistantRequest with _$CreateAssistantRequest { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. @JsonKey(includeIfNull: false) String? instructions, - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. @Default([]) List tools, /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateAssistantRequestResponseFormat? responseFormat, @@ -153,8 +135,6 @@ class CreateAssistantRequest with _$CreateAssistantRequest { /// Available assistant models. Mind that the list may not be exhaustive nor up-to-date. 
enum AssistantModels { - @JsonValue('chatgpt-4o-latest') - chatgpt4oLatest, @JsonValue('gpt-4') gpt4, @JsonValue('gpt-4-32k') @@ -183,12 +163,6 @@ enum AssistantModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, - @JsonValue('gpt-4o-2024-08-06') - gpt4o20240806, - @JsonValue('gpt-4o-mini') - gpt4oMini, - @JsonValue('gpt-4o-mini-2024-07-18') - gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') @@ -203,14 +177,6 @@ enum AssistantModels { gpt35Turbo0613, @JsonValue('gpt-3.5-turbo-1106') gpt35Turbo1106, - @JsonValue('o1-mini') - o1Mini, - @JsonValue('o1-mini-2024-09-12') - o1Mini20240912, - @JsonValue('o1-preview') - o1Preview, - @JsonValue('o1-preview-2024-09-12') - o1Preview20240912, } // ========================================== @@ -274,6 +240,8 @@ class _AssistantModelConverter /// `auto` is the default value enum CreateAssistantResponseFormatMode { + @JsonValue('none') + none, @JsonValue('auto') auto, } @@ -282,23 +250,11 @@ enum CreateAssistantResponseFormatMode { // CLASS: CreateAssistantRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with -/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), -/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-3.5-turbo-1106`. -/// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures -/// the model will match your supplied JSON schema. Learn more in the -/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates -/// is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a -/// system or user message. Without this, the model may generate an unending stream of whitespace until the -/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note -/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the -/// generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@freezed sealed class CreateAssistantRequestResponseFormat with _$CreateAssistantRequestResponseFormat { @@ -310,9 +266,9 @@ sealed class CreateAssistantRequestResponseFormat ) = CreateAssistantRequestResponseFormatEnumeration; /// No Description - const factory CreateAssistantRequestResponseFormat.responseFormat( - ResponseFormat value, - ) = CreateAssistantRequestResponseFormatResponseFormat; + const factory CreateAssistantRequestResponseFormat.format( + AssistantsResponseFormat value, + ) = CreateAssistantRequestResponseFormatAssistantsResponseFormat; /// Object construction from a JSON representation factory CreateAssistantRequestResponseFormat.fromJson( @@ -342,8 +298,8 @@ class _CreateAssistantRequestResponseFormatConverter } if (data is Map) { try { - return CreateAssistantRequestResponseFormatResponseFormat( - ResponseFormat.fromJson(data), + return CreateAssistantRequestResponseFormatAssistantsResponseFormat( + AssistantsResponseFormat.fromJson(data), ); } catch (e) {} } @@ -357,7 +313,9 @@ class _CreateAssistantRequestResponseFormatConverter return switch (data) { CreateAssistantRequestResponseFormatEnumeration(value: final v) => _$CreateAssistantResponseFormatModeEnumMap[v]!, - CreateAssistantRequestResponseFormatResponseFormat(value: final v) => + CreateAssistantRequestResponseFormatAssistantsResponseFormat( + value: final v + ) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart b/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart index b7a86f72..5014b4f1 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart @@ -19,7 +19,7 @@ class CreateBatchRequest with _$CreateBatchRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. @JsonKey(name: 'input_file_id') required String inputFileId, /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 3d59ae2b..997af317 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -15,12 +15,10 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Factory constructor for CreateChatCompletionRequest const factory CreateChatCompletionRequest({ - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) - /// table for details on which models work with the Chat API. + /// ID of the model to use. 
See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. @_ChatCompletionModelConverter() required ChatCompletionModel model, - /// A list of messages comprising the conversation so far. - /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). required List messages, /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @@ -32,37 +30,22 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias - /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to - /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase - /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the - /// relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. @JsonKey(name: 'logit_bias', includeIfNull: false) Map? logitBias, - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of - /// each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. @JsonKey(includeIfNull: false) bool? logprobs, - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, - /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs, - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat - /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated - /// via API. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. /// - /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with - /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). + /// The total length of input tokens and generated tokens is limited by the model's context length. 
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, - /// An upper bound for the number of tokens that can be generated for a completion, including visible output - /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). - @JsonKey(name: 'max_completion_tokens', includeIfNull: false) - int? maxCompletionTokens, - - /// How many chat completion choices to generate for each input message. Note that you will be charged based on - /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @JsonKey(includeIfNull: false) @Default(1) int? n, /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. @@ -72,59 +55,25 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { @Default(0.0) double? presencePenalty, - /// An object specifying the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer - /// than `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model - /// will match your supplied JSON schema. - /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is - /// valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system - /// or user message. Without this, the model may generate an unending stream of whitespace until the generation - /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message - /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded - /// `max_tokens` or the conversation exceeded the max context length. - /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@JsonKey(name: 'response_format', includeIfNull: false) - ResponseFormat? responseFormat, + ChatCompletionResponseFormat? responseFormat, /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests - /// with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to - /// monitor changes in the backend. + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. @JsonKey(includeIfNull: false) int? seed, - /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers - /// subscribed to the scale tier service: - /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits - /// until they are exhausted. - /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the - /// default service tier with a lower uptime SLA and no latency guarantee. - /// - If set to 'default', the request will be processed using the default service tier with a lower uptime - /// SLA and no latency guarantee. - /// - When not set, the default behavior is 'auto'. - /// - /// When this parameter is set, the response body will include the `service_tier` utilized. - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue, - ) - CreateChatCompletionRequestServiceTier? serviceTier, - /// Up to 4 sequences where the API will stop generating further tokens. @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? stop, - /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @JsonKey(includeIfNull: false) @Default(false) bool? stream, /// Options for streaming response. Only set this when you set `stream: true`. @@ -141,28 +90,20 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// We generally recommend altering this or `temperature` but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// A list of tools the model may call. Currently, only functions are supported as a tool. - /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are - /// supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. 
Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. @JsonKey(includeIfNull: false) List? tools, /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the - /// model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? toolChoice, - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? parallelToolCalls, - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). @JsonKey(includeIfNull: false) String? user, @@ -171,8 +112,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that - /// function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. @_ChatCompletionFunctionCallConverter() @@ -198,12 +138,10 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'logprobs', 'top_logprobs', 'max_tokens', - 'max_completion_tokens', 'n', 'presence_penalty', 'response_format', 'seed', - 'service_tier', 'stop', 'stream', 'stream_options', @@ -211,7 +149,6 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'top_p', 'tools', 'tool_choice', - 'parallel_tool_calls', 'user', 'function_call', 'functions' @@ -289,12 +226,10 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'logprobs': logprobs, 'top_logprobs': topLogprobs, 'max_tokens': maxTokens, - 'max_completion_tokens': maxCompletionTokens, 'n': n, 'presence_penalty': presencePenalty, 'response_format': responseFormat, 'seed': seed, - 'service_tier': serviceTier, 'stop': stop, 'stream': stream, 'stream_options': streamOptions, @@ -302,7 +237,6 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'top_p': topP, 'tools': tools, 'tool_choice': toolChoice, - 'parallel_tool_calls': parallelToolCalls, 'user': user, 'function_call': functionCall, 'functions': functions, @@ -316,8 +250,6 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Available completion models. Mind that the list may not be exhaustive nor up-to-date. 
enum ChatCompletionModels { - @JsonValue('chatgpt-4o-latest') - chatgpt4oLatest, @JsonValue('gpt-4') gpt4, @JsonValue('gpt-4-32k') @@ -346,12 +278,6 @@ enum ChatCompletionModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, - @JsonValue('gpt-4o-2024-08-06') - gpt4o20240806, - @JsonValue('gpt-4o-mini') - gpt4oMini, - @JsonValue('gpt-4o-mini-2024-07-18') - gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') @@ -366,22 +292,13 @@ enum ChatCompletionModels { gpt35Turbo0613, @JsonValue('gpt-3.5-turbo-1106') gpt35Turbo1106, - @JsonValue('o1-mini') - o1Mini, - @JsonValue('o1-mini-2024-09-12') - o1Mini20240912, - @JsonValue('o1-preview') - o1Preview, - @JsonValue('o1-preview-2024-09-12') - o1Preview20240912, } // ========================================== // CLASS: ChatCompletionModel // ========================================== -/// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) -/// table for details on which models work with the Chat API. +/// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. @freezed sealed class ChatCompletionModel with _$ChatCompletionModel { const ChatCompletionModel._(); @@ -434,25 +351,43 @@ class _ChatCompletionModelConverter } // ========================================== -// ENUM: CreateChatCompletionRequestServiceTier +// CLASS: ChatCompletionResponseFormat // ========================================== -/// Specifies the latency tier to use for processing the request. This parameter is relevant for customers -/// subscribed to the scale tier service: -/// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits -/// until they are exhausted. -/// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the -/// default service tier with a lower uptime SLA and no latency guarantee. -/// - If set to 'default', the request will be processed using the default service tier with a lower uptime -/// SLA and no latency guarantee. -/// - When not set, the default behavior is 'auto'. +/// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. /// -/// When this parameter is set, the response body will include the `service_tier` utilized. -enum CreateChatCompletionRequestServiceTier { - @JsonValue('auto') - auto, - @JsonValue('default') - vDefault, +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
+@freezed +class ChatCompletionResponseFormat with _$ChatCompletionResponseFormat { + const ChatCompletionResponseFormat._(); + + /// Factory constructor for ChatCompletionResponseFormat + const factory ChatCompletionResponseFormat({ + /// Must be one of `text` or `json_object`. + @Default(ChatCompletionResponseFormatType.text) + ChatCompletionResponseFormatType type, + }) = _ChatCompletionResponseFormat; + + /// Object construction from a JSON representation + factory ChatCompletionResponseFormat.fromJson(Map json) => + _$ChatCompletionResponseFormatFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['type']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'type': type, + }; + } } // ========================================== @@ -532,8 +467,7 @@ enum ChatCompletionToolChoiceMode { /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. -/// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the -/// model to call that tool. +/// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @freezed @@ -621,8 +555,7 @@ enum ChatCompletionFunctionCallMode { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. -/// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that -/// function. +/// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. @freezed @@ -687,3 +620,15 @@ class _ChatCompletionFunctionCallConverter }; } } + +// ========================================== +// ENUM: ChatCompletionResponseFormatType +// ========================================== + +/// Must be one of `text` or `json_object`. +enum ChatCompletionResponseFormatType { + @JsonValue('text') + text, + @JsonValue('json_object') + jsonObject, +} diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart index 9a9687d7..95771ce0 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart @@ -27,15 +27,6 @@ class CreateChatCompletionResponse with _$CreateChatCompletionResponse { /// The model used for the chat completion. required String model, - /// The service tier used for processing the request. This field is only included if the `service_tier` parameter - /// is specified in the request. - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue, - ) - ServiceTier? 
serviceTier, - /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. @@ -59,7 +50,6 @@ class CreateChatCompletionResponse with _$CreateChatCompletionResponse { 'choices', 'created', 'model', - 'service_tier', 'system_fingerprint', 'object', 'usage' @@ -77,7 +67,6 @@ class CreateChatCompletionResponse with _$CreateChatCompletionResponse { 'choices': choices, 'created': created, 'model': model, - 'service_tier': serviceTier, 'system_fingerprint': systemFingerprint, 'object': object, 'usage': usage, diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart index cc0341fc..18cab5fa 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart @@ -24,20 +24,11 @@ class CreateChatCompletionStreamResponse required List choices, /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - @JsonKey(includeIfNull: false) int? created, + required int created, /// The model to generate the completion. @JsonKey(includeIfNull: false) String? model, - /// The service tier used for processing the request. This field is only included if the `service_tier` parameter - /// is specified in the request. - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue, - ) - ServiceTier? serviceTier, - /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact @@ -45,7 +36,7 @@ class CreateChatCompletionStreamResponse String? systemFingerprint, /// The object type, which is always `chat.completion.chunk`. - @JsonKey(includeIfNull: false) String? object, + required String object, /// Usage statistics for the completion request. @JsonKey(includeIfNull: false) CompletionUsage? usage, @@ -62,7 +53,6 @@ class CreateChatCompletionStreamResponse 'choices', 'created', 'model', - 'service_tier', 'system_fingerprint', 'object', 'usage' @@ -80,7 +70,6 @@ class CreateChatCompletionStreamResponse 'choices': choices, 'created': created, 'model': model, - 'service_tier': serviceTier, 'system_fingerprint': systemFingerprint, 'object': object, 'usage': usage, diff --git a/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart index ff66b86c..31bb714a 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart @@ -347,7 +347,7 @@ class _CompletionPromptConverter @override CompletionPrompt fromJson(Object? 
data) { - if (data is List && data.every((item) => item is List)) { + if (data is List && data.every((item) => item is List)) { return CompletionPromptListListInt(data.cast()); } if (data is List && data.every((item) => item is int)) { diff --git a/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart b/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart index 10c24925..fec9f621 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart @@ -179,7 +179,7 @@ class _EmbeddingInputConverter @override EmbeddingInput fromJson(Object? data) { - if (data is List && data.every((item) => item is List)) { + if (data is List && data.every((item) => item is List)) { return EmbeddingInputListListInt(data.cast()); } if (data is List && data.every((item) => item is int)) { diff --git a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart index 863ffb57..14929898 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart @@ -23,12 +23,7 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose - /// `fine-tune`. - /// - /// The contents of the file should differ depending on if the model uses the - /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or - /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. @JsonKey(name: 'training_file') required String trainingFile, @@ -37,9 +32,9 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) FineTuningJobHyperparameters? hyperparameters, - /// A string of up to 64 characters that will be added to your fine-tuned model name. + /// A string of up to 18 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. @JsonKey(includeIfNull: false) String? suffix, /// The ID of an uploaded file that contains validation data. 
@@ -80,7 +75,7 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { /// Validation constants static const suffixMinLengthValue = 1; - static const suffixMaxLengthValue = 64; + static const suffixMaxLengthValue = 40; static const seedMinValue = 0; static const seedMaxValue = 2147483647; @@ -127,8 +122,6 @@ enum FineTuningModels { davinci002, @JsonValue('gpt-3.5-turbo') gpt35Turbo, - @JsonValue('gpt-4o-mini') - gpt4oMini, } // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/create_message_request.dart b/packages/openai_dart/lib/src/generated/schema/create_message_request.dart index fc42a4d2..bad29bc1 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_message_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_message_request.dart @@ -25,9 +25,7 @@ class CreateMessageRequest with _$CreateMessageRequest { /// A list of files attached to the message, and the tools they were added to. @JsonKey(includeIfNull: false) List? attachments, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _CreateMessageRequest; @@ -90,10 +88,8 @@ class _CreateMessageRequestContentConverter @override CreateMessageRequestContent fromJson(Object? data) { - if (data is List && data.every((item) => item is Map)) { - return CreateMessageRequestContentListMessageContent(data - .map((i) => MessageContent.fromJson(i as Map)) - .toList(growable: false)); + if (data is List && data.every((item) => item is MessageContent)) { + return CreateMessageRequestContentListMessageContent(data.cast()); } if (data is String) { return CreateMessageRequestContentString(data); diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 3698ed7c..edd89f09 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -37,18 +37,13 @@ class CreateRunRequest with _$CreateRunRequest { /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. @JsonKey(includeIfNull: false) List? tools, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. 
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, @@ -74,28 +69,11 @@ class CreateRunRequest with _$CreateRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? toolChoice, - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? parallelToolCalls, - - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
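// --- Editor's note (illustrative sketch, not part of this patch) ---
// This hunk renames the union case to CreateRunRequestResponseFormat.format
// and switches its payload to AssistantsResponseFormat. Since the fields of
// AssistantsResponseFormat are not shown in this diff, the sketch builds it
// through `fromJson`, which is exactly what the converter below does with a
// map payload; the {'type': 'json_object'} shape is taken from the doc
// comment above and is assumed to be accepted by that fromJson factory.
import 'package:openai_dart/openai_dart.dart';

void main() {
  final responseFormat = CreateRunRequestResponseFormat.format(
    AssistantsResponseFormat.fromJson({'type': 'json_object'}),
  );
  print(responseFormat);
}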
@_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? responseFormat, @@ -123,7 +101,6 @@ class CreateRunRequest with _$CreateRunRequest { 'max_completion_tokens', 'truncation_strategy', 'tool_choice', - 'parallel_tool_calls', 'response_format', 'stream' ]; @@ -178,7 +155,6 @@ class CreateRunRequest with _$CreateRunRequest { 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, 'tool_choice': toolChoice, - 'parallel_tool_calls': parallelToolCalls, 'response_format': responseFormat, 'stream': stream, }; @@ -191,8 +167,6 @@ class CreateRunRequest with _$CreateRunRequest { /// Available models. Mind that the list may not be exhaustive nor up-to-date. enum RunModels { - @JsonValue('chatgpt-4o-latest') - chatgpt4oLatest, @JsonValue('gpt-4') gpt4, @JsonValue('gpt-4-32k') @@ -221,12 +195,6 @@ enum RunModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, - @JsonValue('gpt-4o-2024-08-06') - gpt4o20240806, - @JsonValue('gpt-4o-mini') - gpt4oMini, - @JsonValue('gpt-4o-mini-2024-07-18') - gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') @@ -241,14 +209,6 @@ enum RunModels { gpt35Turbo0613, @JsonValue('gpt-3.5-turbo-1106') gpt35Turbo1106, - @JsonValue('o1-mini') - o1Mini, - @JsonValue('o1-mini-2024-09-12') - o1Mini20240912, - @JsonValue('o1-preview') - o1Preview, - @JsonValue('o1-preview-2024-09-12') - o1Preview20240912, } // ========================================== @@ -261,12 +221,12 @@ sealed class CreateRunRequestModel with _$CreateRunRequestModel { const CreateRunRequestModel._(); /// Available models. Mind that the list may not be exhaustive nor up-to-date. - const factory CreateRunRequestModel.model( + const factory CreateRunRequestModel.enumeration( RunModels value, ) = CreateRunRequestModelEnumeration; /// The ID of the model to use for this request. - const factory CreateRunRequestModel.modelId( + const factory CreateRunRequestModel.string( String value, ) = CreateRunRequestModelString; @@ -401,6 +361,8 @@ class _CreateRunRequestToolChoiceConverter /// `auto` is the default value enum CreateRunRequestResponseFormatMode { + @JsonValue('none') + none, @JsonValue('auto') auto, } @@ -409,23 +371,11 @@ enum CreateRunRequestResponseFormatMode { // CLASS: CreateRunRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with -/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), -/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-3.5-turbo-1106`. -/// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures -/// the model will match your supplied JSON schema. Learn more in the -/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates -/// is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
/// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a -/// system or user message. Without this, the model may generate an unending stream of whitespace until the -/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note -/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the -/// generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class CreateRunRequestResponseFormat with _$CreateRunRequestResponseFormat { @@ -437,9 +387,9 @@ sealed class CreateRunRequestResponseFormat ) = CreateRunRequestResponseFormatEnumeration; /// No Description - const factory CreateRunRequestResponseFormat.responseFormat( - ResponseFormat value, - ) = CreateRunRequestResponseFormatResponseFormat; + const factory CreateRunRequestResponseFormat.format( + AssistantsResponseFormat value, + ) = CreateRunRequestResponseFormatAssistantsResponseFormat; /// Object construction from a JSON representation factory CreateRunRequestResponseFormat.fromJson(Map json) => @@ -468,8 +418,8 @@ class _CreateRunRequestResponseFormatConverter } if (data is Map) { try { - return CreateRunRequestResponseFormatResponseFormat( - ResponseFormat.fromJson(data), + return CreateRunRequestResponseFormatAssistantsResponseFormat( + AssistantsResponseFormat.fromJson(data), ); } catch (e) {} } @@ -483,7 +433,7 @@ class _CreateRunRequestResponseFormatConverter return switch (data) { CreateRunRequestResponseFormatEnumeration(value: final v) => _$CreateRunRequestResponseFormatModeEnumMap[v]!, - CreateRunRequestResponseFormatResponseFormat(value: final v) => + CreateRunRequestResponseFormatAssistantsResponseFormat(value: final v) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index d58474f8..5f7692df 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -36,18 +36,13 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, @@ -73,28 +68,11 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? toolChoice, - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? parallelToolCalls, - - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -122,7 +100,6 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { 'max_completion_tokens', 'truncation_strategy', 'tool_choice', - 'parallel_tool_calls', 'response_format', 'stream' ]; @@ -177,7 +154,6 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, 'tool_choice': toolChoice, - 'parallel_tool_calls': parallelToolCalls, 'response_format': responseFormat, 'stream': stream, }; @@ -190,8 +166,6 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { /// Available models. Mind that the list may not be exhaustive nor up-to-date. enum ThreadAndRunModels { - @JsonValue('chatgpt-4o-latest') - chatgpt4oLatest, @JsonValue('gpt-4') gpt4, @JsonValue('gpt-4-32k') @@ -220,12 +194,6 @@ enum ThreadAndRunModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, - @JsonValue('gpt-4o-2024-08-06') - gpt4o20240806, - @JsonValue('gpt-4o-mini') - gpt4oMini, - @JsonValue('gpt-4o-mini-2024-07-18') - gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') @@ -240,14 +208,6 @@ enum ThreadAndRunModels { gpt35Turbo0613, @JsonValue('gpt-3.5-turbo-1106') gpt35Turbo1106, - @JsonValue('o1-mini') - o1Mini, - @JsonValue('o1-mini-2024-09-12') - o1Mini20240912, - @JsonValue('o1-preview') - o1Preview, - @JsonValue('o1-preview-2024-09-12') - o1Preview20240912, } // ========================================== @@ -407,6 +367,8 @@ class _CreateThreadAndRunRequestToolChoiceConverter /// `auto` is the default value enum CreateThreadAndRunRequestResponseFormatMode { + @JsonValue('none') + none, @JsonValue('auto') auto, } @@ -415,23 +377,11 @@ enum CreateThreadAndRunRequestResponseFormatMode { // CLASS: CreateThreadAndRunRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with -/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), -/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures -/// the model will match your supplied JSON schema. Learn more in the -/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates -/// is valid JSON. -/// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a -/// system or user message. 
Without this, the model may generate an unending stream of whitespace until the -/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note -/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the -/// generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class CreateThreadAndRunRequestResponseFormat with _$CreateThreadAndRunRequestResponseFormat { @@ -443,9 +393,9 @@ sealed class CreateThreadAndRunRequestResponseFormat ) = CreateThreadAndRunRequestResponseFormatEnumeration; /// No Description - const factory CreateThreadAndRunRequestResponseFormat.responseFormat( - ResponseFormat value, - ) = CreateThreadAndRunRequestResponseFormatResponseFormat; + const factory CreateThreadAndRunRequestResponseFormat.format( + AssistantsResponseFormat value, + ) = CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat; /// Object construction from a JSON representation factory CreateThreadAndRunRequestResponseFormat.fromJson( @@ -477,8 +427,8 @@ class _CreateThreadAndRunRequestResponseFormatConverter } if (data is Map) { try { - return CreateThreadAndRunRequestResponseFormatResponseFormat( - ResponseFormat.fromJson(data), + return CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat( + AssistantsResponseFormat.fromJson(data), ); } catch (e) {} } @@ -492,7 +442,9 @@ class _CreateThreadAndRunRequestResponseFormatConverter return switch (data) { CreateThreadAndRunRequestResponseFormatEnumeration(value: final v) => _$CreateThreadAndRunRequestResponseFormatModeEnumMap[v]!, - CreateThreadAndRunRequestResponseFormatResponseFormat(value: final v) => + CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat( + value: final v + ) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart index 2cfb4b35..22823647 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart @@ -22,9 +22,7 @@ class CreateThreadRequest with _$CreateThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? 
metadata, }) = _CreateThreadRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart index 3111c855..6a607eae 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart @@ -18,11 +18,6 @@ class CreateVectorStoreFileBatchRequest const factory CreateVectorStoreFileBatchRequest({ /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_ids') required List fileIds, - - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? chunkingStrategy, }) = _CreateVectorStoreFileBatchRequest; /// Object construction from a JSON representation @@ -31,7 +26,7 @@ class CreateVectorStoreFileBatchRequest _$CreateVectorStoreFileBatchRequestFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_ids', 'chunking_strategy']; + static const List propertyNames = ['file_ids']; /// Perform validations on the schema property values String? validateSchema() { @@ -42,7 +37,6 @@ class CreateVectorStoreFileBatchRequest Map toMap() { return { 'file_ids': fileIds, - 'chunking_strategy': chunkingStrategy, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart index c18eadee..742fae3b 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart @@ -17,11 +17,6 @@ class CreateVectorStoreFileRequest with _$CreateVectorStoreFileRequest { const factory CreateVectorStoreFileRequest({ /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_id') required String fileId, - - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? chunkingStrategy, }) = _CreateVectorStoreFileRequest; /// Object construction from a JSON representation @@ -29,7 +24,7 @@ class CreateVectorStoreFileRequest with _$CreateVectorStoreFileRequest { _$CreateVectorStoreFileRequestFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_id', 'chunking_strategy']; + static const List propertyNames = ['file_id']; /// Perform validations on the schema property values String? 
validateSchema() { @@ -40,7 +35,6 @@ class CreateVectorStoreFileRequest with _$CreateVectorStoreFileRequest { Map toMap() { return { 'file_id': fileId, - 'chunking_strategy': chunkingStrategy, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart index b26b786e..cce0ccd3 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart @@ -15,24 +15,17 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { /// Factory constructor for CreateVectorStoreRequest const factory CreateVectorStoreRequest({ - /// The name of the vector store. - @JsonKey(includeIfNull: false) String? name, - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + /// The name of the vector store. + required String name, + /// The expiration policy for a vector store. @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? chunkingStrategy, - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic metadata, }) = _CreateVectorStoreRequest; @@ -42,10 +35,9 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { /// List of all property names of schema static const List propertyNames = [ - 'name', 'file_ids', + 'name', 'expires_after', - 'chunking_strategy', 'metadata' ]; @@ -57,10 +49,9 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { /// Map representation of object (not serialized) Map toMap() { return { - 'name': name, 'file_ids': fileIds, + 'name': name, 'expires_after': expiresAfter, - 'chunking_strategy': chunkingStrategy, 'metadata': metadata, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart b/packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart deleted file mode 100644 index 6dfc6218..00000000 --- a/packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart +++ /dev/null @@ -1,17 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// ENUM: FileSearchRanker -// ========================================== - -/// The ranker to use for the file search. If not specified will use the `auto` ranker. 
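// --- Editor's note (illustrative sketch, not part of this patch) ---
// After the create_vector_store_request.dart hunk above, `name` becomes
// required and the chunking-strategy field is gone, so a request reduces to
// the fields kept by that hunk. The store name and file ID below are
// made-up placeholders, not values from the source.
import 'package:openai_dart/openai_dart.dart';

void main() {
  const request = CreateVectorStoreRequest(
    name: 'support-docs', // hypothetical vector store name
    fileIds: ['file-abc123'], // hypothetical File ID
  );
  // `toMap()` is defined in that hunk and returns the unserialized field map.
  print(request.toMap());
}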
-enum FileSearchRanker { - @JsonValue('auto') - auto, - @JsonValue('default_2024_08_21') - default20240821, -} diff --git a/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart b/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart deleted file mode 100644 index 03533c56..00000000 --- a/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart +++ /dev/null @@ -1,62 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: FileSearchRankingOptions -// ========================================== - -/// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and -/// a score_threshold of 0. -/// -/// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) -/// for more information. -@freezed -class FileSearchRankingOptions with _$FileSearchRankingOptions { - const FileSearchRankingOptions._(); - - /// Factory constructor for FileSearchRankingOptions - const factory FileSearchRankingOptions({ - /// The ranker to use for the file search. If not specified will use the `auto` ranker. - @JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue, - ) - FileSearchRanker? ranker, - - /// The score threshold for the file search. All values must be a floating point number between 0 and 1. - @JsonKey(name: 'score_threshold') required double scoreThreshold, - }) = _FileSearchRankingOptions; - - /// Object construction from a JSON representation - factory FileSearchRankingOptions.fromJson(Map json) => - _$FileSearchRankingOptionsFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['ranker', 'score_threshold']; - - /// Validation constants - static const scoreThresholdMinValue = 0.0; - static const scoreThresholdMaxValue = 1.0; - - /// Perform validations on the schema property values - String? validateSchema() { - if (scoreThreshold < scoreThresholdMinValue) { - return "The value of 'scoreThreshold' cannot be < $scoreThresholdMinValue"; - } - if (scoreThreshold > scoreThresholdMaxValue) { - return "The value of 'scoreThreshold' cannot be > $scoreThresholdMaxValue"; - } - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'ranker': ranker, - 'score_threshold': scoreThreshold, - }; - } -} diff --git a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart index 409aa1d7..51d89b60 100644 --- a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart +++ b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart @@ -15,10 +15,8 @@ class FineTuningJobHyperparameters with _$FineTuningJobHyperparameters { /// Factory constructor for FineTuningJobHyperparameters const factory FineTuningJobHyperparameters({ - /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - /// - /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number - /// manually, we support any number between 1 and 50 epochs. + /// The number of epochs to train the model for. 
An epoch refers to one + /// full cycle through the training dataset. @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') required FineTuningNEpochs nEpochs, @@ -58,10 +56,8 @@ enum FineTuningNEpochsOptions { // CLASS: FineTuningNEpochs // ========================================== -/// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. -/// -/// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number -/// manually, we support any number between 1 and 50 epochs. +/// The number of epochs to train the model for. An epoch refers to one +/// full cycle through the training dataset. @freezed sealed class FineTuningNEpochs with _$FineTuningNEpochs { const FineTuningNEpochs._(); diff --git a/packages/openai_dart/lib/src/generated/schema/function_object.dart b/packages/openai_dart/lib/src/generated/schema/function_object.dart index ac87dc02..8049253e 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_object.dart @@ -15,23 +15,16 @@ class FunctionObject with _$FunctionObject { /// Factory constructor for FunctionObject const factory FunctionObject({ - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a - /// maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. required String name, /// A description of what the function does, used by the model to choose when and how to call the function. @JsonKey(includeIfNull: false) String? description, - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) FunctionParameters? parameters, - - /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will - /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when - /// `strict` is `true`. Learn more about Structured Outputs in the - /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). - @JsonKey(includeIfNull: false) @Default(false) bool? 
strict, }) = _FunctionObject; /// Object construction from a JSON representation @@ -42,8 +35,7 @@ class FunctionObject with _$FunctionObject { static const List propertyNames = [ 'name', 'description', - 'parameters', - 'strict' + 'parameters' ]; /// Perform validations on the schema property values @@ -57,7 +49,6 @@ class FunctionObject with _$FunctionObject { 'name': name, 'description': description, 'parameters': parameters, - 'strict': strict, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/function_parameters.dart b/packages/openai_dart/lib/src/generated/schema/function_parameters.dart index 2429f8ba..abd11036 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_parameters.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_parameters.dart @@ -8,7 +8,7 @@ part of open_a_i_schema; // TYPE: FunctionParameters // ========================================== -/// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. +/// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. typedef FunctionParameters = Map; diff --git a/packages/openai_dart/lib/src/generated/schema/json_schema_object.dart b/packages/openai_dart/lib/src/generated/schema/json_schema_object.dart deleted file mode 100644 index 32f20701..00000000 --- a/packages/openai_dart/lib/src/generated/schema/json_schema_object.dart +++ /dev/null @@ -1,62 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: JsonSchemaObject -// ========================================== - -/// A JSON Schema object. -@freezed -class JsonSchemaObject with _$JsonSchemaObject { - const JsonSchemaObject._(); - - /// Factory constructor for JsonSchemaObject - const factory JsonSchemaObject({ - /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum - /// length of 64. - required String name, - - /// A description of what the response format is for, used by the model to determine how to respond in the - /// format. - @JsonKey(includeIfNull: false) String? description, - - /// The schema for the response format, described as a JSON Schema object. - required Map schema, - - /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always - /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when - /// `strict` is `true`. To learn more, read the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - @JsonKey(includeIfNull: false) @Default(false) bool? 
strict, - }) = _JsonSchemaObject; - - /// Object construction from a JSON representation - factory JsonSchemaObject.fromJson(Map json) => - _$JsonSchemaObjectFromJson(json); - - /// List of all property names of schema - static const List propertyNames = [ - 'name', - 'description', - 'schema', - 'strict' - ]; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'name': name, - 'description': description, - 'schema': schema, - 'strict': strict, - }; - } -} diff --git a/packages/openai_dart/lib/src/generated/schema/message_content.dart b/packages/openai_dart/lib/src/generated/schema/message_content.dart index 46783eae..14e23e22 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_content.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_content.dart @@ -52,19 +52,6 @@ sealed class MessageContent with _$MessageContent { required MessageContentText text, }) = MessageContentTextObject; - // ------------------------------------------ - // UNION: MessageContentRefusalObject - // ------------------------------------------ - - /// The refusal content generated by the assistant. - const factory MessageContent.refusal({ - /// Always `refusal`. - required String type, - - /// No Description - required String refusal, - }) = MessageContentRefusalObject; - /// Object construction from a JSON representation factory MessageContent.fromJson(Map json) => _$MessageContentFromJson(json); @@ -81,6 +68,4 @@ enum MessageContentEnumType { imageUrl, @JsonValue('text') text, - @JsonValue('refusal') - refusal, } diff --git a/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart b/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart index c8d3a8f1..5317431b 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart @@ -18,6 +18,9 @@ class MessageContentTextAnnotationsFileCitation const factory MessageContentTextAnnotationsFileCitation({ /// The ID of the specific File the citation is from. @JsonKey(name: 'file_id') required String fileId, + + /// The specific quote in the file. + required String quote, }) = _MessageContentTextAnnotationsFileCitation; /// Object construction from a JSON representation @@ -26,7 +29,7 @@ class MessageContentTextAnnotationsFileCitation _$MessageContentTextAnnotationsFileCitationFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_id']; + static const List propertyNames = ['file_id', 'quote']; /// Perform validations on the schema property values String? validateSchema() { @@ -37,6 +40,7 @@ class MessageContentTextAnnotationsFileCitation Map toMap() { return { 'file_id': fileId, + 'quote': quote, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart b/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart index 738ab400..f53291ee 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart @@ -46,49 +46,7 @@ sealed class MessageDeltaContent with _$MessageDeltaContent { @JsonKey(includeIfNull: false) MessageDeltaContentText? 
text, }) = MessageDeltaContentTextObject; - // ------------------------------------------ - // UNION: MessageDeltaContentRefusalObject - // ------------------------------------------ - - /// The refusal content that is part of a message. - const factory MessageDeltaContent.refusal({ - /// The index of the refusal part in the message. - required int index, - - /// Always `refusal`. - required String type, - - /// The refusal content generated by the assistant. - @JsonKey(includeIfNull: false) String? refusal, - }) = MessageDeltaContentRefusalObject; - - // ------------------------------------------ - // UNION: MessageDeltaContentImageUrlObject - // ------------------------------------------ - - /// References an image URL in the content of a message. - const factory MessageDeltaContent.imageUrl({ - /// The index of the content part in the message. - required int index, - - /// Always `image_url`. - required String type, - - /// The image URL part of a message. - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl, - }) = MessageDeltaContentImageUrlObject; - /// Object construction from a JSON representation factory MessageDeltaContent.fromJson(Map json) => _$MessageDeltaContentFromJson(json); } - -// ========================================== -// ENUM: MessageDeltaContentEnumType -// ========================================== - -enum MessageDeltaContentEnumType { - @JsonValue('refusal') - refusal, -} diff --git a/packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart b/packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart new file mode 100644 index 00000000..1008bbb0 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart @@ -0,0 +1,51 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: MessageDeltaContentImageUrlObject +// ========================================== + +/// References an image URL in the content of a message. +@freezed +class MessageDeltaContentImageUrlObject + with _$MessageDeltaContentImageUrlObject { + const MessageDeltaContentImageUrlObject._(); + + /// Factory constructor for MessageDeltaContentImageUrlObject + const factory MessageDeltaContentImageUrlObject({ + /// The index of the content part in the message. + @JsonKey(includeIfNull: false) int? index, + + /// Always `image_url`. + @JsonKey(includeIfNull: false) String? type, + + /// The image URL part of a message. + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl, + }) = _MessageDeltaContentImageUrlObject; + + /// Object construction from a JSON representation + factory MessageDeltaContentImageUrlObject.fromJson( + Map json) => + _$MessageDeltaContentImageUrlObjectFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['index', 'type', 'image_url']; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'index': index, + 'type': type, + 'image_url': imageUrl, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/message_object.dart b/packages/openai_dart/lib/src/generated/schema/message_object.dart index 9e991a27..fae9d2ae 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_object.dart @@ -58,9 +58,7 @@ class MessageObject with _$MessageObject { /// A list of files attached to the message, and the tools they were added to. required List? attachments, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. required Map? metadata, }) = _MessageObject; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart index 5bd7ad65..b02d123e 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart @@ -27,8 +27,7 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. @JsonKey(includeIfNull: false) String? instructions, - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. @Default([]) List tools, /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. @@ -38,39 +37,22 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) ModifyAssistantRequestResponseFormat? responseFormat, @@ -158,6 +140,8 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { /// `auto` is the default value enum ModifyAssistantResponseFormatMode { + @JsonValue('none') + none, @JsonValue('auto') auto, } @@ -166,23 +150,11 @@ enum ModifyAssistantResponseFormatMode { // CLASS: ModifyAssistantRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. 
Compatible with -/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), -/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-3.5-turbo-1106`. -/// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures -/// the model will match your supplied JSON schema. Learn more in the -/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates -/// is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a -/// system or user message. Without this, the model may generate an unending stream of whitespace until the -/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note -/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the -/// generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@freezed sealed class ModifyAssistantRequestResponseFormat with _$ModifyAssistantRequestResponseFormat { @@ -194,9 +166,9 @@ sealed class ModifyAssistantRequestResponseFormat ) = ModifyAssistantRequestResponseFormatEnumeration; /// No Description - const factory ModifyAssistantRequestResponseFormat.responseFormat( - ResponseFormat value, - ) = ModifyAssistantRequestResponseFormatResponseFormat; + const factory ModifyAssistantRequestResponseFormat.format( + AssistantsResponseFormat value, + ) = ModifyAssistantRequestResponseFormatAssistantsResponseFormat; /// Object construction from a JSON representation factory ModifyAssistantRequestResponseFormat.fromJson( @@ -226,8 +198,8 @@ class _ModifyAssistantRequestResponseFormatConverter } if (data is Map) { try { - return ModifyAssistantRequestResponseFormatResponseFormat( - ResponseFormat.fromJson(data), + return ModifyAssistantRequestResponseFormatAssistantsResponseFormat( + AssistantsResponseFormat.fromJson(data), ); } catch (e) {} } @@ -241,7 +213,9 @@ class _ModifyAssistantRequestResponseFormatConverter return switch (data) { ModifyAssistantRequestResponseFormatEnumeration(value: final v) => _$ModifyAssistantResponseFormatModeEnumMap[v]!, - ModifyAssistantRequestResponseFormatResponseFormat(value: final v) => + ModifyAssistantRequestResponseFormatAssistantsResponseFormat( + value: final v + ) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart index b7ec05e1..b6e7d119 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart @@ -15,9 +15,7 @@ class ModifyMessageRequest with _$ModifyMessageRequest { /// Factory constructor for ModifyMessageRequest const factory ModifyMessageRequest({ - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _ModifyMessageRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart index 973a0b3d..3d113815 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart @@ -15,9 +15,7 @@ class ModifyRunRequest with _$ModifyRunRequest { /// Factory constructor for ModifyRunRequest const factory ModifyRunRequest({ - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
@JsonKey(includeIfNull: false) Map? metadata, }) = _ModifyRunRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart index 96f4983f..a335f1b6 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart @@ -19,9 +19,7 @@ class ModifyThreadRequest with _$ModifyThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _ModifyThreadRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/response_format.dart b/packages/openai_dart/lib/src/generated/schema/response_format.dart deleted file mode 100644 index 7b975680..00000000 --- a/packages/openai_dart/lib/src/generated/schema/response_format.dart +++ /dev/null @@ -1,82 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: ResponseFormat -// ========================================== - -/// An object specifying the format that the model must output. Compatible with -/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), -/// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), -/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer -/// than `gpt-3.5-turbo-1106`. -/// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model -/// will match your supplied JSON schema. -/// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). -/// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is -/// valid JSON. -/// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system -/// or user message. Without this, the model may generate an unending stream of whitespace until the generation -/// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message -/// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded -/// `max_tokens` or the conversation exceeded the max context length. -@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) -sealed class ResponseFormat with _$ResponseFormat { - const ResponseFormat._(); - - // ------------------------------------------ - // UNION: ResponseFormatText - // ------------------------------------------ - - /// The model should respond with plain text. - const factory ResponseFormat.text({ - /// The type of response format being defined. 
- @Default(ResponseFormatType.text) ResponseFormatType type, - }) = ResponseFormatText; - - // ------------------------------------------ - // UNION: ResponseFormatJsonObject - // ------------------------------------------ - - /// The model should respond with a JSON object. - const factory ResponseFormat.jsonObject({ - /// The type of response format being defined. - @Default(ResponseFormatType.jsonObject) ResponseFormatType type, - }) = ResponseFormatJsonObject; - - // ------------------------------------------ - // UNION: ResponseFormatJsonSchema - // ------------------------------------------ - - /// The model should respond with a JSON object that adheres to the specified schema. - const factory ResponseFormat.jsonSchema({ - /// The type of response format being defined. - @Default(ResponseFormatType.jsonSchema) ResponseFormatType type, - - /// A JSON Schema object. - @JsonKey(name: 'json_schema') required JsonSchemaObject jsonSchema, - }) = ResponseFormatJsonSchema; - - /// Object construction from a JSON representation - factory ResponseFormat.fromJson(Map json) => - _$ResponseFormatFromJson(json); -} - -// ========================================== -// ENUM: ResponseFormatEnumType -// ========================================== - -enum ResponseFormatEnumType { - @JsonValue('text') - text, - @JsonValue('json_object') - jsonObject, - @JsonValue('json_schema') - jsonSchema, -} diff --git a/packages/openai_dart/lib/src/generated/schema/response_format_type.dart b/packages/openai_dart/lib/src/generated/schema/response_format_type.dart deleted file mode 100644 index da215209..00000000 --- a/packages/openai_dart/lib/src/generated/schema/response_format_type.dart +++ /dev/null @@ -1,19 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// ENUM: ResponseFormatType -// ========================================== - -/// The type of response format being defined. -enum ResponseFormatType { - @JsonValue('text') - text, - @JsonValue('json_object') - jsonObject, - @JsonValue('json_schema') - jsonSchema, -} diff --git a/packages/openai_dart/lib/src/generated/schema/run_object.dart b/packages/openai_dart/lib/src/generated/schema/run_object.dart index 98fd5f0c..e34403a8 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_object.dart @@ -68,9 +68,7 @@ class RunObject with _$RunObject { /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. required List tools, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. required Map? metadata, /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -101,27 +99,11 @@ class RunObject with _$RunObject { @JsonKey(name: 'tool_choice') required RunObjectToolChoice? 
toolChoice, - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. - @JsonKey(name: 'parallel_tool_calls') required bool? parallelToolCalls, - - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') required RunObjectResponseFormat responseFormat, @@ -158,7 +140,6 @@ class RunObject with _$RunObject { 'max_completion_tokens', 'truncation_strategy', 'tool_choice', - 'parallel_tool_calls', 'response_format' ]; @@ -206,7 +187,6 @@ class RunObject with _$RunObject { 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, 'tool_choice': toolChoice, - 'parallel_tool_calls': parallelToolCalls, 'response_format': responseFormat, }; } @@ -452,6 +432,8 @@ class _RunObjectToolChoiceConverter /// `auto` is the default value enum RunObjectResponseFormatMode { + @JsonValue('none') + none, @JsonValue('auto') auto, } @@ -460,23 +442,11 @@ enum RunObjectResponseFormatMode { // CLASS: RunObjectResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with -/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), -/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-3.5-turbo-1106`. 
-/// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures -/// the model will match your supplied JSON schema. Learn more in the -/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates -/// is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a -/// system or user message. Without this, the model may generate an unending stream of whitespace until the -/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note -/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the -/// generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@freezed sealed class RunObjectResponseFormat with _$RunObjectResponseFormat { const RunObjectResponseFormat._(); @@ -487,9 +457,9 @@ sealed class RunObjectResponseFormat with _$RunObjectResponseFormat { ) = RunObjectResponseFormatEnumeration; /// No Description - const factory RunObjectResponseFormat.responseFormat( - ResponseFormat value, - ) = RunObjectResponseFormatResponseFormat; + const factory RunObjectResponseFormat.format( + AssistantsResponseFormat value, + ) = RunObjectResponseFormatAssistantsResponseFormat; /// Object construction from a JSON representation factory RunObjectResponseFormat.fromJson(Map json) => @@ -513,8 +483,8 @@ class _RunObjectResponseFormatConverter } if (data is Map) { try { - return RunObjectResponseFormatResponseFormat( - ResponseFormat.fromJson(data), + return RunObjectResponseFormatAssistantsResponseFormat( + AssistantsResponseFormat.fromJson(data), ); } catch (e) {} } @@ -528,7 +498,8 @@ class _RunObjectResponseFormatConverter return switch (data) { RunObjectResponseFormatEnumeration(value: final v) => _$RunObjectResponseFormatModeEnumMap[v]!, - RunObjectResponseFormatResponseFormat(value: final v) => v.toJson(), + RunObjectResponseFormatAssistantsResponseFormat(value: final v) => + v.toJson(), }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart index 327de9f5..c4605b7b 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart @@ -42,9 +42,8 @@ sealed class RunStepDetailsToolCalls with _$RunStepDetailsToolCalls { /// The type of tool call. This is always going to be `file_search` for this type of tool call. required String type, - /// The definition of the file search that was called. - @JsonKey(name: 'file_search') - required RunStepDetailsToolCallsFileSearch fileSearch, + /// For now, this is always going to be an empty object. + @JsonKey(name: 'file_search') required Map fileSearch, }) = RunStepDetailsToolCallsFileSearchObject; // ------------------------------------------ diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart deleted file mode 100644 index 16f72322..00000000 --- a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart +++ /dev/null @@ -1,48 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: RunStepDetailsToolCallsFileSearch -// ========================================== - -/// The definition of the file search that was called. -@freezed -class RunStepDetailsToolCallsFileSearch - with _$RunStepDetailsToolCallsFileSearch { - const RunStepDetailsToolCallsFileSearch._(); - - /// Factory constructor for RunStepDetailsToolCallsFileSearch - const factory RunStepDetailsToolCallsFileSearch({ - /// The ranking options for the file search. - @JsonKey(name: 'ranking_options', includeIfNull: false) - RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, - - /// The results of the file search. - @JsonKey(includeIfNull: false) - List? 
results, - }) = _RunStepDetailsToolCallsFileSearch; - - /// Object construction from a JSON representation - factory RunStepDetailsToolCallsFileSearch.fromJson( - Map json) => - _$RunStepDetailsToolCallsFileSearchFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['ranking_options', 'results']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'ranking_options': rankingOptions, - 'results': results, - }; - } -} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart deleted file mode 100644 index 61b2ff06..00000000 --- a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart +++ /dev/null @@ -1,56 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: RunStepDetailsToolCallsFileSearchRankingOptionsObject -// ========================================== - -/// The ranking options for the file search. -@freezed -class RunStepDetailsToolCallsFileSearchRankingOptionsObject - with _$RunStepDetailsToolCallsFileSearchRankingOptionsObject { - const RunStepDetailsToolCallsFileSearchRankingOptionsObject._(); - - /// Factory constructor for RunStepDetailsToolCallsFileSearchRankingOptionsObject - const factory RunStepDetailsToolCallsFileSearchRankingOptionsObject({ - /// The ranker to use for the file search. If not specified will use the `auto` ranker. - required FileSearchRanker ranker, - - /// The score threshold for the file search. All values must be a floating point number between 0 and 1. - @JsonKey(name: 'score_threshold') required double scoreThreshold, - }) = _RunStepDetailsToolCallsFileSearchRankingOptionsObject; - - /// Object construction from a JSON representation - factory RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson( - Map json) => - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['ranker', 'score_threshold']; - - /// Validation constants - static const scoreThresholdMinValue = 0.0; - static const scoreThresholdMaxValue = 1.0; - - /// Perform validations on the schema property values - String? 
validateSchema() { - if (scoreThreshold < scoreThresholdMinValue) { - return "The value of 'scoreThreshold' cannot be < $scoreThresholdMinValue"; - } - if (scoreThreshold > scoreThresholdMaxValue) { - return "The value of 'scoreThreshold' cannot be > $scoreThresholdMaxValue"; - } - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'ranker': ranker, - 'score_threshold': scoreThreshold, - }; - } -} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart deleted file mode 100644 index 3ba23a07..00000000 --- a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart +++ /dev/null @@ -1,46 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: RunStepDetailsToolCallsFileSearchResultContent -// ========================================== - -/// The content of the result that was found. -@freezed -class RunStepDetailsToolCallsFileSearchResultContent - with _$RunStepDetailsToolCallsFileSearchResultContent { - const RunStepDetailsToolCallsFileSearchResultContent._(); - - /// Factory constructor for RunStepDetailsToolCallsFileSearchResultContent - const factory RunStepDetailsToolCallsFileSearchResultContent({ - /// The type of the content. - @Default('text') String type, - - /// The text content of the file. - @JsonKey(includeIfNull: false) String? text, - }) = _RunStepDetailsToolCallsFileSearchResultContent; - - /// Object construction from a JSON representation - factory RunStepDetailsToolCallsFileSearchResultContent.fromJson( - Map json) => - _$RunStepDetailsToolCallsFileSearchResultContentFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['type', 'text']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'type': type, - 'text': text, - }; - } -} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart deleted file mode 100644 index 4b1a1de0..00000000 --- a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart +++ /dev/null @@ -1,71 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: RunStepDetailsToolCallsFileSearchResultObject -// ========================================== - -/// A result instance of the file search. -@freezed -class RunStepDetailsToolCallsFileSearchResultObject - with _$RunStepDetailsToolCallsFileSearchResultObject { - const RunStepDetailsToolCallsFileSearchResultObject._(); - - /// Factory constructor for RunStepDetailsToolCallsFileSearchResultObject - const factory RunStepDetailsToolCallsFileSearchResultObject({ - /// The ID of the file that result was found in. 
- @JsonKey(name: 'file_id') required String fileId, - - /// The name of the file that result was found in. - @JsonKey(name: 'file_name') required String fileName, - - /// The score of the result. All values must be a floating point number between 0 and 1. - required double score, - - /// The content of the result that was found. The content is only included if requested via the include - /// query parameter. - @JsonKey(includeIfNull: false) - List? content, - }) = _RunStepDetailsToolCallsFileSearchResultObject; - - /// Object construction from a JSON representation - factory RunStepDetailsToolCallsFileSearchResultObject.fromJson( - Map json) => - _$RunStepDetailsToolCallsFileSearchResultObjectFromJson(json); - - /// List of all property names of schema - static const List propertyNames = [ - 'file_id', - 'file_name', - 'score', - 'content' - ]; - - /// Validation constants - static const scoreMinValue = 0.0; - static const scoreMaxValue = 1.0; - - /// Perform validations on the schema property values - String? validateSchema() { - if (score < scoreMinValue) { - return "The value of 'score' cannot be < $scoreMinValue"; - } - if (score > scoreMaxValue) { - return "The value of 'score' cannot be > $scoreMaxValue"; - } - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'file_id': fileId, - 'file_name': fileName, - 'score': score, - 'content': content, - }; - } -} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_object.dart b/packages/openai_dart/lib/src/generated/schema/run_step_object.dart index ede505da..2e56839e 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_step_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_step_object.dart @@ -58,9 +58,7 @@ class RunStepObject with _$RunStepObject { /// The Unix timestamp (in seconds) for when the run step completed. @JsonKey(name: 'completed_at') required int? completedAt, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. required Map? metadata, /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. 
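
Editorial note: the response-format converters changed in the hunks above (for example `_RunObjectResponseFormatConverter` and `_ModifyAssistantRequestResponseFormatConverter`) all handle the same wire shape: the JSON value is either a bare mode string (`none` or `auto`) or a full response-format object such as `{ "type": "json_object" }`, and the generated freezed unions expose this as `.mode(...)` / `.format(...)` factories. The sketch below is a minimal, self-contained Dart illustration of that either/or decoding only; the `ResponseFormatMode`, `ResponseFormatObject`, and `ResponseFormat` names are simplified stand-ins invented for this example, not the generated classes from the patch.

import 'dart:convert';

// Hypothetical simplified stand-ins for the generated union members.
enum ResponseFormatMode { none, auto }

class ResponseFormatObject {
  const ResponseFormatObject({required this.type});

  /// e.g. 'text' or 'json_object'
  final String type;

  factory ResponseFormatObject.fromJson(Map<String, dynamic> json) =>
      ResponseFormatObject(type: json['type'] as String);

  Map<String, dynamic> toJson() => {'type': type};
}

/// Either a bare mode string or a full response-format object, mirroring the
/// shape handled by the generated converters in the diff above.
class ResponseFormat {
  const ResponseFormat.mode(this.mode) : format = null;
  const ResponseFormat.format(this.format) : mode = null;

  final ResponseFormatMode? mode;
  final ResponseFormatObject? format;

  static ResponseFormat fromJson(Object? data) {
    if (data is String) {
      // Bare enum value, e.g. "auto" or "none".
      return ResponseFormat.mode(
        ResponseFormatMode.values.firstWhere((m) => m.name == data),
      );
    }
    if (data is Map<String, dynamic>) {
      // Full object, e.g. {"type": "json_object"}.
      return ResponseFormat.format(ResponseFormatObject.fromJson(data));
    }
    throw FormatException('Unexpected response_format value: $data');
  }

  // Serializes back to either the mode string or the object, as the
  // generated toJson branch does with a switch over the union cases.
  Object toJson() => mode != null ? mode!.name : format!.toJson();
}

void main() {
  for (final raw in ['"auto"', '{"type": "json_object"}']) {
    final decoded = ResponseFormat.fromJson(jsonDecode(raw));
    print('$raw -> ${jsonEncode(decoded.toJson())}');
  }
}

Usage note (still under the same assumptions): decoding `"auto"` yields the mode variant and round-trips back to the bare string, while decoding `{"type": "json_object"}` yields the object variant and round-trips back to the map, which is the behaviour the patched converters implement for `response_format` fields.
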
diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart index 265649d4..6d9b2613 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -22,8 +22,6 @@ part 'chat_completion_message_function_call.dart'; part 'chat_completion_function_call_option.dart'; part 'function_object.dart'; part 'function_parameters.dart'; -part 'response_format_type.dart'; -part 'json_schema_object.dart'; part 'chat_completion_tool.dart'; part 'chat_completion_named_tool_choice.dart'; part 'chat_completion_message_tool_calls.dart'; @@ -32,7 +30,6 @@ part 'chat_completion_stream_options.dart'; part 'create_chat_completion_response.dart'; part 'chat_completion_response_choice.dart'; part 'chat_completion_finish_reason.dart'; -part 'service_tier.dart'; part 'chat_completion_logprobs.dart'; part 'chat_completion_token_logprob.dart'; part 'chat_completion_token_top_logprob.dart'; @@ -42,7 +39,6 @@ part 'chat_completion_stream_response_delta.dart'; part 'chat_completion_stream_message_function_call.dart'; part 'chat_completion_stream_message_tool_call_chunk.dart'; part 'completion_usage.dart'; -part 'completion_tokens_details.dart'; part 'create_embedding_request.dart'; part 'create_embedding_response.dart'; part 'embedding.dart'; @@ -74,10 +70,9 @@ part 'create_assistant_request.dart'; part 'modify_assistant_request.dart'; part 'delete_assistant_response.dart'; part 'list_assistants_response.dart'; -part 'file_search_ranking_options.dart'; -part 'file_search_ranker.dart'; part 'assistants_named_tool_choice.dart'; part 'assistants_function_call_option.dart'; +part 'assistants_response_format.dart'; part 'truncation_object.dart'; part 'run_object.dart'; part 'run_completion_usage.dart'; @@ -110,6 +105,7 @@ part 'message_content_image_detail.dart'; part 'message_request_content_text_object.dart'; part 'message_content_text.dart'; part 'message_content_text_annotations_file_citation.dart'; +part 'message_delta_content_image_url_object.dart'; part 'message_delta_content_text.dart'; part 'message_delta_content_text_annotations_file_citation.dart'; part 'run_step_object.dart'; @@ -122,10 +118,6 @@ part 'run_step_details_tool_calls_code_object_code_interpreter.dart'; part 'run_step_delta_step_details_tool_calls_code_object_code_interpreter.dart'; part 'run_step_details_tool_calls_code_output_image.dart'; part 'run_step_delta_step_details_tool_calls_code_output_image.dart'; -part 'run_step_details_tool_calls_file_search.dart'; -part 'run_step_details_tool_calls_file_search_ranking_options_object.dart'; -part 'run_step_details_tool_calls_file_search_result_object.dart'; -part 'run_step_details_tool_calls_file_search_result_content.dart'; part 'run_step_completion_usage.dart'; part 'vector_store_expiration_after.dart'; part 'vector_store_object.dart'; @@ -134,7 +126,6 @@ part 'update_vector_store_request.dart'; part 'list_vector_stores_response.dart'; part 'delete_vector_store_response.dart'; part 'vector_store_file_object.dart'; -part 'static_chunking_strategy.dart'; part 'create_vector_store_file_request.dart'; part 'list_vector_store_files_response.dart'; part 'delete_vector_store_file_response.dart'; @@ -149,7 +140,6 @@ part 'batch.dart'; part 'list_batches_response.dart'; part 'chat_completion_message.dart'; part 'chat_completion_message_content_part.dart'; -part 'response_format.dart'; part 'assistant_tools.dart'; part 'message_content.dart'; part 
'message_delta_content.dart'; @@ -161,6 +151,4 @@ part 'run_step_details_tool_calls.dart'; part 'run_step_delta_step_details_tool_calls.dart'; part 'run_step_details_tool_calls_code_output.dart'; part 'run_step_delta_step_details_tool_calls_code_output.dart'; -part 'chunking_strategy_request_param.dart'; -part 'chunking_strategy_response_param.dart'; part 'assistant_stream_event.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index af25caaf..472cae5b 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -123,12 +123,8 @@ mixin _$CreateCompletionRequest { @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; - /// Serializes this CreateCompletionRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateCompletionRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateCompletionRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -182,8 +178,6 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateCompletionRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -282,8 +276,6 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of CreateCompletionRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionModelCopyWith<$Res> get model { @@ -292,8 +284,6 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateCompletionRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionPromptCopyWith<$Res>? get prompt { @@ -306,8 +296,6 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateCompletionRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionStopCopyWith<$Res>? get stop { @@ -320,8 +308,6 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateCompletionRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamOptionsCopyWith<$Res>? get streamOptions { @@ -391,8 +377,6 @@ class __$$CreateCompletionRequestImplCopyWithImpl<$Res> $Res Function(_$CreateCompletionRequestImpl) _then) : super(_value, _then); - /// Create a copy of CreateCompletionRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -692,7 +676,7 @@ class _$CreateCompletionRequestImpl extends _CreateCompletionRequest { (identical(other.user, user) || other.user == user)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -715,9 +699,7 @@ class _$CreateCompletionRequestImpl extends _CreateCompletionRequest { topP, user); - /// Create a copy of CreateCompletionRequest - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateCompletionRequestImplCopyWith<_$CreateCompletionRequestImpl> @@ -764,129 +746,127 @@ abstract class _CreateCompletionRequest extends CreateCompletionRequest { factory _CreateCompletionRequest.fromJson(Map json) = _$CreateCompletionRequestImpl.fromJson; - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. @override + + /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. @_CompletionModelConverter() CompletionModel get model; + @override /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. /// /// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. - @override @_CompletionPromptConverter() CompletionPrompt? get prompt; + @override /// Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. /// /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. /// /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - @override @JsonKey(name: 'best_of', includeIfNull: false) int? get bestOf; + @override /// Echo back the prompt in addition to the completion - @override @JsonKey(includeIfNull: false) bool? get echo; + @override /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) - @override @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty; + @override /// Modify the likelihood of specified tokens appearing in the completion. /// /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](https://platform.openai.com/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. /// /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. - @override @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias; + @override /// Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. 
For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. /// /// The maximum value for `logprobs` is 5. - @override @JsonKey(includeIfNull: false) int? get logprobs; + @override /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the completion. /// /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. - @override @JsonKey(name: 'max_tokens', includeIfNull: false) int? get maxTokens; + @override /// How many completions to generate for each prompt. /// /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - @override @JsonKey(includeIfNull: false) int? get n; + @override /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) - @override @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; + @override /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. /// /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - @override @JsonKey(includeIfNull: false) int? get seed; + @override /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. - @override @_CompletionStopConverter() @JsonKey(includeIfNull: false) CompletionStop? get stop; + @override /// Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). - @override @JsonKey(includeIfNull: false) bool? get stream; + @override /// Options for streaming response. Only set this when you set `stream: true`. - @override @JsonKey(name: 'stream_options', includeIfNull: false) ChatCompletionStreamOptions? get streamOptions; + @override /// The suffix that comes after a completion of inserted text. /// /// This parameter is only supported for `gpt-3.5-turbo-instruct`. - @override @JsonKey(includeIfNull: false) String? get suffix; + @override /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. /// /// We generally recommend altering this or `top_p` but not both. - @override @JsonKey(includeIfNull: false) double? get temperature; + @override /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or `temperature` but not both. - @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; + @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). - @override @JsonKey(includeIfNull: false) String? get user; - - /// Create a copy of CreateCompletionRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateCompletionRequestImplCopyWith<_$CreateCompletionRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -945,8 +925,6 @@ mixin _$CompletionModel { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this CompletionModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -966,9 +944,6 @@ class _$CompletionModelCopyWithImpl<$Res, $Val extends CompletionModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of CompletionModel - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -991,8 +966,6 @@ class __$$CompletionModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$CompletionModelEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of CompletionModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1037,13 +1010,11 @@ class _$CompletionModelEnumerationImpl extends CompletionModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CompletionModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CompletionModelEnumerationImplCopyWith<_$CompletionModelEnumerationImpl> @@ -1130,10 +1101,7 @@ abstract class CompletionModelEnumeration extends CompletionModel { @override CompletionModels get value; - - /// Create a copy of CompletionModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CompletionModelEnumerationImplCopyWith<_$CompletionModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1156,8 +1124,6 @@ class __$$CompletionModelStringImplCopyWithImpl<$Res> $Res Function(_$CompletionModelStringImpl) _then) : super(_value, _then); - /// Create a copy of CompletionModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1201,13 +1167,11 @@ class _$CompletionModelStringImpl extends CompletionModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CompletionModel - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CompletionModelStringImplCopyWith<_$CompletionModelStringImpl> @@ -1294,10 +1258,7 @@ abstract class CompletionModelString extends CompletionModel { @override String get value; - - /// Create a copy of CompletionModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CompletionModelStringImplCopyWith<_$CompletionModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1372,8 +1333,6 @@ mixin _$CompletionPrompt { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this CompletionPrompt to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -1393,9 +1352,6 @@ class _$CompletionPromptCopyWithImpl<$Res, $Val extends CompletionPrompt> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of CompletionPrompt - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -1418,8 +1374,6 @@ class __$$CompletionPromptListListIntImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptListListIntImpl) _then) : super(_value, _then); - /// Create a copy of CompletionPrompt - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1471,14 +1425,12 @@ class _$CompletionPromptListListIntImpl extends CompletionPromptListListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - /// Create a copy of CompletionPrompt - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CompletionPromptListListIntImplCopyWith<_$CompletionPromptListListIntImpl> @@ -1577,10 +1529,7 @@ abstract class CompletionPromptListListInt extends CompletionPrompt { @override List> get value; - - /// Create a copy of CompletionPrompt - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CompletionPromptListListIntImplCopyWith<_$CompletionPromptListListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1604,8 +1553,6 @@ class __$$CompletionPromptListIntImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptListIntImpl) _then) : super(_value, _then); - /// Create a copy of CompletionPrompt - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1656,14 +1603,12 @@ class _$CompletionPromptListIntImpl extends CompletionPromptListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - /// Create a copy of CompletionPrompt - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CompletionPromptListIntImplCopyWith<_$CompletionPromptListIntImpl> @@ -1762,10 +1707,7 @@ abstract class CompletionPromptListInt extends CompletionPrompt { @override List get value; - - /// Create a copy of CompletionPrompt - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CompletionPromptListIntImplCopyWith<_$CompletionPromptListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1790,8 +1732,6 @@ class __$$CompletionPromptListStringImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptListStringImpl) _then) : super(_value, _then); - /// Create a copy of CompletionPrompt - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1843,14 +1783,12 @@ class _$CompletionPromptListStringImpl extends CompletionPromptListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - /// Create a copy of CompletionPrompt - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CompletionPromptListStringImplCopyWith<_$CompletionPromptListStringImpl> @@ -1949,10 +1887,7 @@ abstract class CompletionPromptListString extends CompletionPrompt { @override List get value; - - /// Create a copy of CompletionPrompt - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CompletionPromptListStringImplCopyWith<_$CompletionPromptListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1976,8 +1911,6 @@ class __$$CompletionPromptStringImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptStringImpl) _then) : super(_value, _then); - /// Create a copy of CompletionPrompt - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2021,13 +1954,11 @@ class _$CompletionPromptStringImpl extends CompletionPromptString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CompletionPrompt - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CompletionPromptStringImplCopyWith<_$CompletionPromptStringImpl> @@ -2126,10 +2057,7 @@ abstract class CompletionPromptString extends CompletionPrompt { @override String get value; - - /// Create a copy of CompletionPrompt - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CompletionPromptStringImplCopyWith<_$CompletionPromptStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2188,8 +2116,6 @@ mixin _$CompletionStop { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this CompletionStop to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; } @@ -2209,9 +2135,6 @@ class _$CompletionStopCopyWithImpl<$Res, $Val extends CompletionStop> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of CompletionStop - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -2233,8 +2156,6 @@ class __$$CompletionStopListStringImplCopyWithImpl<$Res> $Res Function(_$CompletionStopListStringImpl) _then) : super(_value, _then); - /// Create a copy of CompletionStop - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2285,14 +2206,12 @@ class _$CompletionStopListStringImpl extends CompletionStopListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - /// Create a copy of CompletionStop - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CompletionStopListStringImplCopyWith<_$CompletionStopListStringImpl> @@ -2379,10 +2298,7 @@ abstract class CompletionStopListString extends CompletionStop { @override List get value; - - /// Create a copy of CompletionStop - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CompletionStopListStringImplCopyWith<_$CompletionStopListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2404,8 +2320,6 @@ class __$$CompletionStopStringImplCopyWithImpl<$Res> $Res Function(_$CompletionStopStringImpl) _then) : super(_value, _then); - /// Create a copy of CompletionStop - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2449,13 +2363,11 @@ class _$CompletionStopStringImpl extends CompletionStopString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CompletionStop - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CompletionStopStringImplCopyWith<_$CompletionStopStringImpl> @@ -2543,10 +2455,7 @@ abstract class CompletionStopString extends CompletionStop { @override String? get value; - - /// Create a copy of CompletionStop - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CompletionStopStringImplCopyWith<_$CompletionStopStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2584,12 +2493,8 @@ mixin _$CreateCompletionResponse { @JsonKey(includeIfNull: false) CompletionUsage? get usage => throw _privateConstructorUsedError; - /// Serializes this CreateCompletionResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateCompletionResponse - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateCompletionResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -2624,8 +2529,6 @@ class _$CreateCompletionResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateCompletionResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2669,8 +2572,6 @@ class _$CreateCompletionResponseCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of CreateCompletionResponse - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionUsageCopyWith<$Res>? get usage { @@ -2717,8 +2618,6 @@ class __$$CreateCompletionResponseImplCopyWithImpl<$Res> $Res Function(_$CreateCompletionResponseImpl) _then) : super(_value, _then); - /// Create a copy of CreateCompletionResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2840,7 +2739,7 @@ class _$CreateCompletionResponseImpl extends _CreateCompletionResponse { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -2852,9 +2751,7 @@ class _$CreateCompletionResponseImpl extends _CreateCompletionResponse { object, usage); - /// Create a copy of CreateCompletionResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateCompletionResponseImplCopyWith<_$CreateCompletionResponseImpl> @@ -2885,42 +2782,40 @@ abstract class _CreateCompletionResponse extends CreateCompletionResponse { factory _CreateCompletionResponse.fromJson(Map json) = _$CreateCompletionResponseImpl.fromJson; - /// A unique identifier for the completion. @override + + /// A unique identifier for the completion. String get id; + @override /// The list of completion choices the model generated for the input prompt. - @override List get choices; + @override /// The Unix timestamp (in seconds) of when the completion was created. - @override int get created; + @override /// The model used for completion. - @override String get model; + @override /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. - @override @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? get systemFingerprint; + @override /// The object type, which is always "text_completion" - @override CreateCompletionResponseObject get object; + @override /// Usage statistics for the completion request. - @override @JsonKey(includeIfNull: false) CompletionUsage? get usage; - - /// Create a copy of CreateCompletionResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateCompletionResponseImplCopyWith<_$CreateCompletionResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2949,12 +2844,8 @@ mixin _$CompletionChoice { /// The text of the completion. String get text => throw _privateConstructorUsedError; - /// Serializes this CompletionChoice to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CompletionChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CompletionChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -2987,8 +2878,6 @@ class _$CompletionChoiceCopyWithImpl<$Res, $Val extends CompletionChoice> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CompletionChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3017,8 +2906,6 @@ class _$CompletionChoiceCopyWithImpl<$Res, $Val extends CompletionChoice> ) as $Val); } - /// Create a copy of CompletionChoice - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionLogprobsCopyWith<$Res>? get logprobs { @@ -3061,8 +2948,6 @@ class __$$CompletionChoiceImplCopyWithImpl<$Res> $Res Function(_$CompletionChoiceImpl) _then) : super(_value, _then); - /// Create a copy of CompletionChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3147,14 +3032,12 @@ class _$CompletionChoiceImpl extends _CompletionChoice { (identical(other.text, text) || other.text == text)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, finishReason, index, logprobs, text); - /// Create a copy of CompletionChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CompletionChoiceImplCopyWith<_$CompletionChoiceImpl> get copyWith => @@ -3183,31 +3066,29 @@ abstract class _CompletionChoice extends CompletionChoice { factory _CompletionChoice.fromJson(Map json) = _$CompletionChoiceImpl.fromJson; + @override + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, /// `length` if the maximum number of tokens specified in the request was reached, /// or `content_filter` if content was omitted due to a flag from our content filters. - @override @JsonKey( name: 'finish_reason', unknownEnumValue: JsonKey.nullForUndefinedEnumValue) CompletionFinishReason? get finishReason; + @override /// The index of the choice in the list of generated choices. - @override int get index; + @override /// The probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - @override CompletionLogprobs? get logprobs; + @override /// The text of the completion. - @override String get text; - - /// Create a copy of CompletionChoice - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CompletionChoiceImplCopyWith<_$CompletionChoiceImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3235,12 +3116,8 @@ mixin _$CompletionLogprobs { List?>? get topLogprobs => throw _privateConstructorUsedError; - /// Serializes this CompletionLogprobs to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CompletionLogprobs - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CompletionLogprobsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3271,8 +3148,6 @@ class _$CompletionLogprobsCopyWithImpl<$Res, $Val extends CompletionLogprobs> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CompletionLogprobs - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3328,8 +3203,6 @@ class __$$CompletionLogprobsImplCopyWithImpl<$Res> $Res Function(_$CompletionLogprobsImpl) _then) : super(_value, _then); - /// Create a copy of CompletionLogprobs - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3454,7 +3327,7 @@ class _$CompletionLogprobsImpl extends _CompletionLogprobs { .equals(other._topLogprobs, _topLogprobs)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -3463,9 +3336,7 @@ class _$CompletionLogprobsImpl extends _CompletionLogprobs { const DeepCollectionEquality().hash(_tokens), const DeepCollectionEquality().hash(_topLogprobs)); - /// Create a copy of CompletionLogprobs - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CompletionLogprobsImplCopyWith<_$CompletionLogprobsImpl> get copyWith => @@ -3495,30 +3366,28 @@ abstract class _CompletionLogprobs extends CompletionLogprobs { factory _CompletionLogprobs.fromJson(Map json) = _$CompletionLogprobsImpl.fromJson; - /// The offset of the token from the beginning of the prompt. @override + + /// The offset of the token from the beginning of the prompt. @JsonKey(name: 'text_offset', includeIfNull: false) List? get textOffset; + @override /// The log probabilities of tokens in the completion. - @override @JsonKey(name: 'token_logprobs', includeIfNull: false) List? get tokenLogprobs; + @override /// The tokens generated by the model converted back to text. - @override @JsonKey(includeIfNull: false) List? get tokens; + @override /// The log probabilities of the `logprobs` most likely tokens. - @override @JsonKey(name: 'top_logprobs', includeIfNull: false) List?>? get topLogprobs; - - /// Create a copy of CompletionLogprobs - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CompletionLogprobsImplCopyWith<_$CompletionLogprobsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3530,13 +3399,11 @@ CreateChatCompletionRequest _$CreateChatCompletionRequestFromJson( /// @nodoc mixin _$CreateChatCompletionRequest { - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) - /// table for details on which models work with the Chat API. + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. 
@_ChatCompletionModelConverter() ChatCompletionModel get model => throw _privateConstructorUsedError; - /// A list of messages comprising the conversation so far. - /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). List get messages => throw _privateConstructorUsedError; @@ -3548,40 +3415,25 @@ mixin _$CreateChatCompletionRequest { /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias - /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to - /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase - /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the - /// relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias => throw _privateConstructorUsedError; - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of - /// each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. @JsonKey(includeIfNull: false) bool? get logprobs => throw _privateConstructorUsedError; - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, - /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @JsonKey(name: 'top_logprobs', includeIfNull: false) int? get topLogprobs => throw _privateConstructorUsedError; - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat - /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated - /// via API. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. /// - /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with - /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). + /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. @JsonKey(name: 'max_tokens', includeIfNull: false) int? 
get maxTokens => throw _privateConstructorUsedError; - /// An upper bound for the number of tokens that can be generated for a completion, including visible output - /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). - @JsonKey(name: 'max_completion_tokens', includeIfNull: false) - int? get maxCompletionTokens => throw _privateConstructorUsedError; - - /// How many chat completion choices to generate for each input message. Note that you will be charged based on - /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @JsonKey(includeIfNull: false) int? get n => throw _privateConstructorUsedError; @@ -3591,60 +3443,27 @@ mixin _$CreateChatCompletionRequest { @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty => throw _privateConstructorUsedError; - /// An object specifying the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer - /// than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model - /// will match your supplied JSON schema. - /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is - /// valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system - /// or user message. Without this, the model may generate an unending stream of whitespace until the generation - /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message - /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded - /// `max_tokens` or the conversation exceeded the max context length. - /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @JsonKey(name: 'response_format', includeIfNull: false) - ResponseFormat? get responseFormat => throw _privateConstructorUsedError; + ChatCompletionResponseFormat? 
get responseFormat => + throw _privateConstructorUsedError; /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests - /// with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to - /// monitor changes in the backend. + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; - /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers - /// subscribed to the scale tier service: - /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits - /// until they are exhausted. - /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the - /// default service tier with a lower uptime SLA and no latency guarantee. - /// - If set to 'default', the request will be processed using the default service tier with a lower uptime - /// SLA and no latency guarantee. - /// - When not set, the default behavior is 'auto'. - /// - /// When this parameter is set, the response body will include the `service_tier` utilized. - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - CreateChatCompletionRequestServiceTier? get serviceTier => - throw _privateConstructorUsedError; - /// Up to 4 sequences where the API will stop generating further tokens. @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? get stop => throw _privateConstructorUsedError; - /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; @@ -3665,9 +3484,7 @@ mixin _$CreateChatCompletionRequest { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// A list of tools the model may call. Currently, only functions are supported as a tool. - /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are - /// supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. @JsonKey(includeIfNull: false) List? 
get tools => throw _privateConstructorUsedError; @@ -3675,8 +3492,7 @@ mixin _$CreateChatCompletionRequest { /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the - /// model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @_ChatCompletionToolChoiceOptionConverter() @@ -3684,11 +3500,6 @@ mixin _$CreateChatCompletionRequest { ChatCompletionToolChoiceOption? get toolChoice => throw _privateConstructorUsedError; - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? get parallelToolCalls => throw _privateConstructorUsedError; - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; @@ -3698,8 +3509,7 @@ mixin _$CreateChatCompletionRequest { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that - /// function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. @_ChatCompletionFunctionCallConverter() @@ -3713,12 +3523,8 @@ mixin _$CreateChatCompletionRequest { @JsonKey(includeIfNull: false) List? get functions => throw _privateConstructorUsedError; - /// Serializes this CreateChatCompletionRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateChatCompletionRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateChatCompletionRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3741,19 +3547,12 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @JsonKey(includeIfNull: false) bool? logprobs, @JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, - @JsonKey(name: 'max_completion_tokens', includeIfNull: false) - int? maxCompletionTokens, @JsonKey(includeIfNull: false) int? n, @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, @JsonKey(name: 'response_format', includeIfNull: false) - ResponseFormat? responseFormat, + ChatCompletionResponseFormat? responseFormat, @JsonKey(includeIfNull: false) int? 
seed, - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - CreateChatCompletionRequestServiceTier? serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? stop, @@ -3766,8 +3565,6 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? toolChoice, - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? parallelToolCalls, @JsonKey(includeIfNull: false) String? user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -3775,7 +3572,7 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @JsonKey(includeIfNull: false) List? functions}); $ChatCompletionModelCopyWith<$Res> get model; - $ResponseFormatCopyWith<$Res>? get responseFormat; + $ChatCompletionResponseFormatCopyWith<$Res>? get responseFormat; $ChatCompletionStopCopyWith<$Res>? get stop; $ChatCompletionStreamOptionsCopyWith<$Res>? get streamOptions; $ChatCompletionToolChoiceOptionCopyWith<$Res>? get toolChoice; @@ -3793,8 +3590,6 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateChatCompletionRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3805,12 +3600,10 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, Object? logprobs = freezed, Object? topLogprobs = freezed, Object? maxTokens = freezed, - Object? maxCompletionTokens = freezed, Object? n = freezed, Object? presencePenalty = freezed, Object? responseFormat = freezed, Object? seed = freezed, - Object? serviceTier = freezed, Object? stop = freezed, Object? stream = freezed, Object? streamOptions = freezed, @@ -3818,7 +3611,6 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, Object? topP = freezed, Object? tools = freezed, Object? toolChoice = freezed, - Object? parallelToolCalls = freezed, Object? user = freezed, Object? functionCall = freezed, Object? functions = freezed, @@ -3852,10 +3644,6 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ? _value.maxTokens : maxTokens // ignore: cast_nullable_to_non_nullable as int?, - maxCompletionTokens: freezed == maxCompletionTokens - ? _value.maxCompletionTokens - : maxCompletionTokens // ignore: cast_nullable_to_non_nullable - as int?, n: freezed == n ? _value.n : n // ignore: cast_nullable_to_non_nullable @@ -3867,15 +3655,11 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable - as ResponseFormat?, + as ChatCompletionResponseFormat?, seed: freezed == seed ? _value.seed : seed // ignore: cast_nullable_to_non_nullable as int?, - serviceTier: freezed == serviceTier - ? _value.serviceTier - : serviceTier // ignore: cast_nullable_to_non_nullable - as CreateChatCompletionRequestServiceTier?, stop: freezed == stop ? _value.stop : stop // ignore: cast_nullable_to_non_nullable @@ -3904,10 +3688,6 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as ChatCompletionToolChoiceOption?, - parallelToolCalls: freezed == parallelToolCalls - ? 
_value.parallelToolCalls - : parallelToolCalls // ignore: cast_nullable_to_non_nullable - as bool?, user: freezed == user ? _value.user : user // ignore: cast_nullable_to_non_nullable @@ -3923,8 +3703,6 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of CreateChatCompletionRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionModelCopyWith<$Res> get model { @@ -3933,22 +3711,19 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateChatCompletionRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $ResponseFormatCopyWith<$Res>? get responseFormat { + $ChatCompletionResponseFormatCopyWith<$Res>? get responseFormat { if (_value.responseFormat == null) { return null; } - return $ResponseFormatCopyWith<$Res>(_value.responseFormat!, (value) { + return $ChatCompletionResponseFormatCopyWith<$Res>(_value.responseFormat!, + (value) { return _then(_value.copyWith(responseFormat: value) as $Val); }); } - /// Create a copy of CreateChatCompletionRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStopCopyWith<$Res>? get stop { @@ -3961,8 +3736,6 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateChatCompletionRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamOptionsCopyWith<$Res>? get streamOptions { @@ -3976,8 +3749,6 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateChatCompletionRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionToolChoiceOptionCopyWith<$Res>? get toolChoice { @@ -3991,8 +3762,6 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateChatCompletionRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionFunctionCallCopyWith<$Res>? get functionCall { @@ -4026,19 +3795,12 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @JsonKey(includeIfNull: false) bool? logprobs, @JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, - @JsonKey(name: 'max_completion_tokens', includeIfNull: false) - int? maxCompletionTokens, @JsonKey(includeIfNull: false) int? n, @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, @JsonKey(name: 'response_format', includeIfNull: false) - ResponseFormat? responseFormat, + ChatCompletionResponseFormat? responseFormat, @JsonKey(includeIfNull: false) int? seed, - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - CreateChatCompletionRequestServiceTier? serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? stop, @@ -4051,8 +3813,6 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? toolChoice, - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? parallelToolCalls, @JsonKey(includeIfNull: false) String? 
user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -4062,7 +3822,7 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @override $ChatCompletionModelCopyWith<$Res> get model; @override - $ResponseFormatCopyWith<$Res>? get responseFormat; + $ChatCompletionResponseFormatCopyWith<$Res>? get responseFormat; @override $ChatCompletionStopCopyWith<$Res>? get stop; @override @@ -4083,8 +3843,6 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> $Res Function(_$CreateChatCompletionRequestImpl) _then) : super(_value, _then); - /// Create a copy of CreateChatCompletionRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4095,12 +3853,10 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> Object? logprobs = freezed, Object? topLogprobs = freezed, Object? maxTokens = freezed, - Object? maxCompletionTokens = freezed, Object? n = freezed, Object? presencePenalty = freezed, Object? responseFormat = freezed, Object? seed = freezed, - Object? serviceTier = freezed, Object? stop = freezed, Object? stream = freezed, Object? streamOptions = freezed, @@ -4108,7 +3864,6 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> Object? topP = freezed, Object? tools = freezed, Object? toolChoice = freezed, - Object? parallelToolCalls = freezed, Object? user = freezed, Object? functionCall = freezed, Object? functions = freezed, @@ -4142,10 +3897,6 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.maxTokens : maxTokens // ignore: cast_nullable_to_non_nullable as int?, - maxCompletionTokens: freezed == maxCompletionTokens - ? _value.maxCompletionTokens - : maxCompletionTokens // ignore: cast_nullable_to_non_nullable - as int?, n: freezed == n ? _value.n : n // ignore: cast_nullable_to_non_nullable @@ -4157,15 +3908,11 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable - as ResponseFormat?, + as ChatCompletionResponseFormat?, seed: freezed == seed ? _value.seed : seed // ignore: cast_nullable_to_non_nullable as int?, - serviceTier: freezed == serviceTier - ? _value.serviceTier - : serviceTier // ignore: cast_nullable_to_non_nullable - as CreateChatCompletionRequestServiceTier?, stop: freezed == stop ? _value.stop : stop // ignore: cast_nullable_to_non_nullable @@ -4194,10 +3941,6 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as ChatCompletionToolChoiceOption?, - parallelToolCalls: freezed == parallelToolCalls - ? _value.parallelToolCalls - : parallelToolCalls // ignore: cast_nullable_to_non_nullable - as bool?, user: freezed == user ? 
_value.user : user // ignore: cast_nullable_to_non_nullable @@ -4227,19 +3970,12 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(includeIfNull: false) this.logprobs, @JsonKey(name: 'top_logprobs', includeIfNull: false) this.topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) this.maxTokens, - @JsonKey(name: 'max_completion_tokens', includeIfNull: false) - this.maxCompletionTokens, @JsonKey(includeIfNull: false) this.n = 1, @JsonKey(name: 'presence_penalty', includeIfNull: false) this.presencePenalty = 0.0, @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @JsonKey(includeIfNull: false) this.seed, - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - this.serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) this.stop, @JsonKey(includeIfNull: false) this.stream = false, @JsonKey(name: 'stream_options', includeIfNull: false) this.streamOptions, @@ -4249,8 +3985,6 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - this.parallelToolCalls, @JsonKey(includeIfNull: false) this.user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -4266,18 +4000,15 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { Map json) => _$$CreateChatCompletionRequestImplFromJson(json); - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) - /// table for details on which models work with the Chat API. + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. @override @_ChatCompletionModelConverter() final ChatCompletionModel model; - /// A list of messages comprising the conversation so far. - /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). final List _messages; - /// A list of messages comprising the conversation so far. - /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). @override List get messages { if (_messages is EqualUnmodifiableListView) return _messages; @@ -4294,20 +4025,12 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias - /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to - /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase - /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the - /// relevant token. 
+ /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. final Map? _logitBias; /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias - /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to - /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase - /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the - /// relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. @override @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias { @@ -4318,36 +4041,24 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { return EqualUnmodifiableMapView(value); } - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of - /// each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. @override @JsonKey(includeIfNull: false) final bool? logprobs; - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, - /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @override @JsonKey(name: 'top_logprobs', includeIfNull: false) final int? topLogprobs; - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat - /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated - /// via API. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. /// - /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with - /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). + /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. @override @JsonKey(name: 'max_tokens', includeIfNull: false) final int? 
maxTokens; - /// An upper bound for the number of tokens that can be generated for a completion, including visible output - /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). - @override - @JsonKey(name: 'max_completion_tokens', includeIfNull: false) - final int? maxCompletionTokens; - - /// How many chat completion choices to generate for each input message. Note that you will be charged based on - /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @override @JsonKey(includeIfNull: false) final int? n; @@ -4359,63 +4070,29 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty; - /// An object specifying the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer - /// than `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model - /// will match your supplied JSON schema. - /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is - /// valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system - /// or user message. Without this, the model may generate an unending stream of whitespace until the generation - /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message - /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded - /// `max_tokens` or the conversation exceeded the max context length. - /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @JsonKey(name: 'response_format', includeIfNull: false) - final ResponseFormat? responseFormat; + final ChatCompletionResponseFormat? responseFormat; /// This feature is in Beta. 
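// Editor's note (not part of the generated diff): a minimal sketch of the
// `response_format` shape this hunk reverts to. It assumes
// `package:openai_dart/openai_dart.dart` exports these generated types and that
// the enum exposes `json_object` as `ChatCompletionResponseFormatType.jsonObject`
// (only the `text` default is visible in this hunk). Per the doc comment above,
// JSON mode also requires instructing the model to produce JSON via a system or
// user message.
import 'package:openai_dart/openai_dart.dart';

ChatCompletionResponseFormat jsonModeFormat() {
  // `type` defaults to ChatCompletionResponseFormatType.text when omitted.
  return ChatCompletionResponseFormat(
    type: ChatCompletionResponseFormatType.jsonObject,
  );
}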
- /// If specified, our system will make a best effort to sample deterministically, such that repeated requests - /// with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to - /// monitor changes in the backend. + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. @override @JsonKey(includeIfNull: false) final int? seed; - /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers - /// subscribed to the scale tier service: - /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits - /// until they are exhausted. - /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the - /// default service tier with a lower uptime SLA and no latency guarantee. - /// - If set to 'default', the request will be processed using the default service tier with a lower uptime - /// SLA and no latency guarantee. - /// - When not set, the default behavior is 'auto'. - /// - /// When this parameter is set, the response body will include the `service_tier` utilized. - @override - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final CreateChatCompletionRequestServiceTier? serviceTier; - /// Up to 4 sequences where the API will stop generating further tokens. @override @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) final ChatCompletionStop? stop; - /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @override @JsonKey(includeIfNull: false) final bool? stream; @@ -4439,14 +4116,10 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// A list of tools the model may call. Currently, only functions are supported as a tool. - /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are - /// supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. final List? _tools; - /// A list of tools the model may call. Currently, only functions are supported as a tool. - /// Use this to provide a list of functions the model may generate JSON inputs for. 
A max of 128 functions are - /// supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. @override @JsonKey(includeIfNull: false) List? get tools { @@ -4461,8 +4134,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the - /// model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @override @@ -4470,12 +4142,6 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) final ChatCompletionToolChoiceOption? toolChoice; - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. - @override - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - final bool? parallelToolCalls; - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). @override @JsonKey(includeIfNull: false) @@ -4486,8 +4152,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that - /// function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. 
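// Editor's note (not part of the generated diff): the `stop` field above is the
// ChatCompletionStop union defined later in this file. A hedged sketch of its two
// shapes, assuming `package:openai_dart/openai_dart.dart` names the union
// constructors after the freezed `runtimeType` keys (`string` / `listString`)
// seen in the union's fromJson switch; the example sequences are illustrative.
import 'package:openai_dart/openai_dart.dart';

void stopExamples() {
  // A single stop sequence, serialized as a plain JSON string.
  final single = ChatCompletionStop.string('\n\n');
  // Up to four stop sequences, serialized as a JSON array of strings.
  final several = ChatCompletionStop.listString(['Observation:', 'Final:']);
  print(single.toJson());
  print(several.toJson());
}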
@override @@ -4515,7 +4180,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @override String toString() { - return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, maxCompletionTokens: $maxCompletionTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, serviceTier: $serviceTier, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, user: $user, functionCall: $functionCall, functions: $functions)'; + return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, user: $user, functionCall: $functionCall, functions: $functions)'; } @override @@ -4535,16 +4200,12 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { other.topLogprobs == topLogprobs) && (identical(other.maxTokens, maxTokens) || other.maxTokens == maxTokens) && - (identical(other.maxCompletionTokens, maxCompletionTokens) || - other.maxCompletionTokens == maxCompletionTokens) && (identical(other.n, n) || other.n == n) && (identical(other.presencePenalty, presencePenalty) || other.presencePenalty == presencePenalty) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat) && (identical(other.seed, seed) || other.seed == seed) && - (identical(other.serviceTier, serviceTier) || - other.serviceTier == serviceTier) && (identical(other.stop, stop) || other.stop == stop) && (identical(other.stream, stream) || other.stream == stream) && (identical(other.streamOptions, streamOptions) || @@ -4555,8 +4216,6 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { const DeepCollectionEquality().equals(other._tools, _tools) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && - (identical(other.parallelToolCalls, parallelToolCalls) || - other.parallelToolCalls == parallelToolCalls) && (identical(other.user, user) || other.user == user) && (identical(other.functionCall, functionCall) || other.functionCall == functionCall) && @@ -4564,7 +4223,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { .equals(other._functions, _functions)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hashAll([ runtimeType, @@ -4575,12 +4234,10 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { logprobs, topLogprobs, maxTokens, - maxCompletionTokens, n, presencePenalty, responseFormat, seed, - serviceTier, stop, stream, streamOptions, @@ -4588,15 +4245,12 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { topP, const DeepCollectionEquality().hash(_tools), toolChoice, - parallelToolCalls, user, functionCall, const DeepCollectionEquality().hash(_functions) ]); - /// Create a copy of CreateChatCompletionRequest - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateChatCompletionRequestImplCopyWith<_$CreateChatCompletionRequestImpl> @@ -4625,19 +4279,12 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'top_logprobs', includeIfNull: false) final int? topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) final int? maxTokens, - @JsonKey(name: 'max_completion_tokens', includeIfNull: false) - final int? maxCompletionTokens, @JsonKey(includeIfNull: false) final int? n, @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty, @JsonKey(name: 'response_format', includeIfNull: false) - final ResponseFormat? responseFormat, + final ChatCompletionResponseFormat? responseFormat, @JsonKey(includeIfNull: false) final int? seed, - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final CreateChatCompletionRequestServiceTier? serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) final ChatCompletionStop? stop, @@ -4650,8 +4297,6 @@ abstract class _CreateChatCompletionRequest @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) final ChatCompletionToolChoiceOption? toolChoice, - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - final bool? parallelToolCalls, @JsonKey(includeIfNull: false) final String? user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -4664,212 +4309,148 @@ abstract class _CreateChatCompletionRequest factory _CreateChatCompletionRequest.fromJson(Map json) = _$CreateChatCompletionRequestImpl.fromJson; - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) - /// table for details on which models work with the Chat API. @override + + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. @_ChatCompletionModelConverter() ChatCompletionModel get model; - - /// A list of messages comprising the conversation so far. - /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). @override + + /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). List get messages; + @override /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) - @override @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty; + @override /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias - /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to - /// sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase - /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the - /// relevant token. - @override + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias; - - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of - /// each output token returned in the `content` of `message`. @override + + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. @JsonKey(includeIfNull: false) bool? get logprobs; - - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, - /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @override + + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @JsonKey(name: 'top_logprobs', includeIfNull: false) int? get topLogprobs; + @override - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat - /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated - /// via API. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. /// - /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with - /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). - @override + /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. @JsonKey(name: 'max_tokens', includeIfNull: false) int? get maxTokens; - - /// An upper bound for the number of tokens that can be generated for a completion, including visible output - /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). @override - @JsonKey(name: 'max_completion_tokens', includeIfNull: false) - int? get maxCompletionTokens; - /// How many chat completion choices to generate for each input message. Note that you will be charged based on - /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. - @override + /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @JsonKey(includeIfNull: false) int? get n; + @override /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
/// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) - @override @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; + @override - /// An object specifying the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer - /// than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model - /// will match your supplied JSON schema. - /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is - /// valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system - /// or user message. Without this, the model may generate an unending stream of whitespace until the generation - /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message - /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded - /// `max_tokens` or the conversation exceeded the max context length. - /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] - @override + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @JsonKey(name: 'response_format', includeIfNull: false) - ResponseFormat? get responseFormat; + ChatCompletionResponseFormat? get responseFormat; + @override /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests - /// with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to - /// monitor changes in the backend. - @override + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. @JsonKey(includeIfNull: false) int? get seed; - - /// Specifies the latency tier to use for processing the request. 
This parameter is relevant for customers - /// subscribed to the scale tier service: - /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits - /// until they are exhausted. - /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the - /// default service tier with a lower uptime SLA and no latency guarantee. - /// - If set to 'default', the request will be processed using the default service tier with a lower uptime - /// SLA and no latency guarantee. - /// - When not set, the default behavior is 'auto'. - /// - /// When this parameter is set, the response body will include the `service_tier` utilized. @override - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - CreateChatCompletionRequestServiceTier? get serviceTier; /// Up to 4 sequences where the API will stop generating further tokens. - @override @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? get stop; - - /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @override + + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @JsonKey(includeIfNull: false) bool? get stream; + @override /// Options for streaming response. Only set this when you set `stream: true`. - @override @JsonKey(name: 'stream_options', includeIfNull: false) ChatCompletionStreamOptions? get streamOptions; + @override /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. /// /// We generally recommend altering this or `top_p` but not both. - @override @JsonKey(includeIfNull: false) double? get temperature; + @override /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or `temperature` but not both. - @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - - /// A list of tools the model may call. Currently, only functions are supported as a tool. - /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are - /// supported. @override + + /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. @JsonKey(includeIfNull: false) List? get tools; + @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tool and instead generates a message. 
/// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the - /// model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. - @override @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? get toolChoice; - - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. @override - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? get parallelToolCalls; /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). - @override @JsonKey(includeIfNull: false) String? get user; + @override /// Deprecated in favor of `tool_choice`. /// /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that - /// function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. - @override @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionFunctionCall? get functionCall; + @override /// Deprecated in favor of `tools`. /// /// A list of functions the model may generate JSON inputs for. - @override @JsonKey(includeIfNull: false) List? get functions; - - /// Create a copy of CreateChatCompletionRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateChatCompletionRequestImplCopyWith<_$CreateChatCompletionRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -4928,8 +4509,6 @@ mixin _$ChatCompletionModel { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this ChatCompletionModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -4949,9 +4528,6 @@ class _$ChatCompletionModelCopyWithImpl<$Res, $Val extends ChatCompletionModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of ChatCompletionModel - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -4974,8 +4550,6 @@ class __$$ChatCompletionModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionModelEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionModel - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -5021,13 +4595,11 @@ class _$ChatCompletionModelEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ChatCompletionModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionModelEnumerationImplCopyWith< @@ -5115,10 +4687,7 @@ abstract class ChatCompletionModelEnumeration extends ChatCompletionModel { @override ChatCompletionModels get value; - - /// Create a copy of ChatCompletionModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionModelEnumerationImplCopyWith< _$ChatCompletionModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -5144,8 +4713,6 @@ class __$$ChatCompletionModelStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionModelStringImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5189,13 +4756,11 @@ class _$ChatCompletionModelStringImpl extends ChatCompletionModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ChatCompletionModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionModelStringImplCopyWith<_$ChatCompletionModelStringImpl> @@ -5282,14 +4847,170 @@ abstract class ChatCompletionModelString extends ChatCompletionModel { @override String get value; - - /// Create a copy of ChatCompletionModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionModelStringImplCopyWith<_$ChatCompletionModelStringImpl> get copyWith => throw _privateConstructorUsedError; } +ChatCompletionResponseFormat _$ChatCompletionResponseFormatFromJson( + Map json) { + return _ChatCompletionResponseFormat.fromJson(json); +} + +/// @nodoc +mixin _$ChatCompletionResponseFormat { + /// Must be one of `text` or `json_object`. 
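// Editor's note (not part of the generated diff): the ChatCompletionModel union
// above accepts either a value from the generated ChatCompletionModels enum or a
// free-form model id string. A hedged sketch, assuming
// `package:openai_dart/openai_dart.dart` names the two union constructors
// `model` and `modelId` and exposes e.g. `ChatCompletionModels.gpt4`; none of
// those names are visible in this hunk.
import 'package:openai_dart/openai_dart.dart';

void modelExamples() {
  // Pick a model from the generated enum of known model ids.
  final byEnum = ChatCompletionModel.model(ChatCompletionModels.gpt4);
  // Or pass any model id verbatim, e.g. for fine-tuned or newer models.
  final byId = ChatCompletionModel.modelId('gpt-4-turbo');
  print(byEnum.toJson());
  print(byId.toJson());
}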
+ ChatCompletionResponseFormatType get type => + throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ChatCompletionResponseFormatCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChatCompletionResponseFormatCopyWith<$Res> { + factory $ChatCompletionResponseFormatCopyWith( + ChatCompletionResponseFormat value, + $Res Function(ChatCompletionResponseFormat) then) = + _$ChatCompletionResponseFormatCopyWithImpl<$Res, + ChatCompletionResponseFormat>; + @useResult + $Res call({ChatCompletionResponseFormatType type}); +} + +/// @nodoc +class _$ChatCompletionResponseFormatCopyWithImpl<$Res, + $Val extends ChatCompletionResponseFormat> + implements $ChatCompletionResponseFormatCopyWith<$Res> { + _$ChatCompletionResponseFormatCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ChatCompletionResponseFormatType, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ChatCompletionResponseFormatImplCopyWith<$Res> + implements $ChatCompletionResponseFormatCopyWith<$Res> { + factory _$$ChatCompletionResponseFormatImplCopyWith( + _$ChatCompletionResponseFormatImpl value, + $Res Function(_$ChatCompletionResponseFormatImpl) then) = + __$$ChatCompletionResponseFormatImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ChatCompletionResponseFormatType type}); +} + +/// @nodoc +class __$$ChatCompletionResponseFormatImplCopyWithImpl<$Res> + extends _$ChatCompletionResponseFormatCopyWithImpl<$Res, + _$ChatCompletionResponseFormatImpl> + implements _$$ChatCompletionResponseFormatImplCopyWith<$Res> { + __$$ChatCompletionResponseFormatImplCopyWithImpl( + _$ChatCompletionResponseFormatImpl _value, + $Res Function(_$ChatCompletionResponseFormatImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$ChatCompletionResponseFormatImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ChatCompletionResponseFormatType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionResponseFormatImpl extends _ChatCompletionResponseFormat { + const _$ChatCompletionResponseFormatImpl( + {this.type = ChatCompletionResponseFormatType.text}) + : super._(); + + factory _$ChatCompletionResponseFormatImpl.fromJson( + Map json) => + _$$ChatCompletionResponseFormatImplFromJson(json); + + /// Must be one of `text` or `json_object`. 
+ @override + @JsonKey() + final ChatCompletionResponseFormatType type; + + @override + String toString() { + return 'ChatCompletionResponseFormat(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ChatCompletionResponseFormatImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ChatCompletionResponseFormatImplCopyWith< + _$ChatCompletionResponseFormatImpl> + get copyWith => __$$ChatCompletionResponseFormatImplCopyWithImpl< + _$ChatCompletionResponseFormatImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ChatCompletionResponseFormatImplToJson( + this, + ); + } +} + +abstract class _ChatCompletionResponseFormat + extends ChatCompletionResponseFormat { + const factory _ChatCompletionResponseFormat( + {final ChatCompletionResponseFormatType type}) = + _$ChatCompletionResponseFormatImpl; + const _ChatCompletionResponseFormat._() : super._(); + + factory _ChatCompletionResponseFormat.fromJson(Map json) = + _$ChatCompletionResponseFormatImpl.fromJson; + + @override + + /// Must be one of `text` or `json_object`. + ChatCompletionResponseFormatType get type; + @override + @JsonKey(ignore: true) + _$$ChatCompletionResponseFormatImplCopyWith< + _$ChatCompletionResponseFormatImpl> + get copyWith => throw _privateConstructorUsedError; +} + ChatCompletionStop _$ChatCompletionStopFromJson(Map json) { switch (json['runtimeType']) { case 'listString': @@ -5344,8 +5065,6 @@ mixin _$ChatCompletionStop { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this ChatCompletionStop to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -5365,9 +5084,6 @@ class _$ChatCompletionStopCopyWithImpl<$Res, $Val extends ChatCompletionStop> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of ChatCompletionStop - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -5390,8 +5106,6 @@ class __$$ChatCompletionStopListStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStopListStringImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionStop - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5443,14 +5157,12 @@ class _$ChatCompletionStopListStringImpl extends ChatCompletionStopListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - /// Create a copy of ChatCompletionStop - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionStopListStringImplCopyWith< @@ -5538,10 +5250,7 @@ abstract class ChatCompletionStopListString extends ChatCompletionStop { @override List get value; - - /// Create a copy of ChatCompletionStop - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionStopListStringImplCopyWith< _$ChatCompletionStopListStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -5567,8 +5276,6 @@ class __$$ChatCompletionStopStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStopStringImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionStop - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5612,13 +5319,11 @@ class _$ChatCompletionStopStringImpl extends ChatCompletionStopString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ChatCompletionStop - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionStopStringImplCopyWith<_$ChatCompletionStopStringImpl> @@ -5705,10 +5410,7 @@ abstract class ChatCompletionStopString extends ChatCompletionStop { @override String? get value; - - /// Create a copy of ChatCompletionStop - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionStopStringImplCopyWith<_$ChatCompletionStopStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -5779,8 +5481,6 @@ mixin _$ChatCompletionToolChoiceOption { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this ChatCompletionToolChoiceOption to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -5803,9 +5503,6 @@ class _$ChatCompletionToolChoiceOptionCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of ChatCompletionToolChoiceOption - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -5828,8 +5525,6 @@ class __$$ChatCompletionToolChoiceOptionEnumerationImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionToolChoiceOptionEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionToolChoiceOption - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5876,13 +5571,11 @@ class _$ChatCompletionToolChoiceOptionEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ChatCompletionToolChoiceOption - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolChoiceOptionEnumerationImplCopyWith< @@ -5982,10 +5675,7 @@ abstract class ChatCompletionToolChoiceOptionEnumeration @override ChatCompletionToolChoiceMode get value; - - /// Create a copy of ChatCompletionToolChoiceOption - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionToolChoiceOptionEnumerationImplCopyWith< _$ChatCompletionToolChoiceOptionEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -6022,8 +5712,6 @@ class __$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWit _then) : super(_value, _then); - /// Create a copy of ChatCompletionToolChoiceOption - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6038,8 +5726,6 @@ class __$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWit )); } - /// Create a copy of ChatCompletionToolChoiceOption - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionNamedToolChoiceCopyWith<$Res> get value { @@ -6084,13 +5770,11 @@ class _$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ChatCompletionToolChoiceOption - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWith< @@ -6192,10 +5876,7 @@ abstract class ChatCompletionToolChoiceOptionChatCompletionNamedToolChoice @override ChatCompletionNamedToolChoice get value; - - /// Create a copy of ChatCompletionToolChoiceOption - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWith< _$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -6266,8 +5947,6 @@ mixin _$ChatCompletionFunctionCall { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this ChatCompletionFunctionCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -6289,9 +5968,6 @@ class _$ChatCompletionFunctionCallCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of ChatCompletionFunctionCall - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -6314,8 +5990,6 @@ class __$$ChatCompletionFunctionCallEnumerationImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionFunctionCallEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionFunctionCall - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6362,13 +6036,11 @@ class _$ChatCompletionFunctionCallEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ChatCompletionFunctionCall - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionCallEnumerationImplCopyWith< @@ -6465,10 +6137,7 @@ abstract class ChatCompletionFunctionCallEnumeration @override ChatCompletionFunctionCallMode get value; - - /// Create a copy of ChatCompletionFunctionCall - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionFunctionCallEnumerationImplCopyWith< _$ChatCompletionFunctionCallEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -6505,8 +6174,6 @@ class __$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith _then) : super(_value, _then); - /// Create a copy of ChatCompletionFunctionCall - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6521,8 +6188,6 @@ class __$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith )); } - /// Create a copy of ChatCompletionFunctionCall - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionFunctionCallOptionCopyWith<$Res> get value { @@ -6568,13 +6233,11 @@ class _$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ChatCompletionFunctionCall - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith< @@ -6674,10 +6337,7 @@ abstract class ChatCompletionFunctionCallChatCompletionFunctionCallOption @override ChatCompletionFunctionCallOption get value; - - /// Create a copy of ChatCompletionFunctionCall - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith< _$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImpl> get copyWith => throw _privateConstructorUsedError; @@ -6696,12 +6356,8 @@ mixin _$ChatCompletionMessageFunctionCall { /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. String get arguments => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionMessageFunctionCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionMessageFunctionCall - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionMessageFunctionCallCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6728,8 +6384,6 @@ class _$ChatCompletionMessageFunctionCallCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionMessageFunctionCall - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6771,8 +6425,6 @@ class __$$ChatCompletionMessageFunctionCallImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageFunctionCallImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionMessageFunctionCall - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6827,13 +6479,11 @@ class _$ChatCompletionMessageFunctionCallImpl other.arguments == arguments)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, name, arguments); - /// Create a copy of ChatCompletionMessageFunctionCall - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageFunctionCallImplCopyWith< @@ -6860,18 +6510,16 @@ abstract class _ChatCompletionMessageFunctionCall Map json) = _$ChatCompletionMessageFunctionCallImpl.fromJson; - /// The name of the function to call. @override + + /// The name of the function to call. String get name; + @override /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - @override String get arguments; - - /// Create a copy of ChatCompletionMessageFunctionCall - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionMessageFunctionCallImplCopyWith< _$ChatCompletionMessageFunctionCallImpl> get copyWith => throw _privateConstructorUsedError; @@ -6887,12 +6535,8 @@ mixin _$ChatCompletionFunctionCallOption { /// The name of the function to call. String get name => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionFunctionCallOption to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionFunctionCallOption - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionFunctionCallOptionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6919,8 +6563,6 @@ class _$ChatCompletionFunctionCallOptionCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionFunctionCallOption - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6957,8 +6599,6 @@ class __$$ChatCompletionFunctionCallOptionImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionFunctionCallOptionImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionFunctionCallOption - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -7001,13 +6641,11 @@ class _$ChatCompletionFunctionCallOptionImpl (identical(other.name, name) || other.name == name)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, name); - /// Create a copy of ChatCompletionFunctionCallOption - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionCallOptionImplCopyWith< @@ -7033,14 +6671,12 @@ abstract class _ChatCompletionFunctionCallOption Map json) = _$ChatCompletionFunctionCallOptionImpl.fromJson; - /// The name of the function to call. @override - String get name; - /// Create a copy of ChatCompletionFunctionCallOption - /// with the given fields replaced by the non-null parameter values. + /// The name of the function to call. + String get name; @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionFunctionCallOptionImplCopyWith< _$ChatCompletionFunctionCallOptionImpl> get copyWith => throw _privateConstructorUsedError; @@ -7052,33 +6688,21 @@ FunctionObject _$FunctionObjectFromJson(Map json) { /// @nodoc mixin _$FunctionObject { - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a - /// maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. String get name => throw _privateConstructorUsedError; /// A description of what the function does, used by the model to choose when and how to call the function. @JsonKey(includeIfNull: false) String? get description => throw _privateConstructorUsedError; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) Map? get parameters => throw _privateConstructorUsedError; - /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will - /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when - /// `strict` is `true`. Learn more about Structured Outputs in the - /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). - @JsonKey(includeIfNull: false) - bool? get strict => throw _privateConstructorUsedError; - - /// Serializes this FunctionObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of FunctionObject - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $FunctionObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7092,8 +6716,7 @@ abstract class $FunctionObjectCopyWith<$Res> { $Res call( {String name, @JsonKey(includeIfNull: false) String? description, - @JsonKey(includeIfNull: false) Map? parameters, - @JsonKey(includeIfNull: false) bool? strict}); + @JsonKey(includeIfNull: false) Map? parameters}); } /// @nodoc @@ -7106,15 +6729,12 @@ class _$FunctionObjectCopyWithImpl<$Res, $Val extends FunctionObject> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of FunctionObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? name = null, Object? description = freezed, Object? parameters = freezed, - Object? strict = freezed, }) { return _then(_value.copyWith( name: null == name @@ -7129,10 +6749,6 @@ class _$FunctionObjectCopyWithImpl<$Res, $Val extends FunctionObject> ? _value.parameters : parameters // ignore: cast_nullable_to_non_nullable as Map?, - strict: freezed == strict - ? _value.strict - : strict // ignore: cast_nullable_to_non_nullable - as bool?, ) as $Val); } } @@ -7148,8 +6764,7 @@ abstract class _$$FunctionObjectImplCopyWith<$Res> $Res call( {String name, @JsonKey(includeIfNull: false) String? description, - @JsonKey(includeIfNull: false) Map? parameters, - @JsonKey(includeIfNull: false) bool? strict}); + @JsonKey(includeIfNull: false) Map? parameters}); } /// @nodoc @@ -7160,15 +6775,12 @@ class __$$FunctionObjectImplCopyWithImpl<$Res> _$FunctionObjectImpl _value, $Res Function(_$FunctionObjectImpl) _then) : super(_value, _then); - /// Create a copy of FunctionObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? name = null, Object? description = freezed, Object? parameters = freezed, - Object? strict = freezed, }) { return _then(_$FunctionObjectImpl( name: null == name @@ -7183,10 +6795,6 @@ class __$$FunctionObjectImplCopyWithImpl<$Res> ? _value._parameters : parameters // ignore: cast_nullable_to_non_nullable as Map?, - strict: freezed == strict - ? _value.strict - : strict // ignore: cast_nullable_to_non_nullable - as bool?, )); } } @@ -7197,16 +6805,14 @@ class _$FunctionObjectImpl extends _FunctionObject { const _$FunctionObjectImpl( {required this.name, @JsonKey(includeIfNull: false) this.description, - @JsonKey(includeIfNull: false) final Map? parameters, - @JsonKey(includeIfNull: false) this.strict = false}) + @JsonKey(includeIfNull: false) final Map? parameters}) : _parameters = parameters, super._(); factory _$FunctionObjectImpl.fromJson(Map json) => _$$FunctionObjectImplFromJson(json); - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a - /// maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. @override final String name; @@ -7215,12 +6821,12 @@ class _$FunctionObjectImpl extends _FunctionObject { @JsonKey(includeIfNull: false) final String? description; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. 
+ /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. final Map? _parameters; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @override @@ -7233,17 +6839,9 @@ class _$FunctionObjectImpl extends _FunctionObject { return EqualUnmodifiableMapView(value); } - /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will - /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when - /// `strict` is `true`. Learn more about Structured Outputs in the - /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). - @override - @JsonKey(includeIfNull: false) - final bool? strict; - @override String toString() { - return 'FunctionObject(name: $name, description: $description, parameters: $parameters, strict: $strict)'; + return 'FunctionObject(name: $name, description: $description, parameters: $parameters)'; } @override @@ -7255,18 +6853,15 @@ class _$FunctionObjectImpl extends _FunctionObject { (identical(other.description, description) || other.description == description) && const DeepCollectionEquality() - .equals(other._parameters, _parameters) && - (identical(other.strict, strict) || other.strict == strict)); + .equals(other._parameters, _parameters)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, name, description, - const DeepCollectionEquality().hash(_parameters), strict); + const DeepCollectionEquality().hash(_parameters)); - /// Create a copy of FunctionObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$FunctionObjectImplCopyWith<_$FunctionObjectImpl> get copyWith => @@ -7283,318 +6878,37 @@ class _$FunctionObjectImpl extends _FunctionObject { abstract class _FunctionObject extends FunctionObject { const factory _FunctionObject( - {required final String name, - @JsonKey(includeIfNull: false) final String? description, - @JsonKey(includeIfNull: false) final Map? parameters, - @JsonKey(includeIfNull: false) final bool? strict}) = - _$FunctionObjectImpl; + {required final String name, + @JsonKey(includeIfNull: false) final String? description, + @JsonKey(includeIfNull: false) + final Map? parameters}) = _$FunctionObjectImpl; const _FunctionObject._() : super._(); factory _FunctionObject.fromJson(Map json) = _$FunctionObjectImpl.fromJson; - /// The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a - /// maximum length of 64. @override + + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. String get name; + @override /// A description of what the function does, used by the model to choose when and how to call the function. - @override @JsonKey(includeIfNull: false) String? get description; + @override - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. - @override @JsonKey(includeIfNull: false) Map? get parameters; - - /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will - /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when - /// `strict` is `true`. Learn more about Structured Outputs in the - /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). @override - @JsonKey(includeIfNull: false) - bool? get strict; - - /// Create a copy of FunctionObject - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$FunctionObjectImplCopyWith<_$FunctionObjectImpl> get copyWith => throw _privateConstructorUsedError; } -JsonSchemaObject _$JsonSchemaObjectFromJson(Map json) { - return _JsonSchemaObject.fromJson(json); -} - -/// @nodoc -mixin _$JsonSchemaObject { - /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum - /// length of 64. - String get name => throw _privateConstructorUsedError; - - /// A description of what the response format is for, used by the model to determine how to respond in the - /// format. - @JsonKey(includeIfNull: false) - String? get description => throw _privateConstructorUsedError; - - /// The schema for the response format, described as a JSON Schema object. - Map get schema => throw _privateConstructorUsedError; - - /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always - /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when - /// `strict` is `true`. To learn more, read the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - @JsonKey(includeIfNull: false) - bool? get strict => throw _privateConstructorUsedError; - - /// Serializes this JsonSchemaObject to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of JsonSchemaObject - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $JsonSchemaObjectCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $JsonSchemaObjectCopyWith<$Res> { - factory $JsonSchemaObjectCopyWith( - JsonSchemaObject value, $Res Function(JsonSchemaObject) then) = - _$JsonSchemaObjectCopyWithImpl<$Res, JsonSchemaObject>; - @useResult - $Res call( - {String name, - @JsonKey(includeIfNull: false) String? description, - Map schema, - @JsonKey(includeIfNull: false) bool? strict}); -} - -/// @nodoc -class _$JsonSchemaObjectCopyWithImpl<$Res, $Val extends JsonSchemaObject> - implements $JsonSchemaObjectCopyWith<$Res> { - _$JsonSchemaObjectCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of JsonSchemaObject - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? name = null, - Object? description = freezed, - Object? schema = null, - Object? strict = freezed, - }) { - return _then(_value.copyWith( - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, - description: freezed == description - ? _value.description - : description // ignore: cast_nullable_to_non_nullable - as String?, - schema: null == schema - ? _value.schema - : schema // ignore: cast_nullable_to_non_nullable - as Map, - strict: freezed == strict - ? _value.strict - : strict // ignore: cast_nullable_to_non_nullable - as bool?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$JsonSchemaObjectImplCopyWith<$Res> - implements $JsonSchemaObjectCopyWith<$Res> { - factory _$$JsonSchemaObjectImplCopyWith(_$JsonSchemaObjectImpl value, - $Res Function(_$JsonSchemaObjectImpl) then) = - __$$JsonSchemaObjectImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {String name, - @JsonKey(includeIfNull: false) String? description, - Map schema, - @JsonKey(includeIfNull: false) bool? strict}); -} - -/// @nodoc -class __$$JsonSchemaObjectImplCopyWithImpl<$Res> - extends _$JsonSchemaObjectCopyWithImpl<$Res, _$JsonSchemaObjectImpl> - implements _$$JsonSchemaObjectImplCopyWith<$Res> { - __$$JsonSchemaObjectImplCopyWithImpl(_$JsonSchemaObjectImpl _value, - $Res Function(_$JsonSchemaObjectImpl) _then) - : super(_value, _then); - - /// Create a copy of JsonSchemaObject - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? name = null, - Object? description = freezed, - Object? schema = null, - Object? strict = freezed, - }) { - return _then(_$JsonSchemaObjectImpl( - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, - description: freezed == description - ? _value.description - : description // ignore: cast_nullable_to_non_nullable - as String?, - schema: null == schema - ? _value._schema - : schema // ignore: cast_nullable_to_non_nullable - as Map, - strict: freezed == strict - ? 
_value.strict - : strict // ignore: cast_nullable_to_non_nullable - as bool?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$JsonSchemaObjectImpl extends _JsonSchemaObject { - const _$JsonSchemaObjectImpl( - {required this.name, - @JsonKey(includeIfNull: false) this.description, - required final Map schema, - @JsonKey(includeIfNull: false) this.strict = false}) - : _schema = schema, - super._(); - - factory _$JsonSchemaObjectImpl.fromJson(Map json) => - _$$JsonSchemaObjectImplFromJson(json); - - /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum - /// length of 64. - @override - final String name; - - /// A description of what the response format is for, used by the model to determine how to respond in the - /// format. - @override - @JsonKey(includeIfNull: false) - final String? description; - - /// The schema for the response format, described as a JSON Schema object. - final Map _schema; - - /// The schema for the response format, described as a JSON Schema object. - @override - Map get schema { - if (_schema is EqualUnmodifiableMapView) return _schema; - // ignore: implicit_dynamic_type - return EqualUnmodifiableMapView(_schema); - } - - /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always - /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when - /// `strict` is `true`. To learn more, read the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - @override - @JsonKey(includeIfNull: false) - final bool? strict; - - @override - String toString() { - return 'JsonSchemaObject(name: $name, description: $description, schema: $schema, strict: $strict)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$JsonSchemaObjectImpl && - (identical(other.name, name) || other.name == name) && - (identical(other.description, description) || - other.description == description) && - const DeepCollectionEquality().equals(other._schema, _schema) && - (identical(other.strict, strict) || other.strict == strict)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, name, description, - const DeepCollectionEquality().hash(_schema), strict); - - /// Create a copy of JsonSchemaObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$JsonSchemaObjectImplCopyWith<_$JsonSchemaObjectImpl> get copyWith => - __$$JsonSchemaObjectImplCopyWithImpl<_$JsonSchemaObjectImpl>( - this, _$identity); - - @override - Map toJson() { - return _$$JsonSchemaObjectImplToJson( - this, - ); - } -} - -abstract class _JsonSchemaObject extends JsonSchemaObject { - const factory _JsonSchemaObject( - {required final String name, - @JsonKey(includeIfNull: false) final String? description, - required final Map schema, - @JsonKey(includeIfNull: false) final bool? strict}) = - _$JsonSchemaObjectImpl; - const _JsonSchemaObject._() : super._(); - - factory _JsonSchemaObject.fromJson(Map json) = - _$JsonSchemaObjectImpl.fromJson; - - /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum - /// length of 64. 
- @override - String get name; - - /// A description of what the response format is for, used by the model to determine how to respond in the - /// format. - @override - @JsonKey(includeIfNull: false) - String? get description; - - /// The schema for the response format, described as a JSON Schema object. - @override - Map get schema; - - /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always - /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when - /// `strict` is `true`. To learn more, read the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - @override - @JsonKey(includeIfNull: false) - bool? get strict; - - /// Create a copy of JsonSchemaObject - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$JsonSchemaObjectImplCopyWith<_$JsonSchemaObjectImpl> get copyWith => - throw _privateConstructorUsedError; -} - ChatCompletionTool _$ChatCompletionToolFromJson(Map json) { return _ChatCompletionTool.fromJson(json); } @@ -7607,12 +6921,8 @@ mixin _$ChatCompletionTool { /// A function that the model may call. FunctionObject get function => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionTool to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionTool - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionToolCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7638,8 +6948,6 @@ class _$ChatCompletionToolCopyWithImpl<$Res, $Val extends ChatCompletionTool> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionTool - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7658,8 +6966,6 @@ class _$ChatCompletionToolCopyWithImpl<$Res, $Val extends ChatCompletionTool> ) as $Val); } - /// Create a copy of ChatCompletionTool - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FunctionObjectCopyWith<$Res> get function { @@ -7691,8 +6997,6 @@ class __$$ChatCompletionToolImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionToolImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionTool - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7744,13 +7048,11 @@ class _$ChatCompletionToolImpl extends _ChatCompletionTool { other.function == function)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, type, function); - /// Create a copy of ChatCompletionTool - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolImplCopyWith<_$ChatCompletionToolImpl> get copyWith => @@ -7774,18 +7076,16 @@ abstract class _ChatCompletionTool extends ChatCompletionTool { factory _ChatCompletionTool.fromJson(Map json) = _$ChatCompletionToolImpl.fromJson; - /// The type of the tool. Currently, only `function` is supported. @override + + /// The type of the tool. 
Currently, only `function` is supported. ChatCompletionToolType get type; + @override /// A function that the model may call. - @override FunctionObject get function; - - /// Create a copy of ChatCompletionTool - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionToolImplCopyWith<_$ChatCompletionToolImpl> get copyWith => throw _privateConstructorUsedError; } @@ -7805,12 +7105,8 @@ mixin _$ChatCompletionNamedToolChoice { ChatCompletionFunctionCallOption get function => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionNamedToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionNamedToolChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionNamedToolChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7841,8 +7137,6 @@ class _$ChatCompletionNamedToolChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionNamedToolChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7861,8 +7155,6 @@ class _$ChatCompletionNamedToolChoiceCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of ChatCompletionNamedToolChoice - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionFunctionCallOptionCopyWith<$Res> get function { @@ -7900,8 +7192,6 @@ class __$$ChatCompletionNamedToolChoiceImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionNamedToolChoiceImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionNamedToolChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7956,13 +7246,11 @@ class _$ChatCompletionNamedToolChoiceImpl other.function == function)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, type, function); - /// Create a copy of ChatCompletionNamedToolChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionNamedToolChoiceImplCopyWith< @@ -7989,18 +7277,16 @@ abstract class _ChatCompletionNamedToolChoice factory _ChatCompletionNamedToolChoice.fromJson(Map json) = _$ChatCompletionNamedToolChoiceImpl.fromJson; - /// The type of the tool. Currently, only `function` is supported. @override + + /// The type of the tool. Currently, only `function` is supported. ChatCompletionNamedToolChoiceType get type; + @override /// Forces the model to call the specified function. - @override ChatCompletionFunctionCallOption get function; - - /// Create a copy of ChatCompletionNamedToolChoice - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionNamedToolChoiceImplCopyWith< _$ChatCompletionNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -8024,12 +7310,8 @@ mixin _$ChatCompletionMessageToolCall { ChatCompletionMessageFunctionCall get function => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionMessageToolCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionMessageToolCall - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionMessageToolCallCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8061,8 +7343,6 @@ class _$ChatCompletionMessageToolCallCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionMessageToolCall - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8086,8 +7366,6 @@ class _$ChatCompletionMessageToolCallCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of ChatCompletionMessageToolCall - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionMessageFunctionCallCopyWith<$Res> get function { @@ -8126,8 +7404,6 @@ class __$$ChatCompletionMessageToolCallImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageToolCallImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionMessageToolCall - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8192,13 +7468,11 @@ class _$ChatCompletionMessageToolCallImpl other.function == function)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, type, function); - /// Create a copy of ChatCompletionMessageToolCall - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageToolCallImplCopyWith< @@ -8226,22 +7500,20 @@ abstract class _ChatCompletionMessageToolCall factory _ChatCompletionMessageToolCall.fromJson(Map json) = _$ChatCompletionMessageToolCallImpl.fromJson; - /// The ID of the tool call. @override + + /// The ID of the tool call. String get id; + @override /// The type of the tool. Currently, only `function` is supported. - @override ChatCompletionMessageToolCallType get type; + @override /// The name and arguments of a function that should be called, as generated by the model. - @override ChatCompletionMessageFunctionCall get function; - - /// Create a copy of ChatCompletionMessageToolCall - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionMessageToolCallImplCopyWith< _$ChatCompletionMessageToolCallImpl> get copyWith => throw _privateConstructorUsedError; @@ -8258,12 +7530,8 @@ mixin _$ChatCompletionStreamOptions { @JsonKey(name: 'include_usage', includeIfNull: false) bool? get includeUsage => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionStreamOptions to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionStreamOptions - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionStreamOptionsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8292,8 +7560,6 @@ class _$ChatCompletionStreamOptionsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionStreamOptions - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8332,8 +7598,6 @@ class __$$ChatCompletionStreamOptionsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamOptionsImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionStreamOptions - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8378,13 +7642,11 @@ class _$ChatCompletionStreamOptionsImpl extends _ChatCompletionStreamOptions { other.includeUsage == includeUsage)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, includeUsage); - /// Create a copy of ChatCompletionStreamOptions - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamOptionsImplCopyWith<_$ChatCompletionStreamOptionsImpl> @@ -8409,15 +7671,13 @@ abstract class _ChatCompletionStreamOptions factory _ChatCompletionStreamOptions.fromJson(Map json) = _$ChatCompletionStreamOptionsImpl.fromJson; - /// If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. @override + + /// If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. @JsonKey(name: 'include_usage', includeIfNull: false) bool? get includeUsage; - - /// Create a copy of ChatCompletionStreamOptions - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionStreamOptionsImplCopyWith<_$ChatCompletionStreamOptionsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -8443,14 +7703,6 @@ mixin _$CreateChatCompletionResponse { /// The model used for the chat completion. String get model => throw _privateConstructorUsedError; - /// The service tier used for processing the request. This field is only included if the `service_tier` parameter - /// is specified in the request. - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - ServiceTier? get serviceTier => throw _privateConstructorUsedError; - /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. 
@@ -8464,12 +7716,8 @@ mixin _$CreateChatCompletionResponse { @JsonKey(includeIfNull: false) CompletionUsage? get usage => throw _privateConstructorUsedError; - /// Serializes this CreateChatCompletionResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateChatCompletionResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateChatCompletionResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8487,11 +7735,6 @@ abstract class $CreateChatCompletionResponseCopyWith<$Res> { List choices, int created, String model, - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, String object, @@ -8511,8 +7754,6 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateChatCompletionResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8520,7 +7761,6 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, Object? choices = null, Object? created = null, Object? model = null, - Object? serviceTier = freezed, Object? systemFingerprint = freezed, Object? object = null, Object? usage = freezed, @@ -8542,10 +7782,6 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, ? _value.model : model // ignore: cast_nullable_to_non_nullable as String, - serviceTier: freezed == serviceTier - ? _value.serviceTier - : serviceTier // ignore: cast_nullable_to_non_nullable - as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable @@ -8561,8 +7797,6 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of CreateChatCompletionResponse - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionUsageCopyWith<$Res>? get usage { @@ -8590,11 +7824,6 @@ abstract class _$$CreateChatCompletionResponseImplCopyWith<$Res> List choices, int created, String model, - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, String object, @@ -8614,8 +7843,6 @@ class __$$CreateChatCompletionResponseImplCopyWithImpl<$Res> $Res Function(_$CreateChatCompletionResponseImpl) _then) : super(_value, _then); - /// Create a copy of CreateChatCompletionResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8623,7 +7850,6 @@ class __$$CreateChatCompletionResponseImplCopyWithImpl<$Res> Object? choices = null, Object? created = null, Object? model = null, - Object? serviceTier = freezed, Object? systemFingerprint = freezed, Object? object = null, Object? usage = freezed, @@ -8645,10 +7871,6 @@ class __$$CreateChatCompletionResponseImplCopyWithImpl<$Res> ? _value.model : model // ignore: cast_nullable_to_non_nullable as String, - serviceTier: freezed == serviceTier - ? 
_value.serviceTier - : serviceTier // ignore: cast_nullable_to_non_nullable - as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable @@ -8673,11 +7895,6 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { required final List choices, required this.created, required this.model, - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - this.serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) this.systemFingerprint, required this.object, @@ -8713,15 +7930,6 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { @override final String model; - /// The service tier used for processing the request. This field is only included if the `service_tier` parameter - /// is specified in the request. - @override - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final ServiceTier? serviceTier; - /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. @@ -8740,7 +7948,7 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { @override String toString() { - return 'CreateChatCompletionResponse(id: $id, choices: $choices, created: $created, model: $model, serviceTier: $serviceTier, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; + return 'CreateChatCompletionResponse(id: $id, choices: $choices, created: $created, model: $model, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; } @override @@ -8752,15 +7960,13 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { const DeepCollectionEquality().equals(other._choices, _choices) && (identical(other.created, created) || other.created == created) && (identical(other.model, model) || other.model == model) && - (identical(other.serviceTier, serviceTier) || - other.serviceTier == serviceTier) && (identical(other.systemFingerprint, systemFingerprint) || other.systemFingerprint == systemFingerprint) && (identical(other.object, object) || other.object == object) && (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -8768,14 +7974,11 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { const DeepCollectionEquality().hash(_choices), created, model, - serviceTier, systemFingerprint, object, usage); - /// Create a copy of CreateChatCompletionResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateChatCompletionResponseImplCopyWith< @@ -8798,11 +8001,6 @@ abstract class _CreateChatCompletionResponse required final List choices, required final int created, required final String model, - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) final String? 
systemFingerprint, required final String object, @@ -8813,52 +8011,41 @@ abstract class _CreateChatCompletionResponse factory _CreateChatCompletionResponse.fromJson(Map json) = _$CreateChatCompletionResponseImpl.fromJson; - /// A unique identifier for the chat completion. @override + + /// A unique identifier for the chat completion. @JsonKey(includeIfNull: false) String? get id; + @override /// A list of chat completion choices. Can be more than one if `n` is greater than 1. - @override List get choices; + @override /// The Unix timestamp (in seconds) of when the chat completion was created. - @override int get created; + @override /// The model used for the chat completion. - @override String get model; - - /// The service tier used for processing the request. This field is only included if the `service_tier` parameter - /// is specified in the request. @override - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - ServiceTier? get serviceTier; /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. - @override @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? get systemFingerprint; + @override /// The object type, which is always `chat.completion`. - @override String get object; + @override /// Usage statistics for the completion request. - @override @JsonKey(includeIfNull: false) CompletionUsage? get usage; - - /// Create a copy of CreateChatCompletionResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateChatCompletionResponseImplCopyWith< _$CreateChatCompletionResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -8892,12 +8079,8 @@ mixin _$ChatCompletionResponseChoice { /// Log probability information for the choice. ChatCompletionLogprobs? get logprobs => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionResponseChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionResponseChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionResponseChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8933,8 +8116,6 @@ class _$ChatCompletionResponseChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionResponseChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8963,8 +8144,6 @@ class _$ChatCompletionResponseChoiceCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of ChatCompletionResponseChoice - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionLogprobsCopyWith<$Res>? get logprobs { @@ -9010,8 +8189,6 @@ class __$$ChatCompletionResponseChoiceImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionResponseChoiceImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionResponseChoice - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -9099,14 +8276,12 @@ class _$ChatCompletionResponseChoiceImpl extends _ChatCompletionResponseChoice { other.logprobs == logprobs)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, finishReason, index, const DeepCollectionEquality().hash(message), logprobs); - /// Create a copy of ChatCompletionResponseChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionResponseChoiceImplCopyWith< @@ -9138,33 +8313,31 @@ abstract class _ChatCompletionResponseChoice factory _ChatCompletionResponseChoice.fromJson(Map json) = _$ChatCompletionResponseChoiceImpl.fromJson; + @override + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, /// `length` if the maximum number of tokens specified in the request was reached, /// `content_filter` if content was omitted due to a flag from our content filters, /// `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. - @override @JsonKey( name: 'finish_reason', unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionFinishReason? get finishReason; + @override /// The index of the choice in the list of choices. - @override @JsonKey(includeIfNull: false) int? get index; + @override /// An assistant message in a chat conversation. - @override ChatCompletionAssistantMessage get message; + @override /// Log probability information for the choice. - @override ChatCompletionLogprobs? get logprobs; - - /// Create a copy of ChatCompletionResponseChoice - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionResponseChoiceImplCopyWith< _$ChatCompletionResponseChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -9178,21 +8351,11 @@ ChatCompletionLogprobs _$ChatCompletionLogprobsFromJson( /// @nodoc mixin _$ChatCompletionLogprobs { /// A list of message content tokens with log probability information. - @JsonKey(includeIfNull: false) List? get content => throw _privateConstructorUsedError; - /// A list of message refusal tokens with log probability information. - @JsonKey(includeIfNull: false) - List? get refusal => - throw _privateConstructorUsedError; - - /// Serializes this ChatCompletionLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionLogprobs - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionLogprobsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -9203,10 +8366,7 @@ abstract class $ChatCompletionLogprobsCopyWith<$Res> { $Res Function(ChatCompletionLogprobs) then) = _$ChatCompletionLogprobsCopyWithImpl<$Res, ChatCompletionLogprobs>; @useResult - $Res call( - {@JsonKey(includeIfNull: false) List? content, - @JsonKey(includeIfNull: false) - List? refusal}); + $Res call({List? 
content}); } /// @nodoc @@ -9220,23 +8380,16 @@ class _$ChatCompletionLogprobsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionLogprobs - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? content = freezed, - Object? refusal = freezed, }) { return _then(_value.copyWith( content: freezed == content ? _value.content : content // ignore: cast_nullable_to_non_nullable as List?, - refusal: freezed == refusal - ? _value.refusal - : refusal // ignore: cast_nullable_to_non_nullable - as List?, ) as $Val); } } @@ -9250,10 +8403,7 @@ abstract class _$$ChatCompletionLogprobsImplCopyWith<$Res> __$$ChatCompletionLogprobsImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(includeIfNull: false) List? content, - @JsonKey(includeIfNull: false) - List? refusal}); + $Res call({List? content}); } /// @nodoc @@ -9266,23 +8416,16 @@ class __$$ChatCompletionLogprobsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionLogprobsImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionLogprobs - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? content = freezed, - Object? refusal = freezed, }) { return _then(_$ChatCompletionLogprobsImpl( content: freezed == content ? _value._content : content // ignore: cast_nullable_to_non_nullable as List?, - refusal: freezed == refusal - ? _value._refusal - : refusal // ignore: cast_nullable_to_non_nullable - as List?, )); } } @@ -9291,12 +8434,8 @@ class __$$ChatCompletionLogprobsImplCopyWithImpl<$Res> @JsonSerializable() class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { const _$ChatCompletionLogprobsImpl( - {@JsonKey(includeIfNull: false) - final List? content, - @JsonKey(includeIfNull: false) - final List? refusal}) + {required final List? content}) : _content = content, - _refusal = refusal, super._(); factory _$ChatCompletionLogprobsImpl.fromJson(Map json) => @@ -9307,7 +8446,6 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { /// A list of message content tokens with log probability information. @override - @JsonKey(includeIfNull: false) List? get content { final value = _content; if (value == null) return null; @@ -9316,23 +8454,9 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { return EqualUnmodifiableListView(value); } - /// A list of message refusal tokens with log probability information. - final List? _refusal; - - /// A list of message refusal tokens with log probability information. - @override - @JsonKey(includeIfNull: false) - List? 
get refusal { - final value = _refusal; - if (value == null) return null; - if (_refusal is EqualUnmodifiableListView) return _refusal; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - @override String toString() { - return 'ChatCompletionLogprobs(content: $content, refusal: $refusal)'; + return 'ChatCompletionLogprobs(content: $content)'; } @override @@ -9340,20 +8464,15 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { return identical(this, other) || (other.runtimeType == runtimeType && other is _$ChatCompletionLogprobsImpl && - const DeepCollectionEquality().equals(other._content, _content) && - const DeepCollectionEquality().equals(other._refusal, _refusal)); + const DeepCollectionEquality().equals(other._content, _content)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash( - runtimeType, - const DeepCollectionEquality().hash(_content), - const DeepCollectionEquality().hash(_refusal)); + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_content)); - /// Create a copy of ChatCompletionLogprobs - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionLogprobsImplCopyWith<_$ChatCompletionLogprobsImpl> @@ -9370,30 +8489,19 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { abstract class _ChatCompletionLogprobs extends ChatCompletionLogprobs { const factory _ChatCompletionLogprobs( - {@JsonKey(includeIfNull: false) - final List? content, - @JsonKey(includeIfNull: false) - final List? refusal}) = + {required final List? content}) = _$ChatCompletionLogprobsImpl; const _ChatCompletionLogprobs._() : super._(); factory _ChatCompletionLogprobs.fromJson(Map json) = _$ChatCompletionLogprobsImpl.fromJson; - /// A list of message content tokens with log probability information. @override - @JsonKey(includeIfNull: false) - List? get content; - /// A list of message refusal tokens with log probability information. - @override - @JsonKey(includeIfNull: false) - List? get refusal; - - /// Create a copy of ChatCompletionLogprobs - /// with the given fields replaced by the non-null parameter values. + /// A list of message content tokens with log probability information. + List? get content; @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionLogprobsImplCopyWith<_$ChatCompletionLogprobsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -9419,12 +8527,8 @@ mixin _$ChatCompletionTokenLogprob { List get topLogprobs => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionTokenLogprob to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionTokenLogprob - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionTokenLogprobCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -9455,8 +8559,6 @@ class _$ChatCompletionTokenLogprobCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionTokenLogprob - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -9513,8 +8615,6 @@ class __$$ChatCompletionTokenLogprobImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionTokenLogprobImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionTokenLogprob - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9611,7 +8711,7 @@ class _$ChatCompletionTokenLogprobImpl extends _ChatCompletionTokenLogprob { .equals(other._topLogprobs, _topLogprobs)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -9620,9 +8720,7 @@ class _$ChatCompletionTokenLogprobImpl extends _ChatCompletionTokenLogprob { const DeepCollectionEquality().hash(_bytes), const DeepCollectionEquality().hash(_topLogprobs)); - /// Create a copy of ChatCompletionTokenLogprob - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionTokenLogprobImplCopyWith<_$ChatCompletionTokenLogprobImpl> @@ -9650,27 +8748,25 @@ abstract class _ChatCompletionTokenLogprob extends ChatCompletionTokenLogprob { factory _ChatCompletionTokenLogprob.fromJson(Map json) = _$ChatCompletionTokenLogprobImpl.fromJson; - /// The token. @override + + /// The token. String get token; + @override /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. - @override double get logprob; + @override /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - @override List? get bytes; + @override /// List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. - @override @JsonKey(name: 'top_logprobs') List get topLogprobs; - - /// Create a copy of ChatCompletionTokenLogprob - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionTokenLogprobImplCopyWith<_$ChatCompletionTokenLogprobImpl> get copyWith => throw _privateConstructorUsedError; } @@ -9691,12 +8787,8 @@ mixin _$ChatCompletionTokenTopLogprob { /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. List? get bytes => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionTokenTopLogprob to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionTokenTopLogprob - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionTokenTopLogprobCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -9723,8 +8815,6 @@ class _$ChatCompletionTokenTopLogprobCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionTokenTopLogprob - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9771,8 +8861,6 @@ class __$$ChatCompletionTokenTopLogprobImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionTokenTopLogprobImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionTokenTopLogprob - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9848,14 +8936,12 @@ class _$ChatCompletionTokenTopLogprobImpl const DeepCollectionEquality().equals(other._bytes, _bytes)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, token, logprob, const DeepCollectionEquality().hash(_bytes)); - /// Create a copy of ChatCompletionTokenTopLogprob - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionTokenTopLogprobImplCopyWith< @@ -9882,22 +8968,20 @@ abstract class _ChatCompletionTokenTopLogprob factory _ChatCompletionTokenTopLogprob.fromJson(Map json) = _$ChatCompletionTokenTopLogprobImpl.fromJson; - /// The token. @override + + /// The token. String get token; + @override /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. - @override double get logprob; + @override /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - @override List? get bytes; - - /// Create a copy of ChatCompletionTokenTopLogprob - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionTokenTopLogprobImplCopyWith< _$ChatCompletionTokenTopLogprobImpl> get copyWith => throw _privateConstructorUsedError; @@ -9920,21 +9004,12 @@ mixin _$CreateChatCompletionStreamResponse { throw _privateConstructorUsedError; /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - @JsonKey(includeIfNull: false) - int? get created => throw _privateConstructorUsedError; + int get created => throw _privateConstructorUsedError; /// The model to generate the completion. @JsonKey(includeIfNull: false) String? get model => throw _privateConstructorUsedError; - /// The service tier used for processing the request. This field is only included if the `service_tier` parameter - /// is specified in the request. - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - ServiceTier? get serviceTier => throw _privateConstructorUsedError; - /// This fingerprint represents the backend configuration that the model runs with. 
/// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact @@ -9942,19 +9017,14 @@ mixin _$CreateChatCompletionStreamResponse { String? get systemFingerprint => throw _privateConstructorUsedError; /// The object type, which is always `chat.completion.chunk`. - @JsonKey(includeIfNull: false) - String? get object => throw _privateConstructorUsedError; + String get object => throw _privateConstructorUsedError; /// Usage statistics for the completion request. @JsonKey(includeIfNull: false) CompletionUsage? get usage => throw _privateConstructorUsedError; - /// Serializes this CreateChatCompletionStreamResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateChatCompletionStreamResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateChatCompletionStreamResponseCopyWith< CreateChatCompletionStreamResponse> get copyWith => throw _privateConstructorUsedError; @@ -9971,16 +9041,11 @@ abstract class $CreateChatCompletionStreamResponseCopyWith<$Res> { $Res call( {@JsonKey(includeIfNull: false) String? id, List choices, - @JsonKey(includeIfNull: false) int? created, + int created, @JsonKey(includeIfNull: false) String? model, - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, - @JsonKey(includeIfNull: false) String? object, + String object, @JsonKey(includeIfNull: false) CompletionUsage? usage}); $CompletionUsageCopyWith<$Res>? get usage; @@ -9997,18 +9062,15 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateChatCompletionStreamResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? id = freezed, Object? choices = null, - Object? created = freezed, + Object? created = null, Object? model = freezed, - Object? serviceTier = freezed, Object? systemFingerprint = freezed, - Object? object = freezed, + Object? object = null, Object? usage = freezed, }) { return _then(_value.copyWith( @@ -10020,26 +9082,22 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ? _value.choices : choices // ignore: cast_nullable_to_non_nullable as List, - created: freezed == created + created: null == created ? _value.created : created // ignore: cast_nullable_to_non_nullable - as int?, + as int, model: freezed == model ? _value.model : model // ignore: cast_nullable_to_non_nullable as String?, - serviceTier: freezed == serviceTier - ? _value.serviceTier - : serviceTier // ignore: cast_nullable_to_non_nullable - as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable as String?, - object: freezed == object + object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as String?, + as String, usage: freezed == usage ? _value.usage : usage // ignore: cast_nullable_to_non_nullable @@ -10047,8 +9105,6 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of CreateChatCompletionStreamResponse - /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $CompletionUsageCopyWith<$Res>? get usage { @@ -10074,16 +9130,11 @@ abstract class _$$CreateChatCompletionStreamResponseImplCopyWith<$Res> $Res call( {@JsonKey(includeIfNull: false) String? id, List choices, - @JsonKey(includeIfNull: false) int? created, + int created, @JsonKey(includeIfNull: false) String? model, - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, - @JsonKey(includeIfNull: false) String? object, + String object, @JsonKey(includeIfNull: false) CompletionUsage? usage}); @override @@ -10100,18 +9151,15 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> $Res Function(_$CreateChatCompletionStreamResponseImpl) _then) : super(_value, _then); - /// Create a copy of CreateChatCompletionStreamResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? id = freezed, Object? choices = null, - Object? created = freezed, + Object? created = null, Object? model = freezed, - Object? serviceTier = freezed, Object? systemFingerprint = freezed, - Object? object = freezed, + Object? object = null, Object? usage = freezed, }) { return _then(_$CreateChatCompletionStreamResponseImpl( @@ -10123,26 +9171,22 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> ? _value._choices : choices // ignore: cast_nullable_to_non_nullable as List, - created: freezed == created + created: null == created ? _value.created : created // ignore: cast_nullable_to_non_nullable - as int?, + as int, model: freezed == model ? _value.model : model // ignore: cast_nullable_to_non_nullable as String?, - serviceTier: freezed == serviceTier - ? _value.serviceTier - : serviceTier // ignore: cast_nullable_to_non_nullable - as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable as String?, - object: freezed == object + object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as String?, + as String, usage: freezed == usage ? _value.usage : usage // ignore: cast_nullable_to_non_nullable @@ -10158,16 +9202,11 @@ class _$CreateChatCompletionStreamResponseImpl const _$CreateChatCompletionStreamResponseImpl( {@JsonKey(includeIfNull: false) this.id, required final List choices, - @JsonKey(includeIfNull: false) this.created, + required this.created, @JsonKey(includeIfNull: false) this.model, - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - this.serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) this.systemFingerprint, - @JsonKey(includeIfNull: false) this.object, + required this.object, @JsonKey(includeIfNull: false) this.usage}) : _choices = choices, super._(); @@ -10196,23 +9235,13 @@ class _$CreateChatCompletionStreamResponseImpl /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. @override - @JsonKey(includeIfNull: false) - final int? created; + final int created; /// The model to generate the completion. @override @JsonKey(includeIfNull: false) final String? model; - /// The service tier used for processing the request. This field is only included if the `service_tier` parameter - /// is specified in the request. 
- @override - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final ServiceTier? serviceTier; - /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact @@ -10222,8 +9251,7 @@ class _$CreateChatCompletionStreamResponseImpl /// The object type, which is always `chat.completion.chunk`. @override - @JsonKey(includeIfNull: false) - final String? object; + final String object; /// Usage statistics for the completion request. @override @@ -10232,7 +9260,7 @@ class _$CreateChatCompletionStreamResponseImpl @override String toString() { - return 'CreateChatCompletionStreamResponse(id: $id, choices: $choices, created: $created, model: $model, serviceTier: $serviceTier, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; + return 'CreateChatCompletionStreamResponse(id: $id, choices: $choices, created: $created, model: $model, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; } @override @@ -10244,15 +9272,13 @@ class _$CreateChatCompletionStreamResponseImpl const DeepCollectionEquality().equals(other._choices, _choices) && (identical(other.created, created) || other.created == created) && (identical(other.model, model) || other.model == model) && - (identical(other.serviceTier, serviceTier) || - other.serviceTier == serviceTier) && (identical(other.systemFingerprint, systemFingerprint) || other.systemFingerprint == systemFingerprint) && (identical(other.object, object) || other.object == object) && (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -10260,14 +9286,11 @@ class _$CreateChatCompletionStreamResponseImpl const DeepCollectionEquality().hash(_choices), created, model, - serviceTier, systemFingerprint, object, usage); - /// Create a copy of CreateChatCompletionStreamResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateChatCompletionStreamResponseImplCopyWith< @@ -10288,16 +9311,11 @@ abstract class _CreateChatCompletionStreamResponse const factory _CreateChatCompletionStreamResponse( {@JsonKey(includeIfNull: false) final String? id, required final List choices, - @JsonKey(includeIfNull: false) final int? created, + required final int created, @JsonKey(includeIfNull: false) final String? model, - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) final String? systemFingerprint, - @JsonKey(includeIfNull: false) final String? object, + required final String object, @JsonKey(includeIfNull: false) final CompletionUsage? usage}) = _$CreateChatCompletionStreamResponseImpl; const _CreateChatCompletionStreamResponse._() : super._(); @@ -10306,56 +9324,43 @@ abstract class _CreateChatCompletionStreamResponse Map json) = _$CreateChatCompletionStreamResponseImpl.fromJson; - /// A unique identifier for the chat completion. Each chunk has the same ID. @override + + /// A unique identifier for the chat completion. Each chunk has the same ID. @JsonKey(includeIfNull: false) String? 
get id; + @override /// A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the /// last chunk if you set `stream_options: {"include_usage": true}`. - @override List get choices; + @override /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + int get created; @override - @JsonKey(includeIfNull: false) - int? get created; /// The model to generate the completion. - @override @JsonKey(includeIfNull: false) String? get model; - - /// The service tier used for processing the request. This field is only included if the `service_tier` parameter - /// is specified in the request. @override - @JsonKey( - name: 'service_tier', - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - ServiceTier? get serviceTier; /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact - @override @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? get systemFingerprint; + @override /// The object type, which is always `chat.completion.chunk`. + String get object; @override - @JsonKey(includeIfNull: false) - String? get object; /// Usage statistics for the completion request. - @override @JsonKey(includeIfNull: false) CompletionUsage? get usage; - - /// Create a copy of CreateChatCompletionStreamResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateChatCompletionStreamResponseImplCopyWith< _$CreateChatCompletionStreamResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -10391,12 +9396,8 @@ mixin _$ChatCompletionStreamResponseChoice { @JsonKey(includeIfNull: false) int? get index => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionStreamResponseChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionStreamResponseChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionStreamResponseChoiceCopyWith< ChatCompletionStreamResponseChoice> get copyWith => throw _privateConstructorUsedError; @@ -10435,8 +9436,6 @@ class _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionStreamResponseChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10465,8 +9464,6 @@ class _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of ChatCompletionStreamResponseChoice - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamResponseDeltaCopyWith<$Res> get delta { @@ -10476,8 +9473,6 @@ class _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, }); } - /// Create a copy of ChatCompletionStreamResponseChoice - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamResponseChoiceLogprobsCopyWith<$Res>? 
get logprobs { @@ -10527,8 +9522,6 @@ class __$$ChatCompletionStreamResponseChoiceImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamResponseChoiceImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionStreamResponseChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10618,14 +9611,12 @@ class _$ChatCompletionStreamResponseChoiceImpl (identical(other.index, index) || other.index == index)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, delta, logprobs, finishReason, index); - /// Create a copy of ChatCompletionStreamResponseChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamResponseChoiceImplCopyWith< @@ -10659,34 +9650,32 @@ abstract class _ChatCompletionStreamResponseChoice Map json) = _$ChatCompletionStreamResponseChoiceImpl.fromJson; - /// A chat completion delta generated by streamed model responses. @override + + /// A chat completion delta generated by streamed model responses. ChatCompletionStreamResponseDelta get delta; + @override /// Log probability information for the choice. - @override @JsonKey(includeIfNull: false) ChatCompletionStreamResponseChoiceLogprobs? get logprobs; + @override /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, /// `length` if the maximum number of tokens specified in the request was reached, /// `content_filter` if content was omitted due to a flag from our content filters, /// `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. - @override @JsonKey( name: 'finish_reason', unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionFinishReason? get finishReason; + @override /// The index of the choice in the list of choices. - @override @JsonKey(includeIfNull: false) int? get index; - - /// Create a copy of ChatCompletionStreamResponseChoice - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionStreamResponseChoiceImplCopyWith< _$ChatCompletionStreamResponseChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -10701,21 +9690,11 @@ ChatCompletionStreamResponseChoiceLogprobs /// @nodoc mixin _$ChatCompletionStreamResponseChoiceLogprobs { /// A list of message content tokens with log probability information. - @JsonKey(includeIfNull: false) List? get content => throw _privateConstructorUsedError; - /// A list of message refusal tokens with log probability information. - @JsonKey(includeIfNull: false) - List? get refusal => - throw _privateConstructorUsedError; - - /// Serializes this ChatCompletionStreamResponseChoiceLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionStreamResponseChoiceLogprobsCopyWith< ChatCompletionStreamResponseChoiceLogprobs> get copyWith => throw _privateConstructorUsedError; @@ -10729,10 +9708,7 @@ abstract class $ChatCompletionStreamResponseChoiceLogprobsCopyWith<$Res> { _$ChatCompletionStreamResponseChoiceLogprobsCopyWithImpl<$Res, ChatCompletionStreamResponseChoiceLogprobs>; @useResult - $Res call( - {@JsonKey(includeIfNull: false) List? content, - @JsonKey(includeIfNull: false) - List? refusal}); + $Res call({List? content}); } /// @nodoc @@ -10747,23 +9723,16 @@ class _$ChatCompletionStreamResponseChoiceLogprobsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? content = freezed, - Object? refusal = freezed, }) { return _then(_value.copyWith( content: freezed == content ? _value.content : content // ignore: cast_nullable_to_non_nullable as List?, - refusal: freezed == refusal - ? _value.refusal - : refusal // ignore: cast_nullable_to_non_nullable - as List?, ) as $Val); } } @@ -10778,10 +9747,7 @@ abstract class _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith<$Res> __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(includeIfNull: false) List? content, - @JsonKey(includeIfNull: false) - List? refusal}); + $Res call({List? content}); } /// @nodoc @@ -10794,23 +9760,16 @@ class __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamResponseChoiceLogprobsImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? content = freezed, - Object? refusal = freezed, }) { return _then(_$ChatCompletionStreamResponseChoiceLogprobsImpl( content: freezed == content ? _value._content : content // ignore: cast_nullable_to_non_nullable as List?, - refusal: freezed == refusal - ? _value._refusal - : refusal // ignore: cast_nullable_to_non_nullable - as List?, )); } } @@ -10820,12 +9779,8 @@ class __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res> class _$ChatCompletionStreamResponseChoiceLogprobsImpl extends _ChatCompletionStreamResponseChoiceLogprobs { const _$ChatCompletionStreamResponseChoiceLogprobsImpl( - {@JsonKey(includeIfNull: false) - final List? content, - @JsonKey(includeIfNull: false) - final List? refusal}) + {required final List? content}) : _content = content, - _refusal = refusal, super._(); factory _$ChatCompletionStreamResponseChoiceLogprobsImpl.fromJson( @@ -10837,7 +9792,6 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl /// A list of message content tokens with log probability information. @override - @JsonKey(includeIfNull: false) List? get content { final value = _content; if (value == null) return null; @@ -10846,23 +9800,9 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl return EqualUnmodifiableListView(value); } - /// A list of message refusal tokens with log probability information. - final List? _refusal; - - /// A list of message refusal tokens with log probability information. - @override - @JsonKey(includeIfNull: false) - List? 
get refusal { - final value = _refusal; - if (value == null) return null; - if (_refusal is EqualUnmodifiableListView) return _refusal; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - @override String toString() { - return 'ChatCompletionStreamResponseChoiceLogprobs(content: $content, refusal: $refusal)'; + return 'ChatCompletionStreamResponseChoiceLogprobs(content: $content)'; } @override @@ -10870,20 +9810,15 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl return identical(this, other) || (other.runtimeType == runtimeType && other is _$ChatCompletionStreamResponseChoiceLogprobsImpl && - const DeepCollectionEquality().equals(other._content, _content) && - const DeepCollectionEquality().equals(other._refusal, _refusal)); + const DeepCollectionEquality().equals(other._content, _content)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash( - runtimeType, - const DeepCollectionEquality().hash(_content), - const DeepCollectionEquality().hash(_refusal)); + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_content)); - /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith< @@ -10904,10 +9839,7 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl abstract class _ChatCompletionStreamResponseChoiceLogprobs extends ChatCompletionStreamResponseChoiceLogprobs { const factory _ChatCompletionStreamResponseChoiceLogprobs( - {@JsonKey(includeIfNull: false) - final List? content, - @JsonKey(includeIfNull: false) - final List? refusal}) = + {required final List? content}) = _$ChatCompletionStreamResponseChoiceLogprobsImpl; const _ChatCompletionStreamResponseChoiceLogprobs._() : super._(); @@ -10915,20 +9847,12 @@ abstract class _ChatCompletionStreamResponseChoiceLogprobs Map json) = _$ChatCompletionStreamResponseChoiceLogprobsImpl.fromJson; - /// A list of message content tokens with log probability information. @override - @JsonKey(includeIfNull: false) - List? get content; - - /// A list of message refusal tokens with log probability information. - @override - @JsonKey(includeIfNull: false) - List? get refusal; - /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs - /// with the given fields replaced by the non-null parameter values. + /// A list of message content tokens with log probability information. + List? get content; @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith< _$ChatCompletionStreamResponseChoiceLogprobsImpl> get copyWith => throw _privateConstructorUsedError; @@ -10945,10 +9869,6 @@ mixin _$ChatCompletionStreamResponseDelta { @JsonKey(includeIfNull: false) String? get content => throw _privateConstructorUsedError; - /// The refusal message generated by the model. - @JsonKey(includeIfNull: false) - String? get refusal => throw _privateConstructorUsedError; - /// The name and arguments of a function that should be called, as generated by the model. @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? 
get functionCall => @@ -10964,12 +9884,8 @@ mixin _$ChatCompletionStreamResponseDelta { includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionMessageRole? get role => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionStreamResponseDelta to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionStreamResponseDelta - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionStreamResponseDeltaCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -10984,7 +9900,6 @@ abstract class $ChatCompletionStreamResponseDeltaCopyWith<$Res> { @useResult $Res call( {@JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) @@ -11008,13 +9923,10 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionStreamResponseDelta - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? content = freezed, - Object? refusal = freezed, Object? functionCall = freezed, Object? toolCalls = freezed, Object? role = freezed, @@ -11024,10 +9936,6 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, ? _value.content : content // ignore: cast_nullable_to_non_nullable as String?, - refusal: freezed == refusal - ? _value.refusal - : refusal // ignore: cast_nullable_to_non_nullable - as String?, functionCall: freezed == functionCall ? _value.functionCall : functionCall // ignore: cast_nullable_to_non_nullable @@ -11043,8 +9951,6 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of ChatCompletionStreamResponseDelta - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamMessageFunctionCallCopyWith<$Res>? get functionCall { @@ -11070,7 +9976,6 @@ abstract class _$$ChatCompletionStreamResponseDeltaImplCopyWith<$Res> @useResult $Res call( {@JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) @@ -11094,13 +9999,10 @@ class __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamResponseDeltaImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionStreamResponseDelta - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? content = freezed, - Object? refusal = freezed, Object? functionCall = freezed, Object? toolCalls = freezed, Object? role = freezed, @@ -11110,10 +10012,6 @@ class __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl<$Res> ? _value.content : content // ignore: cast_nullable_to_non_nullable as String?, - refusal: freezed == refusal - ? _value.refusal - : refusal // ignore: cast_nullable_to_non_nullable - as String?, functionCall: freezed == functionCall ? 
_value.functionCall : functionCall // ignore: cast_nullable_to_non_nullable @@ -11136,7 +10034,6 @@ class _$ChatCompletionStreamResponseDeltaImpl extends _ChatCompletionStreamResponseDelta { const _$ChatCompletionStreamResponseDeltaImpl( {@JsonKey(includeIfNull: false) this.content, - @JsonKey(includeIfNull: false) this.refusal, @JsonKey(name: 'function_call', includeIfNull: false) this.functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) final List? toolCalls, @@ -11156,11 +10053,6 @@ class _$ChatCompletionStreamResponseDeltaImpl @JsonKey(includeIfNull: false) final String? content; - /// The refusal message generated by the model. - @override - @JsonKey(includeIfNull: false) - final String? refusal; - /// The name and arguments of a function that should be called, as generated by the model. @override @JsonKey(name: 'function_call', includeIfNull: false) @@ -11188,7 +10080,7 @@ class _$ChatCompletionStreamResponseDeltaImpl @override String toString() { - return 'ChatCompletionStreamResponseDelta(content: $content, refusal: $refusal, functionCall: $functionCall, toolCalls: $toolCalls, role: $role)'; + return 'ChatCompletionStreamResponseDelta(content: $content, functionCall: $functionCall, toolCalls: $toolCalls, role: $role)'; } @override @@ -11197,7 +10089,6 @@ class _$ChatCompletionStreamResponseDeltaImpl (other.runtimeType == runtimeType && other is _$ChatCompletionStreamResponseDeltaImpl && (identical(other.content, content) || other.content == content) && - (identical(other.refusal, refusal) || other.refusal == refusal) && (identical(other.functionCall, functionCall) || other.functionCall == functionCall) && const DeepCollectionEquality() @@ -11205,14 +10096,12 @@ class _$ChatCompletionStreamResponseDeltaImpl (identical(other.role, role) || other.role == role)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, content, refusal, functionCall, + int get hashCode => Object.hash(runtimeType, content, functionCall, const DeepCollectionEquality().hash(_toolCalls), role); - /// Create a copy of ChatCompletionStreamResponseDelta - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamResponseDeltaImplCopyWith< @@ -11232,7 +10121,6 @@ abstract class _ChatCompletionStreamResponseDelta extends ChatCompletionStreamResponseDelta { const factory _ChatCompletionStreamResponseDelta( {@JsonKey(includeIfNull: false) final String? content, - @JsonKey(includeIfNull: false) final String? refusal, @JsonKey(name: 'function_call', includeIfNull: false) final ChatCompletionStreamMessageFunctionCall? functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) @@ -11248,36 +10136,29 @@ abstract class _ChatCompletionStreamResponseDelta Map json) = _$ChatCompletionStreamResponseDeltaImpl.fromJson; - /// The contents of the chunk message. @override + + /// The contents of the chunk message. @JsonKey(includeIfNull: false) String? get content; - - /// The refusal message generated by the model. @override - @JsonKey(includeIfNull: false) - String? get refusal; /// The name and arguments of a function that should be called, as generated by the model. - @override @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? 
get functionCall; + @override /// No Description - @override @JsonKey(name: 'tool_calls', includeIfNull: false) List? get toolCalls; + @override /// The role of the messages author. One of `system`, `user`, `assistant`, or `tool` (`function` is deprecated). - @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionMessageRole? get role; - - /// Create a copy of ChatCompletionStreamResponseDelta - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionStreamResponseDeltaImplCopyWith< _$ChatCompletionStreamResponseDeltaImpl> get copyWith => throw _privateConstructorUsedError; @@ -11299,12 +10180,8 @@ mixin _$ChatCompletionStreamMessageFunctionCall { @JsonKey(includeIfNull: false) String? get arguments => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionStreamMessageFunctionCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionStreamMessageFunctionCall - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionStreamMessageFunctionCallCopyWith< ChatCompletionStreamMessageFunctionCall> get copyWith => throw _privateConstructorUsedError; @@ -11335,8 +10212,6 @@ class _$ChatCompletionStreamMessageFunctionCallCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionStreamMessageFunctionCall - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11380,8 +10255,6 @@ class __$$ChatCompletionStreamMessageFunctionCallImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamMessageFunctionCallImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionStreamMessageFunctionCall - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11439,13 +10312,11 @@ class _$ChatCompletionStreamMessageFunctionCallImpl other.arguments == arguments)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, name, arguments); - /// Create a copy of ChatCompletionStreamMessageFunctionCall - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamMessageFunctionCallImplCopyWith< @@ -11474,20 +10345,18 @@ abstract class _ChatCompletionStreamMessageFunctionCall Map json) = _$ChatCompletionStreamMessageFunctionCallImpl.fromJson; - /// The name of the function to call. @override + + /// The name of the function to call. @JsonKey(includeIfNull: false) String? get name; + @override /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - @override @JsonKey(includeIfNull: false) String? get arguments; - - /// Create a copy of ChatCompletionStreamMessageFunctionCall - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionStreamMessageFunctionCallImplCopyWith< _$ChatCompletionStreamMessageFunctionCallImpl> get copyWith => throw _privateConstructorUsedError; @@ -11519,12 +10388,8 @@ mixin _$ChatCompletionStreamMessageToolCallChunk { ChatCompletionStreamMessageFunctionCall? get function => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionStreamMessageToolCallChunk to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionStreamMessageToolCallChunk - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ChatCompletionStreamMessageToolCallChunkCopyWith< ChatCompletionStreamMessageToolCallChunk> get copyWith => throw _privateConstructorUsedError; @@ -11563,8 +10428,6 @@ class _$ChatCompletionStreamMessageToolCallChunkCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionStreamMessageToolCallChunk - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11593,8 +10456,6 @@ class _$ChatCompletionStreamMessageToolCallChunkCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of ChatCompletionStreamMessageToolCallChunk - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamMessageFunctionCallCopyWith<$Res>? get function { @@ -11642,8 +10503,6 @@ class __$$ChatCompletionStreamMessageToolCallChunkImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamMessageToolCallChunkImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionStreamMessageToolCallChunk - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11728,13 +10587,11 @@ class _$ChatCompletionStreamMessageToolCallChunkImpl other.function == function)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, index, id, type, function); - /// Create a copy of ChatCompletionStreamMessageToolCallChunk - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamMessageToolCallChunkImplCopyWith< @@ -11769,30 +10626,28 @@ abstract class _ChatCompletionStreamMessageToolCallChunk Map json) = _$ChatCompletionStreamMessageToolCallChunkImpl.fromJson; - /// No Description @override + + /// No Description int get index; + @override /// The ID of the tool call. - @override @JsonKey(includeIfNull: false) String? get id; + @override /// The type of the tool. Currently, only `function` is supported. - @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionStreamMessageToolCallChunkType? get type; + @override /// The name and arguments of a function that should be called, as generated by the model. - @override @JsonKey(includeIfNull: false) ChatCompletionStreamMessageFunctionCall? get function; - - /// Create a copy of ChatCompletionStreamMessageToolCallChunk - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ChatCompletionStreamMessageToolCallChunkImplCopyWith< _$ChatCompletionStreamMessageToolCallChunkImpl> get copyWith => throw _privateConstructorUsedError; @@ -11816,17 +10671,8 @@ mixin _$CompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; - /// Breakdown of tokens used in a completion. - @JsonKey(name: 'completion_tokens_details', includeIfNull: false) - CompletionTokensDetails? get completionTokensDetails => - throw _privateConstructorUsedError; - - /// Serializes this CompletionUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CompletionUsage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CompletionUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -11840,11 +10686,7 @@ abstract class $CompletionUsageCopyWith<$Res> { $Res call( {@JsonKey(name: 'completion_tokens') int? completionTokens, @JsonKey(name: 'prompt_tokens') int promptTokens, - @JsonKey(name: 'total_tokens') int totalTokens, - @JsonKey(name: 'completion_tokens_details', includeIfNull: false) - CompletionTokensDetails? completionTokensDetails}); - - $CompletionTokensDetailsCopyWith<$Res>? get completionTokensDetails; + @JsonKey(name: 'total_tokens') int totalTokens}); } /// @nodoc @@ -11857,15 +10699,12 @@ class _$CompletionUsageCopyWithImpl<$Res, $Val extends CompletionUsage> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CompletionUsage - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? completionTokens = freezed, Object? promptTokens = null, Object? totalTokens = null, - Object? completionTokensDetails = freezed, }) { return _then(_value.copyWith( completionTokens: freezed == completionTokens @@ -11880,27 +10719,8 @@ class _$CompletionUsageCopyWithImpl<$Res, $Val extends CompletionUsage> ? _value.totalTokens : totalTokens // ignore: cast_nullable_to_non_nullable as int, - completionTokensDetails: freezed == completionTokensDetails - ? _value.completionTokensDetails - : completionTokensDetails // ignore: cast_nullable_to_non_nullable - as CompletionTokensDetails?, ) as $Val); } - - /// Create a copy of CompletionUsage - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $CompletionTokensDetailsCopyWith<$Res>? get completionTokensDetails { - if (_value.completionTokensDetails == null) { - return null; - } - - return $CompletionTokensDetailsCopyWith<$Res>( - _value.completionTokensDetails!, (value) { - return _then(_value.copyWith(completionTokensDetails: value) as $Val); - }); - } } /// @nodoc @@ -11914,12 +10734,7 @@ abstract class _$$CompletionUsageImplCopyWith<$Res> $Res call( {@JsonKey(name: 'completion_tokens') int? completionTokens, @JsonKey(name: 'prompt_tokens') int promptTokens, - @JsonKey(name: 'total_tokens') int totalTokens, - @JsonKey(name: 'completion_tokens_details', includeIfNull: false) - CompletionTokensDetails? completionTokensDetails}); - - @override - $CompletionTokensDetailsCopyWith<$Res>? 
get completionTokensDetails; + @JsonKey(name: 'total_tokens') int totalTokens}); } /// @nodoc @@ -11930,15 +10745,12 @@ class __$$CompletionUsageImplCopyWithImpl<$Res> _$CompletionUsageImpl _value, $Res Function(_$CompletionUsageImpl) _then) : super(_value, _then); - /// Create a copy of CompletionUsage - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? completionTokens = freezed, Object? promptTokens = null, Object? totalTokens = null, - Object? completionTokensDetails = freezed, }) { return _then(_$CompletionUsageImpl( completionTokens: freezed == completionTokens @@ -11953,10 +10765,6 @@ class __$$CompletionUsageImplCopyWithImpl<$Res> ? _value.totalTokens : totalTokens // ignore: cast_nullable_to_non_nullable as int, - completionTokensDetails: freezed == completionTokensDetails - ? _value.completionTokensDetails - : completionTokensDetails // ignore: cast_nullable_to_non_nullable - as CompletionTokensDetails?, )); } } @@ -11967,9 +10775,7 @@ class _$CompletionUsageImpl extends _CompletionUsage { const _$CompletionUsageImpl( {@JsonKey(name: 'completion_tokens') required this.completionTokens, @JsonKey(name: 'prompt_tokens') required this.promptTokens, - @JsonKey(name: 'total_tokens') required this.totalTokens, - @JsonKey(name: 'completion_tokens_details', includeIfNull: false) - this.completionTokensDetails}) + @JsonKey(name: 'total_tokens') required this.totalTokens}) : super._(); factory _$CompletionUsageImpl.fromJson(Map json) => @@ -11990,14 +10796,9 @@ class _$CompletionUsageImpl extends _CompletionUsage { @JsonKey(name: 'total_tokens') final int totalTokens; - /// Breakdown of tokens used in a completion. - @override - @JsonKey(name: 'completion_tokens_details', includeIfNull: false) - final CompletionTokensDetails? completionTokensDetails; - @override String toString() { - return 'CompletionUsage(completionTokens: $completionTokens, promptTokens: $promptTokens, totalTokens: $totalTokens, completionTokensDetails: $completionTokensDetails)'; + return 'CompletionUsage(completionTokens: $completionTokens, promptTokens: $promptTokens, totalTokens: $totalTokens)'; } @override @@ -12010,20 +10811,15 @@ class _$CompletionUsageImpl extends _CompletionUsage { (identical(other.promptTokens, promptTokens) || other.promptTokens == promptTokens) && (identical(other.totalTokens, totalTokens) || - other.totalTokens == totalTokens) && - (identical( - other.completionTokensDetails, completionTokensDetails) || - other.completionTokensDetails == completionTokensDetails)); + other.totalTokens == totalTokens)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, completionTokens, promptTokens, - totalTokens, completionTokensDetails); + int get hashCode => + Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); - /// Create a copy of CompletionUsage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CompletionUsageImplCopyWith<_$CompletionUsageImpl> get copyWith => @@ -12042,215 +10838,34 @@ abstract class _CompletionUsage extends CompletionUsage { const factory _CompletionUsage( {@JsonKey(name: 'completion_tokens') required final int? 
completionTokens, @JsonKey(name: 'prompt_tokens') required final int promptTokens, - @JsonKey(name: 'total_tokens') required final int totalTokens, - @JsonKey(name: 'completion_tokens_details', includeIfNull: false) - final CompletionTokensDetails? - completionTokensDetails}) = _$CompletionUsageImpl; + @JsonKey(name: 'total_tokens') + required final int totalTokens}) = _$CompletionUsageImpl; const _CompletionUsage._() : super._(); factory _CompletionUsage.fromJson(Map json) = _$CompletionUsageImpl.fromJson; - /// Number of tokens in the generated completion. @override + + /// Number of tokens in the generated completion. @JsonKey(name: 'completion_tokens') int? get completionTokens; + @override /// Number of tokens in the prompt. - @override @JsonKey(name: 'prompt_tokens') int get promptTokens; + @override /// Total number of tokens used in the request (prompt + completion). - @override @JsonKey(name: 'total_tokens') int get totalTokens; - - /// Breakdown of tokens used in a completion. @override - @JsonKey(name: 'completion_tokens_details', includeIfNull: false) - CompletionTokensDetails? get completionTokensDetails; - - /// Create a copy of CompletionUsage - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CompletionUsageImplCopyWith<_$CompletionUsageImpl> get copyWith => throw _privateConstructorUsedError; } -CompletionTokensDetails _$CompletionTokensDetailsFromJson( - Map json) { - return _CompletionTokensDetails.fromJson(json); -} - -/// @nodoc -mixin _$CompletionTokensDetails { - /// Tokens generated by the model for reasoning. - @JsonKey(name: 'reasoning_tokens', includeIfNull: false) - int? get reasoningTokens => throw _privateConstructorUsedError; - - /// Serializes this CompletionTokensDetails to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CompletionTokensDetails - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $CompletionTokensDetailsCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $CompletionTokensDetailsCopyWith<$Res> { - factory $CompletionTokensDetailsCopyWith(CompletionTokensDetails value, - $Res Function(CompletionTokensDetails) then) = - _$CompletionTokensDetailsCopyWithImpl<$Res, CompletionTokensDetails>; - @useResult - $Res call( - {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) - int? reasoningTokens}); -} - -/// @nodoc -class _$CompletionTokensDetailsCopyWithImpl<$Res, - $Val extends CompletionTokensDetails> - implements $CompletionTokensDetailsCopyWith<$Res> { - _$CompletionTokensDetailsCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of CompletionTokensDetails - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? reasoningTokens = freezed, - }) { - return _then(_value.copyWith( - reasoningTokens: freezed == reasoningTokens - ? 
_value.reasoningTokens - : reasoningTokens // ignore: cast_nullable_to_non_nullable - as int?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$CompletionTokensDetailsImplCopyWith<$Res> - implements $CompletionTokensDetailsCopyWith<$Res> { - factory _$$CompletionTokensDetailsImplCopyWith( - _$CompletionTokensDetailsImpl value, - $Res Function(_$CompletionTokensDetailsImpl) then) = - __$$CompletionTokensDetailsImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) - int? reasoningTokens}); -} - -/// @nodoc -class __$$CompletionTokensDetailsImplCopyWithImpl<$Res> - extends _$CompletionTokensDetailsCopyWithImpl<$Res, - _$CompletionTokensDetailsImpl> - implements _$$CompletionTokensDetailsImplCopyWith<$Res> { - __$$CompletionTokensDetailsImplCopyWithImpl( - _$CompletionTokensDetailsImpl _value, - $Res Function(_$CompletionTokensDetailsImpl) _then) - : super(_value, _then); - - /// Create a copy of CompletionTokensDetails - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? reasoningTokens = freezed, - }) { - return _then(_$CompletionTokensDetailsImpl( - reasoningTokens: freezed == reasoningTokens - ? _value.reasoningTokens - : reasoningTokens // ignore: cast_nullable_to_non_nullable - as int?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$CompletionTokensDetailsImpl extends _CompletionTokensDetails { - const _$CompletionTokensDetailsImpl( - {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) - this.reasoningTokens}) - : super._(); - - factory _$CompletionTokensDetailsImpl.fromJson(Map json) => - _$$CompletionTokensDetailsImplFromJson(json); - - /// Tokens generated by the model for reasoning. - @override - @JsonKey(name: 'reasoning_tokens', includeIfNull: false) - final int? reasoningTokens; - - @override - String toString() { - return 'CompletionTokensDetails(reasoningTokens: $reasoningTokens)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$CompletionTokensDetailsImpl && - (identical(other.reasoningTokens, reasoningTokens) || - other.reasoningTokens == reasoningTokens)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, reasoningTokens); - - /// Create a copy of CompletionTokensDetails - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$CompletionTokensDetailsImplCopyWith<_$CompletionTokensDetailsImpl> - get copyWith => __$$CompletionTokensDetailsImplCopyWithImpl< - _$CompletionTokensDetailsImpl>(this, _$identity); - - @override - Map toJson() { - return _$$CompletionTokensDetailsImplToJson( - this, - ); - } -} - -abstract class _CompletionTokensDetails extends CompletionTokensDetails { - const factory _CompletionTokensDetails( - {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) - final int? reasoningTokens}) = _$CompletionTokensDetailsImpl; - const _CompletionTokensDetails._() : super._(); - - factory _CompletionTokensDetails.fromJson(Map json) = - _$CompletionTokensDetailsImpl.fromJson; - - /// Tokens generated by the model for reasoning. - @override - @JsonKey(name: 'reasoning_tokens', includeIfNull: false) - int? 
get reasoningTokens; - - /// Create a copy of CompletionTokensDetails - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$CompletionTokensDetailsImplCopyWith<_$CompletionTokensDetailsImpl> - get copyWith => throw _privateConstructorUsedError; -} - CreateEmbeddingRequest _$CreateEmbeddingRequestFromJson( Map json) { return _CreateEmbeddingRequest.fromJson(json); @@ -12279,12 +10894,8 @@ mixin _$CreateEmbeddingRequest { @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; - /// Serializes this CreateEmbeddingRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateEmbeddingRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateEmbeddingRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -12317,8 +10928,6 @@ class _$CreateEmbeddingRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateEmbeddingRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12352,8 +10961,6 @@ class _$CreateEmbeddingRequestCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of CreateEmbeddingRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingModelCopyWith<$Res> get model { @@ -12362,8 +10969,6 @@ class _$CreateEmbeddingRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateEmbeddingRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingInputCopyWith<$Res> get input { @@ -12405,8 +11010,6 @@ class __$$CreateEmbeddingRequestImplCopyWithImpl<$Res> $Res Function(_$CreateEmbeddingRequestImpl) _then) : super(_value, _then); - /// Create a copy of CreateEmbeddingRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12500,14 +11103,12 @@ class _$CreateEmbeddingRequestImpl extends _CreateEmbeddingRequest { (identical(other.user, user) || other.user == user)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, model, input, encodingFormat, dimensions, user); - /// Create a copy of CreateEmbeddingRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateEmbeddingRequestImplCopyWith<_$CreateEmbeddingRequestImpl> @@ -12536,35 +11137,33 @@ abstract class _CreateEmbeddingRequest extends CreateEmbeddingRequest { factory _CreateEmbeddingRequest.fromJson(Map json) = _$CreateEmbeddingRequestImpl.fromJson; - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. @override + + /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. 
@_EmbeddingModelConverter() EmbeddingModel get model; + @override /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. - @override @_EmbeddingInputConverter() EmbeddingInput get input; + @override /// The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). - @override @JsonKey(name: 'encoding_format') EmbeddingEncodingFormat get encodingFormat; + @override /// The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. - @override @JsonKey(includeIfNull: false) int? get dimensions; + @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). - @override @JsonKey(includeIfNull: false) String? get user; - - /// Create a copy of CreateEmbeddingRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateEmbeddingRequestImplCopyWith<_$CreateEmbeddingRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12623,8 +11222,6 @@ mixin _$EmbeddingModel { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this EmbeddingModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -12644,9 +11241,6 @@ class _$EmbeddingModelCopyWithImpl<$Res, $Val extends EmbeddingModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of EmbeddingModel - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -12668,8 +11262,6 @@ class __$$EmbeddingModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$EmbeddingModelEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of EmbeddingModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12713,13 +11305,11 @@ class _$EmbeddingModelEnumerationImpl extends EmbeddingModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of EmbeddingModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$EmbeddingModelEnumerationImplCopyWith<_$EmbeddingModelEnumerationImpl> @@ -12806,10 +11396,7 @@ abstract class EmbeddingModelEnumeration extends EmbeddingModel { @override EmbeddingModels get value; - - /// Create a copy of EmbeddingModel - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$EmbeddingModelEnumerationImplCopyWith<_$EmbeddingModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12831,8 +11418,6 @@ class __$$EmbeddingModelStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingModelStringImpl) _then) : super(_value, _then); - /// Create a copy of EmbeddingModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12876,13 +11461,11 @@ class _$EmbeddingModelStringImpl extends EmbeddingModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of EmbeddingModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$EmbeddingModelStringImplCopyWith<_$EmbeddingModelStringImpl> @@ -12970,10 +11553,7 @@ abstract class EmbeddingModelString extends EmbeddingModel { @override String get value; - - /// Create a copy of EmbeddingModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$EmbeddingModelStringImplCopyWith<_$EmbeddingModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13048,8 +11628,6 @@ mixin _$EmbeddingInput { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this EmbeddingInput to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -13069,9 +11647,6 @@ class _$EmbeddingInputCopyWithImpl<$Res, $Val extends EmbeddingInput> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of EmbeddingInput - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -13093,8 +11668,6 @@ class __$$EmbeddingInputListListIntImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputListListIntImpl) _then) : super(_value, _then); - /// Create a copy of EmbeddingInput - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13145,14 +11718,12 @@ class _$EmbeddingInputListListIntImpl extends EmbeddingInputListListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - /// Create a copy of EmbeddingInput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$EmbeddingInputListListIntImplCopyWith<_$EmbeddingInputListListIntImpl> @@ -13251,10 +11822,7 @@ abstract class EmbeddingInputListListInt extends EmbeddingInput { @override List> get value; - - /// Create a copy of EmbeddingInput - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$EmbeddingInputListListIntImplCopyWith<_$EmbeddingInputListListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13277,8 +11845,6 @@ class __$$EmbeddingInputListIntImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputListIntImpl) _then) : super(_value, _then); - /// Create a copy of EmbeddingInput - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13329,14 +11895,12 @@ class _$EmbeddingInputListIntImpl extends EmbeddingInputListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - /// Create a copy of EmbeddingInput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$EmbeddingInputListIntImplCopyWith<_$EmbeddingInputListIntImpl> @@ -13435,10 +11999,7 @@ abstract class EmbeddingInputListInt extends EmbeddingInput { @override List get value; - - /// Create a copy of EmbeddingInput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$EmbeddingInputListIntImplCopyWith<_$EmbeddingInputListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13462,8 +12023,6 @@ class __$$EmbeddingInputListStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputListStringImpl) _then) : super(_value, _then); - /// Create a copy of EmbeddingInput - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13514,14 +12073,12 @@ class _$EmbeddingInputListStringImpl extends EmbeddingInputListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - /// Create a copy of EmbeddingInput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$EmbeddingInputListStringImplCopyWith<_$EmbeddingInputListStringImpl> @@ -13620,10 +12177,7 @@ abstract class EmbeddingInputListString extends EmbeddingInput { @override List get value; - - /// Create a copy of EmbeddingInput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$EmbeddingInputListStringImplCopyWith<_$EmbeddingInputListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13645,8 +12199,6 @@ class __$$EmbeddingInputStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputStringImpl) _then) : super(_value, _then); - /// Create a copy of EmbeddingInput - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -13690,13 +12242,11 @@ class _$EmbeddingInputStringImpl extends EmbeddingInputString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of EmbeddingInput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$EmbeddingInputStringImplCopyWith<_$EmbeddingInputStringImpl> @@ -13796,10 +12346,7 @@ abstract class EmbeddingInputString extends EmbeddingInput { @override String get value; - - /// Create a copy of EmbeddingInput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$EmbeddingInputStringImplCopyWith<_$EmbeddingInputStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13825,12 +12372,8 @@ mixin _$CreateEmbeddingResponse { @JsonKey(includeIfNull: false) EmbeddingUsage? get usage => throw _privateConstructorUsedError; - /// Serializes this CreateEmbeddingResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateEmbeddingResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateEmbeddingResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -13861,8 +12404,6 @@ class _$CreateEmbeddingResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateEmbeddingResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13891,8 +12432,6 @@ class _$CreateEmbeddingResponseCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of CreateEmbeddingResponse - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingUsageCopyWith<$Res>? get usage { @@ -13935,8 +12474,6 @@ class __$$CreateEmbeddingResponseImplCopyWithImpl<$Res> $Res Function(_$CreateEmbeddingResponseImpl) _then) : super(_value, _then); - /// Create a copy of CreateEmbeddingResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14020,14 +12557,12 @@ class _$CreateEmbeddingResponseImpl extends _CreateEmbeddingResponse { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_data), model, object, usage); - /// Create a copy of CreateEmbeddingResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateEmbeddingResponseImplCopyWith<_$CreateEmbeddingResponseImpl> @@ -14054,27 +12589,25 @@ abstract class _CreateEmbeddingResponse extends CreateEmbeddingResponse { factory _CreateEmbeddingResponse.fromJson(Map json) = _$CreateEmbeddingResponseImpl.fromJson; - /// The list of embeddings generated by the model. @override + + /// The list of embeddings generated by the model. 
List get data; + @override /// The name of the model used to generate the embedding. - @override String get model; + @override /// The object type, which is always "list". - @override CreateEmbeddingResponseObject get object; + @override /// The usage information for the request. - @override @JsonKey(includeIfNull: false) EmbeddingUsage? get usage; - - /// Create a copy of CreateEmbeddingResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateEmbeddingResponseImplCopyWith<_$CreateEmbeddingResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14095,12 +12628,8 @@ mixin _$Embedding { /// The object type, which is always "embedding". EmbeddingObject get object => throw _privateConstructorUsedError; - /// Serializes this Embedding to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of Embedding - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $EmbeddingCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -14128,8 +12657,6 @@ class _$EmbeddingCopyWithImpl<$Res, $Val extends Embedding> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of Embedding - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14153,8 +12680,6 @@ class _$EmbeddingCopyWithImpl<$Res, $Val extends Embedding> ) as $Val); } - /// Create a copy of Embedding - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingVectorCopyWith<$Res> get embedding { @@ -14189,8 +12714,6 @@ class __$$EmbeddingImplCopyWithImpl<$Res> _$EmbeddingImpl _value, $Res Function(_$EmbeddingImpl) _then) : super(_value, _then); - /// Create a copy of Embedding - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14256,13 +12779,11 @@ class _$EmbeddingImpl extends _Embedding { (identical(other.object, object) || other.object == object)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, index, embedding, object); - /// Create a copy of Embedding - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$EmbeddingImplCopyWith<_$EmbeddingImpl> get copyWith => @@ -14286,23 +12807,21 @@ abstract class _Embedding extends Embedding { factory _Embedding.fromJson(Map json) = _$EmbeddingImpl.fromJson; - /// The index of the embedding in the list of embeddings. @override + + /// The index of the embedding in the list of embeddings. int get index; + @override /// The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings). - @override @_EmbeddingVectorConverter() EmbeddingVector get embedding; + @override /// The object type, which is always "embedding". - @override EmbeddingObject get object; - - /// Create a copy of Embedding - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$EmbeddingImplCopyWith<_$EmbeddingImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14361,8 +12880,6 @@ mixin _$EmbeddingVector { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this EmbeddingVector to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -14382,9 +12899,6 @@ class _$EmbeddingVectorCopyWithImpl<$Res, $Val extends EmbeddingVector> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of EmbeddingVector - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -14406,8 +12920,6 @@ class __$$EmbeddingVectorListDoubleImplCopyWithImpl<$Res> $Res Function(_$EmbeddingVectorListDoubleImpl) _then) : super(_value, _then); - /// Create a copy of EmbeddingVector - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14458,14 +12970,12 @@ class _$EmbeddingVectorListDoubleImpl extends EmbeddingVectorListDouble { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - /// Create a copy of EmbeddingVector - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$EmbeddingVectorListDoubleImplCopyWith<_$EmbeddingVectorListDoubleImpl> @@ -14552,10 +13062,7 @@ abstract class EmbeddingVectorListDouble extends EmbeddingVector { @override List get value; - - /// Create a copy of EmbeddingVector - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$EmbeddingVectorListDoubleImplCopyWith<_$EmbeddingVectorListDoubleImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14578,8 +13085,6 @@ class __$$EmbeddingVectorStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingVectorStringImpl) _then) : super(_value, _then); - /// Create a copy of EmbeddingVector - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14623,13 +13128,11 @@ class _$EmbeddingVectorStringImpl extends EmbeddingVectorString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of EmbeddingVector - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$EmbeddingVectorStringImplCopyWith<_$EmbeddingVectorStringImpl> @@ -14716,10 +13219,7 @@ abstract class EmbeddingVectorString extends EmbeddingVector { @override String get value; - - /// Create a copy of EmbeddingVector - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$EmbeddingVectorStringImplCopyWith<_$EmbeddingVectorStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14738,12 +13238,8 @@ mixin _$EmbeddingUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; - /// Serializes this EmbeddingUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of EmbeddingUsage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $EmbeddingUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -14769,8 +13265,6 @@ class _$EmbeddingUsageCopyWithImpl<$Res, $Val extends EmbeddingUsage> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of EmbeddingUsage - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14811,8 +13305,6 @@ class __$$EmbeddingUsageImplCopyWithImpl<$Res> _$EmbeddingUsageImpl _value, $Res Function(_$EmbeddingUsageImpl) _then) : super(_value, _then); - /// Create a copy of EmbeddingUsage - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14869,13 +13361,11 @@ class _$EmbeddingUsageImpl extends _EmbeddingUsage { other.totalTokens == totalTokens)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, promptTokens, totalTokens); - /// Create a copy of EmbeddingUsage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$EmbeddingUsageImplCopyWith<_$EmbeddingUsageImpl> get copyWith => @@ -14900,20 +13390,18 @@ abstract class _EmbeddingUsage extends EmbeddingUsage { factory _EmbeddingUsage.fromJson(Map json) = _$EmbeddingUsageImpl.fromJson; - /// The number of tokens used by the prompt. @override + + /// The number of tokens used by the prompt. @JsonKey(name: 'prompt_tokens') int get promptTokens; + @override /// The total number of tokens used by the request. - @override @JsonKey(name: 'total_tokens') int get totalTokens; - - /// Create a copy of EmbeddingUsage - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$EmbeddingUsageImplCopyWith<_$EmbeddingUsageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14934,12 +13422,7 @@ mixin _$CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose - /// `fine-tune`. - /// - /// The contents of the file should differ depending on if the model uses the - /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or - /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. 
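The embedding schema classes touched above (`CreateEmbeddingRequest`, `EmbeddingInput`, `EmbeddingVector`, `CreateEmbeddingResponse`, `EmbeddingUsage`) are consumed through the openai_dart client roughly as follows. A minimal sketch, assuming the client surface documented for the package (`OpenAIClient`, `createEmbedding`, and the `EmbeddingModel.modelId` / `EmbeddingInput.string` union factories); verify the exact names against the generated API:

import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  final client = OpenAIClient(apiKey: 'OPENAI_API_KEY');

  final res = await client.createEmbedding(
    request: CreateEmbeddingRequest(
      model: EmbeddingModel.modelId('text-embedding-3-small'),
      input: EmbeddingInput.string('The food was delicious.'),
      dimensions: 256, // only honoured by text-embedding-3* models
    ),
  );

  // `embedding` is the EmbeddingVector union: a list of doubles by default,
  // or a base64-encoded string when encoding_format is set to `base64`.
  print(res.data.first.embedding);
  print('tokens used: ${res.usage?.totalTokens}');

  client.endSession();
}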
@JsonKey(name: 'training_file') @@ -14950,9 +13433,9 @@ mixin _$CreateFineTuningJobRequest { FineTuningJobHyperparameters? get hyperparameters => throw _privateConstructorUsedError; - /// A string of up to 64 characters that will be added to your fine-tuned model name. + /// A string of up to 18 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. @JsonKey(includeIfNull: false) String? get suffix => throw _privateConstructorUsedError; @@ -14979,12 +13462,8 @@ mixin _$CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; - /// Serializes this CreateFineTuningJobRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateFineTuningJobRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateFineTuningJobRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -15022,8 +13501,6 @@ class _$CreateFineTuningJobRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateFineTuningJobRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15067,8 +13544,6 @@ class _$CreateFineTuningJobRequestCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of CreateFineTuningJobRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningModelCopyWith<$Res> get model { @@ -15077,8 +13552,6 @@ class _$CreateFineTuningJobRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateFineTuningJobRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningJobHyperparametersCopyWith<$Res>? get hyperparameters { @@ -15129,8 +13602,6 @@ class __$$CreateFineTuningJobRequestImplCopyWithImpl<$Res> $Res Function(_$CreateFineTuningJobRequestImpl) _then) : super(_value, _then); - /// Create a copy of CreateFineTuningJobRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15205,12 +13676,7 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose - /// `fine-tune`. - /// - /// The contents of the file should differ depending on if the model uses the - /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or - /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. 
@override @@ -15222,9 +13688,9 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) final FineTuningJobHyperparameters? hyperparameters; - /// A string of up to 64 characters that will be added to your fine-tuned model name. + /// A string of up to 18 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. @override @JsonKey(includeIfNull: false) final String? suffix; @@ -15286,7 +13752,7 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { (identical(other.seed, seed) || other.seed == seed)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -15298,9 +13764,7 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { const DeepCollectionEquality().hash(_integrations), seed); - /// Create a copy of CreateFineTuningJobRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateFineTuningJobRequestImplCopyWith<_$CreateFineTuningJobRequestImpl> @@ -15333,39 +13797,36 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { factory _CreateFineTuningJobRequest.fromJson(Map json) = _$CreateFineTuningJobRequestImpl.fromJson; + @override + /// The name of the model to fine-tune. You can select one of the /// [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - @override @_FineTuningModelConverter() FineTuningModel get model; + @override /// The ID of an uploaded file that contains training data. /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose - /// `fine-tune`. - /// - /// The contents of the file should differ depending on if the model uses the - /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or - /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. - @override @JsonKey(name: 'training_file') String get trainingFile; + @override /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. - @override @JsonKey(includeIfNull: false) FineTuningJobHyperparameters? get hyperparameters; + @override - /// A string of up to 64 characters that will be added to your fine-tuned model name. + /// A string of up to 18 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. 
- @override + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. @JsonKey(includeIfNull: false) String? get suffix; + @override /// The ID of an uploaded file that contains validation data. /// @@ -15377,25 +13838,21 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. - @override @JsonKey(name: 'validation_file', includeIfNull: false) String? get validationFile; + @override /// A list of integrations to enable for your fine-tuning job. - @override @JsonKey(includeIfNull: false) List? get integrations; + @override /// The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. /// If a seed is not specified, one will be generated for you. - @override @JsonKey(includeIfNull: false) int? get seed; - - /// Create a copy of CreateFineTuningJobRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateFineTuningJobRequestImplCopyWith<_$CreateFineTuningJobRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15454,8 +13911,6 @@ mixin _$FineTuningModel { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this FineTuningModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -15475,9 +13930,6 @@ class _$FineTuningModelCopyWithImpl<$Res, $Val extends FineTuningModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of FineTuningModel - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -15500,8 +13952,6 @@ class __$$FineTuningModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$FineTuningModelEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of FineTuningModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15546,13 +13996,11 @@ class _$FineTuningModelEnumerationImpl extends FineTuningModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of FineTuningModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$FineTuningModelEnumerationImplCopyWith<_$FineTuningModelEnumerationImpl> @@ -15639,10 +14087,7 @@ abstract class FineTuningModelEnumeration extends FineTuningModel { @override FineTuningModels get value; - - /// Create a copy of FineTuningModel - /// with the given fields replaced by the non-null parameter values. 
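The `CreateFineTuningJobRequest` / `FineTuningModel` schema above drives the job-creation call. A minimal sketch; the method name `createFineTuningJob` and the union factories (`FineTuningModel.model`, `FineTuningNEpochs.int`) are assumptions inferred from the schema names in this patch, so check them against the generated client before relying on them:

import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  final client = OpenAIClient(apiKey: 'OPENAI_API_KEY');

  final job = await client.createFineTuningJob(
    request: CreateFineTuningJobRequest(
      // Assumed factory/enum names; a plain model-id string variant
      // (e.g. FineTuningModel.modelId('gpt-3.5-turbo')) should also exist.
      model: FineTuningModel.model(FineTuningModels.gpt35Turbo),
      trainingFile: 'file-abc123', // JSONL upload with purpose `fine-tune`
      suffix: 'custom-model-name',
      hyperparameters: FineTuningJobHyperparameters(
        nEpochs: FineTuningNEpochs.int(3),
      ),
    ),
  );

  print('created ${job.id} (status: ${job.status})');

  client.endSession();
}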
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$FineTuningModelEnumerationImplCopyWith<_$FineTuningModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15665,8 +14110,6 @@ class __$$FineTuningModelStringImplCopyWithImpl<$Res> $Res Function(_$FineTuningModelStringImpl) _then) : super(_value, _then); - /// Create a copy of FineTuningModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15710,13 +14153,11 @@ class _$FineTuningModelStringImpl extends FineTuningModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of FineTuningModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$FineTuningModelStringImplCopyWith<_$FineTuningModelStringImpl> @@ -15803,10 +14244,7 @@ abstract class FineTuningModelString extends FineTuningModel { @override String get value; - - /// Create a copy of FineTuningModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$FineTuningModelStringImplCopyWith<_$FineTuningModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15873,12 +14311,8 @@ mixin _$FineTuningJob { List? get integrations => throw _privateConstructorUsedError; - /// Serializes this FineTuningJob to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of FineTuningJob - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $FineTuningJobCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -15921,8 +14355,6 @@ class _$FineTuningJobCopyWithImpl<$Res, $Val extends FineTuningJob> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of FineTuningJob - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16006,8 +14438,6 @@ class _$FineTuningJobCopyWithImpl<$Res, $Val extends FineTuningJob> ) as $Val); } - /// Create a copy of FineTuningJob - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningJobErrorCopyWith<$Res>? get error { @@ -16020,8 +14450,6 @@ class _$FineTuningJobCopyWithImpl<$Res, $Val extends FineTuningJob> }); } - /// Create a copy of FineTuningJob - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningJobHyperparametersCopyWith<$Res> get hyperparameters { @@ -16072,8 +14500,6 @@ class __$$FineTuningJobImplCopyWithImpl<$Res> _$FineTuningJobImpl _value, $Res Function(_$FineTuningJobImpl) _then) : super(_value, _then); - /// Create a copy of FineTuningJob - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -16307,7 +14733,7 @@ class _$FineTuningJobImpl extends _FineTuningJob { .equals(other._integrations, _integrations)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -16327,9 +14753,7 @@ class _$FineTuningJobImpl extends _FineTuningJob { validationFile, const DeepCollectionEquality().hash(_integrations)); - /// Create a copy of FineTuningJob - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$FineTuningJobImplCopyWith<_$FineTuningJobImpl> get copyWith => @@ -16366,79 +14790,77 @@ abstract class _FineTuningJob extends FineTuningJob { factory _FineTuningJob.fromJson(Map json) = _$FineTuningJobImpl.fromJson; - /// The object identifier, which can be referenced in the API endpoints. @override + + /// The object identifier, which can be referenced in the API endpoints. String get id; + @override /// The Unix timestamp (in seconds) for when the fine-tuning job was created. - @override @JsonKey(name: 'created_at') int get createdAt; + @override /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. - @override FineTuningJobError? get error; + @override /// The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. - @override @JsonKey(name: 'fine_tuned_model') String? get fineTunedModel; + @override /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. - @override @JsonKey(name: 'finished_at') int? get finishedAt; + @override /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. - @override FineTuningJobHyperparameters get hyperparameters; + @override /// The base model that is being fine-tuned. - @override String get model; + @override /// The object type, which is always "fine_tuning.job". - @override FineTuningJobObject get object; + @override /// The organization that owns the fine-tuning job. - @override @JsonKey(name: 'organization_id') String get organizationId; + @override /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). - @override @JsonKey(name: 'result_files') List get resultFiles; + @override /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. - @override FineTuningJobStatus get status; + @override /// The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. - @override @JsonKey(name: 'trained_tokens') int? get trainedTokens; + @override /// The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). - @override @JsonKey(name: 'training_file') String get trainingFile; + @override /// The file ID used for validation. You can retrieve the validation results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). - @override @JsonKey(name: 'validation_file') String? 
get validationFile; + @override /// A list of integrations to enable for this fine-tuning job. - @override @JsonKey(includeIfNull: false) List? get integrations; - - /// Create a copy of FineTuningJob - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$FineTuningJobImplCopyWith<_$FineTuningJobImpl> get copyWith => throw _privateConstructorUsedError; } @@ -16458,12 +14880,8 @@ mixin _$FineTuningIntegration { /// to your run, and set a default entity (team, username, etc) to be associated with your run. FineTuningIntegrationWandb get wandb => throw _privateConstructorUsedError; - /// Serializes this FineTuningIntegration to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of FineTuningIntegration - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $FineTuningIntegrationCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -16490,8 +14908,6 @@ class _$FineTuningIntegrationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of FineTuningIntegration - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16510,8 +14926,6 @@ class _$FineTuningIntegrationCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of FineTuningIntegration - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningIntegrationWandbCopyWith<$Res> get wandb { @@ -16545,8 +14959,6 @@ class __$$FineTuningIntegrationImplCopyWithImpl<$Res> $Res Function(_$FineTuningIntegrationImpl) _then) : super(_value, _then); - /// Create a copy of FineTuningIntegration - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16599,13 +15011,11 @@ class _$FineTuningIntegrationImpl extends _FineTuningIntegration { (identical(other.wandb, wandb) || other.wandb == wandb)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, type, wandb); - /// Create a copy of FineTuningIntegration - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$FineTuningIntegrationImplCopyWith<_$FineTuningIntegrationImpl> @@ -16630,20 +15040,18 @@ abstract class _FineTuningIntegration extends FineTuningIntegration { factory _FineTuningIntegration.fromJson(Map json) = _$FineTuningIntegrationImpl.fromJson; - /// The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. @override + + /// The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. FineTuningIntegrationType get type; + @override /// The settings for your integration with Weights and Biases. This payload specifies the project that /// metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags /// to your run, and set a default entity (team, username, etc) to be associated with your run. - @override FineTuningIntegrationWandb get wandb; - - /// Create a copy of FineTuningIntegration - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$FineTuningIntegrationImplCopyWith<_$FineTuningIntegrationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -16672,12 +15080,8 @@ mixin _$FineTuningIntegrationWandb { @JsonKey(includeIfNull: false) List? get tags => throw _privateConstructorUsedError; - /// Serializes this FineTuningIntegrationWandb to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of FineTuningIntegrationWandb - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $FineTuningIntegrationWandbCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -16707,8 +15111,6 @@ class _$FineTuningIntegrationWandbCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of FineTuningIntegrationWandb - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16764,8 +15166,6 @@ class __$$FineTuningIntegrationWandbImplCopyWithImpl<$Res> $Res Function(_$FineTuningIntegrationWandbImpl) _then) : super(_value, _then); - /// Create a copy of FineTuningIntegrationWandb - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16857,14 +15257,12 @@ class _$FineTuningIntegrationWandbImpl extends _FineTuningIntegrationWandb { const DeepCollectionEquality().equals(other._tags, _tags)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, project, name, entity, const DeepCollectionEquality().hash(_tags)); - /// Create a copy of FineTuningIntegrationWandb - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$FineTuningIntegrationWandbImplCopyWith<_$FineTuningIntegrationWandbImpl> @@ -16891,31 +15289,29 @@ abstract class _FineTuningIntegrationWandb extends FineTuningIntegrationWandb { factory _FineTuningIntegrationWandb.fromJson(Map json) = _$FineTuningIntegrationWandbImpl.fromJson; - /// The name of the project that the new run will be created under. @override + + /// The name of the project that the new run will be created under. String get project; + @override /// A display name to set for the run. If not set, we will use the Job ID as the name. - @override @JsonKey(includeIfNull: false) String? get name; + @override /// The entity to use for the run. This allows you to set the team or username of the WandB user that you would /// like associated with the run. If not set, the default entity for the registered WandB API key is used. - @override @JsonKey(includeIfNull: false) String? get entity; + @override /// A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some /// default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". - @override @JsonKey(includeIfNull: false) List? get tags; - - /// Create a copy of FineTuningIntegrationWandb - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$FineTuningIntegrationWandbImplCopyWith<_$FineTuningIntegrationWandbImpl> get copyWith => throw _privateConstructorUsedError; } @@ -16935,12 +15331,8 @@ mixin _$FineTuningJobError { /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. String? get param => throw _privateConstructorUsedError; - /// Serializes this FineTuningJobError to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of FineTuningJobError - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $FineTuningJobErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -16964,8 +15356,6 @@ class _$FineTuningJobErrorCopyWithImpl<$Res, $Val extends FineTuningJobError> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of FineTuningJobError - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17009,8 +15399,6 @@ class __$$FineTuningJobErrorImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobErrorImpl) _then) : super(_value, _then); - /// Create a copy of FineTuningJobError - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17072,13 +15460,11 @@ class _$FineTuningJobErrorImpl extends _FineTuningJobError { (identical(other.param, param) || other.param == param)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, code, message, param); - /// Create a copy of FineTuningJobError - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$FineTuningJobErrorImplCopyWith<_$FineTuningJobErrorImpl> get copyWith => @@ -17103,22 +15489,20 @@ abstract class _FineTuningJobError extends FineTuningJobError { factory _FineTuningJobError.fromJson(Map json) = _$FineTuningJobErrorImpl.fromJson; - /// A machine-readable error code. @override + + /// A machine-readable error code. String get code; + @override /// A human-readable error message. - @override String get message; + @override /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. - @override String? get param; - - /// Create a copy of FineTuningJobError - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$FineTuningJobErrorImplCopyWith<_$FineTuningJobErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -17130,20 +15514,14 @@ FineTuningJobHyperparameters _$FineTuningJobHyperparametersFromJson( /// @nodoc mixin _$FineTuningJobHyperparameters { - /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - /// - /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number - /// manually, we support any number between 1 and 50 epochs. + /// The number of epochs to train the model for. An epoch refers to one + /// full cycle through the training dataset. 
@_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') FineTuningNEpochs get nEpochs => throw _privateConstructorUsedError; - /// Serializes this FineTuningJobHyperparameters to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of FineTuningJobHyperparameters - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $FineTuningJobHyperparametersCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -17175,8 +15553,6 @@ class _$FineTuningJobHyperparametersCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of FineTuningJobHyperparameters - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17190,8 +15566,6 @@ class _$FineTuningJobHyperparametersCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of FineTuningJobHyperparameters - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningNEpochsCopyWith<$Res> get nEpochs { @@ -17229,8 +15603,6 @@ class __$$FineTuningJobHyperparametersImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobHyperparametersImpl) _then) : super(_value, _then); - /// Create a copy of FineTuningJobHyperparameters - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17258,10 +15630,8 @@ class _$FineTuningJobHyperparametersImpl extends _FineTuningJobHyperparameters { Map json) => _$$FineTuningJobHyperparametersImplFromJson(json); - /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - /// - /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number - /// manually, we support any number between 1 and 50 epochs. + /// The number of epochs to train the model for. An epoch refers to one + /// full cycle through the training dataset. @override @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') @@ -17280,13 +15650,11 @@ class _$FineTuningJobHyperparametersImpl extends _FineTuningJobHyperparameters { (identical(other.nEpochs, nEpochs) || other.nEpochs == nEpochs)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, nEpochs); - /// Create a copy of FineTuningJobHyperparameters - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$FineTuningJobHyperparametersImplCopyWith< @@ -17314,19 +15682,15 @@ abstract class _FineTuningJobHyperparameters factory _FineTuningJobHyperparameters.fromJson(Map json) = _$FineTuningJobHyperparametersImpl.fromJson; - /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - /// - /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number - /// manually, we support any number between 1 and 50 epochs. @override + + /// The number of epochs to train the model for. An epoch refers to one + /// full cycle through the training dataset. 
@_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') FineTuningNEpochs get nEpochs; - - /// Create a copy of FineTuningJobHyperparameters - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$FineTuningJobHyperparametersImplCopyWith< _$FineTuningJobHyperparametersImpl> get copyWith => throw _privateConstructorUsedError; @@ -17386,8 +15750,6 @@ mixin _$FineTuningNEpochs { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this FineTuningNEpochs to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -17407,9 +15769,6 @@ class _$FineTuningNEpochsCopyWithImpl<$Res, $Val extends FineTuningNEpochs> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of FineTuningNEpochs - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -17432,8 +15791,6 @@ class __$$FineTuningNEpochsEnumerationImplCopyWithImpl<$Res> $Res Function(_$FineTuningNEpochsEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of FineTuningNEpochs - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17478,13 +15835,11 @@ class _$FineTuningNEpochsEnumerationImpl extends FineTuningNEpochsEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of FineTuningNEpochs - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$FineTuningNEpochsEnumerationImplCopyWith< @@ -17573,10 +15928,7 @@ abstract class FineTuningNEpochsEnumeration extends FineTuningNEpochs { @override FineTuningNEpochsOptions get value; - - /// Create a copy of FineTuningNEpochs - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$FineTuningNEpochsEnumerationImplCopyWith< _$FineTuningNEpochsEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -17599,8 +15951,6 @@ class __$$FineTuningNEpochsIntImplCopyWithImpl<$Res> $Res Function(_$FineTuningNEpochsIntImpl) _then) : super(_value, _then); - /// Create a copy of FineTuningNEpochs - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17644,13 +15994,11 @@ class _$FineTuningNEpochsIntImpl extends FineTuningNEpochsInt { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of FineTuningNEpochs - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$FineTuningNEpochsIntImplCopyWith<_$FineTuningNEpochsIntImpl> @@ -17738,10 +16086,7 @@ abstract class FineTuningNEpochsInt extends FineTuningNEpochs { @override int get value; - - /// Create a copy of FineTuningNEpochs - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$FineTuningNEpochsIntImplCopyWith<_$FineTuningNEpochsIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -17764,12 +16109,8 @@ mixin _$ListPaginatedFineTuningJobsResponse { ListPaginatedFineTuningJobsResponseObject get object => throw _privateConstructorUsedError; - /// Serializes this ListPaginatedFineTuningJobsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ListPaginatedFineTuningJobsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ListPaginatedFineTuningJobsResponseCopyWith< ListPaginatedFineTuningJobsResponse> get copyWith => throw _privateConstructorUsedError; @@ -17800,8 +16141,6 @@ class _$ListPaginatedFineTuningJobsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ListPaginatedFineTuningJobsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17851,8 +16190,6 @@ class __$$ListPaginatedFineTuningJobsResponseImplCopyWithImpl<$Res> $Res Function(_$ListPaginatedFineTuningJobsResponseImpl) _then) : super(_value, _then); - /// Create a copy of ListPaginatedFineTuningJobsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17927,14 +16264,12 @@ class _$ListPaginatedFineTuningJobsResponseImpl (identical(other.object, object) || other.object == object)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_data), hasMore, object); - /// Create a copy of ListPaginatedFineTuningJobsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ListPaginatedFineTuningJobsResponseImplCopyWith< @@ -17963,23 +16298,21 @@ abstract class _ListPaginatedFineTuningJobsResponse Map json) = _$ListPaginatedFineTuningJobsResponseImpl.fromJson; - /// The list of fine-tuning jobs. @override + + /// The list of fine-tuning jobs. List get data; + @override /// Whether there are more fine-tuning jobs to retrieve. - @override @JsonKey(name: 'has_more') bool get hasMore; + @override /// The object type, which is always "list". - @override ListPaginatedFineTuningJobsResponseObject get object; - - /// Create a copy of ListPaginatedFineTuningJobsResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ListPaginatedFineTuningJobsResponseImplCopyWith< _$ListPaginatedFineTuningJobsResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -17999,12 +16332,8 @@ mixin _$ListFineTuningJobEventsResponse { ListFineTuningJobEventsResponseObject get object => throw _privateConstructorUsedError; - /// Serializes this ListFineTuningJobEventsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ListFineTuningJobEventsResponse - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ListFineTuningJobEventsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -18033,8 +16362,6 @@ class _$ListFineTuningJobEventsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ListFineTuningJobEventsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18078,8 +16405,6 @@ class __$$ListFineTuningJobEventsResponseImplCopyWithImpl<$Res> $Res Function(_$ListFineTuningJobEventsResponseImpl) _then) : super(_value, _then); - /// Create a copy of ListFineTuningJobEventsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18141,14 +16466,12 @@ class _$ListFineTuningJobEventsResponseImpl (identical(other.object, object) || other.object == object)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_data), object); - /// Create a copy of ListFineTuningJobEventsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ListFineTuningJobEventsResponseImplCopyWith< @@ -18175,18 +16498,16 @@ abstract class _ListFineTuningJobEventsResponse factory _ListFineTuningJobEventsResponse.fromJson(Map json) = _$ListFineTuningJobEventsResponseImpl.fromJson; - /// The list of fine-tuning job events. @override + + /// The list of fine-tuning job events. List get data; + @override /// The object type, which is always "list". - @override ListFineTuningJobEventsResponseObject get object; - - /// Create a copy of ListFineTuningJobEventsResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ListFineTuningJobEventsResponseImplCopyWith< _$ListFineTuningJobEventsResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -18218,12 +16539,8 @@ mixin _$ListFineTuningJobCheckpointsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; - /// Serializes this ListFineTuningJobCheckpointsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ListFineTuningJobCheckpointsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ListFineTuningJobCheckpointsResponseCopyWith< ListFineTuningJobCheckpointsResponse> get copyWith => throw _privateConstructorUsedError; @@ -18256,8 +16573,6 @@ class _$ListFineTuningJobCheckpointsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ListFineTuningJobCheckpointsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18319,8 +16634,6 @@ class __$$ListFineTuningJobCheckpointsResponseImplCopyWithImpl<$Res> $Res Function(_$ListFineTuningJobCheckpointsResponseImpl) _then) : super(_value, _then); - /// Create a copy of ListFineTuningJobCheckpointsResponse - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -18419,7 +16732,7 @@ class _$ListFineTuningJobCheckpointsResponseImpl (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -18429,9 +16742,7 @@ class _$ListFineTuningJobCheckpointsResponseImpl lastId, hasMore); - /// Create a copy of ListFineTuningJobCheckpointsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ListFineTuningJobCheckpointsResponseImplCopyWith< @@ -18463,33 +16774,31 @@ abstract class _ListFineTuningJobCheckpointsResponse Map json) = _$ListFineTuningJobCheckpointsResponseImpl.fromJson; - /// The list of fine-tuning job checkpoints. @override + + /// The list of fine-tuning job checkpoints. List get data; + @override /// The object type, which is always "list". - @override ListFineTuningJobCheckpointsResponseObject get object; + @override /// The ID of the first checkpoint in the list. - @override @JsonKey(name: 'first_id', includeIfNull: false) String? get firstId; + @override /// The ID of the last checkpoint in the list. - @override @JsonKey(name: 'last_id', includeIfNull: false) String? get lastId; + @override /// Whether there are more checkpoints to retrieve. - @override @JsonKey(name: 'has_more') bool get hasMore; - - /// Create a copy of ListFineTuningJobCheckpointsResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ListFineTuningJobCheckpointsResponseImplCopyWith< _$ListFineTuningJobCheckpointsResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -18517,12 +16826,8 @@ mixin _$FineTuningJobEvent { /// The object type, which is always "fine_tuning.job.event". FineTuningJobEventObject get object => throw _privateConstructorUsedError; - /// Serializes this FineTuningJobEvent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of FineTuningJobEvent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $FineTuningJobEventCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -18551,8 +16856,6 @@ class _$FineTuningJobEventCopyWithImpl<$Res, $Val extends FineTuningJobEvent> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of FineTuningJobEvent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18611,8 +16914,6 @@ class __$$FineTuningJobEventImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobEventImpl) _then) : super(_value, _then); - /// Create a copy of FineTuningJobEvent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18700,14 +17001,12 @@ class _$FineTuningJobEventImpl extends _FineTuningJobEvent { (identical(other.object, object) || other.object == object)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, createdAt, level, message, object); - /// Create a copy of FineTuningJobEvent - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$FineTuningJobEventImplCopyWith<_$FineTuningJobEventImpl> get copyWith => @@ -18735,31 +17034,29 @@ abstract class _FineTuningJobEvent extends FineTuningJobEvent { factory _FineTuningJobEvent.fromJson(Map json) = _$FineTuningJobEventImpl.fromJson; - /// The event identifier, which can be referenced in the API endpoints. @override + + /// The event identifier, which can be referenced in the API endpoints. String get id; + @override /// The Unix timestamp (in seconds) for when the event was created. - @override @JsonKey(name: 'created_at') int get createdAt; + @override /// The log level of the event. - @override FineTuningJobEventLevel get level; + @override /// The message of the event. - @override String get message; + @override /// The object type, which is always "fine_tuning.job.event". - @override FineTuningJobEventObject get object; - - /// Create a copy of FineTuningJobEvent - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$FineTuningJobEventImplCopyWith<_$FineTuningJobEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -18798,12 +17095,8 @@ mixin _$FineTuningJobCheckpoint { FineTuningJobCheckpointObject get object => throw _privateConstructorUsedError; - /// Serializes this FineTuningJobCheckpoint to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of FineTuningJobCheckpoint - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $FineTuningJobCheckpointCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -18838,8 +17131,6 @@ class _$FineTuningJobCheckpointCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of FineTuningJobCheckpoint - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18883,8 +17174,6 @@ class _$FineTuningJobCheckpointCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of FineTuningJobCheckpoint - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningJobCheckpointMetricsCopyWith<$Res> get metrics { @@ -18928,8 +17217,6 @@ class __$$FineTuningJobCheckpointImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobCheckpointImpl) _then) : super(_value, _then); - /// Create a copy of FineTuningJobCheckpoint - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19047,14 +17334,12 @@ class _$FineTuningJobCheckpointImpl extends _FineTuningJobCheckpoint { (identical(other.object, object) || other.object == object)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, createdAt, fineTunedModelCheckpoint, stepNumber, metrics, fineTuningJobId, object); - /// Create a copy of FineTuningJobCheckpoint - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$FineTuningJobCheckpointImplCopyWith<_$FineTuningJobCheckpointImpl> @@ -19086,42 +17371,40 @@ abstract class _FineTuningJobCheckpoint extends FineTuningJobCheckpoint { factory _FineTuningJobCheckpoint.fromJson(Map json) = _$FineTuningJobCheckpointImpl.fromJson; - /// The checkpoint identifier, which can be referenced in the API endpoints. @override + + /// The checkpoint identifier, which can be referenced in the API endpoints. String get id; + @override /// The Unix timestamp (in seconds) for when the checkpoint was created. - @override @JsonKey(name: 'created_at') int get createdAt; + @override /// The name of the fine-tuned checkpoint model that is created. - @override @JsonKey(name: 'fine_tuned_model_checkpoint') String get fineTunedModelCheckpoint; + @override /// The step number that the checkpoint was created at. - @override @JsonKey(name: 'step_number') int get stepNumber; + @override /// Metrics at the step number during the fine-tuning job. - @override FineTuningJobCheckpointMetrics get metrics; + @override /// The name of the fine-tuning job that this checkpoint was created from. - @override @JsonKey(name: 'fine_tuning_job_id') String get fineTuningJobId; + @override /// The object type, which is always "fine_tuning.job.checkpoint". - @override FineTuningJobCheckpointObject get object; - - /// Create a copy of FineTuningJobCheckpoint - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$FineTuningJobCheckpointImplCopyWith<_$FineTuningJobCheckpointImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19161,12 +17444,8 @@ mixin _$FineTuningJobCheckpointMetrics { @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) double? get fullValidMeanTokenAccuracy => throw _privateConstructorUsedError; - /// Serializes this FineTuningJobCheckpointMetrics to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of FineTuningJobCheckpointMetrics - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $FineTuningJobCheckpointMetricsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19204,8 +17483,6 @@ class _$FineTuningJobCheckpointMetricsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of FineTuningJobCheckpointMetrics - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19283,8 +17560,6 @@ class __$$FineTuningJobCheckpointMetricsImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobCheckpointMetricsImpl) _then) : super(_value, _then); - /// Create a copy of FineTuningJobCheckpointMetrics - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19413,7 +17688,7 @@ class _$FineTuningJobCheckpointMetricsImpl fullValidMeanTokenAccuracy)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -19425,9 +17700,7 @@ class _$FineTuningJobCheckpointMetricsImpl fullValidLoss, fullValidMeanTokenAccuracy); - /// Create a copy of FineTuningJobCheckpointMetrics - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$FineTuningJobCheckpointMetricsImplCopyWith< @@ -19465,45 +17738,43 @@ abstract class _FineTuningJobCheckpointMetrics factory _FineTuningJobCheckpointMetrics.fromJson(Map json) = _$FineTuningJobCheckpointMetricsImpl.fromJson; - /// The step number that the metrics were recorded at. @override + + /// The step number that the metrics were recorded at. @JsonKey(includeIfNull: false) double? get step; + @override /// The training loss at the step number. - @override @JsonKey(name: 'train_loss', includeIfNull: false) double? get trainLoss; + @override /// The training mean token accuracy at the step number. - @override @JsonKey(name: 'train_mean_token_accuracy', includeIfNull: false) double? get trainMeanTokenAccuracy; + @override /// The validation loss at the step number. - @override @JsonKey(name: 'valid_loss', includeIfNull: false) double? get validLoss; + @override /// The validation mean token accuracy at the step number. - @override @JsonKey(name: 'valid_mean_token_accuracy', includeIfNull: false) double? get validMeanTokenAccuracy; + @override /// The full validation loss at the step number. - @override @JsonKey(name: 'full_valid_loss', includeIfNull: false) double? get fullValidLoss; + @override /// The full validation mean token accuracy at the step number. - @override @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) double? get fullValidMeanTokenAccuracy; - - /// Create a copy of FineTuningJobCheckpointMetrics - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$FineTuningJobCheckpointMetricsImplCopyWith< _$FineTuningJobCheckpointMetricsImpl> get copyWith => throw _privateConstructorUsedError; @@ -19551,12 +17822,8 @@ mixin _$CreateImageRequest { @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; - /// Serializes this CreateImageRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateImageRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateImageRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19602,8 +17869,6 @@ class _$CreateImageRequestCopyWithImpl<$Res, $Val extends CreateImageRequest> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateImageRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19652,8 +17917,6 @@ class _$CreateImageRequestCopyWithImpl<$Res, $Val extends CreateImageRequest> ) as $Val); } - /// Create a copy of CreateImageRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateImageRequestModelCopyWith<$Res>? get model { @@ -19709,8 +17972,6 @@ class __$$CreateImageRequestImplCopyWithImpl<$Res> $Res Function(_$CreateImageRequestImpl) _then) : super(_value, _then); - /// Create a copy of CreateImageRequest - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -19855,14 +18116,12 @@ class _$CreateImageRequestImpl extends _CreateImageRequest { (identical(other.user, user) || other.user == user)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, prompt, model, n, quality, responseFormat, size, style, user); - /// Create a copy of CreateImageRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateImageRequestImplCopyWith<_$CreateImageRequestImpl> get copyWith => @@ -19905,54 +18164,52 @@ abstract class _CreateImageRequest extends CreateImageRequest { factory _CreateImageRequest.fromJson(Map json) = _$CreateImageRequestImpl.fromJson; - /// A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. @override + + /// A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. String get prompt; + @override /// The model to use for image generation. - @override @_CreateImageRequestModelConverter() @JsonKey(includeIfNull: false) CreateImageRequestModel? get model; + @override /// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. - @override @JsonKey(includeIfNull: false) int? get n; + @override /// The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. - @override ImageQuality get quality; + @override /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. - @override @JsonKey( name: 'response_format', includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ImageResponseFormat? get responseFormat; + @override /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. - @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ImageSize? get size; + @override /// The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. - @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ImageStyle? get style; + @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). - @override @JsonKey(includeIfNull: false) String? get user; - - /// Create a copy of CreateImageRequest - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateImageRequestImplCopyWith<_$CreateImageRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20015,8 +18272,6 @@ mixin _$CreateImageRequestModel { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this CreateImageRequestModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -20037,9 +18292,6 @@ class _$CreateImageRequestModelCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of CreateImageRequestModel - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -20062,8 +18314,6 @@ class __$$CreateImageRequestModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateImageRequestModelEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of CreateImageRequestModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20110,13 +18360,11 @@ class _$CreateImageRequestModelEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateImageRequestModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateImageRequestModelEnumerationImplCopyWith< @@ -20206,10 +18454,7 @@ abstract class CreateImageRequestModelEnumeration @override ImageModels get value; - - /// Create a copy of CreateImageRequestModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateImageRequestModelEnumerationImplCopyWith< _$CreateImageRequestModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -20235,8 +18480,6 @@ class __$$CreateImageRequestModelStringImplCopyWithImpl<$Res> $Res Function(_$CreateImageRequestModelStringImpl) _then) : super(_value, _then); - /// Create a copy of CreateImageRequestModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20282,13 +18525,11 @@ class _$CreateImageRequestModelStringImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateImageRequestModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateImageRequestModelStringImplCopyWith< @@ -20376,10 +18617,7 @@ abstract class CreateImageRequestModelString extends CreateImageRequestModel { @override String get value; - - /// Create a copy of CreateImageRequestModel - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateImageRequestModelStringImplCopyWith< _$CreateImageRequestModelStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -20397,12 +18635,8 @@ mixin _$ImagesResponse { /// The list of images generated by the model. List get data => throw _privateConstructorUsedError; - /// Serializes this ImagesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ImagesResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ImagesResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -20426,8 +18660,6 @@ class _$ImagesResponseCopyWithImpl<$Res, $Val extends ImagesResponse> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ImagesResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20466,8 +18698,6 @@ class __$$ImagesResponseImplCopyWithImpl<$Res> _$ImagesResponseImpl _value, $Res Function(_$ImagesResponseImpl) _then) : super(_value, _then); - /// Create a copy of ImagesResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20527,14 +18757,12 @@ class _$ImagesResponseImpl extends _ImagesResponse { const DeepCollectionEquality().equals(other._data, _data)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, created, const DeepCollectionEquality().hash(_data)); - /// Create a copy of ImagesResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ImagesResponseImplCopyWith<_$ImagesResponseImpl> get copyWith => @@ -20558,18 +18786,16 @@ abstract class _ImagesResponse extends ImagesResponse { factory _ImagesResponse.fromJson(Map json) = _$ImagesResponseImpl.fromJson; - /// The Unix timestamp (in seconds) when the image was created. @override + + /// The Unix timestamp (in seconds) when the image was created. int get created; + @override /// The list of images generated by the model. - @override List get data; - - /// Create a copy of ImagesResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ImagesResponseImplCopyWith<_$ImagesResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20592,12 +18818,8 @@ mixin _$Image { @JsonKey(name: 'revised_prompt', includeIfNull: false) String? get revisedPrompt => throw _privateConstructorUsedError; - /// Serializes this Image to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of Image - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ImageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -20623,8 +18845,6 @@ class _$ImageCopyWithImpl<$Res, $Val extends Image> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of Image - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -20671,8 +18891,6 @@ class __$$ImageImplCopyWithImpl<$Res> _$ImageImpl _value, $Res Function(_$ImageImpl) _then) : super(_value, _then); - /// Create a copy of Image - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20741,13 +18959,11 @@ class _$ImageImpl extends _Image { other.revisedPrompt == revisedPrompt)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, b64Json, url, revisedPrompt); - /// Create a copy of Image - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ImageImplCopyWith<_$ImageImpl> get copyWith => @@ -20771,25 +18987,23 @@ abstract class _Image extends Image { factory _Image.fromJson(Map json) = _$ImageImpl.fromJson; - /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. @override + + /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. @JsonKey(name: 'b64_json', includeIfNull: false) String? get b64Json; + @override /// The URL of the generated image, if `response_format` is `url` (default). - @override @JsonKey(includeIfNull: false) String? get url; + @override /// The prompt that was used to generate the image, if there was any revision to the prompt. - @override @JsonKey(name: 'revised_prompt', includeIfNull: false) String? get revisedPrompt; - - /// Create a copy of Image - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ImageImplCopyWith<_$ImageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20813,12 +19027,8 @@ mixin _$Model { @JsonKey(name: 'owned_by') String get ownedBy => throw _privateConstructorUsedError; - /// Serializes this Model to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of Model - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ModelCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -20844,8 +19054,6 @@ class _$ModelCopyWithImpl<$Res, $Val extends Model> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of Model - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20897,8 +19105,6 @@ class __$$ModelImplCopyWithImpl<$Res> _$ModelImpl _value, $Res Function(_$ModelImpl) _then) : super(_value, _then); - /// Create a copy of Model - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20974,13 +19180,11 @@ class _$ModelImpl extends _Model { (identical(other.ownedBy, ownedBy) || other.ownedBy == ownedBy)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, created, object, ownedBy); - /// Create a copy of Model - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ModelImplCopyWith<_$ModelImpl> get copyWith => @@ -21004,27 +19208,25 @@ abstract class _Model extends Model { factory _Model.fromJson(Map json) = _$ModelImpl.fromJson; - /// The model identifier, which can be referenced in the API endpoints. @override + + /// The model identifier, which can be referenced in the API endpoints. String get id; + @override /// The Unix timestamp (in seconds) when the model was created. - @override int get created; + @override /// The object type, which is always "model". - @override ModelObject get object; + @override /// The organization that owns the model. - @override @JsonKey(name: 'owned_by') String get ownedBy; - - /// Create a copy of Model - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ModelImplCopyWith<_$ModelImpl> get copyWith => throw _privateConstructorUsedError; } @@ -21041,12 +19243,8 @@ mixin _$ListModelsResponse { /// The list of models. List get data => throw _privateConstructorUsedError; - /// Serializes this ListModelsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ListModelsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ListModelsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -21070,8 +19268,6 @@ class _$ListModelsResponseCopyWithImpl<$Res, $Val extends ListModelsResponse> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ListModelsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21110,8 +19306,6 @@ class __$$ListModelsResponseImplCopyWithImpl<$Res> $Res Function(_$ListModelsResponseImpl) _then) : super(_value, _then); - /// Create a copy of ListModelsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21171,14 +19365,12 @@ class _$ListModelsResponseImpl extends _ListModelsResponse { const DeepCollectionEquality().equals(other._data, _data)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, object, const DeepCollectionEquality().hash(_data)); - /// Create a copy of ListModelsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ListModelsResponseImplCopyWith<_$ListModelsResponseImpl> get copyWith => @@ -21202,18 +19394,16 @@ abstract class _ListModelsResponse extends ListModelsResponse { factory _ListModelsResponse.fromJson(Map json) = _$ListModelsResponseImpl.fromJson; - /// The object type, which is always "list". @override + + /// The object type, which is always "list". ListModelsResponseObject get object; + @override /// The list of models. - @override List get data; - - /// Create a copy of ListModelsResponse - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ListModelsResponseImplCopyWith<_$ListModelsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -21233,12 +19423,8 @@ mixin _$DeleteModelResponse { /// The object type, which is always "model". String get object => throw _privateConstructorUsedError; - /// Serializes this DeleteModelResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of DeleteModelResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $DeleteModelResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -21262,8 +19448,6 @@ class _$DeleteModelResponseCopyWithImpl<$Res, $Val extends DeleteModelResponse> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of DeleteModelResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21307,8 +19491,6 @@ class __$$DeleteModelResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteModelResponseImpl) _then) : super(_value, _then); - /// Create a copy of DeleteModelResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21370,13 +19552,11 @@ class _$DeleteModelResponseImpl extends _DeleteModelResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - /// Create a copy of DeleteModelResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$DeleteModelResponseImplCopyWith<_$DeleteModelResponseImpl> get copyWith => @@ -21401,22 +19581,20 @@ abstract class _DeleteModelResponse extends DeleteModelResponse { factory _DeleteModelResponse.fromJson(Map json) = _$DeleteModelResponseImpl.fromJson; - /// The model identifier. @override + + /// The model identifier. String get id; + @override /// Whether the model was deleted. - @override bool get deleted; + @override /// The object type, which is always "model". - @override String get object; - - /// Create a copy of DeleteModelResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$DeleteModelResponseImplCopyWith<_$DeleteModelResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -21439,12 +19617,8 @@ mixin _$CreateModerationRequest { @_ModerationInputConverter() ModerationInput get input => throw _privateConstructorUsedError; - /// Serializes this CreateModerationRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateModerationRequest - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateModerationRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -21476,8 +19650,6 @@ class _$CreateModerationRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateModerationRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21496,8 +19668,6 @@ class _$CreateModerationRequestCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of CreateModerationRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationModelCopyWith<$Res>? get model { @@ -21510,8 +19680,6 @@ class _$CreateModerationRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateModerationRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationInputCopyWith<$Res> get input { @@ -21552,8 +19720,6 @@ class __$$CreateModerationRequestImplCopyWithImpl<$Res> $Res Function(_$CreateModerationRequestImpl) _then) : super(_value, _then); - /// Create a copy of CreateModerationRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21613,13 +19779,11 @@ class _$CreateModerationRequestImpl extends _CreateModerationRequest { (identical(other.input, input) || other.input == input)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, model, input); - /// Create a copy of CreateModerationRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateModerationRequestImplCopyWith<_$CreateModerationRequestImpl> @@ -21646,23 +19810,21 @@ abstract class _CreateModerationRequest extends CreateModerationRequest { factory _CreateModerationRequest.fromJson(Map json) = _$CreateModerationRequestImpl.fromJson; + @override + /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. /// /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. - @override @_ModerationModelConverter() @JsonKey(includeIfNull: false) ModerationModel? get model; + @override /// The input text to classify - @override @_ModerationInputConverter() ModerationInput get input; - - /// Create a copy of CreateModerationRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateModerationRequestImplCopyWith<_$CreateModerationRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -21721,8 +19883,6 @@ mixin _$ModerationModel { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this ModerationModel to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; } @@ -21742,9 +19902,6 @@ class _$ModerationModelCopyWithImpl<$Res, $Val extends ModerationModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of ModerationModel - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -21767,8 +19924,6 @@ class __$$ModerationModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$ModerationModelEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of ModerationModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21813,13 +19968,11 @@ class _$ModerationModelEnumerationImpl extends ModerationModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ModerationModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ModerationModelEnumerationImplCopyWith<_$ModerationModelEnumerationImpl> @@ -21906,10 +20059,7 @@ abstract class ModerationModelEnumeration extends ModerationModel { @override ModerationModels get value; - - /// Create a copy of ModerationModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ModerationModelEnumerationImplCopyWith<_$ModerationModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -21932,8 +20082,6 @@ class __$$ModerationModelStringImplCopyWithImpl<$Res> $Res Function(_$ModerationModelStringImpl) _then) : super(_value, _then); - /// Create a copy of ModerationModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21977,13 +20125,11 @@ class _$ModerationModelStringImpl extends ModerationModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ModerationModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ModerationModelStringImplCopyWith<_$ModerationModelStringImpl> @@ -22070,10 +20216,7 @@ abstract class ModerationModelString extends ModerationModel { @override String get value; - - /// Create a copy of ModerationModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ModerationModelStringImplCopyWith<_$ModerationModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -22132,8 +20275,6 @@ mixin _$ModerationInput { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this ModerationInput to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; } @@ -22153,9 +20294,6 @@ class _$ModerationInputCopyWithImpl<$Res, $Val extends ModerationInput> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of ModerationInput - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -22177,8 +20315,6 @@ class __$$ModerationInputListStringImplCopyWithImpl<$Res> $Res Function(_$ModerationInputListStringImpl) _then) : super(_value, _then); - /// Create a copy of ModerationInput - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22229,14 +20365,12 @@ class _$ModerationInputListStringImpl extends ModerationInputListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - /// Create a copy of ModerationInput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ModerationInputListStringImplCopyWith<_$ModerationInputListStringImpl> @@ -22323,10 +20457,7 @@ abstract class ModerationInputListString extends ModerationInput { @override List get value; - - /// Create a copy of ModerationInput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ModerationInputListStringImplCopyWith<_$ModerationInputListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -22349,8 +20480,6 @@ class __$$ModerationInputStringImplCopyWithImpl<$Res> $Res Function(_$ModerationInputStringImpl) _then) : super(_value, _then); - /// Create a copy of ModerationInput - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22394,13 +20523,11 @@ class _$ModerationInputStringImpl extends ModerationInputString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ModerationInput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ModerationInputStringImplCopyWith<_$ModerationInputStringImpl> @@ -22487,10 +20614,7 @@ abstract class ModerationInputString extends ModerationInput { @override String get value; - - /// Create a copy of ModerationInput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ModerationInputStringImplCopyWith<_$ModerationInputStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -22511,12 +20635,8 @@ mixin _$CreateModerationResponse { /// A list of moderation objects. List get results => throw _privateConstructorUsedError; - /// Serializes this CreateModerationResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateModerationResponse - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateModerationResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -22541,8 +20661,6 @@ class _$CreateModerationResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateModerationResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22589,8 +20707,6 @@ class __$$CreateModerationResponseImplCopyWithImpl<$Res> $Res Function(_$CreateModerationResponseImpl) _then) : super(_value, _then); - /// Create a copy of CreateModerationResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22662,14 +20778,12 @@ class _$CreateModerationResponseImpl extends _CreateModerationResponse { const DeepCollectionEquality().equals(other._results, _results)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, id, model, const DeepCollectionEquality().hash(_results)); - /// Create a copy of CreateModerationResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateModerationResponseImplCopyWith<_$CreateModerationResponseImpl> @@ -22695,22 +20809,20 @@ abstract class _CreateModerationResponse extends CreateModerationResponse { factory _CreateModerationResponse.fromJson(Map json) = _$CreateModerationResponseImpl.fromJson; - /// The unique identifier for the moderation request. @override + + /// The unique identifier for the moderation request. String get id; + @override /// The model used to generate the moderation results. - @override String get model; + @override /// A list of moderation objects. - @override List get results; - - /// Create a copy of CreateModerationResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateModerationResponseImplCopyWith<_$CreateModerationResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -22732,12 +20844,8 @@ mixin _$Moderation { ModerationCategoriesScores get categoryScores => throw _privateConstructorUsedError; - /// Serializes this Moderation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of Moderation - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ModerationCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -22768,8 +20876,6 @@ class _$ModerationCopyWithImpl<$Res, $Val extends Moderation> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of Moderation - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22793,8 +20899,6 @@ class _$ModerationCopyWithImpl<$Res, $Val extends Moderation> ) as $Val); } - /// Create a copy of Moderation - /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $ModerationCategoriesCopyWith<$Res> get categories { @@ -22803,8 +20907,6 @@ class _$ModerationCopyWithImpl<$Res, $Val extends Moderation> }); } - /// Create a copy of Moderation - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationCategoriesScoresCopyWith<$Res> get categoryScores { @@ -22843,8 +20945,6 @@ class __$$ModerationImplCopyWithImpl<$Res> _$ModerationImpl _value, $Res Function(_$ModerationImpl) _then) : super(_value, _then); - /// Create a copy of Moderation - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22911,14 +21011,12 @@ class _$ModerationImpl extends _Moderation { other.categoryScores == categoryScores)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, flagged, categories, categoryScores); - /// Create a copy of Moderation - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ModerationImplCopyWith<_$ModerationImpl> get copyWith => @@ -22944,23 +21042,21 @@ abstract class _Moderation extends Moderation { factory _Moderation.fromJson(Map json) = _$ModerationImpl.fromJson; - /// Whether any of the below categories are flagged. @override + + /// Whether any of the below categories are flagged. bool get flagged; + @override /// A list of the categories, and whether they are flagged or not. - @override ModerationCategories get categories; + @override /// A list of the categories along with their scores as predicted by model. - @override @JsonKey(name: 'category_scores') ModerationCategoriesScores get categoryScores; - - /// Create a copy of Moderation - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ModerationImplCopyWith<_$ModerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -23011,12 +21107,8 @@ mixin _$ModerationCategories { @JsonKey(name: 'violence/graphic') bool get violenceGraphic => throw _privateConstructorUsedError; - /// Serializes this ModerationCategories to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ModerationCategories - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ModerationCategoriesCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -23052,8 +21144,6 @@ class _$ModerationCategoriesCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ModerationCategories - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23148,8 +21238,6 @@ class __$$ModerationCategoriesImplCopyWithImpl<$Res> $Res Function(_$ModerationCategoriesImpl) _then) : super(_value, _then); - /// Create a copy of ModerationCategories - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -23319,7 +21407,7 @@ class _$ModerationCategoriesImpl extends _ModerationCategories { other.violenceGraphic == violenceGraphic)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -23335,9 +21423,7 @@ class _$ModerationCategoriesImpl extends _ModerationCategories { violence, violenceGraphic); - /// Create a copy of ModerationCategories - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ModerationCategoriesImplCopyWith<_$ModerationCategoriesImpl> @@ -23374,61 +21460,59 @@ abstract class _ModerationCategories extends ModerationCategories { factory _ModerationCategories.fromJson(Map json) = _$ModerationCategoriesImpl.fromJson; - /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. @override + + /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. bool get hate; + @override /// Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. - @override @JsonKey(name: 'hate/threatening') bool get hateThreatening; + @override /// Content that expresses, incites, or promotes harassing language towards any target. - @override bool get harassment; + @override /// Harassment content that also includes violence or serious harm towards any target. - @override @JsonKey(name: 'harassment/threatening') bool get harassmentThreatening; + @override /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. - @override @JsonKey(name: 'self-harm') bool get selfHarm; + @override /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. - @override @JsonKey(name: 'self-harm/intent') bool get selfHarmIntent; + @override /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. - @override @JsonKey(name: 'self-harm/instructions') bool get selfHarmInstructions; + @override /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). - @override bool get sexual; + @override /// Sexual content that includes an individual who is under 18 years old. - @override @JsonKey(name: 'sexual/minors') bool get sexualMinors; + @override /// Content that depicts death, violence, or physical injury. - @override bool get violence; + @override /// Content that depicts death, violence, or physical injury in graphic detail. - @override @JsonKey(name: 'violence/graphic') bool get violenceGraphic; - - /// Create a copy of ModerationCategories - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ModerationCategoriesImplCopyWith<_$ModerationCategoriesImpl> get copyWith => throw _privateConstructorUsedError; } @@ -23480,12 +21564,8 @@ mixin _$ModerationCategoriesScores { @JsonKey(name: 'violence/graphic') double get violenceGraphic => throw _privateConstructorUsedError; - /// Serializes this ModerationCategoriesScores to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ModerationCategoriesScores - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ModerationCategoriesScoresCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -23522,8 +21602,6 @@ class _$ModerationCategoriesScoresCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ModerationCategoriesScores - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23621,8 +21699,6 @@ class __$$ModerationCategoriesScoresImplCopyWithImpl<$Res> $Res Function(_$ModerationCategoriesScoresImpl) _then) : super(_value, _then); - /// Create a copy of ModerationCategoriesScores - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23793,7 +21869,7 @@ class _$ModerationCategoriesScoresImpl extends _ModerationCategoriesScores { other.violenceGraphic == violenceGraphic)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -23809,9 +21885,7 @@ class _$ModerationCategoriesScoresImpl extends _ModerationCategoriesScores { violence, violenceGraphic); - /// Create a copy of ModerationCategoriesScores - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ModerationCategoriesScoresImplCopyWith<_$ModerationCategoriesScoresImpl> @@ -23848,61 +21922,59 @@ abstract class _ModerationCategoriesScores extends ModerationCategoriesScores { factory _ModerationCategoriesScores.fromJson(Map json) = _$ModerationCategoriesScoresImpl.fromJson; - /// The score for the category 'hate'. @override + + /// The score for the category 'hate'. double get hate; + @override /// The score for the category 'hate/threatening'. - @override @JsonKey(name: 'hate/threatening') double get hateThreatening; + @override /// The score for the category 'harassment'. - @override double get harassment; + @override /// The score for the category 'harassment/threatening'. - @override @JsonKey(name: 'harassment/threatening') double get harassmentThreatening; + @override /// The score for the category 'self-harm'. - @override @JsonKey(name: 'self-harm') double get selfHarm; + @override /// The score for the category 'self-harm/intent'. - @override @JsonKey(name: 'self-harm/intent') double get selfHarmIntent; + @override /// The score for the category 'self-harm/instructions'. - @override @JsonKey(name: 'self-harm/instructions') double get selfHarmInstructions; + @override /// The score for the category 'sexual'. - @override double get sexual; + @override /// The score for the category 'sexual/minors'. - @override @JsonKey(name: 'sexual/minors') double get sexualMinors; + @override /// The score for the category 'violence'. 
- @override double get violence; + @override /// The score for the category 'violence/graphic'. - @override @JsonKey(name: 'violence/graphic') double get violenceGraphic; - - /// Create a copy of ModerationCategoriesScores - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ModerationCategoriesScoresImplCopyWith<_$ModerationCategoriesScoresImpl> get copyWith => throw _privateConstructorUsedError; } @@ -23935,60 +22007,38 @@ mixin _$AssistantObject { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. String? get instructions => throw _privateConstructorUsedError; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. List get tools => throw _privateConstructorUsedError; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. 
- /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) AssistantObjectResponseFormat? get responseFormat => throw _privateConstructorUsedError; - /// Serializes this AssistantObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of AssistantObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $AssistantObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -24031,8 +22081,6 @@ class _$AssistantObjectCopyWithImpl<$Res, $Val extends AssistantObject> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of AssistantObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -24106,8 +22154,6 @@ class _$AssistantObjectCopyWithImpl<$Res, $Val extends AssistantObject> ) as $Val); } - /// Create a copy of AssistantObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -24120,8 +22166,6 @@ class _$AssistantObjectCopyWithImpl<$Res, $Val extends AssistantObject> }); } - /// Create a copy of AssistantObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantObjectResponseFormatCopyWith<$Res>? 
get responseFormat { @@ -24176,8 +22220,6 @@ class __$$AssistantObjectImplCopyWithImpl<$Res> _$AssistantObjectImpl _value, $Res Function(_$AssistantObjectImpl) _then) : super(_value, _then); - /// Create a copy of AssistantObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -24307,12 +22349,10 @@ class _$AssistantObjectImpl extends _AssistantObject { @override final String? instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. final List _tools; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. @override List get tools { if (_tools is EqualUnmodifiableListView) return _tools; @@ -24325,14 +22365,10 @@ class _$AssistantObjectImpl extends _AssistantObject { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -24342,38 +22378,23 @@ class _$AssistantObjectImpl extends _AssistantObject { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -24410,7 +22431,7 @@ class _$AssistantObjectImpl extends _AssistantObject { other.responseFormat == responseFormat)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -24428,9 +22449,7 @@ class _$AssistantObjectImpl extends _AssistantObject { topP, responseFormat); - /// Create a copy of AssistantObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$AssistantObjectImplCopyWith<_$AssistantObjectImpl> get copyWith => @@ -24469,92 +22488,72 @@ abstract class _AssistantObject extends AssistantObject { factory _AssistantObject.fromJson(Map json) = _$AssistantObjectImpl.fromJson; - /// The identifier, which can be referenced in API endpoints. @override + + /// The identifier, which can be referenced in API endpoints. String get id; + @override /// The object type, which is always `assistant`. 
- @override AssistantObjectObject get object; + @override /// The Unix timestamp (in seconds) for when the assistant was created. - @override @JsonKey(name: 'created_at') int get createdAt; + @override /// The name of the assistant. The maximum length is 256 characters. - @override String? get name; + @override /// The description of the assistant. The maximum length is 512 characters. - @override String? get description; + @override /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. - @override String get model; + @override /// The system instructions that the assistant uses. The maximum length is 256,000 characters. - @override String? get instructions; - - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. @override + + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. List get tools; + @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override - Map? get metadata; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + Map? get metadata; @override + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature; + @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. - @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; + @override - /// Specifies the format that the model must output. 
Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. - @override + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) AssistantObjectResponseFormat? get responseFormat; - - /// Create a copy of AssistantObject - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$AssistantObjectImplCopyWith<_$AssistantObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -24562,10 +22561,11 @@ abstract class _AssistantObject extends AssistantObject { AssistantObjectResponseFormat _$AssistantObjectResponseFormatFromJson( Map json) { switch (json['runtimeType']) { - case 'mode': + case 'enumeration': return AssistantObjectResponseFormatEnumeration.fromJson(json); - case 'responseFormat': - return AssistantObjectResponseFormatResponseFormat.fromJson(json); + case 'assistantsResponseFormat': + return AssistantObjectResponseFormatAssistantsResponseFormat.fromJson( + json); default: throw CheckedFromJsonException( @@ -24581,48 +22581,52 @@ mixin _$AssistantObjectResponseFormat { Object get value => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function(AssistantResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantResponseFormatMode value) enumeration, + required TResult Function(AssistantsResponseFormat value) + assistantsResponseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(AssistantResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantResponseFormatMode value)? enumeration, + TResult? Function(AssistantsResponseFormat value)? assistantsResponseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(AssistantResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantResponseFormatMode value)? enumeration, + TResult Function(AssistantsResponseFormat value)? assistantsResponseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ required TResult Function(AssistantObjectResponseFormatEnumeration value) - mode, - required TResult Function(AssistantObjectResponseFormatResponseFormat value) - responseFormat, + enumeration, + required TResult Function( + AssistantObjectResponseFormatAssistantsResponseFormat value) + assistantsResponseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantObjectResponseFormatEnumeration value)? mode, - TResult? Function(AssistantObjectResponseFormatResponseFormat value)? - responseFormat, + TResult? Function(AssistantObjectResponseFormatEnumeration value)? + enumeration, + TResult? Function( + AssistantObjectResponseFormatAssistantsResponseFormat value)? + assistantsResponseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantObjectResponseFormatEnumeration value)? mode, - TResult Function(AssistantObjectResponseFormatResponseFormat value)? - responseFormat, + TResult Function(AssistantObjectResponseFormatEnumeration value)? + enumeration, + TResult Function( + AssistantObjectResponseFormatAssistantsResponseFormat value)? + assistantsResponseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this AssistantObjectResponseFormat to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; } @@ -24645,9 +22649,6 @@ class _$AssistantObjectResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of AssistantObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -24670,8 +22671,6 @@ class __$$AssistantObjectResponseFormatEnumerationImplCopyWithImpl<$Res> $Res Function(_$AssistantObjectResponseFormatEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of AssistantObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -24692,7 +22691,7 @@ class _$AssistantObjectResponseFormatEnumerationImpl extends AssistantObjectResponseFormatEnumeration { const _$AssistantObjectResponseFormatEnumerationImpl(this.value, {final String? $type}) - : $type = $type ?? 'mode', + : $type = $type ?? 'enumeration', super._(); factory _$AssistantObjectResponseFormatEnumerationImpl.fromJson( @@ -24707,7 +22706,7 @@ class _$AssistantObjectResponseFormatEnumerationImpl @override String toString() { - return 'AssistantObjectResponseFormat.mode(value: $value)'; + return 'AssistantObjectResponseFormat.enumeration(value: $value)'; } @override @@ -24718,13 +22717,11 @@ class _$AssistantObjectResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of AssistantObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$AssistantObjectResponseFormatEnumerationImplCopyWith< @@ -24736,30 +22733,31 @@ class _$AssistantObjectResponseFormatEnumerationImpl @override @optionalTypeArgs TResult when({ - required TResult Function(AssistantResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantResponseFormatMode value) enumeration, + required TResult Function(AssistantsResponseFormat value) + assistantsResponseFormat, }) { - return mode(value); + return enumeration(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(AssistantResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantResponseFormatMode value)? enumeration, + TResult? Function(AssistantsResponseFormat value)? assistantsResponseFormat, }) { - return mode?.call(value); + return enumeration?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(AssistantResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantResponseFormatMode value)? enumeration, + TResult Function(AssistantsResponseFormat value)? 
assistantsResponseFormat, required TResult orElse(), }) { - if (mode != null) { - return mode(value); + if (enumeration != null) { + return enumeration(value); } return orElse(); } @@ -24768,33 +22766,38 @@ class _$AssistantObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult map({ required TResult Function(AssistantObjectResponseFormatEnumeration value) - mode, - required TResult Function(AssistantObjectResponseFormatResponseFormat value) - responseFormat, + enumeration, + required TResult Function( + AssistantObjectResponseFormatAssistantsResponseFormat value) + assistantsResponseFormat, }) { - return mode(this); + return enumeration(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantObjectResponseFormatEnumeration value)? mode, - TResult? Function(AssistantObjectResponseFormatResponseFormat value)? - responseFormat, + TResult? Function(AssistantObjectResponseFormatEnumeration value)? + enumeration, + TResult? Function( + AssistantObjectResponseFormatAssistantsResponseFormat value)? + assistantsResponseFormat, }) { - return mode?.call(this); + return enumeration?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantObjectResponseFormatEnumeration value)? mode, - TResult Function(AssistantObjectResponseFormatResponseFormat value)? - responseFormat, + TResult Function(AssistantObjectResponseFormatEnumeration value)? + enumeration, + TResult Function( + AssistantObjectResponseFormatAssistantsResponseFormat value)? + assistantsResponseFormat, required TResult orElse(), }) { - if (mode != null) { - return mode(this); + if (enumeration != null) { + return enumeration(this); } return orElse(); } @@ -24820,61 +22823,59 @@ abstract class AssistantObjectResponseFormatEnumeration @override AssistantResponseFormatMode get value; - - /// Create a copy of AssistantObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$AssistantObjectResponseFormatEnumerationImplCopyWith< _$AssistantObjectResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$AssistantObjectResponseFormatResponseFormatImplCopyWith< +abstract class _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< $Res> { - factory _$$AssistantObjectResponseFormatResponseFormatImplCopyWith( - _$AssistantObjectResponseFormatResponseFormatImpl value, - $Res Function(_$AssistantObjectResponseFormatResponseFormatImpl) + factory _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith( + _$AssistantObjectResponseFormatAssistantsResponseFormatImpl value, + $Res Function( + _$AssistantObjectResponseFormatAssistantsResponseFormatImpl) then) = - __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl<$Res>; + __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< + $Res>; @useResult - $Res call({ResponseFormat value}); + $Res call({AssistantsResponseFormat value}); - $ResponseFormatCopyWith<$Res> get value; + $AssistantsResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl<$Res> +class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< + $Res> extends _$AssistantObjectResponseFormatCopyWithImpl<$Res, - _$AssistantObjectResponseFormatResponseFormatImpl> + _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> implements - _$$AssistantObjectResponseFormatResponseFormatImplCopyWith<$Res> { - __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl( - _$AssistantObjectResponseFormatResponseFormatImpl _value, - $Res Function(_$AssistantObjectResponseFormatResponseFormatImpl) _then) + _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< + $Res> { + __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl( + _$AssistantObjectResponseFormatAssistantsResponseFormatImpl _value, + $Res Function(_$AssistantObjectResponseFormatAssistantsResponseFormatImpl) + _then) : super(_value, _then); - /// Create a copy of AssistantObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? value = null, }) { - return _then(_$AssistantObjectResponseFormatResponseFormatImpl( + return _then(_$AssistantObjectResponseFormatAssistantsResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as ResponseFormat, + as AssistantsResponseFormat, )); } - /// Create a copy of AssistantObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $ResponseFormatCopyWith<$Res> get value { - return $ResponseFormatCopyWith<$Res>(_value.value, (value) { + $AssistantsResponseFormatCopyWith<$Res> get value { + return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -24882,79 +22883,80 @@ class __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$AssistantObjectResponseFormatResponseFormatImpl - extends AssistantObjectResponseFormatResponseFormat { - const _$AssistantObjectResponseFormatResponseFormatImpl(this.value, +class _$AssistantObjectResponseFormatAssistantsResponseFormatImpl + extends AssistantObjectResponseFormatAssistantsResponseFormat { + const _$AssistantObjectResponseFormatAssistantsResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'responseFormat', + : $type = $type ?? 'assistantsResponseFormat', super._(); - factory _$AssistantObjectResponseFormatResponseFormatImpl.fromJson( + factory _$AssistantObjectResponseFormatAssistantsResponseFormatImpl.fromJson( Map json) => - _$$AssistantObjectResponseFormatResponseFormatImplFromJson(json); + _$$AssistantObjectResponseFormatAssistantsResponseFormatImplFromJson( + json); @override - final ResponseFormat value; + final AssistantsResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'AssistantObjectResponseFormat.responseFormat(value: $value)'; + return 'AssistantObjectResponseFormat.assistantsResponseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$AssistantObjectResponseFormatResponseFormatImpl && + other + is _$AssistantObjectResponseFormatAssistantsResponseFormatImpl && (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of AssistantObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$AssistantObjectResponseFormatResponseFormatImplCopyWith< - _$AssistantObjectResponseFormatResponseFormatImpl> + _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< + _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> get copyWith => - __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl< - _$AssistantObjectResponseFormatResponseFormatImpl>( + __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< + _$AssistantObjectResponseFormatAssistantsResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(AssistantResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantResponseFormatMode value) enumeration, + required TResult Function(AssistantsResponseFormat value) + assistantsResponseFormat, }) { - return responseFormat(value); + return assistantsResponseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(AssistantResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantResponseFormatMode value)? enumeration, + TResult? 
Function(AssistantsResponseFormat value)? assistantsResponseFormat, }) { - return responseFormat?.call(value); + return assistantsResponseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(AssistantResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantResponseFormatMode value)? enumeration, + TResult Function(AssistantsResponseFormat value)? assistantsResponseFormat, required TResult orElse(), }) { - if (responseFormat != null) { - return responseFormat(value); + if (assistantsResponseFormat != null) { + return assistantsResponseFormat(value); } return orElse(); } @@ -24963,64 +22965,66 @@ class _$AssistantObjectResponseFormatResponseFormatImpl @optionalTypeArgs TResult map({ required TResult Function(AssistantObjectResponseFormatEnumeration value) - mode, - required TResult Function(AssistantObjectResponseFormatResponseFormat value) - responseFormat, + enumeration, + required TResult Function( + AssistantObjectResponseFormatAssistantsResponseFormat value) + assistantsResponseFormat, }) { - return responseFormat(this); + return assistantsResponseFormat(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantObjectResponseFormatEnumeration value)? mode, - TResult? Function(AssistantObjectResponseFormatResponseFormat value)? - responseFormat, + TResult? Function(AssistantObjectResponseFormatEnumeration value)? + enumeration, + TResult? Function( + AssistantObjectResponseFormatAssistantsResponseFormat value)? + assistantsResponseFormat, }) { - return responseFormat?.call(this); + return assistantsResponseFormat?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantObjectResponseFormatEnumeration value)? mode, - TResult Function(AssistantObjectResponseFormatResponseFormat value)? - responseFormat, + TResult Function(AssistantObjectResponseFormatEnumeration value)? + enumeration, + TResult Function( + AssistantObjectResponseFormatAssistantsResponseFormat value)? 
+ assistantsResponseFormat, required TResult orElse(), }) { - if (responseFormat != null) { - return responseFormat(this); + if (assistantsResponseFormat != null) { + return assistantsResponseFormat(this); } return orElse(); } @override Map toJson() { - return _$$AssistantObjectResponseFormatResponseFormatImplToJson( + return _$$AssistantObjectResponseFormatAssistantsResponseFormatImplToJson( this, ); } } -abstract class AssistantObjectResponseFormatResponseFormat +abstract class AssistantObjectResponseFormatAssistantsResponseFormat extends AssistantObjectResponseFormat { - const factory AssistantObjectResponseFormatResponseFormat( - final ResponseFormat value) = - _$AssistantObjectResponseFormatResponseFormatImpl; - const AssistantObjectResponseFormatResponseFormat._() : super._(); + const factory AssistantObjectResponseFormatAssistantsResponseFormat( + final AssistantsResponseFormat value) = + _$AssistantObjectResponseFormatAssistantsResponseFormatImpl; + const AssistantObjectResponseFormatAssistantsResponseFormat._() : super._(); - factory AssistantObjectResponseFormatResponseFormat.fromJson( + factory AssistantObjectResponseFormatAssistantsResponseFormat.fromJson( Map json) = - _$AssistantObjectResponseFormatResponseFormatImpl.fromJson; + _$AssistantObjectResponseFormatAssistantsResponseFormatImpl.fromJson; @override - ResponseFormat get value; - - /// Create a copy of AssistantObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - _$$AssistantObjectResponseFormatResponseFormatImplCopyWith< - _$AssistantObjectResponseFormatResponseFormatImpl> + AssistantsResponseFormat get value; + @JsonKey(ignore: true) + _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< + _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -25047,61 +23051,39 @@ mixin _$CreateAssistantRequest { @JsonKey(includeIfNull: false) String? get instructions => throw _privateConstructorUsedError; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. List get tools => throw _privateConstructorUsedError; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateAssistantRequestResponseFormat? get responseFormat => throw _privateConstructorUsedError; - /// Serializes this CreateAssistantRequest to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateAssistantRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateAssistantRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -25143,8 +23125,6 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateAssistantRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25203,8 +23183,6 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of CreateAssistantRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantModelCopyWith<$Res> get model { @@ -25213,8 +23191,6 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateAssistantRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -25227,8 +23203,6 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateAssistantRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateAssistantRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -25285,8 +23259,6 @@ class __$$CreateAssistantRequestImplCopyWithImpl<$Res> $Res Function(_$CreateAssistantRequestImpl) _then) : super(_value, _then); - /// Create a copy of CreateAssistantRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25389,12 +23361,10 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { @JsonKey(includeIfNull: false) final String? instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. final List _tools; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. @override @JsonKey() List get tools { @@ -25408,14 +23378,10 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -25426,38 +23392,23 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. 
Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -25490,7 +23441,7 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { other.responseFormat == responseFormat)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -25505,9 +23456,7 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { topP, responseFormat); - /// Create a copy of CreateAssistantRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateAssistantRequestImplCopyWith<_$CreateAssistantRequestImpl> @@ -25543,84 +23492,64 @@ abstract class _CreateAssistantRequest extends CreateAssistantRequest { factory _CreateAssistantRequest.fromJson(Map json) = _$CreateAssistantRequestImpl.fromJson; - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. @override + + /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. @_AssistantModelConverter() AssistantModel get model; + @override /// The name of the assistant. The maximum length is 256 characters. - @override @JsonKey(includeIfNull: false) String? get name; + @override /// The description of the assistant. The maximum length is 512 characters. - @override @JsonKey(includeIfNull: false) String? get description; + @override /// The system instructions that the assistant uses. The maximum length is 256,000 characters. - @override @JsonKey(includeIfNull: false) String? get instructions; - - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. @override + + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. List get tools; + @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override + + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; - - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. @override + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature; + @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. - @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; + @override - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. - @override + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateAssistantRequestResponseFormat? get responseFormat; - - /// Create a copy of CreateAssistantRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateAssistantRequestImplCopyWith<_$CreateAssistantRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -25679,8 +23608,6 @@ mixin _$AssistantModel { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this AssistantModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -25700,9 +23627,6 @@ class _$AssistantModelCopyWithImpl<$Res, $Val extends AssistantModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of AssistantModel - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -25724,8 +23648,6 @@ class __$$AssistantModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$AssistantModelEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of AssistantModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25769,13 +23691,11 @@ class _$AssistantModelEnumerationImpl extends AssistantModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of AssistantModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$AssistantModelEnumerationImplCopyWith<_$AssistantModelEnumerationImpl> @@ -25862,10 +23782,7 @@ abstract class AssistantModelEnumeration extends AssistantModel { @override AssistantModels get value; - - /// Create a copy of AssistantModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$AssistantModelEnumerationImplCopyWith<_$AssistantModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -25887,8 +23804,6 @@ class __$$AssistantModelStringImplCopyWithImpl<$Res> $Res Function(_$AssistantModelStringImpl) _then) : super(_value, _then); - /// Create a copy of AssistantModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25932,13 +23847,11 @@ class _$AssistantModelStringImpl extends AssistantModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of AssistantModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$AssistantModelStringImplCopyWith<_$AssistantModelStringImpl> @@ -26026,10 +23939,7 @@ abstract class AssistantModelString extends AssistantModel { @override String get value; - - /// Create a copy of AssistantModel - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$AssistantModelStringImplCopyWith<_$AssistantModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -26039,8 +23949,9 @@ CreateAssistantRequestResponseFormat switch (json['runtimeType']) { case 'mode': return CreateAssistantRequestResponseFormatEnumeration.fromJson(json); - case 'responseFormat': - return CreateAssistantRequestResponseFormatResponseFormat.fromJson(json); + case 'format': + return CreateAssistantRequestResponseFormatAssistantsResponseFormat + .fromJson(json); default: throw CheckedFromJsonException( @@ -26057,19 +23968,19 @@ mixin _$CreateAssistantRequestResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(CreateAssistantResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateAssistantResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateAssistantResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? format, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -26079,29 +23990,29 @@ mixin _$CreateAssistantRequestResponseFormat { CreateAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateAssistantRequestResponseFormatResponseFormat value) - responseFormat, + CreateAssistantRequestResponseFormatAssistantsResponseFormat value) + format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function(CreateAssistantRequestResponseFormatResponseFormat value)? - responseFormat, + TResult? Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function(CreateAssistantRequestResponseFormatResponseFormat value)? - responseFormat, + TResult Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this CreateAssistantRequestResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -26124,9 +24035,6 @@ class _$CreateAssistantRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of CreateAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -26153,8 +24061,6 @@ class __$$CreateAssistantRequestResponseFormatEnumerationImplCopyWithImpl<$Res> _then) : super(_value, _then); - /// Create a copy of CreateAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -26201,13 +24107,11 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateAssistantRequestResponseFormatEnumerationImplCopyWith< @@ -26221,7 +24125,7 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(CreateAssistantResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) { return mode(value); } @@ -26230,7 +24134,7 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateAssistantResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? format, }) { return mode?.call(value); } @@ -26239,7 +24143,7 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateAssistantResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? format, required TResult orElse(), }) { if (mode != null) { @@ -26255,8 +24159,8 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl CreateAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateAssistantRequestResponseFormatResponseFormat value) - responseFormat, + CreateAssistantRequestResponseFormatAssistantsResponseFormat value) + format, }) { return mode(this); } @@ -26266,8 +24170,9 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl TResult? mapOrNull({ TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function(CreateAssistantRequestResponseFormatResponseFormat value)? - responseFormat, + TResult? Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, }) { return mode?.call(this); } @@ -26277,8 +24182,9 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl TResult maybeMap({ TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function(CreateAssistantRequestResponseFormatResponseFormat value)? - responseFormat, + TResult Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, required TResult orElse(), }) { if (mode != null) { @@ -26308,66 +24214,61 @@ abstract class CreateAssistantRequestResponseFormatEnumeration @override CreateAssistantResponseFormatMode get value; - - /// Create a copy of CreateAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateAssistantRequestResponseFormatEnumerationImplCopyWith< _$CreateAssistantRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< +abstract class _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< $Res> { - factory _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith( - _$CreateAssistantRequestResponseFormatResponseFormatImpl value, + factory _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith( + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl value, $Res Function( - _$CreateAssistantRequestResponseFormatResponseFormatImpl) + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl) then) = - __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< + __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< $Res>; @useResult - $Res call({ResponseFormat value}); + $Res call({AssistantsResponseFormat value}); - $ResponseFormatCopyWith<$Res> get value; + $AssistantsResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< +class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< $Res> extends _$CreateAssistantRequestResponseFormatCopyWithImpl<$Res, - _$CreateAssistantRequestResponseFormatResponseFormatImpl> + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> implements - _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< $Res> { - __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl( - _$CreateAssistantRequestResponseFormatResponseFormatImpl _value, - $Res Function(_$CreateAssistantRequestResponseFormatResponseFormatImpl) + __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl _value, + $Res Function( + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl) _then) : super(_value, _then); - /// Create a copy of CreateAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? value = null, }) { - return _then(_$CreateAssistantRequestResponseFormatResponseFormatImpl( + return _then( + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as ResponseFormat, + as AssistantsResponseFormat, )); } - /// Create a copy of CreateAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $ResponseFormatCopyWith<$Res> get value { - return $ResponseFormatCopyWith<$Res>(_value.value, (value) { + $AssistantsResponseFormatCopyWith<$Res> get value { + return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -26375,79 +24276,80 @@ class __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< /// @nodoc @JsonSerializable() -class _$CreateAssistantRequestResponseFormatResponseFormatImpl - extends CreateAssistantRequestResponseFormatResponseFormat { - const _$CreateAssistantRequestResponseFormatResponseFormatImpl(this.value, +class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl + extends CreateAssistantRequestResponseFormatAssistantsResponseFormat { + const _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( + this.value, {final String? $type}) - : $type = $type ?? 'responseFormat', + : $type = $type ?? 'format', super._(); - factory _$CreateAssistantRequestResponseFormatResponseFormatImpl.fromJson( + factory _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl.fromJson( Map json) => - _$$CreateAssistantRequestResponseFormatResponseFormatImplFromJson(json); + _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( + json); @override - final ResponseFormat value; + final AssistantsResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'CreateAssistantRequestResponseFormat.responseFormat(value: $value)'; + return 'CreateAssistantRequestResponseFormat.format(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$CreateAssistantRequestResponseFormatResponseFormatImpl && + other + is _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl && (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< - _$CreateAssistantRequestResponseFormatResponseFormatImpl> + _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => - __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< - _$CreateAssistantRequestResponseFormatResponseFormatImpl>( + __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(CreateAssistantResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) { - return responseFormat(value); + return format(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateAssistantResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? 
format, }) { - return responseFormat?.call(value); + return format?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateAssistantResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? format, required TResult orElse(), }) { - if (responseFormat != null) { - return responseFormat(value); + if (format != null) { + return format(value); } return orElse(); } @@ -26459,10 +24361,10 @@ class _$CreateAssistantRequestResponseFormatResponseFormatImpl CreateAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateAssistantRequestResponseFormatResponseFormat value) - responseFormat, + CreateAssistantRequestResponseFormatAssistantsResponseFormat value) + format, }) { - return responseFormat(this); + return format(this); } @override @@ -26470,10 +24372,11 @@ class _$CreateAssistantRequestResponseFormatResponseFormatImpl TResult? mapOrNull({ TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function(CreateAssistantRequestResponseFormatResponseFormat value)? - responseFormat, + TResult? Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, }) { - return responseFormat?.call(this); + return format?.call(this); } @override @@ -26481,43 +24384,43 @@ class _$CreateAssistantRequestResponseFormatResponseFormatImpl TResult maybeMap({ TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function(CreateAssistantRequestResponseFormatResponseFormat value)? - responseFormat, + TResult Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, required TResult orElse(), }) { - if (responseFormat != null) { - return responseFormat(this); + if (format != null) { + return format(this); } return orElse(); } @override Map toJson() { - return _$$CreateAssistantRequestResponseFormatResponseFormatImplToJson( + return _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( this, ); } } -abstract class CreateAssistantRequestResponseFormatResponseFormat +abstract class CreateAssistantRequestResponseFormatAssistantsResponseFormat extends CreateAssistantRequestResponseFormat { - const factory CreateAssistantRequestResponseFormatResponseFormat( - final ResponseFormat value) = - _$CreateAssistantRequestResponseFormatResponseFormatImpl; - const CreateAssistantRequestResponseFormatResponseFormat._() : super._(); + const factory CreateAssistantRequestResponseFormatAssistantsResponseFormat( + final AssistantsResponseFormat value) = + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl; + const CreateAssistantRequestResponseFormatAssistantsResponseFormat._() + : super._(); - factory CreateAssistantRequestResponseFormatResponseFormat.fromJson( + factory CreateAssistantRequestResponseFormatAssistantsResponseFormat.fromJson( Map json) = - _$CreateAssistantRequestResponseFormatResponseFormatImpl.fromJson; + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl + .fromJson; @override - ResponseFormat get value; - - /// Create a copy of CreateAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< - _$CreateAssistantRequestResponseFormatResponseFormatImpl> + AssistantsResponseFormat get value; + @JsonKey(ignore: true) + _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -26544,8 +24447,7 @@ mixin _$ModifyAssistantRequest { @JsonKey(includeIfNull: false) String? get instructions => throw _privateConstructorUsedError; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. List get tools => throw _privateConstructorUsedError; /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. @@ -26556,53 +24458,32 @@ mixin _$ModifyAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. 
Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) ModifyAssistantRequestResponseFormat? get responseFormat => throw _privateConstructorUsedError; - /// Serializes this ModifyAssistantRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ModifyAssistantRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ModifyAssistantRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -26644,8 +24525,6 @@ class _$ModifyAssistantRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ModifyAssistantRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26709,8 +24588,6 @@ class _$ModifyAssistantRequestCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of ModifyAssistantRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -26723,8 +24600,6 @@ class _$ModifyAssistantRequestCopyWithImpl<$Res, }); } - /// Create a copy of ModifyAssistantRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModifyAssistantRequestResponseFormatCopyWith<$Res>? 
get responseFormat { @@ -26780,8 +24655,6 @@ class __$$ModifyAssistantRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyAssistantRequestImpl) _then) : super(_value, _then); - /// Create a copy of ModifyAssistantRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26891,12 +24764,10 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { @JsonKey(includeIfNull: false) final String? instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. final List _tools; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. @override @JsonKey() List get tools { @@ -26922,14 +24793,10 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -26940,38 +24807,23 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. 
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -27005,7 +24857,7 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { other.responseFormat == responseFormat)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -27021,9 +24873,7 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { topP, responseFormat); - /// Create a copy of ModifyAssistantRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ModifyAssistantRequestImplCopyWith<_$ModifyAssistantRequestImpl> @@ -27060,89 +24910,69 @@ abstract class _ModifyAssistantRequest extends ModifyAssistantRequest { factory _ModifyAssistantRequest.fromJson(Map json) = _$ModifyAssistantRequestImpl.fromJson; - /// ID of the model to use. 
You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. @override + + /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. @JsonKey(includeIfNull: false) String? get model; + @override /// The name of the assistant. The maximum length is 256 characters. - @override @JsonKey(includeIfNull: false) String? get name; + @override /// The description of the assistant. The maximum length is 512 characters. - @override @JsonKey(includeIfNull: false) String? get description; + @override /// The system instructions that the assistant uses. The maximum length is 256,000 characters. - @override @JsonKey(includeIfNull: false) String? get instructions; - - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - /// types `code_interpreter`, `file_search`, or `function`. @override + + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. List get tools; + @override /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. - @override @JsonKey(name: 'file_ids') List get fileIds; + @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; - - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. @override + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature; + @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. 
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. - @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; + @override - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. - @override + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) ModifyAssistantRequestResponseFormat? get responseFormat; - - /// Create a copy of ModifyAssistantRequest - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ModifyAssistantRequestImplCopyWith<_$ModifyAssistantRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -27152,8 +24982,9 @@ ModifyAssistantRequestResponseFormat switch (json['runtimeType']) { case 'mode': return ModifyAssistantRequestResponseFormatEnumeration.fromJson(json); - case 'responseFormat': - return ModifyAssistantRequestResponseFormatResponseFormat.fromJson(json); + case 'format': + return ModifyAssistantRequestResponseFormatAssistantsResponseFormat + .fromJson(json); default: throw CheckedFromJsonException( @@ -27170,19 +25001,19 @@ mixin _$ModifyAssistantRequestResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(ModifyAssistantResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(ModifyAssistantResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(ModifyAssistantResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? format, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -27192,29 +25023,29 @@ mixin _$ModifyAssistantRequestResponseFormat { ModifyAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - ModifyAssistantRequestResponseFormatResponseFormat value) - responseFormat, + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) + format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function(ModifyAssistantRequestResponseFormatResponseFormat value)? - responseFormat, + TResult? Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function(ModifyAssistantRequestResponseFormatResponseFormat value)? - responseFormat, + TResult Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this ModifyAssistantRequestResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -27237,9 +25068,6 @@ class _$ModifyAssistantRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of ModifyAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -27266,8 +25094,6 @@ class __$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWithImpl<$Res> _then) : super(_value, _then); - /// Create a copy of ModifyAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -27314,13 +25140,11 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ModifyAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWith< @@ -27334,7 +25158,7 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(ModifyAssistantResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) { return mode(value); } @@ -27343,7 +25167,7 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(ModifyAssistantResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? format, }) { return mode?.call(value); } @@ -27352,7 +25176,7 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(ModifyAssistantResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? format, required TResult orElse(), }) { if (mode != null) { @@ -27368,8 +25192,8 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl ModifyAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - ModifyAssistantRequestResponseFormatResponseFormat value) - responseFormat, + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) + format, }) { return mode(this); } @@ -27379,8 +25203,9 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl TResult? mapOrNull({ TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function(ModifyAssistantRequestResponseFormatResponseFormat value)? - responseFormat, + TResult? Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, }) { return mode?.call(this); } @@ -27390,8 +25215,9 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl TResult maybeMap({ TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function(ModifyAssistantRequestResponseFormatResponseFormat value)? - responseFormat, + TResult Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, required TResult orElse(), }) { if (mode != null) { @@ -27421,66 +25247,61 @@ abstract class ModifyAssistantRequestResponseFormatEnumeration @override ModifyAssistantResponseFormatMode get value; - - /// Create a copy of ModifyAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWith< _$ModifyAssistantRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< +abstract class _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< $Res> { - factory _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith( - _$ModifyAssistantRequestResponseFormatResponseFormatImpl value, + factory _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith( + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl value, $Res Function( - _$ModifyAssistantRequestResponseFormatResponseFormatImpl) + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl) then) = - __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< + __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< $Res>; @useResult - $Res call({ResponseFormat value}); + $Res call({AssistantsResponseFormat value}); - $ResponseFormatCopyWith<$Res> get value; + $AssistantsResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< +class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< $Res> extends _$ModifyAssistantRequestResponseFormatCopyWithImpl<$Res, - _$ModifyAssistantRequestResponseFormatResponseFormatImpl> + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> implements - _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< $Res> { - __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl( - _$ModifyAssistantRequestResponseFormatResponseFormatImpl _value, - $Res Function(_$ModifyAssistantRequestResponseFormatResponseFormatImpl) + __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl _value, + $Res Function( + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl) _then) : super(_value, _then); - /// Create a copy of ModifyAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? value = null, }) { - return _then(_$ModifyAssistantRequestResponseFormatResponseFormatImpl( + return _then( + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as ResponseFormat, + as AssistantsResponseFormat, )); } - /// Create a copy of ModifyAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $ResponseFormatCopyWith<$Res> get value { - return $ResponseFormatCopyWith<$Res>(_value.value, (value) { + $AssistantsResponseFormatCopyWith<$Res> get value { + return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -27488,79 +25309,80 @@ class __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< /// @nodoc @JsonSerializable() -class _$ModifyAssistantRequestResponseFormatResponseFormatImpl - extends ModifyAssistantRequestResponseFormatResponseFormat { - const _$ModifyAssistantRequestResponseFormatResponseFormatImpl(this.value, +class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl + extends ModifyAssistantRequestResponseFormatAssistantsResponseFormat { + const _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( + this.value, {final String? $type}) - : $type = $type ?? 'responseFormat', + : $type = $type ?? 'format', super._(); - factory _$ModifyAssistantRequestResponseFormatResponseFormatImpl.fromJson( + factory _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl.fromJson( Map json) => - _$$ModifyAssistantRequestResponseFormatResponseFormatImplFromJson(json); + _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( + json); @override - final ResponseFormat value; + final AssistantsResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'ModifyAssistantRequestResponseFormat.responseFormat(value: $value)'; + return 'ModifyAssistantRequestResponseFormat.format(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ModifyAssistantRequestResponseFormatResponseFormatImpl && + other + is _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl && (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ModifyAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< - _$ModifyAssistantRequestResponseFormatResponseFormatImpl> + _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => - __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< - _$ModifyAssistantRequestResponseFormatResponseFormatImpl>( + __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(ModifyAssistantResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) { - return responseFormat(value); + return format(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(ModifyAssistantResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? 
format, }) { - return responseFormat?.call(value); + return format?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(ModifyAssistantResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? format, required TResult orElse(), }) { - if (responseFormat != null) { - return responseFormat(value); + if (format != null) { + return format(value); } return orElse(); } @@ -27572,10 +25394,10 @@ class _$ModifyAssistantRequestResponseFormatResponseFormatImpl ModifyAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - ModifyAssistantRequestResponseFormatResponseFormat value) - responseFormat, + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) + format, }) { - return responseFormat(this); + return format(this); } @override @@ -27583,10 +25405,11 @@ class _$ModifyAssistantRequestResponseFormatResponseFormatImpl TResult? mapOrNull({ TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function(ModifyAssistantRequestResponseFormatResponseFormat value)? - responseFormat, + TResult? Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, }) { - return responseFormat?.call(this); + return format?.call(this); } @override @@ -27594,43 +25417,43 @@ class _$ModifyAssistantRequestResponseFormatResponseFormatImpl TResult maybeMap({ TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function(ModifyAssistantRequestResponseFormatResponseFormat value)? - responseFormat, + TResult Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, required TResult orElse(), }) { - if (responseFormat != null) { - return responseFormat(this); + if (format != null) { + return format(this); } return orElse(); } @override Map toJson() { - return _$$ModifyAssistantRequestResponseFormatResponseFormatImplToJson( + return _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( this, ); } } -abstract class ModifyAssistantRequestResponseFormatResponseFormat +abstract class ModifyAssistantRequestResponseFormatAssistantsResponseFormat extends ModifyAssistantRequestResponseFormat { - const factory ModifyAssistantRequestResponseFormatResponseFormat( - final ResponseFormat value) = - _$ModifyAssistantRequestResponseFormatResponseFormatImpl; - const ModifyAssistantRequestResponseFormatResponseFormat._() : super._(); + const factory ModifyAssistantRequestResponseFormatAssistantsResponseFormat( + final AssistantsResponseFormat value) = + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl; + const ModifyAssistantRequestResponseFormatAssistantsResponseFormat._() + : super._(); - factory ModifyAssistantRequestResponseFormatResponseFormat.fromJson( + factory ModifyAssistantRequestResponseFormatAssistantsResponseFormat.fromJson( Map json) = - _$ModifyAssistantRequestResponseFormatResponseFormatImpl.fromJson; + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl + .fromJson; @override - ResponseFormat get value; - - /// Create a copy of ModifyAssistantRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< - _$ModifyAssistantRequestResponseFormatResponseFormatImpl> + AssistantsResponseFormat get value; + @JsonKey(ignore: true) + _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -27651,12 +25474,8 @@ mixin _$DeleteAssistantResponse { DeleteAssistantResponseObject get object => throw _privateConstructorUsedError; - /// Serializes this DeleteAssistantResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of DeleteAssistantResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $DeleteAssistantResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -27681,8 +25500,6 @@ class _$DeleteAssistantResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of DeleteAssistantResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -27729,8 +25546,6 @@ class __$$DeleteAssistantResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteAssistantResponseImpl) _then) : super(_value, _then); - /// Create a copy of DeleteAssistantResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -27792,13 +25607,11 @@ class _$DeleteAssistantResponseImpl extends _DeleteAssistantResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - /// Create a copy of DeleteAssistantResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$DeleteAssistantResponseImplCopyWith<_$DeleteAssistantResponseImpl> @@ -27824,22 +25637,20 @@ abstract class _DeleteAssistantResponse extends DeleteAssistantResponse { factory _DeleteAssistantResponse.fromJson(Map json) = _$DeleteAssistantResponseImpl.fromJson; - /// The assistant identifier. @override + + /// The assistant identifier. String get id; + @override /// Whether the assistant was deleted. - @override bool get deleted; + @override /// The object type, which is always `assistant.deleted`. - @override DeleteAssistantResponseObject get object; - - /// Create a copy of DeleteAssistantResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$DeleteAssistantResponseImplCopyWith<_$DeleteAssistantResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -27869,12 +25680,8 @@ mixin _$ListAssistantsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; - /// Serializes this ListAssistantsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ListAssistantsResponse - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ListAssistantsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -27904,8 +25711,6 @@ class _$ListAssistantsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ListAssistantsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -27967,8 +25772,6 @@ class __$$ListAssistantsResponseImplCopyWithImpl<$Res> $Res Function(_$ListAssistantsResponseImpl) _then) : super(_value, _then); - /// Create a copy of ListAssistantsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28065,14 +25868,12 @@ class _$ListAssistantsResponseImpl extends _ListAssistantsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - /// Create a copy of ListAssistantsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ListAssistantsResponseImplCopyWith<_$ListAssistantsResponseImpl> @@ -28100,250 +25901,35 @@ abstract class _ListAssistantsResponse extends ListAssistantsResponse { factory _ListAssistantsResponse.fromJson(Map json) = _$ListAssistantsResponseImpl.fromJson; - /// The object type, which is always `list`. @override + + /// The object type, which is always `list`. String get object; + @override /// The list of assistants. - @override List get data; + @override /// The ID of the first assistant in the list. - @override @JsonKey(name: 'first_id') String get firstId; + @override /// The ID of the last assistant in the list. - @override @JsonKey(name: 'last_id') String get lastId; + @override /// Whether there are more assistants to retrieve. - @override @JsonKey(name: 'has_more') bool get hasMore; - - /// Create a copy of ListAssistantsResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ListAssistantsResponseImplCopyWith<_$ListAssistantsResponseImpl> get copyWith => throw _privateConstructorUsedError; } -FileSearchRankingOptions _$FileSearchRankingOptionsFromJson( - Map json) { - return _FileSearchRankingOptions.fromJson(json); -} - -/// @nodoc -mixin _$FileSearchRankingOptions { - /// The ranker to use for the file search. If not specified will use the `auto` ranker. - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - FileSearchRanker? get ranker => throw _privateConstructorUsedError; - - /// The score threshold for the file search. All values must be a floating point number between 0 and 1. - @JsonKey(name: 'score_threshold') - double get scoreThreshold => throw _privateConstructorUsedError; - - /// Serializes this FileSearchRankingOptions to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of FileSearchRankingOptions - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $FileSearchRankingOptionsCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $FileSearchRankingOptionsCopyWith<$Res> { - factory $FileSearchRankingOptionsCopyWith(FileSearchRankingOptions value, - $Res Function(FileSearchRankingOptions) then) = - _$FileSearchRankingOptionsCopyWithImpl<$Res, FileSearchRankingOptions>; - @useResult - $Res call( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - FileSearchRanker? ranker, - @JsonKey(name: 'score_threshold') double scoreThreshold}); -} - -/// @nodoc -class _$FileSearchRankingOptionsCopyWithImpl<$Res, - $Val extends FileSearchRankingOptions> - implements $FileSearchRankingOptionsCopyWith<$Res> { - _$FileSearchRankingOptionsCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of FileSearchRankingOptions - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? ranker = freezed, - Object? scoreThreshold = null, - }) { - return _then(_value.copyWith( - ranker: freezed == ranker - ? _value.ranker - : ranker // ignore: cast_nullable_to_non_nullable - as FileSearchRanker?, - scoreThreshold: null == scoreThreshold - ? _value.scoreThreshold - : scoreThreshold // ignore: cast_nullable_to_non_nullable - as double, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$FileSearchRankingOptionsImplCopyWith<$Res> - implements $FileSearchRankingOptionsCopyWith<$Res> { - factory _$$FileSearchRankingOptionsImplCopyWith( - _$FileSearchRankingOptionsImpl value, - $Res Function(_$FileSearchRankingOptionsImpl) then) = - __$$FileSearchRankingOptionsImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - FileSearchRanker? ranker, - @JsonKey(name: 'score_threshold') double scoreThreshold}); -} - -/// @nodoc -class __$$FileSearchRankingOptionsImplCopyWithImpl<$Res> - extends _$FileSearchRankingOptionsCopyWithImpl<$Res, - _$FileSearchRankingOptionsImpl> - implements _$$FileSearchRankingOptionsImplCopyWith<$Res> { - __$$FileSearchRankingOptionsImplCopyWithImpl( - _$FileSearchRankingOptionsImpl _value, - $Res Function(_$FileSearchRankingOptionsImpl) _then) - : super(_value, _then); - - /// Create a copy of FileSearchRankingOptions - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? ranker = freezed, - Object? scoreThreshold = null, - }) { - return _then(_$FileSearchRankingOptionsImpl( - ranker: freezed == ranker - ? _value.ranker - : ranker // ignore: cast_nullable_to_non_nullable - as FileSearchRanker?, - scoreThreshold: null == scoreThreshold - ? _value.scoreThreshold - : scoreThreshold // ignore: cast_nullable_to_non_nullable - as double, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$FileSearchRankingOptionsImpl extends _FileSearchRankingOptions { - const _$FileSearchRankingOptionsImpl( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - this.ranker, - @JsonKey(name: 'score_threshold') required this.scoreThreshold}) - : super._(); - - factory _$FileSearchRankingOptionsImpl.fromJson(Map json) => - _$$FileSearchRankingOptionsImplFromJson(json); - - /// The ranker to use for the file search. 
If not specified will use the `auto` ranker. - @override - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final FileSearchRanker? ranker; - - /// The score threshold for the file search. All values must be a floating point number between 0 and 1. - @override - @JsonKey(name: 'score_threshold') - final double scoreThreshold; - - @override - String toString() { - return 'FileSearchRankingOptions(ranker: $ranker, scoreThreshold: $scoreThreshold)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$FileSearchRankingOptionsImpl && - (identical(other.ranker, ranker) || other.ranker == ranker) && - (identical(other.scoreThreshold, scoreThreshold) || - other.scoreThreshold == scoreThreshold)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, ranker, scoreThreshold); - - /// Create a copy of FileSearchRankingOptions - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$FileSearchRankingOptionsImplCopyWith<_$FileSearchRankingOptionsImpl> - get copyWith => __$$FileSearchRankingOptionsImplCopyWithImpl< - _$FileSearchRankingOptionsImpl>(this, _$identity); - - @override - Map toJson() { - return _$$FileSearchRankingOptionsImplToJson( - this, - ); - } -} - -abstract class _FileSearchRankingOptions extends FileSearchRankingOptions { - const factory _FileSearchRankingOptions( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final FileSearchRanker? ranker, - @JsonKey(name: 'score_threshold') - required final double scoreThreshold}) = _$FileSearchRankingOptionsImpl; - const _FileSearchRankingOptions._() : super._(); - - factory _FileSearchRankingOptions.fromJson(Map json) = - _$FileSearchRankingOptionsImpl.fromJson; - - /// The ranker to use for the file search. If not specified will use the `auto` ranker. - @override - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - FileSearchRanker? get ranker; - - /// The score threshold for the file search. All values must be a floating point number between 0 and 1. - @override - @JsonKey(name: 'score_threshold') - double get scoreThreshold; - - /// Create a copy of FileSearchRankingOptions - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$FileSearchRankingOptionsImplCopyWith<_$FileSearchRankingOptionsImpl> - get copyWith => throw _privateConstructorUsedError; -} - AssistantsNamedToolChoice _$AssistantsNamedToolChoiceFromJson( Map json) { return _AssistantsNamedToolChoice.fromJson(json); @@ -28359,12 +25945,8 @@ mixin _$AssistantsNamedToolChoice { AssistantsFunctionCallOption? get function => throw _privateConstructorUsedError; - /// Serializes this AssistantsNamedToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of AssistantsNamedToolChoice - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $AssistantsNamedToolChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -28393,8 +25975,6 @@ class _$AssistantsNamedToolChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of AssistantsNamedToolChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28413,8 +25993,6 @@ class _$AssistantsNamedToolChoiceCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of AssistantsNamedToolChoice - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsFunctionCallOptionCopyWith<$Res>? get function { @@ -28456,8 +26034,6 @@ class __$$AssistantsNamedToolChoiceImplCopyWithImpl<$Res> $Res Function(_$AssistantsNamedToolChoiceImpl) _then) : super(_value, _then); - /// Create a copy of AssistantsNamedToolChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28511,13 +26087,11 @@ class _$AssistantsNamedToolChoiceImpl extends _AssistantsNamedToolChoice { other.function == function)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, type, function); - /// Create a copy of AssistantsNamedToolChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$AssistantsNamedToolChoiceImplCopyWith<_$AssistantsNamedToolChoiceImpl> @@ -28543,19 +26117,17 @@ abstract class _AssistantsNamedToolChoice extends AssistantsNamedToolChoice { factory _AssistantsNamedToolChoice.fromJson(Map json) = _$AssistantsNamedToolChoiceImpl.fromJson; - /// The type of the tool. If type is `function`, the function name must be set @override + + /// The type of the tool. If type is `function`, the function name must be set AssistantsToolType get type; + @override /// No Description - @override @JsonKey(includeIfNull: false) AssistantsFunctionCallOption? get function; - - /// Create a copy of AssistantsNamedToolChoice - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$AssistantsNamedToolChoiceImplCopyWith<_$AssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; } @@ -28570,12 +26142,8 @@ mixin _$AssistantsFunctionCallOption { /// The name of the function to call. String get name => throw _privateConstructorUsedError; - /// Serializes this AssistantsFunctionCallOption to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of AssistantsFunctionCallOption - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $AssistantsFunctionCallOptionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -28602,8 +26170,6 @@ class _$AssistantsFunctionCallOptionCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of AssistantsFunctionCallOption - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -28640,8 +26206,6 @@ class __$$AssistantsFunctionCallOptionImplCopyWithImpl<$Res> $Res Function(_$AssistantsFunctionCallOptionImpl) _then) : super(_value, _then); - /// Create a copy of AssistantsFunctionCallOption - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28682,13 +26246,11 @@ class _$AssistantsFunctionCallOptionImpl extends _AssistantsFunctionCallOption { (identical(other.name, name) || other.name == name)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, name); - /// Create a copy of AssistantsFunctionCallOption - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$AssistantsFunctionCallOptionImplCopyWith< @@ -28713,19 +26275,169 @@ abstract class _AssistantsFunctionCallOption factory _AssistantsFunctionCallOption.fromJson(Map json) = _$AssistantsFunctionCallOptionImpl.fromJson; - /// The name of the function to call. @override - String get name; - /// Create a copy of AssistantsFunctionCallOption - /// with the given fields replaced by the non-null parameter values. + /// The name of the function to call. + String get name; @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$AssistantsFunctionCallOptionImplCopyWith< _$AssistantsFunctionCallOptionImpl> get copyWith => throw _privateConstructorUsedError; } +AssistantsResponseFormat _$AssistantsResponseFormatFromJson( + Map json) { + return _AssistantsResponseFormat.fromJson(json); +} + +/// @nodoc +mixin _$AssistantsResponseFormat { + /// Must be one of `text` or `json_object`. + AssistantsResponseFormatType get type => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $AssistantsResponseFormatCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $AssistantsResponseFormatCopyWith<$Res> { + factory $AssistantsResponseFormatCopyWith(AssistantsResponseFormat value, + $Res Function(AssistantsResponseFormat) then) = + _$AssistantsResponseFormatCopyWithImpl<$Res, AssistantsResponseFormat>; + @useResult + $Res call({AssistantsResponseFormatType type}); +} + +/// @nodoc +class _$AssistantsResponseFormatCopyWithImpl<$Res, + $Val extends AssistantsResponseFormat> + implements $AssistantsResponseFormatCopyWith<$Res> { + _$AssistantsResponseFormatCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as AssistantsResponseFormatType, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$AssistantsResponseFormatImplCopyWith<$Res> + implements $AssistantsResponseFormatCopyWith<$Res> { + factory _$$AssistantsResponseFormatImplCopyWith( + _$AssistantsResponseFormatImpl value, + $Res Function(_$AssistantsResponseFormatImpl) then) = + __$$AssistantsResponseFormatImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({AssistantsResponseFormatType type}); +} + +/// @nodoc +class __$$AssistantsResponseFormatImplCopyWithImpl<$Res> + extends _$AssistantsResponseFormatCopyWithImpl<$Res, + _$AssistantsResponseFormatImpl> + implements _$$AssistantsResponseFormatImplCopyWith<$Res> { + __$$AssistantsResponseFormatImplCopyWithImpl( + _$AssistantsResponseFormatImpl _value, + $Res Function(_$AssistantsResponseFormatImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$AssistantsResponseFormatImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as AssistantsResponseFormatType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$AssistantsResponseFormatImpl extends _AssistantsResponseFormat { + const _$AssistantsResponseFormatImpl( + {this.type = AssistantsResponseFormatType.text}) + : super._(); + + factory _$AssistantsResponseFormatImpl.fromJson(Map json) => + _$$AssistantsResponseFormatImplFromJson(json); + + /// Must be one of `text` or `json_object`. + @override + @JsonKey() + final AssistantsResponseFormatType type; + + @override + String toString() { + return 'AssistantsResponseFormat(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$AssistantsResponseFormatImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$AssistantsResponseFormatImplCopyWith<_$AssistantsResponseFormatImpl> + get copyWith => __$$AssistantsResponseFormatImplCopyWithImpl< + _$AssistantsResponseFormatImpl>(this, _$identity); + + @override + Map toJson() { + return _$$AssistantsResponseFormatImplToJson( + this, + ); + } +} + +abstract class _AssistantsResponseFormat extends AssistantsResponseFormat { + const factory _AssistantsResponseFormat( + {final AssistantsResponseFormatType type}) = + _$AssistantsResponseFormatImpl; + const _AssistantsResponseFormat._() : super._(); + + factory _AssistantsResponseFormat.fromJson(Map json) = + _$AssistantsResponseFormatImpl.fromJson; + + @override + + /// Must be one of `text` or `json_object`. + AssistantsResponseFormatType get type; + @override + @JsonKey(ignore: true) + _$$AssistantsResponseFormatImplCopyWith<_$AssistantsResponseFormatImpl> + get copyWith => throw _privateConstructorUsedError; +} + TruncationObject _$TruncationObjectFromJson(Map json) { return _TruncationObject.fromJson(json); } @@ -28739,12 +26451,8 @@ mixin _$TruncationObject { @JsonKey(name: 'last_messages', includeIfNull: false) int? get lastMessages => throw _privateConstructorUsedError; - /// Serializes this TruncationObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of TruncationObject - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $TruncationObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -28770,8 +26478,6 @@ class _$TruncationObjectCopyWithImpl<$Res, $Val extends TruncationObject> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of TruncationObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28812,8 +26518,6 @@ class __$$TruncationObjectImplCopyWithImpl<$Res> $Res Function(_$TruncationObjectImpl) _then) : super(_value, _then); - /// Create a copy of TruncationObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28868,13 +26572,11 @@ class _$TruncationObjectImpl extends _TruncationObject { other.lastMessages == lastMessages)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, type, lastMessages); - /// Create a copy of TruncationObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$TruncationObjectImplCopyWith<_$TruncationObjectImpl> get copyWith => @@ -28899,19 +26601,17 @@ abstract class _TruncationObject extends TruncationObject { factory _TruncationObject.fromJson(Map json) = _$TruncationObjectImpl.fromJson; - /// The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. @override + + /// The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. TruncationObjectType get type; + @override /// The number of most recent messages from the thread when constructing the context for the run. - @override @JsonKey(name: 'last_messages', includeIfNull: false) int? get lastMessages; - - /// Create a copy of TruncationObject - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$TruncationObjectImplCopyWith<_$TruncationObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -28985,9 +26685,7 @@ mixin _$RunObject { /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. List get tools => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; /// Usage statistics related to the run. 
This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -29023,39 +26721,18 @@ mixin _$RunObject { @JsonKey(name: 'tool_choice') RunObjectToolChoice? get toolChoice => throw _privateConstructorUsedError; - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. - @JsonKey(name: 'parallel_tool_calls') - bool? get parallelToolCalls => throw _privateConstructorUsedError; - - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat get responseFormat => throw _privateConstructorUsedError; - /// Serializes this RunObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -29095,7 +26772,6 @@ abstract class $RunObjectCopyWith<$Res> { @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') RunObjectToolChoice? toolChoice, - @JsonKey(name: 'parallel_tool_calls') bool? 
parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat responseFormat}); @@ -29119,8 +26795,6 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29149,7 +26823,6 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, - Object? parallelToolCalls = freezed, Object? responseFormat = null, }) { return _then(_value.copyWith( @@ -29253,10 +26926,6 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as RunObjectToolChoice?, - parallelToolCalls: freezed == parallelToolCalls - ? _value.parallelToolCalls - : parallelToolCalls // ignore: cast_nullable_to_non_nullable - as bool?, responseFormat: null == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -29264,8 +26933,6 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> ) as $Val); } - /// Create a copy of RunObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunRequiredActionCopyWith<$Res>? get requiredAction { @@ -29278,8 +26945,6 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } - /// Create a copy of RunObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunLastErrorCopyWith<$Res>? get lastError { @@ -29292,8 +26957,6 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } - /// Create a copy of RunObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectIncompleteDetailsCopyWith<$Res>? get incompleteDetails { @@ -29307,8 +26970,6 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } - /// Create a copy of RunObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunCompletionUsageCopyWith<$Res>? get usage { @@ -29321,8 +26982,6 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } - /// Create a copy of RunObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TruncationObjectCopyWith<$Res>? get truncationStrategy { @@ -29335,8 +26994,6 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } - /// Create a copy of RunObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectToolChoiceCopyWith<$Res>? get toolChoice { @@ -29349,8 +27006,6 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } - /// Create a copy of RunObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectResponseFormatCopyWith<$Res> get responseFormat { @@ -29399,7 +27054,6 @@ abstract class _$$RunObjectImplCopyWith<$Res> @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') RunObjectToolChoice? toolChoice, - @JsonKey(name: 'parallel_tool_calls') bool? 
parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat responseFormat}); @@ -29428,8 +27082,6 @@ class __$$RunObjectImplCopyWithImpl<$Res> _$RunObjectImpl _value, $Res Function(_$RunObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29458,7 +27110,6 @@ class __$$RunObjectImplCopyWithImpl<$Res> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, - Object? parallelToolCalls = freezed, Object? responseFormat = null, }) { return _then(_$RunObjectImpl( @@ -29562,10 +27213,6 @@ class __$$RunObjectImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as RunObjectToolChoice?, - parallelToolCalls: freezed == parallelToolCalls - ? _value.parallelToolCalls - : parallelToolCalls // ignore: cast_nullable_to_non_nullable - as bool?, responseFormat: null == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -29605,7 +27252,6 @@ class _$RunObjectImpl extends _RunObject { @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') required this.toolChoice, - @JsonKey(name: 'parallel_tool_calls') required this.parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') required this.responseFormat}) @@ -29702,14 +27348,10 @@ class _$RunObjectImpl extends _RunObject { return EqualUnmodifiableListView(_tools); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -29758,29 +27400,11 @@ class _$RunObjectImpl extends _RunObject { @JsonKey(name: 'tool_choice') final RunObjectToolChoice? toolChoice; - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. - @override - @JsonKey(name: 'parallel_tool_calls') - final bool? parallelToolCalls; - - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@override @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') @@ -29788,7 +27412,7 @@ class _$RunObjectImpl extends _RunObject { @override String toString() { - return 'RunObject(id: $id, object: $object, createdAt: $createdAt, threadId: $threadId, assistantId: $assistantId, status: $status, requiredAction: $requiredAction, lastError: $lastError, expiresAt: $expiresAt, startedAt: $startedAt, cancelledAt: $cancelledAt, failedAt: $failedAt, completedAt: $completedAt, incompleteDetails: $incompleteDetails, model: $model, instructions: $instructions, tools: $tools, metadata: $metadata, usage: $usage, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, responseFormat: $responseFormat)'; + return 'RunObject(id: $id, object: $object, createdAt: $createdAt, threadId: $threadId, assistantId: $assistantId, status: $status, requiredAction: $requiredAction, lastError: $lastError, expiresAt: $expiresAt, startedAt: $startedAt, cancelledAt: $cancelledAt, failedAt: $failedAt, completedAt: $completedAt, incompleteDetails: $incompleteDetails, model: $model, instructions: $instructions, tools: $tools, metadata: $metadata, usage: $usage, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat)'; } @override @@ -29838,13 +27462,11 @@ class _$RunObjectImpl extends _RunObject { other.truncationStrategy == truncationStrategy) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && - (identical(other.parallelToolCalls, parallelToolCalls) || - other.parallelToolCalls == parallelToolCalls) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hashAll([ runtimeType, @@ -29873,13 +27495,10 @@ class _$RunObjectImpl extends _RunObject { maxCompletionTokens, truncationStrategy, toolChoice, - parallelToolCalls, responseFormat ]); - /// Create a copy of RunObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunObjectImplCopyWith<_$RunObjectImpl> get copyWith => @@ -29926,8 +27545,6 @@ abstract class _RunObject extends RunObject { @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') required final RunObjectToolChoice? toolChoice, - @JsonKey(name: 'parallel_tool_calls') - required final bool? parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') required final RunObjectResponseFormat responseFormat}) = _$RunObjectImpl; @@ -29936,162 +27553,140 @@ abstract class _RunObject extends RunObject { factory _RunObject.fromJson(Map json) = _$RunObjectImpl.fromJson; - /// The identifier, which can be referenced in API endpoints. @override + + /// The identifier, which can be referenced in API endpoints. String get id; + @override /// The object type, which is always `thread.run`. - @override RunObjectObject get object; + @override /// The Unix timestamp (in seconds) for when the run was created. 
- @override @JsonKey(name: 'created_at') int get createdAt; + @override /// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was executed on as a part of this run. - @override @JsonKey(name: 'thread_id') String get threadId; + @override /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for execution of this run. - @override @JsonKey(name: 'assistant_id') String get assistantId; + @override /// The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. - @override RunStatus get status; + @override /// Details on the action required to continue the run. Will be `null` if no action is required. - @override @JsonKey(name: 'required_action') RunRequiredAction? get requiredAction; + @override /// The last error associated with this run. Will be `null` if there are no errors. - @override @JsonKey(name: 'last_error') RunLastError? get lastError; + @override /// The Unix timestamp (in seconds) for when the run will expire. - @override @JsonKey(name: 'expires_at') int? get expiresAt; + @override /// The Unix timestamp (in seconds) for when the run was started. - @override @JsonKey(name: 'started_at') int? get startedAt; + @override /// The Unix timestamp (in seconds) for when the run was cancelled. - @override @JsonKey(name: 'cancelled_at') int? get cancelledAt; + @override /// The Unix timestamp (in seconds) for when the run failed. - @override @JsonKey(name: 'failed_at') int? get failedAt; + @override /// The Unix timestamp (in seconds) for when the run was completed. - @override @JsonKey(name: 'completed_at') int? get completedAt; + @override /// Details on why the run is incomplete. Will be `null` if the run is not incomplete. - @override @JsonKey(name: 'incomplete_details') RunObjectIncompleteDetails? get incompleteDetails; + @override /// The model that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. - @override String get model; + @override /// The instructions that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. - @override String get instructions; + @override /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. - @override List get tools; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. Map? get metadata; + @override /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). - @override RunCompletionUsage? get usage; + @override /// The sampling temperature used for this run. If not set, defaults to 1. - @override @JsonKey(includeIfNull: false) double? get temperature; + @override /// The nucleus sampling value used for this run. If not set, defaults to 1. - @override @JsonKey(name: 'top_p', includeIfNull: false) double? 
get topP; + @override /// The maximum number of prompt tokens specified to have been used over the course of the run. - @override @JsonKey(name: 'max_prompt_tokens') int? get maxPromptTokens; + @override /// The maximum number of completion tokens specified to have been used over the course of the run. - @override @JsonKey(name: 'max_completion_tokens') int? get maxCompletionTokens; + @override /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. - @override @JsonKey(name: 'truncation_strategy') TruncationObject? get truncationStrategy; + @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tools and instead generates a message. /// `auto` is the default value and means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools before responding to the user. /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - @override @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') RunObjectToolChoice? get toolChoice; - - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. @override - @JsonKey(name: 'parallel_tool_calls') - bool? get parallelToolCalls; - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. - @override + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat get responseFormat; - - /// Create a copy of RunObject - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunObjectImplCopyWith<_$RunObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -30110,12 +27705,8 @@ mixin _$RunRequiredAction { RunSubmitToolOutputs get submitToolOutputs => throw _privateConstructorUsedError; - /// Serializes this RunRequiredAction to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunRequiredAction - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunRequiredActionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -30144,8 +27735,6 @@ class _$RunRequiredActionCopyWithImpl<$Res, $Val extends RunRequiredAction> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunRequiredAction - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30164,8 +27753,6 @@ class _$RunRequiredActionCopyWithImpl<$Res, $Val extends RunRequiredAction> ) as $Val); } - /// Create a copy of RunRequiredAction - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunSubmitToolOutputsCopyWith<$Res> get submitToolOutputs { @@ -30201,8 +27788,6 @@ class __$$RunRequiredActionImplCopyWithImpl<$Res> $Res Function(_$RunRequiredActionImpl) _then) : super(_value, _then); - /// Create a copy of RunRequiredAction - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30257,13 +27842,11 @@ class _$RunRequiredActionImpl extends _RunRequiredAction { other.submitToolOutputs == submitToolOutputs)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, type, submitToolOutputs); - /// Create a copy of RunRequiredAction - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunRequiredActionImplCopyWith<_$RunRequiredActionImpl> get copyWith => @@ -30289,19 +27872,17 @@ abstract class _RunRequiredAction extends RunRequiredAction { factory _RunRequiredAction.fromJson(Map json) = _$RunRequiredActionImpl.fromJson; - /// For now, this is always `submit_tool_outputs`. @override + + /// For now, this is always `submit_tool_outputs`. RunRequiredActionType get type; + @override /// Details on the tool outputs needed for this run to continue. - @override @JsonKey(name: 'submit_tool_outputs') RunSubmitToolOutputs get submitToolOutputs; - - /// Create a copy of RunRequiredAction - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunRequiredActionImplCopyWith<_$RunRequiredActionImpl> get copyWith => throw _privateConstructorUsedError; } @@ -30318,12 +27899,8 @@ mixin _$RunLastError { /// A human-readable description of the error. String get message => throw _privateConstructorUsedError; - /// Serializes this RunLastError to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunLastError - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunLastErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -30347,8 +27924,6 @@ class _$RunLastErrorCopyWithImpl<$Res, $Val extends RunLastError> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunLastError - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30387,8 +27962,6 @@ class __$$RunLastErrorImplCopyWithImpl<$Res> _$RunLastErrorImpl _value, $Res Function(_$RunLastErrorImpl) _then) : super(_value, _then); - /// Create a copy of RunLastError - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30439,13 +28012,11 @@ class _$RunLastErrorImpl extends _RunLastError { (identical(other.message, message) || other.message == message)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, code, message); - /// Create a copy of RunLastError - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunLastErrorImplCopyWith<_$RunLastErrorImpl> get copyWith => @@ -30468,18 +28039,16 @@ abstract class _RunLastError extends RunLastError { factory _RunLastError.fromJson(Map json) = _$RunLastErrorImpl.fromJson; - /// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. @override + + /// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. RunLastErrorCode get code; + @override /// A human-readable description of the error. - @override String get message; - - /// Create a copy of RunLastError - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunLastErrorImplCopyWith<_$RunLastErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -30497,12 +28066,8 @@ mixin _$RunObjectIncompleteDetails { RunObjectIncompleteDetailsReason? get reason => throw _privateConstructorUsedError; - /// Serializes this RunObjectIncompleteDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunObjectIncompleteDetails - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunObjectIncompleteDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -30532,8 +28097,6 @@ class _$RunObjectIncompleteDetailsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunObjectIncompleteDetails - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -30574,8 +28137,6 @@ class __$$RunObjectIncompleteDetailsImplCopyWithImpl<$Res> $Res Function(_$RunObjectIncompleteDetailsImpl) _then) : super(_value, _then); - /// Create a copy of RunObjectIncompleteDetails - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30623,13 +28184,11 @@ class _$RunObjectIncompleteDetailsImpl extends _RunObjectIncompleteDetails { (identical(other.reason, reason) || other.reason == reason)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, reason); - /// Create a copy of RunObjectIncompleteDetails - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunObjectIncompleteDetailsImplCopyWith<_$RunObjectIncompleteDetailsImpl> @@ -30656,16 +28215,14 @@ abstract class _RunObjectIncompleteDetails extends RunObjectIncompleteDetails { factory _RunObjectIncompleteDetails.fromJson(Map json) = _$RunObjectIncompleteDetailsImpl.fromJson; - /// The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. @override + + /// The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) RunObjectIncompleteDetailsReason? get reason; - - /// Create a copy of RunObjectIncompleteDetails - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunObjectIncompleteDetailsImplCopyWith<_$RunObjectIncompleteDetailsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -30726,8 +28283,6 @@ mixin _$RunObjectToolChoice { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this RunObjectToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -30747,9 +28302,6 @@ class _$RunObjectToolChoiceCopyWithImpl<$Res, $Val extends RunObjectToolChoice> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of RunObjectToolChoice - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -30772,8 +28324,6 @@ class __$$RunObjectToolChoiceEnumerationImplCopyWithImpl<$Res> $Res Function(_$RunObjectToolChoiceEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of RunObjectToolChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30819,13 +28369,11 @@ class _$RunObjectToolChoiceEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of RunObjectToolChoice - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunObjectToolChoiceEnumerationImplCopyWith< @@ -30916,10 +28464,7 @@ abstract class RunObjectToolChoiceEnumeration extends RunObjectToolChoice { @override RunObjectToolChoiceMode get value; - - /// Create a copy of RunObjectToolChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunObjectToolChoiceEnumerationImplCopyWith< _$RunObjectToolChoiceEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -30950,8 +28495,6 @@ class __$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl<$Res> $Res Function(_$RunObjectToolChoiceAssistantsNamedToolChoiceImpl) _then) : super(_value, _then); - /// Create a copy of RunObjectToolChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30965,8 +28508,6 @@ class __$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl<$Res> )); } - /// Create a copy of RunObjectToolChoice - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsNamedToolChoiceCopyWith<$Res> get value { @@ -31008,13 +28549,11 @@ class _$RunObjectToolChoiceAssistantsNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of RunObjectToolChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWith< @@ -31109,10 +28648,7 @@ abstract class RunObjectToolChoiceAssistantsNamedToolChoice @override AssistantsNamedToolChoice get value; - - /// Create a copy of RunObjectToolChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWith< _$RunObjectToolChoiceAssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -31123,8 +28659,8 @@ RunObjectResponseFormat _$RunObjectResponseFormatFromJson( switch (json['runtimeType']) { case 'mode': return RunObjectResponseFormatEnumeration.fromJson(json); - case 'responseFormat': - return RunObjectResponseFormatResponseFormat.fromJson(json); + case 'format': + return RunObjectResponseFormatAssistantsResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -31141,46 +28677,45 @@ mixin _$RunObjectResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(RunObjectResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(RunObjectResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(RunObjectResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? 
responseFormat, + TResult Function(AssistantsResponseFormat value)? format, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ required TResult Function(RunObjectResponseFormatEnumeration value) mode, - required TResult Function(RunObjectResponseFormatResponseFormat value) - responseFormat, + required TResult Function( + RunObjectResponseFormatAssistantsResponseFormat value) + format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(RunObjectResponseFormatEnumeration value)? mode, - TResult? Function(RunObjectResponseFormatResponseFormat value)? - responseFormat, + TResult? Function(RunObjectResponseFormatAssistantsResponseFormat value)? + format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(RunObjectResponseFormatEnumeration value)? mode, - TResult Function(RunObjectResponseFormatResponseFormat value)? - responseFormat, + TResult Function(RunObjectResponseFormatAssistantsResponseFormat value)? + format, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this RunObjectResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -31201,9 +28736,6 @@ class _$RunObjectResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of RunObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -31226,8 +28758,6 @@ class __$$RunObjectResponseFormatEnumerationImplCopyWithImpl<$Res> $Res Function(_$RunObjectResponseFormatEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of RunObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31274,13 +28804,11 @@ class _$RunObjectResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of RunObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunObjectResponseFormatEnumerationImplCopyWith< @@ -31292,7 +28820,7 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(RunObjectResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) { return mode(value); } @@ -31301,7 +28829,7 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(RunObjectResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? format, }) { return mode?.call(value); } @@ -31310,7 +28838,7 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(RunObjectResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? 
format, required TResult orElse(), }) { if (mode != null) { @@ -31323,8 +28851,9 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult map({ required TResult Function(RunObjectResponseFormatEnumeration value) mode, - required TResult Function(RunObjectResponseFormatResponseFormat value) - responseFormat, + required TResult Function( + RunObjectResponseFormatAssistantsResponseFormat value) + format, }) { return mode(this); } @@ -31333,8 +28862,8 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult? mapOrNull({ TResult? Function(RunObjectResponseFormatEnumeration value)? mode, - TResult? Function(RunObjectResponseFormatResponseFormat value)? - responseFormat, + TResult? Function(RunObjectResponseFormatAssistantsResponseFormat value)? + format, }) { return mode?.call(this); } @@ -31343,8 +28872,8 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeMap({ TResult Function(RunObjectResponseFormatEnumeration value)? mode, - TResult Function(RunObjectResponseFormatResponseFormat value)? - responseFormat, + TResult Function(RunObjectResponseFormatAssistantsResponseFormat value)? + format, required TResult orElse(), }) { if (mode != null) { @@ -31374,58 +28903,55 @@ abstract class RunObjectResponseFormatEnumeration @override RunObjectResponseFormatMode get value; - - /// Create a copy of RunObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunObjectResponseFormatEnumerationImplCopyWith< _$RunObjectResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunObjectResponseFormatResponseFormatImplCopyWith<$Res> { - factory _$$RunObjectResponseFormatResponseFormatImplCopyWith( - _$RunObjectResponseFormatResponseFormatImpl value, - $Res Function(_$RunObjectResponseFormatResponseFormatImpl) then) = - __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl<$Res>; +abstract class _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< + $Res> { + factory _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith( + _$RunObjectResponseFormatAssistantsResponseFormatImpl value, + $Res Function(_$RunObjectResponseFormatAssistantsResponseFormatImpl) + then) = + __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res>; @useResult - $Res call({ResponseFormat value}); + $Res call({AssistantsResponseFormat value}); - $ResponseFormatCopyWith<$Res> get value; + $AssistantsResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl<$Res> +class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> extends _$RunObjectResponseFormatCopyWithImpl<$Res, - _$RunObjectResponseFormatResponseFormatImpl> - implements _$$RunObjectResponseFormatResponseFormatImplCopyWith<$Res> { - __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl( - _$RunObjectResponseFormatResponseFormatImpl _value, - $Res Function(_$RunObjectResponseFormatResponseFormatImpl) _then) + _$RunObjectResponseFormatAssistantsResponseFormatImpl> + implements + _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith<$Res> { + __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl( + _$RunObjectResponseFormatAssistantsResponseFormatImpl _value, + $Res Function(_$RunObjectResponseFormatAssistantsResponseFormatImpl) + _then) : super(_value, _then); - /// Create a copy of 
RunObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? value = null, }) { - return _then(_$RunObjectResponseFormatResponseFormatImpl( + return _then(_$RunObjectResponseFormatAssistantsResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as ResponseFormat, + as AssistantsResponseFormat, )); } - /// Create a copy of RunObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $ResponseFormatCopyWith<$Res> get value { - return $ResponseFormatCopyWith<$Res>(_value.value, (value) { + $AssistantsResponseFormatCopyWith<$Res> get value { + return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -31433,77 +28959,77 @@ class __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$RunObjectResponseFormatResponseFormatImpl - extends RunObjectResponseFormatResponseFormat { - const _$RunObjectResponseFormatResponseFormatImpl(this.value, +class _$RunObjectResponseFormatAssistantsResponseFormatImpl + extends RunObjectResponseFormatAssistantsResponseFormat { + const _$RunObjectResponseFormatAssistantsResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'responseFormat', + : $type = $type ?? 'format', super._(); - factory _$RunObjectResponseFormatResponseFormatImpl.fromJson( + factory _$RunObjectResponseFormatAssistantsResponseFormatImpl.fromJson( Map json) => - _$$RunObjectResponseFormatResponseFormatImplFromJson(json); + _$$RunObjectResponseFormatAssistantsResponseFormatImplFromJson(json); @override - final ResponseFormat value; + final AssistantsResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'RunObjectResponseFormat.responseFormat(value: $value)'; + return 'RunObjectResponseFormat.format(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunObjectResponseFormatResponseFormatImpl && + other is _$RunObjectResponseFormatAssistantsResponseFormatImpl && (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of RunObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunObjectResponseFormatResponseFormatImplCopyWith< - _$RunObjectResponseFormatResponseFormatImpl> - get copyWith => __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl< - _$RunObjectResponseFormatResponseFormatImpl>(this, _$identity); + _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< + _$RunObjectResponseFormatAssistantsResponseFormatImpl> + get copyWith => + __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< + _$RunObjectResponseFormatAssistantsResponseFormatImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(RunObjectResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) { - return responseFormat(value); + return format(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(RunObjectResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? format, }) { - return responseFormat?.call(value); + return format?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(RunObjectResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? format, required TResult orElse(), }) { - if (responseFormat != null) { - return responseFormat(value); + if (format != null) { + return format(value); } return orElse(); } @@ -31512,62 +29038,61 @@ class _$RunObjectResponseFormatResponseFormatImpl @optionalTypeArgs TResult map({ required TResult Function(RunObjectResponseFormatEnumeration value) mode, - required TResult Function(RunObjectResponseFormatResponseFormat value) - responseFormat, + required TResult Function( + RunObjectResponseFormatAssistantsResponseFormat value) + format, }) { - return responseFormat(this); + return format(this); } @override @optionalTypeArgs TResult? mapOrNull({ TResult? Function(RunObjectResponseFormatEnumeration value)? mode, - TResult? Function(RunObjectResponseFormatResponseFormat value)? - responseFormat, + TResult? Function(RunObjectResponseFormatAssistantsResponseFormat value)? + format, }) { - return responseFormat?.call(this); + return format?.call(this); } @override @optionalTypeArgs TResult maybeMap({ TResult Function(RunObjectResponseFormatEnumeration value)? mode, - TResult Function(RunObjectResponseFormatResponseFormat value)? - responseFormat, + TResult Function(RunObjectResponseFormatAssistantsResponseFormat value)? 
+ format, required TResult orElse(), }) { - if (responseFormat != null) { - return responseFormat(this); + if (format != null) { + return format(this); } return orElse(); } @override Map toJson() { - return _$$RunObjectResponseFormatResponseFormatImplToJson( + return _$$RunObjectResponseFormatAssistantsResponseFormatImplToJson( this, ); } } -abstract class RunObjectResponseFormatResponseFormat +abstract class RunObjectResponseFormatAssistantsResponseFormat extends RunObjectResponseFormat { - const factory RunObjectResponseFormatResponseFormat( - final ResponseFormat value) = _$RunObjectResponseFormatResponseFormatImpl; - const RunObjectResponseFormatResponseFormat._() : super._(); + const factory RunObjectResponseFormatAssistantsResponseFormat( + final AssistantsResponseFormat value) = + _$RunObjectResponseFormatAssistantsResponseFormatImpl; + const RunObjectResponseFormatAssistantsResponseFormat._() : super._(); - factory RunObjectResponseFormatResponseFormat.fromJson( + factory RunObjectResponseFormatAssistantsResponseFormat.fromJson( Map json) = - _$RunObjectResponseFormatResponseFormatImpl.fromJson; + _$RunObjectResponseFormatAssistantsResponseFormatImpl.fromJson; @override - ResponseFormat get value; - - /// Create a copy of RunObjectResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunObjectResponseFormatResponseFormatImplCopyWith< - _$RunObjectResponseFormatResponseFormatImpl> + AssistantsResponseFormat get value; + @JsonKey(ignore: true) + _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< + _$RunObjectResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -31581,12 +29106,8 @@ mixin _$RunSubmitToolOutputs { @JsonKey(name: 'tool_calls') List get toolCalls => throw _privateConstructorUsedError; - /// Serializes this RunSubmitToolOutputs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunSubmitToolOutputs - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunSubmitToolOutputsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -31611,8 +29132,6 @@ class _$RunSubmitToolOutputsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunSubmitToolOutputs - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31646,8 +29165,6 @@ class __$$RunSubmitToolOutputsImplCopyWithImpl<$Res> $Res Function(_$RunSubmitToolOutputsImpl) _then) : super(_value, _then); - /// Create a copy of RunSubmitToolOutputs - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31700,14 +29217,12 @@ class _$RunSubmitToolOutputsImpl extends _RunSubmitToolOutputs { .equals(other._toolCalls, _toolCalls)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_toolCalls)); - /// Create a copy of RunSubmitToolOutputs - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunSubmitToolOutputsImplCopyWith<_$RunSubmitToolOutputsImpl> @@ -31733,15 +29248,13 @@ abstract class _RunSubmitToolOutputs extends RunSubmitToolOutputs { factory _RunSubmitToolOutputs.fromJson(Map json) = _$RunSubmitToolOutputsImpl.fromJson; - /// A list of the relevant tool calls. @override + + /// A list of the relevant tool calls. @JsonKey(name: 'tool_calls') List get toolCalls; - - /// Create a copy of RunSubmitToolOutputs - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunSubmitToolOutputsImplCopyWith<_$RunSubmitToolOutputsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -31764,12 +29277,8 @@ mixin _$RunCompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; - /// Serializes this RunCompletionUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunCompletionUsage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunCompletionUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -31796,8 +29305,6 @@ class _$RunCompletionUsageCopyWithImpl<$Res, $Val extends RunCompletionUsage> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunCompletionUsage - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31844,8 +29351,6 @@ class __$$RunCompletionUsageImplCopyWithImpl<$Res> $Res Function(_$RunCompletionUsageImpl) _then) : super(_value, _then); - /// Create a copy of RunCompletionUsage - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31915,14 +29420,12 @@ class _$RunCompletionUsageImpl extends _RunCompletionUsage { other.totalTokens == totalTokens)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); - /// Create a copy of RunCompletionUsage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunCompletionUsageImplCopyWith<_$RunCompletionUsageImpl> get copyWith => @@ -31948,25 +29451,23 @@ abstract class _RunCompletionUsage extends RunCompletionUsage { factory _RunCompletionUsage.fromJson(Map json) = _$RunCompletionUsageImpl.fromJson; - /// Number of completion tokens used over the course of the run. @override + + /// Number of completion tokens used over the course of the run. @JsonKey(name: 'completion_tokens') int get completionTokens; + @override /// Number of prompt tokens used over the course of the run. - @override @JsonKey(name: 'prompt_tokens') int get promptTokens; + @override /// Total number of tokens used (prompt + completion). - @override @JsonKey(name: 'total_tokens') int get totalTokens; - - /// Create a copy of RunCompletionUsage - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunCompletionUsageImplCopyWith<_$RunCompletionUsageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32003,20 +29504,15 @@ mixin _$CreateRunRequest { @JsonKey(includeIfNull: false) List? get tools => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @@ -32045,28 +29541,11 @@ mixin _$CreateRunRequest { CreateRunRequestToolChoice? get toolChoice => throw _privateConstructorUsedError; - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? get parallelToolCalls => throw _privateConstructorUsedError; - - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. 
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? get responseFormat => @@ -32076,12 +29555,8 @@ mixin _$CreateRunRequest { @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; - /// Serializes this CreateRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateRunRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32115,8 +29590,6 @@ abstract class $CreateRunRequestCopyWith<$Res> { @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? toolChoice, - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? responseFormat, @@ -32138,8 +29611,6 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateRunRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32156,7 +29627,6 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, - Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -32213,10 +29683,6 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateRunRequestToolChoice?, - parallelToolCalls: freezed == parallelToolCalls - ? _value.parallelToolCalls - : parallelToolCalls // ignore: cast_nullable_to_non_nullable - as bool?, responseFormat: freezed == responseFormat ? 
_value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -32228,8 +29694,6 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> ) as $Val); } - /// Create a copy of CreateRunRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateRunRequestModelCopyWith<$Res>? get model { @@ -32242,8 +29706,6 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> }); } - /// Create a copy of CreateRunRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TruncationObjectCopyWith<$Res>? get truncationStrategy { @@ -32256,8 +29718,6 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> }); } - /// Create a copy of CreateRunRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateRunRequestToolChoiceCopyWith<$Res>? get toolChoice { @@ -32271,8 +29731,6 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> }); } - /// Create a copy of CreateRunRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateRunRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -32318,8 +29776,6 @@ abstract class _$$CreateRunRequestImplCopyWith<$Res> @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? toolChoice, - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? responseFormat, @@ -32343,8 +29799,6 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestImpl) _then) : super(_value, _then); - /// Create a copy of CreateRunRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32361,7 +29815,6 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, - Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -32418,10 +29871,6 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateRunRequestToolChoice?, - parallelToolCalls: freezed == parallelToolCalls - ? _value.parallelToolCalls - : parallelToolCalls // ignore: cast_nullable_to_non_nullable - as bool?, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -32460,8 +29909,6 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - this.parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @@ -32524,14 +29971,10 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { return EqualUnmodifiableListView(value); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -32542,15 +29985,12 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @@ -32582,29 +30022,11 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateRunRequestToolChoice? toolChoice; - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. - @override - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - final bool? parallelToolCalls; - - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
/// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -32617,7 +30039,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @override String toString() { - return 'CreateRunRequest(assistantId: $assistantId, model: $model, instructions: $instructions, additionalInstructions: $additionalInstructions, additionalMessages: $additionalMessages, tools: $tools, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, responseFormat: $responseFormat, stream: $stream)'; + return 'CreateRunRequest(assistantId: $assistantId, model: $model, instructions: $instructions, additionalInstructions: $additionalInstructions, additionalMessages: $additionalMessages, tools: $tools, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat, stream: $stream)'; } @override @@ -32647,14 +30069,12 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { other.truncationStrategy == truncationStrategy) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && - (identical(other.parallelToolCalls, parallelToolCalls) || - other.parallelToolCalls == parallelToolCalls) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat) && (identical(other.stream, stream) || other.stream == stream)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -32671,13 +30091,10 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { maxCompletionTokens, truncationStrategy, toolChoice, - parallelToolCalls, responseFormat, stream); - /// Create a copy of CreateRunRequest - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateRunRequestImplCopyWith<_$CreateRunRequestImpl> get copyWith => @@ -32716,8 +30133,6 @@ abstract class _CreateRunRequest extends CreateRunRequest { @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateRunRequestToolChoice? toolChoice, - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - final bool? parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) final CreateRunRequestResponseFormat? responseFormat, @@ -32728,121 +30143,96 @@ abstract class _CreateRunRequest extends CreateRunRequest { factory _CreateRunRequest.fromJson(Map json) = _$CreateRunRequestImpl.fromJson; - /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. @override + + /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. @JsonKey(name: 'assistant_id') String get assistantId; + @override /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - @override @_CreateRunRequestModelConverter() @JsonKey(includeIfNull: false) CreateRunRequestModel? get model; + @override /// Overrides the [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. - @override @JsonKey(includeIfNull: false) String? get instructions; + @override /// Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. - @override @JsonKey(name: 'additional_instructions', includeIfNull: false) String? get additionalInstructions; + @override /// Adds additional messages to the thread before creating the run. - @override @JsonKey(name: 'additional_messages', includeIfNull: false) List? get additionalMessages; + @override /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - @override @JsonKey(includeIfNull: false) List? get tools; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; - - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. @override + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? 
get temperature; + @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. - @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; + @override /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - @override @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? get maxPromptTokens; + @override /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - @override @JsonKey(name: 'max_completion_tokens', includeIfNull: false) int? get maxCompletionTokens; + @override /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. - @override @JsonKey(name: 'truncation_strategy', includeIfNull: false) TruncationObject? get truncationStrategy; + @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tools and instead generates a message. /// `auto` is the default value and means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools before responding to the user. /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - @override @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? get toolChoice; - - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. @override - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? get parallelToolCalls; - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. - @override + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? get responseFormat; + @override /// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. - @override @JsonKey(includeIfNull: false) bool? get stream; - - /// Create a copy of CreateRunRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateRunRequestImplCopyWith<_$CreateRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32850,9 +30240,9 @@ abstract class _CreateRunRequest extends CreateRunRequest { CreateRunRequestModel _$CreateRunRequestModelFromJson( Map json) { switch (json['runtimeType']) { - case 'model': + case 'enumeration': return CreateRunRequestModelEnumeration.fromJson(json); - case 'modelId': + case 'string': return CreateRunRequestModelString.fromJson(json); default: @@ -32869,44 +30259,43 @@ mixin _$CreateRunRequestModel { Object get value => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function(RunModels value) model, - required TResult Function(String value) modelId, + required TResult Function(RunModels value) enumeration, + required TResult Function(String value) string, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(RunModels value)? model, - TResult? Function(String value)? modelId, + TResult? Function(RunModels value)? enumeration, + TResult? Function(String value)? string, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(RunModels value)? model, - TResult Function(String value)? modelId, + TResult Function(RunModels value)? enumeration, + TResult Function(String value)? 
string, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(CreateRunRequestModelEnumeration value) model, - required TResult Function(CreateRunRequestModelString value) modelId, + required TResult Function(CreateRunRequestModelEnumeration value) + enumeration, + required TResult Function(CreateRunRequestModelString value) string, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(CreateRunRequestModelEnumeration value)? model, - TResult? Function(CreateRunRequestModelString value)? modelId, + TResult? Function(CreateRunRequestModelEnumeration value)? enumeration, + TResult? Function(CreateRunRequestModelString value)? string, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(CreateRunRequestModelEnumeration value)? model, - TResult Function(CreateRunRequestModelString value)? modelId, + TResult Function(CreateRunRequestModelEnumeration value)? enumeration, + TResult Function(CreateRunRequestModelString value)? string, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this CreateRunRequestModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -32927,9 +30316,6 @@ class _$CreateRunRequestModelCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of CreateRunRequestModel - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -32952,8 +30338,6 @@ class __$$CreateRunRequestModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestModelEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of CreateRunRequestModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32974,7 +30358,7 @@ class _$CreateRunRequestModelEnumerationImpl extends CreateRunRequestModelEnumeration { const _$CreateRunRequestModelEnumerationImpl(this.value, {final String? $type}) - : $type = $type ?? 'model', + : $type = $type ?? 'enumeration', super._(); factory _$CreateRunRequestModelEnumerationImpl.fromJson( @@ -32989,7 +30373,7 @@ class _$CreateRunRequestModelEnumerationImpl @override String toString() { - return 'CreateRunRequestModel.model(value: $value)'; + return 'CreateRunRequestModel.enumeration(value: $value)'; } @override @@ -33000,13 +30384,11 @@ class _$CreateRunRequestModelEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateRunRequestModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateRunRequestModelEnumerationImplCopyWith< @@ -33017,30 +30399,30 @@ class _$CreateRunRequestModelEnumerationImpl @override @optionalTypeArgs TResult when({ - required TResult Function(RunModels value) model, - required TResult Function(String value) modelId, + required TResult Function(RunModels value) enumeration, + required TResult Function(String value) string, }) { - return model(value); + return enumeration(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(RunModels value)? model, - TResult? Function(String value)? 
modelId, + TResult? Function(RunModels value)? enumeration, + TResult? Function(String value)? string, }) { - return model?.call(value); + return enumeration?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(RunModels value)? model, - TResult Function(String value)? modelId, + TResult Function(RunModels value)? enumeration, + TResult Function(String value)? string, required TResult orElse(), }) { - if (model != null) { - return model(value); + if (enumeration != null) { + return enumeration(value); } return orElse(); } @@ -33048,30 +30430,31 @@ class _$CreateRunRequestModelEnumerationImpl @override @optionalTypeArgs TResult map({ - required TResult Function(CreateRunRequestModelEnumeration value) model, - required TResult Function(CreateRunRequestModelString value) modelId, + required TResult Function(CreateRunRequestModelEnumeration value) + enumeration, + required TResult Function(CreateRunRequestModelString value) string, }) { - return model(this); + return enumeration(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(CreateRunRequestModelEnumeration value)? model, - TResult? Function(CreateRunRequestModelString value)? modelId, + TResult? Function(CreateRunRequestModelEnumeration value)? enumeration, + TResult? Function(CreateRunRequestModelString value)? string, }) { - return model?.call(this); + return enumeration?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(CreateRunRequestModelEnumeration value)? model, - TResult Function(CreateRunRequestModelString value)? modelId, + TResult Function(CreateRunRequestModelEnumeration value)? enumeration, + TResult Function(CreateRunRequestModelString value)? string, required TResult orElse(), }) { - if (model != null) { - return model(this); + if (enumeration != null) { + return enumeration(this); } return orElse(); } @@ -33094,10 +30477,7 @@ abstract class CreateRunRequestModelEnumeration extends CreateRunRequestModel { @override RunModels get value; - - /// Create a copy of CreateRunRequestModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateRunRequestModelEnumerationImplCopyWith< _$CreateRunRequestModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -33123,8 +30503,6 @@ class __$$CreateRunRequestModelStringImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestModelStringImpl) _then) : super(_value, _then); - /// Create a copy of CreateRunRequestModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -33143,7 +30521,7 @@ class __$$CreateRunRequestModelStringImplCopyWithImpl<$Res> @JsonSerializable() class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { const _$CreateRunRequestModelStringImpl(this.value, {final String? $type}) - : $type = $type ?? 'modelId', + : $type = $type ?? 
'string', super._(); factory _$CreateRunRequestModelStringImpl.fromJson( @@ -33158,7 +30536,7 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { @override String toString() { - return 'CreateRunRequestModel.modelId(value: $value)'; + return 'CreateRunRequestModel.string(value: $value)'; } @override @@ -33169,13 +30547,11 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateRunRequestModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateRunRequestModelStringImplCopyWith<_$CreateRunRequestModelStringImpl> @@ -33185,30 +30561,30 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { @override @optionalTypeArgs TResult when({ - required TResult Function(RunModels value) model, - required TResult Function(String value) modelId, + required TResult Function(RunModels value) enumeration, + required TResult Function(String value) string, }) { - return modelId(value); + return string(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(RunModels value)? model, - TResult? Function(String value)? modelId, + TResult? Function(RunModels value)? enumeration, + TResult? Function(String value)? string, }) { - return modelId?.call(value); + return string?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(RunModels value)? model, - TResult Function(String value)? modelId, + TResult Function(RunModels value)? enumeration, + TResult Function(String value)? string, required TResult orElse(), }) { - if (modelId != null) { - return modelId(value); + if (string != null) { + return string(value); } return orElse(); } @@ -33216,30 +30592,31 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { @override @optionalTypeArgs TResult map({ - required TResult Function(CreateRunRequestModelEnumeration value) model, - required TResult Function(CreateRunRequestModelString value) modelId, + required TResult Function(CreateRunRequestModelEnumeration value) + enumeration, + required TResult Function(CreateRunRequestModelString value) string, }) { - return modelId(this); + return string(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(CreateRunRequestModelEnumeration value)? model, - TResult? Function(CreateRunRequestModelString value)? modelId, + TResult? Function(CreateRunRequestModelEnumeration value)? enumeration, + TResult? Function(CreateRunRequestModelString value)? string, }) { - return modelId?.call(this); + return string?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(CreateRunRequestModelEnumeration value)? model, - TResult Function(CreateRunRequestModelString value)? modelId, + TResult Function(CreateRunRequestModelEnumeration value)? enumeration, + TResult Function(CreateRunRequestModelString value)? 
string, required TResult orElse(), }) { - if (modelId != null) { - return modelId(this); + if (string != null) { + return string(this); } return orElse(); } @@ -33262,10 +30639,7 @@ abstract class CreateRunRequestModelString extends CreateRunRequestModel { @override String get value; - - /// Create a copy of CreateRunRequestModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateRunRequestModelStringImplCopyWith<_$CreateRunRequestModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -33333,8 +30707,6 @@ mixin _$CreateRunRequestToolChoice { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this CreateRunRequestToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -33356,9 +30728,6 @@ class _$CreateRunRequestToolChoiceCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of CreateRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -33381,8 +30750,6 @@ class __$$CreateRunRequestToolChoiceEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestToolChoiceEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of CreateRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -33429,13 +30796,11 @@ class _$CreateRunRequestToolChoiceEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateRunRequestToolChoiceEnumerationImplCopyWith< @@ -33531,10 +30896,7 @@ abstract class CreateRunRequestToolChoiceEnumeration @override CreateRunRequestToolChoiceMode get value; - - /// Create a copy of CreateRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateRunRequestToolChoiceEnumerationImplCopyWith< _$CreateRunRequestToolChoiceEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -33570,8 +30932,6 @@ class __$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl< _then) : super(_value, _then); - /// Create a copy of CreateRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -33585,8 +30945,6 @@ class __$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl< )); } - /// Create a copy of CreateRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $AssistantsNamedToolChoiceCopyWith<$Res> get value { @@ -33629,13 +30987,11 @@ class _$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< @@ -33733,10 +31089,7 @@ abstract class CreateRunRequestToolChoiceAssistantsNamedToolChoice @override AssistantsNamedToolChoice get value; - - /// Create a copy of CreateRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< _$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -33747,8 +31100,9 @@ CreateRunRequestResponseFormat _$CreateRunRequestResponseFormatFromJson( switch (json['runtimeType']) { case 'mode': return CreateRunRequestResponseFormatEnumeration.fromJson(json); - case 'responseFormat': - return CreateRunRequestResponseFormatResponseFormat.fromJson(json); + case 'format': + return CreateRunRequestResponseFormatAssistantsResponseFormat.fromJson( + json); default: throw CheckedFromJsonException( @@ -33765,19 +31119,19 @@ mixin _$CreateRunRequestResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(CreateRunRequestResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateRunRequestResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateRunRequestResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? format, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -33786,27 +31140,27 @@ mixin _$CreateRunRequestResponseFormat { required TResult Function(CreateRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateRunRequestResponseFormatResponseFormat value) - responseFormat, + CreateRunRequestResponseFormatAssistantsResponseFormat value) + format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult? Function(CreateRunRequestResponseFormatResponseFormat value)? - responseFormat, + TResult? Function( + CreateRunRequestResponseFormatAssistantsResponseFormat value)? + format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult Function(CreateRunRequestResponseFormatResponseFormat value)? 
- responseFormat, + TResult Function( + CreateRunRequestResponseFormatAssistantsResponseFormat value)? + format, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this CreateRunRequestResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -33829,9 +31183,6 @@ class _$CreateRunRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of CreateRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -33854,8 +31205,6 @@ class __$$CreateRunRequestResponseFormatEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestResponseFormatEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of CreateRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -33902,13 +31251,11 @@ class _$CreateRunRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateRunRequestResponseFormatEnumerationImplCopyWith< @@ -33922,7 +31269,7 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(CreateRunRequestResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) { return mode(value); } @@ -33931,7 +31278,7 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateRunRequestResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? format, }) { return mode?.call(value); } @@ -33940,7 +31287,7 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateRunRequestResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? format, required TResult orElse(), }) { if (mode != null) { @@ -33955,8 +31302,8 @@ class _$CreateRunRequestResponseFormatEnumerationImpl required TResult Function(CreateRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateRunRequestResponseFormatResponseFormat value) - responseFormat, + CreateRunRequestResponseFormatAssistantsResponseFormat value) + format, }) { return mode(this); } @@ -33965,8 +31312,9 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult? Function(CreateRunRequestResponseFormatResponseFormat value)? - responseFormat, + TResult? Function( + CreateRunRequestResponseFormatAssistantsResponseFormat value)? + format, }) { return mode?.call(this); } @@ -33975,8 +31323,9 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeMap({ TResult Function(CreateRunRequestResponseFormatEnumeration value)? 
mode, - TResult Function(CreateRunRequestResponseFormatResponseFormat value)? - responseFormat, + TResult Function( + CreateRunRequestResponseFormatAssistantsResponseFormat value)? + format, required TResult orElse(), }) { if (mode != null) { @@ -34006,61 +31355,60 @@ abstract class CreateRunRequestResponseFormatEnumeration @override CreateRunRequestResponseFormatMode get value; - - /// Create a copy of CreateRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateRunRequestResponseFormatEnumerationImplCopyWith< _$CreateRunRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith< +abstract class _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< $Res> { - factory _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith( - _$CreateRunRequestResponseFormatResponseFormatImpl value, - $Res Function(_$CreateRunRequestResponseFormatResponseFormatImpl) + factory _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith( + _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl value, + $Res Function( + _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl) then) = - __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl<$Res>; + __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + $Res>; @useResult - $Res call({ResponseFormat value}); + $Res call({AssistantsResponseFormat value}); - $ResponseFormatCopyWith<$Res> get value; + $AssistantsResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl<$Res> +class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + $Res> extends _$CreateRunRequestResponseFormatCopyWithImpl<$Res, - _$CreateRunRequestResponseFormatResponseFormatImpl> + _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> implements - _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith<$Res> { - __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl( - _$CreateRunRequestResponseFormatResponseFormatImpl _value, - $Res Function(_$CreateRunRequestResponseFormatResponseFormatImpl) _then) + _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< + $Res> { + __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( + _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl _value, + $Res Function( + _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl) + _then) : super(_value, _then); - /// Create a copy of CreateRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? value = null, }) { - return _then(_$CreateRunRequestResponseFormatResponseFormatImpl( + return _then(_$CreateRunRequestResponseFormatAssistantsResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as ResponseFormat, + as AssistantsResponseFormat, )); } - /// Create a copy of CreateRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $ResponseFormatCopyWith<$Res> get value { - return $ResponseFormatCopyWith<$Res>(_value.value, (value) { + $AssistantsResponseFormatCopyWith<$Res> get value { + return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -34068,79 +31416,79 @@ class __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$CreateRunRequestResponseFormatResponseFormatImpl - extends CreateRunRequestResponseFormatResponseFormat { - const _$CreateRunRequestResponseFormatResponseFormatImpl(this.value, +class _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl + extends CreateRunRequestResponseFormatAssistantsResponseFormat { + const _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'responseFormat', + : $type = $type ?? 'format', super._(); - factory _$CreateRunRequestResponseFormatResponseFormatImpl.fromJson( + factory _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl.fromJson( Map json) => - _$$CreateRunRequestResponseFormatResponseFormatImplFromJson(json); + _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplFromJson( + json); @override - final ResponseFormat value; + final AssistantsResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'CreateRunRequestResponseFormat.responseFormat(value: $value)'; + return 'CreateRunRequestResponseFormat.format(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$CreateRunRequestResponseFormatResponseFormatImpl && + other + is _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl && (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith< - _$CreateRunRequestResponseFormatResponseFormatImpl> + _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => - __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl< - _$CreateRunRequestResponseFormatResponseFormatImpl>( + __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(CreateRunRequestResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) { - return responseFormat(value); + return format(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateRunRequestResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? 
format, }) { - return responseFormat?.call(value); + return format?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateRunRequestResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? format, required TResult orElse(), }) { - if (responseFormat != null) { - return responseFormat(value); + if (format != null) { + return format(value); } return orElse(); } @@ -34151,63 +31499,62 @@ class _$CreateRunRequestResponseFormatResponseFormatImpl required TResult Function(CreateRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateRunRequestResponseFormatResponseFormat value) - responseFormat, + CreateRunRequestResponseFormatAssistantsResponseFormat value) + format, }) { - return responseFormat(this); + return format(this); } @override @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult? Function(CreateRunRequestResponseFormatResponseFormat value)? - responseFormat, + TResult? Function( + CreateRunRequestResponseFormatAssistantsResponseFormat value)? + format, }) { - return responseFormat?.call(this); + return format?.call(this); } @override @optionalTypeArgs TResult maybeMap({ TResult Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult Function(CreateRunRequestResponseFormatResponseFormat value)? - responseFormat, + TResult Function( + CreateRunRequestResponseFormatAssistantsResponseFormat value)? + format, required TResult orElse(), }) { - if (responseFormat != null) { - return responseFormat(this); + if (format != null) { + return format(this); } return orElse(); } @override Map toJson() { - return _$$CreateRunRequestResponseFormatResponseFormatImplToJson( + return _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplToJson( this, ); } } -abstract class CreateRunRequestResponseFormatResponseFormat +abstract class CreateRunRequestResponseFormatAssistantsResponseFormat extends CreateRunRequestResponseFormat { - const factory CreateRunRequestResponseFormatResponseFormat( - final ResponseFormat value) = - _$CreateRunRequestResponseFormatResponseFormatImpl; - const CreateRunRequestResponseFormatResponseFormat._() : super._(); + const factory CreateRunRequestResponseFormatAssistantsResponseFormat( + final AssistantsResponseFormat value) = + _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl; + const CreateRunRequestResponseFormatAssistantsResponseFormat._() : super._(); - factory CreateRunRequestResponseFormatResponseFormat.fromJson( + factory CreateRunRequestResponseFormatAssistantsResponseFormat.fromJson( Map json) = - _$CreateRunRequestResponseFormatResponseFormatImpl.fromJson; + _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl.fromJson; @override - ResponseFormat get value; - - /// Create a copy of CreateRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith< - _$CreateRunRequestResponseFormatResponseFormatImpl> + AssistantsResponseFormat get value; + @JsonKey(ignore: true) + _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -34235,12 +31582,8 @@ mixin _$ListRunsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; - /// Serializes this ListRunsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ListRunsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ListRunsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -34269,8 +31612,6 @@ class _$ListRunsResponseCopyWithImpl<$Res, $Val extends ListRunsResponse> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ListRunsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34329,8 +31670,6 @@ class __$$ListRunsResponseImplCopyWithImpl<$Res> $Res Function(_$ListRunsResponseImpl) _then) : super(_value, _then); - /// Create a copy of ListRunsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34427,14 +31766,12 @@ class _$ListRunsResponseImpl extends _ListRunsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - /// Create a copy of ListRunsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ListRunsResponseImplCopyWith<_$ListRunsResponseImpl> get copyWith => @@ -34462,33 +31799,31 @@ abstract class _ListRunsResponse extends ListRunsResponse { factory _ListRunsResponse.fromJson(Map json) = _$ListRunsResponseImpl.fromJson; - /// The object type, which is always `list`. @override + + /// The object type, which is always `list`. String get object; + @override /// The list of runs. - @override List get data; + @override /// The ID of the first run in the list. - @override @JsonKey(name: 'first_id') String get firstId; + @override /// The ID of the last run in the list. - @override @JsonKey(name: 'last_id') String get lastId; + @override /// Whether there are more runs to retrieve. - @override @JsonKey(name: 'has_more') bool get hasMore; - - /// Create a copy of ListRunsResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ListRunsResponseImplCopyWith<_$ListRunsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -34499,18 +31834,12 @@ ModifyRunRequest _$ModifyRunRequestFromJson(Map json) { /// @nodoc mixin _$ModifyRunRequest { - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// Serializes this ModifyRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ModifyRunRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ModifyRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -34534,8 +31863,6 @@ class _$ModifyRunRequestCopyWithImpl<$Res, $Val extends ModifyRunRequest> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ModifyRunRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34569,8 +31896,6 @@ class __$$ModifyRunRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyRunRequestImpl) _then) : super(_value, _then); - /// Create a copy of ModifyRunRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34596,14 +31921,10 @@ class _$ModifyRunRequestImpl extends _ModifyRunRequest { factory _$ModifyRunRequestImpl.fromJson(Map json) => _$$ModifyRunRequestImplFromJson(json); - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -34627,14 +31948,12 @@ class _$ModifyRunRequestImpl extends _ModifyRunRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_metadata)); - /// Create a copy of ModifyRunRequest - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ModifyRunRequestImplCopyWith<_$ModifyRunRequestImpl> get copyWith => @@ -34658,17 +31977,13 @@ abstract class _ModifyRunRequest extends ModifyRunRequest { factory _ModifyRunRequest.fromJson(Map json) = _$ModifyRunRequestImpl.fromJson; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; - - /// Create a copy of ModifyRunRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ModifyRunRequestImplCopyWith<_$ModifyRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -34689,12 +32004,8 @@ mixin _$SubmitToolOutputsRunRequest { @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; - /// Serializes this SubmitToolOutputsRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of SubmitToolOutputsRunRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $SubmitToolOutputsRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -34723,8 +32034,6 @@ class _$SubmitToolOutputsRunRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of SubmitToolOutputsRunRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34768,8 +32077,6 @@ class __$$SubmitToolOutputsRunRequestImplCopyWithImpl<$Res> $Res Function(_$SubmitToolOutputsRunRequestImpl) _then) : super(_value, _then); - /// Create a copy of SubmitToolOutputsRunRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34835,14 +32142,12 @@ class _$SubmitToolOutputsRunRequestImpl extends _SubmitToolOutputsRunRequest { (identical(other.stream, stream) || other.stream == stream)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_toolOutputs), stream); - /// Create a copy of SubmitToolOutputsRunRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$SubmitToolOutputsRunRequestImplCopyWith<_$SubmitToolOutputsRunRequestImpl> @@ -34869,20 +32174,18 @@ abstract class _SubmitToolOutputsRunRequest factory _SubmitToolOutputsRunRequest.fromJson(Map json) = _$SubmitToolOutputsRunRequestImpl.fromJson; - /// A list of tools for which the outputs are being submitted. @override + + /// A list of tools for which the outputs are being submitted. 
@JsonKey(name: 'tool_outputs') List get toolOutputs; + @override /// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. - @override @JsonKey(includeIfNull: false) bool? get stream; - - /// Create a copy of SubmitToolOutputsRunRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$SubmitToolOutputsRunRequestImplCopyWith<_$SubmitToolOutputsRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -34901,12 +32204,8 @@ mixin _$RunSubmitToolOutput { @JsonKey(includeIfNull: false) String? get output => throw _privateConstructorUsedError; - /// Serializes this RunSubmitToolOutput to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunSubmitToolOutput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunSubmitToolOutputCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -34932,8 +32231,6 @@ class _$RunSubmitToolOutputCopyWithImpl<$Res, $Val extends RunSubmitToolOutput> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunSubmitToolOutput - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34974,8 +32271,6 @@ class __$$RunSubmitToolOutputImplCopyWithImpl<$Res> $Res Function(_$RunSubmitToolOutputImpl) _then) : super(_value, _then); - /// Create a copy of RunSubmitToolOutput - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35031,13 +32326,11 @@ class _$RunSubmitToolOutputImpl extends _RunSubmitToolOutput { (identical(other.output, output) || other.output == output)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, toolCallId, output); - /// Create a copy of RunSubmitToolOutput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunSubmitToolOutputImplCopyWith<_$RunSubmitToolOutputImpl> get copyWith => @@ -35063,20 +32356,18 @@ abstract class _RunSubmitToolOutput extends RunSubmitToolOutput { factory _RunSubmitToolOutput.fromJson(Map json) = _$RunSubmitToolOutputImpl.fromJson; - /// The ID of the tool call in the `required_action` object within the run object the output is being submitted for. @override + + /// The ID of the tool call in the `required_action` object within the run object the output is being submitted for. @JsonKey(name: 'tool_call_id', includeIfNull: false) String? get toolCallId; + @override /// The output of the tool call to be submitted to continue the run. - @override @JsonKey(includeIfNull: false) String? get output; - - /// Create a copy of RunSubmitToolOutput - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunSubmitToolOutputImplCopyWith<_$RunSubmitToolOutputImpl> get copyWith => throw _privateConstructorUsedError; } @@ -35096,12 +32387,8 @@ mixin _$RunToolCallObject { /// The function definition. 
RunToolCallFunction get function => throw _privateConstructorUsedError; - /// Serializes this RunToolCallObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunToolCallObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunToolCallObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -35128,8 +32415,6 @@ class _$RunToolCallObjectCopyWithImpl<$Res, $Val extends RunToolCallObject> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunToolCallObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35153,8 +32438,6 @@ class _$RunToolCallObjectCopyWithImpl<$Res, $Val extends RunToolCallObject> ) as $Val); } - /// Create a copy of RunToolCallObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunToolCallFunctionCopyWith<$Res> get function { @@ -35187,8 +32470,6 @@ class __$$RunToolCallObjectImplCopyWithImpl<$Res> $Res Function(_$RunToolCallObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunToolCallObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35251,13 +32532,11 @@ class _$RunToolCallObjectImpl extends _RunToolCallObject { other.function == function)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, type, function); - /// Create a copy of RunToolCallObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunToolCallObjectImplCopyWith<_$RunToolCallObjectImpl> get copyWith => @@ -35282,22 +32561,20 @@ abstract class _RunToolCallObject extends RunToolCallObject { factory _RunToolCallObject.fromJson(Map json) = _$RunToolCallObjectImpl.fromJson; - /// The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) endpoint. @override + + /// The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) endpoint. String get id; + @override /// The type of tool call the output is required for. For now, this is always `function`. - @override RunToolCallObjectType get type; + @override /// The function definition. - @override RunToolCallFunction get function; - - /// Create a copy of RunToolCallObject - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunToolCallObjectImplCopyWith<_$RunToolCallObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -35314,12 +32591,8 @@ mixin _$RunToolCallFunction { /// The arguments that the model expects you to pass to the function. String get arguments => throw _privateConstructorUsedError; - /// Serializes this RunToolCallFunction to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunToolCallFunction - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunToolCallFunctionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -35343,8 +32616,6 @@ class _$RunToolCallFunctionCopyWithImpl<$Res, $Val extends RunToolCallFunction> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunToolCallFunction - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35383,8 +32654,6 @@ class __$$RunToolCallFunctionImplCopyWithImpl<$Res> $Res Function(_$RunToolCallFunctionImpl) _then) : super(_value, _then); - /// Create a copy of RunToolCallFunction - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35436,13 +32705,11 @@ class _$RunToolCallFunctionImpl extends _RunToolCallFunction { other.arguments == arguments)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, name, arguments); - /// Create a copy of RunToolCallFunction - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunToolCallFunctionImplCopyWith<_$RunToolCallFunctionImpl> get copyWith => @@ -35466,18 +32733,16 @@ abstract class _RunToolCallFunction extends RunToolCallFunction { factory _RunToolCallFunction.fromJson(Map json) = _$RunToolCallFunctionImpl.fromJson; - /// The name of the function. @override + + /// The name of the function. String get name; + @override /// The arguments that the model expects you to pass to the function. - @override String get arguments; - - /// Create a copy of RunToolCallFunction - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunToolCallFunctionImplCopyWith<_$RunToolCallFunctionImpl> get copyWith => throw _privateConstructorUsedError; } @@ -35514,20 +32779,15 @@ mixin _$CreateThreadAndRunRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? 
get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @@ -35556,28 +32816,11 @@ mixin _$CreateThreadAndRunRequest { CreateThreadAndRunRequestToolChoice? get toolChoice => throw _privateConstructorUsedError; - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? get parallelToolCalls => throw _privateConstructorUsedError; - - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? get responseFormat => @@ -35587,12 +32830,8 @@ mixin _$CreateThreadAndRunRequest { @JsonKey(includeIfNull: false) bool? 
get stream => throw _privateConstructorUsedError; - /// Serializes this CreateThreadAndRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateThreadAndRunRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateThreadAndRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -35625,8 +32864,6 @@ abstract class $CreateThreadAndRunRequestCopyWith<$Res> { @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? toolChoice, - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -35651,8 +32888,6 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateThreadAndRunRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35669,7 +32904,6 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, - Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -35726,10 +32960,6 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateThreadAndRunRequestToolChoice?, - parallelToolCalls: freezed == parallelToolCalls - ? _value.parallelToolCalls - : parallelToolCalls // ignore: cast_nullable_to_non_nullable - as bool?, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -35741,8 +32971,6 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of CreateThreadAndRunRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateThreadRequestCopyWith<$Res>? get thread { @@ -35755,8 +32983,6 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateThreadAndRunRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ThreadAndRunModelCopyWith<$Res>? get model { @@ -35769,8 +32995,6 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateThreadAndRunRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -35783,8 +33007,6 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateThreadAndRunRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TruncationObjectCopyWith<$Res>? get truncationStrategy { @@ -35797,8 +33019,6 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateThreadAndRunRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateThreadAndRunRequestToolChoiceCopyWith<$Res>? 
get toolChoice { @@ -35812,8 +33032,6 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } - /// Create a copy of CreateThreadAndRunRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateThreadAndRunRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -35859,8 +33077,6 @@ abstract class _$$CreateThreadAndRunRequestImplCopyWith<$Res> @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? toolChoice, - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -35890,8 +33106,6 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> $Res Function(_$CreateThreadAndRunRequestImpl) _then) : super(_value, _then); - /// Create a copy of CreateThreadAndRunRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35908,7 +33122,6 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, - Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -35965,10 +33178,6 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateThreadAndRunRequestToolChoice?, - parallelToolCalls: freezed == parallelToolCalls - ? _value.parallelToolCalls - : parallelToolCalls // ignore: cast_nullable_to_non_nullable - as bool?, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -36003,8 +33212,6 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - this.parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @@ -36056,14 +33263,10 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -36074,15 +33277,12 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @@ -36114,29 +33314,11 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateThreadAndRunRequestToolChoice? toolChoice; - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. - @override - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - final bool? parallelToolCalls; - - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. 
+ /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -36149,7 +33331,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @override String toString() { - return 'CreateThreadAndRunRequest(assistantId: $assistantId, thread: $thread, model: $model, instructions: $instructions, tools: $tools, toolResources: $toolResources, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, responseFormat: $responseFormat, stream: $stream)'; + return 'CreateThreadAndRunRequest(assistantId: $assistantId, thread: $thread, model: $model, instructions: $instructions, tools: $tools, toolResources: $toolResources, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat, stream: $stream)'; } @override @@ -36178,14 +33360,12 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { other.truncationStrategy == truncationStrategy) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && - (identical(other.parallelToolCalls, parallelToolCalls) || - other.parallelToolCalls == parallelToolCalls) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat) && (identical(other.stream, stream) || other.stream == stream)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -36202,13 +33382,10 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { maxCompletionTokens, truncationStrategy, toolChoice, - parallelToolCalls, responseFormat, stream); - /// Create a copy of CreateThreadAndRunRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestImplCopyWith<_$CreateThreadAndRunRequestImpl> @@ -36246,8 +33423,6 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateThreadAndRunRequestToolChoice? toolChoice, - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - final bool? parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) final CreateThreadAndRunRequestResponseFormat? 
responseFormat, @@ -36258,121 +33433,96 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { factory _CreateThreadAndRunRequest.fromJson(Map json) = _$CreateThreadAndRunRequestImpl.fromJson; - /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. @override + + /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. @JsonKey(name: 'assistant_id') String get assistantId; + @override /// If no thread is provided, an empty thread will be created. - @override @JsonKey(includeIfNull: false) CreateThreadRequest? get thread; + @override /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - @override @_ThreadAndRunModelConverter() @JsonKey(includeIfNull: false) ThreadAndRunModel? get model; + @override /// Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. - @override @JsonKey(includeIfNull: false) String? get instructions; + @override /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - @override @JsonKey(includeIfNull: false) List? get tools; + @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; - - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. @override + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature; + @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - /// mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or temperature but not both. - @override @JsonKey(name: 'top_p', includeIfNull: false) double? 
get topP; + @override /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - @override @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? get maxPromptTokens; + @override /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - @override @JsonKey(name: 'max_completion_tokens', includeIfNull: false) int? get maxCompletionTokens; + @override /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. - @override @JsonKey(name: 'truncation_strategy', includeIfNull: false) TruncationObject? get truncationStrategy; + @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tools and instead generates a message. /// `auto` is the default value and means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools before responding to the user. /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - @override @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? get toolChoice; - - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - /// during tool use. @override - @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - bool? get parallelToolCalls; - /// Specifies the format that the model must output. Compatible with - /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - /// the model will match your supplied JSON schema. Learn more in the - /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - /// is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note - /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the - /// generation exceeded `max_tokens` or the conversation exceeded the max context length. - @override + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? get responseFormat; + @override /// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. - @override @JsonKey(includeIfNull: false) bool? get stream; - - /// Create a copy of CreateThreadAndRunRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateThreadAndRunRequestImplCopyWith<_$CreateThreadAndRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -36431,8 +33581,6 @@ mixin _$ThreadAndRunModel { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this ThreadAndRunModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -36452,9 +33600,6 @@ class _$ThreadAndRunModelCopyWithImpl<$Res, $Val extends ThreadAndRunModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of ThreadAndRunModel - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -36477,8 +33622,6 @@ class __$$ThreadAndRunModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$ThreadAndRunModelEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of ThreadAndRunModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36523,13 +33666,11 @@ class _$ThreadAndRunModelEnumerationImpl extends ThreadAndRunModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ThreadAndRunModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ThreadAndRunModelEnumerationImplCopyWith< @@ -36617,10 +33758,7 @@ abstract class ThreadAndRunModelEnumeration extends ThreadAndRunModel { @override ThreadAndRunModels get value; - - /// Create a copy of ThreadAndRunModel - /// with the given fields replaced by the non-null parameter values. 
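Editor note, not part of the patch: as a quick orientation, the sketch below shows how a caller might build this request with the field set that remains after this hunk (there is no `parallelToolCalls` parameter any more). It assumes the generated const factory of CreateThreadAndRunRequest exposes the named parameters listed above; the ID and values are hypothetical.

// Assumes: import 'package:openai_dart/openai_dart.dart';
// Editor sketch only; field names are taken from the class declarations above.
final request = CreateThreadAndRunRequest(
  assistantId: 'asst_abc123', // hypothetical assistant ID
  instructions: 'Answer concisely.', // per-run override of the system message
  temperature: 0.2, // 0–2; lower values are more deterministic (see doc comment)
  maxCompletionTokens: 512, // run ends with status `incomplete` if exceeded
  stream: false,
);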
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ThreadAndRunModelEnumerationImplCopyWith< _$ThreadAndRunModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -36645,8 +33783,6 @@ class __$$ThreadAndRunModelStringImplCopyWithImpl<$Res> $Res Function(_$ThreadAndRunModelStringImpl) _then) : super(_value, _then); - /// Create a copy of ThreadAndRunModel - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36690,13 +33826,11 @@ class _$ThreadAndRunModelStringImpl extends ThreadAndRunModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of ThreadAndRunModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ThreadAndRunModelStringImplCopyWith<_$ThreadAndRunModelStringImpl> @@ -36783,10 +33917,7 @@ abstract class ThreadAndRunModelString extends ThreadAndRunModel { @override String get value; - - /// Create a copy of ThreadAndRunModel - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ThreadAndRunModelStringImplCopyWith<_$ThreadAndRunModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -36861,8 +33992,6 @@ mixin _$CreateThreadAndRunRequestToolChoice { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this CreateThreadAndRunRequestToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -36885,9 +34014,6 @@ class _$CreateThreadAndRunRequestToolChoiceCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of CreateThreadAndRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -36913,8 +34039,6 @@ class __$$CreateThreadAndRunRequestToolChoiceEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateThreadAndRunRequestToolChoiceEnumerationImpl) _then) : super(_value, _then); - /// Create a copy of CreateThreadAndRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36961,13 +34085,11 @@ class _$CreateThreadAndRunRequestToolChoiceEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateThreadAndRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestToolChoiceEnumerationImplCopyWith< @@ -37071,10 +34193,7 @@ abstract class CreateThreadAndRunRequestToolChoiceEnumeration @override CreateThreadAndRunRequestToolChoiceMode get value; - - /// Create a copy of CreateThreadAndRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateThreadAndRunRequestToolChoiceEnumerationImplCopyWith< _$CreateThreadAndRunRequestToolChoiceEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -37111,8 +34230,6 @@ class __$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWi _then) : super(_value, _then); - /// Create a copy of CreateThreadAndRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37127,8 +34244,6 @@ class __$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWi )); } - /// Create a copy of CreateThreadAndRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsNamedToolChoiceCopyWith<$Res> get value { @@ -37173,13 +34288,11 @@ class _$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateThreadAndRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< @@ -37285,10 +34398,7 @@ abstract class CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoice @override AssistantsNamedToolChoice get value; - - /// Create a copy of CreateThreadAndRunRequestToolChoice - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< _$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -37300,9 +34410,9 @@ CreateThreadAndRunRequestResponseFormat switch (json['runtimeType']) { case 'mode': return CreateThreadAndRunRequestResponseFormatEnumeration.fromJson(json); - case 'responseFormat': - return CreateThreadAndRunRequestResponseFormatResponseFormat.fromJson( - json); + case 'format': + return CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat + .fromJson(json); default: throw CheckedFromJsonException( @@ -37320,19 +34430,19 @@ mixin _$CreateThreadAndRunRequestResponseFormat { TResult when({ required TResult Function(CreateThreadAndRunRequestResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? format, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? 
format, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -37342,8 +34452,9 @@ mixin _$CreateThreadAndRunRequestResponseFormat { CreateThreadAndRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateThreadAndRunRequestResponseFormatResponseFormat value) - responseFormat, + CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat + value) + format, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -37351,8 +34462,9 @@ mixin _$CreateThreadAndRunRequestResponseFormat { TResult? Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult? Function( - CreateThreadAndRunRequestResponseFormatResponseFormat value)? - responseFormat, + CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat + value)? + format, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -37360,13 +34472,12 @@ mixin _$CreateThreadAndRunRequestResponseFormat { TResult Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult Function( - CreateThreadAndRunRequestResponseFormatResponseFormat value)? - responseFormat, + CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat + value)? + format, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this CreateThreadAndRunRequestResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -37390,9 +34501,6 @@ class _$CreateThreadAndRunRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of CreateThreadAndRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -37423,8 +34531,6 @@ class __$$CreateThreadAndRunRequestResponseFormatEnumerationImplCopyWithImpl< _then) : super(_value, _then); - /// Create a copy of CreateThreadAndRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37471,13 +34577,11 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateThreadAndRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestResponseFormatEnumerationImplCopyWith< @@ -37492,7 +34596,7 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl TResult when({ required TResult Function(CreateThreadAndRunRequestResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) { return mode(value); } @@ -37501,7 +34605,7 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? 
format, }) { return mode?.call(value); } @@ -37510,7 +34614,7 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? format, required TResult orElse(), }) { if (mode != null) { @@ -37526,8 +34630,9 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl CreateThreadAndRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateThreadAndRunRequestResponseFormatResponseFormat value) - responseFormat, + CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat + value) + format, }) { return mode(this); } @@ -37538,8 +34643,9 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl TResult? Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult? Function( - CreateThreadAndRunRequestResponseFormatResponseFormat value)? - responseFormat, + CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat + value)? + format, }) { return mode?.call(this); } @@ -37550,8 +34656,9 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl TResult Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult Function( - CreateThreadAndRunRequestResponseFormatResponseFormat value)? - responseFormat, + CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat + value)? + format, required TResult orElse(), }) { if (mode != null) { @@ -37581,66 +34688,63 @@ abstract class CreateThreadAndRunRequestResponseFormatEnumeration @override CreateThreadAndRunRequestResponseFormatMode get value; - - /// Create a copy of CreateThreadAndRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateThreadAndRunRequestResponseFormatEnumerationImplCopyWith< _$CreateThreadAndRunRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< +abstract class _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< $Res> { - factory _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith( - _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl value, + factory _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith( + _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl + value, $Res Function( - _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl) + _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl) then) = - __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl< + __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< $Res>; @useResult - $Res call({ResponseFormat value}); + $Res call({AssistantsResponseFormat value}); - $ResponseFormatCopyWith<$Res> get value; + $AssistantsResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl< +class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< $Res> extends _$CreateThreadAndRunRequestResponseFormatCopyWithImpl<$Res, - _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl> + _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> implements - _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< + _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< $Res> { - __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl( - _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl _value, - $Res Function(_$CreateThreadAndRunRequestResponseFormatResponseFormatImpl) + __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( + _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl + _value, + $Res Function( + _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl) _then) : super(_value, _then); - /// Create a copy of CreateThreadAndRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? value = null, }) { - return _then(_$CreateThreadAndRunRequestResponseFormatResponseFormatImpl( + return _then( + _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as ResponseFormat, + as AssistantsResponseFormat, )); } - /// Create a copy of CreateThreadAndRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $ResponseFormatCopyWith<$Res> get value { - return $ResponseFormatCopyWith<$Res>(_value.value, (value) { + $AssistantsResponseFormatCopyWith<$Res> get value { + return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -37648,27 +34752,28 @@ class __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl< /// @nodoc @JsonSerializable() -class _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl - extends CreateThreadAndRunRequestResponseFormatResponseFormat { - const _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl(this.value, +class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl + extends CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat { + const _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl( + this.value, {final String? $type}) - : $type = $type ?? 'responseFormat', + : $type = $type ?? 'format', super._(); - factory _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl.fromJson( + factory _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl.fromJson( Map json) => - _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplFromJson( + _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplFromJson( json); @override - final ResponseFormat value; + final AssistantsResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'CreateThreadAndRunRequestResponseFormat.responseFormat(value: $value)'; + return 'CreateThreadAndRunRequestResponseFormat.format(value: $value)'; } @override @@ -37676,24 +34781,22 @@ class _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl return identical(this, other) || (other.runtimeType == runtimeType && other - is _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl && + is _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl && (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateThreadAndRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< - _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl> + _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => - __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl< - _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl>( + __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl>( this, _$identity); @override @@ -37701,29 +34804,29 @@ class _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl TResult when({ required TResult Function(CreateThreadAndRunRequestResponseFormatMode value) mode, - required TResult Function(ResponseFormat value) responseFormat, + required TResult Function(AssistantsResponseFormat value) format, }) { - return responseFormat(value); + return format(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? 
Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult? Function(ResponseFormat value)? responseFormat, + TResult? Function(AssistantsResponseFormat value)? format, }) { - return responseFormat?.call(value); + return format?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult Function(ResponseFormat value)? responseFormat, + TResult Function(AssistantsResponseFormat value)? format, required TResult orElse(), }) { - if (responseFormat != null) { - return responseFormat(value); + if (format != null) { + return format(value); } return orElse(); } @@ -37735,10 +34838,11 @@ class _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl CreateThreadAndRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateThreadAndRunRequestResponseFormatResponseFormat value) - responseFormat, + CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat + value) + format, }) { - return responseFormat(this); + return format(this); } @override @@ -37747,10 +34851,11 @@ class _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl TResult? Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult? Function( - CreateThreadAndRunRequestResponseFormatResponseFormat value)? - responseFormat, + CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat + value)? + format, }) { - return responseFormat?.call(this); + return format?.call(this); } @override @@ -37759,43 +34864,43 @@ class _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl TResult Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult Function( - CreateThreadAndRunRequestResponseFormatResponseFormat value)? - responseFormat, + CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat + value)? + format, required TResult orElse(), }) { - if (responseFormat != null) { - return responseFormat(this); + if (format != null) { + return format(this); } return orElse(); } @override Map toJson() { - return _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplToJson( + return _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplToJson( this, ); } } -abstract class CreateThreadAndRunRequestResponseFormatResponseFormat +abstract class CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat extends CreateThreadAndRunRequestResponseFormat { - const factory CreateThreadAndRunRequestResponseFormatResponseFormat( - final ResponseFormat value) = - _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl; - const CreateThreadAndRunRequestResponseFormatResponseFormat._() : super._(); + const factory CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat( + final AssistantsResponseFormat value) = + _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl; + const CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat._() + : super._(); - factory CreateThreadAndRunRequestResponseFormatResponseFormat.fromJson( + factory CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat.fromJson( Map json) = - _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl.fromJson; + _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl + .fromJson; @override - ResponseFormat get value; - - /// Create a copy of CreateThreadAndRunRequestResponseFormat - /// with the given fields replaced by the non-null parameter values. 
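Editor note, not part of the patch: this hunk renames the second union case of CreateThreadAndRunRequestResponseFormat from `responseFormat` to `format` and changes its payload from ResponseFormat to AssistantsResponseFormat, so existing `when` call sites need updating. A minimal sketch of the new shape, using only the callback signatures shown above:

// Assumes: import 'package:openai_dart/openai_dart.dart';
String describeResponseFormat(CreateThreadAndRunRequestResponseFormat rf) {
  return rf.when(
    mode: (mode) => 'predefined mode: $mode',
    // Previously: responseFormat: (ResponseFormat value) => ...
    format: (format) => 'response format object: $format',
  );
}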
- @JsonKey(includeFromJson: false, includeToJson: false) - _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< - _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl> + AssistantsResponseFormat get value; + @JsonKey(ignore: true) + _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -37819,17 +34924,11 @@ mixin _$ThreadObject { @JsonKey(name: 'tool_resources') ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; - /// Serializes this ThreadObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ThreadObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ThreadObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -37860,8 +34959,6 @@ class _$ThreadObjectCopyWithImpl<$Res, $Val extends ThreadObject> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ThreadObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37895,8 +34992,6 @@ class _$ThreadObjectCopyWithImpl<$Res, $Val extends ThreadObject> ) as $Val); } - /// Create a copy of ThreadObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -37937,8 +35032,6 @@ class __$$ThreadObjectImplCopyWithImpl<$Res> _$ThreadObjectImpl _value, $Res Function(_$ThreadObjectImpl) _then) : super(_value, _then); - /// Create a copy of ThreadObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38006,14 +35099,10 @@ class _$ThreadObjectImpl extends _ThreadObject { @JsonKey(name: 'tool_resources') final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -38042,14 +35131,12 @@ class _$ThreadObjectImpl extends _ThreadObject { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, object, createdAt, toolResources, const DeepCollectionEquality().hash(_metadata)); - /// Create a copy of ThreadObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ThreadObjectImplCopyWith<_$ThreadObjectImpl> get copyWith => @@ -38076,34 +35163,30 @@ abstract class _ThreadObject extends ThreadObject { factory _ThreadObject.fromJson(Map json) = _$ThreadObjectImpl.fromJson; - /// The identifier, which can be referenced in API endpoints. @override + + /// The identifier, which can be referenced in API endpoints. String get id; + @override /// The object type, which is always `thread`. - @override ThreadObjectObject get object; + @override /// The Unix timestamp (in seconds) for when the thread was created. - @override @JsonKey(name: 'created_at') int get createdAt; + @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - @override @JsonKey(name: 'tool_resources') ToolResources? get toolResources; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override - Map? get metadata; - /// Create a copy of ThreadObject - /// with the given fields replaced by the non-null parameter values. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + Map? get metadata; @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ThreadObjectImplCopyWith<_$ThreadObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -38123,18 +35206,12 @@ mixin _$CreateThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// Serializes this CreateThreadRequest to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateThreadRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateThreadRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38164,8 +35241,6 @@ class _$CreateThreadRequestCopyWithImpl<$Res, $Val extends CreateThreadRequest> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateThreadRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38189,8 +35264,6 @@ class _$CreateThreadRequestCopyWithImpl<$Res, $Val extends CreateThreadRequest> ) as $Val); } - /// Create a copy of CreateThreadRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -38230,8 +35303,6 @@ class __$$CreateThreadRequestImplCopyWithImpl<$Res> $Res Function(_$CreateThreadRequestImpl) _then) : super(_value, _then); - /// Create a copy of CreateThreadRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38290,14 +35361,10 @@ class _$CreateThreadRequestImpl extends _CreateThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -38324,7 +35391,7 @@ class _$CreateThreadRequestImpl extends _CreateThreadRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -38332,9 +35399,7 @@ class _$CreateThreadRequestImpl extends _CreateThreadRequest { toolResources, const DeepCollectionEquality().hash(_metadata)); - /// Create a copy of CreateThreadRequest - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateThreadRequestImplCopyWith<_$CreateThreadRequestImpl> get copyWith => @@ -38362,27 +35427,23 @@ abstract class _CreateThreadRequest extends CreateThreadRequest { factory _CreateThreadRequest.fromJson(Map json) = _$CreateThreadRequestImpl.fromJson; - /// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with. @override + + /// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with. @JsonKey(includeIfNull: false) List? get messages; + @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; - - /// Create a copy of CreateThreadRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateThreadRequestImplCopyWith<_$CreateThreadRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -38397,18 +35458,12 @@ mixin _$ModifyThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// Serializes this ModifyThreadRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ModifyThreadRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ModifyThreadRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38437,8 +35492,6 @@ class _$ModifyThreadRequestCopyWithImpl<$Res, $Val extends ModifyThreadRequest> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ModifyThreadRequest - /// with the given fields replaced by the non-null parameter values. 
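Editor note, not part of the patch: the metadata constraints repeated in the doc comments above (at most 16 pairs, keys up to 64 characters, values up to 512 characters) are easiest to see in use. A small sketch, assuming the generated const factory of CreateThreadRequest mirrors the getters listed; the keys and values are hypothetical.

// Assumes: import 'package:openai_dart/openai_dart.dart';
const createThread = CreateThreadRequest(
  metadata: {
    'user_id': 'user_123',   // hypothetical entry; key <= 64 chars
    'source': 'mobile_app',  // value <= 512 chars; at most 16 entries total
  },
);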
@pragma('vm:prefer-inline') @override $Res call({ @@ -38457,8 +35510,6 @@ class _$ModifyThreadRequestCopyWithImpl<$Res, $Val extends ModifyThreadRequest> ) as $Val); } - /// Create a copy of ModifyThreadRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -38497,8 +35548,6 @@ class __$$ModifyThreadRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyThreadRequestImpl) _then) : super(_value, _then); - /// Create a copy of ModifyThreadRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38536,14 +35585,10 @@ class _$ModifyThreadRequestImpl extends _ModifyThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -38569,14 +35614,12 @@ class _$ModifyThreadRequestImpl extends _ModifyThreadRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, toolResources, const DeepCollectionEquality().hash(_metadata)); - /// Create a copy of ModifyThreadRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ModifyThreadRequestImplCopyWith<_$ModifyThreadRequestImpl> get copyWith => @@ -38602,22 +35645,18 @@ abstract class _ModifyThreadRequest extends ModifyThreadRequest { factory _ModifyThreadRequest.fromJson(Map json) = _$ModifyThreadRequestImpl.fromJson; - /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @override + + /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? 
get toolResources; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; - - /// Create a copy of ModifyThreadRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ModifyThreadRequestImplCopyWith<_$ModifyThreadRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -38637,12 +35676,8 @@ mixin _$ToolResources { @JsonKey(name: 'file_search', includeIfNull: false) ToolResourcesFileSearch? get fileSearch => throw _privateConstructorUsedError; - /// Serializes this ToolResources to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ToolResources - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ToolResourcesCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38673,8 +35708,6 @@ class _$ToolResourcesCopyWithImpl<$Res, $Val extends ToolResources> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ToolResources - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38693,8 +35726,6 @@ class _$ToolResourcesCopyWithImpl<$Res, $Val extends ToolResources> ) as $Val); } - /// Create a copy of ToolResources - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCodeInterpreterCopyWith<$Res>? get codeInterpreter { @@ -38708,8 +35739,6 @@ class _$ToolResourcesCopyWithImpl<$Res, $Val extends ToolResources> }); } - /// Create a copy of ToolResources - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesFileSearchCopyWith<$Res>? get fileSearch { @@ -38751,8 +35780,6 @@ class __$$ToolResourcesImplCopyWithImpl<$Res> _$ToolResourcesImpl _value, $Res Function(_$ToolResourcesImpl) _then) : super(_value, _then); - /// Create a copy of ToolResources - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38810,13 +35837,11 @@ class _$ToolResourcesImpl extends _ToolResources { other.fileSearch == fileSearch)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, codeInterpreter, fileSearch); - /// Create a copy of ToolResources - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ToolResourcesImplCopyWith<_$ToolResourcesImpl> get copyWith => @@ -38841,20 +35866,18 @@ abstract class _ToolResources extends ToolResources { factory _ToolResources.fromJson(Map json) = _$ToolResourcesImpl.fromJson; - /// No Description @override + + /// No Description @JsonKey(name: 'code_interpreter', includeIfNull: false) ToolResourcesCodeInterpreter? get codeInterpreter; + @override /// No Description - @override @JsonKey(name: 'file_search', includeIfNull: false) ToolResourcesFileSearch? get fileSearch; - - /// Create a copy of ToolResources - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ToolResourcesImplCopyWith<_$ToolResourcesImpl> get copyWith => throw _privateConstructorUsedError; } @@ -38870,12 +35893,8 @@ mixin _$ToolResourcesCodeInterpreter { @JsonKey(name: 'file_ids') List get fileIds => throw _privateConstructorUsedError; - /// Serializes this ToolResourcesCodeInterpreter to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ToolResourcesCodeInterpreter - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ToolResourcesCodeInterpreterCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38902,8 +35921,6 @@ class _$ToolResourcesCodeInterpreterCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ToolResourcesCodeInterpreter - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38940,8 +35957,6 @@ class __$$ToolResourcesCodeInterpreterImplCopyWithImpl<$Res> $Res Function(_$ToolResourcesCodeInterpreterImpl) _then) : super(_value, _then); - /// Create a copy of ToolResourcesCodeInterpreter - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38993,14 +36008,12 @@ class _$ToolResourcesCodeInterpreterImpl extends _ToolResourcesCodeInterpreter { const DeepCollectionEquality().equals(other._fileIds, _fileIds)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_fileIds)); - /// Create a copy of ToolResourcesCodeInterpreter - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ToolResourcesCodeInterpreterImplCopyWith< @@ -39026,15 +36039,13 @@ abstract class _ToolResourcesCodeInterpreter factory _ToolResourcesCodeInterpreter.fromJson(Map json) = _$ToolResourcesCodeInterpreterImpl.fromJson; - /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. @override + + /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. @JsonKey(name: 'file_ids') List get fileIds; - - /// Create a copy of ToolResourcesCodeInterpreter - /// with the given fields replaced by the non-null parameter values. 
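Editor note, not part of the patch: to make the tool-resources wiring above concrete, a sketch assuming the generated const factories take the named parameters shown for each class; the IDs are hypothetical.

// Assumes: import 'package:openai_dart/openai_dart.dart';
const resources = ToolResources(
  codeInterpreter: ToolResourcesCodeInterpreter(
    fileIds: ['file-abc123'], // hypothetical file ID; max 20 files per tool
  ),
  fileSearch: ToolResourcesFileSearch(
    vectorStoreIds: ['vs_abc123'], // hypothetical ID; at most 1 vector store per thread
  ),
);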
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ToolResourcesCodeInterpreterImplCopyWith< _$ToolResourcesCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -39056,12 +36067,8 @@ mixin _$ToolResourcesFileSearch { List? get vectorStores => throw _privateConstructorUsedError; - /// Serializes this ToolResourcesFileSearch to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ToolResourcesFileSearch - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ToolResourcesFileSearchCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39090,8 +36097,6 @@ class _$ToolResourcesFileSearchCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ToolResourcesFileSearch - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39137,8 +36142,6 @@ class __$$ToolResourcesFileSearchImplCopyWithImpl<$Res> $Res Function(_$ToolResourcesFileSearchImpl) _then) : super(_value, _then); - /// Create a copy of ToolResourcesFileSearch - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39217,16 +36220,14 @@ class _$ToolResourcesFileSearchImpl extends _ToolResourcesFileSearch { .equals(other._vectorStores, _vectorStores)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_vectorStoreIds), const DeepCollectionEquality().hash(_vectorStores)); - /// Create a copy of ToolResourcesFileSearch - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ToolResourcesFileSearchImplCopyWith<_$ToolResourcesFileSearchImpl> @@ -39253,20 +36254,18 @@ abstract class _ToolResourcesFileSearch extends ToolResourcesFileSearch { factory _ToolResourcesFileSearch.fromJson(Map json) = _$ToolResourcesFileSearchImpl.fromJson; - /// The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. @override + + /// The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. @JsonKey(name: 'vector_store_ids', includeIfNull: false) List? get vectorStoreIds; + @override /// A helper to create a [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. - @override @JsonKey(name: 'vector_stores', includeIfNull: false) List? get vectorStores; - - /// Create a copy of ToolResourcesFileSearch - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ToolResourcesFileSearchImplCopyWith<_$ToolResourcesFileSearchImpl> get copyWith => throw _privateConstructorUsedError; } @@ -39282,22 +36281,12 @@ mixin _$ToolResourcesFileSearchVectorStore { @JsonKey(name: 'file_ids', includeIfNull: false) List? 
get fileIds => throw _privateConstructorUsedError; - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? get chunkingStrategy => - throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; - /// Serializes this ToolResourcesFileSearchVectorStore to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ToolResourcesFileSearchVectorStore - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ToolResourcesFileSearchVectorStoreCopyWith< ToolResourcesFileSearchVectorStore> get copyWith => throw _privateConstructorUsedError; @@ -39313,11 +36302,7 @@ abstract class $ToolResourcesFileSearchVectorStoreCopyWith<$Res> { @useResult $Res call( {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) dynamic metadata}); - - $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -39331,13 +36316,10 @@ class _$ToolResourcesFileSearchVectorStoreCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ToolResourcesFileSearchVectorStore - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? fileIds = freezed, - Object? chunkingStrategy = freezed, Object? metadata = freezed, }) { return _then(_value.copyWith( @@ -39345,31 +36327,12 @@ class _$ToolResourcesFileSearchVectorStoreCopyWithImpl<$Res, ? _value.fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List?, - chunkingStrategy: freezed == chunkingStrategy - ? _value.chunkingStrategy - : chunkingStrategy // ignore: cast_nullable_to_non_nullable - as ChunkingStrategyRequestParam?, metadata: freezed == metadata ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable as dynamic, ) as $Val); } - - /// Create a copy of ToolResourcesFileSearchVectorStore - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { - if (_value.chunkingStrategy == null) { - return null; - } - - return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, - (value) { - return _then(_value.copyWith(chunkingStrategy: value) as $Val); - }); - } } /// @nodoc @@ -39383,12 +36346,7 @@ abstract class _$$ToolResourcesFileSearchVectorStoreImplCopyWith<$Res> @useResult $Res call( {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) dynamic metadata}); - - @override - $ChunkingStrategyRequestParamCopyWith<$Res>? 
get chunkingStrategy; } /// @nodoc @@ -39401,13 +36359,10 @@ class __$$ToolResourcesFileSearchVectorStoreImplCopyWithImpl<$Res> $Res Function(_$ToolResourcesFileSearchVectorStoreImpl) _then) : super(_value, _then); - /// Create a copy of ToolResourcesFileSearchVectorStore - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? fileIds = freezed, - Object? chunkingStrategy = freezed, Object? metadata = freezed, }) { return _then(_$ToolResourcesFileSearchVectorStoreImpl( @@ -39415,10 +36370,6 @@ class __$$ToolResourcesFileSearchVectorStoreImplCopyWithImpl<$Res> ? _value._fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List?, - chunkingStrategy: freezed == chunkingStrategy - ? _value.chunkingStrategy - : chunkingStrategy // ignore: cast_nullable_to_non_nullable - as ChunkingStrategyRequestParam?, metadata: freezed == metadata ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable @@ -39434,8 +36385,6 @@ class _$ToolResourcesFileSearchVectorStoreImpl const _$ToolResourcesFileSearchVectorStoreImpl( {@JsonKey(name: 'file_ids', includeIfNull: false) final List? fileIds, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - this.chunkingStrategy, @JsonKey(includeIfNull: false) this.metadata}) : _fileIds = fileIds, super._(); @@ -39458,12 +36407,6 @@ class _$ToolResourcesFileSearchVectorStoreImpl return EqualUnmodifiableListView(value); } - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] - @override - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - final ChunkingStrategyRequestParam? chunkingStrategy; - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) @@ -39471,7 +36414,7 @@ class _$ToolResourcesFileSearchVectorStoreImpl @override String toString() { - return 'ToolResourcesFileSearchVectorStore(fileIds: $fileIds, chunkingStrategy: $chunkingStrategy, metadata: $metadata)'; + return 'ToolResourcesFileSearchVectorStore(fileIds: $fileIds, metadata: $metadata)'; } @override @@ -39480,22 +36423,17 @@ class _$ToolResourcesFileSearchVectorStoreImpl (other.runtimeType == runtimeType && other is _$ToolResourcesFileSearchVectorStoreImpl && const DeepCollectionEquality().equals(other._fileIds, _fileIds) && - (identical(other.chunkingStrategy, chunkingStrategy) || - other.chunkingStrategy == chunkingStrategy) && const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_fileIds), - chunkingStrategy, const DeepCollectionEquality().hash(metadata)); - /// Create a copy of ToolResourcesFileSearchVectorStore - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ToolResourcesFileSearchVectorStoreImplCopyWith< @@ -39516,8 +36454,6 @@ abstract class _ToolResourcesFileSearchVectorStore const factory _ToolResourcesFileSearchVectorStore( {@JsonKey(name: 'file_ids', includeIfNull: false) final List? 
fileIds, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - final ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) final dynamic metadata}) = _$ToolResourcesFileSearchVectorStoreImpl; const _ToolResourcesFileSearchVectorStore._() : super._(); @@ -39526,26 +36462,18 @@ abstract class _ToolResourcesFileSearchVectorStore Map json) = _$ToolResourcesFileSearchVectorStoreImpl.fromJson; - /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. @override + + /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. @JsonKey(name: 'file_ids', includeIfNull: false) List? get fileIds; - - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] @override - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? get chunkingStrategy; /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - @override @JsonKey(includeIfNull: false) dynamic get metadata; - - /// Create a copy of ToolResourcesFileSearchVectorStore - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ToolResourcesFileSearchVectorStoreImplCopyWith< _$ToolResourcesFileSearchVectorStoreImpl> get copyWith => throw _privateConstructorUsedError; @@ -39566,12 +36494,8 @@ mixin _$DeleteThreadResponse { /// The object type, which is always `thread.deleted`. DeleteThreadResponseObject get object => throw _privateConstructorUsedError; - /// Serializes this DeleteThreadResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of DeleteThreadResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $DeleteThreadResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39596,8 +36520,6 @@ class _$DeleteThreadResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of DeleteThreadResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39641,8 +36563,6 @@ class __$$DeleteThreadResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteThreadResponseImpl) _then) : super(_value, _then); - /// Create a copy of DeleteThreadResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39704,13 +36624,11 @@ class _$DeleteThreadResponseImpl extends _DeleteThreadResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - /// Create a copy of DeleteThreadResponse - /// with the given fields replaced by the non-null parameter values. 
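// A minimal usage sketch for the ToolResourcesFileSearchVectorStore hunks above,
// assuming the generated schema types are exported from
// package:openai_dart/openai_dart.dart and that the public class wires up the
// freezed fromJson/toJson shown in the mixin. After this revert the object
// carries only `file_ids` and `metadata` on the wire; the removed
// `chunking_strategy` field is gone from the model. The file ID is a placeholder.
import 'package:openai_dart/openai_dart.dart';

void main() {
  final store = ToolResourcesFileSearchVectorStore.fromJson({
    'file_ids': ['file-abc123'],
    'metadata': {'purpose': 'docs'},
  });
  print(store.fileIds);  // [file-abc123]
  print(store.toJson()); // {file_ids: [file-abc123], metadata: {purpose: docs}}
}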
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$DeleteThreadResponseImplCopyWith<_$DeleteThreadResponseImpl> @@ -39737,22 +36655,20 @@ abstract class _DeleteThreadResponse extends DeleteThreadResponse { factory _DeleteThreadResponse.fromJson(Map json) = _$DeleteThreadResponseImpl.fromJson; - /// The thread identifier. @override + + /// The thread identifier. String get id; + @override /// Whether the thread was deleted. - @override bool get deleted; + @override /// The object type, which is always `thread.deleted`. - @override DeleteThreadResponseObject get object; - - /// Create a copy of DeleteThreadResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$DeleteThreadResponseImplCopyWith<_$DeleteThreadResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -39781,12 +36697,8 @@ mixin _$ListThreadsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; - /// Serializes this ListThreadsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ListThreadsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ListThreadsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39815,8 +36727,6 @@ class _$ListThreadsResponseCopyWithImpl<$Res, $Val extends ListThreadsResponse> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ListThreadsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39875,8 +36785,6 @@ class __$$ListThreadsResponseImplCopyWithImpl<$Res> $Res Function(_$ListThreadsResponseImpl) _then) : super(_value, _then); - /// Create a copy of ListThreadsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39973,14 +36881,12 @@ class _$ListThreadsResponseImpl extends _ListThreadsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - /// Create a copy of ListThreadsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ListThreadsResponseImplCopyWith<_$ListThreadsResponseImpl> get copyWith => @@ -40008,33 +36914,31 @@ abstract class _ListThreadsResponse extends ListThreadsResponse { factory _ListThreadsResponse.fromJson(Map json) = _$ListThreadsResponseImpl.fromJson; - /// The object type, which is always `list`. @override + + /// The object type, which is always `list`. String get object; + @override /// The list of threads. - @override List get data; + @override /// The ID of the first thread in the list. - @override @JsonKey(name: 'first_id') String get firstId; + @override /// The ID of the last thread in the list. - @override @JsonKey(name: 'last_id') String get lastId; + @override /// Whether there are more threads to retrieve. 
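// A sketch for the DeleteThreadResponse / ListThreadsResponse hunks in this
// region. The JSON keys ('first_id', 'last_id', 'has_more', 'thread.deleted')
// come from the @JsonKey annotations and doc comments above; the package
// export and the thread IDs are assumptions.
import 'package:openai_dart/openai_dart.dart';

void main() {
  final page = ListThreadsResponse.fromJson({
    'object': 'list',
    'data': <Map<String, dynamic>>[],
    'first_id': 'thread_abc',
    'last_id': 'thread_xyz',
    'has_more': false,
  });
  print('fetched ${page.data.length} threads, more available: ${page.hasMore}');

  final gone = DeleteThreadResponse.fromJson({
    'id': 'thread_abc',
    'deleted': true,
    'object': 'thread.deleted',
  });
  print(gone.deleted); // true
}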
- @override @JsonKey(name: 'has_more') bool get hasMore; - - /// Create a copy of ListThreadsResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ListThreadsResponseImplCopyWith<_$ListThreadsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -40094,17 +36998,11 @@ mixin _$MessageObject { List? get attachments => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; - /// Serializes this MessageObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $MessageObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40146,8 +37044,6 @@ class _$MessageObjectCopyWithImpl<$Res, $Val extends MessageObject> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40226,8 +37122,6 @@ class _$MessageObjectCopyWithImpl<$Res, $Val extends MessageObject> ) as $Val); } - /// Create a copy of MessageObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageObjectIncompleteDetailsCopyWith<$Res>? get incompleteDetails { @@ -40280,8 +37174,6 @@ class __$$MessageObjectImplCopyWithImpl<$Res> _$MessageObjectImpl _value, $Res Function(_$MessageObjectImpl) _then) : super(_value, _then); - /// Create a copy of MessageObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40464,14 +37356,10 @@ class _$MessageObjectImpl extends _MessageObject { return EqualUnmodifiableListView(value); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -40514,7 +37402,7 @@ class _$MessageObjectImpl extends _MessageObject { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -40533,9 +37421,7 @@ class _$MessageObjectImpl extends _MessageObject { const DeepCollectionEquality().hash(_attachments), const DeepCollectionEquality().hash(_metadata)); - /// Create a copy of MessageObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageObjectImplCopyWith<_$MessageObjectImpl> get copyWith => @@ -40572,76 +37458,72 @@ abstract class _MessageObject extends MessageObject { factory _MessageObject.fromJson(Map json) = _$MessageObjectImpl.fromJson; - /// The identifier, which can be referenced in API endpoints. @override + + /// The identifier, which can be referenced in API endpoints. String get id; + @override /// The object type, which is always `thread.message`. - @override MessageObjectObject get object; + @override /// The Unix timestamp (in seconds) for when the message was created. - @override @JsonKey(name: 'created_at') int get createdAt; + @override /// The [thread](https://platform.openai.com/docs/api-reference/threads) ID that this message belongs to. - @override @JsonKey(name: 'thread_id') String get threadId; + @override /// The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. - @override @JsonKey(unknownEnumValue: JsonKey.nullForUndefinedEnumValue) MessageObjectStatus? get status; + @override /// On an incomplete message, details about why the message is incomplete. - @override @JsonKey(name: 'incomplete_details') MessageObjectIncompleteDetails? get incompleteDetails; + @override /// The Unix timestamp (in seconds) for when the message was completed. - @override @JsonKey(name: 'completed_at') int? get completedAt; + @override /// The Unix timestamp (in seconds) for when the message was marked as incomplete. - @override @JsonKey(name: 'incomplete_at') int? get incompleteAt; + @override /// The entity that produced the message. One of `user` or `assistant`. - @override MessageRole get role; + @override /// The content of the message in array of text and/or images. - @override List get content; + @override /// If applicable, the ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) that authored this message. - @override @JsonKey(name: 'assistant_id') String? get assistantId; + @override /// The ID of the [run](https://platform.openai.com/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. - @override @JsonKey(name: 'run_id') String? get runId; + @override /// A list of files attached to the message, and the tools they were added to. - @override List? get attachments; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override - Map? 
get metadata; - /// Create a copy of MessageObject - /// with the given fields replaced by the non-null parameter values. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + Map? get metadata; @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageObjectImplCopyWith<_$MessageObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -40657,12 +37539,8 @@ mixin _$MessageObjectIncompleteDetails { MessageObjectIncompleteDetailsReason get reason => throw _privateConstructorUsedError; - /// Serializes this MessageObjectIncompleteDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageObjectIncompleteDetails - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $MessageObjectIncompleteDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40689,8 +37567,6 @@ class _$MessageObjectIncompleteDetailsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageObjectIncompleteDetails - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40727,8 +37603,6 @@ class __$$MessageObjectIncompleteDetailsImplCopyWithImpl<$Res> $Res Function(_$MessageObjectIncompleteDetailsImpl) _then) : super(_value, _then); - /// Create a copy of MessageObjectIncompleteDetails - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40771,13 +37645,11 @@ class _$MessageObjectIncompleteDetailsImpl (identical(other.reason, reason) || other.reason == reason)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, reason); - /// Create a copy of MessageObjectIncompleteDetails - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageObjectIncompleteDetailsImplCopyWith< @@ -40803,14 +37675,12 @@ abstract class _MessageObjectIncompleteDetails factory _MessageObjectIncompleteDetails.fromJson(Map json) = _$MessageObjectIncompleteDetailsImpl.fromJson; - /// The reason the message is incomplete. @override - MessageObjectIncompleteDetailsReason get reason; - /// Create a copy of MessageObjectIncompleteDetails - /// with the given fields replaced by the non-null parameter values. + /// The reason the message is incomplete. + MessageObjectIncompleteDetailsReason get reason; @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageObjectIncompleteDetailsImplCopyWith< _$MessageObjectIncompleteDetailsImpl> get copyWith => throw _privateConstructorUsedError; @@ -40830,12 +37700,8 @@ mixin _$MessageAttachment { @JsonKey(includeIfNull: false) List? get tools => throw _privateConstructorUsedError; - /// Serializes this MessageAttachment to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageAttachment - /// with the given fields replaced by the non-null parameter values. 
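// A sketch for the MessageObject hunks above. The required wire fields (id,
// object, created_at, thread_id, role, content) are taken from the
// non-nullable getters in the mixin; the shape of the `content` entry follows
// the Assistants text content part and is an assumption here, as are the IDs
// and the package export.
import 'package:openai_dart/openai_dart.dart';

void main() {
  final message = MessageObject.fromJson({
    'id': 'msg_abc123',
    'object': 'thread.message',
    'created_at': 1714000000,
    'thread_id': 'thread_abc123',
    'status': 'completed',
    'role': 'assistant',
    'content': [
      {
        'type': 'text',
        'text': {'value': 'Hello!', 'annotations': []},
      }
    ],
  });
  print('${message.role} message, status: ${message.status}');
}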
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $MessageAttachmentCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40861,8 +37727,6 @@ class _$MessageAttachmentCopyWithImpl<$Res, $Val extends MessageAttachment> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageAttachment - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40903,8 +37767,6 @@ class __$$MessageAttachmentImplCopyWithImpl<$Res> $Res Function(_$MessageAttachmentImpl) _then) : super(_value, _then); - /// Create a copy of MessageAttachment - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40969,14 +37831,12 @@ class _$MessageAttachmentImpl extends _MessageAttachment { const DeepCollectionEquality().equals(other._tools, _tools)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, fileId, const DeepCollectionEquality().hash(_tools)); - /// Create a copy of MessageAttachment - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageAttachmentImplCopyWith<_$MessageAttachmentImpl> get copyWith => @@ -41001,20 +37861,18 @@ abstract class _MessageAttachment extends MessageAttachment { factory _MessageAttachment.fromJson(Map json) = _$MessageAttachmentImpl.fromJson; - /// The ID of the file to attach to the message. @override + + /// The ID of the file to attach to the message. @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; + @override /// The tools to add this file to. - @override @JsonKey(includeIfNull: false) List? get tools; - - /// Create a copy of MessageAttachment - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageAttachmentImplCopyWith<_$MessageAttachmentImpl> get copyWith => throw _privateConstructorUsedError; } @@ -41034,12 +37892,8 @@ mixin _$MessageDeltaObject { /// The delta containing the fields that have changed on the Message. MessageDelta get delta => throw _privateConstructorUsedError; - /// Serializes this MessageDeltaObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageDeltaObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $MessageDeltaObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -41065,8 +37919,6 @@ class _$MessageDeltaObjectCopyWithImpl<$Res, $Val extends MessageDeltaObject> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageDeltaObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41090,8 +37942,6 @@ class _$MessageDeltaObjectCopyWithImpl<$Res, $Val extends MessageDeltaObject> ) as $Val); } - /// Create a copy of MessageDeltaObject - /// with the given fields replaced by the non-null parameter values. 
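// A sketch for the MessageAttachment hunks above: both `file_id` and `tools`
// are optional (nullable, includeIfNull: false), so a file-only attachment is
// the smallest valid payload. The package export and the file ID are
// assumptions.
import 'package:openai_dart/openai_dart.dart';

void main() {
  final attachment = MessageAttachment.fromJson({'file_id': 'file-abc123'});
  print(attachment.fileId);   // file-abc123
  print(attachment.toJson()); // {file_id: file-abc123} — null fields are omitted
}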
@override @pragma('vm:prefer-inline') $MessageDeltaCopyWith<$Res> get delta { @@ -41123,8 +37973,6 @@ class __$$MessageDeltaObjectImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaObjectImpl) _then) : super(_value, _then); - /// Create a copy of MessageDeltaObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41186,13 +38034,11 @@ class _$MessageDeltaObjectImpl extends _MessageDeltaObject { (identical(other.delta, delta) || other.delta == delta)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, object, delta); - /// Create a copy of MessageDeltaObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageDeltaObjectImplCopyWith<_$MessageDeltaObjectImpl> get copyWith => @@ -41217,22 +38063,20 @@ abstract class _MessageDeltaObject extends MessageDeltaObject { factory _MessageDeltaObject.fromJson(Map json) = _$MessageDeltaObjectImpl.fromJson; - /// The identifier of the message, which can be referenced in API endpoints. @override + + /// The identifier of the message, which can be referenced in API endpoints. String get id; + @override /// The object type, which is always `thread.message.delta`. - @override MessageDeltaObjectObject get object; + @override /// The delta containing the fields that have changed on the Message. - @override MessageDelta get delta; - - /// Create a copy of MessageDeltaObject - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageDeltaObjectImplCopyWith<_$MessageDeltaObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -41252,12 +38096,8 @@ mixin _$MessageDelta { @JsonKey(includeIfNull: false) List? get content => throw _privateConstructorUsedError; - /// Serializes this MessageDelta to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageDelta - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $MessageDeltaCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -41286,8 +38126,6 @@ class _$MessageDeltaCopyWithImpl<$Res, $Val extends MessageDelta> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageDelta - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41331,8 +38169,6 @@ class __$$MessageDeltaImplCopyWithImpl<$Res> _$MessageDeltaImpl _value, $Res Function(_$MessageDeltaImpl) _then) : super(_value, _then); - /// Create a copy of MessageDelta - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41401,14 +38237,12 @@ class _$MessageDeltaImpl extends _MessageDelta { const DeepCollectionEquality().equals(other._content, _content)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, role, const DeepCollectionEquality().hash(_content)); - /// Create a copy of MessageDelta - /// with the given fields replaced by the non-null parameter values. 
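// A sketch for the MessageDeltaObject / MessageDelta hunks around this point:
// a streamed `thread.message.delta` event in which only the role has arrived
// so far (both delta fields are nullable). The package export and the message
// ID are assumptions.
import 'package:openai_dart/openai_dart.dart';

void main() {
  final event = MessageDeltaObject.fromJson({
    'id': 'msg_abc123',
    'object': 'thread.message.delta',
    'delta': {'role': 'assistant'},
  });
  print(event.delta.role); // MessageRole.assistant
}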
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => @@ -41435,21 +38269,19 @@ abstract class _MessageDelta extends MessageDelta { factory _MessageDelta.fromJson(Map json) = _$MessageDeltaImpl.fromJson; - /// The entity that produced the message. One of `user` or `assistant`. @override + + /// The entity that produced the message. One of `user` or `assistant`. @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) MessageRole? get role; + @override /// The content of the message in array of text and/or images. - @override @JsonKey(includeIfNull: false) List? get content; - - /// Create a copy of MessageDelta - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => throw _privateConstructorUsedError; } @@ -41472,18 +38304,12 @@ mixin _$CreateMessageRequest { List? get attachments => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// Serializes this CreateMessageRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateMessageRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $CreateMessageRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -41515,8 +38341,6 @@ class _$CreateMessageRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateMessageRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41545,8 +38369,6 @@ class _$CreateMessageRequestCopyWithImpl<$Res, ) as $Val); } - /// Create a copy of CreateMessageRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateMessageRequestContentCopyWith<$Res> get content { @@ -41583,8 +38405,6 @@ class __$$CreateMessageRequestImplCopyWithImpl<$Res> $Res Function(_$CreateMessageRequestImpl) _then) : super(_value, _then); - /// Create a copy of CreateMessageRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41652,14 +38472,10 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { return EqualUnmodifiableListView(value); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -41687,7 +38503,7 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -41696,9 +38512,7 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { const DeepCollectionEquality().hash(_attachments), const DeepCollectionEquality().hash(_metadata)); - /// Create a copy of CreateMessageRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> @@ -41727,31 +38541,27 @@ abstract class _CreateMessageRequest extends CreateMessageRequest { factory _CreateMessageRequest.fromJson(Map json) = _$CreateMessageRequestImpl.fromJson; - /// The entity that produced the message. One of `user` or `assistant`. @override + + /// The entity that produced the message. One of `user` or `assistant`. MessageRole get role; + @override /// The content of the message. - @override @_CreateMessageRequestContentConverter() CreateMessageRequestContent get content; + @override /// A list of files attached to the message, and the tools they were added to. - @override @JsonKey(includeIfNull: false) List? get attachments; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; - - /// Create a copy of CreateMessageRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -41818,8 +38628,6 @@ mixin _$CreateMessageRequestContent { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this CreateMessageRequestContent to a JSON map. 
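// A sketch for the CreateMessageRequest hunks above. It assumes the
// _CreateMessageRequestContentConverter accepts a plain string for `content`
// (which is what the CreateMessageRequestContentString variant below exists
// for); the package export and the metadata values are assumptions too.
import 'package:openai_dart/openai_dart.dart';

void main() {
  final request = CreateMessageRequest.fromJson({
    'role': 'user',
    'content': 'What is LangChain.dart?',
    'metadata': {'source': 'docs_example'},
  });
  // `content` is the string/list union declared in this file; toJson
  // round-trips it back to the wire representation.
  print(request.toJson());
}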
Map toJson() => throw _privateConstructorUsedError; } @@ -41842,9 +38650,6 @@ class _$CreateMessageRequestContentCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - - /// Create a copy of CreateMessageRequestContent - /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -41870,8 +38675,6 @@ class __$$CreateMessageRequestContentListMessageContentImplCopyWithImpl<$Res> $Res Function(_$CreateMessageRequestContentListMessageContentImpl) _then) : super(_value, _then); - /// Create a copy of CreateMessageRequestContent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41925,14 +38728,12 @@ class _$CreateMessageRequestContentListMessageContentImpl const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - /// Create a copy of CreateMessageRequestContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateMessageRequestContentListMessageContentImplCopyWith< @@ -42029,10 +38830,7 @@ abstract class CreateMessageRequestContentListMessageContent @override List get value; - - /// Create a copy of CreateMessageRequestContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateMessageRequestContentListMessageContentImplCopyWith< _$CreateMessageRequestContentListMessageContentImpl> get copyWith => throw _privateConstructorUsedError; @@ -42058,8 +38856,6 @@ class __$$CreateMessageRequestContentStringImplCopyWithImpl<$Res> $Res Function(_$CreateMessageRequestContentStringImpl) _then) : super(_value, _then); - /// Create a copy of CreateMessageRequestContent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42106,13 +38902,11 @@ class _$CreateMessageRequestContentStringImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of CreateMessageRequestContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$CreateMessageRequestContentStringImplCopyWith< @@ -42206,10 +39000,7 @@ abstract class CreateMessageRequestContentString @override String get value; - - /// Create a copy of CreateMessageRequestContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$CreateMessageRequestContentStringImplCopyWith< _$CreateMessageRequestContentStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -42221,18 +39012,12 @@ ModifyMessageRequest _$ModifyMessageRequestFromJson(Map json) { /// @nodoc mixin _$ModifyMessageRequest { - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// Serializes this ModifyMessageRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ModifyMessageRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ModifyMessageRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42257,8 +39042,6 @@ class _$ModifyMessageRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ModifyMessageRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42292,8 +39075,6 @@ class __$$ModifyMessageRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyMessageRequestImpl) _then) : super(_value, _then); - /// Create a copy of ModifyMessageRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42319,14 +39100,10 @@ class _$ModifyMessageRequestImpl extends _ModifyMessageRequest { factory _$ModifyMessageRequestImpl.fromJson(Map json) => _$$ModifyMessageRequestImplFromJson(json); - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -42350,14 +39127,12 @@ class _$ModifyMessageRequestImpl extends _ModifyMessageRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_metadata)); - /// Create a copy of ModifyMessageRequest - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ModifyMessageRequestImplCopyWith<_$ModifyMessageRequestImpl> @@ -42382,17 +39157,13 @@ abstract class _ModifyMessageRequest extends ModifyMessageRequest { factory _ModifyMessageRequest.fromJson(Map json) = _$ModifyMessageRequestImpl.fromJson; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; - - /// Create a copy of ModifyMessageRequest - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ModifyMessageRequestImplCopyWith<_$ModifyMessageRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42413,12 +39184,8 @@ mixin _$DeleteMessageResponse { /// The object type, which is always `thread.message.deleted`. DeleteMessageResponseObject get object => throw _privateConstructorUsedError; - /// Serializes this DeleteMessageResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of DeleteMessageResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $DeleteMessageResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42443,8 +39210,6 @@ class _$DeleteMessageResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of DeleteMessageResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42490,8 +39255,6 @@ class __$$DeleteMessageResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteMessageResponseImpl) _then) : super(_value, _then); - /// Create a copy of DeleteMessageResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42553,13 +39316,11 @@ class _$DeleteMessageResponseImpl extends _DeleteMessageResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - /// Create a copy of DeleteMessageResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$DeleteMessageResponseImplCopyWith<_$DeleteMessageResponseImpl> @@ -42585,22 +39346,20 @@ abstract class _DeleteMessageResponse extends DeleteMessageResponse { factory _DeleteMessageResponse.fromJson(Map json) = _$DeleteMessageResponseImpl.fromJson; - /// The message identifier. @override + + /// The message identifier. String get id; + @override /// Whether the message was deleted. - @override bool get deleted; + @override /// The object type, which is always `thread.message.deleted`. 
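// The ModifyMessageRequest hunks above only carry `metadata`, and the doc
// comment states its limits: at most 16 pairs, keys up to 64 characters,
// values up to 512 characters. A plain-Dart pre-flight check (no package API
// involved) before sending an update:
bool isValidMetadata(Map<String, String> metadata) =>
    metadata.length <= 16 &&
    metadata.keys.every((key) => key.length <= 64) &&
    metadata.values.every((value) => value.length <= 512);

void main() {
  print(isValidMetadata({'reviewed': 'true'})); // true
  print(isValidMetadata({'note': 'x' * 600}));  // false: value too long
}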
- @override DeleteMessageResponseObject get object; - - /// Create a copy of DeleteMessageResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$DeleteMessageResponseImplCopyWith<_$DeleteMessageResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42629,12 +39388,8 @@ mixin _$ListMessagesResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; - /// Serializes this ListMessagesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ListMessagesResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ListMessagesResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42664,8 +39419,6 @@ class _$ListMessagesResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ListMessagesResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42724,8 +39477,6 @@ class __$$ListMessagesResponseImplCopyWithImpl<$Res> $Res Function(_$ListMessagesResponseImpl) _then) : super(_value, _then); - /// Create a copy of ListMessagesResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42822,14 +39573,12 @@ class _$ListMessagesResponseImpl extends _ListMessagesResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - /// Create a copy of ListMessagesResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ListMessagesResponseImplCopyWith<_$ListMessagesResponseImpl> @@ -42858,33 +39607,31 @@ abstract class _ListMessagesResponse extends ListMessagesResponse { factory _ListMessagesResponse.fromJson(Map json) = _$ListMessagesResponseImpl.fromJson; - /// The object type, which is always `list`. @override + + /// The object type, which is always `list`. String get object; + @override /// The list of messages. - @override List get data; + @override /// The ID of the first message in the list. - @override @JsonKey(name: 'first_id') String get firstId; + @override /// The ID of the last message in the list. - @override @JsonKey(name: 'last_id') String get lastId; + @override /// Whether there are more messages to retrieve. - @override @JsonKey(name: 'has_more') bool get hasMore; - - /// Create a copy of ListMessagesResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ListMessagesResponseImplCopyWith<_$ListMessagesResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42903,12 +39650,8 @@ mixin _$MessageContentImageFile { /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. 
MessageContentImageDetail get detail => throw _privateConstructorUsedError; - /// Serializes this MessageContentImageFile to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageContentImageFile - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $MessageContentImageFileCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42935,8 +39678,6 @@ class _$MessageContentImageFileCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageContentImageFile - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42980,8 +39721,6 @@ class __$$MessageContentImageFileImplCopyWithImpl<$Res> $Res Function(_$MessageContentImageFileImpl) _then) : super(_value, _then); - /// Create a copy of MessageContentImageFile - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43036,13 +39775,11 @@ class _$MessageContentImageFileImpl extends _MessageContentImageFile { (identical(other.detail, detail) || other.detail == detail)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, fileId, detail); - /// Create a copy of MessageContentImageFile - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageContentImageFileImplCopyWith<_$MessageContentImageFileImpl> @@ -43066,19 +39803,17 @@ abstract class _MessageContentImageFile extends MessageContentImageFile { factory _MessageContentImageFile.fromJson(Map json) = _$MessageContentImageFileImpl.fromJson; - /// The [File](https://platform.openai.com/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. @override + + /// The [File](https://platform.openai.com/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. @JsonKey(name: 'file_id') String get fileId; + @override /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - @override MessageContentImageDetail get detail; - - /// Create a copy of MessageContentImageFile - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageContentImageFileImplCopyWith<_$MessageContentImageFileImpl> get copyWith => throw _privateConstructorUsedError; } @@ -43096,12 +39831,8 @@ mixin _$MessageContentImageUrl { /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. MessageContentImageDetail get detail => throw _privateConstructorUsedError; - /// Serializes this MessageContentImageUrl to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageContentImageUrl - /// with the given fields replaced by the non-null parameter values. 
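// A sketch for the MessageContentImageFile / MessageContentImageUrl hunks
// around this point. The `low` and `high` detail values come from the doc
// comments above; the package export, the file ID and the URL are assumptions.
import 'package:openai_dart/openai_dart.dart';

void main() {
  final byFileId = MessageContentImageFile.fromJson({
    'file_id': 'file-abc123',
    'detail': 'high',
  });
  final byUrl = MessageContentImageUrl.fromJson({
    'url': 'https://example.com/diagram.png',
    'detail': 'low',
  });
  print('${byFileId.fileId} (${byFileId.detail}) / ${byUrl.url} (${byUrl.detail})');
}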
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $MessageContentImageUrlCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -43126,8 +39857,6 @@ class _$MessageContentImageUrlCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageContentImageUrl - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43169,8 +39898,6 @@ class __$$MessageContentImageUrlImplCopyWithImpl<$Res> $Res Function(_$MessageContentImageUrlImpl) _then) : super(_value, _then); - /// Create a copy of MessageContentImageUrl - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43223,13 +39950,11 @@ class _$MessageContentImageUrlImpl extends _MessageContentImageUrl { (identical(other.detail, detail) || other.detail == detail)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, url, detail); - /// Create a copy of MessageContentImageUrl - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageContentImageUrlImplCopyWith<_$MessageContentImageUrlImpl> @@ -43253,18 +39978,16 @@ abstract class _MessageContentImageUrl extends MessageContentImageUrl { factory _MessageContentImageUrl.fromJson(Map json) = _$MessageContentImageUrlImpl.fromJson; - /// The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp. @override + + /// The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp. String get url; + @override /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - @override MessageContentImageDetail get detail; - - /// Create a copy of MessageContentImageUrl - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageContentImageUrlImplCopyWith<_$MessageContentImageUrlImpl> get copyWith => throw _privateConstructorUsedError; } @@ -43282,12 +40005,8 @@ mixin _$MessageRequestContentTextObject { /// Text content to be sent to the model String get text => throw _privateConstructorUsedError; - /// Serializes this MessageRequestContentTextObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageRequestContentTextObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $MessageRequestContentTextObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -43314,8 +40033,6 @@ class _$MessageRequestContentTextObjectCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageRequestContentTextObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43357,8 +40074,6 @@ class __$$MessageRequestContentTextObjectImplCopyWithImpl<$Res> $Res Function(_$MessageRequestContentTextObjectImpl) _then) : super(_value, _then); - /// Create a copy of MessageRequestContentTextObject - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -43412,13 +40127,11 @@ class _$MessageRequestContentTextObjectImpl (identical(other.text, text) || other.text == text)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, type, text); - /// Create a copy of MessageRequestContentTextObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageRequestContentTextObjectImplCopyWith< @@ -43444,18 +40157,16 @@ abstract class _MessageRequestContentTextObject factory _MessageRequestContentTextObject.fromJson(Map json) = _$MessageRequestContentTextObjectImpl.fromJson; - /// Always `text`. @override + + /// Always `text`. String get type; + @override /// Text content to be sent to the model - @override String get text; - - /// Create a copy of MessageRequestContentTextObject - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageRequestContentTextObjectImplCopyWith< _$MessageRequestContentTextObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -43475,12 +40186,8 @@ mixin _$MessageContentText { List? get annotations => throw _privateConstructorUsedError; - /// Serializes this MessageContentText to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageContentText - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $MessageContentTextCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -43507,8 +40214,6 @@ class _$MessageContentTextCopyWithImpl<$Res, $Val extends MessageContentText> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageContentText - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43550,8 +40255,6 @@ class __$$MessageContentTextImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextImpl) _then) : super(_value, _then); - /// Create a copy of MessageContentText - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43617,14 +40320,12 @@ class _$MessageContentTextImpl extends _MessageContentText { .equals(other._annotations, _annotations)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, value, const DeepCollectionEquality().hash(_annotations)); - /// Create a copy of MessageContentText - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => @@ -43650,19 +40351,17 @@ abstract class _MessageContentText extends MessageContentText { factory _MessageContentText.fromJson(Map json) = _$MessageContentTextImpl.fromJson; - /// The data that makes up the text. @override + + /// The data that makes up the text. String get value; + @override /// A list of annotations that point to specific quotes from specific files. - @override @JsonKey(includeIfNull: false) List? 
get annotations; - - /// Create a copy of MessageContentText - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => throw _privateConstructorUsedError; } @@ -43679,12 +40378,11 @@ mixin _$MessageContentTextAnnotationsFileCitation { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; - /// Serializes this MessageContentTextAnnotationsFileCitation to a JSON map. - Map toJson() => throw _privateConstructorUsedError; + /// The specific quote in the file. + String get quote => throw _privateConstructorUsedError; - /// Create a copy of MessageContentTextAnnotationsFileCitation - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) $MessageContentTextAnnotationsFileCitationCopyWith< MessageContentTextAnnotationsFileCitation> get copyWith => throw _privateConstructorUsedError; @@ -43698,7 +40396,7 @@ abstract class $MessageContentTextAnnotationsFileCitationCopyWith<$Res> { _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, MessageContentTextAnnotationsFileCitation>; @useResult - $Res call({@JsonKey(name: 'file_id') String fileId}); + $Res call({@JsonKey(name: 'file_id') String fileId, String quote}); } /// @nodoc @@ -43713,18 +40411,21 @@ class _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageContentTextAnnotationsFileCitation - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? fileId = null, + Object? quote = null, }) { return _then(_value.copyWith( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, + quote: null == quote + ? _value.quote + : quote // ignore: cast_nullable_to_non_nullable + as String, ) as $Val); } } @@ -43738,7 +40439,7 @@ abstract class _$$MessageContentTextAnnotationsFileCitationImplCopyWith<$Res> __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(name: 'file_id') String fileId}); + $Res call({@JsonKey(name: 'file_id') String fileId, String quote}); } /// @nodoc @@ -43751,18 +40452,21 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextAnnotationsFileCitationImpl) _then) : super(_value, _then); - /// Create a copy of MessageContentTextAnnotationsFileCitation - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? fileId = null, + Object? quote = null, }) { return _then(_$MessageContentTextAnnotationsFileCitationImpl( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, + quote: null == quote + ? 
_value.quote + : quote // ignore: cast_nullable_to_non_nullable + as String, )); } } @@ -43772,7 +40476,7 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> class _$MessageContentTextAnnotationsFileCitationImpl extends _MessageContentTextAnnotationsFileCitation { const _$MessageContentTextAnnotationsFileCitationImpl( - {@JsonKey(name: 'file_id') required this.fileId}) + {@JsonKey(name: 'file_id') required this.fileId, required this.quote}) : super._(); factory _$MessageContentTextAnnotationsFileCitationImpl.fromJson( @@ -43784,9 +40488,13 @@ class _$MessageContentTextAnnotationsFileCitationImpl @JsonKey(name: 'file_id') final String fileId; + /// The specific quote in the file. + @override + final String quote; + @override String toString() { - return 'MessageContentTextAnnotationsFileCitation(fileId: $fileId)'; + return 'MessageContentTextAnnotationsFileCitation(fileId: $fileId, quote: $quote)'; } @override @@ -43794,16 +40502,15 @@ class _$MessageContentTextAnnotationsFileCitationImpl return identical(this, other) || (other.runtimeType == runtimeType && other is _$MessageContentTextAnnotationsFileCitationImpl && - (identical(other.fileId, fileId) || other.fileId == fileId)); + (identical(other.fileId, fileId) || other.fileId == fileId) && + (identical(other.quote, quote) || other.quote == quote)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, fileId); + int get hashCode => Object.hash(runtimeType, fileId, quote); - /// Create a copy of MessageContentTextAnnotationsFileCitation - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageContentTextAnnotationsFileCitationImplCopyWith< @@ -43824,7 +40531,8 @@ class _$MessageContentTextAnnotationsFileCitationImpl abstract class _MessageContentTextAnnotationsFileCitation extends MessageContentTextAnnotationsFileCitation { const factory _MessageContentTextAnnotationsFileCitation( - {@JsonKey(name: 'file_id') required final String fileId}) = + {@JsonKey(name: 'file_id') required final String fileId, + required final String quote}) = _$MessageContentTextAnnotationsFileCitationImpl; const _MessageContentTextAnnotationsFileCitation._() : super._(); @@ -43832,20 +40540,265 @@ abstract class _MessageContentTextAnnotationsFileCitation Map json) = _$MessageContentTextAnnotationsFileCitationImpl.fromJson; - /// The ID of the specific File the citation is from. @override + + /// The ID of the specific File the citation is from. @JsonKey(name: 'file_id') String get fileId; + @override - /// Create a copy of MessageContentTextAnnotationsFileCitation - /// with the given fields replaced by the non-null parameter values. + /// The specific quote in the file. + String get quote; @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageContentTextAnnotationsFileCitationImplCopyWith< _$MessageContentTextAnnotationsFileCitationImpl> get copyWith => throw _privateConstructorUsedError; } +MessageDeltaContentImageUrlObject _$MessageDeltaContentImageUrlObjectFromJson( + Map json) { + return _MessageDeltaContentImageUrlObject.fromJson(json); +} + +/// @nodoc +mixin _$MessageDeltaContentImageUrlObject { + /// The index of the content part in the message. + @JsonKey(includeIfNull: false) + int? 
get index => throw _privateConstructorUsedError; + + /// Always `image_url`. + @JsonKey(includeIfNull: false) + String? get type => throw _privateConstructorUsedError; + + /// The image URL part of a message. + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? get imageUrl => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageDeltaContentImageUrlObjectCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageDeltaContentImageUrlObjectCopyWith<$Res> { + factory $MessageDeltaContentImageUrlObjectCopyWith( + MessageDeltaContentImageUrlObject value, + $Res Function(MessageDeltaContentImageUrlObject) then) = + _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, + MessageDeltaContentImageUrlObject>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) int? index, + @JsonKey(includeIfNull: false) String? type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl}); + + $MessageContentImageUrlCopyWith<$Res>? get imageUrl; +} + +/// @nodoc +class _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, + $Val extends MessageDeltaContentImageUrlObject> + implements $MessageDeltaContentImageUrlObjectCopyWith<$Res> { + _$MessageDeltaContentImageUrlObjectCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? index = freezed, + Object? type = freezed, + Object? imageUrl = freezed, + }) { + return _then(_value.copyWith( + index: freezed == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int?, + type: freezed == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String?, + imageUrl: freezed == imageUrl + ? _value.imageUrl + : imageUrl // ignore: cast_nullable_to_non_nullable + as MessageContentImageUrl?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $MessageContentImageUrlCopyWith<$Res>? get imageUrl { + if (_value.imageUrl == null) { + return null; + } + + return $MessageContentImageUrlCopyWith<$Res>(_value.imageUrl!, (value) { + return _then(_value.copyWith(imageUrl: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> + implements $MessageDeltaContentImageUrlObjectCopyWith<$Res> { + factory _$$MessageDeltaContentImageUrlObjectImplCopyWith( + _$MessageDeltaContentImageUrlObjectImpl value, + $Res Function(_$MessageDeltaContentImageUrlObjectImpl) then) = + __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) int? index, + @JsonKey(includeIfNull: false) String? type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl}); + + @override + $MessageContentImageUrlCopyWith<$Res>? get imageUrl; +} + +/// @nodoc +class __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res> + extends _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, + _$MessageDeltaContentImageUrlObjectImpl> + implements _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> { + __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl( + _$MessageDeltaContentImageUrlObjectImpl _value, + $Res Function(_$MessageDeltaContentImageUrlObjectImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? 
index = freezed, + Object? type = freezed, + Object? imageUrl = freezed, + }) { + return _then(_$MessageDeltaContentImageUrlObjectImpl( + index: freezed == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int?, + type: freezed == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String?, + imageUrl: freezed == imageUrl + ? _value.imageUrl + : imageUrl // ignore: cast_nullable_to_non_nullable + as MessageContentImageUrl?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaContentImageUrlObjectImpl + extends _MessageDeltaContentImageUrlObject { + const _$MessageDeltaContentImageUrlObjectImpl( + {@JsonKey(includeIfNull: false) this.index, + @JsonKey(includeIfNull: false) this.type, + @JsonKey(name: 'image_url', includeIfNull: false) this.imageUrl}) + : super._(); + + factory _$MessageDeltaContentImageUrlObjectImpl.fromJson( + Map json) => + _$$MessageDeltaContentImageUrlObjectImplFromJson(json); + + /// The index of the content part in the message. + @override + @JsonKey(includeIfNull: false) + final int? index; + + /// Always `image_url`. + @override + @JsonKey(includeIfNull: false) + final String? type; + + /// The image URL part of a message. + @override + @JsonKey(name: 'image_url', includeIfNull: false) + final MessageContentImageUrl? imageUrl; + + @override + String toString() { + return 'MessageDeltaContentImageUrlObject(index: $index, type: $type, imageUrl: $imageUrl)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaContentImageUrlObjectImpl && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type) && + (identical(other.imageUrl, imageUrl) || + other.imageUrl == imageUrl)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, index, type, imageUrl); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaContentImageUrlObjectImplCopyWith< + _$MessageDeltaContentImageUrlObjectImpl> + get copyWith => __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl< + _$MessageDeltaContentImageUrlObjectImpl>(this, _$identity); + + @override + Map toJson() { + return _$$MessageDeltaContentImageUrlObjectImplToJson( + this, + ); + } +} + +abstract class _MessageDeltaContentImageUrlObject + extends MessageDeltaContentImageUrlObject { + const factory _MessageDeltaContentImageUrlObject( + {@JsonKey(includeIfNull: false) final int? index, + @JsonKey(includeIfNull: false) final String? type, + @JsonKey(name: 'image_url', includeIfNull: false) + final MessageContentImageUrl? imageUrl}) = + _$MessageDeltaContentImageUrlObjectImpl; + const _MessageDeltaContentImageUrlObject._() : super._(); + + factory _MessageDeltaContentImageUrlObject.fromJson( + Map json) = + _$MessageDeltaContentImageUrlObjectImpl.fromJson; + + @override + + /// The index of the content part in the message. + @JsonKey(includeIfNull: false) + int? get index; + @override + + /// Always `image_url`. + @JsonKey(includeIfNull: false) + String? get type; + @override + + /// The image URL part of a message. + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? 
get imageUrl; + @override + @JsonKey(ignore: true) + _$$MessageDeltaContentImageUrlObjectImplCopyWith< + _$MessageDeltaContentImageUrlObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + MessageDeltaContentText _$MessageDeltaContentTextFromJson( Map json) { return _MessageDeltaContentText.fromJson(json); @@ -43862,12 +40815,8 @@ mixin _$MessageDeltaContentText { List? get annotations => throw _privateConstructorUsedError; - /// Serializes this MessageDeltaContentText to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageDeltaContentText - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $MessageDeltaContentTextCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -43895,8 +40844,6 @@ class _$MessageDeltaContentTextCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageDeltaContentText - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43941,8 +40888,6 @@ class __$$MessageDeltaContentTextImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentTextImpl) _then) : super(_value, _then); - /// Create a copy of MessageDeltaContentText - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44009,14 +40954,12 @@ class _$MessageDeltaContentTextImpl extends _MessageDeltaContentText { .equals(other._annotations, _annotations)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, value, const DeepCollectionEquality().hash(_annotations)); - /// Create a copy of MessageDeltaContentText - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextImplCopyWith<_$MessageDeltaContentTextImpl> @@ -44042,20 +40985,18 @@ abstract class _MessageDeltaContentText extends MessageDeltaContentText { factory _MessageDeltaContentText.fromJson(Map json) = _$MessageDeltaContentTextImpl.fromJson; - /// The data that makes up the text. @override + + /// The data that makes up the text. @JsonKey(includeIfNull: false) String? get value; + @override /// A list of annotations that point to specific quotes from specific files. - @override @JsonKey(includeIfNull: false) List? get annotations; - - /// Create a copy of MessageDeltaContentText - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageDeltaContentTextImplCopyWith<_$MessageDeltaContentTextImpl> get copyWith => throw _privateConstructorUsedError; } @@ -44076,12 +41017,8 @@ mixin _$MessageDeltaContentTextAnnotationsFileCitation { @JsonKey(includeIfNull: false) String? get quote => throw _privateConstructorUsedError; - /// Serializes this MessageDeltaContentTextAnnotationsFileCitation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $MessageDeltaContentTextAnnotationsFileCitationCopyWith< MessageDeltaContentTextAnnotationsFileCitation> get copyWith => throw _privateConstructorUsedError; @@ -44112,8 +41049,6 @@ class _$MessageDeltaContentTextAnnotationsFileCitationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44160,8 +41095,6 @@ class __$$MessageDeltaContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentTextAnnotationsFileCitationImpl) _then) : super(_value, _then); - /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44218,13 +41151,11 @@ class _$MessageDeltaContentTextAnnotationsFileCitationImpl (identical(other.quote, quote) || other.quote == quote)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, fileId, quote); - /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextAnnotationsFileCitationImplCopyWith< @@ -44254,20 +41185,18 @@ abstract class _MessageDeltaContentTextAnnotationsFileCitation Map json) = _$MessageDeltaContentTextAnnotationsFileCitationImpl.fromJson; - /// The ID of the specific File the citation is from. @override + + /// The ID of the specific File the citation is from. @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; + @override /// The specific quote in the file. - @override @JsonKey(includeIfNull: false) String? get quote; - - /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageDeltaContentTextAnnotationsFileCitationImplCopyWith< _$MessageDeltaContentTextAnnotationsFileCitationImpl> get copyWith => throw _privateConstructorUsedError; @@ -44332,20 +41261,14 @@ mixin _$RunStepObject { @JsonKey(name: 'completed_at') int? get completedAt => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. RunStepCompletionUsage? get usage => throw _privateConstructorUsedError; - /// Serializes this RunStepObject to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunStepObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -44389,8 +41312,6 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44479,8 +41400,6 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> ) as $Val); } - /// Create a copy of RunStepObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDetailsCopyWith<$Res> get stepDetails { @@ -44489,8 +41408,6 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> }); } - /// Create a copy of RunStepObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepLastErrorCopyWith<$Res>? get lastError { @@ -44503,8 +41420,6 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> }); } - /// Create a copy of RunStepObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepCompletionUsageCopyWith<$Res>? get usage { @@ -44560,8 +41475,6 @@ class __$$RunStepObjectImplCopyWithImpl<$Res> _$RunStepObjectImpl _value, $Res Function(_$RunStepObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunStepObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44744,14 +41657,10 @@ class _$RunStepObjectImpl extends _RunStepObject { @JsonKey(name: 'completed_at') final int? completedAt; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override Map? 
get metadata { final value = _metadata; @@ -44802,7 +41711,7 @@ class _$RunStepObjectImpl extends _RunStepObject { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, @@ -44823,9 +41732,7 @@ class _$RunStepObjectImpl extends _RunStepObject { const DeepCollectionEquality().hash(_metadata), usage); - /// Create a copy of RunStepObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunStepObjectImplCopyWith<_$RunStepObjectImpl> get copyWith => @@ -44862,87 +41769,83 @@ abstract class _RunStepObject extends RunStepObject { factory _RunStepObject.fromJson(Map json) = _$RunStepObjectImpl.fromJson; - /// The identifier of the run step, which can be referenced in API endpoints. @override + + /// The identifier of the run step, which can be referenced in API endpoints. String get id; + @override /// The object type, which is always `thread.run.step`. - @override RunStepObjectObject get object; + @override /// The Unix timestamp (in seconds) for when the run step was created. - @override @JsonKey(name: 'created_at') int get createdAt; + @override /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) associated with the run step. - @override @JsonKey(name: 'assistant_id') String get assistantId; + @override /// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was run. - @override @JsonKey(name: 'thread_id') String get threadId; + @override /// The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that this run step is a part of. - @override @JsonKey(name: 'run_id') String get runId; + @override /// The type of run step, which can be either `message_creation` or `tool_calls`. - @override RunStepType get type; + @override /// The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. - @override RunStepStatus get status; + @override /// The details of the run step. /// Any of: [RunStepDetailsMessageCreationObject], [RunStepDetailsToolCallsObject] - @override @JsonKey(name: 'step_details') RunStepDetails get stepDetails; + @override /// The last error associated with this run step. Will be `null` if there are no errors. - @override @JsonKey(name: 'last_error') RunStepLastError? get lastError; + @override /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. - @override @JsonKey(name: 'expired_at') int? get expiredAt; + @override /// The Unix timestamp (in seconds) for when the run step was cancelled. - @override @JsonKey(name: 'cancelled_at') int? get cancelledAt; + @override /// The Unix timestamp (in seconds) for when the run step failed. - @override @JsonKey(name: 'failed_at') int? get failedAt; + @override /// The Unix timestamp (in seconds) for when the run step completed. - @override @JsonKey(name: 'completed_at') int? get completedAt; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. @override + + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. Map? get metadata; + @override /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. - @override RunStepCompletionUsage? get usage; - - /// Create a copy of RunStepObject - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunStepObjectImplCopyWith<_$RunStepObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -44959,12 +41862,8 @@ mixin _$RunStepLastError { /// A human-readable description of the error. String get message => throw _privateConstructorUsedError; - /// Serializes this RunStepLastError to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepLastError - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunStepLastErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -44988,8 +41887,6 @@ class _$RunStepLastErrorCopyWithImpl<$Res, $Val extends RunStepLastError> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepLastError - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45028,8 +41925,6 @@ class __$$RunStepLastErrorImplCopyWithImpl<$Res> $Res Function(_$RunStepLastErrorImpl) _then) : super(_value, _then); - /// Create a copy of RunStepLastError - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45080,13 +41975,11 @@ class _$RunStepLastErrorImpl extends _RunStepLastError { (identical(other.message, message) || other.message == message)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, code, message); - /// Create a copy of RunStepLastError - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunStepLastErrorImplCopyWith<_$RunStepLastErrorImpl> get copyWith => @@ -45110,18 +42003,16 @@ abstract class _RunStepLastError extends RunStepLastError { factory _RunStepLastError.fromJson(Map json) = _$RunStepLastErrorImpl.fromJson; - /// One of `server_error` or `rate_limit_exceeded`. @override + + /// One of `server_error` or `rate_limit_exceeded`. RunStepLastErrorCode get code; + @override /// A human-readable description of the error. - @override String get message; - - /// Create a copy of RunStepLastError - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunStepLastErrorImplCopyWith<_$RunStepLastErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -45141,12 +42032,8 @@ mixin _$RunStepDeltaObject { /// The delta containing the fields that have changed on the run step. RunStepDelta get delta => throw _privateConstructorUsedError; - /// Serializes this RunStepDeltaObject to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDeltaObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunStepDeltaObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -45172,8 +42059,6 @@ class _$RunStepDeltaObjectCopyWithImpl<$Res, $Val extends RunStepDeltaObject> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDeltaObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45197,8 +42082,6 @@ class _$RunStepDeltaObjectCopyWithImpl<$Res, $Val extends RunStepDeltaObject> ) as $Val); } - /// Create a copy of RunStepDeltaObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaCopyWith<$Res> get delta { @@ -45230,8 +42113,6 @@ class __$$RunStepDeltaObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDeltaObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45293,13 +42174,11 @@ class _$RunStepDeltaObjectImpl extends _RunStepDeltaObject { (identical(other.delta, delta) || other.delta == delta)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, object, delta); - /// Create a copy of RunStepDeltaObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunStepDeltaObjectImplCopyWith<_$RunStepDeltaObjectImpl> get copyWith => @@ -45324,22 +42203,20 @@ abstract class _RunStepDeltaObject extends RunStepDeltaObject { factory _RunStepDeltaObject.fromJson(Map json) = _$RunStepDeltaObjectImpl.fromJson; - /// The identifier of the run step, which can be referenced in API endpoints. @override + + /// The identifier of the run step, which can be referenced in API endpoints. String get id; + @override /// The object type, which is always `thread.run.step.delta`. - @override RunStepDeltaObjectObject get object; + @override /// The delta containing the fields that have changed on the run step. - @override RunStepDelta get delta; - - /// Create a copy of RunStepDeltaObject - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunStepDeltaObjectImplCopyWith<_$RunStepDeltaObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -45355,12 +42232,8 @@ mixin _$RunStepDelta { @JsonKey(name: 'step_details', includeIfNull: false) RunStepDeltaDetails? get stepDetails => throw _privateConstructorUsedError; - /// Serializes this RunStepDelta to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDelta - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunStepDeltaCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -45388,8 +42261,6 @@ class _$RunStepDeltaCopyWithImpl<$Res, $Val extends RunStepDelta> // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDelta - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45403,8 +42274,6 @@ class _$RunStepDeltaCopyWithImpl<$Res, $Val extends RunStepDelta> ) as $Val); } - /// Create a copy of RunStepDelta - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaDetailsCopyWith<$Res>? get stepDetails { @@ -45442,8 +42311,6 @@ class __$$RunStepDeltaImplCopyWithImpl<$Res> _$RunStepDeltaImpl _value, $Res Function(_$RunStepDeltaImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDelta - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45488,13 +42355,11 @@ class _$RunStepDeltaImpl extends _RunStepDelta { other.stepDetails == stepDetails)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, stepDetails); - /// Create a copy of RunStepDelta - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunStepDeltaImplCopyWith<_$RunStepDeltaImpl> get copyWith => @@ -45517,16 +42382,14 @@ abstract class _RunStepDelta extends RunStepDelta { factory _RunStepDelta.fromJson(Map json) = _$RunStepDeltaImpl.fromJson; + @override + /// The details of the run step /// Any of: [RunStepDeltaStepDetailsMessageCreationObject], [RunStepDeltaStepDetailsToolCallsObject] - @override @JsonKey(name: 'step_details', includeIfNull: false) RunStepDeltaDetails? get stepDetails; - - /// Create a copy of RunStepDelta - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunStepDeltaImplCopyWith<_$RunStepDeltaImpl> get copyWith => throw _privateConstructorUsedError; } @@ -45555,12 +42418,8 @@ mixin _$ListRunStepsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; - /// Serializes this ListRunStepsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ListRunStepsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $ListRunStepsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -45590,8 +42449,6 @@ class _$ListRunStepsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ListRunStepsResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45650,8 +42507,6 @@ class __$$ListRunStepsResponseImplCopyWithImpl<$Res> $Res Function(_$ListRunStepsResponseImpl) _then) : super(_value, _then); - /// Create a copy of ListRunStepsResponse - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -45748,14 +42603,12 @@ class _$ListRunStepsResponseImpl extends _ListRunStepsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - /// Create a copy of ListRunStepsResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ListRunStepsResponseImplCopyWith<_$ListRunStepsResponseImpl> @@ -45784,33 +42637,31 @@ abstract class _ListRunStepsResponse extends ListRunStepsResponse { factory _ListRunStepsResponse.fromJson(Map json) = _$ListRunStepsResponseImpl.fromJson; - /// The object type, which is always `list`. @override + + /// The object type, which is always `list`. String get object; + @override /// The list of run steps. - @override List get data; + @override /// The ID of the first run step in the list. - @override @JsonKey(name: 'first_id') String get firstId; + @override /// The ID of the last run step in the list. - @override @JsonKey(name: 'last_id') String get lastId; + @override /// Whether there are more run steps to retrieve. - @override @JsonKey(name: 'has_more') bool get hasMore; - - /// Create a copy of ListRunStepsResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ListRunStepsResponseImplCopyWith<_$ListRunStepsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -45826,12 +42677,8 @@ mixin _$RunStepDetailsMessageCreation { @JsonKey(name: 'message_id') String get messageId => throw _privateConstructorUsedError; - /// Serializes this RunStepDetailsMessageCreation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDetailsMessageCreation - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunStepDetailsMessageCreationCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -45858,8 +42705,6 @@ class _$RunStepDetailsMessageCreationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDetailsMessageCreation - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45896,8 +42741,6 @@ class __$$RunStepDetailsMessageCreationImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsMessageCreationImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDetailsMessageCreation - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45943,13 +42786,11 @@ class _$RunStepDetailsMessageCreationImpl other.messageId == messageId)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, messageId); - /// Create a copy of RunStepDetailsMessageCreation - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunStepDetailsMessageCreationImplCopyWith< @@ -45975,15 +42816,13 @@ abstract class _RunStepDetailsMessageCreation factory _RunStepDetailsMessageCreation.fromJson(Map json) = _$RunStepDetailsMessageCreationImpl.fromJson; - /// The ID of the message that was created by this run step. @override + + /// The ID of the message that was created by this run step. @JsonKey(name: 'message_id') String get messageId; - - /// Create a copy of RunStepDetailsMessageCreation - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunStepDetailsMessageCreationImplCopyWith< _$RunStepDetailsMessageCreationImpl> get copyWith => throw _privateConstructorUsedError; @@ -46001,12 +42840,8 @@ mixin _$RunStepDeltaStepDetailsMessageCreation { @JsonKey(name: 'message_id', includeIfNull: false) String? get messageId => throw _privateConstructorUsedError; - /// Serializes this RunStepDeltaStepDetailsMessageCreation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDeltaStepDetailsMessageCreation - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunStepDeltaStepDetailsMessageCreationCopyWith< RunStepDeltaStepDetailsMessageCreation> get copyWith => throw _privateConstructorUsedError; @@ -46035,8 +42870,6 @@ class _$RunStepDeltaStepDetailsMessageCreationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDeltaStepDetailsMessageCreation - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46074,8 +42907,6 @@ class __$$RunStepDeltaStepDetailsMessageCreationImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsMessageCreationImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDeltaStepDetailsMessageCreation - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46121,13 +42952,11 @@ class _$RunStepDeltaStepDetailsMessageCreationImpl other.messageId == messageId)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, messageId); - /// Create a copy of RunStepDeltaStepDetailsMessageCreation - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsMessageCreationImplCopyWith< @@ -46155,15 +42984,13 @@ abstract class _RunStepDeltaStepDetailsMessageCreation Map json) = _$RunStepDeltaStepDetailsMessageCreationImpl.fromJson; - /// The ID of the message that was created by this run step. @override + + /// The ID of the message that was created by this run step. @JsonKey(name: 'message_id', includeIfNull: false) String? get messageId; - - /// Create a copy of RunStepDeltaStepDetailsMessageCreation - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunStepDeltaStepDetailsMessageCreationImplCopyWith< _$RunStepDeltaStepDetailsMessageCreationImpl> get copyWith => throw _privateConstructorUsedError; @@ -46184,12 +43011,8 @@ mixin _$RunStepDetailsToolCallsCodeObjectCodeInterpreter { List get outputs => throw _privateConstructorUsedError; - /// Serializes this RunStepDetailsToolCallsCodeObjectCodeInterpreter to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith< RunStepDetailsToolCallsCodeObjectCodeInterpreter> get copyWith => throw _privateConstructorUsedError; @@ -46219,8 +43042,6 @@ class _$RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46267,8 +43088,6 @@ class __$$RunStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWithImpl<$Res> _then) : super(_value, _then); - /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46331,14 +43150,12 @@ class _$RunStepDetailsToolCallsCodeObjectCodeInterpreterImpl const DeepCollectionEquality().equals(other._outputs, _outputs)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, input, const DeepCollectionEquality().hash(_outputs)); - /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< @@ -46368,18 +43185,16 @@ abstract class _RunStepDetailsToolCallsCodeObjectCodeInterpreter Map json) = _$RunStepDetailsToolCallsCodeObjectCodeInterpreterImpl.fromJson; - /// The input to the Code Interpreter tool call. @override + + /// The input to the Code Interpreter tool call. String get input; + @override /// The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. - @override List get outputs; - - /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< _$RunStepDetailsToolCallsCodeObjectCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -46403,12 +43218,8 @@ mixin _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter { List? get outputs => throw _privateConstructorUsedError; - /// Serializes this RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith< RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter> get copyWith => throw _privateConstructorUsedError; @@ -46446,8 +43257,6 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWithImpl< // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46503,8 +43312,6 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWithI _then) : super(_value, _then); - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46575,14 +43382,12 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImpl const DeepCollectionEquality().equals(other._outputs, _outputs)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( runtimeType, input, const DeepCollectionEquality().hash(_outputs)); - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< @@ -46614,20 +43419,18 @@ abstract class _RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter Map json) = _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImpl.fromJson; - /// The input to the Code Interpreter tool call. @override + + /// The input to the Code Interpreter tool call. @JsonKey(includeIfNull: false) String? get input; + @override /// The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. - @override @JsonKey(includeIfNull: false) List? get outputs; - - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -46645,12 +43448,8 @@ mixin _$RunStepDetailsToolCallsCodeOutputImage { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; - /// Serializes this RunStepDetailsToolCallsCodeOutputImage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDetailsToolCallsCodeOutputImage - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunStepDetailsToolCallsCodeOutputImageCopyWith< RunStepDetailsToolCallsCodeOutputImage> get copyWith => throw _privateConstructorUsedError; @@ -46678,8 +43477,6 @@ class _$RunStepDetailsToolCallsCodeOutputImageCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDetailsToolCallsCodeOutputImage - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46716,8 +43513,6 @@ class __$$RunStepDetailsToolCallsCodeOutputImageImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsCodeOutputImageImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDetailsToolCallsCodeOutputImage - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46762,13 +43557,11 @@ class _$RunStepDetailsToolCallsCodeOutputImageImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, fileId); - /// Create a copy of RunStepDetailsToolCallsCodeOutputImage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeOutputImageImplCopyWith< @@ -46796,15 +43589,13 @@ abstract class _RunStepDetailsToolCallsCodeOutputImage Map json) = _$RunStepDetailsToolCallsCodeOutputImageImpl.fromJson; - /// The [file](https://platform.openai.com/docs/api-reference/files) ID of the image. @override + + /// The [file](https://platform.openai.com/docs/api-reference/files) ID of the image. @JsonKey(name: 'file_id') String get fileId; - - /// Create a copy of RunStepDetailsToolCallsCodeOutputImage - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunStepDetailsToolCallsCodeOutputImageImplCopyWith< _$RunStepDetailsToolCallsCodeOutputImageImpl> get copyWith => throw _privateConstructorUsedError; @@ -46822,12 +43613,8 @@ mixin _$RunStepDeltaStepDetailsToolCallsCodeOutputImage { @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId => throw _privateConstructorUsedError; - /// Serializes this RunStepDeltaStepDetailsToolCallsCodeOutputImage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith< RunStepDeltaStepDetailsToolCallsCodeOutputImage> get copyWith => throw _privateConstructorUsedError; @@ -46856,8 +43643,6 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -46898,8 +43683,6 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageImplCopyWithImpl<$Res> _then) : super(_value, _then); - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46944,13 +43727,11 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, fileId); - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageImplCopyWith< @@ -46980,2874 +43761,2898 @@ abstract class _RunStepDeltaStepDetailsToolCallsCodeOutputImage Map json) = _$RunStepDeltaStepDetailsToolCallsCodeOutputImageImpl.fromJson; - /// The [file](https://platform.openai.com/docs/api-reference/files) ID of the image. @override + + /// The [file](https://platform.openai.com/docs/api-reference/files) ID of the image. @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; - - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeOutputImageImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDetailsToolCallsFileSearch _$RunStepDetailsToolCallsFileSearchFromJson( +RunStepCompletionUsage _$RunStepCompletionUsageFromJson( Map json) { - return _RunStepDetailsToolCallsFileSearch.fromJson(json); + return _RunStepCompletionUsage.fromJson(json); } /// @nodoc -mixin _$RunStepDetailsToolCallsFileSearch { - /// The ranking options for the file search. - @JsonKey(name: 'ranking_options', includeIfNull: false) - RunStepDetailsToolCallsFileSearchRankingOptionsObject? get rankingOptions => - throw _privateConstructorUsedError; +mixin _$RunStepCompletionUsage { + /// Number of completion tokens used over the course of the run step. + @JsonKey(name: 'completion_tokens') + int get completionTokens => throw _privateConstructorUsedError; - /// The results of the file search. - @JsonKey(includeIfNull: false) - List? get results => - throw _privateConstructorUsedError; + /// Number of prompt tokens used over the course of the run step. + @JsonKey(name: 'prompt_tokens') + int get promptTokens => throw _privateConstructorUsedError; - /// Serializes this RunStepDetailsToolCallsFileSearch to a JSON map. - Map toJson() => throw _privateConstructorUsedError; + /// Total number of tokens used (prompt + completion). + @JsonKey(name: 'total_tokens') + int get totalTokens => throw _privateConstructorUsedError; - /// Create a copy of RunStepDetailsToolCallsFileSearch - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $RunStepDetailsToolCallsFileSearchCopyWith - get copyWith => throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $RunStepCompletionUsageCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepDetailsToolCallsFileSearchCopyWith<$Res> { - factory $RunStepDetailsToolCallsFileSearchCopyWith( - RunStepDetailsToolCallsFileSearch value, - $Res Function(RunStepDetailsToolCallsFileSearch) then) = - _$RunStepDetailsToolCallsFileSearchCopyWithImpl<$Res, - RunStepDetailsToolCallsFileSearch>; +abstract class $RunStepCompletionUsageCopyWith<$Res> { + factory $RunStepCompletionUsageCopyWith(RunStepCompletionUsage value, + $Res Function(RunStepCompletionUsage) then) = + _$RunStepCompletionUsageCopyWithImpl<$Res, RunStepCompletionUsage>; @useResult $Res call( - {@JsonKey(name: 'ranking_options', includeIfNull: false) - RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, - @JsonKey(includeIfNull: false) - List? results}); - - $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>? - get rankingOptions; + {@JsonKey(name: 'completion_tokens') int completionTokens, + @JsonKey(name: 'prompt_tokens') int promptTokens, + @JsonKey(name: 'total_tokens') int totalTokens}); } /// @nodoc -class _$RunStepDetailsToolCallsFileSearchCopyWithImpl<$Res, - $Val extends RunStepDetailsToolCallsFileSearch> - implements $RunStepDetailsToolCallsFileSearchCopyWith<$Res> { - _$RunStepDetailsToolCallsFileSearchCopyWithImpl(this._value, this._then); +class _$RunStepCompletionUsageCopyWithImpl<$Res, + $Val extends RunStepCompletionUsage> + implements $RunStepCompletionUsageCopyWith<$Res> { + _$RunStepCompletionUsageCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDetailsToolCallsFileSearch - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? rankingOptions = freezed, - Object? results = freezed, + Object? completionTokens = null, + Object? promptTokens = null, + Object? totalTokens = null, }) { return _then(_value.copyWith( - rankingOptions: freezed == rankingOptions - ? _value.rankingOptions - : rankingOptions // ignore: cast_nullable_to_non_nullable - as RunStepDetailsToolCallsFileSearchRankingOptionsObject?, - results: freezed == results - ? _value.results - : results // ignore: cast_nullable_to_non_nullable - as List?, + completionTokens: null == completionTokens + ? _value.completionTokens + : completionTokens // ignore: cast_nullable_to_non_nullable + as int, + promptTokens: null == promptTokens + ? _value.promptTokens + : promptTokens // ignore: cast_nullable_to_non_nullable + as int, + totalTokens: null == totalTokens + ? _value.totalTokens + : totalTokens // ignore: cast_nullable_to_non_nullable + as int, ) as $Val); } - - /// Create a copy of RunStepDetailsToolCallsFileSearch - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>? 
- get rankingOptions { - if (_value.rankingOptions == null) { - return null; - } - - return $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>( - _value.rankingOptions!, (value) { - return _then(_value.copyWith(rankingOptions: value) as $Val); - }); - } } /// @nodoc -abstract class _$$RunStepDetailsToolCallsFileSearchImplCopyWith<$Res> - implements $RunStepDetailsToolCallsFileSearchCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsFileSearchImplCopyWith( - _$RunStepDetailsToolCallsFileSearchImpl value, - $Res Function(_$RunStepDetailsToolCallsFileSearchImpl) then) = - __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl<$Res>; +abstract class _$$RunStepCompletionUsageImplCopyWith<$Res> + implements $RunStepCompletionUsageCopyWith<$Res> { + factory _$$RunStepCompletionUsageImplCopyWith( + _$RunStepCompletionUsageImpl value, + $Res Function(_$RunStepCompletionUsageImpl) then) = + __$$RunStepCompletionUsageImplCopyWithImpl<$Res>; @override @useResult $Res call( - {@JsonKey(name: 'ranking_options', includeIfNull: false) - RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, - @JsonKey(includeIfNull: false) - List? results}); - - @override - $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>? - get rankingOptions; + {@JsonKey(name: 'completion_tokens') int completionTokens, + @JsonKey(name: 'prompt_tokens') int promptTokens, + @JsonKey(name: 'total_tokens') int totalTokens}); } /// @nodoc -class __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsFileSearchCopyWithImpl<$Res, - _$RunStepDetailsToolCallsFileSearchImpl> - implements _$$RunStepDetailsToolCallsFileSearchImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl( - _$RunStepDetailsToolCallsFileSearchImpl _value, - $Res Function(_$RunStepDetailsToolCallsFileSearchImpl) _then) +class __$$RunStepCompletionUsageImplCopyWithImpl<$Res> + extends _$RunStepCompletionUsageCopyWithImpl<$Res, + _$RunStepCompletionUsageImpl> + implements _$$RunStepCompletionUsageImplCopyWith<$Res> { + __$$RunStepCompletionUsageImplCopyWithImpl( + _$RunStepCompletionUsageImpl _value, + $Res Function(_$RunStepCompletionUsageImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDetailsToolCallsFileSearch - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? rankingOptions = freezed, - Object? results = freezed, - }) { - return _then(_$RunStepDetailsToolCallsFileSearchImpl( - rankingOptions: freezed == rankingOptions - ? _value.rankingOptions - : rankingOptions // ignore: cast_nullable_to_non_nullable - as RunStepDetailsToolCallsFileSearchRankingOptionsObject?, - results: freezed == results - ? _value._results - : results // ignore: cast_nullable_to_non_nullable - as List?, + Object? completionTokens = null, + Object? promptTokens = null, + Object? totalTokens = null, + }) { + return _then(_$RunStepCompletionUsageImpl( + completionTokens: null == completionTokens + ? _value.completionTokens + : completionTokens // ignore: cast_nullable_to_non_nullable + as int, + promptTokens: null == promptTokens + ? _value.promptTokens + : promptTokens // ignore: cast_nullable_to_non_nullable + as int, + totalTokens: null == totalTokens + ? 
_value.totalTokens + : totalTokens // ignore: cast_nullable_to_non_nullable + as int, )); } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsFileSearchImpl - extends _RunStepDetailsToolCallsFileSearch { - const _$RunStepDetailsToolCallsFileSearchImpl( - {@JsonKey(name: 'ranking_options', includeIfNull: false) - this.rankingOptions, - @JsonKey(includeIfNull: false) - final List? results}) - : _results = results, - super._(); +class _$RunStepCompletionUsageImpl extends _RunStepCompletionUsage { + const _$RunStepCompletionUsageImpl( + {@JsonKey(name: 'completion_tokens') required this.completionTokens, + @JsonKey(name: 'prompt_tokens') required this.promptTokens, + @JsonKey(name: 'total_tokens') required this.totalTokens}) + : super._(); - factory _$RunStepDetailsToolCallsFileSearchImpl.fromJson( - Map json) => - _$$RunStepDetailsToolCallsFileSearchImplFromJson(json); + factory _$RunStepCompletionUsageImpl.fromJson(Map json) => + _$$RunStepCompletionUsageImplFromJson(json); - /// The ranking options for the file search. + /// Number of completion tokens used over the course of the run step. @override - @JsonKey(name: 'ranking_options', includeIfNull: false) - final RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions; + @JsonKey(name: 'completion_tokens') + final int completionTokens; - /// The results of the file search. - final List? _results; + /// Number of prompt tokens used over the course of the run step. + @override + @JsonKey(name: 'prompt_tokens') + final int promptTokens; - /// The results of the file search. + /// Total number of tokens used (prompt + completion). @override - @JsonKey(includeIfNull: false) - List? get results { - final value = _results; - if (value == null) return null; - if (_results is EqualUnmodifiableListView) return _results; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } + @JsonKey(name: 'total_tokens') + final int totalTokens; @override String toString() { - return 'RunStepDetailsToolCallsFileSearch(rankingOptions: $rankingOptions, results: $results)'; + return 'RunStepCompletionUsage(completionTokens: $completionTokens, promptTokens: $promptTokens, totalTokens: $totalTokens)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsFileSearchImpl && - (identical(other.rankingOptions, rankingOptions) || - other.rankingOptions == rankingOptions) && - const DeepCollectionEquality().equals(other._results, _results)); + other is _$RunStepCompletionUsageImpl && + (identical(other.completionTokens, completionTokens) || + other.completionTokens == completionTokens) && + (identical(other.promptTokens, promptTokens) || + other.promptTokens == promptTokens) && + (identical(other.totalTokens, totalTokens) || + other.totalTokens == totalTokens)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, rankingOptions, - const DeepCollectionEquality().hash(_results)); + int get hashCode => + Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); - /// Create a copy of RunStepDetailsToolCallsFileSearch - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsFileSearchImplCopyWith< - _$RunStepDetailsToolCallsFileSearchImpl> - get copyWith => __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl< - _$RunStepDetailsToolCallsFileSearchImpl>(this, _$identity); + _$$RunStepCompletionUsageImplCopyWith<_$RunStepCompletionUsageImpl> + get copyWith => __$$RunStepCompletionUsageImplCopyWithImpl< + _$RunStepCompletionUsageImpl>(this, _$identity); @override Map toJson() { - return _$$RunStepDetailsToolCallsFileSearchImplToJson( + return _$$RunStepCompletionUsageImplToJson( this, ); } } -abstract class _RunStepDetailsToolCallsFileSearch - extends RunStepDetailsToolCallsFileSearch { - const factory _RunStepDetailsToolCallsFileSearch( - {@JsonKey(name: 'ranking_options', includeIfNull: false) - final RunStepDetailsToolCallsFileSearchRankingOptionsObject? - rankingOptions, - @JsonKey(includeIfNull: false) - final List? results}) = - _$RunStepDetailsToolCallsFileSearchImpl; - const _RunStepDetailsToolCallsFileSearch._() : super._(); +abstract class _RunStepCompletionUsage extends RunStepCompletionUsage { + const factory _RunStepCompletionUsage( + {@JsonKey(name: 'completion_tokens') required final int completionTokens, + @JsonKey(name: 'prompt_tokens') required final int promptTokens, + @JsonKey(name: 'total_tokens') + required final int totalTokens}) = _$RunStepCompletionUsageImpl; + const _RunStepCompletionUsage._() : super._(); - factory _RunStepDetailsToolCallsFileSearch.fromJson( - Map json) = - _$RunStepDetailsToolCallsFileSearchImpl.fromJson; + factory _RunStepCompletionUsage.fromJson(Map json) = + _$RunStepCompletionUsageImpl.fromJson; - /// The ranking options for the file search. @override - @JsonKey(name: 'ranking_options', includeIfNull: false) - RunStepDetailsToolCallsFileSearchRankingOptionsObject? get rankingOptions; - /// The results of the file search. + /// Number of completion tokens used over the course of the run step. + @JsonKey(name: 'completion_tokens') + int get completionTokens; + @override + + /// Number of prompt tokens used over the course of the run step. + @JsonKey(name: 'prompt_tokens') + int get promptTokens; @override - @JsonKey(includeIfNull: false) - List? get results; - /// Create a copy of RunStepDetailsToolCallsFileSearch - /// with the given fields replaced by the non-null parameter values. + /// Total number of tokens used (prompt + completion). + @JsonKey(name: 'total_tokens') + int get totalTokens; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDetailsToolCallsFileSearchImplCopyWith< - _$RunStepDetailsToolCallsFileSearchImpl> + @JsonKey(ignore: true) + _$$RunStepCompletionUsageImplCopyWith<_$RunStepCompletionUsageImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDetailsToolCallsFileSearchRankingOptionsObject - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectFromJson( - Map json) { - return _RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson(json); +VectorStoreExpirationAfter _$VectorStoreExpirationAfterFromJson( + Map json) { + return _VectorStoreExpirationAfter.fromJson(json); } /// @nodoc -mixin _$RunStepDetailsToolCallsFileSearchRankingOptionsObject { - /// The ranker to use for the file search. If not specified will use the `auto` ranker. 
- FileSearchRanker get ranker => throw _privateConstructorUsedError; +mixin _$VectorStoreExpirationAfter { + /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. + VectorStoreExpirationAfterAnchor get anchor => + throw _privateConstructorUsedError; - /// The score threshold for the file search. All values must be a floating point number between 0 and 1. - @JsonKey(name: 'score_threshold') - double get scoreThreshold => throw _privateConstructorUsedError; + /// The number of days after the anchor time that the vector store will expire. + int get days => throw _privateConstructorUsedError; - /// Serializes this RunStepDetailsToolCallsFileSearchRankingOptionsObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith< - RunStepDetailsToolCallsFileSearchRankingOptionsObject> + @JsonKey(ignore: true) + $VectorStoreExpirationAfterCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith< - $Res> { - factory $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith( - RunStepDetailsToolCallsFileSearchRankingOptionsObject value, - $Res Function(RunStepDetailsToolCallsFileSearchRankingOptionsObject) - then) = - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl<$Res, - RunStepDetailsToolCallsFileSearchRankingOptionsObject>; +abstract class $VectorStoreExpirationAfterCopyWith<$Res> { + factory $VectorStoreExpirationAfterCopyWith(VectorStoreExpirationAfter value, + $Res Function(VectorStoreExpirationAfter) then) = + _$VectorStoreExpirationAfterCopyWithImpl<$Res, + VectorStoreExpirationAfter>; @useResult - $Res call( - {FileSearchRanker ranker, - @JsonKey(name: 'score_threshold') double scoreThreshold}); + $Res call({VectorStoreExpirationAfterAnchor anchor, int days}); } /// @nodoc -class _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl<$Res, - $Val extends RunStepDetailsToolCallsFileSearchRankingOptionsObject> - implements - $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res> { - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl( - this._value, this._then); +class _$VectorStoreExpirationAfterCopyWithImpl<$Res, + $Val extends VectorStoreExpirationAfter> + implements $VectorStoreExpirationAfterCopyWith<$Res> { + _$VectorStoreExpirationAfterCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? ranker = null, - Object? scoreThreshold = null, + Object? anchor = null, + Object? days = null, }) { return _then(_value.copyWith( - ranker: null == ranker - ? _value.ranker - : ranker // ignore: cast_nullable_to_non_nullable - as FileSearchRanker, - scoreThreshold: null == scoreThreshold - ? _value.scoreThreshold - : scoreThreshold // ignore: cast_nullable_to_non_nullable - as double, + anchor: null == anchor + ? 
_value.anchor + : anchor // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfterAnchor, + days: null == days + ? _value.days + : days // ignore: cast_nullable_to_non_nullable + as int, ) as $Val); } } /// @nodoc -abstract class _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< - $Res> - implements - $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith( - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl value, - $Res Function( - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl) - then) = - __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl< - $Res>; +abstract class _$$VectorStoreExpirationAfterImplCopyWith<$Res> + implements $VectorStoreExpirationAfterCopyWith<$Res> { + factory _$$VectorStoreExpirationAfterImplCopyWith( + _$VectorStoreExpirationAfterImpl value, + $Res Function(_$VectorStoreExpirationAfterImpl) then) = + __$$VectorStoreExpirationAfterImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {FileSearchRanker ranker, - @JsonKey(name: 'score_threshold') double scoreThreshold}); + $Res call({VectorStoreExpirationAfterAnchor anchor, int days}); } /// @nodoc -class __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl< - $Res> - extends _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl< - $Res, _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl> - implements - _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< - $Res> { - __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl) - _then) +class __$$VectorStoreExpirationAfterImplCopyWithImpl<$Res> + extends _$VectorStoreExpirationAfterCopyWithImpl<$Res, + _$VectorStoreExpirationAfterImpl> + implements _$$VectorStoreExpirationAfterImplCopyWith<$Res> { + __$$VectorStoreExpirationAfterImplCopyWithImpl( + _$VectorStoreExpirationAfterImpl _value, + $Res Function(_$VectorStoreExpirationAfterImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? ranker = null, - Object? scoreThreshold = null, - }) { - return _then(_$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl( - ranker: null == ranker - ? _value.ranker - : ranker // ignore: cast_nullable_to_non_nullable - as FileSearchRanker, - scoreThreshold: null == scoreThreshold - ? _value.scoreThreshold - : scoreThreshold // ignore: cast_nullable_to_non_nullable - as double, + Object? anchor = null, + Object? days = null, + }) { + return _then(_$VectorStoreExpirationAfterImpl( + anchor: null == anchor + ? _value.anchor + : anchor // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfterAnchor, + days: null == days + ? 
_value.days + : days // ignore: cast_nullable_to_non_nullable + as int, )); } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl - extends _RunStepDetailsToolCallsFileSearchRankingOptionsObject { - const _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl( - {required this.ranker, - @JsonKey(name: 'score_threshold') required this.scoreThreshold}) +class _$VectorStoreExpirationAfterImpl extends _VectorStoreExpirationAfter { + const _$VectorStoreExpirationAfterImpl( + {required this.anchor, required this.days}) : super._(); - factory _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl.fromJson( + factory _$VectorStoreExpirationAfterImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplFromJson( - json); + _$$VectorStoreExpirationAfterImplFromJson(json); - /// The ranker to use for the file search. If not specified will use the `auto` ranker. + /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. @override - final FileSearchRanker ranker; + final VectorStoreExpirationAfterAnchor anchor; - /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + /// The number of days after the anchor time that the vector store will expire. @override - @JsonKey(name: 'score_threshold') - final double scoreThreshold; + final int days; @override String toString() { - return 'RunStepDetailsToolCallsFileSearchRankingOptionsObject(ranker: $ranker, scoreThreshold: $scoreThreshold)'; + return 'VectorStoreExpirationAfter(anchor: $anchor, days: $days)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl && - (identical(other.ranker, ranker) || other.ranker == ranker) && - (identical(other.scoreThreshold, scoreThreshold) || - other.scoreThreshold == scoreThreshold)); + other is _$VectorStoreExpirationAfterImpl && + (identical(other.anchor, anchor) || other.anchor == anchor) && + (identical(other.days, days) || other.days == days)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, ranker, scoreThreshold); + int get hashCode => Object.hash(runtimeType, anchor, days); - /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl> - get copyWith => - __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl>( - this, _$identity); + _$$VectorStoreExpirationAfterImplCopyWith<_$VectorStoreExpirationAfterImpl> + get copyWith => __$$VectorStoreExpirationAfterImplCopyWithImpl< + _$VectorStoreExpirationAfterImpl>(this, _$identity); @override Map toJson() { - return _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplToJson( + return _$$VectorStoreExpirationAfterImplToJson( this, ); } } -abstract class _RunStepDetailsToolCallsFileSearchRankingOptionsObject - extends RunStepDetailsToolCallsFileSearchRankingOptionsObject { - const factory _RunStepDetailsToolCallsFileSearchRankingOptionsObject( - {required final FileSearchRanker ranker, - @JsonKey(name: 'score_threshold') - required final double scoreThreshold}) = - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl; - const _RunStepDetailsToolCallsFileSearchRankingOptionsObject._() : super._(); +abstract class _VectorStoreExpirationAfter extends VectorStoreExpirationAfter { + const factory _VectorStoreExpirationAfter( + {required final VectorStoreExpirationAfterAnchor anchor, + required final int days}) = _$VectorStoreExpirationAfterImpl; + const _VectorStoreExpirationAfter._() : super._(); - factory _RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson( - Map json) = - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl.fromJson; + factory _VectorStoreExpirationAfter.fromJson(Map json) = + _$VectorStoreExpirationAfterImpl.fromJson; - /// The ranker to use for the file search. If not specified will use the `auto` ranker. @override - FileSearchRanker get ranker; - /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. + VectorStoreExpirationAfterAnchor get anchor; @override - @JsonKey(name: 'score_threshold') - double get scoreThreshold; - /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject - /// with the given fields replaced by the non-null parameter values. + /// The number of days after the anchor time that the vector store will expire. + int get days; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl> + @JsonKey(ignore: true) + _$$VectorStoreExpirationAfterImplCopyWith<_$VectorStoreExpirationAfterImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDetailsToolCallsFileSearchResultObject - _$RunStepDetailsToolCallsFileSearchResultObjectFromJson( - Map json) { - return _RunStepDetailsToolCallsFileSearchResultObject.fromJson(json); +VectorStoreObject _$VectorStoreObjectFromJson(Map json) { + return _VectorStoreObject.fromJson(json); } /// @nodoc -mixin _$RunStepDetailsToolCallsFileSearchResultObject { - /// The ID of the file that result was found in. - @JsonKey(name: 'file_id') - String get fileId => throw _privateConstructorUsedError; +mixin _$VectorStoreObject { + /// The identifier, which can be referenced in API endpoints. 
+ String get id => throw _privateConstructorUsedError; + + /// The object type, which is always `vector_store`. + String get object => throw _privateConstructorUsedError; - /// The name of the file that result was found in. - @JsonKey(name: 'file_name') - String get fileName => throw _privateConstructorUsedError; + /// The Unix timestamp (in seconds) for when the vector store was created. + @JsonKey(name: 'created_at') + int get createdAt => throw _privateConstructorUsedError; - /// The score of the result. All values must be a floating point number between 0 and 1. - double get score => throw _privateConstructorUsedError; + /// The name of the vector store. + String? get name => throw _privateConstructorUsedError; - /// The content of the result that was found. The content is only included if requested via the include - /// query parameter. - @JsonKey(includeIfNull: false) - List? get content => + /// The total number of bytes used by the files in the vector store. + @JsonKey(name: 'usage_bytes') + int get usageBytes => throw _privateConstructorUsedError; + + /// The number of files in the vector store. + @JsonKey(name: 'file_counts') + VectorStoreObjectFileCounts get fileCounts => throw _privateConstructorUsedError; - /// Serializes this RunStepDetailsToolCallsFileSearchResultObject to a JSON map. - Map toJson() => throw _privateConstructorUsedError; + /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. + VectorStoreObjectStatus get status => throw _privateConstructorUsedError; - /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $RunStepDetailsToolCallsFileSearchResultObjectCopyWith< - RunStepDetailsToolCallsFileSearchResultObject> - get copyWith => throw _privateConstructorUsedError; + /// The expiration policy for a vector store. + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? get expiresAfter => + throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the vector store will expire. + @JsonKey(name: 'expires_at', includeIfNull: false) + int? get expiresAt => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the vector store was last active. + @JsonKey(name: 'last_active_at') + int? get lastActiveAt => throw _privateConstructorUsedError; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ dynamic get metadata => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $VectorStoreObjectCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepDetailsToolCallsFileSearchResultObjectCopyWith<$Res> { - factory $RunStepDetailsToolCallsFileSearchResultObjectCopyWith( - RunStepDetailsToolCallsFileSearchResultObject value, - $Res Function(RunStepDetailsToolCallsFileSearchResultObject) then) = - _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl<$Res, - RunStepDetailsToolCallsFileSearchResultObject>; +abstract class $VectorStoreObjectCopyWith<$Res> { + factory $VectorStoreObjectCopyWith( + VectorStoreObject value, $Res Function(VectorStoreObject) then) = + _$VectorStoreObjectCopyWithImpl<$Res, VectorStoreObject>; @useResult $Res call( - {@JsonKey(name: 'file_id') String fileId, - @JsonKey(name: 'file_name') String fileName, - double score, - @JsonKey(includeIfNull: false) - List? content}); + {String id, + String object, + @JsonKey(name: 'created_at') int createdAt, + String? name, + @JsonKey(name: 'usage_bytes') int usageBytes, + @JsonKey(name: 'file_counts') VectorStoreObjectFileCounts fileCounts, + VectorStoreObjectStatus status, + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, + @JsonKey(name: 'last_active_at') int? lastActiveAt, + dynamic metadata}); + + $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts; + $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; } /// @nodoc -class _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl<$Res, - $Val extends RunStepDetailsToolCallsFileSearchResultObject> - implements $RunStepDetailsToolCallsFileSearchResultObjectCopyWith<$Res> { - _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl( - this._value, this._then); +class _$VectorStoreObjectCopyWithImpl<$Res, $Val extends VectorStoreObject> + implements $VectorStoreObjectCopyWith<$Res> { + _$VectorStoreObjectCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileId = null, - Object? fileName = null, - Object? score = null, - Object? content = freezed, + Object? id = null, + Object? object = null, + Object? createdAt = null, + Object? name = freezed, + Object? usageBytes = null, + Object? fileCounts = null, + Object? status = null, + Object? expiresAfter = freezed, + Object? expiresAt = freezed, + Object? lastActiveAt = freezed, + Object? metadata = freezed, }) { return _then(_value.copyWith( - fileId: null == fileId - ? _value.fileId - : fileId // ignore: cast_nullable_to_non_nullable + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable as String, - fileName: null == fileName - ? _value.fileName - : fileName // ignore: cast_nullable_to_non_nullable + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable as String, - score: null == score - ? _value.score - : score // ignore: cast_nullable_to_non_nullable - as double, - content: freezed == content - ? 
_value.content - : content // ignore: cast_nullable_to_non_nullable - as List?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith< - $Res> - implements $RunStepDetailsToolCallsFileSearchResultObjectCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith( - _$RunStepDetailsToolCallsFileSearchResultObjectImpl value, - $Res Function(_$RunStepDetailsToolCallsFileSearchResultObjectImpl) - then) = - __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(name: 'file_id') String fileId, - @JsonKey(name: 'file_name') String fileName, - double score, - @JsonKey(includeIfNull: false) - List? content}); -} - -/// @nodoc -class __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl<$Res, - _$RunStepDetailsToolCallsFileSearchResultObjectImpl> - implements - _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsFileSearchResultObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsFileSearchResultObjectImpl) _then) - : super(_value, _then); - - /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? fileId = null, - Object? fileName = null, - Object? score = null, - Object? content = freezed, - }) { - return _then(_$RunStepDetailsToolCallsFileSearchResultObjectImpl( - fileId: null == fileId - ? _value.fileId - : fileId // ignore: cast_nullable_to_non_nullable + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + usageBytes: null == usageBytes + ? _value.usageBytes + : usageBytes // ignore: cast_nullable_to_non_nullable + as int, + fileCounts: null == fileCounts + ? _value.fileCounts + : fileCounts // ignore: cast_nullable_to_non_nullable + as VectorStoreObjectFileCounts, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as VectorStoreObjectStatus, + expiresAfter: freezed == expiresAfter + ? _value.expiresAfter + : expiresAfter // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfter?, + expiresAt: freezed == expiresAt + ? _value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable + as int?, + lastActiveAt: freezed == lastActiveAt + ? _value.lastActiveAt + : lastActiveAt // ignore: cast_nullable_to_non_nullable + as int?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts { + return $VectorStoreObjectFileCountsCopyWith<$Res>(_value.fileCounts, + (value) { + return _then(_value.copyWith(fileCounts: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $VectorStoreExpirationAfterCopyWith<$Res>? 
get expiresAfter { + if (_value.expiresAfter == null) { + return null; + } + + return $VectorStoreExpirationAfterCopyWith<$Res>(_value.expiresAfter!, + (value) { + return _then(_value.copyWith(expiresAfter: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$VectorStoreObjectImplCopyWith<$Res> + implements $VectorStoreObjectCopyWith<$Res> { + factory _$$VectorStoreObjectImplCopyWith(_$VectorStoreObjectImpl value, + $Res Function(_$VectorStoreObjectImpl) then) = + __$$VectorStoreObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String id, + String object, + @JsonKey(name: 'created_at') int createdAt, + String? name, + @JsonKey(name: 'usage_bytes') int usageBytes, + @JsonKey(name: 'file_counts') VectorStoreObjectFileCounts fileCounts, + VectorStoreObjectStatus status, + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, + @JsonKey(name: 'last_active_at') int? lastActiveAt, + dynamic metadata}); + + @override + $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts; + @override + $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; +} + +/// @nodoc +class __$$VectorStoreObjectImplCopyWithImpl<$Res> + extends _$VectorStoreObjectCopyWithImpl<$Res, _$VectorStoreObjectImpl> + implements _$$VectorStoreObjectImplCopyWith<$Res> { + __$$VectorStoreObjectImplCopyWithImpl(_$VectorStoreObjectImpl _value, + $Res Function(_$VectorStoreObjectImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? object = null, + Object? createdAt = null, + Object? name = freezed, + Object? usageBytes = null, + Object? fileCounts = null, + Object? status = null, + Object? expiresAfter = freezed, + Object? expiresAt = freezed, + Object? lastActiveAt = freezed, + Object? metadata = freezed, + }) { + return _then(_$VectorStoreObjectImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable as String, - fileName: null == fileName - ? _value.fileName - : fileName // ignore: cast_nullable_to_non_nullable + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable as String, - score: null == score - ? _value.score - : score // ignore: cast_nullable_to_non_nullable - as double, - content: freezed == content - ? _value._content - : content // ignore: cast_nullable_to_non_nullable - as List?, + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + usageBytes: null == usageBytes + ? _value.usageBytes + : usageBytes // ignore: cast_nullable_to_non_nullable + as int, + fileCounts: null == fileCounts + ? _value.fileCounts + : fileCounts // ignore: cast_nullable_to_non_nullable + as VectorStoreObjectFileCounts, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as VectorStoreObjectStatus, + expiresAfter: freezed == expiresAfter + ? _value.expiresAfter + : expiresAfter // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfter?, + expiresAt: freezed == expiresAt + ? _value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable + as int?, + lastActiveAt: freezed == lastActiveAt + ? _value.lastActiveAt + : lastActiveAt // ignore: cast_nullable_to_non_nullable + as int?, + metadata: freezed == metadata + ? 
_value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, )); } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsFileSearchResultObjectImpl - extends _RunStepDetailsToolCallsFileSearchResultObject { - const _$RunStepDetailsToolCallsFileSearchResultObjectImpl( - {@JsonKey(name: 'file_id') required this.fileId, - @JsonKey(name: 'file_name') required this.fileName, - required this.score, - @JsonKey(includeIfNull: false) - final List? content}) - : _content = content, - super._(); +class _$VectorStoreObjectImpl extends _VectorStoreObject { + const _$VectorStoreObjectImpl( + {required this.id, + required this.object, + @JsonKey(name: 'created_at') required this.createdAt, + required this.name, + @JsonKey(name: 'usage_bytes') required this.usageBytes, + @JsonKey(name: 'file_counts') required this.fileCounts, + required this.status, + @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, + @JsonKey(name: 'expires_at', includeIfNull: false) this.expiresAt, + @JsonKey(name: 'last_active_at') required this.lastActiveAt, + required this.metadata}) + : super._(); - factory _$RunStepDetailsToolCallsFileSearchResultObjectImpl.fromJson( - Map json) => - _$$RunStepDetailsToolCallsFileSearchResultObjectImplFromJson(json); + factory _$VectorStoreObjectImpl.fromJson(Map json) => + _$$VectorStoreObjectImplFromJson(json); - /// The ID of the file that result was found in. + /// The identifier, which can be referenced in API endpoints. @override - @JsonKey(name: 'file_id') - final String fileId; + final String id; + + /// The object type, which is always `vector_store`. + @override + final String object; + + /// The Unix timestamp (in seconds) for when the vector store was created. + @override + @JsonKey(name: 'created_at') + final int createdAt; - /// The name of the file that result was found in. + /// The name of the vector store. @override - @JsonKey(name: 'file_name') - final String fileName; + final String? name; - /// The score of the result. All values must be a floating point number between 0 and 1. + /// The total number of bytes used by the files in the vector store. @override - final double score; + @JsonKey(name: 'usage_bytes') + final int usageBytes; - /// The content of the result that was found. The content is only included if requested via the include - /// query parameter. - final List? _content; + /// The number of files in the vector store. + @override + @JsonKey(name: 'file_counts') + final VectorStoreObjectFileCounts fileCounts; - /// The content of the result that was found. The content is only included if requested via the include - /// query parameter. + /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. @override - @JsonKey(includeIfNull: false) - List? get content { - final value = _content; - if (value == null) return null; - if (_content is EqualUnmodifiableListView) return _content; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } + final VectorStoreObjectStatus status; + + /// The expiration policy for a vector store. + @override + @JsonKey(name: 'expires_after', includeIfNull: false) + final VectorStoreExpirationAfter? expiresAfter; + + /// The Unix timestamp (in seconds) for when the vector store will expire. + @override + @JsonKey(name: 'expires_at', includeIfNull: false) + final int? 
expiresAt; + + /// The Unix timestamp (in seconds) for when the vector store was last active. + @override + @JsonKey(name: 'last_active_at') + final int? lastActiveAt; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + @override + final dynamic metadata; @override String toString() { - return 'RunStepDetailsToolCallsFileSearchResultObject(fileId: $fileId, fileName: $fileName, score: $score, content: $content)'; + return 'VectorStoreObject(id: $id, object: $object, createdAt: $createdAt, name: $name, usageBytes: $usageBytes, fileCounts: $fileCounts, status: $status, expiresAfter: $expiresAfter, expiresAt: $expiresAt, lastActiveAt: $lastActiveAt, metadata: $metadata)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsFileSearchResultObjectImpl && - (identical(other.fileId, fileId) || other.fileId == fileId) && - (identical(other.fileName, fileName) || - other.fileName == fileName) && - (identical(other.score, score) || other.score == score) && - const DeepCollectionEquality().equals(other._content, _content)); + other is _$VectorStoreObjectImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.object, object) || other.object == object) && + (identical(other.createdAt, createdAt) || + other.createdAt == createdAt) && + (identical(other.name, name) || other.name == name) && + (identical(other.usageBytes, usageBytes) || + other.usageBytes == usageBytes) && + (identical(other.fileCounts, fileCounts) || + other.fileCounts == fileCounts) && + (identical(other.status, status) || other.status == status) && + (identical(other.expiresAfter, expiresAfter) || + other.expiresAfter == expiresAfter) && + (identical(other.expiresAt, expiresAt) || + other.expiresAt == expiresAt) && + (identical(other.lastActiveAt, lastActiveAt) || + other.lastActiveAt == lastActiveAt) && + const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, fileId, fileName, score, - const DeepCollectionEquality().hash(_content)); + int get hashCode => Object.hash( + runtimeType, + id, + object, + createdAt, + name, + usageBytes, + fileCounts, + status, + expiresAfter, + expiresAt, + lastActiveAt, + const DeepCollectionEquality().hash(metadata)); - /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith< - _$RunStepDetailsToolCallsFileSearchResultObjectImpl> - get copyWith => - __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsFileSearchResultObjectImpl>( - this, _$identity); + _$$VectorStoreObjectImplCopyWith<_$VectorStoreObjectImpl> get copyWith => + __$$VectorStoreObjectImplCopyWithImpl<_$VectorStoreObjectImpl>( + this, _$identity); @override Map toJson() { - return _$$RunStepDetailsToolCallsFileSearchResultObjectImplToJson( + return _$$VectorStoreObjectImplToJson( this, ); } } -abstract class _RunStepDetailsToolCallsFileSearchResultObject - extends RunStepDetailsToolCallsFileSearchResultObject { - const factory _RunStepDetailsToolCallsFileSearchResultObject( - {@JsonKey(name: 'file_id') required final String fileId, - @JsonKey(name: 'file_name') required final String fileName, - required final double score, - @JsonKey(includeIfNull: false) - final List? - content}) = _$RunStepDetailsToolCallsFileSearchResultObjectImpl; - const _RunStepDetailsToolCallsFileSearchResultObject._() : super._(); +abstract class _VectorStoreObject extends VectorStoreObject { + const factory _VectorStoreObject( + {required final String id, + required final String object, + @JsonKey(name: 'created_at') required final int createdAt, + required final String? name, + @JsonKey(name: 'usage_bytes') required final int usageBytes, + @JsonKey(name: 'file_counts') + required final VectorStoreObjectFileCounts fileCounts, + required final VectorStoreObjectStatus status, + @JsonKey(name: 'expires_after', includeIfNull: false) + final VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'expires_at', includeIfNull: false) final int? expiresAt, + @JsonKey(name: 'last_active_at') required final int? lastActiveAt, + required final dynamic metadata}) = _$VectorStoreObjectImpl; + const _VectorStoreObject._() : super._(); - factory _RunStepDetailsToolCallsFileSearchResultObject.fromJson( - Map json) = - _$RunStepDetailsToolCallsFileSearchResultObjectImpl.fromJson; + factory _VectorStoreObject.fromJson(Map json) = + _$VectorStoreObjectImpl.fromJson; - /// The ID of the file that result was found in. @override - @JsonKey(name: 'file_id') - String get fileId; - /// The name of the file that result was found in. + /// The identifier, which can be referenced in API endpoints. + String get id; @override - @JsonKey(name: 'file_name') - String get fileName; - /// The score of the result. All values must be a floating point number between 0 and 1. + /// The object type, which is always `vector_store`. + String get object; @override - double get score; - /// The content of the result that was found. The content is only included if requested via the include - /// query parameter. + /// The Unix timestamp (in seconds) for when the vector store was created. + @JsonKey(name: 'created_at') + int get createdAt; + @override + + /// The name of the vector store. + String? get name; @override - @JsonKey(includeIfNull: false) - List? get content; - /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject - /// with the given fields replaced by the non-null parameter values. + /// The total number of bytes used by the files in the vector store. 
+ @JsonKey(name: 'usage_bytes') + int get usageBytes; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith< - _$RunStepDetailsToolCallsFileSearchResultObjectImpl> - get copyWith => throw _privateConstructorUsedError; + + /// The number of files in the vector store. + @JsonKey(name: 'file_counts') + VectorStoreObjectFileCounts get fileCounts; + @override + + /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. + VectorStoreObjectStatus get status; + @override + + /// The expiration policy for a vector store. + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? get expiresAfter; + @override + + /// The Unix timestamp (in seconds) for when the vector store will expire. + @JsonKey(name: 'expires_at', includeIfNull: false) + int? get expiresAt; + @override + + /// The Unix timestamp (in seconds) for when the vector store was last active. + @JsonKey(name: 'last_active_at') + int? get lastActiveAt; + @override + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + dynamic get metadata; + @override + @JsonKey(ignore: true) + _$$VectorStoreObjectImplCopyWith<_$VectorStoreObjectImpl> get copyWith => + throw _privateConstructorUsedError; } -RunStepDetailsToolCallsFileSearchResultContent - _$RunStepDetailsToolCallsFileSearchResultContentFromJson( - Map json) { - return _RunStepDetailsToolCallsFileSearchResultContent.fromJson(json); +VectorStoreObjectFileCounts _$VectorStoreObjectFileCountsFromJson( + Map json) { + return _VectorStoreObjectFileCounts.fromJson(json); } /// @nodoc -mixin _$RunStepDetailsToolCallsFileSearchResultContent { - /// The type of the content. - String get type => throw _privateConstructorUsedError; +mixin _$VectorStoreObjectFileCounts { + /// The number of files that are currently being processed. + @JsonKey(name: 'in_progress') + int get inProgress => throw _privateConstructorUsedError; - /// The text content of the file. - @JsonKey(includeIfNull: false) - String? get text => throw _privateConstructorUsedError; + /// The number of files that have been successfully processed. + int get completed => throw _privateConstructorUsedError; - /// Serializes this RunStepDetailsToolCallsFileSearchResultContent to a JSON map. - Map toJson() => throw _privateConstructorUsedError; + /// The number of files that have failed to process. + int get failed => throw _privateConstructorUsedError; + + /// The number of files that were cancelled. + int get cancelled => throw _privateConstructorUsedError; + + /// The total number of files. + int get total => throw _privateConstructorUsedError; - /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $RunStepDetailsToolCallsFileSearchResultContentCopyWith< - RunStepDetailsToolCallsFileSearchResultContent> + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $VectorStoreObjectFileCountsCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepDetailsToolCallsFileSearchResultContentCopyWith<$Res> { - factory $RunStepDetailsToolCallsFileSearchResultContentCopyWith( - RunStepDetailsToolCallsFileSearchResultContent value, - $Res Function(RunStepDetailsToolCallsFileSearchResultContent) then) = - _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl<$Res, - RunStepDetailsToolCallsFileSearchResultContent>; +abstract class $VectorStoreObjectFileCountsCopyWith<$Res> { + factory $VectorStoreObjectFileCountsCopyWith( + VectorStoreObjectFileCounts value, + $Res Function(VectorStoreObjectFileCounts) then) = + _$VectorStoreObjectFileCountsCopyWithImpl<$Res, + VectorStoreObjectFileCounts>; @useResult - $Res call({String type, @JsonKey(includeIfNull: false) String? text}); + $Res call( + {@JsonKey(name: 'in_progress') int inProgress, + int completed, + int failed, + int cancelled, + int total}); } /// @nodoc -class _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl<$Res, - $Val extends RunStepDetailsToolCallsFileSearchResultContent> - implements $RunStepDetailsToolCallsFileSearchResultContentCopyWith<$Res> { - _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl( - this._value, this._then); +class _$VectorStoreObjectFileCountsCopyWithImpl<$Res, + $Val extends VectorStoreObjectFileCounts> + implements $VectorStoreObjectFileCountsCopyWith<$Res> { + _$VectorStoreObjectFileCountsCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, - Object? text = freezed, + Object? inProgress = null, + Object? completed = null, + Object? failed = null, + Object? cancelled = null, + Object? total = null, }) { return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - text: freezed == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String?, + inProgress: null == inProgress + ? _value.inProgress + : inProgress // ignore: cast_nullable_to_non_nullable + as int, + completed: null == completed + ? _value.completed + : completed // ignore: cast_nullable_to_non_nullable + as int, + failed: null == failed + ? _value.failed + : failed // ignore: cast_nullable_to_non_nullable + as int, + cancelled: null == cancelled + ? _value.cancelled + : cancelled // ignore: cast_nullable_to_non_nullable + as int, + total: null == total + ? 
_value.total + : total // ignore: cast_nullable_to_non_nullable + as int, ) as $Val); } } /// @nodoc -abstract class _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith< - $Res> - implements $RunStepDetailsToolCallsFileSearchResultContentCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith( - _$RunStepDetailsToolCallsFileSearchResultContentImpl value, - $Res Function(_$RunStepDetailsToolCallsFileSearchResultContentImpl) - then) = - __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl<$Res>; +abstract class _$$VectorStoreObjectFileCountsImplCopyWith<$Res> + implements $VectorStoreObjectFileCountsCopyWith<$Res> { + factory _$$VectorStoreObjectFileCountsImplCopyWith( + _$VectorStoreObjectFileCountsImpl value, + $Res Function(_$VectorStoreObjectFileCountsImpl) then) = + __$$VectorStoreObjectFileCountsImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type, @JsonKey(includeIfNull: false) String? text}); + $Res call( + {@JsonKey(name: 'in_progress') int inProgress, + int completed, + int failed, + int cancelled, + int total}); } /// @nodoc -class __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl<$Res, - _$RunStepDetailsToolCallsFileSearchResultContentImpl> - implements - _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl( - _$RunStepDetailsToolCallsFileSearchResultContentImpl _value, - $Res Function(_$RunStepDetailsToolCallsFileSearchResultContentImpl) _then) +class __$$VectorStoreObjectFileCountsImplCopyWithImpl<$Res> + extends _$VectorStoreObjectFileCountsCopyWithImpl<$Res, + _$VectorStoreObjectFileCountsImpl> + implements _$$VectorStoreObjectFileCountsImplCopyWith<$Res> { + __$$VectorStoreObjectFileCountsImplCopyWithImpl( + _$VectorStoreObjectFileCountsImpl _value, + $Res Function(_$VectorStoreObjectFileCountsImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, - Object? text = freezed, + Object? inProgress = null, + Object? completed = null, + Object? failed = null, + Object? cancelled = null, + Object? total = null, }) { - return _then(_$RunStepDetailsToolCallsFileSearchResultContentImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - text: freezed == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String?, + return _then(_$VectorStoreObjectFileCountsImpl( + inProgress: null == inProgress + ? _value.inProgress + : inProgress // ignore: cast_nullable_to_non_nullable + as int, + completed: null == completed + ? _value.completed + : completed // ignore: cast_nullable_to_non_nullable + as int, + failed: null == failed + ? _value.failed + : failed // ignore: cast_nullable_to_non_nullable + as int, + cancelled: null == cancelled + ? _value.cancelled + : cancelled // ignore: cast_nullable_to_non_nullable + as int, + total: null == total + ? 
_value.total + : total // ignore: cast_nullable_to_non_nullable + as int, )); } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsFileSearchResultContentImpl - extends _RunStepDetailsToolCallsFileSearchResultContent { - const _$RunStepDetailsToolCallsFileSearchResultContentImpl( - {this.type = 'text', @JsonKey(includeIfNull: false) this.text}) +class _$VectorStoreObjectFileCountsImpl extends _VectorStoreObjectFileCounts { + const _$VectorStoreObjectFileCountsImpl( + {@JsonKey(name: 'in_progress') required this.inProgress, + required this.completed, + required this.failed, + required this.cancelled, + required this.total}) : super._(); - factory _$RunStepDetailsToolCallsFileSearchResultContentImpl.fromJson( + factory _$VectorStoreObjectFileCountsImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsFileSearchResultContentImplFromJson(json); + _$$VectorStoreObjectFileCountsImplFromJson(json); - /// The type of the content. + /// The number of files that are currently being processed. @override - @JsonKey() - final String type; + @JsonKey(name: 'in_progress') + final int inProgress; - /// The text content of the file. + /// The number of files that have been successfully processed. @override - @JsonKey(includeIfNull: false) - final String? text; + final int completed; + /// The number of files that have failed to process. @override - String toString() { - return 'RunStepDetailsToolCallsFileSearchResultContent(type: $type, text: $text)'; - } + final int failed; + /// The number of files that were cancelled. @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsFileSearchResultContentImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.text, text) || other.text == text)); - } + final int cancelled; - @JsonKey(includeFromJson: false, includeToJson: false) + /// The total number of files. @override - int get hashCode => Object.hash(runtimeType, type, text); + final int total; - /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) @override - @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith< - _$RunStepDetailsToolCallsFileSearchResultContentImpl> - get copyWith => - __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl< - _$RunStepDetailsToolCallsFileSearchResultContentImpl>( - this, _$identity); + String toString() { + return 'VectorStoreObjectFileCounts(inProgress: $inProgress, completed: $completed, failed: $failed, cancelled: $cancelled, total: $total)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$VectorStoreObjectFileCountsImpl && + (identical(other.inProgress, inProgress) || + other.inProgress == inProgress) && + (identical(other.completed, completed) || + other.completed == completed) && + (identical(other.failed, failed) || other.failed == failed) && + (identical(other.cancelled, cancelled) || + other.cancelled == cancelled) && + (identical(other.total, total) || other.total == total)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, inProgress, completed, failed, cancelled, total); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$VectorStoreObjectFileCountsImplCopyWith<_$VectorStoreObjectFileCountsImpl> + get copyWith => __$$VectorStoreObjectFileCountsImplCopyWithImpl< + _$VectorStoreObjectFileCountsImpl>(this, _$identity); @override Map toJson() { - return _$$RunStepDetailsToolCallsFileSearchResultContentImplToJson( + return _$$VectorStoreObjectFileCountsImplToJson( this, ); } } -abstract class _RunStepDetailsToolCallsFileSearchResultContent - extends RunStepDetailsToolCallsFileSearchResultContent { - const factory _RunStepDetailsToolCallsFileSearchResultContent( - {final String type, - @JsonKey(includeIfNull: false) final String? text}) = - _$RunStepDetailsToolCallsFileSearchResultContentImpl; - const _RunStepDetailsToolCallsFileSearchResultContent._() : super._(); +abstract class _VectorStoreObjectFileCounts + extends VectorStoreObjectFileCounts { + const factory _VectorStoreObjectFileCounts( + {@JsonKey(name: 'in_progress') required final int inProgress, + required final int completed, + required final int failed, + required final int cancelled, + required final int total}) = _$VectorStoreObjectFileCountsImpl; + const _VectorStoreObjectFileCounts._() : super._(); - factory _RunStepDetailsToolCallsFileSearchResultContent.fromJson( - Map json) = - _$RunStepDetailsToolCallsFileSearchResultContentImpl.fromJson; + factory _VectorStoreObjectFileCounts.fromJson(Map json) = + _$VectorStoreObjectFileCountsImpl.fromJson; - /// The type of the content. @override - String get type; - /// The text content of the file. + /// The number of files that are currently being processed. + @JsonKey(name: 'in_progress') + int get inProgress; + @override + + /// The number of files that have been successfully processed. + int get completed; + @override + + /// The number of files that have failed to process. + int get failed; + @override + + /// The number of files that were cancelled. + int get cancelled; @override - @JsonKey(includeIfNull: false) - String? get text; - /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent - /// with the given fields replaced by the non-null parameter values. + /// The total number of files. 
+ int get total; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith< - _$RunStepDetailsToolCallsFileSearchResultContentImpl> + @JsonKey(ignore: true) + _$$VectorStoreObjectFileCountsImplCopyWith<_$VectorStoreObjectFileCountsImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepCompletionUsage _$RunStepCompletionUsageFromJson( +CreateVectorStoreRequest _$CreateVectorStoreRequestFromJson( Map json) { - return _RunStepCompletionUsage.fromJson(json); + return _CreateVectorStoreRequest.fromJson(json); } /// @nodoc -mixin _$RunStepCompletionUsage { - /// Number of completion tokens used over the course of the run step. - @JsonKey(name: 'completion_tokens') - int get completionTokens => throw _privateConstructorUsedError; +mixin _$CreateVectorStoreRequest { + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + @JsonKey(name: 'file_ids', includeIfNull: false) + List? get fileIds => throw _privateConstructorUsedError; - /// Number of prompt tokens used over the course of the run step. - @JsonKey(name: 'prompt_tokens') - int get promptTokens => throw _privateConstructorUsedError; + /// The name of the vector store. + String get name => throw _privateConstructorUsedError; - /// Total number of tokens used (prompt + completion). - @JsonKey(name: 'total_tokens') - int get totalTokens => throw _privateConstructorUsedError; + /// The expiration policy for a vector store. + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? get expiresAfter => + throw _privateConstructorUsedError; - /// Serializes this RunStepCompletionUsage to a JSON map. - Map toJson() => throw _privateConstructorUsedError; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + @JsonKey(includeIfNull: false) + dynamic get metadata => throw _privateConstructorUsedError; - /// Create a copy of RunStepCompletionUsage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $RunStepCompletionUsageCopyWith get copyWith => + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateVectorStoreRequestCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepCompletionUsageCopyWith<$Res> { - factory $RunStepCompletionUsageCopyWith(RunStepCompletionUsage value, - $Res Function(RunStepCompletionUsage) then) = - _$RunStepCompletionUsageCopyWithImpl<$Res, RunStepCompletionUsage>; +abstract class $CreateVectorStoreRequestCopyWith<$Res> { + factory $CreateVectorStoreRequestCopyWith(CreateVectorStoreRequest value, + $Res Function(CreateVectorStoreRequest) then) = + _$CreateVectorStoreRequestCopyWithImpl<$Res, CreateVectorStoreRequest>; @useResult $Res call( - {@JsonKey(name: 'completion_tokens') int completionTokens, - @JsonKey(name: 'prompt_tokens') int promptTokens, - @JsonKey(name: 'total_tokens') int totalTokens}); + {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + String name, + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? 
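The hunks above replace the removed `RunStepDetailsToolCallsFileSearchResultContent` boilerplate with the generated freezed code for `VectorStoreObjectFileCounts`, the per-status file counters attached to a vector store. A minimal usage sketch, assuming the public freezed constructor mirrors the generated `_$VectorStoreObjectFileCountsImpl` shown here and that the type is exported from `package:openai_dart/openai_dart.dart`:

```dart
import 'package:openai_dart/openai_dart.dart';

void main() {
  // `inProgress` maps to the snake_case wire name via @JsonKey(name: 'in_progress').
  const counts = VectorStoreObjectFileCounts(
    inProgress: 1,
    completed: 10,
    failed: 0,
    cancelled: 0,
    total: 11,
  );

  final json = counts.toJson();
  assert(json['in_progress'] == 1);

  // copyWith returns an updated immutable copy, leaving `counts` untouched.
  final settled = counts.copyWith(inProgress: 0, completed: 11);
  print(settled);
}
```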
expiresAfter, + @JsonKey(includeIfNull: false) dynamic metadata}); + + $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; } /// @nodoc -class _$RunStepCompletionUsageCopyWithImpl<$Res, - $Val extends RunStepCompletionUsage> - implements $RunStepCompletionUsageCopyWith<$Res> { - _$RunStepCompletionUsageCopyWithImpl(this._value, this._then); +class _$CreateVectorStoreRequestCopyWithImpl<$Res, + $Val extends CreateVectorStoreRequest> + implements $CreateVectorStoreRequestCopyWith<$Res> { + _$CreateVectorStoreRequestCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepCompletionUsage - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? completionTokens = null, - Object? promptTokens = null, - Object? totalTokens = null, + Object? fileIds = freezed, + Object? name = null, + Object? expiresAfter = freezed, + Object? metadata = freezed, }) { return _then(_value.copyWith( - completionTokens: null == completionTokens - ? _value.completionTokens - : completionTokens // ignore: cast_nullable_to_non_nullable - as int, - promptTokens: null == promptTokens - ? _value.promptTokens - : promptTokens // ignore: cast_nullable_to_non_nullable - as int, - totalTokens: null == totalTokens - ? _value.totalTokens - : totalTokens // ignore: cast_nullable_to_non_nullable - as int, + fileIds: freezed == fileIds + ? _value.fileIds + : fileIds // ignore: cast_nullable_to_non_nullable + as List?, + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + expiresAfter: freezed == expiresAfter + ? _value.expiresAfter + : expiresAfter // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfter?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, ) as $Val); } + + @override + @pragma('vm:prefer-inline') + $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter { + if (_value.expiresAfter == null) { + return null; + } + + return $VectorStoreExpirationAfterCopyWith<$Res>(_value.expiresAfter!, + (value) { + return _then(_value.copyWith(expiresAfter: value) as $Val); + }); + } } /// @nodoc -abstract class _$$RunStepCompletionUsageImplCopyWith<$Res> - implements $RunStepCompletionUsageCopyWith<$Res> { - factory _$$RunStepCompletionUsageImplCopyWith( - _$RunStepCompletionUsageImpl value, - $Res Function(_$RunStepCompletionUsageImpl) then) = - __$$RunStepCompletionUsageImplCopyWithImpl<$Res>; +abstract class _$$CreateVectorStoreRequestImplCopyWith<$Res> + implements $CreateVectorStoreRequestCopyWith<$Res> { + factory _$$CreateVectorStoreRequestImplCopyWith( + _$CreateVectorStoreRequestImpl value, + $Res Function(_$CreateVectorStoreRequestImpl) then) = + __$$CreateVectorStoreRequestImplCopyWithImpl<$Res>; @override @useResult $Res call( - {@JsonKey(name: 'completion_tokens') int completionTokens, - @JsonKey(name: 'prompt_tokens') int promptTokens, - @JsonKey(name: 'total_tokens') int totalTokens}); + {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + String name, + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? expiresAfter, + @JsonKey(includeIfNull: false) dynamic metadata}); + + @override + $VectorStoreExpirationAfterCopyWith<$Res>? 
get expiresAfter; } /// @nodoc -class __$$RunStepCompletionUsageImplCopyWithImpl<$Res> - extends _$RunStepCompletionUsageCopyWithImpl<$Res, - _$RunStepCompletionUsageImpl> - implements _$$RunStepCompletionUsageImplCopyWith<$Res> { - __$$RunStepCompletionUsageImplCopyWithImpl( - _$RunStepCompletionUsageImpl _value, - $Res Function(_$RunStepCompletionUsageImpl) _then) +class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> + extends _$CreateVectorStoreRequestCopyWithImpl<$Res, + _$CreateVectorStoreRequestImpl> + implements _$$CreateVectorStoreRequestImplCopyWith<$Res> { + __$$CreateVectorStoreRequestImplCopyWithImpl( + _$CreateVectorStoreRequestImpl _value, + $Res Function(_$CreateVectorStoreRequestImpl) _then) : super(_value, _then); - /// Create a copy of RunStepCompletionUsage - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? completionTokens = null, - Object? promptTokens = null, - Object? totalTokens = null, + Object? fileIds = freezed, + Object? name = null, + Object? expiresAfter = freezed, + Object? metadata = freezed, }) { - return _then(_$RunStepCompletionUsageImpl( - completionTokens: null == completionTokens - ? _value.completionTokens - : completionTokens // ignore: cast_nullable_to_non_nullable - as int, - promptTokens: null == promptTokens - ? _value.promptTokens - : promptTokens // ignore: cast_nullable_to_non_nullable - as int, - totalTokens: null == totalTokens - ? _value.totalTokens - : totalTokens // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$CreateVectorStoreRequestImpl( + fileIds: freezed == fileIds + ? _value._fileIds + : fileIds // ignore: cast_nullable_to_non_nullable + as List?, + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + expiresAfter: freezed == expiresAfter + ? _value.expiresAfter + : expiresAfter // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfter?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, )); } } /// @nodoc @JsonSerializable() -class _$RunStepCompletionUsageImpl extends _RunStepCompletionUsage { - const _$RunStepCompletionUsageImpl( - {@JsonKey(name: 'completion_tokens') required this.completionTokens, - @JsonKey(name: 'prompt_tokens') required this.promptTokens, - @JsonKey(name: 'total_tokens') required this.totalTokens}) - : super._(); +class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { + const _$CreateVectorStoreRequestImpl( + {@JsonKey(name: 'file_ids', includeIfNull: false) + final List? fileIds, + required this.name, + @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, + @JsonKey(includeIfNull: false) this.metadata}) + : _fileIds = fileIds, + super._(); - factory _$RunStepCompletionUsageImpl.fromJson(Map json) => - _$$RunStepCompletionUsageImplFromJson(json); + factory _$CreateVectorStoreRequestImpl.fromJson(Map json) => + _$$CreateVectorStoreRequestImplFromJson(json); - /// Number of completion tokens used over the course of the run step. + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + final List? _fileIds; + + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. 
@override - @JsonKey(name: 'completion_tokens') - final int completionTokens; + @JsonKey(name: 'file_ids', includeIfNull: false) + List? get fileIds { + final value = _fileIds; + if (value == null) return null; + if (_fileIds is EqualUnmodifiableListView) return _fileIds; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } - /// Number of prompt tokens used over the course of the run step. + /// The name of the vector store. @override - @JsonKey(name: 'prompt_tokens') - final int promptTokens; + final String name; - /// Total number of tokens used (prompt + completion). + /// The expiration policy for a vector store. @override - @JsonKey(name: 'total_tokens') - final int totalTokens; + @JsonKey(name: 'expires_after', includeIfNull: false) + final VectorStoreExpirationAfter? expiresAfter; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + @override + @JsonKey(includeIfNull: false) + final dynamic metadata; @override String toString() { - return 'RunStepCompletionUsage(completionTokens: $completionTokens, promptTokens: $promptTokens, totalTokens: $totalTokens)'; + return 'CreateVectorStoreRequest(fileIds: $fileIds, name: $name, expiresAfter: $expiresAfter, metadata: $metadata)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepCompletionUsageImpl && - (identical(other.completionTokens, completionTokens) || - other.completionTokens == completionTokens) && - (identical(other.promptTokens, promptTokens) || - other.promptTokens == promptTokens) && - (identical(other.totalTokens, totalTokens) || - other.totalTokens == totalTokens)); + other is _$CreateVectorStoreRequestImpl && + const DeepCollectionEquality().equals(other._fileIds, _fileIds) && + (identical(other.name, name) || other.name == name) && + (identical(other.expiresAfter, expiresAfter) || + other.expiresAfter == expiresAfter) && + const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => - Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); + int get hashCode => Object.hash( + runtimeType, + const DeepCollectionEquality().hash(_fileIds), + name, + expiresAfter, + const DeepCollectionEquality().hash(metadata)); - /// Create a copy of RunStepCompletionUsage - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepCompletionUsageImplCopyWith<_$RunStepCompletionUsageImpl> - get copyWith => __$$RunStepCompletionUsageImplCopyWithImpl< - _$RunStepCompletionUsageImpl>(this, _$identity); + _$$CreateVectorStoreRequestImplCopyWith<_$CreateVectorStoreRequestImpl> + get copyWith => __$$CreateVectorStoreRequestImplCopyWithImpl< + _$CreateVectorStoreRequestImpl>(this, _$identity); @override Map toJson() { - return _$$RunStepCompletionUsageImplToJson( + return _$$CreateVectorStoreRequestImplToJson( this, ); } } -abstract class _RunStepCompletionUsage extends RunStepCompletionUsage { - const factory _RunStepCompletionUsage( - {@JsonKey(name: 'completion_tokens') required final int completionTokens, - @JsonKey(name: 'prompt_tokens') required final int promptTokens, - @JsonKey(name: 'total_tokens') - required final int totalTokens}) = _$RunStepCompletionUsageImpl; - const _RunStepCompletionUsage._() : super._(); +abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { + const factory _CreateVectorStoreRequest( + {@JsonKey(name: 'file_ids', includeIfNull: false) + final List? fileIds, + required final String name, + @JsonKey(name: 'expires_after', includeIfNull: false) + final VectorStoreExpirationAfter? expiresAfter, + @JsonKey(includeIfNull: false) final dynamic metadata}) = + _$CreateVectorStoreRequestImpl; + const _CreateVectorStoreRequest._() : super._(); - factory _RunStepCompletionUsage.fromJson(Map json) = - _$RunStepCompletionUsageImpl.fromJson; + factory _CreateVectorStoreRequest.fromJson(Map json) = + _$CreateVectorStoreRequestImpl.fromJson; - /// Number of completion tokens used over the course of the run step. @override - @JsonKey(name: 'completion_tokens') - int get completionTokens; - /// Number of prompt tokens used over the course of the run step. + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + @JsonKey(name: 'file_ids', includeIfNull: false) + List? get fileIds; @override - @JsonKey(name: 'prompt_tokens') - int get promptTokens; - /// Total number of tokens used (prompt + completion). + /// The name of the vector store. + String get name; + @override + + /// The expiration policy for a vector store. + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? get expiresAfter; @override - @JsonKey(name: 'total_tokens') - int get totalTokens; - /// Create a copy of RunStepCompletionUsage - /// with the given fields replaced by the non-null parameter values. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ @JsonKey(includeIfNull: false) + dynamic get metadata; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepCompletionUsageImplCopyWith<_$RunStepCompletionUsageImpl> + @JsonKey(ignore: true) + _$$CreateVectorStoreRequestImplCopyWith<_$CreateVectorStoreRequestImpl> get copyWith => throw _privateConstructorUsedError; } -VectorStoreExpirationAfter _$VectorStoreExpirationAfterFromJson( +UpdateVectorStoreRequest _$UpdateVectorStoreRequestFromJson( Map json) { - return _VectorStoreExpirationAfter.fromJson(json); + return _UpdateVectorStoreRequest.fromJson(json); } /// @nodoc -mixin _$VectorStoreExpirationAfter { - /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. - VectorStoreExpirationAfterAnchor get anchor => +mixin _$UpdateVectorStoreRequest { + /// The name of the vector store. + @JsonKey(includeIfNull: false) + String? get name => throw _privateConstructorUsedError; + + /// The expiration policy for a vector store. + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? get expiresAfter => throw _privateConstructorUsedError; - /// The number of days after the anchor time that the vector store will expire. - int get days => throw _privateConstructorUsedError; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + @JsonKey(includeIfNull: false) + dynamic get metadata => throw _privateConstructorUsedError; - /// Serializes this VectorStoreExpirationAfter to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of VectorStoreExpirationAfter - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $VectorStoreExpirationAfterCopyWith - get copyWith => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $UpdateVectorStoreRequestCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $VectorStoreExpirationAfterCopyWith<$Res> { - factory $VectorStoreExpirationAfterCopyWith(VectorStoreExpirationAfter value, - $Res Function(VectorStoreExpirationAfter) then) = - _$VectorStoreExpirationAfterCopyWithImpl<$Res, - VectorStoreExpirationAfter>; +abstract class $UpdateVectorStoreRequestCopyWith<$Res> { + factory $UpdateVectorStoreRequestCopyWith(UpdateVectorStoreRequest value, + $Res Function(UpdateVectorStoreRequest) then) = + _$UpdateVectorStoreRequestCopyWithImpl<$Res, UpdateVectorStoreRequest>; @useResult - $Res call({VectorStoreExpirationAfterAnchor anchor, int days}); + $Res call( + {@JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? expiresAfter, + @JsonKey(includeIfNull: false) dynamic metadata}); + + $VectorStoreExpirationAfterCopyWith<$Res>? 
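With `CreateVectorStoreRequest` and the reworked `VectorStoreExpirationAfter` now complete above, here is a sketch of building a create request. It assumes the public constructors match the generated implementations in this hunk; the `lastActiveAt` enum member name is an assumption based on the `last_active_at` anchor documented earlier in the file, and the file ID is a placeholder:

```dart
import 'package:openai_dart/openai_dart.dart';

void main() {
  // Only `name` is required; `fileIds`, `expiresAfter` and `metadata` are
  // optional and dropped from the JSON payload when null (includeIfNull: false).
  const request = CreateVectorStoreRequest(
    name: 'Support FAQ',
    fileIds: ['file-abc123'],
    expiresAfter: VectorStoreExpirationAfter(
      // Assumed Dart name for the `last_active_at` anchor.
      anchor: VectorStoreExpirationAfterAnchor.lastActiveAt,
      days: 7,
    ),
  );

  final json = request.toJson();
  // Keys use the wire names declared via @JsonKey: file_ids, name, expires_after.
  assert(json.containsKey('file_ids') && json.containsKey('expires_after'));
}
```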
get expiresAfter; } /// @nodoc -class _$VectorStoreExpirationAfterCopyWithImpl<$Res, - $Val extends VectorStoreExpirationAfter> - implements $VectorStoreExpirationAfterCopyWith<$Res> { - _$VectorStoreExpirationAfterCopyWithImpl(this._value, this._then); +class _$UpdateVectorStoreRequestCopyWithImpl<$Res, + $Val extends UpdateVectorStoreRequest> + implements $UpdateVectorStoreRequestCopyWith<$Res> { + _$UpdateVectorStoreRequestCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of VectorStoreExpirationAfter - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? anchor = null, - Object? days = null, + Object? name = freezed, + Object? expiresAfter = freezed, + Object? metadata = freezed, }) { return _then(_value.copyWith( - anchor: null == anchor - ? _value.anchor - : anchor // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfterAnchor, - days: null == days - ? _value.days - : days // ignore: cast_nullable_to_non_nullable - as int, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + expiresAfter: freezed == expiresAfter + ? _value.expiresAfter + : expiresAfter // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfter?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, ) as $Val); } + + @override + @pragma('vm:prefer-inline') + $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter { + if (_value.expiresAfter == null) { + return null; + } + + return $VectorStoreExpirationAfterCopyWith<$Res>(_value.expiresAfter!, + (value) { + return _then(_value.copyWith(expiresAfter: value) as $Val); + }); + } } /// @nodoc -abstract class _$$VectorStoreExpirationAfterImplCopyWith<$Res> - implements $VectorStoreExpirationAfterCopyWith<$Res> { - factory _$$VectorStoreExpirationAfterImplCopyWith( - _$VectorStoreExpirationAfterImpl value, - $Res Function(_$VectorStoreExpirationAfterImpl) then) = - __$$VectorStoreExpirationAfterImplCopyWithImpl<$Res>; +abstract class _$$UpdateVectorStoreRequestImplCopyWith<$Res> + implements $UpdateVectorStoreRequestCopyWith<$Res> { + factory _$$UpdateVectorStoreRequestImplCopyWith( + _$UpdateVectorStoreRequestImpl value, + $Res Function(_$UpdateVectorStoreRequestImpl) then) = + __$$UpdateVectorStoreRequestImplCopyWithImpl<$Res>; @override @useResult - $Res call({VectorStoreExpirationAfterAnchor anchor, int days}); + $Res call( + {@JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? expiresAfter, + @JsonKey(includeIfNull: false) dynamic metadata}); + + @override + $VectorStoreExpirationAfterCopyWith<$Res>? 
get expiresAfter; } /// @nodoc -class __$$VectorStoreExpirationAfterImplCopyWithImpl<$Res> - extends _$VectorStoreExpirationAfterCopyWithImpl<$Res, - _$VectorStoreExpirationAfterImpl> - implements _$$VectorStoreExpirationAfterImplCopyWith<$Res> { - __$$VectorStoreExpirationAfterImplCopyWithImpl( - _$VectorStoreExpirationAfterImpl _value, - $Res Function(_$VectorStoreExpirationAfterImpl) _then) +class __$$UpdateVectorStoreRequestImplCopyWithImpl<$Res> + extends _$UpdateVectorStoreRequestCopyWithImpl<$Res, + _$UpdateVectorStoreRequestImpl> + implements _$$UpdateVectorStoreRequestImplCopyWith<$Res> { + __$$UpdateVectorStoreRequestImplCopyWithImpl( + _$UpdateVectorStoreRequestImpl _value, + $Res Function(_$UpdateVectorStoreRequestImpl) _then) : super(_value, _then); - /// Create a copy of VectorStoreExpirationAfter - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? anchor = null, - Object? days = null, + Object? name = freezed, + Object? expiresAfter = freezed, + Object? metadata = freezed, }) { - return _then(_$VectorStoreExpirationAfterImpl( - anchor: null == anchor - ? _value.anchor - : anchor // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfterAnchor, - days: null == days - ? _value.days - : days // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$UpdateVectorStoreRequestImpl( + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + expiresAfter: freezed == expiresAfter + ? _value.expiresAfter + : expiresAfter // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfter?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, )); } } /// @nodoc @JsonSerializable() -class _$VectorStoreExpirationAfterImpl extends _VectorStoreExpirationAfter { - const _$VectorStoreExpirationAfterImpl( - {required this.anchor, required this.days}) - : super._(); - - factory _$VectorStoreExpirationAfterImpl.fromJson( - Map json) => - _$$VectorStoreExpirationAfterImplFromJson(json); +class _$UpdateVectorStoreRequestImpl extends _UpdateVectorStoreRequest { + const _$UpdateVectorStoreRequestImpl( + {@JsonKey(includeIfNull: false) this.name, + @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, + @JsonKey(includeIfNull: false) this.metadata}) + : super._(); - /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. + factory _$UpdateVectorStoreRequestImpl.fromJson(Map json) => + _$$UpdateVectorStoreRequestImplFromJson(json); + + /// The name of the vector store. @override - final VectorStoreExpirationAfterAnchor anchor; + @JsonKey(includeIfNull: false) + final String? name; - /// The number of days after the anchor time that the vector store will expire. + /// The expiration policy for a vector store. @override - final int days; + @JsonKey(name: 'expires_after', includeIfNull: false) + final VectorStoreExpirationAfter? expiresAfter; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ @override + @JsonKey(includeIfNull: false) + final dynamic metadata; @override String toString() { - return 'VectorStoreExpirationAfter(anchor: $anchor, days: $days)'; + return 'UpdateVectorStoreRequest(name: $name, expiresAfter: $expiresAfter, metadata: $metadata)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$VectorStoreExpirationAfterImpl && - (identical(other.anchor, anchor) || other.anchor == anchor) && - (identical(other.days, days) || other.days == days)); + other is _$UpdateVectorStoreRequestImpl && + (identical(other.name, name) || other.name == name) && + (identical(other.expiresAfter, expiresAfter) || + other.expiresAfter == expiresAfter) && + const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, anchor, days); + int get hashCode => Object.hash(runtimeType, name, expiresAfter, + const DeepCollectionEquality().hash(metadata)); - /// Create a copy of VectorStoreExpirationAfter - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$VectorStoreExpirationAfterImplCopyWith<_$VectorStoreExpirationAfterImpl> - get copyWith => __$$VectorStoreExpirationAfterImplCopyWithImpl< - _$VectorStoreExpirationAfterImpl>(this, _$identity); + _$$UpdateVectorStoreRequestImplCopyWith<_$UpdateVectorStoreRequestImpl> + get copyWith => __$$UpdateVectorStoreRequestImplCopyWithImpl< + _$UpdateVectorStoreRequestImpl>(this, _$identity); @override Map toJson() { - return _$$VectorStoreExpirationAfterImplToJson( + return _$$UpdateVectorStoreRequestImplToJson( this, ); } } -abstract class _VectorStoreExpirationAfter extends VectorStoreExpirationAfter { - const factory _VectorStoreExpirationAfter( - {required final VectorStoreExpirationAfterAnchor anchor, - required final int days}) = _$VectorStoreExpirationAfterImpl; - const _VectorStoreExpirationAfter._() : super._(); +abstract class _UpdateVectorStoreRequest extends UpdateVectorStoreRequest { + const factory _UpdateVectorStoreRequest( + {@JsonKey(includeIfNull: false) final String? name, + @JsonKey(name: 'expires_after', includeIfNull: false) + final VectorStoreExpirationAfter? expiresAfter, + @JsonKey(includeIfNull: false) final dynamic metadata}) = + _$UpdateVectorStoreRequestImpl; + const _UpdateVectorStoreRequest._() : super._(); - factory _VectorStoreExpirationAfter.fromJson(Map json) = - _$VectorStoreExpirationAfterImpl.fromJson; + factory _UpdateVectorStoreRequest.fromJson(Map json) = + _$UpdateVectorStoreRequestImpl.fromJson; - /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. @override - VectorStoreExpirationAfterAnchor get anchor; - /// The number of days after the anchor time that the vector store will expire. + /// The name of the vector store. + @JsonKey(includeIfNull: false) + String? get name; + @override + + /// The expiration policy for a vector store. + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? get expiresAfter; @override - int get days; - /// Create a copy of VectorStoreExpirationAfter - /// with the given fields replaced by the non-null parameter values. + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + @JsonKey(includeIfNull: false) + dynamic get metadata; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$VectorStoreExpirationAfterImplCopyWith<_$VectorStoreExpirationAfterImpl> + @JsonKey(ignore: true) + _$$UpdateVectorStoreRequestImplCopyWith<_$UpdateVectorStoreRequestImpl> get copyWith => throw _privateConstructorUsedError; } -VectorStoreObject _$VectorStoreObjectFromJson(Map json) { - return _VectorStoreObject.fromJson(json); +ListVectorStoresResponse _$ListVectorStoresResponseFromJson( + Map json) { + return _ListVectorStoresResponse.fromJson(json); } /// @nodoc -mixin _$VectorStoreObject { - /// The identifier, which can be referenced in API endpoints. - String get id => throw _privateConstructorUsedError; - - /// The object type, which is always `vector_store`. +mixin _$ListVectorStoresResponse { + /// The object type, which is always `list`. String get object => throw _privateConstructorUsedError; - /// The Unix timestamp (in seconds) for when the vector store was created. - @JsonKey(name: 'created_at') - int get createdAt => throw _privateConstructorUsedError; - - /// The name of the vector store. - String? get name => throw _privateConstructorUsedError; - - /// The total number of bytes used by the files in the vector store. - @JsonKey(name: 'usage_bytes') - int get usageBytes => throw _privateConstructorUsedError; - - /// The number of files in the vector store. - @JsonKey(name: 'file_counts') - VectorStoreObjectFileCounts get fileCounts => - throw _privateConstructorUsedError; - - /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. - VectorStoreObjectStatus get status => throw _privateConstructorUsedError; - - /// The expiration policy for a vector store. - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? get expiresAfter => - throw _privateConstructorUsedError; + /// A list of assistant files. + List get data => throw _privateConstructorUsedError; - /// The Unix timestamp (in seconds) for when the vector store will expire. - @JsonKey(name: 'expires_at', includeIfNull: false) - int? get expiresAt => throw _privateConstructorUsedError; + /// The ID of the first assistant file in the list. + @JsonKey(name: 'first_id') + String? get firstId => throw _privateConstructorUsedError; - /// The Unix timestamp (in seconds) for when the vector store was last active. - @JsonKey(name: 'last_active_at') - int? get lastActiveAt => throw _privateConstructorUsedError; + /// The ID of the last assistant file in the list. + @JsonKey(name: 'last_id') + String? get lastId => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. - dynamic get metadata => throw _privateConstructorUsedError; + /// Whether there are more assistant files available. + @JsonKey(name: 'has_more') + bool get hasMore => throw _privateConstructorUsedError; - /// Serializes this VectorStoreObject to a JSON map. 
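`UpdateVectorStoreRequest`, finished above, differs from the create request only in that every field, including `name`, is optional. A minimal sketch under the same assumptions as the previous examples:

```dart
import 'package:openai_dart/openai_dart.dart';

void main() {
  // All fields are nullable and annotated with includeIfNull: false, so this
  // serializes to just {"name": "Renamed store"}.
  const request = UpdateVectorStoreRequest(name: 'Renamed store');
  print(request.toJson());
}
```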
Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of VectorStoreObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $VectorStoreObjectCopyWith get copyWith => + @JsonKey(ignore: true) + $ListVectorStoresResponseCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $VectorStoreObjectCopyWith<$Res> { - factory $VectorStoreObjectCopyWith( - VectorStoreObject value, $Res Function(VectorStoreObject) then) = - _$VectorStoreObjectCopyWithImpl<$Res, VectorStoreObject>; +abstract class $ListVectorStoresResponseCopyWith<$Res> { + factory $ListVectorStoresResponseCopyWith(ListVectorStoresResponse value, + $Res Function(ListVectorStoresResponse) then) = + _$ListVectorStoresResponseCopyWithImpl<$Res, ListVectorStoresResponse>; @useResult $Res call( - {String id, - String object, - @JsonKey(name: 'created_at') int createdAt, - String? name, - @JsonKey(name: 'usage_bytes') int usageBytes, - @JsonKey(name: 'file_counts') VectorStoreObjectFileCounts fileCounts, - VectorStoreObjectStatus status, - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? expiresAfter, - @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, - @JsonKey(name: 'last_active_at') int? lastActiveAt, - dynamic metadata}); - - $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts; - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + {String object, + List data, + @JsonKey(name: 'first_id') String? firstId, + @JsonKey(name: 'last_id') String? lastId, + @JsonKey(name: 'has_more') bool hasMore}); } /// @nodoc -class _$VectorStoreObjectCopyWithImpl<$Res, $Val extends VectorStoreObject> - implements $VectorStoreObjectCopyWith<$Res> { - _$VectorStoreObjectCopyWithImpl(this._value, this._then); +class _$ListVectorStoresResponseCopyWithImpl<$Res, + $Val extends ListVectorStoresResponse> + implements $ListVectorStoresResponseCopyWith<$Res> { + _$ListVectorStoresResponseCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of VectorStoreObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, Object? object = null, - Object? createdAt = null, - Object? name = freezed, - Object? usageBytes = null, - Object? fileCounts = null, - Object? status = null, - Object? expiresAfter = freezed, - Object? expiresAt = freezed, - Object? lastActiveAt = freezed, - Object? metadata = freezed, + Object? data = null, + Object? firstId = freezed, + Object? lastId = freezed, + Object? hasMore = null, }) { return _then(_value.copyWith( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable as String, - createdAt: null == createdAt - ? _value.createdAt - : createdAt // ignore: cast_nullable_to_non_nullable - as int, - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as List, + firstId: freezed == firstId + ? _value.firstId + : firstId // ignore: cast_nullable_to_non_nullable as String?, - usageBytes: null == usageBytes - ? 
_value.usageBytes - : usageBytes // ignore: cast_nullable_to_non_nullable - as int, - fileCounts: null == fileCounts - ? _value.fileCounts - : fileCounts // ignore: cast_nullable_to_non_nullable - as VectorStoreObjectFileCounts, - status: null == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as VectorStoreObjectStatus, - expiresAfter: freezed == expiresAfter - ? _value.expiresAfter - : expiresAfter // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfter?, - expiresAt: freezed == expiresAt - ? _value.expiresAt - : expiresAt // ignore: cast_nullable_to_non_nullable - as int?, - lastActiveAt: freezed == lastActiveAt - ? _value.lastActiveAt - : lastActiveAt // ignore: cast_nullable_to_non_nullable - as int?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, + lastId: freezed == lastId + ? _value.lastId + : lastId // ignore: cast_nullable_to_non_nullable + as String?, + hasMore: null == hasMore + ? _value.hasMore + : hasMore // ignore: cast_nullable_to_non_nullable + as bool, ) as $Val); } - - /// Create a copy of VectorStoreObject - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts { - return $VectorStoreObjectFileCountsCopyWith<$Res>(_value.fileCounts, - (value) { - return _then(_value.copyWith(fileCounts: value) as $Val); - }); - } - - /// Create a copy of VectorStoreObject - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter { - if (_value.expiresAfter == null) { - return null; - } - - return $VectorStoreExpirationAfterCopyWith<$Res>(_value.expiresAfter!, - (value) { - return _then(_value.copyWith(expiresAfter: value) as $Val); - }); - } } /// @nodoc -abstract class _$$VectorStoreObjectImplCopyWith<$Res> - implements $VectorStoreObjectCopyWith<$Res> { - factory _$$VectorStoreObjectImplCopyWith(_$VectorStoreObjectImpl value, - $Res Function(_$VectorStoreObjectImpl) then) = - __$$VectorStoreObjectImplCopyWithImpl<$Res>; +abstract class _$$ListVectorStoresResponseImplCopyWith<$Res> + implements $ListVectorStoresResponseCopyWith<$Res> { + factory _$$ListVectorStoresResponseImplCopyWith( + _$ListVectorStoresResponseImpl value, + $Res Function(_$ListVectorStoresResponseImpl) then) = + __$$ListVectorStoresResponseImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String id, - String object, - @JsonKey(name: 'created_at') int createdAt, - String? name, - @JsonKey(name: 'usage_bytes') int usageBytes, - @JsonKey(name: 'file_counts') VectorStoreObjectFileCounts fileCounts, - VectorStoreObjectStatus status, - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? expiresAfter, - @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, - @JsonKey(name: 'last_active_at') int? lastActiveAt, - dynamic metadata}); - - @override - $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts; - @override - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + {String object, + List data, + @JsonKey(name: 'first_id') String? firstId, + @JsonKey(name: 'last_id') String? 
lastId, + @JsonKey(name: 'has_more') bool hasMore}); } /// @nodoc -class __$$VectorStoreObjectImplCopyWithImpl<$Res> - extends _$VectorStoreObjectCopyWithImpl<$Res, _$VectorStoreObjectImpl> - implements _$$VectorStoreObjectImplCopyWith<$Res> { - __$$VectorStoreObjectImplCopyWithImpl(_$VectorStoreObjectImpl _value, - $Res Function(_$VectorStoreObjectImpl) _then) +class __$$ListVectorStoresResponseImplCopyWithImpl<$Res> + extends _$ListVectorStoresResponseCopyWithImpl<$Res, + _$ListVectorStoresResponseImpl> + implements _$$ListVectorStoresResponseImplCopyWith<$Res> { + __$$ListVectorStoresResponseImplCopyWithImpl( + _$ListVectorStoresResponseImpl _value, + $Res Function(_$ListVectorStoresResponseImpl) _then) : super(_value, _then); - /// Create a copy of VectorStoreObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, Object? object = null, - Object? createdAt = null, - Object? name = freezed, - Object? usageBytes = null, - Object? fileCounts = null, - Object? status = null, - Object? expiresAfter = freezed, - Object? expiresAt = freezed, - Object? lastActiveAt = freezed, - Object? metadata = freezed, + Object? data = null, + Object? firstId = freezed, + Object? lastId = freezed, + Object? hasMore = null, }) { - return _then(_$VectorStoreObjectImpl( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, + return _then(_$ListVectorStoresResponseImpl( object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable as String, - createdAt: null == createdAt - ? _value.createdAt - : createdAt // ignore: cast_nullable_to_non_nullable - as int, - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable + data: null == data + ? _value._data + : data // ignore: cast_nullable_to_non_nullable + as List, + firstId: freezed == firstId + ? _value.firstId + : firstId // ignore: cast_nullable_to_non_nullable as String?, - usageBytes: null == usageBytes - ? _value.usageBytes - : usageBytes // ignore: cast_nullable_to_non_nullable - as int, - fileCounts: null == fileCounts - ? _value.fileCounts - : fileCounts // ignore: cast_nullable_to_non_nullable - as VectorStoreObjectFileCounts, - status: null == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as VectorStoreObjectStatus, - expiresAfter: freezed == expiresAfter - ? _value.expiresAfter - : expiresAfter // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfter?, - expiresAt: freezed == expiresAt - ? _value.expiresAt - : expiresAt // ignore: cast_nullable_to_non_nullable - as int?, - lastActiveAt: freezed == lastActiveAt - ? _value.lastActiveAt - : lastActiveAt // ignore: cast_nullable_to_non_nullable - as int?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, + lastId: freezed == lastId + ? _value.lastId + : lastId // ignore: cast_nullable_to_non_nullable + as String?, + hasMore: null == hasMore + ? 
_value.hasMore + : hasMore // ignore: cast_nullable_to_non_nullable + as bool, )); } } /// @nodoc @JsonSerializable() -class _$VectorStoreObjectImpl extends _VectorStoreObject { - const _$VectorStoreObjectImpl( - {required this.id, - required this.object, - @JsonKey(name: 'created_at') required this.createdAt, - required this.name, - @JsonKey(name: 'usage_bytes') required this.usageBytes, - @JsonKey(name: 'file_counts') required this.fileCounts, - required this.status, - @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, - @JsonKey(name: 'expires_at', includeIfNull: false) this.expiresAt, - @JsonKey(name: 'last_active_at') required this.lastActiveAt, - required this.metadata}) - : super._(); - - factory _$VectorStoreObjectImpl.fromJson(Map json) => - _$$VectorStoreObjectImplFromJson(json); +class _$ListVectorStoresResponseImpl extends _ListVectorStoresResponse { + const _$ListVectorStoresResponseImpl( + {required this.object, + required final List data, + @JsonKey(name: 'first_id') required this.firstId, + @JsonKey(name: 'last_id') required this.lastId, + @JsonKey(name: 'has_more') required this.hasMore}) + : _data = data, + super._(); - /// The identifier, which can be referenced in API endpoints. - @override - final String id; + factory _$ListVectorStoresResponseImpl.fromJson(Map json) => + _$$ListVectorStoresResponseImplFromJson(json); - /// The object type, which is always `vector_store`. + /// The object type, which is always `list`. @override final String object; - /// The Unix timestamp (in seconds) for when the vector store was created. - @override - @JsonKey(name: 'created_at') - final int createdAt; - - /// The name of the vector store. - @override - final String? name; - - /// The total number of bytes used by the files in the vector store. - @override - @JsonKey(name: 'usage_bytes') - final int usageBytes; - - /// The number of files in the vector store. - @override - @JsonKey(name: 'file_counts') - final VectorStoreObjectFileCounts fileCounts; - - /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. - @override - final VectorStoreObjectStatus status; + /// A list of assistant files. + final List _data; - /// The expiration policy for a vector store. + /// A list of assistant files. @override - @JsonKey(name: 'expires_after', includeIfNull: false) - final VectorStoreExpirationAfter? expiresAfter; + List get data { + if (_data is EqualUnmodifiableListView) return _data; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_data); + } - /// The Unix timestamp (in seconds) for when the vector store will expire. + /// The ID of the first assistant file in the list. @override - @JsonKey(name: 'expires_at', includeIfNull: false) - final int? expiresAt; + @JsonKey(name: 'first_id') + final String? firstId; - /// The Unix timestamp (in seconds) for when the vector store was last active. + /// The ID of the last assistant file in the list. @override - @JsonKey(name: 'last_active_at') - final int? lastActiveAt; + @JsonKey(name: 'last_id') + final String? lastId; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Whether there are more assistant files available. 
@override - final dynamic metadata; + @JsonKey(name: 'has_more') + final bool hasMore; @override String toString() { - return 'VectorStoreObject(id: $id, object: $object, createdAt: $createdAt, name: $name, usageBytes: $usageBytes, fileCounts: $fileCounts, status: $status, expiresAfter: $expiresAfter, expiresAt: $expiresAt, lastActiveAt: $lastActiveAt, metadata: $metadata)'; + return 'ListVectorStoresResponse(object: $object, data: $data, firstId: $firstId, lastId: $lastId, hasMore: $hasMore)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$VectorStoreObjectImpl && - (identical(other.id, id) || other.id == id) && + other is _$ListVectorStoresResponseImpl && (identical(other.object, object) || other.object == object) && - (identical(other.createdAt, createdAt) || - other.createdAt == createdAt) && - (identical(other.name, name) || other.name == name) && - (identical(other.usageBytes, usageBytes) || - other.usageBytes == usageBytes) && - (identical(other.fileCounts, fileCounts) || - other.fileCounts == fileCounts) && - (identical(other.status, status) || other.status == status) && - (identical(other.expiresAfter, expiresAfter) || - other.expiresAfter == expiresAfter) && - (identical(other.expiresAt, expiresAt) || - other.expiresAt == expiresAt) && - (identical(other.lastActiveAt, lastActiveAt) || - other.lastActiveAt == lastActiveAt) && - const DeepCollectionEquality().equals(other.metadata, metadata)); + const DeepCollectionEquality().equals(other._data, _data) && + (identical(other.firstId, firstId) || other.firstId == firstId) && + (identical(other.lastId, lastId) || other.lastId == lastId) && + (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash( - runtimeType, - id, - object, - createdAt, - name, - usageBytes, - fileCounts, - status, - expiresAfter, - expiresAt, - lastActiveAt, - const DeepCollectionEquality().hash(metadata)); + int get hashCode => Object.hash(runtimeType, object, + const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - /// Create a copy of VectorStoreObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$VectorStoreObjectImplCopyWith<_$VectorStoreObjectImpl> get copyWith => - __$$VectorStoreObjectImplCopyWithImpl<_$VectorStoreObjectImpl>( - this, _$identity); + _$$ListVectorStoresResponseImplCopyWith<_$ListVectorStoresResponseImpl> + get copyWith => __$$ListVectorStoresResponseImplCopyWithImpl< + _$ListVectorStoresResponseImpl>(this, _$identity); @override Map toJson() { - return _$$VectorStoreObjectImplToJson( + return _$$ListVectorStoresResponseImplToJson( this, ); } } -abstract class _VectorStoreObject extends VectorStoreObject { - const factory _VectorStoreObject( - {required final String id, - required final String object, - @JsonKey(name: 'created_at') required final int createdAt, - required final String? name, - @JsonKey(name: 'usage_bytes') required final int usageBytes, - @JsonKey(name: 'file_counts') - required final VectorStoreObjectFileCounts fileCounts, - required final VectorStoreObjectStatus status, - @JsonKey(name: 'expires_after', includeIfNull: false) - final VectorStoreExpirationAfter? expiresAfter, - @JsonKey(name: 'expires_at', includeIfNull: false) final int? 
expiresAt, - @JsonKey(name: 'last_active_at') required final int? lastActiveAt, - required final dynamic metadata}) = _$VectorStoreObjectImpl; - const _VectorStoreObject._() : super._(); +abstract class _ListVectorStoresResponse extends ListVectorStoresResponse { + const factory _ListVectorStoresResponse( + {required final String object, + required final List data, + @JsonKey(name: 'first_id') required final String? firstId, + @JsonKey(name: 'last_id') required final String? lastId, + @JsonKey(name: 'has_more') required final bool hasMore}) = + _$ListVectorStoresResponseImpl; + const _ListVectorStoresResponse._() : super._(); - factory _VectorStoreObject.fromJson(Map json) = - _$VectorStoreObjectImpl.fromJson; + factory _ListVectorStoresResponse.fromJson(Map json) = + _$ListVectorStoresResponseImpl.fromJson; - /// The identifier, which can be referenced in API endpoints. @override - String get id; - /// The object type, which is always `vector_store`. - @override + /// The object type, which is always `list`. String get object; - - /// The Unix timestamp (in seconds) for when the vector store was created. - @override - @JsonKey(name: 'created_at') - int get createdAt; - - /// The name of the vector store. - @override - String? get name; - - /// The total number of bytes used by the files in the vector store. - @override - @JsonKey(name: 'usage_bytes') - int get usageBytes; - - /// The number of files in the vector store. - @override - @JsonKey(name: 'file_counts') - VectorStoreObjectFileCounts get fileCounts; - - /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. @override - VectorStoreObjectStatus get status; - - /// The expiration policy for a vector store. - @override - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? get expiresAfter; - /// The Unix timestamp (in seconds) for when the vector store will expire. + /// A list of assistant files. + List get data; @override - @JsonKey(name: 'expires_at', includeIfNull: false) - int? get expiresAt; - /// The Unix timestamp (in seconds) for when the vector store was last active. + /// The ID of the first assistant file in the list. + @JsonKey(name: 'first_id') + String? get firstId; @override - @JsonKey(name: 'last_active_at') - int? get lastActiveAt; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// The ID of the last assistant file in the list. + @JsonKey(name: 'last_id') + String? get lastId; @override - dynamic get metadata; - /// Create a copy of VectorStoreObject - /// with the given fields replaced by the non-null parameter values. + /// Whether there are more assistant files available. 
+ @JsonKey(name: 'has_more') + bool get hasMore; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$VectorStoreObjectImplCopyWith<_$VectorStoreObjectImpl> get copyWith => - throw _privateConstructorUsedError; + @JsonKey(ignore: true) + _$$ListVectorStoresResponseImplCopyWith<_$ListVectorStoresResponseImpl> + get copyWith => throw _privateConstructorUsedError; } -VectorStoreObjectFileCounts _$VectorStoreObjectFileCountsFromJson( +DeleteVectorStoreResponse _$DeleteVectorStoreResponseFromJson( Map json) { - return _VectorStoreObjectFileCounts.fromJson(json); + return _DeleteVectorStoreResponse.fromJson(json); } /// @nodoc -mixin _$VectorStoreObjectFileCounts { - /// The number of files that are currently being processed. - @JsonKey(name: 'in_progress') - int get inProgress => throw _privateConstructorUsedError; - - /// The number of files that have been successfully processed. - int get completed => throw _privateConstructorUsedError; - - /// The number of files that have failed to process. - int get failed => throw _privateConstructorUsedError; +mixin _$DeleteVectorStoreResponse { + /// The ID of the deleted vector store. + String get id => throw _privateConstructorUsedError; - /// The number of files that were cancelled. - int get cancelled => throw _privateConstructorUsedError; + /// Whether the vector store was deleted. + bool get deleted => throw _privateConstructorUsedError; - /// The total number of files. - int get total => throw _privateConstructorUsedError; + /// The object type, which is always `vector_store.deleted`. + String get object => throw _privateConstructorUsedError; - /// Serializes this VectorStoreObjectFileCounts to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of VectorStoreObjectFileCounts - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $VectorStoreObjectFileCountsCopyWith - get copyWith => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $DeleteVectorStoreResponseCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $VectorStoreObjectFileCountsCopyWith<$Res> { - factory $VectorStoreObjectFileCountsCopyWith( - VectorStoreObjectFileCounts value, - $Res Function(VectorStoreObjectFileCounts) then) = - _$VectorStoreObjectFileCountsCopyWithImpl<$Res, - VectorStoreObjectFileCounts>; +abstract class $DeleteVectorStoreResponseCopyWith<$Res> { + factory $DeleteVectorStoreResponseCopyWith(DeleteVectorStoreResponse value, + $Res Function(DeleteVectorStoreResponse) then) = + _$DeleteVectorStoreResponseCopyWithImpl<$Res, DeleteVectorStoreResponse>; @useResult - $Res call( - {@JsonKey(name: 'in_progress') int inProgress, - int completed, - int failed, - int cancelled, - int total}); + $Res call({String id, bool deleted, String object}); } /// @nodoc -class _$VectorStoreObjectFileCountsCopyWithImpl<$Res, - $Val extends VectorStoreObjectFileCounts> - implements $VectorStoreObjectFileCountsCopyWith<$Res> { - _$VectorStoreObjectFileCountsCopyWithImpl(this._value, this._then); +class _$DeleteVectorStoreResponseCopyWithImpl<$Res, + $Val extends DeleteVectorStoreResponse> + implements $DeleteVectorStoreResponseCopyWith<$Res> { + _$DeleteVectorStoreResponseCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of VectorStoreObjectFileCounts - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? inProgress = null, - Object? completed = null, - Object? failed = null, - Object? cancelled = null, - Object? total = null, + Object? id = null, + Object? deleted = null, + Object? object = null, }) { return _then(_value.copyWith( - inProgress: null == inProgress - ? _value.inProgress - : inProgress // ignore: cast_nullable_to_non_nullable - as int, - completed: null == completed - ? _value.completed - : completed // ignore: cast_nullable_to_non_nullable - as int, - failed: null == failed - ? _value.failed - : failed // ignore: cast_nullable_to_non_nullable - as int, - cancelled: null == cancelled - ? _value.cancelled - : cancelled // ignore: cast_nullable_to_non_nullable - as int, - total: null == total - ? _value.total - : total // ignore: cast_nullable_to_non_nullable - as int, + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + deleted: null == deleted + ? _value.deleted + : deleted // ignore: cast_nullable_to_non_nullable + as bool, + object: null == object + ? 
_value.object + : object // ignore: cast_nullable_to_non_nullable + as String, ) as $Val); } } /// @nodoc -abstract class _$$VectorStoreObjectFileCountsImplCopyWith<$Res> - implements $VectorStoreObjectFileCountsCopyWith<$Res> { - factory _$$VectorStoreObjectFileCountsImplCopyWith( - _$VectorStoreObjectFileCountsImpl value, - $Res Function(_$VectorStoreObjectFileCountsImpl) then) = - __$$VectorStoreObjectFileCountsImplCopyWithImpl<$Res>; +abstract class _$$DeleteVectorStoreResponseImplCopyWith<$Res> + implements $DeleteVectorStoreResponseCopyWith<$Res> { + factory _$$DeleteVectorStoreResponseImplCopyWith( + _$DeleteVectorStoreResponseImpl value, + $Res Function(_$DeleteVectorStoreResponseImpl) then) = + __$$DeleteVectorStoreResponseImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(name: 'in_progress') int inProgress, - int completed, - int failed, - int cancelled, - int total}); + $Res call({String id, bool deleted, String object}); } /// @nodoc -class __$$VectorStoreObjectFileCountsImplCopyWithImpl<$Res> - extends _$VectorStoreObjectFileCountsCopyWithImpl<$Res, - _$VectorStoreObjectFileCountsImpl> - implements _$$VectorStoreObjectFileCountsImplCopyWith<$Res> { - __$$VectorStoreObjectFileCountsImplCopyWithImpl( - _$VectorStoreObjectFileCountsImpl _value, - $Res Function(_$VectorStoreObjectFileCountsImpl) _then) +class __$$DeleteVectorStoreResponseImplCopyWithImpl<$Res> + extends _$DeleteVectorStoreResponseCopyWithImpl<$Res, + _$DeleteVectorStoreResponseImpl> + implements _$$DeleteVectorStoreResponseImplCopyWith<$Res> { + __$$DeleteVectorStoreResponseImplCopyWithImpl( + _$DeleteVectorStoreResponseImpl _value, + $Res Function(_$DeleteVectorStoreResponseImpl) _then) : super(_value, _then); - /// Create a copy of VectorStoreObjectFileCounts - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? inProgress = null, - Object? completed = null, - Object? failed = null, - Object? cancelled = null, - Object? total = null, + Object? id = null, + Object? deleted = null, + Object? object = null, }) { - return _then(_$VectorStoreObjectFileCountsImpl( - inProgress: null == inProgress - ? _value.inProgress - : inProgress // ignore: cast_nullable_to_non_nullable - as int, - completed: null == completed - ? _value.completed - : completed // ignore: cast_nullable_to_non_nullable - as int, - failed: null == failed - ? _value.failed - : failed // ignore: cast_nullable_to_non_nullable - as int, - cancelled: null == cancelled - ? _value.cancelled - : cancelled // ignore: cast_nullable_to_non_nullable - as int, - total: null == total - ? _value.total - : total // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$DeleteVectorStoreResponseImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + deleted: null == deleted + ? _value.deleted + : deleted // ignore: cast_nullable_to_non_nullable + as bool, + object: null == object + ? 
_value.object + : object // ignore: cast_nullable_to_non_nullable + as String, )); } } /// @nodoc @JsonSerializable() -class _$VectorStoreObjectFileCountsImpl extends _VectorStoreObjectFileCounts { - const _$VectorStoreObjectFileCountsImpl( - {@JsonKey(name: 'in_progress') required this.inProgress, - required this.completed, - required this.failed, - required this.cancelled, - required this.total}) +class _$DeleteVectorStoreResponseImpl extends _DeleteVectorStoreResponse { + const _$DeleteVectorStoreResponseImpl( + {required this.id, required this.deleted, required this.object}) : super._(); - factory _$VectorStoreObjectFileCountsImpl.fromJson( - Map json) => - _$$VectorStoreObjectFileCountsImplFromJson(json); - - /// The number of files that are currently being processed. - @override - @JsonKey(name: 'in_progress') - final int inProgress; - - /// The number of files that have been successfully processed. - @override - final int completed; + factory _$DeleteVectorStoreResponseImpl.fromJson(Map json) => + _$$DeleteVectorStoreResponseImplFromJson(json); - /// The number of files that have failed to process. + /// The ID of the deleted vector store. @override - final int failed; + final String id; - /// The number of files that were cancelled. + /// Whether the vector store was deleted. @override - final int cancelled; + final bool deleted; - /// The total number of files. + /// The object type, which is always `vector_store.deleted`. @override - final int total; + final String object; @override String toString() { - return 'VectorStoreObjectFileCounts(inProgress: $inProgress, completed: $completed, failed: $failed, cancelled: $cancelled, total: $total)'; + return 'DeleteVectorStoreResponse(id: $id, deleted: $deleted, object: $object)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$VectorStoreObjectFileCountsImpl && - (identical(other.inProgress, inProgress) || - other.inProgress == inProgress) && - (identical(other.completed, completed) || - other.completed == completed) && - (identical(other.failed, failed) || other.failed == failed) && - (identical(other.cancelled, cancelled) || - other.cancelled == cancelled) && - (identical(other.total, total) || other.total == total)); + other is _$DeleteVectorStoreResponseImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.deleted, deleted) || other.deleted == deleted) && + (identical(other.object, object) || other.object == object)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => - Object.hash(runtimeType, inProgress, completed, failed, cancelled, total); + int get hashCode => Object.hash(runtimeType, id, deleted, object); - /// Create a copy of VectorStoreObjectFileCounts - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$VectorStoreObjectFileCountsImplCopyWith<_$VectorStoreObjectFileCountsImpl> - get copyWith => __$$VectorStoreObjectFileCountsImplCopyWithImpl< - _$VectorStoreObjectFileCountsImpl>(this, _$identity); + _$$DeleteVectorStoreResponseImplCopyWith<_$DeleteVectorStoreResponseImpl> + get copyWith => __$$DeleteVectorStoreResponseImplCopyWithImpl< + _$DeleteVectorStoreResponseImpl>(this, _$identity); @override Map toJson() { - return _$$VectorStoreObjectFileCountsImplToJson( + return _$$DeleteVectorStoreResponseImplToJson( this, ); } } -abstract class _VectorStoreObjectFileCounts - extends VectorStoreObjectFileCounts { - const factory _VectorStoreObjectFileCounts( - {@JsonKey(name: 'in_progress') required final int inProgress, - required final int completed, - required final int failed, - required final int cancelled, - required final int total}) = _$VectorStoreObjectFileCountsImpl; - const _VectorStoreObjectFileCounts._() : super._(); - - factory _VectorStoreObjectFileCounts.fromJson(Map json) = - _$VectorStoreObjectFileCountsImpl.fromJson; - - /// The number of files that are currently being processed. - @override - @JsonKey(name: 'in_progress') - int get inProgress; +abstract class _DeleteVectorStoreResponse extends DeleteVectorStoreResponse { + const factory _DeleteVectorStoreResponse( + {required final String id, + required final bool deleted, + required final String object}) = _$DeleteVectorStoreResponseImpl; + const _DeleteVectorStoreResponse._() : super._(); - /// The number of files that have been successfully processed. - @override - int get completed; + factory _DeleteVectorStoreResponse.fromJson(Map json) = + _$DeleteVectorStoreResponseImpl.fromJson; - /// The number of files that have failed to process. @override - int get failed; - /// The number of files that were cancelled. + /// The ID of the deleted vector store. + String get id; @override - int get cancelled; - /// The total number of files. + /// Whether the vector store was deleted. + bool get deleted; @override - int get total; - /// Create a copy of VectorStoreObjectFileCounts - /// with the given fields replaced by the non-null parameter values. + /// The object type, which is always `vector_store.deleted`. + String get object; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$VectorStoreObjectFileCountsImplCopyWith<_$VectorStoreObjectFileCountsImpl> + @JsonKey(ignore: true) + _$$DeleteVectorStoreResponseImplCopyWith<_$DeleteVectorStoreResponseImpl> get copyWith => throw _privateConstructorUsedError; } -CreateVectorStoreRequest _$CreateVectorStoreRequestFromJson( +VectorStoreFileObject _$VectorStoreFileObjectFromJson( Map json) { - return _CreateVectorStoreRequest.fromJson(json); + return _VectorStoreFileObject.fromJson(json); } /// @nodoc -mixin _$CreateVectorStoreRequest { - /// The name of the vector store. - @JsonKey(includeIfNull: false) - String? get name => throw _privateConstructorUsedError; +mixin _$VectorStoreFileObject { + /// The identifier, which can be referenced in API endpoints. + String get id => throw _privateConstructorUsedError; - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - @JsonKey(name: 'file_ids', includeIfNull: false) - List? 
get fileIds => throw _privateConstructorUsedError; + /// The object type, which is always `vector_store.file`. + String get object => throw _privateConstructorUsedError; - /// The expiration policy for a vector store. - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? get expiresAfter => - throw _privateConstructorUsedError; + /// The total vector store usage in bytes. Note that this may be different from the original file size. + @JsonKey(name: 'usage_bytes') + int get usageBytes => throw _privateConstructorUsedError; - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? get chunkingStrategy => - throw _privateConstructorUsedError; + /// The Unix timestamp (in seconds) for when the vector store file was created. + @JsonKey(name: 'created_at') + int get createdAt => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. - @JsonKey(includeIfNull: false) - dynamic get metadata => throw _privateConstructorUsedError; + /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. + @JsonKey(name: 'vector_store_id') + String get vectorStoreId => throw _privateConstructorUsedError; - /// Serializes this CreateVectorStoreRequest to a JSON map. - Map toJson() => throw _privateConstructorUsedError; + /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. + VectorStoreFileStatus get status => throw _privateConstructorUsedError; - /// Create a copy of CreateVectorStoreRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $CreateVectorStoreRequestCopyWith get copyWith => + /// The last error associated with this vector store file. Will be `null` if there are no errors. + @JsonKey(name: 'last_error') + VectorStoreFileObjectLastError? get lastError => + throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $VectorStoreFileObjectCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $CreateVectorStoreRequestCopyWith<$Res> { - factory $CreateVectorStoreRequestCopyWith(CreateVectorStoreRequest value, - $Res Function(CreateVectorStoreRequest) then) = - _$CreateVectorStoreRequestCopyWithImpl<$Res, CreateVectorStoreRequest>; +abstract class $VectorStoreFileObjectCopyWith<$Res> { + factory $VectorStoreFileObjectCopyWith(VectorStoreFileObject value, + $Res Function(VectorStoreFileObject) then) = + _$VectorStoreFileObjectCopyWithImpl<$Res, VectorStoreFileObject>; @useResult $Res call( - {@JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? 
expiresAfter, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? chunkingStrategy, - @JsonKey(includeIfNull: false) dynamic metadata}); + {String id, + String object, + @JsonKey(name: 'usage_bytes') int usageBytes, + @JsonKey(name: 'created_at') int createdAt, + @JsonKey(name: 'vector_store_id') String vectorStoreId, + VectorStoreFileStatus status, + @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError}); - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; - $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; + $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError; } /// @nodoc -class _$CreateVectorStoreRequestCopyWithImpl<$Res, - $Val extends CreateVectorStoreRequest> - implements $CreateVectorStoreRequestCopyWith<$Res> { - _$CreateVectorStoreRequestCopyWithImpl(this._value, this._then); +class _$VectorStoreFileObjectCopyWithImpl<$Res, + $Val extends VectorStoreFileObject> + implements $VectorStoreFileObjectCopyWith<$Res> { + _$VectorStoreFileObjectCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateVectorStoreRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? name = freezed, - Object? fileIds = freezed, - Object? expiresAfter = freezed, - Object? chunkingStrategy = freezed, - Object? metadata = freezed, + Object? id = null, + Object? object = null, + Object? usageBytes = null, + Object? createdAt = null, + Object? vectorStoreId = null, + Object? status = null, + Object? lastError = freezed, }) { return _then(_value.copyWith( - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - fileIds: freezed == fileIds - ? _value.fileIds - : fileIds // ignore: cast_nullable_to_non_nullable - as List?, - expiresAfter: freezed == expiresAfter - ? _value.expiresAfter - : expiresAfter // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfter?, - chunkingStrategy: freezed == chunkingStrategy - ? _value.chunkingStrategy - : chunkingStrategy // ignore: cast_nullable_to_non_nullable - as ChunkingStrategyRequestParam?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + usageBytes: null == usageBytes + ? _value.usageBytes + : usageBytes // ignore: cast_nullable_to_non_nullable + as int, + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + vectorStoreId: null == vectorStoreId + ? _value.vectorStoreId + : vectorStoreId // ignore: cast_nullable_to_non_nullable + as String, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as VectorStoreFileStatus, + lastError: freezed == lastError + ? _value.lastError + : lastError // ignore: cast_nullable_to_non_nullable + as VectorStoreFileObjectLastError?, ) as $Val); } - /// Create a copy of CreateVectorStoreRequest - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $VectorStoreExpirationAfterCopyWith<$Res>? 
get expiresAfter { - if (_value.expiresAfter == null) { - return null; - } - - return $VectorStoreExpirationAfterCopyWith<$Res>(_value.expiresAfter!, - (value) { - return _then(_value.copyWith(expiresAfter: value) as $Val); - }); - } - - /// Create a copy of CreateVectorStoreRequest - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { - if (_value.chunkingStrategy == null) { + $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError { + if (_value.lastError == null) { return null; } - return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, + return $VectorStoreFileObjectLastErrorCopyWith<$Res>(_value.lastError!, (value) { - return _then(_value.copyWith(chunkingStrategy: value) as $Val); + return _then(_value.copyWith(lastError: value) as $Val); }); } } /// @nodoc -abstract class _$$CreateVectorStoreRequestImplCopyWith<$Res> - implements $CreateVectorStoreRequestCopyWith<$Res> { - factory _$$CreateVectorStoreRequestImplCopyWith( - _$CreateVectorStoreRequestImpl value, - $Res Function(_$CreateVectorStoreRequestImpl) then) = - __$$CreateVectorStoreRequestImplCopyWithImpl<$Res>; +abstract class _$$VectorStoreFileObjectImplCopyWith<$Res> + implements $VectorStoreFileObjectCopyWith<$Res> { + factory _$$VectorStoreFileObjectImplCopyWith( + _$VectorStoreFileObjectImpl value, + $Res Function(_$VectorStoreFileObjectImpl) then) = + __$$VectorStoreFileObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {@JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? expiresAfter, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? chunkingStrategy, - @JsonKey(includeIfNull: false) dynamic metadata}); - - @override - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + {String id, + String object, + @JsonKey(name: 'usage_bytes') int usageBytes, + @JsonKey(name: 'created_at') int createdAt, + @JsonKey(name: 'vector_store_id') String vectorStoreId, + VectorStoreFileStatus status, + @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError}); + @override - $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; + $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError; } /// @nodoc -class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> - extends _$CreateVectorStoreRequestCopyWithImpl<$Res, - _$CreateVectorStoreRequestImpl> - implements _$$CreateVectorStoreRequestImplCopyWith<$Res> { - __$$CreateVectorStoreRequestImplCopyWithImpl( - _$CreateVectorStoreRequestImpl _value, - $Res Function(_$CreateVectorStoreRequestImpl) _then) +class __$$VectorStoreFileObjectImplCopyWithImpl<$Res> + extends _$VectorStoreFileObjectCopyWithImpl<$Res, + _$VectorStoreFileObjectImpl> + implements _$$VectorStoreFileObjectImplCopyWith<$Res> { + __$$VectorStoreFileObjectImplCopyWithImpl(_$VectorStoreFileObjectImpl _value, + $Res Function(_$VectorStoreFileObjectImpl) _then) : super(_value, _then); - /// Create a copy of CreateVectorStoreRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? name = freezed, - Object? fileIds = freezed, - Object? expiresAfter = freezed, - Object? chunkingStrategy = freezed, - Object? metadata = freezed, + Object? id = null, + Object? 
object = null, + Object? usageBytes = null, + Object? createdAt = null, + Object? vectorStoreId = null, + Object? status = null, + Object? lastError = freezed, }) { - return _then(_$CreateVectorStoreRequestImpl( - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - fileIds: freezed == fileIds - ? _value._fileIds - : fileIds // ignore: cast_nullable_to_non_nullable - as List?, - expiresAfter: freezed == expiresAfter - ? _value.expiresAfter - : expiresAfter // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfter?, - chunkingStrategy: freezed == chunkingStrategy - ? _value.chunkingStrategy - : chunkingStrategy // ignore: cast_nullable_to_non_nullable - as ChunkingStrategyRequestParam?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, + return _then(_$VectorStoreFileObjectImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + usageBytes: null == usageBytes + ? _value.usageBytes + : usageBytes // ignore: cast_nullable_to_non_nullable + as int, + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + vectorStoreId: null == vectorStoreId + ? _value.vectorStoreId + : vectorStoreId // ignore: cast_nullable_to_non_nullable + as String, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as VectorStoreFileStatus, + lastError: freezed == lastError + ? _value.lastError + : lastError // ignore: cast_nullable_to_non_nullable + as VectorStoreFileObjectLastError?, )); } } /// @nodoc @JsonSerializable() -class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { - const _$CreateVectorStoreRequestImpl( - {@JsonKey(includeIfNull: false) this.name, - @JsonKey(name: 'file_ids', includeIfNull: false) - final List? fileIds, - @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - this.chunkingStrategy, - @JsonKey(includeIfNull: false) this.metadata}) - : _fileIds = fileIds, - super._(); +class _$VectorStoreFileObjectImpl extends _VectorStoreFileObject { + const _$VectorStoreFileObjectImpl( + {required this.id, + required this.object, + @JsonKey(name: 'usage_bytes') required this.usageBytes, + @JsonKey(name: 'created_at') required this.createdAt, + @JsonKey(name: 'vector_store_id') required this.vectorStoreId, + required this.status, + @JsonKey(name: 'last_error') required this.lastError}) + : super._(); - factory _$CreateVectorStoreRequestImpl.fromJson(Map json) => - _$$CreateVectorStoreRequestImplFromJson(json); + factory _$VectorStoreFileObjectImpl.fromJson(Map json) => + _$$VectorStoreFileObjectImplFromJson(json); - /// The name of the vector store. + /// The identifier, which can be referenced in API endpoints. @override - @JsonKey(includeIfNull: false) - final String? name; + final String id; - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - final List? _fileIds; + /// The object type, which is always `vector_store.file`. + @override + final String object; - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. 
Useful for tools like `file_search` that can access files. + /// The total vector store usage in bytes. Note that this may be different from the original file size. @override - @JsonKey(name: 'file_ids', includeIfNull: false) - List? get fileIds { - final value = _fileIds; - if (value == null) return null; - if (_fileIds is EqualUnmodifiableListView) return _fileIds; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } + @JsonKey(name: 'usage_bytes') + final int usageBytes; - /// The expiration policy for a vector store. + /// The Unix timestamp (in seconds) for when the vector store file was created. @override - @JsonKey(name: 'expires_after', includeIfNull: false) - final VectorStoreExpirationAfter? expiresAfter; + @JsonKey(name: 'created_at') + final int createdAt; + + /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. + @override + @JsonKey(name: 'vector_store_id') + final String vectorStoreId; - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. @override - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - final ChunkingStrategyRequestParam? chunkingStrategy; + final VectorStoreFileStatus status; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// The last error associated with this vector store file. Will be `null` if there are no errors. @override - @JsonKey(includeIfNull: false) - final dynamic metadata; + @JsonKey(name: 'last_error') + final VectorStoreFileObjectLastError? 
lastError; @override String toString() { - return 'CreateVectorStoreRequest(name: $name, fileIds: $fileIds, expiresAfter: $expiresAfter, chunkingStrategy: $chunkingStrategy, metadata: $metadata)'; + return 'VectorStoreFileObject(id: $id, object: $object, usageBytes: $usageBytes, createdAt: $createdAt, vectorStoreId: $vectorStoreId, status: $status, lastError: $lastError)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$CreateVectorStoreRequestImpl && - (identical(other.name, name) || other.name == name) && - const DeepCollectionEquality().equals(other._fileIds, _fileIds) && - (identical(other.expiresAfter, expiresAfter) || - other.expiresAfter == expiresAfter) && - (identical(other.chunkingStrategy, chunkingStrategy) || - other.chunkingStrategy == chunkingStrategy) && - const DeepCollectionEquality().equals(other.metadata, metadata)); + other is _$VectorStoreFileObjectImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.object, object) || other.object == object) && + (identical(other.usageBytes, usageBytes) || + other.usageBytes == usageBytes) && + (identical(other.createdAt, createdAt) || + other.createdAt == createdAt) && + (identical(other.vectorStoreId, vectorStoreId) || + other.vectorStoreId == vectorStoreId) && + (identical(other.status, status) || other.status == status) && + (identical(other.lastError, lastError) || + other.lastError == lastError)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash( - runtimeType, - name, - const DeepCollectionEquality().hash(_fileIds), - expiresAfter, - chunkingStrategy, - const DeepCollectionEquality().hash(metadata)); + int get hashCode => Object.hash(runtimeType, id, object, usageBytes, + createdAt, vectorStoreId, status, lastError); - /// Create a copy of CreateVectorStoreRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$CreateVectorStoreRequestImplCopyWith<_$CreateVectorStoreRequestImpl> - get copyWith => __$$CreateVectorStoreRequestImplCopyWithImpl< - _$CreateVectorStoreRequestImpl>(this, _$identity); + _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> + get copyWith => __$$VectorStoreFileObjectImplCopyWithImpl< + _$VectorStoreFileObjectImpl>(this, _$identity); @override Map toJson() { - return _$$CreateVectorStoreRequestImplToJson( + return _$$VectorStoreFileObjectImplToJson( this, ); } } -abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { - const factory _CreateVectorStoreRequest( - {@JsonKey(includeIfNull: false) final String? name, - @JsonKey(name: 'file_ids', includeIfNull: false) - final List? fileIds, - @JsonKey(name: 'expires_after', includeIfNull: false) - final VectorStoreExpirationAfter? expiresAfter, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - final ChunkingStrategyRequestParam? 
chunkingStrategy, - @JsonKey(includeIfNull: false) final dynamic metadata}) = - _$CreateVectorStoreRequestImpl; - const _CreateVectorStoreRequest._() : super._(); +abstract class _VectorStoreFileObject extends VectorStoreFileObject { + const factory _VectorStoreFileObject( + {required final String id, + required final String object, + @JsonKey(name: 'usage_bytes') required final int usageBytes, + @JsonKey(name: 'created_at') required final int createdAt, + @JsonKey(name: 'vector_store_id') required final String vectorStoreId, + required final VectorStoreFileStatus status, + @JsonKey(name: 'last_error') + required final VectorStoreFileObjectLastError? lastError}) = + _$VectorStoreFileObjectImpl; + const _VectorStoreFileObject._() : super._(); - factory _CreateVectorStoreRequest.fromJson(Map json) = - _$CreateVectorStoreRequestImpl.fromJson; + factory _VectorStoreFileObject.fromJson(Map json) = + _$VectorStoreFileObjectImpl.fromJson; - /// The name of the vector store. @override - @JsonKey(includeIfNull: false) - String? get name; - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + /// The identifier, which can be referenced in API endpoints. + String get id; @override - @JsonKey(name: 'file_ids', includeIfNull: false) - List? get fileIds; - /// The expiration policy for a vector store. + /// The object type, which is always `vector_store.file`. + String get object; @override - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? get expiresAfter; - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + /// The total vector store usage in bytes. Note that this may be different from the original file size. + @JsonKey(name: 'usage_bytes') + int get usageBytes; @override - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? get chunkingStrategy; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// The Unix timestamp (in seconds) for when the vector store file was created. + @JsonKey(name: 'created_at') + int get createdAt; @override - @JsonKey(includeIfNull: false) - dynamic get metadata; - /// Create a copy of CreateVectorStoreRequest - /// with the given fields replaced by the non-null parameter values. + /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. + @JsonKey(name: 'vector_store_id') + String get vectorStoreId; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$CreateVectorStoreRequestImplCopyWith<_$CreateVectorStoreRequestImpl> + + /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. + VectorStoreFileStatus get status; + @override + + /// The last error associated with this vector store file. Will be `null` if there are no errors. + @JsonKey(name: 'last_error') + VectorStoreFileObjectLastError? 
get lastError; + @override + @JsonKey(ignore: true) + _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> get copyWith => throw _privateConstructorUsedError; } -UpdateVectorStoreRequest _$UpdateVectorStoreRequestFromJson( +VectorStoreFileObjectLastError _$VectorStoreFileObjectLastErrorFromJson( Map json) { - return _UpdateVectorStoreRequest.fromJson(json); + return _VectorStoreFileObjectLastError.fromJson(json); } /// @nodoc -mixin _$UpdateVectorStoreRequest { - /// The name of the vector store. - @JsonKey(includeIfNull: false) - String? get name => throw _privateConstructorUsedError; - - /// The expiration policy for a vector store. - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? get expiresAfter => +mixin _$VectorStoreFileObjectLastError { + /// One of `server_error` or `rate_limit_exceeded`. + VectorStoreFileObjectLastErrorCode get code => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. - @JsonKey(includeIfNull: false) - dynamic get metadata => throw _privateConstructorUsedError; + /// A human-readable description of the error. + String get message => throw _privateConstructorUsedError; - /// Serializes this UpdateVectorStoreRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of UpdateVectorStoreRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $UpdateVectorStoreRequestCopyWith get copyWith => - throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $VectorStoreFileObjectLastErrorCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $UpdateVectorStoreRequestCopyWith<$Res> { - factory $UpdateVectorStoreRequestCopyWith(UpdateVectorStoreRequest value, - $Res Function(UpdateVectorStoreRequest) then) = - _$UpdateVectorStoreRequestCopyWithImpl<$Res, UpdateVectorStoreRequest>; +abstract class $VectorStoreFileObjectLastErrorCopyWith<$Res> { + factory $VectorStoreFileObjectLastErrorCopyWith( + VectorStoreFileObjectLastError value, + $Res Function(VectorStoreFileObjectLastError) then) = + _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, + VectorStoreFileObjectLastError>; @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? expiresAfter, - @JsonKey(includeIfNull: false) dynamic metadata}); - - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + $Res call({VectorStoreFileObjectLastErrorCode code, String message}); } /// @nodoc -class _$UpdateVectorStoreRequestCopyWithImpl<$Res, - $Val extends UpdateVectorStoreRequest> - implements $UpdateVectorStoreRequestCopyWith<$Res> { - _$UpdateVectorStoreRequestCopyWithImpl(this._value, this._then); +class _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, + $Val extends VectorStoreFileObjectLastError> + implements $VectorStoreFileObjectLastErrorCopyWith<$Res> { + _$VectorStoreFileObjectLastErrorCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of UpdateVectorStoreRequest - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ - Object? name = freezed, - Object? expiresAfter = freezed, - Object? metadata = freezed, + Object? code = null, + Object? message = null, }) { return _then(_value.copyWith( - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - expiresAfter: freezed == expiresAfter - ? _value.expiresAfter - : expiresAfter // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfter?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, + code: null == code + ? _value.code + : code // ignore: cast_nullable_to_non_nullable + as VectorStoreFileObjectLastErrorCode, + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String, ) as $Val); } - - /// Create a copy of UpdateVectorStoreRequest - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter { - if (_value.expiresAfter == null) { - return null; - } - - return $VectorStoreExpirationAfterCopyWith<$Res>(_value.expiresAfter!, - (value) { - return _then(_value.copyWith(expiresAfter: value) as $Val); - }); - } } /// @nodoc -abstract class _$$UpdateVectorStoreRequestImplCopyWith<$Res> - implements $UpdateVectorStoreRequestCopyWith<$Res> { - factory _$$UpdateVectorStoreRequestImplCopyWith( - _$UpdateVectorStoreRequestImpl value, - $Res Function(_$UpdateVectorStoreRequestImpl) then) = - __$$UpdateVectorStoreRequestImplCopyWithImpl<$Res>; +abstract class _$$VectorStoreFileObjectLastErrorImplCopyWith<$Res> + implements $VectorStoreFileObjectLastErrorCopyWith<$Res> { + factory _$$VectorStoreFileObjectLastErrorImplCopyWith( + _$VectorStoreFileObjectLastErrorImpl value, + $Res Function(_$VectorStoreFileObjectLastErrorImpl) then) = + __$$VectorStoreFileObjectLastErrorImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? expiresAfter, - @JsonKey(includeIfNull: false) dynamic metadata}); - - @override - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + $Res call({VectorStoreFileObjectLastErrorCode code, String message}); } /// @nodoc -class __$$UpdateVectorStoreRequestImplCopyWithImpl<$Res> - extends _$UpdateVectorStoreRequestCopyWithImpl<$Res, - _$UpdateVectorStoreRequestImpl> - implements _$$UpdateVectorStoreRequestImplCopyWith<$Res> { - __$$UpdateVectorStoreRequestImplCopyWithImpl( - _$UpdateVectorStoreRequestImpl _value, - $Res Function(_$UpdateVectorStoreRequestImpl) _then) +class __$$VectorStoreFileObjectLastErrorImplCopyWithImpl<$Res> + extends _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, + _$VectorStoreFileObjectLastErrorImpl> + implements _$$VectorStoreFileObjectLastErrorImplCopyWith<$Res> { + __$$VectorStoreFileObjectLastErrorImplCopyWithImpl( + _$VectorStoreFileObjectLastErrorImpl _value, + $Res Function(_$VectorStoreFileObjectLastErrorImpl) _then) : super(_value, _then); - /// Create a copy of UpdateVectorStoreRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? name = freezed, - Object? expiresAfter = freezed, - Object? metadata = freezed, + Object? code = null, + Object? message = null, }) { - return _then(_$UpdateVectorStoreRequestImpl( - name: freezed == name - ? 
_value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - expiresAfter: freezed == expiresAfter - ? _value.expiresAfter - : expiresAfter // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfter?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, + return _then(_$VectorStoreFileObjectLastErrorImpl( + code: null == code + ? _value.code + : code // ignore: cast_nullable_to_non_nullable + as VectorStoreFileObjectLastErrorCode, + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String, )); } } /// @nodoc @JsonSerializable() -class _$UpdateVectorStoreRequestImpl extends _UpdateVectorStoreRequest { - const _$UpdateVectorStoreRequestImpl( - {@JsonKey(includeIfNull: false) this.name, - @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, - @JsonKey(includeIfNull: false) this.metadata}) +class _$VectorStoreFileObjectLastErrorImpl + extends _VectorStoreFileObjectLastError { + const _$VectorStoreFileObjectLastErrorImpl( + {required this.code, required this.message}) : super._(); - factory _$UpdateVectorStoreRequestImpl.fromJson(Map json) => - _$$UpdateVectorStoreRequestImplFromJson(json); - - /// The name of the vector store. - @override - @JsonKey(includeIfNull: false) - final String? name; + factory _$VectorStoreFileObjectLastErrorImpl.fromJson( + Map json) => + _$$VectorStoreFileObjectLastErrorImplFromJson(json); - /// The expiration policy for a vector store. + /// One of `server_error` or `rate_limit_exceeded`. @override - @JsonKey(name: 'expires_after', includeIfNull: false) - final VectorStoreExpirationAfter? expiresAfter; + final VectorStoreFileObjectLastErrorCode code; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// A human-readable description of the error. @override - @JsonKey(includeIfNull: false) - final dynamic metadata; + final String message; @override String toString() { - return 'UpdateVectorStoreRequest(name: $name, expiresAfter: $expiresAfter, metadata: $metadata)'; + return 'VectorStoreFileObjectLastError(code: $code, message: $message)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$UpdateVectorStoreRequestImpl && - (identical(other.name, name) || other.name == name) && - (identical(other.expiresAfter, expiresAfter) || - other.expiresAfter == expiresAfter) && - const DeepCollectionEquality().equals(other.metadata, metadata)); + other is _$VectorStoreFileObjectLastErrorImpl && + (identical(other.code, code) || other.code == code) && + (identical(other.message, message) || other.message == message)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, name, expiresAfter, - const DeepCollectionEquality().hash(metadata)); + int get hashCode => Object.hash(runtimeType, code, message); - /// Create a copy of UpdateVectorStoreRequest - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$UpdateVectorStoreRequestImplCopyWith<_$UpdateVectorStoreRequestImpl> - get copyWith => __$$UpdateVectorStoreRequestImplCopyWithImpl< - _$UpdateVectorStoreRequestImpl>(this, _$identity); + _$$VectorStoreFileObjectLastErrorImplCopyWith< + _$VectorStoreFileObjectLastErrorImpl> + get copyWith => __$$VectorStoreFileObjectLastErrorImplCopyWithImpl< + _$VectorStoreFileObjectLastErrorImpl>(this, _$identity); @override Map toJson() { - return _$$UpdateVectorStoreRequestImplToJson( + return _$$VectorStoreFileObjectLastErrorImplToJson( this, ); } } -abstract class _UpdateVectorStoreRequest extends UpdateVectorStoreRequest { - const factory _UpdateVectorStoreRequest( - {@JsonKey(includeIfNull: false) final String? name, - @JsonKey(name: 'expires_after', includeIfNull: false) - final VectorStoreExpirationAfter? expiresAfter, - @JsonKey(includeIfNull: false) final dynamic metadata}) = - _$UpdateVectorStoreRequestImpl; - const _UpdateVectorStoreRequest._() : super._(); - - factory _UpdateVectorStoreRequest.fromJson(Map json) = - _$UpdateVectorStoreRequestImpl.fromJson; +abstract class _VectorStoreFileObjectLastError + extends VectorStoreFileObjectLastError { + const factory _VectorStoreFileObjectLastError( + {required final VectorStoreFileObjectLastErrorCode code, + required final String message}) = _$VectorStoreFileObjectLastErrorImpl; + const _VectorStoreFileObjectLastError._() : super._(); - /// The name of the vector store. - @override - @JsonKey(includeIfNull: false) - String? get name; + factory _VectorStoreFileObjectLastError.fromJson(Map json) = + _$VectorStoreFileObjectLastErrorImpl.fromJson; - /// The expiration policy for a vector store. @override - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? get expiresAfter; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// One of `server_error` or `rate_limit_exceeded`. + VectorStoreFileObjectLastErrorCode get code; @override - @JsonKey(includeIfNull: false) - dynamic get metadata; - /// Create a copy of UpdateVectorStoreRequest - /// with the given fields replaced by the non-null parameter values. + /// A human-readable description of the error. + String get message; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$UpdateVectorStoreRequestImplCopyWith<_$UpdateVectorStoreRequestImpl> + @JsonKey(ignore: true) + _$$VectorStoreFileObjectLastErrorImplCopyWith< + _$VectorStoreFileObjectLastErrorImpl> get copyWith => throw _privateConstructorUsedError; } -ListVectorStoresResponse _$ListVectorStoresResponseFromJson( +CreateVectorStoreFileRequest _$CreateVectorStoreFileRequestFromJson( Map json) { - return _ListVectorStoresResponse.fromJson(json); + return _CreateVectorStoreFileRequest.fromJson(json); } /// @nodoc -mixin _$ListVectorStoresResponse { - /// The object type, which is always `list`. - String get object => throw _privateConstructorUsedError; - - /// A list of assistant files. - List get data => throw _privateConstructorUsedError; - - /// The ID of the first assistant file in the list. - @JsonKey(name: 'first_id') - String? get firstId => throw _privateConstructorUsedError; - - /// The ID of the last assistant file in the list. 
- @JsonKey(name: 'last_id') - String? get lastId => throw _privateConstructorUsedError; - - /// Whether there are more assistant files available. - @JsonKey(name: 'has_more') - bool get hasMore => throw _privateConstructorUsedError; +mixin _$CreateVectorStoreFileRequest { + /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. + @JsonKey(name: 'file_id') + String get fileId => throw _privateConstructorUsedError; - /// Serializes this ListVectorStoresResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ListVectorStoresResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $ListVectorStoresResponseCopyWith get copyWith => - throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateVectorStoreFileRequestCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ListVectorStoresResponseCopyWith<$Res> { - factory $ListVectorStoresResponseCopyWith(ListVectorStoresResponse value, - $Res Function(ListVectorStoresResponse) then) = - _$ListVectorStoresResponseCopyWithImpl<$Res, ListVectorStoresResponse>; +abstract class $CreateVectorStoreFileRequestCopyWith<$Res> { + factory $CreateVectorStoreFileRequestCopyWith( + CreateVectorStoreFileRequest value, + $Res Function(CreateVectorStoreFileRequest) then) = + _$CreateVectorStoreFileRequestCopyWithImpl<$Res, + CreateVectorStoreFileRequest>; @useResult - $Res call( - {String object, - List data, - @JsonKey(name: 'first_id') String? firstId, - @JsonKey(name: 'last_id') String? lastId, - @JsonKey(name: 'has_more') bool hasMore}); + $Res call({@JsonKey(name: 'file_id') String fileId}); } /// @nodoc -class _$ListVectorStoresResponseCopyWithImpl<$Res, - $Val extends ListVectorStoresResponse> - implements $ListVectorStoresResponseCopyWith<$Res> { - _$ListVectorStoresResponseCopyWithImpl(this._value, this._then); +class _$CreateVectorStoreFileRequestCopyWithImpl<$Res, + $Val extends CreateVectorStoreFileRequest> + implements $CreateVectorStoreFileRequestCopyWith<$Res> { + _$CreateVectorStoreFileRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? fileId = null, + }) { + return _then(_value.copyWith( + fileId: null == fileId + ? 
_value.fileId + : fileId // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CreateVectorStoreFileRequestImplCopyWith<$Res> + implements $CreateVectorStoreFileRequestCopyWith<$Res> { + factory _$$CreateVectorStoreFileRequestImplCopyWith( + _$CreateVectorStoreFileRequestImpl value, + $Res Function(_$CreateVectorStoreFileRequestImpl) then) = + __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(name: 'file_id') String fileId}); +} + +/// @nodoc +class __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res> + extends _$CreateVectorStoreFileRequestCopyWithImpl<$Res, + _$CreateVectorStoreFileRequestImpl> + implements _$$CreateVectorStoreFileRequestImplCopyWith<$Res> { + __$$CreateVectorStoreFileRequestImplCopyWithImpl( + _$CreateVectorStoreFileRequestImpl _value, + $Res Function(_$CreateVectorStoreFileRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? fileId = null, + }) { + return _then(_$CreateVectorStoreFileRequestImpl( + fileId: null == fileId + ? _value.fileId + : fileId // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { + const _$CreateVectorStoreFileRequestImpl( + {@JsonKey(name: 'file_id') required this.fileId}) + : super._(); + + factory _$CreateVectorStoreFileRequestImpl.fromJson( + Map json) => + _$$CreateVectorStoreFileRequestImplFromJson(json); + + /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. + @override + @JsonKey(name: 'file_id') + final String fileId; + + @override + String toString() { + return 'CreateVectorStoreFileRequest(fileId: $fileId)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateVectorStoreFileRequestImpl && + (identical(other.fileId, fileId) || other.fileId == fileId)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, fileId); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateVectorStoreFileRequestImplCopyWith< + _$CreateVectorStoreFileRequestImpl> + get copyWith => __$$CreateVectorStoreFileRequestImplCopyWithImpl< + _$CreateVectorStoreFileRequestImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CreateVectorStoreFileRequestImplToJson( + this, + ); + } +} + +abstract class _CreateVectorStoreFileRequest + extends CreateVectorStoreFileRequest { + const factory _CreateVectorStoreFileRequest( + {@JsonKey(name: 'file_id') required final String fileId}) = + _$CreateVectorStoreFileRequestImpl; + const _CreateVectorStoreFileRequest._() : super._(); + + factory _CreateVectorStoreFileRequest.fromJson(Map json) = + _$CreateVectorStoreFileRequestImpl.fromJson; + + @override + + /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. 
+ @JsonKey(name: 'file_id') + String get fileId; + @override + @JsonKey(ignore: true) + _$$CreateVectorStoreFileRequestImplCopyWith< + _$CreateVectorStoreFileRequestImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ListVectorStoreFilesResponse _$ListVectorStoreFilesResponseFromJson( + Map json) { + return _ListVectorStoreFilesResponse.fromJson(json); +} + +/// @nodoc +mixin _$ListVectorStoreFilesResponse { + /// The object type, which is always `list`. + String get object => throw _privateConstructorUsedError; + + /// A list of message files. + List get data => throw _privateConstructorUsedError; + + /// The ID of the first message file in the list. + @JsonKey(name: 'first_id') + String get firstId => throw _privateConstructorUsedError; + + /// The ID of the last message file in the list. + @JsonKey(name: 'last_id') + String get lastId => throw _privateConstructorUsedError; + + /// Whether there are more message files available. + @JsonKey(name: 'has_more') + bool get hasMore => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ListVectorStoreFilesResponseCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ListVectorStoreFilesResponseCopyWith<$Res> { + factory $ListVectorStoreFilesResponseCopyWith( + ListVectorStoreFilesResponse value, + $Res Function(ListVectorStoreFilesResponse) then) = + _$ListVectorStoreFilesResponseCopyWithImpl<$Res, + ListVectorStoreFilesResponse>; + @useResult + $Res call( + {String object, + List data, + @JsonKey(name: 'first_id') String firstId, + @JsonKey(name: 'last_id') String lastId, + @JsonKey(name: 'has_more') bool hasMore}); +} + +/// @nodoc +class _$ListVectorStoreFilesResponseCopyWithImpl<$Res, + $Val extends ListVectorStoreFilesResponse> + implements $ListVectorStoreFilesResponseCopyWith<$Res> { + _$ListVectorStoreFilesResponseCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ListVectorStoresResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? object = null, Object? data = null, - Object? firstId = freezed, - Object? lastId = freezed, + Object? firstId = null, + Object? lastId = null, Object? hasMore = null, }) { return _then(_value.copyWith( @@ -49858,15 +46663,15 @@ class _$ListVectorStoresResponseCopyWithImpl<$Res, data: null == data ? _value.data : data // ignore: cast_nullable_to_non_nullable - as List, - firstId: freezed == firstId + as List, + firstId: null == firstId ? _value.firstId : firstId // ignore: cast_nullable_to_non_nullable - as String?, - lastId: freezed == lastId + as String, + lastId: null == lastId ? _value.lastId : lastId // ignore: cast_nullable_to_non_nullable - as String?, + as String, hasMore: null == hasMore ? 
_value.hasMore : hasMore // ignore: cast_nullable_to_non_nullable @@ -49876,44 +46681,42 @@ class _$ListVectorStoresResponseCopyWithImpl<$Res, } /// @nodoc -abstract class _$$ListVectorStoresResponseImplCopyWith<$Res> - implements $ListVectorStoresResponseCopyWith<$Res> { - factory _$$ListVectorStoresResponseImplCopyWith( - _$ListVectorStoresResponseImpl value, - $Res Function(_$ListVectorStoresResponseImpl) then) = - __$$ListVectorStoresResponseImplCopyWithImpl<$Res>; +abstract class _$$ListVectorStoreFilesResponseImplCopyWith<$Res> + implements $ListVectorStoreFilesResponseCopyWith<$Res> { + factory _$$ListVectorStoreFilesResponseImplCopyWith( + _$ListVectorStoreFilesResponseImpl value, + $Res Function(_$ListVectorStoreFilesResponseImpl) then) = + __$$ListVectorStoreFilesResponseImplCopyWithImpl<$Res>; @override @useResult $Res call( {String object, - List data, - @JsonKey(name: 'first_id') String? firstId, - @JsonKey(name: 'last_id') String? lastId, + List data, + @JsonKey(name: 'first_id') String firstId, + @JsonKey(name: 'last_id') String lastId, @JsonKey(name: 'has_more') bool hasMore}); } /// @nodoc -class __$$ListVectorStoresResponseImplCopyWithImpl<$Res> - extends _$ListVectorStoresResponseCopyWithImpl<$Res, - _$ListVectorStoresResponseImpl> - implements _$$ListVectorStoresResponseImplCopyWith<$Res> { - __$$ListVectorStoresResponseImplCopyWithImpl( - _$ListVectorStoresResponseImpl _value, - $Res Function(_$ListVectorStoresResponseImpl) _then) +class __$$ListVectorStoreFilesResponseImplCopyWithImpl<$Res> + extends _$ListVectorStoreFilesResponseCopyWithImpl<$Res, + _$ListVectorStoreFilesResponseImpl> + implements _$$ListVectorStoreFilesResponseImplCopyWith<$Res> { + __$$ListVectorStoreFilesResponseImplCopyWithImpl( + _$ListVectorStoreFilesResponseImpl _value, + $Res Function(_$ListVectorStoreFilesResponseImpl) _then) : super(_value, _then); - /// Create a copy of ListVectorStoresResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? object = null, Object? data = null, - Object? firstId = freezed, - Object? lastId = freezed, + Object? firstId = null, + Object? lastId = null, Object? hasMore = null, }) { - return _then(_$ListVectorStoresResponseImpl( + return _then(_$ListVectorStoreFilesResponseImpl( object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable @@ -49921,15 +46724,15 @@ class __$$ListVectorStoresResponseImplCopyWithImpl<$Res> data: null == data ? _value._data : data // ignore: cast_nullable_to_non_nullable - as List, - firstId: freezed == firstId + as List, + firstId: null == firstId ? _value.firstId : firstId // ignore: cast_nullable_to_non_nullable - as String?, - lastId: freezed == lastId + as String, + lastId: null == lastId ? _value.lastId : lastId // ignore: cast_nullable_to_non_nullable - as String?, + as String, hasMore: null == hasMore ? 
_value.hasMore : hasMore // ignore: cast_nullable_to_non_nullable @@ -49940,59 +46743,60 @@ class __$$ListVectorStoresResponseImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$ListVectorStoresResponseImpl extends _ListVectorStoresResponse { - const _$ListVectorStoresResponseImpl( +class _$ListVectorStoreFilesResponseImpl extends _ListVectorStoreFilesResponse { + const _$ListVectorStoreFilesResponseImpl( {required this.object, - required final List data, + required final List data, @JsonKey(name: 'first_id') required this.firstId, @JsonKey(name: 'last_id') required this.lastId, @JsonKey(name: 'has_more') required this.hasMore}) : _data = data, super._(); - factory _$ListVectorStoresResponseImpl.fromJson(Map json) => - _$$ListVectorStoresResponseImplFromJson(json); + factory _$ListVectorStoreFilesResponseImpl.fromJson( + Map json) => + _$$ListVectorStoreFilesResponseImplFromJson(json); /// The object type, which is always `list`. @override final String object; - /// A list of assistant files. - final List _data; + /// A list of message files. + final List _data; - /// A list of assistant files. + /// A list of message files. @override - List get data { + List get data { if (_data is EqualUnmodifiableListView) return _data; // ignore: implicit_dynamic_type return EqualUnmodifiableListView(_data); } - /// The ID of the first assistant file in the list. + /// The ID of the first message file in the list. @override @JsonKey(name: 'first_id') - final String? firstId; + final String firstId; - /// The ID of the last assistant file in the list. + /// The ID of the last message file in the list. @override @JsonKey(name: 'last_id') - final String? lastId; + final String lastId; - /// Whether there are more assistant files available. + /// Whether there are more message files available. @override @JsonKey(name: 'has_more') final bool hasMore; @override String toString() { - return 'ListVectorStoresResponse(object: $object, data: $data, firstId: $firstId, lastId: $lastId, hasMore: $hasMore)'; + return 'ListVectorStoreFilesResponse(object: $object, data: $data, firstId: $firstId, lastId: $lastId, hasMore: $hasMore)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ListVectorStoresResponseImpl && + other is _$ListVectorStoreFilesResponseImpl && (identical(other.object, object) || other.object == object) && const DeepCollectionEquality().equals(other._data, _data) && (identical(other.firstId, firstId) || other.firstId == firstId) && @@ -50000,120 +46804,115 @@ class _$ListVectorStoresResponseImpl extends _ListVectorStoresResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - /// Create a copy of ListVectorStoresResponse - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ListVectorStoresResponseImplCopyWith<_$ListVectorStoresResponseImpl> - get copyWith => __$$ListVectorStoresResponseImplCopyWithImpl< - _$ListVectorStoresResponseImpl>(this, _$identity); + _$$ListVectorStoreFilesResponseImplCopyWith< + _$ListVectorStoreFilesResponseImpl> + get copyWith => __$$ListVectorStoreFilesResponseImplCopyWithImpl< + _$ListVectorStoreFilesResponseImpl>(this, _$identity); @override Map toJson() { - return _$$ListVectorStoresResponseImplToJson( + return _$$ListVectorStoreFilesResponseImplToJson( this, ); } } -abstract class _ListVectorStoresResponse extends ListVectorStoresResponse { - const factory _ListVectorStoresResponse( +abstract class _ListVectorStoreFilesResponse + extends ListVectorStoreFilesResponse { + const factory _ListVectorStoreFilesResponse( {required final String object, - required final List data, - @JsonKey(name: 'first_id') required final String? firstId, - @JsonKey(name: 'last_id') required final String? lastId, + required final List data, + @JsonKey(name: 'first_id') required final String firstId, + @JsonKey(name: 'last_id') required final String lastId, @JsonKey(name: 'has_more') required final bool hasMore}) = - _$ListVectorStoresResponseImpl; - const _ListVectorStoresResponse._() : super._(); + _$ListVectorStoreFilesResponseImpl; + const _ListVectorStoreFilesResponse._() : super._(); - factory _ListVectorStoresResponse.fromJson(Map json) = - _$ListVectorStoresResponseImpl.fromJson; + factory _ListVectorStoreFilesResponse.fromJson(Map json) = + _$ListVectorStoreFilesResponseImpl.fromJson; - /// The object type, which is always `list`. @override - String get object; - /// A list of assistant files. + /// The object type, which is always `list`. + String get object; @override - List get data; - /// The ID of the first assistant file in the list. + /// A list of message files. + List get data; @override - @JsonKey(name: 'first_id') - String? get firstId; - /// The ID of the last assistant file in the list. + /// The ID of the first message file in the list. + @JsonKey(name: 'first_id') + String get firstId; @override - @JsonKey(name: 'last_id') - String? get lastId; - /// Whether there are more assistant files available. + /// The ID of the last message file in the list. + @JsonKey(name: 'last_id') + String get lastId; @override + + /// Whether there are more message files available. @JsonKey(name: 'has_more') bool get hasMore; - - /// Create a copy of ListVectorStoresResponse - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ListVectorStoresResponseImplCopyWith<_$ListVectorStoresResponseImpl> + @JsonKey(ignore: true) + _$$ListVectorStoreFilesResponseImplCopyWith< + _$ListVectorStoreFilesResponseImpl> get copyWith => throw _privateConstructorUsedError; } -DeleteVectorStoreResponse _$DeleteVectorStoreResponseFromJson( +DeleteVectorStoreFileResponse _$DeleteVectorStoreFileResponseFromJson( Map json) { - return _DeleteVectorStoreResponse.fromJson(json); + return _DeleteVectorStoreFileResponse.fromJson(json); } /// @nodoc -mixin _$DeleteVectorStoreResponse { - /// The ID of the deleted vector store. +mixin _$DeleteVectorStoreFileResponse { + /// The ID of the deleted vector store file. String get id => throw _privateConstructorUsedError; - /// Whether the vector store was deleted. + /// Whether the vector store file was deleted. 
bool get deleted => throw _privateConstructorUsedError; - /// The object type, which is always `vector_store.deleted`. + /// The object type, which is always `vector_store.file.deleted`. String get object => throw _privateConstructorUsedError; - /// Serializes this DeleteVectorStoreResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of DeleteVectorStoreResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $DeleteVectorStoreResponseCopyWith get copyWith => - throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $DeleteVectorStoreFileResponseCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $DeleteVectorStoreResponseCopyWith<$Res> { - factory $DeleteVectorStoreResponseCopyWith(DeleteVectorStoreResponse value, - $Res Function(DeleteVectorStoreResponse) then) = - _$DeleteVectorStoreResponseCopyWithImpl<$Res, DeleteVectorStoreResponse>; +abstract class $DeleteVectorStoreFileResponseCopyWith<$Res> { + factory $DeleteVectorStoreFileResponseCopyWith( + DeleteVectorStoreFileResponse value, + $Res Function(DeleteVectorStoreFileResponse) then) = + _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, + DeleteVectorStoreFileResponse>; @useResult $Res call({String id, bool deleted, String object}); } /// @nodoc -class _$DeleteVectorStoreResponseCopyWithImpl<$Res, - $Val extends DeleteVectorStoreResponse> - implements $DeleteVectorStoreResponseCopyWith<$Res> { - _$DeleteVectorStoreResponseCopyWithImpl(this._value, this._then); +class _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, + $Val extends DeleteVectorStoreFileResponse> + implements $DeleteVectorStoreFileResponseCopyWith<$Res> { + _$DeleteVectorStoreFileResponseCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of DeleteVectorStoreResponse - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -50139,29 +46938,27 @@ class _$DeleteVectorStoreResponseCopyWithImpl<$Res, } /// @nodoc -abstract class _$$DeleteVectorStoreResponseImplCopyWith<$Res> - implements $DeleteVectorStoreResponseCopyWith<$Res> { - factory _$$DeleteVectorStoreResponseImplCopyWith( - _$DeleteVectorStoreResponseImpl value, - $Res Function(_$DeleteVectorStoreResponseImpl) then) = - __$$DeleteVectorStoreResponseImplCopyWithImpl<$Res>; +abstract class _$$DeleteVectorStoreFileResponseImplCopyWith<$Res> + implements $DeleteVectorStoreFileResponseCopyWith<$Res> { + factory _$$DeleteVectorStoreFileResponseImplCopyWith( + _$DeleteVectorStoreFileResponseImpl value, + $Res Function(_$DeleteVectorStoreFileResponseImpl) then) = + __$$DeleteVectorStoreFileResponseImplCopyWithImpl<$Res>; @override @useResult $Res call({String id, bool deleted, String object}); } /// @nodoc -class __$$DeleteVectorStoreResponseImplCopyWithImpl<$Res> - extends _$DeleteVectorStoreResponseCopyWithImpl<$Res, - _$DeleteVectorStoreResponseImpl> - implements _$$DeleteVectorStoreResponseImplCopyWith<$Res> { - __$$DeleteVectorStoreResponseImplCopyWithImpl( - _$DeleteVectorStoreResponseImpl _value, - $Res Function(_$DeleteVectorStoreResponseImpl) _then) +class __$$DeleteVectorStoreFileResponseImplCopyWithImpl<$Res> + extends _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, + _$DeleteVectorStoreFileResponseImpl> + implements _$$DeleteVectorStoreFileResponseImplCopyWith<$Res> { + __$$DeleteVectorStoreFileResponseImplCopyWithImpl( + _$DeleteVectorStoreFileResponseImpl _value, + $Res Function(_$DeleteVectorStoreFileResponseImpl) _then) : super(_value, _then); - /// Create a copy of DeleteVectorStoreResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50169,7 +46966,7 @@ class __$$DeleteVectorStoreResponseImplCopyWithImpl<$Res> Object? deleted = null, Object? object = null, }) { - return _then(_$DeleteVectorStoreResponseImpl( + return _then(_$DeleteVectorStoreFileResponseImpl( id: null == id ? _value.id : id // ignore: cast_nullable_to_non_nullable @@ -50188,110 +46985,107 @@ class __$$DeleteVectorStoreResponseImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$DeleteVectorStoreResponseImpl extends _DeleteVectorStoreResponse { - const _$DeleteVectorStoreResponseImpl( +class _$DeleteVectorStoreFileResponseImpl + extends _DeleteVectorStoreFileResponse { + const _$DeleteVectorStoreFileResponseImpl( {required this.id, required this.deleted, required this.object}) : super._(); - factory _$DeleteVectorStoreResponseImpl.fromJson(Map json) => - _$$DeleteVectorStoreResponseImplFromJson(json); + factory _$DeleteVectorStoreFileResponseImpl.fromJson( + Map json) => + _$$DeleteVectorStoreFileResponseImplFromJson(json); - /// The ID of the deleted vector store. + /// The ID of the deleted vector store file. @override final String id; - /// Whether the vector store was deleted. + /// Whether the vector store file was deleted. @override final bool deleted; - /// The object type, which is always `vector_store.deleted`. + /// The object type, which is always `vector_store.file.deleted`. 
@override final String object; @override String toString() { - return 'DeleteVectorStoreResponse(id: $id, deleted: $deleted, object: $object)'; + return 'DeleteVectorStoreFileResponse(id: $id, deleted: $deleted, object: $object)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$DeleteVectorStoreResponseImpl && + other is _$DeleteVectorStoreFileResponseImpl && (identical(other.id, id) || other.id == id) && (identical(other.deleted, deleted) || other.deleted == deleted) && (identical(other.object, object) || other.object == object)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - /// Create a copy of DeleteVectorStoreResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$DeleteVectorStoreResponseImplCopyWith<_$DeleteVectorStoreResponseImpl> - get copyWith => __$$DeleteVectorStoreResponseImplCopyWithImpl< - _$DeleteVectorStoreResponseImpl>(this, _$identity); + _$$DeleteVectorStoreFileResponseImplCopyWith< + _$DeleteVectorStoreFileResponseImpl> + get copyWith => __$$DeleteVectorStoreFileResponseImplCopyWithImpl< + _$DeleteVectorStoreFileResponseImpl>(this, _$identity); @override Map toJson() { - return _$$DeleteVectorStoreResponseImplToJson( + return _$$DeleteVectorStoreFileResponseImplToJson( this, ); } } -abstract class _DeleteVectorStoreResponse extends DeleteVectorStoreResponse { - const factory _DeleteVectorStoreResponse( +abstract class _DeleteVectorStoreFileResponse + extends DeleteVectorStoreFileResponse { + const factory _DeleteVectorStoreFileResponse( {required final String id, required final bool deleted, - required final String object}) = _$DeleteVectorStoreResponseImpl; - const _DeleteVectorStoreResponse._() : super._(); + required final String object}) = _$DeleteVectorStoreFileResponseImpl; + const _DeleteVectorStoreFileResponse._() : super._(); - factory _DeleteVectorStoreResponse.fromJson(Map json) = - _$DeleteVectorStoreResponseImpl.fromJson; + factory _DeleteVectorStoreFileResponse.fromJson(Map json) = + _$DeleteVectorStoreFileResponseImpl.fromJson; - /// The ID of the deleted vector store. @override - String get id; - /// Whether the vector store was deleted. + /// The ID of the deleted vector store file. + String get id; @override - bool get deleted; - /// The object type, which is always `vector_store.deleted`. + /// Whether the vector store file was deleted. + bool get deleted; @override - String get object; - /// Create a copy of DeleteVectorStoreResponse - /// with the given fields replaced by the non-null parameter values. + /// The object type, which is always `vector_store.file.deleted`. 
+ String get object; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$DeleteVectorStoreResponseImplCopyWith<_$DeleteVectorStoreResponseImpl> + @JsonKey(ignore: true) + _$$DeleteVectorStoreFileResponseImplCopyWith< + _$DeleteVectorStoreFileResponseImpl> get copyWith => throw _privateConstructorUsedError; } -VectorStoreFileObject _$VectorStoreFileObjectFromJson( +VectorStoreFileBatchObject _$VectorStoreFileBatchObjectFromJson( Map json) { - return _VectorStoreFileObject.fromJson(json); + return _VectorStoreFileBatchObject.fromJson(json); } /// @nodoc -mixin _$VectorStoreFileObject { +mixin _$VectorStoreFileBatchObject { /// The identifier, which can be referenced in API endpoints. String get id => throw _privateConstructorUsedError; - /// The object type, which is always `vector_store.file`. + /// The object type, which is always `vector_store.file_batch`. String get object => throw _privateConstructorUsedError; - /// The total vector store usage in bytes. Note that this may be different from the original file size. - @JsonKey(name: 'usage_bytes') - int get usageBytes => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the vector store file was created. + /// The Unix timestamp (in seconds) for when the vector store files batch was created. @JsonKey(name: 'created_at') int get createdAt => throw _privateConstructorUsedError; @@ -50299,75 +47093,60 @@ mixin _$VectorStoreFileObject { @JsonKey(name: 'vector_store_id') String get vectorStoreId => throw _privateConstructorUsedError; - /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. - VectorStoreFileStatus get status => throw _privateConstructorUsedError; - - /// The last error associated with this vector store file. Will be `null` if there are no errors. - @JsonKey(name: 'last_error') - VectorStoreFileObjectLastError? get lastError => + /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. + VectorStoreFileBatchObjectStatus get status => throw _privateConstructorUsedError; - /// The chunking strategy used to chunk the file(s). - /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyResponseParam? get chunkingStrategy => + /// The number of files per status. + @JsonKey(name: 'file_counts') + VectorStoreFileBatchObjectFileCounts get fileCounts => throw _privateConstructorUsedError; - /// Serializes this VectorStoreFileObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of VectorStoreFileObject - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $VectorStoreFileObjectCopyWith get copyWith => - throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $VectorStoreFileBatchObjectCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $VectorStoreFileObjectCopyWith<$Res> { - factory $VectorStoreFileObjectCopyWith(VectorStoreFileObject value, - $Res Function(VectorStoreFileObject) then) = - _$VectorStoreFileObjectCopyWithImpl<$Res, VectorStoreFileObject>; +abstract class $VectorStoreFileBatchObjectCopyWith<$Res> { + factory $VectorStoreFileBatchObjectCopyWith(VectorStoreFileBatchObject value, + $Res Function(VectorStoreFileBatchObject) then) = + _$VectorStoreFileBatchObjectCopyWithImpl<$Res, + VectorStoreFileBatchObject>; @useResult $Res call( {String id, String object, - @JsonKey(name: 'usage_bytes') int usageBytes, @JsonKey(name: 'created_at') int createdAt, @JsonKey(name: 'vector_store_id') String vectorStoreId, - VectorStoreFileStatus status, - @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyResponseParam? chunkingStrategy}); + VectorStoreFileBatchObjectStatus status, + @JsonKey(name: 'file_counts') + VectorStoreFileBatchObjectFileCounts fileCounts}); - $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError; - $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy; + $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts; } /// @nodoc -class _$VectorStoreFileObjectCopyWithImpl<$Res, - $Val extends VectorStoreFileObject> - implements $VectorStoreFileObjectCopyWith<$Res> { - _$VectorStoreFileObjectCopyWithImpl(this._value, this._then); +class _$VectorStoreFileBatchObjectCopyWithImpl<$Res, + $Val extends VectorStoreFileBatchObject> + implements $VectorStoreFileBatchObjectCopyWith<$Res> { + _$VectorStoreFileBatchObjectCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of VectorStoreFileObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? id = null, Object? object = null, - Object? usageBytes = null, Object? createdAt = null, Object? vectorStoreId = null, Object? status = null, - Object? lastError = freezed, - Object? chunkingStrategy = freezed, + Object? fileCounts = null, }) { return _then(_value.copyWith( id: null == id @@ -50378,10 +47157,6 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, ? _value.object : object // ignore: cast_nullable_to_non_nullable as String, - usageBytes: null == usageBytes - ? _value.usageBytes - : usageBytes // ignore: cast_nullable_to_non_nullable - as int, createdAt: null == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable @@ -50393,99 +47168,67 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, status: null == status ? _value.status : status // ignore: cast_nullable_to_non_nullable - as VectorStoreFileStatus, - lastError: freezed == lastError - ? _value.lastError - : lastError // ignore: cast_nullable_to_non_nullable - as VectorStoreFileObjectLastError?, - chunkingStrategy: freezed == chunkingStrategy - ? _value.chunkingStrategy - : chunkingStrategy // ignore: cast_nullable_to_non_nullable - as ChunkingStrategyResponseParam?, + as VectorStoreFileBatchObjectStatus, + fileCounts: null == fileCounts + ? 
_value.fileCounts + : fileCounts // ignore: cast_nullable_to_non_nullable + as VectorStoreFileBatchObjectFileCounts, ) as $Val); } - /// Create a copy of VectorStoreFileObject - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError { - if (_value.lastError == null) { - return null; - } - - return $VectorStoreFileObjectLastErrorCopyWith<$Res>(_value.lastError!, - (value) { - return _then(_value.copyWith(lastError: value) as $Val); - }); - } - - /// Create a copy of VectorStoreFileObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy { - if (_value.chunkingStrategy == null) { - return null; - } - - return $ChunkingStrategyResponseParamCopyWith<$Res>( - _value.chunkingStrategy!, (value) { - return _then(_value.copyWith(chunkingStrategy: value) as $Val); + $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts { + return $VectorStoreFileBatchObjectFileCountsCopyWith<$Res>( + _value.fileCounts, (value) { + return _then(_value.copyWith(fileCounts: value) as $Val); }); } } /// @nodoc -abstract class _$$VectorStoreFileObjectImplCopyWith<$Res> - implements $VectorStoreFileObjectCopyWith<$Res> { - factory _$$VectorStoreFileObjectImplCopyWith( - _$VectorStoreFileObjectImpl value, - $Res Function(_$VectorStoreFileObjectImpl) then) = - __$$VectorStoreFileObjectImplCopyWithImpl<$Res>; +abstract class _$$VectorStoreFileBatchObjectImplCopyWith<$Res> + implements $VectorStoreFileBatchObjectCopyWith<$Res> { + factory _$$VectorStoreFileBatchObjectImplCopyWith( + _$VectorStoreFileBatchObjectImpl value, + $Res Function(_$VectorStoreFileBatchObjectImpl) then) = + __$$VectorStoreFileBatchObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( {String id, String object, - @JsonKey(name: 'usage_bytes') int usageBytes, @JsonKey(name: 'created_at') int createdAt, @JsonKey(name: 'vector_store_id') String vectorStoreId, - VectorStoreFileStatus status, - @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyResponseParam? chunkingStrategy}); + VectorStoreFileBatchObjectStatus status, + @JsonKey(name: 'file_counts') + VectorStoreFileBatchObjectFileCounts fileCounts}); @override - $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError; - @override - $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy; + $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts; } /// @nodoc -class __$$VectorStoreFileObjectImplCopyWithImpl<$Res> - extends _$VectorStoreFileObjectCopyWithImpl<$Res, - _$VectorStoreFileObjectImpl> - implements _$$VectorStoreFileObjectImplCopyWith<$Res> { - __$$VectorStoreFileObjectImplCopyWithImpl(_$VectorStoreFileObjectImpl _value, - $Res Function(_$VectorStoreFileObjectImpl) _then) +class __$$VectorStoreFileBatchObjectImplCopyWithImpl<$Res> + extends _$VectorStoreFileBatchObjectCopyWithImpl<$Res, + _$VectorStoreFileBatchObjectImpl> + implements _$$VectorStoreFileBatchObjectImplCopyWith<$Res> { + __$$VectorStoreFileBatchObjectImplCopyWithImpl( + _$VectorStoreFileBatchObjectImpl _value, + $Res Function(_$VectorStoreFileBatchObjectImpl) _then) : super(_value, _then); - /// Create a copy of VectorStoreFileObject - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ Object? id = null, Object? object = null, - Object? usageBytes = null, Object? createdAt = null, Object? vectorStoreId = null, Object? status = null, - Object? lastError = freezed, - Object? chunkingStrategy = freezed, + Object? fileCounts = null, }) { - return _then(_$VectorStoreFileObjectImpl( + return _then(_$VectorStoreFileBatchObjectImpl( id: null == id ? _value.id : id // ignore: cast_nullable_to_non_nullable @@ -50494,10 +47237,6 @@ class __$$VectorStoreFileObjectImplCopyWithImpl<$Res> ? _value.object : object // ignore: cast_nullable_to_non_nullable as String, - usageBytes: null == usageBytes - ? _value.usageBytes - : usageBytes // ignore: cast_nullable_to_non_nullable - as int, createdAt: null == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable @@ -50509,51 +47248,40 @@ class __$$VectorStoreFileObjectImplCopyWithImpl<$Res> status: null == status ? _value.status : status // ignore: cast_nullable_to_non_nullable - as VectorStoreFileStatus, - lastError: freezed == lastError - ? _value.lastError - : lastError // ignore: cast_nullable_to_non_nullable - as VectorStoreFileObjectLastError?, - chunkingStrategy: freezed == chunkingStrategy - ? _value.chunkingStrategy - : chunkingStrategy // ignore: cast_nullable_to_non_nullable - as ChunkingStrategyResponseParam?, + as VectorStoreFileBatchObjectStatus, + fileCounts: null == fileCounts + ? _value.fileCounts + : fileCounts // ignore: cast_nullable_to_non_nullable + as VectorStoreFileBatchObjectFileCounts, )); } } /// @nodoc @JsonSerializable() -class _$VectorStoreFileObjectImpl extends _VectorStoreFileObject { - const _$VectorStoreFileObjectImpl( +class _$VectorStoreFileBatchObjectImpl extends _VectorStoreFileBatchObject { + const _$VectorStoreFileBatchObjectImpl( {required this.id, required this.object, - @JsonKey(name: 'usage_bytes') required this.usageBytes, @JsonKey(name: 'created_at') required this.createdAt, @JsonKey(name: 'vector_store_id') required this.vectorStoreId, required this.status, - @JsonKey(name: 'last_error') required this.lastError, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - this.chunkingStrategy}) + @JsonKey(name: 'file_counts') required this.fileCounts}) : super._(); - factory _$VectorStoreFileObjectImpl.fromJson(Map json) => - _$$VectorStoreFileObjectImplFromJson(json); + factory _$VectorStoreFileBatchObjectImpl.fromJson( + Map json) => + _$$VectorStoreFileBatchObjectImplFromJson(json); /// The identifier, which can be referenced in API endpoints. @override final String id; - /// The object type, which is always `vector_store.file`. + /// The object type, which is always `vector_store.file_batch`. @override final String object; - /// The total vector store usage in bytes. Note that this may be different from the original file size. - @override - @JsonKey(name: 'usage_bytes') - final int usageBytes; - - /// The Unix timestamp (in seconds) for when the vector store file was created. + /// The Unix timestamp (in seconds) for when the vector store files batch was created. @override @JsonKey(name: 'created_at') final int createdAt; @@ -50563,1356 +47291,1179 @@ class _$VectorStoreFileObjectImpl extends _VectorStoreFileObject { @JsonKey(name: 'vector_store_id') final String vectorStoreId; - /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. 
- @override - final VectorStoreFileStatus status; - - /// The last error associated with this vector store file. Will be `null` if there are no errors. + /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. @override - @JsonKey(name: 'last_error') - final VectorStoreFileObjectLastError? lastError; + final VectorStoreFileBatchObjectStatus status; - /// The chunking strategy used to chunk the file(s). - /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + /// The number of files per status. @override - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - final ChunkingStrategyResponseParam? chunkingStrategy; + @JsonKey(name: 'file_counts') + final VectorStoreFileBatchObjectFileCounts fileCounts; @override String toString() { - return 'VectorStoreFileObject(id: $id, object: $object, usageBytes: $usageBytes, createdAt: $createdAt, vectorStoreId: $vectorStoreId, status: $status, lastError: $lastError, chunkingStrategy: $chunkingStrategy)'; + return 'VectorStoreFileBatchObject(id: $id, object: $object, createdAt: $createdAt, vectorStoreId: $vectorStoreId, status: $status, fileCounts: $fileCounts)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$VectorStoreFileObjectImpl && + other is _$VectorStoreFileBatchObjectImpl && (identical(other.id, id) || other.id == id) && (identical(other.object, object) || other.object == object) && - (identical(other.usageBytes, usageBytes) || - other.usageBytes == usageBytes) && (identical(other.createdAt, createdAt) || other.createdAt == createdAt) && (identical(other.vectorStoreId, vectorStoreId) || other.vectorStoreId == vectorStoreId) && (identical(other.status, status) || other.status == status) && - (identical(other.lastError, lastError) || - other.lastError == lastError) && - (identical(other.chunkingStrategy, chunkingStrategy) || - other.chunkingStrategy == chunkingStrategy)); + (identical(other.fileCounts, fileCounts) || + other.fileCounts == fileCounts)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, id, object, usageBytes, - createdAt, vectorStoreId, status, lastError, chunkingStrategy); + int get hashCode => Object.hash( + runtimeType, id, object, createdAt, vectorStoreId, status, fileCounts); - /// Create a copy of VectorStoreFileObject - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> - get copyWith => __$$VectorStoreFileObjectImplCopyWithImpl< - _$VectorStoreFileObjectImpl>(this, _$identity); + _$$VectorStoreFileBatchObjectImplCopyWith<_$VectorStoreFileBatchObjectImpl> + get copyWith => __$$VectorStoreFileBatchObjectImplCopyWithImpl< + _$VectorStoreFileBatchObjectImpl>(this, _$identity); @override Map toJson() { - return _$$VectorStoreFileObjectImplToJson( + return _$$VectorStoreFileBatchObjectImplToJson( this, ); } } -abstract class _VectorStoreFileObject extends VectorStoreFileObject { - const factory _VectorStoreFileObject( +abstract class _VectorStoreFileBatchObject extends VectorStoreFileBatchObject { + const factory _VectorStoreFileBatchObject( {required final String id, required final String object, - @JsonKey(name: 'usage_bytes') required final int usageBytes, @JsonKey(name: 'created_at') required final int createdAt, @JsonKey(name: 'vector_store_id') required final String vectorStoreId, - required final VectorStoreFileStatus status, - @JsonKey(name: 'last_error') - required final VectorStoreFileObjectLastError? lastError, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - final ChunkingStrategyResponseParam? chunkingStrategy}) = - _$VectorStoreFileObjectImpl; - const _VectorStoreFileObject._() : super._(); + required final VectorStoreFileBatchObjectStatus status, + @JsonKey(name: 'file_counts') + required final VectorStoreFileBatchObjectFileCounts fileCounts}) = + _$VectorStoreFileBatchObjectImpl; + const _VectorStoreFileBatchObject._() : super._(); - factory _VectorStoreFileObject.fromJson(Map json) = - _$VectorStoreFileObjectImpl.fromJson; + factory _VectorStoreFileBatchObject.fromJson(Map json) = + _$VectorStoreFileBatchObjectImpl.fromJson; - /// The identifier, which can be referenced in API endpoints. @override - String get id; - /// The object type, which is always `vector_store.file`. + /// The identifier, which can be referenced in API endpoints. + String get id; @override - String get object; - /// The total vector store usage in bytes. Note that this may be different from the original file size. + /// The object type, which is always `vector_store.file_batch`. + String get object; @override - @JsonKey(name: 'usage_bytes') - int get usageBytes; - /// The Unix timestamp (in seconds) for when the vector store file was created. - @override + /// The Unix timestamp (in seconds) for when the vector store files batch was created. @JsonKey(name: 'created_at') int get createdAt; + @override /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. - @override @JsonKey(name: 'vector_store_id') String get vectorStoreId; - - /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. - @override - VectorStoreFileStatus get status; - - /// The last error associated with this vector store file. Will be `null` if there are no errors. @override - @JsonKey(name: 'last_error') - VectorStoreFileObjectLastError? get lastError; - /// The chunking strategy used to chunk the file(s). 
- /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. + VectorStoreFileBatchObjectStatus get status; @override - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyResponseParam? get chunkingStrategy; - /// Create a copy of VectorStoreFileObject - /// with the given fields replaced by the non-null parameter values. + /// The number of files per status. + @JsonKey(name: 'file_counts') + VectorStoreFileBatchObjectFileCounts get fileCounts; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> + @JsonKey(ignore: true) + _$$VectorStoreFileBatchObjectImplCopyWith<_$VectorStoreFileBatchObjectImpl> get copyWith => throw _privateConstructorUsedError; } -VectorStoreFileObjectLastError _$VectorStoreFileObjectLastErrorFromJson( - Map json) { - return _VectorStoreFileObjectLastError.fromJson(json); +VectorStoreFileBatchObjectFileCounts + _$VectorStoreFileBatchObjectFileCountsFromJson(Map json) { + return _VectorStoreFileBatchObjectFileCounts.fromJson(json); } /// @nodoc -mixin _$VectorStoreFileObjectLastError { - /// One of `server_error` or `rate_limit_exceeded`. - VectorStoreFileObjectLastErrorCode get code => - throw _privateConstructorUsedError; +mixin _$VectorStoreFileBatchObjectFileCounts { + /// The number of files that are currently being processed. + @JsonKey(name: 'in_progress') + int get inProgress => throw _privateConstructorUsedError; - /// A human-readable description of the error. - String get message => throw _privateConstructorUsedError; + /// The number of files that have been processed. + int get completed => throw _privateConstructorUsedError; - /// Serializes this VectorStoreFileObjectLastError to a JSON map. - Map toJson() => throw _privateConstructorUsedError; + /// The number of files that have failed to process. + int get failed => throw _privateConstructorUsedError; - /// Create a copy of VectorStoreFileObjectLastError - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $VectorStoreFileObjectLastErrorCopyWith + /// The number of files that where cancelled. + int get cancelled => throw _privateConstructorUsedError; + + /// The total number of files. 
+ int get total => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $VectorStoreFileBatchObjectFileCountsCopyWith< + VectorStoreFileBatchObjectFileCounts> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $VectorStoreFileObjectLastErrorCopyWith<$Res> { - factory $VectorStoreFileObjectLastErrorCopyWith( - VectorStoreFileObjectLastError value, - $Res Function(VectorStoreFileObjectLastError) then) = - _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, - VectorStoreFileObjectLastError>; +abstract class $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> { + factory $VectorStoreFileBatchObjectFileCountsCopyWith( + VectorStoreFileBatchObjectFileCounts value, + $Res Function(VectorStoreFileBatchObjectFileCounts) then) = + _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, + VectorStoreFileBatchObjectFileCounts>; @useResult - $Res call({VectorStoreFileObjectLastErrorCode code, String message}); + $Res call( + {@JsonKey(name: 'in_progress') int inProgress, + int completed, + int failed, + int cancelled, + int total}); } /// @nodoc -class _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, - $Val extends VectorStoreFileObjectLastError> - implements $VectorStoreFileObjectLastErrorCopyWith<$Res> { - _$VectorStoreFileObjectLastErrorCopyWithImpl(this._value, this._then); +class _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, + $Val extends VectorStoreFileBatchObjectFileCounts> + implements $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> { + _$VectorStoreFileBatchObjectFileCountsCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of VectorStoreFileObjectLastError - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? code = null, - Object? message = null, + Object? inProgress = null, + Object? completed = null, + Object? failed = null, + Object? cancelled = null, + Object? total = null, }) { return _then(_value.copyWith( - code: null == code - ? _value.code - : code // ignore: cast_nullable_to_non_nullable - as VectorStoreFileObjectLastErrorCode, - message: null == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as String, + inProgress: null == inProgress + ? _value.inProgress + : inProgress // ignore: cast_nullable_to_non_nullable + as int, + completed: null == completed + ? _value.completed + : completed // ignore: cast_nullable_to_non_nullable + as int, + failed: null == failed + ? _value.failed + : failed // ignore: cast_nullable_to_non_nullable + as int, + cancelled: null == cancelled + ? _value.cancelled + : cancelled // ignore: cast_nullable_to_non_nullable + as int, + total: null == total + ? 
_value.total + : total // ignore: cast_nullable_to_non_nullable + as int, ) as $Val); } } /// @nodoc -abstract class _$$VectorStoreFileObjectLastErrorImplCopyWith<$Res> - implements $VectorStoreFileObjectLastErrorCopyWith<$Res> { - factory _$$VectorStoreFileObjectLastErrorImplCopyWith( - _$VectorStoreFileObjectLastErrorImpl value, - $Res Function(_$VectorStoreFileObjectLastErrorImpl) then) = - __$$VectorStoreFileObjectLastErrorImplCopyWithImpl<$Res>; +abstract class _$$VectorStoreFileBatchObjectFileCountsImplCopyWith<$Res> + implements $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> { + factory _$$VectorStoreFileBatchObjectFileCountsImplCopyWith( + _$VectorStoreFileBatchObjectFileCountsImpl value, + $Res Function(_$VectorStoreFileBatchObjectFileCountsImpl) then) = + __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl<$Res>; @override @useResult - $Res call({VectorStoreFileObjectLastErrorCode code, String message}); + $Res call( + {@JsonKey(name: 'in_progress') int inProgress, + int completed, + int failed, + int cancelled, + int total}); } /// @nodoc -class __$$VectorStoreFileObjectLastErrorImplCopyWithImpl<$Res> - extends _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, - _$VectorStoreFileObjectLastErrorImpl> - implements _$$VectorStoreFileObjectLastErrorImplCopyWith<$Res> { - __$$VectorStoreFileObjectLastErrorImplCopyWithImpl( - _$VectorStoreFileObjectLastErrorImpl _value, - $Res Function(_$VectorStoreFileObjectLastErrorImpl) _then) +class __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl<$Res> + extends _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, + _$VectorStoreFileBatchObjectFileCountsImpl> + implements _$$VectorStoreFileBatchObjectFileCountsImplCopyWith<$Res> { + __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl( + _$VectorStoreFileBatchObjectFileCountsImpl _value, + $Res Function(_$VectorStoreFileBatchObjectFileCountsImpl) _then) : super(_value, _then); - /// Create a copy of VectorStoreFileObjectLastError - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? code = null, - Object? message = null, + Object? inProgress = null, + Object? completed = null, + Object? failed = null, + Object? cancelled = null, + Object? total = null, }) { - return _then(_$VectorStoreFileObjectLastErrorImpl( - code: null == code - ? _value.code - : code // ignore: cast_nullable_to_non_nullable - as VectorStoreFileObjectLastErrorCode, - message: null == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as String, + return _then(_$VectorStoreFileBatchObjectFileCountsImpl( + inProgress: null == inProgress + ? _value.inProgress + : inProgress // ignore: cast_nullable_to_non_nullable + as int, + completed: null == completed + ? _value.completed + : completed // ignore: cast_nullable_to_non_nullable + as int, + failed: null == failed + ? _value.failed + : failed // ignore: cast_nullable_to_non_nullable + as int, + cancelled: null == cancelled + ? _value.cancelled + : cancelled // ignore: cast_nullable_to_non_nullable + as int, + total: null == total + ? 
_value.total + : total // ignore: cast_nullable_to_non_nullable + as int, )); } } /// @nodoc @JsonSerializable() -class _$VectorStoreFileObjectLastErrorImpl - extends _VectorStoreFileObjectLastError { - const _$VectorStoreFileObjectLastErrorImpl( - {required this.code, required this.message}) +class _$VectorStoreFileBatchObjectFileCountsImpl + extends _VectorStoreFileBatchObjectFileCounts { + const _$VectorStoreFileBatchObjectFileCountsImpl( + {@JsonKey(name: 'in_progress') required this.inProgress, + required this.completed, + required this.failed, + required this.cancelled, + required this.total}) : super._(); - factory _$VectorStoreFileObjectLastErrorImpl.fromJson( + factory _$VectorStoreFileBatchObjectFileCountsImpl.fromJson( Map json) => - _$$VectorStoreFileObjectLastErrorImplFromJson(json); + _$$VectorStoreFileBatchObjectFileCountsImplFromJson(json); - /// One of `server_error` or `rate_limit_exceeded`. + /// The number of files that are currently being processed. @override - final VectorStoreFileObjectLastErrorCode code; + @JsonKey(name: 'in_progress') + final int inProgress; - /// A human-readable description of the error. + /// The number of files that have been processed. @override - final String message; + final int completed; + + /// The number of files that have failed to process. + @override + final int failed; + + /// The number of files that where cancelled. + @override + final int cancelled; + + /// The total number of files. + @override + final int total; @override String toString() { - return 'VectorStoreFileObjectLastError(code: $code, message: $message)'; + return 'VectorStoreFileBatchObjectFileCounts(inProgress: $inProgress, completed: $completed, failed: $failed, cancelled: $cancelled, total: $total)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$VectorStoreFileObjectLastErrorImpl && - (identical(other.code, code) || other.code == code) && - (identical(other.message, message) || other.message == message)); + other is _$VectorStoreFileBatchObjectFileCountsImpl && + (identical(other.inProgress, inProgress) || + other.inProgress == inProgress) && + (identical(other.completed, completed) || + other.completed == completed) && + (identical(other.failed, failed) || other.failed == failed) && + (identical(other.cancelled, cancelled) || + other.cancelled == cancelled) && + (identical(other.total, total) || other.total == total)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, code, message); + int get hashCode => + Object.hash(runtimeType, inProgress, completed, failed, cancelled, total); - /// Create a copy of VectorStoreFileObjectLastError - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$VectorStoreFileObjectLastErrorImplCopyWith< - _$VectorStoreFileObjectLastErrorImpl> - get copyWith => __$$VectorStoreFileObjectLastErrorImplCopyWithImpl< - _$VectorStoreFileObjectLastErrorImpl>(this, _$identity); + _$$VectorStoreFileBatchObjectFileCountsImplCopyWith< + _$VectorStoreFileBatchObjectFileCountsImpl> + get copyWith => __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl< + _$VectorStoreFileBatchObjectFileCountsImpl>(this, _$identity); @override Map toJson() { - return _$$VectorStoreFileObjectLastErrorImplToJson( + return _$$VectorStoreFileBatchObjectFileCountsImplToJson( this, ); } } -abstract class _VectorStoreFileObjectLastError - extends VectorStoreFileObjectLastError { - const factory _VectorStoreFileObjectLastError( - {required final VectorStoreFileObjectLastErrorCode code, - required final String message}) = _$VectorStoreFileObjectLastErrorImpl; - const _VectorStoreFileObjectLastError._() : super._(); +abstract class _VectorStoreFileBatchObjectFileCounts + extends VectorStoreFileBatchObjectFileCounts { + const factory _VectorStoreFileBatchObjectFileCounts( + {@JsonKey(name: 'in_progress') required final int inProgress, + required final int completed, + required final int failed, + required final int cancelled, + required final int total}) = _$VectorStoreFileBatchObjectFileCountsImpl; + const _VectorStoreFileBatchObjectFileCounts._() : super._(); - factory _VectorStoreFileObjectLastError.fromJson(Map json) = - _$VectorStoreFileObjectLastErrorImpl.fromJson; + factory _VectorStoreFileBatchObjectFileCounts.fromJson( + Map json) = + _$VectorStoreFileBatchObjectFileCountsImpl.fromJson; - /// One of `server_error` or `rate_limit_exceeded`. @override - VectorStoreFileObjectLastErrorCode get code; - /// A human-readable description of the error. + /// The number of files that are currently being processed. + @JsonKey(name: 'in_progress') + int get inProgress; @override - String get message; - /// Create a copy of VectorStoreFileObjectLastError - /// with the given fields replaced by the non-null parameter values. + /// The number of files that have been processed. + int get completed; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$VectorStoreFileObjectLastErrorImplCopyWith< - _$VectorStoreFileObjectLastErrorImpl> + + /// The number of files that have failed to process. + int get failed; + @override + + /// The number of files that where cancelled. + int get cancelled; + @override + + /// The total number of files. + int get total; + @override + @JsonKey(ignore: true) + _$$VectorStoreFileBatchObjectFileCountsImplCopyWith< + _$VectorStoreFileBatchObjectFileCountsImpl> get copyWith => throw _privateConstructorUsedError; } -StaticChunkingStrategy _$StaticChunkingStrategyFromJson( +CreateVectorStoreFileBatchRequest _$CreateVectorStoreFileBatchRequestFromJson( Map json) { - return _StaticChunkingStrategy.fromJson(json); + return _CreateVectorStoreFileBatchRequest.fromJson(json); } /// @nodoc -mixin _$StaticChunkingStrategy { - /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the - /// maximum value is `4096`. - @JsonKey(name: 'max_chunk_size_tokens') - int get maxChunkSizeTokens => throw _privateConstructorUsedError; - - /// The number of tokens that overlap between chunks. The default value is `400`. 
- /// - /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. - @JsonKey(name: 'chunk_overlap_tokens') - int get chunkOverlapTokens => throw _privateConstructorUsedError; +mixin _$CreateVectorStoreFileBatchRequest { + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + @JsonKey(name: 'file_ids') + List get fileIds => throw _privateConstructorUsedError; - /// Serializes this StaticChunkingStrategy to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of StaticChunkingStrategy - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $StaticChunkingStrategyCopyWith get copyWith => - throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateVectorStoreFileBatchRequestCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $StaticChunkingStrategyCopyWith<$Res> { - factory $StaticChunkingStrategyCopyWith(StaticChunkingStrategy value, - $Res Function(StaticChunkingStrategy) then) = - _$StaticChunkingStrategyCopyWithImpl<$Res, StaticChunkingStrategy>; +abstract class $CreateVectorStoreFileBatchRequestCopyWith<$Res> { + factory $CreateVectorStoreFileBatchRequestCopyWith( + CreateVectorStoreFileBatchRequest value, + $Res Function(CreateVectorStoreFileBatchRequest) then) = + _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, + CreateVectorStoreFileBatchRequest>; @useResult - $Res call( - {@JsonKey(name: 'max_chunk_size_tokens') int maxChunkSizeTokens, - @JsonKey(name: 'chunk_overlap_tokens') int chunkOverlapTokens}); + $Res call({@JsonKey(name: 'file_ids') List fileIds}); } /// @nodoc -class _$StaticChunkingStrategyCopyWithImpl<$Res, - $Val extends StaticChunkingStrategy> - implements $StaticChunkingStrategyCopyWith<$Res> { - _$StaticChunkingStrategyCopyWithImpl(this._value, this._then); +class _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, + $Val extends CreateVectorStoreFileBatchRequest> + implements $CreateVectorStoreFileBatchRequestCopyWith<$Res> { + _$CreateVectorStoreFileBatchRequestCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of StaticChunkingStrategy - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? maxChunkSizeTokens = null, - Object? chunkOverlapTokens = null, + Object? fileIds = null, }) { return _then(_value.copyWith( - maxChunkSizeTokens: null == maxChunkSizeTokens - ? _value.maxChunkSizeTokens - : maxChunkSizeTokens // ignore: cast_nullable_to_non_nullable - as int, - chunkOverlapTokens: null == chunkOverlapTokens - ? _value.chunkOverlapTokens - : chunkOverlapTokens // ignore: cast_nullable_to_non_nullable - as int, + fileIds: null == fileIds + ? 
_value.fileIds + : fileIds // ignore: cast_nullable_to_non_nullable + as List, ) as $Val); } } /// @nodoc -abstract class _$$StaticChunkingStrategyImplCopyWith<$Res> - implements $StaticChunkingStrategyCopyWith<$Res> { - factory _$$StaticChunkingStrategyImplCopyWith( - _$StaticChunkingStrategyImpl value, - $Res Function(_$StaticChunkingStrategyImpl) then) = - __$$StaticChunkingStrategyImplCopyWithImpl<$Res>; +abstract class _$$CreateVectorStoreFileBatchRequestImplCopyWith<$Res> + implements $CreateVectorStoreFileBatchRequestCopyWith<$Res> { + factory _$$CreateVectorStoreFileBatchRequestImplCopyWith( + _$CreateVectorStoreFileBatchRequestImpl value, + $Res Function(_$CreateVectorStoreFileBatchRequestImpl) then) = + __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(name: 'max_chunk_size_tokens') int maxChunkSizeTokens, - @JsonKey(name: 'chunk_overlap_tokens') int chunkOverlapTokens}); + $Res call({@JsonKey(name: 'file_ids') List fileIds}); } /// @nodoc -class __$$StaticChunkingStrategyImplCopyWithImpl<$Res> - extends _$StaticChunkingStrategyCopyWithImpl<$Res, - _$StaticChunkingStrategyImpl> - implements _$$StaticChunkingStrategyImplCopyWith<$Res> { - __$$StaticChunkingStrategyImplCopyWithImpl( - _$StaticChunkingStrategyImpl _value, - $Res Function(_$StaticChunkingStrategyImpl) _then) +class __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res> + extends _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, + _$CreateVectorStoreFileBatchRequestImpl> + implements _$$CreateVectorStoreFileBatchRequestImplCopyWith<$Res> { + __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl( + _$CreateVectorStoreFileBatchRequestImpl _value, + $Res Function(_$CreateVectorStoreFileBatchRequestImpl) _then) : super(_value, _then); - /// Create a copy of StaticChunkingStrategy - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? maxChunkSizeTokens = null, - Object? chunkOverlapTokens = null, + Object? fileIds = null, }) { - return _then(_$StaticChunkingStrategyImpl( - maxChunkSizeTokens: null == maxChunkSizeTokens - ? _value.maxChunkSizeTokens - : maxChunkSizeTokens // ignore: cast_nullable_to_non_nullable - as int, - chunkOverlapTokens: null == chunkOverlapTokens - ? _value.chunkOverlapTokens - : chunkOverlapTokens // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$CreateVectorStoreFileBatchRequestImpl( + fileIds: null == fileIds + ? _value._fileIds + : fileIds // ignore: cast_nullable_to_non_nullable + as List, )); } } /// @nodoc @JsonSerializable() -class _$StaticChunkingStrategyImpl extends _StaticChunkingStrategy { - const _$StaticChunkingStrategyImpl( - {@JsonKey(name: 'max_chunk_size_tokens') required this.maxChunkSizeTokens, - @JsonKey(name: 'chunk_overlap_tokens') required this.chunkOverlapTokens}) - : super._(); +class _$CreateVectorStoreFileBatchRequestImpl + extends _CreateVectorStoreFileBatchRequest { + const _$CreateVectorStoreFileBatchRequestImpl( + {@JsonKey(name: 'file_ids') required final List fileIds}) + : _fileIds = fileIds, + super._(); - factory _$StaticChunkingStrategyImpl.fromJson(Map json) => - _$$StaticChunkingStrategyImplFromJson(json); + factory _$CreateVectorStoreFileBatchRequestImpl.fromJson( + Map json) => + _$$CreateVectorStoreFileBatchRequestImplFromJson(json); - /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the - /// maximum value is `4096`. 
- @override - @JsonKey(name: 'max_chunk_size_tokens') - final int maxChunkSizeTokens; + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + final List _fileIds; - /// The number of tokens that overlap between chunks. The default value is `400`. - /// - /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. @override - @JsonKey(name: 'chunk_overlap_tokens') - final int chunkOverlapTokens; + @JsonKey(name: 'file_ids') + List get fileIds { + if (_fileIds is EqualUnmodifiableListView) return _fileIds; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_fileIds); + } @override String toString() { - return 'StaticChunkingStrategy(maxChunkSizeTokens: $maxChunkSizeTokens, chunkOverlapTokens: $chunkOverlapTokens)'; + return 'CreateVectorStoreFileBatchRequest(fileIds: $fileIds)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$StaticChunkingStrategyImpl && - (identical(other.maxChunkSizeTokens, maxChunkSizeTokens) || - other.maxChunkSizeTokens == maxChunkSizeTokens) && - (identical(other.chunkOverlapTokens, chunkOverlapTokens) || - other.chunkOverlapTokens == chunkOverlapTokens)); + other is _$CreateVectorStoreFileBatchRequestImpl && + const DeepCollectionEquality().equals(other._fileIds, _fileIds)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => - Object.hash(runtimeType, maxChunkSizeTokens, chunkOverlapTokens); + Object.hash(runtimeType, const DeepCollectionEquality().hash(_fileIds)); - /// Create a copy of StaticChunkingStrategy - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$StaticChunkingStrategyImplCopyWith<_$StaticChunkingStrategyImpl> - get copyWith => __$$StaticChunkingStrategyImplCopyWithImpl< - _$StaticChunkingStrategyImpl>(this, _$identity); + _$$CreateVectorStoreFileBatchRequestImplCopyWith< + _$CreateVectorStoreFileBatchRequestImpl> + get copyWith => __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl< + _$CreateVectorStoreFileBatchRequestImpl>(this, _$identity); @override Map toJson() { - return _$$StaticChunkingStrategyImplToJson( + return _$$CreateVectorStoreFileBatchRequestImplToJson( this, ); } } -abstract class _StaticChunkingStrategy extends StaticChunkingStrategy { - const factory _StaticChunkingStrategy( - {@JsonKey(name: 'max_chunk_size_tokens') - required final int maxChunkSizeTokens, - @JsonKey(name: 'chunk_overlap_tokens') - required final int chunkOverlapTokens}) = _$StaticChunkingStrategyImpl; - const _StaticChunkingStrategy._() : super._(); - - factory _StaticChunkingStrategy.fromJson(Map json) = - _$StaticChunkingStrategyImpl.fromJson; +abstract class _CreateVectorStoreFileBatchRequest + extends CreateVectorStoreFileBatchRequest { + const factory _CreateVectorStoreFileBatchRequest( + {@JsonKey(name: 'file_ids') required final List fileIds}) = + _$CreateVectorStoreFileBatchRequestImpl; + const _CreateVectorStoreFileBatchRequest._() : super._(); - /// The maximum number of tokens in each chunk. The default value is `800`. 
The minimum value is `100` and the - /// maximum value is `4096`. - @override - @JsonKey(name: 'max_chunk_size_tokens') - int get maxChunkSizeTokens; + factory _CreateVectorStoreFileBatchRequest.fromJson( + Map json) = + _$CreateVectorStoreFileBatchRequestImpl.fromJson; - /// The number of tokens that overlap between chunks. The default value is `400`. - /// - /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. @override - @JsonKey(name: 'chunk_overlap_tokens') - int get chunkOverlapTokens; - /// Create a copy of StaticChunkingStrategy - /// with the given fields replaced by the non-null parameter values. + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + @JsonKey(name: 'file_ids') + List get fileIds; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$StaticChunkingStrategyImplCopyWith<_$StaticChunkingStrategyImpl> + @JsonKey(ignore: true) + _$$CreateVectorStoreFileBatchRequestImplCopyWith< + _$CreateVectorStoreFileBatchRequestImpl> get copyWith => throw _privateConstructorUsedError; } -CreateVectorStoreFileRequest _$CreateVectorStoreFileRequestFromJson( - Map json) { - return _CreateVectorStoreFileRequest.fromJson(json); +Error _$ErrorFromJson(Map json) { + return _Error.fromJson(json); } /// @nodoc -mixin _$CreateVectorStoreFileRequest { - /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. - @JsonKey(name: 'file_id') - String get fileId => throw _privateConstructorUsedError; +mixin _$Error { + /// The error code. + String? get code => throw _privateConstructorUsedError; - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? get chunkingStrategy => - throw _privateConstructorUsedError; + /// A human-readable description of the error. + String get message => throw _privateConstructorUsedError; - /// Serializes this CreateVectorStoreFileRequest to a JSON map. - Map toJson() => throw _privateConstructorUsedError; + /// The parameter in the request that caused the error. + String? get param => throw _privateConstructorUsedError; - /// Create a copy of CreateVectorStoreFileRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $CreateVectorStoreFileRequestCopyWith - get copyWith => throw _privateConstructorUsedError; + /// The type of error. 
+ String get type => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ErrorCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $CreateVectorStoreFileRequestCopyWith<$Res> { - factory $CreateVectorStoreFileRequestCopyWith( - CreateVectorStoreFileRequest value, - $Res Function(CreateVectorStoreFileRequest) then) = - _$CreateVectorStoreFileRequestCopyWithImpl<$Res, - CreateVectorStoreFileRequest>; +abstract class $ErrorCopyWith<$Res> { + factory $ErrorCopyWith(Error value, $Res Function(Error) then) = + _$ErrorCopyWithImpl<$Res, Error>; @useResult - $Res call( - {@JsonKey(name: 'file_id') String fileId, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? chunkingStrategy}); - - $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; + $Res call({String? code, String message, String? param, String type}); } /// @nodoc -class _$CreateVectorStoreFileRequestCopyWithImpl<$Res, - $Val extends CreateVectorStoreFileRequest> - implements $CreateVectorStoreFileRequestCopyWith<$Res> { - _$CreateVectorStoreFileRequestCopyWithImpl(this._value, this._then); +class _$ErrorCopyWithImpl<$Res, $Val extends Error> + implements $ErrorCopyWith<$Res> { + _$ErrorCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateVectorStoreFileRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileId = null, - Object? chunkingStrategy = freezed, + Object? code = freezed, + Object? message = null, + Object? param = freezed, + Object? type = null, }) { return _then(_value.copyWith( - fileId: null == fileId - ? _value.fileId - : fileId // ignore: cast_nullable_to_non_nullable + code: freezed == code + ? _value.code + : code // ignore: cast_nullable_to_non_nullable + as String?, + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String, + param: freezed == param + ? _value.param + : param // ignore: cast_nullable_to_non_nullable + as String?, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable as String, - chunkingStrategy: freezed == chunkingStrategy - ? _value.chunkingStrategy - : chunkingStrategy // ignore: cast_nullable_to_non_nullable - as ChunkingStrategyRequestParam?, ) as $Val); } - - /// Create a copy of CreateVectorStoreFileRequest - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $ChunkingStrategyRequestParamCopyWith<$Res>? 
get chunkingStrategy { - if (_value.chunkingStrategy == null) { - return null; - } - - return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, - (value) { - return _then(_value.copyWith(chunkingStrategy: value) as $Val); - }); - } } /// @nodoc -abstract class _$$CreateVectorStoreFileRequestImplCopyWith<$Res> - implements $CreateVectorStoreFileRequestCopyWith<$Res> { - factory _$$CreateVectorStoreFileRequestImplCopyWith( - _$CreateVectorStoreFileRequestImpl value, - $Res Function(_$CreateVectorStoreFileRequestImpl) then) = - __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res>; +abstract class _$$ErrorImplCopyWith<$Res> implements $ErrorCopyWith<$Res> { + factory _$$ErrorImplCopyWith( + _$ErrorImpl value, $Res Function(_$ErrorImpl) then) = + __$$ErrorImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(name: 'file_id') String fileId, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? chunkingStrategy}); - - @override - $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; + $Res call({String? code, String message, String? param, String type}); } /// @nodoc -class __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res> - extends _$CreateVectorStoreFileRequestCopyWithImpl<$Res, - _$CreateVectorStoreFileRequestImpl> - implements _$$CreateVectorStoreFileRequestImplCopyWith<$Res> { - __$$CreateVectorStoreFileRequestImplCopyWithImpl( - _$CreateVectorStoreFileRequestImpl _value, - $Res Function(_$CreateVectorStoreFileRequestImpl) _then) +class __$$ErrorImplCopyWithImpl<$Res> + extends _$ErrorCopyWithImpl<$Res, _$ErrorImpl> + implements _$$ErrorImplCopyWith<$Res> { + __$$ErrorImplCopyWithImpl( + _$ErrorImpl _value, $Res Function(_$ErrorImpl) _then) : super(_value, _then); - /// Create a copy of CreateVectorStoreFileRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileId = null, - Object? chunkingStrategy = freezed, + Object? code = freezed, + Object? message = null, + Object? param = freezed, + Object? type = null, }) { - return _then(_$CreateVectorStoreFileRequestImpl( - fileId: null == fileId - ? _value.fileId - : fileId // ignore: cast_nullable_to_non_nullable + return _then(_$ErrorImpl( + code: freezed == code + ? _value.code + : code // ignore: cast_nullable_to_non_nullable + as String?, + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String, + param: freezed == param + ? _value.param + : param // ignore: cast_nullable_to_non_nullable + as String?, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable as String, - chunkingStrategy: freezed == chunkingStrategy - ? 
_value.chunkingStrategy - : chunkingStrategy // ignore: cast_nullable_to_non_nullable - as ChunkingStrategyRequestParam?, )); } } /// @nodoc @JsonSerializable() -class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { - const _$CreateVectorStoreFileRequestImpl( - {@JsonKey(name: 'file_id') required this.fileId, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - this.chunkingStrategy}) +class _$ErrorImpl extends _Error { + const _$ErrorImpl( + {required this.code, + required this.message, + required this.param, + required this.type}) : super._(); - factory _$CreateVectorStoreFileRequestImpl.fromJson( - Map json) => - _$$CreateVectorStoreFileRequestImplFromJson(json); + factory _$ErrorImpl.fromJson(Map json) => + _$$ErrorImplFromJson(json); - /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. + /// The error code. @override - @JsonKey(name: 'file_id') - final String fileId; + final String? code; - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + /// A human-readable description of the error. + @override + final String message; + + /// The parameter in the request that caused the error. + @override + final String? param; + + /// The type of error. @override - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - final ChunkingStrategyRequestParam? chunkingStrategy; + final String type; @override String toString() { - return 'CreateVectorStoreFileRequest(fileId: $fileId, chunkingStrategy: $chunkingStrategy)'; + return 'Error(code: $code, message: $message, param: $param, type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$CreateVectorStoreFileRequestImpl && - (identical(other.fileId, fileId) || other.fileId == fileId) && - (identical(other.chunkingStrategy, chunkingStrategy) || - other.chunkingStrategy == chunkingStrategy)); + other is _$ErrorImpl && + (identical(other.code, code) || other.code == code) && + (identical(other.message, message) || other.message == message) && + (identical(other.param, param) || other.param == param) && + (identical(other.type, type) || other.type == type)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, fileId, chunkingStrategy); + int get hashCode => Object.hash(runtimeType, code, message, param, type); - /// Create a copy of CreateVectorStoreFileRequest - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$CreateVectorStoreFileRequestImplCopyWith< - _$CreateVectorStoreFileRequestImpl> - get copyWith => __$$CreateVectorStoreFileRequestImplCopyWithImpl< - _$CreateVectorStoreFileRequestImpl>(this, _$identity); + _$$ErrorImplCopyWith<_$ErrorImpl> get copyWith => + __$$ErrorImplCopyWithImpl<_$ErrorImpl>(this, _$identity); @override Map toJson() { - return _$$CreateVectorStoreFileRequestImplToJson( + return _$$ErrorImplToJson( this, ); } } -abstract class _CreateVectorStoreFileRequest - extends CreateVectorStoreFileRequest { - const factory _CreateVectorStoreFileRequest( - {@JsonKey(name: 'file_id') required final String fileId, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - final ChunkingStrategyRequestParam? chunkingStrategy}) = - _$CreateVectorStoreFileRequestImpl; - const _CreateVectorStoreFileRequest._() : super._(); +abstract class _Error extends Error { + const factory _Error( + {required final String? code, + required final String message, + required final String? param, + required final String type}) = _$ErrorImpl; + const _Error._() : super._(); - factory _CreateVectorStoreFileRequest.fromJson(Map json) = - _$CreateVectorStoreFileRequestImpl.fromJson; + factory _Error.fromJson(Map json) = _$ErrorImpl.fromJson; - /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. @override - @JsonKey(name: 'file_id') - String get fileId; - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + /// The error code. + String? get code; + @override + + /// A human-readable description of the error. + String get message; @override - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? get chunkingStrategy; - /// Create a copy of CreateVectorStoreFileRequest - /// with the given fields replaced by the non-null parameter values. + /// The parameter in the request that caused the error. + String? get param; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$CreateVectorStoreFileRequestImplCopyWith< - _$CreateVectorStoreFileRequestImpl> - get copyWith => throw _privateConstructorUsedError; + + /// The type of error. + String get type; + @override + @JsonKey(ignore: true) + _$$ErrorImplCopyWith<_$ErrorImpl> get copyWith => + throw _privateConstructorUsedError; } -ListVectorStoreFilesResponse _$ListVectorStoreFilesResponseFromJson( - Map json) { - return _ListVectorStoreFilesResponse.fromJson(json); +CreateBatchRequest _$CreateBatchRequestFromJson(Map json) { + return _CreateBatchRequest.fromJson(json); } /// @nodoc -mixin _$ListVectorStoreFilesResponse { - /// The object type, which is always `list`. - String get object => throw _privateConstructorUsedError; - - /// A list of message files. - List get data => throw _privateConstructorUsedError; +mixin _$CreateBatchRequest { + /// The ID of an uploaded file that contains requests for the new batch. + /// + /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. + /// + /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. 
+ @JsonKey(name: 'input_file_id') + String get inputFileId => throw _privateConstructorUsedError; - /// The ID of the first message file in the list. - @JsonKey(name: 'first_id') - String get firstId => throw _privateConstructorUsedError; + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. + BatchEndpoint get endpoint => throw _privateConstructorUsedError; - /// The ID of the last message file in the list. - @JsonKey(name: 'last_id') - String get lastId => throw _privateConstructorUsedError; + /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @JsonKey(name: 'completion_window') + BatchCompletionWindow get completionWindow => + throw _privateConstructorUsedError; - /// Whether there are more message files available. - @JsonKey(name: 'has_more') - bool get hasMore => throw _privateConstructorUsedError; + /// Optional custom metadata for the batch. + @JsonKey(includeIfNull: false) + Map? get metadata => throw _privateConstructorUsedError; - /// Serializes this ListVectorStoreFilesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ListVectorStoreFilesResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $ListVectorStoreFilesResponseCopyWith - get copyWith => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateBatchRequestCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $ListVectorStoreFilesResponseCopyWith<$Res> { - factory $ListVectorStoreFilesResponseCopyWith( - ListVectorStoreFilesResponse value, - $Res Function(ListVectorStoreFilesResponse) then) = - _$ListVectorStoreFilesResponseCopyWithImpl<$Res, - ListVectorStoreFilesResponse>; +abstract class $CreateBatchRequestCopyWith<$Res> { + factory $CreateBatchRequestCopyWith( + CreateBatchRequest value, $Res Function(CreateBatchRequest) then) = + _$CreateBatchRequestCopyWithImpl<$Res, CreateBatchRequest>; @useResult $Res call( - {String object, - List data, - @JsonKey(name: 'first_id') String firstId, - @JsonKey(name: 'last_id') String lastId, - @JsonKey(name: 'has_more') bool hasMore}); + {@JsonKey(name: 'input_file_id') String inputFileId, + BatchEndpoint endpoint, + @JsonKey(name: 'completion_window') + BatchCompletionWindow completionWindow, + @JsonKey(includeIfNull: false) Map? metadata}); } /// @nodoc -class _$ListVectorStoreFilesResponseCopyWithImpl<$Res, - $Val extends ListVectorStoreFilesResponse> - implements $ListVectorStoreFilesResponseCopyWith<$Res> { - _$ListVectorStoreFilesResponseCopyWithImpl(this._value, this._then); +class _$CreateBatchRequestCopyWithImpl<$Res, $Val extends CreateBatchRequest> + implements $CreateBatchRequestCopyWith<$Res> { + _$CreateBatchRequestCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ListVectorStoreFilesResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? object = null, - Object? data = null, - Object? firstId = null, - Object? lastId = null, - Object? hasMore = null, + Object? inputFileId = null, + Object? endpoint = null, + Object? 
completionWindow = null, + Object? metadata = freezed, }) { return _then(_value.copyWith( - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as String, - data: null == data - ? _value.data - : data // ignore: cast_nullable_to_non_nullable - as List, - firstId: null == firstId - ? _value.firstId - : firstId // ignore: cast_nullable_to_non_nullable - as String, - lastId: null == lastId - ? _value.lastId - : lastId // ignore: cast_nullable_to_non_nullable + inputFileId: null == inputFileId + ? _value.inputFileId + : inputFileId // ignore: cast_nullable_to_non_nullable as String, - hasMore: null == hasMore - ? _value.hasMore - : hasMore // ignore: cast_nullable_to_non_nullable - as bool, + endpoint: null == endpoint + ? _value.endpoint + : endpoint // ignore: cast_nullable_to_non_nullable + as BatchEndpoint, + completionWindow: null == completionWindow + ? _value.completionWindow + : completionWindow // ignore: cast_nullable_to_non_nullable + as BatchCompletionWindow, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as Map?, ) as $Val); } } /// @nodoc -abstract class _$$ListVectorStoreFilesResponseImplCopyWith<$Res> - implements $ListVectorStoreFilesResponseCopyWith<$Res> { - factory _$$ListVectorStoreFilesResponseImplCopyWith( - _$ListVectorStoreFilesResponseImpl value, - $Res Function(_$ListVectorStoreFilesResponseImpl) then) = - __$$ListVectorStoreFilesResponseImplCopyWithImpl<$Res>; +abstract class _$$CreateBatchRequestImplCopyWith<$Res> + implements $CreateBatchRequestCopyWith<$Res> { + factory _$$CreateBatchRequestImplCopyWith(_$CreateBatchRequestImpl value, + $Res Function(_$CreateBatchRequestImpl) then) = + __$$CreateBatchRequestImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String object, - List data, - @JsonKey(name: 'first_id') String firstId, - @JsonKey(name: 'last_id') String lastId, - @JsonKey(name: 'has_more') bool hasMore}); + {@JsonKey(name: 'input_file_id') String inputFileId, + BatchEndpoint endpoint, + @JsonKey(name: 'completion_window') + BatchCompletionWindow completionWindow, + @JsonKey(includeIfNull: false) Map? metadata}); } /// @nodoc -class __$$ListVectorStoreFilesResponseImplCopyWithImpl<$Res> - extends _$ListVectorStoreFilesResponseCopyWithImpl<$Res, - _$ListVectorStoreFilesResponseImpl> - implements _$$ListVectorStoreFilesResponseImplCopyWith<$Res> { - __$$ListVectorStoreFilesResponseImplCopyWithImpl( - _$ListVectorStoreFilesResponseImpl _value, - $Res Function(_$ListVectorStoreFilesResponseImpl) _then) +class __$$CreateBatchRequestImplCopyWithImpl<$Res> + extends _$CreateBatchRequestCopyWithImpl<$Res, _$CreateBatchRequestImpl> + implements _$$CreateBatchRequestImplCopyWith<$Res> { + __$$CreateBatchRequestImplCopyWithImpl(_$CreateBatchRequestImpl _value, + $Res Function(_$CreateBatchRequestImpl) _then) : super(_value, _then); - /// Create a copy of ListVectorStoreFilesResponse - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? object = null, - Object? data = null, - Object? firstId = null, - Object? lastId = null, - Object? hasMore = null, + Object? inputFileId = null, + Object? endpoint = null, + Object? completionWindow = null, + Object? metadata = freezed, }) { - return _then(_$ListVectorStoreFilesResponseImpl( - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as String, - data: null == data - ? 
_value._data - : data // ignore: cast_nullable_to_non_nullable - as List, - firstId: null == firstId - ? _value.firstId - : firstId // ignore: cast_nullable_to_non_nullable - as String, - lastId: null == lastId - ? _value.lastId - : lastId // ignore: cast_nullable_to_non_nullable + return _then(_$CreateBatchRequestImpl( + inputFileId: null == inputFileId + ? _value.inputFileId + : inputFileId // ignore: cast_nullable_to_non_nullable as String, - hasMore: null == hasMore - ? _value.hasMore - : hasMore // ignore: cast_nullable_to_non_nullable - as bool, + endpoint: null == endpoint + ? _value.endpoint + : endpoint // ignore: cast_nullable_to_non_nullable + as BatchEndpoint, + completionWindow: null == completionWindow + ? _value.completionWindow + : completionWindow // ignore: cast_nullable_to_non_nullable + as BatchCompletionWindow, + metadata: freezed == metadata + ? _value._metadata + : metadata // ignore: cast_nullable_to_non_nullable + as Map?, )); } } /// @nodoc @JsonSerializable() -class _$ListVectorStoreFilesResponseImpl extends _ListVectorStoreFilesResponse { - const _$ListVectorStoreFilesResponseImpl( - {required this.object, - required final List data, - @JsonKey(name: 'first_id') required this.firstId, - @JsonKey(name: 'last_id') required this.lastId, - @JsonKey(name: 'has_more') required this.hasMore}) - : _data = data, +class _$CreateBatchRequestImpl extends _CreateBatchRequest { + const _$CreateBatchRequestImpl( + {@JsonKey(name: 'input_file_id') required this.inputFileId, + required this.endpoint, + @JsonKey(name: 'completion_window') required this.completionWindow, + @JsonKey(includeIfNull: false) final Map? metadata}) + : _metadata = metadata, super._(); - factory _$ListVectorStoreFilesResponseImpl.fromJson( - Map json) => - _$$ListVectorStoreFilesResponseImplFromJson(json); + factory _$CreateBatchRequestImpl.fromJson(Map json) => + _$$CreateBatchRequestImplFromJson(json); - /// The object type, which is always `list`. + /// The ID of an uploaded file that contains requests for the new batch. + /// + /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. + /// + /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. @override - final String object; - - /// A list of message files. - final List _data; + @JsonKey(name: 'input_file_id') + final String inputFileId; - /// A list of message files. + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. @override - List get data { - if (_data is EqualUnmodifiableListView) return _data; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_data); - } + final BatchEndpoint endpoint; - /// The ID of the first message file in the list. + /// The time frame within which the batch should be processed. Currently only `24h` is supported. @override - @JsonKey(name: 'first_id') - final String firstId; + @JsonKey(name: 'completion_window') + final BatchCompletionWindow completionWindow; - /// The ID of the last message file in the list. - @override - @JsonKey(name: 'last_id') - final String lastId; + /// Optional custom metadata for the batch. + final Map? _metadata; - /// Whether there are more message files available. + /// Optional custom metadata for the batch. 
@override - @JsonKey(name: 'has_more') - final bool hasMore; + @JsonKey(includeIfNull: false) + Map? get metadata { + final value = _metadata; + if (value == null) return null; + if (_metadata is EqualUnmodifiableMapView) return _metadata; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(value); + } @override String toString() { - return 'ListVectorStoreFilesResponse(object: $object, data: $data, firstId: $firstId, lastId: $lastId, hasMore: $hasMore)'; + return 'CreateBatchRequest(inputFileId: $inputFileId, endpoint: $endpoint, completionWindow: $completionWindow, metadata: $metadata)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ListVectorStoreFilesResponseImpl && - (identical(other.object, object) || other.object == object) && - const DeepCollectionEquality().equals(other._data, _data) && - (identical(other.firstId, firstId) || other.firstId == firstId) && - (identical(other.lastId, lastId) || other.lastId == lastId) && - (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); + other is _$CreateBatchRequestImpl && + (identical(other.inputFileId, inputFileId) || + other.inputFileId == inputFileId) && + (identical(other.endpoint, endpoint) || + other.endpoint == endpoint) && + (identical(other.completionWindow, completionWindow) || + other.completionWindow == completionWindow) && + const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, object, - const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); + int get hashCode => Object.hash(runtimeType, inputFileId, endpoint, + completionWindow, const DeepCollectionEquality().hash(_metadata)); - /// Create a copy of ListVectorStoreFilesResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ListVectorStoreFilesResponseImplCopyWith< - _$ListVectorStoreFilesResponseImpl> - get copyWith => __$$ListVectorStoreFilesResponseImplCopyWithImpl< - _$ListVectorStoreFilesResponseImpl>(this, _$identity); + _$$CreateBatchRequestImplCopyWith<_$CreateBatchRequestImpl> get copyWith => + __$$CreateBatchRequestImplCopyWithImpl<_$CreateBatchRequestImpl>( + this, _$identity); @override Map toJson() { - return _$$ListVectorStoreFilesResponseImplToJson( + return _$$CreateBatchRequestImplToJson( this, ); } } -abstract class _ListVectorStoreFilesResponse - extends ListVectorStoreFilesResponse { - const factory _ListVectorStoreFilesResponse( - {required final String object, - required final List data, - @JsonKey(name: 'first_id') required final String firstId, - @JsonKey(name: 'last_id') required final String lastId, - @JsonKey(name: 'has_more') required final bool hasMore}) = - _$ListVectorStoreFilesResponseImpl; - const _ListVectorStoreFilesResponse._() : super._(); - - factory _ListVectorStoreFilesResponse.fromJson(Map json) = - _$ListVectorStoreFilesResponseImpl.fromJson; +abstract class _CreateBatchRequest extends CreateBatchRequest { + const factory _CreateBatchRequest( + {@JsonKey(name: 'input_file_id') required final String inputFileId, + required final BatchEndpoint endpoint, + @JsonKey(name: 'completion_window') + required final BatchCompletionWindow completionWindow, + @JsonKey(includeIfNull: false) final Map? 
metadata}) = + _$CreateBatchRequestImpl; + const _CreateBatchRequest._() : super._(); - /// The object type, which is always `list`. - @override - String get object; + factory _CreateBatchRequest.fromJson(Map json) = + _$CreateBatchRequestImpl.fromJson; - /// A list of message files. @override - List get data; - /// The ID of the first message file in the list. + /// The ID of an uploaded file that contains requests for the new batch. + /// + /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. + /// + /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. + @JsonKey(name: 'input_file_id') + String get inputFileId; @override - @JsonKey(name: 'first_id') - String get firstId; - /// The ID of the last message file in the list. + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. + BatchEndpoint get endpoint; @override - @JsonKey(name: 'last_id') - String get lastId; - /// Whether there are more message files available. + /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @JsonKey(name: 'completion_window') + BatchCompletionWindow get completionWindow; @override - @JsonKey(name: 'has_more') - bool get hasMore; - /// Create a copy of ListVectorStoreFilesResponse - /// with the given fields replaced by the non-null parameter values. + /// Optional custom metadata for the batch. + @JsonKey(includeIfNull: false) + Map? get metadata; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ListVectorStoreFilesResponseImplCopyWith< - _$ListVectorStoreFilesResponseImpl> - get copyWith => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + _$$CreateBatchRequestImplCopyWith<_$CreateBatchRequestImpl> get copyWith => + throw _privateConstructorUsedError; } -DeleteVectorStoreFileResponse _$DeleteVectorStoreFileResponseFromJson( - Map json) { - return _DeleteVectorStoreFileResponse.fromJson(json); +Batch _$BatchFromJson(Map json) { + return _Batch.fromJson(json); } /// @nodoc -mixin _$DeleteVectorStoreFileResponse { - /// The ID of the deleted vector store file. +mixin _$Batch { + /// No Description String get id => throw _privateConstructorUsedError; - /// Whether the vector store file was deleted. - bool get deleted => throw _privateConstructorUsedError; - - /// The object type, which is always `vector_store.file.deleted`. - String get object => throw _privateConstructorUsedError; - - /// Serializes this DeleteVectorStoreFileResponse to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of DeleteVectorStoreFileResponse - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $DeleteVectorStoreFileResponseCopyWith - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $DeleteVectorStoreFileResponseCopyWith<$Res> { - factory $DeleteVectorStoreFileResponseCopyWith( - DeleteVectorStoreFileResponse value, - $Res Function(DeleteVectorStoreFileResponse) then) = - _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, - DeleteVectorStoreFileResponse>; - @useResult - $Res call({String id, bool deleted, String object}); -} - -/// @nodoc -class _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, - $Val extends DeleteVectorStoreFileResponse> - implements $DeleteVectorStoreFileResponseCopyWith<$Res> { - _$DeleteVectorStoreFileResponseCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of DeleteVectorStoreFileResponse - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? id = null, - Object? deleted = null, - Object? object = null, - }) { - return _then(_value.copyWith( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, - deleted: null == deleted - ? _value.deleted - : deleted // ignore: cast_nullable_to_non_nullable - as bool, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$DeleteVectorStoreFileResponseImplCopyWith<$Res> - implements $DeleteVectorStoreFileResponseCopyWith<$Res> { - factory _$$DeleteVectorStoreFileResponseImplCopyWith( - _$DeleteVectorStoreFileResponseImpl value, - $Res Function(_$DeleteVectorStoreFileResponseImpl) then) = - __$$DeleteVectorStoreFileResponseImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({String id, bool deleted, String object}); -} - -/// @nodoc -class __$$DeleteVectorStoreFileResponseImplCopyWithImpl<$Res> - extends _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, - _$DeleteVectorStoreFileResponseImpl> - implements _$$DeleteVectorStoreFileResponseImplCopyWith<$Res> { - __$$DeleteVectorStoreFileResponseImplCopyWithImpl( - _$DeleteVectorStoreFileResponseImpl _value, - $Res Function(_$DeleteVectorStoreFileResponseImpl) _then) - : super(_value, _then); - - /// Create a copy of DeleteVectorStoreFileResponse - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? id = null, - Object? deleted = null, - Object? object = null, - }) { - return _then(_$DeleteVectorStoreFileResponseImpl( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, - deleted: null == deleted - ? _value.deleted - : deleted // ignore: cast_nullable_to_non_nullable - as bool, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$DeleteVectorStoreFileResponseImpl - extends _DeleteVectorStoreFileResponse { - const _$DeleteVectorStoreFileResponseImpl( - {required this.id, required this.deleted, required this.object}) - : super._(); - - factory _$DeleteVectorStoreFileResponseImpl.fromJson( - Map json) => - _$$DeleteVectorStoreFileResponseImplFromJson(json); - - /// The ID of the deleted vector store file. - @override - final String id; - - /// Whether the vector store file was deleted. 
- @override - final bool deleted; - - /// The object type, which is always `vector_store.file.deleted`. - @override - final String object; + /// The object type, which is always `batch`. + BatchObject get object => throw _privateConstructorUsedError; - @override - String toString() { - return 'DeleteVectorStoreFileResponse(id: $id, deleted: $deleted, object: $object)'; - } + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. + BatchEndpoint get endpoint => throw _privateConstructorUsedError; - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$DeleteVectorStoreFileResponseImpl && - (identical(other.id, id) || other.id == id) && - (identical(other.deleted, deleted) || other.deleted == deleted) && - (identical(other.object, object) || other.object == object)); - } + /// No Description + @JsonKey(includeIfNull: false) + BatchErrors? get errors => throw _privateConstructorUsedError; - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, id, deleted, object); + /// The ID of the input file for the batch. + @JsonKey(name: 'input_file_id') + String get inputFileId => throw _privateConstructorUsedError; - /// Create a copy of DeleteVectorStoreFileResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$DeleteVectorStoreFileResponseImplCopyWith< - _$DeleteVectorStoreFileResponseImpl> - get copyWith => __$$DeleteVectorStoreFileResponseImplCopyWithImpl< - _$DeleteVectorStoreFileResponseImpl>(this, _$identity); + /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @JsonKey(name: 'completion_window') + BatchCompletionWindow get completionWindow => + throw _privateConstructorUsedError; - @override - Map toJson() { - return _$$DeleteVectorStoreFileResponseImplToJson( - this, - ); - } -} + /// The current status of the batch. + BatchStatus get status => throw _privateConstructorUsedError; -abstract class _DeleteVectorStoreFileResponse - extends DeleteVectorStoreFileResponse { - const factory _DeleteVectorStoreFileResponse( - {required final String id, - required final bool deleted, - required final String object}) = _$DeleteVectorStoreFileResponseImpl; - const _DeleteVectorStoreFileResponse._() : super._(); + /// The ID of the file containing the outputs of successfully executed requests. + @JsonKey(name: 'output_file_id', includeIfNull: false) + String? get outputFileId => throw _privateConstructorUsedError; - factory _DeleteVectorStoreFileResponse.fromJson(Map json) = - _$DeleteVectorStoreFileResponseImpl.fromJson; + /// The ID of the file containing the outputs of requests with errors. + @JsonKey(name: 'error_file_id', includeIfNull: false) + String? get errorFileId => throw _privateConstructorUsedError; - /// The ID of the deleted vector store file. - @override - String get id; + /// The Unix timestamp (in seconds) for when the batch was created. + @JsonKey(name: 'created_at') + int get createdAt => throw _privateConstructorUsedError; - /// Whether the vector store file was deleted. 
- @override - bool get deleted; + /// The Unix timestamp (in seconds) for when the batch started processing. + @JsonKey(name: 'in_progress_at', includeIfNull: false) + int? get inProgressAt => throw _privateConstructorUsedError; - /// The object type, which is always `vector_store.file.deleted`. - @override - String get object; + /// The Unix timestamp (in seconds) for when the batch will expire. + @JsonKey(name: 'expires_at', includeIfNull: false) + int? get expiresAt => throw _privateConstructorUsedError; - /// Create a copy of DeleteVectorStoreFileResponse - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$DeleteVectorStoreFileResponseImplCopyWith< - _$DeleteVectorStoreFileResponseImpl> - get copyWith => throw _privateConstructorUsedError; -} + /// The Unix timestamp (in seconds) for when the batch started finalizing. + @JsonKey(name: 'finalizing_at', includeIfNull: false) + int? get finalizingAt => throw _privateConstructorUsedError; -VectorStoreFileBatchObject _$VectorStoreFileBatchObjectFromJson( - Map json) { - return _VectorStoreFileBatchObject.fromJson(json); -} + /// The Unix timestamp (in seconds) for when the batch was completed. + @JsonKey(name: 'completed_at', includeIfNull: false) + int? get completedAt => throw _privateConstructorUsedError; -/// @nodoc -mixin _$VectorStoreFileBatchObject { - /// The identifier, which can be referenced in API endpoints. - String get id => throw _privateConstructorUsedError; + /// The Unix timestamp (in seconds) for when the batch failed. + @JsonKey(name: 'failed_at', includeIfNull: false) + int? get failedAt => throw _privateConstructorUsedError; - /// The object type, which is always `vector_store.file_batch`. - String get object => throw _privateConstructorUsedError; + /// The Unix timestamp (in seconds) for when the batch expired. + @JsonKey(name: 'expired_at', includeIfNull: false) + int? get expiredAt => throw _privateConstructorUsedError; - /// The Unix timestamp (in seconds) for when the vector store files batch was created. - @JsonKey(name: 'created_at') - int get createdAt => throw _privateConstructorUsedError; + /// The Unix timestamp (in seconds) for when the batch started cancelling. + @JsonKey(name: 'cancelling_at', includeIfNull: false) + int? get cancellingAt => throw _privateConstructorUsedError; - /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. - @JsonKey(name: 'vector_store_id') - String get vectorStoreId => throw _privateConstructorUsedError; + /// The Unix timestamp (in seconds) for when the batch was cancelled. + @JsonKey(name: 'cancelled_at', includeIfNull: false) + int? get cancelledAt => throw _privateConstructorUsedError; - /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. - VectorStoreFileBatchObjectStatus get status => - throw _privateConstructorUsedError; + /// The request counts for different statuses within the batch. + @JsonKey(name: 'request_counts', includeIfNull: false) + BatchRequestCounts? get requestCounts => throw _privateConstructorUsedError; - /// The number of files per status. - @JsonKey(name: 'file_counts') - VectorStoreFileBatchObjectFileCounts get fileCounts => - throw _privateConstructorUsedError; + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + @JsonKey(includeIfNull: false) + dynamic get metadata => throw _privateConstructorUsedError; - /// Serializes this VectorStoreFileBatchObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of VectorStoreFileBatchObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $VectorStoreFileBatchObjectCopyWith - get copyWith => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $BatchCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $VectorStoreFileBatchObjectCopyWith<$Res> { - factory $VectorStoreFileBatchObjectCopyWith(VectorStoreFileBatchObject value, - $Res Function(VectorStoreFileBatchObject) then) = - _$VectorStoreFileBatchObjectCopyWithImpl<$Res, - VectorStoreFileBatchObject>; +abstract class $BatchCopyWith<$Res> { + factory $BatchCopyWith(Batch value, $Res Function(Batch) then) = + _$BatchCopyWithImpl<$Res, Batch>; @useResult $Res call( {String id, - String object, + BatchObject object, + BatchEndpoint endpoint, + @JsonKey(includeIfNull: false) BatchErrors? errors, + @JsonKey(name: 'input_file_id') String inputFileId, + @JsonKey(name: 'completion_window') + BatchCompletionWindow completionWindow, + BatchStatus status, + @JsonKey(name: 'output_file_id', includeIfNull: false) + String? outputFileId, + @JsonKey(name: 'error_file_id', includeIfNull: false) String? errorFileId, @JsonKey(name: 'created_at') int createdAt, - @JsonKey(name: 'vector_store_id') String vectorStoreId, - VectorStoreFileBatchObjectStatus status, - @JsonKey(name: 'file_counts') - VectorStoreFileBatchObjectFileCounts fileCounts}); + @JsonKey(name: 'in_progress_at', includeIfNull: false) int? inProgressAt, + @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, + @JsonKey(name: 'finalizing_at', includeIfNull: false) int? finalizingAt, + @JsonKey(name: 'completed_at', includeIfNull: false) int? completedAt, + @JsonKey(name: 'failed_at', includeIfNull: false) int? failedAt, + @JsonKey(name: 'expired_at', includeIfNull: false) int? expiredAt, + @JsonKey(name: 'cancelling_at', includeIfNull: false) int? cancellingAt, + @JsonKey(name: 'cancelled_at', includeIfNull: false) int? cancelledAt, + @JsonKey(name: 'request_counts', includeIfNull: false) + BatchRequestCounts? requestCounts, + @JsonKey(includeIfNull: false) dynamic metadata}); - $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts; + $BatchErrorsCopyWith<$Res>? get errors; + $BatchRequestCountsCopyWith<$Res>? get requestCounts; } /// @nodoc -class _$VectorStoreFileBatchObjectCopyWithImpl<$Res, - $Val extends VectorStoreFileBatchObject> - implements $VectorStoreFileBatchObjectCopyWith<$Res> { - _$VectorStoreFileBatchObjectCopyWithImpl(this._value, this._then); +class _$BatchCopyWithImpl<$Res, $Val extends Batch> + implements $BatchCopyWith<$Res> { + _$BatchCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of VectorStoreFileBatchObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? id = null, Object? object = null, - Object? createdAt = null, - Object? vectorStoreId = null, + Object? 
endpoint = null, + Object? errors = freezed, + Object? inputFileId = null, + Object? completionWindow = null, Object? status = null, - Object? fileCounts = null, + Object? outputFileId = freezed, + Object? errorFileId = freezed, + Object? createdAt = null, + Object? inProgressAt = freezed, + Object? expiresAt = freezed, + Object? finalizingAt = freezed, + Object? completedAt = freezed, + Object? failedAt = freezed, + Object? expiredAt = freezed, + Object? cancellingAt = freezed, + Object? cancelledAt = freezed, + Object? requestCounts = freezed, + Object? metadata = freezed, }) { return _then(_value.copyWith( id: null == id @@ -51922,83 +48473,178 @@ class _$VectorStoreFileBatchObjectCopyWithImpl<$Res, object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable + as BatchObject, + endpoint: null == endpoint + ? _value.endpoint + : endpoint // ignore: cast_nullable_to_non_nullable + as BatchEndpoint, + errors: freezed == errors + ? _value.errors + : errors // ignore: cast_nullable_to_non_nullable + as BatchErrors?, + inputFileId: null == inputFileId + ? _value.inputFileId + : inputFileId // ignore: cast_nullable_to_non_nullable as String, + completionWindow: null == completionWindow + ? _value.completionWindow + : completionWindow // ignore: cast_nullable_to_non_nullable + as BatchCompletionWindow, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as BatchStatus, + outputFileId: freezed == outputFileId + ? _value.outputFileId + : outputFileId // ignore: cast_nullable_to_non_nullable + as String?, + errorFileId: freezed == errorFileId + ? _value.errorFileId + : errorFileId // ignore: cast_nullable_to_non_nullable + as String?, createdAt: null == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable as int, - vectorStoreId: null == vectorStoreId - ? _value.vectorStoreId - : vectorStoreId // ignore: cast_nullable_to_non_nullable - as String, - status: null == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as VectorStoreFileBatchObjectStatus, - fileCounts: null == fileCounts - ? _value.fileCounts - : fileCounts // ignore: cast_nullable_to_non_nullable - as VectorStoreFileBatchObjectFileCounts, + inProgressAt: freezed == inProgressAt + ? _value.inProgressAt + : inProgressAt // ignore: cast_nullable_to_non_nullable + as int?, + expiresAt: freezed == expiresAt + ? _value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable + as int?, + finalizingAt: freezed == finalizingAt + ? _value.finalizingAt + : finalizingAt // ignore: cast_nullable_to_non_nullable + as int?, + completedAt: freezed == completedAt + ? _value.completedAt + : completedAt // ignore: cast_nullable_to_non_nullable + as int?, + failedAt: freezed == failedAt + ? _value.failedAt + : failedAt // ignore: cast_nullable_to_non_nullable + as int?, + expiredAt: freezed == expiredAt + ? _value.expiredAt + : expiredAt // ignore: cast_nullable_to_non_nullable + as int?, + cancellingAt: freezed == cancellingAt + ? _value.cancellingAt + : cancellingAt // ignore: cast_nullable_to_non_nullable + as int?, + cancelledAt: freezed == cancelledAt + ? _value.cancelledAt + : cancelledAt // ignore: cast_nullable_to_non_nullable + as int?, + requestCounts: freezed == requestCounts + ? _value.requestCounts + : requestCounts // ignore: cast_nullable_to_non_nullable + as BatchRequestCounts?, + metadata: freezed == metadata + ? 
_value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, ) as $Val); } - /// Create a copy of VectorStoreFileBatchObject - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts { - return $VectorStoreFileBatchObjectFileCountsCopyWith<$Res>( - _value.fileCounts, (value) { - return _then(_value.copyWith(fileCounts: value) as $Val); + $BatchErrorsCopyWith<$Res>? get errors { + if (_value.errors == null) { + return null; + } + + return $BatchErrorsCopyWith<$Res>(_value.errors!, (value) { + return _then(_value.copyWith(errors: value) as $Val); }); } -} -/// @nodoc -abstract class _$$VectorStoreFileBatchObjectImplCopyWith<$Res> - implements $VectorStoreFileBatchObjectCopyWith<$Res> { - factory _$$VectorStoreFileBatchObjectImplCopyWith( - _$VectorStoreFileBatchObjectImpl value, - $Res Function(_$VectorStoreFileBatchObjectImpl) then) = - __$$VectorStoreFileBatchObjectImplCopyWithImpl<$Res>; + @override + @pragma('vm:prefer-inline') + $BatchRequestCountsCopyWith<$Res>? get requestCounts { + if (_value.requestCounts == null) { + return null; + } + + return $BatchRequestCountsCopyWith<$Res>(_value.requestCounts!, (value) { + return _then(_value.copyWith(requestCounts: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$BatchImplCopyWith<$Res> implements $BatchCopyWith<$Res> { + factory _$$BatchImplCopyWith( + _$BatchImpl value, $Res Function(_$BatchImpl) then) = + __$$BatchImplCopyWithImpl<$Res>; @override @useResult $Res call( {String id, - String object, + BatchObject object, + BatchEndpoint endpoint, + @JsonKey(includeIfNull: false) BatchErrors? errors, + @JsonKey(name: 'input_file_id') String inputFileId, + @JsonKey(name: 'completion_window') + BatchCompletionWindow completionWindow, + BatchStatus status, + @JsonKey(name: 'output_file_id', includeIfNull: false) + String? outputFileId, + @JsonKey(name: 'error_file_id', includeIfNull: false) String? errorFileId, @JsonKey(name: 'created_at') int createdAt, - @JsonKey(name: 'vector_store_id') String vectorStoreId, - VectorStoreFileBatchObjectStatus status, - @JsonKey(name: 'file_counts') - VectorStoreFileBatchObjectFileCounts fileCounts}); + @JsonKey(name: 'in_progress_at', includeIfNull: false) int? inProgressAt, + @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, + @JsonKey(name: 'finalizing_at', includeIfNull: false) int? finalizingAt, + @JsonKey(name: 'completed_at', includeIfNull: false) int? completedAt, + @JsonKey(name: 'failed_at', includeIfNull: false) int? failedAt, + @JsonKey(name: 'expired_at', includeIfNull: false) int? expiredAt, + @JsonKey(name: 'cancelling_at', includeIfNull: false) int? cancellingAt, + @JsonKey(name: 'cancelled_at', includeIfNull: false) int? cancelledAt, + @JsonKey(name: 'request_counts', includeIfNull: false) + BatchRequestCounts? requestCounts, + @JsonKey(includeIfNull: false) dynamic metadata}); @override - $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts; + $BatchErrorsCopyWith<$Res>? get errors; + @override + $BatchRequestCountsCopyWith<$Res>? 
get requestCounts; } /// @nodoc -class __$$VectorStoreFileBatchObjectImplCopyWithImpl<$Res> - extends _$VectorStoreFileBatchObjectCopyWithImpl<$Res, - _$VectorStoreFileBatchObjectImpl> - implements _$$VectorStoreFileBatchObjectImplCopyWith<$Res> { - __$$VectorStoreFileBatchObjectImplCopyWithImpl( - _$VectorStoreFileBatchObjectImpl _value, - $Res Function(_$VectorStoreFileBatchObjectImpl) _then) +class __$$BatchImplCopyWithImpl<$Res> + extends _$BatchCopyWithImpl<$Res, _$BatchImpl> + implements _$$BatchImplCopyWith<$Res> { + __$$BatchImplCopyWithImpl( + _$BatchImpl _value, $Res Function(_$BatchImpl) _then) : super(_value, _then); - /// Create a copy of VectorStoreFileBatchObject - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? id = null, Object? object = null, - Object? createdAt = null, - Object? vectorStoreId = null, + Object? endpoint = null, + Object? errors = freezed, + Object? inputFileId = null, + Object? completionWindow = null, Object? status = null, - Object? fileCounts = null, + Object? outputFileId = freezed, + Object? errorFileId = freezed, + Object? createdAt = null, + Object? inProgressAt = freezed, + Object? expiresAt = freezed, + Object? finalizingAt = freezed, + Object? completedAt = freezed, + Object? failedAt = freezed, + Object? expiredAt = freezed, + Object? cancellingAt = freezed, + Object? cancelledAt = freezed, + Object? requestCounts = freezed, + Object? metadata = freezed, }) { - return _then(_$VectorStoreFileBatchObjectImpl( + return _then(_$BatchImpl( id: null == id ? _value.id : id // ignore: cast_nullable_to_non_nullable @@ -52006,5601 +48652,1704 @@ class __$$VectorStoreFileBatchObjectImplCopyWithImpl<$Res> object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable + as BatchObject, + endpoint: null == endpoint + ? _value.endpoint + : endpoint // ignore: cast_nullable_to_non_nullable + as BatchEndpoint, + errors: freezed == errors + ? _value.errors + : errors // ignore: cast_nullable_to_non_nullable + as BatchErrors?, + inputFileId: null == inputFileId + ? _value.inputFileId + : inputFileId // ignore: cast_nullable_to_non_nullable as String, + completionWindow: null == completionWindow + ? _value.completionWindow + : completionWindow // ignore: cast_nullable_to_non_nullable + as BatchCompletionWindow, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as BatchStatus, + outputFileId: freezed == outputFileId + ? _value.outputFileId + : outputFileId // ignore: cast_nullable_to_non_nullable + as String?, + errorFileId: freezed == errorFileId + ? _value.errorFileId + : errorFileId // ignore: cast_nullable_to_non_nullable + as String?, createdAt: null == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable as int, - vectorStoreId: null == vectorStoreId - ? _value.vectorStoreId - : vectorStoreId // ignore: cast_nullable_to_non_nullable - as String, - status: null == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as VectorStoreFileBatchObjectStatus, - fileCounts: null == fileCounts - ? _value.fileCounts - : fileCounts // ignore: cast_nullable_to_non_nullable - as VectorStoreFileBatchObjectFileCounts, + inProgressAt: freezed == inProgressAt + ? _value.inProgressAt + : inProgressAt // ignore: cast_nullable_to_non_nullable + as int?, + expiresAt: freezed == expiresAt + ? 
_value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable + as int?, + finalizingAt: freezed == finalizingAt + ? _value.finalizingAt + : finalizingAt // ignore: cast_nullable_to_non_nullable + as int?, + completedAt: freezed == completedAt + ? _value.completedAt + : completedAt // ignore: cast_nullable_to_non_nullable + as int?, + failedAt: freezed == failedAt + ? _value.failedAt + : failedAt // ignore: cast_nullable_to_non_nullable + as int?, + expiredAt: freezed == expiredAt + ? _value.expiredAt + : expiredAt // ignore: cast_nullable_to_non_nullable + as int?, + cancellingAt: freezed == cancellingAt + ? _value.cancellingAt + : cancellingAt // ignore: cast_nullable_to_non_nullable + as int?, + cancelledAt: freezed == cancelledAt + ? _value.cancelledAt + : cancelledAt // ignore: cast_nullable_to_non_nullable + as int?, + requestCounts: freezed == requestCounts + ? _value.requestCounts + : requestCounts // ignore: cast_nullable_to_non_nullable + as BatchRequestCounts?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, )); } } /// @nodoc @JsonSerializable() -class _$VectorStoreFileBatchObjectImpl extends _VectorStoreFileBatchObject { - const _$VectorStoreFileBatchObjectImpl( +class _$BatchImpl extends _Batch { + const _$BatchImpl( {required this.id, required this.object, - @JsonKey(name: 'created_at') required this.createdAt, - @JsonKey(name: 'vector_store_id') required this.vectorStoreId, + required this.endpoint, + @JsonKey(includeIfNull: false) this.errors, + @JsonKey(name: 'input_file_id') required this.inputFileId, + @JsonKey(name: 'completion_window') required this.completionWindow, required this.status, - @JsonKey(name: 'file_counts') required this.fileCounts}) + @JsonKey(name: 'output_file_id', includeIfNull: false) this.outputFileId, + @JsonKey(name: 'error_file_id', includeIfNull: false) this.errorFileId, + @JsonKey(name: 'created_at') required this.createdAt, + @JsonKey(name: 'in_progress_at', includeIfNull: false) this.inProgressAt, + @JsonKey(name: 'expires_at', includeIfNull: false) this.expiresAt, + @JsonKey(name: 'finalizing_at', includeIfNull: false) this.finalizingAt, + @JsonKey(name: 'completed_at', includeIfNull: false) this.completedAt, + @JsonKey(name: 'failed_at', includeIfNull: false) this.failedAt, + @JsonKey(name: 'expired_at', includeIfNull: false) this.expiredAt, + @JsonKey(name: 'cancelling_at', includeIfNull: false) this.cancellingAt, + @JsonKey(name: 'cancelled_at', includeIfNull: false) this.cancelledAt, + @JsonKey(name: 'request_counts', includeIfNull: false) this.requestCounts, + @JsonKey(includeIfNull: false) this.metadata}) : super._(); - factory _$VectorStoreFileBatchObjectImpl.fromJson( - Map json) => - _$$VectorStoreFileBatchObjectImplFromJson(json); + factory _$BatchImpl.fromJson(Map json) => + _$$BatchImplFromJson(json); - /// The identifier, which can be referenced in API endpoints. + /// No Description @override final String id; - /// The object type, which is always `vector_store.file_batch`. + /// The object type, which is always `batch`. @override - final String object; + final BatchObject object; - /// The Unix timestamp (in seconds) for when the vector store files batch was created. + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. 
+ @override + final BatchEndpoint endpoint; + + /// No Description + @override + @JsonKey(includeIfNull: false) + final BatchErrors? errors; + + /// The ID of the input file for the batch. + @override + @JsonKey(name: 'input_file_id') + final String inputFileId; + + /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @override + @JsonKey(name: 'completion_window') + final BatchCompletionWindow completionWindow; + + /// The current status of the batch. + @override + final BatchStatus status; + + /// The ID of the file containing the outputs of successfully executed requests. + @override + @JsonKey(name: 'output_file_id', includeIfNull: false) + final String? outputFileId; + + /// The ID of the file containing the outputs of requests with errors. + @override + @JsonKey(name: 'error_file_id', includeIfNull: false) + final String? errorFileId; + + /// The Unix timestamp (in seconds) for when the batch was created. @override @JsonKey(name: 'created_at') final int createdAt; - /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. + /// The Unix timestamp (in seconds) for when the batch started processing. @override - @JsonKey(name: 'vector_store_id') - final String vectorStoreId; + @JsonKey(name: 'in_progress_at', includeIfNull: false) + final int? inProgressAt; - /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. + /// The Unix timestamp (in seconds) for when the batch will expire. @override - final VectorStoreFileBatchObjectStatus status; + @JsonKey(name: 'expires_at', includeIfNull: false) + final int? expiresAt; - /// The number of files per status. + /// The Unix timestamp (in seconds) for when the batch started finalizing. @override - @JsonKey(name: 'file_counts') - final VectorStoreFileBatchObjectFileCounts fileCounts; + @JsonKey(name: 'finalizing_at', includeIfNull: false) + final int? finalizingAt; + + /// The Unix timestamp (in seconds) for when the batch was completed. + @override + @JsonKey(name: 'completed_at', includeIfNull: false) + final int? completedAt; + + /// The Unix timestamp (in seconds) for when the batch failed. + @override + @JsonKey(name: 'failed_at', includeIfNull: false) + final int? failedAt; + + /// The Unix timestamp (in seconds) for when the batch expired. + @override + @JsonKey(name: 'expired_at', includeIfNull: false) + final int? expiredAt; + + /// The Unix timestamp (in seconds) for when the batch started cancelling. + @override + @JsonKey(name: 'cancelling_at', includeIfNull: false) + final int? cancellingAt; + + /// The Unix timestamp (in seconds) for when the batch was cancelled. + @override + @JsonKey(name: 'cancelled_at', includeIfNull: false) + final int? cancelledAt; + + /// The request counts for different statuses within the batch. + @override + @JsonKey(name: 'request_counts', includeIfNull: false) + final BatchRequestCounts? requestCounts; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ @override + @JsonKey(includeIfNull: false) + final dynamic metadata; @override String toString() { - return 'VectorStoreFileBatchObject(id: $id, object: $object, createdAt: $createdAt, vectorStoreId: $vectorStoreId, status: $status, fileCounts: $fileCounts)'; + return 'Batch(id: $id, object: $object, endpoint: $endpoint, errors: $errors, inputFileId: $inputFileId, completionWindow: $completionWindow, status: $status, outputFileId: $outputFileId, errorFileId: $errorFileId, createdAt: $createdAt, inProgressAt: $inProgressAt, expiresAt: $expiresAt, finalizingAt: $finalizingAt, completedAt: $completedAt, failedAt: $failedAt, expiredAt: $expiredAt, cancellingAt: $cancellingAt, cancelledAt: $cancelledAt, requestCounts: $requestCounts, metadata: $metadata)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$VectorStoreFileBatchObjectImpl && + other is _$BatchImpl && (identical(other.id, id) || other.id == id) && (identical(other.object, object) || other.object == object) && + (identical(other.endpoint, endpoint) || + other.endpoint == endpoint) && + (identical(other.errors, errors) || other.errors == errors) && + (identical(other.inputFileId, inputFileId) || + other.inputFileId == inputFileId) && + (identical(other.completionWindow, completionWindow) || + other.completionWindow == completionWindow) && + (identical(other.status, status) || other.status == status) && + (identical(other.outputFileId, outputFileId) || + other.outputFileId == outputFileId) && + (identical(other.errorFileId, errorFileId) || + other.errorFileId == errorFileId) && (identical(other.createdAt, createdAt) || other.createdAt == createdAt) && - (identical(other.vectorStoreId, vectorStoreId) || - other.vectorStoreId == vectorStoreId) && - (identical(other.status, status) || other.status == status) && - (identical(other.fileCounts, fileCounts) || - other.fileCounts == fileCounts)); + (identical(other.inProgressAt, inProgressAt) || + other.inProgressAt == inProgressAt) && + (identical(other.expiresAt, expiresAt) || + other.expiresAt == expiresAt) && + (identical(other.finalizingAt, finalizingAt) || + other.finalizingAt == finalizingAt) && + (identical(other.completedAt, completedAt) || + other.completedAt == completedAt) && + (identical(other.failedAt, failedAt) || + other.failedAt == failedAt) && + (identical(other.expiredAt, expiredAt) || + other.expiredAt == expiredAt) && + (identical(other.cancellingAt, cancellingAt) || + other.cancellingAt == cancellingAt) && + (identical(other.cancelledAt, cancelledAt) || + other.cancelledAt == cancelledAt) && + (identical(other.requestCounts, requestCounts) || + other.requestCounts == requestCounts) && + const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash( - runtimeType, id, object, createdAt, vectorStoreId, status, fileCounts); + int get hashCode => Object.hashAll([ + runtimeType, + id, + object, + endpoint, + errors, + inputFileId, + completionWindow, + status, + outputFileId, + errorFileId, + createdAt, + inProgressAt, + expiresAt, + finalizingAt, + completedAt, + failedAt, + expiredAt, + cancellingAt, + cancelledAt, + requestCounts, + const DeepCollectionEquality().hash(metadata) + ]); - /// Create a copy of VectorStoreFileBatchObject - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$VectorStoreFileBatchObjectImplCopyWith<_$VectorStoreFileBatchObjectImpl> - get copyWith => __$$VectorStoreFileBatchObjectImplCopyWithImpl< - _$VectorStoreFileBatchObjectImpl>(this, _$identity); + _$$BatchImplCopyWith<_$BatchImpl> get copyWith => + __$$BatchImplCopyWithImpl<_$BatchImpl>(this, _$identity); @override Map toJson() { - return _$$VectorStoreFileBatchObjectImplToJson( + return _$$BatchImplToJson( this, ); } } -abstract class _VectorStoreFileBatchObject extends VectorStoreFileBatchObject { - const factory _VectorStoreFileBatchObject( - {required final String id, - required final String object, - @JsonKey(name: 'created_at') required final int createdAt, - @JsonKey(name: 'vector_store_id') required final String vectorStoreId, - required final VectorStoreFileBatchObjectStatus status, - @JsonKey(name: 'file_counts') - required final VectorStoreFileBatchObjectFileCounts fileCounts}) = - _$VectorStoreFileBatchObjectImpl; - const _VectorStoreFileBatchObject._() : super._(); +abstract class _Batch extends Batch { + const factory _Batch( + {required final String id, + required final BatchObject object, + required final BatchEndpoint endpoint, + @JsonKey(includeIfNull: false) final BatchErrors? errors, + @JsonKey(name: 'input_file_id') required final String inputFileId, + @JsonKey(name: 'completion_window') + required final BatchCompletionWindow completionWindow, + required final BatchStatus status, + @JsonKey(name: 'output_file_id', includeIfNull: false) + final String? outputFileId, + @JsonKey(name: 'error_file_id', includeIfNull: false) + final String? errorFileId, + @JsonKey(name: 'created_at') required final int createdAt, + @JsonKey(name: 'in_progress_at', includeIfNull: false) + final int? inProgressAt, + @JsonKey(name: 'expires_at', includeIfNull: false) final int? expiresAt, + @JsonKey(name: 'finalizing_at', includeIfNull: false) + final int? finalizingAt, + @JsonKey(name: 'completed_at', includeIfNull: false) + final int? completedAt, + @JsonKey(name: 'failed_at', includeIfNull: false) final int? failedAt, + @JsonKey(name: 'expired_at', includeIfNull: false) final int? expiredAt, + @JsonKey(name: 'cancelling_at', includeIfNull: false) + final int? cancellingAt, + @JsonKey(name: 'cancelled_at', includeIfNull: false) + final int? cancelledAt, + @JsonKey(name: 'request_counts', includeIfNull: false) + final BatchRequestCounts? requestCounts, + @JsonKey(includeIfNull: false) final dynamic metadata}) = _$BatchImpl; + const _Batch._() : super._(); - factory _VectorStoreFileBatchObject.fromJson(Map json) = - _$VectorStoreFileBatchObjectImpl.fromJson; + factory _Batch.fromJson(Map json) = _$BatchImpl.fromJson; - /// The identifier, which can be referenced in API endpoints. @override + + /// No Description String get id; + @override - /// The object type, which is always `vector_store.file_batch`. + /// The object type, which is always `batch`. + BatchObject get object; @override - String get object; - /// The Unix timestamp (in seconds) for when the vector store files batch was created. + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. 
+ BatchEndpoint get endpoint; @override - @JsonKey(name: 'created_at') - int get createdAt; - /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. + /// No Description + @JsonKey(includeIfNull: false) + BatchErrors? get errors; @override - @JsonKey(name: 'vector_store_id') - String get vectorStoreId; - /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. + /// The ID of the input file for the batch. + @JsonKey(name: 'input_file_id') + String get inputFileId; @override - VectorStoreFileBatchObjectStatus get status; - /// The number of files per status. + /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @JsonKey(name: 'completion_window') + BatchCompletionWindow get completionWindow; @override - @JsonKey(name: 'file_counts') - VectorStoreFileBatchObjectFileCounts get fileCounts; - /// Create a copy of VectorStoreFileBatchObject - /// with the given fields replaced by the non-null parameter values. + /// The current status of the batch. + BatchStatus get status; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$VectorStoreFileBatchObjectImplCopyWith<_$VectorStoreFileBatchObjectImpl> - get copyWith => throw _privateConstructorUsedError; -} -VectorStoreFileBatchObjectFileCounts - _$VectorStoreFileBatchObjectFileCountsFromJson(Map json) { - return _VectorStoreFileBatchObjectFileCounts.fromJson(json); -} + /// The ID of the file containing the outputs of successfully executed requests. + @JsonKey(name: 'output_file_id', includeIfNull: false) + String? get outputFileId; + @override -/// @nodoc -mixin _$VectorStoreFileBatchObjectFileCounts { - /// The number of files that are currently being processed. - @JsonKey(name: 'in_progress') - int get inProgress => throw _privateConstructorUsedError; + /// The ID of the file containing the outputs of requests with errors. + @JsonKey(name: 'error_file_id', includeIfNull: false) + String? get errorFileId; + @override - /// The number of files that have been processed. - int get completed => throw _privateConstructorUsedError; + /// The Unix timestamp (in seconds) for when the batch was created. + @JsonKey(name: 'created_at') + int get createdAt; + @override - /// The number of files that have failed to process. - int get failed => throw _privateConstructorUsedError; + /// The Unix timestamp (in seconds) for when the batch started processing. + @JsonKey(name: 'in_progress_at', includeIfNull: false) + int? get inProgressAt; + @override - /// The number of files that where cancelled. - int get cancelled => throw _privateConstructorUsedError; + /// The Unix timestamp (in seconds) for when the batch will expire. + @JsonKey(name: 'expires_at', includeIfNull: false) + int? get expiresAt; + @override - /// The total number of files. - int get total => throw _privateConstructorUsedError; + /// The Unix timestamp (in seconds) for when the batch started finalizing. + @JsonKey(name: 'finalizing_at', includeIfNull: false) + int? get finalizingAt; + @override - /// Serializes this VectorStoreFileBatchObjectFileCounts to a JSON map. - Map toJson() => throw _privateConstructorUsedError; + /// The Unix timestamp (in seconds) for when the batch was completed. + @JsonKey(name: 'completed_at', includeIfNull: false) + int? 
get completedAt; + @override - /// Create a copy of VectorStoreFileBatchObjectFileCounts - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $VectorStoreFileBatchObjectFileCountsCopyWith< - VectorStoreFileBatchObjectFileCounts> - get copyWith => throw _privateConstructorUsedError; -} + /// The Unix timestamp (in seconds) for when the batch failed. + @JsonKey(name: 'failed_at', includeIfNull: false) + int? get failedAt; + @override -/// @nodoc -abstract class $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> { - factory $VectorStoreFileBatchObjectFileCountsCopyWith( - VectorStoreFileBatchObjectFileCounts value, - $Res Function(VectorStoreFileBatchObjectFileCounts) then) = - _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, - VectorStoreFileBatchObjectFileCounts>; - @useResult - $Res call( - {@JsonKey(name: 'in_progress') int inProgress, - int completed, - int failed, - int cancelled, - int total}); -} + /// The Unix timestamp (in seconds) for when the batch expired. + @JsonKey(name: 'expired_at', includeIfNull: false) + int? get expiredAt; + @override -/// @nodoc -class _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, - $Val extends VectorStoreFileBatchObjectFileCounts> - implements $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> { - _$VectorStoreFileBatchObjectFileCountsCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of VectorStoreFileBatchObjectFileCounts - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? inProgress = null, - Object? completed = null, - Object? failed = null, - Object? cancelled = null, - Object? total = null, - }) { - return _then(_value.copyWith( - inProgress: null == inProgress - ? _value.inProgress - : inProgress // ignore: cast_nullable_to_non_nullable - as int, - completed: null == completed - ? _value.completed - : completed // ignore: cast_nullable_to_non_nullable - as int, - failed: null == failed - ? _value.failed - : failed // ignore: cast_nullable_to_non_nullable - as int, - cancelled: null == cancelled - ? _value.cancelled - : cancelled // ignore: cast_nullable_to_non_nullable - as int, - total: null == total - ? 
_value.total - : total // ignore: cast_nullable_to_non_nullable - as int, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$VectorStoreFileBatchObjectFileCountsImplCopyWith<$Res> - implements $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> { - factory _$$VectorStoreFileBatchObjectFileCountsImplCopyWith( - _$VectorStoreFileBatchObjectFileCountsImpl value, - $Res Function(_$VectorStoreFileBatchObjectFileCountsImpl) then) = - __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(name: 'in_progress') int inProgress, - int completed, - int failed, - int cancelled, - int total}); -} - -/// @nodoc -class __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl<$Res> - extends _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, - _$VectorStoreFileBatchObjectFileCountsImpl> - implements _$$VectorStoreFileBatchObjectFileCountsImplCopyWith<$Res> { - __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl( - _$VectorStoreFileBatchObjectFileCountsImpl _value, - $Res Function(_$VectorStoreFileBatchObjectFileCountsImpl) _then) - : super(_value, _then); - - /// Create a copy of VectorStoreFileBatchObjectFileCounts - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? inProgress = null, - Object? completed = null, - Object? failed = null, - Object? cancelled = null, - Object? total = null, - }) { - return _then(_$VectorStoreFileBatchObjectFileCountsImpl( - inProgress: null == inProgress - ? _value.inProgress - : inProgress // ignore: cast_nullable_to_non_nullable - as int, - completed: null == completed - ? _value.completed - : completed // ignore: cast_nullable_to_non_nullable - as int, - failed: null == failed - ? _value.failed - : failed // ignore: cast_nullable_to_non_nullable - as int, - cancelled: null == cancelled - ? _value.cancelled - : cancelled // ignore: cast_nullable_to_non_nullable - as int, - total: null == total - ? _value.total - : total // ignore: cast_nullable_to_non_nullable - as int, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$VectorStoreFileBatchObjectFileCountsImpl - extends _VectorStoreFileBatchObjectFileCounts { - const _$VectorStoreFileBatchObjectFileCountsImpl( - {@JsonKey(name: 'in_progress') required this.inProgress, - required this.completed, - required this.failed, - required this.cancelled, - required this.total}) - : super._(); - - factory _$VectorStoreFileBatchObjectFileCountsImpl.fromJson( - Map json) => - _$$VectorStoreFileBatchObjectFileCountsImplFromJson(json); - - /// The number of files that are currently being processed. - @override - @JsonKey(name: 'in_progress') - final int inProgress; - - /// The number of files that have been processed. - @override - final int completed; - - /// The number of files that have failed to process. - @override - final int failed; - - /// The number of files that where cancelled. - @override - final int cancelled; - - /// The total number of files. 
- @override - final int total; - - @override - String toString() { - return 'VectorStoreFileBatchObjectFileCounts(inProgress: $inProgress, completed: $completed, failed: $failed, cancelled: $cancelled, total: $total)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$VectorStoreFileBatchObjectFileCountsImpl && - (identical(other.inProgress, inProgress) || - other.inProgress == inProgress) && - (identical(other.completed, completed) || - other.completed == completed) && - (identical(other.failed, failed) || other.failed == failed) && - (identical(other.cancelled, cancelled) || - other.cancelled == cancelled) && - (identical(other.total, total) || other.total == total)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => - Object.hash(runtimeType, inProgress, completed, failed, cancelled, total); - - /// Create a copy of VectorStoreFileBatchObjectFileCounts - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$VectorStoreFileBatchObjectFileCountsImplCopyWith< - _$VectorStoreFileBatchObjectFileCountsImpl> - get copyWith => __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl< - _$VectorStoreFileBatchObjectFileCountsImpl>(this, _$identity); - - @override - Map toJson() { - return _$$VectorStoreFileBatchObjectFileCountsImplToJson( - this, - ); - } -} - -abstract class _VectorStoreFileBatchObjectFileCounts - extends VectorStoreFileBatchObjectFileCounts { - const factory _VectorStoreFileBatchObjectFileCounts( - {@JsonKey(name: 'in_progress') required final int inProgress, - required final int completed, - required final int failed, - required final int cancelled, - required final int total}) = _$VectorStoreFileBatchObjectFileCountsImpl; - const _VectorStoreFileBatchObjectFileCounts._() : super._(); - - factory _VectorStoreFileBatchObjectFileCounts.fromJson( - Map json) = - _$VectorStoreFileBatchObjectFileCountsImpl.fromJson; - - /// The number of files that are currently being processed. - @override - @JsonKey(name: 'in_progress') - int get inProgress; - - /// The number of files that have been processed. - @override - int get completed; - - /// The number of files that have failed to process. + /// The Unix timestamp (in seconds) for when the batch started cancelling. + @JsonKey(name: 'cancelling_at', includeIfNull: false) + int? get cancellingAt; @override - int get failed; - /// The number of files that where cancelled. + /// The Unix timestamp (in seconds) for when the batch was cancelled. + @JsonKey(name: 'cancelled_at', includeIfNull: false) + int? get cancelledAt; @override - int get cancelled; - /// The total number of files. + /// The request counts for different statuses within the batch. + @JsonKey(name: 'request_counts', includeIfNull: false) + BatchRequestCounts? get requestCounts; @override - int get total; - /// Create a copy of VectorStoreFileBatchObjectFileCounts - /// with the given fields replaced by the non-null parameter values. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ @JsonKey(includeIfNull: false) + dynamic get metadata; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$VectorStoreFileBatchObjectFileCountsImplCopyWith< - _$VectorStoreFileBatchObjectFileCountsImpl> - get copyWith => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + _$$BatchImplCopyWith<_$BatchImpl> get copyWith => + throw _privateConstructorUsedError; } -CreateVectorStoreFileBatchRequest _$CreateVectorStoreFileBatchRequestFromJson( - Map json) { - return _CreateVectorStoreFileBatchRequest.fromJson(json); +BatchErrors _$BatchErrorsFromJson(Map json) { + return _BatchErrors.fromJson(json); } /// @nodoc -mixin _$CreateVectorStoreFileBatchRequest { - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - @JsonKey(name: 'file_ids') - List get fileIds => throw _privateConstructorUsedError; +mixin _$BatchErrors { + /// The object type, which is always `list`. + @JsonKey(includeIfNull: false) + String? get object => throw _privateConstructorUsedError; - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? get chunkingStrategy => - throw _privateConstructorUsedError; + /// No Description + @JsonKey(includeIfNull: false) + List? get data => throw _privateConstructorUsedError; - /// Serializes this CreateVectorStoreFileBatchRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateVectorStoreFileBatchRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $CreateVectorStoreFileBatchRequestCopyWith - get copyWith => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $BatchErrorsCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $CreateVectorStoreFileBatchRequestCopyWith<$Res> { - factory $CreateVectorStoreFileBatchRequestCopyWith( - CreateVectorStoreFileBatchRequest value, - $Res Function(CreateVectorStoreFileBatchRequest) then) = - _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, - CreateVectorStoreFileBatchRequest>; +abstract class $BatchErrorsCopyWith<$Res> { + factory $BatchErrorsCopyWith( + BatchErrors value, $Res Function(BatchErrors) then) = + _$BatchErrorsCopyWithImpl<$Res, BatchErrors>; @useResult $Res call( - {@JsonKey(name: 'file_ids') List fileIds, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? chunkingStrategy}); - - $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; + {@JsonKey(includeIfNull: false) String? object, + @JsonKey(includeIfNull: false) List? 
data}); } /// @nodoc -class _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, - $Val extends CreateVectorStoreFileBatchRequest> - implements $CreateVectorStoreFileBatchRequestCopyWith<$Res> { - _$CreateVectorStoreFileBatchRequestCopyWithImpl(this._value, this._then); +class _$BatchErrorsCopyWithImpl<$Res, $Val extends BatchErrors> + implements $BatchErrorsCopyWith<$Res> { + _$BatchErrorsCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of CreateVectorStoreFileBatchRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileIds = null, - Object? chunkingStrategy = freezed, + Object? object = freezed, + Object? data = freezed, }) { return _then(_value.copyWith( - fileIds: null == fileIds - ? _value.fileIds - : fileIds // ignore: cast_nullable_to_non_nullable - as List, - chunkingStrategy: freezed == chunkingStrategy - ? _value.chunkingStrategy - : chunkingStrategy // ignore: cast_nullable_to_non_nullable - as ChunkingStrategyRequestParam?, + object: freezed == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String?, + data: freezed == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } - - /// Create a copy of CreateVectorStoreFileBatchRequest - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { - if (_value.chunkingStrategy == null) { - return null; - } - - return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, - (value) { - return _then(_value.copyWith(chunkingStrategy: value) as $Val); - }); - } } /// @nodoc -abstract class _$$CreateVectorStoreFileBatchRequestImplCopyWith<$Res> - implements $CreateVectorStoreFileBatchRequestCopyWith<$Res> { - factory _$$CreateVectorStoreFileBatchRequestImplCopyWith( - _$CreateVectorStoreFileBatchRequestImpl value, - $Res Function(_$CreateVectorStoreFileBatchRequestImpl) then) = - __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res>; +abstract class _$$BatchErrorsImplCopyWith<$Res> + implements $BatchErrorsCopyWith<$Res> { + factory _$$BatchErrorsImplCopyWith( + _$BatchErrorsImpl value, $Res Function(_$BatchErrorsImpl) then) = + __$$BatchErrorsImplCopyWithImpl<$Res>; @override @useResult $Res call( - {@JsonKey(name: 'file_ids') List fileIds, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? chunkingStrategy}); - - @override - $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; + {@JsonKey(includeIfNull: false) String? object, + @JsonKey(includeIfNull: false) List? 
data}); } /// @nodoc -class __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res> - extends _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, - _$CreateVectorStoreFileBatchRequestImpl> - implements _$$CreateVectorStoreFileBatchRequestImplCopyWith<$Res> { - __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl( - _$CreateVectorStoreFileBatchRequestImpl _value, - $Res Function(_$CreateVectorStoreFileBatchRequestImpl) _then) +class __$$BatchErrorsImplCopyWithImpl<$Res> + extends _$BatchErrorsCopyWithImpl<$Res, _$BatchErrorsImpl> + implements _$$BatchErrorsImplCopyWith<$Res> { + __$$BatchErrorsImplCopyWithImpl( + _$BatchErrorsImpl _value, $Res Function(_$BatchErrorsImpl) _then) : super(_value, _then); - /// Create a copy of CreateVectorStoreFileBatchRequest - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileIds = null, - Object? chunkingStrategy = freezed, + Object? object = freezed, + Object? data = freezed, }) { - return _then(_$CreateVectorStoreFileBatchRequestImpl( - fileIds: null == fileIds - ? _value._fileIds - : fileIds // ignore: cast_nullable_to_non_nullable - as List, - chunkingStrategy: freezed == chunkingStrategy - ? _value.chunkingStrategy - : chunkingStrategy // ignore: cast_nullable_to_non_nullable - as ChunkingStrategyRequestParam?, + return _then(_$BatchErrorsImpl( + object: freezed == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String?, + data: freezed == data + ? _value._data + : data // ignore: cast_nullable_to_non_nullable + as List?, )); } } /// @nodoc @JsonSerializable() -class _$CreateVectorStoreFileBatchRequestImpl - extends _CreateVectorStoreFileBatchRequest { - const _$CreateVectorStoreFileBatchRequestImpl( - {@JsonKey(name: 'file_ids') required final List fileIds, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - this.chunkingStrategy}) - : _fileIds = fileIds, +class _$BatchErrorsImpl extends _BatchErrors { + const _$BatchErrorsImpl( + {@JsonKey(includeIfNull: false) this.object, + @JsonKey(includeIfNull: false) final List? data}) + : _data = data, super._(); - factory _$CreateVectorStoreFileBatchRequestImpl.fromJson( - Map json) => - _$$CreateVectorStoreFileBatchRequestImplFromJson(json); + factory _$BatchErrorsImpl.fromJson(Map json) => + _$$BatchErrorsImplFromJson(json); - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - final List _fileIds; + /// The object type, which is always `list`. + @override + @JsonKey(includeIfNull: false) + final String? object; - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + /// No Description + final List? _data; + + /// No Description @override - @JsonKey(name: 'file_ids') - List get fileIds { - if (_fileIds is EqualUnmodifiableListView) return _fileIds; + @JsonKey(includeIfNull: false) + List? get data { + final value = _data; + if (value == null) return null; + if (_data is EqualUnmodifiableListView) return _data; // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_fileIds); + return EqualUnmodifiableListView(value); } - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. 
- /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] - @override - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - final ChunkingStrategyRequestParam? chunkingStrategy; - @override String toString() { - return 'CreateVectorStoreFileBatchRequest(fileIds: $fileIds, chunkingStrategy: $chunkingStrategy)'; + return 'BatchErrors(object: $object, data: $data)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$CreateVectorStoreFileBatchRequestImpl && - const DeepCollectionEquality().equals(other._fileIds, _fileIds) && - (identical(other.chunkingStrategy, chunkingStrategy) || - other.chunkingStrategy == chunkingStrategy)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, - const DeepCollectionEquality().hash(_fileIds), chunkingStrategy); - - /// Create a copy of CreateVectorStoreFileBatchRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$CreateVectorStoreFileBatchRequestImplCopyWith< - _$CreateVectorStoreFileBatchRequestImpl> - get copyWith => __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl< - _$CreateVectorStoreFileBatchRequestImpl>(this, _$identity); - - @override - Map toJson() { - return _$$CreateVectorStoreFileBatchRequestImplToJson( - this, - ); - } -} - -abstract class _CreateVectorStoreFileBatchRequest - extends CreateVectorStoreFileBatchRequest { - const factory _CreateVectorStoreFileBatchRequest( - {@JsonKey(name: 'file_ids') required final List fileIds, - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - final ChunkingStrategyRequestParam? chunkingStrategy}) = - _$CreateVectorStoreFileBatchRequestImpl; - const _CreateVectorStoreFileBatchRequest._() : super._(); - - factory _CreateVectorStoreFileBatchRequest.fromJson( - Map json) = - _$CreateVectorStoreFileBatchRequestImpl.fromJson; - - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - @override - @JsonKey(name: 'file_ids') - List get fileIds; - - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] - @override - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? get chunkingStrategy; - - /// Create a copy of CreateVectorStoreFileBatchRequest - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$CreateVectorStoreFileBatchRequestImplCopyWith< - _$CreateVectorStoreFileBatchRequestImpl> - get copyWith => throw _privateConstructorUsedError; -} - -Error _$ErrorFromJson(Map json) { - return _Error.fromJson(json); -} - -/// @nodoc -mixin _$Error { - /// The error code. - String? get code => throw _privateConstructorUsedError; - - /// A human-readable description of the error. - String get message => throw _privateConstructorUsedError; - - /// The parameter in the request that caused the error. - String? get param => throw _privateConstructorUsedError; - - /// The type of error. - String get type => throw _privateConstructorUsedError; - - /// Serializes this Error to a JSON map. 
- Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of Error - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $ErrorCopyWith get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ErrorCopyWith<$Res> { - factory $ErrorCopyWith(Error value, $Res Function(Error) then) = - _$ErrorCopyWithImpl<$Res, Error>; - @useResult - $Res call({String? code, String message, String? param, String type}); -} - -/// @nodoc -class _$ErrorCopyWithImpl<$Res, $Val extends Error> - implements $ErrorCopyWith<$Res> { - _$ErrorCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of Error - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? code = freezed, - Object? message = null, - Object? param = freezed, - Object? type = null, - }) { - return _then(_value.copyWith( - code: freezed == code - ? _value.code - : code // ignore: cast_nullable_to_non_nullable - as String?, - message: null == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as String, - param: freezed == param - ? _value.param - : param // ignore: cast_nullable_to_non_nullable - as String?, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$ErrorImplCopyWith<$Res> implements $ErrorCopyWith<$Res> { - factory _$$ErrorImplCopyWith( - _$ErrorImpl value, $Res Function(_$ErrorImpl) then) = - __$$ErrorImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({String? code, String message, String? param, String type}); -} - -/// @nodoc -class __$$ErrorImplCopyWithImpl<$Res> - extends _$ErrorCopyWithImpl<$Res, _$ErrorImpl> - implements _$$ErrorImplCopyWith<$Res> { - __$$ErrorImplCopyWithImpl( - _$ErrorImpl _value, $Res Function(_$ErrorImpl) _then) - : super(_value, _then); - - /// Create a copy of Error - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? code = freezed, - Object? message = null, - Object? param = freezed, - Object? type = null, - }) { - return _then(_$ErrorImpl( - code: freezed == code - ? _value.code - : code // ignore: cast_nullable_to_non_nullable - as String?, - message: null == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as String, - param: freezed == param - ? _value.param - : param // ignore: cast_nullable_to_non_nullable - as String?, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ErrorImpl extends _Error { - const _$ErrorImpl( - {required this.code, - required this.message, - required this.param, - required this.type}) - : super._(); - - factory _$ErrorImpl.fromJson(Map json) => - _$$ErrorImplFromJson(json); - - /// The error code. - @override - final String? code; - - /// A human-readable description of the error. - @override - final String message; - - /// The parameter in the request that caused the error. - @override - final String? param; - - /// The type of error. 
- @override - final String type; - - @override - String toString() { - return 'Error(code: $code, message: $message, param: $param, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ErrorImpl && - (identical(other.code, code) || other.code == code) && - (identical(other.message, message) || other.message == message) && - (identical(other.param, param) || other.param == param) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, code, message, param, type); - - /// Create a copy of Error - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$ErrorImplCopyWith<_$ErrorImpl> get copyWith => - __$$ErrorImplCopyWithImpl<_$ErrorImpl>(this, _$identity); - - @override - Map toJson() { - return _$$ErrorImplToJson( - this, - ); - } -} - -abstract class _Error extends Error { - const factory _Error( - {required final String? code, - required final String message, - required final String? param, - required final String type}) = _$ErrorImpl; - const _Error._() : super._(); - - factory _Error.fromJson(Map json) = _$ErrorImpl.fromJson; - - /// The error code. - @override - String? get code; - - /// A human-readable description of the error. - @override - String get message; - - /// The parameter in the request that caused the error. - @override - String? get param; - - /// The type of error. - @override - String get type; - - /// Create a copy of Error - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ErrorImplCopyWith<_$ErrorImpl> get copyWith => - throw _privateConstructorUsedError; -} - -CreateBatchRequest _$CreateBatchRequestFromJson(Map json) { - return _CreateBatchRequest.fromJson(json); -} - -/// @nodoc -mixin _$CreateBatchRequest { - /// The ID of an uploaded file that contains requests for the new batch. - /// - /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - /// - /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. - @JsonKey(name: 'input_file_id') - String get inputFileId => throw _privateConstructorUsedError; - - /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. - BatchEndpoint get endpoint => throw _privateConstructorUsedError; - - /// The time frame within which the batch should be processed. Currently only `24h` is supported. - @JsonKey(name: 'completion_window') - BatchCompletionWindow get completionWindow => - throw _privateConstructorUsedError; - - /// Optional custom metadata for the batch. - @JsonKey(includeIfNull: false) - Map? get metadata => throw _privateConstructorUsedError; - - /// Serializes this CreateBatchRequest to a JSON map. 
- Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of CreateBatchRequest - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $CreateBatchRequestCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $CreateBatchRequestCopyWith<$Res> { - factory $CreateBatchRequestCopyWith( - CreateBatchRequest value, $Res Function(CreateBatchRequest) then) = - _$CreateBatchRequestCopyWithImpl<$Res, CreateBatchRequest>; - @useResult - $Res call( - {@JsonKey(name: 'input_file_id') String inputFileId, - BatchEndpoint endpoint, - @JsonKey(name: 'completion_window') - BatchCompletionWindow completionWindow, - @JsonKey(includeIfNull: false) Map? metadata}); -} - -/// @nodoc -class _$CreateBatchRequestCopyWithImpl<$Res, $Val extends CreateBatchRequest> - implements $CreateBatchRequestCopyWith<$Res> { - _$CreateBatchRequestCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of CreateBatchRequest - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? inputFileId = null, - Object? endpoint = null, - Object? completionWindow = null, - Object? metadata = freezed, - }) { - return _then(_value.copyWith( - inputFileId: null == inputFileId - ? _value.inputFileId - : inputFileId // ignore: cast_nullable_to_non_nullable - as String, - endpoint: null == endpoint - ? _value.endpoint - : endpoint // ignore: cast_nullable_to_non_nullable - as BatchEndpoint, - completionWindow: null == completionWindow - ? _value.completionWindow - : completionWindow // ignore: cast_nullable_to_non_nullable - as BatchCompletionWindow, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as Map?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$CreateBatchRequestImplCopyWith<$Res> - implements $CreateBatchRequestCopyWith<$Res> { - factory _$$CreateBatchRequestImplCopyWith(_$CreateBatchRequestImpl value, - $Res Function(_$CreateBatchRequestImpl) then) = - __$$CreateBatchRequestImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(name: 'input_file_id') String inputFileId, - BatchEndpoint endpoint, - @JsonKey(name: 'completion_window') - BatchCompletionWindow completionWindow, - @JsonKey(includeIfNull: false) Map? metadata}); -} - -/// @nodoc -class __$$CreateBatchRequestImplCopyWithImpl<$Res> - extends _$CreateBatchRequestCopyWithImpl<$Res, _$CreateBatchRequestImpl> - implements _$$CreateBatchRequestImplCopyWith<$Res> { - __$$CreateBatchRequestImplCopyWithImpl(_$CreateBatchRequestImpl _value, - $Res Function(_$CreateBatchRequestImpl) _then) - : super(_value, _then); - - /// Create a copy of CreateBatchRequest - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? inputFileId = null, - Object? endpoint = null, - Object? completionWindow = null, - Object? metadata = freezed, - }) { - return _then(_$CreateBatchRequestImpl( - inputFileId: null == inputFileId - ? _value.inputFileId - : inputFileId // ignore: cast_nullable_to_non_nullable - as String, - endpoint: null == endpoint - ? _value.endpoint - : endpoint // ignore: cast_nullable_to_non_nullable - as BatchEndpoint, - completionWindow: null == completionWindow - ? 
_value.completionWindow - : completionWindow // ignore: cast_nullable_to_non_nullable - as BatchCompletionWindow, - metadata: freezed == metadata - ? _value._metadata - : metadata // ignore: cast_nullable_to_non_nullable - as Map?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$CreateBatchRequestImpl extends _CreateBatchRequest { - const _$CreateBatchRequestImpl( - {@JsonKey(name: 'input_file_id') required this.inputFileId, - required this.endpoint, - @JsonKey(name: 'completion_window') required this.completionWindow, - @JsonKey(includeIfNull: false) final Map? metadata}) - : _metadata = metadata, - super._(); - - factory _$CreateBatchRequestImpl.fromJson(Map json) => - _$$CreateBatchRequestImplFromJson(json); - - /// The ID of an uploaded file that contains requests for the new batch. - /// - /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - /// - /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. - @override - @JsonKey(name: 'input_file_id') - final String inputFileId; - - /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. - @override - final BatchEndpoint endpoint; - - /// The time frame within which the batch should be processed. Currently only `24h` is supported. - @override - @JsonKey(name: 'completion_window') - final BatchCompletionWindow completionWindow; - - /// Optional custom metadata for the batch. - final Map? _metadata; - - /// Optional custom metadata for the batch. - @override - @JsonKey(includeIfNull: false) - Map? get metadata { - final value = _metadata; - if (value == null) return null; - if (_metadata is EqualUnmodifiableMapView) return _metadata; - // ignore: implicit_dynamic_type - return EqualUnmodifiableMapView(value); - } - - @override - String toString() { - return 'CreateBatchRequest(inputFileId: $inputFileId, endpoint: $endpoint, completionWindow: $completionWindow, metadata: $metadata)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$CreateBatchRequestImpl && - (identical(other.inputFileId, inputFileId) || - other.inputFileId == inputFileId) && - (identical(other.endpoint, endpoint) || - other.endpoint == endpoint) && - (identical(other.completionWindow, completionWindow) || - other.completionWindow == completionWindow) && - const DeepCollectionEquality().equals(other._metadata, _metadata)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, inputFileId, endpoint, - completionWindow, const DeepCollectionEquality().hash(_metadata)); - - /// Create a copy of CreateBatchRequest - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$CreateBatchRequestImplCopyWith<_$CreateBatchRequestImpl> get copyWith => - __$$CreateBatchRequestImplCopyWithImpl<_$CreateBatchRequestImpl>( - this, _$identity); - - @override - Map toJson() { - return _$$CreateBatchRequestImplToJson( - this, - ); - } -} - -abstract class _CreateBatchRequest extends CreateBatchRequest { - const factory _CreateBatchRequest( - {@JsonKey(name: 'input_file_id') required final String inputFileId, - required final BatchEndpoint endpoint, - @JsonKey(name: 'completion_window') - required final BatchCompletionWindow completionWindow, - @JsonKey(includeIfNull: false) final Map? metadata}) = - _$CreateBatchRequestImpl; - const _CreateBatchRequest._() : super._(); - - factory _CreateBatchRequest.fromJson(Map json) = - _$CreateBatchRequestImpl.fromJson; - - /// The ID of an uploaded file that contains requests for the new batch. - /// - /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - /// - /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. - @override - @JsonKey(name: 'input_file_id') - String get inputFileId; - - /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. - @override - BatchEndpoint get endpoint; - - /// The time frame within which the batch should be processed. Currently only `24h` is supported. - @override - @JsonKey(name: 'completion_window') - BatchCompletionWindow get completionWindow; - - /// Optional custom metadata for the batch. - @override - @JsonKey(includeIfNull: false) - Map? get metadata; - - /// Create a copy of CreateBatchRequest - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$CreateBatchRequestImplCopyWith<_$CreateBatchRequestImpl> get copyWith => - throw _privateConstructorUsedError; -} - -Batch _$BatchFromJson(Map json) { - return _Batch.fromJson(json); -} - -/// @nodoc -mixin _$Batch { - /// No Description - String get id => throw _privateConstructorUsedError; - - /// The object type, which is always `batch`. - BatchObject get object => throw _privateConstructorUsedError; - - /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. - BatchEndpoint get endpoint => throw _privateConstructorUsedError; - - /// No Description - @JsonKey(includeIfNull: false) - BatchErrors? get errors => throw _privateConstructorUsedError; - - /// The ID of the input file for the batch. - @JsonKey(name: 'input_file_id') - String get inputFileId => throw _privateConstructorUsedError; - - /// The time frame within which the batch should be processed. Currently only `24h` is supported. - @JsonKey(name: 'completion_window') - BatchCompletionWindow get completionWindow => - throw _privateConstructorUsedError; - - /// The current status of the batch. 
- BatchStatus get status => throw _privateConstructorUsedError; - - /// The ID of the file containing the outputs of successfully executed requests. - @JsonKey(name: 'output_file_id', includeIfNull: false) - String? get outputFileId => throw _privateConstructorUsedError; - - /// The ID of the file containing the outputs of requests with errors. - @JsonKey(name: 'error_file_id', includeIfNull: false) - String? get errorFileId => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch was created. - @JsonKey(name: 'created_at') - int get createdAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch started processing. - @JsonKey(name: 'in_progress_at', includeIfNull: false) - int? get inProgressAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch will expire. - @JsonKey(name: 'expires_at', includeIfNull: false) - int? get expiresAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch started finalizing. - @JsonKey(name: 'finalizing_at', includeIfNull: false) - int? get finalizingAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch was completed. - @JsonKey(name: 'completed_at', includeIfNull: false) - int? get completedAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch failed. - @JsonKey(name: 'failed_at', includeIfNull: false) - int? get failedAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch expired. - @JsonKey(name: 'expired_at', includeIfNull: false) - int? get expiredAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch started cancelling. - @JsonKey(name: 'cancelling_at', includeIfNull: false) - int? get cancellingAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch was cancelled. - @JsonKey(name: 'cancelled_at', includeIfNull: false) - int? get cancelledAt => throw _privateConstructorUsedError; - - /// The request counts for different statuses within the batch. - @JsonKey(name: 'request_counts', includeIfNull: false) - BatchRequestCounts? get requestCounts => throw _privateConstructorUsedError; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. - @JsonKey(includeIfNull: false) - dynamic get metadata => throw _privateConstructorUsedError; - - /// Serializes this Batch to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of Batch - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $BatchCopyWith get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $BatchCopyWith<$Res> { - factory $BatchCopyWith(Batch value, $Res Function(Batch) then) = - _$BatchCopyWithImpl<$Res, Batch>; - @useResult - $Res call( - {String id, - BatchObject object, - BatchEndpoint endpoint, - @JsonKey(includeIfNull: false) BatchErrors? 
errors, - @JsonKey(name: 'input_file_id') String inputFileId, - @JsonKey(name: 'completion_window') - BatchCompletionWindow completionWindow, - BatchStatus status, - @JsonKey(name: 'output_file_id', includeIfNull: false) - String? outputFileId, - @JsonKey(name: 'error_file_id', includeIfNull: false) String? errorFileId, - @JsonKey(name: 'created_at') int createdAt, - @JsonKey(name: 'in_progress_at', includeIfNull: false) int? inProgressAt, - @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, - @JsonKey(name: 'finalizing_at', includeIfNull: false) int? finalizingAt, - @JsonKey(name: 'completed_at', includeIfNull: false) int? completedAt, - @JsonKey(name: 'failed_at', includeIfNull: false) int? failedAt, - @JsonKey(name: 'expired_at', includeIfNull: false) int? expiredAt, - @JsonKey(name: 'cancelling_at', includeIfNull: false) int? cancellingAt, - @JsonKey(name: 'cancelled_at', includeIfNull: false) int? cancelledAt, - @JsonKey(name: 'request_counts', includeIfNull: false) - BatchRequestCounts? requestCounts, - @JsonKey(includeIfNull: false) dynamic metadata}); - - $BatchErrorsCopyWith<$Res>? get errors; - $BatchRequestCountsCopyWith<$Res>? get requestCounts; -} - -/// @nodoc -class _$BatchCopyWithImpl<$Res, $Val extends Batch> - implements $BatchCopyWith<$Res> { - _$BatchCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of Batch - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? id = null, - Object? object = null, - Object? endpoint = null, - Object? errors = freezed, - Object? inputFileId = null, - Object? completionWindow = null, - Object? status = null, - Object? outputFileId = freezed, - Object? errorFileId = freezed, - Object? createdAt = null, - Object? inProgressAt = freezed, - Object? expiresAt = freezed, - Object? finalizingAt = freezed, - Object? completedAt = freezed, - Object? failedAt = freezed, - Object? expiredAt = freezed, - Object? cancellingAt = freezed, - Object? cancelledAt = freezed, - Object? requestCounts = freezed, - Object? metadata = freezed, - }) { - return _then(_value.copyWith( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as BatchObject, - endpoint: null == endpoint - ? _value.endpoint - : endpoint // ignore: cast_nullable_to_non_nullable - as BatchEndpoint, - errors: freezed == errors - ? _value.errors - : errors // ignore: cast_nullable_to_non_nullable - as BatchErrors?, - inputFileId: null == inputFileId - ? _value.inputFileId - : inputFileId // ignore: cast_nullable_to_non_nullable - as String, - completionWindow: null == completionWindow - ? _value.completionWindow - : completionWindow // ignore: cast_nullable_to_non_nullable - as BatchCompletionWindow, - status: null == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as BatchStatus, - outputFileId: freezed == outputFileId - ? _value.outputFileId - : outputFileId // ignore: cast_nullable_to_non_nullable - as String?, - errorFileId: freezed == errorFileId - ? _value.errorFileId - : errorFileId // ignore: cast_nullable_to_non_nullable - as String?, - createdAt: null == createdAt - ? _value.createdAt - : createdAt // ignore: cast_nullable_to_non_nullable - as int, - inProgressAt: freezed == inProgressAt - ? 
_value.inProgressAt - : inProgressAt // ignore: cast_nullable_to_non_nullable - as int?, - expiresAt: freezed == expiresAt - ? _value.expiresAt - : expiresAt // ignore: cast_nullable_to_non_nullable - as int?, - finalizingAt: freezed == finalizingAt - ? _value.finalizingAt - : finalizingAt // ignore: cast_nullable_to_non_nullable - as int?, - completedAt: freezed == completedAt - ? _value.completedAt - : completedAt // ignore: cast_nullable_to_non_nullable - as int?, - failedAt: freezed == failedAt - ? _value.failedAt - : failedAt // ignore: cast_nullable_to_non_nullable - as int?, - expiredAt: freezed == expiredAt - ? _value.expiredAt - : expiredAt // ignore: cast_nullable_to_non_nullable - as int?, - cancellingAt: freezed == cancellingAt - ? _value.cancellingAt - : cancellingAt // ignore: cast_nullable_to_non_nullable - as int?, - cancelledAt: freezed == cancelledAt - ? _value.cancelledAt - : cancelledAt // ignore: cast_nullable_to_non_nullable - as int?, - requestCounts: freezed == requestCounts - ? _value.requestCounts - : requestCounts // ignore: cast_nullable_to_non_nullable - as BatchRequestCounts?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, - ) as $Val); - } - - /// Create a copy of Batch - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $BatchErrorsCopyWith<$Res>? get errors { - if (_value.errors == null) { - return null; - } - - return $BatchErrorsCopyWith<$Res>(_value.errors!, (value) { - return _then(_value.copyWith(errors: value) as $Val); - }); - } - - /// Create a copy of Batch - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $BatchRequestCountsCopyWith<$Res>? get requestCounts { - if (_value.requestCounts == null) { - return null; - } - - return $BatchRequestCountsCopyWith<$Res>(_value.requestCounts!, (value) { - return _then(_value.copyWith(requestCounts: value) as $Val); - }); - } -} - -/// @nodoc -abstract class _$$BatchImplCopyWith<$Res> implements $BatchCopyWith<$Res> { - factory _$$BatchImplCopyWith( - _$BatchImpl value, $Res Function(_$BatchImpl) then) = - __$$BatchImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {String id, - BatchObject object, - BatchEndpoint endpoint, - @JsonKey(includeIfNull: false) BatchErrors? errors, - @JsonKey(name: 'input_file_id') String inputFileId, - @JsonKey(name: 'completion_window') - BatchCompletionWindow completionWindow, - BatchStatus status, - @JsonKey(name: 'output_file_id', includeIfNull: false) - String? outputFileId, - @JsonKey(name: 'error_file_id', includeIfNull: false) String? errorFileId, - @JsonKey(name: 'created_at') int createdAt, - @JsonKey(name: 'in_progress_at', includeIfNull: false) int? inProgressAt, - @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, - @JsonKey(name: 'finalizing_at', includeIfNull: false) int? finalizingAt, - @JsonKey(name: 'completed_at', includeIfNull: false) int? completedAt, - @JsonKey(name: 'failed_at', includeIfNull: false) int? failedAt, - @JsonKey(name: 'expired_at', includeIfNull: false) int? expiredAt, - @JsonKey(name: 'cancelling_at', includeIfNull: false) int? cancellingAt, - @JsonKey(name: 'cancelled_at', includeIfNull: false) int? cancelledAt, - @JsonKey(name: 'request_counts', includeIfNull: false) - BatchRequestCounts? requestCounts, - @JsonKey(includeIfNull: false) dynamic metadata}); - - @override - $BatchErrorsCopyWith<$Res>? 
get errors; - @override - $BatchRequestCountsCopyWith<$Res>? get requestCounts; -} - -/// @nodoc -class __$$BatchImplCopyWithImpl<$Res> - extends _$BatchCopyWithImpl<$Res, _$BatchImpl> - implements _$$BatchImplCopyWith<$Res> { - __$$BatchImplCopyWithImpl( - _$BatchImpl _value, $Res Function(_$BatchImpl) _then) - : super(_value, _then); - - /// Create a copy of Batch - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? id = null, - Object? object = null, - Object? endpoint = null, - Object? errors = freezed, - Object? inputFileId = null, - Object? completionWindow = null, - Object? status = null, - Object? outputFileId = freezed, - Object? errorFileId = freezed, - Object? createdAt = null, - Object? inProgressAt = freezed, - Object? expiresAt = freezed, - Object? finalizingAt = freezed, - Object? completedAt = freezed, - Object? failedAt = freezed, - Object? expiredAt = freezed, - Object? cancellingAt = freezed, - Object? cancelledAt = freezed, - Object? requestCounts = freezed, - Object? metadata = freezed, - }) { - return _then(_$BatchImpl( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as BatchObject, - endpoint: null == endpoint - ? _value.endpoint - : endpoint // ignore: cast_nullable_to_non_nullable - as BatchEndpoint, - errors: freezed == errors - ? _value.errors - : errors // ignore: cast_nullable_to_non_nullable - as BatchErrors?, - inputFileId: null == inputFileId - ? _value.inputFileId - : inputFileId // ignore: cast_nullable_to_non_nullable - as String, - completionWindow: null == completionWindow - ? _value.completionWindow - : completionWindow // ignore: cast_nullable_to_non_nullable - as BatchCompletionWindow, - status: null == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as BatchStatus, - outputFileId: freezed == outputFileId - ? _value.outputFileId - : outputFileId // ignore: cast_nullable_to_non_nullable - as String?, - errorFileId: freezed == errorFileId - ? _value.errorFileId - : errorFileId // ignore: cast_nullable_to_non_nullable - as String?, - createdAt: null == createdAt - ? _value.createdAt - : createdAt // ignore: cast_nullable_to_non_nullable - as int, - inProgressAt: freezed == inProgressAt - ? _value.inProgressAt - : inProgressAt // ignore: cast_nullable_to_non_nullable - as int?, - expiresAt: freezed == expiresAt - ? _value.expiresAt - : expiresAt // ignore: cast_nullable_to_non_nullable - as int?, - finalizingAt: freezed == finalizingAt - ? _value.finalizingAt - : finalizingAt // ignore: cast_nullable_to_non_nullable - as int?, - completedAt: freezed == completedAt - ? _value.completedAt - : completedAt // ignore: cast_nullable_to_non_nullable - as int?, - failedAt: freezed == failedAt - ? _value.failedAt - : failedAt // ignore: cast_nullable_to_non_nullable - as int?, - expiredAt: freezed == expiredAt - ? _value.expiredAt - : expiredAt // ignore: cast_nullable_to_non_nullable - as int?, - cancellingAt: freezed == cancellingAt - ? _value.cancellingAt - : cancellingAt // ignore: cast_nullable_to_non_nullable - as int?, - cancelledAt: freezed == cancelledAt - ? _value.cancelledAt - : cancelledAt // ignore: cast_nullable_to_non_nullable - as int?, - requestCounts: freezed == requestCounts - ? 
_value.requestCounts - : requestCounts // ignore: cast_nullable_to_non_nullable - as BatchRequestCounts?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$BatchImpl extends _Batch { - const _$BatchImpl( - {required this.id, - required this.object, - required this.endpoint, - @JsonKey(includeIfNull: false) this.errors, - @JsonKey(name: 'input_file_id') required this.inputFileId, - @JsonKey(name: 'completion_window') required this.completionWindow, - required this.status, - @JsonKey(name: 'output_file_id', includeIfNull: false) this.outputFileId, - @JsonKey(name: 'error_file_id', includeIfNull: false) this.errorFileId, - @JsonKey(name: 'created_at') required this.createdAt, - @JsonKey(name: 'in_progress_at', includeIfNull: false) this.inProgressAt, - @JsonKey(name: 'expires_at', includeIfNull: false) this.expiresAt, - @JsonKey(name: 'finalizing_at', includeIfNull: false) this.finalizingAt, - @JsonKey(name: 'completed_at', includeIfNull: false) this.completedAt, - @JsonKey(name: 'failed_at', includeIfNull: false) this.failedAt, - @JsonKey(name: 'expired_at', includeIfNull: false) this.expiredAt, - @JsonKey(name: 'cancelling_at', includeIfNull: false) this.cancellingAt, - @JsonKey(name: 'cancelled_at', includeIfNull: false) this.cancelledAt, - @JsonKey(name: 'request_counts', includeIfNull: false) this.requestCounts, - @JsonKey(includeIfNull: false) this.metadata}) - : super._(); - - factory _$BatchImpl.fromJson(Map json) => - _$$BatchImplFromJson(json); - - /// No Description - @override - final String id; - - /// The object type, which is always `batch`. - @override - final BatchObject object; - - /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. - @override - final BatchEndpoint endpoint; - - /// No Description - @override - @JsonKey(includeIfNull: false) - final BatchErrors? errors; - - /// The ID of the input file for the batch. - @override - @JsonKey(name: 'input_file_id') - final String inputFileId; - - /// The time frame within which the batch should be processed. Currently only `24h` is supported. - @override - @JsonKey(name: 'completion_window') - final BatchCompletionWindow completionWindow; - - /// The current status of the batch. - @override - final BatchStatus status; - - /// The ID of the file containing the outputs of successfully executed requests. - @override - @JsonKey(name: 'output_file_id', includeIfNull: false) - final String? outputFileId; - - /// The ID of the file containing the outputs of requests with errors. - @override - @JsonKey(name: 'error_file_id', includeIfNull: false) - final String? errorFileId; - - /// The Unix timestamp (in seconds) for when the batch was created. - @override - @JsonKey(name: 'created_at') - final int createdAt; - - /// The Unix timestamp (in seconds) for when the batch started processing. - @override - @JsonKey(name: 'in_progress_at', includeIfNull: false) - final int? inProgressAt; - - /// The Unix timestamp (in seconds) for when the batch will expire. - @override - @JsonKey(name: 'expires_at', includeIfNull: false) - final int? expiresAt; - - /// The Unix timestamp (in seconds) for when the batch started finalizing. 
- @override - @JsonKey(name: 'finalizing_at', includeIfNull: false) - final int? finalizingAt; - - /// The Unix timestamp (in seconds) for when the batch was completed. - @override - @JsonKey(name: 'completed_at', includeIfNull: false) - final int? completedAt; - - /// The Unix timestamp (in seconds) for when the batch failed. - @override - @JsonKey(name: 'failed_at', includeIfNull: false) - final int? failedAt; - - /// The Unix timestamp (in seconds) for when the batch expired. - @override - @JsonKey(name: 'expired_at', includeIfNull: false) - final int? expiredAt; - - /// The Unix timestamp (in seconds) for when the batch started cancelling. - @override - @JsonKey(name: 'cancelling_at', includeIfNull: false) - final int? cancellingAt; - - /// The Unix timestamp (in seconds) for when the batch was cancelled. - @override - @JsonKey(name: 'cancelled_at', includeIfNull: false) - final int? cancelledAt; - - /// The request counts for different statuses within the batch. - @override - @JsonKey(name: 'request_counts', includeIfNull: false) - final BatchRequestCounts? requestCounts; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. - @override - @JsonKey(includeIfNull: false) - final dynamic metadata; - - @override - String toString() { - return 'Batch(id: $id, object: $object, endpoint: $endpoint, errors: $errors, inputFileId: $inputFileId, completionWindow: $completionWindow, status: $status, outputFileId: $outputFileId, errorFileId: $errorFileId, createdAt: $createdAt, inProgressAt: $inProgressAt, expiresAt: $expiresAt, finalizingAt: $finalizingAt, completedAt: $completedAt, failedAt: $failedAt, expiredAt: $expiredAt, cancellingAt: $cancellingAt, cancelledAt: $cancelledAt, requestCounts: $requestCounts, metadata: $metadata)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$BatchImpl && - (identical(other.id, id) || other.id == id) && - (identical(other.object, object) || other.object == object) && - (identical(other.endpoint, endpoint) || - other.endpoint == endpoint) && - (identical(other.errors, errors) || other.errors == errors) && - (identical(other.inputFileId, inputFileId) || - other.inputFileId == inputFileId) && - (identical(other.completionWindow, completionWindow) || - other.completionWindow == completionWindow) && - (identical(other.status, status) || other.status == status) && - (identical(other.outputFileId, outputFileId) || - other.outputFileId == outputFileId) && - (identical(other.errorFileId, errorFileId) || - other.errorFileId == errorFileId) && - (identical(other.createdAt, createdAt) || - other.createdAt == createdAt) && - (identical(other.inProgressAt, inProgressAt) || - other.inProgressAt == inProgressAt) && - (identical(other.expiresAt, expiresAt) || - other.expiresAt == expiresAt) && - (identical(other.finalizingAt, finalizingAt) || - other.finalizingAt == finalizingAt) && - (identical(other.completedAt, completedAt) || - other.completedAt == completedAt) && - (identical(other.failedAt, failedAt) || - other.failedAt == failedAt) && - (identical(other.expiredAt, expiredAt) || - other.expiredAt == expiredAt) && - (identical(other.cancellingAt, cancellingAt) || - other.cancellingAt == cancellingAt) && - (identical(other.cancelledAt, cancelledAt) || - 
other.cancelledAt == cancelledAt) && - (identical(other.requestCounts, requestCounts) || - other.requestCounts == requestCounts) && - const DeepCollectionEquality().equals(other.metadata, metadata)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hashAll([ - runtimeType, - id, - object, - endpoint, - errors, - inputFileId, - completionWindow, - status, - outputFileId, - errorFileId, - createdAt, - inProgressAt, - expiresAt, - finalizingAt, - completedAt, - failedAt, - expiredAt, - cancellingAt, - cancelledAt, - requestCounts, - const DeepCollectionEquality().hash(metadata) - ]); - - /// Create a copy of Batch - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$BatchImplCopyWith<_$BatchImpl> get copyWith => - __$$BatchImplCopyWithImpl<_$BatchImpl>(this, _$identity); - - @override - Map toJson() { - return _$$BatchImplToJson( - this, - ); - } -} - -abstract class _Batch extends Batch { - const factory _Batch( - {required final String id, - required final BatchObject object, - required final BatchEndpoint endpoint, - @JsonKey(includeIfNull: false) final BatchErrors? errors, - @JsonKey(name: 'input_file_id') required final String inputFileId, - @JsonKey(name: 'completion_window') - required final BatchCompletionWindow completionWindow, - required final BatchStatus status, - @JsonKey(name: 'output_file_id', includeIfNull: false) - final String? outputFileId, - @JsonKey(name: 'error_file_id', includeIfNull: false) - final String? errorFileId, - @JsonKey(name: 'created_at') required final int createdAt, - @JsonKey(name: 'in_progress_at', includeIfNull: false) - final int? inProgressAt, - @JsonKey(name: 'expires_at', includeIfNull: false) final int? expiresAt, - @JsonKey(name: 'finalizing_at', includeIfNull: false) - final int? finalizingAt, - @JsonKey(name: 'completed_at', includeIfNull: false) - final int? completedAt, - @JsonKey(name: 'failed_at', includeIfNull: false) final int? failedAt, - @JsonKey(name: 'expired_at', includeIfNull: false) final int? expiredAt, - @JsonKey(name: 'cancelling_at', includeIfNull: false) - final int? cancellingAt, - @JsonKey(name: 'cancelled_at', includeIfNull: false) - final int? cancelledAt, - @JsonKey(name: 'request_counts', includeIfNull: false) - final BatchRequestCounts? requestCounts, - @JsonKey(includeIfNull: false) final dynamic metadata}) = _$BatchImpl; - const _Batch._() : super._(); - - factory _Batch.fromJson(Map json) = _$BatchImpl.fromJson; - - /// No Description - @override - String get id; - - /// The object type, which is always `batch`. - @override - BatchObject get object; - - /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. - @override - BatchEndpoint get endpoint; - - /// No Description - @override - @JsonKey(includeIfNull: false) - BatchErrors? get errors; - - /// The ID of the input file for the batch. - @override - @JsonKey(name: 'input_file_id') - String get inputFileId; - - /// The time frame within which the batch should be processed. Currently only `24h` is supported. - @override - @JsonKey(name: 'completion_window') - BatchCompletionWindow get completionWindow; - - /// The current status of the batch. 
- @override - BatchStatus get status; - - /// The ID of the file containing the outputs of successfully executed requests. - @override - @JsonKey(name: 'output_file_id', includeIfNull: false) - String? get outputFileId; - - /// The ID of the file containing the outputs of requests with errors. - @override - @JsonKey(name: 'error_file_id', includeIfNull: false) - String? get errorFileId; - - /// The Unix timestamp (in seconds) for when the batch was created. - @override - @JsonKey(name: 'created_at') - int get createdAt; - - /// The Unix timestamp (in seconds) for when the batch started processing. - @override - @JsonKey(name: 'in_progress_at', includeIfNull: false) - int? get inProgressAt; - - /// The Unix timestamp (in seconds) for when the batch will expire. - @override - @JsonKey(name: 'expires_at', includeIfNull: false) - int? get expiresAt; - - /// The Unix timestamp (in seconds) for when the batch started finalizing. - @override - @JsonKey(name: 'finalizing_at', includeIfNull: false) - int? get finalizingAt; - - /// The Unix timestamp (in seconds) for when the batch was completed. - @override - @JsonKey(name: 'completed_at', includeIfNull: false) - int? get completedAt; - - /// The Unix timestamp (in seconds) for when the batch failed. - @override - @JsonKey(name: 'failed_at', includeIfNull: false) - int? get failedAt; - - /// The Unix timestamp (in seconds) for when the batch expired. - @override - @JsonKey(name: 'expired_at', includeIfNull: false) - int? get expiredAt; - - /// The Unix timestamp (in seconds) for when the batch started cancelling. - @override - @JsonKey(name: 'cancelling_at', includeIfNull: false) - int? get cancellingAt; - - /// The Unix timestamp (in seconds) for when the batch was cancelled. - @override - @JsonKey(name: 'cancelled_at', includeIfNull: false) - int? get cancelledAt; - - /// The request counts for different statuses within the batch. - @override - @JsonKey(name: 'request_counts', includeIfNull: false) - BatchRequestCounts? get requestCounts; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. - @override - @JsonKey(includeIfNull: false) - dynamic get metadata; - - /// Create a copy of Batch - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$BatchImplCopyWith<_$BatchImpl> get copyWith => - throw _privateConstructorUsedError; -} - -BatchErrors _$BatchErrorsFromJson(Map json) { - return _BatchErrors.fromJson(json); -} - -/// @nodoc -mixin _$BatchErrors { - /// The object type, which is always `list`. - @JsonKey(includeIfNull: false) - String? get object => throw _privateConstructorUsedError; - - /// No Description - @JsonKey(includeIfNull: false) - List? get data => throw _privateConstructorUsedError; - - /// Serializes this BatchErrors to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of BatchErrors - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $BatchErrorsCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $BatchErrorsCopyWith<$Res> { - factory $BatchErrorsCopyWith( - BatchErrors value, $Res Function(BatchErrors) then) = - _$BatchErrorsCopyWithImpl<$Res, BatchErrors>; - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? object, - @JsonKey(includeIfNull: false) List? data}); -} - -/// @nodoc -class _$BatchErrorsCopyWithImpl<$Res, $Val extends BatchErrors> - implements $BatchErrorsCopyWith<$Res> { - _$BatchErrorsCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of BatchErrors - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? object = freezed, - Object? data = freezed, - }) { - return _then(_value.copyWith( - object: freezed == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as String?, - data: freezed == data - ? _value.data - : data // ignore: cast_nullable_to_non_nullable - as List?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$BatchErrorsImplCopyWith<$Res> - implements $BatchErrorsCopyWith<$Res> { - factory _$$BatchErrorsImplCopyWith( - _$BatchErrorsImpl value, $Res Function(_$BatchErrorsImpl) then) = - __$$BatchErrorsImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? object, - @JsonKey(includeIfNull: false) List? data}); -} - -/// @nodoc -class __$$BatchErrorsImplCopyWithImpl<$Res> - extends _$BatchErrorsCopyWithImpl<$Res, _$BatchErrorsImpl> - implements _$$BatchErrorsImplCopyWith<$Res> { - __$$BatchErrorsImplCopyWithImpl( - _$BatchErrorsImpl _value, $Res Function(_$BatchErrorsImpl) _then) - : super(_value, _then); - - /// Create a copy of BatchErrors - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? object = freezed, - Object? data = freezed, - }) { - return _then(_$BatchErrorsImpl( - object: freezed == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as String?, - data: freezed == data - ? _value._data - : data // ignore: cast_nullable_to_non_nullable - as List?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$BatchErrorsImpl extends _BatchErrors { - const _$BatchErrorsImpl( - {@JsonKey(includeIfNull: false) this.object, - @JsonKey(includeIfNull: false) final List? data}) - : _data = data, - super._(); - - factory _$BatchErrorsImpl.fromJson(Map json) => - _$$BatchErrorsImplFromJson(json); - - /// The object type, which is always `list`. - @override - @JsonKey(includeIfNull: false) - final String? object; - - /// No Description - final List? _data; - - /// No Description - @override - @JsonKey(includeIfNull: false) - List? 
get data { - final value = _data; - if (value == null) return null; - if (_data is EqualUnmodifiableListView) return _data; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - - @override - String toString() { - return 'BatchErrors(object: $object, data: $data)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$BatchErrorsImpl && - (identical(other.object, object) || other.object == object) && - const DeepCollectionEquality().equals(other._data, _data)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash( - runtimeType, object, const DeepCollectionEquality().hash(_data)); - - /// Create a copy of BatchErrors - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$BatchErrorsImplCopyWith<_$BatchErrorsImpl> get copyWith => - __$$BatchErrorsImplCopyWithImpl<_$BatchErrorsImpl>(this, _$identity); - - @override - Map toJson() { - return _$$BatchErrorsImplToJson( - this, - ); - } -} - -abstract class _BatchErrors extends BatchErrors { - const factory _BatchErrors( - {@JsonKey(includeIfNull: false) final String? object, - @JsonKey(includeIfNull: false) - final List? data}) = _$BatchErrorsImpl; - const _BatchErrors._() : super._(); - - factory _BatchErrors.fromJson(Map json) = - _$BatchErrorsImpl.fromJson; - - /// The object type, which is always `list`. - @override - @JsonKey(includeIfNull: false) - String? get object; - - /// No Description - @override - @JsonKey(includeIfNull: false) - List? get data; - - /// Create a copy of BatchErrors - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$BatchErrorsImplCopyWith<_$BatchErrorsImpl> get copyWith => - throw _privateConstructorUsedError; -} - -BatchRequestCounts _$BatchRequestCountsFromJson(Map json) { - return _BatchRequestCounts.fromJson(json); -} - -/// @nodoc -mixin _$BatchRequestCounts { - /// Total number of requests in the batch. - int get total => throw _privateConstructorUsedError; - - /// Number of requests that have been completed successfully. - int get completed => throw _privateConstructorUsedError; - - /// Number of requests that have failed. - int get failed => throw _privateConstructorUsedError; - - /// Serializes this BatchRequestCounts to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of BatchRequestCounts - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $BatchRequestCountsCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $BatchRequestCountsCopyWith<$Res> { - factory $BatchRequestCountsCopyWith( - BatchRequestCounts value, $Res Function(BatchRequestCounts) then) = - _$BatchRequestCountsCopyWithImpl<$Res, BatchRequestCounts>; - @useResult - $Res call({int total, int completed, int failed}); -} - -/// @nodoc -class _$BatchRequestCountsCopyWithImpl<$Res, $Val extends BatchRequestCounts> - implements $BatchRequestCountsCopyWith<$Res> { - _$BatchRequestCountsCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of BatchRequestCounts - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? total = null, - Object? completed = null, - Object? failed = null, - }) { - return _then(_value.copyWith( - total: null == total - ? _value.total - : total // ignore: cast_nullable_to_non_nullable - as int, - completed: null == completed - ? _value.completed - : completed // ignore: cast_nullable_to_non_nullable - as int, - failed: null == failed - ? _value.failed - : failed // ignore: cast_nullable_to_non_nullable - as int, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$BatchRequestCountsImplCopyWith<$Res> - implements $BatchRequestCountsCopyWith<$Res> { - factory _$$BatchRequestCountsImplCopyWith(_$BatchRequestCountsImpl value, - $Res Function(_$BatchRequestCountsImpl) then) = - __$$BatchRequestCountsImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({int total, int completed, int failed}); -} - -/// @nodoc -class __$$BatchRequestCountsImplCopyWithImpl<$Res> - extends _$BatchRequestCountsCopyWithImpl<$Res, _$BatchRequestCountsImpl> - implements _$$BatchRequestCountsImplCopyWith<$Res> { - __$$BatchRequestCountsImplCopyWithImpl(_$BatchRequestCountsImpl _value, - $Res Function(_$BatchRequestCountsImpl) _then) - : super(_value, _then); - - /// Create a copy of BatchRequestCounts - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? total = null, - Object? completed = null, - Object? failed = null, - }) { - return _then(_$BatchRequestCountsImpl( - total: null == total - ? _value.total - : total // ignore: cast_nullable_to_non_nullable - as int, - completed: null == completed - ? _value.completed - : completed // ignore: cast_nullable_to_non_nullable - as int, - failed: null == failed - ? _value.failed - : failed // ignore: cast_nullable_to_non_nullable - as int, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$BatchRequestCountsImpl extends _BatchRequestCounts { - const _$BatchRequestCountsImpl( - {required this.total, required this.completed, required this.failed}) - : super._(); - - factory _$BatchRequestCountsImpl.fromJson(Map json) => - _$$BatchRequestCountsImplFromJson(json); - - /// Total number of requests in the batch. - @override - final int total; - - /// Number of requests that have been completed successfully. - @override - final int completed; - - /// Number of requests that have failed. 
- @override - final int failed; - - @override - String toString() { - return 'BatchRequestCounts(total: $total, completed: $completed, failed: $failed)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$BatchRequestCountsImpl && - (identical(other.total, total) || other.total == total) && - (identical(other.completed, completed) || - other.completed == completed) && - (identical(other.failed, failed) || other.failed == failed)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, total, completed, failed); - - /// Create a copy of BatchRequestCounts - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$BatchRequestCountsImplCopyWith<_$BatchRequestCountsImpl> get copyWith => - __$$BatchRequestCountsImplCopyWithImpl<_$BatchRequestCountsImpl>( - this, _$identity); - - @override - Map toJson() { - return _$$BatchRequestCountsImplToJson( - this, - ); - } -} - -abstract class _BatchRequestCounts extends BatchRequestCounts { - const factory _BatchRequestCounts( - {required final int total, - required final int completed, - required final int failed}) = _$BatchRequestCountsImpl; - const _BatchRequestCounts._() : super._(); - - factory _BatchRequestCounts.fromJson(Map json) = - _$BatchRequestCountsImpl.fromJson; - - /// Total number of requests in the batch. - @override - int get total; - - /// Number of requests that have been completed successfully. - @override - int get completed; - - /// Number of requests that have failed. - @override - int get failed; - - /// Create a copy of BatchRequestCounts - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$BatchRequestCountsImplCopyWith<_$BatchRequestCountsImpl> get copyWith => - throw _privateConstructorUsedError; -} - -BatchErrorsDataInner _$BatchErrorsDataInnerFromJson(Map json) { - return _BatchErrorsDataInner.fromJson(json); -} - -/// @nodoc -mixin _$BatchErrorsDataInner { - /// An error code identifying the error type. - @JsonKey(includeIfNull: false) - String? get code => throw _privateConstructorUsedError; - - /// A human-readable message providing more details about the error. - @JsonKey(includeIfNull: false) - String? get message => throw _privateConstructorUsedError; - - /// The name of the parameter that caused the error, if applicable. - @JsonKey(includeIfNull: false) - String? get param => throw _privateConstructorUsedError; - - /// The line number of the input file where the error occurred, if applicable. - @JsonKey(includeIfNull: false) - int? get line => throw _privateConstructorUsedError; - - /// Serializes this BatchErrorsDataInner to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of BatchErrorsDataInner - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $BatchErrorsDataInnerCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $BatchErrorsDataInnerCopyWith<$Res> { - factory $BatchErrorsDataInnerCopyWith(BatchErrorsDataInner value, - $Res Function(BatchErrorsDataInner) then) = - _$BatchErrorsDataInnerCopyWithImpl<$Res, BatchErrorsDataInner>; - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? 
code, - @JsonKey(includeIfNull: false) String? message, - @JsonKey(includeIfNull: false) String? param, - @JsonKey(includeIfNull: false) int? line}); -} - -/// @nodoc -class _$BatchErrorsDataInnerCopyWithImpl<$Res, - $Val extends BatchErrorsDataInner> - implements $BatchErrorsDataInnerCopyWith<$Res> { - _$BatchErrorsDataInnerCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of BatchErrorsDataInner - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? code = freezed, - Object? message = freezed, - Object? param = freezed, - Object? line = freezed, - }) { - return _then(_value.copyWith( - code: freezed == code - ? _value.code - : code // ignore: cast_nullable_to_non_nullable - as String?, - message: freezed == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as String?, - param: freezed == param - ? _value.param - : param // ignore: cast_nullable_to_non_nullable - as String?, - line: freezed == line - ? _value.line - : line // ignore: cast_nullable_to_non_nullable - as int?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$BatchErrorsDataInnerImplCopyWith<$Res> - implements $BatchErrorsDataInnerCopyWith<$Res> { - factory _$$BatchErrorsDataInnerImplCopyWith(_$BatchErrorsDataInnerImpl value, - $Res Function(_$BatchErrorsDataInnerImpl) then) = - __$$BatchErrorsDataInnerImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? code, - @JsonKey(includeIfNull: false) String? message, - @JsonKey(includeIfNull: false) String? param, - @JsonKey(includeIfNull: false) int? line}); -} - -/// @nodoc -class __$$BatchErrorsDataInnerImplCopyWithImpl<$Res> - extends _$BatchErrorsDataInnerCopyWithImpl<$Res, _$BatchErrorsDataInnerImpl> - implements _$$BatchErrorsDataInnerImplCopyWith<$Res> { - __$$BatchErrorsDataInnerImplCopyWithImpl(_$BatchErrorsDataInnerImpl _value, - $Res Function(_$BatchErrorsDataInnerImpl) _then) - : super(_value, _then); - - /// Create a copy of BatchErrorsDataInner - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? code = freezed, - Object? message = freezed, - Object? param = freezed, - Object? line = freezed, - }) { - return _then(_$BatchErrorsDataInnerImpl( - code: freezed == code - ? _value.code - : code // ignore: cast_nullable_to_non_nullable - as String?, - message: freezed == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as String?, - param: freezed == param - ? _value.param - : param // ignore: cast_nullable_to_non_nullable - as String?, - line: freezed == line - ? _value.line - : line // ignore: cast_nullable_to_non_nullable - as int?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$BatchErrorsDataInnerImpl extends _BatchErrorsDataInner { - const _$BatchErrorsDataInnerImpl( - {@JsonKey(includeIfNull: false) this.code, - @JsonKey(includeIfNull: false) this.message, - @JsonKey(includeIfNull: false) this.param, - @JsonKey(includeIfNull: false) this.line}) - : super._(); - - factory _$BatchErrorsDataInnerImpl.fromJson(Map json) => - _$$BatchErrorsDataInnerImplFromJson(json); - - /// An error code identifying the error type. - @override - @JsonKey(includeIfNull: false) - final String? code; - - /// A human-readable message providing more details about the error. 
- @override - @JsonKey(includeIfNull: false) - final String? message; - - /// The name of the parameter that caused the error, if applicable. - @override - @JsonKey(includeIfNull: false) - final String? param; - - /// The line number of the input file where the error occurred, if applicable. - @override - @JsonKey(includeIfNull: false) - final int? line; - - @override - String toString() { - return 'BatchErrorsDataInner(code: $code, message: $message, param: $param, line: $line)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$BatchErrorsDataInnerImpl && - (identical(other.code, code) || other.code == code) && - (identical(other.message, message) || other.message == message) && - (identical(other.param, param) || other.param == param) && - (identical(other.line, line) || other.line == line)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, code, message, param, line); - - /// Create a copy of BatchErrorsDataInner - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$BatchErrorsDataInnerImplCopyWith<_$BatchErrorsDataInnerImpl> - get copyWith => - __$$BatchErrorsDataInnerImplCopyWithImpl<_$BatchErrorsDataInnerImpl>( - this, _$identity); - - @override - Map toJson() { - return _$$BatchErrorsDataInnerImplToJson( - this, - ); - } -} - -abstract class _BatchErrorsDataInner extends BatchErrorsDataInner { - const factory _BatchErrorsDataInner( - {@JsonKey(includeIfNull: false) final String? code, - @JsonKey(includeIfNull: false) final String? message, - @JsonKey(includeIfNull: false) final String? param, - @JsonKey(includeIfNull: false) final int? line}) = - _$BatchErrorsDataInnerImpl; - const _BatchErrorsDataInner._() : super._(); - - factory _BatchErrorsDataInner.fromJson(Map json) = - _$BatchErrorsDataInnerImpl.fromJson; - - /// An error code identifying the error type. - @override - @JsonKey(includeIfNull: false) - String? get code; - - /// A human-readable message providing more details about the error. - @override - @JsonKey(includeIfNull: false) - String? get message; - - /// The name of the parameter that caused the error, if applicable. - @override - @JsonKey(includeIfNull: false) - String? get param; - - /// The line number of the input file where the error occurred, if applicable. - @override - @JsonKey(includeIfNull: false) - int? get line; - - /// Create a copy of BatchErrorsDataInner - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$BatchErrorsDataInnerImplCopyWith<_$BatchErrorsDataInnerImpl> - get copyWith => throw _privateConstructorUsedError; -} - -ListBatchesResponse _$ListBatchesResponseFromJson(Map json) { - return _ListBatchesResponse.fromJson(json); -} - -/// @nodoc -mixin _$ListBatchesResponse { - /// No Description - List get data => throw _privateConstructorUsedError; - - /// The ID of the first batch in the list. - @JsonKey(name: 'first_id', includeIfNull: false) - String? get firstId => throw _privateConstructorUsedError; - - /// The ID of the last batch in the list. - @JsonKey(name: 'last_id', includeIfNull: false) - String? get lastId => throw _privateConstructorUsedError; - - /// Whether there are more batches available. 
- @JsonKey(name: 'has_more') - bool get hasMore => throw _privateConstructorUsedError; - - /// The object type, which is always `list`. - ListBatchesResponseObject get object => throw _privateConstructorUsedError; - - /// Serializes this ListBatchesResponse to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ListBatchesResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $ListBatchesResponseCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ListBatchesResponseCopyWith<$Res> { - factory $ListBatchesResponseCopyWith( - ListBatchesResponse value, $Res Function(ListBatchesResponse) then) = - _$ListBatchesResponseCopyWithImpl<$Res, ListBatchesResponse>; - @useResult - $Res call( - {List data, - @JsonKey(name: 'first_id', includeIfNull: false) String? firstId, - @JsonKey(name: 'last_id', includeIfNull: false) String? lastId, - @JsonKey(name: 'has_more') bool hasMore, - ListBatchesResponseObject object}); -} - -/// @nodoc -class _$ListBatchesResponseCopyWithImpl<$Res, $Val extends ListBatchesResponse> - implements $ListBatchesResponseCopyWith<$Res> { - _$ListBatchesResponseCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of ListBatchesResponse - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? data = null, - Object? firstId = freezed, - Object? lastId = freezed, - Object? hasMore = null, - Object? object = null, - }) { - return _then(_value.copyWith( - data: null == data - ? _value.data - : data // ignore: cast_nullable_to_non_nullable - as List, - firstId: freezed == firstId - ? _value.firstId - : firstId // ignore: cast_nullable_to_non_nullable - as String?, - lastId: freezed == lastId - ? _value.lastId - : lastId // ignore: cast_nullable_to_non_nullable - as String?, - hasMore: null == hasMore - ? _value.hasMore - : hasMore // ignore: cast_nullable_to_non_nullable - as bool, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as ListBatchesResponseObject, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$ListBatchesResponseImplCopyWith<$Res> - implements $ListBatchesResponseCopyWith<$Res> { - factory _$$ListBatchesResponseImplCopyWith(_$ListBatchesResponseImpl value, - $Res Function(_$ListBatchesResponseImpl) then) = - __$$ListBatchesResponseImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {List data, - @JsonKey(name: 'first_id', includeIfNull: false) String? firstId, - @JsonKey(name: 'last_id', includeIfNull: false) String? lastId, - @JsonKey(name: 'has_more') bool hasMore, - ListBatchesResponseObject object}); -} - -/// @nodoc -class __$$ListBatchesResponseImplCopyWithImpl<$Res> - extends _$ListBatchesResponseCopyWithImpl<$Res, _$ListBatchesResponseImpl> - implements _$$ListBatchesResponseImplCopyWith<$Res> { - __$$ListBatchesResponseImplCopyWithImpl(_$ListBatchesResponseImpl _value, - $Res Function(_$ListBatchesResponseImpl) _then) - : super(_value, _then); - - /// Create a copy of ListBatchesResponse - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? data = null, - Object? firstId = freezed, - Object? lastId = freezed, - Object? hasMore = null, - Object? 
object = null, - }) { - return _then(_$ListBatchesResponseImpl( - data: null == data - ? _value._data - : data // ignore: cast_nullable_to_non_nullable - as List, - firstId: freezed == firstId - ? _value.firstId - : firstId // ignore: cast_nullable_to_non_nullable - as String?, - lastId: freezed == lastId - ? _value.lastId - : lastId // ignore: cast_nullable_to_non_nullable - as String?, - hasMore: null == hasMore - ? _value.hasMore - : hasMore // ignore: cast_nullable_to_non_nullable - as bool, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as ListBatchesResponseObject, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ListBatchesResponseImpl extends _ListBatchesResponse { - const _$ListBatchesResponseImpl( - {required final List data, - @JsonKey(name: 'first_id', includeIfNull: false) this.firstId, - @JsonKey(name: 'last_id', includeIfNull: false) this.lastId, - @JsonKey(name: 'has_more') required this.hasMore, - required this.object}) - : _data = data, - super._(); - - factory _$ListBatchesResponseImpl.fromJson(Map json) => - _$$ListBatchesResponseImplFromJson(json); - - /// No Description - final List _data; - - /// No Description - @override - List get data { - if (_data is EqualUnmodifiableListView) return _data; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_data); - } - - /// The ID of the first batch in the list. - @override - @JsonKey(name: 'first_id', includeIfNull: false) - final String? firstId; - - /// The ID of the last batch in the list. - @override - @JsonKey(name: 'last_id', includeIfNull: false) - final String? lastId; - - /// Whether there are more batches available. - @override - @JsonKey(name: 'has_more') - final bool hasMore; - - /// The object type, which is always `list`. - @override - final ListBatchesResponseObject object; - - @override - String toString() { - return 'ListBatchesResponse(data: $data, firstId: $firstId, lastId: $lastId, hasMore: $hasMore, object: $object)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ListBatchesResponseImpl && - const DeepCollectionEquality().equals(other._data, _data) && - (identical(other.firstId, firstId) || other.firstId == firstId) && - (identical(other.lastId, lastId) || other.lastId == lastId) && - (identical(other.hasMore, hasMore) || other.hasMore == hasMore) && - (identical(other.object, object) || other.object == object)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash( - runtimeType, - const DeepCollectionEquality().hash(_data), - firstId, - lastId, - hasMore, - object); - - /// Create a copy of ListBatchesResponse - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$ListBatchesResponseImplCopyWith<_$ListBatchesResponseImpl> get copyWith => - __$$ListBatchesResponseImplCopyWithImpl<_$ListBatchesResponseImpl>( - this, _$identity); - - @override - Map toJson() { - return _$$ListBatchesResponseImplToJson( - this, - ); - } -} - -abstract class _ListBatchesResponse extends ListBatchesResponse { - const factory _ListBatchesResponse( - {required final List data, - @JsonKey(name: 'first_id', includeIfNull: false) final String? firstId, - @JsonKey(name: 'last_id', includeIfNull: false) final String? 
lastId, - @JsonKey(name: 'has_more') required final bool hasMore, - required final ListBatchesResponseObject - object}) = _$ListBatchesResponseImpl; - const _ListBatchesResponse._() : super._(); - - factory _ListBatchesResponse.fromJson(Map json) = - _$ListBatchesResponseImpl.fromJson; - - /// No Description - @override - List get data; - - /// The ID of the first batch in the list. - @override - @JsonKey(name: 'first_id', includeIfNull: false) - String? get firstId; - - /// The ID of the last batch in the list. - @override - @JsonKey(name: 'last_id', includeIfNull: false) - String? get lastId; - - /// Whether there are more batches available. - @override - @JsonKey(name: 'has_more') - bool get hasMore; - - /// The object type, which is always `list`. - @override - ListBatchesResponseObject get object; - - /// Create a copy of ListBatchesResponse - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ListBatchesResponseImplCopyWith<_$ListBatchesResponseImpl> get copyWith => - throw _privateConstructorUsedError; -} - -ChatCompletionMessage _$ChatCompletionMessageFromJson( - Map json) { - switch (json['role']) { - case 'system': - return ChatCompletionSystemMessage.fromJson(json); - case 'user': - return ChatCompletionUserMessage.fromJson(json); - case 'assistant': - return ChatCompletionAssistantMessage.fromJson(json); - case 'tool': - return ChatCompletionToolMessage.fromJson(json); - case 'function': - return ChatCompletionFunctionMessage.fromJson(json); - - default: - throw CheckedFromJsonException(json, 'role', 'ChatCompletionMessage', - 'Invalid union type "${json['role']}"!'); - } -} - -/// @nodoc -mixin _$ChatCompletionMessage { - /// The role of the messages author, in this case `system`. - ChatCompletionMessageRole get role => throw _privateConstructorUsedError; - - /// The contents of the system message. - Object? get content => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name) - system, - required TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name) - user, - required TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall) - assistant, - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId) - tool, - required TResult Function( - ChatCompletionMessageRole role, String? content, String name) - function, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult? Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult? Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? 
content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult? Function( - ChatCompletionMessageRole role, String? content, String name)? - function, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult Function( - ChatCompletionMessageRole role, String? content, String name)? - function, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionSystemMessage value) system, - required TResult Function(ChatCompletionUserMessage value) user, - required TResult Function(ChatCompletionAssistantMessage value) assistant, - required TResult Function(ChatCompletionToolMessage value) tool, - required TResult Function(ChatCompletionFunctionMessage value) function, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionSystemMessage value)? system, - TResult? Function(ChatCompletionUserMessage value)? user, - TResult? Function(ChatCompletionAssistantMessage value)? assistant, - TResult? Function(ChatCompletionToolMessage value)? tool, - TResult? Function(ChatCompletionFunctionMessage value)? function, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionSystemMessage value)? system, - TResult Function(ChatCompletionUserMessage value)? user, - TResult Function(ChatCompletionAssistantMessage value)? assistant, - TResult Function(ChatCompletionToolMessage value)? tool, - TResult Function(ChatCompletionFunctionMessage value)? function, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - - /// Serializes this ChatCompletionMessage to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $ChatCompletionMessageCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ChatCompletionMessageCopyWith<$Res> { - factory $ChatCompletionMessageCopyWith(ChatCompletionMessage value, - $Res Function(ChatCompletionMessage) then) = - _$ChatCompletionMessageCopyWithImpl<$Res, ChatCompletionMessage>; - @useResult - $Res call({ChatCompletionMessageRole role}); -} - -/// @nodoc -class _$ChatCompletionMessageCopyWithImpl<$Res, - $Val extends ChatCompletionMessage> - implements $ChatCompletionMessageCopyWith<$Res> { - _$ChatCompletionMessageCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? role = null, - }) { - return _then(_value.copyWith( - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageRole, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$ChatCompletionSystemMessageImplCopyWith<$Res> - implements $ChatCompletionMessageCopyWith<$Res> { - factory _$$ChatCompletionSystemMessageImplCopyWith( - _$ChatCompletionSystemMessageImpl value, - $Res Function(_$ChatCompletionSystemMessageImpl) then) = - __$$ChatCompletionSystemMessageImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {ChatCompletionMessageRole role, - String content, - @JsonKey(includeIfNull: false) String? name}); -} - -/// @nodoc -class __$$ChatCompletionSystemMessageImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageCopyWithImpl<$Res, - _$ChatCompletionSystemMessageImpl> - implements _$$ChatCompletionSystemMessageImplCopyWith<$Res> { - __$$ChatCompletionSystemMessageImplCopyWithImpl( - _$ChatCompletionSystemMessageImpl _value, - $Res Function(_$ChatCompletionSystemMessageImpl) _then) - : super(_value, _then); - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? role = null, - Object? content = null, - Object? name = freezed, - }) { - return _then(_$ChatCompletionSystemMessageImpl( - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageRole, - content: null == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as String, - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ChatCompletionSystemMessageImpl extends ChatCompletionSystemMessage { - const _$ChatCompletionSystemMessageImpl( - {this.role = ChatCompletionMessageRole.system, - required this.content, - @JsonKey(includeIfNull: false) this.name}) - : super._(); - - factory _$ChatCompletionSystemMessageImpl.fromJson( - Map json) => - _$$ChatCompletionSystemMessageImplFromJson(json); - - /// The role of the messages author, in this case `system`. - @override - @JsonKey() - final ChatCompletionMessageRole role; - - /// The contents of the system message. - @override - final String content; - - /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. - @override - @JsonKey(includeIfNull: false) - final String? 
name; - - @override - String toString() { - return 'ChatCompletionMessage.system(role: $role, content: $content, name: $name)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ChatCompletionSystemMessageImpl && - (identical(other.role, role) || other.role == role) && - (identical(other.content, content) || other.content == content) && - (identical(other.name, name) || other.name == name)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, role, content, name); - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$ChatCompletionSystemMessageImplCopyWith<_$ChatCompletionSystemMessageImpl> - get copyWith => __$$ChatCompletionSystemMessageImplCopyWithImpl< - _$ChatCompletionSystemMessageImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name) - system, - required TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name) - user, - required TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall) - assistant, - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId) - tool, - required TResult Function( - ChatCompletionMessageRole role, String? content, String name) - function, - }) { - return system(role, content, name); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult? Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult? Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult? Function( - ChatCompletionMessageRole role, String? content, String name)? - function, - }) { - return system?.call(role, content, name); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? 
- system, - TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult Function( - ChatCompletionMessageRole role, String? content, String name)? - function, - required TResult orElse(), - }) { - if (system != null) { - return system(role, content, name); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionSystemMessage value) system, - required TResult Function(ChatCompletionUserMessage value) user, - required TResult Function(ChatCompletionAssistantMessage value) assistant, - required TResult Function(ChatCompletionToolMessage value) tool, - required TResult Function(ChatCompletionFunctionMessage value) function, - }) { - return system(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionSystemMessage value)? system, - TResult? Function(ChatCompletionUserMessage value)? user, - TResult? Function(ChatCompletionAssistantMessage value)? assistant, - TResult? Function(ChatCompletionToolMessage value)? tool, - TResult? Function(ChatCompletionFunctionMessage value)? function, - }) { - return system?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionSystemMessage value)? system, - TResult Function(ChatCompletionUserMessage value)? user, - TResult Function(ChatCompletionAssistantMessage value)? assistant, - TResult Function(ChatCompletionToolMessage value)? tool, - TResult Function(ChatCompletionFunctionMessage value)? function, - required TResult orElse(), - }) { - if (system != null) { - return system(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ChatCompletionSystemMessageImplToJson( - this, - ); - } -} - -abstract class ChatCompletionSystemMessage extends ChatCompletionMessage { - const factory ChatCompletionSystemMessage( - {final ChatCompletionMessageRole role, - required final String content, - @JsonKey(includeIfNull: false) final String? name}) = - _$ChatCompletionSystemMessageImpl; - const ChatCompletionSystemMessage._() : super._(); - - factory ChatCompletionSystemMessage.fromJson(Map json) = - _$ChatCompletionSystemMessageImpl.fromJson; - - /// The role of the messages author, in this case `system`. - @override - ChatCompletionMessageRole get role; - - /// The contents of the system message. - @override - String get content; - - /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. - @JsonKey(includeIfNull: false) - String? get name; - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. 
- @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ChatCompletionSystemMessageImplCopyWith<_$ChatCompletionSystemMessageImpl> - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$ChatCompletionUserMessageImplCopyWith<$Res> - implements $ChatCompletionMessageCopyWith<$Res> { - factory _$$ChatCompletionUserMessageImplCopyWith( - _$ChatCompletionUserMessageImpl value, - $Res Function(_$ChatCompletionUserMessageImpl) then) = - __$$ChatCompletionUserMessageImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name}); - - $ChatCompletionUserMessageContentCopyWith<$Res> get content; -} - -/// @nodoc -class __$$ChatCompletionUserMessageImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageCopyWithImpl<$Res, - _$ChatCompletionUserMessageImpl> - implements _$$ChatCompletionUserMessageImplCopyWith<$Res> { - __$$ChatCompletionUserMessageImplCopyWithImpl( - _$ChatCompletionUserMessageImpl _value, - $Res Function(_$ChatCompletionUserMessageImpl) _then) - : super(_value, _then); - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? role = null, - Object? content = null, - Object? name = freezed, - }) { - return _then(_$ChatCompletionUserMessageImpl( - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageRole, - content: null == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as ChatCompletionUserMessageContent, - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - )); - } - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $ChatCompletionUserMessageContentCopyWith<$Res> get content { - return $ChatCompletionUserMessageContentCopyWith<$Res>(_value.content, - (value) { - return _then(_value.copyWith(content: value)); - }); - } -} - -/// @nodoc -@JsonSerializable() -class _$ChatCompletionUserMessageImpl extends ChatCompletionUserMessage { - const _$ChatCompletionUserMessageImpl( - {this.role = ChatCompletionMessageRole.user, - @_ChatCompletionUserMessageContentConverter() required this.content, - @JsonKey(includeIfNull: false) this.name}) - : super._(); - - factory _$ChatCompletionUserMessageImpl.fromJson(Map json) => - _$$ChatCompletionUserMessageImplFromJson(json); - - /// The role of the messages author, in this case `user`. - @override - @JsonKey() - final ChatCompletionMessageRole role; - - /// The contents of the user message. - @override - @_ChatCompletionUserMessageContentConverter() - final ChatCompletionUserMessageContent content; - - /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. - @override - @JsonKey(includeIfNull: false) - final String? 
name; - - @override - String toString() { - return 'ChatCompletionMessage.user(role: $role, content: $content, name: $name)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ChatCompletionUserMessageImpl && - (identical(other.role, role) || other.role == role) && - (identical(other.content, content) || other.content == content) && - (identical(other.name, name) || other.name == name)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, role, content, name); - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$ChatCompletionUserMessageImplCopyWith<_$ChatCompletionUserMessageImpl> - get copyWith => __$$ChatCompletionUserMessageImplCopyWithImpl< - _$ChatCompletionUserMessageImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name) - system, - required TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name) - user, - required TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall) - assistant, - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId) - tool, - required TResult Function( - ChatCompletionMessageRole role, String? content, String name) - function, - }) { - return user(role, content, name); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult? Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult? Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult? Function( - ChatCompletionMessageRole role, String? content, String name)? - function, - }) { - return user?.call(role, content, name); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? 
name)? - user, - TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult Function( - ChatCompletionMessageRole role, String? content, String name)? - function, - required TResult orElse(), - }) { - if (user != null) { - return user(role, content, name); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionSystemMessage value) system, - required TResult Function(ChatCompletionUserMessage value) user, - required TResult Function(ChatCompletionAssistantMessage value) assistant, - required TResult Function(ChatCompletionToolMessage value) tool, - required TResult Function(ChatCompletionFunctionMessage value) function, - }) { - return user(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionSystemMessage value)? system, - TResult? Function(ChatCompletionUserMessage value)? user, - TResult? Function(ChatCompletionAssistantMessage value)? assistant, - TResult? Function(ChatCompletionToolMessage value)? tool, - TResult? Function(ChatCompletionFunctionMessage value)? function, - }) { - return user?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionSystemMessage value)? system, - TResult Function(ChatCompletionUserMessage value)? user, - TResult Function(ChatCompletionAssistantMessage value)? assistant, - TResult Function(ChatCompletionToolMessage value)? tool, - TResult Function(ChatCompletionFunctionMessage value)? function, - required TResult orElse(), - }) { - if (user != null) { - return user(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ChatCompletionUserMessageImplToJson( - this, - ); - } -} - -abstract class ChatCompletionUserMessage extends ChatCompletionMessage { - const factory ChatCompletionUserMessage( - {final ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - required final ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) final String? name}) = - _$ChatCompletionUserMessageImpl; - const ChatCompletionUserMessage._() : super._(); - - factory ChatCompletionUserMessage.fromJson(Map json) = - _$ChatCompletionUserMessageImpl.fromJson; - - /// The role of the messages author, in this case `user`. - @override - ChatCompletionMessageRole get role; - - /// The contents of the user message. - @override - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent get content; - - /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. - @JsonKey(includeIfNull: false) - String? get name; - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. 
- @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ChatCompletionUserMessageImplCopyWith<_$ChatCompletionUserMessageImpl> - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$ChatCompletionAssistantMessageImplCopyWith<$Res> - implements $ChatCompletionMessageCopyWith<$Res> { - factory _$$ChatCompletionAssistantMessageImplCopyWith( - _$ChatCompletionAssistantMessageImpl value, - $Res Function(_$ChatCompletionAssistantMessageImpl) then) = - __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall}); - - $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall; -} - -/// @nodoc -class __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageCopyWithImpl<$Res, - _$ChatCompletionAssistantMessageImpl> - implements _$$ChatCompletionAssistantMessageImplCopyWith<$Res> { - __$$ChatCompletionAssistantMessageImplCopyWithImpl( - _$ChatCompletionAssistantMessageImpl _value, - $Res Function(_$ChatCompletionAssistantMessageImpl) _then) - : super(_value, _then); - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? role = null, - Object? content = freezed, - Object? refusal = freezed, - Object? name = freezed, - Object? toolCalls = freezed, - Object? functionCall = freezed, - }) { - return _then(_$ChatCompletionAssistantMessageImpl( - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageRole, - content: freezed == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as String?, - refusal: freezed == refusal - ? _value.refusal - : refusal // ignore: cast_nullable_to_non_nullable - as String?, - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - toolCalls: freezed == toolCalls - ? _value._toolCalls - : toolCalls // ignore: cast_nullable_to_non_nullable - as List?, - functionCall: freezed == functionCall - ? _value.functionCall - : functionCall // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageFunctionCall?, - )); - } - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall { - if (_value.functionCall == null) { - return null; - } - - return $ChatCompletionMessageFunctionCallCopyWith<$Res>( - _value.functionCall!, (value) { - return _then(_value.copyWith(functionCall: value)); - }); - } -} - -/// @nodoc -@JsonSerializable() -class _$ChatCompletionAssistantMessageImpl - extends ChatCompletionAssistantMessage { - const _$ChatCompletionAssistantMessageImpl( - {this.role = ChatCompletionMessageRole.assistant, - @JsonKey(includeIfNull: false) this.content, - @JsonKey(includeIfNull: false) this.refusal, - @JsonKey(includeIfNull: false) this.name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - final List? 
toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) this.functionCall}) - : _toolCalls = toolCalls, - super._(); - - factory _$ChatCompletionAssistantMessageImpl.fromJson( - Map json) => - _$$ChatCompletionAssistantMessageImplFromJson(json); - - /// The role of the messages author, in this case `assistant`. - @override - @JsonKey() - final ChatCompletionMessageRole role; - - /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. - @override - @JsonKey(includeIfNull: false) - final String? content; - - /// The refusal message by the assistant. - @override - @JsonKey(includeIfNull: false) - final String? refusal; - - /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. - @override - @JsonKey(includeIfNull: false) - final String? name; - - /// No Description - final List? _toolCalls; - - /// No Description - @override - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? get toolCalls { - final value = _toolCalls; - if (value == null) return null; - if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - - /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. - @override - @JsonKey(name: 'function_call', includeIfNull: false) - final ChatCompletionMessageFunctionCall? functionCall; - - @override - String toString() { - return 'ChatCompletionMessage.assistant(role: $role, content: $content, refusal: $refusal, name: $name, toolCalls: $toolCalls, functionCall: $functionCall)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ChatCompletionAssistantMessageImpl && - (identical(other.role, role) || other.role == role) && - (identical(other.content, content) || other.content == content) && - (identical(other.refusal, refusal) || other.refusal == refusal) && - (identical(other.name, name) || other.name == name) && - const DeepCollectionEquality() - .equals(other._toolCalls, _toolCalls) && - (identical(other.functionCall, functionCall) || - other.functionCall == functionCall)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, role, content, refusal, name, - const DeepCollectionEquality().hash(_toolCalls), functionCall); - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$ChatCompletionAssistantMessageImplCopyWith< - _$ChatCompletionAssistantMessageImpl> - get copyWith => __$$ChatCompletionAssistantMessageImplCopyWithImpl< - _$ChatCompletionAssistantMessageImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name) - system, - required TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name) - user, - required TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? 
refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall) - assistant, - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId) - tool, - required TResult Function( - ChatCompletionMessageRole role, String? content, String name) - function, - }) { - return assistant(role, content, refusal, name, toolCalls, functionCall); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult? Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult? Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult? Function( - ChatCompletionMessageRole role, String? content, String name)? - function, - }) { - return assistant?.call( - role, content, refusal, name, toolCalls, functionCall); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult Function( - ChatCompletionMessageRole role, String? content, String name)? - function, - required TResult orElse(), - }) { - if (assistant != null) { - return assistant(role, content, refusal, name, toolCalls, functionCall); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionSystemMessage value) system, - required TResult Function(ChatCompletionUserMessage value) user, - required TResult Function(ChatCompletionAssistantMessage value) assistant, - required TResult Function(ChatCompletionToolMessage value) tool, - required TResult Function(ChatCompletionFunctionMessage value) function, - }) { - return assistant(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionSystemMessage value)? system, - TResult? Function(ChatCompletionUserMessage value)? user, - TResult? Function(ChatCompletionAssistantMessage value)? assistant, - TResult? 
Function(ChatCompletionToolMessage value)? tool, - TResult? Function(ChatCompletionFunctionMessage value)? function, - }) { - return assistant?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionSystemMessage value)? system, - TResult Function(ChatCompletionUserMessage value)? user, - TResult Function(ChatCompletionAssistantMessage value)? assistant, - TResult Function(ChatCompletionToolMessage value)? tool, - TResult Function(ChatCompletionFunctionMessage value)? function, - required TResult orElse(), - }) { - if (assistant != null) { - return assistant(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ChatCompletionAssistantMessageImplToJson( - this, - ); - } -} - -abstract class ChatCompletionAssistantMessage extends ChatCompletionMessage { - const factory ChatCompletionAssistantMessage( - {final ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) final String? content, - @JsonKey(includeIfNull: false) final String? refusal, - @JsonKey(includeIfNull: false) final String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - final List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - final ChatCompletionMessageFunctionCall? functionCall}) = - _$ChatCompletionAssistantMessageImpl; - const ChatCompletionAssistantMessage._() : super._(); - - factory ChatCompletionAssistantMessage.fromJson(Map json) = - _$ChatCompletionAssistantMessageImpl.fromJson; - - /// The role of the messages author, in this case `assistant`. - @override - ChatCompletionMessageRole get role; - - /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. - @override - @JsonKey(includeIfNull: false) - String? get content; - - /// The refusal message by the assistant. - @JsonKey(includeIfNull: false) - String? get refusal; - - /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. - @JsonKey(includeIfNull: false) - String? get name; - - /// No Description - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? get toolCalls; - - /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? get functionCall; - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. 
- @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ChatCompletionAssistantMessageImplCopyWith< - _$ChatCompletionAssistantMessageImpl> - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$ChatCompletionToolMessageImplCopyWith<$Res> - implements $ChatCompletionMessageCopyWith<$Res> { - factory _$$ChatCompletionToolMessageImplCopyWith( - _$ChatCompletionToolMessageImpl value, - $Res Function(_$ChatCompletionToolMessageImpl) then) = - __$$ChatCompletionToolMessageImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {ChatCompletionMessageRole role, - String content, - @JsonKey(name: 'tool_call_id') String toolCallId}); -} - -/// @nodoc -class __$$ChatCompletionToolMessageImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageCopyWithImpl<$Res, - _$ChatCompletionToolMessageImpl> - implements _$$ChatCompletionToolMessageImplCopyWith<$Res> { - __$$ChatCompletionToolMessageImplCopyWithImpl( - _$ChatCompletionToolMessageImpl _value, - $Res Function(_$ChatCompletionToolMessageImpl) _then) - : super(_value, _then); - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? role = null, - Object? content = null, - Object? toolCallId = null, - }) { - return _then(_$ChatCompletionToolMessageImpl( - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageRole, - content: null == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as String, - toolCallId: null == toolCallId - ? _value.toolCallId - : toolCallId // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ChatCompletionToolMessageImpl extends ChatCompletionToolMessage { - const _$ChatCompletionToolMessageImpl( - {this.role = ChatCompletionMessageRole.tool, - required this.content, - @JsonKey(name: 'tool_call_id') required this.toolCallId}) - : super._(); - - factory _$ChatCompletionToolMessageImpl.fromJson(Map json) => - _$$ChatCompletionToolMessageImplFromJson(json); - - /// The role of the messages author, in this case `tool`. - @override - @JsonKey() - final ChatCompletionMessageRole role; - - /// The contents of the tool message. - @override - final String content; - - /// Tool call that this message is responding to. - @override - @JsonKey(name: 'tool_call_id') - final String toolCallId; - - @override - String toString() { - return 'ChatCompletionMessage.tool(role: $role, content: $content, toolCallId: $toolCallId)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ChatCompletionToolMessageImpl && - (identical(other.role, role) || other.role == role) && - (identical(other.content, content) || other.content == content) && - (identical(other.toolCallId, toolCallId) || - other.toolCallId == toolCallId)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, role, content, toolCallId); - - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$ChatCompletionToolMessageImplCopyWith<_$ChatCompletionToolMessageImpl> - get copyWith => __$$ChatCompletionToolMessageImplCopyWithImpl< - _$ChatCompletionToolMessageImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name) - system, - required TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name) - user, - required TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall) - assistant, - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId) - tool, - required TResult Function( - ChatCompletionMessageRole role, String? content, String name) - function, - }) { - return tool(role, content, toolCallId); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult? Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult? Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult? Function( - ChatCompletionMessageRole role, String? content, String name)? - function, - }) { - return tool?.call(role, content, toolCallId); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult Function( - ChatCompletionMessageRole role, String? content, String name)? 
- function, - required TResult orElse(), - }) { - if (tool != null) { - return tool(role, content, toolCallId); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionSystemMessage value) system, - required TResult Function(ChatCompletionUserMessage value) user, - required TResult Function(ChatCompletionAssistantMessage value) assistant, - required TResult Function(ChatCompletionToolMessage value) tool, - required TResult Function(ChatCompletionFunctionMessage value) function, - }) { - return tool(this); + other is _$BatchErrorsImpl && + (identical(other.object, object) || other.object == object) && + const DeepCollectionEquality().equals(other._data, _data)); } + @JsonKey(ignore: true) @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionSystemMessage value)? system, - TResult? Function(ChatCompletionUserMessage value)? user, - TResult? Function(ChatCompletionAssistantMessage value)? assistant, - TResult? Function(ChatCompletionToolMessage value)? tool, - TResult? Function(ChatCompletionFunctionMessage value)? function, - }) { - return tool?.call(this); - } + int get hashCode => Object.hash( + runtimeType, object, const DeepCollectionEquality().hash(_data)); + @JsonKey(ignore: true) @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionSystemMessage value)? system, - TResult Function(ChatCompletionUserMessage value)? user, - TResult Function(ChatCompletionAssistantMessage value)? assistant, - TResult Function(ChatCompletionToolMessage value)? tool, - TResult Function(ChatCompletionFunctionMessage value)? function, - required TResult orElse(), - }) { - if (tool != null) { - return tool(this); - } - return orElse(); - } + @pragma('vm:prefer-inline') + _$$BatchErrorsImplCopyWith<_$BatchErrorsImpl> get copyWith => + __$$BatchErrorsImplCopyWithImpl<_$BatchErrorsImpl>(this, _$identity); @override Map toJson() { - return _$$ChatCompletionToolMessageImplToJson( + return _$$BatchErrorsImplToJson( this, ); } } -abstract class ChatCompletionToolMessage extends ChatCompletionMessage { - const factory ChatCompletionToolMessage( - {final ChatCompletionMessageRole role, - required final String content, - @JsonKey(name: 'tool_call_id') required final String toolCallId}) = - _$ChatCompletionToolMessageImpl; - const ChatCompletionToolMessage._() : super._(); +abstract class _BatchErrors extends BatchErrors { + const factory _BatchErrors( + {@JsonKey(includeIfNull: false) final String? object, + @JsonKey(includeIfNull: false) + final List? data}) = _$BatchErrorsImpl; + const _BatchErrors._() : super._(); - factory ChatCompletionToolMessage.fromJson(Map json) = - _$ChatCompletionToolMessageImpl.fromJson; + factory _BatchErrors.fromJson(Map json) = + _$BatchErrorsImpl.fromJson; - /// The role of the messages author, in this case `tool`. @override - ChatCompletionMessageRole get role; - /// The contents of the tool message. + /// The object type, which is always `list`. + @JsonKey(includeIfNull: false) + String? get object; @override - String get content; - /// Tool call that this message is responding to. - @JsonKey(name: 'tool_call_id') - String get toolCallId; + /// No Description + @JsonKey(includeIfNull: false) + List? 
get data; + @override + @JsonKey(ignore: true) + _$$BatchErrorsImplCopyWith<_$BatchErrorsImpl> get copyWith => + throw _privateConstructorUsedError; +} + +BatchRequestCounts _$BatchRequestCountsFromJson(Map json) { + return _BatchRequestCounts.fromJson(json); +} + +/// @nodoc +mixin _$BatchRequestCounts { + /// Total number of requests in the batch. + int get total => throw _privateConstructorUsedError; + + /// Number of requests that have been completed successfully. + int get completed => throw _privateConstructorUsedError; + + /// Number of requests that have failed. + int get failed => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $BatchRequestCountsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $BatchRequestCountsCopyWith<$Res> { + factory $BatchRequestCountsCopyWith( + BatchRequestCounts value, $Res Function(BatchRequestCounts) then) = + _$BatchRequestCountsCopyWithImpl<$Res, BatchRequestCounts>; + @useResult + $Res call({int total, int completed, int failed}); +} + +/// @nodoc +class _$BatchRequestCountsCopyWithImpl<$Res, $Val extends BatchRequestCounts> + implements $BatchRequestCountsCopyWith<$Res> { + _$BatchRequestCountsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ChatCompletionToolMessageImplCopyWith<_$ChatCompletionToolMessageImpl> - get copyWith => throw _privateConstructorUsedError; + $Res call({ + Object? total = null, + Object? completed = null, + Object? failed = null, + }) { + return _then(_value.copyWith( + total: null == total + ? _value.total + : total // ignore: cast_nullable_to_non_nullable + as int, + completed: null == completed + ? _value.completed + : completed // ignore: cast_nullable_to_non_nullable + as int, + failed: null == failed + ? _value.failed + : failed // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } } /// @nodoc -abstract class _$$ChatCompletionFunctionMessageImplCopyWith<$Res> - implements $ChatCompletionMessageCopyWith<$Res> { - factory _$$ChatCompletionFunctionMessageImplCopyWith( - _$ChatCompletionFunctionMessageImpl value, - $Res Function(_$ChatCompletionFunctionMessageImpl) then) = - __$$ChatCompletionFunctionMessageImplCopyWithImpl<$Res>; +abstract class _$$BatchRequestCountsImplCopyWith<$Res> + implements $BatchRequestCountsCopyWith<$Res> { + factory _$$BatchRequestCountsImplCopyWith(_$BatchRequestCountsImpl value, + $Res Function(_$BatchRequestCountsImpl) then) = + __$$BatchRequestCountsImplCopyWithImpl<$Res>; @override @useResult - $Res call({ChatCompletionMessageRole role, String? 
content, String name}); + $Res call({int total, int completed, int failed}); } /// @nodoc -class __$$ChatCompletionFunctionMessageImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageCopyWithImpl<$Res, - _$ChatCompletionFunctionMessageImpl> - implements _$$ChatCompletionFunctionMessageImplCopyWith<$Res> { - __$$ChatCompletionFunctionMessageImplCopyWithImpl( - _$ChatCompletionFunctionMessageImpl _value, - $Res Function(_$ChatCompletionFunctionMessageImpl) _then) +class __$$BatchRequestCountsImplCopyWithImpl<$Res> + extends _$BatchRequestCountsCopyWithImpl<$Res, _$BatchRequestCountsImpl> + implements _$$BatchRequestCountsImplCopyWith<$Res> { + __$$BatchRequestCountsImplCopyWithImpl(_$BatchRequestCountsImpl _value, + $Res Function(_$BatchRequestCountsImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? role = null, - Object? content = freezed, - Object? name = null, + Object? total = null, + Object? completed = null, + Object? failed = null, }) { - return _then(_$ChatCompletionFunctionMessageImpl( - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageRole, - content: freezed == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as String?, - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, + return _then(_$BatchRequestCountsImpl( + total: null == total + ? _value.total + : total // ignore: cast_nullable_to_non_nullable + as int, + completed: null == completed + ? _value.completed + : completed // ignore: cast_nullable_to_non_nullable + as int, + failed: null == failed + ? _value.failed + : failed // ignore: cast_nullable_to_non_nullable + as int, )); } } /// @nodoc @JsonSerializable() -class _$ChatCompletionFunctionMessageImpl - extends ChatCompletionFunctionMessage { - const _$ChatCompletionFunctionMessageImpl( - {this.role = ChatCompletionMessageRole.function, - required this.content, - required this.name}) +class _$BatchRequestCountsImpl extends _BatchRequestCounts { + const _$BatchRequestCountsImpl( + {required this.total, required this.completed, required this.failed}) : super._(); - factory _$ChatCompletionFunctionMessageImpl.fromJson( - Map json) => - _$$ChatCompletionFunctionMessageImplFromJson(json); + factory _$BatchRequestCountsImpl.fromJson(Map json) => + _$$BatchRequestCountsImplFromJson(json); - /// The role of the messages author, in this case `function`. + /// Total number of requests in the batch. @override - @JsonKey() - final ChatCompletionMessageRole role; + final int total; - /// The contents of the function message. + /// Number of requests that have been completed successfully. @override - final String? content; + final int completed; - /// The name of the function to call. + /// Number of requests that have failed. 
@override - final String name; + final int failed; @override String toString() { - return 'ChatCompletionMessage.function(role: $role, content: $content, name: $name)'; + return 'BatchRequestCounts(total: $total, completed: $completed, failed: $failed)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ChatCompletionFunctionMessageImpl && - (identical(other.role, role) || other.role == role) && - (identical(other.content, content) || other.content == content) && - (identical(other.name, name) || other.name == name)); + other is _$BatchRequestCountsImpl && + (identical(other.total, total) || other.total == total) && + (identical(other.completed, completed) || + other.completed == completed) && + (identical(other.failed, failed) || other.failed == failed)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, role, content, name); + int get hashCode => Object.hash(runtimeType, total, completed, failed); - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ChatCompletionFunctionMessageImplCopyWith< - _$ChatCompletionFunctionMessageImpl> - get copyWith => __$$ChatCompletionFunctionMessageImplCopyWithImpl< - _$ChatCompletionFunctionMessageImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name) - system, - required TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name) - user, - required TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall) - assistant, - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId) - tool, - required TResult Function( - ChatCompletionMessageRole role, String? content, String name) - function, - }) { - return function(role, content, name); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult? Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult? Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult? 
Function( - ChatCompletionMessageRole role, String? content, String name)? - function, - }) { - return function?.call(role, content, name); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? refusal, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult Function( - ChatCompletionMessageRole role, String? content, String name)? - function, - required TResult orElse(), - }) { - if (function != null) { - return function(role, content, name); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionSystemMessage value) system, - required TResult Function(ChatCompletionUserMessage value) user, - required TResult Function(ChatCompletionAssistantMessage value) assistant, - required TResult Function(ChatCompletionToolMessage value) tool, - required TResult Function(ChatCompletionFunctionMessage value) function, - }) { - return function(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionSystemMessage value)? system, - TResult? Function(ChatCompletionUserMessage value)? user, - TResult? Function(ChatCompletionAssistantMessage value)? assistant, - TResult? Function(ChatCompletionToolMessage value)? tool, - TResult? Function(ChatCompletionFunctionMessage value)? function, - }) { - return function?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionSystemMessage value)? system, - TResult Function(ChatCompletionUserMessage value)? user, - TResult Function(ChatCompletionAssistantMessage value)? assistant, - TResult Function(ChatCompletionToolMessage value)? tool, - TResult Function(ChatCompletionFunctionMessage value)? function, - required TResult orElse(), - }) { - if (function != null) { - return function(this); - } - return orElse(); - } + _$$BatchRequestCountsImplCopyWith<_$BatchRequestCountsImpl> get copyWith => + __$$BatchRequestCountsImplCopyWithImpl<_$BatchRequestCountsImpl>( + this, _$identity); @override Map toJson() { - return _$$ChatCompletionFunctionMessageImplToJson( + return _$$BatchRequestCountsImplToJson( this, ); } } -abstract class ChatCompletionFunctionMessage extends ChatCompletionMessage { - const factory ChatCompletionFunctionMessage( - {final ChatCompletionMessageRole role, - required final String? 
content, - required final String name}) = _$ChatCompletionFunctionMessageImpl; - const ChatCompletionFunctionMessage._() : super._(); +abstract class _BatchRequestCounts extends BatchRequestCounts { + const factory _BatchRequestCounts( + {required final int total, + required final int completed, + required final int failed}) = _$BatchRequestCountsImpl; + const _BatchRequestCounts._() : super._(); - factory ChatCompletionFunctionMessage.fromJson(Map json) = - _$ChatCompletionFunctionMessageImpl.fromJson; + factory _BatchRequestCounts.fromJson(Map json) = + _$BatchRequestCountsImpl.fromJson; - /// The role of the messages author, in this case `function`. @override - ChatCompletionMessageRole get role; - /// The contents of the function message. + /// Total number of requests in the batch. + int get total; @override - String? get content; - /// The name of the function to call. - String get name; + /// Number of requests that have been completed successfully. + int get completed; + @override - /// Create a copy of ChatCompletionMessage - /// with the given fields replaced by the non-null parameter values. + /// Number of requests that have failed. + int get failed; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ChatCompletionFunctionMessageImplCopyWith< - _$ChatCompletionFunctionMessageImpl> - get copyWith => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + _$$BatchRequestCountsImplCopyWith<_$BatchRequestCountsImpl> get copyWith => + throw _privateConstructorUsedError; } -ChatCompletionUserMessageContent _$ChatCompletionUserMessageContentFromJson( - Map json) { - switch (json['runtimeType']) { - case 'parts': - return ChatCompletionMessageContentParts.fromJson(json); - case 'string': - return ChatCompletionUserMessageContentString.fromJson(json); - - default: - throw CheckedFromJsonException( - json, - 'runtimeType', - 'ChatCompletionUserMessageContent', - 'Invalid union type "${json['runtimeType']}"!'); - } +BatchErrorsDataInner _$BatchErrorsDataInnerFromJson(Map json) { + return _BatchErrorsDataInner.fromJson(json); } /// @nodoc -mixin _$ChatCompletionUserMessageContent { - Object get value => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(List value) - parts, - required TResult Function(String value) string, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(List value)? parts, - TResult? Function(String value)? string, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(List value)? parts, - TResult Function(String value)? string, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionMessageContentParts value) parts, - required TResult Function(ChatCompletionUserMessageContentString value) - string, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionMessageContentParts value)? parts, - TResult? Function(ChatCompletionUserMessageContentString value)? string, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionMessageContentParts value)? parts, - TResult Function(ChatCompletionUserMessageContentString value)? 
string, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; +mixin _$BatchErrorsDataInner { + /// An error code identifying the error type. + @JsonKey(includeIfNull: false) + String? get code => throw _privateConstructorUsedError; + + /// A human-readable message providing more details about the error. + @JsonKey(includeIfNull: false) + String? get message => throw _privateConstructorUsedError; + + /// The name of the parameter that caused the error, if applicable. + @JsonKey(includeIfNull: false) + String? get param => throw _privateConstructorUsedError; + + /// The line number of the input file where the error occurred, if applicable. + @JsonKey(includeIfNull: false) + int? get line => throw _privateConstructorUsedError; - /// Serializes this ChatCompletionUserMessageContent to a JSON map. Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $BatchErrorsDataInnerCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $ChatCompletionUserMessageContentCopyWith<$Res> { - factory $ChatCompletionUserMessageContentCopyWith( - ChatCompletionUserMessageContent value, - $Res Function(ChatCompletionUserMessageContent) then) = - _$ChatCompletionUserMessageContentCopyWithImpl<$Res, - ChatCompletionUserMessageContent>; +abstract class $BatchErrorsDataInnerCopyWith<$Res> { + factory $BatchErrorsDataInnerCopyWith(BatchErrorsDataInner value, + $Res Function(BatchErrorsDataInner) then) = + _$BatchErrorsDataInnerCopyWithImpl<$Res, BatchErrorsDataInner>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? code, + @JsonKey(includeIfNull: false) String? message, + @JsonKey(includeIfNull: false) String? param, + @JsonKey(includeIfNull: false) int? line}); } /// @nodoc -class _$ChatCompletionUserMessageContentCopyWithImpl<$Res, - $Val extends ChatCompletionUserMessageContent> - implements $ChatCompletionUserMessageContentCopyWith<$Res> { - _$ChatCompletionUserMessageContentCopyWithImpl(this._value, this._then); +class _$BatchErrorsDataInnerCopyWithImpl<$Res, + $Val extends BatchErrorsDataInner> + implements $BatchErrorsDataInnerCopyWith<$Res> { + _$BatchErrorsDataInnerCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionUserMessageContent - /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? code = freezed, + Object? message = freezed, + Object? param = freezed, + Object? line = freezed, + }) { + return _then(_value.copyWith( + code: freezed == code + ? _value.code + : code // ignore: cast_nullable_to_non_nullable + as String?, + message: freezed == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String?, + param: freezed == param + ? _value.param + : param // ignore: cast_nullable_to_non_nullable + as String?, + line: freezed == line + ? 
_value.line + : line // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } } /// @nodoc -abstract class _$$ChatCompletionMessageContentPartsImplCopyWith<$Res> { - factory _$$ChatCompletionMessageContentPartsImplCopyWith( - _$ChatCompletionMessageContentPartsImpl value, - $Res Function(_$ChatCompletionMessageContentPartsImpl) then) = - __$$ChatCompletionMessageContentPartsImplCopyWithImpl<$Res>; +abstract class _$$BatchErrorsDataInnerImplCopyWith<$Res> + implements $BatchErrorsDataInnerCopyWith<$Res> { + factory _$$BatchErrorsDataInnerImplCopyWith(_$BatchErrorsDataInnerImpl value, + $Res Function(_$BatchErrorsDataInnerImpl) then) = + __$$BatchErrorsDataInnerImplCopyWithImpl<$Res>; + @override @useResult - $Res call({List value}); + $Res call( + {@JsonKey(includeIfNull: false) String? code, + @JsonKey(includeIfNull: false) String? message, + @JsonKey(includeIfNull: false) String? param, + @JsonKey(includeIfNull: false) int? line}); } /// @nodoc -class __$$ChatCompletionMessageContentPartsImplCopyWithImpl<$Res> - extends _$ChatCompletionUserMessageContentCopyWithImpl<$Res, - _$ChatCompletionMessageContentPartsImpl> - implements _$$ChatCompletionMessageContentPartsImplCopyWith<$Res> { - __$$ChatCompletionMessageContentPartsImplCopyWithImpl( - _$ChatCompletionMessageContentPartsImpl _value, - $Res Function(_$ChatCompletionMessageContentPartsImpl) _then) +class __$$BatchErrorsDataInnerImplCopyWithImpl<$Res> + extends _$BatchErrorsDataInnerCopyWithImpl<$Res, _$BatchErrorsDataInnerImpl> + implements _$$BatchErrorsDataInnerImplCopyWith<$Res> { + __$$BatchErrorsDataInnerImplCopyWithImpl(_$BatchErrorsDataInnerImpl _value, + $Res Function(_$BatchErrorsDataInnerImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionUserMessageContent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? value = null, + Object? code = freezed, + Object? message = freezed, + Object? param = freezed, + Object? line = freezed, }) { - return _then(_$ChatCompletionMessageContentPartsImpl( - null == value - ? _value._value - : value // ignore: cast_nullable_to_non_nullable - as List, + return _then(_$BatchErrorsDataInnerImpl( + code: freezed == code + ? _value.code + : code // ignore: cast_nullable_to_non_nullable + as String?, + message: freezed == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String?, + param: freezed == param + ? _value.param + : param // ignore: cast_nullable_to_non_nullable + as String?, + line: freezed == line + ? _value.line + : line // ignore: cast_nullable_to_non_nullable + as int?, )); } } /// @nodoc @JsonSerializable() -class _$ChatCompletionMessageContentPartsImpl - extends ChatCompletionMessageContentParts { - const _$ChatCompletionMessageContentPartsImpl( - final List value, - {final String? $type}) - : _value = value, - $type = $type ?? 
'parts', - super._(); +class _$BatchErrorsDataInnerImpl extends _BatchErrorsDataInner { + const _$BatchErrorsDataInnerImpl( + {@JsonKey(includeIfNull: false) this.code, + @JsonKey(includeIfNull: false) this.message, + @JsonKey(includeIfNull: false) this.param, + @JsonKey(includeIfNull: false) this.line}) + : super._(); - factory _$ChatCompletionMessageContentPartsImpl.fromJson( - Map json) => - _$$ChatCompletionMessageContentPartsImplFromJson(json); + factory _$BatchErrorsDataInnerImpl.fromJson(Map json) => + _$$BatchErrorsDataInnerImplFromJson(json); - final List _value; + /// An error code identifying the error type. @override - List get value { - if (_value is EqualUnmodifiableListView) return _value; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_value); - } + @JsonKey(includeIfNull: false) + final String? code; - @JsonKey(name: 'runtimeType') - final String $type; + /// A human-readable message providing more details about the error. + @override + @JsonKey(includeIfNull: false) + final String? message; + + /// The name of the parameter that caused the error, if applicable. + @override + @JsonKey(includeIfNull: false) + final String? param; + + /// The line number of the input file where the error occurred, if applicable. + @override + @JsonKey(includeIfNull: false) + final int? line; @override String toString() { - return 'ChatCompletionUserMessageContent.parts(value: $value)'; + return 'BatchErrorsDataInner(code: $code, message: $message, param: $param, line: $line)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ChatCompletionMessageContentPartsImpl && - const DeepCollectionEquality().equals(other._value, _value)); + other is _$BatchErrorsDataInnerImpl && + (identical(other.code, code) || other.code == code) && + (identical(other.message, message) || other.message == message) && + (identical(other.param, param) || other.param == param) && + (identical(other.line, line) || other.line == line)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + int get hashCode => Object.hash(runtimeType, code, message, param, line); - /// Create a copy of ChatCompletionUserMessageContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ChatCompletionMessageContentPartsImplCopyWith< - _$ChatCompletionMessageContentPartsImpl> - get copyWith => __$$ChatCompletionMessageContentPartsImplCopyWithImpl< - _$ChatCompletionMessageContentPartsImpl>(this, _$identity); + _$$BatchErrorsDataInnerImplCopyWith<_$BatchErrorsDataInnerImpl> + get copyWith => + __$$BatchErrorsDataInnerImplCopyWithImpl<_$BatchErrorsDataInnerImpl>( + this, _$identity); @override - @optionalTypeArgs - TResult when({ - required TResult Function(List value) - parts, - required TResult Function(String value) string, - }) { - return parts(value); + Map toJson() { + return _$$BatchErrorsDataInnerImplToJson( + this, + ); } +} - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(List value)? parts, - TResult? Function(String value)? string, - }) { - return parts?.call(value); - } +abstract class _BatchErrorsDataInner extends BatchErrorsDataInner { + const factory _BatchErrorsDataInner( + {@JsonKey(includeIfNull: false) final String? 
code, + @JsonKey(includeIfNull: false) final String? message, + @JsonKey(includeIfNull: false) final String? param, + @JsonKey(includeIfNull: false) final int? line}) = + _$BatchErrorsDataInnerImpl; + const _BatchErrorsDataInner._() : super._(); + + factory _BatchErrorsDataInner.fromJson(Map json) = + _$BatchErrorsDataInnerImpl.fromJson; @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(List value)? parts, - TResult Function(String value)? string, - required TResult orElse(), - }) { - if (parts != null) { - return parts(value); - } - return orElse(); - } + /// An error code identifying the error type. + @JsonKey(includeIfNull: false) + String? get code; @override - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionMessageContentParts value) parts, - required TResult Function(ChatCompletionUserMessageContentString value) - string, - }) { - return parts(this); - } + /// A human-readable message providing more details about the error. + @JsonKey(includeIfNull: false) + String? get message; @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionMessageContentParts value)? parts, - TResult? Function(ChatCompletionUserMessageContentString value)? string, - }) { - return parts?.call(this); - } + /// The name of the parameter that caused the error, if applicable. + @JsonKey(includeIfNull: false) + String? get param; @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionMessageContentParts value)? parts, - TResult Function(ChatCompletionUserMessageContentString value)? string, - required TResult orElse(), - }) { - if (parts != null) { - return parts(this); - } - return orElse(); - } + /// The line number of the input file where the error occurred, if applicable. + @JsonKey(includeIfNull: false) + int? get line; @override - Map toJson() { - return _$$ChatCompletionMessageContentPartsImplToJson( - this, - ); - } + @JsonKey(ignore: true) + _$$BatchErrorsDataInnerImplCopyWith<_$BatchErrorsDataInnerImpl> + get copyWith => throw _privateConstructorUsedError; } -abstract class ChatCompletionMessageContentParts - extends ChatCompletionUserMessageContent { - const factory ChatCompletionMessageContentParts( - final List value) = - _$ChatCompletionMessageContentPartsImpl; - const ChatCompletionMessageContentParts._() : super._(); +ListBatchesResponse _$ListBatchesResponseFromJson(Map json) { + return _ListBatchesResponse.fromJson(json); +} - factory ChatCompletionMessageContentParts.fromJson( - Map json) = - _$ChatCompletionMessageContentPartsImpl.fromJson; +/// @nodoc +mixin _$ListBatchesResponse { + /// No Description + List get data => throw _privateConstructorUsedError; - @override - List get value; + /// The ID of the first batch in the list. + @JsonKey(name: 'first_id', includeIfNull: false) + String? get firstId => throw _privateConstructorUsedError; - /// Create a copy of ChatCompletionUserMessageContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ChatCompletionMessageContentPartsImplCopyWith< - _$ChatCompletionMessageContentPartsImpl> - get copyWith => throw _privateConstructorUsedError; + /// The ID of the last batch in the list. + @JsonKey(name: 'last_id', includeIfNull: false) + String? get lastId => throw _privateConstructorUsedError; + + /// Whether there are more batches available. 
+ @JsonKey(name: 'has_more') + bool get hasMore => throw _privateConstructorUsedError; + + /// The object type, which is always `list`. + ListBatchesResponseObject get object => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ListBatchesResponseCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ChatCompletionUserMessageContentStringImplCopyWith<$Res> { - factory _$$ChatCompletionUserMessageContentStringImplCopyWith( - _$ChatCompletionUserMessageContentStringImpl value, - $Res Function(_$ChatCompletionUserMessageContentStringImpl) then) = - __$$ChatCompletionUserMessageContentStringImplCopyWithImpl<$Res>; +abstract class $ListBatchesResponseCopyWith<$Res> { + factory $ListBatchesResponseCopyWith( + ListBatchesResponse value, $Res Function(ListBatchesResponse) then) = + _$ListBatchesResponseCopyWithImpl<$Res, ListBatchesResponse>; @useResult - $Res call({String value}); + $Res call( + {List data, + @JsonKey(name: 'first_id', includeIfNull: false) String? firstId, + @JsonKey(name: 'last_id', includeIfNull: false) String? lastId, + @JsonKey(name: 'has_more') bool hasMore, + ListBatchesResponseObject object}); } /// @nodoc -class __$$ChatCompletionUserMessageContentStringImplCopyWithImpl<$Res> - extends _$ChatCompletionUserMessageContentCopyWithImpl<$Res, - _$ChatCompletionUserMessageContentStringImpl> - implements _$$ChatCompletionUserMessageContentStringImplCopyWith<$Res> { - __$$ChatCompletionUserMessageContentStringImplCopyWithImpl( - _$ChatCompletionUserMessageContentStringImpl _value, - $Res Function(_$ChatCompletionUserMessageContentStringImpl) _then) +class _$ListBatchesResponseCopyWithImpl<$Res, $Val extends ListBatchesResponse> + implements $ListBatchesResponseCopyWith<$Res> { + _$ListBatchesResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? firstId = freezed, + Object? lastId = freezed, + Object? hasMore = null, + Object? object = null, + }) { + return _then(_value.copyWith( + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as List, + firstId: freezed == firstId + ? _value.firstId + : firstId // ignore: cast_nullable_to_non_nullable + as String?, + lastId: freezed == lastId + ? _value.lastId + : lastId // ignore: cast_nullable_to_non_nullable + as String?, + hasMore: null == hasMore + ? _value.hasMore + : hasMore // ignore: cast_nullable_to_non_nullable + as bool, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as ListBatchesResponseObject, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ListBatchesResponseImplCopyWith<$Res> + implements $ListBatchesResponseCopyWith<$Res> { + factory _$$ListBatchesResponseImplCopyWith(_$ListBatchesResponseImpl value, + $Res Function(_$ListBatchesResponseImpl) then) = + __$$ListBatchesResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {List data, + @JsonKey(name: 'first_id', includeIfNull: false) String? firstId, + @JsonKey(name: 'last_id', includeIfNull: false) String? 
lastId, + @JsonKey(name: 'has_more') bool hasMore, + ListBatchesResponseObject object}); +} + +/// @nodoc +class __$$ListBatchesResponseImplCopyWithImpl<$Res> + extends _$ListBatchesResponseCopyWithImpl<$Res, _$ListBatchesResponseImpl> + implements _$$ListBatchesResponseImplCopyWith<$Res> { + __$$ListBatchesResponseImplCopyWithImpl(_$ListBatchesResponseImpl _value, + $Res Function(_$ListBatchesResponseImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionUserMessageContent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? value = null, + Object? data = null, + Object? firstId = freezed, + Object? lastId = freezed, + Object? hasMore = null, + Object? object = null, }) { - return _then(_$ChatCompletionUserMessageContentStringImpl( - null == value - ? _value.value - : value // ignore: cast_nullable_to_non_nullable - as String, + return _then(_$ListBatchesResponseImpl( + data: null == data + ? _value._data + : data // ignore: cast_nullable_to_non_nullable + as List, + firstId: freezed == firstId + ? _value.firstId + : firstId // ignore: cast_nullable_to_non_nullable + as String?, + lastId: freezed == lastId + ? _value.lastId + : lastId // ignore: cast_nullable_to_non_nullable + as String?, + hasMore: null == hasMore + ? _value.hasMore + : hasMore // ignore: cast_nullable_to_non_nullable + as bool, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as ListBatchesResponseObject, )); } } /// @nodoc @JsonSerializable() -class _$ChatCompletionUserMessageContentStringImpl - extends ChatCompletionUserMessageContentString { - const _$ChatCompletionUserMessageContentStringImpl(this.value, - {final String? $type}) - : $type = $type ?? 'string', +class _$ListBatchesResponseImpl extends _ListBatchesResponse { + const _$ListBatchesResponseImpl( + {required final List data, + @JsonKey(name: 'first_id', includeIfNull: false) this.firstId, + @JsonKey(name: 'last_id', includeIfNull: false) this.lastId, + @JsonKey(name: 'has_more') required this.hasMore, + required this.object}) + : _data = data, super._(); - factory _$ChatCompletionUserMessageContentStringImpl.fromJson( - Map json) => - _$$ChatCompletionUserMessageContentStringImplFromJson(json); - - @override - final String value; - - @JsonKey(name: 'runtimeType') - final String $type; + factory _$ListBatchesResponseImpl.fromJson(Map json) => + _$$ListBatchesResponseImplFromJson(json); - @override - String toString() { - return 'ChatCompletionUserMessageContent.string(value: $value)'; - } + /// No Description + final List _data; + /// No Description @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ChatCompletionUserMessageContentStringImpl && - (identical(other.value, value) || other.value == value)); + List get data { + if (_data is EqualUnmodifiableListView) return _data; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_data); } - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, value); - - /// Create a copy of ChatCompletionUserMessageContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + /// The ID of the first batch in the list. 
@override - @pragma('vm:prefer-inline') - _$$ChatCompletionUserMessageContentStringImplCopyWith< - _$ChatCompletionUserMessageContentStringImpl> - get copyWith => - __$$ChatCompletionUserMessageContentStringImplCopyWithImpl< - _$ChatCompletionUserMessageContentStringImpl>(this, _$identity); + @JsonKey(name: 'first_id', includeIfNull: false) + final String? firstId; + /// The ID of the last batch in the list. @override - @optionalTypeArgs - TResult when({ - required TResult Function(List value) - parts, - required TResult Function(String value) string, - }) { - return string(value); - } + @JsonKey(name: 'last_id', includeIfNull: false) + final String? lastId; + /// Whether there are more batches available. @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(List value)? parts, - TResult? Function(String value)? string, - }) { - return string?.call(value); - } + @JsonKey(name: 'has_more') + final bool hasMore; + /// The object type, which is always `list`. @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(List value)? parts, - TResult Function(String value)? string, - required TResult orElse(), - }) { - if (string != null) { - return string(value); - } - return orElse(); - } + final ListBatchesResponseObject object; @override - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionMessageContentParts value) parts, - required TResult Function(ChatCompletionUserMessageContentString value) - string, - }) { - return string(this); + String toString() { + return 'ListBatchesResponse(data: $data, firstId: $firstId, lastId: $lastId, hasMore: $hasMore, object: $object)'; } @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionMessageContentParts value)? parts, - TResult? Function(ChatCompletionUserMessageContentString value)? string, - }) { - return string?.call(this); + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ListBatchesResponseImpl && + const DeepCollectionEquality().equals(other._data, _data) && + (identical(other.firstId, firstId) || other.firstId == firstId) && + (identical(other.lastId, lastId) || other.lastId == lastId) && + (identical(other.hasMore, hasMore) || other.hasMore == hasMore) && + (identical(other.object, object) || other.object == object)); } + @JsonKey(ignore: true) @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionMessageContentParts value)? parts, - TResult Function(ChatCompletionUserMessageContentString value)? 
string, - required TResult orElse(), - }) { - if (string != null) { - return string(this); - } - return orElse(); - } + int get hashCode => Object.hash( + runtimeType, + const DeepCollectionEquality().hash(_data), + firstId, + lastId, + hasMore, + object); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ListBatchesResponseImplCopyWith<_$ListBatchesResponseImpl> get copyWith => + __$$ListBatchesResponseImplCopyWithImpl<_$ListBatchesResponseImpl>( + this, _$identity); @override Map toJson() { - return _$$ChatCompletionUserMessageContentStringImplToJson( + return _$$ListBatchesResponseImplToJson( this, ); } } -abstract class ChatCompletionUserMessageContentString - extends ChatCompletionUserMessageContent { - const factory ChatCompletionUserMessageContentString(final String value) = - _$ChatCompletionUserMessageContentStringImpl; - const ChatCompletionUserMessageContentString._() : super._(); +abstract class _ListBatchesResponse extends ListBatchesResponse { + const factory _ListBatchesResponse( + {required final List data, + @JsonKey(name: 'first_id', includeIfNull: false) final String? firstId, + @JsonKey(name: 'last_id', includeIfNull: false) final String? lastId, + @JsonKey(name: 'has_more') required final bool hasMore, + required final ListBatchesResponseObject + object}) = _$ListBatchesResponseImpl; + const _ListBatchesResponse._() : super._(); - factory ChatCompletionUserMessageContentString.fromJson( - Map json) = - _$ChatCompletionUserMessageContentStringImpl.fromJson; + factory _ListBatchesResponse.fromJson(Map json) = + _$ListBatchesResponseImpl.fromJson; @override - String get value; - /// Create a copy of ChatCompletionUserMessageContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ChatCompletionUserMessageContentStringImplCopyWith< - _$ChatCompletionUserMessageContentStringImpl> - get copyWith => throw _privateConstructorUsedError; + /// No Description + List get data; + @override + + /// The ID of the first batch in the list. + @JsonKey(name: 'first_id', includeIfNull: false) + String? get firstId; + @override + + /// The ID of the last batch in the list. + @JsonKey(name: 'last_id', includeIfNull: false) + String? get lastId; + @override + + /// Whether there are more batches available. + @JsonKey(name: 'has_more') + bool get hasMore; + @override + + /// The object type, which is always `list`. 
+ ListBatchesResponseObject get object; + @override + @JsonKey(ignore: true) + _$$ListBatchesResponseImplCopyWith<_$ListBatchesResponseImpl> get copyWith => + throw _privateConstructorUsedError; } -ChatCompletionMessageContentPart _$ChatCompletionMessageContentPartFromJson( +ChatCompletionMessage _$ChatCompletionMessageFromJson( Map json) { - switch (json['type']) { - case 'text': - return ChatCompletionMessageContentPartText.fromJson(json); - case 'image': - return ChatCompletionMessageContentPartImage.fromJson(json); - case 'refusal': - return ChatCompletionMessageContentPartRefusal.fromJson(json); + switch (json['role']) { + case 'system': + return ChatCompletionSystemMessage.fromJson(json); + case 'user': + return ChatCompletionUserMessage.fromJson(json); + case 'assistant': + return ChatCompletionAssistantMessage.fromJson(json); + case 'tool': + return ChatCompletionToolMessage.fromJson(json); + case 'function': + return ChatCompletionFunctionMessage.fromJson(json); default: - throw CheckedFromJsonException( - json, - 'type', - 'ChatCompletionMessageContentPart', - 'Invalid union type "${json['type']}"!'); + throw CheckedFromJsonException(json, 'role', 'ChatCompletionMessage', + 'Invalid union type "${json['role']}"!'); } } /// @nodoc -mixin _$ChatCompletionMessageContentPart { - /// The type of the content part, in this case `text`. - ChatCompletionMessageContentPartType get type => - throw _privateConstructorUsedError; +mixin _$ChatCompletionMessage { + /// The role of the messages author, in this case `system`. + ChatCompletionMessageRole get role => throw _privateConstructorUsedError; + + /// The contents of the system message. + Object? get content => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name) + system, required TResult Function( - ChatCompletionMessageContentPartType type, String text) - text, - required TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) - image, + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name) + user, + required TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall) + assistant, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId) + tool, required TResult Function( - ChatCompletionMessageContentPartType type, String refusal) - refusal, + ChatCompletionMessageRole role, String? content, String name) + function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult? Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? - image, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, TResult? Function( - ChatCompletionMessageContentPartType type, String refusal)? 
- refusal, + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult? Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult? Function( + ChatCompletionMessageRole role, String? content, String name)? + function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? - image, - TResult Function(ChatCompletionMessageContentPartType type, String refusal)? - refusal, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult Function( + ChatCompletionMessageRole role, String? content, String name)? + function, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(ChatCompletionMessageContentPartText value) text, - required TResult Function(ChatCompletionMessageContentPartImage value) - image, - required TResult Function(ChatCompletionMessageContentPartRefusal value) - refusal, + required TResult Function(ChatCompletionSystemMessage value) system, + required TResult Function(ChatCompletionUserMessage value) user, + required TResult Function(ChatCompletionAssistantMessage value) assistant, + required TResult Function(ChatCompletionToolMessage value) tool, + required TResult Function(ChatCompletionFunctionMessage value) function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ChatCompletionMessageContentPartText value)? text, - TResult? Function(ChatCompletionMessageContentPartImage value)? image, - TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, + TResult? Function(ChatCompletionSystemMessage value)? system, + TResult? Function(ChatCompletionUserMessage value)? user, + TResult? Function(ChatCompletionAssistantMessage value)? assistant, + TResult? Function(ChatCompletionToolMessage value)? tool, + TResult? Function(ChatCompletionFunctionMessage value)? function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(ChatCompletionMessageContentPartText value)? 
text, - TResult Function(ChatCompletionMessageContentPartImage value)? image, - TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal, + TResult Function(ChatCompletionSystemMessage value)? system, + TResult Function(ChatCompletionUserMessage value)? user, + TResult Function(ChatCompletionAssistantMessage value)? assistant, + TResult Function(ChatCompletionToolMessage value)? tool, + TResult Function(ChatCompletionFunctionMessage value)? function, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this ChatCompletionMessageContentPart to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionMessageContentPart - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $ChatCompletionMessageContentPartCopyWith - get copyWith => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ChatCompletionMessageCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $ChatCompletionMessageContentPartCopyWith<$Res> { - factory $ChatCompletionMessageContentPartCopyWith( - ChatCompletionMessageContentPart value, - $Res Function(ChatCompletionMessageContentPart) then) = - _$ChatCompletionMessageContentPartCopyWithImpl<$Res, - ChatCompletionMessageContentPart>; +abstract class $ChatCompletionMessageCopyWith<$Res> { + factory $ChatCompletionMessageCopyWith(ChatCompletionMessage value, + $Res Function(ChatCompletionMessage) then) = + _$ChatCompletionMessageCopyWithImpl<$Res, ChatCompletionMessage>; @useResult - $Res call({ChatCompletionMessageContentPartType type}); + $Res call({ChatCompletionMessageRole role}); } /// @nodoc -class _$ChatCompletionMessageContentPartCopyWithImpl<$Res, - $Val extends ChatCompletionMessageContentPart> - implements $ChatCompletionMessageContentPartCopyWith<$Res> { - _$ChatCompletionMessageContentPartCopyWithImpl(this._value, this._then); +class _$ChatCompletionMessageCopyWithImpl<$Res, + $Val extends ChatCompletionMessage> + implements $ChatCompletionMessageCopyWith<$Res> { + _$ChatCompletionMessageCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChatCompletionMessageContentPart - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, + Object? role = null, }) { return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageContentPartType, + role: null == role + ? 
_value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole, ) as $Val); } } /// @nodoc -abstract class _$$ChatCompletionMessageContentPartTextImplCopyWith<$Res> - implements $ChatCompletionMessageContentPartCopyWith<$Res> { - factory _$$ChatCompletionMessageContentPartTextImplCopyWith( - _$ChatCompletionMessageContentPartTextImpl value, - $Res Function(_$ChatCompletionMessageContentPartTextImpl) then) = - __$$ChatCompletionMessageContentPartTextImplCopyWithImpl<$Res>; +abstract class _$$ChatCompletionSystemMessageImplCopyWith<$Res> + implements $ChatCompletionMessageCopyWith<$Res> { + factory _$$ChatCompletionSystemMessageImplCopyWith( + _$ChatCompletionSystemMessageImpl value, + $Res Function(_$ChatCompletionSystemMessageImpl) then) = + __$$ChatCompletionSystemMessageImplCopyWithImpl<$Res>; @override @useResult - $Res call({ChatCompletionMessageContentPartType type, String text}); + $Res call( + {ChatCompletionMessageRole role, + String content, + @JsonKey(includeIfNull: false) String? name}); } /// @nodoc -class __$$ChatCompletionMessageContentPartTextImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageContentPartCopyWithImpl<$Res, - _$ChatCompletionMessageContentPartTextImpl> - implements _$$ChatCompletionMessageContentPartTextImplCopyWith<$Res> { - __$$ChatCompletionMessageContentPartTextImplCopyWithImpl( - _$ChatCompletionMessageContentPartTextImpl _value, - $Res Function(_$ChatCompletionMessageContentPartTextImpl) _then) +class __$$ChatCompletionSystemMessageImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageCopyWithImpl<$Res, + _$ChatCompletionSystemMessageImpl> + implements _$$ChatCompletionSystemMessageImplCopyWith<$Res> { + __$$ChatCompletionSystemMessageImplCopyWithImpl( + _$ChatCompletionSystemMessageImpl _value, + $Res Function(_$ChatCompletionSystemMessageImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionMessageContentPart - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, - Object? text = null, + Object? role = null, + Object? content = null, + Object? name = freezed, }) { - return _then(_$ChatCompletionMessageContentPartTextImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageContentPartType, - text: null == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable + return _then(_$ChatCompletionSystemMessageImpl( + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable as String, + name: freezed == name + ? 
_value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, )); } } /// @nodoc @JsonSerializable() -class _$ChatCompletionMessageContentPartTextImpl - extends ChatCompletionMessageContentPartText { - const _$ChatCompletionMessageContentPartTextImpl( - {this.type = ChatCompletionMessageContentPartType.text, - required this.text}) +class _$ChatCompletionSystemMessageImpl extends ChatCompletionSystemMessage { + const _$ChatCompletionSystemMessageImpl( + {this.role = ChatCompletionMessageRole.system, + required this.content, + @JsonKey(includeIfNull: false) this.name}) : super._(); - factory _$ChatCompletionMessageContentPartTextImpl.fromJson( + factory _$ChatCompletionSystemMessageImpl.fromJson( Map json) => - _$$ChatCompletionMessageContentPartTextImplFromJson(json); + _$$ChatCompletionSystemMessageImplFromJson(json); - /// The type of the content part, in this case `text`. + /// The role of the messages author, in this case `system`. @override @JsonKey() - final ChatCompletionMessageContentPartType type; - - /// The text content. - @override - final String text; - - @override - String toString() { - return 'ChatCompletionMessageContentPart.text(type: $type, text: $text)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ChatCompletionMessageContentPartTextImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.text, text) || other.text == text)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, type, text); - - /// Create a copy of ChatCompletionMessageContentPart - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$ChatCompletionMessageContentPartTextImplCopyWith< - _$ChatCompletionMessageContentPartTextImpl> - get copyWith => __$$ChatCompletionMessageContentPartTextImplCopyWithImpl< - _$ChatCompletionMessageContentPartTextImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function( - ChatCompletionMessageContentPartType type, String text) - text, - required TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) - image, - required TResult Function( - ChatCompletionMessageContentPartType type, String refusal) - refusal, - }) { - return text(type, this.text); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult? Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? - image, - TResult? Function( - ChatCompletionMessageContentPartType type, String refusal)? - refusal, - }) { - return text?.call(type, this.text); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? - image, - TResult Function(ChatCompletionMessageContentPartType type, String refusal)? 
- refusal, - required TResult orElse(), - }) { - if (text != null) { - return text(type, this.text); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionMessageContentPartText value) text, - required TResult Function(ChatCompletionMessageContentPartImage value) - image, - required TResult Function(ChatCompletionMessageContentPartRefusal value) - refusal, - }) { - return text(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionMessageContentPartText value)? text, - TResult? Function(ChatCompletionMessageContentPartImage value)? image, - TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, - }) { - return text?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionMessageContentPartText value)? text, - TResult Function(ChatCompletionMessageContentPartImage value)? image, - TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal, - required TResult orElse(), - }) { - if (text != null) { - return text(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ChatCompletionMessageContentPartTextImplToJson( - this, - ); - } -} - -abstract class ChatCompletionMessageContentPartText - extends ChatCompletionMessageContentPart { - const factory ChatCompletionMessageContentPartText( - {final ChatCompletionMessageContentPartType type, - required final String text}) = _$ChatCompletionMessageContentPartTextImpl; - const ChatCompletionMessageContentPartText._() : super._(); - - factory ChatCompletionMessageContentPartText.fromJson( - Map json) = - _$ChatCompletionMessageContentPartTextImpl.fromJson; - - /// The type of the content part, in this case `text`. - @override - ChatCompletionMessageContentPartType get type; - - /// The text content. - String get text; - - /// Create a copy of ChatCompletionMessageContentPart - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ChatCompletionMessageContentPartTextImplCopyWith< - _$ChatCompletionMessageContentPartTextImpl> - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class _$$ChatCompletionMessageContentPartImageImplCopyWith<$Res> - implements $ChatCompletionMessageContentPartCopyWith<$Res> { - factory _$$ChatCompletionMessageContentPartImageImplCopyWith( - _$ChatCompletionMessageContentPartImageImpl value, - $Res Function(_$ChatCompletionMessageContentPartImageImpl) then) = - __$$ChatCompletionMessageContentPartImageImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl}); - - $ChatCompletionMessageImageUrlCopyWith<$Res> get imageUrl; -} - -/// @nodoc -class __$$ChatCompletionMessageContentPartImageImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageContentPartCopyWithImpl<$Res, - _$ChatCompletionMessageContentPartImageImpl> - implements _$$ChatCompletionMessageContentPartImageImplCopyWith<$Res> { - __$$ChatCompletionMessageContentPartImageImplCopyWithImpl( - _$ChatCompletionMessageContentPartImageImpl _value, - $Res Function(_$ChatCompletionMessageContentPartImageImpl) _then) - : super(_value, _then); - - /// Create a copy of ChatCompletionMessageContentPart - /// with the given fields replaced by the non-null parameter values. 
- @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - Object? imageUrl = null, - }) { - return _then(_$ChatCompletionMessageContentPartImageImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageContentPartType, - imageUrl: null == imageUrl - ? _value.imageUrl - : imageUrl // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageImageUrl, - )); - } - - /// Create a copy of ChatCompletionMessageContentPart - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $ChatCompletionMessageImageUrlCopyWith<$Res> get imageUrl { - return $ChatCompletionMessageImageUrlCopyWith<$Res>(_value.imageUrl, - (value) { - return _then(_value.copyWith(imageUrl: value)); - }); - } -} - -/// @nodoc -@JsonSerializable() -class _$ChatCompletionMessageContentPartImageImpl - extends ChatCompletionMessageContentPartImage { - const _$ChatCompletionMessageContentPartImageImpl( - {this.type = ChatCompletionMessageContentPartType.imageUrl, - @JsonKey(name: 'image_url') required this.imageUrl}) - : super._(); - - factory _$ChatCompletionMessageContentPartImageImpl.fromJson( - Map json) => - _$$ChatCompletionMessageContentPartImageImplFromJson(json); + final ChatCompletionMessageRole role; - /// The type of the content part, in this case `image_url`. + /// The contents of the system message. @override - @JsonKey() - final ChatCompletionMessageContentPartType type; + final String content; - /// The URL of the image. + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. @override - @JsonKey(name: 'image_url') - final ChatCompletionMessageImageUrl imageUrl; + @JsonKey(includeIfNull: false) + final String? name; @override String toString() { - return 'ChatCompletionMessageContentPart.image(type: $type, imageUrl: $imageUrl)'; + return 'ChatCompletionMessage.system(role: $role, content: $content, name: $name)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ChatCompletionMessageContentPartImageImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.imageUrl, imageUrl) || - other.imageUrl == imageUrl)); + other is _$ChatCompletionSystemMessageImpl && + (identical(other.role, role) || other.role == role) && + (identical(other.content, content) || other.content == content) && + (identical(other.name, name) || other.name == name)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, imageUrl); + int get hashCode => Object.hash(runtimeType, role, content, name); - /// Create a copy of ChatCompletionMessageContentPart - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ChatCompletionMessageContentPartImageImplCopyWith< - _$ChatCompletionMessageContentPartImageImpl> - get copyWith => __$$ChatCompletionMessageContentPartImageImplCopyWithImpl< - _$ChatCompletionMessageContentPartImageImpl>(this, _$identity); + _$$ChatCompletionSystemMessageImplCopyWith<_$ChatCompletionSystemMessageImpl> + get copyWith => __$$ChatCompletionSystemMessageImplCopyWithImpl< + _$ChatCompletionSystemMessageImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name) + system, required TResult Function( - ChatCompletionMessageContentPartType type, String text) - text, - required TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) - image, + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name) + user, + required TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall) + assistant, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId) + tool, required TResult Function( - ChatCompletionMessageContentPartType type, String refusal) - refusal, + ChatCompletionMessageRole role, String? content, String name) + function, }) { - return image(type, imageUrl); + return system(role, content, name); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult? Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? - image, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult? Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult? Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, TResult? Function( - ChatCompletionMessageContentPartType type, String refusal)? - refusal, + ChatCompletionMessageRole role, String? content, String name)? + function, }) { - return image?.call(type, imageUrl); + return system?.call(role, content, name); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? 
- image, - TResult Function(ChatCompletionMessageContentPartType type, String refusal)? - refusal, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult Function( + ChatCompletionMessageRole role, String? content, String name)? + function, required TResult orElse(), }) { - if (image != null) { - return image(type, imageUrl); + if (system != null) { + return system(role, content, name); } return orElse(); } @@ -57608,713 +50357,625 @@ class _$ChatCompletionMessageContentPartImageImpl @override @optionalTypeArgs TResult map({ - required TResult Function(ChatCompletionMessageContentPartText value) text, - required TResult Function(ChatCompletionMessageContentPartImage value) - image, - required TResult Function(ChatCompletionMessageContentPartRefusal value) - refusal, + required TResult Function(ChatCompletionSystemMessage value) system, + required TResult Function(ChatCompletionUserMessage value) user, + required TResult Function(ChatCompletionAssistantMessage value) assistant, + required TResult Function(ChatCompletionToolMessage value) tool, + required TResult Function(ChatCompletionFunctionMessage value) function, }) { - return image(this); + return system(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ChatCompletionMessageContentPartText value)? text, - TResult? Function(ChatCompletionMessageContentPartImage value)? image, - TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, + TResult? Function(ChatCompletionSystemMessage value)? system, + TResult? Function(ChatCompletionUserMessage value)? user, + TResult? Function(ChatCompletionAssistantMessage value)? assistant, + TResult? Function(ChatCompletionToolMessage value)? tool, + TResult? Function(ChatCompletionFunctionMessage value)? function, }) { - return image?.call(this); + return system?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ChatCompletionMessageContentPartText value)? text, - TResult Function(ChatCompletionMessageContentPartImage value)? image, - TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal, + TResult Function(ChatCompletionSystemMessage value)? system, + TResult Function(ChatCompletionUserMessage value)? user, + TResult Function(ChatCompletionAssistantMessage value)? assistant, + TResult Function(ChatCompletionToolMessage value)? tool, + TResult Function(ChatCompletionFunctionMessage value)? 
function, required TResult orElse(), }) { - if (image != null) { - return image(this); + if (system != null) { + return system(this); } return orElse(); } @override Map toJson() { - return _$$ChatCompletionMessageContentPartImageImplToJson( + return _$$ChatCompletionSystemMessageImplToJson( this, ); } } -abstract class ChatCompletionMessageContentPartImage - extends ChatCompletionMessageContentPart { - const factory ChatCompletionMessageContentPartImage( - {final ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') - required final ChatCompletionMessageImageUrl imageUrl}) = - _$ChatCompletionMessageContentPartImageImpl; - const ChatCompletionMessageContentPartImage._() : super._(); +abstract class ChatCompletionSystemMessage extends ChatCompletionMessage { + const factory ChatCompletionSystemMessage( + {final ChatCompletionMessageRole role, + required final String content, + @JsonKey(includeIfNull: false) final String? name}) = + _$ChatCompletionSystemMessageImpl; + const ChatCompletionSystemMessage._() : super._(); - factory ChatCompletionMessageContentPartImage.fromJson( - Map json) = - _$ChatCompletionMessageContentPartImageImpl.fromJson; + factory ChatCompletionSystemMessage.fromJson(Map json) = + _$ChatCompletionSystemMessageImpl.fromJson; - /// The type of the content part, in this case `image_url`. @override - ChatCompletionMessageContentPartType get type; - /// The URL of the image. - @JsonKey(name: 'image_url') - ChatCompletionMessageImageUrl get imageUrl; + /// The role of the messages author, in this case `system`. + ChatCompletionMessageRole get role; + @override + + /// The contents of the system message. + String get content; - /// Create a copy of ChatCompletionMessageContentPart - /// with the given fields replaced by the non-null parameter values. + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. + @JsonKey(includeIfNull: false) + String? get name; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ChatCompletionMessageContentPartImageImplCopyWith< - _$ChatCompletionMessageContentPartImageImpl> + @JsonKey(ignore: true) + _$$ChatCompletionSystemMessageImplCopyWith<_$ChatCompletionSystemMessageImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ChatCompletionMessageContentPartRefusalImplCopyWith<$Res> - implements $ChatCompletionMessageContentPartCopyWith<$Res> { - factory _$$ChatCompletionMessageContentPartRefusalImplCopyWith( - _$ChatCompletionMessageContentPartRefusalImpl value, - $Res Function(_$ChatCompletionMessageContentPartRefusalImpl) then) = - __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl<$Res>; +abstract class _$$ChatCompletionUserMessageImplCopyWith<$Res> + implements $ChatCompletionMessageCopyWith<$Res> { + factory _$$ChatCompletionUserMessageImplCopyWith( + _$ChatCompletionUserMessageImpl value, + $Res Function(_$ChatCompletionUserMessageImpl) then) = + __$$ChatCompletionUserMessageImplCopyWithImpl<$Res>; @override @useResult - $Res call({ChatCompletionMessageContentPartType type, String refusal}); + $Res call( + {ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? 
name}); + + $ChatCompletionUserMessageContentCopyWith<$Res> get content; } /// @nodoc -class __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageContentPartCopyWithImpl<$Res, - _$ChatCompletionMessageContentPartRefusalImpl> - implements _$$ChatCompletionMessageContentPartRefusalImplCopyWith<$Res> { - __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl( - _$ChatCompletionMessageContentPartRefusalImpl _value, - $Res Function(_$ChatCompletionMessageContentPartRefusalImpl) _then) +class __$$ChatCompletionUserMessageImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageCopyWithImpl<$Res, + _$ChatCompletionUserMessageImpl> + implements _$$ChatCompletionUserMessageImplCopyWith<$Res> { + __$$ChatCompletionUserMessageImplCopyWithImpl( + _$ChatCompletionUserMessageImpl _value, + $Res Function(_$ChatCompletionUserMessageImpl) _then) : super(_value, _then); - /// Create a copy of ChatCompletionMessageContentPart - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, - Object? refusal = null, + Object? role = null, + Object? content = null, + Object? name = freezed, }) { - return _then(_$ChatCompletionMessageContentPartRefusalImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageContentPartType, - refusal: null == refusal - ? _value.refusal - : refusal // ignore: cast_nullable_to_non_nullable - as String, + return _then(_$ChatCompletionUserMessageImpl( + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as ChatCompletionUserMessageContent, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, )); } + + @override + @pragma('vm:prefer-inline') + $ChatCompletionUserMessageContentCopyWith<$Res> get content { + return $ChatCompletionUserMessageContentCopyWith<$Res>(_value.content, + (value) { + return _then(_value.copyWith(content: value)); + }); + } } /// @nodoc @JsonSerializable() -class _$ChatCompletionMessageContentPartRefusalImpl - extends ChatCompletionMessageContentPartRefusal { - const _$ChatCompletionMessageContentPartRefusalImpl( - {this.type = ChatCompletionMessageContentPartType.refusal, - required this.refusal}) +class _$ChatCompletionUserMessageImpl extends ChatCompletionUserMessage { + const _$ChatCompletionUserMessageImpl( + {this.role = ChatCompletionMessageRole.user, + @_ChatCompletionUserMessageContentConverter() required this.content, + @JsonKey(includeIfNull: false) this.name}) : super._(); - factory _$ChatCompletionMessageContentPartRefusalImpl.fromJson( - Map json) => - _$$ChatCompletionMessageContentPartRefusalImplFromJson(json); + factory _$ChatCompletionUserMessageImpl.fromJson(Map json) => + _$$ChatCompletionUserMessageImplFromJson(json); - /// The type of the content part, in this case `refusal`. + /// The role of the messages author, in this case `user`. @override @JsonKey() - final ChatCompletionMessageContentPartType type; + final ChatCompletionMessageRole role; + + /// The contents of the user message. + @override + @_ChatCompletionUserMessageContentConverter() + final ChatCompletionUserMessageContent content; - /// The refusal message generated by the model. + /// An optional name for the participant. 
Provides the model information to differentiate between participants of the same role. @override - final String refusal; + @JsonKey(includeIfNull: false) + final String? name; @override String toString() { - return 'ChatCompletionMessageContentPart.refusal(type: $type, refusal: $refusal)'; + return 'ChatCompletionMessage.user(role: $role, content: $content, name: $name)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ChatCompletionMessageContentPartRefusalImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.refusal, refusal) || other.refusal == refusal)); + other is _$ChatCompletionUserMessageImpl && + (identical(other.role, role) || other.role == role) && + (identical(other.content, content) || other.content == content) && + (identical(other.name, name) || other.name == name)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, refusal); + int get hashCode => Object.hash(runtimeType, role, content, name); - /// Create a copy of ChatCompletionMessageContentPart - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ChatCompletionMessageContentPartRefusalImplCopyWith< - _$ChatCompletionMessageContentPartRefusalImpl> - get copyWith => - __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl< - _$ChatCompletionMessageContentPartRefusalImpl>(this, _$identity); + _$$ChatCompletionUserMessageImplCopyWith<_$ChatCompletionUserMessageImpl> + get copyWith => __$$ChatCompletionUserMessageImplCopyWithImpl< + _$ChatCompletionUserMessageImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name) + system, required TResult Function( - ChatCompletionMessageContentPartType type, String text) - text, - required TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) - image, + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name) + user, + required TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall) + assistant, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId) + tool, required TResult Function( - ChatCompletionMessageContentPartType type, String refusal) - refusal, + ChatCompletionMessageRole role, String? content, String name) + function, }) { - return refusal(type, this.refusal); + return user(role, content, name); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult? Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? - image, + TResult? 
Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult? Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult? Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, TResult? Function( - ChatCompletionMessageContentPartType type, String refusal)? - refusal, + ChatCompletionMessageRole role, String? content, String name)? + function, }) { - return refusal?.call(type, this.refusal); + return user?.call(role, content, name); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? - image, - TResult Function(ChatCompletionMessageContentPartType type, String refusal)? - refusal, - required TResult orElse(), - }) { - if (refusal != null) { - return refusal(type, this.refusal); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionMessageContentPartText value) text, - required TResult Function(ChatCompletionMessageContentPartImage value) - image, - required TResult Function(ChatCompletionMessageContentPartRefusal value) - refusal, - }) { - return refusal(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionMessageContentPartText value)? text, - TResult? Function(ChatCompletionMessageContentPartImage value)? image, - TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, - }) { - return refusal?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionMessageContentPartText value)? text, - TResult Function(ChatCompletionMessageContentPartImage value)? image, - TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult Function( + ChatCompletionMessageRole role, String? content, String name)? 
+ function, required TResult orElse(), }) { - if (refusal != null) { - return refusal(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$ChatCompletionMessageContentPartRefusalImplToJson( - this, - ); - } -} - -abstract class ChatCompletionMessageContentPartRefusal - extends ChatCompletionMessageContentPart { - const factory ChatCompletionMessageContentPartRefusal( - {final ChatCompletionMessageContentPartType type, - required final String refusal}) = - _$ChatCompletionMessageContentPartRefusalImpl; - const ChatCompletionMessageContentPartRefusal._() : super._(); - - factory ChatCompletionMessageContentPartRefusal.fromJson( - Map json) = - _$ChatCompletionMessageContentPartRefusalImpl.fromJson; - - /// The type of the content part, in this case `refusal`. - @override - ChatCompletionMessageContentPartType get type; - - /// The refusal message generated by the model. - String get refusal; - - /// Create a copy of ChatCompletionMessageContentPart - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ChatCompletionMessageContentPartRefusalImplCopyWith< - _$ChatCompletionMessageContentPartRefusalImpl> - get copyWith => throw _privateConstructorUsedError; -} - -ChatCompletionMessageImageUrl _$ChatCompletionMessageImageUrlFromJson( - Map json) { - return _ChatCompletionMessageImageUrl.fromJson(json); -} - -/// @nodoc -mixin _$ChatCompletionMessageImageUrl { - /// Either a URL of the image or the base64 encoded image data. - String get url => throw _privateConstructorUsedError; - - /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). - ChatCompletionMessageImageDetail get detail => - throw _privateConstructorUsedError; - - /// Serializes this ChatCompletionMessageImageUrl to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionMessageImageUrl - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $ChatCompletionMessageImageUrlCopyWith - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ChatCompletionMessageImageUrlCopyWith<$Res> { - factory $ChatCompletionMessageImageUrlCopyWith( - ChatCompletionMessageImageUrl value, - $Res Function(ChatCompletionMessageImageUrl) then) = - _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, - ChatCompletionMessageImageUrl>; - @useResult - $Res call({String url, ChatCompletionMessageImageDetail detail}); -} - -/// @nodoc -class _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, - $Val extends ChatCompletionMessageImageUrl> - implements $ChatCompletionMessageImageUrlCopyWith<$Res> { - _$ChatCompletionMessageImageUrlCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of ChatCompletionMessageImageUrl - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? url = null, - Object? detail = null, - }) { - return _then(_value.copyWith( - url: null == url - ? _value.url - : url // ignore: cast_nullable_to_non_nullable - as String, - detail: null == detail - ? 
_value.detail - : detail // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageImageDetail, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$ChatCompletionMessageImageUrlImplCopyWith<$Res> - implements $ChatCompletionMessageImageUrlCopyWith<$Res> { - factory _$$ChatCompletionMessageImageUrlImplCopyWith( - _$ChatCompletionMessageImageUrlImpl value, - $Res Function(_$ChatCompletionMessageImageUrlImpl) then) = - __$$ChatCompletionMessageImageUrlImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({String url, ChatCompletionMessageImageDetail detail}); -} - -/// @nodoc -class __$$ChatCompletionMessageImageUrlImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, - _$ChatCompletionMessageImageUrlImpl> - implements _$$ChatCompletionMessageImageUrlImplCopyWith<$Res> { - __$$ChatCompletionMessageImageUrlImplCopyWithImpl( - _$ChatCompletionMessageImageUrlImpl _value, - $Res Function(_$ChatCompletionMessageImageUrlImpl) _then) - : super(_value, _then); - - /// Create a copy of ChatCompletionMessageImageUrl - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? url = null, - Object? detail = null, - }) { - return _then(_$ChatCompletionMessageImageUrlImpl( - url: null == url - ? _value.url - : url // ignore: cast_nullable_to_non_nullable - as String, - detail: null == detail - ? _value.detail - : detail // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageImageDetail, - )); + if (user != null) { + return user(role, content, name); + } + return orElse(); } -} - -/// @nodoc -@JsonSerializable() -class _$ChatCompletionMessageImageUrlImpl - extends _ChatCompletionMessageImageUrl { - const _$ChatCompletionMessageImageUrlImpl( - {required this.url, this.detail = ChatCompletionMessageImageDetail.auto}) - : super._(); - - factory _$ChatCompletionMessageImageUrlImpl.fromJson( - Map json) => - _$$ChatCompletionMessageImageUrlImplFromJson(json); - - /// Either a URL of the image or the base64 encoded image data. - @override - final String url; - - /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). - @override - @JsonKey() - final ChatCompletionMessageImageDetail detail; @override - String toString() { - return 'ChatCompletionMessageImageUrl(url: $url, detail: $detail)'; + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionSystemMessage value) system, + required TResult Function(ChatCompletionUserMessage value) user, + required TResult Function(ChatCompletionAssistantMessage value) assistant, + required TResult Function(ChatCompletionToolMessage value) tool, + required TResult Function(ChatCompletionFunctionMessage value) function, + }) { + return user(this); } @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ChatCompletionMessageImageUrlImpl && - (identical(other.url, url) || other.url == url) && - (identical(other.detail, detail) || other.detail == detail)); + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionSystemMessage value)? system, + TResult? Function(ChatCompletionUserMessage value)? user, + TResult? Function(ChatCompletionAssistantMessage value)? assistant, + TResult? Function(ChatCompletionToolMessage value)? tool, + TResult? Function(ChatCompletionFunctionMessage value)? 
function, + }) { + return user?.call(this); } - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, url, detail); - - /// Create a copy of ChatCompletionMessageImageUrl - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) @override - @pragma('vm:prefer-inline') - _$$ChatCompletionMessageImageUrlImplCopyWith< - _$ChatCompletionMessageImageUrlImpl> - get copyWith => __$$ChatCompletionMessageImageUrlImplCopyWithImpl< - _$ChatCompletionMessageImageUrlImpl>(this, _$identity); + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionSystemMessage value)? system, + TResult Function(ChatCompletionUserMessage value)? user, + TResult Function(ChatCompletionAssistantMessage value)? assistant, + TResult Function(ChatCompletionToolMessage value)? tool, + TResult Function(ChatCompletionFunctionMessage value)? function, + required TResult orElse(), + }) { + if (user != null) { + return user(this); + } + return orElse(); + } @override Map toJson() { - return _$$ChatCompletionMessageImageUrlImplToJson( + return _$$ChatCompletionUserMessageImplToJson( this, ); } } -abstract class _ChatCompletionMessageImageUrl - extends ChatCompletionMessageImageUrl { - const factory _ChatCompletionMessageImageUrl( - {required final String url, - final ChatCompletionMessageImageDetail detail}) = - _$ChatCompletionMessageImageUrlImpl; - const _ChatCompletionMessageImageUrl._() : super._(); +abstract class ChatCompletionUserMessage extends ChatCompletionMessage { + const factory ChatCompletionUserMessage( + {final ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + required final ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) final String? name}) = + _$ChatCompletionUserMessageImpl; + const ChatCompletionUserMessage._() : super._(); - factory _ChatCompletionMessageImageUrl.fromJson(Map json) = - _$ChatCompletionMessageImageUrlImpl.fromJson; + factory ChatCompletionUserMessage.fromJson(Map json) = + _$ChatCompletionUserMessageImpl.fromJson; - /// Either a URL of the image or the base64 encoded image data. @override - String get url; - /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). + /// The role of the messages author, in this case `user`. + ChatCompletionMessageRole get role; @override - ChatCompletionMessageImageDetail get detail; - /// Create a copy of ChatCompletionMessageImageUrl - /// with the given fields replaced by the non-null parameter values. + /// The contents of the user message. + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent get content; + + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. + @JsonKey(includeIfNull: false) + String? 
get name; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ChatCompletionMessageImageUrlImplCopyWith< - _$ChatCompletionMessageImageUrlImpl> + @JsonKey(ignore: true) + _$$ChatCompletionUserMessageImplCopyWith<_$ChatCompletionUserMessageImpl> get copyWith => throw _privateConstructorUsedError; } -ResponseFormat _$ResponseFormatFromJson(Map json) { - switch (json['type']) { - case 'text': - return ResponseFormatText.fromJson(json); - case 'json_object': - return ResponseFormatJsonObject.fromJson(json); - case 'json_schema': - return ResponseFormatJsonSchema.fromJson(json); - - default: - throw CheckedFromJsonException(json, 'type', 'ResponseFormat', - 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$ResponseFormat { - /// The type of response format being defined. - ResponseFormatType get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(ResponseFormatType type) text, - required TResult Function(ResponseFormatType type) jsonObject, - required TResult Function(ResponseFormatType type, - @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) - jsonSchema, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(ResponseFormatType type)? text, - TResult? Function(ResponseFormatType type)? jsonObject, - TResult? Function(ResponseFormatType type, - @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? - jsonSchema, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(ResponseFormatType type)? text, - TResult Function(ResponseFormatType type)? jsonObject, - TResult Function(ResponseFormatType type, - @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? - jsonSchema, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(ResponseFormatText value) text, - required TResult Function(ResponseFormatJsonObject value) jsonObject, - required TResult Function(ResponseFormatJsonSchema value) jsonSchema, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ResponseFormatText value)? text, - TResult? Function(ResponseFormatJsonObject value)? jsonObject, - TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ResponseFormatText value)? text, - TResult Function(ResponseFormatJsonObject value)? jsonObject, - TResult Function(ResponseFormatJsonSchema value)? jsonSchema, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - - /// Serializes this ResponseFormat to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ResponseFormat - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $ResponseFormatCopyWith get copyWith => - throw _privateConstructorUsedError; -} - /// @nodoc -abstract class $ResponseFormatCopyWith<$Res> { - factory $ResponseFormatCopyWith( - ResponseFormat value, $Res Function(ResponseFormat) then) = - _$ResponseFormatCopyWithImpl<$Res, ResponseFormat>; +abstract class _$$ChatCompletionAssistantMessageImplCopyWith<$Res> + implements $ChatCompletionMessageCopyWith<$Res> { + factory _$$ChatCompletionAssistantMessageImplCopyWith( + _$ChatCompletionAssistantMessageImpl value, + $Res Function(_$ChatCompletionAssistantMessageImpl) then) = + __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res>; + @override @useResult - $Res call({ResponseFormatType type}); + $Res call( + {ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall}); + + $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall; } /// @nodoc -class _$ResponseFormatCopyWithImpl<$Res, $Val extends ResponseFormat> - implements $ResponseFormatCopyWith<$Res> { - _$ResponseFormatCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; +class __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageCopyWithImpl<$Res, + _$ChatCompletionAssistantMessageImpl> + implements _$$ChatCompletionAssistantMessageImplCopyWith<$Res> { + __$$ChatCompletionAssistantMessageImplCopyWithImpl( + _$ChatCompletionAssistantMessageImpl _value, + $Res Function(_$ChatCompletionAssistantMessageImpl) _then) + : super(_value, _then); - /// Create a copy of ResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, + Object? role = null, + Object? content = freezed, + Object? name = freezed, + Object? toolCalls = freezed, + Object? functionCall = freezed, }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ResponseFormatType, - ) as $Val); + return _then(_$ChatCompletionAssistantMessageImpl( + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole, + content: freezed == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String?, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + toolCalls: freezed == toolCalls + ? _value._toolCalls + : toolCalls // ignore: cast_nullable_to_non_nullable + as List?, + functionCall: freezed == functionCall + ? 
_value.functionCall + : functionCall // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageFunctionCall?, + )); } -} -/// @nodoc -abstract class _$$ResponseFormatTextImplCopyWith<$Res> - implements $ResponseFormatCopyWith<$Res> { - factory _$$ResponseFormatTextImplCopyWith(_$ResponseFormatTextImpl value, - $Res Function(_$ResponseFormatTextImpl) then) = - __$$ResponseFormatTextImplCopyWithImpl<$Res>; @override - @useResult - $Res call({ResponseFormatType type}); -} - -/// @nodoc -class __$$ResponseFormatTextImplCopyWithImpl<$Res> - extends _$ResponseFormatCopyWithImpl<$Res, _$ResponseFormatTextImpl> - implements _$$ResponseFormatTextImplCopyWith<$Res> { - __$$ResponseFormatTextImplCopyWithImpl(_$ResponseFormatTextImpl _value, - $Res Function(_$ResponseFormatTextImpl) _then) - : super(_value, _then); - - /// Create a copy of ResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_$ResponseFormatTextImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ResponseFormatType, - )); + $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall { + if (_value.functionCall == null) { + return null; + } + + return $ChatCompletionMessageFunctionCallCopyWith<$Res>( + _value.functionCall!, (value) { + return _then(_value.copyWith(functionCall: value)); + }); } } /// @nodoc @JsonSerializable() -class _$ResponseFormatTextImpl extends ResponseFormatText { - const _$ResponseFormatTextImpl({this.type = ResponseFormatType.text}) - : super._(); +class _$ChatCompletionAssistantMessageImpl + extends ChatCompletionAssistantMessage { + const _$ChatCompletionAssistantMessageImpl( + {this.role = ChatCompletionMessageRole.assistant, + @JsonKey(includeIfNull: false) this.content, + @JsonKey(includeIfNull: false) this.name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) this.functionCall}) + : _toolCalls = toolCalls, + super._(); - factory _$ResponseFormatTextImpl.fromJson(Map json) => - _$$ResponseFormatTextImplFromJson(json); + factory _$ChatCompletionAssistantMessageImpl.fromJson( + Map json) => + _$$ChatCompletionAssistantMessageImplFromJson(json); - /// The type of response format being defined. + /// The role of the messages author, in this case `assistant`. @override @JsonKey() - final ResponseFormatType type; + final ChatCompletionMessageRole role; + + /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + @override + @JsonKey(includeIfNull: false) + final String? content; + + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. + @override + @JsonKey(includeIfNull: false) + final String? name; + + /// No Description + final List? _toolCalls; + + /// No Description + @override + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls { + final value = _toolCalls; + if (value == null) return null; + if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. + @override + @JsonKey(name: 'function_call', includeIfNull: false) + final ChatCompletionMessageFunctionCall? 
functionCall; @override String toString() { - return 'ResponseFormat.text(type: $type)'; + return 'ChatCompletionMessage.assistant(role: $role, content: $content, name: $name, toolCalls: $toolCalls, functionCall: $functionCall)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ResponseFormatTextImpl && - (identical(other.type, type) || other.type == type)); + other is _$ChatCompletionAssistantMessageImpl && + (identical(other.role, role) || other.role == role) && + (identical(other.content, content) || other.content == content) && + (identical(other.name, name) || other.name == name) && + const DeepCollectionEquality() + .equals(other._toolCalls, _toolCalls) && + (identical(other.functionCall, functionCall) || + other.functionCall == functionCall)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type); + int get hashCode => Object.hash(runtimeType, role, content, name, + const DeepCollectionEquality().hash(_toolCalls), functionCall); - /// Create a copy of ResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ResponseFormatTextImplCopyWith<_$ResponseFormatTextImpl> get copyWith => - __$$ResponseFormatTextImplCopyWithImpl<_$ResponseFormatTextImpl>( - this, _$identity); + _$$ChatCompletionAssistantMessageImplCopyWith< + _$ChatCompletionAssistantMessageImpl> + get copyWith => __$$ChatCompletionAssistantMessageImplCopyWithImpl< + _$ChatCompletionAssistantMessageImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(ResponseFormatType type) text, - required TResult Function(ResponseFormatType type) jsonObject, - required TResult Function(ResponseFormatType type, - @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) - jsonSchema, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name) + system, + required TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name) + user, + required TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall) + assistant, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId) + tool, + required TResult Function( + ChatCompletionMessageRole role, String? content, String name) + function, }) { - return text(type); + return assistant(role, content, name, toolCalls, functionCall); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ResponseFormatType type)? text, - TResult? Function(ResponseFormatType type)? jsonObject, - TResult? Function(ResponseFormatType type, - @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? - jsonSchema, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult? 
Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult? Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult? Function( + ChatCompletionMessageRole role, String? content, String name)? + function, }) { - return text?.call(type); + return assistant?.call(role, content, name, toolCalls, functionCall); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(ResponseFormatType type)? text, - TResult Function(ResponseFormatType type)? jsonObject, - TResult Function(ResponseFormatType type, - @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? - jsonSchema, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult Function( + ChatCompletionMessageRole role, String? content, String name)? + function, required TResult orElse(), }) { - if (text != null) { - return text(type); + if (assistant != null) { + return assistant(role, content, name, toolCalls, functionCall); } return orElse(); } @@ -58322,179 +50983,289 @@ class _$ResponseFormatTextImpl extends ResponseFormatText { @override @optionalTypeArgs TResult map({ - required TResult Function(ResponseFormatText value) text, - required TResult Function(ResponseFormatJsonObject value) jsonObject, - required TResult Function(ResponseFormatJsonSchema value) jsonSchema, + required TResult Function(ChatCompletionSystemMessage value) system, + required TResult Function(ChatCompletionUserMessage value) user, + required TResult Function(ChatCompletionAssistantMessage value) assistant, + required TResult Function(ChatCompletionToolMessage value) tool, + required TResult Function(ChatCompletionFunctionMessage value) function, }) { - return text(this); + return assistant(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ResponseFormatText value)? text, - TResult? Function(ResponseFormatJsonObject value)? jsonObject, - TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, + TResult? Function(ChatCompletionSystemMessage value)? system, + TResult? Function(ChatCompletionUserMessage value)? user, + TResult? Function(ChatCompletionAssistantMessage value)? assistant, + TResult? Function(ChatCompletionToolMessage value)? tool, + TResult? Function(ChatCompletionFunctionMessage value)? 
function, }) { - return text?.call(this); + return assistant?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ResponseFormatText value)? text, - TResult Function(ResponseFormatJsonObject value)? jsonObject, - TResult Function(ResponseFormatJsonSchema value)? jsonSchema, + TResult Function(ChatCompletionSystemMessage value)? system, + TResult Function(ChatCompletionUserMessage value)? user, + TResult Function(ChatCompletionAssistantMessage value)? assistant, + TResult Function(ChatCompletionToolMessage value)? tool, + TResult Function(ChatCompletionFunctionMessage value)? function, required TResult orElse(), }) { - if (text != null) { - return text(this); + if (assistant != null) { + return assistant(this); } return orElse(); } @override Map toJson() { - return _$$ResponseFormatTextImplToJson( + return _$$ChatCompletionAssistantMessageImplToJson( this, ); } } -abstract class ResponseFormatText extends ResponseFormat { - const factory ResponseFormatText({final ResponseFormatType type}) = - _$ResponseFormatTextImpl; - const ResponseFormatText._() : super._(); +abstract class ChatCompletionAssistantMessage extends ChatCompletionMessage { + const factory ChatCompletionAssistantMessage( + {final ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) final String? content, + @JsonKey(includeIfNull: false) final String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + final ChatCompletionMessageFunctionCall? functionCall}) = + _$ChatCompletionAssistantMessageImpl; + const ChatCompletionAssistantMessage._() : super._(); - factory ResponseFormatText.fromJson(Map json) = - _$ResponseFormatTextImpl.fromJson; + factory ChatCompletionAssistantMessage.fromJson(Map json) = + _$ChatCompletionAssistantMessageImpl.fromJson; - /// The type of response format being defined. @override - ResponseFormatType get type; - /// Create a copy of ResponseFormat - /// with the given fields replaced by the non-null parameter values. + /// The role of the messages author, in this case `assistant`. + ChatCompletionMessageRole get role; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ResponseFormatTextImplCopyWith<_$ResponseFormatTextImpl> get copyWith => - throw _privateConstructorUsedError; + + /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + @JsonKey(includeIfNull: false) + String? get content; + + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. + @JsonKey(includeIfNull: false) + String? get name; + + /// No Description + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls; + + /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? 
get functionCall; + @override + @JsonKey(ignore: true) + _$$ChatCompletionAssistantMessageImplCopyWith< + _$ChatCompletionAssistantMessageImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ResponseFormatJsonObjectImplCopyWith<$Res> - implements $ResponseFormatCopyWith<$Res> { - factory _$$ResponseFormatJsonObjectImplCopyWith( - _$ResponseFormatJsonObjectImpl value, - $Res Function(_$ResponseFormatJsonObjectImpl) then) = - __$$ResponseFormatJsonObjectImplCopyWithImpl<$Res>; +abstract class _$$ChatCompletionToolMessageImplCopyWith<$Res> + implements $ChatCompletionMessageCopyWith<$Res> { + factory _$$ChatCompletionToolMessageImplCopyWith( + _$ChatCompletionToolMessageImpl value, + $Res Function(_$ChatCompletionToolMessageImpl) then) = + __$$ChatCompletionToolMessageImplCopyWithImpl<$Res>; @override @useResult - $Res call({ResponseFormatType type}); + $Res call( + {ChatCompletionMessageRole role, + String content, + @JsonKey(name: 'tool_call_id') String toolCallId}); } /// @nodoc -class __$$ResponseFormatJsonObjectImplCopyWithImpl<$Res> - extends _$ResponseFormatCopyWithImpl<$Res, _$ResponseFormatJsonObjectImpl> - implements _$$ResponseFormatJsonObjectImplCopyWith<$Res> { - __$$ResponseFormatJsonObjectImplCopyWithImpl( - _$ResponseFormatJsonObjectImpl _value, - $Res Function(_$ResponseFormatJsonObjectImpl) _then) +class __$$ChatCompletionToolMessageImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageCopyWithImpl<$Res, + _$ChatCompletionToolMessageImpl> + implements _$$ChatCompletionToolMessageImplCopyWith<$Res> { + __$$ChatCompletionToolMessageImplCopyWithImpl( + _$ChatCompletionToolMessageImpl _value, + $Res Function(_$ChatCompletionToolMessageImpl) _then) : super(_value, _then); - /// Create a copy of ResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, + Object? role = null, + Object? content = null, + Object? toolCallId = null, }) { - return _then(_$ResponseFormatJsonObjectImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ResponseFormatType, + return _then(_$ChatCompletionToolMessageImpl( + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String, + toolCallId: null == toolCallId + ? _value.toolCallId + : toolCallId // ignore: cast_nullable_to_non_nullable + as String, )); } } /// @nodoc @JsonSerializable() -class _$ResponseFormatJsonObjectImpl extends ResponseFormatJsonObject { - const _$ResponseFormatJsonObjectImpl( - {this.type = ResponseFormatType.jsonObject}) +class _$ChatCompletionToolMessageImpl extends ChatCompletionToolMessage { + const _$ChatCompletionToolMessageImpl( + {this.role = ChatCompletionMessageRole.tool, + required this.content, + @JsonKey(name: 'tool_call_id') required this.toolCallId}) : super._(); - factory _$ResponseFormatJsonObjectImpl.fromJson(Map json) => - _$$ResponseFormatJsonObjectImplFromJson(json); + factory _$ChatCompletionToolMessageImpl.fromJson(Map json) => + _$$ChatCompletionToolMessageImplFromJson(json); - /// The type of response format being defined. + /// The role of the messages author, in this case `tool`. @override @JsonKey() - final ResponseFormatType type; + final ChatCompletionMessageRole role; + + /// The contents of the tool message. 
+ @override + final String content; + + /// Tool call that this message is responding to. + @override + @JsonKey(name: 'tool_call_id') + final String toolCallId; @override String toString() { - return 'ResponseFormat.jsonObject(type: $type)'; + return 'ChatCompletionMessage.tool(role: $role, content: $content, toolCallId: $toolCallId)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ResponseFormatJsonObjectImpl && - (identical(other.type, type) || other.type == type)); + other is _$ChatCompletionToolMessageImpl && + (identical(other.role, role) || other.role == role) && + (identical(other.content, content) || other.content == content) && + (identical(other.toolCallId, toolCallId) || + other.toolCallId == toolCallId)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type); + int get hashCode => Object.hash(runtimeType, role, content, toolCallId); - /// Create a copy of ResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ResponseFormatJsonObjectImplCopyWith<_$ResponseFormatJsonObjectImpl> - get copyWith => __$$ResponseFormatJsonObjectImplCopyWithImpl< - _$ResponseFormatJsonObjectImpl>(this, _$identity); + _$$ChatCompletionToolMessageImplCopyWith<_$ChatCompletionToolMessageImpl> + get copyWith => __$$ChatCompletionToolMessageImplCopyWithImpl< + _$ChatCompletionToolMessageImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(ResponseFormatType type) text, - required TResult Function(ResponseFormatType type) jsonObject, - required TResult Function(ResponseFormatType type, - @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) - jsonSchema, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name) + system, + required TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name) + user, + required TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall) + assistant, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId) + tool, + required TResult Function( + ChatCompletionMessageRole role, String? content, String name) + function, }) { - return jsonObject(type); + return tool(role, content, toolCallId); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ResponseFormatType type)? text, - TResult? Function(ResponseFormatType type)? jsonObject, - TResult? Function(ResponseFormatType type, - @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? - jsonSchema, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult? Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? 
name)? + user, + TResult? Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult? Function( + ChatCompletionMessageRole role, String? content, String name)? + function, }) { - return jsonObject?.call(type); + return tool?.call(role, content, toolCallId); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(ResponseFormatType type)? text, - TResult Function(ResponseFormatType type)? jsonObject, - TResult Function(ResponseFormatType type, - @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? - jsonSchema, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult Function( + ChatCompletionMessageRole role, String? content, String name)? + function, required TResult orElse(), }) { - if (jsonObject != null) { - return jsonObject(type); + if (tool != null) { + return tool(role, content, toolCallId); } return orElse(); } @@ -58502,206 +51273,273 @@ class _$ResponseFormatJsonObjectImpl extends ResponseFormatJsonObject { @override @optionalTypeArgs TResult map({ - required TResult Function(ResponseFormatText value) text, - required TResult Function(ResponseFormatJsonObject value) jsonObject, - required TResult Function(ResponseFormatJsonSchema value) jsonSchema, + required TResult Function(ChatCompletionSystemMessage value) system, + required TResult Function(ChatCompletionUserMessage value) user, + required TResult Function(ChatCompletionAssistantMessage value) assistant, + required TResult Function(ChatCompletionToolMessage value) tool, + required TResult Function(ChatCompletionFunctionMessage value) function, }) { - return jsonObject(this); + return tool(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ResponseFormatText value)? text, - TResult? Function(ResponseFormatJsonObject value)? jsonObject, - TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, + TResult? Function(ChatCompletionSystemMessage value)? system, + TResult? Function(ChatCompletionUserMessage value)? user, + TResult? Function(ChatCompletionAssistantMessage value)? assistant, + TResult? Function(ChatCompletionToolMessage value)? tool, + TResult? Function(ChatCompletionFunctionMessage value)? function, }) { - return jsonObject?.call(this); + return tool?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ResponseFormatText value)? text, - TResult Function(ResponseFormatJsonObject value)? 
jsonObject, - TResult Function(ResponseFormatJsonSchema value)? jsonSchema, + TResult Function(ChatCompletionSystemMessage value)? system, + TResult Function(ChatCompletionUserMessage value)? user, + TResult Function(ChatCompletionAssistantMessage value)? assistant, + TResult Function(ChatCompletionToolMessage value)? tool, + TResult Function(ChatCompletionFunctionMessage value)? function, required TResult orElse(), }) { - if (jsonObject != null) { - return jsonObject(this); + if (tool != null) { + return tool(this); } return orElse(); } @override Map toJson() { - return _$$ResponseFormatJsonObjectImplToJson( + return _$$ChatCompletionToolMessageImplToJson( this, ); } } -abstract class ResponseFormatJsonObject extends ResponseFormat { - const factory ResponseFormatJsonObject({final ResponseFormatType type}) = - _$ResponseFormatJsonObjectImpl; - const ResponseFormatJsonObject._() : super._(); +abstract class ChatCompletionToolMessage extends ChatCompletionMessage { + const factory ChatCompletionToolMessage( + {final ChatCompletionMessageRole role, + required final String content, + @JsonKey(name: 'tool_call_id') required final String toolCallId}) = + _$ChatCompletionToolMessageImpl; + const ChatCompletionToolMessage._() : super._(); + + factory ChatCompletionToolMessage.fromJson(Map json) = + _$ChatCompletionToolMessageImpl.fromJson; - factory ResponseFormatJsonObject.fromJson(Map json) = - _$ResponseFormatJsonObjectImpl.fromJson; + @override - /// The type of response format being defined. + /// The role of the messages author, in this case `tool`. + ChatCompletionMessageRole get role; @override - ResponseFormatType get type; - /// Create a copy of ResponseFormat - /// with the given fields replaced by the non-null parameter values. + /// The contents of the tool message. + String get content; + + /// Tool call that this message is responding to. + @JsonKey(name: 'tool_call_id') + String get toolCallId; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ResponseFormatJsonObjectImplCopyWith<_$ResponseFormatJsonObjectImpl> + @JsonKey(ignore: true) + _$$ChatCompletionToolMessageImplCopyWith<_$ChatCompletionToolMessageImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ResponseFormatJsonSchemaImplCopyWith<$Res> - implements $ResponseFormatCopyWith<$Res> { - factory _$$ResponseFormatJsonSchemaImplCopyWith( - _$ResponseFormatJsonSchemaImpl value, - $Res Function(_$ResponseFormatJsonSchemaImpl) then) = - __$$ResponseFormatJsonSchemaImplCopyWithImpl<$Res>; +abstract class _$$ChatCompletionFunctionMessageImplCopyWith<$Res> + implements $ChatCompletionMessageCopyWith<$Res> { + factory _$$ChatCompletionFunctionMessageImplCopyWith( + _$ChatCompletionFunctionMessageImpl value, + $Res Function(_$ChatCompletionFunctionMessageImpl) then) = + __$$ChatCompletionFunctionMessageImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {ResponseFormatType type, - @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema}); - - $JsonSchemaObjectCopyWith<$Res> get jsonSchema; + $Res call({ChatCompletionMessageRole role, String? 
content, String name}); } /// @nodoc -class __$$ResponseFormatJsonSchemaImplCopyWithImpl<$Res> - extends _$ResponseFormatCopyWithImpl<$Res, _$ResponseFormatJsonSchemaImpl> - implements _$$ResponseFormatJsonSchemaImplCopyWith<$Res> { - __$$ResponseFormatJsonSchemaImplCopyWithImpl( - _$ResponseFormatJsonSchemaImpl _value, - $Res Function(_$ResponseFormatJsonSchemaImpl) _then) +class __$$ChatCompletionFunctionMessageImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageCopyWithImpl<$Res, + _$ChatCompletionFunctionMessageImpl> + implements _$$ChatCompletionFunctionMessageImplCopyWith<$Res> { + __$$ChatCompletionFunctionMessageImplCopyWithImpl( + _$ChatCompletionFunctionMessageImpl _value, + $Res Function(_$ChatCompletionFunctionMessageImpl) _then) : super(_value, _then); - /// Create a copy of ResponseFormat - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, - Object? jsonSchema = null, + Object? role = null, + Object? content = freezed, + Object? name = null, }) { - return _then(_$ResponseFormatJsonSchemaImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ResponseFormatType, - jsonSchema: null == jsonSchema - ? _value.jsonSchema - : jsonSchema // ignore: cast_nullable_to_non_nullable - as JsonSchemaObject, + return _then(_$ChatCompletionFunctionMessageImpl( + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole, + content: freezed == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String?, + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, )); } - - /// Create a copy of ResponseFormat - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $JsonSchemaObjectCopyWith<$Res> get jsonSchema { - return $JsonSchemaObjectCopyWith<$Res>(_value.jsonSchema, (value) { - return _then(_value.copyWith(jsonSchema: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$ResponseFormatJsonSchemaImpl extends ResponseFormatJsonSchema { - const _$ResponseFormatJsonSchemaImpl( - {this.type = ResponseFormatType.jsonSchema, - @JsonKey(name: 'json_schema') required this.jsonSchema}) +class _$ChatCompletionFunctionMessageImpl + extends ChatCompletionFunctionMessage { + const _$ChatCompletionFunctionMessageImpl( + {this.role = ChatCompletionMessageRole.function, + required this.content, + required this.name}) : super._(); - factory _$ResponseFormatJsonSchemaImpl.fromJson(Map json) => - _$$ResponseFormatJsonSchemaImplFromJson(json); + factory _$ChatCompletionFunctionMessageImpl.fromJson( + Map json) => + _$$ChatCompletionFunctionMessageImplFromJson(json); - /// The type of response format being defined. + /// The role of the messages author, in this case `function`. @override @JsonKey() - final ResponseFormatType type; + final ChatCompletionMessageRole role; + + /// The contents of the function message. + @override + final String? content; - /// A JSON Schema object. + /// The name of the function to call. 
@override - @JsonKey(name: 'json_schema') - final JsonSchemaObject jsonSchema; + final String name; @override String toString() { - return 'ResponseFormat.jsonSchema(type: $type, jsonSchema: $jsonSchema)'; + return 'ChatCompletionMessage.function(role: $role, content: $content, name: $name)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ResponseFormatJsonSchemaImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.jsonSchema, jsonSchema) || - other.jsonSchema == jsonSchema)); + other is _$ChatCompletionFunctionMessageImpl && + (identical(other.role, role) || other.role == role) && + (identical(other.content, content) || other.content == content) && + (identical(other.name, name) || other.name == name)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, jsonSchema); + int get hashCode => Object.hash(runtimeType, role, content, name); - /// Create a copy of ResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ResponseFormatJsonSchemaImplCopyWith<_$ResponseFormatJsonSchemaImpl> - get copyWith => __$$ResponseFormatJsonSchemaImplCopyWithImpl< - _$ResponseFormatJsonSchemaImpl>(this, _$identity); + _$$ChatCompletionFunctionMessageImplCopyWith< + _$ChatCompletionFunctionMessageImpl> + get copyWith => __$$ChatCompletionFunctionMessageImplCopyWithImpl< + _$ChatCompletionFunctionMessageImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(ResponseFormatType type) text, - required TResult Function(ResponseFormatType type) jsonObject, - required TResult Function(ResponseFormatType type, - @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) - jsonSchema, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name) + system, + required TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name) + user, + required TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall) + assistant, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId) + tool, + required TResult Function( + ChatCompletionMessageRole role, String? content, String name) + function, }) { - return jsonSchema(type, this.jsonSchema); + return function(role, content, name); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ResponseFormatType type)? text, - TResult? Function(ResponseFormatType type)? jsonObject, - TResult? Function(ResponseFormatType type, - @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? - jsonSchema, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult? 
Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult? Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult? Function( + ChatCompletionMessageRole role, String? content, String name)? + function, }) { - return jsonSchema?.call(type, this.jsonSchema); + return function?.call(role, content, name); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(ResponseFormatType type)? text, - TResult Function(ResponseFormatType type)? jsonObject, - TResult Function(ResponseFormatType type, - @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? - jsonSchema, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult Function( + ChatCompletionMessageRole role, String? content, String name)? + function, required TResult orElse(), }) { - if (jsonSchema != null) { - return jsonSchema(type, this.jsonSchema); + if (function != null) { + return function(role, content, name); } return orElse(); } @@ -58709,537 +51547,277 @@ class _$ResponseFormatJsonSchemaImpl extends ResponseFormatJsonSchema { @override @optionalTypeArgs TResult map({ - required TResult Function(ResponseFormatText value) text, - required TResult Function(ResponseFormatJsonObject value) jsonObject, - required TResult Function(ResponseFormatJsonSchema value) jsonSchema, + required TResult Function(ChatCompletionSystemMessage value) system, + required TResult Function(ChatCompletionUserMessage value) user, + required TResult Function(ChatCompletionAssistantMessage value) assistant, + required TResult Function(ChatCompletionToolMessage value) tool, + required TResult Function(ChatCompletionFunctionMessage value) function, }) { - return jsonSchema(this); + return function(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ResponseFormatText value)? text, - TResult? Function(ResponseFormatJsonObject value)? jsonObject, - TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, + TResult? Function(ChatCompletionSystemMessage value)? system, + TResult? Function(ChatCompletionUserMessage value)? user, + TResult? Function(ChatCompletionAssistantMessage value)? assistant, + TResult? Function(ChatCompletionToolMessage value)? tool, + TResult? Function(ChatCompletionFunctionMessage value)? 
function, }) { - return jsonSchema?.call(this); + return function?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ResponseFormatText value)? text, - TResult Function(ResponseFormatJsonObject value)? jsonObject, - TResult Function(ResponseFormatJsonSchema value)? jsonSchema, + TResult Function(ChatCompletionSystemMessage value)? system, + TResult Function(ChatCompletionUserMessage value)? user, + TResult Function(ChatCompletionAssistantMessage value)? assistant, + TResult Function(ChatCompletionToolMessage value)? tool, + TResult Function(ChatCompletionFunctionMessage value)? function, required TResult orElse(), }) { - if (jsonSchema != null) { - return jsonSchema(this); + if (function != null) { + return function(this); } return orElse(); } @override Map toJson() { - return _$$ResponseFormatJsonSchemaImplToJson( + return _$$ChatCompletionFunctionMessageImplToJson( this, ); } } -abstract class ResponseFormatJsonSchema extends ResponseFormat { - const factory ResponseFormatJsonSchema( - {final ResponseFormatType type, - @JsonKey(name: 'json_schema') - required final JsonSchemaObject jsonSchema}) = - _$ResponseFormatJsonSchemaImpl; - const ResponseFormatJsonSchema._() : super._(); +abstract class ChatCompletionFunctionMessage extends ChatCompletionMessage { + const factory ChatCompletionFunctionMessage( + {final ChatCompletionMessageRole role, + required final String? content, + required final String name}) = _$ChatCompletionFunctionMessageImpl; + const ChatCompletionFunctionMessage._() : super._(); + + factory ChatCompletionFunctionMessage.fromJson(Map json) = + _$ChatCompletionFunctionMessageImpl.fromJson; - factory ResponseFormatJsonSchema.fromJson(Map json) = - _$ResponseFormatJsonSchemaImpl.fromJson; + @override - /// The type of response format being defined. + /// The role of the messages author, in this case `function`. + ChatCompletionMessageRole get role; @override - ResponseFormatType get type; - /// A JSON Schema object. - @JsonKey(name: 'json_schema') - JsonSchemaObject get jsonSchema; + /// The contents of the function message. + String? get content; - /// Create a copy of ResponseFormat - /// with the given fields replaced by the non-null parameter values. + /// The name of the function to call. 
+ String get name; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ResponseFormatJsonSchemaImplCopyWith<_$ResponseFormatJsonSchemaImpl> + @JsonKey(ignore: true) + _$$ChatCompletionFunctionMessageImplCopyWith< + _$ChatCompletionFunctionMessageImpl> get copyWith => throw _privateConstructorUsedError; } -AssistantTools _$AssistantToolsFromJson(Map json) { - switch (json['type']) { - case 'code_interpreter': - return AssistantToolsCodeInterpreter.fromJson(json); - case 'file_search': - return AssistantToolsFileSearch.fromJson(json); - case 'function': - return AssistantToolsFunction.fromJson(json); +ChatCompletionUserMessageContent _$ChatCompletionUserMessageContentFromJson( + Map json) { + switch (json['runtimeType']) { + case 'parts': + return ChatCompletionMessageContentParts.fromJson(json); + case 'string': + return ChatCompletionUserMessageContentString.fromJson(json); default: - throw CheckedFromJsonException(json, 'type', 'AssistantTools', - 'Invalid union type "${json['type']}"!'); + throw CheckedFromJsonException( + json, + 'runtimeType', + 'ChatCompletionUserMessageContent', + 'Invalid union type "${json['runtimeType']}"!'); } } /// @nodoc -mixin _$AssistantTools { - /// The type of tool being defined: `code_interpreter` - String get type => throw _privateConstructorUsedError; +mixin _$ChatCompletionUserMessageContent { + Object get value => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function(String type) codeInterpreter, - required TResult Function( - String type, - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch) - fileSearch, - required TResult Function(String type, FunctionObject function) function, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String type)? codeInterpreter, - TResult? Function( - String type, - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch)? - fileSearch, - TResult? Function(String type, FunctionObject function)? function, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String type)? codeInterpreter, - TResult Function( - String type, - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch)? - fileSearch, - TResult Function(String type, FunctionObject function)? function, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(AssistantToolsCodeInterpreter value) - codeInterpreter, - required TResult Function(AssistantToolsFileSearch value) fileSearch, - required TResult Function(AssistantToolsFunction value) function, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult? Function(AssistantToolsFileSearch value)? fileSearch, - TResult? Function(AssistantToolsFunction value)? function, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult Function(AssistantToolsFileSearch value)? fileSearch, - TResult Function(AssistantToolsFunction value)? 
function, - required TResult orElse(), + required TResult Function(List value) + parts, + required TResult Function(String value) string, }) => throw _privateConstructorUsedError; - - /// Serializes this AssistantTools to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of AssistantTools - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $AssistantToolsCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $AssistantToolsCopyWith<$Res> { - factory $AssistantToolsCopyWith( - AssistantTools value, $Res Function(AssistantTools) then) = - _$AssistantToolsCopyWithImpl<$Res, AssistantTools>; - @useResult - $Res call({String type}); -} - -/// @nodoc -class _$AssistantToolsCopyWithImpl<$Res, $Val extends AssistantTools> - implements $AssistantToolsCopyWith<$Res> { - _$AssistantToolsCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of AssistantTools - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$AssistantToolsCodeInterpreterImplCopyWith<$Res> - implements $AssistantToolsCopyWith<$Res> { - factory _$$AssistantToolsCodeInterpreterImplCopyWith( - _$AssistantToolsCodeInterpreterImpl value, - $Res Function(_$AssistantToolsCodeInterpreterImpl) then) = - __$$AssistantToolsCodeInterpreterImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({String type}); -} - -/// @nodoc -class __$$AssistantToolsCodeInterpreterImplCopyWithImpl<$Res> - extends _$AssistantToolsCopyWithImpl<$Res, - _$AssistantToolsCodeInterpreterImpl> - implements _$$AssistantToolsCodeInterpreterImplCopyWith<$Res> { - __$$AssistantToolsCodeInterpreterImplCopyWithImpl( - _$AssistantToolsCodeInterpreterImpl _value, - $Res Function(_$AssistantToolsCodeInterpreterImpl) _then) - : super(_value, _then); - - /// Create a copy of AssistantTools - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_$AssistantToolsCodeInterpreterImpl( - type: null == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$AssistantToolsCodeInterpreterImpl - extends AssistantToolsCodeInterpreter { - const _$AssistantToolsCodeInterpreterImpl({this.type = 'code_interpreter'}) - : super._(); - - factory _$AssistantToolsCodeInterpreterImpl.fromJson( - Map json) => - _$$AssistantToolsCodeInterpreterImplFromJson(json); - - /// The type of tool being defined: `code_interpreter` - @override - @JsonKey() - final String type; - - @override - String toString() { - return 'AssistantTools.codeInterpreter(type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$AssistantToolsCodeInterpreterImpl && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, type); - - /// Create a copy of AssistantTools - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$AssistantToolsCodeInterpreterImplCopyWith< - _$AssistantToolsCodeInterpreterImpl> - get copyWith => __$$AssistantToolsCodeInterpreterImplCopyWithImpl< - _$AssistantToolsCodeInterpreterImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(String type) codeInterpreter, - required TResult Function( - String type, - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch) - fileSearch, - required TResult Function(String type, FunctionObject function) function, - }) { - return codeInterpreter(type); - } - - @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type)? codeInterpreter, - TResult? Function( - String type, - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch)? - fileSearch, - TResult? Function(String type, FunctionObject function)? function, - }) { - return codeInterpreter?.call(type); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String type)? codeInterpreter, - TResult Function( - String type, - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch)? - fileSearch, - TResult Function(String type, FunctionObject function)? function, + TResult? Function(List value)? parts, + TResult? Function(String value)? string, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? parts, + TResult Function(String value)? string, required TResult orElse(), - }) { - if (codeInterpreter != null) { - return codeInterpreter(type); - } - return orElse(); - } - - @override + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(AssistantToolsCodeInterpreter value) - codeInterpreter, - required TResult Function(AssistantToolsFileSearch value) fileSearch, - required TResult Function(AssistantToolsFunction value) function, - }) { - return codeInterpreter(this); - } - - @override + required TResult Function(ChatCompletionMessageContentParts value) parts, + required TResult Function(ChatCompletionUserMessageContentString value) + string, + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? 
Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult? Function(AssistantToolsFileSearch value)? fileSearch, - TResult? Function(AssistantToolsFunction value)? function, - }) { - return codeInterpreter?.call(this); - } - - @override + TResult? Function(ChatCompletionMessageContentParts value)? parts, + TResult? Function(ChatCompletionUserMessageContentString value)? string, + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult Function(AssistantToolsFileSearch value)? fileSearch, - TResult Function(AssistantToolsFunction value)? function, + TResult Function(ChatCompletionMessageContentParts value)? parts, + TResult Function(ChatCompletionUserMessageContentString value)? string, required TResult orElse(), - }) { - if (codeInterpreter != null) { - return codeInterpreter(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$AssistantToolsCodeInterpreterImplToJson( - this, - ); - } + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; } -abstract class AssistantToolsCodeInterpreter extends AssistantTools { - const factory AssistantToolsCodeInterpreter({final String type}) = - _$AssistantToolsCodeInterpreterImpl; - const AssistantToolsCodeInterpreter._() : super._(); - - factory AssistantToolsCodeInterpreter.fromJson(Map json) = - _$AssistantToolsCodeInterpreterImpl.fromJson; +/// @nodoc +abstract class $ChatCompletionUserMessageContentCopyWith<$Res> { + factory $ChatCompletionUserMessageContentCopyWith( + ChatCompletionUserMessageContent value, + $Res Function(ChatCompletionUserMessageContent) then) = + _$ChatCompletionUserMessageContentCopyWithImpl<$Res, + ChatCompletionUserMessageContent>; +} - /// The type of tool being defined: `code_interpreter` - @override - String get type; +/// @nodoc +class _$ChatCompletionUserMessageContentCopyWithImpl<$Res, + $Val extends ChatCompletionUserMessageContent> + implements $ChatCompletionUserMessageContentCopyWith<$Res> { + _$ChatCompletionUserMessageContentCopyWithImpl(this._value, this._then); - /// Create a copy of AssistantTools - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$AssistantToolsCodeInterpreterImplCopyWith< - _$AssistantToolsCodeInterpreterImpl> - get copyWith => throw _privateConstructorUsedError; + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; } /// @nodoc -abstract class _$$AssistantToolsFileSearchImplCopyWith<$Res> - implements $AssistantToolsCopyWith<$Res> { - factory _$$AssistantToolsFileSearchImplCopyWith( - _$AssistantToolsFileSearchImpl value, - $Res Function(_$AssistantToolsFileSearchImpl) then) = - __$$AssistantToolsFileSearchImplCopyWithImpl<$Res>; - @override +abstract class _$$ChatCompletionMessageContentPartsImplCopyWith<$Res> { + factory _$$ChatCompletionMessageContentPartsImplCopyWith( + _$ChatCompletionMessageContentPartsImpl value, + $Res Function(_$ChatCompletionMessageContentPartsImpl) then) = + __$$ChatCompletionMessageContentPartsImplCopyWithImpl<$Res>; @useResult - $Res call( - {String type, - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch}); - - $AssistantToolsFileSearchFileSearchCopyWith<$Res>? 
get fileSearch; + $Res call({List value}); } /// @nodoc -class __$$AssistantToolsFileSearchImplCopyWithImpl<$Res> - extends _$AssistantToolsCopyWithImpl<$Res, _$AssistantToolsFileSearchImpl> - implements _$$AssistantToolsFileSearchImplCopyWith<$Res> { - __$$AssistantToolsFileSearchImplCopyWithImpl( - _$AssistantToolsFileSearchImpl _value, - $Res Function(_$AssistantToolsFileSearchImpl) _then) +class __$$ChatCompletionMessageContentPartsImplCopyWithImpl<$Res> + extends _$ChatCompletionUserMessageContentCopyWithImpl<$Res, + _$ChatCompletionMessageContentPartsImpl> + implements _$$ChatCompletionMessageContentPartsImplCopyWith<$Res> { + __$$ChatCompletionMessageContentPartsImplCopyWithImpl( + _$ChatCompletionMessageContentPartsImpl _value, + $Res Function(_$ChatCompletionMessageContentPartsImpl) _then) : super(_value, _then); - /// Create a copy of AssistantTools - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, - Object? fileSearch = freezed, + Object? value = null, }) { - return _then(_$AssistantToolsFileSearchImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - fileSearch: freezed == fileSearch - ? _value.fileSearch - : fileSearch // ignore: cast_nullable_to_non_nullable - as AssistantToolsFileSearchFileSearch?, + return _then(_$ChatCompletionMessageContentPartsImpl( + null == value + ? _value._value + : value // ignore: cast_nullable_to_non_nullable + as List, )); } - - /// Create a copy of AssistantTools - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $AssistantToolsFileSearchFileSearchCopyWith<$Res>? get fileSearch { - if (_value.fileSearch == null) { - return null; - } - - return $AssistantToolsFileSearchFileSearchCopyWith<$Res>(_value.fileSearch!, - (value) { - return _then(_value.copyWith(fileSearch: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { - const _$AssistantToolsFileSearchImpl( - {required this.type, - @JsonKey(name: 'file_search', includeIfNull: false) this.fileSearch}) - : super._(); +class _$ChatCompletionMessageContentPartsImpl + extends ChatCompletionMessageContentParts { + const _$ChatCompletionMessageContentPartsImpl( + final List value, + {final String? $type}) + : _value = value, + $type = $type ?? 'parts', + super._(); - factory _$AssistantToolsFileSearchImpl.fromJson(Map json) => - _$$AssistantToolsFileSearchImplFromJson(json); + factory _$ChatCompletionMessageContentPartsImpl.fromJson( + Map json) => + _$$ChatCompletionMessageContentPartsImplFromJson(json); - /// The type of tool being defined: `file_search` + final List _value; @override - final String type; + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } - /// Overrides for the file search tool. - @override - @JsonKey(name: 'file_search', includeIfNull: false) - final AssistantToolsFileSearchFileSearch? 
fileSearch; + @JsonKey(name: 'runtimeType') + final String $type; @override String toString() { - return 'AssistantTools.fileSearch(type: $type, fileSearch: $fileSearch)'; + return 'ChatCompletionUserMessageContent.parts(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$AssistantToolsFileSearchImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.fileSearch, fileSearch) || - other.fileSearch == fileSearch)); + other is _$ChatCompletionMessageContentPartsImpl && + const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, fileSearch); + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - /// Create a copy of AssistantTools - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> - get copyWith => __$$AssistantToolsFileSearchImplCopyWithImpl< - _$AssistantToolsFileSearchImpl>(this, _$identity); + _$$ChatCompletionMessageContentPartsImplCopyWith< + _$ChatCompletionMessageContentPartsImpl> + get copyWith => __$$ChatCompletionMessageContentPartsImplCopyWithImpl< + _$ChatCompletionMessageContentPartsImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type) codeInterpreter, - required TResult Function( - String type, - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch) - fileSearch, - required TResult Function(String type, FunctionObject function) function, + required TResult Function(List value) + parts, + required TResult Function(String value) string, }) { - return fileSearch(type, this.fileSearch); + return parts(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type)? codeInterpreter, - TResult? Function( - String type, - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch)? - fileSearch, - TResult? Function(String type, FunctionObject function)? function, + TResult? Function(List value)? parts, + TResult? Function(String value)? string, }) { - return fileSearch?.call(type, this.fileSearch); + return parts?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type)? codeInterpreter, - TResult Function( - String type, - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch)? - fileSearch, - TResult Function(String type, FunctionObject function)? function, + TResult Function(List value)? parts, + TResult Function(String value)? 
string, required TResult orElse(), }) { - if (fileSearch != null) { - return fileSearch(type, this.fileSearch); + if (parts != null) { + return parts(value); } return orElse(); } @@ -59247,216 +51825,169 @@ class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { @override @optionalTypeArgs TResult map({ - required TResult Function(AssistantToolsCodeInterpreter value) - codeInterpreter, - required TResult Function(AssistantToolsFileSearch value) fileSearch, - required TResult Function(AssistantToolsFunction value) function, + required TResult Function(ChatCompletionMessageContentParts value) parts, + required TResult Function(ChatCompletionUserMessageContentString value) + string, }) { - return fileSearch(this); + return parts(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult? Function(AssistantToolsFileSearch value)? fileSearch, - TResult? Function(AssistantToolsFunction value)? function, + TResult? Function(ChatCompletionMessageContentParts value)? parts, + TResult? Function(ChatCompletionUserMessageContentString value)? string, }) { - return fileSearch?.call(this); + return parts?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult Function(AssistantToolsFileSearch value)? fileSearch, - TResult Function(AssistantToolsFunction value)? function, + TResult Function(ChatCompletionMessageContentParts value)? parts, + TResult Function(ChatCompletionUserMessageContentString value)? string, required TResult orElse(), }) { - if (fileSearch != null) { - return fileSearch(this); + if (parts != null) { + return parts(this); } return orElse(); } @override Map toJson() { - return _$$AssistantToolsFileSearchImplToJson( + return _$$ChatCompletionMessageContentPartsImplToJson( this, ); } } -abstract class AssistantToolsFileSearch extends AssistantTools { - const factory AssistantToolsFileSearch( - {required final String type, - @JsonKey(name: 'file_search', includeIfNull: false) - final AssistantToolsFileSearchFileSearch? fileSearch}) = - _$AssistantToolsFileSearchImpl; - const AssistantToolsFileSearch._() : super._(); - - factory AssistantToolsFileSearch.fromJson(Map json) = - _$AssistantToolsFileSearchImpl.fromJson; - - /// The type of tool being defined: `file_search` - @override - String get type; +abstract class ChatCompletionMessageContentParts + extends ChatCompletionUserMessageContent { + const factory ChatCompletionMessageContentParts( + final List value) = + _$ChatCompletionMessageContentPartsImpl; + const ChatCompletionMessageContentParts._() : super._(); - /// Overrides for the file search tool. - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? get fileSearch; + factory ChatCompletionMessageContentParts.fromJson( + Map json) = + _$ChatCompletionMessageContentPartsImpl.fromJson; - /// Create a copy of AssistantTools - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> + List get value; + @JsonKey(ignore: true) + _$$ChatCompletionMessageContentPartsImplCopyWith< + _$ChatCompletionMessageContentPartsImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$AssistantToolsFunctionImplCopyWith<$Res> - implements $AssistantToolsCopyWith<$Res> { - factory _$$AssistantToolsFunctionImplCopyWith( - _$AssistantToolsFunctionImpl value, - $Res Function(_$AssistantToolsFunctionImpl) then) = - __$$AssistantToolsFunctionImplCopyWithImpl<$Res>; - @override +abstract class _$$ChatCompletionUserMessageContentStringImplCopyWith<$Res> { + factory _$$ChatCompletionUserMessageContentStringImplCopyWith( + _$ChatCompletionUserMessageContentStringImpl value, + $Res Function(_$ChatCompletionUserMessageContentStringImpl) then) = + __$$ChatCompletionUserMessageContentStringImplCopyWithImpl<$Res>; @useResult - $Res call({String type, FunctionObject function}); - - $FunctionObjectCopyWith<$Res> get function; + $Res call({String value}); } /// @nodoc -class __$$AssistantToolsFunctionImplCopyWithImpl<$Res> - extends _$AssistantToolsCopyWithImpl<$Res, _$AssistantToolsFunctionImpl> - implements _$$AssistantToolsFunctionImplCopyWith<$Res> { - __$$AssistantToolsFunctionImplCopyWithImpl( - _$AssistantToolsFunctionImpl _value, - $Res Function(_$AssistantToolsFunctionImpl) _then) +class __$$ChatCompletionUserMessageContentStringImplCopyWithImpl<$Res> + extends _$ChatCompletionUserMessageContentCopyWithImpl<$Res, + _$ChatCompletionUserMessageContentStringImpl> + implements _$$ChatCompletionUserMessageContentStringImplCopyWith<$Res> { + __$$ChatCompletionUserMessageContentStringImplCopyWithImpl( + _$ChatCompletionUserMessageContentStringImpl _value, + $Res Function(_$ChatCompletionUserMessageContentStringImpl) _then) : super(_value, _then); - /// Create a copy of AssistantTools - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, - Object? function = null, + Object? value = null, }) { - return _then(_$AssistantToolsFunctionImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable + return _then(_$ChatCompletionUserMessageContentStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable as String, - function: null == function - ? _value.function - : function // ignore: cast_nullable_to_non_nullable - as FunctionObject, )); } - - /// Create a copy of AssistantTools - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $FunctionObjectCopyWith<$Res> get function { - return $FunctionObjectCopyWith<$Res>(_value.function, (value) { - return _then(_value.copyWith(function: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { - const _$AssistantToolsFunctionImpl( - {this.type = 'function', required this.function}) - : super._(); +class _$ChatCompletionUserMessageContentStringImpl + extends ChatCompletionUserMessageContentString { + const _$ChatCompletionUserMessageContentStringImpl(this.value, + {final String? $type}) + : $type = $type ?? 
'string', + super._(); - factory _$AssistantToolsFunctionImpl.fromJson(Map json) => - _$$AssistantToolsFunctionImplFromJson(json); + factory _$ChatCompletionUserMessageContentStringImpl.fromJson( + Map json) => + _$$ChatCompletionUserMessageContentStringImplFromJson(json); - /// The type of tool being defined: `function` @override - @JsonKey() - final String type; + final String value; - /// A function that the model may call. - @override - final FunctionObject function; + @JsonKey(name: 'runtimeType') + final String $type; @override String toString() { - return 'AssistantTools.function(type: $type, function: $function)'; + return 'ChatCompletionUserMessageContent.string(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$AssistantToolsFunctionImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.function, function) || - other.function == function)); + other is _$ChatCompletionUserMessageContentStringImpl && + (identical(other.value, value) || other.value == value)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, function); + int get hashCode => Object.hash(runtimeType, value); - /// Create a copy of AssistantTools - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$AssistantToolsFunctionImplCopyWith<_$AssistantToolsFunctionImpl> - get copyWith => __$$AssistantToolsFunctionImplCopyWithImpl< - _$AssistantToolsFunctionImpl>(this, _$identity); + _$$ChatCompletionUserMessageContentStringImplCopyWith< + _$ChatCompletionUserMessageContentStringImpl> + get copyWith => + __$$ChatCompletionUserMessageContentStringImplCopyWithImpl< + _$ChatCompletionUserMessageContentStringImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type) codeInterpreter, - required TResult Function( - String type, - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch) - fileSearch, - required TResult Function(String type, FunctionObject function) function, + required TResult Function(List value) + parts, + required TResult Function(String value) string, }) { - return function(type, this.function); + return string(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type)? codeInterpreter, - TResult? Function( - String type, - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch)? - fileSearch, - TResult? Function(String type, FunctionObject function)? function, + TResult? Function(List value)? parts, + TResult? Function(String value)? string, }) { - return function?.call(type, this.function); + return string?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type)? codeInterpreter, - TResult Function( - String type, - @JsonKey(name: 'file_search', includeIfNull: false) - AssistantToolsFileSearchFileSearch? fileSearch)? - fileSearch, - TResult Function(String type, FunctionObject function)? function, + TResult Function(List value)? parts, + TResult Function(String value)? 
string, required TResult orElse(), }) { - if (function != null) { - return function(type, this.function); + if (string != null) { + return string(value); } return orElse(); } @@ -59464,617 +51995,510 @@ class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { @override @optionalTypeArgs TResult map({ - required TResult Function(AssistantToolsCodeInterpreter value) - codeInterpreter, - required TResult Function(AssistantToolsFileSearch value) fileSearch, - required TResult Function(AssistantToolsFunction value) function, + required TResult Function(ChatCompletionMessageContentParts value) parts, + required TResult Function(ChatCompletionUserMessageContentString value) + string, }) { - return function(this); + return string(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult? Function(AssistantToolsFileSearch value)? fileSearch, - TResult? Function(AssistantToolsFunction value)? function, + TResult? Function(ChatCompletionMessageContentParts value)? parts, + TResult? Function(ChatCompletionUserMessageContentString value)? string, }) { - return function?.call(this); + return string?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult Function(AssistantToolsFileSearch value)? fileSearch, - TResult Function(AssistantToolsFunction value)? function, + TResult Function(ChatCompletionMessageContentParts value)? parts, + TResult Function(ChatCompletionUserMessageContentString value)? string, required TResult orElse(), }) { - if (function != null) { - return function(this); + if (string != null) { + return string(this); } return orElse(); } @override Map toJson() { - return _$$AssistantToolsFunctionImplToJson( + return _$$ChatCompletionUserMessageContentStringImplToJson( this, ); } } -abstract class AssistantToolsFunction extends AssistantTools { - const factory AssistantToolsFunction( - {final String type, - required final FunctionObject function}) = _$AssistantToolsFunctionImpl; - const AssistantToolsFunction._() : super._(); - - factory AssistantToolsFunction.fromJson(Map json) = - _$AssistantToolsFunctionImpl.fromJson; - - /// The type of tool being defined: `function` - @override - String get type; +abstract class ChatCompletionUserMessageContentString + extends ChatCompletionUserMessageContent { + const factory ChatCompletionUserMessageContentString(final String value) = + _$ChatCompletionUserMessageContentStringImpl; + const ChatCompletionUserMessageContentString._() : super._(); - /// A function that the model may call. - FunctionObject get function; + factory ChatCompletionUserMessageContentString.fromJson( + Map json) = + _$ChatCompletionUserMessageContentStringImpl.fromJson; - /// Create a copy of AssistantTools - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$AssistantToolsFunctionImplCopyWith<_$AssistantToolsFunctionImpl> + String get value; + @JsonKey(ignore: true) + _$$ChatCompletionUserMessageContentStringImplCopyWith< + _$ChatCompletionUserMessageContentStringImpl> get copyWith => throw _privateConstructorUsedError; } -AssistantToolsFileSearchFileSearch _$AssistantToolsFileSearchFileSearchFromJson( +ChatCompletionMessageContentPart _$ChatCompletionMessageContentPartFromJson( Map json) { - return _AssistantToolsFileSearchFileSearch.fromJson(json); + switch (json['type']) { + case 'text': + return ChatCompletionMessageContentPartText.fromJson(json); + case 'image_url': + return ChatCompletionMessageContentPartImage.fromJson(json); + + default: + throw CheckedFromJsonException( + json, + 'type', + 'ChatCompletionMessageContentPart', + 'Invalid union type "${json['type']}"!'); + } } /// @nodoc -mixin _$AssistantToolsFileSearchFileSearch { - /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models - /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. - /// - /// Note that the file search tool may output fewer than `max_num_results` results. See the - /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) - /// for more information. - @JsonKey(name: 'max_num_results', includeIfNull: false) - int? get maxNumResults => throw _privateConstructorUsedError; - - /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and - /// a score_threshold of 0. - /// - /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) - /// for more information. - @JsonKey(name: 'ranking_options', includeIfNull: false) - FileSearchRankingOptions? get rankingOptions => +mixin _$ChatCompletionMessageContentPart { + /// The type of the content part, in this case `text`. + ChatCompletionMessageContentPartType get type => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function( + ChatCompletionMessageContentPartType type, String text) + text, + required TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) + image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult? Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionMessageContentPartText value) text, + required TResult Function(ChatCompletionMessageContentPartImage value) + image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionMessageContentPartText value)? text, + TResult? 
Function(ChatCompletionMessageContentPartImage value)? image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionMessageContentPartText value)? text, + TResult Function(ChatCompletionMessageContentPartImage value)? image, + required TResult orElse(), + }) => throw _privateConstructorUsedError; - - /// Serializes this AssistantToolsFileSearchFileSearch to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of AssistantToolsFileSearchFileSearch - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $AssistantToolsFileSearchFileSearchCopyWith< - AssistantToolsFileSearchFileSearch> + @JsonKey(ignore: true) + $ChatCompletionMessageContentPartCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $AssistantToolsFileSearchFileSearchCopyWith<$Res> { - factory $AssistantToolsFileSearchFileSearchCopyWith( - AssistantToolsFileSearchFileSearch value, - $Res Function(AssistantToolsFileSearchFileSearch) then) = - _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, - AssistantToolsFileSearchFileSearch>; +abstract class $ChatCompletionMessageContentPartCopyWith<$Res> { + factory $ChatCompletionMessageContentPartCopyWith( + ChatCompletionMessageContentPart value, + $Res Function(ChatCompletionMessageContentPart) then) = + _$ChatCompletionMessageContentPartCopyWithImpl<$Res, + ChatCompletionMessageContentPart>; @useResult - $Res call( - {@JsonKey(name: 'max_num_results', includeIfNull: false) - int? maxNumResults, - @JsonKey(name: 'ranking_options', includeIfNull: false) - FileSearchRankingOptions? rankingOptions}); - - $FileSearchRankingOptionsCopyWith<$Res>? get rankingOptions; + $Res call({ChatCompletionMessageContentPartType type}); } /// @nodoc -class _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, - $Val extends AssistantToolsFileSearchFileSearch> - implements $AssistantToolsFileSearchFileSearchCopyWith<$Res> { - _$AssistantToolsFileSearchFileSearchCopyWithImpl(this._value, this._then); +class _$ChatCompletionMessageContentPartCopyWithImpl<$Res, + $Val extends ChatCompletionMessageContentPart> + implements $ChatCompletionMessageContentPartCopyWith<$Res> { + _$ChatCompletionMessageContentPartCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of AssistantToolsFileSearchFileSearch - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? maxNumResults = freezed, - Object? rankingOptions = freezed, + Object? type = null, }) { return _then(_value.copyWith( - maxNumResults: freezed == maxNumResults - ? _value.maxNumResults - : maxNumResults // ignore: cast_nullable_to_non_nullable - as int?, - rankingOptions: freezed == rankingOptions - ? _value.rankingOptions - : rankingOptions // ignore: cast_nullable_to_non_nullable - as FileSearchRankingOptions?, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageContentPartType, ) as $Val); } - - /// Create a copy of AssistantToolsFileSearchFileSearch - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $FileSearchRankingOptionsCopyWith<$Res>? 
get rankingOptions { - if (_value.rankingOptions == null) { - return null; - } - - return $FileSearchRankingOptionsCopyWith<$Res>(_value.rankingOptions!, - (value) { - return _then(_value.copyWith(rankingOptions: value) as $Val); - }); - } } /// @nodoc -abstract class _$$AssistantToolsFileSearchFileSearchImplCopyWith<$Res> - implements $AssistantToolsFileSearchFileSearchCopyWith<$Res> { - factory _$$AssistantToolsFileSearchFileSearchImplCopyWith( - _$AssistantToolsFileSearchFileSearchImpl value, - $Res Function(_$AssistantToolsFileSearchFileSearchImpl) then) = - __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl<$Res>; +abstract class _$$ChatCompletionMessageContentPartTextImplCopyWith<$Res> + implements $ChatCompletionMessageContentPartCopyWith<$Res> { + factory _$$ChatCompletionMessageContentPartTextImplCopyWith( + _$ChatCompletionMessageContentPartTextImpl value, + $Res Function(_$ChatCompletionMessageContentPartTextImpl) then) = + __$$ChatCompletionMessageContentPartTextImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(name: 'max_num_results', includeIfNull: false) - int? maxNumResults, - @JsonKey(name: 'ranking_options', includeIfNull: false) - FileSearchRankingOptions? rankingOptions}); - - @override - $FileSearchRankingOptionsCopyWith<$Res>? get rankingOptions; + $Res call({ChatCompletionMessageContentPartType type, String text}); } /// @nodoc -class __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl<$Res> - extends _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, - _$AssistantToolsFileSearchFileSearchImpl> - implements _$$AssistantToolsFileSearchFileSearchImplCopyWith<$Res> { - __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl( - _$AssistantToolsFileSearchFileSearchImpl _value, - $Res Function(_$AssistantToolsFileSearchFileSearchImpl) _then) +class __$$ChatCompletionMessageContentPartTextImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageContentPartCopyWithImpl<$Res, + _$ChatCompletionMessageContentPartTextImpl> + implements _$$ChatCompletionMessageContentPartTextImplCopyWith<$Res> { + __$$ChatCompletionMessageContentPartTextImplCopyWithImpl( + _$ChatCompletionMessageContentPartTextImpl _value, + $Res Function(_$ChatCompletionMessageContentPartTextImpl) _then) : super(_value, _then); - /// Create a copy of AssistantToolsFileSearchFileSearch - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? maxNumResults = freezed, - Object? rankingOptions = freezed, + Object? type = null, + Object? text = null, }) { - return _then(_$AssistantToolsFileSearchFileSearchImpl( - maxNumResults: freezed == maxNumResults - ? _value.maxNumResults - : maxNumResults // ignore: cast_nullable_to_non_nullable - as int?, - rankingOptions: freezed == rankingOptions - ? _value.rankingOptions - : rankingOptions // ignore: cast_nullable_to_non_nullable - as FileSearchRankingOptions?, + return _then(_$ChatCompletionMessageContentPartTextImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageContentPartType, + text: null == text + ? 
_value.text + : text // ignore: cast_nullable_to_non_nullable + as String, )); } } /// @nodoc @JsonSerializable() -class _$AssistantToolsFileSearchFileSearchImpl - extends _AssistantToolsFileSearchFileSearch { - const _$AssistantToolsFileSearchFileSearchImpl( - {@JsonKey(name: 'max_num_results', includeIfNull: false) - this.maxNumResults, - @JsonKey(name: 'ranking_options', includeIfNull: false) - this.rankingOptions}) +class _$ChatCompletionMessageContentPartTextImpl + extends ChatCompletionMessageContentPartText { + const _$ChatCompletionMessageContentPartTextImpl( + {this.type = ChatCompletionMessageContentPartType.text, + required this.text}) : super._(); - factory _$AssistantToolsFileSearchFileSearchImpl.fromJson( + factory _$ChatCompletionMessageContentPartTextImpl.fromJson( Map json) => - _$$AssistantToolsFileSearchFileSearchImplFromJson(json); + _$$ChatCompletionMessageContentPartTextImplFromJson(json); - /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models - /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. - /// - /// Note that the file search tool may output fewer than `max_num_results` results. See the - /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) - /// for more information. + /// The type of the content part, in this case `text`. @override - @JsonKey(name: 'max_num_results', includeIfNull: false) - final int? maxNumResults; + @JsonKey() + final ChatCompletionMessageContentPartType type; - /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and - /// a score_threshold of 0. - /// - /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) - /// for more information. + /// The text content. @override - @JsonKey(name: 'ranking_options', includeIfNull: false) - final FileSearchRankingOptions? rankingOptions; + final String text; @override String toString() { - return 'AssistantToolsFileSearchFileSearch(maxNumResults: $maxNumResults, rankingOptions: $rankingOptions)'; + return 'ChatCompletionMessageContentPart.text(type: $type, text: $text)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$AssistantToolsFileSearchFileSearchImpl && - (identical(other.maxNumResults, maxNumResults) || - other.maxNumResults == maxNumResults) && - (identical(other.rankingOptions, rankingOptions) || - other.rankingOptions == rankingOptions)); + other is _$ChatCompletionMessageContentPartTextImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.text, text) || other.text == text)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, maxNumResults, rankingOptions); + int get hashCode => Object.hash(runtimeType, type, text); - /// Create a copy of AssistantToolsFileSearchFileSearch - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$AssistantToolsFileSearchFileSearchImplCopyWith< - _$AssistantToolsFileSearchFileSearchImpl> - get copyWith => __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl< - _$AssistantToolsFileSearchFileSearchImpl>(this, _$identity); + _$$ChatCompletionMessageContentPartTextImplCopyWith< + _$ChatCompletionMessageContentPartTextImpl> + get copyWith => __$$ChatCompletionMessageContentPartTextImplCopyWithImpl< + _$ChatCompletionMessageContentPartTextImpl>(this, _$identity); @override - Map toJson() { - return _$$AssistantToolsFileSearchFileSearchImplToJson( - this, - ); + @optionalTypeArgs + TResult when({ + required TResult Function( + ChatCompletionMessageContentPartType type, String text) + text, + required TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) + image, + }) { + return text(type, this.text); } -} - -abstract class _AssistantToolsFileSearchFileSearch - extends AssistantToolsFileSearchFileSearch { - const factory _AssistantToolsFileSearchFileSearch( - {@JsonKey(name: 'max_num_results', includeIfNull: false) - final int? maxNumResults, - @JsonKey(name: 'ranking_options', includeIfNull: false) - final FileSearchRankingOptions? rankingOptions}) = - _$AssistantToolsFileSearchFileSearchImpl; - const _AssistantToolsFileSearchFileSearch._() : super._(); - - factory _AssistantToolsFileSearchFileSearch.fromJson( - Map json) = - _$AssistantToolsFileSearchFileSearchImpl.fromJson; - - /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models - /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. - /// - /// Note that the file search tool may output fewer than `max_num_results` results. See the - /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) - /// for more information. - @override - @JsonKey(name: 'max_num_results', includeIfNull: false) - int? get maxNumResults; - - /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and - /// a score_threshold of 0. - /// - /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) - /// for more information. - @override - @JsonKey(name: 'ranking_options', includeIfNull: false) - FileSearchRankingOptions? get rankingOptions; - /// Create a copy of AssistantToolsFileSearchFileSearch - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$AssistantToolsFileSearchFileSearchImplCopyWith< - _$AssistantToolsFileSearchFileSearchImpl> - get copyWith => throw _privateConstructorUsedError; -} - -MessageContent _$MessageContentFromJson(Map json) { - switch (json['type']) { - case 'image_file': - return MessageContentImageFileObject.fromJson(json); - case 'image_url': - return MessageContentImageUrlObject.fromJson(json); - case 'text': - return MessageContentTextObject.fromJson(json); - case 'refusal': - return MessageContentRefusalObject.fromJson(json); - - default: - throw CheckedFromJsonException(json, 'type', 'MessageContent', - 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$MessageContent { - /// Always `image_file`. 
- String get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile) - imageFile, - required TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) - imageUrl, - required TResult Function(String type, MessageContentText text) text, - required TResult Function(String type, String refusal) refusal, - }) => - throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult? Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult? Function(String type, MessageContentText text)? text, - TResult? Function(String type, String refusal)? refusal, - }) => - throw _privateConstructorUsedError; + TResult? Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult? Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, + }) { + return text?.call(type, this.text); + } + + @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult Function(String type, MessageContentText text)? text, - TResult Function(String type, String refusal)? refusal, + TResult Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, required TResult orElse(), - }) => - throw _privateConstructorUsedError; + }) { + if (text != null) { + return text(type, this.text); + } + return orElse(); + } + + @override @optionalTypeArgs TResult map({ - required TResult Function(MessageContentImageFileObject value) imageFile, - required TResult Function(MessageContentImageUrlObject value) imageUrl, - required TResult Function(MessageContentTextObject value) text, - required TResult Function(MessageContentRefusalObject value) refusal, - }) => - throw _privateConstructorUsedError; + required TResult Function(ChatCompletionMessageContentPartText value) text, + required TResult Function(ChatCompletionMessageContentPartImage value) + image, + }) { + return text(this); + } + + @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentImageFileObject value)? imageFile, - TResult? Function(MessageContentImageUrlObject value)? imageUrl, - TResult? Function(MessageContentTextObject value)? text, - TResult? Function(MessageContentRefusalObject value)? refusal, - }) => - throw _privateConstructorUsedError; + TResult? Function(ChatCompletionMessageContentPartText value)? text, + TResult? Function(ChatCompletionMessageContentPartImage value)? image, + }) { + return text?.call(this); + } + + @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentImageFileObject value)? imageFile, - TResult Function(MessageContentImageUrlObject value)? imageUrl, - TResult Function(MessageContentTextObject value)? text, - TResult Function(MessageContentRefusalObject value)? refusal, + TResult Function(ChatCompletionMessageContentPartText value)? 
text, + TResult Function(ChatCompletionMessageContentPartImage value)? image, required TResult orElse(), - }) => - throw _privateConstructorUsedError; - - /// Serializes this MessageContent to a JSON map. - Map toJson() => throw _privateConstructorUsedError; + }) { + if (text != null) { + return text(this); + } + return orElse(); + } - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $MessageContentCopyWith get copyWith => - throw _privateConstructorUsedError; + @override + Map toJson() { + return _$$ChatCompletionMessageContentPartTextImplToJson( + this, + ); + } } -/// @nodoc -abstract class $MessageContentCopyWith<$Res> { - factory $MessageContentCopyWith( - MessageContent value, $Res Function(MessageContent) then) = - _$MessageContentCopyWithImpl<$Res, MessageContent>; - @useResult - $Res call({String type}); -} +abstract class ChatCompletionMessageContentPartText + extends ChatCompletionMessageContentPart { + const factory ChatCompletionMessageContentPartText( + {final ChatCompletionMessageContentPartType type, + required final String text}) = _$ChatCompletionMessageContentPartTextImpl; + const ChatCompletionMessageContentPartText._() : super._(); -/// @nodoc -class _$MessageContentCopyWithImpl<$Res, $Val extends MessageContent> - implements $MessageContentCopyWith<$Res> { - _$MessageContentCopyWithImpl(this._value, this._then); + factory ChatCompletionMessageContentPartText.fromJson( + Map json) = + _$ChatCompletionMessageContentPartTextImpl.fromJson; - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + @override - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') + /// The type of the content part, in this case `text`. + ChatCompletionMessageContentPartType get type; + + /// The text content. + String get text; @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } + @JsonKey(ignore: true) + _$$ChatCompletionMessageContentPartTextImplCopyWith< + _$ChatCompletionMessageContentPartTextImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageContentImageFileObjectImplCopyWith<$Res> - implements $MessageContentCopyWith<$Res> { - factory _$$MessageContentImageFileObjectImplCopyWith( - _$MessageContentImageFileObjectImpl value, - $Res Function(_$MessageContentImageFileObjectImpl) then) = - __$$MessageContentImageFileObjectImplCopyWithImpl<$Res>; +abstract class _$$ChatCompletionMessageContentPartImageImplCopyWith<$Res> + implements $ChatCompletionMessageContentPartCopyWith<$Res> { + factory _$$ChatCompletionMessageContentPartImageImplCopyWith( + _$ChatCompletionMessageContentPartImageImpl value, + $Res Function(_$ChatCompletionMessageContentPartImageImpl) then) = + __$$ChatCompletionMessageContentPartImageImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile}); + {ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl}); - $MessageContentImageFileCopyWith<$Res> get imageFile; + $ChatCompletionMessageImageUrlCopyWith<$Res> get imageUrl; } /// @nodoc -class __$$MessageContentImageFileObjectImplCopyWithImpl<$Res> - extends _$MessageContentCopyWithImpl<$Res, - _$MessageContentImageFileObjectImpl> - implements _$$MessageContentImageFileObjectImplCopyWith<$Res> { - __$$MessageContentImageFileObjectImplCopyWithImpl( - _$MessageContentImageFileObjectImpl _value, - $Res Function(_$MessageContentImageFileObjectImpl) _then) +class __$$ChatCompletionMessageContentPartImageImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageContentPartCopyWithImpl<$Res, + _$ChatCompletionMessageContentPartImageImpl> + implements _$$ChatCompletionMessageContentPartImageImplCopyWith<$Res> { + __$$ChatCompletionMessageContentPartImageImplCopyWithImpl( + _$ChatCompletionMessageContentPartImageImpl _value, + $Res Function(_$ChatCompletionMessageContentPartImageImpl) _then) : super(_value, _then); - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? type = null, - Object? imageFile = null, + Object? imageUrl = null, }) { - return _then(_$MessageContentImageFileObjectImpl( + return _then(_$ChatCompletionMessageContentPartImageImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable - as String, - imageFile: null == imageFile - ? _value.imageFile - : imageFile // ignore: cast_nullable_to_non_nullable - as MessageContentImageFile, + as ChatCompletionMessageContentPartType, + imageUrl: null == imageUrl + ? _value.imageUrl + : imageUrl // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageImageUrl, )); } - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $MessageContentImageFileCopyWith<$Res> get imageFile { - return $MessageContentImageFileCopyWith<$Res>(_value.imageFile, (value) { - return _then(_value.copyWith(imageFile: value)); + $ChatCompletionMessageImageUrlCopyWith<$Res> get imageUrl { + return $ChatCompletionMessageImageUrlCopyWith<$Res>(_value.imageUrl, + (value) { + return _then(_value.copyWith(imageUrl: value)); }); } } /// @nodoc @JsonSerializable() -class _$MessageContentImageFileObjectImpl - extends MessageContentImageFileObject { - const _$MessageContentImageFileObjectImpl( - {this.type = 'image_file', - @JsonKey(name: 'image_file') required this.imageFile}) +class _$ChatCompletionMessageContentPartImageImpl + extends ChatCompletionMessageContentPartImage { + const _$ChatCompletionMessageContentPartImageImpl( + {this.type = ChatCompletionMessageContentPartType.imageUrl, + @JsonKey(name: 'image_url') required this.imageUrl}) : super._(); - factory _$MessageContentImageFileObjectImpl.fromJson( + factory _$ChatCompletionMessageContentPartImageImpl.fromJson( Map json) => - _$$MessageContentImageFileObjectImplFromJson(json); + _$$ChatCompletionMessageContentPartImageImplFromJson(json); - /// Always `image_file`. + /// The type of the content part, in this case `image_url`. @override @JsonKey() - final String type; + final ChatCompletionMessageContentPartType type; - /// The image file that is part of a message. + /// The URL of the image. @override - @JsonKey(name: 'image_file') - final MessageContentImageFile imageFile; + @JsonKey(name: 'image_url') + final ChatCompletionMessageImageUrl imageUrl; @override String toString() { - return 'MessageContent.imageFile(type: $type, imageFile: $imageFile)'; + return 'ChatCompletionMessageContentPart.image(type: $type, imageUrl: $imageUrl)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentImageFileObjectImpl && + other is _$ChatCompletionMessageContentPartImageImpl && (identical(other.type, type) || other.type == type) && - (identical(other.imageFile, imageFile) || - other.imageFile == imageFile)); + (identical(other.imageUrl, imageUrl) || + other.imageUrl == imageUrl)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, imageFile); + int get hashCode => Object.hash(runtimeType, type, imageUrl); - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageContentImageFileObjectImplCopyWith< - _$MessageContentImageFileObjectImpl> - get copyWith => __$$MessageContentImageFileObjectImplCopyWithImpl< - _$MessageContentImageFileObjectImpl>(this, _$identity); + _$$ChatCompletionMessageContentPartImageImplCopyWith< + _$ChatCompletionMessageContentPartImageImpl> + get copyWith => __$$ChatCompletionMessageContentPartImageImplCopyWithImpl< + _$ChatCompletionMessageContentPartImageImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile) - imageFile, - required TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) - imageUrl, - required TResult Function(String type, MessageContentText text) text, - required TResult Function(String type, String refusal) refusal, + required TResult Function( + ChatCompletionMessageContentPartType type, String text) + text, + required TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) + image, }) { - return imageFile(type, this.imageFile); + return image(type, imageUrl); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult? Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult? Function(String type, MessageContentText text)? text, - TResult? Function(String type, String refusal)? refusal, + TResult? Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult? Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, }) { - return imageFile?.call(type, this.imageFile); + return image?.call(type, imageUrl); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult Function(String type, MessageContentText text)? text, - TResult Function(String type, String refusal)? refusal, + TResult Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, required TResult orElse(), }) { - if (imageFile != null) { - return imageFile(type, this.imageFile); + if (image != null) { + return image(type, imageUrl); } return orElse(); } @@ -60082,450 +52506,464 @@ class _$MessageContentImageFileObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(MessageContentImageFileObject value) imageFile, - required TResult Function(MessageContentImageUrlObject value) imageUrl, - required TResult Function(MessageContentTextObject value) text, - required TResult Function(MessageContentRefusalObject value) refusal, + required TResult Function(ChatCompletionMessageContentPartText value) text, + required TResult Function(ChatCompletionMessageContentPartImage value) + image, }) { - return imageFile(this); + return image(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentImageFileObject value)? 
imageFile, - TResult? Function(MessageContentImageUrlObject value)? imageUrl, - TResult? Function(MessageContentTextObject value)? text, - TResult? Function(MessageContentRefusalObject value)? refusal, + TResult? Function(ChatCompletionMessageContentPartText value)? text, + TResult? Function(ChatCompletionMessageContentPartImage value)? image, }) { - return imageFile?.call(this); + return image?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentImageFileObject value)? imageFile, - TResult Function(MessageContentImageUrlObject value)? imageUrl, - TResult Function(MessageContentTextObject value)? text, - TResult Function(MessageContentRefusalObject value)? refusal, + TResult Function(ChatCompletionMessageContentPartText value)? text, + TResult Function(ChatCompletionMessageContentPartImage value)? image, required TResult orElse(), }) { - if (imageFile != null) { - return imageFile(this); + if (image != null) { + return image(this); } return orElse(); } @override Map toJson() { - return _$$MessageContentImageFileObjectImplToJson( + return _$$ChatCompletionMessageContentPartImageImplToJson( this, ); } } -abstract class MessageContentImageFileObject extends MessageContent { - const factory MessageContentImageFileObject( - {final String type, - @JsonKey(name: 'image_file') - required final MessageContentImageFile imageFile}) = - _$MessageContentImageFileObjectImpl; - const MessageContentImageFileObject._() : super._(); +abstract class ChatCompletionMessageContentPartImage + extends ChatCompletionMessageContentPart { + const factory ChatCompletionMessageContentPartImage( + {final ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') + required final ChatCompletionMessageImageUrl imageUrl}) = + _$ChatCompletionMessageContentPartImageImpl; + const ChatCompletionMessageContentPartImage._() : super._(); - factory MessageContentImageFileObject.fromJson(Map json) = - _$MessageContentImageFileObjectImpl.fromJson; + factory ChatCompletionMessageContentPartImage.fromJson( + Map json) = + _$ChatCompletionMessageContentPartImageImpl.fromJson; - /// Always `image_file`. @override - String get type; - /// The image file that is part of a message. - @JsonKey(name: 'image_file') - MessageContentImageFile get imageFile; + /// The type of the content part, in this case `image_url`. + ChatCompletionMessageContentPartType get type; + + /// The URL of the image. + @JsonKey(name: 'image_url') + ChatCompletionMessageImageUrl get imageUrl; + @override + @JsonKey(ignore: true) + _$$ChatCompletionMessageContentPartImageImplCopyWith< + _$ChatCompletionMessageContentPartImageImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ChatCompletionMessageImageUrl _$ChatCompletionMessageImageUrlFromJson( + Map json) { + return _ChatCompletionMessageImageUrl.fromJson(json); +} + +/// @nodoc +mixin _$ChatCompletionMessageImageUrl { + /// Either a URL of the image or the base64 encoded image data. + String get url => throw _privateConstructorUsedError; + + /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). 
+ ChatCompletionMessageImageDetail get detail => + throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ChatCompletionMessageImageUrlCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChatCompletionMessageImageUrlCopyWith<$Res> { + factory $ChatCompletionMessageImageUrlCopyWith( + ChatCompletionMessageImageUrl value, + $Res Function(ChatCompletionMessageImageUrl) then) = + _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, + ChatCompletionMessageImageUrl>; + @useResult + $Res call({String url, ChatCompletionMessageImageDetail detail}); +} + +/// @nodoc +class _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, + $Val extends ChatCompletionMessageImageUrl> + implements $ChatCompletionMessageImageUrlCopyWith<$Res> { + _$ChatCompletionMessageImageUrlCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageContentImageFileObjectImplCopyWith< - _$MessageContentImageFileObjectImpl> - get copyWith => throw _privateConstructorUsedError; + $Res call({ + Object? url = null, + Object? detail = null, + }) { + return _then(_value.copyWith( + url: null == url + ? _value.url + : url // ignore: cast_nullable_to_non_nullable + as String, + detail: null == detail + ? _value.detail + : detail // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageImageDetail, + ) as $Val); + } } /// @nodoc -abstract class _$$MessageContentImageUrlObjectImplCopyWith<$Res> - implements $MessageContentCopyWith<$Res> { - factory _$$MessageContentImageUrlObjectImplCopyWith( - _$MessageContentImageUrlObjectImpl value, - $Res Function(_$MessageContentImageUrlObjectImpl) then) = - __$$MessageContentImageUrlObjectImplCopyWithImpl<$Res>; +abstract class _$$ChatCompletionMessageImageUrlImplCopyWith<$Res> + implements $ChatCompletionMessageImageUrlCopyWith<$Res> { + factory _$$ChatCompletionMessageImageUrlImplCopyWith( + _$ChatCompletionMessageImageUrlImpl value, + $Res Function(_$ChatCompletionMessageImageUrlImpl) then) = + __$$ChatCompletionMessageImageUrlImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl}); - - $MessageContentImageUrlCopyWith<$Res> get imageUrl; + $Res call({String url, ChatCompletionMessageImageDetail detail}); } /// @nodoc -class __$$MessageContentImageUrlObjectImplCopyWithImpl<$Res> - extends _$MessageContentCopyWithImpl<$Res, - _$MessageContentImageUrlObjectImpl> - implements _$$MessageContentImageUrlObjectImplCopyWith<$Res> { - __$$MessageContentImageUrlObjectImplCopyWithImpl( - _$MessageContentImageUrlObjectImpl _value, - $Res Function(_$MessageContentImageUrlObjectImpl) _then) +class __$$ChatCompletionMessageImageUrlImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, + _$ChatCompletionMessageImageUrlImpl> + implements _$$ChatCompletionMessageImageUrlImplCopyWith<$Res> { + __$$ChatCompletionMessageImageUrlImplCopyWithImpl( + _$ChatCompletionMessageImageUrlImpl _value, + $Res Function(_$ChatCompletionMessageImageUrlImpl) _then) : super(_value, _then); - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, - Object? imageUrl = null, + Object? url = null, + Object? detail = null, }) { - return _then(_$MessageContentImageUrlObjectImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable + return _then(_$ChatCompletionMessageImageUrlImpl( + url: null == url + ? _value.url + : url // ignore: cast_nullable_to_non_nullable as String, - imageUrl: null == imageUrl - ? _value.imageUrl - : imageUrl // ignore: cast_nullable_to_non_nullable - as MessageContentImageUrl, + detail: null == detail + ? _value.detail + : detail // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageImageDetail, )); } - - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $MessageContentImageUrlCopyWith<$Res> get imageUrl { - return $MessageContentImageUrlCopyWith<$Res>(_value.imageUrl, (value) { - return _then(_value.copyWith(imageUrl: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { - const _$MessageContentImageUrlObjectImpl( - {this.type = 'image_url', - @JsonKey(name: 'image_url') required this.imageUrl}) +class _$ChatCompletionMessageImageUrlImpl + extends _ChatCompletionMessageImageUrl { + const _$ChatCompletionMessageImageUrlImpl( + {required this.url, this.detail = ChatCompletionMessageImageDetail.auto}) : super._(); - factory _$MessageContentImageUrlObjectImpl.fromJson( + factory _$ChatCompletionMessageImageUrlImpl.fromJson( Map json) => - _$$MessageContentImageUrlObjectImplFromJson(json); + _$$ChatCompletionMessageImageUrlImplFromJson(json); - /// The type of the content part. Always `image_url`. + /// Either a URL of the image or the base64 encoded image data. @override - @JsonKey() - final String type; + final String url; - /// The image URL part of a message. + /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). @override - @JsonKey(name: 'image_url') - final MessageContentImageUrl imageUrl; + @JsonKey() + final ChatCompletionMessageImageDetail detail; @override String toString() { - return 'MessageContent.imageUrl(type: $type, imageUrl: $imageUrl)'; + return 'ChatCompletionMessageImageUrl(url: $url, detail: $detail)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentImageUrlObjectImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.imageUrl, imageUrl) || - other.imageUrl == imageUrl)); + other is _$ChatCompletionMessageImageUrlImpl && + (identical(other.url, url) || other.url == url) && + (identical(other.detail, detail) || other.detail == detail)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, imageUrl); + int get hashCode => Object.hash(runtimeType, url, detail); - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageContentImageUrlObjectImplCopyWith< - _$MessageContentImageUrlObjectImpl> - get copyWith => __$$MessageContentImageUrlObjectImplCopyWithImpl< - _$MessageContentImageUrlObjectImpl>(this, _$identity); + _$$ChatCompletionMessageImageUrlImplCopyWith< + _$ChatCompletionMessageImageUrlImpl> + get copyWith => __$$ChatCompletionMessageImageUrlImplCopyWithImpl< + _$ChatCompletionMessageImageUrlImpl>(this, _$identity); @override - @optionalTypeArgs - TResult when({ - required TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile) - imageFile, - required TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) - imageUrl, - required TResult Function(String type, MessageContentText text) text, - required TResult Function(String type, String refusal) refusal, - }) { - return imageUrl(type, this.imageUrl); + Map toJson() { + return _$$ChatCompletionMessageImageUrlImplToJson( + this, + ); } +} + +abstract class _ChatCompletionMessageImageUrl + extends ChatCompletionMessageImageUrl { + const factory _ChatCompletionMessageImageUrl( + {required final String url, + final ChatCompletionMessageImageDetail detail}) = + _$ChatCompletionMessageImageUrlImpl; + const _ChatCompletionMessageImageUrl._() : super._(); + + factory _ChatCompletionMessageImageUrl.fromJson(Map json) = + _$ChatCompletionMessageImageUrlImpl.fromJson; @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult? Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult? Function(String type, MessageContentText text)? text, - TResult? Function(String type, String refusal)? refusal, - }) { - return imageUrl?.call(type, this.imageUrl); - } + /// Either a URL of the image or the base64 encoded image data. + String get url; + @override + + /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). + ChatCompletionMessageImageDetail get detail; @override + @JsonKey(ignore: true) + _$$ChatCompletionMessageImageUrlImplCopyWith< + _$ChatCompletionMessageImageUrlImpl> + get copyWith => throw _privateConstructorUsedError; +} + +AssistantTools _$AssistantToolsFromJson(Map json) { + switch (json['type']) { + case 'code_interpreter': + return AssistantToolsCodeInterpreter.fromJson(json); + case 'file_search': + return AssistantToolsFileSearch.fromJson(json); + case 'function': + return AssistantToolsFunction.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'type', 'AssistantTools', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$AssistantTools { + /// The type of tool being defined: `code_interpreter` + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String type) codeInterpreter, + required TResult Function(String type) fileSearch, + required TResult Function(String type, FunctionObject function) function, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type)? codeInterpreter, + TResult? Function(String type)? fileSearch, + TResult? Function(String type, FunctionObject function)? 
function, + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult Function(String type, MessageContentText text)? text, - TResult Function(String type, String refusal)? refusal, + TResult Function(String type)? codeInterpreter, + TResult Function(String type)? fileSearch, + TResult Function(String type, FunctionObject function)? function, required TResult orElse(), - }) { - if (imageUrl != null) { - return imageUrl(type, this.imageUrl); - } - return orElse(); - } - - @override + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(MessageContentImageFileObject value) imageFile, - required TResult Function(MessageContentImageUrlObject value) imageUrl, - required TResult Function(MessageContentTextObject value) text, - required TResult Function(MessageContentRefusalObject value) refusal, - }) { - return imageUrl(this); - } - - @override + required TResult Function(AssistantToolsCodeInterpreter value) + codeInterpreter, + required TResult Function(AssistantToolsFileSearch value) fileSearch, + required TResult Function(AssistantToolsFunction value) function, + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentImageFileObject value)? imageFile, - TResult? Function(MessageContentImageUrlObject value)? imageUrl, - TResult? Function(MessageContentTextObject value)? text, - TResult? Function(MessageContentRefusalObject value)? refusal, - }) { - return imageUrl?.call(this); - } - - @override + TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult? Function(AssistantToolsFileSearch value)? fileSearch, + TResult? Function(AssistantToolsFunction value)? function, + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentImageFileObject value)? imageFile, - TResult Function(MessageContentImageUrlObject value)? imageUrl, - TResult Function(MessageContentTextObject value)? text, - TResult Function(MessageContentRefusalObject value)? refusal, + TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult Function(AssistantToolsFileSearch value)? fileSearch, + TResult Function(AssistantToolsFunction value)? 
function, required TResult orElse(), - }) { - if (imageUrl != null) { - return imageUrl(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$MessageContentImageUrlObjectImplToJson( - this, - ); - } + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $AssistantToolsCopyWith get copyWith => + throw _privateConstructorUsedError; } -abstract class MessageContentImageUrlObject extends MessageContent { - const factory MessageContentImageUrlObject( - {final String type, - @JsonKey(name: 'image_url') - required final MessageContentImageUrl imageUrl}) = - _$MessageContentImageUrlObjectImpl; - const MessageContentImageUrlObject._() : super._(); - - factory MessageContentImageUrlObject.fromJson(Map json) = - _$MessageContentImageUrlObjectImpl.fromJson; +/// @nodoc +abstract class $AssistantToolsCopyWith<$Res> { + factory $AssistantToolsCopyWith( + AssistantTools value, $Res Function(AssistantTools) then) = + _$AssistantToolsCopyWithImpl<$Res, AssistantTools>; + @useResult + $Res call({String type}); +} - /// The type of the content part. Always `image_url`. - @override - String get type; +/// @nodoc +class _$AssistantToolsCopyWithImpl<$Res, $Val extends AssistantTools> + implements $AssistantToolsCopyWith<$Res> { + _$AssistantToolsCopyWithImpl(this._value, this._then); - /// The image URL part of a message. - @JsonKey(name: 'image_url') - MessageContentImageUrl get imageUrl; + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageContentImageUrlObjectImplCopyWith< - _$MessageContentImageUrlObjectImpl> - get copyWith => throw _privateConstructorUsedError; + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } } /// @nodoc -abstract class _$$MessageContentTextObjectImplCopyWith<$Res> - implements $MessageContentCopyWith<$Res> { - factory _$$MessageContentTextObjectImplCopyWith( - _$MessageContentTextObjectImpl value, - $Res Function(_$MessageContentTextObjectImpl) then) = - __$$MessageContentTextObjectImplCopyWithImpl<$Res>; +abstract class _$$AssistantToolsCodeInterpreterImplCopyWith<$Res> + implements $AssistantToolsCopyWith<$Res> { + factory _$$AssistantToolsCodeInterpreterImplCopyWith( + _$AssistantToolsCodeInterpreterImpl value, + $Res Function(_$AssistantToolsCodeInterpreterImpl) then) = + __$$AssistantToolsCodeInterpreterImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type, MessageContentText text}); - - $MessageContentTextCopyWith<$Res> get text; + $Res call({String type}); } /// @nodoc -class __$$MessageContentTextObjectImplCopyWithImpl<$Res> - extends _$MessageContentCopyWithImpl<$Res, _$MessageContentTextObjectImpl> - implements _$$MessageContentTextObjectImplCopyWith<$Res> { - __$$MessageContentTextObjectImplCopyWithImpl( - _$MessageContentTextObjectImpl _value, - $Res Function(_$MessageContentTextObjectImpl) _then) +class __$$AssistantToolsCodeInterpreterImplCopyWithImpl<$Res> + extends _$AssistantToolsCopyWithImpl<$Res, + _$AssistantToolsCodeInterpreterImpl> + implements _$$AssistantToolsCodeInterpreterImplCopyWith<$Res> { + __$$AssistantToolsCodeInterpreterImplCopyWithImpl( + _$AssistantToolsCodeInterpreterImpl _value, + $Res Function(_$AssistantToolsCodeInterpreterImpl) _then) : super(_value, _then); - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? type = null, - Object? text = null, }) { - return _then(_$MessageContentTextObjectImpl( + return _then(_$AssistantToolsCodeInterpreterImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - text: null == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as MessageContentText, )); } - - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $MessageContentTextCopyWith<$Res> get text { - return $MessageContentTextCopyWith<$Res>(_value.text, (value) { - return _then(_value.copyWith(text: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$MessageContentTextObjectImpl extends MessageContentTextObject { - const _$MessageContentTextObjectImpl({this.type = 'text', required this.text}) +class _$AssistantToolsCodeInterpreterImpl + extends AssistantToolsCodeInterpreter { + const _$AssistantToolsCodeInterpreterImpl({this.type = 'code_interpreter'}) : super._(); - factory _$MessageContentTextObjectImpl.fromJson(Map json) => - _$$MessageContentTextObjectImplFromJson(json); + factory _$AssistantToolsCodeInterpreterImpl.fromJson( + Map json) => + _$$AssistantToolsCodeInterpreterImplFromJson(json); - /// Always `text`. + /// The type of tool being defined: `code_interpreter` @override @JsonKey() final String type; - /// The text content that is part of a message. 
- @override - final MessageContentText text; - @override String toString() { - return 'MessageContent.text(type: $type, text: $text)'; + return 'AssistantTools.codeInterpreter(type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentTextObjectImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.text, text) || other.text == text)); + other is _$AssistantToolsCodeInterpreterImpl && + (identical(other.type, type) || other.type == type)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, text); + int get hashCode => Object.hash(runtimeType, type); - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageContentTextObjectImplCopyWith<_$MessageContentTextObjectImpl> - get copyWith => __$$MessageContentTextObjectImplCopyWithImpl< - _$MessageContentTextObjectImpl>(this, _$identity); + _$$AssistantToolsCodeInterpreterImplCopyWith< + _$AssistantToolsCodeInterpreterImpl> + get copyWith => __$$AssistantToolsCodeInterpreterImplCopyWithImpl< + _$AssistantToolsCodeInterpreterImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile) - imageFile, - required TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) - imageUrl, - required TResult Function(String type, MessageContentText text) text, - required TResult Function(String type, String refusal) refusal, + required TResult Function(String type) codeInterpreter, + required TResult Function(String type) fileSearch, + required TResult Function(String type, FunctionObject function) function, }) { - return text(type, this.text); + return codeInterpreter(type); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult? Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult? Function(String type, MessageContentText text)? text, - TResult? Function(String type, String refusal)? refusal, + TResult? Function(String type)? codeInterpreter, + TResult? Function(String type)? fileSearch, + TResult? Function(String type, FunctionObject function)? function, }) { - return text?.call(type, this.text); + return codeInterpreter?.call(type); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult Function(String type, MessageContentText text)? text, - TResult Function(String type, String refusal)? refusal, + TResult Function(String type)? codeInterpreter, + TResult Function(String type)? fileSearch, + TResult Function(String type, FunctionObject function)? 
function, required TResult orElse(), }) { - if (text != null) { - return text(type, this.text); + if (codeInterpreter != null) { + return codeInterpreter(type); } return orElse(); } @@ -60533,206 +52971,167 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { @override @optionalTypeArgs TResult map({ - required TResult Function(MessageContentImageFileObject value) imageFile, - required TResult Function(MessageContentImageUrlObject value) imageUrl, - required TResult Function(MessageContentTextObject value) text, - required TResult Function(MessageContentRefusalObject value) refusal, + required TResult Function(AssistantToolsCodeInterpreter value) + codeInterpreter, + required TResult Function(AssistantToolsFileSearch value) fileSearch, + required TResult Function(AssistantToolsFunction value) function, }) { - return text(this); + return codeInterpreter(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentImageFileObject value)? imageFile, - TResult? Function(MessageContentImageUrlObject value)? imageUrl, - TResult? Function(MessageContentTextObject value)? text, - TResult? Function(MessageContentRefusalObject value)? refusal, + TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult? Function(AssistantToolsFileSearch value)? fileSearch, + TResult? Function(AssistantToolsFunction value)? function, }) { - return text?.call(this); + return codeInterpreter?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentImageFileObject value)? imageFile, - TResult Function(MessageContentImageUrlObject value)? imageUrl, - TResult Function(MessageContentTextObject value)? text, - TResult Function(MessageContentRefusalObject value)? refusal, + TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult Function(AssistantToolsFileSearch value)? fileSearch, + TResult Function(AssistantToolsFunction value)? function, required TResult orElse(), }) { - if (text != null) { - return text(this); + if (codeInterpreter != null) { + return codeInterpreter(this); } return orElse(); } @override Map toJson() { - return _$$MessageContentTextObjectImplToJson( + return _$$AssistantToolsCodeInterpreterImplToJson( this, ); } } -abstract class MessageContentTextObject extends MessageContent { - const factory MessageContentTextObject( - {final String type, - required final MessageContentText text}) = _$MessageContentTextObjectImpl; - const MessageContentTextObject._() : super._(); +abstract class AssistantToolsCodeInterpreter extends AssistantTools { + const factory AssistantToolsCodeInterpreter({final String type}) = + _$AssistantToolsCodeInterpreterImpl; + const AssistantToolsCodeInterpreter._() : super._(); - factory MessageContentTextObject.fromJson(Map json) = - _$MessageContentTextObjectImpl.fromJson; + factory AssistantToolsCodeInterpreter.fromJson(Map json) = + _$AssistantToolsCodeInterpreterImpl.fromJson; - /// Always `text`. @override - String get type; - /// The text content that is part of a message. - MessageContentText get text; - - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. 
+ /// The type of tool being defined: `code_interpreter` + String get type; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageContentTextObjectImplCopyWith<_$MessageContentTextObjectImpl> + @JsonKey(ignore: true) + _$$AssistantToolsCodeInterpreterImplCopyWith< + _$AssistantToolsCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageContentRefusalObjectImplCopyWith<$Res> - implements $MessageContentCopyWith<$Res> { - factory _$$MessageContentRefusalObjectImplCopyWith( - _$MessageContentRefusalObjectImpl value, - $Res Function(_$MessageContentRefusalObjectImpl) then) = - __$$MessageContentRefusalObjectImplCopyWithImpl<$Res>; +abstract class _$$AssistantToolsFileSearchImplCopyWith<$Res> + implements $AssistantToolsCopyWith<$Res> { + factory _$$AssistantToolsFileSearchImplCopyWith( + _$AssistantToolsFileSearchImpl value, + $Res Function(_$AssistantToolsFileSearchImpl) then) = + __$$AssistantToolsFileSearchImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type, String refusal}); + $Res call({String type}); } /// @nodoc -class __$$MessageContentRefusalObjectImplCopyWithImpl<$Res> - extends _$MessageContentCopyWithImpl<$Res, - _$MessageContentRefusalObjectImpl> - implements _$$MessageContentRefusalObjectImplCopyWith<$Res> { - __$$MessageContentRefusalObjectImplCopyWithImpl( - _$MessageContentRefusalObjectImpl _value, - $Res Function(_$MessageContentRefusalObjectImpl) _then) +class __$$AssistantToolsFileSearchImplCopyWithImpl<$Res> + extends _$AssistantToolsCopyWithImpl<$Res, _$AssistantToolsFileSearchImpl> + implements _$$AssistantToolsFileSearchImplCopyWith<$Res> { + __$$AssistantToolsFileSearchImplCopyWithImpl( + _$AssistantToolsFileSearchImpl _value, + $Res Function(_$AssistantToolsFileSearchImpl) _then) : super(_value, _then); - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? type = null, - Object? refusal = null, }) { - return _then(_$MessageContentRefusalObjectImpl( + return _then(_$AssistantToolsFileSearchImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - refusal: null == refusal - ? _value.refusal - : refusal // ignore: cast_nullable_to_non_nullable - as String, )); } } /// @nodoc @JsonSerializable() -class _$MessageContentRefusalObjectImpl extends MessageContentRefusalObject { - const _$MessageContentRefusalObjectImpl( - {required this.type, required this.refusal}) - : super._(); +class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { + const _$AssistantToolsFileSearchImpl({this.type = 'file_search'}) : super._(); - factory _$MessageContentRefusalObjectImpl.fromJson( - Map json) => - _$$MessageContentRefusalObjectImplFromJson(json); + factory _$AssistantToolsFileSearchImpl.fromJson(Map json) => + _$$AssistantToolsFileSearchImplFromJson(json); - /// Always `refusal`. 
+ /// The type of tool being defined: `file_search` @override + @JsonKey() final String type; - /// No Description - @override - final String refusal; - @override String toString() { - return 'MessageContent.refusal(type: $type, refusal: $refusal)'; + return 'AssistantTools.fileSearch(type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentRefusalObjectImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.refusal, refusal) || other.refusal == refusal)); + other is _$AssistantToolsFileSearchImpl && + (identical(other.type, type) || other.type == type)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, refusal); + int get hashCode => Object.hash(runtimeType, type); - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageContentRefusalObjectImplCopyWith<_$MessageContentRefusalObjectImpl> - get copyWith => __$$MessageContentRefusalObjectImplCopyWithImpl< - _$MessageContentRefusalObjectImpl>(this, _$identity); + _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> + get copyWith => __$$AssistantToolsFileSearchImplCopyWithImpl< + _$AssistantToolsFileSearchImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile) - imageFile, - required TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) - imageUrl, - required TResult Function(String type, MessageContentText text) text, - required TResult Function(String type, String refusal) refusal, + required TResult Function(String type) codeInterpreter, + required TResult Function(String type) fileSearch, + required TResult Function(String type, FunctionObject function) function, }) { - return refusal(type, this.refusal); + return fileSearch(type); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult? Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult? Function(String type, MessageContentText text)? text, - TResult? Function(String type, String refusal)? refusal, + TResult? Function(String type)? codeInterpreter, + TResult? Function(String type)? fileSearch, + TResult? Function(String type, FunctionObject function)? function, }) { - return refusal?.call(type, this.refusal); + return fileSearch?.call(type); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult Function(String type, MessageContentText text)? text, - TResult Function(String type, String refusal)? refusal, + TResult Function(String type)? codeInterpreter, + TResult Function(String type)? fileSearch, + TResult Function(String type, FunctionObject function)? 
function, required TResult orElse(), }) { - if (refusal != null) { - return refusal(type, this.refusal); + if (fileSearch != null) { + return fileSearch(type); } return orElse(); } @@ -60740,440 +53139,189 @@ class _$MessageContentRefusalObjectImpl extends MessageContentRefusalObject { @override @optionalTypeArgs TResult map({ - required TResult Function(MessageContentImageFileObject value) imageFile, - required TResult Function(MessageContentImageUrlObject value) imageUrl, - required TResult Function(MessageContentTextObject value) text, - required TResult Function(MessageContentRefusalObject value) refusal, + required TResult Function(AssistantToolsCodeInterpreter value) + codeInterpreter, + required TResult Function(AssistantToolsFileSearch value) fileSearch, + required TResult Function(AssistantToolsFunction value) function, }) { - return refusal(this); + return fileSearch(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentImageFileObject value)? imageFile, - TResult? Function(MessageContentImageUrlObject value)? imageUrl, - TResult? Function(MessageContentTextObject value)? text, - TResult? Function(MessageContentRefusalObject value)? refusal, + TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult? Function(AssistantToolsFileSearch value)? fileSearch, + TResult? Function(AssistantToolsFunction value)? function, }) { - return refusal?.call(this); + return fileSearch?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentImageFileObject value)? imageFile, - TResult Function(MessageContentImageUrlObject value)? imageUrl, - TResult Function(MessageContentTextObject value)? text, - TResult Function(MessageContentRefusalObject value)? refusal, + TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult Function(AssistantToolsFileSearch value)? fileSearch, + TResult Function(AssistantToolsFunction value)? function, required TResult orElse(), }) { - if (refusal != null) { - return refusal(this); + if (fileSearch != null) { + return fileSearch(this); } return orElse(); } @override Map toJson() { - return _$$MessageContentRefusalObjectImplToJson( + return _$$AssistantToolsFileSearchImplToJson( this, ); } } -abstract class MessageContentRefusalObject extends MessageContent { - const factory MessageContentRefusalObject( - {required final String type, - required final String refusal}) = _$MessageContentRefusalObjectImpl; - const MessageContentRefusalObject._() : super._(); +abstract class AssistantToolsFileSearch extends AssistantTools { + const factory AssistantToolsFileSearch({final String type}) = + _$AssistantToolsFileSearchImpl; + const AssistantToolsFileSearch._() : super._(); - factory MessageContentRefusalObject.fromJson(Map json) = - _$MessageContentRefusalObjectImpl.fromJson; + factory AssistantToolsFileSearch.fromJson(Map json) = + _$AssistantToolsFileSearchImpl.fromJson; - /// Always `refusal`. @override - String get type; - - /// No Description - String get refusal; - /// Create a copy of MessageContent - /// with the given fields replaced by the non-null parameter values. 
+ /// The type of tool being defined: `file_search` + String get type; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageContentRefusalObjectImplCopyWith<_$MessageContentRefusalObjectImpl> + @JsonKey(ignore: true) + _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> get copyWith => throw _privateConstructorUsedError; } -MessageDeltaContent _$MessageDeltaContentFromJson(Map json) { - switch (json['type']) { - case 'image_file': - return MessageDeltaContentImageFileObject.fromJson(json); - case 'text': - return MessageDeltaContentTextObject.fromJson(json); - case 'refusal': - return MessageDeltaContentRefusalObject.fromJson(json); - case 'image_url': - return MessageDeltaContentImageUrlObject.fromJson(json); - - default: - throw CheckedFromJsonException(json, 'type', 'MessageDeltaContent', - 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$MessageDeltaContent { - /// The index of the content part in the message. - int get index => throw _privateConstructorUsedError; - - /// Always `image_file`. - String get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile) - imageFile, - required TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text) - text, - required TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal) - refusal, - required TResult Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl) - imageUrl, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? - imageFile, - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal)? - refusal, - TResult? Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl)? - imageUrl, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? - imageFile, - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal)? - refusal, - TResult Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl)? - imageUrl, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(MessageDeltaContentImageFileObject value) - imageFile, - required TResult Function(MessageDeltaContentTextObject value) text, - required TResult Function(MessageDeltaContentRefusalObject value) refusal, - required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult? 
Function(MessageDeltaContentTextObject value)? text, - TResult? Function(MessageDeltaContentRefusalObject value)? refusal, - TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult Function(MessageDeltaContentTextObject value)? text, - TResult Function(MessageDeltaContentRefusalObject value)? refusal, - TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - - /// Serializes this MessageDeltaContent to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $MessageDeltaContentCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $MessageDeltaContentCopyWith<$Res> { - factory $MessageDeltaContentCopyWith( - MessageDeltaContent value, $Res Function(MessageDeltaContent) then) = - _$MessageDeltaContentCopyWithImpl<$Res, MessageDeltaContent>; - @useResult - $Res call({int index, String type}); -} - -/// @nodoc -class _$MessageDeltaContentCopyWithImpl<$Res, $Val extends MessageDeltaContent> - implements $MessageDeltaContentCopyWith<$Res> { - _$MessageDeltaContentCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? index = null, - Object? type = null, - }) { - return _then(_value.copyWith( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } -} - /// @nodoc -abstract class _$$MessageDeltaContentImageFileObjectImplCopyWith<$Res> - implements $MessageDeltaContentCopyWith<$Res> { - factory _$$MessageDeltaContentImageFileObjectImplCopyWith( - _$MessageDeltaContentImageFileObjectImpl value, - $Res Function(_$MessageDeltaContentImageFileObjectImpl) then) = - __$$MessageDeltaContentImageFileObjectImplCopyWithImpl<$Res>; +abstract class _$$AssistantToolsFunctionImplCopyWith<$Res> + implements $AssistantToolsCopyWith<$Res> { + factory _$$AssistantToolsFunctionImplCopyWith( + _$AssistantToolsFunctionImpl value, + $Res Function(_$AssistantToolsFunctionImpl) then) = + __$$AssistantToolsFunctionImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile}); + $Res call({String type, FunctionObject function}); - $MessageContentImageFileCopyWith<$Res>? 
get imageFile; + $FunctionObjectCopyWith<$Res> get function; } /// @nodoc -class __$$MessageDeltaContentImageFileObjectImplCopyWithImpl<$Res> - extends _$MessageDeltaContentCopyWithImpl<$Res, - _$MessageDeltaContentImageFileObjectImpl> - implements _$$MessageDeltaContentImageFileObjectImplCopyWith<$Res> { - __$$MessageDeltaContentImageFileObjectImplCopyWithImpl( - _$MessageDeltaContentImageFileObjectImpl _value, - $Res Function(_$MessageDeltaContentImageFileObjectImpl) _then) +class __$$AssistantToolsFunctionImplCopyWithImpl<$Res> + extends _$AssistantToolsCopyWithImpl<$Res, _$AssistantToolsFunctionImpl> + implements _$$AssistantToolsFunctionImplCopyWith<$Res> { + __$$AssistantToolsFunctionImplCopyWithImpl( + _$AssistantToolsFunctionImpl _value, + $Res Function(_$AssistantToolsFunctionImpl) _then) : super(_value, _then); - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, - Object? imageFile = freezed, + Object? function = null, }) { - return _then(_$MessageDeltaContentImageFileObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$AssistantToolsFunctionImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - imageFile: freezed == imageFile - ? _value.imageFile - : imageFile // ignore: cast_nullable_to_non_nullable - as MessageContentImageFile?, + function: null == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as FunctionObject, )); } - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $MessageContentImageFileCopyWith<$Res>? get imageFile { - if (_value.imageFile == null) { - return null; - } - - return $MessageContentImageFileCopyWith<$Res>(_value.imageFile!, (value) { - return _then(_value.copyWith(imageFile: value)); + $FunctionObjectCopyWith<$Res> get function { + return $FunctionObjectCopyWith<$Res>(_value.function, (value) { + return _then(_value.copyWith(function: value)); }); } } /// @nodoc @JsonSerializable() -class _$MessageDeltaContentImageFileObjectImpl - extends MessageDeltaContentImageFileObject { - const _$MessageDeltaContentImageFileObjectImpl( - {required this.index, - required this.type, - @JsonKey(name: 'image_file', includeIfNull: false) this.imageFile}) +class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { + const _$AssistantToolsFunctionImpl( + {this.type = 'function', required this.function}) : super._(); - factory _$MessageDeltaContentImageFileObjectImpl.fromJson( - Map json) => - _$$MessageDeltaContentImageFileObjectImplFromJson(json); - - /// The index of the content part in the message. - @override - final int index; + factory _$AssistantToolsFunctionImpl.fromJson(Map json) => + _$$AssistantToolsFunctionImplFromJson(json); - /// Always `image_file`. + /// The type of tool being defined: `function` @override + @JsonKey() final String type; - /// The image file that is part of a message. + /// A function that the model may call. @override - @JsonKey(name: 'image_file', includeIfNull: false) - final MessageContentImageFile? 
imageFile; + final FunctionObject function; @override String toString() { - return 'MessageDeltaContent.imageFile(index: $index, type: $type, imageFile: $imageFile)'; + return 'AssistantTools.function(type: $type, function: $function)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageDeltaContentImageFileObjectImpl && - (identical(other.index, index) || other.index == index) && + other is _$AssistantToolsFunctionImpl && (identical(other.type, type) || other.type == type) && - (identical(other.imageFile, imageFile) || - other.imageFile == imageFile)); + (identical(other.function, function) || + other.function == function)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, index, type, imageFile); + int get hashCode => Object.hash(runtimeType, type, function); - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageDeltaContentImageFileObjectImplCopyWith< - _$MessageDeltaContentImageFileObjectImpl> - get copyWith => __$$MessageDeltaContentImageFileObjectImplCopyWithImpl< - _$MessageDeltaContentImageFileObjectImpl>(this, _$identity); + _$$AssistantToolsFunctionImplCopyWith<_$AssistantToolsFunctionImpl> + get copyWith => __$$AssistantToolsFunctionImplCopyWithImpl< + _$AssistantToolsFunctionImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile) - imageFile, - required TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text) - text, - required TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal) - refusal, - required TResult Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl) - imageUrl, + required TResult Function(String type) codeInterpreter, + required TResult Function(String type) fileSearch, + required TResult Function(String type, FunctionObject function) function, }) { - return imageFile(index, type, this.imageFile); + return function(type, this.function); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? - imageFile, - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal)? - refusal, - TResult? Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl)? - imageUrl, + TResult? Function(String type)? codeInterpreter, + TResult? Function(String type)? fileSearch, + TResult? Function(String type, FunctionObject function)? function, }) { - return imageFile?.call(index, type, this.imageFile); + return function?.call(type, this.function); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? 
- imageFile, - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal)? - refusal, - TResult Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl)? - imageUrl, + TResult Function(String type)? codeInterpreter, + TResult Function(String type)? fileSearch, + TResult Function(String type, FunctionObject function)? function, required TResult orElse(), }) { - if (imageFile != null) { - return imageFile(index, type, this.imageFile); + if (function != null) { + return function(type, this.function); } return orElse(); } @@ -61181,281 +53329,327 @@ class _$MessageDeltaContentImageFileObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(MessageDeltaContentImageFileObject value) - imageFile, - required TResult Function(MessageDeltaContentTextObject value) text, - required TResult Function(MessageDeltaContentRefusalObject value) refusal, - required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, + required TResult Function(AssistantToolsCodeInterpreter value) + codeInterpreter, + required TResult Function(AssistantToolsFileSearch value) fileSearch, + required TResult Function(AssistantToolsFunction value) function, }) { - return imageFile(this); + return function(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult? Function(MessageDeltaContentTextObject value)? text, - TResult? Function(MessageDeltaContentRefusalObject value)? refusal, - TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, + TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult? Function(AssistantToolsFileSearch value)? fileSearch, + TResult? Function(AssistantToolsFunction value)? function, }) { - return imageFile?.call(this); + return function?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult Function(MessageDeltaContentTextObject value)? text, - TResult Function(MessageDeltaContentRefusalObject value)? refusal, - TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, + TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult Function(AssistantToolsFileSearch value)? fileSearch, + TResult Function(AssistantToolsFunction value)? function, required TResult orElse(), }) { - if (imageFile != null) { - return imageFile(this); + if (function != null) { + return function(this); } return orElse(); } @override Map toJson() { - return _$$MessageDeltaContentImageFileObjectImplToJson( + return _$$AssistantToolsFunctionImplToJson( this, ); } } -abstract class MessageDeltaContentImageFileObject extends MessageDeltaContent { - const factory MessageDeltaContentImageFileObject( - {required final int index, - required final String type, - @JsonKey(name: 'image_file', includeIfNull: false) - final MessageContentImageFile? 
imageFile}) = - _$MessageDeltaContentImageFileObjectImpl; - const MessageDeltaContentImageFileObject._() : super._(); +abstract class AssistantToolsFunction extends AssistantTools { + const factory AssistantToolsFunction( + {final String type, + required final FunctionObject function}) = _$AssistantToolsFunctionImpl; + const AssistantToolsFunction._() : super._(); - factory MessageDeltaContentImageFileObject.fromJson( - Map json) = - _$MessageDeltaContentImageFileObjectImpl.fromJson; + factory AssistantToolsFunction.fromJson(Map json) = + _$AssistantToolsFunctionImpl.fromJson; + + @override + + /// The type of tool being defined: `function` + String get type; + + /// A function that the model may call. + FunctionObject get function; + @override + @JsonKey(ignore: true) + _$$AssistantToolsFunctionImplCopyWith<_$AssistantToolsFunctionImpl> + get copyWith => throw _privateConstructorUsedError; +} + +MessageContent _$MessageContentFromJson(Map json) { + switch (json['type']) { + case 'image_file': + return MessageContentImageFileObject.fromJson(json); + case 'image_url': + return MessageContentImageUrlObject.fromJson(json); + case 'text': + return MessageContentTextObject.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'type', 'MessageContent', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$MessageContent { + /// Always `image_file`. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile) + imageFile, + required TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) + imageUrl, + required TResult Function(String type, MessageContentText text) text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult? Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult? Function(String type, MessageContentText text)? text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult Function(String type, MessageContentText text)? text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(MessageContentImageFileObject value) imageFile, + required TResult Function(MessageContentImageUrlObject value) imageUrl, + required TResult Function(MessageContentTextObject value) text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageContentImageFileObject value)? imageFile, + TResult? Function(MessageContentImageUrlObject value)? imageUrl, + TResult? Function(MessageContentTextObject value)? text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageContentImageFileObject value)? imageFile, + TResult Function(MessageContentImageUrlObject value)? imageUrl, + TResult Function(MessageContentTextObject value)? 
text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageContentCopyWith get copyWith => + throw _privateConstructorUsedError; +} - /// The index of the content part in the message. - @override - int get index; +/// @nodoc +abstract class $MessageContentCopyWith<$Res> { + factory $MessageContentCopyWith( + MessageContent value, $Res Function(MessageContent) then) = + _$MessageContentCopyWithImpl<$Res, MessageContent>; + @useResult + $Res call({String type}); +} - /// Always `image_file`. - @override - String get type; +/// @nodoc +class _$MessageContentCopyWithImpl<$Res, $Val extends MessageContent> + implements $MessageContentCopyWith<$Res> { + _$MessageContentCopyWithImpl(this._value, this._then); - /// The image file that is part of a message. - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? get imageFile; + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageDeltaContentImageFileObjectImplCopyWith< - _$MessageDeltaContentImageFileObjectImpl> - get copyWith => throw _privateConstructorUsedError; + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } } /// @nodoc -abstract class _$$MessageDeltaContentTextObjectImplCopyWith<$Res> - implements $MessageDeltaContentCopyWith<$Res> { - factory _$$MessageDeltaContentTextObjectImplCopyWith( - _$MessageDeltaContentTextObjectImpl value, - $Res Function(_$MessageDeltaContentTextObjectImpl) then) = - __$$MessageDeltaContentTextObjectImplCopyWithImpl<$Res>; +abstract class _$$MessageContentImageFileObjectImplCopyWith<$Res> + implements $MessageContentCopyWith<$Res> { + factory _$$MessageContentImageFileObjectImplCopyWith( + _$MessageContentImageFileObjectImpl value, + $Res Function(_$MessageContentImageFileObjectImpl) then) = + __$$MessageContentImageFileObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {int index, - String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text}); + {String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile}); - $MessageDeltaContentTextCopyWith<$Res>? get text; + $MessageContentImageFileCopyWith<$Res> get imageFile; } /// @nodoc -class __$$MessageDeltaContentTextObjectImplCopyWithImpl<$Res> - extends _$MessageDeltaContentCopyWithImpl<$Res, - _$MessageDeltaContentTextObjectImpl> - implements _$$MessageDeltaContentTextObjectImplCopyWith<$Res> { - __$$MessageDeltaContentTextObjectImplCopyWithImpl( - _$MessageDeltaContentTextObjectImpl _value, - $Res Function(_$MessageDeltaContentTextObjectImpl) _then) +class __$$MessageContentImageFileObjectImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, + _$MessageContentImageFileObjectImpl> + implements _$$MessageContentImageFileObjectImplCopyWith<$Res> { + __$$MessageContentImageFileObjectImplCopyWithImpl( + _$MessageContentImageFileObjectImpl _value, + $Res Function(_$MessageContentImageFileObjectImpl) _then) : super(_value, _then); - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, - Object? text = freezed, + Object? imageFile = null, }) { - return _then(_$MessageDeltaContentTextObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$MessageContentImageFileObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - text: freezed == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as MessageDeltaContentText?, + imageFile: null == imageFile + ? _value.imageFile + : imageFile // ignore: cast_nullable_to_non_nullable + as MessageContentImageFile, )); } - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $MessageDeltaContentTextCopyWith<$Res>? get text { - if (_value.text == null) { - return null; - } - - return $MessageDeltaContentTextCopyWith<$Res>(_value.text!, (value) { - return _then(_value.copyWith(text: value)); + $MessageContentImageFileCopyWith<$Res> get imageFile { + return $MessageContentImageFileCopyWith<$Res>(_value.imageFile, (value) { + return _then(_value.copyWith(imageFile: value)); }); } } /// @nodoc @JsonSerializable() -class _$MessageDeltaContentTextObjectImpl - extends MessageDeltaContentTextObject { - const _$MessageDeltaContentTextObjectImpl( - {required this.index, - required this.type, - @JsonKey(includeIfNull: false) this.text}) +class _$MessageContentImageFileObjectImpl + extends MessageContentImageFileObject { + const _$MessageContentImageFileObjectImpl( + {this.type = 'image_file', + @JsonKey(name: 'image_file') required this.imageFile}) : super._(); - factory _$MessageDeltaContentTextObjectImpl.fromJson( + factory _$MessageContentImageFileObjectImpl.fromJson( Map json) => - _$$MessageDeltaContentTextObjectImplFromJson(json); - - /// The index of the content part in the message. - @override - final int index; + _$$MessageContentImageFileObjectImplFromJson(json); - /// Always `text`. + /// Always `image_file`. @override + @JsonKey() final String type; - /// The text content that is part of a message. + /// The image file that is part of a message. @override - @JsonKey(includeIfNull: false) - final MessageDeltaContentText? text; + @JsonKey(name: 'image_file') + final MessageContentImageFile imageFile; @override String toString() { - return 'MessageDeltaContent.text(index: $index, type: $type, text: $text)'; + return 'MessageContent.imageFile(type: $type, imageFile: $imageFile)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageDeltaContentTextObjectImpl && - (identical(other.index, index) || other.index == index) && + other is _$MessageContentImageFileObjectImpl && (identical(other.type, type) || other.type == type) && - (identical(other.text, text) || other.text == text)); + (identical(other.imageFile, imageFile) || + other.imageFile == imageFile)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, index, type, text); + int get hashCode => Object.hash(runtimeType, type, imageFile); - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageDeltaContentTextObjectImplCopyWith< - _$MessageDeltaContentTextObjectImpl> - get copyWith => __$$MessageDeltaContentTextObjectImplCopyWithImpl< - _$MessageDeltaContentTextObjectImpl>(this, _$identity); + _$$MessageContentImageFileObjectImplCopyWith< + _$MessageContentImageFileObjectImpl> + get copyWith => __$$MessageContentImageFileObjectImplCopyWithImpl< + _$MessageContentImageFileObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile) + required TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile) imageFile, - required TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text) - text, - required TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal) - refusal, - required TResult Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl) + required TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) imageUrl, + required TResult Function(String type, MessageContentText text) text, }) { - return text(index, type, this.text); + return imageFile(type, this.imageFile); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? + TResult? Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? imageFile, - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal)? - refusal, - TResult? Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl)? + TResult? Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, + TResult? Function(String type, MessageContentText text)? text, }) { - return text?.call(index, type, this.text); + return imageFile?.call(type, this.imageFile); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? + TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? imageFile, - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal)? - refusal, - TResult Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl)? + TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, + TResult Function(String type, MessageContentText text)? 
text, required TResult orElse(), }) { - if (text != null) { - return text(index, type, this.text); + if (imageFile != null) { + return imageFile(type, this.imageFile); } return orElse(); } @@ -61463,261 +53657,215 @@ class _$MessageDeltaContentTextObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(MessageDeltaContentImageFileObject value) - imageFile, - required TResult Function(MessageDeltaContentTextObject value) text, - required TResult Function(MessageDeltaContentRefusalObject value) refusal, - required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, + required TResult Function(MessageContentImageFileObject value) imageFile, + required TResult Function(MessageContentImageUrlObject value) imageUrl, + required TResult Function(MessageContentTextObject value) text, }) { - return text(this); + return imageFile(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult? Function(MessageDeltaContentTextObject value)? text, - TResult? Function(MessageDeltaContentRefusalObject value)? refusal, - TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, + TResult? Function(MessageContentImageFileObject value)? imageFile, + TResult? Function(MessageContentImageUrlObject value)? imageUrl, + TResult? Function(MessageContentTextObject value)? text, }) { - return text?.call(this); + return imageFile?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult Function(MessageDeltaContentTextObject value)? text, - TResult Function(MessageDeltaContentRefusalObject value)? refusal, - TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, + TResult Function(MessageContentImageFileObject value)? imageFile, + TResult Function(MessageContentImageUrlObject value)? imageUrl, + TResult Function(MessageContentTextObject value)? text, required TResult orElse(), }) { - if (text != null) { - return text(this); + if (imageFile != null) { + return imageFile(this); } return orElse(); } @override Map toJson() { - return _$$MessageDeltaContentTextObjectImplToJson( + return _$$MessageContentImageFileObjectImplToJson( this, ); } } -abstract class MessageDeltaContentTextObject extends MessageDeltaContent { - const factory MessageDeltaContentTextObject( - {required final int index, - required final String type, - @JsonKey(includeIfNull: false) final MessageDeltaContentText? text}) = - _$MessageDeltaContentTextObjectImpl; - const MessageDeltaContentTextObject._() : super._(); +abstract class MessageContentImageFileObject extends MessageContent { + const factory MessageContentImageFileObject( + {final String type, + @JsonKey(name: 'image_file') + required final MessageContentImageFile imageFile}) = + _$MessageContentImageFileObjectImpl; + const MessageContentImageFileObject._() : super._(); - factory MessageDeltaContentTextObject.fromJson(Map json) = - _$MessageDeltaContentTextObjectImpl.fromJson; + factory MessageContentImageFileObject.fromJson(Map json) = + _$MessageContentImageFileObjectImpl.fromJson; - /// The index of the content part in the message. @override - int get index; - /// Always `text`. - @override + /// Always `image_file`. String get type; - /// The text content that is part of a message. - @JsonKey(includeIfNull: false) - MessageDeltaContentText? 
get text; - - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. + /// The image file that is part of a message. + @JsonKey(name: 'image_file') + MessageContentImageFile get imageFile; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageDeltaContentTextObjectImplCopyWith< - _$MessageDeltaContentTextObjectImpl> + @JsonKey(ignore: true) + _$$MessageContentImageFileObjectImplCopyWith< + _$MessageContentImageFileObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageDeltaContentRefusalObjectImplCopyWith<$Res> - implements $MessageDeltaContentCopyWith<$Res> { - factory _$$MessageDeltaContentRefusalObjectImplCopyWith( - _$MessageDeltaContentRefusalObjectImpl value, - $Res Function(_$MessageDeltaContentRefusalObjectImpl) then) = - __$$MessageDeltaContentRefusalObjectImplCopyWithImpl<$Res>; +abstract class _$$MessageContentImageUrlObjectImplCopyWith<$Res> + implements $MessageContentCopyWith<$Res> { + factory _$$MessageContentImageUrlObjectImplCopyWith( + _$MessageContentImageUrlObjectImpl value, + $Res Function(_$MessageContentImageUrlObjectImpl) then) = + __$$MessageContentImageUrlObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {int index, String type, @JsonKey(includeIfNull: false) String? refusal}); + {String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl}); + + $MessageContentImageUrlCopyWith<$Res> get imageUrl; } /// @nodoc -class __$$MessageDeltaContentRefusalObjectImplCopyWithImpl<$Res> - extends _$MessageDeltaContentCopyWithImpl<$Res, - _$MessageDeltaContentRefusalObjectImpl> - implements _$$MessageDeltaContentRefusalObjectImplCopyWith<$Res> { - __$$MessageDeltaContentRefusalObjectImplCopyWithImpl( - _$MessageDeltaContentRefusalObjectImpl _value, - $Res Function(_$MessageDeltaContentRefusalObjectImpl) _then) +class __$$MessageContentImageUrlObjectImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, + _$MessageContentImageUrlObjectImpl> + implements _$$MessageContentImageUrlObjectImplCopyWith<$Res> { + __$$MessageContentImageUrlObjectImplCopyWithImpl( + _$MessageContentImageUrlObjectImpl _value, + $Res Function(_$MessageContentImageUrlObjectImpl) _then) : super(_value, _then); - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, - Object? refusal = freezed, + Object? imageUrl = null, }) { - return _then(_$MessageDeltaContentRefusalObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$MessageContentImageUrlObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - refusal: freezed == refusal - ? _value.refusal - : refusal // ignore: cast_nullable_to_non_nullable - as String?, + imageUrl: null == imageUrl + ? 
_value.imageUrl + : imageUrl // ignore: cast_nullable_to_non_nullable + as MessageContentImageUrl, )); } + + @override + @pragma('vm:prefer-inline') + $MessageContentImageUrlCopyWith<$Res> get imageUrl { + return $MessageContentImageUrlCopyWith<$Res>(_value.imageUrl, (value) { + return _then(_value.copyWith(imageUrl: value)); + }); + } } /// @nodoc @JsonSerializable() -class _$MessageDeltaContentRefusalObjectImpl - extends MessageDeltaContentRefusalObject { - const _$MessageDeltaContentRefusalObjectImpl( - {required this.index, - required this.type, - @JsonKey(includeIfNull: false) this.refusal}) +class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { + const _$MessageContentImageUrlObjectImpl( + {this.type = 'image_url', + @JsonKey(name: 'image_url') required this.imageUrl}) : super._(); - factory _$MessageDeltaContentRefusalObjectImpl.fromJson( + factory _$MessageContentImageUrlObjectImpl.fromJson( Map json) => - _$$MessageDeltaContentRefusalObjectImplFromJson(json); - - /// The index of the refusal part in the message. - @override - final int index; + _$$MessageContentImageUrlObjectImplFromJson(json); - /// Always `refusal`. + /// The type of the content part. Always `image_url`. @override + @JsonKey() final String type; - /// The refusal content generated by the assistant. + /// The image URL part of a message. @override - @JsonKey(includeIfNull: false) - final String? refusal; + @JsonKey(name: 'image_url') + final MessageContentImageUrl imageUrl; @override String toString() { - return 'MessageDeltaContent.refusal(index: $index, type: $type, refusal: $refusal)'; + return 'MessageContent.imageUrl(type: $type, imageUrl: $imageUrl)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageDeltaContentRefusalObjectImpl && - (identical(other.index, index) || other.index == index) && + other is _$MessageContentImageUrlObjectImpl && (identical(other.type, type) || other.type == type) && - (identical(other.refusal, refusal) || other.refusal == refusal)); + (identical(other.imageUrl, imageUrl) || + other.imageUrl == imageUrl)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, index, type, refusal); + int get hashCode => Object.hash(runtimeType, type, imageUrl); - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageDeltaContentRefusalObjectImplCopyWith< - _$MessageDeltaContentRefusalObjectImpl> - get copyWith => __$$MessageDeltaContentRefusalObjectImplCopyWithImpl< - _$MessageDeltaContentRefusalObjectImpl>(this, _$identity); + _$$MessageContentImageUrlObjectImplCopyWith< + _$MessageContentImageUrlObjectImpl> + get copyWith => __$$MessageContentImageUrlObjectImplCopyWithImpl< + _$MessageContentImageUrlObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile) + required TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile) imageFile, - required TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? 
text) - text, - required TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal) - refusal, - required TResult Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl) + required TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) imageUrl, + required TResult Function(String type, MessageContentText text) text, }) { - return refusal(index, type, this.refusal); + return imageUrl(type, this.imageUrl); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? + TResult? Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? imageFile, - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal)? - refusal, - TResult? Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl)? + TResult? Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, + TResult? Function(String type, MessageContentText text)? text, }) { - return refusal?.call(index, type, this.refusal); + return imageUrl?.call(type, this.imageUrl); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? + TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? imageFile, - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal)? - refusal, - TResult Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl)? + TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, + TResult Function(String type, MessageContentText text)? text, required TResult orElse(), }) { - if (refusal != null) { - return refusal(index, type, this.refusal); + if (imageUrl != null) { + return imageUrl(type, this.imageUrl); } return orElse(); } @@ -61725,281 +53873,206 @@ class _$MessageDeltaContentRefusalObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(MessageDeltaContentImageFileObject value) - imageFile, - required TResult Function(MessageDeltaContentTextObject value) text, - required TResult Function(MessageDeltaContentRefusalObject value) refusal, - required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, + required TResult Function(MessageContentImageFileObject value) imageFile, + required TResult Function(MessageContentImageUrlObject value) imageUrl, + required TResult Function(MessageContentTextObject value) text, }) { - return refusal(this); + return imageUrl(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult? Function(MessageDeltaContentTextObject value)? text, - TResult? Function(MessageDeltaContentRefusalObject value)? refusal, - TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, + TResult? 
Function(MessageContentImageFileObject value)? imageFile, + TResult? Function(MessageContentImageUrlObject value)? imageUrl, + TResult? Function(MessageContentTextObject value)? text, }) { - return refusal?.call(this); + return imageUrl?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult Function(MessageDeltaContentTextObject value)? text, - TResult Function(MessageDeltaContentRefusalObject value)? refusal, - TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, + TResult Function(MessageContentImageFileObject value)? imageFile, + TResult Function(MessageContentImageUrlObject value)? imageUrl, + TResult Function(MessageContentTextObject value)? text, required TResult orElse(), }) { - if (refusal != null) { - return refusal(this); + if (imageUrl != null) { + return imageUrl(this); } return orElse(); } @override Map toJson() { - return _$$MessageDeltaContentRefusalObjectImplToJson( + return _$$MessageContentImageUrlObjectImplToJson( this, ); } } -abstract class MessageDeltaContentRefusalObject extends MessageDeltaContent { - const factory MessageDeltaContentRefusalObject( - {required final int index, - required final String type, - @JsonKey(includeIfNull: false) final String? refusal}) = - _$MessageDeltaContentRefusalObjectImpl; - const MessageDeltaContentRefusalObject._() : super._(); +abstract class MessageContentImageUrlObject extends MessageContent { + const factory MessageContentImageUrlObject( + {final String type, + @JsonKey(name: 'image_url') + required final MessageContentImageUrl imageUrl}) = + _$MessageContentImageUrlObjectImpl; + const MessageContentImageUrlObject._() : super._(); - factory MessageDeltaContentRefusalObject.fromJson(Map json) = - _$MessageDeltaContentRefusalObjectImpl.fromJson; + factory MessageContentImageUrlObject.fromJson(Map json) = + _$MessageContentImageUrlObjectImpl.fromJson; - /// The index of the refusal part in the message. @override - int get index; - /// Always `refusal`. - @override + /// The type of the content part. Always `image_url`. String get type; - /// The refusal content generated by the assistant. - @JsonKey(includeIfNull: false) - String? get refusal; - - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. + /// The image URL part of a message. 
+ @JsonKey(name: 'image_url') + MessageContentImageUrl get imageUrl; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageDeltaContentRefusalObjectImplCopyWith< - _$MessageDeltaContentRefusalObjectImpl> + @JsonKey(ignore: true) + _$$MessageContentImageUrlObjectImplCopyWith< + _$MessageContentImageUrlObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> - implements $MessageDeltaContentCopyWith<$Res> { - factory _$$MessageDeltaContentImageUrlObjectImplCopyWith( - _$MessageDeltaContentImageUrlObjectImpl value, - $Res Function(_$MessageDeltaContentImageUrlObjectImpl) then) = - __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res>; +abstract class _$$MessageContentTextObjectImplCopyWith<$Res> + implements $MessageContentCopyWith<$Res> { + factory _$$MessageContentTextObjectImplCopyWith( + _$MessageContentTextObjectImpl value, + $Res Function(_$MessageContentTextObjectImpl) then) = + __$$MessageContentTextObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl}); + $Res call({String type, MessageContentText text}); - $MessageContentImageUrlCopyWith<$Res>? get imageUrl; + $MessageContentTextCopyWith<$Res> get text; } /// @nodoc -class __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res> - extends _$MessageDeltaContentCopyWithImpl<$Res, - _$MessageDeltaContentImageUrlObjectImpl> - implements _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> { - __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl( - _$MessageDeltaContentImageUrlObjectImpl _value, - $Res Function(_$MessageDeltaContentImageUrlObjectImpl) _then) +class __$$MessageContentTextObjectImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, _$MessageContentTextObjectImpl> + implements _$$MessageContentTextObjectImplCopyWith<$Res> { + __$$MessageContentTextObjectImplCopyWithImpl( + _$MessageContentTextObjectImpl _value, + $Res Function(_$MessageContentTextObjectImpl) _then) : super(_value, _then); - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, - Object? imageUrl = freezed, + Object? text = null, }) { - return _then(_$MessageDeltaContentImageUrlObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$MessageContentTextObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - imageUrl: freezed == imageUrl - ? _value.imageUrl - : imageUrl // ignore: cast_nullable_to_non_nullable - as MessageContentImageUrl?, + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as MessageContentText, )); } - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $MessageContentImageUrlCopyWith<$Res>? 
get imageUrl { - if (_value.imageUrl == null) { - return null; - } - - return $MessageContentImageUrlCopyWith<$Res>(_value.imageUrl!, (value) { - return _then(_value.copyWith(imageUrl: value)); + $MessageContentTextCopyWith<$Res> get text { + return $MessageContentTextCopyWith<$Res>(_value.text, (value) { + return _then(_value.copyWith(text: value)); }); } } /// @nodoc @JsonSerializable() -class _$MessageDeltaContentImageUrlObjectImpl - extends MessageDeltaContentImageUrlObject { - const _$MessageDeltaContentImageUrlObjectImpl( - {required this.index, - required this.type, - @JsonKey(name: 'image_url', includeIfNull: false) this.imageUrl}) +class _$MessageContentTextObjectImpl extends MessageContentTextObject { + const _$MessageContentTextObjectImpl({this.type = 'text', required this.text}) : super._(); - factory _$MessageDeltaContentImageUrlObjectImpl.fromJson( - Map json) => - _$$MessageDeltaContentImageUrlObjectImplFromJson(json); - - /// The index of the content part in the message. - @override - final int index; + factory _$MessageContentTextObjectImpl.fromJson(Map json) => + _$$MessageContentTextObjectImplFromJson(json); - /// Always `image_url`. + /// Always `text`. @override + @JsonKey() final String type; - /// The image URL part of a message. + /// The text content that is part of a message. @override - @JsonKey(name: 'image_url', includeIfNull: false) - final MessageContentImageUrl? imageUrl; + final MessageContentText text; @override String toString() { - return 'MessageDeltaContent.imageUrl(index: $index, type: $type, imageUrl: $imageUrl)'; + return 'MessageContent.text(type: $type, text: $text)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageDeltaContentImageUrlObjectImpl && - (identical(other.index, index) || other.index == index) && + other is _$MessageContentTextObjectImpl && (identical(other.type, type) || other.type == type) && - (identical(other.imageUrl, imageUrl) || - other.imageUrl == imageUrl)); + (identical(other.text, text) || other.text == text)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, index, type, imageUrl); + int get hashCode => Object.hash(runtimeType, type, text); - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageDeltaContentImageUrlObjectImplCopyWith< - _$MessageDeltaContentImageUrlObjectImpl> - get copyWith => __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl< - _$MessageDeltaContentImageUrlObjectImpl>(this, _$identity); + _$$MessageContentTextObjectImplCopyWith<_$MessageContentTextObjectImpl> + get copyWith => __$$MessageContentTextObjectImplCopyWithImpl< + _$MessageContentTextObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile) + required TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile) imageFile, - required TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text) - text, - required TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? 
refusal) - refusal, - required TResult Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl) + required TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) imageUrl, + required TResult Function(String type, MessageContentText text) text, }) { - return imageUrl(index, type, this.imageUrl); + return text(type, this.text); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? + TResult? Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? imageFile, - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal)? - refusal, - TResult? Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl)? + TResult? Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, + TResult? Function(String type, MessageContentText text)? text, }) { - return imageUrl?.call(index, type, this.imageUrl); + return text?.call(type, this.text); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? + TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? imageFile, - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? refusal)? - refusal, - TResult Function( - int index, - String type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl)? + TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, + TResult Function(String type, MessageContentText text)? text, required TResult orElse(), }) { - if (imageUrl != null) { - return imageUrl(index, type, this.imageUrl); + if (text != null) { + return text(type, this.text); } return orElse(); } @@ -62007,817 +54080,358 @@ class _$MessageDeltaContentImageUrlObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(MessageDeltaContentImageFileObject value) - imageFile, - required TResult Function(MessageDeltaContentTextObject value) text, - required TResult Function(MessageDeltaContentRefusalObject value) refusal, - required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, + required TResult Function(MessageContentImageFileObject value) imageFile, + required TResult Function(MessageContentImageUrlObject value) imageUrl, + required TResult Function(MessageContentTextObject value) text, }) { - return imageUrl(this); + return text(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult? Function(MessageDeltaContentTextObject value)? text, - TResult? Function(MessageDeltaContentRefusalObject value)? refusal, - TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, + TResult? Function(MessageContentImageFileObject value)? imageFile, + TResult? Function(MessageContentImageUrlObject value)? imageUrl, + TResult? 
Function(MessageContentTextObject value)? text, }) { - return imageUrl?.call(this); + return text?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult Function(MessageDeltaContentTextObject value)? text, - TResult Function(MessageDeltaContentRefusalObject value)? refusal, - TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, + TResult Function(MessageContentImageFileObject value)? imageFile, + TResult Function(MessageContentImageUrlObject value)? imageUrl, + TResult Function(MessageContentTextObject value)? text, required TResult orElse(), }) { - if (imageUrl != null) { - return imageUrl(this); + if (text != null) { + return text(this); } return orElse(); } @override Map toJson() { - return _$$MessageDeltaContentImageUrlObjectImplToJson( + return _$$MessageContentTextObjectImplToJson( this, ); } } -abstract class MessageDeltaContentImageUrlObject extends MessageDeltaContent { - const factory MessageDeltaContentImageUrlObject( - {required final int index, - required final String type, - @JsonKey(name: 'image_url', includeIfNull: false) - final MessageContentImageUrl? imageUrl}) = - _$MessageDeltaContentImageUrlObjectImpl; - const MessageDeltaContentImageUrlObject._() : super._(); +abstract class MessageContentTextObject extends MessageContent { + const factory MessageContentTextObject( + {final String type, + required final MessageContentText text}) = _$MessageContentTextObjectImpl; + const MessageContentTextObject._() : super._(); - factory MessageDeltaContentImageUrlObject.fromJson( - Map json) = - _$MessageDeltaContentImageUrlObjectImpl.fromJson; + factory MessageContentTextObject.fromJson(Map json) = + _$MessageContentTextObjectImpl.fromJson; - /// The index of the content part in the message. @override - int get index; - /// Always `image_url`. - @override + /// Always `text`. String get type; - /// The image URL part of a message. - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? get imageUrl; - - /// Create a copy of MessageDeltaContent - /// with the given fields replaced by the non-null parameter values. + /// The text content that is part of a message. + MessageContentText get text; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageDeltaContentImageUrlObjectImplCopyWith< - _$MessageDeltaContentImageUrlObjectImpl> + @JsonKey(ignore: true) + _$$MessageContentTextObjectImplCopyWith<_$MessageContentTextObjectImpl> get copyWith => throw _privateConstructorUsedError; } -MessageContentTextAnnotations _$MessageContentTextAnnotationsFromJson( - Map json) { +MessageDeltaContent _$MessageDeltaContentFromJson(Map json) { switch (json['type']) { - case 'file_citation': - return MessageContentTextAnnotationsFileCitationObject.fromJson(json); - case 'file_path': - return MessageContentTextAnnotationsFilePathObject.fromJson(json); + case 'image_file': + return MessageDeltaContentImageFileObject.fromJson(json); + case 'text': + return MessageDeltaContentTextObject.fromJson(json); default: - throw CheckedFromJsonException( - json, - 'type', - 'MessageContentTextAnnotations', + throw CheckedFromJsonException(json, 'type', 'MessageDeltaContent', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$MessageContentTextAnnotations { - /// Always `file_citation`. - String get type => throw _privateConstructorUsedError; - - /// The text in the message content that needs to be replaced. 
- String get text => throw _privateConstructorUsedError; - - /// The start index of the text in the message content that needs to be replaced. - @JsonKey(name: 'start_index') - int get startIndex => throw _privateConstructorUsedError; +mixin _$MessageDeltaContent { + /// The index of the content part in the message. + int get index => throw _privateConstructorUsedError; - /// The end index of the text in the message content that needs to be replaced. - @JsonKey(name: 'end_index') - int get endIndex => throw _privateConstructorUsedError; + /// Always `image_file`. + String get type => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ required TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex) - fileCitation, - required TResult Function( - String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex) - filePath, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile) + imageFile, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text) + text, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? - fileCitation, - TResult? Function( - String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? - filePath, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function( - String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? - fileCitation, - TResult Function( - String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? - filePath, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function( - MessageContentTextAnnotationsFileCitationObject value) - fileCitation, - required TResult Function(MessageContentTextAnnotationsFilePathObject value) - filePath, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageContentTextAnnotationsFileCitationObject value)? - fileCitation, - TResult? Function(MessageContentTextAnnotationsFilePathObject value)? - filePath, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageContentTextAnnotationsFileCitationObject value)? - fileCitation, - TResult Function(MessageContentTextAnnotationsFilePathObject value)? - filePath, - required TResult orElse(), + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult? 
Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, }) => - throw _privateConstructorUsedError; - - /// Serializes this MessageContentTextAnnotations to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $MessageContentTextAnnotationsCopyWith - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $MessageContentTextAnnotationsCopyWith<$Res> { - factory $MessageContentTextAnnotationsCopyWith( - MessageContentTextAnnotations value, - $Res Function(MessageContentTextAnnotations) then) = - _$MessageContentTextAnnotationsCopyWithImpl<$Res, - MessageContentTextAnnotations>; - @useResult - $Res call( - {String type, - String text, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex}); -} - -/// @nodoc -class _$MessageContentTextAnnotationsCopyWithImpl<$Res, - $Val extends MessageContentTextAnnotations> - implements $MessageContentTextAnnotationsCopyWith<$Res> { - _$MessageContentTextAnnotationsCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of MessageContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - Object? text = null, - Object? startIndex = null, - Object? endIndex = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - text: null == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String, - startIndex: null == startIndex - ? _value.startIndex - : startIndex // ignore: cast_nullable_to_non_nullable - as int, - endIndex: null == endIndex - ? 
_value.endIndex - : endIndex // ignore: cast_nullable_to_non_nullable - as int, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< - $Res> implements $MessageContentTextAnnotationsCopyWith<$Res> { - factory _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith( - _$MessageContentTextAnnotationsFileCitationObjectImpl value, - $Res Function(_$MessageContentTextAnnotationsFileCitationObjectImpl) - then) = - __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex}); - - $MessageContentTextAnnotationsFileCitationCopyWith<$Res> get fileCitation; -} - -/// @nodoc -class __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl<$Res> - extends _$MessageContentTextAnnotationsCopyWithImpl<$Res, - _$MessageContentTextAnnotationsFileCitationObjectImpl> - implements - _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith<$Res> { - __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl( - _$MessageContentTextAnnotationsFileCitationObjectImpl _value, - $Res Function(_$MessageContentTextAnnotationsFileCitationObjectImpl) - _then) - : super(_value, _then); - - /// Create a copy of MessageContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - Object? text = null, - Object? fileCitation = null, - Object? startIndex = null, - Object? endIndex = null, - }) { - return _then(_$MessageContentTextAnnotationsFileCitationObjectImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - text: null == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String, - fileCitation: null == fileCitation - ? _value.fileCitation - : fileCitation // ignore: cast_nullable_to_non_nullable - as MessageContentTextAnnotationsFileCitation, - startIndex: null == startIndex - ? _value.startIndex - : startIndex // ignore: cast_nullable_to_non_nullable - as int, - endIndex: null == endIndex - ? _value.endIndex - : endIndex // ignore: cast_nullable_to_non_nullable - as int, - )); - } - - /// Create a copy of MessageContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $MessageContentTextAnnotationsFileCitationCopyWith<$Res> get fileCitation { - return $MessageContentTextAnnotationsFileCitationCopyWith<$Res>( - _value.fileCitation, (value) { - return _then(_value.copyWith(fileCitation: value)); - }); - } -} - -/// @nodoc -@JsonSerializable() -class _$MessageContentTextAnnotationsFileCitationObjectImpl - extends MessageContentTextAnnotationsFileCitationObject { - const _$MessageContentTextAnnotationsFileCitationObjectImpl( - {required this.type, - required this.text, - @JsonKey(name: 'file_citation') required this.fileCitation, - @JsonKey(name: 'start_index') required this.startIndex, - @JsonKey(name: 'end_index') required this.endIndex}) - : super._(); - - factory _$MessageContentTextAnnotationsFileCitationObjectImpl.fromJson( - Map json) => - _$$MessageContentTextAnnotationsFileCitationObjectImplFromJson(json); - - /// Always `file_citation`. 
- @override - final String type; - - /// The text in the message content that needs to be replaced. - @override - final String text; - - /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. - @override - @JsonKey(name: 'file_citation') - final MessageContentTextAnnotationsFileCitation fileCitation; - - /// The start index of the text in the message content that needs to be replaced. - @override - @JsonKey(name: 'start_index') - final int startIndex; - - /// The end index of the text in the message content that needs to be replaced. - @override - @JsonKey(name: 'end_index') - final int endIndex; - - @override - String toString() { - return 'MessageContentTextAnnotations.fileCitation(type: $type, text: $text, fileCitation: $fileCitation, startIndex: $startIndex, endIndex: $endIndex)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$MessageContentTextAnnotationsFileCitationObjectImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.text, text) || other.text == text) && - (identical(other.fileCitation, fileCitation) || - other.fileCitation == fileCitation) && - (identical(other.startIndex, startIndex) || - other.startIndex == startIndex) && - (identical(other.endIndex, endIndex) || - other.endIndex == endIndex)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => - Object.hash(runtimeType, type, text, fileCitation, startIndex, endIndex); - - /// Create a copy of MessageContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< - _$MessageContentTextAnnotationsFileCitationObjectImpl> - get copyWith => - __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl< - _$MessageContentTextAnnotationsFileCitationObjectImpl>( - this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function( - String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex) - fileCitation, - required TResult Function( - String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex) - filePath, - }) { - return fileCitation(type, text, this.fileCitation, startIndex, endIndex); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function( - String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? - fileCitation, - TResult? Function( - String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? 
- filePath, - }) { - return fileCitation?.call( - type, text, this.fileCitation, startIndex, endIndex); - } - - @override + throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? - fileCitation, - TResult Function( - String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? - filePath, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, required TResult orElse(), - }) { - if (fileCitation != null) { - return fileCitation(type, text, this.fileCitation, startIndex, endIndex); - } - return orElse(); - } - - @override + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function( - MessageContentTextAnnotationsFileCitationObject value) - fileCitation, - required TResult Function(MessageContentTextAnnotationsFilePathObject value) - filePath, - }) { - return fileCitation(this); - } - - @override + required TResult Function(MessageDeltaContentImageFileObject value) + imageFile, + required TResult Function(MessageDeltaContentTextObject value) text, + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentTextAnnotationsFileCitationObject value)? - fileCitation, - TResult? Function(MessageContentTextAnnotationsFilePathObject value)? - filePath, - }) { - return fileCitation?.call(this); - } - - @override + TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult? Function(MessageDeltaContentTextObject value)? text, + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentTextAnnotationsFileCitationObject value)? - fileCitation, - TResult Function(MessageContentTextAnnotationsFilePathObject value)? - filePath, + TResult Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult Function(MessageDeltaContentTextObject value)? 
text, required TResult orElse(), - }) { - if (fileCitation != null) { - return fileCitation(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$MessageContentTextAnnotationsFileCitationObjectImplToJson( - this, - ); - } + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageDeltaContentCopyWith get copyWith => + throw _privateConstructorUsedError; } -abstract class MessageContentTextAnnotationsFileCitationObject - extends MessageContentTextAnnotations { - const factory MessageContentTextAnnotationsFileCitationObject( - {required final String type, - required final String text, - @JsonKey(name: 'file_citation') - required final MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') required final int startIndex, - @JsonKey(name: 'end_index') required final int endIndex}) = - _$MessageContentTextAnnotationsFileCitationObjectImpl; - const MessageContentTextAnnotationsFileCitationObject._() : super._(); - - factory MessageContentTextAnnotationsFileCitationObject.fromJson( - Map json) = - _$MessageContentTextAnnotationsFileCitationObjectImpl.fromJson; - - /// Always `file_citation`. - @override - String get type; - - /// The text in the message content that needs to be replaced. - @override - String get text; - - /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation get fileCitation; +/// @nodoc +abstract class $MessageDeltaContentCopyWith<$Res> { + factory $MessageDeltaContentCopyWith( + MessageDeltaContent value, $Res Function(MessageDeltaContent) then) = + _$MessageDeltaContentCopyWithImpl<$Res, MessageDeltaContent>; + @useResult + $Res call({int index, String type}); +} - /// The start index of the text in the message content that needs to be replaced. - @override - @JsonKey(name: 'start_index') - int get startIndex; +/// @nodoc +class _$MessageDeltaContentCopyWithImpl<$Res, $Val extends MessageDeltaContent> + implements $MessageDeltaContentCopyWith<$Res> { + _$MessageDeltaContentCopyWithImpl(this._value, this._then); - /// The end index of the text in the message content that needs to be replaced. - @override - @JsonKey(name: 'end_index') - int get endIndex; + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; - /// Create a copy of MessageContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< - _$MessageContentTextAnnotationsFileCitationObjectImpl> - get copyWith => throw _privateConstructorUsedError; + $Res call({ + Object? index = null, + Object? type = null, + }) { + return _then(_value.copyWith( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } } /// @nodoc -abstract class _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith<$Res> - implements $MessageContentTextAnnotationsCopyWith<$Res> { - factory _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith( - _$MessageContentTextAnnotationsFilePathObjectImpl value, - $Res Function(_$MessageContentTextAnnotationsFilePathObjectImpl) - then) = - __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res>; +abstract class _$$MessageDeltaContentImageFileObjectImplCopyWith<$Res> + implements $MessageDeltaContentCopyWith<$Res> { + factory _$$MessageDeltaContentImageFileObjectImplCopyWith( + _$MessageDeltaContentImageFileObjectImpl value, + $Res Function(_$MessageDeltaContentImageFileObjectImpl) then) = + __$$MessageDeltaContentImageFileObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex}); + {int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile}); - $MessageContentTextAnnotationsFilePathCopyWith<$Res> get filePath; + $MessageContentImageFileCopyWith<$Res>? get imageFile; } /// @nodoc -class __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> - extends _$MessageContentTextAnnotationsCopyWithImpl<$Res, - _$MessageContentTextAnnotationsFilePathObjectImpl> - implements - _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith<$Res> { - __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl( - _$MessageContentTextAnnotationsFilePathObjectImpl _value, - $Res Function(_$MessageContentTextAnnotationsFilePathObjectImpl) _then) +class __$$MessageDeltaContentImageFileObjectImplCopyWithImpl<$Res> + extends _$MessageDeltaContentCopyWithImpl<$Res, + _$MessageDeltaContentImageFileObjectImpl> + implements _$$MessageDeltaContentImageFileObjectImplCopyWith<$Res> { + __$$MessageDeltaContentImageFileObjectImplCopyWithImpl( + _$MessageDeltaContentImageFileObjectImpl _value, + $Res Function(_$MessageDeltaContentImageFileObjectImpl) _then) : super(_value, _then); - /// Create a copy of MessageContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, - Object? text = null, - Object? filePath = null, - Object? startIndex = null, - Object? endIndex = null, + Object? imageFile = freezed, }) { - return _then(_$MessageContentTextAnnotationsFilePathObjectImpl( + return _then(_$MessageDeltaContentImageFileObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - text: null == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String, - filePath: null == filePath - ? _value.filePath - : filePath // ignore: cast_nullable_to_non_nullable - as MessageContentTextAnnotationsFilePath, - startIndex: null == startIndex - ? _value.startIndex - : startIndex // ignore: cast_nullable_to_non_nullable - as int, - endIndex: null == endIndex - ? _value.endIndex - : endIndex // ignore: cast_nullable_to_non_nullable - as int, + imageFile: freezed == imageFile + ? 
_value.imageFile + : imageFile // ignore: cast_nullable_to_non_nullable + as MessageContentImageFile?, )); } - /// Create a copy of MessageContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $MessageContentTextAnnotationsFilePathCopyWith<$Res> get filePath { - return $MessageContentTextAnnotationsFilePathCopyWith<$Res>(_value.filePath, - (value) { - return _then(_value.copyWith(filePath: value)); + $MessageContentImageFileCopyWith<$Res>? get imageFile { + if (_value.imageFile == null) { + return null; + } + + return $MessageContentImageFileCopyWith<$Res>(_value.imageFile!, (value) { + return _then(_value.copyWith(imageFile: value)); }); } } /// @nodoc @JsonSerializable() -class _$MessageContentTextAnnotationsFilePathObjectImpl - extends MessageContentTextAnnotationsFilePathObject { - const _$MessageContentTextAnnotationsFilePathObjectImpl( - {required this.type, - required this.text, - @JsonKey(name: 'file_path') required this.filePath, - @JsonKey(name: 'start_index') required this.startIndex, - @JsonKey(name: 'end_index') required this.endIndex}) +class _$MessageDeltaContentImageFileObjectImpl + extends MessageDeltaContentImageFileObject { + const _$MessageDeltaContentImageFileObjectImpl( + {required this.index, + required this.type, + @JsonKey(name: 'image_file', includeIfNull: false) this.imageFile}) : super._(); - factory _$MessageContentTextAnnotationsFilePathObjectImpl.fromJson( + factory _$MessageDeltaContentImageFileObjectImpl.fromJson( Map json) => - _$$MessageContentTextAnnotationsFilePathObjectImplFromJson(json); - - /// Always `file_path`. - @override - final String type; - - /// The text in the message content that needs to be replaced. - @override - final String text; + _$$MessageDeltaContentImageFileObjectImplFromJson(json); - /// No Description + /// The index of the content part in the message. @override - @JsonKey(name: 'file_path') - final MessageContentTextAnnotationsFilePath filePath; + final int index; - /// No Description + /// Always `image_file`. @override - @JsonKey(name: 'start_index') - final int startIndex; + final String type; - /// No Description + /// The image file that is part of a message. @override - @JsonKey(name: 'end_index') - final int endIndex; + @JsonKey(name: 'image_file', includeIfNull: false) + final MessageContentImageFile? 
imageFile; @override String toString() { - return 'MessageContentTextAnnotations.filePath(type: $type, text: $text, filePath: $filePath, startIndex: $startIndex, endIndex: $endIndex)'; + return 'MessageDeltaContent.imageFile(index: $index, type: $type, imageFile: $imageFile)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentTextAnnotationsFilePathObjectImpl && + other is _$MessageDeltaContentImageFileObjectImpl && + (identical(other.index, index) || other.index == index) && (identical(other.type, type) || other.type == type) && - (identical(other.text, text) || other.text == text) && - (identical(other.filePath, filePath) || - other.filePath == filePath) && - (identical(other.startIndex, startIndex) || - other.startIndex == startIndex) && - (identical(other.endIndex, endIndex) || - other.endIndex == endIndex)); + (identical(other.imageFile, imageFile) || + other.imageFile == imageFile)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => - Object.hash(runtimeType, type, text, filePath, startIndex, endIndex); + int get hashCode => Object.hash(runtimeType, index, type, imageFile); - /// Create a copy of MessageContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith< - _$MessageContentTextAnnotationsFilePathObjectImpl> - get copyWith => - __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl< - _$MessageContentTextAnnotationsFilePathObjectImpl>( - this, _$identity); + _$$MessageDeltaContentImageFileObjectImplCopyWith< + _$MessageDeltaContentImageFileObjectImpl> + get copyWith => __$$MessageDeltaContentImageFileObjectImplCopyWithImpl< + _$MessageDeltaContentImageFileObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex) - fileCitation, - required TResult Function( - String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex) - filePath, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile) + imageFile, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text) + text, }) { - return filePath(type, text, this.filePath, startIndex, endIndex); + return imageFile(index, type, this.imageFile); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? - fileCitation, - TResult? Function( - String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? - filePath, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? 
+ imageFile, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, }) { - return filePath?.call(type, text, this.filePath, startIndex, endIndex); + return imageFile?.call(index, type, this.imageFile); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? - fileCitation, - TResult Function( - String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? - filePath, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, required TResult orElse(), }) { - if (filePath != null) { - return filePath(type, text, this.filePath, startIndex, endIndex); + if (imageFile != null) { + return imageFile(index, type, this.imageFile); } return orElse(); } @@ -62825,371 +54439,403 @@ class _$MessageContentTextAnnotationsFilePathObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function( - MessageContentTextAnnotationsFileCitationObject value) - fileCitation, - required TResult Function(MessageContentTextAnnotationsFilePathObject value) - filePath, + required TResult Function(MessageDeltaContentImageFileObject value) + imageFile, + required TResult Function(MessageDeltaContentTextObject value) text, }) { - return filePath(this); + return imageFile(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentTextAnnotationsFileCitationObject value)? - fileCitation, - TResult? Function(MessageContentTextAnnotationsFilePathObject value)? - filePath, + TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult? Function(MessageDeltaContentTextObject value)? text, }) { - return filePath?.call(this); + return imageFile?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentTextAnnotationsFileCitationObject value)? - fileCitation, - TResult Function(MessageContentTextAnnotationsFilePathObject value)? - filePath, + TResult Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult Function(MessageDeltaContentTextObject value)? 
text, required TResult orElse(), }) { - if (filePath != null) { - return filePath(this); + if (imageFile != null) { + return imageFile(this); } return orElse(); } @override Map toJson() { - return _$$MessageContentTextAnnotationsFilePathObjectImplToJson( + return _$$MessageDeltaContentImageFileObjectImplToJson( this, ); } } -abstract class MessageContentTextAnnotationsFilePathObject - extends MessageContentTextAnnotations { - const factory MessageContentTextAnnotationsFilePathObject( - {required final String type, - required final String text, - @JsonKey(name: 'file_path') - required final MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') required final int startIndex, - @JsonKey(name: 'end_index') required final int endIndex}) = - _$MessageContentTextAnnotationsFilePathObjectImpl; - const MessageContentTextAnnotationsFilePathObject._() : super._(); +abstract class MessageDeltaContentImageFileObject extends MessageDeltaContent { + const factory MessageDeltaContentImageFileObject( + {required final int index, + required final String type, + @JsonKey(name: 'image_file', includeIfNull: false) + final MessageContentImageFile? imageFile}) = + _$MessageDeltaContentImageFileObjectImpl; + const MessageDeltaContentImageFileObject._() : super._(); - factory MessageContentTextAnnotationsFilePathObject.fromJson( + factory MessageDeltaContentImageFileObject.fromJson( Map json) = - _$MessageContentTextAnnotationsFilePathObjectImpl.fromJson; - - /// Always `file_path`. - @override - String get type; - - /// The text in the message content that needs to be replaced. - @override - String get text; - - /// No Description - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath get filePath; - - /// No Description - @override - @JsonKey(name: 'start_index') - int get startIndex; + _$MessageDeltaContentImageFileObjectImpl.fromJson; - /// No Description @override - @JsonKey(name: 'end_index') - int get endIndex; - /// Create a copy of MessageContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. + /// The index of the content part in the message. + int get index; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith< - _$MessageContentTextAnnotationsFilePathObjectImpl> - get copyWith => throw _privateConstructorUsedError; -} - -MessageContentTextAnnotationsFilePath - _$MessageContentTextAnnotationsFilePathFromJson(Map json) { - return _MessageContentTextAnnotationsFilePath.fromJson(json); -} - -/// @nodoc -mixin _$MessageContentTextAnnotationsFilePath { - /// The ID of the file that was generated. - @JsonKey(name: 'file_id') - String get fileId => throw _privateConstructorUsedError; - - /// Serializes this MessageContentTextAnnotationsFilePath to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageContentTextAnnotationsFilePath - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $MessageContentTextAnnotationsFilePathCopyWith< - MessageContentTextAnnotationsFilePath> - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $MessageContentTextAnnotationsFilePathCopyWith<$Res> { - factory $MessageContentTextAnnotationsFilePathCopyWith( - MessageContentTextAnnotationsFilePath value, - $Res Function(MessageContentTextAnnotationsFilePath) then) = - _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, - MessageContentTextAnnotationsFilePath>; - @useResult - $Res call({@JsonKey(name: 'file_id') String fileId}); -} - -/// @nodoc -class _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, - $Val extends MessageContentTextAnnotationsFilePath> - implements $MessageContentTextAnnotationsFilePathCopyWith<$Res> { - _$MessageContentTextAnnotationsFilePathCopyWithImpl(this._value, this._then); - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + /// Always `image_file`. + String get type; - /// Create a copy of MessageContentTextAnnotationsFilePath - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') + /// The image file that is part of a message. + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? get imageFile; @override - $Res call({ - Object? fileId = null, - }) { - return _then(_value.copyWith( - fileId: null == fileId - ? _value.fileId - : fileId // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } + @JsonKey(ignore: true) + _$$MessageDeltaContentImageFileObjectImplCopyWith< + _$MessageDeltaContentImageFileObjectImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageContentTextAnnotationsFilePathImplCopyWith<$Res> - implements $MessageContentTextAnnotationsFilePathCopyWith<$Res> { - factory _$$MessageContentTextAnnotationsFilePathImplCopyWith( - _$MessageContentTextAnnotationsFilePathImpl value, - $Res Function(_$MessageContentTextAnnotationsFilePathImpl) then) = - __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl<$Res>; +abstract class _$$MessageDeltaContentTextObjectImplCopyWith<$Res> + implements $MessageDeltaContentCopyWith<$Res> { + factory _$$MessageDeltaContentTextObjectImplCopyWith( + _$MessageDeltaContentTextObjectImpl value, + $Res Function(_$MessageDeltaContentTextObjectImpl) then) = + __$$MessageDeltaContentTextObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(name: 'file_id') String fileId}); + $Res call( + {int index, + String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text}); + + $MessageDeltaContentTextCopyWith<$Res>? 
get text; } /// @nodoc -class __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl<$Res> - extends _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, - _$MessageContentTextAnnotationsFilePathImpl> - implements _$$MessageContentTextAnnotationsFilePathImplCopyWith<$Res> { - __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl( - _$MessageContentTextAnnotationsFilePathImpl _value, - $Res Function(_$MessageContentTextAnnotationsFilePathImpl) _then) +class __$$MessageDeltaContentTextObjectImplCopyWithImpl<$Res> + extends _$MessageDeltaContentCopyWithImpl<$Res, + _$MessageDeltaContentTextObjectImpl> + implements _$$MessageDeltaContentTextObjectImplCopyWith<$Res> { + __$$MessageDeltaContentTextObjectImplCopyWithImpl( + _$MessageDeltaContentTextObjectImpl _value, + $Res Function(_$MessageDeltaContentTextObjectImpl) _then) : super(_value, _then); - /// Create a copy of MessageContentTextAnnotationsFilePath - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileId = null, + Object? index = null, + Object? type = null, + Object? text = freezed, }) { - return _then(_$MessageContentTextAnnotationsFilePathImpl( - fileId: null == fileId - ? _value.fileId - : fileId // ignore: cast_nullable_to_non_nullable + return _then(_$MessageDeltaContentTextObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable as String, + text: freezed == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as MessageDeltaContentText?, )); } + + @override + @pragma('vm:prefer-inline') + $MessageDeltaContentTextCopyWith<$Res>? get text { + if (_value.text == null) { + return null; + } + + return $MessageDeltaContentTextCopyWith<$Res>(_value.text!, (value) { + return _then(_value.copyWith(text: value)); + }); + } } /// @nodoc @JsonSerializable() -class _$MessageContentTextAnnotationsFilePathImpl - extends _MessageContentTextAnnotationsFilePath { - const _$MessageContentTextAnnotationsFilePathImpl( - {@JsonKey(name: 'file_id') required this.fileId}) +class _$MessageDeltaContentTextObjectImpl + extends MessageDeltaContentTextObject { + const _$MessageDeltaContentTextObjectImpl( + {required this.index, + required this.type, + @JsonKey(includeIfNull: false) this.text}) : super._(); - factory _$MessageContentTextAnnotationsFilePathImpl.fromJson( + factory _$MessageDeltaContentTextObjectImpl.fromJson( Map json) => - _$$MessageContentTextAnnotationsFilePathImplFromJson(json); + _$$MessageDeltaContentTextObjectImplFromJson(json); - /// The ID of the file that was generated. + /// The index of the content part in the message. @override - @JsonKey(name: 'file_id') - final String fileId; + final int index; + + /// Always `text`. + @override + final String type; + + /// The text content that is part of a message. + @override + @JsonKey(includeIfNull: false) + final MessageDeltaContentText? 
text; @override String toString() { - return 'MessageContentTextAnnotationsFilePath(fileId: $fileId)'; + return 'MessageDeltaContent.text(index: $index, type: $type, text: $text)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentTextAnnotationsFilePathImpl && - (identical(other.fileId, fileId) || other.fileId == fileId)); + other is _$MessageDeltaContentTextObjectImpl && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type) && + (identical(other.text, text) || other.text == text)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, fileId); + int get hashCode => Object.hash(runtimeType, index, type, text); - /// Create a copy of MessageContentTextAnnotationsFilePath - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageContentTextAnnotationsFilePathImplCopyWith< - _$MessageContentTextAnnotationsFilePathImpl> - get copyWith => __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl< - _$MessageContentTextAnnotationsFilePathImpl>(this, _$identity); + _$$MessageDeltaContentTextObjectImplCopyWith< + _$MessageDeltaContentTextObjectImpl> + get copyWith => __$$MessageDeltaContentTextObjectImplCopyWithImpl< + _$MessageDeltaContentTextObjectImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile) + imageFile, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text) + text, + }) { + return text(index, type, this.text); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + }) { + return text?.call(index, type, this.text); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + required TResult orElse(), + }) { + if (text != null) { + return text(index, type, this.text); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageDeltaContentImageFileObject value) + imageFile, + required TResult Function(MessageDeltaContentTextObject value) text, + }) { + return text(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult? Function(MessageDeltaContentTextObject value)? text, + }) { + return text?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult Function(MessageDeltaContentTextObject value)? 
text, + required TResult orElse(), + }) { + if (text != null) { + return text(this); + } + return orElse(); + } @override Map toJson() { - return _$$MessageContentTextAnnotationsFilePathImplToJson( + return _$$MessageDeltaContentTextObjectImplToJson( this, ); } } -abstract class _MessageContentTextAnnotationsFilePath - extends MessageContentTextAnnotationsFilePath { - const factory _MessageContentTextAnnotationsFilePath( - {@JsonKey(name: 'file_id') required final String fileId}) = - _$MessageContentTextAnnotationsFilePathImpl; - const _MessageContentTextAnnotationsFilePath._() : super._(); +abstract class MessageDeltaContentTextObject extends MessageDeltaContent { + const factory MessageDeltaContentTextObject( + {required final int index, + required final String type, + @JsonKey(includeIfNull: false) final MessageDeltaContentText? text}) = + _$MessageDeltaContentTextObjectImpl; + const MessageDeltaContentTextObject._() : super._(); - factory _MessageContentTextAnnotationsFilePath.fromJson( - Map json) = - _$MessageContentTextAnnotationsFilePathImpl.fromJson; + factory MessageDeltaContentTextObject.fromJson(Map json) = + _$MessageDeltaContentTextObjectImpl.fromJson; - /// The ID of the file that was generated. @override - @JsonKey(name: 'file_id') - String get fileId; - /// Create a copy of MessageContentTextAnnotationsFilePath - /// with the given fields replaced by the non-null parameter values. + /// The index of the content part in the message. + int get index; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageContentTextAnnotationsFilePathImplCopyWith< - _$MessageContentTextAnnotationsFilePathImpl> + + /// Always `text`. + String get type; + + /// The text content that is part of a message. + @JsonKey(includeIfNull: false) + MessageDeltaContentText? get text; + @override + @JsonKey(ignore: true) + _$$MessageDeltaContentTextObjectImplCopyWith< + _$MessageDeltaContentTextObjectImpl> get copyWith => throw _privateConstructorUsedError; } -MessageDeltaContentTextAnnotations _$MessageDeltaContentTextAnnotationsFromJson( +MessageContentTextAnnotations _$MessageContentTextAnnotationsFromJson( Map json) { switch (json['type']) { case 'file_citation': - return MessageDeltaContentTextAnnotationsFileCitationObject.fromJson( - json); + return MessageContentTextAnnotationsFileCitationObject.fromJson(json); case 'file_path': - return MessageDeltaContentTextAnnotationsFilePathObject.fromJson(json); + return MessageContentTextAnnotationsFilePathObject.fromJson(json); default: throw CheckedFromJsonException( json, 'type', - 'MessageDeltaContentTextAnnotations', + 'MessageContentTextAnnotations', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$MessageDeltaContentTextAnnotations { - /// The index of the annotation in the text content part. - int get index => throw _privateConstructorUsedError; - +mixin _$MessageContentTextAnnotations { /// Always `file_citation`. String get type => throw _privateConstructorUsedError; /// The text in the message content that needs to be replaced. - @JsonKey(includeIfNull: false) - String? get text => throw _privateConstructorUsedError; + String get text => throw _privateConstructorUsedError; /// The start index of the text in the message content that needs to be replaced. - @JsonKey(name: 'start_index', includeIfNull: false) - int? 
get startIndex => throw _privateConstructorUsedError; + @JsonKey(name: 'start_index') + int get startIndex => throw _privateConstructorUsedError; /// The end index of the text in the message content that needs to be replaced. - @JsonKey(name: 'end_index', includeIfNull: false) - int? get endIndex => throw _privateConstructorUsedError; + @JsonKey(name: 'end_index') + int get endIndex => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ required TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex) fileCitation, required TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex) filePath, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? fileCitation, TResult? Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? filePath, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? fileCitation, TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? 
text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? filePath, required TResult orElse(), }) => @@ -63197,198 +54843,161 @@ mixin _$MessageDeltaContentTextAnnotations { @optionalTypeArgs TResult map({ required TResult Function( - MessageDeltaContentTextAnnotationsFileCitationObject value) + MessageContentTextAnnotationsFileCitationObject value) fileCitation, - required TResult Function( - MessageDeltaContentTextAnnotationsFilePathObject value) + required TResult Function(MessageContentTextAnnotationsFilePathObject value) filePath, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function( - MessageDeltaContentTextAnnotationsFileCitationObject value)? + TResult? Function(MessageContentTextAnnotationsFileCitationObject value)? fileCitation, - TResult? Function(MessageDeltaContentTextAnnotationsFilePathObject value)? + TResult? Function(MessageContentTextAnnotationsFilePathObject value)? filePath, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function( - MessageDeltaContentTextAnnotationsFileCitationObject value)? + TResult Function(MessageContentTextAnnotationsFileCitationObject value)? fileCitation, - TResult Function(MessageDeltaContentTextAnnotationsFilePathObject value)? + TResult Function(MessageContentTextAnnotationsFilePathObject value)? filePath, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this MessageDeltaContentTextAnnotations to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageDeltaContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $MessageDeltaContentTextAnnotationsCopyWith< - MessageDeltaContentTextAnnotations> + @JsonKey(ignore: true) + $MessageContentTextAnnotationsCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $MessageDeltaContentTextAnnotationsCopyWith<$Res> { - factory $MessageDeltaContentTextAnnotationsCopyWith( - MessageDeltaContentTextAnnotations value, - $Res Function(MessageDeltaContentTextAnnotations) then) = - _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, - MessageDeltaContentTextAnnotations>; +abstract class $MessageContentTextAnnotationsCopyWith<$Res> { + factory $MessageContentTextAnnotationsCopyWith( + MessageContentTextAnnotations value, + $Res Function(MessageContentTextAnnotations) then) = + _$MessageContentTextAnnotationsCopyWithImpl<$Res, + MessageContentTextAnnotations>; @useResult $Res call( - {int index, - String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? 
endIndex}); + {String type, + String text, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex}); } /// @nodoc -class _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, - $Val extends MessageDeltaContentTextAnnotations> - implements $MessageDeltaContentTextAnnotationsCopyWith<$Res> { - _$MessageDeltaContentTextAnnotationsCopyWithImpl(this._value, this._then); +class _$MessageContentTextAnnotationsCopyWithImpl<$Res, + $Val extends MessageContentTextAnnotations> + implements $MessageContentTextAnnotationsCopyWith<$Res> { + _$MessageContentTextAnnotationsCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageDeltaContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, - Object? text = freezed, - Object? startIndex = freezed, - Object? endIndex = freezed, + Object? text = null, + Object? startIndex = null, + Object? endIndex = null, }) { return _then(_value.copyWith( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - text: freezed == text + text: null == text ? _value.text : text // ignore: cast_nullable_to_non_nullable - as String?, - startIndex: freezed == startIndex + as String, + startIndex: null == startIndex ? _value.startIndex : startIndex // ignore: cast_nullable_to_non_nullable - as int?, - endIndex: freezed == endIndex + as int, + endIndex: null == endIndex ? _value.endIndex : endIndex // ignore: cast_nullable_to_non_nullable - as int?, + as int, ) as $Val); } } /// @nodoc -abstract class _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< - $Res> implements $MessageDeltaContentTextAnnotationsCopyWith<$Res> { - factory _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith( - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl value, - $Res Function( - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl) +abstract class _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< + $Res> implements $MessageContentTextAnnotationsCopyWith<$Res> { + factory _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith( + _$MessageContentTextAnnotationsFileCitationObjectImpl value, + $Res Function(_$MessageContentTextAnnotationsFileCitationObjectImpl) then) = - __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< - $Res>; + __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {int index, - String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex}); + {String type, + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex}); - $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>? 
- get fileCitation; + $MessageContentTextAnnotationsFileCitationCopyWith<$Res> get fileCitation; } /// @nodoc -class __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< - $Res> - extends _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> +class __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl<$Res> + extends _$MessageContentTextAnnotationsCopyWithImpl<$Res, + _$MessageContentTextAnnotationsFileCitationObjectImpl> implements - _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< - $Res> { - __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl( - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl _value, - $Res Function(_$MessageDeltaContentTextAnnotationsFileCitationObjectImpl) + _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith<$Res> { + __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl( + _$MessageContentTextAnnotationsFileCitationObjectImpl _value, + $Res Function(_$MessageContentTextAnnotationsFileCitationObjectImpl) _then) : super(_value, _then); - /// Create a copy of MessageDeltaContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, - Object? text = freezed, - Object? fileCitation = freezed, - Object? startIndex = freezed, - Object? endIndex = freezed, + Object? text = null, + Object? fileCitation = null, + Object? startIndex = null, + Object? endIndex = null, }) { - return _then(_$MessageDeltaContentTextAnnotationsFileCitationObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$MessageContentTextAnnotationsFileCitationObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - text: freezed == text + text: null == text ? _value.text : text // ignore: cast_nullable_to_non_nullable - as String?, - fileCitation: freezed == fileCitation + as String, + fileCitation: null == fileCitation ? _value.fileCitation : fileCitation // ignore: cast_nullable_to_non_nullable - as MessageDeltaContentTextAnnotationsFileCitation?, - startIndex: freezed == startIndex + as MessageContentTextAnnotationsFileCitation, + startIndex: null == startIndex ? _value.startIndex : startIndex // ignore: cast_nullable_to_non_nullable - as int?, - endIndex: freezed == endIndex + as int, + endIndex: null == endIndex ? _value.endIndex : endIndex // ignore: cast_nullable_to_non_nullable - as int?, + as int, )); } - /// Create a copy of MessageDeltaContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>? 
- get fileCitation { - if (_value.fileCitation == null) { - return null; - } - - return $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>( - _value.fileCitation!, (value) { + $MessageContentTextAnnotationsFileCitationCopyWith<$Res> get fileCitation { + return $MessageContentTextAnnotationsFileCitationCopyWith<$Res>( + _value.fileCitation, (value) { return _then(_value.copyWith(fileCitation: value)); }); } @@ -63396,24 +55005,19 @@ class __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< /// @nodoc @JsonSerializable() -class _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl - extends MessageDeltaContentTextAnnotationsFileCitationObject { - const _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl( - {required this.index, - required this.type, - @JsonKey(includeIfNull: false) this.text, - @JsonKey(name: 'file_citation', includeIfNull: false) this.fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) this.startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) this.endIndex}) +class _$MessageContentTextAnnotationsFileCitationObjectImpl + extends MessageContentTextAnnotationsFileCitationObject { + const _$MessageContentTextAnnotationsFileCitationObjectImpl( + {required this.type, + required this.text, + @JsonKey(name: 'file_citation') required this.fileCitation, + @JsonKey(name: 'start_index') required this.startIndex, + @JsonKey(name: 'end_index') required this.endIndex}) : super._(); - factory _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl.fromJson( + factory _$MessageContentTextAnnotationsFileCitationObjectImpl.fromJson( Map json) => - _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplFromJson(json); - - /// The index of the annotation in the text content part. - @override - final int index; + _$$MessageContentTextAnnotationsFileCitationObjectImplFromJson(json); /// Always `file_citation`. @override @@ -63421,36 +55025,33 @@ class _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl /// The text in the message content that needs to be replaced. @override - @JsonKey(includeIfNull: false) - final String? text; + final String text; /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. @override - @JsonKey(name: 'file_citation', includeIfNull: false) - final MessageDeltaContentTextAnnotationsFileCitation? fileCitation; + @JsonKey(name: 'file_citation') + final MessageContentTextAnnotationsFileCitation fileCitation; /// The start index of the text in the message content that needs to be replaced. @override - @JsonKey(name: 'start_index', includeIfNull: false) - final int? startIndex; + @JsonKey(name: 'start_index') + final int startIndex; /// The end index of the text in the message content that needs to be replaced. @override - @JsonKey(name: 'end_index', includeIfNull: false) - final int? 
endIndex; + @JsonKey(name: 'end_index') + final int endIndex; @override String toString() { - return 'MessageDeltaContentTextAnnotations.fileCitation(index: $index, type: $type, text: $text, fileCitation: $fileCitation, startIndex: $startIndex, endIndex: $endIndex)'; + return 'MessageContentTextAnnotations.fileCitation(type: $type, text: $text, fileCitation: $fileCitation, startIndex: $startIndex, endIndex: $endIndex)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl && - (identical(other.index, index) || other.index == index) && + other is _$MessageContentTextAnnotationsFileCitationObjectImpl && (identical(other.type, type) || other.type == type) && (identical(other.text, text) || other.text == text) && (identical(other.fileCitation, fileCitation) || @@ -63461,101 +55062,91 @@ class _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl other.endIndex == endIndex)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash( - runtimeType, index, type, text, fileCitation, startIndex, endIndex); + int get hashCode => + Object.hash(runtimeType, type, text, fileCitation, startIndex, endIndex); - /// Create a copy of MessageDeltaContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> + _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< + _$MessageContentTextAnnotationsFileCitationObjectImpl> get copyWith => - __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl>( + __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl< + _$MessageContentTextAnnotationsFileCitationObjectImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex) fileCitation, required TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex) filePath, }) { - return fileCitation( - index, type, text, this.fileCitation, startIndex, endIndex); + return fileCitation(type, text, this.fileCitation, startIndex, endIndex); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? 
Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? fileCitation, TResult? Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? filePath, }) { return fileCitation?.call( - index, type, text, this.fileCitation, startIndex, endIndex); + type, text, this.fileCitation, startIndex, endIndex); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? fileCitation, TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? filePath, required TResult orElse(), }) { if (fileCitation != null) { - return fileCitation( - index, type, text, this.fileCitation, startIndex, endIndex); + return fileCitation(type, text, this.fileCitation, startIndex, endIndex); } return orElse(); } @@ -63564,10 +55155,9 @@ class _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl @optionalTypeArgs TResult map({ required TResult Function( - MessageDeltaContentTextAnnotationsFileCitationObject value) + MessageContentTextAnnotationsFileCitationObject value) fileCitation, - required TResult Function( - MessageDeltaContentTextAnnotationsFilePathObject value) + required TResult Function(MessageContentTextAnnotationsFilePathObject value) filePath, }) { return fileCitation(this); @@ -63576,10 +55166,9 @@ class _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function( - MessageDeltaContentTextAnnotationsFileCitationObject value)? + TResult? Function(MessageContentTextAnnotationsFileCitationObject value)? fileCitation, - TResult? Function(MessageDeltaContentTextAnnotationsFilePathObject value)? 
+ TResult? Function(MessageContentTextAnnotationsFilePathObject value)? filePath, }) { return fileCitation?.call(this); @@ -63588,10 +55177,9 @@ class _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl @override @optionalTypeArgs TResult maybeMap({ - TResult Function( - MessageDeltaContentTextAnnotationsFileCitationObject value)? + TResult Function(MessageContentTextAnnotationsFileCitationObject value)? fileCitation, - TResult Function(MessageDeltaContentTextAnnotationsFilePathObject value)? + TResult Function(MessageContentTextAnnotationsFilePathObject value)? filePath, required TResult orElse(), }) { @@ -63603,154 +55191,127 @@ class _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl @override Map toJson() { - return _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplToJson( + return _$$MessageContentTextAnnotationsFileCitationObjectImplToJson( this, ); } } -abstract class MessageDeltaContentTextAnnotationsFileCitationObject - extends MessageDeltaContentTextAnnotations { - const factory MessageDeltaContentTextAnnotationsFileCitationObject( - {required final int index, - required final String type, - @JsonKey(includeIfNull: false) final String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - final MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) final int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) - final int? - endIndex}) = _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl; - const MessageDeltaContentTextAnnotationsFileCitationObject._() : super._(); +abstract class MessageContentTextAnnotationsFileCitationObject + extends MessageContentTextAnnotations { + const factory MessageContentTextAnnotationsFileCitationObject( + {required final String type, + required final String text, + @JsonKey(name: 'file_citation') + required final MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') required final int startIndex, + @JsonKey(name: 'end_index') required final int endIndex}) = + _$MessageContentTextAnnotationsFileCitationObjectImpl; + const MessageContentTextAnnotationsFileCitationObject._() : super._(); - factory MessageDeltaContentTextAnnotationsFileCitationObject.fromJson( + factory MessageContentTextAnnotationsFileCitationObject.fromJson( Map json) = - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl.fromJson; + _$MessageContentTextAnnotationsFileCitationObjectImpl.fromJson; - /// The index of the annotation in the text content part. @override - int get index; /// Always `file_citation`. - @override String get type; + @override /// The text in the message content that needs to be replaced. - @override - @JsonKey(includeIfNull: false) - String? get text; + String get text; /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? get fileCitation; + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation get fileCitation; + @override /// The start index of the text in the message content that needs to be replaced. + @JsonKey(name: 'start_index') + int get startIndex; @override - @JsonKey(name: 'start_index', includeIfNull: false) - int? get startIndex; /// The end index of the text in the message content that needs to be replaced. 
+ @JsonKey(name: 'end_index') + int get endIndex; @override - @JsonKey(name: 'end_index', includeIfNull: false) - int? get endIndex; - - /// Create a copy of MessageDeltaContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> + @JsonKey(ignore: true) + _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< + _$MessageContentTextAnnotationsFileCitationObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< - $Res> implements $MessageDeltaContentTextAnnotationsCopyWith<$Res> { - factory _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith( - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl value, - $Res Function(_$MessageDeltaContentTextAnnotationsFilePathObjectImpl) +abstract class _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith<$Res> + implements $MessageContentTextAnnotationsCopyWith<$Res> { + factory _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith( + _$MessageContentTextAnnotationsFilePathObjectImpl value, + $Res Function(_$MessageContentTextAnnotationsFilePathObjectImpl) then) = - __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl< - $Res>; + __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {int index, - String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex}); + {String type, + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex}); - $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith<$Res>? - get filePath; + $MessageContentTextAnnotationsFilePathCopyWith<$Res> get filePath; } /// @nodoc -class __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> - extends _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> +class __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> + extends _$MessageContentTextAnnotationsCopyWithImpl<$Res, + _$MessageContentTextAnnotationsFilePathObjectImpl> implements - _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith<$Res> { - __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl( - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl _value, - $Res Function(_$MessageDeltaContentTextAnnotationsFilePathObjectImpl) - _then) + _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith<$Res> { + __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl( + _$MessageContentTextAnnotationsFilePathObjectImpl _value, + $Res Function(_$MessageContentTextAnnotationsFilePathObjectImpl) _then) : super(_value, _then); - /// Create a copy of MessageDeltaContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, - Object? text = freezed, - Object? 
filePath = freezed, - Object? startIndex = freezed, - Object? endIndex = freezed, + Object? text = null, + Object? filePath = null, + Object? startIndex = null, + Object? endIndex = null, }) { - return _then(_$MessageDeltaContentTextAnnotationsFilePathObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$MessageContentTextAnnotationsFilePathObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - text: freezed == text + text: null == text ? _value.text : text // ignore: cast_nullable_to_non_nullable - as String?, - filePath: freezed == filePath + as String, + filePath: null == filePath ? _value.filePath : filePath // ignore: cast_nullable_to_non_nullable - as MessageDeltaContentTextAnnotationsFilePathObjectFilePath?, - startIndex: freezed == startIndex + as MessageContentTextAnnotationsFilePath, + startIndex: null == startIndex ? _value.startIndex : startIndex // ignore: cast_nullable_to_non_nullable - as int?, - endIndex: freezed == endIndex + as int, + endIndex: null == endIndex ? _value.endIndex : endIndex // ignore: cast_nullable_to_non_nullable - as int?, + as int, )); } - /// Create a copy of MessageDeltaContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith<$Res>? - get filePath { - if (_value.filePath == null) { - return null; - } - - return $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< - $Res>(_value.filePath!, (value) { + $MessageContentTextAnnotationsFilePathCopyWith<$Res> get filePath { + return $MessageContentTextAnnotationsFilePathCopyWith<$Res>(_value.filePath, + (value) { return _then(_value.copyWith(filePath: value)); }); } @@ -63758,24 +55319,19 @@ class __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$MessageDeltaContentTextAnnotationsFilePathObjectImpl - extends MessageDeltaContentTextAnnotationsFilePathObject { - const _$MessageDeltaContentTextAnnotationsFilePathObjectImpl( - {required this.index, - required this.type, - @JsonKey(includeIfNull: false) this.text, - @JsonKey(name: 'file_path', includeIfNull: false) this.filePath, - @JsonKey(name: 'start_index', includeIfNull: false) this.startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) this.endIndex}) +class _$MessageContentTextAnnotationsFilePathObjectImpl + extends MessageContentTextAnnotationsFilePathObject { + const _$MessageContentTextAnnotationsFilePathObjectImpl( + {required this.type, + required this.text, + @JsonKey(name: 'file_path') required this.filePath, + @JsonKey(name: 'start_index') required this.startIndex, + @JsonKey(name: 'end_index') required this.endIndex}) : super._(); - factory _$MessageDeltaContentTextAnnotationsFilePathObjectImpl.fromJson( + factory _$MessageContentTextAnnotationsFilePathObjectImpl.fromJson( Map json) => - _$$MessageDeltaContentTextAnnotationsFilePathObjectImplFromJson(json); - - /// The index of the annotation in the text content part. - @override - final int index; + _$$MessageContentTextAnnotationsFilePathObjectImplFromJson(json); /// Always `file_path`. @override @@ -63783,35 +55339,33 @@ class _$MessageDeltaContentTextAnnotationsFilePathObjectImpl /// The text in the message content that needs to be replaced. @override - @JsonKey(includeIfNull: false) - final String? 
text; + final String text; /// No Description @override - @JsonKey(name: 'file_path', includeIfNull: false) - final MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath; + @JsonKey(name: 'file_path') + final MessageContentTextAnnotationsFilePath filePath; /// No Description @override - @JsonKey(name: 'start_index', includeIfNull: false) - final int? startIndex; + @JsonKey(name: 'start_index') + final int startIndex; /// No Description @override - @JsonKey(name: 'end_index', includeIfNull: false) - final int? endIndex; + @JsonKey(name: 'end_index') + final int endIndex; @override String toString() { - return 'MessageDeltaContentTextAnnotations.filePath(index: $index, type: $type, text: $text, filePath: $filePath, startIndex: $startIndex, endIndex: $endIndex)'; + return 'MessageContentTextAnnotations.filePath(type: $type, text: $text, filePath: $filePath, startIndex: $startIndex, endIndex: $endIndex)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageDeltaContentTextAnnotationsFilePathObjectImpl && - (identical(other.index, index) || other.index == index) && + other is _$MessageContentTextAnnotationsFilePathObjectImpl && (identical(other.type, type) || other.type == type) && (identical(other.text, text) || other.text == text) && (identical(other.filePath, filePath) || @@ -63822,99 +55376,90 @@ class _$MessageDeltaContentTextAnnotationsFilePathObjectImpl other.endIndex == endIndex)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash( - runtimeType, index, type, text, filePath, startIndex, endIndex); + int get hashCode => + Object.hash(runtimeType, type, text, filePath, startIndex, endIndex); - /// Create a copy of MessageDeltaContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> + _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith< + _$MessageContentTextAnnotationsFilePathObjectImpl> get copyWith => - __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl< - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl>( + __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl< + _$MessageContentTextAnnotationsFilePathObjectImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex) fileCitation, required TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? 
endIndex) + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex) filePath, }) { - return filePath(index, type, text, this.filePath, startIndex, endIndex); + return filePath(type, text, this.filePath, startIndex, endIndex); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? fileCitation, TResult? Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? filePath, }) { - return filePath?.call( - index, type, text, this.filePath, startIndex, endIndex); + return filePath?.call(type, text, this.filePath, startIndex, endIndex); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? fileCitation, TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? 
filePath, required TResult orElse(), }) { if (filePath != null) { - return filePath(index, type, text, this.filePath, startIndex, endIndex); + return filePath(type, text, this.filePath, startIndex, endIndex); } return orElse(); } @@ -63923,10 +55468,9 @@ class _$MessageDeltaContentTextAnnotationsFilePathObjectImpl @optionalTypeArgs TResult map({ required TResult Function( - MessageDeltaContentTextAnnotationsFileCitationObject value) + MessageContentTextAnnotationsFileCitationObject value) fileCitation, - required TResult Function( - MessageDeltaContentTextAnnotationsFilePathObject value) + required TResult Function(MessageContentTextAnnotationsFilePathObject value) filePath, }) { return filePath(this); @@ -63935,10 +55479,9 @@ class _$MessageDeltaContentTextAnnotationsFilePathObjectImpl @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function( - MessageDeltaContentTextAnnotationsFileCitationObject value)? + TResult? Function(MessageContentTextAnnotationsFileCitationObject value)? fileCitation, - TResult? Function(MessageDeltaContentTextAnnotationsFilePathObject value)? + TResult? Function(MessageContentTextAnnotationsFilePathObject value)? filePath, }) { return filePath?.call(this); @@ -63947,10 +55490,9 @@ class _$MessageDeltaContentTextAnnotationsFilePathObjectImpl @override @optionalTypeArgs TResult maybeMap({ - TResult Function( - MessageDeltaContentTextAnnotationsFileCitationObject value)? + TResult Function(MessageContentTextAnnotationsFileCitationObject value)? fileCitation, - TResult Function(MessageDeltaContentTextAnnotationsFilePathObject value)? + TResult Function(MessageContentTextAnnotationsFilePathObject value)? filePath, required TResult orElse(), }) { @@ -63962,788 +55504,673 @@ class _$MessageDeltaContentTextAnnotationsFilePathObjectImpl @override Map toJson() { - return _$$MessageDeltaContentTextAnnotationsFilePathObjectImplToJson( + return _$$MessageContentTextAnnotationsFilePathObjectImplToJson( this, ); } } -abstract class MessageDeltaContentTextAnnotationsFilePathObject - extends MessageDeltaContentTextAnnotations { - const factory MessageDeltaContentTextAnnotationsFilePathObject( - {required final int index, - required final String type, - @JsonKey(includeIfNull: false) final String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - final MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) final int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) - final int? 
- endIndex}) = _$MessageDeltaContentTextAnnotationsFilePathObjectImpl; - const MessageDeltaContentTextAnnotationsFilePathObject._() : super._(); +abstract class MessageContentTextAnnotationsFilePathObject + extends MessageContentTextAnnotations { + const factory MessageContentTextAnnotationsFilePathObject( + {required final String type, + required final String text, + @JsonKey(name: 'file_path') + required final MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') required final int startIndex, + @JsonKey(name: 'end_index') required final int endIndex}) = + _$MessageContentTextAnnotationsFilePathObjectImpl; + const MessageContentTextAnnotationsFilePathObject._() : super._(); - factory MessageDeltaContentTextAnnotationsFilePathObject.fromJson( + factory MessageContentTextAnnotationsFilePathObject.fromJson( Map json) = - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl.fromJson; + _$MessageContentTextAnnotationsFilePathObjectImpl.fromJson; - /// The index of the annotation in the text content part. @override - int get index; /// Always `file_path`. - @override String get type; - - /// The text in the message content that needs to be replaced. @override - @JsonKey(includeIfNull: false) - String? get text; - /// No Description - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? get filePath; + /// The text in the message content that needs to be replaced. + String get text; /// No Description + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath get filePath; @override - @JsonKey(name: 'start_index', includeIfNull: false) - int? get startIndex; /// No Description + @JsonKey(name: 'start_index') + int get startIndex; @override - @JsonKey(name: 'end_index', includeIfNull: false) - int? get endIndex; - /// Create a copy of MessageDeltaContentTextAnnotations - /// with the given fields replaced by the non-null parameter values. + /// No Description + @JsonKey(name: 'end_index') + int get endIndex; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> + @JsonKey(ignore: true) + _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith< + _$MessageContentTextAnnotationsFilePathObjectImpl> get copyWith => throw _privateConstructorUsedError; } -MessageDeltaContentTextAnnotationsFilePathObjectFilePath - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathFromJson( - Map json) { - return _MessageDeltaContentTextAnnotationsFilePathObjectFilePath.fromJson( - json); +MessageContentTextAnnotationsFilePath + _$MessageContentTextAnnotationsFilePathFromJson(Map json) { + return _MessageContentTextAnnotationsFilePath.fromJson(json); } /// @nodoc -mixin _$MessageDeltaContentTextAnnotationsFilePathObjectFilePath { +mixin _$MessageContentTextAnnotationsFilePath { /// The ID of the file that was generated. - @JsonKey(name: 'file_id', includeIfNull: false) - String? get fileId => throw _privateConstructorUsedError; + @JsonKey(name: 'file_id') + String get fileId => throw _privateConstructorUsedError; - /// Serializes this MessageDeltaContentTextAnnotationsFilePathObjectFilePath to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< - MessageDeltaContentTextAnnotationsFilePathObjectFilePath> + @JsonKey(ignore: true) + $MessageContentTextAnnotationsFilePathCopyWith< + MessageContentTextAnnotationsFilePath> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< - $Res> { - factory $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith( - MessageDeltaContentTextAnnotationsFilePathObjectFilePath value, - $Res Function( - MessageDeltaContentTextAnnotationsFilePathObjectFilePath) - then) = - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< - $Res, MessageDeltaContentTextAnnotationsFilePathObjectFilePath>; - @useResult - $Res call({@JsonKey(name: 'file_id', includeIfNull: false) String? fileId}); -} - -/// @nodoc -class _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< - $Res, - $Val extends MessageDeltaContentTextAnnotationsFilePathObjectFilePath> - implements - $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< - $Res> { - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl( - this._value, this._then); +abstract class $MessageContentTextAnnotationsFilePathCopyWith<$Res> { + factory $MessageContentTextAnnotationsFilePathCopyWith( + MessageContentTextAnnotationsFilePath value, + $Res Function(MessageContentTextAnnotationsFilePath) then) = + _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, + MessageContentTextAnnotationsFilePath>; + @useResult + $Res call({@JsonKey(name: 'file_id') String fileId}); +} + +/// @nodoc +class _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, + $Val extends MessageContentTextAnnotationsFilePath> + implements $MessageContentTextAnnotationsFilePathCopyWith<$Res> { + _$MessageContentTextAnnotationsFilePathCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileId = freezed, + Object? fileId = null, }) { return _then(_value.copyWith( - fileId: freezed == fileId + fileId: null == fileId ? 
_value.fileId : fileId // ignore: cast_nullable_to_non_nullable - as String?, + as String, ) as $Val); } } /// @nodoc -abstract class _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< - $Res> - implements - $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< - $Res> { - factory _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith( - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl value, - $Res Function( - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl) - then) = - __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl< - $Res>; +abstract class _$$MessageContentTextAnnotationsFilePathImplCopyWith<$Res> + implements $MessageContentTextAnnotationsFilePathCopyWith<$Res> { + factory _$$MessageContentTextAnnotationsFilePathImplCopyWith( + _$MessageContentTextAnnotationsFilePathImpl value, + $Res Function(_$MessageContentTextAnnotationsFilePathImpl) then) = + __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(name: 'file_id', includeIfNull: false) String? fileId}); + $Res call({@JsonKey(name: 'file_id') String fileId}); } /// @nodoc -class __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl< - $Res> - extends _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< - $Res, _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> - implements - _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< - $Res> { - __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl( - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl _value, - $Res Function( - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl) - _then) +class __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl<$Res> + extends _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, + _$MessageContentTextAnnotationsFilePathImpl> + implements _$$MessageContentTextAnnotationsFilePathImplCopyWith<$Res> { + __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl( + _$MessageContentTextAnnotationsFilePathImpl _value, + $Res Function(_$MessageContentTextAnnotationsFilePathImpl) _then) : super(_value, _then); - /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileId = freezed, + Object? fileId = null, }) { - return _then(_$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl( - fileId: freezed == fileId + return _then(_$MessageContentTextAnnotationsFilePathImpl( + fileId: null == fileId ? 
_value.fileId : fileId // ignore: cast_nullable_to_non_nullable - as String?, + as String, )); } } /// @nodoc @JsonSerializable() -class _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl - extends _MessageDeltaContentTextAnnotationsFilePathObjectFilePath { - const _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl( - {@JsonKey(name: 'file_id', includeIfNull: false) this.fileId}) +class _$MessageContentTextAnnotationsFilePathImpl + extends _MessageContentTextAnnotationsFilePath { + const _$MessageContentTextAnnotationsFilePathImpl( + {@JsonKey(name: 'file_id') required this.fileId}) : super._(); - factory _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl.fromJson( + factory _$MessageContentTextAnnotationsFilePathImpl.fromJson( Map json) => - _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplFromJson( - json); + _$$MessageContentTextAnnotationsFilePathImplFromJson(json); /// The ID of the file that was generated. @override - @JsonKey(name: 'file_id', includeIfNull: false) - final String? fileId; + @JsonKey(name: 'file_id') + final String fileId; @override String toString() { - return 'MessageDeltaContentTextAnnotationsFilePathObjectFilePath(fileId: $fileId)'; + return 'MessageContentTextAnnotationsFilePath(fileId: $fileId)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl && + other is _$MessageContentTextAnnotationsFilePathImpl && (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, fileId); - /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> - get copyWith => - __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl< - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl>( - this, _$identity); + _$$MessageContentTextAnnotationsFilePathImplCopyWith< + _$MessageContentTextAnnotationsFilePathImpl> + get copyWith => __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl< + _$MessageContentTextAnnotationsFilePathImpl>(this, _$identity); @override Map toJson() { - return _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplToJson( + return _$$MessageContentTextAnnotationsFilePathImplToJson( this, ); } } -abstract class _MessageDeltaContentTextAnnotationsFilePathObjectFilePath - extends MessageDeltaContentTextAnnotationsFilePathObjectFilePath { - const factory _MessageDeltaContentTextAnnotationsFilePathObjectFilePath( - {@JsonKey(name: 'file_id', includeIfNull: false) - final String? 
fileId}) = - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl; - const _MessageDeltaContentTextAnnotationsFilePathObjectFilePath._() - : super._(); +abstract class _MessageContentTextAnnotationsFilePath + extends MessageContentTextAnnotationsFilePath { + const factory _MessageContentTextAnnotationsFilePath( + {@JsonKey(name: 'file_id') required final String fileId}) = + _$MessageContentTextAnnotationsFilePathImpl; + const _MessageContentTextAnnotationsFilePath._() : super._(); - factory _MessageDeltaContentTextAnnotationsFilePathObjectFilePath.fromJson( + factory _MessageContentTextAnnotationsFilePath.fromJson( Map json) = - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl.fromJson; + _$MessageContentTextAnnotationsFilePathImpl.fromJson; - /// The ID of the file that was generated. @override - @JsonKey(name: 'file_id', includeIfNull: false) - String? get fileId; - /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath - /// with the given fields replaced by the non-null parameter values. + /// The ID of the file that was generated. + @JsonKey(name: 'file_id') + String get fileId; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> + @JsonKey(ignore: true) + _$$MessageContentTextAnnotationsFilePathImplCopyWith< + _$MessageContentTextAnnotationsFilePathImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDetails _$RunStepDetailsFromJson(Map json) { +MessageDeltaContentTextAnnotations _$MessageDeltaContentTextAnnotationsFromJson( + Map json) { switch (json['type']) { - case 'message_creation': - return RunStepDetailsMessageCreationObject.fromJson(json); - case 'tool_calls': - return RunStepDetailsToolCallsObject.fromJson(json); + case 'file_citation': + return MessageDeltaContentTextAnnotationsFileCitationObject.fromJson( + json); + case 'file_path': + return MessageDeltaContentTextAnnotationsFilePathObject.fromJson(json); default: - throw CheckedFromJsonException(json, 'type', 'RunStepDetails', + throw CheckedFromJsonException( + json, + 'type', + 'MessageDeltaContentTextAnnotations', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$RunStepDetails { - /// Always `message_creation`. +mixin _$MessageDeltaContentTextAnnotations { + /// The index of the annotation in the text content part. + int get index => throw _privateConstructorUsedError; + + /// Always `file_citation`. String get type => throw _privateConstructorUsedError; + + /// The text in the message content that needs to be replaced. + @JsonKey(includeIfNull: false) + String? get text => throw _privateConstructorUsedError; + + /// The start index of the text in the message content that needs to be replaced. + @JsonKey(name: 'start_index', includeIfNull: false) + int? get startIndex => throw _privateConstructorUsedError; + + /// The end index of the text in the message content that needs to be replaced. + @JsonKey(name: 'end_index', includeIfNull: false) + int? get endIndex => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ required TResult Function( + int index, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation) - messageCreation, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? 
fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) + fileCitation, required TResult Function( + int index, String type, - @JsonKey(name: 'tool_calls') - List toolCalls) - toolCalls, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) + filePath, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function( + int index, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation)? - messageCreation, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + fileCitation, TResult? Function( + int index, String type, - @JsonKey(name: 'tool_calls') - List toolCalls)? - toolCalls, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + filePath, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function( + int index, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation)? - messageCreation, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + fileCitation, TResult Function( + int index, String type, - @JsonKey(name: 'tool_calls') - List toolCalls)? - toolCalls, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + filePath, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsMessageCreationObject value) - messageCreation, - required TResult Function(RunStepDetailsToolCallsObject value) toolCalls, + required TResult Function( + MessageDeltaContentTextAnnotationsFileCitationObject value) + fileCitation, + required TResult Function( + MessageDeltaContentTextAnnotationsFilePathObject value) + filePath, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsMessageCreationObject value)? - messageCreation, - TResult? Function(RunStepDetailsToolCallsObject value)? toolCalls, + TResult? Function( + MessageDeltaContentTextAnnotationsFileCitationObject value)? + fileCitation, + TResult? Function(MessageDeltaContentTextAnnotationsFilePathObject value)? 
+ filePath, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsMessageCreationObject value)? - messageCreation, - TResult Function(RunStepDetailsToolCallsObject value)? toolCalls, + TResult Function( + MessageDeltaContentTextAnnotationsFileCitationObject value)? + fileCitation, + TResult Function(MessageDeltaContentTextAnnotationsFilePathObject value)? + filePath, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this RunStepDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDetails - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $RunStepDetailsCopyWith get copyWith => - throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageDeltaContentTextAnnotationsCopyWith< + MessageDeltaContentTextAnnotations> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepDetailsCopyWith<$Res> { - factory $RunStepDetailsCopyWith( - RunStepDetails value, $Res Function(RunStepDetails) then) = - _$RunStepDetailsCopyWithImpl<$Res, RunStepDetails>; +abstract class $MessageDeltaContentTextAnnotationsCopyWith<$Res> { + factory $MessageDeltaContentTextAnnotationsCopyWith( + MessageDeltaContentTextAnnotations value, + $Res Function(MessageDeltaContentTextAnnotations) then) = + _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, + MessageDeltaContentTextAnnotations>; @useResult - $Res call({String type}); + $Res call( + {int index, + String type, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex}); } /// @nodoc -class _$RunStepDetailsCopyWithImpl<$Res, $Val extends RunStepDetails> - implements $RunStepDetailsCopyWith<$Res> { - _$RunStepDetailsCopyWithImpl(this._value, this._then); +class _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, + $Val extends MessageDeltaContentTextAnnotations> + implements $MessageDeltaContentTextAnnotationsCopyWith<$Res> { + _$MessageDeltaContentTextAnnotationsCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDetails - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, + Object? text = freezed, + Object? startIndex = freezed, + Object? endIndex = freezed, }) { return _then(_value.copyWith( - type: null == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$RunStepDetailsMessageCreationObjectImplCopyWith<$Res> - implements $RunStepDetailsCopyWith<$Res> { - factory _$$RunStepDetailsMessageCreationObjectImplCopyWith( - _$RunStepDetailsMessageCreationObjectImpl value, - $Res Function(_$RunStepDetailsMessageCreationObjectImpl) then) = - __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation}); - - $RunStepDetailsMessageCreationCopyWith<$Res> get messageCreation; -} - -/// @nodoc -class __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsCopyWithImpl<$Res, - _$RunStepDetailsMessageCreationObjectImpl> - implements _$$RunStepDetailsMessageCreationObjectImplCopyWith<$Res> { - __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl( - _$RunStepDetailsMessageCreationObjectImpl _value, - $Res Function(_$RunStepDetailsMessageCreationObjectImpl) _then) - : super(_value, _then); - - /// Create a copy of RunStepDetails - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - Object? messageCreation = null, - }) { - return _then(_$RunStepDetailsMessageCreationObjectImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - messageCreation: null == messageCreation - ? _value.messageCreation - : messageCreation // ignore: cast_nullable_to_non_nullable - as RunStepDetailsMessageCreation, - )); - } - - /// Create a copy of RunStepDetails - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $RunStepDetailsMessageCreationCopyWith<$Res> get messageCreation { - return $RunStepDetailsMessageCreationCopyWith<$Res>(_value.messageCreation, - (value) { - return _then(_value.copyWith(messageCreation: value)); - }); - } -} - -/// @nodoc -@JsonSerializable() -class _$RunStepDetailsMessageCreationObjectImpl - extends RunStepDetailsMessageCreationObject { - const _$RunStepDetailsMessageCreationObjectImpl( - {required this.type, - @JsonKey(name: 'message_creation') required this.messageCreation}) - : super._(); - - factory _$RunStepDetailsMessageCreationObjectImpl.fromJson( - Map json) => - _$$RunStepDetailsMessageCreationObjectImplFromJson(json); - - /// Always `message_creation`. - @override - final String type; - - /// Details of the message creation by the run step. - @override - @JsonKey(name: 'message_creation') - final RunStepDetailsMessageCreation messageCreation; - - @override - String toString() { - return 'RunStepDetails.messageCreation(type: $type, messageCreation: $messageCreation)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$RunStepDetailsMessageCreationObjectImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.messageCreation, messageCreation) || - other.messageCreation == messageCreation)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, type, messageCreation); - - /// Create a copy of RunStepDetails - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$RunStepDetailsMessageCreationObjectImplCopyWith< - _$RunStepDetailsMessageCreationObjectImpl> - get copyWith => __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl< - _$RunStepDetailsMessageCreationObjectImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function( - String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation) - messageCreation, - required TResult Function( - String type, - @JsonKey(name: 'tool_calls') - List toolCalls) - toolCalls, - }) { - return messageCreation(type, this.messageCreation); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function( - String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation)? - messageCreation, - TResult? Function( - String type, - @JsonKey(name: 'tool_calls') - List toolCalls)? - toolCalls, - }) { - return messageCreation?.call(type, this.messageCreation); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function( - String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation)? - messageCreation, - TResult Function( - String type, - @JsonKey(name: 'tool_calls') - List toolCalls)? - toolCalls, - required TResult orElse(), - }) { - if (messageCreation != null) { - return messageCreation(type, this.messageCreation); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(RunStepDetailsMessageCreationObject value) - messageCreation, - required TResult Function(RunStepDetailsToolCallsObject value) toolCalls, - }) { - return messageCreation(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(RunStepDetailsMessageCreationObject value)? - messageCreation, - TResult? Function(RunStepDetailsToolCallsObject value)? toolCalls, - }) { - return messageCreation?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(RunStepDetailsMessageCreationObject value)? - messageCreation, - TResult Function(RunStepDetailsToolCallsObject value)? toolCalls, - required TResult orElse(), - }) { - if (messageCreation != null) { - return messageCreation(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$RunStepDetailsMessageCreationObjectImplToJson( - this, - ); - } -} - -abstract class RunStepDetailsMessageCreationObject extends RunStepDetails { - const factory RunStepDetailsMessageCreationObject( - {required final String type, - @JsonKey(name: 'message_creation') - required final RunStepDetailsMessageCreation messageCreation}) = - _$RunStepDetailsMessageCreationObjectImpl; - const RunStepDetailsMessageCreationObject._() : super._(); - - factory RunStepDetailsMessageCreationObject.fromJson( - Map json) = - _$RunStepDetailsMessageCreationObjectImpl.fromJson; - - /// Always `message_creation`. - @override - String get type; - - /// Details of the message creation by the run step. - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation get messageCreation; - - /// Create a copy of RunStepDetails - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDetailsMessageCreationObjectImplCopyWith< - _$RunStepDetailsMessageCreationObjectImpl> - get copyWith => throw _privateConstructorUsedError; + index: null == index + ? 
_value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + text: freezed == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String?, + startIndex: freezed == startIndex + ? _value.startIndex + : startIndex // ignore: cast_nullable_to_non_nullable + as int?, + endIndex: freezed == endIndex + ? _value.endIndex + : endIndex // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } } /// @nodoc -abstract class _$$RunStepDetailsToolCallsObjectImplCopyWith<$Res> - implements $RunStepDetailsCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsObjectImplCopyWith( - _$RunStepDetailsToolCallsObjectImpl value, - $Res Function(_$RunStepDetailsToolCallsObjectImpl) then) = - __$$RunStepDetailsToolCallsObjectImplCopyWithImpl<$Res>; +abstract class _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< + $Res> implements $MessageDeltaContentTextAnnotationsCopyWith<$Res> { + factory _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith( + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl value, + $Res Function( + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl) + then) = + __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< + $Res>; @override @useResult $Res call( - {String type, - @JsonKey(name: 'tool_calls') List toolCalls}); + {int index, + String type, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex}); + + $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>? + get fileCitation; } /// @nodoc -class __$$RunStepDetailsToolCallsObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsCopyWithImpl<$Res, - _$RunStepDetailsToolCallsObjectImpl> - implements _$$RunStepDetailsToolCallsObjectImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsObjectImpl) _then) +class __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< + $Res> + extends _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> + implements + _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< + $Res> { + __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl( + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl _value, + $Res Function(_$MessageDeltaContentTextAnnotationsFileCitationObjectImpl) + _then) : super(_value, _then); - /// Create a copy of RunStepDetails - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, - Object? toolCalls = null, + Object? text = freezed, + Object? fileCitation = freezed, + Object? startIndex = freezed, + Object? endIndex = freezed, }) { - return _then(_$RunStepDetailsToolCallsObjectImpl( + return _then(_$MessageDeltaContentTextAnnotationsFileCitationObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - toolCalls: null == toolCalls - ? 
_value._toolCalls - : toolCalls // ignore: cast_nullable_to_non_nullable - as List, + text: freezed == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String?, + fileCitation: freezed == fileCitation + ? _value.fileCitation + : fileCitation // ignore: cast_nullable_to_non_nullable + as MessageDeltaContentTextAnnotationsFileCitation?, + startIndex: freezed == startIndex + ? _value.startIndex + : startIndex // ignore: cast_nullable_to_non_nullable + as int?, + endIndex: freezed == endIndex + ? _value.endIndex + : endIndex // ignore: cast_nullable_to_non_nullable + as int?, )); } + + @override + @pragma('vm:prefer-inline') + $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>? + get fileCitation { + if (_value.fileCitation == null) { + return null; + } + + return $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>( + _value.fileCitation!, (value) { + return _then(_value.copyWith(fileCitation: value)); + }); + } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsObjectImpl - extends RunStepDetailsToolCallsObject { - const _$RunStepDetailsToolCallsObjectImpl( - {required this.type, - @JsonKey(name: 'tool_calls') - required final List toolCalls}) - : _toolCalls = toolCalls, - super._(); +class _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl + extends MessageDeltaContentTextAnnotationsFileCitationObject { + const _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl( + {required this.index, + required this.type, + @JsonKey(includeIfNull: false) this.text, + @JsonKey(name: 'file_citation', includeIfNull: false) this.fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) this.startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) this.endIndex}) + : super._(); - factory _$RunStepDetailsToolCallsObjectImpl.fromJson( + factory _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsObjectImplFromJson(json); + _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplFromJson(json); - /// Always `tool_calls`. + /// The index of the annotation in the text content part. + @override + final int index; + + /// Always `file_citation`. @override final String type; - /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - final List _toolCalls; + /// The text in the message content that needs to be replaced. + @override + @JsonKey(includeIfNull: false) + final String? text; - /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. @override - @JsonKey(name: 'tool_calls') - List get toolCalls { - if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_toolCalls); - } + @JsonKey(name: 'file_citation', includeIfNull: false) + final MessageDeltaContentTextAnnotationsFileCitation? fileCitation; + + /// The start index of the text in the message content that needs to be replaced. + @override + @JsonKey(name: 'start_index', includeIfNull: false) + final int? startIndex; + + /// The end index of the text in the message content that needs to be replaced. + @override + @JsonKey(name: 'end_index', includeIfNull: false) + final int? 
endIndex; @override String toString() { - return 'RunStepDetails.toolCalls(type: $type, toolCalls: $toolCalls)'; + return 'MessageDeltaContentTextAnnotations.fileCitation(index: $index, type: $type, text: $text, fileCitation: $fileCitation, startIndex: $startIndex, endIndex: $endIndex)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsObjectImpl && + other + is _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl && + (identical(other.index, index) || other.index == index) && (identical(other.type, type) || other.type == type) && - const DeepCollectionEquality() - .equals(other._toolCalls, _toolCalls)); + (identical(other.text, text) || other.text == text) && + (identical(other.fileCitation, fileCitation) || + other.fileCitation == fileCitation) && + (identical(other.startIndex, startIndex) || + other.startIndex == startIndex) && + (identical(other.endIndex, endIndex) || + other.endIndex == endIndex)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash( - runtimeType, type, const DeepCollectionEquality().hash(_toolCalls)); + runtimeType, index, type, text, fileCitation, startIndex, endIndex); - /// Create a copy of RunStepDetails - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsObjectImplCopyWith< - _$RunStepDetailsToolCallsObjectImpl> - get copyWith => __$$RunStepDetailsToolCallsObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsObjectImpl>(this, _$identity); + _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> + get copyWith => + __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( + int index, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation) - messageCreation, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) + fileCitation, required TResult Function( + int index, String type, - @JsonKey(name: 'tool_calls') - List toolCalls) - toolCalls, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) + filePath, }) { - return toolCalls(type, this.toolCalls); + return fileCitation( + index, type, text, this.fileCitation, startIndex, endIndex); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( + int index, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation)? - messageCreation, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? 
fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + fileCitation, TResult? Function( + int index, String type, - @JsonKey(name: 'tool_calls') - List toolCalls)? - toolCalls, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + filePath, }) { - return toolCalls?.call(type, this.toolCalls); + return fileCitation?.call( + index, type, text, this.fileCitation, startIndex, endIndex); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( + int index, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation)? - messageCreation, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + fileCitation, TResult Function( + int index, String type, - @JsonKey(name: 'tool_calls') - List toolCalls)? - toolCalls, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + filePath, required TResult orElse(), }) { - if (toolCalls != null) { - return toolCalls(type, this.toolCalls); + if (fileCitation != null) { + return fileCitation( + index, type, text, this.fileCitation, startIndex, endIndex); } return orElse(); } @@ -64751,373 +56178,350 @@ class _$RunStepDetailsToolCallsObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsMessageCreationObject value) - messageCreation, - required TResult Function(RunStepDetailsToolCallsObject value) toolCalls, + required TResult Function( + MessageDeltaContentTextAnnotationsFileCitationObject value) + fileCitation, + required TResult Function( + MessageDeltaContentTextAnnotationsFilePathObject value) + filePath, }) { - return toolCalls(this); + return fileCitation(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsMessageCreationObject value)? - messageCreation, - TResult? Function(RunStepDetailsToolCallsObject value)? toolCalls, + TResult? Function( + MessageDeltaContentTextAnnotationsFileCitationObject value)? + fileCitation, + TResult? Function(MessageDeltaContentTextAnnotationsFilePathObject value)? + filePath, }) { - return toolCalls?.call(this); + return fileCitation?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsMessageCreationObject value)? - messageCreation, - TResult Function(RunStepDetailsToolCallsObject value)? toolCalls, + TResult Function( + MessageDeltaContentTextAnnotationsFileCitationObject value)? + fileCitation, + TResult Function(MessageDeltaContentTextAnnotationsFilePathObject value)? 
+ filePath, required TResult orElse(), }) { - if (toolCalls != null) { - return toolCalls(this); + if (fileCitation != null) { + return fileCitation(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDetailsToolCallsObjectImplToJson( + return _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplToJson( this, ); } } -abstract class RunStepDetailsToolCallsObject extends RunStepDetails { - const factory RunStepDetailsToolCallsObject( - {required final String type, - @JsonKey(name: 'tool_calls') - required final List toolCalls}) = - _$RunStepDetailsToolCallsObjectImpl; - const RunStepDetailsToolCallsObject._() : super._(); +abstract class MessageDeltaContentTextAnnotationsFileCitationObject + extends MessageDeltaContentTextAnnotations { + const factory MessageDeltaContentTextAnnotationsFileCitationObject( + {required final int index, + required final String type, + @JsonKey(includeIfNull: false) final String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + final MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) final int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) + final int? + endIndex}) = _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl; + const MessageDeltaContentTextAnnotationsFileCitationObject._() : super._(); - factory RunStepDetailsToolCallsObject.fromJson(Map json) = - _$RunStepDetailsToolCallsObjectImpl.fromJson; + factory MessageDeltaContentTextAnnotationsFileCitationObject.fromJson( + Map json) = + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl.fromJson; - /// Always `tool_calls`. @override - String get type; - - /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - @JsonKey(name: 'tool_calls') - List get toolCalls; - /// Create a copy of RunStepDetails - /// with the given fields replaced by the non-null parameter values. + /// The index of the annotation in the text content part. + int get index; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDetailsToolCallsObjectImplCopyWith< - _$RunStepDetailsToolCallsObjectImpl> - get copyWith => throw _privateConstructorUsedError; -} - -RunStepDeltaDetails _$RunStepDeltaDetailsFromJson(Map json) { - switch (json['type']) { - case 'message_creation': - return RunStepDeltaStepDetailsMessageCreationObject.fromJson(json); - case 'tool_calls': - return RunStepDeltaStepDetailsToolCallsObject.fromJson(json); - - default: - throw CheckedFromJsonException(json, 'type', 'RunStepDeltaDetails', - 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$RunStepDeltaDetails { - /// Always `message_creation`. - String get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function( - String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation) - messageCreation, - required TResult Function( - String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls) - toolCalls, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function( - String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation)? - messageCreation, - TResult? 
Function( - String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls)? - toolCalls, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function( - String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation)? - messageCreation, - TResult Function( - String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls)? - toolCalls, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function( - RunStepDeltaStepDetailsMessageCreationObject value) - messageCreation, - required TResult Function(RunStepDeltaStepDetailsToolCallsObject value) - toolCalls, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsMessageCreationObject value)? - messageCreation, - TResult? Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsMessageCreationObject value)? - messageCreation, - TResult Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - - /// Serializes this RunStepDeltaDetails to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - /// Create a copy of RunStepDeltaDetails - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $RunStepDeltaDetailsCopyWith get copyWith => - throw _privateConstructorUsedError; -} + /// Always `file_citation`. + String get type; + @override -/// @nodoc -abstract class $RunStepDeltaDetailsCopyWith<$Res> { - factory $RunStepDeltaDetailsCopyWith( - RunStepDeltaDetails value, $Res Function(RunStepDeltaDetails) then) = - _$RunStepDeltaDetailsCopyWithImpl<$Res, RunStepDeltaDetails>; - @useResult - $Res call({String type}); -} + /// The text in the message content that needs to be replaced. + @JsonKey(includeIfNull: false) + String? get text; -/// @nodoc -class _$RunStepDeltaDetailsCopyWithImpl<$Res, $Val extends RunStepDeltaDetails> - implements $RunStepDeltaDetailsCopyWith<$Res> { - _$RunStepDeltaDetailsCopyWithImpl(this._value, this._then); + /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? get fileCitation; + @override - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + /// The start index of the text in the message content that needs to be replaced. + @JsonKey(name: 'start_index', includeIfNull: false) + int? get startIndex; + @override - /// Create a copy of RunStepDeltaDetails - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') + /// The end index of the text in the message content that needs to be replaced. + @JsonKey(name: 'end_index', includeIfNull: false) + int? get endIndex; @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } + @JsonKey(ignore: true) + _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith<$Res> - implements $RunStepDeltaDetailsCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith( - _$RunStepDeltaStepDetailsMessageCreationObjectImpl value, - $Res Function(_$RunStepDeltaStepDetailsMessageCreationObjectImpl) +abstract class _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< + $Res> implements $MessageDeltaContentTextAnnotationsCopyWith<$Res> { + factory _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith( + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl value, + $Res Function(_$MessageDeltaContentTextAnnotationsFilePathObjectImpl) then) = - __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl<$Res>; + __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl< + $Res>; @override @useResult $Res call( - {String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation}); + {int index, + String type, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex}); - $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>? get messageCreation; + $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith<$Res>? + get filePath; } /// @nodoc -class __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> - extends _$RunStepDeltaDetailsCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsMessageCreationObjectImpl> +class __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> + extends _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> implements - _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith<$Res> { - __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsMessageCreationObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsMessageCreationObjectImpl) _then) + _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith<$Res> { + __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl( + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl _value, + $Res Function(_$MessageDeltaContentTextAnnotationsFilePathObjectImpl) + _then) : super(_value, _then); - /// Create a copy of RunStepDeltaDetails - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, - Object? messageCreation = freezed, + Object? text = freezed, + Object? filePath = freezed, + Object? startIndex = freezed, + Object? endIndex = freezed, }) { - return _then(_$RunStepDeltaStepDetailsMessageCreationObjectImpl( + return _then(_$MessageDeltaContentTextAnnotationsFilePathObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? 
_value.type : type // ignore: cast_nullable_to_non_nullable as String, - messageCreation: freezed == messageCreation - ? _value.messageCreation - : messageCreation // ignore: cast_nullable_to_non_nullable - as RunStepDeltaStepDetailsMessageCreation?, + text: freezed == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String?, + filePath: freezed == filePath + ? _value.filePath + : filePath // ignore: cast_nullable_to_non_nullable + as MessageDeltaContentTextAnnotationsFilePathObjectFilePath?, + startIndex: freezed == startIndex + ? _value.startIndex + : startIndex // ignore: cast_nullable_to_non_nullable + as int?, + endIndex: freezed == endIndex + ? _value.endIndex + : endIndex // ignore: cast_nullable_to_non_nullable + as int?, )); } - /// Create a copy of RunStepDeltaDetails - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>? get messageCreation { - if (_value.messageCreation == null) { + $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith<$Res>? + get filePath { + if (_value.filePath == null) { return null; } - return $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>( - _value.messageCreation!, (value) { - return _then(_value.copyWith(messageCreation: value)); + return $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< + $Res>(_value.filePath!, (value) { + return _then(_value.copyWith(filePath: value)); }); } } /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsMessageCreationObjectImpl - extends RunStepDeltaStepDetailsMessageCreationObject { - const _$RunStepDeltaStepDetailsMessageCreationObjectImpl( - {required this.type, - @JsonKey(name: 'message_creation', includeIfNull: false) - this.messageCreation}) +class _$MessageDeltaContentTextAnnotationsFilePathObjectImpl + extends MessageDeltaContentTextAnnotationsFilePathObject { + const _$MessageDeltaContentTextAnnotationsFilePathObjectImpl( + {required this.index, + required this.type, + @JsonKey(includeIfNull: false) this.text, + @JsonKey(name: 'file_path', includeIfNull: false) this.filePath, + @JsonKey(name: 'start_index', includeIfNull: false) this.startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) this.endIndex}) : super._(); - factory _$RunStepDeltaStepDetailsMessageCreationObjectImpl.fromJson( + factory _$MessageDeltaContentTextAnnotationsFilePathObjectImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsMessageCreationObjectImplFromJson(json); + _$$MessageDeltaContentTextAnnotationsFilePathObjectImplFromJson(json); - /// Always `message_creation`. + /// The index of the annotation in the text content part. + @override + final int index; + + /// Always `file_path`. @override final String type; - /// Details of the message creation by the run step. + /// The text in the message content that needs to be replaced. @override - @JsonKey(name: 'message_creation', includeIfNull: false) - final RunStepDeltaStepDetailsMessageCreation? messageCreation; + @JsonKey(includeIfNull: false) + final String? text; + + /// No Description + @override + @JsonKey(name: 'file_path', includeIfNull: false) + final MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath; + + /// No Description + @override + @JsonKey(name: 'start_index', includeIfNull: false) + final int? startIndex; + + /// No Description + @override + @JsonKey(name: 'end_index', includeIfNull: false) + final int? 
endIndex; @override String toString() { - return 'RunStepDeltaDetails.messageCreation(type: $type, messageCreation: $messageCreation)'; + return 'MessageDeltaContentTextAnnotations.filePath(index: $index, type: $type, text: $text, filePath: $filePath, startIndex: $startIndex, endIndex: $endIndex)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDeltaStepDetailsMessageCreationObjectImpl && + other is _$MessageDeltaContentTextAnnotationsFilePathObjectImpl && + (identical(other.index, index) || other.index == index) && (identical(other.type, type) || other.type == type) && - (identical(other.messageCreation, messageCreation) || - other.messageCreation == messageCreation)); + (identical(other.text, text) || other.text == text) && + (identical(other.filePath, filePath) || + other.filePath == filePath) && + (identical(other.startIndex, startIndex) || + other.startIndex == startIndex) && + (identical(other.endIndex, endIndex) || + other.endIndex == endIndex)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, messageCreation); + int get hashCode => Object.hash( + runtimeType, index, type, text, filePath, startIndex, endIndex); - /// Create a copy of RunStepDeltaDetails - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith< - _$RunStepDeltaStepDetailsMessageCreationObjectImpl> + _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> get copyWith => - __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsMessageCreationObjectImpl>( + __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl< + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( + int index, String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation) - messageCreation, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) + fileCitation, required TResult Function( + int index, String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls) - toolCalls, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) + filePath, }) { - return messageCreation(type, this.messageCreation); + return filePath(index, type, text, this.filePath, startIndex, endIndex); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( + int index, String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation)? - messageCreation, + @JsonKey(includeIfNull: false) String? 
text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + fileCitation, TResult? Function( + int index, String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls)? - toolCalls, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + filePath, }) { - return messageCreation?.call(type, this.messageCreation); + return filePath?.call( + index, type, text, this.filePath, startIndex, endIndex); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( + int index, String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation)? - messageCreation, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + fileCitation, TResult Function( + int index, String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls)? - toolCalls, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? + filePath, required TResult orElse(), }) { - if (messageCreation != null) { - return messageCreation(type, this.messageCreation); + if (filePath != null) { + return filePath(index, type, text, this.filePath, startIndex, endIndex); } return orElse(); } @@ -65126,466 +56530,405 @@ class _$RunStepDeltaStepDetailsMessageCreationObjectImpl @optionalTypeArgs TResult map({ required TResult Function( - RunStepDeltaStepDetailsMessageCreationObject value) - messageCreation, - required TResult Function(RunStepDeltaStepDetailsToolCallsObject value) - toolCalls, + MessageDeltaContentTextAnnotationsFileCitationObject value) + fileCitation, + required TResult Function( + MessageDeltaContentTextAnnotationsFilePathObject value) + filePath, }) { - return messageCreation(this); + return filePath(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsMessageCreationObject value)? - messageCreation, - TResult? Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, + TResult? Function( + MessageDeltaContentTextAnnotationsFileCitationObject value)? + fileCitation, + TResult? Function(MessageDeltaContentTextAnnotationsFilePathObject value)? + filePath, }) { - return messageCreation?.call(this); + return filePath?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsMessageCreationObject value)? - messageCreation, - TResult Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, + TResult Function( + MessageDeltaContentTextAnnotationsFileCitationObject value)? + fileCitation, + TResult Function(MessageDeltaContentTextAnnotationsFilePathObject value)? 
+ filePath, required TResult orElse(), }) { - if (messageCreation != null) { - return messageCreation(this); + if (filePath != null) { + return filePath(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDeltaStepDetailsMessageCreationObjectImplToJson( + return _$$MessageDeltaContentTextAnnotationsFilePathObjectImplToJson( this, ); } } -abstract class RunStepDeltaStepDetailsMessageCreationObject - extends RunStepDeltaDetails { - const factory RunStepDeltaStepDetailsMessageCreationObject( - {required final String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - final RunStepDeltaStepDetailsMessageCreation? messageCreation}) = - _$RunStepDeltaStepDetailsMessageCreationObjectImpl; - const RunStepDeltaStepDetailsMessageCreationObject._() : super._(); +abstract class MessageDeltaContentTextAnnotationsFilePathObject + extends MessageDeltaContentTextAnnotations { + const factory MessageDeltaContentTextAnnotationsFilePathObject( + {required final int index, + required final String type, + @JsonKey(includeIfNull: false) final String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + final MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) final int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) + final int? + endIndex}) = _$MessageDeltaContentTextAnnotationsFilePathObjectImpl; + const MessageDeltaContentTextAnnotationsFilePathObject._() : super._(); - factory RunStepDeltaStepDetailsMessageCreationObject.fromJson( + factory MessageDeltaContentTextAnnotationsFilePathObject.fromJson( Map json) = - _$RunStepDeltaStepDetailsMessageCreationObjectImpl.fromJson; + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl.fromJson; - /// Always `message_creation`. @override + + /// The index of the annotation in the text content part. + int get index; + @override + + /// Always `file_path`. String get type; + @override - /// Details of the message creation by the run step. - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? get messageCreation; + /// The text in the message content that needs to be replaced. + @JsonKey(includeIfNull: false) + String? get text; - /// Create a copy of RunStepDeltaDetails - /// with the given fields replaced by the non-null parameter values. + /// No Description + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? get filePath; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith< - _$RunStepDeltaStepDetailsMessageCreationObjectImpl> + + /// No Description + @JsonKey(name: 'start_index', includeIfNull: false) + int? get startIndex; + @override + + /// No Description + @JsonKey(name: 'end_index', includeIfNull: false) + int? 
get endIndex; + @override + @JsonKey(ignore: true) + _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> get copyWith => throw _privateConstructorUsedError; } +MessageDeltaContentTextAnnotationsFilePathObjectFilePath + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathFromJson( + Map json) { + return _MessageDeltaContentTextAnnotationsFilePathObjectFilePath.fromJson( + json); +} + /// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith<$Res> - implements $RunStepDeltaDetailsCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsObjectImpl value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsObjectImpl) then) = - __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl<$Res>; - @override +mixin _$MessageDeltaContentTextAnnotationsFilePathObjectFilePath { + /// The ID of the file that was generated. + @JsonKey(name: 'file_id', includeIfNull: false) + String? get fileId => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< + MessageDeltaContentTextAnnotationsFilePathObjectFilePath> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< + $Res> { + factory $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith( + MessageDeltaContentTextAnnotationsFilePathObjectFilePath value, + $Res Function( + MessageDeltaContentTextAnnotationsFilePathObjectFilePath) + then) = + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< + $Res, MessageDeltaContentTextAnnotationsFilePathObjectFilePath>; @useResult - $Res call( - {String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls}); + $Res call({@JsonKey(name: 'file_id', includeIfNull: false) String? fileId}); } /// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl<$Res> - extends _$RunStepDeltaDetailsCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsObjectImpl> - implements _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith<$Res> { - __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsObjectImpl) _then) - : super(_value, _then); +class _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< + $Res, + $Val extends MessageDeltaContentTextAnnotationsFilePathObjectFilePath> + implements + $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< + $Res> { + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl( + this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; - /// Create a copy of RunStepDeltaDetails - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, - Object? toolCalls = freezed, + Object? fileId = freezed, }) { - return _then(_$RunStepDeltaStepDetailsToolCallsObjectImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - toolCalls: freezed == toolCalls - ? 
_value._toolCalls - : toolCalls // ignore: cast_nullable_to_non_nullable - as List?, - )); + return _then(_value.copyWith( + fileId: freezed == fileId + ? _value.fileId + : fileId // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); } } /// @nodoc -@JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsObjectImpl - extends RunStepDeltaStepDetailsToolCallsObject { - const _$RunStepDeltaStepDetailsToolCallsObjectImpl( - {required this.type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - final List? toolCalls}) - : _toolCalls = toolCalls, - super._(); - - factory _$RunStepDeltaStepDetailsToolCallsObjectImpl.fromJson( - Map json) => - _$$RunStepDeltaStepDetailsToolCallsObjectImplFromJson(json); - - /// Always `tool_calls`. - @override - final String type; - - /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - final List? _toolCalls; - - /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - @override - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? get toolCalls { - final value = _toolCalls; - if (value == null) return null; - if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - - @override - String toString() { - return 'RunStepDeltaDetails.toolCalls(type: $type, toolCalls: $toolCalls)'; - } - +abstract class _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< + $Res> + implements + $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< + $Res> { + factory _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith( + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl value, + $Res Function( + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl) + then) = + __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl< + $Res>; @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$RunStepDeltaStepDetailsToolCallsObjectImpl && - (identical(other.type, type) || other.type == type) && - const DeepCollectionEquality() - .equals(other._toolCalls, _toolCalls)); - } + @useResult + $Res call({@JsonKey(name: 'file_id', includeIfNull: false) String? fileId}); +} - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash( - runtimeType, type, const DeepCollectionEquality().hash(_toolCalls)); +/// @nodoc +class __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl< + $Res> + extends _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< + $Res, _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> + implements + _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< + $Res> { + __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl( + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl _value, + $Res Function( + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl) + _then) + : super(_value, _then); - /// Create a copy of RunStepDeltaDetails - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsObjectImpl> - get copyWith => - __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsObjectImpl>(this, _$identity); - @override - @optionalTypeArgs - TResult when({ - required TResult Function( - String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation) - messageCreation, - required TResult Function( - String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls) - toolCalls, + $Res call({ + Object? fileId = freezed, }) { - return toolCalls(type, this.toolCalls); + return _then(_$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl( + fileId: freezed == fileId + ? _value.fileId + : fileId // ignore: cast_nullable_to_non_nullable + as String?, + )); } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl + extends _MessageDeltaContentTextAnnotationsFilePathObjectFilePath { + const _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl( + {@JsonKey(name: 'file_id', includeIfNull: false) this.fileId}) + : super._(); + + factory _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl.fromJson( + Map json) => + _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplFromJson( + json); + /// The ID of the file that was generated. @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function( - String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation)? - messageCreation, - TResult? Function( - String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls)? - toolCalls, - }) { - return toolCalls?.call(type, this.toolCalls); - } + @JsonKey(name: 'file_id', includeIfNull: false) + final String? fileId; @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function( - String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation)? - messageCreation, - TResult Function( - String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls)? - toolCalls, - required TResult orElse(), - }) { - if (toolCalls != null) { - return toolCalls(type, this.toolCalls); - } - return orElse(); + String toString() { + return 'MessageDeltaContentTextAnnotationsFilePathObjectFilePath(fileId: $fileId)'; } @override - @optionalTypeArgs - TResult map({ - required TResult Function( - RunStepDeltaStepDetailsMessageCreationObject value) - messageCreation, - required TResult Function(RunStepDeltaStepDetailsToolCallsObject value) - toolCalls, - }) { - return toolCalls(this); + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other + is _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl && + (identical(other.fileId, fileId) || other.fileId == fileId)); } + @JsonKey(ignore: true) @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsMessageCreationObject value)? - messageCreation, - TResult? Function(RunStepDeltaStepDetailsToolCallsObject value)? 
toolCalls, - }) { - return toolCalls?.call(this); - } + int get hashCode => Object.hash(runtimeType, fileId); + @JsonKey(ignore: true) @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsMessageCreationObject value)? - messageCreation, - TResult Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, - required TResult orElse(), - }) { - if (toolCalls != null) { - return toolCalls(this); - } - return orElse(); - } + @pragma('vm:prefer-inline') + _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> + get copyWith => + __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl< + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl>( + this, _$identity); @override Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsObjectImplToJson( + return _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplToJson( this, ); } } -abstract class RunStepDeltaStepDetailsToolCallsObject - extends RunStepDeltaDetails { - const factory RunStepDeltaStepDetailsToolCallsObject( - {required final String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - final List? toolCalls}) = - _$RunStepDeltaStepDetailsToolCallsObjectImpl; - const RunStepDeltaStepDetailsToolCallsObject._() : super._(); +abstract class _MessageDeltaContentTextAnnotationsFilePathObjectFilePath + extends MessageDeltaContentTextAnnotationsFilePathObjectFilePath { + const factory _MessageDeltaContentTextAnnotationsFilePathObjectFilePath( + {@JsonKey(name: 'file_id', includeIfNull: false) + final String? fileId}) = + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl; + const _MessageDeltaContentTextAnnotationsFilePathObjectFilePath._() + : super._(); - factory RunStepDeltaStepDetailsToolCallsObject.fromJson( + factory _MessageDeltaContentTextAnnotationsFilePathObjectFilePath.fromJson( Map json) = - _$RunStepDeltaStepDetailsToolCallsObjectImpl.fromJson; + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl.fromJson; - /// Always `tool_calls`. @override - String get type; - - /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? get toolCalls; - /// Create a copy of RunStepDeltaDetails - /// with the given fields replaced by the non-null parameter values. + /// The ID of the file that was generated. + @JsonKey(name: 'file_id', includeIfNull: false) + String? 
get fileId; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsObjectImpl> + @JsonKey(ignore: true) + _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDetailsToolCalls _$RunStepDetailsToolCallsFromJson( - Map json) { +RunStepDetails _$RunStepDetailsFromJson(Map json) { switch (json['type']) { - case 'code_interpreter': - return RunStepDetailsToolCallsCodeObject.fromJson(json); - case 'file_search': - return RunStepDetailsToolCallsFileSearchObject.fromJson(json); - case 'function': - return RunStepDetailsToolCallsFunctionObject.fromJson(json); + case 'message_creation': + return RunStepDetailsMessageCreationObject.fromJson(json); + case 'tool_calls': + return RunStepDetailsToolCallsObject.fromJson(json); default: - throw CheckedFromJsonException(json, 'type', 'RunStepDetailsToolCalls', + throw CheckedFromJsonException(json, 'type', 'RunStepDetails', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$RunStepDetailsToolCalls { - /// The ID of the tool call. - String get id => throw _privateConstructorUsedError; - - /// Always `code_interpreter`. +mixin _$RunStepDetails { + /// Always `message_creation`. String get type => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ required TResult Function( - String id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) - codeInterpreter, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation) + messageCreation, required TResult Function( - String id, String type, - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch fileSearch) - fileSearch, - required TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function) - function, + @JsonKey(name: 'tool_calls') + List toolCalls) + toolCalls, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - String id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? - codeInterpreter, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation)? + messageCreation, TResult? Function( - String id, String type, - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch fileSearch)? - fileSearch, - TResult? Function( - String id, String type, RunStepDetailsToolCallsFunction function)? - function, + @JsonKey(name: 'tool_calls') + List toolCalls)? + toolCalls, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function( - String id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? - codeInterpreter, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation)? + messageCreation, TResult Function( - String id, String type, - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch fileSearch)? - fileSearch, - TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function)? - function, + @JsonKey(name: 'tool_calls') + List toolCalls)? 
+ toolCalls, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeObject value) - codeInterpreter, - required TResult Function(RunStepDetailsToolCallsFileSearchObject value) - fileSearch, - required TResult Function(RunStepDetailsToolCallsFunctionObject value) - function, + required TResult Function(RunStepDetailsMessageCreationObject value) + messageCreation, + required TResult Function(RunStepDetailsToolCallsObject value) toolCalls, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult? Function(RunStepDetailsMessageCreationObject value)? + messageCreation, + TResult? Function(RunStepDetailsToolCallsObject value)? toolCalls, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult Function(RunStepDetailsMessageCreationObject value)? + messageCreation, + TResult Function(RunStepDetailsToolCallsObject value)? toolCalls, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this RunStepDetailsToolCalls to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $RunStepDetailsToolCallsCopyWith get copyWith => + @JsonKey(ignore: true) + $RunStepDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepDetailsToolCallsCopyWith<$Res> { - factory $RunStepDetailsToolCallsCopyWith(RunStepDetailsToolCalls value, - $Res Function(RunStepDetailsToolCalls) then) = - _$RunStepDetailsToolCallsCopyWithImpl<$Res, RunStepDetailsToolCalls>; +abstract class $RunStepDetailsCopyWith<$Res> { + factory $RunStepDetailsCopyWith( + RunStepDetails value, $Res Function(RunStepDetails) then) = + _$RunStepDetailsCopyWithImpl<$Res, RunStepDetails>; @useResult - $Res call({String id, String type}); + $Res call({String type}); } /// @nodoc -class _$RunStepDetailsToolCallsCopyWithImpl<$Res, - $Val extends RunStepDetailsToolCalls> - implements $RunStepDetailsToolCallsCopyWith<$Res> { - _$RunStepDetailsToolCallsCopyWithImpl(this._value, this._then); +class _$RunStepDetailsCopyWithImpl<$Res, $Val extends RunStepDetails> + implements $RunStepDetailsCopyWith<$Res> { + _$RunStepDetailsCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, Object? type = null, }) { return _then(_value.copyWith( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, type: null == type ? 
_value.type : type // ignore: cast_nullable_to_non_nullable @@ -65595,195 +56938,160 @@ class _$RunStepDetailsToolCallsCopyWithImpl<$Res, } /// @nodoc -abstract class _$$RunStepDetailsToolCallsCodeObjectImplCopyWith<$Res> - implements $RunStepDetailsToolCallsCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsCodeObjectImplCopyWith( - _$RunStepDetailsToolCallsCodeObjectImpl value, - $Res Function(_$RunStepDetailsToolCallsCodeObjectImpl) then) = - __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsMessageCreationObjectImplCopyWith<$Res> + implements $RunStepDetailsCopyWith<$Res> { + factory _$$RunStepDetailsMessageCreationObjectImplCopyWith( + _$RunStepDetailsMessageCreationObjectImpl value, + $Res Function(_$RunStepDetailsMessageCreationObjectImpl) then) = + __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String id, - String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter}); + {String type, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation}); - $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res> - get codeInterpreter; + $RunStepDetailsMessageCreationCopyWith<$Res> get messageCreation; } /// @nodoc -class __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsCopyWithImpl<$Res, - _$RunStepDetailsToolCallsCodeObjectImpl> - implements _$$RunStepDetailsToolCallsCodeObjectImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsCodeObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsCodeObjectImpl) _then) +class __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsCopyWithImpl<$Res, + _$RunStepDetailsMessageCreationObjectImpl> + implements _$$RunStepDetailsMessageCreationObjectImplCopyWith<$Res> { + __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl( + _$RunStepDetailsMessageCreationObjectImpl _value, + $Res Function(_$RunStepDetailsMessageCreationObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, Object? type = null, - Object? codeInterpreter = null, + Object? messageCreation = null, }) { - return _then(_$RunStepDetailsToolCallsCodeObjectImpl( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, + return _then(_$RunStepDetailsMessageCreationObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - codeInterpreter: null == codeInterpreter - ? _value.codeInterpreter - : codeInterpreter // ignore: cast_nullable_to_non_nullable - as RunStepDetailsToolCallsCodeObjectCodeInterpreter, + messageCreation: null == messageCreation + ? _value.messageCreation + : messageCreation // ignore: cast_nullable_to_non_nullable + as RunStepDetailsMessageCreation, )); } - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res> - get codeInterpreter { - return $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>( - _value.codeInterpreter, (value) { - return _then(_value.copyWith(codeInterpreter: value)); + $RunStepDetailsMessageCreationCopyWith<$Res> get messageCreation { + return $RunStepDetailsMessageCreationCopyWith<$Res>(_value.messageCreation, + (value) { + return _then(_value.copyWith(messageCreation: value)); }); } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsCodeObjectImpl - extends RunStepDetailsToolCallsCodeObject { - const _$RunStepDetailsToolCallsCodeObjectImpl( - {required this.id, - required this.type, - @JsonKey(name: 'code_interpreter') required this.codeInterpreter}) +class _$RunStepDetailsMessageCreationObjectImpl + extends RunStepDetailsMessageCreationObject { + const _$RunStepDetailsMessageCreationObjectImpl( + {required this.type, + @JsonKey(name: 'message_creation') required this.messageCreation}) : super._(); - factory _$RunStepDetailsToolCallsCodeObjectImpl.fromJson( + factory _$RunStepDetailsMessageCreationObjectImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsCodeObjectImplFromJson(json); - - /// The ID of the tool call. - @override - final String id; + _$$RunStepDetailsMessageCreationObjectImplFromJson(json); - /// Always `code_interpreter`. + /// Always `message_creation`. @override final String type; - /// The Code Interpreter tool call definition. + /// Details of the message creation by the run step. @override - @JsonKey(name: 'code_interpreter') - final RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter; + @JsonKey(name: 'message_creation') + final RunStepDetailsMessageCreation messageCreation; @override String toString() { - return 'RunStepDetailsToolCalls.codeInterpreter(id: $id, type: $type, codeInterpreter: $codeInterpreter)'; + return 'RunStepDetails.messageCreation(type: $type, messageCreation: $messageCreation)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsCodeObjectImpl && - (identical(other.id, id) || other.id == id) && + other is _$RunStepDetailsMessageCreationObjectImpl && (identical(other.type, type) || other.type == type) && - (identical(other.codeInterpreter, codeInterpreter) || - other.codeInterpreter == codeInterpreter)); + (identical(other.messageCreation, messageCreation) || + other.messageCreation == messageCreation)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, id, type, codeInterpreter); + int get hashCode => Object.hash(runtimeType, type, messageCreation); - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsCodeObjectImplCopyWith< - _$RunStepDetailsToolCallsCodeObjectImpl> - get copyWith => __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsCodeObjectImpl>(this, _$identity); + _$$RunStepDetailsMessageCreationObjectImplCopyWith< + _$RunStepDetailsMessageCreationObjectImpl> + get copyWith => __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl< + _$RunStepDetailsMessageCreationObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - String id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) - codeInterpreter, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation) + messageCreation, required TResult Function( - String id, String type, - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch fileSearch) - fileSearch, - required TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function) - function, + @JsonKey(name: 'tool_calls') + List toolCalls) + toolCalls, }) { - return codeInterpreter(id, type, this.codeInterpreter); + return messageCreation(type, this.messageCreation); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - String id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? - codeInterpreter, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation)? + messageCreation, TResult? Function( - String id, String type, - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch fileSearch)? - fileSearch, - TResult? Function( - String id, String type, RunStepDetailsToolCallsFunction function)? - function, + @JsonKey(name: 'tool_calls') + List toolCalls)? + toolCalls, }) { - return codeInterpreter?.call(id, type, this.codeInterpreter); + return messageCreation?.call(type, this.messageCreation); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - String id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? - codeInterpreter, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation)? + messageCreation, TResult Function( - String id, String type, - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch fileSearch)? - fileSearch, - TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function)? - function, + @JsonKey(name: 'tool_calls') + List toolCalls)? 
+ toolCalls, required TResult orElse(), }) { - if (codeInterpreter != null) { - return codeInterpreter(id, type, this.codeInterpreter); + if (messageCreation != null) { + return messageCreation(type, this.messageCreation); } return orElse(); } @@ -65791,273 +57099,225 @@ class _$RunStepDetailsToolCallsCodeObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeObject value) - codeInterpreter, - required TResult Function(RunStepDetailsToolCallsFileSearchObject value) - fileSearch, - required TResult Function(RunStepDetailsToolCallsFunctionObject value) - function, + required TResult Function(RunStepDetailsMessageCreationObject value) + messageCreation, + required TResult Function(RunStepDetailsToolCallsObject value) toolCalls, }) { - return codeInterpreter(this); + return messageCreation(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult? Function(RunStepDetailsMessageCreationObject value)? + messageCreation, + TResult? Function(RunStepDetailsToolCallsObject value)? toolCalls, }) { - return codeInterpreter?.call(this); + return messageCreation?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult Function(RunStepDetailsMessageCreationObject value)? + messageCreation, + TResult Function(RunStepDetailsToolCallsObject value)? toolCalls, required TResult orElse(), }) { - if (codeInterpreter != null) { - return codeInterpreter(this); + if (messageCreation != null) { + return messageCreation(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDetailsToolCallsCodeObjectImplToJson( + return _$$RunStepDetailsMessageCreationObjectImplToJson( this, ); } } -abstract class RunStepDetailsToolCallsCodeObject - extends RunStepDetailsToolCalls { - const factory RunStepDetailsToolCallsCodeObject( - {required final String id, - required final String type, - @JsonKey(name: 'code_interpreter') - required final RunStepDetailsToolCallsCodeObjectCodeInterpreter - codeInterpreter}) = _$RunStepDetailsToolCallsCodeObjectImpl; - const RunStepDetailsToolCallsCodeObject._() : super._(); +abstract class RunStepDetailsMessageCreationObject extends RunStepDetails { + const factory RunStepDetailsMessageCreationObject( + {required final String type, + @JsonKey(name: 'message_creation') + required final RunStepDetailsMessageCreation messageCreation}) = + _$RunStepDetailsMessageCreationObjectImpl; + const RunStepDetailsMessageCreationObject._() : super._(); - factory RunStepDetailsToolCallsCodeObject.fromJson( + factory RunStepDetailsMessageCreationObject.fromJson( Map json) = - _$RunStepDetailsToolCallsCodeObjectImpl.fromJson; + _$RunStepDetailsMessageCreationObjectImpl.fromJson; - /// The ID of the tool call. @override - String get id; - /// Always `code_interpreter`. - @override + /// Always `message_creation`. String get type; - /// The Code Interpreter tool call definition. 
- @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter get codeInterpreter; - - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. + /// Details of the message creation by the run step. + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation get messageCreation; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDetailsToolCallsCodeObjectImplCopyWith< - _$RunStepDetailsToolCallsCodeObjectImpl> + @JsonKey(ignore: true) + _$$RunStepDetailsMessageCreationObjectImplCopyWith< + _$RunStepDetailsMessageCreationObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> - implements $RunStepDetailsToolCallsCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith( - _$RunStepDetailsToolCallsFileSearchObjectImpl value, - $Res Function(_$RunStepDetailsToolCallsFileSearchObjectImpl) then) = - __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsObjectImplCopyWith<$Res> + implements $RunStepDetailsCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsObjectImplCopyWith( + _$RunStepDetailsToolCallsObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsObjectImpl) then) = + __$$RunStepDetailsToolCallsObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String id, - String type, - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch fileSearch}); - - $RunStepDetailsToolCallsFileSearchCopyWith<$Res> get fileSearch; + {String type, + @JsonKey(name: 'tool_calls') List toolCalls}); } /// @nodoc -class __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsCopyWithImpl<$Res, - _$RunStepDetailsToolCallsFileSearchObjectImpl> - implements _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsFileSearchObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsFileSearchObjectImpl) _then) +class __$$RunStepDetailsToolCallsObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsCopyWithImpl<$Res, + _$RunStepDetailsToolCallsObjectImpl> + implements _$$RunStepDetailsToolCallsObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, Object? type = null, - Object? fileSearch = null, + Object? toolCalls = null, }) { - return _then(_$RunStepDetailsToolCallsFileSearchObjectImpl( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, + return _then(_$RunStepDetailsToolCallsObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - fileSearch: null == fileSearch - ? _value.fileSearch - : fileSearch // ignore: cast_nullable_to_non_nullable - as RunStepDetailsToolCallsFileSearch, + toolCalls: null == toolCalls + ? _value._toolCalls + : toolCalls // ignore: cast_nullable_to_non_nullable + as List, )); } - - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. 
- @override - @pragma('vm:prefer-inline') - $RunStepDetailsToolCallsFileSearchCopyWith<$Res> get fileSearch { - return $RunStepDetailsToolCallsFileSearchCopyWith<$Res>(_value.fileSearch, - (value) { - return _then(_value.copyWith(fileSearch: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsFileSearchObjectImpl - extends RunStepDetailsToolCallsFileSearchObject { - const _$RunStepDetailsToolCallsFileSearchObjectImpl( - {required this.id, - required this.type, - @JsonKey(name: 'file_search') required this.fileSearch}) - : super._(); +class _$RunStepDetailsToolCallsObjectImpl + extends RunStepDetailsToolCallsObject { + const _$RunStepDetailsToolCallsObjectImpl( + {required this.type, + @JsonKey(name: 'tool_calls') + required final List toolCalls}) + : _toolCalls = toolCalls, + super._(); - factory _$RunStepDetailsToolCallsFileSearchObjectImpl.fromJson( + factory _$RunStepDetailsToolCallsObjectImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsFileSearchObjectImplFromJson(json); - - /// The ID of the tool call object. - @override - final String id; + _$$RunStepDetailsToolCallsObjectImplFromJson(json); - /// The type of tool call. This is always going to be `file_search` for this type of tool call. + /// Always `tool_calls`. @override final String type; - /// The definition of the file search that was called. + /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + final List _toolCalls; + + /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. @override - @JsonKey(name: 'file_search') - final RunStepDetailsToolCallsFileSearch fileSearch; + @JsonKey(name: 'tool_calls') + List get toolCalls { + if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_toolCalls); + } @override String toString() { - return 'RunStepDetailsToolCalls.fileSearch(id: $id, type: $type, fileSearch: $fileSearch)'; + return 'RunStepDetails.toolCalls(type: $type, toolCalls: $toolCalls)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsFileSearchObjectImpl && - (identical(other.id, id) || other.id == id) && + other is _$RunStepDetailsToolCallsObjectImpl && (identical(other.type, type) || other.type == type) && - (identical(other.fileSearch, fileSearch) || - other.fileSearch == fileSearch)); + const DeepCollectionEquality() + .equals(other._toolCalls, _toolCalls)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, id, type, fileSearch); + int get hashCode => Object.hash( + runtimeType, type, const DeepCollectionEquality().hash(_toolCalls)); - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith< - _$RunStepDetailsToolCallsFileSearchObjectImpl> - get copyWith => - __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsFileSearchObjectImpl>(this, _$identity); + _$$RunStepDetailsToolCallsObjectImplCopyWith< + _$RunStepDetailsToolCallsObjectImpl> + get copyWith => __$$RunStepDetailsToolCallsObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - String id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) - codeInterpreter, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation) + messageCreation, required TResult Function( - String id, String type, - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch fileSearch) - fileSearch, - required TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function) - function, + @JsonKey(name: 'tool_calls') + List toolCalls) + toolCalls, }) { - return fileSearch(id, type, this.fileSearch); + return toolCalls(type, this.toolCalls); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - String id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? - codeInterpreter, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation)? + messageCreation, TResult? Function( - String id, String type, - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch fileSearch)? - fileSearch, - TResult? Function( - String id, String type, RunStepDetailsToolCallsFunction function)? - function, + @JsonKey(name: 'tool_calls') + List toolCalls)? + toolCalls, }) { - return fileSearch?.call(id, type, this.fileSearch); + return toolCalls?.call(type, this.toolCalls); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - String id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? - codeInterpreter, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation)? + messageCreation, TResult Function( - String id, String type, - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch fileSearch)? - fileSearch, - TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function)? - function, + @JsonKey(name: 'tool_calls') + List toolCalls)? + toolCalls, required TResult orElse(), }) { - if (fileSearch != null) { - return fileSearch(id, type, this.fileSearch); + if (toolCalls != null) { + return toolCalls(type, this.toolCalls); } return orElse(); } @@ -66065,265 +57325,358 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeObject value) - codeInterpreter, - required TResult Function(RunStepDetailsToolCallsFileSearchObject value) - fileSearch, - required TResult Function(RunStepDetailsToolCallsFunctionObject value) - function, + required TResult Function(RunStepDetailsMessageCreationObject value) + messageCreation, + required TResult Function(RunStepDetailsToolCallsObject value) toolCalls, }) { - return fileSearch(this); + return toolCalls(this); } @override @optionalTypeArgs TResult? 
mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult? Function(RunStepDetailsMessageCreationObject value)? + messageCreation, + TResult? Function(RunStepDetailsToolCallsObject value)? toolCalls, }) { - return fileSearch?.call(this); + return toolCalls?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult Function(RunStepDetailsMessageCreationObject value)? + messageCreation, + TResult Function(RunStepDetailsToolCallsObject value)? toolCalls, required TResult orElse(), }) { - if (fileSearch != null) { - return fileSearch(this); + if (toolCalls != null) { + return toolCalls(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDetailsToolCallsFileSearchObjectImplToJson( + return _$$RunStepDetailsToolCallsObjectImplToJson( this, ); } } -abstract class RunStepDetailsToolCallsFileSearchObject - extends RunStepDetailsToolCalls { - const factory RunStepDetailsToolCallsFileSearchObject( - {required final String id, - required final String type, - @JsonKey(name: 'file_search') - required final RunStepDetailsToolCallsFileSearch fileSearch}) = - _$RunStepDetailsToolCallsFileSearchObjectImpl; - const RunStepDetailsToolCallsFileSearchObject._() : super._(); +abstract class RunStepDetailsToolCallsObject extends RunStepDetails { + const factory RunStepDetailsToolCallsObject( + {required final String type, + @JsonKey(name: 'tool_calls') + required final List toolCalls}) = + _$RunStepDetailsToolCallsObjectImpl; + const RunStepDetailsToolCallsObject._() : super._(); - factory RunStepDetailsToolCallsFileSearchObject.fromJson( - Map json) = - _$RunStepDetailsToolCallsFileSearchObjectImpl.fromJson; + factory RunStepDetailsToolCallsObject.fromJson(Map json) = + _$RunStepDetailsToolCallsObjectImpl.fromJson; - /// The ID of the tool call object. @override - String get id; - /// The type of tool call. This is always going to be `file_search` for this type of tool call. - @override + /// Always `tool_calls`. String get type; - /// The definition of the file search that was called. - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch get fileSearch; - - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. + /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. 
+ @JsonKey(name: 'tool_calls') + List get toolCalls; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith< - _$RunStepDetailsToolCallsFileSearchObjectImpl> + @JsonKey(ignore: true) + _$$RunStepDetailsToolCallsObjectImplCopyWith< + _$RunStepDetailsToolCallsObjectImpl> get copyWith => throw _privateConstructorUsedError; } +RunStepDeltaDetails _$RunStepDeltaDetailsFromJson(Map json) { + switch (json['type']) { + case 'message_creation': + return RunStepDeltaStepDetailsMessageCreationObject.fromJson(json); + case 'tool_calls': + return RunStepDeltaStepDetailsToolCallsObject.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'type', 'RunStepDeltaDetails', + 'Invalid union type "${json['type']}"!'); + } +} + /// @nodoc -abstract class _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith<$Res> - implements $RunStepDetailsToolCallsCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith( - _$RunStepDetailsToolCallsFunctionObjectImpl value, - $Res Function(_$RunStepDetailsToolCallsFunctionObjectImpl) then) = - __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res>; +mixin _$RunStepDeltaDetails { + /// Always `message_creation`. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function( + String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation) + messageCreation, + required TResult Function( + String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls) + toolCalls, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function( + String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation)? + messageCreation, + TResult? Function( + String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls)? + toolCalls, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function( + String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation)? + messageCreation, + TResult Function( + String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls)? + toolCalls, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function( + RunStepDeltaStepDetailsMessageCreationObject value) + messageCreation, + required TResult Function(RunStepDeltaStepDetailsToolCallsObject value) + toolCalls, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(RunStepDeltaStepDetailsMessageCreationObject value)? + messageCreation, + TResult? Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(RunStepDeltaStepDetailsMessageCreationObject value)? + messageCreation, + TResult Function(RunStepDeltaStepDetailsToolCallsObject value)? 
toolCalls, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $RunStepDeltaDetailsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $RunStepDeltaDetailsCopyWith<$Res> { + factory $RunStepDeltaDetailsCopyWith( + RunStepDeltaDetails value, $Res Function(RunStepDeltaDetails) then) = + _$RunStepDeltaDetailsCopyWithImpl<$Res, RunStepDeltaDetails>; + @useResult + $Res call({String type}); +} + +/// @nodoc +class _$RunStepDeltaDetailsCopyWithImpl<$Res, $Val extends RunStepDeltaDetails> + implements $RunStepDeltaDetailsCopyWith<$Res> { + _$RunStepDeltaDetailsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith<$Res> + implements $RunStepDeltaDetailsCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith( + _$RunStepDeltaStepDetailsMessageCreationObjectImpl value, + $Res Function(_$RunStepDeltaStepDetailsMessageCreationObjectImpl) + then) = + __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call({String id, String type, RunStepDetailsToolCallsFunction function}); + $Res call( + {String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation}); - $RunStepDetailsToolCallsFunctionCopyWith<$Res> get function; + $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>? get messageCreation; } /// @nodoc -class __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsCopyWithImpl<$Res, - _$RunStepDetailsToolCallsFunctionObjectImpl> - implements _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsFunctionObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsFunctionObjectImpl) _then) +class __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> + extends _$RunStepDeltaDetailsCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsMessageCreationObjectImpl> + implements + _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith<$Res> { + __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsMessageCreationObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsMessageCreationObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, Object? type = null, - Object? function = null, + Object? messageCreation = freezed, }) { - return _then(_$RunStepDetailsToolCallsFunctionObjectImpl( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, + return _then(_$RunStepDeltaStepDetailsMessageCreationObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - function: null == function - ? 
_value.function - : function // ignore: cast_nullable_to_non_nullable - as RunStepDetailsToolCallsFunction, + messageCreation: freezed == messageCreation + ? _value.messageCreation + : messageCreation // ignore: cast_nullable_to_non_nullable + as RunStepDeltaStepDetailsMessageCreation?, )); } - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $RunStepDetailsToolCallsFunctionCopyWith<$Res> get function { - return $RunStepDetailsToolCallsFunctionCopyWith<$Res>(_value.function, - (value) { - return _then(_value.copyWith(function: value)); + $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>? get messageCreation { + if (_value.messageCreation == null) { + return null; + } + + return $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>( + _value.messageCreation!, (value) { + return _then(_value.copyWith(messageCreation: value)); }); } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsFunctionObjectImpl - extends RunStepDetailsToolCallsFunctionObject { - const _$RunStepDetailsToolCallsFunctionObjectImpl( - {required this.id, required this.type, required this.function}) +class _$RunStepDeltaStepDetailsMessageCreationObjectImpl + extends RunStepDeltaStepDetailsMessageCreationObject { + const _$RunStepDeltaStepDetailsMessageCreationObjectImpl( + {required this.type, + @JsonKey(name: 'message_creation', includeIfNull: false) + this.messageCreation}) : super._(); - factory _$RunStepDetailsToolCallsFunctionObjectImpl.fromJson( + factory _$RunStepDeltaStepDetailsMessageCreationObjectImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsFunctionObjectImplFromJson(json); - - /// The ID of the tool call object. - @override - final String id; + _$$RunStepDeltaStepDetailsMessageCreationObjectImplFromJson(json); - /// Always `function`. + /// Always `message_creation`. @override final String type; - /// The definition of the function that was called. + /// Details of the message creation by the run step. @override - final RunStepDetailsToolCallsFunction function; + @JsonKey(name: 'message_creation', includeIfNull: false) + final RunStepDeltaStepDetailsMessageCreation? messageCreation; @override String toString() { - return 'RunStepDetailsToolCalls.function(id: $id, type: $type, function: $function)'; + return 'RunStepDeltaDetails.messageCreation(type: $type, messageCreation: $messageCreation)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsFunctionObjectImpl && - (identical(other.id, id) || other.id == id) && + other is _$RunStepDeltaStepDetailsMessageCreationObjectImpl && (identical(other.type, type) || other.type == type) && - (identical(other.function, function) || - other.function == function)); + (identical(other.messageCreation, messageCreation) || + other.messageCreation == messageCreation)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, id, type, function); + int get hashCode => Object.hash(runtimeType, type, messageCreation); - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith< - _$RunStepDetailsToolCallsFunctionObjectImpl> - get copyWith => __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsFunctionObjectImpl>(this, _$identity); + _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith< + _$RunStepDeltaStepDetailsMessageCreationObjectImpl> + get copyWith => + __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsMessageCreationObjectImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - String id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) - codeInterpreter, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation) + messageCreation, required TResult Function( - String id, String type, - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch fileSearch) - fileSearch, - required TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function) - function, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls) + toolCalls, }) { - return function(id, type, this.function); + return messageCreation(type, this.messageCreation); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - String id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? - codeInterpreter, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation)? + messageCreation, TResult? Function( - String id, String type, - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch fileSearch)? - fileSearch, - TResult? Function( - String id, String type, RunStepDetailsToolCallsFunction function)? - function, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls)? + toolCalls, }) { - return function?.call(id, type, this.function); + return messageCreation?.call(type, this.messageCreation); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - String id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? - codeInterpreter, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation)? + messageCreation, TResult Function( - String id, String type, - @JsonKey(name: 'file_search') - RunStepDetailsToolCallsFileSearch fileSearch)? - fileSearch, - TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function)? - function, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls)? 
+ toolCalls, required TResult orElse(), }) { - if (function != null) { - return function(id, type, this.function); + if (messageCreation != null) { + return messageCreation(type, this.messageCreation); } return orElse(); } @@ -66331,489 +57684,443 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeObject value) - codeInterpreter, - required TResult Function(RunStepDetailsToolCallsFileSearchObject value) - fileSearch, - required TResult Function(RunStepDetailsToolCallsFunctionObject value) - function, + required TResult Function( + RunStepDeltaStepDetailsMessageCreationObject value) + messageCreation, + required TResult Function(RunStepDeltaStepDetailsToolCallsObject value) + toolCalls, }) { - return function(this); + return messageCreation(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult? Function(RunStepDeltaStepDetailsMessageCreationObject value)? + messageCreation, + TResult? Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, }) { - return function?.call(this); + return messageCreation?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult Function(RunStepDeltaStepDetailsMessageCreationObject value)? + messageCreation, + TResult Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, required TResult orElse(), }) { - if (function != null) { - return function(this); + if (messageCreation != null) { + return messageCreation(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDetailsToolCallsFunctionObjectImplToJson( + return _$$RunStepDeltaStepDetailsMessageCreationObjectImplToJson( this, - ); - } -} - -abstract class RunStepDetailsToolCallsFunctionObject - extends RunStepDetailsToolCalls { - const factory RunStepDetailsToolCallsFunctionObject( - {required final String id, - required final String type, - required final RunStepDetailsToolCallsFunction function}) = - _$RunStepDetailsToolCallsFunctionObjectImpl; - const RunStepDetailsToolCallsFunctionObject._() : super._(); - - factory RunStepDetailsToolCallsFunctionObject.fromJson( - Map json) = - _$RunStepDetailsToolCallsFunctionObjectImpl.fromJson; - - /// The ID of the tool call object. - @override - String get id; - - /// Always `function`. - @override - String get type; - - /// The definition of the function that was called. - RunStepDetailsToolCallsFunction get function; - - /// Create a copy of RunStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith< - _$RunStepDetailsToolCallsFunctionObjectImpl> - get copyWith => throw _privateConstructorUsedError; -} - -RunStepDetailsToolCallsFunction _$RunStepDetailsToolCallsFunctionFromJson( - Map json) { - return _RunStepDetailsToolCallsFunction.fromJson(json); -} - -/// @nodoc -mixin _$RunStepDetailsToolCallsFunction { - /// The name of the function. 
- String get name => throw _privateConstructorUsedError; - - /// The arguments passed to the function. - String get arguments => throw _privateConstructorUsedError; - - /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. - String? get output => throw _privateConstructorUsedError; - - /// Serializes this RunStepDetailsToolCallsFunction to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDetailsToolCallsFunction - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $RunStepDetailsToolCallsFunctionCopyWith - get copyWith => throw _privateConstructorUsedError; + ); + } } -/// @nodoc -abstract class $RunStepDetailsToolCallsFunctionCopyWith<$Res> { - factory $RunStepDetailsToolCallsFunctionCopyWith( - RunStepDetailsToolCallsFunction value, - $Res Function(RunStepDetailsToolCallsFunction) then) = - _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, - RunStepDetailsToolCallsFunction>; - @useResult - $Res call({String name, String arguments, String? output}); -} +abstract class RunStepDeltaStepDetailsMessageCreationObject + extends RunStepDeltaDetails { + const factory RunStepDeltaStepDetailsMessageCreationObject( + {required final String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + final RunStepDeltaStepDetailsMessageCreation? messageCreation}) = + _$RunStepDeltaStepDetailsMessageCreationObjectImpl; + const RunStepDeltaStepDetailsMessageCreationObject._() : super._(); -/// @nodoc -class _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, - $Val extends RunStepDetailsToolCallsFunction> - implements $RunStepDetailsToolCallsFunctionCopyWith<$Res> { - _$RunStepDetailsToolCallsFunctionCopyWithImpl(this._value, this._then); + factory RunStepDeltaStepDetailsMessageCreationObject.fromJson( + Map json) = + _$RunStepDeltaStepDetailsMessageCreationObjectImpl.fromJson; - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + @override - /// Create a copy of RunStepDetailsToolCallsFunction - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') + /// Always `message_creation`. + String get type; + + /// Details of the message creation by the run step. + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? get messageCreation; @override - $Res call({ - Object? name = null, - Object? arguments = null, - Object? output = freezed, - }) { - return _then(_value.copyWith( - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, - arguments: null == arguments - ? _value.arguments - : arguments // ignore: cast_nullable_to_non_nullable - as String, - output: freezed == output - ? 
_value.output - : output // ignore: cast_nullable_to_non_nullable - as String?, - ) as $Val); - } + @JsonKey(ignore: true) + _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith< + _$RunStepDeltaStepDetailsMessageCreationObjectImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDetailsToolCallsFunctionImplCopyWith<$Res> - implements $RunStepDetailsToolCallsFunctionCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsFunctionImplCopyWith( - _$RunStepDetailsToolCallsFunctionImpl value, - $Res Function(_$RunStepDetailsToolCallsFunctionImpl) then) = - __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl<$Res>; +abstract class _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith<$Res> + implements $RunStepDeltaDetailsCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsObjectImpl value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsObjectImpl) then) = + __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call({String name, String arguments, String? output}); + $Res call( + {String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls}); } /// @nodoc -class __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, - _$RunStepDetailsToolCallsFunctionImpl> - implements _$$RunStepDetailsToolCallsFunctionImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl( - _$RunStepDetailsToolCallsFunctionImpl _value, - $Res Function(_$RunStepDetailsToolCallsFunctionImpl) _then) +class __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl<$Res> + extends _$RunStepDeltaDetailsCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsObjectImpl> + implements _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith<$Res> { + __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDetailsToolCallsFunction - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? name = null, - Object? arguments = null, - Object? output = freezed, + Object? type = null, + Object? toolCalls = freezed, }) { - return _then(_$RunStepDetailsToolCallsFunctionImpl( - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, - arguments: null == arguments - ? _value.arguments - : arguments // ignore: cast_nullable_to_non_nullable + return _then(_$RunStepDeltaStepDetailsToolCallsObjectImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable as String, - output: freezed == output - ? _value.output - : output // ignore: cast_nullable_to_non_nullable - as String?, + toolCalls: freezed == toolCalls + ? 
_value._toolCalls + : toolCalls // ignore: cast_nullable_to_non_nullable + as List?, )); } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsFunctionImpl - extends _RunStepDetailsToolCallsFunction { - const _$RunStepDetailsToolCallsFunctionImpl( - {required this.name, required this.arguments, required this.output}) - : super._(); +class _$RunStepDeltaStepDetailsToolCallsObjectImpl + extends RunStepDeltaStepDetailsToolCallsObject { + const _$RunStepDeltaStepDetailsToolCallsObjectImpl( + {required this.type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls}) + : _toolCalls = toolCalls, + super._(); - factory _$RunStepDetailsToolCallsFunctionImpl.fromJson( + factory _$RunStepDeltaStepDetailsToolCallsObjectImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsFunctionImplFromJson(json); + _$$RunStepDeltaStepDetailsToolCallsObjectImplFromJson(json); - /// The name of the function. + /// Always `tool_calls`. @override - final String name; + final String type; - /// The arguments passed to the function. - @override - final String arguments; + /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + final List? _toolCalls; - /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. + /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. @override - final String? output; + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls { + final value = _toolCalls; + if (value == null) return null; + if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } @override String toString() { - return 'RunStepDetailsToolCallsFunction(name: $name, arguments: $arguments, output: $output)'; + return 'RunStepDeltaDetails.toolCalls(type: $type, toolCalls: $toolCalls)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsFunctionImpl && - (identical(other.name, name) || other.name == name) && - (identical(other.arguments, arguments) || - other.arguments == arguments) && - (identical(other.output, output) || other.output == output)); + other is _$RunStepDeltaStepDetailsToolCallsObjectImpl && + (identical(other.type, type) || other.type == type) && + const DeepCollectionEquality() + .equals(other._toolCalls, _toolCalls)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, name, arguments, output); + int get hashCode => Object.hash( + runtimeType, type, const DeepCollectionEquality().hash(_toolCalls)); - /// Create a copy of RunStepDetailsToolCallsFunction - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsFunctionImplCopyWith< - _$RunStepDetailsToolCallsFunctionImpl> - get copyWith => __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl< - _$RunStepDetailsToolCallsFunctionImpl>(this, _$identity); + _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsObjectImpl> + get copyWith => + __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsObjectImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function( + String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation) + messageCreation, + required TResult Function( + String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls) + toolCalls, + }) { + return toolCalls(type, this.toolCalls); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function( + String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation)? + messageCreation, + TResult? Function( + String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls)? + toolCalls, + }) { + return toolCalls?.call(type, this.toolCalls); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function( + String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation)? + messageCreation, + TResult Function( + String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls)? + toolCalls, + required TResult orElse(), + }) { + if (toolCalls != null) { + return toolCalls(type, this.toolCalls); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function( + RunStepDeltaStepDetailsMessageCreationObject value) + messageCreation, + required TResult Function(RunStepDeltaStepDetailsToolCallsObject value) + toolCalls, + }) { + return toolCalls(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(RunStepDeltaStepDetailsMessageCreationObject value)? + messageCreation, + TResult? Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, + }) { + return toolCalls?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(RunStepDeltaStepDetailsMessageCreationObject value)? + messageCreation, + TResult Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, + required TResult orElse(), + }) { + if (toolCalls != null) { + return toolCalls(this); + } + return orElse(); + } @override Map toJson() { - return _$$RunStepDetailsToolCallsFunctionImplToJson( + return _$$RunStepDeltaStepDetailsToolCallsObjectImplToJson( this, ); } } -abstract class _RunStepDetailsToolCallsFunction - extends RunStepDetailsToolCallsFunction { - const factory _RunStepDetailsToolCallsFunction( - {required final String name, - required final String arguments, - required final String? 
output}) = _$RunStepDetailsToolCallsFunctionImpl; - const _RunStepDetailsToolCallsFunction._() : super._(); - - factory _RunStepDetailsToolCallsFunction.fromJson(Map json) = - _$RunStepDetailsToolCallsFunctionImpl.fromJson; +abstract class RunStepDeltaStepDetailsToolCallsObject + extends RunStepDeltaDetails { + const factory RunStepDeltaStepDetailsToolCallsObject( + {required final String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls}) = + _$RunStepDeltaStepDetailsToolCallsObjectImpl; + const RunStepDeltaStepDetailsToolCallsObject._() : super._(); - /// The name of the function. - @override - String get name; + factory RunStepDeltaStepDetailsToolCallsObject.fromJson( + Map json) = + _$RunStepDeltaStepDetailsToolCallsObjectImpl.fromJson; - /// The arguments passed to the function. @override - String get arguments; - /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. - @override - String? get output; + /// Always `tool_calls`. + String get type; - /// Create a copy of RunStepDetailsToolCallsFunction - /// with the given fields replaced by the non-null parameter values. + /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDetailsToolCallsFunctionImplCopyWith< - _$RunStepDetailsToolCallsFunctionImpl> + @JsonKey(ignore: true) + _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsObjectImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDeltaStepDetailsToolCalls _$RunStepDeltaStepDetailsToolCallsFromJson( +RunStepDetailsToolCalls _$RunStepDetailsToolCallsFromJson( Map json) { switch (json['type']) { case 'code_interpreter': - return RunStepDeltaStepDetailsToolCallsCodeObject.fromJson(json); + return RunStepDetailsToolCallsCodeObject.fromJson(json); case 'file_search': - return RunStepDeltaStepDetailsToolCallsFileSearchObject.fromJson(json); + return RunStepDetailsToolCallsFileSearchObject.fromJson(json); case 'function': - return RunStepDeltaStepDetailsToolCallsFunctionObject.fromJson(json); + return RunStepDetailsToolCallsFunctionObject.fromJson(json); default: - throw CheckedFromJsonException( - json, - 'type', - 'RunStepDeltaStepDetailsToolCalls', + throw CheckedFromJsonException(json, 'type', 'RunStepDetailsToolCalls', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$RunStepDeltaStepDetailsToolCalls { - /// The index of the tool call in the tool calls array. - int get index => throw _privateConstructorUsedError; - +mixin _$RunStepDetailsToolCalls { /// The ID of the tool call. - @JsonKey(includeIfNull: false) - String? get id => throw _privateConstructorUsedError; + String get id => throw _privateConstructorUsedError; /// Always `code_interpreter`. String get type => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, + String id, String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? 
- codeInterpreter) + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) codeInterpreter, - required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, + required TResult Function(String id, String type, @JsonKey(name: 'file_search') Map fileSearch) fileSearch, required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function) + String id, String type, RunStepDetailsToolCallsFunction function) function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, + String id, String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, + TResult? Function(String id, String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? + String id, String type, RunStepDetailsToolCallsFunction function)? function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, + String id, String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, + TResult Function(String id, String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? + String id, String type, RunStepDetailsToolCallsFunction function)? function, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) + required TResult Function(RunStepDetailsToolCallsCodeObject value) codeInterpreter, - required TResult Function( - RunStepDeltaStepDetailsToolCallsFileSearchObject value) + required TResult Function(RunStepDetailsToolCallsFileSearchObject value) fileSearch, - required TResult Function( - RunStepDeltaStepDetailsToolCallsFunctionObject value) + required TResult Function(RunStepDetailsToolCallsFunctionObject value) function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult? 
Function(RunStepDetailsToolCallsFunctionObject value)? function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, + TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this RunStepDeltaStepDetailsToolCalls to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDeltaStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $RunStepDeltaStepDetailsToolCallsCopyWith - get copyWith => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $RunStepDetailsToolCallsCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { - factory $RunStepDeltaStepDetailsToolCallsCopyWith( - RunStepDeltaStepDetailsToolCalls value, - $Res Function(RunStepDeltaStepDetailsToolCalls) then) = - _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, - RunStepDeltaStepDetailsToolCalls>; +abstract class $RunStepDetailsToolCallsCopyWith<$Res> { + factory $RunStepDetailsToolCallsCopyWith(RunStepDetailsToolCalls value, + $Res Function(RunStepDetailsToolCalls) then) = + _$RunStepDetailsToolCallsCopyWithImpl<$Res, RunStepDetailsToolCalls>; @useResult - $Res call( - {int index, @JsonKey(includeIfNull: false) String? id, String type}); + $Res call({String id, String type}); } /// @nodoc -class _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, - $Val extends RunStepDeltaStepDetailsToolCalls> - implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { - _$RunStepDeltaStepDetailsToolCallsCopyWithImpl(this._value, this._then); +class _$RunStepDetailsToolCallsCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCalls> + implements $RunStepDetailsToolCallsCopyWith<$Res> { + _$RunStepDetailsToolCallsCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDeltaStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, - Object? id = freezed, + Object? id = null, Object? type = null, }) { return _then(_value.copyWith( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, - id: freezed == id + id: null == id ? _value.id : id // ignore: cast_nullable_to_non_nullable - as String?, + as String, type: null == type ? 
_value.type : type // ignore: cast_nullable_to_non_nullable @@ -66823,79 +58130,63 @@ class _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith<$Res> - implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl) - then) = - __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsCodeObjectImplCopyWith<$Res> + implements $RunStepDetailsToolCallsCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsCodeObjectImplCopyWith( + _$RunStepDetailsToolCallsCodeObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsCodeObjectImpl) then) = + __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {int index, - @JsonKey(includeIfNull: false) String? id, + {String id, String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter}); + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter}); - $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>? + $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res> get codeInterpreter; } /// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> - extends _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> - implements _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith<$Res> { - __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl) _then) +class __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsCopyWithImpl<$Res, + _$RunStepDetailsToolCallsCodeObjectImpl> + implements _$$RunStepDetailsToolCallsCodeObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsCodeObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsCodeObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDeltaStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, - Object? id = freezed, + Object? id = null, Object? type = null, - Object? codeInterpreter = freezed, + Object? codeInterpreter = null, }) { - return _then(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, - id: freezed == id + return _then(_$RunStepDetailsToolCallsCodeObjectImpl( + id: null == id ? _value.id : id // ignore: cast_nullable_to_non_nullable - as String?, + as String, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - codeInterpreter: freezed == codeInterpreter + codeInterpreter: null == codeInterpreter ? 
_value.codeInterpreter : codeInterpreter // ignore: cast_nullable_to_non_nullable - as RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter?, + as RunStepDetailsToolCallsCodeObjectCodeInterpreter, )); } - /// Create a copy of RunStepDeltaStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>? + $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res> get codeInterpreter { - if (_value.codeInterpreter == null) { - return null; - } - - return $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith< - $Res>(_value.codeInterpreter!, (value) { + return $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>( + _value.codeInterpreter, (value) { return _then(_value.copyWith(codeInterpreter: value)); }); } @@ -66903,157 +58194,116 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl - extends RunStepDeltaStepDetailsToolCallsCodeObject { - const _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl( - {required this.index, - @JsonKey(includeIfNull: false) this.id, +class _$RunStepDetailsToolCallsCodeObjectImpl + extends RunStepDetailsToolCallsCodeObject { + const _$RunStepDetailsToolCallsCodeObjectImpl( + {required this.id, required this.type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - this.codeInterpreter}) + @JsonKey(name: 'code_interpreter') required this.codeInterpreter}) : super._(); - factory _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl.fromJson( + factory _$RunStepDetailsToolCallsCodeObjectImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplFromJson(json); - - /// The index of the tool call in the tool calls array. - @override - final int index; + _$$RunStepDetailsToolCallsCodeObjectImplFromJson(json); /// The ID of the tool call. @override - @JsonKey(includeIfNull: false) - final String? id; + final String id; /// Always `code_interpreter`. @override final String type; - /// The Code Interpreter tool call definition. - outputs + /// The Code Interpreter tool call definition. @override - @JsonKey(name: 'code_interpreter', includeIfNull: false) - final RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? 
- codeInterpreter; + @JsonKey(name: 'code_interpreter') + final RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter; @override String toString() { - return 'RunStepDeltaStepDetailsToolCalls.codeInterpreter(index: $index, id: $id, type: $type, codeInterpreter: $codeInterpreter)'; + return 'RunStepDetailsToolCalls.codeInterpreter(id: $id, type: $type, codeInterpreter: $codeInterpreter)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl && - (identical(other.index, index) || other.index == index) && + other is _$RunStepDetailsToolCallsCodeObjectImpl && (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && (identical(other.codeInterpreter, codeInterpreter) || other.codeInterpreter == codeInterpreter)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => - Object.hash(runtimeType, index, id, type, codeInterpreter); + int get hashCode => Object.hash(runtimeType, id, type, codeInterpreter); - /// Create a copy of RunStepDeltaStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> - get copyWith => - __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl>( - this, _$identity); + _$$RunStepDetailsToolCallsCodeObjectImplCopyWith< + _$RunStepDetailsToolCallsCodeObjectImpl> + get copyWith => __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsCodeObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, + String id, String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter) + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) codeInterpreter, - required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, + required TResult Function(String id, String type, @JsonKey(name: 'file_search') Map fileSearch) fileSearch, required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function) + String id, String type, RunStepDetailsToolCallsFunction function) function, }) { - return codeInterpreter(index, id, type, this.codeInterpreter); + return codeInterpreter(id, type, this.codeInterpreter); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, + String id, String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, + TResult? Function(String id, String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult? 
Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? + String id, String type, RunStepDetailsToolCallsFunction function)? function, }) { - return codeInterpreter?.call(index, id, type, this.codeInterpreter); + return codeInterpreter?.call(id, type, this.codeInterpreter); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, + String id, String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, + TResult Function(String id, String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? + String id, String type, RunStepDetailsToolCallsFunction function)? function, required TResult orElse(), }) { if (codeInterpreter != null) { - return codeInterpreter(index, id, type, this.codeInterpreter); + return codeInterpreter(id, type, this.codeInterpreter); } return orElse(); } @@ -67061,13 +58311,11 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) + required TResult Function(RunStepDetailsToolCallsCodeObject value) codeInterpreter, - required TResult Function( - RunStepDeltaStepDetailsToolCallsFileSearchObject value) + required TResult Function(RunStepDetailsToolCallsFileSearchObject value) fileSearch, - required TResult Function( - RunStepDeltaStepDetailsToolCallsFunctionObject value) + required TResult Function(RunStepDetailsToolCallsFunctionObject value) function, }) { return codeInterpreter(this); @@ -67076,12 +58324,10 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, }) { return codeInterpreter?.call(this); } @@ -67089,12 +58335,9 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, + TResult Function(RunStepDetailsToolCallsFunctionObject value)? 
function, required TResult orElse(), }) { if (codeInterpreter != null) { @@ -67105,103 +58348,82 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl @override Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplToJson( + return _$$RunStepDetailsToolCallsCodeObjectImplToJson( this, ); } } -abstract class RunStepDeltaStepDetailsToolCallsCodeObject - extends RunStepDeltaStepDetailsToolCalls { - const factory RunStepDeltaStepDetailsToolCallsCodeObject( - {required final int index, - @JsonKey(includeIfNull: false) final String? id, +abstract class RunStepDetailsToolCallsCodeObject + extends RunStepDetailsToolCalls { + const factory RunStepDetailsToolCallsCodeObject( + {required final String id, required final String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - final RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter}) = _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl; - const RunStepDeltaStepDetailsToolCallsCodeObject._() : super._(); + @JsonKey(name: 'code_interpreter') + required final RunStepDetailsToolCallsCodeObjectCodeInterpreter + codeInterpreter}) = _$RunStepDetailsToolCallsCodeObjectImpl; + const RunStepDetailsToolCallsCodeObject._() : super._(); - factory RunStepDeltaStepDetailsToolCallsCodeObject.fromJson( + factory RunStepDetailsToolCallsCodeObject.fromJson( Map json) = - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl.fromJson; + _$RunStepDetailsToolCallsCodeObjectImpl.fromJson; - /// The index of the tool call in the tool calls array. @override - int get index; /// The ID of the tool call. + String get id; @override - @JsonKey(includeIfNull: false) - String? get id; /// Always `code_interpreter`. - @override String get type; - /// The Code Interpreter tool call definition. - outputs - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - get codeInterpreter; - - /// Create a copy of RunStepDeltaStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. + /// The Code Interpreter tool call definition. + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter get codeInterpreter; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> + @JsonKey(ignore: true) + _$$RunStepDetailsToolCallsCodeObjectImplCopyWith< + _$RunStepDetailsToolCallsCodeObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< - $Res> implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl) - then) = - __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl< - $Res>; +abstract class _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> + implements $RunStepDetailsToolCallsCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith( + _$RunStepDetailsToolCallsFileSearchObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsFileSearchObjectImpl) then) = + __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {int index, - @JsonKey(includeIfNull: false) String? 
id, + {String id, String type, @JsonKey(name: 'file_search') Map fileSearch}); } /// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> - extends _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> - implements - _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> { - __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl) - _then) +class __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFileSearchObjectImpl> + implements _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDeltaStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, - Object? id = freezed, + Object? id = null, Object? type = null, Object? fileSearch = null, }) { - return _then(_$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, - id: freezed == id + return _then(_$RunStepDetailsToolCallsFileSearchObjectImpl( + id: null == id ? _value.id : id // ignore: cast_nullable_to_non_nullable - as String?, + as String, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable @@ -67216,29 +58438,23 @@ class __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl - extends RunStepDeltaStepDetailsToolCallsFileSearchObject { - const _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl( - {required this.index, - @JsonKey(includeIfNull: false) this.id, +class _$RunStepDetailsToolCallsFileSearchObjectImpl + extends RunStepDetailsToolCallsFileSearchObject { + const _$RunStepDetailsToolCallsFileSearchObjectImpl( + {required this.id, required this.type, @JsonKey(name: 'file_search') required final Map fileSearch}) : _fileSearch = fileSearch, super._(); - factory _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl.fromJson( + factory _$RunStepDetailsToolCallsFileSearchObjectImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplFromJson(json); - - /// The index of the tool call in the tool calls array. - @override - final int index; + _$$RunStepDetailsToolCallsFileSearchObjectImplFromJson(json); /// The ID of the tool call object. @override - @JsonKey(includeIfNull: false) - final String? id; + final String id; /// The type of tool call. This is always going to be `file_search` for this type of tool call. 
@override @@ -67258,122 +58474,91 @@ class _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl @override String toString() { - return 'RunStepDeltaStepDetailsToolCalls.fileSearch(index: $index, id: $id, type: $type, fileSearch: $fileSearch)'; + return 'RunStepDetailsToolCalls.fileSearch(id: $id, type: $type, fileSearch: $fileSearch)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl && - (identical(other.index, index) || other.index == index) && + other is _$RunStepDetailsToolCallsFileSearchObjectImpl && (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && const DeepCollectionEquality() .equals(other._fileSearch, _fileSearch)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, index, id, type, - const DeepCollectionEquality().hash(_fileSearch)); + int get hashCode => Object.hash( + runtimeType, id, type, const DeepCollectionEquality().hash(_fileSearch)); - /// Create a copy of RunStepDeltaStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> + _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchObjectImpl> get copyWith => - __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl>( - this, _$identity); + __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, + String id, String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter) + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) codeInterpreter, - required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, + required TResult Function(String id, String type, @JsonKey(name: 'file_search') Map fileSearch) fileSearch, required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function) + String id, String type, RunStepDetailsToolCallsFunction function) function, }) { - return fileSearch(index, id, type, this.fileSearch); + return fileSearch(id, type, this.fileSearch); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, + String id, String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, + TResult? Function(String id, String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult? 
Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? + String id, String type, RunStepDetailsToolCallsFunction function)? function, }) { - return fileSearch?.call(index, id, type, this.fileSearch); + return fileSearch?.call(id, type, this.fileSearch); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, + String id, String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, + TResult Function(String id, String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? + String id, String type, RunStepDetailsToolCallsFunction function)? function, required TResult orElse(), }) { if (fileSearch != null) { - return fileSearch(index, id, type, this.fileSearch); + return fileSearch(id, type, this.fileSearch); } return orElse(); } @@ -67381,13 +58566,11 @@ class _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) + required TResult Function(RunStepDetailsToolCallsCodeObject value) codeInterpreter, - required TResult Function( - RunStepDeltaStepDetailsToolCallsFileSearchObject value) + required TResult Function(RunStepDetailsToolCallsFileSearchObject value) fileSearch, - required TResult Function( - RunStepDeltaStepDetailsToolCallsFunctionObject value) + required TResult Function(RunStepDetailsToolCallsFunctionObject value) function, }) { return fileSearch(this); @@ -67396,12 +58579,10 @@ class _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, }) { return fileSearch?.call(this); } @@ -67409,12 +58590,9 @@ class _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, + TResult Function(RunStepDetailsToolCallsFunctionObject value)? 
function, required TResult orElse(), }) { if (fileSearch != null) { @@ -67425,125 +58603,97 @@ class _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl @override Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplToJson( + return _$$RunStepDetailsToolCallsFileSearchObjectImplToJson( this, ); } } -abstract class RunStepDeltaStepDetailsToolCallsFileSearchObject - extends RunStepDeltaStepDetailsToolCalls { - const factory RunStepDeltaStepDetailsToolCallsFileSearchObject( - {required final int index, - @JsonKey(includeIfNull: false) final String? id, +abstract class RunStepDetailsToolCallsFileSearchObject + extends RunStepDetailsToolCalls { + const factory RunStepDetailsToolCallsFileSearchObject( + {required final String id, required final String type, @JsonKey(name: 'file_search') required final Map fileSearch}) = - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl; - const RunStepDeltaStepDetailsToolCallsFileSearchObject._() : super._(); + _$RunStepDetailsToolCallsFileSearchObjectImpl; + const RunStepDetailsToolCallsFileSearchObject._() : super._(); - factory RunStepDeltaStepDetailsToolCallsFileSearchObject.fromJson( + factory RunStepDetailsToolCallsFileSearchObject.fromJson( Map json) = - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl.fromJson; + _$RunStepDetailsToolCallsFileSearchObjectImpl.fromJson; - /// The index of the tool call in the tool calls array. @override - int get index; /// The ID of the tool call object. + String get id; @override - @JsonKey(includeIfNull: false) - String? get id; /// The type of tool call. This is always going to be `file_search` for this type of tool call. - @override String get type; /// For now, this is always going to be an empty object. @JsonKey(name: 'file_search') Map get fileSearch; - - /// Create a copy of RunStepDeltaStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> + @JsonKey(ignore: true) + _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< - $Res> implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl) - then) = - __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith<$Res> + implements $RunStepDetailsToolCallsCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith( + _$RunStepDetailsToolCallsFunctionObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsFunctionObjectImpl) then) = + __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function}); + $Res call({String id, String type, RunStepDetailsToolCallsFunction function}); - $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>? 
get function; + $RunStepDetailsToolCallsFunctionCopyWith<$Res> get function; } /// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> - extends _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> - implements - _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith<$Res> { - __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl) _then) +class __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFunctionObjectImpl> + implements _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsFunctionObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsFunctionObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDeltaStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, - Object? id = freezed, + Object? id = null, Object? type = null, - Object? function = freezed, + Object? function = null, }) { - return _then(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, - id: freezed == id + return _then(_$RunStepDetailsToolCallsFunctionObjectImpl( + id: null == id ? _value.id : id // ignore: cast_nullable_to_non_nullable - as String?, + as String, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - function: freezed == function + function: null == function ? _value.function : function // ignore: cast_nullable_to_non_nullable - as RunStepDeltaStepDetailsToolCallsFunction?, + as RunStepDetailsToolCallsFunction, )); } - /// Create a copy of RunStepDeltaStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>? 
get function { - if (_value.function == null) { - return null; - } - - return $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>( - _value.function!, (value) { + $RunStepDetailsToolCallsFunctionCopyWith<$Res> get function { + return $RunStepDetailsToolCallsFunctionCopyWith<$Res>(_value.function, + (value) { return _then(_value.copyWith(function: value)); }); } @@ -67551,27 +58701,19 @@ class __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl - extends RunStepDeltaStepDetailsToolCallsFunctionObject { - const _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl( - {required this.index, - @JsonKey(includeIfNull: false) this.id, - required this.type, - @JsonKey(includeIfNull: false) this.function}) +class _$RunStepDetailsToolCallsFunctionObjectImpl + extends RunStepDetailsToolCallsFunctionObject { + const _$RunStepDetailsToolCallsFunctionObjectImpl( + {required this.id, required this.type, required this.function}) : super._(); - factory _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl.fromJson( + factory _$RunStepDetailsToolCallsFunctionObjectImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplFromJson(json); - - /// The index of the tool call in the tool calls array. - @override - final int index; + _$$RunStepDetailsToolCallsFunctionObjectImplFromJson(json); /// The ID of the tool call object. @override - @JsonKey(includeIfNull: false) - final String? id; + final String id; /// Always `function`. @override @@ -67579,126 +58721,93 @@ class _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl /// The definition of the function that was called. @override - @JsonKey(includeIfNull: false) - final RunStepDeltaStepDetailsToolCallsFunction? function; + final RunStepDetailsToolCallsFunction function; @override String toString() { - return 'RunStepDeltaStepDetailsToolCalls.function(index: $index, id: $id, type: $type, function: $function)'; + return 'RunStepDetailsToolCalls.function(id: $id, type: $type, function: $function)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl && - (identical(other.index, index) || other.index == index) && + other is _$RunStepDetailsToolCallsFunctionObjectImpl && (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && (identical(other.function, function) || other.function == function)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, index, id, type, function); + int get hashCode => Object.hash(runtimeType, id, type, function); - /// Create a copy of RunStepDeltaStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> - get copyWith => - __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl>( - this, _$identity); + _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith< + _$RunStepDetailsToolCallsFunctionObjectImpl> + get copyWith => __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsFunctionObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, + String id, String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter) + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) codeInterpreter, - required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, + required TResult Function(String id, String type, @JsonKey(name: 'file_search') Map fileSearch) fileSearch, required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function) + String id, String type, RunStepDetailsToolCallsFunction function) function, }) { - return function(index, id, type, this.function); + return function(id, type, this.function); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? - codeInterpreter, - TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, + String id, String type, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + codeInterpreter, + TResult? Function(String id, String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? + String id, String type, RunStepDetailsToolCallsFunction function)? function, }) { - return function?.call(index, id, type, this.function); + return function?.call(id, type, this.function); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, + String id, String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, + TResult Function(String id, String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? + String id, String type, RunStepDetailsToolCallsFunction function)? 
function, required TResult orElse(), }) { if (function != null) { - return function(index, id, type, this.function); + return function(id, type, this.function); } return orElse(); } @@ -67706,13 +58815,11 @@ class _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) + required TResult Function(RunStepDetailsToolCallsCodeObject value) codeInterpreter, - required TResult Function( - RunStepDeltaStepDetailsToolCallsFileSearchObject value) + required TResult Function(RunStepDetailsToolCallsFileSearchObject value) fileSearch, - required TResult Function( - RunStepDeltaStepDetailsToolCallsFunctionObject value) + required TResult Function(RunStepDetailsToolCallsFunctionObject value) function, }) { return function(this); @@ -67721,12 +58828,10 @@ class _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, }) { return function?.call(this); } @@ -67734,12 +58839,9 @@ class _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, + TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, required TResult orElse(), }) { if (function != null) { @@ -67750,128 +58852,103 @@ class _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl @override Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplToJson( + return _$$RunStepDetailsToolCallsFunctionObjectImplToJson( this, ); } } -abstract class RunStepDeltaStepDetailsToolCallsFunctionObject - extends RunStepDeltaStepDetailsToolCalls { - const factory RunStepDeltaStepDetailsToolCallsFunctionObject( - {required final int index, - @JsonKey(includeIfNull: false) final String? id, +abstract class RunStepDetailsToolCallsFunctionObject + extends RunStepDetailsToolCalls { + const factory RunStepDetailsToolCallsFunctionObject( + {required final String id, required final String type, - @JsonKey(includeIfNull: false) - final RunStepDeltaStepDetailsToolCallsFunction? 
function}) = - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl; - const RunStepDeltaStepDetailsToolCallsFunctionObject._() : super._(); + required final RunStepDetailsToolCallsFunction function}) = + _$RunStepDetailsToolCallsFunctionObjectImpl; + const RunStepDetailsToolCallsFunctionObject._() : super._(); - factory RunStepDeltaStepDetailsToolCallsFunctionObject.fromJson( + factory RunStepDetailsToolCallsFunctionObject.fromJson( Map json) = - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl.fromJson; + _$RunStepDetailsToolCallsFunctionObjectImpl.fromJson; - /// The index of the tool call in the tool calls array. @override - int get index; /// The ID of the tool call object. + String get id; @override - @JsonKey(includeIfNull: false) - String? get id; /// Always `function`. - @override String get type; /// The definition of the function that was called. - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? get function; - - /// Create a copy of RunStepDeltaStepDetailsToolCalls - /// with the given fields replaced by the non-null parameter values. + RunStepDetailsToolCallsFunction get function; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> + @JsonKey(ignore: true) + _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith< + _$RunStepDetailsToolCallsFunctionObjectImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDeltaStepDetailsToolCallsFunction - _$RunStepDeltaStepDetailsToolCallsFunctionFromJson( - Map json) { - return _RunStepDeltaStepDetailsToolCallsFunction.fromJson(json); +RunStepDetailsToolCallsFunction _$RunStepDetailsToolCallsFunctionFromJson( + Map json) { + return _RunStepDetailsToolCallsFunction.fromJson(json); } /// @nodoc -mixin _$RunStepDeltaStepDetailsToolCallsFunction { +mixin _$RunStepDetailsToolCallsFunction { /// The name of the function. - @JsonKey(includeIfNull: false) - String? get name => throw _privateConstructorUsedError; + String get name => throw _privateConstructorUsedError; /// The arguments passed to the function. - @JsonKey(includeIfNull: false) - String? get arguments => throw _privateConstructorUsedError; + String get arguments => throw _privateConstructorUsedError; /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. - @JsonKey(includeIfNull: false) String? get output => throw _privateConstructorUsedError; - /// Serializes this RunStepDeltaStepDetailsToolCallsFunction to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $RunStepDeltaStepDetailsToolCallsFunctionCopyWith< - RunStepDeltaStepDetailsToolCallsFunction> + @JsonKey(ignore: true) + $RunStepDetailsToolCallsFunctionCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res> { - factory $RunStepDeltaStepDetailsToolCallsFunctionCopyWith( - RunStepDeltaStepDetailsToolCallsFunction value, - $Res Function(RunStepDeltaStepDetailsToolCallsFunction) then) = - _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, - RunStepDeltaStepDetailsToolCallsFunction>; +abstract class $RunStepDetailsToolCallsFunctionCopyWith<$Res> { + factory $RunStepDetailsToolCallsFunctionCopyWith( + RunStepDetailsToolCallsFunction value, + $Res Function(RunStepDetailsToolCallsFunction) then) = + _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, + RunStepDetailsToolCallsFunction>; @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? name, - @JsonKey(includeIfNull: false) String? arguments, - @JsonKey(includeIfNull: false) String? output}); + $Res call({String name, String arguments, String? output}); } /// @nodoc -class _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, - $Val extends RunStepDeltaStepDetailsToolCallsFunction> - implements $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res> { - _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl( - this._value, this._then); +class _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFunction> + implements $RunStepDetailsToolCallsFunctionCopyWith<$Res> { + _$RunStepDetailsToolCallsFunctionCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? name = freezed, - Object? arguments = freezed, + Object? name = null, + Object? arguments = null, Object? output = freezed, }) { return _then(_value.copyWith( - name: freezed == name + name: null == name ? _value.name : name // ignore: cast_nullable_to_non_nullable - as String?, - arguments: freezed == arguments + as String, + arguments: null == arguments ? _value.arguments : arguments // ignore: cast_nullable_to_non_nullable - as String?, + as String, output: freezed == output ? _value.output : output // ignore: cast_nullable_to_non_nullable @@ -67881,48 +58958,43 @@ class _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith<$Res> - implements $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsFunctionImpl value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionImpl) then) = - __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsFunctionImplCopyWith<$Res> + implements $RunStepDetailsToolCallsFunctionCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFunctionImplCopyWith( + _$RunStepDetailsToolCallsFunctionImpl value, + $Res Function(_$RunStepDetailsToolCallsFunctionImpl) then) = + __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? 
name, - @JsonKey(includeIfNull: false) String? arguments, - @JsonKey(includeIfNull: false) String? output}); + $Res call({String name, String arguments, String? output}); } /// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> - extends _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsFunctionImpl> - implements _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith<$Res> { - __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsFunctionImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionImpl) _then) +class __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFunctionImpl> + implements _$$RunStepDetailsToolCallsFunctionImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl( + _$RunStepDetailsToolCallsFunctionImpl _value, + $Res Function(_$RunStepDetailsToolCallsFunctionImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? name = freezed, - Object? arguments = freezed, + Object? name = null, + Object? arguments = null, Object? output = freezed, }) { - return _then(_$RunStepDeltaStepDetailsToolCallsFunctionImpl( - name: freezed == name + return _then(_$RunStepDetailsToolCallsFunctionImpl( + name: null == name ? _value.name : name // ignore: cast_nullable_to_non_nullable - as String?, - arguments: freezed == arguments + as String, + arguments: null == arguments ? _value.arguments : arguments // ignore: cast_nullable_to_non_nullable - as String?, + as String, output: freezed == output ? _value.output : output // ignore: cast_nullable_to_non_nullable @@ -67933,959 +59005,826 @@ class __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsFunctionImpl - extends _RunStepDeltaStepDetailsToolCallsFunction { - const _$RunStepDeltaStepDetailsToolCallsFunctionImpl( - {@JsonKey(includeIfNull: false) this.name, - @JsonKey(includeIfNull: false) this.arguments, - @JsonKey(includeIfNull: false) this.output}) +class _$RunStepDetailsToolCallsFunctionImpl + extends _RunStepDetailsToolCallsFunction { + const _$RunStepDetailsToolCallsFunctionImpl( + {required this.name, required this.arguments, required this.output}) : super._(); - factory _$RunStepDeltaStepDetailsToolCallsFunctionImpl.fromJson( + factory _$RunStepDetailsToolCallsFunctionImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsToolCallsFunctionImplFromJson(json); + _$$RunStepDetailsToolCallsFunctionImplFromJson(json); /// The name of the function. @override - @JsonKey(includeIfNull: false) - final String? name; + final String name; /// The arguments passed to the function. @override - @JsonKey(includeIfNull: false) - final String? arguments; + final String arguments; /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. @override - @JsonKey(includeIfNull: false) final String? 
output; @override String toString() { - return 'RunStepDeltaStepDetailsToolCallsFunction(name: $name, arguments: $arguments, output: $output)'; + return 'RunStepDetailsToolCallsFunction(name: $name, arguments: $arguments, output: $output)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDeltaStepDetailsToolCallsFunctionImpl && + other is _$RunStepDetailsToolCallsFunctionImpl && (identical(other.name, name) || other.name == name) && (identical(other.arguments, arguments) || other.arguments == arguments) && (identical(other.output, output) || other.output == output)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, name, arguments, output); - /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsFunctionImpl> - get copyWith => - __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsFunctionImpl>(this, _$identity); + _$$RunStepDetailsToolCallsFunctionImplCopyWith< + _$RunStepDetailsToolCallsFunctionImpl> + get copyWith => __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl< + _$RunStepDetailsToolCallsFunctionImpl>(this, _$identity); @override Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsFunctionImplToJson( + return _$$RunStepDetailsToolCallsFunctionImplToJson( this, ); } } -abstract class _RunStepDeltaStepDetailsToolCallsFunction - extends RunStepDeltaStepDetailsToolCallsFunction { - const factory _RunStepDeltaStepDetailsToolCallsFunction( - {@JsonKey(includeIfNull: false) final String? name, - @JsonKey(includeIfNull: false) final String? arguments, - @JsonKey(includeIfNull: false) final String? output}) = - _$RunStepDeltaStepDetailsToolCallsFunctionImpl; - const _RunStepDeltaStepDetailsToolCallsFunction._() : super._(); +abstract class _RunStepDetailsToolCallsFunction + extends RunStepDetailsToolCallsFunction { + const factory _RunStepDetailsToolCallsFunction( + {required final String name, + required final String arguments, + required final String? output}) = _$RunStepDetailsToolCallsFunctionImpl; + const _RunStepDetailsToolCallsFunction._() : super._(); - factory _RunStepDeltaStepDetailsToolCallsFunction.fromJson( - Map json) = - _$RunStepDeltaStepDetailsToolCallsFunctionImpl.fromJson; + factory _RunStepDetailsToolCallsFunction.fromJson(Map json) = + _$RunStepDetailsToolCallsFunctionImpl.fromJson; + + @override /// The name of the function. + String get name; @override - @JsonKey(includeIfNull: false) - String? get name; /// The arguments passed to the function. + String get arguments; @override - @JsonKey(includeIfNull: false) - String? get arguments; /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. - @override - @JsonKey(includeIfNull: false) String? get output; - - /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsFunctionImpl> + @JsonKey(ignore: true) + _$$RunStepDetailsToolCallsFunctionImplCopyWith< + _$RunStepDetailsToolCallsFunctionImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDetailsToolCallsCodeOutput _$RunStepDetailsToolCallsCodeOutputFromJson( +RunStepDeltaStepDetailsToolCalls _$RunStepDeltaStepDetailsToolCallsFromJson( Map json) { switch (json['type']) { - case 'logs': - return RunStepDetailsToolCallsCodeOutputLogsObject.fromJson(json); - case 'image': - return RunStepDetailsToolCallsCodeOutputImageObject.fromJson(json); + case 'code_interpreter': + return RunStepDeltaStepDetailsToolCallsCodeObject.fromJson(json); + case 'file_search': + return RunStepDeltaStepDetailsToolCallsFileSearchObject.fromJson(json); + case 'function': + return RunStepDeltaStepDetailsToolCallsFunctionObject.fromJson(json); default: throw CheckedFromJsonException( json, 'type', - 'RunStepDetailsToolCallsCodeOutput', + 'RunStepDeltaStepDetailsToolCalls', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$RunStepDetailsToolCallsCodeOutput { - /// Always `logs`. +mixin _$RunStepDeltaStepDetailsToolCalls { + /// The index of the tool call in the tool calls array. + int get index => throw _privateConstructorUsedError; + + /// The ID of the tool call. + @JsonKey(includeIfNull: false) + String? get id => throw _privateConstructorUsedError; + + /// Always `code_interpreter`. String get type => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function(String type, String logs) logs, required TResult Function( - String type, RunStepDetailsToolCallsCodeOutputImage image) - image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String type, String logs)? logs, - TResult? Function( - String type, RunStepDetailsToolCallsCodeOutputImage image)? - image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String type, String logs)? logs, - TResult Function(String type, RunStepDetailsToolCallsCodeOutputImage image)? - image, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value) - logs, + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter) + codeInterpreter, required TResult Function( - RunStepDetailsToolCallsCodeOutputImageObject value) - image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, - TResult? Function(RunStepDetailsToolCallsCodeOutputImageObject value)? - image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, - TResult Function(RunStepDetailsToolCallsCodeOutputImageObject value)? image, - required TResult orElse(), + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'file_search') Map fileSearch) + fileSearch, + required TResult Function( + int index, + @JsonKey(includeIfNull: false) String? 
id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function) + function, }) => throw _privateConstructorUsedError; - - /// Serializes this RunStepDetailsToolCallsCodeOutput to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of RunStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $RunStepDetailsToolCallsCodeOutputCopyWith - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { - factory $RunStepDetailsToolCallsCodeOutputCopyWith( - RunStepDetailsToolCallsCodeOutput value, - $Res Function(RunStepDetailsToolCallsCodeOutput) then) = - _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - RunStepDetailsToolCallsCodeOutput>; - @useResult - $Res call({String type}); -} - -/// @nodoc -class _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - $Val extends RunStepDetailsToolCallsCodeOutput> - implements $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { - _$RunStepDetailsToolCallsCodeOutputCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of RunStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith<$Res> - implements $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith( - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl value, - $Res Function(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl) - then) = - __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({String type, String logs}); -} - -/// @nodoc -class __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> - implements - _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl) _then) - : super(_value, _then); - - /// Create a copy of RunStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - Object? logs = null, - }) { - return _then(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - logs: null == logs - ? 
_value.logs - : logs // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl - extends RunStepDetailsToolCallsCodeOutputLogsObject { - const _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl( - {required this.type, required this.logs}) - : super._(); - - factory _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson( - Map json) => - _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplFromJson(json); - - /// Always `logs`. - @override - final String type; - - /// The text output from the Code Interpreter tool call. - @override - final String logs; - - @override - String toString() { - return 'RunStepDetailsToolCallsCodeOutput.logs(type: $type, logs: $logs)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.logs, logs) || other.logs == logs)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, type, logs); - - /// Create a copy of RunStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> - get copyWith => - __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl>( - this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(String type, String logs) logs, - required TResult Function( - String type, RunStepDetailsToolCallsCodeOutputImage image) - image, - }) { - return logs(type, this.logs); - } - - @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, String logs)? logs, TResult? Function( - String type, RunStepDetailsToolCallsCodeOutputImage image)? - image, - }) { - return logs?.call(type, this.logs); - } - - @override + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? + codeInterpreter, + TResult? Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'file_search') Map fileSearch)? + fileSearch, + TResult? Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function)? + function, + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, String logs)? logs, - TResult Function(String type, RunStepDetailsToolCallsCodeOutputImage image)? - image, + TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? + codeInterpreter, + TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'file_search') Map fileSearch)? + fileSearch, + TResult Function( + int index, + @JsonKey(includeIfNull: false) String? 
id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function)? + function, required TResult orElse(), - }) { - if (logs != null) { - return logs(type, this.logs); - } - return orElse(); - } - - @override + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value) - logs, + required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) + codeInterpreter, required TResult Function( - RunStepDetailsToolCallsCodeOutputImageObject value) - image, - }) { - return logs(this); - } - - @override + RunStepDeltaStepDetailsToolCallsFileSearchObject value) + fileSearch, + required TResult Function( + RunStepDeltaStepDetailsToolCallsFunctionObject value) + function, + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, - TResult? Function(RunStepDetailsToolCallsCodeOutputImageObject value)? - image, - }) { - return logs?.call(this); - } - - @override + TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? + function, + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, - TResult Function(RunStepDetailsToolCallsCodeOutputImageObject value)? image, + TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? + function, required TResult orElse(), - }) { - if (logs != null) { - return logs(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplToJson( - this, - ); - } + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $RunStepDeltaStepDetailsToolCallsCopyWith + get copyWith => throw _privateConstructorUsedError; } -abstract class RunStepDetailsToolCallsCodeOutputLogsObject - extends RunStepDetailsToolCallsCodeOutput { - const factory RunStepDetailsToolCallsCodeOutputLogsObject( - {required final String type, required final String logs}) = - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl; - const RunStepDetailsToolCallsCodeOutputLogsObject._() : super._(); - - factory RunStepDetailsToolCallsCodeOutputLogsObject.fromJson( - Map json) = - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson; +/// @nodoc +abstract class $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { + factory $RunStepDeltaStepDetailsToolCallsCopyWith( + RunStepDeltaStepDetailsToolCalls value, + $Res Function(RunStepDeltaStepDetailsToolCalls) then) = + _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, + RunStepDeltaStepDetailsToolCalls>; + @useResult + $Res call( + {int index, @JsonKey(includeIfNull: false) String? id, String type}); +} - /// Always `logs`. 
- @override - String get type; +/// @nodoc +class _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, + $Val extends RunStepDeltaStepDetailsToolCalls> + implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { + _$RunStepDeltaStepDetailsToolCallsCopyWithImpl(this._value, this._then); - /// The text output from the Code Interpreter tool call. - String get logs; + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; - /// Create a copy of RunStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> - get copyWith => throw _privateConstructorUsedError; + $Res call({ + Object? index = null, + Object? id = freezed, + Object? type = null, + }) { + return _then(_value.copyWith( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + id: freezed == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String?, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } } /// @nodoc -abstract class _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith<$Res> - implements $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith( - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl value, - $Res Function(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl) +abstract class _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith<$Res> + implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl) then) = - __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl<$Res>; + __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type, RunStepDetailsToolCallsCodeOutputImage image}); + $Res call( + {int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter}); - $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res> get image; + $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>? 
+ get codeInterpreter; } /// @nodoc -class __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> - implements - _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl) _then) +class __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> + extends _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> + implements _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith<$Res> { + __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, + Object? id = freezed, Object? type = null, - Object? image = null, + Object? codeInterpreter = freezed, }) { - return _then(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl( + return _then(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + id: freezed == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String?, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - image: null == image - ? _value.image - : image // ignore: cast_nullable_to_non_nullable - as RunStepDetailsToolCallsCodeOutputImage, + codeInterpreter: freezed == codeInterpreter + ? _value.codeInterpreter + : codeInterpreter // ignore: cast_nullable_to_non_nullable + as RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter?, )); } - /// Create a copy of RunStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res> get image { - return $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res>(_value.image, - (value) { - return _then(_value.copyWith(image: value)); + $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>? 
+ get codeInterpreter { + if (_value.codeInterpreter == null) { + return null; + } + + return $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith< + $Res>(_value.codeInterpreter!, (value) { + return _then(_value.copyWith(codeInterpreter: value)); }); } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsCodeOutputImageObjectImpl - extends RunStepDetailsToolCallsCodeOutputImageObject { - const _$RunStepDetailsToolCallsCodeOutputImageObjectImpl( - {required this.type, required this.image}) +class _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl + extends RunStepDeltaStepDetailsToolCallsCodeObject { + const _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl( + {required this.index, + @JsonKey(includeIfNull: false) this.id, + required this.type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + this.codeInterpreter}) : super._(); - factory _$RunStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson( + factory _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsCodeOutputImageObjectImplFromJson(json); + _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplFromJson(json); - /// Always `image`. + /// The index of the tool call in the tool calls array. + @override + final int index; + + /// The ID of the tool call. + @override + @JsonKey(includeIfNull: false) + final String? id; + + /// Always `code_interpreter`. @override final String type; - /// Code interpreter image output. + /// The Code Interpreter tool call definition. - outputs @override - final RunStepDetailsToolCallsCodeOutputImage image; + @JsonKey(name: 'code_interpreter', includeIfNull: false) + final RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter; @override String toString() { - return 'RunStepDetailsToolCallsCodeOutput.image(type: $type, image: $image)'; + return 'RunStepDeltaStepDetailsToolCalls.codeInterpreter(index: $index, id: $id, type: $type, codeInterpreter: $codeInterpreter)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsCodeOutputImageObjectImpl && + other is _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl && + (identical(other.index, index) || other.index == index) && + (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && - (identical(other.image, image) || other.image == image)); + (identical(other.codeInterpreter, codeInterpreter) || + other.codeInterpreter == codeInterpreter)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, image); + int get hashCode => + Object.hash(runtimeType, index, id, type, codeInterpreter); - /// Create a copy of RunStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> + _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> get copyWith => - __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl>( + __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type, String logs) logs, required TResult Function( - String type, RunStepDetailsToolCallsCodeOutputImage image) - image, + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter) + codeInterpreter, + required TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'file_search') Map fileSearch) + fileSearch, + required TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function) + function, }) { - return image(type, this.image); + return codeInterpreter(index, id, type, this.codeInterpreter); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, String logs)? logs, TResult? Function( - String type, RunStepDetailsToolCallsCodeOutputImage image)? - image, + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? + codeInterpreter, + TResult? Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'file_search') Map fileSearch)? + fileSearch, + TResult? Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function)? + function, }) { - return image?.call(type, this.image); + return codeInterpreter?.call(index, id, type, this.codeInterpreter); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, String logs)? logs, - TResult Function(String type, RunStepDetailsToolCallsCodeOutputImage image)? - image, - required TResult orElse(), - }) { - if (image != null) { - return image(type, this.image); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value) - logs, - required TResult Function( - RunStepDetailsToolCallsCodeOutputImageObject value) - image, - }) { - return image(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, - TResult? Function(RunStepDetailsToolCallsCodeOutputImageObject value)? - image, - }) { - return image?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, - TResult Function(RunStepDetailsToolCallsCodeOutputImageObject value)? 
image, - required TResult orElse(), - }) { - if (image != null) { - return image(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$RunStepDetailsToolCallsCodeOutputImageObjectImplToJson( - this, - ); - } -} - -abstract class RunStepDetailsToolCallsCodeOutputImageObject - extends RunStepDetailsToolCallsCodeOutput { - const factory RunStepDetailsToolCallsCodeOutputImageObject( - {required final String type, - required final RunStepDetailsToolCallsCodeOutputImage image}) = - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl; - const RunStepDetailsToolCallsCodeOutputImageObject._() : super._(); - - factory RunStepDetailsToolCallsCodeOutputImageObject.fromJson( - Map json) = - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson; - - /// Always `image`. - @override - String get type; - - /// Code interpreter image output. - RunStepDetailsToolCallsCodeOutputImage get image; - - /// Create a copy of RunStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> - get copyWith => throw _privateConstructorUsedError; -} - -RunStepDeltaStepDetailsToolCallsCodeOutput - _$RunStepDeltaStepDetailsToolCallsCodeOutputFromJson( - Map json) { - switch (json['type']) { - case 'logs': - return RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject.fromJson( - json); - case 'image': - return RunStepDeltaStepDetailsToolCallsCodeOutputImageObject.fromJson( - json); - - default: - throw CheckedFromJsonException( - json, - 'type', - 'RunStepDeltaStepDetailsToolCallsCodeOutput', - 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$RunStepDeltaStepDetailsToolCallsCodeOutput { - /// The index of the output in the outputs array. - int get index => throw _privateConstructorUsedError; - - /// Always `logs`. - String get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function( - int index, String type, @JsonKey(includeIfNull: false) String? logs) - logs, - required TResult Function( + TResult Function( int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image) - image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) String? logs)? - logs, - TResult? Function( + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? + codeInterpreter, + TResult Function( int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? - image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? logs)? - logs, + @JsonKey(name: 'file_search') Map fileSearch)? + fileSearch, TResult Function( int index, + @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? - image, + RunStepDeltaStepDetailsToolCallsFunction? function)? 
+ function, required TResult orElse(), - }) => - throw _privateConstructorUsedError; + }) { + if (codeInterpreter != null) { + return codeInterpreter(index, id, type, this.codeInterpreter); + } + return orElse(); + } + + @override @optionalTypeArgs TResult map({ + required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) + codeInterpreter, required TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value) - logs, + RunStepDeltaStepDetailsToolCallsFileSearchObject value) + fileSearch, required TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value) - image, - }) => - throw _privateConstructorUsedError; + RunStepDeltaStepDetailsToolCallsFunctionObject value) + function, + }) { + return codeInterpreter(this); + } + + @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? - logs, - TResult? Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? - image, - }) => - throw _privateConstructorUsedError; + TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? + function, + }) { + return codeInterpreter?.call(this); + } + + @override @optionalTypeArgs TResult maybeMap({ - TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? - logs, - TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? + function, required TResult orElse(), - }) => - throw _privateConstructorUsedError; - - /// Serializes this RunStepDeltaStepDetailsToolCallsCodeOutput to a JSON map. - Map toJson() => throw _privateConstructorUsedError; + }) { + if (codeInterpreter != null) { + return codeInterpreter(this); + } + return orElse(); + } - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith< - RunStepDeltaStepDetailsToolCallsCodeOutput> - get copyWith => throw _privateConstructorUsedError; + @override + Map toJson() { + return _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplToJson( + this, + ); + } } -/// @nodoc -abstract class $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { - factory $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith( - RunStepDeltaStepDetailsToolCallsCodeOutput value, - $Res Function(RunStepDeltaStepDetailsToolCallsCodeOutput) then) = - _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - RunStepDeltaStepDetailsToolCallsCodeOutput>; - @useResult - $Res call({int index, String type}); -} +abstract class RunStepDeltaStepDetailsToolCallsCodeObject + extends RunStepDeltaStepDetailsToolCalls { + const factory RunStepDeltaStepDetailsToolCallsCodeObject( + {required final int index, + @JsonKey(includeIfNull: false) final String? id, + required final String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + final RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? 
+ codeInterpreter}) = _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl; + const RunStepDeltaStepDetailsToolCallsCodeObject._() : super._(); -/// @nodoc -class _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - $Val extends RunStepDeltaStepDetailsToolCallsCodeOutput> - implements $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { - _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl( - this._value, this._then); + factory RunStepDeltaStepDetailsToolCallsCodeObject.fromJson( + Map json) = + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl.fromJson; - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + @override - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') + /// The index of the tool call in the tool calls array. + int get index; @override - $Res call({ - Object? index = null, - Object? type = null, - }) { - return _then(_value.copyWith( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } + + /// The ID of the tool call. + @JsonKey(includeIfNull: false) + String? get id; + @override + + /// Always `code_interpreter`. + String get type; + + /// The Code Interpreter tool call definition. - outputs + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + get codeInterpreter; + @override + @JsonKey(ignore: true) + _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< - $Res> implements $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl value, - $Res Function( - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl) +abstract class _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< + $Res> implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl) then) = - __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< + __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl< $Res>; @override @useResult $Res call( - {int index, String type, @JsonKey(includeIfNull: false) String? logs}); + {int index, + @JsonKey(includeIfNull: false) String? 
id, + String type, + @JsonKey(name: 'file_search') Map fileSearch}); } /// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< - $Res> - extends _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> +class __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> + extends _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> implements - _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< - $Res> { - __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl) + _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> { + __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? index = null, + Object? id = freezed, Object? type = null, - Object? logs = freezed, + Object? fileSearch = null, }) { - return _then(_$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl( + return _then(_$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl( index: null == index ? _value.index : index // ignore: cast_nullable_to_non_nullable as int, + id: freezed == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String?, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - logs: freezed == logs - ? _value.logs - : logs // ignore: cast_nullable_to_non_nullable - as String?, + fileSearch: null == fileSearch + ? _value._fileSearch + : fileSearch // ignore: cast_nullable_to_non_nullable + as Map, )); } } /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl - extends RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject { - const _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl( +class _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl + extends RunStepDeltaStepDetailsToolCallsFileSearchObject { + const _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl( {required this.index, + @JsonKey(includeIfNull: false) this.id, required this.type, - @JsonKey(includeIfNull: false) this.logs}) - : super._(); + @JsonKey(name: 'file_search') + required final Map fileSearch}) + : _fileSearch = fileSearch, + super._(); - factory _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson( + factory _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplFromJson(json); + _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplFromJson(json); - /// The index of the output in the outputs array. + /// The index of the tool call in the tool calls array. @override final int index; - /// Always `logs`. + /// The ID of the tool call object. + @override + @JsonKey(includeIfNull: false) + final String? id; + + /// The type of tool call. This is always going to be `file_search` for this type of tool call. @override final String type; - /// The text output from the Code Interpreter tool call. 
+ /// For now, this is always going to be an empty object. + final Map _fileSearch; + + /// For now, this is always going to be an empty object. @override - @JsonKey(includeIfNull: false) - final String? logs; + @JsonKey(name: 'file_search') + Map get fileSearch { + if (_fileSearch is EqualUnmodifiableMapView) return _fileSearch; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_fileSearch); + } @override String toString() { - return 'RunStepDeltaStepDetailsToolCallsCodeOutput.logs(index: $index, type: $type, logs: $logs)'; + return 'RunStepDeltaStepDetailsToolCalls.fileSearch(index: $index, id: $id, type: $type, fileSearch: $fileSearch)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl && + other is _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl && (identical(other.index, index) || other.index == index) && + (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && - (identical(other.logs, logs) || other.logs == logs)); + const DeepCollectionEquality() + .equals(other._fileSearch, _fileSearch)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, index, type, logs); + int get hashCode => Object.hash(runtimeType, index, id, type, + const DeepCollectionEquality().hash(_fileSearch)); - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> + _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> get copyWith => - __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl>( + __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - int index, String type, @JsonKey(includeIfNull: false) String? logs) - logs, + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter) + codeInterpreter, + required TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'file_search') Map fileSearch) + fileSearch, required TResult Function( int index, + @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image) - image, + RunStepDeltaStepDetailsToolCallsFunction? function) + function, }) { - return logs(index, type, this.logs); + return fileSearch(index, id, type, this.fileSearch); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) String? logs)? - logs, TResult? Function( int index, + @JsonKey(includeIfNull: false) String? 
id, + String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? + codeInterpreter, + TResult? Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'file_search') Map fileSearch)? + fileSearch, + TResult? Function( + int index, + @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? - image, + RunStepDeltaStepDetailsToolCallsFunction? function)? + function, }) { - return logs?.call(index, type, this.logs); + return fileSearch?.call(index, id, type, this.fileSearch); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? logs)? - logs, TResult Function( int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? + codeInterpreter, + TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'file_search') Map fileSearch)? + fileSearch, + TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? - image, + RunStepDeltaStepDetailsToolCallsFunction? function)? + function, required TResult orElse(), }) { - if (logs != null) { - return logs(index, type, this.logs); + if (fileSearch != null) { + return fileSearch(index, id, type, this.fileSearch); } return orElse(); } @@ -68893,272 +59832,316 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl @override @optionalTypeArgs TResult map({ + required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) + codeInterpreter, required TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value) - logs, + RunStepDeltaStepDetailsToolCallsFileSearchObject value) + fileSearch, required TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value) - image, + RunStepDeltaStepDetailsToolCallsFunctionObject value) + function, }) { - return logs(this); + return fileSearch(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? - logs, - TResult? Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? + function, }) { - return logs?.call(this); + return fileSearch?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? - logs, - TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? 
+ function, required TResult orElse(), }) { - if (logs != null) { - return logs(this); + if (fileSearch != null) { + return fileSearch(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplToJson( + return _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplToJson( this, ); } } -abstract class RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject - extends RunStepDeltaStepDetailsToolCallsCodeOutput { - const factory RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject( +abstract class RunStepDeltaStepDetailsToolCallsFileSearchObject + extends RunStepDeltaStepDetailsToolCalls { + const factory RunStepDeltaStepDetailsToolCallsFileSearchObject( {required final int index, + @JsonKey(includeIfNull: false) final String? id, required final String type, - @JsonKey(includeIfNull: false) final String? logs}) = - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl; - const RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject._() : super._(); + @JsonKey(name: 'file_search') + required final Map fileSearch}) = + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl; + const RunStepDeltaStepDetailsToolCallsFileSearchObject._() : super._(); - factory RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject.fromJson( + factory RunStepDeltaStepDetailsToolCallsFileSearchObject.fromJson( Map json) = - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson; + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl.fromJson; - /// The index of the output in the outputs array. @override - int get index; - /// Always `logs`. + /// The index of the tool call in the tool calls array. + int get index; @override - String get type; - /// The text output from the Code Interpreter tool call. + /// The ID of the tool call object. @JsonKey(includeIfNull: false) - String? get logs; + String? get id; + @override - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. + /// The type of tool call. This is always going to be `file_search` for this type of tool call. + String get type; + + /// For now, this is always going to be an empty object. 
+ @JsonKey(name: 'file_search') + Map get fileSearch; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> + @JsonKey(ignore: true) + _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< - $Res> implements $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl value, - $Res Function( - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl) +abstract class _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< + $Res> implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl) then) = - __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< - $Res>; + __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( {int index, + @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image}); + RunStepDeltaStepDetailsToolCallsFunction? function}); - $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>? get image; + $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>? get function; } /// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< - $Res> - extends _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> +class __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> + extends _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> implements - _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< - $Res> { - __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl) - _then) + _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith<$Res> { + __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl) _then) : super(_value, _then); - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? index = null, + Object? id = freezed, Object? type = null, - Object? image = freezed, + Object? function = freezed, }) { - return _then(_$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl( + return _then(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl( index: null == index ? _value.index : index // ignore: cast_nullable_to_non_nullable as int, + id: freezed == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String?, type: null == type ? 
_value.type : type // ignore: cast_nullable_to_non_nullable as String, - image: freezed == image - ? _value.image - : image // ignore: cast_nullable_to_non_nullable - as RunStepDeltaStepDetailsToolCallsCodeOutputImage?, + function: freezed == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as RunStepDeltaStepDetailsToolCallsFunction?, )); } - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>? get image { - if (_value.image == null) { + $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>? get function { + if (_value.function == null) { return null; } - return $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>( - _value.image!, (value) { - return _then(_value.copyWith(image: value)); + return $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>( + _value.function!, (value) { + return _then(_value.copyWith(function: value)); }); } } /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl - extends RunStepDeltaStepDetailsToolCallsCodeOutputImageObject { - const _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl( +class _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl + extends RunStepDeltaStepDetailsToolCallsFunctionObject { + const _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl( {required this.index, + @JsonKey(includeIfNull: false) this.id, required this.type, - @JsonKey(includeIfNull: false) this.image}) + @JsonKey(includeIfNull: false) this.function}) : super._(); - factory _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson( + factory _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplFromJson( - json); + _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplFromJson(json); - /// The index of the output in the outputs array. + /// The index of the tool call in the tool calls array. @override final int index; - /// Always `image`. + /// The ID of the tool call object. + @override + @JsonKey(includeIfNull: false) + final String? id; + + /// Always `function`. @override final String type; - /// Code interpreter image output. + /// The definition of the function that was called. @override @JsonKey(includeIfNull: false) - final RunStepDeltaStepDetailsToolCallsCodeOutputImage? image; + final RunStepDeltaStepDetailsToolCallsFunction? 
function; @override String toString() { - return 'RunStepDeltaStepDetailsToolCallsCodeOutput.image(index: $index, type: $type, image: $image)'; + return 'RunStepDeltaStepDetailsToolCalls.function(index: $index, id: $id, type: $type, function: $function)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl && + other is _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl && (identical(other.index, index) || other.index == index) && + (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && - (identical(other.image, image) || other.image == image)); + (identical(other.function, function) || + other.function == function)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, index, type, image); + int get hashCode => Object.hash(runtimeType, index, id, type, function); - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> + _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> get copyWith => - __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl>( + __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - int index, String type, @JsonKey(includeIfNull: false) String? logs) - logs, + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter) + codeInterpreter, + required TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'file_search') Map fileSearch) + fileSearch, required TResult Function( int index, + @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image) - image, + RunStepDeltaStepDetailsToolCallsFunction? function) + function, }) { - return image(index, type, this.image); + return function(index, id, type, this.function); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) String? logs)? - logs, TResult? Function( int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? + codeInterpreter, + TResult? Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'file_search') Map fileSearch)? + fileSearch, + TResult? Function( + int index, + @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? 
- image, + RunStepDeltaStepDetailsToolCallsFunction? function)? + function, }) { - return image?.call(index, type, this.image); + return function?.call(index, id, type, this.function); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? logs)? - logs, TResult Function( int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? + codeInterpreter, + TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'file_search') Map fileSearch)? + fileSearch, + TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? - image, + RunStepDeltaStepDetailsToolCallsFunction? function)? + function, required TResult orElse(), }) { - if (image != null) { - return image(index, type, this.image); + if (function != null) { + return function(index, id, type, this.function); } return orElse(); } @@ -69166,184 +60149,418 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl @override @optionalTypeArgs TResult map({ + required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) + codeInterpreter, required TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value) - logs, + RunStepDeltaStepDetailsToolCallsFileSearchObject value) + fileSearch, required TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value) - image, + RunStepDeltaStepDetailsToolCallsFunctionObject value) + function, }) { - return image(this); + return function(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? - logs, - TResult? Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? + function, }) { - return image?.call(this); + return function?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? - logs, - TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? 
+ function, required TResult orElse(), }) { - if (image != null) { - return image(this); + if (function != null) { + return function(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplToJson( + return _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplToJson( this, ); } } -abstract class RunStepDeltaStepDetailsToolCallsCodeOutputImageObject - extends RunStepDeltaStepDetailsToolCallsCodeOutput { - const factory RunStepDeltaStepDetailsToolCallsCodeOutputImageObject( +abstract class RunStepDeltaStepDetailsToolCallsFunctionObject + extends RunStepDeltaStepDetailsToolCalls { + const factory RunStepDeltaStepDetailsToolCallsFunctionObject( {required final int index, + @JsonKey(includeIfNull: false) final String? id, required final String type, @JsonKey(includeIfNull: false) - final RunStepDeltaStepDetailsToolCallsCodeOutputImage? image}) = - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl; - const RunStepDeltaStepDetailsToolCallsCodeOutputImageObject._() : super._(); + final RunStepDeltaStepDetailsToolCallsFunction? function}) = + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl; + const RunStepDeltaStepDetailsToolCallsFunctionObject._() : super._(); - factory RunStepDeltaStepDetailsToolCallsCodeOutputImageObject.fromJson( + factory RunStepDeltaStepDetailsToolCallsFunctionObject.fromJson( Map json) = - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson; + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl.fromJson; - /// The index of the output in the outputs array. @override + + /// The index of the tool call in the tool calls array. int get index; + @override - /// Always `image`. + /// The ID of the tool call object. + @JsonKey(includeIfNull: false) + String? get id; @override + + /// Always `function`. String get type; - /// Code interpreter image output. + /// The definition of the function that was called. @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? get image; + RunStepDeltaStepDetailsToolCallsFunction? get function; + @override + @JsonKey(ignore: true) + _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +RunStepDeltaStepDetailsToolCallsFunction + _$RunStepDeltaStepDetailsToolCallsFunctionFromJson( + Map json) { + return _RunStepDeltaStepDetailsToolCallsFunction.fromJson(json); +} + +/// @nodoc +mixin _$RunStepDeltaStepDetailsToolCallsFunction { + /// The name of the function. + @JsonKey(includeIfNull: false) + String? get name => throw _privateConstructorUsedError; + + /// The arguments passed to the function. + @JsonKey(includeIfNull: false) + String? get arguments => throw _privateConstructorUsedError; + + /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. + @JsonKey(includeIfNull: false) + String? 
get output => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $RunStepDeltaStepDetailsToolCallsFunctionCopyWith< + RunStepDeltaStepDetailsToolCallsFunction> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res> { + factory $RunStepDeltaStepDetailsToolCallsFunctionCopyWith( + RunStepDeltaStepDetailsToolCallsFunction value, + $Res Function(RunStepDeltaStepDetailsToolCallsFunction) then) = + _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, + RunStepDeltaStepDetailsToolCallsFunction>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? name, + @JsonKey(includeIfNull: false) String? arguments, + @JsonKey(includeIfNull: false) String? output}); +} + +/// @nodoc +class _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, + $Val extends RunStepDeltaStepDetailsToolCallsFunction> + implements $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res> { + _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl( + this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = freezed, + Object? arguments = freezed, + Object? output = freezed, + }) { + return _then(_value.copyWith( + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + arguments: freezed == arguments + ? _value.arguments + : arguments // ignore: cast_nullable_to_non_nullable + as String?, + output: freezed == output + ? _value.output + : output // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} - /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput - /// with the given fields replaced by the non-null parameter values. +/// @nodoc +abstract class _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith<$Res> + implements $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsFunctionImpl value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionImpl) then) = + __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl<$Res>; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? name, + @JsonKey(includeIfNull: false) String? arguments, + @JsonKey(includeIfNull: false) String? output}); +} + +/// @nodoc +class __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> + extends _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsFunctionImpl> + implements _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith<$Res> { + __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsFunctionImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = freezed, + Object? arguments = freezed, + Object? output = freezed, + }) { + return _then(_$RunStepDeltaStepDetailsToolCallsFunctionImpl( + name: freezed == name + ? 
_value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + arguments: freezed == arguments + ? _value.arguments + : arguments // ignore: cast_nullable_to_non_nullable + as String?, + output: freezed == output + ? _value.output + : output // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$RunStepDeltaStepDetailsToolCallsFunctionImpl + extends _RunStepDeltaStepDetailsToolCallsFunction { + const _$RunStepDeltaStepDetailsToolCallsFunctionImpl( + {@JsonKey(includeIfNull: false) this.name, + @JsonKey(includeIfNull: false) this.arguments, + @JsonKey(includeIfNull: false) this.output}) + : super._(); + + factory _$RunStepDeltaStepDetailsToolCallsFunctionImpl.fromJson( + Map json) => + _$$RunStepDeltaStepDetailsToolCallsFunctionImplFromJson(json); + + /// The name of the function. + @override + @JsonKey(includeIfNull: false) + final String? name; + + /// The arguments passed to the function. + @override + @JsonKey(includeIfNull: false) + final String? arguments; + + /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. + @override + @JsonKey(includeIfNull: false) + final String? output; + + @override + String toString() { + return 'RunStepDeltaStepDetailsToolCallsFunction(name: $name, arguments: $arguments, output: $output)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$RunStepDeltaStepDetailsToolCallsFunctionImpl && + (identical(other.name, name) || other.name == name) && + (identical(other.arguments, arguments) || + other.arguments == arguments) && + (identical(other.output, output) || other.output == output)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, name, arguments, output); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsFunctionImpl> + get copyWith => + __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsFunctionImpl>(this, _$identity); + + @override + Map toJson() { + return _$$RunStepDeltaStepDetailsToolCallsFunctionImplToJson( + this, + ); + } +} + +abstract class _RunStepDeltaStepDetailsToolCallsFunction + extends RunStepDeltaStepDetailsToolCallsFunction { + const factory _RunStepDeltaStepDetailsToolCallsFunction( + {@JsonKey(includeIfNull: false) final String? name, + @JsonKey(includeIfNull: false) final String? arguments, + @JsonKey(includeIfNull: false) final String? output}) = + _$RunStepDeltaStepDetailsToolCallsFunctionImpl; + const _RunStepDeltaStepDetailsToolCallsFunction._() : super._(); + + factory _RunStepDeltaStepDetailsToolCallsFunction.fromJson( + Map json) = + _$RunStepDeltaStepDetailsToolCallsFunctionImpl.fromJson; + + @override + + /// The name of the function. + @JsonKey(includeIfNull: false) + String? get name; + @override + + /// The arguments passed to the function. + @JsonKey(includeIfNull: false) + String? get arguments; + @override + + /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. + @JsonKey(includeIfNull: false) + String? 
get output; + @override + @JsonKey(ignore: true) + _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsFunctionImpl> get copyWith => throw _privateConstructorUsedError; } -ChunkingStrategyRequestParam _$ChunkingStrategyRequestParamFromJson( +RunStepDetailsToolCallsCodeOutput _$RunStepDetailsToolCallsCodeOutputFromJson( Map json) { switch (json['type']) { - case 'auto': - return AutoChunkingStrategyRequestParam.fromJson(json); - case 'static': - return StaticChunkingStrategyRequestParam.fromJson(json); + case 'logs': + return RunStepDetailsToolCallsCodeOutputLogsObject.fromJson(json); + case 'image': + return RunStepDetailsToolCallsCodeOutputImageObject.fromJson(json); default: throw CheckedFromJsonException( json, 'type', - 'ChunkingStrategyRequestParam', + 'RunStepDetailsToolCallsCodeOutput', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$ChunkingStrategyRequestParam { - /// Always `auto`. +mixin _$RunStepDetailsToolCallsCodeOutput { + /// Always `logs`. String get type => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function(String type) auto, - required TResult Function(String type, StaticChunkingStrategy static) - static, + required TResult Function(String type, String logs) logs, + required TResult Function( + String type, RunStepDetailsToolCallsCodeOutputImage image) + image, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type)? auto, - TResult? Function(String type, StaticChunkingStrategy static)? static, + TResult? Function(String type, String logs)? logs, + TResult? Function( + String type, RunStepDetailsToolCallsCodeOutputImage image)? + image, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type)? auto, - TResult Function(String type, StaticChunkingStrategy static)? static, + TResult Function(String type, String logs)? logs, + TResult Function(String type, RunStepDetailsToolCallsCodeOutputImage image)? + image, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(AutoChunkingStrategyRequestParam value) auto, - required TResult Function(StaticChunkingStrategyRequestParam value) static, + required TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value) + logs, + required TResult Function( + RunStepDetailsToolCallsCodeOutputImageObject value) + image, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AutoChunkingStrategyRequestParam value)? auto, - TResult? Function(StaticChunkingStrategyRequestParam value)? static, + TResult? Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, + TResult? Function(RunStepDetailsToolCallsCodeOutputImageObject value)? + image, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(AutoChunkingStrategyRequestParam value)? auto, - TResult Function(StaticChunkingStrategyRequestParam value)? static, + TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, + TResult Function(RunStepDetailsToolCallsCodeOutputImageObject value)? image, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this ChunkingStrategyRequestParam to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChunkingStrategyRequestParam - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $ChunkingStrategyRequestParamCopyWith + @JsonKey(ignore: true) + $RunStepDetailsToolCallsCodeOutputCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ChunkingStrategyRequestParamCopyWith<$Res> { - factory $ChunkingStrategyRequestParamCopyWith( - ChunkingStrategyRequestParam value, - $Res Function(ChunkingStrategyRequestParam) then) = - _$ChunkingStrategyRequestParamCopyWithImpl<$Res, - ChunkingStrategyRequestParam>; +abstract class $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { + factory $RunStepDetailsToolCallsCodeOutputCopyWith( + RunStepDetailsToolCallsCodeOutput value, + $Res Function(RunStepDetailsToolCallsCodeOutput) then) = + _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + RunStepDetailsToolCallsCodeOutput>; @useResult $Res call({String type}); } /// @nodoc -class _$ChunkingStrategyRequestParamCopyWithImpl<$Res, - $Val extends ChunkingStrategyRequestParam> - implements $ChunkingStrategyRequestParamCopyWith<$Res> { - _$ChunkingStrategyRequestParamCopyWithImpl(this._value, this._then); +class _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsCodeOutput> + implements $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { + _$RunStepDetailsToolCallsCodeOutputCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChunkingStrategyRequestParam - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -69359,113 +60576,128 @@ class _$ChunkingStrategyRequestParamCopyWithImpl<$Res, } /// @nodoc -abstract class _$$AutoChunkingStrategyRequestParamImplCopyWith<$Res> - implements $ChunkingStrategyRequestParamCopyWith<$Res> { - factory _$$AutoChunkingStrategyRequestParamImplCopyWith( - _$AutoChunkingStrategyRequestParamImpl value, - $Res Function(_$AutoChunkingStrategyRequestParamImpl) then) = - __$$AutoChunkingStrategyRequestParamImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith<$Res> + implements $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith( + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl) + then) = + __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type}); + $Res call({String type, String logs}); } /// @nodoc -class __$$AutoChunkingStrategyRequestParamImplCopyWithImpl<$Res> - extends _$ChunkingStrategyRequestParamCopyWithImpl<$Res, - _$AutoChunkingStrategyRequestParamImpl> - implements _$$AutoChunkingStrategyRequestParamImplCopyWith<$Res> { - __$$AutoChunkingStrategyRequestParamImplCopyWithImpl( - _$AutoChunkingStrategyRequestParamImpl _value, - $Res Function(_$AutoChunkingStrategyRequestParamImpl) _then) +class __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> + implements + _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl( + 
_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl) _then) : super(_value, _then); - /// Create a copy of ChunkingStrategyRequestParam - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? type = null, + Object? logs = null, }) { - return _then(_$AutoChunkingStrategyRequestParamImpl( + return _then(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, + logs: null == logs + ? _value.logs + : logs // ignore: cast_nullable_to_non_nullable + as String, )); } } /// @nodoc @JsonSerializable() -class _$AutoChunkingStrategyRequestParamImpl - extends AutoChunkingStrategyRequestParam { - const _$AutoChunkingStrategyRequestParamImpl({required this.type}) +class _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl + extends RunStepDetailsToolCallsCodeOutputLogsObject { + const _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl( + {required this.type, required this.logs}) : super._(); - factory _$AutoChunkingStrategyRequestParamImpl.fromJson( + factory _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson( Map json) => - _$$AutoChunkingStrategyRequestParamImplFromJson(json); + _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplFromJson(json); - /// Always `auto`. + /// Always `logs`. @override final String type; + /// The text output from the Code Interpreter tool call. + @override + final String logs; + @override String toString() { - return 'ChunkingStrategyRequestParam.auto(type: $type)'; + return 'RunStepDetailsToolCallsCodeOutput.logs(type: $type, logs: $logs)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$AutoChunkingStrategyRequestParamImpl && - (identical(other.type, type) || other.type == type)); + other is _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.logs, logs) || other.logs == logs)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type); + int get hashCode => Object.hash(runtimeType, type, logs); - /// Create a copy of ChunkingStrategyRequestParam - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$AutoChunkingStrategyRequestParamImplCopyWith< - _$AutoChunkingStrategyRequestParamImpl> - get copyWith => __$$AutoChunkingStrategyRequestParamImplCopyWithImpl< - _$AutoChunkingStrategyRequestParamImpl>(this, _$identity); + _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> + get copyWith => + __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type) auto, - required TResult Function(String type, StaticChunkingStrategy static) - static, + required TResult Function(String type, String logs) logs, + required TResult Function( + String type, RunStepDetailsToolCallsCodeOutputImage image) + image, }) { - return auto(type); + return logs(type, this.logs); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type)? 
auto, - TResult? Function(String type, StaticChunkingStrategy static)? static, + TResult? Function(String type, String logs)? logs, + TResult? Function( + String type, RunStepDetailsToolCallsCodeOutputImage image)? + image, }) { - return auto?.call(type); + return logs?.call(type, this.logs); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type)? auto, - TResult Function(String type, StaticChunkingStrategy static)? static, + TResult Function(String type, String logs)? logs, + TResult Function(String type, RunStepDetailsToolCallsCodeOutputImage image)? + image, required TResult orElse(), }) { - if (auto != null) { - return auto(type); + if (logs != null) { + return logs(type, this.logs); } return orElse(); } @@ -69473,195 +60705,205 @@ class _$AutoChunkingStrategyRequestParamImpl @override @optionalTypeArgs TResult map({ - required TResult Function(AutoChunkingStrategyRequestParam value) auto, - required TResult Function(StaticChunkingStrategyRequestParam value) static, + required TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value) + logs, + required TResult Function( + RunStepDetailsToolCallsCodeOutputImageObject value) + image, }) { - return auto(this); + return logs(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AutoChunkingStrategyRequestParam value)? auto, - TResult? Function(StaticChunkingStrategyRequestParam value)? static, + TResult? Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, + TResult? Function(RunStepDetailsToolCallsCodeOutputImageObject value)? + image, }) { - return auto?.call(this); + return logs?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AutoChunkingStrategyRequestParam value)? auto, - TResult Function(StaticChunkingStrategyRequestParam value)? static, + TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, + TResult Function(RunStepDetailsToolCallsCodeOutputImageObject value)? image, required TResult orElse(), }) { - if (auto != null) { - return auto(this); + if (logs != null) { + return logs(this); } return orElse(); } @override Map toJson() { - return _$$AutoChunkingStrategyRequestParamImplToJson( + return _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplToJson( this, ); } } -abstract class AutoChunkingStrategyRequestParam - extends ChunkingStrategyRequestParam { - const factory AutoChunkingStrategyRequestParam({required final String type}) = - _$AutoChunkingStrategyRequestParamImpl; - const AutoChunkingStrategyRequestParam._() : super._(); +abstract class RunStepDetailsToolCallsCodeOutputLogsObject + extends RunStepDetailsToolCallsCodeOutput { + const factory RunStepDetailsToolCallsCodeOutputLogsObject( + {required final String type, required final String logs}) = + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl; + const RunStepDetailsToolCallsCodeOutputLogsObject._() : super._(); - factory AutoChunkingStrategyRequestParam.fromJson(Map json) = - _$AutoChunkingStrategyRequestParamImpl.fromJson; + factory RunStepDetailsToolCallsCodeOutputLogsObject.fromJson( + Map json) = + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson; - /// Always `auto`. @override + + /// Always `logs`. String get type; - /// Create a copy of ChunkingStrategyRequestParam - /// with the given fields replaced by the non-null parameter values. + /// The text output from the Code Interpreter tool call. 
+ String get logs; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$AutoChunkingStrategyRequestParamImplCopyWith< - _$AutoChunkingStrategyRequestParamImpl> + @JsonKey(ignore: true) + _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$StaticChunkingStrategyRequestParamImplCopyWith<$Res> - implements $ChunkingStrategyRequestParamCopyWith<$Res> { - factory _$$StaticChunkingStrategyRequestParamImplCopyWith( - _$StaticChunkingStrategyRequestParamImpl value, - $Res Function(_$StaticChunkingStrategyRequestParamImpl) then) = - __$$StaticChunkingStrategyRequestParamImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith<$Res> + implements $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith( + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl) + then) = + __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type, StaticChunkingStrategy static}); + $Res call({String type, RunStepDetailsToolCallsCodeOutputImage image}); - $StaticChunkingStrategyCopyWith<$Res> get static; + $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res> get image; } /// @nodoc -class __$$StaticChunkingStrategyRequestParamImplCopyWithImpl<$Res> - extends _$ChunkingStrategyRequestParamCopyWithImpl<$Res, - _$StaticChunkingStrategyRequestParamImpl> - implements _$$StaticChunkingStrategyRequestParamImplCopyWith<$Res> { - __$$StaticChunkingStrategyRequestParamImplCopyWithImpl( - _$StaticChunkingStrategyRequestParamImpl _value, - $Res Function(_$StaticChunkingStrategyRequestParamImpl) _then) +class __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> + implements + _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl) _then) : super(_value, _then); - /// Create a copy of ChunkingStrategyRequestParam - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? type = null, - Object? static = null, + Object? image = null, }) { - return _then(_$StaticChunkingStrategyRequestParamImpl( + return _then(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - static: null == static - ? _value.static - : static // ignore: cast_nullable_to_non_nullable - as StaticChunkingStrategy, + image: null == image + ? _value.image + : image // ignore: cast_nullable_to_non_nullable + as RunStepDetailsToolCallsCodeOutputImage, )); } - /// Create a copy of ChunkingStrategyRequestParam - /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $StaticChunkingStrategyCopyWith<$Res> get static { - return $StaticChunkingStrategyCopyWith<$Res>(_value.static, (value) { - return _then(_value.copyWith(static: value)); + $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res> get image { + return $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res>(_value.image, + (value) { + return _then(_value.copyWith(image: value)); }); } } /// @nodoc @JsonSerializable() -class _$StaticChunkingStrategyRequestParamImpl - extends StaticChunkingStrategyRequestParam { - const _$StaticChunkingStrategyRequestParamImpl( - {required this.type, required this.static}) +class _$RunStepDetailsToolCallsCodeOutputImageObjectImpl + extends RunStepDetailsToolCallsCodeOutputImageObject { + const _$RunStepDetailsToolCallsCodeOutputImageObjectImpl( + {required this.type, required this.image}) : super._(); - factory _$StaticChunkingStrategyRequestParamImpl.fromJson( + factory _$RunStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson( Map json) => - _$$StaticChunkingStrategyRequestParamImplFromJson(json); + _$$RunStepDetailsToolCallsCodeOutputImageObjectImplFromJson(json); - /// Always `static`. + /// Always `image`. @override final String type; - /// Static chunking strategy + /// Code interpreter image output. @override - final StaticChunkingStrategy static; + final RunStepDetailsToolCallsCodeOutputImage image; @override String toString() { - return 'ChunkingStrategyRequestParam.static(type: $type, static: $static)'; + return 'RunStepDetailsToolCallsCodeOutput.image(type: $type, image: $image)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$StaticChunkingStrategyRequestParamImpl && + other is _$RunStepDetailsToolCallsCodeOutputImageObjectImpl && (identical(other.type, type) || other.type == type) && - (identical(other.static, static) || other.static == static)); + (identical(other.image, image) || other.image == image)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, static); + int get hashCode => Object.hash(runtimeType, type, image); - /// Create a copy of ChunkingStrategyRequestParam - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$StaticChunkingStrategyRequestParamImplCopyWith< - _$StaticChunkingStrategyRequestParamImpl> - get copyWith => __$$StaticChunkingStrategyRequestParamImplCopyWithImpl< - _$StaticChunkingStrategyRequestParamImpl>(this, _$identity); + _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> + get copyWith => + __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type) auto, - required TResult Function(String type, StaticChunkingStrategy static) - static, + required TResult Function(String type, String logs) logs, + required TResult Function( + String type, RunStepDetailsToolCallsCodeOutputImage image) + image, }) { - return static(type, this.static); + return image(type, this.image); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type)? auto, - TResult? Function(String type, StaticChunkingStrategy static)? static, + TResult? 
Function(String type, String logs)? logs, + TResult? Function( + String type, RunStepDetailsToolCallsCodeOutputImage image)? + image, }) { - return static?.call(type, this.static); + return image?.call(type, this.image); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type)? auto, - TResult Function(String type, StaticChunkingStrategy static)? static, + TResult Function(String type, String logs)? logs, + TResult Function(String type, RunStepDetailsToolCallsCodeOutputImage image)? + image, required TResult orElse(), }) { - if (static != null) { - return static(type, this.static); + if (image != null) { + return image(type, this.image); } return orElse(); } @@ -69669,171 +60911,211 @@ class _$StaticChunkingStrategyRequestParamImpl @override @optionalTypeArgs TResult map({ - required TResult Function(AutoChunkingStrategyRequestParam value) auto, - required TResult Function(StaticChunkingStrategyRequestParam value) static, + required TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value) + logs, + required TResult Function( + RunStepDetailsToolCallsCodeOutputImageObject value) + image, }) { - return static(this); + return image(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AutoChunkingStrategyRequestParam value)? auto, - TResult? Function(StaticChunkingStrategyRequestParam value)? static, + TResult? Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, + TResult? Function(RunStepDetailsToolCallsCodeOutputImageObject value)? + image, }) { - return static?.call(this); + return image?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AutoChunkingStrategyRequestParam value)? auto, - TResult Function(StaticChunkingStrategyRequestParam value)? static, + TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, + TResult Function(RunStepDetailsToolCallsCodeOutputImageObject value)? image, required TResult orElse(), }) { - if (static != null) { - return static(this); + if (image != null) { + return image(this); } return orElse(); } @override Map toJson() { - return _$$StaticChunkingStrategyRequestParamImplToJson( + return _$$RunStepDetailsToolCallsCodeOutputImageObjectImplToJson( this, ); } } -abstract class StaticChunkingStrategyRequestParam - extends ChunkingStrategyRequestParam { - const factory StaticChunkingStrategyRequestParam( +abstract class RunStepDetailsToolCallsCodeOutputImageObject + extends RunStepDetailsToolCallsCodeOutput { + const factory RunStepDetailsToolCallsCodeOutputImageObject( {required final String type, - required final StaticChunkingStrategy static}) = - _$StaticChunkingStrategyRequestParamImpl; - const StaticChunkingStrategyRequestParam._() : super._(); + required final RunStepDetailsToolCallsCodeOutputImage image}) = + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl; + const RunStepDetailsToolCallsCodeOutputImageObject._() : super._(); - factory StaticChunkingStrategyRequestParam.fromJson( + factory RunStepDetailsToolCallsCodeOutputImageObject.fromJson( Map json) = - _$StaticChunkingStrategyRequestParamImpl.fromJson; + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson; - /// Always `static`. @override - String get type; - /// Static chunking strategy - StaticChunkingStrategy get static; + /// Always `image`. + String get type; - /// Create a copy of ChunkingStrategyRequestParam - /// with the given fields replaced by the non-null parameter values. + /// Code interpreter image output. 
+ RunStepDetailsToolCallsCodeOutputImage get image; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$StaticChunkingStrategyRequestParamImplCopyWith< - _$StaticChunkingStrategyRequestParamImpl> + @JsonKey(ignore: true) + _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> get copyWith => throw _privateConstructorUsedError; } -ChunkingStrategyResponseParam _$ChunkingStrategyResponseParamFromJson( - Map json) { +RunStepDeltaStepDetailsToolCallsCodeOutput + _$RunStepDeltaStepDetailsToolCallsCodeOutputFromJson( + Map json) { switch (json['type']) { - case 'static': - return StaticChunkingStrategyResponseParam.fromJson(json); - case 'other': - return OtherChunkingStrategyResponseParam.fromJson(json); + case 'logs': + return RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject.fromJson( + json); + case 'image': + return RunStepDeltaStepDetailsToolCallsCodeOutputImageObject.fromJson( + json); default: throw CheckedFromJsonException( json, 'type', - 'ChunkingStrategyResponseParam', + 'RunStepDeltaStepDetailsToolCallsCodeOutput', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$ChunkingStrategyResponseParam { - /// Always `static`. +mixin _$RunStepDeltaStepDetailsToolCallsCodeOutput { + /// The index of the output in the outputs array. + int get index => throw _privateConstructorUsedError; + + /// Always `logs`. String get type => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function(String type, StaticChunkingStrategy static) - static, - required TResult Function(String type) other, + required TResult Function( + int index, String type, @JsonKey(includeIfNull: false) String? logs) + logs, + required TResult Function( + int index, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image) + image, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, StaticChunkingStrategy static)? static, - TResult? Function(String type)? other, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? logs)? + logs, + TResult? Function( + int index, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? + image, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, StaticChunkingStrategy static)? static, - TResult Function(String type)? other, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? logs)? + logs, + TResult Function( + int index, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? + image, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(StaticChunkingStrategyResponseParam value) static, - required TResult Function(OtherChunkingStrategyResponseParam value) other, + required TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value) + logs, + required TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value) + image, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(StaticChunkingStrategyResponseParam value)? static, - TResult? Function(OtherChunkingStrategyResponseParam value)? other, + TResult? 
Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? + logs, + TResult? Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? + image, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(StaticChunkingStrategyResponseParam value)? static, - TResult Function(OtherChunkingStrategyResponseParam value)? other, + TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? + logs, + TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? + image, required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this ChunkingStrategyResponseParam to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChunkingStrategyResponseParam - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $ChunkingStrategyResponseParamCopyWith + @JsonKey(ignore: true) + $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith< + RunStepDeltaStepDetailsToolCallsCodeOutput> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ChunkingStrategyResponseParamCopyWith<$Res> { - factory $ChunkingStrategyResponseParamCopyWith( - ChunkingStrategyResponseParam value, - $Res Function(ChunkingStrategyResponseParam) then) = - _$ChunkingStrategyResponseParamCopyWithImpl<$Res, - ChunkingStrategyResponseParam>; +abstract class $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { + factory $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith( + RunStepDeltaStepDetailsToolCallsCodeOutput value, + $Res Function(RunStepDeltaStepDetailsToolCallsCodeOutput) then) = + _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + RunStepDeltaStepDetailsToolCallsCodeOutput>; @useResult - $Res call({String type}); + $Res call({int index, String type}); } /// @nodoc -class _$ChunkingStrategyResponseParamCopyWithImpl<$Res, - $Val extends ChunkingStrategyResponseParam> - implements $ChunkingStrategyResponseParamCopyWith<$Res> { - _$ChunkingStrategyResponseParamCopyWithImpl(this._value, this._then); +class _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + $Val extends RunStepDeltaStepDetailsToolCallsCodeOutput> + implements $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { + _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl( + this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of ChunkingStrategyResponseParam - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, }) { return _then(_value.copyWith( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? 
_value.type : type // ignore: cast_nullable_to_non_nullable @@ -69843,136 +61125,164 @@ class _$ChunkingStrategyResponseParamCopyWithImpl<$Res, } /// @nodoc -abstract class _$$StaticChunkingStrategyResponseParamImplCopyWith<$Res> - implements $ChunkingStrategyResponseParamCopyWith<$Res> { - factory _$$StaticChunkingStrategyResponseParamImplCopyWith( - _$StaticChunkingStrategyResponseParamImpl value, - $Res Function(_$StaticChunkingStrategyResponseParamImpl) then) = - __$$StaticChunkingStrategyResponseParamImplCopyWithImpl<$Res>; +abstract class _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< + $Res> implements $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl value, + $Res Function( + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl) + then) = + __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< + $Res>; @override @useResult - $Res call({String type, StaticChunkingStrategy static}); - - $StaticChunkingStrategyCopyWith<$Res> get static; + $Res call( + {int index, String type, @JsonKey(includeIfNull: false) String? logs}); } /// @nodoc -class __$$StaticChunkingStrategyResponseParamImplCopyWithImpl<$Res> - extends _$ChunkingStrategyResponseParamCopyWithImpl<$Res, - _$StaticChunkingStrategyResponseParamImpl> - implements _$$StaticChunkingStrategyResponseParamImplCopyWith<$Res> { - __$$StaticChunkingStrategyResponseParamImplCopyWithImpl( - _$StaticChunkingStrategyResponseParamImpl _value, - $Res Function(_$StaticChunkingStrategyResponseParamImpl) _then) +class __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< + $Res> + extends _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> + implements + _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< + $Res> { + __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl) + _then) : super(_value, _then); - /// Create a copy of ChunkingStrategyResponseParam - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, - Object? static = null, + Object? logs = freezed, }) { - return _then(_$StaticChunkingStrategyResponseParamImpl( + return _then(_$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - static: null == static - ? _value.static - : static // ignore: cast_nullable_to_non_nullable - as StaticChunkingStrategy, + logs: freezed == logs + ? _value.logs + : logs // ignore: cast_nullable_to_non_nullable + as String?, )); } - - /// Create a copy of ChunkingStrategyResponseParam - /// with the given fields replaced by the non-null parameter values. 
- @override - @pragma('vm:prefer-inline') - $StaticChunkingStrategyCopyWith<$Res> get static { - return $StaticChunkingStrategyCopyWith<$Res>(_value.static, (value) { - return _then(_value.copyWith(static: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$StaticChunkingStrategyResponseParamImpl - extends StaticChunkingStrategyResponseParam { - const _$StaticChunkingStrategyResponseParamImpl( - {required this.type, required this.static}) +class _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl + extends RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject { + const _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl( + {required this.index, + required this.type, + @JsonKey(includeIfNull: false) this.logs}) : super._(); - factory _$StaticChunkingStrategyResponseParamImpl.fromJson( + factory _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson( Map json) => - _$$StaticChunkingStrategyResponseParamImplFromJson(json); + _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplFromJson(json); + + /// The index of the output in the outputs array. + @override + final int index; - /// Always `static`. + /// Always `logs`. @override final String type; - /// Static chunking strategy + /// The text output from the Code Interpreter tool call. @override - final StaticChunkingStrategy static; + @JsonKey(includeIfNull: false) + final String? logs; @override String toString() { - return 'ChunkingStrategyResponseParam.static(type: $type, static: $static)'; + return 'RunStepDeltaStepDetailsToolCallsCodeOutput.logs(index: $index, type: $type, logs: $logs)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$StaticChunkingStrategyResponseParamImpl && + other + is _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl && + (identical(other.index, index) || other.index == index) && (identical(other.type, type) || other.type == type) && - (identical(other.static, static) || other.static == static)); + (identical(other.logs, logs) || other.logs == logs)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type, static); + int get hashCode => Object.hash(runtimeType, index, type, logs); - /// Create a copy of ChunkingStrategyResponseParam - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$StaticChunkingStrategyResponseParamImplCopyWith< - _$StaticChunkingStrategyResponseParamImpl> - get copyWith => __$$StaticChunkingStrategyResponseParamImplCopyWithImpl< - _$StaticChunkingStrategyResponseParamImpl>(this, _$identity); + _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> + get copyWith => + __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type, StaticChunkingStrategy static) - static, - required TResult Function(String type) other, + required TResult Function( + int index, String type, @JsonKey(includeIfNull: false) String? logs) + logs, + required TResult Function( + int index, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? 
image) + image, }) { - return static(type, this.static); + return logs(index, type, this.logs); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, StaticChunkingStrategy static)? static, - TResult? Function(String type)? other, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? logs)? + logs, + TResult? Function( + int index, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? + image, }) { - return static?.call(type, this.static); + return logs?.call(index, type, this.logs); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, StaticChunkingStrategy static)? static, - TResult Function(String type)? other, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? logs)? + logs, + TResult Function( + int index, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? + image, required TResult orElse(), }) { - if (static != null) { - return static(type, this.static); + if (logs != null) { + return logs(index, type, this.logs); } return orElse(); } @@ -69980,178 +61290,264 @@ class _$StaticChunkingStrategyResponseParamImpl @override @optionalTypeArgs TResult map({ - required TResult Function(StaticChunkingStrategyResponseParam value) static, - required TResult Function(OtherChunkingStrategyResponseParam value) other, + required TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value) + logs, + required TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value) + image, }) { - return static(this); + return logs(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(StaticChunkingStrategyResponseParam value)? static, - TResult? Function(OtherChunkingStrategyResponseParam value)? other, + TResult? Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? + logs, + TResult? Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? + image, }) { - return static?.call(this); + return logs?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(StaticChunkingStrategyResponseParam value)? static, - TResult Function(OtherChunkingStrategyResponseParam value)? other, + TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? + logs, + TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? + image, required TResult orElse(), }) { - if (static != null) { - return static(this); + if (logs != null) { + return logs(this); } return orElse(); } @override Map toJson() { - return _$$StaticChunkingStrategyResponseParamImplToJson( + return _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplToJson( this, ); } } -abstract class StaticChunkingStrategyResponseParam - extends ChunkingStrategyResponseParam { - const factory StaticChunkingStrategyResponseParam( - {required final String type, - required final StaticChunkingStrategy static}) = - _$StaticChunkingStrategyResponseParamImpl; - const StaticChunkingStrategyResponseParam._() : super._(); +abstract class RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject + extends RunStepDeltaStepDetailsToolCallsCodeOutput { + const factory RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject( + {required final int index, + required final String type, + @JsonKey(includeIfNull: false) final String? 
logs}) = + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl; + const RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject._() : super._(); - factory StaticChunkingStrategyResponseParam.fromJson( + factory RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject.fromJson( Map json) = - _$StaticChunkingStrategyResponseParamImpl.fromJson; + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson; - /// Always `static`. @override - String get type; - /// Static chunking strategy - StaticChunkingStrategy get static; + /// The index of the output in the outputs array. + int get index; + @override + + /// Always `logs`. + String get type; - /// Create a copy of ChunkingStrategyResponseParam - /// with the given fields replaced by the non-null parameter values. + /// The text output from the Code Interpreter tool call. + @JsonKey(includeIfNull: false) + String? get logs; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$StaticChunkingStrategyResponseParamImplCopyWith< - _$StaticChunkingStrategyResponseParamImpl> + @JsonKey(ignore: true) + _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$OtherChunkingStrategyResponseParamImplCopyWith<$Res> - implements $ChunkingStrategyResponseParamCopyWith<$Res> { - factory _$$OtherChunkingStrategyResponseParamImplCopyWith( - _$OtherChunkingStrategyResponseParamImpl value, - $Res Function(_$OtherChunkingStrategyResponseParamImpl) then) = - __$$OtherChunkingStrategyResponseParamImplCopyWithImpl<$Res>; +abstract class _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< + $Res> implements $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl value, + $Res Function( + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl) + then) = + __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< + $Res>; @override @useResult - $Res call({String type}); + $Res call( + {int index, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image}); + + $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>? get image; } /// @nodoc -class __$$OtherChunkingStrategyResponseParamImplCopyWithImpl<$Res> - extends _$ChunkingStrategyResponseParamCopyWithImpl<$Res, - _$OtherChunkingStrategyResponseParamImpl> - implements _$$OtherChunkingStrategyResponseParamImplCopyWith<$Res> { - __$$OtherChunkingStrategyResponseParamImplCopyWithImpl( - _$OtherChunkingStrategyResponseParamImpl _value, - $Res Function(_$OtherChunkingStrategyResponseParamImpl) _then) +class __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< + $Res> + extends _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> + implements + _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< + $Res> { + __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl) + _then) : super(_value, _then); - /// Create a copy of ChunkingStrategyResponseParam - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, + Object? image = freezed, }) { - return _then(_$OtherChunkingStrategyResponseParamImpl( + return _then(_$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, + image: freezed == image + ? _value.image + : image // ignore: cast_nullable_to_non_nullable + as RunStepDeltaStepDetailsToolCallsCodeOutputImage?, )); } + + @override + @pragma('vm:prefer-inline') + $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>? get image { + if (_value.image == null) { + return null; + } + + return $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>( + _value.image!, (value) { + return _then(_value.copyWith(image: value)); + }); + } } /// @nodoc @JsonSerializable() -class _$OtherChunkingStrategyResponseParamImpl - extends OtherChunkingStrategyResponseParam { - const _$OtherChunkingStrategyResponseParamImpl({required this.type}) +class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl + extends RunStepDeltaStepDetailsToolCallsCodeOutputImageObject { + const _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl( + {required this.index, + required this.type, + @JsonKey(includeIfNull: false) this.image}) : super._(); - factory _$OtherChunkingStrategyResponseParamImpl.fromJson( + factory _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson( Map json) => - _$$OtherChunkingStrategyResponseParamImplFromJson(json); + _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplFromJson( + json); + + /// The index of the output in the outputs array. + @override + final int index; - /// Always `other`. + /// Always `image`. @override final String type; + /// Code interpreter image output. + @override + @JsonKey(includeIfNull: false) + final RunStepDeltaStepDetailsToolCallsCodeOutputImage? image; + @override String toString() { - return 'ChunkingStrategyResponseParam.other(type: $type)'; + return 'RunStepDeltaStepDetailsToolCallsCodeOutput.image(index: $index, type: $type, image: $image)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$OtherChunkingStrategyResponseParamImpl && - (identical(other.type, type) || other.type == type)); + other + is _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type) && + (identical(other.image, image) || other.image == image)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type); + int get hashCode => Object.hash(runtimeType, index, type, image); - /// Create a copy of ChunkingStrategyResponseParam - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$OtherChunkingStrategyResponseParamImplCopyWith< - _$OtherChunkingStrategyResponseParamImpl> - get copyWith => __$$OtherChunkingStrategyResponseParamImplCopyWithImpl< - _$OtherChunkingStrategyResponseParamImpl>(this, _$identity); + _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> + get copyWith => + __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type, StaticChunkingStrategy static) - static, - required TResult Function(String type) other, + required TResult Function( + int index, String type, @JsonKey(includeIfNull: false) String? logs) + logs, + required TResult Function( + int index, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image) + image, }) { - return other(type); + return image(index, type, this.image); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, StaticChunkingStrategy static)? static, - TResult? Function(String type)? other, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? logs)? + logs, + TResult? Function( + int index, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? + image, }) { - return other?.call(type); + return image?.call(index, type, this.image); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, StaticChunkingStrategy static)? static, - TResult Function(String type)? other, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? logs)? + logs, + TResult Function( + int index, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? + image, required TResult orElse(), }) { - if (other != null) { - return other(type); + if (image != null) { + return image(index, type, this.image); } return orElse(); } @@ -70159,62 +61555,84 @@ class _$OtherChunkingStrategyResponseParamImpl @override @optionalTypeArgs TResult map({ - required TResult Function(StaticChunkingStrategyResponseParam value) static, - required TResult Function(OtherChunkingStrategyResponseParam value) other, + required TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value) + logs, + required TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value) + image, }) { - return other(this); + return image(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(StaticChunkingStrategyResponseParam value)? static, - TResult? Function(OtherChunkingStrategyResponseParam value)? other, + TResult? Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? + logs, + TResult? Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? + image, }) { - return other?.call(this); + return image?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(StaticChunkingStrategyResponseParam value)? static, - TResult Function(OtherChunkingStrategyResponseParam value)? other, + TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? 
+ logs, + TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? + image, required TResult orElse(), }) { - if (other != null) { - return other(this); + if (image != null) { + return image(this); } return orElse(); } @override Map toJson() { - return _$$OtherChunkingStrategyResponseParamImplToJson( + return _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplToJson( this, ); } } -abstract class OtherChunkingStrategyResponseParam - extends ChunkingStrategyResponseParam { - const factory OtherChunkingStrategyResponseParam( - {required final String type}) = _$OtherChunkingStrategyResponseParamImpl; - const OtherChunkingStrategyResponseParam._() : super._(); +abstract class RunStepDeltaStepDetailsToolCallsCodeOutputImageObject + extends RunStepDeltaStepDetailsToolCallsCodeOutput { + const factory RunStepDeltaStepDetailsToolCallsCodeOutputImageObject( + {required final int index, + required final String type, + @JsonKey(includeIfNull: false) + final RunStepDeltaStepDetailsToolCallsCodeOutputImage? image}) = + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl; + const RunStepDeltaStepDetailsToolCallsCodeOutputImageObject._() : super._(); - factory OtherChunkingStrategyResponseParam.fromJson( + factory RunStepDeltaStepDetailsToolCallsCodeOutputImageObject.fromJson( Map json) = - _$OtherChunkingStrategyResponseParamImpl.fromJson; + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson; + + @override - /// Always `other`. + /// The index of the output in the outputs array. + int get index; @override + + /// Always `image`. String get type; - /// Create a copy of ChunkingStrategyResponseParam - /// with the given fields replaced by the non-null parameter values. + /// Code interpreter image output. + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? get image; @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$OtherChunkingStrategyResponseParamImplCopyWith< - _$OtherChunkingStrategyResponseParamImpl> + @JsonKey(ignore: true) + _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -70335,13 +61753,8 @@ mixin _$AssistantStreamEvent { required TResult orElse(), }) => throw _privateConstructorUsedError; - - /// Serializes this AssistantStreamEvent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) $AssistantStreamEventCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -70366,8 +61779,6 @@ class _$AssistantStreamEventCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -70403,8 +61814,6 @@ class __$$ThreadStreamEventImplCopyWithImpl<$Res> $Res Function(_$ThreadStreamEventImpl) _then) : super(_value, _then); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -70423,8 +61832,6 @@ class __$$ThreadStreamEventImplCopyWithImpl<$Res> )); } - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ThreadObjectCopyWith<$Res> get data { @@ -70465,13 +61872,11 @@ class _$ThreadStreamEventImpl extends ThreadStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, event, data); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ThreadStreamEventImplCopyWith<_$ThreadStreamEventImpl> get copyWith => @@ -70604,18 +62009,16 @@ abstract class ThreadStreamEvent extends AssistantStreamEvent { factory ThreadStreamEvent.fromJson(Map json) = _$ThreadStreamEventImpl.fromJson; - /// The type of the event. @override + + /// The type of the event. EventType get event; + @override /// Represents a thread that contains [messages](https://platform.openai.com/docs/api-reference/messages). - @override ThreadObject get data; - - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ThreadStreamEventImplCopyWith<_$ThreadStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -70641,8 +62044,6 @@ class __$$RunStreamEventImplCopyWithImpl<$Res> _$RunStreamEventImpl _value, $Res Function(_$RunStreamEventImpl) _then) : super(_value, _then); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -70661,8 +62062,6 @@ class __$$RunStreamEventImplCopyWithImpl<$Res> )); } - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectCopyWith<$Res> get data { @@ -70703,13 +62102,11 @@ class _$RunStreamEventImpl extends RunStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, event, data); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunStreamEventImplCopyWith<_$RunStreamEventImpl> get copyWith => @@ -70842,18 +62239,16 @@ abstract class RunStreamEvent extends AssistantStreamEvent { factory RunStreamEvent.fromJson(Map json) = _$RunStreamEventImpl.fromJson; - /// The type of the event. @override + + /// The type of the event. EventType get event; + @override /// Represents an execution run on a [thread](https://platform.openai.com/docs/api-reference/threads). - @override RunObject get data; - - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunStreamEventImplCopyWith<_$RunStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -70879,8 +62274,6 @@ class __$$RunStepStreamEventImplCopyWithImpl<$Res> $Res Function(_$RunStepStreamEventImpl) _then) : super(_value, _then); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -70899,8 +62292,6 @@ class __$$RunStepStreamEventImplCopyWithImpl<$Res> )); } - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepObjectCopyWith<$Res> get data { @@ -70941,13 +62332,11 @@ class _$RunStepStreamEventImpl extends RunStepStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, event, data); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunStepStreamEventImplCopyWith<_$RunStepStreamEventImpl> get copyWith => @@ -71080,18 +62469,16 @@ abstract class RunStepStreamEvent extends AssistantStreamEvent { factory RunStepStreamEvent.fromJson(Map json) = _$RunStepStreamEventImpl.fromJson; - /// The type of the event. @override + + /// The type of the event. EventType get event; + @override /// Represents a step in execution of a run. - @override RunStepObject get data; - - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunStepStreamEventImplCopyWith<_$RunStepStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -71120,8 +62507,6 @@ class __$$RunStepStreamDeltaEventImplCopyWithImpl<$Res> $Res Function(_$RunStepStreamDeltaEventImpl) _then) : super(_value, _then); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -71140,8 +62525,6 @@ class __$$RunStepStreamDeltaEventImplCopyWithImpl<$Res> )); } - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaObjectCopyWith<$Res> get data { @@ -71182,13 +62565,11 @@ class _$RunStepStreamDeltaEventImpl extends RunStepStreamDeltaEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, event, data); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$RunStepStreamDeltaEventImplCopyWith<_$RunStepStreamDeltaEventImpl> @@ -71321,18 +62702,16 @@ abstract class RunStepStreamDeltaEvent extends AssistantStreamEvent { factory RunStepStreamDeltaEvent.fromJson(Map json) = _$RunStepStreamDeltaEventImpl.fromJson; - /// The type of the event. @override + + /// The type of the event. 
EventType get event; + @override /// Represents a run step delta i.e. any changed fields on a run step during streaming. - @override RunStepDeltaObject get data; - - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$RunStepStreamDeltaEventImplCopyWith<_$RunStepStreamDeltaEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -71358,8 +62737,6 @@ class __$$MessageStreamEventImplCopyWithImpl<$Res> $Res Function(_$MessageStreamEventImpl) _then) : super(_value, _then); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -71378,8 +62755,6 @@ class __$$MessageStreamEventImplCopyWithImpl<$Res> )); } - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageObjectCopyWith<$Res> get data { @@ -71420,13 +62795,11 @@ class _$MessageStreamEventImpl extends MessageStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, event, data); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageStreamEventImplCopyWith<_$MessageStreamEventImpl> get copyWith => @@ -71559,18 +62932,16 @@ abstract class MessageStreamEvent extends AssistantStreamEvent { factory MessageStreamEvent.fromJson(Map json) = _$MessageStreamEventImpl.fromJson; - /// The type of the event. @override + + /// The type of the event. EventType get event; + @override /// Represents a message within a [thread](https://platform.openai.com/docs/api-reference/threads). - @override MessageObject get data; - - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageStreamEventImplCopyWith<_$MessageStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -71599,8 +62970,6 @@ class __$$MessageStreamDeltaEventImplCopyWithImpl<$Res> $Res Function(_$MessageStreamDeltaEventImpl) _then) : super(_value, _then); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -71619,8 +62988,6 @@ class __$$MessageStreamDeltaEventImplCopyWithImpl<$Res> )); } - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageDeltaObjectCopyWith<$Res> get data { @@ -71661,13 +63028,11 @@ class _$MessageStreamDeltaEventImpl extends MessageStreamDeltaEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, event, data); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$MessageStreamDeltaEventImplCopyWith<_$MessageStreamDeltaEventImpl> @@ -71800,18 +63165,16 @@ abstract class MessageStreamDeltaEvent extends AssistantStreamEvent { factory MessageStreamDeltaEvent.fromJson(Map json) = _$MessageStreamDeltaEventImpl.fromJson; - /// The type of the event. @override + + /// The type of the event. EventType get event; + @override /// Represents a message delta i.e. any changed fields on a message during streaming. - @override MessageDeltaObject get data; - - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$MessageStreamDeltaEventImplCopyWith<_$MessageStreamDeltaEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -71837,8 +63200,6 @@ class __$$ErrorEventImplCopyWithImpl<$Res> _$ErrorEventImpl _value, $Res Function(_$ErrorEventImpl) _then) : super(_value, _then); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -71857,8 +63218,6 @@ class __$$ErrorEventImplCopyWithImpl<$Res> )); } - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ErrorCopyWith<$Res> get data { @@ -71898,13 +63257,11 @@ class _$ErrorEventImpl extends ErrorEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, event, data); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$ErrorEventImplCopyWith<_$ErrorEventImpl> get copyWith => @@ -72036,18 +63393,16 @@ abstract class ErrorEvent extends AssistantStreamEvent { factory ErrorEvent.fromJson(Map json) = _$ErrorEventImpl.fromJson; - /// The type of the event. @override + + /// The type of the event. EventType get event; + @override /// Represents an error that occurred during an API request. - @override Error get data; - - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$ErrorEventImplCopyWith<_$ErrorEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -72071,8 +63426,6 @@ class __$$DoneEventImplCopyWithImpl<$Res> _$DoneEventImpl _value, $Res Function(_$DoneEventImpl) _then) : super(_value, _then); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -72122,13 +63475,11 @@ class _$DoneEventImpl extends DoneEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, event, data); - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') _$$DoneEventImplCopyWith<_$DoneEventImpl> get copyWith => @@ -72260,18 +63611,16 @@ abstract class DoneEvent extends AssistantStreamEvent { factory DoneEvent.fromJson(Map json) = _$DoneEventImpl.fromJson; - /// The type of the event. @override + + /// The type of the event. EventType get event; + @override /// No Description - @override String get data; - - /// Create a copy of AssistantStreamEvent - /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(includeFromJson: false, includeToJson: false) + @JsonKey(ignore: true) _$$DoneEventImplCopyWith<_$DoneEventImpl> get copyWith => throw _privateConstructorUsedError; } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index c57effb3..03a49b59 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -13,17 +13,17 @@ _$CreateCompletionRequestImpl _$$CreateCompletionRequestImplFromJson( _$CreateCompletionRequestImpl( model: const _CompletionModelConverter().fromJson(json['model']), prompt: const _CompletionPromptConverter().fromJson(json['prompt']), - bestOf: (json['best_of'] as num?)?.toInt(), + bestOf: json['best_of'] as int?, echo: json['echo'] as bool? ?? false, frequencyPenalty: (json['frequency_penalty'] as num?)?.toDouble() ?? 0.0, logitBias: (json['logit_bias'] as Map?)?.map( - (k, e) => MapEntry(k, (e as num).toInt()), + (k, e) => MapEntry(k, e as int), ), - logprobs: (json['logprobs'] as num?)?.toInt(), - maxTokens: (json['max_tokens'] as num?)?.toInt() ?? 16, - n: (json['n'] as num?)?.toInt() ?? 1, + logprobs: json['logprobs'] as int?, + maxTokens: json['max_tokens'] as int? ?? 16, + n: json['n'] as int? ?? 1, presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 0.0, - seed: (json['seed'] as num?)?.toInt(), + seed: json['seed'] as int?, stop: const _CompletionStopConverter().fromJson(json['stop']), stream: json['stream'] as bool? ?? 
false, streamOptions: json['stream_options'] == null @@ -113,8 +113,7 @@ _$CompletionPromptListListIntImpl _$$CompletionPromptListListIntImplFromJson( Map json) => _$CompletionPromptListListIntImpl( (json['value'] as List) - .map((e) => - (e as List).map((e) => (e as num).toInt()).toList()) + .map((e) => (e as List).map((e) => e as int).toList()) .toList(), $type: json['runtimeType'] as String?, ); @@ -129,7 +128,7 @@ Map _$$CompletionPromptListListIntImplToJson( _$CompletionPromptListIntImpl _$$CompletionPromptListIntImplFromJson( Map json) => _$CompletionPromptListIntImpl( - (json['value'] as List).map((e) => (e as num).toInt()).toList(), + (json['value'] as List).map((e) => e as int).toList(), $type: json['runtimeType'] as String?, ); @@ -203,7 +202,7 @@ _$CreateCompletionResponseImpl _$$CreateCompletionResponseImplFromJson( choices: (json['choices'] as List) .map((e) => CompletionChoice.fromJson(e as Map)) .toList(), - created: (json['created'] as num).toInt(), + created: json['created'] as int, model: json['model'] as String, systemFingerprint: json['system_fingerprint'] as String?, object: @@ -244,7 +243,7 @@ _$CompletionChoiceImpl _$$CompletionChoiceImplFromJson( finishReason: $enumDecodeNullable( _$CompletionFinishReasonEnumMap, json['finish_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), - index: (json['index'] as num).toInt(), + index: json['index'] as int, logprobs: json['logprobs'] == null ? null : CompletionLogprobs.fromJson( @@ -271,7 +270,7 @@ _$CompletionLogprobsImpl _$$CompletionLogprobsImplFromJson( Map json) => _$CompletionLogprobsImpl( textOffset: (json['text_offset'] as List?) - ?.map((e) => (e as num).toInt()) + ?.map((e) => e as int) .toList(), tokenLogprobs: (json['token_logprobs'] as List?) ?.map((e) => (e as num?)?.toDouble()) @@ -311,22 +310,18 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( .toList(), frequencyPenalty: (json['frequency_penalty'] as num?)?.toDouble() ?? 0.0, logitBias: (json['logit_bias'] as Map?)?.map( - (k, e) => MapEntry(k, (e as num).toInt()), + (k, e) => MapEntry(k, e as int), ), logprobs: json['logprobs'] as bool?, - topLogprobs: (json['top_logprobs'] as num?)?.toInt(), - maxTokens: (json['max_tokens'] as num?)?.toInt(), - maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), - n: (json['n'] as num?)?.toInt() ?? 1, + topLogprobs: json['top_logprobs'] as int?, + maxTokens: json['max_tokens'] as int?, + n: json['n'] as int? ?? 1, presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 0.0, responseFormat: json['response_format'] == null ? null - : ResponseFormat.fromJson( + : ChatCompletionResponseFormat.fromJson( json['response_format'] as Map), - seed: (json['seed'] as num?)?.toInt(), - serviceTier: $enumDecodeNullable( - _$CreateChatCompletionRequestServiceTierEnumMap, json['service_tier'], - unknownValue: JsonKey.nullForUndefinedEnumValue), + seed: json['seed'] as int?, stop: const _ChatCompletionStopConverter().fromJson(json['stop']), stream: json['stream'] as bool? ?? 
false, streamOptions: json['stream_options'] == null @@ -340,7 +335,6 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( .toList(), toolChoice: const _ChatCompletionToolChoiceOptionConverter() .fromJson(json['tool_choice']), - parallelToolCalls: json['parallel_tool_calls'] as bool?, user: json['user'] as String?, functionCall: const _ChatCompletionFunctionCallConverter() .fromJson(json['function_call']), @@ -367,13 +361,10 @@ Map _$$CreateChatCompletionRequestImplToJson( writeNotNull('logprobs', instance.logprobs); writeNotNull('top_logprobs', instance.topLogprobs); writeNotNull('max_tokens', instance.maxTokens); - writeNotNull('max_completion_tokens', instance.maxCompletionTokens); writeNotNull('n', instance.n); writeNotNull('presence_penalty', instance.presencePenalty); writeNotNull('response_format', instance.responseFormat?.toJson()); writeNotNull('seed', instance.seed); - writeNotNull('service_tier', - _$CreateChatCompletionRequestServiceTierEnumMap[instance.serviceTier]); writeNotNull( 'stop', const _ChatCompletionStopConverter().toJson(instance.stop)); writeNotNull('stream', instance.stream); @@ -385,7 +376,6 @@ Map _$$CreateChatCompletionRequestImplToJson( 'tool_choice', const _ChatCompletionToolChoiceOptionConverter() .toJson(instance.toolChoice)); - writeNotNull('parallel_tool_calls', instance.parallelToolCalls); writeNotNull('user', instance.user); writeNotNull( 'function_call', @@ -396,11 +386,6 @@ Map _$$CreateChatCompletionRequestImplToJson( return val; } -const _$CreateChatCompletionRequestServiceTierEnumMap = { - CreateChatCompletionRequestServiceTier.auto: 'auto', - CreateChatCompletionRequestServiceTier.vDefault: 'default', -}; - _$ChatCompletionModelEnumerationImpl _$$ChatCompletionModelEnumerationImplFromJson(Map json) => _$ChatCompletionModelEnumerationImpl( @@ -416,7 +401,6 @@ Map _$$ChatCompletionModelEnumerationImplToJson( }; const _$ChatCompletionModelsEnumMap = { - ChatCompletionModels.chatgpt4oLatest: 'chatgpt-4o-latest', ChatCompletionModels.gpt4: 'gpt-4', ChatCompletionModels.gpt432k: 'gpt-4-32k', ChatCompletionModels.gpt432k0314: 'gpt-4-32k-0314', @@ -431,9 +415,6 @@ const _$ChatCompletionModelsEnumMap = { ChatCompletionModels.gpt4VisionPreview: 'gpt-4-vision-preview', ChatCompletionModels.gpt4o: 'gpt-4o', ChatCompletionModels.gpt4o20240513: 'gpt-4o-2024-05-13', - ChatCompletionModels.gpt4o20240806: 'gpt-4o-2024-08-06', - ChatCompletionModels.gpt4oMini: 'gpt-4o-mini', - ChatCompletionModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', ChatCompletionModels.gpt35Turbo: 'gpt-3.5-turbo', ChatCompletionModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', ChatCompletionModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -441,10 +422,6 @@ const _$ChatCompletionModelsEnumMap = { ChatCompletionModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301', ChatCompletionModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613', ChatCompletionModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106', - ChatCompletionModels.o1Mini: 'o1-mini', - ChatCompletionModels.o1Mini20240912: 'o1-mini-2024-09-12', - ChatCompletionModels.o1Preview: 'o1-preview', - ChatCompletionModels.o1Preview20240912: 'o1-preview-2024-09-12', }; _$ChatCompletionModelStringImpl _$$ChatCompletionModelStringImplFromJson( @@ -461,6 +438,25 @@ Map _$$ChatCompletionModelStringImplToJson( 'runtimeType': instance.$type, }; +_$ChatCompletionResponseFormatImpl _$$ChatCompletionResponseFormatImplFromJson( + Map json) => + _$ChatCompletionResponseFormatImpl( + type: $enumDecodeNullable( + _$ChatCompletionResponseFormatTypeEnumMap, 
json['type']) ?? + ChatCompletionResponseFormatType.text, + ); + +Map _$$ChatCompletionResponseFormatImplToJson( + _$ChatCompletionResponseFormatImpl instance) => + { + 'type': _$ChatCompletionResponseFormatTypeEnumMap[instance.type]!, + }; + +const _$ChatCompletionResponseFormatTypeEnumMap = { + ChatCompletionResponseFormatType.text: 'text', + ChatCompletionResponseFormatType.jsonObject: 'json_object', +}; + _$ChatCompletionStopListStringImpl _$$ChatCompletionStopListStringImplFromJson( Map json) => _$ChatCompletionStopListStringImpl( @@ -599,7 +595,6 @@ _$FunctionObjectImpl _$$FunctionObjectImplFromJson(Map json) => name: json['name'] as String, description: json['description'] as String?, parameters: json['parameters'] as Map?, - strict: json['strict'] as bool? ?? false, ); Map _$$FunctionObjectImplToJson( @@ -616,34 +611,6 @@ Map _$$FunctionObjectImplToJson( writeNotNull('description', instance.description); writeNotNull('parameters', instance.parameters); - writeNotNull('strict', instance.strict); - return val; -} - -_$JsonSchemaObjectImpl _$$JsonSchemaObjectImplFromJson( - Map json) => - _$JsonSchemaObjectImpl( - name: json['name'] as String, - description: json['description'] as String?, - schema: json['schema'] as Map, - strict: json['strict'] as bool? ?? false, - ); - -Map _$$JsonSchemaObjectImplToJson( - _$JsonSchemaObjectImpl instance) { - final val = { - 'name': instance.name, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('description', instance.description); - val['schema'] = instance.schema; - writeNotNull('strict', instance.strict); return val; } @@ -736,11 +703,8 @@ _$CreateChatCompletionResponseImpl _$$CreateChatCompletionResponseImplFromJson( .map((e) => ChatCompletionResponseChoice.fromJson(e as Map)) .toList(), - created: (json['created'] as num).toInt(), + created: json['created'] as int, model: json['model'] as String, - serviceTier: $enumDecodeNullable( - _$ServiceTierEnumMap, json['service_tier'], - unknownValue: JsonKey.nullForUndefinedEnumValue), systemFingerprint: json['system_fingerprint'] as String?, object: json['object'] as String, usage: json['usage'] == null @@ -762,25 +726,19 @@ Map _$$CreateChatCompletionResponseImplToJson( val['choices'] = instance.choices.map((e) => e.toJson()).toList(); val['created'] = instance.created; val['model'] = instance.model; - writeNotNull('service_tier', _$ServiceTierEnumMap[instance.serviceTier]); writeNotNull('system_fingerprint', instance.systemFingerprint); val['object'] = instance.object; writeNotNull('usage', instance.usage?.toJson()); return val; } -const _$ServiceTierEnumMap = { - ServiceTier.scale: 'scale', - ServiceTier.vDefault: 'default', -}; - _$ChatCompletionResponseChoiceImpl _$$ChatCompletionResponseChoiceImplFromJson( Map json) => _$ChatCompletionResponseChoiceImpl( finishReason: $enumDecodeNullable( _$ChatCompletionFinishReasonEnumMap, json['finish_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), - index: (json['index'] as num?)?.toInt(), + index: json['index'] as int?, message: ChatCompletionAssistantMessage.fromJson( json['message'] as Map), logprobs: json['logprobs'] == null @@ -822,35 +780,20 @@ _$ChatCompletionLogprobsImpl _$$ChatCompletionLogprobsImplFromJson( ?.map((e) => ChatCompletionTokenLogprob.fromJson(e as Map)) .toList(), - refusal: (json['refusal'] as List?) 
- ?.map((e) => - ChatCompletionTokenLogprob.fromJson(e as Map)) - .toList(), ); Map _$$ChatCompletionLogprobsImplToJson( - _$ChatCompletionLogprobsImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); - writeNotNull('refusal', instance.refusal?.map((e) => e.toJson()).toList()); - return val; -} + _$ChatCompletionLogprobsImpl instance) => + { + 'content': instance.content?.map((e) => e.toJson()).toList(), + }; _$ChatCompletionTokenLogprobImpl _$$ChatCompletionTokenLogprobImplFromJson( Map json) => _$ChatCompletionTokenLogprobImpl( token: json['token'] as String, logprob: (json['logprob'] as num).toDouble(), - bytes: (json['bytes'] as List?) - ?.map((e) => (e as num).toInt()) - .toList(), + bytes: (json['bytes'] as List?)?.map((e) => e as int).toList(), topLogprobs: (json['top_logprobs'] as List) .map((e) => ChatCompletionTokenTopLogprob.fromJson(e as Map)) @@ -871,9 +814,8 @@ _$ChatCompletionTokenTopLogprobImpl _$ChatCompletionTokenTopLogprobImpl( token: json['token'] as String, logprob: (json['logprob'] as num).toDouble(), - bytes: (json['bytes'] as List?) - ?.map((e) => (e as num).toInt()) - .toList(), + bytes: + (json['bytes'] as List?)?.map((e) => e as int).toList(), ); Map _$$ChatCompletionTokenTopLogprobImplToJson( @@ -893,13 +835,10 @@ _$CreateChatCompletionStreamResponseImpl .map((e) => ChatCompletionStreamResponseChoice.fromJson( e as Map)) .toList(), - created: (json['created'] as num?)?.toInt(), + created: json['created'] as int, model: json['model'] as String?, - serviceTier: $enumDecodeNullable( - _$ServiceTierEnumMap, json['service_tier'], - unknownValue: JsonKey.nullForUndefinedEnumValue), systemFingerprint: json['system_fingerprint'] as String?, - object: json['object'] as String?, + object: json['object'] as String, usage: json['usage'] == null ? null : CompletionUsage.fromJson(json['usage'] as Map), @@ -917,11 +856,10 @@ Map _$$CreateChatCompletionStreamResponseImplToJson( writeNotNull('id', instance.id); val['choices'] = instance.choices.map((e) => e.toJson()).toList(); - writeNotNull('created', instance.created); + val['created'] = instance.created; writeNotNull('model', instance.model); - writeNotNull('service_tier', _$ServiceTierEnumMap[instance.serviceTier]); writeNotNull('system_fingerprint', instance.systemFingerprint); - writeNotNull('object', instance.object); + val['object'] = instance.object; writeNotNull('usage', instance.usage?.toJson()); return val; } @@ -939,7 +877,7 @@ _$ChatCompletionStreamResponseChoiceImpl finishReason: $enumDecodeNullable( _$ChatCompletionFinishReasonEnumMap, json['finish_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), - index: (json['index'] as num?)?.toInt(), + index: json['index'] as int?, ); Map _$$ChatCompletionStreamResponseChoiceImplToJson( @@ -969,33 +907,19 @@ _$ChatCompletionStreamResponseChoiceLogprobsImpl ?.map((e) => ChatCompletionTokenLogprob.fromJson( e as Map)) .toList(), - refusal: (json['refusal'] as List?) 
- ?.map((e) => ChatCompletionTokenLogprob.fromJson( - e as Map)) - .toList(), ); Map _$$ChatCompletionStreamResponseChoiceLogprobsImplToJson( - _$ChatCompletionStreamResponseChoiceLogprobsImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); - writeNotNull('refusal', instance.refusal?.map((e) => e.toJson()).toList()); - return val; -} + _$ChatCompletionStreamResponseChoiceLogprobsImpl instance) => + { + 'content': instance.content?.map((e) => e.toJson()).toList(), + }; _$ChatCompletionStreamResponseDeltaImpl _$$ChatCompletionStreamResponseDeltaImplFromJson( Map json) => _$ChatCompletionStreamResponseDeltaImpl( content: json['content'] as String?, - refusal: json['refusal'] as String?, functionCall: json['function_call'] == null ? null : ChatCompletionStreamMessageFunctionCall.fromJson( @@ -1020,7 +944,6 @@ Map _$$ChatCompletionStreamResponseDeltaImplToJson( } writeNotNull('content', instance.content); - writeNotNull('refusal', instance.refusal); writeNotNull('function_call', instance.functionCall?.toJson()); writeNotNull( 'tool_calls', instance.toolCalls?.map((e) => e.toJson()).toList()); @@ -1063,7 +986,7 @@ _$ChatCompletionStreamMessageToolCallChunkImpl _$$ChatCompletionStreamMessageToolCallChunkImplFromJson( Map json) => _$ChatCompletionStreamMessageToolCallChunkImpl( - index: (json['index'] as num).toInt(), + index: json['index'] as int, id: json['id'] as String?, type: $enumDecodeNullable( _$ChatCompletionStreamMessageToolCallChunkTypeEnumMap, @@ -1101,53 +1024,18 @@ const _$ChatCompletionStreamMessageToolCallChunkTypeEnumMap = { _$CompletionUsageImpl _$$CompletionUsageImplFromJson( Map json) => _$CompletionUsageImpl( - completionTokens: (json['completion_tokens'] as num?)?.toInt(), - promptTokens: (json['prompt_tokens'] as num).toInt(), - totalTokens: (json['total_tokens'] as num).toInt(), - completionTokensDetails: json['completion_tokens_details'] == null - ? 
null - : CompletionTokensDetails.fromJson( - json['completion_tokens_details'] as Map), + completionTokens: json['completion_tokens'] as int?, + promptTokens: json['prompt_tokens'] as int, + totalTokens: json['total_tokens'] as int, ); Map _$$CompletionUsageImplToJson( - _$CompletionUsageImpl instance) { - final val = { - 'completion_tokens': instance.completionTokens, - 'prompt_tokens': instance.promptTokens, - 'total_tokens': instance.totalTokens, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull( - 'completion_tokens_details', instance.completionTokensDetails?.toJson()); - return val; -} - -_$CompletionTokensDetailsImpl _$$CompletionTokensDetailsImplFromJson( - Map json) => - _$CompletionTokensDetailsImpl( - reasoningTokens: (json['reasoning_tokens'] as num?)?.toInt(), - ); - -Map _$$CompletionTokensDetailsImplToJson( - _$CompletionTokensDetailsImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('reasoning_tokens', instance.reasoningTokens); - return val; -} + _$CompletionUsageImpl instance) => + { + 'completion_tokens': instance.completionTokens, + 'prompt_tokens': instance.promptTokens, + 'total_tokens': instance.totalTokens, + }; _$CreateEmbeddingRequestImpl _$$CreateEmbeddingRequestImplFromJson( Map json) => @@ -1157,7 +1045,7 @@ _$CreateEmbeddingRequestImpl _$$CreateEmbeddingRequestImplFromJson( encodingFormat: $enumDecodeNullable( _$EmbeddingEncodingFormatEnumMap, json['encoding_format']) ?? EmbeddingEncodingFormat.float, - dimensions: (json['dimensions'] as num?)?.toInt(), + dimensions: json['dimensions'] as int?, user: json['user'] as String?, ); @@ -1224,8 +1112,7 @@ _$EmbeddingInputListListIntImpl _$$EmbeddingInputListListIntImplFromJson( Map json) => _$EmbeddingInputListListIntImpl( (json['value'] as List) - .map((e) => - (e as List).map((e) => (e as num).toInt()).toList()) + .map((e) => (e as List).map((e) => e as int).toList()) .toList(), $type: json['runtimeType'] as String?, ); @@ -1240,7 +1127,7 @@ Map _$$EmbeddingInputListListIntImplToJson( _$EmbeddingInputListIntImpl _$$EmbeddingInputListIntImplFromJson( Map json) => _$EmbeddingInputListIntImpl( - (json['value'] as List).map((e) => (e as num).toInt()).toList(), + (json['value'] as List).map((e) => e as int).toList(), $type: json['runtimeType'] as String?, ); @@ -1317,7 +1204,7 @@ const _$CreateEmbeddingResponseObjectEnumMap = { _$EmbeddingImpl _$$EmbeddingImplFromJson(Map json) => _$EmbeddingImpl( - index: (json['index'] as num).toInt(), + index: json['index'] as int, embedding: const _EmbeddingVectorConverter().fromJson(json['embedding']), object: $enumDecode(_$EmbeddingObjectEnumMap, json['object']), ); @@ -1365,8 +1252,8 @@ Map _$$EmbeddingVectorStringImplToJson( _$EmbeddingUsageImpl _$$EmbeddingUsageImplFromJson(Map json) => _$EmbeddingUsageImpl( - promptTokens: (json['prompt_tokens'] as num).toInt(), - totalTokens: (json['total_tokens'] as num).toInt(), + promptTokens: json['prompt_tokens'] as int, + totalTokens: json['total_tokens'] as int, ); Map _$$EmbeddingUsageImplToJson( @@ -1391,7 +1278,7 @@ _$CreateFineTuningJobRequestImpl _$$CreateFineTuningJobRequestImplFromJson( ?.map( (e) => FineTuningIntegration.fromJson(e as Map)) .toList(), - seed: (json['seed'] as num?)?.toInt(), + seed: json['seed'] as int?, ); Map _$$CreateFineTuningJobRequestImplToJson( @@ -1434,7 +1321,6 @@ const _$FineTuningModelsEnumMap = { FineTuningModels.babbage002: 
'babbage-002', FineTuningModels.davinci002: 'davinci-002', FineTuningModels.gpt35Turbo: 'gpt-3.5-turbo', - FineTuningModels.gpt4oMini: 'gpt-4o-mini', }; _$FineTuningModelStringImpl _$$FineTuningModelStringImplFromJson( @@ -1454,12 +1340,12 @@ Map _$$FineTuningModelStringImplToJson( _$FineTuningJobImpl _$$FineTuningJobImplFromJson(Map json) => _$FineTuningJobImpl( id: json['id'] as String, - createdAt: (json['created_at'] as num).toInt(), + createdAt: json['created_at'] as int, error: json['error'] == null ? null : FineTuningJobError.fromJson(json['error'] as Map), fineTunedModel: json['fine_tuned_model'] as String?, - finishedAt: (json['finished_at'] as num?)?.toInt(), + finishedAt: json['finished_at'] as int?, hyperparameters: FineTuningJobHyperparameters.fromJson( json['hyperparameters'] as Map), model: json['model'] as String, @@ -1469,7 +1355,7 @@ _$FineTuningJobImpl _$$FineTuningJobImplFromJson(Map json) => .map((e) => e as String) .toList(), status: $enumDecode(_$FineTuningJobStatusEnumMap, json['status']), - trainedTokens: (json['trained_tokens'] as num?)?.toInt(), + trainedTokens: json['trained_tokens'] as int?, trainingFile: json['training_file'] as String, validationFile: json['validation_file'] as String?, integrations: (json['integrations'] as List?) @@ -1615,7 +1501,7 @@ const _$FineTuningNEpochsOptionsEnumMap = { _$FineTuningNEpochsIntImpl _$$FineTuningNEpochsIntImplFromJson( Map json) => _$FineTuningNEpochsIntImpl( - (json['value'] as num).toInt(), + json['value'] as int, $type: json['runtimeType'] as String?, ); @@ -1719,7 +1605,7 @@ _$FineTuningJobEventImpl _$$FineTuningJobEventImplFromJson( Map json) => _$FineTuningJobEventImpl( id: json['id'] as String, - createdAt: (json['created_at'] as num).toInt(), + createdAt: json['created_at'] as int, level: $enumDecode(_$FineTuningJobEventLevelEnumMap, json['level']), message: json['message'] as String, object: $enumDecode(_$FineTuningJobEventObjectEnumMap, json['object']), @@ -1749,9 +1635,9 @@ _$FineTuningJobCheckpointImpl _$$FineTuningJobCheckpointImplFromJson( Map json) => _$FineTuningJobCheckpointImpl( id: json['id'] as String, - createdAt: (json['created_at'] as num).toInt(), + createdAt: json['created_at'] as int, fineTunedModelCheckpoint: json['fine_tuned_model_checkpoint'] as String, - stepNumber: (json['step_number'] as num).toInt(), + stepNumber: json['step_number'] as int, metrics: FineTuningJobCheckpointMetrics.fromJson( json['metrics'] as Map), fineTuningJobId: json['fine_tuning_job_id'] as String, @@ -1819,7 +1705,7 @@ _$CreateImageRequestImpl _$$CreateImageRequestImplFromJson( model: json['model'] == null ? const CreateImageRequestModelString('dall-e-2') : const _CreateImageRequestModelConverter().fromJson(json['model']), - n: (json['n'] as num?)?.toInt() ?? 1, + n: json['n'] as int? ?? 1, quality: $enumDecodeNullable(_$ImageQualityEnumMap, json['quality']) ?? 
ImageQuality.standard, responseFormat: $enumDecodeNullable( @@ -1918,7 +1804,7 @@ Map _$$CreateImageRequestModelStringImplToJson( _$ImagesResponseImpl _$$ImagesResponseImplFromJson(Map json) => _$ImagesResponseImpl( - created: (json['created'] as num).toInt(), + created: json['created'] as int, data: (json['data'] as List) .map((e) => Image.fromJson(e as Map)) .toList(), @@ -1954,7 +1840,7 @@ Map _$$ImageImplToJson(_$ImageImpl instance) { _$ModelImpl _$$ModelImplFromJson(Map json) => _$ModelImpl( id: json['id'] as String, - created: (json['created'] as num).toInt(), + created: json['created'] as int, object: $enumDecode(_$ModelObjectEnumMap, json['object']), ownedBy: json['owned_by'] as String, ); @@ -2196,7 +2082,7 @@ _$AssistantObjectImpl _$$AssistantObjectImplFromJson( _$AssistantObjectImpl( id: json['id'] as String, object: $enumDecode(_$AssistantObjectObjectEnumMap, json['object']), - createdAt: (json['created_at'] as num).toInt(), + createdAt: json['created_at'] as int, name: json['name'] as String?, description: json['description'] as String?, model: json['model'] as String, @@ -2265,19 +2151,22 @@ Map _$$AssistantObjectResponseFormatEnumerationImplToJson( }; const _$AssistantResponseFormatModeEnumMap = { + AssistantResponseFormatMode.none: 'none', AssistantResponseFormatMode.auto: 'auto', }; -_$AssistantObjectResponseFormatResponseFormatImpl - _$$AssistantObjectResponseFormatResponseFormatImplFromJson( +_$AssistantObjectResponseFormatAssistantsResponseFormatImpl + _$$AssistantObjectResponseFormatAssistantsResponseFormatImplFromJson( Map json) => - _$AssistantObjectResponseFormatResponseFormatImpl( - ResponseFormat.fromJson(json['value'] as Map), + _$AssistantObjectResponseFormatAssistantsResponseFormatImpl( + AssistantsResponseFormat.fromJson( + json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map _$$AssistantObjectResponseFormatResponseFormatImplToJson( - _$AssistantObjectResponseFormatResponseFormatImpl instance) => +Map _$$AssistantObjectResponseFormatAssistantsResponseFormatImplToJson( + _$AssistantObjectResponseFormatAssistantsResponseFormatImpl instance) => { 'value': instance.value.toJson(), 'runtimeType': instance.$type, @@ -2347,7 +2236,6 @@ Map _$$AssistantModelEnumerationImplToJson( }; const _$AssistantModelsEnumMap = { - AssistantModels.chatgpt4oLatest: 'chatgpt-4o-latest', AssistantModels.gpt4: 'gpt-4', AssistantModels.gpt432k: 'gpt-4-32k', AssistantModels.gpt432k0314: 'gpt-4-32k-0314', @@ -2362,9 +2250,6 @@ const _$AssistantModelsEnumMap = { AssistantModels.gpt4VisionPreview: 'gpt-4-vision-preview', AssistantModels.gpt4o: 'gpt-4o', AssistantModels.gpt4o20240513: 'gpt-4o-2024-05-13', - AssistantModels.gpt4o20240806: 'gpt-4o-2024-08-06', - AssistantModels.gpt4oMini: 'gpt-4o-mini', - AssistantModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', AssistantModels.gpt35Turbo: 'gpt-3.5-turbo', AssistantModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', AssistantModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -2372,10 +2257,6 @@ const _$AssistantModelsEnumMap = { AssistantModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301', AssistantModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613', AssistantModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106', - AssistantModels.o1Mini: 'o1-mini', - AssistantModels.o1Mini20240912: 'o1-mini-2024-09-12', - AssistantModels.o1Preview: 'o1-preview', - AssistantModels.o1Preview20240912: 'o1-preview-2024-09-12', }; _$AssistantModelStringImpl _$$AssistantModelStringImplFromJson( @@ -2410,24 +2291,27 @@ Map }; const _$CreateAssistantResponseFormatModeEnumMap = 
{ + CreateAssistantResponseFormatMode.none: 'none', CreateAssistantResponseFormatMode.auto: 'auto', }; -_$CreateAssistantRequestResponseFormatResponseFormatImpl - _$$CreateAssistantRequestResponseFormatResponseFormatImplFromJson( +_$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl + _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( Map json) => - _$CreateAssistantRequestResponseFormatResponseFormatImpl( - ResponseFormat.fromJson(json['value'] as Map), + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( + AssistantsResponseFormat.fromJson( + json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map _$$CreateAssistantRequestResponseFormatResponseFormatImplToJson( - _$CreateAssistantRequestResponseFormatResponseFormatImpl instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map + _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl + instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$ModifyAssistantRequestImpl _$$ModifyAssistantRequestImplFromJson( Map json) => @@ -2500,24 +2384,27 @@ Map }; const _$ModifyAssistantResponseFormatModeEnumMap = { + ModifyAssistantResponseFormatMode.none: 'none', ModifyAssistantResponseFormatMode.auto: 'auto', }; -_$ModifyAssistantRequestResponseFormatResponseFormatImpl - _$$ModifyAssistantRequestResponseFormatResponseFormatImplFromJson( +_$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl + _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( Map json) => - _$ModifyAssistantRequestResponseFormatResponseFormatImpl( - ResponseFormat.fromJson(json['value'] as Map), + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( + AssistantsResponseFormat.fromJson( + json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map _$$ModifyAssistantRequestResponseFormatResponseFormatImplToJson( - _$ModifyAssistantRequestResponseFormatResponseFormatImpl instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map + _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl + instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$DeleteAssistantResponseImpl _$$DeleteAssistantResponseImplFromJson( Map json) => @@ -2562,34 +2449,6 @@ Map _$$ListAssistantsResponseImplToJson( 'has_more': instance.hasMore, }; -_$FileSearchRankingOptionsImpl _$$FileSearchRankingOptionsImplFromJson( - Map json) => - _$FileSearchRankingOptionsImpl( - ranker: $enumDecodeNullable(_$FileSearchRankerEnumMap, json['ranker'], - unknownValue: JsonKey.nullForUndefinedEnumValue), - scoreThreshold: (json['score_threshold'] as num).toDouble(), - ); - -Map _$$FileSearchRankingOptionsImplToJson( - _$FileSearchRankingOptionsImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('ranker', _$FileSearchRankerEnumMap[instance.ranker]); - val['score_threshold'] = instance.scoreThreshold; - return val; -} - -const _$FileSearchRankerEnumMap = { - FileSearchRanker.auto: 'auto', - FileSearchRanker.default20240821: 'default_2024_08_21', -}; - _$AssistantsNamedToolChoiceImpl _$$AssistantsNamedToolChoiceImplFromJson( Map json) => _$AssistantsNamedToolChoiceImpl( @@ -2634,11 
+2493,30 @@ Map _$$AssistantsFunctionCallOptionImplToJson( 'name': instance.name, }; +_$AssistantsResponseFormatImpl _$$AssistantsResponseFormatImplFromJson( + Map json) => + _$AssistantsResponseFormatImpl( + type: $enumDecodeNullable( + _$AssistantsResponseFormatTypeEnumMap, json['type']) ?? + AssistantsResponseFormatType.text, + ); + +Map _$$AssistantsResponseFormatImplToJson( + _$AssistantsResponseFormatImpl instance) => + { + 'type': _$AssistantsResponseFormatTypeEnumMap[instance.type]!, + }; + +const _$AssistantsResponseFormatTypeEnumMap = { + AssistantsResponseFormatType.text: 'text', + AssistantsResponseFormatType.jsonObject: 'json_object', +}; + _$TruncationObjectImpl _$$TruncationObjectImplFromJson( Map json) => _$TruncationObjectImpl( type: $enumDecode(_$TruncationObjectTypeEnumMap, json['type']), - lastMessages: (json['last_messages'] as num?)?.toInt(), + lastMessages: json['last_messages'] as int?, ); Map _$$TruncationObjectImplToJson( @@ -2666,7 +2544,7 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => _$RunObjectImpl( id: json['id'] as String, object: $enumDecode(_$RunObjectObjectEnumMap, json['object']), - createdAt: (json['created_at'] as num).toInt(), + createdAt: json['created_at'] as int, threadId: json['thread_id'] as String, assistantId: json['assistant_id'] as String, status: $enumDecode(_$RunStatusEnumMap, json['status']), @@ -2677,11 +2555,11 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => lastError: json['last_error'] == null ? null : RunLastError.fromJson(json['last_error'] as Map), - expiresAt: (json['expires_at'] as num?)?.toInt(), - startedAt: (json['started_at'] as num?)?.toInt(), - cancelledAt: (json['cancelled_at'] as num?)?.toInt(), - failedAt: (json['failed_at'] as num?)?.toInt(), - completedAt: (json['completed_at'] as num?)?.toInt(), + expiresAt: json['expires_at'] as int?, + startedAt: json['started_at'] as int?, + cancelledAt: json['cancelled_at'] as int?, + failedAt: json['failed_at'] as int?, + completedAt: json['completed_at'] as int?, incompleteDetails: json['incomplete_details'] == null ? null : RunObjectIncompleteDetails.fromJson( @@ -2697,15 +2575,14 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => : RunCompletionUsage.fromJson(json['usage'] as Map), temperature: (json['temperature'] as num?)?.toDouble(), topP: (json['top_p'] as num?)?.toDouble(), - maxPromptTokens: (json['max_prompt_tokens'] as num?)?.toInt(), - maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), + maxPromptTokens: json['max_prompt_tokens'] as int?, + maxCompletionTokens: json['max_completion_tokens'] as int?, truncationStrategy: json['truncation_strategy'] == null ? 
null : TruncationObject.fromJson( json['truncation_strategy'] as Map), toolChoice: const _RunObjectToolChoiceConverter().fromJson(json['tool_choice']), - parallelToolCalls: json['parallel_tool_calls'] as bool?, responseFormat: const _RunObjectResponseFormatConverter() .fromJson(json['response_format']), ); @@ -2746,7 +2623,6 @@ Map _$$RunObjectImplToJson(_$RunObjectImpl instance) { val['truncation_strategy'] = instance.truncationStrategy?.toJson(); val['tool_choice'] = _$JsonConverterToJson( instance.toolChoice, const _RunObjectToolChoiceConverter().toJson); - val['parallel_tool_calls'] = instance.parallelToolCalls; val['response_format'] = const _RunObjectResponseFormatConverter().toJson(instance.responseFormat); return val; @@ -2885,23 +2761,26 @@ Map _$$RunObjectResponseFormatEnumerationImplToJson( }; const _$RunObjectResponseFormatModeEnumMap = { + RunObjectResponseFormatMode.none: 'none', RunObjectResponseFormatMode.auto: 'auto', }; -_$RunObjectResponseFormatResponseFormatImpl - _$$RunObjectResponseFormatResponseFormatImplFromJson( +_$RunObjectResponseFormatAssistantsResponseFormatImpl + _$$RunObjectResponseFormatAssistantsResponseFormatImplFromJson( Map json) => - _$RunObjectResponseFormatResponseFormatImpl( - ResponseFormat.fromJson(json['value'] as Map), + _$RunObjectResponseFormatAssistantsResponseFormatImpl( + AssistantsResponseFormat.fromJson( + json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map _$$RunObjectResponseFormatResponseFormatImplToJson( - _$RunObjectResponseFormatResponseFormatImpl instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map + _$$RunObjectResponseFormatAssistantsResponseFormatImplToJson( + _$RunObjectResponseFormatAssistantsResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$RunSubmitToolOutputsImpl _$$RunSubmitToolOutputsImplFromJson( Map json) => @@ -2920,9 +2799,9 @@ Map _$$RunSubmitToolOutputsImplToJson( _$RunCompletionUsageImpl _$$RunCompletionUsageImplFromJson( Map json) => _$RunCompletionUsageImpl( - completionTokens: (json['completion_tokens'] as num).toInt(), - promptTokens: (json['prompt_tokens'] as num).toInt(), - totalTokens: (json['total_tokens'] as num).toInt(), + completionTokens: json['completion_tokens'] as int, + promptTokens: json['prompt_tokens'] as int, + totalTokens: json['total_tokens'] as int, ); Map _$$RunCompletionUsageImplToJson( @@ -2949,15 +2828,14 @@ _$CreateRunRequestImpl _$$CreateRunRequestImplFromJson( metadata: json['metadata'] as Map?, temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, topP: (json['top_p'] as num?)?.toDouble() ?? 1.0, - maxPromptTokens: (json['max_prompt_tokens'] as num?)?.toInt(), - maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), + maxPromptTokens: json['max_prompt_tokens'] as int?, + maxCompletionTokens: json['max_completion_tokens'] as int?, truncationStrategy: json['truncation_strategy'] == null ? 
null : TruncationObject.fromJson( json['truncation_strategy'] as Map), toolChoice: const _CreateRunRequestToolChoiceConverter() .fromJson(json['tool_choice']), - parallelToolCalls: json['parallel_tool_calls'] as bool?, responseFormat: const _CreateRunRequestResponseFormatConverter() .fromJson(json['response_format']), stream: json['stream'] as bool?, @@ -2990,7 +2868,6 @@ Map _$$CreateRunRequestImplToJson( writeNotNull('truncation_strategy', instance.truncationStrategy?.toJson()); writeNotNull('tool_choice', const _CreateRunRequestToolChoiceConverter().toJson(instance.toolChoice)); - writeNotNull('parallel_tool_calls', instance.parallelToolCalls); writeNotNull( 'response_format', const _CreateRunRequestResponseFormatConverter() @@ -3015,7 +2892,6 @@ Map _$$CreateRunRequestModelEnumerationImplToJson( }; const _$RunModelsEnumMap = { - RunModels.chatgpt4oLatest: 'chatgpt-4o-latest', RunModels.gpt4: 'gpt-4', RunModels.gpt432k: 'gpt-4-32k', RunModels.gpt432k0314: 'gpt-4-32k-0314', @@ -3030,9 +2906,6 @@ const _$RunModelsEnumMap = { RunModels.gpt4VisionPreview: 'gpt-4-vision-preview', RunModels.gpt4o: 'gpt-4o', RunModels.gpt4o20240513: 'gpt-4o-2024-05-13', - RunModels.gpt4o20240806: 'gpt-4o-2024-08-06', - RunModels.gpt4oMini: 'gpt-4o-mini', - RunModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', RunModels.gpt35Turbo: 'gpt-3.5-turbo', RunModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', RunModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -3040,10 +2913,6 @@ const _$RunModelsEnumMap = { RunModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301', RunModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613', RunModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106', - RunModels.o1Mini: 'o1-mini', - RunModels.o1Mini20240912: 'o1-mini-2024-09-12', - RunModels.o1Preview: 'o1-preview', - RunModels.o1Preview20240912: 'o1-preview-2024-09-12', }; _$CreateRunRequestModelStringImpl _$$CreateRunRequestModelStringImplFromJson( @@ -3115,23 +2984,27 @@ Map _$$CreateRunRequestResponseFormatEnumerationImplToJson( }; const _$CreateRunRequestResponseFormatModeEnumMap = { + CreateRunRequestResponseFormatMode.none: 'none', CreateRunRequestResponseFormatMode.auto: 'auto', }; -_$CreateRunRequestResponseFormatResponseFormatImpl - _$$CreateRunRequestResponseFormatResponseFormatImplFromJson( +_$CreateRunRequestResponseFormatAssistantsResponseFormatImpl + _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplFromJson( Map json) => - _$CreateRunRequestResponseFormatResponseFormatImpl( - ResponseFormat.fromJson(json['value'] as Map), + _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl( + AssistantsResponseFormat.fromJson( + json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map _$$CreateRunRequestResponseFormatResponseFormatImplToJson( - _$CreateRunRequestResponseFormatResponseFormatImpl instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map + _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplToJson( + _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl + instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$ListRunsResponseImpl _$$ListRunsResponseImplFromJson( Map json) => @@ -3277,15 +3150,14 @@ _$CreateThreadAndRunRequestImpl _$$CreateThreadAndRunRequestImplFromJson( metadata: json['metadata'] as Map?, temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, topP: (json['top_p'] as num?)?.toDouble() ?? 
1.0, - maxPromptTokens: (json['max_prompt_tokens'] as num?)?.toInt(), - maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), + maxPromptTokens: json['max_prompt_tokens'] as int?, + maxCompletionTokens: json['max_completion_tokens'] as int?, truncationStrategy: json['truncation_strategy'] == null ? null : TruncationObject.fromJson( json['truncation_strategy'] as Map), toolChoice: const _CreateThreadAndRunRequestToolChoiceConverter() .fromJson(json['tool_choice']), - parallelToolCalls: json['parallel_tool_calls'] as bool?, responseFormat: const _CreateThreadAndRunRequestResponseFormatConverter() .fromJson(json['response_format']), stream: json['stream'] as bool?, @@ -3319,7 +3191,6 @@ Map _$$CreateThreadAndRunRequestImplToJson( 'tool_choice', const _CreateThreadAndRunRequestToolChoiceConverter() .toJson(instance.toolChoice)); - writeNotNull('parallel_tool_calls', instance.parallelToolCalls); writeNotNull( 'response_format', const _CreateThreadAndRunRequestResponseFormatConverter() @@ -3343,7 +3214,6 @@ Map _$$ThreadAndRunModelEnumerationImplToJson( }; const _$ThreadAndRunModelsEnumMap = { - ThreadAndRunModels.chatgpt4oLatest: 'chatgpt-4o-latest', ThreadAndRunModels.gpt4: 'gpt-4', ThreadAndRunModels.gpt432k: 'gpt-4-32k', ThreadAndRunModels.gpt432k0314: 'gpt-4-32k-0314', @@ -3358,9 +3228,6 @@ const _$ThreadAndRunModelsEnumMap = { ThreadAndRunModels.gpt4VisionPreview: 'gpt-4-vision-preview', ThreadAndRunModels.gpt4o: 'gpt-4o', ThreadAndRunModels.gpt4o20240513: 'gpt-4o-2024-05-13', - ThreadAndRunModels.gpt4o20240806: 'gpt-4o-2024-08-06', - ThreadAndRunModels.gpt4oMini: 'gpt-4o-mini', - ThreadAndRunModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', ThreadAndRunModels.gpt35Turbo: 'gpt-3.5-turbo', ThreadAndRunModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', ThreadAndRunModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -3368,10 +3235,6 @@ const _$ThreadAndRunModelsEnumMap = { ThreadAndRunModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301', ThreadAndRunModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613', ThreadAndRunModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106', - ThreadAndRunModels.o1Mini: 'o1-mini', - ThreadAndRunModels.o1Mini20240912: 'o1-mini-2024-09-12', - ThreadAndRunModels.o1Preview: 'o1-preview', - ThreadAndRunModels.o1Preview20240912: 'o1-preview-2024-09-12', }; _$ThreadAndRunModelStringImpl _$$ThreadAndRunModelStringImplFromJson( @@ -3449,30 +3312,33 @@ Map json) => - _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl( - ResponseFormat.fromJson(json['value'] as Map), + _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl( + AssistantsResponseFormat.fromJson( + json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplToJson( - _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map + _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplToJson( + _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl + instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$ThreadObjectImpl _$$ThreadObjectImplFromJson(Map json) => _$ThreadObjectImpl( id: json['id'] as String, object: $enumDecode(_$ThreadObjectObjectEnumMap, json['object']), - createdAt: (json['created_at'] as num).toInt(), + createdAt: json['created_at'] as int, toolResources: json['tool_resources'] == null ? 
null : ToolResources.fromJson( @@ -3623,10 +3489,6 @@ _$ToolResourcesFileSearchVectorStoreImpl fileIds: (json['file_ids'] as List?) ?.map((e) => e as String) .toList(), - chunkingStrategy: json['chunking_strategy'] == null - ? null - : ChunkingStrategyRequestParam.fromJson( - json['chunking_strategy'] as Map), metadata: json['metadata'], ); @@ -3641,7 +3503,6 @@ Map _$$ToolResourcesFileSearchVectorStoreImplToJson( } writeNotNull('file_ids', instance.fileIds); - writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); writeNotNull('metadata', instance.metadata); return val; } @@ -3692,7 +3553,7 @@ _$MessageObjectImpl _$$MessageObjectImplFromJson(Map json) => _$MessageObjectImpl( id: json['id'] as String, object: $enumDecode(_$MessageObjectObjectEnumMap, json['object']), - createdAt: (json['created_at'] as num).toInt(), + createdAt: json['created_at'] as int, threadId: json['thread_id'] as String, status: $enumDecodeNullable(_$MessageObjectStatusEnumMap, json['status'], unknownValue: JsonKey.nullForUndefinedEnumValue), @@ -3700,8 +3561,8 @@ _$MessageObjectImpl _$$MessageObjectImplFromJson(Map json) => ? null : MessageObjectIncompleteDetails.fromJson( json['incomplete_details'] as Map), - completedAt: (json['completed_at'] as num?)?.toInt(), - incompleteAt: (json['incomplete_at'] as num?)?.toInt(), + completedAt: json['completed_at'] as int?, + incompleteAt: json['incomplete_at'] as int?, role: $enumDecode(_$MessageRoleEnumMap, json['role']), content: (json['content'] as List) .map((e) => MessageContent.fromJson(e as Map)) @@ -4045,14 +3906,44 @@ _$MessageContentTextAnnotationsFileCitationImpl Map json) => _$MessageContentTextAnnotationsFileCitationImpl( fileId: json['file_id'] as String, + quote: json['quote'] as String, ); Map _$$MessageContentTextAnnotationsFileCitationImplToJson( _$MessageContentTextAnnotationsFileCitationImpl instance) => { 'file_id': instance.fileId, + 'quote': instance.quote, }; +_$MessageDeltaContentImageUrlObjectImpl + _$$MessageDeltaContentImageUrlObjectImplFromJson( + Map json) => + _$MessageDeltaContentImageUrlObjectImpl( + index: json['index'] as int?, + type: json['type'] as String?, + imageUrl: json['image_url'] == null + ? null + : MessageContentImageUrl.fromJson( + json['image_url'] as Map), + ); + +Map _$$MessageDeltaContentImageUrlObjectImplToJson( + _$MessageDeltaContentImageUrlObjectImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('index', instance.index); + writeNotNull('type', instance.type); + writeNotNull('image_url', instance.imageUrl?.toJson()); + return val; +} + _$MessageDeltaContentTextImpl _$$MessageDeltaContentTextImplFromJson( Map json) => _$MessageDeltaContentTextImpl( @@ -4107,7 +3998,7 @@ _$RunStepObjectImpl _$$RunStepObjectImplFromJson(Map json) => _$RunStepObjectImpl( id: json['id'] as String, object: $enumDecode(_$RunStepObjectObjectEnumMap, json['object']), - createdAt: (json['created_at'] as num).toInt(), + createdAt: json['created_at'] as int, assistantId: json['assistant_id'] as String, threadId: json['thread_id'] as String, runId: json['run_id'] as String, @@ -4119,10 +4010,10 @@ _$RunStepObjectImpl _$$RunStepObjectImplFromJson(Map json) => ? 
null : RunStepLastError.fromJson( json['last_error'] as Map), - expiredAt: (json['expired_at'] as num?)?.toInt(), - cancelledAt: (json['cancelled_at'] as num?)?.toInt(), - failedAt: (json['failed_at'] as num?)?.toInt(), - completedAt: (json['completed_at'] as num?)?.toInt(), + expiredAt: json['expired_at'] as int?, + cancelledAt: json['cancelled_at'] as int?, + failedAt: json['failed_at'] as int?, + completedAt: json['completed_at'] as int?, metadata: json['metadata'] as Map?, usage: json['usage'] == null ? null @@ -4364,115 +4255,12 @@ Map return val; } -_$RunStepDetailsToolCallsFileSearchImpl - _$$RunStepDetailsToolCallsFileSearchImplFromJson( - Map json) => - _$RunStepDetailsToolCallsFileSearchImpl( - rankingOptions: json['ranking_options'] == null - ? null - : RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson( - json['ranking_options'] as Map), - results: (json['results'] as List?) - ?.map((e) => - RunStepDetailsToolCallsFileSearchResultObject.fromJson( - e as Map)) - .toList(), - ); - -Map _$$RunStepDetailsToolCallsFileSearchImplToJson( - _$RunStepDetailsToolCallsFileSearchImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('ranking_options', instance.rankingOptions?.toJson()); - writeNotNull('results', instance.results?.map((e) => e.toJson()).toList()); - return val; -} - -_$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl - _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplFromJson( - Map json) => - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl( - ranker: $enumDecode(_$FileSearchRankerEnumMap, json['ranker']), - scoreThreshold: (json['score_threshold'] as num).toDouble(), - ); - -Map _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplToJson( - _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl instance) => - { - 'ranker': _$FileSearchRankerEnumMap[instance.ranker]!, - 'score_threshold': instance.scoreThreshold, - }; - -_$RunStepDetailsToolCallsFileSearchResultObjectImpl - _$$RunStepDetailsToolCallsFileSearchResultObjectImplFromJson( - Map json) => - _$RunStepDetailsToolCallsFileSearchResultObjectImpl( - fileId: json['file_id'] as String, - fileName: json['file_name'] as String, - score: (json['score'] as num).toDouble(), - content: (json['content'] as List?) - ?.map((e) => - RunStepDetailsToolCallsFileSearchResultContent.fromJson( - e as Map)) - .toList(), - ); - -Map _$$RunStepDetailsToolCallsFileSearchResultObjectImplToJson( - _$RunStepDetailsToolCallsFileSearchResultObjectImpl instance) { - final val = { - 'file_id': instance.fileId, - 'file_name': instance.fileName, - 'score': instance.score, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); - return val; -} - -_$RunStepDetailsToolCallsFileSearchResultContentImpl - _$$RunStepDetailsToolCallsFileSearchResultContentImplFromJson( - Map json) => - _$RunStepDetailsToolCallsFileSearchResultContentImpl( - type: json['type'] as String? ?? 
'text', - text: json['text'] as String?, - ); - -Map - _$$RunStepDetailsToolCallsFileSearchResultContentImplToJson( - _$RunStepDetailsToolCallsFileSearchResultContentImpl instance) { - final val = { - 'type': instance.type, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('text', instance.text); - return val; -} - _$RunStepCompletionUsageImpl _$$RunStepCompletionUsageImplFromJson( Map json) => _$RunStepCompletionUsageImpl( - completionTokens: (json['completion_tokens'] as num).toInt(), - promptTokens: (json['prompt_tokens'] as num).toInt(), - totalTokens: (json['total_tokens'] as num).toInt(), + completionTokens: json['completion_tokens'] as int, + promptTokens: json['prompt_tokens'] as int, + totalTokens: json['total_tokens'] as int, ); Map _$$RunStepCompletionUsageImplToJson( @@ -4488,7 +4276,7 @@ _$VectorStoreExpirationAfterImpl _$$VectorStoreExpirationAfterImplFromJson( _$VectorStoreExpirationAfterImpl( anchor: $enumDecode( _$VectorStoreExpirationAfterAnchorEnumMap, json['anchor']), - days: (json['days'] as num).toInt(), + days: json['days'] as int, ); Map _$$VectorStoreExpirationAfterImplToJson( @@ -4507,9 +4295,9 @@ _$VectorStoreObjectImpl _$$VectorStoreObjectImplFromJson( _$VectorStoreObjectImpl( id: json['id'] as String, object: json['object'] as String, - createdAt: (json['created_at'] as num).toInt(), + createdAt: json['created_at'] as int, name: json['name'] as String?, - usageBytes: (json['usage_bytes'] as num).toInt(), + usageBytes: json['usage_bytes'] as int, fileCounts: VectorStoreObjectFileCounts.fromJson( json['file_counts'] as Map), status: $enumDecode(_$VectorStoreObjectStatusEnumMap, json['status']), @@ -4517,8 +4305,8 @@ _$VectorStoreObjectImpl _$$VectorStoreObjectImplFromJson( ? null : VectorStoreExpirationAfter.fromJson( json['expires_after'] as Map), - expiresAt: (json['expires_at'] as num?)?.toInt(), - lastActiveAt: (json['last_active_at'] as num?)?.toInt(), + expiresAt: json['expires_at'] as int?, + lastActiveAt: json['last_active_at'] as int?, metadata: json['metadata'], ); @@ -4556,11 +4344,11 @@ const _$VectorStoreObjectStatusEnumMap = { _$VectorStoreObjectFileCountsImpl _$$VectorStoreObjectFileCountsImplFromJson( Map json) => _$VectorStoreObjectFileCountsImpl( - inProgress: (json['in_progress'] as num).toInt(), - completed: (json['completed'] as num).toInt(), - failed: (json['failed'] as num).toInt(), - cancelled: (json['cancelled'] as num).toInt(), - total: (json['total'] as num).toInt(), + inProgress: json['in_progress'] as int, + completed: json['completed'] as int, + failed: json['failed'] as int, + cancelled: json['cancelled'] as int, + total: json['total'] as int, ); Map _$$VectorStoreObjectFileCountsImplToJson( @@ -4576,18 +4364,14 @@ Map _$$VectorStoreObjectFileCountsImplToJson( _$CreateVectorStoreRequestImpl _$$CreateVectorStoreRequestImplFromJson( Map json) => _$CreateVectorStoreRequestImpl( - name: json['name'] as String?, fileIds: (json['file_ids'] as List?) ?.map((e) => e as String) .toList(), + name: json['name'] as String, expiresAfter: json['expires_after'] == null ? null : VectorStoreExpirationAfter.fromJson( json['expires_after'] as Map), - chunkingStrategy: json['chunking_strategy'] == null - ? 
null - : ChunkingStrategyRequestParam.fromJson( - json['chunking_strategy'] as Map), metadata: json['metadata'], ); @@ -4601,10 +4385,9 @@ Map _$$CreateVectorStoreRequestImplToJson( } } - writeNotNull('name', instance.name); writeNotNull('file_ids', instance.fileIds); + val['name'] = instance.name; writeNotNull('expires_after', instance.expiresAfter?.toJson()); - writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); writeNotNull('metadata', instance.metadata); return val; } @@ -4679,41 +4462,27 @@ _$VectorStoreFileObjectImpl _$$VectorStoreFileObjectImplFromJson( _$VectorStoreFileObjectImpl( id: json['id'] as String, object: json['object'] as String, - usageBytes: (json['usage_bytes'] as num).toInt(), - createdAt: (json['created_at'] as num).toInt(), + usageBytes: json['usage_bytes'] as int, + createdAt: json['created_at'] as int, vectorStoreId: json['vector_store_id'] as String, status: $enumDecode(_$VectorStoreFileStatusEnumMap, json['status']), lastError: json['last_error'] == null ? null : VectorStoreFileObjectLastError.fromJson( json['last_error'] as Map), - chunkingStrategy: json['chunking_strategy'] == null - ? null - : ChunkingStrategyResponseParam.fromJson( - json['chunking_strategy'] as Map), ); Map _$$VectorStoreFileObjectImplToJson( - _$VectorStoreFileObjectImpl instance) { - final val = { - 'id': instance.id, - 'object': instance.object, - 'usage_bytes': instance.usageBytes, - 'created_at': instance.createdAt, - 'vector_store_id': instance.vectorStoreId, - 'status': _$VectorStoreFileStatusEnumMap[instance.status]!, - 'last_error': instance.lastError?.toJson(), - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); - return val; -} + _$VectorStoreFileObjectImpl instance) => + { + 'id': instance.id, + 'object': instance.object, + 'usage_bytes': instance.usageBytes, + 'created_at': instance.createdAt, + 'vector_store_id': instance.vectorStoreId, + 'status': _$VectorStoreFileStatusEnumMap[instance.status]!, + 'last_error': instance.lastError?.toJson(), + }; const _$VectorStoreFileStatusEnumMap = { VectorStoreFileStatus.inProgress: 'in_progress', @@ -4738,50 +4507,23 @@ Map _$$VectorStoreFileObjectLastErrorImplToJson( }; const _$VectorStoreFileObjectLastErrorCodeEnumMap = { - VectorStoreFileObjectLastErrorCode.serverError: 'server_error', - VectorStoreFileObjectLastErrorCode.unsupportedFile: 'unsupported_file', - VectorStoreFileObjectLastErrorCode.invalidFile: 'invalid_file', + VectorStoreFileObjectLastErrorCode.internalError: 'internal_error', + VectorStoreFileObjectLastErrorCode.fileNotFound: 'file_not_found', + VectorStoreFileObjectLastErrorCode.parsingError: 'parsing_error', + VectorStoreFileObjectLastErrorCode.unhandledMimeType: 'unhandled_mime_type', }; -_$StaticChunkingStrategyImpl _$$StaticChunkingStrategyImplFromJson( - Map json) => - _$StaticChunkingStrategyImpl( - maxChunkSizeTokens: (json['max_chunk_size_tokens'] as num).toInt(), - chunkOverlapTokens: (json['chunk_overlap_tokens'] as num).toInt(), - ); - -Map _$$StaticChunkingStrategyImplToJson( - _$StaticChunkingStrategyImpl instance) => - { - 'max_chunk_size_tokens': instance.maxChunkSizeTokens, - 'chunk_overlap_tokens': instance.chunkOverlapTokens, - }; - _$CreateVectorStoreFileRequestImpl _$$CreateVectorStoreFileRequestImplFromJson( Map json) => _$CreateVectorStoreFileRequestImpl( fileId: json['file_id'] as String, - chunkingStrategy: json['chunking_strategy'] == null - ? 
null - : ChunkingStrategyRequestParam.fromJson( - json['chunking_strategy'] as Map), ); Map _$$CreateVectorStoreFileRequestImplToJson( - _$CreateVectorStoreFileRequestImpl instance) { - final val = { - 'file_id': instance.fileId, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); - return val; -} + _$CreateVectorStoreFileRequestImpl instance) => + { + 'file_id': instance.fileId, + }; _$ListVectorStoreFilesResponseImpl _$$ListVectorStoreFilesResponseImplFromJson( Map json) => @@ -4826,7 +4568,7 @@ _$VectorStoreFileBatchObjectImpl _$$VectorStoreFileBatchObjectImplFromJson( _$VectorStoreFileBatchObjectImpl( id: json['id'] as String, object: json['object'] as String, - createdAt: (json['created_at'] as num).toInt(), + createdAt: json['created_at'] as int, vectorStoreId: json['vector_store_id'] as String, status: $enumDecode( _$VectorStoreFileBatchObjectStatusEnumMap, json['status']), @@ -4856,11 +4598,11 @@ _$VectorStoreFileBatchObjectFileCountsImpl _$$VectorStoreFileBatchObjectFileCountsImplFromJson( Map json) => _$VectorStoreFileBatchObjectFileCountsImpl( - inProgress: (json['in_progress'] as num).toInt(), - completed: (json['completed'] as num).toInt(), - failed: (json['failed'] as num).toInt(), - cancelled: (json['cancelled'] as num).toInt(), - total: (json['total'] as num).toInt(), + inProgress: json['in_progress'] as int, + completed: json['completed'] as int, + failed: json['failed'] as int, + cancelled: json['cancelled'] as int, + total: json['total'] as int, ); Map _$$VectorStoreFileBatchObjectFileCountsImplToJson( @@ -4880,27 +4622,13 @@ _$CreateVectorStoreFileBatchRequestImpl fileIds: (json['file_ids'] as List) .map((e) => e as String) .toList(), - chunkingStrategy: json['chunking_strategy'] == null - ? 
null - : ChunkingStrategyRequestParam.fromJson( - json['chunking_strategy'] as Map), ); Map _$$CreateVectorStoreFileBatchRequestImplToJson( - _$CreateVectorStoreFileBatchRequestImpl instance) { - final val = { - 'file_ids': instance.fileIds, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); - return val; -} + _$CreateVectorStoreFileBatchRequestImpl instance) => + { + 'file_ids': instance.fileIds, + }; _$ErrorImpl _$$ErrorImplFromJson(Map json) => _$ErrorImpl( code: json['code'] as String?, @@ -4971,15 +4699,15 @@ _$BatchImpl _$$BatchImplFromJson(Map json) => _$BatchImpl( status: $enumDecode(_$BatchStatusEnumMap, json['status']), outputFileId: json['output_file_id'] as String?, errorFileId: json['error_file_id'] as String?, - createdAt: (json['created_at'] as num).toInt(), - inProgressAt: (json['in_progress_at'] as num?)?.toInt(), - expiresAt: (json['expires_at'] as num?)?.toInt(), - finalizingAt: (json['finalizing_at'] as num?)?.toInt(), - completedAt: (json['completed_at'] as num?)?.toInt(), - failedAt: (json['failed_at'] as num?)?.toInt(), - expiredAt: (json['expired_at'] as num?)?.toInt(), - cancellingAt: (json['cancelling_at'] as num?)?.toInt(), - cancelledAt: (json['cancelled_at'] as num?)?.toInt(), + createdAt: json['created_at'] as int, + inProgressAt: json['in_progress_at'] as int?, + expiresAt: json['expires_at'] as int?, + finalizingAt: json['finalizing_at'] as int?, + completedAt: json['completed_at'] as int?, + failedAt: json['failed_at'] as int?, + expiredAt: json['expired_at'] as int?, + cancellingAt: json['cancelling_at'] as int?, + cancelledAt: json['cancelled_at'] as int?, requestCounts: json['request_counts'] == null ? null : BatchRequestCounts.fromJson( @@ -5061,9 +4789,9 @@ Map _$$BatchErrorsImplToJson(_$BatchErrorsImpl instance) { _$BatchRequestCountsImpl _$$BatchRequestCountsImplFromJson( Map json) => _$BatchRequestCountsImpl( - total: (json['total'] as num).toInt(), - completed: (json['completed'] as num).toInt(), - failed: (json['failed'] as num).toInt(), + total: json['total'] as int, + completed: json['completed'] as int, + failed: json['failed'] as int, ); Map _$$BatchRequestCountsImplToJson( @@ -5080,7 +4808,7 @@ _$BatchErrorsDataInnerImpl _$$BatchErrorsDataInnerImplFromJson( code: json['code'] as String?, message: json['message'] as String?, param: json['param'] as String?, - line: (json['line'] as num?)?.toInt(), + line: json['line'] as int?, ); Map _$$BatchErrorsDataInnerImplToJson( @@ -5198,7 +4926,6 @@ _$ChatCompletionAssistantMessageImpl _$ChatCompletionMessageRoleEnumMap, json['role']) ?? ChatCompletionMessageRole.assistant, content: json['content'] as String?, - refusal: json['refusal'] as String?, name: json['name'] as String?, toolCalls: (json['tool_calls'] as List?) 
?.map((e) => ChatCompletionMessageToolCall.fromJson( @@ -5223,7 +4950,6 @@ Map _$$ChatCompletionAssistantMessageImplToJson( } writeNotNull('content', instance.content); - writeNotNull('refusal', instance.refusal); writeNotNull('name', instance.name); writeNotNull( 'tool_calls', instance.toolCalls?.map((e) => e.toJson()).toList()); @@ -5321,7 +5047,6 @@ Map _$$ChatCompletionMessageContentPartTextImplToJson( const _$ChatCompletionMessageContentPartTypeEnumMap = { ChatCompletionMessageContentPartType.text: 'text', ChatCompletionMessageContentPartType.imageUrl: 'image_url', - ChatCompletionMessageContentPartType.refusal: 'refusal', }; _$ChatCompletionMessageContentPartImageImpl @@ -5343,24 +5068,6 @@ Map _$$ChatCompletionMessageContentPartImageImplToJson( 'image_url': instance.imageUrl.toJson(), }; -_$ChatCompletionMessageContentPartRefusalImpl - _$$ChatCompletionMessageContentPartRefusalImplFromJson( - Map json) => - _$ChatCompletionMessageContentPartRefusalImpl( - type: $enumDecodeNullable( - _$ChatCompletionMessageContentPartTypeEnumMap, - json['type']) ?? - ChatCompletionMessageContentPartType.refusal, - refusal: json['refusal'] as String, - ); - -Map _$$ChatCompletionMessageContentPartRefusalImplToJson( - _$ChatCompletionMessageContentPartRefusalImpl instance) => - { - 'type': _$ChatCompletionMessageContentPartTypeEnumMap[instance.type]!, - 'refusal': instance.refusal, - }; - _$ChatCompletionMessageImageUrlImpl _$$ChatCompletionMessageImageUrlImplFromJson(Map json) => _$ChatCompletionMessageImageUrlImpl( @@ -5383,54 +5090,6 @@ const _$ChatCompletionMessageImageDetailEnumMap = { ChatCompletionMessageImageDetail.high: 'high', }; -_$ResponseFormatTextImpl _$$ResponseFormatTextImplFromJson( - Map json) => - _$ResponseFormatTextImpl( - type: $enumDecodeNullable(_$ResponseFormatTypeEnumMap, json['type']) ?? - ResponseFormatType.text, - ); - -Map _$$ResponseFormatTextImplToJson( - _$ResponseFormatTextImpl instance) => - { - 'type': _$ResponseFormatTypeEnumMap[instance.type]!, - }; - -const _$ResponseFormatTypeEnumMap = { - ResponseFormatType.text: 'text', - ResponseFormatType.jsonObject: 'json_object', - ResponseFormatType.jsonSchema: 'json_schema', -}; - -_$ResponseFormatJsonObjectImpl _$$ResponseFormatJsonObjectImplFromJson( - Map json) => - _$ResponseFormatJsonObjectImpl( - type: $enumDecodeNullable(_$ResponseFormatTypeEnumMap, json['type']) ?? - ResponseFormatType.jsonObject, - ); - -Map _$$ResponseFormatJsonObjectImplToJson( - _$ResponseFormatJsonObjectImpl instance) => - { - 'type': _$ResponseFormatTypeEnumMap[instance.type]!, - }; - -_$ResponseFormatJsonSchemaImpl _$$ResponseFormatJsonSchemaImplFromJson( - Map json) => - _$ResponseFormatJsonSchemaImpl( - type: $enumDecodeNullable(_$ResponseFormatTypeEnumMap, json['type']) ?? - ResponseFormatType.jsonSchema, - jsonSchema: JsonSchemaObject.fromJson( - json['json_schema'] as Map), - ); - -Map _$$ResponseFormatJsonSchemaImplToJson( - _$ResponseFormatJsonSchemaImpl instance) => - { - 'type': _$ResponseFormatTypeEnumMap[instance.type]!, - 'json_schema': instance.jsonSchema.toJson(), - }; - _$AssistantToolsCodeInterpreterImpl _$$AssistantToolsCodeInterpreterImplFromJson(Map json) => _$AssistantToolsCodeInterpreterImpl( @@ -5446,28 +5105,14 @@ Map _$$AssistantToolsCodeInterpreterImplToJson( _$AssistantToolsFileSearchImpl _$$AssistantToolsFileSearchImplFromJson( Map json) => _$AssistantToolsFileSearchImpl( - type: json['type'] as String, - fileSearch: json['file_search'] == null - ? 
null - : AssistantToolsFileSearchFileSearch.fromJson( - json['file_search'] as Map), + type: json['type'] as String? ?? 'file_search', ); Map _$$AssistantToolsFileSearchImplToJson( - _$AssistantToolsFileSearchImpl instance) { - final val = { - 'type': instance.type, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('file_search', instance.fileSearch?.toJson()); - return val; -} + _$AssistantToolsFileSearchImpl instance) => + { + 'type': instance.type, + }; _$AssistantToolsFunctionImpl _$$AssistantToolsFunctionImplFromJson( Map json) => @@ -5484,32 +5129,6 @@ Map _$$AssistantToolsFunctionImplToJson( 'function': instance.function.toJson(), }; -_$AssistantToolsFileSearchFileSearchImpl - _$$AssistantToolsFileSearchFileSearchImplFromJson( - Map json) => - _$AssistantToolsFileSearchFileSearchImpl( - maxNumResults: (json['max_num_results'] as num?)?.toInt(), - rankingOptions: json['ranking_options'] == null - ? null - : FileSearchRankingOptions.fromJson( - json['ranking_options'] as Map), - ); - -Map _$$AssistantToolsFileSearchFileSearchImplToJson( - _$AssistantToolsFileSearchFileSearchImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('max_num_results', instance.maxNumResults); - writeNotNull('ranking_options', instance.rankingOptions?.toJson()); - return val; -} - _$MessageContentImageFileObjectImpl _$$MessageContentImageFileObjectImplFromJson(Map json) => _$MessageContentImageFileObjectImpl( @@ -5554,25 +5173,11 @@ Map _$$MessageContentTextObjectImplToJson( 'text': instance.text.toJson(), }; -_$MessageContentRefusalObjectImpl _$$MessageContentRefusalObjectImplFromJson( - Map json) => - _$MessageContentRefusalObjectImpl( - type: json['type'] as String, - refusal: json['refusal'] as String, - ); - -Map _$$MessageContentRefusalObjectImplToJson( - _$MessageContentRefusalObjectImpl instance) => - { - 'type': instance.type, - 'refusal': instance.refusal, - }; - _$MessageDeltaContentImageFileObjectImpl _$$MessageDeltaContentImageFileObjectImplFromJson( Map json) => _$MessageDeltaContentImageFileObjectImpl( - index: (json['index'] as num).toInt(), + index: json['index'] as int, type: json['type'] as String, imageFile: json['image_file'] == null ? null @@ -5600,7 +5205,7 @@ Map _$$MessageDeltaContentImageFileObjectImplToJson( _$MessageDeltaContentTextObjectImpl _$$MessageDeltaContentTextObjectImplFromJson(Map json) => _$MessageDeltaContentTextObjectImpl( - index: (json['index'] as num).toInt(), + index: json['index'] as int, type: json['type'] as String, text: json['text'] == null ? 
null @@ -5625,61 +5230,6 @@ Map _$$MessageDeltaContentTextObjectImplToJson( return val; } -_$MessageDeltaContentRefusalObjectImpl - _$$MessageDeltaContentRefusalObjectImplFromJson( - Map json) => - _$MessageDeltaContentRefusalObjectImpl( - index: (json['index'] as num).toInt(), - type: json['type'] as String, - refusal: json['refusal'] as String?, - ); - -Map _$$MessageDeltaContentRefusalObjectImplToJson( - _$MessageDeltaContentRefusalObjectImpl instance) { - final val = { - 'index': instance.index, - 'type': instance.type, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('refusal', instance.refusal); - return val; -} - -_$MessageDeltaContentImageUrlObjectImpl - _$$MessageDeltaContentImageUrlObjectImplFromJson( - Map json) => - _$MessageDeltaContentImageUrlObjectImpl( - index: (json['index'] as num).toInt(), - type: json['type'] as String, - imageUrl: json['image_url'] == null - ? null - : MessageContentImageUrl.fromJson( - json['image_url'] as Map), - ); - -Map _$$MessageDeltaContentImageUrlObjectImplToJson( - _$MessageDeltaContentImageUrlObjectImpl instance) { - final val = { - 'index': instance.index, - 'type': instance.type, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('image_url', instance.imageUrl?.toJson()); - return val; -} - _$MessageContentTextAnnotationsFileCitationObjectImpl _$$MessageContentTextAnnotationsFileCitationObjectImplFromJson( Map json) => @@ -5688,8 +5238,8 @@ _$MessageContentTextAnnotationsFileCitationObjectImpl text: json['text'] as String, fileCitation: MessageContentTextAnnotationsFileCitation.fromJson( json['file_citation'] as Map), - startIndex: (json['start_index'] as num).toInt(), - endIndex: (json['end_index'] as num).toInt(), + startIndex: json['start_index'] as int, + endIndex: json['end_index'] as int, ); Map @@ -5711,8 +5261,8 @@ _$MessageContentTextAnnotationsFilePathObjectImpl text: json['text'] as String, filePath: MessageContentTextAnnotationsFilePath.fromJson( json['file_path'] as Map), - startIndex: (json['start_index'] as num).toInt(), - endIndex: (json['end_index'] as num).toInt(), + startIndex: json['start_index'] as int, + endIndex: json['end_index'] as int, ); Map _$$MessageContentTextAnnotationsFilePathObjectImplToJson( @@ -5742,15 +5292,15 @@ _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplFromJson( Map json) => _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl( - index: (json['index'] as num).toInt(), + index: json['index'] as int, type: json['type'] as String, text: json['text'] as String?, fileCitation: json['file_citation'] == null ? null : MessageDeltaContentTextAnnotationsFileCitation.fromJson( json['file_citation'] as Map), - startIndex: (json['start_index'] as num?)?.toInt(), - endIndex: (json['end_index'] as num?)?.toInt(), + startIndex: json['start_index'] as int?, + endIndex: json['end_index'] as int?, ); Map @@ -5778,15 +5328,15 @@ _$MessageDeltaContentTextAnnotationsFilePathObjectImpl _$$MessageDeltaContentTextAnnotationsFilePathObjectImplFromJson( Map json) => _$MessageDeltaContentTextAnnotationsFilePathObjectImpl( - index: (json['index'] as num).toInt(), + index: json['index'] as int, type: json['type'] as String, text: json['text'] as String?, filePath: json['file_path'] == null ? 
null : MessageDeltaContentTextAnnotationsFilePathObjectFilePath .fromJson(json['file_path'] as Map), - startIndex: (json['start_index'] as num?)?.toInt(), - endIndex: (json['end_index'] as num?)?.toInt(), + startIndex: json['start_index'] as int?, + endIndex: json['end_index'] as int?, ); Map @@ -5946,8 +5496,7 @@ _$RunStepDetailsToolCallsFileSearchObjectImpl _$RunStepDetailsToolCallsFileSearchObjectImpl( id: json['id'] as String, type: json['type'] as String, - fileSearch: RunStepDetailsToolCallsFileSearch.fromJson( - json['file_search'] as Map), + fileSearch: json['file_search'] as Map, ); Map _$$RunStepDetailsToolCallsFileSearchObjectImplToJson( @@ -5955,7 +5504,7 @@ Map _$$RunStepDetailsToolCallsFileSearchObjectImplToJson( { 'id': instance.id, 'type': instance.type, - 'file_search': instance.fileSearch.toJson(), + 'file_search': instance.fileSearch, }; _$RunStepDetailsToolCallsFunctionObjectImpl @@ -5996,7 +5545,7 @@ _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl( - index: (json['index'] as num).toInt(), + index: json['index'] as int, id: json['id'] as String?, type: json['type'] as String, codeInterpreter: json['code_interpreter'] == null @@ -6027,7 +5576,7 @@ _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl( - index: (json['index'] as num).toInt(), + index: json['index'] as int, id: json['id'] as String?, type: json['type'] as String, fileSearch: json['file_search'] as Map, @@ -6056,7 +5605,7 @@ _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl( - index: (json['index'] as num).toInt(), + index: json['index'] as int, id: json['id'] as String?, type: json['type'] as String, function: json['function'] == null @@ -6144,7 +5693,7 @@ _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl( - index: (json['index'] as num).toInt(), + index: json['index'] as int, type: json['type'] as String, logs: json['logs'] as String?, ); @@ -6171,7 +5720,7 @@ _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl( - index: (json['index'] as num).toInt(), + index: json['index'] as int, type: json['type'] as String, image: json['image'] == null ? 
null @@ -6197,64 +5746,6 @@ Map return val; } -_$AutoChunkingStrategyRequestParamImpl - _$$AutoChunkingStrategyRequestParamImplFromJson( - Map json) => - _$AutoChunkingStrategyRequestParamImpl( - type: json['type'] as String, - ); - -Map _$$AutoChunkingStrategyRequestParamImplToJson( - _$AutoChunkingStrategyRequestParamImpl instance) => - { - 'type': instance.type, - }; - -_$StaticChunkingStrategyRequestParamImpl - _$$StaticChunkingStrategyRequestParamImplFromJson( - Map json) => - _$StaticChunkingStrategyRequestParamImpl( - type: json['type'] as String, - static: StaticChunkingStrategy.fromJson( - json['static'] as Map), - ); - -Map _$$StaticChunkingStrategyRequestParamImplToJson( - _$StaticChunkingStrategyRequestParamImpl instance) => - { - 'type': instance.type, - 'static': instance.static.toJson(), - }; - -_$StaticChunkingStrategyResponseParamImpl - _$$StaticChunkingStrategyResponseParamImplFromJson( - Map json) => - _$StaticChunkingStrategyResponseParamImpl( - type: json['type'] as String, - static: StaticChunkingStrategy.fromJson( - json['static'] as Map), - ); - -Map _$$StaticChunkingStrategyResponseParamImplToJson( - _$StaticChunkingStrategyResponseParamImpl instance) => - { - 'type': instance.type, - 'static': instance.static.toJson(), - }; - -_$OtherChunkingStrategyResponseParamImpl - _$$OtherChunkingStrategyResponseParamImplFromJson( - Map json) => - _$OtherChunkingStrategyResponseParamImpl( - type: json['type'] as String, - ); - -Map _$$OtherChunkingStrategyResponseParamImplToJson( - _$OtherChunkingStrategyResponseParamImpl instance) => - { - 'type': instance.type, - }; - _$ThreadStreamEventImpl _$$ThreadStreamEventImplFromJson( Map json) => _$ThreadStreamEventImpl( diff --git a/packages/openai_dart/lib/src/generated/schema/service_tier.dart b/packages/openai_dart/lib/src/generated/schema/service_tier.dart deleted file mode 100644 index 8a01afc5..00000000 --- a/packages/openai_dart/lib/src/generated/schema/service_tier.dart +++ /dev/null @@ -1,18 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// ENUM: ServiceTier -// ========================================== - -/// The service tier used for processing the request. This field is only included if the `service_tier` parameter -/// is specified in the request. -enum ServiceTier { - @JsonValue('scale') - scale, - @JsonValue('default') - vDefault, -} diff --git a/packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart b/packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart deleted file mode 100644 index aa67e062..00000000 --- a/packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart +++ /dev/null @@ -1,60 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: StaticChunkingStrategy -// ========================================== - -/// Static chunking strategy -@freezed -class StaticChunkingStrategy with _$StaticChunkingStrategy { - const StaticChunkingStrategy._(); - - /// Factory constructor for StaticChunkingStrategy - const factory StaticChunkingStrategy({ - /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the - /// maximum value is `4096`. 
- @JsonKey(name: 'max_chunk_size_tokens') required int maxChunkSizeTokens, - - /// The number of tokens that overlap between chunks. The default value is `400`. - /// - /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. - @JsonKey(name: 'chunk_overlap_tokens') required int chunkOverlapTokens, - }) = _StaticChunkingStrategy; - - /// Object construction from a JSON representation - factory StaticChunkingStrategy.fromJson(Map json) => - _$StaticChunkingStrategyFromJson(json); - - /// List of all property names of schema - static const List propertyNames = [ - 'max_chunk_size_tokens', - 'chunk_overlap_tokens' - ]; - - /// Validation constants - static const maxChunkSizeTokensMinValue = 100; - static const maxChunkSizeTokensMaxValue = 4096; - - /// Perform validations on the schema property values - String? validateSchema() { - if (maxChunkSizeTokens < maxChunkSizeTokensMinValue) { - return "The value of 'maxChunkSizeTokens' cannot be < $maxChunkSizeTokensMinValue"; - } - if (maxChunkSizeTokens > maxChunkSizeTokensMaxValue) { - return "The value of 'maxChunkSizeTokens' cannot be > $maxChunkSizeTokensMaxValue"; - } - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'max_chunk_size_tokens': maxChunkSizeTokens, - 'chunk_overlap_tokens': chunkOverlapTokens, - }; - } -} diff --git a/packages/openai_dart/lib/src/generated/schema/thread_object.dart b/packages/openai_dart/lib/src/generated/schema/thread_object.dart index 20f2e014..a5ae0ea8 100644 --- a/packages/openai_dart/lib/src/generated/schema/thread_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/thread_object.dart @@ -27,9 +27,7 @@ class ThreadObject with _$ThreadObject { /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources') required ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. required Map? metadata, }) = _ThreadObject; diff --git a/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart b/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart index cc01299d..63247873 100644 --- a/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart +++ b/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart @@ -19,11 +19,6 @@ class ToolResourcesFileSearchVectorStore /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. 
- /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyRequestParam? chunkingStrategy, - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic metadata, }) = _ToolResourcesFileSearchVectorStore; @@ -34,11 +29,7 @@ class ToolResourcesFileSearchVectorStore _$ToolResourcesFileSearchVectorStoreFromJson(json); /// List of all property names of schema - static const List propertyNames = [ - 'file_ids', - 'chunking_strategy', - 'metadata' - ]; + static const List propertyNames = ['file_ids', 'metadata']; /// Perform validations on the schema property values String? validateSchema() { @@ -49,7 +40,6 @@ class ToolResourcesFileSearchVectorStore Map toMap() { return { 'file_ids': fileIds, - 'chunking_strategy': chunkingStrategy, 'metadata': metadata, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart b/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart index d2ef2414..7105bd0c 100644 --- a/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart @@ -22,9 +22,7 @@ class UpdateVectorStoreRequest with _$UpdateVectorStoreRequest { @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic metadata, }) = _UpdateVectorStoreRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart b/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart index 3664758b..53e6f928 100644 --- a/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart @@ -36,11 +36,6 @@ class VectorStoreFileObject with _$VectorStoreFileObject { /// The last error associated with this vector store file. Will be `null` if there are no errors. @JsonKey(name: 'last_error') required VectorStoreFileObjectLastError? lastError, - - /// The chunking strategy used to chunk the file(s). - /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] - @JsonKey(name: 'chunking_strategy', includeIfNull: false) - ChunkingStrategyResponseParam? 
chunkingStrategy, }) = _VectorStoreFileObject; /// Object construction from a JSON representation @@ -55,8 +50,7 @@ class VectorStoreFileObject with _$VectorStoreFileObject { 'created_at', 'vector_store_id', 'status', - 'last_error', - 'chunking_strategy' + 'last_error' ]; /// Perform validations on the schema property values @@ -74,7 +68,6 @@ class VectorStoreFileObject with _$VectorStoreFileObject { 'vector_store_id': vectorStoreId, 'status': status, 'last_error': lastError, - 'chunking_strategy': chunkingStrategy, }; } } @@ -140,10 +133,12 @@ class VectorStoreFileObjectLastError with _$VectorStoreFileObjectLastError { /// One of `server_error` or `rate_limit_exceeded`. enum VectorStoreFileObjectLastErrorCode { - @JsonValue('server_error') - serverError, - @JsonValue('unsupported_file') - unsupportedFile, - @JsonValue('invalid_file') - invalidFile, + @JsonValue('internal_error') + internalError, + @JsonValue('file_not_found') + fileNotFound, + @JsonValue('parsing_error') + parsingError, + @JsonValue('unhandled_mime_type') + unhandledMimeType, } diff --git a/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart b/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart index a3d49591..836d8337 100644 --- a/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart @@ -47,9 +47,7 @@ class VectorStoreObject with _$VectorStoreObject { /// The Unix timestamp (in seconds) for when the vector store was last active. @JsonKey(name: 'last_active_at') required int? lastActiveAt, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values - /// can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. required dynamic metadata, }) = _VectorStoreObject; diff --git a/packages/openai_dart/lib/src/http_client/http_client.dart b/packages/openai_dart/lib/src/http_client/http_client.dart index 0ad0b2fc..99555ca4 100644 --- a/packages/openai_dart/lib/src/http_client/http_client.dart +++ b/packages/openai_dart/lib/src/http_client/http_client.dart @@ -1,3 +1,4 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js_interop) 'http_client_html.dart'; + if (dart.library.js) 'http_client_html.dart' + if (dart.library.html) 'http_client_html.dart'; diff --git a/packages/openai_dart/oas/main.dart b/packages/openai_dart/oas/main.dart index ab870afb..1f2fe406 100644 --- a/packages/openai_dart/oas/main.dart +++ b/packages/openai_dart/oas/main.dart @@ -1,4 +1,3 @@ -// ignore_for_file: avoid_print import 'dart:io'; import 'package:openapi_spec/openapi_spec.dart'; @@ -19,12 +18,10 @@ void main() async { enabled: true, ), ); - final res = await Process.run( + await Process.run( 'dart', ['run', 'build_runner', 'build', 'lib', '--delete-conflicting-outputs'], ); - print(res.stdout); - print(res.stderr); } String? _onSchemaName(final String schemaName) => switch (schemaName) { @@ -49,15 +46,11 @@ String? 
_onSchemaUnionFactoryName( 'ChatCompletionMessageContentParts' => 'parts', 'ChatCompletionMessageContentPartText' => 'text', 'ChatCompletionMessageContentPartImage' => 'image', - 'ChatCompletionMessageContentPartRefusal' => 'refusal', 'ChatCompletionToolChoiceOptionEnumeration' => 'mode', 'ChatCompletionToolChoiceOptionChatCompletionNamedToolChoice' => 'tool', 'ChatCompletionFunctionCallEnumeration' => 'mode', 'ChatCompletionFunctionCallChatCompletionFunctionCallOption' => 'function', - 'ResponseFormatText' => 'text', - 'ResponseFormatJsonObject' => 'jsonObject', - 'ResponseFormatJsonSchema' => 'jsonSchema', // Completion 'CompletionModelEnumeration' => 'model', 'CompletionModelString' => 'modelId', @@ -84,59 +77,53 @@ String? _onSchemaUnionFactoryName( // Assistant 'AssistantModelEnumeration' => 'model', 'AssistantModelString' => 'modelId', - 'AssistantObjectResponseFormatEnumeration' => 'mode', - 'CreateAssistantRequestResponseFormatAssistantsResponseFormat' => - 'format', - 'CreateAssistantRequestResponseFormatEnumeration' => 'mode', - 'CreateMessageRequestContentListMessageContent' => 'parts', - 'CreateMessageRequestContentString' => 'text', - 'CreateRunRequestModelEnumeration' => 'model', - 'CreateRunRequestModelString' => 'modelId', - 'CreateRunRequestResponseFormatAssistantsResponseFormat' => 'format', - 'CreateRunRequestResponseFormatEnumeration' => 'mode', - 'CreateRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', - 'CreateRunRequestToolChoiceEnumeration' => 'mode', - 'CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat' => - 'format', - 'CreateThreadAndRunRequestResponseFormatEnumeration' => 'mode', - 'CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', - 'CreateThreadAndRunRequestToolChoiceEnumeration' => 'mode', 'MessageContentImageFileObject' => 'imageFile', + 'MessageDeltaContentImageFileObject' => 'imageFile', + 'MessageContentTextObject' => 'text', + 'MessageDeltaContentTextObject' => 'text', 'MessageContentImageUrlObject' => 'imageUrl', 'MessageContentTextAnnotationsFileCitationObject' => 'fileCitation', - 'MessageContentTextAnnotationsFilePathObject' => 'filePath', - 'MessageContentTextObject' => 'text', - 'MessageContentRefusalObject' => 'refusal', - 'MessageDeltaContentImageFileObject' => 'imageFile', - 'MessageDeltaContentRefusalObject' => 'refusal', - 'MessageDeltaContentImageUrlObject' => 'imageUrl', 'MessageDeltaContentTextAnnotationsFileCitationObject' => 'fileCitation', + 'MessageContentTextAnnotationsFilePathObject' => 'filePath', 'MessageDeltaContentTextAnnotationsFilePathObject' => 'filePath', - 'MessageDeltaContentTextObject' => 'text', - 'ModifyAssistantRequestResponseFormatAssistantsResponseFormat' => - 'format', - 'ModifyAssistantRequestResponseFormatEnumeration' => 'mode', 'RunModelEnumeration' => 'model', 'RunModelString' => 'modelId', - 'RunObjectResponseFormatAssistantsResponseFormat' => 'format', - 'RunObjectResponseFormatEnumeration' => 'mode', - 'RunObjectToolChoiceAssistantsNamedToolChoice' => 'tool', - 'RunObjectToolChoiceEnumeration' => 'mode', - 'RunStepDeltaStepDetailsMessageCreationObject' => 'messageCreation', + 'ThreadAndRunModelEnumeration' => 'model', + 'ThreadAndRunModelString' => 'modelId', + 'RunStepDetailsToolCallsCodeObject' => 'codeInterpreter', 'RunStepDeltaStepDetailsToolCallsCodeObject' => 'codeInterpreter', - 'RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' => 'image', - 'RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' => 'logs', + 'RunStepDetailsToolCallsFileSearchObject' => 
'fileSearch', 'RunStepDeltaStepDetailsToolCallsFileSearchObject' => 'fileSearch', + 'RunStepDetailsToolCallsFunctionObject' => 'function', 'RunStepDeltaStepDetailsToolCallsFunctionObject' => 'function', - 'RunStepDeltaStepDetailsToolCallsObject' => 'toolCalls', - 'RunStepDetailsMessageCreationObject' => 'messageCreation', - 'RunStepDetailsToolCallsCodeObject' => 'codeInterpreter', - 'RunStepDetailsToolCallsCodeOutputImageObject' => 'image', 'RunStepDetailsToolCallsCodeOutputLogsObject' => 'logs', - 'RunStepDetailsToolCallsFileSearchObject' => 'fileSearch', - 'RunStepDetailsToolCallsFunctionObject' => 'function', + 'RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' => 'logs', + 'RunStepDetailsToolCallsCodeOutputImageObject' => 'image', + 'RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' => 'image', + 'RunStepDetailsMessageCreationObject' => 'messageCreation', + 'RunStepDeltaStepDetailsMessageCreationObject' => 'messageCreation', 'RunStepDetailsToolCallsObject' => 'toolCalls', - 'ThreadAndRunModelEnumeration' => 'model', - 'ThreadAndRunModelString' => 'modelId', + 'RunStepDeltaStepDetailsToolCallsObject' => 'toolCalls', + 'CreateRunRequestResponseFormatEnumeration' => 'mode', + 'CreateThreadAndRunRequestResponseFormatEnumeration' => 'mode', + 'RunObjectResponseFormatEnumeration' => 'mode', + 'CreateAssistantRequestResponseFormatEnumeration' => 'mode', + 'ModifyAssistantRequestResponseFormatEnumeration' => 'mode', + 'CreateRunRequestResponseFormatAssistantsResponseFormat' => 'format', + 'CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat' => + 'format', + 'RunObjectResponseFormatAssistantsResponseFormat' => 'format', + 'CreateAssistantRequestResponseFormatAssistantsResponseFormat' => + 'format', + 'ModifyAssistantRequestResponseFormatAssistantsResponseFormat' => + 'format', + 'CreateRunRequestToolChoiceEnumeration' => 'mode', + 'CreateThreadAndRunRequestToolChoiceEnumeration' => 'mode', + 'RunObjectToolChoiceEnumeration' => 'mode', + 'CreateRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', + 'CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', + 'RunObjectToolChoiceAssistantsNamedToolChoice' => 'tool', + 'CreateMessageRequestContentString' => 'text', + 'CreateMessageRequestContentListMessageContent' => 'parts', _ => null, }; diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index b7333f2c..9490261d 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -4,7 +4,7 @@ openapi: 3.0.0 info: title: OpenAI API description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. - version: "2.3.0" + version: "2.0.0" termsOfService: https://openai.com/policies/terms-of-use contact: name: OpenAI Support @@ -310,7 +310,7 @@ paths: schema: type: string # ideally this will be an actual ID, so this will always work from browser - example: gpt-4o-mini + example: gpt-3.5-turbo description: The ID of the model to use for this request responses: "200": @@ -330,7 +330,7 @@ paths: required: true schema: type: string - example: ft:gpt-4o-mini:acemeco:suffix:abc123 + example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 description: The model to delete responses: "200": @@ -796,16 +796,6 @@ paths: schema: type: string description: The ID of the thread to run. - - name: include - in: query - description: &include_param_description | - A list of additional fields to include in the response. 
Currently the only supported value is - `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. - - See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) - for more information. - schema: - type: string requestBody: required: true content: @@ -978,11 +968,6 @@ paths: description: *pagination_before_param_description schema: type: string - - name: include - in: query - description: *include_param_description - schema: - type: string responses: "200": description: OK @@ -1015,11 +1000,6 @@ paths: schema: type: string description: The ID of the run step to retrieve. - - name: include - in: query - description: *include_param_description - schema: - type: string responses: "200": description: OK @@ -1047,7 +1027,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -1173,7 +1153,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -1189,7 +1169,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: [ "in_progress", "completed", "failed", "cancelled" ] + enum: ["in_progress", "completed", "failed", "cancelled"] responses: "200": description: OK @@ -1391,7 +1371,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -1407,7 +1387,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: [ "in_progress", "completed", "failed", "cancelled" ] + enum: ["in_progress", "completed", "failed", "cancelled"] responses: "200": description: OK @@ -1485,7 +1465,7 @@ paths: operationId: cancelBatch tags: - Batch - summary: Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file. + summary: Cancels an in-progress batch. parameters: - in: path name: batch_id @@ -1815,10 +1795,8 @@ components: properties: model: title: ChatCompletionModel - description: | - ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) - table for details on which models work with the Chat API. - example: "gpt-4o" + description: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + example: "gpt-4-turbo" anyOf: - type: string description: The ID of the model to use for this request. @@ -1828,7 +1806,6 @@ components: Available completion models. Mind that the list may not be exhaustive nor up-to-date. 
enum: [ - "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -1843,9 +1820,6 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -1853,15 +1827,9 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", - "o1-mini", - "o1-mini-2024-09-12", - "o1-preview", - "o1-preview-2024-09-12", ] messages: - description: | - A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). type: array minItems: 1 items: @@ -1882,39 +1850,22 @@ components: description: | Modify the likelihood of specified tokens appearing in the completion. - Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias - value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to - sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase - likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the - relevant token. + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. logprobs: - description: | - Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of - each output token returned in the `content` of `message`. + description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. type: boolean nullable: true top_logprobs: - description: | - An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, - each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. type: integer minimum: 0 maximum: 20 nullable: true max_tokens: description: | - The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat - completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated - via API. - - This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with - [o1 series models](https://platform.openai.com/docs/guides/reasoning). - type: integer - nullable: true - max_completion_tokens: - description: | - An upper bound for the number of tokens that can be generated for a completion, including visible output - tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). 
+ The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. type: integer nullable: true n: @@ -1924,9 +1875,7 @@ components: default: 1 example: 1 nullable: true - description: | - How many chat completion choices to generate for each input message. Note that you will be charged based on - the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. presence_penalty: type: number default: 0 @@ -1935,7 +1884,21 @@ components: nullable: true description: *completions_presence_penalty_description response_format: - $ref: "#/components/schemas/ResponseFormat" + title: ChatCompletionResponseFormat + type: object + description: | + An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + properties: + type: + type: string + enum: [ "text", "json_object" ] + example: "json_object" + default: "text" + description: Must be one of `text` or `json_object`. seed: type: integer # minimum: -9223372036854775808 # The value can't be represented exactly in JavaScript @@ -1943,27 +1906,8 @@ components: nullable: true description: | This feature is in Beta. - If specified, our system will make a best effort to sample deterministically, such that repeated requests - with the same `seed` and parameters should return the same result. - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to - monitor changes in the backend. - service_tier: - description: | - Specifies the latency tier to use for processing the request. This parameter is relevant for customers - subscribed to the scale tier service: - - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits - until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the - default service tier with a lower uptime SLA and no latency guarantee. - - If set to 'default', the request will be processed using the default service tier with a lower uptime - SLA and no latency guarantee. - - When not set, the default behavior is 'auto'. - - When this parameter is set, the response body will include the `service_tier` utilized. 
- type: string - enum: [ "auto", "default" ] - nullable: true - default: null + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: title: ChatCompletionStop description: | @@ -1981,10 +1925,8 @@ components: type: string stream: description: > - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. - [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). type: boolean nullable: true default: false @@ -2008,10 +1950,9 @@ components: description: *completions_top_p_description tools: type: array - description: | + description: > A list of tools the model may call. Currently, only functions are supported as a tool. - Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are - supported. + Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. items: $ref: "#/components/schemas/ChatCompletionTool" tool_choice: @@ -2021,9 +1962,8 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the - model to call that tool. - + Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + `none` is the default when no tools are present. `auto` is the default if tools are present. oneOf: - type: string @@ -2032,15 +1972,8 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - enum: [ none, auto, required ] + enum: [none, auto, required] - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" - parallel_tool_calls: ¶llel_tool_calls - description: | - Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - during tool use. - type: boolean - default: null - nullable: true user: *end_user_param_configuration function_call: title: ChatCompletionFunctionCall @@ -2051,8 +1984,7 @@ components: Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. 
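# --- Editor's annotation (illustrative sketch, not part of the upstream patch) ---
# The `tool_choice` field defined above accepts either a mode string or a named
# tool; both forms are sketched below. `my_function` is the placeholder name used
# in the schema's own description.
#
#   "tool_choice": "none" | "auto" | "required"
#
#   or, to force a specific function:
#
#   "tool_choice": {"type": "function", "function": {"name": "my_function"}}
#
# Per the description above, `none` is the default when no tools are present and
# `auto` is the default when tools are present.
# ---------------------------------------------------------------------------------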
- Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that - function. + Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. oneOf: @@ -2113,12 +2045,13 @@ components: default: user description: The role of the messages author, in this case `user`. content: + # TODO extract to ChatCompletionMessageContent once generator bug fixed description: The contents of the user message. oneOf: - type: string - description: The text contents of the user message. + description: The text contents of the message. - type: array - description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. + description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-vision-preview` model. items: $ref: "#/components/schemas/ChatCompletionMessageContentPart" minItems: 1 @@ -2140,10 +2073,6 @@ components: type: string description: | The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. - refusal: - nullable: true - type: string - description: The refusal message by the assistant. name: type: string description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. @@ -2195,12 +2124,11 @@ components: oneOf: - $ref: "#/components/schemas/ChatCompletionMessageContentPartText" - $ref: "#/components/schemas/ChatCompletionMessageContentPartImage" - - $ref: "#/components/schemas/ChatCompletionMessageContentPartRefusal" discriminator: propertyName: type ChatCompletionMessageContentPartText: type: object - description: A text content part of a message. + description: A text content part of a user message. properties: type: $ref: "#/components/schemas/ChatCompletionMessageContentPartType" @@ -2213,7 +2141,7 @@ components: - text ChatCompletionMessageContentPartImage: type: object - description: An image content part of a user message. + title: Image content part properties: type: $ref: "#/components/schemas/ChatCompletionMessageContentPartType" @@ -2238,25 +2166,9 @@ components: - url required: - image_url - ChatCompletionMessageContentPartRefusal: - type: object - description: A refusal content part of a message. - properties: - type: - $ref: "#/components/schemas/ChatCompletionMessageContentPartType" - default: refusal - description: The type of the content part, in this case `refusal`. - refusal: - type: string - description: The refusal message generated by the model. - required: - - refusal ChatCompletionMessageContentPartType: type: string - enum: - - text - - image_url - - refusal + enum: [ "text", "image_url" ] description: The type of the content part. ChatCompletionMessageRole: type: string @@ -2290,120 +2202,18 @@ components: properties: name: type: string - description: | - The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a - maximum length of 64. + description: The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. description: type: string - description: | - A description of what the function does, used by the model to choose when and how to call the function. + description: A description of what the function does, used by the model to choose when and how to call the function. parameters: $ref: "#/components/schemas/FunctionParameters" - strict: - type: boolean - nullable: true - default: false - description: | - Whether to enable strict schema adherence when generating the function call. If set to true, the model will - follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when - `strict` is `true`. Learn more about Structured Outputs in the - [function calling guide](](https://platform.openai.com/docs/guides/function-calling). required: - name FunctionParameters: type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." additionalProperties: true - ResponseFormat: - type: object - description: | - An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer - than `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model - will match your supplied JSON schema. - Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is - valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system - or user message. Without this, the model may generate an unending stream of whitespace until the generation - reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message - content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded - `max_tokens` or the conversation exceeded the max context length. - oneOf: - - $ref: "#/components/schemas/ResponseFormatText" - - $ref: "#/components/schemas/ResponseFormatJsonObject" - - $ref: "#/components/schemas/ResponseFormatJsonSchema" - discriminator: - propertyName: type - ResponseFormatType: - type: string - enum: - - text - - json_object - - json_schema - description: The type of response format being defined. - ResponseFormatText: - type: object - description: "The model should respond with plain text." 
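# --- Editor's annotation (illustrative sketch, not part of the upstream patch) ---
# A function definition shaped after the `ChatCompletionFunctions` /
# `FunctionParameters` schemas above; it mirrors the `get_current_weather`
# example that appears later in this spec's request examples. `parameters` is an
# ordinary JSON Schema object, and omitting it declares a function with an empty
# parameter list.
#
#   {
#     "name": "get_current_weather",
#     "description": "Get the current weather in a given location",
#     "parameters": {
#       "type": "object",
#       "properties": {
#         "location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"},
#         "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
#       },
#       "required": ["location"]
#     }
#   }
# ---------------------------------------------------------------------------------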
- properties: - type: - $ref: "#/components/schemas/ResponseFormatType" - default: "text" - ResponseFormatJsonObject: - type: object - description: "The model should respond with a JSON object." - properties: - type: - $ref: "#/components/schemas/ResponseFormatType" - default: "json_object" - ResponseFormatJsonSchema: - type: object - description: "The model should respond with a JSON object that adheres to the specified schema." - properties: - type: - $ref: "#/components/schemas/ResponseFormatType" - default: "json_schema" - json_schema: - $ref: "#/components/schemas/JsonSchemaObject" - required: - - json_schema - JsonSchemaObject: - type: object - description: "A JSON Schema object." - properties: - name: - type: string - description: | - The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum - length of 64. - description: - type: string - description: | - A description of what the response format is for, used by the model to determine how to respond in the - format. - schema: - type: object - description: | - The schema for the response format, described as a JSON Schema object. - additionalProperties: true - strict: - type: boolean - nullable: true - default: false - description: | - Whether to enable strict schema adherence when generating the output. If set to true, the model will always - follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when - `strict` is `true`. To learn more, read the - [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - required: - - name - - schema ChatCompletionTool: type: object description: A tool the model may use. @@ -2480,8 +2290,6 @@ components: model: type: string description: The model used for the chat completion. - service_tier: - $ref: "#/components/schemas/ServiceTier" system_fingerprint: type: string description: | @@ -2534,14 +2342,6 @@ components: "content_filter", "function_call", ] - ServiceTier: - description: | - The service tier used for processing the request. This field is only included if the `service_tier` parameter - is specified in the request. - type: string - enum: [ "scale", "default" ] - example: "scale" - nullable: true ChatCompletionLogprobs: &chat_completion_response_logprobs description: Log probability information for the choice. type: object @@ -2553,12 +2353,8 @@ components: items: $ref: "#/components/schemas/ChatCompletionTokenLogprob" nullable: true - refusal: - description: A list of message refusal tokens with log probability information. - type: array - items: - $ref: "#/components/schemas/ChatCompletionTokenLogprob" - nullable: true + required: + - content ChatCompletionTokenLogprob: type: object description: Log probability information for a token. @@ -2616,8 +2412,6 @@ components: model: type: string description: The model to generate the completion. 
- service_tier: - $ref: "#/components/schemas/ServiceTier" system_fingerprint: type: string description: | @@ -2632,10 +2426,10 @@ components: $ref: "#/components/schemas/CompletionUsage" required: - choices - # - created # Made nullable to support FastChat API which doesn't return this field with some models + - created # - id # Made nullable to support OpenRouter API which doesn't return this field with some models # - model # Made nullable to support TogetherAI API which doesn't return this field with some models - # - object # Made nullable to support FastChat API which doesn't return this field with some models + - object ChatCompletionStreamResponseChoice: type: object description: A choice the model generated for the input prompt. @@ -2661,10 +2455,6 @@ components: type: string description: The contents of the chunk message. nullable: true - refusal: - type: string - description: The refusal message generated by the model. - nullable: true function_call: $ref: "#/components/schemas/ChatCompletionStreamMessageFunctionCall" tool_calls: @@ -2714,19 +2504,10 @@ components: total_tokens: type: integer description: Total number of tokens used in the request (prompt + completion). - completion_tokens_details: - $ref: "#/components/schemas/CompletionTokensDetails" required: - prompt_tokens - completion_tokens - total_tokens - CompletionTokensDetails: - type: object - description: Breakdown of tokens used in a completion. - properties: - reasoning_tokens: - type: integer - description: Tokens generated by the model for reasoning. CreateEmbeddingRequest: type: object description: Request object for the Create embedding endpoint. @@ -2872,7 +2653,7 @@ components: description: | The name of the model to fine-tune. You can select one of the [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - example: "gpt-4o-mini" + example: "gpt-3.5-turbo" anyOf: - type: string description: The ID of the model to use for this request. @@ -2880,20 +2661,15 @@ components: title: FineTuningModels description: | Available fine-tuning models. Mind that the list may not be exhaustive nor up-to-date. - enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] + enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo" ] training_file: description: | The ID of an uploaded file that contains training data. See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose - `fine-tune`. + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. - The contents of the file should differ depending on if the model uses the - [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or - [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. - See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. type: string example: "file-abc123" @@ -2901,12 +2677,12 @@ components: $ref: "#/components/schemas/FineTuningJobHyperparameters" suffix: description: | - A string of up to 64 characters that will be added to your fine-tuned model name. + A string of up to 18 characters that will be added to your fine-tuned model name. - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. 
+ For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. type: string minLength: 1 - maxLength: 64 + maxLength: 40 default: null nullable: true validation_file: @@ -3032,7 +2808,7 @@ components: type: string description: | The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. - enum: [ "wandb" ] + enum: ["wandb"] wandb: id: FineTuningIntegrationWandB type: object @@ -3105,10 +2881,8 @@ components: n_epochs: title: FineTuningNEpochs description: | - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number - manually, we support any number between 1 and 50 epochs. + The number of epochs to train the model for. An epoch refers to one + full cycle through the training dataset. oneOf: - type: string title: FineTuningNEpochsOptions @@ -3169,7 +2943,7 @@ components: object: type: string description: The object type, which is always "list". - enum: [ list ] + enum: [list] first_id: type: string description: The ID of the first checkpoint in the list. @@ -3262,7 +3036,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job.checkpoint". - enum: [ fine_tuning.job.checkpoint ] + enum: [fine_tuning.job.checkpoint] required: - created_at - fine_tuning_job_id @@ -3622,8 +3396,7 @@ components: nullable: true tools: description: &assistant_tools_param_description | - A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of - types `code_interpreter`, `file_search`, or `function`. + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. default: [ ] type: array maxItems: 128 @@ -3633,16 +3406,13 @@ components: $ref: "#/components/schemas/ToolResources" metadata: description: &metadata_description | - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional - information about the object in a structured format. Keys can be a maximum of 64 characters long and values - can be a maxium of 512 characters long. + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. type: object additionalProperties: true nullable: true temperature: description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. type: number minimum: 0 maximum: 2 @@ -3657,38 +3427,23 @@ components: example: 1 nullable: true description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results - of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability - mass are considered. 
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. response_format: - description: &assistant_response_format | - Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures - the model will match your supplied JSON schema. Learn more in the - [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + description: | + Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates - is valid JSON. + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - system or user message. Without this, the model may generate an unending stream of whitespace until the - generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note - that the message content may be partially cut off if `finish_reason="length"`, which indicates the - generation exceeded `max_tokens` or the conversation exceeded the max context length. + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: - type: string title: AssistantResponseFormatMode description: > `auto` is the default value - enum: [ auto ] - default: auto - - $ref: "#/components/schemas/ResponseFormat" + enum: [none, auto] + - $ref: "#/components/schemas/AssistantsResponseFormat" required: - id - object @@ -3717,7 +3472,6 @@ components: Available assistant models. Mind that the list may not be exhaustive nor up-to-date. 
enum: [ - "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -3732,9 +3486,6 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -3742,10 +3493,6 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", - "o1-mini", - "o1-mini-2024-09-12", - "o1-preview", - "o1-preview-2024-09-12", ] name: description: *assistant_name_param_description @@ -3777,7 +3524,8 @@ components: additionalProperties: true nullable: true temperature: - description: *run_temperature_description + description: &run_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. type: number minimum: 0 maximum: 2 @@ -3791,17 +3539,24 @@ components: default: 1 example: 1 nullable: true - description: *run_top_p_description + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. response_format: - description: *assistant_response_format + description: | + Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: - type: string title: CreateAssistantResponseFormatMode description: > `auto` is the default value - enum: [ auto ] - default: auto - - $ref: "#/components/schemas/ResponseFormat" + enum: [none, auto] + - $ref: "#/components/schemas/AssistantsResponseFormat" required: - model ModifyAssistantRequest: @@ -3864,17 +3619,24 @@ components: default: 1 example: 1 nullable: true - description: *run_top_p_description + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. response_format: - description: *assistant_response_format + description: | + Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
+ + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: - type: string title: ModifyAssistantResponseFormatMode description: > `auto` is the default value - enum: [ auto ] - default: auto - - $ref: "#/components/schemas/ResponseFormat" + enum: [none, auto] + - $ref: "#/components/schemas/AssistantsResponseFormat" DeleteAssistantResponse: type: object description: Represents a deleted response returned by the Delete assistant endpoint. @@ -3948,48 +3710,7 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - default: file_search - file_search: - type: object - description: Overrides for the file search tool. - properties: - max_num_results: - type: integer - minimum: 1 - maximum: 50 - description: | - The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models - and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. - - Note that the file search tool may output fewer than `max_num_results` results. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) - for more information. - ranking_options: - $ref: "#/components/schemas/FileSearchRankingOptions" - required: - - type - FileSearchRankingOptions: - type: object - description: | - The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and - a score_threshold of 0. - - See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) - for more information. - properties: - ranker: - $ref: "#/components/schemas/FileSearchRanker" - score_threshold: - type: number - description: The score threshold for the file search. All values must be a floating point number between 0 and 1. - minimum: 0 - maximum: 1 - required: - - score_threshold - FileSearchRanker: - type: string - description: The ranker to use for the file search. If not specified will use the `auto` ranker. - enum: [ "auto", "default_2024_08_21" ] + default: "file_search" AssistantToolsFunction: type: object description: Function tool @@ -4009,7 +3730,7 @@ components: type: type: string title: AssistantsToolType - enum: [ "function", "code_interpreter", "file_search" ] + enum: ["function", "code_interpreter", "file_search"] description: The type of the tool. If type is `function`, the function name must be set function: $ref: "#/components/schemas/AssistantsFunctionCallOption" @@ -4023,6 +3744,18 @@ components: description: The name of the function to call. required: - name + AssistantsResponseFormat: + type: object + description: | + An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. 
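# --- Editor's annotation (illustrative sketch, not part of the upstream patch) ---
# Independent request fragments showing how the assistants-side schemas above are
# used, with hypothetical values; these are separate illustrations, not one
# combined request.
#
#   # a tools list (any of the three tool types defined above):
#   "tools": [{"type": "code_interpreter"}, {"type": "file_search"}]
#
#   # a named tool choice (AssistantsNamedToolChoice):
#   "tool_choice": {"type": "function", "function": {"name": "my_function"}}
#
#   # a response format (AssistantsResponseFormat); per the description above,
#   # `json_object` may only be combined with `function`-type tools:
#   "response_format": {"type": "json_object"}
# ---------------------------------------------------------------------------------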
+ properties: + type: + type: string + title: AssistantsResponseFormatType + enum: ["text", "json_object"] + example: "json_object" + default: "text" + description: Must be one of `text` or `json_object`. TruncationObject: type: object description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. @@ -4031,7 +3764,7 @@ components: type: string name: TruncationStrategy description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: [ "auto", "last_messages" ] + enum: ["auto", "last_messages"] last_messages: type: integer description: The number of most recent messages from the thread when constructing the context for the run. @@ -4109,7 +3842,7 @@ components: code: type: string description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - enum: [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] + enum: ["server_error", "rate_limit_exceeded", "invalid_prompt"] message: type: string description: A human-readable description of the error. @@ -4144,7 +3877,7 @@ components: reason: description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. type: string - enum: [ "max_completion_tokens", "max_prompt_tokens" ] + enum: ["max_completion_tokens", "max_prompt_tokens"] model: description: The model that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. type: string @@ -4203,19 +3936,22 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [ none, auto, required ] + enum: [none, auto, required] - $ref: "#/components/schemas/AssistantsNamedToolChoice" - parallel_tool_calls: *parallel_tool_calls response_format: - description: *assistant_response_format + description: | + Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
oneOf: - type: string title: RunObjectResponseFormatMode description: > `auto` is the default value - enum: [ auto ] - default: auto - - $ref: "#/components/schemas/ResponseFormat" + enum: [none, auto] + - $ref: "#/components/schemas/AssistantsResponseFormat" required: - id - object @@ -4240,7 +3976,6 @@ components: - max_completion_tokens - truncation_strategy - tool_choice - - parallel_tool_calls - response_format RunCompletionUsage: type: object @@ -4281,7 +4016,6 @@ components: Available models. Mind that the list may not be exhaustive nor up-to-date. enum: [ - "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -4296,9 +4030,6 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -4306,10 +4037,6 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", - "o1-mini", - "o1-mini-2024-09-12", - "o1-preview", - "o1-preview-2024-09-12", ] instructions: description: Overrides the [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. @@ -4352,7 +4079,10 @@ components: default: 1 example: 1 nullable: true - description: *run_top_p_description + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. max_prompt_tokens: type: integer nullable: true @@ -4383,19 +4113,22 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [ none, auto, required ] + enum: [none, auto, required] - $ref: "#/components/schemas/AssistantsNamedToolChoice" - parallel_tool_calls: *parallel_tool_calls response_format: - description: *assistant_response_format + description: | + Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: - type: string title: CreateRunRequestResponseFormatMode description: > `auto` is the default value - enum: [ auto ] - default: auto - - $ref: "#/components/schemas/ResponseFormat" + enum: [none, auto] + - $ref: "#/components/schemas/AssistantsResponseFormat" stream: type: boolean nullable: true @@ -4524,7 +4257,6 @@ components: Available models. 
Mind that the list may not be exhaustive nor up-to-date. enum: [ - "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -4539,9 +4271,6 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -4549,10 +4278,6 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", - "o1-mini", - "o1-mini-2024-09-12", - "o1-preview", - "o1-preview-2024-09-12", ] instructions: description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. @@ -4618,19 +4343,22 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [ none, auto, required ] + enum: [none, auto, required] - $ref: "#/components/schemas/AssistantsNamedToolChoice" - parallel_tool_calls: *parallel_tool_calls response_format: - description: *assistant_response_format + description: | + Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: - type: string title: CreateThreadAndRunRequestResponseFormatMode description: > `auto` is the default value - enum: [ auto ] - default: auto - - $ref: "#/components/schemas/ResponseFormat" + enum: [none, auto] + - $ref: "#/components/schemas/AssistantsResponseFormat" stream: type: boolean nullable: true @@ -4708,7 +4436,7 @@ components: type: array description: | A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + default: [] maxItems: 20 items: type: string @@ -4742,8 +4470,6 @@ components: maxItems: 10000 items: type: string - chunking_strategy: - $ref: "#/components/schemas/ChunkingStrategyRequestParam" metadata: type: object description: | @@ -4818,7 +4544,7 @@ components: description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. type: string nullable: true - enum: [ "in_progress", "incomplete", "completed" ] + enum: ["in_progress", "incomplete", "completed"] incomplete_details: id: MessageIncompleteDetails type: object @@ -4915,7 +4641,7 @@ components: object: description: The object type, which is always `thread.message.delta`. 
type: string - enum: [ "thread.message.delta" ] + enum: ["thread.message.delta"] delta: $ref: "#/components/schemas/MessageDelta" required: @@ -4940,7 +4666,6 @@ components: - $ref: "#/components/schemas/MessageContentImageFileObject" - $ref: "#/components/schemas/MessageContentImageUrlObject" - $ref: "#/components/schemas/MessageContentTextObject" - - $ref: "#/components/schemas/MessageContentRefusalObject" discriminator: propertyName: type MessageDeltaContent: @@ -4949,8 +4674,6 @@ components: oneOf: - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" - $ref: "#/components/schemas/MessageDeltaContentTextObject" - - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" - - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" discriminator: propertyName: type CreateMessageRequest: @@ -5162,8 +4885,12 @@ components: file_id: description: The ID of the specific File the citation is from. type: string + quote: + description: The specific quote in the file. + type: string required: - file_id + - quote MessageContentTextAnnotationsFilePathObject: type: object description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. @@ -5195,20 +4922,6 @@ components: - file_path - start_index - end_index - MessageContentRefusalObject: - type: object - description: The refusal content generated by the assistant. - properties: - type: - description: Always `refusal`. - type: string - default: refusal - refusal: - type: string - nullable: false - required: - - type - - refusal MessageDeltaContentImageFileObject: type: object description: References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message. @@ -5248,9 +4961,6 @@ components: type: string image_url: $ref: "#/components/schemas/MessageContentImageUrl" - required: - - index - - type MessageDeltaContentTextObject: type: object description: The text content that is part of a message. @@ -5352,23 +5062,6 @@ components: required: - index - type - MessageDeltaContentRefusalObject: - type: object - description: The refusal content that is part of a message. - properties: - index: - type: integer - description: The index of the refusal part in the message. - type: - type: string - description: Always `refusal`. - default: refusal - refusal: - type: string - description: The refusal content generated by the assistant. - required: - - index - - type RunStepObject: type: object description: | @@ -5473,7 +5166,7 @@ components: object: description: The object type, which is always `thread.run.step.delta`. type: string - enum: [ "thread.run.step.delta" ] + enum: ["thread.run.step.delta"] delta: $ref: "#/components/schemas/RunStepDelta" required: @@ -5770,74 +5463,13 @@ components: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. file_search: - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearch" + type: object + description: For now, this is always going to be an empty object. + additionalProperties: true required: - id - type - file_search - RunStepDetailsToolCallsFileSearch: - type: object - description: The definition of the file search that was called. - properties: - ranking_options: - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject" - results: - type: array - description: The results of the file search. 
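# --- Editor's annotation (illustrative sketch, not part of the upstream patch) ---
# A rough sketch of a `file_citation` text annotation as it might appear in
# message content, based on the annotation schemas earlier in this hunk
# (`file_id` and `quote` are the required citation fields; start/end indices
# locate the annotated span in the message text). The ID, quote, and offsets are
# hypothetical, and the exact outer fields should be checked against the full
# schema.
#
#   {
#     "type": "file_citation",
#     "text": "[1]",
#     "file_citation": {"file_id": "file-abc123", "quote": "the quoted passage"},
#     "start_index": 120,
#     "end_index": 123
#   }
# ---------------------------------------------------------------------------------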
- items: - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" - RunStepDetailsToolCallsFileSearchRankingOptionsObject: - type: object - description: The ranking options for the file search. - properties: - ranker: - $ref: "#/components/schemas/FileSearchRanker" - score_threshold: - type: number - description: | - The score threshold for the file search. All values must be a floating point number between 0 and 1. - minimum: 0 - maximum: 1 - required: - - ranker - - score_threshold - RunStepDetailsToolCallsFileSearchResultObject: - type: object - description: A result instance of the file search. - properties: - file_id: - type: string - description: The ID of the file that result was found in. - file_name: - type: string - description: The name of the file that result was found in. - score: - type: number - description: The score of the result. All values must be a floating point number between 0 and 1. - minimum: 0 - maximum: 1 - content: - type: array - description: | - The content of the result that was found. The content is only included if requested via the include - query parameter. - items: - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultContent" - required: - - file_id - - file_name - - score - RunStepDetailsToolCallsFileSearchResultContent: - type: object - description: The content of the result that was found. - properties: - type: - type: string - description: The type of the content. - default: text - text: - type: string - description: The text content of the file. RunStepDeltaStepDetailsToolCallsFileSearchObject: type: object description: File search tool call @@ -5948,7 +5580,7 @@ components: anchor: description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." type: string - enum: [ "last_active_at" ] + enum: ["last_active_at"] days: description: The number of days after the anchor time that the vector store will expire. type: integer @@ -6007,7 +5639,7 @@ components: description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. type: string name: VectorStoreStatus - enum: [ "expired", "in_progress", "completed" ] + enum: ["expired", "in_progress", "completed"] expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" expires_at: @@ -6037,23 +5669,23 @@ components: description: Request object for the Create assistant file endpoint. additionalProperties: false properties: - name: - description: The name of the vector store. - type: string file_ids: description: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. type: array maxItems: 500 items: type: string + name: + description: The name of the vector store. + type: string expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" - chunking_strategy: - $ref: "#/components/schemas/ChunkingStrategyRequestParam" metadata: description: *metadata_description type: object nullable: true + required: + - name UpdateVectorStoreRequest: type: object description: Request object for the Update vector store endpoint. @@ -6142,7 +5774,7 @@ components: description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. 
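# --- Editor's annotation (illustrative sketch, not part of the upstream patch) ---
# A minimal vector-store creation body using the `CreateVectorStoreRequest`
# fields above (`name`, `file_ids`, and `expires_after` with the
# `last_active_at` anchor). The store name and file ID are hypothetical
# placeholders.
#
#   {
#     "name": "Support FAQ",
#     "file_ids": ["file-abc123"],
#     "expires_after": {"anchor": "last_active_at", "days": 7}
#   }
# ---------------------------------------------------------------------------------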
type: string title: VectorStoreFileStatus - enum: [ "in_progress", "completed", "cancelled", "failed" ] + enum: ["in_progress", "completed", "cancelled", "failed"] last_error: id: VectorStoreFileLastError type: object @@ -6154,9 +5786,10 @@ components: description: One of `server_error` or `rate_limit_exceeded`. enum: [ - "server_error", - "unsupported_file", - "invalid_file", + "internal_error", + "file_not_found", + "parsing_error", + "unhandled_mime_type", ] message: type: string @@ -6164,8 +5797,6 @@ components: required: - code - message - chunking_strategy: - $ref: "#/components/schemas/ChunkingStrategyResponseParam" required: - id - object @@ -6174,97 +5805,6 @@ components: - vector_store_id - status - last_error - ChunkingStrategyRequestParam: - type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - oneOf: - - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" - - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" - discriminator: - propertyName: type - AutoChunkingStrategyRequestParam: - type: object - description: | - Auto Chunking Strategy, the default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` - and `chunk_overlap_tokens` of `400`. - additionalProperties: false - properties: - type: - type: string - description: Always `auto`. - default: auto - required: - - type - StaticChunkingStrategyRequestParam: - type: object - description: Static chunking strategy - additionalProperties: false - properties: - type: - type: string - description: Always `static`. - default: static - static: - $ref: "#/components/schemas/StaticChunkingStrategy" - required: - - type - - static - StaticChunkingStrategy: - type: object - description: Static chunking strategy - additionalProperties: false - properties: - max_chunk_size_tokens: - type: integer - minimum: 100 - maximum: 4096 - description: | - The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the - maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: | - The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - ChunkingStrategyResponseParam: - type: object - description: The chunking strategy used to chunk the file(s). - oneOf: - - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" - - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" - discriminator: - propertyName: type - OtherChunkingStrategyResponseParam: - type: object - description: | - Other Chunking Strategy. This is returned when the chunking strategy is unknown. Typically, this is because - the file was indexed before the `chunking_strategy` concept was introduced in the API. - additionalProperties: false - properties: - type: - type: string - description: Always `other`. - default: other - required: - - type - StaticChunkingStrategyResponseParam: - type: object - description: Static Chunking Strategy. - additionalProperties: false - properties: - type: - type: string - description: Always `static`. - default: static - static: - $ref: "#/components/schemas/StaticChunkingStrategy" - required: - - type - - static CreateVectorStoreFileRequest: type: object description: Request object for the Create vector store file endpoint. 
@@ -6272,8 +5812,6 @@ components: file_id: description: A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. type: string - chunking_strategy: - $ref: "#/components/schemas/ChunkingStrategyRequestParam" required: - file_id ListVectorStoreFilesResponse: @@ -6343,7 +5881,7 @@ components: status: description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. type: string - enum: [ "in_progress", "completed", "cancelled", "failed" ] + enum: ["in_progress", "completed", "cancelled", "failed"] file_counts: type: object description: The number of files per status. @@ -6387,8 +5925,6 @@ components: maxItems: 500 items: type: string - chunking_strategy: - $ref: "#/components/schemas/ChunkingStrategyRequestParam" required: - file_ids AssistantStreamEvent: @@ -6476,7 +6012,7 @@ components: - data RunStepStreamEvent: type: object - description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. + description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. properties: event: $ref: "#/components/schemas/EventType" @@ -6487,7 +6023,7 @@ components: - data RunStepStreamDeltaEvent: type: object - description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. + description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. properties: event: $ref: "#/components/schemas/EventType" @@ -6581,7 +6117,7 @@ components: See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. endpoint: $ref: "#/components/schemas/BatchEndpoint" completion_window: @@ -6594,16 +6130,11 @@ components: nullable: true BatchEndpoint: type: string - enum: - [ - "/v1/chat/completions", - "/v1/embeddings", - "/v1/completions", - ] + enum: ["/v1/chat/completions", "/v1/embeddings", "/v1/completions"] description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. BatchCompletionWindow: type: string - enum: [ "24h" ] + enum: ["24h"] description: The time frame within which the batch should be processed. Currently only `24h` is supported. Batch: type: object @@ -6613,7 +6144,7 @@ components: type: string object: type: string - enum: [ batch ] + enum: [batch] description: The object type, which is always `batch`. endpoint: $ref: "#/components/schemas/BatchEndpoint" @@ -6733,7 +6264,7 @@ components: description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. method: type: string - enum: [ "POST" ] + enum: ["POST"] description: The HTTP method to be used for the request. Currently only `POST` is supported. 
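# --- Editor's annotation (illustrative sketch, not part of the upstream patch) ---
# What one line of a batch input JSONL file might look like, based on the
# `BatchRequestInput` fields above (`custom_id`, `method`, `url`); the
# per-request payload is assumed to travel in a `body` field, and all values are
# hypothetical. The JSONL file is uploaded with purpose `batch` and then
# referenced from the create-batch request by its file ID (field name assumed to
# be `input_file_id`).
#
#   {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions",
#    "body": {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello!"}]}}
#
# A matching create-batch body would then name that file, one of the supported
# endpoints, and the only supported completion window:
#
#   {"input_file_id": "file-abc123", "endpoint": "/v1/chat/completions", "completion_window": "24h"}
# ---------------------------------------------------------------------------------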
url: type: string @@ -6798,7 +6329,7 @@ components: object: type: string description: The object type, which is always `list`. - enum: [ list ] + enum: [list] required: - object - data diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index d9b16b55..fa38d7f7 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -2,7 +2,7 @@ openapi: 3.0.0 info: title: OpenAI API description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. - version: "2.3.0" + version: "2.0.0" termsOfService: https://openai.com/policies/terms-of-use contact: name: OpenAI Support @@ -16,7 +16,7 @@ tags: - name: Assistants description: Build Assistants that can call models and use tools. - name: Audio - description: Turn audio into text or text into audio. + description: Learn how to turn audio into text or text into audio. - name: Chat description: Given a list of messages comprising a conversation, the model will return a response. - name: Completions @@ -29,16 +29,12 @@ tags: description: Create large batches of API requests to run asynchronously. - name: Files description: Files are used to upload documents that can be used with features like Assistants and Fine-tuning. - - name: Uploads - description: Use Uploads to upload large files in multiple parts. - name: Images description: Given a prompt and/or an input image, the model will generate a new image. - name: Models description: List and describe the various models available in the API. - name: Moderations description: Given a input text, outputs if the model classifies it as potentially harmful. - - name: Audit Logs - description: List user actions and configuration changes within this organization. paths: # Note: When adding an endpoint, make sure you also add it in the `groups` section, in the end of this file, # under the appropriate group @@ -91,7 +87,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -99,29 +95,29 @@ paths: {"role": "user", "content": "Hello!"} ] ) - + print(completion.choices[0].message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ messages: [{ role: "system", content: "You are a helpful assistant." 
}], model: "VAR_model_id", }); - + console.log(completion.choices[0]); } - + main(); response: &chat_completion_example | { "id": "chatcmpl-123", "object": "chat.completion", "created": 1677652288, - "model": "gpt-4o-mini", + "model": "gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, @@ -145,7 +141,7 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "model": "gpt-4o", + "model": "gpt-4-turbo", "messages": [ { "role": "user", @@ -167,11 +163,11 @@ paths: }' python: | from openai import OpenAI - + client = OpenAI() - + response = client.chat.completions.create( - model="gpt-4o", + model="gpt-4-turbo", messages=[ { "role": "user", @@ -179,25 +175,23 @@ paths: {"type": "text", "text": "What's in this image?"}, { "type": "image_url", - "image_url": { - "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", - } + "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", }, ], } ], max_tokens=300, ) - + print(response.choices[0]) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.chat.completions.create({ - model: "gpt-4o", + model: "gpt-4-turbo", messages: [ { role: "user", @@ -205,10 +199,9 @@ paths: { type: "text", text: "What's in this image?" }, { type: "image_url", - image_url: { - "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", - }, - } + image_url: + "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + }, ], }, ], @@ -221,7 +214,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1677652288, - "model": "gpt-4o-mini", + "model": "gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, @@ -261,7 +254,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -270,15 +263,15 @@ paths: ], stream=True ) - + for chunk in completion: print(chunk.choices[0].delta) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ model: "VAR_model_id", @@ -288,21 +281,21 @@ paths: ], stream: true, }); - + for await (const chunk of completion) { console.log(chunk.choices[0].delta.content); } } - + main(); response: &chat_completion_chunk_example | - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} - - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} - + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", 
"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} + .... - - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} - title: Functions request: curl: | @@ -310,7 +303,7 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "model": "gpt-4o", + "model": "gpt-4-turbo", "messages": [ { "role": "user", @@ -345,7 +338,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -373,13 +366,13 @@ paths: tools=tools, tool_choice="auto" ) - + print(completion) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]; const tools = [ @@ -402,24 +395,24 @@ paths: } } ]; - + const response = await openai.chat.completions.create({ - model: "gpt-4o", + model: "gpt-4-turbo", messages: messages, tools: tools, tool_choice: "auto", }); - + console.log(response); } - + main(); response: &chat_completion_function_example | { "id": "chatcmpl-abc123", "object": "chat.completion", "created": 1699896916, - "model": "gpt-4o-mini", + "model": "gpt-3.5-turbo-0125", "choices": [ { "index": 0, @@ -467,7 +460,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -476,14 +469,14 @@ paths: logprobs=True, top_logprobs=2 ) - + print(completion.choices[0].message) print(completion.choices[0].logprobs) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ messages: [{ role: "user", content: "Hello!" 
}], @@ -491,17 +484,17 @@ paths: logprobs: true, top_logprobs: 2, }); - + console.log(completion.choices[0]); } - + main(); response: | { "id": "chatcmpl-123", "object": "chat.completion", "created": 1702685778, - "model": "gpt-4o-mini", + "model": "gpt-3.5-turbo-0125", "choices": [ { "index": 0, @@ -723,7 +716,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.completions.create( model="VAR_model_id", prompt="Say this is a test", @@ -732,9 +725,9 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.completions.create({ model: "VAR_model_id", @@ -742,7 +735,7 @@ paths: max_tokens: 7, temperature: 0, }); - + console.log(completion); } main(); @@ -783,7 +776,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + for chunk in client.completions.create( model="VAR_model_id", prompt="Say this is a test", @@ -794,16 +787,16 @@ paths: print(chunk.choices[0].text) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.completions.create({ model: "VAR_model_id", prompt: "Say this is a test.", stream: true, }); - + for await (const chunk of stream) { console.log(chunk.choices[0].text) } @@ -864,7 +857,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.images.generate( model="dall-e-3", prompt="A cute baby sea otter", @@ -873,12 +866,12 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.generate({ model: "dall-e-3", prompt: "A cute baby sea otter" }); - + console.log(image.data); } main(); @@ -930,7 +923,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.images.edit( image=open("otter.png", "rb"), mask=open("mask.png", "rb"), @@ -941,16 +934,16 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.edit({ image: fs.createReadStream("otter.png"), mask: fs.createReadStream("mask.png"), prompt: "A cute baby sea otter wearing a beret", }); - + console.log(image.data); } main(); @@ -1000,7 +993,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.images.create_variation( image=open("image_edit_original.png", "rb"), n=2, @@ -1009,14 +1002,14 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.createVariation({ image: fs.createReadStream("otter.png"), }); - + console.log(image.data); } main(); @@ -1070,7 +1063,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.embeddings.create( model="text-embedding-ada-002", input="The food was delicious and the waiter...", @@ -1078,19 +1071,19 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const embedding = await openai.embeddings.create({ model: "text-embedding-ada-002", input: "The quick brown fox jumped over the lazy dog", encoding_format: "float", }); - + console.log(embedding); } - + main(); response: | { @@ -1158,7 +1151,7 @@ paths: python: | from pathlib import Path import openai - + speech_file_path = Path(__file__).parent / "speech.mp3" response = openai.audio.speech.create( model="tts-1", @@ -1170,11 +1163,11 @@ paths: import fs from "fs"; import path from 
"path"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + const speechFile = path.resolve("./speech.mp3"); - + async function main() { const mp3 = await openai.audio.speech.create({ model: "tts-1", @@ -1223,7 +1216,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( model="whisper-1", @@ -1232,15 +1225,15 @@ paths: node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), model: "whisper-1", }); - + console.log(transcription.text); } main(); @@ -1261,7 +1254,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( file=audio_file, @@ -1269,14 +1262,14 @@ paths: response_format="verbose_json", timestamp_granularities=["word"] ) - + print(transcript.words) node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), @@ -1284,7 +1277,7 @@ paths: response_format: "verbose_json", timestamp_granularities: ["word"] }); - + console.log(transcription.text); } main(); @@ -1321,7 +1314,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( file=audio_file, @@ -1329,14 +1322,14 @@ paths: response_format="verbose_json", timestamp_granularities=["segment"] ) - + print(transcript.words) node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), @@ -1344,7 +1337,7 @@ paths: response_format: "verbose_json", timestamp_granularities: ["segment"] }); - + console.log(transcription.text); } main(); @@ -1408,7 +1401,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.translations.create( model="whisper-1", @@ -1417,15 +1410,15 @@ paths: node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const translation = await openai.audio.translations.create({ file: fs.createReadStream("speech.mp3"), model: "whisper-1", }); - + console.log(translation.text); } main(); @@ -1466,21 +1459,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.files.list(); - + for await (const file of list) { console.log(file); } } - + main(); response: | { @@ -1510,13 +1503,13 @@ paths: - Files summary: | Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB. - + The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details. - - The Fine-tuning API only supports `.jsonl` files. 
The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models. - - The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input). - + + The Fine-tuning API only supports `.jsonl` files. + + The Batch API only supports `.jsonl` files up to 100 MB in size. + Please [contact us](https://help.openai.com/) if you need to increase these storage limits. requestBody: required: true @@ -1545,7 +1538,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.create( file=open("mydata.jsonl", "rb"), purpose="fine-tune" @@ -1553,18 +1546,18 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.create({ file: fs.createReadStream("mydata.jsonl"), purpose: "fine-tune", }); - + console.log(file); } - + main(); response: | { @@ -1608,19 +1601,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.delete("file-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.del("file-abc123"); - + console.log(file); } - + main(); response: | { @@ -1659,19 +1652,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.retrieve("file-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.retrieve("file-abc123"); - + console.log(file); } - + main(); response: | { @@ -1714,232 +1707,20 @@ paths: python: | from openai import OpenAI client = OpenAI() - + content = client.files.content("file-abc123") node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.content("file-abc123"); - - console.log(file); - } - - main(); - /uploads: - post: - operationId: createUpload - tags: - - Uploads - summary: | - Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it. - - Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. - - For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case: - - [Assistants](/docs/assistants/tools/file-search/supported-files) - - For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CreateUploadRequest" - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: "#/components/schemas/Upload" - x-oaiMeta: - name: Create upload - group: uploads - returns: The [Upload](/docs/api-reference/uploads/object) object with status `pending`. 
- examples: - request: - curl: | - curl https://api.openai.com/v1/uploads \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -d '{ - "purpose": "fine-tune", - "filename": "training_examples.jsonl", - "bytes": 2147483648, - "mime_type": "text/jsonl" - }' - response: | - { - "id": "upload_abc123", - "object": "upload", - "bytes": 2147483648, - "created_at": 1719184911, - "filename": "training_examples.jsonl", - "purpose": "fine-tune", - "status": "pending", - "expires_at": 1719127296 - } - - /uploads/{upload_id}/parts: - post: - operationId: addUploadPart - tags: - - Uploads - summary: | - Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. - - Each Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB. - - It is possible to add multiple Parts in parallel. You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete). - parameters: - - in: path - name: upload_id - required: true - schema: - type: string - example: upload_abc123 - description: | - The ID of the Upload. - requestBody: - required: true - content: - multipart/form-data: - schema: - $ref: "#/components/schemas/AddUploadPartRequest" - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: "#/components/schemas/UploadPart" - x-oaiMeta: - name: Add upload part - group: uploads - returns: The upload [Part](/docs/api-reference/uploads/part-object) object. - examples: - request: - curl: | - curl https://api.openai.com/v1/uploads/upload_abc123/parts - -F data="aHR0cHM6Ly9hcGkub3BlbmFpLmNvbS92MS91cGxvYWRz..." - response: | - { - "id": "part_def456", - "object": "upload.part", - "created_at": 1719185911, - "upload_id": "upload_abc123" - } - /uploads/{upload_id}/complete: - post: - operationId: completeUpload - tags: - - Uploads - summary: | - Completes the [Upload](/docs/api-reference/uploads/object). - - Within the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform. - - You can specify the order of the Parts by passing in an ordered list of the Part IDs. - - The number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed. - parameters: - - in: path - name: upload_id - required: true - schema: - type: string - example: upload_abc123 - description: | - The ID of the Upload. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CompleteUploadRequest" - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: "#/components/schemas/Upload" - x-oaiMeta: - name: Complete upload - group: uploads - returns: The [Upload](/docs/api-reference/uploads/object) object with status `completed` with an additional `file` property containing the created usable File object. 
- examples: - request: - curl: | - curl https://api.openai.com/v1/uploads/upload_abc123/complete - -d '{ - "part_ids": ["part_def456", "part_ghi789"] - }' - response: | - { - "id": "upload_abc123", - "object": "upload", - "bytes": 2147483648, - "created_at": 1719184911, - "filename": "training_examples.jsonl", - "purpose": "fine-tune", - "status": "completed", - "expires_at": 1719127296, - "file": { - "id": "file-xyz321", - "object": "file", - "bytes": 2147483648, - "created_at": 1719186911, - "filename": "training_examples.jsonl", - "purpose": "fine-tune", + console.log(file); } - } - /uploads/{upload_id}/cancel: - post: - operationId: cancelUpload - tags: - - Uploads - summary: | - Cancels the Upload. No Parts may be added after an Upload is cancelled. - parameters: - - in: path - name: upload_id - required: true - schema: - type: string - example: upload_abc123 - description: | - The ID of the Upload. - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: "#/components/schemas/Upload" - x-oaiMeta: - name: Cancel upload - group: uploads - returns: The [Upload](/docs/api-reference/uploads/object) object with status `cancelled`. - examples: - request: - curl: | - curl https://api.openai.com/v1/uploads/upload_abc123/cancel - response: | - { - "id": "upload_abc123", - "object": "upload", - "bytes": 2147483648, - "created_at": 1719184911, - "filename": "training_examples.jsonl", - "purpose": "fine-tune", - "status": "cancelled", - "expires_at": 1719127296 - } + main(); /fine_tuning/jobs: post: @@ -1948,9 +1729,9 @@ paths: - Fine-tuning summary: | Creates a fine-tuning job which begins the process of creating a new model from a given dataset. - + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
- + [Learn more about fine-tuning](/docs/guides/fine-tuning) requestBody: required: true @@ -1978,36 +1759,36 @@ paths: -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "training_file": "file-BK7bzQj3FfZFXr7DbL6xJwfo", - "model": "gpt-4o-mini" + "model": "gpt-3.5-turbo" }' python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", - model="gpt-4o-mini" + model="gpt-3.5-turbo" ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123" }); - + console.log(fineTune); } - + main(); response: | { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-4o-mini-2024-07-18", - "created_at": 1721764800, + "model": "gpt-3.5-turbo-0125", + "created_at": 1614807352, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2023,7 +1804,7 @@ paths: -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "training_file": "file-abc123", - "model": "gpt-4o-mini", + "model": "gpt-3.5-turbo", "hyperparameters": { "n_epochs": 2 } @@ -2031,36 +1812,36 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", - model="gpt-4o-mini", + model="gpt-3.5-turbo", hyperparameters={ "n_epochs":2 } ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", - model: "gpt-4o-mini", + model: "gpt-3.5-turbo", hyperparameters: { n_epochs: 2 } }); - + console.log(fineTune); } - + main(); response: | { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-4o-mini-2024-07-18", - "created_at": 1721764800, + "model": "gpt-3.5-turbo-0125", + "created_at": 1614807352, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2078,38 +1859,38 @@ paths: -d '{ "training_file": "file-abc123", "validation_file": "file-abc123", - "model": "gpt-4o-mini" + "model": "gpt-3.5-turbo" }' python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", validation_file="file-def456", - model="gpt-4o-mini" + model="gpt-3.5-turbo" ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", validation_file: "file-abc123" }); - + console.log(fineTune); } - + main(); response: | { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-4o-mini-2024-07-18", - "created_at": 1721764800, + "model": "gpt-3.5-turbo-0125", + "created_at": 1614807352, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2126,7 +1907,7 @@ paths: -d '{ "training_file": "file-abc123", "validation_file": "file-abc123", - "model": "gpt-4o-mini", + "model": "gpt-3.5-turbo", "integrations": [ { "type": "wandb", @@ -2144,8 +1925,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-4o-mini-2024-07-18", - "created_at": 1721764800, + "model": "gpt-3.5-turbo-0125", + "created_at": 1614807352, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2202,21 +1983,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = 
await openai.fineTuning.jobs.list(); - + for await (const fineTune of list) { console.log(fineTune); } } - + main(); response: | { @@ -2242,7 +2023,7 @@ paths: - Fine-tuning summary: | Get info about a fine-tuning job. - + [Learn more about fine-tuning](/docs/guides/fine-tuning) parameters: - in: path @@ -2272,19 +2053,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.retrieve("ftjob-abc123") node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.retrieve("ftjob-abc123"); - + console.log(fineTune); } - + main(); response: &fine_tuning_example | { @@ -2359,24 +2140,24 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.list_events( fine_tuning_job_id="ftjob-abc123", limit=2 ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.fineTuning.list_events(id="ftjob-abc123", limit=2); - + for await (const fineTune of list) { console.log(fineTune); } } - + main(); response: | { @@ -2385,7 +2166,7 @@ paths: { "object": "fine_tuning.job.event", "id": "ft-event-ddTJfwuMVpfLXseO0Am0Gqjm", - "created_at": 1721764800, + "created_at": 1692407401, "level": "info", "message": "Fine tuning job successfully completed", "data": null, @@ -2394,9 +2175,9 @@ paths: { "object": "fine_tuning.job.event", "id": "ft-event-tyiGuB72evQncpH87xe505Sv", - "created_at": 1721764800, + "created_at": 1692407400, "level": "info", - "message": "New fine-tuned model created: ft:gpt-4o-mini:openai::7p4lURel", + "message": "New fine-tuned model created: ft:gpt-3.5-turbo:openai::7p4lURel", "data": null, "type": "message" } @@ -2438,16 +2219,16 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.cancel("ftjob-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.cancel("ftjob-abc123"); - + console.log(fineTune); } main(); @@ -2455,8 +2236,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-4o-mini-2024-07-18", - "created_at": 1721764800, + "model": "gpt-3.5-turbo-0125", + "created_at": 1689376978, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2519,8 +2300,8 @@ paths: { "object": "fine_tuning.job.checkpoint", "id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", - "created_at": 1721764867, - "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:96olL566:ckpt-step-2000", + "created_at": 1519129973, + "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:96olL566:ckpt-step-2000", "metrics": { "full_valid_loss": 0.134, "full_valid_mean_token_accuracy": 0.874 @@ -2531,8 +2312,8 @@ paths: { "object": "fine_tuning.job.checkpoint", "id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", - "created_at": 1721764800, - "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", + "created_at": 1519129833, + "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", "metrics": { "full_valid_loss": 0.167, "full_valid_mean_token_accuracy": 0.781 @@ -2571,16 +2352,16 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.models.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.models.list(); 
- + for await (const model of list) { console.log(model); } @@ -2624,7 +2405,7 @@ paths: schema: type: string # ideally this will be an actual ID, so this will always work from browser - example: gpt-4o-mini + example: gpt-3.5-turbo description: The ID of the model to use for this request responses: "200": @@ -2645,19 +2426,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.models.retrieve("VAR_model_id") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const model = await openai.models.retrieve("VAR_model_id"); - + console.log(model); } - + main(); response: &retrieve_model_response | { @@ -2677,7 +2458,7 @@ paths: required: true schema: type: string - example: ft:gpt-4o-mini:acemeco:suffix:abc123 + example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 description: The model to delete responses: "200": @@ -2693,28 +2474,28 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/models/ft:gpt-4o-mini:acemeco:suffix:abc123 \ + curl https://api.openai.com/v1/models/ft:gpt-3.5-turbo:acemeco:suffix:abc123 \ -X DELETE \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | from openai import OpenAI client = OpenAI() - - client.models.delete("ft:gpt-4o-mini:acemeco:suffix:abc123") + + client.models.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { - const model = await openai.models.del("ft:gpt-4o-mini:acemeco:suffix:abc123"); - + const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); + console.log(model); } main(); response: | { - "id": "ft:gpt-4o-mini:acemeco:suffix:abc123", + "id": "ft:gpt-3.5-turbo:acemeco:suffix:abc123", "object": "model", "deleted": true } @@ -2754,24 +2535,24 @@ paths: python: | from openai import OpenAI client = OpenAI() - + moderation = client.moderations.create(input="I want to kill them.") print(moderation) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const moderation = await openai.moderations.create({ input: "I want to kill them." 
}); - + console.log(moderation); } main(); response: &moderation_example | { "id": "modr-XXXXX", - "model": "text-moderation-007", + "model": "text-moderation-005", "results": [ { "flagged": true, @@ -2827,7 +2608,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: &pagination_after_param_description | @@ -2862,7 +2643,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistants = client.beta.assistants.list( order="desc", limit="20", @@ -2870,18 +2651,18 @@ paths: print(my_assistants.data) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistants = await openai.beta.assistants.list({ order: "desc", limit: "20", }); - + console.log(myAssistants.data); } - + main(); response: &list_assistants_example | { @@ -2893,7 +2674,7 @@ paths: "created_at": 1698982736, "name": "Coding Tutor", "description": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "tool_resources": {}, @@ -2908,7 +2689,7 @@ paths: "created_at": 1698982718, "name": "My Assistant", "description": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "tool_resources": {}, @@ -2923,7 +2704,7 @@ paths: "created_at": 1698982643, "name": null, "description": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": null, "tools": [], "tool_resources": {}, @@ -2972,37 +2753,37 @@ paths: "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", "name": "Math Tutor", "tools": [{"type": "code_interpreter"}], - "model": "gpt-4o" + "model": "gpt-4-turbo" }' python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.create( instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.", name="Math Tutor", tools=[{"type": "code_interpreter"}], - model="gpt-4o", + model="gpt-4-turbo", ) print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.create({ instructions: "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", name: "Math Tutor", tools: [{ type: "code_interpreter" }], - model: "gpt-4o", + model: "gpt-4-turbo", }); - + console.log(myAssistant); } - + main(); response: &create_assistants_example | { @@ -3011,7 +2792,7 @@ paths: "created_at": 1698984975, "name": "Math Tutor", "description": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": "You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.", "tools": [ { @@ -3034,25 +2815,25 @@ paths: "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [{"type": "file_search"}], "tool_resources": {"file_search": {"vector_store_ids": ["vs_123"]}}, - "model": "gpt-4o" + "model": "gpt-4-turbo" }' python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.create( instructions="You are an HR bot, and you have access to files to answer employee questions about company policies.", name="HR Helper", tools=[{"type": "file_search"}], tool_resources={"file_search": {"vector_store_ids": ["vs_123"]}}, - model="gpt-4o" + model="gpt-4-turbo" ) print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.create({ instructions: @@ -3064,12 +2845,12 @@ paths: vector_store_ids: ["vs_123"] } }, - model: "gpt-4o" + model: "gpt-4-turbo" }); - + console.log(myAssistant); } - + main(); response: | { @@ -3078,7 +2859,7 @@ paths: "created_at": 1699009403, "name": "HR Helper", "description": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [ { @@ -3131,22 +2912,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.retrieve("asst_abc123") print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.retrieve( "asst_abc123" ); - + console.log(myAssistant); } - + main(); response: | { @@ -3155,7 +2936,7 @@ paths: "created_at": 1699009709, "name": "HR Helper", "description": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [ { @@ -3207,26 +2988,26 @@ paths: -d '{ "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", "tools": [{"type": "file_search"}], - "model": "gpt-4o" + "model": "gpt-4-turbo" }' python: | from openai import OpenAI client = OpenAI() - + my_updated_assistant = client.beta.assistants.update( "asst_abc123", instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", name="HR Helper", tools=[{"type": "file_search"}], - model="gpt-4o" + model="gpt-4-turbo" ) - + print(my_updated_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myUpdatedAssistant = await openai.beta.assistants.update( "asst_abc123", @@ -3235,13 +3016,13 @@ paths: "You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", name: "HR Helper", tools: [{ type: "file_search" }], - model: "gpt-4o" + model: "gpt-4-turbo" } ); - + console.log(myUpdatedAssistant); } - + main(); response: | { @@ -3250,7 +3031,7 @@ paths: "created_at": 1699009709, "name": "HR Helper", "description": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", "tools": [ { @@ -3302,17 +3083,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.beta.assistants.delete("asst_abc123") print(response) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.beta.assistants.del("asst_abc123"); - + console.log(response); } main(); @@ -3358,20 +3139,20 @@ paths: python: | from openai import OpenAI client = OpenAI() - + empty_thread = client.beta.threads.create() print(empty_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const emptyThread = await openai.beta.threads.create(); - + console.log(emptyThread); } - + main(); response: | { @@ -3400,7 +3181,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message_thread = client.beta.threads.create( messages=[ { @@ -3413,13 +3194,13 @@ paths: }, ] ) - + print(message_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const messageThread = await openai.beta.threads.create({ messages: [ @@ -3433,10 +3214,10 @@ paths: }, ], }); - + console.log(messageThread); } - + main(); response: | { @@ -3482,22 +3263,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_thread = client.beta.threads.retrieve("thread_abc123") print(my_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myThread = await openai.beta.threads.retrieve( "thread_abc123" ); - + console.log(myThread); } - + main(); response: | { @@ -3557,7 +3338,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_updated_thread = client.beta.threads.update( "thread_abc123", metadata={ @@ -3568,9 +3349,9 @@ paths: print(my_updated_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const updatedThread = await openai.beta.threads.update( "thread_abc123", @@ -3578,10 +3359,10 @@ paths: metadata: { modified: "true", user: "abc123" }, } ); - + console.log(updatedThread); } - + main(); response: | { @@ -3629,17 +3410,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.beta.threads.delete("thread_abc123") print(response) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.beta.threads.del("thread_abc123"); - + console.log(response); } main(); @@ -3676,7 +3457,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -3715,22 +3496,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + thread_messages = client.beta.threads.messages.list("thread_abc123") print(thread_messages.data) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const threadMessages = 
await openai.beta.threads.messages.list( "thread_abc123" ); - + console.log(threadMessages.data); } - + main(); response: | { @@ -3825,7 +3606,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + thread_message = client.beta.threads.messages.create( "thread_abc123", role="user", @@ -3834,18 +3615,18 @@ paths: print(thread_message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const threadMessages = await openai.beta.threads.messages.create( "thread_abc123", { role: "user", content: "How does AI work? Explain it in simple terms." } ); - + console.log(threadMessages); } - + main(); response: | { @@ -3899,7 +3680,7 @@ paths: name: Retrieve message group: threads beta: true - returns: The [message](/docs/api-reference/messages/object) object matching the specified ID. + returns: The [message](/docs/api-reference/threads/messages/object) object matching the specified ID. examples: request: curl: | @@ -3910,7 +3691,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message = client.beta.threads.messages.retrieve( message_id="msg_abc123", thread_id="thread_abc123", @@ -3918,18 +3699,18 @@ paths: print(message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const message = await openai.beta.threads.messages.retrieve( "thread_abc123", "msg_abc123" ); - + console.log(message); } - + main(); response: | { @@ -3987,7 +3768,7 @@ paths: name: Modify message group: threads beta: true - returns: The modified [message](/docs/api-reference/messages/object) object. + returns: The modified [message](/docs/api-reference/threads/messages/object) object. examples: request: curl: | @@ -4004,7 +3785,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message = client.beta.threads.messages.update( message_id="msg_abc12", thread_id="thread_abc123", @@ -4016,9 +3797,9 @@ paths: print(message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const message = await openai.beta.threads.messages.update( "thread_abc123", @@ -4094,7 +3875,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_message = client.beta.threads.messages.delete( message_id="msg_abc12", thread_id="thread_abc123", @@ -4102,15 +3883,15 @@ paths: print(deleted_message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const deletedMessage = await openai.beta.threads.messages.del( "thread_abc123", "msg_abc123" ); - + console.log(deletedMessage); } response: | @@ -4120,6 +3901,7 @@ paths: "deleted": true } + /threads/runs: post: operationId: createThreadAndRun @@ -4163,7 +3945,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.create_and_run( assistant_id="asst_abc123", thread={ @@ -4172,13 +3954,13 @@ paths: ] } ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.createAndRun({ assistant_id: "asst_abc123", @@ -4188,10 +3970,10 @@ paths: ], }, }); - + console.log(run); } - + main(); response: | { @@ -4208,7 +3990,7 @@ paths: "completed_at": null, "required_action": null, "last_error": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": "You are a helpful assistant.", "tools": [], "tool_resources": {}, @@ -4224,8 +4006,7 @@ paths: "incomplete_details": null, "usage": null, "response_format": "auto", - 
"tool_choice": "auto", - "parallel_tool_calls": true + "tool_choice": "auto" } - title: Streaming @@ -4247,7 +4028,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.create_and_run( assistant_id="asst_123", thread={ @@ -4257,14 +4038,14 @@ paths: }, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.createAndRun({ assistant_id: "asst_123", @@ -4275,58 +4056,58 @@ paths: }, stream: true }); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.created data: {"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} - + event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} - + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} + event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} - + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} + event: thread.run.in_progress - data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} - + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], "metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], "metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! 
How can I assist you today?","annotations":[]}}], "metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed - {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} - + {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"} + event: done data: [DONE] @@ -4372,7 +4153,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -4393,7 +4174,7 @@ paths: } } ] - + stream = client.beta.threads.create_and_run( thread={ "messages": [ @@ -4404,14 +4185,14 @@ paths: tools=tools, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + const tools = [ { "type": "function", @@ -4432,7 +4213,7 @@ paths: } } ]; - + async function main() { const stream = await openai.beta.threads.createAndRun({ assistant_id: "asst_123", @@ -4444,52 +4225,52 @@ paths: tools: tools, stream: true }); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.created data: {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} - + event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"","output":null}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"{\""}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"location"}}]}}} - + ... - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"ahrenheit"}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} - + event: thread.run.requires_action - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"}} + event: done data: [DONE] @@ -4519,7 +4300,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -4552,25 +4333,25 @@ paths: python: | from openai import OpenAI client = OpenAI() - + runs = client.beta.threads.runs.list( "thread_abc123" ) - + print(runs) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const runs = await openai.beta.threads.runs.list( "thread_abc123" ); - + console.log(runs); } - + main(); response: | { @@ -4589,7 +4370,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": null, "incomplete_details": null, "tools": [ @@ -4620,8 +4401,7 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto", - "parallel_tool_calls": true + "tool_choice": "auto" }, { "id": "run_abc456", @@ -4636,7 +4416,7 @@ paths: "failed_at": null, "completed_at": 1699063291, "last_error": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": null, "incomplete_details": null, "tools": [ @@ -4667,8 +4447,7 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto", - "parallel_tool_calls": true + "tool_choice": "auto" } ], "first_id": "run_abc123", @@ -4687,17 +4466,6 @@ paths: schema: type: string description: The ID of the thread to run. - - name: include[] - in: query - description: &include_param_description | - A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. - - See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. 
- schema: - type: array - items: - type: string - enum: [ "step_details.tool_calls[*].file_search.results[*].content" ] requestBody: required: true content: @@ -4730,27 +4498,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.create( thread_id="thread_abc123", assistant_id="asst_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.create( "thread_abc123", { assistant_id: "asst_abc123" } ); - + console.log(run); } - + main(); response: &run_object_example | { @@ -4766,7 +4534,7 @@ paths: "failed_at": null, "completed_at": 1699063291, "last_error": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": null, "incomplete_details": null, "tools": [ @@ -4785,8 +4553,7 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto", - "parallel_tool_calls": true + "tool_choice": "auto" } - title: Streaming request: @@ -4802,74 +4569,74 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.runs.create( thread_id="thread_123", assistant_id="asst_123", stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.runs.create( "thread_123", { assistant_id: "asst_123", stream: true } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... 
- + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710330642,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! How can I assist you today?","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} + event: done data: [DONE] @@ -4910,7 +4677,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -4931,21 +4698,21 @@ paths: } } ] - + stream = client.beta.threads.runs.create( thread_id="thread_abc123", assistant_id="asst_abc123", tools=tools, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + const tools = [ { "type": "function", @@ -4966,7 +4733,7 @@ paths: } } ]; - + async function main() { const stream = await openai.beta.threads.runs.create( "thread_abc123", @@ -4976,55 +4743,55 @@ paths: stream: true } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.created - data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! 
How can I assist you today?","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} + event: done data: [DONE] @@ -5068,27 +4835,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.retrieve( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.retrieve( "thread_abc123", "run_abc123" ); - + console.log(run); } - + main(); response: | { @@ -5104,7 +4871,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": null, "incomplete_details": null, "tools": [ @@ -5127,8 +4894,7 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto", - "parallel_tool_calls": true + "tool_choice": "auto" } post: operationId: modifyRun @@ -5181,19 +4947,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.update( thread_id="thread_abc123", run_id="run_abc123", metadata={"user_id": "user_abc123"}, ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.update( "thread_abc123", @@ -5204,10 +4970,10 @@ paths: }, } ); - + console.log(run); } - + main(); response: | { @@ -5223,7 +4989,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": null, "incomplete_details": null, "tools": [ @@ -5256,8 +5022,7 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto", - 
"parallel_tool_calls": true + "tool_choice": "auto" } /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: @@ -5317,7 +5082,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.submit_tool_outputs( thread_id="thread_123", run_id="run_123", @@ -5328,13 +5093,13 @@ paths: } ] ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.submitToolOutputs( "thread_123", @@ -5348,10 +5113,10 @@ paths: ], } ); - + console.log(run); } - + main(); response: | { @@ -5367,7 +5132,7 @@ paths: "failed_at": null, "completed_at": null, "last_error": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": null, "tools": [ { @@ -5403,8 +5168,7 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto", - "parallel_tool_calls": true + "tool_choice": "auto" } - title: Streaming @@ -5426,7 +5190,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.runs.submit_tool_outputs( thread_id="thread_123", run_id="run_123", @@ -5438,14 +5202,14 @@ paths: ], stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.runs.submitToolOutputs( "thread_123", @@ -5459,61 +5223,61 @@ paths: ], } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}} - + event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + event: thread.run.step.created data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} - + event: thread.message.created data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"The","annotations":[]}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" current"}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" weather"}}]}} - + ... 
- + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" sunny"}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}} - + event: thread.message.completed data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710352477,"role":"assistant","content":[{"type":"text","text":{"value":"The current weather in San Francisco, CA is 70 degrees Fahrenheit and sunny.","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}} - + event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} - + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} + event: done data: [DONE] @@ -5558,27 +5322,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.cancel( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.cancel( "thread_abc123", "run_abc123" ); - + console.log(run); } - + main(); response: | { @@ -5594,7 +5358,7 @@ paths: "failed_at": null, "completed_at": null, "last_error": null, - "model": "gpt-4o", + "model": "gpt-4-turbo", "instructions": "You summarize books.", "tools": [ { @@ -5610,9 +5374,7 @@ paths: "usage": null, "temperature": 1.0, "top_p": 1.0, - "response_format": "auto", - "tool_choice": "auto", - "parallel_tool_calls": true + "response_format": "auto" } /threads/{thread_id}/runs/{run_id}/steps: @@ -5647,7 +5409,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -5658,14 +5420,6 @@ paths: description: *pagination_before_param_description schema: type: string - - name: include[] - in: query - description: *include_param_description - schema: - type: array - items: - type: string - enum: [ "step_details.tool_calls[*].file_search.results[*].content" ] responses: "200": description: OK @@ -5677,7 +5431,7 @@ paths: name: List run steps group: threads beta: true - returns: A list of [run step](/docs/api-reference/run-steps/step-object) objects. + returns: A list of [run step](/docs/api-reference/runs/step-object) objects. examples: request: curl: | @@ -5688,17 +5442,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run_steps = client.beta.threads.runs.steps.list( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run_steps) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const runStep = await openai.beta.threads.runs.steps.list( "thread_abc123", @@ -5706,7 +5460,7 @@ paths: ); console.log(runStep); } - + main(); response: | { @@ -5769,14 +5523,6 @@ paths: schema: type: string description: The ID of the run step to retrieve. - - name: include[] - in: query - description: *include_param_description - schema: - type: array - items: - type: string - enum: [ "step_details.tool_calls[*].file_search.results[*].content" ] responses: "200": description: OK @@ -5788,7 +5534,7 @@ paths: name: Retrieve run step group: threads beta: true - returns: The [run step](/docs/api-reference/run-steps/step-object) object matching the specified ID. + returns: The [run step](/docs/api-reference/runs/step-object) object matching the specified ID. 
examples: request: curl: | @@ -5799,18 +5545,18 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run_step = client.beta.threads.runs.steps.retrieve( thread_id="thread_abc123", run_id="run_abc123", step_id="step_abc123" ) - + print(run_step) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const runStep = await openai.beta.threads.runs.steps.retrieve( "thread_abc123", @@ -5819,7 +5565,7 @@ paths: ); console.log(runStep); } - + main(); response: &run_step_object_example | { @@ -5869,7 +5615,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -5902,18 +5648,18 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_stores = client.beta.vector_stores.list() print(vector_stores) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStores = await openai.beta.vectorStores.list(); console.log(vectorStores); } - + main(); response: | { @@ -5988,7 +5734,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.create( name="Support FAQ" ) @@ -5996,14 +5742,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.create({ name: "Support FAQ" }); console.log(vectorStore); } - + main(); response: | { @@ -6056,7 +5802,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.retrieve( vector_store_id="vs_abc123" ) @@ -6064,14 +5810,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.retrieve( "vs_abc123" ); console.log(vectorStore); } - + main(); response: | { @@ -6122,7 +5868,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.update( vector_store_id="vs_abc123", name="Support FAQ" @@ -6131,7 +5877,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.update( "vs_abc123", @@ -6141,7 +5887,7 @@ paths: ); console.log(vectorStore); } - + main(); response: | { @@ -6194,7 +5940,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_vector_store = client.beta.vector_stores.delete( vector_store_id="vs_abc123" ) @@ -6202,14 +5948,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const deletedVectorStore = await openai.beta.vectorStores.del( "vs_abc123" ); console.log(deletedVectorStore); } - + main(); response: | { @@ -6244,7 +5990,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -6260,7 +6006,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." 
schema: type: string - enum: [ "in_progress", "completed", "failed", "cancelled" ] + enum: ["in_progress", "completed", "failed", "cancelled"] responses: "200": description: OK @@ -6283,7 +6029,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_files = client.beta.vector_stores.files.list( vector_store_id="vs_abc123" ) @@ -6291,14 +6037,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFiles = await openai.beta.vectorStores.files.list( "vs_abc123" ); console.log(vectorStoreFiles); } - + main(); response: | { @@ -6366,7 +6112,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file = client.beta.vector_stores.files.create( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6375,7 +6121,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const myVectorStoreFile = await openai.beta.vectorStores.files.create( "vs_abc123", @@ -6385,7 +6131,7 @@ paths: ); console.log(myVectorStoreFile); } - + main(); response: | { @@ -6441,7 +6187,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file = client.beta.vector_stores.files.retrieve( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6450,7 +6196,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFile = await openai.beta.vectorStores.files.retrieve( "vs_abc123", @@ -6458,7 +6204,7 @@ paths: ); console.log(vectorStoreFile); } - + main(); response: | { @@ -6510,7 +6256,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_vector_store_file = client.beta.vector_stores.files.delete( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6519,7 +6265,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const deletedVectorStoreFile = await openai.beta.vectorStores.files.del( "vs_abc123", @@ -6527,7 +6273,7 @@ paths: ); console.log(deletedVectorStoreFile); } - + main(); response: | { @@ -6582,7 +6328,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file_batch = client.beta.vector_stores.file_batches.create( vector_store_id="vs_abc123", file_ids=["file-abc123", "file-abc456"] @@ -6591,7 +6337,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create( "vs_abc123", @@ -6601,7 +6347,7 @@ paths: ); console.log(myVectorStoreFileBatch); } - + main(); response: | { @@ -6662,7 +6408,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file_batch = client.beta.vector_stores.file_batches.retrieve( vector_store_id="vs_abc123", batch_id="vsfb_abc123" @@ -6671,7 +6417,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve( "vs_abc123", @@ -6679,7 +6425,7 @@ paths: ); console.log(vectorStoreFileBatch); } - + main(); response: | { @@ -6739,7 +6485,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_vector_store_file_batch = client.beta.vector_stores.file_batches.cancel( vector_store_id="vs_abc123", file_batch_id="vsfb_abc123" @@ -6748,7 +6494,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async 
function main() { const deletedVectorStoreFileBatch = await openai.vector_stores.fileBatches.cancel( "vs_abc123", @@ -6756,7 +6502,7 @@ paths: ); console.log(deletedVectorStoreFileBatch); } - + main(); response: | { @@ -6806,7 +6552,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -6822,7 +6568,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: [ "in_progress", "completed", "failed", "cancelled" ] + enum: ["in_progress", "completed", "failed", "cancelled"] responses: "200": description: OK @@ -6845,7 +6591,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_files = client.beta.vector_stores.file_batches.list_files( vector_store_id="vs_abc123", batch_id="vsfb_abc123" @@ -6854,7 +6600,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles( "vs_abc123", @@ -6862,7 +6608,7 @@ paths: ); console.log(vectorStoreFiles); } - + main(); response: | { @@ -6907,22 +6653,17 @@ paths: type: string description: | The ID of an uploaded file that contains requests for the new batch. - + See [upload file](/docs/api-reference/files/create) for how to upload a file. - - Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + + Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/requestInput), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. endpoint: type: string - enum: - [ - "/v1/chat/completions", - "/v1/embeddings", - "/v1/completions", - ] + enum: ["/v1/chat/completions", "/v1/embeddings", "/v1/completions"] description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. completion_window: type: string - enum: [ "24h" ] + enum: ["24h"] description: The time frame within which the batch should be processed. Currently only `24h` is supported. 
metadata: type: object @@ -6955,7 +6696,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.create( input_file_id="file-abc123", endpoint="/v1/chat/completions", @@ -6963,19 +6704,19 @@ paths: ) node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.create({ input_file_id: "file-abc123", endpoint: "/v1/chat/completions", completion_window: "24h" }); - + console.log(batch); } - + main(); response: | { @@ -7046,21 +6787,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.list() node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.batches.list(); - + for await (const batch of list) { console.log(batch); } } - + main(); response: | { @@ -7135,19 +6876,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.retrieve("batch_abc123") node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.retrieve("batch_abc123"); - + console.log(batch); } - + main(); response: &batch_object | { @@ -7185,7 +6926,7 @@ paths: operationId: cancelBatch tags: - Batch - summary: Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file. + summary: Cancels an in-progress batch. parameters: - in: path name: batch_id @@ -7214,19 +6955,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.cancel("batch_abc123") node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.cancel("batch_abc123"); - + console.log(batch); } - + main(); response: | { @@ -7259,5893 +7000,2729 @@ paths: } } - # Organization - # Audit Logs List - /organization/audit_logs: - get: - summary: List user actions and configuration changes within this organization. - operationId: list-audit-logs - tags: - - Audit Logs - parameters: - - name: effective_at - in: query - description: Return only events whose `effective_at` (Unix seconds) is in this range. - required: false - schema: - type: object - properties: - gt: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is greater than this value. - gte: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is greater than or equal to this value. - lt: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is less than this value. - lte: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is less than or equal to this value. - - name: project_ids[] - in: query - description: Return only events for these projects. - required: false - schema: - type: array - items: - type: string - - name: event_types[] - in: query - description: Return only events with a `type` in one of these values. For example, `project.created`. For all options, see the documentation for the [audit log object](/docs/api-reference/audit-logs/object). - required: false - schema: - type: array - items: - $ref: "#/components/schemas/AuditLogEventType" - - name: actor_ids[] - in: query - description: Return only events performed by these actors. Can be a user ID, a service account ID, or an api key tracking ID. 
- required: false - schema: - type: array - items: - type: string - - name: actor_emails[] - in: query - description: Return only events performed by users with these emails. - required: false - schema: - type: array - items: - type: string - - name: resource_ids[] - in: query - description: Return only events performed on these targets. For example, a project ID updated. - required: false - schema: - type: array - items: - type: string - - name: limit - in: query - description: *pagination_limit_param_description - required: false - schema: - type: integer - default: 20 - - name: after - in: query - description: *pagination_after_param_description - schema: - type: string - - name: before - in: query - description: *pagination_before_param_description - schema: - type: string - responses: - "200": - description: Audit logs listed successfully. - content: - application/json: - schema: - $ref: "#/components/schemas/ListAuditLogsResponse" - x-oaiMeta: - name: List audit logs - group: audit-logs - returns: A list of paginated [Audit Log](/docs/api-reference/audit-logs/object) objects. - examples: - request: - curl: | - curl https://api.openai.com/v1/organization/audit_logs \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" \ - response: | - { - "object": "list", - "data": [ - { - "id": "audit_log-xxx_yyyymmdd", - "type": "project.archived", - "effective_at": 1722461446, - "actor": { - "type": "api_key", - "api_key": { - "type": "user", - "user": { - "id": "user-xxx", - "email": "user@example.com" - } - } - }, - "project.archived": { - "id": "proj_abc" - }, - }, - { - "id": "audit_log-yyy__20240101", - "type": "api_key.updated", - "effective_at": 1720804190, - "actor": { - "type": "session", - "session": { - "user": { - "id": "user-xxx", - "email": "user@example.com" - }, - "ip_address": "127.0.0.1", - "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" - } - }, - "api_key.updated": { - "id": "key_xxxx", - "data": { - "scopes": ["resource_2.operation_2"] - } - }, - } - ], - "first_id": "audit_log-xxx__20240101", - "last_id": "audit_log_yyy__20240101", - "has_more": true - } - /organization/invites: - get: - summary: Returns a list of invites in the organization. - operationId: list-invites - tags: - - Invites - parameters: - - name: limit - in: query - description: *pagination_limit_param_description - required: false - schema: - type: integer - default: 20 - - name: after - in: query - description: *pagination_after_param_description - required: false - schema: - type: string - responses: - "200": - description: Invites listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/InviteListResponse' - x-oaiMeta: - name: List invites - group: administration - returns: A list of [Invite](/docs/api-reference/invite/object) objects. 
- examples: - request: - curl: | - curl https://api.openai.com/v1/organization/invites?after=invite-abc&limit=20 \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "list", - "data": [ - { - "object": "organization.invite", - "id": "invite-abc", - "email": "user@example.com", - "role": "owner", - "status": "accepted", - "invited_at": 1711471533, - "expires_at": 1711471533, - "accepted_at": 1711471533 - } - ], - "first_id": "invite-abc", - "last_id": "invite-abc", - "has_more": false - } +components: + securitySchemes: + ApiKeyAuth: + type: http + scheme: "bearer" - post: - summary: Create an invite for a user to the organization. The invite must be accepted by the user before they have access to the organization. - operationId: inviteUser - tags: - - Invites - requestBody: - description: The invite request payload. - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/InviteRequest' - responses: - "200": - description: User invited successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Invite' - x-oaiMeta: - name: Create invite - group: administration - returns: The created [Invite](/docs/api-reference/invite/object) object. - examples: - request: - curl: | - curl -X POST https://api.openai.com/v1/organization/invites \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" \ - -d '{ - "email": "user@example.com", - "role": "owner" - }' - response: - content: | - { - "object": "organization.invite", - "id": "invite-abc", - "email": "user@example.com", - "role": "owner", - "invited_at": 1711471533, - "expires_at": 1711471533, - "accepted_at": null - } + schemas: + Error: + type: object + properties: + code: + type: string + nullable: true + message: + type: string + nullable: false + param: + type: string + nullable: true + type: + type: string + nullable: false + required: + - type + - message + - param + - code + ErrorResponse: + type: object + properties: + error: + $ref: "#/components/schemas/Error" + required: + - error - /organization/invites/{invite_id}: - get: - summary: Retrieves an invite. - operationId: retrieve-invite - tags: - - Invites - parameters: - - in: path - name: invite_id - required: true - schema: - type: string - description: The ID of the invite to retrieve. - responses: - "200": - description: Invite retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Invite' - x-oaiMeta: - name: Retrieve invite - group: administration - returns: The [Invite](/docs/api-reference/invite/object) object matching the specified ID. - examples: - request: - curl: | - curl https://api.openai.com/v1/organization/invites/invite-abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "organization.invite", - "id": "invite-abc", - "email": "user@example.com", - "role": "owner", - "status": "accepted", - "invited_at": 1711471533, - "expires_at": 1711471533, - "accepted_at": 1711471533 - } - delete: - summary: Delete an invite. If the invite has already been accepted, it cannot be deleted. - operationId: delete-invite - tags: - - Invites - parameters: - - in: path - name: invite_id - required: true - schema: - type: string - description: The ID of the invite to delete. - responses: - "200": - description: Invite deleted successfully. 
- content: - application/json: - schema: - $ref: '#/components/schemas/InviteDeleteResponse' - x-oaiMeta: - name: Delete invite - group: administration - returns: Confirmation that the invite has been deleted - examples: - request: - curl: | - curl -X DELETE https://api.openai.com/v1/organization/invites/invite-abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "organization.invite.deleted", - "id": "invite-abc", - "deleted": true - } - - /organization/users: - get: - summary: Lists all of the users in the organization. - operationId: list-users - tags: - - Users - parameters: - - name: limit - in: query - description: *pagination_limit_param_description - required: false - schema: - type: integer - default: 20 - - name: after - in: query - description: *pagination_after_param_description - required: false - schema: - type: string - responses: - "200": - description: Users listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/UserListResponse' - x-oaiMeta: - name: List users - group: administration - returns: A list of [User](/docs/api-reference/users/object) objects. - examples: - request: - curl: | - curl https://api.openai.com/v1/organization/users?after=user_abc&limit=20 \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "list", - "data": [ - { - "object": "organization.user", - "id": "user_abc", - "name": "First Last", - "email": "user@example.com", - "role": "owner", - "added_at": 1711471533 - } - ], - "first_id": "user-abc", - "last_id": "user-xyz", - "has_more": false - } - - /organization/users/{user_id}: - get: - summary: Retrieves a user by their identifier. - operationId: retrieve-user - tags: - - Users - parameters: - - name: user_id - in: path - description: The ID of the user. - required: true - schema: - type: string - responses: - "200": - description: User retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/User' - x-oaiMeta: - name: Retrieve user - group: administration - returns: The [User](/docs/api-reference/users/object) object matching the specified ID. - examples: - request: - curl: | - curl https://api.openai.com/v1/organization/users/user_abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "organization.user", - "id": "user_abc", - "name": "First Last", - "email": "user@example.com", - "role": "owner", - "added_at": 1711471533 - } - - post: - summary: Modifies a user's role in the organization. - operationId: modify-user - tags: - - Users - requestBody: - description: The new user role to modify. This must be one of `owner` or `member`. - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/UserRoleUpdateRequest' - responses: - "200": - description: User role updated successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/User' - x-oaiMeta: - name: Modify user - group: administration - returns: The updated [User](/docs/api-reference/users/object) object. 
- examples: - request: - curl: | - curl -X POST https://api.openai.com/v1/organization/users/user_abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" \ - -d '{ - "role": "owner" - }' - response: - content: | - { - "object": "organization.user", - "id": "user_abc", - "name": "First Last", - "email": "user@example.com", - "role": "owner", - "added_at": 1711471533 - } - - delete: - summary: Deletes a user from the organization. - operationId: delete-user - tags: - - Users - parameters: - - name: user_id - in: path - description: The ID of the user. - required: true - schema: - type: string - responses: - "200": - description: User deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/UserDeleteResponse' - x-oaiMeta: - name: Delete user - group: administration - returns: Confirmation of the deleted user - examples: - request: - curl: | - curl -X DELETE https://api.openai.com/v1/organization/users/user_abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "organization.user.deleted", - "id": "user_abc", - "deleted": true - } - /organization/projects: - get: - summary: Returns a list of projects. - operationId: list-projects - tags: - - Projects - parameters: - - name: limit - in: query - description: *pagination_limit_param_description - required: false - schema: - type: integer - default: 20 - - name: after - in: query - description: *pagination_after_param_description - required: false - schema: - type: string - - name: include_archived - in: query - schema: - type: boolean - default: false - description: If `true` returns all projects including those that have been `archived`. Archived projects are not included by default. - responses: - "200": - description: Projects listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectListResponse' - x-oaiMeta: - name: List projects - group: administration - returns: A list of [Project](/docs/api-reference/projects/object) objects. - examples: - request: - curl: | - curl https://api.openai.com/v1/organization/projects?after=proj_abc&limit=20&include_archived=false \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "list", - "data": [ - { - "id": "proj_abc", - "object": "organization.project", - "name": "Project example", - "created_at": 1711471533, - "archived_at": null, - "status": "active" - } - ], - "first_id": "proj-abc", - "last_id": "proj-xyz", - "has_more": false - } - - post: - summary: Create a new project in the organization. Projects can be created and archived, but cannot be deleted. - operationId: create-project - tags: - - Projects - requestBody: - description: The project create request payload. - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectCreateRequest' - responses: - "200": - description: Project created successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - x-oaiMeta: - name: Create project - group: administration - returns: The created [Project](/docs/api-reference/projects/object) object. 
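The `/organization/users` operations above use the same cursor pagination (`after` plus `limit`, with `first_id`/`last_id`/`has_more` in the response) and accept a role of `owner` or `member` on update. A sketch under the same assumptions as the previous block (`requests`, `OPENAI_ADMIN_KEY`); treating `last_id` as the next cursor is an assumption consistent with the list example, not something the excerpt states outright.

```python
import os
import requests

BASE = "https://api.openai.com/v1"
HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}",
           "Content-Type": "application/json"}

# Walk the full member list with cursor pagination.
after, users = None, []
while True:
    params = {"limit": 20, **({"after": after} if after else {})}
    page = requests.get(f"{BASE}/organization/users",
                        headers=HEADERS, params=params).json()
    users.extend(page["data"])
    if not page["has_more"]:
        break
    after = page["last_id"]

# Change one user's role ("owner" or "member"), then remove them,
# purely to show the modify and delete calls.
user_id = users[0]["id"]
requests.post(f"{BASE}/organization/users/{user_id}",
              headers=HEADERS, json={"role": "member"})
requests.delete(f"{BASE}/organization/users/{user_id}", headers=HEADERS)
```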
- examples: - request: - curl: | - curl -X POST https://api.openai.com/v1/organization/projects \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" \ - -d '{ - "name": "Project ABC", - "app_use_case": "Your project use case here", - "business_website": "https://example.com" - }' - response: - content: | - { - "id": "proj_abc", - "object": "organization.project", - "name": "Project ABC", - "created_at": 1711471533, - "archived_at": null, - "status": "active", - "app_use_case": "Your project use case here", - "business_website": "https://example.com" - } - - /organization/projects/{project_id}: - get: - summary: Retrieves a project. - operationId: retrieve-project - tags: - - Projects - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - responses: - "200": - description: Project retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - x-oaiMeta: - name: Retrieve project - group: administration - description: Retrieve a project. - returns: The [Project](/docs/api-reference/projects/object) object matching the specified ID. - examples: - request: - curl: | - curl https://api.openai.com/v1/organization/projects/proj_abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "id": "proj_abc", - "object": "organization.project", - "name": "Project example", - "created_at": 1711471533, - "archived_at": null, - "status": "active" - } - - post: - summary: Modifies a project in the organization. - operationId: modify-project - tags: - - Projects - requestBody: - description: The project update request payload. - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUpdateRequest' - responses: - "200": - description: Project updated successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - "400": - description: Error response when updating the default project. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Modify project - group: administration - returns: The updated [Project](/docs/api-reference/projects/object) object. - examples: - request: - curl: | - curl -X POST https://api.openai.com/v1/organization/projects/proj_abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" \ - -d '{ - "name": "Project DEF", - "app_use_case": "Your project use case here", - "business_website": "https://example.com" - }' - - /organization/projects/{project_id}/archive: - post: - summary: Archives a project in the organization. Archived projects cannot be used or updated. - operationId: archive-project - tags: - - Projects - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - responses: - "200": - description: Project archived successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - x-oaiMeta: - name: Archive project - group: administration - returns: The archived [Project](/docs/api-reference/projects/object) object. 
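Creating, renaming and archiving a project are simple POSTs; the spec above notes that projects can be archived but never deleted, and that modifying the default project answers with a 400 carrying the `ErrorResponse` schema. A hedged sketch, same `requests`/`OPENAI_ADMIN_KEY` assumptions:

```python
import os
import requests

BASE = "https://api.openai.com/v1"
HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}",
           "Content-Type": "application/json"}

# Create a project (projects can be archived later, but never deleted).
project = requests.post(f"{BASE}/organization/projects", headers=HEADERS,
                        json={"name": "Project ABC"}).json()

# Rename it; a 400 ErrorResponse comes back if this is attempted
# against the organization's default project.
resp = requests.post(f"{BASE}/organization/projects/{project['id']}",
                     headers=HEADERS, json={"name": "Project DEF"})
if resp.status_code == 400:
    print("modify rejected:", resp.json())

# Archiving is a POST to the /archive sub-resource; archived projects
# can no longer be used or updated.
requests.post(f"{BASE}/organization/projects/{project['id']}/archive",
              headers=HEADERS)
```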
- examples: - request: - curl: | - curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/archive \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "id": "proj_abc", - "object": "organization.project", - "name": "Project DEF", - "created_at": 1711471533, - "archived_at": 1711471533, - "status": "archived" - } - - - /organization/projects/{project_id}/users: - get: - summary: Returns a list of users in the project. - operationId: list-project-users - tags: - - Projects - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: limit - in: query - description: *pagination_limit_param_description - required: false - schema: - type: integer - default: 20 - - name: after - in: query - description: *pagination_after_param_description - required: false - schema: - type: string - responses: - "200": - description: Project users listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserListResponse' - "400": - description: Error response when project is archived. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: List project users - group: administration - returns: A list of [ProjectUser](/docs/api-reference/project-users/object) objects. - examples: - request: - curl: | - curl https://api.openai.com/v1/organization/projects/proj_abc/users?after=user_abc&limit=20 \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "list", - "data": [ - { - "object": "organization.project.user", - "id": "user_abc", - "name": "First Last", - "email": "user@example.com", - "role": "owner", - "added_at": 1711471533 - } - ], - "first_id": "user-abc", - "last_id": "user-xyz", - "has_more": false - } - error_response: - content: | - { - "code": 400, - "message": "Project {name} is archived" - } - - post: - summary: Adds a user to the project. Users must already be members of the organization to be added to a project. - operationId: create-project-user - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - tags: - - Projects - requestBody: - description: The project user create request payload. - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserCreateRequest' - responses: - "200": - description: User added to project successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUser' - "400": - description: Error response for various conditions. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Create project user - group: administration - returns: The created [ProjectUser](/docs/api-reference/project-users/object) object. 
- examples: - request: - curl: | - curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" \ - -d '{ - "user_id": "user_abc", - "role": "member" - }' - response: - content: | - { - "object": "organization.project.user", - "id": "user_abc", - "email": "user@example.com", - "role": "owner", - "added_at": 1711471533 - } - error_response: - content: | - { - "code": 400, - "message": "Project {name} is archived" - } - - /organization/projects/{project_id}/users/{user_id}: - get: - summary: Retrieves a user in the project. - operationId: retrieve-project-user - tags: - - Projects - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: user_id - in: path - description: The ID of the user. - required: true - schema: - type: string - responses: - "200": - description: Project user retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUser' - x-oaiMeta: - name: Retrieve project user - group: administration - returns: The [ProjectUser](/docs/api-reference/project-users/object) object matching the specified ID. - examples: - request: - curl: | - curl https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "organization.project.user", - "id": "user_abc", - "name": "First Last", - "email": "user@example.com", - "role": "owner", - "added_at": 1711471533 - } - - post: - summary: Modifies a user's role in the project. - operationId: modify-project-user - tags: - - Projects - requestBody: - description: The project user update request payload. - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserUpdateRequest' - responses: - "200": - description: Project user's role updated successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUser' - "400": - description: Error response for various conditions. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Modify project user - group: administration - returns: The updated [ProjectUser](/docs/api-reference/project-users/object) object. - examples: - request: - curl: | - curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" \ - -d '{ - "role": "owner" - }' - response: - content: | - { - "object": "organization.project.user", - "id": "user_abc", - "name": "First Last", - "email": "user@example.com", - "role": "owner", - "added_at": 1711471533 - } - - delete: - summary: Deletes a user from the project. - operationId: delete-project-user - tags: - - Projects - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: user_id - in: path - description: The ID of the user. - required: true - schema: - type: string - responses: - "200": - description: Project user deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserDeleteResponse' - "400": - description: Error response for various conditions. 
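Project membership follows the pattern above: the user must already belong to the organization, the role is `owner` or `member`, and an archived project rejects every call with the 400 error payload shown in the examples. Sketch, same assumptions; `proj_abc` and `user_abc` are the placeholder IDs from the spec's own examples.

```python
import os
import requests

BASE = "https://api.openai.com/v1"
HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}",
           "Content-Type": "application/json"}
project_id, user_id = "proj_abc", "user_abc"   # placeholder IDs from the spec examples

# Add an existing organization member to the project as a plain member.
resp = requests.post(f"{BASE}/organization/projects/{project_id}/users",
                     headers=HEADERS, json={"user_id": user_id, "role": "member"})
if resp.status_code == 400:
    print("rejected:", resp.json())   # e.g. "Project {name} is archived"

# Promote them to project owner, then remove them again.
requests.post(f"{BASE}/organization/projects/{project_id}/users/{user_id}",
              headers=HEADERS, json={"role": "owner"})
requests.delete(f"{BASE}/organization/projects/{project_id}/users/{user_id}",
                headers=HEADERS)
```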
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Delete project user - group: administration - returns: Confirmation that project has been deleted or an error in case of an archived project, which has no users - examples: - request: - curl: | - curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "organization.project.user.deleted", - "id": "user_abc", - "deleted": true - } - - /organization/projects/{project_id}/service_accounts: - get: - summary: Returns a list of service accounts in the project. - operationId: list-project-service-accounts - tags: - - Projects - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: limit - in: query - description: *pagination_limit_param_description - required: false - schema: - type: integer - default: 20 - - name: after - in: query - description: *pagination_after_param_description - required: false - schema: - type: string - responses: - "200": - description: Project service accounts listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountListResponse' - "400": - description: Error response when project is archived. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: List project service accounts - group: administration - returns: A list of [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) objects. - examples: - request: - curl: | - curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts?after=custom_id&limit=20 \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "list", - "data": [ - { - "object": "organization.project.service_account", - "id": "svc_acct_abc", - "name": "Service Account", - "role": "owner", - "created_at": 1711471533 - } - ], - "first_id": "svc_acct_abc", - "last_id": "svc_acct_xyz", - "has_more": false - } - - post: - summary: Creates a new service account in the project. This also returns an unredacted API key for the service account. - operationId: create-project-service-account - tags: - - Projects - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - requestBody: - description: The project service account create request payload. - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountCreateRequest' - responses: - "200": - description: Project service account created successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountCreateResponse' - "400": - description: Error response when project is archived. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Create project service account - group: administration - returns: The created [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object. 
- examples: - request: - curl: | - curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/service_accounts \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" \ - -d '{ - "name": "Production App" - }' - response: - content: | - { - "object": "organization.project.service_account", - "id": "svc_acct_abc", - "name": "Production App", - "role": "member", - "created_at": 1711471533, - "api_key": { - "object": "organization.project.service_account.api_key", - "value": "sk-abcdefghijklmnop123", - "name": "Secret Key", - "created_at": 1711471533, - "id": "key_abc" - } - } - - /organization/projects/{project_id}/service_accounts/{service_account_id}: - get: - summary: Retrieves a service account in the project. - operationId: retrieve-project-service-account - tags: - - Projects - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: service_account_id - in: path - description: The ID of the service account. - required: true - schema: - type: string - responses: - "200": - description: Project service account retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccount' - x-oaiMeta: - name: Retrieve project service account - group: administration - returns: The [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object matching the specified ID. - examples: - request: - curl: | - curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "organization.project.service_account", - "id": "svc_acct_abc", - "name": "Service Account", - "role": "owner", - "created_at": 1711471533 - } - - delete: - summary: Deletes a service account from the project. - operationId: delete-project-service-account - tags: - - Projects - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: service_account_id - in: path - description: The ID of the service account. - required: true - schema: - type: string - responses: - "200": - description: Project service account deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountDeleteResponse' - x-oaiMeta: - name: Delete project service account - group: administration - returns: Confirmation of service account being deleted, or an error in case of an archived project, which has no service accounts - examples: - request: - curl: | - curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "organization.project.service_account.deleted", - "id": "svc_acct_abc", - "deleted": true - } - - /organization/projects/{project_id}/api_keys: - get: - summary: Returns a list of API keys in the project. - operationId: list-project-api-keys - tags: - - Projects - parameters: - - name: project_id - in: path - description: The ID of the project. 
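The create-service-account response above is the only moment the unredacted key is visible, inside the nested `api_key` object, so a client has to capture `api_key.value` immediately. A sketch, same assumptions; the account name and placeholder project ID come from the spec's examples.

```python
import os
import requests

BASE = "https://api.openai.com/v1"
HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}",
           "Content-Type": "application/json"}
project_id = "proj_abc"   # placeholder from the spec examples

# Create a service account; the response is the only time the full key is returned.
svc = requests.post(f"{BASE}/organization/projects/{project_id}/service_accounts",
                    headers=HEADERS, json={"name": "Production App"}).json()
secret = svc["api_key"]["value"]        # e.g. "sk-..."; store it securely now

# Later: retrieve or delete the account by its ID.
acct_id = svc["id"]
requests.get(f"{BASE}/organization/projects/{project_id}/service_accounts/{acct_id}",
             headers=HEADERS)
requests.delete(f"{BASE}/organization/projects/{project_id}/service_accounts/{acct_id}",
                headers=HEADERS)
```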
- required: true - schema: - type: string - - name: limit - in: query - description: *pagination_limit_param_description - required: false - schema: - type: integer - default: 20 - - name: after - in: query - description: *pagination_after_param_description - required: false - schema: - type: string - responses: - "200": - description: Project API keys listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectApiKeyListResponse' - - x-oaiMeta: - name: List project API keys - group: administration - returns: A list of [ProjectApiKey](/docs/api-reference/project-api-keys/object) objects. - examples: - request: - curl: | - curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys?after=key_abc&limit=20 \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "list", - "data": [ - { - "object": "organization.project.api_key", - "redacted_value": "sk-abc...def", - "name": "My API Key", - "created_at": 1711471533, - "id": "key_abc", - "owner": { - "type": "user", - "user": { - "object": "organization.project.user", - "id": "user_abc", - "name": "First Last", - "email": "user@example.com", - "role": "owner", - "added_at": 1711471533 - } - } - } - ], - "first_id": "key_abc", - "last_id": "key_xyz", - "has_more": false - } - error_response: - content: | - { - "code": 400, - "message": "Project {name} is archived" - } - - /organization/projects/{project_id}/api_keys/{key_id}: - get: - summary: Retrieves an API key in the project. - operationId: retrieve-project-api-key - tags: - - Projects - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: key_id - in: path - description: The ID of the API key. - required: true - schema: - type: string - responses: - "200": - description: Project API key retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectApiKey' - x-oaiMeta: - name: Retrieve project API key - group: administration - returns: The [ProjectApiKey](/docs/api-reference/project-api-keys/object) object matching the specified ID. - examples: - request: - curl: | - curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "organization.project.api_key", - "redacted_value": "sk-abc...def", - "name": "My API Key", - "created_at": 1711471533, - "id": "key_abc", - "owner": { - "type": "user", - "user": { - "object": "organization.project.user", - "id": "user_abc", - "name": "First Last", - "email": "user@example.com", - "role": "owner", - "added_at": 1711471533 - } - } - } - - delete: - summary: Deletes an API key from the project. - operationId: delete-project-api-key - tags: - - Projects - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: key_id - in: path - description: The ID of the API key. - required: true - schema: - type: string - responses: - "200": - description: Project API key deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectApiKeyDeleteResponse' - "400": - description: Error response for various conditions. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Delete project API key - group: administration - returns: Confirmation of the key's deletion or an error if the key belonged to a service account - examples: - request: - curl: | - curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \ - -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ - -H "Content-Type: application/json" - response: - content: | - { - "object": "organization.project.api_key.deleted", - "id": "key_abc", - "deleted": true - } - error_response: - content: | - { - "code": 400, - "message": "API keys cannot be deleted for service accounts, please delete the service account" - } - -components: - securitySchemes: - ApiKeyAuth: - type: http - scheme: "bearer" - - schemas: - Error: - type: object - properties: - code: - type: string - nullable: true - message: - type: string - nullable: false - param: - type: string - nullable: true - type: - type: string - nullable: false - required: - - type - - message - - param - - code - ErrorResponse: - type: object - properties: - error: - $ref: "#/components/schemas/Error" - required: - - error - - ListModelsResponse: - type: object - properties: - object: - type: string - enum: [ list ] - data: - type: array - items: - $ref: "#/components/schemas/Model" - required: - - object - - data - DeleteModelResponse: - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - required: - - id - - object - - deleted - - CreateCompletionRequest: - type: object - properties: - model: - description: &model_description | - ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - anyOf: - - type: string - - type: string - enum: [ "gpt-3.5-turbo-instruct", "davinci-002", "babbage-002" ] - x-oaiTypeLabel: string - prompt: - description: &completions_prompt_description | - The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. - - Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. - default: "<|endoftext|>" - nullable: true - oneOf: - - type: string - default: "" - example: "This is a test." - - type: array - items: - type: string - default: "" - example: "This is a test." - - type: array - minItems: 1 - items: - type: integer - example: "[1212, 318, 257, 1332, 13]" - - type: array - minItems: 1 - items: - type: array - minItems: 1 - items: - type: integer - example: "[[1212, 318, 257, 1332, 13]]" - best_of: - type: integer - default: 1 - minimum: 0 - maximum: 20 - nullable: true - description: &completions_best_of_description | - Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. - - When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. - - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
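Project API keys can only be listed, retrieved and deleted here, and the delete example above shows the failure mode: keys owned by a service account come back as a 400 `ErrorResponse` telling you to delete the service account instead. A sketch of that flow, same `requests`/`OPENAI_ADMIN_KEY` assumptions:

```python
import os
import requests

BASE = "https://api.openai.com/v1"
HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}",
           "Content-Type": "application/json"}
project_id = "proj_abc"   # placeholder from the spec examples

keys = requests.get(f"{BASE}/organization/projects/{project_id}/api_keys",
                    headers=HEADERS, params={"limit": 20}).json()
for key in keys["data"]:
    # `owner.type` is "user" or "service_account"; the value is always redacted here.
    print(key["id"], key["redacted_value"], key["owner"]["type"])

resp = requests.delete(
    f"{BASE}/organization/projects/{project_id}/api_keys/{keys['data'][0]['id']}",
    headers=HEADERS)
if resp.status_code == 400:
    # Keys owned by service accounts cannot be deleted directly;
    # delete the service account instead, per the error message in the spec.
    print("delete rejected:", resp.json())
```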
- echo: - type: boolean - default: false - nullable: true - description: &completions_echo_description > - Echo back the prompt in addition to the completion - frequency_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: &completions_frequency_penalty_description | - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - - [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) - logit_bias: &completions_logit_bias - type: object - x-oaiTypeLabel: map - default: null - nullable: true - additionalProperties: - type: integer - description: &completions_logit_bias_description | - Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - - As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. - logprobs: &completions_logprobs_configuration - type: integer - minimum: 0 - maximum: 5 - default: null - nullable: true - description: &completions_logprobs_description | - Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - - The maximum value for `logprobs` is 5. - max_tokens: - type: integer - minimum: 0 - default: 16 - example: 16 - nullable: true - description: &completions_max_tokens_description | - The maximum number of [tokens](/tokenizer) that can be generated in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. - n: - type: integer - minimum: 1 - maximum: 128 - default: 1 - example: 1 - nullable: true - description: &completions_completions_description | - How many completions to generate for each prompt. - - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - presence_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: &completions_presence_penalty_description | - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
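The `CreateCompletionRequest` parameters above map one-to-one onto the JSON body of `POST /v1/completions`. A hedged sketch that exercises a few of them, including the `{"50256": -100}` logit-bias example quoted in the spec; it assumes a regular API key in `OPENAI_API_KEY` and, as before, uses `requests` purely for illustration.

```python
import os
import requests

resp = requests.post(
    "https://api.openai.com/v1/completions",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
             "Content-Type": "application/json"},
    json={
        "model": "gpt-3.5-turbo-instruct",
        "prompt": "Say this is a test",
        "max_tokens": 16,               # prompt tokens + max_tokens must fit the context window
        "n": 1,                         # number of completions returned per prompt
        "echo": True,                   # echo the prompt back in front of the completion
        "logprobs": 2,                  # log-probs for the 2 most likely tokens (max 5)
        "logit_bias": {"50256": -100},  # spec example: effectively ban <|endoftext|>
    },
)
choice = resp.json()["choices"][0]
print(choice["text"], choice["finish_reason"])
```

The note above about `best_of` and `n` applies directly to this body: both multiply token consumption, so they are left at their defaults here.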
- - [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) - seed: &completions_seed_param - type: integer - minimum: -9223372036854775808 - maximum: 9223372036854775807 - nullable: true - description: | - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - stop: - description: &completions_stop_description > - Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. - default: null - nullable: true - oneOf: - - type: string - default: <|endoftext|> - example: "\n" - nullable: true - - type: array - minItems: 1 - maxItems: 4 - items: - type: string - example: '["\n"]' - stream: - description: > - Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). - type: boolean - nullable: true - default: false - stream_options: - $ref: "#/components/schemas/ChatCompletionStreamOptions" - suffix: - description: | - The suffix that comes after a completion of inserted text. - - This parameter is only supported for `gpt-3.5-turbo-instruct`. - default: null - nullable: true - type: string - example: "test." - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: &completions_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: &completions_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - user: &end_user_param_configuration - type: string - example: user-1234 - description: | - A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - required: - - model - - prompt - - CreateCompletionResponse: - type: object - description: | - Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). - properties: - id: - type: string - description: A unique identifier for the completion. - choices: - type: array - description: The list of completion choices the model generated for the input prompt. - items: - type: object - required: - - finish_reason - - index - - logprobs - - text - properties: - finish_reason: - type: string - description: &completion_finish_reason_description | - The reason the model stopped generating tokens. 
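Setting `stream: true` switches the same endpoint to data-only server-sent events terminated by a `data: [DONE]` message, as the `stream` description above says. A minimal SSE reader, same `OPENAI_API_KEY` assumption; the parsing is a sketch rather than a full SSE implementation.

```python
import json
import os
import requests

resp = requests.post(
    "https://api.openai.com/v1/completions",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    json={"model": "gpt-3.5-turbo-instruct", "prompt": "Say this is a test",
          "max_tokens": 16, "stream": True},
    stream=True,                      # keep the HTTP connection open for SSE
)
for line in resp.iter_lines():
    if not line.startswith(b"data: "):
        continue                      # skip blank separator lines
    payload = line[len(b"data: "):]
    if payload == b"[DONE]":          # stream terminator defined by the spec
        break
    chunk = json.loads(payload)
    print(chunk["choices"][0]["text"], end="", flush=True)
```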
This will be `stop` if the model hit a natural stop point or a provided stop sequence, - `length` if the maximum number of tokens specified in the request was reached, - or `content_filter` if content was omitted due to a flag from our content filters. - enum: [ "stop", "length", "content_filter" ] - index: - type: integer - logprobs: - type: object - nullable: true - properties: - text_offset: - type: array - items: - type: integer - token_logprobs: - type: array - items: - type: number - tokens: - type: array - items: - type: string - top_logprobs: - type: array - items: - type: object - additionalProperties: - type: number - text: - type: string - created: - type: integer - description: The Unix timestamp (in seconds) of when the completion was created. - model: - type: string - description: The model used for completion. - system_fingerprint: - type: string - description: | - This fingerprint represents the backend configuration that the model runs with. - - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. - object: - type: string - description: The object type, which is always "text_completion" - enum: [ text_completion ] - usage: - $ref: "#/components/schemas/CompletionUsage" - required: - - id - - object - - created - - model - - choices - x-oaiMeta: - name: The completion object - legacy: true - example: | - { - "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", - "object": "text_completion", - "created": 1589478378, - "model": "gpt-4-turbo", - "choices": [ - { - "text": "\n\nThis is indeed a test", - "index": 0, - "logprobs": null, - "finish_reason": "length" - } - ], - "usage": { - "prompt_tokens": 5, - "completion_tokens": 7, - "total_tokens": 12 - } - } - - ChatCompletionRequestMessageContentPartText: - type: object - title: Text content part - properties: - type: - type: string - enum: [ "text" ] - description: The type of the content part. - text: - type: string - description: The text content. - required: - - type - - text - - ChatCompletionRequestMessageContentPartImage: - type: object - title: Image content part - properties: - type: - type: string - enum: [ "image_url" ] - description: The type of the content part. - image_url: - type: object - properties: - url: - type: string - description: Either a URL of the image or the base64 encoded image data. - format: uri - detail: - type: string - description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). - enum: [ "auto", "low", "high" ] - default: "auto" - required: - - url - required: - - type - - image_url - - ChatCompletionRequestMessageContentPartRefusal: - type: object - title: Refusal content part - properties: - type: - type: string - enum: [ "refusal" ] - description: The type of the content part. - refusal: - type: string - description: The refusal message generated by the model. 
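The `image_url` content part defined above is what turns a chat message into vision input: the `url` field carries either a normal URL or base64 image data, and `detail` picks the fidelity level. A sketch of one such user message, assuming a vision-capable model such as `gpt-4o`, the usual `OPENAI_API_KEY`, and a purely illustrative image URL.

```python
import os
import requests

message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What is in this image?"},
        {"type": "image_url",
         "image_url": {"url": "https://example.com/cat.png",  # or a base64 data URL
                       "detail": "auto"}},                    # "auto" | "low" | "high"
    ],
}
resp = requests.post(
    "https://api.openai.com/v1/chat/completions",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    json={"model": "gpt-4o", "messages": [message]},
)
print(resp.json()["choices"][0]["message"]["content"])
```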
- required: - - type - - refusal - - ChatCompletionRequestMessage: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" - - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" - - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" - - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" - - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" - x-oaiExpandable: true - - ChatCompletionRequestSystemMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - x-oaiExpandable: true - - ChatCompletionRequestUserMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" - x-oaiExpandable: true - - ChatCompletionRequestAssistantMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartRefusal" - x-oaiExpandable: true - - ChatCompletionRequestToolMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - x-oaiExpandable: true - - ChatCompletionRequestSystemMessage: - type: object - title: System message - properties: - content: - description: The contents of the system message. - oneOf: - - type: string - description: The contents of the system message. - title: Text content - - type: array - description: An array of content parts with a defined type. For system messages, only type `text` is supported. - title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestSystemMessageContentPart" - minItems: 1 - role: - type: string - enum: [ "system" ] - description: The role of the messages author, in this case `system`. - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - required: - - content - - role - - ChatCompletionRequestUserMessage: - type: object - title: User message - properties: - content: - description: | - The contents of the user message. - oneOf: - - type: string - description: The text contents of the message. - title: Text content - - type: array - description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. - title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestUserMessageContentPart" - minItems: 1 - x-oaiExpandable: true - role: - type: string - enum: [ "user" ] - description: The role of the messages author, in this case `user`. - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - required: - - content - - role - - ChatCompletionRequestAssistantMessage: - type: object - title: Assistant message - properties: - content: - nullable: true - oneOf: - - type: string - description: The contents of the assistant message. - title: Text content - - type: array - description: An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. 
- title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessageContentPart" - minItems: 1 - description: | - The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. - refusal: - nullable: true - type: string - description: The refusal message by the assistant. - role: - type: string - enum: [ "assistant" ] - description: The role of the messages author, in this case `assistant`. - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - tool_calls: - $ref: "#/components/schemas/ChatCompletionMessageToolCalls" - function_call: - type: object - deprecated: true - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - nullable: true - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - required: - - arguments - - name - required: - - role - - FineTuneChatCompletionRequestAssistantMessage: - allOf: - - type: object - title: Assistant message - deprecated: false - properties: - weight: - type: integer - enum: [ 0, 1 ] - description: "Controls whether the assistant message is trained against (0 or 1)" - - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" - required: - - role - - ChatCompletionRequestToolMessage: - type: object - title: Tool message - properties: - role: - type: string - enum: [ "tool" ] - description: The role of the messages author, in this case `tool`. - content: - oneOf: - - type: string - description: The contents of the tool message. - title: Text content - - type: array - description: An array of content parts with a defined type. For tool messages, only type `text` is supported. - title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestToolMessageContentPart" - minItems: 1 - description: The contents of the tool message. - tool_call_id: - type: string - description: Tool call that this message is responding to. - required: - - role - - content - - tool_call_id - - ChatCompletionRequestFunctionMessage: - type: object - title: Function message - deprecated: true - properties: - role: - type: string - enum: [ "function" ] - description: The role of the messages author, in this case `function`. - content: - nullable: true - type: string - description: The contents of the function message. - name: - type: string - description: The name of the function to call. - required: - - role - - content - - name - - FunctionParameters: - type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." 
- additionalProperties: true - - ChatCompletionFunctions: - type: object - deprecated: true - properties: - description: - type: string - description: A description of what the function does, used by the model to choose when and how to call the function. - name: - type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - parameters: - $ref: "#/components/schemas/FunctionParameters" - required: - - name - - ChatCompletionFunctionCallOption: - type: object - description: > - Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. - properties: - name: - type: string - description: The name of the function to call. - required: - - name - - ChatCompletionTool: - type: object - properties: - type: - type: string - enum: [ "function" ] - description: The type of the tool. Currently, only `function` is supported. - function: - $ref: "#/components/schemas/FunctionObject" - required: - - type - - function - - FunctionObject: - type: object - properties: - description: - type: string - description: A description of what the function does, used by the model to choose when and how to call the function. - name: - type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - parameters: - $ref: "#/components/schemas/FunctionParameters" - strict: - type: boolean - nullable: true - default: false - description: Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling). - required: - - name - - ResponseFormatText: - type: object - properties: - type: - type: string - description: "The type of response format being defined: `text`" - enum: [ "text" ] - required: - - type - - ResponseFormatJsonObject: - type: object - properties: - type: - type: string - description: "The type of response format being defined: `json_object`" - enum: [ "json_object" ] - required: - - type - - ResponseFormatJsonSchemaSchema: - type: object - description: "The schema for the response format, described as a JSON Schema object." - additionalProperties: true - - ResponseFormatJsonSchema: - type: object - properties: - type: - type: string - description: 'The type of response format being defined: `json_schema`' - enum: [ 'json_schema' ] - json_schema: - type: object - properties: - description: - type: string - description: A description of what the response format is for, used by the model to determine how to respond in the format. - name: - type: string - description: The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - schema: - $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' - strict: - type: boolean - nullable: true - default: false - description: Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). 
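A `ChatCompletionTool` wraps a `FunctionObject`, whose `parameters` field is an ordinary JSON Schema object per `FunctionParameters`, and `strict: true` opts into the structured-outputs subset. A sketch of one tool definition; the weather function is purely illustrative, and the `additionalProperties`/`required` constraints reflect general knowledge of strict mode rather than anything stated in the lines above.

```python
# A single entry for a chat completion request's `tools` array.
get_weather_tool = {
    "type": "function",                       # only "function" tools are defined in this spec
    "function": {
        "name": "get_current_weather",        # a-z, A-Z, 0-9, underscores, dashes; max 64 chars
        "description": "Get the current weather for a city.",
        "strict": True,                       # follow the schema exactly (Structured Outputs subset)
        "parameters": {                       # FunctionParameters: a plain JSON Schema object
            "type": "object",
            "properties": {
                "city": {"type": "string"},
                "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
            },
            "required": ["city", "unit"],
            "additionalProperties": False,    # expected by strict mode
        },
    },
}
```

Omitting `parameters` entirely is also valid and, per the description above, declares a function that takes no arguments.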
- required: - - type - - name - required: - - type - - json_schema - - ChatCompletionToolChoiceOption: - description: | - Controls which (if any) tool is called by the model. - `none` means the model will not call any tool and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools. - Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools are present. - oneOf: - - type: string - description: > - `none` means the model will not call any tool and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools. - enum: [ none, auto, required ] - - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" - x-oaiExpandable: true - - ChatCompletionNamedToolChoice: - type: object - description: Specifies a tool the model should use. Use to force the model to call a specific function. - properties: - type: - type: string - enum: [ "function" ] - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - required: - - name - required: - - type - - function - - ParallelToolCalls: - description: Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. - type: boolean - default: true - - ChatCompletionMessageToolCalls: - type: array - description: The tool calls generated by the model, such as function calls. - items: - $ref: "#/components/schemas/ChatCompletionMessageToolCall" - - ChatCompletionMessageToolCall: - type: object - properties: - # TODO: index included when streaming - id: - type: string - description: The ID of the tool call. - type: - type: string - enum: [ "function" ] - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - description: The function that the model called. - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - required: - - name - - arguments - required: - - id - - type - - function - - ChatCompletionMessageToolCallChunk: - type: object - properties: - index: - type: integer - id: - type: string - description: The ID of the tool call. - type: - type: string - enum: [ "function" ] - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. 
- required: - - index - - # Note, this isn't referenced anywhere, but is kept as a convenience to record all possible roles in one place. - ChatCompletionRole: - type: string - description: The role of the author of a message - enum: - - system - - user - - assistant - - tool - - function - - ChatCompletionStreamOptions: - description: | - Options for streaming response. Only set this when you set `stream: true`. - type: object - nullable: true - default: null - properties: - include_usage: - type: boolean - description: | - If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. - - ChatCompletionResponseMessage: - type: object - description: A chat completion message generated by the model. - properties: - content: - type: string - description: The contents of the message. - nullable: true - refusal: - type: string - description: The refusal message generated by the model. - nullable: true - tool_calls: - $ref: "#/components/schemas/ChatCompletionMessageToolCalls" - role: - type: string - enum: [ "assistant" ] - description: The role of the author of this message. - function_call: - type: object - deprecated: true - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - required: - - name - - arguments - required: - - role - - content - - refusal - - ChatCompletionStreamResponseDelta: - type: object - description: A chat completion delta generated by streamed model responses. - properties: - content: - type: string - description: The contents of the chunk message. - nullable: true - function_call: - deprecated: true - type: object - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - tool_calls: - type: array - items: - $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" - role: - type: string - enum: [ "system", "user", "assistant", "tool" ] - description: The role of the author of this message. - refusal: - type: string - description: The refusal message generated by the model. - nullable: true - - CreateChatCompletionRequest: - type: object - properties: - messages: - description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). 
- type: array - minItems: 1 - items: - $ref: "#/components/schemas/ChatCompletionRequestMessage" - model: - description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. - example: "gpt-4o" - anyOf: - - type: string - - type: string - enum: - [ - "o1-preview", - "o1-preview-2024-09-12", - "o1-mini", - "o1-mini-2024-09-12", - "gpt-4o", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "chatgpt-4o-latest", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - frequency_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: *completions_frequency_penalty_description - logit_bias: - type: object - x-oaiTypeLabel: map - default: null - nullable: true - additionalProperties: - type: integer - description: | - Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - logprobs: - description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. - type: boolean - default: false - nullable: true - top_logprobs: - description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. - type: integer - minimum: 0 - maximum: 20 - nullable: true - max_tokens: - description: | - The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API. - - This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o1 series models](/docs/guides/reasoning). - type: integer - nullable: true - deprecated: true - max_completion_tokens: - description: | - An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). - type: integer - nullable: true - - n: - type: integer - minimum: 1 - maximum: 128 - default: 1 - example: 1 - nullable: true - description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. 
- presence_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: *completions_presence_penalty_description - response_format: - description: | - An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - oneOf: - - $ref: "#/components/schemas/ResponseFormatText" - - $ref: "#/components/schemas/ResponseFormatJsonObject" - - $ref: "#/components/schemas/ResponseFormatJsonSchema" - x-oaiExpandable: true - seed: - type: integer - minimum: -9223372036854775808 - maximum: 9223372036854775807 - nullable: true - description: | - This feature is in Beta. - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - x-oaiMeta: - beta: true - service_tier: - description: | - Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - - When not set, the default behavior is 'auto'. - - When this parameter is set, the response body will include the `service_tier` utilized. - type: string - enum: [ "auto", "default" ] - nullable: true - default: null - stop: - description: | - Up to 4 sequences where the API will stop generating further tokens. - default: null - oneOf: - - type: string - nullable: true - - type: array - minItems: 1 - maxItems: 4 - items: - type: string - stream: - description: > - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
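Two of the request options above deserve a concrete shape: `response_format: {"type": "json_object"}` only behaves well when a system or user message also tells the model to emit JSON (otherwise, as the note warns, it may stream whitespace until the token limit), and `seed` asks for best-effort determinism that should be checked against `system_fingerprint`. A sketch combining both, same `OPENAI_API_KEY` assumption.

```python
import os
import requests

resp = requests.post(
    "https://api.openai.com/v1/chat/completions",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    json={
        "model": "gpt-4o-mini",
        "messages": [
            # JSON mode requires an explicit instruction to produce JSON.
            {"role": "system", "content": "Reply with a JSON object."},
            {"role": "user", "content": "List three primary colors."},
        ],
        "response_format": {"type": "json_object"},
        "seed": 42,                      # best-effort determinism, not a guarantee
    },
).json()
print(resp["system_fingerprint"])        # compare across runs to spot backend changes
print(resp["choices"][0]["message"]["content"])
```

For `json_schema` response formats, the `schema`/`strict` fields shown earlier take the place of the system-message convention, but the instruction to respond in the requested format still helps.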
- type: boolean - nullable: true - default: false - stream_options: - $ref: "#/components/schemas/ChatCompletionStreamOptions" - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *completions_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *completions_top_p_description - tools: - type: array - description: > - A list of tools the model may call. Currently, only functions are supported as a tool. - Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. - items: - $ref: "#/components/schemas/ChatCompletionTool" - tool_choice: - $ref: "#/components/schemas/ChatCompletionToolChoiceOption" - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - user: *end_user_param_configuration - function_call: - deprecated: true - description: | - Deprecated in favor of `tool_choice`. - - Controls which (if any) function is called by the model. - `none` means the model will not call a function and instead generates a message. - `auto` means the model can pick between generating a message or calling a function. - Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. - - `none` is the default when no functions are present. `auto` is the default if functions are present. - oneOf: - - type: string - description: > - `none` means the model will not call a function and instead generates a message. - `auto` means the model can pick between generating a message or calling a function. - enum: [ none, auto ] - - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" - x-oaiExpandable: true - functions: - deprecated: true - description: | - Deprecated in favor of `tools`. - - A list of functions the model may generate JSON inputs for. - type: array - minItems: 1 - maxItems: 128 - items: - $ref: "#/components/schemas/ChatCompletionFunctions" - - required: - - model - - messages - - CreateChatCompletionResponse: - type: object - description: Represents a chat completion response returned by model, based on the provided input. - properties: - id: - type: string - description: A unique identifier for the chat completion. - choices: - type: array - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. - items: - type: object - required: - - finish_reason - - index - - message - - logprobs - properties: - finish_reason: - type: string - description: &chat_completion_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, - `length` if the maximum number of tokens specified in the request was reached, - `content_filter` if content was omitted due to a flag from our content filters, - `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. - enum: - [ - "stop", - "length", - "tool_calls", - "content_filter", - "function_call", - ] - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: "#/components/schemas/ChatCompletionResponseMessage" - logprobs: &chat_completion_response_logprobs - description: Log probability information for the choice. - type: object - nullable: true - properties: - content: - description: A list of message content tokens with log probability information. 
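The `tools`/`tool_choice` fields above (and the deprecated `functions`/`function_call` pair) drive function calling, and the response's `finish_reason` switches to `tool_calls` when the model decides to call one. A rough sketch; `get_weather` and its parameters are hypothetical and not part of this spec:

```python
import json
import os
import requests

url = "https://api.openai.com/v1/chat/completions"  # assumed path, not shown in this hunk
headers = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"}

body = {
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "What's the weather in Oslo?"}],
    "tools": [
        {
            "type": "function",
            "function": {
                "name": "get_weather",  # hypothetical tool for illustration only
                "description": "Look up the current weather for a city.",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ],
    "tool_choice": "auto",
}

choice = requests.post(url, headers=headers, json=body, timeout=60).json()["choices"][0]
if choice["finish_reason"] == "tool_calls":
    for call in choice["message"]["tool_calls"]:
        args = json.loads(call["function"]["arguments"])  # arguments arrive as a JSON string
        print(call["function"]["name"], args)
```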
- type: array - items: - $ref: "#/components/schemas/ChatCompletionTokenLogprob" - nullable: true - refusal: - description: A list of message refusal tokens with log probability information. - type: array - items: - $ref: "#/components/schemas/ChatCompletionTokenLogprob" - nullable: true - required: - - content - - refusal - - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. - service_tier: - description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. - type: string - enum: [ "scale", "default" ] - example: "scale" - nullable: true - system_fingerprint: - type: string - description: | - This fingerprint represents the backend configuration that the model runs with. - - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. - object: - type: string - description: The object type, which is always `chat.completion`. - enum: [ chat.completion ] - usage: - $ref: "#/components/schemas/CompletionUsage" - required: - - choices - - created - - id - - model - - object - x-oaiMeta: - name: The chat completion object - group: chat - example: *chat_completion_example - - CreateChatCompletionFunctionResponse: - type: object - description: Represents a chat completion response returned by model, based on the provided input. - properties: - id: - type: string - description: A unique identifier for the chat completion. - choices: - type: array - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. - items: - type: object - required: - - finish_reason - - index - - message - - logprobs - properties: - finish_reason: - type: string - description: - &chat_completion_function_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. - enum: - [ "stop", "length", "function_call", "content_filter" ] - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: "#/components/schemas/ChatCompletionResponseMessage" - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. - system_fingerprint: - type: string - description: | - This fingerprint represents the backend configuration that the model runs with. - - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. - object: - type: string - description: The object type, which is always `chat.completion`. - enum: [ chat.completion ] - usage: - $ref: "#/components/schemas/CompletionUsage" - required: - - choices - - created - - id - - model - - object - x-oaiMeta: - name: The chat completion object - group: chat - example: *chat_completion_function_example - - ChatCompletionTokenLogprob: - type: object - properties: - token: &chat_completion_response_logprobs_token - description: The token. 
- type: string - logprob: &chat_completion_response_logprobs_token_logprob - description: The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. - type: number - bytes: &chat_completion_response_logprobs_bytes - description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - type: array - items: - type: integer - nullable: true - top_logprobs: - description: List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. - type: array - items: - type: object - properties: - token: *chat_completion_response_logprobs_token - logprob: *chat_completion_response_logprobs_token_logprob - bytes: *chat_completion_response_logprobs_bytes - required: - - token - - logprob - - bytes - required: - - token - - logprob - - bytes - - top_logprobs - - ListPaginatedFineTuningJobsResponse: - type: object - properties: - data: - type: array - items: - $ref: "#/components/schemas/FineTuningJob" - has_more: - type: boolean - object: - type: string - enum: [ list ] - required: - - object - - data - - has_more - - CreateChatCompletionStreamResponse: - type: object - description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. - properties: - id: - type: string - description: A unique identifier for the chat completion. Each chunk has the same ID. - choices: - type: array - description: | - A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the - last chunk if you set `stream_options: {"include_usage": true}`. - items: - type: object - required: - - delta - - finish_reason - - index - properties: - delta: - $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" - logprobs: *chat_completion_response_logprobs - finish_reason: - type: string - description: *chat_completion_finish_reason_description - enum: - [ - "stop", - "length", - "tool_calls", - "content_filter", - "function_call", - ] - nullable: true - index: - type: integer - description: The index of the choice in the list of choices. - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - model: - type: string - description: The model to generate the completion. - service_tier: - description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. - type: string - enum: [ "scale", "default" ] - example: "scale" - nullable: true - system_fingerprint: - type: string - description: | - This fingerprint represents the backend configuration that the model runs with. - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. - object: - type: string - description: The object type, which is always `chat.completion.chunk`. 
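The chunk schema above plus `stream_options: {"include_usage": true}` define the streaming contract: data-only server-sent events, an empty `choices` array on the trailing usage chunk, and a final `data: [DONE]` sentinel. A minimal consumer sketch using `requests`; the SSE parsing is deliberately naive and the endpoint/auth details are assumed:

```python
import json
import os
import requests

body = {
    "model": "gpt-4o-mini",
    "messages": [{"role": "user", "content": "Write a haiku about the sea."}],
    "stream": True,
    "stream_options": {"include_usage": True},  # adds a final usage-only chunk
}
with requests.post(
    "https://api.openai.com/v1/chat/completions",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    json=body, stream=True, timeout=60,
) as resp:
    for line in resp.iter_lines():
        if not line or not line.startswith(b"data: "):
            continue
        payload = line[len(b"data: "):]
        if payload == b"[DONE]":
            break
        chunk = json.loads(payload)
        if chunk["choices"]:                           # empty on the trailing usage chunk
            delta = chunk["choices"][0]["delta"]
            print(delta.get("content") or "", end="", flush=True)
        elif chunk.get("usage"):
            print("\n", chunk["usage"])
```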
- enum: [ chat.completion.chunk ] - usage: - type: object - description: | - An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. - When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request. - properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). - required: - - prompt_tokens - - completion_tokens - - total_tokens - required: - - choices - - created - - id - - model - - object - x-oaiMeta: - name: The chat completion chunk object - group: chat - example: *chat_completion_chunk_example - - CreateChatCompletionImageResponse: - type: object - description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. - x-oaiMeta: - name: The chat completion chunk object - group: chat - example: *chat_completion_image_example - - CreateImageRequest: - type: object - properties: - prompt: - description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. - type: string - example: "A cute baby sea otter" - model: - anyOf: - - type: string - - type: string - enum: [ "dall-e-2", "dall-e-3" ] - x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-3" - nullable: true - description: The model to use for image generation. - n: &images_n - type: integer - minimum: 1 - maximum: 10 - default: 1 - example: 1 - nullable: true - description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. - quality: - type: string - enum: [ "standard", "hd" ] - default: "standard" - example: "standard" - description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. - response_format: &images_response_format - type: string - enum: [ "url", "b64_json" ] - default: "url" - example: "url" - nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. - size: &images_size - type: string - enum: [ "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792" ] - default: "1024x1024" - example: "1024x1024" - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. - style: - type: string - enum: [ "vivid", "natural" ] - default: "vivid" - example: "vivid" - nullable: true - description: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. 
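`CreateImageRequest` above only requires `prompt`; `quality`, `style`, and the larger `size` values are `dall-e-3`-only, and `url` responses expire after roughly 60 minutes. A sketch under the same endpoint/auth assumptions as the earlier snippets:

```python
import os
import requests

body = {
    "model": "dall-e-3",
    "prompt": "A cute baby sea otter wearing a beret",
    "n": 1,                    # dall-e-3 only supports n=1
    "size": "1024x1024",
    "quality": "hd",
    "style": "natural",
    "response_format": "url",  # URLs expire after ~60 minutes; use b64_json to keep the bytes
}
resp = requests.post(
    "https://api.openai.com/v1/images/generations",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    json=body, timeout=120,
)
resp.raise_for_status()
for image in resp.json()["data"]:
    print(image.get("revised_prompt"), image.get("url"))
```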
- user: *end_user_param_configuration - required: - - prompt - - ImagesResponse: - properties: - created: - type: integer - data: - type: array - items: - $ref: "#/components/schemas/Image" - required: - - created - - data - - Image: - type: object - description: Represents the url or the content of an image generated by the OpenAI API. - properties: - b64_json: - type: string - description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. - url: - type: string - description: The URL of the generated image, if `response_format` is `url` (default). - revised_prompt: - type: string - description: The prompt that was used to generate the image, if there was any revision to the prompt. - x-oaiMeta: - name: The image object - example: | - { - "url": "...", - "revised_prompt": "..." - } - - CreateImageEditRequest: - type: object - properties: - image: - description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. - type: string - format: binary - prompt: - description: A text description of the desired image(s). The maximum length is 1000 characters. - type: string - example: "A cute baby sea otter wearing a beret" - mask: - description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. - type: string - format: binary - model: - anyOf: - - type: string - - type: string - enum: [ "dall-e-2" ] - x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-2" - nullable: true - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - n: - type: integer - minimum: 1 - maximum: 10 - default: 1 - example: 1 - nullable: true - description: The number of images to generate. Must be between 1 and 10. - size: &dalle2_images_size - type: string - enum: [ "256x256", "512x512", "1024x1024" ] - default: "1024x1024" - example: "1024x1024" - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - response_format: *images_response_format - user: *end_user_param_configuration - required: - - prompt - - image - - CreateImageVariationRequest: - type: object - properties: - image: - description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. - type: string - format: binary - model: - anyOf: - - type: string - - type: string - enum: [ "dall-e-2" ] - x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-2" - nullable: true - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - n: *images_n - response_format: *images_response_format - size: *dalle2_images_size - user: *end_user_param_configuration - required: - - image - - CreateModerationRequest: - type: object - properties: - input: - description: The input text to classify - oneOf: - - type: string - default: "" - example: "I want to kill them." - - type: array - items: - type: string - default: "" - example: "I want to kill them." - model: - description: | - Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. - - The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. 
If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. - nullable: false - default: "text-moderation-latest" - example: "text-moderation-stable" - anyOf: - - type: string - - type: string - enum: [ "text-moderation-latest", "text-moderation-stable" ] - x-oaiTypeLabel: string - required: - - input - - CreateModerationResponse: - type: object - description: Represents if a given text input is potentially harmful. - properties: - id: - type: string - description: The unique identifier for the moderation request. - model: - type: string - description: The model used to generate the moderation results. - results: - type: array - description: A list of moderation objects. - items: - type: object - properties: - flagged: - type: boolean - description: Whether any of the below categories are flagged. - categories: - type: object - description: A list of the categories, and whether they are flagged or not. - properties: - hate: - type: boolean - description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. - hate/threatening: - type: boolean - description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. - harassment: - type: boolean - description: Content that expresses, incites, or promotes harassing language towards any target. - harassment/threatening: - type: boolean - description: Harassment content that also includes violence or serious harm towards any target. - self-harm: - type: boolean - description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. - self-harm/intent: - type: boolean - description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. - self-harm/instructions: - type: boolean - description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. - sexual: - type: boolean - description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). - sexual/minors: - type: boolean - description: Sexual content that includes an individual who is under 18 years old. - violence: - type: boolean - description: Content that depicts death, violence, or physical injury. - violence/graphic: - type: boolean - description: Content that depicts death, violence, or physical injury in graphic detail. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - category_scores: - type: object - description: A list of the categories along with their scores as predicted by model. - properties: - hate: - type: number - description: The score for the category 'hate'. - hate/threatening: - type: number - description: The score for the category 'hate/threatening'. 
- harassment: - type: number - description: The score for the category 'harassment'. - harassment/threatening: - type: number - description: The score for the category 'harassment/threatening'. - self-harm: - type: number - description: The score for the category 'self-harm'. - self-harm/intent: - type: number - description: The score for the category 'self-harm/intent'. - self-harm/instructions: - type: number - description: The score for the category 'self-harm/instructions'. - sexual: - type: number - description: The score for the category 'sexual'. - sexual/minors: - type: number - description: The score for the category 'sexual/minors'. - violence: - type: number - description: The score for the category 'violence'. - violence/graphic: - type: number - description: The score for the category 'violence/graphic'. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - required: - - flagged - - categories - - category_scores - required: - - id - - model - - results - x-oaiMeta: - name: The moderation object - example: *moderation_example - - ListFilesResponse: - type: object - properties: - data: - type: array - items: - $ref: "#/components/schemas/OpenAIFile" - object: - type: string - enum: [ list ] - required: - - object - - data - - CreateFileRequest: - type: object - additionalProperties: false - properties: - file: - description: | - The File object (not file name) to be uploaded. - type: string - format: binary - purpose: - description: | - The intended purpose of the uploaded file. - - Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). - type: string - enum: [ "assistants", "batch", "fine-tune", "vision" ] - required: - - file - - purpose - - DeleteFileResponse: - type: object - properties: - id: - type: string - object: - type: string - enum: [ file ] - deleted: - type: boolean - required: - - id - - object - - deleted - - CreateUploadRequest: - type: object - additionalProperties: false - properties: - filename: - description: | - The name of the file to upload. - type: string - purpose: - description: | - The intended purpose of the uploaded file. - - See the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). - type: string - enum: [ "assistants", "batch", "fine-tune", "vision" ] - bytes: - description: | - The number of bytes in the file you are uploading. - type: integer - mime_type: - description: | - The MIME type of the file. - - This must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision. - type: string - required: - - filename - - purpose - - bytes - - mime_type - - AddUploadPartRequest: - type: object - additionalProperties: false - properties: - data: - description: | - The chunk of bytes for this Part. - type: string - format: binary - required: - - data - - CompleteUploadRequest: - type: object - additionalProperties: false - properties: - part_ids: - type: array - description: | - The ordered list of Part IDs. - items: - type: string - md5: - description: | - The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect. 
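Tying the `CreateModerationRequest`/`CreateModerationResponse` schemas above together: `input` may be a string or an array, and each result carries a `flagged` flag plus parallel boolean `categories` and numeric `category_scores` maps. A small sketch (endpoint path and auth are assumptions):

```python
import os
import requests

resp = requests.post(
    "https://api.openai.com/v1/moderations",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    json={
        "input": ["I want to kill them.", "Have a nice day."],
        "model": "text-moderation-latest",
    },
    timeout=30,
)
for result in resp.json()["results"]:
    if result["flagged"]:
        # keep only the categories that actually tripped, with their scores
        hits = {name: result["category_scores"][name]
                for name, tripped in result["categories"].items() if tripped}
        print("flagged:", hits)
```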
- type: string - required: - - part_ids - - CancelUploadRequest: - type: object - additionalProperties: false - - CreateFineTuningJobRequest: - type: object - properties: - model: - description: | - The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned). - example: "gpt-4o-mini" - anyOf: - - type: string - - type: string - enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] - x-oaiTypeLabel: string - training_file: - description: | - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/create) for how to upload a file. - - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. - - The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - type: string - example: "file-abc123" - hyperparameters: - type: object - description: The hyperparameters used for the fine-tuning job. - properties: - batch_size: - description: | - Number of examples in each batch. A larger batch size means that model parameters - are updated less frequently, but with lower variance. - oneOf: - - type: string - enum: [ auto ] - - type: integer - minimum: 1 - maximum: 256 - default: auto - learning_rate_multiplier: - description: | - Scaling factor for the learning rate. A smaller learning rate may be useful to avoid - overfitting. - oneOf: - - type: string - enum: [ auto ] - - type: number - minimum: 0 - exclusiveMinimum: true - default: auto - n_epochs: - description: | - The number of epochs to train the model for. An epoch refers to one full cycle - through the training dataset. - oneOf: - - type: string - enum: [ auto ] - - type: integer - minimum: 1 - maximum: 50 - default: auto - suffix: - description: | - A string of up to 64 characters that will be added to your fine-tuned model name. - - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. - type: string - minLength: 1 - maxLength: 64 - default: null - nullable: true - validation_file: - description: | - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation - metrics periodically during fine-tuning. These metrics can be viewed in - the fine-tuning results file. - The same data should not be present in both train and validation files. - - Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - type: string - nullable: true - example: "file-abc123" - integrations: - type: array - description: A list of integrations to enable for your fine-tuning job. - nullable: true - items: - type: object - required: - - type - - wandb - properties: - type: - description: | - The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. - oneOf: - - type: string - enum: [ wandb ] - wandb: - type: object - description: | - The settings for your integration with Weights and Biases. This payload specifies the project that - metrics will be sent to. 
Optionally, you can set an explicit display name for your run, add tags - to your run, and set a default entity (team, username, etc) to be associated with your run. - required: - - project - properties: - project: - description: | - The name of the project that the new run will be created under. - type: string - example: "my-wandb-project" - name: - description: | - A display name to set for the run. If not set, we will use the Job ID as the name. - nullable: true - type: string - entity: - description: | - The entity to use for the run. This allows you to set the team or username of the WandB user that you would - like associated with the run. If not set, the default entity for the registered WandB API key is used. - nullable: true - type: string - tags: - description: | - A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some - default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". - type: array - items: - type: string - example: "custom-tag" - - seed: - description: | - The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. - If a seed is not specified, one will be generated for you. - type: integer - nullable: true - minimum: 0 - maximum: 2147483647 - example: 42 - required: - - model - - training_file - - ListFineTuningJobEventsResponse: + ListModelsResponse: type: object properties: + object: + type: string + enum: [list] data: type: array items: - $ref: "#/components/schemas/FineTuningJobEvent" - object: - type: string - enum: [ list ] + $ref: "#/components/schemas/Model" required: - object - data - - ListFineTuningJobCheckpointsResponse: + DeleteModelResponse: type: object properties: - data: - type: array - items: - $ref: "#/components/schemas/FineTuningJobCheckpoint" - object: - type: string - enum: [ list ] - first_id: - type: string - nullable: true - last_id: + id: type: string - nullable: true - has_more: + deleted: type: boolean + object: + type: string required: + - id - object - - data - - has_more + - deleted - CreateEmbeddingRequest: + CreateCompletionRequest: type: object - additionalProperties: false properties: - input: - description: | - Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. - example: "The quick brown fox jumped over the lazy dog" + model: + description: &model_description | + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + anyOf: + - type: string + - type: string + enum: ["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"] + x-oaiTypeLabel: string + prompt: + description: &completions_prompt_description | + The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 
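`CreateFineTuningJobRequest`, defined earlier in this hunk, needs only `model` and `training_file`; hyperparameters accept either `"auto"` or explicit values, and a Weights and Biases integration can be attached per job. A sketch with placeholder file ID and project name:

```python
import os
import requests

body = {
    "model": "gpt-4o-mini",
    "training_file": "file-abc123",  # placeholder: upload a JSONL file with purpose "fine-tune" first
    "suffix": "custom-model-name",
    "hyperparameters": {"n_epochs": "auto", "batch_size": "auto"},
    "integrations": [
        {"type": "wandb", "wandb": {"project": "my-wandb-project"}}
    ],
    "seed": 42,
}
resp = requests.post(
    "https://api.openai.com/v1/fine_tuning/jobs",  # assumed path, not shown in this hunk
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    json=body, timeout=30,
)
job = resp.json()
print(job["id"], job["status"])  # poll the job until it finishes
```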
+ + Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + default: "<|endoftext|>" + nullable: true oneOf: - type: string - title: string - description: The string that will be turned into an embedding. default: "" example: "This is a test." - type: array - title: array - description: The array of strings that will be turned into an embedding. - minItems: 1 - maxItems: 2048 items: type: string default: "" - example: "['This is a test.']" + example: "This is a test." - type: array - title: array - description: The array of integers that will be turned into an embedding. minItems: 1 - maxItems: 2048 items: type: integer example: "[1212, 318, 257, 1332, 13]" - type: array - title: array - description: The array of arrays containing integers that will be turned into an embedding. minItems: 1 - maxItems: 2048 items: type: array minItems: 1 items: type: integer example: "[[1212, 318, 257, 1332, 13]]" - x-oaiExpandable: true - model: - description: *model_description - example: "text-embedding-3-small" - anyOf: - - type: string - - type: string - enum: - [ - "text-embedding-ada-002", - "text-embedding-3-small", - "text-embedding-3-large", - ] - x-oaiTypeLabel: string - encoding_format: - description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." - example: "float" - default: "float" - type: string - enum: [ "float", "base64" ] - dimensions: - description: | - The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. + best_of: type: integer - minimum: 1 - user: *end_user_param_configuration - required: - - model - - input + default: 1 + minimum: 0 + maximum: 20 + nullable: true + description: &completions_best_of_description | + Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. - CreateEmbeddingResponse: - type: object - properties: - data: - type: array - description: The list of embeddings generated by the model. - items: - $ref: "#/components/schemas/Embedding" - model: - type: string - description: The name of the model used to generate the embedding. - object: - type: string - description: The object type, which is always "list". - enum: [ list ] - usage: + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + echo: + type: boolean + default: false + nullable: true + description: &completions_echo_description > + Echo back the prompt in addition to the completion + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_frequency_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + logit_bias: &completions_logit_bias type: object - description: The usage information for the request. 
- properties: - prompt_tokens: - type: integer - description: The number of tokens used by the prompt. - total_tokens: - type: integer - description: The total number of tokens used by the request. - required: - - prompt_tokens - - total_tokens - required: - - object - - model - - data - - usage + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: &completions_logit_bias_description | + Modify the likelihood of specified tokens appearing in the completion. - CreateTranscriptionRequest: - type: object - additionalProperties: false - properties: - file: - description: | - The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - type: string - x-oaiTypeLabel: file - format: binary - model: + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + logprobs: &completions_logprobs_configuration + type: integer + minimum: 0 + maximum: 5 + default: null + nullable: true + description: &completions_logprobs_description | + Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + + The maximum value for `logprobs` is 5. + max_tokens: + type: integer + minimum: 0 + default: 16 + example: 16 + nullable: true + description: &completions_max_tokens_description | + The maximum number of [tokens](/tokenizer) that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: &completions_completions_description | + How many completions to generate for each prompt. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_presence_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + seed: &completions_seed_param + type: integer + minimum: -9223372036854775808 + maximum: 9223372036854775807 + nullable: true description: | - ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. 
- example: whisper-1 - anyOf: - - type: string + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + stop: + description: &completions_stop_description > + Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + default: null + nullable: true + oneOf: - type: string - enum: [ "whisper-1" ] - x-oaiTypeLabel: string - language: - description: | - The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. - type: string - prompt: + default: <|endoftext|> + example: "\n" + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + example: '["\n"]' + stream: + description: > + Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean + nullable: true + default: false + stream_options: + $ref: "#/components/schemas/ChatCompletionStreamOptions" + suffix: description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + The suffix that comes after a completion of inserted text. + + This parameter is only supported for `gpt-3.5-turbo-instruct`. + default: null + nullable: true type: string - response_format: - $ref: "#/components/schemas/AudioResponseFormat" + example: "test." temperature: - description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. type: number - default: 0 - timestamp_granularities[]: - description: | - The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. - type: array - items: - type: string - enum: - - word - - segment - default: [ segment ] - required: - - file - - model + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: &completions_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - # Note: This does not currently support the non-default response format types. - CreateTranscriptionResponseJson: - type: object - description: Represents a transcription response returned by model, based on the provided input. - properties: - text: + We generally recommend altering this or `top_p` but not both. 
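The `CreateCompletionRequest` schema being added here is the legacy completions endpoint: a `prompt` instead of `messages`, plus `best_of`, `echo`, `suffix`, and integer `logprobs`. A sketch of a request against it (endpoint path and auth assumed, as before):

```python
import os
import requests

body = {
    "model": "gpt-3.5-turbo-instruct",
    "prompt": "Say this is a test",
    "max_tokens": 16,
    "temperature": 1,   # alter temperature or top_p, not both
    "stop": ["\n"],
    "n": 1,
    "logprobs": 2,      # returns up to logprobs+1 entries per token position
}
resp = requests.post(
    "https://api.openai.com/v1/completions",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    json=body, timeout=60,
)
choice = resp.json()["choices"][0]
print(choice["text"], choice["finish_reason"])
```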
+ top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &completions_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + user: &end_user_param_configuration type: string - description: The transcribed text. + example: user-1234 + description: | + A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). required: - - text - x-oaiMeta: - name: The transcription object (JSON) - group: audio - example: *basic_transcription_response_example + - model + - prompt - TranscriptionSegment: + CreateCompletionResponse: type: object + description: | + Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). properties: id: - type: integer - description: Unique identifier of the segment. - seek: - type: integer - description: Seek offset of the segment. - start: - type: number - format: float - description: Start time of the segment in seconds. - end: - type: number - format: float - description: End time of the segment in seconds. - text: type: string - description: Text content of the segment. - tokens: + description: A unique identifier for the completion. + choices: type: array + description: The list of completion choices the model generated for the input prompt. items: - type: integer - description: Array of token IDs for the text content. - temperature: - type: number - format: float - description: Temperature parameter used for generating the segment. - avg_logprob: - type: number - format: float - description: Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. - compression_ratio: - type: number - format: float - description: Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. - no_speech_prob: - type: number - format: float - description: Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. - required: - - id - - seek - - start - - end - - text - - tokens - - temperature - - avg_logprob - - compression_ratio - - no_speech_prob - - TranscriptionWord: - type: object - properties: - word: - type: string - description: The text content of the word. - start: - type: number - format: float - description: Start time of the word in seconds. - end: - type: number - format: float - description: End time of the word in seconds. - required: [ word, start, end ] - - CreateTranscriptionResponseVerboseJson: - type: object - description: Represents a verbose json transcription response returned by model, based on the provided input. - properties: - language: - type: string - description: The language of the input audio. - duration: + type: object + required: + - finish_reason + - index + - logprobs + - text + properties: + finish_reason: + type: string + description: &completion_finish_reason_description | + The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + or `content_filter` if content was omitted due to a flag from our content filters. + enum: ["stop", "length", "content_filter"] + index: + type: integer + logprobs: + type: object + nullable: true + properties: + text_offset: + type: array + items: + type: integer + token_logprobs: + type: array + items: + type: number + tokens: + type: array + items: + type: string + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: number + text: + type: string + created: + type: integer + description: The Unix timestamp (in seconds) of when the completion was created. + model: type: string - description: The duration of the input audio. - text: + description: The model used for completion. + system_fingerprint: type: string - description: The transcribed text. - words: - type: array - description: Extracted words and their corresponding timestamps. - items: - $ref: "#/components/schemas/TranscriptionWord" - segments: - type: array - description: Segments of the transcribed text and their corresponding details. - items: - $ref: "#/components/schemas/TranscriptionSegment" - required: [ language, duration, text ] - x-oaiMeta: - name: The transcription object (Verbose JSON) - group: audio - example: *verbose_transcription_response_example - - AudioResponseFormat: - description: | - The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. - type: string - enum: - - json - - text - - srt - - verbose_json - - vtt - default: json - - CreateTranslationRequest: - type: object - additionalProperties: false - properties: - file: description: | - The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: type: string - x-oaiTypeLabel: file - format: binary - model: - description: | - ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. - example: whisper-1 - anyOf: - - type: string - - type: string - enum: [ "whisper-1" ] - x-oaiTypeLabel: string - prompt: - description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. 
+ description: The object type, which is always "text_completion" + enum: [text_completion] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - id + - object + - created + - model + - choices + x-oaiMeta: + name: The completion object + legacy: true + example: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "gpt-4-turbo", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + + ChatCompletionRequestMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" + x-oaiExpandable: true + + ChatCompletionRequestMessageContentPartImage: + type: object + title: Image content part + properties: + type: type: string - response_format: - $ref: "#/components/schemas/AudioResponseFormat" - temperature: - description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. - type: number - default: 0 + enum: ["image_url"] + description: The type of the content part. + image_url: + type: object + properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + detail: + type: string + description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). + enum: ["auto", "low", "high"] + default: "auto" + required: + - url required: - - file - - model + - type + - image_url - # Note: This does not currently support the non-default response format types. - CreateTranslationResponseJson: + ChatCompletionRequestMessageContentPartText: type: object + title: Text content part properties: + type: + type: string + enum: ["text"] + description: The type of the content part. text: type: string + description: The text content. required: + - type - text - CreateTranslationResponseVerboseJson: + ChatCompletionRequestMessage: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + + ChatCompletionRequestSystemMessage: type: object + title: System message properties: - language: + content: + description: The contents of the system message. type: string - description: The language of the output translation (always `english`). - duration: + role: type: string - description: The duration of the input audio. - text: + enum: ["system"] + description: The role of the messages author, in this case `system`. + name: type: string - description: The translated text. - segments: - type: array - description: Segments of the translated text and their corresponding details. 
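The `ChatCompletionRequestMessageContentPart*` schemas added above let a user message's `content` be an array mixing `text` and `image_url` parts instead of a single string. A data-only sketch of such a message; the image URL is a placeholder, and the body would be posted to the same chat completions endpoint as the earlier snippets:

```python
# A user message whose content is an array of parts, per the schemas above.
vision_message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What is in this image?"},
        {
            "type": "image_url",
            "image_url": {
                "url": "https://example.com/otter.png",  # placeholder; a base64 data URL also works
                "detail": "low",                          # "auto" | "low" | "high"
            },
        },
    ],
}

body = {"model": "gpt-4o", "messages": [vision_message]}
```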
- items: - $ref: "#/components/schemas/TranscriptionSegment" - required: [ language, duration, text ] + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + required: + - content + - role - CreateSpeechRequest: + ChatCompletionRequestUserMessage: type: object - additionalProperties: false + title: User message properties: - model: + content: description: | - One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` - anyOf: - - type: string + The contents of the user message. + oneOf: - type: string - enum: [ "tts-1", "tts-1-hd" ] - x-oaiTypeLabel: string - input: - type: string - description: The text to generate audio for. The maximum length is 4096 characters. - maxLength: 4096 - voice: - description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + description: The text contents of the message. + title: Text content + - type: array + description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-visual-preview` model. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestMessageContentPart" + minItems: 1 + x-oaiExpandable: true + role: type: string - enum: [ "alloy", "echo", "fable", "onyx", "nova", "shimmer" ] - response_format: - description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." - default: "mp3" + enum: ["user"] + description: The role of the messages author, in this case `user`. + name: type: string - enum: [ "mp3", "opus", "aac", "flac", "wav", "pcm" ] - speed: - description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." - type: number - default: 1.0 - minimum: 0.25 - maximum: 4.0 + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. required: - - model - - input - - voice + - content + - role - Model: - title: Model - description: Describes an OpenAI model offering that can be used with the API. + ChatCompletionRequestAssistantMessage: + type: object + title: Assistant message properties: - id: + content: + nullable: true type: string - description: The model identifier, which can be referenced in the API endpoints. - created: - type: integer - description: The Unix timestamp (in seconds) when the model was created. - object: + description: | + The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + role: type: string - description: The object type, which is always "model". - enum: [ model ] - owned_by: + enum: ["assistant"] + description: The role of the messages author, in this case `assistant`. + name: type: string - description: The organization that owns the model. + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + function_call: + type: object + deprecated: true + description: "Deprecated and replaced by `tool_calls`. 
The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - arguments + - name required: - - id - - object - - created - - owned_by - x-oaiMeta: - name: The model object - example: *retrieve_model_response + - role - OpenAIFile: - title: OpenAIFile - description: The `File` object represents a document that has been uploaded to OpenAI. + ChatCompletionRequestToolMessage: + type: object + title: Tool message properties: - id: - type: string - description: The file identifier, which can be referenced in the API endpoints. - bytes: - type: integer - description: The size of the file, in bytes. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the file was created. - filename: - type: string - description: The name of the file. - object: - type: string - description: The object type, which is always `file`. - enum: [ "file" ] - purpose: + role: type: string - description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. - enum: - [ - "assistants", - "assistants_output", - "batch", - "batch_output", - "fine-tune", - "fine-tune-results", - "vision", - ] - status: + enum: ["tool"] + description: The role of the messages author, in this case `tool`. + content: type: string - deprecated: true - description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. - enum: [ "uploaded", "processed", "error" ] - status_details: + description: The contents of the tool message. + tool_call_id: type: string - deprecated: true - description: Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. + description: Tool call that this message is responding to. required: - - id - - object - - bytes - - created_at - - filename - - purpose - - status - x-oaiMeta: - name: The file object - example: | - { - "id": "file-abc123", - "object": "file", - "bytes": 120000, - "created_at": 1677610602, - "filename": "salesOverview.pdf", - "purpose": "assistants", - } - Upload: + - role + - content + - tool_call_id + + ChatCompletionRequestFunctionMessage: type: object - title: Upload - description: | - The Upload object can accept byte chunks in the form of Parts. + title: Function message + deprecated: true properties: - id: - type: string - description: The Upload unique identifier, which can be referenced in API endpoints. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload was created. - filename: - type: string - description: The name of the file to be uploaded. - bytes: - type: integer - description: The intended number of bytes to be uploaded. - purpose: + role: type: string - description: The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values. - status: + enum: ["function"] + description: The role of the messages author, in this case `function`. 
+ content: + nullable: true type: string - description: The status of the Upload. - enum: [ "pending", "completed", "cancelled", "expired" ] - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload was created. - object: + description: The contents of the function message. + name: type: string - description: The object type, which is always "upload". - enum: [ upload ] - file: - $ref: "#/components/schemas/OpenAIFile" - nullable: true - description: The ready File object after the Upload is completed. + description: The name of the function to call. required: - - bytes - - created_at - - expires_at - - filename - - id - - purpose - - status - - step_number - x-oaiMeta: - name: The upload object - example: | - { - "id": "upload_abc123", - "object": "upload", - "bytes": 2147483648, - "created_at": 1719184911, - "filename": "training_examples.jsonl", - "purpose": "fine-tune", - "status": "completed", - "expires_at": 1719127296, - "file": { - "id": "file-xyz321", - "object": "file", - "bytes": 2147483648, - "created_at": 1719186911, - "filename": "training_examples.jsonl", - "purpose": "fine-tune", - } - } - UploadPart: + - role + - content + - name + + FunctionParameters: type: object - title: UploadPart - description: | - The upload Part represents a chunk of bytes we can add to an Upload object. + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + additionalProperties: true + + ChatCompletionFunctions: + type: object + deprecated: true properties: - id: + description: type: string - description: The upload Part unique identifier, which can be referenced in API endpoints. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Part was created. - upload_id: + description: A description of what the function does, used by the model to choose when and how to call the function. + name: type: string - description: The ID of the Upload object that this Part was added to. - object: + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + required: + - name + + ChatCompletionFunctionCallOption: + type: object + description: > + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + properties: + name: type: string - description: The object type, which is always `upload.part`. - enum: [ 'upload.part' ] + description: The name of the function to call. required: - - created_at - - id - - object - - upload_id - x-oaiMeta: - name: The upload part object - example: | - { - "id": "part_def456", - "object": "upload.part", - "created_at": 1719186911, - "upload_id": "upload_abc123" - } - Embedding: + - name + + ChatCompletionTool: type: object - description: | - Represents an embedding vector returned by embedding endpoint. properties: - index: - type: integer - description: The index of the embedding in the list of embeddings. - embedding: - type: array - description: | - The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). 
- items: - type: number - object: + type: type: string - description: The object type, which is always "embedding". - enum: [ embedding ] + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: + $ref: "#/components/schemas/FunctionObject" required: - - index - - object - - embedding - x-oaiMeta: - name: The embedding object - example: | - { - "object": "embedding", - "embedding": [ - 0.0023064255, - -0.009327292, - .... (1536 floats total for ada-002) - -0.0028842222, - ], - "index": 0 - } + - type + - function - FineTuningJob: + FunctionObject: type: object - title: FineTuningJob + properties: + description: + type: string + description: A description of what the function does, used by the model to choose when and how to call the function. + name: + type: string + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + required: + - name + + ChatCompletionToolChoiceOption: description: | - The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. + Controls which (if any) tool is called by the model. + `none` means the model will not call any tool and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools. + Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools are present. + oneOf: + - type: string + description: > + `none` means the model will not call any tool and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools. + enum: [none, auto, required] + - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" + x-oaiExpandable: true + + ChatCompletionNamedToolChoice: + type: object + description: Specifies a tool the model should use. Use to force the model to call a specific function. properties: - id: + type: type: string - description: The object identifier, which can be referenced in the API endpoints. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - error: + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: type: object - nullable: true - description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. properties: - code: - type: string - description: A machine-readable error code. - message: - type: string - description: A human-readable error message. - param: + name: type: string - description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. - nullable: true - required: - - code - - message - - param - fine_tuned_model: - type: string - nullable: true - description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. - finished_at: - type: integer - nullable: true - description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. 
The value will be null if the fine-tuning job is still running. - hyperparameters: - type: object - description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - properties: - n_epochs: - oneOf: - - type: string - enum: [ auto ] - - type: integer - minimum: 1 - maximum: 50 - default: auto - description: - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + description: The name of the function to call. required: - - n_epochs - model: - type: string - description: The base model that is being fine-tuned. - object: - type: string - description: The object type, which is always "fine_tuning.job". - enum: [ fine_tuning.job ] - organization_id: - type: string - description: The organization that owns the fine-tuning job. - result_files: - type: array - description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). - items: - type: string - example: file-abc123 - status: - type: string - description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. - enum: - [ - "validating_files", - "queued", - "running", - "succeeded", - "failed", - "cancelled", - ] - trained_tokens: - type: integer - nullable: true - description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. - training_file: - type: string - description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). - validation_file: - type: string - nullable: true - description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). - integrations: - type: array - nullable: true - description: A list of integrations to enable for this fine-tuning job. - maxItems: 5 - items: - oneOf: - - $ref: "#/components/schemas/FineTuningIntegration" - x-oaiExpandable: true - seed: - type: integer - description: The seed used for the fine-tuning job. - estimated_finish: - type: integer - nullable: true - description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. - required: - - created_at - - error - - finished_at - - fine_tuned_model - - hyperparameters - - id - - model - - object - - organization_id - - result_files - - status - - trained_tokens - - training_file - - validation_file - - seed - x-oaiMeta: - name: The fine-tuning job object - example: *fine_tuning_example - - FineTuningIntegration: - type: object - title: Fine-Tuning Job Integration + - name required: - type - - wandb + - function + + ChatCompletionMessageToolCalls: + type: array + description: The tool calls generated by the model, such as function calls. + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCall" + + ChatCompletionMessageToolCall: + type: object properties: + # TODO: index included when streaming + id: + type: string + description: The ID of the tool call. 
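+      # Illustrative example (not part of the upstream spec): request fields built from
+      # `ChatCompletionTool`/`FunctionObject`/`FunctionParameters`, with a `tool_choice` that
+      # forces the named function via `ChatCompletionNamedToolChoice`. The function name and
+      # its JSON Schema are placeholders.
+      #   "tools": [ { "type": "function", "function": {
+      #       "name": "get_weather",
+      #       "description": "Get the current weather for a city",
+      #       "parameters": { "type": "object",
+      #         "properties": { "city": { "type": "string" } },
+      #         "required": ["city"] } } } ],
+      #   "tool_choice": { "type": "function", "function": { "name": "get_weather" } }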
type: type: string - description: "The type of the integration being enabled for the fine-tuning job" - enum: [ "wandb" ] - wandb: + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: type: object - description: | - The settings for your integration with Weights and Biases. This payload specifies the project that - metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags - to your run, and set a default entity (team, username, etc) to be associated with your run. - required: - - project + description: The function that the model called. properties: - project: - description: | - The name of the project that the new run will be created under. - type: string - example: "my-wandb-project" name: - description: | - A display name to set for the run. If not set, we will use the Job ID as the name. - nullable: true type: string - entity: - description: | - The entity to use for the run. This allows you to set the team or username of the WandB user that you would - like associated with the run. If not set, the default entity for the registered WandB API key is used. - nullable: true + description: The name of the function to call. + arguments: type: string - tags: - description: | - A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some - default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". - type: array - items: - type: string - example: "custom-tag" + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + required: + - name + - arguments + required: + - id + - type + - function - FineTuningJobEvent: + ChatCompletionMessageToolCallChunk: type: object - description: Fine-tuning job event object properties: + index: + type: integer id: type: string - created_at: - type: integer - level: + description: The ID of the tool call. + type: type: string - enum: [ "info", "warn", "error" ] - message: + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + required: + - index + + # Note, this isn't referenced anywhere, but is kept as a convenience to record all possible roles in one place. + ChatCompletionRole: + type: string + description: The role of the author of a message + enum: + - system + - user + - assistant + - tool + - function + + ChatCompletionStreamOptions: + description: | + Options for streaming response. Only set this when you set `stream: true`. + type: object + nullable: true + default: null + properties: + include_usage: + type: boolean + description: | + If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. 
All other chunks will also include a `usage` field, but with a null value. + + ChatCompletionResponseMessage: + type: object + description: A chat completion message generated by the model. + properties: + content: type: string - object: + description: The contents of the message. + nullable: true + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + role: type: string - enum: [ fine_tuning.job.event ] + enum: ["assistant"] + description: The role of the author of this message. + function_call: + type: object + deprecated: true + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - name + - arguments required: - - id - - object - - created_at - - level - - message - x-oaiMeta: - name: The fine-tuning job event object - example: | - { - "object": "fine_tuning.job.event", - "id": "ftevent-abc123" - "created_at": 1677610602, - "level": "info", - "message": "Created fine-tuning job" - } + - role + - content - FineTuningJobCheckpoint: + ChatCompletionStreamResponseDelta: type: object - title: FineTuningJobCheckpoint - description: | - The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. + description: A chat completion delta generated by streamed model responses. properties: - id: + content: type: string - description: The checkpoint identifier, which can be referenced in the API endpoints. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the checkpoint was created. - fine_tuned_model_checkpoint: + description: The contents of the chunk message. + nullable: true + function_call: + deprecated: true + type: object + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + tool_calls: + type: array + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" + role: type: string - description: The name of the fine-tuned checkpoint model that is created. - step_number: + enum: ["system", "user", "assistant", "tool"] + description: The role of the author of this message. + + CreateChatCompletionRequest: + type: object + properties: + messages: + description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + type: array + minItems: 1 + items: + $ref: "#/components/schemas/ChatCompletionRequestMessage" + model: + description: ID of the model to use. 
See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + example: "gpt-4-turbo" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4o", + "gpt-4o-2024-05-13", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_frequency_penalty_description + logit_bias: + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + logprobs: + description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + type: boolean + default: false + nullable: true + top_logprobs: + description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + type: integer + minimum: 0 + maximum: 20 + nullable: true + max_tokens: + description: | + The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + type: integer + nullable: true + n: type: integer - description: The step number that the checkpoint was created at. - metrics: + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_presence_penalty_description + response_format: type: object - description: Metrics at the step number during the fine-tuning job. - properties: - step: - type: number - train_loss: - type: number - train_mean_token_accuracy: - type: number - valid_loss: - type: number - valid_mean_token_accuracy: - type: number - full_valid_loss: - type: number - full_valid_mean_token_accuracy: - type: number - fine_tuning_job_id: - type: string - description: The name of the fine-tuning job that this checkpoint was created from. - object: - type: string - description: The object type, which is always "fine_tuning.job.checkpoint". 
- enum: [ fine_tuning.job.checkpoint ] - required: - - created_at - - fine_tuning_job_id - - fine_tuned_model_checkpoint - - id - - metrics - - object - - step_number - x-oaiMeta: - name: The fine-tuning job checkpoint object - example: | - { - "object": "fine_tuning.job.checkpoint", - "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", - "created_at": 1712211699, - "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88", - "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", - "metrics": { - "step": 88, - "train_loss": 0.478, - "train_mean_token_accuracy": 0.924, - "valid_loss": 10.112, - "valid_mean_token_accuracy": 0.145, - "full_valid_loss": 0.567, - "full_valid_mean_token_accuracy": 0.944 - }, - "step_number": 88 - } + description: | + An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - FinetuneChatRequestInput: - type: object - description: The per-line training example of a fine-tuning input file for chat models - properties: - messages: - type: array - minItems: 1 - items: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" - - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" - - $ref: "#/components/schemas/FineTuneChatCompletionRequestAssistantMessage" - - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" - - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" - x-oaiExpandable: true + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + properties: + type: + type: string + enum: ["text", "json_object"] + example: "json_object" + default: "text" + description: Must be one of `text` or `json_object`. + seed: + type: integer + minimum: -9223372036854775808 + maximum: 9223372036854775807 + nullable: true + description: | + This feature is in Beta. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + stop: + description: | + Up to 4 sequences where the API will stop generating further tokens. + default: null + oneOf: + - type: string + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + stream: + description: > + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
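+      # Illustrative example (not part of the upstream spec): a minimal `CreateChatCompletionRequest`
+      # body using the options described above -- JSON mode, a fixed `seed`, and streaming with usage
+      # reporting. The model id is taken from the `model` enum; the system message mirrors the user
+      # message shape and, as required for JSON mode, instructs the model to produce JSON.
+      #   { "model": "gpt-4o",
+      #     "messages": [ { "role": "system", "content": "Reply in JSON." },
+      #                   { "role": "user", "content": "List three colors." } ],
+      #     "response_format": { "type": "json_object" },
+      #     "seed": 42,
+      #     "stream": true,
+      #     "stream_options": { "include_usage": true } }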
+ type: boolean + nullable: true + default: false + stream_options: + $ref: "#/components/schemas/ChatCompletionStreamOptions" + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *completions_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *completions_top_p_description tools: type: array - description: A list of tools the model may generate JSON inputs for. + description: > + A list of tools the model may call. Currently, only functions are supported as a tool. + Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. items: $ref: "#/components/schemas/ChatCompletionTool" - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" + tool_choice: + $ref: "#/components/schemas/ChatCompletionToolChoiceOption" + user: *end_user_param_configuration + function_call: + deprecated: true + description: | + Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + + `none` is the default when no functions are present. `auto` is the default if functions are present. + oneOf: + - type: string + description: > + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + enum: [none, auto] + - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" + x-oaiExpandable: true functions: deprecated: true - description: + description: | + Deprecated in favor of `tools`. + A list of functions the model may generate JSON inputs for. type: array minItems: 1 maxItems: 128 items: $ref: "#/components/schemas/ChatCompletionFunctions" - x-oaiMeta: - name: Training format for chat models - example: | - { - "messages": [ - { "role": "user", "content": "What is the weather in San Francisco?" }, - { - "role": "assistant", - "tool_calls": [ - { - "id": "call_id", - "type": "function", - "function": { - "name": "get_current_weather", - "arguments": "{\"location\": \"San Francisco, USA\", \"format\": \"celsius\"}" - } - } - ] - } - ], - "parallel_tool_calls": false, - "tools": [ - { - "type": "function", - "function": { - "name": "get_current_weather", - "description": "Get the current weather", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and country, eg. San Francisco, USA" - }, - "format": { "type": "string", "enum": ["celsius", "fahrenheit"] } - }, - "required": ["location", "format"] - } - } - } - ] - } - FinetuneCompletionRequestInput: + required: + - model + - messages + + CreateChatCompletionResponse: type: object - description: The per-line training example of a fine-tuning input file for completions models + description: Represents a chat completion response returned by model, based on the provided input. properties: - prompt: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. 
+ items: + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: &chat_completion_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + `content_filter` if content was omitted due to a flag from our content filters, + `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + logprobs: &chat_completion_response_logprobs + description: Log probability information for the choice. + type: object + nullable: true + properties: + content: + description: A list of message content tokens with log probability information. + type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + required: + - content + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: type: string - description: The input prompt for this training example. - completion: + description: The model used for the chat completion. + system_fingerprint: type: string - description: The desired completion for this training example. - x-oaiMeta: - name: Training format for completions models - example: | - { - "prompt": "What is the answer to 2+2", - "completion": "4" - } + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion`. + enum: [chat.completion] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_example - CompletionUsage: + CreateChatCompletionFunctionResponse: type: object - description: Usage statistics for the completion request. + description: Represents a chat completion response returned by model, based on the provided input. properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: + &chat_completion_function_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. 
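+      # Illustrative example (not part of the upstream spec) of a `chat.completion` response matching
+      # the schema above; ids, fingerprint, timestamp and token counts are placeholders.
+      #   { "id": "chatcmpl-abc123", "object": "chat.completion", "created": 1716000000,
+      #     "model": "gpt-4o", "system_fingerprint": "fp_abc123",
+      #     "choices": [ { "index": 0, "finish_reason": "stop", "logprobs": null,
+      #       "message": { "role": "assistant", "content": "Hello! How can I help?" } } ],
+      #     "usage": { "prompt_tokens": 9, "completion_tokens": 8, "total_tokens": 17 } }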
+ enum: ["stop", "length", "function_call", "content_filter"] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + created: type: integer - description: Total number of tokens used in the request (prompt + completion). - completion_tokens_details: - type: object - description: Breakdown of tokens used in a completion. - properties: - reasoning_tokens: - type: integer - description: Tokens generated by the model for reasoning. + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: + type: string + description: The model used for the chat completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion`. + enum: [chat.completion] + usage: + $ref: "#/components/schemas/CompletionUsage" required: - - prompt_tokens - - completion_tokens - - total_tokens + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_function_example - RunCompletionUsage: + ChatCompletionTokenLogprob: type: object - description: Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). + token: &chat_completion_response_logprobs_token + description: The token. + type: string + logprob: &chat_completion_response_logprobs_token_logprob + description: The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. + type: number + bytes: &chat_completion_response_logprobs_bytes + description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + type: array + items: + type: integer + nullable: true + top_logprobs: + description: List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. + type: array + items: + type: object + properties: + token: *chat_completion_response_logprobs_token + logprob: *chat_completion_response_logprobs_token_logprob + bytes: *chat_completion_response_logprobs_bytes + required: + - token + - logprob + - bytes required: - - prompt_tokens - - completion_tokens - - total_tokens - nullable: true + - token + - logprob + - bytes + - top_logprobs - RunStepCompletionUsage: + ListPaginatedFineTuningJobsResponse: type: object - description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. 
properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run step. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run step. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJob" + has_more: + type: boolean + object: + type: string + enum: [list] required: - - prompt_tokens - - completion_tokens - - total_tokens - nullable: true - - AssistantsApiResponseFormatOption: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - oneOf: - - type: string - description: > - `auto` is the default value - enum: [ auto ] - - $ref: '#/components/schemas/ResponseFormatText' - - $ref: '#/components/schemas/ResponseFormatJsonObject' - - $ref: '#/components/schemas/ResponseFormatJsonSchema' - x-oaiExpandable: true + - object + - data + - has_more - AssistantObject: + CreateChatCompletionStreamResponse: type: object - title: Assistant - description: Represents an `assistant` that can call the model and use tools. + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. properties: id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `assistant`. - type: string - enum: [ assistant ] - created_at: - description: The Unix timestamp (in seconds) for when the assistant was created. - type: integer - name: - description: &assistant_name_param_description | - The name of the assistant. The maximum length is 256 characters. - type: string - maxLength: 256 - nullable: true - description: - description: &assistant_description_param_description | - The description of the assistant. The maximum length is 512 characters. - type: string - maxLength: 512 - nullable: true - model: - description: *model_description - type: string - instructions: - description: &assistant_instructions_param_description | - The system instructions that the assistant uses. The maximum length is 256,000 characters. type: string - maxLength: 256000 - nullable: true - tools: - description: &assistant_tools_param_description | - A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. 
- default: [ ] + description: A unique identifier for the chat completion. Each chunk has the same ID. + choices: type: array - maxItems: 128 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true - tool_resources: - type: object description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: &metadata_description | - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the + last chunk if you set `stream_options: {"include_usage": true}`. + items: + type: object + required: + - delta + - finish_reason + - index + properties: + delta: + $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" + logprobs: *chat_completion_response_logprobs + finish_reason: + type: string + description: *chat_completion_finish_reason_description + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] + nullable: true + index: + type: integer + description: The index of the choice in the list of choices. + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + model: + type: string + description: The model to generate the completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion.chunk`. + enum: [chat.completion.chunk] + usage: type: object - x-oaiTypeLabel: map - nullable: true - temperature: - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
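+      # Illustrative example (not part of the upstream spec): two streamed chunks for a request sent
+      # with `stream_options: {"include_usage": true}` -- an ordinary delta chunk followed by the final
+      # chunk, whose `choices` array is empty and which carries the usage for the whole request.
+      # Ids, timestamp and token counts are placeholders.
+      #   { "id": "chatcmpl-abc123", "object": "chat.completion.chunk", "created": 1716000000,
+      #     "model": "gpt-4o", "choices": [ { "index": 0, "finish_reason": null,
+      #       "delta": { "role": "assistant", "content": "Hel" } } ] }
+      #   { "id": "chatcmpl-abc123", "object": "chat.completion.chunk", "created": 1716000000,
+      #     "model": "gpt-4o", "choices": [],
+      #     "usage": { "prompt_tokens": 9, "completion_tokens": 8, "total_tokens": 17 } }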
- type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true + description: | + An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. + When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request. + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens required: + - choices + - created - id - - object - - created_at - - name - - description - model - - instructions - - tools - - metadata + - object x-oaiMeta: - name: The assistant object - beta: true - example: *create_assistants_example + name: The chat completion chunk object + group: chat + example: *chat_completion_chunk_example - CreateAssistantRequest: + CreateChatCompletionImageResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: *chat_completion_image_example + + CreateImageRequest: type: object - additionalProperties: false properties: + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. + type: string + example: "A cute baby sea otter" model: - description: *model_description - example: "gpt-4o" anyOf: - type: string - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] + enum: ["dall-e-2", "dall-e-3"] x-oaiTypeLabel: string - name: - description: *assistant_name_param_description + default: "dall-e-2" + example: "dall-e-3" + nullable: true + description: The model to use for image generation. + n: &images_n + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. + quality: + type: string + enum: ["standard", "hd"] + default: "standard" + example: "standard" + description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. 
This param is only supported for `dall-e-3`. + response_format: &images_response_format type: string + enum: ["url", "b64_json"] + default: "url" + example: "url" nullable: true - maxLength: 256 - description: - description: *assistant_description_param_description + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. + size: &images_size type: string + enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] + default: "1024x1024" + example: "1024x1024" nullable: true - maxLength: 512 - instructions: - description: *assistant_instructions_param_description + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + style: type: string + enum: ["vivid", "natural"] + default: "vivid" + example: "vivid" nullable: true - maxLength: 256000 - tools: - description: *assistant_tools_param_description - default: [ ] + description: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + user: *end_user_param_configuration + required: + - prompt + + ImagesResponse: + properties: + created: + type: integer + data: type: array - maxItems: 128 items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true - tool_resources: - type: object - description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. - maxItems: 1 - items: - type: string - vector_stores: - type: array - description: | - A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant. - maxItems: 1 - items: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. - maxItems: 10000 - items: - type: string - chunking_strategy: - # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly - type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - oneOf: - - type: object - title: Auto Chunking Strategy - description: The default strategy. 
This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - additionalProperties: false - properties: - type: - type: string - description: Always `auto`. - enum: [ "auto" ] - required: - - type - - type: object - title: Static Chunking Strategy - additionalProperties: false - properties: - type: - type: string - description: Always `static`. - enum: [ "static" ] - static: - type: object - additionalProperties: false - properties: - max_chunk_size_tokens: - type: integer - minimum: 100 - maximum: 4096 - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: | - The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - required: - - type - - static - x-oaiExpandable: true - metadata: - type: object - description: | - Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - oneOf: - - required: [ vector_store_ids ] - - required: [ vector_stores ] - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - temperature: - description: *run_temperature_description - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 + $ref: "#/components/schemas/Image" + required: + - created + - data + + Image: + type: object + description: Represents the url or the content of an image generated by the OpenAI API. + properties: + b64_json: + type: string + description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + url: + type: string + description: The URL of the generated image, if `response_format` is `url` (default). + revised_prompt: + type: string + description: The prompt that was used to generate the image, if there was any revision to the prompt. + x-oaiMeta: + name: The image object + example: | + { + "url": "...", + "revised_prompt": "..." + } + + CreateImageEditRequest: + type: object + properties: + image: + description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. + type: string + format: binary + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters. + type: string + example: "A cute baby sea otter wearing a beret" + mask: + description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: ["dall-e-2"] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" nullable: true - top_p: - type: number - minimum: 0 - maximum: 1 + description: The model to use for image generation. Only `dall-e-2` is supported at this time. 
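+      # Illustrative example (not part of the upstream spec): a `CreateImageRequest` body using the
+      # fields defined above; the prompt is the spec's own sample text and the other values are
+      # picked from the documented enums (`hd` and `n: 1` are the `dall-e-3` options).
+      #   { "model": "dall-e-3", "prompt": "A cute baby sea otter wearing a beret",
+      #     "n": 1, "size": "1024x1024", "quality": "hd", "style": "natural",
+      #     "response_format": "url" }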
+ n: + type: integer + minimum: 1 + maximum: 10 default: 1 example: 1 nullable: true - description: *run_top_p_description - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + description: The number of images to generate. Must be between 1 and 10. + size: &dalle2_images_size + type: string + enum: ["256x256", "512x512", "1024x1024"] + default: "1024x1024" + example: "1024x1024" nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + response_format: *images_response_format + user: *end_user_param_configuration required: - - model + - prompt + - image - ModifyAssistantRequest: + CreateImageVariationRequest: type: object - additionalProperties: false properties: + image: + description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. + type: string + format: binary model: - description: *model_description anyOf: - type: string - name: - description: *assistant_name_param_description - type: string + - type: string + enum: ["dall-e-2"] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" nullable: true - maxLength: 256 - description: - description: *assistant_description_param_description + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: *images_n + response_format: *images_response_format + size: *dalle2_images_size + user: *end_user_param_configuration + required: + - image + + CreateModerationRequest: + type: object + properties: + input: + description: The input text to classify + oneOf: + - type: string + default: "" + example: "I want to kill them." + - type: array + items: + type: string + default: "" + example: "I want to kill them." + model: + description: | + Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + + The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + nullable: false + default: "text-moderation-latest" + example: "text-moderation-stable" + anyOf: + - type: string + - type: string + enum: ["text-moderation-latest", "text-moderation-stable"] + x-oaiTypeLabel: string + required: + - input + + CreateModerationResponse: + type: object + description: Represents if a given text input is potentially harmful. + properties: + id: type: string - nullable: true - maxLength: 512 - instructions: - description: *assistant_instructions_param_description + description: The unique identifier for the moderation request. + model: type: string - nullable: true - maxLength: 256000 - tools: - description: *assistant_tools_param_description - default: [ ] + description: The model used to generate the moderation results. + results: type: array - maxItems: 128 + description: A list of moderation objects. items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true - tool_resources: - type: object - description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. 
For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - temperature: - description: *run_temperature_description - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *run_top_p_description - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true + type: object + properties: + flagged: + type: boolean + description: Whether any of the below categories are flagged. + categories: + type: object + description: A list of the categories, and whether they are flagged or not. + properties: + hate: + type: boolean + description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. + hate/threatening: + type: boolean + description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + harassment: + type: boolean + description: Content that expresses, incites, or promotes harassing language towards any target. + harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm towards any target. + self-harm: + type: boolean + description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/intent: + type: boolean + description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/instructions: + type: boolean + description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + sexual: + type: boolean + description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic detail. 
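+              # Illustrative example (not part of the upstream spec): a `CreateModerationRequest` body
+              # matching the schema above; the input mirrors the spec's own sample.
+              #   { "input": "I want to kill them.", "model": "text-moderation-latest" }
+              # The response's `results[0]` then carries `flagged`, the boolean `categories` map and the
+              # numeric `category_scores` map for every category listed above.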
+ required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + category_scores: + type: object + description: A list of the categories along with their scores as predicted by model. + properties: + hate: + type: number + description: The score for the category 'hate'. + hate/threatening: + type: number + description: The score for the category 'hate/threatening'. + harassment: + type: number + description: The score for the category 'harassment'. + harassment/threatening: + type: number + description: The score for the category 'harassment/threatening'. + self-harm: + type: number + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + description: The score for the category 'self-harm/instructions'. + sexual: + type: number + description: The score for the category 'sexual'. + sexual/minors: + type: number + description: The score for the category 'sexual/minors'. + violence: + type: number + description: The score for the category 'violence'. + violence/graphic: + type: number + description: The score for the category 'violence/graphic'. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + required: + - flagged + - categories + - category_scores + required: + - id + - model + - results + x-oaiMeta: + name: The moderation object + example: *moderation_example - DeleteAssistantResponse: + ListFilesResponse: type: object properties: - id: - type: string - deleted: - type: boolean + data: + type: array + items: + $ref: "#/components/schemas/OpenAIFile" object: type: string - enum: [ assistant.deleted ] + enum: [list] required: - - id - object - - deleted + - data - ListAssistantsResponse: + CreateFileRequest: type: object + additionalProperties: false properties: - object: - type: string - example: "list" - data: - type: array - items: - $ref: "#/components/schemas/AssistantObject" - first_id: + file: + description: | + The File object (not file name) to be uploaded. type: string - example: "asst_abc123" - last_id: + format: binary + purpose: + description: | + The intended purpose of the uploaded file. + + Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). 
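
As a quick illustration of the `CreateModerationRequest` and `CreateModerationResponse` schemas above, here is a minimal Dart sketch (not part of the generated spec). It assumes the public `/v1/moderations` endpoint, an `OPENAI_API_KEY` environment variable, and the `http` package from pub.dev.

import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY']; // assumed env var
  final res = await http.post(
    Uri.parse('https://api.openai.com/v1/moderations'), // assumed endpoint path
    headers: {
      'Authorization': 'Bearer $apiKey',
      'Content-Type': 'application/json',
    },
    // Matches CreateModerationRequest: `input` is required, `model` is optional.
    body: jsonEncode({
      'input': 'I want to kill them.',
      'model': 'text-moderation-stable',
    }),
  );
  // CreateModerationResponse: id, model, and a list of results with
  // flagged / categories / category_scores.
  final result = (jsonDecode(res.body)['results'] as List).first;
  print('flagged: ${result['flagged']}');
  print('hate score: ${result['category_scores']['hate']}');
}
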
type: string - example: "asst_abc456" - has_more: - type: boolean - example: false + enum: ["assistants", "batch", "fine-tune"] required: - - object - - data - - first_id - - last_id - - has_more - x-oaiMeta: - name: List assistants response object - group: chat - example: *list_assistants_example + - file + - purpose - AssistantToolsCode: + DeleteFileResponse: type: object - title: Code interpreter tool properties: - type: + id: type: string - description: "The type of tool being defined: `code_interpreter`" - enum: [ "code_interpreter" ] + object: + type: string + enum: [file] + deleted: + type: boolean required: - - type + - id + - object + - deleted - AssistantToolsFileSearch: + CreateFineTuningJobRequest: type: object - title: FileSearch tool properties: - type: + model: + description: | + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + example: "gpt-3.5-turbo" + anyOf: + - type: string + - type: string + enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] + x-oaiTypeLabel: string + training_file: + description: | + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/create) for how to upload a file. + + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string - description: "The type of tool being defined: `file_search`" - enum: [ "file_search" ] - file_search: + example: "file-abc123" + hyperparameters: type: object - description: Overrides for the file search tool. + description: The hyperparameters used for the fine-tuning job. properties: - max_num_results: - type: integer - minimum: 1 - maximum: 50 + batch_size: description: | - The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive. - - Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. - ranking_options: - $ref: "#/components/schemas/FileSearchRankingOptions" - required: - - type + Number of examples in each batch. A larger batch size means that model parameters + are updated less frequently, but with lower variance. + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 256 + default: auto + learning_rate_multiplier: + description: | + Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + overfitting. + oneOf: + - type: string + enum: [auto] + - type: number + minimum: 0 + exclusiveMinimum: true + default: auto + n_epochs: + description: | + The number of epochs to train the model for. An epoch refers to one full cycle + through the training dataset. + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + suffix: + description: | + A string of up to 18 characters that will be added to your fine-tuned model name. - FileSearchRankingOptions: - title: File search tool call ranking options - type: object - description: | - The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0. 
- - See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. - properties: - ranker: + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. type: string - description: The ranker to use for the file search. If not specified will use the `auto` ranker. - enum: [ "auto", "default_2024_08_21" ] - score_threshold: - type: number - description: The score threshold for the file search. All values must be a floating point number between 0 and 1. + minLength: 1 + maxLength: 40 + default: null + nullable: true + validation_file: + description: | + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation + metrics periodically during fine-tuning. These metrics can be viewed in + the fine-tuning results file. + The same data should not be present in both train and validation files. + + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + type: string + nullable: true + example: "file-abc123" + integrations: + type: array + description: A list of integrations to enable for your fine-tuning job. + nullable: true + items: + type: object + required: + - type + - wandb + properties: + type: + description: | + The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. + oneOf: + - type: string + enum: [wandb] + wandb: + type: object + description: | + The settings for your integration with Weights and Biases. This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + required: + - project + properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" + name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. + nullable: true + type: string + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true + type: string + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: "custom-tag" + + seed: + description: | + The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. + If a seed is not specified, one will be generated for you. 
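
For reference, a minimal Dart sketch of a request matching the `CreateFineTuningJobRequest` schema above. It assumes the public `/v1/fine_tuning/jobs` endpoint, an `OPENAI_API_KEY` environment variable, a previously uploaded `fine-tune` file (the `file-abc123` ID is a placeholder), and the `http` package.

import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY']; // assumed env var
  final res = await http.post(
    Uri.parse('https://api.openai.com/v1/fine_tuning/jobs'), // assumed endpoint path
    headers: {
      'Authorization': 'Bearer $apiKey',
      'Content-Type': 'application/json',
    },
    // Matches CreateFineTuningJobRequest: model and training_file are
    // required; hyperparameters, suffix and seed are optional.
    body: jsonEncode({
      'model': 'gpt-3.5-turbo',
      'training_file': 'file-abc123', // placeholder file ID
      'hyperparameters': {'n_epochs': 'auto', 'batch_size': 'auto'},
      'suffix': 'custom-model-name',
      'seed': 42,
    }),
  );
  print(jsonDecode(res.body)['id']); // e.g. a "ftjob-..." identifier
}
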
+ type: integer + nullable: true minimum: 0 - maximum: 1 + maximum: 2147483647 + example: 42 required: - - score_threshold + - model + - training_file - AssistantToolsFileSearchTypeOnly: + ListFineTuningJobEventsResponse: type: object - title: FileSearch tool properties: - type: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobEvent" + object: type: string - description: "The type of tool being defined: `file_search`" - enum: [ "file_search" ] + enum: [list] required: - - type + - object + - data - AssistantToolsFunction: + ListFineTuningJobCheckpointsResponse: type: object - title: Function tool properties: - type: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobCheckpoint" + object: + type: string + enum: [list] + first_id: + type: string + nullable: true + last_id: type: string - description: "The type of tool being defined: `function`" - enum: [ "function" ] - function: - $ref: "#/components/schemas/FunctionObject" + nullable: true + has_more: + type: boolean required: - - type - - function + - object + - data + - has_more - TruncationObject: + CreateEmbeddingRequest: type: object - title: Thread Truncation Controls - description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. + additionalProperties: false properties: - type: + input: + description: | + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + example: "The quick brown fox jumped over the lazy dog" + oneOf: + - type: string + title: string + description: The string that will be turned into an embedding. + default: "" + example: "This is a test." + - type: array + title: array + description: The array of strings that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: string + default: "" + example: "['This is a test.']" + - type: array + title: array + description: The array of integers that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + title: array + description: The array of arrays containing integers that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + x-oaiExpandable: true + model: + description: *model_description + example: "text-embedding-3-small" + anyOf: + - type: string + - type: string + enum: + [ + "text-embedding-ada-002", + "text-embedding-3-small", + "text-embedding-3-large", + ] + x-oaiTypeLabel: string + encoding_format: + description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." + example: "float" + default: "float" type: string - description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. 
- enum: [ "auto", "last_messages" ] - last_messages: + enum: ["float", "base64"] + dimensions: + description: | + The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. type: integer - description: The number of most recent messages from the thread when constructing the context for the run. minimum: 1 - nullable: true + user: *end_user_param_configuration required: - - type - - AssistantsApiToolChoiceOption: - description: | - Controls which (if any) tool is called by the model. - `none` means the model will not call any tools and instead generates a message. - `auto` is the default value and means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools before responding to the user. - Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - - oneOf: - - type: string - description: > - `none` means the model will not call any tools and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools before responding to the user. - enum: [ none, auto, required ] - - $ref: "#/components/schemas/AssistantsNamedToolChoice" - x-oaiExpandable: true + - model + - input - AssistantsNamedToolChoice: + CreateEmbeddingResponse: type: object - description: Specifies a tool the model should use. Use to force the model to call a specific tool. properties: - type: + data: + type: array + description: The list of embeddings generated by the model. + items: + $ref: "#/components/schemas/Embedding" + model: type: string - enum: [ "function", "code_interpreter", "file_search" ] - description: The type of the tool. If type is `function`, the function name must be set - function: + description: The name of the model used to generate the embedding. + object: + type: string + description: The object type, which is always "list". + enum: [list] + usage: type: object + description: The usage information for the request. properties: - name: - type: string - description: The name of the function to call. + prompt_tokens: + type: integer + description: The number of tokens used by the prompt. + total_tokens: + type: integer + description: The total number of tokens used by the request. required: - - name + - prompt_tokens + - total_tokens required: - - type + - object + - model + - data + - usage - RunObject: + CreateTranscriptionRequest: type: object - title: A run on a thread - description: Represents an execution run on a [thread](/docs/api-reference/threads). + additionalProperties: false properties: - id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `thread.run`. + file: + description: | + The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. type: string - enum: [ "thread.run" ] - created_at: - description: The Unix timestamp (in seconds) for when the run was created. - type: integer - thread_id: - description: The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. + x-oaiTypeLabel: file + format: binary + model: + description: | + ID of the model to use. 
Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. + example: whisper-1 + anyOf: + - type: string + - type: string + enum: ["whisper-1"] + x-oaiTypeLabel: string + language: + description: | + The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. type: string - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. type: string - status: - description: The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. + response_format: + description: | + The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. type: string enum: - [ - "queued", - "in_progress", - "requires_action", - "cancelling", - "cancelled", - "failed", - "completed", - "incomplete", - "expired", - ] - required_action: - type: object - description: Details on the action required to continue the run. Will be `null` if no action is required. - nullable: true - properties: - type: - description: For now, this is always `submit_tool_outputs`. - type: string - enum: [ "submit_tool_outputs" ] - submit_tool_outputs: - type: object - description: Details on the tool outputs needed for this run to continue. - properties: - tool_calls: - type: array - description: A list of the relevant tool calls. - items: - $ref: "#/components/schemas/RunToolCallObject" - required: - - tool_calls - required: - - type - - submit_tool_outputs - last_error: - type: object - description: The last error associated with this run. Will be `null` if there are no errors. - nullable: true - properties: - code: - type: string - description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - enum: - [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] - message: - type: string - description: A human-readable description of the error. - required: - - code - - message - expires_at: - description: The Unix timestamp (in seconds) for when the run will expire. - type: integer - nullable: true - started_at: - description: The Unix timestamp (in seconds) for when the run was started. - type: integer - nullable: true - cancelled_at: - description: The Unix timestamp (in seconds) for when the run was cancelled. - type: integer - nullable: true - failed_at: - description: The Unix timestamp (in seconds) for when the run failed. + - json + - text + - srt + - verbose_json + - vtt + default: json + temperature: + description: | + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + type: number + default: 0 + timestamp_granularities[]: + description: | + The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. 
Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. + type: array + items: + type: string + enum: + - word + - segment + default: [segment] + required: + - file + - model + + # Note: This does not currently support the non-default response format types. + CreateTranscriptionResponseJson: + type: object + description: Represents a transcription response returned by model, based on the provided input. + properties: + text: + type: string + description: The transcribed text. + required: + - text + x-oaiMeta: + name: The transcription object + group: audio + example: *basic_transcription_response_example + + TranscriptionSegment: + type: object + properties: + id: type: integer - nullable: true - completed_at: - description: The Unix timestamp (in seconds) for when the run was completed. + description: Unique identifier of the segment. + seek: type: integer - nullable: true - incomplete_details: - description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. - type: object - nullable: true - properties: - reason: - description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. - type: string - enum: [ "max_completion_tokens", "max_prompt_tokens" ] - model: - description: The model that the [assistant](/docs/api-reference/assistants) used for this run. - type: string - instructions: - description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + description: Seek offset of the segment. + start: + type: number + format: float + description: Start time of the segment in seconds. + end: + type: number + format: float + description: End time of the segment in seconds. + text: type: string - tools: - description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. - default: [ ] + description: Text content of the segment. + tokens: type: array - maxItems: 20 items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - usage: - $ref: "#/components/schemas/RunCompletionUsage" + type: integer + description: Array of token IDs for the text content. temperature: - description: The sampling temperature used for this run. If not set, defaults to 1. type: number - nullable: true - top_p: - description: The nucleus sampling value used for this run. If not set, defaults to 1. + format: float + description: Temperature parameter used for generating the segment. + avg_logprob: type: number - nullable: true - max_prompt_tokens: - type: integer - nullable: true + format: float + description: Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. + compression_ratio: + type: number + format: float + description: Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. + no_speech_prob: + type: number + format: float + description: Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. 
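
To make the transcription schemas concrete, a minimal Dart sketch of a multipart upload matching `CreateTranscriptionRequest` and reading a `CreateTranscriptionResponseVerboseJson` payload. It assumes the public `/v1/audio/transcriptions` endpoint, a local `audio.mp3` file, an `OPENAI_API_KEY` environment variable, and the `http` package.

import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY']; // assumed env var
  // Multipart form matching CreateTranscriptionRequest: `file` and `model` are
  // required; verbose_json is needed to get timestamp granularities back.
  final req = http.MultipartRequest(
    'POST',
    Uri.parse('https://api.openai.com/v1/audio/transcriptions'), // assumed endpoint path
  )
    ..headers['Authorization'] = 'Bearer $apiKey'
    ..fields['model'] = 'whisper-1'
    ..fields['response_format'] = 'verbose_json'
    ..fields['timestamp_granularities[]'] = 'word'
    ..files.add(await http.MultipartFile.fromPath('file', 'audio.mp3'));
  final res = await http.Response.fromStream(await req.send());
  // CreateTranscriptionResponseVerboseJson: language, duration, text,
  // plus optional words / segments arrays.
  final body = jsonDecode(res.body) as Map<String, dynamic>;
  print(body['text']);
  print('words: ${(body['words'] as List?)?.length}');
}
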
+ required:
+ - id
+ - seek
+ - start
+ - end
+ - text
+ - tokens
+ - temperature
+ - avg_logprob
+ - compression_ratio
+ - no_speech_prob
+
+ TranscriptionWord:
+ type: object
+ properties:
+ word:
+ type: string
+ description: The text content of the word.
+ start:
+ type: number
+ format: float
+ description: Start time of the word in seconds.
+ end:
+ type: number
+ format: float
+ description: End time of the word in seconds.
+ required: [word, start, end]
+
+ CreateTranscriptionResponseVerboseJson:
+ type: object
+ description: Represents a verbose json transcription response returned by model, based on the provided input.
+ properties:
+ language:
+ type: string
+ description: The language of the input audio.
+ duration:
+ type: string
+ description: The duration of the input audio.
+ text:
+ type: string
+ description: The transcribed text.
+ words:
+ type: array
+ description: Extracted words and their corresponding timestamps.
+ items:
+ $ref: "#/components/schemas/TranscriptionWord"
+ segments:
+ type: array
+ description: Segments of the transcribed text and their corresponding details.
+ items:
+ $ref: "#/components/schemas/TranscriptionSegment"
+ required: [language, duration, text]
+ x-oaiMeta:
+ name: The transcription object
+ group: audio
+ example: *verbose_transcription_response_example
+
+ CreateTranslationRequest:
+ type: object
+ additionalProperties: false
+ properties:
+ file:
 description: |
+ The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ type: string
+ x-oaiTypeLabel: file
+ format: binary
+ model:
 description: |
+ ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available.
+ example: whisper-1
+ anyOf:
+ - type: string
+ - type: string
+ enum: ["whisper-1"]
+ x-oaiTypeLabel: string
+ prompt:
+ description: |
+ An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
+ type: string
 response_format:
+ description: |
+ The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ type: string
+ default: json
+ temperature:
+ description: |
+ The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. 
+ type: number + default: 0 required: - - id - - object - - created_at - - thread_id - - assistant_id - - status - - required_action - - last_error - - expires_at - - started_at - - cancelled_at - - failed_at - - completed_at + - file - model - - instructions - - tools - - metadata - - usage - - incomplete_details - - max_prompt_tokens - - max_completion_tokens - - truncation_strategy - - tool_choice - - parallel_tool_calls - - response_format - x-oaiMeta: - name: The run object - beta: true - example: | - { - "id": "run_abc123", - "object": "thread.run", - "created_at": 1698107661, - "assistant_id": "asst_abc123", - "thread_id": "thread_abc123", - "status": "completed", - "started_at": 1699073476, - "expires_at": null, - "cancelled_at": null, - "failed_at": null, - "completed_at": 1699073498, - "last_error": null, - "model": "gpt-4o", - "instructions": null, - "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], - "metadata": {}, - "incomplete_details": null, - "usage": { - "prompt_tokens": 123, - "completion_tokens": 456, - "total_tokens": 579 - }, - "temperature": 1.0, - "top_p": 1.0, - "max_prompt_tokens": 1000, - "max_completion_tokens": 1000, - "truncation_strategy": { - "type": "auto", - "last_messages": null - }, - "response_format": "auto", - "tool_choice": "auto", - "parallel_tool_calls": true - } - CreateRunRequest: + + # Note: This does not currently support the non-default response format types. + CreateTranslationResponseJson: + type: object + properties: + text: + type: string + required: + - text + + CreateTranslationResponseVerboseJson: + type: object + properties: + language: + type: string + description: The language of the output translation (always `english`). + duration: + type: string + description: The duration of the input audio. + text: + type: string + description: The translated text. + segments: + type: array + description: Segments of the translated text and their corresponding details. + items: + $ref: "#/components/schemas/TranscriptionSegment" + required: [language, duration, text] + + CreateSpeechRequest: type: object additionalProperties: false properties: - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. - type: string model: - description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - example: "gpt-4o" + description: | + One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` anyOf: - type: string - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] + enum: ["tts-1", "tts-1-hd"] x-oaiTypeLabel: string - nullable: true - instructions: - description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. 
+ input: type: string - nullable: true - additional_instructions: - description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + description: The text to generate audio for. The maximum length is 4096 characters. + maxLength: 4096 + voice: + description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). type: string - nullable: true - additional_messages: - description: Adds additional messages to the thread before creating the run. - type: array - items: - $ref: "#/components/schemas/CreateMessageRequest" - nullable: true - tools: - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - nullable: true - type: array - maxItems: 20 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *run_temperature_description - top_p: + enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] + response_format: + description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." + default: "mp3" + type: string + enum: ["mp3", "opus", "aac", "flac", "wav", "pcm"] + speed: + description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *run_top_p_description - stream: - type: boolean - nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. - max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - max_completion_tokens: + default: 1.0 + minimum: 0.25 + maximum: 4.0 + required: + - model + - input + - voice + + Model: + title: Model + description: Describes an OpenAI model offering that can be used with the API. + properties: + id: + type: string + description: The model identifier, which can be referenced in the API endpoints. + created: type: integer - nullable: true - description: | - The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. 
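
For the `CreateSpeechRequest` schema above, a minimal Dart sketch (illustrative only) that posts to the public `/v1/audio/speech` endpoint and writes the returned audio bytes to disk. The endpoint path, the `OPENAI_API_KEY` environment variable, and the `http` package are assumptions of the example.

import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY']; // assumed env var
  final res = await http.post(
    Uri.parse('https://api.openai.com/v1/audio/speech'), // assumed endpoint path
    headers: {
      'Authorization': 'Bearer $apiKey',
      'Content-Type': 'application/json',
    },
    // Matches CreateSpeechRequest: model, input and voice are required;
    // response_format and speed fall back to "mp3" and 1.0.
    body: jsonEncode({
      'model': 'tts-1',
      'input': 'The quick brown fox jumped over the lazy dog.',
      'voice': 'alloy',
      'response_format': 'mp3',
      'speed': 1.0,
    }),
  );
  // The endpoint returns raw audio bytes rather than JSON.
  await File('speech.mp3').writeAsBytes(res.bodyBytes);
}
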
- minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" - nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" - nullable: true - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true + description: The Unix timestamp (in seconds) when the model was created. + object: + type: string + description: The object type, which is always "model". + enum: [model] + owned_by: + type: string + description: The organization that owns the model. required: - - thread_id - - assistant_id - ListRunsResponse: - type: object + - id + - object + - created + - owned_by + x-oaiMeta: + name: The model object + example: *retrieve_model_response + + OpenAIFile: + title: OpenAIFile + description: The `File` object represents a document that has been uploaded to OpenAI. properties: + id: + type: string + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + description: The size of the file, in bytes. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the file was created. + filename: + type: string + description: The name of the file. object: type: string - example: "list" - data: - type: array - items: - $ref: "#/components/schemas/RunObject" - first_id: + description: The object type, which is always `file`. + enum: ["file"] + purpose: type: string - example: "run_abc123" - last_id: + description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. + enum: + [ + "assistants", + "assistants_output", + "batch", + "batch_output", + "fine-tune", + "fine-tune-results", + "vision" + ] + status: type: string - example: "run_abc456" - has_more: - type: boolean - example: false + deprecated: true + description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. + enum: ["uploaded", "processed", "error"] + status_details: + type: string + deprecated: true + description: Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. required: + - id - object - - data - - first_id - - last_id - - has_more - ModifyRunRequest: - type: object - additionalProperties: false - properties: - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - SubmitToolOutputsRunRequest: - type: object - additionalProperties: false - properties: - tool_outputs: - description: A list of tools for which the outputs are being submitted. - type: array - items: - type: object - properties: - tool_call_id: - type: string - description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. - output: - type: string - description: The output of the tool call to be submitted to continue the run. - stream: - type: boolean - nullable: true + - bytes + - created_at + - filename + - purpose + - status + x-oaiMeta: + name: The file object + example: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "salesOverview.pdf", + "purpose": "assistants", + } + Embedding: + type: object + description: | + Represents an embedding vector returned by embedding endpoint. 
+ properties: + index: + type: integer + description: The index of the embedding in the list of embeddings. + embedding: + type: array description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). + items: + type: number + object: + type: string + description: The object type, which is always "embedding". + enum: [embedding] required: - - tool_outputs + - index + - object + - embedding + x-oaiMeta: + name: The embedding object + example: | + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } - RunToolCallObject: + FineTuningJob: type: object - description: Tool call objects + title: FineTuningJob + description: | + The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. properties: id: type: string - description: The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. - type: - type: string - description: The type of tool call the output is required for. For now, this is always `function`. - enum: [ "function" ] - function: + description: The object identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + error: type: object - description: The function definition. + nullable: true + description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. properties: - name: + code: type: string - description: The name of the function. - arguments: + description: A machine-readable error code. + message: type: string - description: The arguments that the model expects you to pass to the function. + description: A human-readable error message. + param: + type: string + description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + nullable: true required: - - name - - arguments - required: - - id - - type - - function - - CreateThreadAndRunRequest: - type: object - additionalProperties: false - properties: - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. - type: string - thread: - $ref: "#/components/schemas/CreateThreadRequest" - description: If no thread is provided, an empty thread will be created. - model: - description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. 
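
Tying together the `CreateEmbeddingRequest`, `CreateEmbeddingResponse`, and `Embedding` schemas in this hunk, a minimal Dart sketch; the `/v1/embeddings` path, the `OPENAI_API_KEY` environment variable, and the `http` package are assumptions, not part of the spec.

import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY']; // assumed env var
  final res = await http.post(
    Uri.parse('https://api.openai.com/v1/embeddings'), // assumed endpoint path
    headers: {
      'Authorization': 'Bearer $apiKey',
      'Content-Type': 'application/json',
    },
    // Matches CreateEmbeddingRequest: input and model are required;
    // encoding_format defaults to "float".
    body: jsonEncode({
      'input': 'The quick brown fox jumped over the lazy dog',
      'model': 'text-embedding-3-small',
      'encoding_format': 'float',
    }),
  );
  // CreateEmbeddingResponse wraps a list of Embedding objects in `data`.
  final embedding = jsonDecode(res.body)['data'][0]['embedding'] as List;
  print('dimensions: ${embedding.length}');
}
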
- example: "gpt-4o" - anyOf: - - type: string - - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - nullable: true - instructions: - description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. + - code + - message + - param + fine_tuned_model: type: string nullable: true - tools: - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + finished_at: + type: integer nullable: true - type: array - maxItems: 20 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - tool_resources: + description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. + hyperparameters: type: object - description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *run_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *run_top_p_description - stream: - type: boolean - nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. - max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - max_completion_tokens: + n_epochs: + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + description: + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + required: + - n_epochs + model: + type: string + description: The base model that is being fine-tuned. + object: + type: string + description: The object type, which is always "fine_tuning.job". + enum: [fine_tuning.job] + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: + type: array + description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). + items: + type: string + example: file-abc123 + status: + type: string + description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + enum: + [ + "validating_files", + "queued", + "running", + "succeeded", + "failed", + "cancelled", + ] + trained_tokens: type: integer nullable: true - description: | - The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" + description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + training_file: + type: string + description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). + validation_file: + type: string nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). + integrations: + type: array nullable: true - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + description: A list of integrations to enable for this fine-tuning job. + maxItems: 5 + items: + oneOf: + - $ref: "#/components/schemas/FineTuningIntegration" + x-oaiExpandable: true + seed: + type: integer + description: The seed used for the fine-tuning job. + estimated_finish: + type: integer nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. 
required: - - thread_id - - assistant_id + - created_at + - error + - finished_at + - fine_tuned_model + - hyperparameters + - id + - model + - object + - organization_id + - result_files + - status + - trained_tokens + - training_file + - validation_file + - seed + x-oaiMeta: + name: The fine-tuning job object + example: *fine_tuning_example - ThreadObject: + FineTuningIntegration: type: object - title: Thread - description: Represents a thread that contains [messages](/docs/api-reference/messages). + title: Fine-Tuning Job Integration + required: + - type + - wandb + properties: + type: + type: string + description: "The type of the integration being enabled for the fine-tuning job" + enum: ["wandb"] + wandb: + type: object + description: | + The settings for your integration with Weights and Biases. This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + required: + - project + properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" + name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. + nullable: true + type: string + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true + type: string + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: "custom-tag" + + FineTuningJobEvent: + type: object + description: Fine-tuning job event object properties: id: - description: The identifier, which can be referenced in API endpoints. + type: string + created_at: + type: integer + level: + type: string + enum: ["info", "warn", "error"] + message: type: string object: - description: The object type, which is always `thread`. type: string - enum: [ "thread" ] + enum: [fine_tuning.job.event] + required: + - id + - object + - created_at + - level + - message + x-oaiMeta: + name: The fine-tuning job event object + example: | + { + "object": "fine_tuning.job.event", + "id": "ftevent-abc123" + "created_at": 1677610602, + "level": "info", + "message": "Created fine-tuning job" + } + + FineTuningJobCheckpoint: + type: object + title: FineTuningJobCheckpoint + description: | + The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. + properties: + id: + type: string + description: The checkpoint identifier, which can be referenced in the API endpoints. created_at: - description: The Unix timestamp (in seconds) for when the thread was created. type: integer - tool_resources: + description: The Unix timestamp (in seconds) for when the checkpoint was created. + fine_tuned_model_checkpoint: + type: string + description: The name of the fine-tuned checkpoint model that is created. + step_number: + type: integer + description: The step number that the checkpoint was created at. + metrics: type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. 
The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + description: Metrics at the step number during the fine-tuning job. properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + step: + type: number + train_loss: + type: number + train_mean_token_accuracy: + type: number + valid_loss: + type: number + valid_mean_token_accuracy: + type: number + full_valid_loss: + type: number + full_valid_mean_token_accuracy: + type: number + fine_tuning_job_id: + type: string + description: The name of the fine-tuning job that this checkpoint was created from. + object: + type: string + description: The object type, which is always "fine_tuning.job.checkpoint". + enum: [fine_tuning.job.checkpoint] required: + - created_at + - fine_tuning_job_id + - fine_tuned_model_checkpoint - id + - metrics - object - - created_at - - tool_resources - - metadata + - step_number x-oaiMeta: - name: The thread object - beta: true + name: The fine-tuning job checkpoint object example: | { - "id": "thread_abc123", - "object": "thread", - "created_at": 1698107661, - "metadata": {} + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", + "created_at": 1712211699, + "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom_suffix:9ABel2dg:ckpt-step-88", + "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", + "metrics": { + "step": 88, + "train_loss": 0.478, + "train_mean_token_accuracy": 0.924, + "valid_loss": 10.112, + "valid_mean_token_accuracy": 0.145, + "full_valid_loss": 0.567, + "full_valid_mean_token_accuracy": 0.944 + }, + "step_number": 88 } - CreateThreadRequest: + CompletionUsage: + type: object + description: Usage statistics for the completion request. + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens + + RunCompletionUsage: + type: object + description: Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). + properties: + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). 
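
As an illustration of the `ListFineTuningJobCheckpointsResponse` and `FineTuningJobCheckpoint` schemas above, a minimal Dart sketch that lists checkpoints for a job. The `/v1/fine_tuning/jobs/{id}/checkpoints` path, the placeholder `ftjob-abc123` ID, the `OPENAI_API_KEY` environment variable, and the `http` package are assumptions of the example.

import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY']; // assumed env var
  const jobId = 'ftjob-abc123'; // placeholder fine-tuning job ID
  final res = await http.get(
    Uri.parse('https://api.openai.com/v1/fine_tuning/jobs/$jobId/checkpoints'), // assumed path
    headers: {'Authorization': 'Bearer $apiKey'},
  );
  // ListFineTuningJobCheckpointsResponse: `data` holds FineTuningJobCheckpoint
  // objects with step_number and per-step metrics.
  final checkpoints = jsonDecode(res.body)['data'] as List;
  for (final c in checkpoints) {
    print('step ${c['step_number']}: valid_loss=${c['metrics']['valid_loss']}');
  }
}
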
+ required: + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true + + RunStepCompletionUsage: + type: object + description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. + properties: + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run step. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true + + AssistantsApiResponseFormatOption: + description: | + Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - type: string + description: > + `auto` is the default value + enum: [none, auto] + - $ref: "#/components/schemas/AssistantsApiResponseFormat" + x-oaiExpandable: true + + AssistantsApiResponseFormat: type: object - additionalProperties: false + description: | + An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. properties: - messages: - description: A list of [messages](/docs/api-reference/messages) to start the thread with. - type: array - items: - $ref: "#/components/schemas/CreateMessageRequest" - tool_resources: - type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: string - vector_stores: - type: array - description: | - A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. 
- maxItems: 1 - items: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. - maxItems: 10000 - items: - type: string - chunking_strategy: - # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly - type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - oneOf: - - type: object - title: Auto Chunking Strategy - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - additionalProperties: false - properties: - type: - type: string - description: Always `auto`. - enum: [ "auto" ] - required: - - type - - type: object - title: Static Chunking Strategy - additionalProperties: false - properties: - type: - type: string - description: Always `static`. - enum: [ "static" ] - static: - type: object - additionalProperties: false - properties: - max_chunk_size_tokens: - type: integer - minimum: 100 - maximum: 4096 - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: | - The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - required: - - type - - static - x-oaiExpandable: true - metadata: - type: object - description: | - Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - x-oaiExpandable: true - oneOf: - - required: [ vector_store_ids ] - - required: [ vector_stores ] - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + type: + type: string + enum: ["text", "json_object"] + example: "json_object" + default: "text" + description: Must be one of `text` or `json_object`. - ModifyThreadRequest: + AssistantObject: type: object - additionalProperties: false + title: Assistant + description: Represents an `assistant` that can call the model and use tools. properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `assistant`. + type: string + enum: [assistant] + created_at: + description: The Unix timestamp (in seconds) for when the assistant was created. + type: integer + name: + description: &assistant_name_param_description | + The name of the assistant. The maximum length is 256 characters. + type: string + maxLength: 256 + nullable: true + description: + description: &assistant_description_param_description | + The description of the assistant. The maximum length is 512 characters. + type: string + maxLength: 512 + nullable: true + model: + description: *model_description + type: string + instructions: + description: &assistant_instructions_param_description | + The system instructions that the assistant uses. The maximum length is 256,000 characters. 
+ type: string + maxLength: 256000 + nullable: true + tools: + description: &assistant_tools_param_description | + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + default: [] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true tool_resources: type: object description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: code_interpreter: type: object @@ -13153,8 +9730,8 @@ components: file_ids: type: array description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. + default: [] maxItems: 20 items: type: string @@ -13164,1349 +9741,1016 @@ components: vector_store_ids: type: array description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. + The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. maxItems: 1 items: type: string nullable: true metadata: - description: *metadata_description + description: &metadata_description | + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. type: object x-oaiTypeLabel: map nullable: true - - DeleteThreadResponse: - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - enum: [ thread.deleted ] - required: - - id - - object - - deleted - - ListThreadsResponse: - properties: - object: - type: string - example: "list" - data: - type: array - items: - $ref: "#/components/schemas/ThreadObject" - first_id: - type: string - example: "asst_abc123" - last_id: - type: string - example: "asst_abc456" - has_more: - type: boolean - example: false - required: - - object - - data - - first_id - - last_id - - has_more - - MessageObject: - type: object - title: The message object - description: Represents a message within a [thread](/docs/api-reference/threads). - properties: - id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `thread.message`. - type: string - enum: [ "thread.message" ] - created_at: - description: The Unix timestamp (in seconds) for when the message was created. 
- type: integer - thread_id: - description: The [thread](/docs/api-reference/threads) ID that this message belongs to. - type: string - status: - description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. - type: string - enum: [ "in_progress", "incomplete", "completed" ] - incomplete_details: - description: On an incomplete message, details about why the message is incomplete. - type: object - properties: - reason: - type: string - description: The reason the message is incomplete. - enum: - [ - "content_filter", - "max_tokens", - "run_cancelled", - "run_expired", - "run_failed", - ] - nullable: true - required: - - reason - completed_at: - description: The Unix timestamp (in seconds) for when the message was completed. - type: integer - nullable: true - incomplete_at: - description: The Unix timestamp (in seconds) for when the message was marked as incomplete. - type: integer - nullable: true - role: - description: The entity that produced the message. One of `user` or `assistant`. - type: string - enum: [ "user", "assistant" ] - content: - description: The content of the message in array of text and/or images. - type: array - items: - oneOf: - - $ref: "#/components/schemas/MessageContentImageFileObject" - - $ref: "#/components/schemas/MessageContentImageUrlObject" - - $ref: "#/components/schemas/MessageContentTextObject" - - $ref: "#/components/schemas/MessageContentRefusalObject" - x-oaiExpandable: true - assistant_id: - description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. - type: string - nullable: true - run_id: - description: The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. - type: string + temperature: + description: &run_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 nullable: true - attachments: - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - description: The tools to add this file to. - type: array - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" - x-oaiExpandable: true - description: A list of files attached to the message, and the tools they were added to. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. 
+ response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true required: - id - object - created_at - - thread_id - - status - - incomplete_details - - completed_at - - incomplete_at - - role - - content - - assistant_id - - run_id - - attachments + - name + - description + - model + - instructions + - tools - metadata x-oaiMeta: - name: The message object + name: The assistant object beta: true - example: | - { - "id": "msg_abc123", - "object": "thread.message", - "created_at": 1698983503, - "thread_id": "thread_abc123", - "role": "assistant", - "content": [ - { - "type": "text", - "text": { - "value": "Hi! How can I help you today?", - "annotations": [] - } - } - ], - "assistant_id": "asst_abc123", - "run_id": "run_abc123", - "attachments": [], - "metadata": {} - } + example: *create_assistants_example - MessageDeltaObject: + CreateAssistantRequest: type: object - title: Message delta object - description: | - Represents a message delta i.e. any changed fields on a message during streaming. + additionalProperties: false properties: - id: - description: The identifier of the message, which can be referenced in API endpoints. + model: + description: *model_description + example: "gpt-4-turbo" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4o", + "gpt-4o-2024-05-13", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + name: + description: *assistant_name_param_description type: string - object: - description: The object type, which is always `thread.message.delta`. + nullable: true + maxLength: 256 + description: + description: *assistant_description_param_description type: string - enum: [ "thread.message.delta" ] - delta: - description: The delta containing the fields that have changed on the Message. - type: object - properties: - role: - description: The entity that produced the message. One of `user` or `assistant`. - type: string - enum: [ "user", "assistant" ] - content: - description: The content of the message in array of text and/or images. - type: array - items: - oneOf: - - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" - - $ref: "#/components/schemas/MessageDeltaContentTextObject" - - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" - - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" - x-oaiExpandable: true - required: - - id - - object - - delta - x-oaiMeta: - name: The message delta object - beta: true - example: | - { - "id": "msg_123", - "object": "thread.message.delta", - "delta": { - "content": [ - { - "index": 0, - "type": "text", - "text": { "value": "Hello", "annotations": [] } - } - ] - } - } - - CreateMessageRequest: - type: object - additionalProperties: false - required: - - role - - content - properties: - role: + nullable: true + maxLength: 512 + instructions: + description: *assistant_instructions_param_description type: string - enum: [ "user", "assistant" ] - description: | - The role of the entity that is creating the message. Allowed values include: - - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. 
- - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation. - content: - oneOf: - - type: string - description: The text contents of the message. - title: Text content - - type: array - description: An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview). - title: Array of content parts - items: - oneOf: - - $ref: "#/components/schemas/MessageContentImageFileObject" - - $ref: "#/components/schemas/MessageContentImageUrlObject" - - $ref: "#/components/schemas/MessageRequestContentTextObject" - x-oaiExpandable: true - minItems: 1 - x-oaiExpandable: true - attachments: + nullable: true + maxLength: 256000 + tools: + description: *assistant_tools_param_description + default: [] type: array + maxItems: 128 items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - description: The tools to add this file to. - type: array - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" - x-oaiExpandable: true - description: A list of files attached to the message, and the tools they should be added to. - required: - - file_id - - tools + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: + type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + vector_stores: + type: array + description: | + A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. + maxItems: 10000 + items: + type: string + metadata: + type: object + description: | + Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ x-oaiTypeLabel: map + oneOf: + - required: [vector_store_ids] + - required: [vector_stores] nullable: true metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true + temperature: + description: &run_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - ModifyMessageRequest: - type: object - additionalProperties: false - properties: - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true + required: + - model - DeleteMessageResponse: + ModifyAssistantRequest: type: object + additionalProperties: false properties: - id: + model: + description: *model_description + anyOf: + - type: string + name: + description: *assistant_name_param_description type: string - deleted: - type: boolean - object: + nullable: true + maxLength: 256 + description: + description: *assistant_description_param_description type: string - enum: [ thread.message.deleted ] - required: - - id - - object - - deleted - - ListMessagesResponse: - properties: - object: + nullable: true + maxLength: 512 + instructions: + description: *assistant_instructions_param_description type: string - example: "list" - data: + nullable: true + maxLength: 256000 + tools: + description: *assistant_tools_param_description + default: [] type: array + maxItems: 128 items: - $ref: "#/components/schemas/MessageObject" - first_id: - type: string - example: "msg_abc123" - last_id: - type: string - example: "msg_abc123" - has_more: - type: boolean - example: false - required: - - object - - data - - first_id - - last_id - - has_more - - MessageContentImageFileObject: - title: Image file - type: object - description: References an image [File](/docs/api-reference/files) in the content of a message. - properties: - type: - description: Always `image_file`. - type: string - enum: [ "image_file" ] - image_file: - type: object - properties: - file_id: - description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. - type: string - detail: - type: string - description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] - default: "auto" - required: - - file_id - required: - - type - - image_file - - MessageDeltaContentImageFileObject: - title: Image file - type: object - description: References an image [File](/docs/api-reference/files) in the content of a message. - properties: - index: - type: integer - description: The index of the content part in the message. - type: - description: Always `image_file`. 
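As an illustration of the CreateAssistantRequest schema completed above, a sketch of a request body built as a plain Dart map, covering `tools` and `tool_resources`; the assistant name and the file/vector-store IDs are placeholders, not values from the spec.

import 'dart:convert';

void main() {
  final body = {
    'model': 'gpt-4-turbo', // the only required field
    'name': 'Data helper',  // optional, max 256 characters
    'tools': [
      {'type': 'code_interpreter'},
      {'type': 'file_search'},
    ],
    'tool_resources': {
      'code_interpreter': {
        'file_ids': ['file-abc123'], // up to 20 file IDs
      },
      'file_search': {
        'vector_store_ids': ['vs_abc123'], // at most 1 vector store
      },
    },
    'response_format': 'auto', // string form of AssistantsApiResponseFormatOption
  };
  print(jsonEncode(body));
}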
- type: string - enum: [ "image_file" ] - image_file: - type: object - properties: - file_id: - description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. - type: string - detail: - type: string - description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] - default: "auto" - required: - - index - - type - - MessageContentImageUrlObject: - title: Image URL - type: object - description: References an image URL in the content of a message. - properties: - type: - type: string - enum: [ "image_url" ] - description: The type of the content part. - image_url: - type: object - properties: - url: - type: string - description: "The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." - format: uri - detail: - type: string - description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` - enum: [ "auto", "low", "high" ] - default: "auto" - required: - - url - required: - - type - - image_url - - MessageDeltaContentImageUrlObject: - title: Image URL - type: object - description: References an image URL in the content of a message. - properties: - index: - type: integer - description: The index of the content part in the message. - type: - description: Always `image_url`. - type: string - enum: [ "image_url" ] - image_url: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - url: - description: "The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." - type: string - detail: - type: string - description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] - default: "auto" - required: - - index - - type - - MessageContentTextObject: - title: Text - type: object - description: The text content that is part of a message. - properties: - type: - description: Always `text`. - type: string - enum: [ "text" ] - text: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description type: object - properties: - value: - description: The data that makes up the text. 
- type: string - annotations: - type: array - items: - oneOf: - - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" - - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" - x-oaiExpandable: true - required: - - value - - annotations - required: - - type - - text - - MessageContentRefusalObject: - title: Refusal - type: object - description: The refusal content generated by the assistant. - properties: - type: - description: Always `refusal`. - type: string - enum: [ "refusal" ] - refusal: - type: string - nullable: false - required: - - type - - refusal + x-oaiTypeLabel: map + nullable: true + temperature: + description: *run_temperature_description + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - MessageRequestContentTextObject: - title: Text + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + + DeleteAssistantResponse: type: object - description: The text content that is part of a message. properties: - type: - description: Always `text`. + id: type: string - enum: [ "text" ] - text: + deleted: + type: boolean + object: type: string - description: Text content to be sent to the model + enum: [assistant.deleted] required: - - type - - text + - id + - object + - deleted - MessageContentTextAnnotationsFileCitationObject: - title: File citation + ListAssistantsResponse: type: object - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. properties: - type: - description: Always `file_citation`. + object: type: string - enum: [ "file_citation" ] - text: - description: The text in the message content that needs to be replaced. + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/AssistantObject" + first_id: type: string - file_citation: - type: object - properties: - file_id: - description: The ID of the specific File the citation is from. - type: string - required: - - file_id - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + example: "asst_abc123" + last_id: + type: string + example: "asst_abc456" + has_more: + type: boolean + example: false required: - - type - - text - - file_citation - - start_index - - end_index + - object + - data + - first_id + - last_id + - has_more + x-oaiMeta: + name: List assistants response object + group: chat + example: *list_assistants_example - MessageContentTextAnnotationsFilePathObject: - title: File path + AssistantToolsCode: type: object - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + title: Code interpreter tool properties: type: - description: Always `file_path`. type: string - enum: [ "file_path" ] - text: - description: The text in the message content that needs to be replaced. 
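A minimal sketch of reading the ListAssistantsResponse envelope defined above; the payload is trimmed to a few fields and reuses the `asst_abc123`/`asst_abc456` IDs from the spec's own examples.

import 'dart:convert';

void main() {
  const raw = '''
  {"object": "list",
   "data": [{"id": "asst_abc123", "object": "assistant"}],
   "first_id": "asst_abc123",
   "last_id": "asst_abc456",
   "has_more": false}
  ''';

  final page = jsonDecode(raw) as Map<String, dynamic>;
  final assistants = (page['data'] as List).cast<Map<String, dynamic>>();
  // `first_id`/`last_id` bound the page; `has_more` signals whether another page exists.
  print('got ${assistants.length} assistant(s), has_more: ${page['has_more']}');
}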
- type: string - file_path: - type: object - properties: - file_id: - description: The ID of the file that was generated. - type: string - required: - - file_id - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + description: "The type of tool being defined: `code_interpreter`" + enum: ["code_interpreter"] required: - type - - text - - file_path - - start_index - - end_index - MessageDeltaContentTextObject: - title: Text + AssistantToolsFileSearch: type: object - description: The text content that is part of a message. + title: FileSearch tool properties: - index: - type: integer - description: The index of the content part in the message. type: - description: Always `text`. type: string - enum: [ "text" ] - text: - type: object - properties: - value: - description: The data that makes up the text. - type: string - annotations: - type: array - items: - oneOf: - - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject" - - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" - x-oaiExpandable: true + description: "The type of tool being defined: `file_search`" + enum: ["file_search"] required: - - index - type - MessageDeltaContentRefusalObject: - title: Refusal + AssistantToolsFunction: type: object - description: The refusal content that is part of a message. + title: Function tool properties: - index: - type: integer - description: The index of the refusal part in the message. type: - description: Always `refusal`. - type: string - enum: [ "refusal" ] - refusal: type: string + description: "The type of tool being defined: `function`" + enum: ["function"] + function: + $ref: "#/components/schemas/FunctionObject" required: - - index - type + - function - - MessageDeltaContentTextAnnotationsFileCitationObject: - title: File citation + TruncationObject: type: object - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + title: Thread Truncation Controls + description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. properties: - index: - type: integer - description: The index of the annotation in the text content part. type: - description: Always `file_citation`. - type: string - enum: [ "file_citation" ] - text: - description: The text in the message content that needs to be replaced. type: string - file_citation: - type: object - properties: - file_id: - description: The ID of the specific File the citation is from. - type: string - quote: - description: The specific quote in the file. - type: string - start_index: - type: integer - minimum: 0 - end_index: + description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. + enum: ["auto", "last_messages"] + last_messages: type: integer - minimum: 0 + description: The number of most recent messages from the thread when constructing the context for the run. 
+ minimum: 1 + nullable: true required: - - index - type - MessageDeltaContentTextAnnotationsFilePathObject: - title: File path + AssistantsApiToolChoiceOption: + description: | + Controls which (if any) tool is called by the model. + `none` means the model will not call any tools and instead generates a message. + `auto` is the default value and means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools before responding to the user. + Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + oneOf: + - type: string + description: > + `none` means the model will not call any tools and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools before responding to the user. + enum: [none, auto, required] + - $ref: "#/components/schemas/AssistantsNamedToolChoice" + x-oaiExpandable: true + + AssistantsNamedToolChoice: type: object - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + description: Specifies a tool the model should use. Use to force the model to call a specific tool. properties: - index: - type: integer - description: The index of the annotation in the text content part. type: - description: Always `file_path`. type: string - enum: [ "file_path" ] - text: - description: The text in the message content that needs to be replaced. - type: string - file_path: + enum: ["function", "code_interpreter", "file_search"] + description: The type of the tool. If type is `function`, the function name must be set + function: type: object properties: - file_id: - description: The ID of the file that was generated. + name: type: string - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + description: The name of the function to call. + required: + - name required: - - index - type - RunStepObject: + RunObject: type: object - title: Run steps - description: | - Represents a step in execution of a run. + title: A run on a thread + description: Represents an execution run on a [thread](/docs/api-reference/threads). properties: id: - description: The identifier of the run step, which can be referenced in API endpoints. + description: The identifier, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `thread.run.step`. + description: The object type, which is always `thread.run`. type: string - enum: [ "thread.run.step" ] + enum: ["thread.run"] created_at: - description: The Unix timestamp (in seconds) for when the run step was created. + description: The Unix timestamp (in seconds) for when the run was created. type: integer - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. - type: string thread_id: - description: The ID of the [thread](/docs/api-reference/threads) that was run. - type: string - run_id: - description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + description: The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. type: string - type: - description: The type of run step, which can be either `message_creation` or `tool_calls`. 
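A short sketch of the two run-level controls just defined, TruncationObject and AssistantsApiToolChoiceOption, expressed as plain Dart values; `my_function` is the placeholder name the spec itself uses for a named tool choice.

// TruncationObject: either `auto`, or keep only the n most recent messages.
Map<String, dynamic> truncation({int? lastMessages}) => lastMessages == null
    ? {'type': 'auto'}
    : {'type': 'last_messages', 'last_messages': lastMessages}; // must be >= 1

// AssistantsApiToolChoiceOption: a bare string ...
const toolChoiceAuto = 'auto'; // also: 'none', 'required'

// ... or an AssistantsNamedToolChoice forcing one tool.
const toolChoiceNamed = {
  'type': 'function',
  'function': {'name': 'my_function'}, // name is required when type is `function`
};

void main() {
  print(truncation());                // {type: auto}
  print(truncation(lastMessages: 5)); // {type: last_messages, last_messages: 5}
  print(toolChoiceAuto);
  print(toolChoiceNamed);
}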
+ assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. type: string - enum: [ "message_creation", "tool_calls" ] status: - description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. + description: The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. type: string - enum: [ "in_progress", "cancelled", "failed", "completed", "expired" ] - step_details: + enum: + [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "incomplete", + "expired", + ] + required_action: type: object - description: The details of the run step. - oneOf: - - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" - x-oaiExpandable: true + description: Details on the action required to continue the run. Will be `null` if no action is required. + nullable: true + properties: + type: + description: For now, this is always `submit_tool_outputs`. + type: string + enum: ["submit_tool_outputs"] + submit_tool_outputs: + type: object + description: Details on the tool outputs needed for this run to continue. + properties: + tool_calls: + type: array + description: A list of the relevant tool calls. + items: + $ref: "#/components/schemas/RunToolCallObject" + required: + - tool_calls + required: + - type + - submit_tool_outputs last_error: type: object - description: The last error associated with this run step. Will be `null` if there are no errors. + description: The last error associated with this run. Will be `null` if there are no errors. nullable: true properties: code: type: string - description: One of `server_error` or `rate_limit_exceeded`. - enum: [ "server_error", "rate_limit_exceeded" ] + description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. + enum: ["server_error", "rate_limit_exceeded", "invalid_prompt"] message: type: string description: A human-readable description of the error. required: - code - message - expired_at: - description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + expires_at: + description: The Unix timestamp (in seconds) for when the run will expire. + type: integer + nullable: true + started_at: + description: The Unix timestamp (in seconds) for when the run was started. type: integer nullable: true cancelled_at: - description: The Unix timestamp (in seconds) for when the run step was cancelled. + description: The Unix timestamp (in seconds) for when the run was cancelled. type: integer nullable: true failed_at: - description: The Unix timestamp (in seconds) for when the run step failed. + description: The Unix timestamp (in seconds) for when the run failed. type: integer nullable: true completed_at: - description: The Unix timestamp (in seconds) for when the run step completed. + description: The Unix timestamp (in seconds) for when the run was completed. type: integer nullable: true + incomplete_details: + description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. + type: object + nullable: true + properties: + reason: + description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. 
+ type: string + enum: ["max_completion_tokens", "max_prompt_tokens"] + model: + description: The model that the [assistant](/docs/api-reference/assistants) used for this run. + type: string + instructions: + description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + type: string + tools: + description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + default: [] + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true usage: - $ref: "#/components/schemas/RunStepCompletionUsage" + $ref: "#/components/schemas/RunCompletionUsage" + temperature: + description: The sampling temperature used for this run. If not set, defaults to 1. + type: number + nullable: true + top_p: + description: The nucleus sampling value used for this run. If not set, defaults to 1. + type: number + nullable: true + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens specified to have been used over the course of the run. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens specified to have been used over the course of the run. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - id - object - created_at - - assistant_id - thread_id - - run_id - - type + - assistant_id - status - - step_details + - required_action - last_error - - expired_at + - expires_at + - started_at - cancelled_at - failed_at - completed_at + - model + - instructions + - tools - metadata - usage + - incomplete_details + - max_prompt_tokens + - max_completion_tokens + - truncation_strategy + - tool_choice + - response_format x-oaiMeta: - name: The run step object - beta: true - example: *run_step_object_example - - RunStepDeltaObject: - type: object - title: Run step delta object - description: | - Represents a run step delta i.e. any changed fields on a run step during streaming. - properties: - id: - description: The identifier of the run step, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `thread.run.step.delta`. - type: string - enum: [ "thread.run.step.delta" ] - delta: - description: The delta containing the fields that have changed on the run step. - type: object - properties: - step_details: - type: object - description: The details of the run step. 
- oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" - x-oaiExpandable: true - required: - - id - - object - - delta - x-oaiMeta: - name: The run step delta object + name: The run object beta: true example: | { - "id": "step_123", - "object": "thread.run.step.delta", - "delta": { - "step_details": { - "type": "tool_calls", - "tool_calls": [ - { - "index": 0, - "id": "call_123", - "type": "code_interpreter", - "code_interpreter": { "input": "", "outputs": [] } - } - ] - } - } + "id": "run_abc123", + "object": "thread.run", + "created_at": 1698107661, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699073476, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699073498, + "last_error": null, + "model": "gpt-4-turbo", + "instructions": null, + "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], + "metadata": {}, + "incomplete_details": null, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto" } - - ListRunStepsResponse: - properties: - object: - type: string - example: "list" - data: - type: array - items: - $ref: "#/components/schemas/RunStepObject" - first_id: - type: string - example: "step_abc123" - last_id: - type: string - example: "step_abc456" - has_more: - type: boolean - example: false - required: - - object - - data - - first_id - - last_id - - has_more - - RunStepDetailsMessageCreationObject: - title: Message creation + CreateRunRequest: type: object - description: Details of the message creation by the run step. + additionalProperties: false properties: - type: - description: Always `message_creation`. + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. type: string - enum: [ "message_creation" ] - message_creation: - type: object - properties: - message_id: - type: string - description: The ID of the message that was created by this run step. - required: - - message_id - required: - - type - - message_creation - - RunStepDeltaStepDetailsMessageCreationObject: - title: Message creation - type: object - description: Details of the message creation by the run step. - properties: - type: - description: Always `message_creation`. + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + example: "gpt-4-turbo" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4o", + "gpt-4o-2024-05-13", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + nullable: true + instructions: + description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. 
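A hedged sketch that decodes a trimmed version of the run object example above and reads its usage. Treating `cancelled`, `failed`, `completed`, `incomplete` and `expired` as the terminal states is this sketch's reading of the status enum, not wording from the spec.

import 'dart:convert';

const terminalStatuses = {'cancelled', 'failed', 'completed', 'incomplete', 'expired'};

void main() {
  const raw = '''
  {"id": "run_abc123", "object": "thread.run", "status": "completed",
   "usage": {"prompt_tokens": 123, "completion_tokens": 456, "total_tokens": 579},
   "max_prompt_tokens": 1000,
   "truncation_strategy": {"type": "auto", "last_messages": null}}
  ''';

  final run = jsonDecode(raw) as Map<String, dynamic>;
  final status = run['status'] as String;

  // Per RunCompletionUsage, `usage` stays null until the run reaches a terminal state.
  if (terminalStatuses.contains(status)) {
    final usage = run['usage'] as Map<String, dynamic>?;
    print('total tokens: ${usage?['total_tokens']}');
  } else {
    print('run is $status; usage not available yet');
  }
}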
This is useful for modifying the behavior on a per-run basis. type: string - enum: [ "message_creation" ] - message_creation: - type: object - properties: - message_id: - type: string - description: The ID of the message that was created by this run step. - required: - - type - - RunStepDetailsToolCallsObject: - title: Tool calls - type: object - description: Details of the tool call. - properties: - type: - description: Always `tool_calls`. + nullable: true + additional_instructions: + description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. type: string - enum: [ "tool_calls" ] - tool_calls: + nullable: true + additional_messages: + description: Adds additional messages to the thread before creating the run. type: array - description: | - An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. items: - oneOf: - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" - x-oaiExpandable: true - required: - - type - - tool_calls - - RunStepDeltaStepDetailsToolCallsObject: - title: Tool calls - type: object - description: Details of the tool call. - properties: - type: - description: Always `tool_calls`. - type: string - enum: [ "tool_calls" ] - tool_calls: + $ref: "#/components/schemas/CreateMessageRequest" + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + nullable: true type: array - description: | - An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + maxItems: 20 items: oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" x-oaiExpandable: true - required: - - type - - RunStepDetailsToolCallsCodeObject: - title: Code Interpreter tool call - type: object - description: Details of the Code Interpreter tool call the run step was involved in. - properties: - id: - type: string - description: The ID of the tool call. - type: - type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: [ "code_interpreter" ] - code_interpreter: - type: object - description: The Code Interpreter tool call definition. - required: - - input - - outputs - properties: - input: - type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. 
- items: - type: object - oneOf: - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" - x-oaiExpandable: true - required: - - id - - type - - code_interpreter - - RunStepDeltaStepDetailsToolCallsCodeObject: - title: Code interpreter tool call - type: object - description: Details of the Code Interpreter tool call the run step was involved in. - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call. - type: - type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: [ "code_interpreter" ] - code_interpreter: + metadata: + description: *metadata_description type: object - description: The Code Interpreter tool call definition. - properties: - input: - type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. - items: - type: object - oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject" - x-oaiExpandable: true - required: - - index - - type - - RunStepDetailsToolCallsCodeOutputLogsObject: - title: Code Interpreter log output - type: object - description: Text output from the Code Interpreter tool call as part of a run step. - properties: - type: - description: Always `logs`. - type: string - enum: [ "logs" ] - logs: - type: string - description: The text output from the Code Interpreter tool call. - required: - - type - - logs + x-oaiTypeLabel: map + nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *run_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: - title: Code interpreter log output - type: object - description: Text output from the Code Interpreter tool call as part of a run step. - properties: - index: + We generally recommend altering this or temperature but not both. + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + max_prompt_tokens: type: integer - description: The index of the output in the outputs array. - type: - description: Always `logs`. - type: string - enum: [ "logs" ] - logs: - type: string - description: The text output from the Code Interpreter tool call. - required: - - index - - type - - RunStepDetailsToolCallsCodeOutputImageObject: - title: Code Interpreter image output - type: object - properties: - type: - description: Always `image`. 
- type: string - enum: [ "image" ] - image: - type: object - properties: - file_id: - description: The [file](/docs/api-reference/files) ID of the image. - type: string - required: - - file_id - required: - - type - - image - - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: - title: Code interpreter image output - type: object - properties: - index: + nullable: true + description: | + The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: type: integer - description: The index of the output in the outputs array. - type: - description: Always `image`. - type: string - enum: [ "image" ] - image: - type: object - properties: - file_id: - description: The [file](/docs/api-reference/files) ID of the image. - type: string + nullable: true + description: | + The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - - index - - type - - RunStepDetailsToolCallsFileSearchObject: - title: File search tool call + - thread_id + - assistant_id + ListRunsResponse: type: object properties: - id: + object: type: string - description: The ID of the tool call object. - type: + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/RunObject" + first_id: type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: [ "file_search" ] - file_search: - type: object - description: For now, this is always going to be an empty object. - x-oaiTypeLabel: map - properties: - ranking_options: - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject" - results: - type: array - description: The results of the file search. - items: - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" + example: "run_abc123" + last_id: + type: string + example: "run_abc456" + has_more: + type: boolean + example: false required: - - id - - type - - file_search - - RunStepDetailsToolCallsFileSearchRankingOptionsObject: - title: File search tool call ranking options + - object + - data + - first_id + - last_id + - has_more + ModifyRunRequest: type: object - description: The ranking options for the file search. + additionalProperties: false properties: - ranker: - type: string - description: The ranker used for the file search. - enum: [ "default_2024_08_21" ] - score_threshold: - type: number - description: The score threshold for the file search. All values must be a floating point number between 0 and 1. 
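A sketch of a CreateRunRequest body exercising the per-run overrides described above (additional instructions, streaming, token limits, truncation and tool choice); the assistant ID and the instruction text are placeholders.

import 'dart:convert';

void main() {
  final body = {
    'assistant_id': 'asst_abc123',
    'model': 'gpt-4-turbo',            // optional override of the assistant's model
    'additional_instructions': 'Keep answers brief.', // appended, not replacing, instructions
    'stream': true,                    // server-sent events until a terminal state
    // Both limits must be at least 256; exceeding either ends the run as `incomplete`.
    'max_prompt_tokens': 1000,
    'max_completion_tokens': 1000,
    'truncation_strategy': {'type': 'auto'},
    'tool_choice': 'auto',
  };
  print(jsonEncode(body));
}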
- minimum: 0 - maximum: 1 - required: - - ranker - - score_threshold - - RunStepDetailsToolCallsFileSearchResultObject: - title: File search tool call result + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + SubmitToolOutputsRunRequest: type: object - description: A result instance of the file search. - x-oaiTypeLabel: map + additionalProperties: false properties: - file_id: - type: string - description: The ID of the file that result was found in. - file_name: - type: string - description: The name of the file that result was found in. - score: - type: number - description: The score of the result. All values must be a floating point number between 0 and 1. - minimum: 0 - maximum: 1 - content: + tool_outputs: + description: A list of tools for which the outputs are being submitted. type: array - description: The content of the result that was found. The content is only included if requested via the include query parameter. items: type: object properties: - type: + tool_call_id: type: string - description: The type of the content. - enum: [ "text" ] - text: + description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. + output: type: string - description: The text content of the file. - required: - - file_id - - file_name - - score - - RunStepDeltaStepDetailsToolCallsFileSearchObject: - title: File search tool call - type: object - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call object. - type: - type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: [ "file_search" ] - file_search: - type: object - description: For now, this is always going to be an empty object. - x-oaiTypeLabel: map + description: The output of the tool call to be submitted to continue the run. + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. required: - - index - - type - - file_search + - tool_outputs - RunStepDetailsToolCallsFunctionObject: + RunToolCallObject: type: object - title: Function tool call + description: Tool call objects properties: id: type: string - description: The ID of the tool call object. + description: The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. type: type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: [ "function" ] + description: The type of tool call the output is required for. For now, this is always `function`. + enum: ["function"] function: type: object - description: The definition of the function that was called. + description: The function definition. properties: name: type: string description: The name of the function. arguments: type: string - description: The arguments passed to the function. - output: - type: string - description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. - nullable: true + description: The arguments that the model expects you to pass to the function. 
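A sketch of the `requires_action` round trip: read the RunToolCallObject entries out of `required_action` and assemble a SubmitToolOutputsRunRequest body. The `handle` dispatcher and the call ID and argument values are hypothetical.

import 'dart:convert';

Map<String, dynamic> buildToolOutputs(Map<String, dynamic> run) {
  final calls =
      (run['required_action']['submit_tool_outputs']['tool_calls'] as List)
          .cast<Map<String, dynamic>>();
  return {
    'tool_outputs': [
      for (final call in calls)
        {
          'tool_call_id': call['id'], // must match the ID from required_action
          // `function.arguments` arrives as a JSON string to be parsed by the caller.
          'output': handle(call['function']['name'] as String,
              jsonDecode(call['function']['arguments'] as String)),
        },
    ],
  };
}

// Hypothetical dispatcher for the application's own functions.
String handle(String name, dynamic args) => jsonEncode({'ok': true});

void main() {
  final run = {
    'required_action': {
      'type': 'submit_tool_outputs',
      'submit_tool_outputs': {
        'tool_calls': [
          {
            'id': 'call_abc123',
            'type': 'function',
            'function': {'name': 'my_function', 'arguments': '{"x": 1}'},
          },
        ],
      },
    },
  };
  print(jsonEncode(buildToolOutputs(run)));
}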
required: - name - arguments - - output required: - id - type - function - RunStepDeltaStepDetailsToolCallsFunctionObject: + CreateThreadAndRunRequest: type: object - title: Function tool call + additionalProperties: false properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. type: string - description: The ID of the tool call object. - type: + thread: + $ref: "#/components/schemas/CreateThreadRequest" + description: If no thread is provided, an empty thread will be created. + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + example: "gpt-4-turbo" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4o", + "gpt-4o-2024-05-13", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + nullable: true + instructions: + description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: [ "function" ] - function: + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + nullable: true + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + tool_resources: type: object - description: The definition of the function that was called. + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. - nullable: true - required: - - index - - type - - VectorStoreExpirationAfter: - type: object - title: Vector store expiration policy - description: The expiration policy for a vector store. - properties: - anchor: - description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." - type: string - enum: [ "last_active_at" ] - days: - description: The number of days after the anchor time that the vector store will expire. + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. 
+ default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *run_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: type: integer - minimum: 1 - maximum: 365 + nullable: true + description: | + The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - - anchor - - days + - thread_id + - assistant_id - VectorStoreObject: + ThreadObject: type: object - title: Vector store - description: A vector store is a collection of processed files can be used by the `file_search` tool. + title: Thread + description: Represents a thread that contains [messages](/docs/api-reference/messages). properties: id: description: The identifier, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `vector_store`. + description: The object type, which is always `thread`. type: string - enum: [ "vector_store" ] + enum: ["thread"] created_at: - description: The Unix timestamp (in seconds) for when the vector store was created. - type: integer - name: - description: The name of the vector store. - type: string - usage_bytes: - description: The total number of bytes used by the files in the vector store. + description: The Unix timestamp (in seconds) for when the thread was created. type: integer - file_counts: + tool_resources: type: object + description: | + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. 
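A `CreateThreadAndRunRequest` is easier to read as a concrete payload. A sketch, assuming the `POST /v1/threads/runs` endpoint and placeholder IDs; every field in the body comes from the schema above, and the optional fields shown (model override, temperature, token cap) can be omitted:

    import os
    import requests

    BASE = "https://api.openai.com/v1"
    HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
               "OpenAI-Beta": "assistants=v2"}  # assumed header for the beta Assistants API

    body = {
        "assistant_id": "asst_abc123",                # placeholder assistant ID
        "thread": {                                   # CreateThreadRequest: seeds the new thread
            "messages": [{"role": "user", "content": "Summarize the attached report."}]
        },
        "model": "gpt-4o",                            # overrides the assistant's default model
        "instructions": "Answer in two sentences.",   # overrides the assistant's system message
        "temperature": 0.2,
        "max_completion_tokens": 1024,                # run ends with status `incomplete` if exceeded
        "stream": False,
    }
    run = requests.post(f"{BASE}/threads/runs", headers=HEADERS, json=body).json()
    print(run["id"], run["status"])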
properties: - in_progress: - description: The number of files that are currently being processed. - type: integer - completed: - description: The number of files that have been successfully processed. - type: integer - failed: - description: The number of files that have failed to process. - type: integer - cancelled: - description: The number of files that were cancelled. - type: integer - total: - description: The total number of files. - type: integer - required: - - in_progress - - completed - - failed - - cancelled - - total - status: - description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. - type: string - enum: [ "expired", "in_progress", "completed" ] - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - expires_at: - description: The Unix timestamp (in seconds) for when the vector store will expire. - type: integer - nullable: true - last_active_at: - description: The Unix timestamp (in seconds) for when the vector store was last active. - type: integer + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. + maxItems: 1 + items: + type: string nullable: true metadata: description: *metadata_description @@ -14516,74 +10760,115 @@ components: required: - id - object - - usage_bytes - created_at - - status - - last_active_at - - name - - file_counts + - tool_resources - metadata x-oaiMeta: - name: The vector store object + name: The thread object beta: true example: | { - "id": "vs_123", - "object": "vector_store", + "id": "thread_abc123", + "object": "thread", "created_at": 1698107661, - "usage_bytes": 123456, - "last_active_at": 1698107661, - "name": "my_vector_store", - "status": "completed", - "file_counts": { - "in_progress": 0, - "completed": 100, - "cancelled": 0, - "failed": 0, - "total": 100 - }, - "metadata": {}, - "last_used_at": 1698107661 + "metadata": {} } - CreateVectorStoreRequest: + CreateThreadRequest: type: object additionalProperties: false properties: - file_ids: - description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + messages: + description: A list of [messages](/docs/api-reference/messages) to start the thread with. type: array - maxItems: 500 items: - type: string - name: - description: The name of the vector store. - type: string - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - chunking_strategy: + $ref: "#/components/schemas/CreateMessageRequest" + tool_resources: type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. - oneOf: - - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" - - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" - x-oaiExpandable: true + description: | + A set of resources that are made available to the assistant's tools in this thread. 
The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. + maxItems: 1 + items: + type: string + vector_stores: + type: array + description: | + A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. + maxItems: 1 + items: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. + maxItems: 10000 + items: + type: string + metadata: + type: object + description: | + Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + oneOf: + - required: [vector_store_ids] + - required: [vector_stores] + nullable: true metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true - UpdateVectorStoreRequest: + ModifyThreadRequest: type: object additionalProperties: false properties: - name: - description: The name of the vector store. - type: string - nullable: true - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" + tool_resources: + type: object + description: | + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. 
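The `tool_resources.file_search.vector_stores` helper in `CreateThreadRequest` above lets a thread be created and a vector store built from file IDs in one call. A sketch, assuming the `POST /v1/threads` endpoint and placeholder file IDs:

    import os
    import requests

    BASE = "https://api.openai.com/v1"
    HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
               "OpenAI-Beta": "assistants=v2"}  # assumed header for the beta Assistants API

    body = {
        "messages": [                                  # initial CreateMessageRequest items
            {"role": "user", "content": "What does the Q3 report say about churn?"}
        ],
        "tool_resources": {
            "file_search": {
                # Helper form: build a vector store from file IDs and attach it to the thread.
                "vector_stores": [{"file_ids": ["file-abc123"],          # placeholder file ID
                                   "metadata": {"source": "q3-report"}}]
            }
        },
        "metadata": {"owner": "docs-example"},
    }
    thread = requests.post(f"{BASE}/threads", headers=HEADERS, json=body).json()
    print(thread["id"], thread["tool_resources"])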
+ maxItems: 1 + items: + type: string nullable: true metadata: description: *metadata_description @@ -14591,7 +10876,22 @@ components: x-oaiTypeLabel: map nullable: true - ListVectorStoresResponse: + DeleteThreadResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: [thread.deleted] + required: + - id + - object + - deleted + + ListThreadsResponse: properties: object: type: string @@ -14599,13 +10899,13 @@ components: data: type: array items: - $ref: "#/components/schemas/VectorStoreObject" + $ref: "#/components/schemas/ThreadObject" first_id: type: string - example: "vs_abc123" + example: "asst_abc123" last_id: type: string - example: "vs_abc456" + example: "asst_abc456" has_more: type: boolean example: false @@ -14616,198 +10916,273 @@ components: - last_id - has_more - DeleteVectorStoreResponse: - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - enum: [ vector_store.deleted ] - required: - - id - - object - - deleted - - VectorStoreFileObject: + MessageObject: type: object - title: Vector store files - description: A list of files attached to a vector store. + title: The message object + description: Represents a message within a [thread](/docs/api-reference/threads). properties: id: description: The identifier, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `vector_store.file`. + description: The object type, which is always `thread.message`. type: string - enum: [ "vector_store.file" ] - usage_bytes: - description: The total vector store usage in bytes. Note that this may be different from the original file size. - type: integer + enum: ["thread.message"] created_at: - description: The Unix timestamp (in seconds) for when the vector store file was created. + description: The Unix timestamp (in seconds) for when the message was created. type: integer - vector_store_id: - description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. + thread_id: + description: The [thread](/docs/api-reference/threads) ID that this message belongs to. type: string status: - description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. + description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. type: string - enum: [ "in_progress", "completed", "cancelled", "failed" ] - last_error: + enum: ["in_progress", "incomplete", "completed"] + incomplete_details: + description: On an incomplete message, details about why the message is incomplete. type: object - description: The last error associated with this vector store file. Will be `null` if there are no errors. - nullable: true properties: - code: + reason: type: string - description: One of `server_error` or `rate_limit_exceeded`. + description: The reason the message is incomplete. enum: [ - "server_error", - "unsupported_file", - "invalid_file", + "content_filter", + "max_tokens", + "run_cancelled", + "run_expired", + "run_failed", ] - message: - type: string - description: A human-readable description of the error. + nullable: true required: - - code - - message - chunking_strategy: + - reason + completed_at: + description: The Unix timestamp (in seconds) for when the message was completed. 
+ type: integer + nullable: true + incomplete_at: + description: The Unix timestamp (in seconds) for when the message was marked as incomplete. + type: integer + nullable: true + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: ["user", "assistant"] + content: + description: The content of the message in array of text and/or images. + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageContentTextObject" + x-oaiExpandable: true + assistant_id: + description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. + type: string + nullable: true + run_id: + description: The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. + type: string + nullable: true + attachments: + type: array + items: + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they were added to. + nullable: true + metadata: + description: *metadata_description type: object - description: The strategy used to chunk the file. - oneOf: - - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" - - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" - x-oaiExpandable: true + x-oaiTypeLabel: map + nullable: true required: - id - object - - usage_bytes - created_at - - vector_store_id + - thread_id - status - - last_error + - incomplete_details + - completed_at + - incomplete_at + - role + - content + - assistant_id + - run_id + - attachments + - metadata x-oaiMeta: - name: The vector store file object + name: The message object beta: true example: | { - "id": "file-abc123", - "object": "vector_store.file", - "usage_bytes": 1234, - "created_at": 1698107661, - "vector_store_id": "vs_abc123", - "status": "completed", - "last_error": null, - "chunking_strategy": { - "type": "static", - "static": { - "max_chunk_size_tokens": 800, - "chunk_overlap_tokens": 400 + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1698983503, + "thread_id": "thread_abc123", + "role": "assistant", + "content": [ + { + "type": "text", + "text": { + "value": "Hi! How can I help you today?", + "annotations": [] + } } - } + ], + "assistant_id": "asst_abc123", + "run_id": "run_abc123", + "attachments": [], + "metadata": {} } - OtherChunkingStrategyResponseParam: + MessageDeltaObject: type: object - title: Other Chunking Strategy - description: This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. - additionalProperties: false + title: Message delta object + description: | + Represents a message delta i.e. any changed fields on a message during streaming. properties: - type: + id: + description: The identifier of the message, which can be referenced in API endpoints. type: string - description: Always `other`. 
- enum: [ "other" ] - required: - - type - - StaticChunkingStrategyResponseParam: - type: object - title: Static Chunking Strategy - additionalProperties: false - properties: - type: + object: + description: The object type, which is always `thread.message.delta`. type: string - description: Always `static`. - enum: [ "static" ] - static: - $ref: "#/components/schemas/StaticChunkingStrategy" - required: - - type - - static - - StaticChunkingStrategy: - type: object - additionalProperties: false - properties: - max_chunk_size_tokens: - type: integer - minimum: 100 - maximum: 4096 - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: | - The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. + enum: ["thread.message.delta"] + delta: + description: The delta containing the fields that have changed on the Message. + type: object + properties: + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: ["user", "assistant"] + content: + description: The content of the message in array of text and/or images. + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" + - $ref: "#/components/schemas/MessageDeltaContentTextObject" + - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" + x-oaiExpandable: true required: - - max_chunk_size_tokens - - chunk_overlap_tokens + - id + - object + - delta + x-oaiMeta: + name: The message delta object + beta: true + example: | + { + "id": "msg_123", + "object": "thread.message.delta", + "delta": { + "content": [ + { + "index": 0, + "type": "text", + "text": { "value": "Hello", "annotations": [] } + } + ] + } + } - AutoChunkingStrategyRequestParam: + CreateMessageRequest: type: object - title: Auto Chunking Strategy - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. additionalProperties: false - properties: - type: - type: string - description: Always `auto`. - enum: [ "auto" ] required: - - type - - StaticChunkingStrategyRequestParam: - type: object - title: Static Chunking Strategy - additionalProperties: false + - role + - content properties: - type: + role: type: string - description: Always `static`. - enum: [ "static" ] - static: - $ref: "#/components/schemas/StaticChunkingStrategy" - required: - - type - - static - - ChunkingStrategyRequestParam: - type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - oneOf: - - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" - - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" - x-oaiExpandable: true + enum: ["user", "assistant"] + description: | + The role of the entity that is creating the message. Allowed values include: + - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. + - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation. + content: + oneOf: + - type: string + description: The text contents of the message. 
+ title: Text content + - type: array + description: An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview). + title: Array of content parts + items: + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageRequestContentTextObject" + x-oaiExpandable: true + minItems: 1 + x-oaiExpandable: true + attachments: + type: array + items: + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they should be added to. + required: + - file_id + - tools + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true - CreateVectorStoreFileRequest: + ModifyMessageRequest: type: object additionalProperties: false properties: - file_id: - description: A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + + DeleteMessageResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: type: string - chunking_strategy: - $ref: "#/components/schemas/ChunkingStrategyRequestParam" + enum: [thread.message.deleted] required: - - file_id + - id + - object + - deleted - ListVectorStoreFilesResponse: + ListMessagesResponse: properties: object: type: string @@ -14815,13 +11190,13 @@ components: data: type: array items: - $ref: "#/components/schemas/VectorStoreFileObject" + $ref: "#/components/schemas/MessageObject" first_id: type: string - example: "file-abc123" + example: "msg_abc123" last_id: type: string - example: "file-abc456" + example: "msg_abc123" has_more: type: boolean example: false @@ -14832,1736 +11207,1801 @@ components: - last_id - has_more - DeleteVectorStoreFileResponse: + MessageContentImageFileObject: + title: Image file type: object + description: References an image [File](/docs/api-reference/files) in the content of a message. properties: - id: - type: string - deleted: - type: boolean - object: + type: + description: Always `image_file`. type: string - enum: [ vector_store.file.deleted ] + enum: ["image_file"] + image_file: + type: object + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. + type: string + detail: + type: string + description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + enum: ["auto", "low", "high"] + default: "auto" + required: + - file_id required: - - id - - object - - deleted + - type + - image_file - VectorStoreFileBatchObject: + MessageDeltaContentImageFileObject: + title: Image file type: object - title: Vector store file batch - description: A batch of files attached to a vector store. 
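Pulling the `CreateMessageRequest` pieces above together, here is a sketch of a message that mixes a text part with an `image_file` part and attaches a document for `file_search`. The `POST /v1/threads/{thread_id}/messages` path and the IDs are assumptions; the body follows the request schema (note that request text parts carry a plain string, unlike `MessageContentTextObject` on responses):

    import os
    import requests

    BASE = "https://api.openai.com/v1"
    HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
               "OpenAI-Beta": "assistants=v2"}  # assumed header for the beta Assistants API

    thread_id = "thread_abc123"                        # placeholder thread ID
    body = {
        "role": "user",
        "content": [                                   # array-of-parts form
            {"type": "text", "text": "What trend does this chart show?"},
            {"type": "image_file",
             "image_file": {"file_id": "file-img123", "detail": "low"}},  # placeholder file ID
        ],
        "attachments": [
            {"file_id": "file-doc456",                 # placeholder file ID
             "tools": [{"type": "file_search"}]}       # make this file searchable in the thread
        ],
    }
    message = requests.post(f"{BASE}/threads/{thread_id}/messages",
                            headers=HEADERS, json=body).json()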
+ description: References an image [File](/docs/api-reference/files) in the content of a message. properties: - id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `vector_store.file_batch`. - type: string - enum: [ "vector_store.files_batch" ] - created_at: - description: The Unix timestamp (in seconds) for when the vector store files batch was created. + index: type: integer - vector_store_id: - description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. - type: string - status: - description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. + description: The index of the content part in the message. + type: + description: Always `image_file`. type: string - enum: [ "in_progress", "completed", "cancelled", "failed" ] - file_counts: + enum: ["image_file"] + image_file: type: object properties: - in_progress: - description: The number of files that are currently being processed. - type: integer - completed: - description: The number of files that have been processed. - type: integer - failed: - description: The number of files that have failed to process. - type: integer - cancelled: - description: The number of files that where cancelled. - type: integer - total: - description: The total number of files. - type: integer - required: - - in_progress - - completed - - cancelled - - failed - - total + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. + type: string + detail: + type: string + description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + enum: ["auto", "low", "high"] + default: "auto" required: - - id - - object - - created_at - - vector_store_id - - status - - file_counts - x-oaiMeta: - name: The vector store files batch object - beta: true - example: | - { - "id": "vsfb_123", - "object": "vector_store.files_batch", - "created_at": 1698107661, - "vector_store_id": "vs_abc123", - "status": "completed", - "file_counts": { - "in_progress": 0, - "completed": 100, - "failed": 0, - "cancelled": 0, - "total": 100 - } - } + - index + - type - CreateVectorStoreFileBatchRequest: + MessageContentImageUrlObject: + title: Image URL type: object - additionalProperties: false + description: References an image URL in the content of a message. properties: - file_ids: - description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - type: array - minItems: 1 - maxItems: 500 - items: - type: string - chunking_strategy: - $ref: "#/components/schemas/ChunkingStrategyRequestParam" - required: - - file_ids - - AssistantStreamEvent: - description: | - Represents an event emitted when streaming a Run. - - Each event in a server-sent events stream has an `event` and `data` property: - - ``` - event: thread.created - data: {"id": "thread_123", "object": "thread", ...} - ``` - - We emit events whenever a new object is created, transitions to a new state, or is being - streamed in parts (deltas). For example, we emit `thread.run.created` when a new run - is created, `thread.run.completed` when a run completes, and so on. 
When an Assistant chooses - to create a message during a run, we emit a `thread.message.created event`, a - `thread.message.in_progress` event, many `thread.message.delta` events, and finally a - `thread.message.completed` event. - - We may add additional events over time, so we recommend handling unknown events gracefully - in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to - integrate the Assistants API with streaming. - oneOf: - - $ref: "#/components/schemas/ThreadStreamEvent" - - $ref: "#/components/schemas/RunStreamEvent" - - $ref: "#/components/schemas/RunStepStreamEvent" - - $ref: "#/components/schemas/MessageStreamEvent" - - $ref: "#/components/schemas/ErrorEvent" - - $ref: "#/components/schemas/DoneEvent" - x-oaiMeta: - name: Assistant stream events - beta: true - - ThreadStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: [ "thread.created" ] - data: - $ref: "#/components/schemas/ThreadObject" - required: - - event - - data - description: Occurs when a new [thread](/docs/api-reference/threads/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" - - RunStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: [ "thread.run.created" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a new [run](/docs/api-reference/runs/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object + type: + type: string + enum: ["image_url"] + description: The type of the content part. + image_url: + type: object properties: - event: + url: type: string - enum: [ "thread.run.queued" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: + description: "The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." + format: uri + detail: type: string - enum: [ "thread.run.in_progress" ] - data: - $ref: "#/components/schemas/RunObject" + description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` + enum: ["auto", "low", "high"] + default: "auto" required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object + - url + required: + - type + - image_url + + MessageDeltaContentImageUrlObject: + title: Image URL + type: object + description: References an image URL in the content of a message. + properties: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `image_url`. + type: string + enum: ["image_url"] + image_url: + type: object properties: - event: + url: + description: "The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." type: string - enum: [ "thread.run.requires_action" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status. 
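The `AssistantStreamEvent` description above spells out the wire format (`event:` and `data:` lines, a terminal `data: [DONE]`). A minimal sketch of consuming that stream with plain HTTP, assuming the `POST /v1/threads/{thread_id}/runs` endpoint with `"stream": true` and placeholder IDs; unknown event names are passed through untouched, as the description recommends:

    import json
    import os
    import requests

    BASE = "https://api.openai.com/v1"
    HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
               "OpenAI-Beta": "assistants=v2"}  # assumed header for the beta Assistants API

    thread_id = "thread_abc123"                                  # placeholder thread ID
    body = {"assistant_id": "asst_abc123", "stream": True}       # placeholder assistant ID
    with requests.post(f"{BASE}/threads/{thread_id}/runs",
                       headers=HEADERS, json=body, stream=True) as resp:
        event = None
        for raw in resp.iter_lines():
            if not raw:
                continue                                         # blank lines separate events
            line = raw.decode("utf-8")
            if line.startswith("event:"):
                event = line[len("event:"):].strip()
            elif line.startswith("data:"):
                data = line[len("data:"):].strip()
                if data == "[DONE]":                             # DoneEvent sentinel
                    break
                print(event, json.loads(data).get("id"))         # handle or ignore unknown events gracefully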
- x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: + detail: type: string - enum: [ "thread.run.completed" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) is completed. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object + description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. + enum: ["auto", "low", "high"] + default: "auto" + required: + - index + - type + + MessageContentTextObject: + title: Text + type: object + description: The text content that is part of a message. + properties: + type: + description: Always `text`. + type: string + enum: ["text"] + text: + type: object properties: - event: + value: + description: The data that makes up the text. type: string - enum: [ "thread.run.incomplete" ] - data: - $ref: "#/components/schemas/RunObject" + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" + x-oaiExpandable: true required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object + - value + - annotations + required: + - type + - text + + MessageRequestContentTextObject: + title: Text + type: object + description: The text content that is part of a message. + properties: + type: + description: Always `text`. + type: string + enum: ["text"] + text: + type: string + description: Text content to be sent to the model + required: + - type + - text + + MessageContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + properties: + type: + description: Always `file_citation`. + type: string + enum: ["file_citation"] + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: + type: object properties: - event: + file_id: + description: The ID of the specific File the citation is from. type: string - enum: [ "thread.run.failed" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) fails. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: + quote: + description: The specific quote in the file. type: string - enum: [ "thread.run.cancelling" ] - data: - $ref: "#/components/schemas/RunObject" required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status. 
- x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object + - file_id + - quote + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - type + - text + - file_citation + - start_index + - end_index + + MessageContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + properties: + type: + description: Always `file_path`. + type: string + enum: ["file_path"] + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: + type: object properties: - event: + file_id: + description: The ID of the file that was generated. type: string - enum: [ "thread.run.cancelled" ] - data: - $ref: "#/components/schemas/RunObject" required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object + - file_id + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - type + - text + - file_path + - start_index + - end_index + + MessageDeltaContentTextObject: + title: Text + type: object + description: The text content that is part of a message. + properties: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `text`. + type: string + enum: ["text"] + text: + type: object properties: - event: + value: + description: The data that makes up the text. type: string - enum: [ "thread.run.expired" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) expires. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - index + - type - RunStepStreamEvent: - oneOf: - - type: object + MessageDeltaContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_citation`. + type: string + enum: ["file_citation"] + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: + type: object properties: - event: + file_id: + description: The ID of the specific File the citation is from. type: string - enum: [ "thread.run.step.created" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object + quote: + description: The specific quote in the file. 
+ type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + + MessageDeltaContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_path`. + type: string + enum: ["file_path"] + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: + type: object properties: - event: + file_id: + description: The ID of the file that was generated. type: string - enum: [ "thread.run.step.in_progress" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + + RunStepObject: + type: object + title: Run steps + description: | + Represents a step in execution of a run. + properties: + id: + description: The identifier of the run step, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.run.step`. + type: string + enum: ["thread.run.step"] + created_at: + description: The Unix timestamp (in seconds) for when the run step was created. + type: integer + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + type: string + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was run. + type: string + run_id: + description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + type: string + type: + description: The type of run step, which can be either `message_creation` or `tool_calls`. + type: string + enum: ["message_creation", "tool_calls"] + status: + description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. + type: string + enum: ["in_progress", "cancelled", "failed", "completed", "expired"] + step_details: + type: object + description: The details of the run step. + oneOf: + - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" + x-oaiExpandable: true + last_error: + type: object + description: The last error associated with this run step. Will be `null` if there are no errors. + nullable: true properties: - event: + code: type: string - enum: [ "thread.run.step.delta" ] - data: - $ref: "#/components/schemas/RunStepDeltaObject" - required: - - event - - data - description: Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed. - x-oaiMeta: - dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" - - type: object - properties: - event: + description: One of `server_error` or `rate_limit_exceeded`. 
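The annotation objects above (`file_citation` and `file_path`, each with `text`, `start_index`, and `end_index`) describe spans of the message text that are meant to be replaced. A small, purely local sketch of that replacement over a `MessageContentTextObject`; the `[file-id]` marker format is just an illustration:

    def resolve_annotations(text_part: dict) -> str:
        """Replace each annotated span in a MessageContentTextObject with a [file-id] marker."""
        value = text_part["text"]["value"]
        # Work from the end of the string so earlier indices stay valid after each replacement.
        for ann in sorted(text_part["text"]["annotations"],
                          key=lambda a: a["start_index"], reverse=True):
            if ann["type"] == "file_citation":
                marker = f"[{ann['file_citation']['file_id']}]"
            elif ann["type"] == "file_path":
                marker = f"[{ann['file_path']['file_id']}]"
            else:
                continue                       # leave unknown annotation types in place
            value = value[:ann["start_index"]] + marker + value[ann["end_index"]:]
        return value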
+ enum: ["server_error", "rate_limit_exceeded"] + message: type: string - enum: [ "thread.run.step.completed" ] - data: - $ref: "#/components/schemas/RunStepObject" + description: A human-readable description of the error. required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object + - code + - message + expired_at: + description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run step was cancelled. + type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run step failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run step completed. + type: integer + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunStepCompletionUsage" + required: + - id + - object + - created_at + - assistant_id + - thread_id + - run_id + - type + - status + - step_details + - last_error + - expired_at + - cancelled_at + - failed_at + - completed_at + - metadata + - usage + x-oaiMeta: + name: The run step object + beta: true + example: *run_step_object_example + + RunStepDeltaObject: + type: object + title: Run step delta object + description: | + Represents a run step delta i.e. any changed fields on a run step during streaming. + properties: + id: + description: The identifier of the run step, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.run.step.delta`. + type: string + enum: ["thread.run.step.delta"] + delta: + description: The delta containing the fields that have changed on the run step. + type: object properties: - event: + step_details: + type: object + description: The details of the run step. + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" + x-oaiExpandable: true + required: + - id + - object + - delta + x-oaiMeta: + name: The run step delta object + beta: true + example: | + { + "id": "step_123", + "object": "thread.run.step.delta", + "delta": { + "step_details": { + "type": "tool_calls", + "tool_calls": [ + { + "index": 0, + "id": "call_123", + "type": "code_interpreter", + "code_interpreter": { "input": "", "outputs": [] } + } + ] + } + } + } + + ListRunStepsResponse: + properties: + object: + type: string + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/RunStepObject" + first_id: + type: string + example: "step_abc123" + last_id: + type: string + example: "step_abc456" + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + + RunStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. + properties: + type: + description: Always `message_creation`. 
+ type: string + enum: ["message_creation"] + message_creation: + type: object + properties: + message_id: type: string - enum: [ "thread.run.step.failed" ] - data: - $ref: "#/components/schemas/RunStepObject" + description: The ID of the message that was created by this run step. required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object + - message_id + required: + - type + - message_creation + + RunStepDeltaStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. + properties: + type: + description: Always `message_creation`. + type: string + enum: ["message_creation"] + message_creation: + type: object properties: - event: + message_id: type: string - enum: [ "thread.run.step.cancelled" ] - data: - $ref: "#/components/schemas/RunStepObject" + description: The ID of the message that was created by this run step. + required: + - type + + RunStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: ["tool_calls"] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + - tool_calls + + RunStepDeltaStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: ["tool_calls"] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + + RunStepDetailsToolCallsCodeObject: + title: Code Interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + enum: ["code_interpreter"] + code_interpreter: + type: object + description: The Code Interpreter tool call definition. required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled. 
- x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object + - input + - outputs properties: - event: + input: type: string - enum: [ "thread.run.step.expired" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true + required: + - id + - type + - code_interpreter - MessageStreamEvent: - oneOf: - - type: object + RunStepDeltaStepDetailsToolCallsCodeObject: + title: Code interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + enum: ["code_interpreter"] + code_interpreter: + type: object + description: The Code Interpreter tool call definition. properties: - event: + input: type: string - enum: [ "thread.message.created" ] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true + required: + - index + - type + + RunStepDetailsToolCallsCodeOutputLogsObject: + title: Code Interpreter log output + type: object + description: Text output from the Code Interpreter tool call as part of a run step. + properties: + type: + description: Always `logs`. + type: string + enum: ["logs"] + logs: + type: string + description: The text output from the Code Interpreter tool call. + required: + - type + - logs + + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: + title: Code interpreter log output + type: object + description: Text output from the Code Interpreter tool call as part of a run step. + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: + description: Always `logs`. + type: string + enum: ["logs"] + logs: + type: string + description: The text output from the Code Interpreter tool call. 
+ required: + - index + - type + + RunStepDetailsToolCallsCodeOutputImageObject: + title: Code Interpreter image output + type: object + properties: + type: + description: Always `image`. + type: string + enum: ["image"] + image: + type: object properties: - event: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. type: string - enum: [ "thread.message.in_progress" ] - data: - $ref: "#/components/schemas/MessageObject" required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object + - file_id + required: + - type + - image + + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: + title: Code interpreter image output + type: object + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: + description: Always `image`. + type: string + enum: ["image"] + image: + type: object properties: - event: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. type: string - enum: [ "thread.message.delta" ] - data: - $ref: "#/components/schemas/MessageDeltaObject" - required: - - event - - data - description: Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed. - x-oaiMeta: - dataDescription: "`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)" - - type: object + required: + - index + - type + + RunStepDetailsToolCallsFileSearchObject: + title: File search tool call + type: object + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + enum: ["file_search"] + file_search: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + required: + - id + - type + - file_search + + RunStepDeltaStepDetailsToolCallsFileSearchObject: + title: File search tool call + type: object + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + enum: ["file_search"] + file_search: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + required: + - index + - type + - file_search + + RunStepDetailsToolCallsFunctionObject: + type: object + title: Function tool call + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: ["function"] + function: + type: object + description: The definition of the function that was called. properties: - event: + name: type: string - enum: [ "thread.message.completed" ] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) is completed. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object - properties: - event: + description: The name of the function. 
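The Code Interpreter tool call objects above carry an `input` string and a list of `outputs` that are either `logs` or `image` items. A small local sketch of walking one `RunStepDetailsToolCallsCodeObject` (no HTTP involved):

    def summarize_code_interpreter_call(tool_call: dict) -> None:
        """Print the input and outputs of a RunStepDetailsToolCallsCodeObject."""
        ci = tool_call["code_interpreter"]
        print("input:", ci["input"])
        for out in ci["outputs"]:
            if out["type"] == "logs":            # RunStepDetailsToolCallsCodeOutputLogsObject
                print("logs:", out["logs"])
            elif out["type"] == "image":         # RunStepDetailsToolCallsCodeOutputImageObject
                print("image file id:", out["image"]["file_id"])
            else:
                print("unhandled output type:", out["type"])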
+ arguments: type: string - enum: [ "thread.message.incomplete" ] - data: - $ref: "#/components/schemas/MessageObject" + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + nullable: true required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - name + - arguments + - output + required: + - id + - type + - function - ErrorEvent: + RunStepDeltaStepDetailsToolCallsFunctionObject: type: object + title: Function tool call properties: - event: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: type: string - enum: [ "error" ] - data: - $ref: "#/components/schemas/Error" + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: ["function"] + function: + type: object + description: The definition of the function that was called. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + nullable: true required: - - event - - data - description: Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. - x-oaiMeta: - dataDescription: "`data` is an [error](/docs/guides/error-codes/api-errors)" + - index + - type - DoneEvent: + VectorStoreExpirationAfter: type: object + title: Vector store expiration policy + description: The expiration policy for a vector store. properties: - event: - type: string - enum: [ "done" ] - data: + anchor: + description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." type: string - enum: [ "[DONE]" ] + enum: ["last_active_at"] + days: + description: The number of days after the anchor time that the vector store will expire. + type: integer + minimum: 1 + maximum: 365 required: - - event - - data - description: Occurs when a stream ends. - x-oaiMeta: - dataDescription: "`data` is `[DONE]`" + - anchor + - days - Batch: + VectorStoreObject: type: object + title: Vector store + description: A vector store is a collection of processed files can be used by the `file_search` tool. properties: id: + description: The identifier, which can be referenced in API endpoints. type: string object: + description: The object type, which is always `vector_store`. type: string - enum: [ batch ] - description: The object type, which is always `batch`. - endpoint: + enum: ["vector_store"] + created_at: + description: The Unix timestamp (in seconds) for when the vector store was created. + type: integer + name: + description: The name of the vector store. type: string - description: The OpenAI API endpoint used by the batch. - - errors: + usage_bytes: + description: The total number of bytes used by the files in the vector store. + type: integer + file_counts: type: object properties: - object: - type: string - description: The object type, which is always `list`. 
- data: - type: array - items: - type: object - properties: - code: - type: string - description: An error code identifying the error type. - message: - type: string - description: A human-readable message providing more details about the error. - param: - type: string - description: The name of the parameter that caused the error, if applicable. - nullable: true - line: - type: integer - description: The line number of the input file where the error occurred, if applicable. - nullable: true - input_file_id: - type: string - description: The ID of the input file for the batch. - completion_window: - type: string - description: The time frame within which the batch should be processed. - status: - type: string - description: The current status of the batch. - enum: - - validating - - failed + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been successfully processed. + type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that were cancelled. + type: integer + total: + description: The total number of files. + type: integer + required: - in_progress - - finalizing - completed - - expired - - cancelling + - failed - cancelled - output_file_id: - type: string - description: The ID of the file containing the outputs of successfully executed requests. - error_file_id: + - total + status: + description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. type: string - description: The ID of the file containing the outputs of requests with errors. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was created. - in_progress_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started processing. + enum: ["expired", "in_progress", "completed"] + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" expires_at: + description: The Unix timestamp (in seconds) for when the vector store will expire. type: integer - description: The Unix timestamp (in seconds) for when the batch will expire. - finalizing_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started finalizing. - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was completed. - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch failed. - expired_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch expired. - cancelling_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started cancelling. - cancelled_at: + nullable: true + last_active_at: + description: The Unix timestamp (in seconds) for when the vector store was last active. type: integer - description: The Unix timestamp (in seconds) for when the batch was cancelled. 
- request_counts: + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + required: + - id + - object + - usage_bytes + - created_at + - status + - last_active_at + - name + - file_counts + - metadata + x-oaiMeta: + name: The vector store object + beta: true + example: | + { + "id": "vs_123", + "object": "vector_store", + "created_at": 1698107661, + "usage_bytes": 123456, + "last_active_at": 1698107661, + "name": "my_vector_store", + "status": "completed", + "file_counts": { + "in_progress": 0, + "completed": 100, + "cancelled": 0, + "failed": 0, + "total": 100 + }, + "metadata": {}, + "last_used_at": 1698107661 + } + + CreateVectorStoreRequest: + type: object + additionalProperties: false + properties: + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + type: array + maxItems: 500 + items: + type: string + name: + description: The name of the vector store. + type: string + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + metadata: + description: *metadata_description type: object - properties: - total: - type: integer - description: Total number of requests in the batch. - completed: - type: integer - description: Number of requests that have been completed successfully. - failed: - type: integer - description: Number of requests that have failed. - required: - - total - - completed - - failed - description: The request counts for different statuses within the batch. + x-oaiTypeLabel: map + nullable: true + + UpdateVectorStoreRequest: + type: object + additionalProperties: false + properties: + name: + description: The name of the vector store. + type: string + nullable: true + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + nullable: true metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true + + ListVectorStoresResponse: + properties: + object: + type: string + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/VectorStoreObject" + first_id: + type: string + example: "vs_abc123" + last_id: + type: string + example: "vs_abc456" + has_more: + type: boolean + example: false required: - - id - object - - endpoint - - input_file_id - - completion_window - - status - - created_at - x-oaiMeta: - name: The batch object - example: *batch_object + - data + - first_id + - last_id + - has_more - BatchRequestInput: + DeleteVectorStoreResponse: type: object - description: The per-line object of the batch input file properties: - custom_id: - type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. - method: + id: type: string - enum: [ "POST" ] - description: The HTTP method to be used for the request. Currently only `POST` is supported. - url: + deleted: + type: boolean + object: type: string - description: The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. 
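Tying together the `CreateVectorStoreRequest` and `VectorStoreExpirationAfter` schemas added above: a create request may carry up to 500 `file_ids`, an optional `name`, and an expiration policy anchored to `last_active_at` with 1 to 365 `days`. A hedged Dart sketch of assembling such a request body (the file ID, name, and day count are placeholders; sending the request is left to whatever HTTP client is in use):

```dart
import 'dart:convert';

void main() {
  // Request body shaped like CreateVectorStoreRequest above.
  final body = <String, dynamic>{
    'name': 'my_vector_store',
    'file_ids': ['file-abc123'], // up to 500 file IDs
    'expires_after': {
      // VectorStoreExpirationAfter: only `last_active_at` is supported.
      'anchor': 'last_active_at',
      'days': 7, // must be between 1 and 365
    },
  };
  print(jsonEncode(body));
}
```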
- x-oaiMeta: - name: The request input object - example: | - {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} + enum: [vector_store.deleted] + required: + - id + - object + - deleted - BatchRequestOutput: + VectorStoreFileObject: type: object - description: The per-line object of the batch output and error files + title: Vector store files + description: A list of files attached to a vector store. properties: id: + description: The identifier, which can be referenced in API endpoints. type: string - custom_id: + object: + description: The object type, which is always `vector_store.file`. type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. - response: - type: object - nullable: true - properties: - status_code: - type: integer - description: The HTTP status code of the response - request_id: - type: string - description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. - body: - type: object - x-oaiTypeLabel: map - description: The JSON body of the response - error: + enum: ["vector_store.file"] + usage_bytes: + description: The total vector store usage in bytes. Note that this may be different from the original file size. + type: integer + created_at: + description: The Unix timestamp (in seconds) for when the vector store file was created. + type: integer + vector_store_id: + description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. + type: string + status: + description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. + type: string + enum: ["in_progress", "completed", "cancelled", "failed"] + last_error: type: object + description: The last error associated with this vector store file. Will be `null` if there are no errors. nullable: true - description: For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure. properties: code: type: string - description: A machine-readable error code. + description: One of `server_error` or `rate_limit_exceeded`. + enum: + [ + "internal_error", + "file_not_found", + "parsing_error", + "unhandled_mime_type", + ] message: type: string - description: A human-readable error message. + description: A human-readable description of the error. 
+ required: + - code + - message + required: + - id + - object + - usage_bytes + - created_at + - vector_store_id + - status + - last_error x-oaiMeta: - name: The request output object + name: The vector store file object + beta: true example: | - {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-4o-mini", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null} + { + "id": "file-abc123", + "object": "vector_store.file", + "usage_bytes": 1234, + "created_at": 1698107661, + "vector_store_id": "vs_abc123", + "status": "completed", + "last_error": null + } - ListBatchesResponse: + CreateVectorStoreFileRequest: type: object + additionalProperties: false + properties: + file_id: + description: A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. + type: string + required: + - file_id + + ListVectorStoreFilesResponse: properties: + object: + type: string + example: "list" data: type: array items: - $ref: "#/components/schemas/Batch" + $ref: "#/components/schemas/VectorStoreFileObject" first_id: type: string - example: "batch_abc123" + example: "file-abc123" last_id: type: string - example: "batch_abc456" + example: "file-abc456" has_more: type: boolean - object: - type: string - enum: [ list ] + example: false required: - object - data + - first_id + - last_id - has_more - AuditLogActorServiceAccount: + DeleteVectorStoreFileResponse: type: object - description: The service account that performed the audit logged action. properties: id: type: string - description: The service account id. + deleted: + type: boolean + object: + type: string + enum: [vector_store.file.deleted] + required: + - id + - object + - deleted - AuditLogActorUser: + VectorStoreFileBatchObject: type: object - description: The user who performed the audit logged action. + title: Vector store file batch + description: A batch of files attached to a vector store. properties: id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `vector_store.file_batch`. + type: string + enum: ["vector_store.files_batch"] + created_at: + description: The Unix timestamp (in seconds) for when the vector store files batch was created. + type: integer + vector_store_id: + description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. type: string - description: The user id. - email: + status: + description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. type: string - description: The user email. + enum: ["in_progress", "completed", "cancelled", "failed"] + file_counts: + type: object + properties: + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been processed. + type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that where cancelled. 
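Per the `VectorStoreFileObject` schema in the hunk above, each attached file reports its own `status` and a nullable `last_error`, and is only usable by `file_search` once the status is `completed`. A minimal sketch, assuming the caller already has the decoded file map:

```dart
/// Sketch: a vector store file is only usable once its status is `completed`.
bool isVectorStoreFileReady(Map<String, dynamic> file) {
  final status = file['status'] as String;
  if (status == 'completed') return true;
  if (status == 'failed') {
    final error = file['last_error'] as Map<String, dynamic>?;
    print('file ${file['id']} failed: ${error?['code']}: ${error?['message']}');
  }
  // `in_progress` and `cancelled` are the remaining statuses in the schema.
  return false;
}
```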
+ type: integer + total: + description: The total number of files. + type: integer + required: + - in_progress + - completed + - cancelled + - failed + - total + required: + - id + - object + - created_at + - vector_store_id + - status + - file_counts + x-oaiMeta: + name: The vector store files batch object + beta: true + example: | + { + "id": "vsfb_123", + "object": "vector_store.files_batch", + "created_at": 1698107661, + "vector_store_id": "vs_abc123", + "status": "completed", + "file_counts": { + "in_progress": 0, + "completed": 100, + "failed": 0, + "cancelled": 0, + "total": 100 + } + } - AuditLogActorApiKey: + CreateVectorStoreFileBatchRequest: type: object - description: The API Key used to perform the audit logged action. + additionalProperties: false properties: - id: - type: string - description: The tracking id of the API key. - type: - type: string - description: The type of API key. Can be either `user` or `service_account`. - enum: [ "user", "service_account" ] - user: - $ref: "#/components/schemas/AuditLogActorUser" - service_account: - $ref: "#/components/schemas/AuditLogActorServiceAccount" + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + type: array + minItems: 1 + maxItems: 500 + items: + type: string + required: + - file_ids - AuditLogActorSession: - type: object - description: The session in which the audit logged action was performed. - properties: - user: - $ref: "#/components/schemas/AuditLogActorUser" - ip_address: - type: string - description: The IP address from which the action was performed. + AssistantStreamEvent: + description: | + Represents an event emitted when streaming a Run. - AuditLogActor: - type: object - description: The actor who performed the audit logged action. - properties: - type: - type: string - description: The type of actor. Is either `session` or `api_key`. - enum: [ "session", "api_key" ] - session: - type: object - $ref: "#/components/schemas/AuditLogActorSession" - api_key: - type: object - $ref: "#/components/schemas/AuditLogActorApiKey" + Each event in a server-sent events stream has an `event` and `data` property: + ``` + event: thread.created + data: {"id": "thread_123", "object": "thread", ...} + ``` - AuditLogEventType: - type: string - description: The event type. - x-oaiExpandable: true - enum: - - api_key.created - - api_key.updated - - api_key.deleted - - invite.sent - - invite.accepted - - invite.deleted - - login.succeeded - - login.failed - - logout.succeeded - - logout.failed - - organization.updated - - project.created - - project.updated - - project.archived - - service_account.created - - service_account.updated - - service_account.deleted - - user.added - - user.updated - - user.deleted - - AuditLog: - type: object - description: A log of a user action or configuration change within this organization. - properties: - id: - type: string - description: The ID of this log. - type: - $ref: "#/components/schemas/AuditLogEventType" + We emit events whenever a new object is created, transitions to a new state, or is being + streamed in parts (deltas). For example, we emit `thread.run.created` when a new run + is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses + to create a message during a run, we emit a `thread.message.created event`, a + `thread.message.in_progress` event, many `thread.message.delta` events, and finally a + `thread.message.completed` event. 
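The `AssistantStreamEvent` description above shows the wire format: each server-sent event carries an `event` name and a JSON `data` payload, and the stream terminates with a `done` event whose data is the literal `[DONE]` (see `DoneEvent` later in this patch). A hedged sketch of folding such a stream into (event, data) pairs; `lines` is assumed to be the decoded text-line stream from whatever HTTP client is used, and unknown event names are simply passed through so they can be ignored gracefully, as the description recommends:

```dart
/// Sketch: fold a decoded SSE line stream into (event, data) pairs,
/// stopping at the terminal `done` event whose data is `[DONE]`.
Stream<MapEntry<String, String>> parseAssistantEvents(
  Stream<String> lines,
) async* {
  String? event;
  await for (final line in lines) {
    if (line.startsWith('event: ')) {
      event = line.substring('event: '.length);
    } else if (line.startsWith('data: ') && event != null) {
      final data = line.substring('data: '.length);
      if (event == 'done' && data == '[DONE]') return;
      yield MapEntry(event, data);
      event = null;
    }
  }
}
```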
- effective_at: - type: integer - description: The Unix timestamp (in seconds) of the event. - project: - type: object - description: The project that the action was scoped to. Absent for actions not scoped to projects. + We may add additional events over time, so we recommend handling unknown events gracefully + in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to + integrate the Assistants API with streaming. + oneOf: + - $ref: "#/components/schemas/ThreadStreamEvent" + - $ref: "#/components/schemas/RunStreamEvent" + - $ref: "#/components/schemas/RunStepStreamEvent" + - $ref: "#/components/schemas/MessageStreamEvent" + - $ref: "#/components/schemas/ErrorEvent" + - $ref: "#/components/schemas/DoneEvent" + x-oaiMeta: + name: Assistant stream events + beta: true + + ThreadStreamEvent: + oneOf: + - type: object properties: - id: - type: string - description: The project ID. - name: + event: type: string - description: The project title. - actor: - $ref: "#/components/schemas/AuditLogActor" - api_key.created: - type: object - description: The details for events with this `type`. + enum: ["thread.created"] + data: + $ref: "#/components/schemas/ThreadObject" + required: + - event + - data + description: Occurs when a new [thread](/docs/api-reference/threads/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" + + RunStreamEvent: + oneOf: + - type: object properties: - id: + event: type: string - description: The tracking ID of the API key. + enum: ["thread.run.created"] data: - type: object - description: The payload used to create the API key. - properties: - scopes: - type: array - items: - type: string - description: A list of scopes allowed for the API key, e.g. `["api.model.request"]` - api_key.updated: - type: object - description: The details for events with this `type`. + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a new [run](/docs/api-reference/runs/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object properties: - id: + event: type: string - description: The tracking ID of the API key. - changes_requested: - type: object - description: The payload used to update the API key. - properties: - scopes: - type: array - items: - type: string - description: A list of scopes allowed for the API key, e.g. `["api.model.request"]` - api_key.deleted: - type: object - description: The details for events with this `type`. + enum: ["thread.run.queued"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object properties: - id: + event: type: string - description: The tracking ID of the API key. - invite.sent: - type: object - description: The details for events with this `type`. + enum: ["thread.run.in_progress"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object properties: - id: + event: type: string - description: The ID of the invite. 
+ enum: ["thread.run.requires_action"] data: - type: object - description: The payload used to create the invite. - properties: - email: - type: string - description: The email invited to the organization. - role: - type: string - description: The role the email was invited to be. Is either `owner` or `member`. - invite.accepted: - type: object - description: The details for events with this `type`. + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object properties: - id: + event: type: string - description: The ID of the invite. - invite.deleted: - type: object - description: The details for events with this `type`. + enum: ["thread.run.completed"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object properties: - id: + event: type: string - description: The ID of the invite. - login.failed: - type: object - description: The details for events with this `type`. + enum: ["thread.run.failed"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) fails. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object properties: - error_code: + event: type: string - description: The error code of the failure. - error_message: + enum: ["thread.run.cancelling"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: type: string - description: The error message of the failure. - logout.failed: - type: object - description: The details for events with this `type`. + enum: ["thread.run.cancelled"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object properties: - error_code: + event: type: string - description: The error code of the failure. - error_message: + enum: ["thread.run.expired"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + + RunStepStreamEvent: + oneOf: + - type: object + properties: + event: type: string - description: The error message of the failure. - organization.updated: - type: object - description: The details for events with this `type`. + enum: ["thread.run.step.created"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) is created. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object properties: - id: + event: type: string - description: The organization ID. 
- changes_requested: - type: object - description: The payload used to update the organization settings. - properties: - title: - type: string - description: The organization title. - description: - type: string - description: The organization description. - name: - type: string - description: The organization name. - settings: - type: object - properties: - threads_ui_visibility: - type: string - description: Visibility of the threads page which shows messages created with the Assistants API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`. - usage_dashboard_visibility: - type: string - description: Visibility of the usage dashboard which shows activity and costs for your organization. One of `ANY_ROLE` or `OWNERS`. - project.created: - type: object - description: The details for events with this `type`. + enum: ["thread.run.step.in_progress"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object properties: - id: + event: type: string - description: The project ID. + enum: ["thread.run.step.delta"] data: - type: object - description: The payload used to create the project. - properties: - name: - type: string - description: The project name. - title: - type: string - description: The title of the project as seen on the dashboard. - project.updated: - type: object - description: The details for events with this `type`. + $ref: "#/components/schemas/RunStepDeltaObject" + required: + - event + - data + description: Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" + - type: object properties: - id: + event: type: string - description: The project ID. - changes_requested: - type: object - description: The payload used to update the project. - properties: - title: - type: string - description: The title of the project as seen on the dashboard. - project.archived: - type: object - description: The details for events with this `type`. + enum: ["thread.run.step.completed"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object properties: - id: + event: type: string - description: The project ID. - service_account.created: - type: object - description: The details for events with this `type`. + enum: ["thread.run.step.failed"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) fails. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object properties: - id: + event: type: string - description: The service account ID. + enum: ["thread.run.step.cancelled"] data: - type: object - description: The payload used to create the service account. - properties: - role: - type: string - description: The role of the service account. Is either `owner` or `member`. - service_account.updated: - type: object - description: The details for events with this `type`. 
+ $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object properties: - id: + event: type: string - description: The service account ID. - changes_requested: - type: object - description: The payload used to updated the service account. - properties: - role: - type: string - description: The role of the service account. Is either `owner` or `member`. - service_account.deleted: - type: object - description: The details for events with this `type`. + enum: ["thread.run.step.expired"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + + MessageStreamEvent: + oneOf: + - type: object properties: - id: + event: type: string - description: The service account ID. - user.added: - type: object - description: The details for events with this `type`. + enum: ["thread.message.created"] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object properties: - id: + event: type: string - description: The user ID. + enum: ["thread.message.in_progress"] data: - type: object - description: The payload used to add the user to the project. - properties: - role: - type: string - description: The role of the user. Is either `owner` or `member`. - user.updated: - type: object - description: The details for events with this `type`. + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object properties: - id: + event: type: string - description: The project ID. - changes_requested: - type: object - description: The payload used to update the user. - properties: - role: - type: string - description: The role of the user. Is either `owner` or `member`. - user.deleted: - type: object - description: The details for events with this `type`. + enum: ["thread.message.delta"] + data: + $ref: "#/components/schemas/MessageDeltaObject" + required: + - event + - data + description: Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)" + - type: object properties: - id: - type: string - description: The user ID. 
- required: - - id - - type - - effective_at - - actor - x-oaiMeta: - name: The audit log object - example: | - { - "id": "req_xxx_20240101", - "type": "api_key.created", - "effective_at": 1720804090, - "actor": { - "type": "session", - "session": { - "user": { - "id": "user-xxx", - "email": "user@example.com" - }, - "ip_address": "127.0.0.1", - "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" - } - }, - "api_key.created": { - "id": "key_xxxx", - "data": { - "scopes": ["resource.operation"] - } - } - } - - ListAuditLogsResponse: - type: object - properties: - object: - type: string - enum: [ list ] - data: - type: array - items: - $ref: "#/components/schemas/AuditLog" - first_id: - type: string - example: "audit_log-defb456h8dks" - last_id: - type: string - example: "audit_log-hnbkd8s93s" - has_more: - type: boolean - - required: - - object - - data - - first_id - - last_id - - has_more - - Invite: - type: object - description: Represents an individual `invite` to the organization. - properties: - object: - type: string - enum: [ organization.invite ] - description: The object type, which is always `organization.invite` - id: - type: string - description: The identifier, which can be referenced in API endpoints - email: - type: string - description: The email address of the individual to whom the invite was sent - role: - type: string - enum: [ owner, reader ] - description: "`owner` or `reader`" - status: - type: string - enum: [ accepted, expired, pending ] - description: "`accepted`,`expired`, or `pending`" - invited_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite was sent. - expires_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite expires. - accepted_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite was accepted. - - required: - - object - - id - - email - - role - - status - - invited_at - - expires_at - x-oaiMeta: - name: The invite object - example: | - { - "object": "organization.invite", - "id": "invite-abc", - "email": "user@example.com", - "role": "owner", - "status": "accepted", - "invited_at": 1711471533, - "expires_at": 1711471533, - "accepted_at": 1711471533 - } - - InviteListResponse: - type: object - properties: - object: - type: string - enum: [ list ] - description: The object type, which is always `list` - data: - type: array - items: - $ref: '#/components/schemas/Invite' - first_id: - type: string - description: The first `invite_id` in the retrieved `list` - last_id: - type: string - description: The last `invite_id` in the retrieved `list` - has_more: - type: boolean - description: The `has_more` property is used for pagination to indicate there are additional results. - required: - - object - - data - - InviteRequest: - type: object - properties: - email: - type: string - description: "Send an email to this address" - role: - type: string - enum: [ reader, owner ] - description: "`owner` or `reader`" - required: - - email - - role - - InviteDeleteResponse: - type: object - properties: - object: - type: string - enum: [ organization.invite.deleted ] - description: The object type, which is always `organization.invite.deleted` - id: - type: string - deleted: - type: boolean - required: - - object - - id - - deleted - - User: - type: object - description: Represents an individual `user` within an organization. 
- properties: - object: - type: string - enum: [ organization.user ] - description: The object type, which is always `organization.user` - id: - type: string - description: The identifier, which can be referenced in API endpoints - name: - type: string - description: The name of the user - email: - type: string - description: The email address of the user - role: - type: string - enum: [ owner, reader ] - description: "`owner` or `reader`" - added_at: - type: integer - description: The Unix timestamp (in seconds) of when the user was added. - required: - - object - - id - - name - - email - - role - - added_at - x-oaiMeta: - name: The user object - example: | - { - "object": "organization.user", - "id": "user_abc", - "name": "First Last", - "email": "user@example.com", - "role": "owner", - "added_at": 1711471533 - } - - UserListResponse: - type: object - properties: - object: - type: string - enum: [ list ] - data: - type: array - items: - $ref: '#/components/schemas/User' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - required: - - object - - data - - first_id - - last_id - - has_more - - UserRoleUpdateRequest: - type: object - properties: - role: - type: string - enum: [ owner,reader ] - description: "`owner` or `reader`" - required: - - role - - UserDeleteResponse: - type: object - properties: - object: - type: string - enum: [ organization.user.deleted ] - id: - type: string - deleted: - type: boolean - required: - - object - - id - - deleted - - Project: - type: object - description: Represents an individual project. - properties: - id: - type: string - description: The identifier, which can be referenced in API endpoints - object: - type: string - enum: [ organization.project ] - description: The object type, which is always `organization.project` - name: - type: string - description: The name of the project. This appears in reporting. - created_at: - type: integer - description: The Unix timestamp (in seconds) of when the project was created. - archived_at: - type: integer - nullable: true - description: The Unix timestamp (in seconds) of when the project was archived or `null`. - status: - type: string - enum: [ active, archived ] - description: "`active` or `archived`" - app_use_case: - type: string - description: A description of your business, project, or use case. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). - business_website: - type: string - description: Your business URL, or if you don't have one yet, a URL to your LinkedIn or other social media. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). - required: - - id - - object - - name - - created_at - - status - x-oaiMeta: - name: The project object - example: | - { - "id": "proj_abc", - "object": "organization.project", - "name": "Project example", - "created_at": 1711471533, - "archived_at": null, - "status": "active", - "app_use_case": "Your project use case here", - "business_website": "https://example.com" - } + event: + type: string + enum: ["thread.message.completed"] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is completed. 
+ x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: ["thread.message.incomplete"] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - ProjectListResponse: + ErrorEvent: type: object properties: - object: + event: type: string - enum: [ list ] + enum: ["error"] data: - type: array - items: - $ref: '#/components/schemas/Project' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean + $ref: "#/components/schemas/Error" required: - - object + - event - data - - first_id - - last_id - - has_more - - ProjectCreateRequest: - type: object - properties: - name: - type: string - description: The friendly name of the project, this name appears in reports. - app_use_case: - type: string - description: A description of your business, project, or use case. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). - business_website: - type: string - description: Your business URL, or if you don't have one yet, a URL to your LinkedIn or other social media. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). - required: - - name - - ProjectUpdateRequest: - type: object - properties: - name: - type: string - description: The updated name of the project, this name appears in reports. - app_use_case: - type: string - description: A description of your business, project, or use case. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). - business_website: - type: string - description: Your business URL, or if you don't have one yet, a URL to your LinkedIn or other social media. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). - required: - - name - - DefaultProjectErrorResponse: - type: object - properties: - code: - type: integer - message: - type: string - required: - - code - - message - - ProjectUser: - type: object - description: Represents an individual user in a project. - properties: - object: - type: string - enum: [ organization.project.user ] - description: The object type, which is always `organization.project.user` - id: - type: string - description: The identifier, which can be referenced in API endpoints - name: - type: string - description: The name of the user - email: - type: string - description: The email address of the user - role: - type: string - enum: [ owner, member ] - description: "`owner` or `member`" - added_at: - type: integer - description: The Unix timestamp (in seconds) of when the project was added. - - required: - - object - - id - - name - - email - - role - - added_at + description: Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. 
x-oaiMeta: - name: The project user object - example: | - { - "object": "organization.project.user", - "id": "user_abc", - "name": "First Last", - "email": "user@example.com", - "role": "owner", - "added_at": 1711471533 - } + dataDescription: "`data` is an [error](/docs/guides/error-codes/api-errors)" - ProjectUserListResponse: + DoneEvent: type: object properties: - object: + event: type: string + enum: ["done"] data: - type: array - items: - $ref: '#/components/schemas/ProjectUser' - first_id: - type: string - last_id: type: string - has_more: - type: boolean + enum: ["[DONE]"] required: - - object + - event - data - - first_id - - last_id - - has_more - - ProjectUserCreateRequest: - type: object - properties: - user_id: - type: string - description: The ID of the user. - role: - type: string - enum: [ owner, member ] - description: "`owner` or `member`" - required: - - user_id - - role - - ProjectUserUpdateRequest: - type: object - properties: - role: - type: string - enum: [ owner, member ] - description: "`owner` or `member`" - required: - - role - - ProjectUserDeleteResponse: - type: object - properties: - object: - type: string - enum: [ organization.project.user.deleted ] - id: - type: string - deleted: - type: boolean - required: - - object - - id - - deleted + description: Occurs when a stream ends. + x-oaiMeta: + dataDescription: "`data` is `[DONE]`" - ProjectServiceAccount: + Batch: type: object - description: Represents an individual service account in a project. properties: - object: - type: string - enum: [ organization.project.service_account ] - description: The object type, which is always `organization.project.service_account` id: type: string - description: The identifier, which can be referenced in API endpoints - name: - type: string - description: The name of the service account - role: - type: string - enum: [ owner, member ] - description: "`owner` or `member`" - created_at: - type: integer - description: The Unix timestamp (in seconds) of when the service account was created - required: - - object - - id - - name - - role - - created_at - x-oaiMeta: - name: The project service account object - example: | - { - "object": "organization.project.service_account", - "id": "svc_acct_abc", - "name": "Service Account", - "role": "owner", - "created_at": 1711471533 - } - - ProjectServiceAccountListResponse: - type: object - properties: object: type: string - enum: [ list ] - data: - type: array - items: - $ref: '#/components/schemas/ProjectServiceAccount' - first_id: - type: string - last_id: + enum: [batch] + description: The object type, which is always `batch`. + endpoint: type: string - has_more: - type: boolean - required: - - object - - data - - first_id - - last_id - - has_more + description: The OpenAI API endpoint used by the batch. - ProjectServiceAccountCreateRequest: - type: object - properties: - name: + errors: + type: object + properties: + object: + type: string + description: The object type, which is always `list`. + data: + type: array + items: + type: object + properties: + code: + type: string + description: An error code identifying the error type. + message: + type: string + description: A human-readable message providing more details about the error. + param: + type: string + description: The name of the parameter that caused the error, if applicable. + nullable: true + line: + type: integer + description: The line number of the input file where the error occurred, if applicable. 
+ nullable: true + input_file_id: type: string - description: The name of the service account being created. - required: - - name - - ProjectServiceAccountCreateResponse: - type: object - properties: - object: + description: The ID of the input file for the batch. + completion_window: type: string - enum: [ organization.project.service_account ] - id: + description: The time frame within which the batch should be processed. + status: type: string - name: + description: The current status of the batch. + enum: + - validating + - failed + - in_progress + - finalizing + - completed + - expired + - cancelling + - cancelled + output_file_id: type: string - role: + description: The ID of the file containing the outputs of successfully executed requests. + error_file_id: type: string - enum: [ member ] - description: Service accounts can only have one role of type `member` + description: The ID of the file containing the outputs of requests with errors. created_at: type: integer - api_key: - $ref: '#/components/schemas/ProjectServiceAccountApiKey' + description: The Unix timestamp (in seconds) for when the batch was created. + in_progress_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started processing. + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch will expire. + finalizing_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started finalizing. + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was completed. + failed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch failed. + expired_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch expired. + cancelling_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started cancelling. + cancelled_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was cancelled. + request_counts: + type: object + properties: + total: + type: integer + description: Total number of requests in the batch. + completed: + type: integer + description: Number of requests that have been completed successfully. + failed: + type: integer + description: Number of requests that have failed. + required: + - total + - completed + - failed + description: The request counts for different statuses within the batch. + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - - object - id - - name - - role + - object + - endpoint + - input_file_id + - completion_window + - status - created_at - - api_key + x-oaiMeta: + name: The batch object + example: *batch_object - ProjectServiceAccountApiKey: + BatchRequestInput: type: object + description: The per-line object of the batch input file properties: - object: - type: string - enum: [ organization.project.service_account.api_key ] - description: The object type, which is always `organization.project.service_account.api_key` - - value: + custom_id: type: string - name: + description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. + method: type: string - created_at: - type: integer - id: + enum: ["POST"] + description: The HTTP method to be used for the request. Currently only `POST` is supported. 
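The `Batch` schema in this hunk enumerates the batch lifecycle (`validating`, `in_progress`, `finalizing`, and the terminal states `completed`, `failed`, `expired`, `cancelled`) and exposes `output_file_id` and `error_file_id` once requests have been executed. A hedged sketch of deciding whether a decoded batch object is finished; the helper name and print statements are assumptions for the example:

```dart
const terminalBatchStatuses = {'completed', 'failed', 'expired', 'cancelled'};

/// Sketch: report whether a decoded Batch map has reached a terminal status
/// and, if so, which files hold the results.
bool isBatchFinished(Map<String, dynamic> batch) {
  final status = batch['status'] as String;
  if (!terminalBatchStatuses.contains(status)) return false;
  print('batch ${batch['id']} finished with status $status');
  print('outputs: ${batch['output_file_id']}, errors: ${batch['error_file_id']}');
  return true;
}
```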
+ url: type: string - required: - - object - - value - - name - - created_at - - id + description: The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. + x-oaiMeta: + name: The request input object + example: | + {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} - ProjectServiceAccountDeleteResponse: + BatchRequestOutput: type: object + description: The per-line object of the batch output and error files properties: - object: - type: string - enum: [ organization.project.service_account.deleted ] id: type: string - deleted: - type: boolean - required: - - object - - id - - deleted - - ProjectApiKey: - type: object - description: Represents an individual API key in a project. - properties: - object: - type: string - enum: [ organization.project.api_key ] - description: The object type, which is always `organization.project.api_key` - redacted_value: - type: string - description: The redacted value of the API key - name: - type: string - description: The name of the API key - created_at: - type: integer - description: The Unix timestamp (in seconds) of when the API key was created - id: + custom_id: type: string - description: The identifier, which can be referenced in API endpoints - owner: + description: A developer-provided per-request id that will be used to match outputs to inputs. + response: type: object + nullable: true properties: - type: + status_code: + type: integer + description: The HTTP status code of the response + request_id: type: string - enum: [ user, service_account ] - description: "`user` or `service_account`" - user: - $ref: '#/components/schemas/ProjectUser' - service_account: - $ref: '#/components/schemas/ProjectServiceAccount' - required: - - object - - redacted_value - - name - - created_at - - id - - owner + description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. + body: + type: object + x-oaiTypeLabel: map + description: The JSON body of the response + error: + type: object + nullable: true + description: For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. 
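`BatchRequestInput` and `BatchRequestOutput` above describe the per-line JSON objects of the batch input and output files. A hedged Dart sketch of producing an input `.jsonl` file and matching output lines back to inputs by `custom_id`; the file paths and request bodies are placeholders, and uploading the file or downloading results is out of scope here:

```dart
import 'dart:convert';
import 'dart:io';

/// Sketch: write one BatchRequestInput object per line, keyed by custom_id.
void writeBatchInput(
  Map<String, Map<String, dynamic>> bodiesByCustomId,
  String path,
) {
  final lines = bodiesByCustomId.entries.map((e) => jsonEncode({
        'custom_id': e.key,
        'method': 'POST', // only POST is currently supported
        'url': '/v1/chat/completions', // or /v1/embeddings, /v1/completions
        'body': e.value,
      }));
  File(path).writeAsStringSync('${lines.join('\n')}\n');
}

/// Sketch: index each BatchRequestOutput line by its custom_id so results
/// (or errors) can be matched back to the original requests.
Map<String, Map<String, dynamic>> readBatchOutput(String path) {
  final byCustomId = <String, Map<String, dynamic>>{};
  for (final line in File(path).readAsLinesSync()) {
    if (line.trim().isEmpty) continue;
    final output = jsonDecode(line) as Map<String, dynamic>;
    byCustomId[output['custom_id'] as String] = output;
  }
  return byCustomId;
}
```

Each output line then exposes either a `response` (with `status_code` and `body`) or a non-null `error`, as shown in the request output example that follows.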
x-oaiMeta: - name: The project API key object + name: The request output object example: | - { - "object": "organization.project.api_key", - "redacted_value": "sk-abc...def", - "name": "My API Key", - "created_at": 1711471533, - "id": "key_abc", - "owner": { - "type": "user", - "user": { - "object": "organization.project.user", - "id": "user_abc", - "name": "First Last", - "email": "user@example.com", - "role": "owner", - "created_at": 1711471533 - } - } - } + {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-3.5-turbo", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null} - ProjectApiKeyListResponse: + ListBatchesResponse: type: object properties: - object: - type: string - enum: [ list ] data: type: array items: - $ref: '#/components/schemas/ProjectApiKey' + $ref: "#/components/schemas/Batch" first_id: type: string + example: "batch_abc123" last_id: type: string + example: "batch_abc456" has_more: type: boolean - required: - - object - - data - - first_id - - last_id - - has_more - - ProjectApiKeyDeleteResponse: - type: object - properties: object: type: string - enum: [ organization.project.api_key.deleted ] - id: - type: string - deleted: - type: boolean + enum: [list] required: - object - - id - - deleted + - data + - has_more security: - - ApiKeyAuth: [ ] + - ApiKeyAuth: [] x-oaiMeta: navigationGroups: @@ -16569,8 +13009,6 @@ x-oaiMeta: title: Endpoints - id: assistants title: Assistants - - id: administration - title: Administration - id: legacy title: Legacy groups: @@ -16599,7 +13037,7 @@ x-oaiMeta: title: Audio description: | Learn how to turn audio into text or text into audio. - + Related guide: [Speech to text](/docs/guides/speech-to-text) navigationGroup: endpoints sections: @@ -16622,7 +13060,7 @@ x-oaiMeta: title: Chat description: | Given a list of messages comprising a conversation, the model will return a response. - + Related guide: [Chat Completions](/docs/guides/text-generation) navigationGroup: endpoints sections: @@ -16639,7 +13077,7 @@ x-oaiMeta: title: Embeddings description: | Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. - + Related guide: [Embeddings](/docs/guides/embeddings) navigationGroup: endpoints sections: @@ -16653,7 +13091,7 @@ x-oaiMeta: title: Fine-tuning description: | Manage fine-tuning jobs to tailor a model to your specific training data. - + Related guide: [Fine-tune models](/docs/guides/fine-tuning) navigationGroup: endpoints sections: @@ -16675,12 +13113,6 @@ x-oaiMeta: - type: endpoint key: cancelFineTuningJob path: cancel - - type: object - key: FinetuneChatRequestInput - path: chat-input - - type: object - key: FinetuneCompletionRequestInput - path: completions-input - type: object key: FineTuningJob path: object @@ -16694,7 +13126,7 @@ x-oaiMeta: title: Batch description: | Create large batches of API requests for asynchronous processing. The Batch API returns completions within 24 hours for a 50% discount. 
- + Related guide: [Batch](/docs/guides/batch) navigationGroup: endpoints sections: @@ -16715,10 +13147,10 @@ x-oaiMeta: path: object - type: object key: BatchRequestInput - path: request-input + path: requestInput - type: object key: BatchRequestOutput - path: request-output + path: requestOutput - id: files title: Files description: | @@ -16743,35 +13175,11 @@ x-oaiMeta: - type: object key: OpenAIFile path: object - - id: uploads - title: Uploads - description: | - Allows you to upload large files in multiple parts. - navigationGroup: endpoints - sections: - - type: endpoint - key: createUpload - path: create - - type: endpoint - key: addUploadPart - path: add-part - - type: endpoint - key: completeUpload - path: complete - - type: endpoint - key: cancelUpload - path: cancel - - type: object - key: Upload - path: object - - type: object - key: UploadPart - path: part-object - id: images title: Images description: | Given a prompt and/or an input image, the model will generate a new image. - + Related guide: [Image generation](/docs/guides/images) navigationGroup: endpoints sections: @@ -16809,7 +13217,7 @@ x-oaiMeta: title: Moderations description: | Given some input text, outputs if the model classifies it as potentially harmful across several categories. - + Related guide: [Moderations](/docs/guides/moderation) navigationGroup: endpoints sections: @@ -16819,14 +13227,12 @@ x-oaiMeta: - type: object key: CreateModerationResponse path: object - - - id: assistants title: Assistants beta: true description: | Build assistants that can call models and use tools to perform tasks. - + [Get started with the Assistants API](/docs/assistants) navigationGroup: assistants sections: @@ -16853,7 +13259,7 @@ x-oaiMeta: beta: true description: | Create threads that assistants can interact with. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -16877,7 +13283,7 @@ x-oaiMeta: beta: true description: | Create messages within threads - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -16904,7 +13310,7 @@ x-oaiMeta: beta: true description: | Represents an execution run on a thread. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -16937,7 +13343,7 @@ x-oaiMeta: beta: true description: | Represents the steps (model and tool calls) taken during the run. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -16955,7 +13361,7 @@ x-oaiMeta: beta: true description: | Vector stores are used to store files for use by the `file_search` tool. - + Related guide: [File Search](/docs/assistants/tools/file-search) navigationGroup: assistants sections: @@ -16982,7 +13388,7 @@ x-oaiMeta: beta: true description: | Vector store files represent files inside a vector store. - + Related guide: [File Search](/docs/assistants/tools/file-search) navigationGroup: assistants sections: @@ -17006,7 +13412,7 @@ x-oaiMeta: beta: true description: | Vector store file batches represent operations to add multiple files to a vector store. - + Related guide: [File Search](/docs/assistants/tools/file-search) navigationGroup: assistants sections: @@ -17030,11 +13436,11 @@ x-oaiMeta: beta: true description: | Stream the result of executing a Run or resuming a Run after submitting tool outputs. 
- + You can stream events from the [Create Thread and Run](/docs/api-reference/runs/createThreadAndRun), [Create Run](/docs/api-reference/runs/createRun), and [Submit Tool Outputs](/docs/api-reference/runs/submitToolOutputs) endpoints by passing `"stream": true`. The response will be a [Server-Sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) stream. - + Our Node and Python SDKs provide helpful utilities to make streaming easy. Reference the [Assistants API quickstart](/docs/assistants/overview) to learn more. navigationGroup: assistants @@ -17048,175 +13454,6 @@ x-oaiMeta: - type: object key: AssistantStreamEvent path: events - - - id: administration - title: Overview - description: | - Programmatically manage your organization. - - The Audit Logs endpoint provides a log of all actions taken in the - organization for security and monitoring purposes. - - To access these endpoints please generate an Admin API Key through the [API Platform Organization overview](/organization/admin-keys). Admin API keys cannot be used for non-administration endpoints. - - For best practices on setting up your organization, please refer to this [guide](/docs/guides/production-best-practices/setting-up-your-organization) - navigationGroup: administration - - - id: invite - title: Invites - description: Invite and manage invitations for an organization. Invited users are automatically added to the Default project. - navigationGroup: administration - sections: - - type: endpoint - key: list-invites - path: list - - type: endpoint - key: inviteUser - path: create - - type: endpoint - key: retrieve-invite - path: retrieve - - type: endpoint - key: delete-invite - path: delete - - type: object - key: Invite - path: object - - - id: users - title: Users - description: | - Manage users and their role in an organization. Users will be automatically added to the Default project. - navigationGroup: administration - sections: - - type: endpoint - key: list-users - path: list - - type: endpoint - key: modify-user - path: modify - - type: endpoint - key: retrieve-user - path: retrieve - - type: endpoint - key: delete-user - path: delete - - type: object - key: User - path: object - - - id: projects - title: Projects - description: | - Manage the projects within an orgnanization includes creation, updating, and archiving or projects. - The Default project cannot be modified or archived. - navigationGroup: administration - sections: - - type: endpoint - key: list-projects - path: list - - type: endpoint - key: create-project - path: create - - type: endpoint - key: retrieve-project - path: retrieve - - type: endpoint - key: modify-project - path: modify - - type: endpoint - key: archive-project - path: archive - - type: object - key: Project - path: object - - - id: project-users - title: Project Users - description: | - Manage users within a project, including adding, updating roles, and removing users. - Users cannot be removed from the Default project, unless they are being removed from the organization. 
- navigationGroup: administration - sections: - - type: endpoint - key: list-project-users - path: list - - type: endpoint - key: create-project-user - path: creeate - - type: endpoint - key: retrieve-project-user - path: retrieve - - type: endpoint - key: modify-project-user - path: modify - - type: endpoint - key: delete-project-user - path: delete - - type: object - key: ProjectUser - path: object - - - id: project-service-accounts - title: Project Service Accounts - description: | - Manage service accounts within a project. A service account is a bot user that is not associated with a user. - If a user leaves an organization, their keys and membership in projects will no longer work. Service accounts - do not have this limitation. However, service accounts can also be deleted from a project. - navigationGroup: administration - sections: - - type: endpoint - key: list-project-service-accounts - path: list - - type: endpoint - key: create-project-service-account - path: create - - type: endpoint - key: retrieve-project-service-account - path: retrieve - - type: endpoint - key: delete-project-service-account - path: delete - - type: object - key: ProjectServiceAccount - path: object - - - id: project-api-keys - title: Project API Keys - description: | - Manage API keys for a given project. Supports listing and deleting keys for users. - This API does not allow issuing keys for users, as users need to authorize themselves to generate keys. - navigationGroup: administration - sections: - - type: endpoint - key: list-project-api-keys - path: list - - type: endpoint - key: retrieve-project-api-key - path: retrieve - - type: endpoint - key: delete-project-api-key - path: delete - - type: object - key: ProjectApiKey - path: object - - - id: audit-logs - title: Audit Logs - description: | - Logs of user actions and configuration changes within this organization. - - To log events, you must activate logging in the [Organization Settings](/settings/organization/general). - Once activated, for security reasons, logging cannot be deactivated. - navigationGroup: administration - sections: - - type: endpoint - key: list-audit-logs - path: list - - type: object - key: AuditLog - path: object - - id: completions title: Completions legacy: true diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index afff8726..a6fd761e 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,10 +1,10 @@ name: openai_dart -description: Dart client for the OpenAI API. Supports chat (GPT-4o, o1, etc.), completions, embeddings, images (DALL·E 3), assistants (threads,, vector stores, etc.), batch, fine-tuning, etc. -version: 0.4.2 +description: Dart Client for the OpenAI API (completions, chat, embeddings, etc.). 
+version: 0.3.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai @@ -13,22 +13,22 @@ topics: - gpt environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - fetch_client: ^1.1.2 - freezed_annotation: ^2.4.2 - http: ^1.2.2 - json_annotation: ^4.9.0 + fetch_client: ^1.0.2 + freezed_annotation: ^2.4.1 + http: ^1.1.0 + json_annotation: ^4.8.1 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.11 - freezed: ^2.5.7 - json_serializable: ^6.8.0 + build_runner: ^2.4.9 + freezed: ^2.4.7 + json_serializable: ^6.7.1 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 - test: ^1.25.8 + ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + test: ^1.25.2 diff --git a/packages/openai_dart/test/openai_client_assistants_test.dart b/packages/openai_dart/test/openai_client_assistants_test.dart index 61479182..cf622c2c 100644 --- a/packages/openai_dart/test/openai_client_assistants_test.dart +++ b/packages/openai_dart/test/openai_client_assistants_test.dart @@ -8,8 +8,6 @@ import 'package:test/test.dart'; // https://platform.openai.com/docs/assistants/overview void main() { - const defaultModel = 'gpt-4o'; - group('OpenAI Assistants API tests', timeout: const Timeout(Duration(minutes: 5)), () { late OpenAIClient client; @@ -25,13 +23,14 @@ void main() { }); Future createAssistant() async { + const model = 'gpt-4'; const name = 'Math Tutor'; const description = 'Help students with math homework'; const instructions = 'You are a personal math tutor. Write and run code to answer math questions.'; final res = await client.createAssistant( request: const CreateAssistantRequest( - model: AssistantModel.modelId(defaultModel), + model: AssistantModel.modelId(model), name: name, description: description, instructions: instructions, @@ -43,7 +42,7 @@ void main() { expect(res.createdAt, greaterThan(0)); expect(res.name, name); expect(res.description, description); - expect(res.model, startsWith(defaultModel)); + expect(res.model, model); expect(res.instructions, instructions); expect(res.tools, hasLength(1)); final tool = res.tools.first; @@ -115,7 +114,6 @@ void main() { assistantId: assistantId, instructions: 'Please address the user as Jane Doe. 
The user has a premium account.', - model: const CreateRunRequestModel.modelId(defaultModel), ), ); expect(res.id, isNotNull); @@ -131,7 +129,7 @@ void main() { expect(res.cancelledAt, isNull); expect(res.failedAt, isNull); expect(res.completedAt, isNull); - expect(res.model, startsWith(defaultModel)); + expect(res.model, startsWith('gpt-4')); expect(res.instructions, isNotEmpty); expect(res.tools, hasLength(1)); expect(res.metadata, isEmpty); diff --git a/packages/openai_dart/test/openai_client_chat_test.dart b/packages/openai_dart/test/openai_client_chat_test.dart index 96c57c2a..9277c848 100644 --- a/packages/openai_dart/test/openai_client_chat_test.dart +++ b/packages/openai_dart/test/openai_client_chat_test.dart @@ -23,7 +23,7 @@ void main() { test('Test call chat completion API', () async { final models = [ - ChatCompletionModels.gpt4oMini, + ChatCompletionModels.gpt35Turbo, ChatCompletionModels.gpt4, ]; @@ -73,7 +73,7 @@ void main() { test('Test call chat completion API with stop sequence', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt4oMini, + ChatCompletionModels.gpt35Turbo, ), messages: [ ChatCompletionMessage.system( @@ -105,7 +105,7 @@ void main() { test('Test call chat completions API with max tokens', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt4oMini, + ChatCompletionModels.gpt35Turbo, ), messages: [ ChatCompletionMessage.system( @@ -115,7 +115,7 @@ void main() { content: ChatCompletionUserMessageContent.string('Tell me a joke'), ), ], - maxCompletionTokens: 2, + maxTokens: 2, ); final res = await client.createChatCompletion(request: request); expect(res.choices, isNotEmpty); @@ -123,16 +123,12 @@ void main() { res.choices.first.finishReason, ChatCompletionFinishReason.length, ); - expect( - res.usage?.completionTokensDetails?.reasoningTokens, - ChatCompletionFinishReason.length, - ); }); test('Test call chat completions API with other parameters', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt4oMini, + ChatCompletionModels.gpt35Turbo, ), messages: [ ChatCompletionMessage.system( @@ -142,7 +138,7 @@ void main() { content: ChatCompletionUserMessageContent.string('Tell me a joke'), ), ], - maxCompletionTokens: 2, + maxTokens: 2, presencePenalty: 0.6, frequencyPenalty: 0.6, logitBias: {'50256': -100}, @@ -158,7 +154,7 @@ void main() { test('Test call chat completions streaming API', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt4oMini, + ChatCompletionModels.gpt35Turbo, ), messages: [ ChatCompletionMessage.system( @@ -183,7 +179,7 @@ void main() { await for (final res in stream) { expect(res.id, isNotEmpty); expect(res.created, greaterThan(0)); - expect(res.model, startsWith('gpt-4o-mini')); + expect(res.model, startsWith('gpt-3.5-turbo')); expect(res.object, isNotEmpty); if (res.choices.isNotEmpty) { expect(res.choices, hasLength(1)); @@ -228,7 +224,7 @@ void main() { final request1 = CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt4oMini, + ChatCompletionModels.gpt35Turbo, ), messages: [ const ChatCompletionMessage.system( @@ -276,7 +272,7 @@ void main() { final request2 = CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt4oMini, + ChatCompletionModels.gpt35Turbo, ), messages: [ const 
ChatCompletionMessage.system( @@ -334,7 +330,7 @@ void main() { final request1 = CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt4oMini, + ChatCompletionModels.gpt35Turbo, ), messages: [ const ChatCompletionMessage.system( @@ -364,7 +360,7 @@ void main() { res.object, isNotEmpty, ); - expect(res.model, startsWith('gpt-4o-mini')); + expect(res.model, startsWith('gpt-3.5-turbo')); expect(res.choices, hasLength(1)); final choice = res.choices.first; expect(choice.index, 0); @@ -390,50 +386,113 @@ void main() { expect(count, greaterThan(1)); }); - test('Test jsonObject response format', () async { - const request = CreateChatCompletionRequest( - model: ChatCompletionModel.model( - ChatCompletionModels.gpt4oMini, + test('Test call chat completions API functions', () async { + const function = FunctionObject( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + parameters: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + final request1 = CreateChatCompletionRequest( + model: const ChatCompletionModel.model( + ChatCompletionModels.gpt35Turbo, ), messages: [ - ChatCompletionMessage.system( - content: - 'You are a helpful assistant that extracts names from text ' - 'and returns them in a JSON array.', + const ChatCompletionMessage.system( + content: 'You are a helpful assistant.', ), - ChatCompletionMessage.user( + const ChatCompletionMessage.user( content: ChatCompletionUserMessageContent.string( - 'John, Mary, and Peter.', + 'What’s the weather like in Boston right now?', ), ), ], - temperature: 0, - responseFormat: ResponseFormat.jsonObject(), + functions: [function], + functionCall: ChatCompletionFunctionCall.function( + ChatCompletionFunctionCallOption(name: function.name), + ), ); - final res = await client.createChatCompletion(request: request); - expect(res.choices, hasLength(1)); - final choice = res.choices.first; - final message = choice.message; - expect(message.role, ChatCompletionMessageRole.assistant); - final content = message.content; - final jsonContent = json.decode(content!) 
as Map; - final jsonName = jsonContent['names'] as List; - expect(jsonName, isList); - expect(jsonName, hasLength(3)); - expect(jsonName, contains('John')); - expect(jsonName, contains('Mary')); - expect(jsonName, contains('Peter')); + final res1 = await client.createChatCompletion(request: request1); + expect(res1.choices, hasLength(1)); + + final choice1 = res1.choices.first; + + final aiMessage1 = choice1.message; + expect(aiMessage1.role, ChatCompletionMessageRole.assistant); + expect(aiMessage1.content, isNull); + expect(aiMessage1.functionCall, isNotNull); + + final functionCall = aiMessage1.functionCall!; + expect(functionCall.name, function.name); + expect(functionCall.arguments, isNotEmpty); + final arguments = json.decode( + functionCall.arguments, + ) as Map; + expect(arguments.containsKey('location'), isTrue); + expect(arguments['location'], contains('Boston')); + + final functionResult = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + + final request2 = CreateChatCompletionRequest( + model: const ChatCompletionModel.model( + ChatCompletionModels.gpt35Turbo, + ), + messages: [ + const ChatCompletionMessage.system( + content: 'You are a helpful assistant.', + ), + const ChatCompletionMessage.user( + content: ChatCompletionUserMessageContent.string( + 'What’s the weather like in Boston right now?', + ), + ), + ChatCompletionMessage.function( + name: function.name, + content: json.encode(functionResult), + ), + ], + functions: [function], + ); + final res2 = await client.createChatCompletion(request: request2); + expect(res2.choices, hasLength(1)); + + final choice2 = res2.choices.first; + expect(choice2.finishReason, ChatCompletionFinishReason.stop); + + final aiMessage2 = choice2.message; + expect(aiMessage2.role, ChatCompletionMessageRole.assistant); + expect(aiMessage2.content, contains('22')); + expect(aiMessage2.functionCall, isNull); }); - test('Test jsonSchema response format', () async { + test('Test jsonObject response format', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt4oMini, + ChatCompletionModels.gpt41106Preview, ), messages: [ ChatCompletionMessage.system( content: - 'You are a helpful assistant. That extracts names from text.', + 'You are a helpful assistant. That extracts names from text ' + 'and returns them in a JSON array.', ), ChatCompletionMessage.user( content: ChatCompletionUserMessageContent.string( @@ -442,25 +501,8 @@ void main() { ), ], temperature: 0, - responseFormat: ResponseFormat.jsonSchema( - jsonSchema: JsonSchemaObject( - name: 'Names', - description: 'A list of names', - strict: true, - schema: { - 'type': 'object', - 'properties': { - 'names': { - 'type': 'array', - 'items': { - 'type': 'string', - }, - }, - }, - 'additionalProperties': false, - 'required': ['names'], - }, - ), + responseFormat: ChatCompletionResponseFormat( + type: ChatCompletionResponseFormatType.jsonObject, ), ); final res = await client.createChatCompletion(request: request); diff --git a/packages/tavily_dart/.gitignore b/packages/tavily_dart/.gitignore deleted file mode 100644 index 3cceda55..00000000 --- a/packages/tavily_dart/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -# https://dart.dev/guides/libraries/private-files -# Created by `dart pub` -.dart_tool/ - -# Avoid committing pubspec.lock for library packages; see -# https://dart.dev/guides/libraries/private-files#pubspeclock. 
-pubspec.lock diff --git a/packages/tavily_dart/CHANGELOG.md b/packages/tavily_dart/CHANGELOG.md deleted file mode 100644 index 74cd20f8..00000000 --- a/packages/tavily_dart/CHANGELOG.md +++ /dev/null @@ -1,11 +0,0 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.1.0 - - - **FEAT**: Implement tavily_dart, a Dart client for Tavily API ([#456](https://github.com/davidmigloz/langchain_dart/issues/456)). ([fbfb79ba](https://github.com/davidmigloz/langchain_dart/commit/fbfb79bad81dbbd5844a90938fda79b201f20047)) - -## 0.0.1-dev.1 - -- Bootstrap package. diff --git a/packages/tavily_dart/LICENSE b/packages/tavily_dart/LICENSE deleted file mode 100644 index f407ffdd..00000000 --- a/packages/tavily_dart/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 David Miguel Lozano - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/packages/tavily_dart/README.md b/packages/tavily_dart/README.md deleted file mode 100644 index a7cd6afd..00000000 --- a/packages/tavily_dart/README.md +++ /dev/null @@ -1,131 +0,0 @@ -# Tavily Dart Client - -[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) -[![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) -[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) -[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) - -Dart Client for the [Tavily](https://tavily.com) API (a search engine optimized for LLMs and RAG). - -## Features - -- Fully type-safe, [documented](https://pub.dev/documentation/tavily_dart/latest) and tested -- All platforms supported -- Custom base URL, headers and query params support (e.g. HTTP proxies) -- Custom HTTP client support (e.g. 
SOCKS5 proxies or advanced use cases) - -**Supported endpoints:** -- Search - -## Table of contents - -- [Usage](#usage) - * [Authentication](#authentication) - * [Search](#search) -- [Advance Usage](#advance-usage) - * [Custom HTTP client](#custom-http-client) - * [Using a proxy](#using-a-proxy) - + [HTTP proxy](#http-proxy) - + [SOCKS5 proxy](#socks5-proxy) -- [Acknowledgements](#acknowledgements) -- [License](#license) - -## Usage - -Refer to the [documentation](https://docs.tavily.com) for more information about the API. - -### Authentication - -The Tavily API uses API keys for authentication. Visit the [Tavily console](https://app.tavily.com/) to retrieve the API key you'll use in your requests. - -> **Remember that your API key is a secret!** -> Do not share it with others or expose it in any client-side code (browsers, apps). Production requests must be routed through your own backend server where your API key can be securely loaded from an environment variable or key management service. - -```dart -final apiKey = Platform.environment['TAVILY_API_KEY']; -final client = TavilyClient(); -``` - -### Search - -Search for data based on a query. - -**Basic search:** - -```dart -final res = await client.search( - request: SearchRequest( - apiKey: apiKey, - query: 'Should I invest in Apple right now?', - ), -); -print(res); -``` - -**Advanced search:** - -```dart -final res = await client.search( - request: SearchRequest( - apiKey: apiKey, - query: 'Should I invest in Apple right now?', - searchDepth: SearchRequestSearchDepth.advanced, - ), -); -print(res); -``` - -See the API documentation for more information on all supported search parameters. - -## Advance Usage - -### Custom HTTP client - -You can always provide your own implementation of `http.Client` for further customization: - -```dart -final client = TavilyClient( - client: MyHttpClient(), -); -``` - -### Using a proxy - -#### HTTP proxy - -You can use your own HTTP proxy by overriding the `baseUrl` and providing your required `headers`: - -```dart -final client = TavilyClient( - baseUrl: 'https://my-proxy.com', - headers: { - 'x-my-proxy-header': 'value', - }, -); -``` - -If you need further customization, you can always provide your own `http.Client`. - -#### SOCKS5 proxy - -To use a SOCKS5 proxy, you can use the [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package: - -```dart -final baseHttpClient = HttpClient(); -SocksTCPClient.assignToHttpClient(baseHttpClient, [ - ProxySettings(InternetAddress.loopbackIPv4, 1080), -]); -final httpClient = IOClient(baseClient); - -final client = TavilyClient( - client: httpClient, -); -``` - -## Acknowledgements - -The generation of this client was made possible by the [openapi_spec](https://github.com/tazatechnology/openapi_spec) package. - -## License - -Tavily Dart Client is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). 
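
The README removed above documents the Tavily client surface piece by piece (`TavilyClient`, `SearchRequest`, `SearchResponse`/`SearchResult`, `TavilyClientException`, `endSession`). As a consolidated illustration only, and not part of the patch itself, here is a minimal sketch that combines those pieces into one flow: a basic search, iteration over the typed results, error handling, and client shutdown. Everything used below (including the `TAVILY_API_KEY` environment variable) is taken from the deleted README, example, and generated schema shown in this diff; nothing new is assumed beyond composing them together.

```dart
// Minimal usage sketch for the tavily_dart client removed by this patch.
import 'dart:io';

import 'package:tavily_dart/tavily_dart.dart';

void main() async {
  final apiKey = Platform.environment['TAVILY_API_KEY']!;
  final client = TavilyClient();
  try {
    final res = await client.search(
      request: SearchRequest(
        apiKey: apiKey,
        query: 'Should I invest in Apple right now?',
        searchDepth: SearchRequestSearchDepth.advanced,
        maxResults: 3,
      ),
    );
    // SearchResponse.results is a list of SearchResult(title, url, content, score).
    for (final result in res.results) {
      print('${result.score.toStringAsFixed(2)}  ${result.title} (${result.url})');
    }
  } on TavilyClientException catch (e) {
    // Thrown for non-2xx responses and request encoding errors.
    print('Tavily request failed: $e');
  } finally {
    // Close the underlying http.Client.
    client.endSession();
  }
}
```
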
diff --git a/packages/tavily_dart/analysis_options.yaml b/packages/tavily_dart/analysis_options.yaml deleted file mode 100644 index f04c6cf0..00000000 --- a/packages/tavily_dart/analysis_options.yaml +++ /dev/null @@ -1 +0,0 @@ -include: ../../analysis_options.yaml diff --git a/packages/tavily_dart/build.yaml b/packages/tavily_dart/build.yaml deleted file mode 100644 index dee719ac..00000000 --- a/packages/tavily_dart/build.yaml +++ /dev/null @@ -1,13 +0,0 @@ -targets: - $default: - builders: - source_gen|combining_builder: - options: - ignore_for_file: - - prefer_final_parameters - - require_trailing_commas - - non_constant_identifier_names - - unnecessary_null_checks - json_serializable: - options: - explicit_to_json: true diff --git a/packages/tavily_dart/example/tavily_dart_example.dart b/packages/tavily_dart/example/tavily_dart_example.dart deleted file mode 100644 index 652564b2..00000000 --- a/packages/tavily_dart/example/tavily_dart_example.dart +++ /dev/null @@ -1,28 +0,0 @@ -// ignore_for_file: avoid_print -import 'dart:io'; - -import 'package:tavily_dart/tavily_dart.dart'; - -void main() async { - final apiKey = Platform.environment['TAVILY_API_KEY']!; - final client = TavilyClient(); - - // Basic search - final res1 = await client.search( - request: SearchRequest( - apiKey: apiKey, - query: 'Should I invest in Apple right now?', - ), - ); - print(res1); - - // Advanced search - final res2 = await client.search( - request: SearchRequest( - apiKey: apiKey, - query: 'Should I invest in Apple right now?', - searchDepth: SearchRequestSearchDepth.advanced, - ), - ); - print(res2); -} diff --git a/packages/tavily_dart/lib/src/generated/client.dart b/packages/tavily_dart/lib/src/generated/client.dart deleted file mode 100644 index f6fb0439..00000000 --- a/packages/tavily_dart/lib/src/generated/client.dart +++ /dev/null @@ -1,382 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target, unused_import - -import 'dart:convert'; -import 'dart:typed_data'; - -import 'package:http/http.dart' as http; -import 'package:http/retry.dart'; -import 'package:meta/meta.dart'; - -import 'schema/schema.dart'; - -/// Enum of HTTP methods -enum HttpMethod { get, put, post, delete, options, head, patch, trace } - -// ========================================== -// CLASS: TavilyClientException -// ========================================== - -/// HTTP exception handler for TavilyClient -class TavilyClientException implements Exception { - TavilyClientException({ - required this.message, - required this.uri, - required this.method, - this.code, - this.body, - }); - - final String message; - final Uri uri; - final HttpMethod method; - final int? code; - final Object? body; - - @override - String toString() { - Object? data; - try { - data = body is String ? jsonDecode(body as String) : body.toString(); - } catch (e) { - data = body.toString(); - } - final s = JsonEncoder.withIndent(' ').convert({ - 'uri': uri.toString(), - 'method': method.name.toUpperCase(), - 'code': code, - 'message': message, - 'body': data, - }); - return 'TavilyClientException($s)'; - } -} - -// ========================================== -// CLASS: TavilyClient -// ========================================== - -/// Client for Tavily API (v.1.0.0) -/// -/// Tavily Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience. 
-class TavilyClient { - /// Creates a new TavilyClient instance. - /// - /// - [TavilyClient.baseUrl] Override base URL (default: server url defined in spec) - /// - [TavilyClient.headers] Global headers to be sent with every request - /// - [TavilyClient.queryParams] Global query parameters to be sent with every request - /// - [TavilyClient.client] Override HTTP client to use for requests - TavilyClient({ - this.baseUrl, - this.headers = const {}, - this.queryParams = const {}, - http.Client? client, - }) : assert( - baseUrl == null || baseUrl.startsWith('http'), - 'baseUrl must start with http', - ), - assert( - baseUrl == null || !baseUrl.endsWith('/'), - 'baseUrl must not end with /', - ), - client = RetryClient(client ?? http.Client()); - - /// Override base URL (default: server url defined in spec) - final String? baseUrl; - - /// Global headers to be sent with every request - final Map headers; - - /// Global query parameters to be sent with every request - final Map queryParams; - - /// HTTP client for requests - final http.Client client; - - // ------------------------------------------ - // METHOD: endSession - // ------------------------------------------ - - /// Close the HTTP client and end session - void endSession() => client.close(); - - // ------------------------------------------ - // METHOD: onRequest - // ------------------------------------------ - - /// Middleware for HTTP requests (user can override) - /// - /// The request can be of type [http.Request] or [http.MultipartRequest] - Future onRequest(http.BaseRequest request) { - return Future.value(request); - } - - // ------------------------------------------ - // METHOD: onStreamedResponse - // ------------------------------------------ - - /// Middleware for HTTP streamed responses (user can override) - Future onStreamedResponse( - final http.StreamedResponse response, - ) { - return Future.value(response); - } - - // ------------------------------------------ - // METHOD: onResponse - // ------------------------------------------ - - /// Middleware for HTTP responses (user can override) - Future onResponse(http.Response response) { - return Future.value(response); - } - - // ------------------------------------------ - // METHOD: _jsonDecode - // ------------------------------------------ - - dynamic _jsonDecode(http.Response r) { - return json.decode(utf8.decode(r.bodyBytes)); - } - - // ------------------------------------------ - // METHOD: _request - // ------------------------------------------ - - /// Reusable request method - @protected - Future _request({ - required String baseUrl, - required String path, - required HttpMethod method, - Map queryParams = const {}, - Map headerParams = const {}, - bool isMultipart = false, - String requestType = '', - String responseType = '', - Object? body, - }) async { - // Override with the user provided baseUrl - baseUrl = this.baseUrl ?? 
baseUrl; - - // Ensure a baseUrl is provided - assert( - baseUrl.isNotEmpty, - 'baseUrl is required, but none defined in spec or provided by user', - ); - - // Add global query parameters - queryParams = {...queryParams, ...this.queryParams}; - - // Ensure query parameters are strings or iterable of strings - queryParams = queryParams.map((key, value) { - if (value is Iterable) { - return MapEntry(key, value.map((v) => v.toString())); - } else { - return MapEntry(key, value.toString()); - } - }); - - // Build the request URI - Uri uri = Uri.parse(baseUrl + path); - if (queryParams.isNotEmpty) { - uri = uri.replace(queryParameters: queryParams); - } - - // Build the headers - Map headers = {...headerParams}; - - // Define the request type being sent to server - if (requestType.isNotEmpty) { - headers['content-type'] = requestType; - } - - // Define the response type expected to receive from server - if (responseType.isNotEmpty) { - headers['accept'] = responseType; - } - - // Add global headers - headers.addAll(this.headers); - - // Build the request object - http.BaseRequest request; - if (isMultipart) { - // Handle multipart request - request = http.MultipartRequest(method.name, uri); - request = request as http.MultipartRequest; - if (body is List) { - request.files.addAll(body); - } else { - request.files.add(body as http.MultipartFile); - } - } else { - // Handle normal request - request = http.Request(method.name, uri); - request = request as http.Request; - try { - if (body != null) { - request.body = json.encode(body); - } - } catch (e) { - // Handle request encoding error - throw TavilyClientException( - uri: uri, - method: method, - message: 'Could not encode: ${body.runtimeType}', - body: e, - ); - } - } - - // Add request headers - request.headers.addAll(headers); - - // Handle user request middleware - request = await onRequest(request); - - // Submit request - return await client.send(request); - } - - // ------------------------------------------ - // METHOD: makeRequestStream - // ------------------------------------------ - - /// Reusable request stream method - @protected - Future makeRequestStream({ - required String baseUrl, - required String path, - required HttpMethod method, - Map queryParams = const {}, - Map headerParams = const {}, - bool isMultipart = false, - String requestType = '', - String responseType = '', - Object? body, - }) async { - final uri = Uri.parse((this.baseUrl ?? 
baseUrl) + path); - late http.StreamedResponse response; - try { - response = await _request( - baseUrl: baseUrl, - path: path, - method: method, - queryParams: queryParams, - headerParams: headerParams, - requestType: requestType, - responseType: responseType, - body: body, - ); - // Handle user response middleware - response = await onStreamedResponse(response); - } catch (e) { - // Handle request and response errors - throw TavilyClientException( - uri: uri, - method: method, - message: 'Response error', - body: e, - ); - } - - // Check for successful response - if ((response.statusCode ~/ 100) == 2) { - return response; - } - - // Handle unsuccessful response - throw TavilyClientException( - uri: uri, - method: method, - message: 'Unsuccessful response', - code: response.statusCode, - body: (await http.Response.fromStream(response)).body, - ); - } - - // ------------------------------------------ - // METHOD: makeRequest - // ------------------------------------------ - - /// Reusable request method - @protected - Future makeRequest({ - required String baseUrl, - required String path, - required HttpMethod method, - Map queryParams = const {}, - Map headerParams = const {}, - bool isMultipart = false, - String requestType = '', - String responseType = '', - Object? body, - }) async { - final uri = Uri.parse((this.baseUrl ?? baseUrl) + path); - late http.Response response; - try { - final streamedResponse = await _request( - baseUrl: baseUrl, - path: path, - method: method, - queryParams: queryParams, - headerParams: headerParams, - requestType: requestType, - responseType: responseType, - body: body, - ); - response = await http.Response.fromStream(streamedResponse); - // Handle user response middleware - response = await onResponse(response); - } catch (e) { - // Handle request and response errors - throw TavilyClientException( - uri: uri, - method: method, - message: 'Response error', - body: e, - ); - } - - // Check for successful response - if ((response.statusCode ~/ 100) == 2) { - return response; - } - - // Handle unsuccessful response - throw TavilyClientException( - uri: uri, - method: method, - message: 'Unsuccessful response', - code: response.statusCode, - body: response.body, - ); - } - - // ------------------------------------------ - // METHOD: search - // ------------------------------------------ - - /// Search for data based on a query. - /// - /// `request`: The search request object. 
- /// - /// `POST` `https://api.tavily.com/search` - Future search({ - required SearchRequest request, - }) async { - final r = await makeRequest( - baseUrl: 'https://api.tavily.com', - path: '/search', - method: HttpMethod.post, - isMultipart: false, - requestType: 'application/json', - responseType: 'application/json', - body: request, - ); - return SearchResponse.fromJson(_jsonDecode(r)); - } -} diff --git a/packages/tavily_dart/lib/src/generated/schema/schema.dart b/packages/tavily_dart/lib/src/generated/schema/schema.dart deleted file mode 100644 index 4b3ba505..00000000 --- a/packages/tavily_dart/lib/src/generated/schema/schema.dart +++ /dev/null @@ -1,15 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target - -library tavily_schema; - -import 'package:freezed_annotation/freezed_annotation.dart'; - -part 'schema.g.dart'; -part 'schema.freezed.dart'; - -part 'search_request.dart'; -part 'search_response.dart'; -part 'search_result.dart'; diff --git a/packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart b/packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart deleted file mode 100644 index cc459594..00000000 --- a/packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart +++ /dev/null @@ -1,1027 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: unused_element, deprecated_member_use, deprecated_member_use_from_same_package, use_function_type_syntax_for_parameters, unnecessary_const, avoid_init_to_null, invalid_override_different_default_values_named, prefer_expression_function_bodies, annotate_overrides, invalid_annotation_target, unnecessary_question_mark - -part of 'schema.dart'; - -// ************************************************************************** -// FreezedGenerator -// ************************************************************************** - -T _$identity(T value) => value; - -final _privateConstructorUsedError = UnsupportedError( - 'It seems like you constructed your class using `MyClass._()`. This constructor is only meant to be used by freezed and you are not supposed to need it nor use it.\nPlease check the documentation here for more information: https://github.com/rrousselGit/freezed#adding-getters-and-methods-to-our-models'); - -SearchRequest _$SearchRequestFromJson(Map json) { - return _SearchRequest.fromJson(json); -} - -/// @nodoc -mixin _$SearchRequest { - /// Your unique API key. - @JsonKey(name: 'api_key') - String get apiKey => throw _privateConstructorUsedError; - - /// The search query string. - String get query => throw _privateConstructorUsedError; - - /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. - @JsonKey(name: 'search_depth') - SearchRequestSearchDepth get searchDepth => - throw _privateConstructorUsedError; - - /// Include a list of query related images in the response. Default is False. - @JsonKey(name: 'include_images') - bool get includeImages => throw _privateConstructorUsedError; - - /// Include answers in the search results. Default is False. - @JsonKey(name: 'include_answer') - bool get includeAnswer => throw _privateConstructorUsedError; - - /// Include raw content in the search results. Default is False. - @JsonKey(name: 'include_raw_content') - bool get includeRawContent => throw _privateConstructorUsedError; - - /// The number of maximum search results to return. Default is 5. 
- @JsonKey(name: 'max_results') - int get maxResults => throw _privateConstructorUsedError; - - /// A list of domains to specifically include in the search results. Default is None. - @JsonKey(name: 'include_domains', includeIfNull: false) - List? get includeDomains => throw _privateConstructorUsedError; - - /// A list of domains to specifically exclude from the search results. Default is None. - @JsonKey(name: 'exclude_domains', includeIfNull: false) - List? get excludeDomains => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $SearchRequestCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $SearchRequestCopyWith<$Res> { - factory $SearchRequestCopyWith( - SearchRequest value, $Res Function(SearchRequest) then) = - _$SearchRequestCopyWithImpl<$Res, SearchRequest>; - @useResult - $Res call( - {@JsonKey(name: 'api_key') String apiKey, - String query, - @JsonKey(name: 'search_depth') SearchRequestSearchDepth searchDepth, - @JsonKey(name: 'include_images') bool includeImages, - @JsonKey(name: 'include_answer') bool includeAnswer, - @JsonKey(name: 'include_raw_content') bool includeRawContent, - @JsonKey(name: 'max_results') int maxResults, - @JsonKey(name: 'include_domains', includeIfNull: false) - List? includeDomains, - @JsonKey(name: 'exclude_domains', includeIfNull: false) - List? excludeDomains}); -} - -/// @nodoc -class _$SearchRequestCopyWithImpl<$Res, $Val extends SearchRequest> - implements $SearchRequestCopyWith<$Res> { - _$SearchRequestCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? apiKey = null, - Object? query = null, - Object? searchDepth = null, - Object? includeImages = null, - Object? includeAnswer = null, - Object? includeRawContent = null, - Object? maxResults = null, - Object? includeDomains = freezed, - Object? excludeDomains = freezed, - }) { - return _then(_value.copyWith( - apiKey: null == apiKey - ? _value.apiKey - : apiKey // ignore: cast_nullable_to_non_nullable - as String, - query: null == query - ? _value.query - : query // ignore: cast_nullable_to_non_nullable - as String, - searchDepth: null == searchDepth - ? _value.searchDepth - : searchDepth // ignore: cast_nullable_to_non_nullable - as SearchRequestSearchDepth, - includeImages: null == includeImages - ? _value.includeImages - : includeImages // ignore: cast_nullable_to_non_nullable - as bool, - includeAnswer: null == includeAnswer - ? _value.includeAnswer - : includeAnswer // ignore: cast_nullable_to_non_nullable - as bool, - includeRawContent: null == includeRawContent - ? _value.includeRawContent - : includeRawContent // ignore: cast_nullable_to_non_nullable - as bool, - maxResults: null == maxResults - ? _value.maxResults - : maxResults // ignore: cast_nullable_to_non_nullable - as int, - includeDomains: freezed == includeDomains - ? _value.includeDomains - : includeDomains // ignore: cast_nullable_to_non_nullable - as List?, - excludeDomains: freezed == excludeDomains - ? 
_value.excludeDomains - : excludeDomains // ignore: cast_nullable_to_non_nullable - as List?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$SearchRequestImplCopyWith<$Res> - implements $SearchRequestCopyWith<$Res> { - factory _$$SearchRequestImplCopyWith( - _$SearchRequestImpl value, $Res Function(_$SearchRequestImpl) then) = - __$$SearchRequestImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(name: 'api_key') String apiKey, - String query, - @JsonKey(name: 'search_depth') SearchRequestSearchDepth searchDepth, - @JsonKey(name: 'include_images') bool includeImages, - @JsonKey(name: 'include_answer') bool includeAnswer, - @JsonKey(name: 'include_raw_content') bool includeRawContent, - @JsonKey(name: 'max_results') int maxResults, - @JsonKey(name: 'include_domains', includeIfNull: false) - List? includeDomains, - @JsonKey(name: 'exclude_domains', includeIfNull: false) - List? excludeDomains}); -} - -/// @nodoc -class __$$SearchRequestImplCopyWithImpl<$Res> - extends _$SearchRequestCopyWithImpl<$Res, _$SearchRequestImpl> - implements _$$SearchRequestImplCopyWith<$Res> { - __$$SearchRequestImplCopyWithImpl( - _$SearchRequestImpl _value, $Res Function(_$SearchRequestImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? apiKey = null, - Object? query = null, - Object? searchDepth = null, - Object? includeImages = null, - Object? includeAnswer = null, - Object? includeRawContent = null, - Object? maxResults = null, - Object? includeDomains = freezed, - Object? excludeDomains = freezed, - }) { - return _then(_$SearchRequestImpl( - apiKey: null == apiKey - ? _value.apiKey - : apiKey // ignore: cast_nullable_to_non_nullable - as String, - query: null == query - ? _value.query - : query // ignore: cast_nullable_to_non_nullable - as String, - searchDepth: null == searchDepth - ? _value.searchDepth - : searchDepth // ignore: cast_nullable_to_non_nullable - as SearchRequestSearchDepth, - includeImages: null == includeImages - ? _value.includeImages - : includeImages // ignore: cast_nullable_to_non_nullable - as bool, - includeAnswer: null == includeAnswer - ? _value.includeAnswer - : includeAnswer // ignore: cast_nullable_to_non_nullable - as bool, - includeRawContent: null == includeRawContent - ? _value.includeRawContent - : includeRawContent // ignore: cast_nullable_to_non_nullable - as bool, - maxResults: null == maxResults - ? _value.maxResults - : maxResults // ignore: cast_nullable_to_non_nullable - as int, - includeDomains: freezed == includeDomains - ? _value._includeDomains - : includeDomains // ignore: cast_nullable_to_non_nullable - as List?, - excludeDomains: freezed == excludeDomains - ? _value._excludeDomains - : excludeDomains // ignore: cast_nullable_to_non_nullable - as List?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$SearchRequestImpl extends _SearchRequest { - const _$SearchRequestImpl( - {@JsonKey(name: 'api_key') required this.apiKey, - required this.query, - @JsonKey(name: 'search_depth') - this.searchDepth = SearchRequestSearchDepth.basic, - @JsonKey(name: 'include_images') this.includeImages = false, - @JsonKey(name: 'include_answer') this.includeAnswer = false, - @JsonKey(name: 'include_raw_content') this.includeRawContent = false, - @JsonKey(name: 'max_results') this.maxResults = 5, - @JsonKey(name: 'include_domains', includeIfNull: false) - final List? includeDomains, - @JsonKey(name: 'exclude_domains', includeIfNull: false) - final List? 
excludeDomains}) - : _includeDomains = includeDomains, - _excludeDomains = excludeDomains, - super._(); - - factory _$SearchRequestImpl.fromJson(Map json) => - _$$SearchRequestImplFromJson(json); - - /// Your unique API key. - @override - @JsonKey(name: 'api_key') - final String apiKey; - - /// The search query string. - @override - final String query; - - /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. - @override - @JsonKey(name: 'search_depth') - final SearchRequestSearchDepth searchDepth; - - /// Include a list of query related images in the response. Default is False. - @override - @JsonKey(name: 'include_images') - final bool includeImages; - - /// Include answers in the search results. Default is False. - @override - @JsonKey(name: 'include_answer') - final bool includeAnswer; - - /// Include raw content in the search results. Default is False. - @override - @JsonKey(name: 'include_raw_content') - final bool includeRawContent; - - /// The number of maximum search results to return. Default is 5. - @override - @JsonKey(name: 'max_results') - final int maxResults; - - /// A list of domains to specifically include in the search results. Default is None. - final List? _includeDomains; - - /// A list of domains to specifically include in the search results. Default is None. - @override - @JsonKey(name: 'include_domains', includeIfNull: false) - List? get includeDomains { - final value = _includeDomains; - if (value == null) return null; - if (_includeDomains is EqualUnmodifiableListView) return _includeDomains; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - - /// A list of domains to specifically exclude from the search results. Default is None. - final List? _excludeDomains; - - /// A list of domains to specifically exclude from the search results. Default is None. - @override - @JsonKey(name: 'exclude_domains', includeIfNull: false) - List? 
get excludeDomains { - final value = _excludeDomains; - if (value == null) return null; - if (_excludeDomains is EqualUnmodifiableListView) return _excludeDomains; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - - @override - String toString() { - return 'SearchRequest(apiKey: $apiKey, query: $query, searchDepth: $searchDepth, includeImages: $includeImages, includeAnswer: $includeAnswer, includeRawContent: $includeRawContent, maxResults: $maxResults, includeDomains: $includeDomains, excludeDomains: $excludeDomains)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$SearchRequestImpl && - (identical(other.apiKey, apiKey) || other.apiKey == apiKey) && - (identical(other.query, query) || other.query == query) && - (identical(other.searchDepth, searchDepth) || - other.searchDepth == searchDepth) && - (identical(other.includeImages, includeImages) || - other.includeImages == includeImages) && - (identical(other.includeAnswer, includeAnswer) || - other.includeAnswer == includeAnswer) && - (identical(other.includeRawContent, includeRawContent) || - other.includeRawContent == includeRawContent) && - (identical(other.maxResults, maxResults) || - other.maxResults == maxResults) && - const DeepCollectionEquality() - .equals(other._includeDomains, _includeDomains) && - const DeepCollectionEquality() - .equals(other._excludeDomains, _excludeDomains)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash( - runtimeType, - apiKey, - query, - searchDepth, - includeImages, - includeAnswer, - includeRawContent, - maxResults, - const DeepCollectionEquality().hash(_includeDomains), - const DeepCollectionEquality().hash(_excludeDomains)); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$SearchRequestImplCopyWith<_$SearchRequestImpl> get copyWith => - __$$SearchRequestImplCopyWithImpl<_$SearchRequestImpl>(this, _$identity); - - @override - Map toJson() { - return _$$SearchRequestImplToJson( - this, - ); - } -} - -abstract class _SearchRequest extends SearchRequest { - const factory _SearchRequest( - {@JsonKey(name: 'api_key') required final String apiKey, - required final String query, - @JsonKey(name: 'search_depth') final SearchRequestSearchDepth searchDepth, - @JsonKey(name: 'include_images') final bool includeImages, - @JsonKey(name: 'include_answer') final bool includeAnswer, - @JsonKey(name: 'include_raw_content') final bool includeRawContent, - @JsonKey(name: 'max_results') final int maxResults, - @JsonKey(name: 'include_domains', includeIfNull: false) - final List? includeDomains, - @JsonKey(name: 'exclude_domains', includeIfNull: false) - final List? excludeDomains}) = _$SearchRequestImpl; - const _SearchRequest._() : super._(); - - factory _SearchRequest.fromJson(Map json) = - _$SearchRequestImpl.fromJson; - - @override - - /// Your unique API key. - @JsonKey(name: 'api_key') - String get apiKey; - @override - - /// The search query string. - String get query; - @override - - /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. - @JsonKey(name: 'search_depth') - SearchRequestSearchDepth get searchDepth; - @override - - /// Include a list of query related images in the response. Default is False. - @JsonKey(name: 'include_images') - bool get includeImages; - @override - - /// Include answers in the search results. Default is False. 
- @JsonKey(name: 'include_answer') - bool get includeAnswer; - @override - - /// Include raw content in the search results. Default is False. - @JsonKey(name: 'include_raw_content') - bool get includeRawContent; - @override - - /// The number of maximum search results to return. Default is 5. - @JsonKey(name: 'max_results') - int get maxResults; - @override - - /// A list of domains to specifically include in the search results. Default is None. - @JsonKey(name: 'include_domains', includeIfNull: false) - List? get includeDomains; - @override - - /// A list of domains to specifically exclude from the search results. Default is None. - @JsonKey(name: 'exclude_domains', includeIfNull: false) - List? get excludeDomains; - @override - @JsonKey(ignore: true) - _$$SearchRequestImplCopyWith<_$SearchRequestImpl> get copyWith => - throw _privateConstructorUsedError; -} - -SearchResponse _$SearchResponseFromJson(Map json) { - return _SearchResponse.fromJson(json); -} - -/// @nodoc -mixin _$SearchResponse { - /// The answer to your search query. - @JsonKey(includeIfNull: false) - String? get answer => throw _privateConstructorUsedError; - - /// Your search query. - String get query => throw _privateConstructorUsedError; - - /// Your search result response time. - @JsonKey(name: 'response_time') - double get responseTime => throw _privateConstructorUsedError; - - /// A list of query related image urls. - @JsonKey(includeIfNull: false) - List? get images => throw _privateConstructorUsedError; - - /// A list of suggested research follow up questions related to original query. - @JsonKey(name: 'follow_up_questions', includeIfNull: false) - List? get followUpQuestions => throw _privateConstructorUsedError; - - /// A list of search results. - List get results => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $SearchResponseCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $SearchResponseCopyWith<$Res> { - factory $SearchResponseCopyWith( - SearchResponse value, $Res Function(SearchResponse) then) = - _$SearchResponseCopyWithImpl<$Res, SearchResponse>; - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? answer, - String query, - @JsonKey(name: 'response_time') double responseTime, - @JsonKey(includeIfNull: false) List? images, - @JsonKey(name: 'follow_up_questions', includeIfNull: false) - List? followUpQuestions, - List results}); -} - -/// @nodoc -class _$SearchResponseCopyWithImpl<$Res, $Val extends SearchResponse> - implements $SearchResponseCopyWith<$Res> { - _$SearchResponseCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? answer = freezed, - Object? query = null, - Object? responseTime = null, - Object? images = freezed, - Object? followUpQuestions = freezed, - Object? results = null, - }) { - return _then(_value.copyWith( - answer: freezed == answer - ? _value.answer - : answer // ignore: cast_nullable_to_non_nullable - as String?, - query: null == query - ? _value.query - : query // ignore: cast_nullable_to_non_nullable - as String, - responseTime: null == responseTime - ? _value.responseTime - : responseTime // ignore: cast_nullable_to_non_nullable - as double, - images: freezed == images - ? 
_value.images - : images // ignore: cast_nullable_to_non_nullable - as List?, - followUpQuestions: freezed == followUpQuestions - ? _value.followUpQuestions - : followUpQuestions // ignore: cast_nullable_to_non_nullable - as List?, - results: null == results - ? _value.results - : results // ignore: cast_nullable_to_non_nullable - as List, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$SearchResponseImplCopyWith<$Res> - implements $SearchResponseCopyWith<$Res> { - factory _$$SearchResponseImplCopyWith(_$SearchResponseImpl value, - $Res Function(_$SearchResponseImpl) then) = - __$$SearchResponseImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? answer, - String query, - @JsonKey(name: 'response_time') double responseTime, - @JsonKey(includeIfNull: false) List? images, - @JsonKey(name: 'follow_up_questions', includeIfNull: false) - List? followUpQuestions, - List results}); -} - -/// @nodoc -class __$$SearchResponseImplCopyWithImpl<$Res> - extends _$SearchResponseCopyWithImpl<$Res, _$SearchResponseImpl> - implements _$$SearchResponseImplCopyWith<$Res> { - __$$SearchResponseImplCopyWithImpl( - _$SearchResponseImpl _value, $Res Function(_$SearchResponseImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? answer = freezed, - Object? query = null, - Object? responseTime = null, - Object? images = freezed, - Object? followUpQuestions = freezed, - Object? results = null, - }) { - return _then(_$SearchResponseImpl( - answer: freezed == answer - ? _value.answer - : answer // ignore: cast_nullable_to_non_nullable - as String?, - query: null == query - ? _value.query - : query // ignore: cast_nullable_to_non_nullable - as String, - responseTime: null == responseTime - ? _value.responseTime - : responseTime // ignore: cast_nullable_to_non_nullable - as double, - images: freezed == images - ? _value._images - : images // ignore: cast_nullable_to_non_nullable - as List?, - followUpQuestions: freezed == followUpQuestions - ? _value._followUpQuestions - : followUpQuestions // ignore: cast_nullable_to_non_nullable - as List?, - results: null == results - ? _value._results - : results // ignore: cast_nullable_to_non_nullable - as List, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$SearchResponseImpl extends _SearchResponse { - const _$SearchResponseImpl( - {@JsonKey(includeIfNull: false) this.answer, - required this.query, - @JsonKey(name: 'response_time') required this.responseTime, - @JsonKey(includeIfNull: false) final List? images, - @JsonKey(name: 'follow_up_questions', includeIfNull: false) - final List? followUpQuestions, - required final List results}) - : _images = images, - _followUpQuestions = followUpQuestions, - _results = results, - super._(); - - factory _$SearchResponseImpl.fromJson(Map json) => - _$$SearchResponseImplFromJson(json); - - /// The answer to your search query. - @override - @JsonKey(includeIfNull: false) - final String? answer; - - /// Your search query. - @override - final String query; - - /// Your search result response time. - @override - @JsonKey(name: 'response_time') - final double responseTime; - - /// A list of query related image urls. - final List? _images; - - /// A list of query related image urls. - @override - @JsonKey(includeIfNull: false) - List? 
get images { - final value = _images; - if (value == null) return null; - if (_images is EqualUnmodifiableListView) return _images; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - - /// A list of suggested research follow up questions related to original query. - final List? _followUpQuestions; - - /// A list of suggested research follow up questions related to original query. - @override - @JsonKey(name: 'follow_up_questions', includeIfNull: false) - List? get followUpQuestions { - final value = _followUpQuestions; - if (value == null) return null; - if (_followUpQuestions is EqualUnmodifiableListView) - return _followUpQuestions; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - - /// A list of search results. - final List _results; - - /// A list of search results. - @override - List get results { - if (_results is EqualUnmodifiableListView) return _results; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_results); - } - - @override - String toString() { - return 'SearchResponse(answer: $answer, query: $query, responseTime: $responseTime, images: $images, followUpQuestions: $followUpQuestions, results: $results)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$SearchResponseImpl && - (identical(other.answer, answer) || other.answer == answer) && - (identical(other.query, query) || other.query == query) && - (identical(other.responseTime, responseTime) || - other.responseTime == responseTime) && - const DeepCollectionEquality().equals(other._images, _images) && - const DeepCollectionEquality() - .equals(other._followUpQuestions, _followUpQuestions) && - const DeepCollectionEquality().equals(other._results, _results)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash( - runtimeType, - answer, - query, - responseTime, - const DeepCollectionEquality().hash(_images), - const DeepCollectionEquality().hash(_followUpQuestions), - const DeepCollectionEquality().hash(_results)); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$SearchResponseImplCopyWith<_$SearchResponseImpl> get copyWith => - __$$SearchResponseImplCopyWithImpl<_$SearchResponseImpl>( - this, _$identity); - - @override - Map toJson() { - return _$$SearchResponseImplToJson( - this, - ); - } -} - -abstract class _SearchResponse extends SearchResponse { - const factory _SearchResponse( - {@JsonKey(includeIfNull: false) final String? answer, - required final String query, - @JsonKey(name: 'response_time') required final double responseTime, - @JsonKey(includeIfNull: false) final List? images, - @JsonKey(name: 'follow_up_questions', includeIfNull: false) - final List? followUpQuestions, - required final List results}) = _$SearchResponseImpl; - const _SearchResponse._() : super._(); - - factory _SearchResponse.fromJson(Map json) = - _$SearchResponseImpl.fromJson; - - @override - - /// The answer to your search query. - @JsonKey(includeIfNull: false) - String? get answer; - @override - - /// Your search query. - String get query; - @override - - /// Your search result response time. - @JsonKey(name: 'response_time') - double get responseTime; - @override - - /// A list of query related image urls. - @JsonKey(includeIfNull: false) - List? get images; - @override - - /// A list of suggested research follow up questions related to original query. 
- @JsonKey(name: 'follow_up_questions', includeIfNull: false) - List? get followUpQuestions; - @override - - /// A list of search results. - List get results; - @override - @JsonKey(ignore: true) - _$$SearchResponseImplCopyWith<_$SearchResponseImpl> get copyWith => - throw _privateConstructorUsedError; -} - -SearchResult _$SearchResultFromJson(Map json) { - return _SearchResult.fromJson(json); -} - -/// @nodoc -mixin _$SearchResult { - /// The title of the search result url. - String get title => throw _privateConstructorUsedError; - - /// The url of the search result. - String get url => throw _privateConstructorUsedError; - - /// The most query related content from the scraped url. - String get content => throw _privateConstructorUsedError; - - /// The parsed and cleaned HTML of the site. For now includes parsed text only. - @JsonKey(name: 'raw_content', includeIfNull: false) - String? get rawContent => throw _privateConstructorUsedError; - - /// The relevance score of the search result. - double get score => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $SearchResultCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $SearchResultCopyWith<$Res> { - factory $SearchResultCopyWith( - SearchResult value, $Res Function(SearchResult) then) = - _$SearchResultCopyWithImpl<$Res, SearchResult>; - @useResult - $Res call( - {String title, - String url, - String content, - @JsonKey(name: 'raw_content', includeIfNull: false) String? rawContent, - double score}); -} - -/// @nodoc -class _$SearchResultCopyWithImpl<$Res, $Val extends SearchResult> - implements $SearchResultCopyWith<$Res> { - _$SearchResultCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? title = null, - Object? url = null, - Object? content = null, - Object? rawContent = freezed, - Object? score = null, - }) { - return _then(_value.copyWith( - title: null == title - ? _value.title - : title // ignore: cast_nullable_to_non_nullable - as String, - url: null == url - ? _value.url - : url // ignore: cast_nullable_to_non_nullable - as String, - content: null == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as String, - rawContent: freezed == rawContent - ? _value.rawContent - : rawContent // ignore: cast_nullable_to_non_nullable - as String?, - score: null == score - ? _value.score - : score // ignore: cast_nullable_to_non_nullable - as double, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$SearchResultImplCopyWith<$Res> - implements $SearchResultCopyWith<$Res> { - factory _$$SearchResultImplCopyWith( - _$SearchResultImpl value, $Res Function(_$SearchResultImpl) then) = - __$$SearchResultImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {String title, - String url, - String content, - @JsonKey(name: 'raw_content', includeIfNull: false) String? rawContent, - double score}); -} - -/// @nodoc -class __$$SearchResultImplCopyWithImpl<$Res> - extends _$SearchResultCopyWithImpl<$Res, _$SearchResultImpl> - implements _$$SearchResultImplCopyWith<$Res> { - __$$SearchResultImplCopyWithImpl( - _$SearchResultImpl _value, $Res Function(_$SearchResultImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? title = null, - Object? url = null, - Object? 
content = null, - Object? rawContent = freezed, - Object? score = null, - }) { - return _then(_$SearchResultImpl( - title: null == title - ? _value.title - : title // ignore: cast_nullable_to_non_nullable - as String, - url: null == url - ? _value.url - : url // ignore: cast_nullable_to_non_nullable - as String, - content: null == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as String, - rawContent: freezed == rawContent - ? _value.rawContent - : rawContent // ignore: cast_nullable_to_non_nullable - as String?, - score: null == score - ? _value.score - : score // ignore: cast_nullable_to_non_nullable - as double, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$SearchResultImpl extends _SearchResult { - const _$SearchResultImpl( - {required this.title, - required this.url, - required this.content, - @JsonKey(name: 'raw_content', includeIfNull: false) this.rawContent, - required this.score}) - : super._(); - - factory _$SearchResultImpl.fromJson(Map json) => - _$$SearchResultImplFromJson(json); - - /// The title of the search result url. - @override - final String title; - - /// The url of the search result. - @override - final String url; - - /// The most query related content from the scraped url. - @override - final String content; - - /// The parsed and cleaned HTML of the site. For now includes parsed text only. - @override - @JsonKey(name: 'raw_content', includeIfNull: false) - final String? rawContent; - - /// The relevance score of the search result. - @override - final double score; - - @override - String toString() { - return 'SearchResult(title: $title, url: $url, content: $content, rawContent: $rawContent, score: $score)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$SearchResultImpl && - (identical(other.title, title) || other.title == title) && - (identical(other.url, url) || other.url == url) && - (identical(other.content, content) || other.content == content) && - (identical(other.rawContent, rawContent) || - other.rawContent == rawContent) && - (identical(other.score, score) || other.score == score)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => - Object.hash(runtimeType, title, url, content, rawContent, score); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$SearchResultImplCopyWith<_$SearchResultImpl> get copyWith => - __$$SearchResultImplCopyWithImpl<_$SearchResultImpl>(this, _$identity); - - @override - Map toJson() { - return _$$SearchResultImplToJson( - this, - ); - } -} - -abstract class _SearchResult extends SearchResult { - const factory _SearchResult( - {required final String title, - required final String url, - required final String content, - @JsonKey(name: 'raw_content', includeIfNull: false) - final String? rawContent, - required final double score}) = _$SearchResultImpl; - const _SearchResult._() : super._(); - - factory _SearchResult.fromJson(Map json) = - _$SearchResultImpl.fromJson; - - @override - - /// The title of the search result url. - String get title; - @override - - /// The url of the search result. - String get url; - @override - - /// The most query related content from the scraped url. - String get content; - @override - - /// The parsed and cleaned HTML of the site. For now includes parsed text only. - @JsonKey(name: 'raw_content', includeIfNull: false) - String? get rawContent; - @override - - /// The relevance score of the search result. 
- double get score; - @override - @JsonKey(ignore: true) - _$$SearchResultImplCopyWith<_$SearchResultImpl> get copyWith => - throw _privateConstructorUsedError; -} diff --git a/packages/tavily_dart/lib/src/generated/schema/schema.g.dart b/packages/tavily_dart/lib/src/generated/schema/schema.g.dart deleted file mode 100644 index f9214d02..00000000 --- a/packages/tavily_dart/lib/src/generated/schema/schema.g.dart +++ /dev/null @@ -1,116 +0,0 @@ -// GENERATED CODE - DO NOT MODIFY BY HAND - -// ignore_for_file: prefer_final_parameters, require_trailing_commas, non_constant_identifier_names, unnecessary_null_checks - -part of 'schema.dart'; - -// ************************************************************************** -// JsonSerializableGenerator -// ************************************************************************** - -_$SearchRequestImpl _$$SearchRequestImplFromJson(Map json) => - _$SearchRequestImpl( - apiKey: json['api_key'] as String, - query: json['query'] as String, - searchDepth: $enumDecodeNullable( - _$SearchRequestSearchDepthEnumMap, json['search_depth']) ?? - SearchRequestSearchDepth.basic, - includeImages: json['include_images'] as bool? ?? false, - includeAnswer: json['include_answer'] as bool? ?? false, - includeRawContent: json['include_raw_content'] as bool? ?? false, - maxResults: (json['max_results'] as num?)?.toInt() ?? 5, - includeDomains: (json['include_domains'] as List?) - ?.map((e) => e as String) - .toList(), - excludeDomains: (json['exclude_domains'] as List?) - ?.map((e) => e as String) - .toList(), - ); - -Map _$$SearchRequestImplToJson(_$SearchRequestImpl instance) { - final val = { - 'api_key': instance.apiKey, - 'query': instance.query, - 'search_depth': _$SearchRequestSearchDepthEnumMap[instance.searchDepth]!, - 'include_images': instance.includeImages, - 'include_answer': instance.includeAnswer, - 'include_raw_content': instance.includeRawContent, - 'max_results': instance.maxResults, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('include_domains', instance.includeDomains); - writeNotNull('exclude_domains', instance.excludeDomains); - return val; -} - -const _$SearchRequestSearchDepthEnumMap = { - SearchRequestSearchDepth.basic: 'basic', - SearchRequestSearchDepth.advanced: 'advanced', -}; - -_$SearchResponseImpl _$$SearchResponseImplFromJson(Map json) => - _$SearchResponseImpl( - answer: json['answer'] as String?, - query: json['query'] as String, - responseTime: (json['response_time'] as num).toDouble(), - images: - (json['images'] as List?)?.map((e) => e as String).toList(), - followUpQuestions: (json['follow_up_questions'] as List?) 
- ?.map((e) => e as String) - .toList(), - results: (json['results'] as List) - .map((e) => SearchResult.fromJson(e as Map)) - .toList(), - ); - -Map _$$SearchResponseImplToJson( - _$SearchResponseImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('answer', instance.answer); - val['query'] = instance.query; - val['response_time'] = instance.responseTime; - writeNotNull('images', instance.images); - writeNotNull('follow_up_questions', instance.followUpQuestions); - val['results'] = instance.results.map((e) => e.toJson()).toList(); - return val; -} - -_$SearchResultImpl _$$SearchResultImplFromJson(Map json) => - _$SearchResultImpl( - title: json['title'] as String, - url: json['url'] as String, - content: json['content'] as String, - rawContent: json['raw_content'] as String?, - score: (json['score'] as num).toDouble(), - ); - -Map _$$SearchResultImplToJson(_$SearchResultImpl instance) { - final val = { - 'title': instance.title, - 'url': instance.url, - 'content': instance.content, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('raw_content', instance.rawContent); - val['score'] = instance.score; - return val; -} diff --git a/packages/tavily_dart/lib/src/generated/schema/search_request.dart b/packages/tavily_dart/lib/src/generated/schema/search_request.dart deleted file mode 100644 index c0d16e7a..00000000 --- a/packages/tavily_dart/lib/src/generated/schema/search_request.dart +++ /dev/null @@ -1,103 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of tavily_schema; - -// ========================================== -// CLASS: SearchRequest -// ========================================== - -/// The search request object. -@freezed -class SearchRequest with _$SearchRequest { - const SearchRequest._(); - - /// Factory constructor for SearchRequest - const factory SearchRequest({ - /// Your unique API key. - @JsonKey(name: 'api_key') required String apiKey, - - /// The search query string. - required String query, - - /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. - @JsonKey(name: 'search_depth') - @Default(SearchRequestSearchDepth.basic) - SearchRequestSearchDepth searchDepth, - - /// Include a list of query related images in the response. Default is False. - @JsonKey(name: 'include_images') @Default(false) bool includeImages, - - /// Include answers in the search results. Default is False. - @JsonKey(name: 'include_answer') @Default(false) bool includeAnswer, - - /// Include raw content in the search results. Default is False. - @JsonKey(name: 'include_raw_content') - @Default(false) - bool includeRawContent, - - /// The number of maximum search results to return. Default is 5. - @JsonKey(name: 'max_results') @Default(5) int maxResults, - - /// A list of domains to specifically include in the search results. Default is None. - @JsonKey(name: 'include_domains', includeIfNull: false) - List? includeDomains, - - /// A list of domains to specifically exclude from the search results. Default is None. - @JsonKey(name: 'exclude_domains', includeIfNull: false) - List? 
excludeDomains, - }) = _SearchRequest; - - /// Object construction from a JSON representation - factory SearchRequest.fromJson(Map json) => - _$SearchRequestFromJson(json); - - /// List of all property names of schema - static const List propertyNames = [ - 'api_key', - 'query', - 'search_depth', - 'include_images', - 'include_answer', - 'include_raw_content', - 'max_results', - 'include_domains', - 'exclude_domains' - ]; - - /// Validation constants - static const maxResultsDefaultValue = 5; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'api_key': apiKey, - 'query': query, - 'search_depth': searchDepth, - 'include_images': includeImages, - 'include_answer': includeAnswer, - 'include_raw_content': includeRawContent, - 'max_results': maxResults, - 'include_domains': includeDomains, - 'exclude_domains': excludeDomains, - }; - } -} - -// ========================================== -// ENUM: SearchRequestSearchDepth -// ========================================== - -/// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. -enum SearchRequestSearchDepth { - @JsonValue('basic') - basic, - @JsonValue('advanced') - advanced, -} diff --git a/packages/tavily_dart/lib/src/generated/schema/search_response.dart b/packages/tavily_dart/lib/src/generated/schema/search_response.dart deleted file mode 100644 index 473db9c1..00000000 --- a/packages/tavily_dart/lib/src/generated/schema/search_response.dart +++ /dev/null @@ -1,68 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of tavily_schema; - -// ========================================== -// CLASS: SearchResponse -// ========================================== - -/// The response data from the search query. -@freezed -class SearchResponse with _$SearchResponse { - const SearchResponse._(); - - /// Factory constructor for SearchResponse - const factory SearchResponse({ - /// The answer to your search query. - @JsonKey(includeIfNull: false) String? answer, - - /// Your search query. - required String query, - - /// Your search result response time. - @JsonKey(name: 'response_time') required double responseTime, - - /// A list of query related image urls. - @JsonKey(includeIfNull: false) List? images, - - /// A list of suggested research follow up questions related to original query. - @JsonKey(name: 'follow_up_questions', includeIfNull: false) - List? followUpQuestions, - - /// A list of search results. - required List results, - }) = _SearchResponse; - - /// Object construction from a JSON representation - factory SearchResponse.fromJson(Map json) => - _$SearchResponseFromJson(json); - - /// List of all property names of schema - static const List propertyNames = [ - 'answer', - 'query', - 'response_time', - 'images', - 'follow_up_questions', - 'results' - ]; - - /// Perform validations on the schema property values - String? 
validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'answer': answer, - 'query': query, - 'response_time': responseTime, - 'images': images, - 'follow_up_questions': followUpQuestions, - 'results': results, - }; - } -} diff --git a/packages/tavily_dart/lib/src/generated/schema/search_result.dart b/packages/tavily_dart/lib/src/generated/schema/search_result.dart deleted file mode 100644 index cfb75690..00000000 --- a/packages/tavily_dart/lib/src/generated/schema/search_result.dart +++ /dev/null @@ -1,62 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of tavily_schema; - -// ========================================== -// CLASS: SearchResult -// ========================================== - -/// The search result object. -@freezed -class SearchResult with _$SearchResult { - const SearchResult._(); - - /// Factory constructor for SearchResult - const factory SearchResult({ - /// The title of the search result url. - required String title, - - /// The url of the search result. - required String url, - - /// The most query related content from the scraped url. - required String content, - - /// The parsed and cleaned HTML of the site. For now includes parsed text only. - @JsonKey(name: 'raw_content', includeIfNull: false) String? rawContent, - - /// The relevance score of the search result. - required double score, - }) = _SearchResult; - - /// Object construction from a JSON representation - factory SearchResult.fromJson(Map json) => - _$SearchResultFromJson(json); - - /// List of all property names of schema - static const List propertyNames = [ - 'title', - 'url', - 'content', - 'raw_content', - 'score' - ]; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'title': title, - 'url': url, - 'content': content, - 'raw_content': rawContent, - 'score': score, - }; - } -} diff --git a/packages/tavily_dart/lib/tavily_dart.dart b/packages/tavily_dart/lib/tavily_dart.dart deleted file mode 100644 index 272b33ce..00000000 --- a/packages/tavily_dart/lib/tavily_dart.dart +++ /dev/null @@ -1,5 +0,0 @@ -/// Dart Client for the Tavily API (a search engine optimized for LLMs and RAG). -library; - -export 'src/generated/client.dart'; -export 'src/generated/schema/schema.dart'; diff --git a/packages/tavily_dart/oas/main.dart b/packages/tavily_dart/oas/main.dart deleted file mode 100644 index bf08264b..00000000 --- a/packages/tavily_dart/oas/main.dart +++ /dev/null @@ -1,23 +0,0 @@ -import 'dart:io'; - -import 'package:openapi_spec/openapi_spec.dart'; - -/// Generates Tavily API client Dart code from the OpenAPI spec. 
-/// https://docs.tavily.com/docs/tavily-api/rest_api -void main() async { - final spec = OpenApi.fromFile(source: 'oas/tavily_openapi.yaml'); - - await spec.generate( - package: 'Tavily', - destination: 'lib/src/generated/', - replace: true, - clientOptions: const ClientGeneratorOptions( - enabled: true, - ), - ); - - await Process.run( - 'dart', - ['run', 'build_runner', 'build', 'lib', '--delete-conflicting-outputs'], - ); -} diff --git a/packages/tavily_dart/oas/tavily_openapi.yaml b/packages/tavily_dart/oas/tavily_openapi.yaml deleted file mode 100644 index 250fa447..00000000 --- a/packages/tavily_dart/oas/tavily_openapi.yaml +++ /dev/null @@ -1,156 +0,0 @@ -openapi: 3.0.3 - -info: - title: Tavily API - description: Tavily Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience. - version: 1.0.0 - contact: - name: Tavily Support - url: https://tavily.com - -servers: - - url: https://api.tavily.com - -paths: - /search: - post: - summary: Search for data based on a query. - operationId: search - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/SearchRequest' - responses: - '200': - description: Successful search response. - content: - application/json: - schema: - $ref: '#/components/schemas/SearchResponse' - '400': - description: Bad Request — Your request is invalid. - '401': - description: Unauthorized — Your API key is wrong. - '403': - description: Forbidden — The endpoint requested is hidden for administrators only. - '404': - description: Not Found — The specified endpoint could not be found. - '405': - description: Method Not Allowed — You tried to access an endpoint with an invalid method. - '429': - description: Too Many Requests — You're requesting too many results! Slow down! - '500': - description: Internal Server Error — We had a problem with our server. Try again later. - '503': - description: Service Unavailable — We're temporarily offline for maintenance. Please try again later. - '504': - description: Gateway Timeout — We're temporarily offline for maintenance. Please try again later. - -components: - schemas: - SearchRequest: - type: object - description: The search request object. - properties: - api_key: - type: string - description: Your unique API key. - example: "your api key" - query: - type: string - description: The search query string. - example: "your search query" - search_depth: - type: string - description: The depth of the search. It can be 'basic' or advanced. Default is 'basic'. - enum: - - basic - - advanced - default: basic - include_images: - type: boolean - description: Include a list of query related images in the response. Default is False. - default: false - include_answer: - type: boolean - description: Include answers in the search results. Default is False. - default: false - include_raw_content: - type: boolean - description: Include raw content in the search results. Default is False. - default: false - max_results: - type: integer - description: The number of maximum search results to return. Default is 5. - default: 5 - include_domains: - type: array - items: - type: string - description: A list of domains to specifically include in the search results. Default is None. - exclude_domains: - type: array - items: - type: string - description: A list of domains to specifically exclude from the search results. Default is None. 
- required: - - api_key - - query - SearchResponse: - type: object - description: The response data from the search query. - properties: - answer: - type: string - description: The answer to your search query. - query: - type: string - description: Your search query. - response_time: - type: number - description: Your search result response time. - images: - type: array - items: - type: string - description: A list of query related image urls. - follow_up_questions: - type: array - items: - type: string - description: A list of suggested research follow up questions related to original query. - results: - type: array - description: A list of search results. - items: - $ref: '#/components/schemas/SearchResult' - required: - - query - - response_time - - results - SearchResult: - type: object - description: The search result object. - properties: - title: - type: string - description: The title of the search result url. - url: - type: string - description: The url of the search result. - content: - type: string - description: The most query related content from the scraped url. - raw_content: - type: string - description: The parsed and cleaned HTML of the site. For now includes parsed text only. - score: - type: number - description: The relevance score of the search result. - required: - - title - - url - - content - - score diff --git a/packages/tavily_dart/pubspec.yaml b/packages/tavily_dart/pubspec.yaml deleted file mode 100644 index 29519674..00000000 --- a/packages/tavily_dart/pubspec.yaml +++ /dev/null @@ -1,34 +0,0 @@ -name: tavily_dart -description: Dart Client for the Tavily API (a search engine optimized for LLMs and RAG). -version: 0.1.0 -repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/tavily_dart -issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:tavily_dart -homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev - -topics: - - ai - - llms - - search - - rag - -environment: - sdk: ">=3.4.0 <4.0.0" - -dependencies: - fetch_client: ^1.1.2 - freezed_annotation: ^2.4.2 - http: ^1.2.2 - json_annotation: ^4.9.0 - meta: ^1.11.0 - -dev_dependencies: - build_runner: ^2.4.11 - freezed: ^2.5.7 - json_serializable: ^6.8.0 - # openapi_spec: ^0.7.8 - openapi_spec: - git: - url: https://github.com/davidmigloz/openapi_spec.git - ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 - test: ^1.25.8 diff --git a/packages/tavily_dart/test/tavily_test.dart b/packages/tavily_dart/test/tavily_test.dart deleted file mode 100644 index 0df02cb8..00000000 --- a/packages/tavily_dart/test/tavily_test.dart +++ /dev/null @@ -1,45 +0,0 @@ -@TestOn('vm') -library; // Uses dart:io - -import 'dart:io'; - -import 'package:tavily_dart/tavily_dart.dart'; -import 'package:test/test.dart'; - -void main() { - group('Tavily API tests', () { - late TavilyClient client; - - setUp(() async { - client = TavilyClient(); - }); - - tearDown(() { - client.endSession(); - }); - - test('Test call search API', () async { - final res = await client.search( - request: SearchRequest( - apiKey: Platform.environment['TAVILY_API_KEY']!, - query: 'Should I invest in Apple right now?', - includeAnswer: true, - includeImages: true, - includeRawContent: true, - maxResults: 3, - ), - ); - expect(res.answer, isNotEmpty); - expect(res.query, 'Should I invest in Apple right now?'); - expect(res.responseTime, greaterThan(0)); - expect(res.images, isNotEmpty); - expect(res.results, hasLength(3)); - final result = res.results.first; - expect(result.title, 
isNotEmpty); - expect(result.url, isNotEmpty); - expect(result.content, isNotEmpty); - expect(result.rawContent, isNotEmpty); - expect(result.score, greaterThan(0)); - }); - }); -} diff --git a/packages/vertex_ai/CHANGELOG.md b/packages/vertex_ai/CHANGELOG.md index 372ba2dc..f081d3a9 100644 --- a/packages/vertex_ai/CHANGELOG.md +++ b/packages/vertex_ai/CHANGELOG.md @@ -1,15 +1,3 @@ -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - ---- - -## 0.1.0+2 - - - Update a dependency to the latest release. - -## 0.1.0+1 - - - Update a dependency to the latest release. - ## 0.1.0 - **REFACTOR**: Minor changes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96)) diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index 9e25d858..1edc8121 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -1,26 +1,27 @@ name: vertex_ai -description: GCP Vertex AI ML platform API client (PaLM, Vector Search, etc.). -version: 0.1.0+2 +description: GCP Vertex AI ML platform API client (PaLM, Matching Engine, etc.). +version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/vertex_ai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:vertex_ai homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.dev +documentation: https://langchaindart.com topics: - ai - nlp - llms - palm + - matching-engine environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - collection: ^1.18.0 - googleapis: ^13.0.0 - googleapis_auth: ^1.6.0 - http: ^1.2.2 + collection: '>=1.17.0 <1.19.0' + googleapis: ^12.0.0 + googleapis_auth: ^1.5.1 + http: ^1.1.0 meta: ^1.11.0 dev_dependencies: - test: ^1.25.8 + test: ^1.25.2 diff --git a/pubspec.yaml b/pubspec.yaml index 8373da6a..70fc02f6 100644 --- a/pubspec.yaml +++ b/pubspec.yaml @@ -4,4 +4,4 @@ environment: sdk: ">=3.0.0 <4.0.0" dev_dependencies: - melos: 6.1.0 + melos: 6.0.0 From 031cb3023cba9751447cf9ad32d18290b6eeb28b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 May 2024 13:45:47 +0200 Subject: [PATCH 131/251] build(deps): bump actions/checkout from 4.1.5 to 4.1.6 (#427) Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.5 to 4.1.6. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/44c2b7a8a4ea60a981eaca3cf939b5f4305c123b...a5ac7e51b41094c92402da3b24376905380afc29) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docs.yaml | 2 +- .github/workflows/test.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 59926dbf..5520d768 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -27,7 +27,7 @@ jobs: timeout-minutes: 30 steps: - name: Checkout repository - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 - name: Setup Pages uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 114e4fab..2b4ff0c5 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 with: ref: "${{ github.event.pull_request.base.sha }}" # Required for pull_request_target fetch-depth: 0 From 7e77ae31fbf64d4ab3b34a9880b1fb686b6ee260 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Mon, 20 May 2024 16:53:41 +0200 Subject: [PATCH 132/251] fix: Make quote nullable in MessageContentTextAnnotationsFileCitation (#428) --- ...ontent_text_annotations_file_citation.dart | 2 +- .../src/generated/schema/schema.freezed.dart | 34 ++++++++++++------- .../lib/src/generated/schema/schema.g.dart | 21 ++++++++---- packages/openai_dart/oas/openapi_curated.yaml | 2 +- 4 files changed, 38 insertions(+), 21 deletions(-) diff --git a/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart b/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart index 5317431b..1e6807c9 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart @@ -20,7 +20,7 @@ class MessageContentTextAnnotationsFileCitation @JsonKey(name: 'file_id') required String fileId, /// The specific quote in the file. - required String quote, + @JsonKey(includeIfNull: false) String? quote, }) = _MessageContentTextAnnotationsFileCitation; /// Object construction from a JSON representation diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 472cae5b..6417d1c3 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -40379,7 +40379,8 @@ mixin _$MessageContentTextAnnotationsFileCitation { String get fileId => throw _privateConstructorUsedError; /// The specific quote in the file. - String get quote => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? 
get quote => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) @@ -40396,7 +40397,9 @@ abstract class $MessageContentTextAnnotationsFileCitationCopyWith<$Res> { _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, MessageContentTextAnnotationsFileCitation>; @useResult - $Res call({@JsonKey(name: 'file_id') String fileId, String quote}); + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(includeIfNull: false) String? quote}); } /// @nodoc @@ -40415,17 +40418,17 @@ class _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, @override $Res call({ Object? fileId = null, - Object? quote = null, + Object? quote = freezed, }) { return _then(_value.copyWith( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, - quote: null == quote + quote: freezed == quote ? _value.quote : quote // ignore: cast_nullable_to_non_nullable - as String, + as String?, ) as $Val); } } @@ -40439,7 +40442,9 @@ abstract class _$$MessageContentTextAnnotationsFileCitationImplCopyWith<$Res> __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(name: 'file_id') String fileId, String quote}); + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(includeIfNull: false) String? quote}); } /// @nodoc @@ -40456,17 +40461,17 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> @override $Res call({ Object? fileId = null, - Object? quote = null, + Object? quote = freezed, }) { return _then(_$MessageContentTextAnnotationsFileCitationImpl( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, - quote: null == quote + quote: freezed == quote ? _value.quote : quote // ignore: cast_nullable_to_non_nullable - as String, + as String?, )); } } @@ -40476,7 +40481,8 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> class _$MessageContentTextAnnotationsFileCitationImpl extends _MessageContentTextAnnotationsFileCitation { const _$MessageContentTextAnnotationsFileCitationImpl( - {@JsonKey(name: 'file_id') required this.fileId, required this.quote}) + {@JsonKey(name: 'file_id') required this.fileId, + @JsonKey(includeIfNull: false) this.quote}) : super._(); factory _$MessageContentTextAnnotationsFileCitationImpl.fromJson( @@ -40490,7 +40496,8 @@ class _$MessageContentTextAnnotationsFileCitationImpl /// The specific quote in the file. @override - final String quote; + @JsonKey(includeIfNull: false) + final String? quote; @override String toString() { @@ -40532,7 +40539,7 @@ abstract class _MessageContentTextAnnotationsFileCitation extends MessageContentTextAnnotationsFileCitation { const factory _MessageContentTextAnnotationsFileCitation( {@JsonKey(name: 'file_id') required final String fileId, - required final String quote}) = + @JsonKey(includeIfNull: false) final String? quote}) = _$MessageContentTextAnnotationsFileCitationImpl; const _MessageContentTextAnnotationsFileCitation._() : super._(); @@ -40548,7 +40555,8 @@ abstract class _MessageContentTextAnnotationsFileCitation @override /// The specific quote in the file. - String get quote; + @JsonKey(includeIfNull: false) + String? 
get quote; @override @JsonKey(ignore: true) _$$MessageContentTextAnnotationsFileCitationImplCopyWith< diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 03a49b59..eeb6a84e 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -3906,15 +3906,24 @@ _$MessageContentTextAnnotationsFileCitationImpl Map json) => _$MessageContentTextAnnotationsFileCitationImpl( fileId: json['file_id'] as String, - quote: json['quote'] as String, + quote: json['quote'] as String?, ); Map _$$MessageContentTextAnnotationsFileCitationImplToJson( - _$MessageContentTextAnnotationsFileCitationImpl instance) => - { - 'file_id': instance.fileId, - 'quote': instance.quote, - }; + _$MessageContentTextAnnotationsFileCitationImpl instance) { + final val = { + 'file_id': instance.fileId, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('quote', instance.quote); + return val; +} _$MessageDeltaContentImageUrlObjectImpl _$$MessageDeltaContentImageUrlObjectImplFromJson( diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 9490261d..46201dd4 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -4890,7 +4890,7 @@ components: type: string required: - file_id - - quote + # - quote # https://github.com/openai/openai-openapi/issues/263 MessageContentTextAnnotationsFilePathObject: type: object description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. From 2e42b8c94b38b94a184c3dd03ab01175bba4af1c Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Mon, 20 May 2024 17:05:58 +0200 Subject: [PATCH 133/251] fix: Rename CreateRunRequestModel factories names (#429) --- .../generated/schema/create_run_request.dart | 4 +- .../src/generated/schema/schema.freezed.dart | 119 +++++++++--------- packages/openai_dart/oas/main.dart | 74 +++++------ .../test/openai_client_assistants_test.dart | 10 +- 4 files changed, 104 insertions(+), 103 deletions(-) diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index edd89f09..95ad74a8 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -221,12 +221,12 @@ sealed class CreateRunRequestModel with _$CreateRunRequestModel { const CreateRunRequestModel._(); /// Available models. Mind that the list may not be exhaustive nor up-to-date. - const factory CreateRunRequestModel.enumeration( + const factory CreateRunRequestModel.model( RunModels value, ) = CreateRunRequestModelEnumeration; /// The ID of the model to use for this request. 
- const factory CreateRunRequestModel.string( + const factory CreateRunRequestModel.modelId( String value, ) = CreateRunRequestModelString; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 6417d1c3..abfb4fc9 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -30240,9 +30240,9 @@ abstract class _CreateRunRequest extends CreateRunRequest { CreateRunRequestModel _$CreateRunRequestModelFromJson( Map json) { switch (json['runtimeType']) { - case 'enumeration': + case 'model': return CreateRunRequestModelEnumeration.fromJson(json); - case 'string': + case 'modelId': return CreateRunRequestModelString.fromJson(json); default: @@ -30259,40 +30259,39 @@ mixin _$CreateRunRequestModel { Object get value => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function(RunModels value) enumeration, - required TResult Function(String value) string, + required TResult Function(RunModels value) model, + required TResult Function(String value) modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(RunModels value)? enumeration, - TResult? Function(String value)? string, + TResult? Function(RunModels value)? model, + TResult? Function(String value)? modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(RunModels value)? enumeration, - TResult Function(String value)? string, + TResult Function(RunModels value)? model, + TResult Function(String value)? modelId, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(CreateRunRequestModelEnumeration value) - enumeration, - required TResult Function(CreateRunRequestModelString value) string, + required TResult Function(CreateRunRequestModelEnumeration value) model, + required TResult Function(CreateRunRequestModelString value) modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult? Function(CreateRunRequestModelString value)? string, + TResult? Function(CreateRunRequestModelEnumeration value)? model, + TResult? Function(CreateRunRequestModelString value)? modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult Function(CreateRunRequestModelString value)? string, + TResult Function(CreateRunRequestModelEnumeration value)? model, + TResult Function(CreateRunRequestModelString value)? modelId, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -30358,7 +30357,7 @@ class _$CreateRunRequestModelEnumerationImpl extends CreateRunRequestModelEnumeration { const _$CreateRunRequestModelEnumerationImpl(this.value, {final String? $type}) - : $type = $type ?? 'enumeration', + : $type = $type ?? 
'model', super._(); factory _$CreateRunRequestModelEnumerationImpl.fromJson( @@ -30373,7 +30372,7 @@ class _$CreateRunRequestModelEnumerationImpl @override String toString() { - return 'CreateRunRequestModel.enumeration(value: $value)'; + return 'CreateRunRequestModel.model(value: $value)'; } @override @@ -30399,30 +30398,30 @@ class _$CreateRunRequestModelEnumerationImpl @override @optionalTypeArgs TResult when({ - required TResult Function(RunModels value) enumeration, - required TResult Function(String value) string, + required TResult Function(RunModels value) model, + required TResult Function(String value) modelId, }) { - return enumeration(value); + return model(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(RunModels value)? enumeration, - TResult? Function(String value)? string, + TResult? Function(RunModels value)? model, + TResult? Function(String value)? modelId, }) { - return enumeration?.call(value); + return model?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(RunModels value)? enumeration, - TResult Function(String value)? string, + TResult Function(RunModels value)? model, + TResult Function(String value)? modelId, required TResult orElse(), }) { - if (enumeration != null) { - return enumeration(value); + if (model != null) { + return model(value); } return orElse(); } @@ -30430,31 +30429,30 @@ class _$CreateRunRequestModelEnumerationImpl @override @optionalTypeArgs TResult map({ - required TResult Function(CreateRunRequestModelEnumeration value) - enumeration, - required TResult Function(CreateRunRequestModelString value) string, + required TResult Function(CreateRunRequestModelEnumeration value) model, + required TResult Function(CreateRunRequestModelString value) modelId, }) { - return enumeration(this); + return model(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult? Function(CreateRunRequestModelString value)? string, + TResult? Function(CreateRunRequestModelEnumeration value)? model, + TResult? Function(CreateRunRequestModelString value)? modelId, }) { - return enumeration?.call(this); + return model?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult Function(CreateRunRequestModelString value)? string, + TResult Function(CreateRunRequestModelEnumeration value)? model, + TResult Function(CreateRunRequestModelString value)? modelId, required TResult orElse(), }) { - if (enumeration != null) { - return enumeration(this); + if (model != null) { + return model(this); } return orElse(); } @@ -30521,7 +30519,7 @@ class __$$CreateRunRequestModelStringImplCopyWithImpl<$Res> @JsonSerializable() class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { const _$CreateRunRequestModelStringImpl(this.value, {final String? $type}) - : $type = $type ?? 'string', + : $type = $type ?? 
'modelId', super._(); factory _$CreateRunRequestModelStringImpl.fromJson( @@ -30536,7 +30534,7 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { @override String toString() { - return 'CreateRunRequestModel.string(value: $value)'; + return 'CreateRunRequestModel.modelId(value: $value)'; } @override @@ -30561,30 +30559,30 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { @override @optionalTypeArgs TResult when({ - required TResult Function(RunModels value) enumeration, - required TResult Function(String value) string, + required TResult Function(RunModels value) model, + required TResult Function(String value) modelId, }) { - return string(value); + return modelId(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(RunModels value)? enumeration, - TResult? Function(String value)? string, + TResult? Function(RunModels value)? model, + TResult? Function(String value)? modelId, }) { - return string?.call(value); + return modelId?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(RunModels value)? enumeration, - TResult Function(String value)? string, + TResult Function(RunModels value)? model, + TResult Function(String value)? modelId, required TResult orElse(), }) { - if (string != null) { - return string(value); + if (modelId != null) { + return modelId(value); } return orElse(); } @@ -30592,31 +30590,30 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { @override @optionalTypeArgs TResult map({ - required TResult Function(CreateRunRequestModelEnumeration value) - enumeration, - required TResult Function(CreateRunRequestModelString value) string, + required TResult Function(CreateRunRequestModelEnumeration value) model, + required TResult Function(CreateRunRequestModelString value) modelId, }) { - return string(this); + return modelId(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult? Function(CreateRunRequestModelString value)? string, + TResult? Function(CreateRunRequestModelEnumeration value)? model, + TResult? Function(CreateRunRequestModelString value)? modelId, }) { - return string?.call(this); + return modelId?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult Function(CreateRunRequestModelString value)? string, + TResult Function(CreateRunRequestModelEnumeration value)? model, + TResult Function(CreateRunRequestModelString value)? modelId, required TResult orElse(), }) { - if (string != null) { - return string(this); + if (modelId != null) { + return modelId(this); } return orElse(); } diff --git a/packages/openai_dart/oas/main.dart b/packages/openai_dart/oas/main.dart index 1f2fe406..f1fbee08 100644 --- a/packages/openai_dart/oas/main.dart +++ b/packages/openai_dart/oas/main.dart @@ -77,53 +77,55 @@ String? 
_onSchemaUnionFactoryName( // Assistant 'AssistantModelEnumeration' => 'model', 'AssistantModelString' => 'modelId', + 'CreateAssistantRequestResponseFormatAssistantsResponseFormat' => + 'format', + 'CreateAssistantRequestResponseFormatEnumeration' => 'mode', + 'CreateMessageRequestContentListMessageContent' => 'parts', + 'CreateMessageRequestContentString' => 'text', + 'CreateRunRequestModelEnumeration' => 'model', + 'CreateRunRequestModelString' => 'modelId', + 'CreateRunRequestResponseFormatAssistantsResponseFormat' => 'format', + 'CreateRunRequestResponseFormatEnumeration' => 'mode', + 'CreateRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', + 'CreateRunRequestToolChoiceEnumeration' => 'mode', + 'CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat' => + 'format', + 'CreateThreadAndRunRequestResponseFormatEnumeration' => 'mode', + 'CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', + 'CreateThreadAndRunRequestToolChoiceEnumeration' => 'mode', 'MessageContentImageFileObject' => 'imageFile', - 'MessageDeltaContentImageFileObject' => 'imageFile', - 'MessageContentTextObject' => 'text', - 'MessageDeltaContentTextObject' => 'text', 'MessageContentImageUrlObject' => 'imageUrl', 'MessageContentTextAnnotationsFileCitationObject' => 'fileCitation', - 'MessageDeltaContentTextAnnotationsFileCitationObject' => 'fileCitation', 'MessageContentTextAnnotationsFilePathObject' => 'filePath', + 'MessageContentTextObject' => 'text', + 'MessageDeltaContentImageFileObject' => 'imageFile', + 'MessageDeltaContentTextAnnotationsFileCitationObject' => 'fileCitation', 'MessageDeltaContentTextAnnotationsFilePathObject' => 'filePath', + 'MessageDeltaContentTextObject' => 'text', + 'ModifyAssistantRequestResponseFormatAssistantsResponseFormat' => + 'format', + 'ModifyAssistantRequestResponseFormatEnumeration' => 'mode', 'RunModelEnumeration' => 'model', 'RunModelString' => 'modelId', - 'ThreadAndRunModelEnumeration' => 'model', - 'ThreadAndRunModelString' => 'modelId', - 'RunStepDetailsToolCallsCodeObject' => 'codeInterpreter', + 'RunObjectResponseFormatAssistantsResponseFormat' => 'format', + 'RunObjectResponseFormatEnumeration' => 'mode', + 'RunObjectToolChoiceAssistantsNamedToolChoice' => 'tool', + 'RunObjectToolChoiceEnumeration' => 'mode', + 'RunStepDeltaStepDetailsMessageCreationObject' => 'messageCreation', 'RunStepDeltaStepDetailsToolCallsCodeObject' => 'codeInterpreter', - 'RunStepDetailsToolCallsFileSearchObject' => 'fileSearch', + 'RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' => 'image', + 'RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' => 'logs', 'RunStepDeltaStepDetailsToolCallsFileSearchObject' => 'fileSearch', - 'RunStepDetailsToolCallsFunctionObject' => 'function', 'RunStepDeltaStepDetailsToolCallsFunctionObject' => 'function', - 'RunStepDetailsToolCallsCodeOutputLogsObject' => 'logs', - 'RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' => 'logs', - 'RunStepDetailsToolCallsCodeOutputImageObject' => 'image', - 'RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' => 'image', + 'RunStepDeltaStepDetailsToolCallsObject' => 'toolCalls', 'RunStepDetailsMessageCreationObject' => 'messageCreation', - 'RunStepDeltaStepDetailsMessageCreationObject' => 'messageCreation', + 'RunStepDetailsToolCallsCodeObject' => 'codeInterpreter', + 'RunStepDetailsToolCallsCodeOutputImageObject' => 'image', + 'RunStepDetailsToolCallsCodeOutputLogsObject' => 'logs', + 'RunStepDetailsToolCallsFileSearchObject' => 'fileSearch', + 
'RunStepDetailsToolCallsFunctionObject' => 'function', 'RunStepDetailsToolCallsObject' => 'toolCalls', - 'RunStepDeltaStepDetailsToolCallsObject' => 'toolCalls', - 'CreateRunRequestResponseFormatEnumeration' => 'mode', - 'CreateThreadAndRunRequestResponseFormatEnumeration' => 'mode', - 'RunObjectResponseFormatEnumeration' => 'mode', - 'CreateAssistantRequestResponseFormatEnumeration' => 'mode', - 'ModifyAssistantRequestResponseFormatEnumeration' => 'mode', - 'CreateRunRequestResponseFormatAssistantsResponseFormat' => 'format', - 'CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat' => - 'format', - 'RunObjectResponseFormatAssistantsResponseFormat' => 'format', - 'CreateAssistantRequestResponseFormatAssistantsResponseFormat' => - 'format', - 'ModifyAssistantRequestResponseFormatAssistantsResponseFormat' => - 'format', - 'CreateRunRequestToolChoiceEnumeration' => 'mode', - 'CreateThreadAndRunRequestToolChoiceEnumeration' => 'mode', - 'RunObjectToolChoiceEnumeration' => 'mode', - 'CreateRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', - 'CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', - 'RunObjectToolChoiceAssistantsNamedToolChoice' => 'tool', - 'CreateMessageRequestContentString' => 'text', - 'CreateMessageRequestContentListMessageContent' => 'parts', + 'ThreadAndRunModelEnumeration' => 'model', + 'ThreadAndRunModelString' => 'modelId', _ => null, }; diff --git a/packages/openai_dart/test/openai_client_assistants_test.dart b/packages/openai_dart/test/openai_client_assistants_test.dart index cf622c2c..61479182 100644 --- a/packages/openai_dart/test/openai_client_assistants_test.dart +++ b/packages/openai_dart/test/openai_client_assistants_test.dart @@ -8,6 +8,8 @@ import 'package:test/test.dart'; // https://platform.openai.com/docs/assistants/overview void main() { + const defaultModel = 'gpt-4o'; + group('OpenAI Assistants API tests', timeout: const Timeout(Duration(minutes: 5)), () { late OpenAIClient client; @@ -23,14 +25,13 @@ void main() { }); Future createAssistant() async { - const model = 'gpt-4'; const name = 'Math Tutor'; const description = 'Help students with math homework'; const instructions = 'You are a personal math tutor. Write and run code to answer math questions.'; final res = await client.createAssistant( request: const CreateAssistantRequest( - model: AssistantModel.modelId(model), + model: AssistantModel.modelId(defaultModel), name: name, description: description, instructions: instructions, @@ -42,7 +43,7 @@ void main() { expect(res.createdAt, greaterThan(0)); expect(res.name, name); expect(res.description, description); - expect(res.model, model); + expect(res.model, startsWith(defaultModel)); expect(res.instructions, instructions); expect(res.tools, hasLength(1)); final tool = res.tools.first; @@ -114,6 +115,7 @@ void main() { assistantId: assistantId, instructions: 'Please address the user as Jane Doe. 
The user has a premium account.', + model: const CreateRunRequestModel.modelId(defaultModel), ), ); expect(res.id, isNotNull); @@ -129,7 +131,7 @@ void main() { expect(res.cancelledAt, isNull); expect(res.failedAt, isNull); expect(res.completedAt, isNull); - expect(res.model, startsWith('gpt-4')); + expect(res.model, startsWith(defaultModel)); expect(res.instructions, isNotEmpty); expect(res.tools, hasLength(1)); expect(res.metadata, isEmpty); From 092b955ecf16d3eac571e05d7cdd632f90cc800c Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Mon, 20 May 2024 17:21:17 +0200 Subject: [PATCH 134/251] fix: digest path param in Ollama blob endpoints (#430) --- packages/ollama_dart/README.md | 4 ++-- .../ollama_dart/lib/src/generated/client.dart | 22 ++++++++----------- packages/ollama_dart/oas/ollama-curated.yaml | 11 +++++----- 3 files changed, 17 insertions(+), 20 deletions(-) diff --git a/packages/ollama_dart/README.md b/packages/ollama_dart/README.md index 0aaf5a97..27895b5b 100644 --- a/packages/ollama_dart/README.md +++ b/packages/ollama_dart/README.md @@ -251,11 +251,11 @@ await for (final res in stream) { #### Check if a Blob Exists -Check if a blob is known to the server. +Ensures that the file blob used for a FROM or ADAPTER field exists on the server. This is checking your Ollama server and not Ollama.ai. ```dart await client.checkBlob( - name: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', + digest: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', ); ``` diff --git a/packages/ollama_dart/lib/src/generated/client.dart b/packages/ollama_dart/lib/src/generated/client.dart index 3ab44797..a22d8729 100644 --- a/packages/ollama_dart/lib/src/generated/client.dart +++ b/packages/ollama_dart/lib/src/generated/client.dart @@ -605,26 +605,23 @@ class OllamaClient { /// Create a blob from a file. Returns the server file path. /// - /// `name`: the SHA256 digest of the blob + /// `digest`: the SHA256 digest of the blob /// /// `request`: No description /// /// `POST` `http://localhost:11434/api/blobs/{digest}` Future createBlob({ - required String name, + required String digest, String? request, }) async { final _ = await makeRequest( baseUrl: 'http://localhost:11434/api', - path: '/blobs/{digest}', + path: '/blobs/$digest', method: HttpMethod.post, isMultipart: false, requestType: 'application/octet-stream', responseType: '', body: request, - queryParams: { - 'name': name, - }, ); } @@ -632,24 +629,23 @@ class OllamaClient { // METHOD: checkBlob // ------------------------------------------ - /// Check to see if a blob exists on the Ollama server which is useful when creating models. + /// Ensures that the file blob used for a FROM or ADAPTER field exists on the server. /// - /// `name`: the SHA256 digest of the blob + /// This is checking your Ollama server and not Ollama.ai. 
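For reference, a minimal sketch of calling the updated blob endpoints after this change, with `digest` sent as a path parameter instead of a `name` query parameter. It assumes a locally running Ollama server with the default base URL and reuses the digest value from the README example; the blob contents are placeholder text.

```dart
import 'package:ollama_dart/ollama_dart.dart';

Future<void> main() async {
  final client = OllamaClient();

  // SHA256 digest of the file to be referenced from a Modelfile
  // (same value as in the README example).
  const digest =
      'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2';

  // POST /api/blobs/{digest}: upload the file contents for that digest.
  await client.createBlob(digest: digest, request: 'file contents');

  // HEAD /api/blobs/{digest}: verify the blob exists on the local server;
  // a non-2xx response surfaces as a client exception.
  await client.checkBlob(digest: digest);
}
```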
+ /// + /// `digest`: the SHA256 digest of the blob /// /// `HEAD` `http://localhost:11434/api/blobs/{digest}` Future checkBlob({ - required String name, + required String digest, }) async { final _ = await makeRequest( baseUrl: 'http://localhost:11434/api', - path: '/blobs/{digest}', + path: '/blobs/$digest', method: HttpMethod.head, isMultipart: false, requestType: '', responseType: '', - queryParams: { - 'name': name, - }, ); } } diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index b63d0c21..876bab50 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -198,10 +198,11 @@ paths: operationId: checkBlob tags: - Models - summary: Check to see if a blob exists on the Ollama server which is useful when creating models. + summary: Ensures that the file blob used for a FROM or ADAPTER field exists on the server. + description: This is checking your Ollama server and not Ollama.ai. parameters: - - in: query - name: name + - in: path + name: digest schema: type: string required: true @@ -218,8 +219,8 @@ paths: - Models summary: Create a blob from a file. Returns the server file path. parameters: - - in: query - name: name + - in: path + name: digest schema: type: string required: true From 24480293845f4d22551d03c942548e8fb07d00e2 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Mon, 20 May 2024 17:27:24 +0200 Subject: [PATCH 135/251] docs: Fix lint issues in langchain_firebase example --- examples/browser_summarizer/pubspec.lock | 4 ++-- examples/hello_world_flutter/pubspec.lock | 4 ++-- .../langchain_firebase/example/lib/main.dart | 2 +- .../langchain_firebase/example/pubspec.lock | 24 +++++++++---------- packages/langchain_firebase/pubspec.lock | 24 +++++++++---------- 5 files changed, 29 insertions(+), 29 deletions(-) diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index 5050c14b..cc499c81 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -291,10 +291,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" nested: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 05dca7e4..a12c6037 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -175,10 +175,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" nested: dependency: transitive description: diff --git a/packages/langchain_firebase/example/lib/main.dart b/packages/langchain_firebase/example/lib/main.dart index 7cbb5e8e..44e019e9 100644 --- a/packages/langchain_firebase/example/lib/main.dart +++ b/packages/langchain_firebase/example/lib/main.dart @@ -580,7 +580,7 @@ class MessageWidget extends StatelessWidget { decoration: BoxDecoration( color: isFromUser ? 
Theme.of(context).colorScheme.primaryContainer - : Theme.of(context).colorScheme.surfaceVariant, + : Theme.of(context).colorScheme.surfaceContainerHighest, borderRadius: BorderRadius.circular(18), ), padding: const EdgeInsets.symmetric( diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 6ea344fc..0dd384a2 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -257,26 +257,26 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" + sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" url: "https://pub.dev" source: hosted - version: "10.0.0" + version: "10.0.4" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 + sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.3" leak_tracker_testing: dependency: transitive description: name: leak_tracker_testing - sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 + sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.1" lints: dependency: transitive description: @@ -313,10 +313,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" path: dependency: transitive description: @@ -398,10 +398,10 @@ packages: dependency: transitive description: name: test_api - sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" + sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" url: "https://pub.dev" source: hosted - version: "0.6.1" + version: "0.7.0" typed_data: dependency: transitive description: @@ -430,10 +430,10 @@ packages: dependency: transitive description: name: vm_service - sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 + sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" url: "https://pub.dev" source: hosted - version: "13.0.0" + version: "14.2.1" web: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index c13007f5..89e38672 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -211,26 +211,26 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" + sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" url: "https://pub.dev" source: hosted - version: "10.0.0" + version: "10.0.4" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 + sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.3" leak_tracker_testing: dependency: transitive description: name: leak_tracker_testing - sha256: 
a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 + sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.1" matcher: dependency: transitive description: @@ -251,10 +251,10 @@ packages: dependency: "direct main" description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" path: dependency: transitive description: @@ -336,10 +336,10 @@ packages: dependency: transitive description: name: test_api - sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" + sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" url: "https://pub.dev" source: hosted - version: "0.6.1" + version: "0.7.0" typed_data: dependency: transitive description: @@ -368,10 +368,10 @@ packages: dependency: transitive description: name: vm_service - sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 + sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" url: "https://pub.dev" source: hosted - version: "13.0.0" + version: "14.2.1" web: dependency: transitive description: From 7a5e69fa62a4894de2495cba20a8b2890c165182 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Mon, 20 May 2024 17:28:01 +0200 Subject: [PATCH 136/251] chore(release): publish packages - langchain_firebase@0.1.0+1 - ollama_dart@0.1.0+1 - openai_dart@0.3.2+1 - langchain_ollama@0.2.1+1 - langchain_openai@0.6.1+1 --- CHANGELOG.md | 49 +++++++++++++++++++ examples/browser_summarizer/pubspec.yaml | 2 +- examples/docs_examples/pubspec.yaml | 4 +- examples/hello_world_backend/pubspec.yaml | 2 +- examples/hello_world_cli/pubspec.yaml | 2 +- examples/hello_world_flutter/pubspec.yaml | 2 +- packages/langchain_chroma/pubspec.yaml | 2 +- packages/langchain_firebase/CHANGELOG.md | 4 ++ .../langchain_firebase/example/pubspec.yaml | 2 +- packages/langchain_firebase/pubspec.yaml | 2 +- packages/langchain_ollama/CHANGELOG.md | 4 ++ packages/langchain_ollama/pubspec.yaml | 4 +- packages/langchain_openai/CHANGELOG.md | 4 ++ packages/langchain_openai/pubspec.yaml | 4 +- packages/langchain_pinecone/pubspec.yaml | 2 +- packages/langchain_supabase/pubspec.yaml | 2 +- packages/ollama_dart/CHANGELOG.md | 4 ++ packages/ollama_dart/pubspec.yaml | 2 +- packages/openai_dart/CHANGELOG.md | 5 ++ packages/openai_dart/pubspec.yaml | 2 +- 20 files changed, 87 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a668f597..079c8450 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,46 @@ Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. +## 2024-05-20 + +### Changes + +--- + +Packages with breaking changes: + +- There are no breaking changes in this release. + +Packages with other changes: + +- [`langchain_firebase` - `v0.1.0+1`](#langchain_firebase---v0101) +- [`ollama_dart` - `v0.1.0+1`](#ollama_dart---v0101) +- [`openai_dart` - `v0.3.2+1`](#openai_dart---v0321) +- [`langchain_ollama` - `v0.2.1+1`](#langchain_ollama---v0211) +- [`langchain_openai` - `v0.6.1+1`](#langchain_openai---v0611) + +Packages with dependency updates only: + +> Packages listed below depend on other packages in this workspace that have had changes. 
Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project. + +- `langchain_ollama` - `v0.2.1+1` +- `langchain_openai` - `v0.6.1+1` + +--- + +#### `openai_dart` - `v0.3.2+1` + +- **FIX**: Rename CreateRunRequestModel factories names ([#429](https://github.com/davidmigloz/langchain_dart/issues/429)). ([fd15793b](https://github.com/davidmigloz/langchain_dart/commit/fd15793b3c4ac94dfc90567b4a709e1458f4e0e8)) +- **FIX**: Make quote nullable in MessageContentTextAnnotationsFileCitation ([#428](https://github.com/davidmigloz/langchain_dart/issues/428)). ([75b95645](https://github.com/davidmigloz/langchain_dart/commit/75b95645a58d51b369a01e261393e17f7463e1f5)) + +#### `ollama_dart` - `v0.1.0+1` + +- **FIX**: digest path param in Ollama blob endpoints ([#430](https://github.com/davidmigloz/langchain_dart/issues/430)). ([2e9e935a](https://github.com/davidmigloz/langchain_dart/commit/2e9e935aefd74e5e9e09a23188a6c77ce535661d)) + +#### `langchain_firebase` - `v0.1.0+1` + +- **DOCS**: Fix lint issues in langchain_firebase example. ([f85a6ad7](https://github.com/davidmigloz/langchain_dart/commit/f85a6ad755e00c513bd4349663e33d40be8a696c)) + ## 2024-05-14 ### Changes @@ -2387,6 +2427,15 @@ Packages with changes: ### Changes +#### `langchain` - `v0.0.1` + + - Initial public release. +t/commit/e22f22c89f188a019b96a7c0003dbd26471bebb7)) + +## 2023-07-02 + +### Changes + #### `langchain` - `v0.0.1` - Initial public release. diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 9a2c4936..2ab1aff4 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -15,7 +15,7 @@ dependencies: js: ^0.7.1 langchain: ^0.7.1 langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 shared_preferences: ^2.2.2 flutter: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 0b57edbc..37662d4c 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -12,5 +12,5 @@ dependencies: langchain_community: 0.2.0+1 langchain_google: ^0.5.0 langchain_mistralai: ^0.2.0+1 - langchain_ollama: ^0.2.1 - langchain_openai: ^0.6.1 + langchain_ollama: ^0.2.1+1 + langchain_openai: ^0.6.1+1 diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index fa43a6d8..4c7f0059 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -8,6 +8,6 @@ environment: dependencies: langchain: ^0.7.1 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 shelf: ^1.4.1 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index 581a3927..d814f7c4 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -8,4 +8,4 @@ environment: dependencies: langchain: ^0.7.1 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index c000d972..9fc3a925 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -12,7 +12,7 @@ dependencies: equatable: ^2.0.5 flutter_bloc: ^8.1.5 langchain: ^0.7.1 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 flutter: uses-material-design: true diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 
3da841ef..197d0776 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -27,4 +27,4 @@ dev_dependencies: test: ^1.25.2 langchain: ^0.7.1 langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index 862156b6..a7350a9b 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+1 + + - **DOCS**: Fix lint issues in langchain_firebase example. ([f85a6ad7](https://github.com/davidmigloz/langchain_dart/commit/f85a6ad755e00c513bd4349663e33d40be8a696c)) + ## 0.1.0 - **FEAT**: Add support for ChatFirebaseVertexAI ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f)) diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index 76900f0d..a4857f0d 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -14,7 +14,7 @@ dependencies: sdk: flutter flutter_markdown: ^0.6.22 langchain: 0.7.1 - langchain_firebase: 0.1.0 + langchain_firebase: 0.1.0+1 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 8c5e6995..cfb0d9f2 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). -version: 0.1.0 +version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index 6f38a23b..81bb56d2 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.1+1 + + - Update a dependency to the latest release. + ## 0.2.1 - **FEAT**: Handle finish reason in ChatOllama ([#416](https://github.com/davidmigloz/langchain_dart/issues/416)). ([a5e1af13](https://github.com/davidmigloz/langchain_dart/commit/a5e1af13ef4d2db690ab599dbf5e42f28659a059)) diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 9b4736df..adfa39d4 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_ollama description: LangChain.dart integration module for Ollama (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). 
-version: 0.2.1 +version: 0.2.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart @@ -22,7 +22,7 @@ dependencies: langchain_core: ^0.3.1 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.1.0 + ollama_dart: ^0.1.0+1 uuid: ^4.3.3 dev_dependencies: diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index 2d1e113a..d1a0368a 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.6.1+1 + + - Update a dependency to the latest release. + ## 0.6.1 - **FEAT**: Add GPT-4o to model catalog ([#420](https://github.com/davidmigloz/langchain_dart/issues/420)). ([96214307](https://github.com/davidmigloz/langchain_dart/commit/96214307ec8ae045dade687d4c623bd4dc1be896)) diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index b70412db..ca76313e 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_openai description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). -version: 0.6.1 +version: 0.6.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart @@ -22,7 +22,7 @@ dependencies: langchain_core: ^0.3.1 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.3.2 + openai_dart: ^0.3.2+1 uuid: ^4.3.3 dev_dependencies: diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index 141a96f4..c64dda9f 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -25,4 +25,4 @@ dependencies: dev_dependencies: test: ^1.25.2 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index c480cdc7..5450773b 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -26,4 +26,4 @@ dev_dependencies: test: ^1.25.2 langchain: ^0.7.1 langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1 + langchain_openai: ^0.6.1+1 diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index 4b5ff033..f7c943f9 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+1 + + - **FIX**: digest path param in Ollama blob endpoints ([#430](https://github.com/davidmigloz/langchain_dart/issues/430)). ([2e9e935a](https://github.com/davidmigloz/langchain_dart/commit/2e9e935aefd74e5e9e09a23188a6c77ce535661d)) + ## 0.1.0 > Note: This release has breaking changes. diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 30f792f5..e2eee5ca 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: ollama_dart description: Dart Client for the Ollama API (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). 
-version: 0.1.0 +version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index 632fa141..6e366631 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.3.2+1 + + - **FIX**: Rename CreateRunRequestModel factories names ([#429](https://github.com/davidmigloz/langchain_dart/issues/429)). ([fd15793b](https://github.com/davidmigloz/langchain_dart/commit/fd15793b3c4ac94dfc90567b4a709e1458f4e0e8)) + - **FIX**: Make quote nullable in MessageContentTextAnnotationsFileCitation ([#428](https://github.com/davidmigloz/langchain_dart/issues/428)). ([75b95645](https://github.com/davidmigloz/langchain_dart/commit/75b95645a58d51b369a01e261393e17f7463e1f5)) + ## 0.3.2 - **FEAT**: Add GPT-4o to model catalog ([#420](https://github.com/davidmigloz/langchain_dart/issues/420)). ([96214307](https://github.com/davidmigloz/langchain_dart/commit/96214307ec8ae045dade687d4c623bd4dc1be896)) diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index a6fd761e..30fee90f 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: openai_dart description: Dart Client for the OpenAI API (completions, chat, embeddings, etc.). -version: 0.3.2 +version: 0.3.2+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart From bb05c6a76b203a279b19467a5ed4407d7a6af6c1 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Tue, 21 May 2024 15:31:10 +0200 Subject: [PATCH 137/251] test: Fix ollama_dart tests --- packages/ollama_dart/example/ollama_dart_example.dart | 2 +- packages/ollama_dart/test/ollama_dart_models_test.dart | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/ollama_dart/example/ollama_dart_example.dart b/packages/ollama_dart/example/ollama_dart_example.dart index 15ef53d9..fab5f712 100644 --- a/packages/ollama_dart/example/ollama_dart_example.dart +++ b/packages/ollama_dart/example/ollama_dart_example.dart @@ -217,7 +217,7 @@ Future _pushModelStream(final OllamaClient client) async { Future _checkBlob(final OllamaClient client) async { await client.checkBlob( - name: + digest: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', ); } diff --git a/packages/ollama_dart/test/ollama_dart_models_test.dart b/packages/ollama_dart/test/ollama_dart_models_test.dart index f77a9d32..abb3cef3 100644 --- a/packages/ollama_dart/test/ollama_dart_models_test.dart +++ b/packages/ollama_dart/test/ollama_dart_models_test.dart @@ -149,14 +149,14 @@ void main() { test('Test check blob', skip: true, () async { await client.checkBlob( - name: + digest: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', ); }); test('Test create blob', skip: true, () async { await client.createBlob( - name: + digest: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', request: 'file contents', ); From 720a4450b4550e61172d01196091ee0b630f0d34 Mon Sep 17 00:00:00 2001 From: Alfredo Bautista Date: Thu, 23 May 2024 10:21:01 +0200 Subject: [PATCH 138/251] feat: Implement anthropic_sdk_dart, a 
Dart client for Anthropic API (#433) Co-authored-by: David Miguel --- packages/anthropic_sdk_dart/CHANGELOG.md | 3 + packages/anthropic_sdk_dart/LICENSE | 21 + packages/anthropic_sdk_dart/README.md | 172 + .../anthropic_sdk_dart/analysis_options.yaml | 1 + packages/anthropic_sdk_dart/build.yaml | 13 + .../lib/anthropic_sdk_dart.dart | 7 + .../anthropic_sdk_dart/lib/src/client.dart | 101 + .../lib/src/extensions.dart | 13 + .../lib/src/generated/client.dart | 395 ++ .../lib/src/generated/schema/block.dart | 55 + .../schema/create_message_request.dart | 293 + .../create_message_request_metadata.dart | 44 + .../generated/schema/image_block_source.dart | 74 + .../lib/src/generated/schema/message.dart | 162 + .../src/generated/schema/message_delta.dart | 61 + .../generated/schema/message_delta_usage.dart | 51 + .../src/generated/schema/message_role.dart | 17 + .../schema/message_stream_event.dart | 124 + .../schema/message_stream_event_type.dart | 27 + .../lib/src/generated/schema/schema.dart | 25 + .../src/generated/schema/schema.freezed.dart | 5620 +++++++++++++++++ .../lib/src/generated/schema/schema.g.dart | 404 ++ .../lib/src/generated/schema/stop_reason.dart | 28 + .../generated/schema/text_block_delta.dart | 44 + .../lib/src/generated/schema/usage.dart | 54 + .../lib/src/http_client/http_client.dart | 4 + .../lib/src/http_client/http_client_html.dart | 18 + .../lib/src/http_client/http_client_io.dart | 12 + .../lib/src/http_client/http_client_stub.dart | 10 + .../oas/anthropic_openapi_curated.yaml | 562 ++ packages/anthropic_sdk_dart/oas/main.dart | 45 + packages/anthropic_sdk_dart/pubspec.lock | 627 ++ packages/anthropic_sdk_dart/pubspec.yaml | 34 + .../test/messages_test.dart | 150 + 34 files changed, 9271 insertions(+) create mode 100644 packages/anthropic_sdk_dart/CHANGELOG.md create mode 100644 packages/anthropic_sdk_dart/LICENSE create mode 100644 packages/anthropic_sdk_dart/README.md create mode 100644 packages/anthropic_sdk_dart/analysis_options.yaml create mode 100644 packages/anthropic_sdk_dart/build.yaml create mode 100644 packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/client.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/extensions.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/client.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart create mode 100644 
packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart create mode 100644 packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml create mode 100644 packages/anthropic_sdk_dart/oas/main.dart create mode 100644 packages/anthropic_sdk_dart/pubspec.lock create mode 100644 packages/anthropic_sdk_dart/pubspec.yaml create mode 100644 packages/anthropic_sdk_dart/test/messages_test.dart diff --git a/packages/anthropic_sdk_dart/CHANGELOG.md b/packages/anthropic_sdk_dart/CHANGELOG.md new file mode 100644 index 00000000..90f8e244 --- /dev/null +++ b/packages/anthropic_sdk_dart/CHANGELOG.md @@ -0,0 +1,3 @@ +## 0.0.1-dev.1 + +- Bootstrap package. diff --git a/packages/anthropic_sdk_dart/LICENSE b/packages/anthropic_sdk_dart/LICENSE new file mode 100644 index 00000000..f407ffdd --- /dev/null +++ b/packages/anthropic_sdk_dart/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 David Miguel Lozano + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/packages/anthropic_sdk_dart/README.md b/packages/anthropic_sdk_dart/README.md new file mode 100644 index 00000000..6ccb7d3a --- /dev/null +++ b/packages/anthropic_sdk_dart/README.md @@ -0,0 +1,172 @@ +# Anthropic Dart Client + +[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) +[![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) +[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) +[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) + +Unofficial Dart client for [Anthropic](https://docs.anthropic.com/en/api) API (aka Claude API). 
+ +## Features + +- Fully type-safe, [documented](https://pub.dev/documentation/anthropic_sdk_dart/latest) and tested +- All platforms supported (including streaming on web) +- Custom base URL, headers and query params support (e.g. HTTP proxies) +- Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) + +**Supported endpoints:** + +- Messages (with streaming support) + +## Table of contents + +- [Usage](#usage) + * [Authentication](#authentication) + * [Messages](#messages) +- [Advance Usage](#advance-usage) + * [Default HTTP client](#default-http-client) + * [Custom HTTP client](#custom-http-client) + * [Using a proxy](#using-a-proxy) + + [HTTP proxy](#http-proxy) + + [SOCKS5 proxy](#socks5-proxy) +- [Acknowledgements](#acknowledgements) +- [License](#license) + +## Usage + +Refer to the [documentation](https://docs.anthropic.com) for more information about the API. + +### Authentication + +The Anthropic API uses API keys for authentication. Visit the [Anthropic console](https://console.anthropic.com/settings/keys) to retrieve the API key you'll use in your requests. + +> **Remember that your API key is a secret!** +> Do not share it with others or expose it in any client-side code (browsers, apps). Production requests must be routed through your own backend server where your API key can be securely loaded from an environment variable or key management service. + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; +final client = AnthropicClient(apiKey: apiKey); +``` + +### Messages + +Send a structured list of input messages with text and/or image content, and the model will generate the next message in the conversation. + +**Create a Message:** + +```dart +final res = await client.createMessage( + request: CreateMessageRequest( + model: Model.model(Models.claude3Opus20240229), + maxTokens: 1024, + messages: [ + Message( + role: MessageRole.user, + content: 'Hello, Claude', + ), + ], + ), +); +print(res.content.text); +// Hi there! How can I help you today? +``` + +`Model` is a sealed class that offers two ways to specify the model: +- `Model.modelId('model-id')`: the model ID as string (e.g. `'claude-3-haiku-20240307'`). +- `Model.model(Models.claude3Opus20240229)`: a value from `Models` enum which lists all the available models. + +Mind that this list may not be up-to-date. Refer to the [documentation](https://docs.anthropic.com/en/docs/models-overview) for the updated list. + +**Streaming messages:** + +```dart +final stream = await client.createMessageStream( + request: CreateMessageRequest( + model: Model.model(Models.claude3Opus20240229), + maxTokens: 1024, + messages: [ + Message( + role: MessageRole.user, + content: 'Hello, Claude', + ), + ], + ), +); +String text = ''; +await for (final res in stream) { + res.map( + messageStart: (e) {}, + messageDelta: (e) {}, + messageStop: (e) {}, + contentBlockStart: (e) {}, + contentBlockDelta: (e) { + text += e.delta.text; + }, + contentBlockStop: (e) {}, + ping: (e) {}, + ); +} +print(text); +// Hi there! How can I help you today? 
+``` + +## Advance Usage + +### Default HTTP client + +By default, the client uses `https://api.anthropic.com/v1` as the `baseUrl` and the following implementations of `http.Client`: + +- Non-web: [`IOClient`](https://pub.dev/documentation/http/latest/io_client/IOClient-class.html) +- Web: [`FetchClient`](https://pub.dev/documentation/fetch_client/latest/fetch_client/FetchClient-class.html) (to support streaming on web) + +### Custom HTTP client + +You can always provide your own implementation of `http.Client` for further customization: + +```dart +final client = AnthropicClient( + apiKey: 'MISTRAL_API_KEY', + client: MyHttpClient(), +); +``` + +### Using a proxy + +#### HTTP proxy + +You can use your own HTTP proxy by overriding the `baseUrl` and providing your required `headers`: + +```dart +final client = AnthropicClient( + baseUrl: 'https://my-proxy.com', + headers: { + 'x-my-proxy-header': 'value', + }, +); +``` + +If you need further customization, you can always provide your own `http.Client`. + +#### SOCKS5 proxy + +To use a SOCKS5 proxy, you can use the [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package: + +```dart +final baseHttpClient = HttpClient(); +SocksTCPClient.assignToHttpClient(baseHttpClient, [ + ProxySettings(InternetAddress.loopbackIPv4, 1080), +]); +final httpClient = IOClient(baseClient); + +final client = AnthropicClient( + client: httpClient, +); +``` + +## Acknowledgements + +The generation of this client was made possible by the [openapi_spec](https://github.com/tazatechnology/openapi_spec) package. + +## License + +Anthropic Dart Client is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). diff --git a/packages/anthropic_sdk_dart/analysis_options.yaml b/packages/anthropic_sdk_dart/analysis_options.yaml new file mode 100644 index 00000000..f04c6cf0 --- /dev/null +++ b/packages/anthropic_sdk_dart/analysis_options.yaml @@ -0,0 +1 @@ +include: ../../analysis_options.yaml diff --git a/packages/anthropic_sdk_dart/build.yaml b/packages/anthropic_sdk_dart/build.yaml new file mode 100644 index 00000000..dee719ac --- /dev/null +++ b/packages/anthropic_sdk_dart/build.yaml @@ -0,0 +1,13 @@ +targets: + $default: + builders: + source_gen|combining_builder: + options: + ignore_for_file: + - prefer_final_parameters + - require_trailing_commas + - non_constant_identifier_names + - unnecessary_null_checks + json_serializable: + options: + explicit_to_json: true diff --git a/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart new file mode 100644 index 00000000..65378d70 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart @@ -0,0 +1,7 @@ +/// Dart Client for the Anthropic API (Claude 3 Opus, Sonnet, Haiku, etc.). +library anthropic_sdk_dart; + +export 'src/client.dart'; +export 'src/extensions.dart'; +export 'src/generated/client.dart' show AnthropicClientException; +export 'src/generated/schema/schema.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/client.dart b/packages/anthropic_sdk_dart/lib/src/client.dart new file mode 100644 index 00000000..17c4e2a1 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/client.dart @@ -0,0 +1,101 @@ +// ignore_for_file: use_super_parameters +import 'dart:async'; +import 'dart:convert'; + +import 'package:http/http.dart' as http; + +import 'generated/client.dart' as g; +import 'generated/schema/schema.dart'; +import 'http_client/http_client.dart'; + +/// Client for Anthropic API. 
+/// +/// Please see https://docs.anthropic.com/en/api for more details. +class AnthropicClient extends g.AnthropicClient { + /// Create a new Anthropic API client. + /// + /// Main configuration options: + /// - `apiKey`: your Anthropic API key. You can find your API key in the + /// [Anthropic console](https://console.anthropic.com/settings/keys). + /// + /// Advance configuration options: + /// - `baseUrl`: the base URL to use. Defaults to `https://api.anthropic.com/v1`. + /// You can override this to use a different API URL, or to use a proxy. + /// - `headers`: global headers to send with every request. You can use + /// this to set custom headers, or to override the default headers. + /// - `queryParams`: global query parameters to send with every request. You + /// can use this to set custom query parameters. + /// - `client`: the HTTP client to use. You can set your own HTTP client if + /// you need further customization (e.g. to use a Socks5 proxy). + AnthropicClient({ + final String? apiKey, + final String? baseUrl, + final Map? headers, + final Map? queryParams, + final http.Client? client, + }) : super( + apiKey: apiKey ?? '', + baseUrl: baseUrl, + headers: { + 'anthropic-version': '2023-06-01', + ...?headers, + }, + queryParams: queryParams ?? const {}, + client: client ?? createDefaultHttpClient(), + ); + + // ------------------------------------------ + // METHOD: createMessageStream + // ------------------------------------------ + + /// Create a Message + /// + /// Send a structured list of input messages with text and/or image content, and the + /// model will generate the next message in the conversation. + /// + /// The Messages API can be used for either single queries or stateless multi-turn + /// conversations. + /// + /// `request`: The request parameters for creating a message. + /// + /// `POST` `https://api.anthropic.com/v1/messages` + Stream createMessageStream({ + required final CreateMessageRequest request, + }) async* { + final r = await makeRequestStream( + baseUrl: 'https://api.anthropic.com/v1', + path: '/messages', + method: g.HttpMethod.post, + requestType: 'application/json', + responseType: 'application/json', + body: request.copyWith(stream: true), + headerParams: { + if (apiKey.isNotEmpty) 'x-api-key': apiKey, + }, + ); + yield* r.stream + .transform(const _AnthropicStreamTransformer()) // + .map( + (final d) => MessageStreamEvent.fromJson(json.decode(d)), + ); + } + + @override + Future onRequest(final http.BaseRequest request) { + return onRequestHandler(request); + } +} + +class _AnthropicStreamTransformer + extends StreamTransformerBase, String> { + const _AnthropicStreamTransformer(); + + @override + Stream bind(final Stream> stream) { + return stream // + .transform(utf8.decoder) // + .transform(const LineSplitter()) // + .where((final i) => i.startsWith('data: ')) + .map((final item) => item.substring(6)); + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/extensions.dart b/packages/anthropic_sdk_dart/lib/src/extensions.dart new file mode 100644 index 00000000..749979e5 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/extensions.dart @@ -0,0 +1,13 @@ +import 'generated/schema/schema.dart'; + +/// Extension methods for [MessageContent]. +extension MessageContentX on MessageContent { + /// Returns the text content of the message. 
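The `_AnthropicStreamTransformer` above only keeps the `data:` lines of the server-sent event stream and strips that prefix before each payload is JSON-decoded into a `MessageStreamEvent`. A self-contained sketch of that parsing step on a hand-written sample event (the payload shown is illustrative, not captured API output):

```dart
import 'dart:convert';

void main() {
  const sse = 'event: content_block_delta\n'
      'data: {"type":"content_block_delta","index":0,'
      '"delta":{"type":"text_delta","text":"Hi"}}\n'
      '\n';

  // Keep only the data lines and decode the JSON payload they carry.
  final events = const LineSplitter()
      .convert(sse)
      .where((line) => line.startsWith('data: '))
      .map((line) => json.decode(line.substring(6)) as Map<String, dynamic>)
      .toList();

  print(events.first['delta']['text']); // Hi
}
```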
+ String get text { + return map( + text: (text) => text.value, + blocks: (blocks) => + blocks.value.whereType().map((t) => t.text).join('\n'), + ); + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/client.dart b/packages/anthropic_sdk_dart/lib/src/generated/client.dart new file mode 100644 index 00000000..0f3e82a8 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/client.dart @@ -0,0 +1,395 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target, unused_import + +import 'dart:convert'; +import 'dart:typed_data'; + +import 'package:http/http.dart' as http; +import 'package:http/retry.dart'; +import 'package:meta/meta.dart'; + +import 'schema/schema.dart'; + +/// Enum of HTTP methods +enum HttpMethod { get, put, post, delete, options, head, patch, trace } + +// ========================================== +// CLASS: AnthropicClientException +// ========================================== + +/// HTTP exception handler for AnthropicClient +class AnthropicClientException implements Exception { + AnthropicClientException({ + required this.message, + required this.uri, + required this.method, + this.code, + this.body, + }); + + final String message; + final Uri uri; + final HttpMethod method; + final int? code; + final Object? body; + + @override + String toString() { + Object? data; + try { + data = body is String ? jsonDecode(body as String) : body.toString(); + } catch (e) { + data = body.toString(); + } + final s = JsonEncoder.withIndent(' ').convert({ + 'uri': uri.toString(), + 'method': method.name.toUpperCase(), + 'code': code, + 'message': message, + 'body': data, + }); + return 'AnthropicClientException($s)'; + } +} + +// ========================================== +// CLASS: AnthropicClient +// ========================================== + +/// Client for Anthropic API (v.1) +/// +/// API Spec for Anthropic API. Please see https://docs.anthropic.com/en/api for more details. +class AnthropicClient { + /// Creates a new AnthropicClient instance. + /// + /// - [AnthropicClient.baseUrl] Override base URL (default: server url defined in spec) + /// - [AnthropicClient.headers] Global headers to be sent with every request + /// - [AnthropicClient.queryParams] Global query parameters to be sent with every request + /// - [AnthropicClient.client] Override HTTP client to use for requests + AnthropicClient({ + this.apiKey = '', + this.baseUrl, + this.headers = const {}, + this.queryParams = const {}, + http.Client? client, + }) : assert( + baseUrl == null || baseUrl.startsWith('http'), + 'baseUrl must start with http', + ), + assert( + baseUrl == null || !baseUrl.endsWith('/'), + 'baseUrl must not end with /', + ), + client = RetryClient(client ?? http.Client()); + + /// Override base URL (default: server url defined in spec) + final String? 
baseUrl; + + /// Global headers to be sent with every request + final Map headers; + + /// Global query parameters to be sent with every request + final Map queryParams; + + /// HTTP client for requests + final http.Client client; + + /// Authentication related variables + final String apiKey; + + // ------------------------------------------ + // METHOD: endSession + // ------------------------------------------ + + /// Close the HTTP client and end session + void endSession() => client.close(); + + // ------------------------------------------ + // METHOD: onRequest + // ------------------------------------------ + + /// Middleware for HTTP requests (user can override) + /// + /// The request can be of type [http.Request] or [http.MultipartRequest] + Future onRequest(http.BaseRequest request) { + return Future.value(request); + } + + // ------------------------------------------ + // METHOD: onStreamedResponse + // ------------------------------------------ + + /// Middleware for HTTP streamed responses (user can override) + Future onStreamedResponse( + final http.StreamedResponse response, + ) { + return Future.value(response); + } + + // ------------------------------------------ + // METHOD: onResponse + // ------------------------------------------ + + /// Middleware for HTTP responses (user can override) + Future onResponse(http.Response response) { + return Future.value(response); + } + + // ------------------------------------------ + // METHOD: _jsonDecode + // ------------------------------------------ + + dynamic _jsonDecode(http.Response r) { + return json.decode(utf8.decode(r.bodyBytes)); + } + + // ------------------------------------------ + // METHOD: _request + // ------------------------------------------ + + /// Reusable request method + @protected + Future _request({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + // Override with the user provided baseUrl + baseUrl = this.baseUrl ?? 
baseUrl; + + // Ensure a baseUrl is provided + assert( + baseUrl.isNotEmpty, + 'baseUrl is required, but none defined in spec or provided by user', + ); + + // Add global query parameters + queryParams = {...queryParams, ...this.queryParams}; + + // Ensure query parameters are strings or iterable of strings + queryParams = queryParams.map((key, value) { + if (value is Iterable) { + return MapEntry(key, value.map((v) => v.toString())); + } else { + return MapEntry(key, value.toString()); + } + }); + + // Build the request URI + Uri uri = Uri.parse(baseUrl + path); + if (queryParams.isNotEmpty) { + uri = uri.replace(queryParameters: queryParams); + } + + // Build the headers + Map headers = {...headerParams}; + + // Define the request type being sent to server + if (requestType.isNotEmpty) { + headers['content-type'] = requestType; + } + + // Define the response type expected to receive from server + if (responseType.isNotEmpty) { + headers['accept'] = responseType; + } + + // Add global headers + headers.addAll(this.headers); + + // Build the request object + http.BaseRequest request; + if (isMultipart) { + // Handle multipart request + request = http.MultipartRequest(method.name, uri); + request = request as http.MultipartRequest; + if (body is List) { + request.files.addAll(body); + } else { + request.files.add(body as http.MultipartFile); + } + } else { + // Handle normal request + request = http.Request(method.name, uri); + request = request as http.Request; + try { + if (body != null) { + request.body = json.encode(body); + } + } catch (e) { + // Handle request encoding error + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Could not encode: ${body.runtimeType}', + body: e, + ); + } + } + + // Add request headers + request.headers.addAll(headers); + + // Handle user request middleware + request = await onRequest(request); + + // Submit request + return await client.send(request); + } + + // ------------------------------------------ + // METHOD: makeRequestStream + // ------------------------------------------ + + /// Reusable request stream method + @protected + Future makeRequestStream({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + final uri = Uri.parse((this.baseUrl ?? 
baseUrl) + path); + late http.StreamedResponse response; + try { + response = await _request( + baseUrl: baseUrl, + path: path, + method: method, + queryParams: queryParams, + headerParams: headerParams, + requestType: requestType, + responseType: responseType, + body: body, + ); + // Handle user response middleware + response = await onStreamedResponse(response); + } catch (e) { + // Handle request and response errors + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Response error', + body: e, + ); + } + + // Check for successful response + if ((response.statusCode ~/ 100) == 2) { + return response; + } + + // Handle unsuccessful response + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Unsuccessful response', + code: response.statusCode, + body: (await http.Response.fromStream(response)).body, + ); + } + + // ------------------------------------------ + // METHOD: makeRequest + // ------------------------------------------ + + /// Reusable request method + @protected + Future makeRequest({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + final uri = Uri.parse((this.baseUrl ?? baseUrl) + path); + late http.Response response; + try { + final streamedResponse = await _request( + baseUrl: baseUrl, + path: path, + method: method, + queryParams: queryParams, + headerParams: headerParams, + requestType: requestType, + responseType: responseType, + body: body, + ); + response = await http.Response.fromStream(streamedResponse); + // Handle user response middleware + response = await onResponse(response); + } catch (e) { + // Handle request and response errors + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Response error', + body: e, + ); + } + + // Check for successful response + if ((response.statusCode ~/ 100) == 2) { + return response; + } + + // Handle unsuccessful response + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Unsuccessful response', + code: response.statusCode, + body: response.body, + ); + } + + // ------------------------------------------ + // METHOD: createMessage + // ------------------------------------------ + + /// Create a Message + /// + /// Send a structured list of input messages with text and/or image content, and the + /// model will generate the next message in the conversation. + /// + /// The Messages API can be used for either single queries or stateless multi-turn + /// conversations. + /// + /// `request`: The request parameters for creating a message. 
+ /// + /// `POST` `https://api.anthropic.com/v1/messages` + Future createMessage({ + required CreateMessageRequest request, + }) async { + final r = await makeRequest( + baseUrl: 'https://api.anthropic.com/v1', + path: '/messages', + method: HttpMethod.post, + isMultipart: false, + requestType: 'application/json', + responseType: 'application/json', + body: request, + headerParams: { + if (apiKey.isNotEmpty) 'x-api-key': apiKey, + }, + ); + return Message.fromJson(_jsonDecode(r)); + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart new file mode 100644 index 00000000..36fcbaae --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart @@ -0,0 +1,55 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: Block +// ========================================== + +/// A block of content in a message. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class Block with _$Block { + const Block._(); + + // ------------------------------------------ + // UNION: TextBlock + // ------------------------------------------ + + /// A block of text content. + const factory Block.text({ + /// The text content. + required String text, + + /// The type of content block. + @Default('text') String type, + }) = TextBlock; + + // ------------------------------------------ + // UNION: ImageBlock + // ------------------------------------------ + + /// A block of image content. + const factory Block.image({ + /// The source of an image block. + required ImageBlockSource source, + + /// The type of content block. + @Default('image') String type, + }) = ImageBlock; + + /// Object construction from a JSON representation + factory Block.fromJson(Map json) => _$BlockFromJson(json); +} + +// ========================================== +// ENUM: BlockEnumType +// ========================================== + +enum BlockEnumType { + @JsonValue('text') + text, + @JsonValue('image') + image, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart new file mode 100644 index 00000000..2f06233e --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart @@ -0,0 +1,293 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: CreateMessageRequest +// ========================================== + +/// The request parameters for creating a message. +@freezed +class CreateMessageRequest with _$CreateMessageRequest { + const CreateMessageRequest._(); + + /// Factory constructor for CreateMessageRequest + const factory CreateMessageRequest({ + /// The model that will complete your prompt. + /// + /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + /// details and options. + @_ModelConverter() required Model model, + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. 
When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. The first message must always use the `user` role. + /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + required List messages, + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + @JsonKey(name: 'max_tokens') required int maxTokens, + + /// An object describing metadata about the request. + @JsonKey(includeIfNull: false) CreateMessageRequestMetadata? metadata, + + /// Custom text sequences that will cause the model to stop generating. 
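The JSON examples above translate directly into the generated Dart types. A hedged sketch of the partially pre-filled conversation, which also sets `stop_sequences` (the parameter introduced just above) so generation halts at the closing parenthesis; the model id and token budget are arbitrary:

```dart
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

/// Rebuilds the "partially-filled response" example as a [CreateMessageRequest].
/// The trailing `assistant` turn forces the reply to continue from
/// "The best answer is (".
CreateMessageRequest buildPrefilledRequest() {
  return CreateMessageRequest(
    model: Model.model(Models.claude3Sonnet20240229),
    maxTokens: 16,
    stopSequences: [')'],
    messages: [
      Message(
        role: MessageRole.user,
        content: MessageContent.text(
          "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun",
        ),
      ),
      Message(
        role: MessageRole.assistant,
        content: MessageContent.text('The best answer is ('),
      ),
    ],
  );
}

void main() {
  // Serializes with the snake_case keys declared in the schema
  // (max_tokens, stop_sequences, ...).
  print(buildPrefilledRequest().toJson());
}
```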
+ /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. + /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? stopSequences, + + /// System prompt. + /// + /// A system prompt is a way of providing context and instructions to Claude, such + /// as specifying a particular goal or role. See our + /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + @JsonKey(includeIfNull: false) String? system, + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + @JsonKey(includeIfNull: false) double? temperature, + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_k', includeIfNull: false) int? topK, + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + + /// Whether to incrementally stream the response using server-sent events. + /// + /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + /// details. + @Default(false) bool stream, + }) = _CreateMessageRequest; + + /// Object construction from a JSON representation + factory CreateMessageRequest.fromJson(Map json) => + _$CreateMessageRequestFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'model', + 'messages', + 'max_tokens', + 'metadata', + 'stop_sequences', + 'system', + 'temperature', + 'top_k', + 'top_p', + 'stream' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'model': model, + 'messages': messages, + 'max_tokens': maxTokens, + 'metadata': metadata, + 'stop_sequences': stopSequences, + 'system': system, + 'temperature': temperature, + 'top_k': topK, + 'top_p': topP, + 'stream': stream, + }; + } +} + +// ========================================== +// ENUM: Models +// ========================================== + +/// Available models. Mind that the list may not be exhaustive nor up-to-date. 
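The `Model` value can be either a member of the generated `Models` enum or an arbitrary model id string; the private converter serializes both to the plain id on the wire. A small sketch of the two variants (the custom id is only an example):

```dart
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

void main() {
  // A model from the generated enum (which may lag behind the API).
  final known = Model.model(Models.claude3Opus20240229);

  // Any other model id can be passed through as a raw string.
  final custom = Model.modelId('claude-3-5-sonnet-20240620');

  // Both variants are freezed union cases and can be pattern matched.
  for (final model in [known, custom]) {
    final label = switch (model) {
      // `.name` is the Dart enum member name, not the wire value;
      // the wire value is produced by the JSON converter.
      ModelEnumeration(value: final value) => value.name,
      ModelString(value: final value) => value,
    };
    print(label);
  }
}
```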
+enum Models { + @JsonValue('claude-3-opus-20240229') + claude3Opus20240229, + @JsonValue('claude-3-sonnet-20240229') + claude3Sonnet20240229, + @JsonValue('claude-3-haiku-20240307') + claude3Haiku20240307, + @JsonValue('claude-2.1') + claude21, + @JsonValue('claude-2.0') + claude20, + @JsonValue('claude-instant-1.2') + claudeInstant12, +} + +// ========================================== +// CLASS: Model +// ========================================== + +/// The model that will complete your prompt. +/// +/// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional +/// details and options. +@freezed +sealed class Model with _$Model { + const Model._(); + + /// Available models. Mind that the list may not be exhaustive nor up-to-date. + const factory Model.model( + Models value, + ) = ModelEnumeration; + + /// The ID of the model to use for this request. + const factory Model.modelId( + String value, + ) = ModelString; + + /// Object construction from a JSON representation + factory Model.fromJson(Map json) => _$ModelFromJson(json); +} + +/// Custom JSON converter for [Model] +class _ModelConverter implements JsonConverter { + const _ModelConverter(); + + @override + Model fromJson(Object? data) { + if (data is String && _$ModelsEnumMap.values.contains(data)) { + return ModelEnumeration( + _$ModelsEnumMap.keys.elementAt( + _$ModelsEnumMap.values.toList().indexOf(data), + ), + ); + } + if (data is String) { + return ModelString(data); + } + throw Exception( + 'Unexpected value for Model: $data', + ); + } + + @override + Object? toJson(Model data) { + return switch (data) { + ModelEnumeration(value: final v) => _$ModelsEnumMap[v]!, + ModelString(value: final v) => v, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart new file mode 100644 index 00000000..bf588756 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart @@ -0,0 +1,44 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: CreateMessageRequestMetadata +// ========================================== + +/// An object describing metadata about the request. +@freezed +class CreateMessageRequestMetadata with _$CreateMessageRequestMetadata { + const CreateMessageRequestMetadata._(); + + /// Factory constructor for CreateMessageRequestMetadata + const factory CreateMessageRequestMetadata({ + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + @JsonKey(name: 'user_id', includeIfNull: false) String? userId, + }) = _CreateMessageRequestMetadata; + + /// Object construction from a JSON representation + factory CreateMessageRequestMetadata.fromJson(Map json) => + _$CreateMessageRequestMetadataFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['user_id']; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'user_id': userId, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart new file mode 100644 index 00000000..e0a89687 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart @@ -0,0 +1,74 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: ImageBlockSource +// ========================================== + +/// The source of an image block. +@freezed +class ImageBlockSource with _$ImageBlockSource { + const ImageBlockSource._(); + + /// Factory constructor for ImageBlockSource + const factory ImageBlockSource({ + /// The base64-encoded image data. + required String data, + + /// The media type of the image. + @JsonKey(name: 'media_type') required ImageBlockSourceMediaType mediaType, + + /// The type of image source. + required ImageBlockSourceType type, + }) = _ImageBlockSource; + + /// Object construction from a JSON representation + factory ImageBlockSource.fromJson(Map json) => + _$ImageBlockSourceFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['data', 'media_type', 'type']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'data': data, + 'media_type': mediaType, + 'type': type, + }; + } +} + +// ========================================== +// ENUM: ImageBlockSourceMediaType +// ========================================== + +/// The media type of the image. +enum ImageBlockSourceMediaType { + @JsonValue('image/jpeg') + imageJpeg, + @JsonValue('image/png') + imagePng, + @JsonValue('image/gif') + imageGif, + @JsonValue('image/webp') + imageWebp, +} + +// ========================================== +// ENUM: ImageBlockSourceType +// ========================================== + +/// The type of image source. +enum ImageBlockSourceType { + @JsonValue('base64') + base64, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart new file mode 100644 index 00000000..e8e0b298 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart @@ -0,0 +1,162 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: Message +// ========================================== + +/// A message in a chat conversation. +@freezed +class Message with _$Message { + const Message._(); + + /// Factory constructor for Message + const factory Message({ + /// Unique object identifier. + /// + /// The format and length of IDs may change over time. + @JsonKey(includeIfNull: false) String? id, + + /// The content of the message. + @_MessageContentConverter() required MessageContent content, + + /// The role of the messages author. + required MessageRole role, + + /// The model that handled the request. + @JsonKey(includeIfNull: false) String? model, + + /// The reason that we stopped. 
+ /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + StopReason? stopReason, + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) String? stopSequence, + + /// Object type. + /// + /// For Messages, this is always `"message"`. + @JsonKey(includeIfNull: false) String? type, + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + @JsonKey(includeIfNull: false) Usage? usage, + }) = _Message; + + /// Object construction from a JSON representation + factory Message.fromJson(Map json) => + _$MessageFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'id', + 'content', + 'role', + 'model', + 'stop_reason', + 'stop_sequence', + 'type', + 'usage' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'id': id, + 'content': content, + 'role': role, + 'model': model, + 'stop_reason': stopReason, + 'stop_sequence': stopSequence, + 'type': type, + 'usage': usage, + }; + } +} + +// ========================================== +// CLASS: MessageContent +// ========================================== + +/// The content of the message. +@freezed +sealed class MessageContent with _$MessageContent { + const MessageContent._(); + + /// An array of content blocks. + const factory MessageContent.blocks( + List value, + ) = MessageContentListBlock; + + /// A single text block. + const factory MessageContent.text( + String value, + ) = MessageContentString; + + /// Object construction from a JSON representation + factory MessageContent.fromJson(Map json) => + _$MessageContentFromJson(json); +} + +/// Custom JSON converter for [MessageContent] +class _MessageContentConverter + implements JsonConverter { + const _MessageContentConverter(); + + @override + MessageContent fromJson(Object? data) { + if (data is List && data.every((item) => item is Map)) { + return MessageContentListBlock(data + .map((i) => Block.fromJson(i as Map)) + .toList(growable: false)); + } + if (data is String) { + return MessageContentString(data); + } + throw Exception( + 'Unexpected value for MessageContent: $data', + ); + } + + @override + Object? 
toJson(MessageContent data) { + return switch (data) { + MessageContentListBlock(value: final v) => v, + MessageContentString(value: final v) => v, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart new file mode 100644 index 00000000..aa23db40 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart @@ -0,0 +1,61 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: MessageDelta +// ========================================== + +/// A delta in a streaming message. +@freezed +class MessageDelta with _$MessageDelta { + const MessageDelta._(); + + /// Factory constructor for MessageDelta + const factory MessageDelta({ + /// The reason that we stopped. + /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + StopReason? stopReason, + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) String? stopSequence, + }) = _MessageDelta; + + /// Object construction from a JSON representation + factory MessageDelta.fromJson(Map json) => + _$MessageDeltaFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['stop_reason', 'stop_sequence']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'stop_reason': stopReason, + 'stop_sequence': stopSequence, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart new file mode 100644 index 00000000..3ce710cc --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart @@ -0,0 +1,51 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: MessageDeltaUsage +// ========================================== + +/// Billing and rate-limit usage. +/// +/// Anthropic's API bills and rate-limits by token counts, as tokens represent the +/// underlying cost to our systems. +/// +/// Under the hood, the API transforms requests into a format suitable for the +/// model. The model's output then goes through a parsing stage before becoming an +/// API response. As a result, the token counts in `usage` will not match one-to-one +/// with the exact visible content of an API request or response. 
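Mirroring the string-or-blocks shape of `MessageContent` above, a hedged sketch that builds mixed text-and-image content from the block types in this patch; the file path is a placeholder and the image is assumed to be a JPEG:

```dart
import 'dart:convert';
import 'dart:io';

import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

/// Wraps a local JPEG and a question into the blocks form of [MessageContent].
MessageContent imageQuestion(String jpegPath, String question) {
  final data = base64Encode(File(jpegPath).readAsBytesSync());
  return MessageContent.blocks([
    Block.image(
      source: ImageBlockSource(
        type: ImageBlockSourceType.base64,
        mediaType: ImageBlockSourceMediaType.imageJpeg,
        data: data,
      ),
    ),
    Block.text(text: question),
  ]);
}
```

The resulting value drops into `Message.content` the same way `MessageContent.text` does.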
+/// +/// For example, `output_tokens` will be non-zero, even for an empty string response +/// from Claude. +@freezed +class MessageDeltaUsage with _$MessageDeltaUsage { + const MessageDeltaUsage._(); + + /// Factory constructor for MessageDeltaUsage + const factory MessageDeltaUsage({ + /// The cumulative number of output tokens which were used. + @JsonKey(name: 'output_tokens') required int outputTokens, + }) = _MessageDeltaUsage; + + /// Object construction from a JSON representation + factory MessageDeltaUsage.fromJson(Map json) => + _$MessageDeltaUsageFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['output_tokens']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'output_tokens': outputTokens, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart new file mode 100644 index 00000000..e502789a --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart @@ -0,0 +1,17 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// ENUM: MessageRole +// ========================================== + +/// The role of the messages author. +enum MessageRole { + @JsonValue('user') + user, + @JsonValue('assistant') + assistant, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart new file mode 100644 index 00000000..73dac3c3 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart @@ -0,0 +1,124 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: MessageStreamEvent +// ========================================== + +/// A event in a streaming conversation. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class MessageStreamEvent with _$MessageStreamEvent { + const MessageStreamEvent._(); + + // ------------------------------------------ + // UNION: MessageStartEvent + // ------------------------------------------ + + /// A start event in a streaming conversation. + const factory MessageStreamEvent.messageStart({ + /// A message in a chat conversation. + required Message message, + + /// The type of a streaming event. + required MessageStreamEventType type, + }) = MessageStartEvent; + + // ------------------------------------------ + // UNION: MessageDeltaEvent + // ------------------------------------------ + + /// A delta event in a streaming conversation. + const factory MessageStreamEvent.messageDelta({ + /// A delta in a streaming message. + required MessageDelta delta, + + /// The type of a streaming event. + required MessageStreamEventType type, + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. 
The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + required MessageDeltaUsage usage, + }) = MessageDeltaEvent; + + // ------------------------------------------ + // UNION: MessageStopEvent + // ------------------------------------------ + + /// A stop event in a streaming conversation. + const factory MessageStreamEvent.messageStop({ + /// The type of a streaming event. + required MessageStreamEventType type, + }) = MessageStopEvent; + + // ------------------------------------------ + // UNION: ContentBlockStartEvent + // ------------------------------------------ + + /// A start event in a streaming content block. + const factory MessageStreamEvent.contentBlockStart({ + /// A block of text content. + @JsonKey(name: 'content_block') required TextBlock contentBlock, + + /// The index of the content block. + required int index, + + /// The type of a streaming event. + required MessageStreamEventType type, + }) = ContentBlockStartEvent; + + // ------------------------------------------ + // UNION: ContentBlockDeltaEvent + // ------------------------------------------ + + /// A delta event in a streaming content block. + const factory MessageStreamEvent.contentBlockDelta({ + /// A delta in a streaming text block. + required TextBlockDelta delta, + + /// The index of the content block. + required int index, + + /// The type of a streaming event. + required MessageStreamEventType type, + }) = ContentBlockDeltaEvent; + + // ------------------------------------------ + // UNION: ContentBlockStopEvent + // ------------------------------------------ + + /// A stop event in a streaming content block. + const factory MessageStreamEvent.contentBlockStop({ + /// The index of the content block. + required int index, + + /// The type of a streaming event. + required MessageStreamEventType type, + }) = ContentBlockStopEvent; + + // ------------------------------------------ + // UNION: PingEvent + // ------------------------------------------ + + /// A ping event in a streaming conversation. + const factory MessageStreamEvent.ping({ + /// The type of a streaming event. + required MessageStreamEventType type, + }) = PingEvent; + + /// Object construction from a JSON representation + factory MessageStreamEvent.fromJson(Map json) => + _$MessageStreamEventFromJson(json); +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart new file mode 100644 index 00000000..0e6aa425 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart @@ -0,0 +1,27 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// ENUM: MessageStreamEventType +// ========================================== + +/// The type of a streaming event. 
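Because `MessageStreamEvent` is a sealed union, a streaming consumer reduces to a single switch over the variants above. A sketch that only accumulates text; how the event stream is obtained is outside this hunk, and the `text` field of `TextBlockDelta` is assumed since that file is not shown here:

```dart
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

/// Accumulates the streamed text of a single message from its event stream.
Future<String> collectText(Stream<MessageStreamEvent> events) async {
  final buffer = StringBuffer();
  await for (final event in events) {
    switch (event) {
      case ContentBlockStartEvent(contentBlock: final block):
        // The opening text block may already carry some text.
        buffer.write(block.text);
      case ContentBlockDeltaEvent(delta: final delta):
        // TextBlockDelta is declared in this patch but not shown here;
        // its `text` field is assumed.
        buffer.write(delta.text);
      case MessageStartEvent():
      case MessageDeltaEvent():
      case MessageStopEvent():
      case ContentBlockStopEvent():
      case PingEvent():
        // Metadata and keep-alive events; nothing to collect.
        break;
    }
  }
  return buffer.toString();
}
```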
+enum MessageStreamEventType { + @JsonValue('message_start') + messageStart, + @JsonValue('message_delta') + messageDelta, + @JsonValue('message_stop') + messageStop, + @JsonValue('content_block_start') + contentBlockStart, + @JsonValue('content_block_delta') + contentBlockDelta, + @JsonValue('content_block_stop') + contentBlockStop, + @JsonValue('ping') + ping, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart new file mode 100644 index 00000000..1953d0e4 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart @@ -0,0 +1,25 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target + +library anthropic_schema; + +import 'package:freezed_annotation/freezed_annotation.dart'; + +part 'schema.g.dart'; +part 'schema.freezed.dart'; + +part 'create_message_request.dart'; +part 'create_message_request_metadata.dart'; +part 'message.dart'; +part 'message_role.dart'; +part 'image_block_source.dart'; +part 'stop_reason.dart'; +part 'usage.dart'; +part 'message_stream_event_type.dart'; +part 'message_delta.dart'; +part 'message_delta_usage.dart'; +part 'text_block_delta.dart'; +part 'block.dart'; +part 'message_stream_event.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart new file mode 100644 index 00000000..a014e0e8 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart @@ -0,0 +1,5620 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: unused_element, deprecated_member_use, deprecated_member_use_from_same_package, use_function_type_syntax_for_parameters, unnecessary_const, avoid_init_to_null, invalid_override_different_default_values_named, prefer_expression_function_bodies, annotate_overrides, invalid_annotation_target, unnecessary_question_mark + +part of 'schema.dart'; + +// ************************************************************************** +// FreezedGenerator +// ************************************************************************** + +T _$identity(T value) => value; + +final _privateConstructorUsedError = UnsupportedError( + 'It seems like you constructed your class using `MyClass._()`. This constructor is only meant to be used by freezed and you are not supposed to need it nor use it.\nPlease check the documentation here for more information: https://github.com/rrousselGit/freezed#adding-getters-and-methods-to-our-models'); + +CreateMessageRequest _$CreateMessageRequestFromJson(Map json) { + return _CreateMessageRequest.fromJson(json); +} + +/// @nodoc +mixin _$CreateMessageRequest { + /// The model that will complete your prompt. + /// + /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + /// details and options. + @_ModelConverter() + Model get model => throw _privateConstructorUsedError; + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. 
You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. The first message must always use the `user` role. + /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + List get messages => throw _privateConstructorUsedError; + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + @JsonKey(name: 'max_tokens') + int get maxTokens => throw _privateConstructorUsedError; + + /// An object describing metadata about the request. + @JsonKey(includeIfNull: false) + CreateMessageRequestMetadata? get metadata => + throw _privateConstructorUsedError; + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. 
+ /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? get stopSequences => throw _privateConstructorUsedError; + + /// System prompt. + /// + /// A system prompt is a way of providing context and instructions to Claude, such + /// as specifying a particular goal or role. See our + /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + @JsonKey(includeIfNull: false) + String? get system => throw _privateConstructorUsedError; + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + @JsonKey(includeIfNull: false) + double? get temperature => throw _privateConstructorUsedError; + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_k', includeIfNull: false) + int? get topK => throw _privateConstructorUsedError; + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP => throw _privateConstructorUsedError; + + /// Whether to incrementally stream the response using server-sent events. + /// + /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + /// details. + bool get stream => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateMessageRequestCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateMessageRequestCopyWith<$Res> { + factory $CreateMessageRequestCopyWith(CreateMessageRequest value, + $Res Function(CreateMessageRequest) then) = + _$CreateMessageRequestCopyWithImpl<$Res, CreateMessageRequest>; + @useResult + $Res call( + {@_ModelConverter() Model model, + List messages, + @JsonKey(name: 'max_tokens') int maxTokens, + @JsonKey(includeIfNull: false) CreateMessageRequestMetadata? metadata, + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? stopSequences, + @JsonKey(includeIfNull: false) String? system, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_k', includeIfNull: false) int? topK, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + bool stream}); + + $ModelCopyWith<$Res> get model; + $CreateMessageRequestMetadataCopyWith<$Res>? 
get metadata; +} + +/// @nodoc +class _$CreateMessageRequestCopyWithImpl<$Res, + $Val extends CreateMessageRequest> + implements $CreateMessageRequestCopyWith<$Res> { + _$CreateMessageRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? messages = null, + Object? maxTokens = null, + Object? metadata = freezed, + Object? stopSequences = freezed, + Object? system = freezed, + Object? temperature = freezed, + Object? topK = freezed, + Object? topP = freezed, + Object? stream = null, + }) { + return _then(_value.copyWith( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as Model, + messages: null == messages + ? _value.messages + : messages // ignore: cast_nullable_to_non_nullable + as List, + maxTokens: null == maxTokens + ? _value.maxTokens + : maxTokens // ignore: cast_nullable_to_non_nullable + as int, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as CreateMessageRequestMetadata?, + stopSequences: freezed == stopSequences + ? _value.stopSequences + : stopSequences // ignore: cast_nullable_to_non_nullable + as List?, + system: freezed == system + ? _value.system + : system // ignore: cast_nullable_to_non_nullable + as String?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + topK: freezed == topK + ? _value.topK + : topK // ignore: cast_nullable_to_non_nullable + as int?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + stream: null == stream + ? _value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ModelCopyWith<$Res> get model { + return $ModelCopyWith<$Res>(_value.model, (value) { + return _then(_value.copyWith(model: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $CreateMessageRequestMetadataCopyWith<$Res>? get metadata { + if (_value.metadata == null) { + return null; + } + + return $CreateMessageRequestMetadataCopyWith<$Res>(_value.metadata!, + (value) { + return _then(_value.copyWith(metadata: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$CreateMessageRequestImplCopyWith<$Res> + implements $CreateMessageRequestCopyWith<$Res> { + factory _$$CreateMessageRequestImplCopyWith(_$CreateMessageRequestImpl value, + $Res Function(_$CreateMessageRequestImpl) then) = + __$$CreateMessageRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@_ModelConverter() Model model, + List messages, + @JsonKey(name: 'max_tokens') int maxTokens, + @JsonKey(includeIfNull: false) CreateMessageRequestMetadata? metadata, + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? stopSequences, + @JsonKey(includeIfNull: false) String? system, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_k', includeIfNull: false) int? topK, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + bool stream}); + + @override + $ModelCopyWith<$Res> get model; + @override + $CreateMessageRequestMetadataCopyWith<$Res>? 
get metadata; +} + +/// @nodoc +class __$$CreateMessageRequestImplCopyWithImpl<$Res> + extends _$CreateMessageRequestCopyWithImpl<$Res, _$CreateMessageRequestImpl> + implements _$$CreateMessageRequestImplCopyWith<$Res> { + __$$CreateMessageRequestImplCopyWithImpl(_$CreateMessageRequestImpl _value, + $Res Function(_$CreateMessageRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? messages = null, + Object? maxTokens = null, + Object? metadata = freezed, + Object? stopSequences = freezed, + Object? system = freezed, + Object? temperature = freezed, + Object? topK = freezed, + Object? topP = freezed, + Object? stream = null, + }) { + return _then(_$CreateMessageRequestImpl( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as Model, + messages: null == messages + ? _value._messages + : messages // ignore: cast_nullable_to_non_nullable + as List, + maxTokens: null == maxTokens + ? _value.maxTokens + : maxTokens // ignore: cast_nullable_to_non_nullable + as int, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as CreateMessageRequestMetadata?, + stopSequences: freezed == stopSequences + ? _value._stopSequences + : stopSequences // ignore: cast_nullable_to_non_nullable + as List?, + system: freezed == system + ? _value.system + : system // ignore: cast_nullable_to_non_nullable + as String?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + topK: freezed == topK + ? _value.topK + : topK // ignore: cast_nullable_to_non_nullable + as int?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + stream: null == stream + ? _value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateMessageRequestImpl extends _CreateMessageRequest { + const _$CreateMessageRequestImpl( + {@_ModelConverter() required this.model, + required final List messages, + @JsonKey(name: 'max_tokens') required this.maxTokens, + @JsonKey(includeIfNull: false) this.metadata, + @JsonKey(name: 'stop_sequences', includeIfNull: false) + final List? stopSequences, + @JsonKey(includeIfNull: false) this.system, + @JsonKey(includeIfNull: false) this.temperature, + @JsonKey(name: 'top_k', includeIfNull: false) this.topK, + @JsonKey(name: 'top_p', includeIfNull: false) this.topP, + this.stream = false}) + : _messages = messages, + _stopSequences = stopSequences, + super._(); + + factory _$CreateMessageRequestImpl.fromJson(Map json) => + _$$CreateMessageRequestImplFromJson(json); + + /// The model that will complete your prompt. + /// + /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + /// details and options. + @override + @_ModelConverter() + final Model model; + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. 
The first message must always use the `user` role. + /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + final List _messages; + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. The first message must always use the `user` role. + /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. 
How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + @override + List get messages { + if (_messages is EqualUnmodifiableListView) return _messages; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_messages); + } + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + @override + @JsonKey(name: 'max_tokens') + final int maxTokens; + + /// An object describing metadata about the request. + @override + @JsonKey(includeIfNull: false) + final CreateMessageRequestMetadata? metadata; + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. + /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + final List? _stopSequences; + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. 
+ /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + @override + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? get stopSequences { + final value = _stopSequences; + if (value == null) return null; + if (_stopSequences is EqualUnmodifiableListView) return _stopSequences; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// System prompt. + /// + /// A system prompt is a way of providing context and instructions to Claude, such + /// as specifying a particular goal or role. See our + /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + @override + @JsonKey(includeIfNull: false) + final String? system; + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + @override + @JsonKey(includeIfNull: false) + final double? temperature; + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @override + @JsonKey(name: 'top_k', includeIfNull: false) + final int? topK; + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @override + @JsonKey(name: 'top_p', includeIfNull: false) + final double? topP; + + /// Whether to incrementally stream the response using server-sent events. + /// + /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + /// details. 
+ @override + @JsonKey() + final bool stream; + + @override + String toString() { + return 'CreateMessageRequest(model: $model, messages: $messages, maxTokens: $maxTokens, metadata: $metadata, stopSequences: $stopSequences, system: $system, temperature: $temperature, topK: $topK, topP: $topP, stream: $stream)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateMessageRequestImpl && + (identical(other.model, model) || other.model == model) && + const DeepCollectionEquality().equals(other._messages, _messages) && + (identical(other.maxTokens, maxTokens) || + other.maxTokens == maxTokens) && + (identical(other.metadata, metadata) || + other.metadata == metadata) && + const DeepCollectionEquality() + .equals(other._stopSequences, _stopSequences) && + (identical(other.system, system) || other.system == system) && + (identical(other.temperature, temperature) || + other.temperature == temperature) && + (identical(other.topK, topK) || other.topK == topK) && + (identical(other.topP, topP) || other.topP == topP) && + (identical(other.stream, stream) || other.stream == stream)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + model, + const DeepCollectionEquality().hash(_messages), + maxTokens, + metadata, + const DeepCollectionEquality().hash(_stopSequences), + system, + temperature, + topK, + topP, + stream); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> + get copyWith => + __$$CreateMessageRequestImplCopyWithImpl<_$CreateMessageRequestImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$CreateMessageRequestImplToJson( + this, + ); + } +} + +abstract class _CreateMessageRequest extends CreateMessageRequest { + const factory _CreateMessageRequest( + {@_ModelConverter() required final Model model, + required final List messages, + @JsonKey(name: 'max_tokens') required final int maxTokens, + @JsonKey(includeIfNull: false) + final CreateMessageRequestMetadata? metadata, + @JsonKey(name: 'stop_sequences', includeIfNull: false) + final List? stopSequences, + @JsonKey(includeIfNull: false) final String? system, + @JsonKey(includeIfNull: false) final double? temperature, + @JsonKey(name: 'top_k', includeIfNull: false) final int? topK, + @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, + final bool stream}) = _$CreateMessageRequestImpl; + const _CreateMessageRequest._() : super._(); + + factory _CreateMessageRequest.fromJson(Map json) = + _$CreateMessageRequestImpl.fromJson; + + @override + + /// The model that will complete your prompt. + /// + /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + /// details and options. + @_ModelConverter() + Model get model; + @override + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. The first message must always use the `user` role. 
+ /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + List get messages; + @override + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + @JsonKey(name: 'max_tokens') + int get maxTokens; + @override + + /// An object describing metadata about the request. + @JsonKey(includeIfNull: false) + CreateMessageRequestMetadata? get metadata; + @override + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. + /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. 
If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? get stopSequences; + @override + + /// System prompt. + /// + /// A system prompt is a way of providing context and instructions to Claude, such + /// as specifying a particular goal or role. See our + /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + @JsonKey(includeIfNull: false) + String? get system; + @override + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + @JsonKey(includeIfNull: false) + double? get temperature; + @override + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_k', includeIfNull: false) + int? get topK; + @override + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP; + @override + + /// Whether to incrementally stream the response using server-sent events. + /// + /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + /// details. + bool get stream; + @override + @JsonKey(ignore: true) + _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> + get copyWith => throw _privateConstructorUsedError; +} + +Model _$ModelFromJson(Map json) { + switch (json['runtimeType']) { + case 'model': + return ModelEnumeration.fromJson(json); + case 'modelId': + return ModelString.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'Model', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$Model { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(Models value) model, + required TResult Function(String value) modelId, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Models value)? model, + TResult? Function(String value)? modelId, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Models value)? model, + TResult Function(String value)? 
modelId, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(ModelEnumeration value) model, + required TResult Function(ModelString value) modelId, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ModelEnumeration value)? model, + TResult? Function(ModelString value)? modelId, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ModelEnumeration value)? model, + TResult Function(ModelString value)? modelId, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModelCopyWith<$Res> { + factory $ModelCopyWith(Model value, $Res Function(Model) then) = + _$ModelCopyWithImpl<$Res, Model>; +} + +/// @nodoc +class _$ModelCopyWithImpl<$Res, $Val extends Model> + implements $ModelCopyWith<$Res> { + _$ModelCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$ModelEnumerationImplCopyWith<$Res> { + factory _$$ModelEnumerationImplCopyWith(_$ModelEnumerationImpl value, + $Res Function(_$ModelEnumerationImpl) then) = + __$$ModelEnumerationImplCopyWithImpl<$Res>; + @useResult + $Res call({Models value}); +} + +/// @nodoc +class __$$ModelEnumerationImplCopyWithImpl<$Res> + extends _$ModelCopyWithImpl<$Res, _$ModelEnumerationImpl> + implements _$$ModelEnumerationImplCopyWith<$Res> { + __$$ModelEnumerationImplCopyWithImpl(_$ModelEnumerationImpl _value, + $Res Function(_$ModelEnumerationImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ModelEnumerationImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as Models, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModelEnumerationImpl extends ModelEnumeration { + const _$ModelEnumerationImpl(this.value, {final String? $type}) + : $type = $type ?? 'model', + super._(); + + factory _$ModelEnumerationImpl.fromJson(Map json) => + _$$ModelEnumerationImplFromJson(json); + + @override + final Models value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'Model.model(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModelEnumerationImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModelEnumerationImplCopyWith<_$ModelEnumerationImpl> get copyWith => + __$$ModelEnumerationImplCopyWithImpl<_$ModelEnumerationImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Models value) model, + required TResult Function(String value) modelId, + }) { + return model(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Models value)? model, + TResult? Function(String value)? modelId, + }) { + return model?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Models value)? model, + TResult Function(String value)? 
modelId, + required TResult orElse(), + }) { + if (model != null) { + return model(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ModelEnumeration value) model, + required TResult Function(ModelString value) modelId, + }) { + return model(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ModelEnumeration value)? model, + TResult? Function(ModelString value)? modelId, + }) { + return model?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ModelEnumeration value)? model, + TResult Function(ModelString value)? modelId, + required TResult orElse(), + }) { + if (model != null) { + return model(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ModelEnumerationImplToJson( + this, + ); + } +} + +abstract class ModelEnumeration extends Model { + const factory ModelEnumeration(final Models value) = _$ModelEnumerationImpl; + const ModelEnumeration._() : super._(); + + factory ModelEnumeration.fromJson(Map json) = + _$ModelEnumerationImpl.fromJson; + + @override + Models get value; + @JsonKey(ignore: true) + _$$ModelEnumerationImplCopyWith<_$ModelEnumerationImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ModelStringImplCopyWith<$Res> { + factory _$$ModelStringImplCopyWith( + _$ModelStringImpl value, $Res Function(_$ModelStringImpl) then) = + __$$ModelStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$ModelStringImplCopyWithImpl<$Res> + extends _$ModelCopyWithImpl<$Res, _$ModelStringImpl> + implements _$$ModelStringImplCopyWith<$Res> { + __$$ModelStringImplCopyWithImpl( + _$ModelStringImpl _value, $Res Function(_$ModelStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ModelStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModelStringImpl extends ModelString { + const _$ModelStringImpl(this.value, {final String? $type}) + : $type = $type ?? 'modelId', + super._(); + + factory _$ModelStringImpl.fromJson(Map json) => + _$$ModelStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'Model.modelId(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModelStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModelStringImplCopyWith<_$ModelStringImpl> get copyWith => + __$$ModelStringImplCopyWithImpl<_$ModelStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Models value) model, + required TResult Function(String value) modelId, + }) { + return modelId(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Models value)? model, + TResult? Function(String value)? modelId, + }) { + return modelId?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Models value)? model, + TResult Function(String value)? 
modelId, + required TResult orElse(), + }) { + if (modelId != null) { + return modelId(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ModelEnumeration value) model, + required TResult Function(ModelString value) modelId, + }) { + return modelId(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ModelEnumeration value)? model, + TResult? Function(ModelString value)? modelId, + }) { + return modelId?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ModelEnumeration value)? model, + TResult Function(ModelString value)? modelId, + required TResult orElse(), + }) { + if (modelId != null) { + return modelId(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ModelStringImplToJson( + this, + ); + } +} + +abstract class ModelString extends Model { + const factory ModelString(final String value) = _$ModelStringImpl; + const ModelString._() : super._(); + + factory ModelString.fromJson(Map json) = + _$ModelStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$ModelStringImplCopyWith<_$ModelStringImpl> get copyWith => + throw _privateConstructorUsedError; +} + +CreateMessageRequestMetadata _$CreateMessageRequestMetadataFromJson( + Map json) { + return _CreateMessageRequestMetadata.fromJson(json); +} + +/// @nodoc +mixin _$CreateMessageRequestMetadata { + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + @JsonKey(name: 'user_id', includeIfNull: false) + String? get userId => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateMessageRequestMetadataCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateMessageRequestMetadataCopyWith<$Res> { + factory $CreateMessageRequestMetadataCopyWith( + CreateMessageRequestMetadata value, + $Res Function(CreateMessageRequestMetadata) then) = + _$CreateMessageRequestMetadataCopyWithImpl<$Res, + CreateMessageRequestMetadata>; + @useResult + $Res call({@JsonKey(name: 'user_id', includeIfNull: false) String? userId}); +} + +/// @nodoc +class _$CreateMessageRequestMetadataCopyWithImpl<$Res, + $Val extends CreateMessageRequestMetadata> + implements $CreateMessageRequestMetadataCopyWith<$Res> { + _$CreateMessageRequestMetadataCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? userId = freezed, + }) { + return _then(_value.copyWith( + userId: freezed == userId + ? _value.userId + : userId // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CreateMessageRequestMetadataImplCopyWith<$Res> + implements $CreateMessageRequestMetadataCopyWith<$Res> { + factory _$$CreateMessageRequestMetadataImplCopyWith( + _$CreateMessageRequestMetadataImpl value, + $Res Function(_$CreateMessageRequestMetadataImpl) then) = + __$$CreateMessageRequestMetadataImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(name: 'user_id', includeIfNull: false) String? 
userId}); +} + +/// @nodoc +class __$$CreateMessageRequestMetadataImplCopyWithImpl<$Res> + extends _$CreateMessageRequestMetadataCopyWithImpl<$Res, + _$CreateMessageRequestMetadataImpl> + implements _$$CreateMessageRequestMetadataImplCopyWith<$Res> { + __$$CreateMessageRequestMetadataImplCopyWithImpl( + _$CreateMessageRequestMetadataImpl _value, + $Res Function(_$CreateMessageRequestMetadataImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? userId = freezed, + }) { + return _then(_$CreateMessageRequestMetadataImpl( + userId: freezed == userId + ? _value.userId + : userId // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateMessageRequestMetadataImpl extends _CreateMessageRequestMetadata { + const _$CreateMessageRequestMetadataImpl( + {@JsonKey(name: 'user_id', includeIfNull: false) this.userId}) + : super._(); + + factory _$CreateMessageRequestMetadataImpl.fromJson( + Map json) => + _$$CreateMessageRequestMetadataImplFromJson(json); + + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + @override + @JsonKey(name: 'user_id', includeIfNull: false) + final String? userId; + + @override + String toString() { + return 'CreateMessageRequestMetadata(userId: $userId)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateMessageRequestMetadataImpl && + (identical(other.userId, userId) || other.userId == userId)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, userId); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateMessageRequestMetadataImplCopyWith< + _$CreateMessageRequestMetadataImpl> + get copyWith => __$$CreateMessageRequestMetadataImplCopyWithImpl< + _$CreateMessageRequestMetadataImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CreateMessageRequestMetadataImplToJson( + this, + ); + } +} + +abstract class _CreateMessageRequestMetadata + extends CreateMessageRequestMetadata { + const factory _CreateMessageRequestMetadata( + {@JsonKey(name: 'user_id', includeIfNull: false) + final String? userId}) = _$CreateMessageRequestMetadataImpl; + const _CreateMessageRequestMetadata._() : super._(); + + factory _CreateMessageRequestMetadata.fromJson(Map json) = + _$CreateMessageRequestMetadataImpl.fromJson; + + @override + + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + @JsonKey(name: 'user_id', includeIfNull: false) + String? get userId; + @override + @JsonKey(ignore: true) + _$$CreateMessageRequestMetadataImplCopyWith< + _$CreateMessageRequestMetadataImpl> + get copyWith => throw _privateConstructorUsedError; +} + +Message _$MessageFromJson(Map json) { + return _Message.fromJson(json); +} + +/// @nodoc +mixin _$Message { + /// Unique object identifier. + /// + /// The format and length of IDs may change over time. + @JsonKey(includeIfNull: false) + String? 
get id => throw _privateConstructorUsedError; + + /// The content of the message. + @_MessageContentConverter() + MessageContent get content => throw _privateConstructorUsedError; + + /// The role of the messages author. + MessageRole get role => throw _privateConstructorUsedError; + + /// The model that handled the request. + @JsonKey(includeIfNull: false) + String? get model => throw _privateConstructorUsedError; + + /// The reason that we stopped. + /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? get stopReason => throw _privateConstructorUsedError; + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? get stopSequence => throw _privateConstructorUsedError; + + /// Object type. + /// + /// For Messages, this is always `"message"`. + @JsonKey(includeIfNull: false) + String? get type => throw _privateConstructorUsedError; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + @JsonKey(includeIfNull: false) + Usage? get usage => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageCopyWith<$Res> { + factory $MessageCopyWith(Message value, $Res Function(Message) then) = + _$MessageCopyWithImpl<$Res, Message>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? id, + @_MessageContentConverter() MessageContent content, + MessageRole role, + @JsonKey(includeIfNull: false) String? model, + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? stopSequence, + @JsonKey(includeIfNull: false) String? type, + @JsonKey(includeIfNull: false) Usage? usage}); + + $MessageContentCopyWith<$Res> get content; + $UsageCopyWith<$Res>? get usage; +} + +/// @nodoc +class _$MessageCopyWithImpl<$Res, $Val extends Message> + implements $MessageCopyWith<$Res> { + _$MessageCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = freezed, + Object? content = null, + Object? 
role = null, + Object? model = freezed, + Object? stopReason = freezed, + Object? stopSequence = freezed, + Object? type = freezed, + Object? usage = freezed, + }) { + return _then(_value.copyWith( + id: freezed == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String?, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as MessageContent, + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as MessageRole, + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + stopReason: freezed == stopReason + ? _value.stopReason + : stopReason // ignore: cast_nullable_to_non_nullable + as StopReason?, + stopSequence: freezed == stopSequence + ? _value.stopSequence + : stopSequence // ignore: cast_nullable_to_non_nullable + as String?, + type: freezed == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String?, + usage: freezed == usage + ? _value.usage + : usage // ignore: cast_nullable_to_non_nullable + as Usage?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $MessageContentCopyWith<$Res> get content { + return $MessageContentCopyWith<$Res>(_value.content, (value) { + return _then(_value.copyWith(content: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $UsageCopyWith<$Res>? get usage { + if (_value.usage == null) { + return null; + } + + return $UsageCopyWith<$Res>(_value.usage!, (value) { + return _then(_value.copyWith(usage: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$MessageImplCopyWith<$Res> implements $MessageCopyWith<$Res> { + factory _$$MessageImplCopyWith( + _$MessageImpl value, $Res Function(_$MessageImpl) then) = + __$$MessageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? id, + @_MessageContentConverter() MessageContent content, + MessageRole role, + @JsonKey(includeIfNull: false) String? model, + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? stopSequence, + @JsonKey(includeIfNull: false) String? type, + @JsonKey(includeIfNull: false) Usage? usage}); + + @override + $MessageContentCopyWith<$Res> get content; + @override + $UsageCopyWith<$Res>? get usage; +} + +/// @nodoc +class __$$MessageImplCopyWithImpl<$Res> + extends _$MessageCopyWithImpl<$Res, _$MessageImpl> + implements _$$MessageImplCopyWith<$Res> { + __$$MessageImplCopyWithImpl( + _$MessageImpl _value, $Res Function(_$MessageImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = freezed, + Object? content = null, + Object? role = null, + Object? model = freezed, + Object? stopReason = freezed, + Object? stopSequence = freezed, + Object? type = freezed, + Object? usage = freezed, + }) { + return _then(_$MessageImpl( + id: freezed == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String?, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as MessageContent, + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as MessageRole, + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + stopReason: freezed == stopReason + ? 
_value.stopReason
+          : stopReason // ignore: cast_nullable_to_non_nullable
+              as StopReason?,
+      stopSequence: freezed == stopSequence
+          ? _value.stopSequence
+          : stopSequence // ignore: cast_nullable_to_non_nullable
+              as String?,
+      type: freezed == type
+          ? _value.type
+          : type // ignore: cast_nullable_to_non_nullable
+              as String?,
+      usage: freezed == usage
+          ? _value.usage
+          : usage // ignore: cast_nullable_to_non_nullable
+              as Usage?,
+    ));
+  }
+}
+
+/// @nodoc
+@JsonSerializable()
+class _$MessageImpl extends _Message {
+  const _$MessageImpl(
+      {@JsonKey(includeIfNull: false) this.id,
+      @_MessageContentConverter() required this.content,
+      required this.role,
+      @JsonKey(includeIfNull: false) this.model,
+      @JsonKey(
+          name: 'stop_reason',
+          includeIfNull: false,
+          unknownEnumValue: JsonKey.nullForUndefinedEnumValue)
+      this.stopReason,
+      @JsonKey(name: 'stop_sequence', includeIfNull: false) this.stopSequence,
+      @JsonKey(includeIfNull: false) this.type,
+      @JsonKey(includeIfNull: false) this.usage})
+      : super._();
+
+  factory _$MessageImpl.fromJson(Map<String, dynamic> json) =>
+      _$$MessageImplFromJson(json);
+
+  /// Unique object identifier.
+  ///
+  /// The format and length of IDs may change over time.
+  @override
+  @JsonKey(includeIfNull: false)
+  final String? id;
+
+  /// The content of the message.
+  @override
+  @_MessageContentConverter()
+  final MessageContent content;
+
+  /// The role of the messages author.
+  @override
+  final MessageRole role;
+
+  /// The model that handled the request.
+  @override
+  @JsonKey(includeIfNull: false)
+  final String? model;
+
+  /// The reason that we stopped.
+  ///
+  /// This may be one of the following values:
+  ///
+  /// - `"end_turn"`: the model reached a natural stopping point
+  /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum
+  /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated
+  ///
+  /// In non-streaming mode this value is always non-null. In streaming mode, it is
+  /// null in the `message_start` event and non-null otherwise.
+  @override
+  @JsonKey(
+      name: 'stop_reason',
+      includeIfNull: false,
+      unknownEnumValue: JsonKey.nullForUndefinedEnumValue)
+  final StopReason? stopReason;
+
+  /// Which custom stop sequence was generated, if any.
+  ///
+  /// This value will be a non-null string if one of your custom stop sequences was
+  /// generated.
+  @override
+  @JsonKey(name: 'stop_sequence', includeIfNull: false)
+  final String? stopSequence;
+
+  /// Object type.
+  ///
+  /// For Messages, this is always `"message"`.
+  @override
+  @JsonKey(includeIfNull: false)
+  final String? type;
+
+  /// Billing and rate-limit usage.
+  ///
+  /// Anthropic's API bills and rate-limits by token counts, as tokens represent the
+  /// underlying cost to our systems.
+  ///
+  /// Under the hood, the API transforms requests into a format suitable for the
+  /// model. The model's output then goes through a parsing stage before becoming an
+  /// API response. As a result, the token counts in `usage` will not match one-to-one
+  /// with the exact visible content of an API request or response.
+  ///
+  /// For example, `output_tokens` will be non-zero, even for an empty string response
+  /// from Claude.
+  @override
+  @JsonKey(includeIfNull: false)
+  final Usage? usage;
+
+  @override
+  String toString() {
+    return 'Message(id: $id, content: $content, role: $role, model: $model, stopReason: $stopReason, stopSequence: $stopSequence, type: $type, usage: $usage)';
+  }
+
+  @override
+  bool operator ==(Object other) {
+    return identical(this, other) ||
+        (other.runtimeType == runtimeType &&
+            other is _$MessageImpl &&
+            (identical(other.id, id) || other.id == id) &&
+            (identical(other.content, content) || other.content == content) &&
+            (identical(other.role, role) || other.role == role) &&
+            (identical(other.model, model) || other.model == model) &&
+            (identical(other.stopReason, stopReason) ||
+                other.stopReason == stopReason) &&
+            (identical(other.stopSequence, stopSequence) ||
+                other.stopSequence == stopSequence) &&
+            (identical(other.type, type) || other.type == type) &&
+            (identical(other.usage, usage) || other.usage == usage));
+  }
+
+  @JsonKey(ignore: true)
+  @override
+  int get hashCode => Object.hash(runtimeType, id, content, role, model,
+      stopReason, stopSequence, type, usage);
+
+  @JsonKey(ignore: true)
+  @override
+  @pragma('vm:prefer-inline')
+  _$$MessageImplCopyWith<_$MessageImpl> get copyWith =>
+      __$$MessageImplCopyWithImpl<_$MessageImpl>(this, _$identity);
+
+  @override
+  Map<String, dynamic> toJson() {
+    return _$$MessageImplToJson(
+      this,
+    );
+  }
+}
+
+abstract class _Message extends Message {
+  const factory _Message(
+      {@JsonKey(includeIfNull: false) final String? id,
+      @_MessageContentConverter() required final MessageContent content,
+      required final MessageRole role,
+      @JsonKey(includeIfNull: false) final String? model,
+      @JsonKey(
+          name: 'stop_reason',
+          includeIfNull: false,
+          unknownEnumValue: JsonKey.nullForUndefinedEnumValue)
+      final StopReason? stopReason,
+      @JsonKey(name: 'stop_sequence', includeIfNull: false)
+      final String? stopSequence,
+      @JsonKey(includeIfNull: false) final String? type,
+      @JsonKey(includeIfNull: false) final Usage? usage}) = _$MessageImpl;
+  const _Message._() : super._();
+
+  factory _Message.fromJson(Map<String, dynamic> json) = _$MessageImpl.fromJson;
+
+  @override
+
+  /// Unique object identifier.
+  ///
+  /// The format and length of IDs may change over time.
+  @JsonKey(includeIfNull: false)
+  String? get id;
+  @override
+
+  /// The content of the message.
+  @_MessageContentConverter()
+  MessageContent get content;
+  @override
+
+  /// The role of the messages author.
+  MessageRole get role;
+  @override
+
+  /// The model that handled the request.
+  @JsonKey(includeIfNull: false)
+  String? get model;
+  @override
+
+  /// The reason that we stopped.
+  ///
+  /// This may be one of the following values:
+  ///
+  /// - `"end_turn"`: the model reached a natural stopping point
+  /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum
+  /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated
+  ///
+  /// In non-streaming mode this value is always non-null. In streaming mode, it is
+  /// null in the `message_start` event and non-null otherwise.
+  @JsonKey(
+      name: 'stop_reason',
+      includeIfNull: false,
+      unknownEnumValue: JsonKey.nullForUndefinedEnumValue)
+  StopReason? get stopReason;
+  @override
+
+  /// Which custom stop sequence was generated, if any.
+  ///
+  /// This value will be a non-null string if one of your custom stop sequences was
+  /// generated.
+  @JsonKey(name: 'stop_sequence', includeIfNull: false)
+  String? get stopSequence;
+  @override
+
+  /// Object type.
+  ///
+  /// For Messages, this is always `"message"`.
+  @JsonKey(includeIfNull: false)
+  String?
get type; + @override + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + @JsonKey(includeIfNull: false) + Usage? get usage; + @override + @JsonKey(ignore: true) + _$$MessageImplCopyWith<_$MessageImpl> get copyWith => + throw _privateConstructorUsedError; +} + +MessageContent _$MessageContentFromJson(Map json) { + switch (json['runtimeType']) { + case 'blocks': + return MessageContentListBlock.fromJson(json); + case 'text': + return MessageContentString.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'MessageContent', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$MessageContent { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(MessageContentListBlock value) blocks, + required TResult Function(MessageContentString value) text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageContentListBlock value)? blocks, + TResult? Function(MessageContentString value)? text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageContentListBlock value)? blocks, + TResult Function(MessageContentString value)? 
text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageContentCopyWith<$Res> { + factory $MessageContentCopyWith( + MessageContent value, $Res Function(MessageContent) then) = + _$MessageContentCopyWithImpl<$Res, MessageContent>; +} + +/// @nodoc +class _$MessageContentCopyWithImpl<$Res, $Val extends MessageContent> + implements $MessageContentCopyWith<$Res> { + _$MessageContentCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$MessageContentListBlockImplCopyWith<$Res> { + factory _$$MessageContentListBlockImplCopyWith( + _$MessageContentListBlockImpl value, + $Res Function(_$MessageContentListBlockImpl) then) = + __$$MessageContentListBlockImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class __$$MessageContentListBlockImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, _$MessageContentListBlockImpl> + implements _$$MessageContentListBlockImplCopyWith<$Res> { + __$$MessageContentListBlockImplCopyWithImpl( + _$MessageContentListBlockImpl _value, + $Res Function(_$MessageContentListBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$MessageContentListBlockImpl( + null == value + ? _value._value + : value // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageContentListBlockImpl extends MessageContentListBlock { + const _$MessageContentListBlockImpl(final List value, + {final String? $type}) + : _value = value, + $type = $type ?? 'blocks', + super._(); + + factory _$MessageContentListBlockImpl.fromJson(Map json) => + _$$MessageContentListBlockImplFromJson(json); + + final List _value; + @override + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'MessageContent.blocks(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageContentListBlockImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageContentListBlockImplCopyWith<_$MessageContentListBlockImpl> + get copyWith => __$$MessageContentListBlockImplCopyWithImpl< + _$MessageContentListBlockImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) { + return blocks(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) { + return blocks?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? 
text, + required TResult orElse(), + }) { + if (blocks != null) { + return blocks(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageContentListBlock value) blocks, + required TResult Function(MessageContentString value) text, + }) { + return blocks(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageContentListBlock value)? blocks, + TResult? Function(MessageContentString value)? text, + }) { + return blocks?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageContentListBlock value)? blocks, + TResult Function(MessageContentString value)? text, + required TResult orElse(), + }) { + if (blocks != null) { + return blocks(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageContentListBlockImplToJson( + this, + ); + } +} + +abstract class MessageContentListBlock extends MessageContent { + const factory MessageContentListBlock(final List value) = + _$MessageContentListBlockImpl; + const MessageContentListBlock._() : super._(); + + factory MessageContentListBlock.fromJson(Map json) = + _$MessageContentListBlockImpl.fromJson; + + @override + List get value; + @JsonKey(ignore: true) + _$$MessageContentListBlockImplCopyWith<_$MessageContentListBlockImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$MessageContentStringImplCopyWith<$Res> { + factory _$$MessageContentStringImplCopyWith(_$MessageContentStringImpl value, + $Res Function(_$MessageContentStringImpl) then) = + __$$MessageContentStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$MessageContentStringImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, _$MessageContentStringImpl> + implements _$$MessageContentStringImplCopyWith<$Res> { + __$$MessageContentStringImplCopyWithImpl(_$MessageContentStringImpl _value, + $Res Function(_$MessageContentStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$MessageContentStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageContentStringImpl extends MessageContentString { + const _$MessageContentStringImpl(this.value, {final String? $type}) + : $type = $type ?? 
'text', + super._(); + + factory _$MessageContentStringImpl.fromJson(Map json) => + _$$MessageContentStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'MessageContent.text(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageContentStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageContentStringImplCopyWith<_$MessageContentStringImpl> + get copyWith => + __$$MessageContentStringImplCopyWithImpl<_$MessageContentStringImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) { + return text(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) { + return text?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? text, + required TResult orElse(), + }) { + if (text != null) { + return text(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageContentListBlock value) blocks, + required TResult Function(MessageContentString value) text, + }) { + return text(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageContentListBlock value)? blocks, + TResult? Function(MessageContentString value)? text, + }) { + return text?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageContentListBlock value)? blocks, + TResult Function(MessageContentString value)? text, + required TResult orElse(), + }) { + if (text != null) { + return text(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageContentStringImplToJson( + this, + ); + } +} + +abstract class MessageContentString extends MessageContent { + const factory MessageContentString(final String value) = + _$MessageContentStringImpl; + const MessageContentString._() : super._(); + + factory MessageContentString.fromJson(Map json) = + _$MessageContentStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$MessageContentStringImplCopyWith<_$MessageContentStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ImageBlockSource _$ImageBlockSourceFromJson(Map json) { + return _ImageBlockSource.fromJson(json); +} + +/// @nodoc +mixin _$ImageBlockSource { + /// The base64-encoded image data. + String get data => throw _privateConstructorUsedError; + + /// The media type of the image. + @JsonKey(name: 'media_type') + ImageBlockSourceMediaType get mediaType => throw _privateConstructorUsedError; + + /// The type of image source. 
+ ImageBlockSourceType get type => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ImageBlockSourceCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ImageBlockSourceCopyWith<$Res> { + factory $ImageBlockSourceCopyWith( + ImageBlockSource value, $Res Function(ImageBlockSource) then) = + _$ImageBlockSourceCopyWithImpl<$Res, ImageBlockSource>; + @useResult + $Res call( + {String data, + @JsonKey(name: 'media_type') ImageBlockSourceMediaType mediaType, + ImageBlockSourceType type}); +} + +/// @nodoc +class _$ImageBlockSourceCopyWithImpl<$Res, $Val extends ImageBlockSource> + implements $ImageBlockSourceCopyWith<$Res> { + _$ImageBlockSourceCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? mediaType = null, + Object? type = null, + }) { + return _then(_value.copyWith( + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as String, + mediaType: null == mediaType + ? _value.mediaType + : mediaType // ignore: cast_nullable_to_non_nullable + as ImageBlockSourceMediaType, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ImageBlockSourceType, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ImageBlockSourceImplCopyWith<$Res> + implements $ImageBlockSourceCopyWith<$Res> { + factory _$$ImageBlockSourceImplCopyWith(_$ImageBlockSourceImpl value, + $Res Function(_$ImageBlockSourceImpl) then) = + __$$ImageBlockSourceImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String data, + @JsonKey(name: 'media_type') ImageBlockSourceMediaType mediaType, + ImageBlockSourceType type}); +} + +/// @nodoc +class __$$ImageBlockSourceImplCopyWithImpl<$Res> + extends _$ImageBlockSourceCopyWithImpl<$Res, _$ImageBlockSourceImpl> + implements _$$ImageBlockSourceImplCopyWith<$Res> { + __$$ImageBlockSourceImplCopyWithImpl(_$ImageBlockSourceImpl _value, + $Res Function(_$ImageBlockSourceImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? mediaType = null, + Object? type = null, + }) { + return _then(_$ImageBlockSourceImpl( + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as String, + mediaType: null == mediaType + ? _value.mediaType + : mediaType // ignore: cast_nullable_to_non_nullable + as ImageBlockSourceMediaType, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ImageBlockSourceType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ImageBlockSourceImpl extends _ImageBlockSource { + const _$ImageBlockSourceImpl( + {required this.data, + @JsonKey(name: 'media_type') required this.mediaType, + required this.type}) + : super._(); + + factory _$ImageBlockSourceImpl.fromJson(Map json) => + _$$ImageBlockSourceImplFromJson(json); + + /// The base64-encoded image data. + @override + final String data; + + /// The media type of the image. + @override + @JsonKey(name: 'media_type') + final ImageBlockSourceMediaType mediaType; + + /// The type of image source. 
+ @override + final ImageBlockSourceType type; + + @override + String toString() { + return 'ImageBlockSource(data: $data, mediaType: $mediaType, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ImageBlockSourceImpl && + (identical(other.data, data) || other.data == data) && + (identical(other.mediaType, mediaType) || + other.mediaType == mediaType) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, data, mediaType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ImageBlockSourceImplCopyWith<_$ImageBlockSourceImpl> get copyWith => + __$$ImageBlockSourceImplCopyWithImpl<_$ImageBlockSourceImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ImageBlockSourceImplToJson( + this, + ); + } +} + +abstract class _ImageBlockSource extends ImageBlockSource { + const factory _ImageBlockSource( + {required final String data, + @JsonKey(name: 'media_type') + required final ImageBlockSourceMediaType mediaType, + required final ImageBlockSourceType type}) = _$ImageBlockSourceImpl; + const _ImageBlockSource._() : super._(); + + factory _ImageBlockSource.fromJson(Map json) = + _$ImageBlockSourceImpl.fromJson; + + @override + + /// The base64-encoded image data. + String get data; + @override + + /// The media type of the image. + @JsonKey(name: 'media_type') + ImageBlockSourceMediaType get mediaType; + @override + + /// The type of image source. + ImageBlockSourceType get type; + @override + @JsonKey(ignore: true) + _$$ImageBlockSourceImplCopyWith<_$ImageBlockSourceImpl> get copyWith => + throw _privateConstructorUsedError; +} + +Usage _$UsageFromJson(Map json) { + return _Usage.fromJson(json); +} + +/// @nodoc +mixin _$Usage { + /// The number of input tokens which were used. + @JsonKey(name: 'input_tokens') + int get inputTokens => throw _privateConstructorUsedError; + + /// The number of output tokens which were used. + @JsonKey(name: 'output_tokens') + int get outputTokens => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $UsageCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $UsageCopyWith<$Res> { + factory $UsageCopyWith(Usage value, $Res Function(Usage) then) = + _$UsageCopyWithImpl<$Res, Usage>; + @useResult + $Res call( + {@JsonKey(name: 'input_tokens') int inputTokens, + @JsonKey(name: 'output_tokens') int outputTokens}); +} + +/// @nodoc +class _$UsageCopyWithImpl<$Res, $Val extends Usage> + implements $UsageCopyWith<$Res> { + _$UsageCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? inputTokens = null, + Object? outputTokens = null, + }) { + return _then(_value.copyWith( + inputTokens: null == inputTokens + ? _value.inputTokens + : inputTokens // ignore: cast_nullable_to_non_nullable + as int, + outputTokens: null == outputTokens + ? 
_value.outputTokens + : outputTokens // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$UsageImplCopyWith<$Res> implements $UsageCopyWith<$Res> { + factory _$$UsageImplCopyWith( + _$UsageImpl value, $Res Function(_$UsageImpl) then) = + __$$UsageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'input_tokens') int inputTokens, + @JsonKey(name: 'output_tokens') int outputTokens}); +} + +/// @nodoc +class __$$UsageImplCopyWithImpl<$Res> + extends _$UsageCopyWithImpl<$Res, _$UsageImpl> + implements _$$UsageImplCopyWith<$Res> { + __$$UsageImplCopyWithImpl( + _$UsageImpl _value, $Res Function(_$UsageImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? inputTokens = null, + Object? outputTokens = null, + }) { + return _then(_$UsageImpl( + inputTokens: null == inputTokens + ? _value.inputTokens + : inputTokens // ignore: cast_nullable_to_non_nullable + as int, + outputTokens: null == outputTokens + ? _value.outputTokens + : outputTokens // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UsageImpl extends _Usage { + const _$UsageImpl( + {@JsonKey(name: 'input_tokens') required this.inputTokens, + @JsonKey(name: 'output_tokens') required this.outputTokens}) + : super._(); + + factory _$UsageImpl.fromJson(Map json) => + _$$UsageImplFromJson(json); + + /// The number of input tokens which were used. + @override + @JsonKey(name: 'input_tokens') + final int inputTokens; + + /// The number of output tokens which were used. + @override + @JsonKey(name: 'output_tokens') + final int outputTokens; + + @override + String toString() { + return 'Usage(inputTokens: $inputTokens, outputTokens: $outputTokens)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UsageImpl && + (identical(other.inputTokens, inputTokens) || + other.inputTokens == inputTokens) && + (identical(other.outputTokens, outputTokens) || + other.outputTokens == outputTokens)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, inputTokens, outputTokens); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UsageImplCopyWith<_$UsageImpl> get copyWith => + __$$UsageImplCopyWithImpl<_$UsageImpl>(this, _$identity); + + @override + Map toJson() { + return _$$UsageImplToJson( + this, + ); + } +} + +abstract class _Usage extends Usage { + const factory _Usage( + {@JsonKey(name: 'input_tokens') required final int inputTokens, + @JsonKey(name: 'output_tokens') required final int outputTokens}) = + _$UsageImpl; + const _Usage._() : super._(); + + factory _Usage.fromJson(Map json) = _$UsageImpl.fromJson; + + @override + + /// The number of input tokens which were used. + @JsonKey(name: 'input_tokens') + int get inputTokens; + @override + + /// The number of output tokens which were used. + @JsonKey(name: 'output_tokens') + int get outputTokens; + @override + @JsonKey(ignore: true) + _$$UsageImplCopyWith<_$UsageImpl> get copyWith => + throw _privateConstructorUsedError; +} + +MessageDelta _$MessageDeltaFromJson(Map json) { + return _MessageDelta.fromJson(json); +} + +/// @nodoc +mixin _$MessageDelta { + /// The reason that we stopped. 
+  ///
+  /// This may be one of the following values:
+  ///
+  /// - `"end_turn"`: the model reached a natural stopping point
+  /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum
+  /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated
+  ///
+  /// In non-streaming mode this value is always non-null. In streaming mode, it is
+  /// null in the `message_start` event and non-null otherwise.
+  @JsonKey(
+      name: 'stop_reason',
+      includeIfNull: false,
+      unknownEnumValue: JsonKey.nullForUndefinedEnumValue)
+  StopReason? get stopReason => throw _privateConstructorUsedError;
+
+  /// Which custom stop sequence was generated, if any.
+  ///
+  /// This value will be a non-null string if one of your custom stop sequences was
+  /// generated.
+  @JsonKey(name: 'stop_sequence', includeIfNull: false)
+  String? get stopSequence => throw _privateConstructorUsedError;
+
+  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
+  @JsonKey(ignore: true)
+  $MessageDeltaCopyWith<MessageDelta> get copyWith =>
+      throw _privateConstructorUsedError;
+}
+
+/// @nodoc
+abstract class $MessageDeltaCopyWith<$Res> {
+  factory $MessageDeltaCopyWith(
+          MessageDelta value, $Res Function(MessageDelta) then) =
+      _$MessageDeltaCopyWithImpl<$Res, MessageDelta>;
+  @useResult
+  $Res call(
+      {@JsonKey(
+          name: 'stop_reason',
+          includeIfNull: false,
+          unknownEnumValue: JsonKey.nullForUndefinedEnumValue)
+      StopReason? stopReason,
+      @JsonKey(name: 'stop_sequence', includeIfNull: false)
+      String? stopSequence});
+}
+
+/// @nodoc
+class _$MessageDeltaCopyWithImpl<$Res, $Val extends MessageDelta>
+    implements $MessageDeltaCopyWith<$Res> {
+  _$MessageDeltaCopyWithImpl(this._value, this._then);
+
+  // ignore: unused_field
+  final $Val _value;
+  // ignore: unused_field
+  final $Res Function($Val) _then;
+
+  @pragma('vm:prefer-inline')
+  @override
+  $Res call({
+    Object? stopReason = freezed,
+    Object? stopSequence = freezed,
+  }) {
+    return _then(_value.copyWith(
+      stopReason: freezed == stopReason
+          ? _value.stopReason
+          : stopReason // ignore: cast_nullable_to_non_nullable
+              as StopReason?,
+      stopSequence: freezed == stopSequence
+          ? _value.stopSequence
+          : stopSequence // ignore: cast_nullable_to_non_nullable
+              as String?,
+    ) as $Val);
+  }
+}
+
+/// @nodoc
+abstract class _$$MessageDeltaImplCopyWith<$Res>
+    implements $MessageDeltaCopyWith<$Res> {
+  factory _$$MessageDeltaImplCopyWith(
+          _$MessageDeltaImpl value, $Res Function(_$MessageDeltaImpl) then) =
+      __$$MessageDeltaImplCopyWithImpl<$Res>;
+  @override
+  @useResult
+  $Res call(
+      {@JsonKey(
+          name: 'stop_reason',
+          includeIfNull: false,
+          unknownEnumValue: JsonKey.nullForUndefinedEnumValue)
+      StopReason? stopReason,
+      @JsonKey(name: 'stop_sequence', includeIfNull: false)
+      String? stopSequence});
+}
+
+/// @nodoc
+class __$$MessageDeltaImplCopyWithImpl<$Res>
+    extends _$MessageDeltaCopyWithImpl<$Res, _$MessageDeltaImpl>
+    implements _$$MessageDeltaImplCopyWith<$Res> {
+  __$$MessageDeltaImplCopyWithImpl(
+      _$MessageDeltaImpl _value, $Res Function(_$MessageDeltaImpl) _then)
+      : super(_value, _then);
+
+  @pragma('vm:prefer-inline')
+  @override
+  $Res call({
+    Object? stopReason = freezed,
+    Object? stopSequence = freezed,
+  }) {
+    return _then(_$MessageDeltaImpl(
+      stopReason: freezed == stopReason
+          ? _value.stopReason
+          : stopReason // ignore: cast_nullable_to_non_nullable
+              as StopReason?,
+      stopSequence: freezed == stopSequence
+          ? _value.stopSequence
+          : stopSequence // ignore: cast_nullable_to_non_nullable
+              as String?,
+    ));
+  }
+}
+
+/// @nodoc
+@JsonSerializable()
+class _$MessageDeltaImpl extends _MessageDelta {
+  const _$MessageDeltaImpl(
+      {@JsonKey(
+          name: 'stop_reason',
+          includeIfNull: false,
+          unknownEnumValue: JsonKey.nullForUndefinedEnumValue)
+      this.stopReason,
+      @JsonKey(name: 'stop_sequence', includeIfNull: false) this.stopSequence})
+      : super._();
+
+  factory _$MessageDeltaImpl.fromJson(Map<String, dynamic> json) =>
+      _$$MessageDeltaImplFromJson(json);
+
+  /// The reason that we stopped.
+  ///
+  /// This may be one of the following values:
+  ///
+  /// - `"end_turn"`: the model reached a natural stopping point
+  /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum
+  /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated
+  ///
+  /// In non-streaming mode this value is always non-null. In streaming mode, it is
+  /// null in the `message_start` event and non-null otherwise.
+  @override
+  @JsonKey(
+      name: 'stop_reason',
+      includeIfNull: false,
+      unknownEnumValue: JsonKey.nullForUndefinedEnumValue)
+  final StopReason? stopReason;
+
+  /// Which custom stop sequence was generated, if any.
+  ///
+  /// This value will be a non-null string if one of your custom stop sequences was
+  /// generated.
+  @override
+  @JsonKey(name: 'stop_sequence', includeIfNull: false)
+  final String? stopSequence;
+
+  @override
+  String toString() {
+    return 'MessageDelta(stopReason: $stopReason, stopSequence: $stopSequence)';
+  }
+
+  @override
+  bool operator ==(Object other) {
+    return identical(this, other) ||
+        (other.runtimeType == runtimeType &&
+            other is _$MessageDeltaImpl &&
+            (identical(other.stopReason, stopReason) ||
+                other.stopReason == stopReason) &&
+            (identical(other.stopSequence, stopSequence) ||
+                other.stopSequence == stopSequence));
+  }
+
+  @JsonKey(ignore: true)
+  @override
+  int get hashCode => Object.hash(runtimeType, stopReason, stopSequence);
+
+  @JsonKey(ignore: true)
+  @override
+  @pragma('vm:prefer-inline')
+  _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith =>
+      __$$MessageDeltaImplCopyWithImpl<_$MessageDeltaImpl>(this, _$identity);
+
+  @override
+  Map<String, dynamic> toJson() {
+    return _$$MessageDeltaImplToJson(
+      this,
+    );
+  }
+}
+
+abstract class _MessageDelta extends MessageDelta {
+  const factory _MessageDelta(
+      {@JsonKey(
+          name: 'stop_reason',
+          includeIfNull: false,
+          unknownEnumValue: JsonKey.nullForUndefinedEnumValue)
+      final StopReason? stopReason,
+      @JsonKey(name: 'stop_sequence', includeIfNull: false)
+      final String? stopSequence}) = _$MessageDeltaImpl;
+  const _MessageDelta._() : super._();
+
+  factory _MessageDelta.fromJson(Map<String, dynamic> json) =
+      _$MessageDeltaImpl.fromJson;
+
+  @override
+
+  /// The reason that we stopped.
+  ///
+  /// This may be one of the following values:
+  ///
+  /// - `"end_turn"`: the model reached a natural stopping point
+  /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum
+  /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated
+  ///
+  /// In non-streaming mode this value is always non-null. In streaming mode, it is
+  /// null in the `message_start` event and non-null otherwise.
+  @JsonKey(
+      name: 'stop_reason',
+      includeIfNull: false,
+      unknownEnumValue: JsonKey.nullForUndefinedEnumValue)
+  StopReason? get stopReason;
+  @override
+
+  /// Which custom stop sequence was generated, if any.
+ /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? get stopSequence; + @override + @JsonKey(ignore: true) + _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => + throw _privateConstructorUsedError; +} + +MessageDeltaUsage _$MessageDeltaUsageFromJson(Map json) { + return _MessageDeltaUsage.fromJson(json); +} + +/// @nodoc +mixin _$MessageDeltaUsage { + /// The cumulative number of output tokens which were used. + @JsonKey(name: 'output_tokens') + int get outputTokens => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageDeltaUsageCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageDeltaUsageCopyWith<$Res> { + factory $MessageDeltaUsageCopyWith( + MessageDeltaUsage value, $Res Function(MessageDeltaUsage) then) = + _$MessageDeltaUsageCopyWithImpl<$Res, MessageDeltaUsage>; + @useResult + $Res call({@JsonKey(name: 'output_tokens') int outputTokens}); +} + +/// @nodoc +class _$MessageDeltaUsageCopyWithImpl<$Res, $Val extends MessageDeltaUsage> + implements $MessageDeltaUsageCopyWith<$Res> { + _$MessageDeltaUsageCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? outputTokens = null, + }) { + return _then(_value.copyWith( + outputTokens: null == outputTokens + ? _value.outputTokens + : outputTokens // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$MessageDeltaUsageImplCopyWith<$Res> + implements $MessageDeltaUsageCopyWith<$Res> { + factory _$$MessageDeltaUsageImplCopyWith(_$MessageDeltaUsageImpl value, + $Res Function(_$MessageDeltaUsageImpl) then) = + __$$MessageDeltaUsageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(name: 'output_tokens') int outputTokens}); +} + +/// @nodoc +class __$$MessageDeltaUsageImplCopyWithImpl<$Res> + extends _$MessageDeltaUsageCopyWithImpl<$Res, _$MessageDeltaUsageImpl> + implements _$$MessageDeltaUsageImplCopyWith<$Res> { + __$$MessageDeltaUsageImplCopyWithImpl(_$MessageDeltaUsageImpl _value, + $Res Function(_$MessageDeltaUsageImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? outputTokens = null, + }) { + return _then(_$MessageDeltaUsageImpl( + outputTokens: null == outputTokens + ? _value.outputTokens + : outputTokens // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaUsageImpl extends _MessageDeltaUsage { + const _$MessageDeltaUsageImpl( + {@JsonKey(name: 'output_tokens') required this.outputTokens}) + : super._(); + + factory _$MessageDeltaUsageImpl.fromJson(Map json) => + _$$MessageDeltaUsageImplFromJson(json); + + /// The cumulative number of output tokens which were used. 
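+  /// That is, the total number of output tokens emitted for the message so
+  /// far in the stream, not just the tokens contained in the latest delta.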
+ @override + @JsonKey(name: 'output_tokens') + final int outputTokens; + + @override + String toString() { + return 'MessageDeltaUsage(outputTokens: $outputTokens)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaUsageImpl && + (identical(other.outputTokens, outputTokens) || + other.outputTokens == outputTokens)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, outputTokens); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaUsageImplCopyWith<_$MessageDeltaUsageImpl> get copyWith => + __$$MessageDeltaUsageImplCopyWithImpl<_$MessageDeltaUsageImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$MessageDeltaUsageImplToJson( + this, + ); + } +} + +abstract class _MessageDeltaUsage extends MessageDeltaUsage { + const factory _MessageDeltaUsage( + {@JsonKey(name: 'output_tokens') required final int outputTokens}) = + _$MessageDeltaUsageImpl; + const _MessageDeltaUsage._() : super._(); + + factory _MessageDeltaUsage.fromJson(Map json) = + _$MessageDeltaUsageImpl.fromJson; + + @override + + /// The cumulative number of output tokens which were used. + @JsonKey(name: 'output_tokens') + int get outputTokens; + @override + @JsonKey(ignore: true) + _$$MessageDeltaUsageImplCopyWith<_$MessageDeltaUsageImpl> get copyWith => + throw _privateConstructorUsedError; +} + +TextBlockDelta _$TextBlockDeltaFromJson(Map json) { + return _TextBlockDelta.fromJson(json); +} + +/// @nodoc +mixin _$TextBlockDelta { + /// The text delta. + String get text => throw _privateConstructorUsedError; + + /// The type of content block. + String get type => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $TextBlockDeltaCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $TextBlockDeltaCopyWith<$Res> { + factory $TextBlockDeltaCopyWith( + TextBlockDelta value, $Res Function(TextBlockDelta) then) = + _$TextBlockDeltaCopyWithImpl<$Res, TextBlockDelta>; + @useResult + $Res call({String text, String type}); +} + +/// @nodoc +class _$TextBlockDeltaCopyWithImpl<$Res, $Val extends TextBlockDelta> + implements $TextBlockDeltaCopyWith<$Res> { + _$TextBlockDeltaCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? text = null, + Object? type = null, + }) { + return _then(_value.copyWith( + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$TextBlockDeltaImplCopyWith<$Res> + implements $TextBlockDeltaCopyWith<$Res> { + factory _$$TextBlockDeltaImplCopyWith(_$TextBlockDeltaImpl value, + $Res Function(_$TextBlockDeltaImpl) then) = + __$$TextBlockDeltaImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String text, String type}); +} + +/// @nodoc +class __$$TextBlockDeltaImplCopyWithImpl<$Res> + extends _$TextBlockDeltaCopyWithImpl<$Res, _$TextBlockDeltaImpl> + implements _$$TextBlockDeltaImplCopyWith<$Res> { + __$$TextBlockDeltaImplCopyWithImpl( + _$TextBlockDeltaImpl _value, $Res Function(_$TextBlockDeltaImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? text = null, + Object? type = null, + }) { + return _then(_$TextBlockDeltaImpl( + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$TextBlockDeltaImpl extends _TextBlockDelta { + const _$TextBlockDeltaImpl({required this.text, required this.type}) + : super._(); + + factory _$TextBlockDeltaImpl.fromJson(Map json) => + _$$TextBlockDeltaImplFromJson(json); + + /// The text delta. + @override + final String text; + + /// The type of content block. + @override + final String type; + + @override + String toString() { + return 'TextBlockDelta(text: $text, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$TextBlockDeltaImpl && + (identical(other.text, text) || other.text == text) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, text, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => + __$$TextBlockDeltaImplCopyWithImpl<_$TextBlockDeltaImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$TextBlockDeltaImplToJson( + this, + ); + } +} + +abstract class _TextBlockDelta extends TextBlockDelta { + const factory _TextBlockDelta( + {required final String text, + required final String type}) = _$TextBlockDeltaImpl; + const _TextBlockDelta._() : super._(); + + factory _TextBlockDelta.fromJson(Map json) = + _$TextBlockDeltaImpl.fromJson; + + @override + + /// The text delta. + String get text; + @override + + /// The type of content block. + String get type; + @override + @JsonKey(ignore: true) + _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => + throw _privateConstructorUsedError; +} + +Block _$BlockFromJson(Map json) { + switch (json['type']) { + case 'text': + return TextBlock.fromJson(json); + case 'image': + return ImageBlock.fromJson(json); + + default: + throw CheckedFromJsonException( + json, 'type', 'Block', 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$Block { + /// The type of content block. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? 
Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $BlockCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $BlockCopyWith<$Res> { + factory $BlockCopyWith(Block value, $Res Function(Block) then) = + _$BlockCopyWithImpl<$Res, Block>; + @useResult + $Res call({String type}); +} + +/// @nodoc +class _$BlockCopyWithImpl<$Res, $Val extends Block> + implements $BlockCopyWith<$Res> { + _$BlockCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$TextBlockImplCopyWith<$Res> implements $BlockCopyWith<$Res> { + factory _$$TextBlockImplCopyWith( + _$TextBlockImpl value, $Res Function(_$TextBlockImpl) then) = + __$$TextBlockImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String text, String type}); +} + +/// @nodoc +class __$$TextBlockImplCopyWithImpl<$Res> + extends _$BlockCopyWithImpl<$Res, _$TextBlockImpl> + implements _$$TextBlockImplCopyWith<$Res> { + __$$TextBlockImplCopyWithImpl( + _$TextBlockImpl _value, $Res Function(_$TextBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? text = null, + Object? type = null, + }) { + return _then(_$TextBlockImpl( + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$TextBlockImpl extends TextBlock { + const _$TextBlockImpl({required this.text, this.type = 'text'}) : super._(); + + factory _$TextBlockImpl.fromJson(Map json) => + _$$TextBlockImplFromJson(json); + + /// The text content. + @override + final String text; + + /// The type of content block. 
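+  /// Defaults to `'text'`.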
+ @override + @JsonKey() + final String type; + + @override + String toString() { + return 'Block.text(text: $text, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$TextBlockImpl && + (identical(other.text, text) || other.text == text) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, text, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$TextBlockImplCopyWith<_$TextBlockImpl> get copyWith => + __$$TextBlockImplCopyWithImpl<_$TextBlockImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + }) { + return text(this.text, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + }) { + return text?.call(this.text, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + required TResult orElse(), + }) { + if (text != null) { + return text(this.text, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + }) { + return text(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + }) { + return text?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + required TResult orElse(), + }) { + if (text != null) { + return text(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$TextBlockImplToJson( + this, + ); + } +} + +abstract class TextBlock extends Block { + const factory TextBlock({required final String text, final String type}) = + _$TextBlockImpl; + const TextBlock._() : super._(); + + factory TextBlock.fromJson(Map json) = + _$TextBlockImpl.fromJson; + + /// The text content. + String get text; + @override + + /// The type of content block. + String get type; + @override + @JsonKey(ignore: true) + _$$TextBlockImplCopyWith<_$TextBlockImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ImageBlockImplCopyWith<$Res> implements $BlockCopyWith<$Res> { + factory _$$ImageBlockImplCopyWith( + _$ImageBlockImpl value, $Res Function(_$ImageBlockImpl) then) = + __$$ImageBlockImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ImageBlockSource source, String type}); + + $ImageBlockSourceCopyWith<$Res> get source; +} + +/// @nodoc +class __$$ImageBlockImplCopyWithImpl<$Res> + extends _$BlockCopyWithImpl<$Res, _$ImageBlockImpl> + implements _$$ImageBlockImplCopyWith<$Res> { + __$$ImageBlockImplCopyWithImpl( + _$ImageBlockImpl _value, $Res Function(_$ImageBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? source = null, + Object? type = null, + }) { + return _then(_$ImageBlockImpl( + source: null == source + ? 
_value.source + : source // ignore: cast_nullable_to_non_nullable + as ImageBlockSource, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } + + @override + @pragma('vm:prefer-inline') + $ImageBlockSourceCopyWith<$Res> get source { + return $ImageBlockSourceCopyWith<$Res>(_value.source, (value) { + return _then(_value.copyWith(source: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ImageBlockImpl extends ImageBlock { + const _$ImageBlockImpl({required this.source, this.type = 'image'}) + : super._(); + + factory _$ImageBlockImpl.fromJson(Map json) => + _$$ImageBlockImplFromJson(json); + + /// The source of an image block. + @override + final ImageBlockSource source; + + /// The type of content block. + @override + @JsonKey() + final String type; + + @override + String toString() { + return 'Block.image(source: $source, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ImageBlockImpl && + (identical(other.source, source) || other.source == source) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, source, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ImageBlockImplCopyWith<_$ImageBlockImpl> get copyWith => + __$$ImageBlockImplCopyWithImpl<_$ImageBlockImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + }) { + return image(source, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + }) { + return image?.call(source, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + required TResult orElse(), + }) { + if (image != null) { + return image(source, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + }) { + return image(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + }) { + return image?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + required TResult orElse(), + }) { + if (image != null) { + return image(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ImageBlockImplToJson( + this, + ); + } +} + +abstract class ImageBlock extends Block { + const factory ImageBlock( + {required final ImageBlockSource source, + final String type}) = _$ImageBlockImpl; + const ImageBlock._() : super._(); + + factory ImageBlock.fromJson(Map json) = + _$ImageBlockImpl.fromJson; + + /// The source of an image block. + ImageBlockSource get source; + @override + + /// The type of content block. 
+ String get type; + @override + @JsonKey(ignore: true) + _$$ImageBlockImplCopyWith<_$ImageBlockImpl> get copyWith => + throw _privateConstructorUsedError; +} + +MessageStreamEvent _$MessageStreamEventFromJson(Map json) { + switch (json['type']) { + case 'message_start': + return MessageStartEvent.fromJson(json); + case 'message_delta': + return MessageDeltaEvent.fromJson(json); + case 'message_stop': + return MessageStopEvent.fromJson(json); + case 'content_block_start': + return ContentBlockStartEvent.fromJson(json); + case 'content_block_delta': + return ContentBlockDeltaEvent.fromJson(json); + case 'content_block_stop': + return ContentBlockStopEvent.fromJson(json); + case 'ping': + return PingEvent.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'type', 'MessageStreamEvent', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$MessageStreamEvent { + /// The type of a streaming event. + MessageStreamEventType get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? 
ping, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageStreamEventCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageStreamEventCopyWith<$Res> { + factory $MessageStreamEventCopyWith( + MessageStreamEvent value, $Res Function(MessageStreamEvent) then) = + _$MessageStreamEventCopyWithImpl<$Res, MessageStreamEvent>; + @useResult + $Res call({MessageStreamEventType type}); +} + +/// @nodoc +class _$MessageStreamEventCopyWithImpl<$Res, $Val extends MessageStreamEvent> + implements $MessageStreamEventCopyWith<$Res> { + _$MessageStreamEventCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$MessageStartEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$MessageStartEventImplCopyWith(_$MessageStartEventImpl value, + $Res Function(_$MessageStartEventImpl) then) = + __$$MessageStartEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({Message message, MessageStreamEventType type}); + + $MessageCopyWith<$Res> get message; +} + +/// @nodoc +class __$$MessageStartEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStartEventImpl> + implements _$$MessageStartEventImplCopyWith<$Res> { + __$$MessageStartEventImplCopyWithImpl(_$MessageStartEventImpl _value, + $Res Function(_$MessageStartEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? message = null, + Object? type = null, + }) { + return _then(_$MessageStartEventImpl( + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as Message, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } + + @override + @pragma('vm:prefer-inline') + $MessageCopyWith<$Res> get message { + return $MessageCopyWith<$Res>(_value.message, (value) { + return _then(_value.copyWith(message: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageStartEventImpl extends MessageStartEvent { + const _$MessageStartEventImpl({required this.message, required this.type}) + : super._(); + + factory _$MessageStartEventImpl.fromJson(Map json) => + _$$MessageStartEventImplFromJson(json); + + /// A message in a chat conversation. + @override + final Message message; + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.messageStart(message: $message, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageStartEventImpl && + (identical(other.message, message) || other.message == message) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, message, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => + __$$MessageStartEventImplCopyWithImpl<_$MessageStartEventImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return messageStart(message, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? 
Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return messageStart?.call(message, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) { + if (messageStart != null) { + return messageStart(message, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return messageStart(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return messageStart?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? 
ping, + required TResult orElse(), + }) { + if (messageStart != null) { + return messageStart(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageStartEventImplToJson( + this, + ); + } +} + +abstract class MessageStartEvent extends MessageStreamEvent { + const factory MessageStartEvent( + {required final Message message, + required final MessageStreamEventType type}) = _$MessageStartEventImpl; + const MessageStartEvent._() : super._(); + + factory MessageStartEvent.fromJson(Map json) = + _$MessageStartEventImpl.fromJson; + + /// A message in a chat conversation. + Message get message; + @override + + /// The type of a streaming event. + MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$MessageDeltaEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$MessageDeltaEventImplCopyWith(_$MessageDeltaEventImpl value, + $Res Function(_$MessageDeltaEventImpl) then) = + __$$MessageDeltaEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {MessageDelta delta, + MessageStreamEventType type, + MessageDeltaUsage usage}); + + $MessageDeltaCopyWith<$Res> get delta; + $MessageDeltaUsageCopyWith<$Res> get usage; +} + +/// @nodoc +class __$$MessageDeltaEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageDeltaEventImpl> + implements _$$MessageDeltaEventImplCopyWith<$Res> { + __$$MessageDeltaEventImplCopyWithImpl(_$MessageDeltaEventImpl _value, + $Res Function(_$MessageDeltaEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? delta = null, + Object? type = null, + Object? usage = null, + }) { + return _then(_$MessageDeltaEventImpl( + delta: null == delta + ? _value.delta + : delta // ignore: cast_nullable_to_non_nullable + as MessageDelta, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + usage: null == usage + ? _value.usage + : usage // ignore: cast_nullable_to_non_nullable + as MessageDeltaUsage, + )); + } + + @override + @pragma('vm:prefer-inline') + $MessageDeltaCopyWith<$Res> get delta { + return $MessageDeltaCopyWith<$Res>(_value.delta, (value) { + return _then(_value.copyWith(delta: value)); + }); + } + + @override + @pragma('vm:prefer-inline') + $MessageDeltaUsageCopyWith<$Res> get usage { + return $MessageDeltaUsageCopyWith<$Res>(_value.usage, (value) { + return _then(_value.copyWith(usage: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaEventImpl extends MessageDeltaEvent { + const _$MessageDeltaEventImpl( + {required this.delta, required this.type, required this.usage}) + : super._(); + + factory _$MessageDeltaEventImpl.fromJson(Map json) => + _$$MessageDeltaEventImplFromJson(json); + + /// A delta in a streaming message. + @override + final MessageDelta delta; + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. 
As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + @override + final MessageDeltaUsage usage; + + @override + String toString() { + return 'MessageStreamEvent.messageDelta(delta: $delta, type: $type, usage: $usage)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaEventImpl && + (identical(other.delta, delta) || other.delta == delta) && + (identical(other.type, type) || other.type == type) && + (identical(other.usage, usage) || other.usage == usage)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, delta, type, usage); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => + __$$MessageDeltaEventImplCopyWithImpl<_$MessageDeltaEventImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return messageDelta(delta, type, usage); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return messageDelta?.call(delta, type, usage); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? 
ping, + required TResult orElse(), + }) { + if (messageDelta != null) { + return messageDelta(delta, type, usage); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return messageDelta(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return messageDelta?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (messageDelta != null) { + return messageDelta(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageDeltaEventImplToJson( + this, + ); + } +} + +abstract class MessageDeltaEvent extends MessageStreamEvent { + const factory MessageDeltaEvent( + {required final MessageDelta delta, + required final MessageStreamEventType type, + required final MessageDeltaUsage usage}) = _$MessageDeltaEventImpl; + const MessageDeltaEvent._() : super._(); + + factory MessageDeltaEvent.fromJson(Map json) = + _$MessageDeltaEventImpl.fromJson; + + /// A delta in a streaming message. + MessageDelta get delta; + @override + + /// The type of a streaming event. + MessageStreamEventType get type; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. 
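+  ///
+  /// A minimal sketch of reading this cumulative count while streaming
+  /// (`stream` here is an assumption, taken to be a `Stream<MessageStreamEvent>`
+  /// obtained from a streaming call; it is not part of this class):
+  ///
+  /// ```dart
+  /// var outputTokens = 0;
+  /// await for (final event in stream) {
+  ///   if (event is MessageDeltaEvent) {
+  ///     // The count is cumulative, so overwrite rather than add.
+  ///     outputTokens = event.usage.outputTokens;
+  ///   }
+  /// }
+  /// ```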
+ MessageDeltaUsage get usage; + @override + @JsonKey(ignore: true) + _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$MessageStopEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$MessageStopEventImplCopyWith(_$MessageStopEventImpl value, + $Res Function(_$MessageStopEventImpl) then) = + __$$MessageStopEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({MessageStreamEventType type}); +} + +/// @nodoc +class __$$MessageStopEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStopEventImpl> + implements _$$MessageStopEventImplCopyWith<$Res> { + __$$MessageStopEventImplCopyWithImpl(_$MessageStopEventImpl _value, + $Res Function(_$MessageStopEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$MessageStopEventImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageStopEventImpl extends MessageStopEvent { + const _$MessageStopEventImpl({required this.type}) : super._(); + + factory _$MessageStopEventImpl.fromJson(Map json) => + _$$MessageStopEventImplFromJson(json); + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.messageStop(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageStopEventImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => + __$$MessageStopEventImplCopyWithImpl<_$MessageStopEventImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return messageStop(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? 
Function(MessageStreamEventType type)? ping, + }) { + return messageStop?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) { + if (messageStop != null) { + return messageStop(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return messageStop(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return messageStop?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (messageStop != null) { + return messageStop(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageStopEventImplToJson( + this, + ); + } +} + +abstract class MessageStopEvent extends MessageStreamEvent { + const factory MessageStopEvent({required final MessageStreamEventType type}) = + _$MessageStopEventImpl; + const MessageStopEvent._() : super._(); + + factory MessageStopEvent.fromJson(Map json) = + _$MessageStopEventImpl.fromJson; + + @override + + /// The type of a streaming event. 
+ MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ContentBlockStartEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$ContentBlockStartEventImplCopyWith( + _$ContentBlockStartEventImpl value, + $Res Function(_$ContentBlockStartEventImpl) then) = + __$$ContentBlockStartEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type}); +} + +/// @nodoc +class __$$ContentBlockStartEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStartEventImpl> + implements _$$ContentBlockStartEventImplCopyWith<$Res> { + __$$ContentBlockStartEventImplCopyWithImpl( + _$ContentBlockStartEventImpl _value, + $Res Function(_$ContentBlockStartEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? contentBlock = freezed, + Object? index = null, + Object? type = null, + }) { + return _then(_$ContentBlockStartEventImpl( + contentBlock: freezed == contentBlock + ? _value.contentBlock + : contentBlock // ignore: cast_nullable_to_non_nullable + as TextBlock, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { + const _$ContentBlockStartEventImpl( + {@JsonKey(name: 'content_block') required this.contentBlock, + required this.index, + required this.type}) + : super._(); + + factory _$ContentBlockStartEventImpl.fromJson(Map json) => + _$$ContentBlockStartEventImplFromJson(json); + + /// A block of text content. + @override + @JsonKey(name: 'content_block') + final TextBlock contentBlock; + + /// The index of the content block. + @override + final int index; + + /// The type of a streaming event. 
+ @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.contentBlockStart(contentBlock: $contentBlock, index: $index, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ContentBlockStartEventImpl && + const DeepCollectionEquality() + .equals(other.contentBlock, contentBlock) && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, + const DeepCollectionEquality().hash(contentBlock), index, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> + get copyWith => __$$ContentBlockStartEventImplCopyWithImpl< + _$ContentBlockStartEventImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return contentBlockStart(contentBlock, index, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return contentBlockStart?.call(contentBlock, index, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? 
ping, + required TResult orElse(), + }) { + if (contentBlockStart != null) { + return contentBlockStart(contentBlock, index, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return contentBlockStart(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return contentBlockStart?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (contentBlockStart != null) { + return contentBlockStart(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ContentBlockStartEventImplToJson( + this, + ); + } +} + +abstract class ContentBlockStartEvent extends MessageStreamEvent { + const factory ContentBlockStartEvent( + {@JsonKey(name: 'content_block') required final TextBlock contentBlock, + required final int index, + required final MessageStreamEventType + type}) = _$ContentBlockStartEventImpl; + const ContentBlockStartEvent._() : super._(); + + factory ContentBlockStartEvent.fromJson(Map json) = + _$ContentBlockStartEventImpl.fromJson; + + /// A block of text content. + @JsonKey(name: 'content_block') + TextBlock get contentBlock; + + /// The index of the content block. + int get index; + @override + + /// The type of a streaming event. 
+ MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ContentBlockDeltaEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$ContentBlockDeltaEventImplCopyWith( + _$ContentBlockDeltaEventImpl value, + $Res Function(_$ContentBlockDeltaEventImpl) then) = + __$$ContentBlockDeltaEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({TextBlockDelta delta, int index, MessageStreamEventType type}); + + $TextBlockDeltaCopyWith<$Res> get delta; +} + +/// @nodoc +class __$$ContentBlockDeltaEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockDeltaEventImpl> + implements _$$ContentBlockDeltaEventImplCopyWith<$Res> { + __$$ContentBlockDeltaEventImplCopyWithImpl( + _$ContentBlockDeltaEventImpl _value, + $Res Function(_$ContentBlockDeltaEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? delta = null, + Object? index = null, + Object? type = null, + }) { + return _then(_$ContentBlockDeltaEventImpl( + delta: null == delta + ? _value.delta + : delta // ignore: cast_nullable_to_non_nullable + as TextBlockDelta, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } + + @override + @pragma('vm:prefer-inline') + $TextBlockDeltaCopyWith<$Res> get delta { + return $TextBlockDeltaCopyWith<$Res>(_value.delta, (value) { + return _then(_value.copyWith(delta: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { + const _$ContentBlockDeltaEventImpl( + {required this.delta, required this.index, required this.type}) + : super._(); + + factory _$ContentBlockDeltaEventImpl.fromJson(Map json) => + _$$ContentBlockDeltaEventImplFromJson(json); + + /// A delta in a streaming text block. + @override + final TextBlockDelta delta; + + /// The index of the content block. + @override + final int index; + + /// The type of a streaming event. 
+ @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.contentBlockDelta(delta: $delta, index: $index, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ContentBlockDeltaEventImpl && + (identical(other.delta, delta) || other.delta == delta) && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, delta, index, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> + get copyWith => __$$ContentBlockDeltaEventImplCopyWithImpl< + _$ContentBlockDeltaEventImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return contentBlockDelta(delta, index, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return contentBlockDelta?.call(delta, index, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? 
ping, + required TResult orElse(), + }) { + if (contentBlockDelta != null) { + return contentBlockDelta(delta, index, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return contentBlockDelta(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return contentBlockDelta?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (contentBlockDelta != null) { + return contentBlockDelta(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ContentBlockDeltaEventImplToJson( + this, + ); + } +} + +abstract class ContentBlockDeltaEvent extends MessageStreamEvent { + const factory ContentBlockDeltaEvent( + {required final TextBlockDelta delta, + required final int index, + required final MessageStreamEventType type}) = + _$ContentBlockDeltaEventImpl; + const ContentBlockDeltaEvent._() : super._(); + + factory ContentBlockDeltaEvent.fromJson(Map json) = + _$ContentBlockDeltaEventImpl.fromJson; + + /// A delta in a streaming text block. + TextBlockDelta get delta; + + /// The index of the content block. + int get index; + @override + + /// The type of a streaming event. 
+ MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ContentBlockStopEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$ContentBlockStopEventImplCopyWith( + _$ContentBlockStopEventImpl value, + $Res Function(_$ContentBlockStopEventImpl) then) = + __$$ContentBlockStopEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({int index, MessageStreamEventType type}); +} + +/// @nodoc +class __$$ContentBlockStopEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStopEventImpl> + implements _$$ContentBlockStopEventImplCopyWith<$Res> { + __$$ContentBlockStopEventImplCopyWithImpl(_$ContentBlockStopEventImpl _value, + $Res Function(_$ContentBlockStopEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? index = null, + Object? type = null, + }) { + return _then(_$ContentBlockStopEventImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ContentBlockStopEventImpl extends ContentBlockStopEvent { + const _$ContentBlockStopEventImpl({required this.index, required this.type}) + : super._(); + + factory _$ContentBlockStopEventImpl.fromJson(Map json) => + _$$ContentBlockStopEventImplFromJson(json); + + /// The index of the content block. + @override + final int index; + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.contentBlockStop(index: $index, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ContentBlockStopEventImpl && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, index, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> + get copyWith => __$$ContentBlockStopEventImplCopyWithImpl< + _$ContentBlockStopEventImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return contentBlockStop(index, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? 
Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return contentBlockStop?.call(index, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) { + if (contentBlockStop != null) { + return contentBlockStop(index, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return contentBlockStop(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return contentBlockStop?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? 
ping, + required TResult orElse(), + }) { + if (contentBlockStop != null) { + return contentBlockStop(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ContentBlockStopEventImplToJson( + this, + ); + } +} + +abstract class ContentBlockStopEvent extends MessageStreamEvent { + const factory ContentBlockStopEvent( + {required final int index, + required final MessageStreamEventType type}) = + _$ContentBlockStopEventImpl; + const ContentBlockStopEvent._() : super._(); + + factory ContentBlockStopEvent.fromJson(Map json) = + _$ContentBlockStopEventImpl.fromJson; + + /// The index of the content block. + int get index; + @override + + /// The type of a streaming event. + MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$PingEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$PingEventImplCopyWith( + _$PingEventImpl value, $Res Function(_$PingEventImpl) then) = + __$$PingEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({MessageStreamEventType type}); +} + +/// @nodoc +class __$$PingEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$PingEventImpl> + implements _$$PingEventImplCopyWith<$Res> { + __$$PingEventImplCopyWithImpl( + _$PingEventImpl _value, $Res Function(_$PingEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$PingEventImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$PingEventImpl extends PingEvent { + const _$PingEventImpl({required this.type}) : super._(); + + factory _$PingEventImpl.fromJson(Map json) => + _$$PingEventImplFromJson(json); + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.ping(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$PingEventImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => + __$$PingEventImplCopyWithImpl<_$PingEventImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') TextBlock contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return ping(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? 
Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return ping?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function( + TextBlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) { + if (ping != null) { + return ping(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return ping(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return ping?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (ping != null) { + return ping(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$PingEventImplToJson( + this, + ); + } +} + +abstract class PingEvent extends MessageStreamEvent { + const factory PingEvent({required final MessageStreamEventType type}) = + _$PingEventImpl; + const PingEvent._() : super._(); + + factory PingEvent.fromJson(Map json) = + _$PingEventImpl.fromJson; + + @override + + /// The type of a streaming event. 
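The freezed union above gives exhaustive pattern matching over the seven streaming event types via `when`/`map`. As a rough sketch of how a caller might fold a stream of these events into the generated text (the `Stream<MessageStreamEvent>` source is left abstract; an `AnthropicClient.createMessageStream(request: ...)` entry point and the package's top-level export are assumptions, not something defined in this file):

```dart
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; // assumed top-level export

/// Collects the text deltas of a streamed message into a single string.
/// `events` can be any Stream<MessageStreamEvent>, e.g. one obtained from an
/// (assumed) AnthropicClient.createMessageStream(request: ...) call.
Future<String> collectStreamedText(Stream<MessageStreamEvent> events) async {
  final buffer = StringBuffer();
  await for (final event in events) {
    event.when(
      messageStart: (message, type) {},
      messageDelta: (delta, type, usage) {},
      messageStop: (type) {},
      contentBlockStart: (contentBlock, index, type) {},
      contentBlockDelta: (delta, index, type) {
        // TextBlockDelta.text carries the incremental text chunk.
        buffer.write(delta.text);
      },
      contentBlockStop: (index, type) {},
      ping: (type) {},
    );
  }
  return buffer.toString();
}
```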
+ MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => + throw _privateConstructorUsedError; +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart new file mode 100644 index 00000000..b08b072f --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart @@ -0,0 +1,404 @@ +// GENERATED CODE - DO NOT MODIFY BY HAND + +// ignore_for_file: prefer_final_parameters, require_trailing_commas, non_constant_identifier_names, unnecessary_null_checks + +part of 'schema.dart'; + +// ************************************************************************** +// JsonSerializableGenerator +// ************************************************************************** + +_$CreateMessageRequestImpl _$$CreateMessageRequestImplFromJson( + Map json) => + _$CreateMessageRequestImpl( + model: const _ModelConverter().fromJson(json['model']), + messages: (json['messages'] as List) + .map((e) => Message.fromJson(e as Map)) + .toList(), + maxTokens: (json['max_tokens'] as num).toInt(), + metadata: json['metadata'] == null + ? null + : CreateMessageRequestMetadata.fromJson( + json['metadata'] as Map), + stopSequences: (json['stop_sequences'] as List?) + ?.map((e) => e as String) + .toList(), + system: json['system'] as String?, + temperature: (json['temperature'] as num?)?.toDouble(), + topK: (json['top_k'] as num?)?.toInt(), + topP: (json['top_p'] as num?)?.toDouble(), + stream: json['stream'] as bool? ?? false, + ); + +Map _$$CreateMessageRequestImplToJson( + _$CreateMessageRequestImpl instance) { + final val = { + 'model': const _ModelConverter().toJson(instance.model), + 'messages': instance.messages.map((e) => e.toJson()).toList(), + 'max_tokens': instance.maxTokens, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('metadata', instance.metadata?.toJson()); + writeNotNull('stop_sequences', instance.stopSequences); + writeNotNull('system', instance.system); + writeNotNull('temperature', instance.temperature); + writeNotNull('top_k', instance.topK); + writeNotNull('top_p', instance.topP); + val['stream'] = instance.stream; + return val; +} + +_$ModelEnumerationImpl _$$ModelEnumerationImplFromJson( + Map json) => + _$ModelEnumerationImpl( + $enumDecode(_$ModelsEnumMap, json['value']), + $type: json['runtimeType'] as String?, + ); + +Map _$$ModelEnumerationImplToJson( + _$ModelEnumerationImpl instance) => + { + 'value': _$ModelsEnumMap[instance.value]!, + 'runtimeType': instance.$type, + }; + +const _$ModelsEnumMap = { + Models.claude3Opus20240229: 'claude-3-opus-20240229', + Models.claude3Sonnet20240229: 'claude-3-sonnet-20240229', + Models.claude3Haiku20240307: 'claude-3-haiku-20240307', + Models.claude21: 'claude-2.1', + Models.claude20: 'claude-2.0', + Models.claudeInstant12: 'claude-instant-1.2', +}; + +_$ModelStringImpl _$$ModelStringImplFromJson(Map json) => + _$ModelStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$ModelStringImplToJson(_$ModelStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$CreateMessageRequestMetadataImpl _$$CreateMessageRequestMetadataImplFromJson( + Map json) => + _$CreateMessageRequestMetadataImpl( + userId: json['user_id'] as String?, + ); + +Map _$$CreateMessageRequestMetadataImplToJson( + _$CreateMessageRequestMetadataImpl 
instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('user_id', instance.userId); + return val; +} + +_$MessageImpl _$$MessageImplFromJson(Map json) => + _$MessageImpl( + id: json['id'] as String?, + content: const _MessageContentConverter().fromJson(json['content']), + role: $enumDecode(_$MessageRoleEnumMap, json['role']), + model: json['model'] as String?, + stopReason: $enumDecodeNullable(_$StopReasonEnumMap, json['stop_reason'], + unknownValue: JsonKey.nullForUndefinedEnumValue), + stopSequence: json['stop_sequence'] as String?, + type: json['type'] as String?, + usage: json['usage'] == null + ? null + : Usage.fromJson(json['usage'] as Map), + ); + +Map _$$MessageImplToJson(_$MessageImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('id', instance.id); + val['content'] = const _MessageContentConverter().toJson(instance.content); + val['role'] = _$MessageRoleEnumMap[instance.role]!; + writeNotNull('model', instance.model); + writeNotNull('stop_reason', _$StopReasonEnumMap[instance.stopReason]); + writeNotNull('stop_sequence', instance.stopSequence); + writeNotNull('type', instance.type); + writeNotNull('usage', instance.usage?.toJson()); + return val; +} + +const _$MessageRoleEnumMap = { + MessageRole.user: 'user', + MessageRole.assistant: 'assistant', +}; + +const _$StopReasonEnumMap = { + StopReason.endTurn: 'end_turn', + StopReason.maxTokens: 'max_tokens', + StopReason.stopSequence: 'stop_sequence', +}; + +_$MessageContentListBlockImpl _$$MessageContentListBlockImplFromJson( + Map json) => + _$MessageContentListBlockImpl( + (json['value'] as List) + .map((e) => Block.fromJson(e as Map)) + .toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$MessageContentListBlockImplToJson( + _$MessageContentListBlockImpl instance) => + { + 'value': instance.value.map((e) => e.toJson()).toList(), + 'runtimeType': instance.$type, + }; + +_$MessageContentStringImpl _$$MessageContentStringImplFromJson( + Map json) => + _$MessageContentStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$MessageContentStringImplToJson( + _$MessageContentStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$ImageBlockSourceImpl _$$ImageBlockSourceImplFromJson( + Map json) => + _$ImageBlockSourceImpl( + data: json['data'] as String, + mediaType: + $enumDecode(_$ImageBlockSourceMediaTypeEnumMap, json['media_type']), + type: $enumDecode(_$ImageBlockSourceTypeEnumMap, json['type']), + ); + +Map _$$ImageBlockSourceImplToJson( + _$ImageBlockSourceImpl instance) => + { + 'data': instance.data, + 'media_type': _$ImageBlockSourceMediaTypeEnumMap[instance.mediaType]!, + 'type': _$ImageBlockSourceTypeEnumMap[instance.type]!, + }; + +const _$ImageBlockSourceMediaTypeEnumMap = { + ImageBlockSourceMediaType.imageJpeg: 'image/jpeg', + ImageBlockSourceMediaType.imagePng: 'image/png', + ImageBlockSourceMediaType.imageGif: 'image/gif', + ImageBlockSourceMediaType.imageWebp: 'image/webp', +}; + +const _$ImageBlockSourceTypeEnumMap = { + ImageBlockSourceType.base64: 'base64', +}; + +_$UsageImpl _$$UsageImplFromJson(Map json) => _$UsageImpl( + inputTokens: (json['input_tokens'] as num).toInt(), + outputTokens: (json['output_tokens'] as num).toInt(), + ); + +Map _$$UsageImplToJson(_$UsageImpl instance) => + { + 'input_tokens': instance.inputTokens, 
+ 'output_tokens': instance.outputTokens, + }; + +_$MessageDeltaImpl _$$MessageDeltaImplFromJson(Map json) => + _$MessageDeltaImpl( + stopReason: $enumDecodeNullable(_$StopReasonEnumMap, json['stop_reason'], + unknownValue: JsonKey.nullForUndefinedEnumValue), + stopSequence: json['stop_sequence'] as String?, + ); + +Map _$$MessageDeltaImplToJson(_$MessageDeltaImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('stop_reason', _$StopReasonEnumMap[instance.stopReason]); + writeNotNull('stop_sequence', instance.stopSequence); + return val; +} + +_$MessageDeltaUsageImpl _$$MessageDeltaUsageImplFromJson( + Map json) => + _$MessageDeltaUsageImpl( + outputTokens: (json['output_tokens'] as num).toInt(), + ); + +Map _$$MessageDeltaUsageImplToJson( + _$MessageDeltaUsageImpl instance) => + { + 'output_tokens': instance.outputTokens, + }; + +_$TextBlockDeltaImpl _$$TextBlockDeltaImplFromJson(Map json) => + _$TextBlockDeltaImpl( + text: json['text'] as String, + type: json['type'] as String, + ); + +Map _$$TextBlockDeltaImplToJson( + _$TextBlockDeltaImpl instance) => + { + 'text': instance.text, + 'type': instance.type, + }; + +_$TextBlockImpl _$$TextBlockImplFromJson(Map json) => + _$TextBlockImpl( + text: json['text'] as String, + type: json['type'] as String? ?? 'text', + ); + +Map _$$TextBlockImplToJson(_$TextBlockImpl instance) => + { + 'text': instance.text, + 'type': instance.type, + }; + +_$ImageBlockImpl _$$ImageBlockImplFromJson(Map json) => + _$ImageBlockImpl( + source: ImageBlockSource.fromJson(json['source'] as Map), + type: json['type'] as String? ?? 'image', + ); + +Map _$$ImageBlockImplToJson(_$ImageBlockImpl instance) => + { + 'source': instance.source.toJson(), + 'type': instance.type, + }; + +_$MessageStartEventImpl _$$MessageStartEventImplFromJson( + Map json) => + _$MessageStartEventImpl( + message: Message.fromJson(json['message'] as Map), + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + ); + +Map _$$MessageStartEventImplToJson( + _$MessageStartEventImpl instance) => + { + 'message': instance.message.toJson(), + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + }; + +const _$MessageStreamEventTypeEnumMap = { + MessageStreamEventType.messageStart: 'message_start', + MessageStreamEventType.messageDelta: 'message_delta', + MessageStreamEventType.messageStop: 'message_stop', + MessageStreamEventType.contentBlockStart: 'content_block_start', + MessageStreamEventType.contentBlockDelta: 'content_block_delta', + MessageStreamEventType.contentBlockStop: 'content_block_stop', + MessageStreamEventType.ping: 'ping', +}; + +_$MessageDeltaEventImpl _$$MessageDeltaEventImplFromJson( + Map json) => + _$MessageDeltaEventImpl( + delta: MessageDelta.fromJson(json['delta'] as Map), + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + usage: MessageDeltaUsage.fromJson(json['usage'] as Map), + ); + +Map _$$MessageDeltaEventImplToJson( + _$MessageDeltaEventImpl instance) => + { + 'delta': instance.delta.toJson(), + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + 'usage': instance.usage.toJson(), + }; + +_$MessageStopEventImpl _$$MessageStopEventImplFromJson( + Map json) => + _$MessageStopEventImpl( + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + ); + +Map _$$MessageStopEventImplToJson( + _$MessageStopEventImpl instance) => + { + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + }; + +_$ContentBlockStartEventImpl 
_$$ContentBlockStartEventImplFromJson( + Map json) => + _$ContentBlockStartEventImpl( + contentBlock: + TextBlock.fromJson(json['content_block'] as Map), + index: (json['index'] as num).toInt(), + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + ); + +Map _$$ContentBlockStartEventImplToJson( + _$ContentBlockStartEventImpl instance) => + { + 'content_block': instance.contentBlock.toJson(), + 'index': instance.index, + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + }; + +_$ContentBlockDeltaEventImpl _$$ContentBlockDeltaEventImplFromJson( + Map json) => + _$ContentBlockDeltaEventImpl( + delta: TextBlockDelta.fromJson(json['delta'] as Map), + index: (json['index'] as num).toInt(), + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + ); + +Map _$$ContentBlockDeltaEventImplToJson( + _$ContentBlockDeltaEventImpl instance) => + { + 'delta': instance.delta.toJson(), + 'index': instance.index, + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + }; + +_$ContentBlockStopEventImpl _$$ContentBlockStopEventImplFromJson( + Map json) => + _$ContentBlockStopEventImpl( + index: (json['index'] as num).toInt(), + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + ); + +Map _$$ContentBlockStopEventImplToJson( + _$ContentBlockStopEventImpl instance) => + { + 'index': instance.index, + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + }; + +_$PingEventImpl _$$PingEventImplFromJson(Map json) => + _$PingEventImpl( + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + ); + +Map _$$PingEventImplToJson(_$PingEventImpl instance) => + { + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + }; diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart new file mode 100644 index 00000000..331c6207 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart @@ -0,0 +1,28 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// ENUM: StopReason +// ========================================== + +/// The reason that we stopped. +/// +/// This may be one the following values: +/// +/// - `"end_turn"`: the model reached a natural stopping point +/// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum +/// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated +/// +/// In non-streaming mode this value is always non-null. In streaming mode, it is +/// null in the `message_start` event and non-null otherwise. 
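The doc comment above explains when each stop reason occurs. A minimal sketch of acting on it, assuming `message` is a `Message` returned by the Messages API and that the enum declared below is exported by the package:

```dart
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; // assumed top-level export

/// Maps the (nullable) stop reason of a message to a short human-readable note.
String describeStop(Message message) {
  return switch (message.stopReason) {
    StopReason.endTurn => 'The model reached a natural stopping point.',
    StopReason.maxTokens => 'Generation stopped at the max_tokens limit.',
    StopReason.stopSequence =>
      'Stopped on custom sequence: ${message.stopSequence}',
    null => 'No stop reason yet (e.g. a streaming message_start).',
  };
}
```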
+enum StopReason { + @JsonValue('end_turn') + endTurn, + @JsonValue('max_tokens') + maxTokens, + @JsonValue('stop_sequence') + stopSequence, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart new file mode 100644 index 00000000..fa05ffce --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart @@ -0,0 +1,44 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: TextBlockDelta +// ========================================== + +/// A delta in a streaming text block. +@freezed +class TextBlockDelta with _$TextBlockDelta { + const TextBlockDelta._(); + + /// Factory constructor for TextBlockDelta + const factory TextBlockDelta({ + /// The text delta. + required String text, + + /// The type of content block. + required String type, + }) = _TextBlockDelta; + + /// Object construction from a JSON representation + factory TextBlockDelta.fromJson(Map json) => + _$TextBlockDeltaFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['text', 'type']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'text': text, + 'type': type, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart new file mode 100644 index 00000000..37f3d39d --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart @@ -0,0 +1,54 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: Usage +// ========================================== + +/// Billing and rate-limit usage. +/// +/// Anthropic's API bills and rate-limits by token counts, as tokens represent the +/// underlying cost to our systems. +/// +/// Under the hood, the API transforms requests into a format suitable for the +/// model. The model's output then goes through a parsing stage before becoming an +/// API response. As a result, the token counts in `usage` will not match one-to-one +/// with the exact visible content of an API request or response. +/// +/// For example, `output_tokens` will be non-zero, even for an empty string response +/// from Claude. +@freezed +class Usage with _$Usage { + const Usage._(); + + /// Factory constructor for Usage + const factory Usage({ + /// The number of input tokens which were used. + @JsonKey(name: 'input_tokens') required int inputTokens, + + /// The number of output tokens which were used. + @JsonKey(name: 'output_tokens') required int outputTokens, + }) = _Usage; + + /// Object construction from a JSON representation + factory Usage.fromJson(Map json) => _$UsageFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['input_tokens', 'output_tokens']; + + /// Perform validations on the schema property values + String? 
validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'input_tokens': inputTokens,
+      'output_tokens': outputTokens,
+    };
+  }
+}
diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart
new file mode 100644
index 00000000..99555ca4
--- /dev/null
+++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart
@@ -0,0 +1,4 @@
+export 'http_client_stub.dart'
+    if (dart.library.io) 'http_client_io.dart'
+    if (dart.library.js) 'http_client_html.dart'
+    if (dart.library.html) 'http_client_html.dart';
diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart
new file mode 100644
index 00000000..59abc229
--- /dev/null
+++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart
@@ -0,0 +1,18 @@
+import 'package:fetch_client/fetch_client.dart' as fetch;
+import 'package:http/http.dart' as http;
+import 'package:http/retry.dart';
+
+/// Creates a FetchClient with a retry policy.
+http.Client createDefaultHttpClient() {
+  return RetryClient(fetch.FetchClient(mode: fetch.RequestMode.cors));
+}
+
+/// Middleware for HTTP requests.
+Future<http.BaseRequest> onRequestHandler(final http.BaseRequest request) {
+  // If the request is bigger than 60KiB, set persistentConnection to false
+  // Ref: https://github.com/Zekfad/fetch_client#large-payload
+  if ((request.contentLength ?? 0) > 61440) {
+    request.persistentConnection = false;
+  }
+  return Future.value(request);
+}
diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart
new file mode 100644
index 00000000..0b24e7db
--- /dev/null
+++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart
@@ -0,0 +1,12 @@
+import 'package:http/http.dart' as http;
+import 'package:http/retry.dart';
+
+/// Creates an IOClient with a retry policy.
+http.Client createDefaultHttpClient() {
+  return RetryClient(http.Client());
+}
+
+/// Middleware for HTTP requests.
+Future<http.BaseRequest> onRequestHandler(final http.BaseRequest request) {
+  return Future.value(request);
+}
diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart
new file mode 100644
index 00000000..2668d1ac
--- /dev/null
+++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart
@@ -0,0 +1,10 @@
+import 'package:http/http.dart' as http;
+
+/// Creates a default HTTP client for the current platform.
+http.Client createDefaultHttpClient() => throw UnsupportedError(
+      'Cannot create a client without dart:html or dart:io.',
+    );
+
+/// Middleware for HTTP requests.
+Future<http.BaseRequest> onRequestHandler(final http.BaseRequest request) =>
+    throw UnsupportedError('stub');
diff --git a/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml b/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml
new file mode 100644
index 00000000..a3f60e70
--- /dev/null
+++ b/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml
@@ -0,0 +1,562 @@
+openapi: 3.0.3
+
+info:
+  title: Anthropic API
+  description: API Spec for Anthropic API. Please see https://docs.anthropic.com/en/api for more details.
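The conditional export above selects an IO- or fetch-based `createDefaultHttpClient()` per platform. A hedged sketch of overriding that default with a custom retrying client follows; the `AnthropicClient` constructor and its `apiKey`/`client` parameters are assumptions modelled on the sibling `openai_dart` package, not confirmed by this diff:

```dart
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; // assumed top-level export
import 'package:http/http.dart' as http;
import 'package:http/retry.dart';

void main() {
  // Wrap a plain http.Client in a RetryClient with more retries than the
  // default produced by createDefaultHttpClient().
  final client = AnthropicClient(
    apiKey: 'my-api-key', // placeholder
    client: RetryClient(http.Client(), retries: 5),
  );
  // ... issue requests with `client` ...
}
```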
+ version: "1" + +servers: + - url: https://api.anthropic.com/v1 + +tags: + - name: Messages + description: Send a structured list of input messages with text and/or image content, and the model will generate the next message in the conversation. + +paths: + /messages: + post: + operationId: createMessage + tags: + - Messages + summary: Create a Message + description: | + Send a structured list of input messages with text and/or image content, and the + model will generate the next message in the conversation. + + The Messages API can be used for either single queries or stateless multi-turn + conversations. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateMessageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Message" +components: + securitySchemes: + ApiKeyAuth: + type: apiKey + in: header + name: x-api-key + + schemas: + CreateMessageRequest: + type: object + description: The request parameters for creating a message. + properties: + model: + title: Model + description: | + The model that will complete your prompt. + + See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + details and options. + example: "claude-3-opus-20240229" + anyOf: + - type: string + description: The ID of the model to use for this request. + - type: string + title: Models + description: | + Available models. Mind that the list may not be exhaustive nor up-to-date. + enum: + - claude-3-opus-20240229 + - claude-3-sonnet-20240229 + - claude-3-haiku-20240307 + - claude-2.1 + - claude-2.0 + - claude-instant-1.2 + messages: + type: array + description: | + Input messages. + + Our models are trained to operate on alternating `user` and `assistant` + conversational turns. When creating a new `Message`, you specify the prior + conversational turns with the `messages` parameter, and the model then generates + the next `Message` in the conversation. + + Each input message must be an object with a `role` and `content`. You can + specify a single `user`-role message, or you can include multiple `user` and + `assistant` messages. The first message must always use the `user` role. + + If the final message uses the `assistant` role, the response content will + continue immediately from the content in that message. This can be used to + constrain part of the model's response. + + Example with a single `user` message: + + ```json + [{ "role": "user", "content": "Hello, Claude" }] + ``` + + Example with multiple conversational turns: + + ```json + [ + { "role": "user", "content": "Hello there." }, + { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + { "role": "user", "content": "Can you explain LLMs in plain English?" } + ] + ``` + + Example with a partially-filled response from Claude: + + ```json + [ + { + "role": "user", + "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + }, + { "role": "assistant", "content": "The best answer is (" } + ] + ``` + + Each input message `content` may be either a single `string` or an array of + content blocks, where each block has a specific `type`. Using a `string` for + `content` is shorthand for an array of one content block of type `"text"`. 
The + following input messages are equivalent: + + ```json + { "role": "user", "content": "Hello, Claude" } + ``` + + ```json + { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + ``` + + Starting with Claude 3 models, you can also send image content blocks: + + ```json + { + "role": "user", + "content": [ + { + "type": "image", + "source": { + "type": "base64", + "media_type": "image/jpeg", + "data": "/9j/4AAQSkZJRg..." + } + }, + { "type": "text", "text": "What is in this image?" } + ] + } + ``` + + We currently support the `base64` source type for images, and the `image/jpeg`, + `image/png`, `image/gif`, and `image/webp` media types. + + See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + input examples. + + Note that if you want to include a + [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + the top-level `system` parameter — there is no `"system"` role for input + messages in the Messages API. + minItems: 1 + items: + $ref: '#/components/schemas/Message' + max_tokens: + type: integer + description: | + The maximum number of tokens to generate before stopping. + + Note that our models may stop _before_ reaching this maximum. This parameter + only specifies the absolute maximum number of tokens to generate. + + Different models have different maximum values for this parameter. See + [models](https://docs.anthropic.com/en/docs/models-overview) for details. + metadata: + $ref: '#/components/schemas/CreateMessageRequestMetadata' + stop_sequences: + type: array + description: | + Custom text sequences that will cause the model to stop generating. + + Our models will normally stop when they have naturally completed their turn, + which will result in a response `stop_reason` of `"end_turn"`. + + If you want the model to stop generating when it encounters custom strings of + text, you can use the `stop_sequences` parameter. If the model encounters one of + the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + and the response `stop_sequence` value will contain the matched stop sequence. + items: + type: string + system: + type: string + description: | + System prompt. + + A system prompt is a way of providing context and instructions to Claude, such + as specifying a particular goal or role. See our + [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + temperature: + type: number + description: | + Amount of randomness injected into the response. + + Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + for analytical / multiple choice, and closer to `1.0` for creative and + generative tasks. + + Note that even with `temperature` of `0.0`, the results will not be fully + deterministic. + top_k: + type: integer + description: | + Only sample from the top K options for each subsequent token. + + Used to remove "long tail" low probability responses. + [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + + Recommended for advanced use cases only. You usually only need to use + `temperature`. + top_p: + type: number + description: | + Use nucleus sampling. + + In nucleus sampling, we compute the cumulative distribution over all the options + for each subsequent token in decreasing probability order and cut it off once it + reaches a particular probability specified by `top_p`. You should either alter + `temperature` or `top_p`, but not both. 
+ + Recommended for advanced use cases only. You usually only need to use + `temperature`. + stream: + type: boolean + default: false + description: | + Whether to incrementally stream the response using server-sent events. + + See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + details. + required: + - model + - messages + - max_tokens + CreateMessageRequestMetadata: + type: object + description: An object describing metadata about the request. + properties: + user_id: + type: string + description: | + An external identifier for the user who is associated with the request. + + This should be a uuid, hash value, or other opaque identifier. Anthropic may use + this id to help detect abuse. Do not include any identifying information such as + name, email address, or phone number. + Message: + type: object + description: A message in a chat conversation. + properties: + id: + type: string + description: | + Unique object identifier. + + The format and length of IDs may change over time. + content: + description: The content of the message. + oneOf: + - type: string + description: A single text block. + - type: array + description: An array of content blocks. + items: + $ref: "#/components/schemas/Block" + role: + $ref: "#/components/schemas/MessageRole" + model: + type: string + description: The model that handled the request. + stop_reason: + $ref: "#/components/schemas/StopReason" + stop_sequence: + type: string + description: | + Which custom stop sequence was generated, if any. + + This value will be a non-null string if one of your custom stop sequences was + generated. + type: + type: string + description: | + Object type. + + For Messages, this is always `"message"`. + usage: + $ref: "#/components/schemas/Usage" + required: + - content + - role + MessageRole: + type: string + description: The role of the messages author. + enum: + - user + - assistant + Block: + description: A block of content in a message. + oneOf: + - $ref: "#/components/schemas/TextBlock" + - $ref: "#/components/schemas/ImageBlock" + discriminator: + propertyName: type + TextBlock: + type: object + description: A block of text content. + properties: + text: + type: string + description: The text content. + type: + type: string + description: The type of content block. + default: text + required: + - text + ImageBlock: + type: object + description: A block of image content. + properties: + source: + $ref: "#/components/schemas/ImageBlockSource" + type: + type: string + description: The type of content block. + default: image + required: + - source + ImageBlockSource: + type: object + description: The source of an image block. + properties: + data: + type: string + description: The base64-encoded image data. + media_type: + type: string + description: The media type of the image. + enum: + - image/jpeg + - image/png + - image/gif + - image/webp + type: + type: string + description: The type of image source. + enum: + - base64 + required: + - data + - media_type + - type + StopReason: + type: string + description: | + The reason that we stopped. + + This may be one the following values: + + - `"end_turn"`: the model reached a natural stopping point + - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + + In non-streaming mode this value is always non-null. In streaming mode, it is + null in the `message_start` event and non-null otherwise. 
+ enum: + - end_turn + - max_tokens + - stop_sequence + Usage: + type: object + description: | + Billing and rate-limit usage. + + Anthropic's API bills and rate-limits by token counts, as tokens represent the + underlying cost to our systems. + + Under the hood, the API transforms requests into a format suitable for the + model. The model's output then goes through a parsing stage before becoming an + API response. As a result, the token counts in `usage` will not match one-to-one + with the exact visible content of an API request or response. + + For example, `output_tokens` will be non-zero, even for an empty string response + from Claude. + properties: + input_tokens: + type: integer + description: The number of input tokens which were used. + output_tokens: + type: integer + description: The number of output tokens which were used. + required: + - input_tokens + - output_tokens + MessageStreamEvent: + type: object + description: A event in a streaming conversation. + oneOf: + - $ref: "#/components/schemas/MessageStartEvent" + - $ref: "#/components/schemas/MessageDeltaEvent" + - $ref: "#/components/schemas/MessageStopEvent" + - $ref: "#/components/schemas/ContentBlockStartEvent" + - $ref: "#/components/schemas/ContentBlockDeltaEvent" + - $ref: "#/components/schemas/ContentBlockStopEvent" + - $ref: "#/components/schemas/PingEvent" + discriminator: + propertyName: type + MessageStreamEventType: + type: string + description: The type of a streaming event. + enum: + - message_start + - message_delta + - message_stop + - content_block_start + - content_block_delta + - content_block_stop + - ping + MessageStartEvent: + type: object + description: A start event in a streaming conversation. + properties: + message: + $ref: "#/components/schemas/Message" + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - message + - type + MessageDeltaEvent: + type: object + description: A delta event in a streaming conversation. + properties: + delta: + $ref: "#/components/schemas/MessageDelta" + type: + $ref: "#/components/schemas/MessageStreamEventType" + usage: + $ref: "#/components/schemas/MessageDeltaUsage" + required: + - delta + - type + - usage + MessageDelta: + type: object + description: A delta in a streaming message. + properties: + stop_reason: + $ref: "#/components/schemas/StopReason" + stop_sequence: + type: string + description: | + Which custom stop sequence was generated, if any. + + This value will be a non-null string if one of your custom stop sequences was + generated. + MessageDeltaUsage: + type: object + description: | + Billing and rate-limit usage. + + Anthropic's API bills and rate-limits by token counts, as tokens represent the + underlying cost to our systems. + + Under the hood, the API transforms requests into a format suitable for the + model. The model's output then goes through a parsing stage before becoming an + API response. As a result, the token counts in `usage` will not match one-to-one + with the exact visible content of an API request or response. + + For example, `output_tokens` will be non-zero, even for an empty string response + from Claude. + properties: + output_tokens: + type: integer + description: The cumulative number of output tokens which were used. + required: + - output_tokens + MessageStopEvent: + type: object + description: A stop event in a streaming conversation. 
+ properties: + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - type + ContentBlockStartEvent: + type: object + description: A start event in a streaming content block. + properties: + content_block: + $ref: "#/components/schemas/TextBlock" + index: + type: integer + description: The index of the content block. + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - content_block + - index + - type + ContentBlockDeltaEvent: + type: object + description: A delta event in a streaming content block. + properties: + delta: + $ref: "#/components/schemas/TextBlockDelta" + index: + type: integer + description: The index of the content block. + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - delta + - index + - type + TextBlockDelta: + type: object + description: A delta in a streaming text block. + properties: + text: + type: string + description: The text delta. + type: + type: string + description: The type of content block. + default: text_delta + required: + - text + - type + ContentBlockStopEvent: + type: object + description: A stop event in a streaming content block. + properties: + index: + type: integer + description: The index of the content block. + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - index + - type + PingEvent: + type: object + description: A ping event in a streaming conversation. + properties: + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - type + +security: + - ApiKeyAuth: [ ] diff --git a/packages/anthropic_sdk_dart/oas/main.dart b/packages/anthropic_sdk_dart/oas/main.dart new file mode 100644 index 00000000..cdeaa32c --- /dev/null +++ b/packages/anthropic_sdk_dart/oas/main.dart @@ -0,0 +1,45 @@ +import 'dart:io'; + +import 'package:openapi_spec/openapi_spec.dart'; + +/// Generates Anthropic API client Dart code from the OpenAPI spec. +/// https://docs.anthropic.com/en/api +void main() async { + final spec = OpenApi.fromFile(source: 'oas/anthropic_openapi_curated.yaml'); + + await spec.generate( + package: 'Anthropic', + destination: 'lib/src/generated/', + replace: true, + schemaOptions: const SchemaGeneratorOptions( + onSchemaUnionFactoryName: _onSchemaUnionFactoryName, + ), + clientOptions: const ClientGeneratorOptions( + enabled: true, + ), + ); + + await Process.run( + 'dart', + ['run', 'build_runner', 'build', 'lib', '--delete-conflicting-outputs'], + ); +} + +String? 
_onSchemaUnionFactoryName( + final String union, + final String unionSubclass, +) => + switch (unionSubclass) { + 'ModelEnumeration' => 'model', + 'ModelString' => 'modelId', + 'MessageContentListBlock' => 'blocks', + 'MessageContentString' => 'text', + 'MessageStartEvent' => 'messageStart', + 'MessageDeltaEvent' => 'messageDelta', + 'MessageStopEvent' => 'messageStop', + 'ContentBlockStartEvent' => 'contentBlockStart', + 'ContentBlockDeltaEvent' => 'contentBlockDelta', + 'ContentBlockStopEvent' => 'contentBlockStop', + 'PingEvent' => 'ping', + _ => null, + }; diff --git a/packages/anthropic_sdk_dart/pubspec.lock b/packages/anthropic_sdk_dart/pubspec.lock new file mode 100644 index 00000000..1849898b --- /dev/null +++ b/packages/anthropic_sdk_dart/pubspec.lock @@ -0,0 +1,627 @@ +# Generated by pub +# See https://dart.dev/tools/pub/glossary#lockfile +packages: + _fe_analyzer_shared: + dependency: transitive + description: + name: _fe_analyzer_shared + sha256: "5aaf60d96c4cd00fe7f21594b5ad6a1b699c80a27420f8a837f4d68473ef09e3" + url: "https://pub.dev" + source: hosted + version: "68.0.0" + _macros: + dependency: transitive + description: dart + source: sdk + version: "0.1.0" + analyzer: + dependency: transitive + description: + name: analyzer + sha256: "21f1d3720fd1c70316399d5e2bccaebb415c434592d778cce8acb967b8578808" + url: "https://pub.dev" + source: hosted + version: "6.5.0" + args: + dependency: transitive + description: + name: args + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" + url: "https://pub.dev" + source: hosted + version: "2.5.0" + async: + dependency: transitive + description: + name: async + sha256: "947bfcf187f74dbc5e146c9eb9c0f10c9f8b30743e341481c1e2ed3ecc18c20c" + url: "https://pub.dev" + source: hosted + version: "2.11.0" + boolean_selector: + dependency: transitive + description: + name: boolean_selector + sha256: "6cfb5af12253eaf2b368f07bacc5a80d1301a071c73360d746b7f2e32d762c66" + url: "https://pub.dev" + source: hosted + version: "2.1.1" + build: + dependency: transitive + description: + name: build + sha256: "80184af8b6cb3e5c1c4ec6d8544d27711700bc3e6d2efad04238c7b5290889f0" + url: "https://pub.dev" + source: hosted + version: "2.4.1" + build_config: + dependency: transitive + description: + name: build_config + sha256: bf80fcfb46a29945b423bd9aad884590fb1dc69b330a4d4700cac476af1708d1 + url: "https://pub.dev" + source: hosted + version: "1.1.1" + build_daemon: + dependency: transitive + description: + name: build_daemon + sha256: "79b2aef6ac2ed00046867ed354c88778c9c0f029df8a20fe10b5436826721ef9" + url: "https://pub.dev" + source: hosted + version: "4.0.2" + build_resolvers: + dependency: transitive + description: + name: build_resolvers + sha256: "339086358431fa15d7eca8b6a36e5d783728cf025e559b834f4609a1fcfb7b0a" + url: "https://pub.dev" + source: hosted + version: "2.4.2" + build_runner: + dependency: "direct dev" + description: + name: build_runner + sha256: "1414d6d733a85d8ad2f1dfcb3ea7945759e35a123cb99ccfac75d0758f75edfa" + url: "https://pub.dev" + source: hosted + version: "2.4.10" + build_runner_core: + dependency: transitive + description: + name: build_runner_core + sha256: "4ae8ffe5ac758da294ecf1802f2aff01558d8b1b00616aa7538ea9a8a5d50799" + url: "https://pub.dev" + source: hosted + version: "7.3.0" + built_collection: + dependency: transitive + description: + name: built_collection + sha256: "376e3dd27b51ea877c28d525560790aee2e6fbb5f20e2f85d5081027d94e2100" + url: "https://pub.dev" + source: hosted + version: "5.1.1" + built_value: + 
dependency: transitive + description: + name: built_value + sha256: c7913a9737ee4007efedaffc968c049fd0f3d0e49109e778edc10de9426005cb + url: "https://pub.dev" + source: hosted + version: "8.9.2" + checked_yaml: + dependency: transitive + description: + name: checked_yaml + sha256: feb6bed21949061731a7a75fc5d2aa727cf160b91af9a3e464c5e3a32e28b5ff + url: "https://pub.dev" + source: hosted + version: "2.0.3" + clock: + dependency: transitive + description: + name: clock + sha256: cb6d7f03e1de671e34607e909a7213e31d7752be4fb66a86d29fe1eb14bfb5cf + url: "https://pub.dev" + source: hosted + version: "1.1.1" + code_builder: + dependency: transitive + description: + name: code_builder + sha256: f692079e25e7869c14132d39f223f8eec9830eb76131925143b2129c4bb01b37 + url: "https://pub.dev" + source: hosted + version: "4.10.0" + collection: + dependency: transitive + description: + name: collection + sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + url: "https://pub.dev" + source: hosted + version: "1.18.0" + convert: + dependency: transitive + description: + name: convert + sha256: "0f08b14755d163f6e2134cb58222dd25ea2a2ee8a195e53983d57c075324d592" + url: "https://pub.dev" + source: hosted + version: "3.1.1" + coverage: + dependency: transitive + description: + name: coverage + sha256: "3945034e86ea203af7a056d98e98e42a5518fff200d6e8e6647e1886b07e936e" + url: "https://pub.dev" + source: hosted + version: "1.8.0" + crypto: + dependency: transitive + description: + name: crypto + sha256: ff625774173754681d66daaf4a448684fb04b78f902da9cb3d308c19cc5e8bab + url: "https://pub.dev" + source: hosted + version: "3.0.3" + dart_style: + dependency: transitive + description: + name: dart_style + sha256: "99e066ce75c89d6b29903d788a7bb9369cf754f7b24bf70bf4b6d6d6b26853b9" + url: "https://pub.dev" + source: hosted + version: "2.3.6" + fetch_api: + dependency: transitive + description: + name: fetch_api + sha256: c0a76bfd84d4bc5a0733ab8b9fcee268d5069228790a6dd71fc2a6d1049223cc + url: "https://pub.dev" + source: hosted + version: "2.1.0" + fetch_client: + dependency: "direct main" + description: + name: fetch_client + sha256: "0b935eff9dfa84fb56bddadaf020c9aa61f02cbd6fa8dad914d6d343a838936d" + url: "https://pub.dev" + source: hosted + version: "1.1.1" + file: + dependency: transitive + description: + name: file + sha256: "5fc22d7c25582e38ad9a8515372cd9a93834027aacf1801cf01164dac0ffa08c" + url: "https://pub.dev" + source: hosted + version: "7.0.0" + fixnum: + dependency: transitive + description: + name: fixnum + sha256: "25517a4deb0c03aa0f32fd12db525856438902d9c16536311e76cdc57b31d7d1" + url: "https://pub.dev" + source: hosted + version: "1.1.0" + freezed: + dependency: "direct dev" + description: + name: freezed + sha256: "5606fb95d54f3bb241b3e12dcfb65ba7494c775c4cb458334eccceb07334a3d9" + url: "https://pub.dev" + source: hosted + version: "2.5.3" + freezed_annotation: + dependency: "direct main" + description: + name: freezed_annotation + sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + url: "https://pub.dev" + source: hosted + version: "2.4.1" + frontend_server_client: + dependency: transitive + description: + name: frontend_server_client + sha256: f64a0333a82f30b0cca061bc3d143813a486dc086b574bfb233b7c1372427694 + url: "https://pub.dev" + source: hosted + version: "4.0.0" + glob: + dependency: transitive + description: + name: glob + sha256: "0e7014b3b7d4dac1ca4d6114f82bf1782ee86745b9b42a92c9289c23d8a0ab63" + url: "https://pub.dev" + source: hosted + version: "2.1.2" + 
graphs: + dependency: transitive + description: + name: graphs + sha256: aedc5a15e78fc65a6e23bcd927f24c64dd995062bcd1ca6eda65a3cff92a4d19 + url: "https://pub.dev" + source: hosted + version: "2.3.1" + http: + dependency: "direct main" + description: + name: http + sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + url: "https://pub.dev" + source: hosted + version: "1.2.1" + http_multi_server: + dependency: transitive + description: + name: http_multi_server + sha256: "97486f20f9c2f7be8f514851703d0119c3596d14ea63227af6f7a481ef2b2f8b" + url: "https://pub.dev" + source: hosted + version: "3.2.1" + http_parser: + dependency: transitive + description: + name: http_parser + sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + url: "https://pub.dev" + source: hosted + version: "4.0.2" + intl: + dependency: transitive + description: + name: intl + sha256: d6f56758b7d3014a48af9701c085700aac781a92a87a62b1333b46d8879661cf + url: "https://pub.dev" + source: hosted + version: "0.19.0" + io: + dependency: transitive + description: + name: io + sha256: "2ec25704aba361659e10e3e5f5d672068d332fc8ac516421d483a11e5cbd061e" + url: "https://pub.dev" + source: hosted + version: "1.0.4" + js: + dependency: transitive + description: + name: js + sha256: c1b2e9b5ea78c45e1a0788d29606ba27dc5f71f019f32ca5140f61ef071838cf + url: "https://pub.dev" + source: hosted + version: "0.7.1" + json_annotation: + dependency: "direct main" + description: + name: json_annotation + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" + url: "https://pub.dev" + source: hosted + version: "4.9.0" + json_serializable: + dependency: "direct dev" + description: + name: json_serializable + sha256: ea1432d167339ea9b5bb153f0571d0039607a873d6e04e0117af043f14a1fd4b + url: "https://pub.dev" + source: hosted + version: "6.8.0" + logging: + dependency: transitive + description: + name: logging + sha256: "623a88c9594aa774443aa3eb2d41807a48486b5613e67599fb4c41c0ad47c340" + url: "https://pub.dev" + source: hosted + version: "1.2.0" + macros: + dependency: transitive + description: + name: macros + sha256: "12e8a9842b5a7390de7a781ec63d793527582398d16ea26c60fed58833c9ae79" + url: "https://pub.dev" + source: hosted + version: "0.1.0-main.0" + matcher: + dependency: transitive + description: + name: matcher + sha256: d2323aa2060500f906aa31a895b4030b6da3ebdcc5619d14ce1aada65cd161cb + url: "https://pub.dev" + source: hosted + version: "0.12.16+1" + meta: + dependency: "direct main" + description: + name: meta + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + url: "https://pub.dev" + source: hosted + version: "1.15.0" + mime: + dependency: transitive + description: + name: mime + sha256: "2e123074287cc9fd6c09de8336dae606d1ddb88d9ac47358826db698c176a1f2" + url: "https://pub.dev" + source: hosted + version: "1.0.5" + node_preamble: + dependency: transitive + description: + name: node_preamble + sha256: "6e7eac89047ab8a8d26cf16127b5ed26de65209847630400f9aefd7cd5c730db" + url: "https://pub.dev" + source: hosted + version: "2.0.2" + openapi_spec: + dependency: "direct dev" + description: + path: "." 
+ ref: "280ae0d41806eda25e923203d67bd6f4992a81e9" + resolved-ref: "280ae0d41806eda25e923203d67bd6f4992a81e9" + url: "https://github.com/davidmigloz/openapi_spec.git" + source: git + version: "0.7.8" + package_config: + dependency: transitive + description: + name: package_config + sha256: "1c5b77ccc91e4823a5af61ee74e6b972db1ef98c2ff5a18d3161c982a55448bd" + url: "https://pub.dev" + source: hosted + version: "2.1.0" + path: + dependency: transitive + description: + name: path + sha256: "087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" + url: "https://pub.dev" + source: hosted + version: "1.9.0" + pool: + dependency: transitive + description: + name: pool + sha256: "20fe868b6314b322ea036ba325e6fc0711a22948856475e2c2b6306e8ab39c2a" + url: "https://pub.dev" + source: hosted + version: "1.5.1" + pub_semver: + dependency: transitive + description: + name: pub_semver + sha256: "40d3ab1bbd474c4c2328c91e3a7df8c6dd629b79ece4c4bd04bee496a224fb0c" + url: "https://pub.dev" + source: hosted + version: "2.1.4" + pubspec_parse: + dependency: transitive + description: + name: pubspec_parse + sha256: c63b2876e58e194e4b0828fcb080ad0e06d051cb607a6be51a9e084f47cb9367 + url: "https://pub.dev" + source: hosted + version: "1.2.3" + recase: + dependency: transitive + description: + name: recase + sha256: e4eb4ec2dcdee52dcf99cb4ceabaffc631d7424ee55e56f280bc039737f89213 + url: "https://pub.dev" + source: hosted + version: "4.1.0" + shelf: + dependency: transitive + description: + name: shelf + sha256: ad29c505aee705f41a4d8963641f91ac4cee3c8fad5947e033390a7bd8180fa4 + url: "https://pub.dev" + source: hosted + version: "1.4.1" + shelf_packages_handler: + dependency: transitive + description: + name: shelf_packages_handler + sha256: "89f967eca29607c933ba9571d838be31d67f53f6e4ee15147d5dc2934fee1b1e" + url: "https://pub.dev" + source: hosted + version: "3.0.2" + shelf_static: + dependency: transitive + description: + name: shelf_static + sha256: a41d3f53c4adf0f57480578c1d61d90342cd617de7fc8077b1304643c2d85c1e + url: "https://pub.dev" + source: hosted + version: "1.1.2" + shelf_web_socket: + dependency: transitive + description: + name: shelf_web_socket + sha256: "073c147238594ecd0d193f3456a5fe91c4b0abbcc68bf5cd95b36c4e194ac611" + url: "https://pub.dev" + source: hosted + version: "2.0.0" + source_gen: + dependency: transitive + description: + name: source_gen + sha256: "14658ba5f669685cd3d63701d01b31ea748310f7ab854e471962670abcf57832" + url: "https://pub.dev" + source: hosted + version: "1.5.0" + source_helper: + dependency: transitive + description: + name: source_helper + sha256: "6adebc0006c37dd63fe05bca0a929b99f06402fc95aa35bf36d67f5c06de01fd" + url: "https://pub.dev" + source: hosted + version: "1.3.4" + source_map_stack_trace: + dependency: transitive + description: + name: source_map_stack_trace + sha256: "84cf769ad83aa6bb61e0aa5a18e53aea683395f196a6f39c4c881fb90ed4f7ae" + url: "https://pub.dev" + source: hosted + version: "2.1.1" + source_maps: + dependency: transitive + description: + name: source_maps + sha256: "708b3f6b97248e5781f493b765c3337db11c5d2c81c3094f10904bfa8004c703" + url: "https://pub.dev" + source: hosted + version: "0.10.12" + source_span: + dependency: transitive + description: + name: source_span + sha256: "53e943d4206a5e30df338fd4c6e7a077e02254531b138a15aec3bd143c1a8b3c" + url: "https://pub.dev" + source: hosted + version: "1.10.0" + stack_trace: + dependency: transitive + description: + name: stack_trace + sha256: 
"73713990125a6d93122541237550ee3352a2d84baad52d375a4cad2eb9b7ce0b" + url: "https://pub.dev" + source: hosted + version: "1.11.1" + stream_channel: + dependency: transitive + description: + name: stream_channel + sha256: ba2aa5d8cc609d96bbb2899c28934f9e1af5cddbd60a827822ea467161eb54e7 + url: "https://pub.dev" + source: hosted + version: "2.1.2" + stream_transform: + dependency: transitive + description: + name: stream_transform + sha256: "14a00e794c7c11aa145a170587321aedce29769c08d7f58b1d141da75e3b1c6f" + url: "https://pub.dev" + source: hosted + version: "2.1.0" + string_scanner: + dependency: transitive + description: + name: string_scanner + sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + url: "https://pub.dev" + source: hosted + version: "1.2.0" + term_glyph: + dependency: transitive + description: + name: term_glyph + sha256: a29248a84fbb7c79282b40b8c72a1209db169a2e0542bce341da992fe1bc7e84 + url: "https://pub.dev" + source: hosted + version: "1.2.1" + test: + dependency: "direct dev" + description: + name: test + sha256: d11b55850c68c1f6c0cf00eabded4e66c4043feaf6c0d7ce4a36785137df6331 + url: "https://pub.dev" + source: hosted + version: "1.25.5" + test_api: + dependency: transitive + description: + name: test_api + sha256: "2419f20b0c8677b2d67c8ac4d1ac7372d862dc6c460cdbb052b40155408cd794" + url: "https://pub.dev" + source: hosted + version: "0.7.1" + test_core: + dependency: transitive + description: + name: test_core + sha256: "4d070a6bc36c1c4e89f20d353bfd71dc30cdf2bd0e14349090af360a029ab292" + url: "https://pub.dev" + source: hosted + version: "0.6.2" + timing: + dependency: transitive + description: + name: timing + sha256: "70a3b636575d4163c477e6de42f247a23b315ae20e86442bebe32d3cabf61c32" + url: "https://pub.dev" + source: hosted + version: "1.0.1" + typed_data: + dependency: transitive + description: + name: typed_data + sha256: facc8d6582f16042dd49f2463ff1bd6e2c9ef9f3d5da3d9b087e244a7b564b3c + url: "https://pub.dev" + source: hosted + version: "1.3.2" + vm_service: + dependency: transitive + description: + name: vm_service + sha256: "7475cb4dd713d57b6f7464c0e13f06da0d535d8b2067e188962a59bac2cf280b" + url: "https://pub.dev" + source: hosted + version: "14.2.2" + watcher: + dependency: transitive + description: + name: watcher + sha256: "3d2ad6751b3c16cf07c7fca317a1413b3f26530319181b37e3b9039b84fc01d8" + url: "https://pub.dev" + source: hosted + version: "1.1.0" + web: + dependency: transitive + description: + name: web + sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + url: "https://pub.dev" + source: hosted + version: "0.5.1" + web_socket: + dependency: transitive + description: + name: web_socket + sha256: "217f49b5213796cb508d6a942a5dc604ce1cb6a0a6b3d8cb3f0c314f0ecea712" + url: "https://pub.dev" + source: hosted + version: "0.1.4" + web_socket_channel: + dependency: transitive + description: + name: web_socket_channel + sha256: a2d56211ee4d35d9b344d9d4ce60f362e4f5d1aafb988302906bd732bc731276 + url: "https://pub.dev" + source: hosted + version: "3.0.0" + webkit_inspection_protocol: + dependency: transitive + description: + name: webkit_inspection_protocol + sha256: "87d3f2333bb240704cd3f1c6b5b7acd8a10e7f0bc28c28dcf14e782014f4a572" + url: "https://pub.dev" + source: hosted + version: "1.2.1" + yaml: + dependency: transitive + description: + name: yaml + sha256: "75769501ea3489fca56601ff33454fe45507ea3bfb014161abc3b43ae25989d5" + url: "https://pub.dev" + source: hosted + version: "3.1.2" +sdks: + dart: ">=3.4.0 <4.0.0" diff 
--git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml new file mode 100644 index 00000000..2fa2b082 --- /dev/null +++ b/packages/anthropic_sdk_dart/pubspec.yaml @@ -0,0 +1,34 @@ +name: anthropic_sdk_dart +description: Dart Client for the Anthropic API (Claude 3 Opus, Sonnet, Haiku, etc.). +version: 0.0.1-dev.1 +repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/anthropic_sdk_dart +issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:anthropic_sdk_dart +homepage: https://github.com/davidmigloz/langchain_dart +documentation: https://langchaindart.com + +topics: + - ai + - nlp + - llms + - anthropic + +environment: + sdk: ">=3.0.0 <4.0.0" + +dependencies: + fetch_client: ^1.0.2 + freezed_annotation: ^2.4.1 + http: ^1.1.0 + json_annotation: ^4.8.1 + meta: ^1.11.0 + +dev_dependencies: + build_runner: ^2.4.9 + freezed: ^2.4.7 + json_serializable: ^6.7.1 + # openapi_spec: ^0.7.8 + openapi_spec: + git: + url: https://github.com/davidmigloz/openapi_spec.git + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 + test: ^1.25.2 diff --git a/packages/anthropic_sdk_dart/test/messages_test.dart b/packages/anthropic_sdk_dart/test/messages_test.dart new file mode 100644 index 00000000..63bbb01e --- /dev/null +++ b/packages/anthropic_sdk_dart/test/messages_test.dart @@ -0,0 +1,150 @@ +@TestOn('vm') +library; // Uses dart:io + +import 'dart:io'; + +import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; +import 'package:test/test.dart'; + +void main() { + group('Anthropic Messages API tests', () { + late AnthropicClient client; + + setUp(() async { + client = AnthropicClient( + apiKey: Platform.environment['ANTHROPIC_API_KEY'], + ); + }); + + tearDown(() { + client.endSession(); + }); + + test('Test call messages API', () async { + const models = Models.values; + for (final model in models) { + final res = await client.createMessage( + request: CreateMessageRequest( + model: Model.model(model), + temperature: 0, + maxTokens: 1024, + system: + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces, commas or additional explanations.', + messages: const [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'List the numbers from 1 to 9 in order.', + ), + ), + ], + ), + ); + expect(res.id, isNotEmpty); + expect( + res.content.text.replaceAll(RegExp(r'[\s\n]'), ''), + contains('123456789'), + ); + expect(res.role, MessageRole.assistant); + expect( + res.model?.replaceAll(RegExp(r'[-.]'), ''), + model.name.toLowerCase(), + ); + expect(res.stopReason, StopReason.endTurn); + expect(res.stopSequence, isNull); + expect(res.type, 'message'); + expect(res.usage?.inputTokens, greaterThan(0)); + expect(res.usage?.outputTokens, greaterThan(0)); + await Future.delayed( + const Duration(seconds: 5), + ); // To avoid rate limit + } + }); + + test('Test call messages streaming API', () async { + final stream = client.createMessageStream( + request: const CreateMessageRequest( + model: Model.model(Models.claudeInstant12), + temperature: 0, + maxTokens: 1024, + system: 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces, commas or additional explanations.', + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'List the numbers from 1 to 9 in order.', + ), + ), + ], + ), + ); + String text = ''; + await for (final res in stream) { + res.map( + messageStart: (v) { + expect(res.type, 
MessageStreamEventType.messageStart); + expect(v.message.id, isNotEmpty); + expect(v.message.role, MessageRole.assistant); + expect( + v.message.model?.replaceAll(RegExp(r'[-.]'), ''), + Models.claudeInstant12.name.toLowerCase(), + ); + expect(v.message.stopReason, isNull); + expect(v.message.stopSequence, isNull); + expect(v.message.usage?.inputTokens, greaterThan(0)); + expect(v.message.usage?.outputTokens, greaterThan(0)); + }, + messageDelta: (v) { + expect(res.type, MessageStreamEventType.messageDelta); + expect(v.delta.stopReason, StopReason.endTurn); + expect(v.usage.outputTokens, greaterThan(0)); + }, + messageStop: (v) { + expect(res.type, MessageStreamEventType.messageStop); + }, + contentBlockStart: (v) { + expect(res.type, MessageStreamEventType.contentBlockStart); + expect(v.index, 0); + expect(v.contentBlock.text, isEmpty); + expect(v.contentBlock.type, 'text'); + }, + contentBlockDelta: (v) { + expect(res.type, MessageStreamEventType.contentBlockDelta); + expect(v.index, greaterThanOrEqualTo(0)); + expect(v.delta.text, isNotEmpty); + expect(v.delta.type, 'text_delta'); + text += v.delta.text.replaceAll(RegExp(r'[\s\n]'), ''); + }, + contentBlockStop: (v) { + expect(res.type, MessageStreamEventType.contentBlockStop); + expect(v.index, greaterThanOrEqualTo(0)); + }, + ping: (v) { + expect(res.type, MessageStreamEventType.ping); + }, + ); + } + expect(text, contains('123456789')); + }); + + test('Test response max tokens', () async { + const request = CreateMessageRequest( + model: Model.model(Models.claudeInstant12), + maxTokens: 1, + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'Tell me a joke.', + ), + ), + ], + ); + + final res = await client.createMessage(request: request); + expect(res.stopReason, StopReason.maxTokens); + }); + }); +} From d4009e82769322b44641d08f107031f448a8f0fc Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 24 May 2024 22:48:42 +0200 Subject: [PATCH 139/251] refactor: Migrate to langchaindart.dev domain (#434) --- docs/CNAME | 2 +- docs/README.md | 2 +- docs/index.html | 13 +++++-------- .../chat_models/integrations/firebase_vertex_ai.md | 2 +- .../models/chat_models/integrations/googleai.md | 2 +- examples/browser_summarizer/pubspec.lock | 4 ++-- examples/docs_examples/README.md | 2 +- examples/docs_examples/pubspec.lock | 8 ++++---- examples/docs_examples/pubspec.yaml | 2 +- examples/hello_world_backend/README.md | 2 +- examples/hello_world_backend/pubspec.lock | 4 ++-- examples/hello_world_cli/README.md | 2 +- examples/hello_world_cli/pubspec.lock | 4 ++-- examples/hello_world_flutter/README.md | 2 +- examples/hello_world_flutter/pubspec.lock | 4 ++-- packages/anthropic_sdk_dart/pubspec.yaml | 2 +- packages/chromadb/pubspec.yaml | 2 +- packages/googleai_dart/pubspec.yaml | 2 +- packages/langchain/CHANGELOG.md | 6 +++--- packages/langchain/README.md | 4 ++-- packages/langchain/pubspec.yaml | 2 +- packages/langchain_amazon/pubspec.yaml | 2 +- packages/langchain_anthropic/pubspec.yaml | 2 +- packages/langchain_chroma/pubspec.yaml | 2 +- packages/langchain_cohere/pubspec.yaml | 2 +- packages/langchain_community/pubspec.yaml | 2 +- packages/langchain_core/pubspec.yaml | 2 +- packages/langchain_firebase/example/pubspec.lock | 2 +- .../vertex_ai/chat_firebase_vertex_ai.dart | 2 +- packages/langchain_firebase/pubspec.yaml | 2 +- .../google_ai/chat_google_generative_ai.dart | 2 +- packages/langchain_google/pubspec.yaml | 2 +- packages/langchain_huggingface/pubspec.yaml | 2 +- 
packages/langchain_microsoft/pubspec.yaml | 2 +- packages/langchain_mistralai/pubspec.yaml | 2 +- packages/langchain_ollama/pubspec.yaml | 2 +- packages/langchain_openai/CHANGELOG.md | 4 ++-- packages/langchain_openai/pubspec.yaml | 2 +- packages/langchain_pinecone/pubspec.yaml | 2 +- .../lib/src/vector_stores/supabase.dart | 2 +- packages/langchain_supabase/pubspec.yaml | 2 +- packages/langchain_weaviate/pubspec.yaml | 2 +- packages/langchain_wikipedia/pubspec.yaml | 2 +- packages/langchain_wolfram/pubspec.yaml | 2 +- packages/mistralai_dart/pubspec.yaml | 2 +- packages/ollama_dart/pubspec.yaml | 2 +- packages/openai_dart/pubspec.yaml | 2 +- packages/vertex_ai/pubspec.yaml | 2 +- 48 files changed, 63 insertions(+), 66 deletions(-) diff --git a/docs/CNAME b/docs/CNAME index 17960576..6217d1b3 100644 --- a/docs/CNAME +++ b/docs/CNAME @@ -1 +1 @@ -langchaindart.com \ No newline at end of file +langchaindart.dev diff --git a/docs/README.md b/docs/README.md index 8f9a3f2f..12785c34 100644 --- a/docs/README.md +++ b/docs/README.md @@ -38,7 +38,7 @@ LCEL is a declarative way to compose chains. LCEL was designed from day 1 to sup - [Overview](/expression_language/expression_language): LCEL and its benefits - [Interface](/expression_language/interface): The standard interface for LCEL objects -- [Cookbook](https://langchaindart.com/#/expression_language/cookbook/prompt_llm_parser): Example code for accomplishing common tasks +- [Cookbook](https://langchaindart.dev/#/expression_language/cookbook/prompt_llm_parser): Example code for accomplishing common tasks ## Modules diff --git a/docs/index.html b/docs/index.html index eab7ac39..6d4f395b 100644 --- a/docs/index.html +++ b/docs/index.html @@ -2,16 +2,13 @@ - + @@ -41,7 +38,7 @@ - + diff --git a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md index 8dc05345..ef8e03d0 100644 --- a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md +++ b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md @@ -140,7 +140,7 @@ await stream.forEach(print); `ChatGoogleGenerativeAI` supports tool calling. -Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. +Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. Example: ```dart diff --git a/docs/modules/model_io/models/chat_models/integrations/googleai.md b/docs/modules/model_io/models/chat_models/integrations/googleai.md index 6eca8777..87a43755 100644 --- a/docs/modules/model_io/models/chat_models/integrations/googleai.md +++ b/docs/modules/model_io/models/chat_models/integrations/googleai.md @@ -118,7 +118,7 @@ await stream.forEach(print); `ChatGoogleGenerativeAI` supports tool calling. -Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. +Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. 
Example: ```dart diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index cc499c81..b3733733 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -246,7 +246,7 @@ packages: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.6.1+1" langchain_tiktoken: dependency: transitive description: @@ -309,7 +309,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.3.2+1" path: dependency: transitive description: diff --git a/examples/docs_examples/README.md b/examples/docs_examples/README.md index a2dc3095..6ec73e85 100644 --- a/examples/docs_examples/README.md +++ b/examples/docs_examples/README.md @@ -1,3 +1,3 @@ # Docs examples -Examples used in https://langchaindart.com documentation. +Examples used in https://langchaindart.dev documentation. diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 6234c279..014430f6 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -264,14 +264,14 @@ packages: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.1" + version: "0.2.1+1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.6.1+1" langchain_tiktoken: dependency: transitive description: @@ -317,14 +317,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.0" + version: "0.1.0+1" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.3.2+1" path: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 37662d4c..716c7270 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -1,5 +1,5 @@ name: docs_examples -description: Examples used in langchaindart.com documentation. +description: Examples used in langchaindart.dev documentation. version: 1.0.0 publish_to: none diff --git a/examples/hello_world_backend/README.md b/examples/hello_world_backend/README.md index 4f00582c..70208b7a 100644 --- a/examples/hello_world_backend/README.md +++ b/examples/hello_world_backend/README.md @@ -7,7 +7,7 @@ It exposes a REST API that given a list of topics, generates a sonnet about them The HTTP server is implemented using [package:shelf](https://pub.dev/packages/shelf). -You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.com/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) +You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) blog post. 
![Hello world backend](hello_world_backend.gif) diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index 3ef992b7..9c8a5ba4 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -133,7 +133,7 @@ packages: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.6.1+1" langchain_tiktoken: dependency: transitive description: @@ -156,7 +156,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.3.2+1" path: dependency: transitive description: diff --git a/examples/hello_world_cli/README.md b/examples/hello_world_cli/README.md index 608daeb6..3ab0ed81 100644 --- a/examples/hello_world_cli/README.md +++ b/examples/hello_world_cli/README.md @@ -2,7 +2,7 @@ This sample app demonstrates how to call an LLM from a CLI application using LangChain.dart. -You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.com/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) +You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) blog post. ## Usage diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 42f90c1a..df156ea2 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -125,7 +125,7 @@ packages: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.6.1+1" langchain_tiktoken: dependency: transitive description: @@ -148,7 +148,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.3.2+1" path: dependency: transitive description: diff --git a/examples/hello_world_flutter/README.md b/examples/hello_world_flutter/README.md index 80a111af..6b7c3871 100644 --- a/examples/hello_world_flutter/README.md +++ b/examples/hello_world_flutter/README.md @@ -2,7 +2,7 @@ This sample app demonstrates how to call an LLM from a Flutter application using LangChain.dart. -You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.com/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) +You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) blog post. 
## Usage diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index a12c6037..ecb15bcc 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -154,7 +154,7 @@ packages: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.6.1+1" langchain_tiktoken: dependency: transitive description: @@ -193,7 +193,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.3.2+1" path: dependency: transitive description: diff --git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml index 2fa2b082..5beab57e 100644 --- a/packages/anthropic_sdk_dart/pubspec.yaml +++ b/packages/anthropic_sdk_dart/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/anthropic_sdk_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:anthropic_sdk_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/chromadb/pubspec.yaml b/packages/chromadb/pubspec.yaml index 7a218b9c..d992b91c 100644 --- a/packages/chromadb/pubspec.yaml +++ b/packages/chromadb/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.2.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/chromadb issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/googleai_dart/pubspec.yaml b/packages/googleai_dart/pubspec.yaml index ca8f0f00..7ccb5df6 100644 --- a/packages/googleai_dart/pubspec.yaml +++ b/packages/googleai_dart/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/googleai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:googleai_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index 47e5a89d..9d255b21 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -153,7 +153,7 @@ ## 0.0.13 -> Check out the [LangChain Expression Language documentation](https://langchaindart.com/#/expression_language/interface) for more details +> Check out the [LangChain Expression Language documentation](https://langchaindart.dev/#/expression_language/interface) for more details - **FEAT**: Add support for JsonOutputFunctionsParser ([#165](https://github.com/davidmigloz/langchain_dart/issues/165)). ([66c8e644](https://github.com/davidmigloz/langchain_dart/commit/66c8e64410d1dbf8b75e5734cb0cbb0e43dc0615)) - **FEAT**: Add support for StringOutputParser ([#164](https://github.com/davidmigloz/langchain_dart/issues/164)). ([ee29e99a](https://github.com/davidmigloz/langchain_dart/commit/ee29e99a410c3cc6a7ae263fea1cde283f904edf)) @@ -274,7 +274,7 @@ - Initial public release. 
Check out the announcement post for all the details: -https://blog.langchaindart.com/introducing-langchain-dart-6b1d34fc41ef +https://blog.langchaindart.dev/introducing-langchain-dart-6b1d34fc41ef ## 0.0.1-dev.7 @@ -322,7 +322,7 @@ https://blog.langchaindart.com/introducing-langchain-dart-6b1d34fc41ef - Add support for LLMs - `BaseLLM` class (#14). - Add support for Chat models - `BaseChatModel` class (#10). - Add support for prompt templates - `PromptTemplate` class (#7). -- Publish LangChain.dart documentation on http://langchaindart.com. +- Publish LangChain.dart documentation on http://langchaindart.dev. ## 0.0.1-dev.1 diff --git a/packages/langchain/README.md b/packages/langchain/README.md index bef19382..561f7d7d 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -156,9 +156,9 @@ print(res); ## Documentation -- [LangChain.dart documentation](https://langchaindart.com) +- [LangChain.dart documentation](https://langchaindart.dev) - [Sample apps](https://github.com/davidmigloz/langchain_dart/tree/main/examples) -- [LangChain.dart blog](https://blog.langchaindart.com) +- [LangChain.dart blog](https://blog.langchaindart.dev) - [Project board](https://github.com/users/davidmigloz/projects/2/views/1) ## Community diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 4326a8fb..1483d1f5 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.7.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_amazon/pubspec.yaml b/packages/langchain_amazon/pubspec.yaml index 41af11b0..abbcb58c 100644 --- a/packages/langchain_amazon/pubspec.yaml +++ b/packages/langchain_amazon/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_amazon issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_amazon homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index de768b22..6ed5624f 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 197d0776..84e24303 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.2.0+4 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: 
https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_cohere/pubspec.yaml b/packages/langchain_cohere/pubspec.yaml index bcb53a98..8ace6cf2 100644 --- a/packages/langchain_cohere/pubspec.yaml +++ b/packages/langchain_cohere/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_cohere issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_cohere homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 29fbdb15..8cacd96c 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.2.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index 65650ce8..d6f04b41 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.3.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 0dd384a2..3b051b3a 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -252,7 +252,7 @@ packages: path: ".." relative: true source: path - version: "0.1.0" + version: "0.1.0+1" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index f8c3870d..1a3863b4 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -111,7 +111,7 @@ import 'types.dart'; /// /// [ChatFirebaseVertexAI] supports tool calling. /// -/// Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) +/// Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) /// for more information on how to use tools. 
/// /// Example: diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index cfb0d9f2..921c1336 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart index 02fde0bb..30a27cdd 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart @@ -118,7 +118,7 @@ import 'types.dart'; /// /// [ChatGoogleGenerativeAI] supports tool calling. /// -/// Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) +/// Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) /// for more information on how to use tools. /// /// Example: diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 7b441954..67a75cff 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.5.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_huggingface/pubspec.yaml b/packages/langchain_huggingface/pubspec.yaml index 576a8f6f..7c1f00d4 100644 --- a/packages/langchain_huggingface/pubspec.yaml +++ b/packages/langchain_huggingface/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_huggingface issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_huggingface homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_microsoft/pubspec.yaml b/packages/langchain_microsoft/pubspec.yaml index 685287b7..3bd05e6a 100644 --- a/packages/langchain_microsoft/pubspec.yaml +++ b/packages/langchain_microsoft/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_microsoft issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_microsoft homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 90b027e9..964397d3 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ 
b/packages/langchain_mistralai/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.2.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_mistralai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_mistralai homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index adfa39d4..aea5e9ee 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.2.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index d1a0368a..ae115e6d 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -77,7 +77,7 @@ ## 0.3.2 - - **FEAT**: Support OpenRouter API in ChatOpenAI wrapper ([#292](https://github.com/davidmigloz/langchain_dart/issues/292)). ([c6e7e5be](https://github.com/davidmigloz/langchain_dart/commit/c6e7e5beeb03c32a93b062aab874cae3da0a52d9)) ([docs](https://langchaindart.com/#/modules/model_io/models/chat_models/integrations/open_router)) + - **FEAT**: Support OpenRouter API in ChatOpenAI wrapper ([#292](https://github.com/davidmigloz/langchain_dart/issues/292)). ([c6e7e5be](https://github.com/davidmigloz/langchain_dart/commit/c6e7e5beeb03c32a93b062aab874cae3da0a52d9)) ([docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/open_router)) - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e)) - **REFACTOR**: Migrate tokenizer to langchain_tiktoken package ([#285](https://github.com/davidmigloz/langchain_dart/issues/285)). ([6a3b6466](https://github.com/davidmigloz/langchain_dart/commit/6a3b6466e3e4cfddda2f506adbf2eb563814d02f)) - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f)) @@ -257,7 +257,7 @@ - Initial public release. 
Check out the announcement post for all the details: -https://blog.langchaindart.com/introducing-langchain-dart-6b1d34fc41ef +https://blog.langchaindart.dev/introducing-langchain-dart-6b1d34fc41ef ## 0.0.1-dev.7 diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index ca76313e..efab060a 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.6.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index c64dda9f..479b441e 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.1.0+4 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_supabase/lib/src/vector_stores/supabase.dart b/packages/langchain_supabase/lib/src/vector_stores/supabase.dart index f6d1e11e..0c777f01 100644 --- a/packages/langchain_supabase/lib/src/vector_stores/supabase.dart +++ b/packages/langchain_supabase/lib/src/vector_stores/supabase.dart @@ -54,7 +54,7 @@ import 'package:supabase/supabase.dart'; /// ``` /// /// See documentation for more details: -/// - [LangChain.dart Supabase docs](https://langchaindart.com/#/modules/retrieval/vector_stores/integrations/supabase) +/// - [LangChain.dart Supabase docs](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/supabase) /// - [Supabase Vector docs](https://supabase.com/docs/guides/ai) /// {@endtemplate} class Supabase extends VectorStore { diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index 5450773b..91340307 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.1.0+4 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/langchain_weaviate/pubspec.yaml b/packages/langchain_weaviate/pubspec.yaml index f5f5de33..fb6e6ce4 100644 --- a/packages/langchain_weaviate/pubspec.yaml +++ b/packages/langchain_weaviate/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_weaviate issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_weaviate homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_wikipedia/pubspec.yaml 
b/packages/langchain_wikipedia/pubspec.yaml index e1377267..d8f713b5 100644 --- a/packages/langchain_wikipedia/pubspec.yaml +++ b/packages/langchain_wikipedia/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_wikipedia issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_wikipedia homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/langchain_wolfram/pubspec.yaml b/packages/langchain_wolfram/pubspec.yaml index b64e02a0..950db4e1 100644 --- a/packages/langchain_wolfram/pubspec.yaml +++ b/packages/langchain_wolfram/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_wolfram issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_wolfram homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: diff --git a/packages/mistralai_dart/pubspec.yaml b/packages/mistralai_dart/pubspec.yaml index a7aa8347..d4af6eaa 100644 --- a/packages/mistralai_dart/pubspec.yaml +++ b/packages/mistralai_dart/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.3+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/mistralai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:mistralai_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index e2eee5ca..c9dd9706 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index 30fee90f..fe366c39 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.3.2+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index 1edc8121..703fb145 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/vertex_ai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:vertex_ai homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai From 
1994c645d6636571648782b4ebfca22083d27d69 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 25 May 2024 16:04:35 +0200 Subject: [PATCH 140/251] fix: Fix deserialization of sealed classes (#435) --- melos.yaml | 2 +- packages/chromadb/pubspec.yaml | 2 +- packages/googleai_dart/pubspec.yaml | 2 +- packages/mistralai_dart/pubspec.yaml | 2 +- packages/ollama_dart/pubspec.yaml | 2 +- .../lib/src/generated/schema/chat_completion_message.dart | 8 +++++--- .../src/generated/schema/create_completion_request.dart | 2 +- .../src/generated/schema/create_embedding_request.dart | 2 +- .../lib/src/generated/schema/create_message_request.dart | 6 ++++-- packages/openai_dart/pubspec.yaml | 2 +- 10 files changed, 17 insertions(+), 13 deletions(-) diff --git a/melos.yaml b/melos.yaml index 51805716..d4792fff 100644 --- a/melos.yaml +++ b/melos.yaml @@ -62,7 +62,7 @@ command: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 scripts: diff --git a/packages/chromadb/pubspec.yaml b/packages/chromadb/pubspec.yaml index d992b91c..40252b6b 100644 --- a/packages/chromadb/pubspec.yaml +++ b/packages/chromadb/pubspec.yaml @@ -27,5 +27,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 diff --git a/packages/googleai_dart/pubspec.yaml b/packages/googleai_dart/pubspec.yaml index 7ccb5df6..2ed4d004 100644 --- a/packages/googleai_dart/pubspec.yaml +++ b/packages/googleai_dart/pubspec.yaml @@ -31,5 +31,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 diff --git a/packages/mistralai_dart/pubspec.yaml b/packages/mistralai_dart/pubspec.yaml index d4af6eaa..27b81ed4 100644 --- a/packages/mistralai_dart/pubspec.yaml +++ b/packages/mistralai_dart/pubspec.yaml @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index c9dd9706..ab538c0d 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart index ae4d6e9c..65e9b1d8 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart @@ -158,9 +158,11 @@ class _ChatCompletionUserMessageContentConverter @override ChatCompletionUserMessageContent fromJson(Object? 
data) { - if (data is List && - data.every((item) => item is ChatCompletionMessageContentPart)) { - return ChatCompletionMessageContentParts(data.cast()); + if (data is List && data.every((item) => item is Map)) { + return ChatCompletionMessageContentParts(data + .map((i) => ChatCompletionMessageContentPart.fromJson( + i as Map)) + .toList(growable: false)); } if (data is String) { return ChatCompletionUserMessageContentString(data); diff --git a/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart index 31bb714a..ff66b86c 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart @@ -347,7 +347,7 @@ class _CompletionPromptConverter @override CompletionPrompt fromJson(Object? data) { - if (data is List && data.every((item) => item is List)) { + if (data is List && data.every((item) => item is List)) { return CompletionPromptListListInt(data.cast()); } if (data is List && data.every((item) => item is int)) { diff --git a/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart b/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart index fec9f621..10c24925 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart @@ -179,7 +179,7 @@ class _EmbeddingInputConverter @override EmbeddingInput fromJson(Object? data) { - if (data is List && data.every((item) => item is List)) { + if (data is List && data.every((item) => item is List)) { return EmbeddingInputListListInt(data.cast()); } if (data is List && data.every((item) => item is int)) { diff --git a/packages/openai_dart/lib/src/generated/schema/create_message_request.dart b/packages/openai_dart/lib/src/generated/schema/create_message_request.dart index bad29bc1..7837049f 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_message_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_message_request.dart @@ -88,8 +88,10 @@ class _CreateMessageRequestContentConverter @override CreateMessageRequestContent fromJson(Object? 
data) { - if (data is List && data.every((item) => item is MessageContent)) { - return CreateMessageRequestContentListMessageContent(data.cast()); + if (data is List && data.every((item) => item is Map)) { + return CreateMessageRequestContentListMessageContent(data + .map((i) => MessageContent.fromJson(i as Map)) + .toList(growable: false)); } if (data is String) { return CreateMessageRequestContentString(data); diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index fe366c39..e34a047d 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 From fad68ec83e20e2f6034dfcdf80b804ddd9145445 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 25 May 2024 16:32:51 +0200 Subject: [PATCH 141/251] fix: Make vector store name optional in openai_dart (#436) --- packages/openai_dart/lib/openai_dart.dart | 2 +- .../schema/create_vector_store_request.dart | 10 +-- .../src/generated/schema/schema.freezed.dart | 67 ++++++++++--------- .../lib/src/generated/schema/schema.g.dart | 4 +- packages/openai_dart/oas/openapi_curated.yaml | 8 +-- packages/openai_dart/pubspec.yaml | 2 +- 6 files changed, 47 insertions(+), 46 deletions(-) diff --git a/packages/openai_dart/lib/openai_dart.dart b/packages/openai_dart/lib/openai_dart.dart index 87830981..7600ced2 100644 --- a/packages/openai_dart/lib/openai_dart.dart +++ b/packages/openai_dart/lib/openai_dart.dart @@ -1,4 +1,4 @@ -/// Dart Client for the OpenAI API (completions, chat, embeddings, etc.). +/// Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. library; export 'src/client.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart index cce0ccd3..bb9e83d7 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart @@ -15,12 +15,12 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { /// Factory constructor for CreateVectorStoreRequest const factory CreateVectorStoreRequest({ + /// The name of the vector store. + @JsonKey(includeIfNull: false) String? name, + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - /// The name of the vector store. - required String name, - /// The expiration policy for a vector store. @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? 
expiresAfter, @@ -35,8 +35,8 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { /// List of all property names of schema static const List propertyNames = [ - 'file_ids', 'name', + 'file_ids', 'expires_after', 'metadata' ]; @@ -49,8 +49,8 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { /// Map representation of object (not serialized) Map toMap() { return { - 'file_ids': fileIds, 'name': name, + 'file_ids': fileIds, 'expires_after': expiresAfter, 'metadata': metadata, }; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index abfb4fc9..1395bc5a 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -44925,13 +44925,14 @@ CreateVectorStoreRequest _$CreateVectorStoreRequestFromJson( /// @nodoc mixin _$CreateVectorStoreRequest { + /// The name of the vector store. + @JsonKey(includeIfNull: false) + String? get name => throw _privateConstructorUsedError; + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_ids', includeIfNull: false) List? get fileIds => throw _privateConstructorUsedError; - /// The name of the vector store. - String get name => throw _privateConstructorUsedError; - /// The expiration policy for a vector store. @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? get expiresAfter => @@ -44954,8 +44955,8 @@ abstract class $CreateVectorStoreRequestCopyWith<$Res> { _$CreateVectorStoreRequestCopyWithImpl<$Res, CreateVectorStoreRequest>; @useResult $Res call( - {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - String name, + {@JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, @JsonKey(includeIfNull: false) dynamic metadata}); @@ -44977,20 +44978,20 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, @pragma('vm:prefer-inline') @override $Res call({ + Object? name = freezed, Object? fileIds = freezed, - Object? name = null, Object? expiresAfter = freezed, Object? metadata = freezed, }) { return _then(_value.copyWith( + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, fileIds: freezed == fileIds ? _value.fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List?, - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, expiresAfter: freezed == expiresAfter ? _value.expiresAfter : expiresAfter // ignore: cast_nullable_to_non_nullable @@ -45026,8 +45027,8 @@ abstract class _$$CreateVectorStoreRequestImplCopyWith<$Res> @override @useResult $Res call( - {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - String name, + {@JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, @JsonKey(includeIfNull: false) dynamic metadata}); @@ -45049,20 +45050,20 @@ class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> @pragma('vm:prefer-inline') @override $Res call({ + Object? name = freezed, Object? fileIds = freezed, - Object? name = null, Object? 
expiresAfter = freezed, Object? metadata = freezed, }) { return _then(_$CreateVectorStoreRequestImpl( + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, fileIds: freezed == fileIds ? _value._fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List?, - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, expiresAfter: freezed == expiresAfter ? _value.expiresAfter : expiresAfter // ignore: cast_nullable_to_non_nullable @@ -45079,9 +45080,9 @@ class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> @JsonSerializable() class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { const _$CreateVectorStoreRequestImpl( - {@JsonKey(name: 'file_ids', includeIfNull: false) + {@JsonKey(includeIfNull: false) this.name, + @JsonKey(name: 'file_ids', includeIfNull: false) final List? fileIds, - required this.name, @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, @JsonKey(includeIfNull: false) this.metadata}) : _fileIds = fileIds, @@ -45090,6 +45091,11 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { factory _$CreateVectorStoreRequestImpl.fromJson(Map json) => _$$CreateVectorStoreRequestImplFromJson(json); + /// The name of the vector store. + @override + @JsonKey(includeIfNull: false) + final String? name; + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. final List? _fileIds; @@ -45104,10 +45110,6 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { return EqualUnmodifiableListView(value); } - /// The name of the vector store. - @override - final String name; - /// The expiration policy for a vector store. @override @JsonKey(name: 'expires_after', includeIfNull: false) @@ -45120,7 +45122,7 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { @override String toString() { - return 'CreateVectorStoreRequest(fileIds: $fileIds, name: $name, expiresAfter: $expiresAfter, metadata: $metadata)'; + return 'CreateVectorStoreRequest(name: $name, fileIds: $fileIds, expiresAfter: $expiresAfter, metadata: $metadata)'; } @override @@ -45128,8 +45130,8 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { return identical(this, other) || (other.runtimeType == runtimeType && other is _$CreateVectorStoreRequestImpl && - const DeepCollectionEquality().equals(other._fileIds, _fileIds) && (identical(other.name, name) || other.name == name) && + const DeepCollectionEquality().equals(other._fileIds, _fileIds) && (identical(other.expiresAfter, expiresAfter) || other.expiresAfter == expiresAfter) && const DeepCollectionEquality().equals(other.metadata, metadata)); @@ -45139,8 +45141,8 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { @override int get hashCode => Object.hash( runtimeType, - const DeepCollectionEquality().hash(_fileIds), name, + const DeepCollectionEquality().hash(_fileIds), expiresAfter, const DeepCollectionEquality().hash(metadata)); @@ -45161,9 +45163,9 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { const factory _CreateVectorStoreRequest( - {@JsonKey(name: 'file_ids', includeIfNull: false) + {@JsonKey(includeIfNull: false) final String? name, + @JsonKey(name: 'file_ids', includeIfNull: false) final List? 
fileIds, - required final String name, @JsonKey(name: 'expires_after', includeIfNull: false) final VectorStoreExpirationAfter? expiresAfter, @JsonKey(includeIfNull: false) final dynamic metadata}) = @@ -45175,15 +45177,16 @@ abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { @override + /// The name of the vector store. + @JsonKey(includeIfNull: false) + String? get name; + @override + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_ids', includeIfNull: false) List? get fileIds; @override - /// The name of the vector store. - String get name; - @override - /// The expiration policy for a vector store. @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? get expiresAfter; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index eeb6a84e..4062dc95 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -4373,10 +4373,10 @@ Map _$$VectorStoreObjectFileCountsImplToJson( _$CreateVectorStoreRequestImpl _$$CreateVectorStoreRequestImplFromJson( Map json) => _$CreateVectorStoreRequestImpl( + name: json['name'] as String?, fileIds: (json['file_ids'] as List?) ?.map((e) => e as String) .toList(), - name: json['name'] as String, expiresAfter: json['expires_after'] == null ? null : VectorStoreExpirationAfter.fromJson( @@ -4394,8 +4394,8 @@ Map _$$CreateVectorStoreRequestImplToJson( } } + writeNotNull('name', instance.name); writeNotNull('file_ids', instance.fileIds); - val['name'] = instance.name; writeNotNull('expires_after', instance.expiresAfter?.toJson()); writeNotNull('metadata', instance.metadata); return val; diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 46201dd4..b1a945bc 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -5669,23 +5669,21 @@ components: description: Request object for the Create assistant file endpoint. additionalProperties: false properties: + name: + description: The name of the vector store. + type: string file_ids: description: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. type: array maxItems: 500 items: type: string - name: - description: The name of the vector store. - type: string expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" metadata: description: *metadata_description type: object nullable: true - required: - - name UpdateVectorStoreRequest: type: object description: Request object for the Update vector store endpoint. diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index e34a047d..ee8442e2 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,5 +1,5 @@ name: openai_dart -description: Dart Client for the OpenAI API (completions, chat, embeddings, etc.). +description: Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. 
version: 0.3.2+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart From 7fd903847324447159ebc726ad65012ed04cbb72 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 25 May 2024 16:55:04 +0200 Subject: [PATCH 142/251] docs: Document tool calling with OpenRouter (#437) --- .../chat_models/integrations/open_router.md | 60 ++++++++++++++++ .../chat_models/integrations/open_router.dart | 54 +++++++++++++++ .../test/chat_models/open_router_test.dart | 69 +++++++++++++++++++ 3 files changed, 183 insertions(+) diff --git a/docs/modules/model_io/models/chat_models/integrations/open_router.md b/docs/modules/model_io/models/chat_models/integrations/open_router.md index e747ca5f..c2d63555 100644 --- a/docs/modules/model_io/models/chat_models/integrations/open_router.md +++ b/docs/modules/model_io/models/chat_models/integrations/open_router.md @@ -95,3 +95,63 @@ await stream.forEach(print); // 123 // 456789 ``` + +## Tool calling + +OpenRouter supports [tool calling](https://openrouter.ai/docs#tool-calls). + +Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. + +In the following example we use the `joke` tool to generate jokes. We stream the joke generation using the `ToolsOutputParser' which tries to "auto-complete" the partial json from each chunk into a valid state. + +```dart +final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; +const tool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', + }, + }, + 'required': ['location', 'punchline'], + }, +); +final promptTemplate = ChatPromptTemplate.fromTemplate( + 'tell me a long joke about {foo}', +); +final chat = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + toolChoice: ChatToolChoice.forced(name: 'joke'), + ), +); +final outputParser = ToolsOutputParser(); + +final chain = promptTemplate.pipe(chat).pipe(outputParser); + +final stream = chain.stream({'foo': 'bears'}); +await for (final chunk in stream) { + final args = chunk.first.arguments; + print(args); +} +// {} +// {setup: } +// {setup: Why don't} +// {setup: Why don't bears} +// {setup: Why don't bears like fast food} +// {setup: Why don't bears like fast food?, punchline: } +// {setup: Why don't bears like fast food?, punchline: Because} +// {setup: Why don't bears like fast food?, punchline: Because they can't} +// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} +``` diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart index 439943c5..f552e60b 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart @@ -7,6 +7,7 @@ import 'package:langchain_openai/langchain_openai.dart'; void main(final List arguments) async { await _openRouter(); await _openRouterStreaming(); + await _openRouterStreamingTools(); } Future _openRouter() async { 
@@ -66,3 +67,56 @@ Future _openRouterStreaming() async { // 123 // 456789 } + +Future _openRouterStreamingTools() async { + final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; + + const tool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', + }, + }, + 'required': ['location', 'punchline'], + }, + ); + final promptTemplate = ChatPromptTemplate.fromTemplate( + 'tell me a long joke about {foo}', + ); + final chat = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o', + tools: const [tool], + toolChoice: ChatToolChoice.forced(name: 'joke'), + ), + ); + final outputParser = ToolsOutputParser(); + + final chain = promptTemplate.pipe(chat).pipe(outputParser); + + final stream = chain.stream({'foo': 'bears'}); + await for (final chunk in stream) { + final args = chunk.first.arguments; + print(args); + } + // {} + // {setup: } + // {setup: Why don't} + // {setup: Why don't bears} + // {setup: Why don't bears like fast food} + // {setup: Why don't bears like fast food?, punchline: } + // {setup: Why don't bears like fast food?, punchline: Because} + // {setup: Why don't bears like fast food?, punchline: Because they can't} + // {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} +} diff --git a/packages/langchain_openai/test/chat_models/open_router_test.dart b/packages/langchain_openai/test/chat_models/open_router_test.dart index 396f8ac4..4587b56b 100644 --- a/packages/langchain_openai/test/chat_models/open_router_test.dart +++ b/packages/langchain_openai/test/chat_models/open_router_test.dart @@ -1,10 +1,12 @@ @TestOn('vm') library; // Uses dart:io +import 'dart:convert'; import 'dart:io'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; import 'package:langchain_openai/langchain_openai.dart'; import 'package:test/test.dart'; @@ -104,5 +106,72 @@ void main() { expect(numTokens, 13, reason: model); } }); + + test('Test tool calling', + timeout: const Timeout(Duration(minutes: 1)), () async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. 
San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + final humanMessage = ChatMessage.humanText( + 'What’s the weather like in Boston right now?', + ); + final res1 = await chatModel.invoke( + PromptValue.chat([humanMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage1 = res1.output; + + expect(aiMessage1.content, isEmpty); + expect(aiMessage1.toolCalls, isNotEmpty); + final toolCall = aiMessage1.toolCalls.first; + + expect(toolCall.name, tool.name); + expect(toolCall.arguments.containsKey('location'), isTrue); + expect(toolCall.arguments['location'], contains('Boston')); + + final functionResult = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + final functionMessage = ChatMessage.tool( + toolCallId: toolCall.id, + content: json.encode(functionResult), + ); + + final res2 = await chatModel.invoke( + PromptValue.chat([humanMessage, aiMessage1, functionMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage2 = res2.output; + + expect(aiMessage2.toolCalls, isEmpty); + expect(aiMessage2.content, contains('22')); + }); }); } From e1dae79dd09c14b4949b461ecc358690ac520f7a Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Tue, 28 May 2024 23:24:19 +0200 Subject: [PATCH 143/251] feat: Add support for ObjectBoxVectorStore (#438) --- docs/_sidebar.md | 1 + .../integrations/img/objectbox.png | Bin 0 -> 51968 bytes .../vector_stores/integrations/memory.md | 8 +- .../vector_stores/integrations/objectbox.md | 258 ++++++++++++++++++ examples/browser_summarizer/pubspec.lock | 16 ++ .../vector_stores/integrations/objectbox.dart | 108 ++++++++ examples/docs_examples/pubspec.lock | 24 ++ melos.yaml | 2 + .../lib/src/vector_stores/memory.dart | 4 +- .../lib/langchain_community.dart | 1 + .../objectbox/base_objectbox.dart | 120 ++++++++ .../objectbox/objectbox-model.json | 56 ++++ .../vector_stores/objectbox/objectbox.dart | 196 +++++++++++++ .../vector_stores/objectbox/objectbox.g.dart | 193 +++++++++++++ .../src/vector_stores/objectbox/types.dart | 29 ++ .../lib/src/vector_stores/vector_stores.dart | 4 + packages/langchain_community/pubspec.yaml | 8 + .../pubspec_overrides.yaml | 6 +- .../objectbox/objectbox_test.dart | 159 +++++++++++ 19 files changed, 1187 insertions(+), 6 deletions(-) create mode 100644 docs/modules/retrieval/vector_stores/integrations/img/objectbox.png create mode 100644 docs/modules/retrieval/vector_stores/integrations/objectbox.md create mode 100644 examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/types.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/vector_stores.dart create mode 100644 packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart diff --git a/docs/_sidebar.md b/docs/_sidebar.md index 532c82b8..6ce757ba 100644 --- 
a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -94,6 +94,7 @@ - [Vector stores](/modules/retrieval/vector_stores/vector_stores.md) - Integrations - [Memory](/modules/retrieval/vector_stores/integrations/memory.md) + - [ObjectBox](/modules/retrieval/vector_stores/integrations/objectbox.md) - [Chroma](/modules/retrieval/vector_stores/integrations/chroma.md) - [Pinecone](/modules/retrieval/vector_stores/integrations/pinecone.md) - [Supabase](/modules/retrieval/vector_stores/integrations/supabase.md) diff --git a/docs/modules/retrieval/vector_stores/integrations/img/objectbox.png b/docs/modules/retrieval/vector_stores/integrations/img/objectbox.png new file mode 100644 index 0000000000000000000000000000000000000000..6d88c06fd27d87ec228ae1bdf5a1b803b0af140d GIT binary patch literal 51968 zcmbSxWm_Cg(>4SW0>Oeykl-#sg9LZC#ogV4ySux)yDaYRu8Vt+#aSF)uKW21?;P`K zrl+dTs)zW&E3QE^XuEkoA>?Wvz4vg$))v?+2x_3$+rIS`sVJu zs^+x(%J8(};Dl_C*bL|JB%Jlnsr&l*wYirV0mQEm$@}A1% zut-KVOEzH*wz}G)^|fj6S=)TuM&Cq2Y26M51x`XvatN3qNdgD>BcNB?mRfi1+8#5)K_n9Nw&0zONu?dzTP}N zQ7W0McXfJAjyVhuT|7SW0fBAfGt^a;xFsd{rKJlO;PTCN#+vGc#f9a|3&Hw&$I%gK zNd*Iaz2U~9qp8-AFi#~#h1J93h3@K`rS8jxE-MRDKNlD9YmgOjic#+m6MB+Ux04vhHmApmt5dnQono#CUpv`7#z3Gt76u&9oZY~U+fhf01vT6*AuOQm3Vqfq z01br-1lqspM2dMlY(Fs`8_Nz1i8!m$1K%}liD;9c6oL@SVf z&;SfnPmWqv*??l?j)S0%@K-}RiFzs;HxUeD^>`*xakOt1fk+`#-OF8JUXpC54^IYvYhbSosHACN{yu4!o`n7kr_|2 z;Ef3uiX6}yIa-;aHV>yyRjQ?90-sy`$vF)kjQJaIRj1zUC9U#Dc4zMoJ~Sk9k5V@K zlivMr>(%cnE`=}(NJw>L<-oDD+5H@uX@Ez1D!mTw&=jh=t|o zaDMB!LAIb;{HtM5k)o_4!pf`G(UfkBE4=TsK-a%K>cH8w zfMMU+-Dok}$3m1-sm)@4CeH)8v6cYJPYtL31 zb-V2yp?c08_FtS!vi^QCh~r6SUFtwEsR!)MrfC8*{mK2*C09e)>I%{S%J$YTSvs3M z8C3lxy=eaYecU2-OauNB&Sop#{is^teE4P9#q5Lr&8Sdk3@A;%!3;!IG+*JLiL%q8 zDNk9sG`bd9<|nF*LRA`cBFYg&1q*J>y3`oGmaG?z4H3})qHc4HOzct0<#=7+QoBS2 zHU{uQ&7C7Gkn>C|j}=@Q8AcAzOClrH33`O7*vr!p?g|SF7(5G&(B)%Ef z`O?RFyzQ>4$|V_7!kfdj;}vE#J_~-1OzbxuPC#Pk%^kzB?`=$apu`GcG?6`aa$*_7 z7uEe2gO?s?Je2cjj!>%6|Z>v zVEaIXcm+YaEc^Jwgn-U~Ag3PXTj<0PX}#O$XU{tITbl+32`{c=k{;|rxre;7Vr;o& z>#gM1k&0#8P!)%6ulsY}4@tkp+gZrOg?4(kk zJGtT>tQICg<#4vxj=${=9;r6&`dav2N#Tj`_puqz#x%Kq(oe_+x4-XQ{PBz8Wmx50#g*M|CwIR%-f+ormA3L}^U^yDG zcWuQF$#LH)NyU68`qpsHSY4^xcF)s1qt@Kbe(+>zdg0;Ne6=L>EE&F!Al&>%phKrh zq|aa8cEJBzBV!}~GjOtQtF^7``e8ZlbXj!kodF=U>9J7q-gTqvY2bEHhVAWLb(^4D zKzr}{`#xy@S!c`m@kFSZ7c$NVwtu_s(%mf%an?x=v%>L49%P_9LtQB~3zb1RPSy|Q z5u^KzrV@c&6dBaX0~v!YVKvUy<{141&Jp^)(?&0iWwtdx>5{44di$^}n_$4_HTT+X zDTRIBx;RY`YxfpWF`km`ett_0nJ3%O?da%ozHb%L2qryxw@o>S;c$)s4hu<%F z!Kgq4JG2>n0jfz+qsxvA7=C9_x4?!eG;1q8)7h}pYQ7|W*2WUeiM73{wU?-cFlDnQsOsAzf=k^ zCLR4L3pXiAY?S@t!c3dxDaMhHOkdB>T_MtnZMamkBUZAk#pn9iE+tjrA{%Fa?p>Lk zhBc@)Kk|AuH+E9sNYt{$7d5Br^%_Yu+f@S0KoJFN%akA$W%-L2YdT~u)$R#O)=TOQ zCnc9JReiBGa?EJLuO8yxkPeY1IJxdwGz>SH7uD_Ce5fttCf~b>hD^+`o|f1H7!>U0 z;Z^}mGUQa!LyjvWX98`xR1|MwxXAtnj$^u7yu4mR16wsi_)+0$EXXt3wZ9e>E5EZe zBW_yycm~m~HuWCkguuPrQGt_M<`M6D-KsV~TpXpT-%-Udw zwm>6Efc(sIFr-2ukutQ#mKD&}1xLzc%&F2g--N`GV2Ol0x`aIhbl7{VNI;|q{B>e` zdDGk=IC$T9PmM;DY7%VUKk2y@6;LYyyyHTcLk{bce>s>0oUuIc<7=fZV-;cKoI83h zNIevefBWV;OT?>l<0K;XWl;$~D|AnIGVKaCKPcsOI*=p+ zoO+GZns(n6oR$!II$1wnYsbK^BQ@)k5op znXsdWyi@kRzOsDCORxp0mRt~IGGR7*O+l$8B|+Gq8Z`FRG0NHDZHF=?=K*vFzglvk z)a_z_FabU~@2b5Wa8v`k^4|3+r2x)1UIWejAj9vPjmzk{am=6usPfacrcQHX_WUM8 zvGOH>#^Hz`sxwaXE&R{aGd5{l#LWpy%w{eI#pV{wSvx*`@p!qrgFUX^zx?L8`}1&T zzmI$Tur@*GFTn3FM6>7TK#Jyowou<;Rp9Q%EXuIzoc{B(-wDp7Uc3d}*n9yn&?@?D zZK}%PeQ*3S9$#JK?MoTL+uHW`(HwELrosE^4DQSlozr*HcA#MF;^MmoVpApiV#^HnD{GSO0 zXuk@7cL0dz^K)r_t}Y|xlQ%QHj_R>o%WNVn zL*b=W=Rp_F1(P$yAl{1K#=*B%%|M(9;fl>tkP-)FjHt@B37SI>tP}6?bm%Ls635+G 
zo#a1(=rn?O8*lbZnHY-L+WO8vUxkc`MA(I7p{YwjG@udJNYi>=@?V2P80mH>C70ul z=Odp_o}T7dmdVGx|DhM_(p9a$liSFOOc7?px*38G4QDrF1ZCo%?S(zIit2KIet{G9 zDO{|Q*DQh&_QbTgc{G0YUov&_MBDhgA2>Tl>?c~>->1laWO4F{Jl@;2H+E%3Y? z0YwcPV$X!37sM$o?LB=~1nwJ+LS1W-J*H?s^NCTIrSxl*7(zy2=hK$Rnu*M8C|4Jv z1PF&8SU1Uv>IxDuQ0ldlXH2vtM2F#VNlZ98Olv0{MT+JXYUN@lWGP3bvScYI*Z~ka zOO<{{8rB&#T5SwY<*rX*jXcd}d_E>#$Kz_3oG?#sd#AK1Ti!>jr|HpYSb{og-FEmK zCGHPZduYFGH@K^euC;@lE2`$jxzANb%%VZR*5b^Vwq^`REkv3F1Q%yXM@fX*Gpl~b zMMcYyzCMbyr`nVzDvDVn(lLsmsSsGL#rgYcosp^a*U_$UCB}YdBUmq-^uI}%kv&2F zmp)kSPg8ou8KkzEp?9*aJX~kZ=T)RZ+-@c&Wx*Y2WQFzo>foJlaO%e;srt3aYMRt# zV1)s|dsMaoil_XK{qtGo2*Ojvx#-u`2=>bv8f*X1*CY2@GN?n+@Sc!O>}Y6M)EkCt z6r%K%?*Wlgo>bM`)%(WrKiEc1Rm2E1eo!Y5C8MYJ>W^2}!Mh8tVRcN(oA#$FHEv!0 zLm$WP^6FG#PVk&KH~_kAdAtXpcwZ&tO#6GkFU-ve-MFQ;U+z5pY0TmEg>XMo18M}@ z<)dL1tubdAFr;WyoOmg&?y?Qn+N{IFb;$D8Q!Py_>3Sl}qI;KmDKcGkTA2xw zEETdLW>Mb^A!iitzUktc4^;+(?<6eYGOvvs?CRWK*XVdKbQ?TDjjQ+fku#Ou3f z9&e11bCR^E0GIyMx|xC%+L4z{}MI|8`U?geJ zj948x`A1W22;>vY&0(YL+&`u#_sHemFx#Y4HdZz>cQaeWLMudi!iiryNMY{3*Ygp< z&`pmT^*O6X^AA55c;Z{7BcfzJ_>Pe&UaecD@DDRXJ#%X$Ilxtlkt(;h3S+^)??--C zX1mWppQK%c1+ zznHin_3lhOu&Qy=pYBp3aD%^v%Yu!%VnXS`)IW*BxPLXfbZ+c#@IN^xe%BnBZ5 zBO{ooTCK_3lJV-Xy~O5}zm)17GT&EC(DeP#;~8dt#<8du*OG-6BenrriAba|mFnqQEO+(wacqa0B$HzKt z018FX%_F!w4`IQCz*8CIun=K9XUu&(#qd)AH-X6Er+S5Cgx08n8HT_(TYJ8g7#Lyl zod>yOoZNB5rY{cml;Ry`K2*}PIyTVg))yXu06Pqz(^(~Xly3+mtF|Zig5xPjkTX~h zS)(N|VNmCLr;KK(b9wm$?k^yExUcFW;lGle)sYz81Hq07b|qT9hHR+v*q7jwT;)mC zNG0(gNH|dj9zXqtn224o$fk1KJRrX4c?1^x_+|U4G|l! zI4!G>LhTn*5vLUmS8ul=y4pH#N8N9~79HQcpK}_%H@}}9C*85c*gH}>9E~Yh`P}p- zWCsx^ z*GqpI@pDXfyXUb79WKjayN|~+U-$c}bkA$+__(RDWX-)UHqb#R+d@Kqe zAt3VZ%1AeNqXb$w;7r0;O^5s<&nx{}wzyQsa==2V5e1Ul+8M@L9Vwa>8o)V86y!{q zPeqQMYaosA16na_mMU7?BHHdT8iuBtBDi9)r3(ct14w1C^G`i3Z4v$bku&m50`RO^ z&!|E09J34{aCy!ex)AqG80`Jb=I>8n1GaPO<2*MYLB-tB^><0qJE~F@ zOCnHKOUtv`)A!6=*@df_XoiI&B=FINdfVmVs`!xS`@~$(#mgNd+#uTd^z@fCpYFEX z2Wz9ZsXMPr)U_?o?QdPvCwBcJf}#Q@?rF%uf9CbV4w;%lt(*=_ej>;k%f}{?<~sva zLi4Hk4X{VzY82>we9#|N4{(iX|8haIMl@i+Hjg?&`yC}fFu;XYHdYJm6P<|HKvh)d zSq4NWd2W4k+CH>j+Jn$+l-saA&9vyj&L?dcls{xgVx>R*2wG)jhbBp@nAv0v&qkc9 z*{*E~arS}lp{F)1y2-TK9V5lZ`>c-*&ZCmQ7?x_9L4wT3uB1wjXwdyc|ID$LN!ys< zXque!!XEGS<@Q=hO0MHMMd)d{rmpX`q@fqmKx^qTrM=FuemzsPj1vFaHC?M^<_BT@ zoIwykqIJbkPS-S;6$&#i?Gl1avau`v^tBNmAH$bR+I1)Ont3qB0dF}X>38+0h0+gG z7g5&!tIC<8l%GE=_&_j<;ao)qJsyv4P++=X6EUwad$`$ix%OO?)MoSf3HMe=R^IWa zC^=fA8Q(F8SU7JkfeFi=wpNg{joc%05XO!93mc8*@3@JGd@~H8WZ#VzlRiJ$>@|`0 zl~16N?S)(enjqASZgO|@Yb<^EK0N2IH-XL$r%L0NTj^0&6U|G>s(er5-`y|LP~KiA ztOme*t+vN=+Fg$xN$y8`HPXFrOGHF}-pg`&KY^3-nXz*8!wg0#Zw;ZMKE>tpYkCPF zb118zf44mEk&B$&2}FG6fl?g9q)132IgAnT$X&_LMSir8^E?VKNrrJ&*%l-x$ei@2 z()?-dBNz<@PbQfuQ zspXu#)DJgv4}&zT4i7p|RUVS8CYix6W(GUqiX{J>5yf^3<^W9ootkYz5bmqC2nOOL zz;e%}T|*tUGPjl7BZq$#HDQD&?^UYew1VY(J?ZL|_IdZ4L*d)g8JsQO%?++wT3gqz z5UcwwhoimmnvUzC)0&R@E=Y)`?ZYkg?Pm~JoYp_wT-!d-H9Soc)~{LLZt3gqb;XHb zWQ!#qkSk7A&e;@b$R7s+6(Z~Q@-g~zcuqt$Z3 zRnw9KH77;)GpBtfg+{SrxMCB6SUi+$KVk$qlv6``B~wgaW?f!D^rk3*c+lh3XpE;< z$WG2#x@1K(wL3f+wwbaL*F%h`JSP^o4*=8LTJ~3Qf!mB&*`D7vwclCUdf)Yj+^6zp z^0U0GZ~3FjVqtrP8b9+;3vxvhcc{wU#r}+$!`Xei32=5eFFI)0%88mwwbfGdM$pYA z9EP}T4mqxw3QlXaJzTzI-AH+IY&{)`oxk<>yS!a9^X>fU#4>~90?1($Cc`K%$5qmY zVUmgJ&sk{}5?$dTLwyTG$=#rm`F(D>7)knw)7ZcflbcQ=HNjeW3?0k;xljEV?i^XL-!oI0t_0|-R?`{08I|Hp-G zewTIj9WEmtcma2B0Pm3xaCbp^Tt)YnI zVPhSB0lySjo^JK0dSTsyztL9yyMk@%{%GxVXCpPe->#0+qCB;JG)wt_f~{xSIP9Mb zn5>#Zf<`OENNkXRv+~y#Vi|*etFk7Q#G4jZitP9d=f(Z@O^p$9%%SuFTSHefgaczc zR_uE~x*wBw-yYucv^^nLONi6e*Fog~kho1yIvl({Y)F51FiWS&gZ&#=d)2)fhoDjIEDg^C=T<|HP~(ROOe`l#HZHw^Hm*XJUm zFHsW%2>OjM3*U)d> 
z@2wI)b1*PuNjn=oi(PnLM&LfW9=AqF-P5gf@jsi=^itj!=_(~Z1_;64)tNH=;QQ!> z@i0*S$d;Z#QYZG1*}RE!;<&P*s0p*)3T)jk{R~SjXo$O09wm$P71qhsoG48rZHn6l z``_p8_vkXEGxYU=q<7OJ2D=|(`ApF%&1q@Dv(e-B?Fy4T|0HNO?32^nIN;Pw4ha;* z0b`SP{eI}uxk>Kv-CLjOUcQ|?KC`i!cNez9_y^-gFn&?|QwLs66zj+1@HkbIfHDv1 z)_MAOby7>x-F0)lw>S}7sBP&ufe%l@SX$ues`^JEw&W(c4J!(=z`_s8!=TUUe!5<} zco|D{eHxA-?1X6!Bm;xULnf>zB5gLQM$lj@e&>v!8ac2TW|5n0udMAv=ggg{us$x_4A>2w{iJSZ79M&aDbB|ydE(qvAgp)t`;~TR&D;F z8VbRDxs3#(r6rG=(R|iHH~yyM93sBPLEyNJ08>cd%MSuZ1hWyLV+@o;{Zk+wSK^uI z>OP38&C}$x6$3$w7j{nCV;lMhdKexG6OXG*JkmGqPZpBv?Cf_KE(TKsmjU2y~v*iaTaB@(LjKf3T>cxaWp_w&?Cs6-M z)#lTc{ECLw|1wlvxln#l zpUzG9+3kc6x^Ty*sH|*Um1#AXI5m_+mqGuBLpAszDwX)B^M*905nFg`0FW%{3L*}t z^eNVvA9U)TZ>TKVE8Eh=SkbVng!@ArxrJ0U72!Xr-Vz)opOILSoyaO0yHm6wW-F`{ zI>I8K3khN&ZGT*ND8$7%&ym!Do>OukhThagR6?N4;R-uRxkL}ShVbY|%E86;A)16p z91r~H6>R^3ux&G+9R3X7USD)Pg{<~TOrY0e;N!hYr( zlku3x6$nPm56%rP`vTzS(|-fZGXg^@QS$sOkT z{vr@|L_nqYHl}dgv}RoT?xEh2%MyN*LYvqR0I&2nmP z*hY?!Xrg}_R+iPf@J3G9<@KnG%luq+JSpk8ofY$ELNcw^^q%q6-L#D0T1LvQaMaKM zUhR{1WZjIw@*)G&K#Y+ljBmdT?T)#NUuMC#YpCC;FhPDSRH$O7(#h%mv4|8D9G@fD z|KZ3zXEE4Y=?q?3m11GK}+io=!EZ7tq zbDTuD@!wK~Glc%0w7KtBei8)d<}M0B+XLvs*D*+$X7CjjIKG;%!4enHI{^A^QVuj` zBKF0PpeYxQle33#!Hon#UOSoE>Twmn=Pc&w|Mo3R=~?6$FM|j9az@6SO2zmHr@yB| zJz4>#3Kt`aB)!~ zyC2B(Gj4sKj1-}<3=~4WlYS5jNuiCisz>Ck9f?!V!=H$Ma1Dnpu*RmvauyF z0a;Q(rY$~?SB7cupmz@^9^LGY2L%8P|67ggT6I=|v#inqXC;S1ZQFObnX-cJLg(pa zwpO0p$sQv8ZVjkh-q z-@OIH=Z|=$(JpVgpBzji;=9^WlJgx((^gSQj5kj^QQ^JSgTe?jg^}4n0>)9VI>^!w zTtL!tXDwPy0t5kYlS zQ6+U!C^pR|Rwpv(YmIe_V6RX7(RLhV8~#*`$cY5Q6gQt>7PTDEId}}#IxTI zO0!B5Q27J*Z2;V+r8xY19g_=FBWS4hty3Y{C;xc z#m&$walb6~!l-{lNFj65Tz7)B%ty_x9aS4yyH!z+7_Fd|V3Y7gc{EB8 z#vW&kO$tOODeo*)6b5x3bbjbxtYts$U}je&2@loa;|IqS#uN6>xK-O(ii`?h8Qj?g zA6V+(H|*T{J;-ZfA^S59Qw{m9B{qqVtNUxzX{NTWuKKs<#bjTJ`=wH^Wuox+snAuq z_12fM#j6*$BT&)A`ly2Eu0rj~K0Tw1CJuBDTc(|`k67y|hlI&xfve$JpRs>ii zXoR{2<%^l`W?%k7>q7ki!w;Z|nrurC(Zlm^ilc7u$nXmIBNM>H8-FJbE0c{?&)3u& zSa@sb_tjRK0h>7;MAhhf;pm8KW=QIFe3rYrl#5p z3eIm>_O!<0C|gaxCXF9(&2RZ>_MFxZ>nXR9|Kh8Bpwd7-COENwHJsd9x-cth5JN1{ z0U`ZK4Vqph%j?RltzZE6i_BH)9N>O5q!Sy*n6@!dXD& z+6QbNt6b>K7$VtM3p?t5u zxb)Zq{}dAGdux6P8RTvn_(+DtmB}IKwl&YM#LN=sw1>ez?%LImRM`)@iWA9Nyezs) z9xnDMS$gf$1Ao6|YkAqe1h^i}Y1VXSBI)RIHQF&Meq?xi`F>s+O&o+=Eh_FiVlSHl9C8*asiP2I8>w;`4 zOs|68SOq-CbxZik=(|6vXUIT+&;`${rY-xf{47GrwiG#^YQtWG3s=3j*HXe6C1I(W z#rsg29IKl7K(D)7E{hn3i(USa%P%FokmbrePfzvY9t#E!o)Bk^nR0` zTJhmHm{{$L-2>sY6o4F`HxZRxfj+0wKrC9MGgAf=6&+N(+t8#N}Er& zsLE+9923{Vi7J?-rQB=RosnwimV4o|>T?LNMCQYz@0Qg@}M2rfz_a9@9VIq9>t=CCP(mN5zzGhKLgw()_ke%XkJp{WIRLmHxkyuiS{yxLEst+u);Z26LQT2zWaPk+!B?pw~;T7vJeuOx`_YD0^7~4 z%JZ;T1eCQAQA|D~m?#}nOQcLiWueLm&wrykF=pc9FzsVOd1$NIw&*0*^z`S@{d}b6 z{q8J|dx7g~`E7%eP>{;=JzuMD(pMycl;qae%9&8l#_--qkO19R1Ag#dEebSXJ>7KC z;?ziMYo#;um)+GEg?jr;I;7Zi-NYD!+&(2P3sw2lI{LA>F7Vm(M&~kr^sR+(JHk`V{!59pOcQ0$p9w#BW3OfkxA%m3i zk_-v`6qO$MH{$RK-dJYtT596m@o>_I)Yf{UE{ig|6R`H)*p!jwNb8})B@ zK}!U!6NvNF#NgjvLw(Y;_n$!8U4ML6wU3uysLqvNWan*H33D^1b^cdpddyYYwgq&{-DSOd zeF|d9F<3P_PO8-#kS79b8h8B7Qa5NEh2APx_*$URJ3M!1wqp8=((6+eYg&4x{Y zl|L=JGSpAUzLTMOf3e%f9?AG@*hW_`I{LWemVt2QZ~d{bgKeP(E`F!Yo7%}p zaM&p}ohYLT2W{~^*L@!^mSr{fI0+f_r-jDa=v&PrKkDT_9`*24Jf z>7UB9T_ArGY1x@P!Y0C6uz%3osL2i62MB-XjBYn0bg4rN6KC?9`FaLaOXd7oTR(>+ z{LM1fu*Io||M1JF^rn;=5Qnao6IWLMx9%-EB%sg3Dv@N=w6|K?{8Zv}bU=<*=Pnh- zOTe;Um91Y2G*OsE={x*`kDjNRHX_icxp+0uSRZRi{xBio*4LETpUfyB_Ke2t=wKpNpGdaDABQK4Fz;2VbIV|jn*`)$QBXz1J+8&NA}|4 z1#5n(G_q+a_kjrDR(N^!GRj3kV473>K|T+|d`WmdZ}ut-KAl^S<2mmdf8Si+GB6Ru zW*(tZz3h$NuHL)|PR7P0lkog=y{x@T4(^#JqaGvjAi4mKJT)w?nzd7cOWaxhCXkE) 
zPJ)2I5(E65%z1F%0!W7MV&*`dGp10JS5)v(s~=X&h(Uhsb6O;b{!73&XU#baxi%4n z#7bdl3w>>VupbWmCDMpKmnu+mO4qZda*@FYAorW&=?jOX5ZLE!?X^2)X_?PK<(cnI z{qLJ+BM4JN%&0*Q@_T_1Qztlc`N^t@I>Xw0*Kas))AD{yVaDxHf_VYi%XvnVbH;>O zOEsnN;~ysQ8D4mr6S1Xj{8?(JyhU@)AN<{G{WOREHEtzG%51~BjqN5hvhVjAzbq*C zQ#Yexh3DW9&d9>h3%z}#zSo27gDHEE77GK%hbTtYdl<7`ZCWb`uB~3YYxz`YEEwtE zic4RS`nwlAeCmBnYL>-nqIizmSk~s}KW-L}lG8A-#`9h$;1YEF8Pd3A!(UXIi*XfC7 zgH0-{1^%dC1x-%gmHw_Al)ghsZB<8gMOu;%NmuojG|8z6v@RlyvG`65^$7anJOjW@DC~>y<7K;R=2M)laO+aXA=w3{CKV&-7c@^qh@g&i9I8(2csh^BLV zL^ViA&YZ~pi-NeHysc?eHB1hZ#+Wkgt)*jOTLI{r&%G{7Pz{-k zVu$-bq~u5o6q@Zvhc~K#IThr!-Ph5On1~_>(c*$3-0XhCh0*+GmeK+~`<11c%St3> z_Lg%0`c=cg4=1oBj!~ti{28bEr(NyV}0=R6l?C}ju zGI>$c1ORWhBy%Z)uQ>>Zzx^+cS&yU74IE-I<-iAQo7-lriY1-L1iZ;Si)C{*B#nj5 zv+8++(k21ZO+dSXVW0GMrf7t|iA(^&AYMBd$pMw6C`G@N0MYHbp@DY&3f1s-fA$}o z-s6ZAr+KYnZ4j9rY_eEjeVh2VNHuC$$~$_p<9s_#Vue7$AtV5=2(CC<3buBz{k6E( zk!806z@j_-ZK*B}RO|LDUKXjGCKp^IiFwQ8R0|@IGQ>A*f_R*MWDH;m*L^WXa4xB8 znKG&}9nj|$n*h?k5FK7gEP;o_Ld=>=>%XnZX0p(=+mO*`;G6exI2L8BPLQ0411J}o zr2t%#N2$Tk)>?|v@yX_Va~Kv(L}NJGBt-mqTN#UZ$smZ~Hbl$RF@owon}T7M8afLb z)}jrNh(uTJ5kdOeF|f-?K2|}7?MC)67=1s+XsJ3q;lTqIO7qCazwyB0BAVd0#?N?O ztp4JDKRxpBAGLg7Q}Ev}X=%hgZB0-{mdSX%IxABIO3>QXi@f}l%NXV z&ZBd4Q5#qjrltWV34Lb81ujR3N*5!;c0QJO9twtX6UO?o!2vZlu!x;IBV;Og(*Pb; zV&H44V-$tSv3Q% z@Vver253J4jd&st9!}vdE=iout=!&56*ZMFPmjh}l>YN%-Q^pSdz+X`z<+Ma3hrE`tui>#$!cxPU!D7)oKF+F`wKTj2zP6wv zYO`dZcN`W9EOL~u3>{-Y^QFPMD+Hv`Y>q=**?qcCxDpl!pfJ*qW~p ztI+ncHDXPWjN#MPc-p9dI}_9T6Bw&9MII@zNO5G~dFGE;D(GgJ(?_z8B*^H*In@{psVF3#$XH4|qAf*^ zjsH_&DVL}jx)xlv58|dE5nLA}j4C20X8tO$`}K714p!nPS=fNY9P02L8e^#aw=g9V zDRO&7#@d$6@YCh5b7(Gc)rA9>A58XHi$2Aavel$h_0adcEj+_9wfiXBSC*Z3 zKKr=1_2kWI`}p{HawXBn^X}L~E+^d^>$lNh4Bouds%;s>dwt5esg$7K9wVsf!tGF} zf{1&-#i{_HCBb&jZSlTRR>6%~*>iE1)5{LdA^P#O6|h(H zr2mTjf_^F#(yzDXJr!e9mY2OoWM3x4qW4ne$IMX{V<`KEa!c&rt5Q1^!_kVO@Tnbr)+^j zdiho0l$nse^9Hcga1CkniYx^p8w=2mO zNA=kLXe?O->nWkET3%H7e$k|%J<^vcQ>8$|_9aHg7l{*=9j{2*KvNE=Jl3;ZzC+h@ zY7u7T+}Vym=~-W@?YlX0 z9HdaYW#RKak>v8Q9T~CZ2`(DfT~Pvd*RJgjdN6i-!r45zx)7{*KOT78JJ&!`qPwMl zK933Sm&ql%uE*qRWDVTF_o<(X9yPo1LWx=~+F(-0kqiql%;JVAP_`J@_`zrK023X* z%9&i9E}1M+yi}0&h=I|_OhEZ$G)Yl;=Eh*2c2Sw+taaA4STHodQHy>ll8d^{y#o-@ z5H5ZqYdttdZIr-a5vHvClZ~-GU-H0}T6)GF$y)>5%U3?vQ5b+C^HrTM6mzUOELtx@!@T24QKI z68Lz(e{k>IGv}F^d+vF3z;gVq9a^4FQydHe3F%k}AWBD}1OcR3hf1JZ-=8!EGiij| z`<2PSuYzAbHn5p4o^`}P1#Gcb#}Fziok&crRkhtV(KO!&MvZ1$O{9GCU9Ndz`Q+Koi zG{s`w1QnQQ@Dw`Y3yx+j>XTkgaI2(RLo!9BYLxq+u8oLr0Q|@`w9}T@P;&R$-qq!N zUI}v{{aVc*mSe^^q1#cIHRq>{*sdDX^=_Gk`U?=&TLBCPB{xXsbCOIB1w$#)= zXsg_49Msuh)rHfxiL>R1MlQ-P8+)+BhohTttEL(hz{>absf39jPkt-IZTcMeyIuP| zsj@>*JAvLT2L;&r1GT_lUk%mM^=u@U2HJ|u;pX^X(Aj2x@W0{M1!jkG`jlfW$Ai7G zg_QIPdzqr+VCGYipq?ehq@BjnD&!o#55%vIJvL^sM0zJTrPo4BaUo_DS)D$ z?W{xhYB+n8eei3dYa>H29AFC z=a`F02kFanIWy%IE;Cvie|7<{FE|EH{=VQJwRLr6T^nu`^G={BmyDNJ4-SCjCH<-L zQW8Aj8{1AONKbnj1s7$gF(vjvrtknH*%{@Aw|#pH=(<$-8}-22(;6yno)71?SRXCl zRp;T+Xa}UQx1&3;x75SzUegEoaHOELEc3zm(`i<4jR;c~3WFaPC4xM47J~(Rf|=|3 z{huC>wQQ|!(py$$Hu?h}=1a`_{Pur0@m0voUZ!NP9iI0Ex+{YFgG*s!I!}6j`a+8G z(3I)Fh%EcJT;(lzujf*uul^Cyc@`C@m+RcJz0bcxHz7C*ReAGBUodc+kje3CO{Ori zsAan=9UMbHFdcX{X-Q8RxJKf2%GCYTn%Wm3nkH7wr?gR*x3|Tsz>p~vD`?JTqb%i% z%;HOcumTweQA$lYc0}YIF6Sz#u683~Oy)Hh#@C;+mX`GG#Q`fJyu{!T8-3obR0y6P z>jGeYs3bN6ds`r|6oNOHb9lpcqT2VBiT&LQh%o9%v`ReR`uUjQ(i5z^zZxjqkl9u( z4&(;h?E)o-+}>Vu!u;VVdBKvRGSEAV;9D4TLqTqm+DxvP=f5>}60VIv3eLA%gQ^w> zji}j<;QHf!;Hd2tiYU8* zT5}N{+=#`30Q|7Na}wU*-)m+;HMwjT8$wwIV{Kl`xcPyuC;Exd@TQ7U=e?)yc}a%fIY`n~dNyi9Vhf<@0wJtQDbz}U! zj&BC!)8fm58GpeHSJeA7Iot?ZG<9`dLY+O2Vm>v-?A-i98P2sr|)X%th^1#c? 
zgMx@cUY&xf3W(%DwSnnCMu>zQzujX(o%G(zuhT>!^p{vUkX4wgm?cv8_b%YhTa-oN zqv;~2(Fbg#via2|Y~0j(nf9lY(C4vIa9=}e^{_Q^K zj6cj57YqQ1c>O)aDCl{*Dy?}|g8nvXsDqzmn)(B)3~~^8V3G@Gne~y_fz#Nd^?MzC zCFghfE$ABKZn6=9M7u~Rzl3T{;^E>Gnc!lqU|-fz1kBBkHcD!MMSj-cz+A*OZ8A~E zV)@FO<4#D@%ouIyW%sewuGSWvR}-apdb}>?HbpQzgy&DO{MO#f; zJqw{*BPC6Sg!~*>U;X?vw&Yf!6IRu_6P6vP=qmO%^#s5ruWnliW0=ES_~}J3q&rQg zF@Gt-XuDLdUU|(h=NN0YW)!RkWw6QcdEKwSr$7T`6Nb|d1(-!^og{@D%seZ7 z=1@hz=qZx=^Lr`^yPe%ip{pL@smv@EXM_9PhqK%{B0USV7yG2 z76e{FuV-a`lU)ppbvxA>>Gb`{w(T+zKwoOr$mHdfZ_Dy$PhZj&YBPsos+1cfaO*u0 z5J|>CVTgkYqwTiGik)r^&r-6WGT=Lc+#<+a7oIMLC%Iy2R#ACETnuv%n7-3gx$B(0 zNlPFtO4~CNj>$-YZT}Yo{-3-o@|I$FU01^SF%HBf>Ttjeqjw+VgR8R2(?VHm5rm4E z?|8njK8Q6Zp(1^(*5w^AuF-x}FHJkQg=8dLAPbRfko%lq9L@uPC;daw0jn1yL%X}e|P=t#1w3Ey*4 zUObJ^k_87|Ru+4=416_J@>_W)zF-)@ql**M+2h(A8%9CvmxwO3-@D|P_48A;JgcFo3?zzFJWv!^)P!QAzV4Ryws93)gq)G zOnSzx`|+hc$p8{_pTk6yZ`!W1&`R5Zz^ZM|_c6vvPXHV%Rk>ONUb19(-}Q+>=7-`;E`=tOxX<|>wJQQw2pchUARpanV$@p9*y@! z+!i5zBbv%2FhH0t`Smur6uGbO$^|FFv}o4Ro5w$&-*=?fiAUc`p$`)_QKXgyRJw1k5z{H-wHsT^1t zpSQHBg|Q@EW1v$P%r`OXQ?1Rx8mc)*WK))x?og+) z>=s*l2UZC_0HO=g*P%Y2?b^$Qd7lD_`o=yW(?>gJvcRp~sRtEg6M`)hCMV_W>s`Cu zyAF$~Na?rQl8ON(*aWhE=?lPMfY6hwV#+}#iga*vkeU~yejW0GW z21>DpsY+yVgM}Og&41GySi8L0U+jWkxsrFd%a-13pE@RLCsWrbW=|*3;bM-ud+l!v zrkc)^cRmV|S-<}9uAocRHFQ4{%{-Jz{k5f@0Fe}a=Kp90I9EYC3O_^^gHTseZ6jeJ zvaotww6p)YxY^Nyl2S{-S!?D%oMVSoJZCCJJVQjs#tffu)v6kNyKsIpdJM0JrZl_5 zzD3=xl=)v+72#r^c0=C!!&8QsXVuJPMVZtYi#}m&&H{0=cfA+c6a+=E!-}ndti??y z_v*Y7vL9QgH78-(wKW^HD?~_Y-yq5PiE&v!!hsGSqUo(D9V6Z;d?B#Ly)RU!gG*t= zRHLNK`@(DKMtAZwruXEgcxMWw$r0yyyRGIH68Ap+yG0#vcHq~6kFGw`b8)Pk@^t&F zfQAXYQUb*SfA{^$9ZAp=AgLpYron2_y;7fy_M3I{kNT^?VW6vA#|H&rZge?b zaz<30&o^{T5+;zjGzq-`_Vr;OdQ_&wQ1o;VG6FKiADGc>hp{r=B- zwy=hf%L&WptCa$(Oh`lizqcm!v?}{}VTQ)%Pp}Q=)!+~zSm-Oio+$pnQx*#vhwL_p z>_OY)B1~GwK8`Qk>gvBRDTOKx+1bsSCex8&o|&o%67yWJD6Nf!YtwGu_xw3Wi?Hd| z6>V0BN~aDCmP$9VCv!@JjU6!GWp7CmKm7d?bxQK*u~aB{>9D=O_ahV7QuMh`Xe;pA z#-W1V0sG_r%h%mhNijKqJIlM!Lhg;g^_6;ah&Lc>UjAlKR|y~9XUP369WRv(LGx>AUYXp(16C_p)Q z!M(mE50x75osp-R6Hh9!>R+Pv6Y)C!Q*ABr`rBo1k1yo527c?|_A5_S(Ai``|GS5& zr|Ag${Kx3hJvNNHyr|enNP`28Fhl5SKm`}28Rr3{Y8LeJnAuN$wKy|<-l2GY1h~6? z6AO2-IT?O=y#a7^$}p60jjJ_aOe?J3jjb4F_K zE)w7D#+1rj&Rq zSLSHZg^0uTezTyr%R3&as6TZa_hmTim`dnmi(P|3E+ z4zV))gVl~aJ#KiogE#P$_y9Lz-HZ$m3H1qPT z@C&>1BC$fz|6!;9bv>TzWg`AivGRHA0a=z3#$IuvX} zo=u{-ek98z?90kT>^my>TN3h5y$6@Dhf7>?sPplglg>{!#b?sZJ}#RDzXLO@bxFIu z8r_wnFIfm{DK$^NaO;b#oZWV4T8;p=5CDpL7F3xCcSVdt!Qv#3<~kpULJ7j7`(zXH zj;Q_p10I`8VfnIdH>+P?7y*_XCB*sxBfo|-b~Q<6HJ8^V{#B4!OGD{LmL!57_H@Ch z>kb-J)Ons&YS43`+*`h=77`W{IOy%8*Z15gF$$n<|(u#L*aS(Cf z;=h<;AnIfCC5V1-sXD0nuDSjBDYBq$K4^bup)x3NXYU$tM_3FaI7A0Dcziyk8tUC} zX#K16B-oN@j>P-PdHyL+R?>J?Kt&;G z@&M`K9iz2O?p3@Rxr%1&w(wDWg;f@pOrEFvi*dDy^7CMWP~lyZ{#`1h80d3G*!VxDh7- z2bAWm0&yuxDZOsZ{?f8_G1>F~)e+TJJu~bHh8*!zWzR^=ghQStsyFa)T>|xDyuaA; zs>F{W(S&YS(*D$$of$5iLakGR;yLctIkC~*TPu_9hG4DF-W$9K1EB(cO}sU;22~~*6HH6m?}VxpnuBH+xdSzFLI4~vnjp?{?h;0;$7xG z>3JDe;_X-rIvtLb#jd*eJjQeUuzfnPIyqcswXXwC#(ujpRL2SE3($auNQn9ryI-h9 zF2Z|P*VLRh8jA9*t_uX;bM2ZKzDiE{Bw|{876fFnHErD`Oqsm|S9%8)gZ%FGK$+v? 
zl~&0MoVbBw!$F#wggBM1aCg$U_CoO zGK`UDQM_J$<$xriw!oIYGhet<;N!bBHMvX(Hv|HLmuoRm|{Cu7ru`DAp zaIIG>asMFvkS+yM561Q(QGD6%z_l_?+9!~ZR=QS#@{rmSyjPV|Dxr|CsVp~pSvdtT z<`uC0Mt;cd^Z(ve3`?T14>{K3IyHj4QTzGSNXYZdvX1Di+U?|K-@2Wm#<|aj%9z=X z-@1|4)h5TtUGg2}zw^?rHM=!c`+`_ws5xexKtzqsOwhwwKkwG@cN_UW$h~gv`>GfV zPYuM69PGVkNrmiap}?!Z8RADG%;LuH*yc3L!Rug??wcB)1W>_jm^YOhXjHgR5Ztes zLsVNreV>@^(g9;J&7ewAUh@gj~BoX!=#X4&&>W7(*bTOi1j?Uw0Rg&H7an0P1k(f>7D5{{j_xW0Ts8 z>uc3HXF~pwUL8a_F}UB_zFjc&15QhspU1945*LaM*z4#=qZoS-$AC8fBf^HQAN7V* z0qL=BSn-@J|5vNHXIrKq{l1T#)hNx|9ce>@Z%Sv z5jQ-$i~rJ#V)FqGdUmgT#g=5jhp~Ts*B^RLXQzIGuF6); zHOVCm>!lSeUz@uYEy(wm`fk^90jP2nASLwQqrE1LMYwr`8VHp}P_7fxejTP6ixd9B zPgZ4}!CS)Lk_!Q4O~oa+ zyjCx-`7v~JtWXOz_xxVPTzatkwe&Y91Ca568bkzd!O{3%ijuFyzO1*18wywr4FI;< zW(HVzuxr1$SPY&fQ;R)hAJ|2?x>k>KDb_2v>-~aaC7y?pVAld`fe}AN(>jA{*wF;h zYk}%SPU5uvYbQSC&TB+|dI6h1CNNiKb0@e|>mgS6MycYIA`27MdQYx5NlPzuHB_E% zm{e9*2BC@YHB>ziz1Od&YgzvDJ{;Y2D>L8k?Xu$6w3@}f+21=@t$|A+Cs*t#0kRFE zm7x&H_s(ozL_88LIfQm$Q+03aC_E<3>J2(cP{rV;D28R8B=%=^3;o&q(mXtiW zpC69QV7Se~^*7kZ6q#sMG5Y?=Jy;(0*QelDK|0TIb*RZ~?m`CHho-Tmq$O+%^rWKC zDNsjh28GXH_XZ}i&g;|ZDCaeOZuc(D%d(=Pf*s?2M_Kig%I|q)tZ_C3%I`Iz!aVAFlT=3-zMvA!%E{t)9Le5h|CyPr_E(fwaE>TuBc^K;QrS zZz8?x>dB;UN6_8O_izgwjQ5U$xnVE3qdKg33LHGrq1tZKsL$Ftx3S70VO;MvVgxzr z)WCx*Ae=o1TUkaea!iA6>o*JzZi}{LZ}Q}R)i!~~fTn&TdL69Y9UC~q6mia1_yV0= zWD=wwNL2_I@JJ8StohmgIbOaik~6v~L*$;mM-E9iP?aGAcs=N+)GSj-rW-|`9OT_ZW_J_3G;xhwZFD@VpVdfQ=`o_dvpC_n=_eF#6bplCbJ^q01baE zL`s6V1^7WGvaKJw)@iVK!Ki&z9fv@^el{-wkqwgO1DnZd5^8<9{T8=j zhEp&Ghl_U!J}hda|HS7BxE$>JW;Z-3XNC4iCP<5hDglFAG6KGRy^5 zzD@mV{Dc;Yvy&>?g`5Okh3*^NB#swIcIfQi z--kwNLwdvSlZR0&?eK3TIt&f!Wg&wM{0i`+m9>T8bt(MmUl;~zk5)1y38D-~~ zq&Um(Kj?))a+6);l#_Zo)fA}g^4Rr{{FlB!8ZUS(P`4xwS=H#RG>OM0g3@QJH6Y$e zvsD5cH66av&$%&9;WRt%Zeu+tL^NSVi9ZO0DqyEvm9F=@xdbCyHA3`DfJoap3PCAn z8ll=M(1yRZq>$jyIe}vFO6QV&$GpH2NWtEfGo7Joj-BU|gYA??=pexG?Y1hDcdk~nN*Gsn))qgC%a#s~s zGH`}NsWEqbBU|P<|K50;xBQfXiUM-YK2HS?7lve+<6LWgOA4`FCnrB(NwSRCCLYlo zf}3g}pSGh_tw;>gO(GU7fb5brM+BDG~M?{yMD_#Z5}-%%t*ByHp;q~nnM|kxmIP- zE}v?AK5;~1_@_u`a(8je1Ac$E#OY)!sqbOU-nHLw{x8lJU_@&j^xu3!qHJ&q_Fw}2 zt|;DorF|c)(dLu=#|IO8<+rJinNFjpwj?3_JAN{be6$!XOdkFSx|=c#oAqaJ4w4j3 zyr|d`R3~cjf}MNI!tb-TjkhT^OcWCk_=ni~K7{|EQv)3Ctjb?o2WjrqCj17Vical; zuc%}N$m4L3P3>NHHE+@?^L8F$aNA>)oz-HzSxN1RWfD|m*s(@Bg$Et5&^*6emM(B- zT*$_&6IS6Sm?B%!A(ez+LB3aCpD?UQwSDDbyavY(ON+iK6qm5ko?^SR)sh1~^;&SF zabHzZoqFbse|43dJ*fRYQ~&+p+qLVA95hfZ<_a?}218pAVb^gWRDez^(yUvvRoPO9 zHA@MDm3>rcGnv`NIyB5W%L2dkWQyisWrDMJyM>q9aDK@h@r{)9YpCXg1!$4a7A(e$ zBWz4K+ik$-{Nza)%FtHqDMw4mmHgj_-xQ!~I!X)hN7oodVGvfyn^?`IeEcCnmPcAO zZXU@9Z4sszi2|^Z>aE21w_?EviZ4Byx3(sV-M#D$ z0vdJya5HxuY0DD@1z%`?$popKu9m+?L#PR2tp3$?b<_5$j`&&p>Gf0a)?62(wkQSh zd#Wfr&fY?xAe~$%sZId;2U&GyWQ@G9ccu`*!|1k3+!6x4ACGU1{-G|N6ZI&n_bL(3 zduvi@WC+US8Qb(&y~WX#aPP)|{DK+n-MAb5V`^9&bc%3$d^OndkS(G%VlI9>kI)8ia5! 
zc4R#@Y0kee==DqWl2^(~zCK1`ORbAk53p&CT5&xHr6;oZQ>Hx#0&q)oGY+=v{^u z$ssQV8>+B>FkytiqzOdi$HHqd+TzW-M=4PF5`*BAQSXqbsjvKHWP_7u&SP&pn-bas z3fECNA7pQO@>d52d<#YiEJ=-!4Zj2zISKjBB~TuT*s2wkQ#lKn{XbY3d%d@GAIlIM zFVnB#)$)sE8mNRcuVp}6;baz91xYLXeHOV5VSt%ZGly&)tI}R#3MSU<)4;2-AI@=e zmdSs~K9jnx(#J7C%cq%+3@d?^F#gQ|?>yb7gYmoJG`5CYQ~`6x^11UmzxcZHwUpE; zDe$2{5jux5)#+(Yv8}FmMX%-HMj#ZRzywwrT|gh^S>X*_i&?<&I)%_@!3gH6 z&yoA;CMVg;54cREV)xH;1;|D(w?jA~uTPp`vFUyX85*YMx}T+~^`3-He1Cltf#7&nv}%5(R7&J#af z{7V{z)SirO{7rp>`2J7$x>KmAX|y8h`a4WfqJL_cFTS$MWO`3zYyyB_{j)#x)>}n{ z0|K86-zt4Ky$X9<-Lioy%9(`sU?&Xi_gYXbXxWz8!_?28b*3jL-`o3c@~br@=+;;iW{V-Y9p91x4VK&Alut2|HXmilOz9>hqWZ!+M{q;%a86qmy1sF zmG1zW?-~k0iofBUZFuy$l~Sj9GoCUg31o?%B+77g0|><5@Bs7{D8baPR9{Y+B&j=4 z$!fKP`wtsD?!Y@(CnXbO58Nt$a{N}9`csO?GpE#<*LC!UTy-B!5ff-CNJvQfgPGa5TEs2p} zQY6VNa3^8)a|VLAPC*OA{MVI48L914o_}YmTEUpTZH7#8PJU{Wx~O#*OVa=%`jkl{jt z4*z=7-0xjd3WSxS|Db6p7c5WWak&iTb8Z3AgbAZF;iFnR7o<+akVFTr*M|W}c#4l? zJ2~aRI65lxPh)-s94#xw^j{Oa_>EjT+n_Ux^pN;4;Qw<-nP3MbpeBwvp zea#@Dri>3u=!wGW+E_>|(}L0=V;Kc#=<;eVntuD$TA7=?k=`5LTyASC&ntewi8W-C zn`~ytNA_`)VdQYM`7L#)cpP8VDxQCQMEMUb7DmXmSP^;C#~;<=OjCSvali4ZYk0Yl z@ILT$w>XVqz8>FM_4JC+R6k>eZ}8?$WK7l&uM;xLC?J}h^$}WTVD`xk%NYEsx{P%r zlMWzZ8|INkqW-x{u7Ds;oU*tqGuhh_wdU=-P9v~!qAL5}rn)z$K+v$65uw6coMZ`Q zv=cSvxq29coT6g@yKP=QfAi0qhffjYO*V1xLb-S*^>;oU_KEB8Ldvf|Bs%`6!L7TM{*lAm2=|T8fP36u>w&m~fqr7| zeNNIBma)x!7fxL3qG&iwrS7QzrSt{>>BSUB3L#~c{4vj0sgpL2hn>NJTm33J;hS!N zEh6-fX|zSLKMg@<9V?QG(2qM`^53MEGxFxf*k+sfwgONfB=+v1pwOrUp;Jq-M29OZh~V zK#+&32Y==uB@I%fTTuBzdCeSu|E57!g$@zXe!IT0G>_+G`R!?~tqjvq6kujR%b6@d zEIM&;THwf*Bc;?JD10wXvQ4G-?Cw#vM$T${yTDZKXAD?C%gYbJIo=qSMpiI;Z~#O8 z-T|5v8x;0mteC^~V>l>qfb+pjmh+G)$xj^p5j-W9@-I+|itbHP=@%^M#*g13w9_&l zxEXWMxIWNQLafWz%BB}!PzZ5Ik%h^f4m^HgL zOg!5Am9n;R{}-# z^I0Vu&&lFGcDF)QnNB{2;ye7xAm2})iri@|31~`SSAfxTRVZO6|K;&MeYQV8)VA!< zllks%4dG&a`O*yw&-iRE()!{hO}64}vE=D{eZnXoRcs(zRMc2lT3G0XLmH?Mb2!sR`qAUM z&^#N!#jg9V;L0?_+`@=SBakBQ%m%JcZA6!e)!~6NO(t8V4`NpYkP@Np`VHd|%|IeK z^VQPh#Z^ud4Y%d*k?p#WCz(^_#7}yD?i|<&s>)LVoG08_!tkosN+*N0J^NKC4a9> z##!v}Wrg6!MSx`uM>ajK7w%n?6B?EMe6|Mg zP_N~#cWxkeGWE+^Lh))qw>YZxIlU53S_~auBX8?#{p5n#LsQT(UiEy>7VM$gZJUW$GN_dNePEBoL-#qYUs@DZ%% z{u%4Sb)^d4Z{hZH^hl%a_ihQOqsi6!RQI2?-#YzvJFQc0+(rlWnuEImOCFWb_qvs% zf2)b0`mKft(Bj-omO!JT4)*ti=?h;c>CJ(kb;2mI#jzdFxf{tbN2_pV-Ja|E!B@A@ z{Wo>)Vnc=B#Z(6zl8PMrfFKgyv)A~?@qzmCR~_#Pk6jAar?bsYROs-OqtUnh6mQDq z(VKVD8t$_?EyTpb&qm5~5al|ep>MnI$ykYW$Hfl9RVTcDJj17Zo|uTiZ%K>Eo-;sr zpbJ#{G@UmXqoNMI>oxP1I&-eM1?N;S|7f>KPzm|3huyxBe2VJ}m+}=;`;e_@w)D^2 z(Tb4oXYg0xW9R1=MstvO&zWv-$Jv{Dsq4w7QQP+s!b)0z@OOw$sommxAGVUvfGNm5+kv+}MxgmRTAZ^HF$eHIbUmQw!V7g=7Cdej^;`pjSMebId zNBZj{##1BJhx)tljP!8oL z%=UE0mBqgKwfnK=6PqskxREg-PK)z{vWvAl+?>z51~w0Nyv#{df%sYf26~lVbY}iV zs^UG|0b$~5o@x%_(fa!D^yl(N1Z=`F`AB<5+eY0@;!DJpIu$G-Q{Ms#EjI(Jq)+_sy@x#krSGFY%|*}wnWf0(zNtYAZvMORffE>#ux&cx%@>Ik>Zn|H(%szQaYjhC5Jq~ zBTWItX`{Qo_<5z@8kur|l)MoU5>z=8Ac2iY@f~J|yV711M4O2_?1C|5pPTk)@o;W4 zH#S$1=eY^2gf#f=V+($)*dTzL!P%9mxxAv0k4zq4j*t0sf{c=HL6)U6!--k|QM*B_sjSIH1~!Z{;LkW)cYI7nH1JLErcs z$vdO}7qIj}T;v!V6xegxJUXp_UsJ8~wsBt-B`*jx&faV7?XjFPGm-;_0QFcxfE-+6 z+QeQ)3T{! z^ep9A<(av>D7D{7%|~fI|BG|-o=_ly1BHlI%*(}s<{1q(!U~4JJv(HrmSvbp|4;cq z_--h0j!ZMcS_LCX-Cu^aaohIs`SqAi#IrEG`a zY*c)k5NQ*~5ny6grIkawj9;`z9_xA}rK2=vI$N(iArm$VU`h|mr3Vp1$u|Z$ZG>H3 zZB18F&b7xWvU+__{DeJW1a-+7r*9uAQ-o#^|CLdso<0#2efpC)^q zmgUEOBXgUNb?&FTPT$@ut7EB@5YJk8-;Ic3%G1+pE?}zDAK?5pwq(|xddh!}XGg9j z@rI@k*hT)70xL`Z0>D8ZnrY{}-#Kfb?|dvbPK-xDli4P(9idk}^8FDs5PKHaq(%)D zST58}X=YG3*Hl7KQllPVEd>6BJBJ7LCy@N$;nenV$9hWP9((6zNkKBq4{mWBy?Bns zA)jCkI{hek8qjKT^sqx=cMl_AruncYt$vskT+h)=bMkv3OkYD^-&}MJ9}{Jv=)zc? 
zn+#`Xs{W#bDa)(P@vrrCgBKpp<`3JU@l{)~O*MI&aoT-sd9qCc4IJ85f>2;sVpNOR zVOsI;YId?FuY4*GXuz~7*CkSvoF^I-X=)p3~ZTVFtxFsuMt=q6XtE=C>;1{+X zQw!30S3W!MyUJ|Tj5Li`RaZN&co@`xIf*1xyGHah2#Amkr)($J!saW{o6-_YPJUDk z8?L#)CXHzqK9A?cr0mlDhXDyPL=PvktIup7(DV1lOP(oq#S+b5dS`Y?EB`>qzS%p4 zmDhe*cRw-qi_pa!x%#)>?!E>4_5~;~{e043L#u#y?MTuRo+a^)RCBp0e1kMZZg*|*V3XrGc`1L~+@TEyg z^e6P}DDdKoKeMQPdy${#&5y`b?OmpvhD|g=i@V8ZB&ID4m)Ey{ZeqAFiykpvM`Dmn zV$mc#>n?o%uv%m|DO+7v)Bgq+yGE7jw^%q3I`EPa#}|uBa39AxveX$ki>GVoN_~ic z8KKOm$j|0?h!KAUqCkDo!b5JB5Fi3bv9Q9lkC+|s=TNvps@K1UoC}rrRI@M3Q(kk_ z58RM_{zyD+xEn?yXoo5858*oEV0bTnL^aDno}YELhHb^-gxJxtX({8l3A!m6JeW40 z{7u5I0a!Zsp(qp19BlF0Ci2}xqk25a>CUL0>e19$s?R=#D;{{QcPPZ=iis99r}E|( zk5SC0vzQ!k4f?J99cYEopI*Z32lRs~r@7A7-8UpN{fp~r>VloR$tkgdH!@0S3hVif zzWs|5{yvC9a^Jcr4hS`+^+khp6vqAye1+|gADup3woRvhWeU(=G78u<`X^iN?Zyd}9nlX(F8RpvY~nH`SIpcnY~XVWV8x3Z9tA`~u6F z3Z3+NPh0YMY#s`m_}qF}A?%%|n%jrq$-O!1rIXB+owO8NR6H>JA|mz0 z9YZ-;<~9^Pa$lg4m{{l7jFT~@UVu0R6Ooq|!I~Gx>ghU95}i<1REzvkobyI)wLH3q z!2bH*j~)G|uMeLb7bV=Ae7PQ@+*hH7Bth{Tg8W0yxm~>CyRz36QE{W|?=wwupfh0N z>(fHrBowJOJAdUn$ne+L;GvtNql{^f4g2p6mVYU-+t&P_Rc}{Z+R{-R*=@Rc&HMM< zvpRT}`b#I0&r*waj~u9#U`gU8wbkR*e<7&5yp=J|hPHoZOn6mRe%cD-r4zl>>ZTde z((W>Ov(AmVVEkJ2OwBmjVUGd=?%LmKfCV^+0E6NshxZ+};{4}U9IK9~J!6E|XUm6S zg-Fbb_o{`x+nf9VgCJGD@i0JJp}1gGu{ok+rmZm2EB=}i`m99@GSrzAXsljxuuycx zCKHf>An_9(N~zEV4^4|7MaMRmi5~+P026=hQg3EXR%n;C`X0_Dc~wEp@7yof7alSi zK0<%A>W6plZ=4<6js<*a`3 zZ@Ffha_p~nt2RA%mY8cIuElo)x&IJ+E0Y)nuef+eF)jRN-MIOEyfAB8?ryttvv@So zRmE=k5&H2sWGe8gb*OKVU+5MRF*1u)(peP{e)$5UzvBw|_j@$ShuRutHL6k+#8tbbCnehOCeez8i_i5zGMtfE>KyJY*2hR2KKs%o$`I3l{S%2*6><1e5z7}%rZ4+4)`5rv zYI?+}xR9YxVA?Pb)~qB?%%{qG9^S@lN7Of~cAwMdo9F3n2?vfynDN3S_x+{CsD~8* z-2`Lcgf+cklxNp8qK{XW@6XAHO^byD_f^s?4~~2LDL#dkIZ-hR77J#Z9^aFyL)E;9 zE<-~){kYjaey9N=8;3E`4fue5gp%;`JtE$a8 zkQX7R`nq6E8!+hfWdj+zpeAt1Mqj@b5i9lgnCeY1E73MCr1NWIOSY4-J*vS@TCJK+ zczBN}qeEurVE_EQ6AzDuDOX$Lhlicf4qxQiz8{?ta?^nQ&&}43%mw0RQliZVlo(d1 zoC~1MFgj8bdC}=+es55l?*XJE!%&#gekMm+Mgx%bsLs z9JPc5Gw#E=KF9DCCV@oqtadKW$-&tjinfG=d<(RX3JQvfoTMbo3X?(zjO*u>TrNVPydtt9QDKjktXXJDL3PHB${mBG1g3c!+C8z@Q2Zo zE}I9o3VGBL5JxDd!H{ZK1yfGJhjwS4hCQtq)>-)1T?GQbGo4y zQ3VS*b+DEz6G2y-71QvKR?>7hn~gnpJx|46A}}X`4Ro#1=A4@NQJhhgs%X|?Imp-4 zE5#gj8B-AvDeH0Xw@bgGd?=}$BK@Slm-0g@!!qN?v7=6!ng1z|w5UZu>Gjb}Q1c!u zC3F+t5ogr}<2v;AtjGBd^1)-OBzNqDSFl?{$Q`j&7I)N|&n$cPxctwz`ru8M zg*A?ii#k#t+*G0U_NLzcU5P`_z_x~6EAb?KeJA}n&e+e&6za8C1#<9M^2Xm5g(Ig^ zj-o_lLq-2mFx(=F%h9jx$1-{^mTA59I&)G-ftU0nco7HTFe^}CpD%DrS96x|^I_iI zR*+-^Xv_GJC8}7Z@VWkpv=!@Qb@UT*YNcdo>5J{09c7Vu+M*!(m@-E)kT**YaW9R~ zl4;gO8-<CPie2K z)e|x!rOj=a)q3gH`W!6qT^ogH{uPnzQ{r|J#X%2+-6Cyc@k+nXsP zH;$xf)F`TNQg?~+?(H`A5V73=PeG5sNBQ&1x$OC8+n49=hJP24IO)$f+w`+|wHf4U z8&NN;j(;!G+6PRtZw3;Y&o-ZrQ5vbI~2P6=aIDW}zmFC_~*XDh>O2sqcjdB?S!- z$?!E#F(}ltv1(_eAGdr|5ThU-k=tu#E@dOq970O4qE{xXYyauQ+oW~0@=o2AHzUTV zNqx=}%r16}T`>=b%(jd7Y0hs$L-2Nk={Y?b;t)?f9@yNyU_=OIn#iqVh7;E9-uRS% zX(2Gh^_zR$wR|p;y2b8?H5t0v(5wqO>EWra_w3vTuh6G2{M93Z+o9olssO*Y;`wN8 z9CoNdE!GxL)vA$Abx|k9wIz{0Vow(I*$LnS3GHip$6H2kc#0h@Eo9ehI*IEqw02tl zQ1-~~O(57U;-&V~?rI_exdrRITWs_G9{}V)8^3MOew4|MlQc~c_|H1pt7s1?wJck! zh!sOS{2)j2u3a5a96NxknNymjX}y-h*0+Kk`Swq##gY8B;>Wws;V>T8^THS>uxSVC*pt8NG``L$f`wRdo>!hrEHA*&gE}Fx%KO zJM8nK(Tl#)@-EDN@z&{0w!d_&s}(kzmX_{RL>p%?!lm4>O^x92dZZO{H@ z3jbL~dll{K_M8x0D(2d?Pj+l+>U-zy+izP5g5g<~#0i?isJv(Y@sHns|DXTqZS?Y! 
z1ZwT8p|+y^?%nY{hYRQMJ+PPoNB_COTv!mloxf4|q+aith!Spjqm>Gg%1wP>)QL1H}UoVB=2b~(IKp_v$N@=3wHL2 z#txJD0+QL(_4NZ(O%?6cvr)?ywGRlriT3lb&vcz;_*XywE8o6I(j4)Q3if~fmw$Qd z{eS-Z_f+4$j?sg}W~}}0>cXxkTMwyJKX(o-wU5LeC^G0lY31gRz_HgQMewa1?I*Sc zWnOH5CE%;DyhE>i_rKMhsVDY!wL_S_vT8VVvWR8>+R@H-1BHD=MYJ?^A+8r>{y+B4 zJgTXyZS;Ge`4B)vAOZ<+jvxY}-22Y+NEp6(2;y2VB2-NmSbCAtS3#Ql8BvyjiXl)% z@5uj4z4K`MjS`a3fwJ&>4eh1KkPwK3Bx1+=oDf1loGYd^zdu-e?{jj_euw`0tY<%G z?|h|zBWjPR{qZYg{KAdCd-u~j98tW-sOvi3Npnj<`xpOBr~6-DeF0{d_!GWmZ!`9* zy?)$dsh1bzH7<$0-S&sK0~In4gPb0fHalisV6_86%8s1GwSoKh^5h$?b}8XrGnn+F z@BbL~7JxMS2BXL?xVJLqm)W^lX(W0^Cr76$Z?Gd#<XQ+8+Yp?w{+2qL~+4?TC#lDBb>}Rh2S-@*U||)Q#uHm64j=Tl4efP`tR& zY}=K$w)*kUf4ehpOD0AyC|I_}?7-^{DZ=y3$wbs{hC1v?zu5atCdXR2&!rn2E4O+N zOq-vd+9^eDK<#Oi^cgBkNNH^S@d`=p%`6M~7>T?8>ys~5R#I8*@9KCC)D8r_K4|{v z_tEqqdlbn~IhuUGeHLLyR4p`;f zcs6Y>{a34#F~8cCyFHCUFDy*zJ!S{D*KTIe3VcaRlNuqUZ)wr+kDx4lNlT0BOcZ)@ zB5I$NiG2%_(Ou)B%)VCbPO5KiMeXy{fzjuAHfm$TN?kifEP;SCtp{~FW7QbT1l0Z& zETgV)M-&(?b8Fz2kcfGE(EN^H?7%+-_K*n&!e8wG-j-9E|HBZq%kwUO@I1KjlbZJS z_F<@Q*?6)_SCz>8#m`DhPvg}w;+UzI`$kQ(Zk@2Dm(Uc zRJAr?^u$EeK1Cza=YmE*i8#xH)oyvdY7ZX8q1YrK6i_=JyUhA9IZ#JQ)A)AUosT~HIOz3{KmN$p7*YFUBc?*e{kwk_PV>YR4^Yy6URa7uvcjE!q{OylmV^|)NXT5(42n2WP?K|7>Y3fA=SaHN%Q=Eif<*r83F!8(jXFnzKE)u|Rh2?l=a$&=5(l1&GC> zGW7t-l!o2J2ibEDIGOg#EiB70LKwg=n&e&B|eXv!n-rH5ZSB=&oqISlfp3y();T8;> z1y0}?m@>~U>gT6?&@f8>!=-) zG4Gh$!R#Swk7DPYPOs>1Mw^^YC4ue(Pti2-;EDRIPQbKa7$evDwVqgWyR@@1{moH3 zKyA(`-T9qA{?qU0B;DB9iuSAR?bkfL+7%Y-2itdS+0g38oq2yQh3o~ed@4q^d5m4o z$JB1yeB`y){!I*yU(U84k*%wCRqOeRs=Y5&cU{nzwRM-DJyFqhTmv=C6{pHiBcP|v zc(?^2#2-4ZJFa>|*H#Q_H=b|q1pRlMZRZGBq4V8mn^aZZUCkJ+M?~!l zcGf$WF)(|8v$!nM_c=EBYaQ2HZT@9clc1kpSnv?jQ(>cal9bN0Rug{BaF`S_MNts` z1l%4dlDT`=@Al6`L1f7gwS(0mU=N7Bu{I=Xmy%MGZt01fWmH;uK=(mqvD;_RgHSCO z8cmwqYn9C!?DLDnH(l)r3yC{&vw!sC(x1G)k*lXkgM-7$0qrUVe91%Yg{d1Cxo_H; z^XGr@>!0Q3>`o(51Vuye8&v7A^X)P^d3m5QW?i*69XFmjs_Ln%Y;D<-@Le$>VVeDvDojh25sio4`b-b*&_k?a=QBj+*Lxt8Q zqIQ#0=C+7F^NQN%IiD#bp!WU+k<6T!7$2W^gp1k@6h(Iu;<00TX_zAo^5iWUEW>=Z zoMit50Jnes$BZb7EKP-|9lSo+Ug{6nuO^46{S2IAZL?E>wu0waO_1Lj0UHQ=qUEQa7gdU{! 
zV~34Zr;i;yY&=m^X*^fNBkoYe!B-%3Z{^XtMATkC>(gjdCi6IFnVhE#lW=?-H|?Cz zo0imW5-9uJ+?;c2GHldNVpX$B*3r=-Wk z)c(;s0j~qKyHXB>9(#>sPAC9n5yIn86J%l#v?z@rF8flgku`z%2~`Xn(cu+3vGnRD zX$;_-WCFv9B8Mb8LG2Dt^a1;nbfkuVC4dlx6lL+^|d;`ywE zZ5tD{=a=T>WNj8O0wN@an*DFKuzmkFSmoXjcfVZwsfvn=9fvf{ZBiDd{JiesX;Ayo zfT8Qkb%(Y0CS;1=s(>s^d3iU)?x6ObD8v_?tFEl7hMU(XvW!}9_NwrN%Ur}+%qBr* za#{NryK9>B%s~*}H#arG`5dl(`;n@*h}i*ZS7Jm`?tl5kirPQ> z8tHEBT1W{eaxORoKw0>DUirdgoKcN!G52@ z%H+Ra5q+rIb!|lmVW?|=Q>`vJcKoIDx>K#7_QO?g0LPm{)UG*UtT?vxp?UY|5Vare zGL}nspS@7Gq;}MFw)@mU<4byUA0kov_5MD>ZkE(;oApsLw~TT-%nPD^+%58bb7rsD zIqvS0nZl_;CJINfoq7U7a+a0JOB=J&G0!pewFN)@BB1t9@6aqmV!zv^sJ*nbG5Aet zR=wx;ty`THNh`O?VxrPC7rsoz~BMvq*=PtD$=Iw0K5kGnI>)iu@)@7aMA^xNmC!jEhd*) zYCGYzjD6PZe}#vKgpqU7-}X^qqIMv8NB!2C_Q9>MKFhSs`!~Kb)ZRX%Skyj{UJop> z*2v?Yso2xDz*OGORib4gC>vHjKur;4Ng<{$J%E-u)1f>3mKtnrtUjb^ee-ll)6t6R zvqe?9b7dM$Q&CH2h}xy%$4gC6{0PgbTGb_8`AJ@X0unOK<+`dmji#hnQxZ@+_ezD) z*xgD*)V?0B^w@ql%sIB-X12t5Z8nERCde#vHrVQLQ8?g@wM@ZkV2mKlcwOd&n0Z_H z6hE>QD+nw_@MMNkkxnx-&82|BcmLqCKmPRY-4E_yOfu2$-ogaUg4%=RWG5=eLF{YQ zj%08mo|Qmj*6X$f7NMCb5a}!g*>hr4A6y!ph2!uX9^b+!95AJbQTwKB*C(ax4mZh8 zfv@4eOE-@Im3K+)h?Ne$7S!<9W$ib^WAz>rJjCgZrloUOozii9b`sPs9ltjTukf;Z z{{<&J)~hEVH#TZrn1w`937+YP>;CDCPpUb?<>=kbyZht4s{t%oG#wmc1RVXcGTOZ>w2SgZ(Dcwakc(}uHw?^y$5yY zo1reItFo-DvaPHspmqXmuY*=Z?d#${{&UnU{ec$zoty=O0Jh25N!z3}28x3w+&T#t z!b9yC6M{@e69nNeM2Fy=CNPT4Yp6{WAi2)yc?8_K`^l#)%TPhJ=UhvOyDfQr9kuI0 zu@07)n1!_l5^-LTwr|Yff*6~Eni-c*7>6rfIvO-?=LiA9_t}%8beO40kkoE=dhIeC z8nq*p)DW5+le2bKu!7)O=?gI1JWY&AV;W%ru4^QcX=8-e9~%vJ^kQN3AV z)ai7_^M?p^S-J82xh~_Wlena^!dP8xtk@65kJUl7qvBI=FYrbQ;-xlJvqRL*H}C1z zbyve9N-DdTgbt3^UFt*;wLc>^UhSBm1eqwZ1Wr$lCNmkDr5T392*<`71V-u-cv5g)xTX=HYtJ(lRRI z-TusclT4=JxM|n*$qY&D{V}wvDCU3$es|r=kd(!I-718Q+6%I$wLs5=GG(}C@Y>gF zci2!sK2lSY>@_U*Y9AGtO2V%8p=(L0Uu)>TIq~Yq%^|tMzl@uZy9EV^fN?@#*|b4K zQ4|XZwE?v&$&mXrhfb7VEUzk4Bcv~@g64^1B!c4<_FcTV??RLMt&Wahbvh<`E88xf z>gwKC%pv_N`!1GOzPYEW7paS2_~MDT^qLEMDoZ4_>kn0Q9X%6K`?p5z21=BP9L5w& zMJr_)TVp6JW!h zT6H>tYNrwIb6hu1tJN|K+&l$upOF$WHkZ}w86(j+yg`}{#Qy!6VWak4pmsmKi*DuB zo9%f|*M@4^M^dK8d$f;5&!R_}RdW-jvhC|CiAgMksSMZqW|pRsQksGy z1W9WQm^?N;J^cWX))3hGF&c+jNUhQKv~>E1h)!tg{Y|cEo$!g`H{3%Oa}(o?ke zqOq+PA$3WQRu7jYgX>Frdo(=a^_otQbQIBayrQj+kEs3IqV}A)nPLr(0I)WYu_RTV zV_EUi`6?|Zhyiu{b)-p(U2oPE1c; zpY4NZPe^JPSE*gItacGrO`T(e$6+&1x<;>us9o)Y>Ue2vMeR|N+AXxZKXAc3gBXj$ zpDmm7oSB#kPA9C?ZV0VXQH;P=yMtFR-+2H1r&~Au!IfcnQq5Mwfc6Q1skrM$Zo-D{ z$=g1By5Rl84>u)d*Rb zq6_8S`!y({_HT{am9JFxEXiC*m$4L&BrriSqHv66_(=hpIgS5tF^{e(eS6PwP>Ao#ui5S5;PYx1A&-YX24`Wa^HV5SZs^ z){qd*F*JuU!BI3#@l@IV-)St)J{!Xru#H~^P?~>bHW=IH6oBBsuE|p@Yg0n;rHLp)5KM1Avd$?>N8+fbe{|G?)qkkt?9omXQTw-O8I=c*(+o{< z(QyJIrL~)~qZ-NLNZq8N*=PZ-VXS)phCtICPqL}&t373n+6k}!xT$FZ+7#&AVTsQ4 zSnVt=Woir=G5xktyM92biHQ~jla#5+G|Wj~oZhSU+39JE6+&+>i$Luzi)_Lg%(Z(n zL<5Y?kHJ_#?XKy}7?bU~Bl>^~DrjUgCMnm>I7esg0_y?I8(65BnVrwb^elK~d>EzeKa|7ND|}Dw^JpwOmseSz(vnyEitRXbo5)OUy)gDoMd}**k zhGbZgAu(tj!v#fBeaG1rsQDC<1Xnu*u^LWOn|Ok z4#%v`6-#6|Z8q23EV$Rrvr>(_!_N5N`s|#`W}EVH_p06Inw*6D7BYD>Fbg;KJIs(Y zmm$FfhMzL@xumfv7@N)T*R{i&%uv_Pj7>p%&f(9sb9S@M<+RShYi0aIzx3Lm79eL_n%5c?f1AnbxqvI53%Zq&DAvjNt=|sSHC)mzLEoG7NhkweN;!DG0QJ z-U>=xV3uCz1K9$j3j*Ri;8$SELLmZ5-9hA?v}H;fvpJ?|v_|b-|IJ`xy8usDTV|nS zhNqd+5tKDTN2#B8NjJ=a&SNFDJ0_)DtQox8;BUDg;E%*W7FeZr0Qcf?byx9IUAc4A+3w74F3z?NeF(AyWarubv9bB|AG? 
znn~zX1`87{6N|m~pGxoA3YiE56Iaj>V)lmZF%)`U@M>;-REG|?>O=YjMqxoj?L^wn zVDTe?p-<2ZfeD6HG&UTG1EZ4|JkK&DQBnk188uelx;@6SF@|VneYMxa#sm3js|vJ= z6$)i~V1yi#Hg-`=&{%3)yiTXGkAiXf*n-n)P$Q~O2@OjohY)8`C=L)P0Czwkp*%K5 zVFc*My-w5gxKg1gLQ$O_uXUj~iasEX^(o zc4ao~CX+0p)kH-_g_dVw9PF)AwneP;q6oa|AplzQ)4&Li@$vv(8VkJ{dVyaA6;7fM z;7T~D-SDX8h`l`ni!&2$x5wu*06ve$Jux%0IMCCp-DrBGsf@cl9+(N>^TAYLCXYOo zgnPb5jevQAHKb$}jugt{V`+?@7esSMweC?l-^!tIAfk36E({AAajYOx&o#&1^oCLQ=$tC%Q5X_kSdO*DSyqP)H%JkjA1w)V=@-h z4&<&<`)#*!D?CRs`%_IhqIUUCrS`=mD>n{>B@y_t3f*I1tm;JJKt$~bi<`GgWipLB zuhyr@C?21=b?Xe~SP}G225telOEvF2l8|lpNA3A5YQOEVB<%JJKWF>qr<-v^?Fo4b z?F+}i_S`fSmPFu>KAzfN(x9*)qIQJo*aUx4rZH#N&$h&6NM~*Epmr7-XbFFp3-l-@ z++CY24=kh3#3~cE=jZx$pPiDE z_K4a6LWoP+1#gF(U{53$wU#pAyIqBTR~o_GKN{y9#K1$DPz^al0#C6 zN!eA~;HS3sNPb~*{I=K(fn_-y8_zNnc?S+zy5;|#iDPJ5l*uxrYM3KCkK_c7&}%IC z|Lnb6OdIFE_y2olIQTJyMF{7ZlMCe(LaTk*<}Ub<`|&|4HjaZw%C<*h?fp-TPm?;> zo_ZLtA=uckbJ2ZKX)C#ilE2iepsiQh-7hQ64MJ$k?L~#?VWq3pV$+JtcVDPink)-y^?&r)Zsr`tl z9hWz+4%XF;wVcxK?9xk?wYSs{?4EQgZYJ7$ZfLNfuDSkQ%}nc+8S!M)nNwoNa?4nA zb6s86pj2vqPIp5~XDNX|QTx8B>aE*mPa40T8z=(>rPi*fegDKYkC#LydsfyOt`JXm z@U5tBJb&q5_MlI$xt#l#J)dRYN{$oD45n2wO4cxZn7s0O>ZM8 zD5+gh`@TVBS>?kenOt2bdhrPmrP}t^n&zgVfph2n{LiA~W&TC}5s^wYfBy5i?xCjI zv#pid5)6ezQM>lt^IPmmqq(mcjxI`SzdSk`b&EfvZuj$ndv`Dz6@T2L;_vLCrglZ` zWJg=|*uASenZ2%^fLB0>DC?*^HKVDmt7~X%Y`WJp*fn@>@LpqMV|`uS+0)fkZ52Dc zkq8vE7nJg*o*DLpv0UHx4jfq&wcoP2(lJlWla}#_!#xKgp<9QQ+7-1URaskg?O;!C zWi=<7{qd`2M;%p_Dug6yBTTAFOUtz#ogEdO+D>hSw!Bv|uZIJMuq z)CLL#15=4af~Qcy)IM#*`QbVwwJT~Tv{y9E4_9(44{KXm?=3C$G{2hJi9ZdFY4bl< zS5bRmDLLDx$+9PljlsSOP$&!ppY>(}BLS;x+^9l@Qaj|wp{QL^J1T3_)DQJcK7QC- z-KJHMnn`g6m4KHaT6*d`;E+(%PMq#-UOs8PkMH%6pil^~ZaiR)&dv@G&(2%I8U6m{ zjuo{ZZMCDa%9dKm>qE6Om0J0K@b;D33N;-xHFx9T8y`rmsgFR7f*Iudy zh5bXsqBlk6W|QG?*c_GP`z^CSe*99qqIN`_Y^4X7d2^$N@9;rr?YfI=hM5w^S8v$VLfrYQoEvdRH7B@>)pAWSYNO1%xga7 z@N4ag+Q~D0W9*4ybEvxl6!r_j*rXkIg*1(y-UwzaqT7cTS#uM*Arbs#! 
z2SD_A#FL_c)a&(WJp};q6qEHvygCF4J!9~&m|$a(7%RTZ8rTW(TRh`&nG%4EOwj8} zX9*QEHJb60#jbXQ%dU*wTe~{>xJ%R8Uape7uBg4})DEZn8dL1aVSQk*6AFRS_&Dq( z;3ZTpF}B+nR6MC2GY-zi+0qCF?b5)DbPfw=`G}YQ(}WWQE*hc{BXAaOoR|+H#4rmvwK_K5 zq0tP>+Al}2J{gUqcpqoe)8poFP#w5zPx&&VqY+~E=T7VKuqTX^{S~-=9@Q1P4VJM z?ew%+PY`;O&M1t~1nHR{rv-rp89F4S0D?(Dr%#UPfQSrx5m2sVj10^h5a?}TA7F3P zL%5O#417TROX;McXlw0=EN!cKxH37}^KQ+RG6eesA989})DCrh&FqOG)jd!J`$d7a zi(3rX33wT`(IK`(rm5md?R+HZNHa8{_PYgZCbKx}rG;=1Wa#N|7Jv#zje5??12sJ_ zeapaQ_`tj$fDs%tV6Qtuxhw_@ajzv-ylN*(wUwHNm8IOu;F&5ddUe}fr65y$YOm_+ z+GbA*zg~K`1NMu8ZbVG&p}o}Z6xAN1iYK)rIvq0G7X9&wS&Ku$|BBi}ZgxlQPPw5* zZ~iT{Cp}bgs+}mSs1m(CnOnP8JJUgejH33US9@9a`Cr+S!q}xc684J%FBN3Mi9HH3 zoLG<P5BN`~=~TF7mS4ae6qzBb}8SCtgsyNm6?< zMj_+0z!2$$aa>Gl$EEGnV-2fUdxjd$R(F;_p(t-{ovF=rwY0WgA>pu4)c#Ii-ASp) zZ1wb4fWp4P-i4btFGmP?3Enz)^Olf8M_uiVV0MkiLdi7N1-OhMCI~bi;YLj3BM#q- zYM-~8ybkv`memeEhs8CKvDuC4ms7jj88ZZu)&v^2*iDIIQ9Dsu(JJ+?=gv3ORCSi1 zg6=50w$>vS?$_38RB%WrYKP8&p-uKcc(S>Nksp&rwF0fEB4L8-9l z@URcPbX#u`mn;S7Bfn&c@=pn7ArwmhkqQWcu;?d{FA_GJ9bP_RwbIYjzA){unO(e^ zorB&DaS4~weuBd_+d`u;~*_8@rw<4-=hQ@ink&o|kpY}{#FX5Tj3 zO_%-z1wbJY5GykC5gz^H-KjaT9^<8s{(kKw&G_|3DNqtR#-R5R0ElP(`XB{Zr^CC` zsD8$f<)zoRqZq92g;xTD5(7wai;-`iGb%I$|d@KE)-N3!+z}uWA#fT_3o|b9Wn^3U;5S_ zu69`r1V&H)#vir~mNz%IlZUBxQd`x$Dy~cFu5alqS0Q)?GDfU{m?-N!;jPJ}D*<~_ z*1ROKQV9o3MbpxM{n!7Kz!h`sByD_LeG<+Xa1%b+GEslIc15Sc4i?Du#0<*nPXgDzqE3ulT;sy+7VfL zW$fYArKO$*O~*f$*RkI=JUTk-NMXnizgtMNzwt78s#^~2CGh(oETP_q+W+&5!{PrF zgWAg*E>#~)Y8TbcF5kGgeD~wJ3pdVgG~c+ewf*UX4{l%I+PHJ=?uR!%TVKBa$p@d_ zSbuW&LG9fe8utBb7e2qCS#G@l;oWNmcl(ouzE&s*3Md;(N9aw?bV&}(Cra((-4s$ET#R-6*+Fq65JhL%&J*Act`N?EvA zFdN~fjcPz@>3#V*ic?trfZ_y}@2J%vfjm(!M)C`U`q>-s46XdH{G&=cZ^(#53^W0G zgDHFz)c!wfMWA+6)7!)z1pmLJcJ`ACPcF8s|N7~LWvL+ZBLd`0@ql>7oA%Qfd`!wakxeDkKm=KNk|G<3M#K@%dOarbN*GgF@A$66R5NQ! zlg(wY2Z+G%6nbRXVzY6{=~UisM?sHKN$vSk`Ck9lfu;7;{R`}`AKtxtV{lupwLiLe z_wKd#dkFoSw91#imGsbH10Rd{9 z9`<6Sr1tz!)nBSTs9HO__3``bx9>Ec{kNUezVYC*GiR>X$!f2DQ1k1JyJH_*`zLn2 z=ECg<8qM|FpMU=OMfQoN@%H_iLf2yk2dWSh2n9s?L+-hoFEFeO2uv!@8=j?ucz+%Ev!=aeo z#07Y@-(uwffMV>Ut`zSVoZ~^S;K=g!VY}B)!SmQY)U4gRmd9n{yJ|NxlI*8BJv}ku z31rnegDa3RBmlGC36C{nFnXreH)_P|=d#)#U4O)GU4LKvC>LbTT$^EMuK&~4h3e(4f4k87@rT>nn;YAow}|h4 z#uk=Gy^UHZ913wZBFx{~6}vm+r4byK+DqH2iiDZ2C5e^#>I(8L6=arHCLcent!~q* z&~FQ9VG1OIf}q<~J27SPQ>5M&P))hVF&G^-AI@0D5lFZ!2H-Q?I01abl1|v842Bob zo<2pwUZkSdm()%K77R?7Gl%tjL~xD><}?vcC&Ttd&Sn=D9hMOzVYN6z)0{9K%m}xH zj89Q}o~YjK)f_~v{n~>EAAdZP-FR^0;-}X>+upo?r+V}HC;xH(UmG{>h_#m+w>M_4 z-MMh%U(asddC+|EYoEXW;KKIp`|n@8_90tX8v8W}6bcAVFp+6)es*|x)@>e1 zvs8ZiR-r-`WQq+WPou8X%~Yt~+BE9Y(2m!U8h%gh2t>YlQSBSK)QEAsX<}3H)5`*yl8-^fsF}#j|FI-sgAP zg7L^`fHg)&qb4I`xA}>{Y=#YbxPZ~T0%YYCgO1*{>|C`Q$U?Lf`SO%$QZoow8@{Mc?@}RwL|q5n;##alJffR!`d9O zfKuiV|6J`hcSyvUQORmo>$yPIVx?YGyWkV=GiIx-_CJL1IKt-3(Qr6J@2EYRF_~OW zcZS#70y=GMTiw5^8C)WKah<1^`kEb}C}=0t7iH0aTjvifT^*@Gj1P zUrX&uYDZ`L>YuR3nXQ3yr$C{wAWTpRpV4Ua>3GrZC_lA}Gl;924&sESb|AE+=m?&& zggvtX3c$F?F^IQ?c;H2~qrid*0}du&NA1Wo?-uwM)K0jfUP7w4n-Y7ey<@FsNK;t~ zyC6cPVxobBibOkV=S)VUQAYsyCAH(Jr)qcdlG@QLYPT4$lGUPK4z(IIzW+_dP< zB!F66aOpG2C`wm{P3LI zATzhqA~QEUA9ZHNg9!p;h?&~DJ-D|dlN;1@l1C@CD{9Adz{XidEnXfF6XrOl!~& zy_s`|UH+X@{Z-99x!hy%_pyArDqXJTa=E1)ax0orCAA+NwF6S63!xC_~#ua-4RfGv7~m;*7w!0$Ck~m{#H;Z z1Vk`4y>NM_o&DCF*!MW<77;rjFVEfFxjD>@jME5!I8}4@>J0Qu|rjZFk9PJ6uMxq|P_ZNQ~T-q+%y=SPtrnRC3Fu!ZML^@-H z9ayWE2f)G1v?G)WBEk@00BEa=fVkhv5CL~Ud=T-eAz=+q1(m+`{6T-}={~lxJ*DYw zEC+=GpfXHHcN{)E$_b%>)fGvn)1FAiI^r-}W}nt(I4eT|V3HuD-<=B~Uog=suf(jc z?!*T$jfy&|YpV%(6BM<}5Iz(_eDlJF{u z+6#a$?Fk}yGl;VG*0F}QT+eFbnW_%*b-yi5qwbT&4`9g>F8@=*%H&e+pwzCY9rKiWKVL7a 
zs2XcnS<0<7)X1&<`GYgzpt4{4n?f}7)h!>-(B9osjbPs>;BkwzOS>h*B;x2dUZH4% zeO}7-j?~_-bhMSL)CaK9Kv`?U<4I|Hwz;C?2%{}2R<#!ZHBqXqthpy{8Q(ovbEO19 zz98C1D5?F8sq8;LlRb{1y}3`Rv=;(obj*p)4o~n1eh0PAlay*Qt_17_LI<%uSV>jG z)ukPBJx7_D5*U!i00$DBz0!sTq`IPD74QxSqy&w z@+ZZkQ^P6r_V#Npndy3__Q~84W^%GtTMTMXcw98R9uzM^VaNkqdTQ)pZgNRn%~V-N zq61dDlG>qjwdc|Ci|yMDmugF3|0wVovCWUbAz^MXrA4 z=<=wcNYw6IbOa$ENIC&B$m0lwLe>eIfP9E+XKII5#GaY@>NagDQd0YYLZ0rscl=^| zOV5B76be9ML_8!f%)=j}PH|#ICPuy0vr$VbHC>OFj`lQa+1PPYI~($V3=y~-3M@Kp zRwK&SHx**P`${f1P_Jq0P$4C?A0QQ7y&CqIvC-6b8Wakq_Wb^DYBY<->3LLdFSW~% zm704iN4oj3{K%>uC3HF_h}Be3&*;!owG%$Q-bVm0C2=*civ#vCdX`rMq2qOoK1kuj zgxgNXK_ng+){{JI`kS<%P%yQJDEMR0p{QM?v|Ur*@&;;GQoHISt2LZa>Acf| zU}dqacEVs6gn(b=52OHiugggJN5VqLLj!B|Si|N;Kb;N@TLM`SAw;zUpp+xbs@KZlDkl}S9}wmBeGS{}abolQfU;Vt2-IGv zJ!Eicdj~$kslEBAs+|wdIy2*mm@pEtChZAH?Z|I-e3S`?QhrMYLohVrW5VH&BGWc+ z+_1n+xvaLu)cDA(`J>=-wF93?`Uy5HxIUUT$5hB`4tSN+fhxjH zApJrBWQb>KPkMEnjRv4H7Jt?~MFPzI!^Vze558$_0rUl|*k zT$=1@&~%g_CAAlp%JTy)DfW1<-q6>elw*|Co)1c`T~>S2>euV_3@xjD)Tx&+5{VFJ z;35R}CLQ{e$+CyqcT;;Jz%jn0GXa2IQG;GqyPCjiBH_21-7X&f0HVCA`JuQ#p}W4i zqLf5RYA=s!^{95 zNTvWWwmsEON~s<5|q;~buP3>wd1ACy<+QDcy$Bf2tt5H%rA8;>jEm@faV53?1G-RH z2SE58wne+m?xO>XL6i+yLW|)LgMpZ`m@r^}m#Zr`=)p@Q@b>W4^OmwENZEP<@Md@*Yz9+6%I9 zssG$D*aC8StgovR6bgp=Sg4&5IHyw(M)YxIHR_8{29rvPQ~gJo$hc`D;q#^dFl#a*Lhs6?Km_?cBI7J2jcIQt6Jb;UL@JWWcxePWlgsKF zr`3=@@zVBcaW&Iq&%=66m6lZ0zJE$;FAZ*Gi@xu%zgsEC6cY8(U$T@3dW%EvH(8U5 zLG@vz_9I)60Zf1dit{`n*R9k5K!_jEP900WAd083_z1}*EiB!{6px?&0fLaEGo^)hk^?Rpe`{{j2Z3`+8^a?RVdPC6B)T?z^AAdiw0U zzZ~~zR1wsA?%c0MJ%w7|H>iRlL|L+A_L%Zx@lDi@WVNe}i=3bExUDR+XfwpE^J$|Y z98MX{v#ykXIvI-_g`_DsA%q!Zy(}2AnPi5iZ8p6zA`XmUn<412SQAQWe`_F;*UpR$ z<(4Mb8frR9N;Hp`&R3JKtM>1%RQ#{3@2~&f@q;Y+zx?uz_WPfu(NF)b-Tv_eq;@Dd z+uyyJE#fKEex;+mI8*!0TbJ|X=FK-t?O}t#WFHOqXu+n#@d=wtx0noK%8+4=W~U0U zo*SVf^DzXJN0hvEb^?Gg+kL_0B9G}vI^h|O5rl8V!78c!t%7h_YwfCZ@?L#Q!_v}H zS0(Y9YX2)+_REgizbF5`qxK&#&|iwN{i|O{C>ICEJ++Hy>-q+_igyaNr(4a4SRfRteO z(T-%m8%z+Sb$%+7u?jXnQd0Zd1tC@YO!Mm1++=P^&4m}bEXWA4N7XCn;*Sgpwuo;qaxPnWN>OhU?phEr!)F9W=HC2Y7g6k0e4DD z_(CSJt9IVVv*S~i3>|P!up&m4Qfog95JD9VOFOJ;y7sGfa$`gLCE}-+cBPzsuIB$Gc?fwxzfm7i{V?=ruHwMY#>}xN6Q03~< z4!PB}{i;2_`AGZy*I&0+{Lp!F)K0W^_tzAi+p}9;eNClsKqzW=I~*YqhxGH?gYazO z)PBo0p(2dag;YBVP78XKCpzslWTyS}h{a_{hr^7~JZ~M3IXF|?IL)~X5h0XCyJ}~+ zaLn)GtUA^qK8x91Iz{b=16iG0y4th$u%WhQY-ryFnT=o8EB^Ll>E@S`+K>5?rCmx| z28z$^8;!jeDnQ|&YV9}YZq13m^3U$zm3wnH3#azEX&(Se7f9_)I0FEbDeU1B5wlGY zGDadbVi9akFK;v_LxRm5(E(+6#=j|Cm{`<-BRzq zzx%fO#b-)tKV)88?fefvfBWsXfBE`*;`{H!>$kGn|F%vLGhctHJwd5m1dDFh6p!22 zn=W;?gTldpNPWbWH!&jt1!{hr}F%{`8Ft86tF>5c>Y|r^H50m2?IL5`?V`-|L#e- z%;w+zzB5?<>o3}^@4oo5V*A^#eypfC5gQFWr%cn+UsD|Q_N~U=F2(FcjZv7pnLkl4 zR?bFM)SfS2ef`sa|0MI(cR%g$m!E(7>(^g=B@Vv$_NSk|KHl4dy+WBdm)iWK7|cE> znjJwAqd=L=!?$kc$K0qj0m|8^irVw$zfSm39IsGP{pbF=b@r$;^IqSDb^?kT1>Rr{ zh4)5SxNHf(z_f>kVngX`FC5BI90j22TwlWxcYF3x_oc25D0UREE|^Vam-qN(0;AT{ zbENLY|Hs}PgD8##U>tu*qT`@uooterBylyW$$Lpm`aVti^2)20yTZQM9=5WWV)`DY zhuz}f3JKRN$M%SDU?(D$w{r(q?Si|f&qTi8Ho`LPpO5#VsRyJTX|F0lnNh3Kuhcub z0X6Q(4TIKCzx;Z(n&UtQX~%)g7gE<^dAX`?*%x8R;TORGX~#7x($3);m7n{eQOw<{ zJ}9#@qTm5#N7}L4k#@Wr^?UgajkRqhkJ*(d^e!a%XgKrUCfzPn`3rB+OgX4TzfT1-6;#5RaKshF(c~uBB?Dv+Aj}j$8+tc z@HNV&`^SphpUwvYsVTsL4C&H8F7GYUj?5Bejv2@TN^#)k&fYn|(Ba;@h))y|*Xn?}?u*L>r?+PQzyL~oP&1FUwyYOkRCk(<)c^tSJ~dE75@ z7Mk_{k`my#cEDGbDodKUGa6bRuQT`cB99{T)kq}u2UzWZ=h`bu5TubC6wwDRZ|T_B z38j|O6i7`0&f1Z7q`fxo)P%9jJ6^QzPph3C7c{N^moEUM)&0ag$0)S z*KYfxc|UIUrJaT>p7$RnA_2IOw39~X=ktMHj;}27Bl?PJ#-EiPoNx|az2h;ZU$5Vg!Yw2pwc@h;gGd!g&6M%1} z9RO+PPY~qhsO$EVqF^)%Qe)vA)~jzeeY@$Ynw5nSWd+UjUdL~U1ORCVJp5QiV)M;d 
zPlJduRxlc7sb$!z>v?6lJd|bWx$Z(Ur?Ywd^8bv+S>GA7TLb`U2Yh9z;v~P*Q~xP= zki-#Xl>LXk^gK+0EVXp!L1|;F1JVvy?KL8Z%~3}g^d3}aVcVLf>$;}duN~DL_mu9? z$9*Xv?SR!@V}c|}vfOAiWVt1YC^{hR@Z)L!{W^ZT%xVWf+5y_;$M;wASwY$XkaoC) z=2*KHb%L}5{M}AnR3Q)r003NVS&Zt+ClC8 z!#OZ8-#n-ts88)nc%Z2x;I6t(!UJ&x00000000000000000000000000000000000 n0000000000000000PvHpcVFuk;7*Vv00000NkvXXu0mjfew@iP literal 0 HcmV?d00001 diff --git a/docs/modules/retrieval/vector_stores/integrations/memory.md b/docs/modules/retrieval/vector_stores/integrations/memory.md index 58f3aacb..7acb37cf 100644 --- a/docs/modules/retrieval/vector_stores/integrations/memory.md +++ b/docs/modules/retrieval/vector_stores/integrations/memory.md @@ -1,8 +1,8 @@ # MemoryVectorStore -`MemoryVectorStore` is an in-memory, ephemeral vector store that stores -embeddings in-memory and does an exact, linear search for the most similar -embeddings. The default similarity metric is cosine similarity. +`MemoryVectorStore` is an in-memory, ephemeral vector store that stores embeddings in-memory and does an exact, linear search for the most similar embeddings. The default similarity metric is cosine similarity. + +This class is useful for testing and prototyping, but it is not recommended for production use cases. See other vector store integrations for production use cases. ```dart const filePath = './test/chains/assets/state_of_the_union.txt'; @@ -30,7 +30,7 @@ final docSearch = await MemoryVectorStore.fromDocuments( ); final llm = ChatOpenAI( apiKey: openAiKey, - defaultOptions: const ChatOpenAIOptions(temperature: 0), + defaultOptions: ChatOpenAIOptions(temperature: 0), ); final qaChain = OpenAIQAWithSourcesChain(llm: llm); final docPrompt = PromptTemplate.fromTemplate( diff --git a/docs/modules/retrieval/vector_stores/integrations/objectbox.md b/docs/modules/retrieval/vector_stores/integrations/objectbox.md new file mode 100644 index 00000000..af4bb6c6 --- /dev/null +++ b/docs/modules/retrieval/vector_stores/integrations/objectbox.md @@ -0,0 +1,258 @@ +# ObjectBox + +Vector store for the [ObjectBox](https://objectbox.io/) on-device database. + +ObjectBox features: +- Embedded Database that runs inside your application without latency +- Vector search based is state-of-the-art HNSW algorithm that scales very well with growing data volume +- HNSW is tightly integrated within ObjectBox's internal database. Vector Search doesn’t just run “on top of database persistence” +- With this deep integration ObjectBox does not need to keep all vectors in memory +- Multi-layered caching: if a vector is not in-memory, ObjectBox fetches it from disk +- Not just a vector database: you can store any data in ObjectBox, not just vectors. You won’t need a second database +- Low minimum hardware requirements: e.g. an old Raspberry Pi comfortably runs ObjectBox smoothly +- Low memory footprint: ObjectBox itself just takes a few MB of memory. 
The entire binary is only about 3 MB (compressed around 1 MB) +- Scales with hardware: efficient resource usage is also an advantage when running on more capable devices like the latest phones, desktops and servers + +- [ObjectBox Vector Search docs](https://docs.objectbox.io/ann-vector-search) +- [The first On-Device Vector Database: ObjectBox 4.0](https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0) +- [On-device Vector Database for Dart/Flutter](https://objectbox.io/on-device-vector-database-for-dart-flutter) + +## Setup + +LangChain.dart offers two classes for working with ObjectBox: +- `ObjectBoxVectorStore`: This vector stores creates a `Store` with an `ObjectBoxDocument` entity that persists LangChain `Document`s along with their embeddings. +- `BaseObjectBoxVectorStore`: If you need more control over the entity (e.g. if you need to persist custom fields), you can use this class instead. + +### 1. Add ObjectBox to your project + +See the [ObjectBox documentation](https://docs.objectbox.io/getting-started) to learn how to add ObjectBox to your project. + +Note that the integration differs depending on whether you are building a Flutter application or a pure Dart application. + +### 2. Add the LangChain.dart Community package + +Add the `langchain_community` package to your `pubspec.yaml` file. + +```yaml +dependencies: + langchain: {version} + langchain_community: {version} +``` + +### 3. Instantiate the ObjectBox vector store + +```dart +final embeddings = OllamaEmbeddings(model: 'mxbai-embed-large:335m'); +final vectorStore = ObjectBoxVectorStore( + embeddings: embeddings, + dimensions: 1024, +); +``` + +The dimensions parameter specifies the number of dimensions of the embeddings. It will depend on the embeddings model you are using. In this example, we are using the `mxbai-embed-large:335m` model, which has 1024 dimensions. + +The `ObjectBoxVectorStore` constructor allows you to customize the ObjectBox store that is created under the hood. For example, you can change the directory where the database is stored: + +```dart +final vectorStore = ObjectBoxVectorStore( + embeddings: embeddings, + dimensions: 1024, + directory: 'path/to/db', +); +``` + +## Usage + +### Storing vectors + +```dart +final res = await vectorStore.addDocuments( + documents: [ + Document( + pageContent: 'The cat sat on the mat', + metadata: {'cat': 'animal'}, + ), + Document( + pageContent: 'The dog chased the ball.', + metadata: {'cat': 'animal'}, + ), + ], +); +``` + +### Querying vectors + +```dart +final res = await vectorStore.similaritySearch( + query: 'Where is the cat?', + config: const ObjectBoxSimilaritySearch(k: 1), +); +``` + +You can change the minimum similarity score threshold by setting the `scoreThreshold` parameter in the `ObjectBoxSimilaritySearch` config object. + +#### Filtering + +You can use the `ObjectBoxSimilaritySearch` class to pass ObjectBox-specific filtering options. + +`ObjectBoxVectorStore` supports filtering queries by id, content or metadata using ObjectBox's `Condition`. You can define the filter condition in the `ObjectBoxSimilaritySearch.filterCondition` parameter. Use the `ObjectBoxDocumentProps` class to reference the entity fields to use in the query. 
+ +For example: +```dart +final res = await vectorStore.similaritySearch( + query: 'What should I feed my cat?', + config: ObjectBoxSimilaritySearch( + k: 5, + scoreThreshold: 0.8, + filterCondition: ObjectBoxDocumentProps.id.equals('my-id') + .or(ObjectBoxDocumentProps.metadata.contains('some-text')), + ), +); +``` + +### Deleting vectors + +To delete documents, you can use the `delete` method passing the ids of the documents you want to delete. + +```dart +await vectorStore.delete(ids: ['9999']); +``` + +## Example: Building a Fully Local RAG App with ObjectBox and Ollama + +This example demonstrates how to build a fully local RAG (Retrieval-Augmented Generation) app using ObjectBox and Ollama. The app retrieves blog posts, splits them into chunks, and stores them in an ObjectBox vector store. It then uses the stored information to generate responses to user questions. + +![RAG Pipeline](img/objectbox.png) + +#### Prerequisites + +Before running the example, make sure you have the following: + +- Ollama installed (see the [Ollama documentation](https://ollama.com/) for installation instructions). +- [mxbai-embed-large:335m](https://ollama.com/library/mxbai-embed-large:335m) and [`llama3:8b`](https://ollama.com/library/llama3:8b) models downloaded. + +#### Steps + +**Step 1: Retrieving and Storing Documents** + +1. Retrieve several posts from the ObjectBox blog using a `WebBaseLoader` document loader. +2. Split the retrieved posts into chunks using a `RecursiveCharacterTextSplitter`. +3. Create embeddings from the document chunks using the `mxbai-embed-large:335m` embeddings model via `OllamaEmbeddings`. +4. Add the document chunks and their corresponding embeddings to the `ObjectBoxVectorStore`. + +> Note: this step only needs to be executed once (unless the documents change). The stored documents can be used for multiple queries. + +**Step 2: Constructing the RAG Pipeline** + +1. Set up a retrieval pipeline that takes a user question as input and retrieves the most relevant documents from the ObjectBox vector store. +2. Format the retrieved documents into a single string containing the source, title, and content of each document. +3. Pass the formatted string to the Llama 3 model to generate a response to the user question. + +```dart +// 1. Instantiate vector store +final vectorStore = ObjectBoxVectorStore( + embeddings: OllamaEmbeddings(model: 'mxbai-embed-large:335m'), + dimensions: 1024, +); + +// 2. Load documents +const loader = WebBaseLoader([ + 'https://objectbox.io/on-device-vector-databases-and-edge-ai/', + 'https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0/', + 'https://objectbox.io/on-device-vector-database-for-dart-flutter/', + 'https://objectbox.io/evolution-of-search-traditional-vs-vector-search//', +]); +final List docs = await loader.load(); + +// 3. Split docs into chunks +const splitter = RecursiveCharacterTextSplitter( + chunkSize: 500, + chunkOverlap: 0, +); +final List chunkedDocs = await splitter.invoke(docs); + +// 4. Add documents to vector store +await vectorStore.addDocuments(documents: chunkedDocs); + +// 5. Construct a RAG prompt template +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, + ''' +You are an assistant for question-answering tasks. + +Use the following pieces of retrieved context to answer the user question. + +Context: +{context} + +If you don't know the answer, just say that you don't know. +Use three sentences maximum and keep the answer concise. 
+Cite the source you used to answer the question. + +Example: +""" +One sentence [1]. Another sentence [2]. + +Sources: +[1] https://example.com/1 +[2] https://example.com/2 +""" +''' + ), + (ChatMessageType.human, '{question}'), +]); + +// 6. Define the model to use and the vector store retriever +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions(model: 'llama3:8b'), +); +final retriever = vectorStore.asRetriever(); + +// 7. Create a Runnable that combines the retrieved documents into a single formatted string +final docCombiner = Runnable.mapInput, String>((docs) { + return docs.map((d) => ''' +Source: ${d.metadata['source']} +Title: ${d.metadata['title']} +Content: ${d.pageContent} +--- +''').join('\n'); +}); + +// 8. Define the RAG pipeline +final chain = Runnable.fromMap({ + 'context': retriever.pipe(docCombiner), + 'question': Runnable.passthrough(), +}).pipe(promptTemplate).pipe(chatModel).pipe(StringOutputParser()); + +// 9. Run the pipeline +final stream = chain.stream( + 'Which algorithm does ObjectBox Vector Search use? Can I use it in Flutter apps?', +); +await stream.forEach(stdout.write); +// According to the sources provided, ObjectBox Vector Search uses the HNSW +// (Hierarchical Navigable Small World) algorithm [1]. +// +// And yes, you can use it in Flutter apps. The article specifically mentions +// that ObjectBox 4.0 introduces an on-device vector database for the +// Dart/Flutter platform [2]. +// +// Sources: +// [1] https://objectbox.io/first-on-device-vector-database-objectbox-4-0/ +// [2] https://objectbox.io/on-device-vector-database-for-dart-flutter/ +``` + +## Advance + +### BaseObjectBoxVectorStore + +If you need more control over the entity (e.g. if you need to persist custom fields), you can use the `BaseObjectBoxVectorStore` class instead of `ObjectBoxVectorStore`. + +`BaseObjectBoxVectorStore` requires the following parameters: +- `embeddings`: The embeddings model to use. +- `box`: The ObjectBox `Box` instance to use. +- `createEntity`: A function that creates an entity from the given data. +- `createDocument`: A function that creates a LangChain's `Document` from the given entity. +- `getIdProperty`: A function that returns the ID property of the entity. +- `getEmbeddingProperty`: A function that returns the embedding property of the entity. + +You can check how `ObjectBoxVectorStore` is implemented to see how to use `BaseObjectBoxVectorStore`. 
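As a rough sketch of how that wiring can look, the example below defines a custom entity with an extra `author` field and plugs it into `BaseObjectBoxVectorStore`. The `CustomDoc` entity, its generated `CustomDoc_` query-properties class and the `buildVectorStore` helper are hypothetical placeholders (not part of this package); your own entity and the code emitted for it by `objectbox_generator` would take their place:

```dart
import 'dart:convert';

import 'package:langchain_community/langchain_community.dart';
import 'package:langchain_core/documents.dart';
import 'package:langchain_ollama/langchain_ollama.dart';
import 'package:objectbox/objectbox.dart';
// Plus your generated `objectbox.g.dart`, which provides `CustomDoc_`.

// Hypothetical entity that persists an extra custom field.
@Entity()
class CustomDoc {
  CustomDoc(
    this.internalId,
    this.id,
    this.content,
    this.metadata,
    this.embedding,
    this.author,
  );

  @Id()
  int internalId = 0;

  @Unique(onConflict: ConflictStrategy.replace)
  String id;

  String content;
  String metadata;
  String author; // Custom field not present in ObjectBoxDocument.

  @HnswIndex(dimensions: 1024)
  @Property(type: PropertyType.floatVector)
  List<double> embedding;
}

/// Builds the vector store on top of an already opened ObjectBox [Store].
BaseObjectBoxVectorStore<CustomDoc> buildVectorStore(Store store) {
  return BaseObjectBoxVectorStore<CustomDoc>(
    embeddings: OllamaEmbeddings(model: 'mxbai-embed-large:335m'),
    box: store.box<CustomDoc>(),
    createEntity: (id, content, metadata, embedding) =>
        CustomDoc(0, id, content, metadata, embedding, 'unknown'),
    createDocument: (entity) => Document(
      id: entity.id,
      pageContent: entity.content,
      metadata: jsonDecode(entity.metadata),
    ),
    getIdProperty: () => CustomDoc_.id,
    getEmbeddingProperty: () => CustomDoc_.embedding,
  );
}
```

Once constructed this way, `addDocuments`, `similaritySearch` and `delete` behave the same as with `ObjectBoxVectorStore`, while the shape of the persisted entity stays under your control.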
diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index b3733733..ca9c5503 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -129,6 +129,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.0" + flat_buffers: + dependency: transitive + description: + name: flat_buffers + sha256: "380bdcba5664a718bfd4ea20a45d39e13684f5318fcd8883066a55e21f37f4c3" + url: "https://pub.dev" + source: hosted + version: "23.5.26" flutter: dependency: "direct main" description: flutter @@ -303,6 +311,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.0" + objectbox: + dependency: transitive + description: + name: objectbox + sha256: "70ff2a7538f6f8bb56136734d574f5bdc1cf29c50cd7207a14ea0c641ecb88ca" + url: "https://pub.dev" + source: hosted + version: "4.0.1" openai_dart: dependency: "direct overridden" description: diff --git a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart new file mode 100644 index 00000000..4a8950b7 --- /dev/null +++ b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart @@ -0,0 +1,108 @@ +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +void main() async { + await _rag(); +} + +Future _rag() async { + // 1. Instantiate vector store + final vectorStore = ObjectBoxVectorStore( + embeddings: OllamaEmbeddings(model: 'mxbai-embed-large:335m'), + dimensions: 1024, + directory: 'bin/modules/retrieval/vector_stores/integrations', + ); + + // 2. Load documents + const loader = WebBaseLoader([ + 'https://objectbox.io/on-device-vector-databases-and-edge-ai/', + 'https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0/', + 'https://objectbox.io/on-device-vector-database-for-dart-flutter/', + 'https://objectbox.io/evolution-of-search-traditional-vs-vector-search//', + ]); + final List docs = await loader.load(); + + // 3. Split docs into chunks + const splitter = RecursiveCharacterTextSplitter( + chunkSize: 500, + chunkOverlap: 0, + ); + final List chunkedDocs = await splitter.invoke(docs); + + // 4. Add documents to vector store + await vectorStore.addDocuments(documents: chunkedDocs); + + // 5. Construct a RAG prompt template + final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + ''' +You are an assistant for question-answering tasks. + +Use the following pieces of retrieved context to answer the user question. + +Context: +{context} + +If you don't know the answer, just say that you don't know. +Use three sentences maximum and keep the answer concise. +Cite the source you used to answer the question. + +Example: +""" +One sentence [1]. Another sentence [2]. + +Sources: +[1] https://example.com/1 +[2] https://example.com/2 +""" +''' + ), + (ChatMessageType.human, '{question}'), + ]); + + // 6. Define the model to use and the vector store retriever + final chatModel = ChatOllama( + defaultOptions: const ChatOllamaOptions(model: 'llama3'), + ); + final retriever = vectorStore.asRetriever(); + + // 7. 
Create a Runnable that combines the retrieved documents into a single string + final docCombiner = Runnable.mapInput, String>((docs) { + return docs + .map( + (final d) => ''' +Source: ${d.metadata['source']} +Title: ${d.metadata['title']} +Content: ${d.pageContent} +--- +''', + ) + .join('\n'); + }); + + // 8. Define the RAG pipeline + final chain = Runnable.fromMap({ + 'context': retriever.pipe(docCombiner), + 'question': Runnable.passthrough(), + }).pipe(promptTemplate).pipe(chatModel).pipe(const StringOutputParser()); + + // 9. Run the pipeline + final stream = chain.stream( + 'Which algorithm does ObjectBox Vector Search use? Can I use it in Flutter apps?', + ); + await stream.forEach(stdout.write); + // According to the sources provided, ObjectBox Vector Search uses the HNSW + // (Hierarchical Navigable Small World) algorithm [1]. + // + // And yes, you can use it in Flutter apps. The article specifically mentions + // that ObjectBox 4.0 introduces an on-device vector database for the + // Dart/Flutter platform [2]. + // + // Sources: + // [1] https://objectbox.io/first-on-device-vector-database-objectbox-4-0/ + // [2] https://objectbox.io/on-device-vector-database-for-dart-flutter/ +} diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 014430f6..bc3d8b13 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -104,6 +104,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.2" + ffi: + dependency: transitive + description: + name: ffi + sha256: "493f37e7df1804778ff3a53bd691d8692ddf69702cf4c1c1096a2e41b4779e21" + url: "https://pub.dev" + source: hosted + version: "2.1.2" fixnum: dependency: transitive description: @@ -112,6 +120,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.0" + flat_buffers: + dependency: transitive + description: + name: flat_buffers + sha256: "380bdcba5664a718bfd4ea20a45d39e13684f5318fcd8883066a55e21f37f4c3" + url: "https://pub.dev" + source: hosted + version: "23.5.26" freezed_annotation: dependency: transitive description: @@ -311,6 +327,14 @@ packages: relative: true source: path version: "0.0.3+1" + objectbox: + dependency: transitive + description: + name: objectbox + sha256: "70ff2a7538f6f8bb56136734d574f5bdc1cf29c50cd7207a14ea0c641ecb88ca" + url: "https://pub.dev" + source: hosted + version: "4.0.1" ollama_dart: dependency: "direct overridden" description: diff --git a/melos.yaml b/melos.yaml index d4792fff..b39bb2a1 100644 --- a/melos.yaml +++ b/melos.yaml @@ -49,6 +49,7 @@ command: langchain_tiktoken: ^1.0.1 math_expressions: ^2.4.0 meta: ^1.11.0 + objectbox: ^4.0.1 pinecone: ^0.7.2 shared_preferences: ^2.2.2 shelf: ^1.4.1 @@ -59,6 +60,7 @@ command: build_runner: ^2.4.9 freezed: ^2.4.7 json_serializable: ^6.7.1 + objectbox_generator: ^4.0.0 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git diff --git a/packages/langchain/lib/src/vector_stores/memory.dart b/packages/langchain/lib/src/vector_stores/memory.dart index e812d275..a439e1cf 100644 --- a/packages/langchain/lib/src/vector_stores/memory.dart +++ b/packages/langchain/lib/src/vector_stores/memory.dart @@ -14,7 +14,9 @@ import 'package:uuid/uuid.dart'; /// This is not efficient for large vector stores as it has a time complexity /// of O(vector_dimensionality * num_vectors). /// -/// For more efficient vector stores, see [VertexAIMatchingEngine](https://pub.dev/documentation/langchain_google/latest/langchain_google/VertexAIMatchingEngine-class.html). 
+/// This class is useful for testing and prototyping, but it is not recommended +/// for production use cases. See other vector store integrations for +/// production use cases. /// /// ### Filtering /// diff --git a/packages/langchain_community/lib/langchain_community.dart b/packages/langchain_community/lib/langchain_community.dart index 3aee4cf9..b91a968a 100644 --- a/packages/langchain_community/lib/langchain_community.dart +++ b/packages/langchain_community/lib/langchain_community.dart @@ -3,3 +3,4 @@ library; export 'src/document_loaders/document_loaders.dart'; export 'src/tools/tools.dart'; +export 'src/vector_stores/vector_stores.dart'; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart new file mode 100644 index 00000000..7e065c4a --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart @@ -0,0 +1,120 @@ +import 'dart:convert'; + +import 'package:langchain_core/documents.dart'; +import 'package:langchain_core/vector_stores.dart'; +import 'package:objectbox/objectbox.dart' + show + Box, + Condition, + ObjectWithScore, + QueryHnswProperty, + QueryStringProperty; +import 'package:uuid/uuid.dart'; + +/// {@template base_object_box_vector_store} +/// Base class for ObjectBox vector store. +/// +/// Use this class if you need more control over the ObjectBox store. +/// Otherwise, use [ObjectBoxVectorStore] which is a pre-configured version. +/// {@endtemplate} +class BaseObjectBoxVectorStore extends VectorStore { + /// {@macro base_object_box_vector_store} + BaseObjectBoxVectorStore({ + required super.embeddings, + required final Box box, + required final T Function( + String id, + String content, + String metadata, + List embedding, + ) createEntity, + required final Document Function(T) createDocument, + required final QueryStringProperty Function() getIdProperty, + required final QueryHnswProperty Function() getEmbeddingProperty, + }) : _box = box, + _createEntity = createEntity, + _createDocument = createDocument, + _getIdProperty = getIdProperty, + _getEmbeddingProperty = getEmbeddingProperty; + + /// The [Box] to store the entities in. + final Box _box; + + /// The function to create an entity [T] from the given data. + final T Function( + String id, + String content, + String metadata, + List embedding, + ) _createEntity; + + /// The function to create a [Document] from the given entity [T]. + final Document Function(T) _createDocument; + + /// A getter for the ID query property. + final QueryStringProperty Function() _getIdProperty; + + /// A getter for the embedding query property. + final QueryHnswProperty Function() _getEmbeddingProperty; + + /// UUID generator. + final Uuid _uuid = const Uuid(); + + @override + Future> addVectors({ + required final List> vectors, + required final List documents, + }) async { + assert(vectors.length == documents.length); + + final List ids = []; + final List records = []; + for (var i = 0; i < documents.length; i++) { + final doc = documents[i]; + final id = doc.id ?? 
_uuid.v4(); + final entity = _createEntity( + id, + doc.pageContent, + jsonEncode(doc.metadata), + vectors[i], + ); + ids.add(id); + records.add(entity); + } + + _box.putMany(records); + return ids; + } + + @override + Future delete({required final List ids}) async { + _box.query(_getIdProperty().oneOf(ids)).build().remove(); + } + + @override + Future> similaritySearchByVectorWithScores({ + required final List embedding, + final VectorStoreSimilaritySearch config = + const VectorStoreSimilaritySearch(), + }) async { + var filter = + _getEmbeddingProperty().nearestNeighborsF32(embedding, config.k); + + final filterCondition = config.filter?.values.firstOrNull; + if (filterCondition != null && filterCondition is Condition) { + filter = filter.and(filterCondition); + } + + final query = _box.query(filter).build(); + + Iterable> results = query.findWithScores(); + + if (config.scoreThreshold != null) { + results = results.where((final r) => r.score >= config.scoreThreshold!); + } + + return results + .map((r) => (_createDocument(r.object), r.score)) + .toList(growable: false); + } +} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json new file mode 100644 index 00000000..32251c2e --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json @@ -0,0 +1,56 @@ +{ + "_note1": "KEEP THIS FILE! Check it into a version control system (VCS) like git.", + "_note2": "ObjectBox manages crucial IDs for your object model. See docs for details.", + "_note3": "If you have VCS merge conflicts, you must resolve them according to ObjectBox docs.", + "entities": [ + { + "id": "1:4662034750769022750", + "lastPropertyId": "5:5762998900965066008", + "name": "ObjectBoxDocument", + "properties": [ + { + "id": "1:328437667364158177", + "name": "internalId", + "type": 6, + "flags": 1 + }, + { + "id": "2:3766173764062654800", + "name": "id", + "type": 9, + "flags": 34848, + "indexId": "1:8818474670164842374" + }, + { + "id": "3:7972539540824041325", + "name": "content", + "type": 9 + }, + { + "id": "4:866532944790310363", + "name": "metadata", + "type": 9 + }, + { + "id": "5:5762998900965066008", + "name": "embedding", + "type": 28, + "flags": 8, + "indexId": "2:3016727589204567263" + } + ], + "relations": [] + } + ], + "lastEntityId": "1:4662034750769022750", + "lastIndexId": "2:3016727589204567263", + "lastRelationId": "0:0", + "lastSequenceId": "0:0", + "modelVersion": 5, + "modelVersionParserMinimum": 5, + "retiredEntityUids": [], + "retiredIndexUids": [], + "retiredPropertyUids": [], + "retiredRelationUids": [], + "version": 1 +} \ No newline at end of file diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart new file mode 100644 index 00000000..0a3ac27b --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart @@ -0,0 +1,196 @@ +import 'dart:convert'; + +import 'package:langchain_core/documents.dart'; +import 'package:objectbox/objectbox.dart' + show + Condition, + ConflictStrategy, + Entity, + HnswIndex, + Id, + Property, + PropertyType, + Store, + Unique; + +import 'base_objectbox.dart'; +import 'objectbox.g.dart' as obxg; +import 'types.dart'; + +/// Vector store for the [ObjectBox](https://objectbox.io/) on-device database. 
+/// +/// ```dart +/// final embeddings = OllamaEmbeddings(model: 'llama3'); +/// final vectorStore = ObjectBoxVectorStore(embeddings: embeddings); +/// ``` +/// +/// This vector stores creates a [Store] with an [ObjectBoxDocument] entity +/// that persists LangChain [Document]s along with their embeddings. If you +/// need more control over the entity, you can use the +/// [BaseObjectBoxVectorStore] class instead. +/// +/// See documentation for more details: +/// - [LangChain.dart ObjectBox docs](https://langchaindart.com/#/modules/retrieval/vector_stores/integrations/objectbox) +/// - [ObjectBox Vector Search docs](https://docs.objectbox.io/ann-vector-search) +/// +/// ### Filtering +/// +/// You can use the [ObjectBoxSimilaritySearch] class to pass ObjectBox-specific +/// filtering options. +/// +/// [ObjectBoxVectorStore] supports filtering queries by id, content or metadata +/// using ObjectBox's [Condition]. You can define the filter condition in the +/// [ObjectBoxSimilaritySearch] `filterCondition` parameter. Use the +/// [ObjectBoxDocumentProps] class to reference the entity fields to use in the +/// query. +/// +/// For example: +/// ```dart +/// final vectorStore = ObjectBoxVectorStore(...); +/// final res = await vectorStore.similaritySearch( +/// query: 'What should I feed my cat?', +/// config: ObjectBoxSimilaritySearch( +/// k: 5, +/// scoreThreshold: 0.8, +/// filterCondition: ObjectBoxDocumentProps.id.equals('my-id') +/// .or(ObjectBoxDocumentProps.metadata.contains('some-text')), +/// ), +/// ); +/// ``` +class ObjectBoxVectorStore extends BaseObjectBoxVectorStore { + /// Creates an [ObjectBoxVectorStore] instance. + /// + /// Main configuration options: + /// - [embeddings] The embeddings model to use. + /// - [dimensions] The number of dimensions of the embeddings (vector size). + /// + /// ObjectBox-specific options: + /// - Check the ObjectBox's [Store] documentation for more details on the + /// different options. + ObjectBoxVectorStore({ + required super.embeddings, + required final int dimensions, + final String? directory, + final int? maxDBSizeInKB, + final int? maxDataSizeInKB, + final int? fileMode, + final int? maxReaders, + final bool queriesCaseSensitiveDefault = true, + final String? macosApplicationGroup, + }) : super( + box: _openStore( + dimensions: dimensions, + directory: directory, + maxDBSizeInKB: maxDBSizeInKB, + maxDataSizeInKB: maxDataSizeInKB, + fileMode: fileMode, + maxReaders: maxReaders, + queriesCaseSensitiveDefault: queriesCaseSensitiveDefault, + macosApplicationGroup: macosApplicationGroup, + ).box(), + createEntity: _createObjectBoxDocument, + createDocument: _createDoc, + getIdProperty: () => obxg.ObjectBoxDocument_.id, + getEmbeddingProperty: () => obxg.ObjectBoxDocument_.embedding, + ); + + /// The ObjectBox store. + static Store? _store; + + /// Opens the ObjectBox store. + static Store _openStore({ + required final int dimensions, + final String? directory, + final int? maxDBSizeInKB, + final int? maxDataSizeInKB, + final int? fileMode, + final int? maxReaders, + final bool queriesCaseSensitiveDefault = true, + final String? macosApplicationGroup, + }) { + return _store ??= obxg.openStore( + dimensions: dimensions, + directory: directory, + maxDBSizeInKB: maxDBSizeInKB, + maxDataSizeInKB: maxDataSizeInKB, + fileMode: fileMode, + maxReaders: maxReaders, + queriesCaseSensitiveDefault: queriesCaseSensitiveDefault, + macosApplicationGroup: macosApplicationGroup, + ); + } + + /// Creates an [ObjectBoxDocument] entity. 
+ static ObjectBoxDocument _createObjectBoxDocument( + String id, + String content, + String metadata, + List embedding, + ) => + ObjectBoxDocument(0, id, content, metadata, embedding); + + /// Creates a [Document] from an [ObjectBoxDocument] entity. + static Document _createDoc(ObjectBoxDocument entity) { + Map metadata = const {}; + try { + metadata = jsonDecode(entity.metadata); + } catch (_) {} + return Document( + id: entity.id, + pageContent: entity.content, + metadata: metadata, + ); + } + + /// Closes the ObjectBox store; + /// + /// Don't try to call any other methods after the store is closed. + void close() { + _store?.close(); + _store = null; + } +} + +/// {@template objectbox_document} +/// The ObjectBox entity representing a LangChain [Document]. +/// {@endtemplate} +@Entity() +class ObjectBoxDocument { + /// {@macro objectbox_document} + ObjectBoxDocument( + this.internalId, + this.id, + this.content, + this.metadata, + this.embedding, + ); + + /// The internal ID used by ObjectBox. + @Id() + int internalId = 0; + + /// The ID of the document. + @Unique(onConflict: ConflictStrategy.replace) + String id; + + /// The content of the document. + String content; + + /// The metadata of the document. + String metadata; + + /// The embedding of the document. + @HnswIndex(dimensions: 0) // Set dynamically in the ObjectBoxVectorStore + @Property(type: PropertyType.floatVector) + List embedding; +} + +/// [ObjectBoxDocument] entity fields to define ObjectBox queries. +/// +/// Example: +/// ```dart +/// final filterCondition = ObjectBoxDocumentProps.metadata +/// .contains('animal') +/// .or(ObjectBoxDocumentProps.metadata.contains('natural'); +/// ``` +typedef ObjectBoxDocumentProps = obxg.ObjectBoxDocument_; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart new file mode 100644 index 00000000..4eed33be --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart @@ -0,0 +1,193 @@ +// GENERATED CODE - DO NOT MODIFY BY HAND +// This code was generated by ObjectBox. To update it run the generator again +// with `dart run build_runner build`. +// See also https://docs.objectbox.io/getting-started#generate-objectbox-code + +// ignore_for_file: camel_case_types, depend_on_referenced_packages, avoid_js_rounded_ints, require_trailing_commas, cascade_invocations, strict_raw_type +// coverage:ignore-file + +import 'dart:typed_data'; + +import 'package:flat_buffers/flat_buffers.dart' as fb; +import 'package:objectbox/internal.dart' + as obx_int; // generated code can access "internal" functionality +import 'package:objectbox/objectbox.dart' as obx; + +import '../../../src/vector_stores/objectbox/objectbox.dart'; + +export 'package:objectbox/objectbox.dart'; // so that callers only have to import this file + +List? 
_entities; + +List _getEntities(int dimensions) { + if (_entities != null) { + final objectBoxDocumentEntity = _entities![0]; + final embeddingProperty = objectBoxDocumentEntity.properties[4]; + + if (embeddingProperty.hnswParams?.dimensions != dimensions) { + _entities = null; + } else { + return _entities!; + } + } + + return _entities ??= [ + obx_int.ModelEntity( + id: const obx_int.IdUid(1, 4662034750769022750), + name: 'ObjectBoxDocument', + lastPropertyId: const obx_int.IdUid(5, 5762998900965066008), + flags: 0, + properties: [ + obx_int.ModelProperty( + id: const obx_int.IdUid(1, 328437667364158177), + name: 'internalId', + type: 6, + flags: 1), + obx_int.ModelProperty( + id: const obx_int.IdUid(2, 3766173764062654800), + name: 'id', + type: 9, + flags: 34848, + indexId: const obx_int.IdUid(1, 8818474670164842374)), + obx_int.ModelProperty( + id: const obx_int.IdUid(3, 7972539540824041325), + name: 'content', + type: 9, + flags: 0), + obx_int.ModelProperty( + id: const obx_int.IdUid(4, 866532944790310363), + name: 'metadata', + type: 9, + flags: 0), + obx_int.ModelProperty( + id: const obx_int.IdUid(5, 5762998900965066008), + name: 'embedding', + type: 28, + flags: 8, + indexId: const obx_int.IdUid(2, 3016727589204567263), + hnswParams: obx_int.ModelHnswParams( + dimensions: dimensions, + )) + ], + relations: [], + backlinks: []) + ]; +} + +/// Shortcut for [obx.Store.new] that passes [getObjectBoxModel] and for Flutter +/// apps by default a [directory] using `defaultStoreDirectory()` from the +/// ObjectBox Flutter library. +/// +/// Note: for desktop apps it is recommended to specify a unique [directory]. +/// +/// See [obx.Store.new] for an explanation of all parameters. +/// +/// For Flutter apps, also calls `loadObjectBoxLibraryAndroidCompat()` from +/// the ObjectBox Flutter library to fix loading the native ObjectBox library +/// on Android 6 and older. +obx.Store openStore( + {required int dimensions, + String? directory, + int? maxDBSizeInKB, + int? maxDataSizeInKB, + int? fileMode, + int? maxReaders, + bool queriesCaseSensitiveDefault = true, + String? macosApplicationGroup}) { + return obx.Store(getObjectBoxModel(dimensions), + directory: directory, + maxDBSizeInKB: maxDBSizeInKB, + maxDataSizeInKB: maxDataSizeInKB, + fileMode: fileMode, + maxReaders: maxReaders, + queriesCaseSensitiveDefault: queriesCaseSensitiveDefault, + macosApplicationGroup: macosApplicationGroup); +} + +/// Returns the ObjectBox model definition for this project for use with +/// [obx.Store.new]. 
+obx_int.ModelDefinition getObjectBoxModel(int dimensions) { + final entities = _getEntities(dimensions); + final model = obx_int.ModelInfo( + entities: _getEntities(dimensions), + lastEntityId: const obx_int.IdUid(1, 4662034750769022750), + lastIndexId: const obx_int.IdUid(2, 3016727589204567263), + lastRelationId: const obx_int.IdUid(0, 0), + lastSequenceId: const obx_int.IdUid(0, 0), + retiredEntityUids: const [], + retiredIndexUids: const [], + retiredPropertyUids: const [], + retiredRelationUids: const [], + modelVersion: 5, + modelVersionParserMinimum: 5, + version: 1); + + final bindings = { + ObjectBoxDocument: obx_int.EntityDefinition( + model: entities[0], + toOneRelations: (ObjectBoxDocument object) => [], + toManyRelations: (ObjectBoxDocument object) => {}, + getId: (ObjectBoxDocument object) => object.internalId, + setId: (ObjectBoxDocument object, int id) { + object.internalId = id; + }, + objectToFB: (ObjectBoxDocument object, fb.Builder fbb) { + final idOffset = fbb.writeString(object.id); + final contentOffset = fbb.writeString(object.content); + final metadataOffset = fbb.writeString(object.metadata); + final embeddingOffset = fbb.writeListFloat32(object.embedding); + fbb.startTable(6); + fbb.addInt64(0, object.internalId); + fbb.addOffset(1, idOffset); + fbb.addOffset(2, contentOffset); + fbb.addOffset(3, metadataOffset); + fbb.addOffset(4, embeddingOffset); + fbb.finish(fbb.endTable()); + return object.internalId; + }, + objectFromFB: (obx.Store store, ByteData fbData) { + final buffer = fb.BufferContext(fbData); + final rootOffset = buffer.derefObject(0); + final internalIdParam = + const fb.Int64Reader().vTableGet(buffer, rootOffset, 4, 0); + final idParam = const fb.StringReader(asciiOptimization: true) + .vTableGet(buffer, rootOffset, 6, ''); + final contentParam = const fb.StringReader(asciiOptimization: true) + .vTableGet(buffer, rootOffset, 8, ''); + final metadataParam = const fb.StringReader(asciiOptimization: true) + .vTableGet(buffer, rootOffset, 10, ''); + final embeddingParam = + const fb.ListReader(fb.Float32Reader(), lazy: false) + .vTableGet(buffer, rootOffset, 12, []); + final object = ObjectBoxDocument(internalIdParam, idParam, + contentParam, metadataParam, embeddingParam); + + return object; + }) + }; + + return obx_int.ModelDefinition(model, bindings); +} + +/// [ObjectBoxDocument] entity fields to define ObjectBox queries. +class ObjectBoxDocument_ { + /// See [ObjectBoxDocument.internalId]. + static final internalId = + obx.QueryIntegerProperty(_entities![0].properties[0]); + + /// See [ObjectBoxDocument.id]. + static final id = + obx.QueryStringProperty(_entities![0].properties[1]); + + /// See [ObjectBoxDocument.content]. + static final content = + obx.QueryStringProperty(_entities![0].properties[2]); + + /// See [ObjectBoxDocument.metadata]. + static final metadata = + obx.QueryStringProperty(_entities![0].properties[3]); + + /// See [ObjectBoxDocument.embedding]. 
+ static final embedding = + obx.QueryHnswProperty(_entities![0].properties[4]); +} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/types.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/types.dart new file mode 100644 index 00000000..aaa08078 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/types.dart @@ -0,0 +1,29 @@ +import 'package:langchain_core/vector_stores.dart'; +import 'package:objectbox/objectbox.dart' show Condition; + +/// {@template objectbox_similarity_search} +/// ObjectBox similarity search config. +/// +/// ObjectBox supports filtering queries by id, content or metadata using +/// [Condition]. You can define the filter condition in the [filterCondition] +/// parameter. +/// +/// Example: +/// ```dart +/// ObjectBoxSimilaritySearch( +/// k: 10, +/// scoreThreshold: 1.3, +/// filterCondition: ObjectBoxDocumentProps.metadata.contains('cat'), +/// ); +/// ``` +/// {@endtemplate} +class ObjectBoxSimilaritySearch extends VectorStoreSimilaritySearch { + /// {@macro objectbox_similarity_search} + ObjectBoxSimilaritySearch({ + super.k = 4, + super.scoreThreshold, + final Condition? filterCondition, + }) : super( + filter: filterCondition != null ? {'filter': filterCondition} : null, + ); +} diff --git a/packages/langchain_community/lib/src/vector_stores/vector_stores.dart b/packages/langchain_community/lib/src/vector_stores/vector_stores.dart new file mode 100644 index 00000000..753d8168 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/vector_stores.dart @@ -0,0 +1,4 @@ +export 'objectbox/base_objectbox.dart' show BaseObjectBoxVectorStore; +export 'objectbox/objectbox.dart' + show ObjectBoxDocument, ObjectBoxDocumentProps, ObjectBoxVectorStore; +export 'objectbox/types.dart' show ObjectBoxSimilaritySearch; diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 8cacd96c..caa994db 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -24,6 +24,14 @@ dependencies: langchain_core: ^0.3.1 math_expressions: ^2.4.0 meta: ^1.11.0 + objectbox: ^4.0.1 + uuid: ^4.3.3 dev_dependencies: + build_runner: ^2.4.9 + langchain_openai: ^0.6.1+1 + objectbox_generator: ^4.0.0 test: ^1.25.2 + +objectbox: + output_dir: src/vector_stores/objectbox diff --git a/packages/langchain_community/pubspec_overrides.yaml b/packages/langchain_community/pubspec_overrides.yaml index 3508ed77..de62cfcc 100644 --- a/packages/langchain_community/pubspec_overrides.yaml +++ b/packages/langchain_community/pubspec_overrides.yaml @@ -1,4 +1,8 @@ -# melos_managed_dependency_overrides: langchain_core +# melos_managed_dependency_overrides: langchain_core,langchain_openai,openai_dart dependency_overrides: langchain_core: path: ../langchain_core + langchain_openai: + path: ../langchain_openai + openai_dart: + path: ../openai_dart diff --git a/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart b/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart new file mode 100644 index 00000000..740a06d7 --- /dev/null +++ b/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart @@ -0,0 +1,159 @@ +import 'dart:io'; + +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_core/documents.dart'; +import 'package:langchain_openai/langchain_openai.dart'; +import 'package:objectbox/objectbox.dart'; +import 'package:test/test.dart'; + +void 
main() async { + late final OpenAIEmbeddings embeddings; + late final ObjectBoxVectorStore vectorStore; + + setUpAll(() async { + embeddings = OpenAIEmbeddings( + apiKey: Platform.environment['OPENAI_API_KEY'], + ); + vectorStore = ObjectBoxVectorStore( + embeddings: embeddings, + dimensions: 1536, + directory: 'test/vector_stores/objectbox', + ); + }); + + group('ObjectBoxVectorStore tests', () { + test('Test add new vectors', () async { + final res = await vectorStore.addDocuments( + documents: [ + const Document( + id: '1', + pageContent: 'The cat sat on the mat', + metadata: {'cat': 'animal'}, + ), + const Document( + id: '2', + pageContent: 'The dog chased the ball.', + metadata: {'cat': 'animal'}, + ), + const Document( + id: '3', + pageContent: 'The boy ate the apple.', + metadata: {'cat': 'person'}, + ), + const Document( + id: '4', + pageContent: 'The girl drank the milk.', + metadata: {'cat': 'person'}, + ), + const Document( + id: '5', + pageContent: 'The sun is shining.', + metadata: {'cat': 'natural'}, + ), + ], + ); + + expect(res.length, 5); + }); + + test('Test query return 1 result', () async { + final res = await vectorStore.similaritySearch( + query: 'Is it raining?', + config: ObjectBoxSimilaritySearch(k: 1), + ); + expect(res.length, 1); + expect( + res.first.id, + '5', + ); + }); + + test('Test query with scoreThreshold', () async { + final res = await vectorStore.similaritySearchWithScores( + query: 'Is it raining?', + config: ObjectBoxSimilaritySearch(scoreThreshold: 0.3), + ); + for (final (_, score) in res) { + expect(score, greaterThan(0.3)); + } + }); + + test('Test query with equality filter', () async { + final res = await vectorStore.similaritySearch( + query: 'What are they eating?', + config: ObjectBoxSimilaritySearch( + k: 10, + scoreThreshold: 1.3, + filterCondition: ObjectBoxDocumentProps.metadata.contains('person'), + ), + ); + for (final doc in res) { + expect(doc.metadata['cat'], 'person'); + } + }); + + test('Test query with filter with multiple operators', () async { + final res = await vectorStore.similaritySearch( + query: 'What are they eating?', + config: ObjectBoxSimilaritySearch( + k: 10, + filterCondition: ObjectBoxDocumentProps.metadata + .contains('animal') + .or(ObjectBoxDocumentProps.metadata.contains('natural')), + ), + ); + for (final doc in res) { + expect(doc.metadata['cat'], isNot('person')); + } + }); + + test('Test delete document', () async { + await vectorStore.addDocuments( + documents: [ + const Document( + id: '9999', + pageContent: 'This document will be deleted', + metadata: {'cat': 'xxx'}, + ), + ], + ); + final res1 = await vectorStore.similaritySearch( + query: 'Deleted doc', + config: ObjectBoxSimilaritySearch( + filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), + ), + ); + expect(res1.length, 1); + expect(res1.first.id, '9999'); + + await vectorStore.delete(ids: ['9999']); + final res2 = await vectorStore.similaritySearch( + query: 'Deleted doc', + config: ObjectBoxSimilaritySearch( + filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), + ), + ); + expect(res2.length, 0); + }); + }); + + group('ObjectBoxSimilaritySearch', () { + test('ObjectBoxSimilaritySearch fields', () { + final config = ObjectBoxSimilaritySearch( + k: 5, + scoreThreshold: 0.8, + filterCondition: ObjectBoxDocumentProps.metadata.contains('style1'), + ); + expect(config.k, 5); + expect(config.scoreThreshold, 0.8); + expect(config.filter?['filter'], isA>()); + }); + }); + + tearDownAll(() async { + embeddings.close(); + 
vectorStore.close(); + await File('test/vector_stores/objectbox/data.mdb').delete(); + await File('test/vector_stores/objectbox/lock.mdb').delete(); + }); +} From 541e65d642c92cac25e4e1ef862b39b7fc2b840c Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Tue, 28 May 2024 23:30:54 +0200 Subject: [PATCH 144/251] feat: Add Runnable.close() to close any resources associated with it (#439) --- packages/langchain_core/lib/src/runnables/binding.dart | 5 +++++ packages/langchain_core/lib/src/runnables/map.dart | 7 +++++++ .../langchain_core/lib/src/runnables/runnable.dart | 10 ++++++++++ .../langchain_core/lib/src/runnables/sequence.dart | 7 +++++++ .../google_ai/chat_google_generative_ai.dart | 2 +- .../lib/src/chat_models/chat_mistralai.dart | 2 +- .../lib/src/chat_models/chat_ollama.dart | 2 +- packages/langchain_ollama/lib/src/llms/ollama.dart | 2 +- .../lib/src/chat_models/chat_openai.dart | 2 +- packages/langchain_openai/lib/src/llms/openai.dart | 2 +- packages/langchain_openai/lib/src/tools/dall_e.dart | 2 +- .../test/chat_models/open_router_test.dart | 4 ++-- 12 files changed, 38 insertions(+), 9 deletions(-) diff --git a/packages/langchain_core/lib/src/runnables/binding.dart b/packages/langchain_core/lib/src/runnables/binding.dart index 75e6084f..a1b9f907 100644 --- a/packages/langchain_core/lib/src/runnables/binding.dart +++ b/packages/langchain_core/lib/src/runnables/binding.dart @@ -70,4 +70,9 @@ class RunnableBinding }), ).asBroadcastStream(); } + + @override + void close() { + for (final step in steps.values) { + step.close(); + } + } } diff --git a/packages/langchain_core/lib/src/runnables/runnable.dart b/packages/langchain_core/lib/src/runnables/runnable.dart index 792bc80a..71021af6 100644 --- a/packages/langchain_core/lib/src/runnables/runnable.dart +++ b/packages/langchain_core/lib/src/runnables/runnable.dart @@ -289,4 +289,14 @@ abstract class Runnable { return encoding.encode(promptValue.toString()); } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart index a62962e4..2e8fe5f6 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart @@ -278,7 +278,7 @@ class ChatOllama extends BaseChatModel { return encoding.encode(promptValue.toString()); } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_ollama/lib/src/llms/ollama.dart b/packages/langchain_ollama/lib/src/llms/ollama.dart index 7eeb7e7c..e61c6e27 100644 --- a/packages/langchain_ollama/lib/src/llms/ollama.dart +++ b/packages/langchain_ollama/lib/src/llms/ollama.dart @@ -276,7 +276,7 @@ class Ollama extends BaseLLM { return encoding.encode(promptValue.toString()); } - /// Closes the client and cleans up any resources associated with it. 
+ @override void close() { _client.endSession(); } diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index 053bf481..83bb8cd5 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -399,7 +399,7 @@ class ChatOpenAI extends BaseChatModel { : getEncoding('cl100k_base'); } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_openai/lib/src/llms/openai.dart b/packages/langchain_openai/lib/src/llms/openai.dart index 086b8b8a..9471acfc 100644 --- a/packages/langchain_openai/lib/src/llms/openai.dart +++ b/packages/langchain_openai/lib/src/llms/openai.dart @@ -345,7 +345,7 @@ class OpenAI extends BaseLLM { return encoding.encode(promptValue.toString()); } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_openai/lib/src/tools/dall_e.dart b/packages/langchain_openai/lib/src/tools/dall_e.dart index 3137dcfa..3ce66fd9 100644 --- a/packages/langchain_openai/lib/src/tools/dall_e.dart +++ b/packages/langchain_openai/lib/src/tools/dall_e.dart @@ -111,7 +111,7 @@ final class OpenAIDallETool extends StringTool { } } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_openai/test/chat_models/open_router_test.dart b/packages/langchain_openai/test/chat_models/open_router_test.dart index 4587b56b..f108db8b 100644 --- a/packages/langchain_openai/test/chat_models/open_router_test.dart +++ b/packages/langchain_openai/test/chat_models/open_router_test.dart @@ -107,8 +107,8 @@ void main() { } }); - test('Test tool calling', - timeout: const Timeout(Duration(minutes: 1)), () async { + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { const tool = ToolSpec( name: 'get_current_weather', description: 'Get the current weather in a given location', From 5c76dbf36c2103c3a904a38cec6b807b4b42fdba Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 29 May 2024 00:02:23 +0200 Subject: [PATCH 145/251] fix: Stream errors are not propagated by StringOutputParser (#440) --- packages/langchain_core/lib/src/output_parsers/string.dart | 4 +--- packages/langchain_core/lib/src/runnables/sequence.dart | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/packages/langchain_core/lib/src/output_parsers/string.dart b/packages/langchain_core/lib/src/output_parsers/string.dart index f5ea11a8..9dd4722a 100644 --- a/packages/langchain_core/lib/src/output_parsers/string.dart +++ b/packages/langchain_core/lib/src/output_parsers/string.dart @@ -68,9 +68,7 @@ class StringOutputParser if (reduceOutputStream) { yield await inputStream.map(_parse).reduce((final a, final b) => '$a$b'); } else { - await for (final input in inputStream) { - yield _parse(input); - } + yield* inputStream.map(_parse); } } diff --git a/packages/langchain_core/lib/src/runnables/sequence.dart b/packages/langchain_core/lib/src/runnables/sequence.dart index e69cf0bc..4be296b9 100644 --- a/packages/langchain_core/lib/src/runnables/sequence.dart +++ b/packages/langchain_core/lib/src/runnables/sequence.dart @@ -129,7 +129,7 @@ class RunnableSequence Stream streamFromInputStream( final Stream inputStream, { final RunnableOptions? 
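
The `StringOutputParser` change above (together with the `RunnableSequence` change that continues below) is about forwarding errors raised while streaming to the output stream, per the commit message "Stream errors are not propagated by StringOutputParser". A consumer-side sketch only — not part of the patch — assuming the `langchain` and `langchain_ollama` packages, a locally running Ollama server, and an illustrative model name and prompt:

```dart
import 'package:langchain/langchain.dart';
import 'package:langchain_ollama/langchain_ollama.dart';

Future<void> main() async {
  // Chat model -> string parser chain, consumed as a stream.
  final chain = ChatOllama(
    defaultOptions: ChatOllamaOptions(model: 'llama3'),
  ).pipe(StringOutputParser<ChatResult>());

  try {
    await for (final chunk in chain.stream(PromptValue.string('Why is the sky blue?'))) {
      print(chunk);
    }
  } catch (e) {
    // Per the commit message, errors raised inside the streaming path were
    // previously not propagated by StringOutputParser; with the fix they
    // surface to the consumer like any other stream error.
    print('Stream error: $e');
  }
}
```
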
  options,
-  }) {
+  }) async* {
    Stream nextStepStream;
    try {
      nextStepStream = first.streamFromInputStream(
@@ -152,7 +152,7 @@ class RunnableSequence
    }
    try {
-      return last.streamFromInputStream(
+      yield* last.streamFromInputStream(
        nextStepStream,
        options: last.getCompatibleOptions(options),
      );

From 899eac8c212ec510ce55633c39cd8e6711bfe6be Mon Sep 17 00:00:00 2001
From: David Miguel Lozano
Date: Wed, 29 May 2024 00:05:54 +0200
Subject: [PATCH 146/251] docs: Update hello_world_flutter example with
 Ollama, GoogleAI and Mistral support (#441)

---
 examples/hello_world_flutter/README.md        |  21 +-
 .../android/app/build.gradle                  |  30 +-
 .../hello_world_flutter/android/build.gradle  |  13 -
 .../android/gradle.properties                 |   2 +-
 .../gradle/wrapper/gradle-wrapper.properties  |   2 +-
 .../android/settings.gradle                   |  30 +-
 .../hello_world_flutter/devtools_options.yaml |   3 +
 .../hello_world_flutter.gif                   | Bin 0 -> 235564 bytes
 .../hello_world_flutter_local.gif             | Bin 144881 -> 0 bytes
 .../hello_world_flutter_openai.gif            | Bin 119360 -> 0 bytes
 .../ios/Flutter/AppFrameworkInfo.plist        |   2 +-
 .../ios/Runner.xcodeproj/project.pbxproj      |   8 +-
 .../xcshareddata/xcschemes/Runner.xcscheme    |   2 +-
 examples/hello_world_flutter/lib/app.dart     |   1 +
 .../lib/home/bloc/home_screen_cubit.dart      | 179 ++++++---
 .../lib/home/bloc/home_screen_state.dart      |  54 +--
 .../lib/home/bloc/providers.dart              |  40 ++
 .../lib/home/home_screen.dart                 | 355 ++++++++++++------
 examples/hello_world_flutter/pubspec.lock     | 124 +++++-
 examples/hello_world_flutter/pubspec.yaml     |   4 +
 .../pubspec_overrides.yaml                    |  14 +-
 .../web/flutter_bootstrap.js                  |  12 +
 examples/hello_world_flutter/web/index.html   |  62 +--
 .../hello_world_flutter/web/manifest.json     |   4 +-
 24 files changed, 647 insertions(+), 315 deletions(-)
 create mode 100644 examples/hello_world_flutter/devtools_options.yaml
 create mode 100644 examples/hello_world_flutter/hello_world_flutter.gif
 delete mode 100644 examples/hello_world_flutter/hello_world_flutter_local.gif
 delete mode 100644 examples/hello_world_flutter/hello_world_flutter_openai.gif
 create mode 100644 examples/hello_world_flutter/lib/home/bloc/providers.dart
 create mode 100644 examples/hello_world_flutter/web/flutter_bootstrap.js

diff --git a/examples/hello_world_flutter/README.md b/examples/hello_world_flutter/README.md
index 6b7c3871..eb983d97 100644
--- a/examples/hello_world_flutter/README.md
+++ b/examples/hello_world_flutter/README.md
@@ -1,9 +1,8 @@
-# Hello world Flutter
+# Hello World Flutter
-This sample app demonstrates how to call an LLM from a Flutter application using LangChain.dart.
+This sample application demonstrates how to call various remote and local LLMs from a Flutter application using LangChain.dart.
-You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f)
-blog post.
+![Hello World Flutter](hello_world_flutter.gif)
 ## Usage
 ```
 flutter run
 ```
-### Using OpenAI API
-You can get your OpenAI API key [here](https://platform.openai.com/account/api-keys).
-![OpenAI](hello_world_flutter_openai.gif)
-### Local model
-You can easily run local models using [Prem app](https://www.premai.io/#PremApp). It creates a local
-server that exposes a REST API with the same interface as the OpenAI API.
-![Local](hello_world_flutter_local.gif)
+- To use local models you need to have the [Ollama](https://ollama.ai/) app running and the model downloaded. diff --git a/examples/hello_world_flutter/android/app/build.gradle b/examples/hello_world_flutter/android/app/build.gradle index 48e93274..2c711c95 100644 --- a/examples/hello_world_flutter/android/app/build.gradle +++ b/examples/hello_world_flutter/android/app/build.gradle @@ -1,3 +1,9 @@ +plugins { + id 'com.android.application' + id 'kotlin-android' + id 'dev.flutter.flutter-gradle-plugin' +} + def localProperties = new Properties() def localPropertiesFile = rootProject.file('local.properties') if (localPropertiesFile.exists()) { @@ -6,11 +12,6 @@ if (localPropertiesFile.exists()) { } } -def flutterRoot = localProperties.getProperty('flutter.sdk') -if (flutterRoot == null) { - throw new GradleException("Flutter SDK not found. Define location with flutter.sdk in the local.properties file.") -} - def flutterVersionCode = localProperties.getProperty('flutter.versionCode') if (flutterVersionCode == null) { flutterVersionCode = '1' @@ -21,22 +22,18 @@ if (flutterVersionName == null) { flutterVersionName = '1.0' } -apply plugin: 'com.android.application' -apply plugin: 'kotlin-android' -apply from: "$flutterRoot/packages/flutter_tools/gradle/flutter.gradle" - android { namespace "com.example.hello_world_flutter" compileSdkVersion flutter.compileSdkVersion ndkVersion flutter.ndkVersion compileOptions { - sourceCompatibility JavaVersion.VERSION_1_8 - targetCompatibility JavaVersion.VERSION_1_8 + sourceCompatibility JavaVersion.VERSION_17 + targetCompatibility JavaVersion.VERSION_17 } kotlinOptions { - jvmTarget = '1.8' + jvmTarget = '17' } sourceSets { @@ -44,10 +41,7 @@ android { } defaultConfig { - // TODO: Specify your own unique Application ID (https://developer.android.com/studio/build/application-id.html). applicationId "com.example.hello_world_flutter" - // You can update the following values to match your application needs. - // For more information, see: https://docs.flutter.dev/deployment/android#reviewing-the-gradle-build-configuration. minSdkVersion flutter.minSdkVersion targetSdkVersion flutter.targetSdkVersion versionCode flutterVersionCode.toInteger() @@ -56,8 +50,6 @@ android { buildTypes { release { - // TODO: Add your own signing config for the release build. - // Signing with the debug keys for now, so `flutter run --release` works. signingConfig signingConfigs.debug } } @@ -66,7 +58,3 @@ android { flutter { source '../..' 
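
Returning to the updated README above: it lists key-based remote providers and a local Ollama path (the Android build migration continues below). As a rough illustration only — this is not the example's actual `lib/home/bloc/providers.dart` — the chat-model classes named come from the packages touched in these patches, while the enum, function, parameter names and model name are made up, and the corresponding packages are assumed to be in the pubspec:

```dart
import 'package:langchain/langchain.dart';
import 'package:langchain_google/langchain_google.dart';
import 'package:langchain_mistralai/langchain_mistralai.dart';
import 'package:langchain_ollama/langchain_ollama.dart';
import 'package:langchain_openai/langchain_openai.dart';

/// The providers the user can pick from (hypothetical enum for illustration).
enum Provider { openAI, googleAI, mistralAI, ollama }

/// Builds a chat model for the selected provider.
BaseChatModel chatModelFor(final Provider provider, final String apiKey) {
  return switch (provider) {
    Provider.openAI => ChatOpenAI(apiKey: apiKey),
    Provider.googleAI => ChatGoogleGenerativeAI(apiKey: apiKey),
    Provider.mistralAI => ChatMistralAI(apiKey: apiKey),
    // Local model: requires the Ollama app running and the model downloaded.
    Provider.ollama => ChatOllama(defaultOptions: ChatOllamaOptions(model: 'llama3')),
  };
}
```
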
} - -dependencies { - implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version" -} diff --git a/examples/hello_world_flutter/android/build.gradle b/examples/hello_world_flutter/android/build.gradle index f7eb7f63..bc157bd1 100644 --- a/examples/hello_world_flutter/android/build.gradle +++ b/examples/hello_world_flutter/android/build.gradle @@ -1,16 +1,3 @@ -buildscript { - ext.kotlin_version = '1.7.10' - repositories { - google() - mavenCentral() - } - - dependencies { - classpath 'com.android.tools.build:gradle:7.3.0' - classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version" - } -} - allprojects { repositories { google() diff --git a/examples/hello_world_flutter/android/gradle.properties b/examples/hello_world_flutter/android/gradle.properties index 94adc3a3..a199917a 100644 --- a/examples/hello_world_flutter/android/gradle.properties +++ b/examples/hello_world_flutter/android/gradle.properties @@ -1,3 +1,3 @@ -org.gradle.jvmargs=-Xmx1536M +org.gradle.jvmargs=-Xmx8g -XX:+HeapDumpOnOutOfMemoryError -XX:+UseParallelGC -Dfile.encoding=UTF-8 android.useAndroidX=true android.enableJetifier=true diff --git a/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties b/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties index 3c472b99..11fce01a 100644 --- a/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties +++ b/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties @@ -2,4 +2,4 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-bin.zip diff --git a/examples/hello_world_flutter/android/settings.gradle b/examples/hello_world_flutter/android/settings.gradle index 44e62bcf..fd7c1580 100644 --- a/examples/hello_world_flutter/android/settings.gradle +++ b/examples/hello_world_flutter/android/settings.gradle @@ -1,11 +1,25 @@ -include ':app' +pluginManagement { + def flutterSdkPath = { + def properties = new Properties() + file("local.properties").withInputStream { properties.load(it) } + def flutterSdkPath = properties.getProperty("flutter.sdk") + assert flutterSdkPath != null, "flutter.sdk not set in local.properties" + return flutterSdkPath + }() -def localPropertiesFile = new File(rootProject.projectDir, "local.properties") -def properties = new Properties() + includeBuild("$flutterSdkPath/packages/flutter_tools/gradle") -assert localPropertiesFile.exists() -localPropertiesFile.withReader("UTF-8") { reader -> properties.load(reader) } + repositories { + google() + mavenCentral() + gradlePluginPortal() + } +} -def flutterSdkPath = properties.getProperty("flutter.sdk") -assert flutterSdkPath != null, "flutter.sdk not set in local.properties" -apply from: "$flutterSdkPath/packages/flutter_tools/gradle/app_plugin_loader.gradle" +plugins { + id "dev.flutter.flutter-plugin-loader" version "1.0.0" + id "com.android.application" version "8.2.2" apply false + id "org.jetbrains.kotlin.android" version "1.9.23" apply false +} + +include ":app" diff --git a/examples/hello_world_flutter/devtools_options.yaml b/examples/hello_world_flutter/devtools_options.yaml new file mode 100644 index 00000000..fa0b357c --- /dev/null +++ b/examples/hello_world_flutter/devtools_options.yaml @@ -0,0 +1,3 @@ +description: This file stores settings for Dart & Flutter DevTools. 
+documentation: https://docs.flutter.dev/tools/devtools/extensions#configure-extension-enablement-states +extensions: diff --git a/examples/hello_world_flutter/hello_world_flutter.gif b/examples/hello_world_flutter/hello_world_flutter.gif new file mode 100644 index 0000000000000000000000000000000000000000..25058c3853c765f2244c53b6e10bfd69bd67ef44 GIT binary patch literal 235564 zcmWh!Wmwa17yhj`SkaB6K|(-8q#WHi(vgl71#~JRO1Qx&35O!0gd;^n93dj1qd~%f z2uL{)vB3Wn1C@AvpO4R{=Q`K9&$-WipU2L@*63i+NyrrV7x4dc3jn}^fFK465(Pv= z5z;akh#VldM+Bn?QdAOCQIi(b1omplY3V4+?1RehL*WeoB?H0z25N?fv<{hQ?KQ*8 zm;+|!`udhq7FLE9N3Vk(UJhp7 z1V^&KIkLo2AD22G9b-R?v;RSNf1;nin{}X4K(L!fhiVi+sjpVFHzx|5%9{l*oqoA8vvsL!`D zZ{B7yi<52_=N6Y-*Scd(zvGv6=iD9E4c9X3vWgq=m8lh#MHi}!@zr((cj;H|1>L<@ z+Ix?BueP-0em4CYd*Dw4fDN+j_I5^+9@@>67;Q>mBTY4y%sNrl_YU^PfF?%6U}TeYvZ<Y}IJPgVFTn(V{1#B^{%+ zoY4m(qdk>l&)dh6+Q%yDUevH(WH!7=YkqOP<%R#~i=q46mU?b}8<*P4ZEP6NY#dK* z9#?oV-hY2$@WI5v`iU#86W7Nl23V769h1e~ukKI28qS-FzA;65I+f0xW(-e1c{RQMUnal&_wm24AOHLI?(4VrAAU~# z__?&x2a+|S=%|NFD_>(AQnKVLp?aer*R-`f86ZFg~ZclTfNE9?ro+IV_cS&(e> zb#x)1|6l!v!VthNp!I)n{2wL&au>L;R`%-o)WZ~{$X@?Jc3Dq4Udgf8zM;G?OLG6o z`N4*YfvW_oE3yual|$FnJ!<`j8mop=lWA*iQ?hyH;`pfc)qQf)$%W_REDt~UlPhvgEp@M(B3{-8v(I5w*yQir&8x9jqipF_eU;{JZXCWf@PH@ z@6y(MVvMp-8aUea=<~D?#{KUB6Rowm)^nf7>LNxY&6~1E8@I=&s$H@T8_u@f+WhjK zThSwT&dMeotF^_vEq3jOqBXB;&+f@T+~2Mn2(Mjd?at1>zkGjJvd6&jB8SJWh;6if zG%2@AmkdTKynWPo%4@l$v^GwPq=ifqw8g834l z=Lpb2<#Q_6{3hawJYJ9E}p9N>{=h2eG@lp2UHnQd9{pC4gw~HXYBRrS-nYpHe8#{5gtN6cnDdl?-r}O_$I0+;A=yl?oM9QaF0{ zc;mRvG*Xcsx~uPe2kG`XdEZLs*4yOC!R%IM_R0I}SDX7joJB$hK^SeQ#!Dn*O;zN( z+H=6A=w@k|6G4c52@#y{*Pg$= z=v;@Gn9-J7H%$?({Z074A%uq43WA) zqFw3uNgT2WD7}fDc={(tA$C8%?@@7Ax}-lV$&2i|#lH)+V9-Ekd2$e8CaqMA8b?Gc zhVPzQLfbGDE=HbXsMnssgzHvF*k&O$``Kwhp5TK`&C`=KNj9ES%420!d2;AtQ|^_wtZrlqfP zg&<-k1gAxy2^rXEAb0?7k8b(_v;q_rN5_RX>MIy;+{*6CQ~bt6*pbg=PDj}8Kbx2J z-mLO)c&pXHdUBeb(Xh>xVh`0&i$-1B=+y7AJP;_Ucwrv^Wk# zF(})wyg}qSA4Z6~$ha(E0XWd$ibBa2)$C<3RNX=?unc`rnFBu1Wh02Bq+R-Bi3Z!> z@F2dxNAnVYgW24d0hKYB4A8X*wx{99P8+Yf$=H&`Q0>#tb*_z zys|+o0Wfxc%XCSp_I_L|K~V>b6t=306j@|9u7a<)g=S=a!DpN@U4C#?m(dj1aL}6T zAU+eHXMzKc6b_b&HT_l7SNhs?c(zNzxvURBBnw_#Ut&JjwUSFLL#rSG!3s*4B3+yg zRkoC>r^2-LnSkwVGVUC{@}v}n?z)v*)zVxiu7e=&{nsf3%bHxaf{C<4#8@n}=AG&a zC*$=vSo&N|g+Aq`L&e1Mg`q(ZXr7lb<*S!-#&Y{>Wn0~Tr|eVJUJ5ez ztySm$AbnPbd@kT2v`@Wb5!(*gOCen6_ms39f$Qv6$u-T}*Ro~s6xAXMfC$v1pq~dd z-sp@9&+I9E0m4fphAQi1m!XgFr_p448nk@x1jL}bpuSQk26YR1n-P{El))jS%kCA+KrmTtN4Lso#k{5&bl6I3p| z?arIB3BHSzjIfM2S!ClJJQGfB%jhFyy;$Sy=oLmdqI4q8lp#qihy3kGL2Gb3LCv8J zj;BXs?a@*3P;!s-D^lv-MyarGw;wAk9uf*{7%8{Q@4}xt-z_>j?8?!ACe%SaCAh8W z_A$xTr%u6&-^o5R)_gmrH@Kf$^Oa(SbqCA|#h3oPMU4sM#6H^$QKEePAY;%jl1JfW zJbd@z=ARbAWMRzGUO8~Ino6u$VTqcqL#k~|vUp_Snx?JX3D^nDtGdufy=wJ0f=wY) zuO#osRhWjImR~U$*c+>DtT3%ncMxy0+Zx#5ksFy=LID@%lNgjThz7+ zkNeGcOP>9QV?a(|BU_t}@x z!D0M;aDx*z`i5N*mpn@Hy$AkOKJ+T8hA0k_sj(wA&X5{CC7UD7{6qJrV{-F>c)mc>FS`v@cN8`F z>Z;$&wFC!B$XOOrRU}4=NG^!6H3J0mYyfp)@L4>_Ow|*?w6vW{08`)@u*8>-Q%uRi zf7~uizqg*1Kr>nJi@I9M;3Njym0NDMJAN>ITCMl&F=G|beiGy$6+&dCmREY*Vt}78 z+-Ss$1?k#j#85yK0_Fn;saD^TVcE4+1dtA4pEwcFVW9;NreJP*=%#}6D^twKSE^fu z4o4);+)R{_K%Zp6&(lyn`r2(%YF6FIH$icl2UEkuK~XIcC=Rm40HFkc#JP}vpxVQ6 zR_6d=5qKhj4)h8-FhMvbD3Tg$7%n)(L0VEkI0{Hd7je;BL?R3{bsjI~nKnd$tK)O+ zxF`o*a4g&NT(7-U3~-9+_uCfmBzQ_NAs^BNpG)SFxkwZnvNsf6N`>+^LBV*G4iCao zLZXQ&oD@M%T_<0r-`&kTf|RhY`-K;zYjtFAn;Q5TJmS1=RtKMPizGPREzr8T=etd~ z`|b*=gpV?PeH~#(Y38FI@&wr&%0rImi=X~N-2&^1p+mYRmM9FF>X)aga6~HK>V&^j z#q}3r;&nWfW2RuCQoi+X0X8e&8AUv%;p?<<;{iZyp&gn5i-#P_f5^EWgu3xFG~e+f 
z%5unYh+km$+F8M;FztkpL61NuyU?}QWquNUorG}o#B`VMY2_kio78S_k=a~4AS*U`>yA_}R-$1L1xno#Ia|J&&&?@Ju6dU9LjhoEG0QT;7xvA)}7 zspk$i8OW$wy6U;La-HvK6gytGFRLn2NG%eQIQ-yZ;moZPd>P>`S1|Wu;XS6n@m&~W z`e0To|5nmt!TZ2%%>m;Ep~LluZtjEMsoB`K2HKx39HXs^$R;39k|7su4Rk&3pcEs1 zX(&9qct_xm%I>nVp2(eZLk9Ry;)qWwnA#G_+yCgM;DS`C{7F<5zWg3wqx{>jX;bh4 z33c`da<4$@I!d&K3v)RS7~F-w~29d1+zX@DEJE8(-kx+S1Fwkc-|-&^}PDE zm8gS)eN)eEua{M^m+ppqt!iT54QeZludQ z%uh8UZs-m|&5h?-mO*#2+wK)#63hK_uSB}G{E7_AxVHLIZO!f4+P2!d+1mO)wQTA8 zjmGzzeebtiy5D;Hep}o9j@kQ9|J>(D*L54$^ydOkH{~bNj(;+k^Sp2aA6m@TBXPjq6u@>sK$;f4E)$>C%IKZvB@(^?d1v-;5uA z_kH;D(!qMp?++5JR4cW7HDUq=h)c4Y`jc^ut|ex*26#4 z2FaoZ>GlTMxd!>a4Fs7+MUzHlzed&gM)jh`z3q)!bB#KG8+B!x^i7%!{F)4RNA4z%JpCSd$3G$$J@RdT zH@4Sj*WjDSM>N&`{RVU z$4P%5(_~tcODM_G-#Js%IosYjKi9eVx0AOk^K{wd z>5AXe)%d3$ik^OIf4VmJ^vmC;e3@t8OrCxBd-gN_*=EtRKkd)9=AP~ReFn&Kz@{9i zKL?(`K^AiaIymTg4t9%!m+cZZ?Gp9x5>M!oEbfx-=#ri9lHckg$aX85b}RdLt0r`- z7kBUN=+>I=*4gUTmF>|t?J@B0F-+(&D(*4v=rNt|G2iMT%Jy3NKilNDZ&7>gi+deA zdY$KcUAKBkvVHEReV+b(-U)r=;y&MwKL7c?z^y)tY`^zuqk#Zc7*U zmETX59XM+`5aT}(n=lYpJaDOFAYpzWX={KcJD6-b$nYObOBl>39=x<`ii;e<{U-HC z$PQi0?zG#&TmulHY?u#wsCa&;bZdwu`@G!rd8PmJ>a`&^3hZvj^Sb%x^;^#?qG0#X z!_EG~Em6abQLwg-;g0#?r(44u*^zG3kzW6i&SF?71(DCk*qIKEZHZlQjBaqSek{xeb?nR57+?0qH`%cb8rGYJT{nI4 zr{l$z>5C0K)<+jJOT$0|xbQ?SvV<$p$we=4vD;jj8K#DSI!zmCp!PD+qtcz@wp!Rw z9;}8l7;HMBywJPBgMHu)s&r0hE%bh+z`k-P^tE69-sRvvuwhGF>>Kt=(}kDj+b@Z7 zla^+a)&Y~Y3oqAc*bhW(J{uNFz~+vOyKj$d;RW1?s8RnX@(B~9obD|E=MCW80=g%( zuzp1B8zS~(=Ty|f)T!+$s@(Khv+0eX)5VzHEf&g+gN-)r&RBnyT{5FIg1cHW z^^QARzdg&An`<Oq z?e5ydsk)N6$$*8a#Dy6b>^l~=GY7-jHJj(T^lUI;H9Xt~se3rOdvsxDq;qyQXMr#G z=3C;z>;m=<0sa-g_-DI&!g>5nNyo{p!J)*bo76>K(c*agVs725jY!^j`{Ikv4&P25 zMs|T|_C~UFNg6x1!o~U#P`jOP6vyWGJ-`UbcT9}*zaQ;~1a^QHm$~iz&x>Z%Tsx5R zqd9dvh1`x80dFUU`zHjK#*3Dt=LdDoJ0_f$W}V(zd|8wU#C{|#e8j)2FTs9eVc*fF zkK`_V0x+BFZpaXN3rjA=zGlO@%;|U*_CJ?* z4K8b*u4|I@SU(P`MrU>GY1bBjDC4em4G+!`L2y1KkORe#fP5aVats$s1WN;8Kj3}5 z{5(v4{{GYVJAAmb?$mwxjvXRYne>sb3mWL1gAR}t8E+5X@8QdWCZxXm1zg~DC4_-eDqmI{`cUUfVz{YS{6ad44`tcz!U65+& z$E{rs_6Q3k2|#LeXWw^zQ8)ij{J*d2f&cwF)w`q%8U|K-1wa4yU}>EP&*NerJ-|NH zg-9_$7FD0QB;UV0h$;c+wC|ns9#r_wTIh?etWH?K`f}t5R*VK(=HaM^afd0ufbOi$ z7hEV87|s?D<3mKTzfecsQ}=aH-wf{15SnaW}AML|p3Fyhv4wZdN zky>aN5xN0vE^&XZQDz>Rj$A^;-#ZGf&#s zZ0NaeTU1Hj;TI2n!ud%XEngekm`)qGu4eJOE0Q{N$31VOWAMPe>j4EQ$b^8{XPhmp z+{omcV0u@|4VJG1lV_1XpYR#++m!Qb&PH>?PK*eP?M@TP;kUom^$t6_ zAJ>wG5Us^O9hsvU?3hn%?^k%^p&9iaGck3*)4ODOab#5@GVf>W%y#FKqN8;!a2BX& z{z>Zm<8 zlE}_bmP?nMv_vC+Rh%}b_Y&?tzUA(D#zx#P=BtflNdB4rq2P;o`53=$7h8QeufdOGw#K(eJk(0JXvbEuR+@3LJHtl)Q+>Y4eJ z)7G({+i`+1vRC#g(zSqI!s$=r!HifIoyUQGR8L~DdK#x}|#@eue7p|~_iHRxn^Z14xGH~I*r>cTv4zEwJAI9>qA7B%=Kb%?`PMzBZ@@@2h%yC)vDYtn5~b0g zPCK>=QqE;t$r_MorOYPGGcU~PQMeT~)nC(gTt~Z%hXQ_CQn=7R? 
zj6y@@)6xTVY?{pLSS~vmsZm5uiq|2If7RUe_cWtYFpHmI7i_1c3V_*!ujg;&$*r66 zh|7J7G0yB{u_OwRgtp8MRsg-E)>z5!eRw|JaXP+ux;`fEEizAckhmze?{mOIVkWQ9 zwAD@s21LQvDOGaKtWYe#DAlVi*pWD^|&?c z8Ghl^q>!>(HxMNtmD{`<4v3$GIuuihd8=L6s08L75dtQlj?-fcPj%ihqRV|(Og0a7 z5jML77Aa*(FJA=qY1Xi&M*3~9LdOp-=m95y456B@Dd0nBZ)rG0KTef2WbN$ovF`8-zQsT+BD7^ z7?x28qAy8+F#j7=pZ^w@qI3mWSZ$DKPQOD`nEn>%HFW;#zUxZ%95wRq`cIjIA@A5U zIqn(!#kKp#?pfnE8^3}jq7)JvW2{gsKj>zny;v!N-DO(>{9q{Ya`3CYgYM6rdsV#~ zvcl>dPcFlDX|vgM*C6LGZ4=iGBcXEV6lrsD;%z%K8^_!s!AT|sa(RZ;yaQO6t`ouf zhQLXKo)Sn0Y0$u#7rVBphJV?jrYJq$Sdf!`*p)V$mh?7dXR}MhmD8*x)K_ls3O;J^ zRpjy@uwOECSmgfatJ-|FVYjZTgZJM19FCA&kd#hHTm=R)GgLKt5j$DFaTBu{<|R-F zH~!<`clmo+`xfo)ttdbHeIKTb-gRrlv-dj3>h(ec&O?5zz?S}O@?gIxRi@@dD7fK( z#b1B{n{Bw7*h)nIu8I6%%TD(WXUW~MCmR0DyP5{Oei@-Lo++*P|B1%A=j-|FTYjF;a${1N#B=O@CNrDqV&cB(8o598pyUY9PFV zk~SfdEGup2)Ecqg1|%%w!jtsv!Ha$ZO{X_cw4_w8i=#Ql%Bv8#W=5*v1PkyQ4T0!p z?8m_0OtauH3OK zV$22%{@kGb_ex0!6_>KFmjGz5ga~f683vAzV@=33*{Aj#@q`9;S8$>EuBZ^iG%bP3 zQ1f3>d5e%uim5Eppn%*6R(;w@XH?M~xaG=#qAMGOp8*)gA0bFn0tA00RalMBLCFr{ zY@#`WG8Ad~P`tEi900YKoQ1lNsR;Bc?pZPQBW_0po*{7n=hnEFLwFd329RAv2cRQ; zV=J2}=-?FuroZ>90gVDtYsDUB(EqWpeeM)gUrPZGrfixCR(Hv~hxmhdb+PNC#9a_@ z3c=|#4T40d<$Sc}F!oT0@H#;!aYG(Rc0L@Rr&wH=IJQ{gNs;n8cywWpe{^I>5t606 zmN5&;k|NQ-`ZJO?T6`4rBTH^+#uBd1N>$DPCF->98n&E%J&Z%IU~1JYzurH(9{9yV zQ}B4wM#*kMK6~vJ*8nRlO9Ix8ANwmpTXP<;lz7)1rezI6yGw@Q;t84RJ9t0RqJl@r z_*#*=qJSy28{5FNW~q#$jLVt(*9D2NjAX^H$OO*eWtKq7FapZB{o6`_EqUc4nfN0_ z^o+(e56uGyXRniJAQ+MZOai&SMJ$TwHweg;D_u@Jj962g@Zlt6Aj;G^*|ZyfhdEW7 zrd&-#8X=vEtP&DJ(s?SU`Ec)RH>0S?)Arp@71N_y5AKPvRF29>r&w;sb+d(Ae-r-I zMX?WmEp^dWtA6+V^*D^iXL0sc2b_C$Ri!dppIko}qxiB+(343Iqx@r}2Dg%LEoR8b zezlUFzfmEh{w@O%N47f9d>E>7(I%!hX~Ca;4<^JO;9Cd+o>NgrnxIqSrD+Sd8+> z%rj8AlXDh(qil|%RfJCT`I;jADlIRC+5{A*SgY>~TC=;nHE{h_`tw5rz*!=?IPawB zEuyK^X-_FBS=LXsa%}Nn{F>yh0<`-#bwI9IPkRQDU~fl5JLJg@-Qc34Gm@2LY{Rqj z7|KJ^9liNJ_EMRHb>)L{|2)#k?kmj>@(j4vAp8N~a8uT?T+6X&918y4ofkD!7ByI2 z=}@xbaIe^L$1OdIlY9i}STXNdzvft2#!!zddgu4tLfP@Ifsa7k_Rw!a zjt`5S+O{l{kg4h{#=-pIR6)h3=$=kCIF9K+8Oc2#>eTD(ET6@M(^FgPoCdQ;F3+R% z`Cw1`e(H$x=)CjTn)8c)GJ|OL53n6q%jKo1%cQl-3kR2}D3@tx7dA0n8<~FPiOpPx z%lwGT!o17kn#-Flmt9_mOQ3$n5$-?59PuFE`JKP3Zbv_vkfO~=A#vfcM%FGJu4^N% z>+`N()?EKH=#8VKYje{sZn|!m9{KKkuMAU>Ef;;M<6K3SuqgmIqn)I?t34&Yj(P8jk;?uxN9W3>ukI0Vm$QZJhXne z>zjEPxOnUu9Pl_0?Qu}-rA$w*Qh>YhgO~l_?CB8?vjq?Hb&tc_9z=|%MSv~MbmGt> z4{H}sn*dMSXivLDPx~BChZ0Z62cAxyp3bA5E(@Nn>z+rpJxLfZH#skNZ7&ZqFHaXQ zuK+LaXs@G*UgR7vpAs+M2VQ=iUjCzA0SjJ%>t4sUy(k#(AUW?~ZSN2>?@$--umJDy zXz$~R-Vr%dJ|*erC8Z-fy`x6GqZhnSt$Ux|_NHQvo{>9xR{Q8Vv!gLCN6!Zwjg3Bf zA#pk~EIps3828|4eCN@G(W8kAN0Zi%ULHNV<&<|3pMHAt=szoi;X+OgAg4u>soU_2 z2T-x<Uu^T^7Zv-pPPw3g*iSk zsWZ2jK1H2A#iOqaQpvLOf(5U9SQy_jx!HsVKILYbmJHOz2oa1vk#ve}=g*+H~bg1*2>n zZbAvz9p7HKzwOVH3tT==o+v@T>^zzg0L9y<7#e9uvFAr#&40YHI4YJN#;1iyN=NYl zPYObQlP(ZIH=v}S;0MO22mY|}T?#n%EAd!@S)j6GV01R-e;MNL5UX(=D% zZd>q^En2rux!ESO$uc4Adl4hzoz3nF>uihADXcbAf`}p`5*OY*f)d zdfP{M%=_;n`Uxb$g+>4G5k%@kiuxvYupm;B(3XHSkq90lJbe$7 z632rIvw4Uy3O*?~Z=IrE8oXDPEbTuhaw-5-+nC(Bcq)}_hkAn{Ku`jyCZfT+jvH-f z6s|e3=+r{2e5<#DYB16kY!m^Jra`DDm-l{od*w#(VeHB;v7o}TH_ybg#SFnX7DQ;1zK0ExMIvD4^mQ== zqS`n*J0r8D}{GH(NDE-kE|X7UXjr5>(X0F}if z&K0Js&m>>mq#tAwq$OcK;t0X$c0I33WJH00=cn|`AC^BO?>hIoYO z3}BH=x1golkU?kgX(IUjj+>Aum1K1`#Y$RQ))rx!0NzlclWH2z!y`&;rxRPIi~l)# z*iT6<-hxw87WRx1*OQ%{!5wBxVKh35kQ}CdLn|&_iwYQ#;mU=8B!3|~EZs?6LzYE1 zAV|BiK+&r1|oISt;;C}% z>++pwUMc)l;Ck<=H>cy0EhHIigYb2Eh8i%hKJ$eSpMH>(3{4~3_&{A+8Gl~^Iz^41k1)V6`dlSR_nD7}V>18pCqEC7v#VkA78t`g=i`KJBx} zk(7hP)G~`y1Ac(XX0E{FFH>K9Pq~=ljs69qUHN^K>V>mt;(LWVXRnI|M%!P<%mC74 
zFzh&3dhna}G3ve-XYF=A?miPcBEx<2ZNaaROB)T@Say0N^R+ynTCMu*V-TY+5QriZ&FE1KLLRB?g!aum@D$=7$SJK>b&t~ux7;j*Zo$K8L1>mm?M{FU6^jo z1umBUcM9=gH|&@1k67A4QaQEo`;PSOxWk~M;pqpdfTF@u^wGsXu=%{pajkt{w8#ug zKKQ8Wj5e8JGnjmHH%(ieaqIH$XdA?1Y^oDyMWvA3$N~1*FBq~TBi~(!vH@dQG^3U& z70LEd$|1rOT!@xJMcTx2FXq2_mthyw$T^uc5Er?ynY+8meD8fpJ80&?bX?7k^Y2dY zginj?go(;eU;ppY>8qKiI_KkDPL0C0O0K4kETTu@pdezX_vH}v1^=pNQ`kLF{9tmX z&A;?XJ`J3TLZ;_FPDZBf?;Lpl@IpXl?9bU(X%X*(J-Oc^U&LAkrfJv2L(gg#5lEaO9Q&O>>P6uR!I_aCxC-qg2)yCt&-I`>?e5-yEy(+&zO;X$sVJ%(-%edC z=f^#c0*dpMos74_HPWQq?>dVemwtU)|NLtokCU9Wsu{Jr zsk_{JWdkEG>k5^sbAC`qI$;Nyt}-adPaJ%UGL8*2=nZ}x1w%WQwbT*kmaPk%5E^i|Xo!zC=UATQ!^L40WZVJ1hO zPRN8@yi}pXb=%Kx$H>-ffY@g(P$A*5}?M1-RuVhsqef%PX3BR@E@;w zvDx}xYKEZ0g>R3UqKu(94_oP0q|~!Rzm@adbZlgAc&=7q3VcpJc~@`&w)+@~tQeUR zQzQ86=_jV!+rrN@9&>Tup_NKxtJSW zc`%i!eoI^Nxj&E5^>K_8SEJB676*6P6KbdKyfm&XOhBI?>~-rsn+ZO;F8Lg49o3gF zaoFI%5p}r#geMiL*|Ey7=h4JWZpStBsf202?b9EbEps`V&)ub~4{OUjS071Vs=}ZN zOzQrzL!X_&NL^NfM1d1*d-R2+#5ReGbrEs5nPqv1#~WFP-yHVxOFS^;BUr6IlDlft z3nh)jcEcMuBExXCj!2vCOJR?R=XBLsboWpui-S7dYiSJ=*`lt>z>3-3C>UXh30Jch zABMvJD&I94R?ZT`337?u;4RpNA*lQe4?t7Ptop&a912GStG&676G3CUfzz@BSa|^^ z(F$T}WZ(#sPRPAwVaF*Hd!dNaqYTNhvlj{`;#I2=ru z{CJ*@Rg~ls9WHB_kKx=OwbhQ6Mf$&d7*v z()~5pK#M4|!A^z@9>gir^sWoGGK`WBD47x{N|+}~CTmFH*p<9sq1R)kJgQk9I|Q!@ z{L@51itALn^2p!;fjWL}7E8qGbcdt5eP3?$nn?E7I55mt6I2;A~t3D@KbuH1pi!^-UBpF1?*1}R65ACUsG6cQB%BIPgZ(j3i4 zfLSL^6#~uQBjk77o_lJ*_$>5psjnY6nx>qfn+a_*~8Fc7!_T-Kz2?)jwnCQp!nmqe0r%3Li$C$1J z{GD-Z-#NfeNqnZLxjGnEmKjr$aQuqTQP~Q0nwV|bHFW|7-w=n8hf`8){Fis3ep2q@ z6P$blQ{%Hi1ta=syvVsV2~-)G@p&tS0ozg#J@O$~07HW~a6k|+E5k^Y%{YA`?!{Qt zAWD^;LE2(b%6`*9{C^Zql7LzBu?7{Z#0|D$DziBgTu$-adOen>Inv4MRV6tJRXwUH|)1xM#BC#w7^s$K$i zYAWYMIUM0;&}+?=57287R}Ss4LDNe?u=|UEC>a64cLsqE@4kc|)}$sg4}}WKusHBu z?Q1IPtekEttxw|$Lxk9(c{740AFZfmYQ%bKj1YuV3E7Q|16#P2?Z@t=Cs&n)4`weC^en5j@jA`;L~}XgURD z(j4fbo_a`k>^qT>k!q`(cg18y^tUeI*XZOAjecY)LEzM#Ut$BV-r-D0^P5^WS}R$1 z{F9N5=(F=staNIi7KHfKMk+c$V89H6_AH-2DpT1XCD)_7X}9%V?p*t zl!&^PIg2R-N0dn<1s%~Y3o-}-9U@46f$NH;ZdNTx8RI3MJInN!$?7vD#om9h1d;aJG5DIfV+d1ObK1g|1tRouj@+~if< zRjb@}R6Go;Jj_)*ZL2(8RlK}aj&#$T_&xUS4i4pDsSdhVS`|50#iyXkr&Ptay2|%< zm8<*sz5>?a-YWkwm4KxYY z&t0v_{i1&LXU)|ebte2C6RVLYe!^vRx5t=PeoaT?y5UEKw)C@4@iN95H@xrN2-J8P z&}*)`Z0xL2kfc$VcCT>ElS*DLEY-MOeeZT#kYfG4qNf_g{Tlg&_ll?Pl`LwMdIuM+ zYTWsG?@qNw3HA#18}E*IZJB&68&=C&+hZLXa3AL53d#g_7lsh^cTC(2b>Et@e z$XNQfWs;ildqOUAuY+VCYoeF+&mL^4y&rfoe~SpCrpl_ICq0qY9eA{E!SNv2D9iFV z1J3F~R5T8-^nQny!OVwl}Le)S2}5 z3`sOv$Rh&$_@c#=83(OqdcWN6OGV{m2}bw98IY82uU6GRH;L7>qseL0Xf_`bF+e|$ z=)uKVe9Gt%!QaL(Eh3l}Urj9vKgeRZUBWc@C_0Z44M@%8D03kGofguAG(i-uz8DY= z9Tw2C8sKtdtSw5QTBB2Sx3P8C@fM0&U4#*71AVeqG){Wu=z zjWQES&{JGHMGdt@jR;?@>Zwl=r8AoJptbY1#d zM$f0Scp-lQS;@&C^19%+K|e2@pnd2Q&hS+)roHBYL?n?a3>~3=e}lF;!elz8=0AN? 
z5SCAr$=*BA2$8e?a`?8G@RooG$xYlEZfp&bGk}SZ7>1#A3?J}NVdHlB65*#kVlp#M z>^guy!76v|*qgrWH?t;jPufcsu$eel|0{PyM>o4{g!kRb(93~>^(52J2vnc)dQfUP zgD&7_Th^>88r+h6rYHOB#6mel))Rmg)6mo#7)mCFLzKS*{M)}s@6%I|Xe?RQG9g*Q zm_!BY88?4N1sY6+S+8)kQRV1~1$_`q>lpId1Vb5Y;>J_Cs;{wMV=S#lfXVr97CnZ> z|7H@yR%~hu^tGoOKcLHBbo#Hfx0If|K#7D)z_0~at-^Ys`gp^0MwNBO(a9# zTG{ft^_X}B|GF#!DcxjvdIN%4TcvPN%9g_S%M6vlWZA_E?$vE;YwV-G+QnUlAAYZW zU9F)h2=2*&=j$H)Z~tMxPo;}6xe(yrqvb;+_xx=@GXa<^lb(xstd&=F45BH%-oLzK*aQgU=Xlwejs4_z$q>2sRW4kv;i_|#I(l8 znQgUQv(K0d0KL7Yar8}R8%T$RSitxN$%jY(-VZq1686UC=cnDY!1vxRf=)(S5Rss3 z7{bfI8q@{KJ1lZjB2|%Q zArPvxphz#FNE0L!r3#@4C?Hij62O8;2^|s1jedXizPZ2moA>VAdGFtxnar7!oV`AK z?X}m+g{Z{;+{wM7s+^W8pIpfXifTx=+Dub5NY7QyoW7e;r<~RO1Jm4)HQtc@l|Fl} zA!nUNEjHx#2IW#42)PYX@&$RqD*2L)`LZen=Nbz%R0{PP3(ZuXSsk2GaZJt5R05m~z`uI5L|s5rU5ILZ zR8xJTYC~#ML#}FLaZ_WZs*^wU%Z`X-b9Ynofa>eT{gnp&y%y!c8< zv|7pC+$OBnF4^2JtJZO@xkE#(Q?I$xOzn+T^BV`XE|=ymZ?*1#=I#)+o~Y)YM76i6 z&2Mwn-qDoem1@0p&AqK^ecjD{18V)_&HZy~@4q&`UsoI0JM1|2jX;}&YL|)|mJSNP z&b?JC?tgsv-0PgwZyGeI{&*0nXe2Q=_u_z2u-4hV|V zsT3lw*cnCWL#o)mS8H`v(}7zR_$I2@>&i`Je3{-<;au>J9g&D*mbHPbPP*~@%KqE? ztj_vL#|%;wg4k{tK9#n4<@bZ_rg8ciw=s@Xgo|mmYS3<9s>O5+Tqv6B?6M;c<~gPB zs+0#`Jm(*%V2tZAr%v-bRt=MsUl9@PNHa*x&qx-2A^BKhOU#Ai@X<$8kc#sVmyG)I zJf4N5@U`i79p{4{&%n6%QxOwW6qI8GTa#O03xvh0P7dQ+MRt9n^Ci(Sv%nx#@d4jm z_Yco)>imE4?ds>&xGjQDY`d-%8fB`-t6b5p526=2HG=7Yup-4c+J6AUV1m9&=Gpiv zF!(X4%LAcp9~=lB4cOe83QNiC0O4OUI~k(Wno1;=Eac;cS4wmzW$WEkcdAv@J7ypTK1^y$t;9{Lv z{S#_(l(0A|-y$27AL&C}=7A6UNF~F=LtR98$(b(c@QKW>)@F=zcPxBR+l2`cZh#2a zoMMlPlwqufl7|h?TbEs!JN3v{ODuXnl%a$;Nk9a)&kr-=q&Bd! zehNbvytP)$8;IBh@5|9k(khNj{%grP@3s~9os4)det>{w6XrjqokYW;vc*l!VL~38 z*Q1zOO)p4tA-F;%x!}m=GaT?!G0+L?1^X`0Nst}K$H33osUL$^uZ#}qhKf3tz)knL zF;uLkHC?x{gR_(_d`-;jwD_d*)5yz14dEXmA-<2mY^FR+dQo@ZxPw%LZc4Hu290j_ zUveZaWLu+=Kn$y88YKHh8nO){J&03;dvY)PCk03WML7B=R-JM#sPWvGh0na^UM_i- zld)V@F(x&~X4VN{d7W}_@IBJnZ{u7U=_h2Bqc{Aw=6QL(kmq&(sK5(b50$m^qJ9tef$yn7 zYz$od@FgD8EcWKZFX57rM#yZpoOO$`w~>Nz959M26C)c9V){IhkcdIkL#%5-5(>A20Nw=0&w-3ajav z2Sqq=C}34l9`Ms-g5YBUiAxIxLQ3ff=pfrfr8dGD0)c`{JYf22B%>k5TtE!~1W#i_ zwU$hEtFA*&C{VclCqi$P5EyxH>odU7ODN*79kqVqg6u=qIH3b!Px;3wN}pk%rxzCs z@%hJN^*$l(PMgDl{LlTMiDvw!G+&uL-`)fRBp37x5(Z&b)>;xd^ z6562neNIWZpbr77=5*}og=l~V`00|)aa)Vqe4{$B2OwD3j|^|W(R>lkZ~6v&XXDnw z#g|C9Cx%lOQ(HKS*w2@mUbnrJ`H`boJloCm_SB`^BaV`)nQsMQRNKo1LY$@M{L5yK zr!E((bCwuzZGSD&d2R+ zdarTSq<>hssXT2n@Q|xEk8{=4Aa?Ru7wBc_l~s3}Y1{D@u2)r0S3TV9?500*)ir!r z^$MD{n>*sFZ{u9^iLtX^6yk1pdu8o*`n3Jm1Aiga&w6S-M0!~dVAJG>HUEa`tLqQB zo98*#1K-+R+sfg7{q4$n@Z|Khy%z44ji>APzu7tb{K(z<5+F4U5;m7SAnH+dJc*iJ_$}^6L5S~sEu8n8|`|IqvJa3L$ zZalJ?xz63n)5WJQCje=_6f(optvt4o5H#Z?Oy%j( z_nk$`W^$?h4QU77UYoJal!loblI(%S^Xyw`Z|!fM$>r^Lv)syft2JKl#>jw>o{+!$pICYA9v5w)Cp!EeHPT$+6v64YQu!A^bD*TzmCz zuX^3d<)8g#xz{*3>lM(-|8XN_uld_m?|XCnb3exRS~lVl_sQYHK<<4K!!;kAu)sWn zhLY)Fzju^|zyfRTS#@v@VzeE!$UDB@rTo!1F+|{#fZoYBpd;^?T!GKWt%9Bzuq0Yc zh%o9oyI zbI@}K!S6QX2V)H%?^fQLr9UOr#uSKw`cXPQ8Cm_Dnq={PEExu&S`@2%XuTHDH1~Q< zdGH>~O_aZ&PT0IB_aT{K-Rn`!F-Ebp1LoV{wP4R0=`uO3O8j}s4 z$7pEWzyVy}A$TNLXs6Wb*RsuA@Y`+_!)g!oi?YML>A9ncLkyG^?Qr*bozQ+8_YuX) z0VhCe5vu9_wV6J5|LT?#lLYM7hZu(k-yMW^x)R&?c87evh6o?dYn;`P(xl_vZ-Go$ zQ4eB5?vqBY9d4vj55+6)2^nV}tjIn**l9fy=@B|wG6O*I0BY0%TPS!bfqEdjV-BF_ zLH=|{K|WRjqzCC_@rqwUVH9s*j4b3j3L1>2v++iJK|!b&5io06sJ<*j$mbWn@~50R z#03xdG0%fRZ@-X*1Y^0DHHDY+*n`OszRK-kS;%l3f20pkZyiEpp|2lczK&)5f`yGE z_>vPLHRep&2;oB!R%604Q5KTyD7=ei28amXcZAsDspe$xYapM%AlzU8{+R;O#;`bH zV7p|P3)B7-AmR%q#H!Ba$ zcQqeC9Ub6`L9g7zK+Os4yLrpEF~U?)Abqo=aHFI!&9=s!Ah4S!&^*sKEh0D-D!fYu z?P7(;NbnM0?pgx);lROl0Mqh3cSMrV_l-}KPzHMx^IbfnbD{uX9%e{DPUo%PMho*f 
zf_w4=?-Mrnfk<5v|F9-Rod7=+VKc-)+Cv2l3EVG9h`l_pkOg*bUNBr!_$onAHxE1vkrUB$pR9huq_j#HJ{PG}HV5AsgTZU`>=3e^qEAx38SHdyZtG9Ng?%`p&pl7I_} zZP^i`OMpy07O2hxPgMzt<$>EZd9IO8ZOE_O&lB9Qmf+75YG0GSil7g!l4E_McU56&Lnyz;+N!&!?I2K z`)KV_Qz%s{al=?aM!OPfewmk(BAAF47P_qElB^pWzaZKM=J7*Tm_%t2R!Fo{Ld^RY{>j+!HO zM#n@L&=fQNyT8OJu;$CUtWPgWKOH%$5ahaRCiwJHL?SYZAaIape)8#Mqo<$u^8{D8et zumk|pE|zPwjoW1S=POQ!t(Q96BcF6$3Vi}%K$aM23WjM6gWz&xcqOzvZ-m$|is7`| zxpIB$!(7_vby+T_GnP*0Qk+!%oqnxeleBc!QY<$_)dqn@%b7 z_9-`Sjooy!ym`mcB_PG+-k3`Wmn+WFH7dn5cFZ-A%k7D!TWX41=9pV9mwSPwdvS{U z^D*~Iu3I&hx9U=EHI3bB~J&((7xA*Y`26 zbuMq}mZkSziucbkZz`7$$jS#8P(sn+(Ci?{uKa zuQI@+mHU2&)&1_&`@Q4$2e==MSUng|eK0-#V2=CYqSeE%sSm%8KV0Vy*|G}ROAYxs z9zx~Df~>L7G%V8u7QqwBZXL><7Ro;nD$Emh%sNamElheMOqK_C#u|4n4W~MR)8Gl$ zwhpK2rG*rRX7 zoroLYi660!A5V*)o`|30Nm#T__?njReIj9D0{0^jzMI0-KED^o-||8I`=5H8z=b>6uNFnXSB89X46r=~=y#Sp&S;BR1LN>Dkkh z*>k)(i#9o5({sL0=B)GPZrSASrRV;f%%$=YK(+*E27zgcfZ)qxx6R|u$m5^N6Xwf5 zW}7dWkuN=!FUwbO#S%Ga?aYotm zsj^DG=QXy^>oT4?^7=( z*ZC^9Y%BLNDt}H@Qu&A=J0dib$TUqv@K>?hRdHul@lRI?^H(3UtCq~HmY%Mb<*zwo zS9312Ms>PIgTGeWu2wIz)@ZucjQ`~&yO&m(FYTsZI`F@8vU}x{`Rdm6D{ua~J9c#e znRWN3>q7YJad!1lnf0;L^@;oqPwW~}GaE9e8*=#@3+x(;GaH{zH&*gD)z~%FWi~ZU zH?{INci1&|XEygvHxKZ?9MjRR3mGS)l*q#MO7Tr6X}m zTF(r$i^@_pk*#j`RLnEALvdoh|Fo*?9b3s{(1=EAhJg}DISauLoz6vfT80JP`}ixB z+Y!?aqGz|fA4urvqSnUngkQ>v7hhqNwT$DrJ^gp9%5TG#7(A(qCHvMLI%+WeiT4SS z)^owtOvU{#(G}@bF6ZL2yWaD*^y_Q!UMN$~c>&=OfL>OIQ*UtNn1tFZCI7+9T&j>+ zOY@JdrIE~WiN%N*j|KXC{+9~~b~b4WVWbPxseyEJ-i7l@XQ9C-s6_3zj6QfQ*aej1 zqs3)$ND&lzC?3ebQ1a<4CKQ^#1y@NWqw=_?h+7bt%+w%1s|_;BR7NL@sKqg9BMG`9 z8BKm;wM>8f6JCeQ?NbuB+hPZkcpMvx;!M2KJBZ|9+go;?xXl3t??B``1-kQ^?V@=| z&35?}{V*W2r+z+<>b1*QJt=Qj3QpU{gU}z4>VbS@Y7u86!7xH(XM$OT%flf0M4X9~ zQ0-|GFD134bU74~l~J5JNW7vX=Ru~I^fAUlef5jVmdnatEiYf3*r$@|7$u_7bT9?6 z=tmG-ANwREP%QdGj>YUvgyT_vO*AB<4>Wpp#DmhMw$R5O0aqECZ_8P+r_++6`dlwZ zqXth8)1Vy32B#baW_AQG5k4z|-&Db135k+-g7t~zwsRl|e*l23M zO4|7LSxqc6uKL}Ij8lh(o4u&1nu;Sx#&=;blb6|UMR5U<@=#j~YsXp@-j#KFWoMkW zQ?zTtk_fnY$F*PqSj_K@;Z_X^moewlvAUqk7fY;(hE};!SFG#iT0H1qQC)ji#Z`;* zfLE?Oo0stQL;2D%>()J3Zr+K!L&w|n%}?G}%*U&0%s$cc;9E)YivxojOqY-PP=86AD9F=yOT#}9`92_P#iJY2u%2*9rmB-BvNXbz@x!Uq zMZXbazoP+03%iplDuvw52L~&4>kpS%Qc<@lC0LXytZnmUn>^jg8cmLci9~L#JVFXk z_Ot@2EudBtB(&tr%2Tg%^A!;Y{(->KF~3p!NDQ?mi@}54`31@IcwwD9|p!x~n2KHjIEPJ=;FeTEW(}m9mtL=3}j2?Cq zg?sYQdy?^0OU&1iIW~m~#RmQEYvJcr^z}^08!QCpF69kPs9MfWw(p5$GW=>;G2i(X+P9PqE25HY4GjS zE@#h{B=??9^ewq7Gi)9!6cA?aTQq9+_!>M43a3I>n(8X{hV@g46 z!nsu2aq~&$!FDkkzN~$F>S7wll@fiEvWxVFquHl+O3mZTE+d9VOKo?`tU2AU)V%xf zBKR!9?pK+0x8c}c#LIG5ljpX){$np2cPe~NUbI^@9B-Vut8nM_^J{zW#*06_ti;00 zF%DsH-ad3O&ZPW0_pQg?azLifN++BojV9lzgZV#7L6!5JCf^5-)l46jyXyTMmGKA_ zx6$|H((j!boAP6N?uonQVq{kbdZ&o`Cvw4QKE`Gssei`>gnLI9-tiAE=5{` z>$ruKe=OHaAqDftsXCz6UmN*{bHcm|X-!IdC>ACwcUg=B8VuIT4(lK|lKop2sjF~1F zmkaM0Kq-Vfjk9kNouG)V{Tdiz#r}4?yrK}OT`FjD5UOscvym#OLu7x)Fuz~WpEBiv zeWcMHm{|R9*FcUh0jMHdtWf9E4^AfYN={#>s)vup(^kY$yqhk`J{`@=X_`f|FyzUs7kT| z?`-LO$vbzhn}!!UllX!)A%eX+oQ8bz&hyg5i;PBgW!*yGPBy={{Z8pX>d9<$a=(E> zHSYa->Tx-l%rQ`8(V_{~;OUKa^8ud(X-NeQ$rW+$z1P;w$Rbe^HTI32Ol2%)1bQJO zom=7D7E^7-x!CG0ZRfX-Mub`V)r~erFI#!;d{9z})oFQ|@6`RMrvkGgiYlx+iO?vl z+9pa`zaii(uyQ@Bd3a<5H%jW~0GEI~Ra2+gwb)U6;9?kUBg}`4`X#w2)gdi^W=>zv+{F?h%X&iH?@WR z+Qz!Yaca`&w^SN+NTbHiiZZ*TI-3K3hFlmI@dr^$u zP^N?;<1huT1e^yrO7eZ9J>+y$8?(Q;0N-VTIbIHS7>bt^v42E0muh8+q=ZiziGX<1 z(0q#KVM={@ECEVtX1$Im*H8>RPLw0Hr{jd}#v|+S z7(}R)&M}suNWDG*o^N_x$8{|p-Vj*i=1zN~*tikzQ`^IZOVg4y9Et;19ox%+y*Ztq zlz6$*NdRilqjl3a;i|_1AJC>lEwU=^QMj{Mu6~Dx&=5YYuTY{Rsz=3iIKiD`5xwuK zt&!oI7^f!Af9~x&%f^e>v;D4%YXlkSeWWCn8Q(Z@vf-W8!DcGSeiZSI$G|9(?cS7h z4|h>we?H&T|a>`EOjP&e-{b 
z3*+y`Uj4>}_~-Utf8)Z3=b#umwlvas%fZecT=>x#-p;a1jQ))ay%Sw3yH)u3a_8T; zV7ptL)?a?}TrUk5c54Xm3RgX&sbNJf1`D#DS~Dg>rhfM&F}}jX#b|m;c53%keSd{l zKrhXYcDIfMf8i5lG_%0BSKnju;&y89%omlthQat3e#J(!WG#t*!4L!h4=@3x{yT>J zuWj)8-h}?U4SuSe)4ME#nj-Ba(EQ}T+u#=9V)0GhdX-nJziseaV6p;jZY=>!!lupj8;dt@Qrd!UpMZ2 zpQyPt_x^R$-a5G>mMZ+W-r_F|*`qA>=9;y({Q9}S^?h!jwUw4A0FfeN!CbE7P?$ss zIgD9xiHt+)O3jDUZ1CqJcy5-=M+*2a%}1dkr52*aQd}2eq>4%w9?85~T8NeFl3I*Y z7;|0x2Zk&rq9K3pE%YTyKRq^9{PyXIx$g1L$(JqNK0md-S^7D}-v8U@)ZaGvY0fEb zU(#KRO21@yy!!Sf)2HkB*DSv=x3AfOUrN8`+~587H5Usxu|x>xa$m~(JBBP3;B`-Y zD@?X@|Mo2HX4$u*EdS+i#e~Qc%O!;=?#rbmfA1}x6T7GzFJ3)lwPa zg>n*uMxf5=yBybyO*;80hjOq3V_9u2nf-83ht$$OaIMx6H#O@eB-5!OFyNeGy!hmH|JILrCC7gsCLp8XHu3ecnJo2dk}=aymYnK`ubCzEh1%0s9e< z5RH8x!!U^j*cI=R1;|jM)@tIWUgM(+NRcofEKWEJ&9*|q!Bga5DJK`fHnDFQTG|-+ z?-vBy)xV=%kr%yu+E*eui}&k>lCQw&$TRopdi$&zeCmC|=`^7yFy^mwKO{htF*d9( zHNz|pjl383BPD#$kh}`iUE4!l84SKIP%V^s%bHH$U0~PRGYd1XLIFkcLKkot9qGTX zxkD(1`_l9R=Q|hsUGb1NF&K^&R6EyT5)4U_4_vVB;%{et#2F_lWQPtL)W>F6;|Z>D z9Nprk03(z4RZon6I_Gq5gmY2=&~j}k8)P43N?hboMCK?B%c)D(`ed!ZtY>{lpw%{c z4n?9S2cs0VR&uZgu3CeG2NJ>EZKt?;u|UNmk+=|757s5L9tVX~Esn;APxW2uAfAqQ zWA=)E3jFq-uj`B_M%x9@dsFC*Zv)*F{#lH%fQoMEnIKwmKJNK>v4~aSwfK8S&pxS~ z9K!px8<(Ex?i3m_gt5%F$$N(}EcffGTq%6TDruEnW*LzonUl|2>YEfRNsmJ|_^#7F znzSx>H&zyT_LP19lXLIyUat2#TXy|Ibbr{1E6r@q6?Y#zzxtMM-1Owj{~kj!c56#a zD%`DTy+z~SFeElzGWZb0@ZVy{vh7|Y%?97Uu6K4VIG}+_T-7K?f85O2YhE&`3?A?O zxZAk*ni5}m|Etm5!PH*Met+e|z23Q_gS}P&3lR%7Ce!inlVGOAuytuX_Fs59Wm-!^ zn_tl2sOg_n7yAh@94Hd-Ze};mCuMM-`}-#i zUewt*`5re^wm7(gc~ti%eGW~-!{~gSqSvNQ=NKNW&w8cpDGT&X>Rb|0j7zMF0c{0SMEcoh*R*>zF8! zfEJ>A!_b*x?x^kiKPTGg#Zy~iIF9?|ybS>Y|JhQ01~LGd035XM$oa#QuG!Ux(tYh; z>_!M`M^Fd*pUVLgHPqPU`n31FKelQz3CQp`zyFB#JMse#ejdnb2%S4>up_UDw}-6E zi1=7-NvQg(PR<^7d3~h2r{(?SXB0`GHIq#c;DZ_XLMk?~6BTnG&1uS13T-rEyd+`r zoX(=lw)E^gE%2s4C|Tc;P9Vfb!kE}?A)*noa`>ahMSiu_Gm1LN4EyIT|A$Ac2tWW9 zfn30UBToMphnvMW4nqHy@iXkm6ov}@bs#=M?o0Vc#?MLLUj+;vOD37j&``=O+eT4-`xB3e?@vgK7*{=}O~nA9Wzqv$%BJbZ=gx9d3S#82)cQ+@#!EYgj6J zzSg+%>ib#~rAvCfd3)^E`s@8K&nvAIR(f;f=#;S_O|g}4vi^WG&fRUZp(IKG>=4JI z#fkuTbPR0(=(2f2_O!bfY=H8C0*M0IN{JceA3IOn+cW8v>z3K-QyBNy>R0+&zV%*h zZ)Ix$4L!L%sKf2KJ)|#Lu{~^jZgqRaT<_%0Uk$RgOU2HZeZcBp#OZ`{s^{*cYjMTy zltt`Dn=(`wMaB*7g_idZ&JT zO1AR)@j1=q#h(WG$5%qsse`4$R4*EF%B>1-m@hnnt0woL-l|lj9yJPb4qBM4KC8h+ zLHr5Y?|TK<2EKm}ybRD+?GWCN-{97)9NLmJ&_M23I64SzzB}Xn0J}oePTE~+p0wDP zyy1>KSag^h+(T4R({?}n{P`~)M=9WJLff1XcNUy6Q12Wd<2p=}Q{k#(3Rt0EX` zy@2|GQHq3aZK0z@gwK8y6Lnq=djmEUJb=rkE{y|3?>`DWF9TDJ29$_DOcc!H0WfhY zA;e@z4=~L+yJ%P#gE@+cK4?~yi;PvgPKI`4($VcAuT#Z+QCp}mx-V&MI~Kk%RKlF* zTAHS+s0H3l#~D?i-;)X)LWL=av3-LX9QY1|8{QuP<7wX2L>vPE)xm5=q}y;LGi+g^ z4kjGcLrG48gNslWMUuGxgeI^TtEVQnFYjM?QG~Mp8EvYZIM(lUy=i+6CcfJfx<5>_ zmn6_}okj#I_z0PmV9ziNzq@v{c{iDdgg7-H0W1s^GzaGaDTEj!vLhXje)_r7QpGL{ zr!!yZxS;g*sc=69cEN=j7cJ``<1iv9efdt29BcbK=WCUDw{>#SXU`fvB(B`uz(xrG zr`jSQo5F3MZu}@u*Zk=yAF{F!gh>>^Suy16)InWP4_=bdT9f}8N*g@n8!ZkJ57kWpXTGWL{x9LoiNE4#8J17oR}-AI?Jo*->yERQI@e zJD?yiO8HCD1+GRPUN>U9D5RQMR>q0dvgEmPJ8Pn(F{;Svf}~2{Bg>bDaqJw2xJ%IB zn3L2%5e~R9NHQU5#M*)MlE4(DJ>_d8f@CRQ}UHAflbLfMxB z1?duhs+eY7d39^nMW%;fgX?N%vO_)BG+H{J-0leUe%%TF$jjt!-ws%w1wc#zI0h*) z+lDn5VBK>GNW=$Lp|t2FC?LZ=918@i*~W+tU&(jk;GDq1`37Zc6T;~Dh@l{b!Er_z zN1@&{0F1bi$Us8pat8XUI1+$NIV7OsAQ~ipl}dFKg_?x;dn@?N35|x zvgUoBF}bL0UKZ}hBnaG3=AqL;q8kdp0(}+^LZd95P4SdmIF^GIX zAf-_GV7bp6g)I-bs#TX*?wfFi7wFu&PF=Yb^7FA|Ga$ruUR&6Rt{+DN`lam)Lxdhp zT>>yDcR%E=6L#_AX`Q@=w-Vn0Ny_>9VF53=8L1zf1UnsqhBa4GycM9qcZ(d`&F{Hk z0J_drEFaD7!Q+pV3;!$3X8r;eR`RZ zB31VUwne08LNe;?JAHM&U4Q4IR0K#835R1Z2V#Nq=D=%YmWoS2Gfcz^8kE0>ICc)! 
zhy^^}H9v9o^sB;9*GRx)IYb}^;5n)b!-53$kP}!i3(3m~Mh8Oy#=jV6oI{`}U`e4{NN)8*ni;)0 z5uCRlv~F(dy+nH*!Rwd@nx72@Bx2}#SZKNz%YAcWC0~CmKsTHv!{-6>KBD*&OT-P3 zB*xHYNz2~a(_{1zs}7QuIVFkKq+(bUFaUN+Boh{TA~5#ke%OuQx|c^VEOZx(({*M^ zB}5(~>FMgt01;sN!FU!qAcRbha5dU{Vz8Fv!-0qJb-N=YHwGpyK za3+#eFX`#D4>FmJ(Txm%6$f3be>zQs`xf7q{S+oxcroHx&^#q|hzOr2v*@QMjnze- z{iN|u2h2xFvTI1z`jlv>?~w;EGs4EL$axD5Y7~FM2lJYT0L-qDH7mD3ViXC_1{J=b z2kUZ-gWZUhLO7!sQU{Q*MM9v=U5Y;;u9*X9vkrbD5OM}9NeiwZ)7#(;k1+IbD%tV7 zGvbS-+!tg3Z$MZc*{cd0wuR0j2WByj8j9~&rwrfN^EZD?@#~7Yp5uIHblYt;ie*_D zITn+RPrgXMq{@hbdpBUaI#~2+f7B5J#=*x2yFoACvPc5xO1Ehd84n_!!qg~;4H6>N z9I)y}&q#!y;(mPNN1AM)&%&2{IX?AXvQ{Q<-mQ9=%<^s8u8hG`uTn4};w-|S0xS&@ z7D54PdRROGfCo}8#3ijTUG$VW@M)Uvb%uvk6c!(B6TbmM$@%Iy(`U)SlqB?AZvy)K8I0us3DiK}cjw@*{TV_4 zbhduxZW)kUDh>zRSH#MUV@UZ+WG&tjFGT`?7EIzP2aH9=LWn>dlF>*GfcaMP&Y3>= zT`}TwG1eV`v@P0?&^-PLAn(JF3ZUB<^<~EzS~teI7Q3es=>%!+dCmYubh)$~a6~Sj zQVSzC0-3tO32y^sm-2Ba=3|CH9#Vxqxne`Eh}W%X`GdpE0Z5ycxUg4tO%7Oq3{*oh z;wXvx8!Gy8z~gruVZ>rk70?`=se!#wY=9jULw5VnNq0uFAmj3oPw3|94?Y83?jmha zp8S-m?_Bh^^Whxkj;CsMjrAh0E~$1s0_p?MU{*#BDgY`EF!Mu}t)njw)xz))x*zYf ze#@Gk>%!$CAunMD(`b1kv?UhE1E32e;4Cp9N;p*Qd(F?z>c~bj4WH93cdA^Ast}xv zF_AzeN-|v)&=Q@gL?(9LrCT}xjkua;0YE%xpd6Bs{fSoF-o=z^Px?vCm_fkDI%W=| z+A3tdoLqe%`W19LoU0Rjak}no12bo%!{w)1OoPxE0P#m?7JX9I#Gppuv}P9!S|5xo zxp%&(w2`~h+ZV-6fzm@=e~B%3~BL2J7f_Sf13M>|Ca&2-$qI<(VBbPGq53g zTRoP&^jB!VoY@#f@}z*P-+Vhg_`f1L{sRZ{A1?iq?9up5_DH@UqeJOwWLpU_PbrL< zpT*A=>DUg1{Z(u)1BZW2M`_AEfs zv>)tWWNfTO8;^-Jev0K7d?3I@An*kM>&vSMw{m_G&zW1>EjCUQ;sk z4;kAZHinTc#x(GIi#T7PF&V)}ZD+U-av_hMV}<_0Ss0PO+Uatbd`c9L(3%*HIf`b zDQdCb( zXm-KFJqgDc7$!v81~nuwbvl}e>&FKp==vjAoR(a}K59IIAOG@Ou0R_#J>sBeemjKa z3WsbAE^0XY1kA8aiM;JVSdu6xmV$Zgtb*;~RtG=FJk?@SL-|6RptbTz-%pQ*FZ?xX z@`&sHbp{?SJ|)f)T|0C@Tk$VZ8SNKoqAj2&RnU`H7KoqJ?S8W{Og!eA zhJGRz^e?}A*3Bwju58?YA^yJQ@XPo0R)FeVZxT4MbG?oJw~USXT=Qt#FBO_xfy2tf zR|~0kf1^tvU}d9$cm3x^k65b5W}HaP-OYD0ThDIy$_Xdm{)>$<=#Tru#@x|CR_Wp2 zYy4(o)T82PYz(3hJ;HrVu6Ee`w_JglJ^nWvGrN>6e`$4x#>Sj40HK$eswRK4G55Qx zcH$?zf3q={=NFMGyo-O>m}6JgV?e)9461i-3H)YbP7TkDvpB!kfRjR%+7o`WF+4H! zl~cFw(Ab!K1hdMQLYj>2i*4oF!S}#AmY*xNPfp%jt^a!M>}=!S+P$GV>ZwDD9MtS^ z16>($*siJQeXu#Gy)CagYUTZFXTqiO*X~Tf`fs^{?9u+GRPUo7OU1N2J(`T|=qIIH zmU_57?oIu*|Fx2O0A}s(paPf)0H{8Rj*o(c858KZdq@zK-*N>4SW>@@(Uua%(?@`u z>uF;NrqF;r52mNz&Spqd(&9(5;z{jXjg$yET9~bie#ajf+i$r-PlwO}B?`@)j|`5| z?-b?Rh}JjGXG`trlu+4-F^|jV2+k>IWN%Smr_$O|GEERrp87#{ITOS7kE^A6F6$|1|(U*q4D_)qCc?dz}wzM8|pI*BwUIBJDcx{3)lfHP=kHThunr zKtI{>e+IyF7aUHHzqr;^Sa8(*Bg&MWT__1MoOg$Lw(dge63kip@+HyGV)`RiDX-B? z4TCGK3_E6N+7+{(bV@W5J<*_@t99`84ZW^-ufcUiFaZ`3WwsQ5*{wbriRt zK4(ws_Xde+;j*i8nr~G;Ax}Swi6Jfav$hy-(JAcjKR%|QQ+q~;<&*Iu=me$qiH;aW zX9(hCU@o!nx5@WSalyMEesy$etcv|GIPZU`#}c4}$?Fi(n>R?fN1Nz^mE%|@Sy
    4(UD^Vmz}f zRq*gqB99lTfF8ATzkvQm<&^s$a@hNJz^f;4MT6P5Ctgyvt8{Apv-A=KlQj=`Xp*HN z7IFFgi-}xb__H|KC07w$l@CEy*UvlaSPW>0C+KM_CfoR|l!`kzhCnnkFUr=}Ay^JHY-``!r;Kps|?Yp^d%#kmElMr8n zmd-gFL@!nML5>3Ez*oCVBGo^h{7nu&l>O}7P@Fec;I{Pf`SZ*-mt%EKyf|10O6NRS zE=#<5Z>0z-dr0Zj4rtOLNpidwo-FAY-6-H4y{8##=5YAC96cx>lW6@@jviJ#O8gJy z=;&~7z13g^(?eBRNCnc$(JmW=?s#<^J*^zQDno+#> zPyKf}q6a}(QJNGi7NO;cQqb3DGNF~DYvPJ$K9N<0s>S$e<>>SzR+(Bd-ViF_g;TxU zqfl#c21P4JCzLhWO!gBbv}o$7xEMP3`takM;y?tvMy!nK{{7g#E;N8WH&Eyi} zXD6(FtEV=f662o9xES=>PHm>-%P?uSN1tb5z<6}**6W-eChKHmRk0}W9igffr)R!?nZQ{sx%z8VY!Pi^Jw z_Z6$}y`>I3JlM+pqpCHesi(FHFq0B3?%(RE?L1`slKO|YxR6iUsIN1j|BuvDo~0}a z=83UK$AotD%PywU&eeZcPZ@qF`>m?|zf(^+%yz4TA0rA>HeUo*@T>1WN^GX7r&Jbi zIRK3&+WG!#^^|QcrQk&I$qIJ_Z{j~D-!%0UwafoyJ-wK!R%nz=niPW`>|fMVUcS_M zMF;Zfy1{-??PwH^yZjPVWhwNp$@g9p8D1IKstV#>LYWMi$nwa(1ATYrXwr)Qx5;;v zDvk|q`5Lf0&xJvQgh^JgMH6BecOQ9`Q4^|_7RqDJNoGv-VNjv8v7q{91p@awP%Kr^ zdd3TEnfsl}6KlxZn0c-{cm{bnAOCyU1-|M1E+vRBBHSD#(6HalWFH#JYP=}G|D#9W z^mJlq-zQ)@rf14zGbUE%#&M>3NVaQr%9h?NT<=HE!zyyT@ zh#f|8F|A8`KT3T$4`Ivf*nK&nTIDDSBzkY6>Dk9M_!mmz*%<)SJ+#l$3bAjtiR<+0 z_`3=B%yA*xAf)lnZYkIMIMHsx1f8!Y!>96Dr8yMjG)1m9HXEcbIS-;M)r2D-EDCj! z#{LI;?-|x)yRPe|7fRmHJA|TO4}wyKP?RDbTnr;RG*T3<*_3G8wQ`|As2CSob-S#zvL;}uB6if?ZT0r+7xQkfa zS<|t_1kf>6@7PckB>N;MD^a8?yV?hgMy?^?cISg%!*ysX_!E+Hu@)~M@n*^D z4ej10hDo);Z;ibtG^ML&Zdz{$nCZkBshtda=Y5?<_VGqI^*2a)d_u(Qhkcoxwa9<< z`dCxW`ID!ZHh~i>8D<#-q(Obn!~NLP)v--#YgmicRyO&^yT!NAe6X?5kaj!v{17#N zZKK;WLGFFhH{8 zEzXZ$!_Vf18-C<=z`AUE;A?xs1T?F+?9cMs#np>1EKRVG*^Qa6Rqbz^^)v8qKy?j z?_Gr25l^}GMF$K)@<)8jc18_F6Fc?=>qy07j7O)dq#E;i(@ik%iX=cV(wQ1+`0LFS)U0=cj zenP!+Vxwtdvq9orK5&D3thy#qtpW%^?b^Ep8zNpBJA|!Q7g$PL#S38;>g`+h<5r zdbhIJQLQk~MH2&^(sQCVspg9YLpephzYQ`j)WE#D$xgzErZC=T8+AAMY?m!@GMQ?s zBi%83>zTEM7vk`s&5@hGBA6ZGltPM!@X^X(AMVA-PeQQlD&m{xWYg$|91mAgm4aw? 
zaeE>**2eHi!FkO;BbfQY-wtL}NZ3gp*WWLW)p>*qHg90>w>SWcpox^|ok@ho*9B{S z#`wh%Oqz=ccCkKGR8^dum0Vswt}tht90&`sW2k*g)Mo_4PRQ)r@#eAq9}&!x8>!eJ z5!YnEMr)g1lQl`&y`LI|hCPjh2SnO!Pm#_*#VY?1!7Q>bMcc!oI7xvn`=N6R2TvhX zLO8Oi=x$wip)=cZ8MZNFdK*mA z{+(sU2q8?dTi@oLU~c4B1QXY#lFCBC&53!567Kqi|9c{s|0JdG<$+-K2Nxq(^6Ftq z$9WCfgGJI}EM8}A^__Xf)dIshP7q{YKSAPA;#g7e2uZy|3dh+XknNny+ zKh{klZ2C7-3RN%RpLGB2l!EU0sUI=^zoZnzTs(fgTFk}l*41Jz?mf96=Hi@Ec z89xW4!cQ)?MPLTCjIb!{^=*y2yFg@-vMu@t7w=&sU-=ba_HIO|aNUq=D+{oqSjPtK z_bew17o96Qvj6}|rHi>3n$)JYWRHwHsr-P{Wt0v4ba|9%U7@n|hb9eJYRraF8~8B> z)Ob@D-8-?Ugh_g66lsVQ85OF&;1f0p?2?vb3}QoxH0_wJ$@DsR=`jDOU0kHK12$4= z$+9T?EGbPmT_}oTK&kYC^u6j{Iu1@{3iH?~-;N}Wj%mPml){jzvr#){L{sSoG0fUN zNS`D=-d|;y=s^JMo;8A6r1_)PmFXuk7S!peXJa0w>~ro(7jVEHk*yu2YxJ@9a)|vwq!pzUWica4X_~~LmrZ_Lzttm| z9=8l_PK@Q9c4QnAdSgValr!F6InSOlY3VAwaTUm;(lrhV`K8bLc zk$hHt5DrBfNFcvb$=srdxqW&H$rrmW_4pQPygXvO`Su0!J~?Y;`8LUc)TDWh!y8@R zP^CJCJB~Xa#yL5%T>aj^xan%3sVaLEKHX=p!@Q#D&<6%(#@C;3+=Tdq=eUbc@nVFr zWN&wp&}trt2>y0U?g>)mESsh}LXuwar%49lBVS^DkwzRFYuSdLJxkBQub4@#nAn4@ z6BcRB%dLO|30n2pJsak@ty+h%k5w^Lt7|rGdBTEiRFMJgX5T*4{&NiJv#_(0t9_fT z-x8wY5waPjTRw+UV&lzMuSH9&75m(0QtYo&)cdjNUX{91S8OZe%qcNh6+B>NhHi1; z1rp#{@OpN6tkx3eQ!r#!V_oqxq*`tECZ&jseSG8+#q#Y{~dCO zsPQ?89q=_3uuu+)QPMj;?_~;0zU&qOS>#!pE7~VN#?mz>@CVndwAKAwpe37Z7{E3h zLOe4`w`ndO*xMbXd^z5GOw&3(%Wlx$*dj{ zRn+mJ{XNYvqppIvg5IRiU)k;(v)PJQUbs1VF7YY0qd_KPYf6Nr+0$Up_Ur;GoX|M; zOq_hlJ9lQpQf<#FJ`QQRC^)jBIso&wY?|;neAHQKi4FFX4Tj(6Yj^Z+`Z3l<3zCc9 z=A63*3ywtH9#G~wV_TJRMkPTpbbJp0*E*fB!Z`5hgy+J1Z#|}p9lPQ9sYCt0m$u-Re;E8{1A_b7?ZmAD9 zJe6;~xweW8T+MmouT_^tIsGMM$4!kd^am5E?X%A^|JIQ>H!iBUY;@jD&nH;ik&yhO zBhj_}_XOsT&xZuk6@NZr|InZP^dB)xBI!ZQuM3P$^Qz4&kG^gTC`eBJ#|upR&kjlL z*FjwN-(r>o+R67ww;zLjTGi%(3gAUr*d_7jqj34bRgaWeUHY08PQIRM;>6Cn$;NW% zMArVz(^u^ck4t{0DX&@GdKXeKmo_eWyy;(inD{48Bp7iTc^ee~^*mJrNDrv0S7@I+X$f!-x;{GRMubhnVnV~(ZQc9&y zpj%@T)BaRjR`kBE;L8vS-7Mk_bZr3RK;N%lR&A z=_sz-SQZb$ffw{9n$!u@nb22$9Fl_~4rTwNZUdz)73oDnHC*VnuNWk_DT5pABVI|- zC?kn=p=kuN8?2d>DF5p;G+rD@B$4j6I^D8$@c_#NGfV@p1puEPBW0TkSlWX%N9mDD z+=k>OV!c#cQ)~Y~RiZ+(m*ya&!9c{ZfjPZ*TO+DDU>RvDahxKyj3{r^;mSy=kFHY0 za}euEyp)jZP$Zd-lxw4;oa2GWS{>MLjRA>%z6f;43j|;+1DINZH3kU~X*wZFsgkO( zv3Yo`ZwmMl0aY_r9?2= zC)t>CNUDVk4u*UEMB@De>98^qQnrreDw7zg0hVtye9JLe-RLdpGO8*S)%94xKMm5p`u>X>?)zqi6RAwKn4m*VC~Qfh&iRTSp+!ah0yNoWZ%bugIoKPj5%N=kv z+1Yw<_-ysrQq#(Ys@A-l0hY=`ubO3&{c|xIeVmvzk5aequ2N7t{-UK{Le}G6)r}kb zZ*UeEK^>1Yx8v@It(+ZDd`e^$-S{H+8nZ5;uVvGhv&?VDy_RM4r&1JwR_26Pd8}pR z^+23v#-wjZY13y^XenCBf$b;N29e>l18?ZvkXHAUKYw%^)niQU_adB5U&f1GV) zx%8VSyO!zkv(JwdREe&_!HXf0=aCirq4{v2`@UO=;tK$A5%RE>Aq-GW>H$*jb(&)G@5Nz;7jBDj2aY?{$6-2`kbvJKis~n0ri2 zu3(U-Qw;EfPdqv+E>?GZSRFRkPQu(~wz`b0&6(@)b-kV4+cC2K-kj*EfAa0zk1ik0 zU(Iy}ciz7Az2l?R?YX=3ep~rwY_zS~JcsUDS*Y}Mbd$w=7bCf{Sbxiy^WORA%+AUZ zlc!^@Ve>Com^)<-TgJEL%y*Z$-nqK#grDsxn|cBL&h;Z(Cic9V@2Tm$b2I3-yV;gW zFSUifHrJ|(%%_uwEf)GY$yJr5TRt7#yD-q!(t5J;>8F!n3$M7C>YCOqpHJs3ydHI} zzT5lsbKt#&H@xKPx{q7FguYsM%kQjy@crqR^9u{_KtN61w2lArq8bOdiCD(i$&0ZN z3}RDin)O|$;`a)Ml(pg;jow}g4i*gKfZ8?(*RN?gx$ohke$&?WGnT7(Qy=gtwH-%X zr?V%vOPW5b?F_mJH>5X6rxG>>>~g(ijc-6YT1SdE6wQ=aERGRV=4H|-(O35_j(gk+ zm94r2Gt_HrvPY)q2?wD!^=-y|-R=!sVSKB)ry=RCb?-^jd5yIh5fhuWBHw=R{Qh8J z@e2hI$dnQI)oO3Yo&2HZBd*`a><2$P19k89-9jG93BNL*)s3tnf4f;tn@T!@vCt&X zw(Szmlzmy?nmn8DxvlZ}7$|JCXgIavy8_pg-HUC;&IHi1-@uZt>GG zO-Too=*)-=Tr>C0{03!#OcZX8jJI9PZ4Gp1MSR{B?9N7f%~aP3keb~U(dqz)O-VT6 zVUIpiL`00dZ|IiNAm`RVhkBHVjdZ{xlZfZwZQ<@9X*5$kjTF}NQBs6O8yd*I%se03 z3l)5nhEUIk#nLtqp=PWg@qp2e5PS@a_ToU-nx6X<6v@-4t#DKq@DQtg!ySl#Sd`;{ zhe8Y^Q;87{9P}|36aq#;sPNEU6vReS1fu5o#~|rZsSL~lULuTePJfgdhK+ty4!5F2 
z)aYZq{9)$)Fb@v26B#4opf?b(nj$cHjDbC1Ks$j&4^n_e4#K@R>J^e4ZmK>RAuR7=tCy^$O^uK8zC{8ZeDsizN>-U`P89MmSoy&pB6Lx(7e5 zCKZ^%!YTAHAKw_Aa-OXdTk3|PPmgabF+iw$cI zjZ;V32`VeHttq%UlV|-u_ zC;4-4gq=8~XpnNJ0g&)Zsp*A!VIz(FVXHaO`hc;W z1t0BzMbA;9mYSwI_d<`3rr*()UEY^iR-FRVMjSH1zQX2Y^e51laf1nOv3{+QXh!@Y?w9pD_N{5v-O{XBqw zB_FjsM4={VAv1D;$AXx%;8&qyJ)GiImK_J`0g~=QhssE(&Jh7heX7!)!idcCVJr+* zRYA*HZH@wP*s-=G+R~#1qb89Y4lvkXXq^QtaHUi6$fxKW`#r^*o&=u3qTdpM6;Fy9 zr~s@Q_{6|`;7acdz3h>FdC%R;`vxw1UcLN*f%(K^Z5c0aZa9I?EIE3&nU{O3Y|rH_D zT9&=I@+a~9CYAX?Jd3?gaNT0q{eyV0@9r*f+=5$CNBPm2t6?8ZdH^#|59a>t%x%8( zdYwvxrlK&3^mW^FI?oLcx>y7Y>^ZY3UNZFxODfNSRYR(+VEBKMps;9N<;q9`n ziF*N06n_c{Yd2d&d$gCs8wGAkx5E?i6Y^>ni4Z2Ry!=H^L1n*P(RP}wzh*c{V}m6NX@ zKOsFg4t5Z&h_o$788-m1Ja(h~R(vJlT?K|n2KyPm-;guYopLl{(`5Lqr{CXPONf|LNl*DtQ(8Y_5n?69|AUlP zHZRJ+r9fg=+y5k`C7wqAXDO{x$45VuRT&(FwALB@(~7=w(x zqbS^pG&5>FmBAlu05tpWaiqU$k%KSVu2zu=^nZ?PQh3^K zxJE1r5TjP-ok3}+N|fG!KXxtlJ7n$uWohX2 zX$POX4VD&CVJhLeijM3C9hevNzyJYfOtDeL>0{S&o=d1=9mj+Lk$yt&97lYwHJu?c z#R}}yJSiG=LNHR5a}fQgvG-1)zv)q1BwCM8l|Em8Q_Im+J=Vx$MS3Ptv+jB5OC2RC z-*2>lw?^_J&MV7GFf;=*qeb)RF;iI1kb@;_tRHPE|Js02H+e}@WD4lABNlZ}C=-8dl_a`7a{ry?!R-P2Z1*~{-UIfV2e_7lFiYkkv1=BV4w=hg2_fww-_|w|ZQHRV z@l55*r_$@A?)CJa`>b|MI`4$!^XsRYtAiwkh)BQQd)%Z`xSoYgeAx5aJ4=}x8q|*4 z-CQ?#xug#_ZsC~dHi-*-%4{)RI*@cmOgv`-nylV#)s0*GJ{4rvcG|3hI5Y>GoN>iX z5Gvxm;+{7b*gcjpypr*uq9)GeSUc(BZum^Ce4taOM5g5QF`eTZ-lBa(aK3p=wu${? zMdYJUIAK?+!*hxm^6+Kn{xkOK+>ehf2iw;>ow>ayNX9}bdvzIKefhdMAYbCqQwXf?ut(FsLp*(*RaMnu;;~r3 zO`-lR;;Bcv@*>p)T>k1d%QjNdtRB!xkq< zz`ed!*V(dCFlL&obY0LeTXk>olYh#+*B@QK-+i_CIr!PVzY|X#7wN`tR1@;(ZgoRS z&-g7C!YM{d-FxJ%K#!-xKZ(cW*<5FsaGC|&A9Zk>f1V>I9=H4ByTrtEPxy_Ua)0uO z+rq#r;cU&b`=5iJExf%a#Lb}|@M7HrLuw*^o7;o0na>0x7NR*$%7d9wx5e?jqWQjO z4`!>MEq)FYEpUN{B7Uoz@N14p&|7_HzW14M_MT{wm-0|>V{}zG$6JVvM_Tb4$O1`y zK4dULgvHpi;DIdkJ4yt!VTDj%x#>IbHWm=pqnGj^OYxAC^#Gq8A#JMsp_&{)5?=(N ztHsx2v17L_LW8a}(h8j5L5#3aLy+_WB@$8-c^XXls00jhC6n;TPej_XvPda9?GW{R zp#{eV8_*qt>2zkq zF_8zoqm}Lgj=Sy^o)R3LRwkbj9M{+yr_PLXvWaW`7&m~7cPx)iaf~nKQ&P$N*K{kaIi`Ea3tY2Jz=mkp}H;M-N%GdR3g(LaidM*Bi}?ZxJZ;G-l~c37!B_X zj^`*RwVNb$`zCe8CS`jh4fMsmu8}{?PbyMQPIn9+@<<*Tm4A;)o~Vf(;3t1IO_>ot z0R*S;nJM!%DT2NfAwLDA!iefhhV5n`LKvtlhD0qxs-N*<*9weEs;pV6{O(l6kW}R{ z1}rsIy+2iRE)}Perel^?XPdS(B+VczZFz0l%Kk)H0~xQ9zSb<=cz62xkaW}9taS6* zbj$v9tGRT7N`|djhW+l0O=2xYR)%wJhD(2j>s$s=<-#_z3+}rwIPAvqS<-#13w@jm zo^uySDi^)XF8b`gcsS&uZ`Q@5wHJ@~UpzT?(O)I=v{~la-I;+QnZa3^p|zRe{h8=@&}{n+S#6vHHAIBKi@Cv`1aRE)l143PxW`af@LZHgWd~y zjP~DA@0I@UxLEIX?Wx=|>%GBXvEFO`L$o{O4%i{vza>ji{Y2Zpq4!c4nRc_v_8K3{ z%Z-@_tnx2P94;&PS~Ol(QWd)7YVKFIT&H5Apb5Bgb3}E?bw1x)T}?Z=yxU(ugg ziR@puS>X=kPPMJIssoI1dtJUhF}4!8J<++n(kKDwwVZysrbpeV^Vsjjry8&kSOKK{ z$Cp^1dFqG%dU}~)7q-AIMnZCRxG&zzK3pEy(-E7ZRdq@uG&Fh} zXKkW;y1R_pt=|ynaMJ!?g!BWt5p<;0rjB>ALm3%MWHbWQYq zcfU2;+ZvUuTxOT^!J*|*qFN^%f zx^#w@SF`>4$-*hd>ETS^ef8>8W%;n#qX+H=6NN$&BxXuq{PB$W4^Y2h;>q|x^&e2bKjAty zUftJ)d*j|8P(PUR1ZJD$_$Sn_tolOGP!T@*6YB5Hxcf`r@IhGr0rlx}O2;LCL4DAt zKcK$XlGCJggN8;tko{%(6Rlfqy7Z^z@7~4zjD?3E27X%ptZt@JZ|n(K^TQPTeYXP+ zmM{0y@<)q`X3z)+enGu?@&8RwA1Wh;`T=cgG1Qw68vKNM#ewBNpk7tu+K^N+SQheV zc%uAEgD#ORvynP^d-A>=2Q8I~V`%i)Ui%a_Cl&RniaXNt$+k1_{l=Qn(-nL6Bj?7lc+hQBzB5l77a_dH|%6R1z>cp!cHwdnPV>PPLb z-;^szEfcZCA{@DRu-(LJwcz_|+=Cp87yLyx_JmJZaPxDC_sN6__2x;F^j-VtgbP8J z{0-##+4ND&5krnxBQEJ?3flq=oA^Cq_Icq;{m-t0(y6~k%mmXMhc$~c|9182zZuk< z*;X57r~0Is{!zUu-;N0$rm5muF$G1*xaGQ)xe2v>hxK^RS!Ey+kvRmABsOh-8I_Pn`=V#fA1wI#6G(cPeqaeUm$4KDf zZ(RrUQ(bi%$x~dIMWmU_EuNivAQvpY z#f9@k5DorzkUz}`%pncd-$}FV-_rnysheNS- zmz-diZcK-4KD%-dZpAZj`eBNdtwh@T+sFIMWO1t;tgdsvg2Lb z^A)4D_69d#V4>Yt6=$?|KD7b%v11}CpI@;VjMXDaaIR19jwWP>gs*BV^g 
z{mi^qn^#$QA_=_`slB6cpb|kjwQ+dHkA9)fMiKcD9(eUkD*vl)bf zNgqFWjE%H;O}Y)2K5iQqN>f(9gK#K|-K8Qa=OJN-SW92&bPNaEBkHyjuU?AA93^x& zjj#~oyqKMbEC!T24r(3aBNayo`yNA3x32F;TM1H8vTYYiOo*GTXK@Guw@JyUjOvF} zrK!EISK}x*ke=qy8d^Z`sQX|EZVpXN{UG4Puh*R4>RGgOR_j{Q$<6$(nDfFr$Zzy! z z;wCi_%E97!7v?+&0uSTHD$f#eX`Xq zjFsY;K~WzyjYL&)Kgb5_YFHRFsFfXyR4JyZuQt7@YyhH1G^bzf+j1l6U1FN)v`4WC zr$I9cOfq5NWal{z$#VN5&!UFmYsDt0O@onV3EtvSUXyj9In=kZ2`;89Wz{-BtF5+V z_JmRCjYg=OrpR3f-70oji-v=nn?hXlL&;$#~IQuNa6{?E<{X zs>R2|Tlluxa+T`B3QX{J*3I}xmC;PC4Rrer%9b?PDGRmL{@_UKLCDGUu-=ctbL`kR zbZY7D7TwEJB_B7Wjcsa#+wzZLo@`CoaQz_gO~gOP)GdhK#nYA5!L?fJXvch@H^4vf z5ccOzeD>VD?@d@A^868EcXLnQQ}yrkW4Dg=Y1a*D^7oxSJ-DtohlO|Or>vOG`6+j@4 z#(cs>kz+q$gvr~Rzjz=Mts#VvgkYES8H63D_4SymtcMW%dDF&HyDjq^jW0Db7!89R zS3>6pG2OY}clt^xZyioLBh;PnuihG$9s2T`)$GFq20S_ESEpy1W~;xqwcA+rj^w-? zy}IRrlgGK~K@+XvPJLgg4k8e;`Jvu~BSduSwwo}kg(Wd=YW4NuGZ~Nby6B8m4+ZeB zsO4e#iy9vtEZF$Ba|_nDUzK7O{K@>@08kIgmO~afoF94#O=Rsk5pxz^cJ_K9CJYa| zd`|p2@1fpD)#Oug%8@#zk=kWc+7eh8iR_?HmE+4ac4I2pNDn;B*n_q{m}bhPnb*)P z+h`taz_QhS|$FY>-3r*vTJ>pA( z=YNnGXRA=BjdVw?|79%Uwv*CbE&C4c58SJfmnR>uFQ zP!AbPfu;h#Lp^4Wf&Crovr<)S{{r=TyMKrJR7h9aU!Y#}8`R6r0fheu^=7GS<}!Xk z{mzgJ9$6Rm)LxLPrT;5Xk8*du;hX6J2Ev^-PvHRp7g&Bd(+TV@&jv}bKnNB{83*;l zVQ5awO<;EkB1p|2qJx7d<3S(-xEw1ceR;x5@sCLgNkG{Z>GdFxGJ6ZGVL7`ovI?@l zeD_vIWKOvswxD}^tsjjB*wm4I@WBxXmdI1B4B$~aI07np)<7K&eZb(mEu%;#uk zml<}t9y5h?d6Sg~*Je(OD0#Hnymya*Dg6FAhzlPNOwMIf`!9(Id2x=pE&Xezxxi)+ z$djAP*2C;%fu%tZPf*sIHeiB)(BXg->cy_`6?1V&5W&rw0+s_o$-FfJf3!6bjPZw@ zU~icp0&_aD{Z={F1nQArB@P z%N1fJr9mK7928TZJx~u&=A>0YAPf$Q!-1d-1aTbj2Z8jtAcQ|etR@AK;CCTAAUKft zAH}9vH7sVYv)Hc(#j{E@>$BE#;ssa<30Co1e2IiVTF6=>AfO{8AZs}#I8I@V2vr=p z4Ewk?D-FjS!h@lBRDm(D8(%ut2sna3ZXl-GKuH0X>5m6NN$^+3KrjUa#b1#b$jV2% zFN8o|IH3`EkQ%QzY#ao|N_43JIdRAWAHa`r`PEEe!W>J$#pKV!xV!1I%cNS(fEOUB zr#Udo6`P5QiK@;xJgA+Uiz9*txVdv#2n+=&XbK-ogWCg+9_I=C!k0pxS$b2AruEGLl+3~C=dlk z4M9UlP?MZ|9}s9ajyX!mjvTm2G{01I1vJU15b~f9)-6AeSoH*uR{{u*2NAjKAr=S) zg03e)jR{ao{AIUxpqx{z$%SW9Fl#4(b@yN?@oSWhfrbYzbF$so@{({6WG=4q+fm^8 z7_j9Sd{aEKOagiWE6F3|3H+rHY|x2`Dq|8cc&A#=H1yZp9z7RK0)ng`cR}C$IMhX&F zpUrq&4dPZeN&<4YntexaH=eDDdsUaTP?LJ^mf#7dh=Gs;fz~tf>G!~&@t3Of9#U${ zOT+T#%uo<6xKI^PAcBTfHwz@(2Pv3(J*G7XM{K1H;33+^>-gg5ZMOmH%j*A+Qf_hOWqo42khfvGg-Oo#(@GN zNRM%4ZtkWVmbq>LO5WRSO1dS_yGF})6H2g!^);62_tW+^r-kJSc~{2Tfi*eo+4sOZ zinM4gU0oFRXl@z%hKh3^8?<}*)d7D9!vE&?cy`802&n$%K68**hgNtOC4mP;s$RR~ zcgy~2jcsmy=t}X*KTsOZxqu5=s_t9>dg70}Dpvhq>dW4@V=nmB4Yvb**=))c^pA_lJ`qH)yacTSOR_=6*qg5HP5dFa$8yMePKmPr%>+r1F*|3j|3=#8 z+qt#i_1K%c&07ULBm)0*MhY3Xk{(kl{`#z8Hou|O&Q3Rm4Zn^>SVQ1mP(KbHiudEF%Rzg}&TUrvDyB!aD3x6060+C@B= zCI_TUfwjDPMneJ4*9)dra0Ey1bS-n~3&|4lKbltRHypd)8P_Xvl?f#pe z;_}MIYOed_eeS>Msj~TQZO#pPPW|JY`{+93x}55`HQ4bUs7(({yZ5?J51*1) zOGAEY#X~FcZGF0YQKZV%WH45*ZN z8N3q&SwVW!`zAohdh_~eY0Ctl#$Xo^!P8@aKJLNTjklBcFXu8|zgPm@$^~|FL7qIA zCl_@5_`uwKnHP87>Fo!9!7}l<+Rb?IP998I$dc@gp1A);@U%{Lb*(1>@@dbITIXN_ z0sQ(Ydb#G1ka)!nhiR{e)`z~*e~W#w5v}w3N&$OVNP;dt2HJ5@H3CcK{IHfG5D2{- zIGvgI1mH39(~{mR6%+()$T@jp*!>dr_mP$yhzRn89R3$ZT4nejW~dFP|3fl=XYMX+ ze#hVS9R!-Q<%rvRS1HIG`p?A`=6O5WTN=E#{ki~5G2=9;K!4l0!Y%D31K31aD`%W9{wxzyB-&6<; zg^a`F3^UlOt7D~73KHXG3L0X!Gg928`MDJDf5qa8|9;7Q@klG?%JPifeImW1_LIQ_ zvfD)Rp#~Pu%U6}>tcJHdwq{=ZlKDS+#=$oEIlcDL1=I@7^T>jG;RjVOBTGj zViU~2BD>RHkRJW+iQ#I_#fZCm%Pb15mSd&Dhu#ni(t5wU+7(r9jx7LA)GxE~OmKd= z>8dK{a{sd%H#x=wi5g!l8ZFL7epy9*qdo;*Lk+i=D7L2(sG_2})168grgcT5%kC1g zMRLnF21pgx>YH!#ci+jW@N=X^keY0;Ebo7A z6S`NPM6^fQCf5)Do>N)A@}LuODrMD5D$1$tcbgC(io+o+DqUj~$DlI4IH<}bH_n`1 zDF3&d%8?Jg3h&=#TRIVxX?|RI2xIci%F^dI^T zm44_-!nO>kHciUKENX{%q?Mpi+2)fcZ!;46>`cvew`}l1z~|mMMaZ_$aFshB=h3Q-9?%r(?~3!$HqmJ?J9;m 
zx-K;>-iBsie8J44$?7#XQX>&mWyKnoI?GA1H|SL}eEP+DC&=mkPNj$Qcht!XV2>T+ zr==GOI@Ur$>kiDp3JP~gsG)=;RlJ8Y&^_kE^0{7>KBb5K1muZhqmh*lsHmaKN2KdV zW!yOzu58&7xkea}LJrow<-q((kS&jVq_|wxM=us%Vi8L=*n!miR{r5IWj8@;hlo|l zH?Uwx9ojgMp6op@HGZ4?VfkZot;GXL37({H|$rMx|4rpoc?l`J+JN-)*2ta zEN2EgHL066$A?}mayni!LXlYf2?YBd?0b|PxcUqQey&*gqPt&NW%Y@V)Xly7o89CcPd4eAe~@%H6|Gm(_&{*AfgNL=DKs ziSix3HGj*gWWUNCc`J5yoL@Zll-tGSqyCapiBA6dE1CaKaw-)$i<3W+`MsV0VopUr z%(^@ynEtv37rAZA@ERS1#VuCa?<7Cit|ZSz6?fu-y3_`tc39Baw@Z9&>%_&ui*!0QV8pnqt#^O=dYaVq_#QvRHAR;)FX z5ZQ0^?tAv!1Ch;0*Fp#^PXL{JFn8>E_`A&fxj_f(p1DDQ%(GkA%A#72*oeq|qPZRi z@;4LgtUd*5Lq-^cK@B3leN>sPra*inwzUG{+hSlnW=09VmOw7xA*LzFGM4BJh!YJmgCU zsv?~_-*Z;LK`5}O7`BuU3qL0#DZxRCuYKnRfi&?%gem`mD8Bi_F(mj*D^!94bHjsE zPe&sNFgFTVzn(lzfa+o4@?u{y10~0XIdVY>Y_c)|tOr608Hnu$P&pAP$}Kp6j|WGO z0&exuZanA^2Sg^4%Q>JSE?`Z8Er@eG43r;{+|CA#ashi@^bi}A$pv<@!86}67=jqW z;Rj5Clu^Kkfc}<=c=H`lBf)1sCS2gdXj~xG*I(>vR-+(yI+AyC!E%&D!FNDayq%8# z&gLf0jR2D*qz?}!jl~GXTL9T)Ar9Kchpp!(JJv&mASjLtcVoa2oJ1i-+K+))%Z50E zA-CJe&mEIQ4(Dd7QH~&pF*e>)422+wa#_Sq9<-bSe%0nCpolYx@L)EaRg+v?7O|cU zyJ!IYY>*=Khv<{wZammdg1kQk>?y*dEd3$QgW%{kvXINbF2l&8q9<^Pvks8ue&Qv< zDbsD_MmE@(k~kQY@@W(p;)2H8l7SlVDN@|DBN@+0#&hA{dMOW-CB9{%Ea${7WxBvW zL)sDYsqef&DgbW zl7Lt;1{V)rCMie+PD<#HM&VJD{^<3D3^f95$R91o1v{66{eP}w_aVk_4T@|hK*h%9 z-K>b+;=UiBJ`_p*}U<(IyY0P#d^;5&3HA$ca2YzksY z`jM4=$@=ujnIpg~Mf}}PotR_6Jnwpa?foOL4!PPa$uQ{ip{uODy6 zJ^axl-;o8pBblYyPGWqs803iPC4qOE|u@aw(Swhq*La1xN9&y`IB+#cL$m{y)2G z@4qFo{Np+rwaUCS4Q=;51N%23gN{gaEcWUIig!s3z{OVyrL+=utZLI2r0nxCQ6!1#Bi0vyC;BEi-E$qCufK1rj?nPk{^btzv@OaVcsXsD z3Dw<|GE_IK_(3=5w5Ick?tPC#@?Uy9@o@f)U588!R z`n|Ib-`a5f5iRsu7j6}-3qbkMy_D&nmFF&-{f&DIEcss?|2%C~hzRlUld zvf&7|c~wi%z+l2ALA1;hgI4KhiYcMRx@sI=^xu)CP_Z19bi5dz6d2~ht;mj5U~{qq(6Uoj5;e1)%NhqTFPjZYO>&o=?zXHvCj1h{pEPQhlq zp`df`k*GFqO0e=FO(CukEdX~=x!!gUnzgJ+JOr6Jt<{$pDa~B#lFxLo9octywgnyQ z5j-f{KGQ1a%M}5EbvEh(E>a&+?sA@_D=o_SIL$vuL*oX~7>cdo_py{y@;F5~xD7#R zD|l%i1u{Sx8E=_`o>>hAH*!9jKN_SQl0PGfAJvC@9duoh;V!1dRtYXQC4^@O3npH% zZtenQ&NoTCl7d56SooUj1SojFfz$wJd)~!q^omtS4O7QcHkxwLqzH(nfA)*Ksxy!T z>{U}mw$z!E2Xo4O35HMoT;5OFA>>feW|#2bz|r)4Q6EUQPbMDC!LjhB)b--^f~V8& zoD=Y=3JeNWPWfd?Qz9INr$mVzE38RUBZ43lB^4vxBpQ%kteO}t9r3@Vs*E4;Pm(#aqgiT5e}7{?GL>Au7Uoi zb%LS0`ImJix5xux) zP&R2CZy+O@WR1!^S#BQ1c0)*hs#H0=4t1yRZNFpHoXTDUYaTvz^x*4@-LAFM-?aNR zN2^v8Sm9l7`&uT9WQ;OBKHM+1dYow(XftMzMy1 zbp}=@{*~F#Fd${gmwWcmOHI^mV&ipd9fK>vde#hz*VdTY#Gi6c)sVm%N?4P=_=X=cn&H-j-by4r`k3j?hRMpYqOAMCP+1Q!OrH{KJ;0`>yMq9s2Lf@l zm$Dx3CaXY-ls3Rie;LDQ;3!mu^%U57@m8o&TGM=#kBLq$>-dNV1#}-otGM3;ejY~Z z9|b|kE;MxMnffInflaJJZWN5hL&|!Nrfkk&Xvo!EvcArPl55=J)~R!0X~8C_)i)gi zujN^f3<9bi4O%NeRHaJ{ICQ10x}-;bUuC)FdA&#}Pyc!y0XS9;|3wSuIJ9J@6@70` zS6+f{D__TkNDI26l$D1p1XD0|)@K-dK__K(b(G;e6zRrmMZ1!s^(`AD20S)Fd#WwF zsaRwnp+Jv6>W~YoI6gp!Na>7;|CZKglr%SOs&}9ggP%(n*XXIYm=?Dm`g{iG+w`W6 zRa+6^4IVIfWuC^5(Y*K+8deEK8wa&-rWYNL@Fz^Ga_&AI!@$}Fxu^?|T@;sZ1!oeF zDyrBo$xC%e*vT50bZ1MJWIRZ$hy89qmpV>w?mro}9hWVN~#OwPcO;$omv?iQm>*egGANy+N|SC-=$I7_n68 zXL4VAepsoz@KJ)Y#%#kTLoqC1KQ6IkV&7SYL#DPmzF(v19I10qM;)EMw2{2@K#Lyp z4C!$?JV*kzJOgJcnR3?@ zi~k3EZ~l<^gKL!Wyj=%;OlWC+@bmSd*DPsa(Q-j05?8d=xlZM1 z8MVpzae7{3?8C2vCa16N)DW51cMuWi1JQhC77*w3+E7`>vdo}R3(VlWbn>6|cQk|`W8Mo5w~ zXyvy2BQz7Tp+EEmDGP>H`}vvc;3l|5kVpH#mB|DLQ8)b4@;)ienCw>p88Bxwht!cs z*bJwB#$5u2n)=ucqqOaVWTz&fmgT497D56eA~kUE4uU59vz4uvuDWbfDXp0z#JaCE zd!13`PR`msaVr{9qx@ z9Xqt`k-mMP3-2U*XVcbDQrWNN6s?9|9>LbDg|I7_q|5`fKqh&KgDYCNHuJ(>MInM_ zm}9prkHshH7&=5@jXuR~lb-*S=fyD?5j!`TymSPe(-U=Z!C}QDGSa=cPyVt^^Qfr} zvOibB-Syal^A2JfBJOshg(X2uT^oMUb0g5rp>UdafM-R1Dd`DyWjqLiJ)b25H@YHs zA46|NwBg)zmB)v81}>(qxb=kUTTQ4vE@UM 
z`is@885u;^p2u#hKQ)GgE=WI=fV}tkk>Xg;$^D-#TdMWm&58Rk>9t}7sbGsii^jvR z@=7ewmJk)aXHt2G_yfmP0K$WR1D*Q*i=DKrpijpXzG2WLSJQDO{-u83ez9`_3>x0d zx5&)k?pMD{RqY3CXJxzRVx=w|$I#8XHs7)~nA`VLlR5MqrC7T@;kSO1Eym`r$$nPx3c;g!{x8tWH1)VX_Wgy5dsQCTa|p(tQ}d{KzY+T3F6FVpb*P9a{b| zHRiCMBzcI&mPOJv2(fIaYs&jPYmZoU@0(r96x7Dk>VulKG}gIbYjg_L?k%I@QiDjw5D9c0pTRTF&I~Xz9UnF-})3<5A3nObvMW zlGX1ntKvBvJ<>VJu$u{6o{7b#GXQ9Ke@=2W3>tm9Cw%Rm_yql1@iHXcmtWs}t` zut+fSE+7wH>N&BbMxkP4>uDjMZNHqTJdg!lN0nwlX-Y3ola8*b8%{HzVZjVASpMJ8+~SCdxn9UEvcz^^X^zK@^jySeMw&|8)g<~E4k9GiWu(JAw-Od-xjy^w^ou0 zc5OBx3Fh+j)^sL$dg#b}9^5EP)m8ID;&69d}TzD_v)kY}fGW_7a!_G!s#3RExW*>bPe97EP zJbt6{;svCQTTE@>?i&L$+F@tsV(M0I_14hX@Oc&i?9WY< z1}>yo8^@AF-^qS4bDX`=@>n{{8O?slR!sJlFcn(C=kSK6Y=a{`sKN z|ILXXg6on#k5^th{56KQ0oB)v*FnEhnxOOj5)G_`H0O>^9Tep8)cN^7DtQ!NlKxlK z*UY^ky5j?T2(z_m>WKH+9E~Iod(ihs- zx&U-z_Vy!DzP269Xp@CFwmRymiJ++n?jTCflV@1NT5*dVDu2rF^K|gshfH@6f%^?% zMoS~`X#x>;HBv*mw;28aJSUag*K9Gh(Ll?>fsLp%)WGNqGNWKiO%wgR0M)9H7RyBg zt>9*TQeMCsnMquq#rxPV@CO)+GzWXOtD7wvxShg6%Gi34s~nfccL)LtqJtMg4Q7Mj zL4qd81}W}&9u(%1+YGC~U!w&}P=MdkyhRE&1hy<%l9Zs&&WsSt5L&s-h>#Glg~UmB ztpf9qNT@kAaV*3f{cSkO(g4P#jhm)iiF}>Qq)Sakfch^c(I597b*~hiE9PW z4O>x1I@XSog)o2EBjUjxpo>Rr7CEVIXwXGh2(ypiAmAmwwq(hT zqStdYU2UVO)@*-_1BBH!HuHYhAY<LA#`dJ6==qe%Mhm-`1}}BBq%dF2O4Tt9+_sJSN&OQRbiNZj9$5{;{i1~8Gk0#vV>iLQ1-Yy3Te4SzIPfI- zt;yd;63!@&g}ycojv6?}d41l$wv+I~w(t(9uRI2d{_%m{4M#Nx{E*DwAWqev-WGm; zpiLJ)lw_o<{ilzC<=GkBRe;9(M>0Q7+uIX}18L4v4wh$>`yDWTdAecNeLc)9eDvFf zw|)iCT4V9%>1}~I%K{zn7qakwQhnujjd#7i&9_MIV@PM`%`a^wmo87Fu`J3d{UU%Y z{5_Ie8SrnC`QJEOLAM*n6+^ve|7d-*U zM%A8x#GDzdIJWMn5v^6X1?QgRFg1oV>l5_S9g10;*xJ4-FrrL_tZ#FP22BK6tV_}v zimtd@!KSF!KtML?+$f>*DB!Txp^*f+C-#Q{i3(kZ0i2lzpAIc^DaC7Oggdw!(JXc! zYk`F;>yfC77VOqjer*z*J3)M@R*#~bn+xrk@Pea{b+Nbs?U82Ud7!hQ5W@e zHvt!VxJ!Yp?IKj2c0aPzb%7!vW}e>i)9(F^Uy3%k)kjMYPV9>R(O~=waINnJ{{z?h zXR&pr&D%)5)dSzx`9CITJx24NKhTcFMdhtouYBHrKf?GOFe}|B4`ux1|Ggh@d69Qu zH0j#F8Q=#h+FQK}SJ6>7UCUyB#Vjm;L*F+LUcb)t5$68(RMsgR%C@r%!<+jO*9A zQokoRr_tYy|AYo(gEZ(%zPEmyRYtFA?aTcdb|k)Q;8A4E!LabSSBH8h8F;sdY{Q^p zX7;StCDDiD0CdKw@3kzik0srmHCUSR%YOi!QL^#On^UDb-o80qmTmX8rt0wKx3&Mx z=5I-VQ}M@C~Anxs)&F=Ss7*+0}8HYzsEU*zM3+BA~9>frA9Wz8D-@gM(z zT9*I2Bh1h5K0RUPIDQ7|N)tXmjXixHn4p2W(qEHW9sk3%EYo$R|F)JzLBV+hd>!CD zR5^H}XoOL(ulE%|qlkxEDCE*chn5Dvhe_*iyjKQ>j=n#OYq&9LcvC$=RjY)VV^FDx zH+*%fe)0<-XlBuxqvr5wzuwtOrYWs1EwbqUD)Q2O!E$P&jwnP}v*m0^}vV=s?Edi1vE+UkdiNrt@*q$wcjQ|%EQ+B2eMS{2@g2UsPS(>cKT_J`-6pMtV zFhQ_%AyLQf8iyz^Lcl3agv`M??`Bkjj7oDbQ${OLSTieK6-X(yrsn0BJ-$nRg)byR< z4QAu;QwzgvfuRnW1+9pMS_N(crv!>%nN3AH8|5gn(&|GR2)aWaWu)Z?b=wdLbqn8q zWO%h0c!JxNo8gp()Pfy zI+2-LkYhb+gWqAx-);2hrO(aN?LP~;D-5gGzQ3`F{L9m=1y@Y`zBKr{mt17NtP?~;Z0cxU8MPBiR9#GezwMwaX~|;aqf@^pI*tc?Io*4M zrFc8<$WMSh>JLbX=)d;ry>&bC%Ljw)tSLm0Sw$Ao=x*`)pd_TG?vTepN`4QvKC|dXrK_R ztF(M|P~Atc(pdbc=H+&;mqL(>8tKJ36dgXR8e?c`E<5~SB@;|lU$dO4^O_{ZVz-@; zfcIElIXo)Bje z*b}V5WP>ViSl_S4>KU?y5SvmUA?mDTpS#@kriY7lZUc7VH}A=_uXp8Ld*Ar}C^9j{ z;`Nr<`d?%>H*zO8o(}rngZ1AmS^PU6+@E_D63b=JiP9<2)!&xwthyQzl`0OB{G>R5 zoT*9?2Mwqkny(F%R#CoE3Yqa-NelIS->v8(c!a;DQl&x9bAn!| z29|kD1%2rvu46d#5}L#ZVf(uAU+cem!E}~cr|Z+C&)lo53KVlYNxZ05lfF^P(rLCu zW~+&Kl#+}VBu|SI#)CN1*hDim1#Tp#)6>?-bYA#D5l*eR9ew7Qe!kdQtepk#VQQ-- z4Bgw(EJg=6v(FD|BnY|2aoMW9E<&uFQjg|nvV~gcgU0Y)P=;t_Bxpq5YGIuuBE||e zZTdjqd3^Y+Y6etQg24FUH8dlnN&2N`GwkqLdhyKgATmh%1mMVdmNdf{)I^A9vKa!A zCRW)rQw7w)0Tv^Ik^y#N!>k;bc(tJ^O=b?&iDtlDu0>h^u}4>{QM=m#hHC3FyiqVo zTg__LQJG_{#a{jRb)TMh&i6%-e!01;VIo!7U7*;qRTB@ukBPoHfdY6_>zqa^Rw0 z3d+u@gc-Gwr@$2DfJv1u!YMLIY!hA16Rvqh)R$!0uozG)xvy?KzI`1e54N_^9QY~W zb}*TIr$^6ex25d$vPj4}dW+Qg%hGh8QOxCY5<9P2y=-ZVS`QO z!Qk|dT=*WtO!VD{j`mb3DqLBO87hV&D4%Ni1
  • C}7fY`r$Gt*lJ)jWY#3}kg6ve z+K_xLf*Pqo%FR-$84$k>0-V#UrofvI;n~$o3HE$2KaCBwGTo}JS96dy>4Y@w1MOF_ zIR-T;*NCwf@rGsRyyM6VSt)uY{qCg^lVF_wk)QU&C4g4X;V{zU%k!EP2#jZd-`=Ua zNx+$}-^xb7xe1{kR^?_v^verfL1p=Cj)TCHO(>XhRL8aCaoFT27OkS2_R}F&Cj|j% z!b&jxMkd56643HN*$T-9-BkLWhS}^KX9|PTHwrbPCi<1f3N8Aaa^Vr%Z(RRr72ngn zBQBmtOc4s<#z#Za$(%IcAJGr$LD-v1OH|6cIN&9;?Xc7OG&URV$zEnU zKRNYL9$;W*{wGcxBD0^9pYz*iHcF+h_tn^GNBhmBmM$E-a`Srl63^Y+7u7#c+WmSo zYjoE;k?DzJh+7eDOr!L$*`G*=!M=rEZFfv80)E?lH80Q^W6=jfr^Vgz`4ZhRqrqia z5B28JAD>k)@T@-n{aJ09H1QMsahCks2tfQ-d?56%E&;PToBrHm{!%Y_V;=&=*@3no z-D{XWOIC(c?fz0>1PBewH8Q4yk}GRAbOUF}KcCfamw@R#=6i+FH=wulzxY}0x^?r* zhksR};qUJ;v4<7ZM?)2xMhCjRfPlWXhpIziTNOB3SvaFaRtfl*05Q8-W0bp@QKIQ3 zK2@rovTCO*m0pXXqe?GtU3Ko>ZmLps59g*)w!gLp8p-WssDx06jf!NlCHV5@OHEXj zX+8RkI>mk#ZPm3z%jRX!I#%nfUgv1ZkR5?w(Zt|6e zI7P!18l}O$9d>5N!mH35eVD5>zcqpUhaQO8^PuTX1)e7fXU_P16%pl0mxLBkXWjTJSW_d_os^kzW!z)5 z5`{Jafz6bWxV%Sp`hG04sh!Je-&Ak0D0%n@QhT{EcCB~Xj}iq55)2vx{a+*s)swB| zqMX0k9E)Cjfq5%^rosMz1>bz4PH#!MT@@EOn5b!==%APT(6YUwi>K+HZXe8n40PPx zY~6N&gENqxM@_-z15)y;EP;QYqOz@H!r_S6cHP1Umaev z1GxGBkC7<;(j5CAoS4sPduZ$HZucgegei`Uszf}R#=8!lx4Vh1%Tbl7qO}_`%Iz{4 zA>G1zsABi$_PVuh8#_|0RAj?d^6BrC>GYZHnL`hC8^(o43*bw-soEEAed>-qyM+HV zVuIVMxt2IpUqHL>B46@EtwLX>Wu3mYL)V40PT9X}Y7Oflb!t0H7V*9Sfm*v>M^l2y z9KgA+WA*df?l&b9+}esV-%*(`13dvdbhc)@$A&<)hnk(KOI) z;ajU7!DkCSa-^3=bLHhL2AV3l7;YgP;LW&_*yAHD*A*7gYQpGZ93iuL;vg9a9YaJeOdm%NBhZYUSzml_%B2(tGKAq*xl6zT)VMu6Q9# zxP+-eC~58TjAYw-H)+{RjDK6RiF>u~eAW)81T_?VEyR{-)*lQaoMUR38J5L`KP;n7 zQHFSNh5$Sz0w%MPvTZ1%C>H;|%y@o_#*TL|+)H0ueKIc67K924^C!lkh#70L zA>uluM(*qk_wA+qNLz06I2Rruu%P*s(bL5l`Lc-00qafGGEO4bFZlWVzSo=!=OwUy zd@JF(ga_VQ4&stqiFo-PuS-jyvIsZ^pX6c1WE#CDdFf{lRvn*=Ojj$7C3fU%slY5> z88vN)hh4hdrR5@ThPwtMSG@23*;NQa10wUO&M2*$;wI;aH&!K|uv=s5gI$06dNh>TVXbAb6PtNrspxd? zk3M>B;Kt(fd*Et(dh}u5&iLlY`rD(=Hy{0e%iH_^61bZ7(bty?e<-F*J)zo^6UcD4 zfanN&`q zybGthOjx+trM@Q967JKkfd64PmcKvx{I3ZjV$;8aJ^fFUaQ}X@_B8f%@ai)kh1~k; zRul7X{%EvfZnwz`-TB%yuGr$%vM02K-EyejRQG43W9w3Isgi5_{H-blg0Z3Z4vWGBOtd-6U=+T3binX_-xAPxvQC(_sfcs|gdH33wOvKQ0be(ymIQ2L+vft`oMfygVCb4MHPoYJU_VwfEKP_@V{##6 z76gsQYbjc+_2IS_G#Bz6f{H#rsf4bZMh6d%4&>f=3W1*kWe})*lolO=KXTe-x6)Eu zp=fp1XJgYz9GEI!147S*8jmF!Bk2;kG8uw4<3bL-Y$B-23pE-Z5aY=AiTe!qYN{&J ztH%)T9)_MZ3--@glmg1Ic%1wZ>LH9Ha#&dTtaCFLbvo4afA_Gsg;HdqAy03xub)1yv` z!;|)9U`VzMY^hF6IbigGZQCh=x9^VH?w*|qRmutGa(1X+QQh`hfWKZdEnVbt}^>jK% z3!b_(dU2B?KsrX(8=P-XYtp{)4_El?Sb!fq{A=Zl1knl~%qbrGL#zcYT8n3Jxb9k; zeH3-a@QU1uq*(!ZOYqe;$DVf`-0N>t20y+6KMemtpT>glpe4|&KVIy=`+NV+Yclmz zbMap-(&TTi$;S5e2wxz+`&V7S_+**)pRbAkWxL)5Q%NfUefoDxo%_C{^l;b{{-29< z7BYI;QaAmY{A|tT+QT=$3%Dlut3`Tl=JH_nx7Xy)RMzvwTfV2V!WRszKbz^cVmg)e zdFaWN?ZeMI-JRcORPXwB5M054rW3+8HeLt1fVnr4CKKbvhWqNbY>M0S<074Vq3d7i zV*juGnzV+2G3dNrxmeeCyofOrhTh_UT6}J_cF%6r@O+E>iKWJvy3lR4o*^(_?DQ84N@(VeDbx>6(<@U|+{ikHS_Ye0u!P@S6Nj zg=n|>-Tg7XdtT%g6$zcAj52GE&!x$eGMLtL%~kXkSf8|GlYX?B3dnWc4q37V3p>`^w{iZh zaa{b}4MzvYHuk7{0=HRk;I6S{sqv(4|bjbUag zztiq|n|rd|#>(;UkRJR6__bYL#Zx7=K3VO!bY;|G;-{L~?h_$Jv$^QF9=M$pj7Vsd z>Yx@*p9q$$`Z$0gu$pn!KozGk9ZE9Hq)F%;TOl1f#H_KY$7}H{n&4{S?UGAE5(4R_ zW|uj7pw0g2Vhc&5Ip4ZjE8{h*DnB4sG2ItD%)I1WuCaK? 
zXNl1dHZKy058eWr|Ho0YB&bZl`Edo&EqO&ek!J|lypY-qPgBUPq(3lq+X{;L#OBod z>b&5^_14}j?ArckwI3rqj$5%bm(N;8a;ax!RJi}eC684r?8Wmh&P)6aG6vb&zQ&~ji4UgLjWc$|um+keekE&-Y%=S!Z z#}b!AU#Fmr5_1VVDjgnpwwh3!uPQL5_^f2Z9nVf?QgDDV<>~L(7`rNe8b3`3klGLVb@(%D^ZDPzXAR0?J{f z*s{eshNVniz?T_wLFME?AafBE%;v@qp^vt}{UifHL=8TP%l&T1V)>ke$ML6pea;mJ z2A`sl#QyM^FRPQAw+v5i($IOC^U(b|KAMg%{MTdN@sd>N$D8$(>WJFri@(vxTr`y-603 zlMUryLXzHTxEIHhN^jcqUhBAbai-%5ik8@gVGEvf-^|QvXbLcet)@k0-O{?{x0vNB z7#vQoENkxm?d8mmXK4uno3}dtU%lX{iAe(ujBFI@dGokdz3_bgP|3$%VFWOdMxMhR zF2C4>&)T^sRD)}9{H7ZpPRq6ILW%dzWgIVc5e~z>b3=#QABL1+PKL82B)7wR>(RJU zX2-gndu)?X?s+Oaj+VX|m+|(aV?BM?Uv!&(hnv!lHN}?P|2eBvRp5#Vv3oY3MLs?A zVD4gapAe;Qek`UG1f|7_aFQn6j?DXO`te{S@Lv@Jl-p75L-h~(7^g|VFw;<-k$B*$ z7SRO%viPZsEuC!b5V=)XU_hGyL2x}xpZ6KyswCwDYYmyTjhf}VGuF~%M<4LP-iOgB zH(Zn^@%ETv`RYmQ#OX+zS+ZICST92vPhYxOEd+IZl_v3xD0@2VtWs__=J-%92@xUIKNN7-q{VytNDHc(tYPlh5sf91`; zsFm&iu}(bl@jkiLLDAvzBwo=;89b-xqJ9}sbkh-zWB;F8+1#E7+NR80bYzB@&?cT) zRZ9QdKy~*H`fYaI`_o?l9ppe~`w` zQw4i^+p5qnw7)<`=lVH3cAtl|Qw4swpwxQhLiJF^KL8P0mCziBx42@7#tUYl{N8*V zF1VR^*Zw(O-Xwg5@-}VF%?YLRfimwTl6(L_uP~e5V*Z947QII5p9%_ zKq6&fazo_aS6r~^hPtePq(q~{p2c6_`#e$}@UXf77{FCnl*(@cQ-(y;5y2vd*-@Yu z^49qP~YVvN#*8uOo^lrwC@ESHO`PV>=D)5ZDGLGj$&0dWT7hSE~ zmgSsrU=9X>ovo0xXfh|bWB^4F=F(iipp@o-1u8t^n=hiwa<>(QYudb_Sns0;idqUfP;Sx2NUdGB)o!hkZQa6AhQc&IV+2E`4j z`?fY?R?2JA=C56pfzq5Bv&K^gdOX4HGyAaQWau{xb-)-MMm5?AMH13-}u|Sfjqwm^%+#X$R4=Pqiwc0uCpQHFY~dfx{{mLmCysXv+FPaGm@XM^D}8 zconlweyac~Q*vRbU9B1P4`gp?w+p5$;q7boeR<}4%vhI5q1xRmbXJdXIJBfvLFyTG z4|Dg^2VX@Zo!wh{fn-Q7kX3PQ4EcIS+Pn1jSYXv1^>bPe;MIHKuIHf|g&GPfCws{6xEKMAS_@cFub?DT%Y z|NdD~doWoSwasjC0T2N1EzybgKV;i-EAr|`3t9q8p8B$N_7dHrkVUd%rxhpLGrWG1 z4g7pgc=f2Smd|9)#hM4HYTJ1wh(k4DXOX9kv&x$^dk)SScl?CdPFkHFHtx8cwDcYA z%De%YUf;4Z|FoknNo#-oWzMgenJYG|JI zFQwK=nG**vq-$LV7#8_R<2Af}Q=eP0umT&bL@?aAPrY)4$hR=lT+_VNf<^+=vZ0YpS+p8MdR`zDbj>C_jzZRSV+jnkAY1`q zFO;O;(oG-%2`b#vnJV4&!(nCB1}>!eI3)988ukr*xL2OMOH| ztEtt|xM+1Fcli=<#+HY7t#a9Zf-?eIO8I!9Vc&0;x#f+)>>V%D)a>5wHGWR$N>M>Hd9F z{iCnA;f?>vrQcrP&0#S3(;sdu0ar#klQw>Pea~FKGF?=fuiYiQ3UNY4=N!HOyuMSP zF+k`_DuTA;c>$rTf!6;e-~DTcuKtltH7c)Ll=xJU+-m9Du(TC%hkFU3w~xaQM{hDu zCfaI6cl%VDI*4fVX;$pcDWxTu20x_g?w&GSi+=zgoEz+iS+MZZL(t$rgLmMQa{&47 zZ-uUSEnc1LgrbIZde!BhsERd9dRoVxO$E$bG%l(90wkNB-KDL(dR{3jK;(B*;f$?L zG*5zeYKv07eX)gidI!%n#qaj}0Z5)2_k3SyA@2dtej$(M%?H@m?~Wm}Ox+TQdr2< zYCr_MFGABB3^$?s%BC7xc&Wj{9cn`{q)&=-4$ff3;Q^Al6BsiwfZF3%f!Ln|UZV7Z z#*xg7`XxMGQ_>DH+Y93)#A=fwcl4JCVUv--o>{kARdN@i*@laBVee53eT*LhIetSq z%0wt~&dABs1SkOOg4r{A=>X6(gJtP%NeeH#8^C14NF^0Cbs5`{z6=0RpSa>Y=<87- zdKy(STnPj9+BE! 
z9r`qJ9edG)mtnxmR#^5C*Ii_%E~h)fV3c*c`qWctQHQdT8NIN zvl_eDj7nRNy}s9fes+0N^_I7^kyYNK=3C8Hy%}C>FvN58=FI39vS$oRuUp72U#d8J z?g831;(kmbX{BiRV?W{o^&<#oBJkWaXQPni5FT~J@_gox+9d<1uI>ik{0CkHCEfsl zW_#`ZT3PU`Hd~XbG_d7O;=aHIL@?$>s#qt zPghrS1v@r~kBl~b{i<|qow6mp^^!xL7QbM_EUpl@K&n6UR_cS14>X{+mPs;oZ|gPD zzLs0_caUOOc+yl-gf!P`>rbolY){q5_Bvb{lI4@{+71-B{1Q7*NO@f|P(=MaG{B?7 zXMKy|zTD?$4a9H*UEjPs5bQJSpM`}wd{GFu9}$T4w~W-aS)U!b)ZscjQs3kA|9&4M zC#Re4elcJ3R;65&FDviShaQ2BvXhW6V&Hm9odt)TVL&m;eTsjmJvSNG5D zQB`{pj3m14?o2!4l%$n5qU5zOym?8Y0bjX!+he``sqBUmUF&ee|A- z^?XrY=hk&|dG`vkpnDh0OG9rku9G)gjI{9r3O1Ons}aISgj2W#6~iR1$7kl8Z>c%r zSfg0qv#!9KbdU%H=uE{S?GXU;HL=9jW>ULoK3{6MQ`}0t-IV9J2EtcGWEZo2w0X-hdV>xjYpK zI`iNWxDI_AG@R*(R65}_r`q%yG!3^r+USpnzFPqvk>_CG_i;w(Ux=kb_QDbKXH9P5 zI)-vz;zq5$UnVSdk>A+18AG3?u!dI%6ujI&-p2*w9KkVTB<0=i+F_(sW=nfiySio+ z3Qv$bCS~1+#XKY(YCA7$O6P&d4+#eTh?17{>TWD2))VdI9jzbE9+#WsI4TK;vre0BXDinr(kw`cYF1^~UG$H+|({N+{Z9 zLc}sg873YGORo1Z*41pr8Mpm(TNn8rx2zo(8C!OyTrOn1aawQ4lEApg4=ziAN|nB{ zGb$#607TxbVxbM!ZYNrxk3S>;c8COJ6MJ#bI;6quCw_oqyHUUibaymE{8l*_dn}flYd5Nbw zk5QsdL`7^T-6#dlnupp%<*o@ML+I%WKp|249p$+HCu>M10k(OS1wZETMP`s$Xs#u2 z)|{hN@uwm0PpS5QezFbMH5cbe(+necFx1?jAn#woUeXj}RzL}sHIUEPQ7Xh82y)B8 z(SSo((2_OSmoX)fpJw5U*G*Vj9bKUo%{+!0C3M-YI2C8TUBy)RnDomJ*cgqj8NcgP zCrO@3%L|qbIz_8SF{X&o=@iD;tXZbRD#l&sgcH|0EY`c-Yu7JM+4<}+fUp(o5FBJU zs&4$0w3`GM5d49v+_^zemhwO@ghn>c0LzpFEx5Oe%&-MgBp^&+kAUu=wtdrWVm!2$ zgYYAjgG?V=wBp<`Ru5m1umG$QS{^L1qnWVpx5E%it(4Kv_4z<;$nNU zw3`Y-kX8LHdoY8hp8&K z2T_<-f^>p1kNb44+W7WLlSgplQUb*HXBz4VaZy$%D)YQ?$W)OusQ=1ga zEyQf|Dr$0m&6mpCuTN~ha(Rc6+6_671q)Y|4yuq zu8`RR8yW)KE@qOXFD`m7)i9mduL=Y@Z9iGi6Ry9+wAiBv6;!sYThv8U2M%x^Z8-YS z)|xbL;#OtAXJM-v4H0rWESJe+_g}y^mpblw6L|b%=>4UVEN)1=RtPi4l`IjMV}jxg z!~BkA+uRw7pJi%Hf`G2N6cX8Gk(kDyvcQ&Vt^8LHe zlArVb*E@gDs5@F=kS$ZMWEh^U1VFss$UQOl^)%P~f!1=&?)Yxh4P8c$u>B??$x_(l z*G-z=$UVykF2(dWbe^4F8O}&`@2(yin+8e5tn*h|O{2B8I4?5zBk_8yhxFeDlK59i zy#CUVc}T5NbTO2D+@K@h$vr!-E)P;P)>_cwR*Ab$vcszeX=?d)*-1d|=pY)jh`K6y z&y60OM2yY_Zs=$`nais2s2sSe!*bTKN^5T}L@$uefbLf+Hl9GennKyT*A=wjkkLv) zP_5ZSlt^E#R3K2h^^XP%_E@J3>&r7OyXMQISDh>W#9Z0qVE$Rw*vL9NcCo9~$e z7)cHo$x_bsgHotq2z*492SbL(%AuDJEh{)jQ^*gr!2(E9p@nS}or7eFKZ+nB0K*Go zNU9S{Es<#_vGG!J?GfYG%J-9r>(~t%{0)Ap5k13WWkDTA*36=#ks~+nTs^DGm_e$ z)lr1AY4&~hgB&m^XjxZ?a7p|CS9zvM5=7PB+jBL%V1%mf;cyJN@}%aIX2MeE*9Sy= zpYoC|9xj+jE%&b{E&7(ga5e`Or3a(le>D}zB@A2?)c$p{kv`)SpZ^(`WcDzOiFPesQIKxUl-05e$AVzOwRai?N;#Z;oyo) z4j@pV;p_~Cxu)JUVkWZM&QK%lGTTGxWoRQ5;`-&>gZr3U5@k7h#-O=I5wrDm{mODY zKNSTRz9QYc7gpZarG%Kum+Y|1kR3kj=iIfPbGWtTXH7t-@dn z;QMjCXN@P!0WkV6NNR6Ra@qbirEP2Zl`Po~%^7w*A_f9Jb<6Tz%4tYEY@pmyiOCb6 zSW?LE$x0g!17fueol(WbS>vK0ALWpk zSCTn;plyECI&si3=WjVuAp@wKVI$=YMtch9tV&%a+ z9PPd?9!pbDCW6YDn!0hP^RKkvFm(lQa7Q(d+r6$*^95}uHl3Jy!=s;WZhw0Q+TOnr z;L3=)U9h>K_KZuf?Ova`ap@3_mbiPM+igd@_TJ-DjqP~`I|Ktf%IaW2GfBP1v)v_H zB7H?%S9ZzXHL;<*^Fk|sG@FvRq5idr;x_7>E0KTa{~~Wh9pqa5vswdewbN z#-@Ae61*AjfIg4pX87iP3@|!NsmKKTQKgPLG&~1%4Pa`42?2qEwqxmG01jL$R_6lP zg__(9>zy7w$ka#?r21q1@9tH#x04X%Di&dp&QvF`sXF$|au4A5-BjDG!gpVgk+r*P zO(e;0Q5PqoTHu?q=hBT&{~z|=JF3a{ZPQI71xTSOozPK1L+>OMDFOnD2&fo}2nZT1 z7;L1_JA!l}6cH7D0YSlnp$JF~VnI-;0xC)I_okBc)E#grsI8#_?Cp%x+l-R<%IHAb{_V3Z@%^Fz5i{C9+4e)eSrl4 z%aYxhKk&81(gH}o+CgTW?@|*S4sM3QC)$6%d*wSP)eX)!ItEb`hJyeFT+3*MvnPL* znxLv8`cm=RFj457^4$2zM-KPkh>^1B3(f1riA6?#JA3^G;{SuH&{a?uKm} zJF07X4ap-C8U>%cPzVN4St?|;ERpY!+a}o(AqTb4ERrY6(HiX6j52a!>@AppZ5OYG zht&ZAWES+y9x?E!F?reaDyi8)n8v^3g>Jx@+= zE*M&XpeX>sdGeqkd5ntG1j=QVQMTGsfI6@4fLIq>rZkSxkrXU)xQiyx-G(-!FfZne zl^^`+uI`QuaGCeKW zpFEOlA>S2>{wU?H5W!E*um3J?|D#6rKKuH+N6EkBR_`Wj>CoQTJAccqOpd>fy$P$> z+4~E8dP;)%f8?@4@l**K{w*n!Rj- 
zjS5ejb;Rrbi6F3OojdbyCR)7emXVY=(={G{NR!YeCRvE%AA0UYx&$hqL>v7_Oeg*1g$^%w zVhmC;h<|QDtE$`3@T}f638W^q4T1}LVlUvtzxB%==g-&_h#uT-0a7c?4JEiqAF_PM z&4@ufnY;{-kHL#(K<{h$184b&MSDh#Cze76Z#(kNYJhSo7m2{JVW#55xUKCZDJYA9 zzk;!ja3#cfFsXPRbQ}=w+KYE%iX+&>2O2|^kcC`=Mo&A+EPWh-gva5@Y)I75qzsZ3 z+u=T&rfwD<)WHv44h>L_1tklsYhdAw=`=k_tv|2&K*F3dbhuvOQ1+0G)Y)XYeam(l zdKdw-&=R2SRI{i_MG7u89DGa}B$66yZeOdtMS96fcAWLBB;ln(5{W71u^KJCNsXpy z3ghkB4fk(+JDm`1xpfrwLQ98wHQPy1ZDRn6nF|vUALCbs0DGbe7e&PyWMnr)lAuiI zuqdNu&=3FgD-MYYI|v}tzAdfY`ou&hCR5MPmkw$`!|WMTp!Ye|kwFB}O#)2d3G-d5 zr_&g66UT2IMRr93TxH900Z1z-T`b62)+NXq=qu&eoa`hf7C*Cv%+e+kiHWlu%gHe! z*q1|xyUltUX%JkP8H|(MTqu%%XJ-_hEGL19ZGTzK$}Iwb?W-+$SuY^@O3jz)vqyI$ z!1Z6foU!Z%pU#iIX{jl~!3h73A8^qF-P_Itd0qQrW_$i&iI-yRThn0yA%izJx~`s) z7xHRfguYGQ*5)rMQ+U2d(ee&HqV6&Hty>1IJ!^njOdW z+ zH_3M++HkBTzJXOY`@aaRZgR|O7{8xMjP!^$Z`YU}kn6u}c$l$w*^#A=MIU#zaI>wrD1KFfO|euY zj-@GJbxvnt!BT1fSkxtO_CzkzkUw?&5h?*_g;5|;Twmb)G7Xxaz>AZZO;u0*Ji4eeTHTB)_NMB%G@n~hU;)g%j0NgjVp%TxVk$Q&Q}P=kF}aI$IrOnY zgHiw(dD!o)t@yDo+~??wmw?e5OjZ!n8tIxL=k#e>Sq9)n z*gKgj!wNcRDT5n1280bcR;q0}z=T7YLXOxBo$=KrROOJkwnH7&+7`e`oN6 z_KuFmE|K&z4=&y|f6y_u^mLM3SO6=Bh@wF>qtlY{ik~EVi2av`&OD;SCg%oBqgd1^ z@cpBkX6GC3o~-Y_^DchTv z_5Ez>Wv{8}h2|93du_f~yt1O+ili*=0RE$7a2!w#)Bx`IYX+YLv7b&EcFZA_Q$FCJCpG*I}?8fx^PYPceebW zt>BxFocQ222U*_fNBqsz%)?)(V?JA6Bfjb1M;#xpIDv@WbTU!FX9{e*(CU>|Mv+J7 z#>Dti;wOd^9uGfi)uiDakQ1vTEv>~3pi0iT%+tb$OEH^zVN7GKE%*pj6>tB>Zx8UV z?-wV1R-c`}3N0+zx$6HGoBb>@xN1pkMJ>hP!{qgxx7b2BB5GE1OdgHpUnXvsz$~;v zN;qOZSJv{>4~rW}zGdopIUN^;CtQky33Dtxhs_2SX8Oe>5_N&XLX1{4e`&*vmhfN` z0j7$Hvr-A7MV^YLf<%5(^Y-&n{%S=f54zW6@j)?3q}xGWwU&oLbWGWCmRO@G#|?;SZGDSRlFBkFNn{U<|7@S)CzwB9`YNZ5`=dAa&!If6>=Rx5d2Pax!{h3*4|d7 zGfU+hwAn6xq{$I0T$fF7d7oQg4!SYkk3Y+SAI`Adl4F1Mu|Ae=jl@X}gAiUdtkiuK zvB$C-$6Ifl^ttl6Aj$8eZD3vw}ND&L)(tI(zbs%rbrRv_BwtmA?ZOZ<; zxvokt?$y(x`^{QW?wXdZi~NP=9ALe_`u3yF5X+@D{)#}4GZ<1W+x$h#qIwM++EAm% z3bh>Xn*CPA?E8WiwF&ChvK#ZUa?lK5w*JwHf7DH`fvckmO)>b&bp9TLKj`*izYb-@ z8gMD={Yr;4>22u6zB>bE|GXm{2gp|MEM=mdD6Z^#Tu(Qugq|zE1fGK>w2JL8!t8Ce zabiP6gv~W00*e{W2MXIK?fYPP(=VffRaOM%y6_V)yKUyv}jQl;Iz)XF}l_n5uK)i`L6F z6b9130L!j75M(YmD+u74bp_rY-<1juAatmTGw`F-e#O0$xj7sv48#5=iu8wx;#h`A z*;Iuj{`p+RY`z~kcENy6Yw%N~wWsVo594z={PZwk`i_U=mpV*|G4(Xz&vN$@2h4(0 z(j`1JX%l|Ff>MzJnk4-+EYiBrGI_6Kk^?dA*39#?qo1kEuYpSHl z_T;~x777=pa{qi3-biaj?A*WP*dDS^y7bsf{Lbh{}Kf; z4lQWTj?po;8spL89zo+Vl7W@uv9b}f<8jznjfr@b?4XJN6M%ekV*V*-`p$e4_vhF7 zXaA8%{Q0?s4s4v};(t&EuXl=DC*?8_%(uF$x?wkzEtYz$F%~>~EzB$Tqs~9fywH#z z_#^b*dIEQh6^r+spY0#!U(BVAa9y81z=thO{v0_tid-R_q4RoOj3nbH>rx|*^1&WU zLs}@g;U|j8_$5AoLdkvU%nQ}gaw^O;{1?3QRJ?AyfxY@fjN0ygek;v~(5H;IArl3J zvGJ4r+5!}kmBt*u7$hNySxCdXNdA67mTqJq&SXamL!&2L%>F=ku**#3)rGH=eW5!6 z`iFO5`NcKC!?h6_hpKu}pl;560fEURIzLk~1ZW-Z*2J1it<&E_vV9HRohWBa_DQ3P z#?lrXtoWRITQKD2qew1Q*uxXB$xnpUJ@*H_9Aql-D>;}odV~+zYKt%~{-IVGaFkt? 
z!dWowQ=egAD&t!|~W#1*D#&$cae`;BgEKzUvS8(zF>-#g10@q*6qt#)q2h zo}eCSA(hx2LjJR86&5j!-+mJ5??jVxF%S|_M%-nHPd(qc1y(2X;bb&g&IEv(8*2lJ zDz*!xYa>zd&vnvq6y*2cQc$dDJbMfuPA&nwd<+3Wn^PT!2&7Gp^$JHMX?Lfq?d4 zuR_uo)nI5r8;oJ?FA6`SF7lG{K+{-T2+IPX{DLqyj+6#->FLuiG+b%KBpf`>U_CCu1=t|#h&Cx9RTi2sRD01v_4 z`j1@6{~{mi0h$B10wnq0VG#d2l;yaO5MSkl|KXL5;r||k0MGv`@2qhv>x4OoF%7ilgeR-m;Zv` z{jRMuSyu6$ZEyljK2QE9P5yWTIiqdwJbm17GuVT;-H zqbOm6zfU7_F~+=s@!Zbl2|JCpa^UyVNYWQ=^@@!)meg9XqABM8isM`4`iX8?xc^+p zbnGBMj-NebjXdG5k`RZ5^5giTC@>j>?Ovto=XJt*trmHh+GA-<2M(-NsVQG=I;s zw{{#_nM7)2Bif=b!WS!&J*G+YT*4d5qy_xgYMQY58&5i>j$4*yaTlIpeO=hw%i4se z*)?05W#D~W7<4zh6;E^WR<@1`^_oQfwhWG!RoQ-UHR@4ewJJ2gSI0Mr(H9Li_;A+^-1@M=$7wi zX#_vadjC$p9@boZfm)V}nPU%w!4Ob%3Nr->Dy9o*swuX_QW|h~&}vV@7loCs5x(Qa z3+Gxo^a7JzEqEB;@FElV)J*Fd+;bVPR<10Zrpwh$6#7h9lDlnc`yo{bvwyu`WMALx z87pt$BmQV>h*_)1{nK|{(yZYDGhy|`Yb9|EaNj*utUh>j~^QXsNy;=J3T+&c)G|fC~DXmSN2d>-l z8(f7($l!vz@qXl7!+Gxzq|vCKPe6qf*+hfAJvsKpLR^YGV;2+7Y z6ZnW8EZxwfzuMN^E3NObkV95xT2JD~ZdcL3{@E#1*X4)wO*`x?U6=?S+l(ME(I4U{ zD+!0ti9faFWJ~NS%UqEY?dgV9-$TufK3vg!(7fp>NIbVF0#i0Baehr#9AaB;%&OcH zsAuEV7hjcU9WS-qn2Z*B@ltrg?uPu%;80|9i(tZKdmE__L}7{MI9#ghEhQ5XfAMlS zt7*$`>fUZvh74@6=0sA|`!9*a09HlNf;`nJL+PHQo8(=|J5hwPU2D-;iXuR2F+HA}hKWs&J|Pi;y#xxd<7TCX66o7lolq5olEc+d zw{6AT@;xTovMGSSCu<}i3*ZXg@$bDW|G8U^8Q%<+X4jPemHfN$H|VH9RRk`a@ULyd zANMLv+=@S37k}QX@N)hBFT3S`!5e;+h5mv!{Qo}@{6D!Yw5De9pLh0E!JjLq8VuRU z+%@=5!y6t>wC%?j4BGEt|7R3ZI?sXYJJOAS#s9T!gHyG^V7xNMg0&X9JYcVWmyhT) z{I<+Tbf#1rSUdh3L?`qFy&un^{O#49OhZq6Uhz97?+iwZtjC!b@7HNm9uZhn1Z ze4u3~iCHVA$zfJKyyvWWN^}46x0>HjYwWhIr??BsNqcJzjMyxwl#fbjui>~&k%=kU zrn#@~jh*AuX^?=a9#>yKs2%C7~WOLXc z*$T_M$5dQJC*aS;HZ5b{ylXx`>>0gvmeOkJ3PSsgMStRP?Any17vj$?9c^y}@^aJaS2jmQeQu4dTy zLQOCl#v7vwL=s^ULH+_c6qsxg1r=_lD0G)=C*9Uex5z#1UBj`{7}8FOs$Nh?piJU- zmLR2sX>#3!PugdV(ay_2xg&)|TXJf8j$ENb&k|vxPnf8Zr!^7Rgl`&kIP_gTvXAH# z)F~=G-Db!KGu1Gmuc0ETMteYV!JbyYC^iobdzqfS9Qw{sqr^T`(;Au&hoL_<2uqUu z@<8E4KCd3EvaBep!R^p#DS003N`q3w;v_apoQ>kaGQ3%Ech`%T*^icQT#_jmy|TqF z4(=cCyAG6R`Dy`LGN4z9cG8;*n(_qgEx>{qhcXG&-YR0*5FyBZOZdKwHaD?ujBTB} zA<}`{Dm2>wu{XZ$c5|)o+It%-ncKJbky@7Tl2Yy%^>6FX*Rg>tmU*-k!k_!ko-An* z-Y%=RJyoxhaEHWdGXw~kN{1+mG>PX+v}nUK;NK9B9b0wu!a;lE#Ss&%HoR*IQmd;U z>eJ~P@Ve{GwXPra)hS^jpYu&bc}6x@UcEnIvW@`jMAPBIpPE(MG91-92Zi*Qmg1W5 z_n6aW7dbC$& z9Mi5Nn~a1W!g#+jY4^BU|9JPw*XQBwza<{*@q6+)=b3Qu2ZHNi;}b73=_*&gzh8dT zW^_6y2HRf#iOhW;jPU9(K3Q`$r)TQk(;>yXVpKv9S7)nL?F-Mo+H8rHsm-0%UDD5Q zz4-p{lTN{EPsrEP$=~*Tq9;i4{7>$_UpV7*{N}AlMV+F>0=Woc(yf#Jm!a>9!wiGU zkoWrH_nsYI{)$nt32*ni_l6nkS@*(0m^@VZu}C%Wb9~r^mjT&Zrqr_C8Cnb8;XXr^ z?{4Wk?O1!gC$Nq%oNlr=M6@+jezETKj%rJ=R?#rK1*-w8>D2xq>JfD2Ic)>xXFWIA zP-hh>@H54d?CXc}I8q5zOFo%I9udCtv%Vl#-@kS3y=I^ublBD9&>5Ya3p*vf765kP z%a*__VzqrMH%%e8?A+7u^>cN;aeC$Vs(qb~d;{c!#AXfw@q{=kedUprzqKs4u%H!@ zJ%DG{9P4IsK(`MBC7pWGdX6U^FF`sOM%5%@4`zEXzz-MFh00^@-#2RK0q-_ne?p|( z=M)1|9}~oeCu(*qf7`p4tw+>({uG)&0E`&-QI`AQsV-l3Jos%)D7NROk5{MIX91%t zbcj0n4L+OIyY}I?!NL5vt)m%@&Fw9+(^tL&nzj0$(sM%`IS&m^IuIg#F5P-52$L7y zsrBs4;ku4FAJ$7Rtuw^$SMpA%G9@Luc2jERw1m9n#<2;zSy4l5YiA}H zm!?|=YM4BC#{!s9o?Uu6 zzv=a?+iU`uZE^hg#yX=cjHk({M8;cr^;ou2VGlA)Fz?OI328hw;6v7ZJ$;2_ggcZ< zDHF#EQ-_9ef-V%G7zJYa+O8(uoJ=IkMF*nvlI;i&wr>ql08wv`f>WpHFrKUV z8~}_3T(aEnRl?SsW6_o$_|}XGxKT_NA@~{`+zkM05{4#ig!?n_st@O z33gJzt%Q}1BQGZevH*C_%wb6bxJD%nUltWM8#+n}^Pn>$uyvm#FJW;x6f*OGy~JEcFBZeFZH|w489I=P5^9xqI^n zu>Mkn7$${>1rJ^gn*e}x#=J{c&{xw<9@7E$%e{F3upuk~QX;mnoN8vLKW3i++LoH9 z>yE2VHj+R+{(**%3ACwU)T=?Dn)t=#BLjv`OhYYQJ&aPLuMZo&g9L)6Ltpc(As|c8 z_pJijD#>je>NqZwM?7SdK$6*p>1nVhGXP4=bjMX>glpFJhqPkywOFx4mzk4ym3=RZ zpIU530H#p7j5NW`d`y zQ)X(%dro2YHoa>vbdJ8w-l2>-gt48YplppU?dD~=ky*}ZDyTZL(;P0d 
zhKX`xqt#SV2~(!$B6IRGi{`3}?r{n)A_^Ya9Okhh8H2Q7Cd%8LG1Pk1vytJoZU0=G z);J3)35f7rfy`kTOIXZzLZ+r})E4Q}I1*@Nthfau3KBG(pyXLDL47NbITSz=1yvUh z%ddueL%>_##U$a7v+TXEXjC0c-~arZ&^bJK=W2ZD(d( z^Uu{;E_)W~dY2h?=~!k$p@dU0nbBVwKx&9$XA>s_6QB(%T99y{n}U+EqlUN0=3E+DrDXwN>CJe=3K4D;4IES*zo-B60yit;%CpTl_y_=2q? zgiTiYltAkNP!5;~mXp@gOF#h(DtjZNUs+|{lDd(8&}Fkxc#d-8X5)>~5R5@JQnP$_ z05<%sx7T0{1k7|+G~6cu^xoCwYBdNrBbb)AbPo`Sy2SrFV>ct>e0_Ew(6ZK&0C-dV zulG+>v4$%jJ79L@xE8I#9&!%6=ABLfQvqk$s8Yc^s|L%V?M1K*XShZ!eC%#5EYCO? z+#VSB0KuvKWF#rsSi49z>!sbU9YsMZ0)%>CO`Fvl)}=2adZG|8A?Wf2}fP$xX8VF)qS3)Rme1jp`r zi~(MO0xXyKC^X^6LXXUOu=#j#eEakb-^av9S8S1q;vJ5U4F4=WF5{FYg& z#M~cx(JBjD=?#H>XhVH?;6KC$zl%mm?QwsX%fwBYPODRUw}B*opgma5T-WR(hr&5r z!|pue8(v;HL=ikhYVuq1q`d)k%{($lKJ?Ko z-4d}E{vwCZ!!k5Y%y=X6=4ylp_()s^_V8z$i(eI0xsxc#zUEv)fSO^gQCiePxtSRt6{> zan1%Dx5HaJ9eOq0KFZZR!A6S>wE|h5de8365H2Q_BMYk`+7A=YsT3&AHu(drkftI zkLOy-_VD5-^uI}UI}6s$ufoc(mVCTa@B7sC?oRjJ8QjBN!XDk7fnGICFy9X#d{;lB zO8dLX9p@VWn;2_pHiQ)dHo8DU+4SbRm!rk2gOV>FA|=VBds|l^JxEEC=>o6)8gA_e z;u*i~;n@d%zW{1oDU@KNZP>mE=MiG*P+>&znhE8Vl@~Z%VC7M^IO*lP9Vq%3pf$$S z^(mnCh1JPClU=teq<$2EfUw;uK&@A&qY@geT3cV9h&}rFvfLZm@nEn*1#;{40V8R> zI!U)dwU8_l=y<}Ox~-@(Hu`uZC@J08_PX&Ndo8T zbJ;E0`kAfP9L`!k?A+_qBQ9Sa54o29cy&Q5>tJ4BI?1MS>(<>pVF6mQruH`uK11f+ z(k+$bRUn|%@agIim7BxO&Y;_-xXSAXUh5!5-(=KwkG%LcQu)F6o5B0icA|^`?e<#} z$=6oj3rsAuMzCs+^RrEWn|le_#&`YApCgT6o9D~T$6fq~ryYYc)dFtahxh8zAIk6n2r*>|%!^Qn}Napq4`!@v-!AGTMs12ilu!6d*i=3j(dL8+hU>n`2OG3QzVly?XN$gn zY5G3*@%#79?`-+`MT_}mpZT9x<~c?4YfbZW78o}Q#Hktr!m}Yq+0ZC9yqGQcj4eFM zM)BAfg#}Ukg80#eEl~@S#S7BU7Gy^kgbWt*eRwo%LW#mjr1Ejx`a@8vDKD*V`o z|KWc0$8NlP>D3?J&wd;p{o%v=K~(tZhyNLH^k-ny&!FO;Aj{ZE!`$(-koOnFv$|<`h#)_{W%!~k1V}o{KG5Vy{ z+~U>zXRC#yt3|xk!g-J?|J`|><2|vchg;RHUeg7v-Q%rQ&#xKd){SxC(%-{XOF|*a{2dqHM(Y3rN@Jsy0tD_rlqBe$#H-?{W~UIaj=)pAOb0K_<|uY5;ET=q9I_w{~h{p0}}t#_%90 z6;IZfWi&!!XTT6^+&W%Fb#J-nz{KukktKwRqQ5ON-Owgg#p~siL)Nwf_W~l7?UIwV zkM)&%4Nf~38lU?<^=@#+g<%Ou~w|J~4k0N zpSn9_`oqY|sj=o5|DQRXZ5{O|S97Euo z96C4t`SowdM?Y??$8k613>Ea_n8G_PhD73H20lkslTI)cyVj4V7(=8%I#df&zcd(3 zw7RKcm^N=Etow>&jcRO(OK6f^npRLctl=9gSG#UkvtB0?Bu$MVl@z9+h9|+)K$!&v ztv&2J+eS>pPunXc=`D8BehP}{ZHI!UibY+6ri#-}SI!iRKo(1)skK}?DEcQyU390= z%NCHfBO}eK!q3P`L1i-Q%wChP$)QD2KAp~Y9|Uq>($qF)#-6P<^hFom)7zi4{G}wq%G@%=tpC%`qC= z@I30Wz}CPKzkM2FTv*2yoedHD?e{d#c?A}nd*gMblYy?2#pf#We~{dlF(M(DPWoBK z@`8dxgLv%0qYCHl?W0-@y%8|Tt0fg5|60G|bIGu+qZnWD=?qruJx9GzN1*gmv4!B9 z2??qbT)p3iw<__H20J$y?|&%qZie*I4~~Djb%k(ol&e?m;4S!e?Sr(2o><0KkA`yt zS5(efN@+YFtBF>&4K0gy=e2kRlqMOh&EB0DIG}CuY|s}bwZ3RD$dfZu`YNChV1E`W z*SXgDRr&(fG1U14p=OSJkakP1d?aE$tRc81PQla(F1>w7Z}+-;kYPssAkrW%aPLs>Q@yWxanWH@Wmm- z5&n`l@FDX1;}xHV#ut&PpTBe;{d(aPX>{}A&yn*{I#=3??~#|j9!EZodKncJwYh$C zNAcx3Do*PD(G3eaO?u8!;p@^-upEbQdrDd16N!!ifOA2^^E7InHE(S#91V|kjaHsZ zfD8^Ge5C|6WAm-m?cW+*&2Wv?Eic*qQ_=E$eVN>sF<Ie9VUH8O%_-38BzuZsaE_t_Rpj(pXCBLmtIlFPOZpr>C{!+bUTfGXmloJ8|G9Ng$ zM(u8?XUhHMzLD+BrrpvmPWdZraO?=sed%1 zwIUsy0{3MWOa-W$uR6HK?#n7uIkwF?a*un(zH4^^j_o+Iy2rD9U-td-V_IR6j)$lB znw()9k~^{M0dja3&4)SUrV z4YEXDaE(@TFA+KxWG%Pm8gJ`fDt04ix7G!>=4)>0vF_!{s=*G<7xraU zxZl`uEZFhL+P>U&_X^z`!Ome9_7_gO-!%Oc>~eW+KLdK8(pojdE#-oHx!UU_yz3XY z9jsRc?d(i4ZTtJm;hNil2kspD6mszC+JWlW16BU2C%k$uc+|-!+~#@SQFc&!UDtl# zUUp;KVdqCcn%TKbBs-Y!kKGpvP|x3eq5j-`iO2u;-}k)#ofG44<{iQr-BAl|!L7EL z6%Y7LoB!};-Zgx|$FIQS{_m2HKf&ey-QCxys*DH2s5bFeud=cB(=XZSpL)9fkOH;T zu^v|C{yV?RPWhm`?xLq2GM2*)zw~s!WT#B}&DXK>3Krv?nLGZFeB4iviQD^>56a`~ z=}zt3AWsCRpP=#1Uz848{Wd%JOHXG$^F8r-g6@vdZ#wlWAO76TmrBfh@Esnb+C>fz z4cJAF57@lXF$g+6Odi%;a?zN{&@Up7Qy=ofVxT{AVx<9W#R?>v*QS$iP{Lhm4PLyw zKig<_D8j*LFna&d7#>Mx`<6KZrl(S@5C 
zYt$W|*^YO6AB~jv5k8%7aG9UYMCQCdo3NFn308^Ki&1t^N(#v63>uHV3YC%!$+TCB z3JrguMP7$xL|*y~;%A)jyLp3G6AIt4V-uXnT4pW_V?kPf_0v{>p^UY%mP+JrD~ zv0uhsGknIWJmquDRwWfBl&a5FptZ)V&A{y%8?tYx=7jykCo#Nh{^wuda;=jE(iOdE zzOWKz8i5<=z)z@v=mv9bi~SlMhdWbaM7waEk9mh_y)g}mOYP_hrdW)`9BHvKj8rYQ zTYQeO1ZLj&q1cBOIN{%_kapz6!gI;vpnf4kw?_sAN zgT~qYN-Gn2U&Sl}MFeuIHm$9iT>=!v#7_0twb6Ji3OI@EUbXN`Pgj3LR_uY6{1A%* zyLGd42`ZxV&h~}I4%h8;HTF*B9=(UDs%l~u-cATdYjZq56&^piJhY~H(Y zOqHui%%$j$P^e8{-t=Cd-*&AgDkmJo1l^Rv5M7{&4##edkU4+a|Z|Gu+V)8LbjLd2c0-{)n^nDL; z&+@~<=LeboWDFUUTq1m&EC-t2CSrr7I_{Yf6~!ouSo`afZS&O^N?d-HQIc?K^hlSYmRmVP^N)l}= z_a-0A#8@n5GK5vP2oO81qn zw{30*B8`AmdCT5ga=YARXQn>kL$RocwpmB?()*83H1?hGEZIFl)-)Lkzb&_oC1}G6 zIBMtU-MUe)83P^wKfW;3M%(5z7$1X^t%j`f|Ain#9oU_r_QnfTgv1xUo1xLqk%g!yrg23wj_B16k|jLkvm(7n zHpMqN<`z+?>fxnLGh7(76PvES;m;Eohl>>)cyA0kK}SiFDDW{7t@nU^r#PD+^PpsM zm&_Wg8L@O^dq7KG8!c?e^;+VYMMvAo08*&3GorXWC1`9)J9DRAshxAku*QV}Tg@cV zYd1r->0}h_Ei9{abpGo7+w`j(ppWSBu&+Mr-uexF!oNj1&-vX{8|v13P@9t?8=$-X zj`b$(ewp*P5V~>Go4yAR*1-x#FRYJ@p&mALIDbD~u>O8_@54uLd%s6KUjM*OefVU? zdH%x4`bQ4mnX=M5e`S4rbfeuie6t26TQWHYcWG$Z+Q*JH<&I<08rszNE+ih}PDu7Q zv>Wy0zHSN)>n|puUKD6=0v;WaRS#?a*lt1>~z8gaw zdw4l`c=e9PD3v z^LX<|b=uRp<1XvNBbz@P`k#J3-@pEGeRGA0Zequ|a3^GVoKBaf#hiZbv?*_umDaR; z(`Dn!A>P_hf78#0{Ttse^47=D&v={;m(9fj9(UH|+1lIw&7Y5X8|<`a+!+_%+6Zrx z)BkLWKcoyP-6Qb0x&h$jU~roeM4f(;bmAqt8^!AMd?ji}@fhs#h zmFG~gk~AeFny^O1k}GL3gSNeara460$)VvS={iPqJx{s;nQoLzH))`o4bd$)bb@5G zl~J^fXS6Lj+CDdWPeZiRQ1sqh`Wz0ui>l({8RJ2Y@yw0!ZiqQN6yw8*Axg&j@$ZN_ zX?996pc<6jzSz*A*pr-Cl4RUzqqwu4aS`OWogCyG7PCr;yTXa1NXFBQ;-fv|W6ANm zI^*IS;**EsQ+b^DbjgIP@o`7r#$}Qda&r^%8xjhK64E^riX;Jqog{|r0B{-=S~#6I%$E43Y|@QAer1^l-%Z-d@w%ADHk44oqQY}5F0!!&MTwofzf%b@;mU1lZCmhYxP*|^wYa!^&`#FIS>73lO>7jJgYC0w%VYV`Q zdn=laLpK@0ODM2ltgv>wQWP6G%QDSWQlMiIVNA{KM(_X#R|RgFPg|Rol*QyCVTSs*H&g z7cyX@kJw2c$%8t0>F5w)9t>a^1*KOFH`t=WX6$>l-*21%@gQxt-VNPBfsHeE4l=#; z24h+K_71P`l2JIMawW?!IqeifOPc5 z&g{X?+^)vLo_B@Xd@169YT!XXeR~KAb3E>I{(}<45&*_0@JwL5GkKp?=h)z+YBx}1 zpj|0akDxgn?5~3pTv5v7vHX6)y>Yu{NrA)F7CbhRpU)n}<{mLbI}%|{8ai|7g?;ZB zs5OQ&;yU?U1amjt1_%F8L0T?t;8nwAbXi|; zt{P%_n<1Q7er4?@MLJRR!r?&JVQp#p%NJlB7IccE5B;RP}5FV$Nv)=1uo!5oSZ?f%U?K1IPX8hbJN zvPtc4o;6YhwcCZ__FRal*DsdurgP)zL-8nWo_4fuQZ##vW_*!8ra{+`p_>aOs6DQ= z6)Mq^$@gs)OR2xND@eJz^Bzf+ub?k?g(n*{3e2t158SEV-5`7@J?m#>vgvG+%S65Y zY2~0p^f|(V9HTlrnTLJ1%Ed9-gp57fcW9f&yJ*I6AtvH|7wQlJXt9X4XQN)`r)-DU z+AYEglyh&2s_ovFBGH|=eF$KaUef!hVXzZb^1M+vBvUZIVFHJ?VE_^f9=(fC2{Xl5 z-EWv<+fJ!sIsip?gpzuPtAaEXl)RO%uQ!edHM*GAI{VeO6vXZBZs>=nq&zm>VgF<_ zx3J^l_{4m!WRaNn-XPEb%U@mfj%_NGX3tVCvO(Y?>v^flVf%q7-jVThE zrqXj?ud4cjee3KyPu!DM_G2KI*s6QDbz6hi>eto{9i~2Li)-jpcc(;Fq}r?75r+y= zJ|RG3w~$I>k~#x`05)oZ0Q6uWeF)GV8Y(BfU4KFixJ}rN0TgFKa#$sUIJgHvTfYT0 z#D;nh?6o>k$$kPCZk0Ikn5b!tkPBPGgM?rj!PD#8;%l#Zu%PIQG;JlssZf}UC~Aof z5vM5Eu<(o8$V2Jb7bH9Bn(0w&bb&tF01GmxircO)I8Q_^VU#be?+hjiM5-usj|p^F z!#x097sb%Rs$v;W7_)>IJB%t&_lu9}q+NMWJNZPwhjC+DF0|0KCp5HmmeP}1`(T!U z@M869%5IyhilbK>2O9U(xOSZHLCAdad|!3rLMI|3vG=g7Pyn_EKDR%y_Q8QdRv!HN z4vp*TY!(JNu(RfQlrT()P0Yao4~YqBd+cgz0PYG$EZ`0!&LBXzT_-v%Byd0@0%C~( zg9E@FyCE{X7*v@nP=<9)4|lhWpVMGn%L?#w#G!9N4v!wXCjY3J#{dTIzz{x^-qU!* z!?g3Nf#nOR^7o##I)aNOI~y(x8eiA@S-E|V05ZU&H_RRC2>=NAqb5{6rfQ3KmH;fZ zOsDoqJEXj|*~4`F;CU)tZ+*|;;P&f024M1p-zfuE^KA;5!C-7}|AiBp;F4xlG20ys z0EvQPV*_|3kV%%nXDi5#6mU+p*C9Ozfl0rIsu&`;Ux)zmdp$gd+u2bs0Kdy>*tnBz zM8}JuZtBzwI~m6?(K)IpNj92`s{xe=g?%oW1%TwcPyy{UE&-zMnl@LhA3#AZ;nZ|^ z+fXuW@DkxwN#olKvqNq{io#1z#hC#qpSGjlyqNAqX}{CZu4u@xb-h^EkDIBgIQc zM&D^Qxy^pSH0g&n#>sCW_VMDiM?y+$7R@b-h} z48-rl7Mx+4mBnsDx zE?ayuM**vhjnL#IeemmtzLbPEay2tzjx-8yt5Edoj@p{PS6HG918 
zXRURwy`R0_b-W)r4!?mq{Lkw=f3=o1`rMEQxbvKAwlRi{ZCvoEI|=WVM@QZ@dS^=1 z7rj$T*(o1xvG!!U5cxgmlWB(iAXHV{v1@Bbi4cmJ_WrgFv04-0r^KR(b`C^$l!zF? z6j@;Mz5!4PWnXl-*-^-ce7T=r?^&LC3F>ln1Ze#thzCSNK{eFm*Vm?FZ@&_}i+lL< zYZ%)Wk?`&5DZGF#$t5_x)f(`gKE5sn7^(i_trI5#N%51QGb0}N25k@ zA|r-q`Mi4rwom^a$oQrk3$t>rY>mg8*~pEA-360}19q(N2?T`3_46Bqf!jz8f9Nm8 zAA{r&vYpTmL|tZiKd7xRKt(xBepoEACt@cYfGd)|>&D-(2Hwv+EfwML4cnjpYznoO z!y*sD?8%3zp#*P-KP^GX2794d zDR|rQqQwL!$ylSKhsj44hZjA%XdIp%v%VlOJ36u6(tUFLf{5L(fy)2$aTd?0*(cNb zV4d{6OBpW$XB_*=jJF%wUFGpx?H8dfU+3ubBEt4q^VRicJX>F!MppbNVDKI*(w?P% zzTO+Kp;Y?&^cRa9BUDM$qD*_aiw{fRz!C+aS7etZ8>HkD&(ZetC8DN`hEcy0rlA+4 zJxz}zai4zRVyAZe#RO$DkiuiJuxHlfJYVOqyzt{6?92V|YAh>DxC2geE#j>@HK+ix zWUxh7Jh!{=7e=ITd44by|BUE|ut?k*b(bC%gRzr-&uhw6yuI&gw!cafCUQgY2)jsK z^Oa_2>S|0p^|gYK5>pYy)qD1i0IW`S&}kGw&wZ@YL@vYkRb)Uhd6H;Ij!hTp77d%F z%#Q*(za9RoAoC91PB@l)LK&djPwqQWaBHrmN6OSs^mZ(pJZ)MG1o1dGfsKpZHum)r zd5}nPGQVv(&+2DIM`YciIuXEoV}F}^cZ*Dvb#<{)fPLGK`Ab0ofjlRVU*t0HfP=)6 z8!{4@!`HFP&fNirJW&tmDS+Q69~@dPD(%1u#>9Fu3#kaH6k!@KQ9h(?D!~PXu*a6wH~q@5F(@?6Uz^ecl_KKUGHq=R zpz=yLe4ansShh}sXP!Ae$G>0uP5S|U)KPC3(7s$JN=Ip^10jmP&f&f!Z&182#d#xE zQ`9UX=8bqW=L#L_6RLD4bJ3b9FKUDAL1ZM0O7_#(`TkE1Yp)DgeUVSyO%e82gLCkP zGe=swu(T3g;hUNMwh2NlXSPZ8a}A?e=K4u8L`!YHg#7&dJ^J1f=MfF46tSoIcLAdk z=H6YH_Xvsp(cX7dPo!>>`}a@B5O7UR^UGR%%8OGckh}uKn}|_G6JTZRo#KJKtMN*R z*F~-)iQh*r9SJGDth}C+D7DY&HqAfg*Y=KdB?EXzO@Ug8Mg>k36#{GM~*y!~BBV&G)uK;O-b-1-HULK|zhD znkVOa_5c4I4RhGJnI%bR_==psPKzCtIH;JTu!1%m;S4!q)47} z*~A;U{Fyhoyaj#l6e+FUt|1k-=OMMW9Vl9SSr7lp@+g>^sd_{CZ}xTbFnqYnfqT;3 z@Wq())N>l~BvkTaP9Xiz6II+QQ)iY0Uph?%3(VL*Ju8*$HP0%F9CF zNRZk1W8=o$uWpWaFU+Q5jGKyi+?}<9%;(CCo2wn%T_0YUFAf^FG?u!%y9ZgUY#Fz9 zes%W@xv*FxF=^}L@$gO#vi!np(mv+k;q&^!a_50b$84#`)7qeidyh>zm%n;E>%Mr1 zi81LibD**rle0Q1GwJ^B;2HGk!s<+)4iJ5pfS#A`(dCxOyPD$&yIn8T6^SXDu*~hA z)zKnT5|ZZ(;)P(lwC>G;HQa$?WFf&el#bbLKjO+`?q1q}`=rQ@NxkE>gEz_qwHnP~ z-iZ(Q)>YKb25y&mCsQmxnk4TTl-t@5#9Bu(9hnX*^FDc=9Bj{hfi+^pPW3pxd~VOL zdZ%qf;N^?j;IB73Ih%aYPqMlxmjfh7o7B&qyqZX|r7kd=aDD5Q^YQYrjHcB1Ek>Wb zAHmy$E=*09gFXdUtczdLPT0eEeG3oCsg(K5rxPFh7PEbt7gR8x$!KxEC%p2J9d16G z+u{;?=gJwLyD*zAsa&BQ@-4IGCYcy|4?Gv)Vv0Ctv9A**kAbI{Zs*MYIE#(sON=)8 zZoZh~p;CtRMhtLWaNMzu%8k9+dvM#7Er_vK0iL$#dTI@7s~ft9y!xTqil7fd_A2-a zo3a_$fNJ5ftvy#Bs<-`b9aqH4?}c~qg?dHZemY0y z=-kLxj|FzKOhq7o#be_$+GP~bobgjE!OhOO}>B;?a(JL$}PVNIR=b9ynd2B zWVv;_JYd2-)F*G-a$9aY;C;xo4-)WjN0~2hIyuz0gzw?5reomj>ucW%)ra2<$^++X zL!Z_-KKyR69r&U9`Y9^*;hr5|(8q~TzoznsKU^JymOowlwGBP|=~Eu`=|||ZuI-1v zg0_QJudbh=0ah3UU+_9rn14T?)qbL5@FpA9e^}M(Afr6^t4LVDxTDo!?so9@U2MQq ztkqF5U&yX@Sm0c_)p50B$oGfXz{Me}lg9FpAMRm6E8AA5o!c=Xze2DC!g5NNW4lMj9{div{1*_}5U&Us~p-Lhye>(CJo;N2 z@87O3|HrB)@Z%N5(0|i1|0{UZ_jd%+Yqg*AKZjbn+km+(oqy%7{zM@EUG8dg;KaS| zUufE_Vf#H&IgwqvqiJ`xw9naKtR0v9#i41b;YrbN%l|^tp1m&}DL8a+8*V8rVgEwY zU43i?I5hoqbEMF9^-l!y$IKhthMVqN`Q^RUztD8|24-t>tU&h>SK#R%Y5Waa=oJ=^ zgmE>bpwgI0P9tQ~RA#55j0!=XfPlzttPpdC{YjWa68uS+4BR;TC|!TKIe?p;Enc%- z6($@K*Rsj5b+DRi8YU8st7uv03 zU13On}0rfudMavnU1nAm+ zze4qECjce<3+`!#THu=VYoU2dMFlJ+Ct*UIJI#3))qgUyi`#IsNGtch$|#reD{U^a z-cUOL0p+tz*LmvWXf#nS)6Y{&yc@`MIb>wwT#Kx(mHQPm-*Rr13@o_5@yr&)B}|mW zbLj;efl>XqK{giC_Fk{w@40qf;AU15tkT_CmQWcR&A9g=>v;2++hsDp$eUCUb`62Q zM)(UdZ@m+4)hZ}I6l~He3>|P*mf7e(jH1&h)hnbdY&z5C$3k<8q{g%|OuTyIXl}cj zkW2jH-$1@Rm^amIX&AG7=Kd*n15dt30gRoEAtk1xWON;KQa<7!5D|d$&m8wmLNr5i z)$;tau{FuKC?;Y;L4NHll}!iR*RNptB(iD7OjY!btwoUg@2`z)OvfU)NAZT27_$t>6vdb@TY?qG31lOtkT*mKO_~H74e(EN)p5i;Krsm*XEi z-$hU3w9Lm`4G)xO2{GF8pOoHl1L-4 z{mN{j1bQAxC>hoSPCJWJd}mr4 zki@OUHNVIr+_9eyWwpJLDwE93HQ|nRDw;vIwZ6qnlB+C_NuR_qc(bNI*+0BF(;Go0 zWNDj_Ls`*5M>$F(Tc*?*iz};? 
zjSM$f5AL((q?!Bm-XIt1?}hmz7hh2N@Oe}IB$Kfo+R6N))(0(hqLnSSDjC5t+%;Q& z6)-Z(#n{}0B2%Ghs~IOq>FMCohd~T2Ru$;cJVHH*M!?a`Lrkn2LeTMbcL#uAchyX)1k(o;B7fvxidBZiurpwq*4*&pL4K)F3HhUqg z*l>^r6@VL_sQ;^Xu9BHtk*b5@-Z?$+mBw~i$OV8v1y!Vc2L@@K8&klMQ|fp^Pc>vz z9&|~#g?Y-ewv+lCY*=AbR;mLRpO#ffc=Mc@KJJQT7~kwZtdb3!Rr{sCSM)}-l;IH- z^bA0?15Py3#v>~$QeozPT2667?de}XOY>sJfRfe|5-P~WH0Q_QjU~NFrFYhMbKph- zO|t}Ul3nSZE;3odbp3(BMlC}PBn{nygYBdoQe9pI5o?O9vrY)!ne-wI@)oTwV|3mO z^T#3)bS**sY%zVry%BK@hR`6|2NtcwLU3CF?xjd5$qix|N)O4za6$#L>V`h%v*=kn zL)w}TJbPsJlGWy;)`MUXW_fAHYfy4tIxJlhN9~DF4w!`#I19;^D`fvD@$HgFbm#5kCD86_MgL zEgWp&u|UsjokIJX3H_lL{j{=U@A2x(s!tD-D~->ATYia7T3!w(NMlF2@4O`eI@0Cn z82RVK0T3=m3l(iXaEzWL1%ZIAd0`oqZ%IYb1{#eJLS%V4LF_c{C`QX_tCm&q%O9dy zUd=suXk54%WGm8N@<^MLC2cJ&)qJoxVJIa+%*ne3o4%Vd@Vv-pfj%-y$X+O;n>nJU zsaE=0_33gu^T_8Fy!Z5~PrF?he7I{Nq^41K%6fp24G1zC{?>MHgvV2h*V<4D38lw= z&=C!>k$Z1>>fi7Da!tJ1Xu!QeBSGLQ z{JBI*^4X8^b?-_Tz3bSE{c_72;%P1Ww(zWjeSqfnOUcBnr|)i+F*gre&(McpECk@w ztv>O!hVy99R)h~lU4RGqMaQZ;-r0wQOAeL_IF@mH40w-X-q}~}XfyfYxP9@aijZ{! zOuUBP1|}r$oION;eFGa|pA8uXNESk=LNK86SUi1h0l1+UobTxy6YEBXi1S`y!(-uJ z8Aga6=0~beYMyHl_5djlJbkzX&dR6?9`jT3kVWtreAsqY;v=Blu=a;M`h^V8dMfb? zqwlC9#l~PXpsbfFPAq0~2=&Uet&SwpMG-8N(n7!p z9&tn%Jle$RA?1liY9!N0Wze7 z?mfQi#v`x}wMBp(Tl<-Py~v!c@$A!Nd<;}w9vI22;Xjv-ZkFUcfrnFa0i&TpSx}Rj zzC{0UymvBSIDtvx8nGh}*^XCWwhYw2NJeu#@JWT_)3)TCC$Y&kuF;10T1Gw;5OA!1 zz%LAlJdGHF0sR=ZmPwQG8vydnm{{r?PVsuFfvK!d%(>83)e!)PG9O=Xf^Im_rJi76 zmc$NmoBU=9X+Q1wF^F-E_*0*4@B^(KjOg=iyxL5L_1Px_(xhA%cp!1Q@F}PRMU_MB zivN;?*2~z}iy#^eESCm*Vd0#q>9HS4gI3NmTsM+a@Zpvls_-+=G&Hz|@TNWi&w{Np zM+Jj>qk;RTEAcb6OVfL{_P|dW5>?ac^anx{;?B1b4)*O-!`pmm#mpl#K1dnQK%Tx3!C@~so8K1oY$ha4-b!| zdI8RYHQx*4W(!`I{zkx0W9W53qzH_pmo3SzqE%0|*9h9ThBb)2z~tb6_33ovtX9u1 zyAc%Io;$?(qTPnik>`~GmyZl0`Pm4=3PvEZC*xb;y*Y7@HZ#+%JFl_pxow4zY^K-o z&tEUSQl9($`q1X}9jtHeXuR5qZC>KX*Uw1u_wKN7ROVwwlsZ1b90 zBm~mpPO~af&MVTGE7N5wUzk^B23BV0ROWP4<}Fp`pI0K8tBPc+O3bUu0;?)=s;WAw zYL=?%&Z|(&)eW-Mc{$)i6i_Fly5lc2{r0>X&0NzfThnh|GZ>UeXx^yd2fFElNOBCixt{%^9ukDoJqJ;+H1GyB@Z->QXM^zf`kNLFA}o#Ka*YxejZ)a4#)M^#gg3;){==KCzo`f|;N7R_8iO{=n`Ek)HK;e0y9 zu)7wVGHX1Kb5ipWH zo(_8$nG~3ul!@Fv9PrJDC#;$*5ycmEKt(l6!n`Gcv)$v@h!yPs_ERF&v=q*G0C;Zu zaBkOVXBV)uedV0U3PZSgNOYPHtFC@0SL;l>j<~5t*fdG{i_TFd{!| z1DQ0^prdzE!F@7$eGfDnRy&{w7!hWzPX!8UBaU?8B&&xJI&=1H4)M1Vbuv=|^k95r z#F2ha3H+@=dTYeA2SoWoKs9)>6q+!}R{FfU&na)v#jOv&rdGvTNGe=dS^04LVO7H~eRJY8u%GLQDb?7Wej z@h8u~41ISA51`#RGQKiCDctU{?oG81Bx1h5~~-*@*$34)A}n z4YIc*mV!i$fo+sX-ZEDB^t-r5YZg9vW9k9Jhz_>A?WBy+lH#RA$gYE3dBi zmQy4lQ^G70nz???+ilu%bux#-sfiCe64HX@>UOLdQZ%PFHYEnOCDsijDaxPS_|epB zC`S5vMzDK^wR%eDrkoner%oNrhdWK7?3$%bDsXebpdPk7ARm{;a0y-F(WX+uE>v{8sO`;P-xQQuAQZqOO=>8E=LCd_;>jjrj z4diSkokwZ``jI0%O5G4_)7lHepb;9-SZJUN0t4)jaHm zMvE-yC@)~G7Z9&W>-l*tz>|xk8hQ&&ge2>%t6N;Zx10wp2et}s@Y^HVR(fma?@|F6F>u)3*}@}Pki*>Db1K~P z02im~V%r8D2zYMt;o&p}JM~G&;2{DgLAcib?C#3W>(7&WGj%FSD7p_68EvH1w`rlv zp#q$uw!|Ufw_6ec2o$CB8gUlH0MjYmJ_;;`zAKfxm1S*kh>-=>ex|;+K0f|gPhX3Q zt!C~YHqC4hHlE__tA%b z9_(0W(~+195m7Vc${z#%;)>dVKT{j#`+5rUgn|yIqzl*DR*}P_S(~Q$U$0%)r@w8U zXn&&H6kCGgH@gw?K@1Mj#;LA?20A>?Yy>$n>2VigVw2P6q1*me?Uuk=tDoz(kwolp z{EeO1y)|!AGTQ+xKN%^Qd@CGCcLK7TU8?gn3_B#0%HPh;-XTJqg49(K6p*FITbG|D%@4BQk`La$Wk|D|RA4`@2z_~+N<55FFv zBZxt~mWvph{&>1uk5?A=?S}tL%aruoTKRbBI8}H*mG|M&k@H-o(VNGgmX2K)>mA;g zVIMA^xUaN7{q-MOCWy~!<;?rb$je)fpI3hS>`a&HmRo%~f4cXv*>n5zr;BHp^?_8r zN1rbPj{c=(2A%DHeP8})^*ZG8_wldo)m3a59wDt4jDSJ{_a7bZL#!eRi!Vlz^yUjM zC32xA%)hiuxg(4+See#ag<4Z$Uxn7d(EC2Uh421-M!Of@513t>#0LDI%z3M_2OaIJ zLJ<44aJ<$~HJUYs3kFL=%X#Q?0cv!^S74S zbf_bLIOn5#@8alC7e+|u3s<6$Jc46YXpDUIr>li$>M8;Z8sWZ{;WP{JQDKkaRV0WjBY)f1PfSN`6K*f~+by+J 
zxKO1iOT|;i+?V<-8eGe~VV`+Nqr|?N%Q@h2o2^>p<2=rUBSuEnLGqkDgcWH)7zzFP z&MF)hxEpy8Z!096oypAclJ{sLfxPln*^)~QYLXfE`}bSHZ*RuxG@Kg|yh~2CCm;FT zbDk}HBSEfD2C)0p%PMhnA~Kn(wa@A0D;cBUj+i_e0sPHMSw(p#u?N{2yP@nuOzRaX zDEXb>rNXL%3B&?dO!+7o+pby^#1i@78vn|7(OLiQ$(u*N0TdGkGU#^1*r+tU!(#^X8d2I!oofGk3=VX*fCj#ljyWN zTYgM0VVeJv=rZ4U{zWRhH{1Uv75!^Nf8}k@KYf{ObX~1umHpzpX0wW=TMbwe$Jh;6 z;3T>$$GsICsqkFvv~%A$NSuvfPzZCowmD1=U5%nc2H|4sYgU-)ZCq@9)Skm`DM@Ki z!J_NNSlD942hM zRa=26Htz7f>3w>BdKY$Nq=7Kj>mV8C?!TY-^Sbu^*lq zl!l+TfA83vTo6KG2EXT_Y;Ig=^Lt(@_mNAqF^x*Us4aCC?q$r0lc|92gQ&4i2dUgr zmuZ6%uH@W(h2FrUa@a;oz0&hH8%sZjxr0vXy7kMd@sg}v4Gh2F8Gn>Czqc4)b)Q?r1=t=P-t2tJ0>=tD;Yvr!E$2 zVc!Athd9OCi4H2!ha(+o!r->4%14pdwFMH@4j$HW70u`JUkY;c9x&P|E)~w*rypf4 z7Vc{*<$U>DZA#@y`qWAeDP6i;WEz@K&FY_>pPxv*+>u#i7Y*+hep!h3Am2TJR6jLfYX+g|iz1iQ@+#Nv{!m!; zL^4(9>w5{; zK5zB9U;kx7(gQDnPaklD7^m_J71l%gXfWY4s9>1+RN{aT&438yG#h z2pHd|`uHk_yC$vtZV@lVZf|x9^*+gCUgPLQ_nOF?(sv@{Bg6^ytY$*fY9^|lL10V0 z_b-kl$YpRrWrG(|P#A&nU2U5At09_`Mnkt64>vE^_Xb{6L|AZ}xcUVn!CW*mx1kn< zg&)RjuVsEC`)7j0RjhNt%c_t)GfxirvCJ%bN6AR_2B~rT6ix? zO^Dp;@plWcN~&nKJ6HDj2f~N|_56~q6Tal;2WhA*Nq*L9NTSNz9sxieGvtXZ! z(HP*DD+F8k1;F{kTx>$TQNYsKQbzh|s<4ExLR8;R1zW)sy$Vxj!d+v0Q9Z~eumt>@ zNpB{B8>qZ*(!`8xfFW_B<669}O~6vvRstc&(yT5=*7M##&#GidK)Xs?UBmX0TWX6j zq2hrd$JF!N5i*oyZM4JGUX>`#^*is$wi-LW&Nq8SZP;?bdKXEkrAnPogw;Qn!fH?j znjXdCA0#tn4!-h5=FO~J=DAkH6PNN~iwOd+RB4~^ma42*cDULuKy-FpKICjFVZn_B z>4_X4MQoXqu~MY?cU0;*7&|N2G+LT`4QIom76Q|*Oq)tf4kaMSw*>K#W;8rNopnY0 ztcqq=Dp5I~0t<;itJXQTe(@}ah}XpXF#6sP_b0d9x3J-ncbcAmcpJajA;EH%!0%(? zYb);gUHgn?*&4_%GOb_qBa`5*ydZu+0>t5$3@N{X*v)u!pogWN3?9#t)WplCQSDTj zq*khI(3yX+ftcKr2V%B*4%*cBCupQNq!aYAy0G=!iu7r|$CJk#6l#XE@fW@oEunf$k!_wXA14y~iFKL36HR#UL$JGAAa{6=2^uldh=q*`92oR{_(Ci| zQ$4sBE{}DHBSw}p=`u2xpXR{Qa8F7vANxczH6%y~;lP}^7ZguyHE9b zdYJ9kILigI2a!~`U$5<+k&$4CaKJ$qi5CVY-+-Bmr;#Rxhs)?$oOyH39H_Y4LYB*^ z#`VEBX>Gz(!M-=aTpx7cB~+v`;1@_Lb;1CX8RdaEGYus%DHOgFqmMin*>^`83 zhoFQQ@Sq6)$VB@3%L8DzVfcY@qZ%FLqcfK{Rc@oL?reza%#&-!fDJCa+t>hM16wQb z1F#g7{G}uN`79m_7ZAE;_i|r5>VY}J8W?jx#BdY5jH2Aa5HYU#{*1FeXnWbi6(7h+ zsU9ygu%X+k#Ko%id^D;4H3@uF3B)4a(RdIvMQ`OSoW-bXq zUbepOof}*!QHswV!%`P5o>#&u>Pcgc{I*N2ddv*p$LIX2$f;qRzWFjaBGsvfbE^{RIsKYGCkNP3C`4GubY5uHk7SiC65B6;1QrQ!7jbtK3ELF$ zpBFKe6bZ`~^F;u+y@me=QlZgNqP0|_b6x@;EndXSmoP6ii2#1L{)<%PuwBE*b9K!TT*de zyT)9%AzSzVM=FlY-<$-#Im>x-{{JNvK`7E(6!~8g-9JgiABpatq=Kb^ORj<2;vb}9 zxk2=z0s2o;A(PuE+u10;+<5OlNrgqzgPyy7pMNq4MZfjs?Yw&Vw=tXNd zOIxH|Ta-mxOi)`~Q0vo|phFCaGo~$#r9EA){l(v;BDX!KvpsLQJ^!K|$uj>nt3?wY*lnqujmk?Wqb=w1lwUd-)Y>g--w?*4qyy@q9ZyCL`Xi^bcmptn1@Z@+cE z-CKV9^WrUr<=uhYyCaKtCqeJda^Ic*O)4(l;jy9#a6fjIXp&$wPPQfQLQ}4wftP47 zYY(-253OYneQ*zBUJr9u59>+~`(+P=wU1 zY7+tnfQD;vBo;N zAwT)Wa&jwpawl){Ti4{?%H+?>Net`z1NrwymhVr3-=F2ZKks^P&RTJK`5x>F0@#3l z;Z0F&y(f7+MczF{`DqGxH3epyroKBJ9x+ABrtR-D{Tz`(S^t>Xo%)}nM<6^EJRQQY z|JAbxZq)~Z{Ys(U@NaiB{`u^|<6QdRSAE#oH2&I{`}6Ga)Ir8~GY{GT5gauUP-~&_aJOI<9jh||BRgrcUi@k zbS? zeM8+bQfI4jJ3pA7sMP5YE-)o8c|el|EBT5TQZSi3d{3j;fEW@-xOtPsrIad?BuDTT zX)`-J5ppTmFQWUYFXuj@UmqRseyc{wV$b-TM9A%MUX!GNJ!<`N1ow8^`L=N<=2u)eK;bu3u^$EnGuM02 z+WpzhP>zE*UqZxsoG)P(yiREHF*79A>2Q(+feOY!UD75ZYE6GJ5Ie;8XrXdI6?V{9Kd1m74i?*5eF6py@lNqCsxsF*66io`5n~{yMXQ z%rj-_h(RgM&UJF^Bei}macE3lC1+vgu*#P>v=^t#%Nylt1M{5IF#aUlZ&kwsqRc-M z4#|q>H8Wtha`uz72{su&INhW#+E4b9f6n|-ZRq_Au@wLD;z04+L$c@lDS?zFkoG`is@Y>Vr`F# zAyk!Wsv5X<&>_pv#?HXnx?TouHv^hnspS}`{?e+ZP-3D|=h^6N{)3}@DINx$5RJJa z=Hm)AIshC&UR}t0T#0K3847e5mUWC)g0EtXEtYia0)4B^=_pLfH5M9j=7o)#1q)E; zaAk24Bs;aHB~FLY&Ofg67jZS8ui|TAH)PB?z`a9=W!8H%qwMHnrFM|XDB(;L0)ns? 
zST~geOTEdSaML>-HR1dH63PFNXm<&s;O5j&SDx(ZGD4kY7e&m1g%Qj7gGI!#X(?>8 zaU!M?W6PgMUo^gUm~jLo=&N7iv8QQ9#Y!lW$Ye_iBQU+#e(H~!ds4z zxi+^`qEAm5XIh_V>E!6{Ic`A#d^=2lehjUnl#b@+T8ap}|se%kOF=c+q(&mI!D zF%tpsvy3F+U?Bnx+OB;lI^$3aS7vd-1NeEJ_D};G((pCb$#ZvlnLld@uc3%o(BbA@ zJ09t?`YU0=$}EOOL~<$nMBLDIj$-RrUN0Dt%-mzN(UOr&|IE?{`gklGT_ho8{u<{` z;O|NQpN`LyFOd|*v@BL?aSW(+M?W8|CB zS^DcMOq<-n7}I|8e$FJCqXgW&vh8I_OuJgyz2Z}G|GVdEXE znbBUkhBZ7HxYGj{?|lWYa6;x1Mb;f_D3wVA00+d>>Figfr)D#nksz69c;=ni(T}!?1`j8`I^r1Hw)S5 zb~Cv(Sqifb2H5H4A2??(Z@{E)4c<;EcwaPkU&=J*j%8Gom(xvFy7N_WKS@$<0InS* zy)zZ>QE);q0y(bwpxA(G2kB;i@0DyF<~ZO<$i=mTPPZ4P-?G1pkqSaC`?9;o-IiCSQjCVMS3A+77+^TDbHnT6U>#lD_NTp$lv zy@@hls=5bI%IJ6;$u=(buDhd(-U0;Gs6t`c10zt9XBo z{nl}V18BluV1~QA#(97Uw&4iNXuFSGV9{28j>T|}VKNK6Fp;mEFJMF}F#0_(h1HZ? z=P8KF%dG1G0UaD)0!#smK~(X%-#szn*n!8E+AKZ5cl8U~Ee%sW@p)tdwuuEhM*s~Q zNJDOztfqk|V}w@GAQ=??WjK(ZhwC=CNmaCdSUA4+nKvRWB8$%?1V7^Q6PJ%t5W9W; zynRAGG*cjHFa{iF7h!2R47#QUu|1C2n*tDE3-P#Nc2Fojt{a}WX2U-(X57M5)J&8G zAtRWM(x`~fDvjDG_06FLMVYcZ_`a zSqr#r5s$%B{H9F#@00mL5_PeriNFSL+zIPr(kEP}bZs;U6MlXo8*|*r7QsGAFO2AA z9ATDi0D;~uUIuz*Oe_g0FBkPaD(RFBn1uxL1&Ys;9VLWagYQ^dKrtW^G|{1Hs=zTw z2#Uw)w}XFW>p=aR zJ}MGG3Z{4&4`fHD^kH;%V^!C_KS20$2=A_yw^Nq30(L7urCBRJ;hj+Xad|4m9Ee4tbgJw-YC&JJpxH z+VJAfyWuob;qHlT5zV{dY77O{lK`?RUv9use})4UW`zVwUo-YH6^7%L2MDu(VU=i*>87&1Kyy1{A+QPa4ksg+JCFX}Z-{w=Q3L{md)_nEsiLA6$oV+}#2NiR z!N6ZrVntlw6iLv015=u<*C7~qyOp2pj6V9QFrcG6RUloiDtgBDbz55|86kayEC^9m zD1`OO2NIXfOOcX7k_(Y|cLfFMHga^)Jc2HI!b?T{dQRbGImGw`R!Ay#22V5o^z}pF z8&cl5(QMT!ggC0CvxSrvDSOFAe=mv}O2?nDhXE4YAh#BC-|TqAA5DJWws5rU z=B{iK`IHb+;ua-r8_P1DHgY&d2EWCQD0!2<0)iCteunz&0Q*b3qL*aHkqKsA zo}$Z*8;ix^EF!q8vMB=IB8L)zV?ZJDQ9K?fl8bDH3vbq}m_Z_Ccg*>?fi$wHjUT~3rUlw06SqJ1a|}7 zjU%DXYbCB#Z>zWQM2sW$6Y({XRLd^JEaCWk5Hj{iU>LH5FB~9*u4cg_#-}8V)2NLf zWHvhQw^=8aZNNWW2&c0p-h;Uq)D+%mFVC97Bd;C53(nA+YUmDyA=9s$BJu3I9V+4|3`2K`iD<@Tz z^Lm!5$zq^mTj!;8{D=exCBLr&fbXVxnIRk2VNWE-*|#{YHRx9#(~nx3VzxtPFP#S) zY8SK$(msy+SF88$0H>42D{DrpSJ-Ql^!A_?E-VyOAH%bt6ED6P)lnJ%+vvxvz1;M= zx4VjVYbNE~^paZR6+UF2J_~V(0AjwiU@i3~Sf<|8Pcj~lf|8S&zPd8}ctc&wPTM^V z5%IY3X*z3vnlogkLTZNlAro)%jHrk`?ou;jdgd1P;p`6GtT-DJEvd2mr`dZ~voN+f zrMq(~59c2Ib)c!yJ*V|)PUmV4&Ni=qcU~oV_D(GWhz6w9G;jH7-s)=Jnr*@M?t=Zp zg~uTae-1Re7u-HAcw8-bv3+9MG@u^5)|S9iy~BEPu7 zSwFdrziVtsogN1Yn%OK9xpyb>O($M^PZVWLys4WgnVu*;nNeDw?bGYbOe3ux9wL5W2nvqj8v0TiJcrhdMp$-1m9v8-dZtmm@~6f}+MmrZ7t&CZw2SyvuNtXP??JoH(y z&0Kj@zhXbL;&{H|%)0tSV%3#(#n9YXbcEQme${IRAONjmS=am|)&k7df_&CO2$^eP z^=lC`YfG}WKn}K89{3fwEWVZQ#_GX;UY@eTRgV^x@cW*}4Prk1|`DcF$5PtsOy%~ByK{M-r z?9KR31$B(gmd>`=2=8Rx{A8p0 zYj}zRDS%X=9{-c@lqy%)g+(STg7nG12v61g7RJ`6I{zXa*2*BvO`CN6ZQZUqZqfWt zCHe?}_2d4S`_FZ|hkL2qNB70laUW`_j%Xr~04f>AzuI$?N3O5CqU9so?u@ua)`yr~YiW`o&-csUIBxmu#c zmb4BufKxCIC3-k^3HOL7$5O$`@bvPS5W&NRQd}AC2WKFdKcy2rJgXqOa2OIdAH)*H zyBrUI*GFV_PjAEsY{wVoViwA5P!fWqRtV7tbon4D!r9G6Nr7Yc5kqf->dYW!^`*=N zuV3q{S!1x(9E|CRfhm}hl_;9@j^BZm)ahwCR(Si=pG5jJ8JNP8*mWW@wIF=oi}It4 zI9|y#VnMaB-j=6&z7v8Si# zMl>(e9#OD!&y@cS`4UIHU!z3~E#x3^_49`C2EI8wAvyjdDc4cI;4F9kCEwkU(X{81 zm=w!&H(zt-DzfoTbPIpg#s`xN6s|udjfd=adj&}`#PzU@))x0$bd?e6E9|w6lJ2FN z6JiUtU3pzO_5AxoI?rgI%5FDsr@tn?Ca58@?e*oBl*y{TyHq=i@&1H;RmQNa4HIYi zW7xph&Btzb@0e>G4+xtFvmB=I6wQm%#%5|SAb@l@;yR~?a&TJfv&DFQ^`k`|izw}d z#AjR2hf{yF9x+i!$#%oRQLMEc z2_PNr;CX-gv*-5q=`K#;`q|!)zSG(M*zfRk>e=?$!Cdh5b09o*IzL(~1hz{XRomw$ z_;#Sivp4EQIQv67e0s1AAb%;dK_rR|5Jo(fR5cq2PaB9u@xG{tY$6`TMsg#(A3e|+ zyWP=<06JsrlsTjdicJ?%fzFs}4mq%Hr>g+LB49OSnt{t=%3j4x9Cm*J_nHQn5BXHvEwrIW4y|06u@c`aPk+54ccBK)V+LOwPI!cu0N#1e}t#X zgQ*jHuSNb5o*n~bNcbDg;D3atYHzfYx`tkh{u!PYdbVFfg}+hTYofCX8s_$?cxw)S zYta5XJl!v`ReNhR+BNd2X8*Tz*ko0Cv}OVbPkY~*0pRueF 
zBg#V*R2MF!9k%eQmjlw_1-dVXt>~!o2piQ!#>vArk-qY%XFZFoCx`7AAUq9LUE*Lm z>QGew@LM{}Eq2tY8TBE)P<5Hl_^9i*bhxT#S>VM{w>hFBxm|VTw{+N6y&`qAXGQqS z(bZn(sNd4zRnf_#zO5H@nFl?q5+_IffEF{G`GUv_=65pZ}N}xlkxZM;Xs~Z9G~gGou9Wv zuczbw=v==+vwnSV{tr6LAD!!yTWpf9CX2x3<3+2Mm-xfw$?m_=VUF-S)cRHBi|sK~ zo^pSROAZ90dVtHvlDL}y9VYeXNzRsLkpAcG3&6-*lK8uFYZPH;xMD*cjYT ztC621`X76Z?XK*%l4#VgFE7J*=k!+LK^ku1q%AFUlBD^)-n-?y0T^_)w5i0g#oDa+ z6ibLj(2L$hBo)D(9?W9-KauMA`YQ4CNXmw+MN7`>;E`hOWI;$Wo|fFmi-eX1 zq{QGfX{PI-U7}T7>Wxngg6BGgy^>dsZIV3%+}8^f(dPvQ{7~@~N(8G4g zBh`awTCTpEA;RGvR9)`jb+K*!r=%4jB2EpL`rOZJ8=I*$bULGj@3Go3B;D70bVc4` zYgK~-V!gv2!XfOT_L?&D};{OA3C_HIqh z={y7FH&X=w9Y*W5mxFwO+r^(;3+x0+yoLzMN5Su+ZjGCD7@=gnpM$)C1X4)A?L)_MbW=x&SGVlBqY?dSUPtR25Ouz#%9FHt;{9R*r4kvgUhpv{Uh> z<#0v+hY*3iagEvvf3t(k;p{t7jNs7#U_e6_qw9_6+5kGt3$mMyC7D-#rI?v2m3uiR z@)pvnF76^*&(XqglZc~=Qb}+0SpumOH(@QXP4<(^M~KctwXqRX9*OJ*NAG);ZnxzH zjv{3roFti{OW3U#2@%=_Bm~>b2so#K!cW1K7>m|-Tt55#X;)P}7A2ia!uRt*e5GH) zBd-2hku;0Bbc$~o1Yze2eqO#5_~`*Jgc{z3z2;^p8|jbUXabFphhtbM+=fGC8U-T? z=*_mi6tb}|rcO_pNi$M5Mc?%sAj!%*#5Qye-XFI_dT(oLYvm`f9b$voc5p~Bj4S{C zoig!_K}xp;sYY^b6swu*n$HlgCNL7oc93IS-dmVzD+4Ru3 z3cj&rc*88)|4!X91wMP%XTCaZ;K*2%EB{&QxaWE`NsH*X^35>t zT$Q%wabwqg2Rv838QmV?%54W` zi@OI=#aVwIeP@s;$D&_wa__@E7yN@uM^zYqT8FSC#40Z9F)m_;@0?QInX!h}ho_u6{me zsYWOQP23s)!t59vO?THk z*in`qz0@ken}_Ap^WBD>{%621+C*d_ja1!idYZWx^{qT|n8ea4KTtAQ@#js5n~h#V#tRpWcLv(b{5LkfiyI-?*md=NdRp zZ55#xSj|DHcQ!n*3cS=yy6n-UX8MNF{yPnQ7AxjO7GMI69SuM_+Ocx={*WgXcsf@O zvSpgPq{&3!{EkJ_o!{)RND=|u;Dk9B1K+KsBP0E>eGw!BA)bfNd?H|>L9$(rK)06t zZT)p)!HTnIrywb6r-iz@Jnzy2Y!i4U;q-J7M!Ah*R4(%BEH>AkrMdwYd&@dJ&#?^; z$)QvnlVN;z&k^4qN6UYq8$y_}O)F!^{E9B0KdoqtmTE2T~oifcB^2oq&61?D>FwW5rp}a)UNgYmTvV{#rQX+@98eAgVPxY z^Otvf&gQ9GC7aA&uzuo6M=A50oyvsTF_oaY1<@@heaOLUH=iW;moI@kcvNfox7y6*xLh==UcI+gDv`JKzZ0_ubVx?atbU(3L^ z3yZz_)I*eG67i(+s>WLuu~#LSfhC9qR>7Sx9ZDs(oqeT`IJ^hS_{p}d;V*xYNt~?p`KE}6yT^jv4di|y0UAg({v(sof zL$f<%f4-M|u;PJ!?zm0&pWaI-WJ75okbi!?|C?|AfBRPoAZHNk9}k6h{1k=n{(LCt zh^Qt1iQ*HXnrsTEzZew0f1O#><kXaOtZxp>lYt^XbF~ zJB&X$@K9L8S&KgB<2urq`kJni7k2s(cqqIX%U5wtfci+hu1#Dcl$gL zt4+Gz+h)X;$DuvH{?$&SkP)EtrJx-GyubI>&@bIix%8AzkNkMUQv7vRvhcMh6}awl zAM+y21DvUSN*-I%)=g1|m}o-i;URJwKYi;(JK^l2#^9hcex{ts%y`N)2S@NVa8)|Y zb{?Ga=Zv6PGR>b|Yl(WM{`?@Gd&8>&!AD3YnX&Qh9lj`*B);r>XDwiN$a8ixcvNs9 z-zDzF1`6C{YG*6mPThBuuf}U=kfS6O-f$y~*39*QMx1~v%k!fYt=0{&DTk&L69S~O z9hhg5QbpR1_TZG_A#o|f;rys@VH$J1Kb+h^OOu0U%)D9Bzbx)DCAUOqAMxvTtJD~+ z(A#lQ&E7(^!36e{i5D0pA|X2 zxrlLECvgdpZ?6f8A2MG^Uc0%B417>*o$E{!_ApT7lzoeOA|%;ZfA$iJBDixz>AU*t z+~XmKy^*((zT-zph9Q?9H6t#t2NZx=S3;<}B$HbFu&-PEXf{3*KnF-_m1N=m#?8!q zM-PJY(6kt#=km`Ej&|rZg;rDHNYY)?2e4EN3+E=Tob;GMVPTyzMP3bd->A=HaGMRu zHtmMh$}NFzrYsGE`Mp8Sr*G(8Cpd!}?_Cq9XhfnlouzqU*Zf%cQt0wOflpvYWwVMZF8SJB@YNIWX!Z@Q(&}N?}AKU!yU(N6ny!?c$I~5 zhUKShj6H^o`grx#m$xJ9nRdD;6^tQ``mU>kDiBuaZ@&P-ivF~F4?KaeLKwC}b+nfE za!`%Pdg&iw1%)@Y3g(~RaX3gG=w&AVPol*?j&SOS41euqs>B(eH^$O@_!FG@@6jTh zL2j@}^UqhK^=~Snxyl)T^fI+B+CAy|cy&-qXuQ{7;nQE8j@ILL6oEV|xW9li1N*`S z$r;}?|1n23*x@OOxoo*?Rb9T^9h&2P92}kX*IwqmxXj=#mAOBhj=&t%VO&cj=V$Am zPDetbYOP1EMpN8XDE#m{5 z1=ZQE{yaSNeR*Lw{R`xj%ZD{%fC;W&`SLwPyo$wG`)l&d308 zCOfeRQgVZtbUu{#b{h?ec(LX0e(-BRpw zok!l2tR92KK$W&6OYH2vZ9&5Bp}#WSF8lg&l6lY-ZbfDJ&h$8qZ#FAW?mYRC&sXNOWY z%YSEp$sSr3y3IVN(-6;aC+=rjID`)8=Nz~9Gc6T-->jMoN*jHnCA}L&k8AMM%pxK zE;^JUBFkR6xB9_tXemEy1d?f!9!`7rZJ?{>& zytbYWdEpBdV>P%D3C%H?$Y?{m9v71sA71W*INy|l7M(gTz*sjt=(;(^@}=5EPx%)D zx#K*8$Qg!?ujO8xr56CA#k{b?fUp-a*PZC}_Cv{T&dCl;WMd^2n;;A^jTDp^qQP5p zun5rv49zBciQ{jpVXzxM-w_ zWs#2!fmKS;3&k}ob7P1tA~-If4xwZO-CJ@wnt`rA0c{@|1THa`9kEM;Xu4eS<&uV; zVuF#rya;YgHydM_zvMX@?BCi9QW2qN)+CGO3RpD18UmRDNNaY|9Cz 
zD1BnuyBFNno>OdV@4Cn{$E!eY-}od{6U%gL4r1tNAmLENa#m*hwkt|QjZr~pFLvmY z0Dm$LXHHJ8VuIM)qdsrLi6tH+ow7^BhoIN81$=Lx)!}1&DXBgDCD+Oz8lmh|88rdK z@=MX0;*}7}n<<~dihEuG(PH%gJTTjZ2yC09drQpH`IH3XP?Ll8r19s@{RR@mHG;09 z3n^b=$gb&tT(ht?!a|q8}V`!KzYBCW6`Urb<+{l_YBdyJ?s1rB}VO^U;7bS`sc=mwXOt0LBuJi$e%y+^xY@3QgtEWg?FbF|jEUEY*n2He} zU`NpTn3WhW+n9clF~9;u)@8&>Wi==M63b5UC~|V2P755+m<>NKgOYIvBB`em*q$#3 zqbWhr{#2KeeE0js=aR;LM~iHR{FWqT)n}L9@Ua0h#I=|Jq862C*6e0BWoI3=h6b?H z8Wia@N5QSbPE}J3daN?%@2qOcNPHZOz7PxYNkqM^WikK@lt&nN95%C=5fg({nw^v3 zOx^6#u(5}@@3)+2h!q2bCqN)U)%a4}P6Hd>2<+q4400k>U?T>ZT?@0M*g-LS{*D%J z%hvUZ`>7_~OHed$fv~bSKctod!$cdP-q}QJUKn5UkU4naK9*P^)>=)-obV_zE&zJW zPDW;s{OZeORPL9E8EaPh`YWa~P1 zhdQTU7a9=BBYi=W$L{_iHqy8;cd?lX)BqB_gDq~8W;!ivcN1-nwvnR{-8k&4*b+wC zUaFiL@Fl^wJ-O}G;Qx4LfTK!e@Vn=vSo!;?Z&rmIWH_Zji0I}s-Ba)MS$5;GdJEyE z>*+RIOYnCh7{8X-ImDOslp5z6W7q0y+IRg!?DZ6%SZu5Pe0BNetQO(bzOF<}U`E?_ zO_3&aX6nB4%dxz-<6Y0)e#%nE0Zzw#na?Tg3JH(Rto6~s(PV^Q3>jkHqJ|T%GO&Qt z@r&x3%zLeGJe7NeFIZ$H!yt5IkM}>MP5baoi~6uqljX79)qiyVOV3yJXGoysymt9y z9B?|`usG9{38($GC{XZu^bB9yt>BxBA+z5<+lfJ1PLO4>nJ`4^ITwnL!lMuSP89Er zMK&2IupR+UM_K?HmN0j&P1&JZ4h=$?5nK z#?Nsn$W8pm-<^()1i)Cf^nK~Zp5d{aI}KWIAt_wq5Sj|SlkJ7J{0;jo-=_~?DMW7p?8`+UW7O}&m;T(pMJNI7E8b)y*A^ky+i^bvB zI}thNFdk=0cDsO5f)Si@46jtk614B2 z)EHiAC?)9MiM@9xR)rx!?@7BK_-NalAp@bHanGHPJf+5oih_}4&@47_xUYhk~OuBaGx24tBc>UnVzg8^zW zIi-|0iqMd}7lQac5)IkW+p|sCbxtZNipV>Pn8hL%J(6Dy&~_pOG4Z*@;rQ&N zJaz~*8cb{+H%%W=f=iYW)sRAkF{lL`7n&V4 zNVt;e$&iuyJTp-VRc4p+Z9Fs4Fmovd=wxR?%QFYoGY>j5x9?;vgk~~@W-X9qXI;s_ zAElg+XYF6fUc8fi%#Z;KODFEi+JBf_jv+QIBkeDOJEO=$JHr6U)bGAB2&rfeeHMy% zRR%M|u$ER+nw3!kU0jcBc$E^+j7^DpPR7Sbo}WUjSPm3)DbEa|lkiA8=XC5HQfFYn zZw{mPjMzYvTg~Oa=S93MBV{oPe*z{Ed=pU;f*5Nk@Xn6F7L(a!!_`DdOGQZa%t?ga zL^h$p;^Zlibr?WHOhUoB(WIm?(XC_w;hOo>DWplx5ih#rc5cG`vGHSsIrDJO#3n3> zB3W`ChGf-)?9oJY5{7J=EZkQ)A=F%T0!?bdiOLHrQaCEwxlHMghTDXa`#VSFl#wob zkjWn*vqX#9_TV|9s6x-g7ox=%M&8&kP?U?JDvaJ%rM`tUmelgS$@feQ<0~oo{$>aI z_Ji_UXn8S_#ap8JlDbE41{ft9$%|V&-?j6hI>SmP_)s&Vs5$a?MMkA>l}k9=5gyqf zP7stF4|QdrfT5^&LP+G7!q{p^*+Mhm=1@*7XzDT|M-#+e_MW^9JjjmN!4Q#S^MENk zIS9&w1;rZ10=;h@5R@7RO2;Q{;E5@H5NHhSWhlTZ1ffAtVoZEi zG0@0Mz0XKNivY_Q4~uy7~B2@Sdmf{J`0lAQw!X1_P%1{z-(N{90$xfOeVpQ&$ISh&whe#1Yakfhy5a=AZm$cgN1UZ-uhN3b% z?|`9LqLr9Wa5Tuw9I?S}M2rQurx3e7uJQ;M_dJ3^vEZh%s+_C97#tq(7QW(K9eRQU zB!m$^s@I&!C}JuWc#WVSl7*trZfQvwDy+mQRkjp0KH)XKS2;`GR+mjyub{tt`~j%+9f^_?&`1po zSlX{@Y8SzJl2Segaz+b!Xo8r-5Ik7ul0hV(t@LG(c;l9-W`q?8 zq=?UWg$DV9G62(H&5tm8bJ*otaOe?y0{hbW}$>UhQ85n$i(rl)HI)aiTD z1wnOdp}RkNwVyoh&Z_M$Q0*$J?RdM<(Yn#Kf$8i#P3hifU9g4q^dN90-8oa;gTK0m z#e2r=dtTM{mZ^SrNA-W81XZf`^)dIZigy*Nb`PE@L7DfetEj;wP(7AtT1HI`JFjp^?tn zAR)duhPzP{SWwMLdN>;V(>4Z{*7l4n*-<6=WtTs;Sc{`UZVgRJ1A;1sBAn4+$uc-( z9)`dZK`aoSy`zvFBI+38=(lhY_7>02lxR(`e|Az42*Tt{7X5YK8S>>_l*$|^;0!s@0Hp?zgucpZY*MREXJE`f@QLT@DAK_ zHu70*G&5tgj%Cc>bS(JYM03V;b0oO6ZaOYwY9VsmwQfA?+BD!F%s;Ds8cI&!f@qXc zvUNtzqL6x;m4IX~4-JMpBzrhR=rLf5GEf!+ximb!wr=1K^EcZbBC>9Ta9JGh8PPT< zMsA`>YafZgEl`w^{wgD4KTG%OlKVvpWkQ1ySP*G=i@bMJcuXS;U)13nP~35)wAwIn zOms!s@Iu*e%}Lt_7;+wyl+!Q>Y!vb|M+nX%PrL%wP?F~?E8nlgu4LChT~^K=-aJOc z!$j+es1T14F_x_H7eW!wqE_lBR^~Ocy;)Z+1y&rCR`%^yJo{E$2v89+-wjdWi1i4wHQ4;RL*FBLHJ@@U3H#jK+w}s# z_&Jpa-$ud65y|}1#N}mhXY+Z3cSGCeFlrQnZF62Qf@qO6S@I;7eK=aZ?gRHVKs9JS zSq7I72L~B00s|d5hG^8COuP@VqB%tVd4#kfXQBh)jsMhNOm^$t(oqLuVWvr=Z+Hu{ zNsVjLRZn7?gPDOKZx|%aGDkLnk;)BZIrcZ~_mCYyj3XW(OAgfpJMkgt8@hLzp{y8= z(->k>G+fXQ1VDXruRwslx&-C7pyxFN+yF5jCyo&QO~o9!fJN$MgU0X($(X@y_NXRo zIg45JEeWL1S;j9EIj6?fiU4BOIccH{WQP;pjUm>;Z4zTicAat{L={hlj~Fnbo~StOkYNtjwiZxtwR!9ApMz$$Luq`^NyYv z2yn^jm4TX0k;G+Se-yb&*{8BAV-)q2lK5_lO%S@Qm>m}fDFGqK}L`Q$hj;Jk8g6_l>Q 
z8bhJZ&XAlrL{1q6{ZAfnH)tcH&F%ht&ST{$x+p z7dtu|xl$*X8&aOxL1bMFD?U_MKyyZ%HqJ#F-ncCOE2ras24^}x&$Pa%o^~gdsmk+C zABm+6myee)ea*2f?0wo2Nn0`&M@UZGm;4rPnsz4dX{pm; zYpNpDO@Di7f%GGnYUGK%Q>0;eto(!K46B8OrohGYnyRIR(EkX|tnIBJP|*I^_@Lr^ zE0@CBTx+S0+hZZ4zW#Zi05t2WfcU4>MOpf)8h@w1fiqi5DX8G$nLF~#Pk)0mXO+>VPGv_bX>@K@Xz3%*{|=mqZl-yvD%kzM24~7M zx$S8wUQTe;{@;Q#-So{~?Eg=}nZJykn`xh!0N~666W2lAXQu9xZU?3wiwV!nJb!#V zF!S0Ses&SV~71n6fX^X-dRa}Dv(&=$jMbTiTh>+yCh&agk zB``5e*kC2=wz?*k==z5zo?B!a_y>MTSvV6n!FtchCo|U8Jmk=gnjlY}ivL5aFb#!{>hP1>}I$NTmKD&+<%j;jOEbG0E*Wl~D9tLPh5(B%y7sWH8^H zme9aBO=+^=3^^K7>&zbx%S32)O=)4g|8}T`@8Vz^ODyTSStkyqTQdDwy~VFTz3f za>xdn30~vp3u{H5B;h~QCy zJp5pCc4vUZbR(Kg6MO5O76@*tC?_5j_7aL|;+jMIGBKJ%{YhId5aZ-rRAs=jq;d?! z!qLwT^8$1$vY_V9p()p=ccc)+|HCBp_E<5EVe*?L2GKd(D6Lv|?g9j|C zyZVgW>^A^IKW>mxqOoA+_~p1}<^&Og7NjV(x zwJg0ZXnIAe9|S=VW2J>Z)92N1l&1a2*AeN+R)Tw6lI51B65A|(TO@Mt));8^?%AEU zKIiv<@Yhxf?x`8Tt&>AxZ7aiatQjJ&lS^G-D|gLPE8?L}9@Da|!rkNllhZNMQzxHW z_rGyE7Uhi29w_^lQ^0qohv|?uRacNYl|UfiJ?gQceXbW=Deh z^%In`#{=Q_)2zn6d|S6;gCKi!f>H^s$6O!$Q#QtQ^wMILc+2dH?GsRGV(H}Y4aGOk4mrTH^fR+m*~&^SES7dDujaQ~2(@y?DZ&8RI`_|yB3%aiVpRnl5N zR+O?ZJ-O4Jy}*5-SOR;O;PKY5i|nAPM$N(Q+SSX0iCWd|@ayIkUkqj+1Xtz-GQ7C) zVxmxM%eFso<5>Zd+2+cpYvY@a-UBaY2tK!K*mE7-3Ia?Q-q1D1+?{Yc=$f5}Bv(zk zl{n@YPcn2qYdSl7@6R;vq*AC?6xJ)ZwbH`bu>WXJQd*8-#d zNF6=G<(WR^;>E|wtt^RUkMzZ@h^hz|s~e`B10-t~Cm?s)@41(SycA!&x?|A6TE89p z-piEjMM@`wyFvW_u1=DG#?er}fl?&<7ZCHCl>C*wrOo26W5+ajYUKvVj#K51`> zquu!Z^vs9nO#CZaStso~`?rf#-pxL+WNtce-+u#^m`?gyGtfVBSY_eh;`8*^=XbjA zpHzjL^$7E>(PVy(kcZE#;Z5oi)kN<@)#eT!8mz0R3pm=PtsJF#O>K;n+JDa6*9>M^ zec0vZHNrCaIsQz}=Hl_hq*=oWW`(g7RosOrg_X#JfF9~M=K${W+X2wAHF;H;~ktzmef+4oYkN{4{2n<>JZ>J+g zErzlO1E0bmHZaIj42n{UidpIczZA8&6pgABt%($!z0^f7Df$R0hIFYrog_@PjgcKB zjHiiA8;z_}QVu&(ms+IQYZEVtH*r)ZUS^iA#7XhkQ?Q%{^NLgOq&Ho$m%ifFR9Pl< zWuxh8zVvky3IWw7jG;7lOCw*Uw7{tpflpTYqjNKVK(h!<6ZfeU2WPXmMbjN288MTl zyF<;wOENbpWkq`$rCMaf2V|s7WaX`zCGA^o1jtItwuouTD%HxWn#f3b$tvZyDEG*! 
z^~kCd$!W*SXhg(m^S5ekH0#i`-YaR;BWl%+Xi|%i!+5pcE|*gkYEdeY6SbE!k(DzA z08~pdW_!&RwawT6ELp9MYQ~1;pJ{m46~D z^Mq32A-}8xbDO1Ai(7!Kt7`jWF9lOhSx4q}v-B1Zduj!7c{8GR(J2}GDQV6P1*}l} zbJ>pTQ|-63+A#Jqk4!rFHk!2ZI|#sQ{q6XUtF`h9wQ@lLG|{IWmSjq?{B*DQl^&!h z#G54EFi}eKQc8~KR59%MV%y17t|Ze#kv@eAE>RK)l})9QX(Uzt%&vUphH}H05|dU} zoq1Pki&8#ObD>vr&6rXle?(D4_}h48zth%|fbe%qN|8gI?*pPUE5pmYE@bm7N18Oh z;qNYOQRWv?speE+RZ@8up!ii&zP6=XGNSu)Sy#;l#n%Aoynwa{aoJkUPDUj~f2-~$ zD`cBjSK}1bJu8ZO9fdNl9%iAQ3QpA)O4a7%o}AO3s*s)mA+;LP)-F}mK-aEL8dWz= zwHq4>eVmjNl&XWOGPg?P8dOyYzC$hPJsqO8GV_#*e*EgT_T8iQN@)Sgz4>iR{C%G* z71w3^CQQ`Zth$8k+pl}|{TS-*jY!XTDOAqMXwo-du$c9?8m+YQb|Eg3Km zPj5RD!jUd_k$uE163HaQYXnp(oQZ3+yS4}Ss9Txzl?rLmv}*Y7DPSwxDHGJQH#*jY z`Yt5k=9dQAX}moEZ_>j$;=P(zbX8?cwGEkd z(eOTn{XWfW?RU-;vmviqJ>!;vhml$iEF(^vBahVDtTu-XOh%kd zbsuI7yA6-{@{gKx=_|_V-BKHU>OG=!D*s|xS2s{!bAMEJdQ=Z);QO&fWmETw+}P7r zoK%6HIkUd|M_s2SJ%Ua8$n%Ur`)gxQ(sk^o^&-7>V@wTf@(n_->4tKRNAJsd2j~U9 zQ+TdB9`n(__1e&r48!PxaZk>%6DgW-o^o#k46%#cBmnlU1B zGQDo1Dq%ACu5o(rXkp-_wYNfT-FRuFLACc-E{n}(UcorG3^e5jm*%Rc`p`oxT)3!OK7uc`g+jmMij%+TGcxp?;o#nfZb)^pI9qyavfg6Y=YN`dmiRI`?l?_H)BD zh+*2KzXs+jJGQ4gC|NC1PX|Le=V>CL<#w@I1Ho& zEQ-sj9k)>3iHet#Kwf^WDsz583Sm`EqpBulCDFJ98;Mo&iC1up24^aZhFYcYTMu7e z&gaB6ld7otEY>kc=$%u9Zd}mui8hk&2Ca1JmM$C1$D4i9>|`Fi&$`&V7o#rGYeBqZ zDxt|Ax=be3D^J&T`O`ve{*u*}<`*Jbl%ui@BI4yTh0Ug=HC?|2-KyovoLSVv5~~H5 z5jDh)C0uJ2z13Y*FgVO_GkR(*I-=&4c>!-fC?!90S-@PH?%T(LF6wKGHxSATn?pev zS|ZCT1Yz!#>%Y2?^@@J=GZ-#)8VT)(0+w9+^M`I7KbNMBZ+)Y6oMii@M^&(br(BNN zGe4&>ubR@h(Wf0NeE%Tb=BZ9!$jmUrbT+TcX3>&$ zT3G_NTJQ%va5IC1nUcOXF9|)^*7sw3i}uQO3$Di*J_|b;1Eq)=wBw?Gsn(0W!OCCX zFUgsUdp~mTwFwc{6zF}F`cP-JckcY|I$UmceYyAW-4EJjZKhVE55lbq8IKY(hpRtr za1H3rs@eC-8y#O8*|F~H)U}`J9ojgXEmE5eWtlwUTEF*k7RNe+aGwIb8;?EvLD6?l zY+s(x(Pw;KV19pdW7JgRE|=M)gYB%N{^>1;+8}#%_tv)I+0QVe=(~<})1#^0CS5So z_ioce*BmzF9VX6=vmb7(t!zwkZ#(r-^>69pea3ekwpDs3Et$$XcP4Db%i(MG9huyn=4Xc9Pj3}knitu(OIWTrdj5_Ih@3Ss$KQUEs*3mm(9-` zgk7Qbr!B*FpZoNsU;ga&9#vd*qMO})t>&`TW;(*^O2($hchU9h`K0pwVVUc$aZxh? znH{h7+=r(xlhTkeN>(rZEOxQLobXB)D)1P(Q;?zAW=xdYL(Q{IF%osP? 
zo?2#cdzj@sTe{Yi>EIvW81BAtbM>i(+Hi-v1MSR2<%;`#o$l*rYkqojDfPBI@{g$9 zEyK)g>9_|hZue8^TC)kvbC=plM!Ma|m^a$KPs2U%Tz`O?Rp*o0#%JO6x2)#L?wbTJ z9-FsI4@l(}n{_w5kaO`f&*miNKk1EsPd~i+t~XY4QKx^P-0ML0*E*co0xesDE2H2K9KOSR`NOS3G$>=vM6Vxc~=#qCT#tQM6yKV`>?C|XCgIC#}j*E zo9f%E0%ogqsxRK{ojBXuSgm;8bn?u4_OLN{x#fCOh7*o!WGx^{@xb*7czGRv>%<}G zS8OJ3>dfZtiq{<7Mnmv}P0GjgAD!hj9D@$Dt&naD>JB!A1A6@qkXxrLUb}v#-UN-W z+iz!GAG0}+1pUybakXD{#n?Pm(r_~2JeT5eXGz-9{_0WjPJZR9b5h-=$VG-|i=8S3IBo2mAG( zTs_HxHs4C@cDeh~MW6lLSl{KrWlQsrdns1VDTc{jHWN1zTJMNayCW)Vp#5OrjYb6r zS7+?{V34My`*i9pJA*vacE_oS0lujSnbmLa9v=wI)OsHL-28EH-8_dqvX6#ATm*0a zJ_vc!apUmDQpYQvd!-R$kJNh-w*}m5j&82uauwnPfU5bo%G+Nve|`Ry|9#|x&0wj+ ziLm3Vzs_ZDfsmlb5UKw`p|*N}{_k5up#IL)@&Nwwj8SB89AgYMq-{}hUita}4Ts#c z4d*G#5uiF&{kN@QhO#cZW{^txg$*6t>TnP%jMbW<^6~J$`CSG}3{6q?)-hYx^AXR< zmx`ZlD5ZN&le4N;16zInQ6bkr>gx}Dz0P4ie{KzD$%n{dZz^ZS!L*GTe$h4FN%`$} zsc|#vhsx-v{@MnqaFss&0r*{R%Z!uDFfr%EHg*w_+^5X{@deoG<1{{D=50~asmw-mPXGu$71~!5tDS&H6j(`CvqMr|BJstrvkBu(L7|J(G{i*ByQx-|{-WH)ORp%^st3H%?k-uyz%Pls;()ST ziMg)ozRnf5GtQ~`g3#g|n+SJ7gD-IA|3OYlvUw}A-}N`Zf;g@)&}zX6tafr1Nk!qhKW#C*A@gY)EPXD4rA$UJc>%RPRD0~pp{LTNgt%zQN5iEkB(WM=YZcmSrny4r?dZl)U!Bw1L+FT+B8F!0 zck)0sBdfZHEjPO<*z)ZapiZ{AqCeCu=5XQ(P^XwN zg0wmM1a*UTIHoH2Wjy8~MEmd&V?06xcwTpu)|F-i3Pp_Zm z107Wt4tBC3KSHKugbQQF=P$vD8k@<^80kpLkK|$W z5Sd^hZFUXPQ0hoxxq!|H6;b-Y);^I|VofZ2DmsKI+gHv{b;pVZ1!ZAG*JPC15ch?XSxOM0Wtoy%d@+h>l!#D1ilE53L*z$f=V%ot zwA4alfD;X8@VN4lbc9`}zvk0R2lqP8b3gP3*}7g*FzFya z*Vm50##s{-{W?XJ`r>tCJ#VnUxQ6UN?&JZ~WsjTLVw&ZstNF=U59fg97L2URocQu( zKeDZ4TvaUk%3*`8G*$#9(fjrlDey`V!i##pvKRwfUz8HhPP8_Xckzp3xL7_%VL;=Y zp2cg^o64j7vK|bHRa}qyxHk@LOqin(;{+x3e0gLfuZ$A& zqG97cgw|FbBT4px13{{!Mm!I+0?<1gVHIgTWBIWAgb^BFlVcLD%EM?R=`XfCyqw!c zOvsIgP{yYJkG%K(YN~Cxbwd(TfHZn1(iKERK$;MGm7+)oLlsaUDj-cws0lUnE&(hE zmQV$;(2Jln3u2>56Euh-Jtz8>?|JsO&pz)sd+eX~U%-Gd7Hi$}p7Xk@u#m1_lpYSl zG96mG8HJc!?blRpPHHzOXruPCe-~tqi(WceeBVww9tm6Z4dftNWp#Dj<+gi&+ySJOgwMAe3+Ui=~H|mn8Rm(5;dZ&GCW+juS_$aii0`_85SQJOpQ@J zy>XHU_q+x-SOZrq8yv}+?8>UmBga2h@h#mmk_@*vqI#q|j6pOrKlvE_;qka@;_E&g zq7IteqjjLcA9ePc^|KV73 z(4pt&&eZ&R6tnxKXEi$H9i+Jpn!a*wyZFCrpxB}`lb*zw5)yNs`&l`~of&seV&z1x}7c)BU)^x|U4wVin!O3QLu|3`($ z?t+=k!_oW~KW=J2UNp#f`0^I3Q0vM9oen*G{ouvVd(pdJ@Tk`DUKZ%Icz4;~rgh@Y zi}j{f7U(pi_5Hl{uZQn;zeWtTeqL!~$H~ zMa07Jed+QH+V@_xzT$9ZL|45yewhwLT%j4g$!3GE5q4yB(8-bK*xF}OLgc%xbZFb)?P0027U*`2AdX{woV!Ews<%dDc6#cU*&4rF!b zusG5H09Z?fSPc^$nw9QBp@IO=`DQjtEV9uQJYpI-JsD-Zf$)__hB0AKBWlt$Q(y<3 zYXE!oUJ}>?2X^+JmuTfhqXdMnU$}H)Qe;N-s?5eiAfo199UGbQuSaqow!wM^RpqSw`di3DA_u z)G8_@0L@uHpQ<;UFojFou1a5#S0bCG<;+L<_fg4Cnb}9t7z#2c3F+I1tO9_>+o*_u zRMdVYN+0#!Lb9}T#G3-MxZbS0(ejG{e9I3k7v_w_3$Ptq5nVFD&yGw7eX4 zzEac^gg9UP25$748Cs6J?t#QqfNP^JZi#N+c6l0x-flVKp@r5~$4n5BU;E2MJgXEe zFcq69L!axp)l$=}(w){#z2u7K$P;G{V08M>waMsrR5T9<-yQXuyMg;kwJ;qPw4S4P z-qqD`cveVO|G${%|Bw1zQZ7AwTicQqjQzLYXKUYFoP3QEcYt+i&UB^#Is`PCd9;4U3?@ zfwkUcS{8nsU10muJ*Z&@KS47`GWpt36j2z0QZ5VaiAhJ z>M9n91tj+LfzQLB6kbkc97J-G9r~6WN&qTQ+4XRc6;n_I*1?s`(PIWazX9z^LZ;wA z=f@Muu}N4w$ifF?ltcCbfE0*e=uD&;86-&rOBA;wR-dTQSoK-NcpG^Foyr2Yj^vV^ zG+?P42vr=omJSoaWxSOKMJRKu!qNqa&<+5U7aKiA0N$o>U>Rv+1fU(OcppJyvmhsg zV7Wq+*>Wby2vH=l1-KNz4in@wqXWknQGz&dJ^_xUvmaUL79KZJ8MVWJsJ9`V`bd@(qz;;Wtu0eylAVD@n#nsPx?K(efL13599S@D zEL@nt^+*HusSl~ca#eAFLX}Z(tKbY=fiwWRhJV&eB>51ls&~-eYNEcWa_B_#q~Ho3 zoFihfa8YxJI5qm2sh5*r-nE51x#3P?KWI=CsW9A$z1YXW^7jn!PJRlwZqHh9xa3o zqdBtW7|@kpDCCDh4B|QOE?Aig@xVR`wu&sFLR4u{z}mr7$I-TeQOmYbCB&G@v9Z%` z(dvXT#i9hQ-O**+l!uCj-|(nM;{`@y*L3jg$2yU2y9k+sgGo9lM)4`PXpnTZqh2fEM0$h*AR@sKM+5lIfIrMB_P?Xt1S!;^%gasCU8qHaF=b9Y>ScK-Z zqyxtpQF&rxCAn|G&yYcMw);EJB$=9hpBEzK7#ZSp(|1B2lo{X-BGMK180raCAaoAP 
zJM`kflGssYya#jg9ji+34Dec@!c>Ppy_U|osz zdxEOm?!@;#(RdOz9Kj_qBKAdHuzcwGG6skr*AEinF zJ!K(>rK9&SKPTrC9^K=9Mn}2$p)X%U$9kasrX?XGsLX2%xlhn)Z7|>z0>lQpMOqX- zzW}L49bqg$Gm*^U`E6Sy*DAK+V<2pwz!qAHIgEst<7z!H#~9cjM?ji%BXIdAh^IYp zp8#}qf%xPLbV`=1e2HtsVlW*oZ;O64wG7fneo$Kx^^|*mk6XH^hIUUBT#K69T|tb1 z)NZ1+>FD0ySN?63`!A52-+`VD5cVGg zdVRH0CT{;t*qn73#=q@&$M1OD(bG%+UdW3@jqpD&R!6;%{;xpqzsyB=g*cPee+ruq zb;in=9mgs(5dOf!Y7UWJNQ+nf10H57D>dHP@f1ViQScaSKHO=--YtK`kD50#P;lhz z>%f-D$>%h~6!F~)kj1gz@Gx_HqKYF_h->-L!={yYRloSJ$NpZ(SDY5RzMG-JnCZ)| zg&#zC6J)7i~Xlg(8^DlXH(*3wfDKG?uXW@%C@{X4#)WZ)5Gt zr$2$-?}a=O%_=42>D_^b}owxeGzW_{Bfe`USjymVuHQ%Z*P z{kBs@nNeLrd6LRQ|B6hUa6m;i#O3ylW0J`MUa7&)7Spdmif)%xq!)SLzH)8iOm+1~ zW?yy1z3=d|cO*X>PTjfc@qsKu;?`rIx_#pw`gUq zlDi8&zdh9Sl3SZf;08$EGkCTX|6EUaXRR~0iWht^ZuErF!H2-d`)q;1M7j5R^XUq< zZ4em%Lyid)y>nCN*y@l9uy$rh56R`j3OhiWAx0vdj`xjv(h1*izc{4O5lkHm+qRBl zMMe*Rj|nOrOdST~S-;AiY%ZYho$oj#vvb+N3qq080tMmXaWD8c4jc8Qr=rUygNoXp zFyA-!;<#b)-8f$IFPjeLOZYO#`A};0NqtmMH(Uf3pVZ3i+Koq8DGr8cfrfH@?lG4s5=P-KGYnre6S9YNY^zl+ z?U4>$b)Y5Sw4I8d_uu;Tet1c-QGB2)LEI?1Cjf1)b4^-tl)7w;+(n7^j`h~O1fsy# zW_Gk;W%$~p1>ZDRxN@37)31zs2x+$BI_IUECi|Pz;w3f#?O2aqB@&j+bn#^0H`P(y zZ_6Q)@4U#hRSCyP#B?mAxCdvln!zEMLSiLPC{{-IrqC*OrUq}rR0-FJO=VyQr=hT@ za5xDDVuO^IrC|9FnybyiQaSPTy(v=cMu!VH((}GN{aBZeH?QiBDack`pN+##4(Zy{ zDxHXy=kG#FqFE&Og2R6M0Z6svcQ^xow1wb8yzNoSNEcR{uRENzX{QQ z((cfN?7Hi(#;cXge>h@1_uewC2G@W|Z}x(Vm5)#2r1c>x5p1Y02ECH}q;$KzI59gq zNPF@Cy3t|1?#eN4?K~nI`Es0?6_HJBln544^cJ_n-|*Gvgocwr_Zb^WE~`b(Qq1mY zsG0xDnKCNm%Ir_5&!j2;9p$&=V{FJZa<@nW33;w z6?ue%3e|x+ktRue$^alwiWG<3Mmx#@kZ?(Q8ahBFr9I`GM$atNI|JHL1Qr4f4{CVK zm#v}O%07KX>ah*kKsbWoA{(JEIsogv)VLZ4;n^HC=^0V6ZiAoqpJ9q<&_TL+lkDM- zBA+e$Gjb(b!Pc_lWQW{v_Fwm^n$khbF;Kz88Xe-#CD*Mf{)C7oXqFAXrt_e(LL$ zc=0aK@oA9>Rw=wLe`SFrU)=Y}48&zH<)SJf^0sL&sSchMMAF%A9eHBp-S}bo>`>MF zcS>e0kKKLz=5ycns2@$8{ty(bo%ilV_?^bP2Sa>nqH|XL9_W8vS&Ch2{p8G{(NTMy zk>GlJ3fcSY@$CwS0sgLOOp@)@(i5tW{dV+pE}E4#Tu99jkmm<73yJ&LFU0aW`3^A! zJy3A&1V}-No_zj5ScnHrC)pPW#w_dM+tzl;FJ?f8i4Mwb7IVRM3v62?*kHSW5HMKM zK1nMq0+(m|?4p#j9(f^xf)09Q_@!u*+rZlP@a@z()~%;`|308u!x`CSsB?uqBOOD2 zx2v%*-eNFZyLyu$w$GUJ{ip4V5vkefF|g`f&;rl zjA3z;HxV8i3C%a-?yJQcIfZzQ$N9)7HbJ~zCHf6P;x5zKq_IFr0#F>!-j7io-$gjJrkPAVowD0t2x? 
[GIT binary patch literal section: base85-encoded binary file data, not human-readable; omitted.]
zE=0vkZ~W7Zf%}I>oSPwDT6G;{ExaE5`AdJ!RUI@>yk2sgt3YveUCa!;zR!wR!78h| zxE*-?%<)&D#_D>68+ZesJ*=!Lzvz&H@dgEPt|Nog^(jg5hr|`Hqmx$kY54Jnf5%_P z7ODTG*Gd{z!rBvb{<6e*jys}*bCWovZory>KW40Wlf1NQz}})Va%X?!-h?qTqG~YvgAeEwFZmLDNvNV6+*FM}YJi$3LeNU6vpkZN~LAZA0 za9hL7J1Sp7xPBG?(0Y`%XMvV5|KjG@_Oxc{2`1cJ#VM!qAwJ_eXjp_)YAi08#&qH# z+D1=!?ES219im0FgLWa_gS&1mTSGW?UT-hXt7#*a(uW@{0F4u?4U^D8w9lOIG-|AA zo3f#fs0`sl6>E*AmqK*t4aeE|TVLY&6A_x$^=~IXcdEvb9vi`L0*a0TK z1Rc08RAmC(?B>pb3~QFid2CGRNv`$p1E>r(eJF1QF2p8Z_c=G5Cp<}RvvA)IxicII zZy_+S!e5U+d0Ki4t8IkSu%8%{{`EZ7ffH!tSr^}M-2jt3W(>5TkYqhClWGlz;yNrB z@Vf5ug9qP%BIgmEFPmDVFG~+@=z$w(y~3ogVv6luxXG87866)N{5W^lc0cPNeeXEn zqYI7jcmX@TR zUV5FcUO=!%LOXW?TPvW1nP|ym$H&VL!HJfui_$O`6Qr$Ma1Ix5P6BY^mT(m^aL(G$ zA=*TER%EqCo+AKXoHl`{Akte9seTrM)#Z)TCeb4#(&XfWQzrT?B#sjV z5`_+MI|6>ZlSCjApb`nwF$O~;doOIir{5b1O# ztZpY_KP0xUHfm%Ss=gI!{uTgyI7u??g*#4;u?j;ZxO&-<0b>K z20*y|omewK{96#RJ@acj@T}WvW)uDO6@;mu`R2upaSOtq?nHDUg9|5vduGPaXTp2z zLOPKpJ?$d7g~X4IL^&fTeIzH{B_}0e!QjzGqZCA@n@42U6B}H&E7I1 zJfVFe|BWHEi8~XCG@eDXo`pO8QGUxLMd^YwBg1&q0hnzPJwsx<2_W}pk@jaI(8V-|t?*99_-#R^L<0P}4+Vgb1M7bdj8A zkygtgQ)aiAeeOm z&$rNB1V}r#;NGuEfVT7lg0P1W>|s-+u$3$zM!YTzHkAlg$snkdnlq;>Kph(XxFE7> zCmU%syu}TA@D+hS^Y<4J>d#DAeGrek0O|Y|oa!d};0)fGAV<&*dgvbxQC37p5N96d zb$U#6b_{=9IV=c*U($w|lpJGxi^N3`HouyPBn#&eM0!XD6DvnbCWs^d=V88vw=NPD zZW<$?h9fN#1uVRU6pi_K%!C7N$ECos;Whb1Jp9MXIZ0)vfH@EcX$r+T>)b^E=P`?P z7ZS@p8h!wv+`K-!N zTM)Cvv4J^(s+<+n54I{Q*m9DUS5{RdCEpHRwtnVdtAl>y1;weA#kFHGtMS^Zjm+(m zWGVJwYnopIA3-P^Q5q3-3OZ;%&rVq{>UL>IHO#+h!s8&`PLi)xDZkZiMB0bMD(IY` zXjesP)5PECGhq;6h+*FAoP_Cw4C=5utkZ_#&sIr({;vA>63~4^u7sukR;H(MsHgUQ zo%R$({2uPtJcRfazygwrRnY+Y{4YMOfv4gcZ7Jy&W{mw!K%tj$%Diy~j!BN9NkP0x zNxbodHYx=q)~8w&m3Sl0?^>U%P(#{&5jt1DhxmBU2CC=q(}=^Xt>sxO4hgu)`SbzxQ@tAidF|_mXHB*Gz~j8 zr~OP18*~kzuL-uoN_JIawpH+UqPX^d9u_4Y{G^rawQwD@!tBEa?e!KMEFX5d73IJ+ z4%Q2fo)1$7?BYdt`fTBjqV))CW3t?gguu<+UG147364NB&s4)_E zDe{U#qs1{2UnTnYLu7%s6AgaMSbcQZ35azmhS@QO@hL`BMMt7(-8HW}5FkZ|Idw&RRb z*MJkJ6k(~7e$xawl@xKel#vmUf&G*L?{Wv#oPbD>wKkr)#hFFOb%^tv^wE#Lic43L z3yo^Zp!q0Dqh^;l=kPCEuHgInapnjv=RTF?$f)Ll6mt_2b75Wb^a$4ERP#+$*Nv9* zZC&!!p0m@Ia_k8T-BAjBk_*es3quHs;?4?Vl8ao;i&6-Rtbl8e>Mi%SSfM9+$A zl1q5aOIip@Kb@8IB$pDJmyQsW`GptkJ?5ctm2Esvj1!a}G*8dFkbpNhvyW6Oq8hTb zV#}Bb@*h+ya~8@+Fe*GAlQmPqOhQ1KiuyYTwrPT1J z4Dt}xiV}j!UeZ6M)c#2s5>cyDYvEE}sna7=kVQK`4&_ApM7lEH6qb*+C{R&Y&D zc28*>QErHdYY2I%*HLQ{Ole9#x1NU1JaX(lFYsZpzHNNIszY3XpS=uvAu zN^Tt?ET153n^A3BXenJ;Y3m_q-)Sj1NNKNGZogVX7D4rgoVxcaf^+Q>u3>C3n-m<}woX2&(q5yXA1N_JC;!dWBcBL{ob)mwRPW zGykaffs^{w)HAe*`u9}&4c*dASNq5C2W)v#9a9IImIgdulYP_&bCL!_+>#=QhC)<^ z5?&KhR)=iyhjUWn3sQ$QmWC^M;%d}K#FIu^USm3lMp;!x2i&4ZR!8yi$7Wh17gERG z7RNSL!*|rjaTCW+QoSySCcqDkgx{MUQhQ!j^}s}v{OXfB@sr3Jy=WRnm>N@(sZ)>( zQ>4UwlxsHTDbuE^6RgDU>}fM&3p4z0;UG0F@N&Tq-h5%=Iq>2%KkkeY@1(efnJITX zKEXV^>YU}8v#rKLSi*uUv6Cn9BB9Fs$3$AoeUVUg5gfDTkdU^dv#^w*VV|S1{JClF z7w%HYT1Jt^ig3b83vp~kn|%%O>dw>3NSoyZ@mh!C+QOUR%G#QO%Bpd~>JD-0*4jEL z&c;n!{DHd@P1;7u>PA7s{0-NpyYmKGx*DeDRzlnsp{61!$#$6Y=1{{rO?u)dlARv; z9rpAjdJlJGk}6)lQ9%!nAL+YUO*?w$)=|r{kLR$e8sL zN9z-4{F9#a(E*aEtEE$B{L=-#Pa$jR#u{g9?Z@CPzKA`Nx})~#BrnR;?^Xtt(FO<=K1G4aqIM z(~YE-059p~Z&FX0j62ZyomR)LuGanMH?7>uHlOy&u~&3Ul%s09FO-nm1Hj2L-6GF?+=nz>v0qNs zVFI9^sob5)l}zCalR13F;*)IwNIPKq%z;?`tpv#(|Eb{`_s1Hn7QTt z@~CCQE06N2#rAB8T1Pl?doxxt3ylpk0T#|xDYLB|(^k9PBKwV&grRvN5BhBJyZf^Hy1 zxEO^Awg7^!^I7(l=lnw;%TZHGeiEiBO0dKP&-gN zaTOb5q{xmB8D@CA7s;o6-WHEe*M;f_d^6R#Oulp4N{SS04J zm~32_9nQ6zZ|rb9QD6~0Y*JD#ZA4e>C!tDGSg{^#T7J|=S5i>>-dJ4TjVgI+*?M1m zY}fa7#H=QgnBKJdiBrcb?$1ZaZAkLe8+*&ZOYbWhuLQuN~R4 zl{1aIu(<*B0Y`m4AE^+y>U#0G%=>V{1e5Rzg>Hj(qYCEnoPIOlBJ*L!lQYaz$ 
z%yh*7vAXs*>X_XOlTb+Uw6x5xyFgjS)7&0Or7`=3XAN`fF;&-J4ohcOM0JY{(B=*+ z7AY#VjvU!3wG(5KYr9F-Etk@#x!iq zVS^n4(Q)Skt6%Gm+n$Wz^XZsy*Kn6(NSrXD@Ob+6=!AUBa+kAunf_<5@&&^C?JuM* zu9w|tpXOMw{l0RIdHuN^9qZvoYZI3HZR@sY7a{%ZUdxjMw=(64#wYmhR0DTsL*vYu zP7n5oBA7rR@pzqj5gsF$BKK^&Uh*8`Ww;zc>)d=2#xrlsxN}-)S=0%&!EF+z`4~eX z0ke(d!5_y+#7NX!X@}kjAcP;)=7Lxwg~O1L^EIgcd-#@( zJ(xKN{rOLNXx6mgZ5Gvlb`%#=Po1hjg4V}AreSo0Z%aJ{zT^PPbJQT@KNNP^dQt3l zAROTD9CEavK@mEes8F+9N>aojF$tUK*wS6GOPvsgM#zYtE!o|-2Lq@z8`#8GV|+r-{4`OFbPqxy8VNuy@@ ztQm-71};7X-9SIGK>>g9%^w!7z1i=|-5epCd6b zh@l0%7eNzl$F>=-rv?0A#7QqCyG$5{LP50PNnbjccgA<3@^5Y7Nepv?3q#oN9@SGLN zcp}YaBH0&;Fckle2%gQQvoDe|FaDE(G*>8LUo2l%tWXg=SGsMN&+2YWqg*T(n_Qbq z+@}q9V>o#C>sXTcbCHDFvt07J9fn<5i3U1oq4n6l!tJa?3yiebiR4h}%TTI=7P8n& z=TH@DUaCimyfi4`P#s%V`j;_eY1G7_CZVpxkfuuEw^JOJ&ZiQNsm5-R@(MeES&5oL zv0R0VLP>2|nYm@i%IdL0L+e?Yr6=<0CelMewfko?1+Mv-Y@A<5J?p3o%c=|0+l^HW z<@OaJ;I&f|$ClNza>owj^~+$#)?J1Q=ZTQ@+hWJIQ}YVf4dji-VaN8{vI_U2l;B-D zB|l?%s^;{Of-^rvaYxst8ZOE*Y}(por#` zvLu&ogiT{d$SWgnc1^PPLI;7kj_fS4D)K${)BaDYBhJPqCi})45DpgjdHVg`IuS5LQ;t z>l->y&aFaRS9iZQG);t^+m*Pkomw=sY@l2?jkvDgma{jOotY7_urCLd*Oes%UeK<% zZo)7&cAo(>^;{ZSEb!3R!E)`?bkWBb> zY>C_67t5v*E!3OD5x0G|il#Bk@SD^Vw*#t*#`XvEp>Gny;(Uf|6A|IJx%BSfBPq+~ znGDpsLP_^yc`cVIderd{J)UT=`+0u_>V0L1`>8%-%hE*neQk;RnVDtF$_DB~6&CaTVdN_s=qq06yMN$gQtK;& z;P;!>?~j*TV8~!#*kNDfhm zoKa9hU{F$FP|9FX+F?)zLU0ysaE@4To>B0xN92OS;F7`MvO|pE6lR>F$KV>VkUFD~ zWDXoi$p6IT3qbHdyh0&E{)5Q}BNB=I4>9>ZUed+tL)CdB2~2v!{|zR;&G|I3TbEjg z;RBOTCy1k1W3pCUgLO7>f793e3zPr8 z#Iejj*8&Ii{&1{s9MGiGmG;1i3 z40e)k|G?x|$9xJ&N58ZS)?qcF8Fc?^eftk4-}8!HT>BJ}ZB^I@|EH`IJnZF^Yaj!a zsv9b~6A%rmZD8YHn0%>Y;#>mI0wR{K*f+A-Od%W$sxg6jVy8(4!hd1%{b96OkguY% z<+us-Q}EDH9CgD!F!{N^83^LCzlLGwjtDa-%ae)yO#=aqVB8BMeMKFP_7c7$CdB&s z#MN#KGpXk8@goBC0|n#&Yym zsX#V>aLXm~PwDsG;)?F~9MiHv>JZ~U%=UO@f6WxNF!^WqD^>#x536(g$m zlEu?rqG9>deyZd7(+4L1>+@mmKbZW&g7W9%(uVWrlgfdwFQEZ^=HwaldNR~;{S$FtoS-ye4qEZ?6F z3qCOUrwteHFP8(1;Mdy)OYqy{K?V5z_2J?J&az41mf;n)YeP+cy4$N*UgqGH_`$xKe`mvDk|iQSm-5q@+;2mlSS9s+}M zLhzwKs$*df(Kbu~CS*1W7*Z6~J2S@*hJ4_A>2^3dYB<5HmF^SEWds6%3Pu>6LRf_C zN4ttO8T(;29vVd;b8Z|Gi;o%);d+WfM@qF(5u}lYEnUC{@6TgC0K;|dOi130hGFQYqmyrQX@3<-C=E}?H{a$lg%wE|O3WlL-{y0S6-he|&!j)! 
z76_me%LYiyW)s{Miv225b57=Q@}<=ae91i!%?h_sog6tX&3u@7k` zn(3wdPK!g1MS-A4I8sG{$3*s@Tm1>77X&5*0t5sC`9E8Iqxrv#C?u_pvay0ZNdGdT z=r#o-cImL;0snJVEu`AD!7kTueksJNB>{ljgNZ{q5ppxZ2ng?!}qKGPYgEy zj%FI_e@8Ryx9!}b^8eOgQ=x4{Ov+#-xX*UGma4eqb|M>HQy=1BgB!3Nn71?x%pun5rsEIRT(H`ow7 zE;zyX;jTJ%-{}KgmKFZPU_<0^xMhmh6nm-XTm$X?|&I=knR#f zkPu_DWjNysQ+)W*W05EvTeJHAZLs;}Prer`uV}m%r)*HP7q9+LG;8~R-cS6m4K_*C zu^$GTenLmG=wK%SI;vfBAGqfUMoT90NucoI=3!3cPr9SrSVfbMXf`PRU?e&m|34dS zOiM$NdV2{w0Q!a6Knzvgs8aH4v>cAaOArEuQRZp&==YP;n#rH^XSK76rf2`X!KQi8 zLmLWJ7=avyg8!VW&}<98c%1Y$yHgMb6U*$f3xTHevipB#u(9c;WqTG4rbSw&?-+ul z2U6VH$Xda{DQ*70jAlOE(k>vF7OK#f{@hj7W7mw!jI25u-6ip8r>csWQ&T4k)| ztAx*5i7ScQRECl)&ori4`qNb<`^D=`0h-L~krS)d_v=DQw6~9F?r^_m18ju_Mog%y z7z1o>Wa!u?t(=$g>0kJ!FnO3julI!JF8jlZ+Ti_l`6iHn?d< z4?f9FXg~JOf1rIwslqlRf@va1dOl*s9 z=hr5w8n7R?KTzPKa1f_8*@64IhuR3|BkM=9nb4Ta zu_655)_y6NaHV^A6%Y(Uv!3Ddp_mC`Yi90G@NL4%^@2iJ_x`@PJ^e8+-YzDnr2PD$ z@`+~|_QhtRlMx@Duir0=q%B+{{iuQPuIEO4r=Jk$*7iolvV`IyGr|h2^y$WlM_`mS znk1?8lapu3&I~LS@sa&)!l#YsP22lZrAtLoyBo{BQvgQ#XnvY+A~zVqmrkrI0F6f* zAzrzR!tN4H4Lls14^pT8ECzx*;w38qCmNrnKl>r}>NMmI`k1aos>>;FLyzP_Vlzw) z^AzeP3(j~!w`K;5pT-v#8+F5A=^$bW*e1v*_}cantSomqG}PEb*v6Rz#Uih`|ET~chR{quND`jMmjyP{797`Afd3v`q_j#CA7=M4;5rBFBDVx@?3 zr*9ZMq`3NaH(vNDEQeb&;86#K9LL&F{L3KaVG>@pfz0hsh{Tu;Alhy#ISP-@g#J&v_w2_kAJC_ z9NsW0^F!FY;w8cx5^6Qq>l>bUmQT`mwc>(s_TViEIOv-iAA90jFVgHBVM^7hP>0JL z6g4k#MpX=)aW;Q;29UH5%WU?2DYCi%q_bM8CG{^~+`d*=IVoKXX6fG2HQAHPZn8Ce zFv2y7L_&bLQx}GsRTD+5q$U;*1S~1oD=`DbTrRT{saQa`E`vZ}TCp>vzAPh=M!?|4 ze`VAnaHGk%)2rBnr{BxyA!5nts=Ov{9rkmjER;HthvNYc6lW#ZWAFiz02v|_sS^@X zyKDxp04kyq2km30RpwtW)Z{P@!xtuNq`=vezz)^^I{JM=Zf7XANl=(x@(%Uta_g$S zjYGqVgn%0v`f=DXONy5ld9BS7|G2<}TD-be|JL%)g>rM-M!V~2hx~1ixT z*LJ8EoR~6hj^1nQwsWTKV@ffQ(6DE4e3$&qk%ybSoAB12c1Tnjj^Iz^7f7B$;0;e4 z$j>mEm%as$TOku31zGV|#g*Q0voC%L{0mo{`+FylS%2jUr^3Aa{$AIq33Wmo-N6Py zD9q2ppk}&^z(A8`YC31C&0x7|*-6%l{z1)@m#Y0+w`XmkEhMrpcf1Fu1=J&*H0fFJ z_!1~$HD@;EXt~Pd@T};7!i;6~0DzCDpIvaUNPr2?cZcnC7r24I03sw(tGKay@Wnt< z3c%-kd0-wu(ad`*26ECysxB)i{EAfjz(2woxlj%G0lW-m|Nf}lg)k}fpM%qgpY&tf zcZQwO9lwKpzkz)puw|gp5ROC`>Qld{_ct4a_$7cea|@wg2rU8^h#Kil3O2MAu9VW+ zgkZ~5(@vCpKY&nb48pnf22XNuCAxhKrckGF{cy0bFoJoM-36f`sUqFEnH+^Vj2)Dq zG9rE5Dxq*Ijfrs}DJOqt$9sHI^h;F^6~BQ5@#0Y7EoLgg(a;oOR--?Nk{4 zBL-j#8$Arwl!rVt3WXWsO;4o%u=RbYb$s`MLQx-sDIhN`FK`lqantBSt+V#Pilz-I z*a?q+4})A85{J!(F91on37uFO{eiKjHn4-mkGX{AKfdK$uVrNgJA(%6)JpyeL1ObH zV8-${@ekLka(@OGeRvUsV_1)*jyPgP1Edi3d0ABAvTwZCy&@5vNsxztC`XZnP9z`x zA&j992woJjEQCo>{AxqgFIq^P$`BjzXq-=M_?3`&Onxrn)<5*)JUH3#JMEuuAj5PS zg)*T7oFz^c*n|`k?`{65v+>U6@c(*Dh+pEenuDFYL{@zQAT_xG#0j*Y+;lnPQ^k=d zn4*MtgfQllG490xd;C^SI<+d%d6>xd~;NBlp*w2-?&!k7D;_+BYkmFZYa zssl->{}^n2GpEgpQ{cIzTePa!Wp!rTr#lo6Qxk%OAs=3W$>mP64P%X1J!w^7G(~VXk2QsqBKl+$n;R z+DN2+6IjUqFxU{KRKc|%^#>Oh4_D%t@Vz1Cpwm}jNh0H{RDFG^W+JR%;jZbj226u5 z(F`Y%vw_uv$))tF)vPy_lgZUmNCicch+iaYBwA{vR%)eRYN4WPxJPOXRI|M2f#b); z%*EBBDM$>+bsq*BUG92)wGz3OYKjsc#S&B@idtIJiVt58)=Hh0TD^m7gHuX_O9-K1 zNWElnap`fk8BN$w0+71j!35gDxON@Ms-VzQ zA$X)Qn!7nytvNrWM9>RZbBSvxiyR+PKgd-rl3eE_*|=&|kxAHG(9+Vn($a29UgC<1pH6Gd*D zQ)}OMZ9n8DRP{nybZs(5t{7F#HP2}kCT!10X^fjh%uZ>4UFmq2Y(}{xI9X}0RI5u~ zq2Fk!vw3NKYUu#KbOMRG^y%RXm_L5^Z5JV}fTYeTlg4rGc9bt&6g=HjQq4G_U4JFA zR*`FGhr0Yux_%;er&YpyQt$ch)}!azeIAlc*IK$3Qg@@)a+i|!BczcgBc!)nvLTJOXP;!{Yk)M}sRYu|hqDpV+v44`j0rHOB}`*g3U>)~wT` zwMqG;ev^Li0l7P%buhVgD7BR^%&dPIx$){GZlbu8)UC^(XCSS0xO8zFnI;uQaV=CZsnweoyvuw=Pz0V$H?uBSch7cv4co}>-9b`3o zjy&Z13*Dc0FvWc#Tce)ja`|->3PEr=e{GJftUt+o2$^U)h+)NzI|<}2c^D&4A>=$IR#vsSR3j)n;(3e1BJE(g3neLQ0g<^(E5Pu<8ClnvdjHx zvr+1c9AnErTW31V*QH$n;k>}H&Bcdb8;(+y(AH20QP9zu>&v{0L%%wN-4@5jHY8I3 
zE^P}ikRR3e5h&@+__P(z)|K_PP0aNzlJJrbp|PS@acJ&olk8Vo@13OWCDSjcqwZ}m?3=Cc=L_z`dMv4hZwr_A zrJn6Or61@)9^mrr(1dT^rEWWX-E+}A^nXWl18Z*Eq;DCouZVaYMtU3uece$VKeSul zGbK4n4nH7oN089mUm@O2DBmY)KgvhlXPi7@e%}cR-_JQe^m*Sa;5+%TzLSZ%Ym0jL zE9@vw`lP*mr=H{h@BFygV&{tZWQ1qEqy3ba{eq-ndG?X z>sj)zGsp0=?eR^1Cd5s*ld|)5U;4wja@w8u^Mx~jr1tqr_({Cx>CE}w`3K&lJC zdH4B2cKF3c`Nf;ZWp*2kY}k2E`8neF$#vTW0{@k1*(C({)bRWgqlht;McXmG3cLm|Uap-_os3_}f*GzpYu&uIUL%uUPL^M_SX@_0-+U*%9Ur~s z%DAE;y#a!6ggS0_q;BylW_c`6;U;e7Cq|?z4pBUB(JpR?weB=0hLvQFeqQW;LOam_ z-x*a5=}a7}$lQxf+_7fd+f)piPVBOw-l$pLTk}7-X${z0?#ou(5oX*PZanx;^mm3#%EJtt_rOv}8jg29dR7q5#KFM}IzyIOCf5qDKuPwZvS9a@ij;Pm8Z6UgyMg* z*j=335sf7O5l*Et{V#)!rn;QCcAj*a+*t09lZ7!u&Ze{7{PBYcrw@Y-%JU74j0I=L z+N*-CL2+W7I;y(L+_6HbN}a{t^4y71rA}YC#?tlQ(SmPDbU#dHX?{0etriOx#}>^b z{Vu?uT3NWz>IA`}QL8UrZgm>uW#<`V7D%Vt?qH;;u^(ut%G@FChAiF~PG+xnOrli2kM)?25@aD=o*FvA?$RV7b}lhlZP)Y)w}ekx^$)s8+8wB57_o zp5n@AeYymRHo3m`;&_P>R`K)v5qD*9EN#S3`sLbgY~C@TK#TNKyT3I?cg=^v2Hf5+ zIl^nZb4MU!4L*8bZkzV6YQQda#ZnS2QaC+{Go*$O995I`{l75T=&XQskk4-gI zOK#lc-;%m}RmqgQTLpeU>~GV$pZgrtZAOUysC_3!nJWNp(cAOc?!-B2VojOL6QkwC zv57hz-92b|_0&AfSY*X1sCsk(oOJvN%Q41wJnV54eFD8KIUJL!c(5+i?QZ{B=T ze}7if60Ra06IcIY*{RUys?jXA@4DZKsl+qW_2cX6Nl%xio6YczS?bBa1Ig{;F!w|1 z@JP;2*W>n*_gA}F1px8a^p13H`$fGDEBoPXhcw$I+Xao^f$nWKY6(9UxX-2?+25T& zu1Yb#y&G`&Y`4)xJzS1KIqrq?l%8!a=PhVw&j-0bo1HF3*uhMfA=MhLs}qjkx5{a- z)$LVY0NCSIMNzXydgfcor9;(){u8A(qzpyJskwz0oW3?Rw|D2==SfS{Ty0nyD8Z|t zGIyL^Z7@8xx8Sp}6^Mi?3to_eXL_l^mwtB>F)Kl6s7lr!+fE1KlTkDN{-wt^wpA4S zM`7q|pba04F1ll52QD+3KG-%DrJzob@@JHb(CjLvDD96g1QEegAv!p1g+Hhy08U(b z8Zc!XU5xqY_G-Rc_p^`v6IE3n8oqkO^faRA*;C=JuY@F)jz9R|s?9yHGD%>a2P@H| z0|WJ`RC^lYua~(la+c~%#bj`n zP%f8>*$w&2!1-Mgds!|4qURH_$d6?Ky@VxKOxt;zsQE=yVwy+QnS(u|Y> z8zor$mzl|M^3pDKdHvEatL)>HEvkdOeidmug5%U9{eralC26O0`Zx)ICcdc3plSb~ z3|^N5zPG0wkM}>BUyu$3P+YRTN#wJzBo2j$pELaVA`fMYc0=N%(!oQXn>kV1(;-4_93y($0WS)O+J61_4c#UU5Ps6^~C zQ+F~Y;iLv!KJ28A+Ti$Vvs`1dO^p2>y~OOYQfK{Pxzv~`+W*1cdww;&w(Ht~gwR7M zBGN%Xlq$UwdhbY)UZwY51BBi}M+F5zMUf`GcSL&cy(mcUAV}Ug=Tqid&sux!XODO6 zPx~JrV^CK^cs|K)j)sd{ z&~ULA8ZMrGiiV5tRDYn^aWH)OWV)s1V?~CuE*dT_K*Po2Xt>yZ&S<%VtgfAZ+bxz& zd!2T=;Omo8c{E%cFHy^|0pFK+%a%=`dl5f9St?y02o>|1Zip{=uO8c{gJMPR$IsS$ zQQ?qt<bDfVS!BmZW<%@yyeN{dN)j8X&#BS5HveR;Ob?n5#+!ns_xkN8jx*=k zEXrQo{ls~8tk`I5dlhb2Wa-2He6-1!U-Y2Zne)d>!2nxPAn~yO)at7%F&RnJ^T;rA zFI-$SbtiBmFeXXg=r0+%k!Cg<|k4nnFotB0Us+-Zx2Y=P8xyoL2 zIuNItTSz;vdVl$Yr6-_lise~#eXy8oXrS+^yfqPlrAr_vYr$L9H)z^f*}wOE(e1Qd zI)EDz+m-iu#^r4dO`?IPjPDXEh`mD!Zy(C!<+Cy@CS-p#Xcwvz@Ja0OprbojB;IVn z^WO9&4V}byrR&e@4{wED%0MPl?xw9j)L-Z`q!r3)ZAxa=Kl3-_@U-cn{_Zt?HAJIy zayYv%Be85V9Lp(CKu6=Kq~9=^z2x;yoaV$6b7<;y4f z%%v4Wa_K6eBg0=>_Nq0czlBO_t=>iD_nO^G=hFT1(?;tTzy1;kSpEfZLf!Q1hFO>TB`!n;%)nq3d1Ob30A`JDPe>l zR)SBBkiu3XvS#Q+IWee>gs=TqBR}?O>xfJ{Sr;87q5aNtDN0phswOFfNITAi)U);W zjg$7{uTl)3pp5JtxWdv*2~cKZ=@`t8Ezgd9d+CQ&M2|j6$AwGBc1pXAORud!+M11eY<;ac|$*RZ_!Ukk#+GW+#Kd9Ywi92`{{sO_Pm^Fj<7a$jgw3Z>y6y26FDof z9`mLK3q%Wts(hi0yqz(XagKa>k-Xz6p3{K5UW&ZS0F&!Y&%0T9_bxmSc7Q{t;Ql=PA zR?ye*7^UZhU(trH%&xEL6G~|tUsFz%kf6RKGUY_3zIZ<6IN81!UFB%&z6dYna6}(G zT{*O{FStrMsH-nvLfL<#&-+x_3)JsHrsB@j@5-m*BHQnztKw+gZ||jIhv>IXSFtMW zH>*-H?dmt4P%+x*H#}7_01fDosXk#+)#4k_kX2RJ9Zik_+pg9Zi32KkxP`1sVgWz{%!)!3~EA9<-gM5sMTA7n06W2zct>{4Tx zP@~&Wqd6T!QGwJc$%ZJHhRFDaNMwhgx90_(=1*X5)!w9Z;*9 zbhPrm)@R<)av7~MozW7j(GQ+lMG>R#(zME=3dVfM_%4yt zF-MGXR7{14ud$-vt~OsaIlm3;R;5~pZ#*ht+~&CsJB^M~r;gC1j?JphV-7ivW*y5Q zc`3DVW|Ijibcu}|xpZ|kJF||W>I7H9gu`=P3#*=xc(QGJDYcC8=<8^e62>2z6IvM) zzdqm!Jn8l0p47?B&>ft3V)I1Da}qpp&rnWRH+Rfv@s9N5qzFfLc=qI++vJblL@~lC z-HNen147FucrrF`t?uv^#@w^XO|$i$(h1XBb|=usxK~%FXEv$V6r=Y6N7wb&ly~A( 
z4|JL_?1|@DtCQN)pvAP$AQ^M>nBNmd59YXl#IeBOWM}?Nb)2r?Nir5FRwxIN|6(Sb zCJtq~sGTZo(5yHUxu_dWs1w7XCtN(k%HOFyI+I813=~Img+i2*d1b!xwVoY%zvC^m^nQPg1;Rw{Rw_+Cp4?%(jClEW+$dtwV9+T0YEoEf=KU`tU9CuFiiM9QEwOjvB3O0Q*XKkv z4ZiTFG0y-4YowSxqKr7ptU6 zi0Yu3Ua4Qud17q!0il$Q$zr##&`sJ~9j>%xlFa{fMQ(0v(&%YoGM7J)p*&nX8?^ZZ z#|y{oh{o)g!|dcA4K~hZ7r|ziiDp;1Waj-CaX zoyA=j3(^>id#@}gOD+DJ1{=%!5|&KrmdvJ>EH0K0LM&O6EFZqId<0&*8?t(5XpQt; z82WD#PhmjGf5)&nW`J^myL&h){05izZ+~ytyNmARfnwAmi|+Njg5|kjjp=Q z6H^;K7aRQ$8-pYp!=4TK(oGJ<4Nf6z9(rpjQy{+`P%mU9v9OYb72MA)yC!rnQ#lw3h9$ku%*eqmv_(N!QcIc>=KvklJ@PAZ|#r}`xM%3dP5X2f>oi)2H=GUy5)Osx4sT5z-nlru4{<0;a`^Dd;g1Fz zhfhNeB})#a`wnHd4&@;~@*u#BbNj52ZMZ4y`73k0QhT9ylaWLAvNQJ4Ex^}$_WTq( zX!%{szGLgHV;jV&oz|&?)2UO!sY`vgWNIg!(=k(GM>5YU45A-i`X1r;sxCFXF4M7L zf4i~Av4_^No7Q=Z(|KINc|zTJ($sm1c5ejY&}3>~Ib_w4x%>5%@n(@8&l4Qf%b|tA zS61jQ9>r*da@kvQ+240LxOF*% zxPD7=c0S+EjB?6u+4C#et!de*JCb?XWsa>Jl= z!{l-UD(qip+MP(a&Mj@9>g~Konxp%wp82>6^yf5d+3gOlT)jKMO5Oqyxf9d5lW@6{ zO1hJ2xZg2zCwFzf8|qGx?0)aHJ7t+WRjWJoushANJMF>I@$KR2yPe(|=VcSe8W-2v z5Lf#n65*)rdh-3%(gUmu_eW(OY^@&b!yX*V9-IdrT&Um8F?UgJPD#fo(V{(UuH%ot zXkt0IqS|)Dn^z*b35CJ?4-R%&ADj@~^^`d9ltg*Lh`gleyrj8KIj2vs1U(t(j;r(> zkC7|)FFrT25H5iTEkSD%PRd+;iFX`s9`iM+MwytTQ!btJuYHN2mgdF#1)>xX(9 zBzqga_I_ICZPe;*JnSticmh^9;Y*%U5PVuWIEDMtOUUf$Kpmk-t%=&Qm-_d!=qYde z*FFwqK8~$EPQyOV%RVj#KCUPqt>|-lGp`5ww)|$E6!xBig6H_jp2Dv^&4*8I&Ai0! z`UbZ81`YcLFZ+fZ_=ci<;Y5C6bbjGnei4#>2o1kTGruTTzvxguR7|p8tcG8JuiJwx zU-M))eg0&Ni`R=jI0Nl&4%MfL$!_M)&TW0q<3i74UH!8{{j-z(pTG8hQRe@$)jwz0 zKlj09#`_Bo!3#zrAB)$Affc$jLO#|S=U_vfqzkQz=ziXAIk!YTLV4~2b3%a zlpX|>p#sW@0xReOKXV0EN(NSG1Xh~`)}Z_uU+*eJ8zepW`6f9&2ItBtGR;^2+@{R) z%IEt1v%q$`pboB}PRXDyji7F`pdQyCG*A8+`-U(2dfmWP<^A;&o}Y~h0h)_}Es|I7 zP=R0Rg2%Xm$0dU&G=eA1f~Q=Ar$d8hl7nYo2hWuS&$k9I3 z$=8Xm0|#EmBQtQCe0_(_0^VH&u40F5zYh6P7P8YCvO65Iw;Zy65ORRJoxdBBAbEvj z=DJQd%{&&g`Tb_7)+_Scb&J0K(d*Fn*rC_0p*O>!zm`L94?Z)QBoDL`X0{o7;y(y@6mE{LmmwKmr!alpV^a2hr2jGg`|;cUP@ZRFpOi?y zsbsQiyBEU4?f2Rx4rh<1KK--7=1VGvZ^Xe7w<{`aq}D>5Avu zz>6=OdI^3779Acyl=G}F*N^AHzt9jdMd@niYf|4*=WBueg$Cb~{k?0!!ZY;zB`TZ`Ao3;0S0be4UOl(lhij@-hTVYro5BH=u-u=MIdLx_Qys} zbc4+`XVX!iC|C2@Tp(A=)sIH5)?Yu1UFw;g`<-iFg{VBsU4tZ;xBdEc&D}+;BKElZ zj#1F#o_mf>?%jlf_2qT%t^&C0Rv$H$e|cD()x^`!(Rjl%@VH-$caVQRh<8YMr-^r1 z{Q8D>L<(Qrs~?}#xV)VS=y?29K24c#LQ6%QfAWb@F#nXHV>AD>$@H$5G6Qyh?pSLQ zhf}Zlvi;b&Y2z<}1^0e&!A0-+;Mz&?y^lUKYp+bs+n+@@kt|12g$S+1vb6}UCyL$* zZKSA32ybQ>g$RFp?${#yJy%lra+wJm^v{$$Bj5pm8CZo*$^TmzRXnRY`M}$M!Kfa7 zzB~LH7J)~jTvaq$pqM5h_NMB?_tJYApJzk_!Y&zIb{hzYhd%7eC z%WV<(bm~oYN9(<5e_~X>v)Z91_6=t{GYoxwL(PpB2P>WNC_0Vb7}eHzvDt7-^Yz8y zdjD&U)|Ow_zcH%e)>bsc0%l!-gUIbx!f;rMR>BDcwpJpb@~o=}GQGcMwWH~ySl42h zGVRu4S>F|{#i9S>*W$VVz^JC|))R%k6|EZH3oszn5AJIVl z`H!8lHZa?6c^A3kZbdIk@&B0BhBOi?eJ4rZmz=L^EQK9pHZqECB*=0}UEFp;M7B1r ziG+81=idmX#z?C-?NO3(H3Czt;_vi9Wy{-vO44}3Lpb5sw1LRA!Pde{mczIW^ROP# zqtBf~Ux5DW2a=rFO@zYr3vH^x+nGT;LfmX&(V{g2aJhSW3wd884_5NKMD7uoVq!?ah9k~jo6J`aiHUz)3o8DuZ= z#W9bg@&yMvSQ!u`?!$v$3AO2y&Q@dD7zRZ6dDDhwS+Y(D=L4F8WjQy~3kaaiTM^M@ znfMcf+eM@kgG-bWdl!P6H%4IcWw=hyolGi|At8OSV+a$nNhgW)xq^i7q(KvHBiZwL z4_VfM4ecJnx9J7ZWGih+KhHmw4T(g3H5%d-5=3aLi_}vSi$nVg(}o1s+mhmGy{Ii> z#Cvo~ZzQ$Fw3AL0+`@@Uk?~#z8h8h6U(V|;h`=Gq(l$6@3>#XFp+f86BDdEZ@U{d~ zYN>n6a1-IFd~EuWcIY{q0;iB)Q)r|s^@@SQfvz8dV5}W?yBCa0Y|x1Hb0mx?2q}o- zEDBfjN}}E{h$h=QO!5`O#M!2ZCStN6+EKEkfx%@ZH}WHK*E(#|c6tP6v^w0=(S8*i zj7NR#1S5Mx*tcxhw8sL{J~xNaQVAo_+z}v0Ap@a}rV&>V5-f5FAlV;}AlgEL=}s+a zY=kiHTfwmX3A8;{V7WY{^hz>gQV~gfEBAGg@xEIOAR9$yigfn|@X~0YzZX)l4Fg2S z_lDRj56EQM8*BW63$ckKcW%S1$FGWr=!?4bdcHcRUKQK+N682NbbIBA zpD%udo8*jhK(%A{M!KE2(X)dm_eY_P=ojMFxz&MJ6i-$JK4)npc<>eAEiB3J84iQL 
z@%m8Ee1P5iF%m7!C$*DfC+;8BFvL;r$>5wf%QLH;^CBrwuzQ=v#4kHRfqOzinvg~?`_b!cV&#WkzIN<($2qjQ>EGFm{Mx06ht^2;tS^{As?zs2}zu+t_x;SV>JpgvX2N{ zujl&rGiyJ4_Yeoc4OhUp)CP(&?uqz?yUfUX=?+$}XtKubrxy(}3_C`({fqkn7mX`~ zWqw~&mX2RsG;OJs1x)uZoi|)GA0{5qG=~wT#*7wR4wi*n^e^B3yl4fWL3nJ{6-=tj zHXQZx@Vf&mVA0EVXi_=if$A!r(Pan4P&wkjJvjwBgi8;ph*3~oBYAn*#j0Krr$4Yp z-iWq9Bvm9lQ(dQ=zw8kjsz~x3Sf{P_0=#OZIi@S+mfdGITEwvDcM0y0=ZZpMJ81hK2Y06OB2SjhpOHQ4sNRl z{v1a)&KD`D{m_2-bD~JS`lJ5f552~pljTX(pPrdGK(Y{>bwkyqzJoia*FUG*AT{OD zYP*(H*E7B9HJ`HvcWp(lXGfB1s@|*ZIT&5f%?#Dl)Dk+!g8uTj<^z-ffWW8!^0-!X zqw0)Cw;$#xR=nwsr{Hy1=&dODl1wiXN3Ha^@M{{Yj?d}0!~#JdaI-g6ug5z<5241v zn=i-9>KT7|Tt71DtJ;>b=o3h-Wd5)sV6oyPLi%nzQ{QN1N?7-u>ihA(Jg%RoR)&A4 zsSD>hs$rV(q!Lw9{*0IN5n(9owtH@1hz;paWK++{d!zs%r0jA3=i{1fBEtVr(#8Ti z)$^TGu0f9Vv{oh)mhqH}jw^+H0=7!Mt2TWimBRO!&%WkcH@6QP{)SdTDC1u(Z{DhP zdu-wrcA^d!P;RL`QzAxsE(>`l&NO?jzV7p5xk;QAq`PLw=xFHgKMwbQuhPqfNo1Lz;{LNS`uOVJ&R7q#Jm zNR_xb8LEj&AeLb|7h3}bg{J^xiPQod@k90A-5n(X@loQ@BzxgnaC+U4x{J(;rGnX$Cm1!jlG_@`MbHkqsKUe$Aa23Bb%6rccAj)J z15v;BawYB`)U%Fnm(W#?uyNp~waAd9@7K(2_ztVMu}CX(Lz;{d8{uoGW!NCyr(*<% z#wQY%BrT1m-_$d1)9>vdZ#azPPRJ*ChNhn94x@wy@*%#xowPp>qhSPZ2v8+!3D~oH zF^Z~hNV0mnSVWFuH51;Dy;tmh^z@`V#U7p)M+tVPYmzIE zq4a=NoZo2Gcf~Ivb4N*j0|m4fy1;);1{cbe(p8BwF0|->07YZWJWJpVc5}>RD;~o8&$QntJwR z-4Oflspr@~sAm9S5e!>-B%Jaz4@a#?`flGy)NkrJu}Jp8pVTwOV3E9F-)K_6X#qXq z2So+t{{!mzk#@@azfsQ)Vi)v(r=Eeld6T6s=mt`TD-aeo00m&@2b2O(m~8*@>&%Lz z?u=NX{a^e#qn#9~^dB^_YjJnqsm@A1$9!JB@U`+kfUlx&IwNdohC*|0!+;M+319g3 zl?u5ZA1If4@AR#>XIK66GG$aKfb!{OxbxtM6t#YVG`I z^mnsTV|_iM^j{jUX~#fUNh&7tE5x0sE#rGxLq0XFEq}jE9c$bzZN#)U5n0F1l@Tnl z7BO!^CAsa1G&Ryrt>ixV+hs~&^vIO;;Frz|sbO)P4AS+>Ga?GF4eze|(+Q8G#88E4 zJ)t;%yG&V1C*SabY0hu%$1;ItFNW{igA|iEV2I-+T|wm5oMfj(mVEs2P2Jj`mnmD5 zd?e5D;aVq*k9^_{gZ{=@9<9PBpGdOvD|nxm7j=6snKmAXG~KP|;rAS`B_& zDL)yC*vt|6QC!|r^~ljC+Z41QRXy|{E>kE!QsO@^Q$*gLY=45Uyy7bV1Nho_*u!?v zbUNjD(0uu=_@L$HeCMDQfX#l`2Dte5kdp5X$w|RcXy@A+aQQ*ts z8!LBd>{9`+K-R5?7me0wQZy63?ENiK5PJN;!V*7#)I@m+O136>HCZFccJ%%FBIxnauko7O@VKC`BZRDx?C}CBN2J&coCs zgrOn|V-)$m^K}tmU|){Vn71FbIlk}`s|*I^{(@UX7R6g%z{6c9(r~g}_?8-@9{Xw( zhqwAyT#CZonnb}}Ch4AR)l``wT05uvy*T!Bg%q6X)>l^W;i@50o50=iTHu&@kHh|;Ss4-aZh(r>o^}`kvmC$ zkOg2{Ljk~?E`N)+fV$No0cB-k03a67pTr{wfWkSF6-a}+WQr`YXDhIlV zfm9kt9gmMoWVbhd6{d|R8wPD_0QLYn@^~X~Dlt_dqQUC-%G0>DAG+q4;~hGdOSkcs zi30IukGk&QZ$P@Y5L=1pZ{2ocUK6HycjOytC}wTn0J>8fxI6lpgN(qf6R?_^c7nl6 zV6MW5oMV0X7KpU$CeQ;twHs$)mx|+2=62(z-C(`n&6HXc8^z1%YNZnU0&fLez9F_t zDCm<_7Y*fAM777}QLN>&Py|URs52^~ha1&~YpajT$ynQQA%b;}i(t_YN)b#N0qX>m z4y96r!Zqv+HBf8RrB9}wL0po+A^T%E&nMb8+=m!AgKStD*xa$gcseTyRGHEx`v}7Z zDk+d4sI5^n(3%?t=8j<;Ai(A}@1We8VFX}dXCpBHg4s@30Cf2kVHqZJ>@zRWY1CtB zBS)is6r&K80|m$e_<&)I*c1TzPauL7*WGIa9M(4YJp=%ttw|y}1%Lw=RseuDncSLH zUVaR7@KSjwhG#b~2bAHW@d zrsALAv6PcIVCpia5$x4zmb<`M$bLn5VXiC5l3qzBjCg3QcM=?Haa7>(TeA6yXYw6xefo`u+vhxDBS^;$!eHDT5+LNEXU9H@ni&E}9}j!X4y5jXC=ODZ=Z zncgX@XNuk;ofc)n;`#|0qD-cmMwE0Y;q`<%`Aj78HxtBz&eY}xcNxc@gl^V@a6aew zct+m=t>D{94?j6G1%_vLJ({!s@*!l`+#=`EgQjh^Gt~3ZsE|T=#+t(k zW26$uoH z?lEDOW2M+n>F?2F(Kc)oG2!jeEt_l$aCR5sXu@A_9kTp{^G^$`wU-VtA1RFTo3zqK znPfD46TlR%idoCA+-Spy2(^g^;=pwr{eH}kR7Bq7uhZS5nDQQJGc5jC@jjUVa1Ycj zDHR|?A!pP{+vja*9R-61#=vxU4CkFMhd<7ID4}#_!C?oqp>l0OfcGQ};UBfMz8FEf zBfG8yJytLPj587RPqV_JI4`;e7bA-Kn)J!zPs?OgVOe?pfGfgBy-_)LAIL%h59@%> zjSa#9AS4F21CYiVHuv6BS@2cI9++P2+b9?+iSUR=$5Yw>kQK1{etR64J^~jpGABqQ z3>rcd&_n3)JdF)=s!YQ*`(v@e1Z{w~F3*Jp7Pm8%!e*79k^#NM0n{@cvlN!mtl{r0 z^dW$R0B>OD7;_h{+%u}an4!pY;z9fhv>p?No&t%99F#E7)|7XDnCuhKmw?D%w_(PH zcGW;rl%jTC-g!;}QPBN3OkPzW-x?~*IDAGWlZIj;7Df=qM=723)@<$a*2^7|Y> zT-A<|!lvQduil@zsy3{+w8jTbJt~X6ZS60{W?v7Ty>C3;N__)$Q8q=|nh1|Dy~>Em 
zElYKupu^Hr_pPnGjUU1_Bf3bJ4|jHZn@L z!uvFf{sbp-r1W>oiunj%BqYgN6)*Uqu2+a!qb|Q_+#bI~-E3*J{+u4ZJ#R$)I!tc8 z`L2QbIgh%%9B#e67)IS*qc;&0a7-aM$N&!ZgX3hw@#^3NGjPZy97+*JA{0hu5Jv77 zMv)yxSrod8QFTq1>swJzLg70iAa?jl9V9Ye}l300YfeiDq} zPKf~hRiR{u0R!*?WY9-5^u6Y9jy=%JUykpf`RHjAvA}%+B&_R?Y10<>tYqBiEyKE^ zHa%BF8)FjthsT>REOskQPhqH@YUWq5r42h`2Dz|mABl99DpqL%`S^Y1rykp_mc;Sf zmH~&^ZRWZRc$md^|L;pOQiH*QbLw$gl|Gbj@-#p3w zpS>0@fExe;p8OBz-~T+)Q;P5USCOIcyMh07Y5cFwzqy8%zimms&%Zvl?>_&pH*iT0 zUH)oI`Y+DECO`52*uY^bkXncR%LeXGThf2Ef&05H$#MDrZQ#D)CJRn<{UI{^-`>Ef zd=fk)0}=7}39awdu0*lz*KK4v?$>XmOXD9R!#ln^Jbw`xqI?2}VRWP=r(>#u zd#B@?Do%W3T7MH6@{j7^r=L#!O=P&|I*kf)KA&@{MT-m#3Q7e2CNj)o_x-!baHr9C z1@U{N7fO`$hse-x0j=Ub>*(_`a@|~ey3g@#NO$P!+Y8CxBfa?&p}sz+al*!Vr%L~A zw8*e=_C3%kp5sRdBw%yijZv|#W8=-$Uha2kyoVhT;~Ynozejq?)35xuHnlZMNgTZj zuFms(TgAWFtpJ9;%`19DaOe60!4!)#9-Y^)g|@Gk%60kTr)ZI3=)l-@F^KYFCuhfr zA>qi|k*b#0e21hc!KF?LZ=KYf(H7nU&dev+dl|-*Zh_Cf0K2zxX$H7ZK#L3yEr0Dy zqi$D)-Q1*c4>}_-`ne;h%-hLVF0l!oX-0t?q;UJzbFWa3W)kshh;Z_I86H@}Wp*nL z5x+-zhUCH@pZ9yBM|$5Us-*`Wu7mZ;v6!UkW4-e%z&R^jgl#a0n7I@MdKt$w69&c! zC=DW0HM@5k2BUYvjS8@c!E`Uu_9414?NZJYE`igePwJ3R!q6%fNHOPU-_^hfYz z#8-$f|7zfa`q6Z&3vMSpFTDNewqh&{eSs`vze~UsgC$ku3bun=aJCW0LCy0C7-nRM z!(iACHR&_V-K3$+u~E=;3_`&=Oo+2oj}ts1N397%hcs+FckZmTAdU9mmN+bQB@DhC zXWfB#tlW~CU%E~LqTGT-^9<umYLrQ@S)^9j}(+*vU`foqg?< z9`&$JYlb7D9uYl%zr+$JqWAjG(AG>kM|cJ|Q~UhVF?sNkK?H0>zts7rfV!8^5@Lwk z;er_R+`MEFV(ss9eXHJJu-jwMC^hRsFW8WgQ{wlz_}s1%)Za*Wf0^RT7p04wpZH$ z7%Gpdt9;lwp_<6G}ou5K$P@z?K`74aC(LswHV4_IN_uicjPw5vgM`B zSLyD}04-_0o8#+~bdn}jr24*)b~OykLqWrEMqZi|15WA8v#Aba@VRYpWIX$-II@m5 zZTcgH%`H3WWQnlYBQXN>Fbv@vIbscFBV;XZ%xxLez{$X=N#_i2-fRo+qDU zE$X1QKOz=cPnky<)P0-C@z`@un16eV4H>9#LzFaaWbRCL%zipaoWAoS=~kh`Ehj8| z)^bGfGMdW#%#_MCf-*<34R$W>7HZUt0A!U)83kHN?{;|Rqo+DHcDk7mEl zwFerSgPae;k#7<~t6lK>dx`HfuDUwAqygt`?yg%5c^Y(Y@0f2^PY_za#e{wyx|Iqm zq=S=A%*dl8QSK4ILfS6O_A2f?RUa2;;E8O>1tm$q{N>BGd-e3a7P;;b0L3@nR<&5G zS>_d%`1No|Vy)K%62Arxu~NWMorYWIx!yztlK1MgdD(^QxM8Mv9M`%PM?6(-bx0uDDK^!wrx&_f`XopEgMx^DNSHU9o`I0}l8e=l-c~d`wWIw=7^KqNgXIle&xq<-FoSs#<`I$xTR9y4s+}3p zof8~a7es+b98jI}Vy24_Jo#q~BwG--@X`q{2GT)~^Qg|1>8m-el^InnK?e*H4h8tZ z5}vW9^b7c0ObOq;ilTa+Sy-8HGL~RtXN1cg+#F%FcgUer5;-vfNN8n*AJHjDLD|UsjSj_h9A$pKeTgHeU(LT5GsbYFgo;aCXI(0$v20TEo zEqeo|&Git^g`QvwKxB21G31sdBxmrlAf}>@P}x4ai5C~M+{@}Az@|QfbSBxsFh_PO z#qQj3$j*rBL5%WU{LT%F-E&jMa_qyqX0-2LZai>}{T8zzO&D>WfQiBY|6s)$y@<@N z)pkVd%xbU~SUk^WoICPjp3kK2+Hzw6(wQ;@=HLO7v-TKwLX@6ir6IWo&qL?wk*;}3 z#4GqvLkY|05<>PV6!i{0{`pmrIVR^Rc%DYl!q5r@!bE+S?h7J4ZV)g9=gi(M90n=m z23SLrVB~C5Sprz&UMx?!KSWpoN5QyvqA)A47|WmM$$H<*1Dy%`WXluQj5uNDgSP4j z59~u?4+)M0-l)K%PxTyF*t^5J8+$l>i#e1>&4ljf#DY+3Do}kmm$5D=$ z8w52g=<3I`EjO{gJi>l+;kx??oc6ww-iU0OmzW4xF?YZ*Dno=t5idAS__(Hs8rwHl z=Dm-@hpdkUtIzOlFT5%|Jo?y+BB6Rtbwcl{XV7*Ooh znJJgXZC1b?jssqRhCr2^m!J`=f(~XMG_g*+)*0F6?|;IwD&E8_ndm7dw=E$%7b&GQ z&40u>7FWCyAlT5CFd648Z~OkSu!S-`)`EvavR)~ffZIf+T;ES|d0TO%ZSAWd~oCp{Ols=RQny(GHu7y+8g^Si9jOwBS>tbHk#WmI?{N0w6 zDq5dzRG%4GpZ&7_MPq%=eEqBI`s8?^U>;~6Udz%@dYsf*CB1C<(b%#(-?D$*a!A#BEZTZ%)OsG+dfDIND+;}I zBf7nAMNd&N#o9o|ZQ!6boSZhirZ$3wHptCyTM~&_JDG7ic~Cn=PCI2&JM}_4?M*v9 zbqAwy`$T{1RbU5ePRFCB4)%o(&YKQy>P{Z9PCnz#AM=ldaymttI>i<`C2l%l)LqhI zU9!eq@#;WOfseg4 zap;j;BitD2*=U1EMUy#y>N#H}tZB2d?~1@_?-6?ZOYzIG6vU>xmZcnf7uf?u zwL?-ht?uUcl1o7X%dxX&ly>NQPud~o^jHSzGMG{jeC)ouSUu2Vdsp%=Za&Zt0}w)s zKDVacpwqq$IF>;<b3tJDGuMB=V>tCrLd=Z2-;O01C{6rss8^Dc8-i@mb03{>wDeqtg0B|J$ zQoC?$GJ32-GLzQ{*kpvb`rMdwHuyY;+PhHo74FeHdq9KoF+Xk~k{(wAr3v1F;^`wt z2Hdm+ei88@u#;DS_5jE|H4FoKT$?=1BJ+O0FBCTrW*k^vhY)u%JQo_g$8fxTw<-Hvh{TAd+9J({7gqBKejOaYuMLt901X@x 
zZjL-*Q^VFjBwXZE(O&_nLxIRVq5wCn@#?8~R;|svId$YrJ#4x=Xu3F8uas~;E*Qw* zhLxN*w*$wrM?#j{#(x5EdGiQ9mJmol=L6{V#v_$gIiN!*e(2B{nwXgF3*+v|1+OC^ zU%kiQG>^S(8q+#c>*EG`!SQk;@dnJN=5l3-Y#3M~`chHl1!bK!Ka7h=^M-QB7UKTXhT0Ybyl7ljGu5C0^<9K2{;s$S<3~a(6 z`Fm=%ASqkK^kzAsJ`%iXPSgi`YO6NCq6tB1uIZOg=C_UPl&^l!g@h79)Xm4mO0fqt zA-sE#UvRKp^MW?qxSwOX9yYq;w!$znUHMCGa|INDoM)z8a+Fwm@QQkAWkcK?_*8QP zcMp;Z#ndr%gYk{(!|~!uFg0i~)zR+-2qcAS`Nqw;WdHz??M*6~fVJs?FeLiT#z(vY zBT5N&R=~tbPwX@Y0`f4mk@y>VAQDrEAMA(r;nofeJ7(X+7)#sPbcgx&TMzMf^$}p= z2u{M2Wnd|`i}!dqy-6wzyIFG}XT;!Z^H@%s+UCf&oe}(mNql`6cKt8H=Vv(Ra_;5B zg}S!gLX&}fbHkxuYDgG1$)Mq~*LWts2{+cB?yFq}m!0aQo%&KW1#ZX{JrSq+oI|e6 z#-Z9G46*|?VGBMqEtvzq-WpRMXw02oIfUS~LO$jm_MT0Sszb=Qjv#UaY+nalP z!Z>syUUsT7d?M_6BGh{9ymZW2cEF=Cx5RPwG<3&N0y=uO2WdUClsvbVJkzD4=At{1 zFgv%=SX;6=)t20FlDx1AJ-2f`TQ5KN(70eTg9f24;GsJ{C@OodlK{HQ`<#~{$(K)C zFA^k|B8I6VUtgG*U7bs8B)z_L34ywY9(*{2+Zle#DnnPB*lOLEZ-T0qwPnJDfUiuYyd9*qF-fa0c_~z>D;5*uu6bb;6 zxxp~;n+`+~E$a|AFqPM#6b{;!)R9CkrYJ?gJK-h+5j{PWfy$7%%aW-4-Ihddg> z)8WU5qIVyEeEd}N<9FEU-C47LN-b;XG$S^{{-Fc(Y|ERzz59+w#q}dO|;L!Z?h>gK(fTxiiNruWEs2mm{ zy7U?sMBN7s25R0%4)Ag}t8t5aUEZXPv`)81$TeK;ThY3a>Ct+UE;e z?#7ehHHTq(g84-RVI>aA?Qd{#0IwzT0$;d->!mvkzqCoyuAc&n$`Kv zKmx{@1pt0_9pEQ6p&h-^mvaDEkoC|u2LPmC8s%Zbu_VeljAA-R3M4*^pbSSor$UVg zG`|z>BzL6m763_sc?!!TSO$_8npO!%_?_b@Iw`(4jq=nxug44Sn1N;F<%Vn?#{-X| z$Uhi#C+%^>-F(f1Z3ul492#ZtVv3>p0RD1wS|06PxK0Pe?L`nA)87-bru&akROP3~ zso4Yns{)>L+<;PJ&EDHE$jR)#Tfk$>I~$e^ezf|(Tfl?*XB5>zgS~B??qPcVza2#l zPba|s6)kD|?-%f>?7f2>w*N~3k6Q85X|(0P;jF|hvDo;#@^k}wZtXW%Y;GW($6%UM<0cZ7CwGz z?Cr)EF3*ehuwJKHzQh)iw#7H{i|`Aqlj1y9fjmP>Q|vvFpYy1Ay61k>hN3VXKQ67xsVD4aj0?302hv0~s)MRbW!IxQ^sQLs~Tc1jXRVT&Nv zXmXJ4_s1Aw@6Ws$)TdAoX>rFAZ@PaMw$&{X(!_W;gfPj>AtAx@ih_rMFB8VbA&*XR$;yy+*j<|sQ}49~LEv7H8p_?fGD|Ciz2fq#dNKE73r59yDrhz>;(bS# zCbpV7t2^Ao)TfjtKUeig*<2;JJF{jzj8vwOt?+%oC(GI|g)Okd4s6lMnbK|wm@Ihi zUH%11;;F90_yuVVQlD9^M>bKs8IAGYL=ku$^-%h;Q;Y>N&mdEkDU<_og^n`Gw}eB8 zN!jD|WjcUr>Pkz&?f0F&wGqUNoPy0i;Xu?oh?@BbxL)0HFSnl?$w?zd10Rk?s?sn~ ztzq_ZFM_?*&E?*DZN7eeY3Z{$hVOwgKCFGv3fGOjxb(t^{5>=<5lDQ5Pd;ct=(tC? zlSbZvB=8!&|EY*H0r;r7>hU7-QMnz-H{al$Vp0UivFRNooBr`lEZpK$3TYpAOw*pX z4m++Vm*TP7cJsrQdbGXg(k&rwRYcL|c8?>~njGxN*hCKEwORy)s7BsZ@pD%!xofrV zc2sY3rAg*}u{+Nu4OdU70-^JXiuqkWE9~G<^^&_XQsRv1BwmY->gjhs8IsR}!ds~$ z77=$B-NbA*-Ox}g@5oSA9FK=h{@m$dxAtAgeZvh2t-J9e?HR1K(VK3gJl?)j_8>6u z6;5R8lzkMT$DO7K?jx&3wMadzaPEZQ+>)q#2Ub*iboo?t?`Bg*-q8o^w|EU*ukmT;iK45hPXFUaTCUIlYVhi>2Whn zadT5~3rBH_4De+s_^L5{-4DK*4*xI}hpQ)Ra|GXGi2p1V|HU}|&@cWdJ^rLA{%k7# z;wT=#h`^Rc;F=)t{Sky22;yc0=`@1;7y-;cpxr30;p5WzCop6rFf}KzOee4%Cx93e zIiwT0OcHtg6ZtX{1)38DrxS&b6Ga%4M5U9&O_C)2lcXjxk|0kLwcT)y_Q+}cape4Q zl}(aW{gc%*k~N!?uTLjyA1CWFrszwj+%!os^iMI)NHJ|rF`rJkbDUzqn2M3$w=qez z^G|h%N-;uGphqdrkrao(G!N-CFOxJM|1`ghw1DQchtp|~kJEw}(_zx-p(g3!{^^k! 
z>Cw&UvD4}B<8%aLMxt~^vPnkxQ;KsDpi6T`_H;(>aR!nx6C=%EXp&jvpIMTTS=yXg zKAl;4oLR+~RU@5MXOh+6pVgF+)zX~RHl6iCI_sNALiZF?2jfp}Y2Sf9&MyM}<%?mfBd8bW(*X5rsJ{}=4M{)Z>xzeG^^Pl5jaYk%jEtB7VNQWW!Lf3Z<37Telt0s}Q#3rQ zt%33^f}+RN%r=L{(ZKWgH#cSKa7PuT$UNTy!i7frCDDSTBWCFb@gNfK8weqLioym= zNe)(d@g;*yg1i$x8QE`fX}aT$dU=y(K$M(J*R`u9F$(Xudg%ctO1%u zze?I4#mgnel3FOFg2MhYr_H~(IU`0>HdiDpeZM&aq9fE_abWDce(mPW zLFEa?=i1?V0n@wQZpoJYxL>_F``}of5W9~r&7(nH{&6({je(hg}9C@~Hj z6G(|m>I4tg`e4q2<;7FcBi`j9EU2aqaL0Ze?6Fer)(*bhyWnB(9eAdjKdyZLKHQ^R ziOQHSggP4oC^0k|ZS^jxNvtSla}?=TT^P-#TZnG2C+NP&Q?|nPSdZk6jBNl8+oT>m z5rXyP!(0eeB?8}Wb(vUg(LwqRFxulG4uz5sj#yh19z-3($pQ2TvCq)CCbN^pkA|?h z?UQTt0;pE_x@euGhn@46@tcoSnKm)DTSr0U(`P-h(+QExi!esVJuI;cc0BdHI?{mA z9A=#$Fk)+IDn!6N(0j|oD@O#Zn;;5*qyjMtipL7yi!+x(6}>-FllF+3^6f34hgzsYh+EBH zeFz~p!kp$~G%{SLunE1+Rl(9rq0WzSnm(zAunA)o7}}JFi)71ZL?`FYe*ERl4z}(-4f8QrDO zkX$gD=1|v7`U^V|Z=p4dI9rduZLl@#!Ia>&(a!tar|{__B{r-fA+B8rxvjb-Y51O- zv>~EH&u`mQ~K4m1Z$J{hAv#SN~WH=O8WN z6~k}}zx~C_x>7wd7h;AkZlS8;oj|WDS7ELMOEcux4g|6*Fat$UBH1~ z%`p@D2%oFHh2*Bg!KJsuKF*%iW+vHYP0!VPI6d4g%z|d-0>YIgU$eTp9hpp7OLt0r zz6xKsFv=l#War_3PirEz;N`NATGyr0s}rIH1dG=;yEY!UI3$0)r(1DPq8^{UM6^&27(z$0fpO9udI0yV zm~7iryRtEIpZrUD+8Q(YyXw4pZJCN~sqH)O-NC7s-OB@6ckJIoFuH7}&q!=`u24L9 zXX)~_?CA78dB>?@_v3*I&qtuZK$&?Gs-u?ss-MDj+Lq}@j=E(o_7U-ItGqhL{bm=R z(?{CY!6V1R4=xUn6wfzv>d6e@7hfPV`2DfNCzGugU#sGu@7U{{&dgpMHjO;r^&L4~ zIKBARPVr(dLg#Fm8GY2N^Wt;b$k{r@-Q+#KUdke!^ABd|lc|vxhm9lWyAOU4w3Ysv zpuK^Pp#GwO;6z@CXFTg-f|aUvV*8fsf<+uh!hqmUc?L zyKB0hS=;TuQTm8Hz|lE=_-LvqPuTZEY;A7VY~=vmPrZ>mCa?DY<-F1e`zPdm>R-t# zO(E2Oc7q_tQeB(f1Ovbi|=`riODO$f9KWzwtUwAv^V+4 zzn|nhC%FTKHB32Ur08V)1}hro+nv$Yz&Zco+u--D2`g|gUNw%+q8IfrvvfX!opc#z zDIszWCQ~^pxm%6};N3YA0^;#lEHte*xx*s>`JvZz38m6sfIl?4SE>Q>5w%0R=yu`A zg^=-U%o{GaP-59l#E<4*!)zV9vS{o2*maWwmf3s}O%|`wq)91O;lljxK^}9@;gAzNg3GqXV-;U(zYy|4RAIXRP z*r>kyW7%5kpdqR8Ph|SQ?6AOSI>@G0QaPvvJBM9(2i~cWR-tNTs^570~&%q~^ex&bZD1 zwM5k>(m-Ndo4%rc;`wfM&pV@t^WLeB@Wr9od0mVarj_e?8KGGFJekZ6;vxhNR(f~C z=NV7i`5_y4&>~D|*T%W=83)ZSnY1kc+ChrN0*W2FFTZ=4bOG6=0nkU{#PeYpCqyo` zlLK00Y;ZwQBGrV}iueyoMvoU;RoPl-GcJM70F;X;nOR@@T{tZ^7f1g=Az>}ScAM!( z8OQ?wi16B8yI3FB_@_IMMi$OEo5dJ$q2VAy021=G6{lNlZf-|owax?ThahL zycf!5qsA8mB&O!M0mD63g1a^&KIl5Dk@xyr zm(_O1C&XXgzjYpdNGwhb&4gxjPh``68~<#@YElMOgt2FcqOIT;7-WZ*Q& zK6R=v>_Rj}XuR>P92&u`h7mF-`VJjCY_tk;s z!n`ZGtVH$XEUyE-`qw`1e;kWcTWha4P1l|-@Z#a@J6%uU81mQ##YKw4i*C3<-!Dy1 z*Yb3FsJD90cDv}1=Dz2Vy!lN@RfS#?%-9#)z)k1H8E3`?{alC3?=Ypif&W1DVH0xRs+H2{!_fTx zFuH}vW;bq&ZI$?`>j4v3P;u?;v^z64$AFi|6Lk9%E&F7Sx0~-DK9-$%eBru?{+Sl^ z->YxmUl|18{Ex6yuL9L#URgl-$Qg5KjArstO4nMHio_oezWmO4R$d%esrSSeZv|?% z*6wYl0Y_5PChVx3*!QkVxd|9)(=bG7$EdFeXPy-&6buI+vYnrSE9XYzAV$Pz4ca4b z+{v!0#t8sfWMnym9(O(UE}ICPo%$HXPM59WYUk^s)M!|4kTg3;oNY$GeLGfv%6VQ3 z^U6syZpZ_C@DNxK9evHsX*gttq1gt4Eo;1M?PJ*i{EfLx#GvAUPO~FmEwlOgc z=eJ9-p``h#{rT_x*#E!(FDRf200FrC7Cq9ZYE?)DSUc`zeXOeVtU|X-kXI%#m z=bF6kwWfSyrpRT@rA%lu(3!1xB<;3181J{rYJPP1aV0O1%QgGa;gNSO8RYcqw@ap2 z#OjL=@fRZA2jQI*(@D?DHh}`Ew436<#HxOaCNxuu+7E!D>FlOhgu&!d^pfdxY8zzay5I9de#&a%!Ck+ZO*c-TFw+XJzKF!t|v# z*-DNXL)Bb{*Ecr1awB((-nipys#_Ih;Jz3=o|B%a`G%3yHBBlhi^YcE28_+_bwsMF zM-H_Nj{|)6rSmfav}dF9p-u2Yn?!bKWs}PcQwmxucee8FDA^N(Z~bs;pve@!5))pU z?zSQ9HXfBk;A7CuVEtM8hv?I<+kyY{`J@}>k(qybZ=BNVO zIVG6DX0o-Tx;d;u-3tB<7`mDDuH;G`@1dgvbQ}jvhu+CO2O8WSxvshp5-P;alQN|GKHF^= zVr9c>NM@btgeEs)-RIhW-PXn|ZHA3|qYMHPgfZ)5sdLX%1}qx8GBz1s#nK^yOA<5@ zS?M$#rxH?@2gid*LxYrLA8dB#JP1Q^MgedTFt`XQAc)`^QPE^Ncv#s21_lwk2MO)j zl(2*t`74o0fC6gc#1cPt+_hWq8ehhwA!k9j_=}AjR-0UnNT}};-m-jv&?H+z(7ETT z2?{{j9~Eelf`Gm+^qf#3YW7}}*AQ}imxkl+|0Kolb~Bk3KHg6Xv-+4$lN|F8Kq!sT 
z{5Ug0b%qjQlaR9*11&p=N!TwQQ~^=1!Wi&Wdw)}y6;phL^VzC5iAKBh=9Y+!7M$T{ zG~j`$>uf*Qb!0~{^v9Q(y~CTFllXelI;#Nhdk}H5@0ung3Oh$uAZcWg^Smzze3m%E?&+efoz-%IF}T4|T!SGkLcJm=p&-fiI29gZ8ItImGwV%(Z? z+W7I36iCdkm{}s zXf`H^1TLuph?}#gQXD$XECGh2&D1m&amj2A%shaC?CSut%{Dz|i#30*aBQ9oBu~HM#12S0tHSZVBFc@#r!p@!Nt=*tmzlE-Xzo z7-sitl3kbgFYe*NIC;VPx2qmdU%oR~JNwcF!Jd}Y9EYiS;U9->z7HD^37jP5ZM@-2 zuDVGP}Xy8I}>b9=VQ^<}thK*QFv22YYpFZ>!;re55qzB|nFsbO@s;dSZ_ zX1{~&DSsIICVbAbcc-NQZQWCOGPAQe-pKWOS6W8T*p82K)g_*I?Zd$~uj2r7t1=c> zkP<|-r%&JW-CSi%*7V~iCqf)li)6ULz& zWSyvGm%%2D);)-)-SndF#Bw1NRRVxH7_(EIsC?U@6D*^6q4_oV83W7X}nQWZJHLFs2EVGI-LxT@}^gGkW(GPZ_-{Q3_{y&8RTl zZKp9qnaL-UcFcwrGd|n@k|N1p5I%k$YQ`eSaGvf_$a6G4)#+cZ?5l~8<117Yrc5{! zjkq?KeQywFe-SYPQHoI>?WA?rhyK|rBLFIZa_s3p0=nwAruQ!cS`^ik$Y=1!sX~9Y z#^0SP{9$tO52p&X_vTAeaDSKB`R67VN?Jhk;J-Jy_*WK`<+q9qs%FDGqU<=dtRY1n(HLDC1(p! zA$8#0P#{bOT|O@ho-bItA45f|3kqd6@@kV@ENBkKxM8rg2NBtrk9EC|4M|mav6*gx z4OuvP?zf&tdqHiO36ytz4C$7I10pE@TIllY8>v#^nphZr0Xr9MOY_U75W+ zy&#CR6fOXtSDPhX)?T0Q6Pa{&Bru3dQ(gYK-OS(%GK`2Q=itZp_ftQem?e$3eA~La zJ~Vm2J1?`oZ@}X-mlmJ<^1|wrfUX-<`~I9FNS&Hc>P2v^2aB$l1~Xv?%TfCvmm6pe zX@Pe&sojOph8Z-AbP(oK9H=V~O(OcW;TQsN1fT}+{P8i+wP3M=Wsklk{id8++Hj(}-Cd~fRwOm(_hDXA6g=gA>n3U!@tXzs#pr!y%b z>wAhPX5lm)?$_4Lnba<99(MmrrR8GdWd#qlumlggx0pM#h0&`o%0{!+LNMZZZ4Z;n zti3j40}Pu~XiUT7m^F#!oA)K{@$rZYe5U!=V5vH@ld?d^@j%x!)5DX*>iCykQTJaZ zViZUU7GG#w`KDXY?s19Tu&$)$M0Ixt%tPqQmt)}*{B!C5i`E>00ro6E4Z;xL-wn@Y zHnj?cfd@pU9oWN51u&o(G3L3t6WrGrAS~&W%iZux9k;F%Cff8xvbaJS(_Gei1Fu;6 zWSblmwP*9%zNwR@+I*QV8wHs=6m`(glh=Z<5`WDTb7hL(EhN6h&>TX7zB)Hk%Ps>m zqNq_)8)lWM(PSn^hNqZwaR0z!t4~L8X4?`xsX?KMxHyWp_a7J}gUe|YksK zYKdJRK6V4Z4TBxC>j`BeA*;$^gc!3$w&bE1MX$^E;s^l_$u`3Ml<|CiM{TU>?rYZ5 zmi|tsV6PoXgzNk9e_)M&ZPUnXUw|RwN_c_1H z*xz4I|HK*)BXFy1w1z<1hDHtgrTtb=O-3P*&U*Bw_N_1cd_bx9EJi7zI8SNlJ%~~c zK%v6iG^slA!Gz1;_fXnjo2wu(KLh2D{%Yd8Cg0IBv&P`r21;||QDkGFVO0=L&nQyG zb=U>KsOFARiC_d5ZAtW_QlgJj}1dc<#<{@r&@@NS9?EF88l3 z9LSTQcahN)=aVji(kEJh#RU|wvU`Qzns^~WUSOD z)kUT%w+dpeZxr3*Z5l+_gdxM0Gl4vW_YUl&c9nwH7d-Ha5sMAANsOW{<&DK!_G%At zyN@YZQpGQa1V|oH%r&}}#<4lcC7|?Xn@q$=e=UVkf8WU8{XG;#*gHcANHC#T$*vFy zBna^I!Rc-Cpu@8oJaS!$<6LRKRVO8Cr2*#~uW*{8S{g`Nu?a8dALUBU7GrJL>Dw>X zPE~ScA~Qu4>my~7PC}!Y1gz7Mv0}8&d>^Tz07Vt5saSdtL;+vI!Wg z-~!yAbLIpq1)104b+LV+>kI{ZG~gR2yulGs=$IWvQ36TW*s!vb#ji0j|z8q8nMa zng|sdC_a!9+Pj@THA?U`vAi0twT8QPLlfgZPJU1Q>K6*B_piPtgrbwJm%^y@zv}!1 zP`|Ht;Q;a<>)p#Y(?yoz&fKI%6(QA=E|QRcuYpe1?^R|57kU|;;^*Yu;-GC{_HV#7c3ts;iF-RbCtCAiN$qDv6me2D7bIF_^-Icc%)7<9iRt z;L0z0SJ(uUMTfy_q|=27x|$kKQk1P2LsDFH=66Y2hVSf9@@D3s4s@ays+5Lf7MfNza`ODgA=n<1H5LQtpt#X%>coI6!)$ndF(SB9Yw&|7+eTDNRI-tpbJ!dmhQiTAY7l zN}DE-2ICCBcEo?KD~%C?9F^qF+e~;v{PS?>73M2~DRaMLFPbT5WV`b$>BV+;m1W2G z;w83Og#z1ygJLhIxmmt;p?76rByh>^iDpN|YT1A0RF4%fp*lGaUNFAFj&`! zka*NWI^$uc6gnS}$<+f#fqhozoLec!xn4Q1I$18&*@ed^Tni&bhHL8s@QJPiut?xD zFS%(NP%Gk~P!U%SZ(K0dZ#-FceiINI2-Y|hWN*FpgCAusCTxF&c@y4 zg2kgKs9OxbC=20k)~zl4f!jUTfT+$-i)4@SPU-Gg{PYEQiFp~OAK%xN-=uaIU%JfW zyUf~Ep%g;r+sOabQ261M?^1n6=E)a%H9NbzFXur$0KV}2vMVZ zPRCUC+L@A%`f+E(o@nNS#{(_(RWoh zu0|i(e-$u&!tuN>E?q*XyVhm9)4PORa1zPi`ifZ{g@ia8ZDISQ2P2VPQSY_oPK`a+PSIk=waYrSTR zoxeI{9>V~2`k#P0rqZ}BkP4Vby8Q;!y)Z?n(|MNfqRe;)DB9=7m!htk8YZ#74XweOrl;_1kL+yrUhynrCn)L4GP?~0pBS3JBsL7z5EIc1bKowLMj!Ldn@wO&?s z>3ScH)HMDV5);wI?g)_8$nhxOuPAwb3w z-Jc?C67!r)KR@U92*W(56p;OQ>#%*4QrIFNN86Drv$Qr?v0|Xttw1Aqc65mmWj_y& z;ki;YN4=ug?7F7eeSJ!AEmG}scf!r4B>VmpXxN9?s5ka+Q1~&DDoix@6rRdG6}TPw zb#tUtOULfKC1tzYc@`4zs0GL>owPI8!r4PkWm1Og`TS$Ic48u<7FY9lbQ)5`b!QJ? 
z5fp+Qq>;EQJa^zq0dciR)(zQ+gCUd?i3FG#f?`{fcT1+Dxv|5MnHG$RjvF^7VsV;q z7N-he6;6H=d?M{)QOShVFvZI>(4vJ>fZtiixKDe%N0`kb1|(RHuu%dWcHEk;jb^n= z?6z^|i(n`lEK-T5bW)ih3D<=|$69XE`SbmQ3|^q(k^lIRDx#S6yKhFnnz%~uehqoY-$Rp`|5$n5+MVty_KpzzY6~oa-Mp@FAl?t;D&;mSY!!+zK5g<^ zR2CiFBx6Bv@*0%dY*DSNrBF~bJe1yDzn&=Zpr1w~saPi;8b|)zc*0I4f}K|2jQ8u^ z(fA#vL~BgTB<+QM5$#B0qJ_* zE|I(LP6Dc6yXn)jrAV)*9l2S&NjBN4{YE}&TCS`yp*O|^@fEfbDR&VBU zo_R_{8Tqcths^A7^G$J>C~&_#XLs_M^$7m1IP`;0c0wykRtD;fLSJ9K7Q`|VY+Q-Q zs<%jUwjzITq0QMK_0IF!!`Tw|ww(&I(=SZ>5!~A}_6q0B{A-sqhFO+Eba51M$>|&| zz@zGnCSQj_*BEduLuqc&EQMp=O4rk7=6QvB%#8oj4&yJnSR|kwU<)w+?FTqF;Bm9~ zdA5CtO0Y@jPXj%*Fj59>s?GI+z9e!LBEAx36gXMLTAyaK+)^V$N{*^fwE`KL4RwcX zA74=%E#Tqh84RoASM${V?sxmvWC&JQaxcu!`@7n<1jK;ppN|6nR<-1>3A-c66`$<} zVD`#b0e|9(7v}_IJW_9fu89mY{|=aZq$S8K;q3Lm3bRBZYgk%%ihldzTzx@mB`hJy z(B*;h!A5lg+HRKxV!1V2;fo$wkU8`I6w+a^0Y9%gSzDfMReN4xb^c+4qV<3$i&e%0 zUEReN5S+44Aze^W^omr|ZFx8-%DmP(gz(X!~6 zU*`y-J!&_g;16A_Uh$L5R~n`Q2(tGwVV6&FSmmd$kVzPmf_TX%2y2Qghn>3ez&m`@ zF(M1?T^xyF{?>f=Fj=bYjF@B0@tKg&-1J3sg884+^7m+CHpkdJ{(ZDx$fVvsG#Ive z!Hqjy8h4|3^l3#4ezfi*^v~Xke-Sl*@uzQ?jAr~JqgkY?m^(22IA-PkEUPO5pTMdE zo1CQ!24wv{7QsxB=&nc(mLwGPxNj+na+y$JGMev}u#MwrmR34Q<;|J>!CF%`z3<^G z{sT~7D6#8>peArx(sg$Z#aTSaJDNh?SY2ChN5W=_E*Ev4i8B`_#v;c>0{e;d5_TF= za@e6X%^En0O}7O;Uut%lEo@R~9GZ{AwL0bDxW0P@qK9L*^SnhwDb!^FkVGvW1S{#Y z#&f~sc($Q7_Vu6}5BJ-HG&W0`Yq8r?Wo*F&&xlsEbW4|i;H5?v2b{Jq* zTOs8XqO|HMS|+}M?@2sVDBGN&s=Asyg3#sCQ-7aPQuNFh+zIOapR}T z^%t-IfB#S7fII*e&g0)cI73Ps6~C~1_|37?N*Vkq4>K>k`478?%iUg4!giw=gY7?d z5BWRcyLlZh#+cnh|9@Nv{H4Km1-K0Fl=u&W?ca9~@36xe|HfeZ;r?yBKMl46syEv! zezUjwINNmaqA0ty@zW23?OZ?b?*`kljr;2ugYEA!Nedg3wcq9aWjuEle)EKrwclI+ zVX!S{{w1ri`MbgPqk+ATojzJ{RwED74%OixfL;CjM?Va8@wE1z<4gbix`9L^xP19O+SQ5;L;NOA(n?XQg7aAWta&~(?lkdc%~Ewmbt*e zhYUA)g=iHb4$|1JWNxk|>1swSNAnLeOx|@7kD?9gbTR-%_VX1KrmaE=kum|Rk#FI0 z7foak3#TRRgd``CA;b1(N5N8 zVPX?bZA^aVlILk{`OLj|*$6#mmz=3n92BCEUslw^;O(l2-9hB+vek$oXnnSfi7C_~APjnX^@89=g7mWjG%U_#uwV!Z5OFci?qwQSd31WZM-aX6Ubxvnj`v7Ml;|S+Y;*WY)(LUz#5&wHDjIZkY#fWs z1L~FDrS(1$*r$w_1bcXyhmeLAE1v!(dBCW~Cc9ddhaS)xtI~nnE9&)&nrb?x*J01@ zSz4I#T{82wwU&58TyCMPH1i1K*qJ?Yh7RrE@1GVWu?6AlMlq>SJx|$NK~XrY^Poq$EPEr6;Kr~C2`#; zp=Vk^m`lZ`Q1qy)D?$iKl~2_O1?jOZ_}_TMawySS*Q>mc>e5Nu2^SNPPIGn>O?o74 z(dN@D-1d?0jv2~=-KekMMesUMD=3(VK|(5xOOGh3iQ&@yRdR6=fV;210rm4SN%1cq z@LSCXvLaQYr_Ys$Szc)=BsPYAQK4qd2BsIZrx1#kBd$MyhKM9i_j?2YRdA;ZA!`N0 zH+(oV!Cnz{-9$VfD*&&Z5d!(u(OmzC0HD}Vu!IM_u^!70)LU(hAWclO z(WRHI>eaGtUXu8z$bHMO2mTf-D#naGI&R&Bma;wk-khebo?1pqt6qgQ71`T@P(Xa* z^oY)UbgALoQUx*oS3gjx-Ys)|gjZO*R=+w2xs@t{^Mccc1_CHhwEHW+kVQICTbP6-t?ViQq9)!Q7QWcuG zN!D%=^DvQ<(yOae-8uPIG+KZ?>!Cn;uEi+@xB9ZyFcfFCguQcEnyx1E3=wB8D(&p_ z@lrojEncd+L=F+_*Py<@HOQ{xeLMPDAlau=b3A(HHF1juw;smZ9K;-=_sBM-mIZIa zXnl;a05G75GG6L%pcBeJXgVq3f->l=f5XG`Esr!TJOp>=o_GB`J{Kc*n0qy+hZ;73 z?F>i1bEuu!=XOY<8wl{0-B5i&q$eN1Nov;4d3zGCT4WJbrZI7gaSteb7sa>0#u?k@ zTAkpA?zo*|7t7k+sWjQ9M@x+iVdjXv-6#1iJ`uG9kbG>gaW4$6amM*F&|N@sn(1zu zRKacZVz+4u@+!Ym2by`$A&XX>&N6Lkisf0Bd6lXm7k5k7fR({BDkFPrUYtl4!>Re5 zDR)(dvo74u#c%~anp*k}T-Fw=tqiw1Ymbwo@k#6&v%xZDm~PigTn+ks8Y2W>tqJqG z!z|T{ubd&k8`is^5I{HMdPkz7&0ag@v-JDq&XEWjr~pU;-n$AJf!gf0D*>u_#+2(q zn07}~K#SGO8oC9nyQ8WK#Lx3Dy?({W?;VA8QGMI2nloz;<`5^5>cna=GL9T>L@LkI z=^va-nDb?nZha*4pnpY&15%O^rIotv;;6reH*~Pph!FCHih+15QM~UE9;dwVb)vM_C>pL~GwT_r%{`gttD{fBY zMhlTL0a^_o_iEIwCgmaP_odzXFUl-*yln2kugaDr5DIK4YDm5FzYe|k!G{YQFgZWf zDrfIl3d&hfw1dRw4IlHW+|FoDWqC^gbkjtOi_~&4Mt(hU_T!2t#qBFMt*&}{m3gX` zYs0|k-G}8~Z)avrKa$lEZ2dJ;bp6qO(k zh%n)ReX~mX_$?Va2xkRG?f?S3*n=)0l<~&wZQMjX4X?C@aH=hHbZ`*bi+hYo>divM z`M^GbSb)2#4-Cg_<9P_9);v~4i~zYJ=TU6K?nSosH)L02G 
z;YT#wv9Z8b4DvmDOU5p8|9a8oaf2&f1;+IDPLL)S$y6%?bs)d3r)A`wZw;LJB7j?V zUg>B_fJv%Tn$^{;h|pByOPYRx{Xw*NDAIa-;Rz7wauM%WMe`0S3fBBEBX<)b_+>J{9P}O<^&81Q9N?p}tbmL?SPL0+2CjNdf+#`Vwg$tpwUO zUcf}f5If0FIq{BrlFd?*s7Z35Vp2eL63tT%f8|&*N4O|9{8BS*E}gNJLb7T`3J)yV z6eE+g#b|z6aI?p{<)hOxgVNyccyc9k8}4J^FB-GeKE;9^v^rW$UAAyJDbkC zIL-qwA+aGyTvOx*qd0yhlDGv)I)fxXK?0djR1g%6DT*!t#gK_&YC*Bgpx91OAf|i{ zNIsWoK2JbCUuM2QOTOStzVJ!D2vdP5q(I!XKr)~}DzgC6QXo51aP6c3%2cQbDO5Hs zR1GLp&n(nzDZD;YsC`nX%k)Ga^5mxJ6T^Tf#+gq{Tb`KDJh^l7#Db~F3Q}ZaT4Wbc z^0?3Y;_&{F(xrugwmaS&4p3{nzmS`r>m5}8>N z-BJ>Zo+*K!lpvU%CPJPjn?6kqc$%L1G_&Pt_RQ1Vlcz|g(tJp1p=oJRKxs*4X=zJo z`Ali$Nof^RSq-GD&a|u{psXpgtfi%_ZKmwSNm)Bnc_*a&m1%i*KzVOwd4Eg!;7s}O zN%<&K#hX$ji$uj_Kn0+bcB-XfPBYJZtYVR=a(kL~*|c)KpKmpw@&j?@R!ik>K*h(C z%Fo0V`;cdc{pIfio}HAIt39bWJ$cqctOJm#dU4N))FRSEp^7-GtcXfPx`&yBxw@4Y zbL!>tR-l^Uewoy$7jb$un@ri~ch#3(-?~lz z?Zaok+-yaHDpDt6=&yyVO{56`G; z-?=co!Jg?Y8a$j8VaEaI9PW68-@7()dqWOw&TIheMe14huzPGf^S;NMwib7YVSZ6O z3+MfL9xnlS3qWJL|Hzm5b7stS-_PSOaT9J@J2tShMt%V5rW2EZ+afmw#eD zZjgtbT+Gn?aaa8h`SOqEi}$`E=LY_c8S2{BgV_S~{Jn_vKfS9~y8PXI95bEq++O(i zUG=ZuRc8qNk9AQhEdr?F%8IZTSZtbHId1u(clQr&*$Ail5ha6@Y(?RteLrLNPG0F8mqV-o)d=7rH3RR|e#(dp*ZVRTKI;ar1Yp(65u`z8}B7 z``s$k`Ec5yxZ-!KP`4gANk+|g&fn(!zRwaLeyjcX?R^k|@X>Epp^?0mKdeHJRuF2! z$E(RkF2`%>_Lax$*}l8S8^{P@j8$lw%gI(raplQ}^2XhhZH!gu=}z4{m(!0;8Hmi3RTsM8;04+QAN z_iN`LbgvL{PDoNHzSehNWzjl5jSN*-Vfoxqaehxv5+Cc#k$ug(=ka1Th1nXOmXyK9 zvZ~2nM%0G#pwx;j-#nIb85NQQ>h~j2pkTFR&GWwRd5mquLj$Y7-(P~a@5lP!4RE>^ zN{zamMVUvN=d{8j%h_0|WTyV{!+FH@OS~Bma7ZQxT=y1UhOc}$u_tk{s%yRQe_;NO z!?wnL=k~&&Nonq|5%d1|8O5?Z7cxm-V`{+WL*)^h9GY|;0#m4ejC8XgK3nBJCs-K) z4OGd;EMMvobJlc16VIFvK0Mc2yUP_0 zeyBLFW&FvgjTn#qPPUPdBKUe9euZLlv{Jv~)s2r@1neWIR6LHc>rKRo|;AHu1Pro&v}5IE5ZweMBZr6hi= zhS58Ayh^T+GY!(^iX5U`mVXXd3WYkVOX>>AuUmz%Tf7zl>MRV=WL~>$5{XN*tmmId z13lwgfC>P`3!&F9sFecIP>uJtczxiTcOg_?R^+b@O7li0#D0yJTMLpJz2OCmD#~QR zKcL{3tM)D+yDGbegeH=aa%Y6AIz}{?9x^yxddYXoi8U{@|9O5Yf$Ty=Dk5#iozL14}MbD5mAG1)A|vz8+BnLjs)!2F+ddm zj@p%+caHu1#RIzU1nng(Z!R8s>826e#Mj;32s_Mm!pk{)_cQe#g*j5n!Y2Epxc>qa z?W1JsOZsmgDZTCc!w3AIG81CB2y>hM9*SbF?tU4H{;??hHx&J_S@7#n^nb}u#XA7A z=fpZ2AC})$uOoXE#y z*dFjpan}|029_m{$)vHo5rQ*Kwn2U^?hl@WY_!HeGZNUKR$d$3;@7_x_rXX!$lEY= z8R zWkxY9%+t;Q`A`t2ROZAFK^n2)WFVApso+OqQ7l4hiU$=v28+SMR=OvpQ`}{M>*fe7 zc?nO?i4&q)Y~ zAVJ|pl+aWN|9*I-CR<5d`FGh$mb8{5yv%k8mf{MSWc(Mxe$us56wy=o(~t{ioX~T! 
zw^*>IhuB&05BExiV?q|yt7|?yxIKN>VW$demqti_u`GQHP-0LMhz}!N+y_`heaQ?{!}R#Ij|VpBUV(&%#o3`*&i7R-lp=+ns$Yj^8S z#N6gz<6|7S>prL~4bk+mg4uOk>m{Bqo$+QvIOzq0;=^vfL)4&Nj=ES$K{AKix3p#8 z$gR;UPfpaYY#fSA7@ab5dNHz`@GO0b_t27}OKb9_s||1Z9=OhvK!a0AQSs#QD>nyE zL0)W%m0vz@?Rjp3z5P%k=M}X#3vgqO6S@&+mX={9pRBK7T(JB2YF85v15aA!K)=Md zg`&EqW{$M9DJhZ-OoCz^p4K%d^~VxC14xm{$V5!dN(#3~Yn17fMDvUSku$w5h};79 z)NPf*m9U-FHra=M%E6y~4}5*dQ)0f3kkf8Gr*U(u%g^Rf-k|DSaIzdox_ne4yV_$w z&-XRn`&Z8^Ev8LI6v2t#=}k_FU+5ZW6@f9k_a6gbeNmT#<*zTWWB~#55C2Zre3n-_ zJPPvxKI3((Ao*E9S*Qa z)dRn^j_Cwgax!f2CR=}IjwOc%HiPM}I0Gzd24QY2ZySCnO&8t|0I>0fSZ_%?JS*M{ z>QoIRG|N2)rpCT-{M^`|^F`xNx+Zx22Fa&7Yq@jhz@WX4zjVP_ZhS{ir&pImxuphL z9DS&>DmIBOZTqTWYjuIXcs94k?_G?0Cf)Og?JM|VxssKW!SnJHKjkX5B!0sTE%)8L7rrieCqamd6=qrmXiGY)30*CEnge ziXZ)yyUy#!%Oz;32k}^#Cc@cOyaLB0biq;@vvKCW=_iBOB+c8%;L~bbOwK%P%|}Qf z0MX|*0YF)RMI`dZ*|}W5+Z_Uu0sMdtT-t8mG@@wM_k-OxPWiX6ueAh<4#ebp_KoYp zD1jV*#XAcn$=YJ?^t+w_<{ep7P!zW(qK^CG=`-m)`QtM>XC1!;C>}q1lM^tHn@Ik= z@LT5Cx!6x#7Nqj1UEJOCvkOk7^I^i3;G<9X_Rqs!Z0Ub_*rokt=|YED*^t_CWkazhdZSL;!uXN460EU?6Ctag+AIsVbHdhS_th4!xuK5Xo zgS&Wj=s+sSP|2a}C|UK{J@p{5`%B-uw0r}S^bRY9Fr6G-QzczK!l4%O@|$30a0NXu zSj*H%XY}Z6q`BJV7aD14lv|u?MvNNyjQhyL!PHOdLtgb?R?)uM3;>*bTQdDqK>N=c z`3nH1{{+BRGi(3z0Qg+wefx>B{~iGT0ow%6w5|0!U))_E@~z(87`ggejXbnRo5{ZT z&j5I#Fzb$>^TaZp&csv1#DQ*N5u-)}t&3Rpk>RT+5z?DY9ERJ)#Q%O2fed%C4rzhy zTO3@K2%^&&)+vlAf+iJN{%mraqW($(h9Rb@K)>Gax{~P9RU-V1{Q6+{N|HN6Ow*2j zqmj4rYZL)XYmCG^x?TTcnv5mBRB8pqo(seuQhe941?E@476lOF?`6k{2q>J9|DLv# z*9LQ9AxHU5{w;H?OJ3f*<@ zwy@FkekA)rl|@acq~~IcX8x#Zh2dMg@Oc|N7huxcgr}I!eR{t`9&4C_P$iRP0#8r@ z@F>WzscGLeleXez$xCsqRPc_7Y5i~yRs@9sidL8&(!<0k z3UC1w{0>zYv4IpaO8XpD&{x9?($XEF1PsEYk^;oKtw|5k;>?}|5>76(fajrT$KJrV zT3(zLqlWu!pXp28(lE_^vA}^y4P;x0&w`!}RY=|r;=L_pDn(ytm+Kz}xpZNn1lqi! z_>{wg?aS&r#qqX~d4be4nyff9gRB+r{i84_g3#u`wG zM^W<5OJ4AM3XwdJkrA9bR1^J_kzkZXYV@Q ze>-*`!2S&Kt|eR`{bVe9vYsAlDxt++pnd%G#ug$sYY!_yicxQV+8ryC_VwrA}SQp!BdctUI@|8IXQpoJ{aCWTY z3DA#Wal0u4*Yy4t`O6Z4v+Zze&aW3v06yTEpFL|oS&U2n1pO7@YBJbsRz@b`@3RlT z7jF(t>q-oQL1P2|xp-fKw*BhDA(l@nE^f`x_%kq6;yeq(!?yBWDcYf+ZELx?k;mev z^!|xI`nJ>W>@|w()K6QPUjXijTqKa|GWF^2?DeSljf3Nj(X8AICG6pLMSdY($ADh| zu6F`>*KMpTY~e;v5fb!z|4);~N{2r@=%eJC{_M4!@S%bzrWG%6+|o7;YKv23I@i&j zh8jvR{-bz{_*703B(5w>#z6_nQw$|6dP-ICc4djT@`Ht` zq{DBNr<1h?{HD>oBbC#so*m7<74I36sqopEG_;~9)WA}Ek)eb&Q<=?lbUHhm<>FIS zZbb`N^d_C;$G2o(gsj?XbdaiUr~2`gHRT2V>`emla(y)EZpirpsR0j6o{5r7G3W*}t69$gsC0)wU?!UHYwfKcbi&`1-it>3?}( z=>J>x`u^H`#(d*{hrRx%;{AV;z5cy;pRPN)ZBx=3>DK}ljB*N(H?h%KK0UxIEHe^_ zIY2mcO^ zb!X;=$^-QTl}*m*vdYZRCSU_fV6i(CalxIq!wp!t1s*OkxLTnnxGR zu9pb~ULmz|g~Q`?T6Et;Q7OF6jx8{B&}&h&N2>qQ8?g(A%}%sofKD`N(ZIU4CS?R? zXke?D?qsSF#vSx2-V1!+oz0ho6Ho-d)-H2r+l(#Zq0^vyl-f9OV2iLNO-2tI&v%Ig zkxEC;?!Q05XNRAV{_zQCz^~4F0#9X+i8fcGW33e04g>5l1khJ!izf6WyHit)p!~ZI z@q4Y3`8in4krkpw4NlUl4k0VgOaR}z$ll^^4rZI8TWQtdBufrs<$`KB3_O^5sZ2P- z#k0Lp4e0{1UhjFD5UJuco1X1bcuG@!R9B^;?f;rs63pI7C7{?=_#`II zr0A_z?%vk7Pyfu8K_$uR`E|T^7<0cxhu#*Bok+c=ixZe3tAK+Sj8)*ZH&O*#VBc!3 zbVgWtbptsJ#}eW8a67ImN9VQWf!&dr%@5w^H*4psN(an+9XdIToSoXts4@u|W$`mT*PtI zZ>kPa9cu$B5a?bwSMG7h511g|(B6|8Jt=2sgpT1bNl5@k^QhaoGwr=gjcM|Pu9MK0 z^&awGH5KbinkxVvc!i=$^lgPH;}|H3@Y&^v<)wHWz{gaZ4YC2p?8~hx4HcoU$=k2e0eb2G%n!zYokJnl45@4lh+1gOvD=4r*7#4_Ih?tDcwt zd_vlSZ{Pj(TIaT}GIp9HCt@}p@4Nl&N^kwx=BsCFhqjiZK@AfewzL-UJ!<@aog-v4 zyO@~z{^s6VrR~el&b|M$IYP$Eqgdj>BO*K$;;Xfaf65V}|A#q3QL7ouQnVZnseuW? 
z-Veq-KqQ()HyYp)%^n&J%!%fzi{=@M=A%V}MX>z3SV0%8a41$J2P;;Gl^DWG(XddF z82&oO-$v0VK*~8W`|DyID5q#tl7e2viL}BrC2fYBsy+jLM3kA&v^42$TKd(!= zpqtpSAaFetv`YbP7lGc#Cx0Ld492Gn52XarQZyj(_t2@A{8A4E#fOqpRVP!!fW-YW z#AP&5;S6yzo+wvL{6Qu{Cy8_*NkTn^<#;S$3&L?cj&l*hZ4t*i4DqKWf-GVo&ml3o ziNeq0_EkZI^x`CQA=1xNWx^oRRcRMp(nZwMRjSetbfh;dq$_e}Tv5&tMr0hJpUn`$ zW<03O(CNtVS;&BKW_l`TYFcET@yj$%$UIq@d8i}v#6qSlXV!7$EGdgDC%-JVge<$t zEWVDcQ=0P3`t|%KX87(+W`E*`9b*pFeF7LN?80pK=+4vpTdetvL{%o>*l{i zU;Km(e+|(6)g1n}0lE~l|Nn*FC?;Mef5Ha8sRZRz5tBsKrwrKO`;~b*x%)hFI;CYe zemd1~<9E()hIBFv`4cu&&A@q7|AYa8tn#+|c5jK{f#o3&QgfLQ;*X^)u zy)$a%{_mAEep&hrHblwpXySn9pbF&25?r+eofgJ8U@n9)d?5?4d!+E3 zHTSI=UPH_)7M{O@i-VgeJ7DUbk5Fx&a1CpK^;w?3!_?9FDd zUetHkWFe`El3t|AiLj$P z5a3P$c8sAvFdjL)^bkq<#4>p_@GU)`{*?!D<;HcA-e;~?>(~|l?l~R;Nb8w3%k2BP zQRt_cI&l&mXZtiSBS4Rw(O?KRbgxuJKZ$*$LK=wU2C|BVa<|nD2$Gu1pRw0kQQoo# z@7WsAq`U%W+8V7B7p~=$Rx1bOL4jZH)3eE9{q$Dq4BLIXI`LZiqqr9@_Ib(e`*cbb zBkm3?;mx+K{?a{sfbRKL%*#4`rPbKHdV=C1)}fI7Ezta(Q+6#?A>gt%GEVPt?JFkX zZfg*}2tlcRb@=sB=)V5bNfDdcnxZ5b>3(R^&SoPx@o({?U(QPN_@Z0IK2rCe%lX7x ztMx>3n;O@k!VRXc;8N?nNirG@WC%U&{&{sTF4o0UHcjq&w*c<(q~jjQ2k~hL|B^x2W24uch?f>)We^OuUtFj?b;vn&hH^M=MSCby5iWYsKJ{N>e4vcZsfRB_iFGW;&hk zQp49up}bXSbIbSXd240LN2@HIx9zYL5bxoXDr;ZMzPyiXw{^R#Y_4|p6>P6nz*7cinmKy0lvkJj`3a z=Xdn3>tN@=(--Ua15@t0&07w>__+QcwEOO<&CbDB+eaR($X1fRA%$>khqRI%2eHJ!t;5~S8bdoskkFKLp&Aa#!ZqpvWbxH9zW(Xq7_P*oG1;{V$ncSi__`l_Xg@3m+P#|h zeN8H@^BID?8)qnM2asCO*@xd<#>IRhMviz&Y|dg2(uzU!{WQd8V2#lmFlXdYY0JI> z@R@al$8}xZHrbvUoLce}P@&G@@8yfe#Kda-SNWP~6jx-}uKX>V;2+JDOB|=(-L$X} zICH`!)v%nriC|^Z;(`PV1vB11wM~rjb*|?ETLqtLAITe8%kAsPGx_9C zMgG;hnb@lwojdWuR;}giRmZFypZ(FYvd+$6fNmL`0DrJKl8oZjKN!y^d*;m_$xUZ* zqxu)!O12AYqB4cXPSnke`(SUd4VZB`LF@aPeq+7ry2t1m=5S=7vZ<=qUez(+#iG?% zb9$P_g0uUVfj57!USY%&JTEL7orBlnLVJE^y>2yfenX|^Lutul-DpI-L9Pay?p%}>y@HA$6&qwDqnxFUR5*c*8h(6n!Vw6WpOs! zouD$8`E95^^NTr`P{iT*-pL-9s#Gqh2AEEAk&FGjyzTq*iWa@woUpV(5{pGf2 z5|8;Br4>e42>#ZvFebnS)&{mA#daxQE~YK|Oq)Ugt5h^>cJFQ=kv4*})pMp|0@XGh zpHW{Mo4C~oFUlvcwChZ*9tE-KyeGbfR#=y|77t+n{HSDHO{Z4xP_NLzRd7ewk|9YEf@Q|E?&RDd0Xs^MBU{CKjR%vX)Xm^42>1d+4 z&A3q}@}M62*YD;9Ln8@cQu^b&X^XS{sm$l(HziXGO=9-Aw6!WJg*qX$Z$}~)K)3i0 z>?-QZFYvxi28U?bw9WS(jh8bDh!3IlO5Tvd05Cfx7e=8^YIE)WQ?~K}R6JS#u-iVf{N?oN$rmsNPvCwr{3UqtTC1 zcWqTnqMo{c&+)8YK54I!O7&~&rJ8CbgqcpY_bwx@T1Gs6X*T>2-lQ2JsFzV)Gap5* zp<%jmPQ!Zl_!SP?&$xLG_2VV%42Q?QG)nn84Et7p9}l$m0S>(GeRev@h5uQKSFcu! zh0c{tzvjA{zTPn(CS{0W#p83m30hVcB414!sknL_y9k4f=w2VQm0qf_9bYcm1XPa0 zetmgG%!io|Fe(3a=rCR$U`2*;=uE;Rz;u-mbgfKVlE49ft@xFxH$)Z#55SgPnR&X9 z!8VTLkep=_|H`ObIe^%eEx583Oz(?`{ylZ_UH{@2ey)Sat1g!1 zt&wTL((SBEMFx}?d{@2Go3&yVrat=G~59G4-VLy&EBc@r?`?2!;#*xAPytHGxpE~^W|@xV}Q{J{-=?mb=3E{K^Sz?0es$v3w)`Ex zl4{7EtYVUxr4RDg1&?5Q$Yjf0{?~#-5fJvj8%X}+9Lv9m2YuRqH;{bv#~e$7?{6T< zX6g1%14*M?lYdjt{t1$}viAScTbT`Y|Fg<4CIe#nzkMFH3hHP)AHm9fPy_*;XpWyw z_gqk`^?a5@M@9Z=AjzJ1;s26+KoxOAvxkpkp%x`GM5rRUsNG;wOD`SP(#6yUQKP)G z&2oxic)FetE`F`M9i0F%4j`ydGBhOKAt8g*R$`uT?XaS;(>AV$m*E?Xl;0UzokfK! ze)LJ-ljpDC^_}q~8pp{pbP{G`rFc-xXM4Z$8>c!e4}1I9_LhYb?o_7jS@SD_QX=S# z*-7fc@iJH_nTZWlaCA?dRVi)Ly;i;8c@{0 zqfahG&Jd;41$nqzphw=9!HO$uzDSIYor6fp00p~XD`We4Dh&P7K$ViBJ0O_FDFY=? 
z{o3?bvyzYJJUzxnNU&S09$1vL!MS$vBP z;tB}Qr^5OA?&=tQZ8e^kbr{?#6uwhx?K`HW*a@?ok<^$5ob<%-?AJZt#>*~rRC|GI zSb(!1BJ_@|doNg!^DAq&+Yh+^sZ}!E7f}7=AbCa(Lw>O=1)%^Gsv8hJn*C4UzkY&+ z%&N?)Ob7n@1dCu%Y(GCi(nRyM9{%DpgEb{6EXPaEhb;2=M7PavCne5;B@4tgq9yb^ z6Cmvy2jl{L~4BuW~dFYzugqc-u&!=0?Qk1^g(ndmDmA?{V$9Cw?q# z^>eJMv{q_H)0FMy)19ulKG$ErgDK}#o2RWjY8RSweadRV8}T;tXP=Ypg}@YU!tDuG z{2Rn&X8d_nB(suYD1ZymmT^FPEPHsWp$Abls!XnyUg$fu0tvz8L0=8loTj(>majJr z)h(YnvqLMKsi^a8&9JW{>-JZJrmkEqJuDPfZaXb7(0!6TNDWYG2rB348uWh5J?(e} z_c?2UMyUkHcRfvx(tZ!dYt^K|KiqoPK?;E;SS}6Zy2ud$hL7Ae)GZ_q$ z*Dkt(Mr}?oLMT}g5KH&q$?5j%tW^W-9_tQ^4&NfHh&?R%EPKJ#q57RO-dJ~-v zeHFF94_Rt{=$$O3zBVF$JiW{yOEUt9e_x7*NEs)=Yh~`9Zp@RE%G*O7KR-4B&4AdA>T^S8o! zhkf$=1hUW=Jw~bl8<^#+dQPC?ZibaV-7hWGhr07fFo4*lOVJT5Mx_X*> zim?j4W0gO41K$HPQDsAM)4>+$##+Mt+(h0i3yiF#pjZ19mk+dT-+Ul0)np7K*C`ez z+z-q~ueT;Sv6cG(uGs8NA9rVc{(R8$!qJ&wI9ob+YX}5xb<+ zrrvw*P{Z8h@kB?Ycen)GG;YDh(De! zDP7!q1pSb$cf6IzlcoC@Qra$r9tDgsjxk9otT~Uc%us1OuQX|U_e4i}(S3qpB6Lf~ z!5yQlUA;Cn>@bmx)dpxMt}Tx|k3oe3)I3(-8Qcc#eia>>9?y^Z!Tw!S*>*rw3SIY6 zrku}#d5toJM=q0qPgDBvd@rEH;|<-n z?^!Nah44M>f_wEJA8Xa1hHShZNeHLYdjjjJL3Fc>sGLK);RM<}`t-_JG0ccwv{UL@ zSX_opS&PFUS|sdl2(Qa%xg(#EDkgB8obndO=|gASu7D5HKnAaNSIJkYUUg7oqG{IY zD+{VJXacBwI$uz8!ZbIA{Wa;Nu04++@=(fIU2tSi*edaozof7X;xfu)94@_qC(6=k zR9ldtmPfDa&VyLzWm_FC&QFWhO5}yDY-_!Iwb!*O_cw>HA3ms$)8)JHEI)Y!5=y+eIqGcm(z__ zQ`#P-&Q5UMOp`H0DA0!&f=qk$qg3dett~Y`q5Wu*pel>TT%#!~V=UDdJ=hqu)pP3y z$GOxSF*3_rJ6CTPy&|g1ZY5`4EHa9*7Z+A(X~i_wu(9PTKL?*jNO^{t>YpI>f2?Bf zJBzh)8Zg4MW=$N4uNe_~==x^yi%#9NQAGIs%yc_3XgY55+so;Si$Oyc8V7Y62CGAc zVvS9SJ%*&b&&{V#>x>9Y#V@5g?XA~JVx`oyYBG#ey-pJBp?`zP2-uQyNxfk*fjT&*lDb44B3x@E*=x%JKegcU;qN=`io0x#?lt)%_tfP($Z*O(4k&9!e8f<23aJ5j_LTHCt$a1LgtbzMaDJCO@fg4r9U?fX1#R7eN#nyLiyHAk>q z+?C^c0yWfPO|VZ`>pgW;isgJ+7>7VghaBrBA{JGH5=%?)X%Y&3aCf|rg-F%a{8kMQ z^W-s>9CC0!(@GcdAMQjZHN0kB<W--I7|zvkh9(%c5j7v2g;Hdhxlr* z7)uq#EWmsUO;Sbhn`S|Dz>l3l(?z0kjUBpMbzAD3Ui4pxe(fWy0V1j>+?M&p8J%_N8AtVWi#9R^7GlX zrNCQ6XT^jM);#O-9uMrjOews~q##V>E5=b!Gw!RE;_<>rmj*}+PKqA(=kE{rL@%C=q zU#rlR>ps`#`E*|zUgCxAx7Q*?i;ndXjknhx-AoeQKhT?zv{;-D2Mb$J2*Z%aUKzjy zXS^5e$XSa0Lu(Q*Nvl zleHRDGf~5I{Ae)w9n?4$a9wpR&5XZ3Rbr(IujIu}ielX9AbzFo5TkFHgN{}2$E8G# z0(Pd=NnHFm^#?=S9Ck_$1+<;v^p!)r_s+4l@AhT+6czreCzPe&7<; z&8={V83$^Aj-#EO@8uc3@ohx#a&tpHA1r?pR3? 
[GIT binary patch data omitted]

diff --git a/examples/hello_world_flutter/hello_world_flutter_openai.gif b/examples/hello_world_flutter/hello_world_flutter_openai.gif
deleted file mode 100644
index d0547b0897f8dbcb7fac1ab12dcb3ece0533b5d3..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 119360
[GIT binary patch data omitted]
z)eNpRysovfu5~&ODSP*>{WsgD7KJABB(I%|BbaKEE{t(+zsWl1gQ3I?x9>!5T?}sB zyl&7LvscHh&&I9a(`_KaZ7{=asKRZy!);{TZFJ3TOy|D2+@j3Jew!`A`ZL&YPb;L* z#dh|=Yww{Q;zIx)m25}z>cpAreiXnPxDtMR{eVUhL%m_c&VfI6m<>sdzrXfJ{U@)Kff7={#+JoewQ^DN(WA zE3kx8BMvY;@1d4#hv(zC=hK?!^NA+}5lQ?%E9fHa3w_6KcesB>HV_HV5k;%^8fPfB z(74L-;Mb0%F_V#XiJ3jYSSoPm>~Qdc=yyRE%GH5lflLaU$&fE{jge#=jkH1;%{k3n zsd|Im(a)=o3zho)(R3E|#061b=Ix@BU(-kyFecHI1+f%NrVHI1k1k)(k2NnRGR6FY zpwde=C@eBrAKo%<4@6TcmRsI2?T#d~+aDj^G4D@gi^kAf-Lw3h$;K%W;vW8__uXs$ z?rHD_Jh60cj7p)YTK=o26sLsV`Vn}(J(R#~bNr{EOBO=~|FKSUt@_2Uz9RzDqs3qF z7!;Pbpwqt!x}HBj-k$GGSJ*&!o*(aTE>2D$yk0L*fW+Sr-XX0y?wK_ZeD!d@NweOJ z+FQ2i)WV(phG*VL5t>grgg`B!JDvISf$nY*~;~B&$Ym1vICMKI_>VSSHEc9Lf7zPN#and z>t}OGY+Kjuuf{E8RQjyd<_G%Lvmq{qwyPyqhW5Lkk_;^}!Cc1`886}gv4l+y^AY9) z+|GX~j*tV5un7kLiLiyx$P{b-ZN+&gRs63N=kk@jnu347;{3NbLYhR~Kd(6JcK?ne zSPF_6|82#&+H`Fw?N1zmaqCYUVXedc3YPFsgiX2(6FaoA>i3Fsg&}QI_0Di2GsD~; zlOJq`o{QF8doW#Cl6^1AS@%1RaAQf=Qh)R(!uBt5gw5KBk2+(m*5|tuIg%MNZOvD| z7HTZk{%OVeew45pIW?Uue=hsA%R(<;@+K;A9Wudj~=|7!km`O~0wvQ#hYW+htw zAfFoz2Pk z+SoVz*pZz!rRc{RqcN~1;npI!sK;=OHGQ0RB!aQV_I5zlO+fdMa>M=ng7W*Hr47gT zzy8k^=Z8NLw$mP5o5!;O%Ku7)jmLgv-HpfRpn@Ue9syknljLO`T*m`kc__r~p4o8h z`B6L(9)g$uB2(|iH7KE&rK#I94em~hXsJ`dmed0B`t zWr4ZaC`H6by;v-$G(z66lbvuX`vh&Za>OR41BeKli2f|6_$=7tNc?tGQLoU*O+_C2 zvAqWf%r0e#+2ND`fU`8a)cR58QW=2NZn~y6~t_ zH7E(0F=k?&y~T83`xt+2(VvF>Gdaa#A0Z)hW?x?IADtaX?!kwu7G3<_6bvdjp6J+A*bKL zi9Qy@!{Jv%GQ?Hyk+7HLn!(6`QOQ$O0`TT@(#NY0r~DmgJ0@S)Nva zr*{QAZ(CN(A+fK|TVkQGuM~ovkFNd}a^mBGIhO%$#KuTrq?`(h zqKUeZ_+ivnLf=Q2-8Q;$HaiUvQB+Kwef5U-D^c-{mEP({Bg8oyCvS51r+%s5??esO ziNzY@TM@HG3YK2bUqK5ykVE0NfIq|F>Ks`Y_YxGLImnGw4pw5{+&|p|^&Dg(@&*Kg zhPd{{ULbPkB`(MUldtW?&{|I+-I5r2s^WndJBI>7OgjV4VZa4Yw;EUEerI@j>yUFO zaKkN2Fq)b$iG>v`4xLwRafeXv` zLoOGwGXNQ5=b4`c=rqQLV_!1|j(jK9-VduK6&9Xsr*dmB^}F50GPca==BJ+5{ltFQ zoNtpswP)&wz5u?{cVmp4z!!-43NTS+!zaM3fZAWXr59t|D#4?;q@6EpQtJa@f<9lZ zrZBV%eR@4GUAz9V7xYpPmj0Er*d#orUZ&z$PlvLlOqyChEduB3UzLN@E#!CrF7gCH zIyO-;em}hZKt)P?S!PJr0P6`psc>5BKD8x(01XDI<5979uEtO!}QmmNd15=grO|<^O?-|^e0~ErM(9W8{Mo`Tv87`82K|*FP8Gx`e#CqgvgcFP3q?JYHr$xZh|rV>ja4b z!^ag6q{}wY6Q>+M4+wADbCvv9CXqkiDR9t6n5bs@sy z2T&b}4IiGJ#W#)^3L@}w_mc-Hpi z>h=DjeAwn>HkEXdgzR}(Ytm&NcpFPdoJKw|+93}(S|r1|Ipt4ZoBNcTC#6ND(0qHbLz!UPubZEDQQyHmmFV)nRG*;12~IW^LibWb zO3B3nnEL;EFc$Kk24e@L%k2_|S9U$#kg|o9I}}L%_^E!=yf9i0ZW#V?>~hnxj$Yx^ zFRA?#Oh$werNTZxyn5Yy({>tG;kGBacE5Dfemh#>aWlO3e1Fpc1E};ulv;;&pxsNaJ}s&22=%#N^QPQx$U8nsR~vZ*?iM- z+xscJD%3=3>)rBgA8^bzIIT0nwPy>(@gXrXKx&)x{oR0=Om%d^$TkJn-Jo1}b!>su z4vog$km^`+{ zCjDk)kLTfT+zU{fi734<@cw=xP^LB;cXVHb>wYp4;J|zTV#8BusVQ;HL6OkSOvd$o zIulS=C@TF^A?1FiP^PX}W%Q?V%l&L+cwMQ9^e^@0`?g(W z%=Sl#b^ld~Ux76i=zd2S5$v|!{O3vxo$n>XQFUJTh%l0|B?0ZBO8ocvUdgBdeAGOP zW>&60^6kfnINd_zy1X}5$X{{eir@2n$Q1r3+4S(L7G%35I`3gGfKv>UwFoQK3H)UI@xj!!(G5?4UtiIU$Gv8}bnh7v+X9?o& z6IFO__EbT*^5d7K?fc`aastD48P8Z)bq=y99k~Q*dP&$!h6BR2imt%IzTCV1!W4tXB@8g=US){Yz)#1xe4YGDzw{&S!=4}5wdlO zLu|wFbOH+kvO}4=c5F8AUkq}wwfmE#{}ngp1ds5#p}=Dhd^^tWW_LHggUXSmT`-nR z>qP|iPUBl+j((O6WFF(hZx~zww2S2IM`#HntV;Oa0It~Yj78XMt8~k#?>EW(u;sgK2e^*>6=k|1PRrc` zLUNN8Q5nhe06fM}G+(^^$3VJ=ucgB3;~y5fQ5mQhn(VM3l|BdT&=uz9pwHCoi;)-s zB-yispr_Y3pK`irD^bR!d zR&z1b&>iI(R*(7pgqJ2F{r)$i!b5cy{^>G-G)#gPU7=Bb_@JZ0Lu^VP-51?tsnVB&tUeq)tnfI2^28v|{kNhV>m+`B>s#;C zu;%WSe)dS8^UBG58n0z0S4{3e-IT0LhXzI@~eqAE{!6WgkUUo z;}O<{OlhZ1=H%1cS4!AtWcrV$y?n-Kg1##`KeL^=Lm_mNAQD zAcq`sE`H_+O2Ckq2#&sysyf#Q^%gv%v_ez>b?Wv96Y*hn7dn}&&;`}=#(ssLQ}Lb9 za9}{wYqi%Gi389GW5VEwG3Q0nL})%sf%vGo`bF}>NIqM`;HWiCF3l=h0kHoraXa!N zbziE0bAE6P+i}c%(0-l>e$hgOEB}OmgLzI~Ces`IPBwZ+gJ2c@B zjWA+{6$<@UV)e`Hx1)t3>_d~`E|)pPbOZ+qQ|Xb(m$?Lbff6c1Q}NB0d7r|Hq)a5H 
zlb0^@e=G6Vq3QJd%K|?1Vz~f`ne2C0g<{gh3Q#4^cmw~Q(IH2vKw`F7{i;}Xv{<=e zXtvztszevPM71BP#K~8srqU(q^Fwp>%~xfhuoBHZ36#E4GKT15Le*G^>97nr)-aJ3?A0a9<`XOxJ37;Afxo`C% z{vD#?-;|j1rfK3oQDO#1$YjK~%dOUZ=?a(smJ&a%eOrOc!wmlRB+ri+yCm22je_DAQnsY_`rp3%`0|@C|A_4#=YyII*;7p> zE|#fpt-(ZK_!!!={5Nk?K18Im?|l1_`w|g(lkj(4!b|*SP?X-s#Bwpkm!x5`DJ~xa z*pN`3ZNn8?I2jLa1CZdOgMtgYNU}WY$~9hh2lsH2q`e75%CG#TpV9LQ?x2ou4)Yc0 zxf_7_{jxAj`Y1Zk3+-Lfm5w1@lcCti6kmW?Cep!@5fcBf?#!!a1`09W6TTlfN~HU?`I7KG*F-9v7)#UNtUIR* zg|PRyB&y$5MxxGMg{MdhAT<%(DafP(?OhzuxGy6J@R;DLphDimC{@jT75q1nh|U-$ z5CDiAflYWpZ>mV#;{79>V6tj3%UBkYd@5majGF4X_V#HDVf>csTQY$b@=e7`buIdI z_Gsznx|@^r?hpu31q#rMYv5ZsiBBG^S z?1ZrTpX|}IJ4wtV2xVGxbU5~d%ntGrjf)Q?O!5`wvk|JOBMBLi;Dz3Lb22L0enxY^ z=K}RWkj*(zf-jbNg);BQ895Uf_jyw?W2CSK6VEpC|00R>-9*rE;!6(m3q9u)#K0D* zgkyapH~2|U^?ExVkj*5}%Px*0WM%}SGW*4wUMS&*cN@6nYd72`Uc$EqKlq^r29fr( zbMt_4A}QOSQ?6w8LQcHuGvg33Pi+`(0CZhv?^P4_p@qsF3`i=7>eE?81JpB`YSAJ#+OhZs1YS+ie zp@l})3F=U}ehU06t&1^M(Pw6vi9hry9%OTcMr7MTo!OsnVh!@*p0_?RkRgsYkuU$D zPrYpllUsf1IIksC_eE*B<5iqsQbQn?HIzODM`&bZ?k^IzlJti@rISano%dg`?jTr- zzpcAg<$i|$Z|GB+Z!}2%MxWY7=4OJ@rzpsY|6!#4{|Q3TgsFoefgyvQC#ipUEC!k1 zeUPwdG;d{5pjQRN_K-$7j9%Xsc)Mg!N(h0V5fv z*02vlRh+AdqHbdfoQ^g24YS^Oh=6=N7FGEBo;;M8Z1`ZgV#VT@W%{EujusQ;+7L5z zt~ravGLx$OyyiEi%PBS*eXJZe23ongqr1W_2^ABSIy$fbZlHx;xewewp_Kk-e~vs% z6HMs~|9?F;p+9q=^8bLkFlIM~|J#elZ*Nj5a5R;YAbr7I^KUO6KUeNBH8?pbFI!Rj zYYHcSdy_i{(_^S2g2QxwJ2u^6jeJj*q)Pw1c(CQnVQIF-Ljg=>%atb69`1i$JSexo zX(0_2YaL#=Kfk)L{O*;p2_x5BD2M@(;^9*{Q%iv7k<&0xz#Sm`yw(H53t&t}AzxH7J6mYekpD`X7X3H}*Qw1s-BOk=+c_VKoXb0ozB%9bZQ>WQ=9po3f9a9&iJ723! zWPGc4G0h6}h7|6{)Nlsxl${ADQ-3-|5>M1UnqcgNvBXcsgWpz{`Uq_l;&Z zG9KEUh;qr$LCi@41Djq8m(1~!tn`!@jVSfBN*_(mi>OaT|Y=c7#t!RfATsiHL zk$eR802D15`;_fdS?4CB(d?=Tn7LqvX%th=2N~u=i5i&%SIS1Y#v{i_)}DqGI7Q)! zVbJlmAZ6@p8Q(7uVBkV0q}dmcWEwXDZ{G2}z!3Afm5mTx1ro$bT35p$V8ftoPT3uE z>_-MZb2Pj`jSQ-q%WrRv>31N6nfuYB-FetIN6$6a`Q;M~j7cZlJGOQ>2Dg(AL=1Iy zW+a$S?0)7G?#qn3wy$f>Iy&yxUX(me8v(8zPPf56p3b{_g5}QpX>ijF59u8`E_=q= ztCkcf4IZA)d5z_j#|=4YN1#3O%@49pfgjV)UpsZ~u%0+ix)NV0kn?Zc`MNGVX!H&B z1J9DieFz*<@25(|8qvaW3!3PDV`N| z{ai6anH!%Eav+O|K9h;yx4=&V4vinE$+}@d6e8l3aZaAj9uC}RVhWxN%x4a=cqrr+ zRv$O86ih$mruKkWmy18f%0P59<9oPMNGeBvn;%OlgGwVGFlLv{dqo`>E1cJaQsjDyS<8YEXmv19 zl{*yPWW*5A8q<}iwY*a5ou&!MN6ZJL%qop^{#K>AYVb6bEr32MH>Em4D}|pQ9R(F-sGMk z=gZ&Tyf!n}e(fhs%WUpU_N4r{n7yzOh*tndCW54fSPj+;GpZ&39u6Xe(XJf*49 z&j%_q(?bub{&^$fe>PS8zy1{~Ob!e@obTVpltYTLzfEKR!I<*zo>27<-^gYTJ57bt zrTl$N`G;`}z1{uO6Kc-iZg(OP;`SVvQ1;wkiy8mMo@29MFP~ca`|a-Mk73*rymHr=L|9KN8X#lG5E> zFHe>kGV}<($hjX0?6At1a>BBvwk#~`~u=I!+5j&FIWPsc?6{R zcB{M$v7~uG;`{-fH0Bp%F?3X84WZDkBoGo?CrB^=<3tZ`5REzZd^}ds%{G+IB~T89 zD=L2a1G@*jZK!{}L%&IICyd}UV}5$k&*IY8amGqKZjkR|1)DuX0CIk}$s9rj8AcyK zG`({V4!1ydR+dcUMo(2hHaju9ikmG0e!oa@7LapaPo_3GJs^yT=TW>H&w=x-gm58t z6OMtJkRLIj?Dt{@?dsbUnbXkY$k%N06Io)bE-bAXN@5e&eVSC+{3_bNi;Okk2}fyF!#Mon^LwtjF3vraEzt+ zDXPfJmvk&)W-`t+q5M<6VcxE=v>(_l_T@fBZ^mx*;Bx8M4v>iAf0wCg_^B(|%WR?MQRXDqg~h8Q;?|P ztv6D+R|m~gHzKEQ!gsy`9{c?)Ljd$UT{Akk}6jUphqhJUn;qI zx;;EKD<;NWtebfuMAD=e;z!32RvG%iYL1EnV22=7k3E0`;(&Y8*Q2uooJd7_k*}*Q z7`*~oFNmkWq7U+s9OzjbHuernnEaV6Se##z4?hZBzsCR09YNV#&!7f4auLYk39Ma(*6+lywcNf4|`nX+miVbVZO0j*xGinn|POIUUuBd zSiFgZ_>H`Z&TwP7bgc%~@l;O>^9vhrNeRoj!gT8}@v&R7tL9#FL)(H?B;SrJ%f0%% z>`fYjN0#KOadtxU`HNHV9-cOA1bgR8WgC~#;n5q4`R~JiCq7rj8gs2EMPgS$$~M&- z8_(Ss`dxhif``aliN%r&RA)i+CHlO=;{YOe+t6tQ16+gdH()ux+k9UGf&jGsjfW}% z??X!B6DPdRQy6~X71DNj_V)=d^D{`oZ`8BDBqTV({{xCbFykVqU zm^s+Bb?g0JBV^R_4O7$&iy_6w#W9&{tk`A2o#e|T3eR-pcm(OdbvtMN0BxJ>#ka=T z5lOvaJoaJKnj15xV9u2ZAHFe|(cje~9+(5ziG$Sg%*?&8$O4)Ri9f&O+!GaFD-sB@ 
zS*W#a57OUJeX={!CqUXR(>kxy@N}-%`Gx&n6cdSDChZWysIge^?9F!wBTXjVGM)U4 zSRfsLWc9IVIg_n!P~a&-K0;%~QV6D0>j>GgU){E$zK+k7z$WQXow#|ouI=hGmL6Qo zTHm#DO#&Ur6nABDHTs&jFzwWW-ECvxnWf=Fd9EC4=L(MK*P>zfpKRqRy%@K^_kz#X z*gVBkZIC7h#aL|kgvjO{SiO9X{K_PQk=ETGsy>oluVPY9TK70{k;a+BPZBLxvPCG_ zr_|^$ML`F%5YT-(JShy9gTYTo;r%Q`$g$+>;0MD6&N2Ac*A+2OKbbaAmJ+hSG2gi* zt-m_27aXJ$+Sc15|-OIkZ1qbrIeN1qy zux~rp{~Gj`i6L?{<%uX=wncJo6+DSycnmUWS~=6cXscMd^1`_1^*_E@gnQcaZ9uS* z<9Q#)Y$TEY*iS4&@VMbRyagHPJJZT`>-`ax6z$r+mD_P*SMhEzyZyPX0Q6+U!ezLc zaRc$J{O0F5p!=qu9&$Hr<8kRKdFq_@`Z40Or_JI9(d$m3iz?gSbt<h$rg=OZQNy@}_iM$elY3`Qh3STQfSMmLNB2ef^kQC#=eRK7)`zBr9|tpy@% z-oC2*RGpckiV8GxVpImqnw@ffqs-<5vx>ObDXCl$=B2n?8Xc3~YkTp=8qoM>g z@XVs#Q)>_eMNOGT;BZ8fszsAHM@tFHBHu;P21No6qUZ;sB>2tX@ZPnn#Ed`$V}Lcv2q-7O3t_nYH?6VoSAK$`eK~6 zbL8i{IDL+I4ZQfb;CR!dc=M)s%f)!>yLb>@f*nVKgIWUEIl(C@!KEp|Z85>)F2M^g z(VHU?L0n70F)=VHF}Nu)bTKjfE-?}>DVifGRxK&sIVmwIDY+>rbulUZE-4c)Ih!Ln zS1mc;Ie7tuaku`O1rwga z*_3{`n0|bheu|fI&XIAcmT~QzahsHJ-<0vVnDP9ZJ(tlPomO&yaR#64);-`FMJbSdj=-T#z57x}IznzJZYy(r$LC^5Mxxw$BHsVM!vDDz!$ zHYb!KSDf!sT$o&3++19`^iS+L^^$s*lE&nc=H`;trIPmhlFoOfU7V#o>ZN@yr31;O zL(Qd7oZQ%b=|9+WE@cbJWlPOvD@$do_hswv%AuUNZT0e9m-7AO@}JG+hfC$he`n9B zS6sVP+$LAtH&;9^LE9%PVBS~4a#g}>RK9erL`tbdX{kh8uKYWD4qKz@jce80l&W_v zRrt$Qgb!83@2g3wB~QVX&IcqN7s;M>27GJ-`nybcYc1gY{3Euj;@hwY#6{?LX=t zt>?Q*HqbLRE_BvFD(O@rnHX^!-G^~}S{ki1n&O8GY<|I?bOKU80xok{MI*U}I|018 za0=@@r~!h^7Z^wDh>a}WzX3qH|h{048oA&LA-e??2%kRur8;7ymffa zPNddO07?#Gzy#bd6C=(#yh|tQcXG^KK16v3Bn1#G=oEmz#d8!Xe7xN88S{hDFS3*g zxav)01!9DNNVG!`>q#XaGaq8;1e`H3;L;T#qBDfSI02zBbfTVSA~IqEPKc$Tee0he0fU{wuq$xK-2gB# z>*EE07r*B;^W%mt#(XEe+9Lv|CF=`Kl%uWgluuoPnmwE=aC;9@l%-vu&^8@RIE6~+ z@+T_c3?My|cS8;Us-!)HK~&Hc8q92!o`7cnAw5De5f#>j5ScLJYmj$zg`_b%TVZ;4 zhzu}382d0X=`H;a-2l)>zzOsLmIV$3LJ6ouE{H@E(}rdSzoJsgIS zuI{8oV8DF2FE?^12iVWyP3HoR(y|_QqATdWJn6*fuVfTl8TfL6@l1=rc8I+D0O-er z+tUE3(*mSVF-~+baMRFsKuF;*7*8-r!Vt|N85o&7%R%`|z-ecPz{mii0zewnet$UL zayZ_IKOynCx^o4d4+N*QKKU9A;_*NJ@tY5)vZk*7Zy(40WB>gea2#_#cU<@Noagzwl8Za34{C-0 zb!`dY56QM_2{!K)AXC@`Omv=!52G1~gfxlHQId&%T%q z9#zaGiUAN}V_?3vk_O~zZY>Z$g6JRtgw4ngZvccgqP;nCV#d@{TQ#IvZ%GZot&3n# z!0@M%T*AfyP{r)0hglyY5AdG`_Y!&&c@DqJf+usmE4RpQ>J@ zRpF{)f+-fuUMWwfkVslGm_U%MVi{PAD>bYg1SRuNE-qo5u zo_EXY)=Qe(RBg%AFCMPI&R6t3R<2!JCAV0dujw%G7&pl2ix-+V%vrSstr=m1lpH!`50>>#a z!f2E`IuM2yPC_HBmmXc)_(6hx?6H(MhrT|rtnO|aYrT>_xEh(Tk`G(S9~`;^+bFl* zP<*sH)@*`d`Fy8qJ?~v{_~1w)TS@Jsakqetiu2EB0_#r#(5=>6moIO<&)F(Z*y^3z z@*Lb6WZS+0+8(vup4Q!-$l2D4-~KeW?bW|M$M)qD>6azzFDcqz)^fhwAnjKj#;vaY zvg6Unm~Xc1hxQ7Gjy8u*L5I%I4)3%ay3HSYU=BUmkGvI*;5J9TK}UYi zjsjYa0_Tq&VUB{?k3;p2u53~rKHiC%e-VkHF!^+xfH_98pFCDLNwzsj4LV7Gc9Pk0 zqP9u(JJsVta0Z^7335rfUH!ZD zqd;q$c!Pa3kIh(jJO*C=XWuf!QS~@Iq+QQ6)Ql@1qS;NT%nF+WWnJkQ?BFL}&iTbe zi=y7IT+qm6AkV46!JROE-de3jxS7q-4jK;UT;s%-UFBd6S*t$%Avm01)s{;82Q$O* z_Nn;MUB*%VHxG5}Twh-znZ-q%E`LUECk1H;l3vw}__ld#CUDq=kO8gY;R`3i)W#n4 z>`KnaZ|Ku9*Z(;B$;>do%d1v=2&n>~1{E~k5EqhoV~nto=Ro3cU2sXBey{!*xkMWK zHL{N&>2lO<*!{Ovhly6c=9j>tIk}2N(z2DiiM*Uqn)9)u7k$fH(Ne>VoC&tH(i%}C z@rsX*^H}c63hr}-_&oF@kg8PU8zyTl4ORBhI~P}bjPgW1W)0NEp%HDum+7F{$fCr% zJ91C+0{OhL_LMhIu{#l{>lG#w21zS27RSRuDZl}0h$y^A&Xc*GZ&$+dn9RbHDQ+W2 z7<_M1Np$!KvF?_zqR2|Mz&T~a>!pRMcQ(s9wt0{QzIbR&*}iQ|@r*)Ec{z)r{H z4&_^&*nUx{gMZ_F?TUU+&hd6+TG&?tngo0Emi%6_V{lyozci6AIxOu@PDC=nZTRxD z7e{tu=~1&WuDe|fw`XV!p<)k&-r~OCU21ygKf}-32hHa(gxfl}w|J5}c1Ne(GZX8= z=P2To5}4E(HfACeIv7?R>KrhDb4sSke7G~AE1e=)WgbeQM!tW?$~b=9z|{2Nx1o`n z-{24O>3%ZN+>=#cJkG;cD2TrqYO5*np=Jn;PhYWNN->fd0Zaqg1r9uqRYYyIyt(Xt8{ULI2fG3+6ubP74?KlG@}?j<4P3b}c&V+E&s zkJI}Ld89vlfCcP5$t5e|)41Min7Nl+s#7Fz^+Y*jWjUoLu1N4gaQbNvYyBt^zW<@m 
z`ukqmPr>Q+etV|>EI4)P$8NuW7n}y{KV7&GoL(Qiuzvi{2~MATcF3}lmwmb>F#6bj z%SK16Oe@?e!MCB7RR__FL#{lU@~M^wBBZ8E@$yEdZNW_&a5qeWWb`Q$?WNh{P>O0# zeBP|Z0)@;CJ#B;RA$SK1{Cjs|bpW{X&Nnk$|4)ieTF8pg4=k=bcsf@pjY`>&r8eI- z4c28JkE0h^Cy6wm=Nt+7sCK#@U?PRz3sWD#0-9D^Ch3JbvZ`^KJ5g7M(?rLpd{9eD zZx$nDf0@{^Zg+#S=M*(6tEz%mozPZ>XhxI~va~BYn zED&-X=!3kVd9ACcFdEfZYxqtmQCtlnw`vVB9IZsS$Vd2SzryBRHlqZC1O~T277Wn~ zOverC)O>Gj1+~p6;%xE2KsyTRmX0>%@JtGr(zGs^1M|F1jZjYK4Fyz!quM4vRmutPn)i%7vCT63*I)$WR-OZVL8bje>6It@KUTDuRq>S6HQet`hY_$lw0|A; zLfp^8ReOp9dfgfs{c3B*`NWnU&DYI8$Fh2(^Hb+x&8sq8gLuIXB@zeUop{2kW$&&0 zu}QNRG;?0U@MAN=4}gQhr*0csKNj#qAU*DkaUh`^e$=|da50`Y6oJF}lYsYx5sRBx z_ZQd5UG+$sP(ctsGNbFbrd#^XZoDJp6I%$NU2>Ck#aobpwHwmTzqM-k4{EmdY!})s9kTBohM=@Tb@I3g zB{<@>lf`Mu$-3ncLPhOa@~)~w7z{wxxmGNz(6DItMYq0^GlocK(YcWJm1@(QgsQEC z*PMTVylM(q86(cDrwlAcM&H#1;NNxr7EAtV-+}%vosfH<0Z1YZ@h$(}Re9$M)BZ8K z{Vj0R=g4Odd^&TUTdX&2*i4571uirDlUHG!qbZWf_9ah7*;}B6^DDdiKoA4$+*+l= zafC#54F%q|o_rS6|Gb5zA}{DLl4b?lgId2ieRT7_#v>JcQ$o#vYGVh$F@=~$;O8`( zPQqYSwLsh}E)_VYE4*MfRPd*?;F?QDJR$zrfL?Z?iZ$+9;Ve0rh^KD9f#>Ngd0M`jm*ML{KC!B!*4c(TcW~Y zyAoI)hCMXG(JaErFTy!J;!Z<^8!E!%IKq=5(i<8HH;eT3i}Xv63}}c9L`6P2jtpjq z3WY|&(OPCt0;AHSVj802P*K=oF$&2L{TLdZY!;pB7oDCSo!Jnb^%LWq%MgWx&S2$90;;z4wd5 z!kK#;;`&i>gU4~h4DqAT_;Is1RS5nmfOTCBZw3`VcO1XKkgxQEhryzGY*w&eTQUQlBx!5Zkvj zPcbP=J{pd(<_kAx{EdV6%f0mt?8V*0y}8tdLlLOs&@as4+D zfS$!8?|1!${=om z)5;(EA1%Yoi0I|}Yux950q9OD80s<3wlZmqeJzycq}J8i4iN5;7~bM)<^vnqaRNrn z(<`FX?jcHOlRsdQGLPx_LKa*Q&oNUnb#(iR#&lyv5%Ky`vkgEE2GFl`hHY64XTy8K z`XIhiYHqx=T!)ZblUW)JpB7IyfYc@5vYstu?EHd7em`JSGLwj87cGlNGCv};;vuIi z`tXtI5g%lC$nJ(MrLhv2?x)1tdFesdB@HmE024RLZdoar7J;$!9fqUGRW0N<9A7S4 zG-mK!z#^|h$h{WRvTQNV3ntP^mPufDpQ)q->ZV2s{V0N64P?Cf1z^~5Q<5z`mJ*)M zI*~*>t!Qv>neYK_cKC1*S(n*bP9PQ*seA9E@q|C8;Fzwo%9RW{yV0;xh9^73odbb4 zO=9R&T<~A?7YF%nZj|IYC8ydFHkE>lsiuheONgr|H!EtefZ@-IPd&aHR$*phT$L&b z2sV^RtC}_|e}hH-pQJM!e*W6>Q{s(B|K9@8ReC$%7d?T=iFOO__e)G-0mIPs3m^4Q ziTBT9n%@0jM3cAbU{ptX`(R98L+IPM5xo_d3g-Ba0J{GYFnp*maOM-i@ZSRr-%VKW z{U1wb_}>-N7&F)JM?*KH*NAdu8Vae;Uw5#aYv&rdwJBPlTp&1jMsfu9gw{2PAd-Gt zt)^XkR?R$O)nHuN&OA0BXgejfD)}P_yfm^F2@&d4mv&PkWIXR6erl@$b!<=u-r0$y zzS2#LIS5X0#GWf$G=y>qd8%IN%cM4Zgv0|c;v8xmAX<+0SUXGHtr`^;C7c4qRzEf@ zc9>o13YQ$p4y1<;j$R^)Wa-#dyX?&sw^hn1$A{l}?PeI|55I1?-Zqe?0gM(9>AbiwJTK50ffA0MT#Xz9(x)iB`7n5GGG{2WC16dfn|xw z2H8%O7u9*=IGBD>6qjYrE|#s8gK_ehkYPZHh;@gP(Fv_2=HVA*{78x76dEqqA%lTg zH&C9`VyEML&^yog8wa-ZhAUj+(sz*i5^ZFVt6g%S zMLhT$2YTcwAV|Px&M85cQuB!k$q7^E>QFegtbxF(zk<69p}i&u%MeN@ z$3PJS$3PY!<#lVw zOX6lIzX@)$F~2Bh$p;7!dL=f&i<^dh(ahDtR1O>n?xcX9bqi-9VSJcR%dGjekyooj z44#6FSzdIi%k}Jp-xoA0J@y20ZS7L7!)-3iul>88byC=tha<>-6hqbksqGbrT@g!e&P|Y$c zmtZ7h;O0l~l?+2AN=LEa8b{gl@k+C~efyMB=j^%0TEB_1oS*=GuLXY=EkSpz0c?Vc z%Dm*m3_tNlCXI!1pM(!OPBJDe-43ld(ZGxX7LBXjLCLtxURmEtRaiI8hHl1`oA7#m z>~|rTI$#eGihRlY+QbQinCdq z=A1L5t0-Nko2HgiX_}S?zyT#c=D3uk-yIEnee~nUJbyq6x_O?Ys+454W3(SYqeewt;EN@`vBU^?iXU|%5kGNG~m zHf%q0`g9FC)Hry5a6jkA>AE6C({Lp0AfNdRt*O^Enl^Y)EO)k{pV%~>^bY9<4kI!f zYMQ7Y{8s(o?6WOJGpZYQ`10x5ma|^-r^&&?y5_TO&&1}Lb=XnU^w}4`q2{@x!J|6% zFe0J54OhQ=B?h%38beH>K0lI<-^=B^4yX7n?d5-5`}i0C5DLJ-p#!jE+lv7|HJ-yW zEsA*50xUfPGigDGbXup~RZA&6o(p_}ye~MV^+LcfBXrVia}w#*7}9R4OG=5feO_>W zaIr?Fq`P;=bpu|#Y&kFB*~SfLgG`n9PuxvL{5r*Y*>lM^DL0J@HLQ5LJ|(*vmCO5B z-(osk?aaMB!bA3d{L_^H02~$oW1kv)R zN2{p?VgfATerVPLdQT>6W|ti+ZFOOW#aBI}9P;YCj@EtH+#_fX@WpGnWa|0BAXLpq zLB3?zNWs+W%ub-LXsNF1Z zrLHRyhummxj)(!^!EtuKisvEV0eMq}_r`(fAE~W1-DwYyGoI2QXBpacQNO z7EeO9BD}#=5ucb!pob0IITZWx+2RN#lHaO+epbEkU3}VW51gVvTzU`Y`-{tOF*p=6 zZ~Q!ZM@sriVSXHSa5xFKYML-u&3$Q>l)dAydEteJb+!s#4%j;Ivvx)?A}dJ1Z|IEU zV*;|`iJ|gtmJ$)nqF6teTT&KXKW9rU+JQI)0V2z^2O;3X)uX6 
zo;=|2o45+&?5IE00=Ly09o%BD{lhoR@>hTmoLGFaZ~{4zIFanYY>yZK9-5K{smL4X z5b=D`j!}`1x>%~>Vyr4s4Qz`SUSI%SoiQ6WC%AQwR{gp#&81Wzzdu+CMMm@6<>xP6=D+vI0H6q{ z!CCli3j8v-{j=Lt-W7-5WJYSf%7PC+Q>lJs-O)ZW;;A(4fBHujhFhfP%cd|kzo{q}AhOaJeQ$l1E<6`3-im)(B zO==7nrxQG%J65F}VtE557LeYi*{F#3_)8N>9v4jJ>l-dGSP}iC8hXsbJ=nGwic~^@4ofyyFoKx_#q zFWQf9MMl9$i?V<90ZLK0Pmg=NB{sN0<_MBPwW`#PNH1?8Ms*d=_WP8MX5z=xY_R3K zW_pL@xMI-R5bkKF!?zJ9_F#bMo%;iIQ+0)JeaCGcC~J<{)7(^)nox?oS~uzBFfuUV z2-&Hh0EtSjZZ-sY#8Rx0R0u;lsaw3H*yiYKjes!>;tdtwiWB9d;#R^0c9%EYCwG z&r_KF!gHb4y4Bg7OVQa4L7wN=DT&rupR+A8WZNFVSeg-&!>kTruv?J8#cNcmX|HCQ zRd_wtxEX{?pk6y@ysol+_ImHt2Coimb_{7AHL+Iq&2IvOmB42A)zm?O34}#jhWL+^ z1%$&|O?0}lvkSBag9nJUSk#<-1?)PVj;#u%R<`~MqR~N#+P7jD_OE97Ej9KacojUf zYp&et0badp!UFYK9LmB6fvqxf=Nc%GaqoXn2uqQ}El4|k<1FeuJkQV|Il3IrVTZDp zmxM(9&6{iM4Zjr@m~4hPG)AjLR`cI11I7lBX~^yHb*#mv@L<}&d!4Z19-6}8+T5prwN zNU_ycMNdn-_WJ`1a&fDMpJ<#DlVCYt$zn(aB$iq{w+>iXyLZ^Ti2zXcj*F|B78ASXWcvbNM8xg|CNyhb9yd6O5k~;9zij+<&1Yi}VD?0q zl^bA6^zqtNQSW+23aXYOgSLa^@r{=*>3QVbnZG1K*K#_CC9~d313g3q2E8*bF+iJ)kP#zD7jdH)& zm77J`$4WG)IZxk==AVeaH7mhmr$zVp7S_aCyGm!-admCUYTGYgMMw}dPNTe(i!e}e zysAo#gmJ(htEg~6>AR!zTGF%Scw z6pY+?hP_<%Yv#S&u*r=fzqj+f8;uVZPy{Mf60I9Oxr6R)^S66%EQG|&UBU(K&vMYG zoj+_gF|cfK@O+o&pwS(!RYcpE@-)W7QcjjPCnC02_l}I0sh(*2eO;Ex>X}x1R*OS7 zQ@4$Y$M~^eY@(|DdebU_sfR__T20>e(|UKQ*3grsTduwk5=349?6>LT++fHC4}EnY zIHcAihTpVP43O^F9(P%mKbTD7&RZh(IJElHX?qmWS)um2Mwo!|Q>C0fg1aOD+ee{X zIvUei6qLJLd|!Q$%xJx3NJ+y(s z7uyXwPoAGEcmejL^-b}&c_ASrX9#jB4Bo%?VjQqT%<#ur>iT1dF;Ngfv?HV`D1c6@ zb3H8SZDgG9tSCY+U+JnUu+6_MDv1@(qk%PLUrc+-^82bRs2?0A_?8Zq+gWDRXyx}Cc-}(RH5nuwhvQCkGE%Ehaa6kItM1jSF1k+p zj~etH+j%Sv=k3eFZBZc_Wu}#X-D2_6rxU^!Z|)M?Pis$#EVSYh?pHma-%)UjUZwLO zlggHeV-PzxiBwx1b*f)rx+E_)J;uC&Zxg}qD*-IPFEs)*U^j8M)wN+uVu+y4GFsJtMRrlPD42JP@e_vDFSGWg>VcF&c;>m*OvW&Je3c@ibb4RSP6<_1fdQ61!^;{U#J1^&Z6gLa@j=Xz4DRhvnohNQ?chk;kfd{S{OvLS7L)YYrhP83{39Ys~CfvMa?v&=DzW3H8!^67c|q*HfQ{*LQtYJ;kCi2z4GkwwTPT9%9_#&D;?8c?@eg^eqsm zv6!lM#)Nm(#ceCpJB@S|#bEEJc->8}A3jKhNV`33Ap%U0?<%whe64fuCZ9{g>0Zdt zp8R5FUAuNJ!%zEF8^`#Dla9f+ulq`XaxDrZq%AU-q&$<3Cn}#=UJOuNh*;-sW#sjw zc*H2I9$W>VEu>8k4Xkg)Nf>OS77zA<W-()YiPcJQ(@aA((U&o--xj2c`QTY2zWtS)P++CoDP7t;N^zYZ(%heWxFb@b#m1VzqmmmlkOtB4B& z&n}mNS!FFS3uJHVh+#UWbi@fiB84p*yuZ#-p3x_;7o;C^XP)v_mm7yH^|OY4-?mrs zz%)d&T6|%z^M%8(^?1^~sZW@jb1awAF0$-Ig!-t`pC#0yU^XJaGe} z3t^}w`wpkNU7OAj!gg_bbGPP!jZd$evrx8WzB$@f8D57=y2@?AuO8Et(88gM@-WXP zd@x2rrB+`fk@$?a3Y7yd8KtNIC0&LcpPW(uYjqhC`}Y2 zE*x@R;Vf9rwMKp`S)Zwq9AKK(>KdGPJm^M>Z$*s6orx;FCy!PGXg?eB!dh>B%Sd3Z z^Vm_13O*94R8*L4nv&hiRYpo0R;DsH@MH_I6M_4|)amQCAxwM4Dj}R!v#)^&2r~6P zUW~`+7D;kzC?KkSk(Ez2+>a04eEYO^}-jYL242l4JI4> z?6jwi9<9OAgWB25UT$MmrX>Qh7565sU)eJ^Q@!6+Dg$;|U3T>>Q^_YzVG1k-pDDQU z{)?vz65HgX{n_Nh0HRNsHIcSZj~OES4wD)?l`vwARU;=XO9hTAeI^%GGcCp{TW!wW z(~}2{VpDpgBE2`?9I*`ZJuN}>^rn&vrK(&X3o6rizC&R57# zb|chtG0FuG$Z+~pM(btaJ;&=J_^Ji4uudbuGElbCfL1L`tePd*{)y+9wfixki?P{} zt<#uvoTc8Q)#zceT)jno#|9m>FZ%e3^oIxh7BCW8w>J1jgcw20r7@?99Acp7<3axQ z5yc`I^{6JV)sEL8JXhsk?OWzWrcrKOImy%aGVQnQ+tNoXcr6ZxUzR@N+g`_F8$~p& z9t=m}(w_rKe`_{oVTXn4k72oqi`5O~XjH^yTvAUJ#&4Hk*jv*HLESkV583DLjZujOSx&^~ z>*gz9Z~#u#w(UVA57%gVYHge0YYRnVcZ_b{@Ci%kPtV_EAFm>+xhG!F6CA~bPvG}B z|G=iqL-%r&CjC$LHSKgIyhm6V6ILMXFdlQV7UAo4m_OOB@Ku=rka; zF1#UN{mz5PX-9XUin)#icLLf@wLp3&?KN}eja3F5;FZxpWd9WTs=aMpVbK#4#@dgK zvCRfwoA>$e=ZV#`vExU7(@IZW<&kCO^T_gxSec3xm-Wh1d~&EX-SS-o@@Gdd7dvLu zKgaCDMZXw|WUdcE)lgcR4d=FAHtmdd;2X~E^5}!38+c$GDzOAt?!1ZcSkgdpMh9NL zPRU*Gfq|Dg`tRBIXYkNBh4K5`f`!W6{q7cR$fujF_JHkDln_5GW-SX;gc>X!eMUMU zxq6bE{|s&sZ_kRV+R}bOrdG*6y}e}|gP91vH=U9f=fly(W=WLWXIXuN@ug5~DyKzL zNCq3ho8(;c&iKpumgjf@ZGhQo-tnSG?n{@g)Wr*{rNhqvS%gKn?q&B 
zY1ZLU+eNJoTJaO|&eg22khT8s_)kdYI^VDal-SY4NYZQ5d1as{0h^SMIKcvK^uDb$ zC0|pWVN5dukKZcDKo+Gh0ED z<@%v!hzQEVntnR@^U726srsRwAM+`qSNqPmZM<_ET=())k#=izT{*XV%|2UCZI+TbSL{oyq9vS46_O9#KBYAzvD6h>hctPsmz5;0fw2?)1 z1S|fd+Wk<$0`8thzYPo5vPISo`d-J{2(jBW|4f~EmnM!>wE7z$Db+feNrEXd6 z)aO5YyvV9N)v!7dA&Dhk-X?4>C65t zXFmDz5`mZz$~^Bfz5+v=#Ew7)cM9OLGyL{!MnF6YMGk&25zYaBukMS%+QMSiRgtN>sdBT zuhxJHt(x<{%u?d7oxgpu&>r;r@BWKb{TGjhv7`03dIoz4^;{g>80<*-)z~UUm{vzo zM-Jz8h0LaI412}ftHe^^JIx%EXKRE&RL(#1zzCBb^J0o5_+GARKVet0uO${=@5&T@ zK&8_t&NrAR8!{Is^9pVFYzAv}I!;_SNCpkO7AMqmm5bL)s(U4__A!Og)Xn@BlVmnE zfJzljq~B)pq{i$;Q3p^_N7fGxL?^w3FZPB8l#oH|s-9K>ZL$_prhK(>h(O$#m~t#~>cfhl73Zz*iKY`3SRq+PnjNbx+9t2+8;@iHw) zQ=aam^nE$>xcNjgaqgY5$y@uio9>k$${2c5>}^CQqK?u2DZ?wyfROe18;_pv*eL#a zVE@H1{(Fz6u!H~Wi!tV+0{FSx;&pLYIuAqId~@J__9bAujtJZrMFl>}L(QIKBC@9? zyThdF?0x1m{>YX0)Nwmj6;R|Y?RXZ4K}&u^2rOO9g`{?4zX+CxZ%#MBn7rOqqLqZvZB~l`Q(XVJDuX~k#RQP>etpCFxWB0*aC2VQRxbnVc zu?=xC!;hZlbwvW3g~sxhnbczG+3&1iXE+oIQi~Z=DqOjqjA#53PAtCG=5g5~voe}; zy+2#@!CE1sQSo3JU-S&34ZbWFe<%s->H; zi;wNvh!+DY1cZa1RoMWHz*m65iEqUL{kcC9)usJz19YyZfrtTuanFhPKCYl!C{YVZ zl(~19RRiYpUkL` zXTFSpaESq6-19|w3q{W-7=4y&h*oGe@P*|MGo=L=8DH$;v9owDbhXZ5`OJcaz=aZ`?&rKVji~5uhr$~NX_c~5~DGn-@mhy_F0oCrjQkL`S3-uM4;>0Jpc75S+_m& z8$#q*Q2{!vz)+KmH7p(q7FI=u0iz3O!^t_5|8a`{d%MqH{1pcKuE2jU125K$q8o|~ zo8LUVkaBsc^UOih;MaAIIx^nAE0(b(k9z%uowS#1R1{Wuyzn9UvPF|>nRj7-kcjKc z!YEOWfpoGL_Yj04*^LgO?!s7Z!bFA$Jorpoz$;J2m=*{D>$lyY&fb>Ie0l|PLs|o0 zHPGs0h(+2BHc{9?Fr1j6*^!>g(4|$x8LLYM_EZqilyw7XXRv zZ0Y&s9lmS{zZ(ElXrxhzfW>H&o%ni`&Yd_}_o?dxU~$cCj0BO@Lp%WMsF4fcY5%|s z4bG$B&+_-HpZa{P%j&Q`Iju3y8q>IT6qUU>5UUyh;1J^1$DYBH>U1mC*RpOn|d-A`L+@>R~xA4Ye#^^tY zwx20peovDw%+sIyC;X~FOY(!ztO8=@<)5N$EK=`p$!*wx*?Q0A-;>+wpHF+3l79c2 z1M+Xk4X#%Mk;XrfdaiT7lG}b5(y{zR>iv`%NG^|vTKy?A2(S8;+(ycN-XwOB+=i7I zutQcuojYwjYKTkl=q?VYu;3Y(#K4=oP@Uf8{oS8#iHbiDt0UV-k> zoX2)8wT0zu`YT!&D-MPyGGw4qi1b=*$9x!#Nl#AD{fvDI?gT*xh~+GcL!U_>V4qgB z6)tWg-I{D4D})M+%14x=>=xspYtF0@_wM<;Cjy?S^hBETM-3pr-nRznF@gp5vc9Lw zv{G1=U%rez-bo@mrTvxRB$U~=p(J(Mjobw1;<7b2`QC6{IUr^J5+haVXhPr`%W|H{ z8sHVL^qv>4PTbxLJ|z2nW&TxcED|SDXzk4j1=h7lT_muhv#Ta+Tc^^KPQd1n{EWc< zRh&sk^+j@<^~9Q`>?sC!C=?TjlgO(i8r`m7V#w&2{s`|%xMGf~z#8M52*z7F7Hua( zG`BrTN*M6kTr}iWKTamFUt2jDV%ip@=4Rba&c`jBVcES^ae>sceIDx9uaGdq$n$ao zip}|cbh=C|-xG5@^xEM2qN@_qmmi5jvwSz5boR^l%3{XeKS>ZeXn!K4Gd(K$)tK1f z2FcqEv?7+y6&3uYS+IU<&*I$Ne8EJFnwX>Amhq-w?gsh_vY@-r!MJva3K-qAqdGNE zrCV3ft55`2!DZd3eN}1OD;4dy4)VmB!67YtNS_+G1-t5-&U??#+t3mJ?VGtIn>*AKxDVTcZJ{S$hta%S18RD z$kJ0WwXX^(T(gvVlE%DyUwnem9K1-gA&Y(?q;Z@VFx#;HMJ!;=Hn6LJ?EAS~;akjd zCoNy)jIo@KFfK(e(=p}HQbyO`R;ZlPQ6Fq*h1rblu&wd8$=^__MTR)mle zD(hSlx!cg{?@ylrZ(Nq2McYOmmg}p9REt*9Y*yq1ZFdN^v>I^dk$XgZm3u4e3#P0u9L~yB4r9Wz;5KKOjs=!-Io$R zA9;834YX+$2HgeGbQ28G)Zt8Mbx24$!_`cDoU5q5lMhicpp`0M6#XQHkHB&E$^qjt zx_KoqqyP~KeER&S`Vb3L9&-kfPMnxsB+t8D$RpI0K1&h^#HcB{<{8FyPL#*wjs#sH zDPYjDtMvC@^3}pRAa^@dEy%RB`I-t9i;>qE6-8qS%QJ^_@Q7&y<01WGg!1ad97a-= zifYmEGBIM>VMg)h#@CGmuT$qneiTzLb?e?I8jVk4ZG%3+A~(7_LGEr*X0VJz~W94O0EWd8fdQgKVfjduM zApv6I_$%$mv*6hZvo4CYG(^{PafS_+t`9}XO33UanTK2(&y98}$`^m|6S+}yd^;OL z&ade;HS#!fJnx(s`4hR3gGFvw@dTzW5-VLGH{O=VB^mS`EXp4?{qiQH&1e*CB?t5lZPY)q!;rSEDMTE4>kRilYV`L`;! 
z@Cu7p{1Z(%AFJVSE38*z%IgkOp3fe*+TuK&XkhZKUYo%pD|=C0vQm|yLW1@cn`{_I zRl&C_T23xJ9GC8;zOstecUI`*H5%=&Es9^ijD-i{Mkl;}eou(cVY+e(GniUyK2>P~ ze`+z~ji?9jRXJak`?MrL{F+|PblFta}uL&DvnKjfrKk6%Ft*%s^+h1``R`Z$9B&G_e6q41Z=+p zJ1BZTQ-?+ymEmjOX9NjTg{72mDvKI6p=v}CSnyw!qH^NRlbV4~eRJQ~dO8C$@5XRY z+7OlbwHy|S1mT{}pHHN}$Bh^cp?>4Q$)Da%?c|BPQfbF|AIifrm><>n(vboI<>3=H zOt$K`|Nbt$SE%~=BT`0(Q{RkMX(C zSxFCmxv#L}_Fk^fFw{{XM-fPlE{M{G0Io6T6FRm9*}k|I{|IivscXOs{tN@ZtZfl> z2kAa&x8Cj4w941M;ZCPoMeYG|Et~PNtOzN&qB9FR+Ao_lrXZ`W?zZyr!*0aVVbx_V<3 z)UcI%)Re)WV^c$CH-&4{yAX5rVb`sun(iY<$r5`$yqH%KAd*_}THCo=wy?wuZRbC9 zq7akUCvT{J1Q}%pm0iQA05Te$de2JDid_q7f9;U=!>OmV-m8HlA@*lSENB7m2Q%R?&-|g;FI4ya{MW7Y z#HNc3fc~nCIG98Y&vK-jMqnL48aE_snXk_IRDo^Cubb+QPc4U#2TXI`*&a6n;5de8?BtMLmC9 z&UfC#=Iq~!AH6pNh0|eU;5K~Teb1Tqky3vC@C&=$j|xmAW|M=aajc`0<$2^dHo5H* z(TkSDKDN`krFkcga^>@O%)`YLKe zmr5h=6LCC1g5NiejC$N-ooE*Q#1E3H8J!jt zWxX1jj_uX$MrTGruqK$oo~R;JR7w)KT$8DiA-W?e9Yf(0Tbmwh)f3&& z5M3S>-AWYO4vj(du!fs4Xugai>xv^DjT=6W8|}F{q8V>(V2vte7<7)GIgXy~iJo`n z8AHdf5G549h+pMPSWJ&y^NX8Giu;0fK&G?pKrbAS(*}vh7Y<04?SO`#tR2z!j&XCtON2o!wC|``R$)NARh~#JQihq@}~n5^LGa% zv7sw9#gO=4Iw1WolG|=JrdVQ=+fGt!8B?*zZI0%tPGu<=2!UBJ{*B@PI=L;(JUzny zKPI<@e&8G>9`Z1|Lwm>ZW}kxp775`Wn_P9%$}M0 zH|hz!B6?Z-}6P8{>|hzlYk<#%%Xpu+?H8DJ+0DrK-nV=rv1$4(E%VkhUeeEKM&0Woozk`Skwe+U3zahLnmFM%Vnpi%E~=$BR2R!7b(05Q~>iH?4V|zI2SK6qtT#*;^^h{K|@{ zO3dPwIknbHsi&_@wQ6LiUm5hix>iyCd(TV~fco>Ec<+bCg}(2CxAM>A=>GsomiFc% z5-IwRkYw>?#w&^l64pQD=zoJG|1II$?57<4ZwcQMn`z?vTf^oTAd>%dPsD;q{!IA( zeheV6aT*)y(Bk6VDnc}PHAyD_MTX``G>i; zvUcLm=ca`L*!0S{Hf!b-ezoqhK<-)AsUa5xXdd`x1S_W3IGKsjcQiS;FxbpCnSSC@-{65Mk%rdP^}DO$YZ5Rlj&h+_M-gyV?|ola0y@3ZPK<2*#_ut=D6q zDUp|U)Bp1p4AiXr`_hsEd;(yAem`3 zAHRb|qotm7QC_~DM^};&As~={B@P=x_50GgCh+;srDd40_0N{p4|*w8=;iI&aTC|= z*C?lo?YbG?&+h4Vp3MK6@+%xi49DRoIP2u6ClP7;4*Na-*G7&2aS8v>Sc_3)r^M3k zqjq{|#gGYJM#sROo+o_LUz()7)qB%K89^uamt%UOkoKE1jXT2%sifNt*pDJ;a&@w^ zhAo5IvcUopZJBH>V_Rgona$E=P2P&hCm|&Er&`$AR(Vp4)hl4rr0P7G_g)ooRa=F; zKK76Cs()Va?*7pkzT16~#k$7xshbZ_mDTU>3n}yb@b$LzNS)gJ5NAAc?E#SET)e$; z<4IVvNAh`QV$$YEIa1=8v|-7u>|za)*aeh0T3n<2-cPvK;=|2{BO zb${sgVJnBeaI?8rp9t{!vAQI1`!FJWZfajo(&?CfS^hZk!8R;j89I z2L}c)x}bnQ5M5VDTjFQ4TxsKT$O$}CQ>Bi>(^M^wKVBU!<-4Ur%!1ho*z zZAps$FZSNEt?6&w7N!IU5J+eO(n1pur7J}s2?4=E5fD(3uAnGgI>Zo4KuYKcNN;vf z6r}f}ND-tcy#}R(UXz^Qe-+l+``Y_i&v|=3!EYvW&by3p&oOu_&2yd?jVx7C9*ejp+Jlq1TZM0=hp`TtC_UW=D3q0t$576rlW&D0) zWPqjU%}$r7XKsreu+K7k3<9Do*2E?}UobtTiux zB{r-%%x3!B35Ax4bh5#u$zy&CNwQWZuxh=j=|wEcoB1|fvS*fmX|eZDSSsFAY#C6C zyaFA6O@BzSB_zl3N7-2VMR!bif?>Oaov&NNj&F(Eqdu#|ok_42Q8P~(X7rd?K#?V0 zs#ql*5ZYx?`}0GtWa50qyea&joXTHU_W0Y_zg41;sWQ~w$v>KnI+(kpt!m$1ruH}} z2m2nwnWHPqeDUhv(k1DO*!!X{F6TXq3i97e=(%W8D$&!PrWLdz_1g#&v|+C9LR~P;B7M)A(FmI|{G?Mv&Ro$N1S-JJ$SS6Z>I*91Ws*Qml)_>NGRsC@i1s*XL zeoy|jmN=lIrM>kRopUsm{f{!;=K23DXnCI3LS|0elP>NsK%%?w>)&6E{)uOF=}SKl z4`@fELZ7}Xf)~-{vi{R%_47ILdW;%VxIl^MQW@HsHGvH%A*@~q_{6xGJM&romOd6# zBhvevqRpJ?^$$8n^Gsz97B#hhlK((>q@=oh>(3c(+BR`}l#p7?1ZLF@FdV5n4QxVtHh< zs)?X!Porz)xuO5C%yTdvu(xH3=!A35z`9~gdW8XHyu4ZD0aNj`ziNq#p_vqsoA!Z+ zbYVjY8FXdlx3cMCI9Po=nUofu(%*;@3!iqliT_ikV}{|M1@efn$i_FcXf~7- zLNi47%pV8!BSFhoYi;~KOKa`Wh%@Up12GTQJ0*)>t$#kEbH4n4*WekOtTr}(_`$F5 z&VQo8Q%pXKd+%QRVrlX}(cqaQk0OmY{xc082B8z`G^%XGl?%Yj`t3Pb#$bxi*xw)m|5B1gSa95)62|~o( zdGSY2zZz__Wspl+VNH|dC|xbjltqw}-w!D^X(dnS%2Yo>Me4ahlxL=|n1m{q6x~@S z0C0}8$%WO5J+S!@&sX#s*1@)$!9y0(i2n3gK8H^HVr31#My!Tq+E z2I3u20EL0uWVb=Co!wV(ZRg={R`Ln%F-6W1x41e_aEck=5injvN0UE6wPD#_R^rO& zukDt~Y9+n+bhoGx5j`mCv>tf8zH(HcC%R*G8u%`n?L|vK`&X`osh4}7#aVo+Z zH{k|Ebw|_^yFs&;*5c>Y9K{TFbwJU}U5+s=iy1wj@*Fym2u(gU(ccHUIPg~|U{I?d 
zZrd$iVvJAQI9>U8Iz2}bVW6cwOHBp*t3CKp3Sq`=FaFr?R=B;qAM?k4mm+Cb z>+5}Fze_>A0+a4phg1OnhI#=Ow6gx7UN+(!XP$g|e%GK}gDrz7Z2|2Ulre6_%Z~dP!2P0f zJO6u-@~}-mi`;OUXfKz!kpWKiCo8B+0r*sL4@S$Jw7)Pm%Y0RNL`#H5Zq)64*VIq~ zYrqQay}Dg@#Be-VosiZaD}=C=Zw9Tr5pDt@`1@G}B)cX7a?6JtEu50@Z{5GFu{jtX z#IRjATX4C3H>ZC=Kg-sj#Q4{fU)o=O27O0*XbLa8dA6+&DUNLiKBiG9U-4;GZDPZh z(koPcVdW~NJzq45`Jr#i^i3{8!f2B50lzKN9gaqBlDz8Y!e&+xLg?98xeJ~U8G(5> z8s6zCAge#cj6pICXez9Hp#cL?EY+DKd)Y+z=Jawj0GrsZO3+C7UI7$#@X>^M0)#Ak z8F7yp1OjbXeSPid+aE{3zxA=o;~ztCz=;ocsiM6G44@05wCdlkF>z8g^D9YOf;!#e zWHZ-(%^lF$W1z z*lE3kEqj>W-^g7=2vf9~34R-3KOF*RXeNqcWy&%8Z{(V#N)nPCaapH}O4SVu@{^KG z9MqhQt~?mFqc6!oaB@8D)ZZ`Sd!`g|V4!lDrMn@C+#swz2&eTOwdc)}V>km=X+!a& zeI;w`AtZM&ch;1kBQQj@?VvRNUbk*vLqb%Y&y1>wAhNpwi2WTteh% z`5+dRxnj7H4a38a0K*TscnSRFdDL`#Bie?ig=UKl!q(1t*pKlCiw$h z@=L6lo1gl^gWkpyV>vHR7wBCdE64b@cxJ3g4zVZ;$_NmgZY$5@HC?JC@`m}S#XWkvR%IZNr>4=*%A7ee5 zb)M;Ux^n?N^3W$d+kMVc%$|L^W&{s*+lOqq zaS>N%MfmnQ^bFo5q;}1oy1;8VsqrSM`0Cv0TYH}^y5FT#bj`^G?0vCeeV^8TbzUK3 zugl5cJw=ah{(SWw`Kw#(`^<&tyDpizGf zvHz?6uJ;?qv^c+ETbBPH_PZX6IMy|)53ue3T+)t{Tn>i^#U*4M&PtLEKYcduUQek$ zoRf?DUnTAKyLB|z4gV%-S8PD>BqUOV`uJ9|VHYRr_+GpatENxk0 zsV=IosPF!it6e7f-UV4v0i*~sf)1|!GhDR4Di1LE+R!GJriQp+8`4q+`0B#k3Rta$ z3aCEWD1NzTd*HXuicv(n{CPGk$~1qo*zf=be|S(eY4vLdAEB}B?pMJ*vD+TuzkYxq zPF4{2Tp~#WhB7`&Z*>kPv#ZEEt?Zbbh|_dmtKgwV~7AY#I-m?v?Js!KI9TLW=H~~sE0Xl#OR?h_lq&I9hkcd z7#J|rP9juVFVxyGR011%yEv4!BlOlns4;bzxkMO1FHFcO%nTj2R2*it8}{GpL#sGg z9@mHNz|h7Y(}$Lf2vEU>`(pp55AF0QEMxee`p`Sd5NPUuuMf?p3>kp3yc&*rvm5mv z>qDzV59CDkS;P$b#tf&${HTmMt`FUy5<`QI`geWk%19cvW4Ql$szWL&$}_8f)~j$% zM-Trb)CMIr|+J8;`MtW3z11Hh-cR2fR zKFpgFN$5qO%q=`-3j2b_`C_Z&dm9|;mh=jL_50@|?~g>o{=nJd%ZC>dJ8coVLtHOrz>k$PmNF zFAwC#VKtyKDEZRDv&r;8njSLqI5C?;jDCd7M~)Mz=>JJq_olnPp3;2NcYJdIxF@oT ziQ1e(8_)z53L#${dWP|^_$?qI&gvGu&XS;0xp40X$(Kw8n4G2~C5uYz9y<``b@jb~ zJ~t5xa|QgolWen7aElr|hc)L3hcCQ{5a3mB=~K^V6Rw``G*dhGs9E34#Ko!C!i+b` zz~%bgR*od_+>pNMsMm-=D%mTtDDjV&aB#w7lYcQ{!mGF zPwzxs=g33| zyh9In#(WoE?~DiTOfBzBJU;R>`FHq-FTH0LujBHmDammq4wmX;;5MJwjsz-a2Dnt! 
z=N!ynfoi?-Ym22b%>HV+_+DCw_R@NfM%j*m?A(hdZU+$LXhVla&A}V>n%q>8R+bj< znilj>tp0H8*IgGws5${&!|Vo)qredcGDfgYqbVXpUG7`l)@-}bipFYwzUG?p1ChGJ zLkwS!1|HY6(^MK4yt(>#TuV?5+d!)`ng19VJ^9hMA`-r2FZ4YzhyMK|aIg&_h!14@ z0Y3zWL=i&!Lxh;7*pJ=%Dr$MkvvIM%-TJOn!z0j_f&ENc=~Pn~3x{TCNAHw2$_o#P z-d)Xw!6fbQpW5AXV1|-j5I?pOq4u!R%uTBG)r+06dxvF-b{j%*x5?ABUFO|T@abK> zlrC=84xF$$Cta=IYHy72#qgZp#Yy%sDMLTYUkeE{wSm=EZ~^VK$0>rQ(*~gw6{*=H@mSW>o-7@AA|?oZh@N-^oC9e;<9r zQycZ=OnsSSmFsgJ?L1ccAROpH+50O+7ZNIOuJk{(0rO&3D{OshsI7lg#JN~&iC{r~ zd2#0+wj;2T6GPknMZAiyO$8PfKE72^l<{!)u}YYb2^jo6p3(F6-1Y58bOx-)#0g$d zpQ9F~k~kJ88&P}gf|eZwo2CdRzRPETYq|A-9Ewb#_@3-?>`A_xp%r(-ivxGG3^t2T z>qQIdk3UaJ>&XX;XuHT&rFzUarQ@uqcHZRH|xa^F?U0w7W{{s~H2~vTyG4 zudl`oVy3ZycRC)PX)d72PSfbHUv83jp`+p8^T{bjTaE1>th+A0tb=%QSILL3qjmMlow;4NM-GO-AAUh4-z41}z2-~!wWxkOQrI6Dr@XtgoaJoL zEA)(4?J-q6O?%>NueIq%iSr%ZY-}2q0X=84)Em7Jlga}StMP6QhE;yDSO{#U z%`xs}g%fNzTf`3|33!3k&_+`N;4>fbd0Zkt?7@*-CtFNl+kwqk zsD1QW3x@{N`jvCLY3J=d&!rFdBoLSKe;* zMfM5h@+xZcSNX)-x6<;y%5a&!h^+V6O#C9gxStv0Uo&)Mu!~|9**hT>r*3?Sh{OQMjg4n+ps&AdS_P zOQV&&LZ!TF*viNKhNA6>fVU7~g-|BV;8WK;3V1L;obdPULCq3wl1 z+cw+kyBiCz92z3q3qN~MU8;C0(}T}kzv=^Y{W-seDZLylzWt1}!Q+AD3cBQXU;p<& zc7D1#XL9m|ip82D6266}+Hmm(W~>9}t~Ed26JXvleWuI&cNwp|Roc{$thKs?M!0%k zMbDBgdW#2j=pyeV7BdH;;1-cjlXwzdNZRQHM4kOn0$yA1{p{ zl(gNbY`CKaxEb?eA$DMWBSf_qeIi#K* z&0JWV_KuP&?rs(jfHR3c*`pP)z$F>)e1XZUnb{aW)u~WIq>Ywv7Bkf|+|S*ii4;5K z5GJ#`G-v{qWVtqPyyFzwBMxAlopqxL63ukp#us<+q-pwC#WB=MjwYWKnO(eb@{^f> z_7~pc^0&Fva+I?m{r9s#;T>Tpx}&KQ++<_G7hwFoq0&4ZG(+{?(>!Dgv(?VZZT@?5 z0s=zv*peEy5^GqznC`wwn0Dx8DAJ`8_&!dkD~*e|hsebM4Fr+nqx*v!61`!hCCItY z_FZR#cKz1__L6J|c-8ztydF|7H3Bp~c|Ww+4Cg9xtf=#7-j*+)vZ8Mfje$D8FBjX7G^dWO_{*yqm#abwrM~(b6+$vTA zk}-41%=}+M@V{>|97QwJdI1m4ZH#~j&2tQY?N66AFVOq69(MKl9im87(ypvPt~4#W z#L|ye4=m;XIVN)tESM{*XiUS~^uGABu#CZ2wJt~AYf*WB&}<`SAyMLDwA{n{bA0tU z9mclziHSMMs826~nj=c48R0RP*8IFC9nwY~G%T28mrx zS8^QC@UsnJhG9O#qLgtFmDv9_TNWa?f-?L@&geneZG8Q?)t(LW2RRJ;(tKOc^I8{I z=~LJV3bWB90V^k9%0Sam??ULcs4y~I;RnnEYd*#$vPlS2*<5BY?nl7U_p^86?fXvH}r>x68-mMqv$-N;pwxel|Wt zR9c&%8POEk3UfR;+gwQQIyag_`xMBni8fU;2{&wbPIjaSNs=WX%xc`5z7InO`vVtBqp;| zp!+iuM}0thoRCeJJZbM0eU;60aH%MGU{7|aZjhei3(1k8Vs)hc)|LL;A53*sjTh8J zu(GOV3beKBKbI{hQ-A5K-ZmvJpZ5-1%Pe2s+PR$mOlHFvgT5>9eMYNN@c@2`2VUmori7Uf0T zx-AyW7)F3nQoGqq8ZdT&ymRwELGo#9-5P@IUL7oe+ljVOpi7=DX4;hvdUhPAW<%iW z57*<60O_CTdJK60<>gzKtsF~nnYs?Dr8 zSN`AA!@r#X-;E<&f?!>T+DP#e@tlj|6fz+7?fEQa!B8wn)~h?K9WXXfWZGKGa(+kH z+GUccf*TetNP`B}tQj&q^Oj|ysm|-2ua_XZk=`?EFVx@O2~pd$?~Tf{oB&Ms@AQ1< z%cr%L+vmPskj4YZwq&n#oqz`u0eLU46LfMag%`Va861EHZypq$I>+%1%v4A7lISUI zF$t;nSvz6+XjrNawSAI|ln85|5&piCEO5_Q5h)etXH=$qGv`?*`CYnTn(Phh4&u!n zPT$4#2k4#khgDNP(Y2#A0z?sA;K6!E0AK=b;FP zGvB|5(mj*FiSV$OgY?N3eN*raxk|GjCUz}LY@KZySY4mz(IuGV`E4Vk93n^mgS}1P z87*VTX1A;6b_Wjj5t^10qqE-u5tTi(Z-E(y&fr{F3i~PIQ*-@syy{o)^;^L1KF^fc z1DHD?(LCi>RCSAx9c+p%y)75=UUwEDl>F?js9o@$>AbkcX3HJpe_orJi)qUS2aIKP z2$P=U-TQ`%;o1dTrZBbX@rc~$`w9P)%xh}t^66Uch*e3rpCX-}AInqde2o>?;9#9bJcq zK367fmq?dC>8(UfY(_tcc-&@y6Ue{+WenLONyvbI`8wul2s_~MN#oo+r!?skaV|b5 zgoeFk0wXhL(g#!T(#=KsADZ-sYt&MUAjj~~UJg+iub$911-=jg*-RyRPZ_3Ju=2U{ z<(ICR<rBMYs+{qFXH{w~P!rK`uUvvM8()ciIVf#Cht5El>)Cfe~jo)h&VG%A> zXdBHFY9sYUPYfdWVwXA^W*0j$AjAAruNNc>PcT$>zt77qDkP(Q6FL#e%cq8p?!z&g z4l6#(&22T|v5Kyd56`2i&Fz5o4L5^*ZL6^9@VL*BW2y9LH8Di6;QxFe8Ug_`#0|$% z3jGd9Ro(Z{$%zvsZywTmgY(+M7lGHKsaqzQLkK*fmKMnTi%Ck2O`qSvLlgx2O7#f<&zJVhhjB z-rw+Y+U4uwc!pLO^y*8EuHwYW+G4h!v#u!gDbfC&y)+#6FZ8 zl)9CjhhkzZ+CXS4Y8PE3916!Rf`GJG(H(zly z&A}AFDYlPdSlK)sPOvtA!Z+Mu$NAn%S>>KMgTS>yOPE#gy#d(tsL6arP_5Y)+b+fd zzy-H--UVkgP`npnYm-*|^lfffkGLyvmI9gKl<`^VQaonoQbVulo4c@H&ePalnwiW8 
zULNWxP9FsRy7uoS#J`3HIEp;Y*!AZm&}~p3=q`L=6Ec2dDLixUsTLN>r^dcv*uso~ z(`(srD>$fM&?L3;q^(cjLXa+$kziKaE11IA7U+5$J_e-@JSo4cb@^QYjA8#h#%@v{ z{pmj2t8K$;&B$wH{SQ&sh3bOn_J}4glP6gei8tp(6=$sD_{^AoV6cPBhFXspeg*ih z3L-;@y<8xGM_yyCEx9Π#pwEoMSmxOwxEeV}N8I z>(ZQG#@8g;(J{$2P~pK{v(VGuiE`a%N;>7HHy(694E$iICASzIvMrS=L%7l$=v+ye z4CEw^C(m29Uf@^kt?`c-rGF=+w-?m>i<|}V(s%o?@1=$<%7(8EIA)u+Lxj`0cC{A- zjH~(aU^%9BXoSWgq_~|~YEHyf3b}^69ZptJz?qYGCB{totK1m9jHo?_(6o=5nMG|) zY3;SF8{EGr2RxUwf*lOMH-x=x?KgxSI;8d3PLI8xO(4xFiYFNUFt@O6ez@n5qI7X{ zF3l+X(tH{@iT7^0qfGg!49DQTQ5< z^sa@>K>y}Me-6QSG#AYHhRTq1Q=AMS>vr1E zYx6Wb(~6zNuCqzpD$uy|;w6OKx%xD+U(H%iM}3O{U&JXDi<~!~{mi-5 z&$)HoMyK52cF-kcEHqUyHzA(clOt}|GzXf2w@5I4~NwD`g|+K!+@L&QRyggivPg=crz#LSP{ zK$(t>bo*V8CUH|d+l7=QLKBL_Bh9Sa&_xrcj4N|GG`CH$&ycHf>t7?J&zGCS+@AA@ zB6l`Tgt5vp<=lwl7w$Esp@;qhA??z2vq6OH0Voxln{X3FD(`$|iCkVQmXp_NR8l@4 z5zDU;!v7Q}()omRn5^W-o(uB`U3oo#T@%$tjlHQ+@mjTNd&(lT|Na~@B9>b)QLSsJ z`w|$6Ou9@=?XmtOX!4$YR7~!^S)nGE!v1NGRISqv8cm6Lr8j>>Kjy4r=h$5^jwWNYYC=CT;Y9oCU7}029(rhAb7iB15Iv|35+t1s^ma$HP zQL(gb54$6uDE~-QJ`v0fv>p$BM7uNF4kpeA5Jyofs`Ai(4y0F8a4_~i$mGz`iyBEe zYfZ|Fnv+UL@OuOcgJvdA*`#(jm#oYyr!^7X7zR33F*AW9h+!ueXG9!c&p4i@c zIUJKC63O!Owr~jr4Gv%=5q9;>;^(aiBu%nl?~op7p#A^bJDZxS+&D4 z2%v&mk0RN{Nx7bRe}6rMq2oBE-eHuBM1%M=`FZ50wJ}_523<~T!n%>Hun<$cp+jB7 zC~)imFDH;IDxyAj>xO$yea51?0I!~yB`p0Uowv!OQ&_AZ?QP5HAyO>t%nqxnIW4a9|pGtUEiw6&LU;IA1^4`bY1{NH!EWa#G z7@r7+t+L4kzFnJs{Ld|VM{Pu5NI;ZjKl-z5HwxBB$8kd4*T_~Y97GoO0+!h|qt+C> z6YqYp(?P{L@UKl#PO7&c?>! z_rkSF4c~3TrB=sPArma!>elb}x6-g2Hm+~{(geH4V=H-0Z9>t}Vx+9(HWit8g}Wyk zfu`A*LrNNTv8feSN@Z3W<Ic5CF=y6ZeAvd$Rp(iRv%bW-1c$ z8hkr>Y8xx!PIW|)SGImS#$|ub28I=>M=3pTPd|TLcJ~GKHEI?rX37^SW-P>wg(G%| zR^ZM_EagKkigzugwq&z)FeH3e?^=l7r{JA+-mBc&fOz5P-8E*vH6djUc**2XhHrLX z0w!K8&zScYH3aPJuQ@DkNOVM>^|AHa% z1pTZqyi=F0EHFOf*dx)K!+OB05k<)Qny<&872}VSYI%V;{#$dYbts?apnN*!=+g@n zNE%H#Y)$kgvhIih42I@B1-|ChI+G$d9+<9Sn%5!1px{Z&^Qz~(->fGyx#l$ z?JNQnK`GSb?^=J!7{!)k?rjtaK2v`+>FI}i#>s7FIpX}Zhf>TJPEP(@cz^%;OXBVb zr9R6ZNj>*GUBmg&%#RP86HN~#cbrE*anijb{%d|yqO1@~S>eXfNmP-`f`tIdd+$Bz zteViwjxD?2-dZiuZoE3dOYcL4JNfIfyqHs0=vo0GC}dSu%h&iZNVrD!t(8O+rb*v-^@*L} z_qJ1F8+Jgjc{)d(QIwghmQA3ag`cFH)mP;T!qbashNIOL5*u^*&)y2b@~SxIt93Jd z-KMG%g@j5hRe#S{3!QhQxC%8Ly9Y;kUg95maz}JdJ%)lMa`^UIA7P1tnGDg8XR_w8 z{1hxPy7&l7R2u#YW)~2#qPo$!-ugt)cr%HQw@*l23tnUfA&%1gV`>O91sm{Vsiy};# z8VcjJK6hW-p4Y&|Vo`oCg(C&-WtQ56j|Ji(Y0m2UGnorN7*_-rm(Y7}mJWSh)}hef zdUwE&$cWia5KXP^4fM)uc_S1kHnoSFW(_hGMr$XTz(K>38yBwhS{LUDJpqX0e5p}p z6A6h_>{gx_P85DZH|H7W2nb1wiz#2$6AivV&pR6jXId1Er{R4HP5|(BI>hU;AAc74 zR~7azZ>&%o6#v0Wx$K5JSsj*p60H%5C@;YHMtf;~qgQCGua`#@pKcszhIm1NVg-tZ z1IDTHXvL8>94xG=l^fPqGw5eVR7)he5kX{2;_pkizWM-Gjrjxi{7Lb^gKe%Q3CQi& zA+j{?I+u#YDP(i>=^94@dn4c7rAJ-L1H}#ZJz4Vju;+X0%Rnd^4ZS;y6w+v=OrXoc zMpY_;0?JgRO;xnfZHQlZvTa=dnc=Nncg-UGP3c`tjxMDtdsxS_>)_09`RAIEvIakC z)d{aOUw>q5!hD^{c%UUh8rzjn_C;uX@~iw+tTqGVruF-|{Z|LJy;f=;AUi5WkqPA3 ztcdQ|k>74x$Uwzedv4JX-TT)xH3tHtu1W9(6O`7*ICeXfg&s%ggjuW83#Lq>m}RgN zFj88x5V%CjVk(e(-~I}CqIN19E~JL%_Yr|KVnn9TZcl~rBuw4DDgVt02OHaki-5n+ zmdyA+ZGpEy>}}3X;()iO1i=*!h0kJ1RQX6y1o6CJBAt7FJB!>3{mE3rAk6m^2X?Qx z$5)E>lI)U?J9Hdv;_2gX3bfIeX5&vO4MUOFsXev--ZP2HWobn~8P(YPAy~K+Za55Z zuf4sVhdu(BwCCEvA|=h6qNafBsOKa59>FP2`shv%rci9tWCrR3eegl*mH6ZkW!AY( zN5{4S6XMGJJvsPIeRP+iK&sbSj~ zyKr6KyD@;I&Jt*E{^3`^&hFDK{nFcs=-L>e)uGjK-FfmP6;Z;x%4dn0>nVQAg}(-V zO9P;(PaIQ6NIP{_Gu#N)e26xjSDroZ79AY^g@TaqBoHfl{ps1u!-1LNX*bnysyxOI z=uan&!|L@UA;R*zUdf>(uY6iI?aG{7*u16uG{kIICpUlu9~OqFo4)UPG9QL-iB`7^ ziPJcoOtDuF1@zIfDb=mnseU(NI_zuksmD>Y2jy|q8hc^1aY;@>xqG9samUhSsJrw9w>dD&D7K0P@x^Ae#}W=h#lAs+h!hEKnracPoGXIl@I%D}4#)10YCF;l-Z31ZLw-R#*fyM~ 
zzXfBgv*A<}c#1l6n|0K&G`j;XNTf!h9|7Ym0iT$N3yH#7s4Tm~{5-FRx^Dq&zce`M zvu}ssOCYBHt~pk)U?CWeLf^Tb2pTj#UV~PR0=IL~P=|85CDB|} zTWg_=zQ?{Q%dpU`p=yDsk5d8I)OIH^tZFS#b41nBjLp^ghCU&%`U}aqB$$RU-@Tn2 zPe!i0Ym;|Ut)`Pk6JoX$pK450uD9`-f!11hjtCXR%!d;v?+_&;d$IynOr~9YPM^Z( z42Rvt=f)hr-+yhfG^LdI`{R@bU*Mnnom-FEd(5`KGlvCBnUmtBKD^4+4UcW%2*LjX8viZ5#apOA_hQ^;rSN+0%raxe6iTHEj!<$>B#Nl5xX6BP)0Xx6y zM5Wz^+N-Y~oJ{&T_<9T4g)F6LE(AOO}WcZx<~+LcB) z*f)w_I7Hd#8mrCI#>9AJ;h(KDiZ??)x-I`rH)0fS5`mWlOzV!RG(OXP+&I+~3FrS7 zr*)aG%*soyiIxGZ#Nv+OtWMfUc7811M2k}zgNF)c4J#O_eVZII3ZNixJu;?Ls=dsz zOS?UWpzp@qHmd;kAD0o2PQ;~5R5tM=F7-vu!iZfvalH1RbC#Zd!Hz~~BH-rDW4+c< z{FXjWn;k(Rf39$DL*U-wqmB3T6I>FSnLu@~u3TLVMD)czcfKwHDodiMG_y{%hYO2( zZy+2f06CJhr7{4X{t_ZD+_n+#q1R2pyl8IhC))M~78(Q{6uIWeWYUAr(Vgs{&bP?>~HZ^4(2i|-et(s*!=*l3VW16ALN%h`TV0hDmW;@6pAZFf3>QX!m&-Z zz?8=xXZkMBqEaQv+Lce_xdPHrDO@VEKkI99`GR@zr?;*cN|70VEc}>xmriaQ@W#jS zt;py2$Zif9M8Ku#hZx9d!DulTkO{BsbwGoVpwU+(4|z5CQBp8*p4) zb7Ktouts2c2zWRR1)Jq4T3e1Cnq-AzJL$mTU=otYVVN`3(2c>*qxqU`xznK6@7fkU z#E|+xk`lQ~&ggE=v*A$)(j|rQ63wR^AeLMS;DnJaFGBo3^FGf>pbgV~dK3*n%H)DX zyHk!v_aL#6`r$ueZD%)I&f>;)5O33aWp4P*P%neyZwMIr$F zj7In)IOWZU*225TA(+34uw(bzrVc~1Q7HXKn}W=Gc~jzSM?A`(pZW=~EFT%kKzyE# zn4Nfnj_C5URE0QbQbI3;S_=YXQz2Nq0Wg{{#0vu=K%WqysRnV-)wO_~)dUe~&=!9f zeto<)nD$ZFUc0>w^py#3RhMIOPA$riejUj9G51QWbmK>j%VE&B0c-Q`x+vyby(rV( z#b+mDp+6hdRt4Cd9I!Smx@3iY3GR2z0ee?(vP$bfci^~CRC zcQUcQYtuJxeEaAROj0C_CqISvZdLD`%!ozr0`^ygH~C=G1(b{4f53#B@40j^<9#Uf zXwKjbnCx!F`_2nJK^{BsAS%|c7x-}m%7;cs2miWs;bck4BxTNUbLR)l>(Im)(SRTP z?)>!jfRWXW+LTD3IN~}^LW!{)e9yWC+AChm*zy%kzDhr46YGml)wH`L-s_t}XFCZC z$!vvOc*e3hY@^5-DumeCwZ9Vncv{MDDyhJ#g0^} zjWx_7CXR-KZ-mXkWz(iET_{XW>4#TcmpXNY$bYJrMPiY<6E?QF*K^U^5^>U$`Ti*! z>lm9?4%hcddwYw!zS`QCly~pkxz#)68OenTy95E7%{JxTvAN!i74eA9J&ov-158*F zam*uqFEPa7zDV%#`2W}WkUAFs8w2-9!dIP+*bzGJ%i4G_W!RfZLd0^0!Ad6@%6Na1 z;lV|aR0b&s6nV|QPFBN<%CFCX8=1l_zW>`>8vudQ@=CQlH4&+~Z49h?+#Kh0Et1T0 zL)fY9-j&cb(wL&8*URfql-I(9^?tt`c~0pD1+d*Bow5xzZ@AKA=U{?n1g=e8y--%E z!ub<5!i0oD^v;`2==!v2eXu7-3eA1D;y6L%`ucB)p)(F>WF5=qf)qWQpzG4Z6R!x& zQ>ZFD=@thqrFb~leRP+&`t=to3JrRm5>PR6N1J{HlgZ_YpQ|CeJM+jc)=TW>yzgds z8`&U1JGD5no28-VvpU3Z_pHQEQ zbDne|G|AgNEX3#Mdxd48JUv{UI${&>ve%9$glW-^afpZ-dhJ`Us2_D|vNW3XUbtI0 z13>HpH+gUG`5NcKZ5y1vb1H&DCy=*lPq7;){xAn#w5BALCs5UCBvKy%0Q1w`h!EAH zotqIxu-&`^Wpi8d=z^^|h{uRJbXnkRwZGf-vLx-v&A~Meks}<)o8rkjFrvLpV8|9j zJlbu&O(fG$>9i3luk3h1mrEJ)JwtW+_-^q0T6_tIF|RDxXAmf_SSr2|_B<`r$GdpEJAPavfuFc{WvCBYBdU!Z1=_!z zFw6lHALVm|W?Z3wCM{3}3N{*in3ZryVI`t@MK-YlX)-`IsJhIubCsEW-8*?ND(aAx z$w6Uz!pO{!>s&*ixj)sO`EP6W5M5H^*Kqb?r%+`98b4D} zyi-mN>VWk99LO<88ogr^CHc(Vt5vp!m*#hwUui=}BU_WR2!RnH)R}+95$oz-=SBN$ z=Df?sD=+6?I;oLhG+E&w#H353wjnK2j_`JuZ5(1HYKG+=o3EFR5t!exysr!G^b=zwy=s`Qj8I=Hz}wBWK` zFY{?8hr%a-8}8GWHScRrBow;0LBRH-BIC*|cXxW6`_4#CJR80AuRxF@Wj`hVzMm?+ zLX@Fx%213&FPj_&BzjpL-vf>kT_TNTbhzijE=W>_RzqCu+Y{-QyL$WVfjEbqt9eU$ zPU7)=W}C}15}xyyGS!y1e)}n8Ukf9BW8qKm%+|=#B=bIdafCC#eN8<-yO#7kx7iR_ znn$)bzR|uAPIslZ8xcs;pJyp;W>Y7vjXkX9Iww3J`hJCMmWI4tTf&h6y~>Zgmo4FK7nQsH#v#z#7lB zTvAv0I0>5oi1gu71MPb>lO8&xN#BjOorBqT78JVM5~xlr zH4(tLM;C$lPSjc~s6|3<;tqN(6ByXEx6u$_zR$YX=K6ZE|1GoG6MeZA>mP;mOnF3C zUN*G7FRG?7V@mVKD7J;z-XUt$!#q{9>*(-lG;lbV+hIZFf_M0*?0{=KZ*YB^=xO!) 
zZ&m6v8n@igODDh7)gDB3V*O9K){}RlJ}vd9qX4nGY@(?uh1O`gn*(5&oxEO-j`Uf{ z+@Lc-Ubd)<4}G&u1@B5&VL1OiwVj|kp4#d$8%(Fg(pD#GU9vg?BP&X6n!Q3vi;%UD zj_QD&(@ded_zsweBzkoCyR3uGEnT}!h)QV0*DHtnXS)_0FCwH}U4oh`42v64@=hea z4MU-8!K$~}%V!chVo;w5jO7)wFSN9-GydQkHvR>_F3EXcz{!jP#GleR|4L6c6fQWS zeR|2v?X^{iCRrxhhi$8pONW8_#VEydCdu6INPS$YDFJNbE>=*3VwCl%BcAbN4!(feEdl5$xBLUd@*RIZbCeY6;vO-g09 z6QZ+i)uxp8@&_z%*Q1H@KX-vB7~cGTg)0;mtj6*!a?NnA{tC#Nl`;mQ^F8g>|6%Vu zqnh5`lAt9kiFQG_R10n`Q2)&3H1OdSk0g4aW|P^2jk5a}Xf zLqr8ZqzFwQa%stPqY588_6cC%fW>^y8I@-mQJf z&t0;$h>K2jfBtv(z|O5W2vYL5tc9hVupsW=ch>)$wXl;gDj`V~HT_c*+g}n!uSyBr zbw)*3^!^cp=&mLI4>5?kQF@KQ{jxtru~8mHKPF1bKC9kg*9!fw{WW3KRw49(@9Itz zTSL`{R^GntzeN}Q#;#3v|G$Yrd=RtiWkQ?IO(!;BN2hAPTuIp9fkZM?peTJ@NT0kk zBZTunk3?7(dFDg-s>15AeJxNTIT8#iId`;eNsb;NYvUTL+Cd+oQ~QXkPor@D?Gy1c z{*jYZnLeb|Ui2Q5aT3?I-OwJap$S<@%V_pJ0sO}GWrA6rb7AtS0OL=`)zu0dqqsDd z6Oxv$#Gb^^1Uonw|4wu9aow7`rLb#UcJ9&p5J+M#b{Lvu??|j;gc2<(G3t;fMBt zCralkhYfXc%2g?Sbi&Ewp>qaxD`gkGhA!?_(3jDI&ey!K+2SsL<>HYAzP!79pxJTuclPFbs$pNADPdbt5&6c&FU-f@_u=X$6wSN{}^l#aUh&}&~U7HAd zP9~nYy62iDCE?QA8`tN%u8sc2uGx7g@%?z~b6xHFJHHz;@uV+P?0?3t8SmYgO1Aaf z_>_8~a$`F4`r^iCM#SFDnS#5Xo3kawm78<_wdf+_eZRiFu)Vw!UDVe3L}!HW{;jpq zbba!{cbzX<^gk*kbFKHKoK4z;z0}sC$SI~z-u$x9eO_lja%x-evq;a6rR&@h$f+CX z4VvIe@g6rds31z00>Jc;(LeVEqQ6*M>=jH1^MVA6G*Q5?8tpJ2CmQSu7^T{i&%<8; z=3RbdG=7~F#USGsD3W)_j3dHX`gnR{Bqv*`rh|rO(lp0zLxhfZT>Vxh8Ba06$*UUB zD!y_;n$Gd~hlT|yFH7)HBE_~<3{$2$gB;XS;d}E8gEKFqcurD;zdGrFwnM1kb|>Av zsZq3Ku+*hY>8i5z1a5Sv=%=msSRtv7NVOWV1rr(YlV@F`G^S)6pG-jg^^RbZ$r}RD ziQRYfw2xLs+$_9lDr=c!X4SlMhZ$R_e8IHeesm?JaiDPT_1^w7n=5x&=psUd>42l~ zYHH7piQ;bWfQ#m8+E8qfTCwR9cl*`!k%1!hhTbQh0jn9~=wf2G>7aM|YUYf2vF7XE z!E4Q{Sxd3STAxjy28^y|uMZS!|LA2u4cuJK0Wc*bgxL^H_!|Rxv_ww;RV{v8hK}B4 zA;j^Ke5?o(uW<^Akw%GmRvbfsXB~AH+n;@kU6Cl@iIiSzNRm#jQ+M(~LJ>7)QQwve zJ5S136tA``Z%-D;2G3X=eBQSX@+~nrkUj?jVfB#g30*XkRe-1EHN|MvZ6eLXmF&&3SYx94#*v@MZ_P3a$- z*8I^l@C)HneB|An-VdZL9wHNeCE*R%5Bb_BoqRrcngB5IaOP-L!0Z0^LCha5OL1Rs zJn;xW?epV--ZFwvL z9301*;JUH?2+vg=X>9(HC&-U=5a^IxR5^!w+9`1@HJD3PnZo;ay^A<_B@%WMV#FMI zthyW_;XD>1Jioq^wV)aqF*F%iC)lIBY^)G@(Ruey)&fx+jI~&HlCSENm_^dKqP3vP zJ6Q`d(;(c^v-A7%JJ}LGOPB9FGykl)ipN=8Tg`Yd0TFgcLr~SPFcQ$N;zRz(53aS@ z#vieIos1r{>2}`tOK>H$eP?tdG`D*x?x5i2aAK>0&StgiZF3Ooypbvv6`XKu$w}hA zV26&u0OZz}N2t@7x{HIR_!HnarkY+zEF*Fx@3|X(fk*@+I}fb1N9u`; zB10pV%{gyB{U;GfcJ5`G=d3|^#X5FdKfTR(>`xhpdV4CCI2hIQyfrO9HK*YoG6SuDUN}Q42 zoe&&;PYWsl1n)$ObszG?Mc%O6hNlJv4`z^{A3ua}lg1;6CSNC?Z{7Ot<)_`s)^@p9 zocX@K?J(PDEY5wmvA%@0SMb*xT%jNLe|-P4q+w+X6v5lb;Q#g&q%)EW5g^dE*Ilq9 z#kT@~V5UFaawz5ulM9^e#%MBcl*#(OW+8*=t`8{Je^IcpB+e}+_ADOk-i?hQb8anh z?XM5|CFfpj4BaA7W2u)b>p6~7sl=kI-Cb1vD_C0~*mN4THH59I=VZg6u}dNAWXu`` zzK_Kz@*4Y;|C#|6`mxS*&@}t{^3$|lox^H0N5m0tf z1h@+VD~d?6bGp+*x$AmfQy?(Xl{>%-66)#*n?48cI#=ily=oL)n&MHK>OJUkG5mED zuAM8!?|iP^1)DW!eOFYC9rptux=}Fpg8JnxAf_dfI~IuTXpAlEa$j2t>GtDRPKj;k ziG3A$DH`viBN+2+9rwJ%>$PA!9`1UV5qEGTZdbcw@6Z*Be0&Ge)roQG@b#S=>G&Uj z#}_--#YmU;>hK1Z&s)0$;dV!hNUzHT_%Zr#CJOW$I@gYgLMV|ZjqbDrzstBJHsk(H ze&=DLfBxD<1A!zdjienTo~(b8ylIlydaPA# zob8PqK=|(_ice8F$Yut5l)<&hzzOH_Xy)?S=dP79_|tQRn{!1+bHz7v@xpoFxjQoU zd9nd{^67bs&3XUSM4@>2{F{735L!>OfQ82_kDn%u@-8m{u%QBrXh4_+GfpqisxE+0 z0iGqmJ^%m%fIO4S#H>(-8;ijMf#cXk9Cm)3)6@$9@bGiOr>>OYC>-1!pK!jLxa)m5 z|54nH%_L_O5K3@3X9@x1K~}=2*mSNJJFlR!l6QE_Ja6$b0b8>R`;Jh`sp2q;FJlYJISRXr0)eQoj>3zo{qUOm@LM8Hvvjxz6H{Y&-$tbA z9ln-6j~yo0N;nkXJOY87s}siG-^ODlIg1M%YF*yCI#N+JcWMv}RH6f7LlG19Hp-?9 zq{;xR0$>hGEycdlwy){j8yG+^j7zDy3e=t3D0LWWpeW+Ljbn%2f_5@f5MVlx+28WK zylR!zAaMlS{+(%xcbH`r=#D>pedzu(r4$MuP;H-JV^$9)SC5y$;zwIP#sGF+EQB&# z6%R-g%4ZoM_*;-Y3CTKA>FCu~o?e4wHC@2h5SJV&d{lmXZhP^gGeV=#Y3 
z1G--2IRWit{Jk=-!hvzDfGG4v*~OVYjw+81`LT9oE!+iw2`UG<#MJwhcm8T_)H)R& z?2e?o!C>)#8nrrbs3DVzD#Q1busG7FsGPpm0`?K4;}A$)sZ1cVOFpAY^h>+^L-d2_ z2fNXLoOz||&0yeoZL9Y7=1OL?tk6N zuQBfz%-~XXFo>})id8C(-`O%pkQZ1F+IM4b+$^%{?+n-~#ST2FRRWTal(6yDa)%sV zik`~(;(38!BshP+>_%%Xwfco3{8WFN@`KtOO~f9N@|1vf1RXw$$Ide0em9ym0D!~G zsy|@ot1SPn+;Nj!+K-1g$2`cfXCm1I#GO%$8L8gyVMFH4E@0_N{7{wSkS=>1S&J@? zQEU)h;<#^!mfiyx{VdaX*aqRoN{`o03S&z%tAwclh}qG8AC1KUFalVW@CsnSLepE+ zm`^ZE_t;z2?RU8F6ef1)<<1zYdrR}-qm_#2uG$mK2s4N`Cm=>{{Zz(;;Xypq1{mpC zRT(H@&%JnNxxiNGG8z6Mpiqp0s1<&tO9hSw0MewVwrId^sZOe*vgq(@N1$q46I>l# z5N`I;SQ)S)mAq(v6O%P6uU!8ipim8XD#~$+^PEGuFm{;A`3|TnN4e3ilP9b%WmAeD zxX+cn?k`@ym4C+kgQGAXJL@fIFW)-`>h!5@7#HuamT|7_ah&)B&msZE)prfWzm~-> zWKnsoY@DZhLUj4P_+l|${G$u;qfzjOtzTnG;veKIi}wY7tll-LX!+4h^L>mamTLlW zDue49wHC#V4XrC722R2fxeP6koeTVZYm=CXNzxhIG6i81dxM47mz_ZLc}rpAWNjXL!#|#)dBA>i&W-eo-aSY5L- z{+=*t{IX>w`E=u^@2r+G=Dfqq=p_V<^;wh~>k{2DiwE8WeCFZCT6q;pJ%r2XHp8ed z)_(#nM~XG@kQxi#KKOGEMrEo+AC|teBVh6A9OIi+BWHjgr*Q3q)!v=Oj+8;&MeGGC z;L-^9Fwjl4s4QN@wq^?uZ05Bmu&Zajnol7tPw;HhAsU217=HFUr;z|B8aXuNjq1Bb z07%+c#L}#Ce8=~<<4eE#Gs;lrqF;r9rt&gyPwU5}cT4`lU4lvcJxi*k{XRk2O--xX%2~SrNl{R-(f*nXJ7ux2R_ZHIzH_R^=>*ChdZvsh+ z2onPA;0M5yaaLN(!73hVN2$=Wna_*|_;hgb1eg`s<{ASP>IGFTK$$RM2iYGgovBa{ zY8{J&7{+_mjr4o>|a z+Wb5UHdY(V(L4v|xi;rzWs#B;kLh2lau`mYLm~*S0uw}PszN=I7Hd5G%J|T-Y>@+R zXM!S>^Vc&2Z&@*Y3IZKJCHQnO@c;`~;^@l9krKxKNlY(s$8?`39Nb*JO({2)@d(_` zS9o#^m&2Q-cbGVY6YM?k4JhQ{kq3Z*t4vIvEquy-5#L>S$x=n0Bupu6XDP1)7K)-hk7UIR@mS< zY`(O6%09(Q8Vtw7+Z=gHOq7Je1WyDc#pP|hn(+{-hMT#{j}%eNG)@$y=4r23FGDJZ ziD0E2dv6_#qwJNvOVNSs=xDsd`+x+Z%#2+`ex-fV0I%NhG;mDr&pb>FYdh*|fs(bN zO3_-K@M0ZXdukb<-rPQ?=td{uk-;)oWmMy-8v3%?OhInRuyKcFlO!5>PqmMU6Ot34 z3FcCB#VM1j&~TI^AVbFCdBcColZpm$Oed|NBG=pDY~Uvm35L2N^#CC?laIlOE-AM( zuuSSl5GGIxO68_8K!9QIkz%SjQWqEsG8&?h`It*bLcm3S&+%?3S1McvFri7hag_j3 z+&DUg0e(fjd@P6v@{Qg}PWhMm_-@!bfu9=%COu=l7TcKAa3) zU7?3!x-$ zY@+y-h#)8mOa@+1LVcwmR8gtJB(jrt)8=g@7{L(Hic0fWADamG<}}C?0)4 zo)>7IEutd)zV8~lkiRhWP60}OqQ=LZo?!_55<17(=B-5G51N{t0g9$1t!lh5%eJbk z$UAUQ;Q>Div^!+@8#V6r9hN09IAaBGQww*@Sau}=VG6hrfmHK@FG<;+Cg+ihtXBXP zE(Fj+$rJv&NZ+4_=1{{e5w|iOE^F|d0rQD3%ORlyoOu4ASD;)Q3>Q*48^VJZ8Rl9| zNhybl-mUn`T{J|ZuDaddHR#*DXKl;}q~yr^K-AKeRT{Eq??EsjQb&7N!ucV*{dw1H zAh4!|V;y#OZT|FU*yso-4<1~A$minI)&h4e&Oz|$ZAI3)eVl}|v0 zaE})B(>_f#GFqC5M3PzDDkDtk5~-7&KHJ(=y0#8 zJdrvVZoBPXi5a(u)6=0|fYUh4vx0DT0#c;`j6jHDiQ?e0X|P3e@#TA1qVPVvyXP)VND=U%S(VA>`RZt z)%&|OrxgjO6D3UfAEDduB;R*PCxr9)7%v^H_?kcxTm0TjyGeVDgX7ZRk5*j3jKemhK?9~hrlX=4U0S3ugup+G zbdIH92tN~S`}=X3Jua!=?(efgN&>fFy|j=@Ave1MiD7)5@%cGCy%=*5RA zA|&|_{h*}U)S5+sKX*n2=RUXg&W&`9fy{Jb%cXiv2vLvi?(eQWdeZaiFUcrKL5TQ+ z%10~1|5cI=fTsaR%?+!2Bc1n(__14^;I7)dL zCAoU}LDos;}bdc)Q!jc_{3hYr_?pt?dJ8LmCQtI7Z9uE$L5h zml2p|N(8>~g#b^9oPXMiD-@Y7efHpUV+pZPD3ZzS0kx% z!pMpkVgheynN3NL#AWf`5DF+0u|of(-UnqI5Ryc3$ZN~^@0ygBv5*IH_N%t}?I>Fg zj8uovj~r5FDGj7G$@fe4jx$p<4u4BKd|$u%oMyUJ4Mts<H)Oa}oZ~5zU5%NyUg+IEdO+ULNm+F3ll_g@U6MrJ~)+j4(Y7 z?n%_nNB26AMf47u(p9CUStO`LZrL?DhhFiP^Wn4zh{A-qu~;lpZ4@i7yoobHBT`#U z2g(#xj(nt@t6vF#gzz%#JRwjNHGjU}U?GIdYGBurMnortYoS&S#AM4A%*>L2JVGX_=F%$48jSSq=|%DHah$(uFnQiAnLIc&TuFh^t7w@D~9j85|1UE#4{m z>Kt-cPs0I5=T*Kwsc#4OL;E`$E4M_rgvk9Xl#)|!47E(Xqx<9h$i^5Z@t`TXen7Q9 zE#I_{7}Nu5S5WPr4R78s?v9o`pZLJi08EM0on^{l$n|5IK&nID{+aL!XeCs+9E$9G zDjlWZ3a}l3a7$40+YstR3H8;K>HVK*Jt5G^URmQMBp|r&>uqN1NT$&6E= z+!Gr3@orx_|BlP*y%TE8^5_Xy>Sdwgi8SQ2-_!M}+hs`{(~WeQGHvLR?AaNSE;sU7 zKXTM_0K@IIsPMGBPq$s|g?DAXb>lW3@vIfHlsd7F4Ph40a*k z$(xG2aXi@CZ%Epn3)D#GJRQ&TC>o3l@d1?2a|rAqaAw$-3po%&$U9HxVAft%Wi`DU zmcgB*;F}qbW}I(JUVN~iNeC+^uREuB5+Nf$BP`p&XOZdO-ILvVLgB8mX)FA(9lgf) 
z(N2cp8p^HQf*@xT7}v;viUAFIpA!X*4ptH1dI21)08RmQeoq8?EzxZ5IW&j-u3kn6 zwlG+RSiqgiTRGnCN3`pmfd>Xq_tJr8zfi0yt&yHkjQWfY?=Uu50hvTe?njEC+`K?d{iK-wJqakc8^U;1* z63XEv`gq*FGc_C`C{@5)WIX@24VJQExsL-^l+s?nEoc>MD6?BL$&XouH3alVwV~t8 zOUq$VOR8-PVEENgxYtRk%|{cwtSmo3dsZy=`_cGXRkrqq-Rcb>=Ee}#kk*_O`~#%i zITH?N*faK+QmFPPc$Hj+q%BJI9w5O58EK-r!OK?rn^8OB!mzJ0Wt&zO;%lnjF0Xo*>fynXcKz@1<&xdGkF?-3rl1gp<7`ex!XZpd( zT0?5qFD2$-WVJ88re$2BOco&89#hH%lY2Gh-fp_R43ZB?v*M!ksMC`wuy;zvNP-+J z+UtSFOqzM`5_g$3UHdR;y%8UekLiz!&_0Y|UmsT%k{oz_hy_VTN3V12x52puOe%+) z@6UAV$sEa=;q<5H9DJo|-q_GLF$^9vbzVqSy zYn-twoV}3<7rd&b4VOO!{+^BwAWo&cQ0TRbFkMt9Tssd5mkuGE?-&vgq$=37U8A^+Gs!avt>LP?wDs(2aa>glbssloW}$PIqw|bv%(>b41qI zOBO3}(m7QNF?hT-D>dxun&Yz?r%&`LMiJQt1pNwB8A57CC!sk>jnx-1eRf@Z34Z1U@=W^f*?lT4b zk{+emqKSQRev${S&BZV9QvGDH_ZDYAk*y2s=O0+i-QRYvmz{5kpKm?mUX?Q6zBZrH z=`rZ+-aGWTj{i&PUXT3k`4&%)!o~RMtF?_bT-~y_hcM6c>piM0E&qnw#KIZ}O2=%Y#B|Oiw zrFVau^<_@VUH%+sgOk7T^}?A2jT&$EBbVjYBIKcUZ>B6~>GBm~!d2W4d^02O{h5WK zH3#7WGKhXkm_?la{r*>;YVgfzjUZn;+vODW*R7Cc`+f6itQd`Se$ zCnK)B7{|Ip()3;LK z-scuWV?uVP&Uj;DIFG|nS1@`+S`U)GTOpQnBXXF8iKq_SBNUl?*fxj^a-1z4o3lUi z^In_Wt%NEar4J*V16|Radp-2=_fyRH~+tfuj;T$E#omE{;42c%_o&z}q z{3=P-ORIc;ozLXiPI7T10z15MXy-NsBLsX|UyTS2*Kq)d2fjfV8Erwr>KfQq~kR))_pRxLOFyfOApY)U= zAM0v0AR?2!9yv8(Cj|d@2EL^1_A;)7V8gVzm3Hi$lCxE};+&i=23>Zlm7W>nEJa zp1=wqN|qATkVD99!{@!(PBt{r=kR@uH}f8z#mZ^(xSctMej^2hdJOgC z*~2O5U!MgG)Af) zgs`2-gEuC61sKu@`SpZi%soj^`L%o1aQ?dBVyMSwQ!3DUB}$T&%!6dQ-s3sm^Gx`_ zGn|2@$fXC5^Y7rX*G!?!P_2@B5g@XI^PwBOx+u?!eX4L5ax_8etj1q@Fd za5e_BolO%j@lckJY4X$=Cz;+4zF-WxfZU@7RykjzXso%Ni1 z)RjHP1JLc^BB?-okCLHXEZ*w}dOxXLH}3Y$mRJ&JJ^!$O>uSX@AFe~1vxOOfnscRJ zg)KqPPn+ZUVSE~3@YqqpQXR-q*Y+O9H(wX5R`Tmo&-4=& z{XFnq%R28m_aln7BZEo8=b4FSmu_91>gKZ}<%S&^Z&3z}{G8EWQ*?X$(h*K*}K#er#OziR02>uA;7L*kGKaFfz_YS z;`X>w&kAnj*zep$LGZvX2@$*OdXTQZ7D@L(`H6%4kvqv#5Kzpd$)prm*ZBZ5Zp`vA z=%Z9MxAiB$2JHlu>*3ZLdVHa5OBxSR=+Wh=Irv$D6tt}y=~>{Uf|(G%u10;}^~)TX zON86JA_I-Sl+bY+794M!a^cn@$n+Eh>a8b#w?bdsKPj@Dy&?^|=k<8^fk#rv{j{*# z-=?HND7`(ubiLFYBu6AXFsB^hO#G^+FV&AbJ0E8tTFX zr#_M@C7K-f!Dq%y;IT75*z2e>{tlO>UMNOYuwBKelV?Ug41O)dimRGG>Cl}^(q5=H z&j@+qL8SIm%hZg0P*N_tSO4(*3`t#t4$s*Lfs<#Zw_aGa&z4Y3X61o|R*sS_8Sc~4 zT934UO~64Vu3$4)q$-N?INtkt24mStGDPu3F3ANz(JEo{8j)}Qh4R@ zm=+L2_NGe|5_Axm?0oXl?;IbU6P#35&KZMzRD4KB&#PB z@GJ{Oi1sL+fjH>O66)W*Gy-c-j8L2mDIr%t@GT?QL~$nE2j7qe0SoXngJKb}Lx#es zur(I1#Qp;^VpzJa%~z59Zc}L_(DOO^JqP|J5iFRM%T4I~7*q8)(L{R5P?28q-1}LB z%}s-QmisFvFjpH(%5uxqP01uLx!KX(g=Cq4sc zL&cyAS=91Z1aN#o2;4dpcyf_fmPd!=LviQ!RD)Z`6->~O^yLxJ;3~)|(>**r<(EFH zgmKC;bQ}o@G5p`twc{15=I6j*6hIp>2ZhtQ7ZU44?*@s^YtFA5!{)rQ;abxdco_GL zoQKZyi>DclEk8O?d;3XsX&!j}IE_}M@)!}R3-<9g5a4h*z@^({Q|3p)q?JffhPXh| zL|$_VDs{hC0Vmbl0OlOW45w~_WYt_@U%I#{taU(DI{{r13ER2L5R{jv9-m5tD%PJ+ zx34^B@=@;##Du|qZu4-pghRH9>-K}OY3(0XgU{nIrRU$dgPv$gUd@OlB#{idkcVq_ zUmhAW`ao}guisvaxjt#M2bpfqy2_j2C!i%hOn&-svK=kH`e0&I#I#j6|EvG{R&G5{ zuO^a9=+@WIvBKE>509uH2q|)&Xc1ZO`;}EeyMQIlR9x2rJjp>{Ma{l75__xpGEzDx zKRV0!ui*z1qxDRKWAd+GJ{u?%j!+gGu{xM{qziq=7&&v_JK&Oh{9^U(mM<=^<67Pc zFz zP2gHp+u_BlRa@^kk9$Tkq0o^#p_eo`2>A=m3kL%YS2Q{+Y*A)4Ir# zaW|+#Gx{Icnj`csM;dw~DX%(MGVI-7*&5V>hB+P*(n8{&TCcJlE@GQ=yf9pvJ(jLt z>;AjPG`ZPZXczJbqc(CdkfSi|gax`u%}9#iv1g26+m z4j*4vawYh2xb%^1bAS0d8VdRltjf>)T2Zq-Q+G-I2&_+vu*N4;zyyKj|&?_%JB;u{-EO!a?>| zi`Iw!5q3|@&pr+GU0Pe3?EkTK#(8sNeRY0cBj-fbR;h0N2P(Ad{wZBUjGDM%U0C-p z4JvkgJe1GIbtkOHrDP&p^xDis1U_8;W28*VA6}@B(L27Ak1>QU`N>$dKeXT_lkr+> zGm{AXj3rc<3$+@{l9i%O@{ z|L{U}DeRiGr5tnWixrP>T@&s|Ctwx@xSjeweeWE zKjKop@S7HVhph?!T?_89_?yRcM+<(f7o+#C?#QGH{IL@5;Mcx>DfLY*H{&){9`F)t zNL-f?Z};K`Gd^2c%c+$sr>Cmrr_oqm3;#c8!T%4jHK(pGe1A=e*t7P#7wU~`@x8U* 
z*_!{^3-#>(K3j94>et%X^{>BvjQ^npzr4LMQ(U#Z`K96O|B@D5Uxo?OTnfgSQ?%j7 zgH;iT} zj_$1TUt@p%0LOoi1;I(1Wt#lwjz69~cBJ1Oe}um|sjtqB{e_c6iJASzN$S4FpYE&s zi<8=&kMkcnNwFipA)mq;jK4Ukf3M}0?;h+psek7re|J)wH!>&w>iGLV!%5zBxs#Ztm3bL*n{NEkC!`dEY*L+Q$Cy zYa1p{IaQ2?j#H7ce*AK5(US2HT;M0lf2`%whrv6wd{D;mH^<-q)mq*>8cO4zv-wl( z&%gL_>H_@0IGBHD=@Tii3|6=Lot-@srY6|~?$krA;$sq15{Ym=n zr+?NvMn7cyb?MFSyB{@!DB1l@NB*n$+|JVfP4DRS=cPaN4E%?VT>0g`-)qb>wo`{gYG~b&X5A|e>h$EW~%;OiTR5`mf+-g%H)p{BlYdav7&z{F^Z@A)SQoH zk@L+Szp48(g>P0IEtbLirxH`=JlVIfQUBM|<@)?s+j*toETyXd)zd}j=@Vq5uU^bS ztN!ex4|Y0*#;fyxDKYy#gf28UQo`DAE1eyD*tGh)LALeA#PUaP1*zhl)8)t4@kcRi ziDPG)^%h1RTbPfm=S+Uv=@fE~wYC1*+UXQNJpIie3oCu~-hu~2BnKmQPiF_C)n|52 z7bE$JKTa1;ML!&E`<}q~x#Rbw|0|t>MuByt(9}erD3^1p?rz7O+`zwe3Za&P@(D@Y zvke8LV|&;6(#{&4c>niK!O^71-zELL_{h{vCmL}`IcZ4+Arksu>J+jy5fVuQdh{Ih z0-NJQw!i9r1RwYrW4`j?+T45<>oY&z-e@@~14q&`U`naGGg_?>Wf8@MQQo$Grj8&uTQ2776Q}|CPd=I%H zDoWRR#XU{>Ld5v%0cfxuzt z2B_&AIYjgu1%7a(9g@BjdOd{1Ia++826HuY7ZhTNyzu)@<_-*Faee@T*<_|^^hUv8h6mz zWFES&q@5tU9A{(>K$97v)w55}J3&DF-gH2SA{p-TNc!!PNlyxFm>yB*1femYoW#oW zY%+-7nW8H&Yza@;%Ex17wFI@3;2K`}V%)vG!X3-WCC-_mC&&>$jt&xL&Yd+-b2fz6 zoP+WR;*ldWkN0oW@48qAzwKFnysu~_6-dtKq0s?OpQS|H5NrSS5~y|=1*PJX2`A#s z?(4F^PFqDHdvIO5y;+IzTP~2iH&juzn)5hDjyR}EeV6tA*w?WW4?9fRgXZBiAYO_L z5-jxOLM3nJ7lR@_0W+cojfMI8z|pb5gg4rRYyeS!AiPTrFu-bzupom|XYMh5^lAQM zXsLow-|)D?exydK-hu8ekmla!U^dXsSFzC@R5Dcb&Dv3$Eg7QFgD)O22QU}YL#p?@ z%<}aqmVVkWkP1H$@w!d{DkG$Cq=!!m8c-C2m>K79=NI{)^-6_rA?_Iksjd(@C z#i|pc`EJD-{&IeO>1RG#Y+QcyTH0}^$~b1&OZ;10hFzoPtHP@lMThUOg5g8l(9cdq z7XHGIQl|lLEBQPw4FXJ9246IwTn3j(gWx)ola8DKV?q^P_OORZaG!_nc)6t)g5C!} zK0g(0gGNW;5H?*|J_5ljYq#br}#FW=B z2P9saK5^0KwTI?;#ffq?Njr5K-@kCSeC{!3wha{tAGLz1i>x;^8b;fZz~ z(Szfw%a*0-r$mk`yfHos7pPEqQA+EcK2cDg5)t<;pJv3ThS z?ACB3D({(R`kt1KiVFeH9ysxCja=RLSdO2d^t=w<-uml~43cZi`tu2^g2lSe8(-eM z7;kO>8SeQ=4N`i9e0!|8Xu%u3Z{Q_|NL8z9otyn(VS66-I{S&Nfr*Q0)3vg9U|DOm7c|A8}Z0pe94$$^1%h+EeB559cbK zZfi-nJbih8G3S!j@N#;czx>fJwTGf-+D+6t_p})(+x~LiP;+j(i|>VM zCKw$;f3fb~l$iJgWbG@`bMo?`vBgf=<9DdNP=YGr&UxpC`4W$dB3jfoi)+koK>Dt!e&$!9nz+}~~J zG^vd}{;Zh$o3Pr}aC_+0Cy@X#&xTlI}!pE5qIdS8`C zyn4Fbr~S)Q@S+Z4Wovu&t;9|5MYu}I-Yc`m*n4FdO&u8`TH@;ftU&mk7;q2yGWd_7Fp zmAZE%NLuxxp{u_@u>S$O9V{Z)+=OOn7w+O0?shOtS3cCv&zEg{i%X7bVnRLWM;$GO zNfSV>jS)9TB3`S5@T4#nD}3jv#+&@CcF+ELd1D6_ zIwUgqQX^M#gZAl}uz5ykd=Gk;3B+MIc5pp*NHA`g8219XYd>xmHx9yP#K{AKnoB!L z1(-%sWVm36(M+%@GQ4*L_HrbCaXo%XFk$B*|4J}~UClY}hn_^nhM9y$ng|%D#?KKs zTgk!S{OHKE-zj`k$p$7)mLSlK=Xar_>q(KnX2M0*69mu2steMfZgkAwDEv3Emnn&Y zvk5Qma*LP7$J^1f=}CX3@MmKm6C?+huuA@kQ>kH>O)e5lL%NYWvu;TTnr{Dv!uLZs%np_CqttQRMO8;efmxI^6wNrStI8XF?J?6=YzuS zy4f6LIs?^52htP4WnxgXh(wJ{PWxQ`fZUfOjLNiZCU@=`p>RZWu9Rk;OthqMbhZd6 z{k=ji-Yj?bW*$K}AMnom-kZBSSWvk+O;RymYcyZmUQ}(9F+Y(@f0H-7k#CY-V3y9S zJDTxhHWjRqYusF5ZC_|(mjAssOHr7vp_pp3Sx8AQu$wEG<0w4coPRF8$i0lwLBiD# zik?K_*M$m=nv1Rl6eCBAz{^E_z3HEuvMQ#Mw_P@Z64$=a+!W-$32;oV+Uy>pe))m4acCZ9t!a2(n{&dfkn zZ=$No+^hdb07w_N=pY-i|ADcCXss(Du_BwYDtjL=Pz1qHviKUS<65xsy0SL^d$Yyi zvM~Fw`5F=^i?cu*v}B>P0_&P5>#@Hov`V|QIYG20+o?#~tV}z#R7(&}8?)CMu~Dn7 zRok^ed$l|(v+qi?AM3SdE3{y{u{{eCKYO-ri?eBKs%!fZZTq%%o3e4+u5;@Vb$hpd zTd{dNurnK+wEDM(OR+8cvPBE1y}Gshdbp5#t|5@PJFB*0tF>gCw2`~H^BTBD3$>XG zmXN@?q$}=JzKGfd z-HWwHo4nXdzU-T(=4-s@3%b-hq3hee_FJXz8@(LsvyNN8_xry;+70>Zy|UZC0DQnm zD!}I(wpq)&^V_2Si@*|Gq5~Yi$os&B8o?91!53P=44l3ge7zhz!XxUzmfOJmtG*<> z!WL@6x{JbWYrrf#!wBladb_(R9KzyD!#up7HY~VftHa9M!$ka^Km55DOs5%K#7r!n zM~u1!tinw^#nk!4(mS|HEW%X0#l~sH`y0bj%*A4Sn_j%P9kH((JjQBVnPrT_U@XLG zyvA}|m~9NhbSuMioX3+{#~_@=JFLfmOqhHuzkWQ#fqck#Imk)>e5Qts$c}uLiu}EB z{K%ACmXRFAlU&J~ER~l$y@eddn;gm~*~wVU$e=9BsN9gG9K@fT%C4M}tBkps{K~XE 
zlCkW-tz65xypXq?!@0c6z)X+5{Kmf=%*OnV!+ghKe9X*DjmgZnc-+j=42{ox#?d^@ z*sLYh%n@ju&EBld3w*+TY|7tU&Q`L`8_~_?yv`Vc&T~r6>>SS&;?8DDs9G$~`1~OB z?5MMx&;AS``}~&u{Lciv9{~-V#az$|eIEzSrS|;C3mwt)(a?ihq)RN(7%d+Z{cs7L z(H;#L8!Zv){Lv)+7a=`M*Id#nJrySn5hK0QFdY>w{SYtz9n&^_6Ep1)HGR`Otq?ig z5Ifz|LR}C)?GHgc)JR>_M~&1%t<)*a)I;sm%M8^?9nLR2&JW$lR9)5Z`@w_^&s)va z`fJAA{M2D>(^5UqXMNLY-O+0u(`^mbZ_UDP*_(2NbKj{VpK9ogkN*_7?imQBBztwpMBAx?a`w> z*rhGerw!4m9nq_;)vdkIuZ`KUUCy&z+p2BYNG!>TjobKq+rACix(&siEZ4!U*~5Ls ztZdxJ{oBd?#mcPK%^lCcJ>B_S-PSG7*qz<%yxrXYZO-2P-QOJE;+@UpUEb4t-ss)T z>b>5_+}`dT%;nC`kl)B-QS}8-vGYJ0zTlCT;K-&$OxX`h`ito z9>@;<;CdY45J-r_9$ z;xJCaGCt!RT;n!A!8o4d2)yGw4!}PCHWItoIbCf{^{u&>Y{$Gq+aUWdg`bSt*XB2&f4m(F08N~>$p1Wv~H`me(S87 z>$)zgyx!}e`s=_xslq<&iCXN&eyGTv?035C%)X}1{_O7i++>~3W-aZT8|~QsrP{vj z_)G0>9NoR`?S|X!zsL}O@owwy9PghV|MC18 z@**FfBwzAIobkNP+$gWKC*Shp`SLLTAD%Kl^VV7OHebamkICbn^NYUo#l7)9AGA0> z^j!?}r7ZMDU+74`+)BUnh~D&w&GJz1=u(f%RA2Rxe)Ybb^;%EqTu;ni|Miz1_ToPF zWUuLFkJr+j_6l3{Zhx9^ANQp>_jGTWc7OMjnfH2sn0(*&biDQh-}ZnnuYX_od3pGV zf0v5C_;uO%jt|L#U)BU4`P~ZnmQR+LpZQ|B`JB(mlW*;n|M{;T`tSVoq|fT6U(Xd! z+o>O_p8xtg8T+!IleAy^H+lQG56i31&pp5UzTW!+{rkWl?81-GXkYx4s{6`MlFZ-y zs(kzo-TKg9>(XD*ja~h>`uy7epOD<&{RjE|;6IPzKmPGp{^pO&*U!<(pZ?R{@K;Uo z+3)_#zy2b9?eq`r_K(t|pZ{tK5M6xD^5u&evuz32EnFwhp*(*PB~GMR(c;91@))j5 zxb0v;f?fWNBw5nrNt7v7u4LKLN`s(4|)vssY-u-*O;dM85Sbq9%)z;I)=ilG|e*gna zNxt3cv(G&Is`E%V%LH`rK?oz1Fg60|gXqAi&eN~K2|M)gLl8p@t3v1u#L%t;(c@4= z7F%@jMHmOvYBBLlJFy}S7rZV;9((lh$Nxwi&%_%eb7(Rh_XBcCCYyA!I3aB;QlcCy zi~Mz1WRq1^r($nBc3Ehn zl~zWkMw_ohJV%Xm%v-1R_FG|_J@Hv^%QY8QaWN$KTz1?2G~EN&b$4ER6NT44dF$18 z-!$(nFJFEG7Whejr2}|igcFXj;NA{a_+f}WWcW0PBewWr04MhCNZ<~26=RS?=1=3t zDi--KJIXxg|C)2q zgZ5l-#zQxK=+QIh_H@>xPTg&nHE(@((d*9b?ng0){pZzb_dVy=k(B*+;!h6#?cR%5 zKIG$LPkwpmBW|8|q@%YU;pwj~{d(@*&3^Ij!{_(=$;CH+Uh;=OfBkRMf1Z8#rFFmh z`0G!0{1(hBp|8;xV-`<@KOg>G9e0Gh$RNTP=<4g;S6nfrW)Q*hg!J__OxjrgJ>9#M%QO5zfoIH4v!QHq#Sp@m9V!6{}@LQ&LW7vIB0FNRS)U=(8+ zw?jrVrg1rFRAU>B!$vp8aW-(2V;x;XM?2<`GkDZvA1}j4KL)Z(Rm9d6JD5K~CbBt% zBq0m2s7OdIvRb#Zq9YIKzer|sFo4u#C%MK*DRFRj+Vf;7HwnrO`s$IUWF=cT*-BUr z<(09drB7yAOI+sUmbv65O?KH!V7}y+!6c?ihFMHxmgJbpOy)9~Sxjd>GnmnoW-qD# zSedmoZ&R(ILYZrbDmR^=~QPV+1XA-z7w8;l;=GCSWkQE5uf?A z<39N*M}Pj)jR6&?8VOoZGaeM7VpQltz1UEPY7wFlo#I3(`b3Lfbcq?&=n*;E(II{m zq(2ntNOxFLlim=eDV^a;S^7ekz7&Nql_>~mTGI>O6sHc<=}r~cQ=b+Ps6hqbP>H%f zqaM|MNmXk6nA%kKJr%0wlj>B(S5>Qa535+Rg4YZ*}9V*!*O5weav=}w*1XX#-0j}1Fqm?c2T5HHr zg0Hqxr0s5H)?44E47kBH-fbns9Rl#T^G9w)9!Y$ z$6Rf}7J=UVV0XzIF!P>wU+GnEz1Z7c^no{T;)QQZ<%?S6d9c1Yz3*$0tG@m6cajkd zrGHba-vBr8y$LSYf*IVt{u;}<4_0u5oi$+!C(ywej@E`byetoY_AisQ9n7CY6&_=0haqdMb%)!4>V#j(M3yyL6(xM4pAvQ~vmF(McLxvNLs*nd^4 z8|El8xnxO3b6VBRGAARr&CtX0 zoI6!#J6j6Rd8X8!`MfAU|2c$XUSV_3ENHO4*=B4;H0T1|Xg@s~(sYvaq~%m;OZQ9B zJ&QA#GcDIm8|~1aW^1UO#%NL(4%4aT(yCd_LsM6+)0lkqUSsVwRM%Ruw=UbLb?s?7;cj*~W%;;$|J&Y4few*=|&|BX{lbavQSV*4?ma zE$+sWyM^e^+P2v(B6q(VMDdol#MGVZdW-hn($4p|30+A;gS+4V+TM4|NICGw5i%pL zHM7BWi}21Vyv!HI&%q(?@3&A=f()m)!;^dQ$x1wx4Tqh@K^`^|EgZKcfB4BQEAlRr z{LANddD2oo@e)_O1xGV zn2)(d{()qlQE&2~w=OdL4*0+eUhD^+1v8+|`s_;M^^DK|yyj=0-`ba98@Qj@8FWu0 z(wE-!zjxkS%0T?$3&(C2`nls#-)Ux)VHVI&88FfyeZb><`D#a=UYIclPH-P(nXrTT zr{DC)Y@YL5*AX)e!3Xu9%nyWce@Cu`_V_zJ^7}GWxV`+_7cM}BjEkVyqY9E>KI$=u zF4(>Uyck;$18G=5E>k|udqBA>yn^5aEvUeY!GcUchNcKV@tX(_96$w05@uk7IUqrZ zu>&{AKx7!bLJL3v{J>5th%%T#8bla7I0Ik+1`SNU>gz!HJGE^Qj)GtVEug?5+!riZ zgCop173@70Y(Y5dysXd)Nf3iB@B>Z&hAKQ4V8{gjKM(^ka0FjK!X#9}(~(gk_H#gh!+gQUz`PEJjP^PMrw2fimb^0iZld_%t(#gNRI4CkNilG3`vn3Ns=r{ zlRQb3Oi7hoNtSF$mwZW>j7gcCNt&!lo4iS!%t@Wx$(yuDpQOfR+(%XLM}Zv3%Tq`I zwY+q_NO%+1+)nQ7PVf9q@C;A!98dBrPxCxa^h{6ngwB90O3wt&w(Jd*fXmV} 
z&C^s)U|diA+)w`OPyhT+01Z$99Z&)-Py;8;&fHKH_06?(P|ys`;UrEcOc0mJLktCp70po{ z-BBLxQ6K$LAPrI>9a17KQX@T5Bu!E!T~a1(QYU>;SA5M^ece}n?N@*OSAY#zfgMND13Wr=yE*JVXmiJe&gimh0Sy;zLRSdHCSj_p{F{aBC|UqAgmZJz9i~*%wX9ctutf zTe#Fy+Q>s%s;ye9y;`izTCLq$uI*Z{tyrm5TBw~_selQfgW9l7TeV$VwryLteOtJV zTe+QEx~*Hgy<5D^TfN;|zU^DT{ae5dT)`b&!Yy3GP1~_0TbgwVl0)0WeO$y%Cs= z&0g)@UheH)@BLoz4PWscU-B(q^F3emO<(oJ-aM&^m0MrZVHR#-7k*(Fj$s*|VH&Ps71jwG&S4$iVIJ;bAO2w= z4q_o5Vj>1&o+x4@PGTiqVkT~4Cw^imj$#=02`R2(E52eZ&SEXzVlM7txT9h(4r4JM wV=^vdGd^P@G%nkpNMkl`V>f`ZMJkDc1-eW12iGTnAI}E}iX8-^I diff --git a/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist b/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist index 9625e105..7c569640 100644 --- a/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist +++ b/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist @@ -21,6 +21,6 @@ CFBundleVersion 1.0 MinimumOSVersion - 11.0 + 12.0 diff --git a/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj b/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj index d030af56..a50a737e 100644 --- a/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj +++ b/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj @@ -168,7 +168,7 @@ 97C146E61CF9000F007C117D /* Project object */ = { isa = PBXProject; attributes = { - LastUpgradeCheck = 1300; + LastUpgradeCheck = 1510; ORGANIZATIONNAME = ""; TargetAttributes = { 331C8080294A63A400263BE5 = { @@ -344,7 +344,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 11.0; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = iphoneos; SUPPORTED_PLATFORMS = iphoneos; @@ -472,7 +472,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 11.0; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; MTL_ENABLE_DEBUG_INFO = YES; ONLY_ACTIVE_ARCH = YES; SDKROOT = iphoneos; @@ -521,7 +521,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 11.0; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = iphoneos; SUPPORTED_PLATFORMS = iphoneos; diff --git a/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme b/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme index e42adcb3..8e3ca5df 100644 --- a/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme +++ b/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme @@ -1,6 +1,6 @@ { - HomeScreenCubit() : super(const HomeScreenState()); + HomeScreenCubit() : super(const HomeScreenState()) { + _updateChain(); + } + + RunnableSequence? 
chain; - void onClientTypeChanged(final ClientType clientType) { - emit(state.copyWith(clientType: clientType, response: '')); + void onProviderChanged(final Provider provider) { + emit( + state.copyWith( + status: HomeScreenStatus.idle, + provider: provider, + response: '', + ), + ); + _updateChain(); } - void onOpenAIKeyChanged(final String openAIKey) { - emit(state.copyWith(openAIKey: openAIKey)); + void onModelChanged(final String model) { + final newModel = { + ...state.model, + state.provider: model, + }; + emit(state.copyWith(model: newModel)); + _updateChain(); } - void onLocalUrlChanged(final String localUrl) { - emit(state.copyWith(localUrl: localUrl)); + void onApiKeyChanged(final String apiKey) { + final newApiKey = { + ...state.apiKey, + state.provider: apiKey, + }; + emit(state.copyWith(apiKey: newApiKey)); + _updateChain(); + } + + void onBaseUrlChanged(final String baseUrl) { + final newBaseUrl = { + ...state.baseUrl, + state.provider: baseUrl, + }; + emit(state.copyWith(baseUrl: newBaseUrl)); + _updateChain(); } void onQueryChanged(final String query) { @@ -27,68 +62,106 @@ class HomeScreenCubit extends Cubit { } Future onSubmitPressed() async { - final config = _getClientConfig(); - if (config == null) { - return; - } - final (apiKey, baseUrl) = config; + if (!_validateInput()) return; + emit(state.copyWith(status: HomeScreenStatus.generating, response: '')); - final query = state.query; - if (query == null || query.isEmpty) { + assert(chain != null); + final stream = chain!.stream(state.query).handleError(_onErrorGenerating); + await for (final result in stream) { emit( state.copyWith( status: HomeScreenStatus.idle, - error: HomeScreenError.queryEmpty, + response: (state.response) + result, ), ); - return; } + } - emit(state.copyWith(status: HomeScreenStatus.generating, response: '')); + bool _validateInput() { + final provider = state.provider; + if (provider.isRemote && (state.apiKey[provider] ?? '').isEmpty) { + emit( + state.copyWith( + status: HomeScreenStatus.idle, + error: HomeScreenError.apiKeyEmpty, + ), + ); + return false; + } - final llm = ChatOpenAI( - apiKey: apiKey, - baseUrl: baseUrl ?? '', - ); + if (state.query.isEmpty) { + emit( + state.copyWith( + status: HomeScreenStatus.idle, + error: HomeScreenError.queryEmpty, + ), + ); + return false; + } - final result = await llm([ChatMessage.humanText(query)]); - emit( - state.copyWith( - status: HomeScreenStatus.idle, - response: result.content.trim(), - ), - ); + return true; } - (String? apiKey, String? baseUrl)? _getClientConfig() { - final clientType = state.clientType; + void _updateChain() { + try { + final provider = state.provider; + final model = state.model; + final apiKey = state.apiKey; - if (clientType == ClientType.openAI) { - final openAIKey = state.openAIKey; - if (openAIKey == null || openAIKey.isEmpty) { - emit( - state.copyWith( - status: HomeScreenStatus.idle, - error: HomeScreenError.openAIKeyEmpty, + final chatModel = switch (provider) { + Provider.googleAI => ChatGoogleGenerativeAI( + apiKey: apiKey[provider] ?? '', + baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl, + defaultOptions: ChatGoogleGenerativeAIOptions( + model: model[provider] ?? provider.defaultModel, + ), + ), + Provider.mistral => ChatMistralAI( + apiKey: apiKey[provider] ?? '', + baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl, + defaultOptions: ChatMistralAIOptions( + model: model[provider] ?? provider.defaultModel, + ), + ), + Provider.openAI => ChatOpenAI( + apiKey: apiKey[provider] ?? 
'', + baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl, + defaultOptions: ChatOpenAIOptions( + model: model[provider] ?? provider.defaultModel, + ), ), - ); - return null; - } - - return (openAIKey, null); - } else { - final localUrl = state.localUrl; - if (localUrl == null || localUrl.isEmpty) { - emit( - state.copyWith( - status: HomeScreenStatus.idle, - error: HomeScreenError.localUrlEmpty, + Provider.ollama => ChatOllama( + baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl, + defaultOptions: ChatOllamaOptions( + model: model[provider] ?? provider.defaultModel, + ), ), - ); - return null; - } + } as BaseChatModel; - return (null, localUrl); + chain?.close(); + chain = Runnable.getMapFromInput('query') + .pipe( + ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'Your are a helpful assistant. Reply to the user using Markdown.', + ), + (ChatMessageType.human, '{query}'), + ]), + ) + .pipe(chatModel) + .pipe(const StringOutputParser()); + } catch (_) { + // Ignore invalid base URL exceptions } } + + void _onErrorGenerating(final Object error) { + emit( + state.copyWith( + status: HomeScreenStatus.idle, + error: HomeScreenError.generationError, + ), + ); + } } diff --git a/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart b/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart index d76e34dd..c5a95466 100644 --- a/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart +++ b/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart @@ -6,36 +6,40 @@ class HomeScreenState extends Equatable { const HomeScreenState({ this.status = HomeScreenStatus.idle, this.error, - this.clientType = ClientType.openAI, - this.openAIKey, - this.localUrl, - this.query, - this.response, + this.provider = Provider.ollama, + this.model = const {}, + this.apiKey = const {}, + this.baseUrl = const {}, + this.query = '', + this.response = '', }); final HomeScreenStatus status; final HomeScreenError? error; - final ClientType clientType; - final String? openAIKey; - final String? localUrl; - final String? query; - final String? response; + final Provider provider; + final Map model; + final Map apiKey; + final Map baseUrl; + final String query; + final String response; HomeScreenState copyWith({ final HomeScreenStatus? status, final HomeScreenError? error, - final ClientType? clientType, - final String? openAIKey, - final String? localUrl, + final Provider? provider, + final Map? model, + final Map? apiKey, + final Map? baseUrl, final String? query, final String? response, }) { return HomeScreenState( status: status ?? this.status, error: error, - clientType: clientType ?? this.clientType, - openAIKey: openAIKey ?? this.openAIKey, - localUrl: localUrl ?? this.localUrl, + provider: provider ?? this.provider, + model: model ?? this.model, + apiKey: apiKey ?? this.apiKey, + baseUrl: baseUrl ?? this.baseUrl, query: query ?? this.query, response: response ?? 
this.response, ); @@ -45,9 +49,10 @@ class HomeScreenState extends Equatable { List get props => [ status, error, - clientType, - openAIKey, - localUrl, + provider, + model, + apiKey, + baseUrl, query, response, ]; @@ -59,12 +64,9 @@ enum HomeScreenStatus { } enum HomeScreenError { - openAIKeyEmpty, - localUrlEmpty, + modelEmpty, + apiKeyEmpty, + baseUrlEmpty, queryEmpty, -} - -enum ClientType { - openAI, - local, + generationError, } diff --git a/examples/hello_world_flutter/lib/home/bloc/providers.dart b/examples/hello_world_flutter/lib/home/bloc/providers.dart new file mode 100644 index 00000000..c92b87af --- /dev/null +++ b/examples/hello_world_flutter/lib/home/bloc/providers.dart @@ -0,0 +1,40 @@ +// ignore_for_file: public_member_api_docs + +enum Provider { + googleAI( + name: 'GoogleAI', + defaultModel: 'gemini-1.5-pro', + defaultBaseUrl: 'https://generativelanguage.googleapis.com/v1beta', + isRemote: true, + ), + mistral( + name: 'Mistral', + defaultModel: 'mistral-small', + defaultBaseUrl: 'https://api.mistral.ai/v1', + isRemote: true, + ), + openAI( + name: 'OpenAI', + defaultModel: 'gpt-4o', + defaultBaseUrl: 'https://api.openai.com/v1', + isRemote: true, + ), + ollama( + name: 'Ollama', + defaultModel: 'llama3', + defaultBaseUrl: 'http://localhost:11434/api', + isRemote: false, + ); + + const Provider({ + required this.name, + required this.defaultModel, + required this.defaultBaseUrl, + required this.isRemote, + }); + + final String name; + final String defaultModel; + final String defaultBaseUrl; + final bool isRemote; +} diff --git a/examples/hello_world_flutter/lib/home/home_screen.dart b/examples/hello_world_flutter/lib/home/home_screen.dart index 2b46a017..5b117845 100644 --- a/examples/hello_world_flutter/lib/home/home_screen.dart +++ b/examples/hello_world_flutter/lib/home/home_screen.dart @@ -1,8 +1,10 @@ // ignore_for_file: public_member_api_docs import 'package:flutter/material.dart'; import 'package:flutter_bloc/flutter_bloc.dart'; +import 'package:flutter_markdown/flutter_markdown.dart'; import 'bloc/home_screen_cubit.dart'; +import 'bloc/providers.dart'; class HomeScreen extends StatelessWidget { const HomeScreen({super.key}); @@ -27,10 +29,7 @@ class _Scaffold extends StatelessWidget { backgroundColor: theme.colorScheme.inversePrimary, title: const Text('🦜️🔗 LangChain.dart'), ), - body: const Padding( - padding: EdgeInsets.all(16), - child: _Body(), - ), + body: const _Body(), ); } } @@ -38,146 +37,203 @@ class _Scaffold extends StatelessWidget { class _Body extends StatelessWidget { const _Body(); - @override - Widget build(final BuildContext context) { - return BlocBuilder( - buildWhen: (final previous, final current) => - previous.clientType != current.clientType, - builder: (final context, final state) { - return Column( - mainAxisSize: MainAxisSize.min, - crossAxisAlignment: CrossAxisAlignment.start, - children: [ - _ClientTypeSelector(state.clientType), - const SizedBox(height: 16), - if (state.clientType == ClientType.openAI) - const _OpenAIKeyTextField() - else - const _LocalUrlTextField(), - const SizedBox(height: 16), - const _QueryTextField(), - const SizedBox(height: 16), - const _SubmitButton(), - const SizedBox(height: 12), - const Divider(), - const SizedBox(height: 16), - const _Response(), - ], - ); - }, - ); - } -} - -class _ClientTypeSelector extends StatelessWidget { - const _ClientTypeSelector(this.selected); - - final ClientType selected; - @override Widget build(final BuildContext context) { final cubit = context.read(); - return Center( - 
child: SegmentedButton( - segments: const >[ - ButtonSegment( - value: ClientType.openAI, - label: Text('OpenAI'), - icon: Icon(Icons.cloud_outlined), - ), - ButtonSegment( - value: ClientType.local, - label: Text('Local'), - icon: Icon(Icons.install_desktop_outlined), + return BlocListener( + listenWhen: (final previous, final current) => + previous.error != current.error, + listener: (final context, final state) { + if (state.error == HomeScreenError.generationError) { + ScaffoldMessenger.of(context).showSnackBar( + const SnackBar( + content: Text( + 'An error occurred while generating the response', + ), + ), + ); + } + }, + child: SingleChildScrollView( + child: Padding( + padding: const EdgeInsets.all(16), + child: Column( + mainAxisSize: MainAxisSize.min, + crossAxisAlignment: CrossAxisAlignment.start, + children: [ + const _ProviderSelector(), + const SizedBox(height: 16), + Row( + crossAxisAlignment: CrossAxisAlignment.start, + children: [ + Expanded(child: _ApiKeyTextField(cubit)), + const SizedBox(width: 16), + Expanded(child: _BaseUrlTextField(cubit)), + ], + ), + const SizedBox(height: 16), + _ModelTextField(cubit), + const SizedBox(height: 16), + _QueryTextField(cubit), + const SizedBox(height: 16), + const _SubmitButton(), + const SizedBox(height: 12), + const Divider(), + const SizedBox(height: 16), + const _Response(), + ], ), - ], - selected: {selected}, - onSelectionChanged: (final Set newSelection) { - cubit.onClientTypeChanged(newSelection.first); - }, + ), ), ); } } -class _OpenAIKeyTextField extends StatelessWidget { - const _OpenAIKeyTextField(); +class _ProviderSelector extends StatelessWidget { + const _ProviderSelector(); @override Widget build(final BuildContext context) { final cubit = context.read(); return BlocBuilder( buildWhen: (final previous, final current) => - previous.error != current.error, + previous.provider != current.provider, builder: (final context, final state) { - return TextField( - controller: TextEditingController(text: state.openAIKey), - decoration: InputDecoration( - prefixIcon: const Icon(Icons.password), - labelText: 'OpenAI API key', - filled: true, - errorText: state.error == HomeScreenError.openAIKeyEmpty - ? 'OpenAI API key cannot be empty' - : null, + return Center( + child: SegmentedButton( + segments: Provider.values + .map( + (final provider) => ButtonSegment( + value: provider, + label: Text(provider.name), + icon: Icon( + provider.isRemote + ? Icons.cloud_outlined + : Icons.install_desktop_outlined, + ), + ), + ) + .toList(), + selected: {state.provider}, + onSelectionChanged: (final Set newSelection) { + cubit.onProviderChanged(newSelection.first); + }, ), - obscureText: true, - onChanged: cubit.onOpenAIKeyChanged, ); }, ); } } -class _LocalUrlTextField extends StatelessWidget { - const _LocalUrlTextField(); +class _ModelTextField extends _BaseTextField { + const _ModelTextField(this.cubit); + + final HomeScreenCubit cubit; @override - Widget build(final BuildContext context) { - final cubit = context.read(); - return BlocBuilder( - buildWhen: (final previous, final current) => - previous.error != current.error, - builder: (final context, final state) { - return TextField( - controller: TextEditingController(text: state.localUrl), - decoration: InputDecoration( - prefixIcon: const Icon(Icons.link), - labelText: 'Local URL', - filled: true, - errorText: state.error == HomeScreenError.localUrlEmpty - ? 
'Local URL cannot be empty' - : null, - ), - onChanged: cubit.onLocalUrlChanged, - ); - }, - ); - } + String get labelText => 'Model name'; + + @override + bool get obscureText => false; + + @override + IconData get prefixIcon => Icons.link; + + @override + HomeScreenError get errorType => HomeScreenError.modelEmpty; + + @override + String get errorText => 'Model name cannot be empty'; + + @override + String onProviderChanged(final HomeScreenState state) => + state.model[state.provider] ?? state.provider.defaultModel; + + @override + void onTextChanged(final String value) => cubit.onModelChanged(value); } -class _QueryTextField extends StatelessWidget { - const _QueryTextField(); +class _ApiKeyTextField extends _BaseTextField { + const _ApiKeyTextField(this.cubit); + + final HomeScreenCubit cubit; @override - Widget build(final BuildContext context) { - final cubit = context.read(); - return BlocBuilder( - buildWhen: (final previous, final current) => - previous.error != current.error, - builder: (final context, final state) { - return TextField( - decoration: InputDecoration( - labelText: 'Enter question', - filled: true, - errorText: state.error == HomeScreenError.queryEmpty - ? 'Question cannot be empty' - : null, - ), - onChanged: cubit.onQueryChanged, - ); - }, - ); - } + String get labelText => 'API key'; + + @override + bool get obscureText => true; + + @override + IconData get prefixIcon => Icons.password; + + @override + HomeScreenError get errorType => HomeScreenError.apiKeyEmpty; + + @override + String get errorText => 'Api API key cannot be empty'; + + @override + String onProviderChanged(final HomeScreenState state) => + state.apiKey[state.provider] ?? ''; + + @override + void onTextChanged(final String value) => cubit.onApiKeyChanged(value); +} + +class _BaseUrlTextField extends _BaseTextField { + const _BaseUrlTextField(this.cubit); + + final HomeScreenCubit cubit; + + @override + String get labelText => 'Base URL'; + + @override + bool get obscureText => false; + + @override + IconData get prefixIcon => Icons.language; + + @override + HomeScreenError get errorType => HomeScreenError.baseUrlEmpty; + + @override + String get errorText => 'Base URL cannot be empty'; + + @override + String onProviderChanged(final HomeScreenState state) => + state.baseUrl[state.provider] ?? state.provider.defaultBaseUrl; + + @override + void onTextChanged(final String value) => cubit.onBaseUrlChanged(value); +} + +class _QueryTextField extends _BaseTextField { + const _QueryTextField(this.cubit); + + final HomeScreenCubit cubit; + + @override + String get labelText => 'Enter question'; + + @override + bool get obscureText => false; + + @override + IconData get prefixIcon => Icons.question_answer; + + @override + HomeScreenError get errorType => HomeScreenError.queryEmpty; + + @override + String get errorText => 'Question cannot be empty'; + + @override + String onProviderChanged(final HomeScreenState state) => ''; + + @override + void onTextChanged(final String value) => cubit.onQueryChanged(value); } class _SubmitButton extends StatelessWidget { @@ -211,7 +267,7 @@ class _Response extends StatelessWidget { return BlocBuilder( builder: (final context, final state) { final response = state.response; - if (response == null || response.isEmpty) { + if (response.isEmpty) { return const SizedBox.shrink(); } @@ -224,8 +280,10 @@ class _Response extends StatelessWidget { 'Response', style: theme.textTheme.headlineSmall, ), - SelectableText( - state.response ?? 
'', + Markdown( + data: state.response, + shrinkWrap: true, + padding: EdgeInsets.zero, ), ], ); @@ -233,3 +291,64 @@ class _Response extends StatelessWidget { ); } } + +abstract class _BaseTextField extends StatefulWidget { + const _BaseTextField(); + + String get labelText; + + bool get obscureText; + + IconData get prefixIcon; + + HomeScreenError get errorType; + + String get errorText; + + String onProviderChanged(final HomeScreenState state); + + void onTextChanged(final String value); + + @override + _BaseTextFieldState createState() => _BaseTextFieldState(); +} + +class _BaseTextFieldState extends State<_BaseTextField> { + late TextEditingController _controller; + + @override + void initState() { + super.initState(); + _controller = TextEditingController(); + } + + @override + Widget build(BuildContext context) { + return BlocBuilder( + buildWhen: (previous, current) => + previous.provider != current.provider || + previous.error != current.error, + builder: (context, state) { + _controller.text = widget.onProviderChanged(state); + return TextField( + controller: _controller, + obscureText: widget.obscureText, + decoration: InputDecoration( + prefixIcon: Icon(widget.prefixIcon), + labelText: widget.labelText, + filled: true, + errorText: + state.error == widget.errorType ? widget.errorText : null, + ), + onChanged: widget.onTextChanged, + ); + }, + ); + } + + @override + void dispose() { + _controller.dispose(); + super.dispose(); + } +} diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index ecb15bcc..a402383b 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -1,6 +1,22 @@ # Generated by pub # See https://dart.dev/tools/pub/glossary#lockfile packages: + _discoveryapis_commons: + dependency: transitive + description: + name: _discoveryapis_commons + sha256: f8bb1fdbd77f3d5c1d62b5b0eca75fbf1e41bf4f6c62628f880582e2182ae45d + url: "https://pub.dev" + source: hosted + version: "1.0.6" + args: + dependency: transitive + description: + name: args + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" + url: "https://pub.dev" + source: hosted + version: "2.5.0" async: dependency: transitive description: @@ -94,6 +110,14 @@ packages: url: "https://pub.dev" source: hosted version: "8.1.5" + flutter_markdown: + dependency: "direct main" + description: + name: flutter_markdown + sha256: "04c4722cc36ec5af38acc38ece70d22d3c2123c61305d555750a091517bbe504" + url: "https://pub.dev" + source: hosted + version: "0.6.23" freezed_annotation: dependency: transitive description: @@ -102,6 +126,46 @@ packages: url: "https://pub.dev" source: hosted version: "2.4.1" + gcloud: + dependency: transitive + description: + name: gcloud + sha256: e9501083036d5f94027ce5afddd8ddae9b04121cf2fc6036b2cdd5663b52fca7 + url: "https://pub.dev" + source: hosted + version: "0.8.12" + google_generative_ai: + dependency: transitive + description: + name: google_generative_ai + sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be + url: "https://pub.dev" + source: hosted + version: "0.4.0" + google_identity_services_web: + dependency: transitive + description: + name: google_identity_services_web + sha256: "9482364c9f8b7bd36902572ebc3a7c2b5c8ee57a9c93e6eb5099c1a9ec5265d8" + url: "https://pub.dev" + source: hosted + version: "0.3.1+1" + googleapis: + dependency: transitive + description: + name: googleapis + sha256: "8a8c311723162af077ca73f94b823b97ff68770d966e29614d20baca9fdb490a" + 
url: "https://pub.dev" + source: hosted + version: "12.0.0" + googleapis_auth: + dependency: transitive + description: + name: googleapis_auth + sha256: befd71383a955535060acde8792e7efc11d2fccd03dd1d3ec434e85b68775938 + url: "https://pub.dev" + source: hosted + version: "1.6.0" http: dependency: transitive description: @@ -148,6 +212,27 @@ packages: relative: true source: path version: "0.3.1" + langchain_google: + dependency: "direct main" + description: + path: "../../packages/langchain_google" + relative: true + source: path + version: "0.5.0" + langchain_mistralai: + dependency: "direct main" + description: + path: "../../packages/langchain_mistralai" + relative: true + source: path + version: "0.2.0+1" + langchain_ollama: + dependency: "direct main" + description: + path: "../../packages/langchain_ollama" + relative: true + source: path + version: "0.2.1+1" langchain_openai: dependency: "direct main" description: @@ -163,6 +248,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.1" + markdown: + dependency: transitive + description: + name: markdown + sha256: ef2a1298144e3f985cc736b22e0ccdaf188b5b3970648f2d9dc13efd1d9df051 + url: "https://pub.dev" + source: hosted + version: "7.2.2" material_color_utilities: dependency: transitive description: @@ -179,6 +272,13 @@ packages: url: "https://pub.dev" source: hosted version: "1.12.0" + mistralai_dart: + dependency: "direct overridden" + description: + path: "../../packages/mistralai_dart" + relative: true + source: path + version: "0.0.3+1" nested: dependency: transitive description: @@ -187,6 +287,13 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.0" + ollama_dart: + dependency: "direct overridden" + description: + path: "../../packages/ollama_dart" + relative: true + source: path + version: "0.1.0+1" openai_dart: dependency: "direct overridden" description: @@ -210,6 +317,14 @@ packages: url: "https://pub.dev" source: hosted version: "6.1.1" + retry: + dependency: transitive + description: + name: retry + sha256: "822e118d5b3aafed083109c72d5f484c6dc66707885e07c0fbcb8b986bba7efc" + url: "https://pub.dev" + source: hosted + version: "3.1.2" rxdart: dependency: transitive description: @@ -279,6 +394,13 @@ packages: url: "https://pub.dev" source: hosted version: "2.1.4" + vertex_ai: + dependency: "direct overridden" + description: + path: "../../packages/vertex_ai" + relative: true + source: path + version: "0.1.0" web: dependency: transitive description: @@ -289,4 +411,4 @@ packages: version: "0.5.1" sdks: dart: ">=3.3.0 <4.0.0" - flutter: ">=1.16.0" + flutter: ">=3.19.0" diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index 9fc3a925..6d125283 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -11,7 +11,11 @@ dependencies: sdk: flutter equatable: ^2.0.5 flutter_bloc: ^8.1.5 + flutter_markdown: ^0.6.22 langchain: ^0.7.1 + langchain_google: ^0.5.0 + langchain_mistralai: ^0.2.0+1 + langchain_ollama: ^0.2.1+1 langchain_openai: ^0.6.1+1 flutter: diff --git a/examples/hello_world_flutter/pubspec_overrides.yaml b/examples/hello_world_flutter/pubspec_overrides.yaml index 93b5421a..d5192892 100644 --- a/examples/hello_world_flutter/pubspec_overrides.yaml +++ b/examples/hello_world_flutter/pubspec_overrides.yaml @@ -1,10 +1,22 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core +# melos_managed_dependency_overrides: 
langchain,langchain_openai,openai_dart,langchain_core,langchain_google,langchain_mistralai,langchain_ollama,mistralai_dart,ollama_dart,vertex_ai dependency_overrides: langchain: path: ../../packages/langchain langchain_core: path: ../../packages/langchain_core + langchain_google: + path: ../../packages/langchain_google + langchain_mistralai: + path: ../../packages/langchain_mistralai + langchain_ollama: + path: ../../packages/langchain_ollama langchain_openai: path: ../../packages/langchain_openai + mistralai_dart: + path: ../../packages/mistralai_dart + ollama_dart: + path: ../../packages/ollama_dart openai_dart: path: ../../packages/openai_dart + vertex_ai: + path: ../../packages/vertex_ai diff --git a/examples/hello_world_flutter/web/flutter_bootstrap.js b/examples/hello_world_flutter/web/flutter_bootstrap.js new file mode 100644 index 00000000..8ce49d8a --- /dev/null +++ b/examples/hello_world_flutter/web/flutter_bootstrap.js @@ -0,0 +1,12 @@ +{{flutter_js}} +{{flutter_build_config}} + +_flutter.loader.load({ + serviceWorkerSettings: { + serviceWorkerVersion: {{flutter_service_worker_version}}, + }, + onEntrypointLoaded: async function(engineInitializer) { + const appRunner = await engineInitializer.initializeEngine({useColorEmoji: true}); + await appRunner.runApp(); + }, +}); diff --git a/examples/hello_world_flutter/web/index.html b/examples/hello_world_flutter/web/index.html index add98e6a..68ffe01a 100644 --- a/examples/hello_world_flutter/web/index.html +++ b/examples/hello_world_flutter/web/index.html @@ -1,59 +1,25 @@ - + + + + - This is a placeholder for base href that will be replaced by the value of - the `--base-href` argument provided to `flutter build`. - --> - + + - - - - - - - - - - - - - - hello_world_flutter - - - - - + Hello World Flutter + - + diff --git a/examples/hello_world_flutter/web/manifest.json b/examples/hello_world_flutter/web/manifest.json index ab44f4f1..2332c807 100644 --- a/examples/hello_world_flutter/web/manifest.json +++ b/examples/hello_world_flutter/web/manifest.json @@ -1,11 +1,11 @@ { "name": "hello_world_flutter", - "short_name": "hello_world_flutter", + "short_name": "Hello World Flutter", "start_url": ".", "display": "standalone", "background_color": "#0175C2", "theme_color": "#0175C2", - "description": "A new Flutter project.", + "description": "A sample Flutter app integrating LangChain.", "orientation": "portrait-primary", "prefer_related_applications": false, "icons": [ From 186ece5b79cb7e8d77c7cbad5ee362603bef8c3f Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 29 May 2024 17:51:03 +0200 Subject: [PATCH 147/251] refactor: Reformat OpenAI OpenAPI specs (#443) --- packages/openai_dart/oas/openapi_curated.yaml | 78 ++-- .../openai_dart/oas/openapi_official.yaml | 382 +++++++++--------- 2 files changed, 230 insertions(+), 230 deletions(-) diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index b1a945bc..f3eb8a26 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1027,7 +1027,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -1153,7 +1153,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -1169,7 +1169,7 @@ paths: description: "Filter by file status. 
One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -1371,7 +1371,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -1387,7 +1387,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -1963,7 +1963,7 @@ components: `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - + `none` is the default when no tools are present. `auto` is the default if tools are present. oneOf: - type: string @@ -1972,7 +1972,7 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" user: *end_user_param_configuration function_call: @@ -2808,7 +2808,7 @@ components: type: string description: | The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. - enum: ["wandb"] + enum: [ "wandb" ] wandb: id: FineTuningIntegrationWandB type: object @@ -2943,7 +2943,7 @@ components: object: type: string description: The object type, which is always "list". - enum: [list] + enum: [ list ] first_id: type: string description: The ID of the first checkpoint in the list. @@ -3036,7 +3036,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job.checkpoint". - enum: [fine_tuning.job.checkpoint] + enum: [ fine_tuning.job.checkpoint ] required: - created_at - fine_tuning_job_id @@ -3442,7 +3442,7 @@ components: title: AssistantResponseFormatMode description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsResponseFormat" required: - id @@ -3555,7 +3555,7 @@ components: title: CreateAssistantResponseFormatMode description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsResponseFormat" required: - model @@ -3635,7 +3635,7 @@ components: title: ModifyAssistantResponseFormatMode description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsResponseFormat" DeleteAssistantResponse: type: object @@ -3730,7 +3730,7 @@ components: type: type: string title: AssistantsToolType - enum: ["function", "code_interpreter", "file_search"] + enum: [ "function", "code_interpreter", "file_search" ] description: The type of the tool. 
If type is `function`, the function name must be set function: $ref: "#/components/schemas/AssistantsFunctionCallOption" @@ -3752,7 +3752,7 @@ components: type: type: string title: AssistantsResponseFormatType - enum: ["text", "json_object"] + enum: [ "text", "json_object" ] example: "json_object" default: "text" description: Must be one of `text` or `json_object`. @@ -3764,7 +3764,7 @@ components: type: string name: TruncationStrategy description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: ["auto", "last_messages"] + enum: [ "auto", "last_messages" ] last_messages: type: integer description: The number of most recent messages from the thread when constructing the context for the run. @@ -3842,7 +3842,7 @@ components: code: type: string description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - enum: ["server_error", "rate_limit_exceeded", "invalid_prompt"] + enum: [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] message: type: string description: A human-readable description of the error. @@ -3877,7 +3877,7 @@ components: reason: description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. type: string - enum: ["max_completion_tokens", "max_prompt_tokens"] + enum: [ "max_completion_tokens", "max_prompt_tokens" ] model: description: The model that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. type: string @@ -3936,21 +3936,21 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" response_format: description: | Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: - type: string title: RunObjectResponseFormatMode description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsResponseFormat" required: - id @@ -4113,7 +4113,7 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" response_format: description: | @@ -4127,7 +4127,7 @@ components: title: CreateRunRequestResponseFormatMode description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsResponseFormat" stream: type: boolean @@ -4343,7 +4343,7 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" response_format: description: | @@ -4357,7 +4357,7 @@ components: title: CreateThreadAndRunRequestResponseFormatMode description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsResponseFormat" stream: type: boolean @@ -4436,7 +4436,7 @@ components: type: array description: | A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -4544,7 +4544,7 @@ components: description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. type: string nullable: true - enum: ["in_progress", "incomplete", "completed"] + enum: [ "in_progress", "incomplete", "completed" ] incomplete_details: id: MessageIncompleteDetails type: object @@ -4641,7 +4641,7 @@ components: object: description: The object type, which is always `thread.message.delta`. type: string - enum: ["thread.message.delta"] + enum: [ "thread.message.delta" ] delta: $ref: "#/components/schemas/MessageDelta" required: @@ -5166,7 +5166,7 @@ components: object: description: The object type, which is always `thread.run.step.delta`. type: string - enum: ["thread.run.step.delta"] + enum: [ "thread.run.step.delta" ] delta: $ref: "#/components/schemas/RunStepDelta" required: @@ -5580,7 +5580,7 @@ components: anchor: description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." type: string - enum: ["last_active_at"] + enum: [ "last_active_at" ] days: description: The number of days after the anchor time that the vector store will expire. type: integer @@ -5639,7 +5639,7 @@ components: description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. type: string name: VectorStoreStatus - enum: ["expired", "in_progress", "completed"] + enum: [ "expired", "in_progress", "completed" ] expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" expires_at: @@ -5772,7 +5772,7 @@ components: description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. 
type: string title: VectorStoreFileStatus - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] last_error: id: VectorStoreFileLastError type: object @@ -5879,7 +5879,7 @@ components: status: description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. type: string - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] file_counts: type: object description: The number of files per status. @@ -6128,11 +6128,11 @@ components: nullable: true BatchEndpoint: type: string - enum: ["/v1/chat/completions", "/v1/embeddings", "/v1/completions"] + enum: [ "/v1/chat/completions", "/v1/embeddings", "/v1/completions" ] description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. BatchCompletionWindow: type: string - enum: ["24h"] + enum: [ "24h" ] description: The time frame within which the batch should be processed. Currently only `24h` is supported. Batch: type: object @@ -6142,7 +6142,7 @@ components: type: string object: type: string - enum: [batch] + enum: [ batch ] description: The object type, which is always `batch`. endpoint: $ref: "#/components/schemas/BatchEndpoint" @@ -6262,7 +6262,7 @@ components: description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. method: type: string - enum: ["POST"] + enum: [ "POST" ] description: The HTTP method to be used for the request. Currently only `POST` is supported. url: type: string @@ -6327,7 +6327,7 @@ components: object: type: string description: The object type, which is always `list`. - enum: [list] + enum: [ list ] required: - object - data diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index fa38d7f7..2f18ad09 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -2608,7 +2608,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: &pagination_after_param_description | @@ -3457,7 +3457,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -4300,7 +4300,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -5409,7 +5409,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -5615,7 +5615,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -5990,7 +5990,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -6006,7 +6006,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." 
schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -6552,7 +6552,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -6568,7 +6568,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -6659,11 +6659,11 @@ paths: Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/requestInput), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. endpoint: type: string - enum: ["/v1/chat/completions", "/v1/embeddings", "/v1/completions"] + enum: [ "/v1/chat/completions", "/v1/embeddings", "/v1/completions" ] description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. completion_window: type: string - enum: ["24h"] + enum: [ "24h" ] description: The time frame within which the batch should be processed. Currently only `24h` is supported. metadata: type: object @@ -7040,7 +7040,7 @@ components: properties: object: type: string - enum: [list] + enum: [ list ] data: type: array items: @@ -7071,7 +7071,7 @@ components: anyOf: - type: string - type: string - enum: ["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"] + enum: [ "gpt-3.5-turbo-instruct", "davinci-002", "babbage-002" ] x-oaiTypeLabel: string prompt: description: &completions_prompt_description | @@ -7283,7 +7283,7 @@ components: The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, or `content_filter` if content was omitted due to a flag from our content filters. - enum: ["stop", "length", "content_filter"] + enum: [ "stop", "length", "content_filter" ] index: type: integer logprobs: @@ -7325,7 +7325,7 @@ components: object: type: string description: The object type, which is always "text_completion" - enum: [text_completion] + enum: [ text_completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -7370,7 +7370,7 @@ components: properties: type: type: string - enum: ["image_url"] + enum: [ "image_url" ] description: The type of the content part. image_url: type: object @@ -7382,7 +7382,7 @@ components: detail: type: string description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - url @@ -7396,7 +7396,7 @@ components: properties: type: type: string - enum: ["text"] + enum: [ "text" ] description: The type of the content part. text: type: string @@ -7423,7 +7423,7 @@ components: type: string role: type: string - enum: ["system"] + enum: [ "system" ] description: The role of the messages author, in this case `system`. 
name: type: string @@ -7452,7 +7452,7 @@ components: x-oaiExpandable: true role: type: string - enum: ["user"] + enum: [ "user" ] description: The role of the messages author, in this case `user`. name: type: string @@ -7472,7 +7472,7 @@ components: The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. role: type: string - enum: ["assistant"] + enum: [ "assistant" ] description: The role of the messages author, in this case `assistant`. name: type: string @@ -7502,7 +7502,7 @@ components: properties: role: type: string - enum: ["tool"] + enum: [ "tool" ] description: The role of the messages author, in this case `tool`. content: type: string @@ -7522,7 +7522,7 @@ components: properties: role: type: string - enum: ["function"] + enum: [ "function" ] description: The role of the messages author, in this case `function`. content: nullable: true @@ -7572,7 +7572,7 @@ components: properties: type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: $ref: "#/components/schemas/FunctionObject" @@ -7609,7 +7609,7 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" x-oaiExpandable: true @@ -7619,7 +7619,7 @@ components: properties: type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7648,7 +7648,7 @@ components: description: The ID of the tool call. type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7678,7 +7678,7 @@ components: description: The ID of the tool call. type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7727,7 +7727,7 @@ components: $ref: "#/components/schemas/ChatCompletionMessageToolCalls" role: type: string - enum: ["assistant"] + enum: [ "assistant" ] description: The role of the author of this message. function_call: type: object @@ -7772,7 +7772,7 @@ components: $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" role: type: string - enum: ["system", "user", "assistant", "tool"] + enum: [ "system", "user", "assistant", "tool" ] description: The role of the author of this message. CreateChatCompletionRequest: @@ -7877,7 +7877,7 @@ components: properties: type: type: string - enum: ["text", "json_object"] + enum: [ "text", "json_object" ] example: "json_object" default: "text" description: Must be one of `text` or `json_object`. @@ -7955,7 +7955,7 @@ components: description: > `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" x-oaiExpandable: true functions: @@ -8040,7 +8040,7 @@ components: object: type: string description: The object type, which is always `chat.completion`. 
- enum: [chat.completion] + enum: [ chat.completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -8077,7 +8077,7 @@ components: description: &chat_completion_function_finish_reason_description | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. - enum: ["stop", "length", "function_call", "content_filter"] + enum: [ "stop", "length", "function_call", "content_filter" ] index: type: integer description: The index of the choice in the list of choices. @@ -8098,7 +8098,7 @@ components: object: type: string description: The object type, which is always `chat.completion`. - enum: [chat.completion] + enum: [ chat.completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -8157,7 +8157,7 @@ components: type: boolean object: type: string - enum: [list] + enum: [ list ] required: - object - data @@ -8214,7 +8214,7 @@ components: object: type: string description: The object type, which is always `chat.completion.chunk`. - enum: [chat.completion.chunk] + enum: [ chat.completion.chunk ] usage: type: object description: | @@ -8264,7 +8264,7 @@ components: anyOf: - type: string - type: string - enum: ["dall-e-2", "dall-e-3"] + enum: [ "dall-e-2", "dall-e-3" ] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-3" @@ -8280,27 +8280,27 @@ components: description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. quality: type: string - enum: ["standard", "hd"] + enum: [ "standard", "hd" ] default: "standard" example: "standard" description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. response_format: &images_response_format type: string - enum: ["url", "b64_json"] + enum: [ "url", "b64_json" ] default: "url" example: "url" nullable: true description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. size: &images_size type: string - enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] + enum: [ "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792" ] default: "1024x1024" example: "1024x1024" nullable: true description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. style: type: string - enum: ["vivid", "natural"] + enum: [ "vivid", "natural" ] default: "vivid" example: "vivid" nullable: true @@ -8361,7 +8361,7 @@ components: anyOf: - type: string - type: string - enum: ["dall-e-2"] + enum: [ "dall-e-2" ] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-2" @@ -8377,7 +8377,7 @@ components: description: The number of images to generate. Must be between 1 and 10. 
size: &dalle2_images_size type: string - enum: ["256x256", "512x512", "1024x1024"] + enum: [ "256x256", "512x512", "1024x1024" ] default: "1024x1024" example: "1024x1024" nullable: true @@ -8399,7 +8399,7 @@ components: anyOf: - type: string - type: string - enum: ["dall-e-2"] + enum: [ "dall-e-2" ] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-2" @@ -8437,7 +8437,7 @@ components: anyOf: - type: string - type: string - enum: ["text-moderation-latest", "text-moderation-stable"] + enum: [ "text-moderation-latest", "text-moderation-stable" ] x-oaiTypeLabel: string required: - input @@ -8580,7 +8580,7 @@ components: $ref: "#/components/schemas/OpenAIFile" object: type: string - enum: [list] + enum: [ list ] required: - object - data @@ -8600,7 +8600,7 @@ components: Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). type: string - enum: ["assistants", "batch", "fine-tune"] + enum: [ "assistants", "batch", "fine-tune" ] required: - file - purpose @@ -8612,7 +8612,7 @@ components: type: string object: type: string - enum: [file] + enum: [ file ] deleted: type: boolean required: @@ -8631,7 +8631,7 @@ components: anyOf: - type: string - type: string - enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] + enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo" ] x-oaiTypeLabel: string training_file: description: | @@ -8654,7 +8654,7 @@ components: are updated less frequently, but with lower variance. oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: integer minimum: 1 maximum: 256 @@ -8665,7 +8665,7 @@ components: overfitting. oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: number minimum: 0 exclusiveMinimum: true @@ -8676,7 +8676,7 @@ components: through the training dataset. oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: integer minimum: 1 maximum: 50 @@ -8721,7 +8721,7 @@ components: The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. oneOf: - type: string - enum: [wandb] + enum: [ wandb ] wandb: type: object description: | @@ -8778,7 +8778,7 @@ components: $ref: "#/components/schemas/FineTuningJobEvent" object: type: string - enum: [list] + enum: [ list ] required: - object - data @@ -8792,7 +8792,7 @@ components: $ref: "#/components/schemas/FineTuningJobCheckpoint" object: type: string - enum: [list] + enum: [ list ] first_id: type: string nullable: true @@ -8867,7 +8867,7 @@ components: example: "float" default: "float" type: string - enum: ["float", "base64"] + enum: [ "float", "base64" ] dimensions: description: | The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. @@ -8892,7 +8892,7 @@ components: object: type: string description: The object type, which is always "list". - enum: [list] + enum: [ list ] usage: type: object description: The usage information for the request. @@ -8929,7 +8929,7 @@ components: anyOf: - type: string - type: string - enum: ["whisper-1"] + enum: [ "whisper-1" ] x-oaiTypeLabel: string language: description: | @@ -8964,7 +8964,7 @@ components: enum: - word - segment - default: [segment] + default: [ segment ] required: - file - model @@ -9051,7 +9051,7 @@ components: type: number format: float description: End time of the word in seconds. 
- required: [word, start, end] + required: [ word, start, end ] CreateTranscriptionResponseVerboseJson: type: object @@ -9076,7 +9076,7 @@ components: description: Segments of the transcribed text and their corresponding details. items: $ref: "#/components/schemas/TranscriptionSegment" - required: [language, duration, text] + required: [ language, duration, text ] x-oaiMeta: name: The transcription object group: audio @@ -9099,7 +9099,7 @@ components: anyOf: - type: string - type: string - enum: ["whisper-1"] + enum: [ "whisper-1" ] x-oaiTypeLabel: string prompt: description: | @@ -9145,7 +9145,7 @@ components: description: Segments of the translated text and their corresponding details. items: $ref: "#/components/schemas/TranscriptionSegment" - required: [language, duration, text] + required: [ language, duration, text ] CreateSpeechRequest: type: object @@ -9157,7 +9157,7 @@ components: anyOf: - type: string - type: string - enum: ["tts-1", "tts-1-hd"] + enum: [ "tts-1", "tts-1-hd" ] x-oaiTypeLabel: string input: type: string @@ -9166,12 +9166,12 @@ components: voice: description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). type: string - enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] + enum: [ "alloy", "echo", "fable", "onyx", "nova", "shimmer" ] response_format: description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." default: "mp3" type: string - enum: ["mp3", "opus", "aac", "flac", "wav", "pcm"] + enum: [ "mp3", "opus", "aac", "flac", "wav", "pcm" ] speed: description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." type: number @@ -9196,7 +9196,7 @@ components: object: type: string description: The object type, which is always "model". - enum: [model] + enum: [ model ] owned_by: type: string description: The organization that owns the model. @@ -9228,7 +9228,7 @@ components: object: type: string description: The object type, which is always `file`. - enum: ["file"] + enum: [ "file" ] purpose: type: string description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. @@ -9246,7 +9246,7 @@ components: type: string deprecated: true description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. - enum: ["uploaded", "processed", "error"] + enum: [ "uploaded", "processed", "error" ] status_details: type: string deprecated: true @@ -9287,7 +9287,7 @@ components: object: type: string description: The object type, which is always "embedding". - enum: [embedding] + enum: [ embedding ] required: - index - object @@ -9352,15 +9352,15 @@ components: n_epochs: oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: integer minimum: 1 maximum: 50 default: auto description: - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. 
If setting the number manually, we support any number between 1 and 50 epochs. required: - n_epochs model: @@ -9369,7 +9369,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job". - enum: [fine_tuning.job] + enum: [ fine_tuning.job ] organization_id: type: string description: The organization that owns the fine-tuning job. @@ -9448,7 +9448,7 @@ components: type: type: string description: "The type of the integration being enabled for the fine-tuning job" - enum: ["wandb"] + enum: [ "wandb" ] wandb: type: object description: | @@ -9493,12 +9493,12 @@ components: type: integer level: type: string - enum: ["info", "warn", "error"] + enum: [ "info", "warn", "error" ] message: type: string object: type: string - enum: [fine_tuning.job.event] + enum: [ fine_tuning.job.event ] required: - id - object @@ -9558,7 +9558,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job.checkpoint". - enum: [fine_tuning.job.checkpoint] + enum: [ fine_tuning.job.checkpoint ] required: - created_at - fine_tuning_job_id @@ -9655,7 +9655,7 @@ components: - type: string description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsApiResponseFormat" x-oaiExpandable: true @@ -9666,7 +9666,7 @@ components: properties: type: type: string - enum: ["text", "json_object"] + enum: [ "text", "json_object" ] example: "json_object" default: "text" description: Must be one of `text` or `json_object`. @@ -9682,7 +9682,7 @@ components: object: description: The object type, which is always `assistant`. type: string - enum: [assistant] + enum: [ assistant ] created_at: description: The Unix timestamp (in seconds) for when the assistant was created. type: integer @@ -9710,7 +9710,7 @@ components: tools: description: &assistant_tools_param_description | A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - default: [] + default: [ ] type: array maxItems: 128 items: @@ -9731,7 +9731,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -9841,7 +9841,7 @@ components: maxLength: 256000 tools: description: *assistant_tools_param_description - default: [] + default: [ ] type: array maxItems: 128 items: @@ -9862,7 +9862,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -9897,8 +9897,8 @@ components: Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
x-oaiTypeLabel: map oneOf: - - required: [vector_store_ids] - - required: [vector_stores] + - required: [ vector_store_ids ] + - required: [ vector_stores ] nullable: true metadata: description: *metadata_description @@ -9956,7 +9956,7 @@ components: maxLength: 256000 tools: description: *assistant_tools_param_description - default: [] + default: [ ] type: array maxItems: 128 items: @@ -9977,7 +9977,7 @@ components: type: array description: | Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10029,7 +10029,7 @@ components: type: boolean object: type: string - enum: [assistant.deleted] + enum: [ assistant.deleted ] required: - id - object @@ -10072,7 +10072,7 @@ components: type: type: string description: "The type of tool being defined: `code_interpreter`" - enum: ["code_interpreter"] + enum: [ "code_interpreter" ] required: - type @@ -10083,7 +10083,7 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - enum: ["file_search"] + enum: [ "file_search" ] required: - type @@ -10094,7 +10094,7 @@ components: type: type: string description: "The type of tool being defined: `function`" - enum: ["function"] + enum: [ "function" ] function: $ref: "#/components/schemas/FunctionObject" required: @@ -10109,7 +10109,7 @@ components: type: type: string description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: ["auto", "last_messages"] + enum: [ "auto", "last_messages" ] last_messages: type: integer description: The number of most recent messages from the thread when constructing the context for the run. @@ -10132,7 +10132,7 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" x-oaiExpandable: true @@ -10142,7 +10142,7 @@ components: properties: type: type: string - enum: ["function", "code_interpreter", "file_search"] + enum: [ "function", "code_interpreter", "file_search" ] description: The type of the tool. If type is `function`, the function name must be set function: type: object @@ -10166,7 +10166,7 @@ components: object: description: The object type, which is always `thread.run`. type: string - enum: ["thread.run"] + enum: [ "thread.run" ] created_at: description: The Unix timestamp (in seconds) for when the run was created. type: integer @@ -10199,7 +10199,7 @@ components: type: description: For now, this is always `submit_tool_outputs`. type: string - enum: ["submit_tool_outputs"] + enum: [ "submit_tool_outputs" ] submit_tool_outputs: type: object description: Details on the tool outputs needed for this run to continue. @@ -10222,7 +10222,7 @@ components: code: type: string description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. 
- enum: ["server_error", "rate_limit_exceeded", "invalid_prompt"] + enum: [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] message: type: string description: A human-readable description of the error. @@ -10257,7 +10257,7 @@ components: reason: description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. type: string - enum: ["max_completion_tokens", "max_prompt_tokens"] + enum: [ "max_completion_tokens", "max_prompt_tokens" ] model: description: The model that the [assistant](/docs/api-reference/assistants) used for this run. type: string @@ -10266,7 +10266,7 @@ components: type: string tools: description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. - default: [] + default: [ ] type: array maxItems: 20 items: @@ -10558,7 +10558,7 @@ components: type: type: string description: The type of tool call the output is required for. For now, this is always `function`. - enum: ["function"] + enum: [ "function" ] function: type: object description: The function definition. @@ -10644,7 +10644,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10721,7 +10721,7 @@ components: object: description: The object type, which is always `thread`. type: string - enum: ["thread"] + enum: [ "thread" ] created_at: description: The Unix timestamp (in seconds) for when the thread was created. type: integer @@ -10737,7 +10737,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10795,7 +10795,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10830,8 +10830,8 @@ components: Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. x-oaiTypeLabel: map oneOf: - - required: [vector_store_ids] - - required: [vector_stores] + - required: [ vector_store_ids ] + - required: [ vector_stores ] nullable: true metadata: description: *metadata_description @@ -10855,7 +10855,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10885,7 +10885,7 @@ components: type: boolean object: type: string - enum: [thread.deleted] + enum: [ thread.deleted ] required: - id - object @@ -10927,7 +10927,7 @@ components: object: description: The object type, which is always `thread.message`. type: string - enum: ["thread.message"] + enum: [ "thread.message" ] created_at: description: The Unix timestamp (in seconds) for when the message was created. 
type: integer @@ -10937,7 +10937,7 @@ components: status: description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. type: string - enum: ["in_progress", "incomplete", "completed"] + enum: [ "in_progress", "incomplete", "completed" ] incomplete_details: description: On an incomplete message, details about why the message is incomplete. type: object @@ -10967,7 +10967,7 @@ components: role: description: The entity that produced the message. One of `user` or `assistant`. type: string - enum: ["user", "assistant"] + enum: [ "user", "assistant" ] content: description: The content of the message in array of text and/or images. type: array @@ -11060,7 +11060,7 @@ components: object: description: The object type, which is always `thread.message.delta`. type: string - enum: ["thread.message.delta"] + enum: [ "thread.message.delta" ] delta: description: The delta containing the fields that have changed on the Message. type: object @@ -11068,7 +11068,7 @@ components: role: description: The entity that produced the message. One of `user` or `assistant`. type: string - enum: ["user", "assistant"] + enum: [ "user", "assistant" ] content: description: The content of the message in array of text and/or images. type: array @@ -11109,7 +11109,7 @@ components: properties: role: type: string - enum: ["user", "assistant"] + enum: [ "user", "assistant" ] description: | The role of the entity that is creating the message. Allowed values include: - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. @@ -11176,7 +11176,7 @@ components: type: boolean object: type: string - enum: [thread.message.deleted] + enum: [ thread.message.deleted ] required: - id - object @@ -11215,7 +11215,7 @@ components: type: description: Always `image_file`. type: string - enum: ["image_file"] + enum: [ "image_file" ] image_file: type: object properties: @@ -11225,7 +11225,7 @@ components: detail: type: string description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - file_id @@ -11244,7 +11244,7 @@ components: type: description: Always `image_file`. type: string - enum: ["image_file"] + enum: [ "image_file" ] image_file: type: object properties: @@ -11254,7 +11254,7 @@ components: detail: type: string description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - index @@ -11267,7 +11267,7 @@ components: properties: type: type: string - enum: ["image_url"] + enum: [ "image_url" ] description: The type of the content part. image_url: type: object @@ -11279,7 +11279,7 @@ components: detail: type: string description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - url @@ -11298,7 +11298,7 @@ components: type: description: Always `image_url`. type: string - enum: ["image_url"] + enum: [ "image_url" ] image_url: type: object properties: @@ -11308,7 +11308,7 @@ components: detail: type: string description: Specifies the detail level of the image. 
`low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - index @@ -11322,7 +11322,7 @@ components: type: description: Always `text`. type: string - enum: ["text"] + enum: [ "text" ] text: type: object properties: @@ -11351,7 +11351,7 @@ components: type: description: Always `text`. type: string - enum: ["text"] + enum: [ "text" ] text: type: string description: Text content to be sent to the model @@ -11367,7 +11367,7 @@ components: type: description: Always `file_citation`. type: string - enum: ["file_citation"] + enum: [ "file_citation" ] text: description: The text in the message content that needs to be replaced. type: string @@ -11404,7 +11404,7 @@ components: type: description: Always `file_path`. type: string - enum: ["file_path"] + enum: [ "file_path" ] text: description: The text in the message content that needs to be replaced. type: string @@ -11440,7 +11440,7 @@ components: type: description: Always `text`. type: string - enum: ["text"] + enum: [ "text" ] text: type: object properties: @@ -11469,7 +11469,7 @@ components: type: description: Always `file_citation`. type: string - enum: ["file_citation"] + enum: [ "file_citation" ] text: description: The text in the message content that needs to be replaced. type: string @@ -11503,7 +11503,7 @@ components: type: description: Always `file_path`. type: string - enum: ["file_path"] + enum: [ "file_path" ] text: description: The text in the message content that needs to be replaced. type: string @@ -11535,7 +11535,7 @@ components: object: description: The object type, which is always `thread.run.step`. type: string - enum: ["thread.run.step"] + enum: [ "thread.run.step" ] created_at: description: The Unix timestamp (in seconds) for when the run step was created. type: integer @@ -11551,11 +11551,11 @@ components: type: description: The type of run step, which can be either `message_creation` or `tool_calls`. type: string - enum: ["message_creation", "tool_calls"] + enum: [ "message_creation", "tool_calls" ] status: description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. type: string - enum: ["in_progress", "cancelled", "failed", "completed", "expired"] + enum: [ "in_progress", "cancelled", "failed", "completed", "expired" ] step_details: type: object description: The details of the run step. @@ -11571,7 +11571,7 @@ components: code: type: string description: One of `server_error` or `rate_limit_exceeded`. - enum: ["server_error", "rate_limit_exceeded"] + enum: [ "server_error", "rate_limit_exceeded" ] message: type: string description: A human-readable description of the error. @@ -11635,7 +11635,7 @@ components: object: description: The object type, which is always `thread.run.step.delta`. type: string - enum: ["thread.run.step.delta"] + enum: [ "thread.run.step.delta" ] delta: description: The delta containing the fields that have changed on the run step. type: object @@ -11706,7 +11706,7 @@ components: type: description: Always `message_creation`. type: string - enum: ["message_creation"] + enum: [ "message_creation" ] message_creation: type: object properties: @@ -11727,7 +11727,7 @@ components: type: description: Always `message_creation`. type: string - enum: ["message_creation"] + enum: [ "message_creation" ] message_creation: type: object properties: @@ -11745,7 +11745,7 @@ components: type: description: Always `tool_calls`. 
type: string - enum: ["tool_calls"] + enum: [ "tool_calls" ] tool_calls: type: array description: | @@ -11768,7 +11768,7 @@ components: type: description: Always `tool_calls`. type: string - enum: ["tool_calls"] + enum: [ "tool_calls" ] tool_calls: type: array description: | @@ -11793,7 +11793,7 @@ components: type: type: string description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: ["code_interpreter"] + enum: [ "code_interpreter" ] code_interpreter: type: object description: The Code Interpreter tool call definition. @@ -11832,7 +11832,7 @@ components: type: type: string description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: ["code_interpreter"] + enum: [ "code_interpreter" ] code_interpreter: type: object description: The Code Interpreter tool call definition. @@ -11861,7 +11861,7 @@ components: type: description: Always `logs`. type: string - enum: ["logs"] + enum: [ "logs" ] logs: type: string description: The text output from the Code Interpreter tool call. @@ -11880,7 +11880,7 @@ components: type: description: Always `logs`. type: string - enum: ["logs"] + enum: [ "logs" ] logs: type: string description: The text output from the Code Interpreter tool call. @@ -11895,7 +11895,7 @@ components: type: description: Always `image`. type: string - enum: ["image"] + enum: [ "image" ] image: type: object properties: @@ -11918,7 +11918,7 @@ components: type: description: Always `image`. type: string - enum: ["image"] + enum: [ "image" ] image: type: object properties: @@ -11939,7 +11939,7 @@ components: type: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: ["file_search"] + enum: [ "file_search" ] file_search: type: object description: For now, this is always going to be an empty object. @@ -11962,7 +11962,7 @@ components: type: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: ["file_search"] + enum: [ "file_search" ] file_search: type: object description: For now, this is always going to be an empty object. @@ -11982,7 +11982,7 @@ components: type: type: string description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: ["function"] + enum: [ "function" ] function: type: object description: The definition of the function that was called. @@ -12019,7 +12019,7 @@ components: type: type: string description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: ["function"] + enum: [ "function" ] function: type: object description: The definition of the function that was called. @@ -12046,7 +12046,7 @@ components: anchor: description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." type: string - enum: ["last_active_at"] + enum: [ "last_active_at" ] days: description: The number of days after the anchor time that the vector store will expire. type: integer @@ -12067,7 +12067,7 @@ components: object: description: The object type, which is always `vector_store`. type: string - enum: ["vector_store"] + enum: [ "vector_store" ] created_at: description: The Unix timestamp (in seconds) for when the vector store was created. 
type: integer @@ -12104,7 +12104,7 @@ components: status: description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. type: string - enum: ["expired", "in_progress", "completed"] + enum: [ "expired", "in_progress", "completed" ] expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" expires_at: @@ -12225,7 +12225,7 @@ components: type: boolean object: type: string - enum: [vector_store.deleted] + enum: [ vector_store.deleted ] required: - id - object @@ -12242,7 +12242,7 @@ components: object: description: The object type, which is always `vector_store.file`. type: string - enum: ["vector_store.file"] + enum: [ "vector_store.file" ] usage_bytes: description: The total vector store usage in bytes. Note that this may be different from the original file size. type: integer @@ -12255,7 +12255,7 @@ components: status: description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. type: string - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] last_error: type: object description: The last error associated with this vector store file. Will be `null` if there are no errors. @@ -12343,7 +12343,7 @@ components: type: boolean object: type: string - enum: [vector_store.file.deleted] + enum: [ vector_store.file.deleted ] required: - id - object @@ -12360,7 +12360,7 @@ components: object: description: The object type, which is always `vector_store.file_batch`. type: string - enum: ["vector_store.files_batch"] + enum: [ "vector_store.files_batch" ] created_at: description: The Unix timestamp (in seconds) for when the vector store files batch was created. type: integer @@ -12370,7 +12370,7 @@ components: status: description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. 
type: string - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] file_counts: type: object properties: @@ -12473,7 +12473,7 @@ components: properties: event: type: string - enum: ["thread.created"] + enum: [ "thread.created" ] data: $ref: "#/components/schemas/ThreadObject" required: @@ -12489,7 +12489,7 @@ components: properties: event: type: string - enum: ["thread.run.created"] + enum: [ "thread.run.created" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12502,7 +12502,7 @@ components: properties: event: type: string - enum: ["thread.run.queued"] + enum: [ "thread.run.queued" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12515,7 +12515,7 @@ components: properties: event: type: string - enum: ["thread.run.in_progress"] + enum: [ "thread.run.in_progress" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12528,7 +12528,7 @@ components: properties: event: type: string - enum: ["thread.run.requires_action"] + enum: [ "thread.run.requires_action" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12541,7 +12541,7 @@ components: properties: event: type: string - enum: ["thread.run.completed"] + enum: [ "thread.run.completed" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12554,7 +12554,7 @@ components: properties: event: type: string - enum: ["thread.run.failed"] + enum: [ "thread.run.failed" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12567,7 +12567,7 @@ components: properties: event: type: string - enum: ["thread.run.cancelling"] + enum: [ "thread.run.cancelling" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12580,7 +12580,7 @@ components: properties: event: type: string - enum: ["thread.run.cancelled"] + enum: [ "thread.run.cancelled" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12593,7 +12593,7 @@ components: properties: event: type: string - enum: ["thread.run.expired"] + enum: [ "thread.run.expired" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12609,7 +12609,7 @@ components: properties: event: type: string - enum: ["thread.run.step.created"] + enum: [ "thread.run.step.created" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -12622,7 +12622,7 @@ components: properties: event: type: string - enum: ["thread.run.step.in_progress"] + enum: [ "thread.run.step.in_progress" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -12635,7 +12635,7 @@ components: properties: event: type: string - enum: ["thread.run.step.delta"] + enum: [ "thread.run.step.delta" ] data: $ref: "#/components/schemas/RunStepDeltaObject" required: @@ -12648,7 +12648,7 @@ components: properties: event: type: string - enum: ["thread.run.step.completed"] + enum: [ "thread.run.step.completed" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -12661,7 +12661,7 @@ components: properties: event: type: string - enum: ["thread.run.step.failed"] + enum: [ "thread.run.step.failed" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -12674,7 +12674,7 @@ components: properties: event: type: string - enum: ["thread.run.step.cancelled"] + enum: [ "thread.run.step.cancelled" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -12687,7 +12687,7 @@ components: properties: event: type: string - enum: ["thread.run.step.expired"] + enum: [ "thread.run.step.expired" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -12703,7 +12703,7 @@ components: properties: event: type: 
string - enum: ["thread.message.created"] + enum: [ "thread.message.created" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -12716,7 +12716,7 @@ components: properties: event: type: string - enum: ["thread.message.in_progress"] + enum: [ "thread.message.in_progress" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -12729,7 +12729,7 @@ components: properties: event: type: string - enum: ["thread.message.delta"] + enum: [ "thread.message.delta" ] data: $ref: "#/components/schemas/MessageDeltaObject" required: @@ -12742,7 +12742,7 @@ components: properties: event: type: string - enum: ["thread.message.completed"] + enum: [ "thread.message.completed" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -12755,7 +12755,7 @@ components: properties: event: type: string - enum: ["thread.message.incomplete"] + enum: [ "thread.message.incomplete" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -12770,7 +12770,7 @@ components: properties: event: type: string - enum: ["error"] + enum: [ "error" ] data: $ref: "#/components/schemas/Error" required: @@ -12785,10 +12785,10 @@ components: properties: event: type: string - enum: ["done"] + enum: [ "done" ] data: type: string - enum: ["[DONE]"] + enum: [ "[DONE]" ] required: - event - data @@ -12803,7 +12803,7 @@ components: type: string object: type: string - enum: [batch] + enum: [ batch ] description: The object type, which is always `batch`. endpoint: type: string @@ -12928,7 +12928,7 @@ components: description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. method: type: string - enum: ["POST"] + enum: [ "POST" ] description: The HTTP method to be used for the request. Currently only `POST` is supported. url: type: string @@ -12994,14 +12994,14 @@ components: type: boolean object: type: string - enum: [list] + enum: [ list ] required: - object - data - has_more security: - - ApiKeyAuth: [] + - ApiKeyAuth: [ ] x-oaiMeta: navigationGroups: From 72a87d1b6274b838112dc15cc7360c2e3a77e5d5 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 29 May 2024 17:55:23 +0200 Subject: [PATCH 148/251] feat: Support FastChat OpenAI-compatible API (#444) --- packages/openai_dart/README.md | 2 +- ...reate_chat_completion_stream_response.dart | 4 +- .../src/generated/schema/function_object.dart | 2 +- .../generated/schema/function_parameters.dart | 2 +- .../src/generated/schema/schema.freezed.dart | 66 +- .../lib/src/generated/schema/schema.g.dart | 8 +- packages/openai_dart/oas/openapi_curated.yaml | 13 +- .../openai_dart/oas/openapi_official.yaml | 1074 +++++++++-------- 8 files changed, 601 insertions(+), 570 deletions(-) diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md index f020d128..76dcd335 100644 --- a/packages/openai_dart/README.md +++ b/packages/openai_dart/README.md @@ -16,7 +16,7 @@ Unofficial Dart client for [OpenAI](https://platform.openai.com/docs/api-referen - Custom base URL, headers and query params support (e.g. HTTP proxies) - Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) - Partial Azure OpenAI API support -- It can be used to consume OpenAI-compatible APIs like [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), etc. 
+- It can be used to consume OpenAI-compatible APIs like [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc. **Supported endpoints:** diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart index 18cab5fa..724f4066 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart @@ -24,7 +24,7 @@ class CreateChatCompletionStreamResponse required List choices, /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - required int created, + @JsonKey(includeIfNull: false) int? created, /// The model to generate the completion. @JsonKey(includeIfNull: false) String? model, @@ -36,7 +36,7 @@ class CreateChatCompletionStreamResponse String? systemFingerprint, /// The object type, which is always `chat.completion.chunk`. - required String object, + @JsonKey(includeIfNull: false) String? object, /// Usage statistics for the completion request. @JsonKey(includeIfNull: false) CompletionUsage? usage, diff --git a/packages/openai_dart/lib/src/generated/schema/function_object.dart b/packages/openai_dart/lib/src/generated/schema/function_object.dart index 8049253e..647b4e0a 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_object.dart @@ -21,7 +21,7 @@ class FunctionObject with _$FunctionObject { /// A description of what the function does, used by the model to choose when and how to call the function. @JsonKey(includeIfNull: false) String? description, - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) FunctionParameters? parameters, diff --git a/packages/openai_dart/lib/src/generated/schema/function_parameters.dart b/packages/openai_dart/lib/src/generated/schema/function_parameters.dart index abd11036..2429f8ba 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_parameters.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_parameters.dart @@ -8,7 +8,7 @@ part of open_a_i_schema; // TYPE: FunctionParameters // ========================================== -/// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. 
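For context on the FastChat-related changes above (the README bullet and the now-nullable `created`/`object` fields on the streamed chunk): below is a minimal, illustrative sketch of consuming an OpenAI-compatible server by overriding the base URL. It is not taken from this patch; the `baseUrl`/`apiKey` constructor parameters and `createChatCompletionStream` method are assumed from the package's documented custom-base-URL support, and the server URL and model name are placeholders.

```dart
import 'dart:io';

import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  // Point the client at an OpenAI-compatible server (e.g. a local FastChat
  // deployment). The base URL, API key and model name here are placeholders.
  final client = OpenAIClient(
    apiKey: 'unused-by-most-self-hosted-servers',
    baseUrl: 'http://localhost:8000/v1',
  );

  final stream = client.createChatCompletionStream(
    request: CreateChatCompletionRequest(
      model: ChatCompletionModel.modelId('vicuna-7b-v1.5'),
      messages: [
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string('Hello!'),
        ),
      ],
    ),
  );

  // With this patch, `created` and `object` on each chunk are nullable, so
  // chunks from servers that omit those fields no longer fail to parse.
  await for (final chunk in stream) {
    final content =
        chunk.choices.isNotEmpty ? chunk.choices.first.delta.content : null;
    if (content != null) stdout.write(content);
  }

  // Close the underlying HTTP client when done.
  client.endSession();
}
```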
+/// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. typedef FunctionParameters = Map; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 1395bc5a..16efa483 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -6695,7 +6695,7 @@ mixin _$FunctionObject { @JsonKey(includeIfNull: false) String? get description => throw _privateConstructorUsedError; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) @@ -6821,12 +6821,12 @@ class _$FunctionObjectImpl extends _FunctionObject { @JsonKey(includeIfNull: false) final String? description; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. final Map? _parameters; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @override @@ -6898,7 +6898,7 @@ abstract class _FunctionObject extends FunctionObject { String? get description; @override - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. 
+ /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) @@ -9004,7 +9004,8 @@ mixin _$CreateChatCompletionStreamResponse { throw _privateConstructorUsedError; /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - int get created => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + int? get created => throw _privateConstructorUsedError; /// The model to generate the completion. @JsonKey(includeIfNull: false) @@ -9017,7 +9018,8 @@ mixin _$CreateChatCompletionStreamResponse { String? get systemFingerprint => throw _privateConstructorUsedError; /// The object type, which is always `chat.completion.chunk`. - String get object => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? get object => throw _privateConstructorUsedError; /// Usage statistics for the completion request. @JsonKey(includeIfNull: false) @@ -9041,11 +9043,11 @@ abstract class $CreateChatCompletionStreamResponseCopyWith<$Res> { $Res call( {@JsonKey(includeIfNull: false) String? id, List choices, - int created, + @JsonKey(includeIfNull: false) int? created, @JsonKey(includeIfNull: false) String? model, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, - String object, + @JsonKey(includeIfNull: false) String? object, @JsonKey(includeIfNull: false) CompletionUsage? usage}); $CompletionUsageCopyWith<$Res>? get usage; @@ -9067,10 +9069,10 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, $Res call({ Object? id = freezed, Object? choices = null, - Object? created = null, + Object? created = freezed, Object? model = freezed, Object? systemFingerprint = freezed, - Object? object = null, + Object? object = freezed, Object? usage = freezed, }) { return _then(_value.copyWith( @@ -9082,10 +9084,10 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ? _value.choices : choices // ignore: cast_nullable_to_non_nullable as List, - created: null == created + created: freezed == created ? _value.created : created // ignore: cast_nullable_to_non_nullable - as int, + as int?, model: freezed == model ? _value.model : model // ignore: cast_nullable_to_non_nullable @@ -9094,10 +9096,10 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable as String?, - object: null == object + object: freezed == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as String, + as String?, usage: freezed == usage ? _value.usage : usage // ignore: cast_nullable_to_non_nullable @@ -9130,11 +9132,11 @@ abstract class _$$CreateChatCompletionStreamResponseImplCopyWith<$Res> $Res call( {@JsonKey(includeIfNull: false) String? id, List choices, - int created, + @JsonKey(includeIfNull: false) int? created, @JsonKey(includeIfNull: false) String? model, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, - String object, + @JsonKey(includeIfNull: false) String? object, @JsonKey(includeIfNull: false) CompletionUsage? 
usage}); @override @@ -9156,10 +9158,10 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> $Res call({ Object? id = freezed, Object? choices = null, - Object? created = null, + Object? created = freezed, Object? model = freezed, Object? systemFingerprint = freezed, - Object? object = null, + Object? object = freezed, Object? usage = freezed, }) { return _then(_$CreateChatCompletionStreamResponseImpl( @@ -9171,10 +9173,10 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> ? _value._choices : choices // ignore: cast_nullable_to_non_nullable as List, - created: null == created + created: freezed == created ? _value.created : created // ignore: cast_nullable_to_non_nullable - as int, + as int?, model: freezed == model ? _value.model : model // ignore: cast_nullable_to_non_nullable @@ -9183,10 +9185,10 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable as String?, - object: null == object + object: freezed == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as String, + as String?, usage: freezed == usage ? _value.usage : usage // ignore: cast_nullable_to_non_nullable @@ -9202,11 +9204,11 @@ class _$CreateChatCompletionStreamResponseImpl const _$CreateChatCompletionStreamResponseImpl( {@JsonKey(includeIfNull: false) this.id, required final List choices, - required this.created, + @JsonKey(includeIfNull: false) this.created, @JsonKey(includeIfNull: false) this.model, @JsonKey(name: 'system_fingerprint', includeIfNull: false) this.systemFingerprint, - required this.object, + @JsonKey(includeIfNull: false) this.object, @JsonKey(includeIfNull: false) this.usage}) : _choices = choices, super._(); @@ -9235,7 +9237,8 @@ class _$CreateChatCompletionStreamResponseImpl /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. @override - final int created; + @JsonKey(includeIfNull: false) + final int? created; /// The model to generate the completion. @override @@ -9251,7 +9254,8 @@ class _$CreateChatCompletionStreamResponseImpl /// The object type, which is always `chat.completion.chunk`. @override - final String object; + @JsonKey(includeIfNull: false) + final String? object; /// Usage statistics for the completion request. @override @@ -9311,11 +9315,11 @@ abstract class _CreateChatCompletionStreamResponse const factory _CreateChatCompletionStreamResponse( {@JsonKey(includeIfNull: false) final String? id, required final List choices, - required final int created, + @JsonKey(includeIfNull: false) final int? created, @JsonKey(includeIfNull: false) final String? model, @JsonKey(name: 'system_fingerprint', includeIfNull: false) final String? systemFingerprint, - required final String object, + @JsonKey(includeIfNull: false) final String? object, @JsonKey(includeIfNull: false) final CompletionUsage? usage}) = _$CreateChatCompletionStreamResponseImpl; const _CreateChatCompletionStreamResponse._() : super._(); @@ -9337,7 +9341,8 @@ abstract class _CreateChatCompletionStreamResponse @override /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - int get created; + @JsonKey(includeIfNull: false) + int? get created; @override /// The model to generate the completion. @@ -9353,7 +9358,8 @@ abstract class _CreateChatCompletionStreamResponse @override /// The object type, which is always `chat.completion.chunk`. 
- String get object; + @JsonKey(includeIfNull: false) + String? get object; @override /// Usage statistics for the completion request. diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 4062dc95..8b4963d6 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -835,10 +835,10 @@ _$CreateChatCompletionStreamResponseImpl .map((e) => ChatCompletionStreamResponseChoice.fromJson( e as Map)) .toList(), - created: json['created'] as int, + created: json['created'] as int?, model: json['model'] as String?, systemFingerprint: json['system_fingerprint'] as String?, - object: json['object'] as String, + object: json['object'] as String?, usage: json['usage'] == null ? null : CompletionUsage.fromJson(json['usage'] as Map), @@ -856,10 +856,10 @@ Map _$$CreateChatCompletionStreamResponseImplToJson( writeNotNull('id', instance.id); val['choices'] = instance.choices.map((e) => e.toJson()).toList(); - val['created'] = instance.created; + writeNotNull('created', instance.created); writeNotNull('model', instance.model); writeNotNull('system_fingerprint', instance.systemFingerprint); - val['object'] = instance.object; + writeNotNull('object', instance.object); writeNotNull('usage', instance.usage?.toJson()); return val; } diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index f3eb8a26..07b38bb8 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -2212,7 +2212,7 @@ components: - name FunctionParameters: type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." additionalProperties: true ChatCompletionTool: type: object @@ -2426,10 +2426,10 @@ components: $ref: "#/components/schemas/CompletionUsage" required: - choices - - created + # - created # Made nullable to support FastChat API which doesn't return this field with some models # - id # Made nullable to support OpenRouter API which doesn't return this field with some models # - model # Made nullable to support TogetherAI API which doesn't return this field with some models - - object + # - object # Made nullable to support FastChat API which doesn't return this field with some models ChatCompletionStreamResponseChoice: type: object description: A choice the model generated for the input prompt. @@ -6128,7 +6128,12 @@ components: nullable: true BatchEndpoint: type: string - enum: [ "/v1/chat/completions", "/v1/embeddings", "/v1/completions" ] + enum: + [ + "/v1/chat/completions", + "/v1/embeddings", + "/v1/completions", + ] description: The endpoint to be used for all requests in the batch. 
Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. BatchCompletionWindow: type: string diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 2f18ad09..395d6481 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -87,7 +87,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -95,22 +95,22 @@ paths: {"role": "user", "content": "Hello!"} ] ) - + print(completion.choices[0].message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ messages: [{ role: "system", content: "You are a helpful assistant." }], model: "VAR_model_id", }); - + console.log(completion.choices[0]); } - + main(); response: &chat_completion_example | { @@ -163,9 +163,9 @@ paths: }' python: | from openai import OpenAI - + client = OpenAI() - + response = client.chat.completions.create( model="gpt-4-turbo", messages=[ @@ -182,13 +182,13 @@ paths: ], max_tokens=300, ) - + print(response.choices[0]) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.chat.completions.create({ model: "gpt-4-turbo", @@ -254,7 +254,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -263,15 +263,15 @@ paths: ], stream=True ) - + for chunk in completion: print(chunk.choices[0].delta) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ model: "VAR_model_id", @@ -281,20 +281,20 @@ paths: ], stream: true, }); - + for await (const chunk of completion) { console.log(chunk.choices[0].delta.content); } } - + main(); response: &chat_completion_chunk_example | {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} - + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} - + .... 
- + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} - title: Functions request: @@ -338,7 +338,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -366,13 +366,13 @@ paths: tools=tools, tool_choice="auto" ) - + print(completion) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]; const tools = [ @@ -395,17 +395,17 @@ paths: } } ]; - + const response = await openai.chat.completions.create({ model: "gpt-4-turbo", messages: messages, tools: tools, tool_choice: "auto", }); - + console.log(response); } - + main(); response: &chat_completion_function_example | { @@ -460,7 +460,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -469,14 +469,14 @@ paths: logprobs=True, top_logprobs=2 ) - + print(completion.choices[0].message) print(completion.choices[0].logprobs) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ messages: [{ role: "user", content: "Hello!" }], @@ -484,10 +484,10 @@ paths: logprobs: true, top_logprobs: 2, }); - + console.log(completion.choices[0]); } - + main(); response: | { @@ -716,7 +716,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.completions.create( model="VAR_model_id", prompt="Say this is a test", @@ -725,9 +725,9 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.completions.create({ model: "VAR_model_id", @@ -735,7 +735,7 @@ paths: max_tokens: 7, temperature: 0, }); - + console.log(completion); } main(); @@ -776,7 +776,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + for chunk in client.completions.create( model="VAR_model_id", prompt="Say this is a test", @@ -787,16 +787,16 @@ paths: print(chunk.choices[0].text) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.completions.create({ model: "VAR_model_id", prompt: "Say this is a test.", stream: true, }); - + for await (const chunk of stream) { console.log(chunk.choices[0].text) } @@ -857,7 +857,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.images.generate( model="dall-e-3", prompt="A cute baby sea otter", @@ -866,12 +866,12 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.generate({ model: "dall-e-3", prompt: "A cute baby sea otter" }); - + console.log(image.data); } main(); @@ -923,7 +923,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.images.edit( image=open("otter.png", "rb"), mask=open("mask.png", "rb"), @@ -934,16 +934,16 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.edit({ image: fs.createReadStream("otter.png"), mask: fs.createReadStream("mask.png"), prompt: "A cute baby sea otter wearing a beret", }); - + console.log(image.data); } main(); @@ -993,7 
+993,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.images.create_variation( image=open("image_edit_original.png", "rb"), n=2, @@ -1002,14 +1002,14 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.createVariation({ image: fs.createReadStream("otter.png"), }); - + console.log(image.data); } main(); @@ -1063,7 +1063,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.embeddings.create( model="text-embedding-ada-002", input="The food was delicious and the waiter...", @@ -1071,19 +1071,19 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const embedding = await openai.embeddings.create({ model: "text-embedding-ada-002", input: "The quick brown fox jumped over the lazy dog", encoding_format: "float", }); - + console.log(embedding); } - + main(); response: | { @@ -1151,7 +1151,7 @@ paths: python: | from pathlib import Path import openai - + speech_file_path = Path(__file__).parent / "speech.mp3" response = openai.audio.speech.create( model="tts-1", @@ -1163,11 +1163,11 @@ paths: import fs from "fs"; import path from "path"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + const speechFile = path.resolve("./speech.mp3"); - + async function main() { const mp3 = await openai.audio.speech.create({ model: "tts-1", @@ -1216,7 +1216,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( model="whisper-1", @@ -1225,15 +1225,15 @@ paths: node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), model: "whisper-1", }); - + console.log(transcription.text); } main(); @@ -1254,7 +1254,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( file=audio_file, @@ -1262,14 +1262,14 @@ paths: response_format="verbose_json", timestamp_granularities=["word"] ) - + print(transcript.words) node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), @@ -1277,7 +1277,7 @@ paths: response_format: "verbose_json", timestamp_granularities: ["word"] }); - + console.log(transcription.text); } main(); @@ -1314,7 +1314,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( file=audio_file, @@ -1322,14 +1322,14 @@ paths: response_format="verbose_json", timestamp_granularities=["segment"] ) - + print(transcript.words) node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), @@ -1337,7 +1337,7 @@ paths: response_format: "verbose_json", timestamp_granularities: ["segment"] }); - + console.log(transcription.text); } main(); @@ -1401,7 +1401,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = 
client.audio.translations.create( model="whisper-1", @@ -1410,15 +1410,15 @@ paths: node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const translation = await openai.audio.translations.create({ file: fs.createReadStream("speech.mp3"), model: "whisper-1", }); - + console.log(translation.text); } main(); @@ -1459,21 +1459,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.files.list(); - + for await (const file of list) { console.log(file); } } - + main(); response: | { @@ -1503,13 +1503,13 @@ paths: - Files summary: | Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB. - + The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details. - + The Fine-tuning API only supports `.jsonl` files. - + The Batch API only supports `.jsonl` files up to 100 MB in size. - + Please [contact us](https://help.openai.com/) if you need to increase these storage limits. requestBody: required: true @@ -1538,7 +1538,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.create( file=open("mydata.jsonl", "rb"), purpose="fine-tune" @@ -1546,18 +1546,18 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.create({ file: fs.createReadStream("mydata.jsonl"), purpose: "fine-tune", }); - + console.log(file); } - + main(); response: | { @@ -1601,19 +1601,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.delete("file-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.del("file-abc123"); - + console.log(file); } - + main(); response: | { @@ -1652,19 +1652,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.retrieve("file-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.retrieve("file-abc123"); - + console.log(file); } - + main(); response: | { @@ -1707,19 +1707,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + content = client.files.content("file-abc123") node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.content("file-abc123"); - + console.log(file); } - + main(); /fine_tuning/jobs: @@ -1729,9 +1729,9 @@ paths: - Fine-tuning summary: | Creates a fine-tuning job which begins the process of creating a new model from a given dataset. - + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
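Taken together, the file-upload hunk above and the fine-tuning endpoint it feeds describe an end-to-end flow: a `.jsonl` training file is uploaded with purpose `fine-tune`, and its ID is then passed as `training_file` when the job is enqueued. A minimal Python sketch of that flow, reusing only calls that already appear in the spec's own examples (the file name and model are placeholders, not values from this diff):

    from openai import OpenAI

    client = OpenAI()

    # Upload the training data; the Fine-tuning API only accepts .jsonl files.
    training_file = client.files.create(
        file=open("mydata.jsonl", "rb"),
        purpose="fine-tune",
    )

    # Enqueue the fine-tuning job against the uploaded file's ID.
    job = client.fine_tuning.jobs.create(
        training_file=training_file.id,
        model="gpt-3.5-turbo",
    )

    print(job.id, job.status)
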
- + [Learn more about fine-tuning](/docs/guides/fine-tuning) requestBody: required: true @@ -1764,24 +1764,24 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", model="gpt-3.5-turbo" ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123" }); - + console.log(fineTune); } - + main(); response: | { @@ -1812,7 +1812,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", model="gpt-3.5-turbo", @@ -1822,19 +1822,19 @@ paths: ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", model: "gpt-3.5-turbo", hyperparameters: { n_epochs: 2 } }); - + console.log(fineTune); } - + main(); response: | { @@ -1864,7 +1864,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", validation_file="file-def456", @@ -1872,18 +1872,18 @@ paths: ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", validation_file: "file-abc123" }); - + console.log(fineTune); } - + main(); response: | { @@ -1983,21 +1983,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.fineTuning.jobs.list(); - + for await (const fineTune of list) { console.log(fineTune); } } - + main(); response: | { @@ -2023,7 +2023,7 @@ paths: - Fine-tuning summary: | Get info about a fine-tuning job. 
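Because the retrieve operation below returns the job's current status, a common client-side pattern, not shown in the spec's own examples, is to poll until the job reaches a terminal state. A small sketch, assuming the terminal status names ("succeeded", "failed", "cancelled"), which are not part of this hunk:

    import time

    from openai import OpenAI

    client = OpenAI()

    # "ftjob-abc123" is the placeholder job ID used throughout the spec examples.
    while True:
        job = client.fine_tuning.jobs.retrieve("ftjob-abc123")
        if job.status in ("succeeded", "failed", "cancelled"):
            break
        time.sleep(30)

    # fine_tuned_model is populated once the job has succeeded.
    print(job.status, job.fine_tuned_model)
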
- + [Learn more about fine-tuning](/docs/guides/fine-tuning) parameters: - in: path @@ -2053,19 +2053,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.retrieve("ftjob-abc123") node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.retrieve("ftjob-abc123"); - + console.log(fineTune); } - + main(); response: &fine_tuning_example | { @@ -2140,24 +2140,24 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.list_events( fine_tuning_job_id="ftjob-abc123", limit=2 ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.fineTuning.list_events(id="ftjob-abc123", limit=2); - + for await (const fineTune of list) { console.log(fineTune); } } - + main(); response: | { @@ -2219,16 +2219,16 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.cancel("ftjob-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.cancel("ftjob-abc123"); - + console.log(fineTune); } main(); @@ -2352,16 +2352,16 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.models.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.models.list(); - + for await (const model of list) { console.log(model); } @@ -2426,19 +2426,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.models.retrieve("VAR_model_id") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const model = await openai.models.retrieve("VAR_model_id"); - + console.log(model); } - + main(); response: &retrieve_model_response | { @@ -2480,16 +2480,16 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.models.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); - + console.log(model); } main(); @@ -2535,17 +2535,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + moderation = client.moderations.create(input="I want to kill them.") print(moderation) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const moderation = await openai.moderations.create({ input: "I want to kill them." }); - + console.log(moderation); } main(); @@ -2643,7 +2643,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistants = client.beta.assistants.list( order="desc", limit="20", @@ -2651,18 +2651,18 @@ paths: print(my_assistants.data) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistants = await openai.beta.assistants.list({ order: "desc", limit: "20", }); - + console.log(myAssistants.data); } - + main(); response: &list_assistants_example | { @@ -2759,7 +2759,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.create( instructions="You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.", name="Math Tutor", @@ -2769,9 +2769,9 @@ paths: print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.create({ instructions: @@ -2780,10 +2780,10 @@ paths: tools: [{ type: "code_interpreter" }], model: "gpt-4-turbo", }); - + console.log(myAssistant); } - + main(); response: &create_assistants_example | { @@ -2820,7 +2820,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.create( instructions="You are an HR bot, and you have access to files to answer employee questions about company policies.", name="HR Helper", @@ -2831,9 +2831,9 @@ paths: print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.create({ instructions: @@ -2847,10 +2847,10 @@ paths: }, model: "gpt-4-turbo" }); - + console.log(myAssistant); } - + main(); response: | { @@ -2912,22 +2912,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.retrieve("asst_abc123") print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.retrieve( "asst_abc123" ); - + console.log(myAssistant); } - + main(); response: | { @@ -2993,7 +2993,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_updated_assistant = client.beta.assistants.update( "asst_abc123", instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", @@ -3001,13 +3001,13 @@ paths: tools=[{"type": "file_search"}], model="gpt-4-turbo" ) - + print(my_updated_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myUpdatedAssistant = await openai.beta.assistants.update( "asst_abc123", @@ -3019,10 +3019,10 @@ paths: model: "gpt-4-turbo" } ); - + console.log(myUpdatedAssistant); } - + main(); response: | { @@ -3083,17 +3083,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.beta.assistants.delete("asst_abc123") print(response) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.beta.assistants.del("asst_abc123"); - + console.log(response); } main(); @@ -3139,20 +3139,20 @@ paths: python: | from openai import OpenAI client = OpenAI() - + empty_thread = client.beta.threads.create() print(empty_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const emptyThread = await openai.beta.threads.create(); - + console.log(emptyThread); } - + main(); response: | { @@ -3181,7 +3181,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message_thread = client.beta.threads.create( messages=[ { @@ -3194,13 +3194,13 @@ paths: }, ] ) - + print(message_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const messageThread = await openai.beta.threads.create({ messages: [ @@ -3214,10 +3214,10 @@ paths: }, ], }); - + console.log(messageThread); } - + main(); response: | { @@ -3263,22 +3263,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_thread = client.beta.threads.retrieve("thread_abc123") print(my_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myThread = await openai.beta.threads.retrieve( "thread_abc123" ); - + console.log(myThread); } - + main(); response: | { @@ -3338,7 +3338,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_updated_thread = client.beta.threads.update( "thread_abc123", metadata={ @@ -3349,9 +3349,9 @@ paths: print(my_updated_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const updatedThread = await openai.beta.threads.update( "thread_abc123", @@ -3359,10 +3359,10 @@ paths: metadata: { modified: "true", user: "abc123" }, } ); - + console.log(updatedThread); } - + main(); response: | { @@ -3410,17 +3410,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.beta.threads.delete("thread_abc123") print(response) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.beta.threads.del("thread_abc123"); - + console.log(response); } main(); @@ -3496,22 +3496,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + thread_messages = client.beta.threads.messages.list("thread_abc123") print(thread_messages.data) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const threadMessages = await openai.beta.threads.messages.list( "thread_abc123" ); - + console.log(threadMessages.data); } - + main(); response: | { @@ -3606,7 +3606,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + thread_message = 
client.beta.threads.messages.create( "thread_abc123", role="user", @@ -3615,18 +3615,18 @@ paths: print(thread_message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const threadMessages = await openai.beta.threads.messages.create( "thread_abc123", { role: "user", content: "How does AI work? Explain it in simple terms." } ); - + console.log(threadMessages); } - + main(); response: | { @@ -3691,7 +3691,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message = client.beta.threads.messages.retrieve( message_id="msg_abc123", thread_id="thread_abc123", @@ -3699,18 +3699,18 @@ paths: print(message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const message = await openai.beta.threads.messages.retrieve( "thread_abc123", "msg_abc123" ); - + console.log(message); } - + main(); response: | { @@ -3785,7 +3785,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message = client.beta.threads.messages.update( message_id="msg_abc12", thread_id="thread_abc123", @@ -3797,9 +3797,9 @@ paths: print(message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const message = await openai.beta.threads.messages.update( "thread_abc123", @@ -3875,7 +3875,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_message = client.beta.threads.messages.delete( message_id="msg_abc12", thread_id="thread_abc123", @@ -3883,15 +3883,15 @@ paths: print(deleted_message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const deletedMessage = await openai.beta.threads.messages.del( "thread_abc123", "msg_abc123" ); - + console.log(deletedMessage); } response: | @@ -3901,7 +3901,6 @@ paths: "deleted": true } - /threads/runs: post: operationId: createThreadAndRun @@ -3945,7 +3944,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.create_and_run( assistant_id="asst_abc123", thread={ @@ -3954,13 +3953,13 @@ paths: ] } ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.createAndRun({ assistant_id: "asst_abc123", @@ -3970,10 +3969,10 @@ paths: ], }, }); - + console.log(run); } - + main(); response: | { @@ -4028,7 +4027,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.create_and_run( assistant_id="asst_123", thread={ @@ -4038,14 +4037,14 @@ paths: }, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.createAndRun({ assistant_id: "asst_123", @@ -4056,58 +4055,58 @@ paths: }, stream: true }); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.created data: {"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} - + event: thread.run.created data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} - + event: thread.run.in_progress data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} - + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], "metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], "metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... 
- + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! How can I assist you today?","annotations":[]}}], "metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"} - + event: done data: [DONE] @@ -4153,7 +4152,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -4174,7 +4173,7 @@ paths: } } ] - + stream = client.beta.threads.create_and_run( thread={ "messages": [ @@ -4185,14 +4184,14 @@ paths: tools=tools, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + const tools = [ { "type": "function", @@ -4213,7 +4212,7 @@ paths: } } ]; - + async function main() { const stream = await openai.beta.threads.createAndRun({ assistant_id: "asst_123", @@ -4225,52 +4224,52 @@ paths: tools: tools, stream: true }); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.created data: {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} - + event: thread.run.created data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.in_progress data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"","output":null}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"{\""}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"location"}}]}}} - + ... - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"ahrenheit"}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} - + event: thread.run.requires_action data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"}} - + event: done data: [DONE] @@ -4333,25 +4332,25 @@ paths: python: | from openai import OpenAI client = OpenAI() - + runs = client.beta.threads.runs.list( "thread_abc123" ) - + print(runs) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const runs = await openai.beta.threads.runs.list( "thread_abc123" ); - + console.log(runs); } - + main(); response: | { @@ -4498,27 +4497,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.create( thread_id="thread_abc123", assistant_id="asst_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.create( "thread_abc123", { assistant_id: "asst_abc123" } ); - + console.log(run); } - + main(); response: &run_object_example | { @@ -4569,74 +4568,74 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.runs.create( thread_id="thread_123", assistant_id="asst_123", stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.runs.create( "thread_123", { assistant_id: "asst_123", stream: true } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.created data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.in_progress data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710330642,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! 
How can I assist you today?","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} - + event: done data: [DONE] @@ -4677,7 +4676,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -4698,21 +4697,21 @@ paths: } } ] - + stream = client.beta.threads.runs.create( thread_id="thread_abc123", assistant_id="asst_abc123", tools=tools, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + const tools = [ { "type": "function", @@ -4733,7 +4732,7 @@ paths: } } ]; - + async function main() { const stream = await openai.beta.threads.runs.create( "thread_abc123", @@ -4743,55 +4742,55 @@ paths: stream: true } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.created data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.in_progress data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! 
How can I assist you today?","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} - + event: done data: [DONE] @@ -4835,27 +4834,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.retrieve( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.retrieve( "thread_abc123", "run_abc123" ); - + console.log(run); } - + main(); response: | { @@ -4947,19 +4946,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.update( thread_id="thread_abc123", run_id="run_abc123", metadata={"user_id": "user_abc123"}, ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.update( "thread_abc123", @@ -4970,10 +4969,10 @@ paths: }, } ); - + console.log(run); } - + main(); response: | { @@ -5082,7 +5081,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.submit_tool_outputs( thread_id="thread_123", run_id="run_123", @@ -5093,13 +5092,13 @@ paths: } ] ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.submitToolOutputs( "thread_123", @@ -5113,10 +5112,10 @@ paths: ], } ); - + console.log(run); } - + main(); response: | { @@ -5190,7 +5189,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.runs.submit_tool_outputs( thread_id="thread_123", run_id="run_123", @@ -5202,14 +5201,14 @@ paths: ], stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.runs.submitToolOutputs( "thread_123", @@ -5223,61 +5222,61 @@ paths: ], } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.step.completed data: 
{"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.in_progress data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.step.created data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} - + event: thread.message.created data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"The","annotations":[]}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" current"}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" weather"}}]}} - + ... 
- + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" sunny"}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}} - + event: thread.message.completed data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710352477,"role":"assistant","content":[{"type":"text","text":{"value":"The current weather in San Francisco, CA is 70 degrees Fahrenheit and sunny.","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}} - + event: thread.run.completed data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} - + event: done data: [DONE] @@ -5322,27 +5321,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.cancel( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.cancel( "thread_abc123", "run_abc123" ); - + console.log(run); } - + main(); response: | { @@ -5442,17 +5441,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run_steps = client.beta.threads.runs.steps.list( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run_steps) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const runStep = await openai.beta.threads.runs.steps.list( "thread_abc123", @@ -5460,7 +5459,7 @@ paths: ); console.log(runStep); } - + main(); response: | { @@ -5545,18 +5544,18 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run_step = client.beta.threads.runs.steps.retrieve( thread_id="thread_abc123", run_id="run_abc123", step_id="step_abc123" ) - + print(run_step) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const runStep = await openai.beta.threads.runs.steps.retrieve( "thread_abc123", @@ -5565,7 +5564,7 @@ paths: ); console.log(runStep); } - + main(); response: &run_step_object_example | { @@ -5648,18 +5647,18 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_stores = client.beta.vector_stores.list() print(vector_stores) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStores = await openai.beta.vectorStores.list(); console.log(vectorStores); } - + main(); response: | { @@ -5734,7 +5733,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.create( name="Support FAQ" ) @@ -5742,14 +5741,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.create({ name: "Support FAQ" }); console.log(vectorStore); } - + main(); response: | { @@ -5802,7 +5801,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.retrieve( vector_store_id="vs_abc123" ) @@ -5810,14 +5809,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.retrieve( "vs_abc123" ); console.log(vectorStore); } - + main(); response: | { @@ -5868,7 +5867,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.update( vector_store_id="vs_abc123", name="Support FAQ" @@ -5877,7 +5876,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.update( "vs_abc123", @@ -5887,7 +5886,7 @@ paths: ); console.log(vectorStore); } - + main(); response: | { @@ -5940,7 +5939,7 @@ 
paths: python: | from openai import OpenAI client = OpenAI() - + deleted_vector_store = client.beta.vector_stores.delete( vector_store_id="vs_abc123" ) @@ -5948,14 +5947,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const deletedVectorStore = await openai.beta.vectorStores.del( "vs_abc123" ); console.log(deletedVectorStore); } - + main(); response: | { @@ -6029,7 +6028,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_files = client.beta.vector_stores.files.list( vector_store_id="vs_abc123" ) @@ -6037,14 +6036,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFiles = await openai.beta.vectorStores.files.list( "vs_abc123" ); console.log(vectorStoreFiles); } - + main(); response: | { @@ -6112,7 +6111,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file = client.beta.vector_stores.files.create( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6121,7 +6120,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const myVectorStoreFile = await openai.beta.vectorStores.files.create( "vs_abc123", @@ -6131,7 +6130,7 @@ paths: ); console.log(myVectorStoreFile); } - + main(); response: | { @@ -6187,7 +6186,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file = client.beta.vector_stores.files.retrieve( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6196,7 +6195,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFile = await openai.beta.vectorStores.files.retrieve( "vs_abc123", @@ -6204,7 +6203,7 @@ paths: ); console.log(vectorStoreFile); } - + main(); response: | { @@ -6256,7 +6255,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_vector_store_file = client.beta.vector_stores.files.delete( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6265,7 +6264,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const deletedVectorStoreFile = await openai.beta.vectorStores.files.del( "vs_abc123", @@ -6273,7 +6272,7 @@ paths: ); console.log(deletedVectorStoreFile); } - + main(); response: | { @@ -6328,7 +6327,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file_batch = client.beta.vector_stores.file_batches.create( vector_store_id="vs_abc123", file_ids=["file-abc123", "file-abc456"] @@ -6337,7 +6336,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create( "vs_abc123", @@ -6347,7 +6346,7 @@ paths: ); console.log(myVectorStoreFileBatch); } - + main(); response: | { @@ -6408,7 +6407,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file_batch = client.beta.vector_stores.file_batches.retrieve( vector_store_id="vs_abc123", batch_id="vsfb_abc123" @@ -6417,7 +6416,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve( "vs_abc123", @@ -6425,7 +6424,7 @@ paths: ); console.log(vectorStoreFileBatch); } - + main(); response: | { @@ -6485,7 +6484,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + 
deleted_vector_store_file_batch = client.beta.vector_stores.file_batches.cancel( vector_store_id="vs_abc123", file_batch_id="vsfb_abc123" @@ -6494,7 +6493,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const deletedVectorStoreFileBatch = await openai.vector_stores.fileBatches.cancel( "vs_abc123", @@ -6502,7 +6501,7 @@ paths: ); console.log(deletedVectorStoreFileBatch); } - + main(); response: | { @@ -6591,7 +6590,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_files = client.beta.vector_stores.file_batches.list_files( vector_store_id="vs_abc123", batch_id="vsfb_abc123" @@ -6600,7 +6599,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles( "vs_abc123", @@ -6608,7 +6607,7 @@ paths: ); console.log(vectorStoreFiles); } - + main(); response: | { @@ -6653,13 +6652,18 @@ paths: type: string description: | The ID of an uploaded file that contains requests for the new batch. - + See [upload file](/docs/api-reference/files/create) for how to upload a file. - + Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/requestInput), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. endpoint: type: string - enum: [ "/v1/chat/completions", "/v1/embeddings", "/v1/completions" ] + enum: + [ + "/v1/chat/completions", + "/v1/embeddings", + "/v1/completions", + ] description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. 
completion_window: type: string @@ -6696,7 +6700,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.create( input_file_id="file-abc123", endpoint="/v1/chat/completions", @@ -6704,19 +6708,19 @@ paths: ) node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.create({ input_file_id: "file-abc123", endpoint: "/v1/chat/completions", completion_window: "24h" }); - + console.log(batch); } - + main(); response: | { @@ -6787,21 +6791,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.list() node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.batches.list(); - + for await (const batch of list) { console.log(batch); } } - + main(); response: | { @@ -6876,19 +6880,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.retrieve("batch_abc123") node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.retrieve("batch_abc123"); - + console.log(batch); } - + main(); response: &batch_object | { @@ -6955,19 +6959,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.cancel("batch_abc123") node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.cancel("batch_abc123"); - + console.log(batch); } - + main(); response: | { @@ -7076,7 +7080,7 @@ components: prompt: description: &completions_prompt_description | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. - + Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. default: "<|endoftext|>" nullable: true @@ -7110,9 +7114,9 @@ components: nullable: true description: &completions_best_of_description | Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. - + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. - + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. echo: type: boolean @@ -7128,7 +7132,7 @@ components: nullable: true description: &completions_frequency_penalty_description | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) logit_bias: &completions_logit_bias type: object @@ -7139,9 +7143,9 @@ components: type: integer description: &completions_logit_bias_description | Modify the likelihood of specified tokens appearing in the completion. - + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. logprobs: &completions_logprobs_configuration type: integer @@ -7151,7 +7155,7 @@ components: nullable: true description: &completions_logprobs_description | Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - + The maximum value for `logprobs` is 5. max_tokens: type: integer @@ -7161,7 +7165,7 @@ components: nullable: true description: &completions_max_tokens_description | The maximum number of [tokens](/tokenizer) that can be generated in the completion. - + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: type: integer @@ -7172,7 +7176,7 @@ components: nullable: true description: &completions_completions_description | How many completions to generate for each prompt. - + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. presence_penalty: type: number @@ -7182,7 +7186,7 @@ components: nullable: true description: &completions_presence_penalty_description | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) seed: &completions_seed_param type: integer @@ -7191,7 +7195,7 @@ components: nullable: true description: | If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: description: &completions_stop_description > @@ -7221,7 +7225,7 @@ components: suffix: description: | The suffix that comes after a completion of inserted text. - + This parameter is only supported for `gpt-3.5-turbo-instruct`. default: null nullable: true @@ -7236,7 +7240,7 @@ components: nullable: true description: &completions_temperature_description | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - + We generally recommend altering this or `top_p` but not both. top_p: type: number @@ -7247,7 +7251,7 @@ components: nullable: true description: &completions_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or `temperature` but not both. 
user: &end_user_param_configuration type: string @@ -7320,7 +7324,7 @@ components: type: string description: | This fingerprint represents the backend configuration that the model runs with. - + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. object: type: string @@ -7483,6 +7487,7 @@ components: type: object deprecated: true description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + nullable: true properties: arguments: type: string @@ -7538,7 +7543,7 @@ components: FunctionParameters: type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." additionalProperties: true ChatCompletionFunctions: @@ -7601,7 +7606,7 @@ components: `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - + `none` is the default when no tools are present. `auto` is the default if tools are present. oneOf: - type: string @@ -7831,7 +7836,7 @@ components: type: integer description: | Modify the likelihood of specified tokens appearing in the completion. - + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. logprobs: description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. @@ -7847,7 +7852,7 @@ components: max_tokens: description: | The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. - + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. type: integer nullable: true @@ -7870,9 +7875,9 @@ components: type: object description: | An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. 
Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. properties: type: @@ -7943,12 +7948,12 @@ components: deprecated: true description: | Deprecated in favor of `tool_choice`. - + Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. - + `none` is the default when no functions are present. `auto` is the default if functions are present. oneOf: - type: string @@ -7962,7 +7967,7 @@ components: deprecated: true description: | Deprecated in favor of `tools`. - + A list of functions the model may generate JSON inputs for. type: array minItems: 1 @@ -8035,7 +8040,7 @@ components: type: string description: | This fingerprint represents the backend configuration that the model runs with. - + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. object: type: string @@ -8077,7 +8082,8 @@ components: description: &chat_completion_function_finish_reason_description | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. - enum: [ "stop", "length", "function_call", "content_filter" ] + enum: + [ "stop", "length", "function_call", "content_filter" ] index: type: integer description: The index of the choice in the list of choices. @@ -8093,7 +8099,7 @@ components: type: string description: | This fingerprint represents the backend configuration that the model runs with. - + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. object: type: string @@ -8429,7 +8435,7 @@ components: model: description: | Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. - + The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. nullable: false default: "text-moderation-latest" @@ -8597,10 +8603,10 @@ components: purpose: description: | The intended purpose of the uploaded file. - + Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). 
type: string - enum: [ "assistants", "batch", "fine-tune" ] + enum: [ "assistants", "batch", "fine-tune", "vision" ] required: - file - purpose @@ -8636,11 +8642,11 @@ components: training_file: description: | The ID of an uploaded file that contains training data. - + See [upload file](/docs/api-reference/files/create) for how to upload a file. - + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. - + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string example: "file-abc123" @@ -8684,7 +8690,7 @@ components: suffix: description: | A string of up to 18 characters that will be added to your fine-tuned model name. - + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. type: string minLength: 1 @@ -8694,14 +8700,14 @@ components: validation_file: description: | The ID of an uploaded file that contains validation data. - + If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should not be present in both train and validation files. - + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string nullable: true @@ -8980,7 +8986,7 @@ components: required: - text x-oaiMeta: - name: The transcription object + name: The transcription object (JSON) group: audio example: *basic_transcription_response_example @@ -9078,7 +9084,7 @@ components: $ref: "#/components/schemas/TranscriptionSegment" required: [ language, duration, text ] x-oaiMeta: - name: The transcription object + name: The transcription object (Verbose JSON) group: audio example: *verbose_transcription_response_example @@ -9240,7 +9246,7 @@ components: "batch_output", "fine-tune", "fine-tune-results", - "vision" + "vision", ] status: type: string @@ -9647,9 +9653,9 @@ components: AssistantsApiResponseFormatOption: description: | Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: - type: string @@ -9770,7 +9776,7 @@ components: nullable: true description: &run_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or temperature but not both. 
response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" @@ -9923,7 +9929,7 @@ components: nullable: true description: &run_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or temperature but not both. response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" @@ -10014,7 +10020,7 @@ components: nullable: true description: &run_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or temperature but not both. response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" @@ -10222,7 +10228,8 @@ components: code: type: string description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - enum: [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] + enum: + [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] message: type: string description: A human-readable description of the error. @@ -10459,7 +10466,7 @@ components: nullable: true description: &run_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or temperature but not both. stream: type: boolean @@ -12438,21 +12445,21 @@ components: AssistantStreamEvent: description: | Represents an event emitted when streaming a Run. - + Each event in a server-sent events stream has an `event` and `data` property: - + ``` event: thread.created data: {"id": "thread_123", "object": "thread", ...} ``` - + We emit events whenever a new object is created, transitions to a new state, or is being streamed in parts (deltas). For example, we emit `thread.run.created` when a new run is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses to create a message during a run, we emit a `thread.message.created event`, a `thread.message.in_progress` event, many `thread.message.delta` events, and finally a `thread.message.completed` event. - + We may add additional events over time, so we recommend handling unknown events gracefully in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to integrate the Assistants API with streaming. @@ -12550,6 +12557,19 @@ components: description: Occurs when a [run](/docs/api-reference/runs/object) is completed. x-oaiMeta: dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.incomplete" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - type: object properties: event: @@ -13037,7 +13057,7 @@ x-oaiMeta: title: Audio description: | Learn how to turn audio into text or text into audio. 
- + Related guide: [Speech to text](/docs/guides/speech-to-text) navigationGroup: endpoints sections: @@ -13060,7 +13080,7 @@ x-oaiMeta: title: Chat description: | Given a list of messages comprising a conversation, the model will return a response. - + Related guide: [Chat Completions](/docs/guides/text-generation) navigationGroup: endpoints sections: @@ -13077,7 +13097,7 @@ x-oaiMeta: title: Embeddings description: | Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. - + Related guide: [Embeddings](/docs/guides/embeddings) navigationGroup: endpoints sections: @@ -13091,7 +13111,7 @@ x-oaiMeta: title: Fine-tuning description: | Manage fine-tuning jobs to tailor a model to your specific training data. - + Related guide: [Fine-tune models](/docs/guides/fine-tuning) navigationGroup: endpoints sections: @@ -13126,7 +13146,7 @@ x-oaiMeta: title: Batch description: | Create large batches of API requests for asynchronous processing. The Batch API returns completions within 24 hours for a 50% discount. - + Related guide: [Batch](/docs/guides/batch) navigationGroup: endpoints sections: @@ -13179,7 +13199,7 @@ x-oaiMeta: title: Images description: | Given a prompt and/or an input image, the model will generate a new image. - + Related guide: [Image generation](/docs/guides/images) navigationGroup: endpoints sections: @@ -13217,7 +13237,7 @@ x-oaiMeta: title: Moderations description: | Given some input text, outputs if the model classifies it as potentially harmful across several categories. - + Related guide: [Moderations](/docs/guides/moderation) navigationGroup: endpoints sections: @@ -13232,7 +13252,7 @@ x-oaiMeta: beta: true description: | Build assistants that can call models and use tools to perform tasks. - + [Get started with the Assistants API](/docs/assistants) navigationGroup: assistants sections: @@ -13259,7 +13279,7 @@ x-oaiMeta: beta: true description: | Create threads that assistants can interact with. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13283,7 +13303,7 @@ x-oaiMeta: beta: true description: | Create messages within threads - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13310,7 +13330,7 @@ x-oaiMeta: beta: true description: | Represents an execution run on a thread. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13343,7 +13363,7 @@ x-oaiMeta: beta: true description: | Represents the steps (model and tool calls) taken during the run. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13361,7 +13381,7 @@ x-oaiMeta: beta: true description: | Vector stores are used to store files for use by the `file_search` tool. - + Related guide: [File Search](/docs/assistants/tools/file-search) navigationGroup: assistants sections: @@ -13388,7 +13408,7 @@ x-oaiMeta: beta: true description: | Vector store files represent files inside a vector store. - + Related guide: [File Search](/docs/assistants/tools/file-search) navigationGroup: assistants sections: @@ -13412,7 +13432,7 @@ x-oaiMeta: beta: true description: | Vector store file batches represent operations to add multiple files to a vector store. 
- + Related guide: [File Search](/docs/assistants/tools/file-search) navigationGroup: assistants sections: @@ -13436,11 +13456,11 @@ x-oaiMeta: beta: true description: | Stream the result of executing a Run or resuming a Run after submitting tool outputs. - + You can stream events from the [Create Thread and Run](/docs/api-reference/runs/createThreadAndRun), [Create Run](/docs/api-reference/runs/createRun), and [Submit Tool Outputs](/docs/api-reference/runs/submitToolOutputs) endpoints by passing `"stream": true`. The response will be a [Server-Sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) stream. - + Our Node and Python SDKs provide helpful utilities to make streaming easy. Reference the [Assistants API quickstart](/docs/assistants/overview) to learn more. navigationGroup: assistants From c9ac183d48e2f23d8064e1418220db08838d7d9b Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 29 May 2024 21:59:16 +0200 Subject: [PATCH 149/251] feat: Support buffered stream responses in ollama_dart (#445) --- packages/ollama_dart/lib/src/client.dart | 53 +++++++++++++----------- 1 file changed, 28 insertions(+), 25 deletions(-) diff --git a/packages/ollama_dart/lib/src/client.dart b/packages/ollama_dart/lib/src/client.dart index 2bb5a7be..c5dded40 100644 --- a/packages/ollama_dart/lib/src/client.dart +++ b/packages/ollama_dart/lib/src/client.dart @@ -1,4 +1,5 @@ // ignore_for_file: use_super_parameters +import 'dart:async'; import 'dart:convert'; import 'package:http/http.dart' as http; @@ -56,11 +57,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => GenerateCompletionResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => GenerateCompletionResponse.fromJson(json.decode(d))); } // ------------------------------------------ @@ -85,11 +84,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => GenerateChatCompletionResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => GenerateChatCompletionResponse.fromJson(json.decode(d))); } // ------------------------------------------ @@ -114,11 +111,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => CreateModelResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => CreateModelResponse.fromJson(json.decode(d))); } // ------------------------------------------ @@ -143,11 +138,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => PullModelResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => PullModelResponse.fromJson(json.decode(d))); } // ------------------------------------------ @@ -172,11 +165,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => 
PushModelResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => PushModelResponse.fromJson(json.decode(d))); } @override @@ -184,3 +175,15 @@ class OllamaClient extends g.OllamaClient { return onRequestHandler(request); } } + +class _OllamaStreamTransformer + extends StreamTransformerBase, String> { + const _OllamaStreamTransformer(); + + @override + Stream bind(final Stream> stream) { + return stream // + .transform(utf8.decoder) // + .transform(const LineSplitter()); + } +} From 592060c9a201e276fa10580a27b102a2af24fcdc Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 31 May 2024 08:45:57 +0200 Subject: [PATCH 150/251] docs: Add WikivoyageEU example, a fully local RAG with Llama3 and ObjectBox (#446) --- examples/wikivoyage_eu/.gitignore | 3 + examples/wikivoyage_eu/README.md | 89 +++++ examples/wikivoyage_eu/analysis_options.yaml | 1 + examples/wikivoyage_eu/bin/injestion.dart | 21 ++ examples/wikivoyage_eu/bin/wikivoyage_eu.dart | 82 ++++ .../bin/wikivoyage_eu_dataset.csv | 161 ++++++++ examples/wikivoyage_eu/pubspec.lock | 350 ++++++++++++++++++ examples/wikivoyage_eu/pubspec.yaml | 12 + examples/wikivoyage_eu/pubspec_overrides.yaml | 16 + examples/wikivoyage_eu/rag.png | Bin 0 -> 18434 bytes examples/wikivoyage_eu/wikivoyage_eu.gif | Bin 0 -> 171257 bytes 11 files changed, 735 insertions(+) create mode 100644 examples/wikivoyage_eu/.gitignore create mode 100644 examples/wikivoyage_eu/README.md create mode 100644 examples/wikivoyage_eu/analysis_options.yaml create mode 100644 examples/wikivoyage_eu/bin/injestion.dart create mode 100644 examples/wikivoyage_eu/bin/wikivoyage_eu.dart create mode 100644 examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv create mode 100644 examples/wikivoyage_eu/pubspec.lock create mode 100644 examples/wikivoyage_eu/pubspec.yaml create mode 100644 examples/wikivoyage_eu/pubspec_overrides.yaml create mode 100644 examples/wikivoyage_eu/rag.png create mode 100644 examples/wikivoyage_eu/wikivoyage_eu.gif diff --git a/examples/wikivoyage_eu/.gitignore b/examples/wikivoyage_eu/.gitignore new file mode 100644 index 00000000..3a857904 --- /dev/null +++ b/examples/wikivoyage_eu/.gitignore @@ -0,0 +1,3 @@ +# https://dart.dev/guides/libraries/private-files +# Created by `dart pub` +.dart_tool/ diff --git a/examples/wikivoyage_eu/README.md b/examples/wikivoyage_eu/README.md new file mode 100644 index 00000000..ca686dcb --- /dev/null +++ b/examples/wikivoyage_eu/README.md @@ -0,0 +1,89 @@ +# Wikivoyage EU + +This example demonstrates how to build a fully local Retrieval Augmented Generation (RAG) pipeline with Llama 3 and ObjectBox using LangChain.dart and Ollama. + +> This example is adapted from [Ashmi Banerjee](https://ashmibanerjee.com)'s workshop "[Building a RAG using Google Gemma and MongoDB](https://colab.research.google.com/drive/1CviSVwnwl73ph-AhTB0Z8vYcOQrjityk)". + +![RAG Pipeline](rag.png) +*Figure 1: RAG Architecture (source: [Ashmi Banerjee](https://colab.research.google.com/drive/1CviSVwnwl73ph-AhTB0Z8vYcOQrjityk))* + +## Setup + +### 1. Install Ollama + +- Go to the [Ollama](https://ollama.ai/) website and download the latest version of the Ollama app. + +### 2. 
Download models + +- For this example we will be using the following models: + * Embedding model: [`jina/jina-embeddings-v2-small-en`](https://ollama.com/jina/jina-embeddings-v2-small-en) + * LLM: [`llama3:8b`](https://ollama.com/library/llama3) +- Open your terminal and run: +```bash +ollama pull jina/jina-embeddings-v2-small-en +ollama run llama3:8b +``` + +### 3. Setup ObjectBox + +- We will be using [ObjectBox](https://objectbox.io) for our vector store. +- In order to use ObjectBox, we need to download the ObjectBox C library. You can find more information on how to do this [here](https://docs.objectbox.io/getting-started). +```bash +bash <(curl -s https://raw.githubusercontent.com/objectbox/objectbox-dart/main/install.sh) +``` + +### 4. Get dependencies + +```bash +dart pub get +``` + +## How it works + +The example has two scripts: +1. `injestion.dart`: This script reads the Wikivoyage dataset, creates embeddings from the data and stores them in the ObjectBox database. +2. `wikivoyage_eu.dart`: This script implements the chatbot that runs the RAG pipeline. + +### Ingestion + +We will be using data from [Wikivoyage](https://wikivoyage.org), a freely accessible online travel guide authored by volunteers. + +The `wikivoyage_eu_dataset.csv` file contains data from 160 European cities, including the city name, country, coordinates, population and a brief description: + +| city | country | lat | lng | population | abstract | |-----------|-------------|---------|--------|------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Amsterdam | Netherlands | 52.3728 | 4.8936 | 1459402.0 | Amsterdam is the capital of the Netherlands. It is known for the canals that cross the city, its impressive architecture, museums and art gallerie, its notorious red light district, and more than 1,500 bridges. | + +The script does the following: +1. It uses LangChain.dart's `CsvLoader` to load the `wikivoyage_eu_dataset.csv` dataset. +2. It uses the `jina/jina-embeddings-v2-small-en` model to create embeddings for each city's data. The generated embeddings have 512 dimensions. + + *As the data for each city is not very large, we won't be chunking it into smaller parts, but you could easily do that using the `RecursiveCharacterTextSplitter` class.* +3. It stores the embeddings in the ObjectBox vector database. + +You can run the script using: +```bash +$ dart run bin/injestion.dart +Added 160 documents to the vector store. +``` + +### Chatbot + +The chatbot script implements the RAG pipeline. It does the following: +1. Takes a user query as input. +2. Uses the `jina/jina-embeddings-v2-small-en` model to create an embedding for the query. +3. Retrieves the 5 most similar documents from the ObjectBox database. +4. Builds a prompt using the retrieved documents and the query. +5. Uses the `llama3:8b` model to generate a response to the prompt. + +You can run the script using: +```bash +$ dart run bin/wikivoyage_eu.dart +``` + +![Wikivoyage EU](wikivoyage_eu.gif) + +## Conclusion + +This example demonstrates how to build a simple RAG pipeline that can run locally on your machine. You can easily extend this example to build more complex RAG pipelines with more advanced retrieval and generation techniques. Check out the [LangChain.dart](https://langchaindart.dev/) documentation for more information. + +For simplicity, this example is a CLI application.
However, you can easily adapt this code to work in a Flutter app. To get started with ObjectBox in Flutter, refer to the [ObjectBox documentation](https://docs.objectbox.io/getting-started). diff --git a/examples/wikivoyage_eu/analysis_options.yaml b/examples/wikivoyage_eu/analysis_options.yaml new file mode 100644 index 00000000..f04c6cf0 --- /dev/null +++ b/examples/wikivoyage_eu/analysis_options.yaml @@ -0,0 +1 @@ +include: ../../analysis_options.yaml diff --git a/examples/wikivoyage_eu/bin/injestion.dart b/examples/wikivoyage_eu/bin/injestion.dart new file mode 100644 index 00000000..6aa7eaa3 --- /dev/null +++ b/examples/wikivoyage_eu/bin/injestion.dart @@ -0,0 +1,21 @@ +// ignore_for_file: avoid_print +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +void main() async { + final loader = CsvLoader('bin/wikivoyage_eu_dataset.csv'); + final docs = await loader.load(); + + final embeddings = OllamaEmbeddings( + model: 'jina/jina-embeddings-v2-small-en', + ); + final vectorStore = ObjectBoxVectorStore( + embeddings: embeddings, + dimensions: 512, + ); + + final ids = await vectorStore.addDocuments(documents: docs); + print('Added ${ids.length} documents to the vector store.'); + + embeddings.close(); +} diff --git a/examples/wikivoyage_eu/bin/wikivoyage_eu.dart b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart new file mode 100644 index 00000000..b1f82689 --- /dev/null +++ b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart @@ -0,0 +1,82 @@ +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +void main() async { + final vectorStore = ObjectBoxVectorStore( + embeddings: OllamaEmbeddings( + model: 'jina/jina-embeddings-v2-small-en', + ), + dimensions: 512, + ); + + final retriever = vectorStore.asRetriever( + defaultOptions: VectorStoreRetrieverOptions( + searchType: ObjectBoxSimilaritySearch(k: 5), + ), + ); + final setupAndRetrieval = Runnable.fromMap({ + 'context': retriever.pipe( + Runnable.mapInput( + (docs) => docs.map((d) => d.pageContent).join('\n---\n'), + ), + ), + 'question': Runnable.passthrough(), + }); + + final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + ''' +Here is some data from Wikivoyage about travel destinations in Europe: + + +{context} + + +Please read the Wikivoyage data carefully and consider how you can best answer the user's question using only the information provided. + +Use ANSI escape codes instead of Markdown to format your answer. +For example, `\x1B[1m\x1B[0m` will make "" bold. + +If the user's question is not about Europe, just respond with: +"I can only help you with vacation planning in Europe." +Do not provide any other suggestion if the question is not about Europe. +''' + ), + (ChatMessageType.human, '{question}'), + ]); + + final model = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3', + ), + ); + const outputParser = StringOutputParser(); + final chain = setupAndRetrieval // + .pipe(promptTemplate) + .pipe(model) + .pipe(outputParser); + + stdout.writeln( + 'Hello! Ask me anything about your vacation plans in Europe, ' + 'and I will provide you with the best itinerary.', + ); + + while (true) { + stdout.write('> '); + final query = stdin.readLineSync() ?? 
''; + + if (query.toLowerCase() == 'q') { + break; + } + + final stream = chain.stream(query); + await stream.forEach(stdout.write); + stdout.write('\n\n'); + } + + chain.close(); +} diff --git a/examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv b/examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv new file mode 100644 index 00000000..0e775870 --- /dev/null +++ b/examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv @@ -0,0 +1,161 @@ +city,country,lat,lng,population,abstract +Aalborg,Denmark,57.05,9.9167,143598.0,"Aalborg is the largest city in North Jutland, Denmark. Its population, as of 2016, is 134,672, making it the fourth largest city in Denmark." +Adana,Turkey,37.0,35.3213,1765981.0,"Adana is a city on the Cilician Plains of central Turkey, on the Seyhan River about 50 km from the Mediterranean coast. It's industrial and mostly modern but with several places of interest in its historic centre." +Amsterdam,Netherlands,52.3728,4.8936,1459402.0,"Amsterdam is the capital of the Netherlands. It is known for the canals that cross the city, its impressive architecture, museums and art gallerie, its notorious red light district, and more than 1,500 bridges." +Ancona,Italy,43.6169,13.5167,100924.0,Ancona is the capital of the Italian region called the Marches and an important port city on the coast of the Adriatic Sea. +Ankara,Turkey,39.93,32.85,5503985.0,"Ankara is the capital of Turkey, central within the country on the plateau of Central Anatolia. It's a sprawling modern place around an ancient citadel, and in 2022 had a population of almost 5." +Antalya,Turkey,36.8874,30.7075,2426356.0,"Antalya is a city in Pamphylia on the Turkish Mediterranean coast, and the chief resort of the ""Turkish Riviera"". It's a metropolis with a population of 2." +Arad,Romania,46.175,21.3125,159074.0,There is more than one place in the world called Arad. You might be looking for: +Arkhangelsk,Russia,64.55,40.5333,351488.0,"Arkhangelsk (population 350,000 in 2018) is a regional center in Northwestern Russia, located on both banks of Northern Dvina river near its mouth on the White Sea, about 1250 km by road to the north of Moscow and about 1100 km northeast of Saint Petersburg. It is part of the Silver Ring of cultural and historical centers of Northwestern Russia." +Astrakhan,Russia,46.35,48.035,532504.0,Astrakhan (Russian: А́страхань AH-struh-khun) is a city in Russia. +Baia Mare,Romania,47.6667,23.5833,123738.0,Baia Mare is a city in north-western Romania. +Baku,Azerbaijan,40.3667,49.8352,2300500.0,Baku (Azeri: Bakı) is the capital of Azerbaijan and is the largest city in the Caucasus. Baku's Old Town has UNESCO World Heritage status. +Barcelona,Spain,41.3825,2.1769,4800000.0,"Barcelona is Spain's second largest city, with a population of nearly two million people, and the capital of Catalonia. A major port on the northeastern Mediterranean coast of Spain, Barcelona has a wide variety of attractions that bring in tourists from around the globe." +Bari,Italy,41.1253,16.8667,323370.0,"Bari (Bari dialect: Bare) is the capital of the Apulia region of Italy, on the Adriatic Sea. With a population of 317,000 (in 2019), it's the second largest city in Southern Italy after Naples." +Batman,Turkey,37.887,41.132,447106.0,"Batman (pronounced as baat-maan, not like the name of the superhero; Kurdish: Iluh) is a city in southeastern Anatolia. It is the capital of an important oil producing province." 
+Belgrade,Serbia,44.82,20.46,1378682.0,"Belgrade (Serbian: Београд, Beograd) is the capital of the Republic of Serbia and the country's largest city. Belgrade has been re-emerging as a tourist destination in the past years." +Bergen,Norway,60.3894,5.33,267117.0,"Bergen is Norway's second largest city and the most popular gateway to the fjords of West Norway. The city is renowned for its great location amidst mountains, fjords, and the ocean." +Berlin,Germany,52.52,13.405,4473101.0,"Berlin is Germany's capital and biggest city. Within the city limits, Berlin in 2022 had a population of 3." +Bologna,Italy,44.4939,11.3428,392564.0,"Bologna (Emilian: Bulåggna) is a beautiful and historic city in the Emilia-Romagna region of Northeast Italy. It has the oldest university in the Western world, a lively student population, excellent food, a striking brick terracotta-roofed cityscape, and lots to see and do." +Bordeaux,France,44.84,-0.58,260958.0,"Bordeaux is a city in the Gironde region of southwest France, standing on the River Garonne. It's the country's fifth largest city, with a population of 259,809 in 2020, and another million living in its associated towns." +Braga,Portugal,41.5503,-8.42,181494.0,"Braga is one of the five largest cities of Portugal, situated in the Minho region in the North of the country. It is known for its abundance of churches and thus called the ""city of archbishops""." +Bratislava,Slovakia,48.1439,17.1097,475503.0,"Bratislava (Hungarian: Pozsony, German: Pressburg, known as Prešporok before 1919), is the capital and largest city of Slovakia. It has a population of more than 475,000 (2021), and is the administrative, cultural and economic centre of the country." +Bremen,Germany,53.0833,8.8,566573.0,"The Free Hanseatic City of Bremen is a city in northern Germany with a major port on the River Weser. The population is 567,000 (2020)." +Brest,Belarus,52.1347,23.6569,340723.0,There is more than one place called Brest: +Brno,Czechia,49.1925,16.6083,382405.0,"Brno (pronounced Bruhno) (German: Brünn, Štatl in the local dialect) is the major city of Moravia (a historical region in the Czech Republic). It is the largest city in Moravia and the second-largest city in the Czech Republic by population and area." +Brussels,Belgium,50.8467,4.3525,1743000.0,"Brussels (French: Bruxelles, Dutch: Brussel) is the capital of Belgium and one of the three administrative regions within the country, together with Flanders and Wallonia. Apart from its role within its country, it is also an internationally important city, hosting numerous international institutions, and in particular the headquarters of NATO and the core institutions of the European Union." +Budapest,Hungary,47.4925,19.0514,2997958.0,"Budapest is the capital city of Hungary. With a unique, youthful atmosphere, world-class classical music scene, a pulsating nightlife increasingly appreciated among European youth, and last but not least, an exceptional offer of natural thermal baths, Budapest is one of Europe's most delightful and enjoyable cities." +Burgas,Bulgaria,42.503,27.4702,210813.0,Burgas (also Bourgas) is a city on the Black Sea coast of Bulgaria. It is a large industrial centre with many tourist attractions in the region. +Bursa,Turkey,40.1833,29.05,2901396.0,"Bursa is a large city in the Southern Marmara region of Turkey, 20 km inland from the Marmara coast. It's the country's fourth-largest city, with a population of 2,161,990 in 2021, and with another million living in the wider metro area." 
+Bydgoszcz,Poland,53.1219,18.0003,346739.0,"Bydgoszcz (German: Bromberg) is a major city of 360,000 in Poland and with suburban area the agglomeration has nearly 500,000. It has well preserved 19th-century architecture and was known as Little Berlin before the world wars." +Cagliari,Italy,39.2278,9.1111,154106.0,"Cagliari (Sardinian: Casteddu, ""castle""; Latin: Caralis) is the capital city of the Italian island of Sardinia." +Cheboksary,Russia,56.1333,47.25,489498.0,"Cheboksary (Russian: Чебокса́ры chee-bahk-SAH-ree) is the capital of Chuvashia in the Volga Region of the Russian Federation. About 600,000 people live here and in the nearby satellite city Novocheboksarsk." +Chelyabinsk,Russia,55.15,61.4,1202371.0,"Chelyabinsk (Russian: Челя́бинск cheel-YAH-beensk) is a big city, with more than a million inhabitants, the capital of Chelyabinsk Oblast in the European part of Russia." +Cluj-Napoca,Romania,46.7667,23.5833,324576.0,"Cluj-Napoca (Romanian), Kolozsvár (Hungarian) or Klausenburg (German) is the capital of Cluj county and the unofficial capital of the historical region of Transylvania. The city, with about 320,000 people (2016), is very pleasant, and it is a great experience for those who want to see urban Transylvanian life at its best." +Coimbra,Portugal,40.2111,-8.4292,143396.0,"Coimbra is the traditional capital city of Central Portugal's historic Beira Litoral region. With over 140,000 inhabitants (2021), it is the largest municipality there and one of Portugal's four largest metropolises." +Copenhagen,Denmark,55.6761,12.5683,1366301.0,"Copenhagen (Danish: København) is the capital city of Denmark and forms the moderate conurbation that one million Danes call home. It is big enough to form a small Danish metropolis, with shopping, culture and nightlife par excellence, yet small enough still to feel intimate and be safe." +Cork,Ireland,51.8972,-8.47,222333.0,"Cork is the principal city of County Cork in southwest Ireland. It was already the second-largest city in Ireland when in 2019 its boundaries were extended, to have a population of 210,000." +Craiova,Romania,44.3333,23.8167,269506.0,"Craiova with 306,000 inhabitants (2016), is one of the five largest cities of Romania. Craiova is in the southwestern region of the country and hosts the administrative buildings of the Dolj County and of the Oltenia district." +Debrecen,Hungary,47.53,21.6392,328642.0,[a Nagytemplom télen.jpg|thumb|400px|The Great Church of Debrecen in winter] +Denizli,Turkey,37.7667,29.0833,1027782.0,"Denizli is a city in the Southern Aegean region of Turkey, which most visitors simply transit to reach Pamukkale 20 km north. It's a typical modern Turkish city, far from picturesque, but does have enough sights of its own if your schedule allows." +Dijon,France,47.3167,5.0167,158002.0,"Dijon is the largest city in the eastern French region of Bourgogne-Franche-Comté. Dijon is best known for its mustard (named after the town), which is no longer produced in its metropolitan area, but it is still one of the most beautiful cities in France, and its historic buildings and byways were not heavily damaged by bombing in World War II and are largely intact." +Donetsk,Ukraine,48.0028,37.8053,929063.0,"Donetsk (Ukrainian: Донецьк, Russian: Доне́цк) is a city in the Donetsk People's Republic, on the banks of the River Kalmius." +Dresden,Germany,51.05,13.74,561922.0,"Dresden is the capital of Saxony (Sachsen). 
It's often referred to locally as Elbflorenz, or ""Florence on the Elbe"", reflecting its location on the Elbe river and its historical role as a centre for the arts and beautiful architecture - much like Florence in Italy." +Dublin,Ireland,53.35,-6.2603,1173179.0,"Dublin (Irish: Baile Átha Cliath, ""Town of the Hurdled Ford"") is the capital city of Ireland. Its vibrancy, nightlife and tourist attractions are world renowned and it's the most popular entry point for international visitors to Ireland." +Erfurt,Germany,50.9833,11.0333,213835.0,Erfurt is the capital of the German state of Thuringia (Thüringen). The city is the largest one in that province and likewise a major transportation hub. +Erzincan,Turkey,39.7464,39.4914,157452.0,"Erzincan is a city in Eastern Anatolia. It's modern, on a grid pattern, as its predecessor was destroyed by an earthquake in 1939." +Erzurum,Turkey,39.9086,41.2769,767848.0,"Erzurum is a city in Eastern Anatolia, and is the hub for visiting eastern Turkey." +Gaziantep,Turkey,37.0628,37.3792,2028563.0,"Gaziantep is a city in Southeastern Anatolia. Although it is a major city in Turkey (counting almost 2 million inhabitants) and known as the Turkish capital of gastronomy, it counts very few international tourists." +Geneva,Switzerland,46.2017,6.1469,201818.0,thumb|200px|right|The old town of Geneva in the winter +Hamburg,Germany,53.55,10.0,2484800.0,"The Free and Hanseatic City of Hamburg (Freie und Hansestadt Hamburg) is Germany's second-largest city and, at the same time, one of Germany's 16 federal states or Bundesländer. Prior to the formation of the modern German state, Hamburg for centuries enjoyed a status as de facto independent city state and regional power and trade hub in the North Sea." +Helsinki,Finland,60.1708,24.9375,1268296.0,Helsinki (Helsingfors in Swedish) is Finland's capital and largest city. Helsinki combines modern and historic architectural styles with beautiful open spaces. +Innsbruck,Austria,47.2683,11.3933,132493.0,"Innsbruck is the fifth-largest city in Austria and the provincial capital of Tyrol, as well as one of the largest cities in the Alps. It is in a valley of the river Inn between mountain ranges of above 2000 m above sea level, halfway between Bavaria and northern Italy, and is a hub of a region popular for skiing and other mountain-related activities and a busy tourist destination." +Ioannina,Greece,39.6636,20.8522,113094.0,"Ioannina (Ιωάννινα) (population: 112,486 (2011)) is a beautiful city in Northern Greece whose old town is surrounded by tall defensive walls." +Isparta,Turkey,37.7647,30.5567,258375.0,"Isparta (Greek: Σπάρτη, Baris) is a city of 220,000 inhabitants in the Lakes District of Mediterranean Turkey." +Istanbul,Turkey,41.0136,28.955,16079000.0,"Istanbul (Turkish: İstanbul) is a very large city of fantastic history, culture and beauty. Called Byzantium in ancient times, the city's name was changed to Constantinople in 324 CE when it was rebuilt by the first Christian Roman Emperor, Constantine." +Ivano-Frankivsk,Ukraine,48.9228,24.7106,238196.0,"Ivano-Frankivsk (Ukrainian: Івано-Франківськ, also transliterated Ivano-Frankovsk from Russian: Ивано-Франковск) (formerly in Polish: Stanisławów, German: Stanislau) is a city in the Ukrainian part of East Galicia." 
+Izmir,Turkey,38.42,27.14,4320519.0,"thumb|270px|Clock tower in Konak Square, iconic symbol of the city" +Kahramanmaras,Turkey,37.5833,36.9333,443575.0,"Kahramanmaraş, which used to be known as Maraş, is a city in Turkey, located on the crossroad of southern, eastern and southeastern Turkey." +Kaliningrad,Russia,54.7003,20.4531,475056.0,"Kaliningrad (Russian: Калинингра́д kuh-leen-een-GRAHD) , also known by its original German name, Königsberg, is the capital city of Kaliningrad Oblast in Russia. It has about 475,000 inhabitants (2018)." +Kars,Turkey,40.6078,43.0958,115891.0,"Kars is a city in Eastern Anatolia. It is most frequently visited as a jumping off point for travelers going to Ani, but it is a viable destination in its own right for its 19th-century Russian imperial buildings, and, of course, its role as the setting for Orhan Pamuk's famous novel Snow." +Kaunas,Lithuania,54.8972,23.8861,381007.0,"Kaunas is the second-largest city in Lithuania, with a population of some 288,000 people. The main reason to visit is its charming Old Town, connected to the 19th century New Town ranged along Laisvės alėja." +Kayseri,Turkey,38.7225,35.4875,1389680.0,"Kayseri is a city in Central Anatolia, 350 km southeast of Ankara. In 2021 the population was 1." +Kazan,Russia,55.7964,49.1089,1243500.0,Kazan (Russian: Каза́нь kuh-ZAHN) is the capital of Russia's republic of Tatarstan and the center of the world Tatar culture. +Kharkiv,Ukraine,49.9925,36.2311,1446107.0,"Kharkiv (Ukrainian: Харків, also transliterated Kharkov from Russian: Харьков) is a major city in the Kharkiv region of Ukraine and is the second largest city in Ukraine with a population of over 1.5 million inhabitants." +Kiel,Germany,54.3233,10.1394,246601.0,"Kiel is the capital city of the German state of Schleswig-Holstein and has a population of roughly 248,000 (2018). It is located on the Baltic Sea at the end of the ""Kieler Förde""." +Kirov,Russia,58.6,49.65,501468.0,"Kirov (Russian: Ки́ров KEE-ruhf) is the capital city of Kirov Oblast, Russia." +Klagenfurt,Austria,46.6167,14.3,101403.0,Klagenfurt (Slovenian: Celovec) is the capital of Carinthia in Austria. It was one of the eight host cities in the 2008 European Football Championships. +Konya,Turkey,37.8667,32.4833,2232374.0,"Konya is a city in Central Anatolia in Turkey, known as the city of ""whirling dervishes"" and for its outstanding Seljuk architecture. In 2021 Konya metropolis had a population of 2,277,017, the sixth largest in Turkey, but the area of most interest is compact." +Krasnodar,Russia,45.0333,38.9667,948827.0,"Krasnodar is the capital of Krasnodar Krai in southern Russia, with a popolulation in 2018 of just under 900,000. Its main industries are based on agriculture and food." +Kutaisi,Georgia,42.25,42.7,147900.0,"Kutaisi is a city in the Rioni Region of Georgia. The city itself is very cinematographic and charming, and a visit to Kutaisi is almost mandatory to see the Bagrati Cathedral and Gelati Monastery, which are UNESCO World Heritage sites and offer views from the mountain slopes over the city and the Rioni River." +Lille,France,50.6278,3.0583,234475.0,"Lille (Dutch: Rijsel) is the capital of the Hauts-de-France region in northern France and the core of one of the largest metropolitan agglomerations in the country. Historically, it has also been the capital of Flanders, and later an industrial powerhouse, thanks to which it now boasts a large and handsome historic centre." 
+Ljubljana,Slovenia,46.0514,14.5061,286745.0,"Ljubljana (""lee-oo-blee-AH-nuh"") is the small but delightful capital of Slovenia. While the city's population had grown to 295,500 in 2020, the sights and amenities are concentrated in the charming old centre." +London,United Kingdom,51.5072,-0.1275,11262000.0,"Noisy, vibrant and truly multicultural, London is a megalopolis of people, ideas and frenetic energy. The capital and largest city of the United Kingdom sits on the River Thames in South-East England, Greater London has a population of a little over 9 million." +Luxembourg,Luxembourg,49.6117,6.1319,132780.0,"The Grand Duchy of Luxembourg (Luxembourgish: Groussherzogtum Lëtzebuerg, French: Grand-Duché de Luxembourg, German: Großherzogtum Luxemburg), is a landlocked Benelux country at the crossroads of Germanic and Latin cultures." +Lviv,Ukraine,49.8425,24.0322,724314.0,"Lviv (also spelled L'viv; Ukrainian: Львів; Polish: Lwów, German: Lemberg, Russian: Львов), formerly known as Lvov after its Russian name, is in Western Ukraine and used to be the capital of East Galicia. It's the biggest city of the region and a major Ukrainian cultural centre on the UNESCO World Heritage List." +Lyon,France,45.76,4.84,522969.0,"Lyon is the capital of the French administrative region of Auvergne-Rhône-Alpes. A city of half a million, Lyon alone is the country's third-largest city, but its metropolitan area is only second in population to Paris." +Maastricht,Netherlands,50.85,5.6833,277721.0,"By many considered to be the most beautiful city of the country, Maastricht is the southernmost city in the Netherlands. It's the capital of the province of Limburg and famous for what the Dutch call the ""Burgundian"" way of life." +Madrid,Spain,40.4169,-3.7033,6211000.0,"Madrid is Spain's capital and largest city. A city that has been marked by Spain's varied and tumultuous history, Madrid has some of Europe's most impressive cultural and architectural heritage, which includes grand avenues, plazas, buildings and monuments, world-class art galleries and museums, highly popular football teams, and cultural events of international fame for everyone." +Magdeburg,Germany,52.1317,11.6392,236188.0,"Magdeburg is the capital city of the Bundesland of Saxony-Anhalt, Germany, with a population of 240,000 (2018). Magdeburg has become a modern city with numerous interesting sights of high importance and uniqueness, as well as many parks, which make Magdeburg the third greenest city in Germany." +Malatya,Turkey,38.3486,38.3194,426381.0,thumb|350px|New Mosque at the central square +Milan,Italy,45.4669,9.19,1366180.0,"Milan (Italian: Milano; Milanese: Milan) is financially the most important city in Italy, and home to the Borsa Italiana stock exchange. It is the second most populous city proper in the country, but sits at the centre of Italy's largest urban and metropolitan area." +Minsk,Belarus,53.9,27.5667,2009786.0,"Minsk (Belarusian: Мінск, Russian: Минск) is the capital and largest city of the Republic of Belarus. Its population is about two million people in 2021." +Miskolc,Hungary,48.0833,20.6667,150695.0,"Miskolc, with population of about 157,000 (2017), is the third largest city in Hungary, located in the north-east of the country, east of Bükk mountains." +Moscow,Russia,55.7558,37.6178,17332000.0,"Since its founding in 1147, Moscow (Russian: Москва, Moskva) has been at the crossroads of history as the capital of empires and a frequent target for invaders. 
As the capital of the Russian Empire, the Soviet Union, and, today, the Russian Federation, it has played a central role in the development of the largest country in the world." +Munich,Germany,48.1375,11.575,2606021.0,"Munich (German: München, Bavarian: Minga) is the capital of the federal state of Bavaria in the south of Germany. Within the city limits, Munich in 2021 had a population of just under 1." +Murcia,Spain,37.9861,-1.1303,672773.0,You could be looking for: +Murmansk,Russia,68.9706,33.075,298096.0,"Murmansk (Russian: Му́рманск) is a city in the extreme northwest of Russia and the world's largest city north of the Arctic Circle. It lies in the Kola Bay on the Kola Peninsula, by the Barents Sea." +Mykolaiv,Ukraine,46.975,31.995,498748.0,"Mykolaiv (Ukrainian: Миколаїв, also transliterated Nikolaev or Nikolayev from Russian: Николаев) is a city in Southern Ukraine. It is an important shipbuilding centre and transportation hub for Ukraine, and has a large military presence." +Nalchik,Russia,43.4833,43.6167,265162.0,"Nalchik is the capital city of Kabardino-Balkaria, a republic located in the very south of the Russian Federation." +Nantes,France,47.2181,-1.5528,318808.0,"Nantes (Breton: Naoned) is the capital of Pays de la Loire region in northwest France. Historically it was part of Brittany, whose dukes built up its castle and made the town their capital." +Naples,Italy,40.8333,14.25,966144.0,"Naples (Italian: Napoli; Neapolitan: Napule) in Italy, an ancient port on the Mediterranean sea. With just short of a million citizens, is the third most populous municipality." +Nevsehir,Turkey,38.6264,34.7139,153117.0,"Nevşehir is one of the major cities in Cappadoccia Region, which displays a beautiful combination of nature and history. The traditional main sources of income of the city, carpet weaving and viticulture, have been overtaken by tourism, because of its proximity to the underground shelters, the fairy chimneys, monasteries, caravanserais and the famous rock-hewn churches of Göreme." +Nicosia,Cyprus,35.1725,33.365,330000.0,Nicosia (Greek: Λευκωσία; Turkish: Lefkoşa) is the capital of Cyprus and is the largest city by far. +Novi Sad,Serbia,45.2542,19.8425,380000.0,thumb|right|350px|Freedom square (Trg Slobode) +Oradea,Romania,47.0722,21.9211,196367.0,"Oradea is one the few undiscovered gems of Romania's tourism. Despite being one of the largest and most important cities in Transylvania, and having a high degree of administrative, economic and commercial importance, it is often overlooked by tourists in favor of other Transylvanian cities such as Brasov, Sibiu, Sighisoara or Cluj-Napoca." +Orenburg,Russia,51.7667,55.1,564773.0,"Orenburg (Russian: Оренб'ург, Uh-rehn-BOORK) is the capital of Orenburg Oblast. Every citizen will point you the sign at the bridge across the Ural river, supposedly landmarking the geographical border between Europe and Asia (the actual boundary is further east)." +Pamplona,Spain,42.8167,-1.65,203418.0,"Pamplona (Basque: Iruña) is a city in Navarra, Spain. It is most famous world-wide for its San Fermín festival, held each year from July 6-14." +Paris,France,48.8567,2.3522,11060000.0,thumb|300px|The Eiffel Tower and the river Seine +Penza,Russia,53.2,45.0,523726.0,There's more than one place called Penza: +Perm,Russia,58.0139,56.2489,1048005.0,"Perm (Russian: Пермь p`yehr`m`) is a city in Perm Krai, Russia." +Perugia,Italy,43.1122,12.3889,165683.0,"Perugia is a city in the Italian region of Umbria. 
It has an important university that attracts many foreign students, is a major center of medieval art, has a stunningly beautiful central area and is home of the Umbria Jazz Festival." +Petrozavodsk,Russia,61.7833,34.35,278551.0,thumb|350 px|Old and New Petrozavodsk +Plovdiv,Bulgaria,42.15,24.75,383540.0,thumb|Old Plovdiv +Podgorica,Montenegro,42.4413,19.2629,150977.0,"Podgorica (Montenegrin: Подгорица) is the capital of Montenegro. While not a typical European eye candy, the city is definitely worth visiting, owing to its interesting mix of old and new, its café culture and nightlife, and its laid back Mediterranean atmosphere." +Porto,Portugal,41.1621,-8.622,1278210.0,"Porto is Portugal's second largest city and the capital of the Northern region, and a busy industrial and commercial centre. The city isn't very populous (about 238,000 inhabitants in 2024), but the Porto metropolitan area has some 1." +Prague,Czechia,50.0875,14.4214,1335084.0,"Prague (Czech: Praha) is the capital and largest city of the Czech Republic. The city's historic buildings and narrow, winding streets are testament to its centuries-old role as capital of the historic region of Bohemia." +Pristina,Kosovo,42.6633,21.1622,161751.0,"Pristina (Albanian: Prishtinë, Serbian: Priština), the capital city of Kosovo, is not beautiful: it is messy, with centuries-old Ottoman heritage competing with communist designs and post-communist architectural monstrosities. However, there is a powerful draw to this city of 162,000 people (2011), offering much to passing visitors." +Pskov,Russia,57.8167,28.3333,209840.0,"Pskov is the largest city and administrative capital of Pskov Oblast. One of the oldest cities in the country, it has preserved many unique architectural monuments of the 12th-16th centuries." +Rennes,France,48.1147,-1.6794,220488.0,"Rennes is the chief city of Brittany in northwest France. It's mostly modern and industrial, but has many grand 18th and 19th century buildings, and survivors of earlier times." +Riga,Latvia,56.9489,24.1064,920643.0,"Riga is the financial, creative, and cultural centre of Latvia. It is the capital and the largest city in Latvia, it is also the largest city in the Baltic States." +Rijeka,Croatia,45.3272,14.4411,191293.0,"Rijeka (literally ""River"" in Croatian language) is a city in Kvarner Bay, a northern inlet of the Adriatic Sea in Croatia. It is the principal seaport of the country." +Rivne,Ukraine,50.6192,26.2519,246574.0,"Rivne (Ukrainian: Рівне, also transliterated Rovno from Russian: Ровно) (Polish: Równe) is a city in Western Ukraine." +Rome,Italy,41.8931,12.4828,2872800.0,"Rome (Italian and Latin: Roma), the 'Eternal City', is the capital and largest city of Italy and of the Lazio region. It's the famed city of the Roman Empire, the Seven Hills, La Dolce Vita, the Vatican City and Three Coins in the Fountain." +Rouen,France,49.4428,1.0886,112321.0,"Rouen is the capital of the French region of Upper Normandy on the River Seine, 135 km (approximately 90 minutes drive) northwest from the centre of Paris. The city has a population of 110,000 and its metropolitan area includes some 666,000 inhabitants (2017)." +Saint Petersburg,Russia,59.95,30.3167,5384342.0,"Saint Petersburg (Russian: Са́нкт-Петербу́рг Sankt-Peterburg), known as Petrograd (Петроград) in 1914-1924 and Leningrad (Ленинград) in 1924-1991, is the second largest city of Russia, with 5.6 million inhabitants (2021), and the former capital of the Russian Empire." 
+Salzburg,Austria,47.8,13.045,155021.0,"Salzburg is a city in Austria, near the border with Germany's Bavaria state, with a population of 157,000 (2020). It was the setting for the 1965 movie The Sound of Music, so you may think you know all there is to see in Salzburg if you have seen the movie." +Samara,Russia,53.2028,50.1408,1169719.0,thumb|300px|Iversky Convent +Samsun,Turkey,41.2903,36.3336,1335716.0,"Samsun, in the Central Karadeniz region of Turkey, is the largest city on the Turkish Black Sea coast." +Santander,Spain,43.4628,-3.805,172221.0,"Santander is the capital and largest city of the province of Cantabria in Spain. It's on the north coast, with many beaches, ferries from Britain, and a small historic centre." +Sarajevo,Bosnia and Herzegovina,43.8564,18.4131,419957.0,"Sarajevo is the capital of Bosnia and Herzegovina, and its largest city, with 420,000 citizens in its urban area (2013). Sarajevo metropolitan area that has a population of 555,000 also includes some neighbourhoods of ""East Sarajevo"" that are a part of Republika Srpska." +Saratov,Russia,51.5333,46.0167,845300.0,Saratov (Russian: Сара́тов suh-RAH-tuhf) is a city in the Volga region of Russia. +Satu Mare,Romania,47.79,22.89,102411.0,"Satu Mare is a city in the Maramureș region of Romania. As of 2021, it had a population of 91,520." +Sibiu,Romania,45.7928,24.1519,147245.0,"Sibiu is a town in southern Transylvania, Romania, 280 km by road from Bucharest. The old town centre is very attractive." +Siirt,Turkey,37.925,41.9458,166332.0,Siirt is a city in Southeastern Anatolia. +Simferopol,Ukraine,44.9484,34.1,341799.0,"Simferopol (Russian: Симферополь, Ukrainian: Сімферополь) is the capital city of the Crimea." +Sivas,Turkey,39.75,37.0167,377561.0,"Sivas is a city in Central Anatolia, with a population in 2020 of 335,570. By road it's 450 km east of Ankara, and stands at 1278 m elevation." +Skopje,Macedonia,41.9961,21.4317,640000.0,"Skopje (Macedonian: Скопје, Albanian: Shkup, Turkish: Üsküp) is the capital and largest city of the Republic of North Macedonia. Skopje is city of many cultures and many centuries." +Sofia,Bulgaria,42.7,23.33,1547779.0,Sofia (София) is the capital of Bulgaria. It is also the biggest city in the country with about 2 million citizens (including suburbs). +Stavanger,Norway,58.97,5.7314,237369.0,"Stavanger is Norway's fourth largest city, at 145,000 citizens (2021). It is the largest city in, and the administrative centre of, Rogaland county in West Norway." +Stavropol,Russia,45.05,41.9833,450680.0,Stravropol (Ставрополь) is a city in Russia. +Stockholm,Sweden,59.3294,18.0686,1611776.0,"Stockholm is Sweden's capital and largest city, with nearly a million inhabitants in the city, and 2.4 million within Stockholm County (as of 2021)." +Strasbourg,France,48.5833,7.7458,290576.0,"thumb|300px|Strasbourg railway station, known for the sky dome" +Stuttgart,Germany,48.7775,9.18,2787724.0,"Stuttgart is the capital of the Bundesland of Baden-Württemberg in Germany. With a population of approximately 632,000 in the immediate city (2017) and more than 5." +Syktyvkar,Russia,61.6667,50.8167,245313.0,thumb|300px|Street scene in Syktyvkar. +Szczecin,Poland,53.4325,14.5481,403833.0,"Szczecin, (pronounced Shchetsin, German: Stettin, Latin: Stetinum) is a maritime port city and the capital of Zachodniopomorskie in Poland. The city has a population of over 400,000, with almost 780,000 living in its metro area (2019)." +Tallinn,Estonia,59.4372,24.7453,438341.0,"Tallinn is Estonia's capital and largest city. 
Tallinn is an important port of the Baltic Sea, with the busy passenger section of the port reaching the foothill of the picturesque medieval Old Town, which has been astonishingly well preserved and was inscribed on the UNESCO World Heritage List in 1997." +Tampere,Finland,61.4981,23.76,334112.0,thumb|350px|View to Näsinneula tower in Tampere +Tbilisi,Georgia,41.7225,44.7925,1118035.0,"Tbilisi (Georgian: , Russian: ), is the capital city of the country of Georgia, lying on the banks of the Mtkvari River. The metropolitan area covers 726 km² (280 mi²) and has a population of approximately 1." +Thessaloniki,Greece,40.6403,22.9347,824676.0,"Thessaloniki (Greek: Θεσσαλονίκη, Albanian, Turkish: Selanik, Serbian, Bulgarian, Macedonian: Солун, Solun, Judaeo-Spanish: סאלוניקו / Saloniko, Romanian: Salonic, Aromanian: Sãrunã, French: Salonique) is the capital of the administrative region of Central Macedonia and the whole historical region of Macedonia, Greece, and is, at about one million inhabitants (2011), the second largest city in the country. More importantly, it is a city with a continuous 3,000-year history, preserving relics of its Roman, Byzantine and Ottoman past and of its formerly dominant Jewish population." +Tirana,Albania,41.3289,19.8178,418495.0,"Tirana (Albanian: Tiranë) is the bustling and relatively modernised capital of Albania. It is the most important economic, financial, political and trade centre in the country." +Toulouse,France,43.6045,1.444,493465.0,"Toulouse is the chief city of Haute-Garonne in the Occitanie region of France. It stands north of the Pyrenees on the River Garonne, halfway between the Atlantic and the Mediterranean." +Trabzon,Turkey,41.005,39.7225,426882.0,"Trabzon (formerly Trebizond) is the largest city in the Eastern Karadeniz region of Turkey. Trabzon functioned as an independent state or empire during several periods in its long history, ruling over a vast area from Sinop in the west to Georgia in the east, even including territory in Crimea." +Turku,Finland,60.45,22.2667,252468.0,"Turku (Swedish: Åbo) is Finland's oldest city and the biggest one until the mid 1800s. Believed to have been founded in the early 13th century, it is the cradle of modern Finnish culture and has extensively influenced Finnish history." +Ufa,Russia,54.7261,55.9475,1115560.0,"Ufa (Russian: Уфа́ oo-FAH, Bashkirː ӨФӨ oe-FOE), the capital of Bashkortostan, is a large, interesting, and rapidly developing city, with a population of over 1.1 million in 2018." +Uzhhorod,Ukraine,48.6239,22.295,114897.0,"Uzhhorod (Ukrainian: Ужгород, also transliterated Uzhgorod from Russian: Ужгород; Hungarian: Ungvár, German: Uschhorod) is a city in Western Ukraine, the administrative center of Zakarpatska Oblast (Transcarpthian Region). The population of Uzhhorod is multiethnic." +Valencia,Spain,39.47,-0.3764,792492.0,"Valencia (València in Catalan/Valencian) is a charming old city and the capital of the Valencian Community. With just over 830,000 inhabitants in 2023, it is Spain’s third-largest city and, after Barcelona, the most significant cultural centre along the Spanish Mediterranean coast." +Valladolid,Spain,41.6528,-4.7236,297775.0,You may be looking for: +Van,Turkey,38.4942,43.38,353419.0,"Van (pronounced vahn in Turkish, wahn in Kurdish) is a city in Eastern Anatolia, Turkey. For Turks from the other regions of Turkey, it has a surprising beach resort feel in an area where their country is farthest from the sea." 
+Varna,Bulgaria,43.2167,27.9167,348668.0,"Varna (Варна) is a large city on the Black Sea coast in the northeast of Bulgaria. It's the larger of the country's two major sea ports (the other one is Burgas), and a gateway to the seaside resorts on the northern part of the coast." +Vienna,Austria,48.2083,16.3725,1973403.0,"Vienna (German: Wien; Austro-Bavarian: Wean) is the capital of Austria and by far its most populous city, with an urban population of 2 million and a metropolitan population of 2.9 million (2023)." +Vilnius,Lithuania,54.6872,25.28,708203.0,"Vilnius is the capital and largest city of Lithuania. It has a beautiful baroque Old Town, listed as a , and excellent tourist facilities in all price ranges." +Vinnytsia,Ukraine,49.2333,28.4833,371855.0,"Vinnytsia (Ukrainian: Вінниця, also transliterated Vinnitsa from Russian: Винница) is a city in Central Ukraine, the administrative center of the Vinnytsia region. 267 km southwest of Kyiv, it has been known since the Middle Ages, and is home to a former Soviet Cold War airbase." +Vitoria-Gasteiz,Spain,42.85,-2.6833,253672.0,"Vitoria-Gasteiz (Spanish: Vitoria, Basque: Gasteiz) is in the heart of the Basque Country in Spain. The old town has some of the best preserved medieval streets and plazas in the region and it is one of very few cities with two cathedrals." +Vladikavkaz,Russia,43.04,44.6775,306978.0,Vladikavkaz is the capital city of North Ossetia and a major transit hub for the North Caucasus region. Its position on the Georgian Military Highway makes it a staging post for journeys to both Georgia and South Ossetia. +Volgograd,Russia,48.7086,44.5147,1015586.0,"Volgograd (Russian: Волгогра́д vuhl-gah-GRAHD) is a large city along the west bank of the Volga River in Southern Russia. It used to be known as Stalingrad, a name which the city is still known as on several war-related dates each year (according to local legislation)." +Voronezh,Russia,51.6717,39.2106,1050602.0,[of the Annunciation] +Warsaw,Poland,52.23,21.0111,1860281.0,Warsaw (Polish: Warszawa) is Poland's capital and largest city. Warsaw is a bustling metropolis and one of the European Union's fastest-developing capitals and the Union's ninth most populous urban centre. +Zagreb,Croatia,45.8167,15.9833,809268.0,thumb|350px|right|Ban Jelačić Square +Zaporizhzhia,Ukraine,47.85,35.1175,741717.0,"Zaporizhzhia (Ukrainian: Запоріжжя, also transliterated Zaporozhye from Russian: Запорожье) is a city in Ukraine." +Zaragoza,Spain,41.65,-0.8833,675301.0,"Zaragoza is the capital and largest city of Aragon in Spain, and one of Spain's five largest cities, but it is one of the least known outside of Spain. Founded on the river Ebro during the Roman Empire as Cesaraugusta, Zaragoza now holds a large cultural and architectural heritage attesting to 2,000 years of affluence and importance." +Zurich,Switzerland,47.3744,8.5411,436332.0,"Zurich (German: Zürich, Swiss German: Züri) is the largest city in Switzerland, with a population of some 435,000 (2018) in the city, and 1.3 million (2009) in the metro area." 
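The rows above follow a fixed six-column schema — city, country, latitude, longitude, population, and a free-text Wikivoyage abstract — which the wikivoyage_eu example ("Wikivoyage EU chatbot using llama3 and ObjectBox", per its pubspec.yaml below) presumably uses as its retrieval corpus. As a rough, hedged sketch of how one such row can be read in Dart — using package:csv, which the example's lockfile below resolves to 6.0.0 — the snippet parses the file into a small record type; the file path and the City class are illustrative assumptions, not part of this patch:

import 'dart:io';

import 'package:csv/csv.dart';

/// One row of the dataset: name, country, latitude, longitude,
/// population, and the Wikivoyage abstract (the text a RAG pipeline
/// would embed and later retrieve).
class City {
  const City(this.name, this.country, this.lat, this.lng, this.population,
      this.abstract);

  final String name;
  final String country;
  final double lat;
  final double lng;
  final double population;
  final String abstract;
}

Future<void> main() async {
  // Assumed location of the dataset; adjust to wherever the CSV
  // actually lives inside examples/wikivoyage_eu.
  final raw = await File('wikivoyage_eu.csv').readAsString();

  // The abstracts contain commas and escaped quotes, so use a real
  // CSV parser instead of splitting on ','.
  final rows = const CsvToListConverter(eol: '\n').convert(raw);

  // Keep only well-formed data rows (this also skips a header row,
  // if the file has one).
  final cities = [
    for (final r in rows)
      if (r.length == 6 && r[2] is num)
        City(
          r[0] as String,
          r[1] as String,
          (r[2] as num).toDouble(),
          (r[3] as num).toDouble(),
          (r[4] as num).toDouble(),
          r[5] as String,
        ),
  ];

  stdout.writeln('Loaded ${cities.length} cities');
  stdout.writeln('${cities.first.name}, ${cities.first.country}: '
      '${cities.first.abstract}');
}

In the actual example the abstracts would then be embedded (e.g. via the Ollama models declared in pubspec.yaml) and stored in ObjectBox for retrieval; that wiring is not shown here because it is outside this hunk.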
diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock new file mode 100644 index 00000000..f132728e --- /dev/null +++ b/examples/wikivoyage_eu/pubspec.lock @@ -0,0 +1,350 @@ +# Generated by pub +# See https://dart.dev/tools/pub/glossary#lockfile +packages: + async: + dependency: transitive + description: + name: async + sha256: "947bfcf187f74dbc5e146c9eb9c0f10c9f8b30743e341481c1e2ed3ecc18c20c" + url: "https://pub.dev" + source: hosted + version: "2.11.0" + beautiful_soup_dart: + dependency: transitive + description: + name: beautiful_soup_dart + sha256: "57e23946c85776dd9515a4e9a14263fff37dbedbd559bc4412bf565886e12b10" + url: "https://pub.dev" + source: hosted + version: "0.3.0" + characters: + dependency: transitive + description: + name: characters + sha256: "04a925763edad70e8443c99234dc3328f442e811f1d8fd1a72f1c8ad0f69a605" + url: "https://pub.dev" + source: hosted + version: "1.3.0" + collection: + dependency: transitive + description: + name: collection + sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + url: "https://pub.dev" + source: hosted + version: "1.18.0" + cross_file: + dependency: transitive + description: + name: cross_file + sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + url: "https://pub.dev" + source: hosted + version: "0.3.4+1" + crypto: + dependency: transitive + description: + name: crypto + sha256: ff625774173754681d66daaf4a448684fb04b78f902da9cb3d308c19cc5e8bab + url: "https://pub.dev" + source: hosted + version: "3.0.3" + csslib: + dependency: transitive + description: + name: csslib + sha256: "706b5707578e0c1b4b7550f64078f0a0f19dec3f50a178ffae7006b0a9ca58fb" + url: "https://pub.dev" + source: hosted + version: "1.0.0" + csv: + dependency: transitive + description: + name: csv + sha256: c6aa2679b2a18cb57652920f674488d89712efaf4d3fdf2e537215b35fc19d6c + url: "https://pub.dev" + source: hosted + version: "6.0.0" + fetch_api: + dependency: transitive + description: + name: fetch_api + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" + url: "https://pub.dev" + source: hosted + version: "2.2.0" + fetch_client: + dependency: transitive + description: + name: fetch_client + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" + url: "https://pub.dev" + source: hosted + version: "1.1.2" + ffi: + dependency: transitive + description: + name: ffi + sha256: "493f37e7df1804778ff3a53bd691d8692ddf69702cf4c1c1096a2e41b4779e21" + url: "https://pub.dev" + source: hosted + version: "2.1.2" + fixnum: + dependency: transitive + description: + name: fixnum + sha256: "25517a4deb0c03aa0f32fd12db525856438902d9c16536311e76cdc57b31d7d1" + url: "https://pub.dev" + source: hosted + version: "1.1.0" + flat_buffers: + dependency: transitive + description: + name: flat_buffers + sha256: "380bdcba5664a718bfd4ea20a45d39e13684f5318fcd8883066a55e21f37f4c3" + url: "https://pub.dev" + source: hosted + version: "23.5.26" + freezed_annotation: + dependency: transitive + description: + name: freezed_annotation + sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + url: "https://pub.dev" + source: hosted + version: "2.4.1" + html: + dependency: transitive + description: + name: html + sha256: "3a7812d5bcd2894edf53dfaf8cd640876cf6cef50a8f238745c8b8120ea74d3a" + url: "https://pub.dev" + source: hosted + version: "0.15.4" + http: + dependency: transitive + description: + name: http + sha256: 
"761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + url: "https://pub.dev" + source: hosted + version: "1.2.1" + http_parser: + dependency: transitive + description: + name: http_parser + sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + url: "https://pub.dev" + source: hosted + version: "4.0.2" + iregexp: + dependency: transitive + description: + name: iregexp + sha256: "143859dcaeecf6f683102786762d70a47ef8441a0d2287a158172d32d38799cf" + url: "https://pub.dev" + source: hosted + version: "0.1.2" + json_annotation: + dependency: transitive + description: + name: json_annotation + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" + url: "https://pub.dev" + source: hosted + version: "4.9.0" + json_path: + dependency: transitive + description: + name: json_path + sha256: "149d32ceb7dc22422ea6d09e401fd688f54e1343bc9ff8c3cb1900ca3b1ad8b1" + url: "https://pub.dev" + source: hosted + version: "0.7.1" + langchain: + dependency: "direct main" + description: + path: "../../packages/langchain" + relative: true + source: path + version: "0.7.1" + langchain_community: + dependency: "direct main" + description: + path: "../../packages/langchain_community" + relative: true + source: path + version: "0.2.0+1" + langchain_core: + dependency: "direct overridden" + description: + path: "../../packages/langchain_core" + relative: true + source: path + version: "0.3.1" + langchain_ollama: + dependency: "direct main" + description: + path: "../../packages/langchain_ollama" + relative: true + source: path + version: "0.2.1+1" + langchain_openai: + dependency: "direct overridden" + description: + path: "../../packages/langchain_openai" + relative: true + source: path + version: "0.6.1+1" + langchain_tiktoken: + dependency: transitive + description: + name: langchain_tiktoken + sha256: c1804f4b3e56574ca67e562305d9f11e3eabe3c8aa87fea8635992f7efc66674 + url: "https://pub.dev" + source: hosted + version: "1.0.1" + math_expressions: + dependency: transitive + description: + name: math_expressions + sha256: db0b72d867491c4e53a1c773e2708d5d6e94bbe06be07080fc9f896766b9cd3d + url: "https://pub.dev" + source: hosted + version: "2.5.0" + maybe_just_nothing: + dependency: transitive + description: + name: maybe_just_nothing + sha256: "0c06326e26d08f6ed43247404376366dc4d756cef23a4f1db765f546224c35e0" + url: "https://pub.dev" + source: hosted + version: "0.5.3" + meta: + dependency: transitive + description: + name: meta + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + url: "https://pub.dev" + source: hosted + version: "1.15.0" + objectbox: + dependency: transitive + description: + name: objectbox + sha256: "70ff2a7538f6f8bb56136734d574f5bdc1cf29c50cd7207a14ea0c641ecb88ca" + url: "https://pub.dev" + source: hosted + version: "4.0.1" + ollama_dart: + dependency: "direct overridden" + description: + path: "../../packages/ollama_dart" + relative: true + source: path + version: "0.1.0+1" + openai_dart: + dependency: "direct overridden" + description: + path: "../../packages/openai_dart" + relative: true + source: path + version: "0.3.2+1" + path: + dependency: transitive + description: + name: path + sha256: "087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" + url: "https://pub.dev" + source: hosted + version: "1.9.0" + petitparser: + dependency: transitive + description: + name: petitparser + sha256: c15605cd28af66339f8eb6fbe0e541bfe2d1b72d5825efc6598f3e0a31b9ad27 + url: "https://pub.dev" + source: hosted + version: 
"6.0.2" + rfc_6901: + dependency: transitive + description: + name: rfc_6901 + sha256: df1bbfa3d023009598f19636d6114c6ac1e0b7bb7bf6a260f0e6e6ce91416820 + url: "https://pub.dev" + source: hosted + version: "0.2.0" + rxdart: + dependency: transitive + description: + name: rxdart + sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + url: "https://pub.dev" + source: hosted + version: "0.27.7" + source_span: + dependency: transitive + description: + name: source_span + sha256: "53e943d4206a5e30df338fd4c6e7a077e02254531b138a15aec3bd143c1a8b3c" + url: "https://pub.dev" + source: hosted + version: "1.10.0" + sprintf: + dependency: transitive + description: + name: sprintf + sha256: "1fc9ffe69d4df602376b52949af107d8f5703b77cda567c4d7d86a0693120f23" + url: "https://pub.dev" + source: hosted + version: "7.0.0" + string_scanner: + dependency: transitive + description: + name: string_scanner + sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + url: "https://pub.dev" + source: hosted + version: "1.2.0" + term_glyph: + dependency: transitive + description: + name: term_glyph + sha256: a29248a84fbb7c79282b40b8c72a1209db169a2e0542bce341da992fe1bc7e84 + url: "https://pub.dev" + source: hosted + version: "1.2.1" + typed_data: + dependency: transitive + description: + name: typed_data + sha256: facc8d6582f16042dd49f2463ff1bd6e2c9ef9f3d5da3d9b087e244a7b564b3c + url: "https://pub.dev" + source: hosted + version: "1.3.2" + uuid: + dependency: transitive + description: + name: uuid + sha256: "814e9e88f21a176ae1359149021870e87f7cddaf633ab678a5d2b0bff7fd1ba8" + url: "https://pub.dev" + source: hosted + version: "4.4.0" + vector_math: + dependency: transitive + description: + name: vector_math + sha256: "80b3257d1492ce4d091729e3a67a60407d227c27241d6927be0130c98e741803" + url: "https://pub.dev" + source: hosted + version: "2.1.4" + web: + dependency: transitive + description: + name: web + sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + url: "https://pub.dev" + source: hosted + version: "0.5.1" +sdks: + dart: ">=3.3.0 <4.0.0" diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml new file mode 100644 index 00000000..7b4ce9a2 --- /dev/null +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -0,0 +1,12 @@ +name: wikivoyage_eu +description: Wikivoyage EU chatbot using llama3 and ObjectBox. 
+version: 1.0.0 +publish_to: none + +environment: + sdk: ">=3.0.0 <4.0.0" + +dependencies: + langchain: ^0.7.1 + langchain_ollama: ^0.2.1+1 + langchain_community: 0.2.0+1 diff --git a/examples/wikivoyage_eu/pubspec_overrides.yaml b/examples/wikivoyage_eu/pubspec_overrides.yaml new file mode 100644 index 00000000..8b4fec3e --- /dev/null +++ b/examples/wikivoyage_eu/pubspec_overrides.yaml @@ -0,0 +1,16 @@ +# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_community,langchain_ollama,ollama_dart +dependency_overrides: + langchain: + path: ../../packages/langchain + langchain_community: + path: ../../packages/langchain_community + langchain_core: + path: ../../packages/langchain_core + langchain_ollama: + path: ../../packages/langchain_ollama + langchain_openai: + path: ../../packages/langchain_openai + ollama_dart: + path: ../../packages/ollama_dart + openai_dart: + path: ../../packages/openai_dart diff --git a/examples/wikivoyage_eu/rag.png b/examples/wikivoyage_eu/rag.png new file mode 100644 index 0000000000000000000000000000000000000000..ca46092d1d31d894f03a4eeb8070787bd5c66e28 GIT binary patch literal 18434 zcmbTd1yozX*Dsm`cXulUDK5pKIKiRN;ufq}af(wyixk)5THLj`1SwjixD*Hl3KT6~ z^o9Pv`+ax4_10VK-khxD%szYe{ASP2%sIb3iFl!@gojOy4FCY}RFoBT000y?005%J zL_tdMKZvLx1%MaouM{7UulxJofB$~$T7$K&9$uZsey5M&$3r1z^ey@$i$ z!NI|yp`mqkb!TU1@7}#b8X+bjiI0z8US7`2%R4wYI6XaWZEcm6leM+A&B@6b8ymBC za44^+xc>Y1$&)91e0*zbYi4F<{{H^k+uP30&eYV@M@L7Fj*ga=mh0>5(lRntRaG-H zGs8oJ$;ru>n3$51k|QG{Sy@?JTwEX!NLN>nfPkQ(pB&h-ana1#NmfAy;&A!v@xj3H!R*dhTw%e@{e5A{ z=cCJmiLFsr#H{1mEMWeJ%=#8=?-2fb+o&U!v(%w{X2R=R<>2)tCN;4dh#R_6Eh6}aO0vX=i=Jv?(VU*{T3X&-P?P-vojVEQ6nangOA_$=FNOw z-fB(F;p%F?i%aXvm!ByqPZk$%&(1D)celsJ7K@8-4i3f<{mp~5exY8u)frur%?Y*D z#Un#+W1)G|$-*nnS6;W1)WTHw z5O?dlK9SyH=C6S`X)b7zgE4_O`)UP_gnaM*IM5JEVwQds>09{tqzA+SCB$QrRg5s$ z#Z*wnbT}LSHF_jdsdd@h`f}nt(d?J>IW0PRcu025Os%UjoVa$M286!y@oIw&jSD!+ z49$wfd7(5WNA)aUNGkv1FqVj9uH5KG_P}=Fy8i3e+i3)-WYR{zdp~1d1+%!6Y7tf2 ztFP96^hsT}@nmUgV+E?g>s^3p`b^_~Os_?_P{A~LthW1M^F6SLMSu}|-Ehi64z0h9 znff5>b+09s4wlDKEqK3{tD`;8-hx_z~t`7uipHf z%G>PhdleiESIlGXu=@lqR6whWL3rS~l2yU?xuG{B3~g(Nlvkd&62KG{I3!_-lw+mx z=hxuL={98^liF|7X%4`xC`?rCI`T#HGmY6qGOFLUhM`y)Dy3r3A7>o44R}o%}Kpw ze&J>Ft8%rQ0cY+JHHHZpg!(7TN=^-Fz-PH;Rv%-#=Qk3t?U3BedweLIWA%;)wie|D z$daqM`H@N3cS7s+h=Z%d$2#0 zW^ypmf6kMOD*XIw$oFfGIrk(I)rZ^t!!?ww2o370uLeFFjEGk+RCrHSP4uMAJ{Fmy zyXq0iC3C}Oxk3!S$aYUQhm_`TQTRD$kdV<@QdJRsZ}t7mNnq$!&#mCal%_PwaUTzR zSW|)DMSeyNW34}P$Hn6J5Q^1v5PGUdJ`miDRpm%*W3?AR$=Lt46>s&a zRHL3gutMOatbRJ_=kqXgwAx`>XM*sXHVszhX1TPQOxmJ7aY(i*`AGofY)AhVAkT z@J4~ofB|lQ;RP!fodhtf)N?DfI7-|S9#0n19HR{mMW$d?5k#>XP>^+@BWh;N6@0#IX`ZrHRjXqea*US=n;kh2-JHgsNS7X3%de8!!twZ)>3_E>r!2HVuZ*gzU zZ!J~e#I7101^R`!jy8-SGPc})jS6Z6sffl{+D_B2p4OIQz;RX!yNcecT{0}FQ6m`m zkdCaw7qxw+TK)OcS{9j;sV;44rzQhP>WXR3`cMji8r}kNH1`bX&AY%(AY(f#$G)pS zR)GgO6W5}4JTsIZWsI(^Mqv%%Wx=)EJ&{EH;g)`CHx|{U(4wNcpT9+%o!Ah%{pS;uwqOtDe;f3TRY?^lH3UBbT z>fkaIEjuqrb78yCJ9@r>JcC47ZX&9rH~$rgYd-JU_3s%88LNz-c3+X8?l)mU&FV{k zT%3X*PL+Vs+G(8)oSeM*zU-fEo%pfyYZvHO$iR}IXx=B^mdzRn%^GnnPu>c8F^eZJ zF$Iv5$roe5oa$0ELGM(FkN1tTH;5CxRu&JA)4M+y;pnVR^))HVZNDh~yC+=n zcn|?nn=0A8EurVvjSXj@1&8WO z{cJw{wfY6oJI{s{r?YJ#_y>QnpBF?Jf5&`DjNH1{ih<5&B~NtIrS!}x0%^*=@uiP= zV*L6&5i4(dG&iJQocW>x{fyPLASbl-{o1J$ASnFgc9fM|S`CgIjOv*lvJ(XwFMN-5 zUA$T;!V{V*FrObZ#zf0U4T#fKTGJuv-s9@&t7`Al`_>AMr*49Kq+Xs|T`7Ow>g6v* z+RBsi78-dS(_XB~d9XHX?3H?8f43igXKgq~h+Q*W5itU}elYU8Z8_Xk^@ST-LnbST!r*Lhzq2_5v zq#xfrwcR4LJwdqq_l!Vvcw8CFtCee69@QfJKOfXzp}kSN4mJ6T&l~p3L<*t%iJKx` 
z+Lbwb1R#zPrd5g7WvAim=F`(^jM)KOqpW4LOMvAjqe=?nez>?0S8`eM1Y#JKP(xYE zb8)|r=Pf#u^^QaGx+xo2fc_h<+xz?bfM0sS^+rDyv)tbTT@L1MzOmOfMnj6U|Hx~s z8%o?(Vq6jaD##>>+8}CdgBiAAY6Sr$p94)qL7{8ZwGZ4{s>rA(*ni=8(7UYHiD6)B zb^y}E`?L|l?b_16&=g#D*+Stzq!;6hN3R?Ch;YFRFzIj$HO_mlODHFot{ev~jr zc&%sQ>laC++y1a+!x;mmi0Mkph5jU#oGXy%kwblczaag76nYVue zebJS47vk88+UCwxR54#PTw2+43I)iG@<#wkhcZCnSrK8@?^Br{y)s8qSsR_H*s>kq$9sViz+uO_`K*^~ zl7-Oxk@znfW~9`dO^kf$Z^E)C+eIYaBaq=?T;!a@b;Ayr|F`gW0&_o;DUGJmXN){cy6sKGOZ-64LCF*~`eBa!1 z{k`RpLua!_QrnXT^MRv_xrEE|8euik1IA}k#oq`?hn=(JX7(=o((G^=Qr^M!uu%`GgpnGi6=ZabS zcuyRFmS#2cffy~tX*;6Ih=^XfriGGn@cs3yD1u$>@kYxZg z-RJf9Hk5$bxnjux$?DWm_JGuXZdirgT=z(%W**df2H^%!^Q`~C3x3ItkYtn{nnimf z2%oc`lRuCA`x!Y0jdqD3{=j!8rPPxryk7p;m(yz4UGhH27oyWt;$RkC4Wu(4dKa5> zq@+pUHP}@3rsf}#8_wNDyDZe53s|HOgFY=gbZbDJ0l*vUsKKpcNz8#!8X=ay3Aj78HBzrXVK((?8{J z1W_XH{aUopTvwuts1YY8yl0g;aRq+pu5&;x7!aKj3dH4ugD9aev|0eN1ViJ418@*T z$gfd*d~V=96}+6h^%d|39#o^;o?e#()())9d?SKJ3ALd`YC<2aqyW$j{&R|~M*P2| zdPYu`(d`FbsnZ6Y48n_C-G8^vj|jp8PYP80dR%vf2Nvk#oUp7&s(0+O&7?W@`Jh3r= zT+gg3Uk@m@mi)dG3wZzTKA1m|)3eJXbU^v>#-E=U(yI&_P^KIvqXuJ9{?FTjIXWZ( zbszfxJ0{`rvBBzJIXZzaky8*t%=bTUY=P)a21;NkI=pK9!9Wd8 zxl7!qe4yE^f7zBTjBF^C65`P+hc$b{}+sOw14J3uO6@Itn8H# z?BN)|4bh_$(EB{4hdv~H#y~3Q+|jI*+z>(BL&pW7(Sqvo`gXu=YCZ^cNga%U0oh2> zF4yEQdnug8U1ni0vKxY?AxrK=4lniX^mi7b1&u4`x^t_@8y0{#l;AP)JvL;i8+jT^ z0}+=*_OsBnPdOZe3mQ%8PN@PV`OdM zcHMSd+!g^%F(YKeni30q5J|B`uw3%|u{2C}eNpE(m?QSIlL!rZ=4Al>&^A{3t5!H% zXfPKZ8bO}?woli?L(^}Su9V$7NG;! zoe;hV{hL{Shl{KQRg58G!XFFTkO!3O^68WK1U)iL++g#_cUmQ47I{}#;8EMjtlDSYNOgQKNs-Yh{2pVYCd0TMPJ=9S{Hsoqr&3!T-pttWfVKht|Yd9 zlPOqG(?}&Sg&j;r4KfbcnmvAE=maU$u}or+gBS@DQc{)K?0x;jotpSX&W>WRb3AHD zc@#i}MWwR;>*B&`Pr}f+%lIqrUj4`DM&?>MtI*e7>B}^RLBnl){oDANS>e%*22c1< zozJ6xn0aFuD)l?(>d3n#B#=L%OdQ+_uVI}X_B(y9P2qtH1~Vf2{yIR}-duBd0Pb;} ze38A24MxgVZw`dn#ry;J%i`YZxZf?`9I){<;HiFC7he<&NCm0No5?`l!(X=O`)jI> z2ePu5JARoiL?FhNwhmGf43c*%46O^+%hP)W@>&OGQ)Gy(>t8Io+5E^Fc zWo$FKAGJ-@gS(4!^Vde@9?`ELaAyHvik)<2wVz?={XVDc)yaeQ-Tg((lYn!>+2{nF z{+(B3YAsRW*4bu*I4nvZ`TbqR|2SFR4lfym#x{8Ma8zf)x>rc(1Hw<&+cA1qn`Z?e zs@f$zlL4PHTm@(v-L7FSCl^9mN(x0dnXc3$(AvN^DnTLxoNgUdMm>K_?m2KNM{kK= zi=kDX^FyxBx}F4_Vs3v0|6vVynEp)(E-ID1u0V-;xPuLLRN(V%+~?tK?ai^Y^>B?< zCKI`y$0YnTD175BgJ-a38GV+D(Rg8sIMV?enGrr}BATx27wV`TH z<~!3G`kQa^f)>XBKFzJHP&5D@yPC&*=7%uRwBSrpNNn83$D*9-xwklgHSxq}rh%LW z0^g=;WeKdqq8=PmaUlT62LWfyuej(+TYC8qX6Y~s{IV-IxDi3cB*Q+A`CahoR$NWU z9YMK8^egNI85-s2YT~zy_9bcAEtq-5LOD1(H+&3HQ@>e%&NvfzSY!ZunMXRGB7Bkw z6n8JMkHh9icS||rB!%1*90}i|zIKJOW%V>Nd=?b~aeoI;6xm}yv)>YFm|>2~366WO z!`5OFKONK2T1*kyvUSt%;8uW4;)_L+`b|D?E7VbjzWL~X15`iZe%{~DNpU>B74(W3 zA;K;hd5w9s{e1l%oFx)>h~qD<-$PkOS(_8(5PyuX$hET3#?8QI8%LF2Y=RKQ9PV#stV(FZy<+R&^uN-+Si(8QsR1uAtP0~$ z*Gtf{q;aINdX;RYA*u;$Th!})tWSCfKnD2zj2U2|8BYS4V0S=DDV^Ji3uxiYBN99T z=YBPH_X^(7h7-S!8bes+%&2vNi+>-kHdms+QnYRD%wwr%fNYwLR{4Z4nRc9u5-TOZVGFDD{bxIr&;o+)^u zxaoY*PCD2kucHRlMHS=1N}msg45gI_^G$T39G!?2r|Q(ET>mC;sFgj9_j$$f$@^82 z1Nt+Fx7$-;Y}T4RB{s1q*xRZ=?W=&GNF-wc(!@SR-(<=^ANj&_?(F};t zxqTThwDnu^7uCWtw#rbbrjaibA{$n*Lk;7c3DqY&ei+GI%I>$l>2(oJK(ChqBbcd< z)#(a*gI#IPf)T8mMnP^s6tG*lv zDnA;G#9#&JVf3K#NwL?OG~v66TtNgWeli1JfZr>bd++t0OI?15IIe+)2jSTC3_s9= z8bCcx@Ft!{vB@NYD7wVQypxcor!By;hlQ6xz?lZ8`jdGjNPqil&3Arr)! 
zhu`&kkNyE(=uuo-Xrz44IK~eQH+AbD6Pyswj=6HpGJ*QKSvrSM=hcSQY>|yMq6hLN z^rGqJokzed5LPi!u`Nj;6oB7Oq}#e*8g&BJZnYuT-ui54A528d{pS_T0(wZY9=NZ0tE?$J5fzmrO(HFfXv(uv$hNi zD|X^7E-mzEF;0Ht84RdR#>e-2Gnfm7d($W@ATkjR5hbd#T?uNA1!q}ONtJ_&R#a5S2}8YvZliS7b%pB z=J!6!ZiY@9AB8Q2hBF3W?fH2a+DGLF+sqMiK&-FVR`<1VqDoX!+h%1aGRQ(0PHj+W zw-g%Sifup{wP{}&mH7l_QxO>d5uLT?bS7ZV4u4D#4LYsJ@wAKtO2vSxehTm#3gK!6 z8ib8tiarz^FN-bC#G*w%dnQBkrJgNCDghn)HDH{}OlE`G-2q^C{Zg~bh`;wifqiIH)&YK_ zUaWsr|Dm5J4NJ#D7^rgg1VQxCsFdpB9iP<}YV=Jng)JXM+y%&HU#=(oW%}iH1Z+A< z0S#c-kp3+cRl@7P`rZmg2_qvdw2CUCgwoX-Y54u;_WcWT9>MnxPN|%v~APEbw0=kZ@8vVy#J7uuN?OTPV zdeDG=ulb(cO9|hX5_&(s!_H+gn7-0D=^1yFs-UnYp0EfMT#3x+!OgMP^u!TxM*y`7 zPN?oSFz@1;PpNJ36(I<8{?<5F^_rV7W&+o(`j_QN`;QlKinlnh)_d-5j%Uws zO}>GQ$P}7>&;o2S9R;@XMYPesb{#vO$0ckQ=$g$wbNvkK%|)4eiK+>T&z`B3sGSMa zK)Ve-2A3Y=En^VcvW&BeFy)LEHf3>O8-5{N1?rR*k`R~RdiTe3P zT9(;qO*KtsudfQrg?}#W8*VkZyCo~#29*`?C(^q9%?+*2hb_{zKx!pibz>7YVOz}! z)v9msO>C{0#h}PRzfh@+Z2h%l zD)SvLm3%PGd)IBpX`YWGEReWDd{EgPj$FoOFAR4!Oj)U6)>$RSq65z(5%CF2+Wh+s zljnkuAV-zw1W^o+=TkyZ$_)ZtA^0LE-Vx z;_{7Vu8|vU*O8J&I2NKtuWHu9^b0-!XpMh>y!zY#*L;eu370@h0!=+W#8h=MvH|>- z@d4$pf%%*iu8?{3v-ohd%92rN2D?yC^ZVUGOv_H;*TN<}u&5h3{q_Hae;9WQBU-Zt zWlpyiCNhNjI{MJpjxsu!iAo$%Y5*@vqT1w7?7-&Ejqx0TJ)fDD3)F$DIsi_^h4#v} zq0xDlc8c7&7B@fEYQEs8`>594RWc4Jhm3LT{L&G8M)LaQ%e=>(yw zZ^roJGXbtH=%3Ts0i2HDEZtEaCJ*0`W!-D{53t_avA+_AdtxG*2T2dLwiM#dCfbyP zE(&Pc$b5#o;sMSX5w_*iE?MrU&z@n8yrJ(nwI8~e^y`p_cyjzAJBC%K*o0F`N^jgH z{m=53BL>Pn{K05ul$2-%C}>lwqVLobU)?j@7Lo{5gD|~7J&v8t7rP)NZ39z9Y{0fi zGlrhZw`hmkkTB8g##%FXp`-vw>~QOK=n@4!*n5V%{HGrOCG5(8E^>qN_*Yw>D0mpg z0wz)crM;pHn|Y$_sU(n?B{vE)*2C=ICJSQYAv9_W7YE;6C0#j6t3sFJp@_5Fd zuA-0P27BmT651BJblGl&9~u7rwNk+hCUVr1p=2NbOlTm~7A^@6yc=td4)9*{UK2PO zn?EvRDVdyEO`CzuKdq)FC}nfmp}&sqAN`uj+clbmo3?664%o~8`SctHZJG=`ql!qs zG4I+i?P3ONS7hNH>wMMNM3FVyrGFpYzfv@4{thwK{ze7zStQp6K0^Og>QKBH^u9&f z9C>-oKQ-6)@88i|3@)mODWm0>nYxKQ2#3dDkwZa18`>CM>Bz z$0NZ@&Fya(WJFDOq{^fF8TPbZ2~4dPIs;~uQkDvy*_)yC_!W8|u}~f353!%EKHy~< z7F_&n-SOHWSg-!IUG=^RGrx~kSl_yAR?11(dH&|@19L*viM^lu^I~J372U9xuR9xe z2Gw#CHc79;D36^o%Dz7R&SEcRCh%$nJcf%YDT^^CYvQ9P%$3(FPHDqzi9 z>+5jY!t+_F2}%<%UzY@p(&qG5ZUQs7j-IcFU9hv&MvHF#ltB>aXC|Wsm0R=3E_3>h zpBe;7d}&9;ha%GK-M4NhNUdcwy||yU_(t45W_cSgJcD;wSpY$2U5G;Ky2gF|LK(6Y zy0-YmV=n~w1oy+)dh)j~Ep)M}CJD5=U8DT$5n3rFff)zuD7y?6%t(%zvysdQPRYKH zWSo)oGm=u}pGv`hBvlRbk=s`usa7N@jbvnrA@y>``A7=(zp3d*-9#ds{|m)`Mv)eY zYsr@Wce4AR#s5!M{B{fp=l|U{QuDt-{-2wC6WSza#SBF9-ADrXpDUuRA1V6(?fst% z#`#r~NM0JbZwE^w}K;L%h6sT`6}IjLud^yq(^d~}|5f3Gr9iX^kK zT12xo2RYdO)hJAz-V29A-XE`q1X~l=)5v1Nu7I54A5#!?! 
zh9sOkviQW15r*3TxQsUFu|EUKVpRWnV^L^tCU?}RuqtVY>}vb#Ld3h&OO_^t=_4oK z$lQJRFJ*BFWSCkFLe~2GFFdeBr61vAi9Y|&7e5$l0sHQ@K@REA|MH>ckaDwD&Bt{asplG&9;aC)MdwYmvSjts3N3QusfgBd46Fmr+*3R zmdCm+5f9Y^+1eDiIdd(pG1itRyoxDAy_c9%#u1ne0zu0ZU!x+Jc<=fX*x6X8M)fW%3M7A#NFi2Le{2a&&DHD1UNfj4sG9OmA( zeF~}|HJVipa7l&(tI&Klo~DD2)M%hbAJKfcvdKkDBVy3*I`A)AYN}DYBuc4OW|fQK zv^QKWR)J}tqb)9gCRW|AoDWWR=K>DZ{@bemn!uF4kzBnT{Qx>;y!3I$^AgsGBsHTP&Sp z{?_utd&xGRR<2Nn28g>gFwANg*cCD+*c?A}LUp|rDWwYzBvVE7D05Cyr=vi`VNS-~ z%97YG@WlE{H^DqmF1DZTfa~|trSM=+arl}hVjgyve_Q$Jh(ON27$zIPo)-m62+kbd z;7BrvK>pI;d~^O8^KVdJTi*QvxiC0t!@ed3OvcYp!;?tl1DwoG64>GXw5XU)!l?AL zBDs~ItRheF!rl-lN2!fp>WUAwQo-}jQ4rU0(*`XM4i$e>80^r>^rtDHiv9bABr*ut z&yM7=et>P%uPyIm-E%F}VQeX894;dbra##cFPH(kgMFOAD0i_%Up<_t4Lp2!l6;7>n&gSdGn}bgtesVZ%3n9BUp!QcwQi zT^#(tk^|>#_%)qmedKdGZx|Cx-z}S;k(G&kK%JFW8gori4W^L@Qsg)M)iKPM@Ra7*z zx#V~^pWwhc3(Mg5!kCEU{c~g1M(%e+TBm3XSd!odt9lfJ0QOQ4uYi=%rsCy(8=X?CH1l0&hY@_vc%9o%H8&8G`ydB!f!S2QS+`P{XC9kuOQT>7+`e!d@*Av$pq z3L-m&dp~v<18#8mRUwcE=~uuiv=%4xZ$>=mt*~8qX?ogla}cm~HO806pW* zp#e>o-xv+N&sdAyeTy$Tx#{~7fu>|R@YZ_fgfMY9K{1FmJM>v+5PR~OP1BR|dA*YN zODcDekLSwXp3mYnQ%(0Gj3R}ezw|n{51iz=zlh@A1?qy?gSCPeZNy#)O{EGr^)Uwp zn3+bu0>(zcRTW4E6X>9#0-(u7eu72~l<_}llrAR#7st<91YLDN$HCZdel7vjwGf3V zG+w*`@X-$D7)YLusqB*+LFIE2mGJUng(Rl*`lXmchr(hwE@h2w%-KQ9U))p^tA`zk z(0AXv_sew)t-pX8zrDWDAj5_Ml{E*X=C2VwuL)n-jaM%Xk*FuiPCn~1gaz0d(Pm@7 z^=_8HE138)od!w$zJOYWB9@hBHLooL#aJFryiSkk)KX`tw%lSZT*!=`%!75Xw~9+@k_`jEg3cs%P!&MQ7)hwp`Bopdc~E#dcTS z#x3z~5?yc^HT&PEgqUw;EVX>NgrQ7<>M~2PK!Q#tX%xpg4N-nbFk1ws=q4M>hbl zSn#qvM34YFyEOBny#~>1TkYUI`$7U4z*gAVVj;VpWC&#s^8p=Abw3-bB+ch*@ziew zLcsX7yp9UA5@`n4T6~LY<>?0IUptKPzT9x3XwCfyKt>gt>C1=~ic*y%=L@_DhPRoZ zX6T8@oKaE!?+nR7etbeQP7kSF0_T}wwWKuSt2lYQFUt{jO!duu+b@Ksj15r(-1I+4 zw(x4=VtwbVNb4Zp3p3#N<+u^8p9gKk=v@~T%y3RuqdSzYFoaP%=*QnrR`ikQF0^E? 
z4o&=s6*}3(C{KvB!g0-7ELNeT6iuM$JfTjKa=Hj@gz6+8W`bP$nlyO0V(pRMas&q= zc!NUVioOP*r%thM5_9`R{NO5&%$lMSow9Gq;eR>LxbLYP3lR^uTbXF|(b)zq zJ+d=4RiIfoN| z5~f7MeA3G?dHBbfugtgL8RZ@p^|?6zOp>PzC?7WXBVx4D81?iaja3wY51IVmOS0i9 z2Y#RGjFqGItAK}^@{QAC?@Fo5OeNE}M$nrR=^vuuvU%RjPLf-!;QHegwEO1MZnRGi zH^RYMfefj?df5IBepw*YR8i+WBmXil$)h^=_M@)EftT?{glta#djoOW4=uequ6PeV z@&~^T1b5MP$Y^knb6}z7>(x`t=6BzDsst)@Gn6ckjz}q41-hn0Fawkv3r~wi9s>4= zMDL>sa2;e}hJ*Z?!@j}xeN!eVB@Qf4@_1#nKU17|oZh||>u)1AIDiQF-m0f=V!p`O zP3^x1>eI4yz+U!3kJO^9R`6>14&&AH75o*nl*HlnX{66B(MF~}u=(ABga4|uJt0*8 zSTG$Up_BB=n!!a)!?J`^#?PZj{+VVDASLzP_X~QI&QJZbL8caVVyvxSj|{@9xvFA9A^Uj2wl<44Q%B)`{T8U&pdhB&rI<$E``Mp4jXg96>#sz~FD<87 zs$d#niyFIoQ5IltEJui6*{zZ0-aN>*sDQTI-gGkk<2=VU1CEK%K)S1FEDuN9nm_k( zV{}}|RGqf+jn&{5OL!f+n){M-g-Wthciq`VOtifBXX%-DOo3}!h@(_F{b=H&bRu3> zu)wBZRp@(3J4{Iq4FPz3|Brc4MZTA>dHYg~=huK%?C3#y!ckC;UFRRyeZ2V5U=93W zvMQL!MkbmO?Mgg-cYVj!HOFqY`+jH~+JH3HOCaLYt$QH$-6rl(NeLsj6I>yvN)y5T zB683$UKjoa-OZdnE#bR=3r(3W#78UY%+fL`LJQ3-UU8Kq+P>*V9~>y*z^Yd>E8fni zRchlrBH)}%DTFpgYeoF$kAWG`M@2#qR%DDpE7;r&KJ zoleqIO8hSSPlG?fkEj-sy{)a&zBFFZ``OhqYR5JLP^J`?c}DgJ8TBfYIIl}~ldo&g zG;?JZ2@~Eq?HVDz7k=!YO1~6ah>u46AR($U_u7pVy5AFL)fhP1a4wYT*7lUQXSP7y z;S+|N5mAVs(#LNd_(l?!TiT}zbjI4A>+sly)3Ni_ge{fjdua@ONhR(n_T3e|}+x0EWAa)jMirfNc2< z&R>+;%!bx1zR(M$tus0MT}Tny-^p(_jV{7*WgBhZXkB8Qz{LRs(9 zP#b%lK&d(rlCmrK>O)`P;q?T^L!=9~z#ZZVoH`(?O)3YUYb9RwFePisO&g7Uk!zRl zW*xxC^wUp}*Eys4?o{YEq!VJU*N_x#*Xrw-N-V z$B&kx`{Zc!6^c8m=b-g`(kgI7;8o4v6L`Lh0dnbbN=7~`871abWThvs|IU&0jJcxq zrJi}_jt`C7`@~~BKpC2CP;A3dnmxzJnYpNQr(o92HE5F%ho-5-l<-ca1C-t(; z=M7evp!s&1i0y)j666P|Xtj&x;`LjCtl!V7oB9j)f8E5qcNF3`v>n@Ne*5-!5{uA` zrYX&Cv$jxTGlEcQS=Bmt)K%_mi?P~1&VWCiKte$}8->zz?yK7?(8)&E70+^0^pG{@ zi$lb7wgm5Jb6jYB0%@S1*I&>=+zkXXYlm2xE4J^oo2|~~A~gbkfO*4@{*T$+{46Ft zU}dz8xt9RB(ocNWvC{>+c?fSFn5?=t4BLF_mMgo@TvHl8X=QLL3tM7G2EI295*pr@ zmqLj@d1jRRfQOVcMbfr0g*tX$FKZI^|G*|TNn$18O&MV0yBFEa}J;?)c+6k6n)j8If zNeL$Rp%xpZHeF2+eN?5@6&-O2EWjS`o)wN&KCo)mzfEK|jgBg{L+nD-+FS z+{$p~hf{V@#PFw9pS)L#STQ}j@lNh}#te0@%*KCZts;KDcr$k67u56+dbj>6&>^_* zoR{zleCUK-rDLZ>6GW=jcX_m=j4~VsPf@2EsrX7h#1_m`W#9pdZVa9ngV;7=D5DLO zTrvo1Y?n*_w>`6A@{%Pdo z&kJnG8|#jaNQ&&*lsmuRhdndwN>s03_yAkI!ZG|TQ;9V0Q%C$wz|$z@@jmXxn3Z^H#|DL*+{JY@9a2g^A4ra(dxaq|Th(1~GHMv+Ovw1@=jJ&9lsXnPBJi_lLm zZy!Ah;G8fGXqj=zz8oi?T^UcvEVhDgmIM>kFI;nd>k}L|mYJcU`)n5UFdtM-y=yk7 zy#5THgX0m8w?<0&&KSX$6>)d5ePkNb*y_c?=Ms4?6Og}y9OLU3jfBq)ms8OvNd!SL|d&g^yNJ4O%Um}J-C?gi^CtRU)DM&0BLPzrJV3; z1SjDXu@HPEl%1f!yyXodEQhp>f+T8^B}PqGJbMk5!mSBluB~p+@iE{<{N57m>Q`WdIrmNHG?C%Y6`v&*1*J^3WcruKuRg&(;G7s9i z{k?oM@&=!e=T;WMCy6A9 zG~FyI*oIz;X12GYlWR@CYyV4YB{pp(y4T_@M9gxS0cYtdK~9cseISvz&aF7q91?$d zXk6Miw>$XJ(dK;w!GfLH5R0bm#Uubg$P08II66_Sy#aTBTpkmBHKA zJU>iO)|#nRuqu&kPmY|3KcY6t+;NjuJHSLEnlC5I+I8Hoqll{y=6p*`VFhmd^<0 zhea3S2uz)uu7s#?yqDhaZ_~_A?_qCzJ*4d1{^K{o^%>*V%=l@?Qz^L9B)1KPRK-y&96R9EvJ?gt4FXVEuDd(Jakdf&N$JHLl4Aa@H-TO}t{Z;$QyetHlvOS<|eNlf@5u6}2} zk-asLwjc`ukyDjnXD&V8^4j27maY-d_=!P?RHAgNCMnzUu5+8YuNqTQS})931|l#x_q5rDyn|68gV~Kl zF?OTv?)k$}Tl2((HM8#0&9eGD9-cKb@+iz!Y2Wy&e%0f0WyvYZku)9fYD!E7VhNzy zJ&t#gG>+tY@nmy`lZVZEs#ON^6G*k&G9?hnWWMl>J(Ms!clh|k(o=Wpuk7O`m9K>X zvUIF>))V-V0Duf-*kA7$98tfr@W7Nelq&Plq(e9d$W(~{6a}9z5t&#}6-eYuB;kZB z_bQ{CK$$I=pW7fmESPy4DU2wGVR*XC?zC?;8WOfDjF|&eHd#q+=XTgjj42_<_d6YdKo1wx`qzw5#3Ke7mS z_*Z&VKxSV4r!8b^YpT`-!tG#fNc#X94!NgLac)be5@pPVJ+_t(=?^Y0n2}s&w{?UzF{NE^Dggq z){XS)pftc~Il$=0g+GLG1*TQPThg)hw&Eq9k;uDCtoBH+i*@iO|s?tgy zTkwMSlmVJ2lrjZR{nC;kdv?!efoFa%JW{$AF>%U!P;c-=HrkpB&UnY~Ct9qv-^)6Z zuSnXjoXs~AU?iXYAX0B9u-0?0GoY+h%hNB;gXOky@Y1}6Z0&edOP@aCM#TE`>>y_C z=I-xu_fVor1Wa*2SzdJK&P>$PDZ{Frtp3N##o?buXLWzZq~1SNfAhzt>N2}y{+9WB 
zF*-pV7eCE$qkf-&n#7Pf8**xuew_7sLq_UUQ7)mz-GgWDU8Zmif}*DE4vx1bOEz_hKxKGo80%P`2-JQ$XzMZK1t?VV2HKc-hXM4uc?dk#Dr zq0r7@dKL{lKv%kWENSkeG}+fBysbR`mrD0y)cKMJvfh#akBa|O9bWE&SQA}?~dBL4dLpDQUv zyu@9xID#0d5?5NTq7ylqMbd^96p5^Oh=v$a1&R3}hrZ{fQ5~X9q=cyI+tX@K`y(l| zaYA&Aw(EbT1p#?#|2eN4ep?LpsUi>YMQsS8guz*HX9(q z`#56mA~L})^MRh?5++5i0|(S zDQDD+Vh!iSIsDzXwpbWJjC7jZ^E}V|_&3^e;GV*qKpQAR9SyR9h~plu(IW zcv3&6DvCrgRB06!Q4#hD`b0x`{s!OBXVJiuq%x8IH8Ex-Rpx{GLD@I#5q3Js9CP!i zT*FNM0D0yTYVv6^Z=?Auvdnc{KS-K>?Jk9Zui$5KIix)h+c4iL>I3P~=rf^)<>-O* z^%E^~Cx&N2ya(P05kc;YDBk$i4p>}_6vMyeTYAsZ;S{u_OO4^CDnjG18$k^-IMW(Qzp1jdDcx z$QkufsX9ZVevIgY(<17nl<+alwYF0sYq~e2lTcsL2)u1h|JUAo_WC(E*_#-LM!<;B@4$Z@ z2qYGT!9j5_i~uMAihy`2A-pIkB!(B3M2cdCL?r+*2u4g$SWH?}f+!)WATA{%EG>bQ z!Ai- zNnsTUB^6DSy0C;g5u<@u)zDVe&;v9PNKFB}rlho{qJWmDke0ZJmb8MFs)m-1l(xK< zj-HO*E+u_U4Sii*{oQB-5N99?8c2y6$jBHd5)D-J42;BfDM%X08yM}DH&r(_wKO)h z65TB;U?whOrle~{l(#Fn7)un4DcH`_ydT@HD9MEHb7 zL`Ou&+8*BPa@gDBuusHcs&|ZkbWD6eZ0Nzbh}ihV_#)-j)B!Li2@vyRcj>4y{O zDaYx#(b;h)vI}B!5|8F&q~{!`=P+^^MM3$87=^`*qKfe1*yQ4jtm15X@jpXZOi5CD z>2YdVN=aEoc|~Q}$(oc?$0|-$(JL9bl|@CBrRk?n96Oy;dHPIR&GG6p=Z@9p)Sj)Q zpJU{mD=s`&R&wrS#kr~z=L^o)HPo@1O6pJMHWU{&l$~y;^NxxEf=}P<6_UrBJ8`p1iR@^*uv$LnGr?2OBf8W6Hz|h^{k>--AaI5#_QFB>b0oxUy! z2Kg`34>$q^Yy;Z=C651+34q=PFciu0*W;&h&?1_Ff1~vQ8lj#=j3P8nH9DTs z465~ReB4TDbUyEV>(bN9zBdjpJou)PkCwyf%kAyF^6ZAx?Mp!uomXFUQI{T^H`}lM z{C3*=)X}b*0Z$C z2zRY+C{xKf-VxCe!>Hd`X6f5=|%uL>D30U-= z5VA^@^tP25xe|IlgC#bFP~bAA`{ZHtMZ81ffs#cva%V07vijYC`knm6~=Yki5_>QbI{OtatB`O6{Na9tVd}# zs`nsEj9M;zx^$)U`7CN%a64?TRu}CcL4D-r+Fq?Bw}go+JujcwNw8n8P3UyYALLrw zMM0f~x;EyEhz>UoPLOJ4yX|$6?Ro-=CND!LZl(66;#A6oOdNz#e|ui(xL%i%t7&0b z@7z980`-^Zd}$q{>#&9^*8DDrI-oSjOvGvMZ)Ls}8RV_L&O7ik6Q|8r(0951(%(Vr z>fp_UyXsw)fmeuX-IvxLXm@>Hb3xoWsUtomPQRxf$7a5ipM95Dm_1^0Fi4_@7fKy% zN`C!SUOjOXI`DEoT&nTPo$RhH zL;Z&p)x2ywq3WBScLhstZbJ`Be80}WtCO8;_Kj<9kZU}u(BO-#+qL?_;V z8LR4jSGGs?m3oVP+a&ng+v<&9Vw?~QWbxiV4z&HiI5f)e{uHL+*73BNX`k_t1vjGZmnea%j zlD*i>m_iDRLof$UN~vupSD;ouva>!Ei)+}^F}w6)K#bXfI{Z3J*}pxfL~0STz0~dT z8+y3$ZwqWpF)=$Eu@;w-%;_tBkRd|U}i_Xt5ZgWDf1MwUStk~ z{;9^EGmou{Vsd_P(-f6;Nhy{Y&`QSlD{=yF#2tO#%EeP*ZZk0@hhp=T0ylCW-C8}v zDQVdi>H~M5{V4HR_%3PJQ}jWBgDBsjX7mLn&h<(tcsYGWyN97 z>b*pa?>o`}d)vt4PlJ%>$>*d;$Xbu_gMx`$ALxLGfTyTu*u5UOp7JIlT*}+`xePGrGT)BBu*U#b{IBNC-a3eGaBvFyuH_kEmaiUpr)zUBnb+M~zgSGo7saxzS3_ z+b0uJvD4GmRb*s{s6F6hQ1~U%Q$=ZnYVCB(DaQ$ORQ@vZsLtp?*ySZ@=ebj%_{!0Gq1;}CxIjZ;$o+#Z#9o{GSw0t)0PD*$Yedf* z5oVrmFEyg}9?n#coW==0^;L&miZGt=i+bZ&jd7Z0ml-|MsJTbSc3n#QBGEy)pq2%(#NN05wOHw9CFP*Vd>tLxx_9AI zyR-K7P?xBVW#&Dm(fQ{=59j7*287e+ZcBIyLhZJm>ZcUCKG%BIHN1RB7kD%9{b>#} z`Syi9!90(LPS;SVmq9s>bG>Lcc6R#undLqtfdLhYnkB1K=<1fMKiYHfeOxeeYa}9XU+}^rJ3N9|#@3JJl)O=6x(e<*_&my4t`#v(*!OVAgt0?nh z+|jM$Wc9PEqQcz1tU=8tTxTTH!5++)1I}Vl`;Tqam{G8vsSV{p{)JEvNP% zVJtO3dAe!>s>qByl9LNHCc-+BgyXAAWQ@@)GHdE;{CP?UWVUS6jn}_YY0G*J`qBmj z*T0m%{53o4V<<%>9&!7SIS=kk^H%5PY73lG68?%EG!urv(`;X;7N$Bp?-x!QY+6#= zemx{1xmc0**lgj?N~sQ@K&8+WpN~D-g`iSPqISVVrMNWMEO$MU7YbQ^)YB4IUs${| z6OvTGke8)?IQPq+R~B`TeMYVdAO3SbeKX@>(wA>+tz*mm;BJ(ON+q61CUlOQI#?3u zfxIQq@$t~ynV^*R?cUE;%*F3uEJ)#^ zN1{dzYAXcE#G&P=vA&m)M>c1Yf}fsg?PHgpr%Mau2y}cccmb&pJSdfbZX3Jy z9eo9|W31b7D(}Pwp1t^u{di!@ediy0hwardw=r``%O7D9agc`mEG4VVTI19* zw08~}j>e+y0J_ovVn-4XpG!OcJ>7wjk?oKM^`-TQL+&fU>&LQ!j1Plo`i$D~_xZ=Y zBM(WMWH+{E4vc21v>m%uogQL*Sfe04Z4+!F66TqLZ$}|GG9cfTG|0z&A%JV*kZ`KN zAQ`_WDnpb8Ajt9Mv)~jh@02)XkcIzf9C^S4fU|*&d|-qIPBDN$NpT@I5X%Fs?g*hN z46D{CQ3`@CBUG(Ca`dld(z_$dwR=b``IsRKWR=N;Xo8n@cgld!dg~9Puwr zQYPP*qn_oknwRu;Qf}+SWO9pNCknSjHq(@_tKf}STulMr^0=Hl$@_Q7RD=d z{ZToDt|DHqkW0_^yI2Bkt^BB7vRo5s=Ubv(SX}O#zquv$!?#4;xW;WRY#G4He;0TR 
zh>LQNEC8*>tbj6cuYm*v6ZlBNn*R_pp`k082x2}sLOelSg}fz#AE_|cG4X7p~J4PWH1kKuz1W__FgJ0kW z&@&rA&ktxnB5-M&2D(!*G*YAxA708*(GW?VHbp%x%-cakzo=0AESZ+oh)H4#fmDIV zWF(7(nmSWg$w6BK;>JWY`v+v&36eoWFZ!MG8Y&Z|z~IU;>mT8Zn{Y%1ONz;2Yn<=@ z0ip5X1yoG`v%_K;^%-htH)>1}6;sJ?NV12RWuuLO0&57mmV{O#KrlZm4f4^}T)>N6 zm&B>talal}#G-NGP3f$wn__-6w@PZ=6IH?r2MuDf^?x?(z@i-p&ASiR*V52h)C&Wx zbr~Gw7mWsfbgkDX-~b==BLg7eh$wLGi7Gss0u$oG(-T{&e~7t!sy8OK4IL!T7?{Ghi(RNNPdd z#9VBOs2_XYAnnX@{aKIIJTm?9>|iTOR1GbhhWgl37yq*&XAKwd;0kdI%phMVt-ZSO z1GLeEqq!ISSQlWLg{3su_nu>4$>&N#&K(GCIRJ=z?>-yHjr;luEEu}d+u6|GD`d#L z_Umwyq2@zWyA1L&lG> ziwDV|EDh));&)v-h7Q54kwH5d{0JaELcl)*@MA;gKn`+^11^*B)oehG1@7$z*GMQR z6|>H75G8{1WXKd3{K^ME@xe;q(kc-Qjlu2Ps{BI43tmpml>r%MsPmh+bu!2!<7;V` zC94sq&Vt`K*hM3OuVip}sJ9D=US)${IACfoa*d62oC6zaz%yb`WlRs66lMJy9Hhi0 zQSfnOAe(c0P^NFBIC%xAHRglM+dx-}bGsVr+LHi$5*cWu;7{Z5;nbK*L_Cjx|3rlk zOM`1>`1v7-JMPvB_jU{!sKfQrctGFs?Jpbzf{a|HfwpHsYf{ySJ+Qi5`z-`VtAV_G zfkrZ;g#hdZX8_`YvG4&1_VX{0PZ}2dJtXuSGDW)UYL0F< zOGN{?kJL=FVzdJlU(1JlB_XWY|J;2W!9Qw-tKHvv=PUK(j(LR3=ez2^!7s!CKQ>?j zbh!#(aocR@`(8R>gznb&s!{T~AQ=Zc4d$iVR4z8xWb z=BBBX@1PV5`;85)q=8WFan_5TYytc-7o1|ZdT4=aSqW}hV_*4$UkF2Y`QU3B_zl3` zmchRzcZ=+fT4_b`xwY#Aa8D78#|5`uB$vIs{g!&?)=RMW;Y676!=#s3tEs_z0z**# zaEQ=IUS>B(^`>piO(6=5yLTK7-1acLeI7Sz{~7Qj;|tgTj)mp3*q*D9n%}q)4mimK z+==*X05AsdfuD{${SsOxV~=KaI;X)v(p0KFrU^>-fs-`g_Z4h~ zI-bS^)c6n{6$g@L;biPOZR8RGFRdq<1mJ@>(R#9iehUH!>Qr^egLT$@y${D}Q~@;r zXe8o|*}(lkIEIP)!p;215!mr_9(V5Ez85tJ((IvQv+5sH8u_3J1&Hnz`bg?DCO(T{ z-gBI*fwQNk0^sXIk4`Lf>MwLIvxPQ90FVWaQ2;w4zKI4n5b;Kvd00)QA2egmz= zC!vL?P#@%6qE2R`+_SU=#5xO$AR*SdXzT5($3ESjL6ETVIx#XY?{pr+bYFde;2_pm z3ol(U5Eg2kj0AwJq^mJcEan;O#UNpAUy~&<|XJ zY6?E-&-`o<;77z?r2!doKoSw}@?~aUvG9kE#oMI|{DnDF3p|Yi6p-=ROyFz~5OF0c z$P_z`!yRCQ8cRn%0xx8Rp1{dv?GAC1RBn7|v7fX<)f z)PITR;d`ltXFIr+0e^3NWC9Qxx{DeMvUqa7(PZ#NlF`RUf(v6lyy=BkspscHu1?7w zE8#P1+XZnDB0Af3KHe25AiWj~e(;Pb=-mZ&kwz`8W*Zj-R%mmeCHBfX_-tj$`Gt^z z&WtDlWc^JESfsw8tel!XJxP9gwh++!i>u~VNO6HKHdx95?~+F%({L$Q!PHwIhJ)sF z(PbR;8tJOK6eNy>Wf27R&qKbDu7YHNXXH-ot7pzdU?&C0aJXYp0wfXeN<~2HeQ!4a4TL~{Uq*dMTfQq)BQF+)SwzeB3NWGWw73zB?U zz=Z6^yt6OgaDKdTvHz6Ju?;DX0Gm9rGKXAeBZb%?n+fRj+I}PmM#_#J|1+E&pX zEQC8Y&%#tW@khkBNRMx1@>Y8yMkc8^r_)^A$_PGqLh~2=Be%luz!p*tUwGidRRAdC z@bM?!w|tPXRzO!@_|ZG~0~q3~u{$PRmo7&yS!f?Os@vK25$$Y7uv*-*-L8A$(Ef$q z`s3)|oC8PJCPQ#=?&{Ogp>Q$~Tef-lM3yWW(32~OBhJMh>4Wfpf%sKnJV6SMeM@*< zU~Vh5NY`Y)k!ygW~ zLN0%)_Gm1)lPP@TVvgF!=V;j$dK^W3+X`8}pLr?*9ifG2lX?;cuOXj{$> zeZ&1ZjZ&)HN&Cq<4-}5{54j@T)vJBU z(z%hP?I{iI!#KYiMSW2XLgANjucV%cCzsf+^?IPYt;XS&g!Nk@PWRtjdD>9;LF!|Dqy9a-lS}`(udgqN?#M$yC!;e_y*Keo$YB8 zr_nKupNUoh99EWG_x#nT85+61h(*%fdf+UfwBBEdOwz|;Y*jMls3_fjIGRxz!H`mY zh&LI+(V+;>F%uU9_KKW?u9$@SS+2)MZhAl5>mgp&7go4)Y794Vm ziWCh$J$YnM@=!^lb34t#d%`aLQ%!ah!_;030qODHS8b`z6^8oV(vY_ch}%#C)p<$o zF69|`h(m3MiCpYj%)J*^EVHmzLb-QTWw&E%(;KuX$fghC-4X3QOZ4ExF0r)Czdzi& zy$yxz;9?Ksx)z0fLqqf;udLcXN<(06u(HtR)0Zq8h(^OWe5M-ZLSYNdv{O^Skwpub z;?PsDAwB=*lq~=Rd0)I&qlj9lyXMn-NYwXD4~(uvCDVB65r6VzQSlWY3Z};T-}R>q z)a-=VF5c&^rd?foPzALAnHqu$XSov-o+O~0E^tgYb)dW6vPSS4L>BH*?Vbt+F_E-` zb@ot`^^@mOMezy13bpS{tSs@1!!_B8q)m7)Gg1&T6e)`27kqeh;z0PpHu%d99Grq4Ei&rEK@tN6UB+M}-gw$1m~id2HHmYy3wFYDJ$l@@OazsP)Z<5oUI39=;py-cT?2O7W%uVKNGDN^Gua zIsM3%1dNacpt?8d0_Lxgn12F&b3OZY#q_yDS0_Sgz`dBn>kYOr8L<0~S%C*UHenf#pxB{5u#vSyHMvv{ zOnO_XK$m4>0`-Gv@3Lyi9#Dcp9LZ+kLNuT8CD4w&AcEx=Yx{jp-&5hID=vEBhb-y{@n zo0%(I8$;)lYxVH4%rN;%c1wT56sVu9AiJgxg%;@HZNurZJnC%QIB`$M)gifg6R0{b zAs>}RgVLYc2nq9Q@ytefU0g*>Xfg22ehJ;k8k1#eHR)zdK(q=Sv=%#N4~Rm9)}lsX zve~6-6zC3?GQt9$kQ+E-!;q?Zp*Ks-m!{CJm*ssHl?{3MNgOatavX}C{n!RAX^^i@ zv);8RwDV4QqhK~+*s8+}oo+q?{9XE5NJ185(4ejFzw8Wp|7(8S%}7Q7flb$LC^RBG 
z#UKc_7V58asj+Q9r3tn0yjbbczRuoPXU7bNV}hYqI<6j>l!A5^D9 zcwLO(;?~)DBC++0@v9|Jx2O@l=Lsl%W*O8_g`@X5As;Ggg-`O$&-}m@!rGR%`4Pu= ziwdCCejCLme_9pM4PAt0T@(@-#wzk&7ied7lBe?=j{f>ZH)Xz-_)7dFIAB|%MjMu| zCeWA*8BN_8d%N#W29}kdefiF)%P-U*(b7KwmfqAz-Z5}jwuRtsV+MU_l%7}tN&Ev}YP)0Cm%j$>wTLyHkth=;!t$3)1Lbj6_@ zS?rbj24z+k(Z3sx+o8wqNDLDLwACT0oasvvQN@wnmvIGThdV`^{~EKVBYI5C@2(0Y zimJ<1lR2ci*TT0R*@(d3RtLQj!aD$DF$4D@+2?mohuwuWzgEbDb(XOX*oINcB<&=S z)6dXZR&xBPh}~>;R_ypOkevZ7TTCcfv1NVYOCqdMio;{{?|XV% zHc&6IDF`?1Z)d^m!zY8ygtyDAuCx$o^JV6{tY{wN#}7}|P_r7`Yeas(;jT(D^!EsV z@0n;^NlhKvIq+Bc?9HX6FHBVA9-ug9q{*$e{EY@$jcPugMBAB$ilyeOi1f5(qAmuo zWOD7YRy@6@)JDD7&*Hd{`EU$!L)~_$MYDOCe_)p&L73(vFC?q;Ic3M zUmg-8j@AzS8y>}HC2xpHo}2T9yfLAR@y-aCWb3xkBNMPZ6MODYu>*w?C||lZ)pEXo zCYF{BwNl?Kk{H*xnWhd!Q-#AS?Yj%A@okV=)@YcyeK~t9IK0$f5f)dhJMoLAMs7RY zRGH#5K14gMNH{;+&D0=`ZPVKvW?|T)-FfjEXl#L_Z#{-^al9+!Qi-w_xd@PDYR4;= zi=;uPoLG10PV&|y`Q4+(K0BO9t}3r|WC4x>t5u(^P$2PsIZzR%O*kEX|M6wW1+Lxc z&rsO{!Q-FDPGmWMhhfg!+DH1`&#o`Thm2X|m==mQQ3LLmOO7Q9*kfy?=Lt?X-;T1g zoG%mr3Ta^QhWPo14`#XQDFPGyeU9M#{aV1q@l&-Sd4dj%H}=uhhF04Y?>;lR3Qu}4dD9{I%7Z3R`@^5bSZ^na-;N!->rfVEe*b};g9d>a zD|q@L^w3hZ05yACgXc^9tYDYx76?sqiKAN570qft0zxyUK^Mxsf@&Fh8A-9Txn|CRim=vx@E$G>yk zzY0)dCyv7->N2d92=HoZO27E`>y$D(mlud}-AdLFMSKW6@Bp*|L~ z=~I_|Q62Zk*UwGK=YhhP!Oe5-z3gI>;&{MMq zg#f~9q-Sbd@{7l}EKk(FCtJx?Iu>K&H{CHu-EjyHnWJ6_8Wflu#X#_}_R-3J7o69G(ofIya;J+tDE^*No@3*Hy)_*>krP{K>QG zz3;pon!TT??;G~pH{zi}j(Vz{yKnvOKA&U4LCqXJ8@K}JMxdRTTEVp-3!m`P(gPN! zhb#7J?tAL2a^A~zU$CoBO!I7n%G1OBK3|h(?fY{~sb2AN`xABchublO1wF6@1#mVc z>FCAm@xp%#@1WBC8U1tljR2gme{Rbq>(AV9#eQ0xMy8x^j)iZo;W-4r$&K^P|MnWn zf9b0K<$nJwlm1s1{AL@`}WrJn{w!m{-TUL_uF;>m$zI5xpQ5%rz=&^x9<5s5ic5(3!R$_Z@lw= zI2ky(5O_UK<5wjOO$IRC0qeP?clW)xVfgar6)}$B3(Fvnv15g3N7_3{*$ccgN*p>4 z9dNt*<#yeVfZ=+Y-LZ-5`xc-<0EYQwUW^kiwiuxuIA@YA#-1EjQ2Clndq)PI1TAj# z1@c0may;5ALCV*6f$J6rHkzMzyBd3Pa^1-YR~97UPrxYR#f_kSH>400AkTt#HD$M?NW^^SsF0gaL-cz_wa2-|%`_uQoypY|3PX4EjQ|<$JdQ9~IY{@#Td<)*JOG)&xM3J}+|M`lTf@yX6*#>N;S1ao zbNXdV%#4P)vJgk2vKMp~^MXKVDPUbz$`N~e+#(!Go?H61Ul6?@#>r77(slnnEE1U= zv3QH@2LA49?J5B*#*q$G_>rv~B;V0F8?+Hxd~FMWoE$9zUDz@3wQIP>@rbCh_qA8) z5x#vTw0)4a+>kaJHsp}$Bj^*Q92piZ5iX<;z<1xMR_necFht+U(Sh@5(cB!YZv-8H zk=3EDzHdzaVla|4*P1*6k^Zm7#c?Sc3^@enx}4N{5#5#(GIsma-?o~_H4Q0tCP_6f|?i&hvP z)xH~5`aVi_-c^c3EI_r5K?3N9S^#H0 z@&F$ZL(9eTBI`cVLioAyDIr!Sj4(Em81il#P0MrVz4zxMq?t6UG=_g0@<7d^We`1p zh>QtAhB-3y$bcFVd1yQ@QZfhUsDd4$hm9kA(~yTKE4a4YU^LxwW#s?`>C0yL^8gGn z@AT@w9s$GfeX!KrNPb=*8>z~k9Jr78z*q?f{*~dukF+6t`3!eoa4&TwykHp|g2Vt{ zBexLJ1;9>_daw-{?u(45S^4CK{5-p|Cie)>W;nG01T& zh(AA!JQNp$h92oe93tig@{nJGUk|t?svJm+xrH$-WROGx1RWV-4xeY)Dp!riFlhsJ0r_MA;#0K`%lu&3 zMw8?L-Ncn}YSijJhGhugMOeoatm;VuJ2;HpzDpu=I;t#&0YH}|(s8`J{RD_G58=h$ zm_JR^;4$pl0G&4EF5e{_5#dAr3Q_@Q;=k1o$n=|5#OUa9ENcXVEUd0MrB86 zNL*mJrF|JXlvwgOtu%igL7?rVp=SLbi?uOKOg@_|27|Ob6DlHvo8w8z#riVDuF>p# z83Z=ua2m|AHP1Ee3zC!@PNkbV(5xI81OTE@!+5fiWcL)Z|3k8Md8<1i$|ohmeLVLF zC!0_M*tan((YY@33{uUy$Oix}nPY;2csz#ezeXd}AizRA zuY;MPySRgg3xUR{=ZGI*IB0+nDQEu#{=YVxq%*@yYr1a*2i?S~XlfUAwlqtq)54-trODC>~y zTto5f-I}q-CU(?>;G@-SE0CmIMPr< zfkMjh4!yP@Ahav6Z7Bb&IO2+1UJWPGzeMVQ6>ug3b)8YJ5E;1TIr$p@0&l;BM{PeY-VyEqOd-ZI2Xkp@-ij!&jfK#J z$BCh@g=*%8%oalAX1Xa>BD0mNOnATdFc~dG0rskkc~da@x{hK~nX=kk2mPx-%(E~# z^=VX&_?Dh4I_2EbJ)2uD!vI=mT-_Su@a(P?w6@Rlj*Y}kk4YNYJLyHRQIY)v4KWgb zY?pZu)i&aH(2%X6KZ!qTM5!^F#1jeIdS|f}&o-utm0gm2JnY*~FO_-Pv1|;Fn0s4e zW|~R4Lztl7`F8+}pk#*#+Q)n3V5uxERja+{Q?J&w{+~a!*mtmIR~)pOU=xCE z&;4Z)*5>v1cSSjRcty!>Gzj90*dl23p4e$8{3+Uv9nxI>s*TS5Hcl{VfYXpCxtmDF z8u+t41Y>mJMKe$aO=L`be-!nuXuS6gM@bL$SzO@oXJ|I!3JGQ>IgxB#T^n0*(nadv zTiTGsBEJHzX?8tE88x81-DoE@6Wj0v5s)n6Bz7>F1~VzvRwywFbR8~1ixgA{-A?hK 
z;CAkD@qDCdA*$gAm#LgCBl<$KtJLTwK_CN#g?_6d0C@@G|YVFQwT z_p?W4qkg|_0z&@=(MXU)xm#Zv=^3@|DBMR|w@15*#+E9EI!@74Z{~WP7bg?Sk+wYS2^jYT{3eNB;`w2wjF~LAk|Alq~pjn?A0;vjh;lK^1 zT?{7hOefK3xR>(w(qBnoDj&1a@0-w9>%%PZ(+-ol0Sw!tMX)?Q7*b+B*QiD)-`6n* ze%2%ptxd@fAE&9R7Xy+n_ZQm5E(waX;r@Ki-vhm=AoW=fi!h<$z6!ylU$ohTqPt7? z6A7LhGBEuXk5av?DEc)2mHm5$k(ej!xLCKSE~{S9YkX4V18&}gMz!;b5v@mWf;OX?2qw6!cB`~U<)NI4eLcCL=;R_I_-&Zg`q)LuXt zv#AJC)TYCCC6vQ=6LKW^@1RCxF3y?hXxOe|*;zzJMX<)eAy#GPIfzOqhu&Wqg52qQ z2(w*9BVS|@j!JXGROp;>hvNiif8L$2w2O2~Z-<)-FiZ?1=JeOF97!xMoj75LBwz)1 z%Bs~#klNuYtzxM*-Y9sS1QQV8-Pok5BG<_^uz+bNI4ou}Y zx8gF~`nMw>DM=-lpE^g!m;e)7%1_FZvea#rG^YwZZ#Eboc7rQ&#}!SJPv`ilgbJS- z-)$G(c=j&Sp;(#jP`6=U7dSyOj&8LrqN!%R-fR2z8N{{hX8EW`o8zx2&saqSWX~th zOqSGzn&TT#MHMR#YQ}WIIZ#7|`pN#7|J6$8Te_?FdCt-L^xaX7D59gL(}|+(zVZ|@ z^sLLW&(07pgn+e_t3s29IdLxHB+E^L6Yg4{;nnE!w1HuveNg1`{Jn_QOXG#(-f|D< zp_|vwV&$|M>c!J#E=C=-I-hOdOZY#C5WgW=^g7%_`q@}U3VWeM6j4J!Rh&%rV}6OS z+Z~d<>yXinvb>AYa?;N)wusv}_Qat`oZM%~qjeR%akdBB{9dPQwzx~%SiSYDcpP7K zd%#sEY(#5u^<-CH;_ol+#ahqbJS;b9T=?wS7~oLzzNKsQ*8Pm90TnZDRnWDgA5*^m zn*a5Dzkb8@p^&yB({WGW*5*BqCn}J;6keq$w$gM&?_WL93%!)pZ#xfLXuRZXUowXJ zev8g(%Jkp1OzVGtOs^zNdr8V`qq)NRT9EEy<} z{ktqpn)NChwclxYrSGz3=;1zh*=`;+|33!w5@*+UYCr4U2n_h9L&$ zmb5>ZPWL=IzXTwBi1_p?L`KSA0~Sn&XC;!MBid$V@i_suN1}JC?D(!GC7odNYb^P1 zJcBZos~=sHdZ+8&i#6-?XWz=|ooTT&yyVq?jxv{nX!0?<5~j24C$f8HlX|oz`s#Af zdheWmr(ydsozI~nx99T_o0m(kem-Py`&q3`O8a@8FVSANpFg5^-nw!1OI(u7wnn;J zGW)j9O2VlZbMv~!z0+4$+%IGxC! zwbM8H-szq&eHf;@erCGwz0siQqonrr^Xq*dEVoTLIl3G5BKO<; zV)sm=?zf9x{hxdWchBBv|8_O0e>r%2_xyZ?cBN}GQel-F@Ga+ZR?)Fz-rkE^S4Snf86&PSgYC| zG=Fb!?Z;%&z!QhWaCoDG_cmDb4HTZLO&~o#}wLf2y27fJX zTW*c){JVN;@b~*Ydwxz|`}^(2;GdNfdwwtN-2O2=_;+(~&)@ZH+ke&vxBqVM0sd72 zf<%Zo3nEX1sOs0zsbV?B@&SJ1W-Xpyk1CNL0EOWURYN_ z#He1xQbE+dUerxN%%@%~SV26ZUOZkwBBfs9xPm03Ub0Le*Ubz#{wffW4Mb2;LA*ghUQtoCK~Yyx$*4ieQc>By zLD@}F#iv0fSWz{iK{Z}cEu}&2xS~3vLA^{-qpCrpPEoU^L9<;^tFu9CKv8?NLHm)S z&TNCuqN48m2Hh3Kotq6i|0PrOl2UP)iIQD0Zdz^Ku{QfZfc<1RNPL!U;& zU?ro7Mx%Ho*q9RhvC^RlJOvy)0F{?VG*bRQCBa?+aG(iD>qTSJ|J^y#Kh0 zFQeJFOvSIN*{@E;zopr~T_vEiIbc8~aI`t_kxI~PbI_tn@cZWA6_o><%?JLfgdkf& zK-Ez3mQZ=sgQ_hDbydTRTEZ+XanjfiN8h*ynFIoc9=Ts4Z(5>=)e zUDXm@r+T=h<#4-dOlM2XfNJb$OY9@nxY?GtMb-HCE%7U=M>boI{8gnQTdAO0f_Q6! zyjr4aYoe}Nl2L1trCPFmYqFc#QJ>bM!D=ZHtts(psVS|g$3HmRlW$h21`x^cp$NDr znf(7C(QZHp5GViz05lTV#{8Bb{12q||LznNumaG46P0YxwEe?NFgNMH$~TgMUk|y>et~yJXDNtTpvlX&ty! 
zGM@O};f!wRleZvU3I9Lx7ZlI~i~#k2G1&&T z|11&NtULd~4JpKY0m1*m4XJnb6o?oc7Ih5Gv&xpSF?tAi)Qz8#x=j-#opJ%-vUpFPFFD+ z0v20hpYKQ;VowI%7T3-%TYGzz#b?F^oR=+A%nE3%>z7G>mZ)<7^ufDm^yu;5b6txs zd!^KEf!fdx+xtb|J=GQ4>?7!)RRe@FQa+P_l&Qx7zs*ytek8XqZzfjF$P>Xx__K>q0{w7pd=X+@vkgJIGU zGH;83Xs->rCj7`ZtzA zn3*4eBw)EVV}pTWp6fr~{iVcrN0Qouq=~YtoauX_%R?Q*Dw{*XK+WV8`lLl;Np!hM zjI60ML~!>rN9ZxcsJaHdCz=alsa<$;$roV%OH{Tz`~u9?Y^s;8Rp)YJ)Z+=FV8PEs(u$3R<_!4 zb8X`4Gui2J)R%`zIHSz>F^Y<7ys2xdYsuH4CI%Dx zAM3!0ZyuRHE;v;-{{1PnGTIsU_HLpUUzsB`u`o1^@!7F)2cu>?`OV!}6cQ)Yc%Aax zUz7}=qQI|bGG7Q3e)D@FYfH^O5MeXxV z&5zSX8kF&&-7u@WZMfUwYdfz1Hn?i>_Itvinip5!73_j;O44>ExjB z*3;H%eGs9}4R)3EMp-FG0Waw>LP-l`tZ2BS)kCZ9Ih^tkje^1-=7}f6tqAl2^_FhY z;4VSY|FZ!{3?KpT0geA;FpTV0ISiGJ5YqIn3H=95Lk%5s{}(X*-?12h3khVH^E~Qp z1z{?c{|!v=sYJ}339dh$gmV?p=Guc{Y5TjZyz^zXroPl>3du?m9=b)s6|mR;L*=&Dz zTRc=Mz3cj={Ram6_x@)vQ4HmO`+O+oK<{AuHl6>J;%@QnX{T`X<9F3(={CpM6nNy* zyDV$acDy17?ph!eM-{ZC!=#4AUeOT4aD_~vTn8b9xa!kWMNki}tvIRo?m@dCS&t>V zowc`=$|LC#VI{^)`s+Q`iapT6&7SaLogLw1I$`YLbs9s@Gx4FQ!kuq-P`=_D%~m1c z#=8nx+K)r|CIZab+7Ui=k^wG z;y{B?yYmd+U7$_pA77J5V(~5031R^o>hQ>4Zl+eMvK#8#PxA(a9ATr@6xVEHKw%j1 zi3?Z@HU)~p3G~6Ot(j3!D1pxhwsxJ!tWR*JuyuHd(%MG&SEvOzL5&W&Ugdlo0E*iU zOx-%2%d@!(x!bSM`n#WJ0tyUCi%SK-s@o+-hvF7Ueiw}HT~Oy8 z>_Zx$Wot=?667$5Qx%lsnLITJ9h*+!-N>U=@ll&g8*NCCmx{WB!`y)bzJzw z?`v5}wL`k4<^LjnYes#8j#sE-YCfpHEG+SEn{ux!8P5eOiFlJ_&%yK2+ppD;VY zs>6gRyDm@b&y zOW!oFI~7QCxMh|f6{xCp2q7f7^4a{Cc{LHaKvv4F4i#3twS1#{p>nG;(0K-gdacD?o9i zLXJRyA6fENxnFVBW~By30Rw{47wTpNHRzdFurEK>vK9_Uf%YY4jI}1-dr$(mI43|Q zrnZ1^1d95g4^UYz0wxHZN@O{e6ajDZ15EogPMH%$rvV_-H1Yz49mbX*tfnJdV+)jO zd=BCp3*itLa3+E%+?!ig8u zTF?pZ1p2vR9K#BeM3f z3_oqI>Ixuno+N#{slpPDTz|Hms=~jyZE)Z%;d)m7A?Eea(*xElfT++du|2WUpSiA$hs};p!S|Wa=pG{7aqjxr_OS z-Jiy(3_Sk5xbPc0op>I95BhHePSyzn>;PosT*lJWm;b-<=ATlSzOaJAf0e>0*|+>p zDGc?>BUveo3~xs3AphXaze{1d8!tZpKT2W#9d91hTxGcNr!UP5#oAS6*%cBzcvCuw z3jYgx<^54CbMD67#Qm@CC@;opEH7A5wWW86;m!TC1yRz?n2fLJQ;r#Fikm-ctf(AG zV>X@py(dR(P`CF}-^wP1kXlavZw{a245fE#e*#NA!?zc4TJ((_N}??NSsM0Uj*0;^ z%Zy{>OH7x;q{o_u@~7xajw8ErGwvf#xE__jsJC3(M%HWGZ(mBEGvX4Mt?j9Q?eq2o z?dok5dkNqQR0Avii7Wqs@uhpc z{k%?mGXrauP>s^NHsoX9`{nVJ&=$3 zarH5_B-p^8|CcDT3lR&V<+9cFGPT0jXvo4YZQJW0>yLC%`n^}j6cm^Kl5nCvan7HN zRdaD(aDAGi6}yB9d3v?~GR=JF^RWg0e&u6odM_t;vXvH<{NPp%I7hUzxKn~ zQLs}$Q~$o5_q2TB{uGKHBn`n^P?QGMZx*D(d~T_;Fhz%cB8&|%rod4G_=m!zltuyW z4W#Aj0YanMR0=3;{>3LY&`YNY6Bk#PyxWLMwaF)w2K{_pOc0I2boiQwyOib$X+qcU zs$v~K=(;i_>a`OC0QCyO@Cl)KPM4A<;t#4(wDXLXGAJF|QaYoJP{g10SK{J8&*5n# zP@sA-Bh<)mqyHR~pEObS2v7fIf(i3wuj(}V9IpX7we5&~Ct;Xt;AODEUjibOD#p5T z1KcGKoj()GyPbz?C4JhZUy_3TxQxC`7cP#Gp|gR}*QI2=x|gdH9d9Z_U2L}w;!sZ* zng=aQN5M__((4;mcam6O@U7FBE1xxXcq!!|9u275NksU$}R%V9R z(AL=vEH_xep7hvSIFrUT^IF^Vl>x#sN^64>lrgu#WiGY11X3irj&hy}u2^{)@?@SK zn-lVUU93W25*!rS6Xyw3bj?G-b;5KSsA4HdNE}DRKQ;o91W#>XBt7&kdxO>4>0Mu}AhvTG)H*~yJ3n?l|EV2YWsS%H#nuDd(lml)e z8H&L2WGKQ{7vo9piUD7-d5Yc2?Qr&=mx5yZX_&~{Z>)3Jh-8adU1Xb_6MZY&q6?zt z4#3F%OXtxy-EJ@R3=b%XogFAm| z$6>w-=vc21^m>HQ?PgM%<)jVw*h2_!tyAHj;D^gPJ@634QymII_m-_AR4#d9pWe8Z z5Va0r+DBjQRc0S349kgRqv2}&;|8nbaI;CCcCFO*8&09P9f8@#iW0Pcxq{lLzC5ad zQ^yoN2UT#*9md~i+=8vuspK0w4EORv8pAlZ(%i}WfSS5{7xDbmi_QZHipfPb_+oLS z^u0~)LDDp8iZV&zQC}xW8^d%i5dh7XAgd^H)EVd_q0;2{d{vD^So@}K`F3TF78!Ja zI>dI~KeR?~nBb+mw8h{N1+8*e2r}<#RqAd##WOZC^S(2^m;SJ17?nuaVCn{CY&DDu z&eGEj1i2RfEHv;sb{}i%hJE?6Z29*mXs7*^(6OJ;hhnjf%D(Qksj;{2kw2!hYdqyD z{xTGif|WAfXz?~QsKvThae)RRJ10KW^4uuoGZO|*3*@)fFBztpDe@vyji;L1JLqQ5 zkc4K{JB+3MrN@Q|Y89IDIBLpF56}B)x$;W$hsZ5Y^46^nQobDg@lsGojyre(BTq&o z1*zqz%@>AN5I*Xq#W}?WGJ{kH>ysn*M%Zo$js3Ld=XZkj(ENG$)TitJj#N|u3=jcW z$^JFZ%C6hm|9%;3u|oa1C;NpKb^>s#TY{qT=U+e44<{{6{Xdd~kxbnXt 
zmCr8}>G%BU5?@;2H6_2YcLUL~`Z~{Nt!Mu#OcSn>ZWO(HjD>%AV78|ONM*FTVl)2# z9>@O+I)KWqPMP3;T%DYje|DCMh5xMSqoGzH_LXSF2(lD4NW?-6qlW2619Gfip_sf#vt!EP&;Pqb0W4Qqyc zs`c74>><(Ly0Q#UF^4H!(=j(VOOL)DJ&WD=+!c9d;}E5lfJ-woSzPkvSQ{?1*>RFS zjud$5S3jkm!oQ@(!}Zb`fztSK%n^mz!pf{sC0x4Oj{Rn1B}ePn7+5B3vqzCsFzdU2 zh2@Ju*N12ATBv*Y6T9rI^hV{}d?w7s1!2kIM&Wy@&l#{?`zs6FN(uCJtE7hI24yB$ z613i-kjaT^`fB5#jF@x%5Vzs=C74qJWA(N@*d5YWleLYDUafm<{f{w_dZ-1hlJZB} zBKKZ0?r%BBMP2^Va3NXn=8VeWlV2#US&wQHUnSd3-55;J3Jklvbt87;`yZs~n;)w# zTW#*miJZD4=`f#ZUU+$2aAh&YXaD`>#qF0Kob$(638Bx9A7}}ENziIlEPjmhv~^I# z-Ps#MX_24n3NwDFoMgwk@h@N?S<-pIEd(}zeGs<8rNfaJo&X1853^MCB zsktjQ3q`tqVzwyaQ~TmZw`X-P-m1CmK==E9lh&D-Rxkh?H(h$Y+?q}j;W<24>##Se zcO1*1o+{Kz!#fu zJw8(WU^fY?$iski84VrD+H@J7+(ql)Kq`Bl%iDYfLN&oMVG~pss{_RdePlcxOR>@5 z^yeKxZuOLv3SuB&;@(YZ;w`2OHh3v2T7kfxxP$|F4z}1qy|OKV`kQG`{t;c4P`i8! z+LOTEqktTONvWJ0s=?W6G(gtbX_Ie|%djui@UR;#$TXPbv!#K50qgf%)AbP0bj87- zGhS>-fYvzt3TlWXGaVb`a=>P0_*O>3fIhm|=73^x^%+_$bTsqz4cu%cep zjswJGP#JCrY}YUxC_6?yLkaezKnY`_RmbZ%iaSK@+7hsG%(7dInhVH4i<52u&H+O1N*c$D49*yrpaKjhGwc|`N94$My6G`3#*kCVL%Yhyo zCxYtlC)Dd>`mlMjgp$74he{a6d(byRq4jb}>!?YhqA{gQ4P9o0VP)SEZAUXbt_ctt zg_WhEOl_o@;|#&6<=Sp-IXzTNQXFbQ<3pdQi!s9=(e1kft;x_+yS* zqb2@4f5onC-{4wLa|a(_lQ10uYHB0ue8<8SLv^N_V45|Bu;j+p#<`{pph1*t1_Ue3 zH>E=chBFe!Lz=M~9WFU+c1WYa`ZQ*Gm_G6EX(hqh_b9dJ;n?C{4np6rEz8T#rd zKxqAjia8iOqHE&S2~E7&kGzb+-=bU`17dPjfMDD#Q#0e|rG7pGfIneEfE*&)6G}l_ z8Vm8VJT$-u>$rOfEl2$_x|}0GaW!9(e9sK4-f9A5CrRQqCv;#cNyq6XtVltk5O3I| z`kCMr4-vCB7wwMc4K%bLCk1f@oz_;fYu@R|Vwg?tF5@-` zaJ-&QBx${~mmmA6_K6-M=aN9yao!!Z4!2qB(82+Si}2uTv^J zdmhR2PqNbo+5|g53&3vOqd=FNmDd`4$d{y`a=?)d*Oz~nyB$GLN4pCfj*Ee)3<#Dx zU~~7{EXv<*7}2_2XnSr5cW-v82k`VQeLaAssM4Dd_3;KCF8L89?UA}%4`*%3Cx_fE z_)7?(`t%Q%WTNx&4YC_o$7gl~Sp&qA)+{9Fs6DVE)9|xQPj&svw}V7HOF+mX}2?1)_aETQ}v zrRbQ!na@KTyKZ~}@~uc&;BB8$Y#S8z@A#^{qW$0Jx8yo?SE2f3!})-upm&8Joyx`S{<-o<)ABpYO_{wig8$WylXr- z+#%VpM0-xMee%S?6e9dHOEHoKM+oGHHbHL^(0e^KW(cSq^>R}fC4>kzFH!WRAX6j4 z7Fb%h8R#?yu!9KaV$hXD_^x{3el~DXJvE>Jwth3zc5EN1k(0(iOBNKrilJCOYM2O% z`NC7^WRSRoVrAptU5&&GX)>SiiU ziBX;+_FVni0SSX!qU0-reXjR-wWu&=l-cz4fKgIz?a=;CRx&*uNXMXMapx%jJuU>f zVNeqBM)%BvNm)rV7?dX)(8^YtCZZn@9Y|~?n{NuA2?%I?Sl<9@mcOx$qRYC83M|3Y z@$+UeO8s`0KbKI`M8w7z_;XX#GYU%9jWmHlO-SNzkl-7P@?4Cd?TuLx9}!TBT&z*Z z1krOAf{>L*%~6o&D=;P$#h@386D*|lOVm;Vc#4P+s|D)34tp*gCU5wPil%@%g_}A> zg|hF^ny;#|%DPH6f+tQiL!xgSbdwShEcd(}?41Fkt+N8=VsLKPz{%sBM4`K#Iq_6j z^1-AS=vD?~%{+F~5Hy{-ZU=ylWJ8+6Z900xOo$F9#8dXfQ&9!Qp%a*g84kO8F%Ruv z+U}a>5@^_Uo6jtywm2cw`;%qv;gp#9l_PpR7cZsruJ!%zYEx)RVusf zJm+nHCPTPDp`KraY0N66Yt{sL#KYMwTZhWpI?8UuDdXzpWFf*`^<}yR7G`1SMTR#f zr6L-%>Zv9URHPl0UM?L1TpLeW7C362I72F}%=EXV_}R4%RfaaKb}S?MY`egWA#Tpm z0H3%Jv{Ehb%>_2q3IzIJUqg(Hy(t2+YDI*(jJ@HOc8i4ua9wUh}oLuDrXOlw8I;EVYlW*(yN1`n%Ws?yZ>* z>F>I68dTwft#S{RKNBU-g{s2ilUT1f0dlPOdgO^!Oi3!cXn3I--heyg>I} zr_#ZQ{5>L!zj+X&WfOB|;#`Tw0%Px~PG zWZH_|@h@1fvzi2fuTbIpqpVHp7W(q>a>n>1%%lX(T_}D&=%Rdb=|0{F4kThQRtO=ecXybnhhP z(YeBPS&2tl-&TB^zMGFN+Mg2P$fq8Hrgl6%v|YhM4D^xx7|WB|1nZ)Oj%w(pA@lev zq^Sql*7vjZ&e|7F>el|ILT``9b%G}XBv8WDW6o@y${4x@)@jO!Dq$^&=4-k3q5xF41$9r+8tW(+3`2%Z+>YnHxSvvH=aM74R& zVDeMy$ni48U+HY|;V5INEUHMVz{q2QvS0LOqsFc?Xw+ld5%&jhS`ej0gu42Ow1W-=IK zj7@S|x#e~DmJVfXifNFYeXkUu-Ta(s)}O9oDx+Tn`ngNt7u<%ukv>hf;2lYD0yGAx zeMX>D++%3s*zNaDSfPp%Al)lXP)U_xI=8yYuy6J9tutaSNCLB6%C7-FJ#9#=fK95O z^~LCm)knn?@P57gE43B)p}=hN82|a^E(m@W(TUt3;n_EWbW@UAU4Bz@lDRbPg?e}2 z%io~%0kBRn6RLl1Vq?6}K+B#>QF`BtiftajKWcoXvgM8c$v*okTUBqnQ9a(>JI{A& z2eNg}<)D%?M&zL4&+7UTuhs31_*no7x-a3y96*AcKK20P2}}p)d*H;-Np6ShhI(EE zPF5Wxf`auYg5njy*l&V%e4iiNug5?ECgT)}&Sy+et@tUnr^{qF#~?iM?mUYQ+AQIP 
zkOYW%raK5X(E7G-@d!#Bz3;r!zZjLQZZFH#zIVSLe)`D*B8lr^1wQ1dX(*RVF1ULVjS*Ufj z0y{A0lqBy9(29u@;7=MZy0Gf^y>po$_GS`0$8_e3+RYYi@RDMFmu-!0@8VgWWg?s};F?5_Tt#b6t zcHn-x^3gSJT7(t(_T0VrIyO9aPtWA$wvW)U<6SC;k;0<@q4IMijck1A!OO+XY?Msh zA0zOBaEbPhZ3*D07LOF>wAY-<5;0t2T)GC@&T&{uMJ=T zl+42P!Ai8=-ZK4LT!B{lmMK5_dd2J~Xjj{hgP6lv&azYb;N;1^?Ddv_fgtiDAi@_n z*@2xhnBZ~=Gaa;s z9lt58uo3UuJiIkFyVh!dlB`8A8~;OAI$fmLncHd9c$!8RwP6zp=`XM$`B55{=jNK) z+4H{w;)9y;uLV(!={hL*pcwxoDJ^P-!S>1-fee>;s`uCOF5cbRhc8Mc=SWm1hIW3N z7q33wc7>7_yi=w0K^x9l`rV1I2bM`WQi~i`c5-0ybk4zfm#bHkqn!yWrUKiv5qIBR z*tz0G)~oU1Z_kdN>I%a>v?oJ)QFO4yU3pb@lz;qP}4 zir~X$i>ad&`S00DzlyVd-U6G{Z@U}=@s+Ft$AInGj!DQp#gENU`@oMg7hn zj?^TsqpZqyeAOoT0;(@&FXVF>D+fW5{|Yi^P){bM$QYkJ-#MFmz_ctofy-S@2c^_7 zuq?<{%ARy4rPvE2U}o2btNIexZEgXTC%HPZ{j#gGng@<=8p$d>5wO3_rRh4;z=8Q9 zPVr-bX6bb%yuj&(DfWg<-enTzKpUo&nP=`tLd4`%+o|9Htrz3DSHY?zL!K`cFRDB~ zGJbq|d|zwa8XZoSY8E2l8`_;>>c(-3rscb|9gWJhEoD-^oXp(%FXP8P zA2`gw@T52O;*0+^8n1t`+%}u_~iLSZ!NcHcygs?=hI@@y@P|)lk}oaKx087| z2}l6bW3$f}WUbs+vK0}|rc^MyCn&n4RqNCQJn72MwSG7CM9zYm9Ahe0dse~hwP@v+ zddjIEzF=wXiOH(FsG>4JpsljyaGnhMk=r9KN7Hp1uYb7|tw)h}a*W?}-V&nMFm>mP zLk7?IPG~y^5?xz{bTV(h^d71BPBi`CO9ZPG8E5r8t|+yInpwTjv`@N-hlOljUJi0R)r(^2JIkC zQqk$9;f%0{e$cEBc^KHawRDck*4iCQ0cYM?-iDZLeB$Px-gzGb*KK@adKo8Xt+RW= z9=3p+ApD^fH23u`?;9O^_Dy+wetLIC(dg>NipE1P#^JZeud(Okyl1|A9W;5^-0QyV zpYrw}8b|QS5Q!%yKXGZeLi%jQ%_E1&Zy%;#En4u2LneW=-r)o^G9T%XMb zvva@-BoSDfoxW?i96waA4U+P!;J_$r=Qf)aV5mO{j-$kZw*sJ3B`&&PYz(}N0y;X!VITQW#@0}moVE4{&OulRtJjFxjyI@44y{|$?3!F0Rv>GBrvb4g zILkK}Guv9d8Z`o&Ne|08;y*%}yz#03eJ}rY9FtD=xce6zkM)}R#QXrUO9X0VVr@swD*y3R>SeI zR8Mp9klVmRNojN{*vND?qwO69v*mdkj%30P$_NWjlX-2UKnVK6UZqIpaTQ>;#GW6b zBq*Vs852m6^g)Wb0)p+d=hHs~4$~7l=Y-D~{$fa)h_2QLvbOte0Cr?%7>@y`d*2Nq zf03zM^abnpzo}E05kUw`l_ysa+%Sn6?_KxtpC%5tVWyi+QQFjFF*oOs#l#8MF(M*{ zOT*+5XwV$WvuyA%0`}r>j2_$DD?ntfgUaq=Nx1<<2vM8MOE$u@Ke*EBMZ0?AupeJP zbgz^N(O-~caxgDyyw=wQ*xMiqs-6kblE8!|RnFN(yLfpg5Me(nohlYX*fmtBj6V1n zZak-}WHv4nS*|wBm0_QNoxQO6`&u!7IgGqX)D~nX$w2%cSpAZaZ!e8{hL0d~AX$36 z&CVhC&oa!r^R672Hd!2Z=6 z{eW`do6L{7AEexjifM;nalunZN6PMn^{DH#WWqp!rcgNH2^Iuo9K5W>TVWx%`{5q6 zO`n*ZM1D@t4WvXq$I6I;!7dUd+a;wIxm-?r@QnGGI5@QJqtZxQOY}qVW>MI$m9<+w zC&8WG%<>Ef>{d084C42Mv}-p(x(bKkk3=@X$?R5Xmsi9_s0>$Iq;9_VC9l)e=cil$ zzGx7FJ?tvN<{dpl(H`}pY>~Cip~XG{3Zm)PIY2&@E{58sdX!O||^Zs77eY&OEx8f?x zJdxbQj5|Fthqulc^%xjYuG5ftqjv9EJ$V1X)AD|sDufIQ0^rv(O8{Ux%hSTJUq8*d z*-@mBlhEG~GPjrjt@?*l^hAmho;%KGZ*Q-V-Dht>fcj}JKMccdf*VcCdO+E3I=&R4 zT?}jXPw@JTOHze_IAfa>rpANJUcGS6 z%qFZz^;<7Ze?CK^f_WUJwen#6op)wtUZ3A@{+6JJWwx18uz3lR%XmpFVAJ5ob`Vx5 zbh#}bQc~tv=Z50i*_|o~N{sUwJ^buwN_xI1D>DD^vF<+HW1;1A>!q_(IH3n=1&6zx zA<-?D{#cRKz}`b{J6xIJS+hmLOND@SJiUZSksjh9@|+|~+Ic*>$45KcUWc(7L|&oC z4ymlb<}n{i$uBYNr!M08a1*vM8Flx)^nRXa6Vz;5%n+~)ejk$Qd2Dw+_e}uiL|7Y0 zL8MqGlAiz+z;vLEuas$!5Gk98$v)!kdDv+ziQfsDqR9L2S-;W54m=Um2lpGZ_icyluM2YOfXS=K+G2pjcU>Ubd zB9cQg5|qZwCMfbCjQ3T*$iNIs#^@jnX;t;{JHVzD96>YVNnGyqQ?PyLH9o#(kfdq#s7v)tw zfjR=(qB?`9c?=Q*S>^74ARwk>J3AEABv90>x;C<&j~PE!TUg{7Ohbr@6`gp!q=V0w zY{4YNXH0zE#-vEmj$xtmG)Bo&HXy=+*9HKWh-l)CL!J}fJ$h4$Uif7^L9YaE5!6GVA z;-|oZ&N3)B9bqu>F1M{@q1l;s@4&YS3XuZ%$pFkk*4b0s&^*~f2MgL*6}77bN1Fx3 zQRE&7&nF5gKVP2@s6T!w8Tv;*qW}RC)D-)VR+a88KldFvKtOrvos~s)UXakk63{Ra zt#}NvAVvf*k~Y6C<3f3;OK^J@q{m7AE9PKdQ;AL>kj_W#5FrXbLWcqLEC$tPyC+Bv zUBUNoXDi8A?2w3vr68w?yZREqZ2($ALA@LX9x!AjR^WE=jwgK7FJpf};{7HUP)`J?UlExnt5I40HP!uyJXxJDv1<3^ZrMsu_%|znjhGex znVvcHhKQq7Cphmu4G&Eesa5+_ML35AP*5`jbcKM#l~m1$5m%hxQ-q?rczC1y#lme~ z6^!Z`_DtBkOndl{bAyba#{7nD}rwcm6r;jes1$8>&`e{$;`Qs zbMeBOc@XReDl@{aLd8|k>g>ycFv1&5>cMJ{i9{EiAOQMO3SJ3OIg4XK<%+InR@M{$ z!eHycz9OPI0Yi;su6RV79l5TviY8;zYp;WKQyR76NxrP>-q*n0Bhgtm!NFQLS+flr 
z->R*(hfO^9sffGYynrTNCL5oESv+>wy%uA<2we`n0oJ=7bR470xOMn)z*fQbsIY5V znm2>DuiaMA6dhvb&e|KCWC3LZ+^NU--rJ zt$q->-txrp`1zx6|LoN-sij05|7ru+#jF&%Okr27udck5ZAmX&T$^B zu*w*;VYuh`6R+TSmi^2A_3bIh_y|fWJrvoDh zP>v6?^D;o|UT!5ttwOis0!rJ~EVt>jv{K0Jbh3+KPYhR*z*8~D#`@c%jBE2HfHCUouanWJSL)ja2RvU}*foDu`J6VJV&4^#5H z6|14ODRN7pZ-u{x#@Dttm!dBCs0KPj4mQSJFy@+LrXL{osQj>RvUhuN$=5Yd$zl~G zyM6^`?G8-K%6E657p=5^e$zGGSh}QK86Gp}iiKF;{?)lv41Z|!IF_Nfec$+lJ+{pb z13z@Z8NLSEWw@2sDM$tRRXdx8&Y#49l~!M0$Fl6c{Yc&8pjV4?OEqKhH^hb9TY9GY$JmFKiiUK( zP9_^LRNpjH-?+?9m#I0E%}D|UinMW4%8gWnd6 zQZ}hoj+nXbYrjMhX|^e^0eFimbmON9mrRVBaLtNW0LiGYRt~!EtP6AaMSxWrSB4mHdCXSiKyeqakiqY+C z{z)9mvaIGKu?J3W3f5_AzbGmQc|m;!-*kbKxTodgmc$)(_6N7PnP;2~JiK}=aM#Lr zNc&gX@rO0Tf~)EllRX0H^KWf9&YL3qID7wphHU=WXg7iXao@6C{#pD_8*NfO0j4A+ zDJ~NkvS1C?XtQ0QAB3Kz3ilTP|FL#0@5B-h*yy(+u$XNb{|Gky&yS@Twz}J}q_2gV z2X=~(G_hJJN@||J!wSRnJdfGs|FO~T8MwF>KGu&d6t^|}S-W0)%c`fw;ZfFMpOfKk zN-NM2)DpEJ6MmH!^yQ=eD3+Jl-I22)hTS)Kl=+qY(5xKsW*q*4(@SsMGBCK$Xm|4CG__St2%ko2R^&*@vF zOPB1n!QGi|AUlBB`Gy$J& zlpJ%?+h!C|Xo}EO|9Bc9Sgb(cv{#{F_&?ts;yBO*I>k|ZG}n)I@mTbjh0Y|m$9N@+wF!8 z7Vj{6-1<@16FTv8RBPdqFA@>*aqiiqTEZ(9^1_+f9^={NXSY4}WE4J?-%HTi#0BGb zAbpNj+QlxHZoKv)cz)}gFP?}{eo^%cRL=rv`NyUgu z<+^qC1b9HPqtZEOd<^nkz{VzJk!43FxePe4v9(q0rN~j|OZG}Ssdd?%4c5xgij=B} z*}4fGAlonAOjhAv`f_|DT<|m4MES{Iqr?o8alOgdvTT@F)kf_lwuWmxusV1|qcboa z0#@ljTSVkWWmI^->GC+ujnS6|(zJdLb^PYqFy+4I_}T|d5~ z;qf?o7-7Oa-~=XUGPbyKZMLxmV%@$g!I;t#h&R{=lxXL6df!B=k!*nE1w#yFi|7U{ z3MU-R1>efSrD`mc4{o}ioB7+PF zz$#&w{Igt{Bk&q3a~y~ht#fLQNJDF?puz7i?aHTj|F=*(aG=wx>uMUSvBW#wS<-|5eu! zLl1WX5W?2YH?Ljrg7Sc%@8sSplI2;lRm<}Neuljx22Sp?$q!mL51sL<(lP+#Q)))= z5<)s;4d5Fild*kSl%x?oUQ$hkpmB&|^YI^cVUkl8x$7D)g&hj&Y_cZ51fe(8+UV6f zwK~72@)DXy5Qq{8M=4i~rU%RHbk+1V47Oh_?G!}QgWLeh50lvz*nGINNH3q&&jyN; zJY7a-GcP#X#f|qBiZy zvn06ln*yce8hzQ?rB`R{0wLcFBSy^z!ks2V6f6z4KAP$R|9$@p?!#9A)upe$M1WJN zUBiM-n=5PC=un%xl@*;viV}`W(hq2;h0thpHUrEo*V)m9OnKqL-_FnnP>fC+dHz4u zZ#+Vfoy-RTvQd^xkS~DGXe$UL+h>v8hC`Yw5GZDr%e2PRqwEwtEYzKxBRe=nX@NRC zZZ_=p@4@YoEne)L1tZ?U05VnQ?)|nu#%gLm4UHgyU}Hdjh|~ao3ju)JsfytKA1`m8 zYs;kF1}NGiP(UM-ykGQXk3sODG_aE>yK5)drR!n29mE_=zR7@_HdM-nIiHGyCy_Zo zri=e1V#1vqfnETg;%05DmQI)$1X1xXh za;rq*a^%{X?ff<~lM)C@u3_KqhA~JQ;h|~Ytm3cxG;IN68eNUaXEKMcD16wWYy6L^XB-1Z9O{2;hh&=t8o5*&)2jeD0 zhAGc&7krbequlQPe`tI2uoxS^|NES+nYMFgYTCDHUop`#DoiyklA5WcD5ga#nHDOd znbv7hQ$n(hHc6NgvQLVVXd)zCOp6rKOeJw)o-^0IeDB|V-1qSu$Mbvs)*mx+9?s8u zdA)U{Nq)T@FG7L+pQE?$M|1Uc*KF^Sn!&k>AMPQ3jG6%klaithO>FYnoF8@PPTx>& z?f9rY2`A1IIm@j~)kXy#P(m+$K42<$pAueCB39@WB|hp$_3d_ zuo@EfHAx{}CN0YzADlw#6K;XRl=d<#DHqr$PLpLE&`a}8d$9ZWF}KM)Uu`D*lqvSL zID3#Eze0V7qbJ_vrHq7T40*THTo9VT9u&%Hi2w*1%|4oH^d2)yVJ__XGfBwTE@5Qo zK?AWO!V`Xmx@yurCPK_d^m{A~P{Lm(eg6vM@uTLU8BRft4<`4b2)LwnaO{}ku0_bp zIk37Pqhp8Flcp*Ux@8`p10DCkK^7p-1?DyZ$uV>?Mb3w!G?|NmXmm>%Ux*-w17aJt z)1!a{PdOwJNz4O$N!X@9{~3ln3;<4>W@US*>%UTX^kEC0D)-@!;uCSb=uS{ZaAbPD z8lAw+2Dv~z2Lp|ZKKc+V%gFc3qlJo+@V15gs9oJ(kvO18Op$YYx$>6cyu?AdDZ%D6nRMt9aDA|buzg1sBQ?BArL%wxb4uHw)8FnQ6=R3_S&PxD_u&hWuOA^3%c zl-eEHv_3sx?*T~hiFlRXMn=1QP4JMF6O+(LRwOkI7fFI-J?t?O+PFY*N{EUVf;VV@ zdqKq<-=DS~rWUhh1Oig3=ZT^+q(rd59QrVvc#h&d9GB?{q8PiqRlq4C1Ak4%0~dBR zx3cgxn2`qmM-uzWM57oSD*#*I3HFe&sWjv@*6~f7fT0PbhuR*WuJZjgV1*!djV)Pi1*2; zaWOPeEa!PO50Y9qB4|YT%;I{`coAE}+&;r&e!+*$a6wBE5CP?6DelQaFmpAoi^QHL zMK)5P1=?+)QMyuA%Cv@}J#!y|fa7o`s6rRX~3^xeJN3 zE|%}?_3~#^WK$ef77v5uk*g?3qJ%%oMNIU^#w8+xc}E8bFfz1Lyu%BPhgnL+NW$3- zr?25wH=m<@C*Y3k3GQr2yP31E=iI=2fF*Ckfl3s?9&bs}GUcCZtU3?UFke2Lz-)Y3zd=v16}<;uBG;gRKYvd1{G*X`y{a_})~KTmO}@N3r4aziIM?00 zpe@*1=B4BzMA?!;5NE-Aet?C>sYE$D#R2uBKU`Pjgl&0v$vaYOWs1x6&{p)>1|_;% 
z5=!314qGESCDBy&O}V_OQ}g4N%W@;3>PNN3n;YKt;QB3;zBb|yO86#c8wQLt#cG=A z_zQ}E^Y_h#J{kdJRNSU^(8so1+8^OEo#m1ou+r+5i${yfT+BBT8-B!FWZQ`bDOTj{q?aN@rUwOm;QqBy7@mSZ~q*pOOfRz%G>qJ z@9zCWdEsJRcBquniPIK_ph z4oeL<(tAG7+1Z$Pjs`)@(*`KyRKLB=itR+ykEv!43hA$Nv%i7Hz|cej!P;Au@Y6(y z#y7G1tigfD6^-bEvSwmsx%`q2P5-b!4G=+E(| z!0>6P9#VhzVYixm&B8YaW!;!>dChmaCZpvEUO*keN}#i!{Bw4L<4El{UD(kiDI`gs zG?{OQ9~zkJwI<88QZBZoe+1Itk^1L>`h()9W&o~m2#yrn*EBYjsKYdf&s@FE`~{;t zas&kqxYoS#MJ&8O>$>3bj3hM{hE#C;0Chb7_C3|IS5l(Oq8W7Vu_YRG>$r(b;_=5Y zD<|aS177EvBoC;vfq20xR0V;1)`K4mJUh`8+$-rc*4OtVjN*)*5DR2Phk zoKp=Z%Tp1ehV^LkynFOf$?69>_CR=xWPWz_DlgdfZB@C%du`HejKLkHo}E{(i8~!W4b;fNJ@xvZI*#8y zh=h0CHUu5Mvfw+Ashx*W6&TveQsv2lY{fc;DzDIKwAR0L+1pcVZ#r=xtcJrUj&7ee zvPr*BbKA{TaxhaeshIr0jr+i{=GF6_{r79KA@C!S+OEl=Uhy%k7}gh(W-0kjC3q8U z?wAf7iGmFYOZEtA*q_Mv*9n_T5D#k<@o^8GkQ`9os5@iQ@@eeL#Ot>Y@PK!5Xs4GW zB{8H1m04VGx{q=PbwY5sQ2v%(`_^p zDC^K!$}}^biwy=49HZ30NTeD{1+3rFkwaI@PBH2r4Y2iU?LF>4}o6i$)j}) z38>Q8q0`|h(45fcz=y+h_hAHP;}RsY6|QTK>((4?mT`@X(HSHe8Q^I-|L$^6Vjkg? zB6Vs7CoXF93ibw-Is0%8fdrt`9Y>UZQV9oMQc71-MLJo+4vpu|g@BErjx3bAOz1bs zAqs-Md(W+%9+lo$E-qhZ)OjM6ALND{&eHzy4B=ErV?}6R1`efl$%1tc-)+ReZp3)N z-Sz2WRsqoH*>z*@>s(~cn3^nwMPz<2-5XO4fQz|gC5p_peUHjaV@#a??M+#n28Z4K z3PU^T>%?=o&Ix;pQg(`vrW6YG@vCPWkRtWhB;SqqcrVhp`mhIy=H5H@nQ4==2(=Kv zN?iXui{f83s1fJrS>evNHK@MM>ccO0E)48+#evK&h(=YC$;~DJvXM zXBmPkF3>@%dKf)p)p~NPLz&k zm25!8Iuw2!DnA|IZ@DbUky6IObO#&}d&Wi29DVM2ke5+XnNQ~Em+-)=HQHo$5_I|p z5mfCPrzd4Y_PtMnggJUGf~zqkG{E&>^|WPxxfIV* z{%OnKM{Ek4S;)~c;sM*Y!gvWZ5;mCv@W}2H-rk?K*wBW^9>>W1cZ{kVq5zP4T+ICokZtPR@jo~Ptsqu-a- zR-`q8O=)rEw#jB%~Ic`3JDb+6GrGJn&vYdQ=yTl_vaiX=rUQ~=%2 z@qjL9(3#56$#&aLcGm}zI;DBI+8!okK{(qF3!xR)ZXz$b#} zu9n^RerM-i+A4^srd=gETjM?-$s~mD3JnOBhx+j2{mCTcLs7?a)Z0`s7eU}@RJhIGSl zmfVQW8jdma4(mp;IwL_gqgezvOL>ouF9~HzVdF)}If~o%`!J9q1Dz87mG)w_yzoJ# zv{ji1iB3$VZi|c8GzrV21;@qcEwI6U;Xyr=d=^zpX;kHvzIIQN{R2G5lcSxbtd(z& z&zZ!1Z`Yq>Yi39%=|~scur()x9M5j!WF6L%aZb{a(e;d3ezaRBvB8`*8|6A4q~{8= zXDRmMyy*6MeDxwdy6Kl@nF=9hJ5HM;M#@#L)rlZ=jBv8JC~u`=MM)t+V%?1?;O(EJn}~c5OI&?uM4iLqlYxR!=-gGAqqBkfn$0GO+w|AGc_;g1{(vGg$Ja`w)Vz zseabvF@{0kY|s@{I%9TdX0WtXM(1~>YLz+{dE6HW<}#Y$k=a}YsJ*J8tla{iC1M-v zOR?!?>c`8jIm2ve*agZe)$?TrtIO3N>FWezRo0hV+$gs)$DMp%W+fss_sT%w2#z?q0SimU{vSQ%Uqbo|K4 zxz)!gvYm5SGJx@6t&|#&EmX6ekl?6uLFMW1$B#I} zN|*}UNd}xT+Zry|C)jZt8EAwAY_7vWAUWstabEg~w)N`Td|($54k=Hn-X8mIQcR!#~%?u|gbVJs64H0(I}=zaL1JWSg?D$qN1GQs@R z$2^E!@$S0 zB+VKxXW6~a%}Ub>MlrgUqG~ORnzcV_NNj}V{nATO_y+H*0OJxj1~8a_k9j zhq?d@6$d%<)5QOaJ57TOH*Ek9-89Wmp!{c77_VJHL%|{>M#x6y zVx+SD67E^IjWY6by?f(a&_!}+de4!aj7}_ZSzx4R-s7$cgsmAta(T45l4RQ*62vt2 zW;>t@zn{zEJ!BipMENz$P4;Dm{D!SkAKm`EV&&gI<-mCiGJytWBZmcG0(7+feS|8@zWRGpC7ZRBnD@2N@(kzYWfr0AJeSbi1xDg+ zkfV5|-3q1Ws2^m)`S82}J9Z%B!s~V6(rb06H#&ZP+=bddzZhM2ZRVY;t@OEIq?&>^ zU|DWI=-)vf>8;st?okWxX_s1k{5zj#bsL6Ep zf|byOCSs|RC5<**5#W>aJ~Mh{Mg96MaYoOo(Cyz%P__dReTv~0ZWCqL^n7EqUIx*W zdQk32S*o$@kckU{EX48bVEdwwaqNJ}nyi-BJJx*7IgUpWXphdh;>kF6IU-TSLS(OP z{dnGamozf2-cX;T_?)x7Rw`b+*&E=C$faEMs{?QF52s#v3&nj*726UUu;v{Nwc5 z5A|CUZhf~HzwDz&d**G^XfzLl^FBYvz@#@TX3zvD7^m7?rMT?D3fYg6>)H`bk~``U zMSIPN*>O7WXQ30R-L2%$O{^c~5^@RfS~W;hV&7Y}KYo4#T5I6Sspm;#Fyz8Qz!3S-GGOBm3$eGgvH15Hhz7px>#AbV@Lw5r}=G_Tquhj3`y4!TsT6N*)z* ztJI4I6O(gl}eDa>FDPHI!2 zgew9{^a{Q*8TT-Ir@+)0D&9(wI==f+iMhKB?a%1Sbc6zp?Dz<7dPvTg0yW?lo@W+d#QGJ$xUIr>bo zd!&Tfi6F=awL9)?Zd@&l0FeL{a<-%;dmz?zBnRzqUsAG1B+~0mF|jds23A7vpre&t z)?8a0dNNsOIwl|ShQ*0#s=-%~s-1OZI9V?dI6;cQ`mH{gB1912L4P@jn!!}6>#jzw zhuvHJA8iflhV)!<=?V6lM8I^9+Q#Abde013Oj#}LEJevN0$Xy%J6-d=^_QjV1po0 z1TLME+E83u`X{gLf=o!1g^?*boo%Wx;S?VZ0T=aXI!*}`U~BQR7UWM4hXg!;If|0y_DT3a-aZnJkl`vLR)vKz0^@ese%+!9_r~LpbKdfB}Ay{wk6-7~Cvz3!o6u 
zD!Yvt_xSy?#O5AY{E|Gt2wZ~{bV!lS9P=`HG*Z;bP7-Jj?Yo$RtI?TZc{sd<6}2n< zW2E`ekPy0DX`IY<)T9G@Xs>Q|cmmG|e`C`W?l;fIuSX_KG-&pQNm1Iu&u>b~6bDZG zA4kBBysmH9@^XtmR$Q4CaQRzKN)rrbibI;amz6|k>}^Z?tqq@j$#HKD*Nr!01z&st z1`$LuUlwyUu9Sgji0704)XGeb&(5s81}ry+go!1K3TEFc;)DBWAKWT%K3Kf3wZ!O0o73ge&}2`qbQ2nKV{lv-@Hi6fAIx}#ryFTYXCPJ z^)RLC zUcy+XUA+J5w*)zAXT_qEjv*0GBMYs3;0Mum_t}S{oJO`x6La^z;)W$YMX zacQlvtDl0dUXS@bmucgoJzbd}pHlb9y{SiaQhczfy>CdX=!eI|eLMoo* zaLIlc7G@sm^f1FKQ4wMQdr=f5WhqW^|9E5E&{-0?7@EqAw-D?!8u;EJl#!5l;?2|WsdhM4GI#@Op(B%g@gjswJ;mayJaa(^JPMD;4CpwB6ijt1WPDD?ZcvJ<`$zpWgsPgqaiN6GD=ID2){=w z`b0u|CnE2nv0bEn8gxV_YqQGgGU(AeM6~4t*xM=b3l9qlP;wH1{|Hk7$HiOn{caCz zlaW+VHf3le!bgbvVGBrSbI_LsgyPCyWr0<=ZKR{%h8yJ`Tt%8JwhHGcBVpYA26nN6 z52=ANW5CfP^l2RS9)od-4IJWu%pk-QZlt{mES0%k-M#V?bHlrpA)`P#zawpTJwE9jI>NO=+F3vi@@v9ShMB}+noA*JtQZf`hWg|d-*IuC9?W`d_E zbcxJfxypRxMMCbK(mDDe9&?Ih9T!}*Gu@j-fq+PdpGuD%t_~;QX-Hq5-Q*nBm$%BX zW69071oA`Yu=b=gvw5fE=2q5h#^G5|g?ZV$tc5A-jAsr4I-JkX!sfqQe%9dKLPW!z zcfzf0d}m=#C0GwG%3hSK*-mhyA-}J(pPwSlrdPiW&wlsp%<5QPgDuJpx7Jdy0`6e1 zqzQxYM0X6%qNp&`kfk0CF`Is7U38K|m94_ia{HX}s$IE$*4P-?ryp&iTDYr;pSlxW3K4G-h zQ0~l`79JdCp($F{XJEQPV(s2;@4>cm+!3MUWw@4e^IYd_z3vaC27_}6Nfwz(yF>iW zpBTdB{J`aYtcCYy>}fN7{VdU|!|F_jRgnSya?h%!p~hYK3t-rVo_1qf1~%2DKC--V zql@mEIb+*&l^Ug{2|A+ks!?~3?u~y9J}+fCCjr`jxxA3JG-Bo1Fd4R{4JBl8#V7^D z{MWAXIHd@ov$n~s%((L?T)Fb!yUKDQ>A10bSOv7ZaQz?jtDB!wTRFV%tklK$6f}zD z!!x8Ogd8pxJ!3y=g0h?o4S{7!zhRAJ{+8F*nPZxY_wn_^x6%?Fa8UV#?uIl~Xw~Ou zIlH7QTh^yPYE-!{^}^lemkq7M8L7qxo_x9+k9IqYi`6VKkvq3{i*5U=w$EtZd2(dH_F**CRqmQv`2Xwjg1XA%T&im9c$w!?SJ~TcYWy&5 zsjD0ixO&{{%j*wsGwhOH>+QiNa^Dwid%1bSN6L$;3knP0V{%NJVOyh&c(huNrYG2I zazgD_HsHx$Gap^dTSgM{4#?!b^7D-$OnAiAle^bVV^X>(yCMgIs?rX*&|p&S2<}PD z`<22iq`)mSOGKEf&|w^NcEHVf?)D~2_d{&=XO*OfbHj?M!%QI!iJ>-9_!yh*PKR!K z7BuRyCXP`Ks(h-tba3yc&koX^Ts-`mLdw*-m*v4UJ85LLC){#istSz@I4R!mDD2{9mW_?EAeZ$jKj#(h`?e7qSDJ=&nnv3;1mzH*J>6_ijl%zRT&`DpD`^ zzj`(q``61}-K%qg{+Uu9xyC0ntnclirdTMPym-~zxjE{?N89cZ*SF`vzy&w?lwEiB z1vLh9OnH~(k6=pR?XJp;VTx^Fi}tL!sgV z+tVq=UrlvWrA^GBaHb0X6a{0|#>*?+AreGN)y;66t99znc3s8`3mtDL8u;Sz85Cz2 zN<9c_WaIgOvpi0CHuKG2o)kuKt}2@wy)2j&6-H-KV3+_q6?k=Qt^n?@_T5izCVs+uqpgB;4aGv!7*8iH>GKRYQcCclT+Zy(~!ZK9E9@> z-ll8a6xEQXdW1gPcj?mkn9nX&_dNjfsSG}gI786a04r!KA4C$k&JQ(tpEZNEs(_@d z%G9rWT@9;x!<11z!`2UsImPl-yh9@4`JE^At7Q!=LvG%OHV){|bYnDBM6}EkHxxHHv942CnX9nRI0~g^C zG+=O&#C=#|<2Sd?esTn9P%*HxrswC|tvHxzXEGXqWt_J*UMqjDi@*wbhPr`~n?+VY z2tgP~-CCtW^ckS$-QO1p!#xKZliAhsoYK z@7=6U*Aj>6pdV_{l@WJ+o{+h7pu;RHOMpLhJsVstNpWToZ(e?W>si?gT!vGYGIN82 z)QXqy<<@L~@e)kWIO`7b zbZ7Q$h61Vxo%#*=IC1`sU+1%FQ|q5Aa%4)kD%{7r2{$@5y>din5Orp7U5?|{`I;Iz z@X_wP$a)f%isL2Cc;-9d63?W@jIMuz@i+we|F_dH;kpX~oJ)fKiEJE;=<9isu)2l3 z)V%FbcektF#M;j5fB`YN4*OOW$E@aQ%q(`{$6**?7|=aLp~##<{pk|daJm#REKYha z8#vbBp{C6%*@^^Uqw4PP+42*Py%CB4`9A3`!);cdq9O@`fuqfN8@OZ0FELcu%P>!> z#q0_U(@4W$^?KM~b%H!+SQ#TO`s&8h_{66>l#~J5tu-1IhoSw0MbHC6zRm^J8OhJH z4FnN5JlQ$NjD${8Xfe8l+~QE>%(CjuCZN*pJDDZ+Mz*w2qF_&y2b#gO*6h@Hrois% zYX#+cgqBKNm$i40lsimx(e)>?EQXu2mS01l!3N`up~M1hJVI;xO;h1ZwxZG9TuM+(HDCm`TUZbB#8BeJ zVc>4sK?2RU*4>#WMLHZah?T`IK@s;VVnhNDgfO=;&IM<>_{`aJwAa~*5`FB66+FSY zqzTGL5VqZXVC|Kx`ty#f3dSrl|Gq>XJt?ER8yeGE3vEecoY!eP1Xxvv(XkqlI?{$C zTa}ftdBiTfk>cezLDq?fYZ5L&^>dq{=4U%`V=@@-RJ`B*{IpJz*6Wy@IQdbQdoRR< zrHxpH)vp34ccP}-qV4$oh1PBi#lfI1r)Tt>$|zT2{AAp`S>niEn^C^uDX+EE>$_AAoQfdT6{x&la;)O z?wx0_x?QgDwal%G$JHM^oAr&*b3F3+qdnYm0!TTqFY8P~^tH*LY^Txh*wI-}DNc-Q zNm9nKCo~&$YRp(RyGi!Hm1=9k`ZvLqTr?%nh{yOYy!n8HjTEfD^LjBCP@=$M9Vqoi zUN1J5it3aJmCmGMcNlX(oczsi#NCi(V_+tY{! 
zaWYX?FEr01(`j&kd58skd^{D_xkr7P^~B-e+32OEwHM>9WqT&#&^i9PC6l+f|tv$-&y!wjGDmSRW zR|{{s^x+ZNeq0xN`6)%Ejqm?)g(bb__n3DgC*cz^KTF^fTpi!S zO2ZhrEAT{DNFjWZqJ9=f~@Q^#bWs&2~3}y<)I4O z_rM`LMd8Z;2zj`Te0Ec^f<3SUx`n^7!772xJ%NK0+2rCikVV?T8$nvkgI*ZDNg?gV zH%%1N&Bjm3lzY&e;D%=+M|cOjSSqHtylWmRdglmNvh4XIdb^ts&eDgY74T4qq0-C{z2${VM zhgBLU+Vs)Z%Xn@2AaqvplNsfP{X8(<1+Wzc_(w$s;R^iQH9|?w8S z&l%=MjC?~!bkn2CBn4+*LRk{hTH}|hmeI;Q z7M&~aqVR?961gLITC?m*ZSP^~1ZF1||ygsQJHdjhLWh)oqh1*SEN~m)ZVn)Aplyw9HCl`R_O8 zo3eWon=0e}f|g(Z__{~eB%`+Y-LNL41Pe0W3KpA8QA zCtoiQt&2ise*a+~6@^pw-~G$*t7X;bpB3IU4mr+BZdX)Wy_PmO_z`H7#FtuhIUz^EGdLn-O)qk^Z%Y@95m6 z(yO*(*&nMf`>6S#2ct#JT1W4v|LFrirI6Lfqc&}v!>ZKx4**v1O_c84{)$UhpC2ZN zVqHP<&yz0JZp($>SO$rB?`YZ{n^;d8((u~kXi;JFhj;5Lr>Tc!J}b&BS$$@zDDv;@ z2(IQN`FZi$rGt|S%cXPwz_9`XW8eRQV;v3IJN5n;1jm|KLds4YkEvgRV_ln?IKg~q z{^4ZG5*#aS;{1ox84G`qvJf0AM>THpEXSmAvRc_7DaUyq$zp0h$f^|7R_3Xlciyg0 zfhx6&vS^I)RUmw9XsFl3quJ*_HeNVaC51|GtuNy~U2L0Z{B-H|!pBdS1u~YO`JJlq z5K>l`aKTf5`H9b0R+I(A7IoUBlwA=?#KQE6< zxb<%v9Pu-E7AG#uKpPyNX6^xKE3p8nwo}}WF>MmxSFr!gEFaX29Pc2g*(SQEZEmur z2iOuM4~h1VaM$&-ue#^RuK()$use?mGgCU`M{9C?v6KGD<5b~M3JNpdd>M~p#5K&g zP`zBoN4*2oS}tw1=)n`{xFW19Dft;<${KCpWmpGwbIJN#hZAcYeVxB}+ejXP)_>Xh z`(3})Pr>K>n4@&vUZ(oE`{@B^?z@%P$mK6&f9-+yto(5C8-2!zx~KnP5@UHf{OpyB zJqn@Y-J@P3!pn~NzSF{CI#^<~>JT?;kc(Twc3~t?kNNA?xD( z&w_LE$Kn4{0{)M6!xM%&?vao~oKr+JjpOjk;<~cjekbnm3}bW`5PB@$n+e;4jwzy_ z5*FJ(J^w#))HHqOd(Pm*`-idAO6Uwu4NZ>TKaayz6ATGMV= z%2_Wx$53oQ#f+`jgLE~A)oF7Be(xv5aQ_?zYcm|S3o6PH*ul-o{n-KGYT6&C7O*<-*ZG68 zy^6IjGybyucgr5T;D<9)!=xy<=rG?OqP(El3B#;8#-7fdDpc3J{dWk(^2qn$IE$_Z zmyv(B?7Nj$N^2o%m=00w%hAWo=OoMEKhSoUDjK=C1g5N#@%N6irlFO123$F*nLW;S z?Y}+8^>Hfd$SiNEx*zu>Ja$UUBL+u(VMT19WP#EYgC0AC^{lVn`U89*nIRX6c0NKj zp<#rW@eZ3bjLqY!lY5~p9bR6XuzoZ5dnp?pOybIEsvqad5GYdf(PL8YFfQhd(~A-nd zNjhbYaN5_w4GB{o$oCtBHiF+{?<=IZAHK@HA%_EyHj%{!v8V?wTukHdIrszeho-rw zj(6WQQFOfb(#ltGosOI~s}69HU8N9k-;Wo2Jm-1q`vrI+f7*d(&=ySSnrM)#FEa1;$BC4yVw-axm)8z?dwc{Ww9gd)GyyZ@a}02ZQ$!}yXhRertAB9-F=N4^nSS%&Yc0!o|FcF&%$`~;N}Q{ zXqlLeJGmW!P;Zyp!atx~D?-_KJ4<%ZvUL>BV$OeaMwf_l%p$kR`gh+~b=aM|y!fm_ zS@eC4A#q;V1${8JyF>SfIG>_Wjg#hkb{c3(IG*0sO2@i8O&lZzwBl;QInM_cTO@@+ zz16DMyB{q70g+i0YSa_q?rWx)#W4yIh$rNBo16WIG7>#|kKQ7~Q4C91`C+U+yW)?MvFe%s#dwX!)UKei^w>d_Rj0n)0Vr}7aWDwhF9stm|c{1LNRpvAbz$e@B&j~9@=c)BEpb5^Qt6AShK8fUVH1VucaviHLFxM@*HJWqR|6&S^=JtzSB$<|yO!ZjTNh43kDt7;s&N_@(or?4k!;r*q-BhB$_+x+W`{e*( z8uIB4xX`Q)90o;ujpd-X&y*3hcue0a+w}GnlF{Xx2vh6d*1CUdOi|@DIEaKq1#aYk z6yLel#u97c6aJq60o%HCpe6y<4BLHby;jA30iVf?S<@XmmB{(<~eCO5Jvc7-JZ zdMDm2-Sls=6L$kqL^||c0P(cxS7;Or!mf#pcPdjBqF{qBmZy$1%XDn3y!|s|#eO>l4LTEAP02a!F94tPQHvDY`VQne!#Q`?*))4j{Ey%PYyO2+9PR) zN29Z%_~|!o>n*yo;yA(I2UBu;?wDlzJ{sAdly>bUPj{gW79#s@{z_;b+-hw>N^iCB zgvYR{iE-i93|F?wK#gwJIVYjoWg>Iv&U_?o;lZ8P{^-cH`m5ZMdw1VwuTV%a3r@4N z-jKO3$#mP-+ZBo*1{(`E~$L3Ezz8zltDu9Ez8CI_jY!@SqVXK~e zmCtsEO)(X^odIZ^$Ah;`TLhqJfRc&{O6+Hn$VTE6`zZiMoQTBx%~Aa1S?T>a5W`~w z?tpEs5>gDS;L9){1g_`~x2fAEO7u&x+tctx6V#MS{p*%wad#DxpjqEHKQip zORNyelTBta2_#fVE@EvE&Nmm6#Y4MMXvKDzgVI49EDuw`MTNA(_KA_uaz+Kmheg6{ zdB5*C88e1V_nyo#P=ooC(DGcxMi#)LZNcn`5DPH?E7z9>WQoB~6fBDc6s|-|_~0ZB z%NMA0h{1MGpAHc?!z&UC<&IwuExZbfg0&?yE-S>QY%*TT*n=Rq4gx)$|%MK5-Z zy9JJl!6^#%I#JbE5Hv-^a`=EL7o5g{pNQCG7GO#NKhYpZU5g-=uQA4sFp=~897Igv zNb9bj$G}lqa4In~&l7|gc6=`Q*d6MnqVKi=U0e_Y5mXuSCW-J@Dq*kA?buo$s>2TH zc@TnU7N=JR>q^VO>(Fa35$phB(AGXvJUQE-CAuDDzE@E~k=$T=5pdiV|4Cf_i4rpA z0qduP=s_k33H##J5grb!@Hz*a^3oHdu9L75lAJG%{N#n)RTJK-Rs=_d6CM*3FTQ?g&mmisdKy;4uo2Pyp=X?}or> zcN^4wxDnAZa+5-Eo*{3X2uI-})$y=Carp(qqAXFBT0fO=f2v5-Cl`p zYzW_(i-1O=KGE23+yJ<0l-Fzpgc_HKHpO0xs!sO{PN$nR5E-gK$Otf80Qeb4%&;&s 
z8az`dXU+l}0f3B>pA`C$1xG=yViyg{aGj7c&Y*CxE|wyv8PHEtP_(U*(2y#1vioQ! z>e6_xT0@gXm#<%IxZwYooFbP0y%*Uf22HHdk=$6QrX2=5qL*H9IGS^4 zA*QUp+N-Bp%!nK0g2B*$6;u9Q6vs``q|0KU-tj0S$X#=Z z76_?IsY02h#0Uwk%|y!TNLQPq?v+*U&oC6p11FLho6(>a*lMX-?xuN~xGAM&mV zbAl2N6Z5cdGP|F3s`!YJru;y0AmZEJ%pJ|}-dv@}sNK5O5h=~7Z``(zH3#0cj`${X z7~Cg}wWJjgaEy!zygf6@s;GJ0{@qr4;K8EfU_1|RnUZ!?)#nZKmV~XIpI8!>+H_^V zlE+3|Jm-8w>P{8d;SV_a$F>NUZ4B)U?t5xmhT>pN~T& zk;&YV_8fVw<+V6T*n<*fJ_A^jfVq=knaJ4V)j>XcR-=%KXQ zFrj*Z)Oio9L_Ek}XmL@GMVGn#@W=|*ZGv3|xuy%MH3$9_&>4@iHQlPMx+S$eOSbmO^{e#V z(YDaJt&!Ni4vF->sZFjpC&0QNwg~O9Im>02{N91D^z(myA;=NlvxSLUlFbfgY#wJI z*|Xi}l^+N0yqy(|DpWQza-|<1AA)MlsUw%~vYLG~(#}oaWKY8_w|UR0w#VRcq|@H3 zjN30J9xAt}+@=1$kEefk6=!%k78W5O2JCTw{51OnJmV#@ztOvwMwISrQzcBX^u=J!W8hQ1y+7?OAV$gvFv zXb4t#{rTrdcD6ivjz$*W?=wU4&QF{oU41=q0@?7a)MC%5d5rhIo9hW^PB~Jkx?tgZ zG)$;yVeXj=x5uIIR6dl7-J26zPY_H+TXKEkK6+?>KCXmr)K2zZUNRu%>v`x6SPZyp z>AS^W)FboVS9NH+_Q;l$Yfhc-c4&=tKfmwzGxX%Zb4c`~Cf`3;rjLVpfC1l15g3{P z5B){u2#B+{{Y%N6C0pWJ#A{o#+Mdc8K|4dkG^)+QFg`)`5OK8gS}Fz}s+B!}YnNrw z%9+TkFU3A~(+d$hZ>z5|da5|Tbe z{8i|WUF|aX4dyfwkdgS1sz=2dqe8|d=+WEHF=o@f&mYK&ZyR?ar!^S27#xb}tG_+dhF`WrXCD=TrdC-NUU-Dvu2UJ*r8+Uq#Z6bW>u z!n3c8SNMmpZgs_hD?r4>5lW+OGSC968g`9Vo1-G89CAHqRL7a#RdPO02Cm2etYJjY z_|7vpz?}K*%l;OhpNFvqr;=MO9A+|TNT=^f+^0_WY$8fy@4Ri9RepCfcMS(tm@)`? zs+0=yB>vD=UbfzG$L+U8{c@Y|##&PlwV7cJwRste=|vtkJ=M;Xff|kYB7ySkLP?YW zL69LkBPkecS~nsq@i3#M9twxFJz8nX>~qhHgGlTuXQSWz=z<5(u~Z|L5pMtetDG1m z^iGisCh_q)#D@3_wMKJV+kuKT$Eb{zgP z=KXrTp3ldlC|Ylq7VB8?RKJp&U(p5a)REHTGBN3Dw8o->kA2J)2Ms+H=ZntB>~My| z|E>R8|6+eg1X#c_jQ&6WhbChk!Tu+l2Bdxrc(?JdoK8W7!L`}}j=aNi^}%a@<#cfB zeX(^z$DH+Ug&h1#l3}TMxUYD|q1LbZ^T>J^egHmj{c53ZgP>D?(~&H%`z>#tO!pL! zAs+r>OaG$JquT>z$a}Vz$8_Oohh(KBYfr?Ne|>MUSK@rO;?TWM`0B$-k$llj`FGnM zNL;acJ2ITL%3iUNRdGi((1+36=-uYz&1ux}M5fLZ7&%UdZyi;P1b)dALg3B5OdZu! 
zva%5_-ekkBR+TBYqneiy)tNsO$N*!(ZfQ_S6VJE@w)EE(d5N}f76;8ZqP zd2izOZLziZ$k-)`IPT^6>8FlQiE{jk7xe0p6~o!#svI{Pq=gQz03C*1;mDL@ysqH< zoKT<-8;1@_vB&QY;RrbYZczG$g$DiTgYruLZs*VwI~WR0!}}!NfzKGHVx%rO8JgUw z1o>%?4O~@`>;b7n*ZA!UztwVob8-LQ+yTUY+aJ!a`)(9OcG`FK z8oBM{)y@n1`}fI~r{=Nx4E*lHP=1CXI>_*BQx3}bn;e@2wWg`Kf2 zEp#$N{5gP3(ZUFu4vVfm4r!H`_dd4TqWASdhS|k5DQhglug+`bIUYmqsb?$kwoGil zF*k+y5$)y2tSipzhFkUNBs&T*k8630V|9LU=vW?;_aDSi`NU0@J#uG z_clU_rTg=J;e#fg)@<+**>2&1?NYZNW0#>52ceUVmm3~XV;4KPOfL|6Nl zS_F40H6F?^*iRRs7-QiYdszTlHOOMzITQv5IP2WPj$gE(!3~cBus$Hy5VaGA6oIxc z!|dPf_R)QM$ZeOzt%*mEn8B!E@;FkBb?1$8oQ`#5J!yyh+EddAE3S+EV^)VX6HapE zf-~N1*(BzyCSCy3HZVbtpCb_qtmGoKX((6f#F4q+r|9=0jC}=lCz6cHQjgT!)q<5W zS@2PAcEmbj+M4$8lU<^nio@e($y4cO`K+gQFNdU*H6l=(J_eibVZp&jM~!hILi-FT zfj!p}kUkKzt@?`cp^k{=Py^b&&N>rChN*upceQzAf&=oZG^T?hZEHbxn8-=vQv=m9 zYW!L}wD$Q!X1kGL5t%(384YG^0B+IWj^X&*(yH1_qV7NixrjIlL7 zCb`y7Y zN0TBZ@#&&nLHMWKgXY?5jY5WfUnqOc$Iq=@RA6PhG(aB>znT$+1Rm1}n?Ir;WR&ZU zMBBn}A_T1P+8wzbe*RQN=sH7Eupc<2cXns!;oM7?nQBE_%~N3-VobIb+t9IYk6)hO zIak{%lObD3aW-q9j^CwRm{6mmJ`tIfAc^h^90|u`sI1i#CR^LQ4ZyO;lcm|*V{r&EkLMTm8cA-Evn;K=tI2pt3usNJNCcD9J11uY~04rKI6^9m!t-slV9OxD^(3W z<*jQapx`SUlwW(|+?9{vNe`Q7t#rj?X5>DGx80C#E4Cc6+Mcc_K}07kRX z*+}lBx+76QJqao8%|`*eK_}@KgrJBB7W~R|-ga@W_P8A9&rj^y%kvA!4I(EZFb%gC zv~Ppi!Zm@^6}qoK6L1(o22@@|+@KXkLRF#<;(EeTh};%>&2j1AqeM)jl$!K}=F{C- z_SzOa-@YcVZos)_udSQ*D;$j9=4x(E#~+gEg6Xpa#;Ue?n;9&P%aJI_N8+195!DI{ zPpCwi=9nJuY9Exd&u-o+NN-ePmIYk*IK47yyZP;w@ zghmEUFuP8evyJ1V0pTxnsCI>$L#waJt*~@EDPOE1KZR6bqjDZLF)lj1R+b36Q&*W6 z?qY&5H&f7~5DUWS0YYPd)qCuEFlQ&TL7f66V5W0u{NE z-z;}G+hixgqQXS^Xrrlt8xdmnU4g4sJT!TvNI5r=&hpE+;i>Cju=ff;gL0e z#kz^0kr{&tE6)0!`KVK5X1^*Rp`!ih74g5lu)tu+ZG z?$q3qeTGfS=x2?3pQr(sG}lG^R6P;ue>ZTAw6VuH_wufxH&(#?Z%6s^(3gtJ@l5#6 ziXuy$dgE6e&ha6?Y_{#;)4~)M&NmL zA2m1?+~|TKiZymO4Mg{^`DB$Y*twuUOgkVga2900s#KzLgM67_vg6a{2@YIt8cdd1 zh(D8|GXF^%jSu2h>fosY^-ZrUGag{oO3?vGLm5Y`imq>il**P!Q}p@-`^xn?R=hqT zrEL0YSpvRo5(B{U5<;Lr_F}Frms;hD+#;xkV4N8G1Ui`=)4R21-dW)A-n_9AjN9ON zRGze43k9Adhf#;g^=z(=HxSdk$pWe&E9L*{TA^3gM2DTqXjeI3&b(0TO3J57He~F@ zj|sV@zH%VFlHM}xlZgE@HsToGmyNLQyoYc{k|>BmtErJ|`;dR7nddx77ahC$GQ912 zODx=U=4CS8%6}w%q{EOch(Vk83@5r$XVz~f7SG(%?x`Tw^YE+F;)2R!&Val9Y&&95 zRl0%)A+p-!2Px7i#C*Fcz8OIqDPkAcj`vk#QTQ>^pw0A1tTb%?4CO=YZ7qOnHxWsB zfWt>DJoxitlN`(wZfRQmhd_>T&tZx>Qyzd~%MFqodt#m7UG07lcdMz`*QOQ8hEkBa zKq(@s58(!j?KkUnn66(cu$^hsaf6d>`0H);Q&{j*OFW6AfV{+%6Hpf8V9*T7m0EHj zW9qebwnD=>VwN~5K+l9O4dUkQGCtew(lITh!txd$U=f|MPO#pZNf6+Ub#`DACXFBO zlO7{|eVb0C_Lwm;!Ibwk8%>>FGy{SAPB6gS-`G52i) z?ig@kg!=9hP0<1qiMbMN_VRf0Nhh|>SY>NM#A>zCNM{dRL$|_M zgL)aUAKefHuuIPYWeqQ0-XIpPh0LsTeP?y1((?C#3-~g5{^W!5qR8$^mF`G1qSXqA zn4U*sl#!?dkdKNOsw{?VNDV`I`fWb)(~)TV{n1XeXZxd9jUepK5Yyz_oMKA;#*s>- z(5duTHY~3EI7eoV>L^GHB#Nb@jo83m1B#ralCc6OL)v{~{3|i~2ERQ+4#UMMZMMF1 z6SY1$eoxoyVGMimr;fdTXziv{6`(CHEH1M1rMW9f@M(I#EKJxn4X5R?$W++-`+f4Y zfxw3Da1>NqH=8t-tW#W<1{4?q)>Y8wyu2P$M=1x~6YZjK9F2ALBhamwdLLSk{^ln& zq*kyuF+rs zHxOxgv7r^)7I>J$Ba+Q>>RFCL(5dF zwD0S-iTr|$7EA_Lgt#GzLj|qA+kK|pUQNjPWi)o`sP5a7y?-6Mzys3ndiX0FwMQy} z5`l+C8I>)DdhRUBB&_~Vj;4G@h6g)6nafu*5-k)YP`4WggS`(#j4CyAr?5e`A@$`# zUSgk94*qL8l!Fz-Rm2=%vo_jJbQD%;MW9remD7#PAr3<%kn>|06+h<_H?2i!ZSXXAG}>j|LX^Q^{2kAIc$t!a#7FoZ*xp;)oc3Z@4EBj z`W_GbNN4gU^bfCCq^HErY{;&$lpX@y_Paty>Le{SljizfN1|-qcg>bF7EaqQ&u#so zs`rmy(7)+4Ww4b0*yLEz5cG5@g2sq|UiqKylsmb~|D$o0(+&}19I#|lU2;+cHIu7q zp7$RWLMRFLaMC?Z(fc#S{jUmP@hUy;^h1XosBNi4gNbd3G?TKwCps zwUqENnEK~Xh47yGrg-}E>wgh96RX^Fb-&{@Jlkl)XC>XsmvVv_M-{SkJkEFqSJR6} zw0r6xc+1;+hDARwfena}Bdb@ZKuNIptxh7TdvRVjTcTP|1fNJ*L44LhY|W6?^nNHs zSH|xmZhO#f-P0`5M%H3pK)TcBUH5-Isob9UPSkZsQp?{0!CN}RkGuYV1aJB0tLcB1 
z=+s@yS&`-E)x-u-GBs~bNk-ZNmi_`XMDyxl#l7mNs7z z%CyZ(HK+r{7|wtLp3J)Iz@?AWf(y$|GUrNHm$N}U*_;QV%_I@RstY@<`sw;)@%aig z)DXLjl$&K($>qa~Y|zePpIX5xWqQ??XP+RrKfIQL9>HUVb06t4RM} z-{g=tQYufY{b@bA4Sqr8;vHo3ZXf%{7p|}DlYb3|uo6^phdNauZR07FL2it5|1YOS zr?OuNT-a^QM?<>NtB@h`oJ<6P-n%TH4+3K|!r-l!>8sC31bG*RNKY3F?f2HJlBi#n zB=MsN3hw^kdO))?!Yr1YlsInuh(Y)m4` z@U0+f#msMreL2B#bk^DG_w3+Rqr}#k$kZW!spcl%*7b)&R?`o~9!~02VT3L@P})Ru+30U7;)II3zPcAU8?HH`zIB8u zUJmX0t!k+cF2}2$*>aPdhBzxR!02|j{6)iE7tcGsr}w1dUF9ilrEsM`xyBEyO;`m2 zysjMd*}NXJb-MwK&|llwu;Wjp)OYRay3p*w-zm-c;ST}}Z$o1L-Q;-Av%gftEIN=j zBXwi{?mYg!+zjHGH|nL2YtvW!TF<}J{yU|)9&Q)}ugt|`jfN$^5^ruw;OlzU9ZC9~ zw)T1SC*oxHcKN~=l6 zB8L!SiBlGow%+b06&6`^RBgS95T#}v#KR+Y$2{j!?(WF7kT919&EtjBlZB78EXcg% z-TbExChZ_p@4d1@*Aw{V=ZL>Gn@6tcbQ9OBSZ^kk6qDm?CyVH=9ZPiRoN#LHD zbLBfP&eZ-rrMYUhuIBij*$Z{G7iTZlx6IC7YUuf`BL3%T>;JP`R`SMu3wM-v)i3l= z65lQKQnQT~`*h1HjQIIB&r9!K`hIlZCZpm_MF=z1Cg7S%oG+vQvi3P<)t@5Ar4whqf$zz`tj~`ZB{5lW2FDs`X;Fv0O^_bBH&=9J z;1In+)}Kv9mwvtk1x0;@VS>Sf&q?a)>*Ut%dzAy~NZKb0Korl>`(s!t|2hNi3w{$S z9rT{V&aKh?G$Lv9URSBomM}H#>v3Q56OiFUDNO-f71AX_DNUt5fs_rD(#+!{_1&P9 zrZ`=>pD$(E*MUzKXHcP(rmI`0+({^<=~;=}2&FV{h%;$WO4D`fmP%@U*4nAjs+h>e zqhER1x=Z7DNk*qWb;5&Nkh~uG;Czv^!Vvd`qc(oMBTPT#oDMl?LhhBE;l-3t{myT_ zN=4^vK3CS{opXE&Una_?)px*L?!HnYS-9Rj>>HmLqLO1JPK)bDp#3IwG#wgT(M^>q z14%t*RYjQ?Vq@f%=n3Uav%42ajRk4lQ=UfpRpYV|sU3?`^xAUMz#RarQp<-<1p6S1 z41nUYw^2X<$rP-qYd?`|z;0*C6Jvx8e{%_@ZN88x$-JvRoAg-Wg0q5a!!ZO2K-o_%6<8SgDlCGK z*um(BqG*VTVk%v?c5Qk9>q>GZQG|KlC}A2WK~Skg4Y2Re zkVBPs*&#`6-}Xra07X#ugAO)FkJfOSIAky4+lKREKgPHA=ys;bi$V z=Rea~Yj$?=$g|;RL zvV6ctNlhIv{UEeKX#Hb2vq~KgvR~Ey$UCAdxM8<0Ylj01jvhY0p5Q zK=be%Hp0f%QRAwM8*<_%9<3V++roBLlM}<7&625me`Z@ex;%r#fVWORjvJ6sDO{On zrhb_kZ+8Y{%SR~Rn}F)CL@$L;_Yw^{jT2Dnk*ef80*XsHVvoX{dXu!%y6eQ0i6WJQzGH2BvbRfSRtzAINi*JwH<9nK> zFOMW7<@p3(-Hch1+Af_Yh|+ax~g~(ppz*kaG1@h7U!gypBY2!FU zgr8Gh4xj+sLOnOi^i@P~hm=Bn7$HC~@?c3@3K z0L7~CA>Q$$ME>OWy9%D;Qd7&lq#NvCT)_d9cpFUX^%|n1*%ZRiiY^<-%-C_Bx9^C| zA}_&^eB4hV?p$xyWaIixquT2u<@RH;W7@Lfl#P8NiNoDWP_1*giJ^ZrJ30vP`kFl~ zX=Efg!XnDtAO{Is;GX=^P07@iOv4SaWNtH&jzR0Uo4ci&xaAmR=TCD7sxqO7Ds6{0 zgydZ!%H9@B_p;E^0;SdIKqJHKk0!vZ65FIt$e&UIrMx9fPYluHl&&{foR!;_Q(tEU z26#?{N)H|P#*0y69%^zbmjYn+3gt}$B2-IJCu!&y5)9rTC8nW*6qWX}u=;vRng9k0 zq?Qr)nppTj7pas!?jwvbXn~4t0G{%3>6w9C(_EIK$L%`YMzQz9fd~C6Sio^SGXOziA_AT9m&#DmY{aw(pPz}Trsbm~-;5~K$WK)KjHq~tzw2VL zR|G(K%G}q>-aw|}$64r4Y)nhZ7H1xCU5MW@4#d)qY0V&y=x@Cj02qq#UE(84&}tjR zeLw+aJbWv=*!r8%{|IfewHR2!02tm%4D@NhL-D?XDd1$SR|Y^R$td0Q4JJ*Ca8`u5 z1_BlUfy=fW2$e<>clbTVy@!M_{G0<=W2K|`ITjYlE+b+Klq}+(h7!_6$(*D1cy?G` zS}ALVi?n$6gDzNAAYcg)^1=NTZvd+&d&YUv1ES-V*NyV9xGxQbUwFJ7C1r|=FlB{E zz8F6!#Iwn;UMPJC1z08E#@RRJ$OhY}h68Vp9zcpuXGJxAe$PNR{KE~abiwKOKo}ZIDW4A&d^j(^sAl zV9%yyVLs9lda3v*Ec8J$YOmW5cenXalN_i=m@btaCq^bx2^k=iB`#hR;$zu6$n4{n zEYV0l`sf$@MqdXgbvm_z(n3qJPApz^qv|onAR+z8LfH|BV9Y}uN%$nADMX#l%KKm$ zBE@ARijJgX(kuYrYw)?i9Ane+bzwF8hp70J%5>B&%W3}%G>p@zn;&Cy{@HeIydw;) z>ZZK9DBHWwmE+FKN?v{FyWD;&+%kyEo6ao0=T~uq6z-OPdJ`?w>LOz4LanOdKdv|a z#dP&Ae&hxXURM8Y@cOT(2~rpbqk`Yr7xcHhUe13Rym~S4Fx9c?#@d137@EJ;;}lMi zr+~jNMgC*(f;1$}dq#|qD}(-Cb1j6E>N<`KLSA%U{7tld-io#MKV`v^gSbi%(e}CD zG`KSbAq_1>#GNXL!^=OyAPq?`@Ap#VdgBgWM7>s=SwlCJ*K^7}UdMk7I>Fa3KcxMx z$5mP+W&K`?+*98__j2IJYIK#v>hs@fu6@OZ&{E|3UyjVP-v3#T`}5}Sn(Mzsv~7DO zk1o;@r5!gGE5I>pt)8k&dksB83#=oFv@;A}e zraZh5Y)tZWVniuV6_Hi2$B!W{Q;C3^OE4(|eYfUlNzl#z(!3;_~Sp0 zHQO@priVS;ucK^PO2362cQ0uc9~M1uz}Wrpv4e+AD%4ghGl_ev%jxY`SRZVpu6WdVlFV)Ya6I3ufvc)8?n5PK&deEKv{NAA?d^|2yh&A0!fB zAF=9JG{;X?Hz{?v{L5PNMu)>;23IG1E}0Im>I1S9S;IMZ09eQDX~QiN{+97&-Gai< zD|QQ@EB&NYAxp-HmS8q~6Sw2ik!}wF8J!L)C-7VRaSkyt$xw%@)%Yh=6r8DD&qLz@1QKPt$p+N 
zn1g@Qg5uy}xr4XbUwbpJ%k8{%=Y(b28=v$GIjMD>KfuG)-~8Pyf<+k^F@ z8<7=^V_Zc4b>1tUSr{rBG`7_qNX+5^kM=pkbe~G>qEy#!R9NmcFmy~ixIusoF5g90 zB*G5irl9YaS~`VoaV%~egZsGxrP%NwFYss}g^Ix2sgiPbW}a5*+U4-^uFsoj@D%bJ zfI0(UaCe(9x7`dfw||Fiot>3CG`Etwj#qFnk8EZCVt1z%D|-zSyfEg!r*og`s{M+e z>Q2n=Zmqg^y)KaJIL4TDc-&xoVMTo1*3v%uoEF|SGvJbw*DU-aJ3k!iX5qnAtcu+a zL#FAuq#(q)f}$rETuL{GzSgVlQ|%-`Q)L&?5o1^qn8A!a_akrX)?%zS4q(fDU;!|L z@2KP^I)LG`&n3A4Ug>?XAq*OhYXC$N1{+9@M7~u7*&}i&7}71<7!U|wF+*Eqy@HU+ z&z`4Q@quI(M=yMM^P?MbZGRlw4a4!qwi=fKcek*CDpLN-b<#JVZjXees1=>Db|h>X z7q}$edHJef?WS0(o;9=M7-ZbVHVJLzNZ(gHlgvi+)O4#(sHMfPwe-8Wy>2gRg1vCu zvKa$(+Z8akc&H~fIovSvsQ!Y=Ti$RznAp8$4C4Guk6-;DL{JU1IPYg6Ehz6pA()uA zD~>K7%kozXSzi6^3_v^?iNx0?p&511%yrt4zLAD;=g|$(fBvn17GlZ*m74TK#V-GH`-t>)fLL`%N$yYg7Nw7pzRtAP#>xs8~P0`jFx^O50Dp=;P6c_ z0)-5amf8@o)xL$R8z`Tz6gJtprLlGToX(a)M$vu%U;{w47_TV~{=kde)|`~sYPZDRxTaYP`bB@!O|HV!ICb-;f2u*6#W5Yg6(2pc8hjT597*@%0?@pw>X)g&+|meH)@hH*3V zc`O)I@?#;fEci4P^Q&xStDG#NE-smuV}vH;6M@@8ZVKY)CQ_OdKOX9FDVG1$;1Qxi zP4iHpS-NdLUn8t7l?k0nvYZ`JrsYIw@zF{ykW3`FvU2mOKo<|6FT`ACaiWrZzI7)J z>acxiQG)8jCB3k+FPx}`9KWE>LXG&Sud0{nnzTW4o}XktOL)ZIQy2r{gd1zcx3LS`yWIa#Qb3lFlH2XJ@F zX=fxs8UFq`zse!C=4P=7DXZt7@zC2;k4*w|#bV@}^xPYL$0VX-E?M!HlpW_;*wC1? z-ts)7-aLJ=nl2a6C+0)Lb8}FBZ-b#{5a-6Cj71XsWm}2YQw)+OBc>J)5DT3~3!KaC zjLJAgZx7Fl0Q;pNF|Sawp%DFzfD*(c_J*Sl^1jKz&RUdXi7-oQi4>$d1#tt@S(k}` zW;y`zk*ngfxA3wtGe&2d@#IV(0>b&yHkAMv^^H+2ke(zHQ3K7JJXxq480xs*;_jBM zbs>$T<-M60#fRnEk;mx=PeGc{-X!u`b2GyK6N>fK1G%0H)_WC@#4Ek0qA3H(UJ`iNyd ziIGSUx5(cy3a$@&>*^neZ*%IQVh!Y0JzVBG-vktcdb&f zOyD9P|Lt4;iziZxP?J!EpAj6FX4NFqP@NElR26^>s$3RQ5^QtmXz<~qcJpk^Mdzri zO9Ts{OtO+}x$C}79Q>9q1oVZ(2L7s3$}|XA@i_rLLra{8i1GlY3r+YyBn&ZabWY$u ze%`GKNQkk}MR>u$I*N95hym^PPSb;A@T zr4O;GHw}KrH?X(O^UJ7mu-*oIF5CthqTfSfY%4rgj2};eL(?l(09&5@L-w2@yW#St zqy(+({oZo0z_0jT2_Ihu&EG_VdT!LDC2op$7J?|FMxrjW!hw@q62IXC?J%5=W;`#= z5N&G#uFMND>iXN?PuuB>fQkSj3bkIL6@GfdL2?V+Pmt9RNz_)hHAk#BB(_EvGA(%! 
zym*^Q2Q2#e%JLcH4M(yj%Lbv99%I{Dv51VjK`j4Xon8?=j~4GYO@}rxkLf>4pUxn9t?{tITii zy>DD~9_jN?y5;#?Agsk7r?suRY2%HFKbJV}!_v8rZEryUt`EX(kAgi$p9jxL|0%rb z-OnJG$lo6ExZN#2umqT$bR;9&Z)-fcP1xjhs<*vo(&r<`TVdRrF~Ot`yQ=mZQ`-G; zOE4d58+D;$zC=fP>@TPlYnOIf$Nf`YG?a#cg@UwzueD4ZD>>KQ)I6wD9}eHRy3#}P z#AC>5Lw|-xYGW7sADPHhsG7ou+5MNJ>3^HuwX$>mBhZkhYw7Ra-7DRC%RSj;-6_9x#0;*VRd*M4vW8M+S7|I0x|s zbhJbfV{Y3L<=q@ASuoFGrEP3YTEbWCAqsEXN(`pe`x}QG+!1fJO_LHuqS52FYRwQ=;&V_7KIH!D{ZJr~_Cac^q)Nlm_{+Q{F-ytH0ydd3 zGkD*@ZNEGHO9gh8q`ZG!Vt21>T<#I%o{utgLkxM4g?jwvrNb7P5^u)~_B8<+$~GIH zWvb*H7kR5^l@n1l3c3}beomxn6g;}_e{Q4se;#P~_Yqa9$8&Sme&_9@TCdve@>v%7 zqNe;8Vhl`|VLWxn+qr6%URiLlE)02nZRLg9E4{Oq>N`k(9ZlW$UTD$YBOhgyqxcVv z(|@u2`QQE{Di8~7f&X(qQpJBSOC>X?jWu`EDN?n|Wm;kPR{!0P*Tzqd(2vJ+qRIa+poDqK!DdRfj<@9VerA73z)#oayDmMOha z-lhc~zTE0UI=+4jm8Dk2IUUr!>vR{qRTLPy^-XYGX9m@oIN-3Q-h`JRi!%>fOV0XA zwU-3xLw+R3DWp>w&s=DG>YTx3_(kKjOz*PYR+9llCI|4Jm>*>x!hdQWCONKH`PlvR zN{G@OcKETM)P7&yhGL(c2mRd7MqhY+Y}@}DG%auK?Ek4VKEb9lz-Lb36Tzlmd%baz zL30_bXv5=fdJAY>`K(kzWD;SqGKM3`bB8^K+HJyi6H6)N+T%X-dvSdFUIugWYV0Fh z-mTB884=!PGNm2rcS%$8>SW3?DPi?$y8wIi2^XI2;L9PFvv|~%XXBO-e}b+n6|y0c zG#vNzHsZyxW1aUpbqGiG9pa?vS4bbb!KnUlW+EetaW+Z)rT<3yhWeke>+|4u zDIfO@rkrCwg0K8p$hOOvYWDgu<1cK}j5Dggi`5biKhrmJ$pfXPCFLC)6vJ!VlLDU3 zm{(Y)%OiK}KGa~EjGE!|CxjDL#I=VBWUZEVt>#B|!B#8_*F7kK$bU-f(;!DO?e=X> zLWY%t-FH?94fL+8Wwu{r+55XCPn$fw=ZAd@c$TfaHQ$)jmC77~cU;@%t!wzQW>K$b znef2qQUAs#7T(^kIn7V-?{cD>OInsN7tY9);?J#R$`&1Hf=3(HZ*&l#obUQws}A2L zfyeTWGq{MbA!RqRG6+kE(6=q}MOr$adt)t5-}aC#MeFNWySo88dnen zem7;s`ZRq5G7-{F!7KRa&B95=SY;&s!B>>KmA3SB9!N5{Dg}-p$SA%*mgX+?2s|gf zsN7>AEoaFXb>edQu7;h9X|`651h)Blm0pUJLUPHi^pS97eXanFZ-QsMv{1F^8%2vq`16hy$2Y{kbT~~U zMAy@a^G0y$yM=%XiAFV$(Uwhg4I{WBMRMOlx@clpjv8h{@l)F!bz)kEJak@%I4*YG z(ZuKtof^GCh_&xwIlx$_Mlwra2JWKuxgQTPbD2wOP7=WjQ>F45WWWtnp5E22+2-8lZ1X+`18b_%(BJPA(-re-SW(oZ z{pOE91EmSk=8i-kE@(eDj%jdd0UDsf+b|s;Jtmg>OynTESx8DsD6C+5zFZQGi0RLe zP(~rI5`$tH09*$W8_=G2NQAh0C(1;dQxMfd24go&EtWY?tv$uo9IQOB)Q2h>9~#k_ zv%KqGs~9jjlqcA&2up{=RRAmdK91d*h!J4VG-SZp!9?uz6E2DOW|ag1yVi^Xehn2^ z>)Zg^osj^A9s&>5(XgTlWX}lF7t6oEUgoOlJsy$@Xho=FMNu477$NBE8{`M!F2F5% z#`8#2^AguZ3OTbaNieB6HGLU)!z_#o>EsJXy6vUHfP}Nbgijh$COKeM@?LFK&N;KX z`80e`eV0ZJ5J5m0EG&h|c*?)0>c4MmbdPS^tYPu$!$4X8 zAMzCfC-fLDB)AvwBdITc>V;*~(*b zg{c*LcYmEfqKK9L=Q~1UtJ_xmqP~Jff&-q(#p=nsJU14(X{X6 zN~XP1^a(oMSy;Q({$D?+)9DiM=b^RC*AAPNNm?5o`BLv6=|wt@8f>6ak4j0cE`R7N zxTEczEXQ#5eb^o6j=d84m50c_0M~T+c|Kk%^D-Rq|GF4YgD!`m|K)NhkbzsXXnhUi z4zj8g_y2tBRrG01IHs$bQpen&x87f+roT@gJ^qmrt+4(M_yAJL65M`cl-Ui&Su z_#N3Ci9E$Yo&UPey?IqnruQ$D!gVpi_tM`Yn??`02z4?sNnU%?HCFLUT|IH6F5@eW_Xpbcprte1HSoQmIc&Aw^@Q0H2_(rUx z&YnQY=te=n`rntsGrKQ)AvO}oyNOS4jG6WD0HQXZA9^05@U9Ae8i zZ<^H|F5U9~ySLuIRgB+EDVNznJlFp9&797k!`g9c{-6*){c+3CMR87GubpDVLmW28 z(a8pOLX)f37kJ&rCoHmyH+ijC*h;&r6W)7Yn?Z(4?e#CD?^T!roscQ_{^*q$(Rc%P z#vGHY)Z5F0ddyOCEYi|^yO6@Q((OLC0XsDc`Z2ewpmhCS_kmQkQ;i^4=E8hzbl6vD za~3W)W@~a{sHj=~#;SOh3Fs^0RQ&v1Xki*)<29nfEv7NDn@f z*aZ+DsGR11zEAM;9G|Gx2BqF4Ja>FBewh9~rpxAn7xSgm(U)`RI<=1nJ;r6auh&KO z+Y_Iy^GSF%BW1rlY@_+^k@F<{f_#)tq+V=l*qStOm_Kq{SbaX16surFJ9Nt zp>PFC!wZDCuk)H+@{fK+Sl+zTj`w{0!}b35Mb44XN%n244#&lvg<*#UCbsw8q_kt< z7Y|Qu-7K$ws^6HQa9SkAxUl(dNEH!RUWibMaFdZ(>}b0$%ssI3gjUD#e+t%2UdRoF zz?!{oJ*FoY@*+K}RiE_rti7?2&&sQ&ymjlfdA3lH)L*UfrLTA0_k}{XObr#izRyv0 zk;e_m>%$F8>F%#Na#SXesIz;u>;CRzB}Rd$0Br@gog0G6mvx)gh1#R-oJz*w8uXKN zu#`e9?a)5uapleWpznKjTixtZUv#wj^TK>uOPu1Pz6JM%M;=r(@^{^;;bh^IUNq2w4;F~MG<8vOKNFO+_ zEWP#d&e?~fDO7mu*G&B{jalcBYv5aydxn#6{y9vL(97+ieV*_;4Ueytt=`?W)yn^> z&B}9zmp6H=OlQl!*A4yYsSaK08e3dKVPGX2Ac-9Rlmm8QyA5Ht&hs#2_7%)#3|#6I 
zZ&4+Xjb#A9Y7$`s&KDTs_Jn@IaSu|$uGuVI`X=Le&!{hh($(iY2yQ-!5|W{R60Jce^Q0Q0;m&oe0)CgYwssMmHA9l)N?7#I(tBUx;GVneX}?)~rNiDA-7Ayp*qsE1kV z*iMQSRrLetYTKwq2q1%)DNM5+7*^arTPi2E>VT)nL};xEtuuLJoG(3L+rl$gVUq7< zyPw2HHaW^;lG5!AeAyQLlX5U3(k@5DmT#{n!HL}_26j0~V{Cw1-m8vb9ZinwfX!0Z z!n;lKkc^e>1JM1W;&3*=bJPen01ex>u(#D`um&Ps&tCmp2$fD^Fl}9SRrM?o2qk~` z3IGNR1+q`bund-~t!Stqfj5~o5QzYC8XzD=GiE`Ol>Qb2;11fsU_Wlr;b%HjkX94w zd(;xHZjL@`{vn~~<}cTE8prd);hWo(dU|Z#hPXPMCRuTxSf7LNhG>~)W{Sx2P>)e{++DwbE) zco%d43Cn^J_Hhtu1FVoQ8bjV~s?}U}T1|7b7Zb*(#zU%93jA`xsJ5s+J@a1#Ik}=a&}20h(;88A?7p4ZQ4#TGP;9Mp(}UV%ol}DjH^Fn>b z<965xU4{{GFg+a1g(s+QPe2?3d=M!)Mq=$b1nU=KB|nZFNbo&y;RFEQOhz@~&w0E0 z1>y6?hzfPdG7=EW$D}>OB!Xy9X7DT@lZ_2-UCK|c!D`UN=c`%MVWL%0V%mZOWh@CO zL`=ZR3kkz(@Q#CIM8(gNauvDnL{x8!L`tR;^2Viq8De{k5l$SMSWS>~7<7g!iU@N9 zk0i#lXp{+pT?6%*Gt~Pg>BRK$^uRhZcjS>bUW%~nEj@Em3nv;Grd=&tHf>0%3-S=e zp zUKDopZ-F(3DtTo~XtBiFO@FHg*<3Do^gve}F?i;SbL-GbnDm^Z@ik_M(ATK`^E$?> z2dov}B&94rnW+R}ryG-(*XD&m^DFw0OM|Chc48qo-O^y?$z?R$zBg^jPkb z>VQY*B~I?Lk`^`?ez3W_vi;a@aWBr2ZrUz(LS)4^8IkUS&Qt%H(<+{}?tI9b-lK<8 zKdtyC4-n%!tt3zG`ahW9G!2_XiqgLyAx)kIhb|tVjk|4g} zuUY}ZwxH2ux!vWXDMWDlNd*IIV`-}Y+BZ4=SFHdTSo?qT_5#*}{NI&sr&wPR*l~^erJ2^cnk3FX zkzM??P3LvX<}6kRgwA(rUcNftrE}}U{3E?~orP}0XQ2x{ziS1kUq3AL(U5CD^BJ=H zKKI*cRDFKD$@t^v0jBNR#X-02`xc+L@2OgR>K*lQrB-0=m!ZJZ`@TF6x?J_;Md+=M zUtWf_ul+h4`E1|USJ4+DnXer`314mPj1#{5x^49!Hf-dlVp+qO%TXFna~b3uj;7tZ zwKN9#^}b=v&-(Q-Zv{TRkP2FHAjt!6j+$E1yKmuqY?6M32yO=yC+ z0!eG$E1RGu{ZE_=ZP$~kuv@cx1h%c!-|&M=&K|q@zlo&70In@+w)O@x$B0a_J8>ye*Y_IA!J{qe2#*2tneO ztU<$9_Mz6&!iAtV3y>0ZDC`7DnfQ=ugOhYuWyRw7EhYosB;%#(6+g1}Am5tw?L6GC z)ah+yo`PdJe&|oAUSmmZ&1C_G0ZLEu+YMx1@KX=>vFGho+=Sb?>t zeq4S|wIU+_9c$N3n9MXS7%&Xap_70kMqm?wO?YNBu^zS1@Jtl!8@zC-R`IJglupyn z3Mx1#SLhyZHbT$vYQU{>Ik_exKefq`x2RZ2iATMoyFIoBnf3)pll$M`#73)P4HD8c z&H;-iT3trOwEYAn&fPjdZQes0(?XnklZb5$GJO=cjdij))I}F+9NmN559CV|XW>OX zuhGyikdf~)4b{y}Ax_x%ZS_xYJisX;k{(y&ArLB*qK@{kp{kkMbq)NUp||4D%FL73 z$iqL--NZH~PfMqdlQc{OH@7$XXcZlu&3yAn^V5N1obj9vp`Q(cH8QtX0c-T`A z&-+KtI@j~8?&t6QbazOa6#S~24c$C|uqDUF(P=+Sl|I_Yo zg9(;6HZ*KxdD_qN*Um&}wd1$lf2U%gZW;@KqE};q!l+hL#Pw@9f)K2sOo)MsiRh+Pb7}CKC>6@% z#DeDU&}U7qn$^E*ni22@4Gs*-z-J$t{W);q$oH+b=V_}yv~xoM<~$oVMZf&}v!%Q# zgvpimArt{=>e(Z_^GdT<CsIP1Qp8cs->p_4f~$W1^)|RQIfpXaXn=sQT2DAp=9B6y;eR%u`oGo} zq^-24>Vzej&Lk~3QTLI+u5y#h8$jTWSZJyfWiCje2{+SdjLKp zxscr~qYV$K2|#tTUKG6Lp&Re=%Bz1p_1aIBMaw2ccTGbtk7;3a~An!kNgJ;mE^Tjz` zK-$8yXdklV=}nU!+i5Rc&82M^U5d%1S~Gdq+C&g97H4_Lb;}Ndq@Hy?=gmutp%o;fGnI)zMI-Qli9j!<4{r`Ez~Pc;IemxZEhO?E(F7}j8rt*Iny&UVy_XeY8x$=9Hmrf?!ThF5|@!>j=~PV#G03vOdsD)|z&39lNa zl-L0^;g8ltUJa0lV70H6Vs)5s$i#gOFvMqpp1evZ`b7rUEbFaV|5frT=cxqKlKxAG zJ&-z zF*>?&jRJ+nQ+(0ia9F&VRm}p^&NV467p;i%LG8rXhlLf9)b}VvfmPWH`qjf%eKDVc zhD}9AfpN~5wV=FcR>1%j+yqnfE6r1XqnzV{^+kL#B+*0tdJh{~k~Byu)Mp^Z@X3hW zfRyiHfS$86^u&6Np|lxnJC1P2Nsq_lStukjkq= z4)I(2bGB(NHOckrrQ9uPl1O=`(U&bq`ca%A7P8c;dFfMxM8bBnjVWsh?htvIVf@Ga zg0-UZHOaw6V@f~VFHh#`DyPq71u)0pZ#prWUq;iQBkG`K-!9n6YL|M|vo6=(Ig41b z!kVM%hAlPoyHT`JMd4C+(CgBU`vUlf!Gpp}i=;NaF`UGp6c@#%WO<2kis?37Kzu#% z82biGU`jF%Jjx7s8laer?V!Ex$3!&_kf2l7y49WPE*baSu(qWUD?>DQD;wU98X*B8 zX+99pk3j1l6|v~Ts~PsEm{^a9=5yg^nRmfxKD1LUINYjfvvxGF$>Un?)eLh3!R{TMeSp@mlqMkmt;1h!(JxKs}VLdBWDBQsp-Z9B|uT!)>^)P z&K}iP|6KGoZB6MMpIv)h3Bq=QijB+Yrko#d^`3F^0wVfAeG_x5dF*a!R6>-YhociK zRY`_*C7_H9+1QvI!dK?u(-Mi8u!M&=esUs$CC*m`p3Y)fE)B3?8BYb3{Z1^$!ItPk zd5-~SZf~MmsGqi(0-&4A_Npfi>-XJ#t+5clZrERGPA*x-s4gPLtR#z$kCn&iWyB08 zNcreJZgh@HI-0UR7Dq$s6kTQS1VI4X9C_;=_oqF3s0+wgj^L+EUL?fnMs6zfe)-zQ z98!Jlb5)q_h9$qrdp~}CAZfSsMG1dU(6pnxRUo0VEYmkN zsu*rSz8LURhh1{elOHi?r!73cV}Y?7tBbhbW{O;h3u)b1O& 
z&a+VaI1-=2VCVIbpdq(R54p1%(7PdG3%H&lNPGkwa>f8qGL$S6HMUhEa pk9WD zi&{20mL9wJ){$3+bYLP*>4;@}oh7HWnS_0bq{ zUgfu{#dA6^?zC$ngV%a6F(|FoJ&sSH%LW^H6zuY>%H8&{ z&11_KNeSy5$#_qAle_A?QlO2$u11ddbI#7(tbS>~Rq+an1=`INO zdkAVahYi!1e_X8Vqr*#^v$g+Ov$@tS&SSiwn*V>c1c@jn?tZYe~J# ztZN$#3+K9JdHyl{KCcTJxRjKRmL5b2HO~c^Ebipj(Gz2r1@x+&@p=z-Yks@+yV6yA zhqoRSnh};0baYu~`T>e?=T~+EAL5;kkqdc3qWcD2x3vCU1sU>;RU3D-R*>S;;1Dy} zRw43UG0{m?SSs+mED^tRo0HZbSy!=?6DUXD>EneCGs+?-3ZJ6&D0PIoxhsl9G=a2Y>tXWY%UxH;J6 zmTU42p=wrqvaof+dS$O##NcwxD~IhgUOKAZ8F_PuYt z@ptP)n#diM+}tZKOgZ||aqvv?H5=`jppFhsw(mC(LRmC*~MpIiGk%eZhnN<_rq>)WMoeW=awSu9xhNGF6-(_wK)r@w0# zLOP^u-`*DNx%O?c=yw%ls_yrxveWy&PnTc523FzT`t*IKx?T6jhx%vx ze|&8CT?P5|(~sZ7ub@@-zmauawN?0Y>D^A#gzsOEPr1exKdbznVDJDdSo4egimIw& z$S`6WBFe>J`10E-EBQQmY2gks4V}l9gL)uA-Uv-HVZYo}l~md!2@72wTdj@FR@!8D z#|C70_jK5ZsK`|Xi6==RHa!XuYDyJi9&^G4<#qSkubLV?daukL!lkZ zX2;g+aGjJ?Hys~?52@v`*s`S5Tw57oKl?Zw!0slX%$w~@XjpiXeWMk8WXqW#6Besd zFU#y21*@bUPLeMRm`JE41|L;d(W3)|J}IljO@m-JHYyZsdw3OY7jT7SB;8$&j9#kvz5eizP;h+n3(+xH$UTqq)Boc&98R&*{*yxwnRts z%jz-BDcZ#M9J3B106|Z9u@{id{bg0QkF0_ zDAQ17SD<@eKFkDqNYoLFbdL(LMFp;!Sjy3;o)g(|V>>fz@3!r9+1jf4t27204FEXR zTi8{OZ<<_PkEy4BSF;PM*(NXe@|KgGcKx2}@EWMi1e|3WzfWc!zI1H_UKD=7L2uLR zt49ysgN5tJ7?Aw07l^0$d+mRssZ(|t;0NLJN7T)0?aNY<+N`WT>VHqLf$pzOL&StX z+aRUU%f*K}11e(EZSIlp#Y1qNBa91IZ)vAW>)=wHct^z^2B5C0{(=#b^BPyLR)yiG{`xlNnoVFRK^F8OWDknc$7o-06YHvEZQ z8+nzz<~p{d0UJ7i2i2#kLLqNySzwqbd8PoKX2gi)vOd!kjNke|&M}grYD(^Gl}2 zXlwc|+eTdK1r20}Cp=ot-&-Y2k*SiOA7dtOSN80wG#pK3uWry?zBAz87yHGXp+?!N zia=si@;BKjH!~Uz-qi&o2Kyg2_~Ygmf2_OCIIqpGRO~AL$jcNq+w5A7zsjpVX+d$v zS3Nf{7rXBd#wMGKcAmoLVH_dtHN9NSwO-RBd*1Sp1{Q5P@??+}%uNvz z3)pJ`R_dx$rG$cEkZjCiB|)!GN##EVRWhbsJyg1w3Rr{GwWy2*y>Q8ilpgWpsN)ed_yoMvyJ3H z_xIlNr(l7GnNDWYMKmpYkIMC-)c3^PB3MhBl@Hv*@T`-0RihEIVPY`A(Wk6Ck!`vt zE>##REW-9yFECtTP?mTmn>u-|zt>ctL;Q3OWfU-5-DExPRQJBoN)%d=eyhz?d|hKN zPROluqRey{q1 zZi-q(VD*~{e3E$UW@t|~nY}m`*8EH(e_Il@j1-ncts;drL*MSW36R>0z&fh=VJ1(1 z8YF(G++qly{MNeARo*?u)&{Ns*aH=X4Qr?7gFF!P{-1R=)R501ikO~y2o1MjSn>rq zH9STeLcfiQne@4yxcR}1p(_hNkAm_f1Zn%vbPxF$^(nl3o5doHk)+s^sPYJ_RdZCF ztZ6lY38SVF1kMK46cY9c+?PN%yd%S05Ebp7>opZy?ZSKO5%k7d)}?0OCmJqJze}@~ zw*ZGMzOb~HeNjp?d`-`=A^MqBrt@&R^o(t-phq5nQ`sCAo@<2-Q?{aZa9FE8szNDY zE4GUqNB11)RCV&AdJ-UUkYoCh&wvnVAQ##fBapKuE6lOI2)uv7xM3}{}n&bbd}+i%&mn@z@}K6Z-u%-SLK3zc)|4Ce9C+neEr4JjyJ zmkmjlZ}jDYo3H18iflL|sYVu@lq}k7)81hN`2>2|1IJHh(@2LBG&uwNxMnbqc*=|T zv~9cqIh60~Un(5-HY_WHXx3qBg9EeMbp{9&Z%L2@AXRBc2J?{eD3UFWlee!jNet0} z%Z7qTZ)^Tz2`MJ9g=gjEJi>~p)_*We0AyyDYyl^l@Sg3Lbkjixvhh8Y`*#prK!4e< z(-Q}L{HDeAA+fwSiDsZK&Zo>#pvDhUI$J+Gky?VF(9(WW`+{EFqrCIpf}rUROFdIlsVVYk&u>(h+|M*mXh&OMRiCNclU0b&HDQ ziXoKdtv7D4L#f4L*Aj$O%*7bGfW09ULw(v=Qg)JDE==|>roiI|2avox0^8=e(*+aOQJyt4FoeJSonNk(+& z92?(VeE7xP7Slp&bbOJoV!;yB zy@9;0Y+CoObe7eg=}nStSKagyn!3uCp6ou>6LVL#YayrHYIjF#SJ#&CZmZ=uyrsrN zK8arMNVqq0iK=Gis$a1~eRJ?d(xk?@U72OtJ=0F_%0!rR_2stb^36_sA1uGtwe=rQ zMQ}hLNP_14x%YOHR^Yn-{Gt69N1s3|%)dDL{Ka^pB_?E5)BbX5*H?50IQ@0>(HHgq zn^$24t_$wH!d%DSjHj@in;DL`e#3RaPR8F(?ca>2Klk1hufku9r?-y^3_||!Du9e9 zrOkKhr&qiRAov<&Jgs;Y{%~qXfQ+Z#UWFC#bz{Ta`2WFpI&O=7{q5kryHT8K@S*wc zv~l`wvJW$4bHjZwl_wZ?&Sr+0AN{tpAjU}NJoqI9ME;{r#$N+F{^nKiv&49Vd((f- ztKg~OpZ8=}b=jU@ou5@t;H{?%wr$(u;d?48oE9H!eb4aXsnX)lZ)t?|CCG94IHp__ z^61EJdyurBLq-`+z|TN?f=B(H|7^K%0eNv82U!HKkHzNJ)4+w8Ukh*1P=r9UI1Un4 zK}Q80dSh#e>KZzuj@XN>h*NQEp&L^+SeFo`g#u@4=8aO0*_tXcvrJgD9a|!tDy*b8 zqus9VY)~+iG@T;m>s-QL)ZpWVOcg#g7Eqr-_9!sA9Db=)^F`x!AK_ol!i9<29{xrC zbZr?ylRGWs8+6}Mm)6%|6ap&P+r_`gGX+OHel*?1%-!MZeWDu3*Xt`c&fdsE!<_>@ zZ&97K5TR~(b^*0HUe`{RtUA;BX~&hJHt4rCdDMLb!!ExeIX7PWGo|=S+Xz*HL>IK5 
z%p_ZC)b9-l^iJAL>Cucl=<6)T2pOCWT3>5BzZ@`Q(L#7zO?xlyxS!raU{pTp__BsW zMpZFuY>eW#PlmQ5ZGuHETyxOltFIM4fB!nx$8Ne`{-cg&Yg{-j=q%Kpb;&N}>*>8B zp`T7O+oH2YDR2IV_g;p@sw9(5zx?dwictkWoV!mQfAG`AZ^P$u=d*7X-`<>kCN}fk z-KXLF18O+;B#I=M<7c#1Sq_~l5NB4B3M^MawNBh9H{VUgi#tNcqVSuS zt^LJZc8cUkmDecRvRY8>sC&)vKd^z+>}pcjomPy}W0%Yo%Z{;f3n-OR z3HKF_sd*UTihb`aUO&F{NO3{W-*vr>aS>xWKDoFL-%h9h~MG8(KACw3UpjCgD2v?p;!|V=s%vyU1Zdfu_MJ7K9u9x zqXOUG1)s`2xaG574p=rl#KtIFxR1X%8{aI{y3h+gl~DFY? zV4#>T<`3Gsj`~bJrM0S|MpID5C@|ELx0vU>c0FM{%kXK?|DvtyzsMo?#AV(89_VJn z6A8I$8@=yFa80|2bdy6tGZKdoQhIG+Mn-Ri=_y@qwJ3Nae z(W_y$5CUP2UPWZ^N(L=eKej1Xg)yN7Tv!3!6Ni3!6Lw=_U6%7W}uyv{q#Kx`N4dlKR8;Fdhi`c`3>gxn5j zNcU>wd4pp$!D4ZtoGEzG>+?^qiBy@=>rNk9z`xE_f_S&M2%0+O^N-zDmwpwtO&5O8 zV3zj&lE{8bikzXOIkG1rQ}F6rXeD)G+3Fsx5=C&{!0I1B^wb+!aA1J#&X0@`-bSC= z?-*hZjqmb__Nz3R%KQP%eM2YC z?YSf2wpOC~C!uEVWOVIay_@&(u|t1mSrRm8$B-ot;jx^C!6C7d36|ySld4tZrwVUogFG5|Vf0EoO4};Hy_!ivQIpixsxT`q-E#>=N&qn2Zm4cs{6)*Hm8zQ*y{%xfnn~2 z1N;G~;pfYzMfVzvHQjP)#&9MqwxMMO#8jhhpEW9Fu?VX%o0TKyd&+&Oy+Nxmy z8$wIM!s#jZe@R!L6n$?n<^l%@bftbfn(kCQI~FwhS-JyS@98R z5wTj_Z@8D{DRo9hX@ZnWsf9?|j&POweBYP`x+g{Q$x4fyc14-$gaE#k(&vJ8h+2Jc z?(-#l3xdf{&aq>-Fr`@<(jpH1ig!N=+QUf=Ne!3W0_Vwm32IX)@sngQNW^~4=G_l- z0oovWRR|@;JOyNV4_mdhW3{1cCZFiIuTxxLJ)|ncC&n0-`c1i~!m#0z?hIdvAvpv@ zBwR8fdrT=?lV&&;4u`Uug-Rh)mga_YnNh>VE#xmD>6?onguVc!iR?BBabVmGpkWsq z;NWQ}eH+g*1{0KIt_fio4JLL}Ri_reWafD3uqc}^*=p}s#xPu;ci}*I_ETKE*&tc* z0f4xJL&N~y{g*@*UoW4`AbBHv(~<^NkLv6 z5!G&hn1CsqP##$^_ganryZGtYZU`i#z*bk+funBw4I*qvG*_sBX$!5M%#iq+FZ3mk z@Drt=oP(x=xo=!3zCHUy9-6-(?eV@fF4|^uv$j9MYWFx7(Ei4%uju&%1)7y^vV6^W zYo}cR-jfy~*q}XmH0TIV1+rHgqm5bTT2vAz^gqgp9h((Iu z2CnS@Gtz#NG~RIOrK(h&ppC&W{!zKU=p$qSF54{YtGq~cq^YjH8v{PlJ=Ty+eI1@? zG}JQc%rkg$Wyk3&U(d+RQlQ=@)xcJeK=47PW9^u}WGL5)2;XZ>9n;S`JW-;?)%#-B z_~iO%TdDNZ!bB<&j0%OWx@*nip^r7i7DYjzn2h_zwLT31=k)T)62As(1fNG4PQ@qhHs;^UJpc@(kuagZt_ETMFg(0)mG1zf`b_A zAvVCbR~(Mqu;5G)z%@AG1BBzvPten4d)~^b3^8mG$`(5gt>5uvokM`3r|Wt|kioe` ze6Y2p>IAT8?X2xb*j~4}EG)kBRQztb6VXoUv#Qi=y%E6*=uJ-Cb}Rn*pzO=R&2J(T z83l>9Q*w`6lQtJ5cH}Z$DKdPx@jqR9A$ou$_yeFoqtnk)8a+`YUIej**dXw2P8PCh ze`J$|3BKNT3I3O}l?#08FNJw>v$3x7!Y%@=HfL^!!uRc__-YlDR0Tc2FtI#jb={@@aC z^h4`yIV}pX_m;m;)^Xtu@0ZR`yc#fCq_7bZ^dSnRyTzS8+1iJ)U%RE_YE1lV)KLwo z!$-ziQfm%sB<@YgmYTX9DcZd8M?C1HjJ#Z_DChMtG=4Rf_2OMQZ}3j_t^w&d=B~MI&A3>#D`rwWr)Z0QJ)~tiA7M4abkR-Ty$IuRx z^@dT4+%u_u@gy6jG9*`Y5TgS}*~K$qD%AlIaNgtIMBMxmIrF1peverxgX}De*>r;j zS*sEw1HG>3&Pxf+6+w}fv?f?U9~A}wZPwubEQ}kKLt+T@PPITLmM)X9Jcdz8o0{vB zNF1&%RdR--ItXdrTQtGaW@rb|m}S?b5Exn9Nj$T;QdBZmmULZzw|<`@5XNVF zS@wjdD@`8N>%dz;^ODaU+)3tjOO+`&NU+oaW%UF?SbH5*&jJCd z3~NUdPiLa+oQPE&FGVwW#^Zr3lULtt4XfvJK#&Ba#SW>m(%BKohVGL?XqLp^K8|Hc zeWYZ*%c8GhC_ z@I|c-l7ghP^mxHjVG#DYFfi4vT<3(1NO9-@C_=)(m zDez!tYk<;C6pNhHaQd*fivKJV0UZSSCIuQvTwV&rx0rXlpclCOT--`V zAg+p5g+5>*H#Em*?*2FJE8 zZ%5P52B|g0Itu+*1~r3ww>nz_>0{Tv?wXdc6|Hbk{}!!XL1q#_XZb5tZfaMRj!tqs zKPj@?5rtIY+wcy~l3gf&LpWG(ZF#$?R-9zDMeZ^w#G~B; z4zz162S8ZSwx)%}o3K7}Vh~Nh6im(9@W$bY5uTZ5{Ka)?TZz2sAV4mO4bN-KtKtQU zl+fAyd=02ThZ%;YOEQRl2XnVf5uS3`QtGT4b^}a)O>{Mxm7o-*$3wdp+Aah(`OJ|X z67TaEXj7*&QNY@~6^Dry5@C$gL#HSkN$k0`IZEsTsHk&T^D(rIVik%fJ5`K^M*4we zmCe5l!?y}@OO!G)Y%HEwvSU$W0V4W+Xt3~6gW0?vUXVcDKbrKO-bwARZdW6ALOQSc zDNb-zgV{iii=3~nhxQdItHW8UFQ7{JkX8$oFl0jdxcr3sUJ15x!=6o%82HR}no~7< zR;e9wb85TU3tWz0D?129gKHy7kX!3V)7bhbgcG0br|K)GJqhSwhplBQqATu_ANt$P zu{9__%xs#ip~~XG=PI%SN+~IU$PtUG25yb+3QK7+=(lNT@xONFkzj)E%rS}%6bv44 z2hGkk3|}F;eGp@*JY^qIiKyqhh1dYBp5403ZWQg{RsZU znWwl85ve{Yi23Qs4)j%ru+M-6WL+y2y*~3?^UR*QW%6_AxBLr6TE#)eKUQm#km;}H zb|l$(JwIUj=+=X^6>VIFrxs`o={>I3L3{3b`G%aj(Lu!Q{W}}j%DP_SWSJ{h!Dnwj 
z-Fa4IIrb7Bh&_*i3lVc9;4=9Q z)PWQl39~>#!UZ=ftEyxMG4FwEQIvdatr$`cmq$XziE`>fo=e<&DlyNL90RW3*Fk~g zU>BjpM}Rd!#JYi>Kt@?>+MP?f@o6vWvvhH_bT}8lh2w!-5-x*-=qKXdEZ$laE4c%xBJEW0YbM88NAa=jLcBB^t1*C`25_+gK!$`E z6JqB`7z6`)AHdDvan0Ofgp zLiyiu&n2jWp~^=^mAO};o<7|U?_wV&bib^=Lgtx%`%Vo`gT?JImSMWB;GIkvcf*VA z{^fkcnwQDar{PEUxoYA~Y^sgdj5pzbNq%6vY`24bl+iB~`_+ATxXn(txSbXCvMj%w7<;4z&KvE!b!^jjN_sA1wT+(-Di;m zKoQdm=_yltNAwyn;y>we|1~P>*Ek-$3Hlkw{~uva zW-GhgyYtg?3f=_W{r1zP{U@H&;4Zt Date: Fri, 31 May 2024 10:46:54 +0200 Subject: [PATCH 151/251] docs: Update README.md --- examples/wikivoyage_eu/pubspec.lock | 14 ----- examples/wikivoyage_eu/pubspec_overrides.yaml | 6 +- packages/langchain/README.md | 61 ++++++++++++------- 3 files changed, 41 insertions(+), 40 deletions(-) diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index f132728e..c05174d0 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -189,13 +189,6 @@ packages: relative: true source: path version: "0.2.1+1" - langchain_openai: - dependency: "direct overridden" - description: - path: "../../packages/langchain_openai" - relative: true - source: path - version: "0.6.1+1" langchain_tiktoken: dependency: transitive description: @@ -243,13 +236,6 @@ packages: relative: true source: path version: "0.1.0+1" - openai_dart: - dependency: "direct overridden" - description: - path: "../../packages/openai_dart" - relative: true - source: path - version: "0.3.2+1" path: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec_overrides.yaml b/examples/wikivoyage_eu/pubspec_overrides.yaml index 8b4fec3e..075ddc4f 100644 --- a/examples/wikivoyage_eu/pubspec_overrides.yaml +++ b/examples/wikivoyage_eu/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_community,langchain_ollama,ollama_dart +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_community,langchain_ollama,ollama_dart dependency_overrides: langchain: path: ../../packages/langchain @@ -8,9 +8,5 @@ dependency_overrides: path: ../../packages/langchain_core langchain_ollama: path: ../../packages/langchain_ollama - langchain_openai: - path: ../../packages/langchain_openai ollama_dart: path: ../../packages/ollama_dart - openai_dart: - path: ../../packages/openai_dart diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 561f7d7d..b86c0eae 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -10,9 +10,9 @@ Build LLM-powered Dart/Flutter applications. ## What is LangChain.dart? -LangChain.dart is a Dart port of the popular [LangChain](https://github.com/hwchase17/langchain) Python framework created by [Harrison Chase](https://www.linkedin.com/in/harrison-chase-961287118). +LangChain.dart is an unofficial Dart port of the popular [LangChain](https://github.com/hwchase17/langchain) Python framework created by [Harrison Chase](https://www.linkedin.com/in/harrison-chase-961287118). -LangChain provides a set of ready-to-use components for working with language models and a standard interface for chaining them together to formulate more advanced use cases (e.g. chatbots, Q&A with RAG, agents, summarization, extraction, etc.). +LangChain provides a set of ready-to-use components for working with language models and a standard interface for chaining them together to formulate more advanced use cases (e.g. 
chatbots, Q&A with RAG, agents, summarization, translation, extraction, recsys, etc.). The components can be grouped into a few core modules: @@ -22,7 +22,7 @@ The components can be grouped into a few core modules: - 📚 **Retrieval:** assists in loading user data (via document loaders), transforming it (with text splitters), extracting its meaning (using embedding models), storing (in vector stores) and retrieving it (through retrievers) so that it can be used to ground the model's responses (i.e. Retrieval-Augmented Generation or RAG). - 🤖 **Agents:** "bots" that leverage LLMs to make informed decisions about which available tools (such as web search, calculators, database lookup, etc.) to use to accomplish the designated task. -The different components can be composed together using the LangChain Expression Language (LCEL). +The different components can be composed together using the [LangChain Expression Language (LCEL)](https://langchaindart.dev/#/expression_language/get_started). ## Motivation @@ -37,15 +37,32 @@ LangChain.dart aims to fill this gap by abstracting the intricacies of working w ## Packages LangChain.dart has a modular design that allows developers to import only the components they need. The ecosystem consists of several packages: -- [`langchain_core`](https://pub.dev/packages/langchain_core): contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. - > Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. -- [`langchain`](https://pub.dev/packages/langchain): contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. - > Depend on this package to build LLM applications with LangChain.dart. - > This package exposes `langchain_core` so you don't need to depend on it explicitly. -- [`langchain_community`](https://pub.dev/packages/langchain_community): contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. - > Depend on this package if you want to use any of the integrations or components it provides. -- Integration-specific packages (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), etc.): popular third-party integrations are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package. - > Depend on an integration-specific package if you want to use the specific integration. + +### [`langchain_core`](https://pub.dev/packages/langchain_core) + +Contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. + +> Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. + +### [`langchain`](https://pub.dev/packages/langchain) + +Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. + +> Depend on this package to build LLM applications with LangChain.dart. +> +> This package exposes `langchain_core` so you don't need to depend on it explicitly. + +### [`langchain_community`](https://pub.dev/packages/langchain_community) + +Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. 
+ +> Depend on this package if you want to use any of the integrations or components it provides. + +### Integration-specific packages + +Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), [`langchain_ollama`](https://pub.dev/packages/langchain_ollama), etc.) are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package. + +> Depend on an integration-specific package if you want to use the specific integration.
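To make the package split above concrete, here is a minimal usage sketch (illustrative only, not part of this patch). It composes a prompt template and output parser from `langchain` with a chat model from the `langchain_openai` integration package via LCEL-style piping; the `ChatPromptTemplate`, `ChatOpenAI`, `StringOutputParser` and `pipe` names are assumed from those packages' public APIs, and the API key value is a placeholder.

```dart
// Illustrative sketch: core abstractions (langchain) composed with an
// integration package (langchain_openai) using LCEL-style piping.
import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  final promptTemplate = ChatPromptTemplate.fromTemplate(
    'Tell me a joke about {topic}',
  );
  final chatModel = ChatOpenAI(apiKey: 'OPENAI_API_KEY'); // placeholder key
  final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser());
  final res = await chain.invoke({'topic': 'llamas'});
  print(res);
}
```

Because only the model comes from the integration package, the same chain structure should work if `ChatOpenAI` is swapped for another provider's chat model (e.g. one from `langchain_ollama` or `langchain_google`).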

@@ -81,22 +98,24 @@ Functionality provided by each integration package:

 The following packages are maintained (and used internally) by LangChain.dart, although they can also be used independently:

-| Package | Version | Description |
-|---------|---------|-------------|
-| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | Chroma DB API client |
-| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | Google AI for Developers (Gemini API) client |
-| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | Mistral AI API client |
-| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | Ollama API client |
-| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | OpenAI API client |
-| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | GCP Vertex AI API client |
+| Package | Version | Description |
+|---------|---------|-------------|
+| [anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | Anthropic (Claude API) client |
+| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | Chroma DB API client |
+| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | Google AI for Developers (Gemini API) client |
+| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | Mistral AI API client |
+| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | Ollama API client |
+| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | OpenAI API client |
+| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | GCP Vertex AI API client |

 ## Getting started

-To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_openai` or `langchain_google`):
+To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_community`, `langchain_openai`, `langchain_google`, etc.):

 ```yaml
 dependencies:
   langchain: {version}
+  langchain_community: {version}
   langchain_openai: {version}
   langchain_google: {version}
   ...

From 22f4c3803ce7e0a35d150a5bed534c9b377f11ea Mon Sep 17 00:00:00 2001
From: David Miguel
Date: Sat, 1 Jun 2024 00:16:24 +0200
Subject: [PATCH 152/251] chore(release): publish packages

 - langchain@0.7.2
 - langchain_chroma@0.2.0+5
 - langchain_community@0.2.1
 - langchain_core@0.3.2
 - langchain_firebase@0.1.0+2
 - langchain_google@0.5.1
 - langchain_mistralai@0.2.1
 - langchain_ollama@0.2.2
 - langchain_openai@0.6.2
 - langchain_pinecone@0.1.0+5
 - langchain_supabase@0.1.0+5
 - anthropic_sdk_dart@0.0.1
 - chromadb@0.2.0+1
 - googleai_dart@0.1.0+1
 - mistralai_dart@0.0.3+2
 - ollama_dart@0.1.1
 - openai_dart@0.3.3
 - vertex_ai@0.1.0+1
---
 CHANGELOG.md                                | 84 +++++++++++++++----
 examples/browser_summarizer/pubspec.yaml    |  6 +-
 examples/docs_examples/pubspec.yaml         | 14 ++--
 examples/hello_world_backend/pubspec.yaml   |  4 +-
 examples/hello_world_cli/pubspec.yaml       |  4 +-
 examples/hello_world_flutter/pubspec.yaml   | 10 +--
 .../pubspec.yaml                            |  2 +-
 examples/wikivoyage_eu/pubspec.yaml         |  6 +-
 melos.yaml                                  |  2 +-
 packages/anthropic_sdk_dart/CHANGELOG.md    |  6 +-
 packages/anthropic_sdk_dart/pubspec.yaml    |  2 +-
 packages/chromadb/CHANGELOG.md              |  4 +
 packages/chromadb/pubspec.yaml              |  2 +-
 packages/googleai_dart/CHANGELOG.md         |  4 +
 packages/googleai_dart/pubspec.yaml         |  2 +-
 packages/langchain/CHANGELOG.md             |  8 ++
 packages/langchain/pubspec.yaml             |  4 +-
 packages/langchain_chroma/CHANGELOG.md      |  4 +
 packages/langchain_chroma/pubspec.yaml      | 12 +--
 packages/langchain_community/CHANGELOG.md   |  5 ++
 packages/langchain_community/pubspec.yaml   |  6 +-
 packages/langchain_core/CHANGELOG.md        |  6 ++
 packages/langchain_core/pubspec.yaml        |  2 +-
 packages/langchain_firebase/CHANGELOG.md    |  4 +
 .../langchain_firebase/example/pubspec.yaml |  4 +-
 packages/langchain_firebase/pubspec.yaml    |  4 +-
 packages/langchain_google/CHANGELOG.md      |  4 +
 packages/langchain_google/pubspec.yaml      |  6 +-
 packages/langchain_mistralai/CHANGELOG.md   |  4 +
 packages/langchain_mistralai/pubspec.yaml   |  6 +-
 packages/langchain_ollama/CHANGELOG.md      |  4 +
 packages/langchain_ollama/pubspec.yaml      |  6 +-
 packages/langchain_openai/CHANGELOG.md      |  5 ++
 packages/langchain_openai/pubspec.yaml      | 10 +--
 packages/langchain_pinecone/CHANGELOG.md    |  4 +
 packages/langchain_pinecone/pubspec.yaml    |  6 +-
 packages/langchain_supabase/CHANGELOG.md    |  4 +
 packages/langchain_supabase/pubspec.yaml    | 10 +--
 packages/mistralai_dart/CHANGELOG.md        |  4 +
 packages/mistralai_dart/pubspec.yaml        |  2 +-
 packages/ollama_dart/CHANGELOG.md           |  5 ++
 packages/ollama_dart/pubspec.yaml           |  2 +-
 packages/openai_dart/CHANGELOG.md           |  6 ++
 packages/openai_dart/pubspec.yaml           |  2 +-
 packages/vertex_ai/CHANGELOG.md             |  4 +
 packages/vertex_ai/pubspec.yaml             |  2 +-
 46 files changed, 220 insertions(+), 87 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 079c8450..e1699211 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,73 @@

 Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release.

+## 2024-06-01 + +### Changes + +--- + +New packages: + + - [`anthropic_sdk_dart` - `v0.0.1`](#anthropic_sdk_dart---v001) + +Packages with other changes: + + - [`langchain` - `v0.7.2`](#langchain---v072) + - [`langchain_core` - `v0.3.2`](#langchain_core---v032) + - [`langchain_community` - `v0.2.1`](#langchain_community---v021) + - [`langchain_chroma` - `v0.2.0+5`](#langchain_chroma---v0205) + - [`langchain_firebase` - `v0.1.0+2`](#langchain_firebase---v0102) + - [`langchain_google` - `v0.5.1`](#langchain_google---v051) + - [`langchain_mistralai` - `v0.2.1`](#langchain_mistralai---v021) + - [`langchain_ollama` - `v0.2.2`](#langchain_ollama---v022) + - [`langchain_openai` - `v0.6.2`](#langchain_openai---v062) + - [`langchain_pinecone` - `v0.1.0+5`](#langchain_pinecone---v0105) + - [`langchain_supabase` - `v0.1.0+5`](#langchain_supabase---v0105) + - [`chromadb` - `v0.2.0+1`](#chromadb---v0201) + - [`googleai_dart` - `v0.1.0+1`](#googleai_dart---v0101) + - [`mistralai_dart` - `v0.0.3+2`](#mistralai_dart---v0032) + - [`ollama_dart` - `v0.1.1`](#ollama_dart---v011) + - [`openai_dart` - `v0.3.3`](#openai_dart---v033) + - [`vertex_ai` - `v0.1.0+1`](#vertex_ai---v0101) + +--- + +#### `langchain` - `v0.7.2` + + - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) + - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). ([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) + +#### `langchain_core` - `v0.3.2` + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + - **FIX**: Stream errors are not propagated by StringOutputParser ([#440](https://github.com/davidmigloz/langchain_dart/issues/440)). ([496b11cc](https://github.com/davidmigloz/langchain_dart/commit/496b11cca9bbf9892c425e49138562537398bc70)) + +#### `langchain_community` - `v0.2.1` + + - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) + +#### `langchain_openai` - `v0.6.2` + + - **DOCS**: Document tool calling with OpenRouter ([#437](https://github.com/davidmigloz/langchain_dart/issues/437)). ([47986592](https://github.com/davidmigloz/langchain_dart/commit/47986592a674322fe2f69aff7166a3e594756ace)) + +#### `anthropic_sdk_dart` - `v0.0.1` + + - **FEAT**: Implement anthropic_sdk_dart, a Dart client for Anthropic API ([#433](https://github.com/davidmigloz/langchain_dart/issues/433)). ([e5412b](https://github.com/davidmigloz/langchain_dart/commit/e5412bdedc7de911f7de88eb51e9d41cd85ab4ae)) + +#### `ollama_dart` - `v0.1.1` + + - **FEAT**: Support buffered stream responses ([#445](https://github.com/davidmigloz/langchain_dart/issues/445)). 
([ce2ef30c](https://github.com/davidmigloz/langchain_dart/commit/ce2ef30c9a9a0dfe8f3059988b7007c94c45b9bd)) + +#### `openai_dart` - `v0.3.3` + + - **FEAT**: Support FastChat OpenAI-compatible API ([#444](https://github.com/davidmigloz/langchain_dart/issues/444)). ([ddaf1f69](https://github.com/davidmigloz/langchain_dart/commit/ddaf1f69d8262210637999367690bf362f2dc5c3)) + - **FIX**: Make vector store name optional ([#436](https://github.com/davidmigloz/langchain_dart/issues/436)). ([29a46c7f](https://github.com/davidmigloz/langchain_dart/commit/29a46c7fa645439e8f4acc10a16da904e7cf14ff)) + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + + ## 2024-05-20 ### Changes @@ -2422,20 +2489,3 @@ Packages with changes: - **FIX**: OpenAIQAWithSourcesChain throws exception. ([45c6cb9d](https://github.com/davidmigloz/langchain_dart/commit/45c6cb9d32be670902dd2fe4cb92597765590d85)) - **FEAT**: Support estimating the number of tokens for a given prompt ([#3](https://github.com/davidmigloz/langchain_dart/issues/3)). ([e22f22c8](https://github.com/davidmigloz/langchain_dart/commit/e22f22c89f188a019b96a7c0003dbd26471bebb7)) - -## 2023-07-02 - -### Changes - -#### `langchain` - `v0.0.1` - - - Initial public release. -t/commit/e22f22c89f188a019b96a7c0003dbd26471bebb7)) - -## 2023-07-02 - -### Changes - -#### `langchain` - `v0.0.1` - - - Initial public release. diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 2ab1aff4..fcb8dfa3 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -13,9 +13,9 @@ dependencies: flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 js: ^0.7.1 - langchain: ^0.7.1 - langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_community: 0.2.1 + langchain_openai: ^0.6.2 shared_preferences: ^2.2.2 flutter: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 716c7270..09e311f0 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -7,10 +7,10 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.1 - langchain_chroma: ^0.2.0+4 - langchain_community: 0.2.0+1 - langchain_google: ^0.5.0 - langchain_mistralai: ^0.2.0+1 - langchain_ollama: ^0.2.1+1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_chroma: ^0.2.0+5 + langchain_community: 0.2.1 + langchain_google: ^0.5.1 + langchain_mistralai: ^0.2.1 + langchain_ollama: ^0.2.2 + langchain_openai: ^0.6.2 diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index 4c7f0059..665ba178 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -7,7 +7,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_openai: ^0.6.2 shelf: ^1.4.1 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index d814f7c4..28872dc6 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -7,5 +7,5 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_openai: ^0.6.2 diff --git a/examples/hello_world_flutter/pubspec.yaml 
b/examples/hello_world_flutter/pubspec.yaml index 6d125283..4f9f4c56 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -12,11 +12,11 @@ dependencies: equatable: ^2.0.5 flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 - langchain: ^0.7.1 - langchain_google: ^0.5.0 - langchain_mistralai: ^0.2.0+1 - langchain_ollama: ^0.2.1+1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_google: ^0.5.1 + langchain_mistralai: ^0.2.1 + langchain_ollama: ^0.2.2 + langchain_openai: ^0.6.2 flutter: uses-material-design: true diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.yaml b/examples/vertex_ai_matching_engine_setup/pubspec.yaml index 34b972bf..9de8254f 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.yaml +++ b/examples/vertex_ai_matching_engine_setup/pubspec.yaml @@ -10,4 +10,4 @@ dependencies: gcloud: ^0.8.12 googleapis_auth: ^1.5.1 http: ^1.1.0 - vertex_ai: ^0.1.0 + vertex_ai: ^0.1.0+1 diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index 7b4ce9a2..1c81fb76 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -7,6 +7,6 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.1 - langchain_ollama: ^0.2.1+1 - langchain_community: 0.2.0+1 + langchain: ^0.7.2 + langchain_ollama: ^0.2.2 + langchain_community: 0.2.1 diff --git a/melos.yaml b/melos.yaml index b39bb2a1..4912fe7c 100644 --- a/melos.yaml +++ b/melos.yaml @@ -14,7 +14,7 @@ command: branch: main changelogs: - path: CHANGELOG.md - description: Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. + description: "Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release." packageFilters: no-private: true bootstrap: diff --git a/packages/anthropic_sdk_dart/CHANGELOG.md b/packages/anthropic_sdk_dart/CHANGELOG.md index 90f8e244..de958be3 100644 --- a/packages/anthropic_sdk_dart/CHANGELOG.md +++ b/packages/anthropic_sdk_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.0.1 + + - **FEAT**: Implement anthropic_sdk_dart, a Dart client for Anthropic API ([#433](https://github.com/davidmigloz/langchain_dart/issues/433)). ([e5412b](https://github.com/davidmigloz/langchain_dart/commit/e5412bdedc7de911f7de88eb51e9d41cd85ab4ae)) + ## 0.0.1-dev.1 -- Bootstrap package. + - Bootstrap package. diff --git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml index 5beab57e..164ba95f 100644 --- a/packages/anthropic_sdk_dart/pubspec.yaml +++ b/packages/anthropic_sdk_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: anthropic_sdk_dart description: Dart Client for the Anthropic API (Claude 3 Opus, Sonnet, Haiku, etc.). -version: 0.0.1-dev.1 +version: 0.0.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/anthropic_sdk_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:anthropic_sdk_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/chromadb/CHANGELOG.md b/packages/chromadb/CHANGELOG.md index 899efe6f..7f7724ef 100644 --- a/packages/chromadb/CHANGELOG.md +++ b/packages/chromadb/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.0+1 + + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). 
([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + ## 0.2.0 - **FIX**: Have the == implementation use Object instead of dynamic ([#334](https://github.com/davidmigloz/langchain_dart/issues/334)). ([89f7b0b9](https://github.com/davidmigloz/langchain_dart/commit/89f7b0b94144c216de19ec7244c48f3c34c2c635)) diff --git a/packages/chromadb/pubspec.yaml b/packages/chromadb/pubspec.yaml index 40252b6b..f11b20ea 100644 --- a/packages/chromadb/pubspec.yaml +++ b/packages/chromadb/pubspec.yaml @@ -1,6 +1,6 @@ name: chromadb description: Dart Client for the Chroma open-source embedding database API. -version: 0.2.0 +version: 0.2.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/chromadb issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/googleai_dart/CHANGELOG.md b/packages/googleai_dart/CHANGELOG.md index 8277d0d5..7a6ca6b8 100644 --- a/packages/googleai_dart/CHANGELOG.md +++ b/packages/googleai_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+1 + + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + ## 0.1.0 - **REFACTOR**: Minor changes ([#407](https://github.com/davidmigloz/langchain_dart/issues/407)). ([fa4b5c37](https://github.com/davidmigloz/langchain_dart/commit/fa4b5c376a191fea50c3f8b1d6b07cef0480a74e)) diff --git a/packages/googleai_dart/pubspec.yaml b/packages/googleai_dart/pubspec.yaml index 2ed4d004..2006a059 100644 --- a/packages/googleai_dart/pubspec.yaml +++ b/packages/googleai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: googleai_dart description: Dart Client for the Google AI API (Gemini Pro, Gemini Pro Vision, embeddings, etc.). -version: 0.1.0 +version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/googleai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:googleai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index 9d255b21..cc6953da 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -1,3 +1,11 @@ +## 0.7.2 + +> Note: ObjectBox Vector DB integration (`ObjectBoxVectorStore`) is now available in the [`langchain_community`](https://pub.dev/packages/langchain_community) package + + - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) + - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). ([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) + ## 0.7.1 > Note: VertexAI for Firebase (`ChatFirebaseVertexAI`) is available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package. 
diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 1483d1f5..a92d1e9c 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain description: Build powerful LLM-based Dart and Flutter applications with LangChain.dart. -version: 0.7.1 +version: 0.7.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ dependencies: characters: ^1.3.0 collection: ">=1.17.0 <1.19.0" crypto: ^3.0.3 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_chroma/CHANGELOG.md b/packages/langchain_chroma/CHANGELOG.md index 266080ac..8e785534 100644 --- a/packages/langchain_chroma/CHANGELOG.md +++ b/packages/langchain_chroma/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.0+5 + + - Update a dependency to the latest release. + ## 0.2.0+4 - Update a dependency to the latest release. diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 84e24303..4ce07684 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_chroma description: LangChain.dart integration module for Chroma open-source embedding database. -version: 0.2.0+4 +version: 0.2.0+5 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart @@ -17,14 +17,14 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - chromadb: ^0.2.0 + chromadb: ^0.2.0+1 http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 meta: ^1.11.0 uuid: ^4.3.3 dev_dependencies: test: ^1.25.2 - langchain: ^0.7.1 - langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_community: 0.2.1 + langchain_openai: ^0.6.2 diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 7f48bd87..63111604 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.2.1 + + - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) + ## 0.2.0+1 - Update a dependency to the latest release. diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index caa994db..c7083a98 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. 
-version: 0.2.0+1 +version: 0.2.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart @@ -21,7 +21,7 @@ dependencies: csv: ^6.0.0 http: ^1.1.0 json_path: ^0.7.1 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 math_expressions: ^2.4.0 meta: ^1.11.0 objectbox: ^4.0.1 @@ -29,7 +29,7 @@ dependencies: dev_dependencies: build_runner: ^2.4.9 - langchain_openai: ^0.6.1+1 + langchain_openai: ^0.6.2 objectbox_generator: ^4.0.0 test: ^1.25.2 diff --git a/packages/langchain_core/CHANGELOG.md b/packages/langchain_core/CHANGELOG.md index 25cf9ffd..dd637cd5 100644 --- a/packages/langchain_core/CHANGELOG.md +++ b/packages/langchain_core/CHANGELOG.md @@ -1,3 +1,9 @@ +## 0.3.2 + + - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). ([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) + - **FIX**: Stream errors are not propagated by StringOutputParser ([#440](https://github.com/davidmigloz/langchain_dart/issues/440)). ([496b11cc](https://github.com/davidmigloz/langchain_dart/commit/496b11cca9bbf9892c425e49138562537398bc70)) + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + ## 0.3.1 - **FEAT**: Add equals to ChatToolChoiceForced ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f)) diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index d6f04b41..b682b76a 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_core description: Contains core abstractions of LangChain.dart and the LangChain Expression Language (LCEL). -version: 0.3.1 +version: 0.3.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index a7350a9b..60c41358 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+2 + + - Update a dependency to the latest release. + ## 0.1.0+1 - **DOCS**: Fix lint issues in langchain_firebase example. 
([f85a6ad7](https://github.com/davidmigloz/langchain_dart/commit/f85a6ad755e00c513bd4349663e33d40be8a696c)) diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index a4857f0d..8c912278 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -13,8 +13,8 @@ dependencies: flutter: sdk: flutter flutter_markdown: ^0.6.22 - langchain: 0.7.1 - langchain_firebase: 0.1.0+1 + langchain: 0.7.2 + langchain_firebase: 0.1.0+2 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 921c1336..3791c07c 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). -version: 0.1.0+1 +version: 0.1.0+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart @@ -24,7 +24,7 @@ dependencies: firebase_auth: ^5.1.0 cloud_firestore: ^4.17.0 firebase_vertexai: ^0.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index b61c71d8..c2d95eed 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.5.1 + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + ## 0.5.0 > Note: `ChatGoogleGenerativeAI` and `GoogleGenerativeAIEmbeddings` now use the version `v1beta` of the Gemini API (instead of `v1`) which support the latest models (`gemini-1.5-pro-latest` and `gemini-1.5-flash-latest`). diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 67a75cff..0f07b091 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_google description: LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). -version: 0.5.0 +version: 0.5.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart @@ -24,10 +24,10 @@ dependencies: googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 meta: ^1.11.0 uuid: ^4.3.3 - vertex_ai: ^0.1.0 + vertex_ai: ^0.1.0+1 langchain_firebase: ^0.1.0 firebase_core: ^2.31.0 diff --git a/packages/langchain_mistralai/CHANGELOG.md b/packages/langchain_mistralai/CHANGELOG.md index c87fd2db..d5d9ca46 100644 --- a/packages/langchain_mistralai/CHANGELOG.md +++ b/packages/langchain_mistralai/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.1 + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). 
([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + ## 0.2.0+1 - Update a dependency to the latest release. diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 964397d3..2eda0275 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_mistralai description: LangChain.dart integration module for Mistral AI (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.2.0+1 +version: 0.2.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_mistralai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_mistralai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,10 +19,10 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - mistralai_dart: ^0.0.3+1 + mistralai_dart: ^0.0.3+2 dev_dependencies: test: ^1.25.2 diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index 81bb56d2..a72f229e 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.2 + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + ## 0.2.1+1 - Update a dependency to the latest release. diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index aea5e9ee..ae7adb8d 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_ollama description: LangChain.dart integration module for Ollama (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). -version: 0.2.1+1 +version: 0.2.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart @@ -19,10 +19,10 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.1.0+1 + ollama_dart: ^0.1.1 uuid: ^4.3.3 dev_dependencies: diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index ae115e6d..4daab488 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.6.2 + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + - **DOCS**: Document tool calling with OpenRouter ([#437](https://github.com/davidmigloz/langchain_dart/issues/437)). ([47986592](https://github.com/davidmigloz/langchain_dart/commit/47986592a674322fe2f69aff7166a3e594756ace)) + ## 0.6.1+1 - Update a dependency to the latest release. 
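The `Runnable.close()` feature listed in the changelog entries above could be exercised roughly like this; a hedged sketch, where the prompt, topic, and placeholder API key are invented for illustration and are not part of this release:

```dart
// Sketch of closing a chain's resources with the new Runnable.close().
import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  final promptTemplate = ChatPromptTemplate.fromTemplate(
    'Tell me a joke about {topic}',
  );
  final chatModel = ChatOpenAI(apiKey: 'sk-...'); // placeholder key
  final chain = promptTemplate
      .pipe(chatModel)
      .pipe(StringOutputParser<ChatResult>());

  final joke = await chain.invoke({'topic': 'dependency updates'});
  print(joke);

  // New in this release: close any resources (e.g. HTTP clients) held by
  // the steps of the chain.
  chain.close();
}
```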
diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index efab060a..5d31a856 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_openai description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). -version: 0.6.1+1 +version: 0.6.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,13 +19,13 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.3.2+1 + openai_dart: ^0.3.3 uuid: ^4.3.3 dev_dependencies: - langchain: ^0.7.1 - langchain_community: 0.2.0+1 + langchain: ^0.7.2 + langchain_community: 0.2.1 test: ^1.25.2 diff --git a/packages/langchain_pinecone/CHANGELOG.md b/packages/langchain_pinecone/CHANGELOG.md index 276d2616..6e3c39e3 100644 --- a/packages/langchain_pinecone/CHANGELOG.md +++ b/packages/langchain_pinecone/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+5 + + - Update a dependency to the latest release. + ## 0.1.0+4 - Update a dependency to the latest release. diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index 479b441e..b943bde0 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_pinecone description: LangChain.dart integration module for Pinecone fully-managed vector database. -version: 0.1.0+4 +version: 0.1.0+5 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart @@ -18,11 +18,11 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.3.3 dev_dependencies: test: ^1.25.2 - langchain_openai: ^0.6.1+1 + langchain_openai: ^0.6.2 diff --git a/packages/langchain_supabase/CHANGELOG.md b/packages/langchain_supabase/CHANGELOG.md index d98b5fe3..00a141c5 100644 --- a/packages/langchain_supabase/CHANGELOG.md +++ b/packages/langchain_supabase/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+5 + + - Update a dependency to the latest release. + ## 0.1.0+4 - Update a dependency to the latest release. diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index 91340307..d6e0e622 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_supabase description: LangChain.dart integration module for Supabase (e.g. Supabase Vector). 
-version: 0.1.0+4 +version: 0.1.0+5 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart @@ -18,12 +18,12 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 meta: ^1.11.0 supabase: ^2.0.8 dev_dependencies: test: ^1.25.2 - langchain: ^0.7.1 - langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_community: 0.2.1 + langchain_openai: ^0.6.2 diff --git a/packages/mistralai_dart/CHANGELOG.md b/packages/mistralai_dart/CHANGELOG.md index fcb706a7..d1426493 100644 --- a/packages/mistralai_dart/CHANGELOG.md +++ b/packages/mistralai_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.0.3+2 + + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + ## 0.0.3+1 - **FIX**: Have the == implementation use Object instead of dynamic ([#334](https://github.com/davidmigloz/langchain_dart/issues/334)). ([89f7b0b9](https://github.com/davidmigloz/langchain_dart/commit/89f7b0b94144c216de19ec7244c48f3c34c2c635)) diff --git a/packages/mistralai_dart/pubspec.yaml b/packages/mistralai_dart/pubspec.yaml index 27b81ed4..a2aad311 100644 --- a/packages/mistralai_dart/pubspec.yaml +++ b/packages/mistralai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: mistralai_dart description: Dart Client for the Mistral AI API (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.0.3+1 +version: 0.0.3+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/mistralai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:mistralai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index f7c943f9..21ceb1cf 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.1.1 + + - **FEAT**: Support buffered stream responses in ollama_dart ([#445](https://github.com/davidmigloz/langchain_dart/issues/445)). ([ce2ef30c](https://github.com/davidmigloz/langchain_dart/commit/ce2ef30c9a9a0dfe8f3059988b7007c94c45b9bd)) + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + ## 0.1.0+1 - **FIX**: digest path param in Ollama blob endpoints ([#430](https://github.com/davidmigloz/langchain_dart/issues/430)). ([2e9e935a](https://github.com/davidmigloz/langchain_dart/commit/2e9e935aefd74e5e9e09a23188a6c77ce535661d)) diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index ab538c0d..81f9fd49 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: ollama_dart description: Dart Client for the Ollama API (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). 
-version: 0.1.0+1 +version: 0.1.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index 6e366631..0a0e4085 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -1,3 +1,9 @@ +## 0.3.3 + + - **FEAT**: Support FastChat OpenAI-compatible API ([#444](https://github.com/davidmigloz/langchain_dart/issues/444)). ([ddaf1f69](https://github.com/davidmigloz/langchain_dart/commit/ddaf1f69d8262210637999367690bf362f2dc5c3)) + - **FIX**: Make vector store name optional in openai_dart ([#436](https://github.com/davidmigloz/langchain_dart/issues/436)). ([29a46c7f](https://github.com/davidmigloz/langchain_dart/commit/29a46c7fa645439e8f4acc10a16da904e7cf14ff)) + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + ## 0.3.2+1 - **FIX**: Rename CreateRunRequestModel factories names ([#429](https://github.com/davidmigloz/langchain_dart/issues/429)). ([fd15793b](https://github.com/davidmigloz/langchain_dart/commit/fd15793b3c4ac94dfc90567b4a709e1458f4e0e8)) diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index ee8442e2..f617c8f0 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: openai_dart description: Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. -version: 0.3.2+1 +version: 0.3.3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/vertex_ai/CHANGELOG.md b/packages/vertex_ai/CHANGELOG.md index f081d3a9..18902a6a 100644 --- a/packages/vertex_ai/CHANGELOG.md +++ b/packages/vertex_ai/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+1 + + - Update a dependency to the latest release. + ## 0.1.0 - **REFACTOR**: Minor changes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96)) diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index 703fb145..3454b32d 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -1,6 +1,6 @@ name: vertex_ai description: GCP Vertex AI ML platform API client (PaLM, Matching Engine, etc.). 
-version: 0.1.0 +version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/vertex_ai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:vertex_ai homepage: https://github.com/davidmigloz/langchain_dart From 4c4473065616777e606bbc171af4811009a66d21 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 1 Jun 2024 00:58:21 +0200 Subject: [PATCH 153/251] fix: Add missing dependency in langchain_community package (#448) --- melos.yaml | 1 + packages/langchain_community/pubspec.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/melos.yaml b/melos.yaml index 4912fe7c..8390e622 100644 --- a/melos.yaml +++ b/melos.yaml @@ -35,6 +35,7 @@ command: firebase_app_check: ^0.2.2+5 firebase_core: ^2.31.0 firebase_vertexai: ^0.1.0 + flat_buffers: ^23.5.26 flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 freezed_annotation: ^2.4.1 diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index c7083a98..17a07668 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -19,6 +19,7 @@ dependencies: beautiful_soup_dart: ^0.3.0 cross_file: ^0.3.4+1 csv: ^6.0.0 + flat_buffers: ^23.5.26 http: ^1.1.0 json_path: ^0.7.1 langchain_core: ^0.3.2 From fae833bfe10122c54d2775c69593d25cc11d706d Mon Sep 17 00:00:00 2001 From: David Miguel Date: Sat, 1 Jun 2024 00:58:56 +0200 Subject: [PATCH 154/251] chore(release): publish packages - langchain_community@0.2.1+1 - langchain_firebase@0.1.0+3 --- examples/browser_summarizer/pubspec.lock | 10 +++---- examples/browser_summarizer/pubspec.yaml | 2 +- examples/docs_examples/pubspec.lock | 26 +++++++++---------- examples/docs_examples/pubspec.yaml | 2 +- examples/hello_world_backend/pubspec.lock | 8 +++--- examples/hello_world_cli/pubspec.lock | 8 +++--- examples/hello_world_flutter/pubspec.lock | 20 +++++++------- .../pubspec.lock | 2 +- examples/wikivoyage_eu/pubspec.lock | 10 +++---- examples/wikivoyage_eu/pubspec.yaml | 2 +- packages/langchain_chroma/pubspec.yaml | 2 +- packages/langchain_community/CHANGELOG.md | 4 +++ packages/langchain_community/pubspec.yaml | 2 +- packages/langchain_firebase/CHANGELOG.md | 4 +++ .../langchain_firebase/example/pubspec.lock | 6 ++--- .../langchain_firebase/example/pubspec.yaml | 2 +- packages/langchain_firebase/pubspec.lock | 2 +- packages/langchain_firebase/pubspec.yaml | 2 +- packages/langchain_openai/pubspec.yaml | 2 +- packages/langchain_supabase/pubspec.yaml | 2 +- 20 files changed, 63 insertions(+), 55 deletions(-) diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index ca9c5503..17d43f80 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -233,28 +233,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.0+1" + version: "0.2.1+1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1+1" + version: "0.6.2" langchain_tiktoken: dependency: transitive description: @@ -325,7 +325,7 @@ packages: path: "../../packages/openai_dart" 
relative: true source: path - version: "0.3.2+1" + version: "0.3.3" path: dependency: transitive description: diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index fcb8dfa3..0e729f8d 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -14,7 +14,7 @@ dependencies: flutter_markdown: ^0.6.22 js: ^0.7.1 langchain: ^0.7.2 - langchain_community: 0.2.1 + langchain_community: 0.2.1+1 langchain_openai: ^0.6.2 shared_preferences: ^2.2.2 diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index bc3d8b13..6199523b 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -47,7 +47,7 @@ packages: path: "../../packages/chromadb" relative: true source: path - version: "0.2.0" + version: "0.2.0+1" collection: dependency: transitive description: @@ -238,56 +238,56 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_chroma: dependency: "direct main" description: path: "../../packages/langchain_chroma" relative: true source: path - version: "0.2.0+4" + version: "0.2.0+5" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.0+1" + version: "0.2.1+1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.5.0" + version: "0.5.1" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.0+1" + version: "0.2.1" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.1+1" + version: "0.2.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1+1" + version: "0.6.2" langchain_tiktoken: dependency: transitive description: @@ -326,7 +326,7 @@ packages: path: "../../packages/mistralai_dart" relative: true source: path - version: "0.0.3+1" + version: "0.0.3+2" objectbox: dependency: transitive description: @@ -341,14 +341,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.0+1" + version: "0.1.1" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2+1" + version: "0.3.3" path: dependency: transitive description: @@ -451,7 +451,7 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0" + version: "0.1.0+1" web: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 09e311f0..3044b6d2 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -9,7 +9,7 @@ environment: dependencies: langchain: ^0.7.2 langchain_chroma: ^0.2.0+5 - langchain_community: 0.2.1 + langchain_community: 0.2.1+1 langchain_google: ^0.5.1 langchain_mistralai: ^0.2.1 langchain_ollama: ^0.2.2 diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index 9c8a5ba4..dc3ac458 100644 --- 
a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -119,21 +119,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1+1" + version: "0.6.2" langchain_tiktoken: dependency: transitive description: @@ -156,7 +156,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2+1" + version: "0.3.3" path: dependency: transitive description: diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index df156ea2..8fc27717 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -111,21 +111,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1+1" + version: "0.6.2" langchain_tiktoken: dependency: transitive description: @@ -148,7 +148,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2+1" + version: "0.3.3" path: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index a402383b..d9c9c29f 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -204,42 +204,42 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.5.0" + version: "0.5.1" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.0+1" + version: "0.2.1" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.1+1" + version: "0.2.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1+1" + version: "0.6.2" langchain_tiktoken: dependency: transitive description: @@ -278,7 +278,7 @@ packages: path: "../../packages/mistralai_dart" relative: true source: path - version: "0.0.3+1" + version: "0.0.3+2" nested: dependency: transitive description: @@ -293,14 +293,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.0+1" + version: "0.1.1" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2+1" + version: "0.3.3" path: dependency: transitive description: @@ -400,7 +400,7 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: 
"0.1.0" + version: "0.1.0+1" web: dependency: transitive description: diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.lock b/examples/vertex_ai_matching_engine_setup/pubspec.lock index a29715a0..99209b09 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.lock +++ b/examples/vertex_ai_matching_engine_setup/pubspec.lock @@ -151,7 +151,7 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0" + version: "0.1.0+1" web: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index c05174d0..49dc9df4 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -167,28 +167,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.0+1" + version: "0.2.1+1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.1+1" + version: "0.2.2" langchain_tiktoken: dependency: transitive description: @@ -235,7 +235,7 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.0+1" + version: "0.1.1" path: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index 1c81fb76..198686c0 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -9,4 +9,4 @@ environment: dependencies: langchain: ^0.7.2 langchain_ollama: ^0.2.2 - langchain_community: 0.2.1 + langchain_community: 0.2.1+1 diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 4ce07684..3c96bacb 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -26,5 +26,5 @@ dependencies: dev_dependencies: test: ^1.25.2 langchain: ^0.7.2 - langchain_community: 0.2.1 + langchain_community: 0.2.1+1 langchain_openai: ^0.6.2 diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 63111604..5c3aaba2 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.1+1 + + - **FIX**: Add missing dependency in langchain_community package ([#448](https://github.com/davidmigloz/langchain_dart/issues/448)). ([70ffd027](https://github.com/davidmigloz/langchain_dart/commit/70ffd027cb41c5c5058bb266966734894f773330)) + ## 0.2.1 - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 17a07668..e2286be4 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. 
-version: 0.2.1 +version: 0.2.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index 60c41358..d5128425 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+3 + + - Update a dependency to the latest release. + ## 0.1.0+2 - Update a dependency to the latest release. diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 3b051b3a..87d91077 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -238,21 +238,21 @@ packages: path: "../../langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_core: dependency: "direct overridden" description: path: "../../langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_firebase: dependency: "direct main" description: path: ".." relative: true source: path - version: "0.1.0+1" + version: "0.1.0+3" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index 8c912278..5b635cc5 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -14,7 +14,7 @@ dependencies: sdk: flutter flutter_markdown: ^0.6.22 langchain: 0.7.2 - langchain_firebase: 0.1.0+2 + langchain_firebase: 0.1.0+3 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 89e38672..eee61f63 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -206,7 +206,7 @@ packages: path: "../langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 3791c07c..4fd8030c 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). 
-version: 0.1.0+2 +version: 0.1.0+3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 5d31a856..c0ccb98d 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -27,5 +27,5 @@ dependencies: dev_dependencies: langchain: ^0.7.2 - langchain_community: 0.2.1 + langchain_community: 0.2.1+1 test: ^1.25.2 diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index d6e0e622..ffb0656d 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -25,5 +25,5 @@ dependencies: dev_dependencies: test: ^1.25.2 langchain: ^0.7.2 - langchain_community: 0.2.1 + langchain_community: 0.2.1+1 langchain_openai: ^0.6.2 From b3d8b1a93849fb9937e73292a8ad35ab1ef73037 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Sat, 1 Jun 2024 11:02:11 +0200 Subject: [PATCH 155/251] docs: Update ObjectBox docs --- .../vector_stores/integrations/objectbox.md | 21 ++++++++++++------- .../vector_stores/integrations/objectbox.dart | 4 ++-- examples/wikivoyage_eu/README.md | 2 +- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/docs/modules/retrieval/vector_stores/integrations/objectbox.md b/docs/modules/retrieval/vector_stores/integrations/objectbox.md index af4bb6c6..9c165306 100644 --- a/docs/modules/retrieval/vector_stores/integrations/objectbox.md +++ b/docs/modules/retrieval/vector_stores/integrations/objectbox.md @@ -13,6 +13,7 @@ ObjectBox features: - Low memory footprint: ObjectBox itself just takes a few MB of memory. The entire binary is only about 3 MB (compressed around 1 MB) - Scales with hardware: efficient resource usage is also an advantage when running on more capable devices like the latest phones, desktops and servers +Official ObjectBox resources: - [ObjectBox Vector Search docs](https://docs.objectbox.io/ann-vector-search) - [The first On-Device Vector Database: ObjectBox 4.0](https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0) - [On-device Vector Database for Dart/Flutter](https://objectbox.io/on-device-vector-database-for-dart-flutter) @@ -42,21 +43,21 @@ dependencies: ### 3. Instantiate the ObjectBox vector store ```dart -final embeddings = OllamaEmbeddings(model: 'mxbai-embed-large:335m'); +final embeddings = OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'); final vectorStore = ObjectBoxVectorStore( embeddings: embeddings, - dimensions: 1024, + dimensions: 512, ); ``` -The dimensions parameter specifies the number of dimensions of the embeddings. It will depend on the embeddings model you are using. In this example, we are using the `mxbai-embed-large:335m` model, which has 1024 dimensions. +The dimensions parameter specifies the number of dimensions of the embeddings. It will depend on the embeddings model you are using. In this example, we are using the [jina/jina-embeddings-v2-small-en](https://ollama.com/jina/jina-embeddings-v2-small-en) model, which has 512 dimensions. The `ObjectBoxVectorStore` constructor allows you to customize the ObjectBox store that is created under the hood. 
For example, you can change the directory where the database is stored: ```dart final vectorStore = ObjectBoxVectorStore( embeddings: embeddings, - dimensions: 1024, + dimensions: 512, directory: 'path/to/db', ); ``` @@ -129,7 +130,7 @@ This example demonstrates how to build a fully local RAG (Retrieval-Augmented Ge Before running the example, make sure you have the following: - Ollama installed (see the [Ollama documentation](https://ollama.com/) for installation instructions). -- [mxbai-embed-large:335m](https://ollama.com/library/mxbai-embed-large:335m) and [`llama3:8b`](https://ollama.com/library/llama3:8b) models downloaded. +- [jina/jina-embeddings-v2-small-en](https://ollama.com/jina/jina-embeddings-v2-small-en) and [llama3:8b](https://ollama.com/library/llama3:8b) models downloaded. #### Steps @@ -137,7 +138,7 @@ Before running the example, make sure you have the following: 1. Retrieve several posts from the ObjectBox blog using a `WebBaseLoader` document loader. 2. Split the retrieved posts into chunks using a `RecursiveCharacterTextSplitter`. -3. Create embeddings from the document chunks using the `mxbai-embed-large:335m` embeddings model via `OllamaEmbeddings`. +3. Create embeddings from the document chunks using the `jina/jina-embeddings-v2-small-en` embeddings model via `OllamaEmbeddings`. 4. Add the document chunks and their corresponding embeddings to the `ObjectBoxVectorStore`. > Note: this step only needs to be executed once (unless the documents change). The stored documents can be used for multiple queries. @@ -151,8 +152,8 @@ Before running the example, make sure you have the following: ```dart // 1. Instantiate vector store final vectorStore = ObjectBoxVectorStore( - embeddings: OllamaEmbeddings(model: 'mxbai-embed-large:335m'), - dimensions: 1024, + embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), + dimensions: 512, ); // 2. Load documents @@ -241,6 +242,10 @@ await stream.forEach(stdout.write); // [2] https://objectbox.io/on-device-vector-database-for-dart-flutter/ ``` +## Example: Wikivoyage EU + +Check out the [Wikivoyage EU example](https://github.com/davidmigloz/langchain_dart/tree/main/examples/wikivoyage_eu), to see how to build a fully local chatbot that uses RAG to plan vacation plans in Europe. + ## Advance ### BaseObjectBoxVectorStore diff --git a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart index 4a8950b7..cd558d1b 100644 --- a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart +++ b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart @@ -11,8 +11,8 @@ void main() async { Future _rag() async { // 1. Instantiate vector store final vectorStore = ObjectBoxVectorStore( - embeddings: OllamaEmbeddings(model: 'mxbai-embed-large:335m'), - dimensions: 1024, + embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), + dimensions: 512, directory: 'bin/modules/retrieval/vector_stores/integrations', ); diff --git a/examples/wikivoyage_eu/README.md b/examples/wikivoyage_eu/README.md index ca686dcb..07bc5073 100644 --- a/examples/wikivoyage_eu/README.md +++ b/examples/wikivoyage_eu/README.md @@ -70,7 +70,7 @@ Added 160 documents to the vector store. The chatbot script implements the RAG pipeline. It does the following: 1. Takes a user query as input. -2. Uses the `mxbai-embed-large:335m` model to create an embedding for the query. 
+2. Uses the `jina/jina-embeddings-v2-small-en` model to create an embedding for the query. 3. Retrieves the 5 most similar documents from the ObjectBox database. 4. Builds a prompt using the retrieved documents and the query. 5. Uses the `llama3:8b` model to generate a response to the prompt. From 084e9b63b9587afcf897b35e919eb5b9fd1f856a Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Mon, 10 Jun 2024 22:24:57 +0200 Subject: [PATCH 156/251] refactor: Migrate conditional imports to js_interop (#453) --- .../anthropic_sdk_dart/lib/src/http_client/http_client.dart | 3 +-- packages/googleai_dart/lib/src/http_client/http_client.dart | 3 +-- .../lib/src/utils/https_client/http_client.dart | 3 +-- packages/mistralai_dart/lib/src/http_client/http_client.dart | 3 +-- packages/ollama_dart/lib/src/http_client/http_client.dart | 3 +-- packages/openai_dart/lib/src/http_client/http_client.dart | 3 +-- 6 files changed, 6 insertions(+), 12 deletions(-) diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- a/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart +++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/googleai_dart/lib/src/http_client/http_client.dart b/packages/googleai_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- a/packages/googleai_dart/lib/src/http_client/http_client.dart +++ b/packages/googleai_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/langchain_google/lib/src/utils/https_client/http_client.dart b/packages/langchain_google/lib/src/utils/https_client/http_client.dart index 479d2164..6b9ed76c 100644 --- a/packages/langchain_google/lib/src/utils/https_client/http_client.dart +++ b/packages/langchain_google/lib/src/utils/https_client/http_client.dart @@ -2,8 +2,7 @@ import 'package:http/http.dart' as http; export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; /// {@template custom_http_client} /// Custom HTTP client that wraps the base HTTP client and allows to override diff --git a/packages/mistralai_dart/lib/src/http_client/http_client.dart b/packages/mistralai_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- a/packages/mistralai_dart/lib/src/http_client/http_client.dart +++ b/packages/mistralai_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/ollama_dart/lib/src/http_client/http_client.dart b/packages/ollama_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- 
a/packages/ollama_dart/lib/src/http_client/http_client.dart +++ b/packages/ollama_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/openai_dart/lib/src/http_client/http_client.dart b/packages/openai_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- a/packages/openai_dart/lib/src/http_client/http_client.dart +++ b/packages/openai_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; From a6b58f125d69173bc26a75ab85eff1ced1682fe8 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 13 Jun 2024 08:55:56 +0200 Subject: [PATCH 157/251] feat: Bootstrap `langgraph` package (#454) --- packages/langgraph/.gitignore | 7 +++++++ packages/langgraph/CHANGELOG.md | 3 +++ packages/langgraph/LICENSE | 21 +++++++++++++++++++ packages/langgraph/README.md | 17 +++++++++++++++ packages/langgraph/analysis_options.yaml | 1 + .../langgraph/example/langgraph_example.dart | 3 +++ packages/langgraph/lib/langgraph.dart | 2 ++ packages/langgraph/pubspec.yaml | 16 ++++++++++++++ 8 files changed, 70 insertions(+) create mode 100644 packages/langgraph/.gitignore create mode 100644 packages/langgraph/CHANGELOG.md create mode 100644 packages/langgraph/LICENSE create mode 100644 packages/langgraph/README.md create mode 100644 packages/langgraph/analysis_options.yaml create mode 100644 packages/langgraph/example/langgraph_example.dart create mode 100644 packages/langgraph/lib/langgraph.dart create mode 100644 packages/langgraph/pubspec.yaml diff --git a/packages/langgraph/.gitignore b/packages/langgraph/.gitignore new file mode 100644 index 00000000..3cceda55 --- /dev/null +++ b/packages/langgraph/.gitignore @@ -0,0 +1,7 @@ +# https://dart.dev/guides/libraries/private-files +# Created by `dart pub` +.dart_tool/ + +# Avoid committing pubspec.lock for library packages; see +# https://dart.dev/guides/libraries/private-files#pubspeclock. +pubspec.lock diff --git a/packages/langgraph/CHANGELOG.md b/packages/langgraph/CHANGELOG.md new file mode 100644 index 00000000..90f8e244 --- /dev/null +++ b/packages/langgraph/CHANGELOG.md @@ -0,0 +1,3 @@ +## 0.0.1-dev.1 + +- Bootstrap package. diff --git a/packages/langgraph/LICENSE b/packages/langgraph/LICENSE new file mode 100644 index 00000000..f407ffdd --- /dev/null +++ b/packages/langgraph/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 David Miguel Lozano + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/packages/langgraph/README.md b/packages/langgraph/README.md new file mode 100644 index 00000000..70fc2aae --- /dev/null +++ b/packages/langgraph/README.md @@ -0,0 +1,17 @@ +# 🦜🕸️LangGraph + +[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) +[![langgraph](https://img.shields.io/pub/v/langgraph.svg)](https://pub.dev/packages/langgraph) +[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) +[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) + +⚡ Building language agents as graphs ⚡ + +## Overview + +TODO + +## License + +LangChain.dart is licensed under the +[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). diff --git a/packages/langgraph/analysis_options.yaml b/packages/langgraph/analysis_options.yaml new file mode 100644 index 00000000..f04c6cf0 --- /dev/null +++ b/packages/langgraph/analysis_options.yaml @@ -0,0 +1 @@ +include: ../../analysis_options.yaml diff --git a/packages/langgraph/example/langgraph_example.dart b/packages/langgraph/example/langgraph_example.dart new file mode 100644 index 00000000..21f3e9f2 --- /dev/null +++ b/packages/langgraph/example/langgraph_example.dart @@ -0,0 +1,3 @@ +void main() { + // TODO +} diff --git a/packages/langgraph/lib/langgraph.dart b/packages/langgraph/lib/langgraph.dart new file mode 100644 index 00000000..790b457d --- /dev/null +++ b/packages/langgraph/lib/langgraph.dart @@ -0,0 +1,2 @@ +/// Build resilient language agents as graphs. +library; diff --git a/packages/langgraph/pubspec.yaml b/packages/langgraph/pubspec.yaml new file mode 100644 index 00000000..2b4ebaaf --- /dev/null +++ b/packages/langgraph/pubspec.yaml @@ -0,0 +1,16 @@ +name: langgraph +description: Build resilient language agents as graphs. 
+version: 0.0.1-dev.1 +repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langgraph +issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langgraph +homepage: https://github.com/davidmigloz/langchain_dart +documentation: https://langchaindart.dev + +topics: + - ai + - nlp + - llms + - langchain + +environment: + sdk: ">=3.0.0 <4.0.0" From 6149566d0bccd6e63ebe0e3c0ac0037e663f39c5 Mon Sep 17 00:00:00 2001 From: Konstantin S Date: Thu, 13 Jun 2024 11:58:01 +0400 Subject: [PATCH 158/251] feat: Add support for listing running Ollama models (#451) Co-authored-by: David Miguel --- packages/ollama_dart/README.md | 10 + .../ollama_dart/lib/src/generated/client.dart | 23 +- .../src/generated/schema/process_model.dart | 69 +++ .../generated/schema/process_response.dart | 40 ++ .../lib/src/generated/schema/schema.dart | 2 + .../src/generated/schema/schema.freezed.dart | 482 ++++++++++++++++++ .../lib/src/generated/schema/schema.g.dart | 52 ++ packages/ollama_dart/oas/ollama-curated.yaml | 56 +- .../test/ollama_dart_models_test.dart | 13 + 9 files changed, 742 insertions(+), 5 deletions(-) create mode 100644 packages/ollama_dart/lib/src/generated/schema/process_model.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/process_response.dart diff --git a/packages/ollama_dart/README.md b/packages/ollama_dart/README.md index 27895b5b..5b750447 100644 --- a/packages/ollama_dart/README.md +++ b/packages/ollama_dart/README.md @@ -31,6 +31,7 @@ Unofficial Dart client for [Ollama](https://ollama.ai/) API. * [Models](#models) + [Create model](#create-model) + [List models](#list-models) + + [List running models](#list-running-models) + [Show Model Information](#show-model-information) + [Pull a Model](#pull-a-model) + [Push a Model](#push-a-model) @@ -192,6 +193,15 @@ final res = await client.listModels(); print(res.models); ``` +#### List running models + +Lists models currently loaded and their memory footprint. + +```dart +final res = await client.listRunningModels(); +print(res.models); +``` + #### Show Model Information Show details about a model including modelfile, template, parameters, license, and system prompt. diff --git a/packages/ollama_dart/lib/src/generated/client.dart b/packages/ollama_dart/lib/src/generated/client.dart index a22d8729..6c00d36f 100644 --- a/packages/ollama_dart/lib/src/generated/client.dart +++ b/packages/ollama_dart/lib/src/generated/client.dart @@ -477,6 +477,25 @@ class OllamaClient { return ModelsResponse.fromJson(_jsonDecode(r)); } + // ------------------------------------------ + // METHOD: listRunningModels + // ------------------------------------------ + + /// List models that are running. 
+ /// + /// `GET` `http://localhost:11434/api/ps` + Future listRunningModels() async { + final r = await makeRequest( + baseUrl: 'http://localhost:11434/api', + path: '/ps', + method: HttpMethod.get, + isMultipart: false, + requestType: '', + responseType: 'application/json', + ); + return ProcessResponse.fromJson(_jsonDecode(r)); + } + // ------------------------------------------ // METHOD: showModelInfo // ------------------------------------------ @@ -567,7 +586,7 @@ class OllamaClient { method: HttpMethod.post, isMultipart: false, requestType: 'application/json', - responseType: 'application/json', + responseType: 'application/x-ndjson', body: request, ); return PullModelResponse.fromJson(_jsonDecode(r)); @@ -593,7 +612,7 @@ class OllamaClient { method: HttpMethod.post, isMultipart: false, requestType: 'application/json', - responseType: 'application/json', + responseType: 'application/x-ndjson', body: request, ); return PushModelResponse.fromJson(_jsonDecode(r)); diff --git a/packages/ollama_dart/lib/src/generated/schema/process_model.dart b/packages/ollama_dart/lib/src/generated/schema/process_model.dart new file mode 100644 index 00000000..dad453f0 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/process_model.dart @@ -0,0 +1,69 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ProcessModel +// ========================================== + +/// A model that is currently loaded. +@freezed +class ProcessModel with _$ProcessModel { + const ProcessModel._(); + + /// Factory constructor for ProcessModel + const factory ProcessModel({ + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @JsonKey(includeIfNull: false) String? model, + + /// Size of the model on disk. + @JsonKey(includeIfNull: false) int? size, + + /// The model's digest. + @JsonKey(includeIfNull: false) String? digest, + + /// Details about a model. + @JsonKey(includeIfNull: false) ModelDetails? details, + + /// No Description + @JsonKey(name: 'expires_at', includeIfNull: false) String? expiresAt, + + /// Size of the model on disk. + @JsonKey(name: 'size_vram', includeIfNull: false) int? sizeVram, + }) = _ProcessModel; + + /// Object construction from a JSON representation + factory ProcessModel.fromJson(Map json) => + _$ProcessModelFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'model', + 'size', + 'digest', + 'details', + 'expires_at', + 'size_vram' + ]; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'model': model, + 'size': size, + 'digest': digest, + 'details': details, + 'expires_at': expiresAt, + 'size_vram': sizeVram, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/process_response.dart b/packages/ollama_dart/lib/src/generated/schema/process_response.dart new file mode 100644 index 00000000..6261a813 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/process_response.dart @@ -0,0 +1,40 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ProcessResponse +// ========================================== + +/// Response class for the list running models endpoint. +@freezed +class ProcessResponse with _$ProcessResponse { + const ProcessResponse._(); + + /// Factory constructor for ProcessResponse + const factory ProcessResponse({ + /// List of running models. + @JsonKey(includeIfNull: false) List? models, + }) = _ProcessResponse; + + /// Object construction from a JSON representation + factory ProcessResponse.fromJson(Map json) => + _$ProcessResponseFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['models']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'models': models, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.dart b/packages/ollama_dart/lib/src/generated/schema/schema.dart index 5c8eb964..ed6b2733 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.dart @@ -26,6 +26,8 @@ part 'create_model_status.dart'; part 'models_response.dart'; part 'model.dart'; part 'model_details.dart'; +part 'process_response.dart'; +part 'process_model.dart'; part 'model_info_request.dart'; part 'model_info.dart'; part 'copy_model_request.dart'; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index ab02ac2b..88e82b13 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -4857,6 +4857,488 @@ abstract class _ModelDetails extends ModelDetails { throw _privateConstructorUsedError; } +ProcessResponse _$ProcessResponseFromJson(Map json) { + return _ProcessResponse.fromJson(json); +} + +/// @nodoc +mixin _$ProcessResponse { + /// List of running models. + @JsonKey(includeIfNull: false) + List? get models => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ProcessResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ProcessResponseCopyWith<$Res> { + factory $ProcessResponseCopyWith( + ProcessResponse value, $Res Function(ProcessResponse) then) = + _$ProcessResponseCopyWithImpl<$Res, ProcessResponse>; + @useResult + $Res call({@JsonKey(includeIfNull: false) List? 
models}); +} + +/// @nodoc +class _$ProcessResponseCopyWithImpl<$Res, $Val extends ProcessResponse> + implements $ProcessResponseCopyWith<$Res> { + _$ProcessResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? models = freezed, + }) { + return _then(_value.copyWith( + models: freezed == models + ? _value.models + : models // ignore: cast_nullable_to_non_nullable + as List?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ProcessResponseImplCopyWith<$Res> + implements $ProcessResponseCopyWith<$Res> { + factory _$$ProcessResponseImplCopyWith(_$ProcessResponseImpl value, + $Res Function(_$ProcessResponseImpl) then) = + __$$ProcessResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(includeIfNull: false) List? models}); +} + +/// @nodoc +class __$$ProcessResponseImplCopyWithImpl<$Res> + extends _$ProcessResponseCopyWithImpl<$Res, _$ProcessResponseImpl> + implements _$$ProcessResponseImplCopyWith<$Res> { + __$$ProcessResponseImplCopyWithImpl( + _$ProcessResponseImpl _value, $Res Function(_$ProcessResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? models = freezed, + }) { + return _then(_$ProcessResponseImpl( + models: freezed == models + ? _value._models + : models // ignore: cast_nullable_to_non_nullable + as List?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ProcessResponseImpl extends _ProcessResponse { + const _$ProcessResponseImpl( + {@JsonKey(includeIfNull: false) final List? models}) + : _models = models, + super._(); + + factory _$ProcessResponseImpl.fromJson(Map json) => + _$$ProcessResponseImplFromJson(json); + + /// List of running models. + final List? _models; + + /// List of running models. + @override + @JsonKey(includeIfNull: false) + List? get models { + final value = _models; + if (value == null) return null; + if (_models is EqualUnmodifiableListView) return _models; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + @override + String toString() { + return 'ProcessResponse(models: $models)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ProcessResponseImpl && + const DeepCollectionEquality().equals(other._models, _models)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_models)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ProcessResponseImplCopyWith<_$ProcessResponseImpl> get copyWith => + __$$ProcessResponseImplCopyWithImpl<_$ProcessResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ProcessResponseImplToJson( + this, + ); + } +} + +abstract class _ProcessResponse extends ProcessResponse { + const factory _ProcessResponse( + {@JsonKey(includeIfNull: false) final List? models}) = + _$ProcessResponseImpl; + const _ProcessResponse._() : super._(); + + factory _ProcessResponse.fromJson(Map json) = + _$ProcessResponseImpl.fromJson; + + @override + + /// List of running models. + @JsonKey(includeIfNull: false) + List? 
get models; + @override + @JsonKey(ignore: true) + _$$ProcessResponseImplCopyWith<_$ProcessResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ProcessModel _$ProcessModelFromJson(Map json) { + return _ProcessModel.fromJson(json); +} + +/// @nodoc +mixin _$ProcessModel { + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @JsonKey(includeIfNull: false) + String? get model => throw _privateConstructorUsedError; + + /// Size of the model on disk. + @JsonKey(includeIfNull: false) + int? get size => throw _privateConstructorUsedError; + + /// The model's digest. + @JsonKey(includeIfNull: false) + String? get digest => throw _privateConstructorUsedError; + + /// Details about a model. + @JsonKey(includeIfNull: false) + ModelDetails? get details => throw _privateConstructorUsedError; + + /// No Description + @JsonKey(name: 'expires_at', includeIfNull: false) + String? get expiresAt => throw _privateConstructorUsedError; + + /// Size of the model on disk. + @JsonKey(name: 'size_vram', includeIfNull: false) + int? get sizeVram => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ProcessModelCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ProcessModelCopyWith<$Res> { + factory $ProcessModelCopyWith( + ProcessModel value, $Res Function(ProcessModel) then) = + _$ProcessModelCopyWithImpl<$Res, ProcessModel>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? model, + @JsonKey(includeIfNull: false) int? size, + @JsonKey(includeIfNull: false) String? digest, + @JsonKey(includeIfNull: false) ModelDetails? details, + @JsonKey(name: 'expires_at', includeIfNull: false) String? expiresAt, + @JsonKey(name: 'size_vram', includeIfNull: false) int? sizeVram}); + + $ModelDetailsCopyWith<$Res>? get details; +} + +/// @nodoc +class _$ProcessModelCopyWithImpl<$Res, $Val extends ProcessModel> + implements $ProcessModelCopyWith<$Res> { + _$ProcessModelCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = freezed, + Object? size = freezed, + Object? digest = freezed, + Object? details = freezed, + Object? expiresAt = freezed, + Object? sizeVram = freezed, + }) { + return _then(_value.copyWith( + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + size: freezed == size + ? _value.size + : size // ignore: cast_nullable_to_non_nullable + as int?, + digest: freezed == digest + ? _value.digest + : digest // ignore: cast_nullable_to_non_nullable + as String?, + details: freezed == details + ? _value.details + : details // ignore: cast_nullable_to_non_nullable + as ModelDetails?, + expiresAt: freezed == expiresAt + ? _value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable + as String?, + sizeVram: freezed == sizeVram + ? _value.sizeVram + : sizeVram // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ModelDetailsCopyWith<$Res>? 
get details { + if (_value.details == null) { + return null; + } + + return $ModelDetailsCopyWith<$Res>(_value.details!, (value) { + return _then(_value.copyWith(details: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$ProcessModelImplCopyWith<$Res> + implements $ProcessModelCopyWith<$Res> { + factory _$$ProcessModelImplCopyWith( + _$ProcessModelImpl value, $Res Function(_$ProcessModelImpl) then) = + __$$ProcessModelImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? model, + @JsonKey(includeIfNull: false) int? size, + @JsonKey(includeIfNull: false) String? digest, + @JsonKey(includeIfNull: false) ModelDetails? details, + @JsonKey(name: 'expires_at', includeIfNull: false) String? expiresAt, + @JsonKey(name: 'size_vram', includeIfNull: false) int? sizeVram}); + + @override + $ModelDetailsCopyWith<$Res>? get details; +} + +/// @nodoc +class __$$ProcessModelImplCopyWithImpl<$Res> + extends _$ProcessModelCopyWithImpl<$Res, _$ProcessModelImpl> + implements _$$ProcessModelImplCopyWith<$Res> { + __$$ProcessModelImplCopyWithImpl( + _$ProcessModelImpl _value, $Res Function(_$ProcessModelImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = freezed, + Object? size = freezed, + Object? digest = freezed, + Object? details = freezed, + Object? expiresAt = freezed, + Object? sizeVram = freezed, + }) { + return _then(_$ProcessModelImpl( + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + size: freezed == size + ? _value.size + : size // ignore: cast_nullable_to_non_nullable + as int?, + digest: freezed == digest + ? _value.digest + : digest // ignore: cast_nullable_to_non_nullable + as String?, + details: freezed == details + ? _value.details + : details // ignore: cast_nullable_to_non_nullable + as ModelDetails?, + expiresAt: freezed == expiresAt + ? _value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable + as String?, + sizeVram: freezed == sizeVram + ? _value.sizeVram + : sizeVram // ignore: cast_nullable_to_non_nullable + as int?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ProcessModelImpl extends _ProcessModel { + const _$ProcessModelImpl( + {@JsonKey(includeIfNull: false) this.model, + @JsonKey(includeIfNull: false) this.size, + @JsonKey(includeIfNull: false) this.digest, + @JsonKey(includeIfNull: false) this.details, + @JsonKey(name: 'expires_at', includeIfNull: false) this.expiresAt, + @JsonKey(name: 'size_vram', includeIfNull: false) this.sizeVram}) + : super._(); + + factory _$ProcessModelImpl.fromJson(Map json) => + _$$ProcessModelImplFromJson(json); + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @override + @JsonKey(includeIfNull: false) + final String? model; + + /// Size of the model on disk. + @override + @JsonKey(includeIfNull: false) + final int? size; + + /// The model's digest. + @override + @JsonKey(includeIfNull: false) + final String? digest; + + /// Details about a model. + @override + @JsonKey(includeIfNull: false) + final ModelDetails? details; + + /// No Description + @override + @JsonKey(name: 'expires_at', includeIfNull: false) + final String? expiresAt; + + /// Size of the model on disk. + @override + @JsonKey(name: 'size_vram', includeIfNull: false) + final int? 
sizeVram; + + @override + String toString() { + return 'ProcessModel(model: $model, size: $size, digest: $digest, details: $details, expiresAt: $expiresAt, sizeVram: $sizeVram)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ProcessModelImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.size, size) || other.size == size) && + (identical(other.digest, digest) || other.digest == digest) && + (identical(other.details, details) || other.details == details) && + (identical(other.expiresAt, expiresAt) || + other.expiresAt == expiresAt) && + (identical(other.sizeVram, sizeVram) || + other.sizeVram == sizeVram)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, model, size, digest, details, expiresAt, sizeVram); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ProcessModelImplCopyWith<_$ProcessModelImpl> get copyWith => + __$$ProcessModelImplCopyWithImpl<_$ProcessModelImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ProcessModelImplToJson( + this, + ); + } +} + +abstract class _ProcessModel extends ProcessModel { + const factory _ProcessModel( + {@JsonKey(includeIfNull: false) final String? model, + @JsonKey(includeIfNull: false) final int? size, + @JsonKey(includeIfNull: false) final String? digest, + @JsonKey(includeIfNull: false) final ModelDetails? details, + @JsonKey(name: 'expires_at', includeIfNull: false) + final String? expiresAt, + @JsonKey(name: 'size_vram', includeIfNull: false) + final int? sizeVram}) = _$ProcessModelImpl; + const _ProcessModel._() : super._(); + + factory _ProcessModel.fromJson(Map json) = + _$ProcessModelImpl.fromJson; + + @override + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @JsonKey(includeIfNull: false) + String? get model; + @override + + /// Size of the model on disk. + @JsonKey(includeIfNull: false) + int? get size; + @override + + /// The model's digest. + @JsonKey(includeIfNull: false) + String? get digest; + @override + + /// Details about a model. + @JsonKey(includeIfNull: false) + ModelDetails? get details; + @override + + /// No Description + @JsonKey(name: 'expires_at', includeIfNull: false) + String? get expiresAt; + @override + + /// Size of the model on disk. + @JsonKey(name: 'size_vram', includeIfNull: false) + int? get sizeVram; + @override + @JsonKey(ignore: true) + _$$ProcessModelImplCopyWith<_$ProcessModelImpl> get copyWith => + throw _privateConstructorUsedError; +} + ModelInfoRequest _$ModelInfoRequestFromJson(Map json) { return _ModelInfoRequest.fromJson(json); } diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index f5548646..3443737b 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -476,6 +476,58 @@ Map _$$ModelDetailsImplToJson(_$ModelDetailsImpl instance) { return val; } +_$ProcessResponseImpl _$$ProcessResponseImplFromJson( + Map json) => + _$ProcessResponseImpl( + models: (json['models'] as List?) 
+ ?.map((e) => ProcessModel.fromJson(e as Map)) + .toList(), + ); + +Map _$$ProcessResponseImplToJson( + _$ProcessResponseImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('models', instance.models?.map((e) => e.toJson()).toList()); + return val; +} + +_$ProcessModelImpl _$$ProcessModelImplFromJson(Map json) => + _$ProcessModelImpl( + model: json['model'] as String?, + size: json['size'] as int?, + digest: json['digest'] as String?, + details: json['details'] == null + ? null + : ModelDetails.fromJson(json['details'] as Map), + expiresAt: json['expires_at'] as String?, + sizeVram: json['size_vram'] as int?, + ); + +Map _$$ProcessModelImplToJson(_$ProcessModelImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('model', instance.model); + writeNotNull('size', instance.size); + writeNotNull('digest', instance.digest); + writeNotNull('details', instance.details?.toJson()); + writeNotNull('expires_at', instance.expiresAt); + writeNotNull('size_vram', instance.sizeVram); + return val; +} + _$ModelInfoRequestImpl _$$ModelInfoRequestImplFromJson( Map json) => _$ModelInfoRequestImpl( diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index 876bab50..7ade34a7 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -109,6 +109,19 @@ paths: application/json: schema: $ref: '#/components/schemas/ModelsResponse' + /ps: + get: + operationId: listRunningModels + tags: + - Models + summary: List models that are running. + responses: + '200': + description: Successful operation. + content: + application/json: + schema: + $ref: '#/components/schemas/ProcessResponse' /show: post: operationId: showModelInfo @@ -171,7 +184,7 @@ paths: '200': description: Successful operation. content: - application/json: + application/x-ndjson: schema: $ref: '#/components/schemas/PullModelResponse' /push: @@ -190,7 +203,7 @@ paths: '200': description: Successful operation. content: - application/json: + application/x-ndjson: schema: $ref: '#/components/schemas/PushModelResponse' /blobs/{digest}: @@ -760,6 +773,43 @@ components: quantization_level: type: string description: The quantization level of the model. + ProcessResponse: + type: object + description: Response class for the list running models endpoint. + properties: + models: + type: array + description: List of running models. + items: + $ref: '#/components/schemas/ProcessModel' + ProcessModel: + type: object + description: A model that is currently loaded. + properties: + model: + type: string + description: *model_name + example: llama3:8b + size: + type: integer + format: int64 + description: Size of the model on disk. + example: 7323310500 + digest: + type: string + description: The model's digest. + example: 'sha256:bc07c81de745696fdf5afca05e065818a8149fb0c77266fb584d9b2cba3711a' + details: + $ref: '#/components/schemas/ModelDetails' + expires_at: + type: string + format: date-time + example: 2023-08-02T17:02:23.713454393-07:00 + size_vram: + type: integer + format: int64 + description: Size of the model on disk. + example: 7323310500 ModelInfoRequest: description: Request class for the show model info endpoint. type: object @@ -805,7 +855,7 @@ components: nullable: true description: The default messages for the model. 
items: - $ref: '#/components/schemas/Message' + $ref: '#/components/schemas/Message' CopyModelRequest: description: Request class for copying a model. type: object diff --git a/packages/ollama_dart/test/ollama_dart_models_test.dart b/packages/ollama_dart/test/ollama_dart_models_test.dart index abb3cef3..e511bff4 100644 --- a/packages/ollama_dart/test/ollama_dart_models_test.dart +++ b/packages/ollama_dart/test/ollama_dart_models_test.dart @@ -65,6 +65,19 @@ void main() { expect(res.models?.any((final m) => m.model == defaultModel), isTrue); }); + test('Test list running models', () async { + await client.generateCompletion( + request: const GenerateCompletionRequest( + model: defaultModel, + prompt: 'You are a llama', + options: RequestOptions(numPredict: 1), + ), + ); + + final res = await client.listRunningModels(); + expect(res.models?.any((final m) => m.model == defaultModel), isTrue); + }); + test('Test show model info', () async { final res = await client.showModelInfo( request: const ModelInfoRequest(model: defaultModel), From 8c902376a888dc064383ecbcda8cc5d4c3d4d903 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 14 Jun 2024 22:35:05 +0200 Subject: [PATCH 159/251] feat: Bootstrap `tavily_dart` package (#455) --- packages/tavily_dart/.gitignore | 7 +++++++ packages/tavily_dart/CHANGELOG.md | 3 +++ packages/tavily_dart/LICENSE | 21 +++++++++++++++++++ packages/tavily_dart/README.md | 16 ++++++++++++++ packages/tavily_dart/analysis_options.yaml | 1 + .../example/tavily_dart_example.dart | 3 +++ packages/tavily_dart/lib/tavily_dart.dart | 2 ++ packages/tavily_dart/pubspec.yaml | 16 ++++++++++++++ 8 files changed, 69 insertions(+) create mode 100644 packages/tavily_dart/.gitignore create mode 100644 packages/tavily_dart/CHANGELOG.md create mode 100644 packages/tavily_dart/LICENSE create mode 100644 packages/tavily_dart/README.md create mode 100644 packages/tavily_dart/analysis_options.yaml create mode 100644 packages/tavily_dart/example/tavily_dart_example.dart create mode 100644 packages/tavily_dart/lib/tavily_dart.dart create mode 100644 packages/tavily_dart/pubspec.yaml diff --git a/packages/tavily_dart/.gitignore b/packages/tavily_dart/.gitignore new file mode 100644 index 00000000..3cceda55 --- /dev/null +++ b/packages/tavily_dart/.gitignore @@ -0,0 +1,7 @@ +# https://dart.dev/guides/libraries/private-files +# Created by `dart pub` +.dart_tool/ + +# Avoid committing pubspec.lock for library packages; see +# https://dart.dev/guides/libraries/private-files#pubspeclock. +pubspec.lock diff --git a/packages/tavily_dart/CHANGELOG.md b/packages/tavily_dart/CHANGELOG.md new file mode 100644 index 00000000..90f8e244 --- /dev/null +++ b/packages/tavily_dart/CHANGELOG.md @@ -0,0 +1,3 @@ +## 0.0.1-dev.1 + +- Bootstrap package. 
diff --git a/packages/tavily_dart/LICENSE b/packages/tavily_dart/LICENSE new file mode 100644 index 00000000..f407ffdd --- /dev/null +++ b/packages/tavily_dart/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 David Miguel Lozano + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/packages/tavily_dart/README.md b/packages/tavily_dart/README.md new file mode 100644 index 00000000..bf452982 --- /dev/null +++ b/packages/tavily_dart/README.md @@ -0,0 +1,16 @@ +# Tavily Dart Client + +[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) +[![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) +[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) +[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) + +Dart Client for the [Tavily](https://tavily.com) API (a search engine optimized for LLMs and RAG). + +## Features + +TODO + +## License + +Ollama Dart Client is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). diff --git a/packages/tavily_dart/analysis_options.yaml b/packages/tavily_dart/analysis_options.yaml new file mode 100644 index 00000000..f04c6cf0 --- /dev/null +++ b/packages/tavily_dart/analysis_options.yaml @@ -0,0 +1 @@ +include: ../../analysis_options.yaml diff --git a/packages/tavily_dart/example/tavily_dart_example.dart b/packages/tavily_dart/example/tavily_dart_example.dart new file mode 100644 index 00000000..21f3e9f2 --- /dev/null +++ b/packages/tavily_dart/example/tavily_dart_example.dart @@ -0,0 +1,3 @@ +void main() { + // TODO +} diff --git a/packages/tavily_dart/lib/tavily_dart.dart b/packages/tavily_dart/lib/tavily_dart.dart new file mode 100644 index 00000000..c894f0f7 --- /dev/null +++ b/packages/tavily_dart/lib/tavily_dart.dart @@ -0,0 +1,2 @@ +/// Dart Client for the Tavily API (a search engine optimized for LLMs and RAG). +library; diff --git a/packages/tavily_dart/pubspec.yaml b/packages/tavily_dart/pubspec.yaml new file mode 100644 index 00000000..24fccdcb --- /dev/null +++ b/packages/tavily_dart/pubspec.yaml @@ -0,0 +1,16 @@ +name: tavily_dart +description: Dart Client for the Tavily API (a search engine optimized for LLMs and RAG). 
+version: 0.0.1-dev.1 +repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/tavily_dart +issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:tavily_dart +homepage: https://github.com/davidmigloz/langchain_dart +documentation: https://langchaindart.dev + +topics: + - ai + - llms + - search + - rag + +environment: + sdk: ">=3.0.0 <4.0.0" From 66815697a8c44dc286f470a1c11d90a50bc7d1ce Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 14 Jun 2024 23:37:06 +0200 Subject: [PATCH 160/251] feat: Implement tavily_dart, a Dart client for Tavily API (#456) --- packages/tavily_dart/README.md | 119 +- packages/tavily_dart/build.yaml | 13 + .../example/tavily_dart_example.dart | 29 +- .../tavily_dart/lib/src/generated/client.dart | 382 ++++++ .../lib/src/generated/schema/schema.dart | 15 + .../src/generated/schema/schema.freezed.dart | 1027 +++++++++++++++++ .../lib/src/generated/schema/schema.g.dart | 116 ++ .../src/generated/schema/search_request.dart | 103 ++ .../src/generated/schema/search_response.dart | 68 ++ .../src/generated/schema/search_result.dart | 62 + packages/tavily_dart/lib/tavily_dart.dart | 3 + packages/tavily_dart/oas/main.dart | 23 + packages/tavily_dart/oas/tavily_openapi.yaml | 156 +++ packages/tavily_dart/pubspec.yaml | 18 + packages/tavily_dart/test/tavily_test.dart | 45 + 15 files changed, 2175 insertions(+), 4 deletions(-) create mode 100644 packages/tavily_dart/build.yaml create mode 100644 packages/tavily_dart/lib/src/generated/client.dart create mode 100644 packages/tavily_dart/lib/src/generated/schema/schema.dart create mode 100644 packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart create mode 100644 packages/tavily_dart/lib/src/generated/schema/schema.g.dart create mode 100644 packages/tavily_dart/lib/src/generated/schema/search_request.dart create mode 100644 packages/tavily_dart/lib/src/generated/schema/search_response.dart create mode 100644 packages/tavily_dart/lib/src/generated/schema/search_result.dart create mode 100644 packages/tavily_dart/oas/main.dart create mode 100644 packages/tavily_dart/oas/tavily_openapi.yaml create mode 100644 packages/tavily_dart/test/tavily_test.dart diff --git a/packages/tavily_dart/README.md b/packages/tavily_dart/README.md index bf452982..a7cd6afd 100644 --- a/packages/tavily_dart/README.md +++ b/packages/tavily_dart/README.md @@ -9,8 +9,123 @@ Dart Client for the [Tavily](https://tavily.com) API (a search engine optimized ## Features -TODO +- Fully type-safe, [documented](https://pub.dev/documentation/tavily_dart/latest) and tested +- All platforms supported +- Custom base URL, headers and query params support (e.g. HTTP proxies) +- Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) + +**Supported endpoints:** +- Search + +## Table of contents + +- [Usage](#usage) + * [Authentication](#authentication) + * [Search](#search) +- [Advance Usage](#advance-usage) + * [Custom HTTP client](#custom-http-client) + * [Using a proxy](#using-a-proxy) + + [HTTP proxy](#http-proxy) + + [SOCKS5 proxy](#socks5-proxy) +- [Acknowledgements](#acknowledgements) +- [License](#license) + +## Usage + +Refer to the [documentation](https://docs.tavily.com) for more information about the API. + +### Authentication + +The Tavily API uses API keys for authentication. Visit the [Tavily console](https://app.tavily.com/) to retrieve the API key you'll use in your requests. 
+ +> **Remember that your API key is a secret!** +> Do not share it with others or expose it in any client-side code (browsers, apps). Production requests must be routed through your own backend server where your API key can be securely loaded from an environment variable or key management service. + +```dart +final apiKey = Platform.environment['TAVILY_API_KEY']; +final client = TavilyClient(); +``` + +### Search + +Search for data based on a query. + +**Basic search:** + +```dart +final res = await client.search( + request: SearchRequest( + apiKey: apiKey, + query: 'Should I invest in Apple right now?', + ), +); +print(res); +``` + +**Advanced search:** + +```dart +final res = await client.search( + request: SearchRequest( + apiKey: apiKey, + query: 'Should I invest in Apple right now?', + searchDepth: SearchRequestSearchDepth.advanced, + ), +); +print(res); +``` + +See the API documentation for more information on all supported search parameters. + +## Advance Usage + +### Custom HTTP client + +You can always provide your own implementation of `http.Client` for further customization: + +```dart +final client = TavilyClient( + client: MyHttpClient(), +); +``` + +### Using a proxy + +#### HTTP proxy + +You can use your own HTTP proxy by overriding the `baseUrl` and providing your required `headers`: + +```dart +final client = TavilyClient( + baseUrl: 'https://my-proxy.com', + headers: { + 'x-my-proxy-header': 'value', + }, +); +``` + +If you need further customization, you can always provide your own `http.Client`. + +#### SOCKS5 proxy + +To use a SOCKS5 proxy, you can use the [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package: + +```dart +final baseHttpClient = HttpClient(); +SocksTCPClient.assignToHttpClient(baseHttpClient, [ + ProxySettings(InternetAddress.loopbackIPv4, 1080), +]); +final httpClient = IOClient(baseClient); + +final client = TavilyClient( + client: httpClient, +); +``` + +## Acknowledgements + +The generation of this client was made possible by the [openapi_spec](https://github.com/tazatechnology/openapi_spec) package. ## License -Ollama Dart Client is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). +Tavily Dart Client is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). 
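The "Custom HTTP client" section of the README above passes a `MyHttpClient()` placeholder without defining it. A minimal sketch of such a client, assuming only `package:http`'s `BaseClient` plus the `TavilyClient` constructor and `search`/`endSession` methods introduced in this patch, could wrap an inner client and log each outgoing request; the `LoggingHttpClient` name and the printed log line are illustrative assumptions, not part of `tavily_dart`:

```dart
// ignore_for_file: avoid_print
import 'package:http/http.dart' as http;
import 'package:tavily_dart/tavily_dart.dart';

/// Hypothetical logging client standing in for the `MyHttpClient` placeholder:
/// it wraps an inner [http.Client] and prints every outgoing request before
/// delegating to it.
class LoggingHttpClient extends http.BaseClient {
  LoggingHttpClient([http.Client? inner]) : _inner = inner ?? http.Client();

  final http.Client _inner;

  @override
  Future<http.StreamedResponse> send(http.BaseRequest request) {
    print('${request.method} ${request.url}');
    return _inner.send(request);
  }

  @override
  void close() => _inner.close();
}

void main() async {
  // Inject the custom client into the generated TavilyClient.
  final client = TavilyClient(client: LoggingHttpClient());
  final res = await client.search(
    request: SearchRequest(
      apiKey: 'your-api-key', // assumption: load a real key from the environment
      query: 'Should I invest in Apple right now?',
    ),
  );
  print(res);
  client.endSession();
}
```

Any other `http.Client` implementation (retries, caching, custom certificates) could be injected the same way through the `client` parameter.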
diff --git a/packages/tavily_dart/build.yaml b/packages/tavily_dart/build.yaml new file mode 100644 index 00000000..dee719ac --- /dev/null +++ b/packages/tavily_dart/build.yaml @@ -0,0 +1,13 @@ +targets: + $default: + builders: + source_gen|combining_builder: + options: + ignore_for_file: + - prefer_final_parameters + - require_trailing_commas + - non_constant_identifier_names + - unnecessary_null_checks + json_serializable: + options: + explicit_to_json: true diff --git a/packages/tavily_dart/example/tavily_dart_example.dart b/packages/tavily_dart/example/tavily_dart_example.dart index 21f3e9f2..652564b2 100644 --- a/packages/tavily_dart/example/tavily_dart_example.dart +++ b/packages/tavily_dart/example/tavily_dart_example.dart @@ -1,3 +1,28 @@ -void main() { - // TODO +// ignore_for_file: avoid_print +import 'dart:io'; + +import 'package:tavily_dart/tavily_dart.dart'; + +void main() async { + final apiKey = Platform.environment['TAVILY_API_KEY']!; + final client = TavilyClient(); + + // Basic search + final res1 = await client.search( + request: SearchRequest( + apiKey: apiKey, + query: 'Should I invest in Apple right now?', + ), + ); + print(res1); + + // Advanced search + final res2 = await client.search( + request: SearchRequest( + apiKey: apiKey, + query: 'Should I invest in Apple right now?', + searchDepth: SearchRequestSearchDepth.advanced, + ), + ); + print(res2); } diff --git a/packages/tavily_dart/lib/src/generated/client.dart b/packages/tavily_dart/lib/src/generated/client.dart new file mode 100644 index 00000000..f6fb0439 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/client.dart @@ -0,0 +1,382 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target, unused_import + +import 'dart:convert'; +import 'dart:typed_data'; + +import 'package:http/http.dart' as http; +import 'package:http/retry.dart'; +import 'package:meta/meta.dart'; + +import 'schema/schema.dart'; + +/// Enum of HTTP methods +enum HttpMethod { get, put, post, delete, options, head, patch, trace } + +// ========================================== +// CLASS: TavilyClientException +// ========================================== + +/// HTTP exception handler for TavilyClient +class TavilyClientException implements Exception { + TavilyClientException({ + required this.message, + required this.uri, + required this.method, + this.code, + this.body, + }); + + final String message; + final Uri uri; + final HttpMethod method; + final int? code; + final Object? body; + + @override + String toString() { + Object? data; + try { + data = body is String ? jsonDecode(body as String) : body.toString(); + } catch (e) { + data = body.toString(); + } + final s = JsonEncoder.withIndent(' ').convert({ + 'uri': uri.toString(), + 'method': method.name.toUpperCase(), + 'code': code, + 'message': message, + 'body': data, + }); + return 'TavilyClientException($s)'; + } +} + +// ========================================== +// CLASS: TavilyClient +// ========================================== + +/// Client for Tavily API (v.1.0.0) +/// +/// Tavily Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience. +class TavilyClient { + /// Creates a new TavilyClient instance. 
+ /// + /// - [TavilyClient.baseUrl] Override base URL (default: server url defined in spec) + /// - [TavilyClient.headers] Global headers to be sent with every request + /// - [TavilyClient.queryParams] Global query parameters to be sent with every request + /// - [TavilyClient.client] Override HTTP client to use for requests + TavilyClient({ + this.baseUrl, + this.headers = const {}, + this.queryParams = const {}, + http.Client? client, + }) : assert( + baseUrl == null || baseUrl.startsWith('http'), + 'baseUrl must start with http', + ), + assert( + baseUrl == null || !baseUrl.endsWith('/'), + 'baseUrl must not end with /', + ), + client = RetryClient(client ?? http.Client()); + + /// Override base URL (default: server url defined in spec) + final String? baseUrl; + + /// Global headers to be sent with every request + final Map headers; + + /// Global query parameters to be sent with every request + final Map queryParams; + + /// HTTP client for requests + final http.Client client; + + // ------------------------------------------ + // METHOD: endSession + // ------------------------------------------ + + /// Close the HTTP client and end session + void endSession() => client.close(); + + // ------------------------------------------ + // METHOD: onRequest + // ------------------------------------------ + + /// Middleware for HTTP requests (user can override) + /// + /// The request can be of type [http.Request] or [http.MultipartRequest] + Future onRequest(http.BaseRequest request) { + return Future.value(request); + } + + // ------------------------------------------ + // METHOD: onStreamedResponse + // ------------------------------------------ + + /// Middleware for HTTP streamed responses (user can override) + Future onStreamedResponse( + final http.StreamedResponse response, + ) { + return Future.value(response); + } + + // ------------------------------------------ + // METHOD: onResponse + // ------------------------------------------ + + /// Middleware for HTTP responses (user can override) + Future onResponse(http.Response response) { + return Future.value(response); + } + + // ------------------------------------------ + // METHOD: _jsonDecode + // ------------------------------------------ + + dynamic _jsonDecode(http.Response r) { + return json.decode(utf8.decode(r.bodyBytes)); + } + + // ------------------------------------------ + // METHOD: _request + // ------------------------------------------ + + /// Reusable request method + @protected + Future _request({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + // Override with the user provided baseUrl + baseUrl = this.baseUrl ?? 
baseUrl; + + // Ensure a baseUrl is provided + assert( + baseUrl.isNotEmpty, + 'baseUrl is required, but none defined in spec or provided by user', + ); + + // Add global query parameters + queryParams = {...queryParams, ...this.queryParams}; + + // Ensure query parameters are strings or iterable of strings + queryParams = queryParams.map((key, value) { + if (value is Iterable) { + return MapEntry(key, value.map((v) => v.toString())); + } else { + return MapEntry(key, value.toString()); + } + }); + + // Build the request URI + Uri uri = Uri.parse(baseUrl + path); + if (queryParams.isNotEmpty) { + uri = uri.replace(queryParameters: queryParams); + } + + // Build the headers + Map headers = {...headerParams}; + + // Define the request type being sent to server + if (requestType.isNotEmpty) { + headers['content-type'] = requestType; + } + + // Define the response type expected to receive from server + if (responseType.isNotEmpty) { + headers['accept'] = responseType; + } + + // Add global headers + headers.addAll(this.headers); + + // Build the request object + http.BaseRequest request; + if (isMultipart) { + // Handle multipart request + request = http.MultipartRequest(method.name, uri); + request = request as http.MultipartRequest; + if (body is List) { + request.files.addAll(body); + } else { + request.files.add(body as http.MultipartFile); + } + } else { + // Handle normal request + request = http.Request(method.name, uri); + request = request as http.Request; + try { + if (body != null) { + request.body = json.encode(body); + } + } catch (e) { + // Handle request encoding error + throw TavilyClientException( + uri: uri, + method: method, + message: 'Could not encode: ${body.runtimeType}', + body: e, + ); + } + } + + // Add request headers + request.headers.addAll(headers); + + // Handle user request middleware + request = await onRequest(request); + + // Submit request + return await client.send(request); + } + + // ------------------------------------------ + // METHOD: makeRequestStream + // ------------------------------------------ + + /// Reusable request stream method + @protected + Future makeRequestStream({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + final uri = Uri.parse((this.baseUrl ?? 
baseUrl) + path); + late http.StreamedResponse response; + try { + response = await _request( + baseUrl: baseUrl, + path: path, + method: method, + queryParams: queryParams, + headerParams: headerParams, + requestType: requestType, + responseType: responseType, + body: body, + ); + // Handle user response middleware + response = await onStreamedResponse(response); + } catch (e) { + // Handle request and response errors + throw TavilyClientException( + uri: uri, + method: method, + message: 'Response error', + body: e, + ); + } + + // Check for successful response + if ((response.statusCode ~/ 100) == 2) { + return response; + } + + // Handle unsuccessful response + throw TavilyClientException( + uri: uri, + method: method, + message: 'Unsuccessful response', + code: response.statusCode, + body: (await http.Response.fromStream(response)).body, + ); + } + + // ------------------------------------------ + // METHOD: makeRequest + // ------------------------------------------ + + /// Reusable request method + @protected + Future makeRequest({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + final uri = Uri.parse((this.baseUrl ?? baseUrl) + path); + late http.Response response; + try { + final streamedResponse = await _request( + baseUrl: baseUrl, + path: path, + method: method, + queryParams: queryParams, + headerParams: headerParams, + requestType: requestType, + responseType: responseType, + body: body, + ); + response = await http.Response.fromStream(streamedResponse); + // Handle user response middleware + response = await onResponse(response); + } catch (e) { + // Handle request and response errors + throw TavilyClientException( + uri: uri, + method: method, + message: 'Response error', + body: e, + ); + } + + // Check for successful response + if ((response.statusCode ~/ 100) == 2) { + return response; + } + + // Handle unsuccessful response + throw TavilyClientException( + uri: uri, + method: method, + message: 'Unsuccessful response', + code: response.statusCode, + body: response.body, + ); + } + + // ------------------------------------------ + // METHOD: search + // ------------------------------------------ + + /// Search for data based on a query. + /// + /// `request`: The search request object. 
+ /// + /// `POST` `https://api.tavily.com/search` + Future search({ + required SearchRequest request, + }) async { + final r = await makeRequest( + baseUrl: 'https://api.tavily.com', + path: '/search', + method: HttpMethod.post, + isMultipart: false, + requestType: 'application/json', + responseType: 'application/json', + body: request, + ); + return SearchResponse.fromJson(_jsonDecode(r)); + } +} diff --git a/packages/tavily_dart/lib/src/generated/schema/schema.dart b/packages/tavily_dart/lib/src/generated/schema/schema.dart new file mode 100644 index 00000000..4b3ba505 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/schema.dart @@ -0,0 +1,15 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target + +library tavily_schema; + +import 'package:freezed_annotation/freezed_annotation.dart'; + +part 'schema.g.dart'; +part 'schema.freezed.dart'; + +part 'search_request.dart'; +part 'search_response.dart'; +part 'search_result.dart'; diff --git a/packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart b/packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart new file mode 100644 index 00000000..cc459594 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart @@ -0,0 +1,1027 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: unused_element, deprecated_member_use, deprecated_member_use_from_same_package, use_function_type_syntax_for_parameters, unnecessary_const, avoid_init_to_null, invalid_override_different_default_values_named, prefer_expression_function_bodies, annotate_overrides, invalid_annotation_target, unnecessary_question_mark + +part of 'schema.dart'; + +// ************************************************************************** +// FreezedGenerator +// ************************************************************************** + +T _$identity(T value) => value; + +final _privateConstructorUsedError = UnsupportedError( + 'It seems like you constructed your class using `MyClass._()`. This constructor is only meant to be used by freezed and you are not supposed to need it nor use it.\nPlease check the documentation here for more information: https://github.com/rrousselGit/freezed#adding-getters-and-methods-to-our-models'); + +SearchRequest _$SearchRequestFromJson(Map json) { + return _SearchRequest.fromJson(json); +} + +/// @nodoc +mixin _$SearchRequest { + /// Your unique API key. + @JsonKey(name: 'api_key') + String get apiKey => throw _privateConstructorUsedError; + + /// The search query string. + String get query => throw _privateConstructorUsedError; + + /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. + @JsonKey(name: 'search_depth') + SearchRequestSearchDepth get searchDepth => + throw _privateConstructorUsedError; + + /// Include a list of query related images in the response. Default is False. + @JsonKey(name: 'include_images') + bool get includeImages => throw _privateConstructorUsedError; + + /// Include answers in the search results. Default is False. + @JsonKey(name: 'include_answer') + bool get includeAnswer => throw _privateConstructorUsedError; + + /// Include raw content in the search results. Default is False. + @JsonKey(name: 'include_raw_content') + bool get includeRawContent => throw _privateConstructorUsedError; + + /// The number of maximum search results to return. Default is 5. 
+ @JsonKey(name: 'max_results') + int get maxResults => throw _privateConstructorUsedError; + + /// A list of domains to specifically include in the search results. Default is None. + @JsonKey(name: 'include_domains', includeIfNull: false) + List? get includeDomains => throw _privateConstructorUsedError; + + /// A list of domains to specifically exclude from the search results. Default is None. + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? get excludeDomains => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $SearchRequestCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $SearchRequestCopyWith<$Res> { + factory $SearchRequestCopyWith( + SearchRequest value, $Res Function(SearchRequest) then) = + _$SearchRequestCopyWithImpl<$Res, SearchRequest>; + @useResult + $Res call( + {@JsonKey(name: 'api_key') String apiKey, + String query, + @JsonKey(name: 'search_depth') SearchRequestSearchDepth searchDepth, + @JsonKey(name: 'include_images') bool includeImages, + @JsonKey(name: 'include_answer') bool includeAnswer, + @JsonKey(name: 'include_raw_content') bool includeRawContent, + @JsonKey(name: 'max_results') int maxResults, + @JsonKey(name: 'include_domains', includeIfNull: false) + List? includeDomains, + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? excludeDomains}); +} + +/// @nodoc +class _$SearchRequestCopyWithImpl<$Res, $Val extends SearchRequest> + implements $SearchRequestCopyWith<$Res> { + _$SearchRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? apiKey = null, + Object? query = null, + Object? searchDepth = null, + Object? includeImages = null, + Object? includeAnswer = null, + Object? includeRawContent = null, + Object? maxResults = null, + Object? includeDomains = freezed, + Object? excludeDomains = freezed, + }) { + return _then(_value.copyWith( + apiKey: null == apiKey + ? _value.apiKey + : apiKey // ignore: cast_nullable_to_non_nullable + as String, + query: null == query + ? _value.query + : query // ignore: cast_nullable_to_non_nullable + as String, + searchDepth: null == searchDepth + ? _value.searchDepth + : searchDepth // ignore: cast_nullable_to_non_nullable + as SearchRequestSearchDepth, + includeImages: null == includeImages + ? _value.includeImages + : includeImages // ignore: cast_nullable_to_non_nullable + as bool, + includeAnswer: null == includeAnswer + ? _value.includeAnswer + : includeAnswer // ignore: cast_nullable_to_non_nullable + as bool, + includeRawContent: null == includeRawContent + ? _value.includeRawContent + : includeRawContent // ignore: cast_nullable_to_non_nullable + as bool, + maxResults: null == maxResults + ? _value.maxResults + : maxResults // ignore: cast_nullable_to_non_nullable + as int, + includeDomains: freezed == includeDomains + ? _value.includeDomains + : includeDomains // ignore: cast_nullable_to_non_nullable + as List?, + excludeDomains: freezed == excludeDomains + ? 
_value.excludeDomains + : excludeDomains // ignore: cast_nullable_to_non_nullable + as List?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$SearchRequestImplCopyWith<$Res> + implements $SearchRequestCopyWith<$Res> { + factory _$$SearchRequestImplCopyWith( + _$SearchRequestImpl value, $Res Function(_$SearchRequestImpl) then) = + __$$SearchRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'api_key') String apiKey, + String query, + @JsonKey(name: 'search_depth') SearchRequestSearchDepth searchDepth, + @JsonKey(name: 'include_images') bool includeImages, + @JsonKey(name: 'include_answer') bool includeAnswer, + @JsonKey(name: 'include_raw_content') bool includeRawContent, + @JsonKey(name: 'max_results') int maxResults, + @JsonKey(name: 'include_domains', includeIfNull: false) + List? includeDomains, + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? excludeDomains}); +} + +/// @nodoc +class __$$SearchRequestImplCopyWithImpl<$Res> + extends _$SearchRequestCopyWithImpl<$Res, _$SearchRequestImpl> + implements _$$SearchRequestImplCopyWith<$Res> { + __$$SearchRequestImplCopyWithImpl( + _$SearchRequestImpl _value, $Res Function(_$SearchRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? apiKey = null, + Object? query = null, + Object? searchDepth = null, + Object? includeImages = null, + Object? includeAnswer = null, + Object? includeRawContent = null, + Object? maxResults = null, + Object? includeDomains = freezed, + Object? excludeDomains = freezed, + }) { + return _then(_$SearchRequestImpl( + apiKey: null == apiKey + ? _value.apiKey + : apiKey // ignore: cast_nullable_to_non_nullable + as String, + query: null == query + ? _value.query + : query // ignore: cast_nullable_to_non_nullable + as String, + searchDepth: null == searchDepth + ? _value.searchDepth + : searchDepth // ignore: cast_nullable_to_non_nullable + as SearchRequestSearchDepth, + includeImages: null == includeImages + ? _value.includeImages + : includeImages // ignore: cast_nullable_to_non_nullable + as bool, + includeAnswer: null == includeAnswer + ? _value.includeAnswer + : includeAnswer // ignore: cast_nullable_to_non_nullable + as bool, + includeRawContent: null == includeRawContent + ? _value.includeRawContent + : includeRawContent // ignore: cast_nullable_to_non_nullable + as bool, + maxResults: null == maxResults + ? _value.maxResults + : maxResults // ignore: cast_nullable_to_non_nullable + as int, + includeDomains: freezed == includeDomains + ? _value._includeDomains + : includeDomains // ignore: cast_nullable_to_non_nullable + as List?, + excludeDomains: freezed == excludeDomains + ? _value._excludeDomains + : excludeDomains // ignore: cast_nullable_to_non_nullable + as List?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$SearchRequestImpl extends _SearchRequest { + const _$SearchRequestImpl( + {@JsonKey(name: 'api_key') required this.apiKey, + required this.query, + @JsonKey(name: 'search_depth') + this.searchDepth = SearchRequestSearchDepth.basic, + @JsonKey(name: 'include_images') this.includeImages = false, + @JsonKey(name: 'include_answer') this.includeAnswer = false, + @JsonKey(name: 'include_raw_content') this.includeRawContent = false, + @JsonKey(name: 'max_results') this.maxResults = 5, + @JsonKey(name: 'include_domains', includeIfNull: false) + final List? includeDomains, + @JsonKey(name: 'exclude_domains', includeIfNull: false) + final List? 
excludeDomains}) + : _includeDomains = includeDomains, + _excludeDomains = excludeDomains, + super._(); + + factory _$SearchRequestImpl.fromJson(Map json) => + _$$SearchRequestImplFromJson(json); + + /// Your unique API key. + @override + @JsonKey(name: 'api_key') + final String apiKey; + + /// The search query string. + @override + final String query; + + /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. + @override + @JsonKey(name: 'search_depth') + final SearchRequestSearchDepth searchDepth; + + /// Include a list of query related images in the response. Default is False. + @override + @JsonKey(name: 'include_images') + final bool includeImages; + + /// Include answers in the search results. Default is False. + @override + @JsonKey(name: 'include_answer') + final bool includeAnswer; + + /// Include raw content in the search results. Default is False. + @override + @JsonKey(name: 'include_raw_content') + final bool includeRawContent; + + /// The number of maximum search results to return. Default is 5. + @override + @JsonKey(name: 'max_results') + final int maxResults; + + /// A list of domains to specifically include in the search results. Default is None. + final List? _includeDomains; + + /// A list of domains to specifically include in the search results. Default is None. + @override + @JsonKey(name: 'include_domains', includeIfNull: false) + List? get includeDomains { + final value = _includeDomains; + if (value == null) return null; + if (_includeDomains is EqualUnmodifiableListView) return _includeDomains; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// A list of domains to specifically exclude from the search results. Default is None. + final List? _excludeDomains; + + /// A list of domains to specifically exclude from the search results. Default is None. + @override + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? 
get excludeDomains { + final value = _excludeDomains; + if (value == null) return null; + if (_excludeDomains is EqualUnmodifiableListView) return _excludeDomains; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + @override + String toString() { + return 'SearchRequest(apiKey: $apiKey, query: $query, searchDepth: $searchDepth, includeImages: $includeImages, includeAnswer: $includeAnswer, includeRawContent: $includeRawContent, maxResults: $maxResults, includeDomains: $includeDomains, excludeDomains: $excludeDomains)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$SearchRequestImpl && + (identical(other.apiKey, apiKey) || other.apiKey == apiKey) && + (identical(other.query, query) || other.query == query) && + (identical(other.searchDepth, searchDepth) || + other.searchDepth == searchDepth) && + (identical(other.includeImages, includeImages) || + other.includeImages == includeImages) && + (identical(other.includeAnswer, includeAnswer) || + other.includeAnswer == includeAnswer) && + (identical(other.includeRawContent, includeRawContent) || + other.includeRawContent == includeRawContent) && + (identical(other.maxResults, maxResults) || + other.maxResults == maxResults) && + const DeepCollectionEquality() + .equals(other._includeDomains, _includeDomains) && + const DeepCollectionEquality() + .equals(other._excludeDomains, _excludeDomains)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + apiKey, + query, + searchDepth, + includeImages, + includeAnswer, + includeRawContent, + maxResults, + const DeepCollectionEquality().hash(_includeDomains), + const DeepCollectionEquality().hash(_excludeDomains)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$SearchRequestImplCopyWith<_$SearchRequestImpl> get copyWith => + __$$SearchRequestImplCopyWithImpl<_$SearchRequestImpl>(this, _$identity); + + @override + Map toJson() { + return _$$SearchRequestImplToJson( + this, + ); + } +} + +abstract class _SearchRequest extends SearchRequest { + const factory _SearchRequest( + {@JsonKey(name: 'api_key') required final String apiKey, + required final String query, + @JsonKey(name: 'search_depth') final SearchRequestSearchDepth searchDepth, + @JsonKey(name: 'include_images') final bool includeImages, + @JsonKey(name: 'include_answer') final bool includeAnswer, + @JsonKey(name: 'include_raw_content') final bool includeRawContent, + @JsonKey(name: 'max_results') final int maxResults, + @JsonKey(name: 'include_domains', includeIfNull: false) + final List? includeDomains, + @JsonKey(name: 'exclude_domains', includeIfNull: false) + final List? excludeDomains}) = _$SearchRequestImpl; + const _SearchRequest._() : super._(); + + factory _SearchRequest.fromJson(Map json) = + _$SearchRequestImpl.fromJson; + + @override + + /// Your unique API key. + @JsonKey(name: 'api_key') + String get apiKey; + @override + + /// The search query string. + String get query; + @override + + /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. + @JsonKey(name: 'search_depth') + SearchRequestSearchDepth get searchDepth; + @override + + /// Include a list of query related images in the response. Default is False. + @JsonKey(name: 'include_images') + bool get includeImages; + @override + + /// Include answers in the search results. Default is False. 
+ @JsonKey(name: 'include_answer') + bool get includeAnswer; + @override + + /// Include raw content in the search results. Default is False. + @JsonKey(name: 'include_raw_content') + bool get includeRawContent; + @override + + /// The number of maximum search results to return. Default is 5. + @JsonKey(name: 'max_results') + int get maxResults; + @override + + /// A list of domains to specifically include in the search results. Default is None. + @JsonKey(name: 'include_domains', includeIfNull: false) + List? get includeDomains; + @override + + /// A list of domains to specifically exclude from the search results. Default is None. + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? get excludeDomains; + @override + @JsonKey(ignore: true) + _$$SearchRequestImplCopyWith<_$SearchRequestImpl> get copyWith => + throw _privateConstructorUsedError; +} + +SearchResponse _$SearchResponseFromJson(Map json) { + return _SearchResponse.fromJson(json); +} + +/// @nodoc +mixin _$SearchResponse { + /// The answer to your search query. + @JsonKey(includeIfNull: false) + String? get answer => throw _privateConstructorUsedError; + + /// Your search query. + String get query => throw _privateConstructorUsedError; + + /// Your search result response time. + @JsonKey(name: 'response_time') + double get responseTime => throw _privateConstructorUsedError; + + /// A list of query related image urls. + @JsonKey(includeIfNull: false) + List? get images => throw _privateConstructorUsedError; + + /// A list of suggested research follow up questions related to original query. + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? get followUpQuestions => throw _privateConstructorUsedError; + + /// A list of search results. + List get results => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $SearchResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $SearchResponseCopyWith<$Res> { + factory $SearchResponseCopyWith( + SearchResponse value, $Res Function(SearchResponse) then) = + _$SearchResponseCopyWithImpl<$Res, SearchResponse>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? answer, + String query, + @JsonKey(name: 'response_time') double responseTime, + @JsonKey(includeIfNull: false) List? images, + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? followUpQuestions, + List results}); +} + +/// @nodoc +class _$SearchResponseCopyWithImpl<$Res, $Val extends SearchResponse> + implements $SearchResponseCopyWith<$Res> { + _$SearchResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? answer = freezed, + Object? query = null, + Object? responseTime = null, + Object? images = freezed, + Object? followUpQuestions = freezed, + Object? results = null, + }) { + return _then(_value.copyWith( + answer: freezed == answer + ? _value.answer + : answer // ignore: cast_nullable_to_non_nullable + as String?, + query: null == query + ? _value.query + : query // ignore: cast_nullable_to_non_nullable + as String, + responseTime: null == responseTime + ? _value.responseTime + : responseTime // ignore: cast_nullable_to_non_nullable + as double, + images: freezed == images + ? 
_value.images + : images // ignore: cast_nullable_to_non_nullable + as List?, + followUpQuestions: freezed == followUpQuestions + ? _value.followUpQuestions + : followUpQuestions // ignore: cast_nullable_to_non_nullable + as List?, + results: null == results + ? _value.results + : results // ignore: cast_nullable_to_non_nullable + as List, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$SearchResponseImplCopyWith<$Res> + implements $SearchResponseCopyWith<$Res> { + factory _$$SearchResponseImplCopyWith(_$SearchResponseImpl value, + $Res Function(_$SearchResponseImpl) then) = + __$$SearchResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? answer, + String query, + @JsonKey(name: 'response_time') double responseTime, + @JsonKey(includeIfNull: false) List? images, + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? followUpQuestions, + List results}); +} + +/// @nodoc +class __$$SearchResponseImplCopyWithImpl<$Res> + extends _$SearchResponseCopyWithImpl<$Res, _$SearchResponseImpl> + implements _$$SearchResponseImplCopyWith<$Res> { + __$$SearchResponseImplCopyWithImpl( + _$SearchResponseImpl _value, $Res Function(_$SearchResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? answer = freezed, + Object? query = null, + Object? responseTime = null, + Object? images = freezed, + Object? followUpQuestions = freezed, + Object? results = null, + }) { + return _then(_$SearchResponseImpl( + answer: freezed == answer + ? _value.answer + : answer // ignore: cast_nullable_to_non_nullable + as String?, + query: null == query + ? _value.query + : query // ignore: cast_nullable_to_non_nullable + as String, + responseTime: null == responseTime + ? _value.responseTime + : responseTime // ignore: cast_nullable_to_non_nullable + as double, + images: freezed == images + ? _value._images + : images // ignore: cast_nullable_to_non_nullable + as List?, + followUpQuestions: freezed == followUpQuestions + ? _value._followUpQuestions + : followUpQuestions // ignore: cast_nullable_to_non_nullable + as List?, + results: null == results + ? _value._results + : results // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$SearchResponseImpl extends _SearchResponse { + const _$SearchResponseImpl( + {@JsonKey(includeIfNull: false) this.answer, + required this.query, + @JsonKey(name: 'response_time') required this.responseTime, + @JsonKey(includeIfNull: false) final List? images, + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + final List? followUpQuestions, + required final List results}) + : _images = images, + _followUpQuestions = followUpQuestions, + _results = results, + super._(); + + factory _$SearchResponseImpl.fromJson(Map json) => + _$$SearchResponseImplFromJson(json); + + /// The answer to your search query. + @override + @JsonKey(includeIfNull: false) + final String? answer; + + /// Your search query. + @override + final String query; + + /// Your search result response time. + @override + @JsonKey(name: 'response_time') + final double responseTime; + + /// A list of query related image urls. + final List? _images; + + /// A list of query related image urls. + @override + @JsonKey(includeIfNull: false) + List? 
get images { + final value = _images; + if (value == null) return null; + if (_images is EqualUnmodifiableListView) return _images; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// A list of suggested research follow up questions related to original query. + final List? _followUpQuestions; + + /// A list of suggested research follow up questions related to original query. + @override + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? get followUpQuestions { + final value = _followUpQuestions; + if (value == null) return null; + if (_followUpQuestions is EqualUnmodifiableListView) + return _followUpQuestions; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// A list of search results. + final List _results; + + /// A list of search results. + @override + List get results { + if (_results is EqualUnmodifiableListView) return _results; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_results); + } + + @override + String toString() { + return 'SearchResponse(answer: $answer, query: $query, responseTime: $responseTime, images: $images, followUpQuestions: $followUpQuestions, results: $results)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$SearchResponseImpl && + (identical(other.answer, answer) || other.answer == answer) && + (identical(other.query, query) || other.query == query) && + (identical(other.responseTime, responseTime) || + other.responseTime == responseTime) && + const DeepCollectionEquality().equals(other._images, _images) && + const DeepCollectionEquality() + .equals(other._followUpQuestions, _followUpQuestions) && + const DeepCollectionEquality().equals(other._results, _results)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + answer, + query, + responseTime, + const DeepCollectionEquality().hash(_images), + const DeepCollectionEquality().hash(_followUpQuestions), + const DeepCollectionEquality().hash(_results)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$SearchResponseImplCopyWith<_$SearchResponseImpl> get copyWith => + __$$SearchResponseImplCopyWithImpl<_$SearchResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$SearchResponseImplToJson( + this, + ); + } +} + +abstract class _SearchResponse extends SearchResponse { + const factory _SearchResponse( + {@JsonKey(includeIfNull: false) final String? answer, + required final String query, + @JsonKey(name: 'response_time') required final double responseTime, + @JsonKey(includeIfNull: false) final List? images, + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + final List? followUpQuestions, + required final List results}) = _$SearchResponseImpl; + const _SearchResponse._() : super._(); + + factory _SearchResponse.fromJson(Map json) = + _$SearchResponseImpl.fromJson; + + @override + + /// The answer to your search query. + @JsonKey(includeIfNull: false) + String? get answer; + @override + + /// Your search query. + String get query; + @override + + /// Your search result response time. + @JsonKey(name: 'response_time') + double get responseTime; + @override + + /// A list of query related image urls. + @JsonKey(includeIfNull: false) + List? get images; + @override + + /// A list of suggested research follow up questions related to original query. 
+ @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? get followUpQuestions; + @override + + /// A list of search results. + List get results; + @override + @JsonKey(ignore: true) + _$$SearchResponseImplCopyWith<_$SearchResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + +SearchResult _$SearchResultFromJson(Map json) { + return _SearchResult.fromJson(json); +} + +/// @nodoc +mixin _$SearchResult { + /// The title of the search result url. + String get title => throw _privateConstructorUsedError; + + /// The url of the search result. + String get url => throw _privateConstructorUsedError; + + /// The most query related content from the scraped url. + String get content => throw _privateConstructorUsedError; + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + @JsonKey(name: 'raw_content', includeIfNull: false) + String? get rawContent => throw _privateConstructorUsedError; + + /// The relevance score of the search result. + double get score => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $SearchResultCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $SearchResultCopyWith<$Res> { + factory $SearchResultCopyWith( + SearchResult value, $Res Function(SearchResult) then) = + _$SearchResultCopyWithImpl<$Res, SearchResult>; + @useResult + $Res call( + {String title, + String url, + String content, + @JsonKey(name: 'raw_content', includeIfNull: false) String? rawContent, + double score}); +} + +/// @nodoc +class _$SearchResultCopyWithImpl<$Res, $Val extends SearchResult> + implements $SearchResultCopyWith<$Res> { + _$SearchResultCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? title = null, + Object? url = null, + Object? content = null, + Object? rawContent = freezed, + Object? score = null, + }) { + return _then(_value.copyWith( + title: null == title + ? _value.title + : title // ignore: cast_nullable_to_non_nullable + as String, + url: null == url + ? _value.url + : url // ignore: cast_nullable_to_non_nullable + as String, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String, + rawContent: freezed == rawContent + ? _value.rawContent + : rawContent // ignore: cast_nullable_to_non_nullable + as String?, + score: null == score + ? _value.score + : score // ignore: cast_nullable_to_non_nullable + as double, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$SearchResultImplCopyWith<$Res> + implements $SearchResultCopyWith<$Res> { + factory _$$SearchResultImplCopyWith( + _$SearchResultImpl value, $Res Function(_$SearchResultImpl) then) = + __$$SearchResultImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String title, + String url, + String content, + @JsonKey(name: 'raw_content', includeIfNull: false) String? rawContent, + double score}); +} + +/// @nodoc +class __$$SearchResultImplCopyWithImpl<$Res> + extends _$SearchResultCopyWithImpl<$Res, _$SearchResultImpl> + implements _$$SearchResultImplCopyWith<$Res> { + __$$SearchResultImplCopyWithImpl( + _$SearchResultImpl _value, $Res Function(_$SearchResultImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? title = null, + Object? url = null, + Object? 
content = null, + Object? rawContent = freezed, + Object? score = null, + }) { + return _then(_$SearchResultImpl( + title: null == title + ? _value.title + : title // ignore: cast_nullable_to_non_nullable + as String, + url: null == url + ? _value.url + : url // ignore: cast_nullable_to_non_nullable + as String, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String, + rawContent: freezed == rawContent + ? _value.rawContent + : rawContent // ignore: cast_nullable_to_non_nullable + as String?, + score: null == score + ? _value.score + : score // ignore: cast_nullable_to_non_nullable + as double, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$SearchResultImpl extends _SearchResult { + const _$SearchResultImpl( + {required this.title, + required this.url, + required this.content, + @JsonKey(name: 'raw_content', includeIfNull: false) this.rawContent, + required this.score}) + : super._(); + + factory _$SearchResultImpl.fromJson(Map json) => + _$$SearchResultImplFromJson(json); + + /// The title of the search result url. + @override + final String title; + + /// The url of the search result. + @override + final String url; + + /// The most query related content from the scraped url. + @override + final String content; + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + @override + @JsonKey(name: 'raw_content', includeIfNull: false) + final String? rawContent; + + /// The relevance score of the search result. + @override + final double score; + + @override + String toString() { + return 'SearchResult(title: $title, url: $url, content: $content, rawContent: $rawContent, score: $score)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$SearchResultImpl && + (identical(other.title, title) || other.title == title) && + (identical(other.url, url) || other.url == url) && + (identical(other.content, content) || other.content == content) && + (identical(other.rawContent, rawContent) || + other.rawContent == rawContent) && + (identical(other.score, score) || other.score == score)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, title, url, content, rawContent, score); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$SearchResultImplCopyWith<_$SearchResultImpl> get copyWith => + __$$SearchResultImplCopyWithImpl<_$SearchResultImpl>(this, _$identity); + + @override + Map toJson() { + return _$$SearchResultImplToJson( + this, + ); + } +} + +abstract class _SearchResult extends SearchResult { + const factory _SearchResult( + {required final String title, + required final String url, + required final String content, + @JsonKey(name: 'raw_content', includeIfNull: false) + final String? rawContent, + required final double score}) = _$SearchResultImpl; + const _SearchResult._() : super._(); + + factory _SearchResult.fromJson(Map json) = + _$SearchResultImpl.fromJson; + + @override + + /// The title of the search result url. + String get title; + @override + + /// The url of the search result. + String get url; + @override + + /// The most query related content from the scraped url. + String get content; + @override + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + @JsonKey(name: 'raw_content', includeIfNull: false) + String? get rawContent; + @override + + /// The relevance score of the search result. 
+ double get score; + @override + @JsonKey(ignore: true) + _$$SearchResultImplCopyWith<_$SearchResultImpl> get copyWith => + throw _privateConstructorUsedError; +} diff --git a/packages/tavily_dart/lib/src/generated/schema/schema.g.dart b/packages/tavily_dart/lib/src/generated/schema/schema.g.dart new file mode 100644 index 00000000..f9214d02 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/schema.g.dart @@ -0,0 +1,116 @@ +// GENERATED CODE - DO NOT MODIFY BY HAND + +// ignore_for_file: prefer_final_parameters, require_trailing_commas, non_constant_identifier_names, unnecessary_null_checks + +part of 'schema.dart'; + +// ************************************************************************** +// JsonSerializableGenerator +// ************************************************************************** + +_$SearchRequestImpl _$$SearchRequestImplFromJson(Map json) => + _$SearchRequestImpl( + apiKey: json['api_key'] as String, + query: json['query'] as String, + searchDepth: $enumDecodeNullable( + _$SearchRequestSearchDepthEnumMap, json['search_depth']) ?? + SearchRequestSearchDepth.basic, + includeImages: json['include_images'] as bool? ?? false, + includeAnswer: json['include_answer'] as bool? ?? false, + includeRawContent: json['include_raw_content'] as bool? ?? false, + maxResults: (json['max_results'] as num?)?.toInt() ?? 5, + includeDomains: (json['include_domains'] as List?) + ?.map((e) => e as String) + .toList(), + excludeDomains: (json['exclude_domains'] as List?) + ?.map((e) => e as String) + .toList(), + ); + +Map _$$SearchRequestImplToJson(_$SearchRequestImpl instance) { + final val = { + 'api_key': instance.apiKey, + 'query': instance.query, + 'search_depth': _$SearchRequestSearchDepthEnumMap[instance.searchDepth]!, + 'include_images': instance.includeImages, + 'include_answer': instance.includeAnswer, + 'include_raw_content': instance.includeRawContent, + 'max_results': instance.maxResults, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('include_domains', instance.includeDomains); + writeNotNull('exclude_domains', instance.excludeDomains); + return val; +} + +const _$SearchRequestSearchDepthEnumMap = { + SearchRequestSearchDepth.basic: 'basic', + SearchRequestSearchDepth.advanced: 'advanced', +}; + +_$SearchResponseImpl _$$SearchResponseImplFromJson(Map json) => + _$SearchResponseImpl( + answer: json['answer'] as String?, + query: json['query'] as String, + responseTime: (json['response_time'] as num).toDouble(), + images: + (json['images'] as List?)?.map((e) => e as String).toList(), + followUpQuestions: (json['follow_up_questions'] as List?) 
+ ?.map((e) => e as String) + .toList(), + results: (json['results'] as List) + .map((e) => SearchResult.fromJson(e as Map)) + .toList(), + ); + +Map _$$SearchResponseImplToJson( + _$SearchResponseImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('answer', instance.answer); + val['query'] = instance.query; + val['response_time'] = instance.responseTime; + writeNotNull('images', instance.images); + writeNotNull('follow_up_questions', instance.followUpQuestions); + val['results'] = instance.results.map((e) => e.toJson()).toList(); + return val; +} + +_$SearchResultImpl _$$SearchResultImplFromJson(Map json) => + _$SearchResultImpl( + title: json['title'] as String, + url: json['url'] as String, + content: json['content'] as String, + rawContent: json['raw_content'] as String?, + score: (json['score'] as num).toDouble(), + ); + +Map _$$SearchResultImplToJson(_$SearchResultImpl instance) { + final val = { + 'title': instance.title, + 'url': instance.url, + 'content': instance.content, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('raw_content', instance.rawContent); + val['score'] = instance.score; + return val; +} diff --git a/packages/tavily_dart/lib/src/generated/schema/search_request.dart b/packages/tavily_dart/lib/src/generated/schema/search_request.dart new file mode 100644 index 00000000..c0d16e7a --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/search_request.dart @@ -0,0 +1,103 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of tavily_schema; + +// ========================================== +// CLASS: SearchRequest +// ========================================== + +/// The search request object. +@freezed +class SearchRequest with _$SearchRequest { + const SearchRequest._(); + + /// Factory constructor for SearchRequest + const factory SearchRequest({ + /// Your unique API key. + @JsonKey(name: 'api_key') required String apiKey, + + /// The search query string. + required String query, + + /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. + @JsonKey(name: 'search_depth') + @Default(SearchRequestSearchDepth.basic) + SearchRequestSearchDepth searchDepth, + + /// Include a list of query related images in the response. Default is False. + @JsonKey(name: 'include_images') @Default(false) bool includeImages, + + /// Include answers in the search results. Default is False. + @JsonKey(name: 'include_answer') @Default(false) bool includeAnswer, + + /// Include raw content in the search results. Default is False. + @JsonKey(name: 'include_raw_content') + @Default(false) + bool includeRawContent, + + /// The number of maximum search results to return. Default is 5. + @JsonKey(name: 'max_results') @Default(5) int maxResults, + + /// A list of domains to specifically include in the search results. Default is None. + @JsonKey(name: 'include_domains', includeIfNull: false) + List? includeDomains, + + /// A list of domains to specifically exclude from the search results. Default is None. + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? 
excludeDomains, + }) = _SearchRequest; + + /// Object construction from a JSON representation + factory SearchRequest.fromJson(Map json) => + _$SearchRequestFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'api_key', + 'query', + 'search_depth', + 'include_images', + 'include_answer', + 'include_raw_content', + 'max_results', + 'include_domains', + 'exclude_domains' + ]; + + /// Validation constants + static const maxResultsDefaultValue = 5; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'api_key': apiKey, + 'query': query, + 'search_depth': searchDepth, + 'include_images': includeImages, + 'include_answer': includeAnswer, + 'include_raw_content': includeRawContent, + 'max_results': maxResults, + 'include_domains': includeDomains, + 'exclude_domains': excludeDomains, + }; + } +} + +// ========================================== +// ENUM: SearchRequestSearchDepth +// ========================================== + +/// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. +enum SearchRequestSearchDepth { + @JsonValue('basic') + basic, + @JsonValue('advanced') + advanced, +} diff --git a/packages/tavily_dart/lib/src/generated/schema/search_response.dart b/packages/tavily_dart/lib/src/generated/schema/search_response.dart new file mode 100644 index 00000000..473db9c1 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/search_response.dart @@ -0,0 +1,68 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of tavily_schema; + +// ========================================== +// CLASS: SearchResponse +// ========================================== + +/// The response data from the search query. +@freezed +class SearchResponse with _$SearchResponse { + const SearchResponse._(); + + /// Factory constructor for SearchResponse + const factory SearchResponse({ + /// The answer to your search query. + @JsonKey(includeIfNull: false) String? answer, + + /// Your search query. + required String query, + + /// Your search result response time. + @JsonKey(name: 'response_time') required double responseTime, + + /// A list of query related image urls. + @JsonKey(includeIfNull: false) List? images, + + /// A list of suggested research follow up questions related to original query. + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? followUpQuestions, + + /// A list of search results. + required List results, + }) = _SearchResponse; + + /// Object construction from a JSON representation + factory SearchResponse.fromJson(Map json) => + _$SearchResponseFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'answer', + 'query', + 'response_time', + 'images', + 'follow_up_questions', + 'results' + ]; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'answer': answer, + 'query': query, + 'response_time': responseTime, + 'images': images, + 'follow_up_questions': followUpQuestions, + 'results': results, + }; + } +} diff --git a/packages/tavily_dart/lib/src/generated/schema/search_result.dart b/packages/tavily_dart/lib/src/generated/schema/search_result.dart new file mode 100644 index 00000000..cfb75690 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/search_result.dart @@ -0,0 +1,62 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of tavily_schema; + +// ========================================== +// CLASS: SearchResult +// ========================================== + +/// The search result object. +@freezed +class SearchResult with _$SearchResult { + const SearchResult._(); + + /// Factory constructor for SearchResult + const factory SearchResult({ + /// The title of the search result url. + required String title, + + /// The url of the search result. + required String url, + + /// The most query related content from the scraped url. + required String content, + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + @JsonKey(name: 'raw_content', includeIfNull: false) String? rawContent, + + /// The relevance score of the search result. + required double score, + }) = _SearchResult; + + /// Object construction from a JSON representation + factory SearchResult.fromJson(Map json) => + _$SearchResultFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'title', + 'url', + 'content', + 'raw_content', + 'score' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'title': title, + 'url': url, + 'content': content, + 'raw_content': rawContent, + 'score': score, + }; + } +} diff --git a/packages/tavily_dart/lib/tavily_dart.dart b/packages/tavily_dart/lib/tavily_dart.dart index c894f0f7..272b33ce 100644 --- a/packages/tavily_dart/lib/tavily_dart.dart +++ b/packages/tavily_dart/lib/tavily_dart.dart @@ -1,2 +1,5 @@ /// Dart Client for the Tavily API (a search engine optimized for LLMs and RAG). library; + +export 'src/generated/client.dart'; +export 'src/generated/schema/schema.dart'; diff --git a/packages/tavily_dart/oas/main.dart b/packages/tavily_dart/oas/main.dart new file mode 100644 index 00000000..bf08264b --- /dev/null +++ b/packages/tavily_dart/oas/main.dart @@ -0,0 +1,23 @@ +import 'dart:io'; + +import 'package:openapi_spec/openapi_spec.dart'; + +/// Generates Tavily API client Dart code from the OpenAPI spec. 
+/// https://docs.tavily.com/docs/tavily-api/rest_api +void main() async { + final spec = OpenApi.fromFile(source: 'oas/tavily_openapi.yaml'); + + await spec.generate( + package: 'Tavily', + destination: 'lib/src/generated/', + replace: true, + clientOptions: const ClientGeneratorOptions( + enabled: true, + ), + ); + + await Process.run( + 'dart', + ['run', 'build_runner', 'build', 'lib', '--delete-conflicting-outputs'], + ); +} diff --git a/packages/tavily_dart/oas/tavily_openapi.yaml b/packages/tavily_dart/oas/tavily_openapi.yaml new file mode 100644 index 00000000..250fa447 --- /dev/null +++ b/packages/tavily_dart/oas/tavily_openapi.yaml @@ -0,0 +1,156 @@ +openapi: 3.0.3 + +info: + title: Tavily API + description: Tavily Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience. + version: 1.0.0 + contact: + name: Tavily Support + url: https://tavily.com + +servers: + - url: https://api.tavily.com + +paths: + /search: + post: + summary: Search for data based on a query. + operationId: search + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/SearchRequest' + responses: + '200': + description: Successful search response. + content: + application/json: + schema: + $ref: '#/components/schemas/SearchResponse' + '400': + description: Bad Request — Your request is invalid. + '401': + description: Unauthorized — Your API key is wrong. + '403': + description: Forbidden — The endpoint requested is hidden for administrators only. + '404': + description: Not Found — The specified endpoint could not be found. + '405': + description: Method Not Allowed — You tried to access an endpoint with an invalid method. + '429': + description: Too Many Requests — You're requesting too many results! Slow down! + '500': + description: Internal Server Error — We had a problem with our server. Try again later. + '503': + description: Service Unavailable — We're temporarily offline for maintenance. Please try again later. + '504': + description: Gateway Timeout — We're temporarily offline for maintenance. Please try again later. + +components: + schemas: + SearchRequest: + type: object + description: The search request object. + properties: + api_key: + type: string + description: Your unique API key. + example: "your api key" + query: + type: string + description: The search query string. + example: "your search query" + search_depth: + type: string + description: The depth of the search. It can be 'basic' or advanced. Default is 'basic'. + enum: + - basic + - advanced + default: basic + include_images: + type: boolean + description: Include a list of query related images in the response. Default is False. + default: false + include_answer: + type: boolean + description: Include answers in the search results. Default is False. + default: false + include_raw_content: + type: boolean + description: Include raw content in the search results. Default is False. + default: false + max_results: + type: integer + description: The number of maximum search results to return. Default is 5. + default: 5 + include_domains: + type: array + items: + type: string + description: A list of domains to specifically include in the search results. Default is None. + exclude_domains: + type: array + items: + type: string + description: A list of domains to specifically exclude from the search results. Default is None. 
+ required: + - api_key + - query + SearchResponse: + type: object + description: The response data from the search query. + properties: + answer: + type: string + description: The answer to your search query. + query: + type: string + description: Your search query. + response_time: + type: number + description: Your search result response time. + images: + type: array + items: + type: string + description: A list of query related image urls. + follow_up_questions: + type: array + items: + type: string + description: A list of suggested research follow up questions related to original query. + results: + type: array + description: A list of search results. + items: + $ref: '#/components/schemas/SearchResult' + required: + - query + - response_time + - results + SearchResult: + type: object + description: The search result object. + properties: + title: + type: string + description: The title of the search result url. + url: + type: string + description: The url of the search result. + content: + type: string + description: The most query related content from the scraped url. + raw_content: + type: string + description: The parsed and cleaned HTML of the site. For now includes parsed text only. + score: + type: number + description: The relevance score of the search result. + required: + - title + - url + - content + - score diff --git a/packages/tavily_dart/pubspec.yaml b/packages/tavily_dart/pubspec.yaml index 24fccdcb..216e0b0d 100644 --- a/packages/tavily_dart/pubspec.yaml +++ b/packages/tavily_dart/pubspec.yaml @@ -14,3 +14,21 @@ topics: environment: sdk: ">=3.0.0 <4.0.0" + +dependencies: + fetch_client: ^1.0.2 + freezed_annotation: ^2.4.1 + http: ^1.1.0 + json_annotation: ^4.8.1 + meta: ^1.11.0 + +dev_dependencies: + build_runner: ^2.4.9 + freezed: ^2.4.7 + json_serializable: ^6.7.1 + # openapi_spec: ^0.7.8 + openapi_spec: + git: + url: https://github.com/davidmigloz/openapi_spec.git + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 + test: ^1.25.2 diff --git a/packages/tavily_dart/test/tavily_test.dart b/packages/tavily_dart/test/tavily_test.dart new file mode 100644 index 00000000..0df02cb8 --- /dev/null +++ b/packages/tavily_dart/test/tavily_test.dart @@ -0,0 +1,45 @@ +@TestOn('vm') +library; // Uses dart:io + +import 'dart:io'; + +import 'package:tavily_dart/tavily_dart.dart'; +import 'package:test/test.dart'; + +void main() { + group('Tavily API tests', () { + late TavilyClient client; + + setUp(() async { + client = TavilyClient(); + }); + + tearDown(() { + client.endSession(); + }); + + test('Test call search API', () async { + final res = await client.search( + request: SearchRequest( + apiKey: Platform.environment['TAVILY_API_KEY']!, + query: 'Should I invest in Apple right now?', + includeAnswer: true, + includeImages: true, + includeRawContent: true, + maxResults: 3, + ), + ); + expect(res.answer, isNotEmpty); + expect(res.query, 'Should I invest in Apple right now?'); + expect(res.responseTime, greaterThan(0)); + expect(res.images, isNotEmpty); + expect(res.results, hasLength(3)); + final result = res.results.first; + expect(result.title, isNotEmpty); + expect(result.url, isNotEmpty); + expect(result.content, isNotEmpty); + expect(result.rawContent, isNotEmpty); + expect(result.score, greaterThan(0)); + }); + }); +} From ed4c28ee6b7d549639a64e1350e863c780005f2d Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 09:56:10 +0200 Subject: [PATCH 161/251] feat: Add support for usage metadata in ChatFirebaseVertexAI (#457) --- melos.yaml | 6 +- 
.../Flutter/GeneratedPluginRegistrant.swift | 2 + .../langchain_firebase/example/pubspec.lock | 60 +++++++++++++------ .../langchain_firebase/example/pubspec.yaml | 2 +- .../src/chat_models/vertex_ai/mappers.dart | 10 ++-- packages/langchain_firebase/pubspec.lock | 60 +++++++++++++------ packages/langchain_firebase/pubspec.yaml | 8 +-- 7 files changed, 98 insertions(+), 50 deletions(-) diff --git a/melos.yaml b/melos.yaml index 8390e622..0f524a0a 100644 --- a/melos.yaml +++ b/melos.yaml @@ -32,9 +32,9 @@ command: csv: ^6.0.0 equatable: ^2.0.5 fetch_client: ^1.0.2 - firebase_app_check: ^0.2.2+5 - firebase_core: ^2.31.0 - firebase_vertexai: ^0.1.0 + firebase_app_check: ^0.3.0 + firebase_core: ^3.1.0 + firebase_vertexai: ^0.2.2 flat_buffers: ^23.5.26 flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 diff --git a/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift b/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift index 2884d031..a2fafff9 100644 --- a/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift +++ b/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift @@ -7,10 +7,12 @@ import Foundation import cloud_firestore import firebase_app_check +import firebase_auth import firebase_core func RegisterGeneratedPlugins(registry: FlutterPluginRegistry) { FLTFirebaseFirestorePlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseFirestorePlugin")) FLTFirebaseAppCheckPlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseAppCheckPlugin")) + FLTFirebaseAuthPlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseAuthPlugin")) FLTFirebaseCorePlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseCorePlugin")) } diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 87d91077..4b481fb2 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _flutterfire_internals - sha256: "2350805d7afefb0efe7acd325cb19d3ae8ba4039b906eade3807ffb69938a01f" + sha256: "0816f12bbbd9e21f72ea8592b11bce4a628d4e5cb7a81ff9f1eee4f3dc23206e" url: "https://pub.dev" source: hosted - version: "1.3.33" + version: "1.3.37" args: dependency: transitive description: @@ -117,58 +117,82 @@ packages: dependency: transitive description: name: firebase_app_check - sha256: f66b67027de548b6f8b61c7aa752a24804104e7f463723d202ae9c6d9520fa6b + sha256: "8aedc3b274826f923f2cf2d61cddeb014c113fd8604373e2fe3a2068b3f496e7" url: "https://pub.dev" source: hosted - version: "0.2.2+5" + version: "0.3.0+1" firebase_app_check_platform_interface: dependency: transitive description: name: firebase_app_check_platform_interface - sha256: c06fcb2381ff2566f4ebe1a1df2e1798de3d0fad531f673e8539368f33cf6b34 + sha256: "93e8aeeb5659c4926682299f175c033fd955fe3a2aa3b2c9a34c55af1ba25f10" url: "https://pub.dev" source: hosted - version: "0.1.0+27" + version: "0.1.0+31" firebase_app_check_web: dependency: transitive description: name: firebase_app_check_web - sha256: "88c8d792d429a43767461312f29baa5d3b76cd0453cf48dd008f8f94393221c1" + sha256: c2933fc26b73d02b791291df00446a6dbf99d1b59e038bb55cbbec74fcb40c4a url: "https://pub.dev" source: hosted - version: "0.1.2+5" + version: "0.1.2+9" + firebase_auth: + dependency: transitive + description: + name: firebase_auth + sha256: 
"3af60a78e92567af3d9a5e25d3955f0f6a3f7a33b900724c1c4c336ff5e44200" + url: "https://pub.dev" + source: hosted + version: "5.1.0" + firebase_auth_platform_interface: + dependency: transitive + description: + name: firebase_auth_platform_interface + sha256: "6941c07a1d129a8b834f85b6673d3455f24102b6338346596c26ef3be2c106ce" + url: "https://pub.dev" + source: hosted + version: "7.4.0" + firebase_auth_web: + dependency: transitive + description: + name: firebase_auth_web + sha256: "5c3f6b45dc141cec858c050d6a6f07bdbfab45ab92a68b32be4b08805bdcadaa" + url: "https://pub.dev" + source: hosted + version: "5.12.2" firebase_core: dependency: "direct main" description: name: firebase_core - sha256: "372d94ced114b9c40cb85e18c50ac94a7e998c8eec630c50d7aec047847d27bf" + sha256: fae4ab4317c2a7afb13d44ef1e3f9f28a630e10016bc5cfe761e8e6a0ed7816a url: "https://pub.dev" source: hosted - version: "2.31.0" + version: "3.1.0" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: c437ae5d17e6b5cc7981cf6fd458a5db4d12979905f9aafd1fea930428a9fe63 + sha256: "1003a5a03a61fc9a22ef49f37cbcb9e46c86313a7b2e7029b9390cf8c6fc32cb" url: "https://pub.dev" source: hosted - version: "5.0.0" + version: "5.1.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: "43d9e951ac52b87ae9cc38ecdcca1e8fa7b52a1dd26a96085ba41ce5108db8e9" + sha256: "6643fe3dbd021e6ccfb751f7882b39df355708afbdeb4130fc50f9305a9d1a3d" url: "https://pub.dev" source: hosted - version: "2.17.0" + version: "2.17.2" firebase_vertexai: dependency: transitive description: name: firebase_vertexai - sha256: "6e61f6717bee3ab563e8e506e0fed98761f98c181626c62d924d06598786e95e" + sha256: a96bc9e8a6e1da0c4bbda2dd24f03b74e069449a3fa7940c87ec611bfc96633d url: "https://pub.dev" source: hosted - version: "0.1.0" + version: "0.2.2" fixnum: dependency: transitive description: @@ -212,10 +236,10 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be + sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "0.4.3" http: dependency: transitive description: diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index 5b635cc5..ff8593ef 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -9,7 +9,7 @@ environment: dependencies: cupertino_icons: ^1.0.6 - firebase_core: ^2.31.0 + firebase_core: ^3.1.0 flutter: sdk: flutter flutter_markdown: ^0.6.22 diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart index 05840e8f..3d649592 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart @@ -134,11 +134,11 @@ extension GenerateContentResponseMapper on f.GenerateContentResponse { .toList(growable: false), 'finish_message': candidate.finishMessage, }, - usage: const LanguageModelUsage( - // promptTokens: usageMetadata?.promptTokenCount, // not yet supported - // responseTokens: usageMetadata?.candidatesTokenCount, - // totalTokens: usageMetadata?.totalTokenCount, - ), + usage: LanguageModelUsage( + promptTokens: usageMetadata?.promptTokenCount, + responseTokens: 
usageMetadata?.candidatesTokenCount, + totalTokens: usageMetadata?.totalTokenCount, + ), ); } diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index eee61f63..1c5e494a 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _flutterfire_internals - sha256: "2350805d7afefb0efe7acd325cb19d3ae8ba4039b906eade3807ffb69938a01f" + sha256: "0816f12bbbd9e21f72ea8592b11bce4a628d4e5cb7a81ff9f1eee4f3dc23206e" url: "https://pub.dev" source: hosted - version: "1.3.33" + version: "1.3.37" async: dependency: transitive description: @@ -101,58 +101,82 @@ packages: dependency: "direct main" description: name: firebase_app_check - sha256: f66b67027de548b6f8b61c7aa752a24804104e7f463723d202ae9c6d9520fa6b + sha256: "8aedc3b274826f923f2cf2d61cddeb014c113fd8604373e2fe3a2068b3f496e7" url: "https://pub.dev" source: hosted - version: "0.2.2+5" + version: "0.3.0+1" firebase_app_check_platform_interface: dependency: transitive description: name: firebase_app_check_platform_interface - sha256: c06fcb2381ff2566f4ebe1a1df2e1798de3d0fad531f673e8539368f33cf6b34 + sha256: "93e8aeeb5659c4926682299f175c033fd955fe3a2aa3b2c9a34c55af1ba25f10" url: "https://pub.dev" source: hosted - version: "0.1.0+27" + version: "0.1.0+31" firebase_app_check_web: dependency: transitive description: name: firebase_app_check_web - sha256: "88c8d792d429a43767461312f29baa5d3b76cd0453cf48dd008f8f94393221c1" + sha256: c2933fc26b73d02b791291df00446a6dbf99d1b59e038bb55cbbec74fcb40c4a url: "https://pub.dev" source: hosted - version: "0.1.2+5" + version: "0.1.2+9" + firebase_auth: + dependency: transitive + description: + name: firebase_auth + sha256: "3af60a78e92567af3d9a5e25d3955f0f6a3f7a33b900724c1c4c336ff5e44200" + url: "https://pub.dev" + source: hosted + version: "5.1.0" + firebase_auth_platform_interface: + dependency: transitive + description: + name: firebase_auth_platform_interface + sha256: "6941c07a1d129a8b834f85b6673d3455f24102b6338346596c26ef3be2c106ce" + url: "https://pub.dev" + source: hosted + version: "7.4.0" + firebase_auth_web: + dependency: transitive + description: + name: firebase_auth_web + sha256: "5c3f6b45dc141cec858c050d6a6f07bdbfab45ab92a68b32be4b08805bdcadaa" + url: "https://pub.dev" + source: hosted + version: "5.12.2" firebase_core: dependency: "direct main" description: name: firebase_core - sha256: "372d94ced114b9c40cb85e18c50ac94a7e998c8eec630c50d7aec047847d27bf" + sha256: fae4ab4317c2a7afb13d44ef1e3f9f28a630e10016bc5cfe761e8e6a0ed7816a url: "https://pub.dev" source: hosted - version: "2.31.0" + version: "3.1.0" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: c437ae5d17e6b5cc7981cf6fd458a5db4d12979905f9aafd1fea930428a9fe63 + sha256: "1003a5a03a61fc9a22ef49f37cbcb9e46c86313a7b2e7029b9390cf8c6fc32cb" url: "https://pub.dev" source: hosted - version: "5.0.0" + version: "5.1.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: "43d9e951ac52b87ae9cc38ecdcca1e8fa7b52a1dd26a96085ba41ce5108db8e9" + sha256: "6643fe3dbd021e6ccfb751f7882b39df355708afbdeb4130fc50f9305a9d1a3d" url: "https://pub.dev" source: hosted - version: "2.17.0" + version: "2.17.2" firebase_vertexai: dependency: "direct main" description: name: firebase_vertexai - sha256: "6e61f6717bee3ab563e8e506e0fed98761f98c181626c62d924d06598786e95e" + sha256: 
a96bc9e8a6e1da0c4bbda2dd24f03b74e069449a3fa7940c87ec611bfc96633d url: "https://pub.dev" source: hosted - version: "0.1.0" + version: "0.2.2" fixnum: dependency: transitive description: @@ -180,10 +204,10 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be + sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "0.4.3" http: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 4fd8030c..382e6418 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -19,11 +19,9 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" - firebase_app_check: ^0.2.2+5 - firebase_core: ^2.31.0 - firebase_auth: ^5.1.0 - cloud_firestore: ^4.17.0 - firebase_vertexai: ^0.1.0 + firebase_app_check: ^0.3.0 + firebase_core: ^3.1.0 + firebase_vertexai: ^0.2.2 langchain_core: ^0.3.2 meta: ^1.11.0 uuid: ^4.3.3 From c0d291e30d3194d6fa3a8da4ab9c1c9f36b3ea0f Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 10:09:57 +0200 Subject: [PATCH 162/251] feat!: Update ChatFirebaseVertexAI default model to gemini-1.5-flash (#458) --- .../integrations/firebase_vertex_ai.md | 26 ++++----- .../langchain_firebase/example/lib/main.dart | 2 +- .../example/web/flutter_bootstrap.js | 12 +++++ .../langchain_firebase/example/web/index.html | 54 ++++--------------- .../vertex_ai/chat_firebase_vertex_ai.dart | 22 ++++---- .../lib/src/chat_models/vertex_ai/types.dart | 2 +- 6 files changed, 47 insertions(+), 71 deletions(-) create mode 100644 packages/langchain_firebase/example/web/flutter_bootstrap.js diff --git a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md index ef8e03d0..cd33daa2 100644 --- a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md +++ b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md @@ -66,22 +66,22 @@ print(res); ## Available models The following models are available: -- `gemini-1.0-pro` - * text -> text model - * Max input token: 30720 - * Max output tokens: 2048 -- `gemini-1.0-pro-vision`: - * text / image -> text model - * Max input token: 12288 - * Max output tokens: 4096 -- `gemini-1.5-pro-preview-0514`: +- `gemini-1.5-flash`: * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 -- `gemini-1.5-flash-preview-0514`: +- `gemini-1.5-pro`: * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 +- `gemini-1.0-pro-vision`: + * text / image -> text model + * Max input token: 12288 + * Max output tokens: 4096 +- `gemini-1.0-pro` + * text -> text model + * Max input token: 30720 + * Max output tokens: 2048 Mind that this list may not be up-to-date. Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) for the updated list. @@ -90,7 +90,7 @@ Mind that this list may not be up-to-date. 
Refer to the [documentation](https:// ```dart final chatModel = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro-preview-0514', + model: 'gemini-1.5-pro', ), ); final res = await chatModel.invoke( @@ -122,7 +122,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ final chatModel = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro-preview-0514', + model: 'gemini-1.5-pro', ), ); @@ -160,7 +160,7 @@ const tool = ToolSpec( ); final chatModel = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro-preview-0514', + model: 'gemini-1.5-pro', temperature: 0, tools: [tool], ), diff --git a/packages/langchain_firebase/example/lib/main.dart b/packages/langchain_firebase/example/lib/main.dart index 44e019e9..f9d5db92 100644 --- a/packages/langchain_firebase/example/lib/main.dart +++ b/packages/langchain_firebase/example/lib/main.dart @@ -155,7 +155,7 @@ class _ChatWidgetState extends State { _model = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro-preview-0514', + model: 'gemini-1.5-pro', tools: [exchangeRateTool], ), // location: 'us-central1', diff --git a/packages/langchain_firebase/example/web/flutter_bootstrap.js b/packages/langchain_firebase/example/web/flutter_bootstrap.js new file mode 100644 index 00000000..8ce49d8a --- /dev/null +++ b/packages/langchain_firebase/example/web/flutter_bootstrap.js @@ -0,0 +1,12 @@ +{{flutter_js}} +{{flutter_build_config}} + +_flutter.loader.load({ + serviceWorkerSettings: { + serviceWorkerVersion: {{flutter_service_worker_version}}, + }, + onEntrypointLoaded: async function(engineInitializer) { + const appRunner = await engineInitializer.initializeEngine({useColorEmoji: true}); + await appRunner.runApp(); + }, +}); diff --git a/packages/langchain_firebase/example/web/index.html b/packages/langchain_firebase/example/web/index.html index 27ef6265..cce674b5 100644 --- a/packages/langchain_firebase/example/web/index.html +++ b/packages/langchain_firebase/example/web/index.html @@ -1,61 +1,25 @@ - - + - - - - + + + + - - - example - + - - - + VertexAI for Firebase in LangChain.dart + - + diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 1a3863b4..3d58f8ea 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -36,22 +36,22 @@ import 'types.dart'; /// ### Available models /// /// The following models are available: -/// - `gemini-1.0-pro` -/// * text -> text model -/// * Max input token: 30720 -/// * Max output tokens: 2048 -/// - `gemini-1.0-pro-vision`: -/// * text / image -> text model -/// * Max input token: 12288 -/// * Max output tokens: 4096 -/// - `gemini-1.5-pro-preview-0514`: +/// - `gemini-1.5-flash`: /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 -/// - `gemini-1.5-flash-preview-0514`: +/// - `gemini-1.5-pro`: /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 +/// - `gemini-1.0-pro-vision`: +/// * text / image -> text model +/// * Max input token: 12288 +/// * Max output tokens: 4096 +/// - `gemini-1.0-pro` +/// * text -> text model +/// * Max input token: 30720 +/// * Max output tokens: 
2048 /// /// Mind that this list may not be up-to-date. /// Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) @@ -132,7 +132,7 @@ import 'types.dart'; /// ); /// final chatModel = ChatFirebaseVertexAI( /// defaultOptions: ChatFirebaseVertexAIOptions( -/// model: 'gemini-1.5-pro-preview-0514', +/// model: 'gemini-1.5-pro', /// temperature: 0, /// tools: [tool], /// ), diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart index d41e4032..7a0ddbdb 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart @@ -6,7 +6,7 @@ import 'package:langchain_core/chat_models.dart'; class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// {@macro chat_firebase_vertex_ai_options} const ChatFirebaseVertexAIOptions({ - this.model = 'gemini-1.0-pro', + this.model = 'gemini-1.5-flash', this.topP, this.topK, this.candidateCount, From 1f7effc6d45852130ffa7d3f52158b73d902563c Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 10:27:18 +0200 Subject: [PATCH 163/251] refactor: Simplify how tools are passed to the internal Firebase client (#459) --- .../vertex_ai/chat_firebase_vertex_ai.dart | 40 +++++-------------- 1 file changed, 11 insertions(+), 29 deletions(-) diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 3d58f8ea..47661d68 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -4,7 +4,6 @@ import 'package:firebase_core/firebase_core.dart'; import 'package:firebase_vertexai/firebase_vertexai.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/prompts.dart'; -import 'package:langchain_core/tools.dart'; import 'package:uuid/uuid.dart'; import 'mappers.dart'; @@ -193,24 +192,20 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// The current system instruction set in [_firebaseClient]; String? _currentSystemInstruction; - /// The current tools set in [_firebaseClient]; - List? _currentTools; - - /// The current tool choice set in [_firebaseClient]; - ChatToolChoice? _currentToolChoice; - @override Future invoke( final PromptValue input, { final ChatFirebaseVertexAIOptions? options, }) async { final id = _uuid.v4(); - final (model, prompt, safetySettings, generationConfig) = + final (model, prompt, safetySettings, generationConfig, tools, toolConfig) = _generateCompletionRequest(input.toChatMessages(), options: options); final completion = await _firebaseClient.generateContent( prompt, safetySettings: safetySettings, generationConfig: generationConfig, + tools: tools, + toolConfig: toolConfig, ); return completion.toChatResult(id, model); } @@ -221,13 +216,15 @@ class ChatFirebaseVertexAI extends BaseChatModel { final ChatFirebaseVertexAIOptions? 
options, }) { final id = _uuid.v4(); - final (model, prompt, safetySettings, generationConfig) = + final (model, prompt, safetySettings, generationConfig, tools, toolConfig) = _generateCompletionRequest(input.toChatMessages(), options: options); return _firebaseClient .generateContentStream( prompt, safetySettings: safetySettings, generationConfig: generationConfig, + tools: tools, + toolConfig: toolConfig, ) .map((final completion) => completion.toChatResult(id, model)); } @@ -238,6 +235,8 @@ class ChatFirebaseVertexAI extends BaseChatModel { Iterable prompt, List? safetySettings, GenerationConfig? generationConfig, + List? tools, + ToolConfig? toolConfig, ) _generateCompletionRequest( final List messages, { final ChatFirebaseVertexAIOptions? options, @@ -260,6 +259,8 @@ class ChatFirebaseVertexAI extends BaseChatModel { topP: options?.topP ?? defaultOptions.topP, topK: options?.topK ?? defaultOptions.topK, ), + (options?.tools ?? defaultOptions.tools)?.toToolList(), + (options?.toolChoice ?? defaultOptions.toolChoice)?.toToolConfig(), ); } @@ -288,8 +289,6 @@ class ChatFirebaseVertexAI extends BaseChatModel { GenerativeModel _createFirebaseClient( final String model, { final String? systemInstruction, - final List? tools, - final ChatToolChoice? toolChoice, }) { return FirebaseVertexAI.instanceFor( app: app, @@ -300,8 +299,6 @@ class ChatFirebaseVertexAI extends BaseChatModel { model: model, systemInstruction: systemInstruction != null ? Content.system(systemInstruction) : null, - tools: tools?.toToolList(), - toolConfig: toolChoice?.toToolConfig(), ); } @@ -309,14 +306,10 @@ class ChatFirebaseVertexAI extends BaseChatModel { void _recreateFirebaseClient( final String model, final String? systemInstruction, - final List? tools, - final ChatToolChoice? toolChoice, ) { _firebaseClient = _createFirebaseClient( model, systemInstruction: systemInstruction, - tools: tools, - toolChoice: toolChoice, ); } @@ -332,9 +325,6 @@ class ChatFirebaseVertexAI extends BaseChatModel { ? messages.firstOrNull?.contentAsString : null; - final tools = options?.tools ?? defaultOptions.tools; - final toolChoice = options?.toolChoice ?? 
defaultOptions.toolChoice; - bool recreate = false; if (model != _currentModel) { _currentModel = model; @@ -344,17 +334,9 @@ class ChatFirebaseVertexAI extends BaseChatModel { _currentSystemInstruction = systemInstruction; recreate = true; } - if (!const ListEquality().equals(tools, _currentTools)) { - _currentTools = tools; - recreate = true; - } - if (toolChoice != _currentToolChoice) { - _currentToolChoice = toolChoice; - recreate = true; - } if (recreate) { - _recreateFirebaseClient(model, systemInstruction, tools, toolChoice); + _recreateFirebaseClient(model, systemInstruction); } } } From 0328bf488d3d52a193d961e0c4db31271f55d363 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 10:36:29 +0200 Subject: [PATCH 164/251] feat: Add support for Firebase Auth in ChatFirebaseVertexAI (#460) --- melos.yaml | 1 + .../vertex_ai/chat_firebase_vertex_ai.dart | 5 +++++ .../src/chat_models/vertex_ai/mappers.dart | 22 ++++++------------- packages/langchain_firebase/pubspec.lock | 2 +- packages/langchain_firebase/pubspec.yaml | 1 + 5 files changed, 15 insertions(+), 16 deletions(-) diff --git a/melos.yaml b/melos.yaml index 0f524a0a..0d9dde51 100644 --- a/melos.yaml +++ b/melos.yaml @@ -33,6 +33,7 @@ command: equatable: ^2.0.5 fetch_client: ^1.0.2 firebase_app_check: ^0.3.0 + firebase_auth: ^5.1.0 firebase_core: ^3.1.0 firebase_vertexai: ^0.2.2 flat_buffers: ^23.5.26 diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 47661d68..251f3913 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -1,5 +1,6 @@ import 'package:collection/collection.dart'; import 'package:firebase_app_check/firebase_app_check.dart'; +import 'package:firebase_auth/firebase_auth.dart'; import 'package:firebase_core/firebase_core.dart'; import 'package:firebase_vertexai/firebase_vertexai.dart'; import 'package:langchain_core/chat_models.dart'; @@ -157,6 +158,7 @@ class ChatFirebaseVertexAI extends BaseChatModel { ), this.app, this.appCheck, + this.auth, this.options, this.location, }) : _currentModel = defaultOptions.model ?? '' { @@ -171,6 +173,9 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// The optional [FirebaseAppCheck] to use to protect the project from abuse. final FirebaseAppCheck? appCheck; + /// The optional [FirebaseAuth] to use for authentication. + final FirebaseAuth? auth; + /// Configuration parameters for sending requests to Firebase. final RequestOptions? 
options; diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart index 3d649592..d256b815 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart @@ -217,45 +217,38 @@ extension ChatToolListMapper on List { switch (type) { case 'string': if (enumValues != null) { - return f.Schema( - f.SchemaType.string, + return f.Schema.enumString( enumValues: enumValues, description: description, nullable: nullable, - format: 'enum', ); } else { - return f.Schema( - f.SchemaType.string, + return f.Schema.string( description: description, nullable: nullable, ); } case 'number': - return f.Schema( - f.SchemaType.number, + return f.Schema.number( description: description, nullable: nullable, format: format, ); case 'integer': - return f.Schema( - f.SchemaType.integer, + return f.Schema.integer( description: description, nullable: nullable, format: format, ); case 'boolean': - return f.Schema( - f.SchemaType.boolean, + return f.Schema.boolean( description: description, nullable: nullable, ); case 'array': if (items != null) { final itemsSchema = _mapJsonSchemaToSchema(items); - return f.Schema( - f.SchemaType.array, + return f.Schema.array( description: description, nullable: nullable, items: itemsSchema, @@ -267,8 +260,7 @@ extension ChatToolListMapper on List { final propertiesSchema = properties.map( (key, value) => MapEntry(key, _mapJsonSchemaToSchema(value)), ); - return f.Schema( - f.SchemaType.object, + return f.Schema.object( properties: propertiesSchema, requiredProperties: requiredProperties, description: description, diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 1c5e494a..62232007 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -122,7 +122,7 @@ packages: source: hosted version: "0.1.2+9" firebase_auth: - dependency: transitive + dependency: "direct main" description: name: firebase_auth sha256: "3af60a78e92567af3d9a5e25d3955f0f6a3f7a33b900724c1c4c336ff5e44200" diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 382e6418..8df04cfd 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -20,6 +20,7 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" firebase_app_check: ^0.3.0 + firebase_auth: ^5.1.0 firebase_core: ^3.1.0 firebase_vertexai: ^0.2.2 langchain_core: ^0.3.2 From f4175f7907b7c22f57916d271f5bb577bc7a73e2 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 11:13:53 +0200 Subject: [PATCH 165/251] feat: Support response MIME type and schema in ChatGoogleGenerativeAI (#461) --- melos.yaml | 2 +- .../google_ai/chat_google_generative_ai.dart | 5 +++ .../src/chat_models/google_ai/mappers.dart | 14 +++++--- .../lib/src/chat_models/google_ai/types.dart | 35 +++++++++++++++++++ packages/langchain_google/pubspec.yaml | 2 +- 5 files changed, 52 insertions(+), 6 deletions(-) diff --git a/melos.yaml b/melos.yaml index 0d9dde51..56d0555c 100644 --- a/melos.yaml +++ b/melos.yaml @@ -41,7 +41,7 @@ command: flutter_markdown: ^0.6.22 freezed_annotation: ^2.4.1 gcloud: ^0.8.12 - google_generative_ai: 0.4.0 + google_generative_ai: 0.4.3 googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 diff --git 
a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart index b6835b89..3b8ebc1b 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart @@ -322,6 +322,11 @@ class ChatGoogleGenerativeAI temperature: options?.temperature ?? defaultOptions.temperature, topP: options?.topP ?? defaultOptions.topP, topK: options?.topK ?? defaultOptions.topK, + responseMimeType: + options?.responseMimeType ?? defaultOptions.responseMimeType, + responseSchema: + (options?.responseSchema ?? defaultOptions.responseSchema) + ?.toSchema(), ), (options?.tools ?? defaultOptions.tools)?.toToolList(), (options?.toolChoice ?? defaultOptions.toolChoice)?.toToolConfig(), diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart index 8623a2c1..8bf41f84 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart @@ -198,14 +198,17 @@ extension ChatToolListMapper on List { (tool) => g.FunctionDeclaration( tool.name, tool.description, - _mapJsonSchemaToSchema(tool.inputJsonSchema), + tool.inputJsonSchema.toSchema(), ), ).toList(growable: false), ), ]; } +} - g.Schema _mapJsonSchemaToSchema(final Map jsonSchema) { +extension SchemaMapper on Map { + g.Schema toSchema() { + final jsonSchema = this; final type = jsonSchema['type'] as String; final description = jsonSchema['description'] as String?; final nullable = jsonSchema['nullable'] as bool?; @@ -248,7 +251,7 @@ extension ChatToolListMapper on List { ); case 'array': if (items != null) { - final itemsSchema = _mapJsonSchemaToSchema(items); + final itemsSchema = items.toSchema(); return g.Schema.array( items: itemsSchema, description: description, @@ -259,7 +262,10 @@ extension ChatToolListMapper on List { case 'object': if (properties != null) { final propertiesSchema = properties.map( - (key, value) => MapEntry(key, _mapJsonSchemaToSchema(value)), + (key, value) => MapEntry( + key, + (value as Map).toSchema(), + ), ); return g.Schema.object( properties: propertiesSchema, diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart index b3553cab..2971a22b 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart @@ -13,6 +13,8 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { this.maxOutputTokens, this.temperature, this.stopSequences, + this.responseMimeType, + this.responseSchema, this.safetySettings, super.tools, super.toolChoice, @@ -68,6 +70,39 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// The stop sequence will not be included as part of the response. final List? stopSequences; + /// Output response mimetype of the generated candidate text. + /// + /// Supported mimetype: + /// - `text/plain`: (default) Text output. + /// - `application/json`: JSON response in the candidates. + final String? responseMimeType; + + /// Output response schema of the generated candidate text. + /// Following the [JSON Schema specification](https://json-schema.org). 
+ /// + /// - Note: This only applies when the specified ``responseMIMEType`` supports + /// a schema; currently this is limited to `application/json`. + /// + /// Example: + /// ```json + /// { + /// 'type': 'object', + /// 'properties': { + /// 'answer': { + /// 'type': 'string', + /// 'description': 'The answer to the question being asked', + /// }, + /// 'sources': { + /// 'type': 'array', + /// 'items': {'type': 'string'}, + /// 'description': 'The sources used to answer the question', + /// }, + /// }, + /// 'required': ['answer', 'sources'], + /// }, + /// ``` + final Map? responseSchema; + /// A list of unique [ChatGoogleGenerativeAISafetySetting] instances for blocking /// unsafe content. /// diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 0f07b091..a2d2670a 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -20,7 +20,7 @@ dependencies: collection: ">=1.17.0 <1.19.0" fetch_client: ^1.0.2 gcloud: ^0.8.12 - google_generative_ai: 0.4.0 + google_generative_ai: 0.4.3 googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 From 55283186eb84f1a9bd8b9f8f6452539cede04471 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 11:22:44 +0200 Subject: [PATCH 166/251] feat!: Update ChatGoogleGenerativeAI default model to gemini-1.5-flash (#462) --- .../chat_models/integrations/googleai.md | 28 +++++++++---------- .../vertex_ai/chat_firebase_vertex_ai.dart | 2 +- .../google_ai/chat_google_generative_ai.dart | 22 +++++++-------- .../lib/src/chat_models/google_ai/types.dart | 2 +- 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/docs/modules/model_io/models/chat_models/integrations/googleai.md b/docs/modules/model_io/models/chat_models/integrations/googleai.md index 87a43755..033c7672 100644 --- a/docs/modules/model_io/models/chat_models/integrations/googleai.md +++ b/docs/modules/model_io/models/chat_models/integrations/googleai.md @@ -7,22 +7,22 @@ Wrapper around [Google AI for Developers](https://ai.google.dev/) API (aka Gemin To use `ChatGoogleGenerativeAI` you need to have an API key. You can get one [here](https://aistudio.google.com/app/apikey). The following models are available: -- `gemini-1.0-pro` (or `gemini-pro`): - * text -> text model - * Max input token: 30720 - * Max output tokens: 2048 -- `gemini-pro-vision`: - * text / image -> text model - * Max input token: 12288 - * Max output tokens: 4096 -- `gemini-1.5-pro-latest`: text / image -> text model +- `gemini-1.5-flash`: * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 -- `gemini-1.5-flash-latest`: +- `gemini-1.5-pro`: text / image -> text model * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 +- `gemini-pro-vision`: + * text / image -> text model + * Max input token: 12288 + * Max output tokens: 4096 +- `gemini-1.0-pro` (or `gemini-pro`): + * text -> text model + * Max input token: 30720 + * Max output tokens: 2048 Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/models) for the updated list. 
@@ -34,7 +34,7 @@ final apiKey = Platform.environment['GOOGLEAI_API_KEY']; final chatModel = ChatGoogleGenerativeAI( apiKey: apiKey, defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro-latest', + model: 'gemini-1.5-pro', temperature: 0, ), ); @@ -63,7 +63,7 @@ final apiKey = Platform.environment['GOOGLEAI_API_KEY']; final chatModel = ChatGoogleGenerativeAI( apiKey: apiKey, defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro-latest', + model: 'gemini-1.5-pro', temperature: 0, ), ); @@ -99,7 +99,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ final chatModel = ChatGoogleGenerativeAI( apiKey: apiKey, defaultOptions: const ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro-latest', + model: 'gemini-1.5-pro', temperature: 0, ), ); @@ -138,7 +138,7 @@ const tool = ToolSpec( ); final chatModel = ChatGoogleGenerativeAI( defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro-latest', + model: 'gemini-1.5-pro', temperature: 0, tools: [tool], ), diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 251f3913..94da974d 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -154,7 +154,7 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// - [ChatFirebaseVertexAI.location] ChatFirebaseVertexAI({ super.defaultOptions = const ChatFirebaseVertexAIOptions( - model: 'gemini-1.0-pro', + model: 'gemini-1.5-flash', ), this.app, this.appCheck, diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart index 3b8ebc1b..58934755 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart @@ -31,22 +31,22 @@ import 'types.dart'; /// ### Available models /// /// The following models are available: -/// - `gemini-1.0-pro` (or `gemini-pro`): -/// * text -> text model -/// * Max input token: 30720 -/// * Max output tokens: 2048 -/// - `gemini-pro-vision`: -/// * text / image -> text model -/// * Max input token: 12288 -/// * Max output tokens: 4096 -/// - `gemini-1.5-pro-latest`: text / image -> text model +/// - `gemini-1.5-flash`: /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 -/// - `gemini-1.5-flash-latest`: +/// - `gemini-1.5-pro`: text / image -> text model /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 +/// - `gemini-pro-vision`: +/// * text / image -> text model +/// * Max input token: 12288 +/// * Max output tokens: 4096 +/// - `gemini-1.0-pro` (or `gemini-pro`): +/// * text -> text model +/// * Max input token: 30720 +/// * Max output tokens: 2048 /// /// Mind that this list may not be up-to-date. /// Refer to the [documentation](https://ai.google.dev/models) for the updated list. @@ -211,7 +211,7 @@ class ChatGoogleGenerativeAI final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatGoogleGenerativeAIOptions( - model: 'gemini-pro', + model: 'gemini-1.5-flash', ), }) : _currentModel = defaultOptions.model ?? 
'', _httpClient = createDefaultHttpClient( diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart index 2971a22b..c86c80a5 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart @@ -6,7 +6,7 @@ import 'package:langchain_core/chat_models.dart'; class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// {@macro chat_google_generative_ai_options} const ChatGoogleGenerativeAIOptions({ - this.model = 'gemini-pro', + this.model = 'gemini-1.5-flash', this.topP, this.topK, this.candidateCount, From d0874fd256460ad3fdfc4cad8389b5c2c5a5a139 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 11:26:23 +0200 Subject: [PATCH 167/251] feat: Support response MIME type in ChatFirebaseVertexAI (#461) (#463) --- .../vertex_ai/chat_firebase_vertex_ai.dart | 6 ++++++ .../lib/src/chat_models/vertex_ai/mappers.dart | 14 ++++++++++---- .../lib/src/chat_models/vertex_ai/types.dart | 8 ++++++++ 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 94da974d..77ce67d6 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -263,6 +263,12 @@ class ChatFirebaseVertexAI extends BaseChatModel { temperature: options?.temperature ?? defaultOptions.temperature, topP: options?.topP ?? defaultOptions.topP, topK: options?.topK ?? defaultOptions.topK, + responseMimeType: + options?.responseMimeType ?? defaultOptions.responseMimeType, + // responseSchema not supported yet + // responseSchema: + // (options?.responseSchema ?? defaultOptions.responseSchema) + // ?.toSchema(), ), (options?.tools ?? defaultOptions.tools)?.toToolList(), (options?.toolChoice ?? 
defaultOptions.toolChoice)?.toToolConfig(), diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart index d256b815..41517a64 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart @@ -197,14 +197,17 @@ extension ChatToolListMapper on List { (tool) => f.FunctionDeclaration( tool.name, tool.description, - _mapJsonSchemaToSchema(tool.inputJsonSchema), + tool.inputJsonSchema.toSchema(), ), ).toList(growable: false), ), ]; } +} - f.Schema _mapJsonSchemaToSchema(final Map jsonSchema) { +extension SchemaMapper on Map { + f.Schema toSchema() { + final jsonSchema = this; final type = jsonSchema['type'] as String; final description = jsonSchema['description'] as String?; final nullable = jsonSchema['nullable'] as bool?; @@ -247,7 +250,7 @@ extension ChatToolListMapper on List { ); case 'array': if (items != null) { - final itemsSchema = _mapJsonSchemaToSchema(items); + final itemsSchema = items.toSchema(); return f.Schema.array( description: description, nullable: nullable, @@ -258,7 +261,10 @@ extension ChatToolListMapper on List { case 'object': if (properties != null) { final propertiesSchema = properties.map( - (key, value) => MapEntry(key, _mapJsonSchemaToSchema(value)), + (key, value) => MapEntry( + key, + (value as Map).toSchema(), + ), ); return f.Schema.object( properties: propertiesSchema, diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart index 7a0ddbdb..d2aee55d 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart @@ -13,6 +13,7 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { this.maxOutputTokens, this.temperature, this.stopSequences, + this.responseMimeType, this.safetySettings, super.tools, super.toolChoice, @@ -69,6 +70,13 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// The stop sequence will not be included as part of the response. final List? stopSequences; + /// Output response mimetype of the generated candidate text. + /// + /// Supported mimetype: + /// - `text/plain`: (default) Text output. + /// - `application/json`: JSON response in the candidates. + final String? responseMimeType; + /// A list of unique [ChatFirebaseVertexAISafetySetting] instances for blocking /// unsafe content. /// From 4558dd408898bf996774a12938600da7c523cc6c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jun 2024 10:48:59 +0200 Subject: [PATCH 168/251] build(deps): bump actions/checkout from 4.1.6 to 4.1.7 (#464) Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.6 to 4.1.7. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/a5ac7e51b41094c92402da3b24376905380afc29...692973e3d937129bcbf40652eb9f2f61becf3332) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docs.yaml | 2 +- .github/workflows/test.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 5520d768..98f80b82 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -27,7 +27,7 @@ jobs: timeout-minutes: 30 steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 - name: Setup Pages uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 2b4ff0c5..0e6c4e20 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: "${{ github.event.pull_request.base.sha }}" # Required for pull_request_target fetch-depth: 0 From c8b7ad15f72894007eeaf0bb8dc7d11d571f550f Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 20 Jun 2024 23:39:56 +0200 Subject: [PATCH 169/251] feat: Add support for TavilySearchResultsTool and TavilyAnswerTool (#467) --- examples/browser_summarizer/pubspec.lock | 7 + .../browser_summarizer/pubspec_overrides.yaml | 4 +- examples/docs_examples/pubspec.lock | 11 +- examples/docs_examples/pubspec_overrides.yaml | 4 +- examples/hello_world_flutter/pubspec.lock | 4 +- examples/wikivoyage_eu/pubspec.lock | 7 + examples/wikivoyage_eu/pubspec_overrides.yaml | 4 +- packages/langchain/README.md | 47 ++++--- .../langchain_chroma/pubspec_overrides.yaml | 4 +- packages/langchain_community/README.md | 4 + .../lib/src/tools/tavily/mappers.dart | 21 +++ .../lib/src/tools/tavily/tavily.dart | 3 + .../lib/src/tools/tavily/tavily_answer.dart | 102 ++++++++++++++ .../tools/tavily/tavily_search_results.dart | 130 +++++++++++++++++ .../lib/src/tools/tavily/types.dart | 131 ++++++++++++++++++ .../lib/src/tools/tools.dart | 1 + packages/langchain_community/pubspec.yaml | 1 + .../pubspec_overrides.yaml | 4 +- .../test/tools/tavily_test.dart | 31 +++++ .../langchain_core/lib/src/tools/base.dart | 1 - .../langchain_openai/pubspec_overrides.yaml | 4 +- .../langchain_supabase/pubspec_overrides.yaml | 4 +- 22 files changed, 494 insertions(+), 35 deletions(-) create mode 100644 packages/langchain_community/lib/src/tools/tavily/mappers.dart create mode 100644 packages/langchain_community/lib/src/tools/tavily/tavily.dart create mode 100644 packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart create mode 100644 packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart create mode 100644 packages/langchain_community/lib/src/tools/tavily/types.dart create mode 100644 packages/langchain_community/test/tools/tavily_test.dart diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index 17d43f80..c32f085f 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -491,6 +491,13 @@ packages: url: "https://pub.dev" source: hosted version: "1.2.0" + tavily_dart: + dependency: "direct overridden" + description: + path: "../../packages/tavily_dart" + relative: true + source: path + version: "0.0.1-dev.1" term_glyph: dependency: transitive 
description: diff --git a/examples/browser_summarizer/pubspec_overrides.yaml b/examples/browser_summarizer/pubspec_overrides.yaml index 3947b2ae..49be75a7 100644 --- a/examples/browser_summarizer/pubspec_overrides.yaml +++ b/examples/browser_summarizer/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_community +# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_community,tavily_dart dependency_overrides: langchain: path: ../../packages/langchain @@ -10,3 +10,5 @@ dependency_overrides: path: ../../packages/langchain_openai openai_dart: path: ../../packages/openai_dart + tavily_dart: + path: ../../packages/tavily_dart diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 6199523b..2a5b086d 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -148,10 +148,10 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be + sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "0.4.3" google_identity_services_web: dependency: transitive description: @@ -413,6 +413,13 @@ packages: url: "https://pub.dev" source: hosted version: "1.2.0" + tavily_dart: + dependency: "direct overridden" + description: + path: "../../packages/tavily_dart" + relative: true + source: path + version: "0.0.1-dev.1" term_glyph: dependency: transitive description: diff --git a/examples/docs_examples/pubspec_overrides.yaml b/examples/docs_examples/pubspec_overrides.yaml index e02da308..cc3f10d6 100644 --- a/examples/docs_examples/pubspec_overrides.yaml +++ b/examples/docs_examples/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: chromadb,langchain,langchain_chroma,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai,langchain_core,langchain_community +# melos_managed_dependency_overrides: chromadb,langchain,langchain_chroma,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai,langchain_core,langchain_community,tavily_dart dependency_overrides: chromadb: path: ../../packages/chromadb @@ -24,5 +24,7 @@ dependency_overrides: path: ../../packages/ollama_dart openai_dart: path: ../../packages/openai_dart + tavily_dart: + path: ../../packages/tavily_dart vertex_ai: path: ../../packages/vertex_ai diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index d9c9c29f..9bbfa0f2 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -138,10 +138,10 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be + sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "0.4.3" google_identity_services_web: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index 49dc9df4..18f2890b 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -292,6 +292,13 @@ packages: url: "https://pub.dev" source: hosted version: 
"1.2.0" + tavily_dart: + dependency: "direct overridden" + description: + path: "../../packages/tavily_dart" + relative: true + source: path + version: "0.0.1-dev.1" term_glyph: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec_overrides.yaml b/examples/wikivoyage_eu/pubspec_overrides.yaml index 075ddc4f..6f7e46d1 100644 --- a/examples/wikivoyage_eu/pubspec_overrides.yaml +++ b/examples/wikivoyage_eu/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_core,langchain_community,langchain_ollama,ollama_dart +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_community,langchain_ollama,ollama_dart,tavily_dart dependency_overrides: langchain: path: ../../packages/langchain @@ -10,3 +10,5 @@ dependency_overrides: path: ../../packages/langchain_ollama ollama_dart: path: ../../packages/ollama_dart + tavily_dart: + path: ../../packages/tavily_dart diff --git a/packages/langchain/README.md b/packages/langchain/README.md index b86c0eae..83608f5a 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -68,25 +68,25 @@ Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/pack

    -| Package | Version | Description | -|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | -| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | -| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components | -| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4 Turbo, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | -| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | -| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | -| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| -| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | -| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | +| Package | Version | Description | +|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | +| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | +| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components and utilities | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | +| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | Functionality provided by each integration package: | Package | LLMs | Chat models | Embeddings | Vector stores | Chains | Agents | Tools | |---------------------------------------------------------------------|------|-------------|------------|---------------|--------|--------|-------| -| [langchain_community](https://pub.dev/packages/langchain_community) | | | | | | | | +| [langchain_community](https://pub.dev/packages/langchain_community) | | | | ✔ | | | ✔ | | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | | ✔ | ✔ | ✔ | | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | ✔ | | | | | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | | ✔ | | | | | | @@ -98,15 +98,16 @@ Functionality provided by each integration package: The following packages are maintained (and used internally) by LangChain.dart, although they can also be used independently: -| Package | Version | Description | -|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|----------------------------------------------| -| [anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | Anthropic (Claude API) client | -| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | Chroma DB API client | -| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | Google AI for Developers (Gemini API) client | -| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | Mistral AI API client | -| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | Ollama API client | -| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | OpenAI API client | -| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | GCP Vertex AI API client | +| Package | Version | Description | +|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------| +| 
[anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | [Anthropic](https://docs.anthropic.com/en/api) API client | +| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | [Chroma DB](https://trychroma.com/) API client | +| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | [Google AI for Developers](https://ai.google.dev/) API client | +| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | [Mistral AI](https://docs.mistral.ai/api) API client | +| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | [Ollama](https://ollama.ai/) API client | +| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | [OpenAI](https://platform.openai.com/docs/api-reference) API client | +| [tavily_dart](https://pub.dev/packages/tavily_dart) | [![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) | [Tavily](https://tavily.com) API client | +| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | [GCP Vertex AI](https://cloud.google.com/vertex-ai) API client | ## Getting started diff --git a/packages/langchain_chroma/pubspec_overrides.yaml b/packages/langchain_chroma/pubspec_overrides.yaml index 4583d481..3470527c 100644 --- a/packages/langchain_chroma/pubspec_overrides.yaml +++ b/packages/langchain_chroma/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: chromadb,langchain_openai,openai_dart,langchain_core,langchain_community,langchain +# melos_managed_dependency_overrides: chromadb,langchain_openai,openai_dart,langchain_core,langchain_community,langchain,tavily_dart dependency_overrides: chromadb: path: ../chromadb @@ -12,3 +12,5 @@ dependency_overrides: path: ../langchain_openai openai_dart: path: ../openai_dart + tavily_dart: + path: ../tavily_dart diff --git a/packages/langchain_community/README.md b/packages/langchain_community/README.md index b76ee3c3..1dcb80e3 100644 --- a/packages/langchain_community/README.md +++ b/packages/langchain_community/README.md @@ -27,6 +27,10 @@ The most popular third-party integrations have their own packages (e.g. [langcha * `WebBaseLoader`: for web pages. - Tools: * `CalculatorTool`: to calculate math expressions. + * `TavilySearchResultsTool`: returns a list of results for a query using the [Tavily](https://tavily.com) search engine. + * `TavilyAnswerTool`: returns an answer for a query using the [Tavily](https://tavily.com) search engine. +- Vector stores: + * `ObjectBoxVectorStore`: [ObjectBox](https://objectbox.io/) on-device vector database. Check out the [API reference](https://pub.dev/documentation/langchain_community/latest) for more details. 
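For context on the two Tavily tools listed in the README hunk above, here is a minimal usage sketch in the style of the integration test added later in this patch; reading the API key from the `TAVILY_API_KEY` environment variable mirrors that test, and the rest is an illustrative assumption rather than part of the diff itself.

```dart
// Minimal sketch of the new Tavily tools (assumes TAVILY_API_KEY is set).
import 'dart:io';

import 'package:langchain_community/langchain_community.dart';

Future<void> main() async {
  // Returns a list of search results for the query.
  final searchTool = TavilySearchResultsTool(
    apiKey: Platform.environment['TAVILY_API_KEY']!,
  );
  final results =
      await searchTool.invoke('What is the weather like in New York?');
  print(results); // toString() JSON-encodes the list of results.
  searchTool.close();

  // Returns a single answer string for the query.
  final answerTool = TavilyAnswerTool(
    apiKey: Platform.environment['TAVILY_API_KEY']!,
  );
  final answer =
      await answerTool.invoke('What is the weather like in New York?');
  print(answer);
  answerTool.close();
}
```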
diff --git a/packages/langchain_community/lib/src/tools/tavily/mappers.dart b/packages/langchain_community/lib/src/tools/tavily/mappers.dart new file mode 100644 index 00000000..21e907e5 --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/mappers.dart @@ -0,0 +1,21 @@ +// ignore_for_file: public_member_api_docs +import 'package:tavily_dart/tavily_dart.dart'; + +import 'types.dart'; + +extension TavilySearchDepthX on TavilySearchDepth { + SearchRequestSearchDepth toSearchRequestSearchDepth() => switch (this) { + TavilySearchDepth.basic => SearchRequestSearchDepth.basic, + TavilySearchDepth.advanced => SearchRequestSearchDepth.advanced, + }; +} + +extension TavilySearchResultX on SearchResult { + TavilySearchResult toTavilySearchResult() => TavilySearchResult( + title: title, + url: url, + content: content, + rawContent: rawContent, + score: score, + ); +} diff --git a/packages/langchain_community/lib/src/tools/tavily/tavily.dart b/packages/langchain_community/lib/src/tools/tavily/tavily.dart new file mode 100644 index 00000000..64f26c5d --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/tavily.dart @@ -0,0 +1,3 @@ +export 'tavily_answer.dart'; +export 'tavily_search_results.dart'; +export 'types.dart'; diff --git a/packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart b/packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart new file mode 100644 index 00000000..a5ad637f --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart @@ -0,0 +1,102 @@ +import 'dart:async'; + +import 'package:http/http.dart' as http; +import 'package:langchain_core/tools.dart'; +import 'package:tavily_dart/tavily_dart.dart'; + +import 'mappers.dart'; +import 'tavily_search_results.dart'; +import 'types.dart'; + +/// Tool that queries the [Tavily Search API](https://tavily.com) and +/// gets an answer to the search query. +/// +/// The Tavily API uses API keys for authentication. Visit the +/// [Tavily console](https://app.tavily.com/) to retrieve the API key you'll +/// use in your requests. +/// +/// If you want to get a list of search results instead, use the +/// [TavilySearchResultsTool] instead. +/// +/// Example: +/// ```dart +/// final tool = TavilyAnswerTool( +/// apiKey: Platform.environment['TAVILY_API_KEY']!, +/// ); +/// final res = await tool.invoke('What is the weather like in New York?'); +/// print(res); +/// // The current weather in New York is clear with a temperature of 22.8°C (73.0°F)... +/// ``` +final class TavilyAnswerTool extends StringTool { + /// Creates a [TavilyAnswerTool] instance. + /// + /// Main configuration options: + /// - `apiKey`: your Tavily API key. You can find your API key in the + /// [Tavily console](https://app.tavily.com/). + /// + /// Advance configuration options: + /// - `baseUrl`: the base URL to use. Defaults to Tavily's API URL. You can + /// override this to use a different API URL, or to use a proxy. + /// - `headers`: global headers to send with every request. You can use + /// this to set custom headers, or to override the default headers. + /// - `queryParams`: global query parameters to send with every request. You + /// can use this to set custom query parameters (e.g. Azure OpenAI API + /// required to attach a `version` query parameter to every request). + /// - `client`: the HTTP client to use. You can set your own HTTP client if + /// you need further customization (e.g. to use a Socks5 proxy). + TavilyAnswerTool({ + required this.apiKey, + final String? 
baseUrl, + final Map headers = const {}, + final Map queryParams = const {}, + final http.Client? client, + super.defaultOptions = const TavilyAnswerToolOptions(), + }) : _client = TavilyClient( + baseUrl: baseUrl, + headers: headers, + queryParams: queryParams, + client: client, + ), + super( + name: 'tavily_answer', + description: + 'A search engine optimized for comprehensive, accurate, and trusted answers. ' + 'Useful for when you need to answer questions about current events. ' + 'The tool returns an answer to the search query - not the search results.', + inputDescription: 'The search query to get an answer to. ' + 'Eg: "What is the weather like in New York?"', + ); + + /// A client for interacting with Tavily API. + final TavilyClient _client; + + /// Your Tavily API key. + String apiKey; + + @override + Future invokeInternal( + final String toolInput, { + final TavilyAnswerToolOptions? options, + }) async { + final res = await _client.search( + request: SearchRequest( + apiKey: apiKey, + query: toolInput, + includeAnswer: true, + searchDepth: (options?.searchDepth ?? defaultOptions.searchDepth) + .toSearchRequestSearchDepth(), + maxResults: options?.maxResults ?? defaultOptions.maxResults, + includeDomains: + options?.includeDomains ?? defaultOptions.includeDomains, + excludeDomains: + options?.excludeDomains ?? defaultOptions.excludeDomains, + ), + ); + return res.answer ?? ''; + } + + @override + void close() { + _client.endSession(); + } +} diff --git a/packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart b/packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart new file mode 100644 index 00000000..7e5693c7 --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart @@ -0,0 +1,130 @@ +import 'dart:async'; + +import 'package:http/http.dart' as http; +import 'package:langchain_core/tools.dart'; +import 'package:tavily_dart/tavily_dart.dart'; + +import 'mappers.dart'; +import 'tavily_answer.dart'; +import 'types.dart'; + +/// Tool that queries the [Tavily Search API](https://tavily.com) and +/// gets back a list of search results. +/// +/// The Tavily API uses API keys for authentication. Visit the +/// [Tavily console](https://app.tavily.com/) to retrieve the API key you'll +/// use in your requests. +/// +/// If you want to get directly an answer to a search query, use the +/// [TavilyAnswerTool] instead. +/// +/// Example: +/// ```dart +/// final tool = TavilySearchResultsTool( +/// apiKey: Platform.environment['TAVILY_API_KEY']!, +/// ); +/// final res = await tool.invoke('What is the weather like in New York?'); +/// print(res); +/// // [ +/// // { +/// // "title": "Weather in New York", +/// // "url": "https://www.weatherapi.com/", +/// // "content": "{'location': {'lat': 40.71, 'lon': -74.01}, 'current': {'last_updated': '2024-06-20 17:00', 'temp_c': 31.1, 'condition': {'text': 'Sunny', 'icon': '//cdn.weatherapi.com/weather/64x64/day/113.png'}, 'wind_mph': 2.2, 'wind_kph': 3.6, 'wind_degree': 161, 'wind_dir': 'SSE', 'pressure_mb': 1025.0, 'pressure_in': 30.26, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 48, 'cloud': 0, 'feelslike_c': 33.1, 'feelslike_f': 91.6, 'windchill_c': 29.5, 'windchill_f': 85.0, 'heatindex_c': 30.6, 'heatindex_f': 87.0, 'dewpoint_c': 17.7, 'dewpoint_f': 63.8, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 7.0, 'gust_mph': 16.4, 'gust_kph': 26.4}}", +/// // "score": 0.98855 +/// // }, +/// // ... 
+/// // ] +/// ``` +final class TavilySearchResultsTool + extends Tool { + /// Creates a [TavilySearchResultsTool] instance. + /// + /// Main configuration options: + /// - `apiKey`: your Tavily API key. You can find your API key in the + /// [Tavily console](https://app.tavily.com/). + /// + /// Advance configuration options: + /// - `baseUrl`: the base URL to use. Defaults to Tavily's API URL. You can + /// override this to use a different API URL, or to use a proxy. + /// - `headers`: global headers to send with every request. You can use + /// this to set custom headers, or to override the default headers. + /// - `queryParams`: global query parameters to send with every request. You + /// can use this to set custom query parameters (e.g. Azure OpenAI API + /// required to attach a `version` query parameter to every request). + /// - `client`: the HTTP client to use. You can set your own HTTP client if + /// you need further customization (e.g. to use a Socks5 proxy). + TavilySearchResultsTool({ + required this.apiKey, + final String? baseUrl, + final Map headers = const {}, + final Map queryParams = const {}, + final http.Client? client, + super.defaultOptions = const TavilySearchResultsToolOptions(), + }) : _client = TavilyClient( + baseUrl: baseUrl, + headers: headers, + queryParams: queryParams, + client: client, + ), + super( + name: 'tavily_search_results', + description: + 'A search engine optimized for comprehensive, accurate, and trusted results. ' + 'Useful for when you need to answer questions about current events. ' + 'The tool returns a JSON object with search results.', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'query': { + 'type': 'string', + 'description': 'The search query to look up. ' + 'Eg: "What is the weather like in New York?"', + }, + }, + 'required': ['query'], + }, + ); + + /// A client for interacting with Tavily API. + final TavilyClient _client; + + /// Your Tavily API key. + String apiKey; + + @override + Future invokeInternal( + final String input, { + final TavilySearchResultsToolOptions? options, + }) async { + final res = await _client.search( + request: SearchRequest( + apiKey: apiKey, + query: input, + searchDepth: (options?.searchDepth ?? defaultOptions.searchDepth) + .toSearchRequestSearchDepth(), + maxResults: options?.maxResults ?? defaultOptions.maxResults, + includeRawContent: + options?.includeRawContent ?? defaultOptions.includeRawContent, + includeDomains: + options?.includeDomains ?? defaultOptions.includeDomains, + excludeDomains: + options?.excludeDomains ?? defaultOptions.excludeDomains, + ), + ); + return TavilySearchResults( + results: res.results + .map((r) => r.toTavilySearchResult()) + .toList(growable: false), + ); + } + + @override + String getInputFromJson(final Map json) { + return json['query'] as String; + } + + @override + void close() { + _client.endSession(); + } +} diff --git a/packages/langchain_community/lib/src/tools/tavily/types.dart b/packages/langchain_community/lib/src/tools/tavily/types.dart new file mode 100644 index 00000000..d6dc9134 --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/types.dart @@ -0,0 +1,131 @@ +import 'dart:convert'; + +import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; + +import 'tavily_answer.dart'; +import 'tavily_search_results.dart'; + +/// The depth of the search. +enum TavilySearchDepth { + /// Basic search depth. + basic, + + /// Advanced search depth. 
+ advanced, +} + +/// {@template tavily_search_results} +/// A search results from the Tavily search engine. +/// {@endtemplate} +@immutable +class TavilySearchResults { + /// {@macro tavily_search_results} + const TavilySearchResults({ + required this.results, + }); + + /// The search results. + final List results; + + @override + String toString() { + return json.encode( + results + .map( + (result) => { + 'title': result.title, + 'url': result.url, + 'content': result.content, + 'rawContent': result.rawContent, + 'score': result.score, + }, + ) + .toList(growable: false), + ); + } +} + +/// {@template tavily_search_result} +/// A search result from the Tavily search engine. +/// {@endtemplate} +@immutable +class TavilySearchResult { + /// {@macro tavily_search_result} + const TavilySearchResult({ + required this.title, + required this.url, + required this.content, + this.rawContent, + required this.score, + }); + + /// The title of the search result url. + final String title; + + /// The url of the search result. + final String url; + + /// The most query related content from the scraped url. + final String content; + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + final String? rawContent; + + /// The relevance score of the search result. + final double score; +} + +/// {@template tavily_search_results_tool_options} +/// Generation options to pass into the [TavilySearchResultsTool]. +/// {@endtemplate} +class TavilySearchResultsToolOptions extends ToolOptions { + /// {@macro tavily_search_results_tool_options} + const TavilySearchResultsToolOptions({ + this.maxResults = 5, + this.searchDepth = TavilySearchDepth.basic, + this.includeRawContent = false, + this.includeDomains, + this.excludeDomains, + }); + + /// The number of maximum search results to return. + final int maxResults; + + /// The depth of the search. + final TavilySearchDepth searchDepth; + + /// Include raw content in the search results. + final bool includeRawContent; + + /// A list of domains to specifically include in the search results. + final List? includeDomains; + + /// A list of domains to specifically exclude from the search results. + final List? excludeDomains; +} + +/// {@template tavily_answer_tool_options} +/// Generation options to pass into the [TavilyAnswerTool]. +/// {@endtemplate} +class TavilyAnswerToolOptions extends ToolOptions { + /// {@macro tavily_answer_tool_options} + const TavilyAnswerToolOptions({ + this.maxResults = 5, + this.searchDepth = TavilySearchDepth.basic, + this.includeDomains, + this.excludeDomains, + }); + + /// The number of maximum search results to return. + final int maxResults; + + /// The depth of the search. + final TavilySearchDepth searchDepth; + + /// A list of domains to specifically include in the search results. + final List? includeDomains; + + /// A list of domains to specifically exclude from the search results. + final List? 
excludeDomains; +} diff --git a/packages/langchain_community/lib/src/tools/tools.dart b/packages/langchain_community/lib/src/tools/tools.dart index 9601880a..4aa306f8 100644 --- a/packages/langchain_community/lib/src/tools/tools.dart +++ b/packages/langchain_community/lib/src/tools/tools.dart @@ -1 +1,2 @@ export 'calculator.dart'; +export 'tavily/tavily.dart'; diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index e2286be4..b386fb9d 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -26,6 +26,7 @@ dependencies: math_expressions: ^2.4.0 meta: ^1.11.0 objectbox: ^4.0.1 + tavily_dart: ^0.0.1-dev.1 uuid: ^4.3.3 dev_dependencies: diff --git a/packages/langchain_community/pubspec_overrides.yaml b/packages/langchain_community/pubspec_overrides.yaml index de62cfcc..19febce5 100644 --- a/packages/langchain_community/pubspec_overrides.yaml +++ b/packages/langchain_community/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_core,langchain_openai,openai_dart +# melos_managed_dependency_overrides: langchain_core,langchain_openai,openai_dart,tavily_dart dependency_overrides: langchain_core: path: ../langchain_core @@ -6,3 +6,5 @@ dependency_overrides: path: ../langchain_openai openai_dart: path: ../openai_dart + tavily_dart: + path: ../tavily_dart diff --git a/packages/langchain_community/test/tools/tavily_test.dart b/packages/langchain_community/test/tools/tavily_test.dart new file mode 100644 index 00000000..85214c6c --- /dev/null +++ b/packages/langchain_community/test/tools/tavily_test.dart @@ -0,0 +1,31 @@ +import 'dart:convert'; +import 'dart:io'; + +import 'package:langchain_community/langchain_community.dart'; +import 'package:test/test.dart'; + +void main() { + group('TavilySearchResultsTool tests', () { + test('Calculate expressions', () async { + final tool = TavilySearchResultsTool( + apiKey: Platform.environment['TAVILY_API_KEY']!, + ); + final res = await tool.invoke('What is the weather like in New York?'); + expect(res.results, isNotEmpty); + final jsonString = res.toString(); + expect(() => json.decode(jsonString), returnsNormally); + tool.close(); + }); + }); + + group('TavilyAnswerTool tests', () { + test('Invoke TavilyAnswerTool', () async { + final tool = TavilyAnswerTool( + apiKey: Platform.environment['TAVILY_API_KEY']!, + ); + final res = await tool.invoke('What is the weather like in New York?'); + expect(res, isNotEmpty); + tool.close(); + }); + }); +} diff --git a/packages/langchain_core/lib/src/tools/base.dart b/packages/langchain_core/lib/src/tools/base.dart index e676fc6b..37f9f9d2 100644 --- a/packages/langchain_core/lib/src/tools/base.dart +++ b/packages/langchain_core/lib/src/tools/base.dart @@ -12,7 +12,6 @@ import 'types.dart'; /// {@template tool_spec} /// The specification of a LangChain tool without the actual implementation. 
/// {@endtemplate} -@immutable class ToolSpec { /// {@macro tool_spec} const ToolSpec({ diff --git a/packages/langchain_openai/pubspec_overrides.yaml b/packages/langchain_openai/pubspec_overrides.yaml index 18a3bcaa..d4293e4f 100644 --- a/packages/langchain_openai/pubspec_overrides.yaml +++ b/packages/langchain_openai/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: openai_dart,langchain_core,langchain_community,langchain +# melos_managed_dependency_overrides: openai_dart,langchain_core,langchain_community,langchain,tavily_dart dependency_overrides: langchain: path: ../langchain @@ -8,3 +8,5 @@ dependency_overrides: path: ../langchain_core openai_dart: path: ../openai_dart + tavily_dart: + path: ../tavily_dart diff --git a/packages/langchain_supabase/pubspec_overrides.yaml b/packages/langchain_supabase/pubspec_overrides.yaml index 5eb34624..d5cb8df4 100644 --- a/packages/langchain_supabase/pubspec_overrides.yaml +++ b/packages/langchain_supabase/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_openai,openai_dart,langchain_core,langchain_community +# melos_managed_dependency_overrides: langchain_openai,openai_dart,langchain_core,langchain_community,tavily_dart # melos_managed_dependency_overrides: langchain dependency_overrides: langchain: @@ -11,3 +11,5 @@ dependency_overrides: path: ../langchain_openai openai_dart: path: ../openai_dart + tavily_dart: + path: ../tavily_dart From 510abf9a029e4e597c23ce4bf943ec0fd05bffae Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 22 Jun 2024 15:48:14 +0200 Subject: [PATCH 170/251] feat: Add support for tool use in anthropic_sdk_dart client (#469) --- packages/anthropic_sdk_dart/README.md | 171 +- .../example/anthropic_sdk_dart_example.dart | 200 + .../lib/anthropic_sdk_dart.dart | 2 +- .../anthropic_sdk_dart/lib/src/client.dart | 7 +- .../lib/src/extensions.dart | 59 + .../lib/src/generated/schema/block.dart | 100 + .../lib/src/generated/schema/block_delta.dart | 56 + .../schema/create_message_request.dart | 95 +- .../schema/message_stream_event.dart | 10 +- .../lib/src/generated/schema/schema.dart | 5 +- .../src/generated/schema/schema.freezed.dart | 4063 +++++++++++++---- .../lib/src/generated/schema/schema.g.dart | 190 +- .../lib/src/generated/schema/stop_reason.dart | 2 + .../generated/schema/text_block_delta.dart | 44 - .../lib/src/generated/schema/tool.dart | 59 + .../lib/src/generated/schema/tool_choice.dart | 54 + .../generated/schema/tool_choice_type.dart | 24 + .../oas/anthropic_openapi_curated.yaml | 230 +- packages/anthropic_sdk_dart/oas/main.dart | 4 + packages/anthropic_sdk_dart/pubspec.yaml | 2 +- .../test/messages_test.dart | 180 +- 21 files changed, 4492 insertions(+), 1065 deletions(-) create mode 100644 packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart diff --git a/packages/anthropic_sdk_dart/README.md b/packages/anthropic_sdk_dart/README.md index 6ccb7d3a..bc8b7208 100644 --- a/packages/anthropic_sdk_dart/README.md +++ b/packages/anthropic_sdk_dart/README.md @@ -16,7 +16,7 @@ Unofficial Dart 
client for [Anthropic](https://docs.anthropic.com/en/api) API (a **Supported endpoints:** -- Messages (with streaming support) +- Messages (with tools and streaming support) ## Table of contents @@ -57,57 +57,188 @@ Send a structured list of input messages with text and/or image content, and the ```dart final res = await client.createMessage( request: CreateMessageRequest( - model: Model.model(Models.claude3Opus20240229), + model: Model.model(Models.claude35Sonnet20240620), maxTokens: 1024, messages: [ Message( role: MessageRole.user, - content: 'Hello, Claude', + content: MessageContent.text('Hello, Claude'), ), ], ), ); print(res.content.text); -// Hi there! How can I help you today? +// Hello! It's nice to meet you. How are you doing today? ``` `Model` is a sealed class that offers two ways to specify the model: -- `Model.modelId('model-id')`: the model ID as string (e.g. `'claude-3-haiku-20240307'`). -- `Model.model(Models.claude3Opus20240229)`: a value from `Models` enum which lists all the available models. +- `Model.modelId('model-id')`: the model ID as string (e.g. `'claude-instant-1.2'`). +- `Model.model(Models.claude35Sonnet20240620)`: a value from `Models` enum which lists all the available models. Mind that this list may not be up-to-date. Refer to the [documentation](https://docs.anthropic.com/en/docs/models-overview) for the updated list. **Streaming messages:** ```dart -final stream = await client.createMessageStream( +final stream = client.createMessageStream( request: CreateMessageRequest( - model: Model.model(Models.claude3Opus20240229), + model: Model.model(Models.claude35Sonnet20240620), maxTokens: 1024, messages: [ Message( role: MessageRole.user, - content: 'Hello, Claude', + content: MessageContent.text('Hello, Claude'), ), ], ), ); -String text = ''; await for (final res in stream) { res.map( - messageStart: (e) {}, - messageDelta: (e) {}, - messageStop: (e) {}, - contentBlockStart: (e) {}, - contentBlockDelta: (e) { - text += e.delta.text; + messageStart: (MessageStartEvent e) {}, + messageDelta: (MessageDeltaEvent e) {}, + messageStop: (MessageStopEvent e) {}, + contentBlockStart: (ContentBlockStartEvent e) {}, + contentBlockDelta: (ContentBlockDeltaEvent e) { + stdout.write(e.delta.text); + }, + contentBlockStop: (ContentBlockStopEvent e) {}, + ping: (PingEvent e) {}, + ); +} +// Hello! It's nice to meet you. How are you doing today? +``` + +### Tool use + +Claude is capable of interacting with external client-side tools and functions, allowing you to equip Claude with your own custom tools to perform a wider variety of tasks. + +Refer to the [official documentation](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) for more information. + +In the following example, we want the model to be able to use our function that return the current weather in a given city: + +```dart +Map _getCurrentWeather( + final String location, + final String unit, +) { + const temperature = 22; + const weather = 'Sunny'; + return { + 'temperature': unit == 'celsius' ? temperature : (temperature * 9 / 5) + 32, + 'unit': unit, + 'description': weather, + }; +} +``` + +To do that, we need to provide the definition of the tool: +```dart +const tool = Tool( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. 
San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], }, - contentBlockStop: (e) {}, - ping: (e) {}, + }, + 'required': ['location'], + }, +); +``` + +Then we can use the tool in the message request: +```dart +final request1 = CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, +); +final aiMessage1 = await client.createMessage(request: request1); + +final toolUse = aiMessage1.content.blocks.firstOrNull; +if (toolUse == null || toolUse is! ToolUseBlock) { + return; +} + +// Call your tool here with the given input +final toolResult = _getCurrentWeather( + toolUse.input['location'], + toolUse.input['unit'], +); + +final request2 = CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now in Fahrenheit?', + ), + ), + Message( + role: MessageRole.assistant, + content: aiMessage1.content, + ), + Message( + role: MessageRole.user, + content: MessageContent.blocks([ + Block.toolResult( + toolUseId: toolUse.id, + content: ToolResultBlockContent.text(json.encode(toolResult)), + ), + ]), + ), + ], + tools: [tool], + maxTokens: 1024, +); +final aiMessage2 = await client.createMessage(request: request2); + +print(aiMessage2.content.text); +// Based on the current weather information for Boston, here's what it's like right now: +// +// The temperature in Boston is 71.6°F (Fahrenheit). +// The weather conditions are described as sunny. +``` + +You can also stream the input for a tool: + +```dart +final stream = client.createMessageStream(request: request); +await for (final res in stream) { + res.map( + messageStart: (MessageStartEvent v) {}, + messageDelta: (MessageDeltaEvent v) {}, + messageStop: (MessageStopEvent v) {}, + contentBlockStart: (ContentBlockStartEvent v) {}, + contentBlockDelta: (ContentBlockDeltaEvent v) { + stdout.write(v.delta.inputJson); + }, + contentBlockStop: (ContentBlockStopEvent v) {}, + ping: (PingEvent v) {}, ); } -print(text); -// Hi there! How can I help you today? 
+// {"location": "Boston, MA", "unit": "fahrenheit"} ``` ## Advance Usage diff --git a/packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart b/packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart new file mode 100644 index 00000000..0a576196 --- /dev/null +++ b/packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart @@ -0,0 +1,200 @@ +// ignore_for_file: avoid_print +import 'dart:async'; +import 'dart:convert'; +import 'dart:io'; + +import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; + +Future main() async { + final client = AnthropicClient( + apiKey: Platform.environment['ANTHROPIC_API_KEY'], + ); + + await _createMessage(client); + await _createMessageStream(client); + await _toolUse(client); + await _toolUseStreaming(client); + + client.endSession(); +} + +Future _createMessage(final AnthropicClient client) async { + final res = await client.createMessage( + request: const CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + maxTokens: 1024, + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text('Hello, Claude'), + ), + ], + ), + ); + print(res.content.text); + // Hello! It's nice to meet you. How are you doing today? +} + +Future _createMessageStream(final AnthropicClient client) async { + final stream = client.createMessageStream( + request: const CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + maxTokens: 1024, + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text('Hello, Claude'), + ), + ], + ), + ); + await for (final res in stream) { + res.map( + messageStart: (MessageStartEvent e) {}, + messageDelta: (MessageDeltaEvent e) {}, + messageStop: (MessageStopEvent e) {}, + contentBlockStart: (ContentBlockStartEvent e) {}, + contentBlockDelta: (ContentBlockDeltaEvent e) { + stdout.write(e.delta.text); + }, + contentBlockStop: (ContentBlockStopEvent e) {}, + ping: (PingEvent e) {}, + ); + } + // Hello! It's nice to meet you. How are you doing today? +} + +Future _toolUse(final AnthropicClient client) async { + final request1 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, + ); + + final aiMessage1 = await client.createMessage(request: request1); + + final toolUse = aiMessage1.content.blocks.firstOrNull; + if (toolUse == null || toolUse is! 
ToolUseBlock) { + return; + } + + // Call your tool here with the given input + final toolResult = _getCurrentWeather( + toolUse.input['location'], + toolUse.input['unit'], + ); + + final request2 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now in Fahrenheit?', + ), + ), + Message( + role: MessageRole.assistant, + content: aiMessage1.content, + ), + Message( + role: MessageRole.user, + content: MessageContent.blocks([ + Block.toolResult( + toolUseId: toolUse.id, + content: ToolResultBlockContent.text(json.encode(toolResult)), + ), + ]), + ), + ], + tools: [tool], + maxTokens: 1024, + ); + final aiMessage2 = await client.createMessage(request: request2); + print(aiMessage2.content.text); + // Based on the current weather information for Boston, here's what it's like right now: + // + // The temperature in Boston is 71.6°F (Fahrenheit). + // The weather conditions are described as sunny. +} + +Future _toolUseStreaming(final AnthropicClient client) async { + final request = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now in Fahrenheit?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, + ); + + final stream = client.createMessageStream(request: request); + await for (final res in stream) { + res.map( + messageStart: (MessageStartEvent v) {}, + messageDelta: (MessageDeltaEvent v) {}, + messageStop: (MessageStopEvent v) {}, + contentBlockStart: (ContentBlockStartEvent v) {}, + contentBlockDelta: (ContentBlockDeltaEvent v) { + stdout.write(v.delta.inputJson); + }, + contentBlockStop: (ContentBlockStopEvent v) {}, + ping: (PingEvent v) {}, + ); + } + // {"location": "Boston, MA", "unit": "fahrenheit"} +} + +Map _getCurrentWeather( + final String location, + final String unit, +) { + const temperature = 22; + const weather = 'Sunny'; + return { + 'temperature': unit == 'celsius' ? temperature : (temperature * 9 / 5) + 32, + 'unit': unit, + 'description': weather, + }; +} + +const tool = Tool( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, +); diff --git a/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart index 65378d70..7a853ada 100644 --- a/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart +++ b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart @@ -1,4 +1,4 @@ -/// Dart Client for the Anthropic API (Claude 3 Opus, Sonnet, Haiku, etc.). +/// Dart Client for the Anthropic API (Claude 3.5 Sonnet, Opus, Haiku, etc.). 
library anthropic_sdk_dart; export 'src/client.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/client.dart b/packages/anthropic_sdk_dart/lib/src/client.dart index 17c4e2a1..3d02a34b 100644 --- a/packages/anthropic_sdk_dart/lib/src/client.dart +++ b/packages/anthropic_sdk_dart/lib/src/client.dart @@ -76,8 +76,11 @@ class AnthropicClient extends g.AnthropicClient { yield* r.stream .transform(const _AnthropicStreamTransformer()) // .map( - (final d) => MessageStreamEvent.fromJson(json.decode(d)), - ); + (final d) { + final j = json.decode(d) as Map; + return MessageStreamEvent.fromJson(j); + }, + ); } @override diff --git a/packages/anthropic_sdk_dart/lib/src/extensions.dart b/packages/anthropic_sdk_dart/lib/src/extensions.dart index 749979e5..58b178a2 100644 --- a/packages/anthropic_sdk_dart/lib/src/extensions.dart +++ b/packages/anthropic_sdk_dart/lib/src/extensions.dart @@ -10,4 +10,63 @@ extension MessageContentX on MessageContent { blocks.value.whereType().map((t) => t.text).join('\n'), ); } + + /// Returns the blocks of the message. + List get blocks { + return map( + text: (text) => [Block.text(text: text.value)], + blocks: (blocks) => blocks.value, + ); + } +} + +/// Extension methods for [Block]. +extension BlockX on Block { + /// Returns the text content of the block. + String get text { + return mapOrNull( + text: (text) => text.text, + ) ?? + ''; + } + + /// Returns the image source of the block. + ImageBlock? get image { + return mapOrNull( + image: (image) => image, + ); + } + + /// Returns the tool use block. + ToolUseBlock? get toolUse { + return mapOrNull( + toolUse: (toolUse) => toolUse, + ); + } + + /// Returns the tool result block. + ToolResultBlock? get toolResult { + return mapOrNull( + toolResult: (toolResult) => toolResult, + ); + } +} + +/// Extension methods for [BlockDelta]. +extension BlockDeltaX on BlockDelta { + /// Returns the text content of the block delta. + String get text { + return map( + textDelta: (text) => text.text, + inputJsonDelta: (inputJson) => '', + ); + } + + /// Returns the type of the block delta. + String get inputJson { + return map( + textDelta: (text) => '', + inputJsonDelta: (inputJson) => inputJson.partialJson ?? '', + ); + } } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart index 36fcbaae..959a5ecb 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart @@ -39,6 +39,47 @@ sealed class Block with _$Block { @Default('image') String type, }) = ImageBlock; + // ------------------------------------------ + // UNION: ToolUseBlock + // ------------------------------------------ + + /// The tool the model wants to use. + const factory Block.toolUse({ + /// A unique identifier for this particular tool use block. + /// This will be used to match up the tool results later. + required String id, + + /// The name of the tool being used. + required String name, + + /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. + required Map input, + + /// The type of content block. + @Default('tool_use') String type, + }) = ToolUseBlock; + + // ------------------------------------------ + // UNION: ToolResultBlock + // ------------------------------------------ + + /// The result of using a tool. + const factory Block.toolResult({ + /// The `id` of the tool use request this is a result for. 
+ @JsonKey(name: 'tool_use_id') required String toolUseId, + + /// The result of the tool, as a string (e.g. `"content": "15 degrees"`) + /// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). + /// These content blocks can use the text or image types. + @_ToolResultBlockContentConverter() required ToolResultBlockContent content, + + /// Set to `true` if the tool execution resulted in an error. + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + + /// The type of content block. + @Default('tool_result') String type, + }) = ToolResultBlock; + /// Object construction from a JSON representation factory Block.fromJson(Map json) => _$BlockFromJson(json); } @@ -52,4 +93,63 @@ enum BlockEnumType { text, @JsonValue('image') image, + @JsonValue('tool_use') + toolUse, + @JsonValue('tool_result') + toolResult, +} + +// ========================================== +// CLASS: ToolResultBlockContent +// ========================================== + +/// The result of the tool, as a string (e.g. `"content": "15 degrees"`) +/// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). +/// These content blocks can use the text or image types. +@freezed +sealed class ToolResultBlockContent with _$ToolResultBlockContent { + const ToolResultBlockContent._(); + + /// An array of content blocks. + const factory ToolResultBlockContent.blocks( + List value, + ) = ToolResultBlockContentListBlock; + + /// A single text block. + const factory ToolResultBlockContent.text( + String value, + ) = ToolResultBlockContentString; + + /// Object construction from a JSON representation + factory ToolResultBlockContent.fromJson(Map json) => + _$ToolResultBlockContentFromJson(json); +} + +/// Custom JSON converter for [ToolResultBlockContent] +class _ToolResultBlockContentConverter + implements JsonConverter { + const _ToolResultBlockContentConverter(); + + @override + ToolResultBlockContent fromJson(Object? data) { + if (data is List && data.every((item) => item is Map)) { + return ToolResultBlockContentListBlock(data + .map((i) => Block.fromJson(i as Map)) + .toList(growable: false)); + } + if (data is String) { + return ToolResultBlockContentString(data); + } + throw Exception( + 'Unexpected value for ToolResultBlockContent: $data', + ); + } + + @override + Object? toJson(ToolResultBlockContent data) { + return switch (data) { + ToolResultBlockContentListBlock(value: final v) => v, + ToolResultBlockContentString(value: final v) => v, + }; + } } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart new file mode 100644 index 00000000..d107a864 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart @@ -0,0 +1,56 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: BlockDelta +// ========================================== + +/// A delta in a streaming message. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class BlockDelta with _$BlockDelta { + const BlockDelta._(); + + // ------------------------------------------ + // UNION: TextBlockDelta + // ------------------------------------------ + + /// A delta in a streaming text block. + const factory BlockDelta.textDelta({ + /// The text delta. 
+ required String text, + + /// The type of content block. + required String type, + }) = TextBlockDelta; + + // ------------------------------------------ + // UNION: InputJsonBlockDelta + // ------------------------------------------ + + /// A delta in a streaming input JSON. + const factory BlockDelta.inputJsonDelta({ + /// The partial JSON delta. + @JsonKey(name: 'partial_json', includeIfNull: false) String? partialJson, + + /// The type of content block. + required String type, + }) = InputJsonBlockDelta; + + /// Object construction from a JSON representation + factory BlockDelta.fromJson(Map json) => + _$BlockDeltaFromJson(json); +} + +// ========================================== +// ENUM: BlockDeltaEnumType +// ========================================== + +enum BlockDeltaEnumType { + @JsonValue('text_delta') + textDelta, + @JsonValue('input_json_delta') + inputJsonDelta, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart index 2f06233e..df2f1b5b 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart @@ -36,6 +36,9 @@ class CreateMessageRequest with _$CreateMessageRequest { /// continue immediately from the content in that message. This can be used to /// constrain part of the model's response. /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// /// Example with a single `user` message: /// /// ```json @@ -149,6 +152,84 @@ class CreateMessageRequest with _$CreateMessageRequest { /// deterministic. @JsonKey(includeIfNull: false) double? temperature, + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @JsonKey(name: 'tool_choice', includeIfNull: false) ToolChoice? toolChoice, + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." 
+ /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + @JsonKey(includeIfNull: false) List? tools, + /// Only sample from the top K options for each subsequent token. /// /// Used to remove "long tail" low probability responses. @@ -189,6 +270,8 @@ class CreateMessageRequest with _$CreateMessageRequest { 'stop_sequences', 'system', 'temperature', + 'tool_choice', + 'tools', 'top_k', 'top_p', 'stream' @@ -209,6 +292,8 @@ class CreateMessageRequest with _$CreateMessageRequest { 'stop_sequences': stopSequences, 'system': system, 'temperature': temperature, + 'tool_choice': toolChoice, + 'tools': tools, 'top_k': topK, 'top_p': topP, 'stream': stream, @@ -222,16 +307,18 @@ class CreateMessageRequest with _$CreateMessageRequest { /// Available models. Mind that the list may not be exhaustive nor up-to-date. enum Models { + @JsonValue('claude-3-5-sonnet-20240620') + claude35Sonnet20240620, + @JsonValue('claude-3-haiku-20240307') + claude3Haiku20240307, @JsonValue('claude-3-opus-20240229') claude3Opus20240229, @JsonValue('claude-3-sonnet-20240229') claude3Sonnet20240229, - @JsonValue('claude-3-haiku-20240307') - claude3Haiku20240307, - @JsonValue('claude-2.1') - claude21, @JsonValue('claude-2.0') claude20, + @JsonValue('claude-2.1') + claude21, @JsonValue('claude-instant-1.2') claudeInstant12, } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart index 73dac3c3..46ef88ba 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart @@ -69,8 +69,9 @@ sealed class MessageStreamEvent with _$MessageStreamEvent { /// A start event in a streaming content block. const factory MessageStreamEvent.contentBlockStart({ - /// A block of text content. - @JsonKey(name: 'content_block') required TextBlock contentBlock, + /// A block of content in a message. + /// Any of: [TextBlock], [ImageBlock], [ToolUseBlock], [ToolResultBlock] + @JsonKey(name: 'content_block') required Block contentBlock, /// The index of the content block. required int index, @@ -85,8 +86,9 @@ sealed class MessageStreamEvent with _$MessageStreamEvent { /// A delta event in a streaming content block. const factory MessageStreamEvent.contentBlockDelta({ - /// A delta in a streaming text block. - required TextBlockDelta delta, + /// A delta in a streaming message. 
+ /// Any of: [TextBlockDelta], [InputJsonBlockDelta] + required BlockDelta delta, /// The index of the content block. required int index, diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart index 1953d0e4..b9d2ef26 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart @@ -12,14 +12,17 @@ part 'schema.freezed.dart'; part 'create_message_request.dart'; part 'create_message_request_metadata.dart'; +part 'tool_choice.dart'; +part 'tool_choice_type.dart'; part 'message.dart'; part 'message_role.dart'; +part 'tool.dart'; part 'image_block_source.dart'; part 'stop_reason.dart'; part 'usage.dart'; part 'message_stream_event_type.dart'; part 'message_delta.dart'; part 'message_delta_usage.dart'; -part 'text_block_delta.dart'; part 'block.dart'; part 'message_stream_event.dart'; +part 'block_delta.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart index a014e0e8..528c9b30 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart @@ -42,6 +42,9 @@ mixin _$CreateMessageRequest { /// continue immediately from the content in that message. This can be used to /// constrain part of the model's response. /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// /// Example with a single `user` message: /// /// ```json @@ -160,6 +163,86 @@ mixin _$CreateMessageRequest { @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @JsonKey(name: 'tool_choice', includeIfNull: false) + ToolChoice? get toolChoice => throw _privateConstructorUsedError; + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." 
+ /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + @JsonKey(includeIfNull: false) + List? get tools => throw _privateConstructorUsedError; + /// Only sample from the top K options for each subsequent token. /// /// Used to remove "long tail" low probability responses. @@ -209,12 +292,16 @@ abstract class $CreateMessageRequestCopyWith<$Res> { List? stopSequences, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'tool_choice', includeIfNull: false) + ToolChoice? toolChoice, + @JsonKey(includeIfNull: false) List? tools, @JsonKey(name: 'top_k', includeIfNull: false) int? topK, @JsonKey(name: 'top_p', includeIfNull: false) double? topP, bool stream}); $ModelCopyWith<$Res> get model; $CreateMessageRequestMetadataCopyWith<$Res>? get metadata; + $ToolChoiceCopyWith<$Res>? get toolChoice; } /// @nodoc @@ -238,6 +325,8 @@ class _$CreateMessageRequestCopyWithImpl<$Res, Object? stopSequences = freezed, Object? system = freezed, Object? temperature = freezed, + Object? toolChoice = freezed, + Object? tools = freezed, Object? topK = freezed, Object? topP = freezed, Object? stream = null, @@ -271,6 +360,14 @@ class _$CreateMessageRequestCopyWithImpl<$Res, ? _value.temperature : temperature // ignore: cast_nullable_to_non_nullable as double?, + toolChoice: freezed == toolChoice + ? _value.toolChoice + : toolChoice // ignore: cast_nullable_to_non_nullable + as ToolChoice?, + tools: freezed == tools + ? _value.tools + : tools // ignore: cast_nullable_to_non_nullable + as List?, topK: freezed == topK ? _value.topK : topK // ignore: cast_nullable_to_non_nullable @@ -306,6 +403,18 @@ class _$CreateMessageRequestCopyWithImpl<$Res, return _then(_value.copyWith(metadata: value) as $Val); }); } + + @override + @pragma('vm:prefer-inline') + $ToolChoiceCopyWith<$Res>? get toolChoice { + if (_value.toolChoice == null) { + return null; + } + + return $ToolChoiceCopyWith<$Res>(_value.toolChoice!, (value) { + return _then(_value.copyWith(toolChoice: value) as $Val); + }); + } } /// @nodoc @@ -325,6 +434,9 @@ abstract class _$$CreateMessageRequestImplCopyWith<$Res> List? stopSequences, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'tool_choice', includeIfNull: false) + ToolChoice? toolChoice, + @JsonKey(includeIfNull: false) List? tools, @JsonKey(name: 'top_k', includeIfNull: false) int? topK, @JsonKey(name: 'top_p', includeIfNull: false) double? 
topP, bool stream}); @@ -333,6 +445,8 @@ abstract class _$$CreateMessageRequestImplCopyWith<$Res> $ModelCopyWith<$Res> get model; @override $CreateMessageRequestMetadataCopyWith<$Res>? get metadata; + @override + $ToolChoiceCopyWith<$Res>? get toolChoice; } /// @nodoc @@ -353,6 +467,8 @@ class __$$CreateMessageRequestImplCopyWithImpl<$Res> Object? stopSequences = freezed, Object? system = freezed, Object? temperature = freezed, + Object? toolChoice = freezed, + Object? tools = freezed, Object? topK = freezed, Object? topP = freezed, Object? stream = null, @@ -386,6 +502,14 @@ class __$$CreateMessageRequestImplCopyWithImpl<$Res> ? _value.temperature : temperature // ignore: cast_nullable_to_non_nullable as double?, + toolChoice: freezed == toolChoice + ? _value.toolChoice + : toolChoice // ignore: cast_nullable_to_non_nullable + as ToolChoice?, + tools: freezed == tools + ? _value._tools + : tools // ignore: cast_nullable_to_non_nullable + as List?, topK: freezed == topK ? _value.topK : topK // ignore: cast_nullable_to_non_nullable @@ -414,11 +538,14 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { final List? stopSequences, @JsonKey(includeIfNull: false) this.system, @JsonKey(includeIfNull: false) this.temperature, + @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, + @JsonKey(includeIfNull: false) final List? tools, @JsonKey(name: 'top_k', includeIfNull: false) this.topK, @JsonKey(name: 'top_p', includeIfNull: false) this.topP, this.stream = false}) : _messages = messages, _stopSequences = stopSequences, + _tools = tools, super._(); factory _$CreateMessageRequestImpl.fromJson(Map json) => @@ -447,6 +574,9 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { /// continue immediately from the content in that message. This can be used to /// constrain part of the model's response. /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// /// Example with a single `user` message: /// /// ```json @@ -534,6 +664,9 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { /// continue immediately from the content in that message. This can be used to /// constrain part of the model's response. /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// /// Example with a single `user` message: /// /// ```json @@ -678,6 +811,164 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { @JsonKey(includeIfNull: false) final double? temperature; + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @override + @JsonKey(name: 'tool_choice', includeIfNull: false) + final ToolChoice? toolChoice; + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. 
You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." + /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + final List? _tools; + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." 
+ /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + @override + @JsonKey(includeIfNull: false) + List? get tools { + final value = _tools; + if (value == null) return null; + if (_tools is EqualUnmodifiableListView) return _tools; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + /// Only sample from the top K options for each subsequent token. /// /// Used to remove "long tail" low probability responses. @@ -712,7 +1003,7 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { @override String toString() { - return 'CreateMessageRequest(model: $model, messages: $messages, maxTokens: $maxTokens, metadata: $metadata, stopSequences: $stopSequences, system: $system, temperature: $temperature, topK: $topK, topP: $topP, stream: $stream)'; + return 'CreateMessageRequest(model: $model, messages: $messages, maxTokens: $maxTokens, metadata: $metadata, stopSequences: $stopSequences, system: $system, temperature: $temperature, toolChoice: $toolChoice, tools: $tools, topK: $topK, topP: $topP, stream: $stream)'; } @override @@ -731,6 +1022,9 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { (identical(other.system, system) || other.system == system) && (identical(other.temperature, temperature) || other.temperature == temperature) && + (identical(other.toolChoice, toolChoice) || + other.toolChoice == toolChoice) && + const DeepCollectionEquality().equals(other._tools, _tools) && (identical(other.topK, topK) || other.topK == topK) && (identical(other.topP, topP) || other.topP == topP) && (identical(other.stream, stream) || other.stream == stream)); @@ -747,6 +1041,8 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { const DeepCollectionEquality().hash(_stopSequences), system, temperature, + toolChoice, + const DeepCollectionEquality().hash(_tools), topK, topP, stream); @@ -778,6 +1074,9 @@ abstract class _CreateMessageRequest extends CreateMessageRequest { final List? stopSequences, @JsonKey(includeIfNull: false) final String? system, @JsonKey(includeIfNull: false) final double? temperature, + @JsonKey(name: 'tool_choice', includeIfNull: false) + final ToolChoice? toolChoice, + @JsonKey(includeIfNull: false) final List? tools, @JsonKey(name: 'top_k', includeIfNull: false) final int? topK, @JsonKey(name: 'top_p', includeIfNull: false) final double? 
topP, final bool stream}) = _$CreateMessageRequestImpl; @@ -811,6 +1110,9 @@ abstract class _CreateMessageRequest extends CreateMessageRequest { /// continue immediately from the content in that message. This can be used to /// constrain part of the model's response. /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// /// Example with a single `user` message: /// /// ```json @@ -934,6 +1236,88 @@ abstract class _CreateMessageRequest extends CreateMessageRequest { double? get temperature; @override + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @JsonKey(name: 'tool_choice', includeIfNull: false) + ToolChoice? get toolChoice; + @override + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." + /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + @JsonKey(includeIfNull: false) + List? get tools; + @override + /// Only sample from the top K options for each subsequent token. /// /// Used to remove "long tail" low probability responses. 
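
The generated documentation above describes the new tool-use surface added by this patch: `tools` and `tool_choice` on the request, plus `tool_use` and `tool_result` content blocks in the `Block` union. As a rough, non-authoritative sketch of how these generated types could fit together, the snippet below defines the `get_stock_price` tool from the field docs, lets the model choose tools freely, and answers a `tool_use` block with a `tool_result` block. It assumes the conventional freezed factory constructors (`Tool(...)`, `ToolChoice(...)`, `ToolResultBlockContent.text(...)`), that `ToolChoiceType` exposes an `auto` value, and the package's default entrypoint import; none of these are shown directly in this diff.

```dart
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

void main() {
  // Tool definition mirroring the get_stock_price example in the field docs.
  final tool = Tool(
    name: 'get_stock_price',
    description: 'Get the current stock price for a given ticker symbol.',
    inputSchema: {
      'type': 'object',
      'properties': {
        'ticker': {
          'type': 'string',
          'description': 'The stock ticker symbol, e.g. AAPL for Apple Inc.',
        },
      },
      'required': ['ticker'],
    },
  );

  // Let the model decide whether to call the tool
  // (assumes ToolChoiceType.auto exists; the docs list auto/any/tool).
  final toolChoice = ToolChoice(type: ToolChoiceType.auto);

  // `tools: [tool]` and `toolChoice: toolChoice` would be passed on
  // CreateMessageRequest; request construction is omitted in this sketch.

  // Suppose the response contained this content block:
  final Block block = ToolUseBlock(
    id: 'toolu_01D7FLrfh4GYq7yT1ULFeyMV',
    name: 'get_stock_price',
    input: {'ticker': '^GSPC'},
  );

  // Dispatch on the Block union and build the follow-up tool_result block.
  final Block? reply = block.maybeWhen(
    toolUse: (id, name, input, type) => ToolResultBlock(
      toolUseId: id,
      // Assumes the union constructor is named `text`, matching the
      // generated when/map callbacks for ToolResultBlockContent.
      content: ToolResultBlockContent.text('259.75 USD'),
    ),
    orElse: () => null,
  );

  print(reply);
}
```

The `reply` block would then be sent back to the model inside a subsequent `user` message, as described in the `tools` documentation above.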
@@ -1524,6 +1908,191 @@ abstract class _CreateMessageRequestMetadata get copyWith => throw _privateConstructorUsedError; } +ToolChoice _$ToolChoiceFromJson(Map json) { + return _ToolChoice.fromJson(json); +} + +/// @nodoc +mixin _$ToolChoice { + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + ToolChoiceType get type => throw _privateConstructorUsedError; + + /// The name of the tool to use. + @JsonKey(includeIfNull: false) + String? get name => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolChoiceCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolChoiceCopyWith<$Res> { + factory $ToolChoiceCopyWith( + ToolChoice value, $Res Function(ToolChoice) then) = + _$ToolChoiceCopyWithImpl<$Res, ToolChoice>; + @useResult + $Res call({ToolChoiceType type, @JsonKey(includeIfNull: false) String? name}); +} + +/// @nodoc +class _$ToolChoiceCopyWithImpl<$Res, $Val extends ToolChoice> + implements $ToolChoiceCopyWith<$Res> { + _$ToolChoiceCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? name = freezed, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ToolChoiceType, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ToolChoiceImplCopyWith<$Res> + implements $ToolChoiceCopyWith<$Res> { + factory _$$ToolChoiceImplCopyWith( + _$ToolChoiceImpl value, $Res Function(_$ToolChoiceImpl) then) = + __$$ToolChoiceImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ToolChoiceType type, @JsonKey(includeIfNull: false) String? name}); +} + +/// @nodoc +class __$$ToolChoiceImplCopyWithImpl<$Res> + extends _$ToolChoiceCopyWithImpl<$Res, _$ToolChoiceImpl> + implements _$$ToolChoiceImplCopyWith<$Res> { + __$$ToolChoiceImplCopyWithImpl( + _$ToolChoiceImpl _value, $Res Function(_$ToolChoiceImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? name = freezed, + }) { + return _then(_$ToolChoiceImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ToolChoiceType, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolChoiceImpl extends _ToolChoice { + const _$ToolChoiceImpl( + {required this.type, @JsonKey(includeIfNull: false) this.name}) + : super._(); + + factory _$ToolChoiceImpl.fromJson(Map json) => + _$$ToolChoiceImplFromJson(json); + + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. 
+ /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @override + final ToolChoiceType type; + + /// The name of the tool to use. + @override + @JsonKey(includeIfNull: false) + final String? name; + + @override + String toString() { + return 'ToolChoice(type: $type, name: $name)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolChoiceImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.name, name) || other.name == name)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type, name); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolChoiceImplCopyWith<_$ToolChoiceImpl> get copyWith => + __$$ToolChoiceImplCopyWithImpl<_$ToolChoiceImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ToolChoiceImplToJson( + this, + ); + } +} + +abstract class _ToolChoice extends ToolChoice { + const factory _ToolChoice( + {required final ToolChoiceType type, + @JsonKey(includeIfNull: false) final String? name}) = _$ToolChoiceImpl; + const _ToolChoice._() : super._(); + + factory _ToolChoice.fromJson(Map json) = + _$ToolChoiceImpl.fromJson; + + @override + + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + ToolChoiceType get type; + @override + + /// The name of the tool to use. + @JsonKey(includeIfNull: false) + String? get name; + @override + @JsonKey(ignore: true) + _$$ToolChoiceImplCopyWith<_$ToolChoiceImpl> get copyWith => + throw _privateConstructorUsedError; +} + Message _$MessageFromJson(Map json) { return _Message.fromJson(json); } @@ -2407,20 +2976,259 @@ abstract class MessageContentString extends MessageContent { get copyWith => throw _privateConstructorUsedError; } -ImageBlockSource _$ImageBlockSourceFromJson(Map json) { - return _ImageBlockSource.fromJson(json); +Tool _$ToolFromJson(Map json) { + return _Tool.fromJson(json); } /// @nodoc -mixin _$ImageBlockSource { - /// The base64-encoded image data. - String get data => throw _privateConstructorUsedError; +mixin _$Tool { + /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + String get name => throw _privateConstructorUsedError; - /// The media type of the image. - @JsonKey(name: 'media_type') - ImageBlockSourceMediaType get mediaType => throw _privateConstructorUsedError; + /// Description of what this tool does. + /// + /// Tool descriptions should be as detailed as possible. The more information that + /// the model has about what the tool is and how to use it, the better it will + /// perform. You can use natural language descriptions to reinforce important + /// aspects of the tool input JSON schema. + @JsonKey(includeIfNull: false) + String? get description => throw _privateConstructorUsedError; - /// The type of image source. + /// [JSON schema](https://json-schema.org/) for this tool's input. 
+ /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. + @JsonKey(name: 'input_schema') + Map get inputSchema => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolCopyWith<$Res> { + factory $ToolCopyWith(Tool value, $Res Function(Tool) then) = + _$ToolCopyWithImpl<$Res, Tool>; + @useResult + $Res call( + {String name, + @JsonKey(includeIfNull: false) String? description, + @JsonKey(name: 'input_schema') Map inputSchema}); +} + +/// @nodoc +class _$ToolCopyWithImpl<$Res, $Val extends Tool> + implements $ToolCopyWith<$Res> { + _$ToolCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = freezed, + Object? inputSchema = null, + }) { + return _then(_value.copyWith( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: freezed == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String?, + inputSchema: null == inputSchema + ? _value.inputSchema + : inputSchema // ignore: cast_nullable_to_non_nullable + as Map, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ToolImplCopyWith<$Res> implements $ToolCopyWith<$Res> { + factory _$$ToolImplCopyWith( + _$ToolImpl value, $Res Function(_$ToolImpl) then) = + __$$ToolImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String name, + @JsonKey(includeIfNull: false) String? description, + @JsonKey(name: 'input_schema') Map inputSchema}); +} + +/// @nodoc +class __$$ToolImplCopyWithImpl<$Res> + extends _$ToolCopyWithImpl<$Res, _$ToolImpl> + implements _$$ToolImplCopyWith<$Res> { + __$$ToolImplCopyWithImpl(_$ToolImpl _value, $Res Function(_$ToolImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = freezed, + Object? inputSchema = null, + }) { + return _then(_$ToolImpl( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: freezed == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String?, + inputSchema: null == inputSchema + ? _value._inputSchema + : inputSchema // ignore: cast_nullable_to_non_nullable + as Map, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolImpl extends _Tool { + const _$ToolImpl( + {required this.name, + @JsonKey(includeIfNull: false) this.description, + @JsonKey(name: 'input_schema') + required final Map inputSchema}) + : _inputSchema = inputSchema, + super._(); + + factory _$ToolImpl.fromJson(Map json) => + _$$ToolImplFromJson(json); + + /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + @override + final String name; + + /// Description of what this tool does. + /// + /// Tool descriptions should be as detailed as possible. The more information that + /// the model has about what the tool is and how to use it, the better it will + /// perform. You can use natural language descriptions to reinforce important + /// aspects of the tool input JSON schema. + @override + @JsonKey(includeIfNull: false) + final String? 
description; + + /// [JSON schema](https://json-schema.org/) for this tool's input. + /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. + final Map _inputSchema; + + /// [JSON schema](https://json-schema.org/) for this tool's input. + /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. + @override + @JsonKey(name: 'input_schema') + Map get inputSchema { + if (_inputSchema is EqualUnmodifiableMapView) return _inputSchema; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_inputSchema); + } + + @override + String toString() { + return 'Tool(name: $name, description: $description, inputSchema: $inputSchema)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolImpl && + (identical(other.name, name) || other.name == name) && + (identical(other.description, description) || + other.description == description) && + const DeepCollectionEquality() + .equals(other._inputSchema, _inputSchema)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, name, description, + const DeepCollectionEquality().hash(_inputSchema)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolImplCopyWith<_$ToolImpl> get copyWith => + __$$ToolImplCopyWithImpl<_$ToolImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ToolImplToJson( + this, + ); + } +} + +abstract class _Tool extends Tool { + const factory _Tool( + {required final String name, + @JsonKey(includeIfNull: false) final String? description, + @JsonKey(name: 'input_schema') + required final Map inputSchema}) = _$ToolImpl; + const _Tool._() : super._(); + + factory _Tool.fromJson(Map json) = _$ToolImpl.fromJson; + + @override + + /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + String get name; + @override + + /// Description of what this tool does. + /// + /// Tool descriptions should be as detailed as possible. The more information that + /// the model has about what the tool is and how to use it, the better it will + /// perform. You can use natural language descriptions to reinforce important + /// aspects of the tool input JSON schema. + @JsonKey(includeIfNull: false) + String? get description; + @override + + /// [JSON schema](https://json-schema.org/) for this tool's input. + /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. + @JsonKey(name: 'input_schema') + Map get inputSchema; + @override + @JsonKey(ignore: true) + _$$ToolImplCopyWith<_$ToolImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ImageBlockSource _$ImageBlockSourceFromJson(Map json) { + return _ImageBlockSource.fromJson(json); +} + +/// @nodoc +mixin _$ImageBlockSource { + /// The base64-encoded image data. + String get data => throw _privateConstructorUsedError; + + /// The media type of the image. + @JsonKey(name: 'media_type') + ImageBlockSourceMediaType get mediaType => throw _privateConstructorUsedError; + + /// The type of image source. 
ImageBlockSourceType get type => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @@ -3182,37 +3990,115 @@ abstract class _MessageDeltaUsage extends MessageDeltaUsage { throw _privateConstructorUsedError; } -TextBlockDelta _$TextBlockDeltaFromJson(Map json) { - return _TextBlockDelta.fromJson(json); +Block _$BlockFromJson(Map json) { + switch (json['type']) { + case 'text': + return TextBlock.fromJson(json); + case 'image': + return ImageBlock.fromJson(json); + case 'tool_use': + return ToolUseBlock.fromJson(json); + case 'tool_result': + return ToolResultBlock.fromJson(json); + + default: + throw CheckedFromJsonException( + json, 'type', 'Block', 'Invalid union type "${json['type']}"!'); + } } /// @nodoc -mixin _$TextBlockDelta { - /// The text delta. - String get text => throw _privateConstructorUsedError; - +mixin _$Block { /// The type of content block. String get type => throw _privateConstructorUsedError; - + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + TResult Function( + String id, String name, Map input, String type)? + toolUse, + TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + required TResult Function(ToolUseBlock value) toolUse, + required TResult Function(ToolResultBlock value) toolResult, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + TResult? Function(ToolUseBlock value)? toolUse, + TResult? Function(ToolResultBlock value)? toolResult, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + TResult Function(ToolUseBlock value)? toolUse, + TResult Function(ToolResultBlock value)? 
toolResult, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $TextBlockDeltaCopyWith get copyWith => - throw _privateConstructorUsedError; + $BlockCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $TextBlockDeltaCopyWith<$Res> { - factory $TextBlockDeltaCopyWith( - TextBlockDelta value, $Res Function(TextBlockDelta) then) = - _$TextBlockDeltaCopyWithImpl<$Res, TextBlockDelta>; +abstract class $BlockCopyWith<$Res> { + factory $BlockCopyWith(Block value, $Res Function(Block) then) = + _$BlockCopyWithImpl<$Res, Block>; @useResult - $Res call({String text, String type}); + $Res call({String type}); } /// @nodoc -class _$TextBlockDeltaCopyWithImpl<$Res, $Val extends TextBlockDelta> - implements $TextBlockDeltaCopyWith<$Res> { - _$TextBlockDeltaCopyWithImpl(this._value, this._then); +class _$BlockCopyWithImpl<$Res, $Val extends Block> + implements $BlockCopyWith<$Res> { + _$BlockCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -3222,14 +4108,9 @@ class _$TextBlockDeltaCopyWithImpl<$Res, $Val extends TextBlockDelta> @pragma('vm:prefer-inline') @override $Res call({ - Object? text = null, Object? type = null, }) { return _then(_value.copyWith( - text: null == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable @@ -3239,22 +4120,21 @@ class _$TextBlockDeltaCopyWithImpl<$Res, $Val extends TextBlockDelta> } /// @nodoc -abstract class _$$TextBlockDeltaImplCopyWith<$Res> - implements $TextBlockDeltaCopyWith<$Res> { - factory _$$TextBlockDeltaImplCopyWith(_$TextBlockDeltaImpl value, - $Res Function(_$TextBlockDeltaImpl) then) = - __$$TextBlockDeltaImplCopyWithImpl<$Res>; +abstract class _$$TextBlockImplCopyWith<$Res> implements $BlockCopyWith<$Res> { + factory _$$TextBlockImplCopyWith( + _$TextBlockImpl value, $Res Function(_$TextBlockImpl) then) = + __$$TextBlockImplCopyWithImpl<$Res>; @override @useResult $Res call({String text, String type}); } /// @nodoc -class __$$TextBlockDeltaImplCopyWithImpl<$Res> - extends _$TextBlockDeltaCopyWithImpl<$Res, _$TextBlockDeltaImpl> - implements _$$TextBlockDeltaImplCopyWith<$Res> { - __$$TextBlockDeltaImplCopyWithImpl( - _$TextBlockDeltaImpl _value, $Res Function(_$TextBlockDeltaImpl) _then) +class __$$TextBlockImplCopyWithImpl<$Res> + extends _$BlockCopyWithImpl<$Res, _$TextBlockImpl> + implements _$$TextBlockImplCopyWith<$Res> { + __$$TextBlockImplCopyWithImpl( + _$TextBlockImpl _value, $Res Function(_$TextBlockImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -3263,7 +4143,7 @@ class __$$TextBlockDeltaImplCopyWithImpl<$Res> Object? text = null, Object? type = null, }) { - return _then(_$TextBlockDeltaImpl( + return _then(_$TextBlockImpl( text: null == text ? 
_value.text : text // ignore: cast_nullable_to_non_nullable @@ -3278,31 +4158,31 @@ class __$$TextBlockDeltaImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$TextBlockDeltaImpl extends _TextBlockDelta { - const _$TextBlockDeltaImpl({required this.text, required this.type}) - : super._(); +class _$TextBlockImpl extends TextBlock { + const _$TextBlockImpl({required this.text, this.type = 'text'}) : super._(); - factory _$TextBlockDeltaImpl.fromJson(Map json) => - _$$TextBlockDeltaImplFromJson(json); + factory _$TextBlockImpl.fromJson(Map json) => + _$$TextBlockImplFromJson(json); - /// The text delta. + /// The text content. @override final String text; /// The type of content block. @override + @JsonKey() final String type; @override String toString() { - return 'TextBlockDelta(text: $text, type: $type)'; + return 'Block.text(text: $text, type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$TextBlockDeltaImpl && + other is _$TextBlockImpl && (identical(other.text, text) || other.text == text) && (identical(other.type, type) || other.type == type)); } @@ -3314,218 +4194,25 @@ class _$TextBlockDeltaImpl extends _TextBlockDelta { @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => - __$$TextBlockDeltaImplCopyWithImpl<_$TextBlockDeltaImpl>( - this, _$identity); + _$$TextBlockImplCopyWith<_$TextBlockImpl> get copyWith => + __$$TextBlockImplCopyWithImpl<_$TextBlockImpl>(this, _$identity); @override - Map toJson() { - return _$$TextBlockDeltaImplToJson( - this, - ); - } -} - -abstract class _TextBlockDelta extends TextBlockDelta { - const factory _TextBlockDelta( - {required final String text, - required final String type}) = _$TextBlockDeltaImpl; - const _TextBlockDelta._() : super._(); - - factory _TextBlockDelta.fromJson(Map json) = - _$TextBlockDeltaImpl.fromJson; - - @override - - /// The text delta. - String get text; - @override - - /// The type of content block. - String get type; - @override - @JsonKey(ignore: true) - _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => - throw _privateConstructorUsedError; -} - -Block _$BlockFromJson(Map json) { - switch (json['type']) { - case 'text': - return TextBlock.fromJson(json); - case 'image': - return ImageBlock.fromJson(json); - - default: - throw CheckedFromJsonException( - json, 'type', 'Block', 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$Block { - /// The type of content block. - String get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(String text, String type) text, - required TResult Function(ImageBlockSource source, String type) image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String text, String type)? text, - TResult? Function(ImageBlockSource source, String type)? image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String text, String type)? text, - TResult Function(ImageBlockSource source, String type)? image, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(TextBlock value) text, - required TResult Function(ImageBlock value) image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? 
Function(TextBlock value)? text, - TResult? Function(ImageBlock value)? image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(TextBlock value)? text, - TResult Function(ImageBlock value)? image, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $BlockCopyWith get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $BlockCopyWith<$Res> { - factory $BlockCopyWith(Block value, $Res Function(Block) then) = - _$BlockCopyWithImpl<$Res, Block>; - @useResult - $Res call({String type}); -} - -/// @nodoc -class _$BlockCopyWithImpl<$Res, $Val extends Block> - implements $BlockCopyWith<$Res> { - _$BlockCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$TextBlockImplCopyWith<$Res> implements $BlockCopyWith<$Res> { - factory _$$TextBlockImplCopyWith( - _$TextBlockImpl value, $Res Function(_$TextBlockImpl) then) = - __$$TextBlockImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({String text, String type}); -} - -/// @nodoc -class __$$TextBlockImplCopyWithImpl<$Res> - extends _$BlockCopyWithImpl<$Res, _$TextBlockImpl> - implements _$$TextBlockImplCopyWith<$Res> { - __$$TextBlockImplCopyWithImpl( - _$TextBlockImpl _value, $Res Function(_$TextBlockImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? text = null, - Object? type = null, - }) { - return _then(_$TextBlockImpl( - text: null == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$TextBlockImpl extends TextBlock { - const _$TextBlockImpl({required this.text, this.type = 'text'}) : super._(); - - factory _$TextBlockImpl.fromJson(Map json) => - _$$TextBlockImplFromJson(json); - - /// The text content. - @override - final String text; - - /// The type of content block. 
- @override - @JsonKey() - final String type; - - @override - String toString() { - return 'Block.text(text: $text, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$TextBlockImpl && - (identical(other.text, text) || other.text == text) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, text, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$TextBlockImplCopyWith<_$TextBlockImpl> get copyWith => - __$$TextBlockImplCopyWithImpl<_$TextBlockImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(String text, String type) text, - required TResult Function(ImageBlockSource source, String type) image, - }) { - return text(this.text, type); + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, + }) { + return text(this.text, type); } @override @@ -3533,6 +4220,15 @@ class _$TextBlockImpl extends TextBlock { TResult? whenOrNull({ TResult? Function(String text, String type)? text, TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, }) { return text?.call(this.text, type); } @@ -3542,6 +4238,15 @@ class _$TextBlockImpl extends TextBlock { TResult maybeWhen({ TResult Function(String text, String type)? text, TResult Function(ImageBlockSource source, String type)? image, + TResult Function( + String id, String name, Map input, String type)? + toolUse, + TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, required TResult orElse(), }) { if (text != null) { @@ -3555,6 +4260,8 @@ class _$TextBlockImpl extends TextBlock { TResult map({ required TResult Function(TextBlock value) text, required TResult Function(ImageBlock value) image, + required TResult Function(ToolUseBlock value) toolUse, + required TResult Function(ToolResultBlock value) toolResult, }) { return text(this); } @@ -3564,6 +4271,8 @@ class _$TextBlockImpl extends TextBlock { TResult? mapOrNull({ TResult? Function(TextBlock value)? text, TResult? Function(ImageBlock value)? image, + TResult? Function(ToolUseBlock value)? toolUse, + TResult? Function(ToolResultBlock value)? toolResult, }) { return text?.call(this); } @@ -3573,6 +4282,8 @@ class _$TextBlockImpl extends TextBlock { TResult maybeMap({ TResult Function(TextBlock value)? text, TResult Function(ImageBlock value)? image, + TResult Function(ToolUseBlock value)? toolUse, + TResult Function(ToolResultBlock value)? 
toolResult, required TResult orElse(), }) { if (text != null) { @@ -3703,6 +4414,15 @@ class _$ImageBlockImpl extends ImageBlock { TResult when({ required TResult Function(String text, String type) text, required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, }) { return image(source, type); } @@ -3712,6 +4432,15 @@ class _$ImageBlockImpl extends ImageBlock { TResult? whenOrNull({ TResult? Function(String text, String type)? text, TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, }) { return image?.call(source, type); } @@ -3721,6 +4450,15 @@ class _$ImageBlockImpl extends ImageBlock { TResult maybeWhen({ TResult Function(String text, String type)? text, TResult Function(ImageBlockSource source, String type)? image, + TResult Function( + String id, String name, Map input, String type)? + toolUse, + TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, required TResult orElse(), }) { if (image != null) { @@ -3734,6 +4472,8 @@ class _$ImageBlockImpl extends ImageBlock { TResult map({ required TResult Function(TextBlock value) text, required TResult Function(ImageBlock value) image, + required TResult Function(ToolUseBlock value) toolUse, + required TResult Function(ToolResultBlock value) toolResult, }) { return image(this); } @@ -3743,6 +4483,8 @@ class _$ImageBlockImpl extends ImageBlock { TResult? mapOrNull({ TResult? Function(TextBlock value)? text, TResult? Function(ImageBlock value)? image, + TResult? Function(ToolUseBlock value)? toolUse, + TResult? Function(ToolResultBlock value)? toolResult, }) { return image?.call(this); } @@ -3752,6 +4494,8 @@ class _$ImageBlockImpl extends ImageBlock { TResult maybeMap({ TResult Function(TextBlock value)? text, TResult Function(ImageBlock value)? image, + TResult Function(ToolUseBlock value)? toolUse, + TResult Function(ToolResultBlock value)? 
toolResult, required TResult orElse(), }) { if (image != null) { @@ -3789,54 +4533,1468 @@ abstract class ImageBlock extends Block { throw _privateConstructorUsedError; } -MessageStreamEvent _$MessageStreamEventFromJson(Map json) { - switch (json['type']) { - case 'message_start': - return MessageStartEvent.fromJson(json); - case 'message_delta': - return MessageDeltaEvent.fromJson(json); - case 'message_stop': - return MessageStopEvent.fromJson(json); - case 'content_block_start': - return ContentBlockStartEvent.fromJson(json); - case 'content_block_delta': - return ContentBlockDeltaEvent.fromJson(json); - case 'content_block_stop': - return ContentBlockStopEvent.fromJson(json); - case 'ping': - return PingEvent.fromJson(json); +/// @nodoc +abstract class _$$ToolUseBlockImplCopyWith<$Res> + implements $BlockCopyWith<$Res> { + factory _$$ToolUseBlockImplCopyWith( + _$ToolUseBlockImpl value, $Res Function(_$ToolUseBlockImpl) then) = + __$$ToolUseBlockImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String id, String name, Map input, String type}); +} - default: - throw CheckedFromJsonException(json, 'type', 'MessageStreamEvent', - 'Invalid union type "${json['type']}"!'); +/// @nodoc +class __$$ToolUseBlockImplCopyWithImpl<$Res> + extends _$BlockCopyWithImpl<$Res, _$ToolUseBlockImpl> + implements _$$ToolUseBlockImplCopyWith<$Res> { + __$$ToolUseBlockImplCopyWithImpl( + _$ToolUseBlockImpl _value, $Res Function(_$ToolUseBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? name = null, + Object? input = null, + Object? type = null, + }) { + return _then(_$ToolUseBlockImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + input: null == input + ? _value._input + : input // ignore: cast_nullable_to_non_nullable + as Map, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); } } /// @nodoc -mixin _$MessageStreamEvent { - /// The type of a streaming event. - MessageStreamEventType get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, - required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, +@JsonSerializable() +class _$ToolUseBlockImpl extends ToolUseBlock { + const _$ToolUseBlockImpl( + {required this.id, + required this.name, + required final Map input, + this.type = 'tool_use'}) + : _input = input, + super._(); + + factory _$ToolUseBlockImpl.fromJson(Map json) => + _$$ToolUseBlockImplFromJson(json); + + /// A unique identifier for this particular tool use block. + /// This will be used to match up the tool results later. + @override + final String id; + + /// The name of the tool being used. + @override + final String name; + + /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. 
+ final Map _input; + + /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. + @override + Map get input { + if (_input is EqualUnmodifiableMapView) return _input; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_input); + } + + /// The type of content block. + @override + @JsonKey() + final String type; + + @override + String toString() { + return 'Block.toolUse(id: $id, name: $name, input: $input, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolUseBlockImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.name, name) || other.name == name) && + const DeepCollectionEquality().equals(other._input, _input) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, id, name, const DeepCollectionEquality().hash(_input), type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolUseBlockImplCopyWith<_$ToolUseBlockImpl> get copyWith => + __$$ToolUseBlockImplCopyWithImpl<_$ToolUseBlockImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, + }) { + return toolUse(id, name, input, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + }) { + return toolUse?.call(id, name, input, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + TResult Function( + String id, String name, Map input, String type)? + toolUse, + TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + required TResult orElse(), + }) { + if (toolUse != null) { + return toolUse(id, name, input, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + required TResult Function(ToolUseBlock value) toolUse, + required TResult Function(ToolResultBlock value) toolResult, + }) { + return toolUse(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + TResult? Function(ToolUseBlock value)? toolUse, + TResult? Function(ToolResultBlock value)? 
toolResult, + }) { + return toolUse?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + TResult Function(ToolUseBlock value)? toolUse, + TResult Function(ToolResultBlock value)? toolResult, + required TResult orElse(), + }) { + if (toolUse != null) { + return toolUse(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ToolUseBlockImplToJson( + this, + ); + } +} + +abstract class ToolUseBlock extends Block { + const factory ToolUseBlock( + {required final String id, + required final String name, + required final Map input, + final String type}) = _$ToolUseBlockImpl; + const ToolUseBlock._() : super._(); + + factory ToolUseBlock.fromJson(Map json) = + _$ToolUseBlockImpl.fromJson; + + /// A unique identifier for this particular tool use block. + /// This will be used to match up the tool results later. + String get id; + + /// The name of the tool being used. + String get name; + + /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. + Map get input; + @override + + /// The type of content block. + String get type; + @override + @JsonKey(ignore: true) + _$$ToolUseBlockImplCopyWith<_$ToolUseBlockImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ToolResultBlockImplCopyWith<$Res> + implements $BlockCopyWith<$Res> { + factory _$$ToolResultBlockImplCopyWith(_$ToolResultBlockImpl value, + $Res Function(_$ToolResultBlockImpl) then) = + __$$ToolResultBlockImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type}); + + $ToolResultBlockContentCopyWith<$Res> get content; +} + +/// @nodoc +class __$$ToolResultBlockImplCopyWithImpl<$Res> + extends _$BlockCopyWithImpl<$Res, _$ToolResultBlockImpl> + implements _$$ToolResultBlockImplCopyWith<$Res> { + __$$ToolResultBlockImplCopyWithImpl( + _$ToolResultBlockImpl _value, $Res Function(_$ToolResultBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? toolUseId = null, + Object? content = null, + Object? isError = freezed, + Object? type = null, + }) { + return _then(_$ToolResultBlockImpl( + toolUseId: null == toolUseId + ? _value.toolUseId + : toolUseId // ignore: cast_nullable_to_non_nullable + as String, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as ToolResultBlockContent, + isError: freezed == isError + ? _value.isError + : isError // ignore: cast_nullable_to_non_nullable + as bool?, + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } + + @override + @pragma('vm:prefer-inline') + $ToolResultBlockContentCopyWith<$Res> get content { + return $ToolResultBlockContentCopyWith<$Res>(_value.content, (value) { + return _then(_value.copyWith(content: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolResultBlockImpl extends ToolResultBlock { + const _$ToolResultBlockImpl( + {@JsonKey(name: 'tool_use_id') required this.toolUseId, + @_ToolResultBlockContentConverter() required this.content, + @JsonKey(name: 'is_error', includeIfNull: false) this.isError, + this.type = 'tool_result'}) + : super._(); + + factory _$ToolResultBlockImpl.fromJson(Map json) => + _$$ToolResultBlockImplFromJson(json); + + /// The `id` of the tool use request this is a result for. + @override + @JsonKey(name: 'tool_use_id') + final String toolUseId; + + /// The result of the tool, as a string (e.g. `"content": "15 degrees"`) + /// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). + /// These content blocks can use the text or image types. + @override + @_ToolResultBlockContentConverter() + final ToolResultBlockContent content; + + /// Set to `true` if the tool execution resulted in an error. + @override + @JsonKey(name: 'is_error', includeIfNull: false) + final bool? isError; + + /// The type of content block. + @override + @JsonKey() + final String type; + + @override + String toString() { + return 'Block.toolResult(toolUseId: $toolUseId, content: $content, isError: $isError, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolResultBlockImpl && + (identical(other.toolUseId, toolUseId) || + other.toolUseId == toolUseId) && + (identical(other.content, content) || other.content == content) && + (identical(other.isError, isError) || other.isError == isError) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, toolUseId, content, isError, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolResultBlockImplCopyWith<_$ToolResultBlockImpl> get copyWith => + __$$ToolResultBlockImplCopyWithImpl<_$ToolResultBlockImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, + }) { + return toolResult(toolUseId, content, isError, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? 
+ toolResult, + }) { + return toolResult?.call(toolUseId, content, isError, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + TResult Function( + String id, String name, Map input, String type)? + toolUse, + TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + required TResult orElse(), + }) { + if (toolResult != null) { + return toolResult(toolUseId, content, isError, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + required TResult Function(ToolUseBlock value) toolUse, + required TResult Function(ToolResultBlock value) toolResult, + }) { + return toolResult(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + TResult? Function(ToolUseBlock value)? toolUse, + TResult? Function(ToolResultBlock value)? toolResult, + }) { + return toolResult?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + TResult Function(ToolUseBlock value)? toolUse, + TResult Function(ToolResultBlock value)? toolResult, + required TResult orElse(), + }) { + if (toolResult != null) { + return toolResult(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ToolResultBlockImplToJson( + this, + ); + } +} + +abstract class ToolResultBlock extends Block { + const factory ToolResultBlock( + {@JsonKey(name: 'tool_use_id') required final String toolUseId, + @_ToolResultBlockContentConverter() + required final ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) final bool? isError, + final String type}) = _$ToolResultBlockImpl; + const ToolResultBlock._() : super._(); + + factory ToolResultBlock.fromJson(Map json) = + _$ToolResultBlockImpl.fromJson; + + /// The `id` of the tool use request this is a result for. + @JsonKey(name: 'tool_use_id') + String get toolUseId; + + /// The result of the tool, as a string (e.g. `"content": "15 degrees"`) + /// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). + /// These content blocks can use the text or image types. + @_ToolResultBlockContentConverter() + ToolResultBlockContent get content; + + /// Set to `true` if the tool execution resulted in an error. + @JsonKey(name: 'is_error', includeIfNull: false) + bool? get isError; + @override + + /// The type of content block. 
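Editor's note, for orientation only (this block is not part of the generated file above): a minimal sketch of how the two block types pair up in a tool-use turn. It assumes this is the anthropic_sdk_dart package of this repo and that its public library exports these types; `toolResultFor` and `runMyTool` are hypothetical names standing in for application code.

import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

// Build the tool_result block to send back for a model-issued tool_use block.
// `runMyTool` stands in for application code that actually executes the tool.
Block toolResultFor(
  ToolUseBlock call,
  String Function(String name, Map<String, dynamic> input) runMyTool,
) {
  return ToolResultBlock(
    // Must echo the id of the originating tool_use block.
    toolUseId: call.id,
    content: ToolResultBlockContentString(runMyTool(call.name, call.input)),
    isError: false,
  );
}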
+ String get type; + @override + @JsonKey(ignore: true) + _$$ToolResultBlockImplCopyWith<_$ToolResultBlockImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ToolResultBlockContent _$ToolResultBlockContentFromJson( + Map json) { + switch (json['runtimeType']) { + case 'blocks': + return ToolResultBlockContentListBlock.fromJson(json); + case 'text': + return ToolResultBlockContentString.fromJson(json); + + default: + throw CheckedFromJsonException( + json, + 'runtimeType', + 'ToolResultBlockContent', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$ToolResultBlockContent { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(ToolResultBlockContentListBlock value) blocks, + required TResult Function(ToolResultBlockContentString value) text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ToolResultBlockContentListBlock value)? blocks, + TResult? Function(ToolResultBlockContentString value)? text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ToolResultBlockContentListBlock value)? blocks, + TResult Function(ToolResultBlockContentString value)? text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolResultBlockContentCopyWith<$Res> { + factory $ToolResultBlockContentCopyWith(ToolResultBlockContent value, + $Res Function(ToolResultBlockContent) then) = + _$ToolResultBlockContentCopyWithImpl<$Res, ToolResultBlockContent>; +} + +/// @nodoc +class _$ToolResultBlockContentCopyWithImpl<$Res, + $Val extends ToolResultBlockContent> + implements $ToolResultBlockContentCopyWith<$Res> { + _$ToolResultBlockContentCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$ToolResultBlockContentListBlockImplCopyWith<$Res> { + factory _$$ToolResultBlockContentListBlockImplCopyWith( + _$ToolResultBlockContentListBlockImpl value, + $Res Function(_$ToolResultBlockContentListBlockImpl) then) = + __$$ToolResultBlockContentListBlockImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class __$$ToolResultBlockContentListBlockImplCopyWithImpl<$Res> + extends _$ToolResultBlockContentCopyWithImpl<$Res, + _$ToolResultBlockContentListBlockImpl> + implements _$$ToolResultBlockContentListBlockImplCopyWith<$Res> { + __$$ToolResultBlockContentListBlockImplCopyWithImpl( + _$ToolResultBlockContentListBlockImpl _value, + $Res Function(_$ToolResultBlockContentListBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ToolResultBlockContentListBlockImpl( + null == value + ? 
_value._value + : value // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolResultBlockContentListBlockImpl + extends ToolResultBlockContentListBlock { + const _$ToolResultBlockContentListBlockImpl(final List value, + {final String? $type}) + : _value = value, + $type = $type ?? 'blocks', + super._(); + + factory _$ToolResultBlockContentListBlockImpl.fromJson( + Map json) => + _$$ToolResultBlockContentListBlockImplFromJson(json); + + final List _value; + @override + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ToolResultBlockContent.blocks(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolResultBlockContentListBlockImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolResultBlockContentListBlockImplCopyWith< + _$ToolResultBlockContentListBlockImpl> + get copyWith => __$$ToolResultBlockContentListBlockImplCopyWithImpl< + _$ToolResultBlockContentListBlockImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) { + return blocks(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) { + return blocks?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? text, + required TResult orElse(), + }) { + if (blocks != null) { + return blocks(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ToolResultBlockContentListBlock value) blocks, + required TResult Function(ToolResultBlockContentString value) text, + }) { + return blocks(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ToolResultBlockContentListBlock value)? blocks, + TResult? Function(ToolResultBlockContentString value)? text, + }) { + return blocks?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ToolResultBlockContentListBlock value)? blocks, + TResult Function(ToolResultBlockContentString value)? 
text, + required TResult orElse(), + }) { + if (blocks != null) { + return blocks(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ToolResultBlockContentListBlockImplToJson( + this, + ); + } +} + +abstract class ToolResultBlockContentListBlock extends ToolResultBlockContent { + const factory ToolResultBlockContentListBlock(final List value) = + _$ToolResultBlockContentListBlockImpl; + const ToolResultBlockContentListBlock._() : super._(); + + factory ToolResultBlockContentListBlock.fromJson(Map json) = + _$ToolResultBlockContentListBlockImpl.fromJson; + + @override + List get value; + @JsonKey(ignore: true) + _$$ToolResultBlockContentListBlockImplCopyWith< + _$ToolResultBlockContentListBlockImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ToolResultBlockContentStringImplCopyWith<$Res> { + factory _$$ToolResultBlockContentStringImplCopyWith( + _$ToolResultBlockContentStringImpl value, + $Res Function(_$ToolResultBlockContentStringImpl) then) = + __$$ToolResultBlockContentStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$ToolResultBlockContentStringImplCopyWithImpl<$Res> + extends _$ToolResultBlockContentCopyWithImpl<$Res, + _$ToolResultBlockContentStringImpl> + implements _$$ToolResultBlockContentStringImplCopyWith<$Res> { + __$$ToolResultBlockContentStringImplCopyWithImpl( + _$ToolResultBlockContentStringImpl _value, + $Res Function(_$ToolResultBlockContentStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ToolResultBlockContentStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { + const _$ToolResultBlockContentStringImpl(this.value, {final String? $type}) + : $type = $type ?? 'text', + super._(); + + factory _$ToolResultBlockContentStringImpl.fromJson( + Map json) => + _$$ToolResultBlockContentStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ToolResultBlockContent.text(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolResultBlockContentStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolResultBlockContentStringImplCopyWith< + _$ToolResultBlockContentStringImpl> + get copyWith => __$$ToolResultBlockContentStringImplCopyWithImpl< + _$ToolResultBlockContentStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) { + return text(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) { + return text?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? 
text, + required TResult orElse(), + }) { + if (text != null) { + return text(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ToolResultBlockContentListBlock value) blocks, + required TResult Function(ToolResultBlockContentString value) text, + }) { + return text(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ToolResultBlockContentListBlock value)? blocks, + TResult? Function(ToolResultBlockContentString value)? text, + }) { + return text?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ToolResultBlockContentListBlock value)? blocks, + TResult Function(ToolResultBlockContentString value)? text, + required TResult orElse(), + }) { + if (text != null) { + return text(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ToolResultBlockContentStringImplToJson( + this, + ); + } +} + +abstract class ToolResultBlockContentString extends ToolResultBlockContent { + const factory ToolResultBlockContentString(final String value) = + _$ToolResultBlockContentStringImpl; + const ToolResultBlockContentString._() : super._(); + + factory ToolResultBlockContentString.fromJson(Map json) = + _$ToolResultBlockContentStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$ToolResultBlockContentStringImplCopyWith< + _$ToolResultBlockContentStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +MessageStreamEvent _$MessageStreamEventFromJson(Map json) { + switch (json['type']) { + case 'message_start': + return MessageStartEvent.fromJson(json); + case 'message_delta': + return MessageDeltaEvent.fromJson(json); + case 'message_stop': + return MessageStopEvent.fromJson(json); + case 'content_block_start': + return ContentBlockStartEvent.fromJson(json); + case 'content_block_delta': + return ContentBlockDeltaEvent.fromJson(json); + case 'content_block_stop': + return ContentBlockStopEvent.fromJson(json); + case 'ping': + return PingEvent.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'type', 'MessageStreamEvent', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$MessageStreamEvent { + /// The type of a streaming event. + MessageStreamEventType get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? 
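Editor's note, illustrative only: the ToolResultBlockContent union defined above carries either a plain string or a list of nested blocks. A minimal sketch of collapsing both forms to text with the generated matchers, assuming the same anthropic_sdk_dart import as before; `toolResultText` is a hypothetical name.

import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

// Collapse either form of a tool result to plain text.
String toolResultText(ToolResultBlockContent content) {
  return content.when(
    // Plain-string form, e.g. `"content": "15 degrees"`.
    text: (value) => value,
    // Nested-blocks form: keep only the text blocks and join them.
    blocks: (blocks) => blocks
        .map((block) => block.whenOrNull(text: (text, type) => text))
        .whereType<String>()
        .join('\n'),
  );
}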
+ contentBlockStart, + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageStreamEventCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageStreamEventCopyWith<$Res> { + factory $MessageStreamEventCopyWith( + MessageStreamEvent value, $Res Function(MessageStreamEvent) then) = + _$MessageStreamEventCopyWithImpl<$Res, MessageStreamEvent>; + @useResult + $Res call({MessageStreamEventType type}); +} + +/// @nodoc +class _$MessageStreamEventCopyWithImpl<$Res, $Val extends MessageStreamEvent> + implements $MessageStreamEventCopyWith<$Res> { + _$MessageStreamEventCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? 
type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$MessageStartEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$MessageStartEventImplCopyWith(_$MessageStartEventImpl value, + $Res Function(_$MessageStartEventImpl) then) = + __$$MessageStartEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({Message message, MessageStreamEventType type}); + + $MessageCopyWith<$Res> get message; +} + +/// @nodoc +class __$$MessageStartEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStartEventImpl> + implements _$$MessageStartEventImplCopyWith<$Res> { + __$$MessageStartEventImplCopyWithImpl(_$MessageStartEventImpl _value, + $Res Function(_$MessageStartEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? message = null, + Object? type = null, + }) { + return _then(_$MessageStartEventImpl( + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as Message, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } + + @override + @pragma('vm:prefer-inline') + $MessageCopyWith<$Res> get message { + return $MessageCopyWith<$Res>(_value.message, (value) { + return _then(_value.copyWith(message: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageStartEventImpl extends MessageStartEvent { + const _$MessageStartEventImpl({required this.message, required this.type}) + : super._(); + + factory _$MessageStartEventImpl.fromJson(Map json) => + _$$MessageStartEventImplFromJson(json); + + /// A message in a chat conversation. + @override + final Message message; + + /// The type of a streaming event. 
+ @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.messageStart(message: $message, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageStartEventImpl && + (identical(other.message, message) || other.message == message) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, message, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => + __$$MessageStartEventImplCopyWithImpl<_$MessageStartEventImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return messageStart(message, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return messageStart?.call(message, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? 
ping, + required TResult orElse(), + }) { + if (messageStart != null) { + return messageStart(message, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return messageStart(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return messageStart?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (messageStart != null) { + return messageStart(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageStartEventImplToJson( + this, + ); + } +} + +abstract class MessageStartEvent extends MessageStreamEvent { + const factory MessageStartEvent( + {required final Message message, + required final MessageStreamEventType type}) = _$MessageStartEventImpl; + const MessageStartEvent._() : super._(); + + factory MessageStartEvent.fromJson(Map json) = + _$MessageStartEventImpl.fromJson; + + /// A message in a chat conversation. + Message get message; + @override + + /// The type of a streaming event. + MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$MessageDeltaEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$MessageDeltaEventImplCopyWith(_$MessageDeltaEventImpl value, + $Res Function(_$MessageDeltaEventImpl) then) = + __$$MessageDeltaEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {MessageDelta delta, + MessageStreamEventType type, + MessageDeltaUsage usage}); + + $MessageDeltaCopyWith<$Res> get delta; + $MessageDeltaUsageCopyWith<$Res> get usage; +} + +/// @nodoc +class __$$MessageDeltaEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageDeltaEventImpl> + implements _$$MessageDeltaEventImplCopyWith<$Res> { + __$$MessageDeltaEventImplCopyWithImpl(_$MessageDeltaEventImpl _value, + $Res Function(_$MessageDeltaEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? delta = null, + Object? type = null, + Object? 
usage = null, + }) { + return _then(_$MessageDeltaEventImpl( + delta: null == delta + ? _value.delta + : delta // ignore: cast_nullable_to_non_nullable + as MessageDelta, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + usage: null == usage + ? _value.usage + : usage // ignore: cast_nullable_to_non_nullable + as MessageDeltaUsage, + )); + } + + @override + @pragma('vm:prefer-inline') + $MessageDeltaCopyWith<$Res> get delta { + return $MessageDeltaCopyWith<$Res>(_value.delta, (value) { + return _then(_value.copyWith(delta: value)); + }); + } + + @override + @pragma('vm:prefer-inline') + $MessageDeltaUsageCopyWith<$Res> get usage { + return $MessageDeltaUsageCopyWith<$Res>(_value.usage, (value) { + return _then(_value.copyWith(usage: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaEventImpl extends MessageDeltaEvent { + const _$MessageDeltaEventImpl( + {required this.delta, required this.type, required this.usage}) + : super._(); + + factory _$MessageDeltaEventImpl.fromJson(Map json) => + _$$MessageDeltaEventImplFromJson(json); + + /// A delta in a streaming message. + @override + final MessageDelta delta; + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. 
+ @override + final MessageDeltaUsage usage; + + @override + String toString() { + return 'MessageStreamEvent.messageDelta(delta: $delta, type: $type, usage: $usage)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaEventImpl && + (identical(other.delta, delta) || other.delta == delta) && + (identical(other.type, type) || other.type == type) && + (identical(other.usage, usage) || other.usage == usage)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, delta, type, usage); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => + __$$MessageDeltaEventImplCopyWithImpl<_$MessageDeltaEventImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, required TResult Function(int index, MessageStreamEventType type) contentBlockStop, required TResult Function(MessageStreamEventType type) ping, - }) => - throw _privateConstructorUsedError; + }) { + return messageDelta(delta, type, usage); + } + + @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(Message message, MessageStreamEventType type)? @@ -3845,16 +6003,18 @@ mixin _$MessageStreamEvent { MessageDeltaUsage usage)? messageDelta, TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, TResult? Function(MessageStreamEventType type)? ping, - }) => - throw _privateConstructorUsedError; + }) { + return messageDelta?.call(delta, type, usage); + } + + @override @optionalTypeArgs TResult maybeWhen({ TResult Function(Message message, MessageStreamEventType type)? @@ -3863,17 +6023,22 @@ mixin _$MessageStreamEvent { MessageDeltaUsage usage)? messageDelta, TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult Function(int index, MessageStreamEventType type)? contentBlockStop, TResult Function(MessageStreamEventType type)? 
ping, required TResult orElse(), - }) => - throw _privateConstructorUsedError; + }) { + if (messageDelta != null) { + return messageDelta(delta, type, usage); + } + return orElse(); + } + + @override @optionalTypeArgs TResult map({ required TResult Function(MessageStartEvent value) messageStart, @@ -3883,8 +6048,11 @@ mixin _$MessageStreamEvent { required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, required TResult Function(ContentBlockStopEvent value) contentBlockStop, required TResult Function(PingEvent value) ping, - }) => - throw _privateConstructorUsedError; + }) { + return messageDelta(this); + } + + @override @optionalTypeArgs TResult? mapOrNull({ TResult? Function(MessageStartEvent value)? messageStart, @@ -3894,8 +6062,11 @@ mixin _$MessageStreamEvent { TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, TResult? Function(ContentBlockStopEvent value)? contentBlockStop, TResult? Function(PingEvent value)? ping, - }) => - throw _privateConstructorUsedError; + }) { + return messageDelta?.call(this); + } + + @override @optionalTypeArgs TResult maybeMap({ TResult Function(MessageStartEvent value)? messageStart, @@ -3906,107 +6077,97 @@ mixin _$MessageStreamEvent { TResult Function(ContentBlockStopEvent value)? contentBlockStop, TResult Function(PingEvent value)? ping, required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $MessageStreamEventCopyWith get copyWith => - throw _privateConstructorUsedError; -} + }) { + if (messageDelta != null) { + return messageDelta(this); + } + return orElse(); + } -/// @nodoc -abstract class $MessageStreamEventCopyWith<$Res> { - factory $MessageStreamEventCopyWith( - MessageStreamEvent value, $Res Function(MessageStreamEvent) then) = - _$MessageStreamEventCopyWithImpl<$Res, MessageStreamEvent>; - @useResult - $Res call({MessageStreamEventType type}); + @override + Map toJson() { + return _$$MessageDeltaEventImplToJson( + this, + ); + } } -/// @nodoc -class _$MessageStreamEventCopyWithImpl<$Res, $Val extends MessageStreamEvent> - implements $MessageStreamEventCopyWith<$Res> { - _$MessageStreamEventCopyWithImpl(this._value, this._then); +abstract class MessageDeltaEvent extends MessageStreamEvent { + const factory MessageDeltaEvent( + {required final MessageDelta delta, + required final MessageStreamEventType type, + required final MessageDeltaUsage usage}) = _$MessageDeltaEventImpl; + const MessageDeltaEvent._() : super._(); - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + factory MessageDeltaEvent.fromJson(Map json) = + _$MessageDeltaEventImpl.fromJson; - @pragma('vm:prefer-inline') + /// A delta in a streaming message. + MessageDelta get delta; @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, - ) as $Val); - } + + /// The type of a streaming event. + MessageStreamEventType get type; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. 
As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + MessageDeltaUsage get usage; + @override + @JsonKey(ignore: true) + _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageStartEventImplCopyWith<$Res> +abstract class _$$MessageStopEventImplCopyWith<$Res> implements $MessageStreamEventCopyWith<$Res> { - factory _$$MessageStartEventImplCopyWith(_$MessageStartEventImpl value, - $Res Function(_$MessageStartEventImpl) then) = - __$$MessageStartEventImplCopyWithImpl<$Res>; + factory _$$MessageStopEventImplCopyWith(_$MessageStopEventImpl value, + $Res Function(_$MessageStopEventImpl) then) = + __$$MessageStopEventImplCopyWithImpl<$Res>; @override @useResult - $Res call({Message message, MessageStreamEventType type}); - - $MessageCopyWith<$Res> get message; + $Res call({MessageStreamEventType type}); } /// @nodoc -class __$$MessageStartEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStartEventImpl> - implements _$$MessageStartEventImplCopyWith<$Res> { - __$$MessageStartEventImplCopyWithImpl(_$MessageStartEventImpl _value, - $Res Function(_$MessageStartEventImpl) _then) +class __$$MessageStopEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStopEventImpl> + implements _$$MessageStopEventImplCopyWith<$Res> { + __$$MessageStopEventImplCopyWithImpl(_$MessageStopEventImpl _value, + $Res Function(_$MessageStopEventImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? message = null, Object? type = null, }) { - return _then(_$MessageStartEventImpl( - message: null == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as Message, + return _then(_$MessageStopEventImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as MessageStreamEventType, )); } - - @override - @pragma('vm:prefer-inline') - $MessageCopyWith<$Res> get message { - return $MessageCopyWith<$Res>(_value.message, (value) { - return _then(_value.copyWith(message: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$MessageStartEventImpl extends MessageStartEvent { - const _$MessageStartEventImpl({required this.message, required this.type}) - : super._(); - - factory _$MessageStartEventImpl.fromJson(Map json) => - _$$MessageStartEventImplFromJson(json); +class _$MessageStopEventImpl extends MessageStopEvent { + const _$MessageStopEventImpl({required this.type}) : super._(); - /// A message in a chat conversation. - @override - final Message message; + factory _$MessageStopEventImpl.fromJson(Map json) => + _$$MessageStopEventImplFromJson(json); /// The type of a streaming event. 
@override @@ -4014,27 +6175,26 @@ class _$MessageStartEventImpl extends MessageStartEvent { @override String toString() { - return 'MessageStreamEvent.messageStart(message: $message, type: $type)'; + return 'MessageStreamEvent.messageStop(type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageStartEventImpl && - (identical(other.message, message) || other.message == message) && + other is _$MessageStopEventImpl && (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, message, type); + int get hashCode => Object.hash(runtimeType, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => - __$$MessageStartEventImplCopyWithImpl<_$MessageStartEventImpl>( + _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => + __$$MessageStopEventImplCopyWithImpl<_$MessageStopEventImpl>( this, _$identity); @override @@ -4047,18 +6207,18 @@ class _$MessageStartEventImpl extends MessageStartEvent { messageDelta, required TResult Function(MessageStreamEventType type) messageStop, required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, + @JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type) contentBlockStart, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) + BlockDelta delta, int index, MessageStreamEventType type) contentBlockDelta, required TResult Function(int index, MessageStreamEventType type) contentBlockStop, required TResult Function(MessageStreamEventType type) ping, }) { - return messageStart(message, type); + return messageStop(type); } @override @@ -4070,16 +6230,15 @@ class _$MessageStartEventImpl extends MessageStartEvent { MessageDeltaUsage usage)? messageDelta, TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, TResult? Function(MessageStreamEventType type)? ping, }) { - return messageStart?.call(message, type); + return messageStop?.call(type); } @override @@ -4091,18 +6250,17 @@ class _$MessageStartEventImpl extends MessageStartEvent { MessageDeltaUsage usage)? messageDelta, TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult Function(int index, MessageStreamEventType type)? contentBlockStop, TResult Function(MessageStreamEventType type)? 
ping, required TResult orElse(), }) { - if (messageStart != null) { - return messageStart(message, type); + if (messageStop != null) { + return messageStop(type); } return orElse(); } @@ -4118,7 +6276,7 @@ class _$MessageStartEventImpl extends MessageStartEvent { required TResult Function(ContentBlockStopEvent value) contentBlockStop, required TResult Function(PingEvent value) ping, }) { - return messageStart(this); + return messageStop(this); } @override @@ -4132,7 +6290,7 @@ class _$MessageStartEventImpl extends MessageStartEvent { TResult? Function(ContentBlockStopEvent value)? contentBlockStop, TResult? Function(PingEvent value)? ping, }) { - return messageStart?.call(this); + return messageStop?.call(this); } @override @@ -4147,164 +6305,148 @@ class _$MessageStartEventImpl extends MessageStartEvent { TResult Function(PingEvent value)? ping, required TResult orElse(), }) { - if (messageStart != null) { - return messageStart(this); + if (messageStop != null) { + return messageStop(this); } return orElse(); } @override Map toJson() { - return _$$MessageStartEventImplToJson( + return _$$MessageStopEventImplToJson( this, ); } } -abstract class MessageStartEvent extends MessageStreamEvent { - const factory MessageStartEvent( - {required final Message message, - required final MessageStreamEventType type}) = _$MessageStartEventImpl; - const MessageStartEvent._() : super._(); +abstract class MessageStopEvent extends MessageStreamEvent { + const factory MessageStopEvent({required final MessageStreamEventType type}) = + _$MessageStopEventImpl; + const MessageStopEvent._() : super._(); - factory MessageStartEvent.fromJson(Map json) = - _$MessageStartEventImpl.fromJson; + factory MessageStopEvent.fromJson(Map json) = + _$MessageStopEventImpl.fromJson; - /// A message in a chat conversation. - Message get message; @override /// The type of a streaming event. 
MessageStreamEventType get type; @override @JsonKey(ignore: true) - _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => + _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageDeltaEventImplCopyWith<$Res> +abstract class _$$ContentBlockStartEventImplCopyWith<$Res> implements $MessageStreamEventCopyWith<$Res> { - factory _$$MessageDeltaEventImplCopyWith(_$MessageDeltaEventImpl value, - $Res Function(_$MessageDeltaEventImpl) then) = - __$$MessageDeltaEventImplCopyWithImpl<$Res>; + factory _$$ContentBlockStartEventImplCopyWith( + _$ContentBlockStartEventImpl value, + $Res Function(_$ContentBlockStartEventImpl) then) = + __$$ContentBlockStartEventImplCopyWithImpl<$Res>; @override @useResult $Res call( - {MessageDelta delta, - MessageStreamEventType type, - MessageDeltaUsage usage}); + {@JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type}); - $MessageDeltaCopyWith<$Res> get delta; - $MessageDeltaUsageCopyWith<$Res> get usage; + $BlockCopyWith<$Res> get contentBlock; } /// @nodoc -class __$$MessageDeltaEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageDeltaEventImpl> - implements _$$MessageDeltaEventImplCopyWith<$Res> { - __$$MessageDeltaEventImplCopyWithImpl(_$MessageDeltaEventImpl _value, - $Res Function(_$MessageDeltaEventImpl) _then) +class __$$ContentBlockStartEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStartEventImpl> + implements _$$ContentBlockStartEventImplCopyWith<$Res> { + __$$ContentBlockStartEventImplCopyWithImpl( + _$ContentBlockStartEventImpl _value, + $Res Function(_$ContentBlockStartEventImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? delta = null, + Object? contentBlock = null, + Object? index = null, Object? type = null, - Object? usage = null, }) { - return _then(_$MessageDeltaEventImpl( - delta: null == delta - ? _value.delta - : delta // ignore: cast_nullable_to_non_nullable - as MessageDelta, + return _then(_$ContentBlockStartEventImpl( + contentBlock: null == contentBlock + ? _value.contentBlock + : contentBlock // ignore: cast_nullable_to_non_nullable + as Block, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, - usage: null == usage - ? 
_value.usage - : usage // ignore: cast_nullable_to_non_nullable - as MessageDeltaUsage, + as MessageStreamEventType, )); } @override @pragma('vm:prefer-inline') - $MessageDeltaCopyWith<$Res> get delta { - return $MessageDeltaCopyWith<$Res>(_value.delta, (value) { - return _then(_value.copyWith(delta: value)); - }); - } - - @override - @pragma('vm:prefer-inline') - $MessageDeltaUsageCopyWith<$Res> get usage { - return $MessageDeltaUsageCopyWith<$Res>(_value.usage, (value) { - return _then(_value.copyWith(usage: value)); + $BlockCopyWith<$Res> get contentBlock { + return $BlockCopyWith<$Res>(_value.contentBlock, (value) { + return _then(_value.copyWith(contentBlock: value)); }); } } /// @nodoc @JsonSerializable() -class _$MessageDeltaEventImpl extends MessageDeltaEvent { - const _$MessageDeltaEventImpl( - {required this.delta, required this.type, required this.usage}) +class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { + const _$ContentBlockStartEventImpl( + {@JsonKey(name: 'content_block') required this.contentBlock, + required this.index, + required this.type}) : super._(); - factory _$MessageDeltaEventImpl.fromJson(Map json) => - _$$MessageDeltaEventImplFromJson(json); + factory _$ContentBlockStartEventImpl.fromJson(Map json) => + _$$ContentBlockStartEventImplFromJson(json); - /// A delta in a streaming message. + /// A block of content in a message. + /// Any of: [TextBlock], [ImageBlock], [ToolUseBlock], [ToolResultBlock] @override - final MessageDelta delta; + @JsonKey(name: 'content_block') + final Block contentBlock; - /// The type of a streaming event. + /// The index of the content block. @override - final MessageStreamEventType type; + final int index; - /// Billing and rate-limit usage. - /// - /// Anthropic's API bills and rate-limits by token counts, as tokens represent the - /// underlying cost to our systems. - /// - /// Under the hood, the API transforms requests into a format suitable for the - /// model. The model's output then goes through a parsing stage before becoming an - /// API response. As a result, the token counts in `usage` will not match one-to-one - /// with the exact visible content of an API request or response. - /// - /// For example, `output_tokens` will be non-zero, even for an empty string response - /// from Claude. + /// The type of a streaming event. 
@override - final MessageDeltaUsage usage; + final MessageStreamEventType type; @override String toString() { - return 'MessageStreamEvent.messageDelta(delta: $delta, type: $type, usage: $usage)'; + return 'MessageStreamEvent.contentBlockStart(contentBlock: $contentBlock, index: $index, type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageDeltaEventImpl && - (identical(other.delta, delta) || other.delta == delta) && - (identical(other.type, type) || other.type == type) && - (identical(other.usage, usage) || other.usage == usage)); + other is _$ContentBlockStartEventImpl && + (identical(other.contentBlock, contentBlock) || + other.contentBlock == contentBlock) && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, delta, type, usage); + int get hashCode => Object.hash(runtimeType, contentBlock, index, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => - __$$MessageDeltaEventImplCopyWithImpl<_$MessageDeltaEventImpl>( - this, _$identity); + _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> + get copyWith => __$$ContentBlockStartEventImplCopyWithImpl< + _$ContentBlockStartEventImpl>(this, _$identity); @override @optionalTypeArgs @@ -4316,18 +6458,18 @@ class _$MessageDeltaEventImpl extends MessageDeltaEvent { messageDelta, required TResult Function(MessageStreamEventType type) messageStop, required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, + @JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type) contentBlockStart, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) + BlockDelta delta, int index, MessageStreamEventType type) contentBlockDelta, required TResult Function(int index, MessageStreamEventType type) contentBlockStop, required TResult Function(MessageStreamEventType type) ping, }) { - return messageDelta(delta, type, usage); + return contentBlockStart(contentBlock, index, type); } @override @@ -4339,16 +6481,15 @@ class _$MessageDeltaEventImpl extends MessageDeltaEvent { MessageDeltaUsage usage)? messageDelta, TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, TResult? Function(MessageStreamEventType type)? ping, }) { - return messageDelta?.call(delta, type, usage); + return contentBlockStart?.call(contentBlock, index, type); } @override @@ -4360,18 +6501,17 @@ class _$MessageDeltaEventImpl extends MessageDeltaEvent { MessageDeltaUsage usage)? messageDelta, TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? 
contentBlockStart, - TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult Function(int index, MessageStreamEventType type)? contentBlockStop, TResult Function(MessageStreamEventType type)? ping, required TResult orElse(), }) { - if (messageDelta != null) { - return messageDelta(delta, type, usage); + if (contentBlockStart != null) { + return contentBlockStart(contentBlock, index, type); } return orElse(); } @@ -4387,7 +6527,7 @@ class _$MessageDeltaEventImpl extends MessageDeltaEvent { required TResult Function(ContentBlockStopEvent value) contentBlockStop, required TResult Function(PingEvent value) ping, }) { - return messageDelta(this); + return contentBlockStart(this); } @override @@ -4401,7 +6541,7 @@ class _$MessageDeltaEventImpl extends MessageDeltaEvent { TResult? Function(ContentBlockStopEvent value)? contentBlockStop, TResult? Function(PingEvent value)? ping, }) { - return messageDelta?.call(this); + return contentBlockStart?.call(this); } @override @@ -4416,96 +6556,121 @@ class _$MessageDeltaEventImpl extends MessageDeltaEvent { TResult Function(PingEvent value)? ping, required TResult orElse(), }) { - if (messageDelta != null) { - return messageDelta(this); + if (contentBlockStart != null) { + return contentBlockStart(this); } return orElse(); } @override Map toJson() { - return _$$MessageDeltaEventImplToJson( + return _$$ContentBlockStartEventImplToJson( this, ); } } -abstract class MessageDeltaEvent extends MessageStreamEvent { - const factory MessageDeltaEvent( - {required final MessageDelta delta, - required final MessageStreamEventType type, - required final MessageDeltaUsage usage}) = _$MessageDeltaEventImpl; - const MessageDeltaEvent._() : super._(); +abstract class ContentBlockStartEvent extends MessageStreamEvent { + const factory ContentBlockStartEvent( + {@JsonKey(name: 'content_block') required final Block contentBlock, + required final int index, + required final MessageStreamEventType type}) = + _$ContentBlockStartEventImpl; + const ContentBlockStartEvent._() : super._(); - factory MessageDeltaEvent.fromJson(Map json) = - _$MessageDeltaEventImpl.fromJson; + factory ContentBlockStartEvent.fromJson(Map json) = + _$ContentBlockStartEventImpl.fromJson; - /// A delta in a streaming message. - MessageDelta get delta; + /// A block of content in a message. + /// Any of: [TextBlock], [ImageBlock], [ToolUseBlock], [ToolResultBlock] + @JsonKey(name: 'content_block') + Block get contentBlock; + + /// The index of the content block. + int get index; @override /// The type of a streaming event. MessageStreamEventType get type; - - /// Billing and rate-limit usage. - /// - /// Anthropic's API bills and rate-limits by token counts, as tokens represent the - /// underlying cost to our systems. - /// - /// Under the hood, the API transforms requests into a format suitable for the - /// model. The model's output then goes through a parsing stage before becoming an - /// API response. As a result, the token counts in `usage` will not match one-to-one - /// with the exact visible content of an API request or response. - /// - /// For example, `output_tokens` will be non-zero, even for an empty string response - /// from Claude. 
- MessageDeltaUsage get usage; @override @JsonKey(ignore: true) - _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => - throw _privateConstructorUsedError; + _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageStopEventImplCopyWith<$Res> +abstract class _$$ContentBlockDeltaEventImplCopyWith<$Res> implements $MessageStreamEventCopyWith<$Res> { - factory _$$MessageStopEventImplCopyWith(_$MessageStopEventImpl value, - $Res Function(_$MessageStopEventImpl) then) = - __$$MessageStopEventImplCopyWithImpl<$Res>; + factory _$$ContentBlockDeltaEventImplCopyWith( + _$ContentBlockDeltaEventImpl value, + $Res Function(_$ContentBlockDeltaEventImpl) then) = + __$$ContentBlockDeltaEventImplCopyWithImpl<$Res>; @override @useResult - $Res call({MessageStreamEventType type}); + $Res call({BlockDelta delta, int index, MessageStreamEventType type}); + + $BlockDeltaCopyWith<$Res> get delta; } /// @nodoc -class __$$MessageStopEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStopEventImpl> - implements _$$MessageStopEventImplCopyWith<$Res> { - __$$MessageStopEventImplCopyWithImpl(_$MessageStopEventImpl _value, - $Res Function(_$MessageStopEventImpl) _then) +class __$$ContentBlockDeltaEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockDeltaEventImpl> + implements _$$ContentBlockDeltaEventImplCopyWith<$Res> { + __$$ContentBlockDeltaEventImplCopyWithImpl( + _$ContentBlockDeltaEventImpl _value, + $Res Function(_$ContentBlockDeltaEventImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ + Object? delta = null, + Object? index = null, Object? type = null, }) { - return _then(_$MessageStopEventImpl( + return _then(_$ContentBlockDeltaEventImpl( + delta: null == delta + ? _value.delta + : delta // ignore: cast_nullable_to_non_nullable + as BlockDelta, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as MessageStreamEventType, )); } + + @override + @pragma('vm:prefer-inline') + $BlockDeltaCopyWith<$Res> get delta { + return $BlockDeltaCopyWith<$Res>(_value.delta, (value) { + return _then(_value.copyWith(delta: value)); + }); + } } /// @nodoc @JsonSerializable() -class _$MessageStopEventImpl extends MessageStopEvent { - const _$MessageStopEventImpl({required this.type}) : super._(); +class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { + const _$ContentBlockDeltaEventImpl( + {required this.delta, required this.index, required this.type}) + : super._(); - factory _$MessageStopEventImpl.fromJson(Map json) => - _$$MessageStopEventImplFromJson(json); + factory _$ContentBlockDeltaEventImpl.fromJson(Map json) => + _$$ContentBlockDeltaEventImplFromJson(json); + + /// A delta in a streaming message. + /// Any of: [TextBlockDelta], [InputJsonBlockDelta] + @override + final BlockDelta delta; + + /// The index of the content block. + @override + final int index; /// The type of a streaming event. 
@override @@ -4513,27 +6678,29 @@ class _$MessageStopEventImpl extends MessageStopEvent { @override String toString() { - return 'MessageStreamEvent.messageStop(type: $type)'; + return 'MessageStreamEvent.contentBlockDelta(delta: $delta, index: $index, type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageStopEventImpl && + other is _$ContentBlockDeltaEventImpl && + (identical(other.delta, delta) || other.delta == delta) && + (identical(other.index, index) || other.index == index) && (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type); + int get hashCode => Object.hash(runtimeType, delta, index, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => - __$$MessageStopEventImplCopyWithImpl<_$MessageStopEventImpl>( - this, _$identity); + _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> + get copyWith => __$$ContentBlockDeltaEventImplCopyWithImpl< + _$ContentBlockDeltaEventImpl>(this, _$identity); @override @optionalTypeArgs @@ -4545,18 +6712,18 @@ class _$MessageStopEventImpl extends MessageStopEvent { messageDelta, required TResult Function(MessageStreamEventType type) messageStop, required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, + @JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type) contentBlockStart, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) + BlockDelta delta, int index, MessageStreamEventType type) contentBlockDelta, required TResult Function(int index, MessageStreamEventType type) contentBlockStop, required TResult Function(MessageStreamEventType type) ping, }) { - return messageStop(type); + return contentBlockDelta(delta, index, type); } @override @@ -4568,16 +6735,15 @@ class _$MessageStopEventImpl extends MessageStopEvent { MessageDeltaUsage usage)? messageDelta, TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, TResult? Function(MessageStreamEventType type)? ping, }) { - return messageStop?.call(type); + return contentBlockDelta?.call(delta, index, type); } @override @@ -4589,18 +6755,17 @@ class _$MessageStopEventImpl extends MessageStopEvent { MessageDeltaUsage usage)? messageDelta, TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult Function(int index, MessageStreamEventType type)? contentBlockStop, TResult Function(MessageStreamEventType type)? 
ping, required TResult orElse(), }) { - if (messageStop != null) { - return messageStop(type); + if (contentBlockDelta != null) { + return contentBlockDelta(delta, index, type); } return orElse(); } @@ -4616,7 +6781,7 @@ class _$MessageStopEventImpl extends MessageStopEvent { required TResult Function(ContentBlockStopEvent value) contentBlockStop, required TResult Function(PingEvent value) ping, }) { - return messageStop(this); + return contentBlockDelta(this); } @override @@ -4630,7 +6795,7 @@ class _$MessageStopEventImpl extends MessageStopEvent { TResult? Function(ContentBlockStopEvent value)? contentBlockStop, TResult? Function(PingEvent value)? ping, }) { - return messageStop?.call(this); + return contentBlockDelta?.call(this); } @override @@ -4645,74 +6810,74 @@ class _$MessageStopEventImpl extends MessageStopEvent { TResult Function(PingEvent value)? ping, required TResult orElse(), }) { - if (messageStop != null) { - return messageStop(this); + if (contentBlockDelta != null) { + return contentBlockDelta(this); } return orElse(); } @override Map toJson() { - return _$$MessageStopEventImplToJson( + return _$$ContentBlockDeltaEventImplToJson( this, ); } } -abstract class MessageStopEvent extends MessageStreamEvent { - const factory MessageStopEvent({required final MessageStreamEventType type}) = - _$MessageStopEventImpl; - const MessageStopEvent._() : super._(); +abstract class ContentBlockDeltaEvent extends MessageStreamEvent { + const factory ContentBlockDeltaEvent( + {required final BlockDelta delta, + required final int index, + required final MessageStreamEventType type}) = + _$ContentBlockDeltaEventImpl; + const ContentBlockDeltaEvent._() : super._(); - factory MessageStopEvent.fromJson(Map json) = - _$MessageStopEventImpl.fromJson; + factory ContentBlockDeltaEvent.fromJson(Map json) = + _$ContentBlockDeltaEventImpl.fromJson; + + /// A delta in a streaming message. + /// Any of: [TextBlockDelta], [InputJsonBlockDelta] + BlockDelta get delta; + /// The index of the content block. + int get index; @override /// The type of a streaming event. 
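// --- Wire-format sketch (illustrative; not part of the generated output) ----
// The `delta` of a `content_block_delta` event is now the `BlockDelta` union:
// `text_delta` carries a text fragment, `input_json_delta` a fragment of the
// tool input JSON. The discriminator values and JSON keys below match the
// `BlockDelta` dispatcher and the `schema.g.dart` changes later in this
// patch; the concrete payload values are made up for illustration.
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

void blockDeltaExamples() {
  final textDelta =
      BlockDelta.fromJson({'type': 'text_delta', 'text': 'Hello'});
  final jsonDelta = BlockDelta.fromJson(
      {'type': 'input_json_delta', 'partial_json': '{"ticker": "AA'});
  print('$textDelta / $jsonDelta');
}
// -----------------------------------------------------------------------------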
MessageStreamEventType get type; @override @JsonKey(ignore: true) - _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => - throw _privateConstructorUsedError; + _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ContentBlockStartEventImplCopyWith<$Res> +abstract class _$$ContentBlockStopEventImplCopyWith<$Res> implements $MessageStreamEventCopyWith<$Res> { - factory _$$ContentBlockStartEventImplCopyWith( - _$ContentBlockStartEventImpl value, - $Res Function(_$ContentBlockStartEventImpl) then) = - __$$ContentBlockStartEventImplCopyWithImpl<$Res>; + factory _$$ContentBlockStopEventImplCopyWith( + _$ContentBlockStopEventImpl value, + $Res Function(_$ContentBlockStopEventImpl) then) = + __$$ContentBlockStopEventImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(name: 'content_block') TextBlock contentBlock, - int index, - MessageStreamEventType type}); + $Res call({int index, MessageStreamEventType type}); } /// @nodoc -class __$$ContentBlockStartEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStartEventImpl> - implements _$$ContentBlockStartEventImplCopyWith<$Res> { - __$$ContentBlockStartEventImplCopyWithImpl( - _$ContentBlockStartEventImpl _value, - $Res Function(_$ContentBlockStartEventImpl) _then) +class __$$ContentBlockStopEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStopEventImpl> + implements _$$ContentBlockStopEventImplCopyWith<$Res> { + __$$ContentBlockStopEventImplCopyWithImpl(_$ContentBlockStopEventImpl _value, + $Res Function(_$ContentBlockStopEventImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? contentBlock = freezed, Object? index = null, Object? type = null, }) { - return _then(_$ContentBlockStartEventImpl( - contentBlock: freezed == contentBlock - ? _value.contentBlock - : contentBlock // ignore: cast_nullable_to_non_nullable - as TextBlock, + return _then(_$ContentBlockStopEventImpl( index: null == index ? _value.index : index // ignore: cast_nullable_to_non_nullable @@ -4727,20 +6892,12 @@ class __$$ContentBlockStartEventImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { - const _$ContentBlockStartEventImpl( - {@JsonKey(name: 'content_block') required this.contentBlock, - required this.index, - required this.type}) +class _$ContentBlockStopEventImpl extends ContentBlockStopEvent { + const _$ContentBlockStopEventImpl({required this.index, required this.type}) : super._(); - factory _$ContentBlockStartEventImpl.fromJson(Map json) => - _$$ContentBlockStartEventImplFromJson(json); - - /// A block of text content. - @override - @JsonKey(name: 'content_block') - final TextBlock contentBlock; + factory _$ContentBlockStopEventImpl.fromJson(Map json) => + _$$ContentBlockStopEventImplFromJson(json); /// The index of the content block. 
@override @@ -4752,31 +6909,28 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { @override String toString() { - return 'MessageStreamEvent.contentBlockStart(contentBlock: $contentBlock, index: $index, type: $type)'; + return 'MessageStreamEvent.contentBlockStop(index: $index, type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ContentBlockStartEventImpl && - const DeepCollectionEquality() - .equals(other.contentBlock, contentBlock) && + other is _$ContentBlockStopEventImpl && (identical(other.index, index) || other.index == index) && (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, - const DeepCollectionEquality().hash(contentBlock), index, type); + int get hashCode => Object.hash(runtimeType, index, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> - get copyWith => __$$ContentBlockStartEventImplCopyWithImpl< - _$ContentBlockStartEventImpl>(this, _$identity); + _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> + get copyWith => __$$ContentBlockStopEventImplCopyWithImpl< + _$ContentBlockStopEventImpl>(this, _$identity); @override @optionalTypeArgs @@ -4788,18 +6942,18 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { messageDelta, required TResult Function(MessageStreamEventType type) messageStop, required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, + @JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type) contentBlockStart, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) + BlockDelta delta, int index, MessageStreamEventType type) contentBlockDelta, required TResult Function(int index, MessageStreamEventType type) contentBlockStop, required TResult Function(MessageStreamEventType type) ping, }) { - return contentBlockStart(contentBlock, index, type); + return contentBlockStop(index, type); } @override @@ -4811,16 +6965,15 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { MessageDeltaUsage usage)? messageDelta, TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, TResult? Function(MessageStreamEventType type)? ping, }) { - return contentBlockStart?.call(contentBlock, index, type); + return contentBlockStop?.call(index, type); } @override @@ -4832,18 +6985,17 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { MessageDeltaUsage usage)? messageDelta, TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? 
+ TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult Function(int index, MessageStreamEventType type)? contentBlockStop, TResult Function(MessageStreamEventType type)? ping, required TResult orElse(), }) { - if (contentBlockStart != null) { - return contentBlockStart(contentBlock, index, type); + if (contentBlockStop != null) { + return contentBlockStop(index, type); } return orElse(); } @@ -4859,7 +7011,7 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { required TResult Function(ContentBlockStopEvent value) contentBlockStop, required TResult Function(PingEvent value) ping, }) { - return contentBlockStart(this); + return contentBlockStop(this); } @override @@ -4873,7 +7025,7 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { TResult? Function(ContentBlockStopEvent value)? contentBlockStop, TResult? Function(PingEvent value)? ping, }) { - return contentBlockStart?.call(this); + return contentBlockStop?.call(this); } @override @@ -4888,34 +7040,29 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { TResult Function(PingEvent value)? ping, required TResult orElse(), }) { - if (contentBlockStart != null) { - return contentBlockStart(this); + if (contentBlockStop != null) { + return contentBlockStop(this); } return orElse(); } @override Map toJson() { - return _$$ContentBlockStartEventImplToJson( + return _$$ContentBlockStopEventImplToJson( this, ); } } -abstract class ContentBlockStartEvent extends MessageStreamEvent { - const factory ContentBlockStartEvent( - {@JsonKey(name: 'content_block') required final TextBlock contentBlock, - required final int index, - required final MessageStreamEventType - type}) = _$ContentBlockStartEventImpl; - const ContentBlockStartEvent._() : super._(); - - factory ContentBlockStartEvent.fromJson(Map json) = - _$ContentBlockStartEventImpl.fromJson; +abstract class ContentBlockStopEvent extends MessageStreamEvent { + const factory ContentBlockStopEvent( + {required final int index, + required final MessageStreamEventType type}) = + _$ContentBlockStopEventImpl; + const ContentBlockStopEvent._() : super._(); - /// A block of text content. - @JsonKey(name: 'content_block') - TextBlock get contentBlock; + factory ContentBlockStopEvent.fromJson(Map json) = + _$ContentBlockStopEventImpl.fromJson; /// The index of the content block. 
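// --- Streaming sketch (illustrative; not part of the generated output) ------
// End-to-end handling of the content-block events above: text fragments and
// partial tool-input JSON are buffered separately and decoded once the stream
// ends. Only the `maybeWhen`/`when` signatures visible in this patch are
// relied on; the Stream<MessageStreamEvent> is assumed to come from the
// client's streaming API, and the buffering strategy is illustrative.
import 'dart:convert';
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

Future<void> collectStream(Stream<MessageStreamEvent> events) async {
  final text = StringBuffer();
  final toolInputJson = StringBuffer();
  await for (final event in events) {
    event.maybeWhen(
      contentBlockDelta: (delta, index, type) => delta.when(
        textDelta: (t, _) => text.write(t),
        inputJsonDelta: (partialJson, _) =>
            toolInputJson.write(partialJson ?? ''),
      ),
      orElse: () {},
    );
  }
  print('assistant text: $text');
  if (toolInputJson.isNotEmpty) {
    // The accumulated fragments form the complete tool `input` JSON object.
    print('tool input: ${jsonDecode(toolInputJson.toString())}');
  }
}
// -----------------------------------------------------------------------------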
int get index; @@ -4925,82 +7072,50 @@ abstract class ContentBlockStartEvent extends MessageStreamEvent { MessageStreamEventType get type; @override @JsonKey(ignore: true) - _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> + _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ContentBlockDeltaEventImplCopyWith<$Res> +abstract class _$$PingEventImplCopyWith<$Res> implements $MessageStreamEventCopyWith<$Res> { - factory _$$ContentBlockDeltaEventImplCopyWith( - _$ContentBlockDeltaEventImpl value, - $Res Function(_$ContentBlockDeltaEventImpl) then) = - __$$ContentBlockDeltaEventImplCopyWithImpl<$Res>; + factory _$$PingEventImplCopyWith( + _$PingEventImpl value, $Res Function(_$PingEventImpl) then) = + __$$PingEventImplCopyWithImpl<$Res>; @override @useResult - $Res call({TextBlockDelta delta, int index, MessageStreamEventType type}); - - $TextBlockDeltaCopyWith<$Res> get delta; + $Res call({MessageStreamEventType type}); } /// @nodoc -class __$$ContentBlockDeltaEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockDeltaEventImpl> - implements _$$ContentBlockDeltaEventImplCopyWith<$Res> { - __$$ContentBlockDeltaEventImplCopyWithImpl( - _$ContentBlockDeltaEventImpl _value, - $Res Function(_$ContentBlockDeltaEventImpl) _then) +class __$$PingEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$PingEventImpl> + implements _$$PingEventImplCopyWith<$Res> { + __$$PingEventImplCopyWithImpl( + _$PingEventImpl _value, $Res Function(_$PingEventImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? delta = null, - Object? index = null, Object? type = null, }) { - return _then(_$ContentBlockDeltaEventImpl( - delta: null == delta - ? _value.delta - : delta // ignore: cast_nullable_to_non_nullable - as TextBlockDelta, - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$PingEventImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as MessageStreamEventType, )); } - - @override - @pragma('vm:prefer-inline') - $TextBlockDeltaCopyWith<$Res> get delta { - return $TextBlockDeltaCopyWith<$Res>(_value.delta, (value) { - return _then(_value.copyWith(delta: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { - const _$ContentBlockDeltaEventImpl( - {required this.delta, required this.index, required this.type}) - : super._(); - - factory _$ContentBlockDeltaEventImpl.fromJson(Map json) => - _$$ContentBlockDeltaEventImplFromJson(json); - - /// A delta in a streaming text block. - @override - final TextBlockDelta delta; +class _$PingEventImpl extends PingEvent { + const _$PingEventImpl({required this.type}) : super._(); - /// The index of the content block. - @override - final int index; + factory _$PingEventImpl.fromJson(Map json) => + _$$PingEventImplFromJson(json); /// The type of a streaming event. 
@override @@ -5008,29 +7123,26 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { @override String toString() { - return 'MessageStreamEvent.contentBlockDelta(delta: $delta, index: $index, type: $type)'; + return 'MessageStreamEvent.ping(type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ContentBlockDeltaEventImpl && - (identical(other.delta, delta) || other.delta == delta) && - (identical(other.index, index) || other.index == index) && + other is _$PingEventImpl && (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, delta, index, type); + int get hashCode => Object.hash(runtimeType, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> - get copyWith => __$$ContentBlockDeltaEventImplCopyWithImpl< - _$ContentBlockDeltaEventImpl>(this, _$identity); + _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => + __$$PingEventImplCopyWithImpl<_$PingEventImpl>(this, _$identity); @override @optionalTypeArgs @@ -5042,18 +7154,18 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { messageDelta, required TResult Function(MessageStreamEventType type) messageStop, required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, + @JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type) contentBlockStart, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) + BlockDelta delta, int index, MessageStreamEventType type) contentBlockDelta, required TResult Function(int index, MessageStreamEventType type) contentBlockStop, required TResult Function(MessageStreamEventType type) ping, }) { - return contentBlockDelta(delta, index, type); + return ping(type); } @override @@ -5065,16 +7177,15 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { MessageDeltaUsage usage)? messageDelta, TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, TResult? Function(MessageStreamEventType type)? ping, }) { - return contentBlockDelta?.call(delta, index, type); + return ping?.call(type); } @override @@ -5086,18 +7197,17 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { MessageDeltaUsage usage)? messageDelta, TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult Function(int index, MessageStreamEventType type)? contentBlockStop, TResult Function(MessageStreamEventType type)? 
ping, required TResult orElse(), }) { - if (contentBlockDelta != null) { - return contentBlockDelta(delta, index, type); + if (ping != null) { + return ping(type); } return orElse(); } @@ -5113,7 +7223,7 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { required TResult Function(ContentBlockStopEvent value) contentBlockStop, required TResult Function(PingEvent value) ping, }) { - return contentBlockDelta(this); + return ping(this); } @override @@ -5127,7 +7237,7 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { TResult? Function(ContentBlockStopEvent value)? contentBlockStop, TResult? Function(PingEvent value)? ping, }) { - return contentBlockDelta?.call(this); + return ping?.call(this); } @override @@ -5142,193 +7252,263 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { TResult Function(PingEvent value)? ping, required TResult orElse(), }) { - if (contentBlockDelta != null) { - return contentBlockDelta(this); + if (ping != null) { + return ping(this); } return orElse(); } @override Map toJson() { - return _$$ContentBlockDeltaEventImplToJson( + return _$$PingEventImplToJson( this, ); } } -abstract class ContentBlockDeltaEvent extends MessageStreamEvent { - const factory ContentBlockDeltaEvent( - {required final TextBlockDelta delta, - required final int index, - required final MessageStreamEventType type}) = - _$ContentBlockDeltaEventImpl; - const ContentBlockDeltaEvent._() : super._(); - - factory ContentBlockDeltaEvent.fromJson(Map json) = - _$ContentBlockDeltaEventImpl.fromJson; +abstract class PingEvent extends MessageStreamEvent { + const factory PingEvent({required final MessageStreamEventType type}) = + _$PingEventImpl; + const PingEvent._() : super._(); - /// A delta in a streaming text block. - TextBlockDelta get delta; + factory PingEvent.fromJson(Map json) = + _$PingEventImpl.fromJson; - /// The index of the content block. - int get index; @override /// The type of a streaming event. MessageStreamEventType get type; @override @JsonKey(ignore: true) - _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> - get copyWith => throw _privateConstructorUsedError; + _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +BlockDelta _$BlockDeltaFromJson(Map json) { + switch (json['type']) { + case 'text_delta': + return TextBlockDelta.fromJson(json); + case 'input_json_delta': + return InputJsonBlockDelta.fromJson(json); + + default: + throw CheckedFromJsonException( + json, 'type', 'BlockDelta', 'Invalid union type "${json['type']}"!'); + } } /// @nodoc -abstract class _$$ContentBlockStopEventImplCopyWith<$Res> - implements $MessageStreamEventCopyWith<$Res> { - factory _$$ContentBlockStopEventImplCopyWith( - _$ContentBlockStopEventImpl value, - $Res Function(_$ContentBlockStopEventImpl) then) = - __$$ContentBlockStopEventImplCopyWithImpl<$Res>; +mixin _$BlockDelta { + /// The type of content block. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) textDelta, + required TResult Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type) + inputJsonDelta, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? textDelta, + TResult? Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? 
+ inputJsonDelta, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? textDelta, + TResult Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlockDelta value) textDelta, + required TResult Function(InputJsonBlockDelta value) inputJsonDelta, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlockDelta value)? textDelta, + TResult? Function(InputJsonBlockDelta value)? inputJsonDelta, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlockDelta value)? textDelta, + TResult Function(InputJsonBlockDelta value)? inputJsonDelta, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $BlockDeltaCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $BlockDeltaCopyWith<$Res> { + factory $BlockDeltaCopyWith( + BlockDelta value, $Res Function(BlockDelta) then) = + _$BlockDeltaCopyWithImpl<$Res, BlockDelta>; + @useResult + $Res call({String type}); +} + +/// @nodoc +class _$BlockDeltaCopyWithImpl<$Res, $Val extends BlockDelta> + implements $BlockDeltaCopyWith<$Res> { + _$BlockDeltaCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$TextBlockDeltaImplCopyWith<$Res> + implements $BlockDeltaCopyWith<$Res> { + factory _$$TextBlockDeltaImplCopyWith(_$TextBlockDeltaImpl value, + $Res Function(_$TextBlockDeltaImpl) then) = + __$$TextBlockDeltaImplCopyWithImpl<$Res>; @override @useResult - $Res call({int index, MessageStreamEventType type}); + $Res call({String text, String type}); } /// @nodoc -class __$$ContentBlockStopEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStopEventImpl> - implements _$$ContentBlockStopEventImplCopyWith<$Res> { - __$$ContentBlockStopEventImplCopyWithImpl(_$ContentBlockStopEventImpl _value, - $Res Function(_$ContentBlockStopEventImpl) _then) +class __$$TextBlockDeltaImplCopyWithImpl<$Res> + extends _$BlockDeltaCopyWithImpl<$Res, _$TextBlockDeltaImpl> + implements _$$TextBlockDeltaImplCopyWith<$Res> { + __$$TextBlockDeltaImplCopyWithImpl( + _$TextBlockDeltaImpl _value, $Res Function(_$TextBlockDeltaImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, + Object? text = null, Object? type = null, }) { - return _then(_$ContentBlockStopEventImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$TextBlockDeltaImpl( + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, type: null == type ? 
_value.type : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, + as String, )); } } /// @nodoc @JsonSerializable() -class _$ContentBlockStopEventImpl extends ContentBlockStopEvent { - const _$ContentBlockStopEventImpl({required this.index, required this.type}) +class _$TextBlockDeltaImpl extends TextBlockDelta { + const _$TextBlockDeltaImpl({required this.text, required this.type}) : super._(); - factory _$ContentBlockStopEventImpl.fromJson(Map json) => - _$$ContentBlockStopEventImplFromJson(json); + factory _$TextBlockDeltaImpl.fromJson(Map json) => + _$$TextBlockDeltaImplFromJson(json); - /// The index of the content block. + /// The text delta. @override - final int index; + final String text; - /// The type of a streaming event. + /// The type of content block. @override - final MessageStreamEventType type; + final String type; @override String toString() { - return 'MessageStreamEvent.contentBlockStop(index: $index, type: $type)'; + return 'BlockDelta.textDelta(text: $text, type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ContentBlockStopEventImpl && - (identical(other.index, index) || other.index == index) && + other is _$TextBlockDeltaImpl && + (identical(other.text, text) || other.text == text) && (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, index, type); + int get hashCode => Object.hash(runtimeType, text, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> - get copyWith => __$$ContentBlockStopEventImplCopyWithImpl< - _$ContentBlockStopEventImpl>(this, _$identity); + _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => + __$$TextBlockDeltaImplCopyWithImpl<_$TextBlockDeltaImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, + required TResult Function(String text, String type) textDelta, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, - required TResult Function(int index, MessageStreamEventType type) - contentBlockStop, - required TResult Function(MessageStreamEventType type) ping, + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type) + inputJsonDelta, }) { - return contentBlockStop(index, type); + return textDelta(text, type); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(Message message, MessageStreamEventType type)? - messageStart, - TResult? Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, + TResult? Function(String text, String type)? textDelta, TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? 
- contentBlockDelta, - TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult? Function(MessageStreamEventType type)? ping, + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, }) { - return contentBlockStop?.call(index, type); + return textDelta?.call(text, type); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(Message message, MessageStreamEventType type)? - messageStart, - TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, + TResult Function(String text, String type)? textDelta, TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult Function(MessageStreamEventType type)? ping, + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, required TResult orElse(), }) { - if (contentBlockStop != null) { - return contentBlockStop(index, type); + if (textDelta != null) { + return textDelta(text, type); } return orElse(); } @@ -5336,213 +7516,188 @@ class _$ContentBlockStopEventImpl extends ContentBlockStopEvent { @override @optionalTypeArgs TResult map({ - required TResult Function(MessageStartEvent value) messageStart, - required TResult Function(MessageDeltaEvent value) messageDelta, - required TResult Function(MessageStopEvent value) messageStop, - required TResult Function(ContentBlockStartEvent value) contentBlockStart, - required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, - required TResult Function(ContentBlockStopEvent value) contentBlockStop, - required TResult Function(PingEvent value) ping, + required TResult Function(TextBlockDelta value) textDelta, + required TResult Function(InputJsonBlockDelta value) inputJsonDelta, }) { - return contentBlockStop(this); + return textDelta(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageStartEvent value)? messageStart, - TResult? Function(MessageDeltaEvent value)? messageDelta, - TResult? Function(MessageStopEvent value)? messageStop, - TResult? Function(ContentBlockStartEvent value)? contentBlockStart, - TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult? Function(ContentBlockStopEvent value)? contentBlockStop, - TResult? Function(PingEvent value)? ping, + TResult? Function(TextBlockDelta value)? textDelta, + TResult? Function(InputJsonBlockDelta value)? inputJsonDelta, }) { - return contentBlockStop?.call(this); + return textDelta?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageStartEvent value)? messageStart, - TResult Function(MessageDeltaEvent value)? messageDelta, - TResult Function(MessageStopEvent value)? messageStop, - TResult Function(ContentBlockStartEvent value)? contentBlockStart, - TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult Function(ContentBlockStopEvent value)? contentBlockStop, - TResult Function(PingEvent value)? ping, + TResult Function(TextBlockDelta value)? textDelta, + TResult Function(InputJsonBlockDelta value)? 
inputJsonDelta, required TResult orElse(), }) { - if (contentBlockStop != null) { - return contentBlockStop(this); + if (textDelta != null) { + return textDelta(this); } return orElse(); } @override Map toJson() { - return _$$ContentBlockStopEventImplToJson( + return _$$TextBlockDeltaImplToJson( this, ); } } -abstract class ContentBlockStopEvent extends MessageStreamEvent { - const factory ContentBlockStopEvent( - {required final int index, - required final MessageStreamEventType type}) = - _$ContentBlockStopEventImpl; - const ContentBlockStopEvent._() : super._(); +abstract class TextBlockDelta extends BlockDelta { + const factory TextBlockDelta( + {required final String text, + required final String type}) = _$TextBlockDeltaImpl; + const TextBlockDelta._() : super._(); - factory ContentBlockStopEvent.fromJson(Map json) = - _$ContentBlockStopEventImpl.fromJson; + factory TextBlockDelta.fromJson(Map json) = + _$TextBlockDeltaImpl.fromJson; - /// The index of the content block. - int get index; + /// The text delta. + String get text; @override - /// The type of a streaming event. - MessageStreamEventType get type; + /// The type of content block. + String get type; @override @JsonKey(ignore: true) - _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> - get copyWith => throw _privateConstructorUsedError; + _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$PingEventImplCopyWith<$Res> - implements $MessageStreamEventCopyWith<$Res> { - factory _$$PingEventImplCopyWith( - _$PingEventImpl value, $Res Function(_$PingEventImpl) then) = - __$$PingEventImplCopyWithImpl<$Res>; +abstract class _$$InputJsonBlockDeltaImplCopyWith<$Res> + implements $BlockDeltaCopyWith<$Res> { + factory _$$InputJsonBlockDeltaImplCopyWith(_$InputJsonBlockDeltaImpl value, + $Res Function(_$InputJsonBlockDeltaImpl) then) = + __$$InputJsonBlockDeltaImplCopyWithImpl<$Res>; @override @useResult - $Res call({MessageStreamEventType type}); + $Res call( + {@JsonKey(name: 'partial_json', includeIfNull: false) String? partialJson, + String type}); } /// @nodoc -class __$$PingEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$PingEventImpl> - implements _$$PingEventImplCopyWith<$Res> { - __$$PingEventImplCopyWithImpl( - _$PingEventImpl _value, $Res Function(_$PingEventImpl) _then) +class __$$InputJsonBlockDeltaImplCopyWithImpl<$Res> + extends _$BlockDeltaCopyWithImpl<$Res, _$InputJsonBlockDeltaImpl> + implements _$$InputJsonBlockDeltaImplCopyWith<$Res> { + __$$InputJsonBlockDeltaImplCopyWithImpl(_$InputJsonBlockDeltaImpl _value, + $Res Function(_$InputJsonBlockDeltaImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ + Object? partialJson = freezed, Object? type = null, }) { - return _then(_$PingEventImpl( + return _then(_$InputJsonBlockDeltaImpl( + partialJson: freezed == partialJson + ? _value.partialJson + : partialJson // ignore: cast_nullable_to_non_nullable + as String?, type: null == type ? 
_value.type : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, + as String, )); } } /// @nodoc @JsonSerializable() -class _$PingEventImpl extends PingEvent { - const _$PingEventImpl({required this.type}) : super._(); +class _$InputJsonBlockDeltaImpl extends InputJsonBlockDelta { + const _$InputJsonBlockDeltaImpl( + {@JsonKey(name: 'partial_json', includeIfNull: false) this.partialJson, + required this.type}) + : super._(); - factory _$PingEventImpl.fromJson(Map json) => - _$$PingEventImplFromJson(json); + factory _$InputJsonBlockDeltaImpl.fromJson(Map json) => + _$$InputJsonBlockDeltaImplFromJson(json); - /// The type of a streaming event. + /// The partial JSON delta. @override - final MessageStreamEventType type; + @JsonKey(name: 'partial_json', includeIfNull: false) + final String? partialJson; + + /// The type of content block. + @override + final String type; @override String toString() { - return 'MessageStreamEvent.ping(type: $type)'; + return 'BlockDelta.inputJsonDelta(partialJson: $partialJson, type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$PingEventImpl && + other is _$InputJsonBlockDeltaImpl && + (identical(other.partialJson, partialJson) || + other.partialJson == partialJson) && (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type); + int get hashCode => Object.hash(runtimeType, partialJson, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => - __$$PingEventImplCopyWithImpl<_$PingEventImpl>(this, _$identity); + _$$InputJsonBlockDeltaImplCopyWith<_$InputJsonBlockDeltaImpl> get copyWith => + __$$InputJsonBlockDeltaImplCopyWithImpl<_$InputJsonBlockDeltaImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, + required TResult Function(String text, String type) textDelta, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, - required TResult Function(int index, MessageStreamEventType type) - contentBlockStop, - required TResult Function(MessageStreamEventType type) ping, + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type) + inputJsonDelta, }) { - return ping(type); + return inputJsonDelta(partialJson, type); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(Message message, MessageStreamEventType type)? - messageStart, - TResult? Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, + TResult? Function(String text, String type)? textDelta, TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult? 
Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult? Function(MessageStreamEventType type)? ping, + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, }) { - return ping?.call(type); + return inputJsonDelta?.call(partialJson, type); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(Message message, MessageStreamEventType type)? - messageStart, - TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, + TResult Function(String text, String type)? textDelta, TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult Function(MessageStreamEventType type)? ping, + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, required TResult orElse(), }) { - if (ping != null) { - return ping(type); + if (inputJsonDelta != null) { + return inputJsonDelta(partialJson, type); } return orElse(); } @@ -5550,71 +7705,61 @@ class _$PingEventImpl extends PingEvent { @override @optionalTypeArgs TResult map({ - required TResult Function(MessageStartEvent value) messageStart, - required TResult Function(MessageDeltaEvent value) messageDelta, - required TResult Function(MessageStopEvent value) messageStop, - required TResult Function(ContentBlockStartEvent value) contentBlockStart, - required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, - required TResult Function(ContentBlockStopEvent value) contentBlockStop, - required TResult Function(PingEvent value) ping, + required TResult Function(TextBlockDelta value) textDelta, + required TResult Function(InputJsonBlockDelta value) inputJsonDelta, }) { - return ping(this); + return inputJsonDelta(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageStartEvent value)? messageStart, - TResult? Function(MessageDeltaEvent value)? messageDelta, - TResult? Function(MessageStopEvent value)? messageStop, - TResult? Function(ContentBlockStartEvent value)? contentBlockStart, - TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult? Function(ContentBlockStopEvent value)? contentBlockStop, - TResult? Function(PingEvent value)? ping, + TResult? Function(TextBlockDelta value)? textDelta, + TResult? Function(InputJsonBlockDelta value)? inputJsonDelta, }) { - return ping?.call(this); + return inputJsonDelta?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageStartEvent value)? messageStart, - TResult Function(MessageDeltaEvent value)? messageDelta, - TResult Function(MessageStopEvent value)? messageStop, - TResult Function(ContentBlockStartEvent value)? contentBlockStart, - TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult Function(ContentBlockStopEvent value)? contentBlockStop, - TResult Function(PingEvent value)? ping, + TResult Function(TextBlockDelta value)? textDelta, + TResult Function(InputJsonBlockDelta value)? 
inputJsonDelta, required TResult orElse(), }) { - if (ping != null) { - return ping(this); + if (inputJsonDelta != null) { + return inputJsonDelta(this); } return orElse(); } @override Map toJson() { - return _$$PingEventImplToJson( + return _$$InputJsonBlockDeltaImplToJson( this, ); } } -abstract class PingEvent extends MessageStreamEvent { - const factory PingEvent({required final MessageStreamEventType type}) = - _$PingEventImpl; - const PingEvent._() : super._(); +abstract class InputJsonBlockDelta extends BlockDelta { + const factory InputJsonBlockDelta( + {@JsonKey(name: 'partial_json', includeIfNull: false) + final String? partialJson, + required final String type}) = _$InputJsonBlockDeltaImpl; + const InputJsonBlockDelta._() : super._(); - factory PingEvent.fromJson(Map json) = - _$PingEventImpl.fromJson; + factory InputJsonBlockDelta.fromJson(Map json) = + _$InputJsonBlockDeltaImpl.fromJson; + /// The partial JSON delta. + @JsonKey(name: 'partial_json', includeIfNull: false) + String? get partialJson; @override - /// The type of a streaming event. - MessageStreamEventType get type; + /// The type of content block. + String get type; @override @JsonKey(ignore: true) - _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => + _$$InputJsonBlockDeltaImplCopyWith<_$InputJsonBlockDeltaImpl> get copyWith => throw _privateConstructorUsedError; } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart index b08b072f..f3dded29 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart @@ -25,6 +25,12 @@ _$CreateMessageRequestImpl _$$CreateMessageRequestImplFromJson( .toList(), system: json['system'] as String?, temperature: (json['temperature'] as num?)?.toDouble(), + toolChoice: json['tool_choice'] == null + ? null + : ToolChoice.fromJson(json['tool_choice'] as Map), + tools: (json['tools'] as List?) + ?.map((e) => Tool.fromJson(e as Map)) + .toList(), topK: (json['top_k'] as num?)?.toInt(), topP: (json['top_p'] as num?)?.toDouble(), stream: json['stream'] as bool? ?? 
false, @@ -48,6 +54,8 @@ Map _$$CreateMessageRequestImplToJson( writeNotNull('stop_sequences', instance.stopSequences); writeNotNull('system', instance.system); writeNotNull('temperature', instance.temperature); + writeNotNull('tool_choice', instance.toolChoice?.toJson()); + writeNotNull('tools', instance.tools?.map((e) => e.toJson()).toList()); writeNotNull('top_k', instance.topK); writeNotNull('top_p', instance.topP); val['stream'] = instance.stream; @@ -69,11 +77,12 @@ Map _$$ModelEnumerationImplToJson( }; const _$ModelsEnumMap = { + Models.claude35Sonnet20240620: 'claude-3-5-sonnet-20240620', + Models.claude3Haiku20240307: 'claude-3-haiku-20240307', Models.claude3Opus20240229: 'claude-3-opus-20240229', Models.claude3Sonnet20240229: 'claude-3-sonnet-20240229', - Models.claude3Haiku20240307: 'claude-3-haiku-20240307', - Models.claude21: 'claude-2.1', Models.claude20: 'claude-2.0', + Models.claude21: 'claude-2.1', Models.claudeInstant12: 'claude-instant-1.2', }; @@ -109,6 +118,33 @@ Map _$$CreateMessageRequestMetadataImplToJson( return val; } +_$ToolChoiceImpl _$$ToolChoiceImplFromJson(Map json) => + _$ToolChoiceImpl( + type: $enumDecode(_$ToolChoiceTypeEnumMap, json['type']), + name: json['name'] as String?, + ); + +Map _$$ToolChoiceImplToJson(_$ToolChoiceImpl instance) { + final val = { + 'type': _$ToolChoiceTypeEnumMap[instance.type]!, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('name', instance.name); + return val; +} + +const _$ToolChoiceTypeEnumMap = { + ToolChoiceType.auto: 'auto', + ToolChoiceType.any: 'any', + ToolChoiceType.tool: 'tool', +}; + _$MessageImpl _$$MessageImplFromJson(Map json) => _$MessageImpl( id: json['id'] as String?, @@ -153,6 +189,7 @@ const _$StopReasonEnumMap = { StopReason.endTurn: 'end_turn', StopReason.maxTokens: 'max_tokens', StopReason.stopSequence: 'stop_sequence', + StopReason.toolUse: 'tool_use', }; _$MessageContentListBlockImpl _$$MessageContentListBlockImplFromJson( @@ -185,6 +222,28 @@ Map _$$MessageContentStringImplToJson( 'runtimeType': instance.$type, }; +_$ToolImpl _$$ToolImplFromJson(Map json) => _$ToolImpl( + name: json['name'] as String, + description: json['description'] as String?, + inputSchema: json['input_schema'] as Map, + ); + +Map _$$ToolImplToJson(_$ToolImpl instance) { + final val = { + 'name': instance.name, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('description', instance.description); + val['input_schema'] = instance.inputSchema; + return val; +} + _$ImageBlockSourceImpl _$$ImageBlockSourceImplFromJson( Map json) => _$ImageBlockSourceImpl( @@ -257,19 +316,6 @@ Map _$$MessageDeltaUsageImplToJson( 'output_tokens': instance.outputTokens, }; -_$TextBlockDeltaImpl _$$TextBlockDeltaImplFromJson(Map json) => - _$TextBlockDeltaImpl( - text: json['text'] as String, - type: json['type'] as String, - ); - -Map _$$TextBlockDeltaImplToJson( - _$TextBlockDeltaImpl instance) => - { - 'text': instance.text, - 'type': instance.type, - }; - _$TextBlockImpl _$$TextBlockImplFromJson(Map json) => _$TextBlockImpl( text: json['text'] as String, @@ -294,6 +340,81 @@ Map _$$ImageBlockImplToJson(_$ImageBlockImpl instance) => 'type': instance.type, }; +_$ToolUseBlockImpl _$$ToolUseBlockImplFromJson(Map json) => + _$ToolUseBlockImpl( + id: json['id'] as String, + name: json['name'] as String, + input: json['input'] as Map, + type: json['type'] as String? ?? 
'tool_use', + ); + +Map _$$ToolUseBlockImplToJson(_$ToolUseBlockImpl instance) => + { + 'id': instance.id, + 'name': instance.name, + 'input': instance.input, + 'type': instance.type, + }; + +_$ToolResultBlockImpl _$$ToolResultBlockImplFromJson( + Map json) => + _$ToolResultBlockImpl( + toolUseId: json['tool_use_id'] as String, + content: + const _ToolResultBlockContentConverter().fromJson(json['content']), + isError: json['is_error'] as bool?, + type: json['type'] as String? ?? 'tool_result', + ); + +Map _$$ToolResultBlockImplToJson( + _$ToolResultBlockImpl instance) { + final val = { + 'tool_use_id': instance.toolUseId, + 'content': + const _ToolResultBlockContentConverter().toJson(instance.content), + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('is_error', instance.isError); + val['type'] = instance.type; + return val; +} + +_$ToolResultBlockContentListBlockImpl + _$$ToolResultBlockContentListBlockImplFromJson(Map json) => + _$ToolResultBlockContentListBlockImpl( + (json['value'] as List) + .map((e) => Block.fromJson(e as Map)) + .toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$ToolResultBlockContentListBlockImplToJson( + _$ToolResultBlockContentListBlockImpl instance) => + { + 'value': instance.value.map((e) => e.toJson()).toList(), + 'runtimeType': instance.$type, + }; + +_$ToolResultBlockContentStringImpl _$$ToolResultBlockContentStringImplFromJson( + Map json) => + _$ToolResultBlockContentStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$ToolResultBlockContentStringImplToJson( + _$ToolResultBlockContentStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + _$MessageStartEventImpl _$$MessageStartEventImplFromJson( Map json) => _$MessageStartEventImpl( @@ -350,7 +471,7 @@ _$ContentBlockStartEventImpl _$$ContentBlockStartEventImplFromJson( Map json) => _$ContentBlockStartEventImpl( contentBlock: - TextBlock.fromJson(json['content_block'] as Map), + Block.fromJson(json['content_block'] as Map), index: (json['index'] as num).toInt(), type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), ); @@ -366,7 +487,7 @@ Map _$$ContentBlockStartEventImplToJson( _$ContentBlockDeltaEventImpl _$$ContentBlockDeltaEventImplFromJson( Map json) => _$ContentBlockDeltaEventImpl( - delta: TextBlockDelta.fromJson(json['delta'] as Map), + delta: BlockDelta.fromJson(json['delta'] as Map), index: (json['index'] as num).toInt(), type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), ); @@ -402,3 +523,38 @@ Map _$$PingEventImplToJson(_$PingEventImpl instance) => { 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, }; + +_$TextBlockDeltaImpl _$$TextBlockDeltaImplFromJson(Map json) => + _$TextBlockDeltaImpl( + text: json['text'] as String, + type: json['type'] as String, + ); + +Map _$$TextBlockDeltaImplToJson( + _$TextBlockDeltaImpl instance) => + { + 'text': instance.text, + 'type': instance.type, + }; + +_$InputJsonBlockDeltaImpl _$$InputJsonBlockDeltaImplFromJson( + Map json) => + _$InputJsonBlockDeltaImpl( + partialJson: json['partial_json'] as String?, + type: json['type'] as String, + ); + +Map _$$InputJsonBlockDeltaImplToJson( + _$InputJsonBlockDeltaImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('partial_json', instance.partialJson); + val['type'] = instance.type; + return val; +} diff --git 
a/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart index 331c6207..d1950d33 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart @@ -25,4 +25,6 @@ enum StopReason { maxTokens, @JsonValue('stop_sequence') stopSequence, + @JsonValue('tool_use') + toolUse, } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart deleted file mode 100644 index fa05ffce..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart +++ /dev/null @@ -1,44 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: TextBlockDelta -// ========================================== - -/// A delta in a streaming text block. -@freezed -class TextBlockDelta with _$TextBlockDelta { - const TextBlockDelta._(); - - /// Factory constructor for TextBlockDelta - const factory TextBlockDelta({ - /// The text delta. - required String text, - - /// The type of content block. - required String type, - }) = _TextBlockDelta; - - /// Object construction from a JSON representation - factory TextBlockDelta.fromJson(Map json) => - _$TextBlockDeltaFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['text', 'type']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'text': text, - 'type': type, - }; - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart new file mode 100644 index 00000000..578701a9 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart @@ -0,0 +1,59 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: Tool +// ========================================== + +/// A tool the model may use. +@freezed +class Tool with _$Tool { + const Tool._(); + + /// Factory constructor for Tool + const factory Tool({ + /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + required String name, + + /// Description of what this tool does. + /// + /// Tool descriptions should be as detailed as possible. The more information that + /// the model has about what the tool is and how to use it, the better it will + /// perform. You can use natural language descriptions to reinforce important + /// aspects of the tool input JSON schema. + @JsonKey(includeIfNull: false) String? description, + + /// [JSON schema](https://json-schema.org/) for this tool's input. + /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. 
+ @JsonKey(name: 'input_schema') required Map inputSchema, + }) = _Tool; + + /// Object construction from a JSON representation + factory Tool.fromJson(Map json) => _$ToolFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'name', + 'description', + 'input_schema' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'name': name, + 'description': description, + 'input_schema': inputSchema, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart new file mode 100644 index 00000000..cb3d65eb --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart @@ -0,0 +1,54 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: ToolChoice +// ========================================== + +/// How the model should use the provided tools. The model can use a specific tool, +/// any available tool, or decide by itself. +/// +/// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. +/// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. +/// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. +@freezed +class ToolChoice with _$ToolChoice { + const ToolChoice._(); + + /// Factory constructor for ToolChoice + const factory ToolChoice({ + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + required ToolChoiceType type, + + /// The name of the tool to use. + @JsonKey(includeIfNull: false) String? name, + }) = _ToolChoice; + + /// Object construction from a JSON representation + factory ToolChoice.fromJson(Map json) => + _$ToolChoiceFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['type', 'name']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'type': type, + 'name': name, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart new file mode 100644 index 00000000..22b88c4d --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart @@ -0,0 +1,24 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// ENUM: ToolChoiceType +// ========================================== + +/// How the model should use the provided tools. 
The model can use a specific tool, +/// any available tool, or decide by itself. +/// +/// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. +/// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. +/// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. +enum ToolChoiceType { + @JsonValue('auto') + auto, + @JsonValue('any') + any, + @JsonValue('tool') + tool, +} diff --git a/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml b/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml index a3f60e70..5ad1f3db 100644 --- a/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml +++ b/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml @@ -57,7 +57,7 @@ components: See [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. - example: "claude-3-opus-20240229" + example: "claude-3-5-sonnet-20240620" anyOf: - type: string description: The ID of the model to use for this request. @@ -66,11 +66,12 @@ components: description: | Available models. Mind that the list may not be exhaustive nor up-to-date. enum: + - claude-3-5-sonnet-20240620 + - claude-3-haiku-20240307 - claude-3-opus-20240229 - claude-3-sonnet-20240229 - - claude-3-haiku-20240307 - - claude-2.1 - claude-2.0 + - claude-2.1 - claude-instant-1.2 messages: type: array @@ -89,6 +90,9 @@ components: If the final message uses the `assistant` role, the response content will continue immediately from the content in that message. This can be used to constrain part of the model's response. + + See [message content](https://docs.anthropic.com/en/api/messages-content) for + details on how to construct valid message objects. Example with a single `user` message: @@ -208,6 +212,81 @@ components: Note that even with `temperature` of `0.0`, the results will not be fully deterministic. + tool_choice: + $ref: '#/components/schemas/ToolChoice' + tools: + type: array + description: | + Definitions of tools that the model may use. + + If you include `tools` in your API request, the model may return `tool_use` + content blocks that represent the model's use of those tools. You can then run + those tools using the tool input generated by the model and then optionally + return results back to the model using `tool_result` content blocks. + + Each tool definition includes: + + - `name`: Name of the tool. + - `description`: Optional, but strongly-recommended description of the tool. + - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + shape that the model will produce in `tool_use` output content blocks. + + For example, if you defined `tools` as: + + ```json + [ + { + "name": "get_stock_price", + "description": "Get the current stock price for a given ticker symbol.", + "input_schema": { + "type": "object", + "properties": { + "ticker": { + "type": "string", + "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." 
+ } + }, + "required": ["ticker"] + } + } + ] + ``` + + And then asked the model "What's the S&P 500 at today?", the model might produce + `tool_use` content blocks in the response like this: + + ```json + [ + { + "type": "tool_use", + "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + "name": "get_stock_price", + "input": { "ticker": "^GSPC" } + } + ] + ``` + + You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + input, and return the following back to the model in a subsequent `user` + message: + + ```json + [ + { + "type": "tool_result", + "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + "content": "259.75 USD" + } + ] + ``` + + Tools can be used for workflows that include running client-side tools and + functions, or more generally whenever you want the model to produce a particular + JSON structure of output. + + See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + items: + $ref: '#/components/schemas/Tool' top_k: type: integer description: | @@ -254,6 +333,36 @@ components: This should be a uuid, hash value, or other opaque identifier. Anthropic may use this id to help detect abuse. Do not include any identifying information such as name, email address, or phone number. + ToolChoice: + type: object + description: | + How the model should use the provided tools. The model can use a specific tool, + any available tool, or decide by itself. + + - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + properties: + type: + $ref: "#/components/schemas/ToolChoiceType" + name: + type: string + description: The name of the tool to use. + required: + - type + ToolChoiceType: + type: string + description: | + How the model should use the provided tools. The model can use a specific tool, + any available tool, or decide by itself. + + - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + enum: + - auto + - any + - tool Message: type: object description: A message in a chat conversation. @@ -302,13 +411,42 @@ components: type: string description: The role of the messages author. enum: - - user - - assistant + - user + - assistant + Tool: + type: object + description: A tool the model may use. + properties: + name: + type: string + description: The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + description: + type: string + description: | + Description of what this tool does. + + Tool descriptions should be as detailed as possible. The more information that + the model has about what the tool is and how to use it, the better it will + perform. You can use natural language descriptions to reinforce important + aspects of the tool input JSON schema. + input_schema: + type: object + description: | + [JSON schema](https://json-schema.org/) for this tool's input. + + This defines the shape of the `input` that your tool accepts and that the model + will produce. + additionalProperties: true + required: + - name + - input_schema Block: description: A block of content in a message. 
oneOf: - $ref: "#/components/schemas/TextBlock" - $ref: "#/components/schemas/ImageBlock" + - $ref: "#/components/schemas/ToolUseBlock" + - $ref: "#/components/schemas/ToolResultBlock" discriminator: propertyName: type TextBlock: @@ -360,6 +498,61 @@ components: - data - media_type - type + ToolUseBlock: + type: object + description: The tool the model wants to use. + properties: + id: + type: string + description: | + A unique identifier for this particular tool use block. + This will be used to match up the tool results later. + example: toolu_01A09q90qw90lq917835lq9 + name: + type: string + description: The name of the tool being used. + example: get_weather + input: + type: object + description: An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. + additionalProperties: true + type: + type: string + description: The type of content block. + default: tool_use + required: + - id + - name + - input + ToolResultBlock: + type: object + description: The result of using a tool. + properties: + tool_use_id: + type: string + description: The `id` of the tool use request this is a result for. + content: + description: | + The result of the tool, as a string (e.g. `"content": "15 degrees"`) + or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). + These content blocks can use the text or image types. + oneOf: + - type: string + description: A single text block. + - type: array + description: An array of content blocks. + items: + $ref: "#/components/schemas/Block" + is_error: + type: boolean + description: Set to `true` if the tool execution resulted in an error. + type: + type: string + description: The type of content block. + default: tool_result + required: + - tool_use_id + - content StopReason: type: string description: | @@ -373,10 +566,12 @@ components: In non-streaming mode this value is always non-null. In streaming mode, it is null in the `message_start` event and non-null otherwise. + nullable: true enum: - end_turn - max_tokens - stop_sequence + - tool_use Usage: type: object description: | @@ -498,7 +693,7 @@ components: description: A start event in a streaming content block. properties: content_block: - $ref: "#/components/schemas/TextBlock" + $ref: "#/components/schemas/Block" index: type: integer description: The index of the content block. @@ -513,7 +708,7 @@ components: description: A delta event in a streaming content block. properties: delta: - $ref: "#/components/schemas/TextBlockDelta" + $ref: "#/components/schemas/BlockDelta" index: type: integer description: The index of the content block. @@ -523,6 +718,13 @@ components: - delta - index - type + BlockDelta: + description: A delta in a streaming message. + oneOf: + - $ref: "#/components/schemas/TextBlockDelta" + - $ref: "#/components/schemas/InputJsonBlockDelta" + discriminator: + propertyName: type TextBlockDelta: type: object description: A delta in a streaming text block. @@ -537,6 +739,20 @@ components: required: - text - type + InputJsonBlockDelta: + type: object + description: A delta in a streaming input JSON. + properties: + partial_json: + type: string + description: The partial JSON delta. + type: + type: string + description: The type of content block. + default: input_json_delta + required: + - text + - type ContentBlockStopEvent: type: object description: A stop event in a streaming content block. 
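
For illustration only (not part of the patch): a minimal sketch of how the new `Tool`, `ToolChoice`, and `ToolUseBlock` schema types described in the curated spec above fit together when calling the Messages API through the generated Dart client. The `AnthropicClient(apiKey: ...)` construction, the environment-variable lookup, and the weather tool are assumptions made for the example; the request shape mirrors the tool-use tests added later in this patch.

```dart
// Hedged sketch: assumes the client is constructed as `AnthropicClient(apiKey: ...)`;
// the weather tool and environment-variable lookup are illustrative placeholders.
import 'dart:io';

import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

Future<void> main() async {
  final client = AnthropicClient(
    apiKey: Platform.environment['ANTHROPIC_API_KEY'] ?? '',
  );

  // Tool definition following the `Tool` schema above (name + input_schema).
  const tool = Tool(
    name: 'get_current_weather',
    description: 'Get the current weather in a given location',
    inputSchema: {
      'type': 'object',
      'properties': {
        'location': {
          'type': 'string',
          'description': 'The city and state, e.g. San Francisco, CA',
        },
      },
      'required': ['location'],
    },
  );

  final res = await client.createMessage(
    request: CreateMessageRequest(
      model: const Model.model(Models.claude35Sonnet20240620),
      messages: [
        const Message(
          role: MessageRole.user,
          content: MessageContent.text(
            'What is the weather like in Boston right now?',
          ),
        ),
      ],
      tools: [tool],
      // `ToolChoiceType.tool` forces the model to call the named tool.
      toolChoice: ToolChoice(type: ToolChoiceType.tool, name: tool.name),
      maxTokens: 1024,
    ),
  );

  // With a forced tool choice, the first content block is a `tool_use` block
  // whose `input` conforms to the tool's `input_schema`.
  final toolUse = res.content.blocks.first as ToolUseBlock;
  print('${toolUse.name}(${toolUse.input})');

  client.endSession();
}
```

The tool output would then go back to the model in a follow-up `user` message as a `tool_result` block, the round trip exercised by the tests further below.
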
diff --git a/packages/anthropic_sdk_dart/oas/main.dart b/packages/anthropic_sdk_dart/oas/main.dart index cdeaa32c..9aa7a39b 100644 --- a/packages/anthropic_sdk_dart/oas/main.dart +++ b/packages/anthropic_sdk_dart/oas/main.dart @@ -34,6 +34,10 @@ String? _onSchemaUnionFactoryName( 'ModelString' => 'modelId', 'MessageContentListBlock' => 'blocks', 'MessageContentString' => 'text', + 'ToolResultBlockContentListBlock' => 'blocks', + 'ToolResultBlockContentString' => 'text', + 'TextBlockDelta' => 'textDelta', + 'InputJsonBlockDelta' => 'inputJsonDelta', 'MessageStartEvent' => 'messageStart', 'MessageDeltaEvent' => 'messageDelta', 'MessageStopEvent' => 'messageStop', diff --git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml index 164ba95f..650ac782 100644 --- a/packages/anthropic_sdk_dart/pubspec.yaml +++ b/packages/anthropic_sdk_dart/pubspec.yaml @@ -1,5 +1,5 @@ name: anthropic_sdk_dart -description: Dart Client for the Anthropic API (Claude 3 Opus, Sonnet, Haiku, etc.). +description: Dart Client for the Anthropic API (Claude 3.5 Sonnet, Opus, Haiku, etc.). version: 0.0.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/anthropic_sdk_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:anthropic_sdk_dart diff --git a/packages/anthropic_sdk_dart/test/messages_test.dart b/packages/anthropic_sdk_dart/test/messages_test.dart index 63bbb01e..648ad7f4 100644 --- a/packages/anthropic_sdk_dart/test/messages_test.dart +++ b/packages/anthropic_sdk_dart/test/messages_test.dart @@ -1,6 +1,8 @@ +// ignore_for_file: avoid_print @TestOn('vm') library; // Uses dart:io +import 'dart:convert'; import 'dart:io'; import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; @@ -20,9 +22,11 @@ void main() { client.endSession(); }); - test('Test call messages API', () async { + test('Test call messages API', timeout: const Timeout(Duration(minutes: 5)), + () async { const models = Models.values; for (final model in models) { + print('Testing model: ${model.name}'); final res = await client.createMessage( request: CreateMessageRequest( model: Model.model(model), @@ -62,7 +66,8 @@ void main() { } }); - test('Test call messages streaming API', () async { + test('Test call messages streaming API', + timeout: const Timeout(Duration(minutes: 5)), () async { final stream = client.createMessageStream( request: const CreateMessageRequest( model: Model.model(Models.claudeInstant12), @@ -107,7 +112,7 @@ void main() { contentBlockStart: (v) { expect(res.type, MessageStreamEventType.contentBlockStart); expect(v.index, 0); - expect(v.contentBlock.text, isEmpty); + expect(v.contentBlock.text, isNotNull); expect(v.contentBlock.type, 'text'); }, contentBlockDelta: (v) { @@ -115,13 +120,16 @@ void main() { expect(v.index, greaterThanOrEqualTo(0)); expect(v.delta.text, isNotEmpty); expect(v.delta.type, 'text_delta'); - text += v.delta.text.replaceAll(RegExp(r'[\s\n]'), ''); + text += v.delta + .mapOrNull(textDelta: (v) => v.text) + ?.replaceAll(RegExp(r'[\s\n]'), '') ?? 
+ ''; }, contentBlockStop: (v) { expect(res.type, MessageStreamEventType.contentBlockStop); expect(v.index, greaterThanOrEqualTo(0)); }, - ping: (v) { + ping: (PingEvent v) { expect(res.type, MessageStreamEventType.ping); }, ); @@ -146,5 +154,167 @@ void main() { final res = await client.createMessage(request: request); expect(res.stopReason, StopReason.maxTokens); }); + + const tool = Tool( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + test('Test tool use', () async { + final request1 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, + ); + final aiMessage1 = await client.createMessage(request: request1); + expect(aiMessage1.role, MessageRole.assistant); + + var toolUse = aiMessage1.content.blocks.first; + expect(toolUse, isA()); + toolUse = toolUse as ToolUseBlock; + + expect(toolUse.name, tool.name); + expect(toolUse.input, isNotEmpty); + expect(toolUse.input.containsKey('location'), isTrue); + expect(toolUse.input['location'], contains('Boston')); + + final toolResult = json.encode({ + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }); + + final request2 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now?', + ), + ), + Message( + role: MessageRole.assistant, + content: aiMessage1.content, + ), + Message( + role: MessageRole.user, + content: MessageContent.blocks([ + Block.toolResult( + toolUseId: toolUse.id, + content: ToolResultBlockContent.text(toolResult), + ), + ]), + ), + ], + tools: [tool], + maxTokens: 1024, + ); + final aiMessage2 = await client.createMessage(request: request2); + + expect(aiMessage2.role, MessageRole.assistant); + expect(aiMessage2.content.text, contains('22')); + }); + + test('Test tool use streaming', + timeout: const Timeout(Duration(minutes: 5)), () async { + final request1 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now in Celsius?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, + ); + final stream = client.createMessageStream( + request: request1, + ); + String inputJson = ''; + await for (final res in stream) { + res.map( + messageStart: (v) { + expect(res.type, MessageStreamEventType.messageStart); + expect(v.message.id, isNotEmpty); + expect(v.message.role, MessageRole.assistant); + expect( + v.message.model?.replaceAll(RegExp(r'[-.]'), ''), + Models.claude35Sonnet20240620.name.toLowerCase(), + ); + expect(v.message.stopReason, isNull); + expect(v.message.stopSequence, isNull); + expect(v.message.usage?.inputTokens, greaterThan(0)); + 
expect(v.message.usage?.outputTokens, greaterThan(0)); + }, + messageDelta: (v) { + expect(res.type, MessageStreamEventType.messageDelta); + expect(v.delta.stopReason, StopReason.toolUse); + expect(v.usage.outputTokens, greaterThan(0)); + }, + messageStop: (v) { + expect(res.type, MessageStreamEventType.messageStop); + }, + contentBlockStart: (v) { + expect(res.type, MessageStreamEventType.contentBlockStart); + expect(v.index, 0); + expect(v.contentBlock.type, 'tool_use'); + expect(v.contentBlock.toolUse, isNotNull); + expect(v.contentBlock.toolUse!.id, isNotEmpty); + expect(v.contentBlock.toolUse!.name, tool.name); + }, + contentBlockDelta: (v) { + expect(res.type, MessageStreamEventType.contentBlockDelta); + expect(v.index, greaterThanOrEqualTo(0)); + expect(v.delta.type, 'input_json_delta'); + inputJson += v.delta.inputJson; + }, + contentBlockStop: (v) { + expect(res.type, MessageStreamEventType.contentBlockStop); + expect(v.index, greaterThanOrEqualTo(0)); + }, + ping: (PingEvent v) { + expect(res.type, MessageStreamEventType.ping); + }, + ); + } + final input = json.decode(inputJson) as Map; + expect(input['location'], contains('Boston')); + expect(input['unit'], 'celsius'); + }); }); } From f03015aed25695a130626eb1f931de0d2a5c1901 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Jun 2024 09:35:00 +0200 Subject: [PATCH 171/251] build(deps): bump melos from 6.0.0 to 6.1.0 (#470) Bumps [melos](https://github.com/invertase/melos/tree/main/packages) from 6.0.0 to 6.1.0. - [Release notes](https://github.com/invertase/melos/releases) - [Changelog](https://github.com/invertase/melos/blob/main/CHANGELOG.md) - [Commits](https://github.com/invertase/melos/commits/melos-v6.1.0/packages) --- updated-dependencies: - dependency-name: melos dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pubspec.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pubspec.yaml b/pubspec.yaml index 70fc02f6..8373da6a 100644 --- a/pubspec.yaml +++ b/pubspec.yaml @@ -4,4 +4,4 @@ environment: sdk: ">=3.0.0 <4.0.0" dev_dependencies: - melos: 6.0.0 + melos: 6.1.0 From 02d84ae52486e160f972ed094ad774ae7b118d26 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 27 Jun 2024 17:36:24 +0200 Subject: [PATCH 172/251] feat: Add support for ChatToolChoiceRequired (#474) --- .../lib/src/chat_models/types.dart | 19 ++++++++++++++++--- .../lib/src/language_models/types.dart | 10 ++++++++++ .../src/chat_models/vertex_ai/mappers.dart | 5 +++++ .../src/chat_models/google_ai/mappers.dart | 5 +++++ .../lib/src/chat_models/mappers.dart | 3 +++ 5 files changed, 39 insertions(+), 3 deletions(-) diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index 1ada3bbe..b0ec2aa9 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -423,6 +423,8 @@ class AIChatMessageToolCall { }); /// The id of the tool to call. + /// + /// This is used to match up the tool results later. final String id; /// The name of the tool to call. @@ -716,7 +718,7 @@ class ChatMessageContentImage extends ChatMessageContent { /// Depending on the model, this can be either: /// - The base64 encoded image data - /// - A URL of the image. 
+  /// - A URL of the image (only supported by some providers)
   final String data;
 
   /// The IANA standard MIME type of the source data.
@@ -818,9 +820,12 @@ sealed class ChatToolChoice {
   /// The model does not call a tool, and responds to the end-user.
   static const none = ChatToolChoiceNone();
 
-  /// The model can pick between an end-user or calling a tool.
+  /// The model can pick between responding to the end-user or calling a tool.
   static const auto = ChatToolChoiceAuto();
 
+  /// The model must call at least one tool, but doesn’t force a particular tool.
+  static const required = ChatToolChoiceRequired();
+
   /// The model is forced to call the specified tool.
   factory ChatToolChoice.forced({required final String name}) =>
       ChatToolChoiceForced(name: name);
@@ -835,13 +840,21 @@ final class ChatToolChoiceNone extends ChatToolChoice {
 }
 
 /// {@template chat_tool_choice_auto}
-/// The model can pick between an end-user or calling a tool.
+/// The model can pick between responding to the end-user or calling a tool.
 /// {@endtemplate}
 final class ChatToolChoiceAuto extends ChatToolChoice {
   /// {@macro chat_tool_choice_auto}
   const ChatToolChoiceAuto();
 }
 
+/// {@template chat_tool_choice_required}
+/// The model must call at least one tool, but doesn’t force a particular tool.
+/// {@endtemplate}
+final class ChatToolChoiceRequired extends ChatToolChoice {
+  /// {@macro chat_tool_choice_required}
+  const ChatToolChoiceRequired();
+}
+
 /// {@template chat_tool_choice_forced}
 /// The model is forced to call the specified tool.
 /// {@endtemplate}
 final class ChatToolChoiceForced extends ChatToolChoice {
diff --git a/packages/langchain_core/lib/src/language_models/types.dart b/packages/langchain_core/lib/src/language_models/types.dart
index 8112ab37..f1475ad2 100644
--- a/packages/langchain_core/lib/src/language_models/types.dart
+++ b/packages/langchain_core/lib/src/language_models/types.dart
@@ -99,12 +99,16 @@ class LanguageModelUsage {
   });
 
   /// The number of tokens in the prompt.
+  ///
+  /// Some providers call this "input_tokens".
   final int? promptTokens;
 
   /// The total number of billable characters in the prompt if applicable.
   final int? promptBillableCharacters;
 
   /// The number of tokens in the completion.
+  ///
+  /// Some providers call this "output_tokens".
   final int? responseTokens;
 
   /// The total number of billable characters in the completion if applicable.
@@ -172,9 +176,13 @@ LanguageModelUsage{
 /// The reason the model stopped generating tokens.
 enum FinishReason {
   /// The model hit a natural stop point or a provided stop sequence.
+  ///
+  /// Some providers call this "end_turn".
   stop,
 
   /// The maximum number of tokens specified in the request was reached.
+  ///
+  /// Some providers call this "max_tokens".
   length,
 
   /// The content was flagged for content filter reasons.
@@ -184,6 +192,8 @@ enum FinishReason {
   recitation,
 
   /// The model called a tool.
+  ///
+  /// Some providers call this "tool_use".
   toolCalls,
 
   /// The finish reason is unspecified.
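
For context (not part of the patch): a minimal usage sketch of the new `ChatToolChoice.required` variant. It assumes `ChatOpenAIOptions` exposes a `toolChoice` field, as implied by the OpenAI mapper change below that translates the variant to the provider's `required` tool-choice mode; the constructor arguments and API key shown are illustrative, not authoritative.

```dart
// Hedged sketch: `ChatOpenAI`/`ChatOpenAIOptions` usage is assumed from
// langchain_openai; how `toolChoice` reaches each provider is shown by the
// mapper changes below (OpenAI `required`, Google/Vertex FunctionCallingMode.any).
import 'package:langchain_core/chat_models.dart';
import 'package:langchain_openai/langchain_openai.dart';

void main() {
  final chatModel = ChatOpenAI(
    apiKey: 'sk-...', // illustrative placeholder
    defaultOptions: const ChatOpenAIOptions(
      // The model must call at least one of the bound tools, but no
      // particular tool is forced (contrast with ChatToolChoice.forced).
      toolChoice: ChatToolChoice.required,
    ),
  );

  // Tools would be supplied via the `tools` option (or per-call options)
  // before invoking the model; omitted to keep the sketch minimal.
  assert(chatModel.defaultOptions.toolChoice is ChatToolChoiceRequired);
}
```
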
diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart index 41517a64..9c55d409 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart @@ -293,6 +293,11 @@ extension ChatToolChoiceMapper on ChatToolChoice { mode: f.FunctionCallingMode.auto, ), ), + ChatToolChoiceRequired() => f.ToolConfig( + functionCallingConfig: f.FunctionCallingConfig( + mode: f.FunctionCallingMode.any, + ), + ), final ChatToolChoiceForced t => f.ToolConfig( functionCallingConfig: f.FunctionCallingConfig( mode: f.FunctionCallingMode.any, diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart index 8bf41f84..106bf60b 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart @@ -294,6 +294,11 @@ extension ChatToolChoiceMapper on ChatToolChoice { mode: g.FunctionCallingMode.auto, ), ), + ChatToolChoiceRequired() => g.ToolConfig( + functionCallingConfig: g.FunctionCallingConfig( + mode: g.FunctionCallingMode.any, + ), + ), final ChatToolChoiceForced t => g.ToolConfig( functionCallingConfig: g.FunctionCallingConfig( mode: g.FunctionCallingMode.any, diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart index 78054bed..6b434109 100644 --- a/packages/langchain_openai/lib/src/chat_models/mappers.dart +++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart @@ -193,6 +193,9 @@ extension ChatToolChoiceMapper on ChatToolChoice { ChatToolChoiceAuto _ => const ChatCompletionToolChoiceOption.mode( ChatCompletionToolChoiceMode.auto, ), + ChatToolChoiceRequired() => const ChatCompletionToolChoiceOption.mode( + ChatCompletionToolChoiceMode.required, + ), final ChatToolChoiceForced t => ChatCompletionToolChoiceOption.tool( ChatCompletionNamedToolChoice( type: ChatCompletionNamedToolChoiceType.function, From d93f7f0141fecc63ae33338b28661637070b7746 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 27 Jun 2024 17:42:14 +0200 Subject: [PATCH 173/251] refactor: Improve schemas names in anthropic_sdk_dart (#475) --- .../lib/src/generated/schema/block.dart | 12 +- .../schema/create_message_request.dart | 12 +- .../lib/src/generated/schema/message.dart | 12 +- .../src/generated/schema/schema.freezed.dart | 445 +++++++++--------- .../lib/src/generated/schema/schema.g.dart | 60 ++- packages/anthropic_sdk_dart/oas/main.dart | 23 +- 6 files changed, 283 insertions(+), 281 deletions(-) diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart index 959a5ecb..e15126a3 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart @@ -113,12 +113,12 @@ sealed class ToolResultBlockContent with _$ToolResultBlockContent { /// An array of content blocks. const factory ToolResultBlockContent.blocks( List value, - ) = ToolResultBlockContentListBlock; + ) = ToolResultBlockContentBlocks; /// A single text block. 
const factory ToolResultBlockContent.text( String value, - ) = ToolResultBlockContentString; + ) = ToolResultBlockContentText; /// Object construction from a JSON representation factory ToolResultBlockContent.fromJson(Map json) => @@ -133,12 +133,12 @@ class _ToolResultBlockContentConverter @override ToolResultBlockContent fromJson(Object? data) { if (data is List && data.every((item) => item is Map)) { - return ToolResultBlockContentListBlock(data + return ToolResultBlockContentBlocks(data .map((i) => Block.fromJson(i as Map)) .toList(growable: false)); } if (data is String) { - return ToolResultBlockContentString(data); + return ToolResultBlockContentText(data); } throw Exception( 'Unexpected value for ToolResultBlockContent: $data', @@ -148,8 +148,8 @@ class _ToolResultBlockContentConverter @override Object? toJson(ToolResultBlockContent data) { return switch (data) { - ToolResultBlockContentListBlock(value: final v) => v, - ToolResultBlockContentString(value: final v) => v, + ToolResultBlockContentBlocks(value: final v) => v, + ToolResultBlockContentText(value: final v) => v, }; } } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart index df2f1b5b..e310adff 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart @@ -338,12 +338,12 @@ sealed class Model with _$Model { /// Available models. Mind that the list may not be exhaustive nor up-to-date. const factory Model.model( Models value, - ) = ModelEnumeration; + ) = ModelCatalog; /// The ID of the model to use for this request. const factory Model.modelId( String value, - ) = ModelString; + ) = ModelId; /// Object construction from a JSON representation factory Model.fromJson(Map json) => _$ModelFromJson(json); @@ -356,14 +356,14 @@ class _ModelConverter implements JsonConverter { @override Model fromJson(Object? data) { if (data is String && _$ModelsEnumMap.values.contains(data)) { - return ModelEnumeration( + return ModelCatalog( _$ModelsEnumMap.keys.elementAt( _$ModelsEnumMap.values.toList().indexOf(data), ), ); } if (data is String) { - return ModelString(data); + return ModelId(data); } throw Exception( 'Unexpected value for Model: $data', @@ -373,8 +373,8 @@ class _ModelConverter implements JsonConverter { @override Object? toJson(Model data) { return switch (data) { - ModelEnumeration(value: final v) => _$ModelsEnumMap[v]!, - ModelString(value: final v) => v, + ModelCatalog(value: final v) => _$ModelsEnumMap[v]!, + ModelId(value: final v) => v, }; } } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart index e8e0b298..2444ac92 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart @@ -120,12 +120,12 @@ sealed class MessageContent with _$MessageContent { /// An array of content blocks. const factory MessageContent.blocks( List value, - ) = MessageContentListBlock; + ) = MessageContentBlocks; /// A single text block. 
const factory MessageContent.text( String value, - ) = MessageContentString; + ) = MessageContentText; /// Object construction from a JSON representation factory MessageContent.fromJson(Map json) => @@ -140,12 +140,12 @@ class _MessageContentConverter @override MessageContent fromJson(Object? data) { if (data is List && data.every((item) => item is Map)) { - return MessageContentListBlock(data + return MessageContentBlocks(data .map((i) => Block.fromJson(i as Map)) .toList(growable: false)); } if (data is String) { - return MessageContentString(data); + return MessageContentText(data); } throw Exception( 'Unexpected value for MessageContent: $data', @@ -155,8 +155,8 @@ class _MessageContentConverter @override Object? toJson(MessageContent data) { return switch (data) { - MessageContentListBlock(value: final v) => v, - MessageContentString(value: final v) => v, + MessageContentBlocks(value: final v) => v, + MessageContentText(value: final v) => v, }; } } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart index 528c9b30..4045606f 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart @@ -1356,9 +1356,9 @@ abstract class _CreateMessageRequest extends CreateMessageRequest { Model _$ModelFromJson(Map json) { switch (json['runtimeType']) { case 'model': - return ModelEnumeration.fromJson(json); + return ModelCatalog.fromJson(json); case 'modelId': - return ModelString.fromJson(json); + return ModelId.fromJson(json); default: throw CheckedFromJsonException(json, 'runtimeType', 'Model', @@ -1390,20 +1390,20 @@ mixin _$Model { throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(ModelEnumeration value) model, - required TResult Function(ModelString value) modelId, + required TResult Function(ModelCatalog value) model, + required TResult Function(ModelId value) modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ModelEnumeration value)? model, - TResult? Function(ModelString value)? modelId, + TResult? Function(ModelCatalog value)? model, + TResult? Function(ModelId value)? modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(ModelEnumeration value)? model, - TResult Function(ModelString value)? modelId, + TResult Function(ModelCatalog value)? model, + TResult Function(ModelId value)? 
modelId, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -1428,20 +1428,20 @@ class _$ModelCopyWithImpl<$Res, $Val extends Model> } /// @nodoc -abstract class _$$ModelEnumerationImplCopyWith<$Res> { - factory _$$ModelEnumerationImplCopyWith(_$ModelEnumerationImpl value, - $Res Function(_$ModelEnumerationImpl) then) = - __$$ModelEnumerationImplCopyWithImpl<$Res>; +abstract class _$$ModelCatalogImplCopyWith<$Res> { + factory _$$ModelCatalogImplCopyWith( + _$ModelCatalogImpl value, $Res Function(_$ModelCatalogImpl) then) = + __$$ModelCatalogImplCopyWithImpl<$Res>; @useResult $Res call({Models value}); } /// @nodoc -class __$$ModelEnumerationImplCopyWithImpl<$Res> - extends _$ModelCopyWithImpl<$Res, _$ModelEnumerationImpl> - implements _$$ModelEnumerationImplCopyWith<$Res> { - __$$ModelEnumerationImplCopyWithImpl(_$ModelEnumerationImpl _value, - $Res Function(_$ModelEnumerationImpl) _then) +class __$$ModelCatalogImplCopyWithImpl<$Res> + extends _$ModelCopyWithImpl<$Res, _$ModelCatalogImpl> + implements _$$ModelCatalogImplCopyWith<$Res> { + __$$ModelCatalogImplCopyWithImpl( + _$ModelCatalogImpl _value, $Res Function(_$ModelCatalogImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -1449,7 +1449,7 @@ class __$$ModelEnumerationImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$ModelEnumerationImpl( + return _then(_$ModelCatalogImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable @@ -1460,13 +1460,13 @@ class __$$ModelEnumerationImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$ModelEnumerationImpl extends ModelEnumeration { - const _$ModelEnumerationImpl(this.value, {final String? $type}) +class _$ModelCatalogImpl extends ModelCatalog { + const _$ModelCatalogImpl(this.value, {final String? $type}) : $type = $type ?? 'model', super._(); - factory _$ModelEnumerationImpl.fromJson(Map json) => - _$$ModelEnumerationImplFromJson(json); + factory _$ModelCatalogImpl.fromJson(Map json) => + _$$ModelCatalogImplFromJson(json); @override final Models value; @@ -1483,7 +1483,7 @@ class _$ModelEnumerationImpl extends ModelEnumeration { bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ModelEnumerationImpl && + other is _$ModelCatalogImpl && (identical(other.value, value) || other.value == value)); } @@ -1494,9 +1494,8 @@ class _$ModelEnumerationImpl extends ModelEnumeration { @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ModelEnumerationImplCopyWith<_$ModelEnumerationImpl> get copyWith => - __$$ModelEnumerationImplCopyWithImpl<_$ModelEnumerationImpl>( - this, _$identity); + _$$ModelCatalogImplCopyWith<_$ModelCatalogImpl> get copyWith => + __$$ModelCatalogImplCopyWithImpl<_$ModelCatalogImpl>(this, _$identity); @override @optionalTypeArgs @@ -1532,8 +1531,8 @@ class _$ModelEnumerationImpl extends ModelEnumeration { @override @optionalTypeArgs TResult map({ - required TResult Function(ModelEnumeration value) model, - required TResult Function(ModelString value) modelId, + required TResult Function(ModelCatalog value) model, + required TResult Function(ModelId value) modelId, }) { return model(this); } @@ -1541,8 +1540,8 @@ class _$ModelEnumerationImpl extends ModelEnumeration { @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ModelEnumeration value)? model, - TResult? Function(ModelString value)? modelId, + TResult? Function(ModelCatalog value)? model, + TResult? Function(ModelId value)? 
modelId, }) { return model?.call(this); } @@ -1550,8 +1549,8 @@ class _$ModelEnumerationImpl extends ModelEnumeration { @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ModelEnumeration value)? model, - TResult Function(ModelString value)? modelId, + TResult Function(ModelCatalog value)? model, + TResult Function(ModelId value)? modelId, required TResult orElse(), }) { if (model != null) { @@ -1562,41 +1561,41 @@ class _$ModelEnumerationImpl extends ModelEnumeration { @override Map toJson() { - return _$$ModelEnumerationImplToJson( + return _$$ModelCatalogImplToJson( this, ); } } -abstract class ModelEnumeration extends Model { - const factory ModelEnumeration(final Models value) = _$ModelEnumerationImpl; - const ModelEnumeration._() : super._(); +abstract class ModelCatalog extends Model { + const factory ModelCatalog(final Models value) = _$ModelCatalogImpl; + const ModelCatalog._() : super._(); - factory ModelEnumeration.fromJson(Map json) = - _$ModelEnumerationImpl.fromJson; + factory ModelCatalog.fromJson(Map json) = + _$ModelCatalogImpl.fromJson; @override Models get value; @JsonKey(ignore: true) - _$$ModelEnumerationImplCopyWith<_$ModelEnumerationImpl> get copyWith => + _$$ModelCatalogImplCopyWith<_$ModelCatalogImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ModelStringImplCopyWith<$Res> { - factory _$$ModelStringImplCopyWith( - _$ModelStringImpl value, $Res Function(_$ModelStringImpl) then) = - __$$ModelStringImplCopyWithImpl<$Res>; +abstract class _$$ModelIdImplCopyWith<$Res> { + factory _$$ModelIdImplCopyWith( + _$ModelIdImpl value, $Res Function(_$ModelIdImpl) then) = + __$$ModelIdImplCopyWithImpl<$Res>; @useResult $Res call({String value}); } /// @nodoc -class __$$ModelStringImplCopyWithImpl<$Res> - extends _$ModelCopyWithImpl<$Res, _$ModelStringImpl> - implements _$$ModelStringImplCopyWith<$Res> { - __$$ModelStringImplCopyWithImpl( - _$ModelStringImpl _value, $Res Function(_$ModelStringImpl) _then) +class __$$ModelIdImplCopyWithImpl<$Res> + extends _$ModelCopyWithImpl<$Res, _$ModelIdImpl> + implements _$$ModelIdImplCopyWith<$Res> { + __$$ModelIdImplCopyWithImpl( + _$ModelIdImpl _value, $Res Function(_$ModelIdImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -1604,7 +1603,7 @@ class __$$ModelStringImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$ModelStringImpl( + return _then(_$ModelIdImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable @@ -1615,13 +1614,13 @@ class __$$ModelStringImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$ModelStringImpl extends ModelString { - const _$ModelStringImpl(this.value, {final String? $type}) +class _$ModelIdImpl extends ModelId { + const _$ModelIdImpl(this.value, {final String? $type}) : $type = $type ?? 
'modelId', super._(); - factory _$ModelStringImpl.fromJson(Map json) => - _$$ModelStringImplFromJson(json); + factory _$ModelIdImpl.fromJson(Map json) => + _$$ModelIdImplFromJson(json); @override final String value; @@ -1638,7 +1637,7 @@ class _$ModelStringImpl extends ModelString { bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ModelStringImpl && + other is _$ModelIdImpl && (identical(other.value, value) || other.value == value)); } @@ -1649,8 +1648,8 @@ class _$ModelStringImpl extends ModelString { @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ModelStringImplCopyWith<_$ModelStringImpl> get copyWith => - __$$ModelStringImplCopyWithImpl<_$ModelStringImpl>(this, _$identity); + _$$ModelIdImplCopyWith<_$ModelIdImpl> get copyWith => + __$$ModelIdImplCopyWithImpl<_$ModelIdImpl>(this, _$identity); @override @optionalTypeArgs @@ -1686,8 +1685,8 @@ class _$ModelStringImpl extends ModelString { @override @optionalTypeArgs TResult map({ - required TResult Function(ModelEnumeration value) model, - required TResult Function(ModelString value) modelId, + required TResult Function(ModelCatalog value) model, + required TResult Function(ModelId value) modelId, }) { return modelId(this); } @@ -1695,8 +1694,8 @@ class _$ModelStringImpl extends ModelString { @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ModelEnumeration value)? model, - TResult? Function(ModelString value)? modelId, + TResult? Function(ModelCatalog value)? model, + TResult? Function(ModelId value)? modelId, }) { return modelId?.call(this); } @@ -1704,8 +1703,8 @@ class _$ModelStringImpl extends ModelString { @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ModelEnumeration value)? model, - TResult Function(ModelString value)? modelId, + TResult Function(ModelCatalog value)? model, + TResult Function(ModelId value)? 
modelId, required TResult orElse(), }) { if (modelId != null) { @@ -1716,23 +1715,22 @@ class _$ModelStringImpl extends ModelString { @override Map toJson() { - return _$$ModelStringImplToJson( + return _$$ModelIdImplToJson( this, ); } } -abstract class ModelString extends Model { - const factory ModelString(final String value) = _$ModelStringImpl; - const ModelString._() : super._(); +abstract class ModelId extends Model { + const factory ModelId(final String value) = _$ModelIdImpl; + const ModelId._() : super._(); - factory ModelString.fromJson(Map json) = - _$ModelStringImpl.fromJson; + factory ModelId.fromJson(Map json) = _$ModelIdImpl.fromJson; @override String get value; @JsonKey(ignore: true) - _$$ModelStringImplCopyWith<_$ModelStringImpl> get copyWith => + _$$ModelIdImplCopyWith<_$ModelIdImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2581,9 +2579,9 @@ abstract class _Message extends Message { MessageContent _$MessageContentFromJson(Map json) { switch (json['runtimeType']) { case 'blocks': - return MessageContentListBlock.fromJson(json); + return MessageContentBlocks.fromJson(json); case 'text': - return MessageContentString.fromJson(json); + return MessageContentText.fromJson(json); default: throw CheckedFromJsonException(json, 'runtimeType', 'MessageContent', @@ -2615,20 +2613,20 @@ mixin _$MessageContent { throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(MessageContentListBlock value) blocks, - required TResult Function(MessageContentString value) text, + required TResult Function(MessageContentBlocks value) blocks, + required TResult Function(MessageContentText value) text, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentListBlock value)? blocks, - TResult? Function(MessageContentString value)? text, + TResult? Function(MessageContentBlocks value)? blocks, + TResult? Function(MessageContentText value)? text, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentListBlock value)? blocks, - TResult Function(MessageContentString value)? text, + TResult Function(MessageContentBlocks value)? blocks, + TResult Function(MessageContentText value)? 
text, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -2654,22 +2652,20 @@ class _$MessageContentCopyWithImpl<$Res, $Val extends MessageContent> } /// @nodoc -abstract class _$$MessageContentListBlockImplCopyWith<$Res> { - factory _$$MessageContentListBlockImplCopyWith( - _$MessageContentListBlockImpl value, - $Res Function(_$MessageContentListBlockImpl) then) = - __$$MessageContentListBlockImplCopyWithImpl<$Res>; +abstract class _$$MessageContentBlocksImplCopyWith<$Res> { + factory _$$MessageContentBlocksImplCopyWith(_$MessageContentBlocksImpl value, + $Res Function(_$MessageContentBlocksImpl) then) = + __$$MessageContentBlocksImplCopyWithImpl<$Res>; @useResult $Res call({List value}); } /// @nodoc -class __$$MessageContentListBlockImplCopyWithImpl<$Res> - extends _$MessageContentCopyWithImpl<$Res, _$MessageContentListBlockImpl> - implements _$$MessageContentListBlockImplCopyWith<$Res> { - __$$MessageContentListBlockImplCopyWithImpl( - _$MessageContentListBlockImpl _value, - $Res Function(_$MessageContentListBlockImpl) _then) +class __$$MessageContentBlocksImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, _$MessageContentBlocksImpl> + implements _$$MessageContentBlocksImplCopyWith<$Res> { + __$$MessageContentBlocksImplCopyWithImpl(_$MessageContentBlocksImpl _value, + $Res Function(_$MessageContentBlocksImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -2677,7 +2673,7 @@ class __$$MessageContentListBlockImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$MessageContentListBlockImpl( + return _then(_$MessageContentBlocksImpl( null == value ? _value._value : value // ignore: cast_nullable_to_non_nullable @@ -2688,15 +2684,15 @@ class __$$MessageContentListBlockImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$MessageContentListBlockImpl extends MessageContentListBlock { - const _$MessageContentListBlockImpl(final List value, +class _$MessageContentBlocksImpl extends MessageContentBlocks { + const _$MessageContentBlocksImpl(final List value, {final String? $type}) : _value = value, $type = $type ?? 
'blocks', super._(); - factory _$MessageContentListBlockImpl.fromJson(Map json) => - _$$MessageContentListBlockImplFromJson(json); + factory _$MessageContentBlocksImpl.fromJson(Map json) => + _$$MessageContentBlocksImplFromJson(json); final List _value; @override @@ -2718,7 +2714,7 @@ class _$MessageContentListBlockImpl extends MessageContentListBlock { bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentListBlockImpl && + other is _$MessageContentBlocksImpl && const DeepCollectionEquality().equals(other._value, _value)); } @@ -2730,9 +2726,10 @@ class _$MessageContentListBlockImpl extends MessageContentListBlock { @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageContentListBlockImplCopyWith<_$MessageContentListBlockImpl> - get copyWith => __$$MessageContentListBlockImplCopyWithImpl< - _$MessageContentListBlockImpl>(this, _$identity); + _$$MessageContentBlocksImplCopyWith<_$MessageContentBlocksImpl> + get copyWith => + __$$MessageContentBlocksImplCopyWithImpl<_$MessageContentBlocksImpl>( + this, _$identity); @override @optionalTypeArgs @@ -2768,8 +2765,8 @@ class _$MessageContentListBlockImpl extends MessageContentListBlock { @override @optionalTypeArgs TResult map({ - required TResult Function(MessageContentListBlock value) blocks, - required TResult Function(MessageContentString value) text, + required TResult Function(MessageContentBlocks value) blocks, + required TResult Function(MessageContentText value) text, }) { return blocks(this); } @@ -2777,8 +2774,8 @@ class _$MessageContentListBlockImpl extends MessageContentListBlock { @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentListBlock value)? blocks, - TResult? Function(MessageContentString value)? text, + TResult? Function(MessageContentBlocks value)? blocks, + TResult? Function(MessageContentText value)? text, }) { return blocks?.call(this); } @@ -2786,8 +2783,8 @@ class _$MessageContentListBlockImpl extends MessageContentListBlock { @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentListBlock value)? blocks, - TResult Function(MessageContentString value)? text, + TResult Function(MessageContentBlocks value)? blocks, + TResult Function(MessageContentText value)? 
text, required TResult orElse(), }) { if (blocks != null) { @@ -2798,42 +2795,42 @@ class _$MessageContentListBlockImpl extends MessageContentListBlock { @override Map toJson() { - return _$$MessageContentListBlockImplToJson( + return _$$MessageContentBlocksImplToJson( this, ); } } -abstract class MessageContentListBlock extends MessageContent { - const factory MessageContentListBlock(final List value) = - _$MessageContentListBlockImpl; - const MessageContentListBlock._() : super._(); +abstract class MessageContentBlocks extends MessageContent { + const factory MessageContentBlocks(final List value) = + _$MessageContentBlocksImpl; + const MessageContentBlocks._() : super._(); - factory MessageContentListBlock.fromJson(Map json) = - _$MessageContentListBlockImpl.fromJson; + factory MessageContentBlocks.fromJson(Map json) = + _$MessageContentBlocksImpl.fromJson; @override List get value; @JsonKey(ignore: true) - _$$MessageContentListBlockImplCopyWith<_$MessageContentListBlockImpl> + _$$MessageContentBlocksImplCopyWith<_$MessageContentBlocksImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageContentStringImplCopyWith<$Res> { - factory _$$MessageContentStringImplCopyWith(_$MessageContentStringImpl value, - $Res Function(_$MessageContentStringImpl) then) = - __$$MessageContentStringImplCopyWithImpl<$Res>; +abstract class _$$MessageContentTextImplCopyWith<$Res> { + factory _$$MessageContentTextImplCopyWith(_$MessageContentTextImpl value, + $Res Function(_$MessageContentTextImpl) then) = + __$$MessageContentTextImplCopyWithImpl<$Res>; @useResult $Res call({String value}); } /// @nodoc -class __$$MessageContentStringImplCopyWithImpl<$Res> - extends _$MessageContentCopyWithImpl<$Res, _$MessageContentStringImpl> - implements _$$MessageContentStringImplCopyWith<$Res> { - __$$MessageContentStringImplCopyWithImpl(_$MessageContentStringImpl _value, - $Res Function(_$MessageContentStringImpl) _then) +class __$$MessageContentTextImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, _$MessageContentTextImpl> + implements _$$MessageContentTextImplCopyWith<$Res> { + __$$MessageContentTextImplCopyWithImpl(_$MessageContentTextImpl _value, + $Res Function(_$MessageContentTextImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -2841,7 +2838,7 @@ class __$$MessageContentStringImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$MessageContentStringImpl( + return _then(_$MessageContentTextImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable @@ -2852,13 +2849,13 @@ class __$$MessageContentStringImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$MessageContentStringImpl extends MessageContentString { - const _$MessageContentStringImpl(this.value, {final String? $type}) +class _$MessageContentTextImpl extends MessageContentText { + const _$MessageContentTextImpl(this.value, {final String? $type}) : $type = $type ?? 
'text', super._(); - factory _$MessageContentStringImpl.fromJson(Map json) => - _$$MessageContentStringImplFromJson(json); + factory _$MessageContentTextImpl.fromJson(Map json) => + _$$MessageContentTextImplFromJson(json); @override final String value; @@ -2875,7 +2872,7 @@ class _$MessageContentStringImpl extends MessageContentString { bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentStringImpl && + other is _$MessageContentTextImpl && (identical(other.value, value) || other.value == value)); } @@ -2886,10 +2883,9 @@ class _$MessageContentStringImpl extends MessageContentString { @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageContentStringImplCopyWith<_$MessageContentStringImpl> - get copyWith => - __$$MessageContentStringImplCopyWithImpl<_$MessageContentStringImpl>( - this, _$identity); + _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => + __$$MessageContentTextImplCopyWithImpl<_$MessageContentTextImpl>( + this, _$identity); @override @optionalTypeArgs @@ -2925,8 +2921,8 @@ class _$MessageContentStringImpl extends MessageContentString { @override @optionalTypeArgs TResult map({ - required TResult Function(MessageContentListBlock value) blocks, - required TResult Function(MessageContentString value) text, + required TResult Function(MessageContentBlocks value) blocks, + required TResult Function(MessageContentText value) text, }) { return text(this); } @@ -2934,8 +2930,8 @@ class _$MessageContentStringImpl extends MessageContentString { @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentListBlock value)? blocks, - TResult? Function(MessageContentString value)? text, + TResult? Function(MessageContentBlocks value)? blocks, + TResult? Function(MessageContentText value)? text, }) { return text?.call(this); } @@ -2943,8 +2939,8 @@ class _$MessageContentStringImpl extends MessageContentString { @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentListBlock value)? blocks, - TResult Function(MessageContentString value)? text, + TResult Function(MessageContentBlocks value)? blocks, + TResult Function(MessageContentText value)? 
text, required TResult orElse(), }) { if (text != null) { @@ -2955,25 +2951,25 @@ class _$MessageContentStringImpl extends MessageContentString { @override Map toJson() { - return _$$MessageContentStringImplToJson( + return _$$MessageContentTextImplToJson( this, ); } } -abstract class MessageContentString extends MessageContent { - const factory MessageContentString(final String value) = - _$MessageContentStringImpl; - const MessageContentString._() : super._(); +abstract class MessageContentText extends MessageContent { + const factory MessageContentText(final String value) = + _$MessageContentTextImpl; + const MessageContentText._() : super._(); - factory MessageContentString.fromJson(Map json) = - _$MessageContentStringImpl.fromJson; + factory MessageContentText.fromJson(Map json) = + _$MessageContentTextImpl.fromJson; @override String get value; @JsonKey(ignore: true) - _$$MessageContentStringImplCopyWith<_$MessageContentStringImpl> - get copyWith => throw _privateConstructorUsedError; + _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => + throw _privateConstructorUsedError; } Tool _$ToolFromJson(Map json) { @@ -5048,9 +5044,9 @@ ToolResultBlockContent _$ToolResultBlockContentFromJson( Map json) { switch (json['runtimeType']) { case 'blocks': - return ToolResultBlockContentListBlock.fromJson(json); + return ToolResultBlockContentBlocks.fromJson(json); case 'text': - return ToolResultBlockContentString.fromJson(json); + return ToolResultBlockContentText.fromJson(json); default: throw CheckedFromJsonException( @@ -5085,20 +5081,20 @@ mixin _$ToolResultBlockContent { throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(ToolResultBlockContentListBlock value) blocks, - required TResult Function(ToolResultBlockContentString value) text, + required TResult Function(ToolResultBlockContentBlocks value) blocks, + required TResult Function(ToolResultBlockContentText value) text, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ToolResultBlockContentListBlock value)? blocks, - TResult? Function(ToolResultBlockContentString value)? text, + TResult? Function(ToolResultBlockContentBlocks value)? blocks, + TResult? Function(ToolResultBlockContentText value)? text, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(ToolResultBlockContentListBlock value)? blocks, - TResult Function(ToolResultBlockContentString value)? text, + TResult Function(ToolResultBlockContentBlocks value)? blocks, + TResult Function(ToolResultBlockContentText value)? 
text, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -5125,23 +5121,23 @@ class _$ToolResultBlockContentCopyWithImpl<$Res, } /// @nodoc -abstract class _$$ToolResultBlockContentListBlockImplCopyWith<$Res> { - factory _$$ToolResultBlockContentListBlockImplCopyWith( - _$ToolResultBlockContentListBlockImpl value, - $Res Function(_$ToolResultBlockContentListBlockImpl) then) = - __$$ToolResultBlockContentListBlockImplCopyWithImpl<$Res>; +abstract class _$$ToolResultBlockContentBlocksImplCopyWith<$Res> { + factory _$$ToolResultBlockContentBlocksImplCopyWith( + _$ToolResultBlockContentBlocksImpl value, + $Res Function(_$ToolResultBlockContentBlocksImpl) then) = + __$$ToolResultBlockContentBlocksImplCopyWithImpl<$Res>; @useResult $Res call({List value}); } /// @nodoc -class __$$ToolResultBlockContentListBlockImplCopyWithImpl<$Res> +class __$$ToolResultBlockContentBlocksImplCopyWithImpl<$Res> extends _$ToolResultBlockContentCopyWithImpl<$Res, - _$ToolResultBlockContentListBlockImpl> - implements _$$ToolResultBlockContentListBlockImplCopyWith<$Res> { - __$$ToolResultBlockContentListBlockImplCopyWithImpl( - _$ToolResultBlockContentListBlockImpl _value, - $Res Function(_$ToolResultBlockContentListBlockImpl) _then) + _$ToolResultBlockContentBlocksImpl> + implements _$$ToolResultBlockContentBlocksImplCopyWith<$Res> { + __$$ToolResultBlockContentBlocksImplCopyWithImpl( + _$ToolResultBlockContentBlocksImpl _value, + $Res Function(_$ToolResultBlockContentBlocksImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -5149,7 +5145,7 @@ class __$$ToolResultBlockContentListBlockImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$ToolResultBlockContentListBlockImpl( + return _then(_$ToolResultBlockContentBlocksImpl( null == value ? _value._value : value // ignore: cast_nullable_to_non_nullable @@ -5160,17 +5156,16 @@ class __$$ToolResultBlockContentListBlockImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$ToolResultBlockContentListBlockImpl - extends ToolResultBlockContentListBlock { - const _$ToolResultBlockContentListBlockImpl(final List value, +class _$ToolResultBlockContentBlocksImpl extends ToolResultBlockContentBlocks { + const _$ToolResultBlockContentBlocksImpl(final List value, {final String? $type}) : _value = value, $type = $type ?? 
'blocks', super._(); - factory _$ToolResultBlockContentListBlockImpl.fromJson( + factory _$ToolResultBlockContentBlocksImpl.fromJson( Map json) => - _$$ToolResultBlockContentListBlockImplFromJson(json); + _$$ToolResultBlockContentBlocksImplFromJson(json); final List _value; @override @@ -5192,7 +5187,7 @@ class _$ToolResultBlockContentListBlockImpl bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ToolResultBlockContentListBlockImpl && + other is _$ToolResultBlockContentBlocksImpl && const DeepCollectionEquality().equals(other._value, _value)); } @@ -5204,10 +5199,10 @@ class _$ToolResultBlockContentListBlockImpl @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ToolResultBlockContentListBlockImplCopyWith< - _$ToolResultBlockContentListBlockImpl> - get copyWith => __$$ToolResultBlockContentListBlockImplCopyWithImpl< - _$ToolResultBlockContentListBlockImpl>(this, _$identity); + _$$ToolResultBlockContentBlocksImplCopyWith< + _$ToolResultBlockContentBlocksImpl> + get copyWith => __$$ToolResultBlockContentBlocksImplCopyWithImpl< + _$ToolResultBlockContentBlocksImpl>(this, _$identity); @override @optionalTypeArgs @@ -5243,8 +5238,8 @@ class _$ToolResultBlockContentListBlockImpl @override @optionalTypeArgs TResult map({ - required TResult Function(ToolResultBlockContentListBlock value) blocks, - required TResult Function(ToolResultBlockContentString value) text, + required TResult Function(ToolResultBlockContentBlocks value) blocks, + required TResult Function(ToolResultBlockContentText value) text, }) { return blocks(this); } @@ -5252,8 +5247,8 @@ class _$ToolResultBlockContentListBlockImpl @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ToolResultBlockContentListBlock value)? blocks, - TResult? Function(ToolResultBlockContentString value)? text, + TResult? Function(ToolResultBlockContentBlocks value)? blocks, + TResult? Function(ToolResultBlockContentText value)? text, }) { return blocks?.call(this); } @@ -5261,8 +5256,8 @@ class _$ToolResultBlockContentListBlockImpl @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ToolResultBlockContentListBlock value)? blocks, - TResult Function(ToolResultBlockContentString value)? text, + TResult Function(ToolResultBlockContentBlocks value)? blocks, + TResult Function(ToolResultBlockContentText value)? 
text, required TResult orElse(), }) { if (blocks != null) { @@ -5273,46 +5268,46 @@ class _$ToolResultBlockContentListBlockImpl @override Map toJson() { - return _$$ToolResultBlockContentListBlockImplToJson( + return _$$ToolResultBlockContentBlocksImplToJson( this, ); } } -abstract class ToolResultBlockContentListBlock extends ToolResultBlockContent { - const factory ToolResultBlockContentListBlock(final List value) = - _$ToolResultBlockContentListBlockImpl; - const ToolResultBlockContentListBlock._() : super._(); +abstract class ToolResultBlockContentBlocks extends ToolResultBlockContent { + const factory ToolResultBlockContentBlocks(final List value) = + _$ToolResultBlockContentBlocksImpl; + const ToolResultBlockContentBlocks._() : super._(); - factory ToolResultBlockContentListBlock.fromJson(Map json) = - _$ToolResultBlockContentListBlockImpl.fromJson; + factory ToolResultBlockContentBlocks.fromJson(Map json) = + _$ToolResultBlockContentBlocksImpl.fromJson; @override List get value; @JsonKey(ignore: true) - _$$ToolResultBlockContentListBlockImplCopyWith< - _$ToolResultBlockContentListBlockImpl> + _$$ToolResultBlockContentBlocksImplCopyWith< + _$ToolResultBlockContentBlocksImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ToolResultBlockContentStringImplCopyWith<$Res> { - factory _$$ToolResultBlockContentStringImplCopyWith( - _$ToolResultBlockContentStringImpl value, - $Res Function(_$ToolResultBlockContentStringImpl) then) = - __$$ToolResultBlockContentStringImplCopyWithImpl<$Res>; +abstract class _$$ToolResultBlockContentTextImplCopyWith<$Res> { + factory _$$ToolResultBlockContentTextImplCopyWith( + _$ToolResultBlockContentTextImpl value, + $Res Function(_$ToolResultBlockContentTextImpl) then) = + __$$ToolResultBlockContentTextImplCopyWithImpl<$Res>; @useResult $Res call({String value}); } /// @nodoc -class __$$ToolResultBlockContentStringImplCopyWithImpl<$Res> +class __$$ToolResultBlockContentTextImplCopyWithImpl<$Res> extends _$ToolResultBlockContentCopyWithImpl<$Res, - _$ToolResultBlockContentStringImpl> - implements _$$ToolResultBlockContentStringImplCopyWith<$Res> { - __$$ToolResultBlockContentStringImplCopyWithImpl( - _$ToolResultBlockContentStringImpl _value, - $Res Function(_$ToolResultBlockContentStringImpl) _then) + _$ToolResultBlockContentTextImpl> + implements _$$ToolResultBlockContentTextImplCopyWith<$Res> { + __$$ToolResultBlockContentTextImplCopyWithImpl( + _$ToolResultBlockContentTextImpl _value, + $Res Function(_$ToolResultBlockContentTextImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -5320,7 +5315,7 @@ class __$$ToolResultBlockContentStringImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$ToolResultBlockContentStringImpl( + return _then(_$ToolResultBlockContentTextImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable @@ -5331,14 +5326,14 @@ class __$$ToolResultBlockContentStringImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { - const _$ToolResultBlockContentStringImpl(this.value, {final String? $type}) +class _$ToolResultBlockContentTextImpl extends ToolResultBlockContentText { + const _$ToolResultBlockContentTextImpl(this.value, {final String? $type}) : $type = $type ?? 
'text', super._(); - factory _$ToolResultBlockContentStringImpl.fromJson( + factory _$ToolResultBlockContentTextImpl.fromJson( Map json) => - _$$ToolResultBlockContentStringImplFromJson(json); + _$$ToolResultBlockContentTextImplFromJson(json); @override final String value; @@ -5355,7 +5350,7 @@ class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ToolResultBlockContentStringImpl && + other is _$ToolResultBlockContentTextImpl && (identical(other.value, value) || other.value == value)); } @@ -5366,10 +5361,9 @@ class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ToolResultBlockContentStringImplCopyWith< - _$ToolResultBlockContentStringImpl> - get copyWith => __$$ToolResultBlockContentStringImplCopyWithImpl< - _$ToolResultBlockContentStringImpl>(this, _$identity); + _$$ToolResultBlockContentTextImplCopyWith<_$ToolResultBlockContentTextImpl> + get copyWith => __$$ToolResultBlockContentTextImplCopyWithImpl< + _$ToolResultBlockContentTextImpl>(this, _$identity); @override @optionalTypeArgs @@ -5405,8 +5399,8 @@ class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { @override @optionalTypeArgs TResult map({ - required TResult Function(ToolResultBlockContentListBlock value) blocks, - required TResult Function(ToolResultBlockContentString value) text, + required TResult Function(ToolResultBlockContentBlocks value) blocks, + required TResult Function(ToolResultBlockContentText value) text, }) { return text(this); } @@ -5414,8 +5408,8 @@ class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ToolResultBlockContentListBlock value)? blocks, - TResult? Function(ToolResultBlockContentString value)? text, + TResult? Function(ToolResultBlockContentBlocks value)? blocks, + TResult? Function(ToolResultBlockContentText value)? text, }) { return text?.call(this); } @@ -5423,8 +5417,8 @@ class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ToolResultBlockContentListBlock value)? blocks, - TResult Function(ToolResultBlockContentString value)? text, + TResult Function(ToolResultBlockContentBlocks value)? blocks, + TResult Function(ToolResultBlockContentText value)? 
text, required TResult orElse(), }) { if (text != null) { @@ -5435,25 +5429,24 @@ class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { @override Map toJson() { - return _$$ToolResultBlockContentStringImplToJson( + return _$$ToolResultBlockContentTextImplToJson( this, ); } } -abstract class ToolResultBlockContentString extends ToolResultBlockContent { - const factory ToolResultBlockContentString(final String value) = - _$ToolResultBlockContentStringImpl; - const ToolResultBlockContentString._() : super._(); +abstract class ToolResultBlockContentText extends ToolResultBlockContent { + const factory ToolResultBlockContentText(final String value) = + _$ToolResultBlockContentTextImpl; + const ToolResultBlockContentText._() : super._(); - factory ToolResultBlockContentString.fromJson(Map json) = - _$ToolResultBlockContentStringImpl.fromJson; + factory ToolResultBlockContentText.fromJson(Map json) = + _$ToolResultBlockContentTextImpl.fromJson; @override String get value; @JsonKey(ignore: true) - _$$ToolResultBlockContentStringImplCopyWith< - _$ToolResultBlockContentStringImpl> + _$$ToolResultBlockContentTextImplCopyWith<_$ToolResultBlockContentTextImpl> get copyWith => throw _privateConstructorUsedError; } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart index f3dded29..dc8d9833 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart @@ -62,15 +62,13 @@ Map _$$CreateMessageRequestImplToJson( return val; } -_$ModelEnumerationImpl _$$ModelEnumerationImplFromJson( - Map json) => - _$ModelEnumerationImpl( +_$ModelCatalogImpl _$$ModelCatalogImplFromJson(Map json) => + _$ModelCatalogImpl( $enumDecode(_$ModelsEnumMap, json['value']), $type: json['runtimeType'] as String?, ); -Map _$$ModelEnumerationImplToJson( - _$ModelEnumerationImpl instance) => +Map _$$ModelCatalogImplToJson(_$ModelCatalogImpl instance) => { 'value': _$ModelsEnumMap[instance.value]!, 'runtimeType': instance.$type, @@ -86,13 +84,13 @@ const _$ModelsEnumMap = { Models.claudeInstant12: 'claude-instant-1.2', }; -_$ModelStringImpl _$$ModelStringImplFromJson(Map json) => - _$ModelStringImpl( +_$ModelIdImpl _$$ModelIdImplFromJson(Map json) => + _$ModelIdImpl( json['value'] as String, $type: json['runtimeType'] as String?, ); -Map _$$ModelStringImplToJson(_$ModelStringImpl instance) => +Map _$$ModelIdImplToJson(_$ModelIdImpl instance) => { 'value': instance.value, 'runtimeType': instance.$type, @@ -192,31 +190,31 @@ const _$StopReasonEnumMap = { StopReason.toolUse: 'tool_use', }; -_$MessageContentListBlockImpl _$$MessageContentListBlockImplFromJson( +_$MessageContentBlocksImpl _$$MessageContentBlocksImplFromJson( Map json) => - _$MessageContentListBlockImpl( + _$MessageContentBlocksImpl( (json['value'] as List) .map((e) => Block.fromJson(e as Map)) .toList(), $type: json['runtimeType'] as String?, ); -Map _$$MessageContentListBlockImplToJson( - _$MessageContentListBlockImpl instance) => +Map _$$MessageContentBlocksImplToJson( + _$MessageContentBlocksImpl instance) => { 'value': instance.value.map((e) => e.toJson()).toList(), 'runtimeType': instance.$type, }; -_$MessageContentStringImpl _$$MessageContentStringImplFromJson( +_$MessageContentTextImpl _$$MessageContentTextImplFromJson( Map json) => - _$MessageContentStringImpl( + _$MessageContentTextImpl( json['value'] as String, $type: json['runtimeType'] as String?, ); -Map 
_$$MessageContentStringImplToJson( - _$MessageContentStringImpl instance) => +Map _$$MessageContentTextImplToJson( + _$MessageContentTextImpl instance) => { 'value': instance.value, 'runtimeType': instance.$type, @@ -385,31 +383,31 @@ Map _$$ToolResultBlockImplToJson( return val; } -_$ToolResultBlockContentListBlockImpl - _$$ToolResultBlockContentListBlockImplFromJson(Map json) => - _$ToolResultBlockContentListBlockImpl( - (json['value'] as List) - .map((e) => Block.fromJson(e as Map)) - .toList(), - $type: json['runtimeType'] as String?, - ); - -Map _$$ToolResultBlockContentListBlockImplToJson( - _$ToolResultBlockContentListBlockImpl instance) => +_$ToolResultBlockContentBlocksImpl _$$ToolResultBlockContentBlocksImplFromJson( + Map json) => + _$ToolResultBlockContentBlocksImpl( + (json['value'] as List) + .map((e) => Block.fromJson(e as Map)) + .toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$ToolResultBlockContentBlocksImplToJson( + _$ToolResultBlockContentBlocksImpl instance) => { 'value': instance.value.map((e) => e.toJson()).toList(), 'runtimeType': instance.$type, }; -_$ToolResultBlockContentStringImpl _$$ToolResultBlockContentStringImplFromJson( +_$ToolResultBlockContentTextImpl _$$ToolResultBlockContentTextImplFromJson( Map json) => - _$ToolResultBlockContentStringImpl( + _$ToolResultBlockContentTextImpl( json['value'] as String, $type: json['runtimeType'] as String?, ); -Map _$$ToolResultBlockContentStringImplToJson( - _$ToolResultBlockContentStringImpl instance) => +Map _$$ToolResultBlockContentTextImplToJson( + _$ToolResultBlockContentTextImpl instance) => { 'value': instance.value, 'runtimeType': instance.$type, diff --git a/packages/anthropic_sdk_dart/oas/main.dart b/packages/anthropic_sdk_dart/oas/main.dart index 9aa7a39b..316cc26c 100644 --- a/packages/anthropic_sdk_dart/oas/main.dart +++ b/packages/anthropic_sdk_dart/oas/main.dart @@ -12,6 +12,7 @@ void main() async { destination: 'lib/src/generated/', replace: true, schemaOptions: const SchemaGeneratorOptions( + onSchemaName: _onSchemaName, onSchemaUnionFactoryName: _onSchemaUnionFactoryName, ), clientOptions: const ClientGeneratorOptions( @@ -25,17 +26,27 @@ void main() async { ); } +String? _onSchemaName(final String schemaName) => switch (schemaName) { + 'ModelEnumeration' => 'ModelCatalog', + 'ModelString' => 'ModelId', + 'MessageContentString' => 'MessageContentText', + 'MessageContentListBlock' => 'MessageContentBlocks', + 'ToolResultBlockContentListBlock' => 'ToolResultBlockContentBlocks', + 'ToolResultBlockContentString' => 'ToolResultBlockContentText', + _ => schemaName, + }; + String? 
_onSchemaUnionFactoryName( final String union, final String unionSubclass, ) => switch (unionSubclass) { - 'ModelEnumeration' => 'model', - 'ModelString' => 'modelId', - 'MessageContentListBlock' => 'blocks', - 'MessageContentString' => 'text', - 'ToolResultBlockContentListBlock' => 'blocks', - 'ToolResultBlockContentString' => 'text', + 'ModelCatalog' => 'model', + 'ModelId' => 'modelId', + 'MessageContentText' => 'text', + 'MessageContentBlocks' => 'blocks', + 'ToolResultBlockContentBlocks' => 'blocks', + 'ToolResultBlockContentText' => 'text', 'TextBlockDelta' => 'textDelta', 'InputJsonBlockDelta' => 'inputJsonDelta', 'MessageStartEvent' => 'messageStart', From 2838d9102ba8042cebd38b6f5d612243b6c439a1 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Tue, 2 Jul 2024 22:15:48 +0200 Subject: [PATCH 174/251] feat: Add extensions on ToolResultBlockContent in anthropic_sdk_dart (#476) --- .../lib/src/extensions.dart | 20 +++++++++++++++++++ packages/langchain_anthropic/README.md | 15 ++++++++++++-- 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/packages/anthropic_sdk_dart/lib/src/extensions.dart b/packages/anthropic_sdk_dart/lib/src/extensions.dart index 58b178a2..ddafbab9 100644 --- a/packages/anthropic_sdk_dart/lib/src/extensions.dart +++ b/packages/anthropic_sdk_dart/lib/src/extensions.dart @@ -20,6 +20,26 @@ extension MessageContentX on MessageContent { } } +/// Extension methods for [ToolResultBlockContent]. +extension ToolResultBlockContentX on ToolResultBlockContent { + /// Returns the text content of the tool result block content. + String get text { + return map( + text: (ToolResultBlockContentText t) => t.value, + blocks: (b) => + b.value.whereType().map((t) => t.text).join('\n'), + ); + } + + /// Returns the blocks of the tool result block content. + List get blocks { + return map( + text: (t) => [Block.text(text: t.value)], + blocks: (b) => b.value, + ); + } +} + /// Extension methods for [Block]. extension BlockX on Block { /// Returns the text content of the block. diff --git a/packages/langchain_anthropic/README.md b/packages/langchain_anthropic/README.md index 2d9f50a0..85d07866 100644 --- a/packages/langchain_anthropic/README.md +++ b/packages/langchain_anthropic/README.md @@ -1,6 +1,17 @@ -# 🦜️🔗 LangChain.dart +# 🦜️🔗 LangChain.dart / Anthropic -Anthropic module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart). +[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) +[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment) +[![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic) +[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) +[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) + +[Anthropic](https://anthropic.com) module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart). + +## Features + +- Chat models: + * `ChatAnthropic`: wrapper around [Anthropic Messages](https://docs.anthropic.com/en/api/messages) API (Claude). 
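
The `ToolResultBlockContentX` extension added in the `extensions.dart` hunk above can be exercised roughly as follows. This is a minimal, illustrative sketch based only on names visible in this patch (`ToolResultBlockContent.text`, `ToolResultBlockContent.blocks`, `Block.text`, and the new `.text`/`.blocks` getters), assuming they are exported from `package:anthropic_sdk_dart`; it is not part of the patch itself.

```dart
// Illustrative sketch only; factory and getter names are taken from the
// diffs in this patch and are assumed to be exported from the package.
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

void main() {
  // A tool result returned as a plain string.
  final plain = ToolResultBlockContent.text('22 degrees and sunny');

  // The same kind of result expressed as a list of content blocks.
  final rich = ToolResultBlockContent.blocks([
    Block.text(text: '22 degrees'),
    Block.text(text: 'and sunny'),
  ]);

  // The extension lets callers read either variant uniformly:
  // `.text` joins the text blocks with a newline, and `.blocks` wraps a
  // plain string into a single text block.
  print(plain.text); // 22 degrees and sunny
  print(rich.text); // '22 degrees' and 'and sunny' joined by a newline
  print(plain.blocks.length); // 1 (the string becomes a single Block.text)
}
```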
## License From 1d38aa48f077c368747b9923ad8bd8619571f27d Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Tue, 2 Jul 2024 22:24:12 +0200 Subject: [PATCH 175/251] feat: Add ChatAnthropic integration (#477) --- docs/_sidebar.md | 1 + .../models/chat_models/how_to/tools.md | 1 + .../chat_models/integrations/anthropic.md | 145 ++++++ .../chat_models/integrations/anthropic.dart | 109 +++++ examples/docs_examples/pubspec.lock | 14 + examples/docs_examples/pubspec.yaml | 1 + examples/docs_examples/pubspec_overrides.yaml | 6 +- packages/anthropic_sdk_dart/README.md | 1 + .../example/langchain_anthropic_example.dart | 42 +- .../lib/langchain_anthropic.dart | 2 + .../lib/src/chat_models/chat_anthropic.dart | 238 ++++++++++ .../lib/src/chat_models/chat_models.dart | 2 + .../lib/src/chat_models/mappers.dart | 431 ++++++++++++++++++ .../lib/src/chat_models/types.dart | 116 +++++ packages/langchain_anthropic/pubspec.yaml | 14 +- .../pubspec_overrides.yaml | 6 + .../test/chat_models/assets/apple.jpeg | Bin 0 -> 66803 bytes .../test/chat_models/chat_anthropic_test.dart | 293 ++++++++++++ .../lib/src/chat_models/types.dart | 2 +- 19 files changed, 1418 insertions(+), 6 deletions(-) create mode 100644 docs/modules/model_io/models/chat_models/integrations/anthropic.md create mode 100644 examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart create mode 100644 packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart create mode 100644 packages/langchain_anthropic/lib/src/chat_models/chat_models.dart create mode 100644 packages/langchain_anthropic/lib/src/chat_models/mappers.dart create mode 100644 packages/langchain_anthropic/lib/src/chat_models/types.dart create mode 100644 packages/langchain_anthropic/pubspec_overrides.yaml create mode 100644 packages/langchain_anthropic/test/chat_models/assets/apple.jpeg create mode 100644 packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart diff --git a/docs/_sidebar.md b/docs/_sidebar.md index 6ce757ba..2637ce9b 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -56,6 +56,7 @@ - [Tool calling](/modules/model_io/models/chat_models/how_to/tools.md) - [LLMChain](/modules/model_io/models/chat_models/how_to/llm_chain.md) - Integrations + - [Anthropic](/modules/model_io/models/chat_models/integrations/anthropic.md) - [OpenAI](/modules/model_io/models/chat_models/integrations/openai.md) - [Firebase Vertex AI](/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md) - [GCP Vertex AI](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md) diff --git a/docs/modules/model_io/models/chat_models/how_to/tools.md b/docs/modules/model_io/models/chat_models/how_to/tools.md index 16c12081..0303be9c 100644 --- a/docs/modules/model_io/models/chat_models/how_to/tools.md +++ b/docs/modules/model_io/models/chat_models/how_to/tools.md @@ -3,6 +3,7 @@ > We use the term "tool calling" interchangeably with "function calling". Although function calling is sometimes meant to refer to invocations of a single function, we treat all models as though they can return multiple tool or function calls in each message. 
> Tool calling is currently supported by: +> - [`ChatAnthropic`](/modules/model_io/models/chat_models/integrations/anthropic.md) > - [`ChatOpenAI`](/modules/model_io/models/chat_models/integrations/openai.md) > - [`ChatFirebaseVertexAI`](/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md) > - [`ChatGoogleGenerativeAI`](/modules/model_io/models/chat_models/integrations/googleai.md) diff --git a/docs/modules/model_io/models/chat_models/integrations/anthropic.md b/docs/modules/model_io/models/chat_models/integrations/anthropic.md new file mode 100644 index 00000000..b3e99c84 --- /dev/null +++ b/docs/modules/model_io/models/chat_models/integrations/anthropic.md @@ -0,0 +1,145 @@ +# ChatAnthropic + +Wrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API). + +## Setup + +The Anthropic API uses API keys for authentication. Visit your [API Keys](https://console.anthropic.com/settings/keys) page to retrieve the API key you'll use in your requests. + +The following models are available: +- `claude-3-5-sonnet-20240620` +- `claude-3-haiku-20240307` +- `claude-3-opus-20240229` +- `claude-3-sonnet-20240229` +- `claude-2.0` +- `claude-2.1` + +Mind that the list may not be up-to-date. See https://docs.anthropic.com/en/docs/about-claude/models for the updated list. + +## Usage + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, 'Text to translate:\n{text}'), +]); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'adore programmer.' +``` + +## Multimodal support + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), +); + +print(res.output.content); +// -> 'The fruit in the image is an apple.' +``` + +## Streaming + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser()); + +final stream = chain.stream({'max_num': '30'}); +await stream.forEach(print); +// 123 +// 456789101 +// 112131415161 +// 718192021222 +// 324252627282 +// 930 +``` + +## Tool calling + +`ChatAnthropic` supports tool calling. 
+
+Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools.
+
+Example:
+```dart
+const tool = ToolSpec(
+  name: 'get_current_weather',
+  description: 'Get the current weather in a given location',
+  inputJsonSchema: {
+    'type': 'object',
+    'properties': {
+      'location': {
+        'type': 'string',
+        'description': 'The city and state, e.g. San Francisco, CA',
+      },
+    },
+    'required': ['location'],
+  },
+);
+final chatModel = ChatAnthropic(
+  apiKey: apiKey,
+  defaultOptions: ChatAnthropicOptions(
+    model: 'claude-3-5-sonnet-20240620',
+    temperature: 0,
+    tools: [tool],
+  ),
+);
+
+final res = await chatModel.invoke(
+  PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'),
+);
+```
diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart
new file mode 100644
index 00000000..45c1cd55
--- /dev/null
+++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart
@@ -0,0 +1,109 @@
+// ignore_for_file: avoid_print
+import 'dart:convert';
+import 'dart:io';
+
+import 'package:langchain/langchain.dart';
+import 'package:langchain_anthropic/langchain_anthropic.dart';
+
+void main(final List<String> arguments) async {
+  await _invokeModel();
+  await _multiModal();
+  await _streaming();
+}
+
+Future<void> _invokeModel() async {
+  final apiKey = Platform.environment['ANTHROPIC_API_KEY'];
+
+  final chatModel = ChatAnthropic(
+    apiKey: apiKey,
+    defaultOptions: const ChatAnthropicOptions(
+      model: 'claude-3-5-sonnet-20240620',
+      temperature: 0,
+    ),
+  );
+
+  final chatPrompt = ChatPromptTemplate.fromTemplates(const [
+    (
+      ChatMessageType.system,
+      'You are a helpful assistant that translates {input_language} to {output_language}.'
+    ),
+    (ChatMessageType.human, 'Text to translate:\n{text}'),
+  ]);
+
+  final chain = chatPrompt | chatModel | const StringOutputParser();
+
+  final res = await chain.invoke({
+    'input_language': 'English',
+    'output_language': 'French',
+    'text': 'I love programming.',
+  });
+  print(res);
+  // -> 'J'adore programmer.'
+
+  chatModel.close();
+}
+
+Future<void> _multiModal() async {
+  final apiKey = Platform.environment['ANTHROPIC_API_KEY'];
+
+  final chatModel = ChatAnthropic(
+    apiKey: apiKey,
+    defaultOptions: const ChatAnthropicOptions(
+      model: 'claude-3-5-sonnet-20240620',
+      temperature: 0,
+    ),
+  );
+  final res = await chatModel.invoke(
+    PromptValue.chat([
+      ChatMessage.human(
+        ChatMessageContent.multiModal([
+          ChatMessageContent.text('What fruit is this?'),
+          ChatMessageContent.image(
+            mimeType: 'image/jpeg',
+            data: base64.encode(
+              await File('./bin/assets/apple.jpeg').readAsBytes(),
+            ),
+          ),
+        ]),
+      ),
+    ]),
+  );
+  print(res.output.content);
+  // -> 'The fruit in the image is an apple.'
+ + chatModel.close(); +} + +Future _streaming() async { + final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + + final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces or commas.', + ), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), + ]); + + final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: const ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), + ); + + final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser()); + + final stream = chain.stream({'max_num': '30'}); + await stream.forEach(print); + // 123 + // 456789101 + // 112131415161 + // 718192021222 + // 324252627282 + // 930 + + chatModel.close(); +} diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 2a5b086d..47e6b5b7 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -9,6 +9,13 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.6" + anthropic_sdk_dart: + dependency: "direct overridden" + description: + path: "../../packages/anthropic_sdk_dart" + relative: true + source: path + version: "0.0.1" args: dependency: transitive description: @@ -239,6 +246,13 @@ packages: relative: true source: path version: "0.7.2" + langchain_anthropic: + dependency: "direct main" + description: + path: "../../packages/langchain_anthropic" + relative: true + source: path + version: "0.0.1-dev.1" langchain_chroma: dependency: "direct main" description: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 3044b6d2..2f6b0f37 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -8,6 +8,7 @@ environment: dependencies: langchain: ^0.7.2 + langchain_anthropic: ^0.0.1-dev.1 langchain_chroma: ^0.2.0+5 langchain_community: 0.2.1+1 langchain_google: ^0.5.1 diff --git a/examples/docs_examples/pubspec_overrides.yaml b/examples/docs_examples/pubspec_overrides.yaml index cc3f10d6..1c756a5e 100644 --- a/examples/docs_examples/pubspec_overrides.yaml +++ b/examples/docs_examples/pubspec_overrides.yaml @@ -1,9 +1,13 @@ -# melos_managed_dependency_overrides: chromadb,langchain,langchain_chroma,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai,langchain_core,langchain_community,tavily_dart +# melos_managed_dependency_overrides: chromadb,langchain,langchain_chroma,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai,langchain_core,langchain_community,tavily_dart,anthropic_sdk_dart,langchain_anthropic dependency_overrides: + anthropic_sdk_dart: + path: ../../packages/anthropic_sdk_dart chromadb: path: ../../packages/chromadb langchain: path: ../../packages/langchain + langchain_anthropic: + path: ../../packages/langchain_anthropic langchain_chroma: path: ../../packages/langchain_chroma langchain_community: diff --git a/packages/anthropic_sdk_dart/README.md b/packages/anthropic_sdk_dart/README.md index bc8b7208..dc51d776 100644 --- a/packages/anthropic_sdk_dart/README.md +++ b/packages/anthropic_sdk_dart/README.md @@ -23,6 +23,7 @@ Unofficial Dart client for [Anthropic](https://docs.anthropic.com/en/api) API (a - [Usage](#usage) * [Authentication](#authentication) * [Messages](#messages) + * [Tool use](#tool-use) - [Advance 
Usage](#advance-usage) * [Default HTTP client](#default-http-client) * [Custom HTTP client](#custom-http-client) diff --git a/packages/langchain_anthropic/example/langchain_anthropic_example.dart b/packages/langchain_anthropic/example/langchain_anthropic_example.dart index 21f3e9f2..fabef4bd 100644 --- a/packages/langchain_anthropic/example/langchain_anthropic_example.dart +++ b/packages/langchain_anthropic/example/langchain_anthropic_example.dart @@ -1,3 +1,41 @@ -void main() { - // TODO +// ignore_for_file: avoid_print, unused_element +import 'dart:io'; + +import 'package:langchain_anthropic/langchain_anthropic.dart'; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/prompts.dart'; + +/// Check the docs for more examples: +/// https://langchaindart.dev +void main() async { + // Uncomment the example you want to run: + await _example1(); + // await _example2(); +} + +/// The most basic example of LangChain is calling a model on some input +Future _example1() async { + final openAiApiKey = Platform.environment['ANTHROPIC_API_KEY']; + final llm = ChatAnthropic( + apiKey: openAiApiKey, + defaultOptions: const ChatAnthropicOptions(temperature: 1), + ); + final ChatResult res = await llm.invoke( + PromptValue.string('Tell me a joke'), + ); + print(res); +} + +/// Instead of waiting for the full response from the model, you can stream it +/// while it's being generated +Future _example2() async { + final openAiApiKey = Platform.environment['ANTHROPIC_API_KEY']; + final llm = ChatAnthropic( + apiKey: openAiApiKey, + defaultOptions: const ChatAnthropicOptions(temperature: 1), + ); + final Stream stream = llm.stream( + PromptValue.string('Tell me a joke'), + ); + await stream.forEach((final chunk) => stdout.write(chunk.output.content)); } diff --git a/packages/langchain_anthropic/lib/langchain_anthropic.dart b/packages/langchain_anthropic/lib/langchain_anthropic.dart index d8becc4d..78ee6803 100644 --- a/packages/langchain_anthropic/lib/langchain_anthropic.dart +++ b/packages/langchain_anthropic/lib/langchain_anthropic.dart @@ -1,2 +1,4 @@ /// Anthropic module for LangChain.dart. library; + +export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart b/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart new file mode 100644 index 00000000..13a687a3 --- /dev/null +++ b/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart @@ -0,0 +1,238 @@ +import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart' as a; +import 'package:http/http.dart' as http; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_tiktoken/langchain_tiktoken.dart'; + +import 'mappers.dart'; +import 'types.dart'; + +/// Wrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) +/// (aka Claude API). +/// +/// Example: +/// ```dart +/// final chatModel = ChatAnthropic(apiKey: '...'); +/// final messages = [ +/// ChatMessage.system('You are a helpful assistant that translates English to French.'), +/// ChatMessage.humanText('I love programming.'), +/// ]; +/// final prompt = PromptValue.chat(messages); +/// final res = await llm.invoke(prompt); +/// ``` +/// +/// - Docs: https://docs.anthropic.com +/// +/// ### Authentication +/// +/// The Anthropic API uses API keys for authentication. 
Visit your +/// [API Keys](https://console.anthropic.com/settings/keys) page to retrieve +/// the API key you'll use in your requests. +/// +/// ### Available models +/// +/// The following models are available: +/// - `claude-3-5-sonnet-20240620` +/// - `claude-3-haiku-20240307` +/// - `claude-3-opus-20240229` +/// - `claude-3-sonnet-20240229` +/// - `claude-2.0` +/// - `claude-2.1` +/// +/// Mind that the list may not be up-to-date. +/// See https://docs.anthropic.com/en/docs/about-claude/models for the updated list. +/// +/// ### Call options +/// +/// You can configure the parameters that will be used when calling the +/// chat completions API in several ways: +/// +/// **Default options:** +/// +/// Use the [defaultOptions] parameter to set the default options. These +/// options will be used unless you override them when generating completions. +/// +/// ```dart +/// final chatModel = ChatAnthropic( +/// apiKey: anthropicApiKey, +/// defaultOptions: const ChatAnthropicOptions( +/// temperature: 0.9, +/// maxTokens: 100, +/// ), +/// ); +/// ``` +/// +/// **Call options:** +/// +/// You can override the default options when invoking the model: +/// +/// ```dart +/// final res = await chatModel.invoke( +/// prompt, +/// options: const ChatAnthropicOptions(temperature: 0.5), +/// ); +/// ``` +/// +/// **Bind:** +/// +/// You can also change the options in a [Runnable] pipeline using the bind +/// method. +/// +/// In this example, we are using two totally different models for each +/// question: +/// +/// ```dart +/// final chatModel = ChatAnthropic(apiKey: anthropicApiKey); +/// const outputParser = StringOutputParser(); +/// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); +/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); +/// final chain = Runnable.fromMap({ +/// 'q1': prompt1 | chatModel.bind(const ChatAnthropicOptions(model: 'claude-3-5-sonnet-20240620)) | outputParser, +/// 'q2': prompt2 | chatModel.bind(const ChatAnthropicOptions(model: 'claude-3-sonnet-20240229)) | outputParser, +/// }); +/// final res = await chain.invoke({'name': 'David'}); +/// ``` +/// +/// ### Advance +/// +/// #### Custom HTTP client +/// +/// You can always provide your own implementation of `http.Client` for further +/// customization: +/// +/// ```dart +/// final client = ChatAnthropic( +/// apiKey: 'ANTHROPIC_API_KEY', +/// client: MyHttpClient(), +/// ); +/// ``` +/// +/// #### Using a proxy +/// +/// ##### HTTP proxy +/// +/// You can use your own HTTP proxy by overriding the `baseUrl` and providing +/// your required `headers`: +/// +/// ```dart +/// final client = ChatAnthropic( +/// baseUrl: 'https://my-proxy.com', +/// headers: {'x-my-proxy-header': 'value'}, +/// ); +/// ``` +/// +/// If you need further customization, you can always provide your own +/// `http.Client`. +/// +/// ##### SOCKS5 proxy +/// +/// To use a SOCKS5 proxy, you can use the +/// [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package and a +/// custom `http.Client`. +class ChatAnthropic extends BaseChatModel { + /// Create a new [ChatAnthropic] instance. + /// + /// Main configuration options: + /// - `apiKey`: your Anthropic API key. You can find your API key in the + /// [Anthropic dashboard](https://console.anthropic.com/settings/keys). + /// - [ChatAnthropic.encoding] + /// - [ChatAnthropic.defaultOptions] + /// + /// Advance configuration options: + /// - `baseUrl`: the base URL to use. Defaults to Anthropic's API URL. 
You can + /// override this to use a different API URL, or to use a proxy. + /// - `headers`: global headers to send with every request. You can use + /// this to set custom headers, or to override the default headers. + /// - `queryParams`: global query parameters to send with every request. You + /// can use this to set custom query parameters. + /// - `client`: the HTTP client to use. You can set your own HTTP client if + /// you need further customization (e.g. to use a Socks5 proxy). + ChatAnthropic({ + final String? apiKey, + final String baseUrl = 'https://api.anthropic.com/v1', + final Map? headers, + final Map? queryParams, + final http.Client? client, + super.defaultOptions = const ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + ), + this.encoding = 'cl100k_base', + }) : _client = a.AnthropicClient( + apiKey: apiKey ?? '', + baseUrl: baseUrl, + headers: headers, + queryParams: queryParams, + client: client, + ); + + /// A client for interacting with Anthropic API. + final a.AnthropicClient _client; + + /// The encoding to use by tiktoken when [tokenize] is called. + /// + /// Anthropic does not provide any API to count tokens, so we use tiktoken + /// to get an estimation of the number of tokens in a prompt. + String encoding; + + @override + String get modelType => 'anthropic-chat'; + + @override + Future invoke( + final PromptValue input, { + final ChatAnthropicOptions? options, + }) async { + final completion = await _client.createMessage( + request: createMessageRequest( + input.toChatMessages(), + options: options, + defaultOptions: defaultOptions, + throwNullModelError: throwNullModelError, + ), + ); + return completion.toChatResult(); + } + + @override + Stream stream( + final PromptValue input, { + final ChatAnthropicOptions? options, + }) { + return _client + .createMessageStream( + request: createMessageRequest( + input.toChatMessages(), + options: options, + defaultOptions: defaultOptions, + stream: true, + throwNullModelError: throwNullModelError, + ), + ) + .transform(MessageStreamEventTransformer()); + } + + /// Tokenizes the given prompt using tiktoken. + /// + /// Currently Anthropic does not provide a tokenizer for the models it supports. + /// So we use tiktoken and [encoding] model to get an approximation + /// for counting tokens. Mind that the actual tokens will be totally + /// different from the ones used by the Anthropic model. + /// + /// If an encoding model is specified in [encoding] field, that + /// encoding is used instead. + /// + /// - [promptValue] The prompt to tokenize. + @override + Future> tokenize( + final PromptValue promptValue, { + final ChatAnthropicOptions? 
options, + }) async { + final encoding = getEncoding(this.encoding); + return encoding.encode(promptValue.toString()); + } + + @override + void close() { + _client.endSession(); + } +} diff --git a/packages/langchain_anthropic/lib/src/chat_models/chat_models.dart b/packages/langchain_anthropic/lib/src/chat_models/chat_models.dart new file mode 100644 index 00000000..1a011d3c --- /dev/null +++ b/packages/langchain_anthropic/lib/src/chat_models/chat_models.dart @@ -0,0 +1,2 @@ +export 'chat_anthropic.dart'; +export 'types.dart'; diff --git a/packages/langchain_anthropic/lib/src/chat_models/mappers.dart b/packages/langchain_anthropic/lib/src/chat_models/mappers.dart new file mode 100644 index 00000000..002df82c --- /dev/null +++ b/packages/langchain_anthropic/lib/src/chat_models/mappers.dart @@ -0,0 +1,431 @@ +// ignore_for_file: public_member_api_docs +import 'dart:async'; +import 'dart:convert'; + +import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart' as a; +import 'package:collection/collection.dart' show IterableExtension; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/language_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:rxdart/rxdart.dart' show WhereNotNullExtension; + +import 'types.dart'; + +/// Creates a [CreateMessageRequest] from the given input. +a.CreateMessageRequest createMessageRequest( + final List messages, { + required final ChatAnthropicOptions? options, + required final ChatAnthropicOptions defaultOptions, + final bool stream = false, + required Never Function() throwNullModelError, +}) { + final systemMsg = messages.firstOrNull is SystemChatMessage + ? messages.firstOrNull?.contentAsString + : null; + + final messagesDtos = messages.toMessages(); + final toolChoice = options?.toolChoice ?? defaultOptions.toolChoice; + final toolChoiceDto = toolChoice?.toToolChoice(); + final toolsDtos = + (options?.tools ?? defaultOptions.tools)?.toTool(toolChoice); + + return a.CreateMessageRequest( + model: a.Model.modelId( + options?.model ?? defaultOptions.model ?? throwNullModelError(), + ), + messages: messagesDtos, + maxTokens: options?.maxTokens ?? defaultOptions.maxTokens ?? 1024, + stopSequences: options?.stopSequences ?? defaultOptions.stopSequences, + system: systemMsg, + temperature: options?.temperature ?? defaultOptions.temperature, + topK: options?.topK ?? defaultOptions.topK, + topP: options?.topP ?? defaultOptions.topP, + metadata: a.CreateMessageRequestMetadata( + userId: options?.userId ?? 
defaultOptions.userId, + ), + tools: toolsDtos, + toolChoice: toolChoiceDto, + stream: stream, + ); +} + +extension ChatMessageListMapper on List { + List toMessages() { + final List result = []; + final List consecutiveToolMessages = []; + + void flushToolMessages() { + if (consecutiveToolMessages.isNotEmpty) { + result.add(_mapToolChatMessages(consecutiveToolMessages)); + consecutiveToolMessages.clear(); + } + } + + for (final message in this) { + switch (message) { + case SystemChatMessage(): + flushToolMessages(); + continue; // System message set in request params + case final HumanChatMessage msg: + flushToolMessages(); + final res = _mapHumanChatMessage(msg); + result.add(res); + case final AIChatMessage msg: + flushToolMessages(); + final res = _mapAIChatMessage(msg); + result.add(res); + case final ToolChatMessage msg: + consecutiveToolMessages.add(msg); + case CustomChatMessage(): + throw UnsupportedError('Anthropic does not support custom messages'); + } + } + + flushToolMessages(); // Flush any remaining tool messages + return result; + } + + a.Message _mapHumanChatMessage(final HumanChatMessage msg) { + return a.Message( + role: a.MessageRole.user, + content: switch (msg.content) { + final ChatMessageContentText t => a.MessageContent.text(t.text), + final ChatMessageContentImage i => a.MessageContent.blocks([ + _mapHumanChatMessageContentImage(i), + ]), + final ChatMessageContentMultiModal mm => a.MessageContent.blocks( + mm.parts + .map( + (final part) => switch (part) { + final ChatMessageContentText t => + a.Block.text(text: t.text), + final ChatMessageContentImage i => + _mapHumanChatMessageContentImage(i), + ChatMessageContentMultiModal() => throw ArgumentError( + 'Cannot have multimodal content in multimodal content', + ), + }, + ) + .toList(growable: false), + ), + }, + ); + } + + a.Block _mapHumanChatMessageContentImage(ChatMessageContentImage i) { + return a.Block.image( + source: a.ImageBlockSource( + type: a.ImageBlockSourceType.base64, + mediaType: switch (i.mimeType) { + 'image/jpeg' => a.ImageBlockSourceMediaType.imageJpeg, + 'image/png' => a.ImageBlockSourceMediaType.imagePng, + 'image/gif' => a.ImageBlockSourceMediaType.imageGif, + 'image/webp' => a.ImageBlockSourceMediaType.imageWebp, + _ => + throw AssertionError('Unsupported image MIME type: ${i.mimeType}'), + }, + data: i.data.startsWith('http') + ? throw AssertionError( + 'Anthropic only supports base64-encoded images', + ) + : i.data, + ), + ); + } + + a.Message _mapAIChatMessage(final AIChatMessage msg) { + if (msg.toolCalls.isEmpty) { + return a.Message( + role: a.MessageRole.assistant, + content: a.MessageContent.text(msg.content), + ); + } else { + return a.Message( + role: a.MessageRole.assistant, + content: a.MessageContent.blocks( + msg.toolCalls + .map( + (final toolCall) => a.Block.toolUse( + id: toolCall.id, + name: toolCall.name, + input: toolCall.arguments, + ), + ) + .toList(growable: false), + ), + ); + } + } + + a.Message _mapToolChatMessages(final List msgs) { + return a.Message( + role: a.MessageRole.user, + content: a.MessageContent.blocks( + msgs + .map( + (msg) => a.Block.toolResult( + toolUseId: msg.toolCallId, + content: a.ToolResultBlockContent.text(msg.content), + ), + ) + .toList(growable: false), + ), + ); + } +} + +extension MessageMapper on a.Message { + ChatResult toChatResult() { + final (content, toolCalls) = _mapMessageContent(this.content); + return ChatResult( + id: id ?? 
'', + output: AIChatMessage( + content: content, + toolCalls: toolCalls, + ), + finishReason: _mapFinishReason(stopReason), + metadata: { + 'model': model, + 'stop_sequence': stopSequence, + }, + usage: _mapUsage(usage), + ); + } +} + +class MessageStreamEventTransformer + extends StreamTransformerBase { + MessageStreamEventTransformer(); + + String? lastMessageId; + String? lastToolCallId; + + @override + Stream bind(final Stream stream) { + return stream + .map( + (event) => switch (event) { + final a.MessageStartEvent e => _mapMessageStartEvent(e), + final a.MessageDeltaEvent e => _mapMessageDeltaEvent(e), + final a.ContentBlockStartEvent e => _mapContentBlockStartEvent(e), + final a.ContentBlockDeltaEvent e => _mapContentBlockDeltaEvent(e), + final a.ContentBlockStopEvent e => _mapContentBlockStopEvent(e), + final a.MessageStopEvent e => _mapMessageStopEvent(e), + a.PingEvent() => null, + }, + ) + .whereNotNull(); + } + + ChatResult _mapMessageStartEvent(final a.MessageStartEvent e) { + final msg = e.message; + + final msgId = msg.id ?? lastMessageId ?? ''; + lastMessageId = msgId; + final (content, toolCalls) = _mapMessageContent(e.message.content); + + return ChatResult( + id: msgId, + output: AIChatMessage( + content: content, + toolCalls: toolCalls, + ), + finishReason: _mapFinishReason(e.message.stopReason), + metadata: { + if (e.message.model != null) 'model': e.message.model, + if (e.message.stopSequence != null) + 'stop_sequence': e.message.stopSequence, + }, + usage: _mapUsage(e.message.usage), + streaming: true, + ); + } + + ChatResult _mapMessageDeltaEvent(final a.MessageDeltaEvent e) { + return ChatResult( + id: lastMessageId ?? '', + output: const AIChatMessage(content: ''), + finishReason: _mapFinishReason(e.delta.stopReason), + metadata: { + if (e.delta.stopSequence != null) 'stop_sequence': e.delta.stopSequence, + }, + usage: _mapMessageDeltaUsage(e.usage), + streaming: true, + ); + } + + ChatResult _mapContentBlockStartEvent(final a.ContentBlockStartEvent e) { + final (content, toolCall) = _mapContentBlock(e.contentBlock); + if (toolCall != null) { + lastToolCallId = toolCall.id; + } + + return ChatResult( + id: lastMessageId ?? '', + output: AIChatMessage( + content: content, + toolCalls: [if (toolCall != null) toolCall], + ), + finishReason: FinishReason.unspecified, + metadata: const {}, + usage: const LanguageModelUsage(), + streaming: true, + ); + } + + ChatResult _mapContentBlockDeltaEvent(final a.ContentBlockDeltaEvent e) { + final (content, toolCals) = _mapContentBlockDelta(lastToolCallId, e.delta); + return ChatResult( + id: lastMessageId ?? '', + output: AIChatMessage( + content: content, + toolCalls: toolCals, + ), + finishReason: FinishReason.unspecified, + metadata: { + 'index': e.index, + }, + usage: const LanguageModelUsage(), + streaming: true, + ); + } + + ChatResult? _mapContentBlockStopEvent(final a.ContentBlockStopEvent e) { + lastToolCallId = null; + return null; + } + + ChatResult? _mapMessageStopEvent(final a.MessageStopEvent e) { + lastMessageId = null; + return null; + } +} + +(String content, List toolCalls) _mapMessageContent( + final a.MessageContent content, +) => + switch (content) { + final a.MessageContentText t => ( + t.value, + const [] + ), + final a.MessageContentBlocks b => ( + b.text, + b.value + .whereType() + .map( + (toolUse) => AIChatMessageToolCall( + id: toolUse.id, + name: toolUse.name, + argumentsRaw: toolUse.input.isNotEmpty + ? 
json.encode(toolUse.input) + : '', + arguments: toolUse.input, + ), + ) + .toList(growable: false), + ), + }; + +(String content, AIChatMessageToolCall? toolCall) _mapContentBlock( + final a.Block contentBlock, +) => + switch (contentBlock) { + final a.TextBlock t => (t.text, null), + final a.ImageBlock i => (i.source.data, null), + final a.ToolUseBlock tu => ( + '', + AIChatMessageToolCall( + id: tu.id, + name: tu.name, + argumentsRaw: tu.input.isNotEmpty ? json.encode(tu.input) : '', + arguments: tu.input, + ), + ), + final a.ToolResultBlock tr => (tr.content.text, null), + }; + +(String content, List toolCalls) _mapContentBlockDelta( + final String? lastToolId, + final a.BlockDelta blockDelta, +) => + switch (blockDelta) { + final a.TextBlockDelta t => (t.text, const []), + final a.InputJsonBlockDelta jb => ( + '', + [ + AIChatMessageToolCall( + id: lastToolId ?? '', + name: '', + argumentsRaw: jb.partialJson ?? '', + arguments: const {}, + ), + ], + ), + }; + +extension ToolSpecListMapper on List { + List toTool(final ChatToolChoice? toolChoice) { + if (toolChoice is ChatToolChoiceNone) { + return const []; + } + + if (toolChoice is ChatToolChoiceForced) { + final tool = firstWhereOrNull((final t) => t.name == toolChoice.name); + return [if (tool != null) _mapTool(tool)]; + } + + return map(_mapTool).toList(growable: false); + } + + a.Tool _mapTool(final ToolSpec tool) { + return a.Tool( + name: tool.name, + description: tool.description, + inputSchema: tool.inputJsonSchema, + ); + } +} + +extension ChatToolChoiceMapper on ChatToolChoice { + a.ToolChoice toToolChoice() { + return switch (this) { + ChatToolChoiceNone _ => const a.ToolChoice(type: a.ToolChoiceType.auto), + ChatToolChoiceAuto _ => const a.ToolChoice(type: a.ToolChoiceType.auto), + ChatToolChoiceRequired _ => + const a.ToolChoice(type: a.ToolChoiceType.any), + final ChatToolChoiceForced t => a.ToolChoice( + type: a.ToolChoiceType.tool, + name: t.name, + ), + }; + } +} + +FinishReason _mapFinishReason( + final a.StopReason? reason, +) => + switch (reason) { + a.StopReason.endTurn => FinishReason.stop, + a.StopReason.maxTokens => FinishReason.length, + a.StopReason.stopSequence => FinishReason.stop, + a.StopReason.toolUse => FinishReason.toolCalls, + null => FinishReason.unspecified, + }; + +LanguageModelUsage _mapUsage(final a.Usage? usage) { + return LanguageModelUsage( + promptTokens: usage?.inputTokens, + responseTokens: usage?.outputTokens, + totalTokens: usage?.inputTokens != null && usage?.outputTokens != null + ? usage!.inputTokens + usage.outputTokens + : null, + ); +} + +LanguageModelUsage _mapMessageDeltaUsage(final a.MessageDeltaUsage? usage) { + return LanguageModelUsage( + responseTokens: usage?.outputTokens, + totalTokens: usage?.outputTokens, + ); +} diff --git a/packages/langchain_anthropic/lib/src/chat_models/types.dart b/packages/langchain_anthropic/lib/src/chat_models/types.dart new file mode 100644 index 00000000..4374c820 --- /dev/null +++ b/packages/langchain_anthropic/lib/src/chat_models/types.dart @@ -0,0 +1,116 @@ +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; + +/// {@template chat_anthropic_options} +/// Options to pass into the Anthropic Chat Model. 
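
The option fields documented in `ChatAnthropicOptions` below combine roughly as in this sketch. It is a hedged, illustrative example only: `ChatAnthropic`, `ChatAnthropicOptions`, `copyWith`, and `invoke` are taken from the code added in this patch, while the concrete values and the prompt text are made up.

```dart
// Illustrative sketch of combining default and per-call options;
// API names come from this patch, values and prompt are assumptions.
import 'dart:io';

import 'package:langchain_anthropic/langchain_anthropic.dart';
import 'package:langchain_core/prompts.dart';

Future<void> main() async {
  // Conservative defaults for analytical tasks (low temperature, small cap).
  const defaults = ChatAnthropicOptions(
    model: 'claude-3-5-sonnet-20240620',
    maxTokens: 512,
    temperature: 0,
    stopSequences: ['###'],
  );

  final chatModel = ChatAnthropic(
    apiKey: Platform.environment['ANTHROPIC_API_KEY'],
    defaultOptions: defaults,
  );

  // copyWith derives a per-call variant without repeating every field.
  final creative = defaults.copyWith(temperature: 1.0, maxTokens: 1024);

  final res = await chatModel.invoke(
    PromptValue.string('Write a haiku about Dart.'),
    options: creative,
  );
  print(res.output.content);

  chatModel.close();
}
```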
+/// {@endtemplate} +class ChatAnthropicOptions extends ChatModelOptions { + /// {@macro chat_anthropic_options} + const ChatAnthropicOptions({ + this.model = 'claude-3-5-sonnet-20240620', + this.maxTokens = 1024, + this.stopSequences, + this.temperature, + this.topK, + this.topP, + this.userId, + super.tools, + super.toolChoice, + super.concurrencyLimit, + }); + + /// ID of the model to use (e.g. 'claude-3-5-sonnet-20240620'). + /// + /// Available models: + /// - `claude-3-5-sonnet-20240620` + /// - `claude-3-haiku-20240307` + /// - `claude-3-opus-20240229` + /// - `claude-3-sonnet-20240229` + /// - `claude-2.0` + /// - `claude-2.1` + /// + /// Mind that the list may be outdated. + /// See https://docs.anthropic.com/en/docs/about-claude/models for the latest list. + final String? model; + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + final int? maxTokens; + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Anthropic models will normally stop when they have naturally completed + /// their turn. If you want the model to stop generating when it encounters + /// custom strings of text, you can use the `stopSequences` parameter. + final List? stopSequences; + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + final double? temperature; + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + final int? topK; + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + final double? topP; + + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + final String? userId; + + /// Creates a copy of this [ChatAnthropicOptions] object with the given fields + /// replaced with the new values. + ChatAnthropicOptions copyWith({ + String? model, + int? maxTokens, + List? stopSequences, + double? temperature, + int? topK, + double? topP, + String? userId, + List? tools, + ChatToolChoice? toolChoice, + int? concurrencyLimit, + }) { + return ChatAnthropicOptions( + model: model ?? 
this.model, + maxTokens: maxTokens ?? this.maxTokens, + stopSequences: stopSequences ?? this.stopSequences, + temperature: temperature ?? this.temperature, + topK: topK ?? this.topK, + topP: topP ?? this.topP, + userId: userId ?? this.userId, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } +} diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 6ed5624f..7a581267 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -1,11 +1,10 @@ name: langchain_anthropic -description: Anthropic module for LangChain.dart. +description: Anthropic module for LangChain.dart (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.). version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic homepage: https://github.com/davidmigloz/langchain_dart documentation: https://langchaindart.dev -publish_to: none # Remove when the package is ready to be published topics: - ai @@ -15,3 +14,14 @@ topics: environment: sdk: ">=3.0.0 <4.0.0" + +dependencies: + anthropic_sdk_dart: ^0.0.1 + collection: '>=1.17.0 <1.19.0' + http: ^1.1.0 + langchain_core: ^0.3.2 + langchain_tiktoken: ^1.0.1 + rxdart: ^0.27.7 + +dev_dependencies: + test: ^1.25.2 diff --git a/packages/langchain_anthropic/pubspec_overrides.yaml b/packages/langchain_anthropic/pubspec_overrides.yaml new file mode 100644 index 00000000..4d7afffa --- /dev/null +++ b/packages/langchain_anthropic/pubspec_overrides.yaml @@ -0,0 +1,6 @@ +# melos_managed_dependency_overrides: anthropic_sdk_dart,langchain_core +dependency_overrides: + anthropic_sdk_dart: + path: ../anthropic_sdk_dart + langchain_core: + path: ../langchain_core diff --git a/packages/langchain_anthropic/test/chat_models/assets/apple.jpeg b/packages/langchain_anthropic/test/chat_models/assets/apple.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..62d7ca92657957e3ab45fc3ec7332a9938465b68 GIT binary patch literal 66803 zcmbq)WmJ?;*f$`8NFyOB;L<5ecf-=%&C;QC8-z#+?9#C0(!F$xluPW=of6AZN-Bu{ zU!Lcj_ni0pJ9E#MnKOA`IdlE~TmEkwheAsoq>h7&i-Uvvka7N7!->Tqcu0f#7X!&myhvX414j#oLiib|ODV(wmkQKmZH%*M2BMgRfK%jiOkt`r@-A^{^%!KT% zCbC*A+v-_ z75^(?i};{WajvBcH#;=>8ExpdtY%Q4S9g6b^c9qoepbcC{qdBZCrmZIyX<4bvQ)hc z(+NtuMV{9nk{Q+>5_8Hy?~O|1$1=gn5jxvF90;A?Ty87U7JxM5TryI|gHr<^l>p8{ zt~fwc;qXUXS?Ea6Ck1^8Guko*6Q&j3aUVV!;c;&@3mq3*QKv%$IHywE>l+iYi`HV+ z;j4t)2w7%DZrUukIka+Rg`ZhuRqq>7iwqy&H^L>6HKIZ*!zqQWzU~NPL+>4Hh7^g! z%9E7SAXi?6B<;v>YbSH@F06L!)3LfgWAv;s=n5=|_mHCDO5g>x=fk^>Gb1?lm0_P$ zs7(&Otc0%U=9ujx%I#(oRn1V7mOx5QS7}fY`23A6C%Yc%X%MTW?lXddU^lQxa1=y8 zrlpQb+s(}@(vu(GV6g=QA_?99&HDd(dO= z3gGAlQ3_j{A8YrXr6}5 zl2fjh$jF>Pp2%k;%mKMfePjU~wE*PM1zUh1QZiD45C)7;8@9;(|wgkH!o@nbaPYWO4^3OwDg^yS{ZW4+PHz1 zxWNA41h+)#;WQ1yD0nFbfNtT8GzPW1YSU}+;ho_gXqK$RGONbxCGn`;fFwy^P2kgq z(NPd5)FH6vq}&g)>Z8#yS#lE05osD8!f3~@5i15f`d zVre-i*!mvc`R6eoktMBV+R$R&o-9*_>nmu(h~r`^jv&ijwb&VVp6g6(X#lW}`C-B= z6p{oh0F{1V>tIb+kT(jQ7}}5IVgs~4&=Ara5|z7hu2V#(_ea$2F%ggV;ot&havx{xJ~n8#t4u*mcO4Uy27b#ZR>@M2wo1VGGOKf`aN z{`{CjPh+l8GO{bdMdh&KOljk3o`bi}S4)gD`@g-?gzV8EF78>k5oj?pL~2$;rsV|s zGS<~X+NCwO0E?>llJNBSljrl}GV6jpV$ZIoXqab1U{ktNj#DZ-|DpOESa{qT5=UHWdXl2wQ=V1Ug%p z76ac0r}))$eb0*nd@zcsj@*x@>gvcYCb^1PLdN6H2?$XJow! 
z9PZX_fq&`U(hJE}X6yaeo|`)|G&jFExLTyXSg5{IXrC96Ej5F=e-KGX^AR%IulLhX z88=C%@)8M4+fZPt8gJLDUh%0+5b`d7q#WhEXu!NiN$17d)IaupoCD$&ob92S$#9 z;S3=uPSNYGw$il;Jiu;|z}wt)+6=Hrz9q0{{rumD@7I_AJP!Z8-a?(BH)!2-=(#uw zAM}7c31o8en68gTJ{w@owHy|kd?A}v?EQRzIKQnP+g8F96zHr6_c z$$OoWYoWgkF;>0a?oB2TDL{hGX$w5Yk{4btJ^uv6X##zZ_Y!Eb-Mg`$V-;3PMvSq;1z2WzXvV-04-u zEkVOWjQ6aV-@aW+y?cRkeqj*U(zsKpr~0kp>Il&MG(OA)PbVA&6s_JeISC*QO86#P zxb}Knq;}-+K3>YvxDck;_2D|J`qDRKL#>iL{@JpB;CUgfZr2c_iFB36TzXA+avf>} z5|rrTy1-ZQm2FXtUhe#Z;72v6Yfk-^2jvii05L%vq4mkPw0G|!$W^{1-GM**{)dz4-`{dMBt(|3 zbo2XWygch#%jT3O6Z&wt?MZO00*DR4z{MLxkwcJX6wflW_Pt6x%fNu3>!KNgGLPn>9*WSrQ7CNT^C zhm&oTbn0_uE0dVpgy zS@S9Wn2KP*LN_ZV89Q(6D7ZYKjxH;&b--SzUwy5z07 zH%mWhir|=6i(by+bQ1PgWvMyea_A7%0;q7zgFeu+{1xCo#XnhE89u~WIgvtZ|F}R% z>09lE-}c6T>svzI{|OV#2><5~N*`QAInzN8Vq{=+&=mSsmFMcV#n@qMgLL;0ogy6` z<>%m}`E-yWGjKbX4Wi#_Txtn#PO2P%Suj-l1{)q-GRkc^=T~AtQM8rsip;0265bUg z-0|1HF3|rqr}FzVrn2|06V^8N<|B{!D#OYBAmhRQZq|VGaYM)fi10Z4Wz6k=I2zSIiz0qz=b*Ct zFqPn)Ic-|Kk9q~Bz~Zy>H*=W*vm@4NW0c^XLRcRCexqIgm`sUuNZ-3W zkFEHbi(AR7%dKxeTu1+r5b96dM8Y8^?*mf4@p+87TN1Y;CZ-he+XQp=N3bHt(e0*E z1z5-{0ppK#5o0wYGSbp+gPbbBN{&3k2lFT+@tP;-VzaTYWIv*fkt$4v!3@!6ZYtKY z;P_cNfxU%JSEbg5q-v3O=9>EhlMOTfmTJ)LU)2|%eq4%4B^T%mt#1%BH0TXQqX|vE zM3W>T3JCWv2hS=ue`|oABZBEgA2bBjgHZ;VJUL0}TfD`D?1tP4MgJxIF8ikt{2z|m z0YY!EoSq5=Z-7a{P0A7jo5R!=TQdD=5kVH&i1X7TC0bHI*Qa2b9`P?;Lv(HfIat^3$|g1T^XEc|g+9Mr!Gs z5_(Y8ofM-`tb|!de8jj9#N-k8oPNuVk#>w1Fc_uB6!PROVeu%2MQ|b9Z>mTEU6g%(~1X6M&0-1 z7PdlAaNO+W=a@~dc=29bZX}HfGw%{@@j|f|0r^AdlxJ>~?o}s+mb?;@`*AD9Y0DC@ zE_Xj$5nba-`plN9vOv6P{n)^BT9#UrL5v65qGyV;YTepQLf;%h|~V z-n|On5;TPDRdpQmM8urhXoN{N16a~RdYC}{Q!IFhdl^rE1=V#GQ*zkcL(J@7`<$; z6(^`Xl50gv2)0oGTp_AI11LML`|R4XeCL#@PwWJw!1dnd_&-gj-?7b*?dpG7voH4| z9pW;Fsa;b^WX}OP0tpZC9OGck*;dAq%cHv9ilxAq77?||AdabWzDZ02e{k&@i?s8C zvbovU$NCHV!B@vYk@~JfIsdF3NxKpS>2s}ODyt5p!+*SBRDeiE$YEudH1yn>@9u`8 zhmN|iH*3>=*4{9O=yW^IiAvER;Xxdjb zrZ2Dip0iPEY1{3LrM3`vIB|YBam(QOWbh(OiPQc1pC@_#6HWy?D=wbCLgJ3U+>xE0 z7YV+peBunzTVH5otVcoo2`k>PETY*JI68Nuyb2N&B&j0@CkFQr*l(CjVA`|(O;&@xF1(w|f4{i_d#w@B=QiYdqZ z7hY}V)oh5H8nI`4uGB9-gCqAkFM^byNn4gCfsm@PiVHczAM*dC7otX_9kzo)g-;K@ zW&Y`~uiWpJFSO5Z{u?pOxOCDUddl&3sq-SBD8Hq#U{Ptx-q-f^%xB5GkQf^^weh*B zVA?OdK`Yd*?$bon#xeNJb%l+BU0UR{B=DcOL2IRNJrqn29cOkv{=xSCwIh4i+8d?7 zI(JIIT#yn)A$l|hn=ZfYEgZmG$2r2t+iM@ZaT9E40X!d4)vlZ${8r&Dd)hji`;0zF zTag}Jcl4#t>Nx&+*T`PoDg`>rA~j0?DYWH+yEoS;G{%HDg&k*AbZ+>urR~2Uhz1O> zEUk|S;>U7PgQkPHJ3Yobtd?sOS3-wti^_BBS?0uFiMgz_RKu!s5GQlsv2SO$wHLp~ zr)}*BajXh@zkVKRb88Nf*Ye3R+8$Qsa}vN`(h;MZ-P4+=F08irV{)S*>MVWZ<%p&1 zlK;kKzaM@v-SkhI#D*WQv^r$sS;;rr?VUzcKm@bkuNTc>-jO|5m;%dQXXj*@`kt7u zmnCbn?iX3rhZk|oqbZ$>8Pb-SK6TtPK~>7`v(?+DG^z(|Y2@$_k;};Vnyat??KFj| zeu;xWFO;NZ@BKDFFY_r{R(rcL-OYzz`D}C*;j?@-Z~UBlEG7Qqb_m_q@!{}~9guiI zcB!Q|^au=%QstGmA)@bOFw6v?Dn7$!X!6VmE5w%|X2z_SIx)-F7EI|rAN$&TWj<)EDN-TALqT~L(ld`nen1n->WaQDDeyY%VurCO zm`$0}d%~IuV{$?m$!a?V)M8UKJ=BB-VR!{J+tS|j}wQ5L4OaQ)FWjL_3` z@*e#HTc9CPnOU1A&n#8?o3b9sHqnAas*J-(LH@wCnOlZTf0 zkvhVIR0q}ySZZn`65!$1%s8@W(Ni1q+O4!kzE3Vq-Z7*ZkR0@wc}GGRJTS@S;eeg4 z34ViJA$jni=T6G$<2!3e;=`;SxsUY^9kNM_U-_fQ8crgAy(>enUvB2-z-2v1s&{Eh zrmldOdmfS0%d2-lJDC@~c=_Khd-PsE+_5PX z)7CByA4hHl_SH?XNMrM1#h2X+5}8{2IRt_Uvm&s?h5b__%%sOz=O(%9ikWl29m~e! 
z{;-3nr4hMQ%E__*i*Bbj0vXImf_L3e<7{}|x@UJeV zzc=Vi$0%Jw!teBoI$P}wL#xNK8o_Sr?KIluI|mzE`4TOOTtf3N8ZY$hD=|XH$6#MV z*H31c)=#u=+?9BqtAm`*Dr+r8zZostdS1CT)3AhsveZ-3zw&B+Y?cpPvS}mId(1%) znL_+bzz>=;$5`v@pgK=jyU!P#DGSFE>Y;!Yeh_WyjP?jN|G;%qfX1|pwrth)VNqdd zvvp`UqnGDp4`=XBey-kYHWR_dLOwf*iSa`9pZR`i^YzI+{fw3pUh$g7)0=zHvn6Zv zIPsB^f1I|Uv!@ugaXRkkci&nYZR!ivz~b=QeBFJbWLrpxu>`517?oIawY!ePH5Eo$ z(ajF$7;!7-bbb?16!1^uL{`Ei(rLuJ>tatQ%$4)YUYj&KhlFyz^rl)J$N z#ZV$R8zp$G@D2kY-%_sJQO3<-BWFu;!9fUUt*DiD=0=6cy2x0&Eqhpp?bubsrgh1* zpTym!SCNAFMqZGaaX}vV3X^Z?TM=QY?T0d5OG0M#!s@Bb*rCOmw7=s)n#1DxqU>0Q zOOM396w$U~7EWsQZwmZIN#k#VC#2zt^RG;oCQ)98hqn?S3F%-9nUO)gG!dPh{RjU+ z$t|XYT>WCSJ&FN9;9HU>m6w|+88P0kmnEZtr15Gz2qFI}XlJgZdEqiisKHH_Htkc! zG%8T~AxByQlM|ylIkjT)d9*N#Wroq|@Xbx0dpUdd8+3C(sRiwKky9?R)rJB!g^mIx zJzBBi-aC#cr4p5$u(wdc)Y&Q8KrV~>G2O{P^avtcFyv5P8C2MdI#)#TFK?}0!nPiV z%e@zs?HACC;R=aK+t4D%EET?LAnLAlcN5Qc4ADDTTmqCQWaep+wbhCid{)pf8pL{m ze@5BE?6JH1wwUywKiBJ0`e7Ml(OWzX_4`2X4=)YH-_jPiw58H}b(Lgv24E<@|B1%lN!k}S+&j*RTH)s+MokhpqsUV1Sz8{ak=wp*4^ z2hoF|-+(t@C2Ln~YXeAe&fck(5^|c0Hchz#-Xp1}2gIkf2-cTUv#J`Ao!wybV$BVyQQnU*5r}kkpB}LdxpV)l&OPj;qILSo10CpzH{G`>jRw| zQaqvA7;NzE>RUgd5ZF;?XuCvhaC(69Wxt~`Eg~fE z<H0wNWunZc$3{+(89-kw>);-u0AM{4#n2Ov zuliUWUl(+|#mMlLKW~&2|8jXWjPr$B>3ei4_{f8f7|@oa_@SQOzTNcWr_1-Hu$7VWAhSxP$4^Yc#{ukaK0f?WhGyLdQ+#7h zvi_*i$`?iOKJv8A^9D>-i4<6$F+X(sk!c#D@8`_Z!J}2?KZD|B$LZCeUip**(~>DK zeR%AWtCB<-CL`slzeI=h34AjsWoTL z61m`aMRBMwmNg=PLn2)u&wG(2+Q z6_dozE!1j9n`h4dP=z_R%KnF=6GNzwbwFcy>807Q&f>ki`hAgUSnT&={Re)zuI@@Q z(MmHnsvxP@5)T&piXD#yzGnUwPbbTCU&XKqzbJ+5Pd9T*-xraRMjyKsUQZLf&cqrm zP{{V3B|RKyS2uH{oV~PkIDNQOt0qa(_Y;Qk<9*fihP1_MHl-AS@G=>UU%rj_ozF(^ zEM{SJKQJ;V>ZG5d?(9FD0Ffj|kLDO?PI^71Pu0gJPr_xU1{u5ggKqJ(`ElqV#bV0c z-P^`J>+9XSOXu;w)(iJz=@*T~x6NCZAv`B`hC{#ZQy47oqY(8P0&2v*+Ij6XA`Yz1 z9EO=!mbCL2Na4WxaGDXlTT`#VxGZRa0iuS54PE|`;;Zi@lc{?Ulr?j&* zv{F`^)IlIQGd|%m(>CnY-R6kpjP2RvZmZzIj;>SW9xDRnJkTAYb>Gpt%K7uJkl+P{LkWtp!<{rbLD z^5-q%9CthOe>g&y|KU(BgjI53J8yd|HbYvBMTbY*^AojtuvVOIJTAr#*bMq-S^~T~ zU~-WRLsKQ{xp)DLx&4UjjtC^fMLrh@dM%>PBTd{6DVcA@5wL;A@;RSAj|G@Z1Tv0C zy_k}bkAgNdQd1EG`qZ<$)_}EQolN#pnrKWK#vWDK{Y~+ndTXyf@lw#bXnOa@7_H`| zOZU;9NVVv(F8DK_PZhM1EufC%vyaR5WdcXLV!lbhRlrCtl%=7Tot^!5AnT7C`t3mG zo9e&%*&t3;B=d9RXRf~Q5rr>QoRZ?cm%Cwd?ST=+nDz(qC1pxsQJs?jAbpGw-;681 zTao@E4CUBlp4=bJ!E`?Tl3jfeCdb9L^^2KFo#Mvh5`Ia(gx(*#YYby&lo=nfE54TV z;Xe8Px@JuH-ndoCbkWq-G*6mF`NOoXe;FxWfbSyq&R&7zR(>l!Hu!Dj;C2{c6nNMq0{bOSCXT}wBx+d*vRbxk zqZG9ay29-$WvkNFrnzMjTr0?a&9^PTwbP%SVia=q&tHfm)@oQxAb@t}ER1Cgb)`*p zUco0!bv!uG7S}j6F;D_6LCQT7L4KI$63%=uH8$bLDxHvqr~6K&vTEh=B`JM{`X6V$ z9RGP+#D8XA+3k~!kdb;Ldg0jt%TV{heO+wUy(z%`+5wBBN2i9~F&;TaN{P1Z(tXMe zv>KfTNa6aFBJC>f zcEB9Hm}D-nwHiG)HGq=M#qL+DYisDT{_p`>7QZXlsY~OyR%EpIxJh8DN3qFue6mztI@hf??o)CQ~eny>f%S9 zatM5C=;2b6=y)#Im=df!3eeNS+Je|^jH5GuPM5A z0x|A05zty`Yu5-pl8j;>=GW!rPg;xDwtl8%xFPfW%<@QwXV@b%%}Rbyic_t%f#4Q$ zz@+OtX(@hnf(h#Ci6RgS7<*S-s3cZ6$e&lw%92ywl3{yG#QmspS=0w!!U@*EpSWA0hM%HAfS0 z(5Cbyl&_so{`W_J3R($k`n$sEdrG&<1uXVG4uGF#lfG)W<<|2GRC~#I0^}Afq?`PL zJD!cq!CmO+A>IiqH~l8deSi^>tL5U~BYB*zi%4#nLD|=)2+<=}GpiNo`8qtiykfD| zXi?FtdAjgT%4f#i`Dm!)UGd(Pi_gY7OH5wgD83d? 
zOX>9zs#eLIZ!io*DW&>RxL-oPy=!Cwt`De8tt`Ex#AbO zoEr<1j<1gvqSbBX>bkWldlF_QtT1U(msNP~yE2sYtF2GISuu1^k{h1ZH}QYbWMmgt zo=%bmdO7_5O7$7K0?fK?7uD-B6uvZVwk|U+Fx`e^msHw;l-770*Ni>qM!M=-BNlH^ zwOqlatNyvBsC-U*!?^?2si&K!#z||#nQhC-N-r5%=7^a-t*>GEXq!25X;!Q+zzE%RBl&t?l#HS`YP)At|91s0L1WcgOg|H0*Ap-@atO zVP*Nw|FMPahv`l)I$8sl(}*Qtp{~1!rA|WsH%r@qQf@+c`UELs#;Iz3?!5D9>@HGP z;piO>IVSq>@y5PLG@NJ?(;*e!9j!R`y{Yt&NYCn|diAlDK&i_*bJFKDsF`GME%@$z zQ&}x*ulDirSxWvZ$C0TyxtWh~Djl@6sd~}kVH0*AZ$+l#SM}+{yB6fuQvVCa)zN8Jw6hGa4-x0!p67 z53sCDN@XoATn@Emt_nkZDoWc=-;85A@4RS9*QO+@MJ%SXfZa)Ec?_L-UUMC735|R| zd>lP5)*{zap0p?AJ}#NfflsBjdi@kUPkytg4z7I};h7khS`VXLm)uJpsZY*44Koa}nkX4ElZ$x*# zij}UpUdE~iA=t7!x9QPgipA4?vP)R9RvTWJ4Wd%hXGC@=DK+H1#4;Fnm} zX+wlB_B-}+`dCAR%us1Xd_hR|3rLg-25N`=;6(H!WYl(dOQ=pfF$(m2|Eg@fDTc?y zyg$Jx`ET9gCfD~ZlgJgdeI8Lgv@}P;8@=pzIh&V%+PV8*;m(@U#QCDmL+UR!cp+^K zGMId_yPc$Bg44b-=E$Exh(Z{fRQW`QtPHPjw{0f%L1IzRB8ln`mrR6RcU?>pB+rWT zQ#chVdY+KO76+GUXA?{mkj6Y@ftDu0KiUO*OrE+(t|BMOIUhsoVd{#jXi5(A_a$#0;8vKR}wQ8ii_W7{A zM#2uvYZK7z4;ZcYwKaj&96Knu&Na z=)yr%46rq<%CvQjQ}X1Lt)8=Io&L>UAo4WCM@mbNpEto)D>QR~ja$-Un)mYyXmvIF zq`$}fa|MNM1<)H}`)pwM=YYod=7xwF)(9V}6(xO|z3+>|D4KT1l zT| ztS^R|I{mDKPsxi6wW;2i~BB~_n$pTBd9wV=gYv)92A ztJBC?Yj4jT?BFla=Bu#vix_sYx1R`44By)wfk$Atdzef`(_NbBtmVZkEg`mMPIjwI z*Ji#28NSh`)@Twk^=*odsZHDK7pU?s&k4rHi9(EAHRo|q*xxOg_BZ_ntwxGJc(L6b zFz-@j7b?#nz4GfMFT-JOm->~wF!YQvc(M2W1hWco$Xt2QO}?v@*L*r{M7yCT@goAd z+~|>E_K_Niie3E*sIBC9<<{#95Ww#?$oi5}+KgI9KQ33Enozgf z0ey7f{dN$w!0&Q8*GVMBMKtHN6{FQXkAKCa|3+|f3;(=4o0P;}A*rtTf=u(;^E{FM zf1lZqx~Kd;Qr0C4UsmJDgOQ z=*F1|=eNx=d(v$CvD11LVrDJQB%o7S1qm)cFmNF&nG^8HpbzL~sxeC!R#t1ZEScII zJ)pAkOZJ1fpPYkY8D6=MSX=X_JiV@OhCS=`Lr2m&>}V5}l;sv@%&|s% z?K4CU5OSID4@?>nm6&jXZyy2yJ%ke>(zG*R-Gp4T*V_Ai#W{T*Ni*C%CiP4kswJyZ zT=gMW=htO3EtN^l44&>Tp8iV~>)+?;gY=Zd4Q$2W#_x2d093C#F zRtS&AJDg{_0N4U4vqD2!^6|HIlvUjDMC$J-l_~2Mo^!UMNq%3e>r=Op(z)@f*z}2H z`df{gY{05R_q)UxZLT%zGf=GTj{dWO*(UY4X!v&VR3IDFQ~g;LJmvZRAyb3{H3EL# zxu|*(6IWI!oX76l+A?e&H$Z64{QEA|XiwUBtU6R(LV(FW7$&QX`VS}VxeqSrX;g`2 z#rozi%7tC!>H5kf;RKq?htR<}ij$MT4CAMno=(45RIG~s=s9a*$3-S(M#P@ZPKwHx zDACLK_ypv?>PJ<`xB4JL8DxW}g{jo%T8(W5x(aKQ9?^jNFfw|)rH@q9liURjGtItz zzODCiY~cZjnb{yG%M3Kli!K_y$Kv%zLJMJ-?2jHM>i2SXTWo!paG0!TOV%kd>qLH0 zV>Y+a^ntcAV)?!U3uY$w{%!d9ueu{J!Z4sS?RZIo|s48yKO~ z%C;09WF5$Gq?ZI3|3SyLPeg@$8SP%$lIanYNKD`IN?a>z)jxJ((}AGu@I`_Dn!d4w zy7}s1Q@Fg>VIk3aD7MrLuVKop30)0FjmI>|?CVRH^s+!rh86h1PffZN11si{S(onEJ? 
zV#ZuK4J7d~9=n@KT?~;ra5e2pT`1Qstt~854Qo;nM<~)omWbgC8Y})eqQ*|^NuztE z*CZ;zeGLfr50(KI zaDuNg4@}7k;tlbE=L$o>np81qCx@;XE{TTeB!z(99od^4O5=b)VBlBW_o=kAGWzWcso*lt$Ev1ixy5|IzCi`$xuYym>(*)--#>lwRmrs8}Zu7qLXXw3;< zm>RQMaf(6Q%+w4NvzeJcv2Z>nbox)uFOE;+_)`3iMJOU^`sg(ph`Mr>B5n7g)2=5C zBpIvAYdvZ>Gzya44j_-TNYR}6#`IpQI>w|aO+vl6h;&2DM*Ry%j0Ym{;9zun5S$}$r=$fngpMuh!`gDSEJa|(*wqyr^vVemtFd;fk!GxzP^{V{&hZ#M!GO|ex~s*roU z2d5tza6l^YNcI6531Sx)3wSywW%RK=`AN|NE+_#Ke zx{8mucm*aHP8{D1&pchMPH*p#gc zE3+Dgvft75dd9=?`0Ga-sb?H% z_?6ZT&tscajoegyX35Q4GWsYIjt)fAp0t8IF|CEIq)moq-~w5V{)E}QhIqMSo%Jiv zy6E=c7aI9|kxK{mX7?K*d)za`J>!L2l95}r)0m*%MmNi`fx?p;O>P5`()WwDL0XOh zh|h~sR`DVNEyp6+cq^RFQVafjM~dZKqz{j7=3+S7$(f2>)&loW;Mk3s#1w2!6o;-7lyn0iZFt9(OAj^ z!g*sZ@H`$CX46bFU@$_UHB$~#fY*`+q zT2JGeDjj2bz3Kx7{Eqn|=LLTooVZlHh!HP)+hLuzw%^c&;|{7v7_Ek=9}Ls7wdG2) zW?Nu4NjHo|j`#yPyTK|H^&`3m0m>?PC0=DIj8!nc1|OtrRbmSP%lq|3bP;wC7J?ut50h(16NisAw)kvTbPYL178DDfM_Q73I z`JnSV0`NzD_=s61#TKj~Jl8C4(AFGX;NKQJjG{``(#kdSPiiq#s`1k47P6k>qY%p@ zN<8p5SbIWI@(QKODwXsE?fNO(H*eSDiN%ivVdu39^Ce&d3?D1|5_C|@ii#+U=SpMs z-NFSyl?{XObk4tO2?oIT;19qa9C$$%h>36HYquM{EVBhvbvj-gWX|VxC<;S8SZ^T|kot_2Q=dEFO4wds`ov7(l6ZPOfB!L^=f(UO&*Qc5;O3oJ5 zmKngpJdtSvjTg<$sqU2xF@X#3*IE-V;|04(zMIyI-TC|~(l>W50a z$nS-BGVhGDm$t8^+0R>L2Mqpv>856UqHhQ9umv6;+5G?+yve0T!4Cg$Lqp3pNL ztr%yptM2X{(B=a5dD)cFA6FV#6Gl<-n|L+`XsS;&0zw7sw0pj~X>A5(fPtbp;8|-C z{b&%%O>+-=0+S*FyzziwKp(@D!%`L|H!tt5xD|kzgX4v-OORa2-HdK3FP*LaY;NDA z@j%<#2jrU3Gab$7qpx|xN#&w5(-gG<&(#2wE=`C!&@1yfq~I`q=(aeQ&33@|N*9Q?s+L zUA8`C`7%l`>ptclt+^K4Z`w}wf#tEmF;Q!xsJDeF9wT>9wlVj4w_qlc9TF;U9+Cux zpO(UY#`;=vr_At*xh#&0sm*=~KP^r2VBai1ZBX2BzcA@DSPl8wLlCdKe0P4<+1DJg zc2Mxb`^}Kf6gqgX3DX!3(q1F8Jx8M<@#{yKfp$? z1@R#79}iM%&H>_rI{E0as(QVDr7Y)36nX zgQvbmJ@UA%Hl}RM_R>GFA|K=8b#;+p5$j;c#wP{AOS?-$iv%`}&0;eeO8!(B0$ntnV}F$|l0wBDXQT4i97orn^R9~aX76zH60LWDS-L<=oXV>mjxm(RU)^hs z(<1_{?p+6p4zE?eGarQK`BN=h-5NTz@C?^-Sp&7C_~L%@%078udQU#BZvHeQRnZOU zme%&bv(l&76JlmMTtoQCtC97Zq$FkrmN)GaN;Do=*Wr%(D@uFeYe9@~kZ+8;X`{h1 zCNN;-(rwPpmhX1I^<)U~co55gGc)cUm+`JB@b&|SV`T?$h=iyBPFpIXAZ~*z%UHmg zOiMB(Afp%q5|QM}pwr@USX^RVgK6+BghhqtM08e3-v9L;Fd|wt5kGS|M6GEo_%`h& z9^Sbo9@~Ds7(b{~>zMfQEs!DfW6oJ-W*DI=-F*MJSP(~JQSAaMs?B<7if4hjj-|xe z$-?KcS8^PT=?sc9ooO6WzhXJssaociM@VOosB*i}>T-zDZ_NIgTGD&5^ph<-`GwaQ z0YLemi{Q!m^x}cQ!OZ%ofC6{0sf63A7wmc<+d1yf(XC3!1&6!A6;#+3+i0r6A0 zjmYrZ0z@+G-9X;3BJjy4h{pqFiHQ0OAO{orO}s-NdDZ_;Z`0CJ^HqQ@UAq`at}r~t z9rY*va*fzMp%@RAaDaWv@FqmL+m=x5!P(Z|Z`+d){3hvx|7xmr5~m@-!-8X8mkP)q z^lCVtNcovMU}mzn9xCT_hSaJ#iXr4GrA@1q$%bzW;h9H%59o}56X9vpv5Msi>=zne zEpg2jNu?aw2$kQ-`nMae>!OQAiy*9JoxOaR)yhpZkhTVU!A$G$kHV?yfGD#e( zpRU5BS{w~M4@U<#yiFP_VWeq%ITEvJ=+WcO(=^wfkttc1t5N1z%%81I7I;If)=hME z#WFB-$KP|;UrSzh)yvc$fjH2<4E;+dvHK?zdpUyYBkjD&aSr=cd+SBNNfTv&zSK*K zJN?Nl6Bt%4!iy~;2#&Oz9bDlzZb>euCxK=0tO$p=;cAJj0P^vX8Pb}2M@my0gA4nI zE2vH$lIF9@27Y&IQX{eT!<%S(U6j+uSz=Go=y@}q7SS?W|4aQ$anbWuT%Zb|O8oqG ztoP~)U#3)ou_(%*W|hlUR}A!RhS0U8siyH{KOa%b#hev8V_{ew);@!tF@U?B2dB~F zYL&+O$=bf}`|;_gsO7X3@q0<8_^Jh^OLLlQa%hPEt*PtW@$Z;YPB7#1c7l-Ehezr( z>Gqx8t*|D@Y0U+t%2>52HGjx-1{|LyV9I*UX+1=G<;qRIt=ijsOF(F9W8O#kUOu(t z%INsAUSD%{4e~we%2c2IOv-upogKFKPu(wBTauok!}pSXEU%%5w^)Z6On4uqq%>iX zGg_u!Wc|0K#?PpKA=2z`e-wd$W$@1+Hi$MDdIDvGL+G`HTHv1pC-6_UyTJkQd;$nA zOrSnbYuLS<45FsxES}L_yy`)GEyruKvF2Q^i=JW?N!4T&(4t*SG_F9^XxGnaf&Wq` zdQ)ezf`|g^yfoYJIi7mvWrxsf25sfYP&M}HDectyC)pB;u^dz3N1(+` zQ$WKtkET6!)rSD2$?r`DyA44NnhEOebmCsF=kQspIUZ6zau^~vYlnutf}HMu?GqBJ zMLp6+T~}kN)(68%ZQq87jg@0Vt04o=0I+9pWe~vw&gTL8ND4?pK~|)78X%7!pkXgz zfT0IOHYWuR-v6XW{|mqVA5=Cj9?l~QN_?uvtj~lM38)Qd9B3aP*iRmi*|>PPf6Ppj znpWP+k;s1~dGd$~bYDieeCJ)j|JZ_gJE2_w+xNFrP*dTk4%EW`?dnzE{OOxv=&kB= 
zcJfw=!J(VYNsgSNVfF&=|8T0y6Yw3TF%c_HQL&V&?prxvOvKhg|6mLuiMH~)Cq_*a zuD|{jX0TFxOoyN$CWn@WCOE4|nS>59jgI=faWneSg{cDYCgP zZ?^VN2ZlQ-ORl$WtrigkAF1nJl?~l|B6k}84`=(sbN6>>`X#$!+z(VFvhG{KzXtAE zefR$9P6ge6C$FOMowVur!?1L+wP)J+u>gPPv(%35PUHE1I2pp;?n)98tuh?Q(E-l? zaP*?DkLO8M$UBnHnNqiZ+iOVuv+Db~~C zHK|4OTX5O4?KW0k{Am9ZeA-zFF8K6riqdKS!^an;+1p#Ybr<;mSos>-EZ8%6@Ox4w zciPQTP;UN%)jb!x_W4J`JnAmE`vIeianpzg9E<;owsY_WU}%HYy;N-P(DI z^S;`GT{Uxzs3+b&Hzaw_9nrAk34c;tuvAzU2H5 zFMlzqfv&I4TLaq)!it0X?v(ai(xfZ;dW@%FDTPy|#DFV9B5{^_mqtsL zgP%f{D|fgue8EhA(@V-jkL55`TmtK;#l@>5v3__1`yjQz@bAu- z*VW?f;i)1zAD&lI(z(cO`#ooprA^-axoNC6QrlXTZx!+RT4LahOu-&V0gE6kDpt(b z{U)G0(@hLBwg%flRKf^tiWk|9N8RYuyGv{cQ?9d04E)9E^!eerA;$Bj4(AI@G$qvMX{tN4_7|1%BSKE@(bKWyl|9_(BoybgsczlC+f&FswY*GGw5ltd; z?_F%bM9ZhXFXRyCx|Z3s?qu1c|@O|2bD`%5GAa`)YtH+M~k^Ug&a z>+mg2$3yqK|0a(HrlIfiUQ+AJt}mab>4pA=zUl1ZlMuYbaB9ua0G8{6*9^=gfmVOz zChKB8c^%d~#i7ab1MwN0^K2DcjP+(*HHD_wN%wNL-(JINd^%op_YGft9XZ&jN5C+X zJ^$O3|8H`4ZNB!)>IkY2Bj??LtBafkp9F}9d_HMTa@`!^!vzVQ!k0}uFBKGd^Ho+q z2R+(>&v!LFS@pPMWC%PeN&6eUD81z5HOQKX1(H`VHp7Wp`i6oq7jpvms}-Z|t?%db z`_BCgkpoQHm#q0^A9Sg!!AdGBHLagLLc+aJYc)-CP4iD?PR#VODL zS)~bF`w$%89;J%rRh1nw;_ip|9LLTta2fcQkYCB(DKgbrNw(^B6dqH*8>r!`o8g*7#3i#p;Vi4gp zX|K_?qCCyt&`7Ztg4MyME?6cSQ|hdH65aS&N`Ba>N}T?x+qOgf4`4n$f&+-rKVQ<~>Sc`roOp6; z&dEM8$yX$)o}Sk)iMouhd9^=N+mi6{d=<0u4+eA}GX|xt!D?>e%Q3YCH?Z^fM}J9M zRr9h|wfv8}Plj*&Ql)TB(G^9Z?}uJhTq~M))B-w== z*Bij#I-X5D?g3q2kUvvk{MKb6^hvWhO_p^Kpeo= z{{T#e#BdeGQmM}B!sd%tM!#1K{{iF(Mstob5(+B)8$Q!`z1t-@%gi8o`=ZdWh^jH7 z08+IDwi+HHHQ3uQPVLHApj)u;i_ZD!isR!RfY;PfxuAoPqxpjiZ_uPppf+Ugs=Dwy zX^*uX=A+XNd!>MDFTZx1dE7ZOeCAxH!)-`0zbB-kOMb!39h@sA=zD242Gm<<>sgj~ zVlyVu>c*rZa&2=L-b-o~{etix031m0Ah8a<{&4;w*`wfT%;5g4f|X+D`uB*usEofV zHPQj=f58o^Pn37 z9k%nKfu|38-*!Szm~tJrLfwm9B5%bHF?^Y@BJfTTW3&`85sdk=@4BP1zIcZ{YjXqc z4P&wN3m-qcIluFK>YO+mUoEj)T3zzzm&{{AS5v+dmR-J{`!0f`Fh!p`Lm5)uMxRf9 zjLuaV06ZM{p4lz|NAhS2pDqn(V8(<+-Q%9f!Ui{-f(EA3U~THe{K$O3{?`}Uk|JsK zcU92^bgGu!el>170x6q5?Z4;aa2HAh*vkQ>M?bynM^<;4Hr}t6zK8m!sE%fR3A(Wx7Ofe z#x!b9V_S~oydY`LZ@y(AvTw)7tr7|drXozrO?Xw!kw`V}jb%&*k@H-6+~&E~-ZwwiJ1X4TBeRE0)KKWiW{)dUTpo- zbGzj|Wm{ud0pdoGm)d#By}OHWzT{YnUFFK))B_@nsM{{!K|UsS zpKTC6u#YnS#d|HD6d7Y*Lk)pil@3-IhJT)&QLi1Hh$0uf9n%*?a>pdL|s(Xo$ol6v{SHM+ouN7*j5t7d! zru{hB^gTdO(651Cyld_SbHWseH;(IGbidfrJmRX+L~_jaN67l0k*Y*fB#J#6n-%BC zHo-~c{0Ey%mFNVI*rtMr_fveZ6wD#}3LChgnwS1Nry^SBirp_p_Wc$|L`eZxsH|egzWuuZETs7SEs7Fl0@o?B{=-}yQuHMK&xPj? 
zX65POSYi?{8BtfYJ4bj6%Xtamz z&{A#gH^1k6eifpcLq6X13cr8xm-`8QV?TKzFsjyK2+NsSTs624F1cn(eQdI2qS>DicEmHSw)AHm=@cMN%J?4bS&9a$=8$n zto4A6w^svc$Xrby`C4O2V7&stti$o38Nt(&or}G}7)-Ui$m3ZDCq|$AuM7)gm3xWS z2SXD}2*+4Tx_`KEBjgu)>3om#(&b`2#_nHNkG zXByjz z=|88-XYK+QPvG+=ZdqX6Y)k zrdI)Pu*Jf~sqF8w4u~~liyYt#P7YVhzn{ohH(MF`sDwn(Z&N2I=yxA6_5b{p>EPi7 zMUL+Jp30<_szwVwggtGz-8wBu~7*b|p zgQw2Uem-5_jz^?|!%nFhhh?m7Bc`WQJ9iNBF3pO+_w5Q(ce)Q)6^&6R`w(j-LCE^% zW%Gt@Pk+!@Q$7O7I-r^;iW}uAIJzmwE`Kxn~_r*Bx1=Q?_jEvs-`g`*-D^QD)2XmW}nI=a50v_&COC0|k7F z@BY*J@&&U?R?SYPcN8&+P+`2qnd}CU&affO&Bpjm&-<4_F5{I&dTj9`+av{*4=BtB zMbzR_e0cX-etfG0o5vKP(nrm!fIBZLE7+Y}TCa!8Ygpwvz2bgBkuo)sj+2fWj8g!C z6iC~-`j_|^f4OPVKWeiI*;K%FXY~2@m?>+EAtZ{}Pv(ul1-;CN7mr-ng>yND&Saww z{sGh@5UdgW{u5uCW~u$$4WX!hT*F!*YtHH{#=;Hu(PF!4TcEnSEH2O0=!MYWco62} z5I|`HTQB?7)hZmBpqIb9t)V23MKcIEIPeL(qlQ}(7skx*GeFnXkPY46uS*qhm;01K zlpg-FePwyGbI^DiZg{GKcmwVX&2Z;WvQAJHcQib6;ft}Chg*Wg*+6tWsPsR77vxX& zr@P5?HI@Q7-vry;6}#sdxCOEepO5t3(nvR}57o!8lnxwh=tK;mCsaM{?%AqP^a-g3 z)gT2icHp;VH*9x13-$RgjkSj6U&VXcFS6_uXgJl8>Z}N|eu9u3C#UZW3~>U@t#hZ6 zwc=$$OZWX76)~g07%}B37MIw4kODZO{n0npdv;?e52#wh1kM`4dtA^AneY6qTkT5s zfoAS|Aj#8x)BppvF}O&-UI`n>dfNK=$&-s(OdTJG#lD8~OI_#iBIK32mc4yS^F0nR zt~x74Qu{qSXLX5@9uGVVzR-Zk@Y&8=8$PQ9|{0~m2p-N)~9 z0*uDLT6{mu(6%8~%HB~dFdaCE^%RuF3|+4u1lw@2k<&>8&FjqT6oHNhKhD0}rgC?CqD3Kg{;Kt@!O zFoZ>tKm?^7TW6S#Go|QEg)8yrN0Wax6y0R*d+DFn1^fo#5dpYef%s*u()YGFCVI)w zD*h;@T07Zd6|zcAjaYU|rG3x~O%9D_RfvM3AU1snY#>`JM7Xiu^Q*-zgQS4H(VEI? z+~=QRSYFk^r$lBm9zUM~5rTag!9Z3h7=T|Mms|Bz{jg2}tGY;EpX1##IC*(YKoP$D ztb||;xg^f0(I!O{rH-=TrUDi&NC~?d==ps(r2NFc;jC#7KD}s3JuA89d-^iZ4g(!p zI9arBk{H`1D8DJ-QV8}SWSUowYmr25CQ9a%#jTvRT(r^@)-fcJhfQypyYxRBS_?8| zy?u*Wi6j`={`f02nB1kN?xV)N*=1C2B&91b4EiQKnqFd~BIj~lRh$7EvJHE!fU5%_ z!WEP^zkuA3@cIef!W7FKbRB%?3G1yB#gmh~CI^zyuosVqca0Qs;(1Y7VG0ow2!@ug z^LKe!EA}&$eKwE-FMLMNMztv&-W{Bc@SRzQLG`~N6#y&o!qmi3SZCIkk#Gd8*0Unn zq?WLO#>4t@vac^&a^ZQY#O~Z~rM2%P$0ia6P2|9)y|wkOig3F^wli%hMS0OJKAAFc4XZz1eGdW#PcyKSr*rGNVeIa@p@G%H zRQ1Zd>*djeEu+7&DJmD=A!=kcC?{1ZjokWg#Jf+-#G-R>l^CNpi;binY z&f^?PV&VMzZ!{6$AyaL*M~C?cCNZ0Fi)E1|)?`M&KYlj_Y~3@?pQa*_M%{cvxQn4JLolIzj2w{4l&A{-bCkc>nwoij0RsG5$b+iL3IT=_v1#|Rl5U>?_Z=WJCobc;XJbDw5a32^+x)uP2L; zepz68BkPsBf}vR;Vz?$IdP!-fjj0&Mii`$WPpW-fqT-OCzz^PUc&@H@O3yxts(YL~zWsZ;pN5~=H`nH2 z?UWA#-V~9V$_VUK1$+_KcyZtuT-c{rgi5H}#$1A&JaA=mvf)Zd zJ;Aibc4zh&#VTe-K7V=w&aZgtpT#sh)hK?N<8)uRFY;ZTuG!i=Ql{S)QD^VR#m43; zwEc`rF!{rj7d`&z6nlA5nR~XatYmTkpUjd3h=qztuym`213A?aY0NBfaWqBPl1Zka zIFYCvJ;#V3Q@Owj{aOQud0`U>%(L;Jj=?wZYX4YBsd8}(joxD zU&us7XQLp#JQum*CY|}^1Y$pQZXtbff}hQu31jK|a*}DuBLNV+xTNP`B?e4AN|Pqx zhKHV{*igm`Ce1YP#8u#;?a)S^BMnPeAm(W7r-Uh)t49hqXuj#R(vHAyHdb^q#hA{= zd7HjaYg_s}hCpjozv>eaUNpamHI?Z&nhfkJ1CnQb<2bggL?R(SD6iwDTsY~UkYCdN0LXr7)v7gK4arEcMZK5fv=_Q9zjea$vxtz7f5 z;W^2?h*}XS2?xPG@zw?ab7mhP0Re>;DwC7@2KxZYI=_^8tDECZFPJgTCP)F|Y?mP> z`t7eM3LpbgM(|WrSMcL1iLYtoVK|jS^1872ftvX565`W+eQtiORBA3OM}0vN0!c8 zjmzS3LaA{nIBW*}#Yn3kZSUv6?nXaFrLV8-oJ3w|4++C35i&^|_+scqs3`q}itQ#T z-gF-rMir7HkZe6M;buCO3=#hOjm1@}Pj_knTUf39jrwqTcgsN-c_oy@W|~d68aR|K z&C997s*5!|=HJjEcKRz0KZ~Lo*x=wps+v(<4FZzP>gwOa1@osA-ySAsaAr>*1as<@S{vanOZ&oFzfH1#EpJ46 z*Q*eM=XgaM>m7p15C8ziTfljmNCSc>(9g0(;`!~i=dMF}DCp=+)LC!bq=s8WPitJ= zRtFM(c*LETXBF*TQdQ(&PTPU;4igvGyw@8$cLvEae~6Q@7c!G$+>RT+&9ZA!Bc}(_d3gD7^~h2qkn9SyI1I-Z{%d{} z>*8V~AIpI;A`a|yu0do1ycq=C`?eC+0piDXVPW(OmSuKi?R1kb5|g(-StSXXxEr+u zeX`GkI`@}o<=^(gQR6nWiBG|E&?FGgd2FwPJ-1;7PJM`1eWYGJ>*bPXq5Uq#hQesc zq39NdDGNR=9Bi)pHzZS3rZ=y!==aZ2GXf*$NTQax{_Vi*eZS+*MeU%k!r+){nTEfi zDT|NvvQ*Gh_@Wx-*!7A=92~G(0)ntET6mRdq$nZGtnqQj!0=}^#_Z5MTo!q~*%o)w 
z?$e*^e*6E?kWMO6m*8yF>n1CK5e%&?8o)~vXV~Ribx=ev>G;F)wKIy|PRpSuk(A*1Y}ML7EhYF_L?hr?<)<^+L?u zV|g94=ldRcK!-fcs!H&WTTqc;1VfwcvuUg&n~0-kcWICKjD@dTR)Y>4Gqyu5?q)dt z8y=@28yG(Nu9v6%f^_d*nplm~_aC4$NaYgG?=0|>Yx{O*ZRJ$|(}BCZyKH5rkm^0N z+e4K0velHaA^pTQl*(N78MF#owA^P*#C2s>4<%@bcF&Nu9V+4i^dIbccgRY8m9z+U zms;khx$Au#zSczv@2FpPKmL7KXO*%=I9wMeF!?V5$vg?(-f4D@o9`R*Q z(I(UxdTSYw`sCS;Pw1N1Hulp1dU75+7n7XgHY`0RC6rjuE3<&@T*x>*8If|uSjr<9 z4RN?t&fZ_`a`f%*`Ig#lzrsj7jiHiZOe(@R&>cNF^-sS5JBV*8<+#r2*kezX`K1T! z!JJ01zy`hS)0)GXer{r9L<~SB!Hp;RNJ+eNO8z&@q9Aa6^=mPs2sWPU-_Yj)Nv{qU z>i!EVXrH$Js&TUj{)t&;p&UsPC9#-zQOeHiALrLqBd$DJ?V0C+S3YZm{TalME~=n@ zGCxRnJ@0q0`RN(wV!&Sw9bf8o3}*lf#|6`r;qT4Gpa6j?$S_@Ne=Uu6yE3Ttp3#2b?pa<4G-P!OGwUwskKPWT5fOu-Em@S>i`CwG5=j6Z9zL&nK~)sj2($K3+ym;d5&wQB{vK>Luf z90j(MXGl>sNC{c}18|3AMMl4LIAn=U-_`=X{L%a}wb40|iz^f+9{9{#kg9$n-955( zRrM?{`eIscu`v~spcF0CU~`>@Gk%%e|65LO$oxvV7DP|V$@K}=177e;8#b8L)ZVwd`bFIp1sTDX4de7mxl~9kEnzJ(o%KY=Esy9&_!gYn z7V%ZC6qDy-gUDhcBPOnBYfDIEZA`8IO1Ev$Tx+bm(QYEIorpqUA*c#$?1f(YUhY~X z;%wP4V|(XTQRGHFbiBf6?Zj)(`?1{E#_M2y8VThc^qm-+dBuT@IrmODyXFu={@aE(Y>A?1?(q}ydx<&QFyq0#F;?)ASqCZ2N zTmdRMPFYlPqr>LOHH_s9{+gH+EsTslf1mqd^Ha+}V=Ue+keF&o{Cs02+&{3iN{8lc zWBZ%wjaZ*EBk5}wcNNNI)i_J?FRA_xE(4&4l&?#{_G7XpW-`}UG#F=v*|`# zC#IwwK-Q|zENLklW?LT(pgT^jsUJ7*XrrC62tvZ-GxTv;I;IGGm9bZ%LFZ}~#P)S5D)6?>>kJWh0n4u9J@)nN9h2d0PRLn;v z61H3{&v*U-ri(_p^M7oG7GgJ|*lir0h`FG#Te&CLzaa&E)y5Bdh-uC`?XzwPMi?Lv zuKhraPcHu~2UnI#4f!1mL7p`c>hG!H(HSXM51NpOe2W&G^wBfy(3MZfko4=iJrcyM zX2(m+kj2w3I?;cg!bXsp6a%N9+WgQIKs^_>@Y3TjBWd3q0PL9_aU`6C)^%WDle;8M zzA##E|GhH^<*8ppr-+T@WSt)+tXG6(sr=E0qQag&^2_gE^y0WfxCG|wNgsZ5=^TXk1qLip{~K5)Ut!fyY^}6b&MBW5iNucXkvvU-JQ$ zgjD@1cqk1@=HSLg>j zGQvI}ZwjTON374cq-SjJS-)Q-w!a{a<&lED$w$;JnzK*NosdY%;3eIyR}pqZ(LWtM z|C{sko}<|q=wBNIqG-@5uGKZssZp1i))|ir-~|mvdvEozdV7Yl{-w-Cm-E7* zpaat9qX)^-p{`=CO&g>bLmTjI@91{@Ed35+}xR0TYIK^ z1mhFj$Un~wn7pZPvGdO2=ro&Qi`6$C>~Flcr%=Lt9U3>PbHCZoWm*=Y!q=?sy_R_9Gj)%5}$CJM(Et!waAYArJtoDalsQ zeRl28%O*V&+}6q`Osm(DmTqLuh$K0=)gE+d9Nqu>A^m$@o=ID7C^)p7oUn&MDHAnj zURC(ggeE(lGHd=}Gbew@rlr88E)+d+r`oU0zF!;1%WI%ATkBwTZUz4Q*r4+04Mzl) zJdF-^3w~TTcK4I5+kuQpffi5W~*CbO24+VX%p#i%xAjlBYfExeE=O^z=<^%8r;m?FU**!kuFitMbmQ*yeSu};J}q|D6+c#Y=j6rdB;lTgalx9lR3<@w30lQi69{6qQK?v zsznBy=|j95Uq{HLceCQ>j|rt^UH29Bm(QZT^UWpJIIAarzzS6!d6zUa z22>I}K3BLH$%&qCqEzH_Lm%k%(dzBrVQPwujf{8|^poRg^h%vOx8VH7ntsn?#S^>aj8Tth-UY%dYi#^m(kKNM z9gz;xJ_!E|xa`ckKcz-< zuJY4Pv(Wccg&}5%ZjCJ69Qc&Ph8Bo;Y?gp~3?;OMygt@v_4W0Qf);clEm8sUd#}zS zxxi3?h>oSsU+c@cwJy$9rT+l?=F_*C%Bh++^RE8DLKUR;Bc)2#D(czS-EVjT>zHB` z9Nrh}7#Tg1oVhP~Py3t7AsD@noUW}gwu89BdOm9L<~csw z*x15>LB{iU{p`7J40ue!#r4TBgHMNV8zTy#$Ps&D4I?lKW$qXEsPXZYzgF!ZGCFr!8C3EYEQo0mQfLBxT)}G zx|y!VB%8V(SklBq>rJIOWX5Lkv*u4#<;U{GGO`q0AkrVmEI4$b%)-RVqcEvIypS>{ zuJ0-zWLMjpiTQ`)tZ<>cb%vu$3f80;BifQ&JxHBS+R(A5nD$(UwbsS9*3Xg6-nnc6 zyi?y8q5D#Ml36E2d*KDNqVn;rPMsNaens3uo?3Ql;3B@?{v8_9m(laH4R42Hw25Ni zm{7-+Sl(F5!NkgSEjg3Aamz2VYKMkenyXN%^T~D6MPo$_}}Kv zTE9)Czl9AT$H`obryPQTbJQqYZZ@G)tOqJspR-SC8z`mJ`&{@F_`3%7xZ?Q1hGCmP zFW&RYVX3c4gWOm?&98`%eqekgoskK%@%tI6CR-$W&%&xH1uDG>ri=BJQ2jZY-p_lT1M zDvDH8II^)LiNMXMEyeGLsK-{Q34x`>l1JecD#<)w@eLCdB{iVXuU~Wmt0nkRqK_)u z$W-@}{sFpr#$EWJq*=9YK}uAB_nio*me#FG`ezjbJ~w(h_s3d@)|^@~+*#ck!bMC1 z{e-M8@v(MhYLiHv8+cl#Qfjxlnuwt!x$1CV+~Rqsin`21jh?5m(bNm-=`Q(?Dw_K4@6}1AKf%Mie3h#MAaz+Gze9ar{-xt1$bWe73%K z`KVk*$#{+q(mN`f#*$Ve?1{S{5?z1nP-Zb%x`q!@6ksfbWJ{INcFMS0I3ck`(C-yp z@O=Ex;m?kt)Qi_PA3o}OQsE?Ty-G(bVVqE333#{F?TlDrRY2y9pX;lZ9Ak5rp7_}v z`6)HYT`u#4&8?#D#z0Pg*rf$h-y`lX0TP9 zxeG6L$I26ckyg$Ui3(NDD0ch?NVdI%n)A>>py^|`Qcx+ z)#q-3#z;=KV`Lc3jBOHs&4&A`x=EhBa3YcmVTihw_7^J^5f4ew-17N|U((|0WS4V6 
z)h$=?QoVV_b*)XKQ`kXVrbD#db4#l{76$#T(7$B8j4eXOPRo7m3#vI2N-Xk@)*-C{hHek&(d9cyLDCw zioml%u~}#$iZmW++_~2$i!TGYr_y0xr9U}0ZYkyu9^EGSrOO=dH17FTxC+;(TM+?F z@__gijya^N(mBspnP`u=){+nXJjT{KB{h@uYsXj4sSiM%Ixf&BhKDo2an6XiNR2vr zhklYSHP_`}v1;(oo(CNROuG3Z>6nX-lkLhC@2=*pNuGwHfmCX386lC^$7P2uMX{Rl zmCQQbjUV;f-Moj%;!An`#HjbRyo591gJ{WWgP~bGzqx0Vjq^W2w7dr(zQujHsKzw2 z+LB)W1@I3?7auLsxTC!?8kr^YpWPB087M#NI-<`tT8czNrkOg}Co`pX^B>5pwMj>zwvHZC)peE>M~R5ePp1y zG3sUMKDA7x57P&xOSZ%qM0jVZ8HzsIT^I-6D382(IJ%bZDg_kLUP- zvt~ruvXsVBvZVKTCR6BKQ3as<(>LFToy`&~i1Un$HWEx*jn_N36uB~{L>u3kd6+$0&(sCKh5CiP zPe6}FF+ZPDVd`?;^3j@KtFZhIMxuT+_QYkwr>WEP9z@|)^K-pUevcr=MrHGkneIfU zJ>P|{85D@Z+&9#~jV%HztCcPk4#(YVH9rk1YW|cKp-ju$W=PBCV%FwF1 zvjr`-z`8%t(wXNK#yWT&d|pTU)l%5S%?(!_uV}aNHzGZ%@cdjynUeQ;KbIq~r0a zxJ=xF1g5M~Wk0t!-3EQG=Me8RIcGfWxf~ME@;@sDubR`g?K8rRMeZt&yo!fj7B5T8 zfID0Gi(40aPKieKR2&pc5`G%1TqiqULiQ8V!=jEhn@?Zln{v|C! z#PrjXou1aSjYde)OZDe{!!K>y%=J`0l{fTQmx55-RcD>3Ao9$Lp_pI2=HiMYmZ zB&Hm~hrv}<9&jVFs^qGl>C_AH3IIY~E+Z#;-|jE+AUA+f(ODdmW};L2of{AhJ>2qb#PP%ItAl0!c5{6sdOP#fo|-Kr z=_~f=Hk2iz&Beh1-9~h9Fq>fF0PH>hfO|%u$G|ry%N}M>=LEAZpk%2U_m(ZyjxkG8 zPmUgSGM|>7_7eyZ9{bz6k03V^qQH!Nd5qd^6IbogW?T)F?iBBqBm`xmR{0?fcM!0(b{TJ6LcK1=upamj58KZ z8D4L6lk#kg_FWN2Kie&1w9~||CA2F8NlS8?)XB2@!K?h@j%i&)F04zwo8T9A zBU|V9{;Uy7GN{O(^-6*(_vz|e*!il?0EL1?^q-@v<=$nd%N* zQ|$vMv+#-$>ejy^Xqi_@5tg=;9yrSleJr4#a%U~@0uh**poGeVN)x|8%Vy5Y1U|w6 zCXmu8sQl>Ju^7Tdxhh!3-R{i3PEorm8+xWUesKSuS7beNl0A3nh2Hq9w61TSlUX_2 z1um7ngd;JJKdqKyadOownZ9A>@{XkJOs|^xu*yswsjylx2|#6~N7v*9ozJLdRxZ{w$FebP|DG9VE%2Kwskz_CxbrY^2T+@dyI|VwauK)NodB ztiAoY*QjpX_X0eJ$6H{ccq3jZg*yToqwUP8Ev2GYs4dI4bLa*jt&bT`ofK!qf;3Id z@1>A@3Y({YmRWd8#$5?}xiwvZ^k(V9iS00&u;SBYHp$^VT2Lv%NEL}i@4e@$n5GbW z@RE%26PVz)7Fcju_htN@oQ^S`JQW&I>$2si*luE;R5ht0!%gHWtpRR<#Tz48mz=T) zzvKk4g%v64N?BawV}1=U(tr4BJ2|)k*JN9AAlzbdf{ecvo zQbrC%^;u|Q@h*E5-?p01&%LvPj^n|3@JA&-J+_(1(64v9>N)(QzZk}TN_a@IYr^v4 z&6ne!Py$+le*i>7*pnjkXEXv_CD2TPg7NJMST6iZ4X=Vi= zy4&zx9|y`HCk>jTO%giOMho%W_)!i-{iFnqoe35umKjsiojQZNk4Xjtm6;k7q{c-= zxbN6g&;jUx{^$Py)QM;Md0RBE>7GR}&I*2skiUKw!nUpG(m$FLhaSPjw5^Q4EJa#Z z*O*U0`FVi1K`Z0AB23WI^6jmR7sTAcWlIWo&+LueQa{VuIQ1iPPy<`9T0}rwyZi>W zY`L|x>J~~Whv;s;LiaqH83$LBr*&w+KfoggH1d%pmH7ahP0Q6Y;cO|PEafMH?BgzO+O1t9A#K^z!z$Dfb;^iSe z7H*Vlmk2-V<|}Yd89z0`(W~@M=CQ=%AL6?oezc@L{ecN$ae)4}{q_HL!TzriHWq+_ zO$el*Ys2ajDVSE&@c*l@jen4YcwilsWs+x zB)1h94V1=4Lp+cPAdQ72FS-!Vd&`v&(%WBMzw%Z#esvTvbTN(kKesGAqThwozt3=P ztCkffkEH2^jt+lgS3IG5nrg=2J8lPhW>{dym-MdHa^;&Wd1$0E=R1aVuj{o$4jVRX z;f)5B_p&r0cAm8LcODp8S-gM&Sru~L%lwz&p%LP7I(+(;1fSxUI@gV(_5>&y@R z!VPVio>%a&u_+s=m%?em>yjyRxuq-3KKDwd%z_ z`P{X{QEN|N_oC9`boZ9=FD8x`U%>n+E7m3i`nZepZ79v{O4|0i(p}q&$Zz>~521|L z8y9kt=dt~#{bx@74{AFHpMA;D5ZQ)Np7%Dp!55HW_ac^dx~8{3yneNZrd=>JhEZDI z(U4%udvY^3Wkm&QdWuxnT~`D?yy#P2bwcA>T`9-@W7rP@g3HW#e$ObiXo_oY7xe(MUr0S&Z?~j8puEEXZl4+g+yq z+Wc|r>+mO9a#j)1Q5m65e>dK}?pxfrCHT9~VN)P5!r?^>WK|k^_nw-pzjo+EiPqy= zL?YoLMhO9TQ42f4N_Nq=oQb_;8zH_Yug@55o^G58C(-Ruw z&0m{We|8~X;z`D2t=>JkI~`tm2zX!c^cP3+Y#4gXZvX9Pb2du4aM=gp zHLqTQ@{xC3VoziR{7nbDeg`jIggOl$o(*D}Qe|%HvVm6ojUO$^P5uKEJ!ld1?Mk?_ zVmiT*_pNocu_Ui63wY#bru3QX#Mjdu33W0kE}Idz_O)q={qop&6M8!A;u70;qk5!p z->7tdCFz9@Wop`y#rhWW*o?(2Xm^ghPvJa|V)SMymoWFoY_oidVSIwn&_6)q#m1YX zcW>=AzN=XaTaHexh+F@FXD;4YcD0?3X!SCO9%MHB`tdE}F2L^LThAT2FUwKeqc*R) za7@`^t7)~F!Z;P)}e?Q9UpgWUs!tFwS)8STW@9HGjqGeX1X}@F^;2~ z`cw1@;aO|1Oxq;uyOl33#IdbWXP$0!?u{7ufV10iCdcuByCO+&F#tO{8!yN7VU@o{6HBc1QHYn* zimYsPQ*VG!Q-;-f!y?ltPB;MV&1pqPFxQ%jS?d1)ctcJ1skG;O5m!%>qTnoz1e~|w zxa`bNN@-PB%o}0;0ghsHJ;xVt-Aq~XcD|c$c{T;HY2n`n9`4@A;U8_7KGU9-rrehk z7#(R!DcNXe~xfK;t107EIXNPJBH}BE4 zc(_PQ4_e|gHz9?nZ) 
zb}+^ziPSsccJtu=&vO3&YzCj2Qr~&8TQquo&OG$~dnGN0mv_DP55U=Y7;+vvT#(^4 zAjhcp#4O+duSu3U`i6zBl*=Gv`4>dFl40GdEH<3^4wu|oIK001qTyu;8Lbk}j<`~# za5owGNCAuIipL2^=@S0^n?U#VqwBjU69%T3GmfuC6CA5zCoPB46ip126ZXe{!!zer zIN_8HyRrS~&Cw9)dIjv71T$GBn9*Fg_Y<8AyGWwEU;)yR*taJnR)WURj43^TN}21Ss|2W znp=d_EHi`LiIsa~hB@A!f50=&=kuKVIp;pt^}VJtA2m76_7SR3AcSe zc{ACyu%X%(yx5GzTJ6E}Km&xtNZWTqp*o>@W(72SRCU|#Nc8Cmd2p$a-bGMYF`|F3)W=~ z%E};}y8OH+jhDf;T2oXvy!GhyWYJ((8mo`d$AA%!O$~*&)m+Aj-;wk}y3U$S|Jube zQ-$|i!@Fn06CikR`K5-ph?t#S>26^7>TJoB?|%XrK2H&`j)}X;`nHO+1B^(TR(k)M zYG3nJM|bR&ert_!-bu#ml$0Em@_L9Ky>V+Vc}1HRnbbzsoWvSVD{Q857`ZWW4LEi6+`QE4C z&z86n->%wske&p^GjVm^FwtPZTyQqw_~jbQME-@vWzO zaaquHfNJaCXkoV@qAPtd&C@%)_?_36Ia`ogVPuO`+w-8r2&ZHcQbW-%`jxbP$maG= zcDr5D#jeI3J>}Aw#dY5D=uYcY?bK%b&xu!jalB~2iZ~Q)6kVVlxVw$FLz8f7qyYF< z%6(@nk3E~b_99_8b*N~rrF&+%dHGMjX{YJ8VJSR}{l@)0V^&+W#NdJveLy=&=84;c zueuTby^ig)fvsCm02k}day%_MXeU$T?UC^g){0G^`=Li&(47(f^Vg?1>#FGf*oPKN zg>}ULfNHGB#ykWo7137bu>#`lN~ah81>)&8+rrvTXayg&wSw}Prl}pg#d-^nR@1+2 z)~+(NnOLiIeb)}&xfXdKYK!CqQkmBL^?Vj6YrJZ;ySAyc5wcMm(@kxL9Zeg2Z_?zQ z&i)6p{deOte@1mG4IPt~B{phoD+HtRBpJ7C9~j<}BJ^s=@0de2$=AS>;CBH&H&=%; zL;vW>8Lg>X;KLj0{SpE-UTxZ+PYaDgJt*Xlm63LeZ_%Zgm zdu_7~Aow~m)UoSBqar=m@VM{}ZrSD;w(j z((#^~KH6`XB^jSRnleNnYmIT`xGI^;#hGBEhI+KONNdnXbbhM!jWGXULVzGKoXHR| zQwQ{QARAu4Sg!^BWlmLlLSY)6Gxru=JeWvd?HeI4S+h*LY^AbhHWFZsO3LMop~5&9 zRhd``qAB+NZ}Weeyw+>mOEh3S177;s5DgM&YjnJ_5PmcFHNL+R3EyIY z8*H{FN&RVKDeLty7y9q6Qzy1Gd?PQWJK`PpuvPmr#SuHPk?K=;r`cM7LHC-$Ce=9Y zKx2lOcA9j=?V-h#=zrkS|xK&wMCw7PR%fqf8A#b6PKsoQX>~cRy6=YJsKgv&4FJMMNNd^!gLM?a0pk z+$p?rHr7qlnJ+S~e+H=d58`9!j*V~#H6`!odY5m+RkdhD+~fD3Tul)C%`OR`05Z(G zN~@)@D3)FOVQaNK#H1_Q+Pu-#yXEL7SU_V+$#5Ly@NRfEZmF1Q%>0Dr#r>KEDz%0A zasGE4W^fq>-lO@iB}{C*PmM&QLB%b>YMJV4k2$IAp-f%a4)pjg^r)sFrOpnu%?XBe zDX1x7#`;ye+_>9dvgDXfsH@8Dwd+g`g2Yc0Bv-U9>?Q3k|91Du{F`8iG{>rD62cb*7HwY4JwMwiEpZ~eNzpV=ShGs}QvD-94{oDef-)R>B`tvC z?A-$imw4csF{|@91$tn42L8Dp?ZBGy8~;xL-u~y{JBb9Bm`^p8+B$CR5Qk)tb}ko- zj@@$6*fyKJv@P1Nxos;7M&+jVs}=}9YDB2FFZNCIeXE6jYrMRy(t!Wj3nK~9f65JGw}!H9ZTD4ok)=hu<-eY! zhjv}J?Q}BmXXk#J;ZMT*Y6S<2H&WGvhcaP9*{E!^o=szV-;YiZf6`-L9NLKY>Bka3 zZzYBo?Y$}LOR38OtVsK`hUW5eQo=oV@+KO;#(0jY^k~y1hq}TH)!2ItM?HqUMt>hl zxH!;SGr0?DOifH0XW;lrH^qg&%$l-{yFM%Z8+x?-qwYma;EL%dmJL15INMZD{*kVV z=?4&}cjP{~s*shDVz%9MtlSW9*p}u89zRRH{@f_Ov=h2Xh{k#^1wS-;SliyHv0L$I zCykk2k~~FNuKkg6;J$xphq+AaGN0@F{OboR5{Wmf-8BWHV!)U9Ak%M~<+bP+eYMk! 
z1A#2OQ>CCH^|C)w!UA*&B?fMSRzfe}w*mN%Fw3FG|=EkHdKR=8E% z>2HaZ&cbg{FLotm?M0_lquOzDNqmv{e*#?geaL(i=FoRXd4yl4$5)J2-JM~RYvwVR zKN+r$ge15F3l4VpPtU|i_AIq$F2$+XosCqhAa5>hC4%?dA@xpmCRQ>}X!Dl;Zm1@bzcFV%C3;t;uM7wGm!6aI$NHq(hWbuM&l@0`3NhCQL zeAqaR6hN}N4mcCrYF44WHz|!tv+^%R zmzRe+Rf^ag2~_7Tm%(f+%RR?R#s!75cqLq0|4^}hi!`@kW1f8HEz#7^-a#2A$<8O& z1~>&Zr@sxqKWmZzUE&dJin6b#?+sX3{yZJ@6bpYU@>xs1!@xedhAWLY;tRqhy}ZMf zZ4zZ(-B!|uzLTcohiciPr!NKA3~Bc-?rwu<>BJJe@l=+2O7?w(og`X3DZ+1xI!0kd zj@0WuV0~1sI~^dB@guDWNrD5PmupT8GNj~=JAao!a8Ru14&q_`=eHtC>R#Z12gJPe zAcz{gU4TjCZZ6$F#}S(-UGYQ2Bn&Ku&gz(`&4Q1x!_1E5d8aC)0`zm zu1aP)glD!HWeogWl9MR~wHH!Ce9Uz-r_tlx!M-!1kFd+xhY@O6(n?ShF-YhHc@W`q?c5?Whcc z8y|fCv0OZ|E_}X}sGaeR@_1%m$j}a#pi$q-`V#WFKUnrX?`{d`PdjgwuMV-UB~AO4 zjLO4m(ZGj3gZipYRlxUXXpDfYO%3l&$=2nWBYGc@UA~zb)}G@hQlLIMf<` zP;6a03=Hlh2)Ab7A3h{6+AXep4Y(mQ{gT*mry+KYlPeebh;ipvT24ji*L#k?rQWT$ zpB#!JeW*zA8_LKq|G5MEqUlz-1r8$Z_iu9^Jl{?p<{E*JRYtF9?B8tHe)(Oj*6usKWSwgVYFO8{|K3$HRFcA~*bq(DC=v4RR@{(UwN}L_6>AEX$xv7l0sX~8a%X|7!Gcy&z=k4o*|Mk!ZEJ$V5KF-zr!3h@Qtvad#KJ=Nm zi-jh+>3Q$wk&O#NQF<-=(X7*gC(EsuQ$@5hjIgx>go90X3wz8SUCE@XCSsuwXngeMfJa#+&yCkI~e$-Aj^8@Uh_B3+v>XR{&wM*a`x_OVPTN+gn zT=YxwZ=Hu~P}SX`U(NeIS%}ucTyS=zu54?byB5tknF&* z2*oTO&dfe&l8Ai9jsRyd^dVxW;S+eAeO6=oH z-P;L1%PqtY-s=0uph3@x<$GO1)r5DJ-=GxT@`vH z?+JYr#*j+T^l&$Sv6Wy77Y(6u{>k*b_04O*RP^4v!*zQPGn_u6k@8X5nqtbz_%>L2eoAV&lMf(;VyL0axtY1GrcWTZuy&1p&;|K6QY|1;cB;=*hRuN zAmTe2WnVX`nj3?|$@_hKq%mE3f`TWX=mOUmjIifYY^q_ST|Icm2JmB9ItDg<1C;)t zw@gR%fM&87BT#sWHkFYuP-uCPOn5kQ_((us8uy{VhM@tOdHP}Jz%6MP^Rb_rXO|BD zxgRRB!$4bFJARfH?f?185OhDZ;s!JN;VIYs4wEG}`PYLbJ*|I92j;4|TZ@21{A!=} zU!Mua8w1(f%Kjd_S(#$4giJeuPw0x*-!a$yn{z)++9&TtzxjHm8b~`&Yy3$9GC_u1YcmGX=7P#cZ9V&R>O< zp^mU&zmN@^w;7$L$Vj}%8wPxnmEehV?;Rp+tL@wFR&Qv1Y-ze{=9u_YQ7GyG-FOOD z{hOk8)-rXb)YPUa;o^tfD*&4}-}?t&iD7k$M~>opMqMF?H^8B0yKX*34r~i_iThId z?<2yaAx#$VJR>)WXfXiIIfspin5**P-79B-Y}_8f2N8wPky_GAiw!mMS@p{k1C(UI z(LGK}&+}Z5|GAsf>@DDzN}n73Xg~LI>==7Mvo}&VvF2UYo1sW4;s=yi=d}y}39vZd z9!xENzR{K&`4pCE0g@9)^WXwDPQ8#5AT$VWgBAWKqUm~Qu2$NPd8!6p_j|UN@MXik z-@RcjgS{4BDT#2)M4+v!gB53E)H2JApZVJjk*(h{)je~RP97BYq4Cs}fVJqtykNAV zS7r#o@}^~q_Qx`jil%Lds`l$N!`d2IPzh=W;>ucfx2n_<*3zE6Dgkv;*4Q%~4Blhn z37?8}gVr@hJwFonDSKZaY97}8dWJ-{!RWKWDbSJaXUM|cF6!|9Y46Z@IaICz6Z!gjz=&;=tTHX3o7qIV{pXe*YvxfF*3H<~3 zn=c3@&a|Hq3X}L>;wb75!Q6LXpupn9*MhODB2U_DP78F^_RH)L7yja1%8G!LB5g&j zZL1!v@jxC1U?jA&e=1|GQ4k@>+B($XcF0dETd&fMBxV}Z>2l->H*NP9liwqy;2cLvOWmafn zNEA;Hj^GGc2&y=OJ&d=~9vgfWa>c&DJ(uN0t36y%b01uBe&6`dk|kI6C%brd2Fet_ z0W{3&5p-zd)PsTIK`yh5@o#z>3%!LC90 z5h0~)K<&v5T$pSuwJTk3Mb$VP?7D?A-T)|<-dZDs9h%F7L`bMKa|l27@vNB}GTvEk z%IsuG0m^c|&VqlQx|%Px_6!X^8rYl{lXQ%!K~+(5XPB!uB;7OoI5KOe z(wZV2IbNI~+BIpqZ9TbH#qSrU3CP`ZMk$Sk3Wg$x0uKK9bsGTy2-?6uv=bOr=&80_ zdjTHa+jaU6xFGkryV)K+E1;?br>B&x7fwoWN|wBdcLR7B@=F{yAPihq|GpOWbdRtB z%ZV>hyr?5;tt}k{JF{QSftKL4S#$je!m-vAnnJuCkGQ zOa}&i|6mkg;U}`t73YivzgM1zCBE*HPx(9yUYBaG@G1PGSJAz5T$hWTd*#hD_u*?8yK)>|gL}E6?xQfH@gta389J z&9>yb+JZ7N_|E+QF`g{}HM7{pvxW>ZjRGEkhf%*07lhnxw<;Ee2mbv(_kagLDUck>aY{P?7B-o zN${7R%j};~xWk&wRQa5a9y+|6n|Gn~xqtQsqHfEwPt|>pqMJAUYxbW^@P#unlXHa( z%^(SR8P_E;IA1>3Ln-DXA6{qVRme3&&m2&wW%wqJR%9()AttMQcTy2k zLONbtzctVc=)x`fEBn87P9-}OY72l~tIMTP)!#xeX%t&Iaf%0y>+r@%?~Ol#>U3P% z2b-Yudw;Mfpj~xWHOOzNhn;CDVL{}xRj_Z-j>KdO>F0m_?7lG$2c0wjT_`^AnO2i( zDT;w{X8ll=ky&3CY5$Hv1hC*gFG6xS4#Cr+ir?}z-u4Ugj?Dc;A~^P2rt?S5FP)jU z@lSH{7gmSo`Q(2b)1usHFWKI!9_}i)uJ##Qi=ABohu{v3zKt?=c|#hu{qQ&0{WVRMDP7RZt?IH32sLDb0Zx|Alw+g_Q zCkaONl5{EH*_@lXQ3XjqSk2wAoTmtk#C?)%|Cj!?;{Inf%MFreiwp5Fh$(W0=C1gk z1Xbn2=ze)4Jr|?1?w?F$r`bx|x_hs}(Z9ve@{M=t^NobTs%!LYy@1Ij{l;&B5)O|w 
zd^Y>_fM!sDDxjxFCoVP5zKr;|y2+?(zAE~qL;fZbSyHwr?gG3oZJde_$SboFIO=`u z%f6FyhLTJk%=Kq1r6$>yd5y+o+(F1>^P#Byy%Kx`u} z{p|VM9njqTwIVUCC(0}xQCGf^g+B8=Y}8{xjB2jV{^w} z*6Tk{{!M|Z^I{{2B|vHKS-SR^yqk&u$x}d3HrJq=Jl}AT{AH3QZ&n|M_|rWtvn5<# zKP#-+4fb?RaP$nq{p)eWP4a~sr&9jHb$_&iuiZvJd@&*Yujb+g@IRes27@`N%-? z96(GfL-sr!wSJaoy5t1X2uMybFvDHmb3C~h{Hj+%^?^o}x_=(%@mUurUEZtUe#xP~ zHTG7WO?5Jxh*Q?+08 zp7xU=l}pjOKIp1t9Ug1~0reg;iY4z(r-v%vPUh^cYU)BOW{zCL`mwoV7b%dk&#(1o z$d-fl_S^UIjUrWpcQN9^j(RtX>%-z;=kD9y1*nZJYmxodZ{67ri$gllkFu~&K>IyM zv}SGp6Ckiz^`7Tuesn4+!}4v-e;{$rGh))GJTTd;aanQLsY=+|-(;h;gy6W9!B)h&_>?zh6l$~8CEUg>u< zh#QSNt|f>*vs->|0D-D=YZ_0%KA=_VrPvLX*43%j>$q2leM~8K*okZs+jHwEue3M- zXu$r$=4fLQ%B3v#y;!X%;=8D8tqO7^WPSv@8d0a*@#=j%rkqsj3&`=7(`SHQKLThy z%eg5RUSJHn@;4NbuDx)HME>3GL%(PE@IY?EH+tre2hqnYW4($5wBiE0QJ<*4wPB1*Q7pvBNv*OOZOR?=JIH+YaVUF}K9tjk0H?02>cI^%AN zfl~*+ncm9$s5{)Do0X9?k8^_y{V8M}?{w5f=G&d0!NTP(g@rXlsh?E;r{GCT+Aa6{ z#?-IEX5u%=A6&!pJmlfbZsMd>SR@+~+SiD2w35`EHaPEQkFX~`Jk|AeMk-!UJY@wq<%+PC+*B4fUGH z?E#7Pk-wtvlU}CumzPCgnCgBnH_Pxh(@y5Y{%JXsYKO>@DPFho?<<2c`?(gzI2nOjN@jRBA#}M=bG|mwID$(Db8`k5 zwubPI=)5``GNu%2$+6w@mR)&Uf*sEhz0b8r`tu)_{9W_-3ga8cTo!ptlmBAM^Z>Y1 zjF9jBgQaT^y(HzD-QFRZkWz42;yE{3x0L)$(2atl)_n_*Fx5K#QT>898?Z<1u{Xv0 z+7(abBl()?6gyjXL*FzW!2eTv(7Q>NmgyHSXbXU{J_HDH{ag!3gfzCqw7e8$c_2aS zrAHvDGgOjdrZF8vmHx580|K%H{G>3C1)n|T6>-Rtzza(b%jK$2uVY-rkTmVMvb6xY ze=kIFoy8tELo$1gJp$Pd+x1cG00A2lR#Vqzz{=gA7ai^0$gW5eZ#^J_Z=H|MHF$8$w} zK$u(Q^AhpYAK^;`4F~sfgPo!xsw1%M;hO_(CMQ|=apUF@w?*juA;ekF+r@JB?!HT& zrPTh_j%3ZM?QjNXKuxR@6ke-`)ip}mw$sZ7#)-Vn@^&`;p4hePmgB$LM}2P*y9LnN z!wl;RhQVe+?6b@qa*-7fo9HrG897ZM@KyVwM;(;em%a#<#{5XVgFX?AT6gix*`Eb$BHJIzo$O2F`Hf(k?n;IWv*=- zL|{owf5FM2>!$*><37r%q{Qq#ZQM!Gk$h9~yv#Eu|C~RX%nlB0yZ?-AQ!>9j1~s%2 zAx$#563zNg>9&R=Jri_^pQh)bAGnGe(GjNW3!RBSj5gRL$3yYzr}Fwf6V$pmh16-B zh@HAL+j(HLYk6CbBSDZE)xYQC#8O;Pjca(*xsL)ePjxBJh*_CmffAhnH}55$c?rn+ za|2QH{6&TElk-Fk~-BsmtK8MmV=LVUSDcCI#|&yOFJEw8Sc3Wik^ z)-!DFgLEy+RKes0Tk>aiX5%XDK*o$@D3=D-lsFBMx2ebEe8_FT`w8~<7TN=SdT!WvT*?Q&R-??9}8~|aE!^$Cd z`^7u0xr+);R9d+Paoda$!i~EBp!^q=Df^>oFEy#Pz=F+#*H70W*{^orQfBdqEwYs{ z{rv->KVKN7vTss^{gnNmwj4QPhxHvab@*64$-G%VknM#ufY)kPhiYKVXV57n2?qCJ zA7H12goUKzHVH%*m0U&Cejl>rJI36LrPr`B@G&GeaFekJ(rB{-qDuc1HXAdQIw4kA z+*`)a@&01i$~sOu64;(Dkdb}E>3@x<_3ON%`JB8Lyj~BDwMIQW`P9vpHRm1ut^`G1 zf*9$^#QjSOYkMhXX(Nw5TuSY!ObNWTkd4)w4xB^yd$;% z5i$@S{bWOOP(^D?%HIdX#PbR^oa5ECWd-W5%UK82{g`n}X&08%EVez_X=o&dJ<=8C zqoVXaZB~bn02GHD#jbgvh!8hP9b$bSOjekMI9q0clwh9?}N zogXkgAN{S#i*)S(xpMWcw!=m`xiDl)4{3UsZJNZ9W;sR&BvZp1svyzaxD*SClr_YZ zZ}$v;%Y1fL35ufwkevTrrhkAGfSe_(lUyYKRxoxpcGZ-Jih|rLu0cy>;2&U|5ul`Q z83VZ?FOj1Z*i4go%Z3O;!xxm&nb63Nogq)n&Qkg z{#ED%nnxF$b#yIQGZx%uIpD;huScM2OS3>rKIjL3Xl<^jzb*y&Uf$Ym`DQpLWK?vZ z;mb5Dx)gtICvjI$@}Ubx@@S9b#yAi>1TRzqqZHv1zJ1UGG6()qy^OogI(4I;6Q?43Z3+CpkOhLmOW-0DY6VRb=RQBu5P7u6eFf) zbC;uwJ@4wf-kp4F9Ipf|#!TaJleVpwDZR`n&FLZdF_ov+K2w#hTcyRkPn9gZ(Jzwp z&N#O>lgeq&$bRQo{Rg4Vt4{ppDO`sg(b#<-w~ia(ONqmcJy8CA3`_;>R|p+n*_=4o z-rj=`&}83)BAoo^Zqgo3Jw?>N-HO#XohE-Is8Q?7g2|0n?3|W%|M?#pN^O$|2RRtS zhA*8X;sRSe4h6$L%v}oT9cv=b@*S>$X}ned*UG(=tKRLIT6U|VQxT+Y{yl)evFBbZ za_O8Z)pI44YfR*cUx?Lw&`fx3Q?8Yfk;h8Faeufwp;2NQ*B6Vk9K( zE~?VvMB9^nR+`3o7$cP->+eI(psCf9UOQq-a0Qt*!S(x@$td z&W~f=VQNifyta`jgKbD5T>Ur3(N7G|Y(T8JHzhGLf5h_#3|pMb!E>Z+_es}imNZIr z8&LA1qsuXz_9ZZ{CNMycz7OG;_g2UOOhdy4j?%^PeGZ`qni~!JH_Yvq;19HEk8*L1 zcA?_}e)3XSIaQ{}P3s-5_kw2ses>X|C#viLILKUcD)!ev}88{XGyMs?igY-;JC zguMzLCUtp?p&R1yJbv&7mw&``aLc^$!HYudSWpi2jQhjg9ZA=&)Ex1?5gX0=Lp=r= zRy#5q=od^FQx7#a1L3~hn#Or+LO1#gl!kcIcKcozJmeq=a@GSS7Kcr>zq=*4Z*edO ztdzYO$E4`LrFtt2W4^ZNGJ}97Qsj)F36du>C^DKz2b$2Xk*G{jzSO8N6j#tNY66RJ 
z8pqyKz9Oy7s9isZ$Jv2|e5t2fyU9O=H4I|<+zy6^A{bB0Ph=@a<=zRL@dX z(gbo{QRf<`<(zw{RA&Lr72Y#a0a)jCv*8&X z>aY zY!};r{wrj-R-;uTxfUK@9JCSewz`#Pc}g}HO+Jj~lVgs`{E3Mjf0>U^4qmBg*{fPA zvC{g{ZP*2EOry5#o-B`>ThjK2chp=h84%`kyA?7+1+UJkT@X6bB(|X9AsHT?KWRp! zo{J1$?cGrDaA!obat2-+dgxY^7>bbWw5Mj!l~ShC-A`hQNtB_-SCkK&Iy-p>{Z^3U zF@n&g4iNk2BWt_hi^kKCEiSTy59_q8`$zm6@%hsHk&26d`x<5%o0YZLS4tA#M+h_- ztw4z#@oQ|sf}G4o2afD*_w$-~VT4U>CSNVHY1&L(nQm~~_mZA0E<^5l+c$s#j>p2b z*ddd9K zdfChnm|`!OrN4e@3Vy~iWx40Z#FmGvM`Ck_e3hYz!5%t74&-jFhb5BWiyR0*d+a!D zXc?E7M1#XuP_3D*?0n$c;%I%BrQ{r1x1@~~Xe%1ci*)Nw$dZ2!Zw`9$hf-;z{hN%M z3x0waGW~2r&L2WN$hh=+h=yB!)SI8%%`X}bIBTdNLU>6mumU(=Cw+;GhSh>J2=5SlZ31;jE~4PBYGM?rpku^wvAARw4$f zNj8eU)edS>=b$E?G!D8N6FvyXDWh=BT5slvA1to5 zh}Sp`XKl)btm$1PSm54a(T}mJKe~3)fjXrI>rzb~A zUqJnPQ-lu$NbksQ%}ignbC;CnoZ<`%ps^^%O9P1IsA2w%$#20G{|jmNhi9eQX-2i0 zUY9&0TV&=H{w64@qJ_ToZ-PGp>wI$l*v8K;sD`1$mRDf2yQ9{Z0NbS&oc9$J?9< zFI9T$w`GQS(-(tGRhC_re`tC=`~O3xbZ>T;P#&rzSHCionwOUXb;z2NxF73P<$ zWSKRxv-J)=BhJGw+5Rnhe*!O_pgPu)$kl(S7~>X4jnz$(v-KBNWqK&xH7E7vM}8hj z2%NZScTT9bN_$V^!ViQ2cX><~swo@;dfk(aI%>ev_CK#_R+rsTnG{5gb1k68Ye_vn zN@%U!>g0Ri5Zsi1&SIw-lA<9DA3V>Fd8akmiiX71jt%d--i#4~|&c zv=WuO`u<3Sh3K~dAp_O!D(nsh6VE>+htY?`&uTY%DmOJc1kWpfC|p)|6;lj3Of2KV zQNAgElHI;9Ww}iSYJR2^HS%%#?kVOG4O0+o!r{+la%b~-LMEg-n_Bq~F-lP!r$!U? zF!C{jO}4^UfC|%Iq?Of;&Pdmqp^dc!qr``D$Fwh~NM#YaN%1$Is_Ips*a?2{X(Tn( zl}LtVQ})*^qXIc)y2xqw8;IV&3q8T_WId)&9XbcyZ+QdEl#z9``$C zbaW90lDe4%m6)WSI+MX#6o_t&@t>H=T<#9JLmXJ^8c*oBPgE?o3F)6Ah7L#-2WnQ>CiTQ^u5f+myUAJiVjAF8+jgFM5F?fmn zM1w-zJ542Aj;E749MI3Z*R)ruo7C(PHy8!7px(&C~Ov+EisQ$|M1KB}LAl<9cDHZDG9{?~O zR>Ej;F_Wn{OpNtd9yHV`u&K(*aEYKfnv_#z{8RG+bt6HwK96@QY}iSr#H_qQ9Cr+0 zLosKwKTdI?{}Z6R_2ER5z9_{j-5%444Jn49qzoxy+vXbTH-%)j1ltfgaipkA#w%l* znXtm#*MoxE?)~Z;wW{w==x$x!O2lNa$8;iOjv+=!=QYX#KE7>2h7>u{`_4SR2(lG} zwyZqVH^3ii}9rhP~+5 zU!xbVs7^vY?V?EmYX%bLP82~LZ8k2P_GkJ`Revfb3bccpKB#BKZ9`48vNF#Z>!n5w z?{1mANI>vmzc*bnFQ~F`sLKfzzF$1a|A*Vh8uIJ&e>)M zz@=entHZ+=E@=&rG<~co8j|4y5~uSn^fcX7b!AL@>dEw-!3h}}xz+#H15k6goFxa! 
zsKI{%pYH|ci;hVWE@2c!p&(vO&m{W^2?|+Mc9BBNAMtkk04K)hAICyGJ8S>8IQa@- zur3`;p-Rdj-Fwh>dYH`(1anXR#v=BdIkS%zLv8fp)Dzrxh)4o%at?BZK_{;x> z!(HXH8b%gvD8TxaSB7LQOkwn531E3VOX<{T*=A1SmO~<9ZrwduuW7TwmNca z?YL5z*C1Uo?8zN}$X@P6qnLwqMCi&HzMi~Y8UpY3n_#{@sGlzq)U=FVHkN>5NzcQ#A z+nuFLd%B>;*u4r77w?B(q9~SJ4_`Hs)!Yu1nXT0%Org6grLo>DYO0NxNRVH9q z)5r)RBqY1x(3D$@| z`Rm#L2GnT|VlMz>Gy2NjMo0neVA5LTm`FR=jpm%3cDnj|!d3s8wfNhacTH%U5|<0_ zYydhL!6%NDhrf|55BgW6kWrqb`Ll!*(H-XX%c?<0^IngaMYJ z{Dt3>Kgy~}8XDM2Q?AKIXW0NlzPZS2r1p0z6Dy2HF-xEle*;3lcA}`TW<&#{PDBE7 z{P1O0N_ueqRf~o@cosPqFFW7vnWXgFW-(TFw{2TW+I^DS1DF2V@R_YLY-aQ~=K~q~ zG|^v-V&EtPC^u=eFd1*oJxHlkx?n|@Xm05FKTY2r&h-EO-$5cOM$V_r%pr$1GvpMu z*%+ddLx{~Oqe!AqIghcEFo(PyLegQINQx4(AsQ+}CsZm5A?JSke6Q>Gx~^UOZ->|G z`MjU^{dnB><1t%#^NPEpVF?9mbl}bbY!T?j-sCj&^IhQ#(Y~K3-G8ZB>29+m%Twm; z#wzV{*hAez`$!+{fCjX(A|)w@cE*V0l*zvxt=_@BxIyDx8+z&1dnUk<>E+ztmu~JU zy8rU6@AXUB);oNTkm;^iyXO;>4BI`cstXGbxtFiZ+T1ez!!9}c;cv)p#)x&ig$?OC zXw%=EZ-37Wt~OVO4q?$Ev0hK2^mobFi$N=eZF(yQTlHdh!*&fNgo5VX zh6JEUV{-JM=OV9m_HN-F`kv{7_)13H_0%MZp9d`uSV`=8>QM^#YaVj{m}}`y*5@`@ zsCw??bwMw)x=mYWoJ}-%OEoWi7IcAKC6!!3R zqUp7upvB|EpHtA@9~U;GY>)b7@m6doAwyT~X5UNXt1fFuan-HlrH3`dbdLknkgwvf+et+82h(qG)#(3EfZm|zMX)qANXQ>HGFmVelyS~G|nV~me_ z4xDSdve0H-5v!Kgz~dUGu`M%Rm+$8s%f7{@%0?TXA-Hvd_mX!47~ert>Yr%v^Fzkd ztC0KN#;?9ARO=j6{8HU2#yco(V_%35`M2Yzj>Ghd5niWeXNY3du&_8y2}n{ z`4?m#Yd90_l->HvM1ARX&&S-k<8Q%48|5GF*9#TGWaEK%X&?L+@}MMv&geeFYui9R z<_&3fWT*N}A7<~go&+kfaQZ$t=rudmN9}2TVSu@Ajl@c{nfUAS&#oLFXsB|(@-|(( z8~*SMXMS~pakbELx2~9K4Samb=ZH|^R}V8}=l8Qz@Bz}*qGQi%ir`9H;COB6JYHiD zWIBEPP6Q#KhWVxXRRDV(R z=m-UC?F*8L86S7OvQ%<4aBM&;a9L&Ef{H^~e^;Dp)I9qocd;<-CWcNr0U_bLK+yz= z*H&-^8+h?AzH3?;`Y7W>OV+N0epCp0MP2yr`6l7ojlOtCTbFUE*nzz1j=j7Zv{fN- zj%s)Vcr5$y8Z;=akt}&e`IkT23LJ0qg>)r8K~d=s?Hk>GPm#o+5bMasrPWf}zX}b&lP~i}E1O?Dd^~&fB5zcU7RHudD8dP)>9oG<6D^|ZO)_JP9N+UDn(rP5kq3{T!ztaXUFpOQTmQ_Y7ZUleSLC!miH+_<6LPXq?FHd8vJ9BUf> z8$s3y&IJ&AW( z*AWDxej{m<%iGwWkSCZce4g%m-ZhiPhY1-J;un$skdqzgq6^m(9Ci%9FlT8t!uUx5Ta9ed(R{ zH*p(yct+;bBX?@?F~%p}{E!UqbE~q2FD1%UD{j*ml_&;D*;K5Uy|yYFmnfDMMkUJK zk#mWS+t6cUNu|Zjf3{7v?4ow0_{u2;bxQ>aIfh4ts3Cjd2k^O+olyC%vc6F0D!?k- z7kkt>$a}v|f-g(G%tzv2-Zx#Tzotv#gS>L>E6TJV0Q`~6?kxO^%iL?t&lpfCD@IY9 zjS+kA_Grh&{3E;XSRfzHU~rqnFMsXM;&&^3t0`j3cm*baoPJ zd+pbZ#U1xEPG~dfXCB7Bsd$8c7-SuCZ%pBs4C7#*YKxrl9sZRt3-=wUTjQBBH!3vG zV(LKE5(Z!;D%FWS^{|qj?iIaqWcE`^bDW3;6@{-a9jeW-6C9;+JL>11Cg8=9Mbjl# z=LNskgTODdg`A(l#dDQs_w=R1rhZ9o>px{+YnZZsn;K++b^k4}_ul&&|98>sXj<4e1gvPRJ- zLxZB^pJ+3B72mNm+A9%JlnoIHuqVZI>i5>3mZ_Hc)PbIKg~iyr3zKe)fwtY|OY8%m zC1$oaT&jyhHbTebs511Y>}~IDkAX`f{~iU@U9@WV^OJ68vs8Pd)*;;}mr?8JfME~fuB1Q*v?o)em{d?%x-DWgw zIkd4`Jcx_LkvjLxr?VH^+|WbzM=cF_%j6drx%9MO!qUD4jGuB%15Tr_Im>(N3yY}~ z(h|Feo`1y|NM;}|?YnwTY2BSZ(;dCkc_{${r_EBV&YGJ`9u%sN*H-?u-Ox_6JahiV zg)_UgVA4d%i$CW1Qem;CVQOi~)GM;p(~}0+T7o_V#%?wK+9{{c89f=g0E)|xmE0LD zt9%RblDHMNSJk+2z)oak<>L0ae3ihdaT%UGvSaf)1Yv?0o@0X*VTs*&@D=&foxw@t zwtBDs=dpqkiR4LtpD}2MV|I$58%>9 z8L}b(J8s|ntr_$1E8;&y3b)P`+Jbz`Kw`NI*GPY~ht;q>^JWztAM$>Jsn(c(B5Sx|I<>duW=JxzO0a;~t~1Xwq|tg`TDeJ@JZ#-; zJ7Q-4m``#~e*rJaRQ~f;N%4o) z1vrn1>?fw#MEy3Epq5wZCIpH8=S4H%GmIyg{(n0*EH0$sU<02+jpbxVHZ#;U3A7tP zR@)#?J62IkhS50@gc82px%*o5Cm{WD*gXotf*;BQ*tR^-MFh;=IbyHDwI5jb?VT;- zM^lVa4xX8|W|}>&lE()>I-95Ab;`*}(zbGneoSd~KU3UlBj#koPm$o;AUW6W`NeO) zZ4W5Vv!0+rk!xXw>4~cG8vs)~szjmmuHtK*(IUJD4USP3jF!Cx%nWwA*Cc(+#sXdX ziN6{%cU+By$cU(d7eb(d1X`4=`IGR&%1paz}wbnQAP$ZmwESX;z*0DVrWE}_mb!7EO zdr;(RV)Qn@t}sTYQ+~?U(xy+mH`Fcj-E`q4t+Uu$?4Re)PBxokp65@W9q|5zG6nKK zM0Z=|e<9b|p%@G};Fzo+Rou?wXOPM<4=wxvZxY#R3(hZ9lKx`eq5W^i^Az(x$z5rlPtc5+z_6BweQzWTBuWCI{yTv>9;%!2r=|wz-7H_(fazfA?+8rj3sa@3Q 
zq~}MqTg93@%gAc})p?#=nwvN+y?1z_^0E6wYS{NkI>*!!WB{;;Fgcm>-(~Fkx9S(0 zW9IIEzjX1J!UfLR;q(LS!=kg{P5Ikn(9BjwOaM~%GUlQZpBWUCs^x3^s-=XxY8r5Q zM|0_D@uH}E1gSgSbRlDBulM!@ICa(`Q%=J7KJ3ujU9dKw8S%Z}<;}mF6;^*9tc|>X zrd!x&JL_aW!^?-eZ4~`Pn;p95WUX z&r~OaMG9X(Gu}MfB^VXg>fDs3np=;ae|XT4`f$-ElQt~*_pFsxDs(v*JG;*MUA;gu z!aA{lF?%%pEHEX>EgeB}qd$Gj2+LX%T3Z2j_n;+Q&YJK1^sH z_DCzmcWF18RPe-c!`}1pR}MI))CrCFp-er|$kB8K_FTTR4cHnOr_(BN%Xvm7Gk5fr zyur=~pN*;%Pdz&D^=_JXkH=hoVdhev*YU96vo#dLUg;!wu!$H;K4#e=|odMCi zBNLO~udc5yi)w)FOHFHX=Y3W;y`@733ZLgkC=J)j`~{3|9A+IPu53{?xqBql>APUF zGV4g^2?@moGafMOEezNgj)qGqNLe;Is-g!x2V8|80UC-TUj}t1l+5PkhNg611~)?}Rvmj_j0u zRd|E@?&Pz8Z;OG+D}q!&Ir(vRN=N*r>Q=sH^-+8B_I&;2+zUxfR_rT^ zZ=_ZTnxler>BE}B{+B`G9ox>2&Dqsa(nkt+lDCb;6ctC#e*3q>M*_6o6l{lV8y#kivXMza=m0LO0-8Xo#=s}54O*6aqi zMS`9z<%Pld#`l(l_y`G658mWxgu0vC&zjg*?DTP`#%K*wr@*2oeZ?ueIwMxS??fyaZ7{d!(MFuhANixAT!DdDOx$`sD~ z$5H7e&;3Ua?=1elcYz~(Fp0H1=WIJ*+gVO{H99fw^aIWkvO2*}8mQaf3a1YP8ENW2 zui8@{kNyEq5(c zUYod>L$htGY;M+MoMavQ&h2|VG44hC#3@+#zN6jF^PrG+;!o^*Zr3f&?d4~yM(q!( zH{&%Ies5?Ny^k8#tgzRPDcGXrGgUekuRKd@Q+~Mn2=k~0;eL(AQN*G4iqWz_`fj8k z^&(}$@JqXb@$O}1T}J)O6RWq9q!#0So;`Lq?t;w> z3JSE1_jfd=-EuwqC~ow_4%eMqy%%*9u%cuWSloV_QvS%@$U$7$>LoFh^LM%kid`u+ zT41zqYI+wYk{R5HonlZm)r7u}#6X6-)(88qP&XDs*&kt4mvYDDUSDX?S5}pBP^Sv< zY^*}f*$dYlB`-;Q`|;P1St!?meXjCv$Ex&ob5EHAv{l$1!>`t=MekO>t(s5$uoYVy zBxh}QsLtC9mmt451I}y4&!~Kp7#)|ASu@$o;NUpGt~WM+XXEwKk=VYcSq`ol*p{Ds>K8@?y{l}q2+ zggpDgjyRCPGZNJT30ZP~?ppk*^rjM@1e zsUIW)qdrcQbzVt}1Wp+lZk%QB*2-w{;;M}9WIeQz_|0der%LyBuXSEn_)^}KG8^#Y z%zr9Tj~r)3jM?3kXPjH;s|*gvxg0;`5$SGxhKxzIxd@YC@p0;XuYhLN_L2K9k#oij- zcDjTuY~NcgliBbIqUm3&!<(@YlxAbsH*BXx&pTI@T$>!k=DD4Gg&}^`_Jo93{7QoJ zuTjN=(RA2FwF0xSQ`)NbMW@e}j_!(e&=*Pb=I*@u;C=sFC+L!B!(ZzSb1%M0%6=I% z-!j+19-S{Rb#FV2IQiH|V^cNb(W(Mp}i{n25 zKH56qY&oB8Ghq580h$S%q1_`ju9!IvaPcdrKF*WQe~y=$c8Wh^`s1(Mza6D!Jn@i` zfU`%=`}xyxGuJ5~lepM7i?0z(nlTevDOC$`3%ya{S%t7x+3^rx`w!jDgDsDHZt?Ks|1Ng%iX z+o9@6a&)+X{7;W|GH~On8U;4%;J3zvy_8GX&|Ys2ft0m6eHZ@u5?k;kydxgk+w3$uJ1=Pe(M7mgi;n8CU=QI(7j^?#$4%!^((tzNmkI)P}Y=LW=aXoels5({k#0#jxTvfTi9DhJu zzhUrlrOB%hN@Qik52xNQ6>_UmLKc~jX^wkYe%p~wr1%mU%I-0Iuzx;o>uq~CGbaU; zUEHx0PzXT~;z*jBO?Id?BCGdsv&;00OMH`}!=IJ5{h~|cnwX9OWDMOlP;O?`ctMJI z1wV|;gzKWM79CDDap4|bPO2Mv3?-h!>uNoY+#*WxcCC#cSh&(}KTx<_DQ>{K!|(QH`|_lUKrwPYpX}F%FytzokvB!bM;bdfThXz-1p>gDmR^cBg4Ue@s41# z`X2heqG~S>rSDb9rhdyaNAYW(=NcDV2aTBUm7HDD5i?Bk2ZLNd;?GgK82di!*_ zJ%iLOLk{l`X3PE#-qkFkLL@eOCUCpriD5yaaO3H%SbT}v$Ccz*uKB+m!KJwTNFey% z4moh92d>ioUecv)Dc3>>Fx$6iHy`Iy%tjOm+w9fV?4cX3fBtS>pq6wvk{n6IyFyg^t?@0eeuh+G0_R_df0Oa05* ze2s`?%{Gyk4P9@++DAbkxqK~yyr70P3V29}Oyg6$c>O1yL41s{oyfCY^?Vvi`43|B z7tVXwF;7zI-QF8G&+v%cdndGIfd+5QV~k|WPLJsbzs@F}OzFA#ik9Z8G&uzGC%v3?Gk+m5 z)a;6Qqjbu~{v3jdU+#F$lsr+jB?SH;`&i?V-{MeC-QGOb65%to8Ed6U2SQMEx^w?p zQt>ztGze%wi+aJ)nA-Y+B=2%k(_k*sO@0IUUl#d7aQ1)vM~8@mv}V2^o-j=0mV|fS zZ^Xn9%REE%-atcIkc9kVro!pFZyxxWU<)DmB8^3GPYp0W=VX?zC!~=aQQ)j!Y3$;G zCN`T7R6n03={=coJ1$3jCDt|Jzkfa{!VGSFDqs7?2EM*}8h5jqpf8j@J*L>Rp{=Z~ z)>uR6oL)s%Rqoq3f?6C`uhM@abE~NOD*J7zs+@vA0X4QlGx92Ir=jFMJ|*N`I0_@M z-IfP#8)zTKeni_TcbkG`r5n^d^t*b8lN5qFz(JQKe$j5lg%$0k8T(9TgY;sMQ8tD- z`a;V!E9g<+HZDCPf(3;}v0||vA%)&u5Z@s=pg~H1QR7v;cnS~->(dDgJwpO{s^uCs zITHxKhL3!$ie-ndf6vIjVfLk^)TOT{Z2 zTWgqJ!*uJ)HV74IFGRZabMDK$VPZ82VtuM|=h<=2`nM!6;6G*Q%Ql(Ux1*X(rR;*= zFnT7(@MomAGc%i>$UJ#dTybcIA7}nEt1SDTr}c4x8~+j7XaA2(nv>1}z4;rxmRgO+ zp;Ob{2V`f~oCVZBR={8NzmA}bg+?QzMtij4kwa7_8>3a}J{NoSCIs~0_ z{BE~K{yzMfc`)3g7a-F_j=o(BrE84K-Y_w89yHK4nhr%nLDZjtgy3;&Nql&CgootG zZ1kII1=T9(z0JC9u|P1N?zXIaz_ps6GuyuRMLweVzk+kwDA_ani|#~7;nHlaKQ1Io ztQi?~Ak&SUSh1N|cMirQpSKO)u-GS11`veMyD1HzIQ=%(M3Hu09vp35alqC?`_Q4= 
z59mC=895}ir(syWH1z0#iWWW6pLOX@JW4XU)DbINk}Fc9OuAV8)#dj#?-}%9L@tK= zOIxE@a5Xhl$3w4~j~3`1r-6`7gFrk!0OVT}3PzP{C11H;h4 znQ%wVakU~$v{1m;SQ4DX`YgmII9K?Eb4(UPVUCbi6_p{qg=RDeDG8GFYVX33mS^9V zjLQxCI>=H4bd91=Gu{)EQ+I$>$ikxCS56`_fR>UChT&A^0{cwFGhPMZ5*Q1u>?&{u zfU@=Sr1Ao8k7m7Y%zJlNqdj+cNMmxDvL=nEeyxwPsg_O{3i-i>W98K1e+{7I_1?ix zQ>%R!7-|Or`(ISaxf)@3tuiElBa&tW)>}%rYn`BXx}M=nK)t>7@%F{0rVov2a==Y0 zAApbUGxRT>igl7Sf+W6X>o&t;z&4(G4Gz=UUCcOt(tcC<$4=qZdQ1gqWtE4HEP9kB zkf)=L(^ah9a>HZgo*@JBogxMIFCpW&P35xjj%yM0F~D@U%H{9TyjtyR%*5=Kw-`a_PU9xV&?u~&<7?ayuk2y)lFb!gTh_G{#4+$}ri54c$wZ-3wH<^|VSRBnx&;^^Kq^@GpW#Px_9#CjB$}SpwM$r>=ih)$^%IQ z{xBz;Dgfj2^40dkEnTXZNHA&){1^t5dN1WM48DuO%LXS&e%B0`A%=VW+hJs{t@4?! z`yC)#U)@z1IRK(xd4@p>z?gU)La=C@)Gzm3eIA^UXDex+qn)d`9|m~qy%^UeN|SfH zb(IXOM>fG>O{3M%yCB_mB9KAS49Wk+%L<(<#G(80H3o(NqwFIu<{-}ArG$G@a-JvBqkn7c@poknm^ul zh|^0`c&E46gb5D*LOq$i?y=!CcWdtr5`0~ceYX*|UYV6-{(`(sx;n>hDG_w5|nf=TDVJE-oET;kZ~eax6ro_;gnbru&+!trST=v zg3d&E>>+^Ct1N2JLcKAdLenG`oXNArw86LnC=?3!W?pd^2Cu0Xz?-RAcT9PnUOKJN z3t2$gFMj(T!LBtq`SzS4^dUUyps!xltxnY86>+tCpVHk#`vPNGG#F!LM6&e_#TIv^MSuB`gFj~h0 zoZS%cs6nqP6Na|0tOCpXU?sEjh)sUhptY^t%HLMfcQ5S=$|3m?&8Yf#}l> z&wJT6EmiCWS&;S{$WYjhZcFJIye$x~mgjV{p~(nraxzDWi{UXH9WgnyUM&<`Uw{H0 zOyk8O=T>YrA^JHbrjv7dC*DdU#l;^VM7kE*7)!z=&i!3+3kE~F`SNfZ5BcDX zCcWDYhlS68EVpIged$!47jaG|HWY1+)Oog~dK7CBx?1H5XyGeA(pKa5@Rb)%ggJze zo|_?i0>aRn@R1WG=!8Z0kcT_i)IQ+?Qpre1=CDvzG)q=@16=bXhM*%q#_eOc)6eB}jl2i!M z--i;_i0&@fGM|XU>SkAJ!4&Y9=dUvxQ8J_)ySz$(sdHl%#4yB*2@0oD>D5)XNGTV# zj~3+1kV0+a;#bO^=R<5P%B`7%nP3OgArkk*31K*?opA%%XMrAdo$%&0CZo zt??GRF2UdUL}pSZqEO80xUO`RPyT)u_?g$cDy2c{z2YkTwaHBAce*2XA1a@K*Ef~d za~>cOKwltwHJRLLzfZh1(xTk-4Dkhn8{U1>7Nyfzp^Uvit~gXDFm#1>=S9+xwI?j) z3ElbbGl8nwf!&lTLg0diz6jo>*6`NasS9qcpN@Yedry)VNbkfTZrDl*(J*w;7vIoR zeV|rgt$8gb#A6y#s%>yG%kroLA8uGNZUmDEIZ$F9-gmQ$4<9l$8+tPgO(NOfUJ725 z+@hAqA*&UIj49nASdy0=MQ16cuRSB%yZZ*)PPU`UPMXriZBwvts;|zdlSr;{Q4qmf zdh=cF6uqu89{i(vF+MaYvWr&|%&?xeZxQa{=h*(9=YZIU>FR(ul1r4pcv%N8ifZNi zS^VP3kA^wJ5ppGHCy09!VWHCL6^on6B%CS2t^V5q!ajhY&oQhACs4H(_FD(Ve70|a zUTDr7&sjGD3BG1g6P=jX@N2S2&d1S3mRo-j5;8(tsEdHb32q$CdM zq}(Q_D#tmg8si>;_8eU)<8|gWzRda%#FnpY1xqfr?XU3luap6OoU0lwg)SLs&lDlY zb@ZY$NnE$B(g-b+wr%XK5ussAR}t| z*Z>^<@Otucu#jBUvF-vwz;Xa9Mio(CR&z|5Yu6|WJ;1GoyVVb6DFMPb%=)BCj{f-vqq z?z_3G23vjce=#nXw+sKW`-JBYTTbJ85U)j?5>P(@Z6y6=I-sC~u!ZoL=YJ|wZHl?d zHR6afePf~aCK9O@^qhIY-PaBgz58n1i~%v{O2(ge?2-4>^Aowe4L@R|t}5a?OXe?l z#Mxaz8(KdtKEj1N=piT1LG^V7MVbn59pX!sY89;~S~)ZPqrttU*pe++$;F&G$^BB7 zSE;YJnmj8hTYJ>@X|iompXCBCtfX|2y&p8&dt~>F(%>N_U%CbNmlG5*{j8O1<*?b|P@s<)H7xyOOgZ?}2fYrU3d+mGI^l!E(Y)gFZ zGm)WDh98~ywCDZ)xSsFcKsT>$io&50PX)h1;O%I*vzWV5)U>dm#bQ;Y8S8L=U8lo0xjh*bN_NRw% zGY_teIsUD0UUw~CN~}92?liP$HjYfzX`iLk*l)6~xQVq9|Lu76{brmaDpdnoWih2G z>jdz`!u!XbUO0JM$gn*#o@W9j7HF<+AKy&{N2JYQwqLsu6B6Nmt{}4)?(; z;QcBV4kbCP;7X?`WIJE^D&OwUFPA98a-H?i)-%EXcCf0_(_4@}3MeW~tzfX>+$BxP zybnAQ@8Smy9H!^f^(^@T@Z_Cal`8$c!NS=O@7Vai5lg?p9n}YP#OQt~Z^nFczL^2x zzjvudU~>M+`oaW@3(=);Pre!_eXg6cU8ui^wwBFp%5JbefMkOsO=G&vuk9H>KkoPa zZu7ay1cr(}E$#e^rQ7Qkob(P{zW$RMUO1;iM!S0C+6&t7*MPGF07?OuPpEN=?|y<0 z$wo@MRI7j>5Ln7V(Rz5**m=mU#zi{m%jG`kNe0%8l4 zDTZJ=1It{RFJt5xgc>GR+ioh&#q}aCRT?%ImBv=y$BJkK%?F z#Dj;w^ql$c^+W52Q%D;X*TZN#r!LZZypj&~pbIHc!B7N>F3qO|*XOHw{7zMH8#_K9 zTw3G?G21(wqf_&Oc3CWJsW|>qB)l~SQ9t=RWX=IN-DO`*dnL=W&RubKX8zd!^!NGQ zRHp1~dU~UWZ3+HH*UP$#*95sLsaWZ*;wenG@zHEnr@*3V9Tt`kE_-T@RP*y^1_hHD zEdUup@SXi0y6D6um&5M7MMi(MDzQ)3ve}09xzBD&ykCP+5`oD0ZmB&_S(ilVVvB(} zw!wMU7aliY-hEEX7@85FO>`7Bnc5JPKisu9Q1><8PLjGb`!`AAO486AZAoYcQ>+zX zdekLd!KdtAN`1*CbhY%wM0DM7h*XT1+X3Se{S6svw#b-4O52u0fQ&Lxrhzr&cknx9 
zqR17x;s<&+JDtlpU>w`>DIMUPTg~7|Q_>7JXt|&QP;V~mJL?NY8FYM-5xDEzaR*8= z2^_1&AFMviF}#XLezsiA)vg?I*?P2%S;ti@mY_{shaFqp`aOs7SOcV4N%*yAGH=dS zkbq9Af#S)U(Z|tBA>83fz9S^W*@UfP8NBg?H5I#!TbEr0)>6JpN#hr3dk<-X_@o>U zRCjeX=sQb@%|AR{s5&2MW8*wV3B`H#KuR=bTnPH$iv;yz+McGm(?SNQ;9UR1oYHxl z`g?Z?&6sL&T|@^G%GEb8OfCqD6XfaJD?^JJ9x=dM6$%^L7ENXfo){L3jpJyev7Li= z#hl>sQ3A$D$R&X9=1qZ-8`dh8aB*;D@UF=?umYTfEjAl#v8aEw2KW4?v}jvKxKCx( zv!-&<-Z9jtG~B!n;ihbc%$fQ6==m(~i0t);@%CT(j&@4aaI0F~#;P9cV-vf%w z(V43D(A8jOwB!j3TUpsvIeL-8EGfRuI&|`Q4tD=+GsRWq45@)g$GLD$ zPAL8T^`gOIvg416QwKr1H^E^e%4V=&-Zu8@`ROl@_JN!*uMhX;?dlgf1H6#BMA(!e zjy%V3hjNi`1Y$x9&!(90&nshAmfrL*i6-l7dljCjMtE4q=cSO`jIr^g7u`)@ow*#2 zJO&7?Mi`%XqG5Z4UU|2mgCD@<_Jyz!0*YKIRwYSF?f53Z&%45rmJ4RMDsc>HfvhK2 zBcawOzG+JL+=)n3sPXA3+RzA1K7pw0U>JLi*7&X5@0KHdAFX>8lbox7aRUUX_{#=% z^-{lZ$@yIw0B8rqw#EWdUx>&bRQw?YE@OJ+ zRDeS=8@h6aS3;79eojB5U4?s~)!baQ-7}9+^0{0@SD_&M2MSDR=Xko93n!h`6V!Z=c!3fJeB}waK8ZS}>xr%d6!@B11X$-qr-Si}!Tu8NVPmid ztn$U8wGUh5?z{o4N7}yBfPoBEHS8JXe7DyNmaE#5b?9<4YytW1Y-mGupEp<#o;cR^2OlbK(*dfkYUlv9d${e2@d_j8V>xXFiPvR1X}VHI9Z zUjxu6PjGpVFuKdHc>(2Z+sz5EW4XUH$VMXlL5a`F>%xNrA zHc+zyoXn{ccs0JUAC#VK_qg99?Mt$^itMv78Cg7Al3p>k&T*>0g-L!1-X2!P7RDOY z;tH;^=w6M>hDj_$(G8L;pjz{VJlJT<_fnq8xYC%4%D9*@8lHV`jz;wBZtD<(h%ih@ z=53AH9t7c4cayO3?c)YdU30{^u*7?d&-6zzblQXovhrusE|RK5_)%BK^LdBI@1jbG zM^fu+&~Cgg46hwCj4i{*6XG_Yd@IL5h-IQtQ*Ycv}t$u0|xkuCE~-s9gy;q9?Y$pVVx%c1;Jv( z>GyI@AHYR3>8ih{yYnwav2BuMp4ITM_0IWGU_wcF#+oCKPKv<7(U_6oMAm=};*GX$ z^*I(Hw&47)Rhy#+_Q|+?zua_FCh!X6o{WDv#AQGz)88QYq4fA0!9&UcIz$L~i*O6& z=vuRdqI`{vR0nZaFkjJL-o@+#NN~o@?&8H*r+e_JJse@8XjW7{bRKi7FAk(%qI8CK zyN9c}ek4922RsC%+ks2(H)oidED+iHF`A2j4HOXOabgu3+@mm-XZ|f>3~e^rH5M{@ zrxGH#3BVPFH8}pxt^&#u$+VEXlCm-&#ITe&uN+J48f7GQMb&Hv?`IY8ATFJDxX+oipRqGTK7@D0XH(u8>1Y-$AEy%R#m z+aBoR#Z9h+pT--(jA9HK8@Y-%n1^8KcJqT<1!#R*cW~#0$WN7dC9_3G16Az^fuRT# z?dqjRbG&Ittm4Xm2Qt*K5PQ+p)FX||L?%^fb}`L_xTx5(P~`y6h)n|m|#A>)MgOVf^=4a$Ss$g{vL8}F_!vwl4di>gJRsr(z_oCloiYYh9#aiO60qzx$7!3d_ZP; zdO8_8CzDfSuXLLLgJROlp|;t5pJ{Q3*x;zj+V}gw`LjKn9;FR(cVHXF*{`%qHRCK; zYXpC!Bl(G{SO0QYy=PYkum4<^?&}&d9XCRl5_j_&UvYM%h0sLB8}a7G2}eA^m)-69 zhUR)y{%$p#7~Zu~0?yHMfG!F4M#zR*lb|jyl;iLfDQ-w5CZCo(cS#Vrgw?K++>~?6 z-N?eriVJx;Pas9WBw2DR#ADeJp6)M1 zSbECWd*ZOheGW7WQ4LE7oXBn`fjsmoC1ptY*LDTBXZJn7y;9cZP+H^Q>=2hodlQGA zxg3$ly5E(d^NFFdL5k~rqAyQn&XYTKwNvhVR%;)=%Q1DTcm8&09eJ8?R13;Ej8!4u zPYn3`>`+sZECG}4pftLbClGf)4a+%1aLKFhDpQw z_}2`a0+~ACOP?6V+SbLP635!V;!b0tDK`-H(KF{64wne({iP9bofT8l-_<@zYchl+ zSyk207IW$7ryWtU(L6gL*YHy_X{UsPs$OMfUT(9VO=H?Du1vm)aKm0^)$S2xLiuU(mxkVSg?GVBEZ{TP->wzu>f@r0y z)-$h7gq|sf;v<%jBnW|y<{Z8i`28wzGJVEJsn$O`S578l_U7xZMhY(1RqlHkwWOL2 zST*e&m#f7Wf#DYUy6V)Yg(zyMr6iRqBn`XLiX6(v)||jT5bnmkN&^&q4IkA-5lMkR zEgqx8juN!JJeUU1k6>>9(0Ao>&?N&_mkSGEW@*uh9A~hs{=%Bw!$~VZ;@degjzDle z%@(o7yfs~o;~c{T-tgcPnK$wl`5@~kfMI%uwrUd)D^O05Thlh~OX{(kVe{bnao9@! zqmzN7neRO^%>dV4X&gXqI5>lE|Me&ZaZCtL$(1Zl;L%)1MfS7$05OoQgarO)sCS8f zUafc4kG?~u>fsqBhTbN+cwhN-M5kh~8j6f+ft%Cg~4@U-|?ZJ=}4 zds(lZPmD9P%s?Vj6A`;;9v~IqWTi&VYt=wE<6*DBk-7sBDO3?=1ds)QT`Z*;SIsy? 
z9GNfT%vKyuIyf{^9~6Y9;av|ljY%}t+gt&zlI=ER+j`wGmUc2eDOoT`srrM1we?qHp7s3Edz8%f&{QNCll9^G1WStSo~EZ zhRi)0Qa{e1Abc?AK@}h-{%ju~81D(r%n1?kGfBUBy~C{7r6+EW&7vJAHNwTmfn@l zR3K#OF|~$|giH=`k-1>dT?*g zQLo^QHo{R+d}8Wrdz7P%7>t6_l;v56>^IRFNbhD(OK2Vf&ZX=jWuMG$<~37GwYuI^ zd!)xqzA~<^mMyrO_?LQ2AaYwRgP9@nr zT_{c3EcE7PHiyTk98S@dyZ)(%d?8FSCHwm~V1(x{&zYAL&2#!bN*%4xN%EOI1Zsj9 zohhJ))*Vd`OlI))o5weKY2N`?O~A->B$E0mV<%FSk5Om6&|;6f0XJvqAu?-6Xs6L& zw|y7F2Wum1N%-()^3gl!#-0Xp_B3u6y%Es-Atw#JiWVsY6K*?(mWV?siSIV4Hj zC{biFwqt@uIGEM4)CD^fjz={5W}{D!BKuOaCZ1PG3r{c5@ITPuByyS-St05qJo0}d zl$ph!PEW#QaLxQOI(dJ^kQ6$%rZBXY%r&6N^PEJy7{&qx(EwK^sF^kBB;Q1K$N5LV8hw(lU0~dlH22Ti& z$d8eW!W?7YNR(sWNRmcGyCy#*ghR;)#=IgikLeMNZFGo6H{ow=w|h&&AM<7p{A|?! a03jIsq5l9AA*g-~@9twCwR;f7xBuDQJ7seK literal 0 HcmV?d00001 diff --git a/packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart b/packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart new file mode 100644 index 00000000..6023d581 --- /dev/null +++ b/packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart @@ -0,0 +1,293 @@ +// ignore_for_file: avoid_redundant_argument_values, avoid_print +@TestOn('vm') +library; // Uses dart:io + +import 'dart:convert'; +import 'dart:io'; + +import 'package:langchain_anthropic/langchain_anthropic.dart'; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/language_models.dart'; +import 'package:langchain_core/output_parsers.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:test/test.dart'; + +void main() { + group('ChatAnthropic tests', () { + const defaultModel = 'claude-3-5-sonnet-20240620'; + + late ChatAnthropic chatModel; + + setUp(() async { + chatModel = ChatAnthropic( + apiKey: Platform.environment['ANTHROPIC_API_KEY'], + defaultOptions: const ChatAnthropicOptions( + model: defaultModel, + ), + ); + }); + + tearDown(() { + chatModel.close(); + }); + + test('Test Text-only input with different models', () async { + final models = [ + 'claude-3-5-sonnet-20240620', + 'claude-3-haiku-20240307', + 'claude-3-opus-20240229', + 'claude-3-sonnet-20240229', + ]; + for (final model in models) { + print('Testing model: $model'); + final res = await chatModel.invoke( + PromptValue.string( + 'List the numbers from 1 to 9 in order ' + 'without any spaces, commas or additional explanations.', + ), + options: ChatAnthropicOptions( + model: model, + temperature: 0, + ), + ); + expect(res.id, isNotEmpty); + expect(res.finishReason, isNot(FinishReason.unspecified)); + expect(res.metadata['model'], contains(model.toLowerCase())); + expect( + res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), + contains('123456789'), + ); + await Future.delayed(const Duration(seconds: 5)); + } + }); + + test('Text-and-image input', () async { + final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./test/chat_models/assets/apple.jpeg') + .readAsBytes(), + ), + ), + ]), + ), + ]), + ); + + expect(res.output.content.toLowerCase(), contains('apple')); + }); + + test('Test stop sequence', () async { + final res = await chatModel.invoke( + PromptValue.string( + 'List the numbers from 1 to 9 in order ' + 'without any spaces, commas or additional explanations.', + ), + options: const ChatAnthropicOptions( + model: defaultModel, + stopSequences: ['4'], + ), + ); + 
final text = res.output.content; + expect(text, contains('123')); + expect(text, isNot(contains('456789'))); + expect(res.finishReason, FinishReason.stop); + }); + + test('Test max tokens', () async { + final res = await chatModel.invoke( + PromptValue.string('Tell me a joke'), + options: const ChatAnthropicOptions( + model: defaultModel, + maxTokens: 10, + ), + ); + expect(res.output.content.length, lessThan(50)); + expect(res.finishReason, FinishReason.length); + }); + + test('Test Multi-turn conversations', () async { + final prompt = PromptValue.chat([ + ChatMessage.humanText( + 'List the numbers from 1 to 9 in order ' + 'without any spaces, commas or additional explanations.', + ), + ChatMessage.ai('123456789'), + ChatMessage.humanText( + 'Remove the number 4 from the list', + ), + ]); + final res = await chatModel.invoke( + prompt, + options: const ChatAnthropicOptions( + model: defaultModel, + temperature: 0, + ), + ); + expect( + res.output.content, + contains('12356789'), + ); + }); + + test('Test streaming', () async { + final stream = chatModel.stream( + PromptValue.string( + 'List the numbers from 1 to 100 in order ' + 'without any spaces, commas or additional explanations.', + ), + ); + + String content = ''; + int count = 0; + await for (final res in stream) { + content += res.output.content; + count++; + } + expect(count, greaterThan(1)); + expect(content, contains('123456789')); + }); + + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + final model = chatModel.bind( + const ChatAnthropicOptions( + model: defaultModel, + tools: [tool], + ), + ); + + final humanMessage = ChatMessage.humanText( + "What's the weather like in Boston and Madrid right now in celsius?", + ); + final res1 = await model.invoke(PromptValue.chat([humanMessage])); + + final aiMessage1 = res1.output; + expect(aiMessage1.toolCalls, hasLength(2)); + + final toolCall1 = aiMessage1.toolCalls.first; + expect(toolCall1.name, tool.name); + expect(toolCall1.arguments.containsKey('location'), isTrue); + expect(toolCall1.arguments['location'], contains('Boston')); + expect(toolCall1.arguments['unit'], 'celsius'); + + final toolCall2 = aiMessage1.toolCalls.last; + expect(toolCall2.name, tool.name); + expect(toolCall2.arguments.containsKey('location'), isTrue); + expect(toolCall2.arguments['location'], contains('Madrid')); + expect(toolCall2.arguments['unit'], 'celsius'); + + final functionResult1 = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + final functionMessage1 = ChatMessage.tool( + toolCallId: toolCall1.id, + content: json.encode(functionResult1), + ); + + final functionResult2 = { + 'temperature': '25', + 'unit': 'celsius', + 'description': 'Cloudy', + }; + final functionMessage2 = ChatMessage.tool( + toolCallId: toolCall2.id, + content: json.encode(functionResult2), + ); + + final res2 = await model.invoke( + PromptValue.chat([ + humanMessage, + aiMessage1, + functionMessage1, + functionMessage2, + ]), + ); + + final aiMessage2 = res2.output; + + expect(aiMessage2.toolCalls, isEmpty); + 
expect(aiMessage2.content, contains('22')); + expect(aiMessage2.content, contains('25')); + }); + + test('Test streaming with tools', + timeout: const Timeout(Duration(minutes: 5)), () async { + const tool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', + }, + }, + 'required': ['location', 'punchline'], + }, + ); + + final promptTemplate = ChatPromptTemplate.fromTemplate( + 'tell me a long joke about {foo}', + ); + final chat = chatModel.bind( + ChatAnthropicOptions( + model: defaultModel, + tools: const [tool], + toolChoice: ChatToolChoice.forced(name: 'joke'), + ), + ); + final jsonOutputParser = ToolsOutputParser(); + + final chain = promptTemplate.pipe(chat).pipe(jsonOutputParser); + + final stream = chain.stream({'foo': 'bears'}); + + List lastResult = []; + int count = 0; + await for (final res in stream) { + print(res); + lastResult = res; + count++; + } + + expect(count, greaterThan(1)); + expect(lastResult, hasLength(1)); + final toolCall = lastResult.first; + expect(toolCall.arguments['setup'], isNotEmpty); + expect(toolCall.arguments['punchline'], isNotEmpty); + }); + }); +} diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index b0ec2aa9..fa0bc0fc 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -44,7 +44,7 @@ class ChatResult extends LanguageModelResult { final LanguageModelResult other, ) { return ChatResult( - id: other.id, + id: other.id.isNotEmpty ? other.id : id, output: output.concat(other.output), finishReason: finishReason != FinishReason.unspecified && other.finishReason == FinishReason.unspecified From 25319666fda7e186b5f390cd26363d1a7c6d8ff9 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Tue, 2 Jul 2024 22:26:09 +0200 Subject: [PATCH 176/251] docs: Update README.md --- packages/langchain/README.md | 96 ++++++++++++++----- .../lib/src/chat_models/chat_ollama.dart | 2 +- 2 files changed, 71 insertions(+), 27 deletions(-) diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 83608f5a..652ef1be 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -38,13 +38,13 @@ LangChain.dart aims to fill this gap by abstracting the intricacies of working w LangChain.dart has a modular design that allows developers to import only the components they need. The ecosystem consists of several packages: -### [`langchain_core`](https://pub.dev/packages/langchain_core) +### [`langchain_core`](https://pub.dev/packages/langchain_core) [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) Contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. > Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. -### [`langchain`](https://pub.dev/packages/langchain) +### [`langchain`](https://pub.dev/packages/langchain) [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. 
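A minimal sketch of the id-merging rule introduced by the `ChatResult.concat` change in `packages/langchain_core/lib/src/chat_models/types.dart` above, assuming a simplified stand-alone helper (the name `mergeResultIds` is illustrative, not part of the library API): when streamed chunks are concatenated, an incoming chunk with an empty `id` no longer overwrites an id that is already known, so the merged result keeps a stable identifier regardless of which chunk carries it.

```dart
// Simplified, illustrative helper mirroring the ternary added to
// ChatResult.concat above; the function name is hypothetical.
String mergeResultIds(String currentId, String incomingId) {
  // Keep the incoming id only when it is non-empty,
  // otherwise preserve the id accumulated so far.
  return incomingId.isNotEmpty ? incomingId : currentId;
}

void main() {
  print(mergeResultIds('msg_123', ''));        // msg_123 (empty chunk id ignored)
  print(mergeResultIds('msg_123', 'msg_456')); // msg_456 (non-empty id wins)
}
```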
@@ -52,7 +52,7 @@ Contains higher-level and use-case specific chains, agents, and retrieval algori > > This package exposes `langchain_core` so you don't need to depend on it explicitly. -### [`langchain_community`](https://pub.dev/packages/langchain_community) +### [`langchain_community`](https://pub.dev/packages/langchain_community) [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. @@ -64,40 +64,28 @@ Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/pack > Depend on an integration-specific package if you want to use the specific integration. -

    - -

    - | Package | Version | Description |
|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL |
-| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms |
-| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components and utilities |
-| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) |
-| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) |
+| [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | [![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic) | Anthropic integration (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.) |
+| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration |
| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) |
-| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) |
+| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) |
| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | | [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | | [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | -Functionality provided by each integration package: +

    + +

    -| Package | LLMs | Chat models | Embeddings | Vector stores | Chains | Agents | Tools | -|---------------------------------------------------------------------|------|-------------|------------|---------------|--------|--------|-------| -| [langchain_community](https://pub.dev/packages/langchain_community) | | | | ✔ | | | ✔ | -| [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | | ✔ | ✔ | ✔ | -| [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | ✔ | | | | -| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | | ✔ | | | | | | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | | | | | -| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | | ✔ | ✔ | | | | | -| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | | | | ✔ | | | | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | | | | ✔ | | | | -| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | | | | ✔ | | | | +### API clients packages The following packages are maintained (and used internally) by LangChain.dart, although they can also be used independently: +> Depend on an API client package if you just want to consume the API of a specific provider directly without using LangChain.dart abstractions. + | Package | Version | Description | |-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------| | [anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | [Anthropic](https://docs.anthropic.com/en/api) API client | @@ -109,6 +97,62 @@ The following packages are maintained (and used internally) by LangChain.dart, a | [tavily_dart](https://pub.dev/packages/tavily_dart) | [![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) | [Tavily](https://tavily.com) API client | | [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | [GCP Vertex AI](https://cloud.google.com/vertex-ai) API client | +## Integrations + +The following integrations are available in LangChain.dart: + +### Chat Models + +| Chat model | Package | Streaming | Multi-modal | Tool-call | Description | +|-------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|-----------|-------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [ChatAnthropic](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/anthropic) | [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | ✔ | ✔ | ✔ | [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API) | +| 
[ChatFirebaseVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/firebase_vertex_ai) | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | ✔ | ✔ | ✔ | [Vertex AI for Firebase API](https://firebase.google.com/docs/vertex-ai) (aka Gemini API) |
+| [ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) |
+| [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://docs.mistral.ai) |
+| [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | | [Ollama Chat API](https://ollama.ai) |
+| [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) |
+| [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) |
+
+### LLMs
+
+_Note: Prefer using Chat Models over LLMs as many providers have deprecated them._
+
+| LLM | Package | Streaming | Description |
+|-------------------------------------------------------------------------------------------------|---------------------------------------------------------------|-----------|--------------------------------------------------------------------------------------|
+| [Ollama](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | [Ollama Completions API](https://ollama.ai) |
+| [OpenAI](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | [OpenAI Completions API](https://platform.openai.com/docs/api-reference/completions) |
+| [VertexAI](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | [GCP Vertex AI Text API](https://cloud.google.com/vertex-ai) |
+
+### Embedding Models
+
+| Embedding model | Package | Description |
+|---------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|------------------------------------------------------------------------------------|
+| [GoogleGenerativeAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/google_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | [Google AI Embeddings API](https://ai.google.dev) |
+| 
[MistralAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [Mistral Embeddings API](https://docs.mistral.ai) | +| [OllamaEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [Ollama Embeddings API](https://ollama.ai) | +| [OpenAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | [OpenAI Embeddings API](https://platform.openai.com/docs/api-reference/embeddings) | +| [VertexAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | [GCP Vertex AI Embeddings API](https://cloud.google.com/vertex-ai) | + +### Vector Stores + +| Vector store | Package | Description | +|--------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------| +| [Chroma](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/chroma) | [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [Chroma](https://trychroma.com/) integration | +| [MemoryVectorStore](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/memory) | [langchain](https://pub.dev/packages/langchain) | In-memory vector store for prototype and testing | +| [ObjectBoxVectorStore](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox) | [langchain_community](https://pub.dev/packages/langchain_community) | [ObjectBox](https://objectbox.io/) integration | +| [Pinecone](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/pinecone) | [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [Pinecone](https://pinecone.io/) integration | +| [Supabase](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/supabase) | [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [Supabase Vector](https://supabase.com/vector) integration | +| [VertexAIMatchingEngine](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | [Vertex AI Vector Search](https://cloud.google.com/vertex-ai/docs/vector-search/overview) (former Matching Engine) integration | + +### Tools + +| Tool | Package | Description | +|-----------------------------------------------------------------------------------|---------------------------------------------------------------------|--------------------------------------------------------------------------------------------| +| [CalculatorTool](https://langchaindart.dev/#/modules/agents/tools/calculator) | [langchain_community](https://pub.dev/packages/langchain_community) | To calculate math expressions | +| [OpenAIDallETool](https://langchaindart.dev/#/modules/agents/tools/openai_dall_e) | [langchain_openai](https://pub.dev/packages/langchain_openai) | [OpenAI's DALL-E Image Generator](https://platform.openai.com/docs/api-reference/images) | +| TavilyAnswerTool | [langchain_community](https://pub.dev/packages/langchain_community) | Returns an 
answer for a query using the [Tavily](https://tavily.com) search engine | +| TavilySearchResultsTool | [langchain_community](https://pub.dev/packages/langchain_community) | Returns a list of results for a query using the [Tavily](https://tavily.com) search engine | + ## Getting started To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_community`, `langchain_openai`, `langchain_google`, etc.): diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart index 2e8fe5f6..64a5cdae 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart @@ -9,7 +9,7 @@ import '../llms/mappers.dart'; import 'mappers.dart'; import 'types.dart'; -/// Wrapper around [Ollama](https://ollama.ai) Completions API that enables +/// Wrapper around [Ollama](https://ollama.ai) Chat API that enables /// to interact with the LLMs in a chat-like fashion. /// /// Ollama allows you to run open-source large language models, From fe969ddac49b74c2c2bf1d1afd0420581db9b82c Mon Sep 17 00:00:00 2001 From: David Miguel Date: Tue, 2 Jul 2024 22:35:17 +0200 Subject: [PATCH 177/251] chore(release): publish packages - langchain@0.7.3 - langchain_core@0.3.3 - langchain_community@0.2.2 - langchain_anthropic@0.1.0 - langchain_chroma@0.2.1 - langchain_firebase@0.2.0 - langchain_google@0.6.0 - langchain_mistralai@0.2.1 - langchain_ollama@0.2.2+1 - langchain_openai@0.6.3 - langchain_pinecone@0.1.0+6 - langchain_supabase@0.1.1 - anthropic_sdk_dart@0.1.0 - googleai_dart@0.1.0+2 - mistralai_dart@0.0.3+3 - ollama_dart@0.1.2 - openai_dart@0.3.3+1 - tavily_dart@0.1.0 --- CHANGELOG.md | 127 ++++++++++++++++++ examples/browser_summarizer/pubspec.lock | 12 +- examples/browser_summarizer/pubspec.yaml | 6 +- examples/docs_examples/pubspec.lock | 26 ++-- examples/docs_examples/pubspec.yaml | 14 +- examples/hello_world_backend/pubspec.lock | 8 +- examples/hello_world_backend/pubspec.yaml | 4 +- examples/hello_world_cli/pubspec.lock | 8 +- examples/hello_world_cli/pubspec.yaml | 4 +- examples/hello_world_flutter/pubspec.lock | 16 +-- examples/hello_world_flutter/pubspec.yaml | 8 +- examples/wikivoyage_eu/pubspec.lock | 12 +- examples/wikivoyage_eu/pubspec.yaml | 6 +- packages/anthropic_sdk_dart/CHANGELOG.md | 7 + packages/anthropic_sdk_dart/pubspec.yaml | 2 +- packages/googleai_dart/CHANGELOG.md | 4 + packages/googleai_dart/pubspec.yaml | 2 +- packages/langchain/CHANGELOG.md | 11 +- packages/langchain/pubspec.yaml | 4 +- packages/langchain_anthropic/CHANGELOG.md | 4 + packages/langchain_anthropic/pubspec.yaml | 6 +- packages/langchain_chroma/CHANGELOG.md | 4 + packages/langchain_chroma/pubspec.yaml | 10 +- packages/langchain_community/CHANGELOG.md | 4 + packages/langchain_community/pubspec.yaml | 8 +- packages/langchain_core/CHANGELOG.md | 5 + packages/langchain_core/pubspec.yaml | 2 +- packages/langchain_firebase/CHANGELOG.md | 11 ++ .../langchain_firebase/example/pubspec.lock | 6 +- .../langchain_firebase/example/pubspec.yaml | 4 +- packages/langchain_firebase/pubspec.lock | 2 +- packages/langchain_firebase/pubspec.yaml | 4 +- packages/langchain_google/CHANGELOG.md | 9 ++ packages/langchain_google/pubspec.yaml | 4 +- packages/langchain_mistralai/CHANGELOG.md | 4 + packages/langchain_mistralai/pubspec.yaml | 4 +- packages/langchain_ollama/CHANGELOG.md | 4 + 
packages/langchain_ollama/pubspec.yaml | 6 +- packages/langchain_openai/CHANGELOG.md | 4 + packages/langchain_openai/pubspec.yaml | 10 +- packages/langchain_pinecone/CHANGELOG.md | 4 + packages/langchain_pinecone/pubspec.yaml | 6 +- packages/langchain_supabase/CHANGELOG.md | 4 + packages/langchain_supabase/pubspec.yaml | 10 +- packages/mistralai_dart/CHANGELOG.md | 4 + packages/mistralai_dart/pubspec.yaml | 2 +- packages/ollama_dart/CHANGELOG.md | 5 + packages/ollama_dart/pubspec.yaml | 2 +- packages/openai_dart/CHANGELOG.md | 4 + packages/openai_dart/pubspec.yaml | 2 +- packages/tavily_dart/CHANGELOG.md | 4 + packages/tavily_dart/pubspec.yaml | 2 +- 52 files changed, 332 insertions(+), 113 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e1699211..aead8730 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,133 @@ Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. +## 2024-07-02 + +### Changes + +--- + +New packages: + +- [`langchain_anthropic` - `v0.1.0`](#langchain_anthropic---v010) +- [`tavily_dart` - `v0.1.0`](#tavily_dart---v010) + +Packages with breaking changes: + +- [`langchain_firebase` - `v0.2.0`](#langchain_firebase---v020) +- [`langchain_google` - `v0.6.0`](#langchain_google---v060) + +Packages with other changes: + +- [`langchain` - `v0.7.3`](#langchain---v073) +- [`langchain_core` - `v0.3.3`](#langchain_core---v033) +- [`langchain_community` - `v0.2.2`](#langchain_community---v022) +- [`langchain_chroma` - `v0.2.1`](#langchain_chroma---v021) +- [`langchain_mistralai` - `v0.2.1`](#langchain_mistralai---v021) +- [`langchain_ollama` - `v0.2.2+1`](#langchain_ollama---v0221) +- [`langchain_openai` - `v0.6.3`](#langchain_openai---v063) +- [`langchain_pinecone` - `v0.1.0+6`](#langchain_pinecone---v0106) +- [`langchain_supabase` - `v0.1.1`](#langchain_supabase---v011) +- [`anthropic_sdk_dart` - `v0.1.0`](#anthropic_sdk_dart---v010) +- [`googleai_dart` - `v0.1.0+2`](#googleai_dart---v0102) +- [`mistralai_dart` - `v0.0.3+3`](#mistralai_dart---v0033) +- [`ollama_dart` - `v0.1.2`](#ollama_dart---v012) +- [`openai_dart` - `v0.3.3+1`](#openai_dart---v0331) + +--- + +#### `langchain` - `v0.7.3` + +> Note: Anthropic integration (`ChatAnthropic`) is available in the new [`langchain_anthropic`](https://pub.dev/packages/langchain_anthropic) package. + +- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) +- **DOCS**: Document existing integrations in README.md. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) + +#### `langchain_core` - `v0.3.3` + +- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) +- **FEAT**: Update ChatResult.id concat logic ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) + +#### `langchain_community` - `v0.2.2` + +- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). 
([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) + +#### `langchain_anthropic` - `v0.1.0` + +- **FEAT**: Add ChatAnthropic integration ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) + +#### `langchain_firebase` - `v0.2.0` + +> Note: `ChatFirebaseVertexAI` now uses `gemini-1.5-flash` model by default. + +- **BREAKING** **FEAT**: Update ChatFirebaseVertexAI default model to gemini-1.5-flash ([#458](https://github.com/davidmigloz/langchain_dart/issues/458)). ([d3c96c52](https://github.com/davidmigloz/langchain_dart/commit/d3c96c52e95e889ba6955e3de80a83978b27618b)) +- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) +- **FEAT**: Support response MIME type in ChatFirebaseVertexAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)) ([#463](https://github.com/davidmigloz/langchain_dart/issues/463)). ([c3452721](https://github.com/davidmigloz/langchain_dart/commit/c3452721c78ba3071ed2510a243f9c824a291c34)) +- **FEAT**: Add support for Firebase Auth in ChatFirebaseVertexAI ([#460](https://github.com/davidmigloz/langchain_dart/issues/460)). ([6d137290](https://github.com/davidmigloz/langchain_dart/commit/6d137290ca0f56c9fcc725e6211e838a3e3c6d16)) +- **FEAT**: Add support for usage metadata in ChatFirebaseVertexAI ([#457](https://github.com/davidmigloz/langchain_dart/issues/457)). ([2587f9e2](https://github.com/davidmigloz/langchain_dart/commit/2587f9e2bcbcc2bf5e2295dce409e92a89bf3c44)) +- **REFACTOR**: Simplify how tools are passed to the internal Firebase client ([#459](https://github.com/davidmigloz/langchain_dart/issues/459)). ([7f772396](https://github.com/davidmigloz/langchain_dart/commit/7f77239601fb216a01ec9d25680ec4d3dc4b97c7)) + +#### `langchain_google` - `v0.6.0` + +> Note: `ChatGoogleGenerativeAI` now uses `gemini-1.5-flash` model by default. + +- **BREAKING** **FEAT**: Update ChatGoogleGenerativeAI default model to gemini-1.5-flash ([#462](https://github.com/davidmigloz/langchain_dart/issues/462)). ([c8b30c90](https://github.com/davidmigloz/langchain_dart/commit/c8b30c906a17751547cc340f987b6670fbd67e69)) +- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) +- **FEAT**: Support response MIME type and schema in ChatGoogleGenerativeAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)). ([e258399e](https://github.com/davidmigloz/langchain_dart/commit/e258399e03437e8abe25417a14671dfb719cb273)) +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `langchain_openai` - `v0.6.3` + +- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + +#### `langchain_ollama` - `v0.2.2+1` + +- **DOCS**: Update ChatOllama API docs. 
([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) + +#### `langchain_chroma` - `v0.2.1` + +- Update a dependency to the latest release. + +#### `langchain_mistralai` - `v0.2.1` + +- Update a dependency to the latest release. + +#### `langchain_pinecone` - `v0.1.0+6` + +- Update a dependency to the latest release. + +#### `langchain_supabase` - `v0.1.1` + +- Update a dependency to the latest release. + +#### `anthropic_sdk_dart` - `v0.1.0` + +- **FEAT**: Add support for tool use in anthropic_sdk_dart client ([#469](https://github.com/davidmigloz/langchain_dart/issues/469)). ([81896cfd](https://github.com/davidmigloz/langchain_dart/commit/81896cfdfce116b010dd51391994251d2a836333)) +- **FEAT**: Add extensions on ToolResultBlockContent in anthropic_sdk_dart ([#476](https://github.com/davidmigloz/langchain_dart/issues/476)). ([8d92d9b0](https://github.com/davidmigloz/langchain_dart/commit/8d92d9b008755ff9b9ca3545eb26fc49a296a909)) +- **REFACTOR**: Improve schemas names in anthropic_sdk_dart ([#475](https://github.com/davidmigloz/langchain_dart/issues/475)). ([8ebeacde](https://github.com/davidmigloz/langchain_dart/commit/8ebeacded02ab92885354c9447b1a55e024b56d1)) +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `ollama_dart` - `v0.1.2` + +- **FEAT**: Add support for listing running Ollama models ([#451](https://github.com/davidmigloz/langchain_dart/issues/451)). ([cfaa31fb](https://github.com/davidmigloz/langchain_dart/commit/cfaa31fb8ce1dc128570c95d403809f71e0199d9)) +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `tavily_dart` - `v0.1.0` + +- **FEAT**: Implement tavily_dart, a Dart client for Tavily API ([#456](https://github.com/davidmigloz/langchain_dart/issues/456)). ([fbfb79ba](https://github.com/davidmigloz/langchain_dart/commit/fbfb79bad81dbbd5844a90938fda79b201f20047)) + +#### `googleai_dart` - `v0.1.0+2` + +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `mistralai_dart` - `v0.0.3+3` + +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `openai_dart` - `v0.3.3+1` + +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). 
([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 2024-06-01 ### Changes diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index c32f085f..621fd8c2 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -233,28 +233,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.1+1" + version: "0.2.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.2" + version: "0.6.3" langchain_tiktoken: dependency: transitive description: @@ -325,7 +325,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3" + version: "0.3.3+1" path: dependency: transitive description: @@ -497,7 +497,7 @@ packages: path: "../../packages/tavily_dart" relative: true source: path - version: "0.0.1-dev.1" + version: "0.1.0" term_glyph: dependency: transitive description: diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 0e729f8d..3868b314 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -13,9 +13,9 @@ dependencies: flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 js: ^0.7.1 - langchain: ^0.7.2 - langchain_community: 0.2.1+1 - langchain_openai: ^0.6.2 + langchain: ^0.7.3 + langchain_community: 0.2.2 + langchain_openai: ^0.6.3 shared_preferences: ^2.2.2 flutter: diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 47e6b5b7..806b30f6 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -15,7 +15,7 @@ packages: path: "../../packages/anthropic_sdk_dart" relative: true source: path - version: "0.0.1" + version: "0.1.0" args: dependency: transitive description: @@ -245,42 +245,42 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_anthropic: dependency: "direct main" description: path: "../../packages/langchain_anthropic" relative: true source: path - version: "0.0.1-dev.1" + version: "0.1.0" langchain_chroma: dependency: "direct main" description: path: "../../packages/langchain_chroma" relative: true source: path - version: "0.2.0+5" + version: "0.2.1" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.1+1" + version: "0.2.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.5.1" + version: "0.6.0" langchain_mistralai: dependency: "direct main" description: @@ -294,14 +294,14 @@ packages: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.2" + version: "0.2.2+1" langchain_openai: dependency: "direct main" description: path: 
"../../packages/langchain_openai" relative: true source: path - version: "0.6.2" + version: "0.6.3" langchain_tiktoken: dependency: transitive description: @@ -340,7 +340,7 @@ packages: path: "../../packages/mistralai_dart" relative: true source: path - version: "0.0.3+2" + version: "0.0.3+3" objectbox: dependency: transitive description: @@ -355,14 +355,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.1" + version: "0.1.2" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3" + version: "0.3.3+1" path: dependency: transitive description: @@ -433,7 +433,7 @@ packages: path: "../../packages/tavily_dart" relative: true source: path - version: "0.0.1-dev.1" + version: "0.1.0" term_glyph: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 2f6b0f37..4888329b 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -7,11 +7,11 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.2 - langchain_anthropic: ^0.0.1-dev.1 - langchain_chroma: ^0.2.0+5 - langchain_community: 0.2.1+1 - langchain_google: ^0.5.1 + langchain: ^0.7.3 + langchain_anthropic: ^0.1.0 + langchain_chroma: ^0.2.1 + langchain_community: 0.2.2 + langchain_google: ^0.6.0 langchain_mistralai: ^0.2.1 - langchain_ollama: ^0.2.2 - langchain_openai: ^0.6.2 + langchain_ollama: ^0.2.2+1 + langchain_openai: ^0.6.3 diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index dc3ac458..1acce35a 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -119,21 +119,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.2" + version: "0.6.3" langchain_tiktoken: dependency: transitive description: @@ -156,7 +156,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3" + version: "0.3.3+1" path: dependency: transitive description: diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index 665ba178..c091ef7c 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -7,7 +7,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.2 - langchain_openai: ^0.6.2 + langchain: ^0.7.3 + langchain_openai: ^0.6.3 shelf: ^1.4.1 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 8fc27717..45f69561 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -111,21 +111,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.2" + version: "0.6.3" 
langchain_tiktoken: dependency: transitive description: @@ -148,7 +148,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3" + version: "0.3.3+1" path: dependency: transitive description: diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index 28872dc6..0e070b1d 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -7,5 +7,5 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.2 - langchain_openai: ^0.6.2 + langchain: ^0.7.3 + langchain_openai: ^0.6.3 diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 9bbfa0f2..9802cb30 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -204,21 +204,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.5.1" + version: "0.6.0" langchain_mistralai: dependency: "direct main" description: @@ -232,14 +232,14 @@ packages: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.2" + version: "0.2.2+1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.2" + version: "0.6.3" langchain_tiktoken: dependency: transitive description: @@ -278,7 +278,7 @@ packages: path: "../../packages/mistralai_dart" relative: true source: path - version: "0.0.3+2" + version: "0.0.3+3" nested: dependency: transitive description: @@ -293,14 +293,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.1" + version: "0.1.2" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3" + version: "0.3.3+1" path: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index 4f9f4c56..786c1edd 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -12,11 +12,11 @@ dependencies: equatable: ^2.0.5 flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 - langchain: ^0.7.2 - langchain_google: ^0.5.1 + langchain: ^0.7.3 + langchain_google: ^0.6.0 langchain_mistralai: ^0.2.1 - langchain_ollama: ^0.2.2 - langchain_openai: ^0.6.2 + langchain_ollama: ^0.2.2+1 + langchain_openai: ^0.6.3 flutter: uses-material-design: true diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index 18f2890b..da33efe5 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -167,28 +167,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.1+1" + version: "0.2.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_ollama: dependency: "direct main" description: path: 
"../../packages/langchain_ollama" relative: true source: path - version: "0.2.2" + version: "0.2.2+1" langchain_tiktoken: dependency: transitive description: @@ -235,7 +235,7 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.1" + version: "0.1.2" path: dependency: transitive description: @@ -298,7 +298,7 @@ packages: path: "../../packages/tavily_dart" relative: true source: path - version: "0.0.1-dev.1" + version: "0.1.0" term_glyph: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index 198686c0..a591713f 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -7,6 +7,6 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.2 - langchain_ollama: ^0.2.2 - langchain_community: 0.2.1+1 + langchain: ^0.7.3 + langchain_ollama: ^0.2.2+1 + langchain_community: 0.2.2 diff --git a/packages/anthropic_sdk_dart/CHANGELOG.md b/packages/anthropic_sdk_dart/CHANGELOG.md index de958be3..85fb6080 100644 --- a/packages/anthropic_sdk_dart/CHANGELOG.md +++ b/packages/anthropic_sdk_dart/CHANGELOG.md @@ -1,3 +1,10 @@ +## 0.1.0 + + - **FEAT**: Add support for tool use in anthropic_sdk_dart client ([#469](https://github.com/davidmigloz/langchain_dart/issues/469)). ([81896cfd](https://github.com/davidmigloz/langchain_dart/commit/81896cfdfce116b010dd51391994251d2a836333)) + - **FEAT**: Add extensions on ToolResultBlockContent in anthropic_sdk_dart ([#476](https://github.com/davidmigloz/langchain_dart/issues/476)). ([8d92d9b0](https://github.com/davidmigloz/langchain_dart/commit/8d92d9b008755ff9b9ca3545eb26fc49a296a909)) + - **REFACTOR**: Improve schemas names in anthropic_sdk_dart ([#475](https://github.com/davidmigloz/langchain_dart/issues/475)). ([8ebeacde](https://github.com/davidmigloz/langchain_dart/commit/8ebeacded02ab92885354c9447b1a55e024b56d1)) + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 0.0.1 - **FEAT**: Implement anthropic_sdk_dart, a Dart client for Anthropic API ([#433](https://github.com/davidmigloz/langchain_dart/issues/433)). ([e5412b](https://github.com/davidmigloz/langchain_dart/commit/e5412bdedc7de911f7de88eb51e9d41cd85ab4ae)) diff --git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml index 650ac782..160596dc 100644 --- a/packages/anthropic_sdk_dart/pubspec.yaml +++ b/packages/anthropic_sdk_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: anthropic_sdk_dart description: Dart Client for the Anthropic API (Claude 3.5 Sonnet, Opus, Haiku, etc.). -version: 0.0.1 +version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/anthropic_sdk_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:anthropic_sdk_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/googleai_dart/CHANGELOG.md b/packages/googleai_dart/CHANGELOG.md index 7a6ca6b8..e1d53bc8 100644 --- a/packages/googleai_dart/CHANGELOG.md +++ b/packages/googleai_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+2 + + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). 
([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 0.1.0+1 - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) diff --git a/packages/googleai_dart/pubspec.yaml b/packages/googleai_dart/pubspec.yaml index 2006a059..22370975 100644 --- a/packages/googleai_dart/pubspec.yaml +++ b/packages/googleai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: googleai_dart description: Dart Client for the Google AI API (Gemini Pro, Gemini Pro Vision, embeddings, etc.). -version: 0.1.0+1 +version: 0.1.0+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/googleai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:googleai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index cc6953da..219c9dc5 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -1,6 +1,13 @@ +## 0.7.3 + +> Note: Anthropic integration (`ChatAnthropic`) is now available in the new [`langchain_anthropic`](https://pub.dev/packages/langchain_anthropic) package. + +- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) +- **DOCS**: Document existing integrations in README.md. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) + ## 0.7.2 -> Note: ObjectBox Vector DB integration (`ObjectBoxVectorStore`) is now available in the [`langchain_community`](https://pub.dev/packages/langchain_community) package +> Note: ObjectBox Vector DB integration (`ObjectBoxVectorStore`) is now available in the [`langchain_community`](https://pub.dev/packages/langchain_community) package. - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) @@ -8,7 +15,7 @@ ## 0.7.1 -> Note: VertexAI for Firebase (`ChatFirebaseVertexAI`) is available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package. +> Note: VertexAI for Firebase (`ChatFirebaseVertexAI`) is now available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package. - **DOCS**: Add docs for ChatFirebaseVertexAI ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f)) - **DOCS**: Update ChatOllama docs ([#417](https://github.com/davidmigloz/langchain_dart/issues/417)). 
([9d30b1a1](https://github.com/davidmigloz/langchain_dart/commit/9d30b1a1c811d73cfa27110b8c3c10b10da1801e)) diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index a92d1e9c..e53a3859 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain description: Build powerful LLM-based Dart and Flutter applications with LangChain.dart. -version: 0.7.2 +version: 0.7.3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ dependencies: characters: ^1.3.0 collection: ">=1.17.0 <1.19.0" crypto: ^3.0.3 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_anthropic/CHANGELOG.md b/packages/langchain_anthropic/CHANGELOG.md index 6df81faa..fe3d0a4d 100644 --- a/packages/langchain_anthropic/CHANGELOG.md +++ b/packages/langchain_anthropic/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0 + +- **FEAT**: Add ChatAnthropic integration ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) + ## 0.0.1-dev.1 - Bootstrap project. diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 7a581267..700b6559 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_anthropic description: Anthropic module for LangChain.dart (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.). -version: 0.0.1-dev.1 +version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic homepage: https://github.com/davidmigloz/langchain_dart @@ -16,10 +16,10 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - anthropic_sdk_dart: ^0.0.1 + anthropic_sdk_dart: ^0.1.0 collection: '>=1.17.0 <1.19.0' http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 langchain_tiktoken: ^1.0.1 rxdart: ^0.27.7 diff --git a/packages/langchain_chroma/CHANGELOG.md b/packages/langchain_chroma/CHANGELOG.md index 8e785534..218a218c 100644 --- a/packages/langchain_chroma/CHANGELOG.md +++ b/packages/langchain_chroma/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.1 + + - Update a dependency to the latest release. + ## 0.2.0+5 - Update a dependency to the latest release. diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 3c96bacb..8d7684ba 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_chroma description: LangChain.dart integration module for Chroma open-source embedding database. 
-version: 0.2.0+5 +version: 0.2.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart @@ -19,12 +19,12 @@ environment: dependencies: chromadb: ^0.2.0+1 http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 meta: ^1.11.0 uuid: ^4.3.3 dev_dependencies: test: ^1.25.2 - langchain: ^0.7.2 - langchain_community: 0.2.1+1 - langchain_openai: ^0.6.2 + langchain: ^0.7.3 + langchain_community: 0.2.2 + langchain_openai: ^0.6.3 diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 5c3aaba2..3e7be761 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.2 + + - **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) + ## 0.2.1+1 - **FIX**: Add missing dependency in langchain_community package ([#448](https://github.com/davidmigloz/langchain_dart/issues/448)). ([70ffd027](https://github.com/davidmigloz/langchain_dart/commit/70ffd027cb41c5c5058bb266966734894f773330)) diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index b386fb9d..4e2c6f7a 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. -version: 0.2.1+1 +version: 0.2.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart @@ -22,16 +22,16 @@ dependencies: flat_buffers: ^23.5.26 http: ^1.1.0 json_path: ^0.7.1 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 math_expressions: ^2.4.0 meta: ^1.11.0 objectbox: ^4.0.1 - tavily_dart: ^0.0.1-dev.1 + tavily_dart: ^0.1.0 uuid: ^4.3.3 dev_dependencies: build_runner: ^2.4.9 - langchain_openai: ^0.6.2 + langchain_openai: ^0.6.3 objectbox_generator: ^4.0.0 test: ^1.25.2 diff --git a/packages/langchain_core/CHANGELOG.md b/packages/langchain_core/CHANGELOG.md index dd637cd5..7757bae9 100644 --- a/packages/langchain_core/CHANGELOG.md +++ b/packages/langchain_core/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.3.3 + + - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + - **FEAT**: Update ChatResult.id concat logic ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) + ## 0.3.2 - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). 
([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index b682b76a..38081129 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_core description: Contains core abstractions of LangChain.dart and the LangChain Expression Language (LCEL). -version: 0.3.2 +version: 0.3.3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index d5128425..a0eb0aa4 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,3 +1,14 @@ +## 0.2.0 + +> Note: `ChatFirebaseVertexAI` now uses `gemini-1.5-flash` model by default. + + - **BREAKING** **FEAT**: Update ChatFirebaseVertexAI default model to gemini-1.5-flash ([#458](https://github.com/davidmigloz/langchain_dart/issues/458)). ([d3c96c52](https://github.com/davidmigloz/langchain_dart/commit/d3c96c52e95e889ba6955e3de80a83978b27618b)) + - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + - **FEAT**: Support response MIME type in ChatFirebaseVertexAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)) ([#463](https://github.com/davidmigloz/langchain_dart/issues/463)). ([c3452721](https://github.com/davidmigloz/langchain_dart/commit/c3452721c78ba3071ed2510a243f9c824a291c34)) + - **FEAT**: Add support for Firebase Auth in ChatFirebaseVertexAI ([#460](https://github.com/davidmigloz/langchain_dart/issues/460)). ([6d137290](https://github.com/davidmigloz/langchain_dart/commit/6d137290ca0f56c9fcc725e6211e838a3e3c6d16)) + - **FEAT**: Add support for usage metadata in ChatFirebaseVertexAI ([#457](https://github.com/davidmigloz/langchain_dart/issues/457)). ([2587f9e2](https://github.com/davidmigloz/langchain_dart/commit/2587f9e2bcbcc2bf5e2295dce409e92a89bf3c44)) + - **REFACTOR**: Simplify how tools are passed to the internal Firebase client ([#459](https://github.com/davidmigloz/langchain_dart/issues/459)). ([7f772396](https://github.com/davidmigloz/langchain_dart/commit/7f77239601fb216a01ec9d25680ec4d3dc4b97c7)) + ## 0.1.0+3 - Update a dependency to the latest release. diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 4b481fb2..03e1dab8 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -262,21 +262,21 @@ packages: path: "../../langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_core: dependency: "direct overridden" description: path: "../../langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_firebase: dependency: "direct main" description: path: ".." 
relative: true source: path - version: "0.1.0+3" + version: "0.2.0" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index ff8593ef..f1618ec8 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -13,8 +13,8 @@ dependencies: flutter: sdk: flutter flutter_markdown: ^0.6.22 - langchain: 0.7.2 - langchain_firebase: 0.1.0+3 + langchain: 0.7.3 + langchain_firebase: 0.2.0 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 62232007..2927c037 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -230,7 +230,7 @@ packages: path: "../langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 8df04cfd..0cb109c8 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). -version: 0.1.0+3 +version: 0.2.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart @@ -23,7 +23,7 @@ dependencies: firebase_auth: ^5.1.0 firebase_core: ^3.1.0 firebase_vertexai: ^0.2.2 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index c2d95eed..9964d000 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -1,3 +1,12 @@ +## 0.6.0 + +> Note: `ChatGoogleGenerativeAI` now uses `gemini-1.5-flash` model by default. + + - **BREAKING** **FEAT**: Update ChatGoogleGenerativeAI default model to gemini-1.5-flash ([#462](https://github.com/davidmigloz/langchain_dart/issues/462)). ([c8b30c90](https://github.com/davidmigloz/langchain_dart/commit/c8b30c906a17751547cc340f987b6670fbd67e69)) + - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + - **FEAT**: Support response MIME type and schema in ChatGoogleGenerativeAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)). ([e258399e](https://github.com/davidmigloz/langchain_dart/commit/e258399e03437e8abe25417a14671dfb719cb273)) + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 0.5.1 - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). 
([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index a2d2670a..eeec07e2 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_google description: LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). -version: 0.5.1 +version: 0.6.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart @@ -24,7 +24,7 @@ dependencies: googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 meta: ^1.11.0 uuid: ^4.3.3 vertex_ai: ^0.1.0+1 diff --git a/packages/langchain_mistralai/CHANGELOG.md b/packages/langchain_mistralai/CHANGELOG.md index d5d9ca46..7b74ab46 100644 --- a/packages/langchain_mistralai/CHANGELOG.md +++ b/packages/langchain_mistralai/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.1+1 + + - Update a dependency to the latest release. + ## 0.2.1 - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 2eda0275..b8025bfe 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -19,10 +19,10 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - mistralai_dart: ^0.0.3+2 + mistralai_dart: ^0.0.3+3 dev_dependencies: test: ^1.25.2 diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index a72f229e..4d475188 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.2+1 + + - **DOCS**: Update ChatOllama API docs. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) + ## 0.2.2 - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index ae7adb8d..273a1594 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_ollama description: LangChain.dart integration module for Ollama (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally).
-version: 0.2.2 +version: 0.2.2+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart @@ -19,10 +19,10 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.1.1 + ollama_dart: ^0.1.2 uuid: ^4.3.3 dev_dependencies: diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index 4daab488..c1503886 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.6.3 + + - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + ## 0.6.2 - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index c0ccb98d..4679404e 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_openai description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). -version: 0.6.2 +version: 0.6.3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,13 +19,13 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.3.3 + openai_dart: ^0.3.3+1 uuid: ^4.3.3 dev_dependencies: - langchain: ^0.7.2 - langchain_community: 0.2.1+1 + langchain: ^0.7.3 + langchain_community: 0.2.2 test: ^1.25.2 diff --git a/packages/langchain_pinecone/CHANGELOG.md b/packages/langchain_pinecone/CHANGELOG.md index 6e3c39e3..9faacd04 100644 --- a/packages/langchain_pinecone/CHANGELOG.md +++ b/packages/langchain_pinecone/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+6 + + - Update a dependency to the latest release. + ## 0.1.0+5 - Update a dependency to the latest release. diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index b943bde0..282943fd 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_pinecone description: LangChain.dart integration module for Pinecone fully-managed vector database. 
-version: 0.1.0+5 +version: 0.1.0+6 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart @@ -18,11 +18,11 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.3.3 dev_dependencies: test: ^1.25.2 - langchain_openai: ^0.6.2 + langchain_openai: ^0.6.3 diff --git a/packages/langchain_supabase/CHANGELOG.md b/packages/langchain_supabase/CHANGELOG.md index 00a141c5..bb20d19b 100644 --- a/packages/langchain_supabase/CHANGELOG.md +++ b/packages/langchain_supabase/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.1 + + - Update a dependency to the latest release. + ## 0.1.0+5 - Update a dependency to the latest release. diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index ffb0656d..80ab7e11 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_supabase description: LangChain.dart integration module for Supabase (e.g. Supabase Vector). -version: 0.1.0+5 +version: 0.1.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart @@ -18,12 +18,12 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 meta: ^1.11.0 supabase: ^2.0.8 dev_dependencies: test: ^1.25.2 - langchain: ^0.7.2 - langchain_community: 0.2.1+1 - langchain_openai: ^0.6.2 + langchain: ^0.7.3 + langchain_community: 0.2.2 + langchain_openai: ^0.6.3 diff --git a/packages/mistralai_dart/CHANGELOG.md b/packages/mistralai_dart/CHANGELOG.md index d1426493..df84e9cc 100644 --- a/packages/mistralai_dart/CHANGELOG.md +++ b/packages/mistralai_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.0.3+3 + + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 0.0.3+2 - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) diff --git a/packages/mistralai_dart/pubspec.yaml b/packages/mistralai_dart/pubspec.yaml index a2aad311..970d1403 100644 --- a/packages/mistralai_dart/pubspec.yaml +++ b/packages/mistralai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: mistralai_dart description: Dart Client for the Mistral AI API (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.0.3+2 +version: 0.0.3+3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/mistralai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:mistralai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index 21ceb1cf..5dfee162 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.1.2 + + - **FEAT**: Add support for listing running Ollama models ([#451](https://github.com/davidmigloz/langchain_dart/issues/451)). 
([cfaa31fb](https://github.com/davidmigloz/langchain_dart/commit/cfaa31fb8ce1dc128570c95d403809f71e0199d9)) + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 0.1.1 - **FEAT**: Support buffered stream responses in ollama_dart ([#445](https://github.com/davidmigloz/langchain_dart/issues/445)). ([ce2ef30c](https://github.com/davidmigloz/langchain_dart/commit/ce2ef30c9a9a0dfe8f3059988b7007c94c45b9bd)) diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 81f9fd49..954eb772 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: ollama_dart description: Dart Client for the Ollama API (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). -version: 0.1.1 +version: 0.1.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index 0a0e4085..70e28286 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.3.3+1 + + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 0.3.3 - **FEAT**: Support FastChat OpenAI-compatible API ([#444](https://github.com/davidmigloz/langchain_dart/issues/444)). ([ddaf1f69](https://github.com/davidmigloz/langchain_dart/commit/ddaf1f69d8262210637999367690bf362f2dc5c3)) diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index f617c8f0..4c449cc2 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: openai_dart description: Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. -version: 0.3.3 +version: 0.3.3+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/tavily_dart/CHANGELOG.md b/packages/tavily_dart/CHANGELOG.md index 90f8e244..897f0558 100644 --- a/packages/tavily_dart/CHANGELOG.md +++ b/packages/tavily_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0 + + - **FEAT**: Implement tavily_dart, a Dart client for Tavily API ([#456](https://github.com/davidmigloz/langchain_dart/issues/456)). ([fbfb79ba](https://github.com/davidmigloz/langchain_dart/commit/fbfb79bad81dbbd5844a90938fda79b201f20047)) + ## 0.0.1-dev.1 - Bootstrap package. diff --git a/packages/tavily_dart/pubspec.yaml b/packages/tavily_dart/pubspec.yaml index 216e0b0d..5694d98f 100644 --- a/packages/tavily_dart/pubspec.yaml +++ b/packages/tavily_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: tavily_dart description: Dart Client for the Tavily API (a search engine optimized for LLMs and RAG). 
-version: 0.0.1-dev.1 +version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/tavily_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:tavily_dart homepage: https://github.com/davidmigloz/langchain_dart From c1ed7643ba88833111d2a65988bd604b54f85931 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 5 Jul 2024 17:20:07 +0200 Subject: [PATCH 178/251] docs: Update Ollama request options default values in API docs (#479) --- .../lib/src/chat_models/types.dart | 4 +- .../src/generated/schema/request_options.dart | 60 ++++-- .../src/generated/schema/schema.freezed.dart | 183 ++++++++++++------ packages/ollama_dart/oas/ollama-curated.yaml | 61 ++++-- 4 files changed, 200 insertions(+), 108 deletions(-) diff --git a/packages/langchain_ollama/lib/src/chat_models/types.dart b/packages/langchain_ollama/lib/src/chat_models/types.dart index 3f14d2a2..4db56920 100644 --- a/packages/langchain_ollama/lib/src/chat_models/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/types.dart @@ -141,7 +141,7 @@ class ChatOllamaOptions extends ChatModelOptions { final double? mirostatEta; /// Penalize newlines in the output. - /// (Default: false) + /// (Default: true) final bool? penalizeNewline; /// Sequences where the API will stop generating further tokens. The returned @@ -172,7 +172,7 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? lowVram; /// Enable f16 key/value. - /// (Default: false) + /// (Default: true) final bool? f16KV; /// Enable logits all. diff --git a/packages/ollama_dart/lib/src/generated/schema/request_options.dart b/packages/ollama_dart/lib/src/generated/schema/request_options.dart index a83df364..b6288d57 100644 --- a/packages/ollama_dart/lib/src/generated/schema/request_options.dart +++ b/packages/ollama_dart/lib/src/generated/schema/request_options.dart @@ -18,68 +18,84 @@ class RequestOptions with _$RequestOptions { /// Number of tokens to keep from the prompt. @JsonKey(name: 'num_keep', includeIfNull: false) int? numKeep, - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model + /// generate the same text for the same prompt. (Default: 0) @JsonKey(includeIfNull: false) int? seed, - /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. + /// (Default: 128, -1 = infinite generation, -2 = fill context) @JsonKey(name: 'num_predict', includeIfNull: false) int? numPredict, - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? topK, - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? topP, - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, - /// Typical p is used to reduce the impact of less probable tokens from the output. + /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) @JsonKey(name: 'typical_p', includeIfNull: false) double? typicalP, - /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. + /// (Default: 64, 0 = disabled, -1 = num_ctx) @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? repeatLastN, - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. + /// (Default: 0.8) @JsonKey(includeIfNull: false) double? temperature, - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @JsonKey(name: 'repeat_penalty', includeIfNull: false) double? repeatPenalty, - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the + /// model's likelihood to talk about new topics. (Default: 0) @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + /// model's likelihood to repeat the same line verbatim. (Default: 0) @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? frequencyPenalty, - /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. + /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @JsonKey(includeIfNull: false) int? mirostat, - /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more + /// focused and coherent text. (Default: 5.0) @JsonKey(name: 'mirostat_tau', includeIfNull: false) double? 
mirostatTau, - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + /// (Default: 0.1) @JsonKey(name: 'mirostat_eta', includeIfNull: false) double? mirostatEta, - /// Penalize newlines in the output. (Default: false) + /// Penalize newlines in the output. (Default: true) @JsonKey(name: 'penalize_newline', includeIfNull: false) bool? penalizeNewline, - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. @JsonKey(includeIfNull: false) List? stop, /// Enable NUMA support. (Default: false) @JsonKey(includeIfNull: false) bool? numa, - /// Sets the size of the context window used to generate the next token. + /// Sets the size of the context window used to generate the next token. (Default: 2048) @JsonKey(name: 'num_ctx', includeIfNull: false) int? numCtx, - /// Sets the number of batches to use for generation. (Default: 1) + /// Sets the number of batches to use for generation. (Default: 512) @JsonKey(name: 'num_batch', includeIfNull: false) int? numBatch, - /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). + /// On macOS it defaults to 1 to enable metal support, 0 to disable. @JsonKey(name: 'num_gpu', includeIfNull: false) int? numGpu, /// The GPU to use for the main model. Default is 0. @@ -88,7 +104,7 @@ class RequestOptions with _$RequestOptions { /// Enable low VRAM mode. (Default: false) @JsonKey(name: 'low_vram', includeIfNull: false) bool? lowVram, - /// Enable f16 key/value. (Default: false) + /// Enable f16 key/value. (Default: true) @JsonKey(name: 'f16_kv', includeIfNull: false) bool? f16Kv, /// Enable logits all. (Default: false) @@ -103,7 +119,9 @@ class RequestOptions with _$RequestOptions { /// Enable mlock. (Default: false) @JsonKey(name: 'use_mlock', includeIfNull: false) bool? useMlock, - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + /// performance. It is recommended to set this value to the number of physical CPU cores your system has + /// (as opposed to the logical number of cores). @JsonKey(name: 'num_thread', includeIfNull: false) int? numThread, }) = _RequestOptions; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index 88e82b13..db00fd66 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -567,67 +567,82 @@ mixin _$RequestOptions { @JsonKey(name: 'num_keep', includeIfNull: false) int? 
get numKeep => throw _privateConstructorUsedError; - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model + /// generate the same text for the same prompt. (Default: 0) @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; - /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. + /// (Default: 128, -1 = infinite generation, -2 = fill context) @JsonKey(name: 'num_predict', includeIfNull: false) int? get numPredict => throw _privateConstructorUsedError; - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? get topK => throw _privateConstructorUsedError; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? get tfsZ => throw _privateConstructorUsedError; - /// Typical p is used to reduce the impact of less probable tokens from the output. + /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) @JsonKey(name: 'typical_p', includeIfNull: false) double? get typicalP => throw _privateConstructorUsedError; - /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. + /// (Default: 64, 0 = disabled, -1 = num_ctx) @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? get repeatLastN => throw _privateConstructorUsedError; - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. + /// (Default: 0.8) @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. 
A higher value (e.g., 1.5) will penalize repetitions more + /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @JsonKey(name: 'repeat_penalty', includeIfNull: false) double? get repeatPenalty => throw _privateConstructorUsedError; - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the + /// model's likelihood to talk about new topics. (Default: 0) @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty => throw _privateConstructorUsedError; - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + /// model's likelihood to repeat the same line verbatim. (Default: 0) @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty => throw _privateConstructorUsedError; - /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. + /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @JsonKey(includeIfNull: false) int? get mirostat => throw _privateConstructorUsedError; - /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more + /// focused and coherent text. (Default: 5.0) @JsonKey(name: 'mirostat_tau', includeIfNull: false) double? get mirostatTau => throw _privateConstructorUsedError; - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + /// (Default: 0.1) @JsonKey(name: 'mirostat_eta', includeIfNull: false) double? get mirostatEta => throw _privateConstructorUsedError; - /// Penalize newlines in the output. (Default: false) + /// Penalize newlines in the output. (Default: true) @JsonKey(name: 'penalize_newline', includeIfNull: false) bool? get penalizeNewline => throw _privateConstructorUsedError; - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. @JsonKey(includeIfNull: false) List? get stop => throw _privateConstructorUsedError; @@ -635,15 +650,16 @@ mixin _$RequestOptions { @JsonKey(includeIfNull: false) bool? get numa => throw _privateConstructorUsedError; - /// Sets the size of the context window used to generate the next token. + /// Sets the size of the context window used to generate the next token. (Default: 2048) @JsonKey(name: 'num_ctx', includeIfNull: false) int? get numCtx => throw _privateConstructorUsedError; - /// Sets the number of batches to use for generation. 
(Default: 1) + /// Sets the number of batches to use for generation. (Default: 512) @JsonKey(name: 'num_batch', includeIfNull: false) int? get numBatch => throw _privateConstructorUsedError; - /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). + /// On macOS it defaults to 1 to enable metal support, 0 to disable. @JsonKey(name: 'num_gpu', includeIfNull: false) int? get numGpu => throw _privateConstructorUsedError; @@ -655,7 +671,7 @@ mixin _$RequestOptions { @JsonKey(name: 'low_vram', includeIfNull: false) bool? get lowVram => throw _privateConstructorUsedError; - /// Enable f16 key/value. (Default: false) + /// Enable f16 key/value. (Default: true) @JsonKey(name: 'f16_kv', includeIfNull: false) bool? get f16Kv => throw _privateConstructorUsedError; @@ -675,7 +691,9 @@ mixin _$RequestOptions { @JsonKey(name: 'use_mlock', includeIfNull: false) bool? get useMlock => throw _privateConstructorUsedError; - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + /// performance. It is recommended to set this value to the number of physical CPU cores your system has + /// (as opposed to the logical number of cores). @JsonKey(name: 'num_thread', includeIfNull: false) int? get numThread => throw _privateConstructorUsedError; @@ -1144,85 +1162,101 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'num_keep', includeIfNull: false) final int? numKeep; - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model + /// generate the same text for the same prompt. (Default: 0) @override @JsonKey(includeIfNull: false) final int? seed; - /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. + /// (Default: 128, -1 = infinite generation, -2 = fill context) @override @JsonKey(name: 'num_predict', includeIfNull: false) final int? numPredict; - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @override @JsonKey(name: 'top_k', includeIfNull: false) final int? topK; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @override @JsonKey(name: 'tfs_z', includeIfNull: false) final double? tfsZ; - /// Typical p is used to reduce the impact of less probable tokens from the output. + /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) @override @JsonKey(name: 'typical_p', includeIfNull: false) final double? typicalP; - /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. + /// (Default: 64, 0 = disabled, -1 = num_ctx) @override @JsonKey(name: 'repeat_last_n', includeIfNull: false) final int? repeatLastN; - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. + /// (Default: 0.8) @override @JsonKey(includeIfNull: false) final double? temperature; - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @override @JsonKey(name: 'repeat_penalty', includeIfNull: false) final double? repeatPenalty; - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the + /// model's likelihood to talk about new topics. (Default: 0) @override @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty; - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + /// model's likelihood to repeat the same line verbatim. (Default: 0) @override @JsonKey(name: 'frequency_penalty', includeIfNull: false) final double? frequencyPenalty; - /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. + /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @override @JsonKey(includeIfNull: false) final int? mirostat; - /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more + /// focused and coherent text. (Default: 5.0) @override @JsonKey(name: 'mirostat_tau', includeIfNull: false) final double? mirostatTau; - /// Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + /// (Default: 0.1) @override @JsonKey(name: 'mirostat_eta', includeIfNull: false) final double? mirostatEta; - /// Penalize newlines in the output. (Default: false) + /// Penalize newlines in the output. (Default: true) @override @JsonKey(name: 'penalize_newline', includeIfNull: false) final bool? penalizeNewline; - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. final List? _stop; - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. @override @JsonKey(includeIfNull: false) List? get stop { @@ -1238,17 +1272,18 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(includeIfNull: false) final bool? numa; - /// Sets the size of the context window used to generate the next token. + /// Sets the size of the context window used to generate the next token. (Default: 2048) @override @JsonKey(name: 'num_ctx', includeIfNull: false) final int? numCtx; - /// Sets the number of batches to use for generation. (Default: 1) + /// Sets the number of batches to use for generation. (Default: 512) @override @JsonKey(name: 'num_batch', includeIfNull: false) final int? numBatch; - /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). + /// On macOS it defaults to 1 to enable metal support, 0 to disable. @override @JsonKey(name: 'num_gpu', includeIfNull: false) final int? numGpu; @@ -1263,7 +1298,7 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'low_vram', includeIfNull: false) final bool? lowVram; - /// Enable f16 key/value. (Default: false) + /// Enable f16 key/value. (Default: true) @override @JsonKey(name: 'f16_kv', includeIfNull: false) final bool? f16Kv; @@ -1288,7 +1323,9 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'use_mlock', includeIfNull: false) final bool? useMlock; - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + /// performance. It is recommended to set this value to the number of physical CPU cores your system has + /// (as opposed to the logical number of cores). @override @JsonKey(name: 'num_thread', includeIfNull: false) final int? numThread; @@ -1451,82 +1488,97 @@ abstract class _RequestOptions extends RequestOptions { int? get numKeep; @override - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. 
Setting this to a specific number will make the model + /// generate the same text for the same prompt. (Default: 0) @JsonKey(includeIfNull: false) int? get seed; @override - /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. + /// (Default: 128, -1 = infinite generation, -2 = fill context) @JsonKey(name: 'num_predict', includeIfNull: false) int? get numPredict; @override - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? get topK; @override - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; @override - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? get tfsZ; @override - /// Typical p is used to reduce the impact of less probable tokens from the output. + /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) @JsonKey(name: 'typical_p', includeIfNull: false) double? get typicalP; @override - /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. + /// (Default: 64, 0 = disabled, -1 = num_ctx) @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? get repeatLastN; @override - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. + /// (Default: 0.8) @JsonKey(includeIfNull: false) double? get temperature; @override - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @JsonKey(name: 'repeat_penalty', includeIfNull: false) double? get repeatPenalty; @override - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
+ /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the + /// model's likelihood to talk about new topics. (Default: 0) @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; @override - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + /// model's likelihood to repeat the same line verbatim. (Default: 0) @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty; @override - /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. + /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @JsonKey(includeIfNull: false) int? get mirostat; @override - /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more + /// focused and coherent text. (Default: 5.0) @JsonKey(name: 'mirostat_tau', includeIfNull: false) double? get mirostatTau; @override - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + /// (Default: 0.1) @JsonKey(name: 'mirostat_eta', includeIfNull: false) double? get mirostatEta; @override - /// Penalize newlines in the output. (Default: false) + /// Penalize newlines in the output. (Default: true) @JsonKey(name: 'penalize_newline', includeIfNull: false) bool? get penalizeNewline; @override - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. @JsonKey(includeIfNull: false) List? get stop; @override @@ -1536,17 +1588,18 @@ abstract class _RequestOptions extends RequestOptions { bool? get numa; @override - /// Sets the size of the context window used to generate the next token. + /// Sets the size of the context window used to generate the next token. (Default: 2048) @JsonKey(name: 'num_ctx', includeIfNull: false) int? get numCtx; @override - /// Sets the number of batches to use for generation. (Default: 1) + /// Sets the number of batches to use for generation. (Default: 512) @JsonKey(name: 'num_batch', includeIfNull: false) int? get numBatch; @override - /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). + /// On macOS it defaults to 1 to enable metal support, 0 to disable. @JsonKey(name: 'num_gpu', includeIfNull: false) int? get numGpu; @override @@ -1561,7 +1614,7 @@ abstract class _RequestOptions extends RequestOptions { bool? get lowVram; @override - /// Enable f16 key/value. (Default: false) + /// Enable f16 key/value. 
(Default: true) @JsonKey(name: 'f16_kv', includeIfNull: false) bool? get f16Kv; @override @@ -1586,7 +1639,9 @@ abstract class _RequestOptions extends RequestOptions { bool? get useMlock; @override - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + /// performance. It is recommended to set this value to the number of physical CPU cores your system has + /// (as opposed to the logical number of cores). @JsonKey(name: 'num_thread', includeIfNull: false) int? get numThread; @override diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index 7ade34a7..12159978 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -326,90 +326,106 @@ components: type: integer nullable: true description: | - Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + Sets the random number seed to use for generation. Setting this to a specific number will make the model + generate the same text for the same prompt. (Default: 0) num_predict: type: integer nullable: true description: | - Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + Maximum number of tokens to predict when generating text. + (Default: 128, -1 = infinite generation, -2 = fill context) top_k: type: integer nullable: true description: | - Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + while a lower value (e.g. 10) will be more conservative. (Default: 40) top_p: type: number format: float nullable: true description: | - Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) tfs_z: type: number format: float nullable: true description: | - Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) typical_p: type: number format: float nullable: true description: | - Typical p is used to reduce the impact of less probable tokens from the output. + Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) repeat_last_n: type: integer nullable: true description: | - Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + Sets how far back for the model to look back to prevent repetition. 
+ (Default: 64, 0 = disabled, -1 = num_ctx) temperature: type: number format: float nullable: true description: | - The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + The temperature of the model. Increasing the temperature will make the model answer more creatively. + (Default: 0.8) repeat_penalty: type: number format: float nullable: true description: | - Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) presence_penalty: type: number format: float nullable: true description: | - Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + Positive values penalize new tokens based on whether they appear in the text so far, increasing the + model's likelihood to talk about new topics. (Default: 0) frequency_penalty: type: number format: float nullable: true description: | - Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + model's likelihood to repeat the same line verbatim. (Default: 0) mirostat: type: integer nullable: true description: | - Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + Enable Mirostat sampling for controlling perplexity. + (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) mirostat_tau: type: number format: float nullable: true description: | - Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + Controls the balance between coherence and diversity of the output. A lower value will result in more + focused and coherent text. (Default: 5.0) mirostat_eta: type: number format: float nullable: true description: | - Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + (Default: 0.1) penalize_newline: type: boolean nullable: true description: | - Penalize newlines in the output. (Default: false) + Penalize newlines in the output. (Default: true) stop: type: array nullable: true - description: Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + description: | + Sequences where the API will stop generating further tokens. The returned text will not contain the stop + sequence. items: type: string numa: @@ -421,17 +437,18 @@ components: type: integer nullable: true description: | - Sets the size of the context window used to generate the next token. + Sets the size of the context window used to generate the next token. 
(Default: 2048) num_batch: type: integer nullable: true description: | - Sets the number of batches to use for generation. (Default: 1) + Sets the number of batches to use for generation. (Default: 512) num_gpu: type: integer nullable: true description: | - The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + The number of layers to send to the GPU(s). + On macOS it defaults to 1 to enable metal support, 0 to disable. main_gpu: type: integer nullable: true @@ -446,7 +463,7 @@ components: type: boolean nullable: true description: | - Enable f16 key/value. (Default: false) + Enable f16 key/value. (Default: true) logits_all: type: boolean nullable: true @@ -471,7 +488,9 @@ components: type: integer nullable: true description: | - Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + performance. It is recommended to set this value to the number of physical CPU cores your system has + (as opposed to the logical number of cores). ResponseFormat: type: string description: | From f9db5063da816d5e5a47816316ba028c83a82631 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 10 Jul 2024 08:08:56 +0200 Subject: [PATCH 179/251] refactor: Depend on exact versions for internal 1st party dependencies (#484) --- packages/langchain/pubspec.yaml | 2 +- packages/langchain_anthropic/pubspec.yaml | 2 +- packages/langchain_chroma/pubspec.yaml | 2 +- packages/langchain_community/pubspec.yaml | 2 +- packages/langchain_firebase/pubspec.yaml | 2 +- packages/langchain_google/pubspec.yaml | 2 +- packages/langchain_mistralai/pubspec.yaml | 2 +- packages/langchain_ollama/pubspec.yaml | 2 +- packages/langchain_openai/pubspec.yaml | 2 +- packages/langchain_pinecone/pubspec.yaml | 2 +- packages/langchain_supabase/pubspec.yaml | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index e53a3859..1dfd6338 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -19,7 +19,7 @@ dependencies: characters: ^1.3.0 collection: ">=1.17.0 <1.19.0" crypto: ^3.0.3 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 700b6559..9444cd96 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -19,7 +19,7 @@ dependencies: anthropic_sdk_dart: ^0.1.0 collection: '>=1.17.0 <1.19.0' http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 rxdart: ^0.27.7 diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 8d7684ba..395c1ca6 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -19,7 +19,7 @@ environment: dependencies: chromadb: ^0.2.0+1 http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 4e2c6f7a..55b71a63 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -22,7 +22,7 @@ 
dependencies: flat_buffers: ^23.5.26 http: ^1.1.0 json_path: ^0.7.1 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 math_expressions: ^2.4.0 meta: ^1.11.0 objectbox: ^4.0.1 diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 0cb109c8..8d6b73af 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -23,7 +23,7 @@ dependencies: firebase_auth: ^5.1.0 firebase_core: ^3.1.0 firebase_vertexai: ^0.2.2 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index eeec07e2..24e330ac 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -24,7 +24,7 @@ dependencies: googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 meta: ^1.11.0 uuid: ^4.3.3 vertex_ai: ^0.1.0+1 diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index b8025bfe..2dfd84a2 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -19,7 +19,7 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 mistralai_dart: ^0.0.3+3 diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 273a1594..13b206cb 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -19,7 +19,7 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 ollama_dart: ^0.1.2 diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 4679404e..7df76216 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -19,7 +19,7 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 openai_dart: ^0.3.3+1 diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index 282943fd..987d270b 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -18,7 +18,7 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.3.3 diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index 80ab7e11..8650f10c 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -18,7 +18,7 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 meta: ^1.11.0 supabase: ^2.0.8 From 757cbac290dae79bd29f85898a912ee4d72dbe61 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 10 Jul 2024 08:13:21 +0200 Subject: [PATCH 180/251] build: Expand collection package version constraints to 1.20.0 (#485) --- melos.yaml | 2 +- packages/langchain/pubspec.yaml | 2 +- packages/langchain_anthropic/pubspec.yaml | 2 +- packages/langchain_core/pubspec.yaml | 2 +- packages/langchain_firebase/pubspec.yaml | 2 +- packages/langchain_google/pubspec.yaml | 2 +- packages/langchain_mistralai/pubspec.yaml | 2 +- packages/langchain_ollama/pubspec.yaml | 2 +- 
packages/langchain_openai/pubspec.yaml | 2 +- packages/vertex_ai/pubspec.yaml | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/melos.yaml b/melos.yaml index 56d0555c..164a7618 100644 --- a/melos.yaml +++ b/melos.yaml @@ -26,7 +26,7 @@ command: async: ^2.11.0 beautiful_soup_dart: ^0.3.0 characters: ^1.3.0 - collection: '>=1.17.0 <1.19.0' + collection: '>=1.17.0 <1.20.0' cross_file: ^0.3.4+1 crypto: ^3.0.3 csv: ^6.0.0 diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 1dfd6338..9217303a 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -17,7 +17,7 @@ environment: dependencies: characters: ^1.3.0 - collection: ">=1.17.0 <1.19.0" + collection: ">=1.17.0 <1.20.0" crypto: ^3.0.3 langchain_core: 0.3.3 meta: ^1.11.0 diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 9444cd96..33e625f7 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -17,7 +17,7 @@ environment: dependencies: anthropic_sdk_dart: ^0.1.0 - collection: '>=1.17.0 <1.19.0' + collection: ">=1.17.0 <1.20.0" http: ^1.1.0 langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index 38081129..b1f7f159 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -17,7 +17,7 @@ environment: dependencies: async: ^2.11.0 - collection: '>=1.17.0 <1.19.0' + collection: ">=1.17.0 <1.20.0" cross_file: ^0.3.4+1 crypto: ^3.0.3 meta: ^1.11.0 diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 8d6b73af..61201af0 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -18,7 +18,7 @@ environment: flutter: ">=3.19.0" dependencies: - collection: ">=1.17.0 <1.19.0" + collection: ">=1.17.0 <1.20.0" firebase_app_check: ^0.3.0 firebase_auth: ^5.1.0 firebase_core: ^3.1.0 diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 24e330ac..da0082a6 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -17,7 +17,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.19.0" + collection: ">=1.17.0 <1.20.0" fetch_client: ^1.0.2 gcloud: ^0.8.12 google_generative_ai: 0.4.3 diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 2dfd84a2..3c725d01 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -17,7 +17,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.19.0" + collection: ">=1.17.0 <1.20.0" http: ^1.1.0 langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 13b206cb..0214a6c7 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -17,7 +17,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.19.0" + collection: ">=1.17.0 <1.20.0" http: ^1.1.0 langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 7df76216..4b628c54 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -17,7 +17,7 @@ environment: sdk: ">=3.0.0 
<4.0.0" dependencies: - collection: ">=1.17.0 <1.19.0" + collection: ">=1.17.0 <1.20.0" http: ^1.1.0 langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index 3454b32d..ccfa07c8 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -17,7 +17,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - collection: '>=1.17.0 <1.19.0' + collection: ">=1.17.0 <1.20.0" googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 From 59a2d5fc987d8329cc1805857815c6493ebbd5f8 Mon Sep 17 00:00:00 2001 From: Ganesh Date: Fri, 12 Jul 2024 03:08:26 +0530 Subject: [PATCH 181/251] feat: Add support for ChatOllamaTools (#288) Co-authored-by: David Miguel --- docs/_sidebar.md | 1 + .../chat_models/integrations/ollama_tools.md | 273 ++++++++++++++++ .../integrations/ollama_tools.dart | 226 ++++++++++++++ .../langchain_core/lib/src/tools/base.dart | 18 ++ .../lib/src/chat_models/chat_models.dart | 6 +- .../{ => chat_ollama}/chat_ollama.dart | 8 +- .../{ => chat_ollama}/mappers.dart | 0 .../chat_models/{ => chat_ollama}/types.dart | 3 +- .../chat_ollama_tools/chat_ollama_tools.dart | 294 ++++++++++++++++++ .../chat_ollama_tools/mappers.dart | 1 + .../chat_models/chat_ollama_tools/types.dart | 70 +++++ .../chat_models/chat_ollama_tools_test.dart | 207 ++++++++++++ 12 files changed, 1101 insertions(+), 6 deletions(-) create mode 100644 docs/modules/model_io/models/chat_models/integrations/ollama_tools.md create mode 100644 examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart rename packages/langchain_ollama/lib/src/chat_models/{ => chat_ollama}/chat_ollama.dart (97%) rename packages/langchain_ollama/lib/src/chat_models/{ => chat_ollama}/mappers.dart (100%) rename packages/langchain_ollama/lib/src/chat_models/{ => chat_ollama}/types.dart (99%) create mode 100644 packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart create mode 100644 packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart create mode 100644 packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart create mode 100644 packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart diff --git a/docs/_sidebar.md b/docs/_sidebar.md index 2637ce9b..5296fd96 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -62,6 +62,7 @@ - [GCP Vertex AI](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md) - [Google AI](/modules/model_io/models/chat_models/integrations/googleai.md) - [Ollama](/modules/model_io/models/chat_models/integrations/ollama.md) + - [OllamaTools](/modules/model_io/models/chat_models/integrations/ollama_tools.md) - [Mistral AI](/modules/model_io/models/chat_models/integrations/mistralai.md) - [OpenRouter](/modules/model_io/models/chat_models/integrations/open_router.md) - [Together AI](/modules/model_io/models/chat_models/integrations/together_ai.md) diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama_tools.md b/docs/modules/model_io/models/chat_models/integrations/ollama_tools.md new file mode 100644 index 00000000..17334a5b --- /dev/null +++ b/docs/modules/model_io/models/chat_models/integrations/ollama_tools.md @@ -0,0 +1,273 @@ +# ChatOllamaTools + +LangChain.dart offers an experimental wrapper around open source models run locally via [Ollama](https://ollama.ai) that enables [tool calling capabilities](/modules/model_io/models/chat_models/how_to/tools.md). 
+ +> Warning: This is an experimental wrapper that attempts to add tool calling support to models that do not support it natively. Use with caution. + +More powerful and capable models will perform better with complex schema and/or multiple tools. For a complete list of supported models, see the [Ollama model library](https://ollama.ai/library). The examples below use Google's [Gemma2 9B model](https://ollama.com/library/gemma2) running locally. + +## Setup + +Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance: + +1. Download and install [Ollama](https://ollama.ai) +2. Fetch a model via `ollama pull ` + * e.g., for Llama 3: `ollama pull gemma2` + +## Usage + +You can use `ChatOllamaTools` in a similar way to a regular `ChatOllama` wrapper. The main difference is that `ChatOllamaToolsOptions` accepts: +- `options`: the usual `ChatOllamaOptions` options +- `tools`: the list with the definition of the tools the model can call +- `toolChoice`: how the model should respond to tool calls +- `toolsSystemPromptTemplate`: the prompt template used to inform the user about the available tools + +`ChatOllamaTools` follows the standard [LangChain tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). + +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, +); +final model = ChatOllamaTools( + defaultOptions: ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: 'gemma2', + temperature: 0, + ), + tools: [tool], + ), +); +final res = await model.invoke( + PromptValue.string("What's the weather in Barcelona?"), +); +print(res); +// ChatResult{ +// id: 51ede9a1-7ab7-4c81-803e-cfe237eb92ae, +// output: AIChatMessage{ +// content: , +// toolCalls: [ +// AIChatMessageToolCall{ +// id: 42139039-9251-4e1b-9f47-21f24da65be9, +// name: get_current_weather, +// arguments: {location: Barcelona, ES, unit: celsius}, +// } +// ], +// }, +// finishReason: FinishReason.stop, +// metadata: { +// model: gemma2, +// created_at: 2024-07-11T15:44:56.893216Z, +// done: true, +// total_duration: 2900101792, +// load_duration: 41286000, +// prompt_eval_count: 327, +// prompt_eval_duration: 453557000, +// eval_count: 57, +// eval_duration: 2401129000 +// }, +// usage: LanguageModelUsage{ +// promptTokens: 327, +// promptBillableCharacters: null, +// responseTokens: 57, +// responseBillableCharacters: null, +// totalTokens: 384 +// } +// } +``` + +If you want to extract only the tool calls, you can use the `ToolCallOutputParser`: + +```dart +final chain = model.pipe(ToolsOutputParser()); +final res2 = await chain.invoke( + PromptValue.string("What's the weather in Barcelona and Amsterdam?"), +); +print(res2); +// [ +// ParsedToolCall{ +// id: b62a9051-0193-4115-9bac-362005c40c2d, +// name: get_current_weather, +// arguments: {location: Barcelona, ES, unit: celsius}, +// }, +// ParsedToolCall{ +// id: 442ff44c-2a8e-4e16-9fc5-ddaf586a37ce, +// name: get_current_weather, +// arguments: {location: Amsterdam, NL, unit: celsius}, 
+// } +// ] +``` + +As you can see, `ChatOllamaTools` support calling multiple tools in a single request. + +If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter: + +```dart +final res3 = await chain.invoke( + PromptValue.string("What's the weather in Barcelona and Amsterdam?"), + options: ChatOllamaToolsOptions( + toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), + ), +); +``` + +Note: streaming is not supported at the moment. + +## Customizing the system prompt template + +Behind the scenes, `ChatOllamaTools` uses Ollama's JSON mode to restrict output to JSON, and passes tool schemas to the prompt as JSON schemas. + +You can find the default system prompt in `ChatOllamaToolsOptions.defaultToolsSystemPromtTemplate`. + +Because different models have different strengths, it may be helpful to pass in your own system prompt. Here's an example of how you can customize the system prompt template: + +```dart +const toolSystemPromptTemplate = ''' +You have access to these tools: +{tools} + +Based on the user input, select {tool_choice} from the available tools. + +Respond with a JSON containing a list of tool call objects. +The tool call objects should have two properties: +- "tool_name": The name of the selected tool (string) +- "tool_input": A JSON string with the input for the tool matching the tool's input schema + +Example response format: +```json +{{ + "tool_calls": [ + {{ + "tool_name": "tool_name", + "tool_input": "{{"param1":"value1","param2":"value2"}}" + }} + ] +}} + +Ensure your response is valid JSON and follows this exact format.'''; + +final model = ChatOllamaTools( + defaultOptions: const ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: 'gemma2', + temperature: 0, + ), + tools: [tool], + toolsSystemPromptTemplate: toolSystemPromptTemplate, + ), +); +``` + +You prompt template should contain the following placeholders: +- `{tools}`: where the list of available tools will be inserted +- `{tool_choice}`: where the instruction to select a certain tool will be inserted + +The model should return a JSON like: +```json +{ + "tool_calls": [ + { + "tool_name": "tool_name", + "tool_input": "{\"param1\":\"value1\",\"param2\":\"value2\"}" + } + ] +} +``` + +## Example: extracting structured data + +A useful application of tool calling is extracting structured data from unstructured text. In the following example, we use a tool to extract the names, heights, and hair colors of people mentioned in a passage. 
+ +```dart +const tool = ToolSpec( + name: 'information_extraction', + description: 'Extracts the relevant information from the passage', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'people': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'description': 'The name of a person', + }, + 'height': { + 'type': 'number', + 'description': 'The height of the person in cm', + }, + 'hair_color': { + 'type': 'string', + 'description': 'The hair color of the person', + 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], + }, + }, + 'required': ['name', 'height', 'hair_color'], + }, + }, + }, + 'required': ['people'], + }, +); + +final model = ChatOllamaTools( + defaultOptions: ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: 'gemma2', + temperature: 0, + ), + tools: [tool], + toolChoice: ChatToolChoice.forced(name: tool.name), + ), +); + +final promptTemplate = ChatPromptTemplate.fromTemplate(''' +Extract and save the relevant entities mentioned in the following passage together with their properties. + +Passage: +{input}'''); + +final chain = Runnable.getMapFromInput() + .pipe(promptTemplate) + .pipe(model) + .pipe(ToolsOutputParser()); + +final res = await chain.invoke( + 'Alex is 5 feet tall. ' + 'Claudia is 1 foot taller than Alex and jumps higher than him. ' + 'Claudia has orange hair and Alex is blonde.', +); +final extractedData = res.first.arguments; +print(extractedData); +// { +// people: [ +// { +// name: Alex, +// height: 152, +// hair_color: blonde +// }, +// { +// name: Claudia, +// height: 183, +// hair_color: orange +// } +// ] +// } +``` diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart new file mode 100644 index 00000000..486b8c1b --- /dev/null +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart @@ -0,0 +1,226 @@ +// ignore_for_file: avoid_print, avoid_redundant_argument_values +import 'package:langchain/langchain.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +void main(final List arguments) async { + await _tools(); + await _customizingSystemPrompt(); + await _extraction(); +} + +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, +); + +Future _tools() async { + final model = ChatOllamaTools( + defaultOptions: const ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: 'gemma2', + temperature: 0, + ), + tools: [tool], + ), + ); + + final res = await model.invoke( + PromptValue.string("What's the weather in Barcelona?"), + ); + print(res); + // ChatResult{ + // id: 51ede9a1-7ab7-4c81-803e-cfe237eb92ae, + // output: AIChatMessage{ + // content: , + // toolCalls: [ + // AIChatMessageToolCall{ + // id: 42139039-9251-4e1b-9f47-21f24da65be9, + // name: get_current_weather, + // arguments: {location: Barcelona, ES, unit: celsius}, + // } + // ], + // }, + // finishReason: FinishReason.stop, + // metadata: { + // model: gemma2, + // created_at: 2024-07-11T15:44:56.893216Z, + // done: true, + // total_duration: 2900101792, + // load_duration: 41286000, + // prompt_eval_count: 327, + // prompt_eval_duration: 453557000, + // eval_count: 57, + // eval_duration: 2401129000 + // }, + // usage: LanguageModelUsage{ + // promptTokens: 327, + // promptBillableCharacters: null, + // responseTokens: 57, + // responseBillableCharacters: null, + // totalTokens: 384 + // } + // } + + final chain = model.pipe(ToolsOutputParser()); + final res2 = await chain.invoke( + PromptValue.string("What's the weather in Barcelona and Amsterdam?"), + ); + print(res2); + // [ + // ParsedToolCall{ + // id: b62a9051-0193-4115-9bac-362005c40c2d, + // name: get_current_weather, + // arguments: {location: Barcelona, ES, unit: celsius}, + // }, + // ParsedToolCall{ + // id: 442ff44c-2a8e-4e16-9fc5-ddaf586a37ce, + // name: get_current_weather, + // arguments: {location: Amsterdam, NL, unit: celsius}, + // } + // ] + + final res3 = await chain.invoke( + PromptValue.string("What's the weather in Barcelona and Amsterdam?"), + options: ChatOllamaToolsOptions( + toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), + ), + ); + print(res3); +} + +Future _customizingSystemPrompt() async { + const toolSystemPromptTemplate = ''' +You have access to these tools: +{tools} + +Based on the user input, select {tool_choice} from the available tools. + +Respond with a JSON containing a list of tool call objects. 
+The tool call objects should have two properties: +- "tool_name": The name of the selected tool (string) +- "tool_input": A JSON string with the input for the tool matching the tool's input schema + +Example response format: +```json +{{ + "tool_calls": [ + {{ + "tool_name": "tool_name", + "tool_input": "{{"param1":"value1","param2":"value2"}}" + }} + ] +}} +``` + +Ensure your response is valid JSON and follows this exact format.'''; + + final model = ChatOllamaTools( + defaultOptions: const ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: 'gemma2', + temperature: 0, + ), + tools: [tool], + toolsSystemPromptTemplate: toolSystemPromptTemplate, + ), + ); + + final res = await model.invoke( + PromptValue.string("What's the weather in Barcelona?"), + ); + print(res); +} + +Future _extraction() async { + const tool = ToolSpec( + name: 'information_extraction', + description: 'Extracts the relevant information from the passage', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'people': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'description': 'The name of a person', + }, + 'height': { + 'type': 'number', + 'description': 'The height of the person in cm', + }, + 'hair_color': { + 'type': 'string', + 'description': 'The hair color of the person', + 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], + }, + }, + 'required': ['name', 'height', 'hair_color'], + }, + }, + }, + 'required': ['people'], + }, + ); + + final model = ChatOllamaTools( + defaultOptions: ChatOllamaToolsOptions( + options: const ChatOllamaOptions( + model: 'gemma2', + temperature: 0, + ), + tools: const [tool], + toolChoice: ChatToolChoice.forced(name: tool.name), + ), + ); + + final promptTemplate = ChatPromptTemplate.fromTemplate(''' +Extract and save the relevant entities mentioned in the following passage together with their properties. + +Passage: +{input}'''); + + final chain = Runnable.getMapFromInput() + .pipe(promptTemplate) + .pipe(model) + .pipe(ToolsOutputParser()); + + final res = await chain.invoke( + 'Alex is 5 feet tall. ' + 'Claudia is 1 foot taller than Alex and jumps higher than him. ' + 'Claudia has orange hair and Alex is blonde.', + ); + final extractedData = res.first.arguments; + print(extractedData); + // { + // people: [ + // { + // name: Alex, + // height: 152, + // hair_color: blonde + // }, + // { + // name: Claudia, + // height: 183, + // hair_color: orange + // } + // ] + // } +} diff --git a/packages/langchain_core/lib/src/tools/base.dart b/packages/langchain_core/lib/src/tools/base.dart index 37f9f9d2..f6e8dd29 100644 --- a/packages/langchain_core/lib/src/tools/base.dart +++ b/packages/langchain_core/lib/src/tools/base.dart @@ -73,6 +73,15 @@ ToolSpec{ } '''; } + + /// Converts the tool spec to a JSON-serializable map. 
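+  ///
+  /// For illustration, a hypothetical `get_current_weather` spec would
+  /// serialize to a map of the form:
+  ///
+  /// ```dart
+  /// {
+  ///   'name': 'get_current_weather',
+  ///   'description': 'Get the current weather in a given location',
+  ///   'inputJsonSchema': {'type': 'object', 'properties': {...}},
+  /// }
+  /// ```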
+ Map toJson() { + return { + 'name': name, + 'description': description, + 'inputJsonSchema': inputJsonSchema, + }; + } } /// {@template tool} @@ -214,6 +223,15 @@ abstract base class Tool name.hashCode ^ description.hashCode ^ inputJsonSchema.hashCode; + + @override + Map toJson() { + return { + 'name': name, + 'description': description, + 'inputJsonSchema': inputJsonSchema, + }; + } } /// {@template tool_func} diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_models.dart b/packages/langchain_ollama/lib/src/chat_models/chat_models.dart index 731f4e59..4b826ef4 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_models.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_models.dart @@ -1,2 +1,4 @@ -export 'chat_ollama.dart'; -export 'types.dart'; +export 'chat_ollama/chat_ollama.dart'; +export 'chat_ollama/types.dart'; +export 'chat_ollama_tools/chat_ollama_tools.dart'; +export 'chat_ollama_tools/types.dart' hide conversationalResponseTool; diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart similarity index 97% rename from packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart rename to packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart index 64a5cdae..7dbed939 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart @@ -5,7 +5,7 @@ import 'package:langchain_tiktoken/langchain_tiktoken.dart'; import 'package:ollama_dart/ollama_dart.dart'; import 'package:uuid/uuid.dart'; -import '../llms/mappers.dart'; +import '../../llms/mappers.dart'; import 'mappers.dart'; import 'types.dart'; @@ -31,6 +31,8 @@ import 'types.dart'; /// /// - [Ollama API docs](https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion) /// +/// If you need to use tools, consider using the [ChatOllamaTools] instead. +/// /// ### Setup /// /// 1. Download and install [Ollama](https://ollama.ai) @@ -218,8 +220,8 @@ class ChatOllama extends BaseChatModel { return GenerateChatCompletionRequest( model: options?.model ?? defaultOptions.model ?? throwNullModelError(), messages: messages.toMessages(), - format: options?.format?.toResponseFormat(), - keepAlive: options?.keepAlive, + format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), + keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, stream: stream, options: RequestOptions( numKeep: options?.numKeep ?? 
defaultOptions.numKeep, diff --git a/packages/langchain_ollama/lib/src/chat_models/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart similarity index 100% rename from packages/langchain_ollama/lib/src/chat_models/mappers.dart rename to packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart diff --git a/packages/langchain_ollama/lib/src/chat_models/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart similarity index 99% rename from packages/langchain_ollama/lib/src/chat_models/types.dart rename to packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart index 4db56920..67598acb 100644 --- a/packages/langchain_ollama/lib/src/chat_models/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart @@ -1,6 +1,7 @@ import 'package:langchain_core/chat_models.dart'; -import '../llms/types.dart'; +import '../../../langchain_ollama.dart'; +import '../../llms/types.dart'; /// {@template chat_ollama_options} /// Options to pass into ChatOllama. diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart new file mode 100644 index 00000000..889e7c87 --- /dev/null +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart @@ -0,0 +1,294 @@ +import 'dart:convert'; + +import 'package:collection/collection.dart' show IterableExtension; +import 'package:http/http.dart' as http; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_tiktoken/langchain_tiktoken.dart'; +import 'package:ollama_dart/ollama_dart.dart'; +import 'package:uuid/uuid.dart'; + +import 'mappers.dart'; +import 'types.dart'; + +/// Wrapper around [Ollama](https://ollama.ai) Chat API that enables tool +/// calling capabilities. +/// +/// Warning: This is an experimental wrapper that attempts to add tool calling +/// support to models that do not support it natively. More powerful and +/// capable models will perform better with complex schema and/or multiple +/// tools. Use with caution. +/// +/// Example: +/// ```dart +/// const tool = ToolSpec( +/// name: 'get_current_weather', +/// description: 'Get the current weather in a given location', +/// inputJsonSchema: { +/// 'type': 'object', +/// 'properties': { +/// 'location': { +/// 'type': 'string', +/// 'description': 'The city and state, e.g. San Francisco, CA', +/// }, +/// 'unit': { +/// 'type': 'string', +/// 'enum': ['celsius', 'fahrenheit'], +/// }, +/// }, +/// 'required': ['location'], +/// }, +/// ); +/// final chatModel = ChatOllamaTools( +/// defaultOptions: ChatOllamaToolOptions( +/// options: ChatOllamaOptions(model: 'llama3:8b'), +/// tools: [tool], +/// ), +/// ); +/// final prompt = PromptValue.string('What's the weather in Bangalore, India?'); +/// final res = await ollamaTools.invoke(prompt); +/// ``` +/// +/// If you don't need to use tools, use [ChatOllama] instead. +/// +/// ### Setup +/// +/// 1. Download and install [Ollama](https://ollama.ai) +/// 2. Fetch a model via `ollama pull ` +/// * e.g., for Llama 3: `ollama pull llama3` +/// +/// ### Ollama base URL +/// +/// By default, [ChatOllama] uses 'http://localhost:11434/api' as base URL +/// (default Ollama API URL). But if you are running Ollama on a different +/// one, you can override it using the [baseUrl] parameter. 
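+///
+/// A minimal sketch, assuming Ollama is reachable on a non-default host/port:
+///
+/// ```dart
+/// final chatModel = ChatOllamaTools(
+///   baseUrl: 'http://192.168.1.42:11434/api', // hypothetical host running Ollama
+///   defaultOptions: const ChatOllamaToolsOptions(
+///     options: ChatOllamaOptions(model: 'llama3'),
+///   ),
+/// );
+/// ```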
+class ChatOllamaTools extends BaseChatModel { + /// Create a new [ChatOllamaTools] instance. + /// + /// Main configuration options: + /// - `baseUrl`: the base URL of Ollama API. + /// - [ChatOllamaTools.defaultOptions] + /// + /// Advance configuration options: + /// - `headers`: global headers to send with every request. You can use + /// this to set custom headers, or to override the default headers. + /// - `queryParams`: global query parameters to send with every request. You + /// can use this to set custom query parameters. + /// - `client`: the HTTP client to use. You can set your own HTTP client if + /// you need further customization (e.g. to use a Socks5 proxy). + /// - [ChatOllama.encoding] + ChatOllamaTools({ + final String baseUrl = 'http://localhost:11434/api', + final Map? headers, + final Map? queryParams, + final http.Client? client, + super.defaultOptions = const ChatOllamaToolsOptions( + options: ChatOllamaOptions(model: 'llama3'), + ), + this.encoding = 'cl100k_base', + }) : _client = OllamaClient( + baseUrl: baseUrl, + headers: headers, + queryParams: queryParams, + client: client, + ); + + /// A client for interacting with Ollama API. + final OllamaClient _client; + + /// The encoding to use by tiktoken when [tokenize] is called. + /// + /// Ollama does not provide any API to count tokens, so we use tiktoken + /// to get an estimation of the number of tokens in a prompt. + String encoding; + + /// A UUID generator. + late final Uuid _uuid = const Uuid(); + + @override + String get modelType => 'chat-ollama-tools'; + + @override + Future invoke( + PromptValue input, { + ChatOllamaToolsOptions? options, + }) async { + final id = _uuid.v4(); + final completion = await _client.generateChatCompletion( + request: _generateCompletionRequest(input, options), + ); + final result = completion.toChatResult(id); + return _parseResult(result); + } + + /// Creates a [GenerateChatCompletionRequest] from the given input. + GenerateChatCompletionRequest _generateCompletionRequest( + final PromptValue input, + final ChatOllamaToolsOptions? toolsOptions, { + final bool stream = false, + }) { + final messages = _formatPrompt(input, toolsOptions).toChatMessages(); + final options = toolsOptions?.options; + final defaultOptions = + this.defaultOptions.options ?? const ChatOllamaOptions(); + return GenerateChatCompletionRequest( + model: options?.model ?? defaultOptions.model ?? throwNullModelError(), + messages: messages.toMessages(), + format: ResponseFormat.json, + keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, + stream: stream, + options: RequestOptions( + numKeep: options?.numKeep ?? defaultOptions.numKeep, + seed: options?.seed ?? defaultOptions.seed, + numPredict: options?.numPredict ?? defaultOptions.numPredict, + topK: options?.topK ?? defaultOptions.topK, + topP: options?.topP ?? defaultOptions.topP, + tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, + typicalP: options?.typicalP ?? defaultOptions.typicalP, + repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, + temperature: options?.temperature ?? defaultOptions.temperature, + repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty, + presencePenalty: + options?.presencePenalty ?? defaultOptions.presencePenalty, + frequencyPenalty: + options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, + mirostat: options?.mirostat ?? defaultOptions.mirostat, + mirostatTau: options?.mirostatTau ?? defaultOptions.mirostatTau, + mirostatEta: options?.mirostatEta ?? 
defaultOptions.mirostatEta, + penalizeNewline: + options?.penalizeNewline ?? defaultOptions.penalizeNewline, + stop: options?.stop ?? defaultOptions.stop, + numa: options?.numa ?? defaultOptions.numa, + numCtx: options?.numCtx ?? defaultOptions.numCtx, + numBatch: options?.numBatch ?? defaultOptions.numBatch, + numGpu: options?.numGpu ?? defaultOptions.numGpu, + mainGpu: options?.mainGpu ?? defaultOptions.mainGpu, + lowVram: options?.lowVram ?? defaultOptions.lowVram, + f16Kv: options?.f16KV ?? defaultOptions.f16KV, + logitsAll: options?.logitsAll ?? defaultOptions.logitsAll, + vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly, + useMmap: options?.useMmap ?? defaultOptions.useMmap, + useMlock: options?.useMlock ?? defaultOptions.useMlock, + numThread: options?.numThread ?? defaultOptions.numThread, + ), + ); + } + + PromptValue _formatPrompt( + final PromptValue input, + final ChatOllamaToolsOptions? options, + ) { + final toolsSystemPromptTemplate = options?.toolsSystemPromptTemplate ?? + defaultOptions.toolsSystemPromptTemplate ?? + ChatOllamaToolsOptions.defaultToolsSystemPromtTemplate; + final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, toolsSystemPromptTemplate), + (ChatMessageType.messagesPlaceholder, 'input'), + ]); + final toolChoice = options?.toolChoice ?? defaultOptions.toolChoice; + final availableTools = options?.tools ?? defaultOptions.tools; + final tools = switch (toolChoice) { + // If toolChoice is auto, we include all the tools + ChatToolChoiceAuto() || null => [ + ...?availableTools, + conversationalResponseTool, + ], + // If toolChoice is none, we include only the conversational response tool + ChatToolChoiceNone() => [conversationalResponseTool], + // If toolChoice is required, we include only the user specified tools + ChatToolChoiceRequired() => availableTools!, + // If toolChoice is forced, we include only the forced tool + final ChatToolChoiceForced f => [ + availableTools!.firstWhere((t) => t.name == f.name), + ] + }; + final toolsJsonMap = json.encode( + tools.map((tool) => tool.toJson()).toList(growable: false), + ); + final toolChoiceInstructions = switch (toolChoice) { + ChatToolChoiceNone() => '`${conversationalResponseTool.name}` tool', + ChatToolChoiceAuto() || + ChatToolChoiceRequired() || + null => + 'one or more tools', + final ChatToolChoiceForced f => '`${f.name}` tool', + }; + return promptTemplate.formatPrompt({ + 'tools': toolsJsonMap, + 'tool_choice': toolChoiceInstructions, + 'input': input.toChatMessages(), + }); + } + + Future _parseResult(final ChatResult result) async { + try { + final output = result.output.content; + final outputMap = json.decode(output) as Map; + final toolCalls = (outputMap['tool_calls'] as List).map((t) { + final tool = t as Map; + final toolInput = tool['tool_input']; + final toolInputMap = json.decode(toolInput) as Map; + return AIChatMessageToolCall( + id: _uuid.v4(), + name: tool['tool_name'].toString(), + arguments: toolInputMap, + argumentsRaw: toolInput, + ); + }).toList(growable: false); + + final conversationalResponseToolCall = toolCalls + .firstWhereOrNull((t) => t.name == conversationalResponseTool.name); + final content = conversationalResponseToolCall != null + ? 
await conversationalResponseTool.invoke( + conversationalResponseTool.getInputFromJson( + conversationalResponseToolCall.arguments, + ), + ) + : ''; + final otherToolCalls = toolCalls + .where((t) => t.name != conversationalResponseTool.name) + .toList(growable: false); + + return ChatResult( + id: result.id, + output: AIChatMessage( + content: content, + toolCalls: otherToolCalls, + ), + finishReason: result.finishReason, + metadata: result.metadata, + usage: result.usage, + ); + } catch (e) { + throw Exception( + 'Model did not respond in valid json string format, ' + 'try improving your prompt, instruct to "respond in JSON"', + ); + } + } + + /// Tokenizes the given prompt using tiktoken. + /// + /// Currently Ollama does not provide a tokenizer for the models it supports. + /// So we use tiktoken and [encoding] model to get an approximation + /// for counting tokens. Mind that the actual tokens will be totally + /// different from the ones used by the Ollama model. + /// + /// If an encoding model is specified in [encoding] field, that + /// encoding is used instead. + /// + /// - [promptValue] The prompt to tokenize. + @override + Future> tokenize( + PromptValue promptValue, { + ChatOllamaToolsOptions? options, + }) async { + final encoding = getEncoding(this.encoding); + return encoding.encode(promptValue.toString()); + } + + @override + void close() { + _client.endSession(); + } +} diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart new file mode 100644 index 00000000..3a9ebb5a --- /dev/null +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart @@ -0,0 +1 @@ +export '../chat_ollama/mappers.dart'; diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart new file mode 100644 index 00000000..9447a51f --- /dev/null +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart @@ -0,0 +1,70 @@ +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; + +import '../chat_ollama/types.dart'; +import 'chat_ollama_tools.dart'; + +export '../chat_ollama/types.dart'; + +/// {@template chat_ollama_tools_options} +/// Options to pass into [ChatOllamaTools]. +/// {@endtemplate} +class ChatOllamaToolsOptions extends ChatModelOptions { + /// {@macro chat_ollama_tools_options} + const ChatOllamaToolsOptions({ + this.options, + super.tools, + super.toolChoice, + this.toolsSystemPromptTemplate, + }); + + /// [ChatOllamaOptions] to pass into Ollama. + final ChatOllamaOptions? options; + + /// Prompt template for the system message where the model is instructed to + /// use the tools. + /// + /// The following placeholders can be used: + /// - `{tools}`: The list of tools available to the model. + /// - `{tool_choice}`: the tool choice the model must always select. + /// + /// If not provided, [defaultToolsSystemPromtTemplate] will be used. + final String? toolsSystemPromptTemplate; + + /// Default [toolsSystemPromptTemplate]. + static const String defaultToolsSystemPromtTemplate = ''' +You have access to these tools: +{tools} + +Based on the user input, select {tool_choice} from the available tools. + +Respond with a JSON containing a list of tool call objects. 
+The tool call objects should have two properties: +- "tool_name": The name of the selected tool (string) +- "tool_input": A JSON string with the input for the tool matching the tool's input schema + +Example response format: +```json +{{ + "tool_calls": [ + {{ + "tool_name": "tool_name", + "tool_input": "{{"param1":"value1","param2":"value2"}}" + }} + ] +}} +``` + +Ensure your response is valid JSON and follows this exact format. +'''; +} + +/// Default tool called if model decides no other tools should be called +/// for a given query. +final conversationalResponseTool = StringTool.fromFunction( + name: '_conversational_response', + description: + 'Respond conversationally if no other tools should be called for a given query.', + inputDescription: 'Conversational response to the user', + func: (input) => input, +); diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart new file mode 100644 index 00000000..7204591a --- /dev/null +++ b/packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart @@ -0,0 +1,207 @@ +import 'dart:io'; + +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/language_models.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; +import 'package:test/test.dart'; + +void main() { + group( + 'ChatOllamaTools tests', + skip: Platform.environment.containsKey('CI'), + () { + const defaultModel = 'gemma2'; + late ChatOllamaTools model; + const tool1 = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + const tool2 = ToolSpec( + name: 'get_historic_weather', + description: 'Get the historic weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + setUp(() async { + model = ChatOllamaTools( + defaultOptions: const ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: defaultModel, + keepAlive: 2, + ), + tools: [tool1, tool2], + ), + ); + }); + + tearDown(() { + model.close(); + }); + + test('Test single tool call', () async { + final res = await model.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + ); + expect(res.output.toolCalls, hasLength(1)); + final toolCall = res.output.toolCalls.first; + expect(toolCall.name, 'get_current_weather'); + expect(toolCall.argumentsRaw, isNotEmpty); + expect(toolCall.arguments, isNotEmpty); + expect(toolCall.arguments['location'], contains('Vellore')); + expect(toolCall.arguments['unit'], 'celsius'); + expect(res.finishReason, FinishReason.stop); + }); + + test('Test multi tool call', () async { + final res = await model.invoke( + PromptValue.string( + "What's the weather in Vellore, India and in Barcelona, Spain?", + ), + ); + expect(res.output.toolCalls, hasLength(2)); + final toolCall1 = res.output.toolCalls.first; + expect(toolCall1.name, 'get_current_weather'); + expect(toolCall1.argumentsRaw, isNotEmpty); + expect(toolCall1.arguments, isNotEmpty); + expect(toolCall1.arguments['location'], 'Vellore, India'); + expect(toolCall1.arguments['unit'], 'celsius'); + final toolCall2 = res.output.toolCalls.last; + expect(toolCall2.name, 'get_current_weather'); + expect(toolCall2.argumentsRaw, isNotEmpty); + expect(toolCall2.arguments, isNotEmpty); + expect(toolCall2.arguments['location'], 'Barcelona, Spain'); + expect(toolCall2.arguments['unit'], 'celsius'); + expect(res.finishReason, FinishReason.stop); + }); + + test('Test ChatToolChoice.none', () async { + final res = await model.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: const ChatOllamaToolsOptions( + options: ChatOllamaOptions(model: defaultModel), + tools: [tool1], + toolChoice: ChatToolChoice.none, + ), + ); + expect(res.output.toolCalls, isEmpty); + expect(res.output.content, isNotEmpty); + }); + + test('Test ChatToolChoice.forced', () async { + final res = await model.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: ChatOllamaToolsOptions( + options: const ChatOllamaOptions(model: defaultModel), + tools: const [tool1, tool2], + toolChoice: ChatToolChoice.forced(name: tool2.name), + ), + ); + expect(res.output.toolCalls, hasLength(1)); + final toolCall = res.output.toolCalls.first; + expect(toolCall.name, tool2.name); + }); + + test( + 'Should throw exception if model did not respond in right JSON string format', + () async { + const toolOptions = ChatOllamaToolsOptions( + options: ChatOllamaOptions(model: defaultModel), + tools: [tool1], + toolsSystemPromptTemplate: + 'You have access to the following tools: {tools} ' + 'You must always select one of the above tools ' + 'and respond in plain text.', + ); + + expect( + () async => model.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: toolOptions, + ), + throwsException, + ); + }, + ); + + test( + 'Should return content if no other tools should be called for a given query', + () async { + const toolOptions = ChatOllamaToolsOptions( + options: ChatOllamaOptions(model: defaultModel), + tools: [], + ); + final res = await model.invoke( + PromptValue.string('Do you know the weather in Vellore, India?'), + options: 
toolOptions, + ); + expect(res.output.toolCalls, isEmpty); + expect(res.output.content, isNotEmpty); + }); + + test( + 'Should throw error if toolSystemPromptTemplate not in right format', + () async { + const toolOptions = ChatOllamaToolsOptions( + options: ChatOllamaOptions(model: defaultModel), + toolsSystemPromptTemplate: + 'You have access to the following tools: tools} ' + 'You must always select one of the above tools', + ); + expect( + () async => model.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: toolOptions, + ), + throwsException, + ); + }, + ); + + test('Test ChatOllamaToolsOptions', () async { + const toolOptions = ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: defaultModel, + ), + tools: [tool1], + toolsSystemPromptTemplate: 'toolSystemPromptTemplate', + ); + + expect(toolOptions.options?.model, defaultModel); + expect( + toolOptions.toolsSystemPromptTemplate, + 'toolSystemPromptTemplate', + ); + expect(toolOptions.tools![0], tool1); + }); + }, + ); +} From 8f78e7ccd78e227bb4b632a66270aa56023303ab Mon Sep 17 00:00:00 2001 From: David Miguel Date: Thu, 11 Jul 2024 23:42:21 +0200 Subject: [PATCH 182/251] docs: Update README.md --- packages/langchain/README.md | 1 + packages/langchain_ollama/README.md | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 652ef1be..51793fa8 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -110,6 +110,7 @@ The following integrations are available in LangChain.dart: | [ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) | | [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://ollama.ai) | | [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | | [Ollama Chat API](https://ollama.ai) | +| [ChatOllamaTools](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama_tools) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) with tool-calling capabilities | | [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) 
| | [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) | diff --git a/packages/langchain_ollama/README.md b/packages/langchain_ollama/README.md index e6d6d884..0c37e80f 100644 --- a/packages/langchain_ollama/README.md +++ b/packages/langchain_ollama/README.md @@ -13,7 +13,8 @@ - LLMs: * `Ollama`: wrapper around Ollama Completions API. - Chat models: - * `ChatOllama`: wrapper around Ollama Completions API in a chat-like fashion. + * `ChatOllama`: wrapper around Ollama Chat API in a chat-like fashion. + * `ChatOllamaTools`: Wrapper around Ollama Chat API that enables tool calling capabilities. - Embeddings: * `OllamaEmbeddings`: wrapper around Ollama Embeddings API. From 5b75bb5a4ca105205d282ba8c7728e719c5ab59e Mon Sep 17 00:00:00 2001 From: Konstantin S Date: Sat, 13 Jul 2024 01:45:52 +0400 Subject: [PATCH 183/251] feat: Add support for Ollama version and model info (#488) Co-authored-by: David Miguel --- packages/ollama_dart/README.md | 10 + .../ollama_dart/lib/src/generated/client.dart | 21 + .../lib/src/generated/schema/model_info.dart | 6 + .../generated/schema/model_information.dart | 61 +++ .../lib/src/generated/schema/schema.dart | 2 + .../src/generated/schema/schema.freezed.dart | 459 +++++++++++++++++- .../lib/src/generated/schema/schema.g.dart | 52 ++ .../generated/schema/version_response.dart | 40 ++ packages/ollama_dart/oas/ollama-curated.yaml | 42 ++ .../test/ollama_dart_chat_test.dart | 8 +- .../test/ollama_dart_completions_test.dart | 13 +- .../test/ollama_dart_embeddings_test.dart | 4 +- .../test/ollama_dart_models_test.dart | 24 +- .../test/ollama_dart_version_test.dart | 24 + 14 files changed, 748 insertions(+), 18 deletions(-) create mode 100644 packages/ollama_dart/lib/src/generated/schema/model_information.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/version_response.dart create mode 100644 packages/ollama_dart/test/ollama_dart_version_test.dart diff --git a/packages/ollama_dart/README.md b/packages/ollama_dart/README.md index 5b750447..cf822953 100644 --- a/packages/ollama_dart/README.md +++ b/packages/ollama_dart/README.md @@ -36,6 +36,7 @@ Unofficial Dart client for [Ollama](https://ollama.ai/) API. + [Pull a Model](#pull-a-model) + [Push a Model](#push-a-model) + [Check if a Blob Exists](#check-if-a-blob-exists) + * [Version](#version) - [Advance Usage](#advance-usage) * [Default HTTP client](#default-http-client) * [Custom HTTP client ](#custom-http-client) @@ -271,6 +272,15 @@ await client.checkBlob( If the blob doesn't exist, an `OllamaClientException` exception will be thrown. +### Version + +Get the version of the Ollama server. + +```dart +final res = await client.getVersion(); +print(res.version); +``` + ## Advance Usage ### Default HTTP client diff --git a/packages/ollama_dart/lib/src/generated/client.dart b/packages/ollama_dart/lib/src/generated/client.dart index 6c00d36f..0a530915 100644 --- a/packages/ollama_dart/lib/src/generated/client.dart +++ b/packages/ollama_dart/lib/src/generated/client.dart @@ -356,6 +356,27 @@ class OllamaClient { ); } + // ------------------------------------------ + // METHOD: getVersion + // ------------------------------------------ + + /// Returns the version of the Ollama server. + /// + /// This endpoint returns the version of the Ollama server. 
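+  ///
+  /// A minimal usage sketch (mirrors the README example in this package):
+  ///
+  /// ```dart
+  /// final res = await client.getVersion();
+  /// print(res.version);
+  /// ```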
+ /// + /// `GET` `http://localhost:11434/api/version` + Future getVersion() async { + final r = await makeRequest( + baseUrl: 'http://localhost:11434/api', + path: '/version', + method: HttpMethod.get, + isMultipart: false, + requestType: '', + responseType: 'application/json', + ); + return VersionResponse.fromJson(_jsonDecode(r)); + } + // ------------------------------------------ // METHOD: generateCompletion // ------------------------------------------ diff --git a/packages/ollama_dart/lib/src/generated/schema/model_info.dart b/packages/ollama_dart/lib/src/generated/schema/model_info.dart index cb212131..30c2a949 100644 --- a/packages/ollama_dart/lib/src/generated/schema/model_info.dart +++ b/packages/ollama_dart/lib/src/generated/schema/model_info.dart @@ -33,6 +33,10 @@ class ModelInfo with _$ModelInfo { /// Details about a model. @JsonKey(includeIfNull: false) ModelDetails? details, + /// Details about a model. + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? modelInfo, + /// The default messages for the model. @JsonKey(includeIfNull: false) List? messages, }) = _ModelInfo; @@ -49,6 +53,7 @@ class ModelInfo with _$ModelInfo { 'template', 'system', 'details', + 'model_info', 'messages' ]; @@ -66,6 +71,7 @@ class ModelInfo with _$ModelInfo { 'template': template, 'system': system, 'details': details, + 'model_info': modelInfo, 'messages': messages, }; } diff --git a/packages/ollama_dart/lib/src/generated/schema/model_information.dart b/packages/ollama_dart/lib/src/generated/schema/model_information.dart new file mode 100644 index 00000000..d10848f8 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/model_information.dart @@ -0,0 +1,61 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ModelInformation +// ========================================== + +/// Details about a model. +@freezed +class ModelInformation with _$ModelInformation { + const ModelInformation._(); + + /// Factory constructor for ModelInformation + const factory ModelInformation({ + /// The architecture of the model. + @JsonKey(name: 'general.architecture', includeIfNull: false) + String? generalArchitecture, + + /// The file type of the model. + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? generalFileType, + + /// The number of parameters in the model. + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? generalParameterCount, + + /// The number of parameters in the model. + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? generalQuantizationVersion, + }) = _ModelInformation; + + /// Object construction from a JSON representation + factory ModelInformation.fromJson(Map json) => + _$ModelInformationFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'general.architecture', + 'general.file_type', + 'general.parameter_count', + 'general.quantization_version' + ]; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'general.architecture': generalArchitecture, + 'general.file_type': generalFileType, + 'general.parameter_count': generalParameterCount, + 'general.quantization_version': generalQuantizationVersion, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.dart b/packages/ollama_dart/lib/src/generated/schema/schema.dart index ed6b2733..dae6d4fb 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.dart @@ -13,6 +13,7 @@ part 'schema.freezed.dart'; part 'generate_completion_request.dart'; part 'request_options.dart'; part 'response_format.dart'; +part 'version_response.dart'; part 'generate_completion_response.dart'; part 'generate_chat_completion_request.dart'; part 'generate_chat_completion_response.dart'; @@ -26,6 +27,7 @@ part 'create_model_status.dart'; part 'models_response.dart'; part 'model.dart'; part 'model_details.dart'; +part 'model_information.dart'; part 'process_response.dart'; part 'process_model.dart'; part 'model_info_request.dart'; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index db00fd66..83c14bb1 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -1650,6 +1650,154 @@ abstract class _RequestOptions extends RequestOptions { throw _privateConstructorUsedError; } +VersionResponse _$VersionResponseFromJson(Map json) { + return _VersionResponse.fromJson(json); +} + +/// @nodoc +mixin _$VersionResponse { + /// The version of the Ollama server. + @JsonKey(includeIfNull: false) + String? get version => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $VersionResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $VersionResponseCopyWith<$Res> { + factory $VersionResponseCopyWith( + VersionResponse value, $Res Function(VersionResponse) then) = + _$VersionResponseCopyWithImpl<$Res, VersionResponse>; + @useResult + $Res call({@JsonKey(includeIfNull: false) String? version}); +} + +/// @nodoc +class _$VersionResponseCopyWithImpl<$Res, $Val extends VersionResponse> + implements $VersionResponseCopyWith<$Res> { + _$VersionResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? version = freezed, + }) { + return _then(_value.copyWith( + version: freezed == version + ? _value.version + : version // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$VersionResponseImplCopyWith<$Res> + implements $VersionResponseCopyWith<$Res> { + factory _$$VersionResponseImplCopyWith(_$VersionResponseImpl value, + $Res Function(_$VersionResponseImpl) then) = + __$$VersionResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(includeIfNull: false) String? 
version}); +} + +/// @nodoc +class __$$VersionResponseImplCopyWithImpl<$Res> + extends _$VersionResponseCopyWithImpl<$Res, _$VersionResponseImpl> + implements _$$VersionResponseImplCopyWith<$Res> { + __$$VersionResponseImplCopyWithImpl( + _$VersionResponseImpl _value, $Res Function(_$VersionResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? version = freezed, + }) { + return _then(_$VersionResponseImpl( + version: freezed == version + ? _value.version + : version // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$VersionResponseImpl extends _VersionResponse { + const _$VersionResponseImpl({@JsonKey(includeIfNull: false) this.version}) + : super._(); + + factory _$VersionResponseImpl.fromJson(Map json) => + _$$VersionResponseImplFromJson(json); + + /// The version of the Ollama server. + @override + @JsonKey(includeIfNull: false) + final String? version; + + @override + String toString() { + return 'VersionResponse(version: $version)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$VersionResponseImpl && + (identical(other.version, version) || other.version == version)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, version); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$VersionResponseImplCopyWith<_$VersionResponseImpl> get copyWith => + __$$VersionResponseImplCopyWithImpl<_$VersionResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$VersionResponseImplToJson( + this, + ); + } +} + +abstract class _VersionResponse extends VersionResponse { + const factory _VersionResponse( + {@JsonKey(includeIfNull: false) final String? version}) = + _$VersionResponseImpl; + const _VersionResponse._() : super._(); + + factory _VersionResponse.fromJson(Map json) = + _$VersionResponseImpl.fromJson; + + @override + + /// The version of the Ollama server. + @JsonKey(includeIfNull: false) + String? get version; + @override + @JsonKey(ignore: true) + _$$VersionResponseImplCopyWith<_$VersionResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + GenerateCompletionResponse _$GenerateCompletionResponseFromJson( Map json) { return _GenerateCompletionResponse.fromJson(json); @@ -4912,6 +5060,266 @@ abstract class _ModelDetails extends ModelDetails { throw _privateConstructorUsedError; } +ModelInformation _$ModelInformationFromJson(Map json) { + return _ModelInformation.fromJson(json); +} + +/// @nodoc +mixin _$ModelInformation { + /// The architecture of the model. + @JsonKey(name: 'general.architecture', includeIfNull: false) + String? get generalArchitecture => throw _privateConstructorUsedError; + + /// The file type of the model. + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? get generalFileType => throw _privateConstructorUsedError; + + /// The number of parameters in the model. + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? get generalParameterCount => throw _privateConstructorUsedError; + + /// The number of parameters in the model. + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? 
get generalQuantizationVersion => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ModelInformationCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModelInformationCopyWith<$Res> { + factory $ModelInformationCopyWith( + ModelInformation value, $Res Function(ModelInformation) then) = + _$ModelInformationCopyWithImpl<$Res, ModelInformation>; + @useResult + $Res call( + {@JsonKey(name: 'general.architecture', includeIfNull: false) + String? generalArchitecture, + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? generalFileType, + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? generalParameterCount, + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? generalQuantizationVersion}); +} + +/// @nodoc +class _$ModelInformationCopyWithImpl<$Res, $Val extends ModelInformation> + implements $ModelInformationCopyWith<$Res> { + _$ModelInformationCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? generalArchitecture = freezed, + Object? generalFileType = freezed, + Object? generalParameterCount = freezed, + Object? generalQuantizationVersion = freezed, + }) { + return _then(_value.copyWith( + generalArchitecture: freezed == generalArchitecture + ? _value.generalArchitecture + : generalArchitecture // ignore: cast_nullable_to_non_nullable + as String?, + generalFileType: freezed == generalFileType + ? _value.generalFileType + : generalFileType // ignore: cast_nullable_to_non_nullable + as int?, + generalParameterCount: freezed == generalParameterCount + ? _value.generalParameterCount + : generalParameterCount // ignore: cast_nullable_to_non_nullable + as int?, + generalQuantizationVersion: freezed == generalQuantizationVersion + ? _value.generalQuantizationVersion + : generalQuantizationVersion // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ModelInformationImplCopyWith<$Res> + implements $ModelInformationCopyWith<$Res> { + factory _$$ModelInformationImplCopyWith(_$ModelInformationImpl value, + $Res Function(_$ModelInformationImpl) then) = + __$$ModelInformationImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'general.architecture', includeIfNull: false) + String? generalArchitecture, + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? generalFileType, + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? generalParameterCount, + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? generalQuantizationVersion}); +} + +/// @nodoc +class __$$ModelInformationImplCopyWithImpl<$Res> + extends _$ModelInformationCopyWithImpl<$Res, _$ModelInformationImpl> + implements _$$ModelInformationImplCopyWith<$Res> { + __$$ModelInformationImplCopyWithImpl(_$ModelInformationImpl _value, + $Res Function(_$ModelInformationImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? generalArchitecture = freezed, + Object? generalFileType = freezed, + Object? generalParameterCount = freezed, + Object? generalQuantizationVersion = freezed, + }) { + return _then(_$ModelInformationImpl( + generalArchitecture: freezed == generalArchitecture + ? 
_value.generalArchitecture + : generalArchitecture // ignore: cast_nullable_to_non_nullable + as String?, + generalFileType: freezed == generalFileType + ? _value.generalFileType + : generalFileType // ignore: cast_nullable_to_non_nullable + as int?, + generalParameterCount: freezed == generalParameterCount + ? _value.generalParameterCount + : generalParameterCount // ignore: cast_nullable_to_non_nullable + as int?, + generalQuantizationVersion: freezed == generalQuantizationVersion + ? _value.generalQuantizationVersion + : generalQuantizationVersion // ignore: cast_nullable_to_non_nullable + as int?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModelInformationImpl extends _ModelInformation { + const _$ModelInformationImpl( + {@JsonKey(name: 'general.architecture', includeIfNull: false) + this.generalArchitecture, + @JsonKey(name: 'general.file_type', includeIfNull: false) + this.generalFileType, + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + this.generalParameterCount, + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + this.generalQuantizationVersion}) + : super._(); + + factory _$ModelInformationImpl.fromJson(Map json) => + _$$ModelInformationImplFromJson(json); + + /// The architecture of the model. + @override + @JsonKey(name: 'general.architecture', includeIfNull: false) + final String? generalArchitecture; + + /// The file type of the model. + @override + @JsonKey(name: 'general.file_type', includeIfNull: false) + final int? generalFileType; + + /// The number of parameters in the model. + @override + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + final int? generalParameterCount; + + /// The number of parameters in the model. + @override + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + final int? generalQuantizationVersion; + + @override + String toString() { + return 'ModelInformation(generalArchitecture: $generalArchitecture, generalFileType: $generalFileType, generalParameterCount: $generalParameterCount, generalQuantizationVersion: $generalQuantizationVersion)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModelInformationImpl && + (identical(other.generalArchitecture, generalArchitecture) || + other.generalArchitecture == generalArchitecture) && + (identical(other.generalFileType, generalFileType) || + other.generalFileType == generalFileType) && + (identical(other.generalParameterCount, generalParameterCount) || + other.generalParameterCount == generalParameterCount) && + (identical(other.generalQuantizationVersion, + generalQuantizationVersion) || + other.generalQuantizationVersion == + generalQuantizationVersion)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, generalArchitecture, + generalFileType, generalParameterCount, generalQuantizationVersion); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModelInformationImplCopyWith<_$ModelInformationImpl> get copyWith => + __$$ModelInformationImplCopyWithImpl<_$ModelInformationImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ModelInformationImplToJson( + this, + ); + } +} + +abstract class _ModelInformation extends ModelInformation { + const factory _ModelInformation( + {@JsonKey(name: 'general.architecture', includeIfNull: false) + final String? generalArchitecture, + @JsonKey(name: 'general.file_type', includeIfNull: false) + final int? 
generalFileType, + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + final int? generalParameterCount, + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + final int? generalQuantizationVersion}) = _$ModelInformationImpl; + const _ModelInformation._() : super._(); + + factory _ModelInformation.fromJson(Map json) = + _$ModelInformationImpl.fromJson; + + @override + + /// The architecture of the model. + @JsonKey(name: 'general.architecture', includeIfNull: false) + String? get generalArchitecture; + @override + + /// The file type of the model. + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? get generalFileType; + @override + + /// The number of parameters in the model. + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? get generalParameterCount; + @override + + /// The number of parameters in the model. + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? get generalQuantizationVersion; + @override + @JsonKey(ignore: true) + _$$ModelInformationImplCopyWith<_$ModelInformationImpl> get copyWith => + throw _privateConstructorUsedError; +} + ProcessResponse _$ProcessResponseFromJson(Map json) { return _ProcessResponse.fromJson(json); } @@ -5573,6 +5981,10 @@ mixin _$ModelInfo { @JsonKey(includeIfNull: false) ModelDetails? get details => throw _privateConstructorUsedError; + /// Details about a model. + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? get modelInfo => throw _privateConstructorUsedError; + /// The default messages for the model. @JsonKey(includeIfNull: false) List? get messages => throw _privateConstructorUsedError; @@ -5595,9 +6007,12 @@ abstract class $ModelInfoCopyWith<$Res> { @JsonKey(includeIfNull: false) String? template, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) ModelDetails? details, + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? modelInfo, @JsonKey(includeIfNull: false) List? messages}); $ModelDetailsCopyWith<$Res>? get details; + $ModelInformationCopyWith<$Res>? get modelInfo; } /// @nodoc @@ -5619,6 +6034,7 @@ class _$ModelInfoCopyWithImpl<$Res, $Val extends ModelInfo> Object? template = freezed, Object? system = freezed, Object? details = freezed, + Object? modelInfo = freezed, Object? messages = freezed, }) { return _then(_value.copyWith( @@ -5646,6 +6062,10 @@ class _$ModelInfoCopyWithImpl<$Res, $Val extends ModelInfo> ? _value.details : details // ignore: cast_nullable_to_non_nullable as ModelDetails?, + modelInfo: freezed == modelInfo + ? _value.modelInfo + : modelInfo // ignore: cast_nullable_to_non_nullable + as ModelInformation?, messages: freezed == messages ? _value.messages : messages // ignore: cast_nullable_to_non_nullable @@ -5664,6 +6084,18 @@ class _$ModelInfoCopyWithImpl<$Res, $Val extends ModelInfo> return _then(_value.copyWith(details: value) as $Val); }); } + + @override + @pragma('vm:prefer-inline') + $ModelInformationCopyWith<$Res>? get modelInfo { + if (_value.modelInfo == null) { + return null; + } + + return $ModelInformationCopyWith<$Res>(_value.modelInfo!, (value) { + return _then(_value.copyWith(modelInfo: value) as $Val); + }); + } } /// @nodoc @@ -5681,10 +6113,14 @@ abstract class _$$ModelInfoImplCopyWith<$Res> @JsonKey(includeIfNull: false) String? template, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) ModelDetails? details, + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? 
modelInfo, @JsonKey(includeIfNull: false) List? messages}); @override $ModelDetailsCopyWith<$Res>? get details; + @override + $ModelInformationCopyWith<$Res>? get modelInfo; } /// @nodoc @@ -5704,6 +6140,7 @@ class __$$ModelInfoImplCopyWithImpl<$Res> Object? template = freezed, Object? system = freezed, Object? details = freezed, + Object? modelInfo = freezed, Object? messages = freezed, }) { return _then(_$ModelInfoImpl( @@ -5731,6 +6168,10 @@ class __$$ModelInfoImplCopyWithImpl<$Res> ? _value.details : details // ignore: cast_nullable_to_non_nullable as ModelDetails?, + modelInfo: freezed == modelInfo + ? _value.modelInfo + : modelInfo // ignore: cast_nullable_to_non_nullable + as ModelInformation?, messages: freezed == messages ? _value._messages : messages // ignore: cast_nullable_to_non_nullable @@ -5749,6 +6190,7 @@ class _$ModelInfoImpl extends _ModelInfo { @JsonKey(includeIfNull: false) this.template, @JsonKey(includeIfNull: false) this.system, @JsonKey(includeIfNull: false) this.details, + @JsonKey(name: 'model_info', includeIfNull: false) this.modelInfo, @JsonKey(includeIfNull: false) final List? messages}) : _messages = messages, super._(); @@ -5786,6 +6228,11 @@ class _$ModelInfoImpl extends _ModelInfo { @JsonKey(includeIfNull: false) final ModelDetails? details; + /// Details about a model. + @override + @JsonKey(name: 'model_info', includeIfNull: false) + final ModelInformation? modelInfo; + /// The default messages for the model. final List? _messages; @@ -5802,7 +6249,7 @@ class _$ModelInfoImpl extends _ModelInfo { @override String toString() { - return 'ModelInfo(license: $license, modelfile: $modelfile, parameters: $parameters, template: $template, system: $system, details: $details, messages: $messages)'; + return 'ModelInfo(license: $license, modelfile: $modelfile, parameters: $parameters, template: $template, system: $system, details: $details, modelInfo: $modelInfo, messages: $messages)'; } @override @@ -5819,6 +6266,8 @@ class _$ModelInfoImpl extends _ModelInfo { other.template == template) && (identical(other.system, system) || other.system == system) && (identical(other.details, details) || other.details == details) && + (identical(other.modelInfo, modelInfo) || + other.modelInfo == modelInfo) && const DeepCollectionEquality().equals(other._messages, _messages)); } @@ -5832,6 +6281,7 @@ class _$ModelInfoImpl extends _ModelInfo { template, system, details, + modelInfo, const DeepCollectionEquality().hash(_messages)); @JsonKey(ignore: true) @@ -5856,6 +6306,8 @@ abstract class _ModelInfo extends ModelInfo { @JsonKey(includeIfNull: false) final String? template, @JsonKey(includeIfNull: false) final String? system, @JsonKey(includeIfNull: false) final ModelDetails? details, + @JsonKey(name: 'model_info', includeIfNull: false) + final ModelInformation? modelInfo, @JsonKey(includeIfNull: false) final List? messages}) = _$ModelInfoImpl; const _ModelInfo._() : super._(); @@ -5895,6 +6347,11 @@ abstract class _ModelInfo extends ModelInfo { ModelDetails? get details; @override + /// Details about a model. + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? get modelInfo; + @override + /// The default messages for the model. @JsonKey(includeIfNull: false) List? 
get messages; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index 3443737b..1ad66a40 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -133,6 +133,26 @@ Map _$$RequestOptionsImplToJson( return val; } +_$VersionResponseImpl _$$VersionResponseImplFromJson( + Map json) => + _$VersionResponseImpl( + version: json['version'] as String?, + ); + +Map _$$VersionResponseImplToJson( + _$VersionResponseImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('version', instance.version); + return val; +} + _$GenerateCompletionResponseImpl _$$GenerateCompletionResponseImplFromJson( Map json) => _$GenerateCompletionResponseImpl( @@ -476,6 +496,33 @@ Map _$$ModelDetailsImplToJson(_$ModelDetailsImpl instance) { return val; } +_$ModelInformationImpl _$$ModelInformationImplFromJson( + Map json) => + _$ModelInformationImpl( + generalArchitecture: json['general.architecture'] as String?, + generalFileType: json['general.file_type'] as int?, + generalParameterCount: json['general.parameter_count'] as int?, + generalQuantizationVersion: json['general.quantization_version'] as int?, + ); + +Map _$$ModelInformationImplToJson( + _$ModelInformationImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('general.architecture', instance.generalArchitecture); + writeNotNull('general.file_type', instance.generalFileType); + writeNotNull('general.parameter_count', instance.generalParameterCount); + writeNotNull( + 'general.quantization_version', instance.generalQuantizationVersion); + return val; +} + _$ProcessResponseImpl _$$ProcessResponseImplFromJson( Map json) => _$ProcessResponseImpl( @@ -550,6 +597,10 @@ _$ModelInfoImpl _$$ModelInfoImplFromJson(Map json) => details: json['details'] == null ? null : ModelDetails.fromJson(json['details'] as Map), + modelInfo: json['model_info'] == null + ? null + : ModelInformation.fromJson( + json['model_info'] as Map), messages: (json['messages'] as List?) ?.map((e) => Message.fromJson(e as Map)) .toList(), @@ -570,6 +621,7 @@ Map _$$ModelInfoImplToJson(_$ModelInfoImpl instance) { writeNotNull('template', instance.template); writeNotNull('system', instance.system); writeNotNull('details', instance.details?.toJson()); + writeNotNull('model_info', instance.modelInfo?.toJson()); writeNotNull('messages', instance.messages?.map((e) => e.toJson()).toList()); return val; } diff --git a/packages/ollama_dart/lib/src/generated/schema/version_response.dart b/packages/ollama_dart/lib/src/generated/schema/version_response.dart new file mode 100644 index 00000000..21d3259e --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/version_response.dart @@ -0,0 +1,40 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: VersionResponse +// ========================================== + +/// The response class for the version endpoint. +@freezed +class VersionResponse with _$VersionResponse { + const VersionResponse._(); + + /// Factory constructor for VersionResponse + const factory VersionResponse({ + /// The version of the Ollama server. 
+    @JsonKey(includeIfNull: false) String? version,
+  }) = _VersionResponse;
+
+  /// Object construction from a JSON representation
+  factory VersionResponse.fromJson(Map<String, dynamic> json) =>
+      _$VersionResponseFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = ['version'];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'version': version,
+    };
+  }
+}
diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml
index 12159978..08ec6845 100644
--- a/packages/ollama_dart/oas/ollama-curated.yaml
+++ b/packages/ollama_dart/oas/ollama-curated.yaml
@@ -20,6 +20,18 @@ tags:
     description: List and describe the various models available.
 
 paths:
+  /version:
+    get:
+      operationId: getVersion
+      summary: Returns the version of the Ollama server.
+      description: This endpoint returns the version of the Ollama server.
+      responses:
+        '200':
+          description: Successful operation.
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/VersionResponse'
   /generate:
     post:
       operationId: generateCompletion
@@ -501,6 +513,13 @@ components:
         Note: it's important to instruct the model to use JSON in the prompt. Otherwise, the model may generate large amounts of whitespace.
       enum:
         - json
+    VersionResponse:
+      type: object
+      description: The response class for the version endpoint.
+      properties:
+        version:
+          type: string
+          description: The version of the Ollama server.
     GenerateCompletionResponse:
       type: object
       description: The response class for the generate endpoint.
@@ -792,6 +811,27 @@ components:
         quantization_level:
           type: string
           description: The quantization level of the model.
+    ModelInformation:
+      type: object
+      description: Details about a model.
+      properties:
+        general.architecture:
+          type: string
+          description: The architecture of the model.
+        general.file_type:
+          type: integer
+          nullable: true
+          description: The file type of the model.
+        general.parameter_count:
+          type: integer
+          format: int64
+          nullable: true
+          description: The number of parameters in the model.
+        general.quantization_version:
+          type: integer
+          nullable: true
+          description: The quantization version of the model.
+
     ProcessResponse:
       type: object
      description: Response class for the list running models endpoint.
@@ -869,6 +909,8 @@ components:
           description: The system prompt for the model.
         
details: $ref: '#/components/schemas/ModelDetails' + model_info: + $ref: '#/components/schemas/ModelInformation' messages: type: array nullable: true diff --git a/packages/ollama_dart/test/ollama_dart_chat_test.dart b/packages/ollama_dart/test/ollama_dart_chat_test.dart index af90c448..807e1b67 100644 --- a/packages/ollama_dart/test/ollama_dart_chat_test.dart +++ b/packages/ollama_dart/test/ollama_dart_chat_test.dart @@ -7,19 +7,19 @@ void main() { group('Ollama Generate Completions API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3:latest'; - const visionModel = 'llava:latest'; + const defaultModel = 'gemma2'; + const visionModel = 'llava'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model == defaultModel), + res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), isNotNull, ); expect( - res.models?.firstWhere((final m) => m.model == visionModel), + res.models?.firstWhere((final m) => m.model!.startsWith(visionModel)), isNotNull, ); }); diff --git a/packages/ollama_dart/test/ollama_dart_completions_test.dart b/packages/ollama_dart/test/ollama_dart_completions_test.dart index 5c4b2981..5a134b37 100644 --- a/packages/ollama_dart/test/ollama_dart_completions_test.dart +++ b/packages/ollama_dart/test/ollama_dart_completions_test.dart @@ -7,20 +7,19 @@ void main() { group('Ollama Generate Completions API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3:latest'; - const visionModel = 'llava:latest'; + const defaultModel = 'gemma2'; + const visionModel = 'llava'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model == defaultModel), + res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), isNotNull, ); - expect( - res.models?.firstWhere((final m) => m.model == visionModel), + res.models?.firstWhere((final m) => m.model!.startsWith(visionModel)), isNotNull, ); }); @@ -76,9 +75,9 @@ void main() { }); test('Test call completions API with raw mode', () async { - const testPrompt = '[INST] List the numbers from 1 to 9 in order. ' + const testPrompt = 'List the numbers from 1 to 9 in order. ' 'Output ONLY the numbers in one line without any spaces or commas. 
' - 'NUMBERS: [/INST]'; + 'NUMBERS:'; final res = await client.generateCompletion( request: const GenerateCompletionRequest( diff --git a/packages/ollama_dart/test/ollama_dart_embeddings_test.dart b/packages/ollama_dart/test/ollama_dart_embeddings_test.dart index c32701a8..e6ff8b6f 100644 --- a/packages/ollama_dart/test/ollama_dart_embeddings_test.dart +++ b/packages/ollama_dart/test/ollama_dart_embeddings_test.dart @@ -7,14 +7,14 @@ void main() { group('Ollama Generate Embeddings API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3:latest'; + const defaultModel = 'mxbai-embed-large:335m'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model == defaultModel), + res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), isNotNull, ); }); diff --git a/packages/ollama_dart/test/ollama_dart_models_test.dart b/packages/ollama_dart/test/ollama_dart_models_test.dart index e511bff4..b94698ad 100644 --- a/packages/ollama_dart/test/ollama_dart_models_test.dart +++ b/packages/ollama_dart/test/ollama_dart_models_test.dart @@ -7,14 +7,14 @@ void main() { group('Ollama Models API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3:latest'; + const defaultModel = 'gemma2'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model == defaultModel), + res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), isNotNull, ); }); @@ -62,7 +62,10 @@ void main() { test('Test list models', () async { final res = await client.listModels(); - expect(res.models?.any((final m) => m.model == defaultModel), isTrue); + expect( + res.models?.any((final m) => m.model!.startsWith(defaultModel)), + isTrue, + ); }); test('Test list running models', () async { @@ -75,7 +78,10 @@ void main() { ); final res = await client.listRunningModels(); - expect(res.models?.any((final m) => m.model == defaultModel), isTrue); + expect( + res.models?.any((final m) => m.model!.startsWith(defaultModel)), + isTrue, + ); }); test('Test show model info', () async { @@ -84,7 +90,17 @@ void main() { ); expect(res.license, isNotEmpty); expect(res.modelfile, isNotEmpty); + expect(res.parameters, isNotEmpty); expect(res.template, isNotEmpty); + expect(res.details?.format, isNotEmpty); + expect(res.details?.family, isNotEmpty); + expect(res.details?.families, isNotEmpty); + expect(res.details?.parameterSize, isNotEmpty); + expect(res.details?.quantizationLevel, isNotEmpty); + expect(res.modelInfo?.generalArchitecture, isNotEmpty); + expect(res.modelInfo?.generalFileType, greaterThan(0)); + expect(res.modelInfo?.generalParameterCount, greaterThan(0)); + expect(res.modelInfo?.generalQuantizationVersion, greaterThan(0)); }); test('Test copy model', () async { diff --git a/packages/ollama_dart/test/ollama_dart_version_test.dart b/packages/ollama_dart/test/ollama_dart_version_test.dart new file mode 100644 index 00000000..002f8167 --- /dev/null +++ b/packages/ollama_dart/test/ollama_dart_version_test.dart @@ -0,0 +1,24 @@ +import 'dart:io'; + +import 'package:ollama_dart/ollama_dart.dart'; +import 'package:test/test.dart'; + +void main() { + group('Ollama Version API tests', + skip: Platform.environment.containsKey('CI'), () { + late OllamaClient client; + + setUp(() async { + client 
= OllamaClient(); + }); + + tearDown(() { + client.endSession(); + }); + + test('Test get version', () async { + final res = await client.getVersion(); + expect(res.version, isNotEmpty); + }); + }); +} From 76a6e5501744a9b3515b99d31322b7d2a6e9e3ce Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 13 Jul 2024 00:05:55 +0200 Subject: [PATCH 184/251] refactor!: Change Ollama push model status type from enum to String (#489) --- .../generated/schema/push_model_response.dart | 6 +-- .../generated/schema/push_model_status.dart | 21 ---------- .../lib/src/generated/schema/schema.dart | 1 - .../src/generated/schema/schema.freezed.dart | 39 ++++++------------- .../lib/src/generated/schema/schema.g.dart | 12 +----- packages/ollama_dart/oas/ollama-curated.yaml | 11 +----- .../test/ollama_dart_models_test.dart | 6 +-- 7 files changed, 20 insertions(+), 76 deletions(-) delete mode 100644 packages/ollama_dart/lib/src/generated/schema/push_model_status.dart diff --git a/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart b/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart index bdfb3574..d3bb5142 100644 --- a/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart +++ b/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart @@ -16,11 +16,7 @@ class PushModelResponse with _$PushModelResponse { /// Factory constructor for PushModelResponse const factory PushModelResponse({ /// Status pushing the model. - @JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue, - ) - PushModelStatus? status, + @JsonKey(includeIfNull: false) String? status, /// the model's digest @JsonKey(includeIfNull: false) String? digest, diff --git a/packages/ollama_dart/lib/src/generated/schema/push_model_status.dart b/packages/ollama_dart/lib/src/generated/schema/push_model_status.dart deleted file mode 100644 index c043c843..00000000 --- a/packages/ollama_dart/lib/src/generated/schema/push_model_status.dart +++ /dev/null @@ -1,21 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of ollama_schema; - -// ========================================== -// ENUM: PushModelStatus -// ========================================== - -/// Status pushing the model. -enum PushModelStatus { - @JsonValue('retrieving manifest') - retrievingManifest, - @JsonValue('starting upload') - startingUpload, - @JsonValue('pushing manifest') - pushingManifest, - @JsonValue('success') - success, -} diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.dart b/packages/ollama_dart/lib/src/generated/schema/schema.dart index dae6d4fb..5ed7214c 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.dart @@ -39,4 +39,3 @@ part 'pull_model_response.dart'; part 'pull_model_status.dart'; part 'push_model_request.dart'; part 'push_model_response.dart'; -part 'push_model_status.dart'; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index 83c14bb1..bf7cf75c 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -7485,9 +7485,8 @@ PushModelResponse _$PushModelResponseFromJson(Map json) { /// @nodoc mixin _$PushModelResponse { /// Status pushing the model. 
- @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - PushModelStatus? get status => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? get status => throw _privateConstructorUsedError; /// the model's digest @JsonKey(includeIfNull: false) @@ -7514,10 +7513,7 @@ abstract class $PushModelResponseCopyWith<$Res> { _$PushModelResponseCopyWithImpl<$Res, PushModelResponse>; @useResult $Res call( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - PushModelStatus? status, + {@JsonKey(includeIfNull: false) String? status, @JsonKey(includeIfNull: false) String? digest, @JsonKey(includeIfNull: false) int? total, @JsonKey(includeIfNull: false) int? completed}); @@ -7545,7 +7541,7 @@ class _$PushModelResponseCopyWithImpl<$Res, $Val extends PushModelResponse> status: freezed == status ? _value.status : status // ignore: cast_nullable_to_non_nullable - as PushModelStatus?, + as String?, digest: freezed == digest ? _value.digest : digest // ignore: cast_nullable_to_non_nullable @@ -7571,10 +7567,7 @@ abstract class _$$PushModelResponseImplCopyWith<$Res> @override @useResult $Res call( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - PushModelStatus? status, + {@JsonKey(includeIfNull: false) String? status, @JsonKey(includeIfNull: false) String? digest, @JsonKey(includeIfNull: false) int? total, @JsonKey(includeIfNull: false) int? completed}); @@ -7600,7 +7593,7 @@ class __$$PushModelResponseImplCopyWithImpl<$Res> status: freezed == status ? _value.status : status // ignore: cast_nullable_to_non_nullable - as PushModelStatus?, + as String?, digest: freezed == digest ? _value.digest : digest // ignore: cast_nullable_to_non_nullable @@ -7621,10 +7614,7 @@ class __$$PushModelResponseImplCopyWithImpl<$Res> @JsonSerializable() class _$PushModelResponseImpl extends _PushModelResponse { const _$PushModelResponseImpl( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - this.status, + {@JsonKey(includeIfNull: false) this.status, @JsonKey(includeIfNull: false) this.digest, @JsonKey(includeIfNull: false) this.total, @JsonKey(includeIfNull: false) this.completed}) @@ -7635,9 +7625,8 @@ class _$PushModelResponseImpl extends _PushModelResponse { /// Status pushing the model. @override - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final PushModelStatus? status; + @JsonKey(includeIfNull: false) + final String? status; /// the model's digest @override @@ -7693,10 +7682,7 @@ class _$PushModelResponseImpl extends _PushModelResponse { abstract class _PushModelResponse extends PushModelResponse { const factory _PushModelResponse( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final PushModelStatus? status, + {@JsonKey(includeIfNull: false) final String? status, @JsonKey(includeIfNull: false) final String? digest, @JsonKey(includeIfNull: false) final int? total, @JsonKey(includeIfNull: false) final int? completed}) = @@ -7709,9 +7695,8 @@ abstract class _PushModelResponse extends PushModelResponse { @override /// Status pushing the model. - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - PushModelStatus? get status; + @JsonKey(includeIfNull: false) + String? 
get status; @override /// the model's digest diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index 1ad66a40..a4aee619 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -749,8 +749,7 @@ Map _$$PushModelRequestImplToJson( _$PushModelResponseImpl _$$PushModelResponseImplFromJson( Map json) => _$PushModelResponseImpl( - status: $enumDecodeNullable(_$PushModelStatusEnumMap, json['status'], - unknownValue: JsonKey.nullForUndefinedEnumValue), + status: json['status'] as String?, digest: json['digest'] as String?, total: json['total'] as int?, completed: json['completed'] as int?, @@ -766,16 +765,9 @@ Map _$$PushModelResponseImplToJson( } } - writeNotNull('status', _$PushModelStatusEnumMap[instance.status]); + writeNotNull('status', instance.status); writeNotNull('digest', instance.digest); writeNotNull('total', instance.total); writeNotNull('completed', instance.completed); return val; } - -const _$PushModelStatusEnumMap = { - PushModelStatus.retrievingManifest: 'retrieving manifest', - PushModelStatus.startingUpload: 'starting upload', - PushModelStatus.pushingManifest: 'pushing manifest', - PushModelStatus.success: 'success', -}; diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index 08ec6845..9d3a507e 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -1037,7 +1037,8 @@ components: description: Response class for pushing a model. properties: status: - $ref: '#/components/schemas/PushModelStatus' + type: string + description: Status pushing the model. digest: type: string description: the model's digest @@ -1052,11 +1053,3 @@ components: format: int64 description: Total bytes transferred. example: 2142590208 - PushModelStatus: - type: string - description: Status pushing the model. - enum: - - retrieving manifest - - starting upload - - pushing manifest - - success diff --git a/packages/ollama_dart/test/ollama_dart_models_test.dart b/packages/ollama_dart/test/ollama_dart_models_test.dart index b94698ad..03086e4b 100644 --- a/packages/ollama_dart/test/ollama_dart_models_test.dart +++ b/packages/ollama_dart/test/ollama_dart_models_test.dart @@ -158,7 +158,7 @@ void main() { request: const PushModelRequest(model: 'mattw/pygmalion:latest'), ); - expect(res.status, PushModelStatus.success); + expect(res.status, equals('success')); }); test('Test push model stream', skip: true, () async { @@ -167,13 +167,13 @@ void main() { ); int count = 0; - PushModelStatus? lastStatus; + String? 
lastStatus; await for (final res in stream) { lastStatus = res.status; count++; } expect(count, greaterThan(1)); - expect(lastStatus, equals(PushModelStatus.success)); + expect(lastStatus, equals('success')); }); test('Test check blob', skip: true, () async { From 5ee56586acb8d24221f5f3d6b3938201cf181cdb Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 18 Jul 2024 23:24:59 +0200 Subject: [PATCH 185/251] feat: Add support for overrides in the file search tool in openai_dart (#491) --- .../openai_dart/lib/src/generated/client.dart | 4 +- .../src/generated/schema/assistant_tools.dart | 57 ++- .../schema/create_batch_request.dart | 2 +- .../create_fine_tuning_job_request.dart | 7 +- ...ontent_text_annotations_file_citation.dart | 6 +- .../src/generated/schema/schema.freezed.dart | 384 +++++++++++++++--- .../lib/src/generated/schema/schema.g.dart | 64 ++- packages/openai_dart/oas/openapi_curated.yaml | 35 +- .../openai_dart/oas/openapi_official.yaml | 224 ++++++++-- 9 files changed, 637 insertions(+), 146 deletions(-) diff --git a/packages/openai_dart/lib/src/generated/client.dart b/packages/openai_dart/lib/src/generated/client.dart index aca8f85f..66c918d1 100644 --- a/packages/openai_dart/lib/src/generated/client.dart +++ b/packages/openai_dart/lib/src/generated/client.dart @@ -58,7 +58,7 @@ class OpenAIClientException implements Exception { // CLASS: OpenAIClient // ========================================== -/// Client for OpenAI API (v.2.0.0) +/// Client for OpenAI API (v.2.1.0) /// /// The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. class OpenAIClient { @@ -1846,7 +1846,7 @@ class OpenAIClient { // METHOD: cancelBatch // ------------------------------------------ - /// Cancels an in-progress batch. + /// Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file. /// /// `batchId`: The ID of the batch to cancel. /// diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart index 6e45f715..043a7d9a 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart @@ -30,7 +30,11 @@ sealed class AssistantTools with _$AssistantTools { /// FileSearch tool const factory AssistantTools.fileSearch({ /// The type of tool being defined: `file_search` - @Default('file_search') String type, + required String type, + + /// Overrides for the file search tool. + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch, }) = AssistantToolsFileSearch; // ------------------------------------------ @@ -63,3 +67,54 @@ enum AssistantToolsEnumType { @JsonValue('function') function, } + +// ========================================== +// CLASS: AssistantToolsFileSearchFileSearch +// ========================================== + +/// Overrides for the file search tool. +@freezed +class AssistantToolsFileSearchFileSearch + with _$AssistantToolsFileSearchFileSearch { + const AssistantToolsFileSearchFileSearch._(); + + /// Factory constructor for AssistantToolsFileSearchFileSearch + const factory AssistantToolsFileSearchFileSearch({ + /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// and 5 for gpt-3.5-turbo. 
This number should be between 1 and 50 inclusive. + /// + /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search + /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + @JsonKey(name: 'max_num_results', includeIfNull: false) int? maxNumResults, + }) = _AssistantToolsFileSearchFileSearch; + + /// Object construction from a JSON representation + factory AssistantToolsFileSearchFileSearch.fromJson( + Map json) => + _$AssistantToolsFileSearchFileSearchFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['max_num_results']; + + /// Validation constants + static const maxNumResultsMinValue = 1; + static const maxNumResultsMaxValue = 50; + + /// Perform validations on the schema property values + String? validateSchema() { + if (maxNumResults != null && maxNumResults! < maxNumResultsMinValue) { + return "The value of 'maxNumResults' cannot be < $maxNumResultsMinValue"; + } + if (maxNumResults != null && maxNumResults! > maxNumResultsMaxValue) { + return "The value of 'maxNumResults' cannot be > $maxNumResultsMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'max_num_results': maxNumResults, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart b/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart index 5014b4f1..b7a86f72 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart @@ -19,7 +19,7 @@ class CreateBatchRequest with _$CreateBatchRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. + /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. @JsonKey(name: 'input_file_id') required String inputFileId, /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. diff --git a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart index 14929898..17b649aa 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart @@ -23,7 +23,12 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + /// `fine-tune`. 
+ /// + /// The contents of the file should differ depending on if the model uses the + /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. @JsonKey(name: 'training_file') required String trainingFile, diff --git a/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart b/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart index 1e6807c9..c8d3a8f1 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart @@ -18,9 +18,6 @@ class MessageContentTextAnnotationsFileCitation const factory MessageContentTextAnnotationsFileCitation({ /// The ID of the specific File the citation is from. @JsonKey(name: 'file_id') required String fileId, - - /// The specific quote in the file. - @JsonKey(includeIfNull: false) String? quote, }) = _MessageContentTextAnnotationsFileCitation; /// Object construction from a JSON representation @@ -29,7 +26,7 @@ class MessageContentTextAnnotationsFileCitation _$MessageContentTextAnnotationsFileCitationFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_id', 'quote']; + static const List propertyNames = ['file_id']; /// Perform validations on the schema property values String? validateSchema() { @@ -40,7 +37,6 @@ class MessageContentTextAnnotationsFileCitation Map toMap() { return { 'file_id': fileId, - 'quote': quote, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 16efa483..0bc9015f 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -13428,7 +13428,12 @@ mixin _$CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + /// `fine-tune`. + /// + /// The contents of the file should differ depending on if the model uses the + /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. @JsonKey(name: 'training_file') @@ -13682,7 +13687,12 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + /// `fine-tune`. 
+ /// + /// The contents of the file should differ depending on if the model uses the + /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. @override @@ -13815,7 +13825,12 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + /// `fine-tune`. + /// + /// The contents of the file should differ depending on if the model uses the + /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. @JsonKey(name: 'training_file') @@ -40381,10 +40396,6 @@ mixin _$MessageContentTextAnnotationsFileCitation { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; - /// The specific quote in the file. - @JsonKey(includeIfNull: false) - String? get quote => throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $MessageContentTextAnnotationsFileCitationCopyWith< @@ -40400,9 +40411,7 @@ abstract class $MessageContentTextAnnotationsFileCitationCopyWith<$Res> { _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, MessageContentTextAnnotationsFileCitation>; @useResult - $Res call( - {@JsonKey(name: 'file_id') String fileId, - @JsonKey(includeIfNull: false) String? quote}); + $Res call({@JsonKey(name: 'file_id') String fileId}); } /// @nodoc @@ -40421,17 +40430,12 @@ class _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, @override $Res call({ Object? fileId = null, - Object? quote = freezed, }) { return _then(_value.copyWith( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, - quote: freezed == quote - ? _value.quote - : quote // ignore: cast_nullable_to_non_nullable - as String?, ) as $Val); } } @@ -40445,9 +40449,7 @@ abstract class _$$MessageContentTextAnnotationsFileCitationImplCopyWith<$Res> __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(name: 'file_id') String fileId, - @JsonKey(includeIfNull: false) String? quote}); + $Res call({@JsonKey(name: 'file_id') String fileId}); } /// @nodoc @@ -40464,17 +40466,12 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> @override $Res call({ Object? fileId = null, - Object? quote = freezed, }) { return _then(_$MessageContentTextAnnotationsFileCitationImpl( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, - quote: freezed == quote - ? 
_value.quote - : quote // ignore: cast_nullable_to_non_nullable - as String?, )); } } @@ -40484,8 +40481,7 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> class _$MessageContentTextAnnotationsFileCitationImpl extends _MessageContentTextAnnotationsFileCitation { const _$MessageContentTextAnnotationsFileCitationImpl( - {@JsonKey(name: 'file_id') required this.fileId, - @JsonKey(includeIfNull: false) this.quote}) + {@JsonKey(name: 'file_id') required this.fileId}) : super._(); factory _$MessageContentTextAnnotationsFileCitationImpl.fromJson( @@ -40497,14 +40493,9 @@ class _$MessageContentTextAnnotationsFileCitationImpl @JsonKey(name: 'file_id') final String fileId; - /// The specific quote in the file. - @override - @JsonKey(includeIfNull: false) - final String? quote; - @override String toString() { - return 'MessageContentTextAnnotationsFileCitation(fileId: $fileId, quote: $quote)'; + return 'MessageContentTextAnnotationsFileCitation(fileId: $fileId)'; } @override @@ -40512,13 +40503,12 @@ class _$MessageContentTextAnnotationsFileCitationImpl return identical(this, other) || (other.runtimeType == runtimeType && other is _$MessageContentTextAnnotationsFileCitationImpl && - (identical(other.fileId, fileId) || other.fileId == fileId) && - (identical(other.quote, quote) || other.quote == quote)); + (identical(other.fileId, fileId) || other.fileId == fileId)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, fileId, quote); + int get hashCode => Object.hash(runtimeType, fileId); @JsonKey(ignore: true) @override @@ -40541,8 +40531,7 @@ class _$MessageContentTextAnnotationsFileCitationImpl abstract class _MessageContentTextAnnotationsFileCitation extends MessageContentTextAnnotationsFileCitation { const factory _MessageContentTextAnnotationsFileCitation( - {@JsonKey(name: 'file_id') required final String fileId, - @JsonKey(includeIfNull: false) final String? quote}) = + {@JsonKey(name: 'file_id') required final String fileId}) = _$MessageContentTextAnnotationsFileCitationImpl; const _MessageContentTextAnnotationsFileCitation._() : super._(); @@ -40556,11 +40545,6 @@ abstract class _MessageContentTextAnnotationsFileCitation @JsonKey(name: 'file_id') String get fileId; @override - - /// The specific quote in the file. - @JsonKey(includeIfNull: false) - String? get quote; - @override @JsonKey(ignore: true) _$$MessageContentTextAnnotationsFileCitationImplCopyWith< _$MessageContentTextAnnotationsFileCitationImpl> @@ -48068,7 +48052,7 @@ mixin _$CreateBatchRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. + /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. @JsonKey(name: 'input_file_id') String get inputFileId => throw _privateConstructorUsedError; @@ -48214,7 +48198,7 @@ class _$CreateBatchRequestImpl extends _CreateBatchRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. 
+ /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. @override @JsonKey(name: 'input_file_id') final String inputFileId; @@ -48300,7 +48284,7 @@ abstract class _CreateBatchRequest extends CreateBatchRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. + /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. @JsonKey(name: 'input_file_id') String get inputFileId; @override @@ -52790,21 +52774,33 @@ mixin _$AssistantTools { @optionalTypeArgs TResult when({ required TResult Function(String type) codeInterpreter, - required TResult Function(String type) fileSearch, + required TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch) + fileSearch, required TResult Function(String type, FunctionObject function) function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(String type)? codeInterpreter, - TResult? Function(String type)? fileSearch, + TResult? Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult? Function(String type, FunctionObject function)? function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(String type)? codeInterpreter, - TResult Function(String type)? fileSearch, + TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult Function(String type, FunctionObject function)? function, required TResult orElse(), }) => @@ -52952,7 +52948,11 @@ class _$AssistantToolsCodeInterpreterImpl @optionalTypeArgs TResult when({ required TResult Function(String type) codeInterpreter, - required TResult Function(String type) fileSearch, + required TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch) + fileSearch, required TResult Function(String type, FunctionObject function) function, }) { return codeInterpreter(type); @@ -52962,7 +52962,11 @@ class _$AssistantToolsCodeInterpreterImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(String type)? codeInterpreter, - TResult? Function(String type)? fileSearch, + TResult? Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult? Function(String type, FunctionObject function)? function, }) { return codeInterpreter?.call(type); @@ -52972,7 +52976,11 @@ class _$AssistantToolsCodeInterpreterImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(String type)? codeInterpreter, - TResult Function(String type)? fileSearch, + TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult Function(String type, FunctionObject function)? 
function, required TResult orElse(), }) { @@ -53053,7 +53061,12 @@ abstract class _$$AssistantToolsFileSearchImplCopyWith<$Res> __$$AssistantToolsFileSearchImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type}); + $Res call( + {String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch}); + + $AssistantToolsFileSearchFileSearchCopyWith<$Res>? get fileSearch; } /// @nodoc @@ -53069,32 +53082,57 @@ class __$$AssistantToolsFileSearchImplCopyWithImpl<$Res> @override $Res call({ Object? type = null, + Object? fileSearch = freezed, }) { return _then(_$AssistantToolsFileSearchImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, + fileSearch: freezed == fileSearch + ? _value.fileSearch + : fileSearch // ignore: cast_nullable_to_non_nullable + as AssistantToolsFileSearchFileSearch?, )); } + + @override + @pragma('vm:prefer-inline') + $AssistantToolsFileSearchFileSearchCopyWith<$Res>? get fileSearch { + if (_value.fileSearch == null) { + return null; + } + + return $AssistantToolsFileSearchFileSearchCopyWith<$Res>(_value.fileSearch!, + (value) { + return _then(_value.copyWith(fileSearch: value)); + }); + } } /// @nodoc @JsonSerializable() class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { - const _$AssistantToolsFileSearchImpl({this.type = 'file_search'}) : super._(); + const _$AssistantToolsFileSearchImpl( + {required this.type, + @JsonKey(name: 'file_search', includeIfNull: false) this.fileSearch}) + : super._(); factory _$AssistantToolsFileSearchImpl.fromJson(Map json) => _$$AssistantToolsFileSearchImplFromJson(json); /// The type of tool being defined: `file_search` @override - @JsonKey() final String type; + /// Overrides for the file search tool. + @override + @JsonKey(name: 'file_search', includeIfNull: false) + final AssistantToolsFileSearchFileSearch? fileSearch; + @override String toString() { - return 'AssistantTools.fileSearch(type: $type)'; + return 'AssistantTools.fileSearch(type: $type, fileSearch: $fileSearch)'; } @override @@ -53102,12 +53140,14 @@ class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { return identical(this, other) || (other.runtimeType == runtimeType && other is _$AssistantToolsFileSearchImpl && - (identical(other.type, type) || other.type == type)); + (identical(other.type, type) || other.type == type) && + (identical(other.fileSearch, fileSearch) || + other.fileSearch == fileSearch)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type); + int get hashCode => Object.hash(runtimeType, type, fileSearch); @JsonKey(ignore: true) @override @@ -53120,32 +53160,44 @@ class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { @optionalTypeArgs TResult when({ required TResult Function(String type) codeInterpreter, - required TResult Function(String type) fileSearch, + required TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch) + fileSearch, required TResult Function(String type, FunctionObject function) function, }) { - return fileSearch(type); + return fileSearch(type, this.fileSearch); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(String type)? codeInterpreter, - TResult? Function(String type)? fileSearch, + TResult? Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? 
+ fileSearch, TResult? Function(String type, FunctionObject function)? function, }) { - return fileSearch?.call(type); + return fileSearch?.call(type, this.fileSearch); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(String type)? codeInterpreter, - TResult Function(String type)? fileSearch, + TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult Function(String type, FunctionObject function)? function, required TResult orElse(), }) { if (fileSearch != null) { - return fileSearch(type); + return fileSearch(type, this.fileSearch); } return orElse(); } @@ -53194,7 +53246,10 @@ class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { } abstract class AssistantToolsFileSearch extends AssistantTools { - const factory AssistantToolsFileSearch({final String type}) = + const factory AssistantToolsFileSearch( + {required final String type, + @JsonKey(name: 'file_search', includeIfNull: false) + final AssistantToolsFileSearchFileSearch? fileSearch}) = _$AssistantToolsFileSearchImpl; const AssistantToolsFileSearch._() : super._(); @@ -53205,6 +53260,10 @@ abstract class AssistantToolsFileSearch extends AssistantTools { /// The type of tool being defined: `file_search` String get type; + + /// Overrides for the file search tool. + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? get fileSearch; @override @JsonKey(ignore: true) _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> @@ -53310,7 +53369,11 @@ class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { @optionalTypeArgs TResult when({ required TResult Function(String type) codeInterpreter, - required TResult Function(String type) fileSearch, + required TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch) + fileSearch, required TResult Function(String type, FunctionObject function) function, }) { return function(type, this.function); @@ -53320,7 +53383,11 @@ class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { @optionalTypeArgs TResult? whenOrNull({ TResult? Function(String type)? codeInterpreter, - TResult? Function(String type)? fileSearch, + TResult? Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult? Function(String type, FunctionObject function)? function, }) { return function?.call(type, this.function); @@ -53330,7 +53397,11 @@ class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { @optionalTypeArgs TResult maybeWhen({ TResult Function(String type)? codeInterpreter, - TResult Function(String type)? fileSearch, + TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult Function(String type, FunctionObject function)? function, required TResult orElse(), }) { @@ -53405,6 +53476,187 @@ abstract class AssistantToolsFunction extends AssistantTools { get copyWith => throw _privateConstructorUsedError; } +AssistantToolsFileSearchFileSearch _$AssistantToolsFileSearchFileSearchFromJson( + Map json) { + return _AssistantToolsFileSearchFileSearch.fromJson(json); +} + +/// @nodoc +mixin _$AssistantToolsFileSearchFileSearch { + /// The maximum number of results the file search tool should output. 
The default is 20 for gpt-4* models + /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + /// + /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search + /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + @JsonKey(name: 'max_num_results', includeIfNull: false) + int? get maxNumResults => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $AssistantToolsFileSearchFileSearchCopyWith< + AssistantToolsFileSearchFileSearch> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $AssistantToolsFileSearchFileSearchCopyWith<$Res> { + factory $AssistantToolsFileSearchFileSearchCopyWith( + AssistantToolsFileSearchFileSearch value, + $Res Function(AssistantToolsFileSearchFileSearch) then) = + _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, + AssistantToolsFileSearchFileSearch>; + @useResult + $Res call( + {@JsonKey(name: 'max_num_results', includeIfNull: false) + int? maxNumResults}); +} + +/// @nodoc +class _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, + $Val extends AssistantToolsFileSearchFileSearch> + implements $AssistantToolsFileSearchFileSearchCopyWith<$Res> { + _$AssistantToolsFileSearchFileSearchCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? maxNumResults = freezed, + }) { + return _then(_value.copyWith( + maxNumResults: freezed == maxNumResults + ? _value.maxNumResults + : maxNumResults // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$AssistantToolsFileSearchFileSearchImplCopyWith<$Res> + implements $AssistantToolsFileSearchFileSearchCopyWith<$Res> { + factory _$$AssistantToolsFileSearchFileSearchImplCopyWith( + _$AssistantToolsFileSearchFileSearchImpl value, + $Res Function(_$AssistantToolsFileSearchFileSearchImpl) then) = + __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'max_num_results', includeIfNull: false) + int? maxNumResults}); +} + +/// @nodoc +class __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl<$Res> + extends _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, + _$AssistantToolsFileSearchFileSearchImpl> + implements _$$AssistantToolsFileSearchFileSearchImplCopyWith<$Res> { + __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl( + _$AssistantToolsFileSearchFileSearchImpl _value, + $Res Function(_$AssistantToolsFileSearchFileSearchImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? maxNumResults = freezed, + }) { + return _then(_$AssistantToolsFileSearchFileSearchImpl( + maxNumResults: freezed == maxNumResults + ? 
_value.maxNumResults + : maxNumResults // ignore: cast_nullable_to_non_nullable + as int?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$AssistantToolsFileSearchFileSearchImpl + extends _AssistantToolsFileSearchFileSearch { + const _$AssistantToolsFileSearchFileSearchImpl( + {@JsonKey(name: 'max_num_results', includeIfNull: false) + this.maxNumResults}) + : super._(); + + factory _$AssistantToolsFileSearchFileSearchImpl.fromJson( + Map json) => + _$$AssistantToolsFileSearchFileSearchImplFromJson(json); + + /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + /// + /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search + /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + @override + @JsonKey(name: 'max_num_results', includeIfNull: false) + final int? maxNumResults; + + @override + String toString() { + return 'AssistantToolsFileSearchFileSearch(maxNumResults: $maxNumResults)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$AssistantToolsFileSearchFileSearchImpl && + (identical(other.maxNumResults, maxNumResults) || + other.maxNumResults == maxNumResults)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, maxNumResults); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$AssistantToolsFileSearchFileSearchImplCopyWith< + _$AssistantToolsFileSearchFileSearchImpl> + get copyWith => __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl< + _$AssistantToolsFileSearchFileSearchImpl>(this, _$identity); + + @override + Map toJson() { + return _$$AssistantToolsFileSearchFileSearchImplToJson( + this, + ); + } +} + +abstract class _AssistantToolsFileSearchFileSearch + extends AssistantToolsFileSearchFileSearch { + const factory _AssistantToolsFileSearchFileSearch( + {@JsonKey(name: 'max_num_results', includeIfNull: false) + final int? maxNumResults}) = _$AssistantToolsFileSearchFileSearchImpl; + const _AssistantToolsFileSearchFileSearch._() : super._(); + + factory _AssistantToolsFileSearchFileSearch.fromJson( + Map json) = + _$AssistantToolsFileSearchFileSearchImpl.fromJson; + + @override + + /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + /// + /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search + /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + @JsonKey(name: 'max_num_results', includeIfNull: false) + int? 
get maxNumResults; + @override + @JsonKey(ignore: true) + _$$AssistantToolsFileSearchFileSearchImplCopyWith< + _$AssistantToolsFileSearchFileSearchImpl> + get copyWith => throw _privateConstructorUsedError; +} + MessageContent _$MessageContentFromJson(Map json) { switch (json['type']) { case 'image_file': diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 8b4963d6..5795dcc1 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -3906,24 +3906,13 @@ _$MessageContentTextAnnotationsFileCitationImpl Map json) => _$MessageContentTextAnnotationsFileCitationImpl( fileId: json['file_id'] as String, - quote: json['quote'] as String?, ); Map _$$MessageContentTextAnnotationsFileCitationImplToJson( - _$MessageContentTextAnnotationsFileCitationImpl instance) { - final val = { - 'file_id': instance.fileId, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('quote', instance.quote); - return val; -} + _$MessageContentTextAnnotationsFileCitationImpl instance) => + { + 'file_id': instance.fileId, + }; _$MessageDeltaContentImageUrlObjectImpl _$$MessageDeltaContentImageUrlObjectImplFromJson( @@ -5114,14 +5103,28 @@ Map _$$AssistantToolsCodeInterpreterImplToJson( _$AssistantToolsFileSearchImpl _$$AssistantToolsFileSearchImplFromJson( Map json) => _$AssistantToolsFileSearchImpl( - type: json['type'] as String? ?? 'file_search', + type: json['type'] as String, + fileSearch: json['file_search'] == null + ? null + : AssistantToolsFileSearchFileSearch.fromJson( + json['file_search'] as Map), ); Map _$$AssistantToolsFileSearchImplToJson( - _$AssistantToolsFileSearchImpl instance) => - { - 'type': instance.type, - }; + _$AssistantToolsFileSearchImpl instance) { + final val = { + 'type': instance.type, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('file_search', instance.fileSearch?.toJson()); + return val; +} _$AssistantToolsFunctionImpl _$$AssistantToolsFunctionImplFromJson( Map json) => @@ -5138,6 +5141,27 @@ Map _$$AssistantToolsFunctionImplToJson( 'function': instance.function.toJson(), }; +_$AssistantToolsFileSearchFileSearchImpl + _$$AssistantToolsFileSearchFileSearchImplFromJson( + Map json) => + _$AssistantToolsFileSearchFileSearchImpl( + maxNumResults: json['max_num_results'] as int?, + ); + +Map _$$AssistantToolsFileSearchFileSearchImplToJson( + _$AssistantToolsFileSearchFileSearchImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('max_num_results', instance.maxNumResults); + return val; +} + _$MessageContentImageFileObjectImpl _$$MessageContentImageFileObjectImplFromJson(Map json) => _$MessageContentImageFileObjectImpl( diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 07b38bb8..bb054143 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -4,7 +4,7 @@ openapi: 3.0.0 info: title: OpenAI API description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. 
- version: "2.0.0" + version: "2.1.0" termsOfService: https://openai.com/policies/terms-of-use contact: name: OpenAI Support @@ -1465,7 +1465,7 @@ paths: operationId: cancelBatch tags: - Batch - summary: Cancels an in-progress batch. + summary: Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file. parameters: - in: path name: batch_id @@ -2668,8 +2668,13 @@ components: See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + `fine-tune`. + The contents of the file should differ depending on if the model uses the + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. + See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. type: string example: "file-abc123" @@ -3710,7 +3715,23 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - default: "file_search" + default: file_search + file_search: + type: object + description: Overrides for the file search tool. + properties: + max_num_results: + type: integer + minimum: 1 + maximum: 50 + description: | + The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than `max_num_results` results. See the [file search + tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + required: + - type AssistantToolsFunction: type: object description: Function tool @@ -4885,12 +4906,8 @@ components: file_id: description: The ID of the specific File the citation is from. type: string - quote: - description: The specific quote in the file. - type: string required: - file_id - # - quote # https://github.com/openai/openai-openapi/issues/263 MessageContentTextAnnotationsFilePathObject: type: object description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. @@ -6115,7 +6132,7 @@ components: See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. + Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. endpoint: $ref: "#/components/schemas/BatchEndpoint" completion_window: diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 395d6481..6763b140 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -2,7 +2,7 @@ openapi: 3.0.0 info: title: OpenAI API description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. 
- version: "2.0.0" + version: "2.1.0" termsOfService: https://openai.com/policies/terms-of-use contact: name: OpenAI Support @@ -16,7 +16,7 @@ tags: - name: Assistants description: Build Assistants that can call models and use tools. - name: Audio - description: Learn how to turn audio into text or text into audio. + description: Turn audio into text or text into audio. - name: Chat description: Given a list of messages comprising a conversation, the model will return a response. - name: Completions @@ -1506,9 +1506,9 @@ paths: The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details. - The Fine-tuning API only supports `.jsonl` files. + The Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models. - The Batch API only supports `.jsonl` files up to 100 MB in size. + The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input). Please [contact us](https://help.openai.com/) if you need to increase these storage limits. requestBody: @@ -4005,7 +4005,8 @@ paths: "incomplete_details": null, "usage": null, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } - title: Streaming @@ -4067,13 +4068,13 @@ paths: data: {"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} + data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -4105,7 +4106,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"} + 
{"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: done data: [DONE] @@ -4236,13 +4237,13 @@ paths: data: {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} @@ -4268,7 +4269,7 @@ paths: data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} event: thread.run.requires_action - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -4400,7 +4401,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true }, { "id": "run_abc456", @@ -4446,7 +4448,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } ], "first_id": "run_abc123", @@ -4552,7 +4555,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } - title: Streaming request: @@ -4596,13 +4600,13 @@ paths: main(); response: | event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -4634,7 +4638,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -4751,13 +4755,13 @@ paths: main(); response: | event: thread.run.created - data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -4789,7 +4793,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -4893,7 +4897,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } post: operationId: modifyRun @@ -5021,7 +5026,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: @@ -5167,7 +5173,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } - title: Streaming @@ -5234,10 +5241,10 @@ paths: data: 
{"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} @@ -5275,7 +5282,7 @@ paths: data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -5373,7 +5380,9 @@ paths: "usage": null, "temperature": 1.0, "top_p": 1.0, - "response_format": "auto" + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true } /threads/{thread_id}/runs/{run_id}/steps: @@ -6655,7 +6664,7 @@ paths: See [upload file](/docs/api-reference/files/create) for how to upload a file. - Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/requestInput), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. endpoint: type: string enum: @@ -6930,7 +6939,7 @@ paths: operationId: cancelBatch tags: - Batch - summary: Cancels an in-progress batch. + summary: Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file. parameters: - in: path name: batch_id @@ -7501,6 +7510,20 @@ components: required: - role + FineTuneChatCompletionRequestAssistantMessage: + allOf: + - type: object + title: Assistant message + deprecated: false + properties: + weight: + type: integer + enum: [ 0, 1 ] + description: "Controls whether the assistant message is trained against (0 or 1)" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + required: + - role + ChatCompletionRequestToolMessage: type: object title: Tool message @@ -8647,6 +8670,8 @@ components: Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format. + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. 
type: string example: "file-abc123" @@ -9594,6 +9619,98 @@ components: "step_number": 88 } + FinetuneChatRequestInput: + type: object + description: The per-line training example of a fine-tuning input file for chat models + properties: + messages: + type: array + minItems: 1 + items: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/FineTuneChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + tools: + type: array + description: A list of tools the model may generate JSON inputs for. + items: + $ref: "#/components/schemas/ChatCompletionTool" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + functions: + deprecated: true + description: + A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + x-oaiMeta: + name: Training format for chat models + example: | + { + "messages": [ + { "role": "user", "content": "What is the weather in San Francisco?" }, + { + "role": "assistant", + "tool_calls": [ + { + "id": "call_id", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\"location\": \"San Francisco, USA\", \"format\": \"celsius\"}" + } + } + ] + } + ], + "parallel_tool_calls": false, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and country, eg. San Francisco, USA" + }, + "format": { "type": "string", "enum": ["celsius", "fahrenheit"] } + }, + "required": ["location", "format"] + } + } + } + ] + } + + FinetuneCompletionRequestInput: + type: object + description: The per-line training example of a fine-tuning input file for completions models + properties: + prompt: + type: string + description: The input prompt for this training example. + completion: + type: string + description: The desired completion for this training example. + x-oaiMeta: + name: Training format for completions models + example: | + { + "prompt": "What is the answer to 2+2", + "completion": "4" + } + CompletionUsage: type: object description: Usage statistics for the completion request. @@ -10083,6 +10200,29 @@ components: - type AssistantToolsFileSearch: + type: object + title: FileSearch tool + properties: + type: + type: string + description: "The type of tool being defined: `file_search`" + enum: [ "file_search" ] + file_search: + type: object + description: Overrides for the file search tool. + properties: + max_num_results: + type: integer + minimum: 1 + maximum: 50 + description: | + The maximum number of results the file search tool should output. The default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. 
+ required: + - type + + AssistantToolsFileSearchTypeOnly: type: object title: FileSearch tool properties: @@ -11006,7 +11146,7 @@ components: items: oneOf: - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" x-oaiExpandable: true description: A list of files attached to the message, and the tools they were added to. nullable: true @@ -11151,7 +11291,7 @@ components: items: oneOf: - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" x-oaiExpandable: true description: A list of files attached to the message, and the tools they should be added to. required: @@ -11384,12 +11524,8 @@ components: file_id: description: The ID of the specific File the citation is from. type: string - quote: - description: The specific quote in the file. - type: string required: - file_id - - quote start_index: type: integer minimum: 0 @@ -13133,6 +13269,12 @@ x-oaiMeta: - type: endpoint key: cancelFineTuningJob path: cancel + - type: object + key: FinetuneChatRequestInput + path: chat-input + - type: object + key: FinetuneCompletionRequestInput + path: completions-input - type: object key: FineTuningJob path: object @@ -13167,10 +13309,10 @@ x-oaiMeta: path: object - type: object key: BatchRequestInput - path: requestInput + path: request-input - type: object key: BatchRequestOutput - path: requestOutput + path: request-output - id: files title: Files description: | From de565aa13b60ca63533a0024a417ccf53de97365 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 18 Jul 2024 23:48:20 +0200 Subject: [PATCH 186/251] feat: Add support for disabling parallel tool calls in openai_dart (#492) --- .../create_chat_completion_request.dart | 8 + .../generated/schema/create_run_request.dart | 8 + .../schema/create_thread_and_run_request.dart | 8 + .../lib/src/generated/schema/run_object.dart | 6 + .../src/generated/schema/schema.freezed.dart | 157 +++++++++++++++++- .../lib/src/generated/schema/schema.g.dart | 8 + packages/openai_dart/oas/openapi_curated.yaml | 11 ++ .../openai_dart/oas/openapi_official.yaml | 7 + 8 files changed, 209 insertions(+), 4 deletions(-) diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 997af317..8c740fde 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -104,6 +104,12 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? toolChoice, + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + @Default(true) + bool? parallelToolCalls, + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). @JsonKey(includeIfNull: false) String? 
user, @@ -149,6 +155,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'top_p', 'tools', 'tool_choice', + 'parallel_tool_calls', 'user', 'function_call', 'functions' @@ -237,6 +244,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'top_p': topP, 'tools': tools, 'tool_choice': toolChoice, + 'parallel_tool_calls': parallelToolCalls, 'user': user, 'function_call': functionCall, 'functions': functions, diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 95ad74a8..375ea8a0 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -69,6 +69,12 @@ class CreateRunRequest with _$CreateRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? toolChoice, + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + @Default(true) + bool? parallelToolCalls, + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -101,6 +107,7 @@ class CreateRunRequest with _$CreateRunRequest { 'max_completion_tokens', 'truncation_strategy', 'tool_choice', + 'parallel_tool_calls', 'response_format', 'stream' ]; @@ -155,6 +162,7 @@ class CreateRunRequest with _$CreateRunRequest { 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, 'tool_choice': toolChoice, + 'parallel_tool_calls': parallelToolCalls, 'response_format': responseFormat, 'stream': stream, }; diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index 5f7692df..ff5013d5 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -68,6 +68,12 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? toolChoice, + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + @Default(true) + bool? parallelToolCalls, + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
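For reference, the flag added to CreateRunRequest and CreateThreadAndRunRequest above is optional, defaults to true, and is serialized as `parallel_tool_calls`. A minimal sketch of opting out on a run request follows; it is illustrative only, and the placeholder assistant id is not from this patch:

import 'package:openai_dart/openai_dart.dart';

void main() {
  // Sketch, not part of this patch: disable parallel function calling for a run.
  final request = CreateRunRequest(
    assistantId: 'asst_abc123', // placeholder value; existing required field of the class
    parallelToolCalls: false,   // new field; defaults to true when omitted
  );
  // The new key is included in the serialized payload.
  print(request.toJson()['parallel_tool_calls']); // false
}
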
@@ -100,6 +106,7 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { 'max_completion_tokens', 'truncation_strategy', 'tool_choice', + 'parallel_tool_calls', 'response_format', 'stream' ]; @@ -154,6 +161,7 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, 'tool_choice': toolChoice, + 'parallel_tool_calls': parallelToolCalls, 'response_format': responseFormat, 'stream': stream, }; diff --git a/packages/openai_dart/lib/src/generated/schema/run_object.dart b/packages/openai_dart/lib/src/generated/schema/run_object.dart index e34403a8..d3e7dcf5 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_object.dart @@ -99,6 +99,10 @@ class RunObject with _$RunObject { @JsonKey(name: 'tool_choice') required RunObjectToolChoice? toolChoice, + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls') required bool? parallelToolCalls, + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -140,6 +144,7 @@ class RunObject with _$RunObject { 'max_completion_tokens', 'truncation_strategy', 'tool_choice', + 'parallel_tool_calls', 'response_format' ]; @@ -187,6 +192,7 @@ class RunObject with _$RunObject { 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, 'tool_choice': toolChoice, + 'parallel_tool_calls': parallelToolCalls, 'response_format': responseFormat, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 0bc9015f..68caea69 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -3500,6 +3500,11 @@ mixin _$CreateChatCompletionRequest { ChatCompletionToolChoiceOption? get toolChoice => throw _privateConstructorUsedError; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls => throw _privateConstructorUsedError; + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; @@ -3565,6 +3570,8 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @JsonKey(includeIfNull: false) String? 
user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -3611,6 +3618,7 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, Object? topP = freezed, Object? tools = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? user = freezed, Object? functionCall = freezed, Object? functions = freezed, @@ -3688,6 +3696,10 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as ChatCompletionToolChoiceOption?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, user: freezed == user ? _value.user : user // ignore: cast_nullable_to_non_nullable @@ -3813,6 +3825,8 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @JsonKey(includeIfNull: false) String? user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -3864,6 +3878,7 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> Object? topP = freezed, Object? tools = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? user = freezed, Object? functionCall = freezed, Object? functions = freezed, @@ -3941,6 +3956,10 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as ChatCompletionToolChoiceOption?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, user: freezed == user ? _value.user : user // ignore: cast_nullable_to_non_nullable @@ -3985,6 +4004,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + this.parallelToolCalls = true, @JsonKey(includeIfNull: false) this.user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -4142,6 +4163,12 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) final ChatCompletionToolChoiceOption? toolChoice; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @override + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls; + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). 
@override @JsonKey(includeIfNull: false) @@ -4180,7 +4207,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @override String toString() { - return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, user: $user, functionCall: $functionCall, functions: $functions)'; + return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, user: $user, functionCall: $functionCall, functions: $functions)'; } @override @@ -4216,6 +4243,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { const DeepCollectionEquality().equals(other._tools, _tools) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && + (identical(other.parallelToolCalls, parallelToolCalls) || + other.parallelToolCalls == parallelToolCalls) && (identical(other.user, user) || other.user == user) && (identical(other.functionCall, functionCall) || other.functionCall == functionCall) && @@ -4245,6 +4274,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { topP, const DeepCollectionEquality().hash(_tools), toolChoice, + parallelToolCalls, user, functionCall, const DeepCollectionEquality().hash(_functions) @@ -4297,6 +4327,8 @@ abstract class _CreateChatCompletionRequest @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) final ChatCompletionToolChoiceOption? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls, @JsonKey(includeIfNull: false) final String? user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -4426,6 +4458,12 @@ abstract class _CreateChatCompletionRequest ChatCompletionToolChoiceOption? get toolChoice; @override + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls; + @override + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). @JsonKey(includeIfNull: false) String? get user; @@ -26742,6 +26780,11 @@ mixin _$RunObject { @JsonKey(name: 'tool_choice') RunObjectToolChoice? get toolChoice => throw _privateConstructorUsedError; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls') + bool? get parallelToolCalls => throw _privateConstructorUsedError; + /// Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -26793,6 +26836,7 @@ abstract class $RunObjectCopyWith<$Res> { @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') RunObjectToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls') bool? parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat responseFormat}); @@ -26844,6 +26888,7 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = null, }) { return _then(_value.copyWith( @@ -26947,6 +26992,10 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as RunObjectToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: null == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -27075,6 +27124,7 @@ abstract class _$$RunObjectImplCopyWith<$Res> @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') RunObjectToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls') bool? parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat responseFormat}); @@ -27131,6 +27181,7 @@ class __$$RunObjectImplCopyWithImpl<$Res> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = null, }) { return _then(_$RunObjectImpl( @@ -27234,6 +27285,10 @@ class __$$RunObjectImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as RunObjectToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: null == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -27273,6 +27328,7 @@ class _$RunObjectImpl extends _RunObject { @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') required this.toolChoice, + @JsonKey(name: 'parallel_tool_calls') required this.parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') required this.responseFormat}) @@ -27421,6 +27477,12 @@ class _$RunObjectImpl extends _RunObject { @JsonKey(name: 'tool_choice') final RunObjectToolChoice? toolChoice; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @override + @JsonKey(name: 'parallel_tool_calls') + final bool? parallelToolCalls; + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
/// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -27433,7 +27495,7 @@ class _$RunObjectImpl extends _RunObject { @override String toString() { - return 'RunObject(id: $id, object: $object, createdAt: $createdAt, threadId: $threadId, assistantId: $assistantId, status: $status, requiredAction: $requiredAction, lastError: $lastError, expiresAt: $expiresAt, startedAt: $startedAt, cancelledAt: $cancelledAt, failedAt: $failedAt, completedAt: $completedAt, incompleteDetails: $incompleteDetails, model: $model, instructions: $instructions, tools: $tools, metadata: $metadata, usage: $usage, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat)'; + return 'RunObject(id: $id, object: $object, createdAt: $createdAt, threadId: $threadId, assistantId: $assistantId, status: $status, requiredAction: $requiredAction, lastError: $lastError, expiresAt: $expiresAt, startedAt: $startedAt, cancelledAt: $cancelledAt, failedAt: $failedAt, completedAt: $completedAt, incompleteDetails: $incompleteDetails, model: $model, instructions: $instructions, tools: $tools, metadata: $metadata, usage: $usage, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, responseFormat: $responseFormat)'; } @override @@ -27483,6 +27545,8 @@ class _$RunObjectImpl extends _RunObject { other.truncationStrategy == truncationStrategy) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && + (identical(other.parallelToolCalls, parallelToolCalls) || + other.parallelToolCalls == parallelToolCalls) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat)); } @@ -27516,6 +27580,7 @@ class _$RunObjectImpl extends _RunObject { maxCompletionTokens, truncationStrategy, toolChoice, + parallelToolCalls, responseFormat ]); @@ -27566,6 +27631,8 @@ abstract class _RunObject extends RunObject { @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') required final RunObjectToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls') + required final bool? parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') required final RunObjectResponseFormat responseFormat}) = _$RunObjectImpl; @@ -27698,6 +27765,12 @@ abstract class _RunObject extends RunObject { RunObjectToolChoice? get toolChoice; @override + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls') + bool? get parallelToolCalls; + @override + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -29562,6 +29635,11 @@ mixin _$CreateRunRequest { CreateRunRequestToolChoice? 
get toolChoice => throw _privateConstructorUsedError; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls => throw _privateConstructorUsedError; + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -29611,6 +29689,8 @@ abstract class $CreateRunRequestCopyWith<$Res> { @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? responseFormat, @@ -29648,6 +29728,7 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -29704,6 +29785,10 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateRunRequestToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -29797,6 +29882,8 @@ abstract class _$$CreateRunRequestImplCopyWith<$Res> @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? responseFormat, @@ -29836,6 +29923,7 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -29892,6 +29980,10 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateRunRequestToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: freezed == responseFormat ? 
_value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -29930,6 +30022,8 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + this.parallelToolCalls = true, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @@ -30043,6 +30137,12 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateRunRequestToolChoice? toolChoice; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @override + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls; + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -30060,7 +30160,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @override String toString() { - return 'CreateRunRequest(assistantId: $assistantId, model: $model, instructions: $instructions, additionalInstructions: $additionalInstructions, additionalMessages: $additionalMessages, tools: $tools, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat, stream: $stream)'; + return 'CreateRunRequest(assistantId: $assistantId, model: $model, instructions: $instructions, additionalInstructions: $additionalInstructions, additionalMessages: $additionalMessages, tools: $tools, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, responseFormat: $responseFormat, stream: $stream)'; } @override @@ -30090,6 +30190,8 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { other.truncationStrategy == truncationStrategy) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && + (identical(other.parallelToolCalls, parallelToolCalls) || + other.parallelToolCalls == parallelToolCalls) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat) && (identical(other.stream, stream) || other.stream == stream)); @@ -30112,6 +30214,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { maxCompletionTokens, truncationStrategy, toolChoice, + parallelToolCalls, responseFormat, stream); @@ -30154,6 +30257,8 @@ abstract class _CreateRunRequest extends CreateRunRequest { @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) final CreateRunRequestResponseFormat? 
responseFormat, @@ -30239,6 +30344,12 @@ abstract class _CreateRunRequest extends CreateRunRequest { CreateRunRequestToolChoice? get toolChoice; @override + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls; + @override + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -32834,6 +32945,11 @@ mixin _$CreateThreadAndRunRequest { CreateThreadAndRunRequestToolChoice? get toolChoice => throw _privateConstructorUsedError; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls => throw _privateConstructorUsedError; + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -32882,6 +32998,8 @@ abstract class $CreateThreadAndRunRequestCopyWith<$Res> { @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -32922,6 +33040,7 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -32978,6 +33097,10 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateThreadAndRunRequestToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -33095,6 +33218,8 @@ abstract class _$$CreateThreadAndRunRequestImplCopyWith<$Res> @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -33140,6 +33265,7 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? 
toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -33196,6 +33322,10 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateThreadAndRunRequestToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -33230,6 +33360,8 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + this.parallelToolCalls = true, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @@ -33332,6 +33464,12 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateThreadAndRunRequestToolChoice? toolChoice; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @override + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls; + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
@@ -33349,7 +33487,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @override String toString() { - return 'CreateThreadAndRunRequest(assistantId: $assistantId, thread: $thread, model: $model, instructions: $instructions, tools: $tools, toolResources: $toolResources, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat, stream: $stream)'; + return 'CreateThreadAndRunRequest(assistantId: $assistantId, thread: $thread, model: $model, instructions: $instructions, tools: $tools, toolResources: $toolResources, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, responseFormat: $responseFormat, stream: $stream)'; } @override @@ -33378,6 +33516,8 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { other.truncationStrategy == truncationStrategy) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && + (identical(other.parallelToolCalls, parallelToolCalls) || + other.parallelToolCalls == parallelToolCalls) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat) && (identical(other.stream, stream) || other.stream == stream)); @@ -33400,6 +33540,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { maxCompletionTokens, truncationStrategy, toolChoice, + parallelToolCalls, responseFormat, stream); @@ -33441,6 +33582,8 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateThreadAndRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) final CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -33526,6 +33669,12 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { CreateThreadAndRunRequestToolChoice? get toolChoice; @override + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls; + @override + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
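Before the generated JSON glue in schema.g.dart below, a note on the wire semantics it implements: on the request classes the flag falls back to true when the key is absent and is only written when non-null, while RunObject reads it back as a plain nullable bool with no default. A rough sketch under those assumptions, not part of the patch:

import 'package:openai_dart/openai_dart.dart';

void main() {
  // Sketch, not part of this patch.
  // Request side: @Default(true) applies, and the non-null value is serialized.
  final reqJson = CreateRunRequest(assistantId: 'asst_abc123').toJson();
  print(reqJson['parallel_tool_calls']); // true

  // Response side: RunObject parses json['parallel_tool_calls'] as bool?,
  // so a payload without the key simply yields null.
}
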
diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 5795dcc1..e52d47f7 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -335,6 +335,7 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( .toList(), toolChoice: const _ChatCompletionToolChoiceOptionConverter() .fromJson(json['tool_choice']), + parallelToolCalls: json['parallel_tool_calls'] as bool? ?? true, user: json['user'] as String?, functionCall: const _ChatCompletionFunctionCallConverter() .fromJson(json['function_call']), @@ -376,6 +377,7 @@ Map _$$CreateChatCompletionRequestImplToJson( 'tool_choice', const _ChatCompletionToolChoiceOptionConverter() .toJson(instance.toolChoice)); + writeNotNull('parallel_tool_calls', instance.parallelToolCalls); writeNotNull('user', instance.user); writeNotNull( 'function_call', @@ -2583,6 +2585,7 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => json['truncation_strategy'] as Map), toolChoice: const _RunObjectToolChoiceConverter().fromJson(json['tool_choice']), + parallelToolCalls: json['parallel_tool_calls'] as bool?, responseFormat: const _RunObjectResponseFormatConverter() .fromJson(json['response_format']), ); @@ -2623,6 +2626,7 @@ Map _$$RunObjectImplToJson(_$RunObjectImpl instance) { val['truncation_strategy'] = instance.truncationStrategy?.toJson(); val['tool_choice'] = _$JsonConverterToJson( instance.toolChoice, const _RunObjectToolChoiceConverter().toJson); + val['parallel_tool_calls'] = instance.parallelToolCalls; val['response_format'] = const _RunObjectResponseFormatConverter().toJson(instance.responseFormat); return val; @@ -2836,6 +2840,7 @@ _$CreateRunRequestImpl _$$CreateRunRequestImplFromJson( json['truncation_strategy'] as Map), toolChoice: const _CreateRunRequestToolChoiceConverter() .fromJson(json['tool_choice']), + parallelToolCalls: json['parallel_tool_calls'] as bool? ?? true, responseFormat: const _CreateRunRequestResponseFormatConverter() .fromJson(json['response_format']), stream: json['stream'] as bool?, @@ -2868,6 +2873,7 @@ Map _$$CreateRunRequestImplToJson( writeNotNull('truncation_strategy', instance.truncationStrategy?.toJson()); writeNotNull('tool_choice', const _CreateRunRequestToolChoiceConverter().toJson(instance.toolChoice)); + writeNotNull('parallel_tool_calls', instance.parallelToolCalls); writeNotNull( 'response_format', const _CreateRunRequestResponseFormatConverter() @@ -3158,6 +3164,7 @@ _$CreateThreadAndRunRequestImpl _$$CreateThreadAndRunRequestImplFromJson( json['truncation_strategy'] as Map), toolChoice: const _CreateThreadAndRunRequestToolChoiceConverter() .fromJson(json['tool_choice']), + parallelToolCalls: json['parallel_tool_calls'] as bool? ?? 
true, responseFormat: const _CreateThreadAndRunRequestResponseFormatConverter() .fromJson(json['response_format']), stream: json['stream'] as bool?, @@ -3191,6 +3198,7 @@ Map _$$CreateThreadAndRunRequestImplToJson( 'tool_choice', const _CreateThreadAndRunRequestToolChoiceConverter() .toJson(instance.toolChoice)); + writeNotNull('parallel_tool_calls', instance.parallelToolCalls); writeNotNull( 'response_format', const _CreateThreadAndRunRequestResponseFormatConverter() diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index bb054143..31fe55f9 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1974,6 +1974,13 @@ components: `required` means the model must call one or more tools. enum: [ none, auto, required ] - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" + parallel_tool_calls: ¶llel_tool_calls + description: | + Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + during tool use. + type: boolean + default: true + nullable: true user: *end_user_param_configuration function_call: title: ChatCompletionFunctionCall @@ -3959,6 +3966,7 @@ components: `required` means the model must call one or more tools before responding to the user. enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" + parallel_tool_calls: *parallel_tool_calls response_format: description: | Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. @@ -3997,6 +4005,7 @@ components: - max_completion_tokens - truncation_strategy - tool_choice + - parallel_tool_calls - response_format RunCompletionUsage: type: object @@ -4136,6 +4145,7 @@ components: `required` means the model must call one or more tools before responding to the user. enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" + parallel_tool_calls: *parallel_tool_calls response_format: description: | Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. @@ -4366,6 +4376,7 @@ components: `required` means the model must call one or more tools before responding to the user. enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" + parallel_tool_calls: *parallel_tool_calls response_format: description: | Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 6763b140..1b0f8c0c 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -7661,6 +7661,11 @@ components: - type - function + ParallelToolCalls: + description: Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. 
+ type: boolean + default: true + ChatCompletionMessageToolCalls: type: array description: The tool calls generated by the model, such as function calls. @@ -7966,6 +7971,8 @@ components: $ref: "#/components/schemas/ChatCompletionTool" tool_choice: $ref: "#/components/schemas/ChatCompletionToolChoiceOption" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" user: *end_user_param_configuration function_call: deprecated: true From fe8498714e4cd09b2fa89784df41f9b3fad11613 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 18 Jul 2024 23:56:40 +0200 Subject: [PATCH 187/251] feat: Add support for disabling parallel tool calls in ChatOpenAI (#493) --- .../langchain_openai/lib/src/chat_models/chat_openai.dart | 2 ++ packages/langchain_openai/lib/src/chat_models/types.dart | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index 83bb8cd5..fa03bf4c 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -305,6 +305,8 @@ class ChatOpenAI extends BaseChatModel { : null, temperature: options?.temperature ?? defaultOptions.temperature, topP: options?.topP ?? defaultOptions.topP, + parallelToolCalls: + options?.parallelToolCalls ?? defaultOptions.parallelToolCalls, user: options?.user ?? defaultOptions.user, streamOptions: stream ? const ChatCompletionStreamOptions(includeUsage: true) : null, diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index a82ab9a1..859c5cdf 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -18,6 +18,7 @@ class ChatOpenAIOptions extends ChatModelOptions { this.stop, this.temperature, this.topP, + this.parallelToolCalls, this.user, super.tools, super.toolChoice, @@ -123,6 +124,13 @@ class ChatOpenAIOptions extends ChatModelOptions { /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p final double? topP; + /// Whether to enable parallel tool calling during tool use. + /// By default, it is enabled. + /// + /// + /// Ref: https://platform.openai.com/docs/guides/function-calling/parallel-function-calling + final bool? parallelToolCalls; + /// A unique identifier representing your end-user, which can help OpenAI to /// monitor and detect abuse. 
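At the LangChain.dart layer, the option added above is exposed as ChatOpenAIOptions.parallelToolCalls and forwarded to the request by the wiring in chat_openai.dart. A minimal usage sketch follows; the apiKey parameter and the defaultOptions argument of ChatOpenAI are assumed from the package's existing API and are not shown in this diff:

import 'package:langchain_openai/langchain_openai.dart';

void main() {
  // Sketch, not part of this patch: turn off parallel tool calling by default.
  final chatModel = ChatOpenAI(
    apiKey: 'sk-...', // assumed existing constructor parameter
    defaultOptions: const ChatOpenAIOptions(
      parallelToolCalls: false, // new option added by this patch
    ),
  );
  // Per-call overrides also work, since the model reads
  // options?.parallelToolCalls ?? defaultOptions.parallelToolCalls.
}
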
/// From 65515011236ca09b4f7978acdc90a1070ec1f340 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 19 Jul 2024 00:19:05 +0200 Subject: [PATCH 188/251] feat: Add support for service tier in openai_dart (#494) --- .../create_chat_completion_request.dart | 34 +++ .../create_chat_completion_response.dart | 11 + ...reate_chat_completion_stream_response.dart | 11 + .../lib/src/generated/schema/schema.dart | 1 + .../src/generated/schema/schema.freezed.dart | 199 +++++++++++++++++- .../lib/src/generated/schema/schema.g.dart | 23 ++ .../src/generated/schema/service_tier.dart | 18 ++ packages/openai_dart/oas/openapi_curated.yaml | 25 +++ .../openai_dart/oas/openapi_official.yaml | 33 ++- packages/openai_dart/pubspec.yaml | 2 +- 10 files changed, 352 insertions(+), 5 deletions(-) create mode 100644 packages/openai_dart/lib/src/generated/schema/service_tier.dart diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 8c740fde..657f7268 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -68,6 +68,20 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. @JsonKey(includeIfNull: false) int? seed, + /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers + /// subscribed to the scale tier service: + /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + /// - If set to 'default', the request will be processed using the default service tier with a lower + /// uptime SLA and no latency guarantee. + /// + /// When this parameter is set, the response body will include the `service_tier` utilized. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + CreateChatCompletionRequestServiceTier? serviceTier, + /// Up to 4 sequences where the API will stop generating further tokens. @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) @@ -148,6 +162,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'presence_penalty', 'response_format', 'seed', + 'service_tier', 'stop', 'stream', 'stream_options', @@ -237,6 +252,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'presence_penalty': presencePenalty, 'response_format': responseFormat, 'seed': seed, + 'service_tier': serviceTier, 'stop': stop, 'stream': stream, 'stream_options': streamOptions, @@ -398,6 +414,24 @@ class ChatCompletionResponseFormat with _$ChatCompletionResponseFormat { } } +// ========================================== +// ENUM: CreateChatCompletionRequestServiceTier +// ========================================== + +/// Specifies the latency tier to use for processing the request. This parameter is relevant for customers +/// subscribed to the scale tier service: +/// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. +/// - If set to 'default', the request will be processed using the default service tier with a lower +/// uptime SLA and no latency guarantee. +/// +/// When this parameter is set, the response body will include the `service_tier` utilized. 
+enum CreateChatCompletionRequestServiceTier { + @JsonValue('auto') + auto, + @JsonValue('default') + vDefault, +} + // ========================================== // CLASS: ChatCompletionStop // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart index 95771ce0..9a9687d7 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart @@ -27,6 +27,15 @@ class CreateChatCompletionResponse with _$CreateChatCompletionResponse { /// The model used for the chat completion. required String model, + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + ServiceTier? serviceTier, + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. @@ -50,6 +59,7 @@ class CreateChatCompletionResponse with _$CreateChatCompletionResponse { 'choices', 'created', 'model', + 'service_tier', 'system_fingerprint', 'object', 'usage' @@ -67,6 +77,7 @@ class CreateChatCompletionResponse with _$CreateChatCompletionResponse { 'choices': choices, 'created': created, 'model': model, + 'service_tier': serviceTier, 'system_fingerprint': systemFingerprint, 'object': object, 'usage': usage, diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart index 724f4066..cc0341fc 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart @@ -29,6 +29,15 @@ class CreateChatCompletionStreamResponse /// The model to generate the completion. @JsonKey(includeIfNull: false) String? model, + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + ServiceTier? serviceTier, + /// This fingerprint represents the backend configuration that the model runs with. 
/// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact @@ -53,6 +62,7 @@ class CreateChatCompletionStreamResponse 'choices', 'created', 'model', + 'service_tier', 'system_fingerprint', 'object', 'usage' @@ -70,6 +80,7 @@ class CreateChatCompletionStreamResponse 'choices': choices, 'created': created, 'model': model, + 'service_tier': serviceTier, 'system_fingerprint': systemFingerprint, 'object': object, 'usage': usage, diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart index 6d9b2613..793315da 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -30,6 +30,7 @@ part 'chat_completion_stream_options.dart'; part 'create_chat_completion_response.dart'; part 'chat_completion_response_choice.dart'; part 'chat_completion_finish_reason.dart'; +part 'service_tier.dart'; part 'chat_completion_logprobs.dart'; part 'chat_completion_token_logprob.dart'; part 'chat_completion_token_top_logprob.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 68caea69..7737861a 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -3458,6 +3458,20 @@ mixin _$CreateChatCompletionRequest { @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; + /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers + /// subscribed to the scale tier service: + /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + /// - If set to 'default', the request will be processed using the default service tier with a lower + /// uptime SLA and no latency guarantee. + /// + /// When this parameter is set, the response body will include the `service_tier` utilized. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateChatCompletionRequestServiceTier? get serviceTier => + throw _privateConstructorUsedError; + /// Up to 4 sequences where the API will stop generating further tokens. @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) @@ -3558,6 +3572,11 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @JsonKey(name: 'response_format', includeIfNull: false) ChatCompletionResponseFormat? responseFormat, @JsonKey(includeIfNull: false) int? seed, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateChatCompletionRequestServiceTier? serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? stop, @@ -3611,6 +3630,7 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, Object? presencePenalty = freezed, Object? responseFormat = freezed, Object? seed = freezed, + Object? serviceTier = freezed, Object? stop = freezed, Object? stream = freezed, Object? streamOptions = freezed, @@ -3668,6 +3688,10 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ? _value.seed : seed // ignore: cast_nullable_to_non_nullable as int?, + serviceTier: freezed == serviceTier + ? 
_value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as CreateChatCompletionRequestServiceTier?, stop: freezed == stop ? _value.stop : stop // ignore: cast_nullable_to_non_nullable @@ -3813,6 +3837,11 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @JsonKey(name: 'response_format', includeIfNull: false) ChatCompletionResponseFormat? responseFormat, @JsonKey(includeIfNull: false) int? seed, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateChatCompletionRequestServiceTier? serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? stop, @@ -3871,6 +3900,7 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> Object? presencePenalty = freezed, Object? responseFormat = freezed, Object? seed = freezed, + Object? serviceTier = freezed, Object? stop = freezed, Object? stream = freezed, Object? streamOptions = freezed, @@ -3928,6 +3958,10 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.seed : seed // ignore: cast_nullable_to_non_nullable as int?, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as CreateChatCompletionRequestServiceTier?, stop: freezed == stop ? _value.stop : stop // ignore: cast_nullable_to_non_nullable @@ -3995,6 +4029,11 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @JsonKey(includeIfNull: false) this.seed, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) this.stop, @JsonKey(includeIfNull: false) this.stream = false, @JsonKey(name: 'stream_options', includeIfNull: false) this.streamOptions, @@ -4107,6 +4146,20 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(includeIfNull: false) final int? seed; + /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers + /// subscribed to the scale tier service: + /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + /// - If set to 'default', the request will be processed using the default service tier with a lower + /// uptime SLA and no latency guarantee. + /// + /// When this parameter is set, the response body will include the `service_tier` utilized. + @override + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final CreateChatCompletionRequestServiceTier? serviceTier; + /// Up to 4 sequences where the API will stop generating further tokens. 
@override @_ChatCompletionStopConverter() @@ -4207,7 +4260,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @override String toString() { - return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, user: $user, functionCall: $functionCall, functions: $functions)'; + return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, serviceTier: $serviceTier, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, user: $user, functionCall: $functionCall, functions: $functions)'; } @override @@ -4233,6 +4286,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat) && (identical(other.seed, seed) || other.seed == seed) && + (identical(other.serviceTier, serviceTier) || + other.serviceTier == serviceTier) && (identical(other.stop, stop) || other.stop == stop) && (identical(other.stream, stream) || other.stream == stream) && (identical(other.streamOptions, streamOptions) || @@ -4267,6 +4322,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { presencePenalty, responseFormat, seed, + serviceTier, stop, stream, streamOptions, @@ -4315,6 +4371,11 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'response_format', includeIfNull: false) final ChatCompletionResponseFormat? responseFormat, @JsonKey(includeIfNull: false) final int? seed, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final CreateChatCompletionRequestServiceTier? serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) final ChatCompletionStop? stop, @@ -4411,6 +4472,20 @@ abstract class _CreateChatCompletionRequest int? get seed; @override + /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers + /// subscribed to the scale tier service: + /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + /// - If set to 'default', the request will be processed using the default service tier with a lower + /// uptime SLA and no latency guarantee. + /// + /// When this parameter is set, the response body will include the `service_tier` utilized. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateChatCompletionRequestServiceTier? get serviceTier; + @override + /// Up to 4 sequences where the API will stop generating further tokens. @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) @@ -7741,6 +7816,14 @@ mixin _$CreateChatCompletionResponse { /// The model used for the chat completion. 
String get model => throw _privateConstructorUsedError; + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? get serviceTier => throw _privateConstructorUsedError; + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. @@ -7773,6 +7856,11 @@ abstract class $CreateChatCompletionResponseCopyWith<$Res> { List choices, int created, String model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, String object, @@ -7799,6 +7887,7 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, Object? choices = null, Object? created = null, Object? model = null, + Object? serviceTier = freezed, Object? systemFingerprint = freezed, Object? object = null, Object? usage = freezed, @@ -7820,6 +7909,10 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, ? _value.model : model // ignore: cast_nullable_to_non_nullable as String, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable @@ -7862,6 +7955,11 @@ abstract class _$$CreateChatCompletionResponseImplCopyWith<$Res> List choices, int created, String model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, String object, @@ -7888,6 +7986,7 @@ class __$$CreateChatCompletionResponseImplCopyWithImpl<$Res> Object? choices = null, Object? created = null, Object? model = null, + Object? serviceTier = freezed, Object? systemFingerprint = freezed, Object? object = null, Object? usage = freezed, @@ -7909,6 +8008,10 @@ class __$$CreateChatCompletionResponseImplCopyWithImpl<$Res> ? _value.model : model // ignore: cast_nullable_to_non_nullable as String, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable @@ -7933,6 +8036,11 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { required final List choices, required this.created, required this.model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) this.systemFingerprint, required this.object, @@ -7968,6 +8076,15 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { @override final String model; + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. 
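// A sketch of how the new `service_tier` fields might be exercised through
// the openai_dart client. Only `serviceTier` on the request/response comes
// from this patch; `OpenAIClient`, `createChatCompletion` and the message
// helpers are the package's existing API and are assumed unchanged here.
import 'package:openai_dart/openai_dart.dart';

Future<void> serviceTierExample() async {
  final client = OpenAIClient(apiKey: 'sk-...'); // placeholder key
  final res = await client.createChatCompletion(
    request: CreateChatCompletionRequest(
      model: ChatCompletionModel.modelId('gpt-4o'), // assumed model id
      // Use scale tier credits while available (added in this patch).
      serviceTier: CreateChatCompletionRequestServiceTier.auto,
      messages: [
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string('Hello!'),
        ),
      ],
    ),
  );
  // When `service_tier` was sent, the response echoes the tier that served it.
  print(res.serviceTier); // ServiceTier.scale or ServiceTier.vDefault
}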
+ @override + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ServiceTier? serviceTier; + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. @@ -7986,7 +8103,7 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { @override String toString() { - return 'CreateChatCompletionResponse(id: $id, choices: $choices, created: $created, model: $model, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; + return 'CreateChatCompletionResponse(id: $id, choices: $choices, created: $created, model: $model, serviceTier: $serviceTier, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; } @override @@ -7998,6 +8115,8 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { const DeepCollectionEquality().equals(other._choices, _choices) && (identical(other.created, created) || other.created == created) && (identical(other.model, model) || other.model == model) && + (identical(other.serviceTier, serviceTier) || + other.serviceTier == serviceTier) && (identical(other.systemFingerprint, systemFingerprint) || other.systemFingerprint == systemFingerprint) && (identical(other.object, object) || other.object == object) && @@ -8012,6 +8131,7 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { const DeepCollectionEquality().hash(_choices), created, model, + serviceTier, systemFingerprint, object, usage); @@ -8039,6 +8159,11 @@ abstract class _CreateChatCompletionResponse required final List choices, required final int created, required final String model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) final String? systemFingerprint, required final String object, @@ -8068,6 +8193,15 @@ abstract class _CreateChatCompletionResponse String get model; @override + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? get serviceTier; + @override + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. @@ -9049,6 +9183,14 @@ mixin _$CreateChatCompletionStreamResponse { @JsonKey(includeIfNull: false) String? get model => throw _privateConstructorUsedError; + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? get serviceTier => throw _privateConstructorUsedError; + /// This fingerprint represents the backend configuration that the model runs with. 
/// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact @@ -9083,6 +9225,11 @@ abstract class $CreateChatCompletionStreamResponseCopyWith<$Res> { List choices, @JsonKey(includeIfNull: false) int? created, @JsonKey(includeIfNull: false) String? model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, @JsonKey(includeIfNull: false) String? object, @@ -9109,6 +9256,7 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, Object? choices = null, Object? created = freezed, Object? model = freezed, + Object? serviceTier = freezed, Object? systemFingerprint = freezed, Object? object = freezed, Object? usage = freezed, @@ -9130,6 +9278,10 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ? _value.model : model // ignore: cast_nullable_to_non_nullable as String?, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable @@ -9172,6 +9324,11 @@ abstract class _$$CreateChatCompletionStreamResponseImplCopyWith<$Res> List choices, @JsonKey(includeIfNull: false) int? created, @JsonKey(includeIfNull: false) String? model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, @JsonKey(includeIfNull: false) String? object, @@ -9198,6 +9355,7 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> Object? choices = null, Object? created = freezed, Object? model = freezed, + Object? serviceTier = freezed, Object? systemFingerprint = freezed, Object? object = freezed, Object? usage = freezed, @@ -9219,6 +9377,10 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> ? _value.model : model // ignore: cast_nullable_to_non_nullable as String?, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable @@ -9244,6 +9406,11 @@ class _$CreateChatCompletionStreamResponseImpl required final List choices, @JsonKey(includeIfNull: false) this.created, @JsonKey(includeIfNull: false) this.model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) this.systemFingerprint, @JsonKey(includeIfNull: false) this.object, @@ -9283,6 +9450,15 @@ class _$CreateChatCompletionStreamResponseImpl @JsonKey(includeIfNull: false) final String? model; + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @override + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ServiceTier? serviceTier; + /// This fingerprint represents the backend configuration that the model runs with. 
/// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact @@ -9302,7 +9478,7 @@ class _$CreateChatCompletionStreamResponseImpl @override String toString() { - return 'CreateChatCompletionStreamResponse(id: $id, choices: $choices, created: $created, model: $model, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; + return 'CreateChatCompletionStreamResponse(id: $id, choices: $choices, created: $created, model: $model, serviceTier: $serviceTier, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; } @override @@ -9314,6 +9490,8 @@ class _$CreateChatCompletionStreamResponseImpl const DeepCollectionEquality().equals(other._choices, _choices) && (identical(other.created, created) || other.created == created) && (identical(other.model, model) || other.model == model) && + (identical(other.serviceTier, serviceTier) || + other.serviceTier == serviceTier) && (identical(other.systemFingerprint, systemFingerprint) || other.systemFingerprint == systemFingerprint) && (identical(other.object, object) || other.object == object) && @@ -9328,6 +9506,7 @@ class _$CreateChatCompletionStreamResponseImpl const DeepCollectionEquality().hash(_choices), created, model, + serviceTier, systemFingerprint, object, usage); @@ -9355,6 +9534,11 @@ abstract class _CreateChatCompletionStreamResponse required final List choices, @JsonKey(includeIfNull: false) final int? created, @JsonKey(includeIfNull: false) final String? model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) final String? systemFingerprint, @JsonKey(includeIfNull: false) final String? object, @@ -9388,6 +9572,15 @@ abstract class _CreateChatCompletionStreamResponse String? get model; @override + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? get serviceTier; + @override + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index e52d47f7..9ed1ff90 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -322,6 +322,9 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( : ChatCompletionResponseFormat.fromJson( json['response_format'] as Map), seed: json['seed'] as int?, + serviceTier: $enumDecodeNullable( + _$CreateChatCompletionRequestServiceTierEnumMap, json['service_tier'], + unknownValue: JsonKey.nullForUndefinedEnumValue), stop: const _ChatCompletionStopConverter().fromJson(json['stop']), stream: json['stream'] as bool? ?? 
false, streamOptions: json['stream_options'] == null @@ -366,6 +369,8 @@ Map _$$CreateChatCompletionRequestImplToJson( writeNotNull('presence_penalty', instance.presencePenalty); writeNotNull('response_format', instance.responseFormat?.toJson()); writeNotNull('seed', instance.seed); + writeNotNull('service_tier', + _$CreateChatCompletionRequestServiceTierEnumMap[instance.serviceTier]); writeNotNull( 'stop', const _ChatCompletionStopConverter().toJson(instance.stop)); writeNotNull('stream', instance.stream); @@ -388,6 +393,11 @@ Map _$$CreateChatCompletionRequestImplToJson( return val; } +const _$CreateChatCompletionRequestServiceTierEnumMap = { + CreateChatCompletionRequestServiceTier.auto: 'auto', + CreateChatCompletionRequestServiceTier.vDefault: 'default', +}; + _$ChatCompletionModelEnumerationImpl _$$ChatCompletionModelEnumerationImplFromJson(Map json) => _$ChatCompletionModelEnumerationImpl( @@ -707,6 +717,9 @@ _$CreateChatCompletionResponseImpl _$$CreateChatCompletionResponseImplFromJson( .toList(), created: json['created'] as int, model: json['model'] as String, + serviceTier: $enumDecodeNullable( + _$ServiceTierEnumMap, json['service_tier'], + unknownValue: JsonKey.nullForUndefinedEnumValue), systemFingerprint: json['system_fingerprint'] as String?, object: json['object'] as String, usage: json['usage'] == null @@ -728,12 +741,18 @@ Map _$$CreateChatCompletionResponseImplToJson( val['choices'] = instance.choices.map((e) => e.toJson()).toList(); val['created'] = instance.created; val['model'] = instance.model; + writeNotNull('service_tier', _$ServiceTierEnumMap[instance.serviceTier]); writeNotNull('system_fingerprint', instance.systemFingerprint); val['object'] = instance.object; writeNotNull('usage', instance.usage?.toJson()); return val; } +const _$ServiceTierEnumMap = { + ServiceTier.scale: 'scale', + ServiceTier.vDefault: 'default', +}; + _$ChatCompletionResponseChoiceImpl _$$ChatCompletionResponseChoiceImplFromJson( Map json) => _$ChatCompletionResponseChoiceImpl( @@ -839,6 +858,9 @@ _$CreateChatCompletionStreamResponseImpl .toList(), created: json['created'] as int?, model: json['model'] as String?, + serviceTier: $enumDecodeNullable( + _$ServiceTierEnumMap, json['service_tier'], + unknownValue: JsonKey.nullForUndefinedEnumValue), systemFingerprint: json['system_fingerprint'] as String?, object: json['object'] as String?, usage: json['usage'] == null @@ -860,6 +882,7 @@ Map _$$CreateChatCompletionStreamResponseImplToJson( val['choices'] = instance.choices.map((e) => e.toJson()).toList(); writeNotNull('created', instance.created); writeNotNull('model', instance.model); + writeNotNull('service_tier', _$ServiceTierEnumMap[instance.serviceTier]); writeNotNull('system_fingerprint', instance.systemFingerprint); writeNotNull('object', instance.object); writeNotNull('usage', instance.usage?.toJson()); diff --git a/packages/openai_dart/lib/src/generated/schema/service_tier.dart b/packages/openai_dart/lib/src/generated/schema/service_tier.dart new file mode 100644 index 00000000..8a01afc5 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/service_tier.dart @@ -0,0 +1,18 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// ENUM: ServiceTier +// ========================================== + +/// The service tier used for processing the request. 
This field is only included if the `service_tier` parameter +/// is specified in the request. +enum ServiceTier { + @JsonValue('scale') + scale, + @JsonValue('default') + vDefault, +} diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 31fe55f9..cb469506 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1908,6 +1908,19 @@ components: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + service_tier: + description: | + Specifies the latency tier to use for processing the request. This parameter is relevant for customers + subscribed to the scale tier service: + - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + - If set to 'default', the request will be processed using the default service tier with a lower + uptime SLA and no latency guarantee. + + When this parameter is set, the response body will include the `service_tier` utilized. + type: string + enum: [ "auto", "default" ] + nullable: true + default: null stop: title: ChatCompletionStop description: | @@ -2297,6 +2310,8 @@ components: model: type: string description: The model used for the chat completion. + service_tier: + $ref: "#/components/schemas/ServiceTier" system_fingerprint: type: string description: | @@ -2349,6 +2364,14 @@ components: "content_filter", "function_call", ] + ServiceTier: + description: | + The service tier used for processing the request. This field is only included if the `service_tier` parameter + is specified in the request. + type: string + enum: [ "scale", "default" ] + example: "scale" + nullable: true ChatCompletionLogprobs: &chat_completion_response_logprobs description: Log probability information for the choice. type: object @@ -2419,6 +2442,8 @@ components: model: type: string description: The model to generate the completion. + service_tier: + $ref: "#/components/schemas/ServiceTier" system_fingerprint: type: string description: | diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 1b0f8c0c..fd863f50 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -7925,6 +7925,17 @@ components: Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. x-oaiMeta: beta: true + service_tier: + description: | + Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: + - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + + When this parameter is set, the response body will include the `service_tier` utilized. + type: string + enum: [ "auto", "default" ] + nullable: true + default: null stop: description: | Up to 4 sequences where the API will stop generating further tokens. @@ -8066,6 +8077,12 @@ components: model: type: string description: The model used for the chat completion. 
+ service_tier: + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + type: string + enum: [ "scale", "default" ] + example: "scale" + nullable: true system_fingerprint: type: string description: | @@ -8242,6 +8259,12 @@ components: model: type: string description: The model to generate the completion. + service_tier: + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + type: string + enum: [ "scale", "default" ] + example: "scale" + nullable: true system_fingerprint: type: string description: | @@ -10462,6 +10485,8 @@ components: tool_choice: $ref: "#/components/schemas/AssistantsApiToolChoiceOption" nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true @@ -10489,6 +10514,7 @@ components: - max_completion_tokens - truncation_strategy - tool_choice + - parallel_tool_calls - response_format x-oaiMeta: name: The run object @@ -10526,7 +10552,8 @@ components: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } CreateRunRequest: type: object @@ -10638,6 +10665,8 @@ components: tool_choice: $ref: "#/components/schemas/AssistantsApiToolChoiceOption" nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true @@ -10857,6 +10886,8 @@ components: tool_choice: $ref: "#/components/schemas/AssistantsApiToolChoiceOption" nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index 4c449cc2..5b2fef22 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 test: ^1.25.2 From 3bc788d6640812c75f00dfec1c1720f4fb239d4d Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 20 Jul 2024 10:56:54 +0200 Subject: [PATCH 189/251] feat: Add support for service tier in ChatOpenAI (#495) --- .../lib/src/chat_models/chat_openai.dart | 17 +++++++++------- .../lib/src/chat_models/mappers.dart | 11 ++++++++++ .../lib/src/chat_models/types.dart | 20 +++++++++++++++++++ 3 files changed, 41 insertions(+), 7 deletions(-) diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index fa03bf4c..e218637a 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -276,13 +276,15 @@ class ChatOpenAI extends BaseChatModel { final bool stream = false, }) { final messagesDtos = messages.toChatCompletionMessages(); - final toolsDtos = options?.tools?.toChatCompletionTool() ?? - defaultOptions.tools?.toChatCompletionTool(); - final toolChoice = options?.toolChoice?.toChatCompletionToolChoice() ?? 
- defaultOptions.toolChoice?.toChatCompletionToolChoice(); - final responseFormat = - options?.responseFormat ?? defaultOptions.responseFormat; - final responseFormatDto = responseFormat?.toChatCompletionResponseFormat(); + final toolsDtos = + (options?.tools ?? defaultOptions.tools)?.toChatCompletionTool(); + final toolChoice = (options?.toolChoice ?? defaultOptions.toolChoice) + ?.toChatCompletionToolChoice(); + final responseFormatDto = + (options?.responseFormat ?? defaultOptions.responseFormat) + ?.toChatCompletionResponseFormat(); + final serviceTierDto = (options?.serviceTier ?? defaultOptions.serviceTier) + .toCreateChatCompletionRequestServiceTier(); return CreateChatCompletionRequest( model: ChatCompletionModel.modelId( @@ -307,6 +309,7 @@ class ChatOpenAI extends BaseChatModel { topP: options?.topP ?? defaultOptions.topP, parallelToolCalls: options?.parallelToolCalls ?? defaultOptions.parallelToolCalls, + serviceTier: serviceTierDto, user: options?.user ?? defaultOptions.user, streamOptions: stream ? const ChatCompletionStreamOptions(includeUsage: true) : null, diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart index 6b434109..0c70fd73 100644 --- a/packages/langchain_openai/lib/src/chat_models/mappers.dart +++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart @@ -260,6 +260,17 @@ extension ChatOpenAIResponseFormatMapper on ChatOpenAIResponseFormat { } } +extension ChatOpenAIServiceTierX on ChatOpenAIServiceTier? { + CreateChatCompletionRequestServiceTier? + toCreateChatCompletionRequestServiceTier() => switch (this) { + ChatOpenAIServiceTier.auto => + CreateChatCompletionRequestServiceTier.auto, + ChatOpenAIServiceTier.vDefault => + CreateChatCompletionRequestServiceTier.vDefault, + null => null, + }; +} + FinishReason _mapFinishReason( final ChatCompletionFinishReason? reason, ) => diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 859c5cdf..0526f937 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -19,6 +19,7 @@ class ChatOpenAIOptions extends ChatModelOptions { this.temperature, this.topP, this.parallelToolCalls, + this.serviceTier, this.user, super.tools, super.toolChoice, @@ -131,6 +132,10 @@ class ChatOpenAIOptions extends ChatModelOptions { /// Ref: https://platform.openai.com/docs/guides/function-calling/parallel-function-calling final bool? parallelToolCalls; + /// Specifies the latency tier to use for processing the request. + /// This is relevant for customers subscribed to the scale tier service. + final ChatOpenAIServiceTier? serviceTier; + /// A unique identifier representing your end-user, which can help OpenAI to /// monitor and detect abuse. /// @@ -151,6 +156,8 @@ class ChatOpenAIOptions extends ChatModelOptions { final List? stop, final double? temperature, final double? topP, + final bool? parallelToolCalls, + final ChatOpenAIServiceTier? serviceTier, final String? user, final List? tools, final ChatToolChoice? toolChoice, @@ -167,6 +174,8 @@ class ChatOpenAIOptions extends ChatModelOptions { stop: stop ?? this.stop, temperature: temperature ?? this.temperature, topP: topP ?? this.topP, + parallelToolCalls: parallelToolCalls ?? this.parallelToolCalls, + serviceTier: serviceTier ?? this.serviceTier, user: user ?? this.user, tools: tools ?? this.tools, toolChoice: toolChoice ?? 
this.toolChoice, @@ -196,3 +205,14 @@ enum ChatOpenAIResponseFormatType { /// guarantees the message the model generates is valid JSON. jsonObject, } + +/// Specifies the latency tier to use for processing the request. +/// This is relevant for customers subscribed to the scale tier service. +enum ChatOpenAIServiceTier { + /// The system will utilize scale tier credits until they are exhausted. + auto, + + /// The request will be processed using the default service tier with a lower + /// uptime SLA and no latency guarantee. + vDefault, +} From 31106fee090a2d964d302ace58624d3d756f3807 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 20 Jul 2024 11:00:12 +0200 Subject: [PATCH 190/251] feat: Support chunking strategy in file_search tool in openai_dart (#496) --- .../chunking_strategy_request_param.dart | 54 + .../chunking_strategy_response_param.dart | 55 + ...reate_vector_store_file_batch_request.dart | 8 +- .../create_vector_store_file_request.dart | 8 +- .../schema/create_vector_store_request.dart | 7 + .../lib/src/generated/schema/schema.dart | 3 + .../src/generated/schema/schema.freezed.dart | 1455 ++++++++++++++++- .../lib/src/generated/schema/schema.g.dart | 160 +- .../schema/static_chunking_strategy.dart | 60 + ...ol_resources_file_search_vector_store.dart | 12 +- .../schema/vector_store_file_object.dart | 9 +- packages/openai_dart/oas/openapi_curated.yaml | 101 ++ .../openai_dart/oas/openapi_official.yaml | 203 ++- 13 files changed, 2088 insertions(+), 47 deletions(-) create mode 100644 packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart diff --git a/packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart new file mode 100644 index 00000000..a8f0c03d --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart @@ -0,0 +1,54 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ChunkingStrategyRequestParam +// ========================================== + +/// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class ChunkingStrategyRequestParam with _$ChunkingStrategyRequestParam { + const ChunkingStrategyRequestParam._(); + + // ------------------------------------------ + // UNION: AutoChunkingStrategyRequestParam + // ------------------------------------------ + + /// Auto Chunking Strategy, the default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` + /// and `chunk_overlap_tokens` of `400`. + const factory ChunkingStrategyRequestParam.auto({ + /// Always `auto`. + required String type, + }) = AutoChunkingStrategyRequestParam; + + // ------------------------------------------ + // UNION: StaticChunkingStrategyRequestParam + // ------------------------------------------ + + /// Static chunking strategy + const factory ChunkingStrategyRequestParam.static({ + /// Always `static`. 
+ required String type, + + /// Static chunking strategy + required StaticChunkingStrategy static, + }) = StaticChunkingStrategyRequestParam; + + /// Object construction from a JSON representation + factory ChunkingStrategyRequestParam.fromJson(Map json) => + _$ChunkingStrategyRequestParamFromJson(json); +} + +// ========================================== +// ENUM: ChunkingStrategyRequestParamEnumType +// ========================================== + +enum ChunkingStrategyRequestParamEnumType { + @JsonValue('auto') + auto, + @JsonValue('static') + static, +} diff --git a/packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart new file mode 100644 index 00000000..c706df60 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart @@ -0,0 +1,55 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ChunkingStrategyResponseParam +// ========================================== + +/// The chunking strategy used to chunk the file(s). +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class ChunkingStrategyResponseParam + with _$ChunkingStrategyResponseParam { + const ChunkingStrategyResponseParam._(); + + // ------------------------------------------ + // UNION: StaticChunkingStrategyResponseParam + // ------------------------------------------ + + /// Static Chunking Strategy. + const factory ChunkingStrategyResponseParam.static({ + /// Always `static`. + required String type, + + /// Static chunking strategy + required StaticChunkingStrategy static, + }) = StaticChunkingStrategyResponseParam; + + // ------------------------------------------ + // UNION: OtherChunkingStrategyResponseParam + // ------------------------------------------ + + /// Other Chunking Strategy. This is returned when the chunking strategy is unknown. Typically, this is because + /// the file was indexed before the `chunking_strategy` concept was introduced in the API. + const factory ChunkingStrategyResponseParam.other({ + /// Always `other`. + required String type, + }) = OtherChunkingStrategyResponseParam; + + /// Object construction from a JSON representation + factory ChunkingStrategyResponseParam.fromJson(Map json) => + _$ChunkingStrategyResponseParamFromJson(json); +} + +// ========================================== +// ENUM: ChunkingStrategyResponseParamEnumType +// ========================================== + +enum ChunkingStrategyResponseParamEnumType { + @JsonValue('static') + static, + @JsonValue('other') + other, +} diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart index 6a607eae..3111c855 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart @@ -18,6 +18,11 @@ class CreateVectorStoreFileBatchRequest const factory CreateVectorStoreFileBatchRequest({ /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. 
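// A sketch of the new chunking-strategy request parameter. The
// ChunkingStrategyRequestParam union and CreateVectorStoreRequest.chunkingStrategy
// come from this patch; the StaticChunkingStrategy field names
// (maxChunkSizeTokens / chunkOverlapTokens) and the createVectorStore call
// shape are assumptions made for illustration only.
import 'package:openai_dart/openai_dart.dart';

Future<void> createStoreWithStaticChunking(OpenAIClient client) async {
  final store = await client.createVectorStore(
    request: CreateVectorStoreRequest(
      name: 'docs',
      fileIds: ['file-abc123'], // placeholder file id
      chunkingStrategy: ChunkingStrategyRequestParam.static(
        type: 'static',
        static: StaticChunkingStrategy(
          // Field names assumed from the spec's max_chunk_size_tokens /
          // chunk_overlap_tokens; the values mirror the `auto` defaults.
          maxChunkSizeTokens: 800,
          chunkOverlapTokens: 400,
        ),
      ),
    ),
  );
  print(store.id);
}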
@JsonKey(name: 'file_ids') required List fileIds, + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, }) = _CreateVectorStoreFileBatchRequest; /// Object construction from a JSON representation @@ -26,7 +31,7 @@ class CreateVectorStoreFileBatchRequest _$CreateVectorStoreFileBatchRequestFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_ids']; + static const List propertyNames = ['file_ids', 'chunking_strategy']; /// Perform validations on the schema property values String? validateSchema() { @@ -37,6 +42,7 @@ class CreateVectorStoreFileBatchRequest Map toMap() { return { 'file_ids': fileIds, + 'chunking_strategy': chunkingStrategy, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart index 742fae3b..c18eadee 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart @@ -17,6 +17,11 @@ class CreateVectorStoreFileRequest with _$CreateVectorStoreFileRequest { const factory CreateVectorStoreFileRequest({ /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_id') required String fileId, + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, }) = _CreateVectorStoreFileRequest; /// Object construction from a JSON representation @@ -24,7 +29,7 @@ class CreateVectorStoreFileRequest with _$CreateVectorStoreFileRequest { _$CreateVectorStoreFileRequestFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_id']; + static const List propertyNames = ['file_id', 'chunking_strategy']; /// Perform validations on the schema property values String? validateSchema() { @@ -35,6 +40,7 @@ class CreateVectorStoreFileRequest with _$CreateVectorStoreFileRequest { Map toMap() { return { 'file_id': fileId, + 'chunking_strategy': chunkingStrategy, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart index bb9e83d7..61e87095 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart @@ -25,6 +25,11 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic metadata, }) = _CreateVectorStoreRequest; @@ -38,6 +43,7 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { 'name', 'file_ids', 'expires_after', + 'chunking_strategy', 'metadata' ]; @@ -52,6 +58,7 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { 'name': name, 'file_ids': fileIds, 'expires_after': expiresAfter, + 'chunking_strategy': chunkingStrategy, 'metadata': metadata, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart index 793315da..e4f6c023 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -127,6 +127,7 @@ part 'update_vector_store_request.dart'; part 'list_vector_stores_response.dart'; part 'delete_vector_store_response.dart'; part 'vector_store_file_object.dart'; +part 'static_chunking_strategy.dart'; part 'create_vector_store_file_request.dart'; part 'list_vector_store_files_response.dart'; part 'delete_vector_store_file_response.dart'; @@ -152,4 +153,6 @@ part 'run_step_details_tool_calls.dart'; part 'run_step_delta_step_details_tool_calls.dart'; part 'run_step_details_tool_calls_code_output.dart'; part 'run_step_delta_step_details_tool_calls_code_output.dart'; +part 'chunking_strategy_request_param.dart'; +part 'chunking_strategy_response_param.dart'; part 'assistant_stream_event.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 7737861a..d1f911c1 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -36641,6 +36641,12 @@ mixin _$ToolResourcesFileSearchVectorStore { @JsonKey(name: 'file_ids', includeIfNull: false) List? get fileIds => throw _privateConstructorUsedError; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy => + throw _privateConstructorUsedError; + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; @@ -36662,7 +36668,11 @@ abstract class $ToolResourcesFileSearchVectorStoreCopyWith<$Res> { @useResult $Res call( {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) dynamic metadata}); + + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -36680,6 +36690,7 @@ class _$ToolResourcesFileSearchVectorStoreCopyWithImpl<$Res, @override $Res call({ Object? fileIds = freezed, + Object? chunkingStrategy = freezed, Object? 
metadata = freezed, }) { return _then(_value.copyWith( @@ -36687,12 +36698,29 @@ class _$ToolResourcesFileSearchVectorStoreCopyWithImpl<$Res, ? _value.fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, metadata: freezed == metadata ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable as dynamic, ) as $Val); } + + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, + (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } } /// @nodoc @@ -36706,7 +36734,12 @@ abstract class _$$ToolResourcesFileSearchVectorStoreImplCopyWith<$Res> @useResult $Res call( {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) dynamic metadata}); + + @override + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -36723,6 +36756,7 @@ class __$$ToolResourcesFileSearchVectorStoreImplCopyWithImpl<$Res> @override $Res call({ Object? fileIds = freezed, + Object? chunkingStrategy = freezed, Object? metadata = freezed, }) { return _then(_$ToolResourcesFileSearchVectorStoreImpl( @@ -36730,6 +36764,10 @@ class __$$ToolResourcesFileSearchVectorStoreImplCopyWithImpl<$Res> ? _value._fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, metadata: freezed == metadata ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable @@ -36745,6 +36783,8 @@ class _$ToolResourcesFileSearchVectorStoreImpl const _$ToolResourcesFileSearchVectorStoreImpl( {@JsonKey(name: 'file_ids', includeIfNull: false) final List? fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy, @JsonKey(includeIfNull: false) this.metadata}) : _fileIds = fileIds, super._(); @@ -36767,6 +36807,12 @@ class _$ToolResourcesFileSearchVectorStoreImpl return EqualUnmodifiableListView(value); } + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy; + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
@override @JsonKey(includeIfNull: false) @@ -36774,7 +36820,7 @@ class _$ToolResourcesFileSearchVectorStoreImpl @override String toString() { - return 'ToolResourcesFileSearchVectorStore(fileIds: $fileIds, metadata: $metadata)'; + return 'ToolResourcesFileSearchVectorStore(fileIds: $fileIds, chunkingStrategy: $chunkingStrategy, metadata: $metadata)'; } @override @@ -36783,6 +36829,8 @@ class _$ToolResourcesFileSearchVectorStoreImpl (other.runtimeType == runtimeType && other is _$ToolResourcesFileSearchVectorStoreImpl && const DeepCollectionEquality().equals(other._fileIds, _fileIds) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy) && const DeepCollectionEquality().equals(other.metadata, metadata)); } @@ -36791,6 +36839,7 @@ class _$ToolResourcesFileSearchVectorStoreImpl int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_fileIds), + chunkingStrategy, const DeepCollectionEquality().hash(metadata)); @JsonKey(ignore: true) @@ -36814,6 +36863,8 @@ abstract class _ToolResourcesFileSearchVectorStore const factory _ToolResourcesFileSearchVectorStore( {@JsonKey(name: 'file_ids', includeIfNull: false) final List? fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) final dynamic metadata}) = _$ToolResourcesFileSearchVectorStoreImpl; const _ToolResourcesFileSearchVectorStore._() : super._(); @@ -36829,6 +36880,12 @@ abstract class _ToolResourcesFileSearchVectorStore List? get fileIds; @override + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy; + @override + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata; @@ -45270,6 +45327,12 @@ mixin _$CreateVectorStoreRequest { VectorStoreExpirationAfter? get expiresAfter => throw _privateConstructorUsedError; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy => + throw _privateConstructorUsedError; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; @@ -45291,9 +45354,12 @@ abstract class $CreateVectorStoreRequestCopyWith<$Res> { @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) dynamic metadata}); $VectorStoreExpirationAfterCopyWith<$Res>? 
get expiresAfter; + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -45313,6 +45379,7 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, Object? name = freezed, Object? fileIds = freezed, Object? expiresAfter = freezed, + Object? chunkingStrategy = freezed, Object? metadata = freezed, }) { return _then(_value.copyWith( @@ -45328,6 +45395,10 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, ? _value.expiresAfter : expiresAfter // ignore: cast_nullable_to_non_nullable as VectorStoreExpirationAfter?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, metadata: freezed == metadata ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable @@ -45347,6 +45418,19 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, return _then(_value.copyWith(expiresAfter: value) as $Val); }); } + + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, + (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } } /// @nodoc @@ -45363,10 +45447,14 @@ abstract class _$$CreateVectorStoreRequestImplCopyWith<$Res> @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) dynamic metadata}); @override $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + @override + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -45385,6 +45473,7 @@ class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> Object? name = freezed, Object? fileIds = freezed, Object? expiresAfter = freezed, + Object? chunkingStrategy = freezed, Object? metadata = freezed, }) { return _then(_$CreateVectorStoreRequestImpl( @@ -45400,6 +45489,10 @@ class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> ? _value.expiresAfter : expiresAfter // ignore: cast_nullable_to_non_nullable as VectorStoreExpirationAfter?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, metadata: freezed == metadata ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable @@ -45416,6 +45509,8 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { @JsonKey(name: 'file_ids', includeIfNull: false) final List? fileIds, @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy, @JsonKey(includeIfNull: false) this.metadata}) : _fileIds = fileIds, super._(); @@ -45447,6 +45542,12 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { @JsonKey(name: 'expires_after', includeIfNull: false) final VectorStoreExpirationAfter? expiresAfter; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? 
chunkingStrategy; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) @@ -45454,7 +45555,7 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { @override String toString() { - return 'CreateVectorStoreRequest(name: $name, fileIds: $fileIds, expiresAfter: $expiresAfter, metadata: $metadata)'; + return 'CreateVectorStoreRequest(name: $name, fileIds: $fileIds, expiresAfter: $expiresAfter, chunkingStrategy: $chunkingStrategy, metadata: $metadata)'; } @override @@ -45466,6 +45567,8 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { const DeepCollectionEquality().equals(other._fileIds, _fileIds) && (identical(other.expiresAfter, expiresAfter) || other.expiresAfter == expiresAfter) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy) && const DeepCollectionEquality().equals(other.metadata, metadata)); } @@ -45476,6 +45579,7 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { name, const DeepCollectionEquality().hash(_fileIds), expiresAfter, + chunkingStrategy, const DeepCollectionEquality().hash(metadata)); @JsonKey(ignore: true) @@ -45500,6 +45604,8 @@ abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { final List? fileIds, @JsonKey(name: 'expires_after', includeIfNull: false) final VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) final dynamic metadata}) = _$CreateVectorStoreRequestImpl; const _CreateVectorStoreRequest._() : super._(); @@ -45524,6 +45630,12 @@ abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { VectorStoreExpirationAfter? get expiresAfter; @override + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy; + @override + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata; @@ -46275,6 +46387,12 @@ mixin _$VectorStoreFileObject { VectorStoreFileObjectLastError? get lastError => throw _privateConstructorUsedError; + /// The chunking strategy used to chunk the file(s). + /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? get chunkingStrategy => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $VectorStoreFileObjectCopyWith get copyWith => @@ -46294,9 +46412,12 @@ abstract class $VectorStoreFileObjectCopyWith<$Res> { @JsonKey(name: 'created_at') int createdAt, @JsonKey(name: 'vector_store_id') String vectorStoreId, VectorStoreFileStatus status, - @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? 
lastError}); + @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? chunkingStrategy}); $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError; + $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -46320,6 +46441,7 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, Object? vectorStoreId = null, Object? status = null, Object? lastError = freezed, + Object? chunkingStrategy = freezed, }) { return _then(_value.copyWith( id: null == id @@ -46350,6 +46472,10 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, ? _value.lastError : lastError // ignore: cast_nullable_to_non_nullable as VectorStoreFileObjectLastError?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyResponseParam?, ) as $Val); } @@ -46365,6 +46491,19 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, return _then(_value.copyWith(lastError: value) as $Val); }); } + + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyResponseParamCopyWith<$Res>( + _value.chunkingStrategy!, (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } } /// @nodoc @@ -46383,10 +46522,14 @@ abstract class _$$VectorStoreFileObjectImplCopyWith<$Res> @JsonKey(name: 'created_at') int createdAt, @JsonKey(name: 'vector_store_id') String vectorStoreId, VectorStoreFileStatus status, - @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError}); + @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? chunkingStrategy}); @override $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError; + @override + $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -46408,6 +46551,7 @@ class __$$VectorStoreFileObjectImplCopyWithImpl<$Res> Object? vectorStoreId = null, Object? status = null, Object? lastError = freezed, + Object? chunkingStrategy = freezed, }) { return _then(_$VectorStoreFileObjectImpl( id: null == id @@ -46438,6 +46582,10 @@ class __$$VectorStoreFileObjectImplCopyWithImpl<$Res> ? _value.lastError : lastError // ignore: cast_nullable_to_non_nullable as VectorStoreFileObjectLastError?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyResponseParam?, )); } } @@ -46452,7 +46600,9 @@ class _$VectorStoreFileObjectImpl extends _VectorStoreFileObject { @JsonKey(name: 'created_at') required this.createdAt, @JsonKey(name: 'vector_store_id') required this.vectorStoreId, required this.status, - @JsonKey(name: 'last_error') required this.lastError}) + @JsonKey(name: 'last_error') required this.lastError, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy}) : super._(); factory _$VectorStoreFileObjectImpl.fromJson(Map json) => @@ -46490,9 +46640,15 @@ class _$VectorStoreFileObjectImpl extends _VectorStoreFileObject { @JsonKey(name: 'last_error') final VectorStoreFileObjectLastError? lastError; + /// The chunking strategy used to chunk the file(s). 
+ /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyResponseParam? chunkingStrategy; + @override String toString() { - return 'VectorStoreFileObject(id: $id, object: $object, usageBytes: $usageBytes, createdAt: $createdAt, vectorStoreId: $vectorStoreId, status: $status, lastError: $lastError)'; + return 'VectorStoreFileObject(id: $id, object: $object, usageBytes: $usageBytes, createdAt: $createdAt, vectorStoreId: $vectorStoreId, status: $status, lastError: $lastError, chunkingStrategy: $chunkingStrategy)'; } @override @@ -46510,13 +46666,15 @@ class _$VectorStoreFileObjectImpl extends _VectorStoreFileObject { other.vectorStoreId == vectorStoreId) && (identical(other.status, status) || other.status == status) && (identical(other.lastError, lastError) || - other.lastError == lastError)); + other.lastError == lastError) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy)); } @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, object, usageBytes, - createdAt, vectorStoreId, status, lastError); + createdAt, vectorStoreId, status, lastError, chunkingStrategy); @JsonKey(ignore: true) @override @@ -46542,7 +46700,9 @@ abstract class _VectorStoreFileObject extends VectorStoreFileObject { @JsonKey(name: 'vector_store_id') required final String vectorStoreId, required final VectorStoreFileStatus status, @JsonKey(name: 'last_error') - required final VectorStoreFileObjectLastError? lastError}) = + required final VectorStoreFileObjectLastError? lastError, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyResponseParam? chunkingStrategy}) = _$VectorStoreFileObjectImpl; const _VectorStoreFileObject._() : super._(); @@ -46582,6 +46742,12 @@ abstract class _VectorStoreFileObject extends VectorStoreFileObject { @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? get lastError; @override + + /// The chunking strategy used to chunk the file(s). + /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? get chunkingStrategy; + @override @JsonKey(ignore: true) _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -46768,6 +46934,204 @@ abstract class _VectorStoreFileObjectLastError get copyWith => throw _privateConstructorUsedError; } +StaticChunkingStrategy _$StaticChunkingStrategyFromJson( + Map json) { + return _StaticChunkingStrategy.fromJson(json); +} + +/// @nodoc +mixin _$StaticChunkingStrategy { + /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + /// maximum value is `4096`. + @JsonKey(name: 'max_chunk_size_tokens') + int get maxChunkSizeTokens => throw _privateConstructorUsedError; + + /// The number of tokens that overlap between chunks. The default value is `400`. + /// + /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. 
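On the response side, `VectorStoreFileObject` gains a read-only `chunkingStrategy` of type `ChunkingStrategyResponseParam`. A sketch of decoding such an object with the generated `fromJson` shown further down in this patch; every id and number below is a placeholder, not real API output:

import 'package:openai_dart/openai_dart.dart';

void main() {
  // Placeholder payload in the shape of a Vector Store Files response; the
  // generated fromJson maps 'chunking_strategy' onto chunkingStrategy.
  final file = VectorStoreFileObject.fromJson({
    'id': 'vsf_123',
    'object': 'vector_store.file',
    'usage_bytes': 1234,
    'created_at': 1714600000,
    'vector_store_id': 'vs_123',
    'status': 'in_progress',
    'last_error': null,
    'chunking_strategy': {
      'type': 'static',
      'static': {'max_chunk_size_tokens': 800, 'chunk_overlap_tokens': 400},
    },
  });
  print(file.chunkingStrategy); // the decoded static strategy
}
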
+ @JsonKey(name: 'chunk_overlap_tokens') + int get chunkOverlapTokens => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $StaticChunkingStrategyCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $StaticChunkingStrategyCopyWith<$Res> { + factory $StaticChunkingStrategyCopyWith(StaticChunkingStrategy value, + $Res Function(StaticChunkingStrategy) then) = + _$StaticChunkingStrategyCopyWithImpl<$Res, StaticChunkingStrategy>; + @useResult + $Res call( + {@JsonKey(name: 'max_chunk_size_tokens') int maxChunkSizeTokens, + @JsonKey(name: 'chunk_overlap_tokens') int chunkOverlapTokens}); +} + +/// @nodoc +class _$StaticChunkingStrategyCopyWithImpl<$Res, + $Val extends StaticChunkingStrategy> + implements $StaticChunkingStrategyCopyWith<$Res> { + _$StaticChunkingStrategyCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? maxChunkSizeTokens = null, + Object? chunkOverlapTokens = null, + }) { + return _then(_value.copyWith( + maxChunkSizeTokens: null == maxChunkSizeTokens + ? _value.maxChunkSizeTokens + : maxChunkSizeTokens // ignore: cast_nullable_to_non_nullable + as int, + chunkOverlapTokens: null == chunkOverlapTokens + ? _value.chunkOverlapTokens + : chunkOverlapTokens // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$StaticChunkingStrategyImplCopyWith<$Res> + implements $StaticChunkingStrategyCopyWith<$Res> { + factory _$$StaticChunkingStrategyImplCopyWith( + _$StaticChunkingStrategyImpl value, + $Res Function(_$StaticChunkingStrategyImpl) then) = + __$$StaticChunkingStrategyImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'max_chunk_size_tokens') int maxChunkSizeTokens, + @JsonKey(name: 'chunk_overlap_tokens') int chunkOverlapTokens}); +} + +/// @nodoc +class __$$StaticChunkingStrategyImplCopyWithImpl<$Res> + extends _$StaticChunkingStrategyCopyWithImpl<$Res, + _$StaticChunkingStrategyImpl> + implements _$$StaticChunkingStrategyImplCopyWith<$Res> { + __$$StaticChunkingStrategyImplCopyWithImpl( + _$StaticChunkingStrategyImpl _value, + $Res Function(_$StaticChunkingStrategyImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? maxChunkSizeTokens = null, + Object? chunkOverlapTokens = null, + }) { + return _then(_$StaticChunkingStrategyImpl( + maxChunkSizeTokens: null == maxChunkSizeTokens + ? _value.maxChunkSizeTokens + : maxChunkSizeTokens // ignore: cast_nullable_to_non_nullable + as int, + chunkOverlapTokens: null == chunkOverlapTokens + ? _value.chunkOverlapTokens + : chunkOverlapTokens // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$StaticChunkingStrategyImpl extends _StaticChunkingStrategy { + const _$StaticChunkingStrategyImpl( + {@JsonKey(name: 'max_chunk_size_tokens') required this.maxChunkSizeTokens, + @JsonKey(name: 'chunk_overlap_tokens') required this.chunkOverlapTokens}) + : super._(); + + factory _$StaticChunkingStrategyImpl.fromJson(Map json) => + _$$StaticChunkingStrategyImplFromJson(json); + + /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + /// maximum value is `4096`. 
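`StaticChunkingStrategy` carries the two tunables documented above: chunk size defaults to 800 tokens and must stay within [100, 4096], overlap defaults to 400 tokens and must not exceed half of the chunk size. A small sketch that constructs one and applies those documented bounds as a client-side sanity check; the helper function is illustrative only and assumes the public factory mirrors the generated one (`maxChunkSizeTokens`, `chunkOverlapTokens`):

import 'package:openai_dart/openai_dart.dart';

/// Illustrative check of the documented bounds: chunk size in [100, 4096]
/// and overlap no more than half of the chunk size.
bool isValidStaticStrategy(StaticChunkingStrategy s) =>
    s.maxChunkSizeTokens >= 100 &&
    s.maxChunkSizeTokens <= 4096 &&
    s.chunkOverlapTokens <= s.maxChunkSizeTokens ~/ 2;

void main() {
  // The documented defaults: 800-token chunks with a 400-token overlap.
  final strategy = StaticChunkingStrategy(
    maxChunkSizeTokens: 800,
    chunkOverlapTokens: 400,
  );
  print(isValidStaticStrategy(strategy)); // true
  print(strategy.toJson()); // {max_chunk_size_tokens: 800, chunk_overlap_tokens: 400}
}
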
+ @override + @JsonKey(name: 'max_chunk_size_tokens') + final int maxChunkSizeTokens; + + /// The number of tokens that overlap between chunks. The default value is `400`. + /// + /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. + @override + @JsonKey(name: 'chunk_overlap_tokens') + final int chunkOverlapTokens; + + @override + String toString() { + return 'StaticChunkingStrategy(maxChunkSizeTokens: $maxChunkSizeTokens, chunkOverlapTokens: $chunkOverlapTokens)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$StaticChunkingStrategyImpl && + (identical(other.maxChunkSizeTokens, maxChunkSizeTokens) || + other.maxChunkSizeTokens == maxChunkSizeTokens) && + (identical(other.chunkOverlapTokens, chunkOverlapTokens) || + other.chunkOverlapTokens == chunkOverlapTokens)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, maxChunkSizeTokens, chunkOverlapTokens); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$StaticChunkingStrategyImplCopyWith<_$StaticChunkingStrategyImpl> + get copyWith => __$$StaticChunkingStrategyImplCopyWithImpl< + _$StaticChunkingStrategyImpl>(this, _$identity); + + @override + Map toJson() { + return _$$StaticChunkingStrategyImplToJson( + this, + ); + } +} + +abstract class _StaticChunkingStrategy extends StaticChunkingStrategy { + const factory _StaticChunkingStrategy( + {@JsonKey(name: 'max_chunk_size_tokens') + required final int maxChunkSizeTokens, + @JsonKey(name: 'chunk_overlap_tokens') + required final int chunkOverlapTokens}) = _$StaticChunkingStrategyImpl; + const _StaticChunkingStrategy._() : super._(); + + factory _StaticChunkingStrategy.fromJson(Map json) = + _$StaticChunkingStrategyImpl.fromJson; + + @override + + /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + /// maximum value is `4096`. + @JsonKey(name: 'max_chunk_size_tokens') + int get maxChunkSizeTokens; + @override + + /// The number of tokens that overlap between chunks. The default value is `400`. + /// + /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. + @JsonKey(name: 'chunk_overlap_tokens') + int get chunkOverlapTokens; + @override + @JsonKey(ignore: true) + _$$StaticChunkingStrategyImplCopyWith<_$StaticChunkingStrategyImpl> + get copyWith => throw _privateConstructorUsedError; +} + CreateVectorStoreFileRequest _$CreateVectorStoreFileRequestFromJson( Map json) { return _CreateVectorStoreFileRequest.fromJson(json); @@ -46779,6 +47143,12 @@ mixin _$CreateVectorStoreFileRequest { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? 
get chunkingStrategy => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $CreateVectorStoreFileRequestCopyWith @@ -46793,7 +47163,12 @@ abstract class $CreateVectorStoreFileRequestCopyWith<$Res> { _$CreateVectorStoreFileRequestCopyWithImpl<$Res, CreateVectorStoreFileRequest>; @useResult - $Res call({@JsonKey(name: 'file_id') String fileId}); + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy}); + + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -46811,14 +47186,32 @@ class _$CreateVectorStoreFileRequestCopyWithImpl<$Res, @override $Res call({ Object? fileId = null, + Object? chunkingStrategy = freezed, }) { return _then(_value.copyWith( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, ) as $Val); } + + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, + (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } } /// @nodoc @@ -46830,7 +47223,13 @@ abstract class _$$CreateVectorStoreFileRequestImplCopyWith<$Res> __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(name: 'file_id') String fileId}); + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy}); + + @override + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -46847,12 +47246,17 @@ class __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res> @override $Res call({ Object? fileId = null, + Object? chunkingStrategy = freezed, }) { return _then(_$CreateVectorStoreFileRequestImpl( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, )); } } @@ -46861,7 +47265,9 @@ class __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res> @JsonSerializable() class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { const _$CreateVectorStoreFileRequestImpl( - {@JsonKey(name: 'file_id') required this.fileId}) + {@JsonKey(name: 'file_id') required this.fileId, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy}) : super._(); factory _$CreateVectorStoreFileRequestImpl.fromJson( @@ -46873,9 +47279,15 @@ class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { @JsonKey(name: 'file_id') final String fileId; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? 
chunkingStrategy; + @override String toString() { - return 'CreateVectorStoreFileRequest(fileId: $fileId)'; + return 'CreateVectorStoreFileRequest(fileId: $fileId, chunkingStrategy: $chunkingStrategy)'; } @override @@ -46883,12 +47295,14 @@ class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { return identical(this, other) || (other.runtimeType == runtimeType && other is _$CreateVectorStoreFileRequestImpl && - (identical(other.fileId, fileId) || other.fileId == fileId)); + (identical(other.fileId, fileId) || other.fileId == fileId) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, fileId); + int get hashCode => Object.hash(runtimeType, fileId, chunkingStrategy); @JsonKey(ignore: true) @override @@ -46909,7 +47323,9 @@ class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { abstract class _CreateVectorStoreFileRequest extends CreateVectorStoreFileRequest { const factory _CreateVectorStoreFileRequest( - {@JsonKey(name: 'file_id') required final String fileId}) = + {@JsonKey(name: 'file_id') required final String fileId, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy}) = _$CreateVectorStoreFileRequestImpl; const _CreateVectorStoreFileRequest._() : super._(); @@ -46922,6 +47338,12 @@ abstract class _CreateVectorStoreFileRequest @JsonKey(name: 'file_id') String get fileId; @override + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy; + @override @JsonKey(ignore: true) _$$CreateVectorStoreFileRequestImplCopyWith< _$CreateVectorStoreFileRequestImpl> @@ -48012,6 +48434,12 @@ mixin _$CreateVectorStoreFileBatchRequest { @JsonKey(name: 'file_ids') List get fileIds => throw _privateConstructorUsedError; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $CreateVectorStoreFileBatchRequestCopyWith @@ -48026,7 +48454,12 @@ abstract class $CreateVectorStoreFileBatchRequestCopyWith<$Res> { _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, CreateVectorStoreFileBatchRequest>; @useResult - $Res call({@JsonKey(name: 'file_ids') List fileIds}); + $Res call( + {@JsonKey(name: 'file_ids') List fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy}); + + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -48044,14 +48477,32 @@ class _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, @override $Res call({ Object? fileIds = null, + Object? chunkingStrategy = freezed, }) { return _then(_value.copyWith( fileIds: null == fileIds ? _value.fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List, + chunkingStrategy: freezed == chunkingStrategy + ? 
_value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, ) as $Val); } + + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, + (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } } /// @nodoc @@ -48063,7 +48514,13 @@ abstract class _$$CreateVectorStoreFileBatchRequestImplCopyWith<$Res> __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(name: 'file_ids') List fileIds}); + $Res call( + {@JsonKey(name: 'file_ids') List fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy}); + + @override + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -48080,12 +48537,17 @@ class __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res> @override $Res call({ Object? fileIds = null, + Object? chunkingStrategy = freezed, }) { return _then(_$CreateVectorStoreFileBatchRequestImpl( fileIds: null == fileIds ? _value._fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, )); } } @@ -48095,7 +48557,9 @@ class __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res> class _$CreateVectorStoreFileBatchRequestImpl extends _CreateVectorStoreFileBatchRequest { const _$CreateVectorStoreFileBatchRequestImpl( - {@JsonKey(name: 'file_ids') required final List fileIds}) + {@JsonKey(name: 'file_ids') required final List fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy}) : _fileIds = fileIds, super._(); @@ -48115,9 +48579,15 @@ class _$CreateVectorStoreFileBatchRequestImpl return EqualUnmodifiableListView(_fileIds); } + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? 
chunkingStrategy; + @override String toString() { - return 'CreateVectorStoreFileBatchRequest(fileIds: $fileIds)'; + return 'CreateVectorStoreFileBatchRequest(fileIds: $fileIds, chunkingStrategy: $chunkingStrategy)'; } @override @@ -48125,13 +48595,15 @@ class _$CreateVectorStoreFileBatchRequestImpl return identical(this, other) || (other.runtimeType == runtimeType && other is _$CreateVectorStoreFileBatchRequestImpl && - const DeepCollectionEquality().equals(other._fileIds, _fileIds)); + const DeepCollectionEquality().equals(other._fileIds, _fileIds) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy)); } @JsonKey(ignore: true) @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_fileIds)); + int get hashCode => Object.hash(runtimeType, + const DeepCollectionEquality().hash(_fileIds), chunkingStrategy); @JsonKey(ignore: true) @override @@ -48152,7 +48624,9 @@ class _$CreateVectorStoreFileBatchRequestImpl abstract class _CreateVectorStoreFileBatchRequest extends CreateVectorStoreFileBatchRequest { const factory _CreateVectorStoreFileBatchRequest( - {@JsonKey(name: 'file_ids') required final List fileIds}) = + {@JsonKey(name: 'file_ids') required final List fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy}) = _$CreateVectorStoreFileBatchRequestImpl; const _CreateVectorStoreFileBatchRequest._() : super._(); @@ -48166,6 +48640,12 @@ abstract class _CreateVectorStoreFileBatchRequest @JsonKey(name: 'file_ids') List get fileIds; @override + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy; + @override @JsonKey(ignore: true) _$$CreateVectorStoreFileBatchRequestImplCopyWith< _$CreateVectorStoreFileBatchRequestImpl> @@ -62244,6 +62724,933 @@ abstract class RunStepDeltaStepDetailsToolCallsCodeOutputImageObject get copyWith => throw _privateConstructorUsedError; } +ChunkingStrategyRequestParam _$ChunkingStrategyRequestParamFromJson( + Map json) { + switch (json['type']) { + case 'auto': + return AutoChunkingStrategyRequestParam.fromJson(json); + case 'static': + return StaticChunkingStrategyRequestParam.fromJson(json); + + default: + throw CheckedFromJsonException( + json, + 'type', + 'ChunkingStrategyRequestParam', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$ChunkingStrategyRequestParam { + /// Always `auto`. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String type) auto, + required TResult Function(String type, StaticChunkingStrategy static) + static, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type)? auto, + TResult? Function(String type, StaticChunkingStrategy static)? static, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type)? auto, + TResult Function(String type, StaticChunkingStrategy static)? 
static, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(AutoChunkingStrategyRequestParam value) auto, + required TResult Function(StaticChunkingStrategyRequestParam value) static, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(AutoChunkingStrategyRequestParam value)? auto, + TResult? Function(StaticChunkingStrategyRequestParam value)? static, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(AutoChunkingStrategyRequestParam value)? auto, + TResult Function(StaticChunkingStrategyRequestParam value)? static, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ChunkingStrategyRequestParamCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChunkingStrategyRequestParamCopyWith<$Res> { + factory $ChunkingStrategyRequestParamCopyWith( + ChunkingStrategyRequestParam value, + $Res Function(ChunkingStrategyRequestParam) then) = + _$ChunkingStrategyRequestParamCopyWithImpl<$Res, + ChunkingStrategyRequestParam>; + @useResult + $Res call({String type}); +} + +/// @nodoc +class _$ChunkingStrategyRequestParamCopyWithImpl<$Res, + $Val extends ChunkingStrategyRequestParam> + implements $ChunkingStrategyRequestParamCopyWith<$Res> { + _$ChunkingStrategyRequestParamCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$AutoChunkingStrategyRequestParamImplCopyWith<$Res> + implements $ChunkingStrategyRequestParamCopyWith<$Res> { + factory _$$AutoChunkingStrategyRequestParamImplCopyWith( + _$AutoChunkingStrategyRequestParamImpl value, + $Res Function(_$AutoChunkingStrategyRequestParamImpl) then) = + __$$AutoChunkingStrategyRequestParamImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String type}); +} + +/// @nodoc +class __$$AutoChunkingStrategyRequestParamImplCopyWithImpl<$Res> + extends _$ChunkingStrategyRequestParamCopyWithImpl<$Res, + _$AutoChunkingStrategyRequestParamImpl> + implements _$$AutoChunkingStrategyRequestParamImplCopyWith<$Res> { + __$$AutoChunkingStrategyRequestParamImplCopyWithImpl( + _$AutoChunkingStrategyRequestParamImpl _value, + $Res Function(_$AutoChunkingStrategyRequestParamImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$AutoChunkingStrategyRequestParamImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$AutoChunkingStrategyRequestParamImpl + extends AutoChunkingStrategyRequestParam { + const _$AutoChunkingStrategyRequestParamImpl({required this.type}) + : super._(); + + factory _$AutoChunkingStrategyRequestParamImpl.fromJson( + Map json) => + _$$AutoChunkingStrategyRequestParamImplFromJson(json); + + /// Always `auto`. 
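The request-side union accepts either an `auto` or a `static` strategy, and `ChunkingStrategyRequestParam.fromJson` dispatches on the `type` discriminator as shown above. A sketch of passing the static variant (defined just below) when creating a vector store, assuming the public `CreateVectorStoreRequest` factory mirrors the generated one (`name`, `fileIds`, `chunkingStrategy`); the name and file id are placeholders:

import 'package:openai_dart/openai_dart.dart';

void main() {
  final request = CreateVectorStoreRequest(
    name: 'docs', // placeholder name
    fileIds: ['file-abc123'], // placeholder file id
    chunkingStrategy: StaticChunkingStrategyRequestParam(
      type: 'static',
      static: StaticChunkingStrategy(
        maxChunkSizeTokens: 1200,
        chunkOverlapTokens: 200,
      ),
    ),
  );
  // 'chunking_strategy' is only written to the payload when the field is
  // non-null, per the writeNotNull call in the generated toJson.
  print(request.toJson());
}
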
+ @override + final String type; + + @override + String toString() { + return 'ChunkingStrategyRequestParam.auto(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$AutoChunkingStrategyRequestParamImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$AutoChunkingStrategyRequestParamImplCopyWith< + _$AutoChunkingStrategyRequestParamImpl> + get copyWith => __$$AutoChunkingStrategyRequestParamImplCopyWithImpl< + _$AutoChunkingStrategyRequestParamImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String type) auto, + required TResult Function(String type, StaticChunkingStrategy static) + static, + }) { + return auto(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type)? auto, + TResult? Function(String type, StaticChunkingStrategy static)? static, + }) { + return auto?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type)? auto, + TResult Function(String type, StaticChunkingStrategy static)? static, + required TResult orElse(), + }) { + if (auto != null) { + return auto(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(AutoChunkingStrategyRequestParam value) auto, + required TResult Function(StaticChunkingStrategyRequestParam value) static, + }) { + return auto(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(AutoChunkingStrategyRequestParam value)? auto, + TResult? Function(StaticChunkingStrategyRequestParam value)? static, + }) { + return auto?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(AutoChunkingStrategyRequestParam value)? auto, + TResult Function(StaticChunkingStrategyRequestParam value)? static, + required TResult orElse(), + }) { + if (auto != null) { + return auto(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$AutoChunkingStrategyRequestParamImplToJson( + this, + ); + } +} + +abstract class AutoChunkingStrategyRequestParam + extends ChunkingStrategyRequestParam { + const factory AutoChunkingStrategyRequestParam({required final String type}) = + _$AutoChunkingStrategyRequestParamImpl; + const AutoChunkingStrategyRequestParam._() : super._(); + + factory AutoChunkingStrategyRequestParam.fromJson(Map json) = + _$AutoChunkingStrategyRequestParamImpl.fromJson; + + @override + + /// Always `auto`. 
+ String get type; + @override + @JsonKey(ignore: true) + _$$AutoChunkingStrategyRequestParamImplCopyWith< + _$AutoChunkingStrategyRequestParamImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$StaticChunkingStrategyRequestParamImplCopyWith<$Res> + implements $ChunkingStrategyRequestParamCopyWith<$Res> { + factory _$$StaticChunkingStrategyRequestParamImplCopyWith( + _$StaticChunkingStrategyRequestParamImpl value, + $Res Function(_$StaticChunkingStrategyRequestParamImpl) then) = + __$$StaticChunkingStrategyRequestParamImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String type, StaticChunkingStrategy static}); + + $StaticChunkingStrategyCopyWith<$Res> get static; +} + +/// @nodoc +class __$$StaticChunkingStrategyRequestParamImplCopyWithImpl<$Res> + extends _$ChunkingStrategyRequestParamCopyWithImpl<$Res, + _$StaticChunkingStrategyRequestParamImpl> + implements _$$StaticChunkingStrategyRequestParamImplCopyWith<$Res> { + __$$StaticChunkingStrategyRequestParamImplCopyWithImpl( + _$StaticChunkingStrategyRequestParamImpl _value, + $Res Function(_$StaticChunkingStrategyRequestParamImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? static = null, + }) { + return _then(_$StaticChunkingStrategyRequestParamImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + static: null == static + ? _value.static + : static // ignore: cast_nullable_to_non_nullable + as StaticChunkingStrategy, + )); + } + + @override + @pragma('vm:prefer-inline') + $StaticChunkingStrategyCopyWith<$Res> get static { + return $StaticChunkingStrategyCopyWith<$Res>(_value.static, (value) { + return _then(_value.copyWith(static: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$StaticChunkingStrategyRequestParamImpl + extends StaticChunkingStrategyRequestParam { + const _$StaticChunkingStrategyRequestParamImpl( + {required this.type, required this.static}) + : super._(); + + factory _$StaticChunkingStrategyRequestParamImpl.fromJson( + Map json) => + _$$StaticChunkingStrategyRequestParamImplFromJson(json); + + /// Always `static`. + @override + final String type; + + /// Static chunking strategy + @override + final StaticChunkingStrategy static; + + @override + String toString() { + return 'ChunkingStrategyRequestParam.static(type: $type, static: $static)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$StaticChunkingStrategyRequestParamImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.static, static) || other.static == static)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type, static); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$StaticChunkingStrategyRequestParamImplCopyWith< + _$StaticChunkingStrategyRequestParamImpl> + get copyWith => __$$StaticChunkingStrategyRequestParamImplCopyWithImpl< + _$StaticChunkingStrategyRequestParamImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String type) auto, + required TResult Function(String type, StaticChunkingStrategy static) + static, + }) { + return static(type, this.static); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type)? auto, + TResult? 
Function(String type, StaticChunkingStrategy static)? static, + }) { + return static?.call(type, this.static); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type)? auto, + TResult Function(String type, StaticChunkingStrategy static)? static, + required TResult orElse(), + }) { + if (static != null) { + return static(type, this.static); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(AutoChunkingStrategyRequestParam value) auto, + required TResult Function(StaticChunkingStrategyRequestParam value) static, + }) { + return static(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(AutoChunkingStrategyRequestParam value)? auto, + TResult? Function(StaticChunkingStrategyRequestParam value)? static, + }) { + return static?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(AutoChunkingStrategyRequestParam value)? auto, + TResult Function(StaticChunkingStrategyRequestParam value)? static, + required TResult orElse(), + }) { + if (static != null) { + return static(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$StaticChunkingStrategyRequestParamImplToJson( + this, + ); + } +} + +abstract class StaticChunkingStrategyRequestParam + extends ChunkingStrategyRequestParam { + const factory StaticChunkingStrategyRequestParam( + {required final String type, + required final StaticChunkingStrategy static}) = + _$StaticChunkingStrategyRequestParamImpl; + const StaticChunkingStrategyRequestParam._() : super._(); + + factory StaticChunkingStrategyRequestParam.fromJson( + Map json) = + _$StaticChunkingStrategyRequestParamImpl.fromJson; + + @override + + /// Always `static`. + String get type; + + /// Static chunking strategy + StaticChunkingStrategy get static; + @override + @JsonKey(ignore: true) + _$$StaticChunkingStrategyRequestParamImplCopyWith< + _$StaticChunkingStrategyRequestParamImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ChunkingStrategyResponseParam _$ChunkingStrategyResponseParamFromJson( + Map json) { + switch (json['type']) { + case 'static': + return StaticChunkingStrategyResponseParam.fromJson(json); + case 'other': + return OtherChunkingStrategyResponseParam.fromJson(json); + + default: + throw CheckedFromJsonException( + json, + 'type', + 'ChunkingStrategyResponseParam', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$ChunkingStrategyResponseParam { + /// Always `static`. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String type, StaticChunkingStrategy static) + static, + required TResult Function(String type) other, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type, StaticChunkingStrategy static)? static, + TResult? Function(String type)? other, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type, StaticChunkingStrategy static)? static, + TResult Function(String type)? other, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(StaticChunkingStrategyResponseParam value) static, + required TResult Function(OtherChunkingStrategyResponseParam value) other, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? 
Function(StaticChunkingStrategyResponseParam value)? static, + TResult? Function(OtherChunkingStrategyResponseParam value)? other, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(StaticChunkingStrategyResponseParam value)? static, + TResult Function(OtherChunkingStrategyResponseParam value)? other, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ChunkingStrategyResponseParamCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChunkingStrategyResponseParamCopyWith<$Res> { + factory $ChunkingStrategyResponseParamCopyWith( + ChunkingStrategyResponseParam value, + $Res Function(ChunkingStrategyResponseParam) then) = + _$ChunkingStrategyResponseParamCopyWithImpl<$Res, + ChunkingStrategyResponseParam>; + @useResult + $Res call({String type}); +} + +/// @nodoc +class _$ChunkingStrategyResponseParamCopyWithImpl<$Res, + $Val extends ChunkingStrategyResponseParam> + implements $ChunkingStrategyResponseParamCopyWith<$Res> { + _$ChunkingStrategyResponseParamCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$StaticChunkingStrategyResponseParamImplCopyWith<$Res> + implements $ChunkingStrategyResponseParamCopyWith<$Res> { + factory _$$StaticChunkingStrategyResponseParamImplCopyWith( + _$StaticChunkingStrategyResponseParamImpl value, + $Res Function(_$StaticChunkingStrategyResponseParamImpl) then) = + __$$StaticChunkingStrategyResponseParamImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String type, StaticChunkingStrategy static}); + + $StaticChunkingStrategyCopyWith<$Res> get static; +} + +/// @nodoc +class __$$StaticChunkingStrategyResponseParamImplCopyWithImpl<$Res> + extends _$ChunkingStrategyResponseParamCopyWithImpl<$Res, + _$StaticChunkingStrategyResponseParamImpl> + implements _$$StaticChunkingStrategyResponseParamImplCopyWith<$Res> { + __$$StaticChunkingStrategyResponseParamImplCopyWithImpl( + _$StaticChunkingStrategyResponseParamImpl _value, + $Res Function(_$StaticChunkingStrategyResponseParamImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? static = null, + }) { + return _then(_$StaticChunkingStrategyResponseParamImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + static: null == static + ? 
_value.static + : static // ignore: cast_nullable_to_non_nullable + as StaticChunkingStrategy, + )); + } + + @override + @pragma('vm:prefer-inline') + $StaticChunkingStrategyCopyWith<$Res> get static { + return $StaticChunkingStrategyCopyWith<$Res>(_value.static, (value) { + return _then(_value.copyWith(static: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$StaticChunkingStrategyResponseParamImpl + extends StaticChunkingStrategyResponseParam { + const _$StaticChunkingStrategyResponseParamImpl( + {required this.type, required this.static}) + : super._(); + + factory _$StaticChunkingStrategyResponseParamImpl.fromJson( + Map json) => + _$$StaticChunkingStrategyResponseParamImplFromJson(json); + + /// Always `static`. + @override + final String type; + + /// Static chunking strategy + @override + final StaticChunkingStrategy static; + + @override + String toString() { + return 'ChunkingStrategyResponseParam.static(type: $type, static: $static)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$StaticChunkingStrategyResponseParamImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.static, static) || other.static == static)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type, static); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$StaticChunkingStrategyResponseParamImplCopyWith< + _$StaticChunkingStrategyResponseParamImpl> + get copyWith => __$$StaticChunkingStrategyResponseParamImplCopyWithImpl< + _$StaticChunkingStrategyResponseParamImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String type, StaticChunkingStrategy static) + static, + required TResult Function(String type) other, + }) { + return static(type, this.static); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type, StaticChunkingStrategy static)? static, + TResult? Function(String type)? other, + }) { + return static?.call(type, this.static); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type, StaticChunkingStrategy static)? static, + TResult Function(String type)? other, + required TResult orElse(), + }) { + if (static != null) { + return static(type, this.static); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(StaticChunkingStrategyResponseParam value) static, + required TResult Function(OtherChunkingStrategyResponseParam value) other, + }) { + return static(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(StaticChunkingStrategyResponseParam value)? static, + TResult? Function(OtherChunkingStrategyResponseParam value)? other, + }) { + return static?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(StaticChunkingStrategyResponseParam value)? static, + TResult Function(OtherChunkingStrategyResponseParam value)? 
other, + required TResult orElse(), + }) { + if (static != null) { + return static(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$StaticChunkingStrategyResponseParamImplToJson( + this, + ); + } +} + +abstract class StaticChunkingStrategyResponseParam + extends ChunkingStrategyResponseParam { + const factory StaticChunkingStrategyResponseParam( + {required final String type, + required final StaticChunkingStrategy static}) = + _$StaticChunkingStrategyResponseParamImpl; + const StaticChunkingStrategyResponseParam._() : super._(); + + factory StaticChunkingStrategyResponseParam.fromJson( + Map json) = + _$StaticChunkingStrategyResponseParamImpl.fromJson; + + @override + + /// Always `static`. + String get type; + + /// Static chunking strategy + StaticChunkingStrategy get static; + @override + @JsonKey(ignore: true) + _$$StaticChunkingStrategyResponseParamImplCopyWith< + _$StaticChunkingStrategyResponseParamImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$OtherChunkingStrategyResponseParamImplCopyWith<$Res> + implements $ChunkingStrategyResponseParamCopyWith<$Res> { + factory _$$OtherChunkingStrategyResponseParamImplCopyWith( + _$OtherChunkingStrategyResponseParamImpl value, + $Res Function(_$OtherChunkingStrategyResponseParamImpl) then) = + __$$OtherChunkingStrategyResponseParamImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String type}); +} + +/// @nodoc +class __$$OtherChunkingStrategyResponseParamImplCopyWithImpl<$Res> + extends _$ChunkingStrategyResponseParamCopyWithImpl<$Res, + _$OtherChunkingStrategyResponseParamImpl> + implements _$$OtherChunkingStrategyResponseParamImplCopyWith<$Res> { + __$$OtherChunkingStrategyResponseParamImplCopyWithImpl( + _$OtherChunkingStrategyResponseParamImpl _value, + $Res Function(_$OtherChunkingStrategyResponseParamImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$OtherChunkingStrategyResponseParamImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$OtherChunkingStrategyResponseParamImpl + extends OtherChunkingStrategyResponseParam { + const _$OtherChunkingStrategyResponseParamImpl({required this.type}) + : super._(); + + factory _$OtherChunkingStrategyResponseParamImpl.fromJson( + Map json) => + _$$OtherChunkingStrategyResponseParamImplFromJson(json); + + /// Always `other`. 
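On the read path the union has two cases, `static` and `other`. A sketch of inspecting the strategy reported on a retrieved `VectorStoreFileObject` with the exhaustive `when` matcher generated above; `describeChunking` is an illustrative helper, not part of the package:

import 'package:openai_dart/openai_dart.dart';

/// Human-readable summary of the strategy reported on a retrieved file.
String describeChunking(VectorStoreFileObject file) =>
    file.chunkingStrategy?.when(
      static: (type, s) => 'static: ${s.maxChunkSizeTokens} tokens/chunk, '
          '${s.chunkOverlapTokens} overlap',
      other: (type) => 'other (parameters not reported)',
    ) ??
    'no chunking strategy reported';

void main() {
  // The response union also decodes on its own, dispatching on `type`
  // exactly as the fromJson switch above shows.
  final strategy = ChunkingStrategyResponseParam.fromJson({'type': 'other'});
  print(strategy is OtherChunkingStrategyResponseParam); // true
}
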
+ @override + final String type; + + @override + String toString() { + return 'ChunkingStrategyResponseParam.other(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$OtherChunkingStrategyResponseParamImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$OtherChunkingStrategyResponseParamImplCopyWith< + _$OtherChunkingStrategyResponseParamImpl> + get copyWith => __$$OtherChunkingStrategyResponseParamImplCopyWithImpl< + _$OtherChunkingStrategyResponseParamImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String type, StaticChunkingStrategy static) + static, + required TResult Function(String type) other, + }) { + return other(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type, StaticChunkingStrategy static)? static, + TResult? Function(String type)? other, + }) { + return other?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type, StaticChunkingStrategy static)? static, + TResult Function(String type)? other, + required TResult orElse(), + }) { + if (other != null) { + return other(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(StaticChunkingStrategyResponseParam value) static, + required TResult Function(OtherChunkingStrategyResponseParam value) other, + }) { + return other(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(StaticChunkingStrategyResponseParam value)? static, + TResult? Function(OtherChunkingStrategyResponseParam value)? other, + }) { + return other?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(StaticChunkingStrategyResponseParam value)? static, + TResult Function(OtherChunkingStrategyResponseParam value)? other, + required TResult orElse(), + }) { + if (other != null) { + return other(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$OtherChunkingStrategyResponseParamImplToJson( + this, + ); + } +} + +abstract class OtherChunkingStrategyResponseParam + extends ChunkingStrategyResponseParam { + const factory OtherChunkingStrategyResponseParam( + {required final String type}) = _$OtherChunkingStrategyResponseParamImpl; + const OtherChunkingStrategyResponseParam._() : super._(); + + factory OtherChunkingStrategyResponseParam.fromJson( + Map json) = + _$OtherChunkingStrategyResponseParamImpl.fromJson; + + @override + + /// Always `other`. + String get type; + @override + @JsonKey(ignore: true) + _$$OtherChunkingStrategyResponseParamImplCopyWith< + _$OtherChunkingStrategyResponseParamImpl> + get copyWith => throw _privateConstructorUsedError; +} + AssistantStreamEvent _$AssistantStreamEventFromJson(Map json) { switch (json['event']) { case 'thread_stream_event': diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 9ed1ff90..ede4fed4 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -3520,6 +3520,10 @@ _$ToolResourcesFileSearchVectorStoreImpl fileIds: (json['file_ids'] as List?) 
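The response-side union generated above ships exhaustive `when`/`map` visitors over its two variants. A minimal sketch of consuming it, assuming only that these classes are exported via package:openai_dart/openai_dart.dart and that the union's fromJson dispatches on the `type` discriminator, as the neighbouring unions in this file do:

// Editorial sketch, not part of the patch: reading the chunking strategy
// reported on a vector store file. Only members visible in this diff are
// used; the import path and the `type`-based dispatch are assumptions.
import 'package:openai_dart/openai_dart.dart';

int? maxChunkSize(Map<String, dynamic> json) {
  final strategy = ChunkingStrategyResponseParam.fromJson(json);
  // `when` forces both variants to be handled; `other` is what the API
  // returns for files indexed before chunking_strategy existed.
  return strategy.when(
    static: (type, s) => s.maxChunkSizeTokens,
    other: (type) => null,
  );
}

void main() {
  print(maxChunkSize({
    'type': 'static',
    'static': {'max_chunk_size_tokens': 800, 'chunk_overlap_tokens': 400},
  })); // 800
  print(maxChunkSize({'type': 'other'})); // null
}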
?.map((e) => e as String) .toList(), + chunkingStrategy: json['chunking_strategy'] == null + ? null + : ChunkingStrategyRequestParam.fromJson( + json['chunking_strategy'] as Map), metadata: json['metadata'], ); @@ -3534,6 +3538,7 @@ Map _$$ToolResourcesFileSearchVectorStoreImplToJson( } writeNotNull('file_ids', instance.fileIds); + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); writeNotNull('metadata', instance.metadata); return val; } @@ -4401,6 +4406,10 @@ _$CreateVectorStoreRequestImpl _$$CreateVectorStoreRequestImplFromJson( ? null : VectorStoreExpirationAfter.fromJson( json['expires_after'] as Map), + chunkingStrategy: json['chunking_strategy'] == null + ? null + : ChunkingStrategyRequestParam.fromJson( + json['chunking_strategy'] as Map), metadata: json['metadata'], ); @@ -4417,6 +4426,7 @@ Map _$$CreateVectorStoreRequestImplToJson( writeNotNull('name', instance.name); writeNotNull('file_ids', instance.fileIds); writeNotNull('expires_after', instance.expiresAfter?.toJson()); + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); writeNotNull('metadata', instance.metadata); return val; } @@ -4499,19 +4509,33 @@ _$VectorStoreFileObjectImpl _$$VectorStoreFileObjectImplFromJson( ? null : VectorStoreFileObjectLastError.fromJson( json['last_error'] as Map), + chunkingStrategy: json['chunking_strategy'] == null + ? null + : ChunkingStrategyResponseParam.fromJson( + json['chunking_strategy'] as Map), ); Map _$$VectorStoreFileObjectImplToJson( - _$VectorStoreFileObjectImpl instance) => - { - 'id': instance.id, - 'object': instance.object, - 'usage_bytes': instance.usageBytes, - 'created_at': instance.createdAt, - 'vector_store_id': instance.vectorStoreId, - 'status': _$VectorStoreFileStatusEnumMap[instance.status]!, - 'last_error': instance.lastError?.toJson(), - }; + _$VectorStoreFileObjectImpl instance) { + final val = { + 'id': instance.id, + 'object': instance.object, + 'usage_bytes': instance.usageBytes, + 'created_at': instance.createdAt, + 'vector_store_id': instance.vectorStoreId, + 'status': _$VectorStoreFileStatusEnumMap[instance.status]!, + 'last_error': instance.lastError?.toJson(), + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); + return val; +} const _$VectorStoreFileStatusEnumMap = { VectorStoreFileStatus.inProgress: 'in_progress', @@ -4542,17 +4566,45 @@ const _$VectorStoreFileObjectLastErrorCodeEnumMap = { VectorStoreFileObjectLastErrorCode.unhandledMimeType: 'unhandled_mime_type', }; +_$StaticChunkingStrategyImpl _$$StaticChunkingStrategyImplFromJson( + Map json) => + _$StaticChunkingStrategyImpl( + maxChunkSizeTokens: json['max_chunk_size_tokens'] as int, + chunkOverlapTokens: json['chunk_overlap_tokens'] as int, + ); + +Map _$$StaticChunkingStrategyImplToJson( + _$StaticChunkingStrategyImpl instance) => + { + 'max_chunk_size_tokens': instance.maxChunkSizeTokens, + 'chunk_overlap_tokens': instance.chunkOverlapTokens, + }; + _$CreateVectorStoreFileRequestImpl _$$CreateVectorStoreFileRequestImplFromJson( Map json) => _$CreateVectorStoreFileRequestImpl( fileId: json['file_id'] as String, + chunkingStrategy: json['chunking_strategy'] == null + ? 
null + : ChunkingStrategyRequestParam.fromJson( + json['chunking_strategy'] as Map), ); Map _$$CreateVectorStoreFileRequestImplToJson( - _$CreateVectorStoreFileRequestImpl instance) => - { - 'file_id': instance.fileId, - }; + _$CreateVectorStoreFileRequestImpl instance) { + final val = { + 'file_id': instance.fileId, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); + return val; +} _$ListVectorStoreFilesResponseImpl _$$ListVectorStoreFilesResponseImplFromJson( Map json) => @@ -4651,13 +4703,27 @@ _$CreateVectorStoreFileBatchRequestImpl fileIds: (json['file_ids'] as List) .map((e) => e as String) .toList(), + chunkingStrategy: json['chunking_strategy'] == null + ? null + : ChunkingStrategyRequestParam.fromJson( + json['chunking_strategy'] as Map), ); Map _$$CreateVectorStoreFileBatchRequestImplToJson( - _$CreateVectorStoreFileBatchRequestImpl instance) => - { - 'file_ids': instance.fileIds, - }; + _$CreateVectorStoreFileBatchRequestImpl instance) { + final val = { + 'file_ids': instance.fileIds, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); + return val; +} _$ErrorImpl _$$ErrorImplFromJson(Map json) => _$ErrorImpl( code: json['code'] as String?, @@ -5810,6 +5876,64 @@ Map return val; } +_$AutoChunkingStrategyRequestParamImpl + _$$AutoChunkingStrategyRequestParamImplFromJson( + Map json) => + _$AutoChunkingStrategyRequestParamImpl( + type: json['type'] as String, + ); + +Map _$$AutoChunkingStrategyRequestParamImplToJson( + _$AutoChunkingStrategyRequestParamImpl instance) => + { + 'type': instance.type, + }; + +_$StaticChunkingStrategyRequestParamImpl + _$$StaticChunkingStrategyRequestParamImplFromJson( + Map json) => + _$StaticChunkingStrategyRequestParamImpl( + type: json['type'] as String, + static: StaticChunkingStrategy.fromJson( + json['static'] as Map), + ); + +Map _$$StaticChunkingStrategyRequestParamImplToJson( + _$StaticChunkingStrategyRequestParamImpl instance) => + { + 'type': instance.type, + 'static': instance.static.toJson(), + }; + +_$StaticChunkingStrategyResponseParamImpl + _$$StaticChunkingStrategyResponseParamImplFromJson( + Map json) => + _$StaticChunkingStrategyResponseParamImpl( + type: json['type'] as String, + static: StaticChunkingStrategy.fromJson( + json['static'] as Map), + ); + +Map _$$StaticChunkingStrategyResponseParamImplToJson( + _$StaticChunkingStrategyResponseParamImpl instance) => + { + 'type': instance.type, + 'static': instance.static.toJson(), + }; + +_$OtherChunkingStrategyResponseParamImpl + _$$OtherChunkingStrategyResponseParamImplFromJson( + Map json) => + _$OtherChunkingStrategyResponseParamImpl( + type: json['type'] as String, + ); + +Map _$$OtherChunkingStrategyResponseParamImplToJson( + _$OtherChunkingStrategyResponseParamImpl instance) => + { + 'type': instance.type, + }; + _$ThreadStreamEventImpl _$$ThreadStreamEventImplFromJson( Map json) => _$ThreadStreamEventImpl( diff --git a/packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart b/packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart new file mode 100644 index 00000000..aa67e062 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart @@ -0,0 +1,60 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: 
type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: StaticChunkingStrategy +// ========================================== + +/// Static chunking strategy +@freezed +class StaticChunkingStrategy with _$StaticChunkingStrategy { + const StaticChunkingStrategy._(); + + /// Factory constructor for StaticChunkingStrategy + const factory StaticChunkingStrategy({ + /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + /// maximum value is `4096`. + @JsonKey(name: 'max_chunk_size_tokens') required int maxChunkSizeTokens, + + /// The number of tokens that overlap between chunks. The default value is `400`. + /// + /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. + @JsonKey(name: 'chunk_overlap_tokens') required int chunkOverlapTokens, + }) = _StaticChunkingStrategy; + + /// Object construction from a JSON representation + factory StaticChunkingStrategy.fromJson(Map json) => + _$StaticChunkingStrategyFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'max_chunk_size_tokens', + 'chunk_overlap_tokens' + ]; + + /// Validation constants + static const maxChunkSizeTokensMinValue = 100; + static const maxChunkSizeTokensMaxValue = 4096; + + /// Perform validations on the schema property values + String? validateSchema() { + if (maxChunkSizeTokens < maxChunkSizeTokensMinValue) { + return "The value of 'maxChunkSizeTokens' cannot be < $maxChunkSizeTokensMinValue"; + } + if (maxChunkSizeTokens > maxChunkSizeTokensMaxValue) { + return "The value of 'maxChunkSizeTokens' cannot be > $maxChunkSizeTokensMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'max_chunk_size_tokens': maxChunkSizeTokens, + 'chunk_overlap_tokens': chunkOverlapTokens, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart b/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart index 63247873..cc01299d 100644 --- a/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart +++ b/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart @@ -19,6 +19,11 @@ class ToolResourcesFileSearchVectorStore /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
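The StaticChunkingStrategy class introduced above carries its own bounds: maxChunkSizeTokens must lie in [100, 4096] and the overlap must not exceed half of it. A minimal usage sketch, assuming only that the class is exported from the package's public library (the export file is not part of this hunk):

// Editorial sketch, not part of the patch. Only members introduced above
// are used; the package: import path is an assumption.
import 'package:openai_dart/openai_dart.dart';

void main() {
  const strategy = StaticChunkingStrategy(
    maxChunkSizeTokens: 1200, // must stay within [100, 4096]
    chunkOverlapTokens: 300, // must not exceed half of maxChunkSizeTokens
  );

  // validateSchema() enforces the max_chunk_size_tokens bounds shown above;
  // it returns null when they hold and a message otherwise.
  assert(strategy.validateSchema() == null);

  // toMap() yields the wire shape used by the chunking_strategy fields that
  // this patch adds to the vector store request and response schemas.
  print(strategy.toMap());
  // {max_chunk_size_tokens: 1200, chunk_overlap_tokens: 300}
}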
@JsonKey(includeIfNull: false) dynamic metadata, }) = _ToolResourcesFileSearchVectorStore; @@ -29,7 +34,11 @@ class ToolResourcesFileSearchVectorStore _$ToolResourcesFileSearchVectorStoreFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_ids', 'metadata']; + static const List propertyNames = [ + 'file_ids', + 'chunking_strategy', + 'metadata' + ]; /// Perform validations on the schema property values String? validateSchema() { @@ -40,6 +49,7 @@ class ToolResourcesFileSearchVectorStore Map toMap() { return { 'file_ids': fileIds, + 'chunking_strategy': chunkingStrategy, 'metadata': metadata, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart b/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart index 53e6f928..b6c24133 100644 --- a/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart @@ -36,6 +36,11 @@ class VectorStoreFileObject with _$VectorStoreFileObject { /// The last error associated with this vector store file. Will be `null` if there are no errors. @JsonKey(name: 'last_error') required VectorStoreFileObjectLastError? lastError, + + /// The chunking strategy used to chunk the file(s). + /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? chunkingStrategy, }) = _VectorStoreFileObject; /// Object construction from a JSON representation @@ -50,7 +55,8 @@ class VectorStoreFileObject with _$VectorStoreFileObject { 'created_at', 'vector_store_id', 'status', - 'last_error' + 'last_error', + 'chunking_strategy' ]; /// Perform validations on the schema property values @@ -68,6 +74,7 @@ class VectorStoreFileObject with _$VectorStoreFileObject { 'vector_store_id': vectorStoreId, 'status': status, 'last_error': lastError, + 'chunking_strategy': chunkingStrategy, }; } } diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index cb469506..141e9071 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -4527,6 +4527,8 @@ components: maxItems: 10000 items: type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" metadata: type: object description: | @@ -5733,6 +5735,8 @@ components: type: string expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" metadata: description: *metadata_description type: object @@ -5848,6 +5852,8 @@ components: required: - code - message + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyResponseParam" required: - id - object @@ -5856,6 +5862,97 @@ components: - vector_store_id - status - last_error + ChunkingStrategyRequestParam: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + discriminator: + propertyName: type + AutoChunkingStrategyRequestParam: + type: object + description: | + Auto Chunking Strategy, the default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` + and `chunk_overlap_tokens` of `400`. 
+ additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + default: auto + required: + - type + StaticChunkingStrategyRequestParam: + type: object + description: Static chunking strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + default: static + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + StaticChunkingStrategy: + type: object + description: Static chunking strategy + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: | + The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + ChunkingStrategyResponseParam: + type: object + description: The chunking strategy used to chunk the file(s). + oneOf: + - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" + - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" + discriminator: + propertyName: type + OtherChunkingStrategyResponseParam: + type: object + description: | + Other Chunking Strategy. This is returned when the chunking strategy is unknown. Typically, this is because + the file was indexed before the `chunking_strategy` concept was introduced in the API. + additionalProperties: false + properties: + type: + type: string + description: Always `other`. + default: other + required: + - type + StaticChunkingStrategyResponseParam: + type: object + description: Static Chunking Strategy. + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + default: static + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static CreateVectorStoreFileRequest: type: object description: Request object for the Create vector store file endpoint. @@ -5863,6 +5960,8 @@ components: file_id: description: A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" required: - file_id ListVectorStoreFilesResponse: @@ -5976,6 +6075,8 @@ components: maxItems: 500 items: type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" required: - file_ids AssistantStreamEvent: diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index fd863f50..0dd10f12 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -10044,6 +10044,52 @@ components: maxItems: 10000 items: type: string + chunking_strategy: + # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. 
+ additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: [ "auto" ] + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true metadata: type: object description: | @@ -11009,11 +11055,58 @@ components: maxItems: 10000 items: type: string + chunking_strategy: + # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: [ "auto" ] + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true metadata: type: object description: | Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. x-oaiTypeLabel: map + x-oaiExpandable: true oneOf: - required: [ vector_store_ids ] - required: [ vector_stores ] @@ -12349,6 +12442,13 @@ components: type: string expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" + chunking_strategy: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true metadata: description: *metadata_description type: object @@ -12458,6 +12558,13 @@ components: required: - code - message + chunking_strategy: + type: object + description: The strategy used to chunk the file. 
+ oneOf: + - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" + - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" + x-oaiExpandable: true required: - id - object @@ -12477,9 +12584,99 @@ components: "created_at": 1698107661, "vector_store_id": "vs_abc123", "status": "completed", - "last_error": null + "last_error": null, + "chunking_strategy": { + "type": "static", + "static": { + "max_chunk_size_tokens": 800, + "chunk_overlap_tokens": 400 + } + } } + OtherChunkingStrategyResponseParam: + type: object + title: Other Chunking Strategy + description: This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. + additionalProperties: false + properties: + type: + type: string + description: Always `other`. + enum: [ "other" ] + required: + - type + + StaticChunkingStrategyResponseParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + + StaticChunkingStrategy: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + + AutoChunkingStrategyRequestParam: + type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: [ "auto" ] + required: + - type + + StaticChunkingStrategyRequestParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + + ChunkingStrategyRequestParam: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true + CreateVectorStoreFileRequest: type: object additionalProperties: false @@ -12487,6 +12684,8 @@ components: file_id: description: A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. 
type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" required: - file_id @@ -12613,6 +12812,8 @@ components: maxItems: 500 items: type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" required: - file_ids From a640089f3aa9c30292b283e7ada66361ffa6f090 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 20 Jul 2024 11:19:52 +0200 Subject: [PATCH 191/251] feat: Add GPT-4o-mini to model catalog (#497) --- .../lib/src/chat_models/types.dart | 16 +- .../schema/chat_completion_message.dart | 2 +- .../schema/create_assistant_request.dart | 4 + .../create_chat_completion_request.dart | 6 + .../generated/schema/create_run_request.dart | 4 + .../schema/create_thread_and_run_request.dart | 4 + .../fine_tuning_job_hyperparameters.dart | 12 +- .../src/generated/schema/schema.freezed.dart | 21 +- .../lib/src/generated/schema/schema.g.dart | 8 + packages/openai_dart/oas/openapi_curated.yaml | 29 +- .../openai_dart/oas/openapi_official.yaml | 836 +++++++++++++----- 11 files changed, 701 insertions(+), 241 deletions(-) diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 0526f937..299902fe 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -30,23 +30,23 @@ class ChatOpenAIOptions extends ChatModelOptions { /// /// Available models: /// - `gpt-4` - /// - `gpt-4-0314` - /// - `gpt-4-0613` /// - `gpt-4-32k` /// - `gpt-4-32k-0314` /// - `gpt-4-32k-0613` - /// - `gpt-4-turbo-preview` - /// - `gpt-4-1106-preview` /// - `gpt-4-0125-preview` + /// - `gpt-4-0314` + /// - `gpt-4-0613` + /// - `gpt-4-1106-preview` + /// - `gpt-4-turbo` + /// - `gpt-4-turbo-2024-04-09` + /// - `gpt-4-turbo-preview` /// - `gpt-4-vision-preview` /// - `gpt-4o` /// - `gpt-4o-2024-05-13` + /// - `gpt-4o-mini` + /// - `gpt-4o-mini-2024-07-18` /// - `gpt-3.5-turbo` /// - `gpt-3.5-turbo-16k` - /// - `gpt-3.5-turbo-0301` - /// - `gpt-3.5-turbo-0613` - /// - `gpt-3.5-turbo-1106` - /// - `gpt-3.5-turbo-16k-0613` /// /// Mind that the list may be outdated. /// See https://platform.openai.com/docs/models for the latest list. diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart index 65e9b1d8..e546e524 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart @@ -135,7 +135,7 @@ sealed class ChatCompletionUserMessageContent with _$ChatCompletionUserMessageContent { const ChatCompletionUserMessageContent._(); - /// An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-vision-preview` model. + /// An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. 
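With gpt-4o-mini now listed in the catalog above, selecting it from langchain_openai only requires passing the ID through ChatOpenAIOptions. A hedged sketch: the ChatOpenAI constructor, defaultOptions, invoke and PromptValue surfaces are assumed from the packages' public APIs rather than taken from this hunk; only the model ID itself comes from the diff.

// Editorial sketch, not part of the patch. ChatOpenAI and the `model`
// parameter of ChatOpenAIOptions are assumed from the langchain_openai
// public API.
import 'dart:io';

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
  final chatModel = ChatOpenAI(
    apiKey: openaiApiKey,
    defaultOptions: const ChatOpenAIOptions(
      model: 'gpt-4o-mini', // newly listed in the catalog above
    ),
  );

  final result = await chatModel.invoke(PromptValue.string('Say hello.'));
  print(result.output.content);
}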
const factory ChatCompletionUserMessageContent.parts( List value, ) = ChatCompletionMessageContentParts; diff --git a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart index 16db2e01..3b9086d3 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart @@ -163,6 +163,10 @@ enum AssistantModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-mini') + gpt4oMini, + @JsonValue('gpt-4o-mini-2024-07-18') + gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 657f7268..6e7e429a 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -73,6 +73,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. /// - If set to 'default', the request will be processed using the default service tier with a lower /// uptime SLA and no latency guarantee. + /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @JsonKey( @@ -302,6 +303,10 @@ enum ChatCompletionModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-mini') + gpt4oMini, + @JsonValue('gpt-4o-mini-2024-07-18') + gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') @@ -423,6 +428,7 @@ class ChatCompletionResponseFormat with _$ChatCompletionResponseFormat { /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. /// - If set to 'default', the request will be processed using the default service tier with a lower /// uptime SLA and no latency guarantee. +/// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. 
enum CreateChatCompletionRequestServiceTier { diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 375ea8a0..83c04dc1 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -203,6 +203,10 @@ enum RunModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-mini') + gpt4oMini, + @JsonValue('gpt-4o-mini-2024-07-18') + gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index ff5013d5..67b921cb 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -202,6 +202,10 @@ enum ThreadAndRunModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-mini') + gpt4oMini, + @JsonValue('gpt-4o-mini-2024-07-18') + gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') diff --git a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart index 51d89b60..409aa1d7 100644 --- a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart +++ b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart @@ -15,8 +15,10 @@ class FineTuningJobHyperparameters with _$FineTuningJobHyperparameters { /// Factory constructor for FineTuningJobHyperparameters const factory FineTuningJobHyperparameters({ - /// The number of epochs to train the model for. An epoch refers to one - /// full cycle through the training dataset. + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + /// manually, we support any number between 1 and 50 epochs. @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') required FineTuningNEpochs nEpochs, @@ -56,8 +58,10 @@ enum FineTuningNEpochsOptions { // CLASS: FineTuningNEpochs // ========================================== -/// The number of epochs to train the model for. An epoch refers to one -/// full cycle through the training dataset. +/// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. +/// +/// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number +/// manually, we support any number between 1 and 50 epochs. @freezed sealed class FineTuningNEpochs with _$FineTuningNEpochs { const FineTuningNEpochs._(); diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index d1f911c1..75973e85 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -3463,6 +3463,7 @@ mixin _$CreateChatCompletionRequest { /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. 
/// - If set to 'default', the request will be processed using the default service tier with a lower /// uptime SLA and no latency guarantee. + /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @JsonKey( @@ -4151,6 +4152,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. /// - If set to 'default', the request will be processed using the default service tier with a lower /// uptime SLA and no latency guarantee. + /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @override @@ -4477,6 +4479,7 @@ abstract class _CreateChatCompletionRequest /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. /// - If set to 'default', the request will be processed using the default service tier with a lower /// uptime SLA and no latency guarantee. + /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @JsonKey( @@ -15766,8 +15769,10 @@ FineTuningJobHyperparameters _$FineTuningJobHyperparametersFromJson( /// @nodoc mixin _$FineTuningJobHyperparameters { - /// The number of epochs to train the model for. An epoch refers to one - /// full cycle through the training dataset. + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + /// manually, we support any number between 1 and 50 epochs. @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') FineTuningNEpochs get nEpochs => throw _privateConstructorUsedError; @@ -15882,8 +15887,10 @@ class _$FineTuningJobHyperparametersImpl extends _FineTuningJobHyperparameters { Map json) => _$$FineTuningJobHyperparametersImplFromJson(json); - /// The number of epochs to train the model for. An epoch refers to one - /// full cycle through the training dataset. + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + /// manually, we support any number between 1 and 50 epochs. @override @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') @@ -15936,8 +15943,10 @@ abstract class _FineTuningJobHyperparameters @override - /// The number of epochs to train the model for. An epoch refers to one - /// full cycle through the training dataset. + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + /// manually, we support any number between 1 and 50 epochs. 
@_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') FineTuningNEpochs get nEpochs; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index ede4fed4..95ffa209 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -427,6 +427,8 @@ const _$ChatCompletionModelsEnumMap = { ChatCompletionModels.gpt4VisionPreview: 'gpt-4-vision-preview', ChatCompletionModels.gpt4o: 'gpt-4o', ChatCompletionModels.gpt4o20240513: 'gpt-4o-2024-05-13', + ChatCompletionModels.gpt4oMini: 'gpt-4o-mini', + ChatCompletionModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', ChatCompletionModels.gpt35Turbo: 'gpt-3.5-turbo', ChatCompletionModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', ChatCompletionModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -2275,6 +2277,8 @@ const _$AssistantModelsEnumMap = { AssistantModels.gpt4VisionPreview: 'gpt-4-vision-preview', AssistantModels.gpt4o: 'gpt-4o', AssistantModels.gpt4o20240513: 'gpt-4o-2024-05-13', + AssistantModels.gpt4oMini: 'gpt-4o-mini', + AssistantModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', AssistantModels.gpt35Turbo: 'gpt-3.5-turbo', AssistantModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', AssistantModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -2935,6 +2939,8 @@ const _$RunModelsEnumMap = { RunModels.gpt4VisionPreview: 'gpt-4-vision-preview', RunModels.gpt4o: 'gpt-4o', RunModels.gpt4o20240513: 'gpt-4o-2024-05-13', + RunModels.gpt4oMini: 'gpt-4o-mini', + RunModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', RunModels.gpt35Turbo: 'gpt-3.5-turbo', RunModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', RunModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -3259,6 +3265,8 @@ const _$ThreadAndRunModelsEnumMap = { ThreadAndRunModels.gpt4VisionPreview: 'gpt-4-vision-preview', ThreadAndRunModels.gpt4o: 'gpt-4o', ThreadAndRunModels.gpt4o20240513: 'gpt-4o-2024-05-13', + ThreadAndRunModels.gpt4oMini: 'gpt-4o-mini', + ThreadAndRunModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', ThreadAndRunModels.gpt35Turbo: 'gpt-3.5-turbo', ThreadAndRunModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', ThreadAndRunModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 141e9071..420c7cf4 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1820,6 +1820,8 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -1915,6 +1917,7 @@ components: - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. + - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. type: string @@ -2071,7 +2074,7 @@ components: - type: string description: The text contents of the message. - type: array - description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-vision-preview` model. 
+ description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. items: $ref: "#/components/schemas/ChatCompletionMessageContentPart" minItems: 1 @@ -2918,8 +2921,10 @@ components: n_epochs: title: FineTuningNEpochs description: | - The number of epochs to train the model for. An epoch refers to one - full cycle through the training dataset. + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + manually, we support any number between 1 and 50 epochs. oneOf: - type: string title: FineTuningNEpochsOptions @@ -3523,6 +3528,8 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -4085,6 +4092,8 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -4327,6 +4336,8 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -5872,7 +5883,7 @@ components: propertyName: type AutoChunkingStrategyRequestParam: type: object - description: | + description: | Auto Chunking Strategy, the default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. additionalProperties: false @@ -5906,7 +5917,7 @@ components: type: integer minimum: 100 maximum: 4096 - description: | + description: | The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. chunk_overlap_tokens: @@ -5922,13 +5933,13 @@ components: type: object description: The chunking strategy used to chunk the file(s). oneOf: - - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" - - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" + - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" + - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" discriminator: - propertyName: type + propertyName: type OtherChunkingStrategyResponseParam: type: object - description: | + description: | Other Chunking Strategy. This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. additionalProperties: false diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 0dd10f12..1a91af5d 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -29,6 +29,8 @@ tags: description: Create large batches of API requests to run asynchronously. - name: Files description: Files are used to upload documents that can be used with features like Assistants and Fine-tuning. + - name: Uploads + description: Use Uploads to upload large files in multiple parts. - name: Images description: Given a prompt and/or an input image, the model will generate a new image. 
- name: Models @@ -117,7 +119,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1677652288, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, @@ -214,7 +216,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1677652288, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, @@ -289,13 +291,13 @@ paths: main(); response: &chat_completion_chunk_example | - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} .... - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} - title: Functions request: curl: | @@ -412,7 +414,7 @@ paths: "id": "chatcmpl-abc123", "object": "chat.completion", "created": 1699896916, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "choices": [ { "index": 0, @@ -494,7 +496,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1702685778, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "choices": [ { "index": 0, @@ -1721,6 +1723,218 @@ paths: } main(); + /uploads: + post: + operationId: createUpload + tags: + - Uploads + summary: | + Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it. + + Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. + + For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case: + - [Assistants](/docs/assistants/tools/file-search/supported-files) + + For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateUploadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Create upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status `pending`. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "purpose": "fine-tune", + "filename": "training_examples.jsonl", + "bytes": 2147483648, + "mime_type": "text/jsonl" + }' + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "pending", + "expires_at": 1719127296 + } + + /uploads/{upload_id}/parts: + post: + operationId: addUploadPart + tags: + - Uploads + summary: | + Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. + + Each Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB. + + It is possible to add multiple Parts in parallel. You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete). + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/AddUploadPartRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/UploadPart" + x-oaiMeta: + name: Add upload part + group: uploads + returns: The upload [Part](/docs/api-reference/uploads/part-object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/parts + -F data="aHR0cHM6Ly9hcGkub3BlbmFpLmNvbS92MS91cGxvYWRz..." + response: | + { + "id": "part_def456", + "object": "upload.part", + "created_at": 1719185911, + "upload_id": "upload_abc123" + } + + /uploads/{upload_id}/complete: + post: + operationId: completeUpload + tags: + - Uploads + summary: | + Completes the [Upload](/docs/api-reference/uploads/object). + + Within the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform. + + You can specify the order of the Parts by passing in an ordered list of the Part IDs. + + The number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed. + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CompleteUploadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Complete upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status `completed` with an additional `file` property containing the created usable File object. 
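The Uploads endpoints described above form a three-step protocol: create an Upload (at most 8 GB in total, expiring after an hour), add Parts of at most 64 MB each (optionally in parallel), then complete it with an ordered list of part IDs whose combined size matches the declared bytes. A hedged sketch of that flow in Dart, driven directly against the documented paths with package:http, since no generated client method for these routes appears in this diff:

// Editorial sketch, not part of the patch: the upload flow described above.
// package:http is an added dependency; endpoint paths, field names, and the
// 'data' form field come from the spec text and curl examples in this hunk.
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY'];
  final jsonHeaders = {
    'Authorization': 'Bearer $apiKey',
    'Content-Type': 'application/json',
  };
  final bytes = utf8.encode('{"prompt": "hello", "completion": "world"}\n');

  // 1. Create the Upload (at most 8 GB in total, expires after an hour).
  final created = jsonDecode((await http.post(
    Uri.parse('https://api.openai.com/v1/uploads'),
    headers: jsonHeaders,
    body: jsonEncode({
      'purpose': 'fine-tune',
      'filename': 'training_examples.jsonl',
      'bytes': bytes.length,
      'mime_type': 'text/jsonl',
    }),
  )).body);

  // 2. Add Parts (each at most 64 MB; they may be sent in parallel).
  final partRequest = http.MultipartRequest(
    'POST',
    Uri.parse('https://api.openai.com/v1/uploads/${created['id']}/parts'),
  )
    ..headers['Authorization'] = 'Bearer $apiKey'
    ..files.add(http.MultipartFile.fromBytes('data', bytes));
  final part =
      jsonDecode(await (await partRequest.send()).stream.bytesToString());

  // 3. Complete with the intended order of Part IDs; the total number of
  //    bytes must match what was declared in step 1.
  final completed = jsonDecode((await http.post(
    Uri.parse('https://api.openai.com/v1/uploads/${created['id']}/complete'),
    headers: jsonHeaders,
    body: jsonEncode({
      'part_ids': [part['id']],
    }),
  )).body);
  print(completed['file']); // a regular File object, usable elsewhere
}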
+ examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/complete + -d '{ + "part_ids": ["part_def456", "part_ghi789"] + }' + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "completed", + "expires_at": 1719127296, + "file": { + "id": "file-xyz321", + "object": "file", + "bytes": 2147483648, + "created_at": 1719186911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + } + } + + /uploads/{upload_id}/cancel: + post: + operationId: cancelUpload + tags: + - Uploads + summary: | + Cancels the Upload. No Parts may be added after an Upload is cancelled. + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Cancel upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status `cancelled`. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/cancel + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "cancelled", + "expires_at": 1719127296 + } /fine_tuning/jobs: post: @@ -2608,7 +2822,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: &pagination_after_param_description | @@ -3457,7 +3671,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -3680,7 +3894,7 @@ paths: name: Retrieve message group: threads beta: true - returns: The [message](/docs/api-reference/threads/messages/object) object matching the specified ID. + returns: The [message](/docs/api-reference/messages/object) object matching the specified ID. examples: request: curl: | @@ -3768,7 +3982,7 @@ paths: name: Modify message group: threads beta: true - returns: The modified [message](/docs/api-reference/threads/messages/object) object. + returns: The modified [message](/docs/api-reference/messages/object) object. examples: request: curl: | @@ -4300,7 +4514,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -5417,7 +5631,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -5623,7 +5837,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -5998,7 +6212,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -6014,7 +6228,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." 
schema: type: string - enum: [ "in_progress", "completed", "failed", "cancelled" ] + enum: ["in_progress", "completed", "failed", "cancelled"] responses: "200": description: OK @@ -6560,7 +6774,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -6576,7 +6790,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: [ "in_progress", "completed", "failed", "cancelled" ] + enum: ["in_progress", "completed", "failed", "cancelled"] responses: "200": description: OK @@ -6676,7 +6890,7 @@ paths: description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. completion_window: type: string - enum: [ "24h" ] + enum: ["24h"] description: The time frame within which the batch should be processed. Currently only `24h` is supported. metadata: type: object @@ -7053,7 +7267,7 @@ components: properties: object: type: string - enum: [ list ] + enum: [list] data: type: array items: @@ -7084,7 +7298,7 @@ components: anyOf: - type: string - type: string - enum: [ "gpt-3.5-turbo-instruct", "davinci-002", "babbage-002" ] + enum: ["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"] x-oaiTypeLabel: string prompt: description: &completions_prompt_description | @@ -7296,7 +7510,7 @@ components: The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, or `content_filter` if content was omitted due to a flag from our content filters. - enum: [ "stop", "length", "content_filter" ] + enum: ["stop", "length", "content_filter"] index: type: integer logprobs: @@ -7338,7 +7552,7 @@ components: object: type: string description: The object type, which is always "text_completion" - enum: [ text_completion ] + enum: [text_completion] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -7383,7 +7597,7 @@ components: properties: type: type: string - enum: [ "image_url" ] + enum: ["image_url"] description: The type of the content part. image_url: type: object @@ -7395,7 +7609,7 @@ components: detail: type: string description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). - enum: [ "auto", "low", "high" ] + enum: ["auto", "low", "high"] default: "auto" required: - url @@ -7409,7 +7623,7 @@ components: properties: type: type: string - enum: [ "text" ] + enum: ["text"] description: The type of the content part. text: type: string @@ -7436,7 +7650,7 @@ components: type: string role: type: string - enum: [ "system" ] + enum: ["system"] description: The role of the messages author, in this case `system`. name: type: string @@ -7457,7 +7671,7 @@ components: description: The text contents of the message. title: Text content - type: array - description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-visual-preview` model. 
+ description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. title: Array of content parts items: $ref: "#/components/schemas/ChatCompletionRequestMessageContentPart" @@ -7465,7 +7679,7 @@ components: x-oaiExpandable: true role: type: string - enum: [ "user" ] + enum: ["user"] description: The role of the messages author, in this case `user`. name: type: string @@ -7485,7 +7699,7 @@ components: The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. role: type: string - enum: [ "assistant" ] + enum: ["assistant"] description: The role of the messages author, in this case `assistant`. name: type: string @@ -7518,7 +7732,7 @@ components: properties: weight: type: integer - enum: [ 0, 1 ] + enum: [0, 1] description: "Controls whether the assistant message is trained against (0 or 1)" - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" required: @@ -7530,7 +7744,7 @@ components: properties: role: type: string - enum: [ "tool" ] + enum: ["tool"] description: The role of the messages author, in this case `tool`. content: type: string @@ -7550,7 +7764,7 @@ components: properties: role: type: string - enum: [ "function" ] + enum: ["function"] description: The role of the messages author, in this case `function`. content: nullable: true @@ -7600,7 +7814,7 @@ components: properties: type: type: string - enum: [ "function" ] + enum: ["function"] description: The type of the tool. Currently, only `function` is supported. function: $ref: "#/components/schemas/FunctionObject" @@ -7637,7 +7851,7 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - enum: [ none, auto, required ] + enum: [none, auto, required] - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" x-oaiExpandable: true @@ -7647,7 +7861,7 @@ components: properties: type: type: string - enum: [ "function" ] + enum: ["function"] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7681,7 +7895,7 @@ components: description: The ID of the tool call. type: type: string - enum: [ "function" ] + enum: ["function"] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7711,7 +7925,7 @@ components: description: The ID of the tool call. type: type: string - enum: [ "function" ] + enum: ["function"] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7760,7 +7974,7 @@ components: $ref: "#/components/schemas/ChatCompletionMessageToolCalls" role: type: string - enum: [ "assistant" ] + enum: ["assistant"] description: The role of the author of this message. function_call: type: object @@ -7805,7 +8019,7 @@ components: $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" role: type: string - enum: [ "system", "user", "assistant", "tool" ] + enum: ["system", "user", "assistant", "tool"] description: The role of the author of this message. 
CreateChatCompletionRequest: @@ -7827,6 +8041,8 @@ components: [ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -7910,7 +8126,7 @@ components: properties: type: type: string - enum: [ "text", "json_object" ] + enum: ["text", "json_object"] example: "json_object" default: "text" description: Must be one of `text` or `json_object`. @@ -7930,10 +8146,11 @@ components: Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. type: string - enum: [ "auto", "default" ] + enum: ["auto", "default"] nullable: true default: null stop: @@ -8001,7 +8218,7 @@ components: description: > `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. - enum: [ none, auto ] + enum: [none, auto] - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" x-oaiExpandable: true functions: @@ -8080,7 +8297,7 @@ components: service_tier: description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. type: string - enum: [ "scale", "default" ] + enum: ["scale", "default"] example: "scale" nullable: true system_fingerprint: @@ -8092,7 +8309,7 @@ components: object: type: string description: The object type, which is always `chat.completion`. - enum: [ chat.completion ] + enum: [chat.completion] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -8130,7 +8347,7 @@ components: &chat_completion_function_finish_reason_description | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. enum: - [ "stop", "length", "function_call", "content_filter" ] + ["stop", "length", "function_call", "content_filter"] index: type: integer description: The index of the choice in the list of choices. @@ -8151,7 +8368,7 @@ components: object: type: string description: The object type, which is always `chat.completion`. - enum: [ chat.completion ] + enum: [chat.completion] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -8210,7 +8427,7 @@ components: type: boolean object: type: string - enum: [ list ] + enum: [list] required: - object - data @@ -8262,7 +8479,7 @@ components: service_tier: description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. type: string - enum: [ "scale", "default" ] + enum: ["scale", "default"] example: "scale" nullable: true system_fingerprint: @@ -8273,7 +8490,7 @@ components: object: type: string description: The object type, which is always `chat.completion.chunk`. 
- enum: [ chat.completion.chunk ] + enum: [chat.completion.chunk] usage: type: object description: | @@ -8323,7 +8540,7 @@ components: anyOf: - type: string - type: string - enum: [ "dall-e-2", "dall-e-3" ] + enum: ["dall-e-2", "dall-e-3"] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-3" @@ -8339,27 +8556,27 @@ components: description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. quality: type: string - enum: [ "standard", "hd" ] + enum: ["standard", "hd"] default: "standard" example: "standard" description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. response_format: &images_response_format type: string - enum: [ "url", "b64_json" ] + enum: ["url", "b64_json"] default: "url" example: "url" nullable: true description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. size: &images_size type: string - enum: [ "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792" ] + enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] default: "1024x1024" example: "1024x1024" nullable: true description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. style: type: string - enum: [ "vivid", "natural" ] + enum: ["vivid", "natural"] default: "vivid" example: "vivid" nullable: true @@ -8420,7 +8637,7 @@ components: anyOf: - type: string - type: string - enum: [ "dall-e-2" ] + enum: ["dall-e-2"] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-2" @@ -8436,7 +8653,7 @@ components: description: The number of images to generate. Must be between 1 and 10. size: &dalle2_images_size type: string - enum: [ "256x256", "512x512", "1024x1024" ] + enum: ["256x256", "512x512", "1024x1024"] default: "1024x1024" example: "1024x1024" nullable: true @@ -8458,7 +8675,7 @@ components: anyOf: - type: string - type: string - enum: [ "dall-e-2" ] + enum: ["dall-e-2"] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-2" @@ -8496,7 +8713,7 @@ components: anyOf: - type: string - type: string - enum: [ "text-moderation-latest", "text-moderation-stable" ] + enum: ["text-moderation-latest", "text-moderation-stable"] x-oaiTypeLabel: string required: - input @@ -8639,7 +8856,7 @@ components: $ref: "#/components/schemas/OpenAIFile" object: type: string - enum: [ list ] + enum: [list] required: - object - data @@ -8659,7 +8876,7 @@ components: Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). type: string - enum: [ "assistants", "batch", "fine-tune", "vision" ] + enum: ["assistants", "batch", "fine-tune", "vision"] required: - file - purpose @@ -8671,7 +8888,7 @@ components: type: string object: type: string - enum: [ file ] + enum: [file] deleted: type: boolean required: @@ -8679,6 +8896,70 @@ components: - object - deleted + CreateUploadRequest: + type: object + additionalProperties: false + properties: + filename: + description: | + The name of the file to upload. 
+ type: string + purpose: + description: | + The intended purpose of the uploaded file. + + See the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). + type: string + enum: ["assistants", "batch", "fine-tune", "vision"] + bytes: + description: | + The number of bytes in the file you are uploading. + type: integer + mime_type: + description: | + The MIME type of the file. + + This must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision. + type: string + required: + - filename + - purpose + - bytes + - mime_type + + AddUploadPartRequest: + type: object + additionalProperties: false + properties: + data: + description: | + The chunk of bytes for this Part. + type: string + format: binary + required: + - data + + CompleteUploadRequest: + type: object + additionalProperties: false + properties: + part_ids: + type: array + description: | + The ordered list of Part IDs. + items: + type: string + md5: + description: | + The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect. + type: string + required: + - part_ids + + CancelUploadRequest: + type: object + additionalProperties: false + CreateFineTuningJobRequest: type: object properties: @@ -8690,7 +8971,7 @@ components: anyOf: - type: string - type: string - enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo" ] + enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] x-oaiTypeLabel: string training_file: description: | @@ -8715,7 +8996,7 @@ components: are updated less frequently, but with lower variance. oneOf: - type: string - enum: [ auto ] + enum: [auto] - type: integer minimum: 1 maximum: 256 @@ -8726,7 +9007,7 @@ components: overfitting. oneOf: - type: string - enum: [ auto ] + enum: [auto] - type: number minimum: 0 exclusiveMinimum: true @@ -8737,7 +9018,7 @@ components: through the training dataset. oneOf: - type: string - enum: [ auto ] + enum: [auto] - type: integer minimum: 1 maximum: 50 @@ -8782,7 +9063,7 @@ components: The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. oneOf: - type: string - enum: [ wandb ] + enum: [wandb] wandb: type: object description: | @@ -8839,7 +9120,7 @@ components: $ref: "#/components/schemas/FineTuningJobEvent" object: type: string - enum: [ list ] + enum: [list] required: - object - data @@ -8853,7 +9134,7 @@ components: $ref: "#/components/schemas/FineTuningJobCheckpoint" object: type: string - enum: [ list ] + enum: [list] first_id: type: string nullable: true @@ -8928,7 +9209,7 @@ components: example: "float" default: "float" type: string - enum: [ "float", "base64" ] + enum: ["float", "base64"] dimensions: description: | The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. @@ -8953,7 +9234,7 @@ components: object: type: string description: The object type, which is always "list". - enum: [ list ] + enum: [list] usage: type: object description: The usage information for the request. @@ -8990,7 +9271,7 @@ components: anyOf: - type: string - type: string - enum: [ "whisper-1" ] + enum: ["whisper-1"] x-oaiTypeLabel: string language: description: | @@ -9025,7 +9306,7 @@ components: enum: - word - segment - default: [ segment ] + default: [segment] required: - file - model @@ -9112,7 +9393,7 @@ components: type: number format: float description: End time of the word in seconds. 
- required: [ word, start, end ] + required: [word, start, end] CreateTranscriptionResponseVerboseJson: type: object @@ -9137,7 +9418,7 @@ components: description: Segments of the transcribed text and their corresponding details. items: $ref: "#/components/schemas/TranscriptionSegment" - required: [ language, duration, text ] + required: [language, duration, text] x-oaiMeta: name: The transcription object (Verbose JSON) group: audio @@ -9160,7 +9441,7 @@ components: anyOf: - type: string - type: string - enum: [ "whisper-1" ] + enum: ["whisper-1"] x-oaiTypeLabel: string prompt: description: | @@ -9206,7 +9487,7 @@ components: description: Segments of the translated text and their corresponding details. items: $ref: "#/components/schemas/TranscriptionSegment" - required: [ language, duration, text ] + required: [language, duration, text] CreateSpeechRequest: type: object @@ -9218,7 +9499,7 @@ components: anyOf: - type: string - type: string - enum: [ "tts-1", "tts-1-hd" ] + enum: ["tts-1", "tts-1-hd"] x-oaiTypeLabel: string input: type: string @@ -9227,12 +9508,12 @@ components: voice: description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). type: string - enum: [ "alloy", "echo", "fable", "onyx", "nova", "shimmer" ] + enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] response_format: description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." default: "mp3" type: string - enum: [ "mp3", "opus", "aac", "flac", "wav", "pcm" ] + enum: ["mp3", "opus", "aac", "flac", "wav", "pcm"] speed: description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." type: number @@ -9257,7 +9538,7 @@ components: object: type: string description: The object type, which is always "model". - enum: [ model ] + enum: [model] owned_by: type: string description: The organization that owns the model. @@ -9289,7 +9570,7 @@ components: object: type: string description: The object type, which is always `file`. - enum: [ "file" ] + enum: ["file"] purpose: type: string description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. @@ -9307,7 +9588,7 @@ components: type: string deprecated: true description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. - enum: [ "uploaded", "processed", "error" ] + enum: ["uploaded", "processed", "error"] status_details: type: string deprecated: true @@ -9331,6 +9612,105 @@ components: "filename": "salesOverview.pdf", "purpose": "assistants", } + Upload: + type: object + title: Upload + description: | + The Upload object can accept byte chunks in the form of Parts. + properties: + id: + type: string + description: The Upload unique identifier, which can be referenced in API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload was created. + filename: + type: string + description: The name of the file to be uploaded. + bytes: + type: integer + description: The intended number of bytes to be uploaded. + purpose: + type: string + description: The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values. 
+        status:
+          type: string
+          description: The status of the Upload.
+          enum: ["pending", "completed", "cancelled", "expired"]
+        expires_at:
+          type: integer
+          description: The Unix timestamp (in seconds) for when the Upload will expire.
+        object:
+          type: string
+          description: The object type, which is always "upload".
+          enum: [upload]
+        file:
+          $ref: "#/components/schemas/OpenAIFile"
+          nullable: true
+          description: The ready File object after the Upload is completed.
+      required:
+        - bytes
+        - created_at
+        - expires_at
+        - filename
+        - id
+        - purpose
+        - status
+        - object
+      x-oaiMeta:
+        name: The upload object
+        example: |
+          {
+            "id": "upload_abc123",
+            "object": "upload",
+            "bytes": 2147483648,
+            "created_at": 1719184911,
+            "filename": "training_examples.jsonl",
+            "purpose": "fine-tune",
+            "status": "completed",
+            "expires_at": 1719127296,
+            "file": {
+              "id": "file-xyz321",
+              "object": "file",
+              "bytes": 2147483648,
+              "created_at": 1719186911,
+              "filename": "training_examples.jsonl",
+              "purpose": "fine-tune"
+            }
+          }
+    UploadPart:
+      type: object
+      title: UploadPart
+      description: |
+        The upload Part represents a chunk of bytes we can add to an Upload object.
+      properties:
+        id:
+          type: string
+          description: The upload Part unique identifier, which can be referenced in API endpoints.
+        created_at:
+          type: integer
+          description: The Unix timestamp (in seconds) for when the Part was created.
+        upload_id:
+          type: string
+          description: The ID of the Upload object that this Part was added to.
+        object:
+          type: string
+          description: The object type, which is always `upload.part`.
+          enum: ["upload.part"]
+      required:
+        - created_at
+        - id
+        - object
+        - upload_id
+      x-oaiMeta:
+        name: The upload part object
+        example: |
+          {
+            "id": "part_def456",
+            "object": "upload.part",
+            "created_at": 1719186911,
+            "upload_id": "upload_abc123"
+          }
     Embedding:
       type: object
       description: |
@@ -9348,7 +9728,7 @@ components:
         object:
           type: string
           description: The object type, which is always "embedding".
-          enum: [ embedding ]
+          enum: [embedding]
       required:
         - index
         - object
@@ -9413,15 +9793,15 @@ components:
             n_epochs:
               oneOf:
                 - type: string
-                  enum: [ auto ]
+                  enum: [auto]
                 - type: integer
                   minimum: 1
                   maximum: 50
               default: auto
               description:
-                The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
-
-                "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs.
+                The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
+
+                "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs.
           required:
             - n_epochs
         model:
           type: string
         object:
           type: string
           description: The object type, which is always "fine_tuning.job".
-          enum: [ fine_tuning.job ]
+          enum: [fine_tuning.job]
         organization_id:
           type: string
           description: The organization that owns the fine-tuning job.
@@ -9509,7 +9889,7 @@ components: type: type: string description: "The type of the integration being enabled for the fine-tuning job" - enum: [ "wandb" ] + enum: ["wandb"] wandb: type: object description: | @@ -9554,12 +9934,12 @@ components: type: integer level: type: string - enum: [ "info", "warn", "error" ] + enum: ["info", "warn", "error"] message: type: string object: type: string - enum: [ fine_tuning.job.event ] + enum: [fine_tuning.job.event] required: - id - object @@ -9619,7 +9999,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job.checkpoint". - enum: [ fine_tuning.job.checkpoint ] + enum: [fine_tuning.job.checkpoint] required: - created_at - fine_tuning_job_id @@ -9808,7 +10188,7 @@ components: - type: string description: > `auto` is the default value - enum: [ none, auto ] + enum: [none, auto] - $ref: "#/components/schemas/AssistantsApiResponseFormat" x-oaiExpandable: true @@ -9819,7 +10199,7 @@ components: properties: type: type: string - enum: [ "text", "json_object" ] + enum: ["text", "json_object"] example: "json_object" default: "text" description: Must be one of `text` or `json_object`. @@ -9835,7 +10215,7 @@ components: object: description: The object type, which is always `assistant`. type: string - enum: [ assistant ] + enum: [assistant] created_at: description: The Unix timestamp (in seconds) for when the assistant was created. type: integer @@ -9863,7 +10243,7 @@ components: tools: description: &assistant_tools_param_description | A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - default: [ ] + default: [] type: array maxItems: 128 items: @@ -9884,7 +10264,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + default: [] maxItems: 20 items: type: string @@ -9957,6 +10337,8 @@ components: [ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -9994,7 +10376,7 @@ components: maxLength: 256000 tools: description: *assistant_tools_param_description - default: [ ] + default: [] type: array maxItems: 128 items: @@ -10015,7 +10397,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + default: [] maxItems: 20 items: type: string @@ -10057,7 +10439,7 @@ components: type: type: string description: Always `auto`. - enum: [ "auto" ] + enum: ["auto"] required: - type - type: object @@ -10067,7 +10449,7 @@ components: type: type: string description: Always `static`. - enum: [ "static" ] + enum: ["static"] static: type: object additionalProperties: false @@ -10096,8 +10478,8 @@ components: Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
x-oaiTypeLabel: map oneOf: - - required: [ vector_store_ids ] - - required: [ vector_stores ] + - required: [vector_store_ids] + - required: [vector_stores] nullable: true metadata: description: *metadata_description @@ -10155,7 +10537,7 @@ components: maxLength: 256000 tools: description: *assistant_tools_param_description - default: [ ] + default: [] type: array maxItems: 128 items: @@ -10176,7 +10558,7 @@ components: type: array description: | Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + default: [] maxItems: 20 items: type: string @@ -10228,7 +10610,7 @@ components: type: boolean object: type: string - enum: [ assistant.deleted ] + enum: [assistant.deleted] required: - id - object @@ -10271,7 +10653,7 @@ components: type: type: string description: "The type of tool being defined: `code_interpreter`" - enum: [ "code_interpreter" ] + enum: ["code_interpreter"] required: - type @@ -10282,7 +10664,7 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - enum: [ "file_search" ] + enum: ["file_search"] file_search: type: object description: Overrides for the file search tool. @@ -10305,7 +10687,7 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - enum: [ "file_search" ] + enum: ["file_search"] required: - type @@ -10316,7 +10698,7 @@ components: type: type: string description: "The type of tool being defined: `function`" - enum: [ "function" ] + enum: ["function"] function: $ref: "#/components/schemas/FunctionObject" required: @@ -10331,7 +10713,7 @@ components: type: type: string description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: [ "auto", "last_messages" ] + enum: ["auto", "last_messages"] last_messages: type: integer description: The number of most recent messages from the thread when constructing the context for the run. @@ -10354,7 +10736,7 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [ none, auto, required ] + enum: [none, auto, required] - $ref: "#/components/schemas/AssistantsNamedToolChoice" x-oaiExpandable: true @@ -10364,7 +10746,7 @@ components: properties: type: type: string - enum: [ "function", "code_interpreter", "file_search" ] + enum: ["function", "code_interpreter", "file_search"] description: The type of the tool. If type is `function`, the function name must be set function: type: object @@ -10388,7 +10770,7 @@ components: object: description: The object type, which is always `thread.run`. type: string - enum: [ "thread.run" ] + enum: ["thread.run"] created_at: description: The Unix timestamp (in seconds) for when the run was created. type: integer @@ -10421,7 +10803,7 @@ components: type: description: For now, this is always `submit_tool_outputs`. type: string - enum: [ "submit_tool_outputs" ] + enum: ["submit_tool_outputs"] submit_tool_outputs: type: object description: Details on the tool outputs needed for this run to continue. 
@@ -10445,7 +10827,7 @@ components: type: string description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. enum: - [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] + ["server_error", "rate_limit_exceeded", "invalid_prompt"] message: type: string description: A human-readable description of the error. @@ -10480,7 +10862,7 @@ components: reason: description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. type: string - enum: [ "max_completion_tokens", "max_prompt_tokens" ] + enum: ["max_completion_tokens", "max_prompt_tokens"] model: description: The model that the [assistant](/docs/api-reference/assistants) used for this run. type: string @@ -10489,7 +10871,7 @@ components: type: string tools: description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. - default: [ ] + default: [] type: array maxItems: 20 items: @@ -10618,6 +11000,8 @@ components: [ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -10787,7 +11171,7 @@ components: type: type: string description: The type of tool call the output is required for. For now, this is always `function`. - enum: [ "function" ] + enum: ["function"] function: type: object description: The function definition. @@ -10826,6 +11210,8 @@ components: [ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -10873,7 +11259,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + default: [] maxItems: 20 items: type: string @@ -10952,7 +11338,7 @@ components: object: description: The object type, which is always `thread`. type: string - enum: [ "thread" ] + enum: ["thread"] created_at: description: The Unix timestamp (in seconds) for when the thread was created. type: integer @@ -10968,7 +11354,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + default: [] maxItems: 20 items: type: string @@ -11026,7 +11412,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + default: [] maxItems: 20 items: type: string @@ -11068,7 +11454,7 @@ components: type: type: string description: Always `auto`. - enum: [ "auto" ] + enum: ["auto"] required: - type - type: object @@ -11078,7 +11464,7 @@ components: type: type: string description: Always `static`. - enum: [ "static" ] + enum: ["static"] static: type: object additionalProperties: false @@ -11108,8 +11494,8 @@ components: x-oaiTypeLabel: map x-oaiExpandable: true oneOf: - - required: [ vector_store_ids ] - - required: [ vector_stores ] + - required: [vector_store_ids] + - required: [vector_stores] nullable: true metadata: description: *metadata_description @@ -11133,7 +11519,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. 
- default: [ ] + default: [] maxItems: 20 items: type: string @@ -11163,7 +11549,7 @@ components: type: boolean object: type: string - enum: [ thread.deleted ] + enum: [thread.deleted] required: - id - object @@ -11205,7 +11591,7 @@ components: object: description: The object type, which is always `thread.message`. type: string - enum: [ "thread.message" ] + enum: ["thread.message"] created_at: description: The Unix timestamp (in seconds) for when the message was created. type: integer @@ -11215,7 +11601,7 @@ components: status: description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. type: string - enum: [ "in_progress", "incomplete", "completed" ] + enum: ["in_progress", "incomplete", "completed"] incomplete_details: description: On an incomplete message, details about why the message is incomplete. type: object @@ -11245,7 +11631,7 @@ components: role: description: The entity that produced the message. One of `user` or `assistant`. type: string - enum: [ "user", "assistant" ] + enum: ["user", "assistant"] content: description: The content of the message in array of text and/or images. type: array @@ -11338,7 +11724,7 @@ components: object: description: The object type, which is always `thread.message.delta`. type: string - enum: [ "thread.message.delta" ] + enum: ["thread.message.delta"] delta: description: The delta containing the fields that have changed on the Message. type: object @@ -11346,7 +11732,7 @@ components: role: description: The entity that produced the message. One of `user` or `assistant`. type: string - enum: [ "user", "assistant" ] + enum: ["user", "assistant"] content: description: The content of the message in array of text and/or images. type: array @@ -11387,7 +11773,7 @@ components: properties: role: type: string - enum: [ "user", "assistant" ] + enum: ["user", "assistant"] description: | The role of the entity that is creating the message. Allowed values include: - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. @@ -11454,7 +11840,7 @@ components: type: boolean object: type: string - enum: [ thread.message.deleted ] + enum: [thread.message.deleted] required: - id - object @@ -11493,7 +11879,7 @@ components: type: description: Always `image_file`. type: string - enum: [ "image_file" ] + enum: ["image_file"] image_file: type: object properties: @@ -11503,7 +11889,7 @@ components: detail: type: string description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] + enum: ["auto", "low", "high"] default: "auto" required: - file_id @@ -11522,7 +11908,7 @@ components: type: description: Always `image_file`. type: string - enum: [ "image_file" ] + enum: ["image_file"] image_file: type: object properties: @@ -11532,7 +11918,7 @@ components: detail: type: string description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] + enum: ["auto", "low", "high"] default: "auto" required: - index @@ -11545,7 +11931,7 @@ components: properties: type: type: string - enum: [ "image_url" ] + enum: ["image_url"] description: The type of the content part. image_url: type: object @@ -11557,7 +11943,7 @@ components: detail: type: string description: Specifies the detail level of the image. 
`low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` - enum: [ "auto", "low", "high" ] + enum: ["auto", "low", "high"] default: "auto" required: - url @@ -11576,7 +11962,7 @@ components: type: description: Always `image_url`. type: string - enum: [ "image_url" ] + enum: ["image_url"] image_url: type: object properties: @@ -11586,7 +11972,7 @@ components: detail: type: string description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] + enum: ["auto", "low", "high"] default: "auto" required: - index @@ -11600,7 +11986,7 @@ components: type: description: Always `text`. type: string - enum: [ "text" ] + enum: ["text"] text: type: object properties: @@ -11629,7 +12015,7 @@ components: type: description: Always `text`. type: string - enum: [ "text" ] + enum: ["text"] text: type: string description: Text content to be sent to the model @@ -11645,7 +12031,7 @@ components: type: description: Always `file_citation`. type: string - enum: [ "file_citation" ] + enum: ["file_citation"] text: description: The text in the message content that needs to be replaced. type: string @@ -11678,7 +12064,7 @@ components: type: description: Always `file_path`. type: string - enum: [ "file_path" ] + enum: ["file_path"] text: description: The text in the message content that needs to be replaced. type: string @@ -11714,7 +12100,7 @@ components: type: description: Always `text`. type: string - enum: [ "text" ] + enum: ["text"] text: type: object properties: @@ -11743,7 +12129,7 @@ components: type: description: Always `file_citation`. type: string - enum: [ "file_citation" ] + enum: ["file_citation"] text: description: The text in the message content that needs to be replaced. type: string @@ -11777,7 +12163,7 @@ components: type: description: Always `file_path`. type: string - enum: [ "file_path" ] + enum: ["file_path"] text: description: The text in the message content that needs to be replaced. type: string @@ -11809,7 +12195,7 @@ components: object: description: The object type, which is always `thread.run.step`. type: string - enum: [ "thread.run.step" ] + enum: ["thread.run.step"] created_at: description: The Unix timestamp (in seconds) for when the run step was created. type: integer @@ -11825,11 +12211,11 @@ components: type: description: The type of run step, which can be either `message_creation` or `tool_calls`. type: string - enum: [ "message_creation", "tool_calls" ] + enum: ["message_creation", "tool_calls"] status: description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. type: string - enum: [ "in_progress", "cancelled", "failed", "completed", "expired" ] + enum: ["in_progress", "cancelled", "failed", "completed", "expired"] step_details: type: object description: The details of the run step. @@ -11845,7 +12231,7 @@ components: code: type: string description: One of `server_error` or `rate_limit_exceeded`. - enum: [ "server_error", "rate_limit_exceeded" ] + enum: ["server_error", "rate_limit_exceeded"] message: type: string description: A human-readable description of the error. @@ -11909,7 +12295,7 @@ components: object: description: The object type, which is always `thread.run.step.delta`. type: string - enum: [ "thread.run.step.delta" ] + enum: ["thread.run.step.delta"] delta: description: The delta containing the fields that have changed on the run step. 
type: object @@ -11980,7 +12366,7 @@ components: type: description: Always `message_creation`. type: string - enum: [ "message_creation" ] + enum: ["message_creation"] message_creation: type: object properties: @@ -12001,7 +12387,7 @@ components: type: description: Always `message_creation`. type: string - enum: [ "message_creation" ] + enum: ["message_creation"] message_creation: type: object properties: @@ -12019,7 +12405,7 @@ components: type: description: Always `tool_calls`. type: string - enum: [ "tool_calls" ] + enum: ["tool_calls"] tool_calls: type: array description: | @@ -12042,7 +12428,7 @@ components: type: description: Always `tool_calls`. type: string - enum: [ "tool_calls" ] + enum: ["tool_calls"] tool_calls: type: array description: | @@ -12067,7 +12453,7 @@ components: type: type: string description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: [ "code_interpreter" ] + enum: ["code_interpreter"] code_interpreter: type: object description: The Code Interpreter tool call definition. @@ -12106,7 +12492,7 @@ components: type: type: string description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: [ "code_interpreter" ] + enum: ["code_interpreter"] code_interpreter: type: object description: The Code Interpreter tool call definition. @@ -12135,7 +12521,7 @@ components: type: description: Always `logs`. type: string - enum: [ "logs" ] + enum: ["logs"] logs: type: string description: The text output from the Code Interpreter tool call. @@ -12154,7 +12540,7 @@ components: type: description: Always `logs`. type: string - enum: [ "logs" ] + enum: ["logs"] logs: type: string description: The text output from the Code Interpreter tool call. @@ -12169,7 +12555,7 @@ components: type: description: Always `image`. type: string - enum: [ "image" ] + enum: ["image"] image: type: object properties: @@ -12192,7 +12578,7 @@ components: type: description: Always `image`. type: string - enum: [ "image" ] + enum: ["image"] image: type: object properties: @@ -12213,7 +12599,7 @@ components: type: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: [ "file_search" ] + enum: ["file_search"] file_search: type: object description: For now, this is always going to be an empty object. @@ -12236,7 +12622,7 @@ components: type: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: [ "file_search" ] + enum: ["file_search"] file_search: type: object description: For now, this is always going to be an empty object. @@ -12256,7 +12642,7 @@ components: type: type: string description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: [ "function" ] + enum: ["function"] function: type: object description: The definition of the function that was called. @@ -12293,7 +12679,7 @@ components: type: type: string description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: [ "function" ] + enum: ["function"] function: type: object description: The definition of the function that was called. @@ -12320,7 +12706,7 @@ components: anchor: description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." 
type: string - enum: [ "last_active_at" ] + enum: ["last_active_at"] days: description: The number of days after the anchor time that the vector store will expire. type: integer @@ -12341,7 +12727,7 @@ components: object: description: The object type, which is always `vector_store`. type: string - enum: [ "vector_store" ] + enum: ["vector_store"] created_at: description: The Unix timestamp (in seconds) for when the vector store was created. type: integer @@ -12378,7 +12764,7 @@ components: status: description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. type: string - enum: [ "expired", "in_progress", "completed" ] + enum: ["expired", "in_progress", "completed"] expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" expires_at: @@ -12506,7 +12892,7 @@ components: type: boolean object: type: string - enum: [ vector_store.deleted ] + enum: [vector_store.deleted] required: - id - object @@ -12523,7 +12909,7 @@ components: object: description: The object type, which is always `vector_store.file`. type: string - enum: [ "vector_store.file" ] + enum: ["vector_store.file"] usage_bytes: description: The total vector store usage in bytes. Note that this may be different from the original file size. type: integer @@ -12536,7 +12922,7 @@ components: status: description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. type: string - enum: [ "in_progress", "completed", "cancelled", "failed" ] + enum: ["in_progress", "completed", "cancelled", "failed"] last_error: type: object description: The last error associated with this vector store file. Will be `null` if there are no errors. @@ -12603,7 +12989,7 @@ components: type: type: string description: Always `other`. - enum: [ "other" ] + enum: ["other"] required: - type @@ -12615,7 +13001,7 @@ components: type: type: string description: Always `static`. - enum: [ "static" ] + enum: ["static"] static: $ref: "#/components/schemas/StaticChunkingStrategy" required: @@ -12650,7 +13036,7 @@ components: type: type: string description: Always `auto`. - enum: [ "auto" ] + enum: ["auto"] required: - type @@ -12662,7 +13048,7 @@ components: type: type: string description: Always `static`. - enum: [ "static" ] + enum: ["static"] static: $ref: "#/components/schemas/StaticChunkingStrategy" required: @@ -12723,7 +13109,7 @@ components: type: boolean object: type: string - enum: [ vector_store.file.deleted ] + enum: [vector_store.file.deleted] required: - id - object @@ -12740,7 +13126,7 @@ components: object: description: The object type, which is always `vector_store.file_batch`. type: string - enum: [ "vector_store.files_batch" ] + enum: ["vector_store.files_batch"] created_at: description: The Unix timestamp (in seconds) for when the vector store files batch was created. type: integer @@ -12750,7 +13136,7 @@ components: status: description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. 
type: string - enum: [ "in_progress", "completed", "cancelled", "failed" ] + enum: ["in_progress", "completed", "cancelled", "failed"] file_counts: type: object properties: @@ -12855,7 +13241,7 @@ components: properties: event: type: string - enum: [ "thread.created" ] + enum: ["thread.created"] data: $ref: "#/components/schemas/ThreadObject" required: @@ -12871,7 +13257,7 @@ components: properties: event: type: string - enum: [ "thread.run.created" ] + enum: ["thread.run.created"] data: $ref: "#/components/schemas/RunObject" required: @@ -12884,7 +13270,7 @@ components: properties: event: type: string - enum: [ "thread.run.queued" ] + enum: ["thread.run.queued"] data: $ref: "#/components/schemas/RunObject" required: @@ -12897,7 +13283,7 @@ components: properties: event: type: string - enum: [ "thread.run.in_progress" ] + enum: ["thread.run.in_progress"] data: $ref: "#/components/schemas/RunObject" required: @@ -12910,7 +13296,7 @@ components: properties: event: type: string - enum: [ "thread.run.requires_action" ] + enum: ["thread.run.requires_action"] data: $ref: "#/components/schemas/RunObject" required: @@ -12923,7 +13309,7 @@ components: properties: event: type: string - enum: [ "thread.run.completed" ] + enum: ["thread.run.completed"] data: $ref: "#/components/schemas/RunObject" required: @@ -12949,7 +13335,7 @@ components: properties: event: type: string - enum: [ "thread.run.failed" ] + enum: ["thread.run.failed"] data: $ref: "#/components/schemas/RunObject" required: @@ -12962,7 +13348,7 @@ components: properties: event: type: string - enum: [ "thread.run.cancelling" ] + enum: ["thread.run.cancelling"] data: $ref: "#/components/schemas/RunObject" required: @@ -12975,7 +13361,7 @@ components: properties: event: type: string - enum: [ "thread.run.cancelled" ] + enum: ["thread.run.cancelled"] data: $ref: "#/components/schemas/RunObject" required: @@ -12988,7 +13374,7 @@ components: properties: event: type: string - enum: [ "thread.run.expired" ] + enum: ["thread.run.expired"] data: $ref: "#/components/schemas/RunObject" required: @@ -13004,7 +13390,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.created" ] + enum: ["thread.run.step.created"] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13017,7 +13403,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.in_progress" ] + enum: ["thread.run.step.in_progress"] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13030,7 +13416,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.delta" ] + enum: ["thread.run.step.delta"] data: $ref: "#/components/schemas/RunStepDeltaObject" required: @@ -13043,7 +13429,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.completed" ] + enum: ["thread.run.step.completed"] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13056,7 +13442,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.failed" ] + enum: ["thread.run.step.failed"] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13069,7 +13455,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.cancelled" ] + enum: ["thread.run.step.cancelled"] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13082,7 +13468,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.expired" ] + enum: ["thread.run.step.expired"] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13098,7 +13484,7 @@ components: properties: event: type: 
string - enum: [ "thread.message.created" ] + enum: ["thread.message.created"] data: $ref: "#/components/schemas/MessageObject" required: @@ -13111,7 +13497,7 @@ components: properties: event: type: string - enum: [ "thread.message.in_progress" ] + enum: ["thread.message.in_progress"] data: $ref: "#/components/schemas/MessageObject" required: @@ -13124,7 +13510,7 @@ components: properties: event: type: string - enum: [ "thread.message.delta" ] + enum: ["thread.message.delta"] data: $ref: "#/components/schemas/MessageDeltaObject" required: @@ -13137,7 +13523,7 @@ components: properties: event: type: string - enum: [ "thread.message.completed" ] + enum: ["thread.message.completed"] data: $ref: "#/components/schemas/MessageObject" required: @@ -13150,7 +13536,7 @@ components: properties: event: type: string - enum: [ "thread.message.incomplete" ] + enum: ["thread.message.incomplete"] data: $ref: "#/components/schemas/MessageObject" required: @@ -13165,7 +13551,7 @@ components: properties: event: type: string - enum: [ "error" ] + enum: ["error"] data: $ref: "#/components/schemas/Error" required: @@ -13180,10 +13566,10 @@ components: properties: event: type: string - enum: [ "done" ] + enum: ["done"] data: type: string - enum: [ "[DONE]" ] + enum: ["[DONE]"] required: - event - data @@ -13198,7 +13584,7 @@ components: type: string object: type: string - enum: [ batch ] + enum: [batch] description: The object type, which is always `batch`. endpoint: type: string @@ -13323,7 +13709,7 @@ components: description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. method: type: string - enum: [ "POST" ] + enum: ["POST"] description: The HTTP method to be used for the request. Currently only `POST` is supported. url: type: string @@ -13389,14 +13775,14 @@ components: type: boolean object: type: string - enum: [ list ] + enum: [list] required: - object - data - has_more security: - - ApiKeyAuth: [ ] + - ApiKeyAuth: [] x-oaiMeta: navigationGroups: @@ -13576,6 +13962,30 @@ x-oaiMeta: - type: object key: OpenAIFile path: object + - id: uploads + title: Uploads + description: | + Allows you to upload large files in multiple parts. 
+ navigationGroup: endpoints + sections: + - type: endpoint + key: createUpload + path: create + - type: endpoint + key: addUploadPart + path: add-part + - type: endpoint + key: completeUpload + path: complete + - type: endpoint + key: cancelUpload + path: cancel + - type: object + key: Upload + path: object + - type: object + key: UploadPart + path: part-object - id: images title: Images description: | From 8846e02ad911d6bc50f76ee877b9b04f25cbbf5e Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 20 Jul 2024 16:33:58 +0200 Subject: [PATCH 192/251] refactor: Remove default model from the language model options (#498) --- .../lib/src/chat_models/chat_anthropic.dart | 11 +- .../lib/src/chat_models/mappers.dart | 8 +- .../lib/src/chat_models/types.dart | 61 ++++++-- packages/langchain_anthropic/pubspec.yaml | 1 + .../lib/src/chat_models/types.dart | 3 +- .../lib/src/language_models/base.dart | 31 ---- .../lib/src/language_models/types.dart | 5 + .../langchain_core/lib/src/llms/types.dart | 1 + .../vertex_ai/chat_firebase_vertex_ai.dart | 14 +- .../lib/src/chat_models/vertex_ai/types.dart | 81 +++++++++- .../google_ai/chat_google_generative_ai.dart | 14 +- .../lib/src/chat_models/google_ai/types.dart | 48 +++++- .../chat_models/vertex_ai/chat_vertex_ai.dart | 33 ++-- .../lib/src/chat_models/vertex_ai/types.dart | 52 +++++-- .../lib/src/llms/vertex_ai/types.dart | 51 +++++-- .../lib/src/llms/vertex_ai/vertex_ai.dart | 32 ++-- .../chat_google_generative_ai_test.dart | 14 +- .../lib/src/chat_models/chat_mistralai.dart | 7 +- .../lib/src/chat_models/types.dart | 35 ++++- .../chat_models/chat_ollama/chat_ollama.dart | 5 +- .../src/chat_models/chat_ollama/types.dart | 93 +++++++++++- .../chat_ollama_tools/chat_ollama_tools.dart | 7 +- .../chat_models/chat_ollama_tools/types.dart | 36 +++++ .../langchain_ollama/lib/src/llms/ollama.dart | 7 +- .../langchain_ollama/lib/src/llms/types.dart | 103 ++++++++++++- .../test/chat_models/chat_ollama_test.dart | 2 +- .../lib/src/chat_models/chat_openai.dart | 18 ++- .../lib/src/chat_models/types.dart | 141 ++++++++++++------ .../langchain_openai/lib/src/llms/openai.dart | 26 +++- .../langchain_openai/lib/src/llms/types.dart | 123 +++++++-------- .../test/chains/qa_with_sources_test.dart | 1 - .../test/chat_models/anyscale_test.dart | 4 - .../test/chat_models/chat_openai_test.dart | 1 - 33 files changed, 767 insertions(+), 302 deletions(-) diff --git a/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart b/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart index 13a687a3..1c8360d4 100644 --- a/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart +++ b/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart @@ -154,7 +154,8 @@ class ChatAnthropic extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatAnthropicOptions( - model: 'claude-3-5-sonnet-20240620', + model: defaultModel, + maxTokens: defaultMaxTokens, ), this.encoding = 'cl100k_base', }) : _client = a.AnthropicClient( @@ -177,6 +178,12 @@ class ChatAnthropic extends BaseChatModel { @override String get modelType => 'anthropic-chat'; + /// The default model to use unless another is specified. + static const defaultModel = 'claude-3-5-sonnet-20240620'; + + /// The default max tokens to use unless another is specified. 
+ static const defaultMaxTokens = 1024; + @override Future invoke( final PromptValue input, { @@ -187,7 +194,6 @@ class ChatAnthropic extends BaseChatModel { input.toChatMessages(), options: options, defaultOptions: defaultOptions, - throwNullModelError: throwNullModelError, ), ); return completion.toChatResult(); @@ -205,7 +211,6 @@ class ChatAnthropic extends BaseChatModel { options: options, defaultOptions: defaultOptions, stream: true, - throwNullModelError: throwNullModelError, ), ) .transform(MessageStreamEventTransformer()); diff --git a/packages/langchain_anthropic/lib/src/chat_models/mappers.dart b/packages/langchain_anthropic/lib/src/chat_models/mappers.dart index 002df82c..020ef844 100644 --- a/packages/langchain_anthropic/lib/src/chat_models/mappers.dart +++ b/packages/langchain_anthropic/lib/src/chat_models/mappers.dart @@ -9,6 +9,7 @@ import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/tools.dart'; import 'package:rxdart/rxdart.dart' show WhereNotNullExtension; +import 'chat_anthropic.dart'; import 'types.dart'; /// Creates a [CreateMessageRequest] from the given input. @@ -17,7 +18,6 @@ a.CreateMessageRequest createMessageRequest( required final ChatAnthropicOptions? options, required final ChatAnthropicOptions defaultOptions, final bool stream = false, - required Never Function() throwNullModelError, }) { final systemMsg = messages.firstOrNull is SystemChatMessage ? messages.firstOrNull?.contentAsString @@ -31,10 +31,12 @@ a.CreateMessageRequest createMessageRequest( return a.CreateMessageRequest( model: a.Model.modelId( - options?.model ?? defaultOptions.model ?? throwNullModelError(), + options?.model ?? defaultOptions.model ?? ChatAnthropic.defaultModel, ), messages: messagesDtos, - maxTokens: options?.maxTokens ?? defaultOptions.maxTokens ?? 1024, + maxTokens: options?.maxTokens ?? + defaultOptions.maxTokens ?? + ChatAnthropic.defaultMaxTokens, stopSequences: options?.stopSequences ?? defaultOptions.stopSequences, system: systemMsg, temperature: options?.temperature ?? defaultOptions.temperature, diff --git a/packages/langchain_anthropic/lib/src/chat_models/types.dart b/packages/langchain_anthropic/lib/src/chat_models/types.dart index 4374c820..98069444 100644 --- a/packages/langchain_anthropic/lib/src/chat_models/types.dart +++ b/packages/langchain_anthropic/lib/src/chat_models/types.dart @@ -1,14 +1,28 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; /// {@template chat_anthropic_options} /// Options to pass into the Anthropic Chat Model. +/// +/// Available models: +/// - `claude-3-5-sonnet-20240620` +/// - `claude-3-haiku-20240307` +/// - `claude-3-opus-20240229` +/// - `claude-3-sonnet-20240229` +/// - `claude-2.0` +/// - `claude-2.1` +/// +/// Mind that the list may be outdated. +/// See https://docs.anthropic.com/en/docs/about-claude/models for the latest list. /// {@endtemplate} +@immutable class ChatAnthropicOptions extends ChatModelOptions { /// {@macro chat_anthropic_options} const ChatAnthropicOptions({ - this.model = 'claude-3-5-sonnet-20240620', - this.maxTokens = 1024, + super.model, + this.maxTokens, this.stopSequences, this.temperature, this.topK, @@ -19,20 +33,6 @@ class ChatAnthropicOptions extends ChatModelOptions { super.concurrencyLimit, }); - /// ID of the model to use (e.g. 'claude-3-5-sonnet-20240620'). 
- /// - /// Available models: - /// - `claude-3-5-sonnet-20240620` - /// - `claude-3-haiku-20240307` - /// - `claude-3-opus-20240229` - /// - `claude-3-sonnet-20240229` - /// - `claude-2.0` - /// - `claude-2.1` - /// - /// Mind that the list may be outdated. - /// See https://docs.anthropic.com/en/docs/about-claude/models for the latest list. - final String? model; - /// The maximum number of tokens to generate before stopping. /// /// Note that our models may stop _before_ reaching this maximum. This parameter @@ -113,4 +113,33 @@ class ChatAnthropicOptions extends ChatModelOptions { concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatAnthropicOptions other) { + return model == other.model && + maxTokens == other.maxTokens && + const ListEquality() + .equals(stopSequences, other.stopSequences) && + temperature == other.temperature && + topK == other.topK && + topP == other.topP && + userId == other.userId && + const ListEquality().equals(tools, other.tools) && + toolChoice == other.toolChoice && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + maxTokens.hashCode ^ + const ListEquality().hash(stopSequences) ^ + temperature.hashCode ^ + topK.hashCode ^ + topP.hashCode ^ + userId.hashCode ^ + const ListEquality().hash(tools) ^ + toolChoice.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 33e625f7..180234ac 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -21,6 +21,7 @@ dependencies: http: ^1.1.0 langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 + meta: ^1.11.0 rxdart: ^0.27.7 dev_dependencies: diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index fa0bc0fc..e9b788c7 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -10,9 +10,10 @@ import '../tools/base.dart'; class ChatModelOptions extends LanguageModelOptions { /// {@macro chat_model_options} const ChatModelOptions({ - super.concurrencyLimit, + super.model, this.tools, this.toolChoice, + super.concurrencyLimit, }); /// A list of tools the model may call. diff --git a/packages/langchain_core/lib/src/language_models/base.dart b/packages/langchain_core/lib/src/language_models/base.dart index 33d3b002..3156cd74 100644 --- a/packages/langchain_core/lib/src/language_models/base.dart +++ b/packages/langchain_core/lib/src/language_models/base.dart @@ -1,5 +1,3 @@ -import 'package:meta/meta.dart'; - import '../langchain/base.dart'; import '../prompts/types.dart'; import 'types.dart'; @@ -58,33 +56,4 @@ abstract class BaseLanguageModel< @override String toString() => modelType; - - /// Throws an error if the model id is not specified. - @protected - Never throwNullModelError() { - throw ArgumentError(''' -Null model in $runtimeType. - -You need to specify the id of model to use either in `$runtimeType.defaultOptions` -or in the options passed when invoking the model. 
- -Example: -``` -// In defaultOptions -final model = $runtimeType( - defaultOptions: ${runtimeType}Options( - model: 'model-id', - ), -); - -// Or when invoking the model -final res = await model.invoke( - prompt, - options: ${runtimeType}Options( - model: 'model-id', - ), -); -``` -'''); - } } diff --git a/packages/langchain_core/lib/src/language_models/types.dart b/packages/langchain_core/lib/src/language_models/types.dart index f1475ad2..c2e6df11 100644 --- a/packages/langchain_core/lib/src/language_models/types.dart +++ b/packages/langchain_core/lib/src/language_models/types.dart @@ -10,8 +10,13 @@ import '../langchain/types.dart'; abstract class LanguageModelOptions extends BaseLangChainOptions { /// {@macro language_model_options} const LanguageModelOptions({ + this.model, super.concurrencyLimit, }); + + /// ID of the language model to use. + /// Check the provider's documentation for available models. + final String? model; } /// {@template language_model} diff --git a/packages/langchain_core/lib/src/llms/types.dart b/packages/langchain_core/lib/src/llms/types.dart index d6bed6f3..47b98285 100644 --- a/packages/langchain_core/lib/src/llms/types.dart +++ b/packages/langchain_core/lib/src/llms/types.dart @@ -9,6 +9,7 @@ import '../language_models/types.dart'; class LLMOptions extends LanguageModelOptions { /// {@macro llm_options} const LLMOptions({ + super.model, super.concurrencyLimit, }); } diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 77ce67d6..20b2b520 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -154,7 +154,7 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// - [ChatFirebaseVertexAI.location] ChatFirebaseVertexAI({ super.defaultOptions = const ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-flash', + model: defaultModel, ), this.app, this.appCheck, @@ -188,15 +188,18 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// A UUID generator. late final Uuid _uuid = const Uuid(); - @override - String get modelType => 'chat-firebase-vertex-ai'; - /// The current model set in [_firebaseClient]; String _currentModel; /// The current system instruction set in [_firebaseClient]; String? _currentSystemInstruction; + @override + String get modelType => 'chat-firebase-vertex-ai'; + + /// The default model to use unless another is specified. + static const defaultModel = 'gemini-1.5-flash'; + @override Future invoke( final PromptValue input, { @@ -329,8 +332,7 @@ class ChatFirebaseVertexAI extends BaseChatModel { final List messages, final ChatFirebaseVertexAIOptions? options, ) { - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final model = options?.model ?? defaultOptions.model ?? defaultModel; final systemInstruction = messages.firstOrNull is SystemChatMessage ? 
messages.firstOrNull?.contentAsString diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart index d2aee55d..7c92e16c 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart @@ -1,12 +1,19 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; /// {@template chat_firebase_vertex_ai_options} /// Options to pass into the Vertex AI for Firebase model. +/// +/// You can find a list of available models here: +/// https://firebase.google.com/docs/vertex-ai/gemini-models /// {@endtemplate} +@immutable class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// {@macro chat_firebase_vertex_ai_options} const ChatFirebaseVertexAIOptions({ - this.model = 'gemini-1.5-flash', + super.model, this.topP, this.topK, this.candidateCount, @@ -20,12 +27,6 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { super.concurrencyLimit, }); - /// The LLM to use. - /// - /// You can find a list of available models here: - /// https://firebase.google.com/docs/vertex-ai/gemini-models - final String? model; - /// The maximum cumulative probability of tokens to consider when sampling. /// The model uses combined Top-k and nucleus sampling. Tokens are sorted /// based on their assigned probabilities so that only the most likely @@ -99,7 +100,11 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { final int? maxOutputTokens, final double? temperature, final List? stopSequences, + final String? responseMimeType, final List? safetySettings, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, }) { return ChatFirebaseVertexAIOptions( model: model ?? this.model, @@ -109,9 +114,48 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { maxOutputTokens: maxOutputTokens ?? this.maxOutputTokens, temperature: temperature ?? this.temperature, stopSequences: stopSequences ?? this.stopSequences, + responseMimeType: responseMimeType ?? this.responseMimeType, safetySettings: safetySettings ?? this.safetySettings, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, + concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatFirebaseVertexAIOptions other) { + return model == other.model && + topP == other.topP && + topK == other.topK && + candidateCount == other.candidateCount && + maxOutputTokens == other.maxOutputTokens && + temperature == other.temperature && + const ListEquality() + .equals(stopSequences, other.stopSequences) && + responseMimeType == other.responseMimeType && + const ListEquality() + .equals(safetySettings, other.safetySettings) && + const ListEquality().equals(tools, other.tools) && + toolChoice == other.toolChoice && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + topP.hashCode ^ + topK.hashCode ^ + candidateCount.hashCode ^ + maxOutputTokens.hashCode ^ + temperature.hashCode ^ + const ListEquality().hash(stopSequences) ^ + responseMimeType.hashCode ^ + const ListEquality() + .hash(safetySettings) ^ + const ListEquality().hash(tools) ^ + toolChoice.hashCode ^ + concurrencyLimit.hashCode; + } } /// {@template chat_google_generative_ai_safety_setting} @@ -119,6 +163,7 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// Passing a safety setting for a category changes the allowed probability that /// content is blocked. /// {@endtemplate} +@immutable class ChatFirebaseVertexAISafetySetting { /// {@macro chat_google_generative_ai_safety_setting} const ChatFirebaseVertexAISafetySetting({ @@ -131,6 +176,28 @@ class ChatFirebaseVertexAISafetySetting { /// Controls the probability threshold at which harm is blocked. final ChatFirebaseVertexAISafetySettingThreshold threshold; + + /// Creates a copy of this [ChatFirebaseVertexAISafetySetting] object with + /// the given fields replaced with the new values. + ChatFirebaseVertexAISafetySetting copyWith({ + final ChatFirebaseVertexAISafetySettingCategory? category, + final ChatFirebaseVertexAISafetySettingThreshold? threshold, + }) { + return ChatFirebaseVertexAISafetySetting( + category: category ?? this.category, + threshold: threshold ?? this.threshold, + ); + } + + @override + bool operator ==(covariant final ChatFirebaseVertexAISafetySetting other) { + return category == other.category && threshold == other.threshold; + } + + @override + int get hashCode { + return category.hashCode ^ threshold.hashCode; + } } /// Safety settings categorizes. diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart index 58934755..0fde4b9f 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart @@ -211,7 +211,7 @@ class ChatGoogleGenerativeAI final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-flash', + model: defaultModel, ), }) : _currentModel = defaultOptions.model ?? '', _httpClient = createDefaultHttpClient( @@ -247,15 +247,18 @@ class ChatGoogleGenerativeAI /// Get the API key. String get apiKey => _httpClient.headers['x-goog-api-key'] ?? ''; - @override - String get modelType => 'chat-google-generative-ai'; - /// The current model set in [_googleAiClient]; String _currentModel; /// The current system instruction set in [_googleAiClient]; String? 
_currentSystemInstruction; + @override + String get modelType => 'chat-google-generative-ai'; + + /// The default model to use unless another is specified. + static const defaultModel = 'gemini-1.5-flash'; + @override Future invoke( final PromptValue input, { @@ -389,8 +392,7 @@ class ChatGoogleGenerativeAI final List messages, final ChatGoogleGenerativeAIOptions? options, ) { - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final model = options?.model ?? defaultOptions.model ?? defaultModel; final systemInstruction = messages.firstOrNull is SystemChatMessage ? messages.firstOrNull?.contentAsString diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart index c86c80a5..8c4bff41 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart @@ -1,12 +1,17 @@ import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; /// {@template chat_google_generative_ai_options} /// Options to pass into the Google Generative AI Chat Model. +/// +/// You can find a list of available models [here](https://ai.google.dev/models). /// {@endtemplate} +@immutable class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// {@macro chat_google_generative_ai_options} const ChatGoogleGenerativeAIOptions({ - this.model = 'gemini-1.5-flash', + super.model, this.topP, this.topK, this.candidateCount, @@ -21,11 +26,6 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { super.concurrencyLimit, }); - /// The LLM to use. - /// - /// You can find a list of available models here: https://ai.google.dev/models - final String? model; - /// The maximum cumulative probability of tokens to consider when sampling. /// The model uses combined Top-k and nucleus sampling. Tokens are sorted /// based on their assigned probabilities so that only the most likely @@ -126,6 +126,9 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { final double? temperature, final List? stopSequences, final List? safetySettings, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, }) { return ChatGoogleGenerativeAIOptions( model: model ?? this.model, @@ -136,8 +139,41 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { temperature: temperature ?? this.temperature, stopSequences: stopSequences ?? this.stopSequences, safetySettings: safetySettings ?? this.safetySettings, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, + concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatGoogleGenerativeAIOptions other) { + return model == other.model && + topP == other.topP && + topK == other.topK && + candidateCount == other.candidateCount && + maxOutputTokens == other.maxOutputTokens && + temperature == other.temperature && + stopSequences == other.stopSequences && + safetySettings == other.safetySettings && + tools == other.tools && + toolChoice == other.toolChoice && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + topP.hashCode ^ + topK.hashCode ^ + candidateCount.hashCode ^ + maxOutputTokens.hashCode ^ + temperature.hashCode ^ + stopSequences.hashCode ^ + safetySettings.hashCode ^ + tools.hashCode ^ + toolChoice.hashCode ^ + concurrencyLimit.hashCode; + } } /// {@template chat_google_generative_ai_safety_setting} diff --git a/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart b/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart index 4f668b40..e79f00b4 100644 --- a/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart @@ -117,8 +117,8 @@ class ChatVertexAI extends BaseChatModel { final String location = 'us-central1', final String? rootUrl, super.defaultOptions = const ChatVertexAIOptions( - publisher: 'google', - model: 'chat-bison', + publisher: defaultPublisher, + model: defaultModel, ), }) : client = VertexAIGenAIClient( httpClient: httpClient, @@ -139,6 +139,12 @@ class ChatVertexAI extends BaseChatModel { @override String get modelType => 'vertex-ai-chat'; + /// The default publisher to use unless another is specified. + static const defaultPublisher = 'google'; + + /// The default model to use unless another is specified. + static const defaultModel = 'chat-bison'; + @override Future invoke( final PromptValue input, { @@ -158,19 +164,15 @@ class ChatVertexAI extends BaseChatModel { final examples = (options?.examples ?? defaultOptions.examples) ?.map((final e) => e.toVertexAIChatExample()) .toList(growable: false); - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final publisher = + options?.publisher ?? defaultOptions.publisher ?? defaultPublisher; + final model = options?.model ?? defaultOptions.model ?? defaultModel; final result = await client.chat.predict( context: context, examples: examples, messages: vertexMessages, - publisher: options?.publisher ?? - defaultOptions.publisher ?? - ArgumentError.checkNotNull( - defaultOptions.publisher, - 'VertexAIOptions.publisher', - ), + publisher: publisher, model: model, parameters: VertexAITextChatModelRequestParams( maxOutputTokens: @@ -216,18 +218,15 @@ class ChatVertexAI extends BaseChatModel { final examples = (options?.examples ?? defaultOptions.examples) ?.map((final e) => e.toVertexAIChatExample()) .toList(growable: false); - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final publisher = + options?.publisher ?? defaultOptions.publisher ?? defaultPublisher; + final model = options?.model ?? defaultOptions.model ?? defaultModel; final res = await client.chat.countTokens( context: context, examples: examples, messages: vertexMessages, - publisher: options?.publisher ?? 
- ArgumentError.checkNotNull( - defaultOptions.publisher, - 'VertexAIOptions.publisher', - ), + publisher: publisher, model: model, ); return res.totalTokens; diff --git a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart index 49316c4e..c0642867 100644 --- a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart @@ -1,13 +1,19 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; +import 'package:meta/meta.dart'; /// {@template chat_vertex_ai_options} /// Options to pass into the Vertex AI Chat Model. +/// +/// You can find a list of available models here: +/// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models /// {@endtemplate} +@immutable class ChatVertexAIOptions extends ChatModelOptions { /// {@macro chat_vertex_ai_options} const ChatVertexAIOptions({ - this.publisher = 'google', - this.model = 'chat-bison', + this.publisher, + super.model, this.maxOutputTokens, this.temperature, this.topP, @@ -23,17 +29,6 @@ class ChatVertexAIOptions extends ChatModelOptions { /// Use `google` for first-party models. final String? publisher; - /// The text model to use. - /// - /// To use the latest model version, specify the model name without a version - /// number (e.g. `chat-bison`). - /// To use a stable model version, specify the model version number - /// (e.g. `chat-bison@001`). - /// - /// You can find a list of available models here: - /// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models - final String? model; - /// Maximum number of tokens that can be generated in the response. A token /// is approximately four characters. 100 tokens correspond to roughly /// 60-80 words. @@ -114,6 +109,7 @@ class ChatVertexAIOptions extends ChatModelOptions { final List? stopSequences, final int? candidateCount, final List? examples, + final int? concurrencyLimit, }) { return ChatVertexAIOptions( publisher: publisher ?? this.publisher, @@ -125,6 +121,36 @@ class ChatVertexAIOptions extends ChatModelOptions { stopSequences: stopSequences ?? this.stopSequences, candidateCount: candidateCount ?? this.candidateCount, examples: examples ?? this.examples, + concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatVertexAIOptions other) { + return publisher == other.publisher && + model == other.model && + maxOutputTokens == other.maxOutputTokens && + temperature == other.temperature && + topP == other.topP && + topK == other.topK && + const ListEquality() + .equals(stopSequences, other.stopSequences) && + candidateCount == other.candidateCount && + const ListEquality().equals(examples, other.examples) && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return publisher.hashCode ^ + model.hashCode ^ + maxOutputTokens.hashCode ^ + temperature.hashCode ^ + topP.hashCode ^ + topK.hashCode ^ + const ListEquality().hash(stopSequences) ^ + candidateCount.hashCode ^ + const ListEquality().hash(examples) ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_google/lib/src/llms/vertex_ai/types.dart b/packages/langchain_google/lib/src/llms/vertex_ai/types.dart index bf382c44..f9eee704 100644 --- a/packages/langchain_google/lib/src/llms/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/llms/vertex_ai/types.dart @@ -1,13 +1,19 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/llms.dart'; +import 'package:meta/meta.dart'; /// {@template vertex_ai_options} /// Options to pass into the Vertex AI LLM. +/// +/// You can find a list of available models here: +/// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models /// {@endtemplate} +@immutable class VertexAIOptions extends LLMOptions { /// {@macro vertex_ai_options} const VertexAIOptions({ - this.publisher = 'google', - this.model = 'text-bison', + this.publisher, + super.model, this.maxOutputTokens, this.temperature, this.topP, @@ -22,17 +28,6 @@ class VertexAIOptions extends LLMOptions { /// Use `google` for first-party models. final String? publisher; - /// The text model to use. - /// - /// To use the latest model version, specify the model name without a version - /// number (e.g. `text-bison`). - /// To use a stable model version, specify the model version number - /// (e.g. `text-bison@001`). - /// - /// You can find a list of available models here: - /// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models - final String? model; - /// Maximum number of tokens that can be generated in the response. A token /// is approximately four characters. 100 tokens correspond to roughly /// 60-80 words. @@ -109,6 +104,7 @@ class VertexAIOptions extends LLMOptions { final int? topK, final List? stopSequences, final int? candidateCount, + final int? concurrencyLimit, }) { return VertexAIOptions( publisher: publisher ?? this.publisher, @@ -119,6 +115,35 @@ class VertexAIOptions extends LLMOptions { topK: topK ?? this.topK, stopSequences: stopSequences ?? this.stopSequences, candidateCount: candidateCount ?? this.candidateCount, + concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit,
     );
   }
+
+  @override
+  bool operator ==(covariant final VertexAIOptions other) {
+    return publisher == other.publisher &&
+        model == other.model &&
+        maxOutputTokens == other.maxOutputTokens &&
+        temperature == other.temperature &&
+        topP == other.topP &&
+        topK == other.topK &&
+        const ListEquality()
+            .equals(stopSequences, other.stopSequences) &&
+        candidateCount == other.candidateCount &&
+        concurrencyLimit == other.concurrencyLimit;
+  }
+
+  @override
+  int get hashCode {
+    return publisher.hashCode ^
+        model.hashCode ^
+        maxOutputTokens.hashCode ^
+        temperature.hashCode ^
+        topP.hashCode ^
+        topK.hashCode ^
+        const ListEquality().hash(stopSequences) ^
+        candidateCount.hashCode ^
+        concurrencyLimit.hashCode;
+  }
 }
diff --git a/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart b/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart
index a0873fcc..955cc7ca 100644
--- a/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart
+++ b/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart
@@ -123,8 +123,8 @@ class VertexAI extends BaseLLM {
     final String location = 'us-central1',
     final String? rootUrl,
     super.defaultOptions = const VertexAIOptions(
-      publisher: 'google',
-      model: 'text-bison',
+      publisher: defaultPublisher,
+      model: defaultModel,
     ),
   }) : client = VertexAIGenAIClient(
          httpClient: httpClient,
@@ -145,21 +145,24 @@ class VertexAI extends BaseLLM {
   @override
   String get modelType => 'vertex-ai';
 
+  /// The default publisher to use unless another is specified.
+  static const defaultPublisher = 'google';
+
+  /// The default model to use unless another is specified.
+  static const defaultModel = 'text-bison';
+
   @override
   Future invoke(
     final PromptValue input, {
     final VertexAIOptions? options,
   }) async {
     final id = _uuid.v4();
-    final model =
-        options?.model ?? defaultOptions.model ?? throwNullModelError();
+    final publisher =
+        options?.publisher ?? defaultOptions.publisher ?? defaultPublisher;
+    final model = options?.model ?? defaultOptions.model ?? defaultModel;
     final result = await client.text.predict(
       prompt: input.toString(),
-      publisher: options?.publisher ??
-          ArgumentError.checkNotNull(
-            defaultOptions.publisher,
-            'VertexAIOptions.publisher',
-          ),
+      publisher: publisher,
       model: model,
       parameters: VertexAITextModelRequestParams(
         maxOutputTokens:
@@ -191,15 +194,12 @@ class VertexAI extends BaseLLM {
     final PromptValue promptValue, {
     final VertexAIOptions? options,
   }) async {
-    final model =
-        options?.model ?? defaultOptions.model ?? throwNullModelError();
+    final publisher =
+        options?.publisher ?? defaultOptions.publisher ?? defaultPublisher;
+    final model = options?.model ?? defaultOptions.model ?? defaultModel;
     final res = await client.text.countTokens(
       prompt: promptValue.toString(),
-      publisher: options?.publisher ?? 
- ArgumentError.checkNotNull( - defaultOptions.publisher, - 'VertexAIOptions.publisher', - ), + publisher: publisher, model: model, ); return res.totalTokens; diff --git a/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart b/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart index f6567f6d..6d692977 100644 --- a/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart +++ b/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart @@ -14,7 +14,7 @@ import 'package:test/test.dart'; void main() { group('ChatGoogleGenerativeAI tests', () { - const defaultModel = 'gemini-1.5-pro-latest'; + const defaultModel = 'gemini-1.5-pro'; late ChatGoogleGenerativeAI chatModel; @@ -73,7 +73,7 @@ void main() { expect(res.output.content, isNotEmpty); }); - test('Text-and-image input with gemini-pro-vision', () async { + test('Text-and-image input', () async { final res = await chatModel.invoke( PromptValue.chat([ ChatMessage.human( @@ -89,9 +89,6 @@ void main() { ]), ), ]), - options: const ChatGoogleGenerativeAIOptions( - model: 'gemini-pro-vision', - ), ); expect(res.output.content.toLowerCase(), contains('apple')); @@ -122,7 +119,8 @@ void main() { ), ); expect(res.output.content.length, lessThan(20)); - expect(res.finishReason, FinishReason.length); + // It seems the gemini-1.5 doesn't return length reason anymore + // expect(res.finishReason, FinishReason.length); }); test('Test Multi-turn conversations with gemini-pro', () async { @@ -177,7 +175,7 @@ void main() { 'properties': { 'location': { 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', + 'description': 'The city and country, e.g. San Francisco, US', }, 'unit': { 'type': 'string', @@ -196,7 +194,7 @@ void main() { ); final humanMessage = ChatMessage.humanText( - 'What’s the weather like in Boston and Madrid right now in celsius?', + 'What’s the weather like in Boston, US and Madrid, Spain in Celsius?', ); final res1 = await model.invoke(PromptValue.chat([humanMessage])); diff --git a/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart b/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart index 31bc53aa..70f6bd4b 100644 --- a/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart +++ b/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart @@ -156,7 +156,7 @@ class ChatMistralAI extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatMistralAIOptions( - model: 'mistral-small', + model: defaultModel, ), this.encoding = 'cl100k_base', }) : _client = MistralAIClient( @@ -179,6 +179,9 @@ class ChatMistralAI extends BaseChatModel { @override String get modelType => 'chat-mistralai'; + /// The default model to use unless another is specified. + static const defaultModel = 'mistral-small'; + @override Future invoke( final PromptValue input, { @@ -216,7 +219,7 @@ class ChatMistralAI extends BaseChatModel { }) { return ChatCompletionRequest( model: ChatCompletionModel.modelId( - options?.model ?? defaultOptions.model ?? throwNullModelError(), + options?.model ?? defaultOptions.model ?? defaultModel, ), messages: messages.toChatCompletionMessages(), temperature: options?.temperature ?? 
defaultOptions.temperature, diff --git a/packages/langchain_mistralai/lib/src/chat_models/types.dart b/packages/langchain_mistralai/lib/src/chat_models/types.dart index 60158ea7..aa2f9537 100644 --- a/packages/langchain_mistralai/lib/src/chat_models/types.dart +++ b/packages/langchain_mistralai/lib/src/chat_models/types.dart @@ -1,12 +1,16 @@ import 'package:langchain_core/chat_models.dart'; +import 'package:meta/meta.dart'; /// {@template chat_mistral_ai_options} /// Options to pass into ChatMistralAI. +/// +/// You can check the list of available models [here](https://docs.mistral.ai/models). /// {@endtemplate} +@immutable class ChatMistralAIOptions extends ChatModelOptions { /// {@macro chat_mistral_ai_options} const ChatMistralAIOptions({ - this.model = 'mistral-small', + super.model, this.temperature, this.topP, this.maxTokens, @@ -15,11 +19,6 @@ class ChatMistralAIOptions extends ChatModelOptions { super.concurrencyLimit, }); - /// ID of the model to use. You can use the [List Available Models](https://docs.mistral.ai/api#operation/listModels) - /// API to see all of your available models, or see our [Model overview](https://docs.mistral.ai/models) - /// for model descriptions. - final String? model; - /// What sampling temperature to use, between 0.0 and 2.0. Higher values like /// 0.8 will make the output more random, while lower values like 0.2 will /// make it more focused and deterministic. @@ -56,6 +55,7 @@ class ChatMistralAIOptions extends ChatModelOptions { final int? maxTokens, final bool? safePrompt, final int? randomSeed, + final int? concurrencyLimit, }) { return ChatMistralAIOptions( model: model ?? this.model, @@ -64,6 +64,29 @@ class ChatMistralAIOptions extends ChatModelOptions { maxTokens: maxTokens ?? this.maxTokens, safePrompt: safePrompt ?? this.safePrompt, randomSeed: randomSeed ?? this.randomSeed, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatMistralAIOptions other) { + return model == other.model && + temperature == other.temperature && + topP == other.topP && + maxTokens == other.maxTokens && + safePrompt == other.safePrompt && + randomSeed == other.randomSeed && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + temperature.hashCode ^ + topP.hashCode ^ + maxTokens.hashCode ^ + safePrompt.hashCode ^ + randomSeed.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart index 7dbed939..2ff391ef 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart @@ -178,6 +178,9 @@ class ChatOllama extends BaseChatModel { @override String get modelType => 'chat-ollama'; + /// The default model to use unless another is specified. + static const defaultModel = 'llama3'; + @override Future invoke( final PromptValue input, { @@ -218,7 +221,7 @@ class ChatOllama extends BaseChatModel { final ChatOllamaOptions? options, }) { return GenerateChatCompletionRequest( - model: options?.model ?? defaultOptions.model ?? throwNullModelError(), + model: options?.model ?? defaultOptions.model ?? defaultModel, messages: messages.toMessages(), format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), keepAlive: options?.keepAlive ?? 
defaultOptions.keepAlive, diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart index 67598acb..971f259c 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart @@ -1,15 +1,21 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; +import 'package:meta/meta.dart'; import '../../../langchain_ollama.dart'; import '../../llms/types.dart'; /// {@template chat_ollama_options} /// Options to pass into ChatOllama. +/// +/// For a complete list of supported models and model variants, see the +/// [Ollama model library](https://ollama.ai/library). /// {@endtemplate} +@immutable class ChatOllamaOptions extends ChatModelOptions { /// {@macro chat_ollama_options} const ChatOllamaOptions({ - this.model = 'llama3', + super.model, this.format, this.keepAlive, this.numKeep, @@ -44,9 +50,6 @@ class ChatOllamaOptions extends ChatModelOptions { super.concurrencyLimit, }); - /// The model used to generate completions - final String? model; - /// The format to return a response in. Currently the only accepted value is /// json. /// @@ -203,6 +206,7 @@ class ChatOllamaOptions extends ChatModelOptions { ChatOllamaOptions copyWith({ final String? model, final OllamaResponseFormat? format, + final int? keepAlive, final int? numKeep, final int? seed, final int? numPredict, @@ -223,7 +227,6 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? numa, final int? numCtx, final int? numBatch, - final int? numGqa, final int? numGpu, final int? mainGpu, final bool? lowVram, @@ -232,14 +235,13 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? vocabOnly, final bool? useMmap, final bool? useMlock, - final bool? embeddingOnly, - final double? ropeFrequencyBase, - final double? ropeFrequencyScale, final int? numThread, + final int? concurrencyLimit, }) { return ChatOllamaOptions( model: model ?? this.model, format: format ?? this.format, + keepAlive: keepAlive ?? this.keepAlive, numKeep: numKeep ?? this.numKeep, seed: seed ?? this.seed, numPredict: numPredict ?? this.numPredict, @@ -269,6 +271,81 @@ class ChatOllamaOptions extends ChatModelOptions { useMmap: useMmap ?? this.useMmap, useMlock: useMlock ?? this.useMlock, numThread: numThread ?? this.numThread, + concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatOllamaOptions other) { + return model == other.model && + format == other.format && + keepAlive == other.keepAlive && + numKeep == other.numKeep && + seed == other.seed && + numPredict == other.numPredict && + topK == other.topK && + topP == other.topP && + tfsZ == other.tfsZ && + typicalP == other.typicalP && + repeatLastN == other.repeatLastN && + temperature == other.temperature && + repeatPenalty == other.repeatPenalty && + presencePenalty == other.presencePenalty && + frequencyPenalty == other.frequencyPenalty && + mirostat == other.mirostat && + mirostatTau == other.mirostatTau && + mirostatEta == other.mirostatEta && + penalizeNewline == other.penalizeNewline && + const ListEquality().equals(stop, other.stop) && + numa == other.numa && + numCtx == other.numCtx && + numBatch == other.numBatch && + numGpu == other.numGpu && + mainGpu == other.mainGpu && + lowVram == other.lowVram && + f16KV == other.f16KV && + logitsAll == other.logitsAll && + vocabOnly == other.vocabOnly && + useMmap == other.useMmap && + useMlock == other.useMlock && + numThread == other.numThread && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + format.hashCode ^ + keepAlive.hashCode ^ + numKeep.hashCode ^ + seed.hashCode ^ + numPredict.hashCode ^ + topK.hashCode ^ + topP.hashCode ^ + tfsZ.hashCode ^ + typicalP.hashCode ^ + repeatLastN.hashCode ^ + temperature.hashCode ^ + repeatPenalty.hashCode ^ + presencePenalty.hashCode ^ + frequencyPenalty.hashCode ^ + mirostat.hashCode ^ + mirostatTau.hashCode ^ + mirostatEta.hashCode ^ + penalizeNewline.hashCode ^ + const ListEquality().hash(stop) ^ + numa.hashCode ^ + numCtx.hashCode ^ + numBatch.hashCode ^ + numGpu.hashCode ^ + mainGpu.hashCode ^ + lowVram.hashCode ^ + f16KV.hashCode ^ + logitsAll.hashCode ^ + vocabOnly.hashCode ^ + useMmap.hashCode ^ + useMlock.hashCode ^ + numThread.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart index 889e7c87..677fd308 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart @@ -83,7 +83,7 @@ class ChatOllamaTools extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatOllamaToolsOptions( - options: ChatOllamaOptions(model: 'llama3'), + options: ChatOllamaOptions(model: defaultModel), ), this.encoding = 'cl100k_base', }) : _client = OllamaClient( @@ -108,6 +108,9 @@ class ChatOllamaTools extends BaseChatModel { @override String get modelType => 'chat-ollama-tools'; + /// The default model to use unless another is specified. + static const defaultModel = 'llama3'; + @override Future invoke( PromptValue input, { @@ -132,7 +135,7 @@ class ChatOllamaTools extends BaseChatModel { final defaultOptions = this.defaultOptions.options ?? const ChatOllamaOptions(); return GenerateChatCompletionRequest( - model: options?.model ?? defaultOptions.model ?? throwNullModelError(), + model: options?.model ?? defaultOptions.model ?? defaultModel, messages: messages.toMessages(), format: ResponseFormat.json, keepAlive: options?.keepAlive ?? 
defaultOptions.keepAlive, diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart index 9447a51f..f10f1186 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart @@ -1,5 +1,7 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; import '../chat_ollama/types.dart'; import 'chat_ollama_tools.dart'; @@ -9,6 +11,7 @@ export '../chat_ollama/types.dart'; /// {@template chat_ollama_tools_options} /// Options to pass into [ChatOllamaTools]. /// {@endtemplate} +@immutable class ChatOllamaToolsOptions extends ChatModelOptions { /// {@macro chat_ollama_tools_options} const ChatOllamaToolsOptions({ @@ -57,6 +60,39 @@ Example response format: Ensure your response is valid JSON and follows this exact format. '''; + + /// Creates a copy of this [ChatOllamaToolsOptions] object with the given + /// fields replaced with the new values. + ChatOllamaToolsOptions copyWith({ + ChatOllamaOptions? options, + List? tools, + ChatToolChoice? toolChoice, + String? toolsSystemPromptTemplate, + }) { + return ChatOllamaToolsOptions( + options: options ?? this.options, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, + toolsSystemPromptTemplate: + toolsSystemPromptTemplate ?? this.toolsSystemPromptTemplate, + ); + } + + @override + bool operator ==(covariant final ChatOllamaToolsOptions other) { + return options == other.options && + const ListEquality().equals(tools, other.tools) && + toolChoice == other.toolChoice && + toolsSystemPromptTemplate == other.toolsSystemPromptTemplate; + } + + @override + int get hashCode { + return options.hashCode ^ + const ListEquality().hash(tools) ^ + toolChoice.hashCode ^ + toolsSystemPromptTemplate.hashCode; + } } /// Default tool called if model decides no other tools should be called diff --git a/packages/langchain_ollama/lib/src/llms/ollama.dart b/packages/langchain_ollama/lib/src/llms/ollama.dart index e61c6e27..fd9a8ed4 100644 --- a/packages/langchain_ollama/lib/src/llms/ollama.dart +++ b/packages/langchain_ollama/lib/src/llms/ollama.dart @@ -152,7 +152,7 @@ class Ollama extends BaseLLM { final Map? queryParams, final http.Client? client, super.defaultOptions = const OllamaOptions( - model: 'llama3', + model: defaultModel, ), this.encoding = 'cl100k_base', }) : _client = OllamaClient( @@ -177,6 +177,9 @@ class Ollama extends BaseLLM { @override String get modelType => 'ollama'; + /// The default model to use unless another is specified. + static const defaultModel = 'llama3'; + @override Future invoke( final PromptValue input, { @@ -210,7 +213,7 @@ class Ollama extends BaseLLM { final OllamaOptions? options, }) { return GenerateCompletionRequest( - model: options?.model ?? defaultOptions.model ?? throwNullModelError(), + model: options?.model ?? defaultOptions.model ?? 
defaultModel, prompt: prompt, system: options?.system, template: options?.template, diff --git a/packages/langchain_ollama/lib/src/llms/types.dart b/packages/langchain_ollama/lib/src/llms/types.dart index dcbe7669..494e759e 100644 --- a/packages/langchain_ollama/lib/src/llms/types.dart +++ b/packages/langchain_ollama/lib/src/llms/types.dart @@ -1,12 +1,18 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/llms.dart'; +import 'package:meta/meta.dart'; /// {@template ollama_options} /// Options to pass into the Ollama LLM. +/// +/// For a complete list of supported models and model variants, see the +/// [Ollama model library](https://ollama.ai/library). /// {@endtemplate} +@immutable class OllamaOptions extends LLMOptions { /// {@macro ollama_options} const OllamaOptions({ - this.model = 'llama3', + super.model, this.system, this.template, this.context, @@ -45,9 +51,6 @@ class OllamaOptions extends LLMOptions { super.concurrencyLimit, }); - /// The model used to generate completions - final String? model; - /// The system prompt (Overrides what is defined in the Modelfile). final String? system; @@ -228,6 +231,7 @@ class OllamaOptions extends LLMOptions { final List? context, final OllamaResponseFormat? format, final bool? raw, + final int? keepAlive, final int? numKeep, final int? seed, final int? numPredict, @@ -248,7 +252,6 @@ class OllamaOptions extends LLMOptions { final bool? numa, final int? numCtx, final int? numBatch, - final int? numGqa, final int? numGpu, final int? mainGpu, final bool? lowVram, @@ -257,10 +260,8 @@ class OllamaOptions extends LLMOptions { final bool? vocabOnly, final bool? useMmap, final bool? useMlock, - final bool? embeddingOnly, - final double? ropeFrequencyBase, - final double? ropeFrequencyScale, final int? numThread, + final int? concurrencyLimit, }) { return OllamaOptions( model: model ?? this.model, @@ -269,6 +270,7 @@ class OllamaOptions extends LLMOptions { context: context ?? this.context, format: format ?? this.format, raw: raw ?? this.raw, + keepAlive: keepAlive ?? this.keepAlive, numKeep: numKeep ?? this.numKeep, seed: seed ?? this.seed, numPredict: numPredict ?? this.numPredict, @@ -298,8 +300,93 @@ class OllamaOptions extends LLMOptions { useMmap: useMmap ?? this.useMmap, useMlock: useMlock ?? this.useMlock, numThread: numThread ?? this.numThread, + concurrencyLimit: concurrencyLimit ?? 
super.concurrencyLimit, ); } + + @override + bool operator ==(covariant final OllamaOptions other) { + return identical(this, other) || + runtimeType == other.runtimeType && + model == other.model && + system == other.system && + template == other.template && + const ListEquality().equals(context, other.context) && + format == other.format && + raw == other.raw && + keepAlive == other.keepAlive && + numKeep == other.numKeep && + seed == other.seed && + numPredict == other.numPredict && + topK == other.topK && + topP == other.topP && + tfsZ == other.tfsZ && + typicalP == other.typicalP && + repeatLastN == other.repeatLastN && + temperature == other.temperature && + repeatPenalty == other.repeatPenalty && + presencePenalty == other.presencePenalty && + frequencyPenalty == other.frequencyPenalty && + mirostat == other.mirostat && + mirostatTau == other.mirostatTau && + mirostatEta == other.mirostatEta && + penalizeNewline == other.penalizeNewline && + const ListEquality().equals(stop, other.stop) && + numa == other.numa && + numCtx == other.numCtx && + numBatch == other.numBatch && + numGpu == other.numGpu && + mainGpu == other.mainGpu && + lowVram == other.lowVram && + f16KV == other.f16KV && + logitsAll == other.logitsAll && + vocabOnly == other.vocabOnly && + useMmap == other.useMmap && + useMlock == other.useMlock && + numThread == other.numThread && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + system.hashCode ^ + template.hashCode ^ + const ListEquality().hash(context) ^ + format.hashCode ^ + raw.hashCode ^ + keepAlive.hashCode ^ + numKeep.hashCode ^ + seed.hashCode ^ + numPredict.hashCode ^ + topK.hashCode ^ + topP.hashCode ^ + tfsZ.hashCode ^ + typicalP.hashCode ^ + repeatLastN.hashCode ^ + temperature.hashCode ^ + repeatPenalty.hashCode ^ + presencePenalty.hashCode ^ + frequencyPenalty.hashCode ^ + mirostat.hashCode ^ + mirostatTau.hashCode ^ + mirostatEta.hashCode ^ + penalizeNewline.hashCode ^ + const ListEquality().hash(stop) ^ + numa.hashCode ^ + numCtx.hashCode ^ + numBatch.hashCode ^ + numGpu.hashCode ^ + mainGpu.hashCode ^ + lowVram.hashCode ^ + f16KV.hashCode ^ + logitsAll.hashCode ^ + vocabOnly.hashCode ^ + useMmap.hashCode ^ + useMlock.hashCode ^ + numThread.hashCode ^ + concurrencyLimit.hashCode; + } } /// The format to return a response in. 
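The pattern above is repeated in every provider package: the model is resolved from the
explicit call options first, then from the instance's `defaultOptions`, and finally from
the provider's static `defaultModel` constant. A minimal Dart sketch of that resolution
using the OpenAI wrapper (illustrative only, not part of the patch; the API key and
prompt text are placeholders, and the import paths and the `PromptValue.string` helper
are assumed from the package names used elsewhere in this series):

    import 'package:langchain_core/prompts.dart';
    import 'package:langchain_openai/langchain_openai.dart';

    Future<void> main() async {
      // No model is set here, so ChatOpenAI.defaultModel ('gpt-3.5-turbo') is used.
      final chatModel = ChatOpenAI(
        apiKey: 'sk-...',
        defaultOptions: const ChatOpenAIOptions(temperature: 0),
      );
      final res1 = await chatModel.invoke(PromptValue.string('Hello!'));
      print(res1.output.content);

      // Call options take precedence over both defaultOptions and the static default.
      final res2 = await chatModel.invoke(
        PromptValue.string('Hello!'),
        options: const ChatOpenAIOptions(model: 'gpt-4o'),
      );
      print(res2.output.content);
    }
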
diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart index 0fa46c03..7e001289 100644 --- a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart +++ b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart @@ -107,7 +107,7 @@ void main() { ]), ); expect( - res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), + res.output.content.replaceAll(RegExp(r'[\s\n-]'), ''), contains('123456789'), ); expect(res.finishReason, FinishReason.stop); diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index e218637a..54c955e9 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -25,8 +25,10 @@ import 'types.dart'; /// - [Completions API docs](https://platform.openai.com/docs/api-reference/chat) /// /// You can also use this wrapper to consume OpenAI-compatible APIs like -/// [Anyscale](https://www.anyscale.com), [Together AI](https://www.together.ai), -/// [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), etc. +/// [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), +/// [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), +/// [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), +/// [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc. /// /// ### Call options /// @@ -172,7 +174,7 @@ class ChatOpenAI extends BaseChatModel { /// [OpenAI dashboard](https://platform.openai.com/account/api-keys). /// - `organization`: your OpenAI organization ID (if applicable). /// - [ChatOpenAI.encoding] - /// - [OpenAI.defaultOptions] + /// - [ChatOpenAI.defaultOptions] /// /// Advance configuration options: /// - `baseUrl`: the base URL to use. Defaults to OpenAI's API URL. You can @@ -192,7 +194,7 @@ class ChatOpenAI extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatOpenAIOptions( - model: 'gpt-3.5-turbo', + model: defaultModel, ), this.encoding, }) : _client = OpenAIClient( @@ -236,6 +238,9 @@ class ChatOpenAI extends BaseChatModel { @override String get modelType => 'openai-chat'; + /// The default model to use unless another is specified. + static const defaultModel = 'gpt-3.5-turbo'; + @override Future invoke( final PromptValue input, { @@ -288,7 +293,7 @@ class ChatOpenAI extends BaseChatModel { return CreateChatCompletionRequest( model: ChatCompletionModel.modelId( - options?.model ?? defaultOptions.model ?? throwNullModelError(), + options?.model ?? defaultOptions.model ?? defaultModel, ), messages: messagesDtos, tools: toolsDtos, @@ -334,8 +339,7 @@ class ChatOpenAI extends BaseChatModel { final PromptValue promptValue, { final ChatOpenAIOptions? options, }) async { - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final model = options?.model ?? defaultOptions.model ?? 
defaultModel; final tiktoken = _getTiktoken(); final messages = promptValue.toChatMessages(); diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 299902fe..ed53c65c 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -1,13 +1,39 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; /// {@template chat_openai_options} /// Options to pass into the OpenAI Chat Model. +/// +/// Available [ChatOpenAIOptions.model]s: +/// - `gpt-4` +/// - `gpt-4-32k` +/// - `gpt-4-32k-0314` +/// - `gpt-4-32k-0613` +/// - `gpt-4-0125-preview` +/// - `gpt-4-0314` +/// - `gpt-4-0613` +/// - `gpt-4-1106-preview` +/// - `gpt-4-turbo` +/// - `gpt-4-turbo-2024-04-09` +/// - `gpt-4-turbo-preview` +/// - `gpt-4-vision-preview` +/// - `gpt-4o` +/// - `gpt-4o-2024-05-13` +/// - `gpt-4o-mini` +/// - `gpt-4o-mini-2024-07-18` +/// - `gpt-3.5-turbo` +/// - `gpt-3.5-turbo-16k` +/// +/// Mind that the list may be outdated. +/// See https://platform.openai.com/docs/models for the latest list. /// {@endtemplate} +@immutable class ChatOpenAIOptions extends ChatModelOptions { /// {@macro chat_openai_options} const ChatOpenAIOptions({ - this.model = 'gpt-3.5-turbo', + super.model, this.frequencyPenalty, this.logitBias, this.maxTokens, @@ -18,40 +44,14 @@ class ChatOpenAIOptions extends ChatModelOptions { this.stop, this.temperature, this.topP, + super.tools, + super.toolChoice, this.parallelToolCalls, this.serviceTier, this.user, - super.tools, - super.toolChoice, super.concurrencyLimit, }); - /// ID of the model to use (e.g. 'gpt-3.5-turbo'). - /// - /// Available models: - /// - `gpt-4` - /// - `gpt-4-32k` - /// - `gpt-4-32k-0314` - /// - `gpt-4-32k-0613` - /// - `gpt-4-0125-preview` - /// - `gpt-4-0314` - /// - `gpt-4-0613` - /// - `gpt-4-1106-preview` - /// - `gpt-4-turbo` - /// - `gpt-4-turbo-2024-04-09` - /// - `gpt-4-turbo-preview` - /// - `gpt-4-vision-preview` - /// - `gpt-4o` - /// - `gpt-4o-2024-05-13` - /// - `gpt-4o-mini` - /// - `gpt-4o-mini-2024-07-18` - /// - `gpt-3.5-turbo` - /// - `gpt-3.5-turbo-16k` - /// - /// Mind that the list may be outdated. - /// See https://platform.openai.com/docs/models for the latest list. - final String? model; - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on /// their existing frequency in the text so far, decreasing the model's /// likelihood to repeat the same line verbatim. @@ -145,22 +145,23 @@ class ChatOpenAIOptions extends ChatModelOptions { /// Creates a copy of this [ChatOpenAIOptions] object with the given fields /// replaced with the new values. ChatOpenAIOptions copyWith({ - final String? model, - final double? frequencyPenalty, - final Map? logitBias, - final int? maxTokens, - final int? n, - final double? presencePenalty, - final ChatOpenAIResponseFormat? responseFormat, - final int? seed, - final List? stop, - final double? temperature, - final double? topP, - final bool? parallelToolCalls, - final ChatOpenAIServiceTier? serviceTier, - final String? user, - final List? tools, - final ChatToolChoice? toolChoice, + String? model, + double? frequencyPenalty, + Map? logitBias, + int? maxTokens, + int? n, + double? presencePenalty, + ChatOpenAIResponseFormat? responseFormat, + int? seed, + List? stop, + double? temperature, + double? topP, + List? 
tools, + ChatToolChoice? toolChoice, + bool? parallelToolCalls, + ChatOpenAIServiceTier? serviceTier, + String? user, + int? concurrencyLimit, }) { return ChatOpenAIOptions( model: model ?? this.model, @@ -174,13 +175,59 @@ class ChatOpenAIOptions extends ChatModelOptions { stop: stop ?? this.stop, temperature: temperature ?? this.temperature, topP: topP ?? this.topP, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, parallelToolCalls: parallelToolCalls ?? this.parallelToolCalls, serviceTier: serviceTier ?? this.serviceTier, user: user ?? this.user, - tools: tools ?? this.tools, - toolChoice: toolChoice ?? this.toolChoice, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatOpenAIOptions other) { + return identical(this, other) || + runtimeType == other.runtimeType && + model == other.model && + frequencyPenalty == other.frequencyPenalty && + const MapEquality() + .equals(logitBias, other.logitBias) && + maxTokens == other.maxTokens && + n == other.n && + presencePenalty == other.presencePenalty && + responseFormat == other.responseFormat && + seed == other.seed && + const ListEquality().equals(stop, other.stop) && + temperature == other.temperature && + topP == other.topP && + const ListEquality().equals(tools, other.tools) && + toolChoice == other.toolChoice && + parallelToolCalls == other.parallelToolCalls && + serviceTier == other.serviceTier && + user == other.user && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + frequencyPenalty.hashCode ^ + const MapEquality().hash(logitBias) ^ + maxTokens.hashCode ^ + n.hashCode ^ + presencePenalty.hashCode ^ + responseFormat.hashCode ^ + seed.hashCode ^ + const ListEquality().hash(stop) ^ + temperature.hashCode ^ + topP.hashCode ^ + const ListEquality().hash(tools) ^ + toolChoice.hashCode ^ + parallelToolCalls.hashCode ^ + serviceTier.hashCode ^ + user.hashCode ^ + concurrencyLimit.hashCode; + } } /// {@template chat_openai_response_format} diff --git a/packages/langchain_openai/lib/src/llms/openai.dart b/packages/langchain_openai/lib/src/llms/openai.dart index 9471acfc..aed0e9e9 100644 --- a/packages/langchain_openai/lib/src/llms/openai.dart +++ b/packages/langchain_openai/lib/src/llms/openai.dart @@ -1,3 +1,5 @@ +import 'dart:math'; + import 'package:http/http.dart' as http; import 'package:langchain_core/llms.dart'; import 'package:langchain_core/prompts.dart'; @@ -186,8 +188,9 @@ class OpenAI extends BaseLLM { final Map? queryParams, final http.Client? client, super.defaultOptions = const OpenAIOptions( - model: 'gpt-3.5-turbo-instruct', - maxTokens: 256, + model: defaultModel, + maxTokens: defaultMaxTokens, + concurrencyLimit: defaultConcurrencyLimit, ), this.encoding, }) : _client = OpenAIClient( @@ -228,6 +231,15 @@ class OpenAI extends BaseLLM { @override String get modelType => 'openai'; + /// The default model to use unless another is specified. + static const defaultModel = 'gpt-3.5-turbo-instruct'; + + /// The default max tokens to use unless another is specified. + static const defaultMaxTokens = 256; + + /// The default concurrency limit to use unless another is specified. + static const defaultConcurrencyLimit = 20; + @override Future invoke( final PromptValue input, { @@ -259,7 +271,8 @@ class OpenAI extends BaseLLM { // Otherwise, we can batch the calls to the API final finalOptions = options?.first ?? 
defaultOptions; - final concurrencyLimit = finalOptions.concurrencyLimit; + final concurrencyLimit = + min(finalOptions.concurrencyLimit, defaultConcurrencyLimit); var index = 0; final results = []; @@ -302,7 +315,7 @@ class OpenAI extends BaseLLM { }) { return CreateCompletionRequest( model: CompletionModel.modelId( - options?.model ?? defaultOptions.model ?? throwNullModelError(), + options?.model ?? defaultOptions.model ?? defaultModel, ), prompt: CompletionPrompt.listString(prompts), bestOf: options?.bestOf ?? defaultOptions.bestOf, @@ -310,7 +323,8 @@ class OpenAI extends BaseLLM { options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, logitBias: options?.logitBias ?? defaultOptions.logitBias, logprobs: options?.logprobs ?? defaultOptions.logprobs, - maxTokens: options?.maxTokens ?? defaultOptions.maxTokens, + maxTokens: + options?.maxTokens ?? defaultOptions.maxTokens ?? defaultMaxTokens, n: options?.n ?? defaultOptions.n, presencePenalty: options?.presencePenalty ?? defaultOptions.presencePenalty, @@ -340,7 +354,7 @@ class OpenAI extends BaseLLM { final encoding = this.encoding != null ? getEncoding(this.encoding!) : encodingForModel( - options?.model ?? defaultOptions.model ?? throwNullModelError(), + options?.model ?? defaultOptions.model ?? defaultModel, ); return encoding.encode(promptValue.toString()); } diff --git a/packages/langchain_openai/lib/src/llms/types.dart b/packages/langchain_openai/lib/src/llms/types.dart index 6869a4c4..7f8da471 100644 --- a/packages/langchain_openai/lib/src/llms/types.dart +++ b/packages/langchain_openai/lib/src/llms/types.dart @@ -4,17 +4,24 @@ import 'package:meta/meta.dart'; /// {@template openai_options} /// Options to pass into the OpenAI LLM. +/// +/// Available models: +/// - `gpt-3.5-turbo-instruct` +/// - `davinci-002` +/// - `babbage-002` +/// Mind that the list may be outdated. +/// See https://platform.openai.com/docs/models for the latest list. /// {@endtemplate} @immutable class OpenAIOptions extends LLMOptions { /// {@macro openai_options} const OpenAIOptions({ - this.model = 'gpt-3.5-turbo-instruct', + super.model, this.bestOf, this.frequencyPenalty, this.logitBias, this.logprobs, - this.maxTokens = 256, + this.maxTokens, this.n, this.presencePenalty, this.seed, @@ -23,20 +30,9 @@ class OpenAIOptions extends LLMOptions { this.temperature, this.topP, this.user, - super.concurrencyLimit = 20, + super.concurrencyLimit, }); - /// ID of the model to use (e.g. 'gpt-3.5-turbo-instruct'). - /// - /// Available models: - /// - `gpt-3.5-turbo-instruct` - /// - `davinci-002` - /// - `babbage-002` - /// - /// Mind that the list may be outdated. - /// See https://platform.openai.com/docs/models for the latest list. - final String? model; - /// Generates best_of completions server-side and returns the "best" /// (the one with the highest log probability per token). /// @@ -128,20 +124,21 @@ class OpenAIOptions extends LLMOptions { /// Creates a copy of this [OpenAIOptions] object with the given fields /// replaced with the new values. OpenAIOptions copyWith({ - final String? model, - final int? bestOf, - final double? frequencyPenalty, - final Map? logitBias, - final int? logprobs, - final int? maxTokens, - final int? n, - final double? presencePenalty, - final int? seed, - final List? stop, - final String? suffix, - final double? temperature, - final double? topP, - final String? user, + String? model, + int? bestOf, + double? frequencyPenalty, + Map? logitBias, + int? logprobs, + int? maxTokens, + int? n, + double? 
presencePenalty, + int? seed, + List? stop, + String? suffix, + double? temperature, + double? topP, + String? user, + int? concurrencyLimit, }) { return OpenAIOptions( model: model ?? this.model, @@ -158,42 +155,48 @@ class OpenAIOptions extends LLMOptions { temperature: temperature ?? this.temperature, topP: topP ?? this.topP, user: user ?? this.user, + concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, ); } @override - bool operator ==(covariant final OpenAIOptions other) => - identical(this, other) || - runtimeType == other.runtimeType && - model == other.model && - bestOf == other.bestOf && - frequencyPenalty == other.frequencyPenalty && - const MapEquality().equals(logitBias, other.logitBias) && - logprobs == other.logprobs && - maxTokens == other.maxTokens && - n == other.n && - presencePenalty == other.presencePenalty && - seed == other.seed && - stop == other.stop && - suffix == other.suffix && - temperature == other.temperature && - topP == other.topP && - user == other.user; + bool operator ==(covariant final OpenAIOptions other) { + return identical(this, other) || + runtimeType == other.runtimeType && + model == other.model && + bestOf == other.bestOf && + frequencyPenalty == other.frequencyPenalty && + const MapEquality() + .equals(logitBias, other.logitBias) && + logprobs == other.logprobs && + maxTokens == other.maxTokens && + n == other.n && + presencePenalty == other.presencePenalty && + seed == other.seed && + const ListEquality().equals(stop, other.stop) && + suffix == other.suffix && + temperature == other.temperature && + topP == other.topP && + user == other.user && + concurrencyLimit == other.concurrencyLimit; + } @override - int get hashCode => - model.hashCode ^ - bestOf.hashCode ^ - frequencyPenalty.hashCode ^ - const MapEquality().hash(logitBias) ^ - logprobs.hashCode ^ - maxTokens.hashCode ^ - n.hashCode ^ - presencePenalty.hashCode ^ - seed.hashCode ^ - stop.hashCode ^ - suffix.hashCode ^ - temperature.hashCode ^ - topP.hashCode ^ - user.hashCode; + int get hashCode { + return model.hashCode ^ + bestOf.hashCode ^ + frequencyPenalty.hashCode ^ + const MapEquality().hash(logitBias) ^ + logprobs.hashCode ^ + maxTokens.hashCode ^ + n.hashCode ^ + presencePenalty.hashCode ^ + seed.hashCode ^ + const ListEquality().hash(stop) ^ + suffix.hashCode ^ + temperature.hashCode ^ + topP.hashCode ^ + user.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_openai/test/chains/qa_with_sources_test.dart b/packages/langchain_openai/test/chains/qa_with_sources_test.dart index b1080986..a94ea862 100644 --- a/packages/langchain_openai/test/chains/qa_with_sources_test.dart +++ b/packages/langchain_openai/test/chains/qa_with_sources_test.dart @@ -53,7 +53,6 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-3.5-turbo-0613', temperature: 0, ), ); diff --git a/packages/langchain_openai/test/chat_models/anyscale_test.dart b/packages/langchain_openai/test/chat_models/anyscale_test.dart index 1a2fdef1..f0a99e88 100644 --- a/packages/langchain_openai/test/chat_models/anyscale_test.dart +++ b/packages/langchain_openai/test/chat_models/anyscale_test.dart @@ -30,8 +30,6 @@ void main() { 'codellama/CodeLlama-34b-Instruct-hf', 'mistralai/Mistral-7B-Instruct-v0.1', 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'HuggingFaceH4/zephyr-7b-beta', - 'Open-Orca/Mistral-7B-OpenOrca', ]; for (final model in models) { final res = await chatModel.invoke( @@ -67,8 +65,6 @@ void main() { 
'codellama/CodeLlama-34b-Instruct-hf', 'mistralai/Mistral-7B-Instruct-v0.1', 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'HuggingFaceH4/zephyr-7b-beta', - 'Open-Orca/Mistral-7B-OpenOrca', ]; for (final model in models) { final stream = chatModel.stream( diff --git a/packages/langchain_openai/test/chat_models/chat_openai_test.dart b/packages/langchain_openai/test/chat_models/chat_openai_test.dart index 6268a77b..7c2d95d1 100644 --- a/packages/langchain_openai/test/chat_models/chat_openai_test.dart +++ b/packages/langchain_openai/test/chat_models/chat_openai_test.dart @@ -208,7 +208,6 @@ void main() { test('Test countTokens messages', () async { final models = [ - 'gpt-3.5-turbo-0301', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-4-0314', From 52e5b006431d6ccfa603673c1c6ba4a79f44cf3e Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 20 Jul 2024 18:21:45 +0200 Subject: [PATCH 193/251] feat: Implement additive options merging for cascade bind calls (#500) --- .../lib/src/chat_models/types.dart | 39 ++-- .../lib/src/tools/tavily/types.dart | 50 +++++ .../langchain_core/lib/src/chains/types.dart | 2 +- .../lib/src/chat_models/fake.dart | 179 ++++++++++++++++-- .../lib/src/chat_models/types.dart | 3 +- .../lib/src/langchain/types.dart | 2 +- .../lib/src/language_models/types.dart | 2 +- .../langchain_core/lib/src/llms/types.dart | 2 +- .../lib/src/output_parsers/types.dart | 4 +- .../lib/src/retrievers/types.dart | 4 +- .../lib/src/runnables/binding.dart | 4 +- .../lib/src/runnables/types.dart | 27 +++ .../langchain_core/lib/src/tools/types.dart | 4 +- .../test/runnables/binding_test.dart | 26 +++ .../lib/src/chat_models/vertex_ai/types.dart | 23 ++- .../lib/src/chat_models/google_ai/types.dart | 22 ++- .../lib/src/chat_models/vertex_ai/types.dart | 19 +- .../lib/src/llms/vertex_ai/types.dart | 17 +- .../lib/src/chat_models/types.dart | 16 +- .../src/chat_models/chat_ollama/types.dart | 42 +++- .../chat_models/chat_ollama_tools/types.dart | 25 ++- .../langchain_ollama/lib/src/llms/types.dart | 46 ++++- .../lib/src/chat_models/types.dart | 60 ++++-- .../langchain_openai/lib/src/llms/types.dart | 54 ++++-- .../langchain_openai/lib/src/tools/types.dart | 59 ++++++ .../test/chat_models/chat_openai_test.dart | 140 ++++++++++---- 26 files changed, 733 insertions(+), 138 deletions(-) diff --git a/packages/langchain_anthropic/lib/src/chat_models/types.dart b/packages/langchain_anthropic/lib/src/chat_models/types.dart index 98069444..f91abdb3 100644 --- a/packages/langchain_anthropic/lib/src/chat_models/types.dart +++ b/packages/langchain_anthropic/lib/src/chat_models/types.dart @@ -86,19 +86,18 @@ class ChatAnthropicOptions extends ChatModelOptions { /// name, email address, or phone number. final String? userId; - /// Creates a copy of this [ChatAnthropicOptions] object with the given fields - /// replaced with the new values. + @override ChatAnthropicOptions copyWith({ - String? model, - int? maxTokens, - List? stopSequences, - double? temperature, - int? topK, - double? topP, - String? userId, - List? tools, - ChatToolChoice? toolChoice, - int? concurrencyLimit, + final String? model, + final int? maxTokens, + final List? stopSequences, + final double? temperature, + final int? topK, + final double? topP, + final String? userId, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, }) { return ChatAnthropicOptions( model: model ?? 
this.model, @@ -114,6 +113,22 @@ class ChatAnthropicOptions extends ChatModelOptions { ); } + @override + ChatAnthropicOptions merge(covariant final ChatAnthropicOptions? other) { + return copyWith( + model: other?.model, + maxTokens: other?.maxTokens, + stopSequences: other?.stopSequences, + temperature: other?.temperature, + topK: other?.topK, + topP: other?.topP, + userId: other?.userId, + tools: other?.tools, + toolChoice: other?.toolChoice, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatAnthropicOptions other) { return model == other.model && diff --git a/packages/langchain_community/lib/src/tools/tavily/types.dart b/packages/langchain_community/lib/src/tools/tavily/types.dart index d6dc9134..872723cf 100644 --- a/packages/langchain_community/lib/src/tools/tavily/types.dart +++ b/packages/langchain_community/lib/src/tools/tavily/types.dart @@ -108,6 +108,7 @@ class TavilySearchResultsToolOptions extends ToolOptions { /// {@template tavily_answer_tool_options} /// Generation options to pass into the [TavilyAnswerTool]. /// {@endtemplate} +@immutable class TavilyAnswerToolOptions extends ToolOptions { /// {@macro tavily_answer_tool_options} const TavilyAnswerToolOptions({ @@ -115,6 +116,7 @@ class TavilyAnswerToolOptions extends ToolOptions { this.searchDepth = TavilySearchDepth.basic, this.includeDomains, this.excludeDomains, + super.concurrencyLimit, }); /// The number of maximum search results to return. @@ -128,4 +130,52 @@ class TavilyAnswerToolOptions extends ToolOptions { /// A list of domains to specifically exclude from the search results. final List? excludeDomains; + + @override + TavilyAnswerToolOptions copyWith({ + final int? maxResults, + final TavilySearchDepth? searchDepth, + final List? includeDomains, + final List? excludeDomains, + final int? concurrencyLimit, + }) { + return TavilyAnswerToolOptions( + maxResults: maxResults ?? this.maxResults, + searchDepth: searchDepth ?? this.searchDepth, + includeDomains: includeDomains ?? this.includeDomains, + excludeDomains: excludeDomains ?? this.excludeDomains, + concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, + ); + } + + @override + TavilyAnswerToolOptions merge( + covariant final TavilyAnswerToolOptions? other, + ) { + return copyWith( + maxResults: other?.maxResults, + searchDepth: other?.searchDepth, + includeDomains: other?.includeDomains, + excludeDomains: other?.excludeDomains, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final TavilyAnswerToolOptions other) { + return maxResults == other.maxResults && + searchDepth == other.searchDepth && + includeDomains == other.includeDomains && + excludeDomains == other.excludeDomains && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return maxResults.hashCode ^ + searchDepth.hashCode ^ + includeDomains.hashCode ^ + excludeDomains.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_core/lib/src/chains/types.dart b/packages/langchain_core/lib/src/chains/types.dart index f677381e..e76d876c 100644 --- a/packages/langchain_core/lib/src/chains/types.dart +++ b/packages/langchain_core/lib/src/chains/types.dart @@ -6,7 +6,7 @@ import '../langchain/types.dart'; typedef ChainValues = Map; /// {@template chain_options} -/// Options to pass to a chain. +/// Options to pass to the chain. 
/// {@endtemplate} @immutable class ChainOptions extends BaseLangChainOptions { diff --git a/packages/langchain_core/lib/src/chat_models/fake.dart b/packages/langchain_core/lib/src/chat_models/fake.dart index f465223d..79c44d1b 100644 --- a/packages/langchain_core/lib/src/chat_models/fake.dart +++ b/packages/langchain_core/lib/src/chat_models/fake.dart @@ -1,3 +1,5 @@ +import 'package:collection/collection.dart'; + import '../../language_models.dart'; import '../prompts/types.dart'; import 'base.dart'; @@ -7,11 +9,12 @@ import 'types.dart'; /// Fake Chat Model for testing. /// You can pass in a list of responses to return in order when called. /// {@endtemplate} -class FakeChatModel extends SimpleChatModel { +class FakeChatModel extends BaseChatModel { /// {@macro fake_list_llm} FakeChatModel({ required this.responses, - }) : super(defaultOptions: const ChatModelOptions()); + super.defaultOptions = const FakeChatModelOptions(), + }); /// Responses to return in order when called. final List responses; @@ -22,17 +25,28 @@ class FakeChatModel extends SimpleChatModel { String get modelType => 'fake-chat-model'; @override - Future callInternal( - final List messages, { - final ChatModelOptions? options, - }) { - return Future.value(responses[_i++ % responses.length]); + Future invoke( + final PromptValue input, { + final FakeChatModelOptions? options, + }) async { + final text = responses[_i++ % responses.length]; + final message = AIChatMessage(content: text); + return ChatResult( + id: '1', + output: message, + finishReason: FinishReason.unspecified, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + }, + usage: const LanguageModelUsage(), + ); } @override Stream stream( final PromptValue input, { - final ChatModelOptions? options, + final FakeChatModelOptions? options, }) { final res = responses[_i++ % responses.length].split(''); return Stream.fromIterable(res).map( @@ -40,7 +54,10 @@ class FakeChatModel extends SimpleChatModel { id: 'fake-chat-model', output: AIChatMessage(content: char), finishReason: FinishReason.stop, - metadata: const {}, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + }, usage: const LanguageModelUsage(), streaming: true, ), @@ -60,30 +77,96 @@ class FakeChatModel extends SimpleChatModel { } } -/// {@template fake_echo_llm} +/// {@template fake_chat_model_options} +/// Fake Chat Model Options for testing. +/// {@endtemplate} +class FakeChatModelOptions extends ChatModelOptions { + /// {@macro fake_chat_model_options} + const FakeChatModelOptions({ + super.model, + this.metadata, + super.concurrencyLimit, + }); + + /// Metadata. + final Map? metadata; + + @override + FakeChatModelOptions copyWith({ + final String? model, + final Map? metadata, + final int? concurrencyLimit, + }) { + return FakeChatModelOptions( + model: model ?? this.model, + metadata: metadata ?? this.metadata, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } + + @override + FakeChatModelOptions merge( + covariant final FakeChatModelOptions? 
other, + ) { + return copyWith( + model: other?.model, + metadata: other?.metadata, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final FakeChatModelOptions other) { + return model == other.model && + const MapEquality().equals(metadata, other.metadata) && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + const MapEquality().hash(metadata) ^ + concurrencyLimit.hashCode; + } +} + +/// {@template fake_echo_chat_model} /// Fake Chat Model for testing. /// It just returns the content of the last message of the prompt /// or streams it char by char. /// {@endtemplate} -class FakeEchoChatModel extends SimpleChatModel { - /// {@macro fake_echo_llm} - const FakeEchoChatModel() : super(defaultOptions: const ChatModelOptions()); +class FakeEchoChatModel extends BaseChatModel { + /// {@macro fake_echo_chat_model} + const FakeEchoChatModel({ + super.defaultOptions = const FakeEchoChatModelOptions(), + }); @override String get modelType => 'fake-echo-chat-model'; @override - Future callInternal( - final List messages, { - final ChatModelOptions? options, - }) { - return Future.value(messages.last.contentAsString); + Future invoke( + final PromptValue input, { + final FakeEchoChatModelOptions? options, + }) async { + final text = input.toChatMessages().last.contentAsString; + final message = AIChatMessage(content: text); + return ChatResult( + id: '1', + output: message, + finishReason: FinishReason.unspecified, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + }, + usage: const LanguageModelUsage(), + ); } @override Stream stream( final PromptValue input, { - final ChatModelOptions? options, + final FakeEchoChatModelOptions? options, }) { final prompt = input.toChatMessages().first.contentAsString.split(''); return Stream.fromIterable(prompt).map( @@ -91,7 +174,10 @@ class FakeEchoChatModel extends SimpleChatModel { id: 'fake-echo-chat-model', output: AIChatMessage(content: char), finishReason: FinishReason.stop, - metadata: const {}, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + }, usage: const LanguageModelUsage(), streaming: true, ), @@ -110,3 +196,56 @@ class FakeEchoChatModel extends SimpleChatModel { .toList(growable: false); } } + +/// {@template fake_chat_model_options} +/// Fake Echo Chat Model Options for testing. +/// {@endtemplate} +class FakeEchoChatModelOptions extends ChatModelOptions { + /// {@macro fake_chat_model_options} + const FakeEchoChatModelOptions({ + super.model, + this.metadata, + super.concurrencyLimit, + }); + + /// Metadata. + final Map? metadata; + + @override + FakeEchoChatModelOptions copyWith({ + final String? model, + final Map? metadata, + final int? concurrencyLimit, + }) { + return FakeEchoChatModelOptions( + model: model ?? this.model, + metadata: metadata ?? this.metadata, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } + + @override + FakeEchoChatModelOptions merge( + covariant final FakeEchoChatModelOptions? 
other, + ) { + return copyWith( + model: other?.model, + metadata: other?.metadata, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final FakeEchoChatModelOptions other) { + return model == other.model && + const MapEquality().equals(metadata, other.metadata) && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + const MapEquality().hash(metadata) ^ + concurrencyLimit.hashCode; + } +} diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index e9b788c7..f9c2aff3 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -7,7 +7,8 @@ import '../tools/base.dart'; /// {@template chat_model_options} /// Generation options to pass into the Chat Model. /// {@endtemplate} -class ChatModelOptions extends LanguageModelOptions { +@immutable +abstract class ChatModelOptions extends LanguageModelOptions { /// {@macro chat_model_options} const ChatModelOptions({ super.model, diff --git a/packages/langchain_core/lib/src/langchain/types.dart b/packages/langchain_core/lib/src/langchain/types.dart index 8dabca52..091429d6 100644 --- a/packages/langchain_core/lib/src/langchain/types.dart +++ b/packages/langchain_core/lib/src/langchain/types.dart @@ -3,7 +3,7 @@ import 'package:meta/meta.dart'; import '../runnables/types.dart'; /// {@template base_lang_chain_options} -/// Base class for LangChain components' options. +/// Base options class for LangChain components. /// {@endtemplate} @immutable class BaseLangChainOptions extends RunnableOptions { diff --git a/packages/langchain_core/lib/src/language_models/types.dart b/packages/langchain_core/lib/src/language_models/types.dart index c2e6df11..3b52ee63 100644 --- a/packages/langchain_core/lib/src/language_models/types.dart +++ b/packages/langchain_core/lib/src/language_models/types.dart @@ -4,7 +4,7 @@ import 'package:meta/meta.dart'; import '../langchain/types.dart'; /// {@template language_model_options} -/// Generation options to pass into the language model. +/// Options to pass into the language model. /// {@endtemplate} @immutable abstract class LanguageModelOptions extends BaseLangChainOptions { diff --git a/packages/langchain_core/lib/src/llms/types.dart b/packages/langchain_core/lib/src/llms/types.dart index 47b98285..02a506de 100644 --- a/packages/langchain_core/lib/src/llms/types.dart +++ b/packages/langchain_core/lib/src/llms/types.dart @@ -3,7 +3,7 @@ import 'package:meta/meta.dart'; import '../language_models/types.dart'; /// {@template llm_options} -/// Generation options to pass into the LLM. +/// Options to pass into the LLM. 
/// {@endtemplate} @immutable class LLMOptions extends LanguageModelOptions { diff --git a/packages/langchain_core/lib/src/output_parsers/types.dart b/packages/langchain_core/lib/src/output_parsers/types.dart index 460840fa..9e8906b7 100644 --- a/packages/langchain_core/lib/src/output_parsers/types.dart +++ b/packages/langchain_core/lib/src/output_parsers/types.dart @@ -60,7 +60,9 @@ class ParsedToolCall { } @override - int get hashCode => id.hashCode ^ name.hashCode ^ arguments.hashCode; + int get hashCode { + return id.hashCode ^ name.hashCode ^ arguments.hashCode; + } @override String toString() { diff --git a/packages/langchain_core/lib/src/retrievers/types.dart b/packages/langchain_core/lib/src/retrievers/types.dart index 4ed82147..a80412e2 100644 --- a/packages/langchain_core/lib/src/retrievers/types.dart +++ b/packages/langchain_core/lib/src/retrievers/types.dart @@ -9,7 +9,9 @@ import '../vector_stores/types.dart'; @immutable class RetrieverOptions extends BaseLangChainOptions { /// {@macro retriever_options} - const RetrieverOptions(); + const RetrieverOptions({ + super.concurrencyLimit, + }); } /// {@template vector_store_retriever_options} diff --git a/packages/langchain_core/lib/src/runnables/binding.dart b/packages/langchain_core/lib/src/runnables/binding.dart index a1b9f907..1bd1bee4 100644 --- a/packages/langchain_core/lib/src/runnables/binding.dart +++ b/packages/langchain_core/lib/src/runnables/binding.dart @@ -60,7 +60,9 @@ class RunnableBinding? safetySettings; - /// Creates a copy of this [ChatFirebaseVertexAIOptions] object with the given fields - /// replaced with the new values. + @override ChatFirebaseVertexAIOptions copyWith({ final String? model, final double? topP, @@ -122,6 +121,26 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { ); } + @override + ChatFirebaseVertexAIOptions merge( + covariant final ChatFirebaseVertexAIOptions? other, + ) { + return copyWith( + model: other?.model, + topP: other?.topP, + topK: other?.topK, + candidateCount: other?.candidateCount, + maxOutputTokens: other?.maxOutputTokens, + temperature: other?.temperature, + stopSequences: other?.stopSequences, + responseMimeType: other?.responseMimeType, + safetySettings: other?.safetySettings, + tools: other?.tools, + toolChoice: other?.toolChoice, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatFirebaseVertexAIOptions other) { return model == other.model && diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart index 8c4bff41..4c2f4063 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart @@ -115,8 +115,7 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// the default safety setting for that category. final List? safetySettings; - /// Creates a copy of this [ChatGoogleGenerativeAIOptions] object with the given fields - /// replaced with the new values. + @override ChatGoogleGenerativeAIOptions copyWith({ final String? model, final double? topP, @@ -145,6 +144,25 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { ); } + @override + ChatGoogleGenerativeAIOptions merge( + covariant final ChatGoogleGenerativeAIOptions? 
other, + ) { + return copyWith( + model: other?.model, + topP: other?.topP, + topK: other?.topK, + candidateCount: other?.candidateCount, + maxOutputTokens: other?.maxOutputTokens, + temperature: other?.temperature, + stopSequences: other?.stopSequences, + safetySettings: other?.safetySettings, + tools: other?.tools, + toolChoice: other?.toolChoice, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatGoogleGenerativeAIOptions other) { return model == other.model && diff --git a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart index c0642867..019ab64e 100644 --- a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart @@ -97,8 +97,7 @@ class ChatVertexAIOptions extends ChatModelOptions { /// List of messages to the model to learn how to respond to the conversation. final List? examples; - /// Creates a copy of this [ChatVertexAIOptions] object with the given fields - /// replaced with the new values. + @override ChatVertexAIOptions copyWith({ final String? publisher, final String? model, @@ -125,6 +124,22 @@ class ChatVertexAIOptions extends ChatModelOptions { ); } + @override + ChatVertexAIOptions merge(covariant ChatVertexAIOptions? other) { + return copyWith( + publisher: other?.publisher, + model: other?.model, + maxOutputTokens: other?.maxOutputTokens, + temperature: other?.temperature, + topP: other?.topP, + topK: other?.topK, + stopSequences: other?.stopSequences, + candidateCount: other?.candidateCount, + examples: other?.examples, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatVertexAIOptions other) { return publisher == other.publisher && diff --git a/packages/langchain_google/lib/src/llms/vertex_ai/types.dart b/packages/langchain_google/lib/src/llms/vertex_ai/types.dart index f9eee704..e11589c4 100644 --- a/packages/langchain_google/lib/src/llms/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/llms/vertex_ai/types.dart @@ -93,8 +93,7 @@ class VertexAIOptions extends LLMOptions { /// Range: `[1–8]` final int? candidateCount; - /// Creates a copy of this [VertexAIOptions] object with the given fields - /// replaced with the new values. + @override VertexAIOptions copyWith({ final String? publisher, final String? model, @@ -120,6 +119,20 @@ class VertexAIOptions extends LLMOptions { } @override + VertexAIOptions merge(covariant final VertexAIOptions? other) { + return copyWith( + publisher: other?.publisher, + model: other?.model, + maxOutputTokens: other?.maxOutputTokens, + temperature: other?.temperature, + topP: other?.topP, + topK: other?.topK, + stopSequences: other?.stopSequences, + candidateCount: other?.candidateCount, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final VertexAIOptions other) { return publisher == other.publisher && diff --git a/packages/langchain_mistralai/lib/src/chat_models/types.dart b/packages/langchain_mistralai/lib/src/chat_models/types.dart index aa2f9537..e6ba07b9 100644 --- a/packages/langchain_mistralai/lib/src/chat_models/types.dart +++ b/packages/langchain_mistralai/lib/src/chat_models/types.dart @@ -46,8 +46,7 @@ class ChatMistralAIOptions extends ChatModelOptions { /// If set, different calls will generate deterministic results. final int? 
randomSeed; - /// Creates a copy of this [ChatMistralAIOptions] object with the given fields - /// replaced with the new values. + @override ChatMistralAIOptions copyWith({ final String? model, final double? temperature, @@ -68,6 +67,19 @@ class ChatMistralAIOptions extends ChatModelOptions { ); } + @override + ChatMistralAIOptions merge(covariant ChatMistralAIOptions? other) { + return copyWith( + model: other?.model, + temperature: other?.temperature, + topP: other?.topP, + maxTokens: other?.maxTokens, + safePrompt: other?.safePrompt, + randomSeed: other?.randomSeed, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatMistralAIOptions other) { return model == other.model && diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart index 971f259c..137d0bdf 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart @@ -201,8 +201,7 @@ class ChatOllamaOptions extends ChatModelOptions { /// the logical number of cores). final int? numThread; - /// Creates a copy of this [ChatOllamaOptions] object with the given fields - /// replaced with the new values. + @override ChatOllamaOptions copyWith({ final String? model, final OllamaResponseFormat? format, @@ -275,6 +274,45 @@ class ChatOllamaOptions extends ChatModelOptions { ); } + @override + ChatOllamaOptions merge(covariant final ChatOllamaOptions? other) { + return copyWith( + model: other?.model, + format: other?.format, + keepAlive: other?.keepAlive, + numKeep: other?.numKeep, + seed: other?.seed, + numPredict: other?.numPredict, + topK: other?.topK, + topP: other?.topP, + tfsZ: other?.tfsZ, + typicalP: other?.typicalP, + repeatLastN: other?.repeatLastN, + temperature: other?.temperature, + repeatPenalty: other?.repeatPenalty, + presencePenalty: other?.presencePenalty, + frequencyPenalty: other?.frequencyPenalty, + mirostat: other?.mirostat, + mirostatTau: other?.mirostatTau, + mirostatEta: other?.mirostatEta, + penalizeNewline: other?.penalizeNewline, + stop: other?.stop, + numa: other?.numa, + numCtx: other?.numCtx, + numBatch: other?.numBatch, + numGpu: other?.numGpu, + mainGpu: other?.mainGpu, + lowVram: other?.lowVram, + f16KV: other?.f16KV, + logitsAll: other?.logitsAll, + vocabOnly: other?.vocabOnly, + useMmap: other?.useMmap, + useMlock: other?.useMlock, + numThread: other?.numThread, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatOllamaOptions other) { return model == other.model && diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart index f10f1186..7ad2615a 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart @@ -19,6 +19,7 @@ class ChatOllamaToolsOptions extends ChatModelOptions { super.tools, super.toolChoice, this.toolsSystemPromptTemplate, + super.concurrencyLimit, }); /// [ChatOllamaOptions] to pass into Ollama. @@ -61,13 +62,13 @@ Example response format: Ensure your response is valid JSON and follows this exact format. '''; - /// Creates a copy of this [ChatOllamaToolsOptions] object with the given - /// fields replaced with the new values. 
+ @override ChatOllamaToolsOptions copyWith({ - ChatOllamaOptions? options, - List? tools, - ChatToolChoice? toolChoice, - String? toolsSystemPromptTemplate, + final ChatOllamaOptions? options, + final List? tools, + final ChatToolChoice? toolChoice, + final String? toolsSystemPromptTemplate, + final int? concurrencyLimit, }) { return ChatOllamaToolsOptions( options: options ?? this.options, @@ -75,6 +76,18 @@ Ensure your response is valid JSON and follows this exact format. toolChoice: toolChoice ?? this.toolChoice, toolsSystemPromptTemplate: toolsSystemPromptTemplate ?? this.toolsSystemPromptTemplate, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } + + @override + ChatOllamaToolsOptions merge(covariant final ChatOllamaToolsOptions? other) { + return copyWith( + options: other?.options, + tools: other?.tools, + toolChoice: other?.toolChoice, + toolsSystemPromptTemplate: other?.toolsSystemPromptTemplate, + concurrencyLimit: other?.concurrencyLimit, ); } diff --git a/packages/langchain_ollama/lib/src/llms/types.dart b/packages/langchain_ollama/lib/src/llms/types.dart index 494e759e..ecad337d 100644 --- a/packages/langchain_ollama/lib/src/llms/types.dart +++ b/packages/langchain_ollama/lib/src/llms/types.dart @@ -222,8 +222,7 @@ class OllamaOptions extends LLMOptions { /// the logical number of cores). final int? numThread; - /// Creates a copy of this [OllamaOptions] object with the given fields - /// replaced with the new values. + @override OllamaOptions copyWith({ final String? model, final String? system, @@ -304,6 +303,49 @@ class OllamaOptions extends LLMOptions { ); } + @override + OllamaOptions merge(covariant final OllamaOptions? other) { + return copyWith( + model: other?.model, + system: other?.system, + template: other?.template, + context: other?.context, + format: other?.format, + raw: other?.raw, + keepAlive: other?.keepAlive, + numKeep: other?.numKeep, + seed: other?.seed, + numPredict: other?.numPredict, + topK: other?.topK, + topP: other?.topP, + tfsZ: other?.tfsZ, + typicalP: other?.typicalP, + repeatLastN: other?.repeatLastN, + temperature: other?.temperature, + repeatPenalty: other?.repeatPenalty, + presencePenalty: other?.presencePenalty, + frequencyPenalty: other?.frequencyPenalty, + mirostat: other?.mirostat, + mirostatTau: other?.mirostatTau, + mirostatEta: other?.mirostatEta, + penalizeNewline: other?.penalizeNewline, + stop: other?.stop, + numa: other?.numa, + numCtx: other?.numCtx, + numBatch: other?.numBatch, + numGpu: other?.numGpu, + mainGpu: other?.mainGpu, + lowVram: other?.lowVram, + f16KV: other?.f16KV, + logitsAll: other?.logitsAll, + vocabOnly: other?.vocabOnly, + useMmap: other?.useMmap, + useMlock: other?.useMlock, + numThread: other?.numThread, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final OllamaOptions other) { return identical(this, other) || diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index ed53c65c..9712ff59 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -142,26 +142,25 @@ class ChatOpenAIOptions extends ChatModelOptions { /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids final String? user; - /// Creates a copy of this [ChatOpenAIOptions] object with the given fields - /// replaced with the new values. + @override ChatOpenAIOptions copyWith({ - String? 
model, - double? frequencyPenalty, - Map? logitBias, - int? maxTokens, - int? n, - double? presencePenalty, - ChatOpenAIResponseFormat? responseFormat, - int? seed, - List? stop, - double? temperature, - double? topP, - List? tools, - ChatToolChoice? toolChoice, - bool? parallelToolCalls, - ChatOpenAIServiceTier? serviceTier, - String? user, - int? concurrencyLimit, + final String? model, + final double? frequencyPenalty, + final Map? logitBias, + final int? maxTokens, + final int? n, + final double? presencePenalty, + final ChatOpenAIResponseFormat? responseFormat, + final int? seed, + final List? stop, + final double? temperature, + final double? topP, + final List? tools, + final ChatToolChoice? toolChoice, + final bool? parallelToolCalls, + final ChatOpenAIServiceTier? serviceTier, + final String? user, + final int? concurrencyLimit, }) { return ChatOpenAIOptions( model: model ?? this.model, @@ -184,6 +183,29 @@ class ChatOpenAIOptions extends ChatModelOptions { ); } + @override + ChatOpenAIOptions merge(covariant final ChatOpenAIOptions? other) { + return copyWith( + model: other?.model, + frequencyPenalty: other?.frequencyPenalty, + logitBias: other?.logitBias, + maxTokens: other?.maxTokens, + n: other?.n, + presencePenalty: other?.presencePenalty, + responseFormat: other?.responseFormat, + seed: other?.seed, + stop: other?.stop, + temperature: other?.temperature, + topP: other?.topP, + tools: other?.tools, + toolChoice: other?.toolChoice, + parallelToolCalls: other?.parallelToolCalls, + serviceTier: other?.serviceTier, + user: other?.user, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatOpenAIOptions other) { return identical(this, other) || diff --git a/packages/langchain_openai/lib/src/llms/types.dart b/packages/langchain_openai/lib/src/llms/types.dart index 7f8da471..a6bc2ee2 100644 --- a/packages/langchain_openai/lib/src/llms/types.dart +++ b/packages/langchain_openai/lib/src/llms/types.dart @@ -121,24 +121,23 @@ class OpenAIOptions extends LLMOptions { /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids final String? user; - /// Creates a copy of this [OpenAIOptions] object with the given fields - /// replaced with the new values. + @override OpenAIOptions copyWith({ - String? model, - int? bestOf, - double? frequencyPenalty, - Map? logitBias, - int? logprobs, - int? maxTokens, - int? n, - double? presencePenalty, - int? seed, - List? stop, - String? suffix, - double? temperature, - double? topP, - String? user, - int? concurrencyLimit, + final String? model, + final int? bestOf, + final double? frequencyPenalty, + final Map? logitBias, + final int? logprobs, + final int? maxTokens, + final int? n, + final double? presencePenalty, + final int? seed, + final List? stop, + final String? suffix, + final double? temperature, + final double? topP, + final String? user, + final int? concurrencyLimit, }) { return OpenAIOptions( model: model ?? this.model, @@ -159,6 +158,27 @@ class OpenAIOptions extends LLMOptions { ); } + @override + OpenAIOptions merge(covariant final OpenAIOptions? 
other) { + return copyWith( + model: other?.model, + bestOf: other?.bestOf, + frequencyPenalty: other?.frequencyPenalty, + logitBias: other?.logitBias, + logprobs: other?.logprobs, + maxTokens: other?.maxTokens, + n: other?.n, + presencePenalty: other?.presencePenalty, + seed: other?.seed, + stop: other?.stop, + suffix: other?.suffix, + temperature: other?.temperature, + topP: other?.topP, + user: other?.user, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final OpenAIOptions other) { return identical(this, other) || diff --git a/packages/langchain_openai/lib/src/tools/types.dart b/packages/langchain_openai/lib/src/tools/types.dart index 3b049dc6..086ba0f5 100644 --- a/packages/langchain_openai/lib/src/tools/types.dart +++ b/packages/langchain_openai/lib/src/tools/types.dart @@ -1,10 +1,12 @@ import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; import 'dall_e.dart'; /// {@template open_ai_dall_e_tool_options} /// Generation options to pass into the [OpenAIDallETool]. /// {@endtemplate} +@immutable class OpenAIDallEToolOptions extends ToolOptions { /// {@macro open_ai_dall_e_tool_options} const OpenAIDallEToolOptions({ @@ -14,6 +16,7 @@ class OpenAIDallEToolOptions extends ToolOptions { this.size = ImageSize.v1024x1024, this.style = ImageStyle.vivid, this.user, + super.concurrencyLimit, }); /// ID of the model to use (e.g. `dall-e-2` or 'dall-e-3'). @@ -63,4 +66,60 @@ class OpenAIDallEToolOptions extends ToolOptions { /// /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids final String? user; + + @override + OpenAIDallEToolOptions copyWith({ + final String? model, + final ImageQuality? quality, + final ImageResponseFormat? responseFormat, + final ImageSize? size, + final ImageStyle? style, + final String? user, + final int? concurrencyLimit, + }) { + return OpenAIDallEToolOptions( + model: model ?? this.model, + quality: quality ?? this.quality, + responseFormat: responseFormat ?? this.responseFormat, + size: size ?? this.size, + style: style ?? this.style, + user: user ?? this.user, + concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, + ); + } + + @override + OpenAIDallEToolOptions merge(covariant final OpenAIDallEToolOptions? 
other) { + return copyWith( + model: other?.model, + quality: other?.quality, + responseFormat: other?.responseFormat, + size: other?.size, + style: other?.style, + user: other?.user, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final OpenAIDallEToolOptions other) { + return model == other.model && + quality == other.quality && + responseFormat == other.responseFormat && + size == other.size && + style == other.style && + user == other.user && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + quality.hashCode ^ + responseFormat.hashCode ^ + size.hashCode ^ + style.hashCode ^ + user.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_openai/test/chat_models/chat_openai_test.dart b/packages/langchain_openai/test/chat_models/chat_openai_test.dart index 7c2d95d1..edb42b2e 100644 --- a/packages/langchain_openai/test/chat_models/chat_openai_test.dart +++ b/packages/langchain_openai/test/chat_models/chat_openai_test.dart @@ -118,36 +118,36 @@ void main() { expect(res.content, isNotEmpty); }); + const getCurrentWeatherTool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + test('Test ChatOpenAI tool calling', timeout: const Timeout(Duration(minutes: 1)), () async { final chat = ChatOpenAI(apiKey: openaiApiKey); - const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. 
San Francisco, CA', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - final humanMessage = ChatMessage.humanText( 'What’s the weather like in Boston right now?', ); final res1 = await chat.invoke( PromptValue.chat([humanMessage]), - options: const ChatOpenAIOptions(tools: [tool]), + options: const ChatOpenAIOptions(tools: [getCurrentWeatherTool]), ); final aiMessage1 = res1.output; @@ -156,7 +156,7 @@ void main() { expect(aiMessage1.toolCalls, isNotEmpty); final toolCall = aiMessage1.toolCalls.first; - expect(toolCall.name, tool.name); + expect(toolCall.name, getCurrentWeatherTool.name); expect(toolCall.arguments.containsKey('location'), isTrue); expect(toolCall.arguments['location'], contains('Boston')); @@ -172,7 +172,7 @@ void main() { final res2 = await chat.invoke( PromptValue.chat([humanMessage, aiMessage1, functionMessage]), - options: const ChatOpenAIOptions(tools: [tool]), + options: const ChatOpenAIOptions(tools: [getCurrentWeatherTool]), ); final aiMessage2 = res2.output; @@ -269,26 +269,26 @@ void main() { expect(result.usage.totalTokens, greaterThan(0)); }); - test('Test ChatOpenAI streaming with functions', () async { - const tool = ToolSpec( - name: 'joke', - description: 'A joke', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'setup': { - 'type': 'string', - 'description': 'The setup for the joke', - }, - 'punchline': { - 'type': 'string', - 'description': 'The punchline to the joke', - }, + const jokeTool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', }, - 'required': ['location', 'punchline'], }, - ); + 'required': ['location', 'punchline'], + }, + ); + test('Test ChatOpenAI streaming with functions', () async { final promptTemplate = ChatPromptTemplate.fromTemplate( 'tell me a long joke about {foo}', ); @@ -300,7 +300,7 @@ void main() { ), ).bind( ChatOpenAIOptions( - tools: const [tool], + tools: const [jokeTool], toolChoice: ChatToolChoice.forced(name: 'joke'), ), ); @@ -433,5 +433,63 @@ void main() { final res = await chatModel.invoke(prompt); expect(res.output.content.toLowerCase(), contains('apple')); }); + + test('Test additive bind calls', () async { + final chatModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions( + model: defaultModel, + temperature: 0, + ), + ); + + final chatModelWithTools = chatModel.bind( + const ChatOpenAIOptions( + tools: [getCurrentWeatherTool, jokeTool], + ), + ); + + final res1 = await chatModelWithTools.invoke( + PromptValue.string( + 'Tell me the weather in Barcelona, Spain and a joke about bears', + ), + ); + expect( + res1.output.toolCalls.map((tc) => tc.name).toSet(), + {getCurrentWeatherTool.name, jokeTool.name}, + ); + + final chatModelForceWeatherTool = chatModelWithTools.bind( + ChatOpenAIOptions( + toolChoice: ChatToolChoice.forced(name: getCurrentWeatherTool.name), + ), + ); + + final res2 = await chatModelForceWeatherTool.invoke( + PromptValue.string( + 'Tell me the weather in Barcelona, Spain and a joke about bears', + ), + ); + expect( + res2.output.toolCalls.map((tc) => tc.name).toSet(), + {getCurrentWeatherTool.name}, + ); + + final chatModelForceJokeTool = chatModelWithTools.bind( + ChatOpenAIOptions( + toolChoice: 
ChatToolChoice.forced(name: jokeTool.name), + ), + ); + + final res3 = await chatModelForceJokeTool.invoke( + PromptValue.string( + 'Tell me the weather in Barcelona, Spain and a joke about bears', + ), + ); + expect( + res3.output.toolCalls.map((tc) => tc.name).toSet(), + {jokeTool.name}, + ); + }); }); } From 3a7e15d496bb7aa957d2a9aee6bc0d7bf0f8216e Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Tue, 23 Jul 2024 23:12:24 +0200 Subject: [PATCH 194/251] feat: Allow to customize OpenAI-Beta header in openai_dart (#502) --- packages/openai_dart/lib/src/client.dart | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/openai_dart/lib/src/client.dart b/packages/openai_dart/lib/src/client.dart index 098a4cf2..b01a1594 100644 --- a/packages/openai_dart/lib/src/client.dart +++ b/packages/openai_dart/lib/src/client.dart @@ -18,6 +18,8 @@ class OpenAIClient extends g.OpenAIClient { /// - `apiKey`: your OpenAI API key. You can find your API key in the /// [OpenAI dashboard](https://platform.openai.com/account/api-keys). /// - `organization`: your OpenAI organization ID (if applicable). + /// - `beta`: the content to use for the `OpenAI-Beta` header which can be + /// used to enable beta features. /// /// Advance configuration options: /// - `baseUrl`: the base URL to use. Defaults to OpenAI's API URL. You can @@ -32,6 +34,7 @@ class OpenAIClient extends g.OpenAIClient { OpenAIClient({ final String? apiKey, final String? organization, + final String? beta = 'assistants=v2', final String? baseUrl, final Map? headers, final Map? queryParams, @@ -41,7 +44,7 @@ class OpenAIClient extends g.OpenAIClient { baseUrl: baseUrl, headers: { if (organization != null) 'OpenAI-Organization': organization, - 'OpenAI-Beta': 'assistants=v2', + if (beta != null) 'OpenAI-Beta': beta, ...?headers, }, queryParams: queryParams ?? 
const {}, From 57aed0c8174ed1ee219575bb341e7f53b21ff7 Mon Sep 17 00:00:00 2001 From: Ganesh Date: Wed, 24 Jul 2024 12:05:50 +0530 Subject: [PATCH 195/251] feat: Add Fallback support for Runnables (#501) Co-authored-by: David Miguel --- docs/_sidebar.md | 1 + docs/expression_language/fallbacks.md | 135 +++++++++++++ .../bin/expression_language/fallbacks.dart | 181 ++++++++++++++++++ .../lib/src/chat_models/fake.dart | 48 +++-- .../lib/src/runnables/fallbacks.dart | 112 +++++++++++ .../lib/src/runnables/runnable.dart | 20 ++ .../lib/src/runnables/runnables.dart | 1 + .../test/runnables/fallbacks_test.dart | 102 ++++++++++ 8 files changed, 589 insertions(+), 11 deletions(-) create mode 100644 docs/expression_language/fallbacks.md create mode 100644 examples/docs_examples/bin/expression_language/fallbacks.dart create mode 100644 packages/langchain_core/lib/src/runnables/fallbacks.dart create mode 100644 packages/langchain_core/test/runnables/fallbacks_test.dart diff --git a/docs/_sidebar.md b/docs/_sidebar.md index 5296fd96..bfb7aad0 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -14,6 +14,7 @@ - [Binding: Configuring runnables](/expression_language/primitives/binding.md) - [Router: Routing inputs](/expression_language/primitives/router.md) - [Streaming](/expression_language/streaming.md) + - [Fallbacks](/expression_language/fallbacks.md) - Cookbook - [Prompt + LLM](/expression_language/cookbook/prompt_llm_parser.md) - [Multiple chains](/expression_language/cookbook/multiple_chains.md) diff --git a/docs/expression_language/fallbacks.md b/docs/expression_language/fallbacks.md new file mode 100644 index 00000000..5fd0b8a7 --- /dev/null +++ b/docs/expression_language/fallbacks.md @@ -0,0 +1,135 @@ +# Fallbacks + +When working with language models, you may often encounter issues from the underlying APIs, e.g. rate limits or downtime. Therefore, as you move your LLM applications into production, it becomes increasingly important to have contingencies for errors. That's why we've introduced the concept of fallbacks. + +Crucially, fallbacks can be applied not only at the LLM level but at the whole runnable level. This is important because different models often require different prompts. So if your call to OpenAI fails, you don't just want to send the same prompt to Anthropic - you probably want to use, for example, a different prompt template. + +## Handling LLM API errors with fallbacks + +This is perhaps the most common use case for fallbacks. A request to an LLM API can fail for a variety of reasons - the API could be down, you could have hit a rate limit, or any number of other things. This situation can be handled using fallbacks. + +Fallbacks can be created using the `withFallbacks()` function on the runnable that you are working with, for example `final runnableWithFallbacks = mainRunnable.withFallbacks([fallback1, fallback2])`. This creates a `RunnableWithFallback` along with a list of fallbacks. When it is invoked, the `mainRunnable` is called first; if it fails, the fallbacks are invoked sequentially until one of the fallbacks in the list returns output. If the `mainRunnable` succeeds and returns output, the fallbacks won't be called.
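To make that pattern concrete, here is a minimal, self-contained sketch that condenses the fuller examples in the sections below. It assumes a valid `OPENAI_API_KEY` environment variable and uses the intentionally invalid model id `tomato` to force a failure, falling back to `gpt-4o`.

```dart
// Minimal sketch of the withFallbacks() pattern (condensed from the
// examples below). The model id 'tomato' is intentionally invalid, so the
// first model always fails and the fallback handles the request.
import 'dart:io';

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

void main() async {
  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];

  final failingModel = ChatOpenAI(
    apiKey: openaiApiKey,
    defaultOptions: const ChatOpenAIOptions(model: 'tomato'),
  );
  final fallbackModel = ChatOpenAI(
    apiKey: openaiApiKey,
    defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'),
  );

  // failingModel is invoked first; when it throws, fallbackModel is
  // invoked with the same input.
  final modelWithFallbacks = failingModel.withFallbacks([fallbackModel]);

  final res = await modelWithFallbacks.invoke(
    PromptValue.string('Explain why the sky is blue in 2 lines'),
  );
  print(res.output.content);
}
```

The sections below walk through the same pattern in more detail, including chains and batch calls, together with example outputs.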
+ +## Fallback for chat models + +```dart +// fake model will throw error during invoke and fallback model will be called +final fakeOpenAIModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'tomato'), +); + +final latestModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), +); + +final modelWithFallbacks = fakeOpenAIModel.withFallbacks([latestModel]); + +final prompt = PromptValue.string('Explain why sky is blue in 2 lines'); + +final res = await modelWithFallbacks.invoke(prompt); +print(res); +/* +{ + "ChatResult": { + "id": "chatcmpl-9nKBcFNkzo5qUrdNB92b36J0d1meA", + "output": { + "AIChatMessage": { + "content": "The sky appears blue because molecules in the Earth's atmosphere scatter shorter wavelength blue light from the sun more effectively than longer wavelengths like red. This scattering process is known as Rayleigh scattering.", + "toolCalls": [] + } + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721542696, + "system_fingerprint": "fp_400f27fa1f" + }, + "usage": { + "LanguageModelUsage": { + "promptTokens": 16, + "promptBillableCharacters": null, + "responseTokens": 36, + "responseBillableCharacters": null, + "totalTokens": 52 + } + }, + "streaming": false + } +} +*/ +``` + +Note: if the options provided when invoking the runnable with fallbacks are not compatible with some of the fallbacks, they will be ignored. If you want to use different options for different fallbacks, provide them as `defaultOptions` when instantiating the fallbacks or use `bind()`. + +## Fallbacks for RunnableSequences with batch + +```dart +final fakeOpenAIModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'tomato'), +); + +final latestModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), +); + +final promptTemplate = ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + +final badChain = promptTemplate.pipe(fakeOpenAIModel); +final goodChain = promptTemplate.pipe(latestModel); + +final chainWithFallbacks = badChain.withFallbacks([goodChain]); + +final res = await chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], +); +print(res); +/* +[ + { + "id": "chatcmpl-9nKncT4IpAxbUxrEqEKGB0XUeyGRI", + "output": { + "content": "Sure! How about this one?\n\nWhy did the bear bring a suitcase to the forest?\n\nBecause it wanted to pack a lunch! 
🐻🌲", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721545052, + "system_fingerprint": "fp_400f27fa1f" + }, + "usage": { + "promptTokens": 13, + "promptBillableCharacters": null, + "responseTokens": 31, + "responseBillableCharacters": null, + "totalTokens": 44 + }, + "streaming": false + }, + { + "id": "chatcmpl-9nKnc58FpXFTPkzZfm2hHxJ5VSQQh", + "output": { + "content": "Sure, here's a cat joke for you:\n\nWhy was the cat sitting on the computer?\n\nBecause it wanted to keep an eye on the mouse!", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721545052, + "system_fingerprint": "fp_c4e5b6fa31" + }, + "usage": { + "promptTokens": 13, + "promptBillableCharacters": null, + "responseTokens": 29, + "responseBillableCharacters": null, + "totalTokens": 42 + }, + "streaming": false + } +] +*/ +``` diff --git a/examples/docs_examples/bin/expression_language/fallbacks.dart b/examples/docs_examples/bin/expression_language/fallbacks.dart new file mode 100644 index 00000000..8eea7bb2 --- /dev/null +++ b/examples/docs_examples/bin/expression_language/fallbacks.dart @@ -0,0 +1,181 @@ +// ignore_for_file: avoid_print +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_openai/langchain_openai.dart'; + +void main() async { + await _modelWithFallbacks(); + await _modelWithMultipleFallbacks(); + await _chainWithFallbacks(); +} + +Future _modelWithFallbacks() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + + final fakeOpenAIModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'tomato'), + ); + + final latestModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), + ); + + final modelWithFallbacks = fakeOpenAIModel.withFallbacks([latestModel]); + + final prompt = PromptValue.string('Explain why sky is blue in 2 lines'); + + final res = await modelWithFallbacks.invoke(prompt); + print(res); +/* +{ + "ChatResult": { + "id": "chatcmpl-9nKBcFNkzo5qUrdNB92b36J0d1meA", + "output": { + "AIChatMessage": { + "content": "The sky appears blue because molecules in the Earth's atmosphere scatter shorter wavelength blue light from the sun more effectively than longer wavelengths like red. 
This scattering process is known as Rayleigh scattering.", + "toolCalls": [] + } + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721542696, + "system_fingerprint": "fp_400f27fa1f" + }, + "usage": { + "LanguageModelUsage": { + "promptTokens": 16, + "promptBillableCharacters": null, + "responseTokens": 36, + "responseBillableCharacters": null, + "totalTokens": 52 + } + }, + "streaming": false + } +} +*/ +} + +Future _modelWithMultipleFallbacks() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + + final fakeOpenAIModel1 = + ChatOpenAI(defaultOptions: const ChatOpenAIOptions(model: 'tomato')); + + final fakeOpenAIModel2 = + ChatOpenAI(defaultOptions: const ChatOpenAIOptions(model: 'potato')); + + final latestModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), + ); + + final modelWithFallbacks = + fakeOpenAIModel1.withFallbacks([fakeOpenAIModel2, latestModel]); + + final prompt = PromptValue.string('Explain why sky is blue in 2 lines'); + + final res = await modelWithFallbacks.invoke(prompt); + print(res); + /* + { + "id": "chatcmpl-9nLKW345nrh0nzmw18iO35XnoQ2jo", + "output": { + "content": "The sky appears blue due to Rayleigh scattering, where shorter blue wavelengths of sunlight are scattered more than other colors by the molecules in Earth's atmosphere. This scattering disperses blue light in all directions, making the sky look blue.", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721547092, + "system_fingerprint": "fp_c4e5b6fa31" + }, + "usage": { + "promptTokens": 16, + "promptBillableCharacters": null, + "responseTokens": 45, + "responseBillableCharacters": null, + "totalTokens": 61 + }, + "streaming": false +} +*/ +} + +Future _chainWithFallbacks() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + + final fakeOpenAIModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'tomato'), + ); + + final latestModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), + ); + + final promptTemplate = + ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + + final badChain = promptTemplate.pipe(fakeOpenAIModel); + final goodChain = promptTemplate.pipe(latestModel); + + final chainWithFallbacks = badChain.withFallbacks([goodChain]); + + final res = await chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], + ); + print(res); +/* +[ + { + "id": "chatcmpl-9nKncT4IpAxbUxrEqEKGB0XUeyGRI", + "output": { + "content": "Sure! How about this one?\n\nWhy did the bear bring a suitcase to the forest?\n\nBecause it wanted to pack a lunch! 
🐻🌲", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721545052, + "system_fingerprint": "fp_400f27fa1f" + }, + "usage": { + "promptTokens": 13, + "promptBillableCharacters": null, + "responseTokens": 31, + "responseBillableCharacters": null, + "totalTokens": 44 + }, + "streaming": false + }, + { + "id": "chatcmpl-9nKnc58FpXFTPkzZfm2hHxJ5VSQQh", + "output": { + "content": "Sure, here's a cat joke for you:\n\nWhy was the cat sitting on the computer?\n\nBecause it wanted to keep an eye on the mouse!", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721545052, + "system_fingerprint": "fp_c4e5b6fa31" + }, + "usage": { + "promptTokens": 13, + "promptBillableCharacters": null, + "responseTokens": 29, + "responseBillableCharacters": null, + "totalTokens": 42 + }, + "streaming": false + } +] +*/ +} diff --git a/packages/langchain_core/lib/src/chat_models/fake.dart b/packages/langchain_core/lib/src/chat_models/fake.dart index 79c44d1b..b69868c3 100644 --- a/packages/langchain_core/lib/src/chat_models/fake.dart +++ b/packages/langchain_core/lib/src/chat_models/fake.dart @@ -149,6 +149,12 @@ class FakeEchoChatModel extends BaseChatModel { final PromptValue input, { final FakeEchoChatModelOptions? options, }) async { + final throwError = + options?.throwRandomError ?? defaultOptions.throwRandomError; + if (throwError) { + throw Exception('Random error'); + } + final text = input.toChatMessages().last.contentAsString; final message = AIChatMessage(content: text); return ChatResult( @@ -169,18 +175,29 @@ class FakeEchoChatModel extends BaseChatModel { final FakeEchoChatModelOptions? options, }) { final prompt = input.toChatMessages().first.contentAsString.split(''); + final throwError = + options?.throwRandomError ?? defaultOptions.throwRandomError; + + var index = 0; return Stream.fromIterable(prompt).map( - (final char) => ChatResult( - id: 'fake-echo-chat-model', - output: AIChatMessage(content: char), - finishReason: FinishReason.stop, - metadata: { - 'model': options?.model ?? defaultOptions.model, - ...?options?.metadata ?? defaultOptions.metadata, - }, - usage: const LanguageModelUsage(), - streaming: true, - ), + (final char) { + if (throwError && index == prompt.length ~/ 2) { + throw Exception('Random error'); + } + + return ChatResult( + id: 'fake-echo-chat-model', + output: AIChatMessage(content: char), + finishReason: FinishReason.stop, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + 'index': index++, + }, + usage: const LanguageModelUsage(), + streaming: true, + ); + }, ); } @@ -205,21 +222,27 @@ class FakeEchoChatModelOptions extends ChatModelOptions { const FakeEchoChatModelOptions({ super.model, this.metadata, + this.throwRandomError = false, super.concurrencyLimit, }); /// Metadata. final Map? metadata; + /// If true, throws a random error. + final bool throwRandomError; + @override FakeEchoChatModelOptions copyWith({ final String? model, final Map? metadata, + final bool? throwRandomError, final int? concurrencyLimit, }) { return FakeEchoChatModelOptions( model: model ?? this.model, metadata: metadata ?? this.metadata, + throwRandomError: throwRandomError ?? this.throwRandomError, concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } @@ -231,6 +254,7 @@ class FakeEchoChatModelOptions extends ChatModelOptions { return copyWith( model: other?.model, metadata: other?.metadata, + throwRandomError: other?.throwRandomError, concurrencyLimit: other?.concurrencyLimit, ); } @@ -239,6 +263,7 @@ class FakeEchoChatModelOptions extends ChatModelOptions { bool operator ==(covariant final FakeEchoChatModelOptions other) { return model == other.model && const MapEquality().equals(metadata, other.metadata) && + throwRandomError == other.throwRandomError && concurrencyLimit == other.concurrencyLimit; } @@ -246,6 +271,7 @@ class FakeEchoChatModelOptions extends ChatModelOptions { int get hashCode { return model.hashCode ^ const MapEquality().hash(metadata) ^ + throwRandomError.hashCode ^ concurrencyLimit.hashCode; } } diff --git a/packages/langchain_core/lib/src/runnables/fallbacks.dart b/packages/langchain_core/lib/src/runnables/fallbacks.dart new file mode 100644 index 00000000..db4d31a7 --- /dev/null +++ b/packages/langchain_core/lib/src/runnables/fallbacks.dart @@ -0,0 +1,112 @@ +import 'runnable.dart'; +import 'types.dart'; + +/// {@template runnable_with_fallback} +/// A [Runnable] that can fallback to other [Runnable]s if it fails. +/// +/// This class allows for the creation of a [Runnable] chain where a main +/// [Runnable] is attempted first, and if it fails, a sequence of fallback +/// [Runnable]s are tried in order. This process continues until one of the +/// [Runnable]s succeeds or all of them fail, in which case an exception is +/// thrown. +/// +/// You can create a [RunnableWithFallback] using the [Runnable.withFallbacks] +/// method. +/// +/// Example: +/// ```dart +/// final mainChatModel = ChatOpenAI(...); +/// final fallbackChatModel = ChatOpenAI(...); +/// final chatModel = mainChatModel.withFallbacks([fallbackChatModel]); +/// final res = await chatModel.invoke(...); +/// ``` +/// {@endtemplate} +class RunnableWithFallback + extends Runnable { + /// {@macro runnable_fallback} + RunnableWithFallback({ + required this.mainRunnable, + required this.fallbacks, + }) : super(defaultOptions: const RunnableOptions()); + + /// The Runnable to run first. + final Runnable mainRunnable; + + /// A sequence of fallbacks to try if the [mainRunnable] fails. + final List> fallbacks; + + @override + Future invoke(RunInput input, {RunnableOptions? options}) async { + Object? firstError; + for (final runnable in [mainRunnable, ...fallbacks]) { + try { + return await runnable.invoke( + input, + options: firstError == null + ? options + : runnable.getCompatibleOptions(options), + ); + } catch (e) { + firstError ??= e; + } + } + throw Exception('All runnables failed. First error: $firstError'); + } + + @override + Future> batch( + List inputs, { + List? options, + }) async { + Object? firstError; + for (final runnable in [mainRunnable, ...fallbacks]) { + List? currentOptions; + if (firstError == null) { + currentOptions = options; + } else { + final compatibleOptions = + options?.map(runnable.getCompatibleOptions).toList(growable: false); + final hasNullOptions = + compatibleOptions?.any((o) => o == null) ?? false; + if (!hasNullOptions) { + currentOptions = compatibleOptions?.cast(); + } + } + + try { + return await runnable.batch( + inputs, + options: currentOptions, + ); + } catch (e) { + firstError ??= e; + } + } + throw Exception('All runnables failed. First error: $firstError'); + } + + @override + Stream stream( + RunInput input, { + RunnableOptions? options, + }) async* { + Object? 
firstError; + for (final runnable in [mainRunnable, ...fallbacks]) { + try { + final stream = runnable.stream( + input, + options: firstError == null + ? options + : runnable.getCompatibleOptions(options), + ); + await for (final output in stream) { + yield output; + } + return; + } catch (e) { + firstError ??= e; + } + } + throw Exception('All runnables failed. First error: $firstError'); + } +} diff --git a/packages/langchain_core/lib/src/runnables/runnable.dart b/packages/langchain_core/lib/src/runnables/runnable.dart index 71021af6..7c020a50 100644 --- a/packages/langchain_core/lib/src/runnables/runnable.dart +++ b/packages/langchain_core/lib/src/runnables/runnable.dart @@ -2,6 +2,7 @@ import 'dart:async'; import '../../utils.dart'; import 'binding.dart'; +import 'fallbacks.dart'; import 'function.dart'; import 'input_map.dart'; import 'input_stream_map.dart'; @@ -282,6 +283,25 @@ abstract class Runnable withFallbacks( + List> fallbacks, + ) { + return RunnableWithFallback( + mainRunnable: this, + fallbacks: fallbacks, + ); + } + /// Returns the given [options] if they are compatible with the [Runnable], /// otherwise returns `null`. CallOptions? getCompatibleOptions( diff --git a/packages/langchain_core/lib/src/runnables/runnables.dart b/packages/langchain_core/lib/src/runnables/runnables.dart index d909b234..146761d7 100644 --- a/packages/langchain_core/lib/src/runnables/runnables.dart +++ b/packages/langchain_core/lib/src/runnables/runnables.dart @@ -1,4 +1,5 @@ export 'binding.dart'; +export 'fallbacks.dart'; export 'function.dart'; export 'input_map.dart'; export 'input_stream_map.dart'; diff --git a/packages/langchain_core/test/runnables/fallbacks_test.dart b/packages/langchain_core/test/runnables/fallbacks_test.dart new file mode 100644 index 00000000..7bc7a72d --- /dev/null +++ b/packages/langchain_core/test/runnables/fallbacks_test.dart @@ -0,0 +1,102 @@ +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/output_parsers.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:test/test.dart'; + +void main() { + group('RunnableFallback tests', () { + late FakeEchoChatModel model; + late FakeChatModel fallbackModel; + final promptTemplate = + ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + final input = PromptValue.string('why is the sky blue'); + + setUp(() { + model = const FakeEchoChatModel(); + fallbackModel = FakeChatModel(responses: ['fallback response']); + }); + + test('RunnableFallback should return main runnable output', () async { + final modelWithFallback = model.withFallbacks([fallbackModel]); + final res = await modelWithFallback.invoke(input); + expect(res.output.content, 'why is the sky blue'); + }); + + test('Should call fallback runnable if main runnable fails', () async { + final brokenModel = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final modelWithFallback = brokenModel.withFallbacks([fallbackModel]); + final res = await modelWithFallback.invoke(input); + expect(res.output.content, 'fallback response'); + }); + + test('Test batch response of main runnable in RunnableFallback', () async { + const model = FakeEchoChatModel(); + const fallbackModel = FakeEchoChatModel(); + final fallbackChain = promptTemplate.pipe(fallbackModel); + final chainWithFallbacks = + promptTemplate.pipe(model).withFallbacks([fallbackChain]); + final res = await chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], + ); + expect(res[0].output.content, 'tell me a 
joke about bears'); + expect(res[1].output.content, 'tell me a joke about cats'); + }); + + test('Test fallbacks response in batch', () async { + final brokenModel = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final fallbackChain = promptTemplate.pipe(fallbackModel); + final chainWithFallbacks = + promptTemplate.pipe(brokenModel).withFallbacks([fallbackChain]); + final res = await chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + ], + ); + expect(res.first.output.content, 'fallback response'); + }); + + test('Should throw error if none of runnable returned output', () async { + final brokenModel1 = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final brokenModel2 = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final fallbackChain = promptTemplate.pipe(brokenModel2); + final chainWithFallbacks = + promptTemplate.pipe(brokenModel1).withFallbacks([fallbackChain]); + expect( + () async => chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + ], + ), + throwsException, + ); + }); + + test('Test stream response of main runnable in RunnableFallback', () async { + final modelWithFallback = model.withFallbacks([fallbackModel]); + final chain = modelWithFallback.pipe(const StringOutputParser()); + final res = await chain.stream(input).toList(); + expect(res.join('|'), 'w|h|y| |i|s| |t|h|e| |s|k|y| |b|l|u|e'); + }); + + test('Test fallbacks response in stream', () async { + final brokenModel = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final modelWithFallback = brokenModel.withFallbacks([fallbackModel]); + final chain = modelWithFallback.pipe(const StringOutputParser()); + final res = await chain.stream(input).toList(); + expect(res.join('|'), endsWith('f|a|l|l|b|a|c|k| |r|e|s|p|o|n|s|e')); + }); + }); +} From 1a7b48c1a06cdb54672e63c8998f7b5bdb599fe9 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 24 Jul 2024 18:37:42 +0200 Subject: [PATCH 196/251] feat: Add suffix support in Ollama completions API in ollama_dart (#503) --- .../schema/generate_completion_request.dart | 5 +++ .../src/generated/schema/schema.freezed.dart | 32 ++++++++++++++++++- .../lib/src/generated/schema/schema.g.dart | 2 ++ packages/ollama_dart/oas/ollama-curated.yaml | 18 ++++------- packages/ollama_dart/pubspec.yaml | 2 +- 5 files changed, 46 insertions(+), 13 deletions(-) diff --git a/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart b/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart index 1368ac7a..014e2654 100644 --- a/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart +++ b/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart @@ -23,6 +23,9 @@ class GenerateCompletionRequest with _$GenerateCompletionRequest { /// The prompt to generate a response. required String prompt, + /// The text that comes after the inserted text. + @JsonKey(includeIfNull: false) String? suffix, + /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? 
images, @@ -74,6 +77,7 @@ class GenerateCompletionRequest with _$GenerateCompletionRequest { static const List propertyNames = [ 'model', 'prompt', + 'suffix', 'images', 'system', 'template', @@ -95,6 +99,7 @@ class GenerateCompletionRequest with _$GenerateCompletionRequest { return { 'model': model, 'prompt': prompt, + 'suffix': suffix, 'images': images, 'system': system, 'template': template, diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index bf7cf75c..9b6d2f8f 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -29,6 +29,10 @@ mixin _$GenerateCompletionRequest { /// The prompt to generate a response. String get prompt => throw _privateConstructorUsedError; + /// The text that comes after the inserted text. + @JsonKey(includeIfNull: false) + String? get suffix => throw _privateConstructorUsedError; + /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? get images => throw _privateConstructorUsedError; @@ -91,6 +95,7 @@ abstract class $GenerateCompletionRequestCopyWith<$Res> { $Res call( {String model, String prompt, + @JsonKey(includeIfNull: false) String? suffix, @JsonKey(includeIfNull: false) List? images, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) String? template, @@ -123,6 +128,7 @@ class _$GenerateCompletionRequestCopyWithImpl<$Res, $Res call({ Object? model = null, Object? prompt = null, + Object? suffix = freezed, Object? images = freezed, Object? system = freezed, Object? template = freezed, @@ -142,6 +148,10 @@ class _$GenerateCompletionRequestCopyWithImpl<$Res, ? _value.prompt : prompt // ignore: cast_nullable_to_non_nullable as String, + suffix: freezed == suffix + ? _value.suffix + : suffix // ignore: cast_nullable_to_non_nullable + as String?, images: freezed == images ? _value.images : images // ignore: cast_nullable_to_non_nullable @@ -206,6 +216,7 @@ abstract class _$$GenerateCompletionRequestImplCopyWith<$Res> $Res call( {String model, String prompt, + @JsonKey(includeIfNull: false) String? suffix, @JsonKey(includeIfNull: false) List? images, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) String? template, @@ -238,6 +249,7 @@ class __$$GenerateCompletionRequestImplCopyWithImpl<$Res> $Res call({ Object? model = null, Object? prompt = null, + Object? suffix = freezed, Object? images = freezed, Object? system = freezed, Object? template = freezed, @@ -257,6 +269,10 @@ class __$$GenerateCompletionRequestImplCopyWithImpl<$Res> ? _value.prompt : prompt // ignore: cast_nullable_to_non_nullable as String, + suffix: freezed == suffix + ? _value.suffix + : suffix // ignore: cast_nullable_to_non_nullable + as String?, images: freezed == images ? _value._images : images // ignore: cast_nullable_to_non_nullable @@ -303,6 +319,7 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { const _$GenerateCompletionRequestImpl( {required this.model, required this.prompt, + @JsonKey(includeIfNull: false) this.suffix, @JsonKey(includeIfNull: false) final List? 
images, @JsonKey(includeIfNull: false) this.system, @JsonKey(includeIfNull: false) this.template, @@ -332,6 +349,11 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { @override final String prompt; + /// The text that comes after the inserted text. + @override + @JsonKey(includeIfNull: false) + final String? suffix; + /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) final List? _images; @@ -409,7 +431,7 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { @override String toString() { - return 'GenerateCompletionRequest(model: $model, prompt: $prompt, images: $images, system: $system, template: $template, context: $context, options: $options, format: $format, raw: $raw, stream: $stream, keepAlive: $keepAlive)'; + return 'GenerateCompletionRequest(model: $model, prompt: $prompt, suffix: $suffix, images: $images, system: $system, template: $template, context: $context, options: $options, format: $format, raw: $raw, stream: $stream, keepAlive: $keepAlive)'; } @override @@ -419,6 +441,7 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { other is _$GenerateCompletionRequestImpl && (identical(other.model, model) || other.model == model) && (identical(other.prompt, prompt) || other.prompt == prompt) && + (identical(other.suffix, suffix) || other.suffix == suffix) && const DeepCollectionEquality().equals(other._images, _images) && (identical(other.system, system) || other.system == system) && (identical(other.template, template) || @@ -438,6 +461,7 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { runtimeType, model, prompt, + suffix, const DeepCollectionEquality().hash(_images), system, template, @@ -467,6 +491,7 @@ abstract class _GenerateCompletionRequest extends GenerateCompletionRequest { const factory _GenerateCompletionRequest( {required final String model, required final String prompt, + @JsonKey(includeIfNull: false) final String? suffix, @JsonKey(includeIfNull: false) final List? images, @JsonKey(includeIfNull: false) final String? system, @JsonKey(includeIfNull: false) final String? template, @@ -497,6 +522,11 @@ abstract class _GenerateCompletionRequest extends GenerateCompletionRequest { String get prompt; @override + /// The text that comes after the inserted text. + @JsonKey(includeIfNull: false) + String? get suffix; + @override + /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? 
get images; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index a4aee619..e5d46d53 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -13,6 +13,7 @@ _$GenerateCompletionRequestImpl _$$GenerateCompletionRequestImplFromJson( _$GenerateCompletionRequestImpl( model: json['model'] as String, prompt: json['prompt'] as String, + suffix: json['suffix'] as String?, images: (json['images'] as List?)?.map((e) => e as String).toList(), system: json['system'] as String?, @@ -42,6 +43,7 @@ Map _$$GenerateCompletionRequestImplToJson( } } + writeNotNull('suffix', instance.suffix); writeNotNull('images', instance.images); writeNotNull('system', instance.system); writeNotNull('template', instance.template); diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index 9d3a507e..540b7141 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -278,6 +278,9 @@ components: type: string description: The prompt to generate a response. example: Why is the sky blue? + suffix: + type: string + description: The text that comes after the inserted text. images: type: array description: (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @@ -312,10 +315,10 @@ components: description: &stream | If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. default: false - keep_alive: + keep_alive: &keep_alive type: integer nullable: true - description: &keep_alive | + description: | How long (in minutes) to keep the model loaded in memory. - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. @@ -598,10 +601,7 @@ components: type: boolean description: *stream default: false - keep_alive: - type: integer - nullable: true - description: *keep_alive + keep_alive: *keep_alive required: - model - messages @@ -697,10 +697,7 @@ components: example: 'Here is an article about llamas...' options: $ref: '#/components/schemas/RequestOptions' - keep_alive: - type: integer - nullable: true - description: *keep_alive + keep_alive: *keep_alive required: - model - prompt @@ -831,7 +828,6 @@ components: type: integer nullable: true description: The number of parameters in the model. - ProcessResponse: type: object description: Response class for the list running models endpoint. 
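A minimal sketch of how the new `suffix` field added to `GenerateCompletionRequest` in this patch could be used for infill-style completions. The client setup, model name, prompt, and suffix below are illustrative assumptions, not part of this patch:

```dart
import 'package:ollama_dart/ollama_dart.dart';

Future<void> main() async {
  // Assumption: a local Ollama server is reachable via the client's defaults.
  final client = OllamaClient();

  final res = await client.generateCompletion(
    request: GenerateCompletionRequest(
      model: 'codellama:7b-code', // assumed infill-capable model (illustrative)
      prompt: 'int fib(int n) {\n',
      // New in this patch: the text that comes after the inserted text,
      // so the model generates only the code that belongs in between.
      suffix: '\n}\n',
    ),
  );
  print(res.response);
}
```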
diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 954eb772..5da5caa2 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 test: ^1.25.2 From 448577c2047e77d16725d903f2d565447e0a1d1b Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 24 Jul 2024 23:41:59 +0200 Subject: [PATCH 197/251] feat: Add tool calling support in ollama_dart (#504) --- .../src/chat_models/chat_ollama/mappers.dart | 2 +- packages/ollama_dart/README.md | 99 +- .../example/ollama_dart_example.dart | 80 +- .../generate_chat_completion_request.dart | 7 +- .../generate_chat_completion_response.dart | 8 +- .../lib/src/generated/schema/message.dart | 14 +- .../lib/src/generated/schema/schema.dart | 6 + .../src/generated/schema/schema.freezed.dart | 988 ++++++++++++++++-- .../lib/src/generated/schema/schema.g.dart | 106 +- .../lib/src/generated/schema/tool.dart | 53 + .../lib/src/generated/schema/tool_call.dart | 40 + .../generated/schema/tool_call_function.dart | 44 + .../schema/tool_call_function_args.dart | 12 + .../src/generated/schema/tool_function.dart | 52 + .../schema/tool_function_params.dart | 12 + packages/ollama_dart/oas/ollama-curated.yaml | 72 +- packages/ollama_dart/pubspec.yaml | 2 +- .../test/ollama_dart_chat_test.dart | 69 +- 18 files changed, 1540 insertions(+), 126 deletions(-) create mode 100644 packages/ollama_dart/lib/src/generated/schema/tool.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_call.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_function.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart index 0553fb88..d8b31e61 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart @@ -101,7 +101,7 @@ extension ChatResultMapper on GenerateChatCompletionResponse { return ChatResult( id: id, output: AIChatMessage( - content: message?.content ?? '', + content: message.content, ), finishReason: _mapFinishReason(doneReason), metadata: { diff --git a/packages/ollama_dart/README.md b/packages/ollama_dart/README.md index cf822953..46ad88a3 100644 --- a/packages/ollama_dart/README.md +++ b/packages/ollama_dart/README.md @@ -17,17 +17,24 @@ Unofficial Dart client for [Ollama](https://ollama.ai/) API. 
**Supported endpoints:** - Completions (with streaming support) -- Chat completions +- Chat completions (with streaming and tool calling support) - Embeddings - Models - Blobs +- Version ## Table of contents - [Usage](#usage) * [Completions](#completions) + + [Generate completion](#generate-completion) + + [Stream completion](#stream-completion) * [Chat completions](#chat-completions) + + [Generate chat completion](#generate-chat-completion) + + [Stream chat completion](#stream-chat-completion) + + [Tool calling](#tool-calling) * [Embeddings](#embeddings) + + [Generate embedding](#generate-embedding) * [Models](#models) + [Create model](#create-model) + [List models](#list-models) @@ -54,7 +61,7 @@ Refer to the [documentation](https://github.com/jmorganca/ollama/blob/main/docs/ Given a prompt, the model will generate a response. -**Generate completion:** +#### Generate completion ```dart final generated = await client.generateCompletion( @@ -67,7 +74,7 @@ print(generated.response); // The sky appears blue because of a phenomenon called Rayleigh scattering... ``` -**Stream completion:** +#### Stream completion ```dart final stream = client.generateCompletionStream( @@ -88,7 +95,7 @@ print(text); Given a prompt, the model will generate a response in a chat format. -**Generate chat completion:** +#### Generate chat completion ```dart final res = await client.generateChatCompletion( @@ -111,7 +118,7 @@ print(res); // Message(role: MessageRole.assistant, content: 123456789) ``` -**Stream chat completion:** +#### Stream chat completion ```dart final stream = client.generateChatCompletionStream( @@ -139,11 +146,91 @@ print(text); // 123456789 ``` +#### Tool calling + +Tool calling allows a model to respond to a given prompt by generating output that matches a user-defined schema, that you can then use to call the tools in your code and return the result back to the model to complete the conversation. + +**Notes:** +- Tool calling requires Ollama 0.2.8 or newer. +- Streaming tool calls is not supported at the moment. +- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1)). + +```dart +const tool = Tool( + function: ToolFunction( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + parameters: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ), +); + +const userMsg = Message( + role: MessageRole.user, + content: 'What’s the weather like in Barcelona in celsius?', +); + +final res1 = await client.generateChatCompletion( + request: GenerateChatCompletionRequest( + model: 'llama3.1', + messages: [userMsg], + tools: [tool], + ), +); + +print(res1.message.toolCalls); +// [ +// ToolCall( +// function: +// ToolCallFunction( +// name: get_current_weather, +// arguments: { +// location: Barcelona, ES, +// unit: celsius +// } +// ) +// ) +// ] + +// Call your tool here. For this example, we'll just mock the response. 
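+// In a real application you would instead dispatch on `res1.message.toolCalls`
+// and run the matching function, e.g. (hypothetical `getCurrentWeather` helper;
+// the shape of the arguments map follows the output shown above):
+//   final call = res1.message.toolCalls?.first;
+//   final result = await getCurrentWeather(call?.function?.arguments);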
+const toolResult = '{"location": "Barcelona, ES", "temperature": 20, "unit": "celsius"}'; + +// Submit the response of the tool call to the model +final res2 = await client.generateChatCompletion( + request: GenerateChatCompletionRequest( + model: 'llama3.1', + messages: [ + userMsg, + res1.message, + Message( + role: MessageRole.tool, + content: toolResult, + ), + ], + ), +); +print(res2.message.content); +// The current weather in Barcelona is 20°C. +``` + ### Embeddings Given a prompt, the model will generate an embedding representing the prompt. -**Generate embedding:** +#### Generate embedding ```dart final generated = await client.generateEmbedding( diff --git a/packages/ollama_dart/example/ollama_dart_example.dart b/packages/ollama_dart/example/ollama_dart_example.dart index fab5f712..e5d11e3c 100644 --- a/packages/ollama_dart/example/ollama_dart_example.dart +++ b/packages/ollama_dart/example/ollama_dart_example.dart @@ -11,6 +11,7 @@ Future main() async { await _generateChatCompletion(client); await _generateChatCompletionWithHistory(client); await _generateChatCompletionStream(client); + await _generateChatToolCalling(client); // Embeddings await _generateEmbedding(client); @@ -86,7 +87,7 @@ Future _generateChatCompletion(final OllamaClient client) async { ], ), ); - print(generated.message?.content); + print(generated.message.content); } Future _generateChatCompletionWithHistory( @@ -111,7 +112,7 @@ Future _generateChatCompletionWithHistory( ], ), ); - print(generated.message?.content); + print(generated.message.content); } Future _generateChatCompletionStream(final OllamaClient client) async { @@ -132,11 +133,84 @@ Future _generateChatCompletionStream(final OllamaClient client) async { ); String text = ''; await for (final res in stream) { - text += (res.message?.content ?? '').trim(); + text += res.message.content.trim(); } print(text); } +Future _generateChatToolCalling(final OllamaClient client) async { + const tool = Tool( + function: ToolFunction( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + parameters: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ), + ); + + const userMsg = Message( + role: MessageRole.user, + content: 'What’s the weather like in Barcelona in celsius?', + ); + + final res1 = await client.generateChatCompletion( + request: const GenerateChatCompletionRequest( + model: 'llama3.1', + messages: [userMsg], + tools: [tool], + keepAlive: 1, + ), + ); + + print(res1.message.toolCalls); + // [ + // ToolCall( + // function: + // ToolCallFunction( + // name: get_current_weather, + // arguments: { + // location: Barcelona, ES, + // unit: celsius + // } + // ) + // ) + // ] + + // Call your tool here. For this example, we'll just mock the response. + const toolResult = + '{"location": "Barcelona, ES", "temperature": 20, "unit": "celsius"}'; + + // Submit the response of the tool call to the model + final res2 = await client.generateChatCompletion( + request: GenerateChatCompletionRequest( + model: 'llama3.1', + messages: [ + userMsg, + res1.message, + const Message( + role: MessageRole.tool, + content: toolResult, + ), + ], + ), + ); + print(res2.message.content); + // The current weather in Barcelona is 20°C. 
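+
+  // Note: as the README above mentions, streaming of tool calls is not
+  // supported at the moment, so both requests in this example use the
+  // non-streaming generateChatCompletion endpoint.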
+} + Future _generateEmbedding(final OllamaClient client) async { final generated = await client.generateEmbedding( request: const GenerateEmbeddingRequest( diff --git a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart index 491efa66..fe47da47 100644 --- a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart +++ b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart @@ -47,6 +47,9 @@ class GenerateChatCompletionRequest with _$GenerateChatCompletionRequest { /// - If set to 0, the model will be unloaded immediately once finished. /// - If not set, the model will stay loaded for 5 minutes by default @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive, + + /// A list of tools the model may call. + @JsonKey(includeIfNull: false) List? tools, }) = _GenerateChatCompletionRequest; /// Object construction from a JSON representation @@ -60,7 +63,8 @@ class GenerateChatCompletionRequest with _$GenerateChatCompletionRequest { 'format', 'options', 'stream', - 'keep_alive' + 'keep_alive', + 'tools' ]; /// Perform validations on the schema property values @@ -77,6 +81,7 @@ class GenerateChatCompletionRequest with _$GenerateChatCompletionRequest { 'options': options, 'stream': stream, 'keep_alive': keepAlive, + 'tools': tools, }; } } diff --git a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart index faf7462a..d7857fd4 100644 --- a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart +++ b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart @@ -16,18 +16,18 @@ class GenerateChatCompletionResponse with _$GenerateChatCompletionResponse { /// Factory constructor for GenerateChatCompletionResponse const factory GenerateChatCompletionResponse({ /// A message in the chat endpoint - @JsonKey(includeIfNull: false) Message? message, + required Message message, /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) String? model, + required String model, /// Date on which a model was created. - @JsonKey(name: 'created_at', includeIfNull: false) String? createdAt, + @JsonKey(name: 'created_at') required String createdAt, /// Whether the response has completed. - @JsonKey(includeIfNull: false) bool? done, + required bool done, /// Reason why the model is done generating a response. @JsonKey( diff --git a/packages/ollama_dart/lib/src/generated/schema/message.dart b/packages/ollama_dart/lib/src/generated/schema/message.dart index 362e2349..add48dc2 100644 --- a/packages/ollama_dart/lib/src/generated/schema/message.dart +++ b/packages/ollama_dart/lib/src/generated/schema/message.dart @@ -23,6 +23,10 @@ class Message with _$Message { /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? images, + + /// A list of tools the model wants to call. + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? 
toolCalls, }) = _Message; /// Object construction from a JSON representation @@ -30,7 +34,12 @@ class Message with _$Message { _$MessageFromJson(json); /// List of all property names of schema - static const List propertyNames = ['role', 'content', 'images']; + static const List propertyNames = [ + 'role', + 'content', + 'images', + 'tool_calls' + ]; /// Perform validations on the schema property values String? validateSchema() { @@ -43,6 +52,7 @@ class Message with _$Message { 'role': role, 'content': content, 'images': images, + 'tool_calls': toolCalls, }; } } @@ -59,4 +69,6 @@ enum MessageRole { user, @JsonValue('assistant') assistant, + @JsonValue('tool') + tool, } diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.dart b/packages/ollama_dart/lib/src/generated/schema/schema.dart index 5ed7214c..f951912a 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.dart @@ -19,6 +19,12 @@ part 'generate_chat_completion_request.dart'; part 'generate_chat_completion_response.dart'; part 'done_reason.dart'; part 'message.dart'; +part 'tool.dart'; +part 'tool_function.dart'; +part 'tool_function_params.dart'; +part 'tool_call.dart'; +part 'tool_call_function.dart'; +part 'tool_call_function_args.dart'; part 'generate_embedding_request.dart'; part 'generate_embedding_response.dart'; part 'create_model_request.dart'; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index 9b6d2f8f..b9128995 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -2363,6 +2363,10 @@ mixin _$GenerateChatCompletionRequest { @JsonKey(name: 'keep_alive', includeIfNull: false) int? get keepAlive => throw _privateConstructorUsedError; + /// A list of tools the model may call. + @JsonKey(includeIfNull: false) + List? get tools => throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $GenerateChatCompletionRequestCopyWith @@ -2386,7 +2390,8 @@ abstract class $GenerateChatCompletionRequestCopyWith<$Res> { ResponseFormat? format, @JsonKey(includeIfNull: false) RequestOptions? options, bool stream, - @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); + @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive, + @JsonKey(includeIfNull: false) List? tools}); $RequestOptionsCopyWith<$Res>? get options; } @@ -2411,6 +2416,7 @@ class _$GenerateChatCompletionRequestCopyWithImpl<$Res, Object? options = freezed, Object? stream = null, Object? keepAlive = freezed, + Object? tools = freezed, }) { return _then(_value.copyWith( model: null == model @@ -2437,6 +2443,10 @@ class _$GenerateChatCompletionRequestCopyWithImpl<$Res, ? _value.keepAlive : keepAlive // ignore: cast_nullable_to_non_nullable as int?, + tools: freezed == tools + ? _value.tools + : tools // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } @@ -2471,7 +2481,8 @@ abstract class _$$GenerateChatCompletionRequestImplCopyWith<$Res> ResponseFormat? format, @JsonKey(includeIfNull: false) RequestOptions? options, bool stream, - @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); + @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive, + @JsonKey(includeIfNull: false) List? tools}); @override $RequestOptionsCopyWith<$Res>? 
get options; @@ -2496,6 +2507,7 @@ class __$$GenerateChatCompletionRequestImplCopyWithImpl<$Res> Object? options = freezed, Object? stream = null, Object? keepAlive = freezed, + Object? tools = freezed, }) { return _then(_$GenerateChatCompletionRequestImpl( model: null == model @@ -2522,6 +2534,10 @@ class __$$GenerateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.keepAlive : keepAlive // ignore: cast_nullable_to_non_nullable as int?, + tools: freezed == tools + ? _value._tools + : tools // ignore: cast_nullable_to_non_nullable + as List?, )); } } @@ -2539,8 +2555,10 @@ class _$GenerateChatCompletionRequestImpl this.format, @JsonKey(includeIfNull: false) this.options, this.stream = false, - @JsonKey(name: 'keep_alive', includeIfNull: false) this.keepAlive}) + @JsonKey(name: 'keep_alive', includeIfNull: false) this.keepAlive, + @JsonKey(includeIfNull: false) final List? tools}) : _messages = messages, + _tools = tools, super._(); factory _$GenerateChatCompletionRequestImpl.fromJson( @@ -2594,9 +2612,23 @@ class _$GenerateChatCompletionRequestImpl @JsonKey(name: 'keep_alive', includeIfNull: false) final int? keepAlive; + /// A list of tools the model may call. + final List? _tools; + + /// A list of tools the model may call. + @override + @JsonKey(includeIfNull: false) + List? get tools { + final value = _tools; + if (value == null) return null; + if (_tools is EqualUnmodifiableListView) return _tools; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + @override String toString() { - return 'GenerateChatCompletionRequest(model: $model, messages: $messages, format: $format, options: $options, stream: $stream, keepAlive: $keepAlive)'; + return 'GenerateChatCompletionRequest(model: $model, messages: $messages, format: $format, options: $options, stream: $stream, keepAlive: $keepAlive, tools: $tools)'; } @override @@ -2610,7 +2642,8 @@ class _$GenerateChatCompletionRequestImpl (identical(other.options, options) || other.options == options) && (identical(other.stream, stream) || other.stream == stream) && (identical(other.keepAlive, keepAlive) || - other.keepAlive == keepAlive)); + other.keepAlive == keepAlive) && + const DeepCollectionEquality().equals(other._tools, _tools)); } @JsonKey(ignore: true) @@ -2622,7 +2655,8 @@ class _$GenerateChatCompletionRequestImpl format, options, stream, - keepAlive); + keepAlive, + const DeepCollectionEquality().hash(_tools)); @JsonKey(ignore: true) @override @@ -2651,8 +2685,9 @@ abstract class _GenerateChatCompletionRequest final ResponseFormat? format, @JsonKey(includeIfNull: false) final RequestOptions? options, final bool stream, - @JsonKey(name: 'keep_alive', includeIfNull: false) - final int? keepAlive}) = _$GenerateChatCompletionRequestImpl; + @JsonKey(name: 'keep_alive', includeIfNull: false) final int? keepAlive, + @JsonKey(includeIfNull: false) + final List? tools}) = _$GenerateChatCompletionRequestImpl; const _GenerateChatCompletionRequest._() : super._(); factory _GenerateChatCompletionRequest.fromJson(Map json) = @@ -2698,6 +2733,11 @@ abstract class _GenerateChatCompletionRequest @JsonKey(name: 'keep_alive', includeIfNull: false) int? get keepAlive; @override + + /// A list of tools the model may call. + @JsonKey(includeIfNull: false) + List? 
get tools; + @override @JsonKey(ignore: true) _$$GenerateChatCompletionRequestImplCopyWith< _$GenerateChatCompletionRequestImpl> @@ -2712,22 +2752,19 @@ GenerateChatCompletionResponse _$GenerateChatCompletionResponseFromJson( /// @nodoc mixin _$GenerateChatCompletionResponse { /// A message in the chat endpoint - @JsonKey(includeIfNull: false) - Message? get message => throw _privateConstructorUsedError; + Message get message => throw _privateConstructorUsedError; /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) - String? get model => throw _privateConstructorUsedError; + String get model => throw _privateConstructorUsedError; /// Date on which a model was created. - @JsonKey(name: 'created_at', includeIfNull: false) - String? get createdAt => throw _privateConstructorUsedError; + @JsonKey(name: 'created_at') + String get createdAt => throw _privateConstructorUsedError; /// Whether the response has completed. - @JsonKey(includeIfNull: false) - bool? get done => throw _privateConstructorUsedError; + bool get done => throw _privateConstructorUsedError; /// Reason why the model is done generating a response. @JsonKey( @@ -2775,10 +2812,10 @@ abstract class $GenerateChatCompletionResponseCopyWith<$Res> { GenerateChatCompletionResponse>; @useResult $Res call( - {@JsonKey(includeIfNull: false) Message? message, - @JsonKey(includeIfNull: false) String? model, - @JsonKey(name: 'created_at', includeIfNull: false) String? createdAt, - @JsonKey(includeIfNull: false) bool? done, + {Message message, + String model, + @JsonKey(name: 'created_at') String createdAt, + bool done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -2793,7 +2830,7 @@ abstract class $GenerateChatCompletionResponseCopyWith<$Res> { @JsonKey(name: 'eval_count', includeIfNull: false) int? evalCount, @JsonKey(name: 'eval_duration', includeIfNull: false) int? evalDuration}); - $MessageCopyWith<$Res>? get message; + $MessageCopyWith<$Res> get message; } /// @nodoc @@ -2810,10 +2847,10 @@ class _$GenerateChatCompletionResponseCopyWithImpl<$Res, @pragma('vm:prefer-inline') @override $Res call({ - Object? message = freezed, - Object? model = freezed, - Object? createdAt = freezed, - Object? done = freezed, + Object? message = null, + Object? model = null, + Object? createdAt = null, + Object? done = null, Object? doneReason = freezed, Object? totalDuration = freezed, Object? loadDuration = freezed, @@ -2823,22 +2860,22 @@ class _$GenerateChatCompletionResponseCopyWithImpl<$Res, Object? evalDuration = freezed, }) { return _then(_value.copyWith( - message: freezed == message + message: null == message ? _value.message : message // ignore: cast_nullable_to_non_nullable - as Message?, - model: freezed == model + as Message, + model: null == model ? _value.model : model // ignore: cast_nullable_to_non_nullable - as String?, - createdAt: freezed == createdAt + as String, + createdAt: null == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable - as String?, - done: freezed == done + as String, + done: null == done ? _value.done : done // ignore: cast_nullable_to_non_nullable - as bool?, + as bool, doneReason: freezed == doneReason ? 
_value.doneReason : doneReason // ignore: cast_nullable_to_non_nullable @@ -2872,12 +2909,8 @@ class _$GenerateChatCompletionResponseCopyWithImpl<$Res, @override @pragma('vm:prefer-inline') - $MessageCopyWith<$Res>? get message { - if (_value.message == null) { - return null; - } - - return $MessageCopyWith<$Res>(_value.message!, (value) { + $MessageCopyWith<$Res> get message { + return $MessageCopyWith<$Res>(_value.message, (value) { return _then(_value.copyWith(message: value) as $Val); }); } @@ -2893,10 +2926,10 @@ abstract class _$$GenerateChatCompletionResponseImplCopyWith<$Res> @override @useResult $Res call( - {@JsonKey(includeIfNull: false) Message? message, - @JsonKey(includeIfNull: false) String? model, - @JsonKey(name: 'created_at', includeIfNull: false) String? createdAt, - @JsonKey(includeIfNull: false) bool? done, + {Message message, + String model, + @JsonKey(name: 'created_at') String createdAt, + bool done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -2912,7 +2945,7 @@ abstract class _$$GenerateChatCompletionResponseImplCopyWith<$Res> @JsonKey(name: 'eval_duration', includeIfNull: false) int? evalDuration}); @override - $MessageCopyWith<$Res>? get message; + $MessageCopyWith<$Res> get message; } /// @nodoc @@ -2928,10 +2961,10 @@ class __$$GenerateChatCompletionResponseImplCopyWithImpl<$Res> @pragma('vm:prefer-inline') @override $Res call({ - Object? message = freezed, - Object? model = freezed, - Object? createdAt = freezed, - Object? done = freezed, + Object? message = null, + Object? model = null, + Object? createdAt = null, + Object? done = null, Object? doneReason = freezed, Object? totalDuration = freezed, Object? loadDuration = freezed, @@ -2941,22 +2974,22 @@ class __$$GenerateChatCompletionResponseImplCopyWithImpl<$Res> Object? evalDuration = freezed, }) { return _then(_$GenerateChatCompletionResponseImpl( - message: freezed == message + message: null == message ? _value.message : message // ignore: cast_nullable_to_non_nullable - as Message?, - model: freezed == model + as Message, + model: null == model ? _value.model : model // ignore: cast_nullable_to_non_nullable - as String?, - createdAt: freezed == createdAt + as String, + createdAt: null == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable - as String?, - done: freezed == done + as String, + done: null == done ? _value.done : done // ignore: cast_nullable_to_non_nullable - as bool?, + as bool, doneReason: freezed == doneReason ? _value.doneReason : doneReason // ignore: cast_nullable_to_non_nullable @@ -2994,10 +3027,10 @@ class __$$GenerateChatCompletionResponseImplCopyWithImpl<$Res> class _$GenerateChatCompletionResponseImpl extends _GenerateChatCompletionResponse { const _$GenerateChatCompletionResponseImpl( - {@JsonKey(includeIfNull: false) this.message, - @JsonKey(includeIfNull: false) this.model, - @JsonKey(name: 'created_at', includeIfNull: false) this.createdAt, - @JsonKey(includeIfNull: false) this.done, + {required this.message, + required this.model, + @JsonKey(name: 'created_at') required this.createdAt, + required this.done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -3019,25 +3052,22 @@ class _$GenerateChatCompletionResponseImpl /// A message in the chat endpoint @override - @JsonKey(includeIfNull: false) - final Message? message; + final Message message; /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. 
The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. @override - @JsonKey(includeIfNull: false) - final String? model; + final String model; /// Date on which a model was created. @override - @JsonKey(name: 'created_at', includeIfNull: false) - final String? createdAt; + @JsonKey(name: 'created_at') + final String createdAt; /// Whether the response has completed. @override - @JsonKey(includeIfNull: false) - final bool? done; + final bool done; /// Reason why the model is done generating a response. @override @@ -3143,11 +3173,10 @@ class _$GenerateChatCompletionResponseImpl abstract class _GenerateChatCompletionResponse extends GenerateChatCompletionResponse { const factory _GenerateChatCompletionResponse( - {@JsonKey(includeIfNull: false) final Message? message, - @JsonKey(includeIfNull: false) final String? model, - @JsonKey(name: 'created_at', includeIfNull: false) - final String? createdAt, - @JsonKey(includeIfNull: false) final bool? done, + {required final Message message, + required final String model, + @JsonKey(name: 'created_at') required final String createdAt, + required final bool done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -3172,25 +3201,22 @@ abstract class _GenerateChatCompletionResponse @override /// A message in the chat endpoint - @JsonKey(includeIfNull: false) - Message? get message; + Message get message; @override /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) - String? get model; + String get model; @override /// Date on which a model was created. - @JsonKey(name: 'created_at', includeIfNull: false) - String? get createdAt; + @JsonKey(name: 'created_at') + String get createdAt; @override /// Whether the response has completed. - @JsonKey(includeIfNull: false) - bool? get done; + bool get done; @override /// Reason why the model is done generating a response. @@ -3252,6 +3278,10 @@ mixin _$Message { @JsonKey(includeIfNull: false) List? get images => throw _privateConstructorUsedError; + /// A list of tools the model wants to call. + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls => throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $MessageCopyWith get copyWith => throw _privateConstructorUsedError; @@ -3265,7 +3295,9 @@ abstract class $MessageCopyWith<$Res> { $Res call( {MessageRole role, String content, - @JsonKey(includeIfNull: false) List? images}); + @JsonKey(includeIfNull: false) List? images, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls}); } /// @nodoc @@ -3284,6 +3316,7 @@ class _$MessageCopyWithImpl<$Res, $Val extends Message> Object? role = null, Object? content = null, Object? images = freezed, + Object? toolCalls = freezed, }) { return _then(_value.copyWith( role: null == role @@ -3298,6 +3331,10 @@ class _$MessageCopyWithImpl<$Res, $Val extends Message> ? _value.images : images // ignore: cast_nullable_to_non_nullable as List?, + toolCalls: freezed == toolCalls + ? 
_value.toolCalls + : toolCalls // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } } @@ -3312,7 +3349,9 @@ abstract class _$$MessageImplCopyWith<$Res> implements $MessageCopyWith<$Res> { $Res call( {MessageRole role, String content, - @JsonKey(includeIfNull: false) List? images}); + @JsonKey(includeIfNull: false) List? images, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls}); } /// @nodoc @@ -3329,6 +3368,7 @@ class __$$MessageImplCopyWithImpl<$Res> Object? role = null, Object? content = null, Object? images = freezed, + Object? toolCalls = freezed, }) { return _then(_$MessageImpl( role: null == role @@ -3343,6 +3383,10 @@ class __$$MessageImplCopyWithImpl<$Res> ? _value._images : images // ignore: cast_nullable_to_non_nullable as List?, + toolCalls: freezed == toolCalls + ? _value._toolCalls + : toolCalls // ignore: cast_nullable_to_non_nullable + as List?, )); } } @@ -3353,8 +3397,11 @@ class _$MessageImpl extends _Message { const _$MessageImpl( {required this.role, required this.content, - @JsonKey(includeIfNull: false) final List? images}) + @JsonKey(includeIfNull: false) final List? images, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls}) : _images = images, + _toolCalls = toolCalls, super._(); factory _$MessageImpl.fromJson(Map json) => @@ -3382,9 +3429,23 @@ class _$MessageImpl extends _Message { return EqualUnmodifiableListView(value); } + /// A list of tools the model wants to call. + final List? _toolCalls; + + /// A list of tools the model wants to call. + @override + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls { + final value = _toolCalls; + if (value == null) return null; + if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + @override String toString() { - return 'Message(role: $role, content: $content, images: $images)'; + return 'Message(role: $role, content: $content, images: $images, toolCalls: $toolCalls)'; } @override @@ -3394,13 +3455,19 @@ class _$MessageImpl extends _Message { other is _$MessageImpl && (identical(other.role, role) || other.role == role) && (identical(other.content, content) || other.content == content) && - const DeepCollectionEquality().equals(other._images, _images)); + const DeepCollectionEquality().equals(other._images, _images) && + const DeepCollectionEquality() + .equals(other._toolCalls, _toolCalls)); } @JsonKey(ignore: true) @override int get hashCode => Object.hash( - runtimeType, role, content, const DeepCollectionEquality().hash(_images)); + runtimeType, + role, + content, + const DeepCollectionEquality().hash(_images), + const DeepCollectionEquality().hash(_toolCalls)); @JsonKey(ignore: true) @override @@ -3418,10 +3485,11 @@ class _$MessageImpl extends _Message { abstract class _Message extends Message { const factory _Message( - {required final MessageRole role, - required final String content, - @JsonKey(includeIfNull: false) final List? images}) = - _$MessageImpl; + {required final MessageRole role, + required final String content, + @JsonKey(includeIfNull: false) final List? images, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls}) = _$MessageImpl; const _Message._() : super._(); factory _Message.fromJson(Map json) = _$MessageImpl.fromJson; @@ -3440,11 +3508,749 @@ abstract class _Message extends Message { @JsonKey(includeIfNull: false) List? get images; @override + + /// A list of tools the model wants to call. 
+ @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls; + @override @JsonKey(ignore: true) _$$MessageImplCopyWith<_$MessageImpl> get copyWith => throw _privateConstructorUsedError; } +Tool _$ToolFromJson(Map json) { + return _Tool.fromJson(json); +} + +/// @nodoc +mixin _$Tool { + /// The type of tool. + ToolType get type => throw _privateConstructorUsedError; + + /// A function that the model may call. + @JsonKey(includeIfNull: false) + ToolFunction? get function => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolCopyWith<$Res> { + factory $ToolCopyWith(Tool value, $Res Function(Tool) then) = + _$ToolCopyWithImpl<$Res, Tool>; + @useResult + $Res call( + {ToolType type, @JsonKey(includeIfNull: false) ToolFunction? function}); + + $ToolFunctionCopyWith<$Res>? get function; +} + +/// @nodoc +class _$ToolCopyWithImpl<$Res, $Val extends Tool> + implements $ToolCopyWith<$Res> { + _$ToolCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? function = freezed, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ToolType, + function: freezed == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as ToolFunction?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ToolFunctionCopyWith<$Res>? get function { + if (_value.function == null) { + return null; + } + + return $ToolFunctionCopyWith<$Res>(_value.function!, (value) { + return _then(_value.copyWith(function: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$ToolImplCopyWith<$Res> implements $ToolCopyWith<$Res> { + factory _$$ToolImplCopyWith( + _$ToolImpl value, $Res Function(_$ToolImpl) then) = + __$$ToolImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {ToolType type, @JsonKey(includeIfNull: false) ToolFunction? function}); + + @override + $ToolFunctionCopyWith<$Res>? get function; +} + +/// @nodoc +class __$$ToolImplCopyWithImpl<$Res> + extends _$ToolCopyWithImpl<$Res, _$ToolImpl> + implements _$$ToolImplCopyWith<$Res> { + __$$ToolImplCopyWithImpl(_$ToolImpl _value, $Res Function(_$ToolImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? function = freezed, + }) { + return _then(_$ToolImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ToolType, + function: freezed == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as ToolFunction?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolImpl extends _Tool { + const _$ToolImpl( + {this.type = ToolType.function, + @JsonKey(includeIfNull: false) this.function}) + : super._(); + + factory _$ToolImpl.fromJson(Map json) => + _$$ToolImplFromJson(json); + + /// The type of tool. + @override + @JsonKey() + final ToolType type; + + /// A function that the model may call. + @override + @JsonKey(includeIfNull: false) + final ToolFunction? 
function; + + @override + String toString() { + return 'Tool(type: $type, function: $function)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.function, function) || + other.function == function)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type, function); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolImplCopyWith<_$ToolImpl> get copyWith => + __$$ToolImplCopyWithImpl<_$ToolImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ToolImplToJson( + this, + ); + } +} + +abstract class _Tool extends Tool { + const factory _Tool( + {final ToolType type, + @JsonKey(includeIfNull: false) final ToolFunction? function}) = + _$ToolImpl; + const _Tool._() : super._(); + + factory _Tool.fromJson(Map json) = _$ToolImpl.fromJson; + + @override + + /// The type of tool. + ToolType get type; + @override + + /// A function that the model may call. + @JsonKey(includeIfNull: false) + ToolFunction? get function; + @override + @JsonKey(ignore: true) + _$$ToolImplCopyWith<_$ToolImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ToolFunction _$ToolFunctionFromJson(Map json) { + return _ToolFunction.fromJson(json); +} + +/// @nodoc +mixin _$ToolFunction { + /// The name of the function to be called. + String get name => throw _privateConstructorUsedError; + + /// A description of what the function does, used by the model to choose when and how to call the function. + String get description => throw _privateConstructorUsedError; + + /// The parameters the functions accepts, described as a JSON Schema object. + Map get parameters => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolFunctionCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolFunctionCopyWith<$Res> { + factory $ToolFunctionCopyWith( + ToolFunction value, $Res Function(ToolFunction) then) = + _$ToolFunctionCopyWithImpl<$Res, ToolFunction>; + @useResult + $Res call({String name, String description, Map parameters}); +} + +/// @nodoc +class _$ToolFunctionCopyWithImpl<$Res, $Val extends ToolFunction> + implements $ToolFunctionCopyWith<$Res> { + _$ToolFunctionCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = null, + Object? parameters = null, + }) { + return _then(_value.copyWith( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: null == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String, + parameters: null == parameters + ? 
_value.parameters + : parameters // ignore: cast_nullable_to_non_nullable + as Map, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ToolFunctionImplCopyWith<$Res> + implements $ToolFunctionCopyWith<$Res> { + factory _$$ToolFunctionImplCopyWith( + _$ToolFunctionImpl value, $Res Function(_$ToolFunctionImpl) then) = + __$$ToolFunctionImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String name, String description, Map parameters}); +} + +/// @nodoc +class __$$ToolFunctionImplCopyWithImpl<$Res> + extends _$ToolFunctionCopyWithImpl<$Res, _$ToolFunctionImpl> + implements _$$ToolFunctionImplCopyWith<$Res> { + __$$ToolFunctionImplCopyWithImpl( + _$ToolFunctionImpl _value, $Res Function(_$ToolFunctionImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = null, + Object? parameters = null, + }) { + return _then(_$ToolFunctionImpl( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: null == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String, + parameters: null == parameters + ? _value._parameters + : parameters // ignore: cast_nullable_to_non_nullable + as Map, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolFunctionImpl extends _ToolFunction { + const _$ToolFunctionImpl( + {required this.name, + required this.description, + required final Map parameters}) + : _parameters = parameters, + super._(); + + factory _$ToolFunctionImpl.fromJson(Map json) => + _$$ToolFunctionImplFromJson(json); + + /// The name of the function to be called. + @override + final String name; + + /// A description of what the function does, used by the model to choose when and how to call the function. + @override + final String description; + + /// The parameters the functions accepts, described as a JSON Schema object. + final Map _parameters; + + /// The parameters the functions accepts, described as a JSON Schema object. 
+ @override + Map get parameters { + if (_parameters is EqualUnmodifiableMapView) return _parameters; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_parameters); + } + + @override + String toString() { + return 'ToolFunction(name: $name, description: $description, parameters: $parameters)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolFunctionImpl && + (identical(other.name, name) || other.name == name) && + (identical(other.description, description) || + other.description == description) && + const DeepCollectionEquality() + .equals(other._parameters, _parameters)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, name, description, + const DeepCollectionEquality().hash(_parameters)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolFunctionImplCopyWith<_$ToolFunctionImpl> get copyWith => + __$$ToolFunctionImplCopyWithImpl<_$ToolFunctionImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ToolFunctionImplToJson( + this, + ); + } +} + +abstract class _ToolFunction extends ToolFunction { + const factory _ToolFunction( + {required final String name, + required final String description, + required final Map parameters}) = _$ToolFunctionImpl; + const _ToolFunction._() : super._(); + + factory _ToolFunction.fromJson(Map json) = + _$ToolFunctionImpl.fromJson; + + @override + + /// The name of the function to be called. + String get name; + @override + + /// A description of what the function does, used by the model to choose when and how to call the function. + String get description; + @override + + /// The parameters the functions accepts, described as a JSON Schema object. + Map get parameters; + @override + @JsonKey(ignore: true) + _$$ToolFunctionImplCopyWith<_$ToolFunctionImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ToolCall _$ToolCallFromJson(Map json) { + return _ToolCall.fromJson(json); +} + +/// @nodoc +mixin _$ToolCall { + /// The function the model wants to call. + @JsonKey(includeIfNull: false) + ToolCallFunction? get function => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolCallCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolCallCopyWith<$Res> { + factory $ToolCallCopyWith(ToolCall value, $Res Function(ToolCall) then) = + _$ToolCallCopyWithImpl<$Res, ToolCall>; + @useResult + $Res call({@JsonKey(includeIfNull: false) ToolCallFunction? function}); + + $ToolCallFunctionCopyWith<$Res>? get function; +} + +/// @nodoc +class _$ToolCallCopyWithImpl<$Res, $Val extends ToolCall> + implements $ToolCallCopyWith<$Res> { + _$ToolCallCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? function = freezed, + }) { + return _then(_value.copyWith( + function: freezed == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as ToolCallFunction?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ToolCallFunctionCopyWith<$Res>? 
get function { + if (_value.function == null) { + return null; + } + + return $ToolCallFunctionCopyWith<$Res>(_value.function!, (value) { + return _then(_value.copyWith(function: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$ToolCallImplCopyWith<$Res> + implements $ToolCallCopyWith<$Res> { + factory _$$ToolCallImplCopyWith( + _$ToolCallImpl value, $Res Function(_$ToolCallImpl) then) = + __$$ToolCallImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(includeIfNull: false) ToolCallFunction? function}); + + @override + $ToolCallFunctionCopyWith<$Res>? get function; +} + +/// @nodoc +class __$$ToolCallImplCopyWithImpl<$Res> + extends _$ToolCallCopyWithImpl<$Res, _$ToolCallImpl> + implements _$$ToolCallImplCopyWith<$Res> { + __$$ToolCallImplCopyWithImpl( + _$ToolCallImpl _value, $Res Function(_$ToolCallImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? function = freezed, + }) { + return _then(_$ToolCallImpl( + function: freezed == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as ToolCallFunction?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolCallImpl extends _ToolCall { + const _$ToolCallImpl({@JsonKey(includeIfNull: false) this.function}) + : super._(); + + factory _$ToolCallImpl.fromJson(Map json) => + _$$ToolCallImplFromJson(json); + + /// The function the model wants to call. + @override + @JsonKey(includeIfNull: false) + final ToolCallFunction? function; + + @override + String toString() { + return 'ToolCall(function: $function)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolCallImpl && + (identical(other.function, function) || + other.function == function)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, function); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolCallImplCopyWith<_$ToolCallImpl> get copyWith => + __$$ToolCallImplCopyWithImpl<_$ToolCallImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ToolCallImplToJson( + this, + ); + } +} + +abstract class _ToolCall extends ToolCall { + const factory _ToolCall( + {@JsonKey(includeIfNull: false) final ToolCallFunction? function}) = + _$ToolCallImpl; + const _ToolCall._() : super._(); + + factory _ToolCall.fromJson(Map json) = + _$ToolCallImpl.fromJson; + + @override + + /// The function the model wants to call. + @JsonKey(includeIfNull: false) + ToolCallFunction? get function; + @override + @JsonKey(ignore: true) + _$$ToolCallImplCopyWith<_$ToolCallImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ToolCallFunction _$ToolCallFunctionFromJson(Map json) { + return _ToolCallFunction.fromJson(json); +} + +/// @nodoc +mixin _$ToolCallFunction { + /// The name of the function to be called. + String get name => throw _privateConstructorUsedError; + + /// The arguments to pass to the function. 
+ Map get arguments => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolCallFunctionCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolCallFunctionCopyWith<$Res> { + factory $ToolCallFunctionCopyWith( + ToolCallFunction value, $Res Function(ToolCallFunction) then) = + _$ToolCallFunctionCopyWithImpl<$Res, ToolCallFunction>; + @useResult + $Res call({String name, Map arguments}); +} + +/// @nodoc +class _$ToolCallFunctionCopyWithImpl<$Res, $Val extends ToolCallFunction> + implements $ToolCallFunctionCopyWith<$Res> { + _$ToolCallFunctionCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? arguments = null, + }) { + return _then(_value.copyWith( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + arguments: null == arguments + ? _value.arguments + : arguments // ignore: cast_nullable_to_non_nullable + as Map, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ToolCallFunctionImplCopyWith<$Res> + implements $ToolCallFunctionCopyWith<$Res> { + factory _$$ToolCallFunctionImplCopyWith(_$ToolCallFunctionImpl value, + $Res Function(_$ToolCallFunctionImpl) then) = + __$$ToolCallFunctionImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String name, Map arguments}); +} + +/// @nodoc +class __$$ToolCallFunctionImplCopyWithImpl<$Res> + extends _$ToolCallFunctionCopyWithImpl<$Res, _$ToolCallFunctionImpl> + implements _$$ToolCallFunctionImplCopyWith<$Res> { + __$$ToolCallFunctionImplCopyWithImpl(_$ToolCallFunctionImpl _value, + $Res Function(_$ToolCallFunctionImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? arguments = null, + }) { + return _then(_$ToolCallFunctionImpl( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + arguments: null == arguments + ? _value._arguments + : arguments // ignore: cast_nullable_to_non_nullable + as Map, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolCallFunctionImpl extends _ToolCallFunction { + const _$ToolCallFunctionImpl( + {required this.name, required final Map arguments}) + : _arguments = arguments, + super._(); + + factory _$ToolCallFunctionImpl.fromJson(Map json) => + _$$ToolCallFunctionImplFromJson(json); + + /// The name of the function to be called. + @override + final String name; + + /// The arguments to pass to the function. + final Map _arguments; + + /// The arguments to pass to the function. 
+ @override + Map get arguments { + if (_arguments is EqualUnmodifiableMapView) return _arguments; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_arguments); + } + + @override + String toString() { + return 'ToolCallFunction(name: $name, arguments: $arguments)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolCallFunctionImpl && + (identical(other.name, name) || other.name == name) && + const DeepCollectionEquality() + .equals(other._arguments, _arguments)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, name, const DeepCollectionEquality().hash(_arguments)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolCallFunctionImplCopyWith<_$ToolCallFunctionImpl> get copyWith => + __$$ToolCallFunctionImplCopyWithImpl<_$ToolCallFunctionImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ToolCallFunctionImplToJson( + this, + ); + } +} + +abstract class _ToolCallFunction extends ToolCallFunction { + const factory _ToolCallFunction( + {required final String name, + required final Map arguments}) = _$ToolCallFunctionImpl; + const _ToolCallFunction._() : super._(); + + factory _ToolCallFunction.fromJson(Map json) = + _$ToolCallFunctionImpl.fromJson; + + @override + + /// The name of the function to be called. + String get name; + @override + + /// The arguments to pass to the function. + Map get arguments; + @override + @JsonKey(ignore: true) + _$$ToolCallFunctionImplCopyWith<_$ToolCallFunctionImpl> get copyWith => + throw _privateConstructorUsedError; +} + GenerateEmbeddingRequest _$GenerateEmbeddingRequestFromJson( Map json) { return _GenerateEmbeddingRequest.fromJson(json); diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index e5d46d53..fbf45bc0 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -211,6 +211,9 @@ _$GenerateChatCompletionRequestImpl json['options'] as Map), stream: json['stream'] as bool? ?? false, keepAlive: json['keep_alive'] as int?, + tools: (json['tools'] as List?) + ?.map((e) => Tool.fromJson(e as Map)) + .toList(), ); Map _$$GenerateChatCompletionRequestImplToJson( @@ -230,18 +233,17 @@ Map _$$GenerateChatCompletionRequestImplToJson( writeNotNull('options', instance.options?.toJson()); val['stream'] = instance.stream; writeNotNull('keep_alive', instance.keepAlive); + writeNotNull('tools', instance.tools?.map((e) => e.toJson()).toList()); return val; } _$GenerateChatCompletionResponseImpl _$$GenerateChatCompletionResponseImplFromJson(Map json) => _$GenerateChatCompletionResponseImpl( - message: json['message'] == null - ? 
null - : Message.fromJson(json['message'] as Map), - model: json['model'] as String?, - createdAt: json['created_at'] as String?, - done: json['done'] as bool?, + message: Message.fromJson(json['message'] as Map), + model: json['model'] as String, + createdAt: json['created_at'] as String, + done: json['done'] as bool, doneReason: $enumDecodeNullable( _$DoneReasonEnumMap, json['done_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), @@ -255,7 +257,12 @@ _$GenerateChatCompletionResponseImpl Map _$$GenerateChatCompletionResponseImplToJson( _$GenerateChatCompletionResponseImpl instance) { - final val = {}; + final val = { + 'message': instance.message.toJson(), + 'model': instance.model, + 'created_at': instance.createdAt, + 'done': instance.done, + }; void writeNotNull(String key, dynamic value) { if (value != null) { @@ -263,10 +270,6 @@ Map _$$GenerateChatCompletionResponseImplToJson( } } - writeNotNull('message', instance.message?.toJson()); - writeNotNull('model', instance.model); - writeNotNull('created_at', instance.createdAt); - writeNotNull('done', instance.done); writeNotNull('done_reason', _$DoneReasonEnumMap[instance.doneReason]); writeNotNull('total_duration', instance.totalDuration); writeNotNull('load_duration', instance.loadDuration); @@ -289,6 +292,9 @@ _$MessageImpl _$$MessageImplFromJson(Map json) => content: json['content'] as String, images: (json['images'] as List?)?.map((e) => e as String).toList(), + toolCalls: (json['tool_calls'] as List?) + ?.map((e) => ToolCall.fromJson(e as Map)) + .toList(), ); Map _$$MessageImplToJson(_$MessageImpl instance) { @@ -304,6 +310,8 @@ Map _$$MessageImplToJson(_$MessageImpl instance) { } writeNotNull('images', instance.images); + writeNotNull( + 'tool_calls', instance.toolCalls?.map((e) => e.toJson()).toList()); return val; } @@ -311,8 +319,84 @@ const _$MessageRoleEnumMap = { MessageRole.system: 'system', MessageRole.user: 'user', MessageRole.assistant: 'assistant', + MessageRole.tool: 'tool', +}; + +_$ToolImpl _$$ToolImplFromJson(Map json) => _$ToolImpl( + type: $enumDecodeNullable(_$ToolTypeEnumMap, json['type']) ?? + ToolType.function, + function: json['function'] == null + ? null + : ToolFunction.fromJson(json['function'] as Map), + ); + +Map _$$ToolImplToJson(_$ToolImpl instance) { + final val = { + 'type': _$ToolTypeEnumMap[instance.type]!, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('function', instance.function?.toJson()); + return val; +} + +const _$ToolTypeEnumMap = { + ToolType.function: 'function', }; +_$ToolFunctionImpl _$$ToolFunctionImplFromJson(Map json) => + _$ToolFunctionImpl( + name: json['name'] as String, + description: json['description'] as String, + parameters: json['parameters'] as Map, + ); + +Map _$$ToolFunctionImplToJson(_$ToolFunctionImpl instance) => + { + 'name': instance.name, + 'description': instance.description, + 'parameters': instance.parameters, + }; + +_$ToolCallImpl _$$ToolCallImplFromJson(Map json) => + _$ToolCallImpl( + function: json['function'] == null + ? 
null + : ToolCallFunction.fromJson(json['function'] as Map), + ); + +Map _$$ToolCallImplToJson(_$ToolCallImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('function', instance.function?.toJson()); + return val; +} + +_$ToolCallFunctionImpl _$$ToolCallFunctionImplFromJson( + Map json) => + _$ToolCallFunctionImpl( + name: json['name'] as String, + arguments: json['arguments'] as Map, + ); + +Map _$$ToolCallFunctionImplToJson( + _$ToolCallFunctionImpl instance) => + { + 'name': instance.name, + 'arguments': instance.arguments, + }; + _$GenerateEmbeddingRequestImpl _$$GenerateEmbeddingRequestImplFromJson( Map json) => _$GenerateEmbeddingRequestImpl( diff --git a/packages/ollama_dart/lib/src/generated/schema/tool.dart b/packages/ollama_dart/lib/src/generated/schema/tool.dart new file mode 100644 index 00000000..4a225d1a --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool.dart @@ -0,0 +1,53 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: Tool +// ========================================== + +/// A tool the model may call. +@freezed +class Tool with _$Tool { + const Tool._(); + + /// Factory constructor for Tool + const factory Tool({ + /// The type of tool. + @Default(ToolType.function) ToolType type, + + /// A function that the model may call. + @JsonKey(includeIfNull: false) ToolFunction? function, + }) = _Tool; + + /// Object construction from a JSON representation + factory Tool.fromJson(Map json) => _$ToolFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['type', 'function']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'type': type, + 'function': function, + }; + } +} + +// ========================================== +// ENUM: ToolType +// ========================================== + +/// The type of tool. +enum ToolType { + @JsonValue('function') + function, +} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_call.dart b/packages/ollama_dart/lib/src/generated/schema/tool_call.dart new file mode 100644 index 00000000..ec1d82e0 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_call.dart @@ -0,0 +1,40 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ToolCall +// ========================================== + +/// The tool the model wants to call. +@freezed +class ToolCall with _$ToolCall { + const ToolCall._(); + + /// Factory constructor for ToolCall + const factory ToolCall({ + /// The function the model wants to call. + @JsonKey(includeIfNull: false) ToolCallFunction? function, + }) = _ToolCall; + + /// Object construction from a JSON representation + factory ToolCall.fromJson(Map json) => + _$ToolCallFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['function']; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'function': function, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart b/packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart new file mode 100644 index 00000000..4d5e969c --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart @@ -0,0 +1,44 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ToolCallFunction +// ========================================== + +/// The function the model wants to call. +@freezed +class ToolCallFunction with _$ToolCallFunction { + const ToolCallFunction._(); + + /// Factory constructor for ToolCallFunction + const factory ToolCallFunction({ + /// The name of the function to be called. + required String name, + + /// The arguments to pass to the function. + required ToolCallFunctionArgs arguments, + }) = _ToolCallFunction; + + /// Object construction from a JSON representation + factory ToolCallFunction.fromJson(Map json) => + _$ToolCallFunctionFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['name', 'arguments']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'name': name, + 'arguments': arguments, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart b/packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart new file mode 100644 index 00000000..a1d7d7b8 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart @@ -0,0 +1,12 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// TYPE: ToolCallFunctionArgs +// ========================================== + +/// The arguments to pass to the function. +typedef ToolCallFunctionArgs = Map; diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_function.dart b/packages/ollama_dart/lib/src/generated/schema/tool_function.dart new file mode 100644 index 00000000..35d5e8f1 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_function.dart @@ -0,0 +1,52 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ToolFunction +// ========================================== + +/// A function that the model may call. +@freezed +class ToolFunction with _$ToolFunction { + const ToolFunction._(); + + /// Factory constructor for ToolFunction + const factory ToolFunction({ + /// The name of the function to be called. + required String name, + + /// A description of what the function does, used by the model to choose when and how to call the function. + required String description, + + /// The parameters the functions accepts, described as a JSON Schema object. 
+ required ToolFunctionParams parameters, + }) = _ToolFunction; + + /// Object construction from a JSON representation + factory ToolFunction.fromJson(Map json) => + _$ToolFunctionFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'name', + 'description', + 'parameters' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'name': name, + 'description': description, + 'parameters': parameters, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart b/packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart new file mode 100644 index 00000000..89fa74fb --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart @@ -0,0 +1,12 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// TYPE: ToolFunctionParams +// ========================================== + +/// The parameters the functions accepts, described as a JSON Schema object. +typedef ToolFunctionParams = Map; diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index 540b7141..a0a42067 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -602,6 +602,11 @@ components: description: *stream default: false keep_alive: *keep_alive + tools: + type: array + description: A list of tools the model may call. + items: + $ref: '#/components/schemas/Tool' required: - model - messages @@ -654,6 +659,11 @@ components: format: int64 description: Time in nanoseconds spent generating the response. example: 1325948000 + required: + - model + - created_at + - message + - done DoneReason: type: string description: Reason why the model is done generating a response. @@ -668,7 +678,7 @@ components: role: type: string description: The role of the message - enum: [ "system", "user", "assistant" ] + enum: [ "system", "user", "assistant", "tool" ] content: type: string description: The content of the message @@ -680,9 +690,69 @@ components: type: string description: Base64-encoded image (for multimodal models such as llava) example: iVBORw0KGgoAAAANSUhEUgAAAAkAAAANCAIAAAD0YtNRAAAABnRSTlMA/AD+APzoM1ogAAAAWklEQVR4AWP48+8PLkR7uUdzcMvtU8EhdykHKAciEXL3pvw5FQIURaBDJkARoDhY3zEXiCgCHbNBmAlUiyaBkENoxZSDWnOtBmoAQu7TnT+3WuDOA7KBIkAGAGwiNeqjusp/AAAAAElFTkSuQmCC + tool_calls: + type: array + description: A list of tools the model wants to call. + items: + $ref: '#/components/schemas/ToolCall' required: - role - content + Tool: + type: object + description: A tool the model may call. + properties: + type: + type: string + enum: + - function + default: function + description: The type of tool. + function: + $ref: '#/components/schemas/ToolFunction' + ToolFunction: + type: object + description: A function that the model may call. + properties: + name: + type: string + description: The name of the function to be called. + description: + type: string + description: | + A description of what the function does, used by the model to choose when and how to call the function. 
+ parameters: + $ref: '#/components/schemas/ToolFunctionParams' + required: + - name + - description + - parameters + ToolFunctionParams: + type: object + description: The parameters the functions accepts, described as a JSON Schema object. + additionalProperties: true + ToolCall: + type: object + description: The tool the model wants to call. + properties: + function: + $ref: '#/components/schemas/ToolCallFunction' + ToolCallFunction: + type: object + description: The function the model wants to call. + properties: + name: + type: string + description: The name of the function to be called. + arguments: + $ref: '#/components/schemas/ToolCallFunctionArgs' + required: + - name + - arguments + ToolCallFunctionArgs: + type: object + description: The arguments to pass to the function. + additionalProperties: true GenerateEmbeddingRequest: description: Generate embeddings from a model. type: object diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 5da5caa2..c967f29e 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,5 +1,5 @@ name: ollama_dart -description: Dart Client for the Ollama API (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). +description: Dart Client for the Ollama API (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). version: 0.1.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart diff --git a/packages/ollama_dart/test/ollama_dart_chat_test.dart b/packages/ollama_dart/test/ollama_dart_chat_test.dart index 807e1b67..3ed66209 100644 --- a/packages/ollama_dart/test/ollama_dart_chat_test.dart +++ b/packages/ollama_dart/test/ollama_dart_chat_test.dart @@ -7,7 +7,7 @@ void main() { group('Ollama Generate Completions API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'gemma2'; + const defaultModel = 'llama3.1'; const visionModel = 'llava'; setUp(() async { @@ -48,7 +48,7 @@ void main() { expect(response.model, defaultModel); expect(response.createdAt, isNotNull); expect( - response.message?.content, + response.message.content, isNotEmpty, ); expect(response.done, isTrue); @@ -79,7 +79,7 @@ void main() { ); String text = ''; await for (final res in stream) { - text += (res.message?.content ?? 
'').trim(); + text += res.message.content.trim(); } expect(text, contains('123456789')); }); @@ -103,7 +103,7 @@ void main() { format: ResponseFormat.json, ), ); - final generation = res.message?.content.replaceAll(RegExp(r'[\s\n]'), ''); + final generation = res.message.content.replaceAll(RegExp(r'[\s\n]'), ''); expect(generation, contains('[1,2,3,4,5,6,7,8,9]')); }); @@ -125,7 +125,7 @@ void main() { options: RequestOptions(stop: ['4']), ), ); - final generation = res.message?.content.replaceAll(RegExp(r'[\s\n]'), ''); + final generation = res.message.content.replaceAll(RegExp(r'[\s\n]'), ''); expect(generation, contains('123')); expect(generation, isNot(contains('456789'))); expect(res.doneReason, DoneReason.stop); @@ -170,8 +170,65 @@ void main() { ); final res1 = await client.generateChatCompletion(request: request); - final text1 = res1.message?.content; + final text1 = res1.message.content; expect(text1, contains('star')); }); + + test('Test tool calling', () async { + const tool = Tool( + function: ToolFunction( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + parameters: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ), + ); + + final res = await client.generateChatCompletion( + request: const GenerateChatCompletionRequest( + model: defaultModel, + messages: [ + Message( + role: MessageRole.system, + content: 'You are a helpful assistant.', + ), + Message( + role: MessageRole.user, + content: + 'What’s the weather like in Boston and Barcelona in celsius?', + ), + ], + tools: [tool], + keepAlive: 1, + ), + ); + // https://github.com/ollama/ollama/issues/5796 + expect(res.doneReason, DoneReason.stop); + expect(res.message.role, MessageRole.assistant); + expect(res.message.content, isEmpty); + final toolCalls = res.message.toolCalls; + expect(toolCalls, hasLength(2)); + final toolCall1 = toolCalls?.first.function; + expect(toolCall1?.name, tool.function?.name); + expect(toolCall1?.arguments['location'], contains('Boston')); + expect(toolCall1?.arguments['unit'], 'celsius'); + final toolCall2 = toolCalls?.last.function; + expect(toolCall2?.name, tool.function?.name); + expect(toolCall2?.arguments['location'], contains('Barcelona')); + expect(toolCall2?.arguments['unit'], 'celsius'); + }); }); } From 117d4ab1f189f9bc04eda2e9361f86ac618b85a0 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 24 Jul 2024 23:54:12 +0200 Subject: [PATCH 198/251] feat: Add tool calling support in ChatOllama (#505) --- .../models/chat_models/how_to/tools.md | 3 +- .../chat_models/integrations/anthropic.md | 4 +- .../models/chat_models/integrations/ollama.md | 51 +++++ .../chat_models/integrations/ollama.dart | 43 +++++ packages/langchain/README.md | 4 +- .../chat_models/chat_ollama/chat_ollama.dart | 61 +----- .../src/chat_models/chat_ollama/mappers.dart | 178 +++++++++++++++--- .../src/chat_models/chat_ollama/types.dart | 2 + packages/langchain_ollama/pubspec.yaml | 2 +- .../test/chat_models/chat_ollama_test.dart | 87 ++++++++- 10 files changed, 347 insertions(+), 88 deletions(-) diff --git a/docs/modules/model_io/models/chat_models/how_to/tools.md b/docs/modules/model_io/models/chat_models/how_to/tools.md index 0303be9c..11bf5f3e 100644 --- 
a/docs/modules/model_io/models/chat_models/how_to/tools.md +++ b/docs/modules/model_io/models/chat_models/how_to/tools.md @@ -4,9 +4,10 @@ > Tool calling is currently supported by: > - [`ChatAnthropic`](/modules/model_io/models/chat_models/integrations/anthropic.md) -> - [`ChatOpenAI`](/modules/model_io/models/chat_models/integrations/openai.md) > - [`ChatFirebaseVertexAI`](/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md) > - [`ChatGoogleGenerativeAI`](/modules/model_io/models/chat_models/integrations/googleai.md) +> - [`ChatOllama`](/modules/model_io/models/chat_models/integrations/ollama.md) +> - [`ChatOpenAI`](/modules/model_io/models/chat_models/integrations/openai.md) Tool calling allows a model to respond to a given prompt by generating output that matches a user-defined schema. While the name implies that the model is performing some action, this is actually not the case! The model is coming up with the arguments to a tool, and actually running the tool (or not) is up to the user - for example, if you want to extract output matching some schema from unstructured text, you could give the model an “extraction” tool that takes parameters matching the desired schema, then treat the generated output as your final result. diff --git a/docs/modules/model_io/models/chat_models/integrations/anthropic.md b/docs/modules/model_io/models/chat_models/integrations/anthropic.md index b3e99c84..b607ddc7 100644 --- a/docs/modules/model_io/models/chat_models/integrations/anthropic.md +++ b/docs/modules/model_io/models/chat_models/integrations/anthropic.md @@ -112,7 +112,7 @@ await stream.forEach(print); `ChatAnthropic` supports tool calling. -Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. +Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. Example: ```dart @@ -124,7 +124,7 @@ const tool = ToolSpec( 'properties': { 'location': { 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', + 'description': 'The city and country, e.g. San Francisco, US', }, }, 'required': ['location'], diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index 37110289..9c9339e8 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -125,6 +125,57 @@ print(res.output.content); // -> 'An Apple' ``` +## Tool calling + +`ChatOllama` supports tool calling. + +Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. + +**Notes:** +- Tool calling requires Ollama 0.2.8 or newer. +- Streaming tool calls is not supported at the moment. +- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1)). + +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + }, + 'required': ['location'], + }, +); + +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [tool], + ), +); + +final res = await chatModel.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +print(res.output.toolCalls); +// [AIChatMessageToolCall{ +// id: a621064b-03b3-4ca6-8278-f37504901034, +// name: get_current_weather, +// arguments: {location: Boston, US}, +// }, +// AIChatMessageToolCall{ +// id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, +// name: get_current_weather, +// arguments: {location: Madrid, ES}, +// }] +``` + ## RAG (Retrieval-Augmented Generation) pipeline We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `ChatOllama`. diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart index 4e5cf3b5..5fa3bcd0 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart @@ -10,6 +10,7 @@ void main(final List arguments) async { await _chatOllamaStreaming(); await _chatOllamaJsonMode(); await _chatOllamaMultimodal(); + await _chatOllamaToolCalling(); await _rag(); } @@ -94,6 +95,48 @@ Future _chatOllamaJsonMode() async { // {Spain: 46735727, The Netherlands: 17398435, France: 65273538} } +Future _chatOllamaToolCalling() async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + }, + 'required': ['location'], + }, + ); + + final chatModel = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [tool], + ), + ); + + final res = await chatModel.invoke( + PromptValue.string( + 'What’s the weather like in Boston and Madrid right now in celsius?', + ), + ); + print(res.output.toolCalls); + // [AIChatMessageToolCall{ + // id: a621064b-03b3-4ca6-8278-f37504901034, + // name: get_current_weather, + // arguments: {location: Boston, US}, + // }, + // AIChatMessageToolCall{ + // id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, + // name: get_current_weather, + // arguments: {location: Madrid, ES}, + // }] +} + Future _chatOllamaMultimodal() async { final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 51793fa8..fc16ffa9 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -71,7 +71,7 @@ Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/pack | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | | [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) 
| | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3.1, Gemma 2, Phi-3, Mistral nemo, WizardLM-2, CodeGemma, Command R, LLaVA, DBRX, Qwen, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | | [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | | [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | | [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | @@ -109,7 +109,7 @@ The following integrations are available in LangChain.dart: | [ChatFirebaseVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/firebase_vertex_ai) | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | ✔ | ✔ | ✔ | [Vertex AI for Firebase API](https://firebase.google.com/docs/vertex-ai) (aka Gemini API) | | [ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) | | [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://ollama.ai) | -| [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | | [Ollama Chat API](https://ollama.ai) | +| [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) | | [ChatOllamaTools](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama_tools) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) with tool-calling capabilities | | [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | 
[langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) | | [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) | diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart index 2ff391ef..7317dd69 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart @@ -5,7 +5,6 @@ import 'package:langchain_tiktoken/langchain_tiktoken.dart'; import 'package:ollama_dart/ollama_dart.dart'; import 'package:uuid/uuid.dart'; -import '../../llms/mappers.dart'; import 'mappers.dart'; import 'types.dart'; @@ -13,7 +12,7 @@ import 'types.dart'; /// to interact with the LLMs in a chat-like fashion. /// /// Ollama allows you to run open-source large language models, -/// such as Llama 3 or LLaVA, locally. +/// such as Llama 3.1, Gemma 2 or LLaVA, locally. /// /// For a complete list of supported models and model variants, see the /// [Ollama model library](https://ollama.ai/library). @@ -37,7 +36,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for Llama 3: `ollama pull llama3` +/// * e.g., for Llama 3: `ollama pull llama3.1` /// /// ### Ollama base URL /// @@ -188,9 +187,10 @@ class ChatOllama extends BaseChatModel { }) async { final id = _uuid.v4(); final completion = await _client.generateChatCompletion( - request: _generateCompletionRequest( + request: generateChatCompletionRequest( input.toChatMessages(), options: options, + defaultOptions: defaultOptions, ), ); return completion.toChatResult(id); @@ -204,9 +204,11 @@ class ChatOllama extends BaseChatModel { final id = _uuid.v4(); return _client .generateChatCompletionStream( - request: _generateCompletionRequest( + request: generateChatCompletionRequest( input.toChatMessages(), options: options, + defaultOptions: defaultOptions, + stream: true, ), ) .map( @@ -214,55 +216,6 @@ class ChatOllama extends BaseChatModel { ); } - /// Creates a [GenerateChatCompletionRequest] from the given input. - GenerateChatCompletionRequest _generateCompletionRequest( - final List messages, { - final bool stream = false, - final ChatOllamaOptions? options, - }) { - return GenerateChatCompletionRequest( - model: options?.model ?? defaultOptions.model ?? defaultModel, - messages: messages.toMessages(), - format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), - keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, - stream: stream, - options: RequestOptions( - numKeep: options?.numKeep ?? defaultOptions.numKeep, - seed: options?.seed ?? defaultOptions.seed, - numPredict: options?.numPredict ?? defaultOptions.numPredict, - topK: options?.topK ?? defaultOptions.topK, - topP: options?.topP ?? 
defaultOptions.topP, - tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, - typicalP: options?.typicalP ?? defaultOptions.typicalP, - repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, - temperature: options?.temperature ?? defaultOptions.temperature, - repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty, - presencePenalty: - options?.presencePenalty ?? defaultOptions.presencePenalty, - frequencyPenalty: - options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, - mirostat: options?.mirostat ?? defaultOptions.mirostat, - mirostatTau: options?.mirostatTau ?? defaultOptions.mirostatTau, - mirostatEta: options?.mirostatEta ?? defaultOptions.mirostatEta, - penalizeNewline: - options?.penalizeNewline ?? defaultOptions.penalizeNewline, - stop: options?.stop ?? defaultOptions.stop, - numa: options?.numa ?? defaultOptions.numa, - numCtx: options?.numCtx ?? defaultOptions.numCtx, - numBatch: options?.numBatch ?? defaultOptions.numBatch, - numGpu: options?.numGpu ?? defaultOptions.numGpu, - mainGpu: options?.mainGpu ?? defaultOptions.mainGpu, - lowVram: options?.lowVram ?? defaultOptions.lowVram, - f16Kv: options?.f16KV ?? defaultOptions.f16KV, - logitsAll: options?.logitsAll ?? defaultOptions.logitsAll, - vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly, - useMmap: options?.useMmap ?? defaultOptions.useMmap, - useMlock: options?.useMlock ?? defaultOptions.useMlock, - numThread: options?.numThread ?? defaultOptions.numThread, - ), - ); - } - /// Tokenizes the given prompt using tiktoken. /// /// Currently Ollama does not provide a tokenizer for the models it supports. diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart index d8b31e61..0c543a9c 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart @@ -1,47 +1,138 @@ // ignore_for_file: public_member_api_docs +import 'dart:convert'; + import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/language_models.dart'; -import 'package:ollama_dart/ollama_dart.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:ollama_dart/ollama_dart.dart' as o; +import 'package:uuid/uuid.dart'; + +import '../../llms/mappers.dart' show OllamaResponseFormatMapper; +import 'chat_ollama.dart'; +import 'types.dart'; + +/// Creates a [GenerateChatCompletionRequest] from the given input. +o.GenerateChatCompletionRequest generateChatCompletionRequest( + final List messages, { + required final ChatOllamaOptions? options, + required final ChatOllamaOptions defaultOptions, + final bool stream = false, +}) { + return o.GenerateChatCompletionRequest( + model: options?.model ?? defaultOptions.model ?? ChatOllama.defaultModel, + messages: messages.toMessages(), + format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), + keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, + tools: _mapTools( + tools: options?.tools ?? defaultOptions.tools, + toolChoice: options?.toolChoice ?? defaultOptions.toolChoice, + ), + stream: stream, + options: o.RequestOptions( + numKeep: options?.numKeep ?? defaultOptions.numKeep, + seed: options?.seed ?? defaultOptions.seed, + numPredict: options?.numPredict ?? defaultOptions.numPredict, + topK: options?.topK ?? defaultOptions.topK, + topP: options?.topP ?? defaultOptions.topP, + tfsZ: options?.tfsZ ?? 
defaultOptions.tfsZ, + typicalP: options?.typicalP ?? defaultOptions.typicalP, + repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, + temperature: options?.temperature ?? defaultOptions.temperature, + repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty, + presencePenalty: + options?.presencePenalty ?? defaultOptions.presencePenalty, + frequencyPenalty: + options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, + mirostat: options?.mirostat ?? defaultOptions.mirostat, + mirostatTau: options?.mirostatTau ?? defaultOptions.mirostatTau, + mirostatEta: options?.mirostatEta ?? defaultOptions.mirostatEta, + penalizeNewline: + options?.penalizeNewline ?? defaultOptions.penalizeNewline, + stop: options?.stop ?? defaultOptions.stop, + numa: options?.numa ?? defaultOptions.numa, + numCtx: options?.numCtx ?? defaultOptions.numCtx, + numBatch: options?.numBatch ?? defaultOptions.numBatch, + numGpu: options?.numGpu ?? defaultOptions.numGpu, + mainGpu: options?.mainGpu ?? defaultOptions.mainGpu, + lowVram: options?.lowVram ?? defaultOptions.lowVram, + f16Kv: options?.f16KV ?? defaultOptions.f16KV, + logitsAll: options?.logitsAll ?? defaultOptions.logitsAll, + vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly, + useMmap: options?.useMmap ?? defaultOptions.useMmap, + useMlock: options?.useMlock ?? defaultOptions.useMlock, + numThread: options?.numThread ?? defaultOptions.numThread, + ), + ); +} + +List? _mapTools({ + final List? tools, + final ChatToolChoice? toolChoice, +}) { + if (tools == null || tools.isEmpty) { + return null; + } + + return switch (toolChoice) { + ChatToolChoiceNone() => null, + ChatToolChoiceAuto() || + ChatToolChoiceRequired() || + null => + tools.map(_mapTool).toList(growable: false), + final ChatToolChoiceForced f => [ + _mapTool(tools.firstWhere((t) => t.name == f.name)), + ] + }; +} + +o.Tool _mapTool(final ToolSpec tool) { + return o.Tool( + function: o.ToolFunction( + name: tool.name, + description: tool.description, + parameters: tool.inputJsonSchema, + ), + ); +} extension OllamaChatMessagesMapper on List { - List toMessages() { + List toMessages() { return map(_mapMessage).expand((final msg) => msg).toList(growable: false); } - List _mapMessage(final ChatMessage msg) { + List _mapMessage(final ChatMessage msg) { return switch (msg) { final SystemChatMessage msg => [ - Message( - role: MessageRole.system, + o.Message( + role: o.MessageRole.system, content: msg.content, ), ], final HumanChatMessage msg => _mapHumanMessage(msg), - final AIChatMessage msg => [ - Message( - role: MessageRole.assistant, + final AIChatMessage msg => _mapAIMessage(msg), + final ToolChatMessage msg => [ + o.Message( + role: o.MessageRole.tool, content: msg.content, ), ], - ToolChatMessage() => - throw UnsupportedError('Ollama does not support tool calls'), CustomChatMessage() => throw UnsupportedError('Ollama does not support custom messages'), }; } - List _mapHumanMessage(final HumanChatMessage message) { + List _mapHumanMessage(final HumanChatMessage message) { return switch (message.content) { final ChatMessageContentText c => [ - Message( - role: MessageRole.user, + o.Message( + role: o.MessageRole.user, content: c.text, ), ], final ChatMessageContentImage c => [ - Message( - role: MessageRole.user, + o.Message( + role: o.MessageRole.user, content: c.data, ), ], @@ -49,7 +140,7 @@ extension OllamaChatMessagesMapper on List { }; } - List _mapContentMultiModal( + List _mapContentMultiModal( final ChatMessageContentMultiModal content, ) { final parts = 
content.parts.groupListsBy((final p) => p.runtimeType); @@ -63,8 +154,8 @@ extension OllamaChatMessagesMapper on List { // If there's only one text part and the rest are images, then we combine them in one message if ((parts[ChatMessageContentText]?.length ?? 0) == 1) { return [ - Message( - role: MessageRole.user, + o.Message( + role: o.MessageRole.user, content: (parts[ChatMessageContentText]!.first as ChatMessageContentText) .text, @@ -79,12 +170,12 @@ extension OllamaChatMessagesMapper on List { return content.parts .map( (final p) => switch (p) { - final ChatMessageContentText c => Message( - role: MessageRole.user, + final ChatMessageContentText c => o.Message( + role: o.MessageRole.user, content: c.text, ), - final ChatMessageContentImage c => Message( - role: MessageRole.user, + final ChatMessageContentImage c => o.Message( + role: o.MessageRole.user, content: c.data, ), ChatMessageContentMultiModal() => throw UnsupportedError( @@ -94,14 +185,38 @@ extension OllamaChatMessagesMapper on List { ) .toList(growable: false); } + + List _mapAIMessage(final AIChatMessage message) { + return [ + o.Message( + role: o.MessageRole.assistant, + content: message.content, + toolCalls: message.toolCalls.isNotEmpty + ? message.toolCalls.map(_mapToolCall).toList(growable: false) + : null, + ), + ]; + } + + o.ToolCall _mapToolCall(final AIChatMessageToolCall toolCall) { + return o.ToolCall( + function: o.ToolCallFunction( + name: toolCall.name, + arguments: toolCall.arguments, + ), + ); + } } -extension ChatResultMapper on GenerateChatCompletionResponse { +extension ChatResultMapper on o.GenerateChatCompletionResponse { ChatResult toChatResult(final String id, {final bool streaming = false}) { return ChatResult( id: id, output: AIChatMessage( content: message.content, + toolCalls: + message.toolCalls?.map(_mapToolCall).toList(growable: false) ?? + const [], ), finishReason: _mapFinishReason(doneReason), metadata: { @@ -120,6 +235,15 @@ extension ChatResultMapper on GenerateChatCompletionResponse { ); } + AIChatMessageToolCall _mapToolCall(final o.ToolCall toolCall) { + return AIChatMessageToolCall( + id: const Uuid().v4(), + name: toolCall.function?.name ?? '', + argumentsRaw: json.encode(toolCall.function?.arguments ?? const {}), + arguments: toolCall.function?.arguments ?? const {}, + ); + } + LanguageModelUsage _mapUsage() { return LanguageModelUsage( promptTokens: promptEvalCount, @@ -131,12 +255,12 @@ extension ChatResultMapper on GenerateChatCompletionResponse { } FinishReason _mapFinishReason( - final DoneReason? reason, + final o.DoneReason? 
reason, ) => switch (reason) { - DoneReason.stop => FinishReason.stop, - DoneReason.length => FinishReason.length, - DoneReason.load => FinishReason.unspecified, + o.DoneReason.stop => FinishReason.stop, + o.DoneReason.length => FinishReason.length, + o.DoneReason.load => FinishReason.unspecified, null => FinishReason.unspecified, }; } diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart index 137d0bdf..1b3b4d77 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart @@ -47,6 +47,8 @@ class ChatOllamaOptions extends ChatModelOptions { this.useMmap, this.useMlock, this.numThread, + super.tools, + super.toolChoice, super.concurrencyLimit, }); diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 0214a6c7..33a60f44 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,5 +1,5 @@ name: langchain_ollama -description: LangChain.dart integration module for Ollama (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). +description: LangChain.dart integration module for Ollama (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). version: 0.2.2+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart index 7e001289..66167f0f 100644 --- a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart +++ b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart @@ -6,13 +6,14 @@ import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/output_parsers.dart'; import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; import 'package:langchain_ollama/langchain_ollama.dart'; import 'package:test/test.dart'; void main() { group('ChatOllama tests', skip: Platform.environment.containsKey('CI'), () { late ChatOllama chatModel; - const defaultModel = 'llama3:latest'; + const defaultModel = 'llama3.1'; const visionModel = 'llava:latest'; setUp(() async { @@ -251,5 +252,89 @@ void main() { expect(res.output.content.toLowerCase(), contains('apple')); }); + + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + final model = chatModel.bind( + const ChatOllamaOptions( + model: defaultModel, + tools: [tool], + ), + ); + + final humanMessage = ChatMessage.humanText( + "What's the weather like in Boston and Madrid right now in celsius?", + ); + final res1 = await model.invoke(PromptValue.chat([humanMessage])); + + final aiMessage1 = res1.output; + expect(aiMessage1.toolCalls, hasLength(2)); + + final toolCall1 = aiMessage1.toolCalls.first; + expect(toolCall1.name, tool.name); + expect(toolCall1.arguments.containsKey('location'), isTrue); + expect(toolCall1.arguments['location'], contains('Boston')); + expect(toolCall1.arguments['unit'], 'celsius'); + + final toolCall2 = aiMessage1.toolCalls.last; + expect(toolCall2.name, tool.name); + expect(toolCall2.arguments.containsKey('location'), isTrue); + expect(toolCall2.arguments['location'], contains('Madrid')); + expect(toolCall2.arguments['unit'], 'celsius'); + + final functionResult1 = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + final functionMessage1 = ChatMessage.tool( + toolCallId: toolCall1.id, + content: json.encode(functionResult1), + ); + + final functionResult2 = { + 'temperature': '25', + 'unit': 'celsius', + 'description': 'Cloudy', + }; + final functionMessage2 = ChatMessage.tool( + toolCallId: toolCall2.id, + content: json.encode(functionResult2), + ); + + final res2 = await model.invoke( + PromptValue.chat([ + humanMessage, + aiMessage1, + functionMessage1, + functionMessage2, + ]), + ); + + final aiMessage2 = res2.output; + + expect(aiMessage2.toolCalls, isEmpty); + expect(aiMessage2.content, contains('22')); + expect(aiMessage2.content, contains('25')); + }); }); } From 9ed482ffb63fca36d9a133b7e22dd15186e181e3 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 25 Jul 2024 09:58:34 +0200 Subject: [PATCH 199/251] feat!: Update Ollama default model to llama-3.1 (#506) --- docs/expression_language/primitives/router.md | 6 +++--- .../models/chat_models/integrations/ollama.md | 12 +++++------ .../models/llms/integrations/ollama.md | 6 +++--- .../text_embedding/integrations/ollama.md | 2 +- .../vector_stores/integrations/objectbox.md | 2 +- .../expression_language/cookbook/routing.dart | 6 +++--- .../chat_models/integrations/ollama.dart | 10 +++++----- .../models/llms/integrations/ollama.dart | 4 ++-- .../vector_stores/integrations/objectbox.dart | 2 +- .../lib/home/bloc/providers.dart | 2 +- examples/wikivoyage_eu/README.md | 6 +++--- examples/wikivoyage_eu/bin/wikivoyage_eu.dart | 2 +- examples/wikivoyage_eu/pubspec.yaml | 2 +- .../vector_stores/objectbox/objectbox.dart | 2 +- .../chat_models/chat_ollama/chat_ollama.dart | 8 ++++---- .../chat_ollama_tools/chat_ollama_tools.dart | 6 +++--- .../lib/src/embeddings/ollama_embeddings.dart | 6 +++--- .../langchain_ollama/lib/src/llms/ollama.dart | 10 +++++----- .../test/embeddings/ollama_test.dart | 2 +- .../test/llms/ollama_test.dart | 2 +- .../example/ollama_dart_example.dart | 6 +++--- packages/ollama_dart/oas/ollama-curated.yaml | 20 +++++++++---------- 22 files changed, 62 insertions(+), 62 deletions(-) diff --git a/docs/expression_language/primitives/router.md b/docs/expression_language/primitives/router.md index 15b6f8ad..effd5f66 100644 --- a/docs/expression_language/primitives/router.md +++ b/docs/expression_language/primitives/router.md @@ -12,7 
+12,7 @@ First, let’s create a chain that will identify incoming questions as being abo ```dart final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3'), + defaultOptions: ChatOllamaOptions(model: 'llama3.1'), ); final classificationChain = PromptTemplate.fromTemplate(''' @@ -131,7 +131,7 @@ Here is a question: {query} '''; -final embeddings = OllamaEmbeddings(model: 'llama3'); +final embeddings = OllamaEmbeddings(model: 'llama3.1'); final promptTemplates = [physicsTemplate, historyTemplate]; final promptEmbeddings = await embeddings.embedDocuments( promptTemplates.map((final pt) => Document(pageContent: pt)).toList(), @@ -146,7 +146,7 @@ final chain = Runnable.fromMap({'query': Runnable.passthrough()}) | return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]); }) | ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), ) | StringOutputParser(); diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index 9c9339e8..1e440c83 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -16,7 +16,7 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull llama3` + * e.g., for Llama 3: `ollama pull llama3.1` ## Usage @@ -28,7 +28,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', temperature: 0, ), ); @@ -53,7 +53,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ ]); final chat = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', temperature: 0, ), ); @@ -79,7 +79,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ ]); final chat = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', temperature: 0, format: OllamaResponseFormat.json, ), @@ -183,7 +183,7 @@ We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `Ch ```dart // 1. Create a vector store and add documents to it final vectorStore = MemoryVectorStore( - embeddings: OllamaEmbeddings(model: 'llama3'), + embeddings: OllamaEmbeddings(model: 'llama3.1'), ); await vectorStore.addDocuments( documents: [ @@ -200,7 +200,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ // 3. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3'), + defaultOptions: ChatOllamaOptions(model: 'llama3.1'), ); final retriever = vectorStore.asRetriever( defaultOptions: VectorStoreRetrieverOptions( diff --git a/docs/modules/model_io/models/llms/integrations/ollama.md b/docs/modules/model_io/models/llms/integrations/ollama.md index 3a90917c..c139e7d9 100644 --- a/docs/modules/model_io/models/llms/integrations/ollama.md +++ b/docs/modules/model_io/models/llms/integrations/ollama.md @@ -16,7 +16,7 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. 
Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull llama3` + * e.g., for Llama 3: `ollama pull llama3.1` ## Usage @@ -26,7 +26,7 @@ final prompt = PromptTemplate.fromTemplate( ); final llm = Ollama( defaultOptions: OllamaOptions( - model: 'llama3', + model: 'llama3.1', ), ); final chain = prompt | llm | StringOutputParser(); @@ -43,7 +43,7 @@ final promptTemplate = PromptTemplate.fromTemplate( ); final llm = Ollama( defaultOptions: OllamaOptions( - model: 'llama3', + model: 'llama3.1', ), ); final chain = promptTemplate | llm | StringOutputParser(); diff --git a/docs/modules/retrieval/text_embedding/integrations/ollama.md b/docs/modules/retrieval/text_embedding/integrations/ollama.md index 395b1203..fc83bbb5 100644 --- a/docs/modules/retrieval/text_embedding/integrations/ollama.md +++ b/docs/modules/retrieval/text_embedding/integrations/ollama.md @@ -1,7 +1,7 @@ # OllamaEmbeddings ```dart -final embeddings = OllamaEmbeddings(model: 'llama3'); +final embeddings = OllamaEmbeddings(model: 'llama3.1'); const text = 'This is a test document.'; final res = await embeddings.embedQuery(text); final res = await embeddings.embedDocuments([text]); diff --git a/docs/modules/retrieval/vector_stores/integrations/objectbox.md b/docs/modules/retrieval/vector_stores/integrations/objectbox.md index 9c165306..0ac3dd9b 100644 --- a/docs/modules/retrieval/vector_stores/integrations/objectbox.md +++ b/docs/modules/retrieval/vector_stores/integrations/objectbox.md @@ -205,7 +205,7 @@ Sources: // 6. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3:8b'), + defaultOptions: ChatOllamaOptions(model: 'llama3.1'), ); final retriever = vectorStore.asRetriever(); diff --git a/examples/docs_examples/bin/expression_language/cookbook/routing.dart b/examples/docs_examples/bin/expression_language/cookbook/routing.dart index d177611d..79bbd348 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/routing.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/routing.dart @@ -9,7 +9,7 @@ void main(final List arguments) async { Future _runnableRouter() async { final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), ); final classificationChain = PromptTemplate.fromTemplate(''' @@ -114,7 +114,7 @@ Here is a question: '''; final embeddings = OllamaEmbeddings( - model: 'llama3', + model: 'llama3.1', ); final promptTemplates = [physicsTemplate, historyTemplate]; final promptEmbeddings = await embeddings.embedDocuments( @@ -132,7 +132,7 @@ Here is a question: return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]); }) | ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), ) | const StringOutputParser(); diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart index 5fa3bcd0..5c47bb0e 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart @@ -25,7 +25,7 @@ Future _chatOllama() async { final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', temperature: 0, ), ); @@ -52,7 +52,7 @@ Future 
_chatOllamaStreaming() async { ]); final chat = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', temperature: 0, ), ); @@ -77,7 +77,7 @@ Future _chatOllamaJsonMode() async { ]); final chat = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', temperature: 0, format: OllamaResponseFormat.json, ), @@ -162,7 +162,7 @@ Future _chatOllamaMultimodal() async { Future _rag() async { // 1. Create a vector store and add documents to it final vectorStore = MemoryVectorStore( - embeddings: OllamaEmbeddings(model: 'llama3'), + embeddings: OllamaEmbeddings(model: 'llama3.1'), ); await vectorStore.addDocuments( documents: [ @@ -184,7 +184,7 @@ Future _rag() async { // 3. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), ); final retriever = vectorStore.asRetriever( defaultOptions: const VectorStoreRetrieverOptions( diff --git a/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart index 2095d341..eb019a6b 100644 --- a/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart @@ -13,7 +13,7 @@ Future _ollama() async { ); final llm = Ollama( defaultOptions: const OllamaOptions( - model: 'llama3', + model: 'llama3.1', ), ); @@ -29,7 +29,7 @@ Future _ollamaStreaming() async { ); final llm = Ollama( defaultOptions: const OllamaOptions( - model: 'llama3', + model: 'llama3.1', ), ); const stringOutputParser = StringOutputParser(); diff --git a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart index cd558d1b..6c66d5dc 100644 --- a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart +++ b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart @@ -66,7 +66,7 @@ Sources: // 6. 
Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), ); final retriever = vectorStore.asRetriever(); diff --git a/examples/hello_world_flutter/lib/home/bloc/providers.dart b/examples/hello_world_flutter/lib/home/bloc/providers.dart index c92b87af..1445bec3 100644 --- a/examples/hello_world_flutter/lib/home/bloc/providers.dart +++ b/examples/hello_world_flutter/lib/home/bloc/providers.dart @@ -21,7 +21,7 @@ enum Provider { ), ollama( name: 'Ollama', - defaultModel: 'llama3', + defaultModel: 'llama3.1', defaultBaseUrl: 'http://localhost:11434/api', isRemote: false, ); diff --git a/examples/wikivoyage_eu/README.md b/examples/wikivoyage_eu/README.md index 07bc5073..cc573899 100644 --- a/examples/wikivoyage_eu/README.md +++ b/examples/wikivoyage_eu/README.md @@ -17,11 +17,11 @@ This example demonstrates how to build a fully local Retrieval Augmented Generat - For this example we will be using the following models: * Embedding model: [`jina/jina-embeddings-v2-small-en`](https://ollama.com/jina/jina-embeddings-v2-small-en) - * LLM: [`llama3:8b`](https://ollama.com/library/llama3) + * LLM: [`llama3.1`](https://ollama.com/library/llama3.1) - Open your terminal and run: ```bash ollama pull jina/jina-embeddings-v2-small-en -ollama run llama3:8b +ollama run llama3.1 ``` ### 3. Setup ObjectBox @@ -73,7 +73,7 @@ The chatbot script implements the RAG pipeline. It does the following: 2. Uses the `jina/jina-embeddings-v2-small-en` model to create an embedding for the query. 3. Retrieves the 5 most similar documents from the ObjectBox database. 4. Builds a prompt using the retrieved documents and the query. -5. Uses the `llama3:8b` model to generate a response to the prompt. +5. Uses the `llama3.1` model to generate a response to the prompt. You can run the script using: ```bash diff --git a/examples/wikivoyage_eu/bin/wikivoyage_eu.dart b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart index b1f82689..8123c262 100644 --- a/examples/wikivoyage_eu/bin/wikivoyage_eu.dart +++ b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart @@ -51,7 +51,7 @@ Do not provide any other suggestion if the question is not about Europe. final model = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', ), ); const outputParser = StringOutputParser(); diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index a591713f..70fc19fb 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -1,5 +1,5 @@ name: wikivoyage_eu -description: Wikivoyage EU chatbot using llama3 and ObjectBox. +description: Wikivoyage EU chatbot using llama3.1 and ObjectBox. version: 1.0.0 publish_to: none diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart index 0a3ac27b..94457e54 100644 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart @@ -20,7 +20,7 @@ import 'types.dart'; /// Vector store for the [ObjectBox](https://objectbox.io/) on-device database. 
/// /// ```dart -/// final embeddings = OllamaEmbeddings(model: 'llama3'); +/// final embeddings = OllamaEmbeddings(model: 'llama3.1'); /// final vectorStore = ObjectBoxVectorStore(embeddings: embeddings); /// ``` /// diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart index 7317dd69..8db88d0d 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart @@ -57,7 +57,7 @@ import 'types.dart'; /// ```dart /// final chatModel = ChatOllama( /// defaultOptions: const ChatOllamaOptions( -/// model: 'llama3', +/// model: 'llama3.1', /// temperature: 0, /// format: 'json', /// ), @@ -89,7 +89,7 @@ import 'types.dart'; /// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ -/// 'q1': prompt1 | chatModel.bind(const ChatOllamaOptions(model: 'llama3')) | outputParser, +/// 'q1': prompt1 | chatModel.bind(const ChatOllamaOptions(model: 'llama3.1')) | outputParser, /// 'q2': prompt2| chatModel.bind(const ChatOllamaOptions(model: 'mistral')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); @@ -152,7 +152,7 @@ class ChatOllama extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatOllamaOptions( - model: 'llama3', + model: defaultModel, ), this.encoding = 'cl100k_base', }) : _client = OllamaClient( @@ -178,7 +178,7 @@ class ChatOllama extends BaseChatModel { String get modelType => 'chat-ollama'; /// The default model to use unless another is specified. - static const defaultModel = 'llama3'; + static const defaultModel = 'llama3.1'; @override Future invoke( diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart index 677fd308..82da6a95 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart @@ -41,7 +41,7 @@ import 'types.dart'; /// ); /// final chatModel = ChatOllamaTools( /// defaultOptions: ChatOllamaToolOptions( -/// options: ChatOllamaOptions(model: 'llama3:8b'), +/// options: ChatOllamaOptions(model: 'llama3.1'), /// tools: [tool], /// ), /// ); @@ -55,7 +55,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for Llama 3: `ollama pull llama3` +/// * e.g., for Llama 3: `ollama pull llama3.1` /// /// ### Ollama base URL /// @@ -109,7 +109,7 @@ class ChatOllamaTools extends BaseChatModel { String get modelType => 'chat-ollama-tools'; /// The default model to use unless another is specified. 
- static const defaultModel = 'llama3'; + static const defaultModel = 'llama3.1'; @override Future invoke( diff --git a/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart b/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart index 66ac2edb..bd40cf60 100644 --- a/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart +++ b/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart @@ -13,7 +13,7 @@ import 'package:ollama_dart/ollama_dart.dart'; /// /// Example: /// ```dart -/// final embeddings = OllamaEmbeddings(model: 'llama3'); +/// final embeddings = OllamaEmbeddings(model: 'llama3.1'); /// final res = await embeddings.embedQuery('Hello world'); /// ``` /// @@ -23,7 +23,7 @@ import 'package:ollama_dart/ollama_dart.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for `Llama-7b`: `ollama pull llama3` +/// * e.g., for `Llama-7b`: `ollama pull llama3.1` /// /// ### Advance /// @@ -76,7 +76,7 @@ class OllamaEmbeddings implements Embeddings { /// - `client`: the HTTP client to use. You can set your own HTTP client if /// you need further customization (e.g. to use a Socks5 proxy). OllamaEmbeddings({ - this.model = 'llama3', + this.model = 'llama3.1', this.keepAlive, final String baseUrl = 'http://localhost:11434/api', final Map? headers, diff --git a/packages/langchain_ollama/lib/src/llms/ollama.dart b/packages/langchain_ollama/lib/src/llms/ollama.dart index fd9a8ed4..b3601f6e 100644 --- a/packages/langchain_ollama/lib/src/llms/ollama.dart +++ b/packages/langchain_ollama/lib/src/llms/ollama.dart @@ -20,7 +20,7 @@ import 'types.dart'; /// ```dart /// final llm = Ollama( /// defaultOption: const OllamaOptions( -/// model: 'llama3', +/// model: 'llama3.1', /// temperature: 1, /// ), /// ); @@ -49,7 +49,7 @@ import 'types.dart'; /// ```dart /// final llm = Ollama( /// defaultOptions: const OllamaOptions( -/// model: 'llama3', +/// model: 'llama3.1', /// temperature: 0, /// format: 'json', /// ), @@ -83,7 +83,7 @@ import 'types.dart'; /// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ -/// 'q1': prompt1 | llm.bind(const OllamaOptions(model: 'llama3')) | outputParser, +/// 'q1': prompt1 | llm.bind(const OllamaOptions(model: 'llama3.1')) | outputParser, /// 'q2': prompt2| llm.bind(const OllamaOptions(model: 'mistral')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); @@ -93,7 +93,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for `Llama-7b`: `ollama pull llama3` +/// * e.g., for `Llama-7b`: `ollama pull llama3.1` /// /// ### Advance /// @@ -178,7 +178,7 @@ class Ollama extends BaseLLM { String get modelType => 'ollama'; /// The default model to use unless another is specified. 
- static const defaultModel = 'llama3'; + static const defaultModel = 'llama3.1'; @override Future invoke( diff --git a/packages/langchain_ollama/test/embeddings/ollama_test.dart b/packages/langchain_ollama/test/embeddings/ollama_test.dart index 0f94ad0d..ac8f999e 100644 --- a/packages/langchain_ollama/test/embeddings/ollama_test.dart +++ b/packages/langchain_ollama/test/embeddings/ollama_test.dart @@ -8,7 +8,7 @@ void main() { group('OllamaEmbeddings tests', skip: Platform.environment.containsKey('CI'), () { late OllamaEmbeddings embeddings; - const defaultModel = 'llama3:latest'; + const defaultModel = 'llama3.1'; setUp(() async { embeddings = OllamaEmbeddings( diff --git a/packages/langchain_ollama/test/llms/ollama_test.dart b/packages/langchain_ollama/test/llms/ollama_test.dart index e9a6ac55..d21d0e56 100644 --- a/packages/langchain_ollama/test/llms/ollama_test.dart +++ b/packages/langchain_ollama/test/llms/ollama_test.dart @@ -10,7 +10,7 @@ import 'package:test/test.dart'; void main() { group('Ollama tests', skip: Platform.environment.containsKey('CI'), () { late Ollama llm; - const defaultModel = 'llama3:latest'; + const defaultModel = 'llama3.1'; setUp(() async { llm = Ollama( diff --git a/packages/ollama_dart/example/ollama_dart_example.dart b/packages/ollama_dart/example/ollama_dart_example.dart index e5d11e3c..b1e9361f 100644 --- a/packages/ollama_dart/example/ollama_dart_example.dart +++ b/packages/ollama_dart/example/ollama_dart_example.dart @@ -70,7 +70,7 @@ Future _generateCompletionStream(final OllamaClient client) async { Future _generateChatCompletion(final OllamaClient client) async { final generated = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3:latest', + model: 'llama3.1', messages: [ Message( role: MessageRole.system, @@ -95,7 +95,7 @@ Future _generateChatCompletionWithHistory( ) async { final generated = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3:latest', + model: 'llama3.1', messages: [ Message( role: MessageRole.user, @@ -118,7 +118,7 @@ Future _generateChatCompletionWithHistory( Future _generateChatCompletionStream(final OllamaClient client) async { final stream = client.generateChatCompletionStream( request: const GenerateChatCompletionRequest( - model: 'llama3:latest', + model: 'llama3.1', messages: [ Message( role: MessageRole.system, diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index a0a42067..b7c04cae 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -273,7 +273,7 @@ components: The model name. Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - example: llama3:8b + example: llama3.1 prompt: type: string description: The prompt to generate a response. 
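A minimal sketch (not part of the patch) of the `model:tag` convention described in the schema above, using the same `ollama_dart` chat API as `ollama_dart_example.dart`; the `:8b` tag is an assumed published tag and can be dropped to resolve to `latest`:

```dart
import 'package:ollama_dart/ollama_dart.dart';

Future<void> main() async {
  final client = OllamaClient();
  final res = await client.generateChatCompletion(
    request: const GenerateChatCompletionRequest(
      // `model:tag` format; omitting `:8b` resolves to the `latest` tag.
      model: 'llama3.1:8b',
      messages: [
        Message(role: MessageRole.user, content: 'Say hello in one word.'),
      ],
    ),
  );
  print(res.message.content);
  client.endSession();
}
```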
@@ -530,7 +530,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 created_at: type: string format: date-time @@ -587,7 +587,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 messages: type: array description: The messages of the chat, this can be used to keep a chat memory @@ -619,7 +619,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 created_at: type: string format: date-time @@ -760,7 +760,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 prompt: type: string description: Text to generate embeddings for. @@ -837,7 +837,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 modified_at: type: string format: date-time @@ -914,7 +914,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 size: type: integer format: int64 @@ -942,7 +942,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 required: - model ModelInfo: @@ -990,7 +990,7 @@ components: source: type: string description: Name of the model to copy. - example: llama3:8b + example: llama3.1 destination: type: string description: Name of the new model. @@ -1015,7 +1015,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 insecure: type: boolean description: | From 217ef62d6e5837a0df5ce3a412af5a6f76e8a931 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 26 Jul 2024 18:13:46 +0200 Subject: [PATCH 200/251] feat!: Update ChatOpenAI default model to gpt-4o-mini (#507) --- .../cookbook/prompt_llm_parser.md | 6 +- docs/expression_language/get_started.md | 2 +- docs/expression_language/interface.md | 2 +- .../expression_language/primitives/binding.md | 2 +- .../primitives/function.md | 8 +- docs/expression_language/streaming.md | 4 +- .../modules/agents/agent_types/agent_types.md | 2 +- .../models/chat_models/chat_models.md | 2 +- .../cookbook/prompt_llm_parser.dart | 6 +- .../cookbook/streaming.dart | 4 +- .../bin/expression_language/get_started.dart | 2 +- .../bin/expression_language/interface.dart | 2 +- .../primitives/binding.dart | 2 +- .../primitives/function.dart | 8 +- .../lib/src/chains/qa_with_sources.dart | 2 +- .../lib/src/chat_models/chat_openai.dart | 9 +- .../test/chains/qa_with_sources_test.dart | 2 +- .../test/chat_models/chat_openai_test.dart | 3 +- .../test/chat_models/open_router_test.dart | 6 +- packages/openai_dart/README.md | 8 +- .../create_chat_completion_request.dart | 1 - .../generated/schema/create_run_request.dart | 1 - .../schema/create_thread_and_run_request.dart | 1 - .../src/generated/schema/schema.freezed.dart | 6 +- .../lib/src/generated/schema/schema.g.dart | 6 +- packages/openai_dart/oas/openapi_curated.yaml | 2 +- .../test/openai_client_chat_test.dart | 117 ++---------------- 27 files changed, 57 insertions(+), 159 deletions(-) diff --git a/docs/expression_language/cookbook/prompt_llm_parser.md b/docs/expression_language/cookbook/prompt_llm_parser.md index bb9a1a28..e96bf6c1 100644 --- a/docs/expression_language/cookbook/prompt_llm_parser.md +++ b/docs/expression_language/cookbook/prompt_llm_parser.md @@ -33,7 +33,7 @@ print(res); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714835666, // system_fingerprint: 
fp_3b956da36b // }, @@ -74,7 +74,7 @@ print(res); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714835734, // system_fingerprint: fp_a450710239 // }, @@ -144,7 +144,7 @@ print(res); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714835806, // system_fingerprint: fp_3b956da36b // }, diff --git a/docs/expression_language/get_started.md b/docs/expression_language/get_started.md index 70c12b9a..9b51efe6 100644 --- a/docs/expression_language/get_started.md +++ b/docs/expression_language/get_started.md @@ -120,7 +120,7 @@ print(res2); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714327251, // system_fingerprint: fp_3b956da36b // }, diff --git a/docs/expression_language/interface.md b/docs/expression_language/interface.md index 9b7085d8..30fcf890 100644 --- a/docs/expression_language/interface.md +++ b/docs/expression_language/interface.md @@ -107,7 +107,7 @@ final res = await chain.batch( {'topic': 'cats'}, ], options: [ - const ChatOpenAIOptions(model: 'gpt-3.5-turbo', temperature: 0.5), + const ChatOpenAIOptions(model: 'gpt-4o-mini', temperature: 0.5), const ChatOpenAIOptions(model: 'gpt-4', temperature: 0.7), ], ); diff --git a/docs/expression_language/primitives/binding.md b/docs/expression_language/primitives/binding.md index a04a511f..2aeb9575 100644 --- a/docs/expression_language/primitives/binding.md +++ b/docs/expression_language/primitives/binding.md @@ -57,7 +57,7 @@ final chain = Runnable.fromMap({ chatModel.bind(ChatOpenAIOptions(model: 'gpt-4-turbo')) | outputParser, 'q2': prompt2 | - chatModel.bind(ChatOpenAIOptions(model: 'gpt-3.5-turbo')) | + chatModel.bind(ChatOpenAIOptions(model: 'gpt-4o-mini')) | outputParser, }); diff --git a/docs/expression_language/primitives/function.md b/docs/expression_language/primitives/function.md index e0b621fd..88bf731b 100644 --- a/docs/expression_language/primitives/function.md +++ b/docs/expression_language/primitives/function.md @@ -76,7 +76,7 @@ await chain.invoke('x raised to the third plus seven equals 12'); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714463309, // system_fingerprint: fp_3b956da36b // }, @@ -122,7 +122,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){}); // }, // finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -141,7 +141,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){}); // }, // finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -160,7 +160,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){}); // }, // finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, diff --git a/docs/expression_language/streaming.md b/docs/expression_language/streaming.md index 8b4b720f..25725045 100644 --- a/docs/expression_language/streaming.md +++ b/docs/expression_language/streaming.md @@ -49,7 +49,7 @@ print(chunks.first); // }, // finishReason: FinishReason.unspecified, // 
metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, @@ -71,7 +71,7 @@ print(result); // }, // finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, diff --git a/docs/modules/agents/agent_types/agent_types.md b/docs/modules/agents/agent_types/agent_types.md index 229422ee..d6c79bd0 100644 --- a/docs/modules/agents/agent_types/agent_types.md +++ b/docs/modules/agents/agent_types/agent_types.md @@ -8,7 +8,7 @@ response to the user. Here are the agents available in LangChain. ### OpenAI Functions -Certain OpenAI models (like gpt-3.5-turbo-0613 and gpt-4-0613) have been +Certain OpenAI models (like `gpt-3.5-turbo` and `gpt-4`) have been explicitly fine-tuned to detect when a function should to be called and respond with the inputs that should be passed to the function. The OpenAI Functions Agent is designed to work with these models. diff --git a/docs/modules/model_io/models/chat_models/chat_models.md b/docs/modules/model_io/models/chat_models/chat_models.md index 5aabfd23..e191707b 100644 --- a/docs/modules/model_io/models/chat_models/chat_models.md +++ b/docs/modules/model_io/models/chat_models/chat_models.md @@ -93,5 +93,5 @@ print(chatRes1.generations); print(chatRes1.usage?.totalTokens); // -> 36 print(chatRes1.modelOutput); -// -> {id: chatcmpl-7QHTjpTCELFuGbxRaazFqvYtepXOc, created: 2023-06-11 17:41:11.000, model: gpt-3.5-turbo} +// -> {id: chatcmpl-7QHTjpTCELFuGbxRaazFqvYtepXOc, created: 2023-06-11 17:41:11.000, model: gpt-4o-mini} ``` diff --git a/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart b/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart index f34fab19..21cea3b4 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart @@ -32,7 +32,7 @@ Future _promptTemplateLLM() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714835666, // system_fingerprint: fp_3b956da36b // }, @@ -65,7 +65,7 @@ Future _attachingStopSequences() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714835734, // system_fingerprint: fp_a450710239 // }, @@ -133,7 +133,7 @@ Future _attachingToolCallInformation() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714835806, // system_fingerprint: fp_3b956da36b // }, diff --git a/examples/docs_examples/bin/expression_language/cookbook/streaming.dart b/examples/docs_examples/bin/expression_language/cookbook/streaming.dart index 7af0bb43..66e4a7a6 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/streaming.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/streaming.dart @@ -33,7 +33,7 @@ Future _languageModels() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, @@ -49,7 +49,7 @@ Future _languageModels() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714143945, // 
system_fingerprint: fp_3b956da36b // }, diff --git a/examples/docs_examples/bin/expression_language/get_started.dart b/examples/docs_examples/bin/expression_language/get_started.dart index 5ccc2505..c3ecbd1f 100644 --- a/examples/docs_examples/bin/expression_language/get_started.dart +++ b/examples/docs_examples/bin/expression_language/get_started.dart @@ -82,7 +82,7 @@ Future _promptModelOutputParser() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714327251, // system_fingerprint: fp_3b956da36b // }, diff --git a/examples/docs_examples/bin/expression_language/interface.dart b/examples/docs_examples/bin/expression_language/interface.dart index f678f18a..f2a634b7 100644 --- a/examples/docs_examples/bin/expression_language/interface.dart +++ b/examples/docs_examples/bin/expression_language/interface.dart @@ -96,7 +96,7 @@ Future _runnableInterfaceBatchOptions() async { {'topic': 'cats'}, ], options: [ - const ChatOpenAIOptions(model: 'gpt-3.5-turbo', temperature: 0.5), + const ChatOpenAIOptions(model: 'gpt-4o-mini', temperature: 0.5), const ChatOpenAIOptions(model: 'gpt-4', temperature: 0.7), ], ); diff --git a/examples/docs_examples/bin/expression_language/primitives/binding.dart b/examples/docs_examples/bin/expression_language/primitives/binding.dart index 1c456ef7..d16d81d8 100644 --- a/examples/docs_examples/bin/expression_language/primitives/binding.dart +++ b/examples/docs_examples/bin/expression_language/primitives/binding.dart @@ -63,7 +63,7 @@ Future _differentModels() async { chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4-turbo')) | outputParser, 'q2': prompt2 | - chatModel.bind(const ChatOpenAIOptions(model: 'gpt-3.5-turbo')) | + chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4o-mini')) | outputParser, }); final res = await chain.invoke({'name': 'David'}); diff --git a/examples/docs_examples/bin/expression_language/primitives/function.dart b/examples/docs_examples/bin/expression_language/primitives/function.dart index 8c631877..029322bb 100644 --- a/examples/docs_examples/bin/expression_language/primitives/function.dart +++ b/examples/docs_examples/bin/expression_language/primitives/function.dart @@ -73,7 +73,7 @@ Future _function() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714463309, // system_fingerprint: fp_3b956da36b // }, @@ -116,7 +116,7 @@ Future _function() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -135,7 +135,7 @@ Future _function() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -154,7 +154,7 @@ Future _function() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, diff --git a/packages/langchain_openai/lib/src/chains/qa_with_sources.dart b/packages/langchain_openai/lib/src/chains/qa_with_sources.dart index 207577a1..7c812836 100644 --- a/packages/langchain_openai/lib/src/chains/qa_with_sources.dart +++ b/packages/langchain_openai/lib/src/chains/qa_with_sources.dart @@ -12,7 +12,7 @@ import 'qa_with_structure.dart'; /// ```dart /// final 
llm = ChatOpenAI( /// apiKey: openaiApiKey, -/// model: 'gpt-3.5-turbo-0613', +/// model: 'gpt-4o-mini', /// temperature: 0, /// ); /// final qaChain = OpenAIQAWithSourcesChain(llm: llm); diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index 54c955e9..dbd9c333 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -76,7 +76,7 @@ import 'types.dart'; /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ /// 'q1': prompt1 | chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4')) | outputParser, -/// 'q2': prompt2| chatModel.bind(const ChatOpenAIOptions(model: 'gpt-3.5-turbo')) | outputParser, +/// 'q2': prompt2| chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4o-mini')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); /// ``` @@ -239,7 +239,7 @@ class ChatOpenAI extends BaseChatModel { String get modelType => 'openai-chat'; /// The default model to use unless another is specified. - static const defaultModel = 'gpt-3.5-turbo'; + static const defaultModel = 'gpt-4o-mini'; @override Future invoke( @@ -348,7 +348,6 @@ class ChatOpenAI extends BaseChatModel { final int tokensPerName; switch (model) { - case 'gpt-3.5-turbo-0613': case 'gpt-3.5-turbo-16k-0613': case 'gpt-4-0314': case 'gpt-4-32k-0314': @@ -362,8 +361,8 @@ class ChatOpenAI extends BaseChatModel { // If there's a name, the role is omitted tokensPerName = -1; default: - if (model.startsWith('gpt-3.5-turbo') || model.startsWith('gpt-4')) { - // Returning num tokens assuming gpt-3.5-turbo-0613 + if (model.startsWith('gpt-4o-mini') || model.startsWith('gpt-4')) { + // Returning num tokens assuming gpt-4 tokensPerMessage = 3; tokensPerName = 1; } else { diff --git a/packages/langchain_openai/test/chains/qa_with_sources_test.dart b/packages/langchain_openai/test/chains/qa_with_sources_test.dart index a94ea862..15a80431 100644 --- a/packages/langchain_openai/test/chains/qa_with_sources_test.dart +++ b/packages/langchain_openai/test/chains/qa_with_sources_test.dart @@ -125,7 +125,7 @@ Question: {question} final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-3.5-turbo', + model: 'gpt-4o-mini', temperature: 0, ), ); diff --git a/packages/langchain_openai/test/chat_models/chat_openai_test.dart b/packages/langchain_openai/test/chat_models/chat_openai_test.dart index edb42b2e..7ba681c6 100644 --- a/packages/langchain_openai/test/chat_models/chat_openai_test.dart +++ b/packages/langchain_openai/test/chat_models/chat_openai_test.dart @@ -14,7 +14,7 @@ import 'package:test/test.dart'; void main() { group('ChatOpenAI tests', () { final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - const defaultModel = 'gpt-3.5-turbo'; + const defaultModel = 'gpt-4o-mini'; test('Test ChatOpenAI parameters', () async { final chat = ChatOpenAI( @@ -208,7 +208,6 @@ void main() { test('Test countTokens messages', () async { final models = [ - 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-4-0314', 'gpt-4-0613', diff --git a/packages/langchain_openai/test/chat_models/open_router_test.dart b/packages/langchain_openai/test/chat_models/open_router_test.dart index f108db8b..d7c8fc9c 100644 --- a/packages/langchain_openai/test/chat_models/open_router_test.dart +++ b/packages/langchain_openai/test/chat_models/open_router_test.dart @@ 
-27,7 +27,7 @@ void main() { test('Test invoke OpenRouter API with different models', () async { final models = [ - 'gpt-3.5-turbo', + 'gpt-4o-mini', 'gpt-4', 'google/gemini-pro', 'anthropic/claude-2', @@ -57,7 +57,7 @@ void main() { test('Test stream OpenRouter API with different models', () async { final models = [ - 'gpt-3.5-turbo', + 'gpt-4o-mini', 'gpt-4', // 'google/gemini-pro', // Not supported 'anthropic/claude-2', @@ -88,7 +88,7 @@ void main() { test('Test countTokens', () async { final models = [ - 'gpt-3.5-turbo', + 'gpt-4o-mini', 'gpt-4', 'google/gemini-pro', 'anthropic/claude-2', diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md index 76dcd335..2c003f8d 100644 --- a/packages/openai_dart/README.md +++ b/packages/openai_dart/README.md @@ -257,7 +257,7 @@ const tool = ChatCompletionTool( final res1 = await client.createChatCompletion( request: CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -333,7 +333,7 @@ const function = FunctionObject( final res1 = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-3.5-turbo'), + model: ChatCompletionModel.modelId('gpt-4o-mini'), messages: [ ChatCompletionMessage.system( content: 'You are a helpful assistant.', @@ -355,7 +355,7 @@ final functionResult = getCurrentWeather(arguments['location'], arguments['unit' final res2 = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-3.5-turbo'), + model: ChatCompletionModel.modelId('gpt-4o-mini'), messages: [ ChatCompletionMessage.system( content: 'You are a helpful assistant.', @@ -480,7 +480,7 @@ Related guide: [Fine-tune models](https://platform.openai.com/docs/guides/fine-t ```dart const request = CreateFineTuningJobRequest( - model: FineTuningModel.modelId('gpt-3.5-turbo'), + model: FineTuningModel.modelId('gpt-4o-mini'), trainingFile: 'file-abc123', validationFile: 'file-abc123', hyperparameters: FineTuningJobHyperparameters( diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 6e7e429a..f9213271 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -122,7 +122,6 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - @Default(true) bool? parallelToolCalls, /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). 
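With `@Default(true)` removed above, `parallel_tool_calls` is only sent when the caller sets it explicitly. A hedged usage sketch (assumed client setup; `weatherTool` is a placeholder `ChatCompletionTool` not defined in this patch):

```dart
final res = await client.createChatCompletion(
  request: CreateChatCompletionRequest(
    model: const ChatCompletionModel.model(ChatCompletionModels.gpt4oMini),
    messages: [
      const ChatCompletionMessage.system(
        content: 'You are a helpful weather assistant.',
      ),
      const ChatCompletionMessage.user(
        content: ChatCompletionUserMessageContent.string(
          "What's the weather in Boston and Madrid right now?",
        ),
      ),
    ],
    tools: [weatherTool],
    // Set explicitly to disable parallel tool calls; leave unset (null) to
    // defer to the OpenAI API default instead of a client-side `true`.
    parallelToolCalls: false,
  ),
);
```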
diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 83c04dc1..0e395531 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -72,7 +72,6 @@ class CreateRunRequest with _$CreateRunRequest { /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - @Default(true) bool? parallelToolCalls, /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index 67b921cb..ae054a5c 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -71,7 +71,6 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - @Default(true) bool? parallelToolCalls, /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
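The same default removal applies to the Assistants run requests above. A small sketch of the resulting serialization behaviour (field names taken from the generated schema; `asst_abc123` is a placeholder):

```dart
// With no client-side default and `includeIfNull: false`, an unset
// parallelToolCalls is omitted from the payload entirely, so the API-side
// default applies rather than the client always sending `true`.
final request = CreateRunRequest(assistantId: 'asst_abc123');
print(request.toJson().containsKey('parallel_tool_calls')); // false
```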
diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 75973e85..06e93133 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -4045,7 +4045,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - this.parallelToolCalls = true, + this.parallelToolCalls, @JsonKey(includeIfNull: false) this.user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -30225,7 +30225,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - this.parallelToolCalls = true, + this.parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @@ -33563,7 +33563,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - this.parallelToolCalls = true, + this.parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 95ffa209..191f05e7 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -338,7 +338,7 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( .toList(), toolChoice: const _ChatCompletionToolChoiceOptionConverter() .fromJson(json['tool_choice']), - parallelToolCalls: json['parallel_tool_calls'] as bool? ?? true, + parallelToolCalls: json['parallel_tool_calls'] as bool?, user: json['user'] as String?, functionCall: const _ChatCompletionFunctionCallConverter() .fromJson(json['function_call']), @@ -2867,7 +2867,7 @@ _$CreateRunRequestImpl _$$CreateRunRequestImplFromJson( json['truncation_strategy'] as Map), toolChoice: const _CreateRunRequestToolChoiceConverter() .fromJson(json['tool_choice']), - parallelToolCalls: json['parallel_tool_calls'] as bool? ?? true, + parallelToolCalls: json['parallel_tool_calls'] as bool?, responseFormat: const _CreateRunRequestResponseFormatConverter() .fromJson(json['response_format']), stream: json['stream'] as bool?, @@ -3193,7 +3193,7 @@ _$CreateThreadAndRunRequestImpl _$$CreateThreadAndRunRequestImplFromJson( json['truncation_strategy'] as Map), toolChoice: const _CreateThreadAndRunRequestToolChoiceConverter() .fromJson(json['tool_choice']), - parallelToolCalls: json['parallel_tool_calls'] as bool? ?? 
true, + parallelToolCalls: json['parallel_tool_calls'] as bool?, responseFormat: const _CreateThreadAndRunRequestResponseFormatConverter() .fromJson(json['response_format']), stream: json['stream'] as bool?, diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 420c7cf4..00dbbe54 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1995,7 +1995,7 @@ components: Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. type: boolean - default: true + default: null nullable: true user: *end_user_param_configuration function_call: diff --git a/packages/openai_dart/test/openai_client_chat_test.dart b/packages/openai_dart/test/openai_client_chat_test.dart index 9277c848..fa272bbe 100644 --- a/packages/openai_dart/test/openai_client_chat_test.dart +++ b/packages/openai_dart/test/openai_client_chat_test.dart @@ -23,7 +23,7 @@ void main() { test('Test call chat completion API', () async { final models = [ - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ChatCompletionModels.gpt4, ]; @@ -73,7 +73,7 @@ void main() { test('Test call chat completion API with stop sequence', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -105,7 +105,7 @@ void main() { test('Test call chat completions API with max tokens', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -128,7 +128,7 @@ void main() { test('Test call chat completions API with other parameters', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -154,7 +154,7 @@ void main() { test('Test call chat completions streaming API', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -179,7 +179,7 @@ void main() { await for (final res in stream) { expect(res.id, isNotEmpty); expect(res.created, greaterThan(0)); - expect(res.model, startsWith('gpt-3.5-turbo')); + expect(res.model, startsWith('gpt-4o-mini')); expect(res.object, isNotEmpty); if (res.choices.isNotEmpty) { expect(res.choices, hasLength(1)); @@ -224,7 +224,7 @@ void main() { final request1 = CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ const ChatCompletionMessage.system( @@ -272,7 +272,7 @@ void main() { final request2 = CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ const ChatCompletionMessage.system( @@ -330,7 +330,7 @@ void main() { final request1 = CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ const ChatCompletionMessage.system( @@ -360,7 +360,7 @@ void main() { res.object, isNotEmpty, ); - expect(res.model, startsWith('gpt-3.5-turbo')); + 
expect(res.model, startsWith('gpt-4o-mini')); expect(res.choices, hasLength(1)); final choice = res.choices.first; expect(choice.index, 0); @@ -386,103 +386,6 @@ void main() { expect(count, greaterThan(1)); }); - test('Test call chat completions API functions', () async { - const function = FunctionObject( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - parameters: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - - final request1 = CreateChatCompletionRequest( - model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, - ), - messages: [ - const ChatCompletionMessage.system( - content: 'You are a helpful assistant.', - ), - const ChatCompletionMessage.user( - content: ChatCompletionUserMessageContent.string( - 'What’s the weather like in Boston right now?', - ), - ), - ], - functions: [function], - functionCall: ChatCompletionFunctionCall.function( - ChatCompletionFunctionCallOption(name: function.name), - ), - ); - final res1 = await client.createChatCompletion(request: request1); - expect(res1.choices, hasLength(1)); - - final choice1 = res1.choices.first; - - final aiMessage1 = choice1.message; - expect(aiMessage1.role, ChatCompletionMessageRole.assistant); - expect(aiMessage1.content, isNull); - expect(aiMessage1.functionCall, isNotNull); - - final functionCall = aiMessage1.functionCall!; - expect(functionCall.name, function.name); - expect(functionCall.arguments, isNotEmpty); - final arguments = json.decode( - functionCall.arguments, - ) as Map; - expect(arguments.containsKey('location'), isTrue); - expect(arguments['location'], contains('Boston')); - - final functionResult = { - 'temperature': '22', - 'unit': 'celsius', - 'description': 'Sunny', - }; - - final request2 = CreateChatCompletionRequest( - model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, - ), - messages: [ - const ChatCompletionMessage.system( - content: 'You are a helpful assistant.', - ), - const ChatCompletionMessage.user( - content: ChatCompletionUserMessageContent.string( - 'What’s the weather like in Boston right now?', - ), - ), - ChatCompletionMessage.function( - name: function.name, - content: json.encode(functionResult), - ), - ], - functions: [function], - ); - final res2 = await client.createChatCompletion(request: request2); - expect(res2.choices, hasLength(1)); - - final choice2 = res2.choices.first; - expect(choice2.finishReason, ChatCompletionFinishReason.stop); - - final aiMessage2 = choice2.message; - expect(aiMessage2.role, ChatCompletionMessageRole.assistant); - expect(aiMessage2.content, contains('22')); - expect(aiMessage2.functionCall, isNull); - }); - test('Test jsonObject response format', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( From b7b25521891f321c126630dce70bfd14c8ab3bc0 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 26 Jul 2024 18:20:35 +0200 Subject: [PATCH 201/251] docs: Update Ollama documentation (#508) --- .../models/chat_models/integrations/ollama.md | 321 +++++++++++++++--- .../chat_models/integrations/ollama.dart | 258 ++++++++++++-- 2 files changed, 500 insertions(+), 79 deletions(-) diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md 
b/docs/modules/model_io/models/chat_models/integrations/ollama.md index 1e440c83..2521307a 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -2,13 +2,9 @@ Wrapper around [Ollama](https://ollama.ai) Completions API that enables to interact with the LLMs in a chat-like fashion. -Ollama allows you to run open-source large language models, such as Llama 3, locally. +Ollama allows you to run open-source large language models, such as Llama 3.1 or Gemma 2, locally. -Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. - -It optimizes setup and configuration details, including GPU usage. - -For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library). +Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage. ## Setup @@ -17,6 +13,30 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. Fetch a model via `ollama pull ` * e.g., for Llama 3: `ollama pull llama3.1` +3. Instantiate the `ChatOllama` class with the downloaded model. + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + ), +); +``` + +For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library). + +### Ollama base URL + +By default, `ChatOllama` uses 'http://localhost:11434/api' as base URL (default Ollama API URL). But if you are running Ollama on a different host, you can override it using the `baseUrl` parameter. + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + baseUrl: 'https://your-remote-server-where-ollama-is-running.com', + model: 'llama3.1', + ), +); +``` ## Usage @@ -44,7 +64,9 @@ print(res); // -> 'La traduction est : "J'aime le programming.' ``` -## Streaming +### Streaming + +Ollama supports streaming the output as the model generates it. ```dart final promptTemplate = ChatPromptTemplate.fromTemplates([ @@ -68,36 +90,7 @@ await stream.forEach(print); // 9 ``` -## JSON mode - -You can force the model to produce JSON output that you can easily parse using `JsonOutputParser`, useful for extracting structured data. - -```dart -final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - (ChatMessageType.system, 'You are an assistant that respond question using JSON format.'), - (ChatMessageType.human, '{question}'), -]); -final chat = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.1', - temperature: 0, - format: OllamaResponseFormat.json, - ), -); - -final chain = Runnable.getMapFromInput('question') - .pipe(promptTemplate) - .pipe(chat) - .pipe(JsonOutputParser()); - -final res = await chain.invoke( - 'What is the population of Spain, The Netherlands, and France?', -); -print(res); -// {Spain: 46735727, The Netherlands: 17398435, France: 65273538} -``` - -## Multimodal support +### Multimodal support Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava). @@ -125,14 +118,12 @@ print(res.output.content); // -> 'An Apple' ``` -## Tool calling +### Tool calling -`ChatOllama` supports tool calling. 
- -Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. +`ChatOllama` now offers support for native tool calling. This enables a model to answer a given prompt using tool(s) it knows about, making it possible for models to perform more complex tasks or interact with the outside world. It follows the standard [LangChain.dart tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). **Notes:** -- Tool calling requires Ollama 0.2.8 or newer. +- Tool calling requires [Ollama 0.3.0](https://github.com/ollama/ollama/releases/tag/v0.3.0) or newer. - Streaming tool calls is not supported at the moment. - Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1)). @@ -176,7 +167,251 @@ print(res.output.toolCalls); // }] ``` -## RAG (Retrieval-Augmented Generation) pipeline +As you can see, `ChatOllamaTools` support calling multiple tools in a single request. + +If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter: + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [tool], + toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), + ), +); +``` + +### JSON mode + +You can force the model to produce JSON output that you can easily parse using `JsonOutputParser`, useful for extracting structured data. + +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are an assistant that respond question using JSON format.'), + (ChatMessageType.human, '{question}'), +]); +final chat = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + format: OllamaResponseFormat.json, + ), +); + +final chain = Runnable.getMapFromInput('question') + .pipe(promptTemplate) + .pipe(chat) + .pipe(JsonOutputParser()); + +final res = await chain.invoke( + 'What is the population of Spain, The Netherlands, and France?', +); +print(res); +// {Spain: 46735727, The Netherlands: 17398435, France: 65273538} +``` + +## Examples + +### Answering questions with data from an external API + +Imagine you have an API that provides flight times between two cities: + +```dart +// Simulates an API call to get flight times +// In a real application, this would fetch data from a live database or API +String getFlightTimes(String departure, String arrival) { + final flights = { + 'NYC-LAX': { + 'departure': '08:00 AM', + 'arrival': '11:30 AM', + 'duration': '5h 30m', + }, + 'LAX-NYC': { + 'departure': '02:00 PM', + 'arrival': '10:30 PM', + 'duration': '5h 30m', + }, + 'LHR-JFK': { + 'departure': '10:00 AM', + 'arrival': '01:00 PM', + 'duration': '8h 00m', + }, + 'JFK-LHR': { + 'departure': '09:00 PM', + 'arrival': '09:00 AM', + 'duration': '7h 00m', + }, + 'CDG-DXB': { + 'departure': '11:00 AM', + 'arrival': '08:00 PM', + 'duration': '6h 00m', + }, + 'DXB-CDG': { + 'departure': '03:00 AM', + 'arrival': '07:30 AM', + 'duration': '7h 30m', + }, + }; + + final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}'; + return jsonEncode(flights[key] ?? 
{'error': 'Flight not found'}); +} +``` + +Using the tool calling capabilities of Ollama, we can provide the model with the ability to call this API whenever it needs to get flight times to answer a question. + +```dart +const getFlightTimesTool = ToolSpec( + name: 'get_flight_times', + description: 'Get the flight times between two cities', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'departure': { + 'type': 'string', + 'description': 'The departure city (airport code)', + }, + 'arrival': { + 'type': 'string', + 'description': 'The arrival city (airport code)', + }, + }, + 'required': ['departure', 'arrival'], + }, +); + +final chatModel = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [getFlightTimesTool], + ), +); + +final messages = [ + ChatMessage.humanText( + 'What is the flight time from New York (NYC) to Los Angeles (LAX)?', + ), +]; + +// First API call: Send the query and function description to the model +final response = await chatModel.invoke(PromptValue.chat(messages)); + +messages.add(response.output); + +// Check if the model decided to use the provided function +if (response.output.toolCalls.isEmpty) { + print("The model didn't use the function. Its response was:"); + print(response.output.content); + return; +} + +// Process function calls made by the model +for (final toolCall in response.output.toolCalls) { + final functionResponse = getFlightTimes( + toolCall.arguments['departure'], + toolCall.arguments['arrival'], + ); + // Add function response to the conversation + messages.add( + ChatMessage.tool( + toolCallId: toolCall.id, + content: functionResponse, + ), + ); +} + +// Second API call: Get final response from the model +final finalResponse = await chatModel.invoke(PromptValue.chat(messages)); +print(finalResponse.output.content); +// The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes. +``` + +### Extracting structured data with tools + +A useful application of tool calling is extracting structured data from unstructured text. In the following example, we use a tool to extract the names, heights, and hair colors of people mentioned in a passage. + +```dart +const tool = ToolSpec( + name: 'information_extraction', + description: 'Extracts the relevant information from the passage', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'people': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'description': 'The name of a person', + }, + 'height': { + 'type': 'number', + 'description': 'The height of the person in cm', + }, + 'hair_color': { + 'type': 'string', + 'description': 'The hair color of the person', + 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], + }, + }, + 'required': ['name', 'height', 'hair_color'], + }, + }, + }, + 'required': ['people'], + }, +); + +final model = ChatOllama( + defaultOptions: ChatOllamaOptions( + options: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + ), + tools: [tool], + toolChoice: ChatToolChoice.forced(name: tool.name), + ), +); + +final promptTemplate = ChatPromptTemplate.fromTemplate(''' +Extract and save the relevant entities mentioned in the following passage together with their properties. + +Passage: +{input}'''); + +final chain = Runnable.getMapFromInput() + .pipe(promptTemplate) + .pipe(model) + .pipe(ToolsOutputParser()); + +final res = await chain.invoke( + 'Alex is 5 feet tall. 
' + 'Claudia is 1 foot taller than Alex and jumps higher than him. ' + 'Claudia has orange hair and Alex is blonde.', +); +final extractedData = res.first.arguments; +print(extractedData); +// { +// people: [ +// { +// name: Alex, +// height: 152, +// hair_color: blonde +// }, +// { +// name: Claudia, +// height: 183, +// hair_color: orange +// } +// ] +// } +``` + +### RAG (Retrieval-Augmented Generation) pipeline We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `ChatOllama`. diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart index 5c47bb0e..0682326f 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart @@ -8,9 +8,11 @@ import 'package:langchain_ollama/langchain_ollama.dart'; void main(final List arguments) async { await _chatOllama(); await _chatOllamaStreaming(); - await _chatOllamaJsonMode(); await _chatOllamaMultimodal(); await _chatOllamaToolCalling(); + await _chatOllamaJsonMode(); + await _extraction(); + await _flights(); await _rag(); } @@ -67,32 +69,26 @@ Future _chatOllamaStreaming() async { // 9 } -Future _chatOllamaJsonMode() async { - final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - ( - ChatMessageType.system, - 'You are an assistant that respond question using JSON format.' - ), - (ChatMessageType.human, '{question}'), - ]); - final chat = ChatOllama( +Future _chatOllamaMultimodal() async { + final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llava', temperature: 0, - format: OllamaResponseFormat.json, ), ); - - final chain = Runnable.getMapFromInput('question') - .pipe(promptTemplate) - .pipe(chat) - .pipe(JsonOutputParser()); - - final res = await chain.invoke( - 'What is the population of Spain, The Netherlands, and France?', + final prompt = ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), ); - print(res); - // {Spain: 46735727, The Netherlands: 17398435, France: 65273538} + final res = await chatModel.invoke(PromptValue.chat([prompt])); + print(res.output.content); + // -> 'An Apple' } Future _chatOllamaToolCalling() async { @@ -137,26 +133,216 @@ Future _chatOllamaToolCalling() async { // }] } -Future _chatOllamaMultimodal() async { +Future _chatOllamaJsonMode() async { + final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are an assistant that respond question using JSON format.' 
+ ), + (ChatMessageType.human, '{question}'), + ]); + final chat = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + format: OllamaResponseFormat.json, + ), + ); + + final chain = Runnable.getMapFromInput('question') + .pipe(promptTemplate) + .pipe(chat) + .pipe(JsonOutputParser()); + + final res = await chain.invoke( + 'What is the population of Spain, The Netherlands, and France?', + ); + print(res); + // {Spain: 46735727, The Netherlands: 17398435, France: 65273538} +} + +Future _extraction() async { + const tool = ToolSpec( + name: 'information_extraction', + description: 'Extracts the relevant information from the passage', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'people': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'description': 'The name of a person', + }, + 'height': { + 'type': 'number', + 'description': 'The height of the person in cm', + }, + 'hair_color': { + 'type': 'string', + 'description': 'The hair color of the person', + 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], + }, + }, + 'required': ['name', 'height', 'hair_color'], + }, + }, + }, + 'required': ['people'], + }, + ); + + final model = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: const [tool], + toolChoice: ChatToolChoice.forced(name: tool.name), + ), + ); + + final promptTemplate = ChatPromptTemplate.fromTemplate(''' +Extract and save the relevant entities mentioned in the following passage together with their properties. + +Passage: +{input}'''); + + final chain = Runnable.getMapFromInput() + .pipe(promptTemplate) + .pipe(model) + .pipe(ToolsOutputParser()); + + final res = await chain.invoke( + 'Alex is 5 feet tall. ' + 'Claudia is 1 foot taller than Alex and jumps higher than him. ' + 'Claudia has orange hair and Alex is blonde.', + ); + final extractedData = res.first.arguments; + print(extractedData); + // { + // people: [ + // { + // name: Alex, + // height: 152, + // hair_color: blonde + // }, + // { + // name: Claudia, + // height: 183, + // hair_color: orange + // } + // ] + // } +} + +// Simulates an API call to get flight times +// In a real application, this would fetch data from a live database or API +String getFlightTimes(String departure, String arrival) { + final flights = { + 'NYC-LAX': { + 'departure': '08:00 AM', + 'arrival': '11:30 AM', + 'duration': '5h 30m', + }, + 'LAX-NYC': { + 'departure': '02:00 PM', + 'arrival': '10:30 PM', + 'duration': '5h 30m', + }, + 'LHR-JFK': { + 'departure': '10:00 AM', + 'arrival': '01:00 PM', + 'duration': '8h 00m', + }, + 'JFK-LHR': { + 'departure': '09:00 PM', + 'arrival': '09:00 AM', + 'duration': '7h 00m', + }, + 'CDG-DXB': { + 'departure': '11:00 AM', + 'arrival': '08:00 PM', + 'duration': '6h 00m', + }, + 'DXB-CDG': { + 'departure': '03:00 AM', + 'arrival': '07:30 AM', + 'duration': '7h 30m', + }, + }; + + final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}'; + return jsonEncode(flights[key] ?? 
{'error': 'Flight not found'}); +} + +Future _flights() async { + const getFlightTimesTool = ToolSpec( + name: 'get_flight_times', + description: 'Get the flight times between two cities', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'departure': { + 'type': 'string', + 'description': 'The departure city (airport code)', + }, + 'arrival': { + 'type': 'string', + 'description': 'The arrival city (airport code)', + }, + }, + 'required': ['departure', 'arrival'], + }, + ); + final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llava', + model: 'llama3.1', temperature: 0, + tools: [getFlightTimesTool], ), ); - final prompt = ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - data: base64.encode( - await File('./bin/assets/apple.jpeg').readAsBytes(), - ), + + final messages = [ + ChatMessage.humanText( + 'What is the flight time from New York (NYC) to Los Angeles (LAX)?', + ), + ]; + + // First API call: Send the query and function description to the model + final response = await chatModel.invoke(PromptValue.chat(messages)); + + messages.add(response.output); + + // Check if the model decided to use the provided function + if (response.output.toolCalls.isEmpty) { + print("The model didn't use the function. Its response was:"); + print(response.output.content); + return; + } + + // Process function calls made by the model + for (final toolCall in response.output.toolCalls) { + final functionResponse = getFlightTimes( + toolCall.arguments['departure'], + toolCall.arguments['arrival'], + ); + // Add function response to the conversation + messages.add( + ChatMessage.tool( + toolCallId: toolCall.id, + content: functionResponse, ), - ]), - ); - final res = await chatModel.invoke(PromptValue.chat([prompt])); - print(res.output.content); - // -> 'An Apple' + ); + } + + // Second API call: Get final response from the model + final finalResponse = await chatModel.invoke(PromptValue.chat(messages)); + print(finalResponse.output.content); + // The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes. 
} Future _rag() async { From c745b4f0175a44f60569afa280cf3ebdb5808bfb Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 26 Jul 2024 18:23:56 +0200 Subject: [PATCH 202/251] refactor: Remove ChatOllamaTools in favour of ChatOllama (#509) --- docs/_sidebar.md | 1 - .../models/chat_models/integrations/ollama.md | 2 +- .../chat_models/integrations/ollama_tools.md | 273 ---------------- .../integrations/ollama_tools.dart | 226 ------------- packages/langchain/README.md | 1 - packages/langchain_ollama/README.md | 1 - .../lib/src/chat_models/chat_models.dart | 2 - .../chat_models/chat_ollama/chat_ollama.dart | 2 - .../chat_ollama_tools/chat_ollama_tools.dart | 297 ------------------ .../chat_ollama_tools/mappers.dart | 1 - .../chat_models/chat_ollama_tools/types.dart | 119 ------- .../test/chat_models/chat_ollama_test.dart | 119 +++++-- .../chat_models/chat_ollama_tools_test.dart | 207 ------------ 13 files changed, 96 insertions(+), 1155 deletions(-) delete mode 100644 docs/modules/model_io/models/chat_models/integrations/ollama_tools.md delete mode 100644 examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart delete mode 100644 packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart delete mode 100644 packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart delete mode 100644 packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart delete mode 100644 packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart diff --git a/docs/_sidebar.md b/docs/_sidebar.md index bfb7aad0..c51de21b 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -63,7 +63,6 @@ - [GCP Vertex AI](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md) - [Google AI](/modules/model_io/models/chat_models/integrations/googleai.md) - [Ollama](/modules/model_io/models/chat_models/integrations/ollama.md) - - [OllamaTools](/modules/model_io/models/chat_models/integrations/ollama_tools.md) - [Mistral AI](/modules/model_io/models/chat_models/integrations/mistralai.md) - [OpenRouter](/modules/model_io/models/chat_models/integrations/open_router.md) - [Together AI](/modules/model_io/models/chat_models/integrations/together_ai.md) diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index 2521307a..d12b5b93 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -167,7 +167,7 @@ print(res.output.toolCalls); // }] ``` -As you can see, `ChatOllamaTools` support calling multiple tools in a single request. +As you can see, `ChatOllama` support calling multiple tools in a single request. If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter: diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama_tools.md b/docs/modules/model_io/models/chat_models/integrations/ollama_tools.md deleted file mode 100644 index 17334a5b..00000000 --- a/docs/modules/model_io/models/chat_models/integrations/ollama_tools.md +++ /dev/null @@ -1,273 +0,0 @@ -# ChatOllamaTools - -LangChain.dart offers an experimental wrapper around open source models run locally via [Ollama](https://ollama.ai) that enables [tool calling capabilities](/modules/model_io/models/chat_models/how_to/tools.md). 
- -> Warning: This is an experimental wrapper that attempts to add tool calling support to models that do not support it natively. Use with caution. - -More powerful and capable models will perform better with complex schema and/or multiple tools. For a complete list of supported models, see the [Ollama model library](https://ollama.ai/library). The examples below use Google's [Gemma2 9B model](https://ollama.com/library/gemma2) running locally. - -## Setup - -Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance: - -1. Download and install [Ollama](https://ollama.ai) -2. Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull gemma2` - -## Usage - -You can use `ChatOllamaTools` in a similar way to a regular `ChatOllama` wrapper. The main difference is that `ChatOllamaToolsOptions` accepts: -- `options`: the usual `ChatOllamaOptions` options -- `tools`: the list with the definition of the tools the model can call -- `toolChoice`: how the model should respond to tool calls -- `toolsSystemPromptTemplate`: the prompt template used to inform the user about the available tools - -`ChatOllamaTools` follows the standard [LangChain tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). - -```dart -const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, -); -final model = ChatOllamaTools( - defaultOptions: ChatOllamaToolsOptions( - options: ChatOllamaOptions( - model: 'gemma2', - temperature: 0, - ), - tools: [tool], - ), -); -final res = await model.invoke( - PromptValue.string("What's the weather in Barcelona?"), -); -print(res); -// ChatResult{ -// id: 51ede9a1-7ab7-4c81-803e-cfe237eb92ae, -// output: AIChatMessage{ -// content: , -// toolCalls: [ -// AIChatMessageToolCall{ -// id: 42139039-9251-4e1b-9f47-21f24da65be9, -// name: get_current_weather, -// arguments: {location: Barcelona, ES, unit: celsius}, -// } -// ], -// }, -// finishReason: FinishReason.stop, -// metadata: { -// model: gemma2, -// created_at: 2024-07-11T15:44:56.893216Z, -// done: true, -// total_duration: 2900101792, -// load_duration: 41286000, -// prompt_eval_count: 327, -// prompt_eval_duration: 453557000, -// eval_count: 57, -// eval_duration: 2401129000 -// }, -// usage: LanguageModelUsage{ -// promptTokens: 327, -// promptBillableCharacters: null, -// responseTokens: 57, -// responseBillableCharacters: null, -// totalTokens: 384 -// } -// } -``` - -If you want to extract only the tool calls, you can use the `ToolCallOutputParser`: - -```dart -final chain = model.pipe(ToolsOutputParser()); -final res2 = await chain.invoke( - PromptValue.string("What's the weather in Barcelona and Amsterdam?"), -); -print(res2); -// [ -// ParsedToolCall{ -// id: b62a9051-0193-4115-9bac-362005c40c2d, -// name: get_current_weather, -// arguments: {location: Barcelona, ES, unit: celsius}, -// }, -// ParsedToolCall{ -// id: 442ff44c-2a8e-4e16-9fc5-ddaf586a37ce, -// name: get_current_weather, -// arguments: {location: Amsterdam, NL, unit: celsius}, 
-// } -// ] -``` - -As you can see, `ChatOllamaTools` support calling multiple tools in a single request. - -If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter: - -```dart -final res3 = await chain.invoke( - PromptValue.string("What's the weather in Barcelona and Amsterdam?"), - options: ChatOllamaToolsOptions( - toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), - ), -); -``` - -Note: streaming is not supported at the moment. - -## Customizing the system prompt template - -Behind the scenes, `ChatOllamaTools` uses Ollama's JSON mode to restrict output to JSON, and passes tool schemas to the prompt as JSON schemas. - -You can find the default system prompt in `ChatOllamaToolsOptions.defaultToolsSystemPromtTemplate`. - -Because different models have different strengths, it may be helpful to pass in your own system prompt. Here's an example of how you can customize the system prompt template: - -```dart -const toolSystemPromptTemplate = ''' -You have access to these tools: -{tools} - -Based on the user input, select {tool_choice} from the available tools. - -Respond with a JSON containing a list of tool call objects. -The tool call objects should have two properties: -- "tool_name": The name of the selected tool (string) -- "tool_input": A JSON string with the input for the tool matching the tool's input schema - -Example response format: -```json -{{ - "tool_calls": [ - {{ - "tool_name": "tool_name", - "tool_input": "{{"param1":"value1","param2":"value2"}}" - }} - ] -}} - -Ensure your response is valid JSON and follows this exact format.'''; - -final model = ChatOllamaTools( - defaultOptions: const ChatOllamaToolsOptions( - options: ChatOllamaOptions( - model: 'gemma2', - temperature: 0, - ), - tools: [tool], - toolsSystemPromptTemplate: toolSystemPromptTemplate, - ), -); -``` - -You prompt template should contain the following placeholders: -- `{tools}`: where the list of available tools will be inserted -- `{tool_choice}`: where the instruction to select a certain tool will be inserted - -The model should return a JSON like: -```json -{ - "tool_calls": [ - { - "tool_name": "tool_name", - "tool_input": "{\"param1\":\"value1\",\"param2\":\"value2\"}" - } - ] -} -``` - -## Example: extracting structured data - -A useful application of tool calling is extracting structured data from unstructured text. In the following example, we use a tool to extract the names, heights, and hair colors of people mentioned in a passage. 
- -```dart -const tool = ToolSpec( - name: 'information_extraction', - description: 'Extracts the relevant information from the passage', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'people': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': { - 'type': 'string', - 'description': 'The name of a person', - }, - 'height': { - 'type': 'number', - 'description': 'The height of the person in cm', - }, - 'hair_color': { - 'type': 'string', - 'description': 'The hair color of the person', - 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], - }, - }, - 'required': ['name', 'height', 'hair_color'], - }, - }, - }, - 'required': ['people'], - }, -); - -final model = ChatOllamaTools( - defaultOptions: ChatOllamaToolsOptions( - options: ChatOllamaOptions( - model: 'gemma2', - temperature: 0, - ), - tools: [tool], - toolChoice: ChatToolChoice.forced(name: tool.name), - ), -); - -final promptTemplate = ChatPromptTemplate.fromTemplate(''' -Extract and save the relevant entities mentioned in the following passage together with their properties. - -Passage: -{input}'''); - -final chain = Runnable.getMapFromInput() - .pipe(promptTemplate) - .pipe(model) - .pipe(ToolsOutputParser()); - -final res = await chain.invoke( - 'Alex is 5 feet tall. ' - 'Claudia is 1 foot taller than Alex and jumps higher than him. ' - 'Claudia has orange hair and Alex is blonde.', -); -final extractedData = res.first.arguments; -print(extractedData); -// { -// people: [ -// { -// name: Alex, -// height: 152, -// hair_color: blonde -// }, -// { -// name: Claudia, -// height: 183, -// hair_color: orange -// } -// ] -// } -``` diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart deleted file mode 100644 index 486b8c1b..00000000 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart +++ /dev/null @@ -1,226 +0,0 @@ -// ignore_for_file: avoid_print, avoid_redundant_argument_values -import 'package:langchain/langchain.dart'; -import 'package:langchain_ollama/langchain_ollama.dart'; - -void main(final List arguments) async { - await _tools(); - await _customizingSystemPrompt(); - await _extraction(); -} - -const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. 
San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, -); - -Future _tools() async { - final model = ChatOllamaTools( - defaultOptions: const ChatOllamaToolsOptions( - options: ChatOllamaOptions( - model: 'gemma2', - temperature: 0, - ), - tools: [tool], - ), - ); - - final res = await model.invoke( - PromptValue.string("What's the weather in Barcelona?"), - ); - print(res); - // ChatResult{ - // id: 51ede9a1-7ab7-4c81-803e-cfe237eb92ae, - // output: AIChatMessage{ - // content: , - // toolCalls: [ - // AIChatMessageToolCall{ - // id: 42139039-9251-4e1b-9f47-21f24da65be9, - // name: get_current_weather, - // arguments: {location: Barcelona, ES, unit: celsius}, - // } - // ], - // }, - // finishReason: FinishReason.stop, - // metadata: { - // model: gemma2, - // created_at: 2024-07-11T15:44:56.893216Z, - // done: true, - // total_duration: 2900101792, - // load_duration: 41286000, - // prompt_eval_count: 327, - // prompt_eval_duration: 453557000, - // eval_count: 57, - // eval_duration: 2401129000 - // }, - // usage: LanguageModelUsage{ - // promptTokens: 327, - // promptBillableCharacters: null, - // responseTokens: 57, - // responseBillableCharacters: null, - // totalTokens: 384 - // } - // } - - final chain = model.pipe(ToolsOutputParser()); - final res2 = await chain.invoke( - PromptValue.string("What's the weather in Barcelona and Amsterdam?"), - ); - print(res2); - // [ - // ParsedToolCall{ - // id: b62a9051-0193-4115-9bac-362005c40c2d, - // name: get_current_weather, - // arguments: {location: Barcelona, ES, unit: celsius}, - // }, - // ParsedToolCall{ - // id: 442ff44c-2a8e-4e16-9fc5-ddaf586a37ce, - // name: get_current_weather, - // arguments: {location: Amsterdam, NL, unit: celsius}, - // } - // ] - - final res3 = await chain.invoke( - PromptValue.string("What's the weather in Barcelona and Amsterdam?"), - options: ChatOllamaToolsOptions( - toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), - ), - ); - print(res3); -} - -Future _customizingSystemPrompt() async { - const toolSystemPromptTemplate = ''' -You have access to these tools: -{tools} - -Based on the user input, select {tool_choice} from the available tools. - -Respond with a JSON containing a list of tool call objects. 
-The tool call objects should have two properties: -- "tool_name": The name of the selected tool (string) -- "tool_input": A JSON string with the input for the tool matching the tool's input schema - -Example response format: -```json -{{ - "tool_calls": [ - {{ - "tool_name": "tool_name", - "tool_input": "{{"param1":"value1","param2":"value2"}}" - }} - ] -}} -``` - -Ensure your response is valid JSON and follows this exact format.'''; - - final model = ChatOllamaTools( - defaultOptions: const ChatOllamaToolsOptions( - options: ChatOllamaOptions( - model: 'gemma2', - temperature: 0, - ), - tools: [tool], - toolsSystemPromptTemplate: toolSystemPromptTemplate, - ), - ); - - final res = await model.invoke( - PromptValue.string("What's the weather in Barcelona?"), - ); - print(res); -} - -Future _extraction() async { - const tool = ToolSpec( - name: 'information_extraction', - description: 'Extracts the relevant information from the passage', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'people': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': { - 'type': 'string', - 'description': 'The name of a person', - }, - 'height': { - 'type': 'number', - 'description': 'The height of the person in cm', - }, - 'hair_color': { - 'type': 'string', - 'description': 'The hair color of the person', - 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], - }, - }, - 'required': ['name', 'height', 'hair_color'], - }, - }, - }, - 'required': ['people'], - }, - ); - - final model = ChatOllamaTools( - defaultOptions: ChatOllamaToolsOptions( - options: const ChatOllamaOptions( - model: 'gemma2', - temperature: 0, - ), - tools: const [tool], - toolChoice: ChatToolChoice.forced(name: tool.name), - ), - ); - - final promptTemplate = ChatPromptTemplate.fromTemplate(''' -Extract and save the relevant entities mentioned in the following passage together with their properties. - -Passage: -{input}'''); - - final chain = Runnable.getMapFromInput() - .pipe(promptTemplate) - .pipe(model) - .pipe(ToolsOutputParser()); - - final res = await chain.invoke( - 'Alex is 5 feet tall. ' - 'Claudia is 1 foot taller than Alex and jumps higher than him. 
' - 'Claudia has orange hair and Alex is blonde.', - ); - final extractedData = res.first.arguments; - print(extractedData); - // { - // people: [ - // { - // name: Alex, - // height: 152, - // hair_color: blonde - // }, - // { - // name: Claudia, - // height: 183, - // hair_color: orange - // } - // ] - // } -} diff --git a/packages/langchain/README.md b/packages/langchain/README.md index fc16ffa9..1e4ad928 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -110,7 +110,6 @@ The following integrations are available in LangChain.dart: | [ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) | | [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://ollama.ai) | | [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) | -| [ChatOllamaTools](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama_tools) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) with tool-calling capabilities | | [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) | | [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) | diff --git a/packages/langchain_ollama/README.md b/packages/langchain_ollama/README.md index 0c37e80f..a0c8c7e4 100644 --- a/packages/langchain_ollama/README.md +++ b/packages/langchain_ollama/README.md @@ -14,7 +14,6 @@ * `Ollama`: wrapper around Ollama Completions API. - Chat models: * `ChatOllama`: wrapper around Ollama Chat API in a chat-like fashion. - * `ChatOllamaTools`: Wrapper around Ollama Chat API that enables tool calling capabilities. - Embeddings: * `OllamaEmbeddings`: wrapper around Ollama Embeddings API. 
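A minimal sketch of the `OllamaEmbeddings` wrapper listed above, assuming a locally pulled `llama3.1` model and the standard LangChain.dart `Embeddings` interface (`embedQuery` / `embedDocuments`); the base URL comment reflects the default used elsewhere in this package:

```dart
import 'package:langchain/langchain.dart';
import 'package:langchain_ollama/langchain_ollama.dart';

Future<void> main() async {
  // Assumes the default local base URL (http://localhost:11434/api);
  // pass `baseUrl` if Ollama runs elsewhere.
  final embeddings = OllamaEmbeddings(model: 'llama3.1');

  // Embed a standalone query string.
  final queryVector = await embeddings.embedQuery('What is LangChain.dart?');
  print(queryVector.length); // dimensionality depends on the model

  // Embed documents, e.g. before adding them to a vector store.
  final docVectors = await embeddings.embedDocuments([
    Document(pageContent: 'LangChain.dart brings LangChain to Dart.'),
    Document(pageContent: 'Ollama runs open-source LLMs locally.'),
  ]);
  print(docVectors.length); // 2
}
```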
diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_models.dart b/packages/langchain_ollama/lib/src/chat_models/chat_models.dart index 4b826ef4..0232e939 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_models.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_models.dart @@ -1,4 +1,2 @@ export 'chat_ollama/chat_ollama.dart'; export 'chat_ollama/types.dart'; -export 'chat_ollama_tools/chat_ollama_tools.dart'; -export 'chat_ollama_tools/types.dart' hide conversationalResponseTool; diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart index 8db88d0d..4b0e9c75 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart @@ -30,8 +30,6 @@ import 'types.dart'; /// /// - [Ollama API docs](https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion) /// -/// If you need to use tools, consider using the [ChatOllamaTools] instead. -/// /// ### Setup /// /// 1. Download and install [Ollama](https://ollama.ai) diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart deleted file mode 100644 index 82da6a95..00000000 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart +++ /dev/null @@ -1,297 +0,0 @@ -import 'dart:convert'; - -import 'package:collection/collection.dart' show IterableExtension; -import 'package:http/http.dart' as http; -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/prompts.dart'; -import 'package:langchain_tiktoken/langchain_tiktoken.dart'; -import 'package:ollama_dart/ollama_dart.dart'; -import 'package:uuid/uuid.dart'; - -import 'mappers.dart'; -import 'types.dart'; - -/// Wrapper around [Ollama](https://ollama.ai) Chat API that enables tool -/// calling capabilities. -/// -/// Warning: This is an experimental wrapper that attempts to add tool calling -/// support to models that do not support it natively. More powerful and -/// capable models will perform better with complex schema and/or multiple -/// tools. Use with caution. -/// -/// Example: -/// ```dart -/// const tool = ToolSpec( -/// name: 'get_current_weather', -/// description: 'Get the current weather in a given location', -/// inputJsonSchema: { -/// 'type': 'object', -/// 'properties': { -/// 'location': { -/// 'type': 'string', -/// 'description': 'The city and state, e.g. San Francisco, CA', -/// }, -/// 'unit': { -/// 'type': 'string', -/// 'enum': ['celsius', 'fahrenheit'], -/// }, -/// }, -/// 'required': ['location'], -/// }, -/// ); -/// final chatModel = ChatOllamaTools( -/// defaultOptions: ChatOllamaToolOptions( -/// options: ChatOllamaOptions(model: 'llama3.1'), -/// tools: [tool], -/// ), -/// ); -/// final prompt = PromptValue.string('What's the weather in Bangalore, India?'); -/// final res = await ollamaTools.invoke(prompt); -/// ``` -/// -/// If you don't need to use tools, use [ChatOllama] instead. -/// -/// ### Setup -/// -/// 1. Download and install [Ollama](https://ollama.ai) -/// 2. Fetch a model via `ollama pull ` -/// * e.g., for Llama 3: `ollama pull llama3.1` -/// -/// ### Ollama base URL -/// -/// By default, [ChatOllama] uses 'http://localhost:11434/api' as base URL -/// (default Ollama API URL). 
But if you are running Ollama on a different -/// one, you can override it using the [baseUrl] parameter. -class ChatOllamaTools extends BaseChatModel { - /// Create a new [ChatOllamaTools] instance. - /// - /// Main configuration options: - /// - `baseUrl`: the base URL of Ollama API. - /// - [ChatOllamaTools.defaultOptions] - /// - /// Advance configuration options: - /// - `headers`: global headers to send with every request. You can use - /// this to set custom headers, or to override the default headers. - /// - `queryParams`: global query parameters to send with every request. You - /// can use this to set custom query parameters. - /// - `client`: the HTTP client to use. You can set your own HTTP client if - /// you need further customization (e.g. to use a Socks5 proxy). - /// - [ChatOllama.encoding] - ChatOllamaTools({ - final String baseUrl = 'http://localhost:11434/api', - final Map? headers, - final Map? queryParams, - final http.Client? client, - super.defaultOptions = const ChatOllamaToolsOptions( - options: ChatOllamaOptions(model: defaultModel), - ), - this.encoding = 'cl100k_base', - }) : _client = OllamaClient( - baseUrl: baseUrl, - headers: headers, - queryParams: queryParams, - client: client, - ); - - /// A client for interacting with Ollama API. - final OllamaClient _client; - - /// The encoding to use by tiktoken when [tokenize] is called. - /// - /// Ollama does not provide any API to count tokens, so we use tiktoken - /// to get an estimation of the number of tokens in a prompt. - String encoding; - - /// A UUID generator. - late final Uuid _uuid = const Uuid(); - - @override - String get modelType => 'chat-ollama-tools'; - - /// The default model to use unless another is specified. - static const defaultModel = 'llama3.1'; - - @override - Future invoke( - PromptValue input, { - ChatOllamaToolsOptions? options, - }) async { - final id = _uuid.v4(); - final completion = await _client.generateChatCompletion( - request: _generateCompletionRequest(input, options), - ); - final result = completion.toChatResult(id); - return _parseResult(result); - } - - /// Creates a [GenerateChatCompletionRequest] from the given input. - GenerateChatCompletionRequest _generateCompletionRequest( - final PromptValue input, - final ChatOllamaToolsOptions? toolsOptions, { - final bool stream = false, - }) { - final messages = _formatPrompt(input, toolsOptions).toChatMessages(); - final options = toolsOptions?.options; - final defaultOptions = - this.defaultOptions.options ?? const ChatOllamaOptions(); - return GenerateChatCompletionRequest( - model: options?.model ?? defaultOptions.model ?? defaultModel, - messages: messages.toMessages(), - format: ResponseFormat.json, - keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, - stream: stream, - options: RequestOptions( - numKeep: options?.numKeep ?? defaultOptions.numKeep, - seed: options?.seed ?? defaultOptions.seed, - numPredict: options?.numPredict ?? defaultOptions.numPredict, - topK: options?.topK ?? defaultOptions.topK, - topP: options?.topP ?? defaultOptions.topP, - tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, - typicalP: options?.typicalP ?? defaultOptions.typicalP, - repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, - temperature: options?.temperature ?? defaultOptions.temperature, - repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty, - presencePenalty: - options?.presencePenalty ?? defaultOptions.presencePenalty, - frequencyPenalty: - options?.frequencyPenalty ?? 
defaultOptions.frequencyPenalty, - mirostat: options?.mirostat ?? defaultOptions.mirostat, - mirostatTau: options?.mirostatTau ?? defaultOptions.mirostatTau, - mirostatEta: options?.mirostatEta ?? defaultOptions.mirostatEta, - penalizeNewline: - options?.penalizeNewline ?? defaultOptions.penalizeNewline, - stop: options?.stop ?? defaultOptions.stop, - numa: options?.numa ?? defaultOptions.numa, - numCtx: options?.numCtx ?? defaultOptions.numCtx, - numBatch: options?.numBatch ?? defaultOptions.numBatch, - numGpu: options?.numGpu ?? defaultOptions.numGpu, - mainGpu: options?.mainGpu ?? defaultOptions.mainGpu, - lowVram: options?.lowVram ?? defaultOptions.lowVram, - f16Kv: options?.f16KV ?? defaultOptions.f16KV, - logitsAll: options?.logitsAll ?? defaultOptions.logitsAll, - vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly, - useMmap: options?.useMmap ?? defaultOptions.useMmap, - useMlock: options?.useMlock ?? defaultOptions.useMlock, - numThread: options?.numThread ?? defaultOptions.numThread, - ), - ); - } - - PromptValue _formatPrompt( - final PromptValue input, - final ChatOllamaToolsOptions? options, - ) { - final toolsSystemPromptTemplate = options?.toolsSystemPromptTemplate ?? - defaultOptions.toolsSystemPromptTemplate ?? - ChatOllamaToolsOptions.defaultToolsSystemPromtTemplate; - final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, toolsSystemPromptTemplate), - (ChatMessageType.messagesPlaceholder, 'input'), - ]); - final toolChoice = options?.toolChoice ?? defaultOptions.toolChoice; - final availableTools = options?.tools ?? defaultOptions.tools; - final tools = switch (toolChoice) { - // If toolChoice is auto, we include all the tools - ChatToolChoiceAuto() || null => [ - ...?availableTools, - conversationalResponseTool, - ], - // If toolChoice is none, we include only the conversational response tool - ChatToolChoiceNone() => [conversationalResponseTool], - // If toolChoice is required, we include only the user specified tools - ChatToolChoiceRequired() => availableTools!, - // If toolChoice is forced, we include only the forced tool - final ChatToolChoiceForced f => [ - availableTools!.firstWhere((t) => t.name == f.name), - ] - }; - final toolsJsonMap = json.encode( - tools.map((tool) => tool.toJson()).toList(growable: false), - ); - final toolChoiceInstructions = switch (toolChoice) { - ChatToolChoiceNone() => '`${conversationalResponseTool.name}` tool', - ChatToolChoiceAuto() || - ChatToolChoiceRequired() || - null => - 'one or more tools', - final ChatToolChoiceForced f => '`${f.name}` tool', - }; - return promptTemplate.formatPrompt({ - 'tools': toolsJsonMap, - 'tool_choice': toolChoiceInstructions, - 'input': input.toChatMessages(), - }); - } - - Future _parseResult(final ChatResult result) async { - try { - final output = result.output.content; - final outputMap = json.decode(output) as Map; - final toolCalls = (outputMap['tool_calls'] as List).map((t) { - final tool = t as Map; - final toolInput = tool['tool_input']; - final toolInputMap = json.decode(toolInput) as Map; - return AIChatMessageToolCall( - id: _uuid.v4(), - name: tool['tool_name'].toString(), - arguments: toolInputMap, - argumentsRaw: toolInput, - ); - }).toList(growable: false); - - final conversationalResponseToolCall = toolCalls - .firstWhereOrNull((t) => t.name == conversationalResponseTool.name); - final content = conversationalResponseToolCall != null - ? 
await conversationalResponseTool.invoke( - conversationalResponseTool.getInputFromJson( - conversationalResponseToolCall.arguments, - ), - ) - : ''; - final otherToolCalls = toolCalls - .where((t) => t.name != conversationalResponseTool.name) - .toList(growable: false); - - return ChatResult( - id: result.id, - output: AIChatMessage( - content: content, - toolCalls: otherToolCalls, - ), - finishReason: result.finishReason, - metadata: result.metadata, - usage: result.usage, - ); - } catch (e) { - throw Exception( - 'Model did not respond in valid json string format, ' - 'try improving your prompt, instruct to "respond in JSON"', - ); - } - } - - /// Tokenizes the given prompt using tiktoken. - /// - /// Currently Ollama does not provide a tokenizer for the models it supports. - /// So we use tiktoken and [encoding] model to get an approximation - /// for counting tokens. Mind that the actual tokens will be totally - /// different from the ones used by the Ollama model. - /// - /// If an encoding model is specified in [encoding] field, that - /// encoding is used instead. - /// - /// - [promptValue] The prompt to tokenize. - @override - Future> tokenize( - PromptValue promptValue, { - ChatOllamaToolsOptions? options, - }) async { - final encoding = getEncoding(this.encoding); - return encoding.encode(promptValue.toString()); - } - - @override - void close() { - _client.endSession(); - } -} diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart deleted file mode 100644 index 3a9ebb5a..00000000 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart +++ /dev/null @@ -1 +0,0 @@ -export '../chat_ollama/mappers.dart'; diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart deleted file mode 100644 index 7ad2615a..00000000 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart +++ /dev/null @@ -1,119 +0,0 @@ -import 'package:collection/collection.dart'; -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/tools.dart'; -import 'package:meta/meta.dart'; - -import '../chat_ollama/types.dart'; -import 'chat_ollama_tools.dart'; - -export '../chat_ollama/types.dart'; - -/// {@template chat_ollama_tools_options} -/// Options to pass into [ChatOllamaTools]. -/// {@endtemplate} -@immutable -class ChatOllamaToolsOptions extends ChatModelOptions { - /// {@macro chat_ollama_tools_options} - const ChatOllamaToolsOptions({ - this.options, - super.tools, - super.toolChoice, - this.toolsSystemPromptTemplate, - super.concurrencyLimit, - }); - - /// [ChatOllamaOptions] to pass into Ollama. - final ChatOllamaOptions? options; - - /// Prompt template for the system message where the model is instructed to - /// use the tools. - /// - /// The following placeholders can be used: - /// - `{tools}`: The list of tools available to the model. - /// - `{tool_choice}`: the tool choice the model must always select. - /// - /// If not provided, [defaultToolsSystemPromtTemplate] will be used. - final String? toolsSystemPromptTemplate; - - /// Default [toolsSystemPromptTemplate]. - static const String defaultToolsSystemPromtTemplate = ''' -You have access to these tools: -{tools} - -Based on the user input, select {tool_choice} from the available tools. - -Respond with a JSON containing a list of tool call objects. 
-The tool call objects should have two properties: -- "tool_name": The name of the selected tool (string) -- "tool_input": A JSON string with the input for the tool matching the tool's input schema - -Example response format: -```json -{{ - "tool_calls": [ - {{ - "tool_name": "tool_name", - "tool_input": "{{"param1":"value1","param2":"value2"}}" - }} - ] -}} -``` - -Ensure your response is valid JSON and follows this exact format. -'''; - - @override - ChatOllamaToolsOptions copyWith({ - final ChatOllamaOptions? options, - final List? tools, - final ChatToolChoice? toolChoice, - final String? toolsSystemPromptTemplate, - final int? concurrencyLimit, - }) { - return ChatOllamaToolsOptions( - options: options ?? this.options, - tools: tools ?? this.tools, - toolChoice: toolChoice ?? this.toolChoice, - toolsSystemPromptTemplate: - toolsSystemPromptTemplate ?? this.toolsSystemPromptTemplate, - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, - ); - } - - @override - ChatOllamaToolsOptions merge(covariant final ChatOllamaToolsOptions? other) { - return copyWith( - options: other?.options, - tools: other?.tools, - toolChoice: other?.toolChoice, - toolsSystemPromptTemplate: other?.toolsSystemPromptTemplate, - concurrencyLimit: other?.concurrencyLimit, - ); - } - - @override - bool operator ==(covariant final ChatOllamaToolsOptions other) { - return options == other.options && - const ListEquality().equals(tools, other.tools) && - toolChoice == other.toolChoice && - toolsSystemPromptTemplate == other.toolsSystemPromptTemplate; - } - - @override - int get hashCode { - return options.hashCode ^ - const ListEquality().hash(tools) ^ - toolChoice.hashCode ^ - toolsSystemPromptTemplate.hashCode; - } -} - -/// Default tool called if model decides no other tools should be called -/// for a given query. -final conversationalResponseTool = StringTool.fromFunction( - name: '_conversational_response', - description: - 'Respond conversationally if no other tools should be called for a given query.', - inputDescription: 'Conversational response to the user', - func: (input) => input, -); diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart index 66167f0f..fcceacdb 100644 --- a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart +++ b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart @@ -210,10 +210,10 @@ void main() { test('Test Multi-turn conversations', () async { final prompt = PromptValue.chat([ - ChatMessage.humanText('List the numbers from 1 to 9 in order. '), + ChatMessage.humanText('List the numbers from 1 to 9 in order.'), ChatMessage.ai('123456789'), ChatMessage.humanText( - 'Remove the number "4" from the list', + 'Remove the number "4" from the list. Output only the remaining numbers in ascending order.', ), ]); final res = await chatModel.invoke( @@ -253,31 +253,49 @@ void main() { expect(res.output.content.toLowerCase(), contains('apple')); }); - test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), - () async { - const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. 
San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, + const tool1 = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'enum': ['celsius', 'fahrenheit'], }, - 'required': ['location'], }, - ); + 'required': ['location'], + }, + ); + const tool2 = ToolSpec( + name: 'get_historic_weather', + description: 'Get the historic weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { final model = chatModel.bind( const ChatOllamaOptions( model: defaultModel, - tools: [tool], + tools: [tool1], ), ); @@ -290,13 +308,13 @@ void main() { expect(aiMessage1.toolCalls, hasLength(2)); final toolCall1 = aiMessage1.toolCalls.first; - expect(toolCall1.name, tool.name); + expect(toolCall1.name, tool1.name); expect(toolCall1.arguments.containsKey('location'), isTrue); expect(toolCall1.arguments['location'], contains('Boston')); expect(toolCall1.arguments['unit'], 'celsius'); final toolCall2 = aiMessage1.toolCalls.last; - expect(toolCall2.name, tool.name); + expect(toolCall2.name, tool1.name); expect(toolCall2.arguments.containsKey('location'), isTrue); expect(toolCall2.arguments['location'], contains('Madrid')); expect(toolCall2.arguments['unit'], 'celsius'); @@ -336,5 +354,58 @@ void main() { expect(aiMessage2.content, contains('22')); expect(aiMessage2.content, contains('25')); }); + + test('Test multi tool call', () async { + final res = await chatModel.invoke( + PromptValue.string( + "What's the weather in Vellore, India and in Barcelona, Spain?", + ), + options: const ChatOllamaOptions( + model: defaultModel, + tools: [tool1, tool2], + ), + ); + expect(res.output.toolCalls, hasLength(2)); + final toolCall1 = res.output.toolCalls.first; + expect(toolCall1.name, 'get_current_weather'); + expect(toolCall1.argumentsRaw, isNotEmpty); + expect(toolCall1.arguments, isNotEmpty); + expect(toolCall1.arguments['location'], 'Vellore, India'); + expect(toolCall1.arguments['unit'], 'celsius'); + final toolCall2 = res.output.toolCalls.last; + expect(toolCall2.name, 'get_current_weather'); + expect(toolCall2.argumentsRaw, isNotEmpty); + expect(toolCall2.arguments, isNotEmpty); + expect(toolCall2.arguments['location'], 'Barcelona, Spain'); + expect(toolCall2.arguments['unit'], 'celsius'); + expect(res.finishReason, FinishReason.stop); + }); + + test('Test ChatToolChoice.none', () async { + final res = await chatModel.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: const ChatOllamaOptions( + model: defaultModel, + tools: [tool1], + toolChoice: ChatToolChoice.none, + ), + ); + expect(res.output.toolCalls, isEmpty); + expect(res.output.content, isNotEmpty); + }); + + test('Test ChatToolChoice.forced', () async { + final res = await chatModel.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: ChatOllamaOptions( + model: defaultModel, + tools: const [tool1, tool2], + toolChoice: 
ChatToolChoice.forced(name: tool2.name), + ), + ); + expect(res.output.toolCalls, hasLength(1)); + final toolCall = res.output.toolCalls.first; + expect(toolCall.name, tool2.name); + }); }); } diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart deleted file mode 100644 index 7204591a..00000000 --- a/packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart +++ /dev/null @@ -1,207 +0,0 @@ -import 'dart:io'; - -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/language_models.dart'; -import 'package:langchain_core/prompts.dart'; -import 'package:langchain_core/tools.dart'; -import 'package:langchain_ollama/langchain_ollama.dart'; -import 'package:test/test.dart'; - -void main() { - group( - 'ChatOllamaTools tests', - skip: Platform.environment.containsKey('CI'), - () { - const defaultModel = 'gemma2'; - late ChatOllamaTools model; - const tool1 = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - const tool2 = ToolSpec( - name: 'get_historic_weather', - description: 'Get the historic weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - - setUp(() async { - model = ChatOllamaTools( - defaultOptions: const ChatOllamaToolsOptions( - options: ChatOllamaOptions( - model: defaultModel, - keepAlive: 2, - ), - tools: [tool1, tool2], - ), - ); - }); - - tearDown(() { - model.close(); - }); - - test('Test single tool call', () async { - final res = await model.invoke( - PromptValue.string("What's the weather in Vellore, India?"), - ); - expect(res.output.toolCalls, hasLength(1)); - final toolCall = res.output.toolCalls.first; - expect(toolCall.name, 'get_current_weather'); - expect(toolCall.argumentsRaw, isNotEmpty); - expect(toolCall.arguments, isNotEmpty); - expect(toolCall.arguments['location'], contains('Vellore')); - expect(toolCall.arguments['unit'], 'celsius'); - expect(res.finishReason, FinishReason.stop); - }); - - test('Test multi tool call', () async { - final res = await model.invoke( - PromptValue.string( - "What's the weather in Vellore, India and in Barcelona, Spain?", - ), - ); - expect(res.output.toolCalls, hasLength(2)); - final toolCall1 = res.output.toolCalls.first; - expect(toolCall1.name, 'get_current_weather'); - expect(toolCall1.argumentsRaw, isNotEmpty); - expect(toolCall1.arguments, isNotEmpty); - expect(toolCall1.arguments['location'], 'Vellore, India'); - expect(toolCall1.arguments['unit'], 'celsius'); - final toolCall2 = res.output.toolCalls.last; - expect(toolCall2.name, 'get_current_weather'); - expect(toolCall2.argumentsRaw, isNotEmpty); - expect(toolCall2.arguments, isNotEmpty); - expect(toolCall2.arguments['location'], 'Barcelona, Spain'); - expect(toolCall2.arguments['unit'], 'celsius'); - expect(res.finishReason, FinishReason.stop); - }); - - test('Test ChatToolChoice.none', () async { - final res = await model.invoke( - 
PromptValue.string("What's the weather in Vellore, India?"), - options: const ChatOllamaToolsOptions( - options: ChatOllamaOptions(model: defaultModel), - tools: [tool1], - toolChoice: ChatToolChoice.none, - ), - ); - expect(res.output.toolCalls, isEmpty); - expect(res.output.content, isNotEmpty); - }); - - test('Test ChatToolChoice.forced', () async { - final res = await model.invoke( - PromptValue.string("What's the weather in Vellore, India?"), - options: ChatOllamaToolsOptions( - options: const ChatOllamaOptions(model: defaultModel), - tools: const [tool1, tool2], - toolChoice: ChatToolChoice.forced(name: tool2.name), - ), - ); - expect(res.output.toolCalls, hasLength(1)); - final toolCall = res.output.toolCalls.first; - expect(toolCall.name, tool2.name); - }); - - test( - 'Should throw exception if model did not respond in right JSON string format', - () async { - const toolOptions = ChatOllamaToolsOptions( - options: ChatOllamaOptions(model: defaultModel), - tools: [tool1], - toolsSystemPromptTemplate: - 'You have access to the following tools: {tools} ' - 'You must always select one of the above tools ' - 'and respond in plain text.', - ); - - expect( - () async => model.invoke( - PromptValue.string("What's the weather in Vellore, India?"), - options: toolOptions, - ), - throwsException, - ); - }, - ); - - test( - 'Should return content if no other tools should be called for a given query', - () async { - const toolOptions = ChatOllamaToolsOptions( - options: ChatOllamaOptions(model: defaultModel), - tools: [], - ); - final res = await model.invoke( - PromptValue.string('Do you know the weather in Vellore, India?'), - options: toolOptions, - ); - expect(res.output.toolCalls, isEmpty); - expect(res.output.content, isNotEmpty); - }); - - test( - 'Should throw error if toolSystemPromptTemplate not in right format', - () async { - const toolOptions = ChatOllamaToolsOptions( - options: ChatOllamaOptions(model: defaultModel), - toolsSystemPromptTemplate: - 'You have access to the following tools: tools} ' - 'You must always select one of the above tools', - ); - expect( - () async => model.invoke( - PromptValue.string("What's the weather in Vellore, India?"), - options: toolOptions, - ), - throwsException, - ); - }, - ); - - test('Test ChatOllamaToolsOptions', () async { - const toolOptions = ChatOllamaToolsOptions( - options: ChatOllamaOptions( - model: defaultModel, - ), - tools: [tool1], - toolsSystemPromptTemplate: 'toolSystemPromptTemplate', - ); - - expect(toolOptions.options?.model, defaultModel); - expect( - toolOptions.toolsSystemPromptTemplate, - 'toolSystemPromptTemplate', - ); - expect(toolOptions.tools![0], tool1); - }); - }, - ); -} From 4a60d8cdf5c61d5b12e763311d3fb5d1f23339e6 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Fri, 26 Jul 2024 23:08:53 +0200 Subject: [PATCH 203/251] chore(release): publish packages - langchain@0.7.4 - langchain_anthropic@0.1.1 - langchain_chroma@0.2.1+1 - langchain_community@0.3.0 - langchain_core@0.3.4 - langchain_firebase@0.2.1 - langchain_google@0.6.1 - langchain_mistralai@0.2.2 - langchain_ollama@0.3.0 - langchain_openai@0.7.0 - langchain_pinecone@0.1.0+7 - langchain_supabase@0.1.1+1 - ollama_dart@0.2.0 - openai_dart@0.4.0 --- CHANGELOG.md | 120 ++++++++++++++++++ examples/browser_summarizer/pubspec.lock | 10 +- examples/browser_summarizer/pubspec.yaml | 6 +- examples/docs_examples/pubspec.lock | 22 ++-- examples/docs_examples/pubspec.yaml | 16 +-- examples/hello_world_backend/pubspec.lock | 8 +- examples/hello_world_backend/pubspec.yaml 
| 4 +- examples/hello_world_cli/pubspec.lock | 8 +- examples/hello_world_cli/pubspec.yaml | 4 +- examples/hello_world_flutter/pubspec.lock | 16 +-- examples/hello_world_flutter/pubspec.yaml | 10 +- examples/wikivoyage_eu/pubspec.lock | 10 +- examples/wikivoyage_eu/pubspec.yaml | 6 +- packages/anthropic_sdk_dart/CHANGELOG.md | 2 + packages/chromadb/CHANGELOG.md | 2 + packages/googleai_dart/CHANGELOG.md | 2 + packages/langchain/CHANGELOG.md | 10 ++ packages/langchain/pubspec.yaml | 4 +- packages/langchain_anthropic/CHANGELOG.md | 8 ++ packages/langchain_anthropic/pubspec.yaml | 4 +- packages/langchain_chroma/CHANGELOG.md | 6 + packages/langchain_chroma/pubspec.yaml | 10 +- packages/langchain_community/CHANGELOG.md | 7 + packages/langchain_community/pubspec.yaml | 6 +- packages/langchain_core/CHANGELOG.md | 8 ++ packages/langchain_core/pubspec.yaml | 2 +- packages/langchain_firebase/CHANGELOG.md | 8 ++ .../langchain_firebase/example/pubspec.lock | 6 +- .../langchain_firebase/example/pubspec.yaml | 4 +- packages/langchain_firebase/pubspec.lock | 2 +- packages/langchain_firebase/pubspec.yaml | 4 +- packages/langchain_google/CHANGELOG.md | 8 ++ packages/langchain_google/pubspec.yaml | 4 +- packages/langchain_mistralai/CHANGELOG.md | 8 ++ packages/langchain_mistralai/pubspec.yaml | 4 +- packages/langchain_ollama/CHANGELOG.md | 11 ++ packages/langchain_ollama/pubspec.yaml | 6 +- packages/langchain_openai/CHANGELOG.md | 12 ++ packages/langchain_openai/pubspec.yaml | 10 +- packages/langchain_pinecone/CHANGELOG.md | 6 + packages/langchain_pinecone/pubspec.yaml | 6 +- packages/langchain_supabase/CHANGELOG.md | 6 + packages/langchain_supabase/pubspec.yaml | 10 +- packages/mistralai_dart/CHANGELOG.md | 2 + packages/ollama_dart/CHANGELOG.md | 13 ++ packages/ollama_dart/pubspec.yaml | 4 +- packages/openai_dart/CHANGELOG.md | 11 ++ packages/openai_dart/pubspec.yaml | 4 +- packages/tavily_dart/CHANGELOG.md | 2 + packages/vertex_ai/CHANGELOG.md | 2 + 50 files changed, 359 insertions(+), 105 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aead8730..47d794f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,126 @@ Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. +## 2024-07-26 + +### Changes + +--- + +Packages with breaking changes: + + - [`langchain_community` - `v0.3.0`](#langchain_community---v030) + - [`langchain_ollama` - `v0.3.0`](#langchain_ollama---v030) + - [`langchain_openai` - `v0.7.0`](#langchain_openai---v070) + - [`ollama_dart` - `v0.2.0`](#ollama_dart---v020) + - [`openai_dart` - `v0.4.0`](#openai_dart---v040) + +Packages with other changes: + + - [`langchain` - `v0.7.4`](#langchain---v074) + - [`langchain_anthropic` - `v0.1.1`](#langchain_anthropic---v011) + - [`langchain_chroma` - `v0.2.1+1`](#langchain_chroma---v0211) + - [`langchain_core` - `v0.3.4`](#langchain_core---v034) + - [`langchain_firebase` - `v0.2.1`](#langchain_firebase---v021) + - [`langchain_google` - `v0.6.1`](#langchain_google---v061) + - [`langchain_mistralai` - `v0.2.2`](#langchain_mistralai---v022) + - [`langchain_pinecone` - `v0.1.0+7`](#langchain_pinecone---v0107) + - [`langchain_supabase` - `v0.1.1+1`](#langchain_supabase---v0111) + +--- + +#### `langchain` - `v0.7.4` + + - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). 
([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + - **DOCS**: Update README.md with Ollama tool call support. ([e016b0bd](https://github.com/davidmigloz/langchain_dart/commit/e016b0bd02065971faab2a3a48be625ff33a08cf)) + +#### `langchain_core` - `v0.3.4` + + - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + +#### `langchain_community` - `v0.3.0` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_ollama` - `v0.3.0` + + - **FEAT**: Add tool calling support in ChatOllama ([#505](https://github.com/davidmigloz/langchain_dart/issues/505)). ([6ffde204](https://github.com/davidmigloz/langchain_dart/commit/6ffde2043c1e865411c8b1096063619d6bcd80aa)) + - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). 
([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) + +#### `langchain_openai` - `v0.7.0` + + - **BREAKING** **FEAT**: Update ChatOpenAI default model to gpt-4o-mini ([#507](https://github.com/davidmigloz/langchain_dart/issues/507)). ([c7b8ce91](https://github.com/davidmigloz/langchain_dart/commit/c7b8ce91ac5b4dbe6bed563fae124a9f5ad76a84)) + - **FEAT**: Add support for disabling parallel tool calls in ChatOpenAI ([#493](https://github.com/davidmigloz/langchain_dart/issues/493)). ([c46d676d](https://github.com/davidmigloz/langchain_dart/commit/c46d676dee836f1d17e0d1fd61a8f1f0ba5c2881)) + - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) + - **FEAT**: Add support for service tier in ChatOpenAI ([#495](https://github.com/davidmigloz/langchain_dart/issues/495)). ([af79a4ff](https://github.com/davidmigloz/langchain_dart/commit/af79a4ffcadb207bfc704365462edebfca1ed6c7)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_anthropic` - `v0.1.1` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_firebase` - `v0.2.1` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). 
([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_google` - `v0.6.1` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_mistralai` - `v0.2.2` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_chroma` - `v0.2.1+1` + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_pinecone` - `v0.1.0+7` + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_supabase` - `v0.1.1+1` + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `ollama_dart` - `v0.2.0` + + - **FEAT**: Add tool calling support in ollama_dart ([#504](https://github.com/davidmigloz/langchain_dart/issues/504)). ([1ffdb41b](https://github.com/davidmigloz/langchain_dart/commit/1ffdb41b8f19941336c1cd911c73f0b3d46af975)) + - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) + - **FEAT**: Add support for Ollama version and model info ([#488](https://github.com/davidmigloz/langchain_dart/issues/488)). 
([a110ecb7](https://github.com/davidmigloz/langchain_dart/commit/a110ecb7f10e7975bd2416aa65add98984c6efb8)) + - **FEAT**: Add suffix support in Ollama completions API in ollama_dart ([#503](https://github.com/davidmigloz/langchain_dart/issues/503)). ([30d05a69](https://github.com/davidmigloz/langchain_dart/commit/30d05a69b07f88f803b9abfdf2fded9348a73490)) + - **BREAKING** **REFACTOR**: Change Ollama push model status type from enum to String ([#489](https://github.com/davidmigloz/langchain_dart/issues/489)). ([90c9ccd9](https://github.com/davidmigloz/langchain_dart/commit/90c9ccd986c7b679ed30225d2380120e17dfec41)) + - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) + +#### `openai_dart` - `v0.4.0` + + - **FEAT**: Add support for disabling parallel tool calls in openai_dart ([#492](https://github.com/davidmigloz/langchain_dart/issues/492)). ([a91e0719](https://github.com/davidmigloz/langchain_dart/commit/a91e07196278ae4da5917d52395f3c246fc35bf2)) + - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) + - **FEAT**: Support chunking strategy in file_search tool in openai_dart ([#496](https://github.com/davidmigloz/langchain_dart/issues/496)). ([cfa974a9](https://github.com/davidmigloz/langchain_dart/commit/cfa974a9e2fc4b79e5b66765b22d76710575d5bc)) + - **FEAT**: Add support for overrides in the file search tool in openai_dart ([#491](https://github.com/davidmigloz/langchain_dart/issues/491)). ([89605638](https://github.com/davidmigloz/langchain_dart/commit/89605638c465be37c2738258d840c21d32fe9554)) + - **FEAT**: Allow to customize OpenAI-Beta header in openai_dart ([#502](https://github.com/davidmigloz/langchain_dart/issues/502)). ([5fed8dbb](https://github.com/davidmigloz/langchain_dart/commit/5fed8dbb8205ba7925ca59d6f07a4f5e052b52b1)) + - **FEAT**: Add support for service tier in openai_dart ([#494](https://github.com/davidmigloz/langchain_dart/issues/494)). 
([0838e4b9](https://github.com/davidmigloz/langchain_dart/commit/0838e4b9f5bb25e29fbc163a0ff5cf3e64409d40)) + ## 2024-07-02 ### Changes diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index 621fd8c2..a18a608d 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -233,28 +233,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.2" + version: "0.3.0" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.3" + version: "0.7.0" langchain_tiktoken: dependency: transitive description: @@ -325,7 +325,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3+1" + version: "0.4.0" path: dependency: transitive description: diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 3868b314..922fd87b 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -13,9 +13,9 @@ dependencies: flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 js: ^0.7.1 - langchain: ^0.7.3 - langchain_community: 0.2.2 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_community: 0.3.0 + langchain_openai: ^0.7.0 shared_preferences: ^2.2.2 flutter: diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 806b30f6..f37088f5 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -245,63 +245,63 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_anthropic: dependency: "direct main" description: path: "../../packages/langchain_anthropic" relative: true source: path - version: "0.1.0" + version: "0.1.1" langchain_chroma: dependency: "direct main" description: path: "../../packages/langchain_chroma" relative: true source: path - version: "0.2.1" + version: "0.2.1+1" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.2" + version: "0.3.0" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.0" + version: "0.6.1" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.1" + version: "0.2.2" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.2+1" + version: "0.3.0" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.3" + version: "0.7.0" langchain_tiktoken: dependency: transitive description: @@ -355,14 +355,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.2" + 
version: "0.2.0" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3+1" + version: "0.4.0" path: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 4888329b..50df0b98 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -7,11 +7,11 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.3 - langchain_anthropic: ^0.1.0 - langchain_chroma: ^0.2.1 - langchain_community: 0.2.2 - langchain_google: ^0.6.0 - langchain_mistralai: ^0.2.1 - langchain_ollama: ^0.2.2+1 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_anthropic: ^0.1.1 + langchain_chroma: ^0.2.1+1 + langchain_community: 0.3.0 + langchain_google: ^0.6.1 + langchain_mistralai: ^0.2.2 + langchain_ollama: ^0.3.0 + langchain_openai: ^0.7.0 diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index 1acce35a..86030403 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -119,21 +119,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.3" + version: "0.7.0" langchain_tiktoken: dependency: transitive description: @@ -156,7 +156,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3+1" + version: "0.4.0" path: dependency: transitive description: diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index c091ef7c..171d3039 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -7,7 +7,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.3 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_openai: ^0.7.0 shelf: ^1.4.1 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 45f69561..4c5e1daa 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -111,21 +111,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.3" + version: "0.7.0" langchain_tiktoken: dependency: transitive description: @@ -148,7 +148,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3+1" + version: "0.4.0" path: dependency: transitive description: diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index 0e070b1d..531264b7 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -7,5 +7,5 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.3 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_openai: ^0.7.0 diff --git 
a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 9802cb30..74a75767 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -204,42 +204,42 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.0" + version: "0.6.1" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.1" + version: "0.2.2" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.2+1" + version: "0.3.0" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.3" + version: "0.7.0" langchain_tiktoken: dependency: transitive description: @@ -293,14 +293,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.2" + version: "0.2.0" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3+1" + version: "0.4.0" path: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index 786c1edd..a8419a7d 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -12,11 +12,11 @@ dependencies: equatable: ^2.0.5 flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 - langchain: ^0.7.3 - langchain_google: ^0.6.0 - langchain_mistralai: ^0.2.1 - langchain_ollama: ^0.2.2+1 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_google: ^0.6.1 + langchain_mistralai: ^0.2.2 + langchain_ollama: ^0.3.0 + langchain_openai: ^0.7.0 flutter: uses-material-design: true diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index da33efe5..4caa6233 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -167,28 +167,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.2" + version: "0.3.0" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.2+1" + version: "0.3.0" langchain_tiktoken: dependency: transitive description: @@ -235,7 +235,7 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.2" + version: "0.2.0" path: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index 70fc19fb..109cd236 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -7,6 +7,6 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.3 - 
langchain_ollama: ^0.2.2+1 - langchain_community: 0.2.2 + langchain: ^0.7.4 + langchain_ollama: ^0.3.0 + langchain_community: 0.3.0 diff --git a/packages/anthropic_sdk_dart/CHANGELOG.md b/packages/anthropic_sdk_dart/CHANGELOG.md index 85fb6080..600092c2 100644 --- a/packages/anthropic_sdk_dart/CHANGELOG.md +++ b/packages/anthropic_sdk_dart/CHANGELOG.md @@ -1,3 +1,5 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + ## 0.1.0 - **FEAT**: Add support for tool use in anthropic_sdk_dart client ([#469](https://github.com/davidmigloz/langchain_dart/issues/469)). ([81896cfd](https://github.com/davidmigloz/langchain_dart/commit/81896cfdfce116b010dd51391994251d2a836333)) diff --git a/packages/chromadb/CHANGELOG.md b/packages/chromadb/CHANGELOG.md index 7f7724ef..171047ca 100644 --- a/packages/chromadb/CHANGELOG.md +++ b/packages/chromadb/CHANGELOG.md @@ -1,3 +1,5 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + ## 0.2.0+1 - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) diff --git a/packages/googleai_dart/CHANGELOG.md b/packages/googleai_dart/CHANGELOG.md index e1d53bc8..437f20b2 100644 --- a/packages/googleai_dart/CHANGELOG.md +++ b/packages/googleai_dart/CHANGELOG.md @@ -1,3 +1,5 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + ## 0.1.0+2 - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index 219c9dc5..680dadd0 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -1,3 +1,13 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.7.4 + + - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). 
([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + - **DOCS**: Update README.md with Ollama tool call support. ([e016b0bd](https://github.com/davidmigloz/langchain_dart/commit/e016b0bd02065971faab2a3a48be625ff33a08cf)) + ## 0.7.3 > Note: Anthropic integration (`ChatAnthropic`) is now available in the new [`langchain_anthropic`](https://pub.dev/packages/langchain_anthropic) package. diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 9217303a..2a05e1c7 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain description: Build powerful LLM-based Dart and Flutter applications with LangChain.dart. -version: 0.7.3 +version: 0.7.4 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ dependencies: characters: ^1.3.0 collection: ">=1.17.0 <1.20.0" crypto: ^3.0.3 - langchain_core: 0.3.3 + langchain_core: 0.3.4 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_anthropic/CHANGELOG.md b/packages/langchain_anthropic/CHANGELOG.md index fe3d0a4d..03cf82f7 100644 --- a/packages/langchain_anthropic/CHANGELOG.md +++ b/packages/langchain_anthropic/CHANGELOG.md @@ -1,3 +1,11 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.1.1 + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.1.0 - **FEAT**: Add ChatAnthropic integration ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 180234ac..4a76ab62 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_anthropic description: Anthropic module for LangChain.dart (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.). 
-version: 0.1.0 +version: 0.1.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ dependencies: anthropic_sdk_dart: ^0.1.0 collection: ">=1.17.0 <1.20.0" http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 rxdart: ^0.27.7 diff --git a/packages/langchain_chroma/CHANGELOG.md b/packages/langchain_chroma/CHANGELOG.md index 218a218c..cb464d8e 100644 --- a/packages/langchain_chroma/CHANGELOG.md +++ b/packages/langchain_chroma/CHANGELOG.md @@ -1,3 +1,9 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.2.1+1 + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.2.1 - Update a dependency to the latest release. diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 395c1ca6..9ae0ce5d 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_chroma description: LangChain.dart integration module for Chroma open-source embedding database. -version: 0.2.1 +version: 0.2.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart @@ -19,12 +19,12 @@ environment: dependencies: chromadb: ^0.2.0+1 http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 meta: ^1.11.0 uuid: ^4.3.3 dev_dependencies: test: ^1.25.2 - langchain: ^0.7.3 - langchain_community: 0.2.2 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_community: 0.3.0 + langchain_openai: ^0.7.0 diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 3e7be761..01068b20 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -1,3 +1,10 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.3.0 + +- **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) +- **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.2.2 - **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). 
([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 55b71a63..0415ef78 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. -version: 0.2.2 +version: 0.3.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart @@ -22,7 +22,7 @@ dependencies: flat_buffers: ^23.5.26 http: ^1.1.0 json_path: ^0.7.1 - langchain_core: 0.3.3 + langchain_core: 0.3.4 math_expressions: ^2.4.0 meta: ^1.11.0 objectbox: ^4.0.1 @@ -31,7 +31,7 @@ dependencies: dev_dependencies: build_runner: ^2.4.9 - langchain_openai: ^0.6.3 + langchain_openai: ^0.7.0 objectbox_generator: ^4.0.0 test: ^1.25.2 diff --git a/packages/langchain_core/CHANGELOG.md b/packages/langchain_core/CHANGELOG.md index 7757bae9..9e750ed8 100644 --- a/packages/langchain_core/CHANGELOG.md +++ b/packages/langchain_core/CHANGELOG.md @@ -1,3 +1,11 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.3.4 + + - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + ## 0.3.3 - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index b1f7f159..56b13134 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_core description: Contains core abstractions of LangChain.dart and the LangChain Expression Language (LCEL). 
-version: 0.3.3 +version: 0.3.4 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index a0eb0aa4..d7aedb1f 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,3 +1,11 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.2.1 + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.2.0 > Note: `ChatFirebaseVertexAI` now uses `gemini-1.5-flash` model by default. diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 03e1dab8..e3f16b8b 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -262,21 +262,21 @@ packages: path: "../../langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_core: dependency: "direct overridden" description: path: "../../langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_firebase: dependency: "direct main" description: path: ".." relative: true source: path - version: "0.2.0" + version: "0.2.1" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index f1618ec8..ddb7010d 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -13,8 +13,8 @@ dependencies: flutter: sdk: flutter flutter_markdown: ^0.6.22 - langchain: 0.7.3 - langchain_firebase: 0.2.0 + langchain: 0.7.4 + langchain_firebase: 0.2.1 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 2927c037..b8f43f02 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -230,7 +230,7 @@ packages: path: "../langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 61201af0..2180b564 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). 
-version: 0.2.0 +version: 0.2.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart @@ -23,7 +23,7 @@ dependencies: firebase_auth: ^5.1.0 firebase_core: ^3.1.0 firebase_vertexai: ^0.2.2 - langchain_core: 0.3.3 + langchain_core: 0.3.4 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index 9964d000..fb087939 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -1,3 +1,11 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.6.1 + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.6.0 > Note: `ChatGoogleGenerativeAI` now uses `gemini-1.5-flash` model by default. diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index da0082a6..3adcb2ed 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_google description: LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). -version: 0.6.0 +version: 0.6.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart @@ -24,7 +24,7 @@ dependencies: googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 meta: ^1.11.0 uuid: ^4.3.3 vertex_ai: ^0.1.0+1 diff --git a/packages/langchain_mistralai/CHANGELOG.md b/packages/langchain_mistralai/CHANGELOG.md index 7b74ab46..acae7e78 100644 --- a/packages/langchain_mistralai/CHANGELOG.md +++ b/packages/langchain_mistralai/CHANGELOG.md @@ -1,3 +1,11 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.2.2 + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). 
([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.2.1 - Update a dependency to the latest release. diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 3c725d01..d237e3f9 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_mistralai description: LangChain.dart integration module for Mistral AI (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.2.1 +version: 0.2.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_mistralai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_mistralai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ environment: dependencies: collection: ">=1.17.0 <1.20.0" http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 mistralai_dart: ^0.0.3+3 diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index 4d475188..e8f7ae0e 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -1,3 +1,14 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.3.0 + + - **FEAT**: Add tool calling support in ChatOllama ([#505](https://github.com/davidmigloz/langchain_dart/issues/505)). ([6ffde204](https://github.com/davidmigloz/langchain_dart/commit/6ffde2043c1e865411c8b1096063619d6bcd80aa)) + - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) + ## 0.2.2+1 - **DOCS**: Update ChatOllama API docs. 
([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 33a60f44..a97ab982 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_ollama description: LangChain.dart integration module for Ollama (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). -version: 0.2.2+1 +version: 0.3.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart @@ -19,10 +19,10 @@ environment: dependencies: collection: ">=1.17.0 <1.20.0" http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.1.2 + ollama_dart: ^0.2.0 uuid: ^4.3.3 dev_dependencies: diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index c1503886..5b2c1ed7 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -1,3 +1,15 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.7.0 + + - **BREAKING** **FEAT**: Update ChatOpenAI default model to gpt-4o-mini ([#507](https://github.com/davidmigloz/langchain_dart/issues/507)). ([c7b8ce91](https://github.com/davidmigloz/langchain_dart/commit/c7b8ce91ac5b4dbe6bed563fae124a9f5ad76a84)) + - **FEAT**: Add support for disabling parallel tool calls in ChatOpenAI ([#493](https://github.com/davidmigloz/langchain_dart/issues/493)). ([c46d676d](https://github.com/davidmigloz/langchain_dart/commit/c46d676dee836f1d17e0d1fd61a8f1f0ba5c2881)) + - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) + - **FEAT**: Add support for service tier in ChatOpenAI ([#495](https://github.com/davidmigloz/langchain_dart/issues/495)). ([af79a4ff](https://github.com/davidmigloz/langchain_dart/commit/af79a4ffcadb207bfc704365462edebfca1ed6c7)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.6.3 - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). 
([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 4b628c54..f2e989a9 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_openai description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). -version: 0.6.3 +version: 0.7.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,13 +19,13 @@ environment: dependencies: collection: ">=1.17.0 <1.20.0" http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.3.3+1 + openai_dart: ^0.4.0 uuid: ^4.3.3 dev_dependencies: - langchain: ^0.7.3 - langchain_community: 0.2.2 + langchain: ^0.7.4 + langchain_community: 0.3.0 test: ^1.25.2 diff --git a/packages/langchain_pinecone/CHANGELOG.md b/packages/langchain_pinecone/CHANGELOG.md index 9faacd04..8925fcd8 100644 --- a/packages/langchain_pinecone/CHANGELOG.md +++ b/packages/langchain_pinecone/CHANGELOG.md @@ -1,3 +1,9 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.1.0+7 + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.1.0+6 - Update a dependency to the latest release. diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index 987d270b..ca636d73 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_pinecone description: LangChain.dart integration module for Pinecone fully-managed vector database. -version: 0.1.0+6 +version: 0.1.0+7 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart @@ -18,11 +18,11 @@ environment: dependencies: http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.3.3 dev_dependencies: test: ^1.25.2 - langchain_openai: ^0.6.3 + langchain_openai: ^0.7.0 diff --git a/packages/langchain_supabase/CHANGELOG.md b/packages/langchain_supabase/CHANGELOG.md index bb20d19b..3cd0af92 100644 --- a/packages/langchain_supabase/CHANGELOG.md +++ b/packages/langchain_supabase/CHANGELOG.md @@ -1,3 +1,9 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.1.1+1 + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.1.1 - Update a dependency to the latest release. 
diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index 8650f10c..d1120d45 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_supabase description: LangChain.dart integration module for Supabase (e.g. Supabase Vector). -version: 0.1.1 +version: 0.1.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart @@ -18,12 +18,12 @@ environment: dependencies: http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 meta: ^1.11.0 supabase: ^2.0.8 dev_dependencies: test: ^1.25.2 - langchain: ^0.7.3 - langchain_community: 0.2.2 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_community: 0.3.0 + langchain_openai: ^0.7.0 diff --git a/packages/mistralai_dart/CHANGELOG.md b/packages/mistralai_dart/CHANGELOG.md index df84e9cc..9a9234bb 100644 --- a/packages/mistralai_dart/CHANGELOG.md +++ b/packages/mistralai_dart/CHANGELOG.md @@ -1,3 +1,5 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + ## 0.0.3+3 - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index 5dfee162..af9e377b 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -1,3 +1,16 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.2.0 + +> Note: This release has breaking changes. + + - **FEAT**: Add tool calling support in ollama_dart ([#504](https://github.com/davidmigloz/langchain_dart/issues/504)). ([1ffdb41b](https://github.com/davidmigloz/langchain_dart/commit/1ffdb41b8f19941336c1cd911c73f0b3d46af975)) + - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) + - **FEAT**: Add support for Ollama version and model info ([#488](https://github.com/davidmigloz/langchain_dart/issues/488)). ([a110ecb7](https://github.com/davidmigloz/langchain_dart/commit/a110ecb7f10e7975bd2416aa65add98984c6efb8)) + - **FEAT**: Add suffix support in Ollama completions API in ollama_dart ([#503](https://github.com/davidmigloz/langchain_dart/issues/503)). ([30d05a69](https://github.com/davidmigloz/langchain_dart/commit/30d05a69b07f88f803b9abfdf2fded9348a73490)) + - **BREAKING** **REFACTOR**: Change Ollama push model status type from enum to String ([#489](https://github.com/davidmigloz/langchain_dart/issues/489)). ([90c9ccd9](https://github.com/davidmigloz/langchain_dart/commit/90c9ccd986c7b679ed30225d2380120e17dfec41)) + - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). 
([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) + ## 0.1.2 - **FEAT**: Add support for listing running Ollama models ([#451](https://github.com/davidmigloz/langchain_dart/issues/451)). ([cfaa31fb](https://github.com/davidmigloz/langchain_dart/commit/cfaa31fb8ce1dc128570c95d403809f71e0199d9)) diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index c967f29e..69b2f8a3 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: ollama_dart description: Dart Client for the Ollama API (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). -version: 0.1.2 +version: 0.2.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index 70e28286..d1fafe5f 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -1,3 +1,14 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.4.0 + + - **FEAT**: Add support for disabling parallel tool calls in openai_dart ([#492](https://github.com/davidmigloz/langchain_dart/issues/492)). ([a91e0719](https://github.com/davidmigloz/langchain_dart/commit/a91e07196278ae4da5917d52395f3c246fc35bf2)) + - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) + - **FEAT**: Support chunking strategy in file_search tool in openai_dart ([#496](https://github.com/davidmigloz/langchain_dart/issues/496)). ([cfa974a9](https://github.com/davidmigloz/langchain_dart/commit/cfa974a9e2fc4b79e5b66765b22d76710575d5bc)) + - **FEAT**: Add support for overrides in the file search tool in openai_dart ([#491](https://github.com/davidmigloz/langchain_dart/issues/491)). ([89605638](https://github.com/davidmigloz/langchain_dart/commit/89605638c465be37c2738258d840c21d32fe9554)) + - **FEAT**: Allow to customize OpenAI-Beta header in openai_dart ([#502](https://github.com/davidmigloz/langchain_dart/issues/502)). ([5fed8dbb](https://github.com/davidmigloz/langchain_dart/commit/5fed8dbb8205ba7925ca59d6f07a4f5e052b52b1)) + - **FEAT**: Add support for service tier in openai_dart ([#494](https://github.com/davidmigloz/langchain_dart/issues/494)). ([0838e4b9](https://github.com/davidmigloz/langchain_dart/commit/0838e4b9f5bb25e29fbc163a0ff5cf3e64409d40)) + ## 0.3.3+1 - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). 
([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index 5b2fef22..97f7f230 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: openai_dart description: Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. -version: 0.3.3+1 +version: 0.4.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 diff --git a/packages/tavily_dart/CHANGELOG.md b/packages/tavily_dart/CHANGELOG.md index 897f0558..9abf1cdf 100644 --- a/packages/tavily_dart/CHANGELOG.md +++ b/packages/tavily_dart/CHANGELOG.md @@ -1,3 +1,5 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + ## 0.1.0 - **FEAT**: Implement tavily_dart, a Dart client for Tavily API ([#456](https://github.com/davidmigloz/langchain_dart/issues/456)). ([fbfb79ba](https://github.com/davidmigloz/langchain_dart/commit/fbfb79bad81dbbd5844a90938fda79b201f20047)) diff --git a/packages/vertex_ai/CHANGELOG.md b/packages/vertex_ai/CHANGELOG.md index 18902a6a..217a19e8 100644 --- a/packages/vertex_ai/CHANGELOG.md +++ b/packages/vertex_ai/CHANGELOG.md @@ -1,3 +1,5 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + ## 0.1.0+1 - Update a dependency to the latest release. 
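The langchain_openai 0.7.0 entry above flags the switch of ChatOpenAI's default model to gpt-4o-mini as a breaking change. A minimal sketch, assuming the ChatOpenAI/ChatOpenAIOptions constructor parameters used elsewhere in this repository's examples, of how downstream code might pin the model explicitly when upgrading; the API key below is a placeholder, and the chosen model id is only an illustration:

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  // Pin the model explicitly so upgrading the package does not silently
  // change behaviour (0.7.0 moved the library default to gpt-4o-mini).
  final chatModel = ChatOpenAI(
    apiKey: 'OPENAI_API_KEY', // placeholder
    defaultOptions: const ChatOpenAIOptions(
      model: 'gpt-4o', // any explicit model id, instead of relying on the default
      temperature: 0,
    ),
  );
  final res = await chatModel.invoke(PromptValue.string('Hello!'));
  print(res.output.content);
}

Callers that prefer the new default can simply omit the model field; the point of the sketch is that the choice is made explicit rather than inherited from the package.
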
From e377ed2ebe2a857e4bbf3ed8cc5c2f05818f4190 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Sat, 27 Jul 2024 00:43:24 +0200 Subject: [PATCH 204/251] docs: Fix typo in langchain_ollama readme --- packages/langchain_ollama/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/langchain_ollama/README.md b/packages/langchain_ollama/README.md index a0c8c7e4..885dbf9f 100644 --- a/packages/langchain_ollama/README.md +++ b/packages/langchain_ollama/README.md @@ -2,7 +2,7 @@ [![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) [![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment) -[![langchain_ollama](https://img.shields.io/pub/v/langchain_ollam.svg)](https://pub.dev/packages/langchain_ollama) +[![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) [![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) [![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) From 64eca28f79cf6ad73be0e89e5ff57140e30c7b28 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Mon, 29 Jul 2024 18:33:24 +0200 Subject: [PATCH 205/251] refactor: Don't send OpenAI-Beta header in ChatOpenAI (#511) --- packages/langchain_openai/lib/src/chat_models/chat_openai.dart | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index dbd9c333..0dc31168 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -200,6 +200,7 @@ class ChatOpenAI extends BaseChatModel { }) : _client = OpenAIClient( apiKey: apiKey ?? '', organization: organization, + beta: null, baseUrl: baseUrl, headers: headers, queryParams: queryParams, From eb3f7f5ae697c07c2bf13f022a10eeca324c2289 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Mon, 29 Jul 2024 21:33:44 +0200 Subject: [PATCH 206/251] feat: Add support for min_p in Ollama (#512) --- .../src/chat_models/chat_ollama/mappers.dart | 1 + .../src/chat_models/chat_ollama/types.dart | 16 ++++++- .../langchain_ollama/lib/src/llms/ollama.dart | 14 +++--- .../langchain_ollama/lib/src/llms/types.dart | 25 +++++++++- .../src/generated/schema/request_options.dart | 10 +++- .../src/generated/schema/schema.freezed.dart | 47 +++++++++++++++++-- .../lib/src/generated/schema/schema.g.dart | 2 + packages/ollama_dart/oas/ollama-curated.yaml | 11 ++++- 8 files changed, 112 insertions(+), 14 deletions(-) diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart index 0c543a9c..ce12e70f 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart @@ -35,6 +35,7 @@ o.GenerateChatCompletionRequest generateChatCompletionRequest( numPredict: options?.numPredict ?? defaultOptions.numPredict, topK: options?.topK ?? defaultOptions.topK, topP: options?.topP ?? 
defaultOptions.topP, + minP: options?.minP ?? defaultOptions.minP, tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, typicalP: options?.typicalP ?? defaultOptions.typicalP, repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart index 1b3b4d77..6e9c0f20 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart @@ -23,6 +23,7 @@ class ChatOllamaOptions extends ChatModelOptions { this.numPredict, this.topK, this.topP, + this.minP, this.tfsZ, this.typicalP, this.repeatLastN, @@ -90,12 +91,20 @@ class ChatOllamaOptions extends ChatModelOptions { /// (Default: 40) final int? topK; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more + /// Works together with [topK]. A higher value (e.g., 0.95) will lead to more /// diverse text, while a lower value (e.g., 0.5) will generate more focused /// and conservative text. /// (Default: 0.9) final double? topP; + /// Alternative to the [topP], and aims to ensure a balance of quality and + /// variety. [minP] represents the minimum probability for a token to be + /// considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability + /// of 0.9, logits with a value less than 0.05*0.9=0.045 are filtered out. + /// (Default: 0.0) + final double? minP; + /// Tail free sampling is used to reduce the impact of less probable tokens /// from the output. A higher value (e.g., 2.0) will reduce the impact more, /// while a value of 1.0 disables this setting. @@ -213,6 +222,7 @@ class ChatOllamaOptions extends ChatModelOptions { final int? numPredict, final int? topK, final double? topP, + final double? minP, final double? tfsZ, final double? typicalP, final int? repeatLastN, @@ -248,6 +258,7 @@ class ChatOllamaOptions extends ChatModelOptions { numPredict: numPredict ?? this.numPredict, topK: topK ?? this.topK, topP: topP ?? this.topP, + minP: minP ?? this.minP, tfsZ: tfsZ ?? this.tfsZ, typicalP: typicalP ?? this.typicalP, repeatLastN: repeatLastN ?? this.repeatLastN, @@ -287,6 +298,7 @@ class ChatOllamaOptions extends ChatModelOptions { numPredict: other?.numPredict, topK: other?.topK, topP: other?.topP, + minP: other?.minP, tfsZ: other?.tfsZ, typicalP: other?.typicalP, repeatLastN: other?.repeatLastN, @@ -325,6 +337,7 @@ class ChatOllamaOptions extends ChatModelOptions { numPredict == other.numPredict && topK == other.topK && topP == other.topP && + minP == other.minP && tfsZ == other.tfsZ && typicalP == other.typicalP && repeatLastN == other.repeatLastN && @@ -362,6 +375,7 @@ class ChatOllamaOptions extends ChatModelOptions { numPredict.hashCode ^ topK.hashCode ^ topP.hashCode ^ + minP.hashCode ^ tfsZ.hashCode ^ typicalP.hashCode ^ repeatLastN.hashCode ^ diff --git a/packages/langchain_ollama/lib/src/llms/ollama.dart b/packages/langchain_ollama/lib/src/llms/ollama.dart index b3601f6e..db352184 100644 --- a/packages/langchain_ollama/lib/src/llms/ollama.dart +++ b/packages/langchain_ollama/lib/src/llms/ollama.dart @@ -215,12 +215,13 @@ class Ollama extends BaseLLM { return GenerateCompletionRequest( model: options?.model ?? defaultOptions.model ?? 
defaultModel, prompt: prompt, - system: options?.system, - template: options?.template, - context: options?.context, - format: options?.format?.toResponseFormat(), - raw: options?.raw, - keepAlive: options?.keepAlive, + system: options?.system ?? defaultOptions.system, + suffix: options?.suffix ?? defaultOptions.suffix, + template: options?.template ?? defaultOptions.template, + context: options?.context ?? defaultOptions.context, + format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), + raw: options?.raw ?? defaultOptions.raw, + keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, stream: stream, options: RequestOptions( numKeep: options?.numKeep ?? defaultOptions.numKeep, @@ -228,6 +229,7 @@ class Ollama extends BaseLLM { numPredict: options?.numPredict ?? defaultOptions.numPredict, topK: options?.topK ?? defaultOptions.topK, topP: options?.topP ?? defaultOptions.topP, + minP: options?.minP ?? defaultOptions.minP, tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, typicalP: options?.typicalP ?? defaultOptions.typicalP, repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, diff --git a/packages/langchain_ollama/lib/src/llms/types.dart b/packages/langchain_ollama/lib/src/llms/types.dart index ecad337d..a8807248 100644 --- a/packages/langchain_ollama/lib/src/llms/types.dart +++ b/packages/langchain_ollama/lib/src/llms/types.dart @@ -14,6 +14,7 @@ class OllamaOptions extends LLMOptions { const OllamaOptions({ super.model, this.system, + this.suffix, this.template, this.context, this.format, @@ -24,6 +25,7 @@ class OllamaOptions extends LLMOptions { this.numPredict, this.topK, this.topP, + this.minP, this.tfsZ, this.typicalP, this.repeatLastN, @@ -54,6 +56,9 @@ class OllamaOptions extends LLMOptions { /// The system prompt (Overrides what is defined in the Modelfile). final String? system; + /// The text that comes after the inserted text. + final String? suffix; + /// The full prompt or prompt template (overrides what is defined in the /// Modelfile). final String? template; @@ -109,12 +114,20 @@ class OllamaOptions extends LLMOptions { /// (Default: 40) final int? topK; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more + /// Works together with [topK]. A higher value (e.g., 0.95) will lead to more /// diverse text, while a lower value (e.g., 0.5) will generate more focused /// and conservative text. /// (Default: 0.9) final double? topP; + /// Alternative to the [topP], and aims to ensure a balance of quality and + /// variety. [minP] represents the minimum probability for a token to be + /// considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability + /// of 0.9, logits with a value less than 0.05*0.9=0.045 are filtered out. + /// (Default: 0.0) + final double? minP; + /// Tail free sampling is used to reduce the impact of less probable tokens /// from the output. A higher value (e.g., 2.0) will reduce the impact more, /// while a value of 1.0 disables this setting. @@ -226,6 +239,7 @@ class OllamaOptions extends LLMOptions { OllamaOptions copyWith({ final String? model, final String? system, + final String? suffix, final String? template, final List? context, final OllamaResponseFormat? format, @@ -236,6 +250,7 @@ class OllamaOptions extends LLMOptions { final int? numPredict, final int? topK, final double? topP, + final double? minP, final double? tfsZ, final double? typicalP, final int? 
repeatLastN, @@ -265,6 +280,7 @@ class OllamaOptions extends LLMOptions { return OllamaOptions( model: model ?? this.model, system: system ?? this.system, + suffix: suffix ?? this.suffix, template: template ?? this.template, context: context ?? this.context, format: format ?? this.format, @@ -275,6 +291,7 @@ class OllamaOptions extends LLMOptions { numPredict: numPredict ?? this.numPredict, topK: topK ?? this.topK, topP: topP ?? this.topP, + minP: minP ?? this.minP, tfsZ: tfsZ ?? this.tfsZ, typicalP: typicalP ?? this.typicalP, repeatLastN: repeatLastN ?? this.repeatLastN, @@ -308,6 +325,7 @@ class OllamaOptions extends LLMOptions { return copyWith( model: other?.model, system: other?.system, + suffix: other?.suffix, template: other?.template, context: other?.context, format: other?.format, @@ -318,6 +336,7 @@ class OllamaOptions extends LLMOptions { numPredict: other?.numPredict, topK: other?.topK, topP: other?.topP, + minP: other?.minP, tfsZ: other?.tfsZ, typicalP: other?.typicalP, repeatLastN: other?.repeatLastN, @@ -352,6 +371,7 @@ class OllamaOptions extends LLMOptions { runtimeType == other.runtimeType && model == other.model && system == other.system && + suffix == other.suffix && template == other.template && const ListEquality().equals(context, other.context) && format == other.format && @@ -362,6 +382,7 @@ class OllamaOptions extends LLMOptions { numPredict == other.numPredict && topK == other.topK && topP == other.topP && + minP == other.minP && tfsZ == other.tfsZ && typicalP == other.typicalP && repeatLastN == other.repeatLastN && @@ -393,6 +414,7 @@ class OllamaOptions extends LLMOptions { int get hashCode { return model.hashCode ^ system.hashCode ^ + suffix.hashCode ^ template.hashCode ^ const ListEquality().hash(context) ^ format.hashCode ^ @@ -403,6 +425,7 @@ class OllamaOptions extends LLMOptions { numPredict.hashCode ^ topK.hashCode ^ topP.hashCode ^ + minP.hashCode ^ tfsZ.hashCode ^ typicalP.hashCode ^ repeatLastN.hashCode ^ diff --git a/packages/ollama_dart/lib/src/generated/schema/request_options.dart b/packages/ollama_dart/lib/src/generated/schema/request_options.dart index b6288d57..940d09d4 100644 --- a/packages/ollama_dart/lib/src/generated/schema/request_options.dart +++ b/packages/ollama_dart/lib/src/generated/schema/request_options.dart @@ -30,10 +30,16 @@ class RequestOptions with _$RequestOptions { /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? topK, - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + /// probability for a token to be considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) + @JsonKey(name: 'min_p', includeIfNull: false) double? minP, + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, @@ -136,6 +142,7 @@ class RequestOptions with _$RequestOptions { 'num_predict', 'top_k', 'top_p', + 'min_p', 'tfs_z', 'typical_p', 'repeat_last_n', @@ -175,6 +182,7 @@ class RequestOptions with _$RequestOptions { 'num_predict': numPredict, 'top_k': topK, 'top_p': topP, + 'min_p': minP, 'tfs_z': tfsZ, 'typical_p': typicalP, 'repeat_last_n': repeatLastN, diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index b9128995..1c77269c 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -612,11 +612,18 @@ mixin _$RequestOptions { @JsonKey(name: 'top_k', includeIfNull: false) int? get topK => throw _privateConstructorUsedError; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; + /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + /// probability for a token to be considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) + @JsonKey(name: 'min_p', includeIfNull: false) + double? get minP => throw _privateConstructorUsedError; + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) @@ -745,6 +752,7 @@ abstract class $RequestOptionsCopyWith<$Res> { @JsonKey(name: 'num_predict', includeIfNull: false) int? numPredict, @JsonKey(name: 'top_k', includeIfNull: false) int? topK, @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @JsonKey(name: 'min_p', includeIfNull: false) double? minP, @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) double? typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? repeatLastN, @@ -793,6 +801,7 @@ class _$RequestOptionsCopyWithImpl<$Res, $Val extends RequestOptions> Object? numPredict = freezed, Object? topK = freezed, Object? topP = freezed, + Object? minP = freezed, Object? tfsZ = freezed, Object? typicalP = freezed, Object? repeatLastN = freezed, @@ -839,6 +848,10 @@ class _$RequestOptionsCopyWithImpl<$Res, $Val extends RequestOptions> ? _value.topP : topP // ignore: cast_nullable_to_non_nullable as double?, + minP: freezed == minP + ? _value.minP + : minP // ignore: cast_nullable_to_non_nullable + as double?, tfsZ: freezed == tfsZ ? _value.tfsZ : tfsZ // ignore: cast_nullable_to_non_nullable @@ -953,6 +966,7 @@ abstract class _$$RequestOptionsImplCopyWith<$Res> @JsonKey(name: 'num_predict', includeIfNull: false) int? numPredict, @JsonKey(name: 'top_k', includeIfNull: false) int? topK, @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @JsonKey(name: 'min_p', includeIfNull: false) double? 
minP, @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) double? typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? repeatLastN, @@ -999,6 +1013,7 @@ class __$$RequestOptionsImplCopyWithImpl<$Res> Object? numPredict = freezed, Object? topK = freezed, Object? topP = freezed, + Object? minP = freezed, Object? tfsZ = freezed, Object? typicalP = freezed, Object? repeatLastN = freezed, @@ -1045,6 +1060,10 @@ class __$$RequestOptionsImplCopyWithImpl<$Res> ? _value.topP : topP // ignore: cast_nullable_to_non_nullable as double?, + minP: freezed == minP + ? _value.minP + : minP // ignore: cast_nullable_to_non_nullable + as double?, tfsZ: freezed == tfsZ ? _value.tfsZ : tfsZ // ignore: cast_nullable_to_non_nullable @@ -1154,6 +1173,7 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'num_predict', includeIfNull: false) this.numPredict, @JsonKey(name: 'top_k', includeIfNull: false) this.topK, @JsonKey(name: 'top_p', includeIfNull: false) this.topP, + @JsonKey(name: 'min_p', includeIfNull: false) this.minP, @JsonKey(name: 'tfs_z', includeIfNull: false) this.tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) this.typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) this.repeatLastN, @@ -1210,12 +1230,20 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'top_k', includeIfNull: false) final int? topK; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; + /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + /// probability for a token to be considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) + @override + @JsonKey(name: 'min_p', includeIfNull: false) + final double? minP; + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1) @override @@ -1362,7 +1390,7 @@ class _$RequestOptionsImpl extends _RequestOptions { @override String toString() { - return 'RequestOptions(numKeep: $numKeep, seed: $seed, numPredict: $numPredict, topK: $topK, topP: $topP, tfsZ: $tfsZ, typicalP: $typicalP, repeatLastN: $repeatLastN, temperature: $temperature, repeatPenalty: $repeatPenalty, presencePenalty: $presencePenalty, frequencyPenalty: $frequencyPenalty, mirostat: $mirostat, mirostatTau: $mirostatTau, mirostatEta: $mirostatEta, penalizeNewline: $penalizeNewline, stop: $stop, numa: $numa, numCtx: $numCtx, numBatch: $numBatch, numGpu: $numGpu, mainGpu: $mainGpu, lowVram: $lowVram, f16Kv: $f16Kv, logitsAll: $logitsAll, vocabOnly: $vocabOnly, useMmap: $useMmap, useMlock: $useMlock, numThread: $numThread)'; + return 'RequestOptions(numKeep: $numKeep, seed: $seed, numPredict: $numPredict, topK: $topK, topP: $topP, minP: $minP, tfsZ: $tfsZ, typicalP: $typicalP, repeatLastN: $repeatLastN, temperature: $temperature, repeatPenalty: $repeatPenalty, presencePenalty: $presencePenalty, frequencyPenalty: $frequencyPenalty, mirostat: $mirostat, mirostatTau: $mirostatTau, mirostatEta: $mirostatEta, penalizeNewline: $penalizeNewline, stop: $stop, numa: $numa, numCtx: $numCtx, numBatch: $numBatch, numGpu: $numGpu, mainGpu: $mainGpu, lowVram: $lowVram, f16Kv: $f16Kv, logitsAll: $logitsAll, vocabOnly: $vocabOnly, useMmap: $useMmap, useMlock: $useMlock, numThread: $numThread)'; } @override @@ -1376,6 +1404,7 @@ class _$RequestOptionsImpl extends _RequestOptions { other.numPredict == numPredict) && (identical(other.topK, topK) || other.topK == topK) && (identical(other.topP, topP) || other.topP == topP) && + (identical(other.minP, minP) || other.minP == minP) && (identical(other.tfsZ, tfsZ) || other.tfsZ == tfsZ) && (identical(other.typicalP, typicalP) || other.typicalP == typicalP) && @@ -1426,6 +1455,7 @@ class _$RequestOptionsImpl extends _RequestOptions { numPredict, topK, topP, + minP, tfsZ, typicalP, repeatLastN, @@ -1474,6 +1504,7 @@ abstract class _RequestOptions extends RequestOptions { @JsonKey(name: 'num_predict', includeIfNull: false) final int? numPredict, @JsonKey(name: 'top_k', includeIfNull: false) final int? topK, @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, + @JsonKey(name: 'min_p', includeIfNull: false) final double? minP, @JsonKey(name: 'tfs_z', includeIfNull: false) final double? tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) final double? typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) @@ -1536,12 +1567,20 @@ abstract class _RequestOptions extends RequestOptions { int? get topK; @override - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; @override + /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + /// probability for a token to be considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) + @JsonKey(name: 'min_p', includeIfNull: false) + double? 
get minP; + @override + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index fbf45bc0..473e7825 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -67,6 +67,7 @@ _$RequestOptionsImpl _$$RequestOptionsImplFromJson(Map json) => numPredict: json['num_predict'] as int?, topK: json['top_k'] as int?, topP: (json['top_p'] as num?)?.toDouble(), + minP: (json['min_p'] as num?)?.toDouble(), tfsZ: (json['tfs_z'] as num?)?.toDouble(), typicalP: (json['typical_p'] as num?)?.toDouble(), repeatLastN: json['repeat_last_n'] as int?, @@ -108,6 +109,7 @@ Map _$$RequestOptionsImplToJson( writeNotNull('num_predict', instance.numPredict); writeNotNull('top_k', instance.topK); writeNotNull('top_p', instance.topP); + writeNotNull('min_p', instance.minP); writeNotNull('tfs_z', instance.tfsZ); writeNotNull('typical_p', instance.typicalP); writeNotNull('repeat_last_n', instance.repeatLastN); diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index b7c04cae..0939dfb3 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -360,8 +360,17 @@ components: format: float nullable: true description: | - Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + min_p: + type: number + format: float + nullable: true + description: | + Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + probability for a token to be considered, relative to the probability of the most likely token. For + example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + than 0.05*0.9=0.045 are filtered out. 
(Default: 0.0) tfs_z: type: number format: float From 1c9d8783c254ee2cb6f84d5202eb761706a9e51e Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 10 Aug 2024 18:31:48 +0200 Subject: [PATCH 207/251] build: Update dependencies (#520) --- examples/browser_summarizer/pubspec.lock | 84 +++++++++---------- examples/browser_summarizer/pubspec.yaml | 8 +- examples/docs_examples/pubspec.lock | 54 +++++------- examples/docs_examples/pubspec.yaml | 2 +- examples/hello_world_backend/pubspec.lock | 50 +++++------ examples/hello_world_backend/pubspec.yaml | 4 +- examples/hello_world_cli/pubspec.lock | 42 ++++------ examples/hello_world_cli/pubspec.yaml | 2 +- examples/hello_world_flutter/pubspec.lock | 58 ++++++------- examples/hello_world_flutter/pubspec.yaml | 6 +- .../pubspec.lock | 18 ++-- .../pubspec.yaml | 6 +- examples/wikivoyage_eu/pubspec.lock | 26 +++--- examples/wikivoyage_eu/pubspec.yaml | 2 +- melos.yaml | 51 +++++------ packages/anthropic_sdk_dart/pubspec.lock | 56 ++++++------- packages/anthropic_sdk_dart/pubspec.yaml | 20 ++--- packages/chromadb/pubspec.yaml | 18 ++-- packages/googleai_dart/pubspec.yaml | 20 ++--- packages/langchain/pubspec.yaml | 8 +- packages/langchain_amazon/pubspec.yaml | 2 +- packages/langchain_anthropic/pubspec.yaml | 10 +-- packages/langchain_chroma/pubspec.yaml | 8 +- packages/langchain_cohere/pubspec.yaml | 2 +- packages/langchain_community/pubspec.yaml | 18 ++-- .../lib/src/runnables/function.dart | 4 +- packages/langchain_core/pubspec.yaml | 10 +-- .../langchain_firebase/example/pubspec.lock | 52 ++++++------ .../langchain_firebase/example/pubspec.yaml | 8 +- packages/langchain_firebase/pubspec.lock | 48 +++++------ packages/langchain_firebase/pubspec.yaml | 10 +-- packages/langchain_google/pubspec.yaml | 18 ++-- packages/langchain_huggingface/pubspec.yaml | 2 +- packages/langchain_microsoft/pubspec.yaml | 2 +- packages/langchain_mistralai/pubspec.yaml | 8 +- packages/langchain_ollama/pubspec.yaml | 10 +-- packages/langchain_openai/pubspec.yaml | 10 +-- packages/langchain_pinecone/pubspec.yaml | 8 +- packages/langchain_supabase/pubspec.yaml | 8 +- packages/langchain_weaviate/pubspec.yaml | 2 +- packages/langchain_wikipedia/pubspec.yaml | 2 +- packages/langchain_wolfram/pubspec.yaml | 2 +- packages/langgraph/pubspec.yaml | 2 +- packages/mistralai_dart/pubspec.yaml | 20 ++--- packages/ollama_dart/pubspec.yaml | 20 ++--- packages/openai_dart/pubspec.yaml | 20 ++--- packages/tavily_dart/pubspec.yaml | 20 ++--- packages/vertex_ai/pubspec.yaml | 10 +-- 48 files changed, 419 insertions(+), 452 deletions(-) diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index a18a608d..21488e9b 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -53,10 +53,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -93,18 +93,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "77f3be8c9acaa64ed37dd49c21c056da71b78053d31131ca26a273884a753f66" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "2.0.0-wasm" + version: "2.2.0" fetch_client: dependency: transitive description: name: 
fetch_client - sha256: e11722d7d8cd21f944b52af780392274f7c34a41156b1c80053fc2a414e09a1b + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.1.0-wasm" + version: "1.1.2" ffi: dependency: transitive description: @@ -146,18 +146,18 @@ packages: dependency: "direct main" description: name: flutter_bloc - sha256: f0ecf6e6eb955193ca60af2d5ca39565a86b8a142452c5b24d96fb477428f4d2 + sha256: b594505eac31a0518bdcb4b5b79573b8d9117b193cc80cc12e17d639b10aa27a url: "https://pub.dev" source: hosted - version: "8.1.5" + version: "8.1.6" flutter_markdown: dependency: "direct main" description: name: flutter_markdown - sha256: "87e11b9df25a42e2db315b8b7a51fae8e66f57a4b2f50ec4b822d0fa155e6b52" + sha256: a23c41ee57573e62fc2190a1f36a0480c4d90bde3a8a8d7126e5d5992fb53fb7 url: "https://pub.dev" source: hosted - version: "0.6.22" + version: "0.7.3+1" flutter_web_plugins: dependency: transitive description: flutter @@ -167,10 +167,10 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" html: dependency: transitive description: @@ -183,10 +183,10 @@ packages: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -215,18 +215,18 @@ packages: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" json_path: dependency: transitive description: name: json_path - sha256: "149d32ceb7dc22422ea6d09e401fd688f54e1343bc9ff8c3cb1900ca3b1ad8b1" + sha256: "7a06bbb1cfad390b20fb7a2ca5e67d9ba59633879c6d71142b80fbf61c3b66f6" url: "https://pub.dev" source: hosted - version: "0.7.1" + version: "0.7.4" langchain: dependency: "direct main" description: @@ -275,18 +275,18 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" + sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec url: "https://pub.dev" source: hosted - version: "0.8.0" + version: "0.11.1" math_expressions: dependency: transitive description: name: math_expressions - sha256: "3576593617c3870d75728a751f6ec6e606706d44e363f088ac394b5a28a98064" + sha256: e32d803d758ace61cc6c4bdfed1226ff60a6a23646b35685670d28b5616139f8 url: "https://pub.dev" source: hosted - version: "2.4.0" + version: "2.6.0" maybe_just_nothing: dependency: transitive description: @@ -299,10 +299,10 @@ packages: dependency: transitive description: name: meta - sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.12.0" + version: "1.15.0" nested: dependency: transitive description: @@ -410,58 +410,58 @@ packages: dependency: "direct main" description: name: shared_preferences - sha256: 
"81429e4481e1ccfb51ede496e916348668fd0921627779233bd24cc3ff6abd02" + sha256: c272f9cabca5a81adc9b0894381e9c1def363e980f960fa903c604c471b22f68 url: "https://pub.dev" source: hosted - version: "2.2.2" + version: "2.3.1" shared_preferences_android: dependency: transitive description: name: shared_preferences_android - sha256: "8568a389334b6e83415b6aae55378e158fbc2314e074983362d20c562780fb06" + sha256: a7e8467e9181cef109f601e3f65765685786c1a738a83d7fbbde377589c0d974 url: "https://pub.dev" source: hosted - version: "2.2.1" + version: "2.3.1" shared_preferences_foundation: dependency: transitive description: name: shared_preferences_foundation - sha256: "7bf53a9f2d007329ee6f3df7268fd498f8373602f943c975598bbb34649b62a7" + sha256: "776786cff96324851b656777648f36ac772d88bc4c669acff97b7fce5de3c849" url: "https://pub.dev" source: hosted - version: "2.3.4" + version: "2.5.1" shared_preferences_linux: dependency: transitive description: name: shared_preferences_linux - sha256: "9f2cbcf46d4270ea8be39fa156d86379077c8a5228d9dfdb1164ae0bb93f1faa" + sha256: "580abfd40f415611503cae30adf626e6656dfb2f0cee8f465ece7b6defb40f2f" url: "https://pub.dev" source: hosted - version: "2.3.2" + version: "2.4.1" shared_preferences_platform_interface: dependency: transitive description: name: shared_preferences_platform_interface - sha256: "22e2ecac9419b4246d7c22bfbbda589e3acf5c0351137d87dd2939d984d37c3b" + sha256: "57cbf196c486bc2cf1f02b85784932c6094376284b3ad5779d1b1c6c6a816b80" url: "https://pub.dev" source: hosted - version: "2.3.2" + version: "2.4.1" shared_preferences_web: dependency: transitive description: name: shared_preferences_web - sha256: "9aee1089b36bd2aafe06582b7d7817fd317ef05fc30e6ba14bff247d0933042a" + sha256: d2ca4132d3946fec2184261726b355836a82c33d7d5b67af32692aff18a4684e url: "https://pub.dev" source: hosted - version: "2.3.0" + version: "2.4.2" shared_preferences_windows: dependency: transitive description: name: shared_preferences_windows - sha256: "841ad54f3c8381c480d0c9b508b89a34036f512482c407e6df7a9c4aa2ef8f59" + sha256: "94ef0f72b2d71bc3e700e025db3710911bd51a71cefb65cc609dd0d9a982e3c1" url: "https://pub.dev" source: hosted - version: "2.3.2" + version: "2.4.1" sky_engine: dependency: transitive description: flutter @@ -518,10 +518,10 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" vector_math: dependency: transitive description: @@ -555,5 +555,5 @@ packages: source: hosted version: "1.0.4" sdks: - dart: ">=3.3.0 <4.0.0" - flutter: ">=3.19.0" + dart: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 922fd87b..326a41a5 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -4,19 +4,19 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: flutter: sdk: flutter equatable: ^2.0.5 - flutter_bloc: ^8.1.5 - flutter_markdown: ^0.6.22 + flutter_bloc: ^8.1.6 + flutter_markdown: ^0.7.3 js: ^0.7.1 langchain: ^0.7.4 langchain_community: 0.3.0 langchain_openai: ^0.7.0 - shared_preferences: ^2.2.2 + shared_preferences: ^2.3.0 flutter: uses-material-design: true diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index f37088f5..8aaa5dcd 100644 --- 
a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -67,10 +67,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -99,18 +99,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "1.0.3" + version: "2.2.0" fetch_client: dependency: transitive description: name: fetch_client - sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.0.2" + version: "1.1.2" ffi: dependency: transitive description: @@ -139,10 +139,10 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" gcloud: dependency: transitive description: @@ -155,10 +155,10 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" + sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be url: "https://pub.dev" source: hosted - version: "0.4.3" + version: "0.4.4" google_identity_services_web: dependency: transitive description: @@ -179,10 +179,10 @@ packages: dependency: transitive description: name: googleapis_auth - sha256: "1401a9e55f9e0f565d3eebb18d990290f53a12d38a5f7f0230b112895778a85b" + sha256: befd71383a955535060acde8792e7efc11d2fccd03dd1d3ec434e85b68775938 url: "https://pub.dev" source: hosted - version: "1.5.1" + version: "1.6.0" html: dependency: transitive description: @@ -195,10 +195,10 @@ packages: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -215,30 +215,22 @@ packages: url: "https://pub.dev" source: hosted version: "0.1.2" - js: - dependency: transitive - description: - name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 - url: "https://pub.dev" - source: hosted - version: "0.6.7" json_annotation: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" json_path: dependency: transitive description: name: json_path - sha256: "149d32ceb7dc22422ea6d09e401fd688f54e1343bc9ff8c3cb1900ca3b1ad8b1" + sha256: "7a06bbb1cfad390b20fb7a2ca5e67d9ba59633879c6d71142b80fbf61c3b66f6" url: "https://pub.dev" source: hosted - version: "0.7.1" + version: "0.7.4" langchain: dependency: "direct main" description: @@ -314,10 +306,10 @@ packages: dependency: transitive description: 
name: math_expressions - sha256: "3576593617c3870d75728a751f6ec6e606706d44e363f088ac394b5a28a98064" + sha256: e32d803d758ace61cc6c4bdfed1226ff60a6a23646b35685670d28b5616139f8 url: "https://pub.dev" source: hosted - version: "2.4.0" + version: "2.6.0" maybe_just_nothing: dependency: transitive description: @@ -454,10 +446,10 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" vector_math: dependency: transitive description: @@ -482,4 +474,4 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 50df0b98..398e7e15 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -4,7 +4,7 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: langchain: ^0.7.4 diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index 86030403..97cf3b4b 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -21,18 +21,18 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" cross_file: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -45,18 +45,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "1.0.3" + version: "2.2.0" fetch_client: dependency: transitive description: name: fetch_client - sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.0.2" + version: "1.1.2" fixnum: dependency: transitive description: @@ -69,18 +69,18 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" http: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_methods: dependency: transitive description: @@ -93,26 +93,18 @@ packages: dependency: transitive description: name: http_parser - sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" - url: "https://pub.dev" - source: hosted - version: "4.0.2" - js: - dependency: 
transitive - description: - name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "0.6.7" + version: "4.1.0" json_annotation: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" langchain: dependency: "direct main" description: @@ -177,10 +169,10 @@ packages: dependency: "direct main" description: name: shelf - sha256: ad29c505aee705f41a4d8963641f91ac4cee3c8fad5947e033390a7bd8180fa4 + sha256: e7dd780a7ffb623c57850b33f43309312fc863fb6aa3d276a754bb299839ef12 url: "https://pub.dev" source: hosted - version: "1.4.1" + version: "1.4.2" shelf_router: dependency: "direct main" description: @@ -249,10 +241,10 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" web: dependency: transitive description: @@ -262,4 +254,4 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index 171d3039..b7b5dd3a 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -4,10 +4,10 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: langchain: ^0.7.4 langchain_openai: ^0.7.0 - shelf: ^1.4.1 + shelf: ^1.4.2 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 4c5e1daa..06a4acdb 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -21,18 +21,18 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" cross_file: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -45,18 +45,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "1.0.3" + version: "2.2.0" fetch_client: dependency: transitive description: name: fetch_client - sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.0.2" + version: "1.1.2" fixnum: dependency: transitive description: @@ -69,18 +69,18 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: 
c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" http: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -89,22 +89,14 @@ packages: url: "https://pub.dev" source: hosted version: "4.0.2" - js: - dependency: transitive - description: - name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 - url: "https://pub.dev" - source: hosted - version: "0.6.7" json_annotation: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" langchain: dependency: "direct main" description: @@ -209,10 +201,10 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" web: dependency: transitive description: @@ -222,4 +214,4 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index 531264b7..d73e4928 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -4,7 +4,7 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: langchain: ^0.7.4 diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 74a75767..94a94e96 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -53,10 +53,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -77,18 +77,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "1.0.3" + version: "2.2.0" fetch_client: dependency: transitive description: name: fetch_client - sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.0.2" + version: "1.1.2" fixnum: dependency: transitive description: @@ -106,26 +106,26 @@ packages: dependency: "direct main" description: name: flutter_bloc - sha256: f0ecf6e6eb955193ca60af2d5ca39565a86b8a142452c5b24d96fb477428f4d2 + sha256: b594505eac31a0518bdcb4b5b79573b8d9117b193cc80cc12e17d639b10aa27a url: "https://pub.dev" source: hosted - version: "8.1.5" + version: "8.1.6" 
flutter_markdown: dependency: "direct main" description: name: flutter_markdown - sha256: "04c4722cc36ec5af38acc38ece70d22d3c2123c61305d555750a091517bbe504" + sha256: a23c41ee57573e62fc2190a1f36a0480c4d90bde3a8a8d7126e5d5992fb53fb7 url: "https://pub.dev" source: hosted - version: "0.6.23" + version: "0.7.3+1" freezed_annotation: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" gcloud: dependency: transitive description: @@ -138,10 +138,10 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" + sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be url: "https://pub.dev" source: hosted - version: "0.4.3" + version: "0.4.4" google_identity_services_web: dependency: transitive description: @@ -170,10 +170,10 @@ packages: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -182,22 +182,14 @@ packages: url: "https://pub.dev" source: hosted version: "4.0.2" - js: - dependency: transitive - description: - name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 - url: "https://pub.dev" - source: hosted - version: "0.6.7" json_annotation: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" langchain: dependency: "direct main" description: @@ -260,18 +252,18 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" + sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec url: "https://pub.dev" source: hosted - version: "0.8.0" + version: "0.11.1" meta: dependency: transitive description: name: meta - sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.12.0" + version: "1.15.0" mistralai_dart: dependency: "direct overridden" description: @@ -382,10 +374,10 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" vector_math: dependency: transitive description: @@ -410,5 +402,5 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" flutter: ">=3.19.0" diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index a8419a7d..fb83f0cc 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -4,14 +4,14 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" 
dependencies: flutter: sdk: flutter equatable: ^2.0.5 - flutter_bloc: ^8.1.5 - flutter_markdown: ^0.6.22 + flutter_bloc: ^8.1.6 + flutter_markdown: ^0.7.3 langchain: ^0.7.4 langchain_google: ^0.6.1 langchain_mistralai: ^0.2.2 diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.lock b/examples/vertex_ai_matching_engine_setup/pubspec.lock index 99209b09..653b4e81 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.lock +++ b/examples/vertex_ai_matching_engine_setup/pubspec.lock @@ -53,10 +53,10 @@ packages: dependency: transitive description: name: google_identity_services_web - sha256: "0c56c2c5d60d6dfaf9725f5ad4699f04749fb196ee5a70487a46ef184837ccf6" + sha256: "5be191523702ba8d7a01ca97c17fca096822ccf246b0a9f11923a6ded06199b6" url: "https://pub.dev" source: hosted - version: "0.3.0+2" + version: "0.3.1+4" googleapis: dependency: transitive description: @@ -69,18 +69,18 @@ packages: dependency: "direct main" description: name: googleapis_auth - sha256: "1401a9e55f9e0f565d3eebb18d990290f53a12d38a5f7f0230b112895778a85b" + sha256: befd71383a955535060acde8792e7efc11d2fccd03dd1d3ec434e85b68775938 url: "https://pub.dev" source: hosted - version: "1.5.1" + version: "1.6.0" http: dependency: "direct main" description: name: http - sha256: d4872660c46d929f6b8a9ef4e7a7eff7e49bbf0c4ec3f385ee32df5119175139 + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.1.2" + version: "1.2.2" http_parser: dependency: transitive description: @@ -156,9 +156,9 @@ packages: dependency: transitive description: name: web - sha256: edc8a9573dd8c5a83a183dae1af2b6fd4131377404706ca4e5420474784906fa + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "1.0.0" sdks: - dart: ">=3.2.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.yaml b/examples/vertex_ai_matching_engine_setup/pubspec.yaml index 9de8254f..e42414a8 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.yaml +++ b/examples/vertex_ai_matching_engine_setup/pubspec.yaml @@ -4,10 +4,10 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: gcloud: ^0.8.12 - googleapis_auth: ^1.5.1 - http: ^1.1.0 + googleapis_auth: ^1.6.0 + http: ^1.2.2 vertex_ai: ^0.1.0+1 diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index 4caa6233..da1458be 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -37,10 +37,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -109,10 +109,10 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" html: dependency: transitive description: @@ -125,10 +125,10 @@ packages: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: 
b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -157,10 +157,10 @@ packages: dependency: transitive description: name: json_path - sha256: "149d32ceb7dc22422ea6d09e401fd688f54e1343bc9ff8c3cb1900ca3b1ad8b1" + sha256: "7a06bbb1cfad390b20fb7a2ca5e67d9ba59633879c6d71142b80fbf61c3b66f6" url: "https://pub.dev" source: hosted - version: "0.7.1" + version: "0.7.4" langchain: dependency: "direct main" description: @@ -201,10 +201,10 @@ packages: dependency: transitive description: name: math_expressions - sha256: db0b72d867491c4e53a1c773e2708d5d6e94bbe06be07080fc9f896766b9cd3d + sha256: e32d803d758ace61cc6c4bdfed1226ff60a6a23646b35685670d28b5616139f8 url: "https://pub.dev" source: hosted - version: "2.5.0" + version: "2.6.0" maybe_just_nothing: dependency: transitive description: @@ -319,10 +319,10 @@ packages: dependency: transitive description: name: uuid - sha256: "814e9e88f21a176ae1359149021870e87f7cddaf633ab678a5d2b0bff7fd1ba8" + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.4.0" + version: "4.4.2" vector_math: dependency: transitive description: @@ -340,4 +340,4 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index 109cd236..ab9a51be 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -4,7 +4,7 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: langchain: ^0.7.4 diff --git a/melos.yaml b/melos.yaml index 164a7618..69c804de 100644 --- a/melos.yaml +++ b/melos.yaml @@ -20,54 +20,55 @@ command: bootstrap: usePubspecOverrides: true environment: - sdk: ">=3.0.0 <4.0.0" - flutter: ">=3.19.0" + sdk: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" dependencies: async: ^2.11.0 beautiful_soup_dart: ^0.3.0 characters: ^1.3.0 - collection: '>=1.17.0 <1.20.0' - cross_file: ^0.3.4+1 + collection: ^1.18.0 + cross_file: ^0.3.4+2 crypto: ^3.0.3 csv: ^6.0.0 equatable: ^2.0.5 - fetch_client: ^1.0.2 + fetch_client: ^1.1.2 firebase_app_check: ^0.3.0 firebase_auth: ^5.1.0 - firebase_core: ^3.1.0 + firebase_core: ^3.3.0 firebase_vertexai: ^0.2.2 flat_buffers: ^23.5.26 - flutter_bloc: ^8.1.5 - flutter_markdown: ^0.6.22 - freezed_annotation: ^2.4.1 + flutter_bloc: ^8.1.6 + flutter_markdown: ^0.7.3 + freezed_annotation: ^2.4.2 gcloud: ^0.8.12 - google_generative_ai: 0.4.3 + google_generative_ai: 0.4.4 googleapis: ^12.0.0 - googleapis_auth: ^1.5.1 - http: ^1.1.0 + googleapis_auth: ^1.6.0 + http: ^1.2.2 js: ^0.7.1 - json_annotation: ^4.8.1 - json_path: ^0.7.1 + json_annotation: ^4.9.0 + json_path: ^0.7.4 langchain_tiktoken: ^1.0.1 - math_expressions: ^2.4.0 + math_expressions: ^2.6.0 meta: ^1.11.0 objectbox: ^4.0.1 pinecone: ^0.7.2 - shared_preferences: ^2.2.2 - shelf: ^1.4.1 + rxdart: ">=0.27.7 <0.29.0" + shared_preferences: ^2.3.0 + shelf: ^1.4.2 shelf_router: ^1.1.4 - supabase: ^2.0.8 - uuid: ^4.3.3 + supabase: ^2.2.7 + uuid: ^4.4.2 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 - objectbox_generator: ^4.0.0 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 + objectbox_generator: ^4.0.1 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - 
test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 scripts: lint: diff --git a/packages/anthropic_sdk_dart/pubspec.lock b/packages/anthropic_sdk_dart/pubspec.lock index 1849898b..ddbf9394 100644 --- a/packages/anthropic_sdk_dart/pubspec.lock +++ b/packages/anthropic_sdk_dart/pubspec.lock @@ -5,23 +5,23 @@ packages: dependency: transitive description: name: _fe_analyzer_shared - sha256: "5aaf60d96c4cd00fe7f21594b5ad6a1b699c80a27420f8a837f4d68473ef09e3" + sha256: f256b0c0ba6c7577c15e2e4e114755640a875e885099367bf6e012b19314c834 url: "https://pub.dev" source: hosted - version: "68.0.0" + version: "72.0.0" _macros: dependency: transitive description: dart source: sdk - version: "0.1.0" + version: "0.3.2" analyzer: dependency: transitive description: name: analyzer - sha256: "21f1d3720fd1c70316399d5e2bccaebb415c434592d778cce8acb967b8578808" + sha256: b652861553cd3990d8ed361f7979dc6d7053a9ac8843fa73820ab68ce5410139 url: "https://pub.dev" source: hosted - version: "6.5.0" + version: "6.7.0" args: dependency: transitive description: @@ -82,10 +82,10 @@ packages: dependency: "direct dev" description: name: build_runner - sha256: "1414d6d733a85d8ad2f1dfcb3ea7945759e35a123cb99ccfac75d0758f75edfa" + sha256: dd09dd4e2b078992f42aac7f1a622f01882a8492fef08486b27ddde929c19f04 url: "https://pub.dev" source: hosted - version: "2.4.10" + version: "2.4.12" build_runner_core: dependency: transitive description: @@ -178,18 +178,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: c0a76bfd84d4bc5a0733ab8b9fcee268d5069228790a6dd71fc2a6d1049223cc + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "2.1.0" + version: "2.2.0" fetch_client: dependency: "direct main" description: name: fetch_client - sha256: "0b935eff9dfa84fb56bddadaf020c9aa61f02cbd6fa8dad914d6d343a838936d" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.1.1" + version: "1.1.2" file: dependency: transitive description: @@ -210,18 +210,18 @@ packages: dependency: "direct dev" description: name: freezed - sha256: "5606fb95d54f3bb241b3e12dcfb65ba7494c775c4cb458334eccceb07334a3d9" + sha256: "44c19278dd9d89292cf46e97dc0c1e52ce03275f40a97c5a348e802a924bf40e" url: "https://pub.dev" source: hosted - version: "2.5.3" + version: "2.5.7" freezed_annotation: dependency: "direct main" description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" frontend_server_client: dependency: transitive description: @@ -250,10 +250,10 @@ packages: dependency: "direct main" description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_multi_server: dependency: transitive description: @@ -322,10 +322,10 @@ packages: dependency: transitive description: name: macros - sha256: "12e8a9842b5a7390de7a781ec63d793527582398d16ea26c60fed58833c9ae79" + sha256: "0acaed5d6b7eab89f63350bccd82119e6c602df0f391260d0e32b5e23db79536" url: "https://pub.dev" source: hosted - version: "0.1.0-main.0" + version: "0.1.2-main.4" matcher: dependency: transitive description: @@ -362,8 +362,8 @@ packages: 
dependency: "direct dev" description: path: "." - ref: "280ae0d41806eda25e923203d67bd6f4992a81e9" - resolved-ref: "280ae0d41806eda25e923203d67bd6f4992a81e9" + ref: "93230a5e346b02789f0f727da8eecea9c7bdf118" + resolved-ref: "93230a5e346b02789f0f727da8eecea9c7bdf118" url: "https://github.com/davidmigloz/openapi_spec.git" source: git version: "0.7.8" @@ -531,26 +531,26 @@ packages: dependency: "direct dev" description: name: test - sha256: d11b55850c68c1f6c0cf00eabded4e66c4043feaf6c0d7ce4a36785137df6331 + sha256: "713a8789d62f3233c46b4a90b174737b2c04cb6ae4500f2aa8b1be8f03f5e67f" url: "https://pub.dev" source: hosted - version: "1.25.5" + version: "1.25.8" test_api: dependency: transitive description: name: test_api - sha256: "2419f20b0c8677b2d67c8ac4d1ac7372d862dc6c460cdbb052b40155408cd794" + sha256: "664d3a9a64782fcdeb83ce9c6b39e78fd2971d4e37827b9b06c3aa1edc5e760c" url: "https://pub.dev" source: hosted - version: "0.7.1" + version: "0.7.3" test_core: dependency: transitive description: name: test_core - sha256: "4d070a6bc36c1c4e89f20d353bfd71dc30cdf2bd0e14349090af360a029ab292" + sha256: "12391302411737c176b0b5d6491f466b0dd56d4763e347b6714efbaa74d7953d" url: "https://pub.dev" source: hosted - version: "0.6.2" + version: "0.6.5" timing: dependency: transitive description: @@ -624,4 +624,4 @@ packages: source: hosted version: "3.1.2" sdks: - dart: ">=3.4.0 <4.0.0" + dart: ">=3.5.0-259.0.dev <4.0.0" diff --git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml index 160596dc..84376d27 100644 --- a/packages/anthropic_sdk_dart/pubspec.yaml +++ b/packages/anthropic_sdk_dart/pubspec.yaml @@ -13,22 +13,22 @@ topics: - anthropic environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/chromadb/pubspec.yaml b/packages/chromadb/pubspec.yaml index f11b20ea..eb6a6f29 100644 --- a/packages/chromadb/pubspec.yaml +++ b/packages/chromadb/pubspec.yaml @@ -12,20 +12,20 @@ topics: - vector-db environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/googleai_dart/pubspec.yaml b/packages/googleai_dart/pubspec.yaml index 22370975..ee294296 100644 --- a/packages/googleai_dart/pubspec.yaml +++ b/packages/googleai_dart/pubspec.yaml @@ -14,22 +14,22 @@ topics: - gemini environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + 
fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 2a05e1c7..70437c8d 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -13,15 +13,15 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: characters: ^1.3.0 - collection: ">=1.17.0 <1.20.0" + collection: ^1.18.0 crypto: ^3.0.3 langchain_core: 0.3.4 meta: ^1.11.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_amazon/pubspec.yaml b/packages/langchain_amazon/pubspec.yaml index abbcb58c..d948eb8c 100644 --- a/packages/langchain_amazon/pubspec.yaml +++ b/packages/langchain_amazon/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 4a76ab62..c25c8e94 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -13,16 +13,16 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: anthropic_sdk_dart: ^0.1.0 - collection: ">=1.17.0 <1.20.0" - http: ^1.1.0 + collection: ^1.18.0 + http: ^1.2.2 langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - rxdart: ^0.27.7 + rxdart: ">=0.27.7 <0.29.0" dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 9ae0ce5d..391b329b 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -14,17 +14,17 @@ topics: - vector-db environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: chromadb: ^0.2.0+1 - http: ^1.1.0 + http: ^1.2.2 langchain_core: 0.3.4 meta: ^1.11.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 langchain: ^0.7.4 langchain_community: 0.3.0 langchain_openai: ^0.7.0 diff --git a/packages/langchain_cohere/pubspec.yaml b/packages/langchain_cohere/pubspec.yaml index 8ace6cf2..ed26abe5 100644 --- a/packages/langchain_cohere/pubspec.yaml +++ b/packages/langchain_cohere/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 0415ef78..ebf10d32 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -13,27 +13,27 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: beautiful_soup_dart: ^0.3.0 - cross_file: ^0.3.4+1 + cross_file: ^0.3.4+2 csv: ^6.0.0 flat_buffers: ^23.5.26 - http: ^1.1.0 - json_path: ^0.7.1 + http: ^1.2.2 + json_path: ^0.7.4 langchain_core: 0.3.4 - math_expressions: ^2.4.0 + math_expressions: ^2.6.0 meta: ^1.11.0 objectbox: ^4.0.1 tavily_dart: ^0.1.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: - build_runner: ^2.4.9 + build_runner: ^2.4.11 
langchain_openai: ^0.7.0 - objectbox_generator: ^4.0.0 - test: ^1.25.2 + objectbox_generator: ^4.0.1 + test: ^1.25.8 objectbox: output_dir: src/vector_stores/objectbox diff --git a/packages/langchain_core/lib/src/runnables/function.dart b/packages/langchain_core/lib/src/runnables/function.dart index 32843641..7af32bcc 100644 --- a/packages/langchain_core/lib/src/runnables/function.dart +++ b/packages/langchain_core/lib/src/runnables/function.dart @@ -89,7 +89,7 @@ class RunnableFunction final RunnableOptions? options, }) async { if (_invokeFunc != null) { - return _invokeFunc!(input, options); + return _invokeFunc(input, options); } else { return stream(input, options: options).first; } @@ -113,7 +113,7 @@ class RunnableFunction final RunnableOptions? options, }) async* { if (_streamFunc != null) { - yield* _streamFunc!(inputStream, options); + yield* _streamFunc(inputStream, options); } else { yield* inputStream.asyncMap((final input) async { return invoke(input, options: options); diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index 56b13134..170363a7 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -13,15 +13,15 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: async: ^2.11.0 - collection: ">=1.17.0 <1.20.0" - cross_file: ^0.3.4+1 + collection: ^1.18.0 + cross_file: ^0.3.4+2 crypto: ^3.0.3 meta: ^1.11.0 - rxdart: ^0.27.7 + rxdart: ">=0.27.7 <0.29.0" dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index e3f16b8b..1896af9c 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -85,10 +85,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -165,26 +165,26 @@ packages: dependency: "direct main" description: name: firebase_core - sha256: fae4ab4317c2a7afb13d44ef1e3f9f28a630e10016bc5cfe761e8e6a0ed7816a + sha256: "3187f4f8e49968573fd7403011dca67ba95aae419bc0d8131500fae160d94f92" url: "https://pub.dev" source: hosted - version: "3.1.0" + version: "3.3.0" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: "1003a5a03a61fc9a22ef49f37cbcb9e46c86313a7b2e7029b9390cf8c6fc32cb" + sha256: "3c3a1e92d6f4916c32deea79c4a7587aa0e9dbbe5889c7a16afcf005a485ee02" url: "https://pub.dev" source: hosted - version: "5.1.0" + version: "5.2.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: "6643fe3dbd021e6ccfb751f7882b39df355708afbdeb4130fc50f9305a9d1a3d" + sha256: e8d1e22de72cb21cdcfc5eed7acddab3e99cd83f3b317f54f7a96c32f25fd11e url: "https://pub.dev" source: hosted - version: "2.17.2" + version: "2.17.4" firebase_vertexai: dependency: transitive description: @@ -218,10 +218,10 @@ packages: dependency: "direct main" description: name: flutter_markdown - sha256: "04c4722cc36ec5af38acc38ece70d22d3c2123c61305d555750a091517bbe504" + sha256: a23c41ee57573e62fc2190a1f36a0480c4d90bde3a8a8d7126e5d5992fb53fb7 url: "https://pub.dev" source: hosted - version: "0.6.23" + version: "0.7.3+1" flutter_test: dependency: 
"direct dev" description: flutter @@ -281,18 +281,18 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" + sha256: "3f87a60e8c63aecc975dda1ceedbc8f24de75f09e4856ea27daf8958f2f0ce05" url: "https://pub.dev" source: hosted - version: "10.0.4" + version: "10.0.5" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" + sha256: "932549fb305594d82d7183ecd9fa93463e9914e1b67cacc34bc40906594a1806" url: "https://pub.dev" source: hosted - version: "3.0.3" + version: "3.0.5" leak_tracker_testing: dependency: transitive description: @@ -329,18 +329,18 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" + sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec url: "https://pub.dev" source: hosted - version: "0.8.0" + version: "0.11.1" meta: dependency: transitive description: name: meta - sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.12.0" + version: "1.15.0" path: dependency: transitive description: @@ -422,10 +422,10 @@ packages: dependency: transitive description: name: test_api - sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" + sha256: "5b8a98dafc4d5c4c9c72d8b31ab2b23fc13422348d2997120294d3bac86b4ddb" url: "https://pub.dev" source: hosted - version: "0.7.0" + version: "0.7.2" typed_data: dependency: transitive description: @@ -438,10 +438,10 @@ packages: dependency: transitive description: name: uuid - sha256: "814e9e88f21a176ae1359149021870e87f7cddaf633ab678a5d2b0bff7fd1ba8" + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.4.0" + version: "4.4.2" vector_math: dependency: transitive description: @@ -454,10 +454,10 @@ packages: dependency: transitive description: name: vm_service - sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" + sha256: f652077d0bdf60abe4c1f6377448e8655008eef28f128bc023f7b5e8dfeb48fc url: "https://pub.dev" source: hosted - version: "14.2.1" + version: "14.2.4" web: dependency: transitive description: @@ -467,5 +467,5 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" - flutter: ">=3.19.0" + dart: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index ddb7010d..d63e1ccd 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -4,15 +4,15 @@ version: 1.0.0+1 publish_to: 'none' environment: - sdk: ">=3.0.0 <4.0.0" - flutter: ">=3.19.0" + sdk: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" dependencies: cupertino_icons: ^1.0.6 - firebase_core: ^3.1.0 + firebase_core: ^3.3.0 flutter: sdk: flutter - flutter_markdown: ^0.6.22 + flutter_markdown: ^0.7.3 langchain: 0.7.4 langchain_firebase: 0.2.1 diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index b8f43f02..6c5ffbb4 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -77,10 +77,10 @@ packages: dependency: transitive description: name: 
cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -149,26 +149,26 @@ packages: dependency: "direct main" description: name: firebase_core - sha256: fae4ab4317c2a7afb13d44ef1e3f9f28a630e10016bc5cfe761e8e6a0ed7816a + sha256: "3187f4f8e49968573fd7403011dca67ba95aae419bc0d8131500fae160d94f92" url: "https://pub.dev" source: hosted - version: "3.1.0" + version: "3.3.0" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: "1003a5a03a61fc9a22ef49f37cbcb9e46c86313a7b2e7029b9390cf8c6fc32cb" + sha256: "3c3a1e92d6f4916c32deea79c4a7587aa0e9dbbe5889c7a16afcf005a485ee02" url: "https://pub.dev" source: hosted - version: "5.1.0" + version: "5.2.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: "6643fe3dbd021e6ccfb751f7882b39df355708afbdeb4130fc50f9305a9d1a3d" + sha256: e8d1e22de72cb21cdcfc5eed7acddab3e99cd83f3b317f54f7a96c32f25fd11e url: "https://pub.dev" source: hosted - version: "2.17.2" + version: "2.17.4" firebase_vertexai: dependency: "direct main" description: @@ -235,18 +235,18 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" + sha256: "3f87a60e8c63aecc975dda1ceedbc8f24de75f09e4856ea27daf8958f2f0ce05" url: "https://pub.dev" source: hosted - version: "10.0.4" + version: "10.0.5" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" + sha256: "932549fb305594d82d7183ecd9fa93463e9914e1b67cacc34bc40906594a1806" url: "https://pub.dev" source: hosted - version: "3.0.3" + version: "3.0.5" leak_tracker_testing: dependency: transitive description: @@ -267,18 +267,18 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" + sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec url: "https://pub.dev" source: hosted - version: "0.8.0" + version: "0.11.1" meta: dependency: "direct main" description: name: meta - sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.12.0" + version: "1.15.0" path: dependency: transitive description: @@ -360,10 +360,10 @@ packages: dependency: transitive description: name: test_api - sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" + sha256: "5b8a98dafc4d5c4c9c72d8b31ab2b23fc13422348d2997120294d3bac86b4ddb" url: "https://pub.dev" source: hosted - version: "0.7.0" + version: "0.7.2" typed_data: dependency: transitive description: @@ -376,10 +376,10 @@ packages: dependency: "direct main" description: name: uuid - sha256: "814e9e88f21a176ae1359149021870e87f7cddaf633ab678a5d2b0bff7fd1ba8" + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.4.0" + version: "4.4.2" vector_math: dependency: transitive description: @@ -392,10 +392,10 @@ packages: dependency: transitive description: name: vm_service - sha256: 
"3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" + sha256: f652077d0bdf60abe4c1f6377448e8655008eef28f128bc023f7b5e8dfeb48fc url: "https://pub.dev" source: hosted - version: "14.2.1" + version: "14.2.4" web: dependency: transitive description: @@ -405,5 +405,5 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" - flutter: ">=3.19.0" + dart: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 2180b564..d838c00c 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -14,18 +14,18 @@ topics: - firebase environment: - sdk: ">=3.0.0 <4.0.0" - flutter: ">=3.19.0" + sdk: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" dependencies: - collection: ">=1.17.0 <1.20.0" + collection: ^1.18.0 firebase_app_check: ^0.3.0 firebase_auth: ^5.1.0 - firebase_core: ^3.1.0 + firebase_core: ^3.3.0 firebase_vertexai: ^0.2.2 langchain_core: 0.3.4 meta: ^1.11.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: flutter_test: diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 3adcb2ed..e3c502ea 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -14,24 +14,22 @@ topics: - vertex-ai environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.20.0" - fetch_client: ^1.0.2 + collection: ^1.18.0 + fetch_client: ^1.1.2 gcloud: ^0.8.12 - google_generative_ai: 0.4.3 + google_generative_ai: 0.4.4 googleapis: ^12.0.0 - googleapis_auth: ^1.5.1 - http: ^1.1.0 + googleapis_auth: ^1.6.0 + http: ^1.2.2 langchain_core: 0.3.4 meta: ^1.11.0 - uuid: ^4.3.3 + uuid: ^4.4.2 vertex_ai: ^0.1.0+1 langchain_firebase: ^0.1.0 firebase_core: ^2.31.0 dev_dependencies: - test: ^1.24.9 - fake_cloud_firestore: ^2.5.1 - langchain: ^0.6.0+1 + test: ^1.25.8 diff --git a/packages/langchain_huggingface/pubspec.yaml b/packages/langchain_huggingface/pubspec.yaml index 7c1f00d4..2f29e62b 100644 --- a/packages/langchain_huggingface/pubspec.yaml +++ b/packages/langchain_huggingface/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_microsoft/pubspec.yaml b/packages/langchain_microsoft/pubspec.yaml index 3bd05e6a..11d0021c 100644 --- a/packages/langchain_microsoft/pubspec.yaml +++ b/packages/langchain_microsoft/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index d237e3f9..5d12387b 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -14,15 +14,15 @@ topics: - mistral environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.20.0" - http: ^1.1.0 + collection: ^1.18.0 + http: ^1.2.2 langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 mistralai_dart: ^0.0.3+3 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index a97ab982..53d659d0 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -14,16 +14,16 @@ topics: - ollama environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.20.0" - http: ^1.1.0 + collection: ^1.18.0 + 
http: ^1.2.2 langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 ollama_dart: ^0.2.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index f2e989a9..2e8e5ff6 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -14,18 +14,18 @@ topics: - gpt environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.20.0" - http: ^1.1.0 + collection: ^1.18.0 + http: ^1.2.2 langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 openai_dart: ^0.4.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: langchain: ^0.7.4 langchain_community: 0.3.0 - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index ca636d73..ffbd4c9a 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -14,15 +14,15 @@ topics: - vector-db environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - http: ^1.1.0 + http: ^1.2.2 langchain_core: 0.3.4 meta: ^1.11.0 pinecone: ^0.7.2 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 langchain_openai: ^0.7.0 diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index d1120d45..af7c072f 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -14,16 +14,16 @@ topics: - vector-db environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - http: ^1.1.0 + http: ^1.2.2 langchain_core: 0.3.4 meta: ^1.11.0 - supabase: ^2.0.8 + supabase: ^2.2.7 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 langchain: ^0.7.4 langchain_community: 0.3.0 langchain_openai: ^0.7.0 diff --git a/packages/langchain_weaviate/pubspec.yaml b/packages/langchain_weaviate/pubspec.yaml index fb6e6ce4..3d9b8cd3 100644 --- a/packages/langchain_weaviate/pubspec.yaml +++ b/packages/langchain_weaviate/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_wikipedia/pubspec.yaml b/packages/langchain_wikipedia/pubspec.yaml index d8f713b5..2dcc9e5c 100644 --- a/packages/langchain_wikipedia/pubspec.yaml +++ b/packages/langchain_wikipedia/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_wolfram/pubspec.yaml b/packages/langchain_wolfram/pubspec.yaml index 950db4e1..14b30014 100644 --- a/packages/langchain_wolfram/pubspec.yaml +++ b/packages/langchain_wolfram/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langgraph/pubspec.yaml b/packages/langgraph/pubspec.yaml index 2b4ebaaf..e6ef9c18 100644 --- a/packages/langgraph/pubspec.yaml +++ b/packages/langgraph/pubspec.yaml @@ -13,4 +13,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/mistralai_dart/pubspec.yaml b/packages/mistralai_dart/pubspec.yaml index 970d1403..406b7170 100644 --- a/packages/mistralai_dart/pubspec.yaml +++ b/packages/mistralai_dart/pubspec.yaml @@ -13,22 +13,22 @@ topics: - mistral environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + 
fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 69b2f8a3..3f8d7f75 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -13,22 +13,22 @@ topics: - ollama environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index 97f7f230..4126650f 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -13,22 +13,22 @@ topics: - gpt environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/tavily_dart/pubspec.yaml b/packages/tavily_dart/pubspec.yaml index 5694d98f..29519674 100644 --- a/packages/tavily_dart/pubspec.yaml +++ b/packages/tavily_dart/pubspec.yaml @@ -13,22 +13,22 @@ topics: - rag environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index ccfa07c8..c612870d 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -14,14 +14,14 @@ topics: - matching-engine environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.20.0" + collection: ^1.18.0 googleapis: ^12.0.0 - googleapis_auth: ^1.5.1 - 
http: ^1.1.0 + googleapis_auth: ^1.6.0 + http: ^1.2.2 meta: ^1.11.0 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 From 6b5afeed6b39f42a1f1f757b6cdaa491cf4c6657 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 10 Aug 2024 18:33:14 +0200 Subject: [PATCH 208/251] docs: Suggest using few-shot prompting with Ollama tool calling (#521) --- docs/modules/model_io/models/chat_models/integrations/ollama.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index d12b5b93..9c368d1b 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -182,6 +182,8 @@ final chatModel = ChatOllama( ); ``` +**Pro-tip:** You can improve tool-calling performance of small models by using few-shot prompting. You can find out how to do this [here](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools?id=few-shot-prompting) and in this [blog post](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance). + ### JSON mode You can force the model to produce JSON output that you can easily parse using `JsonOutputParser`, useful for extracting structured data. From 081508233f1f733f25cd509b856568d8a27c3e9f Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 10 Aug 2024 18:38:43 +0200 Subject: [PATCH 209/251] feat: Add gpt-4o-2024-08-06 to model catalog in openai_dart (#522) --- .../lib/src/chat_models/types.dart | 6 + .../openai_dart/lib/src/generated/client.dart | 2 +- .../generated/schema/assistant_object.dart | 48 +- .../src/generated/schema/assistant_tools.dart | 2 +- .../lib/src/generated/schema/batch.dart | 4 +- .../schema/create_assistant_request.dart | 50 +- .../create_chat_completion_request.dart | 2 + .../create_fine_tuning_job_request.dart | 4 +- .../schema/create_message_request.dart | 4 +- .../generated/schema/create_run_request.dart | 47 +- .../schema/create_thread_and_run_request.dart | 47 +- .../schema/create_thread_request.dart | 4 +- .../schema/create_vector_store_request.dart | 4 +- .../src/generated/schema/message_object.dart | 4 +- .../schema/modify_assistant_request.dart | 48 +- .../schema/modify_message_request.dart | 4 +- .../generated/schema/modify_run_request.dart | 4 +- .../schema/modify_thread_request.dart | 4 +- .../lib/src/generated/schema/run_object.dart | 38 +- .../src/generated/schema/run_step_object.dart | 4 +- .../src/generated/schema/schema.freezed.dart | 7238 ++++++++++++----- .../lib/src/generated/schema/schema.g.dart | 267 +- .../src/generated/schema/thread_object.dart | 4 +- .../schema/update_vector_store_request.dart | 4 +- .../schema/vector_store_file_object.dart | 14 +- .../generated/schema/vector_store_object.dart | 4 +- packages/openai_dart/oas/main.dart | 5 +- packages/openai_dart/oas/openapi_curated.yaml | 113 +- .../openai_dart/oas/openapi_official.yaml | 796 +- 29 files changed, 6234 insertions(+), 2541 deletions(-) diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 9712ff59..988d27c0 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -21,10 +21,16 @@ import 'package:meta/meta.dart'; /// - `gpt-4-vision-preview` /// - `gpt-4o` /// - `gpt-4o-2024-05-13` +/// - `gpt-4o-2024-08-06` /// - `gpt-4o-mini` /// - 
`gpt-4o-mini-2024-07-18` /// - `gpt-3.5-turbo` /// - `gpt-3.5-turbo-16k` +/// - `gpt-3.5-turbo-16k-0613` +/// - `gpt-3.5-turbo-0125` +/// - `gpt-3.5-turbo-0301` +/// - `gpt-3.5-turbo-0613` +/// - `gpt-3.5-turbo-1106` /// /// Mind that the list may be outdated. /// See https://platform.openai.com/docs/models for the latest list. diff --git a/packages/openai_dart/lib/src/generated/client.dart b/packages/openai_dart/lib/src/generated/client.dart index 66c918d1..828b26be 100644 --- a/packages/openai_dart/lib/src/generated/client.dart +++ b/packages/openai_dart/lib/src/generated/client.dart @@ -58,7 +58,7 @@ class OpenAIClientException implements Exception { // CLASS: OpenAIClient // ========================================== -/// Client for OpenAI API (v.2.1.0) +/// Client for OpenAI API (v.2.3.0) /// /// The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. class OpenAIClient { diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart index 59bac618..5784d1ed 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart @@ -36,29 +36,46 @@ class AssistantObject with _$AssistantObject { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. required String? instructions, - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. required List tools, /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
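// A minimal sketch of the JSON-mode flow mentioned in the ollama.md change
// above (patch 208/251): force ChatOllama to emit JSON and parse it with
// `JsonOutputParser`. Assumes `ChatOllamaOptions.format`,
// `OllamaResponseFormat.json`, and `JsonOutputParser<ChatResult>` as the
// relevant APIs; exact names may differ slightly between package versions.
import 'package:langchain/langchain.dart';
import 'package:langchain_ollama/langchain_ollama.dart';

Future<void> main() async {
  final promptTemplate = ChatPromptTemplate.fromTemplate(
    'Return a JSON object with the population of each country: {countries}',
  );

  final chatModel = ChatOllama(
    defaultOptions: ChatOllamaOptions(
      model: 'llama3.1',
      temperature: 0,
      format: OllamaResponseFormat.json, // assumed option enabling JSON mode
    ),
  );

  // Prompt -> model -> JSON string -> Map.
  final chain =
      promptTemplate.pipe(chatModel).pipe(JsonOutputParser<ChatResult>());

  final res = await chain.invoke({'countries': 'France, Spain'});
  print(res); // e.g. {France: 67390000, Spain: 47350000}
}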
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) AssistantObjectResponseFormat? responseFormat, @@ -170,11 +187,22 @@ enum AssistantResponseFormatMode { // CLASS: AssistantObjectResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-4o-mini-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// the model will match your supplied JSON schema. 
Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class AssistantObjectResponseFormat with _$AssistantObjectResponseFormat { diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart index 043a7d9a..5c0c2c47 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart @@ -80,7 +80,7 @@ class AssistantToolsFileSearchFileSearch /// Factory constructor for AssistantToolsFileSearchFileSearch const factory AssistantToolsFileSearchFileSearch({ - /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search diff --git a/packages/openai_dart/lib/src/generated/schema/batch.dart b/packages/openai_dart/lib/src/generated/schema/batch.dart index 94cc6080..471ac112 100644 --- a/packages/openai_dart/lib/src/generated/schema/batch.dart +++ b/packages/openai_dart/lib/src/generated/schema/batch.dart @@ -74,7 +74,9 @@ class Batch with _$Batch { @JsonKey(name: 'request_counts', includeIfNull: false) BatchRequestCounts? requestCounts, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. 
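// A minimal sketch of the `max_num_results` knob documented above for the
// file_search tool (default 20 for `gpt-4*` models, 5 for gpt-3.5-turbo,
// valid range 1-50). The `AssistantTools.fileSearch` constructor and the
// `fileSearch` / `maxNumResults` parameter names are assumptions based on the
// generated schema, not taken verbatim from this patch.
import 'package:openai_dart/openai_dart.dart';

// Cap file search at 10 results instead of the model-dependent default.
final fileSearchTool = AssistantTools.fileSearch(
  fileSearch: AssistantToolsFileSearchFileSearch(
    maxNumResults: 10,
  ),
);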
@JsonKey(includeIfNull: false) dynamic metadata, }) = _Batch; diff --git a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart index 3b9086d3..0e849a85 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart @@ -27,29 +27,46 @@ class CreateAssistantRequest with _$CreateAssistantRequest { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. @JsonKey(includeIfNull: false) String? instructions, - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @Default([]) List tools, /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateAssistantRequestResponseFormat? responseFormat, @@ -163,6 +180,8 @@ enum AssistantModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -254,11 +273,22 @@ enum CreateAssistantResponseFormatMode { // CLASS: CreateAssistantRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-4o-mini-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// is valid JSON. 
/// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class CreateAssistantRequestResponseFormat with _$CreateAssistantRequestResponseFormat { diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index f9213271..fd24189a 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -302,6 +302,8 @@ enum ChatCompletionModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') diff --git a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart index 17b649aa..3da0a42e 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart @@ -39,7 +39,7 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { /// A string of up to 18 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @JsonKey(includeIfNull: false) String? suffix, /// The ID of an uploaded file that contains validation data. @@ -127,6 +127,8 @@ enum FineTuningModels { davinci002, @JsonValue('gpt-3.5-turbo') gpt35Turbo, + @JsonValue('gpt-4o-mini') + gpt4oMini, } // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/create_message_request.dart b/packages/openai_dart/lib/src/generated/schema/create_message_request.dart index 7837049f..fc42a4d2 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_message_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_message_request.dart @@ -25,7 +25,9 @@ class CreateMessageRequest with _$CreateMessageRequest { /// A list of files attached to the message, and the tools they were added to. @JsonKey(includeIfNull: false) List? attachments, - /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _CreateMessageRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 0e395531..4c13ec8f 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -37,13 +37,18 @@ class CreateRunRequest with _$CreateRunRequest { /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. @JsonKey(includeIfNull: false) List? tools, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, @@ -74,11 +79,22 @@ class CreateRunRequest with _$CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? parallelToolCalls, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. 
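// A minimal sketch of selecting the `gpt-4o-2024-08-06` snapshot that this
// patch adds to the model catalog (e.g. `ChatCompletionModels.gpt4o20240806`).
// The client and request API follow the openai_dart README; exact message
// constructors may vary between versions, and the API key is a placeholder.
import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  final client = OpenAIClient(apiKey: 'YOUR_API_KEY');

  final res = await client.createChatCompletion(
    request: CreateChatCompletionRequest(
      // Newly catalogued snapshot from this patch.
      model: ChatCompletionModel.model(ChatCompletionModels.gpt4o20240806),
      messages: [
        ChatCompletionMessage.system(content: 'You are a helpful assistant.'),
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string('Hello!'),
        ),
      ],
    ),
  );

  print(res.choices.first.message.content);
  client.endSession();
}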
/// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? responseFormat, @@ -202,6 +218,8 @@ enum RunModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -382,11 +400,22 @@ enum CreateRunRequestResponseFormatMode { // CLASS: CreateRunRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-4o-mini-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class CreateRunRequestResponseFormat with _$CreateRunRequestResponseFormat { diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index ae054a5c..e69a2060 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -36,13 +36,18 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, @@ -73,11 +78,22 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? parallelToolCalls, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -201,6 +217,8 @@ enum ThreadAndRunModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -388,11 +406,22 @@ enum CreateThreadAndRunRequestResponseFormatMode { // CLASS: CreateThreadAndRunRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-4o-mini-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// is valid JSON. 
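// The updated doc comments above describe two `response_format` payloads. A
// minimal sketch of what each mode sends on the wire, written as plain Dart
// maps; the schema contents (name, strict, properties) are hypothetical and
// only mirror the documented `{ "type": "json_schema", "json_schema": {...} }`
// shape.

// JSON mode: the model is only guaranteed to emit syntactically valid JSON.
const jsonModeResponseFormat = <String, dynamic>{
  'type': 'json_object',
};

// Structured Outputs: the model must match the supplied JSON schema.
const structuredOutputsResponseFormat = <String, dynamic>{
  'type': 'json_schema',
  'json_schema': {
    'name': 'weather_report',
    'strict': true,
    'schema': {
      'type': 'object',
      'properties': {
        'city': {'type': 'string'},
        'temperature_celsius': {'type': 'number'},
      },
      'required': ['city', 'temperature_celsius'],
      'additionalProperties': false,
    },
  },
};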
/// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class CreateThreadAndRunRequestResponseFormat with _$CreateThreadAndRunRequestResponseFormat { diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart index 22823647..2cfb4b35 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart @@ -22,7 +22,9 @@ class CreateThreadRequest with _$CreateThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _CreateThreadRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart index 61e87095..b26b786e 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart @@ -30,7 +30,9 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { @JsonKey(name: 'chunking_strategy', includeIfNull: false) ChunkingStrategyRequestParam? chunkingStrategy, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. 
@JsonKey(includeIfNull: false) dynamic metadata, }) = _CreateVectorStoreRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/message_object.dart b/packages/openai_dart/lib/src/generated/schema/message_object.dart index fae9d2ae..9e991a27 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_object.dart @@ -58,7 +58,9 @@ class MessageObject with _$MessageObject { /// A list of files attached to the message, and the tools they were added to. required List? attachments, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, }) = _MessageObject; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart index b02d123e..99c1f887 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart @@ -27,7 +27,8 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. @JsonKey(includeIfNull: false) String? instructions, - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @Default([]) List tools, /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. @@ -37,22 +38,38 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) ModifyAssistantRequestResponseFormat? responseFormat, @@ -150,11 +167,22 @@ enum ModifyAssistantResponseFormatMode { // CLASS: ModifyAssistantRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
+/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-4o-mini-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class ModifyAssistantRequestResponseFormat with _$ModifyAssistantRequestResponseFormat { diff --git a/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart index b6e7d119..b7ec05e1 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart @@ -15,7 +15,9 @@ class ModifyMessageRequest with _$ModifyMessageRequest { /// Factory constructor for ModifyMessageRequest const factory ModifyMessageRequest({ - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? 
metadata, }) = _ModifyMessageRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart index 3d113815..973a0b3d 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart @@ -15,7 +15,9 @@ class ModifyRunRequest with _$ModifyRunRequest { /// Factory constructor for ModifyRunRequest const factory ModifyRunRequest({ - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _ModifyRunRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart index a335f1b6..96f4983f 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart @@ -19,7 +19,9 @@ class ModifyThreadRequest with _$ModifyThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _ModifyThreadRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/run_object.dart b/packages/openai_dart/lib/src/generated/schema/run_object.dart index d3e7dcf5..73ffe897 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_object.dart @@ -68,7 +68,9 @@ class RunObject with _$RunObject { /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. required List tools, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -103,11 +105,22 @@ class RunObject with _$RunObject { /// during tool use. @JsonKey(name: 'parallel_tool_calls') required bool? 
parallelToolCalls, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') required RunObjectResponseFormat responseFormat, @@ -448,11 +461,22 @@ enum RunObjectResponseFormatMode { // CLASS: RunObjectResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-4o-mini-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// is valid JSON. 
/// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class RunObjectResponseFormat with _$RunObjectResponseFormat { const RunObjectResponseFormat._(); diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_object.dart b/packages/openai_dart/lib/src/generated/schema/run_step_object.dart index 2e56839e..ede505da 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_step_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_step_object.dart @@ -58,7 +58,9 @@ class RunStepObject with _$RunStepObject { /// The Unix timestamp (in seconds) for when the run step completed. @JsonKey(name: 'completed_at') required int? completedAt, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 06e93133..36ff6d91 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -123,8 +123,12 @@ mixin _$CreateCompletionRequest { @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; + /// Serializes this CreateCompletionRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateCompletionRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -178,6 +182,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -276,6 +282,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionModelCopyWith<$Res> get model { @@ -284,6 +292,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionPromptCopyWith<$Res>? get prompt { @@ -296,6 +306,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionStopCopyWith<$Res>? get stop { @@ -308,6 +320,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamOptionsCopyWith<$Res>? get streamOptions { @@ -377,6 +391,8 @@ class __$$CreateCompletionRequestImplCopyWithImpl<$Res> $Res Function(_$CreateCompletionRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -676,7 +692,7 @@ class _$CreateCompletionRequestImpl extends _CreateCompletionRequest { (identical(other.user, user) || other.user == user)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -699,7 +715,9 @@ class _$CreateCompletionRequestImpl extends _CreateCompletionRequest { topP, user); - @JsonKey(ignore: true) + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateCompletionRequestImplCopyWith<_$CreateCompletionRequestImpl> @@ -746,127 +764,129 @@ abstract class _CreateCompletionRequest extends CreateCompletionRequest { factory _CreateCompletionRequest.fromJson(Map json) = _$CreateCompletionRequestImpl.fromJson; - @override - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @override @_CompletionModelConverter() CompletionModel get model; - @override /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. /// /// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + @override @_CompletionPromptConverter() CompletionPrompt? get prompt; - @override /// Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. /// /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. 
/// /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + @override @JsonKey(name: 'best_of', includeIfNull: false) int? get bestOf; - @override /// Echo back the prompt in addition to the completion + @override @JsonKey(includeIfNull: false) bool? get echo; - @override /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + @override @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty; - @override /// Modify the likelihood of specified tokens appearing in the completion. /// /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](https://platform.openai.com/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. /// /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + @override @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias; - @override /// Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. /// /// The maximum value for `logprobs` is 5. + @override @JsonKey(includeIfNull: false) int? get logprobs; - @override /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the completion. /// /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @override @JsonKey(name: 'max_tokens', includeIfNull: false) int? get maxTokens; - @override /// How many completions to generate for each prompt. /// /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + @override @JsonKey(includeIfNull: false) int? get n; - @override /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + @override @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; - @override /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. 
/// /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + @override @JsonKey(includeIfNull: false) int? get seed; - @override /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + @override @_CompletionStopConverter() @JsonKey(includeIfNull: false) CompletionStop? get stop; - @override /// Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @override @JsonKey(includeIfNull: false) bool? get stream; - @override /// Options for streaming response. Only set this when you set `stream: true`. + @override @JsonKey(name: 'stream_options', includeIfNull: false) ChatCompletionStreamOptions? get streamOptions; - @override /// The suffix that comes after a completion of inserted text. /// /// This parameter is only supported for `gpt-3.5-turbo-instruct`. + @override @JsonKey(includeIfNull: false) String? get suffix; - @override /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. /// /// We generally recommend altering this or `top_p` but not both. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or `temperature` but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override @JsonKey(includeIfNull: false) String? get user; + + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateCompletionRequestImplCopyWith<_$CreateCompletionRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -925,6 +945,8 @@ mixin _$CompletionModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CompletionModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -944,6 +966,9 @@ class _$CompletionModelCopyWithImpl<$Res, $Val extends CompletionModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -966,6 +991,8 @@ class __$$CompletionModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$CompletionModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. 
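The legacy completions parameters restated above (prompt, best_of, logit_bias, max_tokens, stop, ...) surface on the Dart side roughly as in this hedged sketch; the factory names are inferred from the CompletionModelString / CompletionPromptString / CompletionStopString implementations in this file and should be treated as assumptions.

// Hedged sketch, not part of the patch: a legacy completions request using
// the parameters documented above. Factory names are assumptions inferred
// from the union implementations in this generated file.
import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  final client = OpenAIClient(apiKey: '<api-key>');

  final res = await client.createCompletion(
    request: CreateCompletionRequest(
      model: CompletionModel.modelId('gpt-3.5-turbo-instruct'),
      prompt: CompletionPrompt.string('Say this is a test.'),
      maxTokens: 16,
      temperature: 0.0,
      // As the logit_bias doc comment above notes, {"50256": -100} bans the
      // <|endoftext|> token.
      logitBias: {'50256': -100},
      stop: CompletionStop.string('\n'),
    ),
  );

  print(res.choices.first.text);
}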
@pragma('vm:prefer-inline') @override $Res call({ @@ -1010,11 +1037,13 @@ class _$CompletionModelEnumerationImpl extends CompletionModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionModelEnumerationImplCopyWith<_$CompletionModelEnumerationImpl> @@ -1101,7 +1130,10 @@ abstract class CompletionModelEnumeration extends CompletionModel { @override CompletionModels get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionModelEnumerationImplCopyWith<_$CompletionModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1124,6 +1156,8 @@ class __$$CompletionModelStringImplCopyWithImpl<$Res> $Res Function(_$CompletionModelStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1167,11 +1201,13 @@ class _$CompletionModelStringImpl extends CompletionModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionModelStringImplCopyWith<_$CompletionModelStringImpl> @@ -1258,7 +1294,10 @@ abstract class CompletionModelString extends CompletionModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionModelStringImplCopyWith<_$CompletionModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1333,6 +1372,8 @@ mixin _$CompletionPrompt { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CompletionPrompt to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -1352,6 +1393,9 @@ class _$CompletionPromptCopyWithImpl<$Res, $Val extends CompletionPrompt> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -1374,6 +1418,8 @@ class __$$CompletionPromptListListIntImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptListListIntImpl) _then) : super(_value, _then); + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -1425,12 +1471,14 @@ class _$CompletionPromptListListIntImpl extends CompletionPromptListListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionPromptListListIntImplCopyWith<_$CompletionPromptListListIntImpl> @@ -1529,7 +1577,10 @@ abstract class CompletionPromptListListInt extends CompletionPrompt { @override List> get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionPromptListListIntImplCopyWith<_$CompletionPromptListListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1553,6 +1604,8 @@ class __$$CompletionPromptListIntImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptListIntImpl) _then) : super(_value, _then); + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1603,12 +1656,14 @@ class _$CompletionPromptListIntImpl extends CompletionPromptListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionPromptListIntImplCopyWith<_$CompletionPromptListIntImpl> @@ -1707,7 +1762,10 @@ abstract class CompletionPromptListInt extends CompletionPrompt { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionPromptListIntImplCopyWith<_$CompletionPromptListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1732,6 +1790,8 @@ class __$$CompletionPromptListStringImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptListStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1783,12 +1843,14 @@ class _$CompletionPromptListStringImpl extends CompletionPromptListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. 
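Much of this regenerated file is the mechanical swap of @JsonKey(ignore: true) for @JsonKey(includeFromJson: false, includeToJson: false) on copyWith and hashCode, reflecting the deprecation of JsonKey.ignore in recent json_annotation releases. A hedged, hand-written sketch of the replacement on a hypothetical class (not from this package):

// Hedged sketch with a hypothetical class: excluding a field from both
// directions of serialization is the modern replacement for
// `@JsonKey(ignore: true)`.
import 'package:json_annotation/json_annotation.dart';

part 'session.g.dart'; // hypothetical generated part (json_serializable)

@JsonSerializable()
class Session {
  Session({required this.id, this.localCache});

  final String id;

  // Previously: @JsonKey(ignore: true)
  @JsonKey(includeFromJson: false, includeToJson: false)
  final Object? localCache;

  factory Session.fromJson(Map<String, dynamic> json) => _$SessionFromJson(json);

  Map<String, dynamic> toJson() => _$SessionToJson(this);
}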
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionPromptListStringImplCopyWith<_$CompletionPromptListStringImpl> @@ -1887,7 +1949,10 @@ abstract class CompletionPromptListString extends CompletionPrompt { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionPromptListStringImplCopyWith<_$CompletionPromptListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1911,6 +1976,8 @@ class __$$CompletionPromptStringImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1954,11 +2021,13 @@ class _$CompletionPromptStringImpl extends CompletionPromptString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionPromptStringImplCopyWith<_$CompletionPromptStringImpl> @@ -2057,7 +2126,10 @@ abstract class CompletionPromptString extends CompletionPrompt { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionPromptStringImplCopyWith<_$CompletionPromptStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2116,6 +2188,8 @@ mixin _$CompletionStop { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CompletionStop to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -2135,6 +2209,9 @@ class _$CompletionStopCopyWithImpl<$Res, $Val extends CompletionStop> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -2156,6 +2233,8 @@ class __$$CompletionStopListStringImplCopyWithImpl<$Res> $Res Function(_$CompletionStopListStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2206,12 +2285,14 @@ class _$CompletionStopListStringImpl extends CompletionStopListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionStopListStringImplCopyWith<_$CompletionStopListStringImpl> @@ -2298,7 +2379,10 @@ abstract class CompletionStopListString extends CompletionStop { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionStopListStringImplCopyWith<_$CompletionStopListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2320,6 +2404,8 @@ class __$$CompletionStopStringImplCopyWithImpl<$Res> $Res Function(_$CompletionStopStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2363,11 +2449,13 @@ class _$CompletionStopStringImpl extends CompletionStopString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionStopStringImplCopyWith<_$CompletionStopStringImpl> @@ -2455,7 +2543,10 @@ abstract class CompletionStopString extends CompletionStop { @override String? get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionStopStringImplCopyWith<_$CompletionStopStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2493,8 +2584,12 @@ mixin _$CreateCompletionResponse { @JsonKey(includeIfNull: false) CompletionUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this CreateCompletionResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateCompletionResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -2529,6 +2624,8 @@ class _$CreateCompletionResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2572,6 +2669,8 @@ class _$CreateCompletionResponseCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionUsageCopyWith<$Res>? get usage { @@ -2618,6 +2717,8 @@ class __$$CreateCompletionResponseImplCopyWithImpl<$Res> $Res Function(_$CreateCompletionResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -2739,7 +2840,7 @@ class _$CreateCompletionResponseImpl extends _CreateCompletionResponse { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -2751,7 +2852,9 @@ class _$CreateCompletionResponseImpl extends _CreateCompletionResponse { object, usage); - @JsonKey(ignore: true) + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateCompletionResponseImplCopyWith<_$CreateCompletionResponseImpl> @@ -2782,40 +2885,42 @@ abstract class _CreateCompletionResponse extends CreateCompletionResponse { factory _CreateCompletionResponse.fromJson(Map json) = _$CreateCompletionResponseImpl.fromJson; - @override - /// A unique identifier for the completion. - String get id; @override + String get id; /// The list of completion choices the model generated for the input prompt. - List get choices; @override + List get choices; /// The Unix timestamp (in seconds) of when the completion was created. - int get created; @override + int get created; /// The model used for completion. - String get model; @override + String get model; /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + @override @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? get systemFingerprint; - @override /// The object type, which is always "text_completion" - CreateCompletionResponseObject get object; @override + CreateCompletionResponseObject get object; /// Usage statistics for the completion request. + @override @JsonKey(includeIfNull: false) CompletionUsage? get usage; + + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateCompletionResponseImplCopyWith<_$CreateCompletionResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2844,8 +2949,12 @@ mixin _$CompletionChoice { /// The text of the completion. String get text => throw _privateConstructorUsedError; + /// Serializes this CompletionChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CompletionChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -2878,6 +2987,8 @@ class _$CompletionChoiceCopyWithImpl<$Res, $Val extends CompletionChoice> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2906,6 +3017,8 @@ class _$CompletionChoiceCopyWithImpl<$Res, $Val extends CompletionChoice> ) as $Val); } + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionLogprobsCopyWith<$Res>? 
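The new doc comments spell out what freezed's copyWith does: it returns a copy with the given fields replaced by the non-null parameter values. A brief usage sketch against the CreateCompletionResponse fields listed above:

// Hedged sketch: copyWith returns a new immutable instance; omitted (or null)
// parameters keep the original values, per the generated doc comment.
import 'package:openai_dart/openai_dart.dart';

CreateCompletionResponse relabel(CreateCompletionResponse res) {
  // Only `model` changes; id, choices, created, object and usage carry over.
  return res.copyWith(model: 'gpt-3.5-turbo-instruct');
}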
get logprobs { @@ -2948,6 +3061,8 @@ class __$$CompletionChoiceImplCopyWithImpl<$Res> $Res Function(_$CompletionChoiceImpl) _then) : super(_value, _then); + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3032,12 +3147,14 @@ class _$CompletionChoiceImpl extends _CompletionChoice { (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, finishReason, index, logprobs, text); - @JsonKey(ignore: true) + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionChoiceImplCopyWith<_$CompletionChoiceImpl> get copyWith => @@ -3066,29 +3183,31 @@ abstract class _CompletionChoice extends CompletionChoice { factory _CompletionChoice.fromJson(Map json) = _$CompletionChoiceImpl.fromJson; - @override - /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, /// `length` if the maximum number of tokens specified in the request was reached, /// or `content_filter` if content was omitted due to a flag from our content filters. + @override @JsonKey( name: 'finish_reason', unknownEnumValue: JsonKey.nullForUndefinedEnumValue) CompletionFinishReason? get finishReason; - @override /// The index of the choice in the list of generated choices. - int get index; @override + int get index; /// The probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - CompletionLogprobs? get logprobs; @override + CompletionLogprobs? get logprobs; /// The text of the completion. + @override String get text; + + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionChoiceImplCopyWith<_$CompletionChoiceImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3116,8 +3235,12 @@ mixin _$CompletionLogprobs { List?>? get topLogprobs => throw _privateConstructorUsedError; + /// Serializes this CompletionLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CompletionLogprobsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3148,6 +3271,8 @@ class _$CompletionLogprobsCopyWithImpl<$Res, $Val extends CompletionLogprobs> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3203,6 +3328,8 @@ class __$$CompletionLogprobsImplCopyWithImpl<$Res> $Res Function(_$CompletionLogprobsImpl) _then) : super(_value, _then); + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -3327,7 +3454,7 @@ class _$CompletionLogprobsImpl extends _CompletionLogprobs { .equals(other._topLogprobs, _topLogprobs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -3336,7 +3463,9 @@ class _$CompletionLogprobsImpl extends _CompletionLogprobs { const DeepCollectionEquality().hash(_tokens), const DeepCollectionEquality().hash(_topLogprobs)); - @JsonKey(ignore: true) + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionLogprobsImplCopyWith<_$CompletionLogprobsImpl> get copyWith => @@ -3366,28 +3495,30 @@ abstract class _CompletionLogprobs extends CompletionLogprobs { factory _CompletionLogprobs.fromJson(Map json) = _$CompletionLogprobsImpl.fromJson; - @override - /// The offset of the token from the beginning of the prompt. + @override @JsonKey(name: 'text_offset', includeIfNull: false) List? get textOffset; - @override /// The log probabilities of tokens in the completion. + @override @JsonKey(name: 'token_logprobs', includeIfNull: false) List? get tokenLogprobs; - @override /// The tokens generated by the model converted back to text. + @override @JsonKey(includeIfNull: false) List? get tokens; - @override /// The log probabilities of the `logprobs` most likely tokens. + @override @JsonKey(name: 'top_logprobs', includeIfNull: false) List?>? get topLogprobs; + + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionLogprobsImplCopyWith<_$CompletionLogprobsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3543,8 +3674,12 @@ mixin _$CreateChatCompletionRequest { @JsonKey(includeIfNull: false) List? get functions => throw _privateConstructorUsedError; + /// Serializes this CreateChatCompletionRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateChatCompletionRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3617,6 +3752,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3740,6 +3877,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionModelCopyWith<$Res> get model { @@ -3748,6 +3887,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionResponseFormatCopyWith<$Res>? 
get responseFormat { @@ -3761,6 +3902,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStopCopyWith<$Res>? get stop { @@ -3773,6 +3916,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamOptionsCopyWith<$Res>? get streamOptions { @@ -3786,6 +3931,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionToolChoiceOptionCopyWith<$Res>? get toolChoice { @@ -3799,6 +3946,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionFunctionCallCopyWith<$Res>? get functionCall { @@ -3887,6 +4036,8 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> $Res Function(_$CreateChatCompletionRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4309,7 +4460,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { .equals(other._functions, _functions)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hashAll([ runtimeType, @@ -4338,7 +4489,9 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { const DeepCollectionEquality().hash(_functions) ]); - @JsonKey(ignore: true) + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateChatCompletionRequestImplCopyWith<_$CreateChatCompletionRequestImpl> @@ -4404,75 +4557,73 @@ abstract class _CreateChatCompletionRequest factory _CreateChatCompletionRequest.fromJson(Map json) = _$CreateChatCompletionRequestImpl.fromJson; - @override - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + @override @_ChatCompletionModelConverter() ChatCompletionModel get model; - @override /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). - List get messages; @override + List get messages; /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + @override @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty; - @override /// Modify the likelihood of specified tokens appearing in the completion. 
/// /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + @override @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias; - @override /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + @override @JsonKey(includeIfNull: false) bool? get logprobs; - @override /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + @override @JsonKey(name: 'top_logprobs', includeIfNull: false) int? get topLogprobs; - @override /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. /// /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @override @JsonKey(name: 'max_tokens', includeIfNull: false) int? get maxTokens; - @override /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + @override @JsonKey(includeIfNull: false) int? get n; - @override /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + @override @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; - @override /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @JsonKey(name: 'response_format', includeIfNull: false) ChatCompletionResponseFormat? get responseFormat; - @override /// This feature is in Beta. /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. 
/// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + @override @JsonKey(includeIfNull: false) int? get seed; - @override /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers /// subscribed to the scale tier service: @@ -4482,47 +4633,47 @@ abstract class _CreateChatCompletionRequest /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. + @override @JsonKey( name: 'service_tier', includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) CreateChatCompletionRequestServiceTier? get serviceTier; - @override /// Up to 4 sequences where the API will stop generating further tokens. + @override @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? get stop; - @override /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @override @JsonKey(includeIfNull: false) bool? get stream; - @override /// Options for streaming response. Only set this when you set `stream: true`. + @override @JsonKey(name: 'stream_options', includeIfNull: false) ChatCompletionStreamOptions? get streamOptions; - @override /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. /// /// We generally recommend altering this or `top_p` but not both. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or `temperature` but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + @override @JsonKey(includeIfNull: false) List? get tools; - @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tool and instead generates a message. @@ -4531,21 +4682,21 @@ abstract class _CreateChatCompletionRequest /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. + @override @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? get toolChoice; - @override /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. + @override @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? 
get parallelToolCalls; - @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override @JsonKey(includeIfNull: false) String? get user; - @override /// Deprecated in favor of `tool_choice`. /// @@ -4555,18 +4706,22 @@ abstract class _CreateChatCompletionRequest /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. + @override @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionFunctionCall? get functionCall; - @override /// Deprecated in favor of `tools`. /// /// A list of functions the model may generate JSON inputs for. + @override @JsonKey(includeIfNull: false) List? get functions; + + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateChatCompletionRequestImplCopyWith<_$CreateChatCompletionRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -4625,6 +4780,8 @@ mixin _$ChatCompletionModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -4644,6 +4801,9 @@ class _$ChatCompletionModelCopyWithImpl<$Res, $Val extends ChatCompletionModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -4666,6 +4826,8 @@ class __$$ChatCompletionModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4711,11 +4873,13 @@ class _$ChatCompletionModelEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionModelEnumerationImplCopyWith< @@ -4803,7 +4967,10 @@ abstract class ChatCompletionModelEnumeration extends ChatCompletionModel { @override ChatCompletionModels get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionModelEnumerationImplCopyWith< _$ChatCompletionModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -4829,6 +4996,8 @@ class __$$ChatCompletionModelStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionModelStringImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. 
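The tools / tool_choice doc comments above replace the deprecated functions / function_call pair. A hedged sketch of declaring a tool for the chat API follows; ChatCompletionTool, ChatCompletionToolType and FunctionObject are assumed names for the classes behind the `tools` list and are not shown in this hunk.

// Hedged sketch, not part of the patch: declaring a single function tool.
import 'package:openai_dart/openai_dart.dart';

final weatherTool = ChatCompletionTool(
  type: ChatCompletionToolType.function,
  function: FunctionObject(
    name: 'get_current_weather',
    description: 'Get the current weather in a given location.',
    parameters: {
      'type': 'object',
      'properties': {
        'location': {'type': 'string', 'description': 'City name'},
      },
      'required': ['location'],
    },
  ),
);

final request = CreateChatCompletionRequest(
  model: ChatCompletionModel.modelId('gpt-4o'),
  messages: [
    ChatCompletionMessage.user(
      content: ChatCompletionUserMessageContent.string(
        'What is the weather in Copenhagen?',
      ),
    ),
  ],
  tools: [weatherTool],
  // tool_choice defaults to `auto` when tools are present, per the doc above.
);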
@pragma('vm:prefer-inline') @override $Res call({ @@ -4872,11 +5041,13 @@ class _$ChatCompletionModelStringImpl extends ChatCompletionModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionModelStringImplCopyWith<_$ChatCompletionModelStringImpl> @@ -4963,7 +5134,10 @@ abstract class ChatCompletionModelString extends ChatCompletionModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionModelStringImplCopyWith<_$ChatCompletionModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -4979,8 +5153,12 @@ mixin _$ChatCompletionResponseFormat { ChatCompletionResponseFormatType get type => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionResponseFormatCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -5007,6 +5185,8 @@ class _$ChatCompletionResponseFormatCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5043,6 +5223,8 @@ class __$$ChatCompletionResponseFormatImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionResponseFormatImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5086,11 +5268,13 @@ class _$ChatCompletionResponseFormatImpl extends _ChatCompletionResponseFormat { (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionResponseFormatImplCopyWith< @@ -5116,12 +5300,14 @@ abstract class _ChatCompletionResponseFormat factory _ChatCompletionResponseFormat.fromJson(Map json) = _$ChatCompletionResponseFormatImpl.fromJson; - @override - /// Must be one of `text` or `json_object`. + @override ChatCompletionResponseFormatType get type; + + /// Create a copy of ChatCompletionResponseFormat + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionResponseFormatImplCopyWith< _$ChatCompletionResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -5181,6 +5367,8 @@ mixin _$ChatCompletionStop { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionStop to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -5200,6 +5388,9 @@ class _$ChatCompletionStopCopyWithImpl<$Res, $Val extends ChatCompletionStop> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -5222,6 +5413,8 @@ class __$$ChatCompletionStopListStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStopListStringImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5273,12 +5466,14 @@ class _$ChatCompletionStopListStringImpl extends ChatCompletionStopListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStopListStringImplCopyWith< @@ -5366,7 +5561,10 @@ abstract class ChatCompletionStopListString extends ChatCompletionStop { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStopListStringImplCopyWith< _$ChatCompletionStopListStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -5392,6 +5590,8 @@ class __$$ChatCompletionStopStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStopStringImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5435,11 +5635,13 @@ class _$ChatCompletionStopStringImpl extends ChatCompletionStopString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStopStringImplCopyWith<_$ChatCompletionStopStringImpl> @@ -5526,7 +5728,10 @@ abstract class ChatCompletionStopString extends ChatCompletionStop { @override String? get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStopStringImplCopyWith<_$ChatCompletionStopStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -5597,6 +5802,8 @@ mixin _$ChatCompletionToolChoiceOption { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionToolChoiceOption to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -5619,6 +5826,9 @@ class _$ChatCompletionToolChoiceOptionCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -5641,6 +5851,8 @@ class __$$ChatCompletionToolChoiceOptionEnumerationImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionToolChoiceOptionEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5687,11 +5899,13 @@ class _$ChatCompletionToolChoiceOptionEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolChoiceOptionEnumerationImplCopyWith< @@ -5791,7 +6005,10 @@ abstract class ChatCompletionToolChoiceOptionEnumeration @override ChatCompletionToolChoiceMode get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionToolChoiceOptionEnumerationImplCopyWith< _$ChatCompletionToolChoiceOptionEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -5828,6 +6045,8 @@ class __$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWit _then) : super(_value, _then); + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5842,6 +6061,8 @@ class __$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWit )); } + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionNamedToolChoiceCopyWith<$Res> get value { @@ -5886,11 +6107,13 @@ class _$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWith< @@ -5992,7 +6215,10 @@ abstract class ChatCompletionToolChoiceOptionChatCompletionNamedToolChoice @override ChatCompletionNamedToolChoice get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWith< _$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -6063,6 +6289,8 @@ mixin _$ChatCompletionFunctionCall { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionFunctionCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -6084,6 +6312,9 @@ class _$ChatCompletionFunctionCallCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -6106,6 +6337,8 @@ class __$$ChatCompletionFunctionCallEnumerationImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionFunctionCallEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6152,11 +6385,13 @@ class _$ChatCompletionFunctionCallEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionCallEnumerationImplCopyWith< @@ -6253,7 +6488,10 @@ abstract class ChatCompletionFunctionCallEnumeration @override ChatCompletionFunctionCallMode get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionFunctionCallEnumerationImplCopyWith< _$ChatCompletionFunctionCallEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -6290,6 +6528,8 @@ class __$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith _then) : super(_value, _then); + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6304,6 +6544,8 @@ class __$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith )); } + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $ChatCompletionFunctionCallOptionCopyWith<$Res> get value { @@ -6349,11 +6591,13 @@ class _$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith< @@ -6453,7 +6697,10 @@ abstract class ChatCompletionFunctionCallChatCompletionFunctionCallOption @override ChatCompletionFunctionCallOption get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith< _$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImpl> get copyWith => throw _privateConstructorUsedError; @@ -6472,8 +6719,12 @@ mixin _$ChatCompletionMessageFunctionCall { /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. String get arguments => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionMessageFunctionCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionMessageFunctionCallCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6500,6 +6751,8 @@ class _$ChatCompletionMessageFunctionCallCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6541,6 +6794,8 @@ class __$$ChatCompletionMessageFunctionCallImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageFunctionCallImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6595,11 +6850,13 @@ class _$ChatCompletionMessageFunctionCallImpl other.arguments == arguments)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageFunctionCallImplCopyWith< @@ -6626,16 +6883,18 @@ abstract class _ChatCompletionMessageFunctionCall Map json) = _$ChatCompletionMessageFunctionCallImpl.fromJson; - @override - /// The name of the function to call. 
- String get name; @override + String get name; /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + @override String get arguments; + + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageFunctionCallImplCopyWith< _$ChatCompletionMessageFunctionCallImpl> get copyWith => throw _privateConstructorUsedError; @@ -6651,8 +6910,12 @@ mixin _$ChatCompletionFunctionCallOption { /// The name of the function to call. String get name => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionFunctionCallOption to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionFunctionCallOptionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6679,6 +6942,8 @@ class _$ChatCompletionFunctionCallOptionCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6715,6 +6980,8 @@ class __$$ChatCompletionFunctionCallOptionImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionFunctionCallOptionImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6757,11 +7024,13 @@ class _$ChatCompletionFunctionCallOptionImpl (identical(other.name, name) || other.name == name)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionCallOptionImplCopyWith< @@ -6787,12 +7056,14 @@ abstract class _ChatCompletionFunctionCallOption Map json) = _$ChatCompletionFunctionCallOptionImpl.fromJson; - @override - /// The name of the function to call. + @override String get name; + + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionFunctionCallOptionImplCopyWith< _$ChatCompletionFunctionCallOptionImpl> get copyWith => throw _privateConstructorUsedError; @@ -6817,8 +7088,12 @@ mixin _$FunctionObject { @JsonKey(includeIfNull: false) Map? get parameters => throw _privateConstructorUsedError; + /// Serializes this FunctionObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $FunctionObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6845,6 +7120,8 @@ class _$FunctionObjectCopyWithImpl<$Res, $Val extends FunctionObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6891,6 +7168,8 @@ class __$$FunctionObjectImplCopyWithImpl<$Res> _$FunctionObjectImpl _value, $Res Function(_$FunctionObjectImpl) _then) : super(_value, _then); + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6972,12 +7251,14 @@ class _$FunctionObjectImpl extends _FunctionObject { .equals(other._parameters, _parameters)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, description, const DeepCollectionEquality().hash(_parameters)); - @JsonKey(ignore: true) + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FunctionObjectImplCopyWith<_$FunctionObjectImpl> get copyWith => @@ -7003,24 +7284,26 @@ abstract class _FunctionObject extends FunctionObject { factory _FunctionObject.fromJson(Map json) = _$FunctionObjectImpl.fromJson; - @override - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - String get name; @override + String get name; /// A description of what the function does, used by the model to choose when and how to call the function. + @override @JsonKey(includeIfNull: false) String? get description; - @override /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. + @override @JsonKey(includeIfNull: false) Map? get parameters; + + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FunctionObjectImplCopyWith<_$FunctionObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -7037,8 +7320,12 @@ mixin _$ChatCompletionTool { /// A function that the model may call. FunctionObject get function => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionTool to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionToolCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7064,6 +7351,8 @@ class _$ChatCompletionToolCopyWithImpl<$Res, $Val extends ChatCompletionTool> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -7082,6 +7371,8 @@ class _$ChatCompletionToolCopyWithImpl<$Res, $Val extends ChatCompletionTool> ) as $Val); } + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FunctionObjectCopyWith<$Res> get function { @@ -7113,6 +7404,8 @@ class __$$ChatCompletionToolImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionToolImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7164,11 +7457,13 @@ class _$ChatCompletionToolImpl extends _ChatCompletionTool { other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, function); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolImplCopyWith<_$ChatCompletionToolImpl> get copyWith => @@ -7192,16 +7487,18 @@ abstract class _ChatCompletionTool extends ChatCompletionTool { factory _ChatCompletionTool.fromJson(Map json) = _$ChatCompletionToolImpl.fromJson; - @override - /// The type of the tool. Currently, only `function` is supported. - ChatCompletionToolType get type; @override + ChatCompletionToolType get type; /// A function that the model may call. + @override FunctionObject get function; + + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionToolImplCopyWith<_$ChatCompletionToolImpl> get copyWith => throw _privateConstructorUsedError; } @@ -7221,8 +7518,12 @@ mixin _$ChatCompletionNamedToolChoice { ChatCompletionFunctionCallOption get function => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionNamedToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionNamedToolChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7253,6 +7554,8 @@ class _$ChatCompletionNamedToolChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7271,6 +7574,8 @@ class _$ChatCompletionNamedToolChoiceCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionFunctionCallOptionCopyWith<$Res> get function { @@ -7308,6 +7613,8 @@ class __$$ChatCompletionNamedToolChoiceImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionNamedToolChoiceImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -7362,11 +7669,13 @@ class _$ChatCompletionNamedToolChoiceImpl other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, function); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionNamedToolChoiceImplCopyWith< @@ -7393,16 +7702,18 @@ abstract class _ChatCompletionNamedToolChoice factory _ChatCompletionNamedToolChoice.fromJson(Map json) = _$ChatCompletionNamedToolChoiceImpl.fromJson; - @override - /// The type of the tool. Currently, only `function` is supported. - ChatCompletionNamedToolChoiceType get type; @override + ChatCompletionNamedToolChoiceType get type; /// Forces the model to call the specified function. + @override ChatCompletionFunctionCallOption get function; + + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionNamedToolChoiceImplCopyWith< _$ChatCompletionNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -7426,8 +7737,12 @@ mixin _$ChatCompletionMessageToolCall { ChatCompletionMessageFunctionCall get function => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionMessageToolCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionMessageToolCallCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7459,6 +7774,8 @@ class _$ChatCompletionMessageToolCallCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7482,6 +7799,8 @@ class _$ChatCompletionMessageToolCallCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionMessageFunctionCallCopyWith<$Res> get function { @@ -7520,6 +7839,8 @@ class __$$ChatCompletionMessageToolCallImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageToolCallImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7584,11 +7905,13 @@ class _$ChatCompletionMessageToolCallImpl other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageToolCallImplCopyWith< @@ -7616,20 +7939,22 @@ abstract class _ChatCompletionMessageToolCall factory _ChatCompletionMessageToolCall.fromJson(Map json) = _$ChatCompletionMessageToolCallImpl.fromJson; - @override - /// The ID of the tool call. - String get id; @override + String get id; /// The type of the tool. Currently, only `function` is supported. - ChatCompletionMessageToolCallType get type; @override + ChatCompletionMessageToolCallType get type; /// The name and arguments of a function that should be called, as generated by the model. + @override ChatCompletionMessageFunctionCall get function; + + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageToolCallImplCopyWith< _$ChatCompletionMessageToolCallImpl> get copyWith => throw _privateConstructorUsedError; @@ -7646,8 +7971,12 @@ mixin _$ChatCompletionStreamOptions { @JsonKey(name: 'include_usage', includeIfNull: false) bool? get includeUsage => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamOptions to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamOptionsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7676,6 +8005,8 @@ class _$ChatCompletionStreamOptionsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7714,6 +8045,8 @@ class __$$ChatCompletionStreamOptionsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamOptionsImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7758,11 +8091,13 @@ class _$ChatCompletionStreamOptionsImpl extends _ChatCompletionStreamOptions { other.includeUsage == includeUsage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, includeUsage); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamOptionsImplCopyWith<_$ChatCompletionStreamOptionsImpl> @@ -7787,13 +8122,15 @@ abstract class _ChatCompletionStreamOptions factory _ChatCompletionStreamOptions.fromJson(Map json) = _$ChatCompletionStreamOptionsImpl.fromJson; - @override - /// If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. + @override @JsonKey(name: 'include_usage', includeIfNull: false) bool? 
get includeUsage; + + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamOptionsImplCopyWith<_$ChatCompletionStreamOptionsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -7840,8 +8177,12 @@ mixin _$CreateChatCompletionResponse { @JsonKey(includeIfNull: false) CompletionUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this CreateChatCompletionResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateChatCompletionResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7883,6 +8224,8 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7931,6 +8274,8 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionUsageCopyWith<$Res>? get usage { @@ -7982,6 +8327,8 @@ class __$$CreateChatCompletionResponseImplCopyWithImpl<$Res> $Res Function(_$CreateChatCompletionResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8126,7 +8473,7 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -8139,7 +8486,9 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { object, usage); - @JsonKey(ignore: true) + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateChatCompletionResponseImplCopyWith< @@ -8177,50 +8526,52 @@ abstract class _CreateChatCompletionResponse factory _CreateChatCompletionResponse.fromJson(Map json) = _$CreateChatCompletionResponseImpl.fromJson; - @override - /// A unique identifier for the chat completion. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// A list of chat completion choices. Can be more than one if `n` is greater than 1. - List get choices; @override + List get choices; /// The Unix timestamp (in seconds) of when the chat completion was created. - int get created; @override + int get created; /// The model used for the chat completion. - String get model; @override + String get model; /// The service tier used for processing the request. This field is only included if the `service_tier` parameter /// is specified in the request. + @override @JsonKey( name: 'service_tier', includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ServiceTier? 
get serviceTier; - @override /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + @override @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? get systemFingerprint; - @override /// The object type, which is always `chat.completion`. - String get object; @override + String get object; /// Usage statistics for the completion request. + @override @JsonKey(includeIfNull: false) CompletionUsage? get usage; + + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateChatCompletionResponseImplCopyWith< _$CreateChatCompletionResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -8254,8 +8605,12 @@ mixin _$ChatCompletionResponseChoice { /// Log probability information for the choice. ChatCompletionLogprobs? get logprobs => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionResponseChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionResponseChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8291,6 +8646,8 @@ class _$ChatCompletionResponseChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8319,6 +8676,8 @@ class _$ChatCompletionResponseChoiceCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionLogprobsCopyWith<$Res>? get logprobs { @@ -8364,6 +8723,8 @@ class __$$ChatCompletionResponseChoiceImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionResponseChoiceImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8451,12 +8812,14 @@ class _$ChatCompletionResponseChoiceImpl extends _ChatCompletionResponseChoice { other.logprobs == logprobs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, finishReason, index, const DeepCollectionEquality().hash(message), logprobs); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionResponseChoiceImplCopyWith< @@ -8488,31 +8851,33 @@ abstract class _ChatCompletionResponseChoice factory _ChatCompletionResponseChoice.fromJson(Map json) = _$ChatCompletionResponseChoiceImpl.fromJson; - @override - /// The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, /// `length` if the maximum number of tokens specified in the request was reached, /// `content_filter` if content was omitted due to a flag from our content filters, /// `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + @override @JsonKey( name: 'finish_reason', unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionFinishReason? get finishReason; - @override /// The index of the choice in the list of choices. + @override @JsonKey(includeIfNull: false) int? get index; - @override /// An assistant message in a chat conversation. - ChatCompletionAssistantMessage get message; @override + ChatCompletionAssistantMessage get message; /// Log probability information for the choice. + @override ChatCompletionLogprobs? get logprobs; + + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionResponseChoiceImplCopyWith< _$ChatCompletionResponseChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -8529,8 +8894,12 @@ mixin _$ChatCompletionLogprobs { List? get content => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionLogprobsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8555,6 +8924,8 @@ class _$ChatCompletionLogprobsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8591,6 +8962,8 @@ class __$$ChatCompletionLogprobsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionLogprobsImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8642,12 +9015,14 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { const DeepCollectionEquality().equals(other._content, _content)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_content)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionLogprobsImplCopyWith<_$ChatCompletionLogprobsImpl> @@ -8671,12 +9046,14 @@ abstract class _ChatCompletionLogprobs extends ChatCompletionLogprobs { factory _ChatCompletionLogprobs.fromJson(Map json) = _$ChatCompletionLogprobsImpl.fromJson; - @override - /// A list of message content tokens with log probability information. + @override List? get content; + + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionLogprobsImplCopyWith<_$ChatCompletionLogprobsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -8702,8 +9079,12 @@ mixin _$ChatCompletionTokenLogprob { List get topLogprobs => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionTokenLogprob to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionTokenLogprobCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8734,6 +9115,8 @@ class _$ChatCompletionTokenLogprobCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8790,6 +9173,8 @@ class __$$ChatCompletionTokenLogprobImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionTokenLogprobImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8886,7 +9271,7 @@ class _$ChatCompletionTokenLogprobImpl extends _ChatCompletionTokenLogprob { .equals(other._topLogprobs, _topLogprobs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -8895,7 +9280,9 @@ class _$ChatCompletionTokenLogprobImpl extends _ChatCompletionTokenLogprob { const DeepCollectionEquality().hash(_bytes), const DeepCollectionEquality().hash(_topLogprobs)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionTokenLogprobImplCopyWith<_$ChatCompletionTokenLogprobImpl> @@ -8923,25 +9310,27 @@ abstract class _ChatCompletionTokenLogprob extends ChatCompletionTokenLogprob { factory _ChatCompletionTokenLogprob.fromJson(Map json) = _$ChatCompletionTokenLogprobImpl.fromJson; - @override - /// The token. - String get token; @override + String get token; /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. - double get logprob; @override + double get logprob; /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - List? get bytes; @override + List? get bytes; /// List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. + @override @JsonKey(name: 'top_logprobs') List get topLogprobs; + + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionTokenLogprobImplCopyWith<_$ChatCompletionTokenLogprobImpl> get copyWith => throw _privateConstructorUsedError; } @@ -8962,8 +9351,12 @@ mixin _$ChatCompletionTokenTopLogprob { /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. List? get bytes => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionTokenTopLogprob to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionTokenTopLogprobCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8990,6 +9383,8 @@ class _$ChatCompletionTokenTopLogprobCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9036,6 +9431,8 @@ class __$$ChatCompletionTokenTopLogprobImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionTokenTopLogprobImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9111,12 +9508,14 @@ class _$ChatCompletionTokenTopLogprobImpl const DeepCollectionEquality().equals(other._bytes, _bytes)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, token, logprob, const DeepCollectionEquality().hash(_bytes)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionTokenTopLogprobImplCopyWith< @@ -9143,20 +9542,22 @@ abstract class _ChatCompletionTokenTopLogprob factory _ChatCompletionTokenTopLogprob.fromJson(Map json) = _$ChatCompletionTokenTopLogprobImpl.fromJson; - @override - /// The token. - String get token; @override + String get token; /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. - double get logprob; @override + double get logprob; /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + @override List? get bytes; + + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionTokenTopLogprobImplCopyWith< _$ChatCompletionTokenTopLogprobImpl> get copyWith => throw _privateConstructorUsedError; @@ -9208,8 +9609,12 @@ mixin _$CreateChatCompletionStreamResponse { @JsonKey(includeIfNull: false) CompletionUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this CreateChatCompletionStreamResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateChatCompletionStreamResponseCopyWith< CreateChatCompletionStreamResponse> get copyWith => throw _privateConstructorUsedError; @@ -9252,6 +9657,8 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9300,6 +9707,8 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionUsageCopyWith<$Res>? get usage { @@ -9351,6 +9760,8 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> $Res Function(_$CreateChatCompletionStreamResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9501,7 +9912,7 @@ class _$CreateChatCompletionStreamResponseImpl (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -9514,7 +9925,9 @@ class _$CreateChatCompletionStreamResponseImpl object, usage); - @JsonKey(ignore: true) + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateChatCompletionStreamResponseImplCopyWith< @@ -9553,54 +9966,56 @@ abstract class _CreateChatCompletionStreamResponse Map json) = _$CreateChatCompletionStreamResponseImpl.fromJson; - @override - /// A unique identifier for the chat completion. Each chunk has the same ID. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the /// last chunk if you set `stream_options: {"include_usage": true}`. - List get choices; @override + List get choices; /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + @override @JsonKey(includeIfNull: false) int? get created; - @override /// The model to generate the completion. + @override @JsonKey(includeIfNull: false) String? get model; - @override /// The service tier used for processing the request. This field is only included if the `service_tier` parameter /// is specified in the request. 
+ @override @JsonKey( name: 'service_tier', includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ServiceTier? get serviceTier; - @override /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact + @override @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? get systemFingerprint; - @override /// The object type, which is always `chat.completion.chunk`. + @override @JsonKey(includeIfNull: false) String? get object; - @override /// Usage statistics for the completion request. + @override @JsonKey(includeIfNull: false) CompletionUsage? get usage; + + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateChatCompletionStreamResponseImplCopyWith< _$CreateChatCompletionStreamResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -9636,8 +10051,12 @@ mixin _$ChatCompletionStreamResponseChoice { @JsonKey(includeIfNull: false) int? get index => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamResponseChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamResponseChoiceCopyWith< ChatCompletionStreamResponseChoice> get copyWith => throw _privateConstructorUsedError; @@ -9676,6 +10095,8 @@ class _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9704,6 +10125,8 @@ class _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamResponseDeltaCopyWith<$Res> get delta { @@ -9713,6 +10136,8 @@ class _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, }); } + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamResponseChoiceLogprobsCopyWith<$Res>? get logprobs { @@ -9762,6 +10187,8 @@ class __$$ChatCompletionStreamResponseChoiceImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamResponseChoiceImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9851,12 +10278,14 @@ class _$ChatCompletionStreamResponseChoiceImpl (identical(other.index, index) || other.index == index)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, delta, logprobs, finishReason, index); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamResponseChoiceImplCopyWith< @@ -9890,32 +10319,34 @@ abstract class _ChatCompletionStreamResponseChoice Map json) = _$ChatCompletionStreamResponseChoiceImpl.fromJson; - @override - /// A chat completion delta generated by streamed model responses. - ChatCompletionStreamResponseDelta get delta; @override + ChatCompletionStreamResponseDelta get delta; /// Log probability information for the choice. + @override @JsonKey(includeIfNull: false) ChatCompletionStreamResponseChoiceLogprobs? get logprobs; - @override /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, /// `length` if the maximum number of tokens specified in the request was reached, /// `content_filter` if content was omitted due to a flag from our content filters, /// `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + @override @JsonKey( name: 'finish_reason', unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionFinishReason? get finishReason; - @override /// The index of the choice in the list of choices. + @override @JsonKey(includeIfNull: false) int? get index; + + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamResponseChoiceImplCopyWith< _$ChatCompletionStreamResponseChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -9933,8 +10364,12 @@ mixin _$ChatCompletionStreamResponseChoiceLogprobs { List? get content => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamResponseChoiceLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamResponseChoiceLogprobsCopyWith< ChatCompletionStreamResponseChoiceLogprobs> get copyWith => throw _privateConstructorUsedError; @@ -9963,6 +10398,8 @@ class _$ChatCompletionStreamResponseChoiceLogprobsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10000,6 +10437,8 @@ class __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamResponseChoiceLogprobsImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10053,12 +10492,14 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl const DeepCollectionEquality().equals(other._content, _content)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_content)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith< @@ -10087,12 +10528,14 @@ abstract class _ChatCompletionStreamResponseChoiceLogprobs Map json) = _$ChatCompletionStreamResponseChoiceLogprobsImpl.fromJson; - @override - /// A list of message content tokens with log probability information. + @override List? get content; + + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith< _$ChatCompletionStreamResponseChoiceLogprobsImpl> get copyWith => throw _privateConstructorUsedError; @@ -10124,8 +10567,12 @@ mixin _$ChatCompletionStreamResponseDelta { includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionMessageRole? get role => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamResponseDelta to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamResponseDeltaCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -10163,6 +10610,8 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10191,6 +10640,8 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamMessageFunctionCallCopyWith<$Res>? get functionCall { @@ -10239,6 +10690,8 @@ class __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamResponseDeltaImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10336,12 +10789,14 @@ class _$ChatCompletionStreamResponseDeltaImpl (identical(other.role, role) || other.role == role)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, content, functionCall, const DeepCollectionEquality().hash(_toolCalls), role); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamResponseDeltaImplCopyWith< @@ -10376,29 +10831,31 @@ abstract class _ChatCompletionStreamResponseDelta Map json) = _$ChatCompletionStreamResponseDeltaImpl.fromJson; - @override - /// The contents of the chunk message. + @override @JsonKey(includeIfNull: false) String? get content; - @override /// The name and arguments of a function that should be called, as generated by the model. 
+ @override @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? get functionCall; - @override /// No Description + @override @JsonKey(name: 'tool_calls', includeIfNull: false) List? get toolCalls; - @override /// The role of the messages author. One of `system`, `user`, `assistant`, or `tool` (`function` is deprecated). + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionMessageRole? get role; + + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamResponseDeltaImplCopyWith< _$ChatCompletionStreamResponseDeltaImpl> get copyWith => throw _privateConstructorUsedError; @@ -10420,8 +10877,12 @@ mixin _$ChatCompletionStreamMessageFunctionCall { @JsonKey(includeIfNull: false) String? get arguments => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamMessageFunctionCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamMessageFunctionCallCopyWith< ChatCompletionStreamMessageFunctionCall> get copyWith => throw _privateConstructorUsedError; @@ -10452,6 +10913,8 @@ class _$ChatCompletionStreamMessageFunctionCallCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10495,6 +10958,8 @@ class __$$ChatCompletionStreamMessageFunctionCallImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamMessageFunctionCallImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10552,11 +11017,13 @@ class _$ChatCompletionStreamMessageFunctionCallImpl other.arguments == arguments)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamMessageFunctionCallImplCopyWith< @@ -10585,18 +11052,20 @@ abstract class _ChatCompletionStreamMessageFunctionCall Map json) = _$ChatCompletionStreamMessageFunctionCallImpl.fromJson; - @override - /// The name of the function to call. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + @override @JsonKey(includeIfNull: false) String? 
get arguments; + + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamMessageFunctionCallImplCopyWith< _$ChatCompletionStreamMessageFunctionCallImpl> get copyWith => throw _privateConstructorUsedError; @@ -10628,8 +11097,12 @@ mixin _$ChatCompletionStreamMessageToolCallChunk { ChatCompletionStreamMessageFunctionCall? get function => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamMessageToolCallChunk to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamMessageToolCallChunkCopyWith< ChatCompletionStreamMessageToolCallChunk> get copyWith => throw _privateConstructorUsedError; @@ -10668,6 +11141,8 @@ class _$ChatCompletionStreamMessageToolCallChunkCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10696,6 +11171,8 @@ class _$ChatCompletionStreamMessageToolCallChunkCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamMessageFunctionCallCopyWith<$Res>? get function { @@ -10743,6 +11220,8 @@ class __$$ChatCompletionStreamMessageToolCallChunkImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamMessageToolCallChunkImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10827,11 +11306,13 @@ class _$ChatCompletionStreamMessageToolCallChunkImpl other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamMessageToolCallChunkImplCopyWith< @@ -10866,28 +11347,30 @@ abstract class _ChatCompletionStreamMessageToolCallChunk Map json) = _$ChatCompletionStreamMessageToolCallChunkImpl.fromJson; - @override - /// No Description - int get index; @override + int get index; /// The ID of the tool call. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// The type of the tool. Currently, only `function` is supported. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionStreamMessageToolCallChunkType? get type; - @override /// The name and arguments of a function that should be called, as generated by the model. + @override @JsonKey(includeIfNull: false) ChatCompletionStreamMessageFunctionCall? 
get function; + + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamMessageToolCallChunkImplCopyWith< _$ChatCompletionStreamMessageToolCallChunkImpl> get copyWith => throw _privateConstructorUsedError; @@ -10911,8 +11394,12 @@ mixin _$CompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; + /// Serializes this CompletionUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CompletionUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -10939,6 +11426,8 @@ class _$CompletionUsageCopyWithImpl<$Res, $Val extends CompletionUsage> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10985,6 +11474,8 @@ class __$$CompletionUsageImplCopyWithImpl<$Res> _$CompletionUsageImpl _value, $Res Function(_$CompletionUsageImpl) _then) : super(_value, _then); + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11054,12 +11545,14 @@ class _$CompletionUsageImpl extends _CompletionUsage { other.totalTokens == totalTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); - @JsonKey(ignore: true) + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionUsageImplCopyWith<_$CompletionUsageImpl> get copyWith => @@ -11085,23 +11578,25 @@ abstract class _CompletionUsage extends CompletionUsage { factory _CompletionUsage.fromJson(Map json) = _$CompletionUsageImpl.fromJson; - @override - /// Number of tokens in the generated completion. + @override @JsonKey(name: 'completion_tokens') int? get completionTokens; - @override /// Number of tokens in the prompt. + @override @JsonKey(name: 'prompt_tokens') int get promptTokens; - @override /// Total number of tokens used in the request (prompt + completion). + @override @JsonKey(name: 'total_tokens') int get totalTokens; + + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionUsageImplCopyWith<_$CompletionUsageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -11134,8 +11629,12 @@ mixin _$CreateEmbeddingRequest { @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; + /// Serializes this CreateEmbeddingRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $CreateEmbeddingRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -11168,6 +11667,8 @@ class _$CreateEmbeddingRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11201,6 +11702,8 @@ class _$CreateEmbeddingRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingModelCopyWith<$Res> get model { @@ -11209,6 +11712,8 @@ class _$CreateEmbeddingRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingInputCopyWith<$Res> get input { @@ -11250,6 +11755,8 @@ class __$$CreateEmbeddingRequestImplCopyWithImpl<$Res> $Res Function(_$CreateEmbeddingRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11343,12 +11850,14 @@ class _$CreateEmbeddingRequestImpl extends _CreateEmbeddingRequest { (identical(other.user, user) || other.user == user)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, model, input, encodingFormat, dimensions, user); - @JsonKey(ignore: true) + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateEmbeddingRequestImplCopyWith<_$CreateEmbeddingRequestImpl> @@ -11377,33 +11886,35 @@ abstract class _CreateEmbeddingRequest extends CreateEmbeddingRequest { factory _CreateEmbeddingRequest.fromJson(Map json) = _$CreateEmbeddingRequestImpl.fromJson; - @override - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @override @_EmbeddingModelConverter() EmbeddingModel get model; - @override /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @override @_EmbeddingInputConverter() EmbeddingInput get input; - @override /// The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). + @override @JsonKey(name: 'encoding_format') EmbeddingEncodingFormat get encodingFormat; - @override /// The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. + @override @JsonKey(includeIfNull: false) int? 
get dimensions; - @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override @JsonKey(includeIfNull: false) String? get user; + + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateEmbeddingRequestImplCopyWith<_$CreateEmbeddingRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -11462,6 +11973,8 @@ mixin _$EmbeddingModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this EmbeddingModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -11481,6 +11994,9 @@ class _$EmbeddingModelCopyWithImpl<$Res, $Val extends EmbeddingModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -11502,6 +12018,8 @@ class __$$EmbeddingModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$EmbeddingModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11545,11 +12063,13 @@ class _$EmbeddingModelEnumerationImpl extends EmbeddingModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingModelEnumerationImplCopyWith<_$EmbeddingModelEnumerationImpl> @@ -11636,7 +12156,10 @@ abstract class EmbeddingModelEnumeration extends EmbeddingModel { @override EmbeddingModels get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingModelEnumerationImplCopyWith<_$EmbeddingModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -11658,6 +12181,8 @@ class __$$EmbeddingModelStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingModelStringImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11701,11 +12226,13 @@ class _$EmbeddingModelStringImpl extends EmbeddingModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingModelStringImplCopyWith<_$EmbeddingModelStringImpl> @@ -11793,7 +12320,10 @@ abstract class EmbeddingModelString extends EmbeddingModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingModelStringImplCopyWith<_$EmbeddingModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -11868,6 +12398,8 @@ mixin _$EmbeddingInput { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this EmbeddingInput to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -11887,6 +12419,9 @@ class _$EmbeddingInputCopyWithImpl<$Res, $Val extends EmbeddingInput> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -11908,6 +12443,8 @@ class __$$EmbeddingInputListListIntImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputListListIntImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11958,12 +12495,14 @@ class _$EmbeddingInputListListIntImpl extends EmbeddingInputListListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingInputListListIntImplCopyWith<_$EmbeddingInputListListIntImpl> @@ -12062,7 +12601,10 @@ abstract class EmbeddingInputListListInt extends EmbeddingInput { @override List> get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingInputListListIntImplCopyWith<_$EmbeddingInputListListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12085,6 +12627,8 @@ class __$$EmbeddingInputListIntImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputListIntImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12135,12 +12679,14 @@ class _$EmbeddingInputListIntImpl extends EmbeddingInputListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingInputListIntImplCopyWith<_$EmbeddingInputListIntImpl> @@ -12239,7 +12785,10 @@ abstract class EmbeddingInputListInt extends EmbeddingInput { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingInputListIntImplCopyWith<_$EmbeddingInputListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12263,6 +12812,8 @@ class __$$EmbeddingInputListStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputListStringImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12313,12 +12864,14 @@ class _$EmbeddingInputListStringImpl extends EmbeddingInputListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingInputListStringImplCopyWith<_$EmbeddingInputListStringImpl> @@ -12417,7 +12970,10 @@ abstract class EmbeddingInputListString extends EmbeddingInput { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingInputListStringImplCopyWith<_$EmbeddingInputListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12439,6 +12995,8 @@ class __$$EmbeddingInputStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputStringImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12482,11 +13040,13 @@ class _$EmbeddingInputStringImpl extends EmbeddingInputString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingInputStringImplCopyWith<_$EmbeddingInputStringImpl> @@ -12586,7 +13146,10 @@ abstract class EmbeddingInputString extends EmbeddingInput { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingInputStringImplCopyWith<_$EmbeddingInputStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12612,8 +13175,12 @@ mixin _$CreateEmbeddingResponse { @JsonKey(includeIfNull: false) EmbeddingUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this CreateEmbeddingResponse to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateEmbeddingResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -12644,6 +13211,8 @@ class _$CreateEmbeddingResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12672,6 +13241,8 @@ class _$CreateEmbeddingResponseCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingUsageCopyWith<$Res>? get usage { @@ -12714,6 +13285,8 @@ class __$$CreateEmbeddingResponseImplCopyWithImpl<$Res> $Res Function(_$CreateEmbeddingResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12797,12 +13370,14 @@ class _$CreateEmbeddingResponseImpl extends _CreateEmbeddingResponse { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_data), model, object, usage); - @JsonKey(ignore: true) + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateEmbeddingResponseImplCopyWith<_$CreateEmbeddingResponseImpl> @@ -12829,25 +13404,27 @@ abstract class _CreateEmbeddingResponse extends CreateEmbeddingResponse { factory _CreateEmbeddingResponse.fromJson(Map json) = _$CreateEmbeddingResponseImpl.fromJson; - @override - /// The list of embeddings generated by the model. - List get data; @override + List get data; /// The name of the model used to generate the embedding. - String get model; @override + String get model; /// The object type, which is always "list". - CreateEmbeddingResponseObject get object; @override + CreateEmbeddingResponseObject get object; /// The usage information for the request. + @override @JsonKey(includeIfNull: false) EmbeddingUsage? get usage; + + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateEmbeddingResponseImplCopyWith<_$CreateEmbeddingResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12868,8 +13445,12 @@ mixin _$Embedding { /// The object type, which is always "embedding". EmbeddingObject get object => throw _privateConstructorUsedError; + /// Serializes this Embedding to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $EmbeddingCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -12897,6 +13478,8 @@ class _$EmbeddingCopyWithImpl<$Res, $Val extends Embedding> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12920,6 +13503,8 @@ class _$EmbeddingCopyWithImpl<$Res, $Val extends Embedding> ) as $Val); } + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingVectorCopyWith<$Res> get embedding { @@ -12954,6 +13539,8 @@ class __$$EmbeddingImplCopyWithImpl<$Res> _$EmbeddingImpl _value, $Res Function(_$EmbeddingImpl) _then) : super(_value, _then); + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13019,11 +13606,13 @@ class _$EmbeddingImpl extends _Embedding { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, embedding, object); - @JsonKey(ignore: true) + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingImplCopyWith<_$EmbeddingImpl> get copyWith => @@ -13047,21 +13636,23 @@ abstract class _Embedding extends Embedding { factory _Embedding.fromJson(Map json) = _$EmbeddingImpl.fromJson; - @override - /// The index of the embedding in the list of embeddings. - int get index; @override + int get index; /// The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings). + @override @_EmbeddingVectorConverter() EmbeddingVector get embedding; - @override /// The object type, which is always "embedding". + @override EmbeddingObject get object; + + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingImplCopyWith<_$EmbeddingImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13120,6 +13711,8 @@ mixin _$EmbeddingVector { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this EmbeddingVector to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -13139,6 +13732,9 @@ class _$EmbeddingVectorCopyWithImpl<$Res, $Val extends EmbeddingVector> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -13160,6 +13756,8 @@ class __$$EmbeddingVectorListDoubleImplCopyWithImpl<$Res> $Res Function(_$EmbeddingVectorListDoubleImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -13210,12 +13808,14 @@ class _$EmbeddingVectorListDoubleImpl extends EmbeddingVectorListDouble { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingVectorListDoubleImplCopyWith<_$EmbeddingVectorListDoubleImpl> @@ -13302,7 +13902,10 @@ abstract class EmbeddingVectorListDouble extends EmbeddingVector { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingVectorListDoubleImplCopyWith<_$EmbeddingVectorListDoubleImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13325,6 +13928,8 @@ class __$$EmbeddingVectorStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingVectorStringImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13368,11 +13973,13 @@ class _$EmbeddingVectorStringImpl extends EmbeddingVectorString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingVectorStringImplCopyWith<_$EmbeddingVectorStringImpl> @@ -13459,7 +14066,10 @@ abstract class EmbeddingVectorString extends EmbeddingVector { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingVectorStringImplCopyWith<_$EmbeddingVectorStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13478,8 +14088,12 @@ mixin _$EmbeddingUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; + /// Serializes this EmbeddingUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $EmbeddingUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -13505,6 +14119,8 @@ class _$EmbeddingUsageCopyWithImpl<$Res, $Val extends EmbeddingUsage> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -13545,6 +14161,8 @@ class __$$EmbeddingUsageImplCopyWithImpl<$Res> _$EmbeddingUsageImpl _value, $Res Function(_$EmbeddingUsageImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13601,11 +14219,13 @@ class _$EmbeddingUsageImpl extends _EmbeddingUsage { other.totalTokens == totalTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, promptTokens, totalTokens); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingUsageImplCopyWith<_$EmbeddingUsageImpl> get copyWith => @@ -13630,18 +14250,20 @@ abstract class _EmbeddingUsage extends EmbeddingUsage { factory _EmbeddingUsage.fromJson(Map json) = _$EmbeddingUsageImpl.fromJson; - @override - /// The number of tokens used by the prompt. + @override @JsonKey(name: 'prompt_tokens') int get promptTokens; - @override /// The total number of tokens used by the request. + @override @JsonKey(name: 'total_tokens') int get totalTokens; + + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingUsageImplCopyWith<_$EmbeddingUsageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13680,7 +14302,7 @@ mixin _$CreateFineTuningJobRequest { /// A string of up to 18 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @JsonKey(includeIfNull: false) String? get suffix => throw _privateConstructorUsedError; @@ -13707,8 +14329,12 @@ mixin _$CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; + /// Serializes this CreateFineTuningJobRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateFineTuningJobRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -13746,6 +14372,8 @@ class _$CreateFineTuningJobRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13789,6 +14417,8 @@ class _$CreateFineTuningJobRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningModelCopyWith<$Res> get model { @@ -13797,6 +14427,8 @@ class _$CreateFineTuningJobRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $FineTuningJobHyperparametersCopyWith<$Res>? get hyperparameters { @@ -13847,6 +14479,8 @@ class __$$CreateFineTuningJobRequestImplCopyWithImpl<$Res> $Res Function(_$CreateFineTuningJobRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13940,7 +14574,7 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { /// A string of up to 18 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @override @JsonKey(includeIfNull: false) final String? suffix; @@ -14002,7 +14636,7 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { (identical(other.seed, seed) || other.seed == seed)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -14014,7 +14648,9 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { const DeepCollectionEquality().hash(_integrations), seed); - @JsonKey(ignore: true) + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateFineTuningJobRequestImplCopyWith<_$CreateFineTuningJobRequestImpl> @@ -14047,13 +14683,11 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { factory _CreateFineTuningJobRequest.fromJson(Map json) = _$CreateFineTuningJobRequestImpl.fromJson; - @override - /// The name of the model to fine-tune. You can select one of the /// [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + @override @_FineTuningModelConverter() FineTuningModel get model; - @override /// The ID of an uploaded file that contains training data. /// @@ -14067,21 +14701,21 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @override @JsonKey(name: 'training_file') String get trainingFile; - @override /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @override @JsonKey(includeIfNull: false) FineTuningJobHyperparameters? get hyperparameters; - @override /// A string of up to 18 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + @override @JsonKey(includeIfNull: false) String? get suffix; - @override /// The ID of an uploaded file that contains validation data. 
/// @@ -14093,21 +14727,25 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @override @JsonKey(name: 'validation_file', includeIfNull: false) String? get validationFile; - @override /// A list of integrations to enable for your fine-tuning job. + @override @JsonKey(includeIfNull: false) List? get integrations; - @override /// The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. /// If a seed is not specified, one will be generated for you. + @override @JsonKey(includeIfNull: false) int? get seed; + + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateFineTuningJobRequestImplCopyWith<_$CreateFineTuningJobRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14166,6 +14804,8 @@ mixin _$FineTuningModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this FineTuningModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -14185,6 +14825,9 @@ class _$FineTuningModelCopyWithImpl<$Res, $Val extends FineTuningModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -14207,6 +14850,8 @@ class __$$FineTuningModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$FineTuningModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14251,11 +14896,13 @@ class _$FineTuningModelEnumerationImpl extends FineTuningModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningModelEnumerationImplCopyWith<_$FineTuningModelEnumerationImpl> @@ -14342,7 +14989,10 @@ abstract class FineTuningModelEnumeration extends FineTuningModel { @override FineTuningModels get value; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningModelEnumerationImplCopyWith<_$FineTuningModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14365,6 +15015,8 @@ class __$$FineTuningModelStringImplCopyWithImpl<$Res> $Res Function(_$FineTuningModelStringImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -14408,11 +15060,13 @@ class _$FineTuningModelStringImpl extends FineTuningModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningModelStringImplCopyWith<_$FineTuningModelStringImpl> @@ -14499,7 +15153,10 @@ abstract class FineTuningModelString extends FineTuningModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningModelStringImplCopyWith<_$FineTuningModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14566,8 +15223,12 @@ mixin _$FineTuningJob { List? get integrations => throw _privateConstructorUsedError; + /// Serializes this FineTuningJob to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -14610,6 +15271,8 @@ class _$FineTuningJobCopyWithImpl<$Res, $Val extends FineTuningJob> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14693,6 +15356,8 @@ class _$FineTuningJobCopyWithImpl<$Res, $Val extends FineTuningJob> ) as $Val); } + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningJobErrorCopyWith<$Res>? get error { @@ -14705,6 +15370,8 @@ class _$FineTuningJobCopyWithImpl<$Res, $Val extends FineTuningJob> }); } + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningJobHyperparametersCopyWith<$Res> get hyperparameters { @@ -14755,6 +15422,8 @@ class __$$FineTuningJobImplCopyWithImpl<$Res> _$FineTuningJobImpl _value, $Res Function(_$FineTuningJobImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14988,7 +15657,7 @@ class _$FineTuningJobImpl extends _FineTuningJob { .equals(other._integrations, _integrations)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -15008,7 +15677,9 @@ class _$FineTuningJobImpl extends _FineTuningJob { validationFile, const DeepCollectionEquality().hash(_integrations)); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobImplCopyWith<_$FineTuningJobImpl> get copyWith => @@ -15045,77 +15716,79 @@ abstract class _FineTuningJob extends FineTuningJob { factory _FineTuningJob.fromJson(Map json) = _$FineTuningJobImpl.fromJson; - @override - /// The object identifier, which can be referenced in the API endpoints. - String get id; @override + String get id; /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. - FineTuningJobError? get error; @override + FineTuningJobError? get error; /// The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + @override @JsonKey(name: 'fine_tuned_model') String? get fineTunedModel; - @override /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. + @override @JsonKey(name: 'finished_at') int? get finishedAt; - @override /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. - FineTuningJobHyperparameters get hyperparameters; @override + FineTuningJobHyperparameters get hyperparameters; /// The base model that is being fine-tuned. - String get model; @override + String get model; /// The object type, which is always "fine_tuning.job". - FineTuningJobObject get object; @override + FineTuningJobObject get object; /// The organization that owns the fine-tuning job. + @override @JsonKey(name: 'organization_id') String get organizationId; - @override /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @override @JsonKey(name: 'result_files') List get resultFiles; - @override /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. - FineTuningJobStatus get status; @override + FineTuningJobStatus get status; /// The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + @override @JsonKey(name: 'trained_tokens') int? get trainedTokens; - @override /// The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @override @JsonKey(name: 'training_file') String get trainingFile; - @override /// The file ID used for validation. You can retrieve the validation results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @override @JsonKey(name: 'validation_file') String? get validationFile; - @override /// A list of integrations to enable for this fine-tuning job. + @override @JsonKey(includeIfNull: false) List? get integrations; + + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobImplCopyWith<_$FineTuningJobImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15135,8 +15808,12 @@ mixin _$FineTuningIntegration { /// to your run, and set a default entity (team, username, etc) to be associated with your run. FineTuningIntegrationWandb get wandb => throw _privateConstructorUsedError; + /// Serializes this FineTuningIntegration to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningIntegrationCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -15163,6 +15840,8 @@ class _$FineTuningIntegrationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15181,6 +15860,8 @@ class _$FineTuningIntegrationCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningIntegrationWandbCopyWith<$Res> get wandb { @@ -15214,6 +15895,8 @@ class __$$FineTuningIntegrationImplCopyWithImpl<$Res> $Res Function(_$FineTuningIntegrationImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15266,11 +15949,13 @@ class _$FineTuningIntegrationImpl extends _FineTuningIntegration { (identical(other.wandb, wandb) || other.wandb == wandb)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, wandb); - @JsonKey(ignore: true) + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningIntegrationImplCopyWith<_$FineTuningIntegrationImpl> @@ -15295,18 +15980,20 @@ abstract class _FineTuningIntegration extends FineTuningIntegration { factory _FineTuningIntegration.fromJson(Map json) = _$FineTuningIntegrationImpl.fromJson; - @override - /// The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. - FineTuningIntegrationType get type; @override + FineTuningIntegrationType get type; /// The settings for your integration with Weights and Biases. This payload specifies the project that /// metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags /// to your run, and set a default entity (team, username, etc) to be associated with your run. + @override FineTuningIntegrationWandb get wandb; + + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningIntegrationImplCopyWith<_$FineTuningIntegrationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15335,8 +16022,12 @@ mixin _$FineTuningIntegrationWandb { @JsonKey(includeIfNull: false) List? 
get tags => throw _privateConstructorUsedError; + /// Serializes this FineTuningIntegrationWandb to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningIntegrationWandbCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -15366,6 +16057,8 @@ class _$FineTuningIntegrationWandbCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15421,6 +16114,8 @@ class __$$FineTuningIntegrationWandbImplCopyWithImpl<$Res> $Res Function(_$FineTuningIntegrationWandbImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15512,12 +16207,14 @@ class _$FineTuningIntegrationWandbImpl extends _FineTuningIntegrationWandb { const DeepCollectionEquality().equals(other._tags, _tags)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, project, name, entity, const DeepCollectionEquality().hash(_tags)); - @JsonKey(ignore: true) + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningIntegrationWandbImplCopyWith<_$FineTuningIntegrationWandbImpl> @@ -15544,29 +16241,31 @@ abstract class _FineTuningIntegrationWandb extends FineTuningIntegrationWandb { factory _FineTuningIntegrationWandb.fromJson(Map json) = _$FineTuningIntegrationWandbImpl.fromJson; - @override - /// The name of the project that the new run will be created under. - String get project; @override + String get project; /// A display name to set for the run. If not set, we will use the Job ID as the name. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The entity to use for the run. This allows you to set the team or username of the WandB user that you would /// like associated with the run. If not set, the default entity for the registered WandB API key is used. + @override @JsonKey(includeIfNull: false) String? get entity; - @override /// A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some /// default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + @override @JsonKey(includeIfNull: false) List? get tags; + + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningIntegrationWandbImplCopyWith<_$FineTuningIntegrationWandbImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15586,8 +16285,12 @@ mixin _$FineTuningJobError { /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. String? get param => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobError to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -15611,6 +16314,8 @@ class _$FineTuningJobErrorCopyWithImpl<$Res, $Val extends FineTuningJobError> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15654,6 +16359,8 @@ class __$$FineTuningJobErrorImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobErrorImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15715,11 +16422,13 @@ class _$FineTuningJobErrorImpl extends _FineTuningJobError { (identical(other.param, param) || other.param == param)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message, param); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobErrorImplCopyWith<_$FineTuningJobErrorImpl> get copyWith => @@ -15744,20 +16453,22 @@ abstract class _FineTuningJobError extends FineTuningJobError { factory _FineTuningJobError.fromJson(Map json) = _$FineTuningJobErrorImpl.fromJson; - @override - /// A machine-readable error code. - String get code; @override + String get code; /// A human-readable error message. - String get message; @override + String get message; /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + @override String? get param; + + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobErrorImplCopyWith<_$FineTuningJobErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15777,8 +16488,12 @@ mixin _$FineTuningJobHyperparameters { @JsonKey(name: 'n_epochs') FineTuningNEpochs get nEpochs => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobHyperparameters to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobHyperparametersCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -15810,6 +16525,8 @@ class _$FineTuningJobHyperparametersCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15823,6 +16540,8 @@ class _$FineTuningJobHyperparametersCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $FineTuningNEpochsCopyWith<$Res> get nEpochs { @@ -15860,6 +16579,8 @@ class __$$FineTuningJobHyperparametersImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobHyperparametersImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15909,11 +16630,13 @@ class _$FineTuningJobHyperparametersImpl extends _FineTuningJobHyperparameters { (identical(other.nEpochs, nEpochs) || other.nEpochs == nEpochs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, nEpochs); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobHyperparametersImplCopyWith< @@ -15941,17 +16664,19 @@ abstract class _FineTuningJobHyperparameters factory _FineTuningJobHyperparameters.fromJson(Map json) = _$FineTuningJobHyperparametersImpl.fromJson; - @override - /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. /// /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number /// manually, we support any number between 1 and 50 epochs. + @override @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') FineTuningNEpochs get nEpochs; + + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobHyperparametersImplCopyWith< _$FineTuningJobHyperparametersImpl> get copyWith => throw _privateConstructorUsedError; @@ -16011,6 +16736,8 @@ mixin _$FineTuningNEpochs { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this FineTuningNEpochs to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -16030,6 +16757,9 @@ class _$FineTuningNEpochsCopyWithImpl<$Res, $Val extends FineTuningNEpochs> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -16052,6 +16782,8 @@ class __$$FineTuningNEpochsEnumerationImplCopyWithImpl<$Res> $Res Function(_$FineTuningNEpochsEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16096,11 +16828,13 @@ class _$FineTuningNEpochsEnumerationImpl extends FineTuningNEpochsEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningNEpochsEnumerationImplCopyWith< @@ -16189,7 +16923,10 @@ abstract class FineTuningNEpochsEnumeration extends FineTuningNEpochs { @override FineTuningNEpochsOptions get value; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningNEpochsEnumerationImplCopyWith< _$FineTuningNEpochsEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -16212,6 +16949,8 @@ class __$$FineTuningNEpochsIntImplCopyWithImpl<$Res> $Res Function(_$FineTuningNEpochsIntImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16255,11 +16994,13 @@ class _$FineTuningNEpochsIntImpl extends FineTuningNEpochsInt { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningNEpochsIntImplCopyWith<_$FineTuningNEpochsIntImpl> @@ -16347,7 +17088,10 @@ abstract class FineTuningNEpochsInt extends FineTuningNEpochs { @override int get value; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningNEpochsIntImplCopyWith<_$FineTuningNEpochsIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -16370,8 +17114,12 @@ mixin _$ListPaginatedFineTuningJobsResponse { ListPaginatedFineTuningJobsResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this ListPaginatedFineTuningJobsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListPaginatedFineTuningJobsResponseCopyWith< ListPaginatedFineTuningJobsResponse> get copyWith => throw _privateConstructorUsedError; @@ -16402,6 +17150,8 @@ class _$ListPaginatedFineTuningJobsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16451,6 +17201,8 @@ class __$$ListPaginatedFineTuningJobsResponseImplCopyWithImpl<$Res> $Res Function(_$ListPaginatedFineTuningJobsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -16525,12 +17277,14 @@ class _$ListPaginatedFineTuningJobsResponseImpl (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_data), hasMore, object); - @JsonKey(ignore: true) + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListPaginatedFineTuningJobsResponseImplCopyWith< @@ -16559,21 +17313,23 @@ abstract class _ListPaginatedFineTuningJobsResponse Map json) = _$ListPaginatedFineTuningJobsResponseImpl.fromJson; - @override - /// The list of fine-tuning jobs. - List get data; @override + List get data; /// Whether there are more fine-tuning jobs to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; - @override /// The object type, which is always "list". + @override ListPaginatedFineTuningJobsResponseObject get object; + + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListPaginatedFineTuningJobsResponseImplCopyWith< _$ListPaginatedFineTuningJobsResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -16593,8 +17349,12 @@ mixin _$ListFineTuningJobEventsResponse { ListFineTuningJobEventsResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this ListFineTuningJobEventsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListFineTuningJobEventsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -16623,6 +17383,8 @@ class _$ListFineTuningJobEventsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16666,6 +17428,8 @@ class __$$ListFineTuningJobEventsResponseImplCopyWithImpl<$Res> $Res Function(_$ListFineTuningJobEventsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16727,12 +17491,14 @@ class _$ListFineTuningJobEventsResponseImpl (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_data), object); - @JsonKey(ignore: true) + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListFineTuningJobEventsResponseImplCopyWith< @@ -16759,16 +17525,18 @@ abstract class _ListFineTuningJobEventsResponse factory _ListFineTuningJobEventsResponse.fromJson(Map json) = _$ListFineTuningJobEventsResponseImpl.fromJson; - @override - /// The list of fine-tuning job events. - List get data; @override + List get data; /// The object type, which is always "list". + @override ListFineTuningJobEventsResponseObject get object; + + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListFineTuningJobEventsResponseImplCopyWith< _$ListFineTuningJobEventsResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -16800,8 +17568,12 @@ mixin _$ListFineTuningJobCheckpointsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListFineTuningJobCheckpointsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListFineTuningJobCheckpointsResponseCopyWith< ListFineTuningJobCheckpointsResponse> get copyWith => throw _privateConstructorUsedError; @@ -16834,6 +17606,8 @@ class _$ListFineTuningJobCheckpointsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16895,6 +17669,8 @@ class __$$ListFineTuningJobCheckpointsResponseImplCopyWithImpl<$Res> $Res Function(_$ListFineTuningJobCheckpointsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16993,7 +17769,7 @@ class _$ListFineTuningJobCheckpointsResponseImpl (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -17003,7 +17779,9 @@ class _$ListFineTuningJobCheckpointsResponseImpl lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListFineTuningJobCheckpointsResponseImplCopyWith< @@ -17035,31 +17813,33 @@ abstract class _ListFineTuningJobCheckpointsResponse Map json) = _$ListFineTuningJobCheckpointsResponseImpl.fromJson; - @override - /// The list of fine-tuning job checkpoints. - List get data; @override + List get data; /// The object type, which is always "list". - ListFineTuningJobCheckpointsResponseObject get object; @override + ListFineTuningJobCheckpointsResponseObject get object; /// The ID of the first checkpoint in the list. + @override @JsonKey(name: 'first_id', includeIfNull: false) String? get firstId; - @override /// The ID of the last checkpoint in the list. 
+ @override @JsonKey(name: 'last_id', includeIfNull: false) String? get lastId; - @override /// Whether there are more checkpoints to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListFineTuningJobCheckpointsResponseImplCopyWith< _$ListFineTuningJobCheckpointsResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -17087,8 +17867,12 @@ mixin _$FineTuningJobEvent { /// The object type, which is always "fine_tuning.job.event". FineTuningJobEventObject get object => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobEvent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobEventCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -17117,6 +17901,8 @@ class _$FineTuningJobEventCopyWithImpl<$Res, $Val extends FineTuningJobEvent> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17175,6 +17961,8 @@ class __$$FineTuningJobEventImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobEventImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17262,12 +18050,14 @@ class _$FineTuningJobEventImpl extends _FineTuningJobEvent { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, createdAt, level, message, object); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobEventImplCopyWith<_$FineTuningJobEventImpl> get copyWith => @@ -17295,29 +18085,31 @@ abstract class _FineTuningJobEvent extends FineTuningJobEvent { factory _FineTuningJobEvent.fromJson(Map json) = _$FineTuningJobEventImpl.fromJson; - @override - /// The event identifier, which can be referenced in the API endpoints. - String get id; @override + String get id; /// The Unix timestamp (in seconds) for when the event was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The log level of the event. - FineTuningJobEventLevel get level; @override + FineTuningJobEventLevel get level; /// The message of the event. - String get message; @override + String get message; /// The object type, which is always "fine_tuning.job.event". + @override FineTuningJobEventObject get object; + + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobEventImplCopyWith<_$FineTuningJobEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -17356,8 +18148,12 @@ mixin _$FineTuningJobCheckpoint { FineTuningJobCheckpointObject get object => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobCheckpoint to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobCheckpointCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -17392,6 +18188,8 @@ class _$FineTuningJobCheckpointCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17435,6 +18233,8 @@ class _$FineTuningJobCheckpointCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningJobCheckpointMetricsCopyWith<$Res> get metrics { @@ -17478,6 +18278,8 @@ class __$$FineTuningJobCheckpointImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobCheckpointImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17595,12 +18397,14 @@ class _$FineTuningJobCheckpointImpl extends _FineTuningJobCheckpoint { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, createdAt, fineTunedModelCheckpoint, stepNumber, metrics, fineTuningJobId, object); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobCheckpointImplCopyWith<_$FineTuningJobCheckpointImpl> @@ -17632,40 +18436,42 @@ abstract class _FineTuningJobCheckpoint extends FineTuningJobCheckpoint { factory _FineTuningJobCheckpoint.fromJson(Map json) = _$FineTuningJobCheckpointImpl.fromJson; - @override - /// The checkpoint identifier, which can be referenced in the API endpoints. - String get id; @override + String get id; /// The Unix timestamp (in seconds) for when the checkpoint was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The name of the fine-tuned checkpoint model that is created. + @override @JsonKey(name: 'fine_tuned_model_checkpoint') String get fineTunedModelCheckpoint; - @override /// The step number that the checkpoint was created at. + @override @JsonKey(name: 'step_number') int get stepNumber; - @override /// Metrics at the step number during the fine-tuning job. - FineTuningJobCheckpointMetrics get metrics; @override + FineTuningJobCheckpointMetrics get metrics; /// The name of the fine-tuning job that this checkpoint was created from. + @override @JsonKey(name: 'fine_tuning_job_id') String get fineTuningJobId; - @override /// The object type, which is always "fine_tuning.job.checkpoint". 
+ @override FineTuningJobCheckpointObject get object; + + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobCheckpointImplCopyWith<_$FineTuningJobCheckpointImpl> get copyWith => throw _privateConstructorUsedError; } @@ -17705,8 +18511,12 @@ mixin _$FineTuningJobCheckpointMetrics { @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) double? get fullValidMeanTokenAccuracy => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobCheckpointMetrics to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobCheckpointMetricsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -17744,6 +18554,8 @@ class _$FineTuningJobCheckpointMetricsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17821,6 +18633,8 @@ class __$$FineTuningJobCheckpointMetricsImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobCheckpointMetricsImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17949,7 +18763,7 @@ class _$FineTuningJobCheckpointMetricsImpl fullValidMeanTokenAccuracy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -17961,7 +18775,9 @@ class _$FineTuningJobCheckpointMetricsImpl fullValidLoss, fullValidMeanTokenAccuracy); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobCheckpointMetricsImplCopyWith< @@ -17999,43 +18815,45 @@ abstract class _FineTuningJobCheckpointMetrics factory _FineTuningJobCheckpointMetrics.fromJson(Map json) = _$FineTuningJobCheckpointMetricsImpl.fromJson; - @override - /// The step number that the metrics were recorded at. + @override @JsonKey(includeIfNull: false) double? get step; - @override /// The training loss at the step number. + @override @JsonKey(name: 'train_loss', includeIfNull: false) double? get trainLoss; - @override /// The training mean token accuracy at the step number. + @override @JsonKey(name: 'train_mean_token_accuracy', includeIfNull: false) double? get trainMeanTokenAccuracy; - @override /// The validation loss at the step number. + @override @JsonKey(name: 'valid_loss', includeIfNull: false) double? get validLoss; - @override /// The validation mean token accuracy at the step number. + @override @JsonKey(name: 'valid_mean_token_accuracy', includeIfNull: false) double? get validMeanTokenAccuracy; - @override /// The full validation loss at the step number. + @override @JsonKey(name: 'full_valid_loss', includeIfNull: false) double? get fullValidLoss; - @override /// The full validation mean token accuracy at the step number. 
+ @override @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) double? get fullValidMeanTokenAccuracy; + + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobCheckpointMetricsImplCopyWith< _$FineTuningJobCheckpointMetricsImpl> get copyWith => throw _privateConstructorUsedError; @@ -18083,8 +18901,12 @@ mixin _$CreateImageRequest { @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; + /// Serializes this CreateImageRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateImageRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -18130,6 +18952,8 @@ class _$CreateImageRequestCopyWithImpl<$Res, $Val extends CreateImageRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18178,6 +19002,8 @@ class _$CreateImageRequestCopyWithImpl<$Res, $Val extends CreateImageRequest> ) as $Val); } + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateImageRequestModelCopyWith<$Res>? get model { @@ -18233,6 +19059,8 @@ class __$$CreateImageRequestImplCopyWithImpl<$Res> $Res Function(_$CreateImageRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18377,12 +19205,14 @@ class _$CreateImageRequestImpl extends _CreateImageRequest { (identical(other.user, user) || other.user == user)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, prompt, model, n, quality, responseFormat, size, style, user); - @JsonKey(ignore: true) + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateImageRequestImplCopyWith<_$CreateImageRequestImpl> get copyWith => @@ -18425,52 +19255,54 @@ abstract class _CreateImageRequest extends CreateImageRequest { factory _CreateImageRequest.fromJson(Map json) = _$CreateImageRequestImpl.fromJson; - @override - /// A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. - String get prompt; @override + String get prompt; /// The model to use for image generation. + @override @_CreateImageRequestModelConverter() @JsonKey(includeIfNull: false) CreateImageRequestModel? get model; - @override /// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. + @override @JsonKey(includeIfNull: false) int? get n; - @override /// The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. 
- ImageQuality get quality; @override + ImageQuality get quality; /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. + @override @JsonKey( name: 'response_format', includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ImageResponseFormat? get responseFormat; - @override /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ImageSize? get size; - @override /// The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ImageStyle? get style; - @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override @JsonKey(includeIfNull: false) String? get user; + + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateImageRequestImplCopyWith<_$CreateImageRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -18533,6 +19365,8 @@ mixin _$CreateImageRequestModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateImageRequestModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -18553,6 +19387,9 @@ class _$CreateImageRequestModelCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -18575,6 +19412,8 @@ class __$$CreateImageRequestModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateImageRequestModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18621,11 +19460,13 @@ class _$CreateImageRequestModelEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateImageRequestModelEnumerationImplCopyWith< @@ -18715,7 +19556,10 @@ abstract class CreateImageRequestModelEnumeration @override ImageModels get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateImageRequestModelEnumerationImplCopyWith< _$CreateImageRequestModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -18741,6 +19585,8 @@ class __$$CreateImageRequestModelStringImplCopyWithImpl<$Res> $Res Function(_$CreateImageRequestModelStringImpl) _then) : super(_value, _then); + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18786,11 +19632,13 @@ class _$CreateImageRequestModelStringImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateImageRequestModelStringImplCopyWith< @@ -18878,7 +19726,10 @@ abstract class CreateImageRequestModelString extends CreateImageRequestModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateImageRequestModelStringImplCopyWith< _$CreateImageRequestModelStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -18896,8 +19747,12 @@ mixin _$ImagesResponse { /// The list of images generated by the model. List get data => throw _privateConstructorUsedError; + /// Serializes this ImagesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ImagesResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -18921,6 +19776,8 @@ class _$ImagesResponseCopyWithImpl<$Res, $Val extends ImagesResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18959,6 +19816,8 @@ class __$$ImagesResponseImplCopyWithImpl<$Res> _$ImagesResponseImpl _value, $Res Function(_$ImagesResponseImpl) _then) : super(_value, _then); + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19018,12 +19877,14 @@ class _$ImagesResponseImpl extends _ImagesResponse { const DeepCollectionEquality().equals(other._data, _data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, created, const DeepCollectionEquality().hash(_data)); - @JsonKey(ignore: true) + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ImagesResponseImplCopyWith<_$ImagesResponseImpl> get copyWith => @@ -19047,16 +19908,18 @@ abstract class _ImagesResponse extends ImagesResponse { factory _ImagesResponse.fromJson(Map json) = _$ImagesResponseImpl.fromJson; - @override - /// The Unix timestamp (in seconds) when the image was created. - int get created; @override + int get created; /// The list of images generated by the model. + @override List get data; + + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ImagesResponseImplCopyWith<_$ImagesResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19079,8 +19942,12 @@ mixin _$Image { @JsonKey(name: 'revised_prompt', includeIfNull: false) String? get revisedPrompt => throw _privateConstructorUsedError; + /// Serializes this Image to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ImageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19106,6 +19973,8 @@ class _$ImageCopyWithImpl<$Res, $Val extends Image> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19152,6 +20021,8 @@ class __$$ImageImplCopyWithImpl<$Res> _$ImageImpl _value, $Res Function(_$ImageImpl) _then) : super(_value, _then); + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19220,11 +20091,13 @@ class _$ImageImpl extends _Image { other.revisedPrompt == revisedPrompt)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, b64Json, url, revisedPrompt); - @JsonKey(ignore: true) + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ImageImplCopyWith<_$ImageImpl> get copyWith => @@ -19248,23 +20121,25 @@ abstract class _Image extends Image { factory _Image.fromJson(Map json) = _$ImageImpl.fromJson; - @override - /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + @override @JsonKey(name: 'b64_json', includeIfNull: false) String? get b64Json; - @override /// The URL of the generated image, if `response_format` is `url` (default). + @override @JsonKey(includeIfNull: false) String? get url; - @override /// The prompt that was used to generate the image, if there was any revision to the prompt. + @override @JsonKey(name: 'revised_prompt', includeIfNull: false) String? get revisedPrompt; + + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ImageImplCopyWith<_$ImageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19288,8 +20163,12 @@ mixin _$Model { @JsonKey(name: 'owned_by') String get ownedBy => throw _privateConstructorUsedError; + /// Serializes this Model to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModelCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19315,6 +20194,8 @@ class _$ModelCopyWithImpl<$Res, $Val extends Model> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19366,6 +20247,8 @@ class __$$ModelImplCopyWithImpl<$Res> _$ModelImpl _value, $Res Function(_$ModelImpl) _then) : super(_value, _then); + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19441,11 +20324,13 @@ class _$ModelImpl extends _Model { (identical(other.ownedBy, ownedBy) || other.ownedBy == ownedBy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, created, object, ownedBy); - @JsonKey(ignore: true) + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModelImplCopyWith<_$ModelImpl> get copyWith => @@ -19469,25 +20354,27 @@ abstract class _Model extends Model { factory _Model.fromJson(Map json) = _$ModelImpl.fromJson; - @override - /// The model identifier, which can be referenced in the API endpoints. - String get id; @override + String get id; /// The Unix timestamp (in seconds) when the model was created. - int get created; @override + int get created; /// The object type, which is always "model". - ModelObject get object; @override + ModelObject get object; /// The organization that owns the model. + @override @JsonKey(name: 'owned_by') String get ownedBy; + + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModelImplCopyWith<_$ModelImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19504,8 +20391,12 @@ mixin _$ListModelsResponse { /// The list of models. List get data => throw _privateConstructorUsedError; + /// Serializes this ListModelsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListModelsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19529,6 +20420,8 @@ class _$ListModelsResponseCopyWithImpl<$Res, $Val extends ListModelsResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -19567,6 +20460,8 @@ class __$$ListModelsResponseImplCopyWithImpl<$Res> $Res Function(_$ListModelsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19626,12 +20521,14 @@ class _$ListModelsResponseImpl extends _ListModelsResponse { const DeepCollectionEquality().equals(other._data, _data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, object, const DeepCollectionEquality().hash(_data)); - @JsonKey(ignore: true) + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListModelsResponseImplCopyWith<_$ListModelsResponseImpl> get copyWith => @@ -19655,16 +20552,18 @@ abstract class _ListModelsResponse extends ListModelsResponse { factory _ListModelsResponse.fromJson(Map json) = _$ListModelsResponseImpl.fromJson; - @override - /// The object type, which is always "list". - ListModelsResponseObject get object; @override + ListModelsResponseObject get object; /// The list of models. + @override List get data; + + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListModelsResponseImplCopyWith<_$ListModelsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19684,8 +20583,12 @@ mixin _$DeleteModelResponse { /// The object type, which is always "model". String get object => throw _privateConstructorUsedError; + /// Serializes this DeleteModelResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteModelResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19709,6 +20612,8 @@ class _$DeleteModelResponseCopyWithImpl<$Res, $Val extends DeleteModelResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19752,6 +20657,8 @@ class __$$DeleteModelResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteModelResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19813,11 +20720,13 @@ class _$DeleteModelResponseImpl extends _DeleteModelResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteModelResponseImplCopyWith<_$DeleteModelResponseImpl> get copyWith => @@ -19842,20 +20751,22 @@ abstract class _DeleteModelResponse extends DeleteModelResponse { factory _DeleteModelResponse.fromJson(Map json) = _$DeleteModelResponseImpl.fromJson; - @override - /// The model identifier. - String get id; @override + String get id; /// Whether the model was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always "model". + @override String get object; + + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteModelResponseImplCopyWith<_$DeleteModelResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19878,8 +20789,12 @@ mixin _$CreateModerationRequest { @_ModerationInputConverter() ModerationInput get input => throw _privateConstructorUsedError; + /// Serializes this CreateModerationRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateModerationRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19911,6 +20826,8 @@ class _$CreateModerationRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19929,6 +20846,8 @@ class _$CreateModerationRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationModelCopyWith<$Res>? get model { @@ -19941,6 +20860,8 @@ class _$CreateModerationRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationInputCopyWith<$Res> get input { @@ -19981,6 +20902,8 @@ class __$$CreateModerationRequestImplCopyWithImpl<$Res> $Res Function(_$CreateModerationRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20040,11 +20963,13 @@ class _$CreateModerationRequestImpl extends _CreateModerationRequest { (identical(other.input, input) || other.input == input)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, model, input); - @JsonKey(ignore: true) + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateModerationRequestImplCopyWith<_$CreateModerationRequestImpl> @@ -20071,21 +20996,23 @@ abstract class _CreateModerationRequest extends CreateModerationRequest { factory _CreateModerationRequest.fromJson(Map json) = _$CreateModerationRequestImpl.fromJson; - @override - /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. /// /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + @override @_ModerationModelConverter() @JsonKey(includeIfNull: false) ModerationModel? get model; - @override /// The input text to classify + @override @_ModerationInputConverter() ModerationInput get input; + + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateModerationRequestImplCopyWith<_$CreateModerationRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20144,6 +21071,8 @@ mixin _$ModerationModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ModerationModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -20163,6 +21092,9 @@ class _$ModerationModelCopyWithImpl<$Res, $Val extends ModerationModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -20185,6 +21117,8 @@ class __$$ModerationModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$ModerationModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20229,11 +21163,13 @@ class _$ModerationModelEnumerationImpl extends ModerationModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationModelEnumerationImplCopyWith<_$ModerationModelEnumerationImpl> @@ -20320,7 +21256,10 @@ abstract class ModerationModelEnumeration extends ModerationModel { @override ModerationModels get value; - @JsonKey(ignore: true) + + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationModelEnumerationImplCopyWith<_$ModerationModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20343,6 +21282,8 @@ class __$$ModerationModelStringImplCopyWithImpl<$Res> $Res Function(_$ModerationModelStringImpl) _then) : super(_value, _then); + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -20386,11 +21327,13 @@ class _$ModerationModelStringImpl extends ModerationModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationModelStringImplCopyWith<_$ModerationModelStringImpl> @@ -20477,7 +21420,10 @@ abstract class ModerationModelString extends ModerationModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationModelStringImplCopyWith<_$ModerationModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20536,6 +21482,8 @@ mixin _$ModerationInput { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ModerationInput to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -20555,6 +21503,9 @@ class _$ModerationInputCopyWithImpl<$Res, $Val extends ModerationInput> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -20576,6 +21527,8 @@ class __$$ModerationInputListStringImplCopyWithImpl<$Res> $Res Function(_$ModerationInputListStringImpl) _then) : super(_value, _then); + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20626,12 +21579,14 @@ class _$ModerationInputListStringImpl extends ModerationInputListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationInputListStringImplCopyWith<_$ModerationInputListStringImpl> @@ -20718,7 +21673,10 @@ abstract class ModerationInputListString extends ModerationInput { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationInputListStringImplCopyWith<_$ModerationInputListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20741,6 +21699,8 @@ class __$$ModerationInputStringImplCopyWithImpl<$Res> $Res Function(_$ModerationInputStringImpl) _then) : super(_value, _then); + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -20784,11 +21744,13 @@ class _$ModerationInputStringImpl extends ModerationInputString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationInputStringImplCopyWith<_$ModerationInputStringImpl> @@ -20875,7 +21837,10 @@ abstract class ModerationInputString extends ModerationInput { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationInputStringImplCopyWith<_$ModerationInputStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20896,8 +21861,12 @@ mixin _$CreateModerationResponse { /// A list of moderation objects. List get results => throw _privateConstructorUsedError; + /// Serializes this CreateModerationResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateModerationResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -20922,6 +21891,8 @@ class _$CreateModerationResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20968,6 +21939,8 @@ class __$$CreateModerationResponseImplCopyWithImpl<$Res> $Res Function(_$CreateModerationResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21039,12 +22012,14 @@ class _$CreateModerationResponseImpl extends _CreateModerationResponse { const DeepCollectionEquality().equals(other._results, _results)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, id, model, const DeepCollectionEquality().hash(_results)); - @JsonKey(ignore: true) + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateModerationResponseImplCopyWith<_$CreateModerationResponseImpl> @@ -21070,20 +22045,22 @@ abstract class _CreateModerationResponse extends CreateModerationResponse { factory _CreateModerationResponse.fromJson(Map json) = _$CreateModerationResponseImpl.fromJson; - @override - /// The unique identifier for the moderation request. - String get id; @override + String get id; /// The model used to generate the moderation results. - String get model; @override + String get model; /// A list of moderation objects. + @override List get results; + + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateModerationResponseImplCopyWith<_$CreateModerationResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -21105,8 +22082,12 @@ mixin _$Moderation { ModerationCategoriesScores get categoryScores => throw _privateConstructorUsedError; + /// Serializes this Moderation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModerationCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -21137,6 +22118,8 @@ class _$ModerationCopyWithImpl<$Res, $Val extends Moderation> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21160,6 +22143,8 @@ class _$ModerationCopyWithImpl<$Res, $Val extends Moderation> ) as $Val); } + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationCategoriesCopyWith<$Res> get categories { @@ -21168,6 +22153,8 @@ class _$ModerationCopyWithImpl<$Res, $Val extends Moderation> }); } + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationCategoriesScoresCopyWith<$Res> get categoryScores { @@ -21206,6 +22193,8 @@ class __$$ModerationImplCopyWithImpl<$Res> _$ModerationImpl _value, $Res Function(_$ModerationImpl) _then) : super(_value, _then); + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21272,12 +22261,14 @@ class _$ModerationImpl extends _Moderation { other.categoryScores == categoryScores)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, flagged, categories, categoryScores); - @JsonKey(ignore: true) + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationImplCopyWith<_$ModerationImpl> get copyWith => @@ -21303,21 +22294,23 @@ abstract class _Moderation extends Moderation { factory _Moderation.fromJson(Map json) = _$ModerationImpl.fromJson; - @override - /// Whether any of the below categories are flagged. - bool get flagged; @override + bool get flagged; /// A list of the categories, and whether they are flagged or not. - ModerationCategories get categories; @override + ModerationCategories get categories; /// A list of the categories along with their scores as predicted by model. + @override @JsonKey(name: 'category_scores') ModerationCategoriesScores get categoryScores; + + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationImplCopyWith<_$ModerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -21368,8 +22361,12 @@ mixin _$ModerationCategories { @JsonKey(name: 'violence/graphic') bool get violenceGraphic => throw _privateConstructorUsedError; + /// Serializes this ModerationCategories to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModerationCategoriesCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -21405,6 +22402,8 @@ class _$ModerationCategoriesCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21499,6 +22498,8 @@ class __$$ModerationCategoriesImplCopyWithImpl<$Res> $Res Function(_$ModerationCategoriesImpl) _then) : super(_value, _then); + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21668,7 +22669,7 @@ class _$ModerationCategoriesImpl extends _ModerationCategories { other.violenceGraphic == violenceGraphic)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -21684,7 +22685,9 @@ class _$ModerationCategoriesImpl extends _ModerationCategories { violence, violenceGraphic); - @JsonKey(ignore: true) + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationCategoriesImplCopyWith<_$ModerationCategoriesImpl> @@ -21721,59 +22724,61 @@ abstract class _ModerationCategories extends ModerationCategories { factory _ModerationCategories.fromJson(Map json) = _$ModerationCategoriesImpl.fromJson; - @override - /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. - bool get hate; @override + bool get hate; /// Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + @override @JsonKey(name: 'hate/threatening') bool get hateThreatening; - @override /// Content that expresses, incites, or promotes harassing language towards any target. - bool get harassment; @override + bool get harassment; /// Harassment content that also includes violence or serious harm towards any target. + @override @JsonKey(name: 'harassment/threatening') bool get harassmentThreatening; - @override /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + @override @JsonKey(name: 'self-harm') bool get selfHarm; - @override /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. 
+ @override @JsonKey(name: 'self-harm/intent') bool get selfHarmIntent; - @override /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + @override @JsonKey(name: 'self-harm/instructions') bool get selfHarmInstructions; - @override /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). - bool get sexual; @override + bool get sexual; /// Sexual content that includes an individual who is under 18 years old. + @override @JsonKey(name: 'sexual/minors') bool get sexualMinors; - @override /// Content that depicts death, violence, or physical injury. - bool get violence; @override + bool get violence; /// Content that depicts death, violence, or physical injury in graphic detail. + @override @JsonKey(name: 'violence/graphic') bool get violenceGraphic; + + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationCategoriesImplCopyWith<_$ModerationCategoriesImpl> get copyWith => throw _privateConstructorUsedError; } @@ -21825,8 +22830,12 @@ mixin _$ModerationCategoriesScores { @JsonKey(name: 'violence/graphic') double get violenceGraphic => throw _privateConstructorUsedError; + /// Serializes this ModerationCategoriesScores to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModerationCategoriesScoresCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -21863,6 +22872,8 @@ class _$ModerationCategoriesScoresCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21960,6 +22971,8 @@ class __$$ModerationCategoriesScoresImplCopyWithImpl<$Res> $Res Function(_$ModerationCategoriesScoresImpl) _then) : super(_value, _then); + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22130,7 +23143,7 @@ class _$ModerationCategoriesScoresImpl extends _ModerationCategoriesScores { other.violenceGraphic == violenceGraphic)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -22146,7 +23159,9 @@ class _$ModerationCategoriesScoresImpl extends _ModerationCategoriesScores { violence, violenceGraphic); - @JsonKey(ignore: true) + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationCategoriesScoresImplCopyWith<_$ModerationCategoriesScoresImpl> @@ -22183,59 +23198,61 @@ abstract class _ModerationCategoriesScores extends ModerationCategoriesScores { factory _ModerationCategoriesScores.fromJson(Map json) = _$ModerationCategoriesScoresImpl.fromJson; - @override - /// The score for the category 'hate'. 
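Apart from the annotation swap, the moderation hunks above only add doc comments, but they spell out the read-side shape: a `CreateModerationResponse` exposes `id`, `model`, and `results`, and each `Moderation` result carries `flagged`, boolean `categories`, and numeric `categoryScores`. A small consumption sketch using only those getters; the response is assumed to come from an earlier `createModeration` call, which is not shown in these hunks.

import 'package:openai_dart/openai_dart.dart';

/// Prints the categories that were flagged for each moderation result.
void logFlaggedCategories(CreateModerationResponse response) {
  for (final Moderation result in response.results) {
    if (!result.flagged) continue;
    final categories = result.categories;
    final scores = result.categoryScores;
    print('Request ${response.id} (model ${response.model}) was flagged:');
    if (categories.hate) print('  hate: ${scores.hate}');
    if (categories.harassment) print('  harassment: ${scores.harassment}');
    if (categories.sexual) print('  sexual: ${scores.sexual}');
    if (categories.violence) print('  violence: ${scores.violence}');
  }
}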
- double get hate; @override + double get hate; /// The score for the category 'hate/threatening'. + @override @JsonKey(name: 'hate/threatening') double get hateThreatening; - @override /// The score for the category 'harassment'. - double get harassment; @override + double get harassment; /// The score for the category 'harassment/threatening'. + @override @JsonKey(name: 'harassment/threatening') double get harassmentThreatening; - @override /// The score for the category 'self-harm'. + @override @JsonKey(name: 'self-harm') double get selfHarm; - @override /// The score for the category 'self-harm/intent'. + @override @JsonKey(name: 'self-harm/intent') double get selfHarmIntent; - @override /// The score for the category 'self-harm/instructions'. + @override @JsonKey(name: 'self-harm/instructions') double get selfHarmInstructions; - @override /// The score for the category 'sexual'. - double get sexual; @override + double get sexual; /// The score for the category 'sexual/minors'. + @override @JsonKey(name: 'sexual/minors') double get sexualMinors; - @override /// The score for the category 'violence'. - double get violence; @override + double get violence; /// The score for the category 'violence/graphic'. + @override @JsonKey(name: 'violence/graphic') double get violenceGraphic; + + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationCategoriesScoresImplCopyWith<_$ModerationCategoriesScoresImpl> get copyWith => throw _privateConstructorUsedError; } @@ -22268,38 +23285,59 @@ mixin _$AssistantObject { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. String? get instructions => throw _privateConstructorUsedError; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. List get tools => throw _privateConstructorUsedError; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) AssistantObjectResponseFormat? get responseFormat => throw _privateConstructorUsedError; + /// Serializes this AssistantObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $AssistantObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -22342,6 +23380,8 @@ class _$AssistantObjectCopyWithImpl<$Res, $Val extends AssistantObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22415,6 +23455,8 @@ class _$AssistantObjectCopyWithImpl<$Res, $Val extends AssistantObject> ) as $Val); } + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -22427,6 +23469,8 @@ class _$AssistantObjectCopyWithImpl<$Res, $Val extends AssistantObject> }); } + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantObjectResponseFormatCopyWith<$Res>? get responseFormat { @@ -22481,6 +23525,8 @@ class __$$AssistantObjectImplCopyWithImpl<$Res> _$AssistantObjectImpl _value, $Res Function(_$AssistantObjectImpl) _then) : super(_value, _then); + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22610,10 +23656,12 @@ class _$AssistantObjectImpl extends _AssistantObject { @override final String? instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. final List _tools; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override List get tools { if (_tools is EqualUnmodifiableListView) return _tools; @@ -22626,10 +23674,14 @@ class _$AssistantObjectImpl extends _AssistantObject { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -22639,23 +23691,37 @@ class _$AssistantObjectImpl extends _AssistantObject { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@override @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -22692,7 +23758,7 @@ class _$AssistantObjectImpl extends _AssistantObject { other.responseFormat == responseFormat)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -22710,7 +23776,9 @@ class _$AssistantObjectImpl extends _AssistantObject { topP, responseFormat); - @JsonKey(ignore: true) + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantObjectImplCopyWith<_$AssistantObjectImpl> get copyWith => @@ -22749,72 +23817,91 @@ abstract class _AssistantObject extends AssistantObject { factory _AssistantObject.fromJson(Map json) = _$AssistantObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `assistant`. - AssistantObjectObject get object; @override + AssistantObjectObject get object; /// The Unix timestamp (in seconds) for when the assistant was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The name of the assistant. The maximum length is 256 characters. - String? get name; @override + String? get name; /// The description of the assistant. The maximum length is 512 characters. - String? get description; @override + String? get description; /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. - String get model; @override + String get model; /// The system instructions that the assistant uses. The maximum length is 256,000 characters. - String? get instructions; @override + String? get instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - List get tools; + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override + List get tools; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - Map? get metadata; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override + Map? get metadata; - /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) AssistantObjectResponseFormat? get responseFormat; + + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. 
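The "Create a copy of AssistantObject with the given fields replaced by the non-null parameter values" comments added above describe the standard freezed `copyWith` contract on these models. A minimal sketch against an existing `AssistantObject` instance, touching only fields listed in these hunks (`name`, `instructions`, `temperature`):

import 'package:openai_dart/openai_dart.dart';

/// Returns a tweaked copy of an assistant fetched elsewhere; all other fields
/// (id, model, tools, metadata, ...) are carried over unchanged by copyWith.
AssistantObject tuneAssistant(AssistantObject assistant) {
  return assistant.copyWith(
    name: 'Docs summarizer',
    instructions: 'Summarize the provided document in three bullet points.',
    temperature: 0.2,
  );
}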
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantObjectImplCopyWith<_$AssistantObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -22888,6 +23975,8 @@ mixin _$AssistantObjectResponseFormat { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this AssistantObjectResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -22910,6 +23999,9 @@ class _$AssistantObjectResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -22932,6 +24024,8 @@ class __$$AssistantObjectResponseFormatEnumerationImplCopyWithImpl<$Res> $Res Function(_$AssistantObjectResponseFormatEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22978,11 +24072,13 @@ class _$AssistantObjectResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantObjectResponseFormatEnumerationImplCopyWith< @@ -23084,7 +24180,10 @@ abstract class AssistantObjectResponseFormatEnumeration @override AssistantResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantObjectResponseFormatEnumerationImplCopyWith< _$AssistantObjectResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -23120,6 +24219,8 @@ class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23133,6 +24234,8 @@ class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< )); } + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsResponseFormatCopyWith<$Res> get value { @@ -23176,11 +24279,13 @@ class _$AssistantObjectResponseFormatAssistantsResponseFormatImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. 
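The rewritten `response_format` documentation in the hunks above and below distinguishes JSON mode from Structured Outputs. At the wire level those are the two JSON shapes sketched here; the inner `name`/`schema` keys of the `json_schema` example are an illustrative guess based on the linked Structured Outputs guide, not something spelled out in these hunks.

// JSON mode, per the doc comment: `{ "type": "json_object" }`.
const jsonModeResponseFormat = <String, dynamic>{
  'type': 'json_object',
};

// Structured Outputs, per the doc comment:
// `{ "type": "json_schema", "json_schema": {...} }`.
// The schema itself is a made-up example.
const structuredOutputsResponseFormat = <String, dynamic>{
  'type': 'json_schema',
  'json_schema': {
    'name': 'document_summary',
    'schema': {
      'type': 'object',
      'properties': {
        'title': {'type': 'string'},
        'bullet_points': {
          'type': 'array',
          'items': {'type': 'string'},
        },
      },
      'required': ['title', 'bullet_points'],
      'additionalProperties': false,
    },
  },
};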
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< @@ -23283,7 +24388,10 @@ abstract class AssistantObjectResponseFormatAssistantsResponseFormat @override AssistantsResponseFormat get value; - @JsonKey(ignore: true) + + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -23312,39 +24420,60 @@ mixin _$CreateAssistantRequest { @JsonKey(includeIfNull: false) String? get instructions => throw _privateConstructorUsedError; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. List get tools => throw _privateConstructorUsedError; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateAssistantRequestResponseFormat? get responseFormat => throw _privateConstructorUsedError; + /// Serializes this CreateAssistantRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateAssistantRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -23386,6 +24515,8 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23444,6 +24575,8 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantModelCopyWith<$Res> get model { @@ -23452,6 +24585,8 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? 
get toolResources { @@ -23464,6 +24599,8 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateAssistantRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -23520,6 +24657,8 @@ class __$$CreateAssistantRequestImplCopyWithImpl<$Res> $Res Function(_$CreateAssistantRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23622,10 +24761,12 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { @JsonKey(includeIfNull: false) final String? instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. final List _tools; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override @JsonKey() List get tools { @@ -23639,10 +24780,14 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -23653,23 +24798,37 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? 
temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -23702,7 +24861,7 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { other.responseFormat == responseFormat)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -23717,7 +24876,9 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { topP, responseFormat); - @JsonKey(ignore: true) + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateAssistantRequestImplCopyWith<_$CreateAssistantRequestImpl> @@ -23753,64 +24914,83 @@ abstract class _CreateAssistantRequest extends CreateAssistantRequest { factory _CreateAssistantRequest.fromJson(Map json) = _$CreateAssistantRequestImpl.fromJson; - @override - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @override @_AssistantModelConverter() AssistantModel get model; - @override /// The name of the assistant. The maximum length is 256 characters. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The description of the assistant. The maximum length is 512 characters. + @override @JsonKey(includeIfNull: false) String? get description; - @override /// The system instructions that the assistant uses. The maximum length is 256,000 characters. + @override @JsonKey(includeIfNull: false) String? get instructions; - @override - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - List get tools; + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override + List get tools; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; - @override - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. 
/// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateAssistantRequestResponseFormat? get responseFormat; + + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateAssistantRequestImplCopyWith<_$CreateAssistantRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -23869,6 +25049,8 @@ mixin _$AssistantModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this AssistantModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -23888,6 +25070,9 @@ class _$AssistantModelCopyWithImpl<$Res, $Val extends AssistantModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. 
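Both `CreateAssistantRequest` and the `AssistantModel` union now carry a "Serializes this ... to a JSON map" note, and `fromJson` factories are generated alongside. A round-trip sketch follows; the `AssistantModel.modelId(...)` factory name is an assumption (the union's string variant appears above as `AssistantModelString`, but its factory name is not shown in these hunks), so adjust it to whatever the hand-written union declares.

import 'dart:convert';
import 'package:openai_dart/openai_dart.dart';

void main() {
  // Assumption: `modelId` is the factory behind AssistantModelString.
  final request = CreateAssistantRequest(
    model: const AssistantModel.modelId('gpt-4o'),
    name: 'Docs helper',
    temperature: 0.2,
  );

  // toJson()/fromJson() round trip, as advertised by the serialization notes.
  final encoded = jsonEncode(request.toJson());
  final decoded = CreateAssistantRequest.fromJson(
    jsonDecode(encoded) as Map<String, dynamic>,
  );
  print(decoded.name); // Docs helper
}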
} /// @nodoc @@ -23909,6 +25094,8 @@ class __$$AssistantModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$AssistantModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23952,11 +25139,13 @@ class _$AssistantModelEnumerationImpl extends AssistantModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantModelEnumerationImplCopyWith<_$AssistantModelEnumerationImpl> @@ -24043,7 +25232,10 @@ abstract class AssistantModelEnumeration extends AssistantModel { @override AssistantModels get value; - @JsonKey(ignore: true) + + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantModelEnumerationImplCopyWith<_$AssistantModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -24065,6 +25257,8 @@ class __$$AssistantModelStringImplCopyWithImpl<$Res> $Res Function(_$AssistantModelStringImpl) _then) : super(_value, _then); + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -24108,11 +25302,13 @@ class _$AssistantModelStringImpl extends AssistantModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantModelStringImplCopyWith<_$AssistantModelStringImpl> @@ -24200,7 +25396,10 @@ abstract class AssistantModelString extends AssistantModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantModelStringImplCopyWith<_$AssistantModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -24274,6 +25473,8 @@ mixin _$CreateAssistantRequestResponseFormat { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateAssistantRequestResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -24296,6 +25497,9 @@ class _$CreateAssistantRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
} /// @nodoc @@ -24322,6 +25526,8 @@ class __$$CreateAssistantRequestResponseFormatEnumerationImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -24368,11 +25574,13 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateAssistantRequestResponseFormatEnumerationImplCopyWith< @@ -24475,7 +25683,10 @@ abstract class CreateAssistantRequestResponseFormatEnumeration @override CreateAssistantResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateAssistantRequestResponseFormatEnumerationImplCopyWith< _$CreateAssistantRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -24512,6 +25723,8 @@ class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi _then) : super(_value, _then); + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -24526,6 +25739,8 @@ class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi )); } + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsResponseFormatCopyWith<$Res> get value { @@ -24570,11 +25785,13 @@ class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< @@ -24679,7 +25896,10 @@ abstract class CreateAssistantRequestResponseFormatAssistantsResponseFormat @override AssistantsResponseFormat get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -24708,7 +25928,8 @@ mixin _$ModifyAssistantRequest { @JsonKey(includeIfNull: false) String? get instructions => throw _privateConstructorUsedError; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. 
Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. List get tools => throw _privateConstructorUsedError; /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. @@ -24719,32 +25940,52 @@ mixin _$ModifyAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
/// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) ModifyAssistantRequestResponseFormat? get responseFormat => throw _privateConstructorUsedError; + /// Serializes this ModifyAssistantRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModifyAssistantRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -24786,6 +26027,8 @@ class _$ModifyAssistantRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -24849,6 +26092,8 @@ class _$ModifyAssistantRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -24861,6 +26106,8 @@ class _$ModifyAssistantRequestCopyWithImpl<$Res, }); } + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModifyAssistantRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -24916,6 +26163,8 @@ class __$$ModifyAssistantRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyAssistantRequestImpl) _then) : super(_value, _then); + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25025,10 +26274,12 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { @JsonKey(includeIfNull: false) final String? instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. 
final List _tools; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override @JsonKey() List get tools { @@ -25054,10 +26305,14 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -25068,23 +26323,37 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`.
/// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -25118,7 +26387,7 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { other.responseFormat == responseFormat)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -25134,7 +26403,9 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { topP, responseFormat); - @JsonKey(ignore: true) + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyAssistantRequestImplCopyWith<_$ModifyAssistantRequestImpl> @@ -25171,69 +26442,88 @@ abstract class _ModifyAssistantRequest extends ModifyAssistantRequest { factory _ModifyAssistantRequest.fromJson(Map json) = _$ModifyAssistantRequestImpl.fromJson; - @override - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @override @JsonKey(includeIfNull: false) String? get model; - @override /// The name of the assistant. The maximum length is 256 characters. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The description of the assistant. The maximum length is 512 characters. + @override @JsonKey(includeIfNull: false) String? get description; - @override /// The system instructions that the assistant uses. The maximum length is 256,000 characters. + @override @JsonKey(includeIfNull: false) String? get instructions; - @override - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. 
Tools can be of types `code_interpreter`, `file_search`, or `function`. - List get tools; + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override + List get tools; /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. + @override @JsonKey(name: 'file_ids') List get fileIds; - @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; - @override - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) ModifyAssistantRequestResponseFormat? get responseFormat; + + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyAssistantRequestImplCopyWith<_$ModifyAssistantRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -25307,6 +26597,8 @@ mixin _$ModifyAssistantRequestResponseFormat { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ModifyAssistantRequestResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -25329,6 +26621,9 @@ class _$ModifyAssistantRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -25355,6 +26650,8 @@ class __$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25401,11 +26698,13 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWith< @@ -25508,7 +26807,10 @@ abstract class ModifyAssistantRequestResponseFormatEnumeration @override ModifyAssistantResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWith< _$ModifyAssistantRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -25545,6 +26847,8 @@ class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi _then) : super(_value, _then); + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25559,6 +26863,8 @@ class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi )); } + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsResponseFormatCopyWith<$Res> get value { @@ -25603,11 +26909,13 @@ class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< @@ -25712,7 +27020,10 @@ abstract class ModifyAssistantRequestResponseFormatAssistantsResponseFormat @override AssistantsResponseFormat get value; - @JsonKey(ignore: true) + + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -25735,8 +27046,12 @@ mixin _$DeleteAssistantResponse { DeleteAssistantResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this DeleteAssistantResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteAssistantResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -25761,6 +27076,8 @@ class _$DeleteAssistantResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -25807,6 +27124,8 @@ class __$$DeleteAssistantResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteAssistantResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25868,11 +27187,13 @@ class _$DeleteAssistantResponseImpl extends _DeleteAssistantResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteAssistantResponseImplCopyWith<_$DeleteAssistantResponseImpl> @@ -25898,20 +27219,22 @@ abstract class _DeleteAssistantResponse extends DeleteAssistantResponse { factory _DeleteAssistantResponse.fromJson(Map json) = _$DeleteAssistantResponseImpl.fromJson; - @override - /// The assistant identifier. - String get id; @override + String get id; /// Whether the assistant was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always `assistant.deleted`. + @override DeleteAssistantResponseObject get object; + + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteAssistantResponseImplCopyWith<_$DeleteAssistantResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -25941,8 +27264,12 @@ mixin _$ListAssistantsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListAssistantsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListAssistantsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -25972,6 +27299,8 @@ class _$ListAssistantsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26033,6 +27362,8 @@ class __$$ListAssistantsResponseImplCopyWithImpl<$Res> $Res Function(_$ListAssistantsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26129,12 +27460,14 @@ class _$ListAssistantsResponseImpl extends _ListAssistantsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListAssistantsResponseImplCopyWith<_$ListAssistantsResponseImpl> @@ -26162,31 +27495,33 @@ abstract class _ListAssistantsResponse extends ListAssistantsResponse { factory _ListAssistantsResponse.fromJson(Map json) = _$ListAssistantsResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// The list of assistants. - List get data; @override + List get data; /// The ID of the first assistant in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last assistant in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more assistants to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListAssistantsResponseImplCopyWith<_$ListAssistantsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -26206,8 +27541,12 @@ mixin _$AssistantsNamedToolChoice { AssistantsFunctionCallOption? get function => throw _privateConstructorUsedError; + /// Serializes this AssistantsNamedToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $AssistantsNamedToolChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -26236,6 +27575,8 @@ class _$AssistantsNamedToolChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26254,6 +27595,8 @@ class _$AssistantsNamedToolChoiceCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsFunctionCallOptionCopyWith<$Res>? get function { @@ -26295,6 +27638,8 @@ class __$$AssistantsNamedToolChoiceImplCopyWithImpl<$Res> $Res Function(_$AssistantsNamedToolChoiceImpl) _then) : super(_value, _then); + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26348,11 +27693,13 @@ class _$AssistantsNamedToolChoiceImpl extends _AssistantsNamedToolChoice { other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, function); - @JsonKey(ignore: true) + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantsNamedToolChoiceImplCopyWith<_$AssistantsNamedToolChoiceImpl> @@ -26378,17 +27725,19 @@ abstract class _AssistantsNamedToolChoice extends AssistantsNamedToolChoice { factory _AssistantsNamedToolChoice.fromJson(Map json) = _$AssistantsNamedToolChoiceImpl.fromJson; - @override - /// The type of the tool. 
If type is `function`, the function name must be set - AssistantsToolType get type; @override + AssistantsToolType get type; /// No Description + @override @JsonKey(includeIfNull: false) AssistantsFunctionCallOption? get function; + + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantsNamedToolChoiceImplCopyWith<_$AssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; } @@ -26403,8 +27752,12 @@ mixin _$AssistantsFunctionCallOption { /// The name of the function to call. String get name => throw _privateConstructorUsedError; + /// Serializes this AssistantsFunctionCallOption to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $AssistantsFunctionCallOptionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -26431,6 +27784,8 @@ class _$AssistantsFunctionCallOptionCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26467,6 +27822,8 @@ class __$$AssistantsFunctionCallOptionImplCopyWithImpl<$Res> $Res Function(_$AssistantsFunctionCallOptionImpl) _then) : super(_value, _then); + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26507,11 +27864,13 @@ class _$AssistantsFunctionCallOptionImpl extends _AssistantsFunctionCallOption { (identical(other.name, name) || other.name == name)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name); - @JsonKey(ignore: true) + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantsFunctionCallOptionImplCopyWith< @@ -26536,12 +27895,14 @@ abstract class _AssistantsFunctionCallOption factory _AssistantsFunctionCallOption.fromJson(Map json) = _$AssistantsFunctionCallOptionImpl.fromJson; - @override - /// The name of the function to call. + @override String get name; + + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantsFunctionCallOptionImplCopyWith< _$AssistantsFunctionCallOptionImpl> get copyWith => throw _privateConstructorUsedError; @@ -26557,8 +27918,12 @@ mixin _$AssistantsResponseFormat { /// Must be one of `text` or `json_object`. AssistantsResponseFormatType get type => throw _privateConstructorUsedError; + /// Serializes this AssistantsResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantsResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $AssistantsResponseFormatCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -26583,6 +27948,8 @@ class _$AssistantsResponseFormatCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantsResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26619,6 +27986,8 @@ class __$$AssistantsResponseFormatImplCopyWithImpl<$Res> $Res Function(_$AssistantsResponseFormatImpl) _then) : super(_value, _then); + /// Create a copy of AssistantsResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26661,11 +28030,13 @@ class _$AssistantsResponseFormatImpl extends _AssistantsResponseFormat { (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of AssistantsResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantsResponseFormatImplCopyWith<_$AssistantsResponseFormatImpl> @@ -26689,12 +28060,14 @@ abstract class _AssistantsResponseFormat extends AssistantsResponseFormat { factory _AssistantsResponseFormat.fromJson(Map json) = _$AssistantsResponseFormatImpl.fromJson; - @override - /// Must be one of `text` or `json_object`. + @override AssistantsResponseFormatType get type; + + /// Create a copy of AssistantsResponseFormat + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantsResponseFormatImplCopyWith<_$AssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -26712,8 +28085,12 @@ mixin _$TruncationObject { @JsonKey(name: 'last_messages', includeIfNull: false) int? get lastMessages => throw _privateConstructorUsedError; + /// Serializes this TruncationObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $TruncationObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -26739,6 +28116,8 @@ class _$TruncationObjectCopyWithImpl<$Res, $Val extends TruncationObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26779,6 +28158,8 @@ class __$$TruncationObjectImplCopyWithImpl<$Res> $Res Function(_$TruncationObjectImpl) _then) : super(_value, _then); + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -26833,11 +28214,13 @@ class _$TruncationObjectImpl extends _TruncationObject { other.lastMessages == lastMessages)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, lastMessages); - @JsonKey(ignore: true) + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$TruncationObjectImplCopyWith<_$TruncationObjectImpl> get copyWith => @@ -26862,17 +28245,19 @@ abstract class _TruncationObject extends TruncationObject { factory _TruncationObject.fromJson(Map json) = _$TruncationObjectImpl.fromJson; - @override - /// The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - TruncationObjectType get type; @override + TruncationObjectType get type; /// The number of most recent messages from the thread when constructing the context for the run. + @override @JsonKey(name: 'last_messages', includeIfNull: false) int? get lastMessages; + + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$TruncationObjectImplCopyWith<_$TruncationObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -26946,7 +28331,9 @@ mixin _$RunObject { /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. List get tools => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -26987,18 +28374,33 @@ mixin _$RunObject { @JsonKey(name: 'parallel_tool_calls') bool? get parallelToolCalls => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat get responseFormat => throw _privateConstructorUsedError; + /// Serializes this RunObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -27062,6 +28464,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -27205,6 +28609,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> ) as $Val); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunRequiredActionCopyWith<$Res>? get requiredAction { @@ -27217,6 +28623,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunLastErrorCopyWith<$Res>? get lastError { @@ -27229,6 +28637,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectIncompleteDetailsCopyWith<$Res>? get incompleteDetails { @@ -27242,6 +28652,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunCompletionUsageCopyWith<$Res>? 
get usage { @@ -27254,6 +28666,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TruncationObjectCopyWith<$Res>? get truncationStrategy { @@ -27266,6 +28680,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectToolChoiceCopyWith<$Res>? get toolChoice { @@ -27278,6 +28694,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectResponseFormatCopyWith<$Res> get responseFormat { @@ -27355,6 +28773,8 @@ class __$$RunObjectImplCopyWithImpl<$Res> _$RunObjectImpl _value, $Res Function(_$RunObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -27627,10 +29047,14 @@ class _$RunObjectImpl extends _RunObject { return EqualUnmodifiableListView(_tools); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -27685,11 +29109,22 @@ class _$RunObjectImpl extends _RunObject { @JsonKey(name: 'parallel_tool_calls') final bool? parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
/// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') @@ -27753,7 +29188,7 @@ class _$RunObjectImpl extends _RunObject { other.responseFormat == responseFormat)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hashAll([ runtimeType, @@ -27786,7 +29221,9 @@ class _$RunObjectImpl extends _RunObject { responseFormat ]); - @JsonKey(ignore: true) + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectImplCopyWith<_$RunObjectImpl> get copyWith => @@ -27843,146 +29280,161 @@ abstract class _RunObject extends RunObject { factory _RunObject.fromJson(Map json) = _$RunObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread.run`. - RunObjectObject get object; @override + RunObjectObject get object; /// The Unix timestamp (in seconds) for when the run was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was executed on as a part of this run. + @override @JsonKey(name: 'thread_id') String get threadId; - @override /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for execution of this run. + @override @JsonKey(name: 'assistant_id') String get assistantId; - @override /// The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. - RunStatus get status; @override + RunStatus get status; /// Details on the action required to continue the run. Will be `null` if no action is required. + @override @JsonKey(name: 'required_action') RunRequiredAction? get requiredAction; - @override /// The last error associated with this run. Will be `null` if there are no errors. + @override @JsonKey(name: 'last_error') RunLastError? get lastError; - @override /// The Unix timestamp (in seconds) for when the run will expire. + @override @JsonKey(name: 'expires_at') int? 
get expiresAt; - @override /// The Unix timestamp (in seconds) for when the run was started. + @override @JsonKey(name: 'started_at') int? get startedAt; - @override /// The Unix timestamp (in seconds) for when the run was cancelled. + @override @JsonKey(name: 'cancelled_at') int? get cancelledAt; - @override /// The Unix timestamp (in seconds) for when the run failed. + @override @JsonKey(name: 'failed_at') int? get failedAt; - @override /// The Unix timestamp (in seconds) for when the run was completed. + @override @JsonKey(name: 'completed_at') int? get completedAt; - @override /// Details on why the run is incomplete. Will be `null` if the run is not incomplete. + @override @JsonKey(name: 'incomplete_details') RunObjectIncompleteDetails? get incompleteDetails; - @override /// The model that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. - String get model; @override + String get model; /// The instructions that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. - String get instructions; @override + String get instructions; /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. - List get tools; @override + List get tools; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - Map? get metadata; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override + Map? get metadata; /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). - RunCompletionUsage? get usage; @override + RunCompletionUsage? get usage; /// The sampling temperature used for this run. If not set, defaults to 1. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override /// The nucleus sampling value used for this run. If not set, defaults to 1. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// The maximum number of prompt tokens specified to have been used over the course of the run. + @override @JsonKey(name: 'max_prompt_tokens') int? get maxPromptTokens; - @override /// The maximum number of completion tokens specified to have been used over the course of the run. + @override @JsonKey(name: 'max_completion_tokens') int? get maxCompletionTokens; - @override /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. + @override @JsonKey(name: 'truncation_strategy') TruncationObject? get truncationStrategy; - @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tools and instead generates a message. /// `auto` is the default value and means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools before responding to the user. /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
+ @override @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') RunObjectToolChoice? get toolChoice; - @override /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. + @override @JsonKey(name: 'parallel_tool_calls') bool? get parallelToolCalls; - @override - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat get responseFormat; + + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectImplCopyWith<_$RunObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -28001,8 +29453,12 @@ mixin _$RunRequiredAction { RunSubmitToolOutputs get submitToolOutputs => throw _privateConstructorUsedError; + /// Serializes this RunRequiredAction to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. 
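// Editor's sketch (not part of the patch): how the RunObject getters shown in the
// hunks above might be consumed from application code, assuming the public API
// exported by package:openai_dart matches the generated getters in this file.
// Timestamps are Unix seconds per the doc comments, hence the *1000 conversion.
import 'package:openai_dart/openai_dart.dart';

void inspectRun(RunObject run) {
  final created =
      DateTime.fromMillisecondsSinceEpoch(run.createdAt * 1000, isUtc: true);
  print('Run ${run.id} on thread ${run.threadId} created at $created');
  print('Status: ${run.status}');

  // `usage` is null until the run reaches a terminal state.
  final usage = run.usage;
  if (usage != null) {
    print('Tokens: ${usage.promptTokens} prompt + '
        '${usage.completionTokens} completion = ${usage.totalTokens} total');
  }

  // `incompleteDetails` points at the token limit that was hit, if any.
  final incomplete = run.incompleteDetails;
  if (incomplete != null) {
    print('Run ended incomplete: ${incomplete.reason}');
  }

  // `lastError` is null unless the run failed.
  final error = run.lastError;
  if (error != null) {
    print('Run failed (${error.code}): ${error.message}');
  }
}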
+ @JsonKey(includeFromJson: false, includeToJson: false) $RunRequiredActionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -28031,6 +29487,8 @@ class _$RunRequiredActionCopyWithImpl<$Res, $Val extends RunRequiredAction> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28049,6 +29507,8 @@ class _$RunRequiredActionCopyWithImpl<$Res, $Val extends RunRequiredAction> ) as $Val); } + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunSubmitToolOutputsCopyWith<$Res> get submitToolOutputs { @@ -28084,6 +29544,8 @@ class __$$RunRequiredActionImplCopyWithImpl<$Res> $Res Function(_$RunRequiredActionImpl) _then) : super(_value, _then); + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28138,11 +29600,13 @@ class _$RunRequiredActionImpl extends _RunRequiredAction { other.submitToolOutputs == submitToolOutputs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, submitToolOutputs); - @JsonKey(ignore: true) + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunRequiredActionImplCopyWith<_$RunRequiredActionImpl> get copyWith => @@ -28168,17 +29632,19 @@ abstract class _RunRequiredAction extends RunRequiredAction { factory _RunRequiredAction.fromJson(Map json) = _$RunRequiredActionImpl.fromJson; - @override - /// For now, this is always `submit_tool_outputs`. - RunRequiredActionType get type; @override + RunRequiredActionType get type; /// Details on the tool outputs needed for this run to continue. + @override @JsonKey(name: 'submit_tool_outputs') RunSubmitToolOutputs get submitToolOutputs; + + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunRequiredActionImplCopyWith<_$RunRequiredActionImpl> get copyWith => throw _privateConstructorUsedError; } @@ -28195,8 +29661,12 @@ mixin _$RunLastError { /// A human-readable description of the error. String get message => throw _privateConstructorUsedError; + /// Serializes this RunLastError to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunLastErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -28220,6 +29690,8 @@ class _$RunLastErrorCopyWithImpl<$Res, $Val extends RunLastError> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -28258,6 +29730,8 @@ class __$$RunLastErrorImplCopyWithImpl<$Res> _$RunLastErrorImpl _value, $Res Function(_$RunLastErrorImpl) _then) : super(_value, _then); + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28308,11 +29782,13 @@ class _$RunLastErrorImpl extends _RunLastError { (identical(other.message, message) || other.message == message)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message); - @JsonKey(ignore: true) + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunLastErrorImplCopyWith<_$RunLastErrorImpl> get copyWith => @@ -28335,16 +29811,18 @@ abstract class _RunLastError extends RunLastError { factory _RunLastError.fromJson(Map json) = _$RunLastErrorImpl.fromJson; - @override - /// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - RunLastErrorCode get code; @override + RunLastErrorCode get code; /// A human-readable description of the error. + @override String get message; + + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunLastErrorImplCopyWith<_$RunLastErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -28362,8 +29840,12 @@ mixin _$RunObjectIncompleteDetails { RunObjectIncompleteDetailsReason? get reason => throw _privateConstructorUsedError; + /// Serializes this RunObjectIncompleteDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunObjectIncompleteDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -28393,6 +29875,8 @@ class _$RunObjectIncompleteDetailsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28433,6 +29917,8 @@ class __$$RunObjectIncompleteDetailsImplCopyWithImpl<$Res> $Res Function(_$RunObjectIncompleteDetailsImpl) _then) : super(_value, _then); + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28480,11 +29966,13 @@ class _$RunObjectIncompleteDetailsImpl extends _RunObjectIncompleteDetails { (identical(other.reason, reason) || other.reason == reason)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, reason); - @JsonKey(ignore: true) + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. 
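// Editor's sketch (not part of the patch): acting on RunRequiredAction /
// RunLastError as generated above. Only the getters visible in these hunks are
// used; the fields of the individual tool-call elements are not shown here (they
// follow the Assistants API tool-call schema), so they are left untouched.
import 'package:openai_dart/openai_dart.dart';

void handleRequiredAction(RunObject run) {
  // Only runs in the `requires_action` state carry a RunRequiredAction.
  final action = run.requiredAction;
  if (action == null) return;

  // Per the doc comment above, `type` is always `submit_tool_outputs` for now.
  print('Required action: ${action.type}');

  for (final toolCall in action.submitToolOutputs.toolCalls) {
    // Compute an output for each pending tool call and send the results back
    // via the submit-tool-outputs endpoint (see SubmitToolOutputsRunRequest
    // later in this file).
    print('Pending tool call: $toolCall');
  }
}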
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectIncompleteDetailsImplCopyWith<_$RunObjectIncompleteDetailsImpl> @@ -28511,14 +29999,16 @@ abstract class _RunObjectIncompleteDetails extends RunObjectIncompleteDetails { factory _RunObjectIncompleteDetails.fromJson(Map json) = _$RunObjectIncompleteDetailsImpl.fromJson; - @override - /// The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) RunObjectIncompleteDetailsReason? get reason; + + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectIncompleteDetailsImplCopyWith<_$RunObjectIncompleteDetailsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -28579,6 +30069,8 @@ mixin _$RunObjectToolChoice { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunObjectToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -28598,6 +30090,9 @@ class _$RunObjectToolChoiceCopyWithImpl<$Res, $Val extends RunObjectToolChoice> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -28620,6 +30115,8 @@ class __$$RunObjectToolChoiceEnumerationImplCopyWithImpl<$Res> $Res Function(_$RunObjectToolChoiceEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28665,11 +30162,13 @@ class _$RunObjectToolChoiceEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectToolChoiceEnumerationImplCopyWith< @@ -28760,7 +30259,10 @@ abstract class RunObjectToolChoiceEnumeration extends RunObjectToolChoice { @override RunObjectToolChoiceMode get value; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectToolChoiceEnumerationImplCopyWith< _$RunObjectToolChoiceEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -28791,6 +30293,8 @@ class __$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl<$Res> $Res Function(_$RunObjectToolChoiceAssistantsNamedToolChoiceImpl) _then) : super(_value, _then); + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28804,6 +30308,8 @@ class __$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl<$Res> )); } + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $AssistantsNamedToolChoiceCopyWith<$Res> get value { @@ -28845,11 +30351,13 @@ class _$RunObjectToolChoiceAssistantsNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWith< @@ -28944,7 +30452,10 @@ abstract class RunObjectToolChoiceAssistantsNamedToolChoice @override AssistantsNamedToolChoice get value; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWith< _$RunObjectToolChoiceAssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -29012,6 +30523,8 @@ mixin _$RunObjectResponseFormat { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunObjectResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -29032,6 +30545,9 @@ class _$RunObjectResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -29054,6 +30570,8 @@ class __$$RunObjectResponseFormatEnumerationImplCopyWithImpl<$Res> $Res Function(_$RunObjectResponseFormatEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29100,11 +30618,13 @@ class _$RunObjectResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectResponseFormatEnumerationImplCopyWith< @@ -29199,7 +30719,10 @@ abstract class RunObjectResponseFormatEnumeration @override RunObjectResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectResponseFormatEnumerationImplCopyWith< _$RunObjectResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -29231,6 +30754,8 @@ class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -29244,6 +30769,8 @@ class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> )); } + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsResponseFormatCopyWith<$Res> get value { @@ -29285,11 +30812,13 @@ class _$RunObjectResponseFormatAssistantsResponseFormatImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< @@ -29386,7 +30915,10 @@ abstract class RunObjectResponseFormatAssistantsResponseFormat @override AssistantsResponseFormat get value; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< _$RunObjectResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -29402,8 +30934,12 @@ mixin _$RunSubmitToolOutputs { @JsonKey(name: 'tool_calls') List get toolCalls => throw _privateConstructorUsedError; + /// Serializes this RunSubmitToolOutputs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunSubmitToolOutputsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -29428,6 +30964,8 @@ class _$RunSubmitToolOutputsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29461,6 +30999,8 @@ class __$$RunSubmitToolOutputsImplCopyWithImpl<$Res> $Res Function(_$RunSubmitToolOutputsImpl) _then) : super(_value, _then); + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29513,12 +31053,14 @@ class _$RunSubmitToolOutputsImpl extends _RunSubmitToolOutputs { .equals(other._toolCalls, _toolCalls)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_toolCalls)); - @JsonKey(ignore: true) + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunSubmitToolOutputsImplCopyWith<_$RunSubmitToolOutputsImpl> @@ -29544,13 +31086,15 @@ abstract class _RunSubmitToolOutputs extends RunSubmitToolOutputs { factory _RunSubmitToolOutputs.fromJson(Map json) = _$RunSubmitToolOutputsImpl.fromJson; - @override - /// A list of the relevant tool calls. 
+ @override @JsonKey(name: 'tool_calls') List get toolCalls; + + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunSubmitToolOutputsImplCopyWith<_$RunSubmitToolOutputsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -29573,8 +31117,12 @@ mixin _$RunCompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; + /// Serializes this RunCompletionUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunCompletionUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -29601,6 +31149,8 @@ class _$RunCompletionUsageCopyWithImpl<$Res, $Val extends RunCompletionUsage> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29647,6 +31197,8 @@ class __$$RunCompletionUsageImplCopyWithImpl<$Res> $Res Function(_$RunCompletionUsageImpl) _then) : super(_value, _then); + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29716,12 +31268,14 @@ class _$RunCompletionUsageImpl extends _RunCompletionUsage { other.totalTokens == totalTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); - @JsonKey(ignore: true) + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunCompletionUsageImplCopyWith<_$RunCompletionUsageImpl> get copyWith => @@ -29747,23 +31301,25 @@ abstract class _RunCompletionUsage extends RunCompletionUsage { factory _RunCompletionUsage.fromJson(Map json) = _$RunCompletionUsageImpl.fromJson; - @override - /// Number of completion tokens used over the course of the run. + @override @JsonKey(name: 'completion_tokens') int get completionTokens; - @override /// Number of prompt tokens used over the course of the run. + @override @JsonKey(name: 'prompt_tokens') int get promptTokens; - @override /// Total number of tokens used (prompt + completion). + @override @JsonKey(name: 'total_tokens') int get totalTokens; + + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunCompletionUsageImplCopyWith<_$RunCompletionUsageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -29800,15 +31356,20 @@ mixin _$CreateRunRequest { @JsonKey(includeIfNull: false) List? get tools => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @@ -29842,11 +31403,22 @@ mixin _$CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? get responseFormat => @@ -29856,8 +31428,12 @@ mixin _$CreateRunRequest { @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; + /// Serializes this CreateRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -29914,6 +31490,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30002,6 +31580,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> ) as $Val); } + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateRunRequestModelCopyWith<$Res>? get model { @@ -30014,6 +31594,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> }); } + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TruncationObjectCopyWith<$Res>? get truncationStrategy { @@ -30026,6 +31608,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> }); } + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateRunRequestToolChoiceCopyWith<$Res>? get toolChoice { @@ -30039,6 +31623,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> }); } + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateRunRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -30109,6 +31695,8 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30288,10 +31876,14 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { return EqualUnmodifiableListView(value); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -30302,12 +31894,15 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @@ -30345,11 +31940,22 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) final bool? parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. 
+ /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -30399,7 +32005,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { (identical(other.stream, stream) || other.stream == stream)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -30420,7 +32026,9 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { responseFormat, stream); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestImplCopyWith<_$CreateRunRequestImpl> get copyWith => @@ -30471,102 +32079,120 @@ abstract class _CreateRunRequest extends CreateRunRequest { factory _CreateRunRequest.fromJson(Map json) = _$CreateRunRequestImpl.fromJson; - @override - /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. + @override @JsonKey(name: 'assistant_id') String get assistantId; - @override /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + @override @_CreateRunRequestModelConverter() @JsonKey(includeIfNull: false) CreateRunRequestModel? get model; - @override /// Overrides the [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. + @override @JsonKey(includeIfNull: false) String? get instructions; - @override /// Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + @override @JsonKey(name: 'additional_instructions', includeIfNull: false) String? get additionalInstructions; - @override /// Adds additional messages to the thread before creating the run. + @override @JsonKey(name: 'additional_messages', includeIfNull: false) List? get additionalMessages; - @override /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + @override @JsonKey(includeIfNull: false) List? get tools; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; - @override - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + @override @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? get maxPromptTokens; - @override /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + @override @JsonKey(name: 'max_completion_tokens', includeIfNull: false) int? get maxCompletionTokens; - @override /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. + @override @JsonKey(name: 'truncation_strategy', includeIfNull: false) TruncationObject? get truncationStrategy; - @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tools and instead generates a message. /// `auto` is the default value and means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools before responding to the user. /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + @override @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? get toolChoice; - @override /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. + @override @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls; - @override - /// Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? get responseFormat; - @override /// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + @override @JsonKey(includeIfNull: false) bool? get stream; + + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestImplCopyWith<_$CreateRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -30629,6 +32255,8 @@ mixin _$CreateRunRequestModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateRunRequestModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -30649,6 +32277,9 @@ class _$CreateRunRequestModelCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. 
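// Editor's sketch (not part of the patch): constructing the CreateRunRequest
// defined above and serialising it with toJson(). It assumes the freezed
// constructor exposes named parameters that mirror the getters (assistantId,
// instructions, temperature, metadata, maxCompletionTokens, stream, ...),
// which is the usual pattern for this generated code but is not spelled out in
// this hunk. The union-typed fields (model, toolChoice, responseFormat) are
// left unset to avoid guessing their factory names; the assistant id below is
// a hypothetical placeholder.
import 'package:openai_dart/openai_dart.dart';

void main() {
  final request = CreateRunRequest(
    assistantId: 'asst_abc123', // hypothetical assistant id
    // Per the doc comments: if you later enable JSON mode via `responseFormat`,
    // you must also instruct the model to emit JSON, e.g. in the instructions.
    instructions: 'Answer as a single JSON object.',
    // Alter temperature *or* topP, not both (see the nucleus-sampling note).
    temperature: 0.2,
    // Metadata: at most 16 pairs, keys up to 64 chars, values up to 512 chars.
    metadata: {'source': 'docs-example'},
    maxCompletionTokens: 512,
    stream: false,
  );

  print(request.toJson());
}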
} /// @nodoc @@ -30671,6 +32302,8 @@ class __$$CreateRunRequestModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30717,11 +32350,13 @@ class _$CreateRunRequestModelEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestModelEnumerationImplCopyWith< @@ -30809,7 +32444,10 @@ abstract class CreateRunRequestModelEnumeration extends CreateRunRequestModel { @override RunModels get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestModelEnumerationImplCopyWith< _$CreateRunRequestModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -30835,6 +32473,8 @@ class __$$CreateRunRequestModelStringImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestModelStringImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30879,11 +32519,13 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestModelStringImplCopyWith<_$CreateRunRequestModelStringImpl> @@ -30970,7 +32612,10 @@ abstract class CreateRunRequestModelString extends CreateRunRequestModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestModelStringImplCopyWith<_$CreateRunRequestModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -31038,6 +32683,8 @@ mixin _$CreateRunRequestToolChoice { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateRunRequestToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -31059,6 +32706,9 @@ class _$CreateRunRequestToolChoiceCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. 
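// Editor's sketch (not part of the patch): the two CreateRunRequestModel
// variants generated above, i.e. a free-form model id string or a predefined
// RunModels value. The case-class constructors and the RunModels value name
// are assumptions inferred from the class names in this file; verify them
// against the package's public API before relying on this.
import 'package:openai_dart/openai_dart.dart';

void main() {
  // Free-form model id, overriding the model configured on the assistant.
  final byId = CreateRunRequestModelString('gpt-4o'); // assumed constructor
  print(byId.toJson());

  // Alternatively, one of the enumerated RunModels values (assumed name):
  // final byEnum = CreateRunRequestModelEnumeration(RunModels.gpt4o);
}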
} /// @nodoc @@ -31081,6 +32731,8 @@ class __$$CreateRunRequestToolChoiceEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestToolChoiceEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31127,11 +32779,13 @@ class _$CreateRunRequestToolChoiceEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestToolChoiceEnumerationImplCopyWith< @@ -31227,7 +32881,10 @@ abstract class CreateRunRequestToolChoiceEnumeration @override CreateRunRequestToolChoiceMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestToolChoiceEnumerationImplCopyWith< _$CreateRunRequestToolChoiceEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -31263,6 +32920,8 @@ class __$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31276,6 +32935,8 @@ class __$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl< )); } + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsNamedToolChoiceCopyWith<$Res> get value { @@ -31318,11 +32979,13 @@ class _$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< @@ -31420,7 +33083,10 @@ abstract class CreateRunRequestToolChoiceAssistantsNamedToolChoice @override AssistantsNamedToolChoice get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< _$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -31492,6 +33158,8 @@ mixin _$CreateRunRequestResponseFormat { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateRunRequestResponseFormat to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; } @@ -31514,6 +33182,9 @@ class _$CreateRunRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -31536,6 +33207,8 @@ class __$$CreateRunRequestResponseFormatEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestResponseFormatEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31582,11 +33255,13 @@ class _$CreateRunRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestResponseFormatEnumerationImplCopyWith< @@ -31686,7 +33361,10 @@ abstract class CreateRunRequestResponseFormatEnumeration @override CreateRunRequestResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestResponseFormatEnumerationImplCopyWith< _$CreateRunRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -31723,6 +33401,8 @@ class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl _then) : super(_value, _then); + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31736,6 +33416,8 @@ class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl )); } + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsResponseFormatCopyWith<$Res> get value { @@ -31779,11 +33461,13 @@ class _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< @@ -31883,7 +33567,10 @@ abstract class CreateRunRequestResponseFormatAssistantsResponseFormat @override AssistantsResponseFormat get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -31913,8 +33600,12 @@ mixin _$ListRunsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListRunsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListRunsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -31943,6 +33634,8 @@ class _$ListRunsResponseCopyWithImpl<$Res, $Val extends ListRunsResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32001,6 +33694,8 @@ class __$$ListRunsResponseImplCopyWithImpl<$Res> $Res Function(_$ListRunsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32097,12 +33792,14 @@ class _$ListRunsResponseImpl extends _ListRunsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListRunsResponseImplCopyWith<_$ListRunsResponseImpl> get copyWith => @@ -32130,31 +33827,33 @@ abstract class _ListRunsResponse extends ListRunsResponse { factory _ListRunsResponse.fromJson(Map json) = _$ListRunsResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// The list of runs. - List get data; @override + List get data; /// The ID of the first run in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last run in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more runs to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListRunsResponseImplCopyWith<_$ListRunsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32165,12 +33864,18 @@ ModifyRunRequest _$ModifyRunRequestFromJson(Map json) { /// @nodoc mixin _$ModifyRunRequest { - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this ModifyRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModifyRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32194,6 +33899,8 @@ class _$ModifyRunRequestCopyWithImpl<$Res, $Val extends ModifyRunRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32227,6 +33934,8 @@ class __$$ModifyRunRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyRunRequestImpl) _then) : super(_value, _then); + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32252,10 +33961,14 @@ class _$ModifyRunRequestImpl extends _ModifyRunRequest { factory _$ModifyRunRequestImpl.fromJson(Map json) => _$$ModifyRunRequestImplFromJson(json); - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -32279,12 +33992,14 @@ class _$ModifyRunRequestImpl extends _ModifyRunRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyRunRequestImplCopyWith<_$ModifyRunRequestImpl> get copyWith => @@ -32308,13 +34023,17 @@ abstract class _ModifyRunRequest extends ModifyRunRequest { factory _ModifyRunRequest.fromJson(Map json) = _$ModifyRunRequestImpl.fromJson; + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyRunRequestImplCopyWith<_$ModifyRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32335,8 +34054,12 @@ mixin _$SubmitToolOutputsRunRequest { @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; + /// Serializes this SubmitToolOutputsRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $SubmitToolOutputsRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32365,6 +34088,8 @@ class _$SubmitToolOutputsRunRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32408,6 +34133,8 @@ class __$$SubmitToolOutputsRunRequestImplCopyWithImpl<$Res> $Res Function(_$SubmitToolOutputsRunRequestImpl) _then) : super(_value, _then); + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32473,12 +34200,14 @@ class _$SubmitToolOutputsRunRequestImpl extends _SubmitToolOutputsRunRequest { (identical(other.stream, stream) || other.stream == stream)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_toolOutputs), stream); - @JsonKey(ignore: true) + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$SubmitToolOutputsRunRequestImplCopyWith<_$SubmitToolOutputsRunRequestImpl> @@ -32505,18 +34234,20 @@ abstract class _SubmitToolOutputsRunRequest factory _SubmitToolOutputsRunRequest.fromJson(Map json) = _$SubmitToolOutputsRunRequestImpl.fromJson; - @override - /// A list of tools for which the outputs are being submitted. + @override @JsonKey(name: 'tool_outputs') List get toolOutputs; - @override /// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + @override @JsonKey(includeIfNull: false) bool? get stream; + + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$SubmitToolOutputsRunRequestImplCopyWith<_$SubmitToolOutputsRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32535,8 +34266,12 @@ mixin _$RunSubmitToolOutput { @JsonKey(includeIfNull: false) String? get output => throw _privateConstructorUsedError; + /// Serializes this RunSubmitToolOutput to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunSubmitToolOutputCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32562,6 +34297,8 @@ class _$RunSubmitToolOutputCopyWithImpl<$Res, $Val extends RunSubmitToolOutput> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32602,6 +34339,8 @@ class __$$RunSubmitToolOutputImplCopyWithImpl<$Res> $Res Function(_$RunSubmitToolOutputImpl) _then) : super(_value, _then); + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32657,11 +34396,13 @@ class _$RunSubmitToolOutputImpl extends _RunSubmitToolOutput { (identical(other.output, output) || other.output == output)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, toolCallId, output); - @JsonKey(ignore: true) + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunSubmitToolOutputImplCopyWith<_$RunSubmitToolOutputImpl> get copyWith => @@ -32687,18 +34428,20 @@ abstract class _RunSubmitToolOutput extends RunSubmitToolOutput { factory _RunSubmitToolOutput.fromJson(Map json) = _$RunSubmitToolOutputImpl.fromJson; - @override - /// The ID of the tool call in the `required_action` object within the run object the output is being submitted for. + @override @JsonKey(name: 'tool_call_id', includeIfNull: false) String? get toolCallId; - @override /// The output of the tool call to be submitted to continue the run. + @override @JsonKey(includeIfNull: false) String? get output; + + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunSubmitToolOutputImplCopyWith<_$RunSubmitToolOutputImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32718,8 +34461,12 @@ mixin _$RunToolCallObject { /// The function definition. RunToolCallFunction get function => throw _privateConstructorUsedError; + /// Serializes this RunToolCallObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $RunToolCallObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32746,6 +34493,8 @@ class _$RunToolCallObjectCopyWithImpl<$Res, $Val extends RunToolCallObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32769,6 +34518,8 @@ class _$RunToolCallObjectCopyWithImpl<$Res, $Val extends RunToolCallObject> ) as $Val); } + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunToolCallFunctionCopyWith<$Res> get function { @@ -32801,6 +34552,8 @@ class __$$RunToolCallObjectImplCopyWithImpl<$Res> $Res Function(_$RunToolCallObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32863,11 +34616,13 @@ class _$RunToolCallObjectImpl extends _RunToolCallObject { other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunToolCallObjectImplCopyWith<_$RunToolCallObjectImpl> get copyWith => @@ -32892,20 +34647,22 @@ abstract class _RunToolCallObject extends RunToolCallObject { factory _RunToolCallObject.fromJson(Map json) = _$RunToolCallObjectImpl.fromJson; - @override - /// The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) endpoint. - String get id; @override + String get id; /// The type of tool call the output is required for. For now, this is always `function`. - RunToolCallObjectType get type; @override + RunToolCallObjectType get type; /// The function definition. + @override RunToolCallFunction get function; + + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunToolCallObjectImplCopyWith<_$RunToolCallObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32922,8 +34679,12 @@ mixin _$RunToolCallFunction { /// The arguments that the model expects you to pass to the function. String get arguments => throw _privateConstructorUsedError; + /// Serializes this RunToolCallFunction to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunToolCallFunctionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32947,6 +34708,8 @@ class _$RunToolCallFunctionCopyWithImpl<$Res, $Val extends RunToolCallFunction> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -32985,6 +34748,8 @@ class __$$RunToolCallFunctionImplCopyWithImpl<$Res> $Res Function(_$RunToolCallFunctionImpl) _then) : super(_value, _then); + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -33036,11 +34801,13 @@ class _$RunToolCallFunctionImpl extends _RunToolCallFunction { other.arguments == arguments)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments); - @JsonKey(ignore: true) + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunToolCallFunctionImplCopyWith<_$RunToolCallFunctionImpl> get copyWith => @@ -33064,16 +34831,18 @@ abstract class _RunToolCallFunction extends RunToolCallFunction { factory _RunToolCallFunction.fromJson(Map json) = _$RunToolCallFunctionImpl.fromJson; - @override - /// The name of the function. - String get name; @override + String get name; /// The arguments that the model expects you to pass to the function. + @override String get arguments; + + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunToolCallFunctionImplCopyWith<_$RunToolCallFunctionImpl> get copyWith => throw _privateConstructorUsedError; } @@ -33110,15 +34879,20 @@ mixin _$CreateThreadAndRunRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. 
@JsonKey(name: 'top_p', includeIfNull: false) @@ -33152,11 +34926,22 @@ mixin _$CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? get responseFormat => @@ -33166,8 +34951,12 @@ mixin _$CreateThreadAndRunRequest { @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; + /// Serializes this CreateThreadAndRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateThreadAndRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -33226,6 +35015,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -33314,6 +35105,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateThreadRequestCopyWith<$Res>? get thread { @@ -33326,6 +35119,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ThreadAndRunModelCopyWith<$Res>? get model { @@ -33338,6 +35133,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -33350,6 +35147,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TruncationObjectCopyWith<$Res>? get truncationStrategy { @@ -33362,6 +35161,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateThreadAndRunRequestToolChoiceCopyWith<$Res>? get toolChoice { @@ -33375,6 +35176,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateThreadAndRunRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -33451,6 +35254,8 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> $Res Function(_$CreateThreadAndRunRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -33615,10 +35420,14 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? 
get metadata { @@ -33629,12 +35438,15 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @@ -33672,11 +35484,22 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) final bool? parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@override @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -33725,7 +35548,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { (identical(other.stream, stream) || other.stream == stream)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -33746,7 +35569,9 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { responseFormat, stream); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestImplCopyWith<_$CreateThreadAndRunRequestImpl> @@ -33796,102 +35621,120 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { factory _CreateThreadAndRunRequest.fromJson(Map json) = _$CreateThreadAndRunRequestImpl.fromJson; - @override - /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. + @override @JsonKey(name: 'assistant_id') String get assistantId; - @override /// If no thread is provided, an empty thread will be created. + @override @JsonKey(includeIfNull: false) CreateThreadRequest? get thread; - @override /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + @override @_ThreadAndRunModelConverter() @JsonKey(includeIfNull: false) ThreadAndRunModel? get model; - @override /// Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. + @override @JsonKey(includeIfNull: false) String? get instructions; - @override /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + @override @JsonKey(includeIfNull: false) List? get tools; - @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; - @override - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + @override @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? get maxPromptTokens; - @override /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + @override @JsonKey(name: 'max_completion_tokens', includeIfNull: false) int? get maxCompletionTokens; - @override /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. + @override @JsonKey(name: 'truncation_strategy', includeIfNull: false) TruncationObject? get truncationStrategy; - @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tools and instead generates a message. /// `auto` is the default value and means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools before responding to the user. /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + @override @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? get toolChoice; - @override /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. + @override @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls; - @override - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. 
/// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? get responseFormat; - @override /// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + @override @JsonKey(includeIfNull: false) bool? get stream; + + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestImplCopyWith<_$CreateThreadAndRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -33950,6 +35793,8 @@ mixin _$ThreadAndRunModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ThreadAndRunModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -33969,6 +35814,9 @@ class _$ThreadAndRunModelCopyWithImpl<$Res, $Val extends ThreadAndRunModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -33991,6 +35839,8 @@ class __$$ThreadAndRunModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$ThreadAndRunModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -34035,11 +35885,13 @@ class _$ThreadAndRunModelEnumerationImpl extends ThreadAndRunModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ThreadAndRunModelEnumerationImplCopyWith< @@ -34127,7 +35979,10 @@ abstract class ThreadAndRunModelEnumeration extends ThreadAndRunModel { @override ThreadAndRunModels get value; - @JsonKey(ignore: true) + + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ThreadAndRunModelEnumerationImplCopyWith< _$ThreadAndRunModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -34152,6 +36007,8 @@ class __$$ThreadAndRunModelStringImplCopyWithImpl<$Res> $Res Function(_$ThreadAndRunModelStringImpl) _then) : super(_value, _then); + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34195,11 +36052,13 @@ class _$ThreadAndRunModelStringImpl extends ThreadAndRunModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ThreadAndRunModelStringImplCopyWith<_$ThreadAndRunModelStringImpl> @@ -34286,7 +36145,10 @@ abstract class ThreadAndRunModelString extends ThreadAndRunModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ThreadAndRunModelStringImplCopyWith<_$ThreadAndRunModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -34361,6 +36223,8 @@ mixin _$CreateThreadAndRunRequestToolChoice { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateThreadAndRunRequestToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -34383,6 +36247,9 @@ class _$CreateThreadAndRunRequestToolChoiceCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -34408,6 +36275,8 @@ class __$$CreateThreadAndRunRequestToolChoiceEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateThreadAndRunRequestToolChoiceEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -34454,11 +36323,13 @@ class _$CreateThreadAndRunRequestToolChoiceEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestToolChoiceEnumerationImplCopyWith< @@ -34562,7 +36433,10 @@ abstract class CreateThreadAndRunRequestToolChoiceEnumeration @override CreateThreadAndRunRequestToolChoiceMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestToolChoiceEnumerationImplCopyWith< _$CreateThreadAndRunRequestToolChoiceEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -34599,6 +36473,8 @@ class __$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWi _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34613,6 +36489,8 @@ class __$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWi )); } + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsNamedToolChoiceCopyWith<$Res> get value { @@ -34657,11 +36535,13 @@ class _$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< @@ -34767,7 +36647,10 @@ abstract class CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoice @override AssistantsNamedToolChoice get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< _$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -34847,6 +36730,8 @@ mixin _$CreateThreadAndRunRequestResponseFormat { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateThreadAndRunRequestResponseFormat to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; } @@ -34870,6 +36755,9 @@ class _$CreateThreadAndRunRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -34900,6 +36788,8 @@ class __$$CreateThreadAndRunRequestResponseFormatEnumerationImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34946,11 +36836,13 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestResponseFormatEnumerationImplCopyWith< @@ -35057,7 +36949,10 @@ abstract class CreateThreadAndRunRequestResponseFormatEnumeration @override CreateThreadAndRunRequestResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestResponseFormatEnumerationImplCopyWith< _$CreateThreadAndRunRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -35096,6 +36991,8 @@ class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCop _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35110,6 +37007,8 @@ class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCop )); } + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsResponseFormatCopyWith<$Res> get value { @@ -35154,11 +37053,13 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< @@ -35267,7 +37168,10 @@ abstract class CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat @override AssistantsResponseFormat get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -35293,11 +37197,17 @@ mixin _$ThreadObject { @JsonKey(name: 'tool_resources') ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this ThreadObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ThreadObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -35328,6 +37238,8 @@ class _$ThreadObjectCopyWithImpl<$Res, $Val extends ThreadObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35361,6 +37273,8 @@ class _$ThreadObjectCopyWithImpl<$Res, $Val extends ThreadObject> ) as $Val); } + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -35401,6 +37315,8 @@ class __$$ThreadObjectImplCopyWithImpl<$Res> _$ThreadObjectImpl _value, $Res Function(_$ThreadObjectImpl) _then) : super(_value, _then); + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35468,10 +37384,14 @@ class _$ThreadObjectImpl extends _ThreadObject { @JsonKey(name: 'tool_resources') final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? 
get metadata { final value = _metadata; @@ -35500,12 +37420,14 @@ class _$ThreadObjectImpl extends _ThreadObject { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, object, createdAt, toolResources, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ThreadObjectImplCopyWith<_$ThreadObjectImpl> get copyWith => @@ -35532,30 +37454,34 @@ abstract class _ThreadObject extends ThreadObject { factory _ThreadObject.fromJson(Map json) = _$ThreadObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread`. - ThreadObjectObject get object; @override + ThreadObjectObject get object; /// The Unix timestamp (in seconds) for when the thread was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources') ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override Map? get metadata; + + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ThreadObjectImplCopyWith<_$ThreadObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -35575,12 +37501,18 @@ mixin _$CreateThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this CreateThreadRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $CreateThreadRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -35610,6 +37542,8 @@ class _$CreateThreadRequestCopyWithImpl<$Res, $Val extends CreateThreadRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35633,6 +37567,8 @@ class _$CreateThreadRequestCopyWithImpl<$Res, $Val extends CreateThreadRequest> ) as $Val); } + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -35672,6 +37608,8 @@ class __$$CreateThreadRequestImplCopyWithImpl<$Res> $Res Function(_$CreateThreadRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35730,10 +37668,14 @@ class _$CreateThreadRequestImpl extends _CreateThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -35760,7 +37702,7 @@ class _$CreateThreadRequestImpl extends _CreateThreadRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -35768,7 +37710,9 @@ class _$CreateThreadRequestImpl extends _CreateThreadRequest { toolResources, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadRequestImplCopyWith<_$CreateThreadRequestImpl> get copyWith => @@ -35796,23 +37740,27 @@ abstract class _CreateThreadRequest extends CreateThreadRequest { factory _CreateThreadRequest.fromJson(Map json) = _$CreateThreadRequestImpl.fromJson; - @override - /// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with. + @override @JsonKey(includeIfNull: false) List? 
get messages; - @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadRequestImplCopyWith<_$CreateThreadRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -35827,12 +37775,18 @@ mixin _$ModifyThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this ModifyThreadRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModifyThreadRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -35861,6 +37815,8 @@ class _$ModifyThreadRequestCopyWithImpl<$Res, $Val extends ModifyThreadRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35879,6 +37835,8 @@ class _$ModifyThreadRequestCopyWithImpl<$Res, $Val extends ModifyThreadRequest> ) as $Val); } + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -35917,6 +37875,8 @@ class __$$ModifyThreadRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyThreadRequestImpl) _then) : super(_value, _then); + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -35954,10 +37914,14 @@ class _$ModifyThreadRequestImpl extends _ModifyThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -35983,12 +37947,14 @@ class _$ModifyThreadRequestImpl extends _ModifyThreadRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, toolResources, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyThreadRequestImplCopyWith<_$ModifyThreadRequestImpl> get copyWith => @@ -36014,18 +37980,22 @@ abstract class _ModifyThreadRequest extends ModifyThreadRequest { factory _ModifyThreadRequest.fromJson(Map json) = _$ModifyThreadRequestImpl.fromJson; - @override - /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyThreadRequestImplCopyWith<_$ModifyThreadRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -36045,8 +38015,12 @@ mixin _$ToolResources { @JsonKey(name: 'file_search', includeIfNull: false) ToolResourcesFileSearch? get fileSearch => throw _privateConstructorUsedError; + /// Serializes this ToolResources to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ToolResourcesCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -36077,6 +38051,8 @@ class _$ToolResourcesCopyWithImpl<$Res, $Val extends ToolResources> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36095,6 +38071,8 @@ class _$ToolResourcesCopyWithImpl<$Res, $Val extends ToolResources> ) as $Val); } + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCodeInterpreterCopyWith<$Res>? get codeInterpreter { @@ -36108,6 +38086,8 @@ class _$ToolResourcesCopyWithImpl<$Res, $Val extends ToolResources> }); } + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesFileSearchCopyWith<$Res>? get fileSearch { @@ -36149,6 +38129,8 @@ class __$$ToolResourcesImplCopyWithImpl<$Res> _$ToolResourcesImpl _value, $Res Function(_$ToolResourcesImpl) _then) : super(_value, _then); + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36206,11 +38188,13 @@ class _$ToolResourcesImpl extends _ToolResources { other.fileSearch == fileSearch)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, codeInterpreter, fileSearch); - @JsonKey(ignore: true) + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ToolResourcesImplCopyWith<_$ToolResourcesImpl> get copyWith => @@ -36235,18 +38219,20 @@ abstract class _ToolResources extends ToolResources { factory _ToolResources.fromJson(Map json) = _$ToolResourcesImpl.fromJson; - @override - /// No Description + @override @JsonKey(name: 'code_interpreter', includeIfNull: false) ToolResourcesCodeInterpreter? get codeInterpreter; - @override /// No Description + @override @JsonKey(name: 'file_search', includeIfNull: false) ToolResourcesFileSearch? get fileSearch; + + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ToolResourcesImplCopyWith<_$ToolResourcesImpl> get copyWith => throw _privateConstructorUsedError; } @@ -36262,8 +38248,12 @@ mixin _$ToolResourcesCodeInterpreter { @JsonKey(name: 'file_ids') List get fileIds => throw _privateConstructorUsedError; + /// Serializes this ToolResourcesCodeInterpreter to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ToolResourcesCodeInterpreterCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -36290,6 +38280,8 @@ class _$ToolResourcesCodeInterpreterCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36326,6 +38318,8 @@ class __$$ToolResourcesCodeInterpreterImplCopyWithImpl<$Res> $Res Function(_$ToolResourcesCodeInterpreterImpl) _then) : super(_value, _then); + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36377,12 +38371,14 @@ class _$ToolResourcesCodeInterpreterImpl extends _ToolResourcesCodeInterpreter { const DeepCollectionEquality().equals(other._fileIds, _fileIds)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_fileIds)); - @JsonKey(ignore: true) + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ToolResourcesCodeInterpreterImplCopyWith< @@ -36408,13 +38404,15 @@ abstract class _ToolResourcesCodeInterpreter factory _ToolResourcesCodeInterpreter.fromJson(Map json) = _$ToolResourcesCodeInterpreterImpl.fromJson; - @override - /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + @override @JsonKey(name: 'file_ids') List get fileIds; + + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ToolResourcesCodeInterpreterImplCopyWith< _$ToolResourcesCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -36436,8 +38434,12 @@ mixin _$ToolResourcesFileSearch { List? get vectorStores => throw _privateConstructorUsedError; + /// Serializes this ToolResourcesFileSearch to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ToolResourcesFileSearchCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -36466,6 +38468,8 @@ class _$ToolResourcesFileSearchCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36511,6 +38515,8 @@ class __$$ToolResourcesFileSearchImplCopyWithImpl<$Res> $Res Function(_$ToolResourcesFileSearchImpl) _then) : super(_value, _then); + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -36589,14 +38595,16 @@ class _$ToolResourcesFileSearchImpl extends _ToolResourcesFileSearch { .equals(other._vectorStores, _vectorStores)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_vectorStoreIds), const DeepCollectionEquality().hash(_vectorStores)); - @JsonKey(ignore: true) + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ToolResourcesFileSearchImplCopyWith<_$ToolResourcesFileSearchImpl> @@ -36623,18 +38631,20 @@ abstract class _ToolResourcesFileSearch extends ToolResourcesFileSearch { factory _ToolResourcesFileSearch.fromJson(Map json) = _$ToolResourcesFileSearchImpl.fromJson; - @override - /// The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. + @override @JsonKey(name: 'vector_store_ids', includeIfNull: false) List? get vectorStoreIds; - @override /// A helper to create a [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. + @override @JsonKey(name: 'vector_stores', includeIfNull: false) List? get vectorStores; + + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ToolResourcesFileSearchImplCopyWith<_$ToolResourcesFileSearchImpl> get copyWith => throw _privateConstructorUsedError; } @@ -36660,8 +38670,12 @@ mixin _$ToolResourcesFileSearchVectorStore { @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; + /// Serializes this ToolResourcesFileSearchVectorStore to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ToolResourcesFileSearchVectorStoreCopyWith< ToolResourcesFileSearchVectorStore> get copyWith => throw _privateConstructorUsedError; @@ -36695,6 +38709,8 @@ class _$ToolResourcesFileSearchVectorStoreCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36718,6 +38734,8 @@ class _$ToolResourcesFileSearchVectorStoreCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { @@ -36761,6 +38779,8 @@ class __$$ToolResourcesFileSearchVectorStoreImplCopyWithImpl<$Res> $Res Function(_$ToolResourcesFileSearchVectorStoreImpl) _then) : super(_value, _then); + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -36843,7 +38863,7 @@ class _$ToolResourcesFileSearchVectorStoreImpl const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -36851,7 +38871,9 @@ class _$ToolResourcesFileSearchVectorStoreImpl chunkingStrategy, const DeepCollectionEquality().hash(metadata)); - @JsonKey(ignore: true) + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ToolResourcesFileSearchVectorStoreImplCopyWith< @@ -36882,24 +38904,26 @@ abstract class _ToolResourcesFileSearchVectorStore Map json) = _$ToolResourcesFileSearchVectorStoreImpl.fromJson; - @override - /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. + @override @JsonKey(name: 'file_ids', includeIfNull: false) List? get fileIds; - @override /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override @JsonKey(name: 'chunking_strategy', includeIfNull: false) ChunkingStrategyRequestParam? get chunkingStrategy; - @override /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) dynamic get metadata; + + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ToolResourcesFileSearchVectorStoreImplCopyWith< _$ToolResourcesFileSearchVectorStoreImpl> get copyWith => throw _privateConstructorUsedError; @@ -36920,8 +38944,12 @@ mixin _$DeleteThreadResponse { /// The object type, which is always `thread.deleted`. DeleteThreadResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this DeleteThreadResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteThreadResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -36946,6 +38974,8 @@ class _$DeleteThreadResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36989,6 +39019,8 @@ class __$$DeleteThreadResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteThreadResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -37050,11 +39082,13 @@ class _$DeleteThreadResponseImpl extends _DeleteThreadResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteThreadResponseImplCopyWith<_$DeleteThreadResponseImpl> @@ -37081,20 +39115,22 @@ abstract class _DeleteThreadResponse extends DeleteThreadResponse { factory _DeleteThreadResponse.fromJson(Map json) = _$DeleteThreadResponseImpl.fromJson; - @override - /// The thread identifier. - String get id; @override + String get id; /// Whether the thread was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always `thread.deleted`. + @override DeleteThreadResponseObject get object; + + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteThreadResponseImplCopyWith<_$DeleteThreadResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -37123,8 +39159,12 @@ mixin _$ListThreadsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListThreadsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListThreadsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -37153,6 +39193,8 @@ class _$ListThreadsResponseCopyWithImpl<$Res, $Val extends ListThreadsResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37211,6 +39253,8 @@ class __$$ListThreadsResponseImplCopyWithImpl<$Res> $Res Function(_$ListThreadsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37307,12 +39351,14 @@ class _$ListThreadsResponseImpl extends _ListThreadsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListThreadsResponseImplCopyWith<_$ListThreadsResponseImpl> get copyWith => @@ -37340,31 +39386,33 @@ abstract class _ListThreadsResponse extends ListThreadsResponse { factory _ListThreadsResponse.fromJson(Map json) = _$ListThreadsResponseImpl.fromJson; - @override - /// The object type, which is always `list`. 
- String get object; @override + String get object; /// The list of threads. - List get data; @override + List get data; /// The ID of the first thread in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last thread in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more threads to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListThreadsResponseImplCopyWith<_$ListThreadsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -37424,11 +39472,17 @@ mixin _$MessageObject { List? get attachments => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this MessageObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -37470,6 +39524,8 @@ class _$MessageObjectCopyWithImpl<$Res, $Val extends MessageObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37548,6 +39604,8 @@ class _$MessageObjectCopyWithImpl<$Res, $Val extends MessageObject> ) as $Val); } + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageObjectIncompleteDetailsCopyWith<$Res>? get incompleteDetails { @@ -37600,6 +39658,8 @@ class __$$MessageObjectImplCopyWithImpl<$Res> _$MessageObjectImpl _value, $Res Function(_$MessageObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37782,10 +39842,14 @@ class _$MessageObjectImpl extends _MessageObject { return EqualUnmodifiableListView(value); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -37828,7 +39892,7 @@ class _$MessageObjectImpl extends _MessageObject { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -37847,7 +39911,9 @@ class _$MessageObjectImpl extends _MessageObject { const DeepCollectionEquality().hash(_attachments), const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageObjectImplCopyWith<_$MessageObjectImpl> get copyWith => @@ -37884,72 +39950,76 @@ abstract class _MessageObject extends MessageObject { factory _MessageObject.fromJson(Map json) = _$MessageObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread.message`. - MessageObjectObject get object; @override + MessageObjectObject get object; /// The Unix timestamp (in seconds) for when the message was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The [thread](https://platform.openai.com/docs/api-reference/threads) ID that this message belongs to. + @override @JsonKey(name: 'thread_id') String get threadId; - @override /// The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. + @override @JsonKey(unknownEnumValue: JsonKey.nullForUndefinedEnumValue) MessageObjectStatus? get status; - @override /// On an incomplete message, details about why the message is incomplete. + @override @JsonKey(name: 'incomplete_details') MessageObjectIncompleteDetails? get incompleteDetails; - @override /// The Unix timestamp (in seconds) for when the message was completed. + @override @JsonKey(name: 'completed_at') int? get completedAt; - @override /// The Unix timestamp (in seconds) for when the message was marked as incomplete. + @override @JsonKey(name: 'incomplete_at') int? get incompleteAt; - @override /// The entity that produced the message. One of `user` or `assistant`. - MessageRole get role; @override + MessageRole get role; /// The content of the message in array of text and/or images. - List get content; @override + List get content; /// If applicable, the ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) that authored this message. + @override @JsonKey(name: 'assistant_id') String? get assistantId; - @override /// The ID of the [run](https://platform.openai.com/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. + @override @JsonKey(name: 'run_id') String? get runId; - @override /// A list of files attached to the message, and the tools they were added to. - List? 
get attachments; @override + List? get attachments; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override Map? get metadata; + + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageObjectImplCopyWith<_$MessageObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -37965,8 +40035,12 @@ mixin _$MessageObjectIncompleteDetails { MessageObjectIncompleteDetailsReason get reason => throw _privateConstructorUsedError; + /// Serializes this MessageObjectIncompleteDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageObjectIncompleteDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -37993,6 +40067,8 @@ class _$MessageObjectIncompleteDetailsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38029,6 +40105,8 @@ class __$$MessageObjectIncompleteDetailsImplCopyWithImpl<$Res> $Res Function(_$MessageObjectIncompleteDetailsImpl) _then) : super(_value, _then); + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38071,11 +40149,13 @@ class _$MessageObjectIncompleteDetailsImpl (identical(other.reason, reason) || other.reason == reason)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, reason); - @JsonKey(ignore: true) + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageObjectIncompleteDetailsImplCopyWith< @@ -38101,12 +40181,14 @@ abstract class _MessageObjectIncompleteDetails factory _MessageObjectIncompleteDetails.fromJson(Map json) = _$MessageObjectIncompleteDetailsImpl.fromJson; - @override - /// The reason the message is incomplete. + @override MessageObjectIncompleteDetailsReason get reason; + + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageObjectIncompleteDetailsImplCopyWith< _$MessageObjectIncompleteDetailsImpl> get copyWith => throw _privateConstructorUsedError; @@ -38126,8 +40208,12 @@ mixin _$MessageAttachment { @JsonKey(includeIfNull: false) List? 
get tools => throw _privateConstructorUsedError; + /// Serializes this MessageAttachment to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageAttachmentCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38153,6 +40239,8 @@ class _$MessageAttachmentCopyWithImpl<$Res, $Val extends MessageAttachment> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38193,6 +40281,8 @@ class __$$MessageAttachmentImplCopyWithImpl<$Res> $Res Function(_$MessageAttachmentImpl) _then) : super(_value, _then); + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38257,12 +40347,14 @@ class _$MessageAttachmentImpl extends _MessageAttachment { const DeepCollectionEquality().equals(other._tools, _tools)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, fileId, const DeepCollectionEquality().hash(_tools)); - @JsonKey(ignore: true) + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageAttachmentImplCopyWith<_$MessageAttachmentImpl> get copyWith => @@ -38287,18 +40379,20 @@ abstract class _MessageAttachment extends MessageAttachment { factory _MessageAttachment.fromJson(Map json) = _$MessageAttachmentImpl.fromJson; - @override - /// The ID of the file to attach to the message. + @override @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; - @override /// The tools to add this file to. + @override @JsonKey(includeIfNull: false) List? get tools; + + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageAttachmentImplCopyWith<_$MessageAttachmentImpl> get copyWith => throw _privateConstructorUsedError; } @@ -38318,8 +40412,12 @@ mixin _$MessageDeltaObject { /// The delta containing the fields that have changed on the Message. MessageDelta get delta => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38345,6 +40443,8 @@ class _$MessageDeltaObjectCopyWithImpl<$Res, $Val extends MessageDeltaObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -38368,6 +40468,8 @@ class _$MessageDeltaObjectCopyWithImpl<$Res, $Val extends MessageDeltaObject> ) as $Val); } + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageDeltaCopyWith<$Res> get delta { @@ -38399,6 +40501,8 @@ class __$$MessageDeltaObjectImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38460,11 +40564,13 @@ class _$MessageDeltaObjectImpl extends _MessageDeltaObject { (identical(other.delta, delta) || other.delta == delta)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, object, delta); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaObjectImplCopyWith<_$MessageDeltaObjectImpl> get copyWith => @@ -38489,20 +40595,22 @@ abstract class _MessageDeltaObject extends MessageDeltaObject { factory _MessageDeltaObject.fromJson(Map json) = _$MessageDeltaObjectImpl.fromJson; - @override - /// The identifier of the message, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread.message.delta`. - MessageDeltaObjectObject get object; @override + MessageDeltaObjectObject get object; /// The delta containing the fields that have changed on the Message. + @override MessageDelta get delta; + + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaObjectImplCopyWith<_$MessageDeltaObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -38522,8 +40630,12 @@ mixin _$MessageDelta { @JsonKey(includeIfNull: false) List? get content => throw _privateConstructorUsedError; + /// Serializes this MessageDelta to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38552,6 +40664,8 @@ class _$MessageDeltaCopyWithImpl<$Res, $Val extends MessageDelta> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38595,6 +40709,8 @@ class __$$MessageDeltaImplCopyWithImpl<$Res> _$MessageDeltaImpl _value, $Res Function(_$MessageDeltaImpl) _then) : super(_value, _then); + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -38663,12 +40779,14 @@ class _$MessageDeltaImpl extends _MessageDelta { const DeepCollectionEquality().equals(other._content, _content)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, role, const DeepCollectionEquality().hash(_content)); - @JsonKey(ignore: true) + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => @@ -38695,19 +40813,21 @@ abstract class _MessageDelta extends MessageDelta { factory _MessageDelta.fromJson(Map json) = _$MessageDeltaImpl.fromJson; - @override - /// The entity that produced the message. One of `user` or `assistant`. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) MessageRole? get role; - @override /// The content of the message in array of text and/or images. + @override @JsonKey(includeIfNull: false) List? get content; + + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => throw _privateConstructorUsedError; } @@ -38730,12 +40850,18 @@ mixin _$CreateMessageRequest { List? get attachments => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this CreateMessageRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateMessageRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38767,6 +40893,8 @@ class _$CreateMessageRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38795,6 +40923,8 @@ class _$CreateMessageRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateMessageRequestContentCopyWith<$Res> get content { @@ -38831,6 +40961,8 @@ class __$$CreateMessageRequestImplCopyWithImpl<$Res> $Res Function(_$CreateMessageRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -38898,10 +41030,14 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { return EqualUnmodifiableListView(value); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -38929,7 +41065,7 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -38938,7 +41074,9 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { const DeepCollectionEquality().hash(_attachments), const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> @@ -38967,27 +41105,31 @@ abstract class _CreateMessageRequest extends CreateMessageRequest { factory _CreateMessageRequest.fromJson(Map json) = _$CreateMessageRequestImpl.fromJson; - @override - /// The entity that produced the message. One of `user` or `assistant`. - MessageRole get role; @override + MessageRole get role; /// The content of the message. + @override @_CreateMessageRequestContentConverter() CreateMessageRequestContent get content; - @override /// A list of files attached to the message, and the tools they were added to. + @override @JsonKey(includeIfNull: false) List? get attachments; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -39054,6 +41196,8 @@ mixin _$CreateMessageRequestContent { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateMessageRequestContent to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -39076,6 +41220,9 @@ class _$CreateMessageRequestContentCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -39101,6 +41248,8 @@ class __$$CreateMessageRequestContentListMessageContentImplCopyWithImpl<$Res> $Res Function(_$CreateMessageRequestContentListMessageContentImpl) _then) : super(_value, _then); + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39154,12 +41303,14 @@ class _$CreateMessageRequestContentListMessageContentImpl const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateMessageRequestContentListMessageContentImplCopyWith< @@ -39256,7 +41407,10 @@ abstract class CreateMessageRequestContentListMessageContent @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateMessageRequestContentListMessageContentImplCopyWith< _$CreateMessageRequestContentListMessageContentImpl> get copyWith => throw _privateConstructorUsedError; @@ -39282,6 +41436,8 @@ class __$$CreateMessageRequestContentStringImplCopyWithImpl<$Res> $Res Function(_$CreateMessageRequestContentStringImpl) _then) : super(_value, _then); + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39328,11 +41484,13 @@ class _$CreateMessageRequestContentStringImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateMessageRequestContentStringImplCopyWith< @@ -39426,7 +41584,10 @@ abstract class CreateMessageRequestContentString @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateMessageRequestContentStringImplCopyWith< _$CreateMessageRequestContentStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -39438,12 +41599,18 @@ ModifyMessageRequest _$ModifyMessageRequestFromJson(Map json) { /// @nodoc mixin _$ModifyMessageRequest { - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this ModifyMessageRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModifyMessageRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39468,6 +41635,8 @@ class _$ModifyMessageRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39501,6 +41670,8 @@ class __$$ModifyMessageRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyMessageRequestImpl) _then) : super(_value, _then); + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39526,10 +41697,14 @@ class _$ModifyMessageRequestImpl extends _ModifyMessageRequest { factory _$ModifyMessageRequestImpl.fromJson(Map json) => _$$ModifyMessageRequestImplFromJson(json); - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? 
get metadata { @@ -39553,12 +41728,14 @@ class _$ModifyMessageRequestImpl extends _ModifyMessageRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyMessageRequestImplCopyWith<_$ModifyMessageRequestImpl> @@ -39583,13 +41760,17 @@ abstract class _ModifyMessageRequest extends ModifyMessageRequest { factory _ModifyMessageRequest.fromJson(Map json) = _$ModifyMessageRequestImpl.fromJson; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyMessageRequestImplCopyWith<_$ModifyMessageRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -39610,8 +41791,12 @@ mixin _$DeleteMessageResponse { /// The object type, which is always `thread.message.deleted`. DeleteMessageResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this DeleteMessageResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteMessageResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39636,6 +41821,8 @@ class _$DeleteMessageResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39681,6 +41868,8 @@ class __$$DeleteMessageResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteMessageResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39742,11 +41931,13 @@ class _$DeleteMessageResponseImpl extends _DeleteMessageResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteMessageResponseImplCopyWith<_$DeleteMessageResponseImpl> @@ -39772,20 +41963,22 @@ abstract class _DeleteMessageResponse extends DeleteMessageResponse { factory _DeleteMessageResponse.fromJson(Map json) = _$DeleteMessageResponseImpl.fromJson; - @override - /// The message identifier. - String get id; @override + String get id; /// Whether the message was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always `thread.message.deleted`. + @override DeleteMessageResponseObject get object; + + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteMessageResponseImplCopyWith<_$DeleteMessageResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -39814,8 +42007,12 @@ mixin _$ListMessagesResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListMessagesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListMessagesResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39845,6 +42042,8 @@ class _$ListMessagesResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39903,6 +42102,8 @@ class __$$ListMessagesResponseImplCopyWithImpl<$Res> $Res Function(_$ListMessagesResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39999,12 +42200,14 @@ class _$ListMessagesResponseImpl extends _ListMessagesResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListMessagesResponseImplCopyWith<_$ListMessagesResponseImpl> @@ -40033,31 +42236,33 @@ abstract class _ListMessagesResponse extends ListMessagesResponse { factory _ListMessagesResponse.fromJson(Map json) = _$ListMessagesResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// The list of messages. - List get data; @override + List get data; /// The ID of the first message in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last message in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more messages to retrieve. 
+ @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListMessagesResponseImplCopyWith<_$ListMessagesResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -40076,8 +42281,12 @@ mixin _$MessageContentImageFile { /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. MessageContentImageDetail get detail => throw _privateConstructorUsedError; + /// Serializes this MessageContentImageFile to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentImageFileCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40104,6 +42313,8 @@ class _$MessageContentImageFileCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40147,6 +42358,8 @@ class __$$MessageContentImageFileImplCopyWithImpl<$Res> $Res Function(_$MessageContentImageFileImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40201,11 +42414,13 @@ class _$MessageContentImageFileImpl extends _MessageContentImageFile { (identical(other.detail, detail) || other.detail == detail)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId, detail); - @JsonKey(ignore: true) + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentImageFileImplCopyWith<_$MessageContentImageFileImpl> @@ -40229,17 +42444,19 @@ abstract class _MessageContentImageFile extends MessageContentImageFile { factory _MessageContentImageFile.fromJson(Map json) = _$MessageContentImageFileImpl.fromJson; - @override - /// The [File](https://platform.openai.com/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. + @override @JsonKey(name: 'file_id') String get fileId; - @override /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + @override MessageContentImageDetail get detail; + + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentImageFileImplCopyWith<_$MessageContentImageFileImpl> get copyWith => throw _privateConstructorUsedError; } @@ -40257,8 +42474,12 @@ mixin _$MessageContentImageUrl { /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. 
MessageContentImageDetail get detail => throw _privateConstructorUsedError; + /// Serializes this MessageContentImageUrl to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentImageUrlCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40283,6 +42504,8 @@ class _$MessageContentImageUrlCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40324,6 +42547,8 @@ class __$$MessageContentImageUrlImplCopyWithImpl<$Res> $Res Function(_$MessageContentImageUrlImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40376,11 +42601,13 @@ class _$MessageContentImageUrlImpl extends _MessageContentImageUrl { (identical(other.detail, detail) || other.detail == detail)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, url, detail); - @JsonKey(ignore: true) + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentImageUrlImplCopyWith<_$MessageContentImageUrlImpl> @@ -40404,16 +42631,18 @@ abstract class _MessageContentImageUrl extends MessageContentImageUrl { factory _MessageContentImageUrl.fromJson(Map json) = _$MessageContentImageUrlImpl.fromJson; - @override - /// The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp. - String get url; @override + String get url; /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + @override MessageContentImageDetail get detail; + + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentImageUrlImplCopyWith<_$MessageContentImageUrlImpl> get copyWith => throw _privateConstructorUsedError; } @@ -40431,8 +42660,12 @@ mixin _$MessageRequestContentTextObject { /// Text content to be sent to the model String get text => throw _privateConstructorUsedError; + /// Serializes this MessageRequestContentTextObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageRequestContentTextObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40459,6 +42692,8 @@ class _$MessageRequestContentTextObjectCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -40500,6 +42735,8 @@ class __$$MessageRequestContentTextObjectImplCopyWithImpl<$Res> $Res Function(_$MessageRequestContentTextObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40553,11 +42790,13 @@ class _$MessageRequestContentTextObjectImpl (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, text); - @JsonKey(ignore: true) + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageRequestContentTextObjectImplCopyWith< @@ -40583,16 +42822,18 @@ abstract class _MessageRequestContentTextObject factory _MessageRequestContentTextObject.fromJson(Map json) = _$MessageRequestContentTextObjectImpl.fromJson; - @override - /// Always `text`. - String get type; @override + String get type; /// Text content to be sent to the model + @override String get text; + + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageRequestContentTextObjectImplCopyWith< _$MessageRequestContentTextObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -40612,8 +42853,12 @@ mixin _$MessageContentText { List? get annotations => throw _privateConstructorUsedError; + /// Serializes this MessageContentText to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentTextCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40640,6 +42885,8 @@ class _$MessageContentTextCopyWithImpl<$Res, $Val extends MessageContentText> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40681,6 +42928,8 @@ class __$$MessageContentTextImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40746,12 +42995,14 @@ class _$MessageContentTextImpl extends _MessageContentText { .equals(other._annotations, _annotations)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, value, const DeepCollectionEquality().hash(_annotations)); - @JsonKey(ignore: true) + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => @@ -40777,17 +43028,19 @@ abstract class _MessageContentText extends MessageContentText { factory _MessageContentText.fromJson(Map json) = _$MessageContentTextImpl.fromJson; - @override - /// The data that makes up the text. - String get value; @override + String get value; /// A list of annotations that point to specific quotes from specific files. + @override @JsonKey(includeIfNull: false) List? get annotations; + + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => throw _privateConstructorUsedError; } @@ -40804,8 +43057,12 @@ mixin _$MessageContentTextAnnotationsFileCitation { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; + /// Serializes this MessageContentTextAnnotationsFileCitation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentTextAnnotationsFileCitationCopyWith< MessageContentTextAnnotationsFileCitation> get copyWith => throw _privateConstructorUsedError; @@ -40834,6 +43091,8 @@ class _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40870,6 +43129,8 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextAnnotationsFileCitationImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40914,11 +43175,13 @@ class _$MessageContentTextAnnotationsFileCitationImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextAnnotationsFileCitationImplCopyWith< @@ -40947,13 +43210,15 @@ abstract class _MessageContentTextAnnotationsFileCitation Map json) = _$MessageContentTextAnnotationsFileCitationImpl.fromJson; - @override - /// The ID of the specific File the citation is from. + @override @JsonKey(name: 'file_id') String get fileId; + + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextAnnotationsFileCitationImplCopyWith< _$MessageContentTextAnnotationsFileCitationImpl> get copyWith => throw _privateConstructorUsedError; @@ -40978,8 +43243,12 @@ mixin _$MessageDeltaContentImageUrlObject { @JsonKey(name: 'image_url', includeIfNull: false) MessageContentImageUrl? get imageUrl => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaContentImageUrlObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContentImageUrlObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentImageUrlObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -41012,6 +43281,8 @@ class _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentImageUrlObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41035,6 +43306,8 @@ class _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of MessageDeltaContentImageUrlObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageContentImageUrlCopyWith<$Res>? get imageUrl { @@ -41077,6 +43350,8 @@ class __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentImageUrlObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentImageUrlObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41146,11 +43421,13 @@ class _$MessageDeltaContentImageUrlObjectImpl other.imageUrl == imageUrl)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, type, imageUrl); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentImageUrlObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentImageUrlObjectImplCopyWith< @@ -41180,23 +43457,25 @@ abstract class _MessageDeltaContentImageUrlObject Map json) = _$MessageDeltaContentImageUrlObjectImpl.fromJson; - @override - /// The index of the content part in the message. + @override @JsonKey(includeIfNull: false) int? get index; - @override /// Always `image_url`. + @override @JsonKey(includeIfNull: false) String? get type; - @override /// The image URL part of a message. + @override @JsonKey(name: 'image_url', includeIfNull: false) MessageContentImageUrl? get imageUrl; + + /// Create a copy of MessageDeltaContentImageUrlObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentImageUrlObjectImplCopyWith< _$MessageDeltaContentImageUrlObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -41218,8 +43497,12 @@ mixin _$MessageDeltaContentText { List? get annotations => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaContentText to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentTextCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -41247,6 +43530,8 @@ class _$MessageDeltaContentTextCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41291,6 +43576,8 @@ class __$$MessageDeltaContentTextImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentTextImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41357,12 +43644,14 @@ class _$MessageDeltaContentTextImpl extends _MessageDeltaContentText { .equals(other._annotations, _annotations)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, value, const DeepCollectionEquality().hash(_annotations)); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextImplCopyWith<_$MessageDeltaContentTextImpl> @@ -41388,18 +43677,20 @@ abstract class _MessageDeltaContentText extends MessageDeltaContentText { factory _MessageDeltaContentText.fromJson(Map json) = _$MessageDeltaContentTextImpl.fromJson; - @override - /// The data that makes up the text. + @override @JsonKey(includeIfNull: false) String? get value; - @override /// A list of annotations that point to specific quotes from specific files. + @override @JsonKey(includeIfNull: false) List? get annotations; + + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextImplCopyWith<_$MessageDeltaContentTextImpl> get copyWith => throw _privateConstructorUsedError; } @@ -41420,8 +43711,12 @@ mixin _$MessageDeltaContentTextAnnotationsFileCitation { @JsonKey(includeIfNull: false) String? get quote => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaContentTextAnnotationsFileCitation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentTextAnnotationsFileCitationCopyWith< MessageDeltaContentTextAnnotationsFileCitation> get copyWith => throw _privateConstructorUsedError; @@ -41452,6 +43747,8 @@ class _$MessageDeltaContentTextAnnotationsFileCitationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -41498,6 +43795,8 @@ class __$$MessageDeltaContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentTextAnnotationsFileCitationImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41554,11 +43853,13 @@ class _$MessageDeltaContentTextAnnotationsFileCitationImpl (identical(other.quote, quote) || other.quote == quote)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId, quote); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextAnnotationsFileCitationImplCopyWith< @@ -41588,18 +43889,20 @@ abstract class _MessageDeltaContentTextAnnotationsFileCitation Map json) = _$MessageDeltaContentTextAnnotationsFileCitationImpl.fromJson; - @override - /// The ID of the specific File the citation is from. + @override @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; - @override /// The specific quote in the file. + @override @JsonKey(includeIfNull: false) String? get quote; + + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextAnnotationsFileCitationImplCopyWith< _$MessageDeltaContentTextAnnotationsFileCitationImpl> get copyWith => throw _privateConstructorUsedError; @@ -41664,14 +43967,20 @@ mixin _$RunStepObject { @JsonKey(name: 'completed_at') int? get completedAt => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. RunStepCompletionUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this RunStepObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -41715,6 +44024,8 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -41803,6 +44114,8 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> ) as $Val); } + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDetailsCopyWith<$Res> get stepDetails { @@ -41811,6 +44124,8 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> }); } + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepLastErrorCopyWith<$Res>? get lastError { @@ -41823,6 +44138,8 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> }); } + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepCompletionUsageCopyWith<$Res>? get usage { @@ -41878,6 +44195,8 @@ class __$$RunStepObjectImplCopyWithImpl<$Res> _$RunStepObjectImpl _value, $Res Function(_$RunStepObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42060,10 +44379,14 @@ class _$RunStepObjectImpl extends _RunStepObject { @JsonKey(name: 'completed_at') final int? completedAt; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -42114,7 +44437,7 @@ class _$RunStepObjectImpl extends _RunStepObject { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -42135,7 +44458,9 @@ class _$RunStepObjectImpl extends _RunStepObject { const DeepCollectionEquality().hash(_metadata), usage); - @JsonKey(ignore: true) + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepObjectImplCopyWith<_$RunStepObjectImpl> get copyWith => @@ -42172,83 +44497,87 @@ abstract class _RunStepObject extends RunStepObject { factory _RunStepObject.fromJson(Map json) = _$RunStepObjectImpl.fromJson; - @override - /// The identifier of the run step, which can be referenced in API endpoints. 
- String get id; @override + String get id; /// The object type, which is always `thread.run.step`. - RunStepObjectObject get object; @override + RunStepObjectObject get object; /// The Unix timestamp (in seconds) for when the run step was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) associated with the run step. + @override @JsonKey(name: 'assistant_id') String get assistantId; - @override /// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was run. + @override @JsonKey(name: 'thread_id') String get threadId; - @override /// The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that this run step is a part of. + @override @JsonKey(name: 'run_id') String get runId; - @override /// The type of run step, which can be either `message_creation` or `tool_calls`. - RunStepType get type; @override + RunStepType get type; /// The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. - RunStepStatus get status; @override + RunStepStatus get status; /// The details of the run step. /// Any of: [RunStepDetailsMessageCreationObject], [RunStepDetailsToolCallsObject] + @override @JsonKey(name: 'step_details') RunStepDetails get stepDetails; - @override /// The last error associated with this run step. Will be `null` if there are no errors. + @override @JsonKey(name: 'last_error') RunStepLastError? get lastError; - @override /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + @override @JsonKey(name: 'expired_at') int? get expiredAt; - @override /// The Unix timestamp (in seconds) for when the run step was cancelled. + @override @JsonKey(name: 'cancelled_at') int? get cancelledAt; - @override /// The Unix timestamp (in seconds) for when the run step failed. + @override @JsonKey(name: 'failed_at') int? get failedAt; - @override /// The Unix timestamp (in seconds) for when the run step completed. + @override @JsonKey(name: 'completed_at') int? get completedAt; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - Map? get metadata; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override + Map? get metadata; /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. + @override RunStepCompletionUsage? get usage; + + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepObjectImplCopyWith<_$RunStepObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42265,8 +44594,12 @@ mixin _$RunStepLastError { /// A human-readable description of the error. String get message => throw _privateConstructorUsedError; + /// Serializes this RunStepLastError to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepLastErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42290,6 +44623,8 @@ class _$RunStepLastErrorCopyWithImpl<$Res, $Val extends RunStepLastError> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42328,6 +44663,8 @@ class __$$RunStepLastErrorImplCopyWithImpl<$Res> $Res Function(_$RunStepLastErrorImpl) _then) : super(_value, _then); + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42378,11 +44715,13 @@ class _$RunStepLastErrorImpl extends _RunStepLastError { (identical(other.message, message) || other.message == message)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message); - @JsonKey(ignore: true) + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepLastErrorImplCopyWith<_$RunStepLastErrorImpl> get copyWith => @@ -42406,16 +44745,18 @@ abstract class _RunStepLastError extends RunStepLastError { factory _RunStepLastError.fromJson(Map json) = _$RunStepLastErrorImpl.fromJson; - @override - /// One of `server_error` or `rate_limit_exceeded`. - RunStepLastErrorCode get code; @override + RunStepLastErrorCode get code; /// A human-readable description of the error. + @override String get message; + + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepLastErrorImplCopyWith<_$RunStepLastErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42435,8 +44776,12 @@ mixin _$RunStepDeltaObject { /// The delta containing the fields that have changed on the run step. RunStepDelta get delta => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42462,6 +44807,8 @@ class _$RunStepDeltaObjectCopyWithImpl<$Res, $Val extends RunStepDeltaObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42485,6 +44832,8 @@ class _$RunStepDeltaObjectCopyWithImpl<$Res, $Val extends RunStepDeltaObject> ) as $Val); } + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $RunStepDeltaCopyWith<$Res> get delta { @@ -42516,6 +44865,8 @@ class __$$RunStepDeltaObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42577,11 +44928,13 @@ class _$RunStepDeltaObjectImpl extends _RunStepDeltaObject { (identical(other.delta, delta) || other.delta == delta)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, object, delta); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaObjectImplCopyWith<_$RunStepDeltaObjectImpl> get copyWith => @@ -42606,20 +44959,22 @@ abstract class _RunStepDeltaObject extends RunStepDeltaObject { factory _RunStepDeltaObject.fromJson(Map json) = _$RunStepDeltaObjectImpl.fromJson; - @override - /// The identifier of the run step, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread.run.step.delta`. - RunStepDeltaObjectObject get object; @override + RunStepDeltaObjectObject get object; /// The delta containing the fields that have changed on the run step. + @override RunStepDelta get delta; + + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaObjectImplCopyWith<_$RunStepDeltaObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42635,8 +44990,12 @@ mixin _$RunStepDelta { @JsonKey(name: 'step_details', includeIfNull: false) RunStepDeltaDetails? get stepDetails => throw _privateConstructorUsedError; + /// Serializes this RunStepDelta to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42664,6 +45023,8 @@ class _$RunStepDeltaCopyWithImpl<$Res, $Val extends RunStepDelta> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42677,6 +45038,8 @@ class _$RunStepDeltaCopyWithImpl<$Res, $Val extends RunStepDelta> ) as $Val); } + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaDetailsCopyWith<$Res>? get stepDetails { @@ -42714,6 +45077,8 @@ class __$$RunStepDeltaImplCopyWithImpl<$Res> _$RunStepDeltaImpl _value, $Res Function(_$RunStepDeltaImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -42758,11 +45123,13 @@ class _$RunStepDeltaImpl extends _RunStepDelta { other.stepDetails == stepDetails)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, stepDetails); - @JsonKey(ignore: true) + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaImplCopyWith<_$RunStepDeltaImpl> get copyWith => @@ -42785,14 +45152,16 @@ abstract class _RunStepDelta extends RunStepDelta { factory _RunStepDelta.fromJson(Map json) = _$RunStepDeltaImpl.fromJson; - @override - /// The details of the run step /// Any of: [RunStepDeltaStepDetailsMessageCreationObject], [RunStepDeltaStepDetailsToolCallsObject] + @override @JsonKey(name: 'step_details', includeIfNull: false) RunStepDeltaDetails? get stepDetails; + + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaImplCopyWith<_$RunStepDeltaImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42821,8 +45190,12 @@ mixin _$ListRunStepsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListRunStepsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListRunStepsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42852,6 +45225,8 @@ class _$ListRunStepsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42910,6 +45285,8 @@ class __$$ListRunStepsResponseImplCopyWithImpl<$Res> $Res Function(_$ListRunStepsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43006,12 +45383,14 @@ class _$ListRunStepsResponseImpl extends _ListRunStepsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListRunStepsResponseImplCopyWith<_$ListRunStepsResponseImpl> @@ -43040,31 +45419,33 @@ abstract class _ListRunStepsResponse extends ListRunStepsResponse { factory _ListRunStepsResponse.fromJson(Map json) = _$ListRunStepsResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// The list of run steps. - List get data; @override + List get data; /// The ID of the first run step in the list. 
+ @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last run step in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more run steps to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListRunStepsResponseImplCopyWith<_$ListRunStepsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -43080,8 +45461,12 @@ mixin _$RunStepDetailsMessageCreation { @JsonKey(name: 'message_id') String get messageId => throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsMessageCreation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsMessageCreationCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -43108,6 +45493,8 @@ class _$RunStepDetailsMessageCreationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43144,6 +45531,8 @@ class __$$RunStepDetailsMessageCreationImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsMessageCreationImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43189,11 +45578,13 @@ class _$RunStepDetailsMessageCreationImpl other.messageId == messageId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, messageId); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsMessageCreationImplCopyWith< @@ -43219,13 +45610,15 @@ abstract class _RunStepDetailsMessageCreation factory _RunStepDetailsMessageCreation.fromJson(Map json) = _$RunStepDetailsMessageCreationImpl.fromJson; - @override - /// The ID of the message that was created by this run step. + @override @JsonKey(name: 'message_id') String get messageId; + + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsMessageCreationImplCopyWith< _$RunStepDetailsMessageCreationImpl> get copyWith => throw _privateConstructorUsedError; @@ -43243,8 +45636,12 @@ mixin _$RunStepDeltaStepDetailsMessageCreation { @JsonKey(name: 'message_id', includeIfNull: false) String? get messageId => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaStepDetailsMessageCreation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsMessageCreationCopyWith< RunStepDeltaStepDetailsMessageCreation> get copyWith => throw _privateConstructorUsedError; @@ -43273,6 +45670,8 @@ class _$RunStepDeltaStepDetailsMessageCreationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43310,6 +45709,8 @@ class __$$RunStepDeltaStepDetailsMessageCreationImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsMessageCreationImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43355,11 +45756,13 @@ class _$RunStepDeltaStepDetailsMessageCreationImpl other.messageId == messageId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, messageId); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsMessageCreationImplCopyWith< @@ -43387,13 +45790,15 @@ abstract class _RunStepDeltaStepDetailsMessageCreation Map json) = _$RunStepDeltaStepDetailsMessageCreationImpl.fromJson; - @override - /// The ID of the message that was created by this run step. + @override @JsonKey(name: 'message_id', includeIfNull: false) String? get messageId; + + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsMessageCreationImplCopyWith< _$RunStepDeltaStepDetailsMessageCreationImpl> get copyWith => throw _privateConstructorUsedError; @@ -43414,8 +45819,12 @@ mixin _$RunStepDetailsToolCallsCodeObjectCodeInterpreter { List get outputs => throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsToolCallsCodeObjectCodeInterpreter to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith< RunStepDetailsToolCallsCodeObjectCodeInterpreter> get copyWith => throw _privateConstructorUsedError; @@ -43445,6 +45854,8 @@ class _$RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43491,6 +45902,8 @@ class __$$RunStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -43553,12 +45966,14 @@ class _$RunStepDetailsToolCallsCodeObjectCodeInterpreterImpl const DeepCollectionEquality().equals(other._outputs, _outputs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, input, const DeepCollectionEquality().hash(_outputs)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< @@ -43588,16 +46003,18 @@ abstract class _RunStepDetailsToolCallsCodeObjectCodeInterpreter Map json) = _$RunStepDetailsToolCallsCodeObjectCodeInterpreterImpl.fromJson; - @override - /// The input to the Code Interpreter tool call. - String get input; @override + String get input; /// The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + @override List get outputs; + + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< _$RunStepDetailsToolCallsCodeObjectCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -43621,8 +46038,12 @@ mixin _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter { List? get outputs => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith< RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter> get copyWith => throw _privateConstructorUsedError; @@ -43660,6 +46081,8 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWithImpl< // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43715,6 +46138,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWithI _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -43785,12 +46210,14 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImpl const DeepCollectionEquality().equals(other._outputs, _outputs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, input, const DeepCollectionEquality().hash(_outputs)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< @@ -43822,18 +46249,20 @@ abstract class _RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter Map json) = _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImpl.fromJson; - @override - /// The input to the Code Interpreter tool call. + @override @JsonKey(includeIfNull: false) String? get input; - @override /// The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + @override @JsonKey(includeIfNull: false) List? get outputs; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -43851,8 +46280,12 @@ mixin _$RunStepDetailsToolCallsCodeOutputImage { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsToolCallsCodeOutputImage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsToolCallsCodeOutputImageCopyWith< RunStepDetailsToolCallsCodeOutputImage> get copyWith => throw _privateConstructorUsedError; @@ -43880,6 +46313,8 @@ class _$RunStepDetailsToolCallsCodeOutputImageCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43916,6 +46351,8 @@ class __$$RunStepDetailsToolCallsCodeOutputImageImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsCodeOutputImageImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -43960,11 +46397,13 @@ class _$RunStepDetailsToolCallsCodeOutputImageImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeOutputImageImplCopyWith< @@ -43992,13 +46431,15 @@ abstract class _RunStepDetailsToolCallsCodeOutputImage Map json) = _$RunStepDetailsToolCallsCodeOutputImageImpl.fromJson; - @override - /// The [file](https://platform.openai.com/docs/api-reference/files) ID of the image. + @override @JsonKey(name: 'file_id') String get fileId; + + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsCodeOutputImageImplCopyWith< _$RunStepDetailsToolCallsCodeOutputImageImpl> get copyWith => throw _privateConstructorUsedError; @@ -44016,8 +46457,12 @@ mixin _$RunStepDeltaStepDetailsToolCallsCodeOutputImage { @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaStepDetailsToolCallsCodeOutputImage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith< RunStepDeltaStepDetailsToolCallsCodeOutputImage> get copyWith => throw _privateConstructorUsedError; @@ -44046,6 +46491,8 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44086,6 +46533,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44130,11 +46579,13 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageImplCopyWith< @@ -44164,13 +46615,15 @@ abstract class _RunStepDeltaStepDetailsToolCallsCodeOutputImage Map json) = _$RunStepDeltaStepDetailsToolCallsCodeOutputImageImpl.fromJson; - @override - /// The [file](https://platform.openai.com/docs/api-reference/files) ID of the image. + @override @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeOutputImageImpl> get copyWith => throw _privateConstructorUsedError; @@ -44195,8 +46648,12 @@ mixin _$RunStepCompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; + /// Serializes this RunStepCompletionUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepCompletionUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -44224,6 +46681,8 @@ class _$RunStepCompletionUsageCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44273,6 +46732,8 @@ class __$$RunStepCompletionUsageImplCopyWithImpl<$Res> $Res Function(_$RunStepCompletionUsageImpl) _then) : super(_value, _then); + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44342,12 +46803,14 @@ class _$RunStepCompletionUsageImpl extends _RunStepCompletionUsage { other.totalTokens == totalTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); - @JsonKey(ignore: true) + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepCompletionUsageImplCopyWith<_$RunStepCompletionUsageImpl> @@ -44373,23 +46836,25 @@ abstract class _RunStepCompletionUsage extends RunStepCompletionUsage { factory _RunStepCompletionUsage.fromJson(Map json) = _$RunStepCompletionUsageImpl.fromJson; - @override - /// Number of completion tokens used over the course of the run step. + @override @JsonKey(name: 'completion_tokens') int get completionTokens; - @override /// Number of prompt tokens used over the course of the run step. + @override @JsonKey(name: 'prompt_tokens') int get promptTokens; - @override /// Total number of tokens used (prompt + completion). + @override @JsonKey(name: 'total_tokens') int get totalTokens; + + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepCompletionUsageImplCopyWith<_$RunStepCompletionUsageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -44408,8 +46873,12 @@ mixin _$VectorStoreExpirationAfter { /// The number of days after the anchor time that the vector store will expire. int get days => throw _privateConstructorUsedError; + /// Serializes this VectorStoreExpirationAfter to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreExpirationAfterCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -44435,6 +46904,8 @@ class _$VectorStoreExpirationAfterCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44476,6 +46947,8 @@ class __$$VectorStoreExpirationAfterImplCopyWithImpl<$Res> $Res Function(_$VectorStoreExpirationAfterImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44528,11 +47001,13 @@ class _$VectorStoreExpirationAfterImpl extends _VectorStoreExpirationAfter { (identical(other.days, days) || other.days == days)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, anchor, days); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreExpirationAfterImplCopyWith<_$VectorStoreExpirationAfterImpl> @@ -44556,16 +47031,18 @@ abstract class _VectorStoreExpirationAfter extends VectorStoreExpirationAfter { factory _VectorStoreExpirationAfter.fromJson(Map json) = _$VectorStoreExpirationAfterImpl.fromJson; - @override - /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. - VectorStoreExpirationAfterAnchor get anchor; @override + VectorStoreExpirationAfterAnchor get anchor; /// The number of days after the anchor time that the vector store will expire. + @override int get days; + + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreExpirationAfterImplCopyWith<_$VectorStoreExpirationAfterImpl> get copyWith => throw _privateConstructorUsedError; } @@ -44614,11 +47091,17 @@ mixin _$VectorStoreObject { @JsonKey(name: 'last_active_at') int? get lastActiveAt => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. dynamic get metadata => throw _privateConstructorUsedError; + /// Serializes this VectorStoreObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -44657,6 +47140,8 @@ class _$VectorStoreObjectCopyWithImpl<$Res, $Val extends VectorStoreObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44720,6 +47205,8 @@ class _$VectorStoreObjectCopyWithImpl<$Res, $Val extends VectorStoreObject> ) as $Val); } + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts { @@ -44729,6 +47216,8 @@ class _$VectorStoreObjectCopyWithImpl<$Res, $Val extends VectorStoreObject> }); } + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter { @@ -44779,6 +47268,8 @@ class __$$VectorStoreObjectImplCopyWithImpl<$Res> $Res Function(_$VectorStoreObjectImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44909,7 +47400,9 @@ class _$VectorStoreObjectImpl extends _VectorStoreObject { @JsonKey(name: 'last_active_at') final int? lastActiveAt; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override final dynamic metadata; @@ -44942,7 +47435,7 @@ class _$VectorStoreObjectImpl extends _VectorStoreObject { const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -44958,7 +47451,9 @@ class _$VectorStoreObjectImpl extends _VectorStoreObject { lastActiveAt, const DeepCollectionEquality().hash(metadata)); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreObjectImplCopyWith<_$VectorStoreObjectImpl> get copyWith => @@ -44993,58 +47488,62 @@ abstract class _VectorStoreObject extends VectorStoreObject { factory _VectorStoreObject.fromJson(Map json) = _$VectorStoreObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. 
- String get id; @override + String get id; /// The object type, which is always `vector_store`. - String get object; @override + String get object; /// The Unix timestamp (in seconds) for when the vector store was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The name of the vector store. - String? get name; @override + String? get name; /// The total number of bytes used by the files in the vector store. + @override @JsonKey(name: 'usage_bytes') int get usageBytes; - @override /// The number of files in the vector store. + @override @JsonKey(name: 'file_counts') VectorStoreObjectFileCounts get fileCounts; - @override /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. - VectorStoreObjectStatus get status; @override + VectorStoreObjectStatus get status; /// The expiration policy for a vector store. + @override @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? get expiresAfter; - @override /// The Unix timestamp (in seconds) for when the vector store will expire. + @override @JsonKey(name: 'expires_at', includeIfNull: false) int? get expiresAt; - @override /// The Unix timestamp (in seconds) for when the vector store was last active. + @override @JsonKey(name: 'last_active_at') int? get lastActiveAt; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override dynamic get metadata; + + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreObjectImplCopyWith<_$VectorStoreObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -45072,8 +47571,12 @@ mixin _$VectorStoreObjectFileCounts { /// The total number of files. int get total => throw _privateConstructorUsedError; + /// Serializes this VectorStoreObjectFileCounts to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreObjectFileCountsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -45105,6 +47608,8 @@ class _$VectorStoreObjectFileCountsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45166,6 +47671,8 @@ class __$$VectorStoreObjectFileCountsImplCopyWithImpl<$Res> $Res Function(_$VectorStoreObjectFileCountsImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -45256,12 +47763,14 @@ class _$VectorStoreObjectFileCountsImpl extends _VectorStoreObjectFileCounts { (identical(other.total, total) || other.total == total)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, inProgress, completed, failed, cancelled, total); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreObjectFileCountsImplCopyWith<_$VectorStoreObjectFileCountsImpl> @@ -45289,29 +47798,31 @@ abstract class _VectorStoreObjectFileCounts factory _VectorStoreObjectFileCounts.fromJson(Map json) = _$VectorStoreObjectFileCountsImpl.fromJson; - @override - /// The number of files that are currently being processed. + @override @JsonKey(name: 'in_progress') int get inProgress; - @override /// The number of files that have been successfully processed. - int get completed; @override + int get completed; /// The number of files that have failed to process. - int get failed; @override + int get failed; /// The number of files that were cancelled. - int get cancelled; @override + int get cancelled; /// The total number of files. + @override int get total; + + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreObjectFileCountsImplCopyWith<_$VectorStoreObjectFileCountsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -45342,12 +47853,18 @@ mixin _$CreateVectorStoreRequest { ChunkingStrategyRequestParam? get chunkingStrategy => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; + /// Serializes this CreateVectorStoreRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateVectorStoreRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -45382,6 +47899,8 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45415,6 +47934,8 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VectorStoreExpirationAfterCopyWith<$Res>? 
get expiresAfter { @@ -45428,6 +47949,8 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { @@ -45476,6 +47999,8 @@ class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> $Res Function(_$CreateVectorStoreRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45557,7 +48082,9 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { @JsonKey(name: 'chunking_strategy', includeIfNull: false) final ChunkingStrategyRequestParam? chunkingStrategy; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) final dynamic metadata; @@ -45581,7 +48108,7 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -45591,7 +48118,9 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { chunkingStrategy, const DeepCollectionEquality().hash(metadata)); - @JsonKey(ignore: true) + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateVectorStoreRequestImplCopyWith<_$CreateVectorStoreRequestImpl> @@ -45622,34 +48151,38 @@ abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { factory _CreateVectorStoreRequest.fromJson(Map json) = _$CreateVectorStoreRequestImpl.fromJson; - @override - /// The name of the vector store. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + @override @JsonKey(name: 'file_ids', includeIfNull: false) List? get fileIds; - @override /// The expiration policy for a vector store. + @override @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? get expiresAfter; - @override /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override @JsonKey(name: 'chunking_strategy', includeIfNull: false) ChunkingStrategyRequestParam? get chunkingStrategy; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) dynamic get metadata; + + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateVectorStoreRequestImplCopyWith<_$CreateVectorStoreRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -45670,12 +48203,18 @@ mixin _$UpdateVectorStoreRequest { VectorStoreExpirationAfter? get expiresAfter => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; + /// Serializes this UpdateVectorStoreRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $UpdateVectorStoreRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -45706,6 +48245,8 @@ class _$UpdateVectorStoreRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45729,6 +48270,8 @@ class _$UpdateVectorStoreRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter { @@ -45772,6 +48315,8 @@ class __$$UpdateVectorStoreRequestImplCopyWithImpl<$Res> $Res Function(_$UpdateVectorStoreRequestImpl) _then) : super(_value, _then); + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45818,7 +48363,9 @@ class _$UpdateVectorStoreRequestImpl extends _UpdateVectorStoreRequest { @JsonKey(name: 'expires_after', includeIfNull: false) final VectorStoreExpirationAfter? expiresAfter; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. 
@override @JsonKey(includeIfNull: false) final dynamic metadata; @@ -45839,12 +48386,14 @@ class _$UpdateVectorStoreRequestImpl extends _UpdateVectorStoreRequest { const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, expiresAfter, const DeepCollectionEquality().hash(metadata)); - @JsonKey(ignore: true) + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$UpdateVectorStoreRequestImplCopyWith<_$UpdateVectorStoreRequestImpl> @@ -45871,23 +48420,27 @@ abstract class _UpdateVectorStoreRequest extends UpdateVectorStoreRequest { factory _UpdateVectorStoreRequest.fromJson(Map json) = _$UpdateVectorStoreRequestImpl.fromJson; - @override - /// The name of the vector store. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The expiration policy for a vector store. + @override @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? get expiresAfter; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) dynamic get metadata; + + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$UpdateVectorStoreRequestImplCopyWith<_$UpdateVectorStoreRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -45917,8 +48470,12 @@ mixin _$ListVectorStoresResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListVectorStoresResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListVectorStoresResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -45948,6 +48505,8 @@ class _$ListVectorStoresResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46009,6 +48568,8 @@ class __$$ListVectorStoresResponseImplCopyWithImpl<$Res> $Res Function(_$ListVectorStoresResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -46105,12 +48666,14 @@ class _$ListVectorStoresResponseImpl extends _ListVectorStoresResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListVectorStoresResponseImplCopyWith<_$ListVectorStoresResponseImpl> @@ -46138,31 +48701,33 @@ abstract class _ListVectorStoresResponse extends ListVectorStoresResponse { factory _ListVectorStoresResponse.fromJson(Map json) = _$ListVectorStoresResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// A list of assistant files. - List get data; @override + List get data; /// The ID of the first assistant file in the list. + @override @JsonKey(name: 'first_id') String? get firstId; - @override /// The ID of the last assistant file in the list. + @override @JsonKey(name: 'last_id') String? get lastId; - @override /// Whether there are more assistant files available. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListVectorStoresResponseImplCopyWith<_$ListVectorStoresResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -46183,8 +48748,12 @@ mixin _$DeleteVectorStoreResponse { /// The object type, which is always `vector_store.deleted`. String get object => throw _privateConstructorUsedError; + /// Serializes this DeleteVectorStoreResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteVectorStoreResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -46209,6 +48778,8 @@ class _$DeleteVectorStoreResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46255,6 +48826,8 @@ class __$$DeleteVectorStoreResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteVectorStoreResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46316,11 +48889,13 @@ class _$DeleteVectorStoreResponseImpl extends _DeleteVectorStoreResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteVectorStoreResponseImplCopyWith<_$DeleteVectorStoreResponseImpl> @@ -46345,20 +48920,22 @@ abstract class _DeleteVectorStoreResponse extends DeleteVectorStoreResponse { factory _DeleteVectorStoreResponse.fromJson(Map json) = _$DeleteVectorStoreResponseImpl.fromJson; - @override - /// The ID of the deleted vector store. - String get id; @override + String get id; /// Whether the vector store was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always `vector_store.deleted`. + @override String get object; + + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteVectorStoreResponseImplCopyWith<_$DeleteVectorStoreResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -46402,8 +48979,12 @@ mixin _$VectorStoreFileObject { ChunkingStrategyResponseParam? get chunkingStrategy => throw _privateConstructorUsedError; + /// Serializes this VectorStoreFileObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreFileObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -46440,6 +49021,8 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46488,6 +49071,8 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError { @@ -46501,6 +49086,8 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, }); } + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy { @@ -46550,6 +49137,8 @@ class __$$VectorStoreFileObjectImplCopyWithImpl<$Res> $Res Function(_$VectorStoreFileObjectImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46680,12 +49269,14 @@ class _$VectorStoreFileObjectImpl extends _VectorStoreFileObject { other.chunkingStrategy == chunkingStrategy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, object, usageBytes, createdAt, vectorStoreId, status, lastError, chunkingStrategy); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> @@ -46718,46 +49309,48 @@ abstract class _VectorStoreFileObject extends VectorStoreFileObject { factory _VectorStoreFileObject.fromJson(Map json) = _$VectorStoreFileObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `vector_store.file`. - String get object; @override + String get object; /// The total vector store usage in bytes. Note that this may be different from the original file size. + @override @JsonKey(name: 'usage_bytes') int get usageBytes; - @override /// The Unix timestamp (in seconds) for when the vector store file was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. + @override @JsonKey(name: 'vector_store_id') String get vectorStoreId; - @override /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. - VectorStoreFileStatus get status; @override + VectorStoreFileStatus get status; /// The last error associated with this vector store file. Will be `null` if there are no errors. + @override @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? get lastError; - @override /// The chunking strategy used to chunk the file(s). /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + @override @JsonKey(name: 'chunking_strategy', includeIfNull: false) ChunkingStrategyResponseParam? get chunkingStrategy; + + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -46776,8 +49369,12 @@ mixin _$VectorStoreFileObjectLastError { /// A human-readable description of the error. String get message => throw _privateConstructorUsedError; + /// Serializes this VectorStoreFileObjectLastError to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreFileObjectLastErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -46804,6 +49401,8 @@ class _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46845,6 +49444,8 @@ class __$$VectorStoreFileObjectLastErrorImplCopyWithImpl<$Res> $Res Function(_$VectorStoreFileObjectLastErrorImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -46898,11 +49499,13 @@ class _$VectorStoreFileObjectLastErrorImpl (identical(other.message, message) || other.message == message)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreFileObjectLastErrorImplCopyWith< @@ -46928,16 +49531,18 @@ abstract class _VectorStoreFileObjectLastError factory _VectorStoreFileObjectLastError.fromJson(Map json) = _$VectorStoreFileObjectLastErrorImpl.fromJson; - @override - /// One of `server_error` or `rate_limit_exceeded`. - VectorStoreFileObjectLastErrorCode get code; @override + VectorStoreFileObjectLastErrorCode get code; /// A human-readable description of the error. + @override String get message; + + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreFileObjectLastErrorImplCopyWith< _$VectorStoreFileObjectLastErrorImpl> get copyWith => throw _privateConstructorUsedError; @@ -46961,8 +49566,12 @@ mixin _$StaticChunkingStrategy { @JsonKey(name: 'chunk_overlap_tokens') int get chunkOverlapTokens => throw _privateConstructorUsedError; + /// Serializes this StaticChunkingStrategy to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $StaticChunkingStrategyCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -46989,6 +49598,8 @@ class _$StaticChunkingStrategyCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47032,6 +49643,8 @@ class __$$StaticChunkingStrategyImplCopyWithImpl<$Res> $Res Function(_$StaticChunkingStrategyImpl) _then) : super(_value, _then); + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47091,12 +49704,14 @@ class _$StaticChunkingStrategyImpl extends _StaticChunkingStrategy { other.chunkOverlapTokens == chunkOverlapTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, maxChunkSizeTokens, chunkOverlapTokens); - @JsonKey(ignore: true) + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$StaticChunkingStrategyImplCopyWith<_$StaticChunkingStrategyImpl> @@ -47122,21 +49737,23 @@ abstract class _StaticChunkingStrategy extends StaticChunkingStrategy { factory _StaticChunkingStrategy.fromJson(Map json) = _$StaticChunkingStrategyImpl.fromJson; - @override - /// The maximum number of tokens in each chunk. The default value is `800`. 
The minimum value is `100` and the /// maximum value is `4096`. + @override @JsonKey(name: 'max_chunk_size_tokens') int get maxChunkSizeTokens; - @override /// The number of tokens that overlap between chunks. The default value is `400`. /// /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. + @override @JsonKey(name: 'chunk_overlap_tokens') int get chunkOverlapTokens; + + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$StaticChunkingStrategyImplCopyWith<_$StaticChunkingStrategyImpl> get copyWith => throw _privateConstructorUsedError; } @@ -47158,8 +49775,12 @@ mixin _$CreateVectorStoreFileRequest { ChunkingStrategyRequestParam? get chunkingStrategy => throw _privateConstructorUsedError; + /// Serializes this CreateVectorStoreFileRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateVectorStoreFileRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -47191,6 +49812,8 @@ class _$CreateVectorStoreFileRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47209,6 +49832,8 @@ class _$CreateVectorStoreFileRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { @@ -47251,6 +49876,8 @@ class __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res> $Res Function(_$CreateVectorStoreFileRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47309,11 +49936,13 @@ class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { other.chunkingStrategy == chunkingStrategy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId, chunkingStrategy); - @JsonKey(ignore: true) + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateVectorStoreFileRequestImplCopyWith< @@ -47341,19 +49970,21 @@ abstract class _CreateVectorStoreFileRequest factory _CreateVectorStoreFileRequest.fromJson(Map json) = _$CreateVectorStoreFileRequestImpl.fromJson; - @override - /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. + @override @JsonKey(name: 'file_id') String get fileId; - @override /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. 
/// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override @JsonKey(name: 'chunking_strategy', includeIfNull: false) ChunkingStrategyRequestParam? get chunkingStrategy; + + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateVectorStoreFileRequestImplCopyWith< _$CreateVectorStoreFileRequestImpl> get copyWith => throw _privateConstructorUsedError; @@ -47384,8 +50015,12 @@ mixin _$ListVectorStoreFilesResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListVectorStoreFilesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListVectorStoreFilesResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -47417,6 +50052,8 @@ class _$ListVectorStoreFilesResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47478,6 +50115,8 @@ class __$$ListVectorStoreFilesResponseImplCopyWithImpl<$Res> $Res Function(_$ListVectorStoreFilesResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47575,12 +50214,14 @@ class _$ListVectorStoreFilesResponseImpl extends _ListVectorStoreFilesResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListVectorStoreFilesResponseImplCopyWith< @@ -47610,31 +50251,33 @@ abstract class _ListVectorStoreFilesResponse factory _ListVectorStoreFilesResponse.fromJson(Map json) = _$ListVectorStoreFilesResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// A list of message files. - List get data; @override + List get data; /// The ID of the first message file in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last message file in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more message files available. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListVectorStoreFilesResponseImplCopyWith< _$ListVectorStoreFilesResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -47656,8 +50299,12 @@ mixin _$DeleteVectorStoreFileResponse { /// The object type, which is always `vector_store.file.deleted`. String get object => throw _privateConstructorUsedError; + /// Serializes this DeleteVectorStoreFileResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteVectorStoreFileResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -47684,6 +50331,8 @@ class _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47730,6 +50379,8 @@ class __$$DeleteVectorStoreFileResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteVectorStoreFileResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47793,11 +50444,13 @@ class _$DeleteVectorStoreFileResponseImpl (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteVectorStoreFileResponseImplCopyWith< @@ -47824,20 +50477,22 @@ abstract class _DeleteVectorStoreFileResponse factory _DeleteVectorStoreFileResponse.fromJson(Map json) = _$DeleteVectorStoreFileResponseImpl.fromJson; - @override - /// The ID of the deleted vector store file. - String get id; @override + String get id; /// Whether the vector store file was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always `vector_store.file.deleted`. + @override String get object; + + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteVectorStoreFileResponseImplCopyWith< _$DeleteVectorStoreFileResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -47873,8 +50528,12 @@ mixin _$VectorStoreFileBatchObject { VectorStoreFileBatchObjectFileCounts get fileCounts => throw _privateConstructorUsedError; + /// Serializes this VectorStoreFileBatchObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreFileBatchObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -47909,6 +50568,8 @@ class _$VectorStoreFileBatchObjectCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47947,6 +50608,8 @@ class _$VectorStoreFileBatchObjectCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts { @@ -47989,6 +50652,8 @@ class __$$VectorStoreFileBatchObjectImplCopyWithImpl<$Res> $Res Function(_$VectorStoreFileBatchObjectImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48092,12 +50757,14 @@ class _$VectorStoreFileBatchObjectImpl extends _VectorStoreFileBatchObject { other.fileCounts == fileCounts)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, id, object, createdAt, vectorStoreId, status, fileCounts); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreFileBatchObjectImplCopyWith<_$VectorStoreFileBatchObjectImpl> @@ -48127,35 +50794,37 @@ abstract class _VectorStoreFileBatchObject extends VectorStoreFileBatchObject { factory _VectorStoreFileBatchObject.fromJson(Map json) = _$VectorStoreFileBatchObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `vector_store.file_batch`. - String get object; @override + String get object; /// The Unix timestamp (in seconds) for when the vector store files batch was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. + @override @JsonKey(name: 'vector_store_id') String get vectorStoreId; - @override /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. - VectorStoreFileBatchObjectStatus get status; @override + VectorStoreFileBatchObjectStatus get status; /// The number of files per status. + @override @JsonKey(name: 'file_counts') VectorStoreFileBatchObjectFileCounts get fileCounts; + + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreFileBatchObjectImplCopyWith<_$VectorStoreFileBatchObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -48183,8 +50852,12 @@ mixin _$VectorStoreFileBatchObjectFileCounts { /// The total number of files. 
int get total => throw _privateConstructorUsedError; + /// Serializes this VectorStoreFileBatchObjectFileCounts to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreFileBatchObjectFileCountsCopyWith< VectorStoreFileBatchObjectFileCounts> get copyWith => throw _privateConstructorUsedError; @@ -48217,6 +50890,8 @@ class _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48278,6 +50953,8 @@ class __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl<$Res> $Res Function(_$VectorStoreFileBatchObjectFileCountsImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48369,12 +51046,14 @@ class _$VectorStoreFileBatchObjectFileCountsImpl (identical(other.total, total) || other.total == total)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, inProgress, completed, failed, cancelled, total); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreFileBatchObjectFileCountsImplCopyWith< @@ -48404,29 +51083,31 @@ abstract class _VectorStoreFileBatchObjectFileCounts Map json) = _$VectorStoreFileBatchObjectFileCountsImpl.fromJson; - @override - /// The number of files that are currently being processed. + @override @JsonKey(name: 'in_progress') int get inProgress; - @override /// The number of files that have been processed. - int get completed; @override + int get completed; /// The number of files that have failed to process. - int get failed; @override + int get failed; /// The number of files that where cancelled. - int get cancelled; @override + int get cancelled; /// The total number of files. + @override int get total; + + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreFileBatchObjectFileCountsImplCopyWith< _$VectorStoreFileBatchObjectFileCountsImpl> get copyWith => throw _privateConstructorUsedError; @@ -48449,8 +51130,12 @@ mixin _$CreateVectorStoreFileBatchRequest { ChunkingStrategyRequestParam? get chunkingStrategy => throw _privateConstructorUsedError; + /// Serializes this CreateVectorStoreFileBatchRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $CreateVectorStoreFileBatchRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -48482,6 +51167,8 @@ class _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48500,6 +51187,8 @@ class _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { @@ -48542,6 +51231,8 @@ class __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res> $Res Function(_$CreateVectorStoreFileBatchRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48609,12 +51300,14 @@ class _$CreateVectorStoreFileBatchRequestImpl other.chunkingStrategy == chunkingStrategy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_fileIds), chunkingStrategy); - @JsonKey(ignore: true) + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateVectorStoreFileBatchRequestImplCopyWith< @@ -48643,19 +51336,21 @@ abstract class _CreateVectorStoreFileBatchRequest Map json) = _$CreateVectorStoreFileBatchRequestImpl.fromJson; - @override - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + @override @JsonKey(name: 'file_ids') List get fileIds; - @override /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override @JsonKey(name: 'chunking_strategy', includeIfNull: false) ChunkingStrategyRequestParam? get chunkingStrategy; + + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateVectorStoreFileBatchRequestImplCopyWith< _$CreateVectorStoreFileBatchRequestImpl> get copyWith => throw _privateConstructorUsedError; @@ -48679,8 +51374,12 @@ mixin _$Error { /// The type of error. String get type => throw _privateConstructorUsedError; + /// Serializes this Error to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -48702,6 +51401,8 @@ class _$ErrorCopyWithImpl<$Res, $Val extends Error> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. 
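`CreateVectorStoreFileBatchRequest` serializes `fileIds` as `file_ids` and drops `chunking_strategy` from the JSON map when `chunkingStrategy` is null, in which case the service applies the `auto` strategy. A short sketch, assuming the Freezed constructor with these named parameters:

// Illustrative sketch; constructor shape assumed from the Freezed pattern
// used throughout this file.
import 'package:openai_dart/openai_dart.dart';

void main() {
  final request = CreateVectorStoreFileBatchRequest(
    fileIds: ['file-abc123', 'file-def456'], // hypothetical uploaded file IDs
    // chunkingStrategy omitted -> `chunking_strategy` is left out of the JSON
    // (includeIfNull: false) and the server falls back to the auto strategy.
  );
  print(request.toJson()); // {file_ids: [file-abc123, file-def456]}
}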
@pragma('vm:prefer-inline') @override $Res call({ @@ -48749,6 +51450,8 @@ class __$$ErrorImplCopyWithImpl<$Res> _$ErrorImpl _value, $Res Function(_$ErrorImpl) _then) : super(_value, _then); + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48823,11 +51526,13 @@ class _$ErrorImpl extends _Error { (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message, param, type); - @JsonKey(ignore: true) + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ErrorImplCopyWith<_$ErrorImpl> get copyWith => @@ -48851,24 +51556,26 @@ abstract class _Error extends Error { factory _Error.fromJson(Map json) = _$ErrorImpl.fromJson; - @override - /// The error code. - String? get code; @override + String? get code; /// A human-readable description of the error. - String get message; @override + String get message; /// The parameter in the request that caused the error. - String? get param; @override + String? get param; /// The type of error. + @override String get type; + + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ErrorImplCopyWith<_$ErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -48899,8 +51606,12 @@ mixin _$CreateBatchRequest { @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this CreateBatchRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateBatchRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -48929,6 +51640,8 @@ class _$CreateBatchRequestCopyWithImpl<$Res, $Val extends CreateBatchRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48982,6 +51695,8 @@ class __$$CreateBatchRequestImplCopyWithImpl<$Res> $Res Function(_$CreateBatchRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -49076,12 +51791,14 @@ class _$CreateBatchRequestImpl extends _CreateBatchRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, inputFileId, endpoint, completionWindow, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateBatchRequestImplCopyWith<_$CreateBatchRequestImpl> get copyWith => @@ -49109,31 +51826,33 @@ abstract class _CreateBatchRequest extends CreateBatchRequest { factory _CreateBatchRequest.fromJson(Map json) = _$CreateBatchRequestImpl.fromJson; - @override - /// The ID of an uploaded file that contains requests for the new batch. /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + @override @JsonKey(name: 'input_file_id') String get inputFileId; - @override /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. - BatchEndpoint get endpoint; @override + BatchEndpoint get endpoint; /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @override @JsonKey(name: 'completion_window') BatchCompletionWindow get completionWindow; - @override /// Optional custom metadata for the batch. + @override @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateBatchRequestImplCopyWith<_$CreateBatchRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -49217,12 +51936,18 @@ mixin _$Batch { @JsonKey(name: 'request_counts', includeIfNull: false) BatchRequestCounts? get requestCounts => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; + /// Serializes this Batch to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $BatchCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -49270,6 +51995,8 @@ class _$BatchCopyWithImpl<$Res, $Val extends Batch> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -49378,6 +52105,8 @@ class _$BatchCopyWithImpl<$Res, $Val extends Batch> ) as $Val); } + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $BatchErrorsCopyWith<$Res>? 
get errors { @@ -49390,6 +52119,8 @@ class _$BatchCopyWithImpl<$Res, $Val extends Batch> }); } + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $BatchRequestCountsCopyWith<$Res>? get requestCounts { @@ -49449,6 +52180,8 @@ class __$$BatchImplCopyWithImpl<$Res> _$BatchImpl _value, $Res Function(_$BatchImpl) _then) : super(_value, _then); + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -49678,7 +52411,9 @@ class _$BatchImpl extends _Batch { @JsonKey(name: 'request_counts', includeIfNull: false) final BatchRequestCounts? requestCounts; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) final dynamic metadata; @@ -49730,7 +52465,7 @@ class _$BatchImpl extends _Batch { const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hashAll([ runtimeType, @@ -49756,7 +52491,9 @@ class _$BatchImpl extends _Batch { const DeepCollectionEquality().hash(metadata) ]); - @JsonKey(ignore: true) + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$BatchImplCopyWith<_$BatchImpl> get copyWith => @@ -49805,104 +52542,108 @@ abstract class _Batch extends Batch { factory _Batch.fromJson(Map json) = _$BatchImpl.fromJson; - @override - /// No Description - String get id; @override + String get id; /// The object type, which is always `batch`. - BatchObject get object; @override + BatchObject get object; /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. - BatchEndpoint get endpoint; @override + BatchEndpoint get endpoint; /// No Description + @override @JsonKey(includeIfNull: false) BatchErrors? get errors; - @override /// The ID of the input file for the batch. + @override @JsonKey(name: 'input_file_id') String get inputFileId; - @override /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @override @JsonKey(name: 'completion_window') BatchCompletionWindow get completionWindow; - @override /// The current status of the batch. - BatchStatus get status; @override + BatchStatus get status; /// The ID of the file containing the outputs of successfully executed requests. + @override @JsonKey(name: 'output_file_id', includeIfNull: false) String? get outputFileId; - @override /// The ID of the file containing the outputs of requests with errors. + @override @JsonKey(name: 'error_file_id', includeIfNull: false) String? 
get errorFileId; - @override /// The Unix timestamp (in seconds) for when the batch was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The Unix timestamp (in seconds) for when the batch started processing. + @override @JsonKey(name: 'in_progress_at', includeIfNull: false) int? get inProgressAt; - @override /// The Unix timestamp (in seconds) for when the batch will expire. + @override @JsonKey(name: 'expires_at', includeIfNull: false) int? get expiresAt; - @override /// The Unix timestamp (in seconds) for when the batch started finalizing. + @override @JsonKey(name: 'finalizing_at', includeIfNull: false) int? get finalizingAt; - @override /// The Unix timestamp (in seconds) for when the batch was completed. + @override @JsonKey(name: 'completed_at', includeIfNull: false) int? get completedAt; - @override /// The Unix timestamp (in seconds) for when the batch failed. + @override @JsonKey(name: 'failed_at', includeIfNull: false) int? get failedAt; - @override /// The Unix timestamp (in seconds) for when the batch expired. + @override @JsonKey(name: 'expired_at', includeIfNull: false) int? get expiredAt; - @override /// The Unix timestamp (in seconds) for when the batch started cancelling. + @override @JsonKey(name: 'cancelling_at', includeIfNull: false) int? get cancellingAt; - @override /// The Unix timestamp (in seconds) for when the batch was cancelled. + @override @JsonKey(name: 'cancelled_at', includeIfNull: false) int? get cancelledAt; - @override /// The request counts for different statuses within the batch. + @override @JsonKey(name: 'request_counts', includeIfNull: false) BatchRequestCounts? get requestCounts; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) dynamic get metadata; + + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$BatchImplCopyWith<_$BatchImpl> get copyWith => throw _privateConstructorUsedError; } @@ -49921,8 +52662,12 @@ mixin _$BatchErrors { @JsonKey(includeIfNull: false) List? get data => throw _privateConstructorUsedError; + /// Serializes this BatchErrors to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $BatchErrorsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -49948,6 +52693,8 @@ class _$BatchErrorsCopyWithImpl<$Res, $Val extends BatchErrors> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. 
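`CreateBatchRequest` ties an uploaded JSONL input file (purpose `batch`, at most 50,000 requests / 100 MB) to one of the supported endpoints and the `24h` completion window, while `Batch` is the read model returned as the job progresses. A sketch of building the request; the enum member names (`BatchEndpoint.v1ChatCompletions`, `BatchCompletionWindow.v24h`) are assumptions about how the generator names the wire values `/v1/chat/completions` and `24h`:

// Illustrative sketch; enum member names are assumed, not taken from the patch.
import 'package:openai_dart/openai_dart.dart';

void main() {
  final request = CreateBatchRequest(
    inputFileId: 'file-abc123',                   // hypothetical JSONL file uploaded with purpose `batch`
    endpoint: BatchEndpoint.v1ChatCompletions,    // assumed member for '/v1/chat/completions'
    completionWindow: BatchCompletionWindow.v24h, // assumed member for '24h', the only supported window
    metadata: {'project': 'nightly-eval'},        // optional, up to 16 key-value pairs
  );
  print(request.toJson());
}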
@pragma('vm:prefer-inline') @override $Res call({ @@ -49988,6 +52735,8 @@ class __$$BatchErrorsImplCopyWithImpl<$Res> _$BatchErrorsImpl _value, $Res Function(_$BatchErrorsImpl) _then) : super(_value, _then); + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50052,12 +52801,14 @@ class _$BatchErrorsImpl extends _BatchErrors { const DeepCollectionEquality().equals(other._data, _data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, object, const DeepCollectionEquality().hash(_data)); - @JsonKey(ignore: true) + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$BatchErrorsImplCopyWith<_$BatchErrorsImpl> get copyWith => @@ -50081,18 +52832,20 @@ abstract class _BatchErrors extends BatchErrors { factory _BatchErrors.fromJson(Map json) = _$BatchErrorsImpl.fromJson; - @override - /// The object type, which is always `list`. + @override @JsonKey(includeIfNull: false) String? get object; - @override /// No Description + @override @JsonKey(includeIfNull: false) List? get data; + + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$BatchErrorsImplCopyWith<_$BatchErrorsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -50112,8 +52865,12 @@ mixin _$BatchRequestCounts { /// Number of requests that have failed. int get failed => throw _privateConstructorUsedError; + /// Serializes this BatchRequestCounts to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $BatchRequestCountsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -50137,6 +52894,8 @@ class _$BatchRequestCountsCopyWithImpl<$Res, $Val extends BatchRequestCounts> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50180,6 +52939,8 @@ class __$$BatchRequestCountsImplCopyWithImpl<$Res> $Res Function(_$BatchRequestCountsImpl) _then) : super(_value, _then); + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50242,11 +53003,13 @@ class _$BatchRequestCountsImpl extends _BatchRequestCounts { (identical(other.failed, failed) || other.failed == failed)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, total, completed, failed); - @JsonKey(ignore: true) + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. 
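`BatchRequestCounts` (and `BatchErrors`) are plain data holders whose JSON keys match the field names, so reading the progress counters out of a status payload is direct. A minimal sketch, assuming the standard generated `fromJson` factory:

import 'package:openai_dart/openai_dart.dart';

void main() {
  // Hypothetical `request_counts` payload from a batch status response.
  final counts = BatchRequestCounts.fromJson({
    'total': 100,
    'completed': 95,
    'failed': 5,
  });
  print('done: ${counts.completed}/${counts.total}, failed: ${counts.failed}');
}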
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$BatchRequestCountsImplCopyWith<_$BatchRequestCountsImpl> get copyWith => @@ -50271,20 +53034,22 @@ abstract class _BatchRequestCounts extends BatchRequestCounts { factory _BatchRequestCounts.fromJson(Map json) = _$BatchRequestCountsImpl.fromJson; - @override - /// Total number of requests in the batch. - int get total; @override + int get total; /// Number of requests that have been completed successfully. - int get completed; @override + int get completed; /// Number of requests that have failed. + @override int get failed; + + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$BatchRequestCountsImplCopyWith<_$BatchRequestCountsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -50311,8 +53076,12 @@ mixin _$BatchErrorsDataInner { @JsonKey(includeIfNull: false) int? get line => throw _privateConstructorUsedError; + /// Serializes this BatchErrorsDataInner to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $BatchErrorsDataInnerCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -50341,6 +53110,8 @@ class _$BatchErrorsDataInnerCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50393,6 +53164,8 @@ class __$$BatchErrorsDataInnerImplCopyWithImpl<$Res> $Res Function(_$BatchErrorsDataInnerImpl) _then) : super(_value, _then); + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50471,11 +53244,13 @@ class _$BatchErrorsDataInnerImpl extends _BatchErrorsDataInner { (identical(other.line, line) || other.line == line)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message, param, line); - @JsonKey(ignore: true) + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$BatchErrorsDataInnerImplCopyWith<_$BatchErrorsDataInnerImpl> @@ -50503,28 +53278,30 @@ abstract class _BatchErrorsDataInner extends BatchErrorsDataInner { factory _BatchErrorsDataInner.fromJson(Map json) = _$BatchErrorsDataInnerImpl.fromJson; - @override - /// An error code identifying the error type. + @override @JsonKey(includeIfNull: false) String? get code; - @override /// A human-readable message providing more details about the error. + @override @JsonKey(includeIfNull: false) String? get message; - @override /// The name of the parameter that caused the error, if applicable. + @override @JsonKey(includeIfNull: false) String? get param; - @override /// The line number of the input file where the error occurred, if applicable. + @override @JsonKey(includeIfNull: false) int? 
get line; + + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$BatchErrorsDataInnerImplCopyWith<_$BatchErrorsDataInnerImpl> get copyWith => throw _privateConstructorUsedError; } @@ -50553,8 +53330,12 @@ mixin _$ListBatchesResponse { /// The object type, which is always `list`. ListBatchesResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this ListBatchesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListBatchesResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -50583,6 +53364,8 @@ class _$ListBatchesResponseCopyWithImpl<$Res, $Val extends ListBatchesResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50641,6 +53424,8 @@ class __$$ListBatchesResponseImplCopyWithImpl<$Res> $Res Function(_$ListBatchesResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50737,7 +53522,7 @@ class _$ListBatchesResponseImpl extends _ListBatchesResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -50747,7 +53532,9 @@ class _$ListBatchesResponseImpl extends _ListBatchesResponse { hasMore, object); - @JsonKey(ignore: true) + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListBatchesResponseImplCopyWith<_$ListBatchesResponseImpl> get copyWith => @@ -50775,31 +53562,33 @@ abstract class _ListBatchesResponse extends ListBatchesResponse { factory _ListBatchesResponse.fromJson(Map json) = _$ListBatchesResponseImpl.fromJson; - @override - /// No Description - List get data; @override + List get data; /// The ID of the first batch in the list. + @override @JsonKey(name: 'first_id', includeIfNull: false) String? get firstId; - @override /// The ID of the last batch in the list. + @override @JsonKey(name: 'last_id', includeIfNull: false) String? get lastId; - @override /// Whether there are more batches available. + @override @JsonKey(name: 'has_more') bool get hasMore; - @override /// The object type, which is always `list`. + @override ListBatchesResponseObject get object; + + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListBatchesResponseImplCopyWith<_$ListBatchesResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -50944,8 +53733,13 @@ mixin _$ChatCompletionMessage { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionMessage to a JSON map. 
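`ListBatchesResponse` models cursor pagination: `firstId`/`lastId` bound the current page and `hasMore` (wire key `has_more`) tells the caller whether to request another page, typically by passing `lastId` back as the `after` cursor. A sketch of reading those fields, assuming the generated `fromJson` factory:

// Illustrative sketch; the payload is a hypothetical (empty) page.
import 'package:openai_dart/openai_dart.dart';

void main() {
  final page = ListBatchesResponse.fromJson({
    'data': [],
    'has_more': false,
    'object': 'list',
  });
  if (page.hasMore) {
    print('fetch next page after ${page.lastId}');
  } else {
    print('all ${page.data.length} batches loaded');
  }
}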
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionMessageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -50970,6 +53764,8 @@ class _$ChatCompletionMessageCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -51009,6 +53805,8 @@ class __$$ChatCompletionSystemMessageImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionSystemMessageImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -51075,11 +53873,13 @@ class _$ChatCompletionSystemMessageImpl extends ChatCompletionSystemMessage { (identical(other.name, name) || other.name == name)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, role, content, name); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionSystemMessageImplCopyWith<_$ChatCompletionSystemMessageImpl> @@ -51242,20 +54042,22 @@ abstract class ChatCompletionSystemMessage extends ChatCompletionMessage { factory ChatCompletionSystemMessage.fromJson(Map json) = _$ChatCompletionSystemMessageImpl.fromJson; - @override - /// The role of the messages author, in this case `system`. - ChatCompletionMessageRole get role; @override + ChatCompletionMessageRole get role; /// The contents of the system message. + @override String get content; /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. @JsonKey(includeIfNull: false) String? get name; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionSystemMessageImplCopyWith<_$ChatCompletionSystemMessageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -51288,6 +54090,8 @@ class __$$ChatCompletionUserMessageImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionUserMessageImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -51311,6 +54115,8 @@ class __$$ChatCompletionUserMessageImplCopyWithImpl<$Res> )); } + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $ChatCompletionUserMessageContentCopyWith<$Res> get content { @@ -51363,11 +54169,13 @@ class _$ChatCompletionUserMessageImpl extends ChatCompletionUserMessage { (identical(other.name, name) || other.name == name)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, role, content, name); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionUserMessageImplCopyWith<_$ChatCompletionUserMessageImpl> @@ -51531,21 +54339,23 @@ abstract class ChatCompletionUserMessage extends ChatCompletionMessage { factory ChatCompletionUserMessage.fromJson(Map json) = _$ChatCompletionUserMessageImpl.fromJson; - @override - /// The role of the messages author, in this case `user`. - ChatCompletionMessageRole get role; @override + ChatCompletionMessageRole get role; /// The contents of the user message. + @override @_ChatCompletionUserMessageContentConverter() ChatCompletionUserMessageContent get content; /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. @JsonKey(includeIfNull: false) String? get name; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionUserMessageImplCopyWith<_$ChatCompletionUserMessageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -51581,6 +54391,8 @@ class __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionAssistantMessageImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -51614,6 +54426,8 @@ class __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res> )); } + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall { @@ -51699,12 +54513,14 @@ class _$ChatCompletionAssistantMessageImpl other.functionCall == functionCall)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, role, content, name, const DeepCollectionEquality().hash(_toolCalls), functionCall); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionAssistantMessageImplCopyWith< @@ -51872,13 +54688,12 @@ abstract class ChatCompletionAssistantMessage extends ChatCompletionMessage { factory ChatCompletionAssistantMessage.fromJson(Map json) = _$ChatCompletionAssistantMessageImpl.fromJson; - @override - /// The role of the messages author, in this case `assistant`. - ChatCompletionMessageRole get role; @override + ChatCompletionMessageRole get role; /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + @override @JsonKey(includeIfNull: false) String? 
get content; @@ -51893,8 +54708,11 @@ abstract class ChatCompletionAssistantMessage extends ChatCompletionMessage { /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionMessageFunctionCall? get functionCall; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionAssistantMessageImplCopyWith< _$ChatCompletionAssistantMessageImpl> get copyWith => throw _privateConstructorUsedError; @@ -51925,6 +54743,8 @@ class __$$ChatCompletionToolMessageImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionToolMessageImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -51991,11 +54811,13 @@ class _$ChatCompletionToolMessageImpl extends ChatCompletionToolMessage { other.toolCallId == toolCallId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, role, content, toolCallId); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolMessageImplCopyWith<_$ChatCompletionToolMessageImpl> @@ -52158,20 +54980,22 @@ abstract class ChatCompletionToolMessage extends ChatCompletionMessage { factory ChatCompletionToolMessage.fromJson(Map json) = _$ChatCompletionToolMessageImpl.fromJson; - @override - /// The role of the messages author, in this case `tool`. - ChatCompletionMessageRole get role; @override + ChatCompletionMessageRole get role; /// The contents of the tool message. + @override String get content; /// Tool call that this message is responding to. @JsonKey(name: 'tool_call_id') String get toolCallId; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionToolMessageImplCopyWith<_$ChatCompletionToolMessageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -52198,6 +55022,8 @@ class __$$ChatCompletionFunctionMessageImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionFunctionMessageImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -52264,11 +55090,13 @@ class _$ChatCompletionFunctionMessageImpl (identical(other.name, name) || other.name == name)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, role, content, name); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionMessageImplCopyWith< @@ -52431,19 +55259,21 @@ abstract class ChatCompletionFunctionMessage extends ChatCompletionMessage { factory ChatCompletionFunctionMessage.fromJson(Map json) = _$ChatCompletionFunctionMessageImpl.fromJson; - @override - /// The role of the messages author, in this case `function`. - ChatCompletionMessageRole get role; @override + ChatCompletionMessageRole get role; /// The contents of the function message. + @override String? get content; /// The name of the function to call. String get name; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionFunctionMessageImplCopyWith< _$ChatCompletionFunctionMessageImpl> get copyWith => throw _privateConstructorUsedError; @@ -52509,6 +55339,8 @@ mixin _$ChatCompletionUserMessageContent { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionUserMessageContent to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -52531,6 +55363,9 @@ class _$ChatCompletionUserMessageContentCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -52553,6 +55388,8 @@ class __$$ChatCompletionMessageContentPartsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageContentPartsImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -52606,12 +55443,14 @@ class _$ChatCompletionMessageContentPartsImpl const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageContentPartsImplCopyWith< @@ -52704,7 +55543,10 @@ abstract class ChatCompletionMessageContentParts @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageContentPartsImplCopyWith< _$ChatCompletionMessageContentPartsImpl> get copyWith => throw _privateConstructorUsedError; @@ -52730,6 +55572,8 @@ class __$$ChatCompletionUserMessageContentStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionUserMessageContentStringImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -52776,11 +55620,13 @@ class _$ChatCompletionUserMessageContentStringImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionUserMessageContentStringImplCopyWith< @@ -52873,7 +55719,10 @@ abstract class ChatCompletionUserMessageContentString @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionUserMessageContentStringImplCopyWith< _$ChatCompletionUserMessageContentStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -52950,8 +55799,13 @@ mixin _$ChatCompletionMessageContentPart { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionMessageContentPart to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionMessageContentPartCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -52978,6 +55832,8 @@ class _$ChatCompletionMessageContentPartCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53014,6 +55870,8 @@ class __$$ChatCompletionMessageContentPartTextImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageContentPartTextImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53069,11 +55927,13 @@ class _$ChatCompletionMessageContentPartTextImpl (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, text); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageContentPartTextImplCopyWith< @@ -53173,15 +56033,17 @@ abstract class ChatCompletionMessageContentPartText Map json) = _$ChatCompletionMessageContentPartTextImpl.fromJson; - @override - /// The type of the content part, in this case `text`. + @override ChatCompletionMessageContentPartType get type; /// The text content. String get text; + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. 
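`ChatCompletionMessage` is a sealed Freezed union over the system, user, assistant, tool and function variants, and the user variant's `content` is itself a union (a plain string or a list of content parts). A sketch of a minimal text exchange; the shorthand factories (`ChatCompletionMessage.system`, `.user`, `ChatCompletionUserMessageContent.string`) are assumed from the package's public API rather than taken from this generated file:

// Illustrative sketch; factory names are assumptions (see note above).
import 'package:openai_dart/openai_dart.dart';

void main() {
  final messages = <ChatCompletionMessage>[
    ChatCompletionMessage.system(
      content: 'You are a terse assistant.',
    ),
    ChatCompletionMessage.user(
      content: ChatCompletionUserMessageContent.string('Ping?'),
    ),
  ];
  for (final message in messages) {
    print(message.toJson());
  }
}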
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageContentPartTextImplCopyWith< _$ChatCompletionMessageContentPartTextImpl> get copyWith => throw _privateConstructorUsedError; @@ -53213,6 +56075,8 @@ class __$$ChatCompletionMessageContentPartImageImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageContentPartImageImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53231,6 +56095,8 @@ class __$$ChatCompletionMessageContentPartImageImplCopyWithImpl<$Res> )); } + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionMessageImageUrlCopyWith<$Res> get imageUrl { @@ -53279,11 +56145,13 @@ class _$ChatCompletionMessageContentPartImageImpl other.imageUrl == imageUrl)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, imageUrl); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageContentPartImageImplCopyWith< @@ -53385,16 +56253,18 @@ abstract class ChatCompletionMessageContentPartImage Map json) = _$ChatCompletionMessageContentPartImageImpl.fromJson; - @override - /// The type of the content part, in this case `image_url`. + @override ChatCompletionMessageContentPartType get type; /// The URL of the image. @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl get imageUrl; + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageContentPartImageImplCopyWith< _$ChatCompletionMessageContentPartImageImpl> get copyWith => throw _privateConstructorUsedError; @@ -53414,8 +56284,12 @@ mixin _$ChatCompletionMessageImageUrl { ChatCompletionMessageImageDetail get detail => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionMessageImageUrl to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionMessageImageUrlCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -53442,6 +56316,8 @@ class _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53483,6 +56359,8 @@ class __$$ChatCompletionMessageImageUrlImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageImageUrlImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -53537,11 +56415,13 @@ class _$ChatCompletionMessageImageUrlImpl (identical(other.detail, detail) || other.detail == detail)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, url, detail); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageImageUrlImplCopyWith< @@ -53568,16 +56448,18 @@ abstract class _ChatCompletionMessageImageUrl factory _ChatCompletionMessageImageUrl.fromJson(Map json) = _$ChatCompletionMessageImageUrlImpl.fromJson; - @override - /// Either a URL of the image or the base64 encoded image data. - String get url; @override + String get url; /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). + @override ChatCompletionMessageImageDetail get detail; + + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageImageUrlImplCopyWith< _$ChatCompletionMessageImageUrlImpl> get copyWith => throw _privateConstructorUsedError; @@ -53659,8 +56541,13 @@ mixin _$AssistantTools { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this AssistantTools to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $AssistantToolsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -53684,6 +56571,8 @@ class _$AssistantToolsCopyWithImpl<$Res, $Val extends AssistantTools> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53720,6 +56609,8 @@ class __$$AssistantToolsCodeInterpreterImplCopyWithImpl<$Res> $Res Function(_$AssistantToolsCodeInterpreterImpl) _then) : super(_value, _then); + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53763,11 +56654,13 @@ class _$AssistantToolsCodeInterpreterImpl (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. 
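The content-part variants cover multimodal user input: a `text` part and an `image_url` part wrapping `ChatCompletionMessageImageUrl`, which takes either an image URL or base64-encoded data plus a `detail` level. A sketch of a mixed text-and-image message; the `.parts`, `.text`, `.image` factories and the `ChatCompletionMessageImageDetail.auto` member are assumptions, not taken from this generated file:

// Illustrative sketch; factory and enum member names are assumptions.
import 'package:openai_dart/openai_dart.dart';

void main() {
  final message = ChatCompletionMessage.user(
    content: ChatCompletionUserMessageContent.parts([
      ChatCompletionMessageContentPart.text(
        text: 'What is shown in this picture?',
      ),
      ChatCompletionMessageContentPart.image(
        imageUrl: ChatCompletionMessageImageUrl(
          url: 'https://example.com/cat.png', // hypothetical image URL
          detail: ChatCompletionMessageImageDetail.auto,
        ),
      ),
    ]),
  );
  print(message.toJson());
}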
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantToolsCodeInterpreterImplCopyWith< @@ -53872,12 +56765,14 @@ abstract class AssistantToolsCodeInterpreter extends AssistantTools { factory AssistantToolsCodeInterpreter.fromJson(Map json) = _$AssistantToolsCodeInterpreterImpl.fromJson; - @override - /// The type of tool being defined: `code_interpreter` + @override String get type; + + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantToolsCodeInterpreterImplCopyWith< _$AssistantToolsCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -53909,6 +56804,8 @@ class __$$AssistantToolsFileSearchImplCopyWithImpl<$Res> $Res Function(_$AssistantToolsFileSearchImpl) _then) : super(_value, _then); + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53927,6 +56824,8 @@ class __$$AssistantToolsFileSearchImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantToolsFileSearchFileSearchCopyWith<$Res>? get fileSearch { @@ -53976,11 +56875,13 @@ class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { other.fileSearch == fileSearch)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, fileSearch); - @JsonKey(ignore: true) + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> @@ -54087,16 +56988,18 @@ abstract class AssistantToolsFileSearch extends AssistantTools { factory AssistantToolsFileSearch.fromJson(Map json) = _$AssistantToolsFileSearchImpl.fromJson; - @override - /// The type of tool being defined: `file_search` + @override String get type; /// Overrides for the file search tool. @JsonKey(name: 'file_search', includeIfNull: false) AssistantToolsFileSearchFileSearch? get fileSearch; + + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> get copyWith => throw _privateConstructorUsedError; } @@ -54124,6 +57027,8 @@ class __$$AssistantToolsFunctionImplCopyWithImpl<$Res> $Res Function(_$AssistantToolsFunctionImpl) _then) : super(_value, _then); + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -54142,6 +57047,8 @@ class __$$AssistantToolsFunctionImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $FunctionObjectCopyWith<$Res> get function { @@ -54185,11 +57092,13 @@ class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, function); - @JsonKey(ignore: true) + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantToolsFunctionImplCopyWith<_$AssistantToolsFunctionImpl> @@ -54294,15 +57203,17 @@ abstract class AssistantToolsFunction extends AssistantTools { factory AssistantToolsFunction.fromJson(Map json) = _$AssistantToolsFunctionImpl.fromJson; - @override - /// The type of tool being defined: `function` + @override String get type; /// A function that the model may call. FunctionObject get function; + + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantToolsFunctionImplCopyWith<_$AssistantToolsFunctionImpl> get copyWith => throw _privateConstructorUsedError; } @@ -54314,7 +57225,7 @@ AssistantToolsFileSearchFileSearch _$AssistantToolsFileSearchFileSearchFromJson( /// @nodoc mixin _$AssistantToolsFileSearchFileSearch { - /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search @@ -54322,8 +57233,12 @@ mixin _$AssistantToolsFileSearchFileSearch { @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults => throw _privateConstructorUsedError; + /// Serializes this AssistantToolsFileSearchFileSearch to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $AssistantToolsFileSearchFileSearchCopyWith< AssistantToolsFileSearchFileSearch> get copyWith => throw _privateConstructorUsedError; @@ -54353,6 +57268,8 @@ class _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -54391,6 +57308,8 @@ class __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl<$Res> $Res Function(_$AssistantToolsFileSearchFileSearchImpl) _then) : super(_value, _then); + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -54418,7 +57337,7 @@ class _$AssistantToolsFileSearchFileSearchImpl Map json) => _$$AssistantToolsFileSearchFileSearchImplFromJson(json); - /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// The maximum number of results the file search tool should output. 
The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search @@ -54441,11 +57360,13 @@ class _$AssistantToolsFileSearchFileSearchImpl other.maxNumResults == maxNumResults)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, maxNumResults); - @JsonKey(ignore: true) + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantToolsFileSearchFileSearchImplCopyWith< @@ -54472,17 +57393,19 @@ abstract class _AssistantToolsFileSearchFileSearch Map json) = _$AssistantToolsFileSearchFileSearchImpl.fromJson; - @override - - /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + @override @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults; + + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantToolsFileSearchFileSearchImplCopyWith< _$AssistantToolsFileSearchFileSearchImpl> get copyWith => throw _privateConstructorUsedError; @@ -54563,8 +57486,13 @@ mixin _$MessageContent { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this MessageContent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -54588,6 +57516,8 @@ class _$MessageContentCopyWithImpl<$Res, $Val extends MessageContent> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -54628,6 +57558,8 @@ class __$$MessageContentImageFileObjectImplCopyWithImpl<$Res> $Res Function(_$MessageContentImageFileObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -54646,6 +57578,8 @@ class __$$MessageContentImageFileObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
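// Illustrative sketch (hand-written, not generated): the JSON shape that
// `AssistantToolsFileSearch` / `AssistantToolsFileSearchFileSearch` above map
// to, following the `@JsonKey(name: ...)` annotations in this hunk. Per the
// doc comment, `max_num_results` must be between 1 and 50 inclusive; when it
// is omitted the documented defaults apply (20 for `gpt-4*` models, 5 for
// gpt-3.5-turbo). The literal below is example data only.
import 'dart:convert';

void main() {
  final fileSearchTool = <String, dynamic>{
    'type': 'file_search',
    'file_search': {
      'max_num_results': 20, // 1..50 inclusive
    },
  };
  print(jsonEncode(fileSearchTool));
  // {"type":"file_search","file_search":{"max_num_results":20}}
}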
@override @pragma('vm:prefer-inline') $MessageContentImageFileCopyWith<$Res> get imageFile { @@ -54693,11 +57627,13 @@ class _$MessageContentImageFileObjectImpl other.imageFile == imageFile)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, imageFile); - @JsonKey(ignore: true) + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentImageFileObjectImplCopyWith< @@ -54804,16 +57740,18 @@ abstract class MessageContentImageFileObject extends MessageContent { factory MessageContentImageFileObject.fromJson(Map json) = _$MessageContentImageFileObjectImpl.fromJson; - @override - /// Always `image_file`. + @override String get type; /// The image file that is part of a message. @JsonKey(name: 'image_file') MessageContentImageFile get imageFile; + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentImageFileObjectImplCopyWith< _$MessageContentImageFileObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -54845,6 +57783,8 @@ class __$$MessageContentImageUrlObjectImplCopyWithImpl<$Res> $Res Function(_$MessageContentImageUrlObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -54863,6 +57803,8 @@ class __$$MessageContentImageUrlObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageContentImageUrlCopyWith<$Res> get imageUrl { @@ -54909,11 +57851,13 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { other.imageUrl == imageUrl)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, imageUrl); - @JsonKey(ignore: true) + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentImageUrlObjectImplCopyWith< @@ -55020,16 +57964,18 @@ abstract class MessageContentImageUrlObject extends MessageContent { factory MessageContentImageUrlObject.fromJson(Map json) = _$MessageContentImageUrlObjectImpl.fromJson; - @override - /// The type of the content part. Always `image_url`. + @override String get type; /// The image URL part of a message. @JsonKey(name: 'image_url') MessageContentImageUrl get imageUrl; + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentImageUrlObjectImplCopyWith< _$MessageContentImageUrlObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -55058,6 +58004,8 @@ class __$$MessageContentTextObjectImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -55076,6 +58024,8 @@ class __$$MessageContentTextObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageContentTextCopyWith<$Res> get text { @@ -55117,11 +58067,13 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, text); - @JsonKey(ignore: true) + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextObjectImplCopyWith<_$MessageContentTextObjectImpl> @@ -55225,15 +58177,17 @@ abstract class MessageContentTextObject extends MessageContent { factory MessageContentTextObject.fromJson(Map json) = _$MessageContentTextObjectImpl.fromJson; - @override - /// Always `text`. + @override String get type; /// The text content that is part of a message. MessageContentText get text; + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextObjectImplCopyWith<_$MessageContentTextObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -55318,8 +58272,13 @@ mixin _$MessageDeltaContent { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this MessageDeltaContent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -55343,6 +58302,8 @@ class _$MessageDeltaContentCopyWithImpl<$Res, $Val extends MessageDeltaContent> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -55390,6 +58351,8 @@ class __$$MessageDeltaContentImageFileObjectImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentImageFileObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -55413,6 +58376,8 @@ class __$$MessageDeltaContentImageFileObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageContentImageFileCopyWith<$Res>? get imageFile { @@ -55469,11 +58434,13 @@ class _$MessageDeltaContentImageFileObjectImpl other.imageFile == imageFile)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, type, imageFile); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentImageFileObjectImplCopyWith< @@ -55586,20 +58553,22 @@ abstract class MessageDeltaContentImageFileObject extends MessageDeltaContent { Map json) = _$MessageDeltaContentImageFileObjectImpl.fromJson; - @override - /// The index of the content part in the message. - int get index; @override + int get index; /// Always `image_file`. + @override String get type; /// The image file that is part of a message. @JsonKey(name: 'image_file', includeIfNull: false) MessageContentImageFile? get imageFile; + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentImageFileObjectImplCopyWith< _$MessageDeltaContentImageFileObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -55632,6 +58601,8 @@ class __$$MessageDeltaContentTextObjectImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentTextObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -55655,6 +58626,8 @@ class __$$MessageDeltaContentTextObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageDeltaContentTextCopyWith<$Res>? get text { @@ -55710,11 +58683,13 @@ class _$MessageDeltaContentTextObjectImpl (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, type, text); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextObjectImplCopyWith< @@ -55825,20 +58800,22 @@ abstract class MessageDeltaContentTextObject extends MessageDeltaContent { factory MessageDeltaContentTextObject.fromJson(Map json) = _$MessageDeltaContentTextObjectImpl.fromJson; - @override - /// The index of the content part in the message. - int get index; @override + int get index; /// Always `text`. + @override String get type; /// The text content that is part of a message. @JsonKey(includeIfNull: false) MessageDeltaContentText? get text; + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextObjectImplCopyWith< _$MessageDeltaContentTextObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -55963,8 +58940,13 @@ mixin _$MessageContentTextAnnotations { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this MessageContentTextAnnotations to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentTextAnnotationsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -55995,6 +58977,8 @@ class _$MessageContentTextAnnotationsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -56057,6 +59041,8 @@ class __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -56090,6 +59076,8 @@ class __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageContentTextAnnotationsFileCitationCopyWith<$Res> get fileCitation { @@ -56159,12 +59147,14 @@ class _$MessageContentTextAnnotationsFileCitationObjectImpl other.endIndex == endIndex)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, text, fileCitation, startIndex, endIndex); - @JsonKey(ignore: true) + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< @@ -56310,30 +59300,32 @@ abstract class MessageContentTextAnnotationsFileCitationObject Map json) = _$MessageContentTextAnnotationsFileCitationObjectImpl.fromJson; - @override - /// Always `file_citation`. - String get type; @override + String get type; /// The text in the message content that needs to be replaced. + @override String get text; /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. @JsonKey(name: 'file_citation') MessageContentTextAnnotationsFileCitation get fileCitation; - @override /// The start index of the text in the message content that needs to be replaced. + @override @JsonKey(name: 'start_index') int get startIndex; - @override /// The end index of the text in the message content that needs to be replaced. + @override @JsonKey(name: 'end_index') int get endIndex; + + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< _$MessageContentTextAnnotationsFileCitationObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -56371,6 +59363,8 @@ class __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextAnnotationsFilePathObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -56404,6 +59398,8 @@ class __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageContentTextAnnotationsFilePathCopyWith<$Res> get filePath { @@ -56473,12 +59469,14 @@ class _$MessageContentTextAnnotationsFilePathObjectImpl other.endIndex == endIndex)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, text, filePath, startIndex, endIndex); - @JsonKey(ignore: true) + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith< @@ -56623,30 +59621,32 @@ abstract class MessageContentTextAnnotationsFilePathObject Map json) = _$MessageContentTextAnnotationsFilePathObjectImpl.fromJson; - @override - /// Always `file_path`. - String get type; @override + String get type; /// The text in the message content that needs to be replaced. + @override String get text; /// No Description @JsonKey(name: 'file_path') MessageContentTextAnnotationsFilePath get filePath; - @override /// No Description + @override @JsonKey(name: 'start_index') int get startIndex; - @override /// No Description + @override @JsonKey(name: 'end_index') int get endIndex; + + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith< _$MessageContentTextAnnotationsFilePathObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -56663,8 +59663,12 @@ mixin _$MessageContentTextAnnotationsFilePath { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; + /// Serializes this MessageContentTextAnnotationsFilePath to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentTextAnnotationsFilePathCopyWith< MessageContentTextAnnotationsFilePath> get copyWith => throw _privateConstructorUsedError; @@ -56692,6 +59696,8 @@ class _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -56728,6 +59734,8 @@ class __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextAnnotationsFilePathImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. 
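// Illustrative sketch (hand-written, hypothetical example data): the
// annotation objects above carry a placeholder `text` plus `start_index` /
// `end_index` into the message content, and the doc comments describe that
// span as the text that needs to be replaced. A minimal rendering step that
// swaps the placeholder for a readable citation marker:
void main() {
  const content = 'See the quarterly report【4:0†source】 for details.';
  const annotationText = '【4:0†source】';
  // In a real response these come from `start_index` / `end_index`; here they
  // are derived from the example string to keep the sketch small.
  final startIndex = content.indexOf(annotationText);
  final endIndex = startIndex + annotationText.length;
  final rendered = content.replaceRange(startIndex, endIndex, ' [1]');
  print(rendered); // See the quarterly report [1] for details.
}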
@pragma('vm:prefer-inline') @override $Res call({ @@ -56772,11 +59780,13 @@ class _$MessageContentTextAnnotationsFilePathImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextAnnotationsFilePathImplCopyWith< @@ -56803,13 +59813,15 @@ abstract class _MessageContentTextAnnotationsFilePath Map json) = _$MessageContentTextAnnotationsFilePathImpl.fromJson; - @override - /// The ID of the file that was generated. + @override @JsonKey(name: 'file_id') String get fileId; + + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextAnnotationsFilePathImplCopyWith< _$MessageContentTextAnnotationsFilePathImpl> get copyWith => throw _privateConstructorUsedError; @@ -56948,8 +59960,13 @@ mixin _$MessageDeltaContentTextAnnotations { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this MessageDeltaContentTextAnnotations to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentTextAnnotationsCopyWith< MessageDeltaContentTextAnnotations> get copyWith => throw _privateConstructorUsedError; @@ -56982,6 +59999,8 @@ class _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -57055,6 +60074,8 @@ class __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -57093,6 +60114,8 @@ class __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< )); } + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>? @@ -57175,12 +60198,14 @@ class _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl other.endIndex == endIndex)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, index, type, text, fileCitation, startIndex, endIndex); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< @@ -57339,35 +60364,37 @@ abstract class MessageDeltaContentTextAnnotationsFileCitationObject Map json) = _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl.fromJson; - @override - /// The index of the annotation in the text content part. - int get index; @override + int get index; /// Always `file_citation`. - String get type; @override + String get type; /// The text in the message content that needs to be replaced. + @override @JsonKey(includeIfNull: false) String? get text; /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. @JsonKey(name: 'file_citation', includeIfNull: false) MessageDeltaContentTextAnnotationsFileCitation? get fileCitation; - @override /// The start index of the text in the message content that needs to be replaced. + @override @JsonKey(name: 'start_index', includeIfNull: false) int? get startIndex; - @override /// The end index of the text in the message content that needs to be replaced. + @override @JsonKey(name: 'end_index', includeIfNull: false) int? get endIndex; + + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -57409,6 +60436,8 @@ class __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -57447,6 +60476,8 @@ class __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith<$Res>? @@ -57528,12 +60559,14 @@ class _$MessageDeltaContentTextAnnotationsFilePathObjectImpl other.endIndex == endIndex)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, index, type, text, filePath, startIndex, endIndex); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< @@ -57690,35 +60723,37 @@ abstract class MessageDeltaContentTextAnnotationsFilePathObject Map json) = _$MessageDeltaContentTextAnnotationsFilePathObjectImpl.fromJson; - @override - /// The index of the annotation in the text content part. - int get index; @override + int get index; /// Always `file_path`. - String get type; @override + String get type; /// The text in the message content that needs to be replaced. + @override @JsonKey(includeIfNull: false) String? 
get text; /// No Description @JsonKey(name: 'file_path', includeIfNull: false) MessageDeltaContentTextAnnotationsFilePathObjectFilePath? get filePath; - @override /// No Description + @override @JsonKey(name: 'start_index', includeIfNull: false) int? get startIndex; - @override /// No Description + @override @JsonKey(name: 'end_index', includeIfNull: false) int? get endIndex; + + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -57737,8 +60772,12 @@ mixin _$MessageDeltaContentTextAnnotationsFilePathObjectFilePath { @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaContentTextAnnotationsFilePathObjectFilePath to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< MessageDeltaContentTextAnnotationsFilePathObjectFilePath> get copyWith => throw _privateConstructorUsedError; @@ -57773,6 +60812,8 @@ class _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -57820,6 +60861,8 @@ class __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithIm _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -57866,11 +60909,13 @@ class _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< @@ -57901,13 +60946,15 @@ abstract class _MessageDeltaContentTextAnnotationsFilePathObjectFilePath Map json) = _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl.fromJson; - @override - /// The ID of the file that was generated. + @override @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; + + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> get copyWith => throw _privateConstructorUsedError; @@ -57995,8 +61042,13 @@ mixin _$RunStepDetails { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -58020,6 +61072,8 @@ class _$RunStepDetailsCopyWithImpl<$Res, $Val extends RunStepDetails> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -58061,6 +61115,8 @@ class __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsMessageCreationObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -58079,6 +61135,8 @@ class __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDetailsMessageCreationCopyWith<$Res> get messageCreation { @@ -58126,11 +61184,13 @@ class _$RunStepDetailsMessageCreationObjectImpl other.messageCreation == messageCreation)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, messageCreation); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsMessageCreationObjectImplCopyWith< @@ -58247,16 +61307,18 @@ abstract class RunStepDetailsMessageCreationObject extends RunStepDetails { Map json) = _$RunStepDetailsMessageCreationObjectImpl.fromJson; - @override - /// Always `message_creation`. + @override String get type; /// Details of the message creation by the run step. @JsonKey(name: 'message_creation') RunStepDetailsMessageCreation get messageCreation; + + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsMessageCreationObjectImplCopyWith< _$RunStepDetailsMessageCreationObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -58286,6 +61348,8 @@ class __$$RunStepDetailsToolCallsObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -58351,12 +61415,14 @@ class _$RunStepDetailsToolCallsObjectImpl .equals(other._toolCalls, _toolCalls)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, type, const DeepCollectionEquality().hash(_toolCalls)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsObjectImplCopyWith< @@ -58472,16 +61538,18 @@ abstract class RunStepDetailsToolCallsObject extends RunStepDetails { factory RunStepDetailsToolCallsObject.fromJson(Map json) = _$RunStepDetailsToolCallsObjectImpl.fromJson; - @override - /// Always `tool_calls`. + @override String get type; /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. @JsonKey(name: 'tool_calls') List get toolCalls; + + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsObjectImplCopyWith< _$RunStepDetailsToolCallsObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -58571,8 +61639,13 @@ mixin _$RunStepDeltaDetails { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDeltaDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -58596,6 +61669,8 @@ class _$RunStepDeltaDetailsCopyWithImpl<$Res, $Val extends RunStepDeltaDetails> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -58639,6 +61714,8 @@ class __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsMessageCreationObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -58657,6 +61734,8 @@ class __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>? get messageCreation { @@ -58709,11 +61788,13 @@ class _$RunStepDeltaStepDetailsMessageCreationObjectImpl other.messageCreation == messageCreation)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, messageCreation); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith< @@ -58835,16 +61916,18 @@ abstract class RunStepDeltaStepDetailsMessageCreationObject Map json) = _$RunStepDeltaStepDetailsMessageCreationObjectImpl.fromJson; - @override - /// Always `message_creation`. + @override String get type; /// Details of the message creation by the run step. @JsonKey(name: 'message_creation', includeIfNull: false) RunStepDeltaStepDetailsMessageCreation? get messageCreation; + + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith< _$RunStepDeltaStepDetailsMessageCreationObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -58875,6 +61958,8 @@ class __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsToolCallsObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -58942,12 +62027,14 @@ class _$RunStepDeltaStepDetailsToolCallsObjectImpl .equals(other._toolCalls, _toolCalls)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, type, const DeepCollectionEquality().hash(_toolCalls)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith< @@ -59068,16 +62155,18 @@ abstract class RunStepDeltaStepDetailsToolCallsObject Map json) = _$RunStepDeltaStepDetailsToolCallsObjectImpl.fromJson; - @override - /// Always `tool_calls`. + @override String get type; /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. @JsonKey(name: 'tool_calls', includeIfNull: false) List? get toolCalls; + + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith< _$RunStepDeltaStepDetailsToolCallsObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -59181,8 +62270,13 @@ mixin _$RunStepDetailsToolCalls { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCalls to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsToolCallsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -59207,6 +62301,8 @@ class _$RunStepDetailsToolCallsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -59255,6 +62351,8 @@ class __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsCodeObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -59278,6 +62376,8 @@ class __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res> @@ -59332,11 +62432,13 @@ class _$RunStepDetailsToolCallsCodeObjectImpl other.codeInterpreter == codeInterpreter)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, type, codeInterpreter); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeObjectImplCopyWith< @@ -59465,20 +62567,22 @@ abstract class RunStepDetailsToolCallsCodeObject Map json) = _$RunStepDetailsToolCallsCodeObjectImpl.fromJson; - @override - /// The ID of the tool call. - String get id; @override + String get id; /// Always `code_interpreter`. + @override String get type; /// The Code Interpreter tool call definition. @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter get codeInterpreter; + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsCodeObjectImplCopyWith< _$RunStepDetailsToolCallsCodeObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -59509,6 +62613,8 @@ class __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsFileSearchObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -59585,12 +62691,14 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl .equals(other._fileSearch, _fileSearch)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, id, type, const DeepCollectionEquality().hash(_fileSearch)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith< @@ -59720,20 +62828,22 @@ abstract class RunStepDetailsToolCallsFileSearchObject Map json) = _$RunStepDetailsToolCallsFileSearchObjectImpl.fromJson; - @override - /// The ID of the tool call object. - String get id; @override + String get id; /// The type of tool call. This is always going to be `file_search` for this type of tool call. + @override String get type; /// For now, this is always going to be an empty object. 
@JsonKey(name: 'file_search') Map get fileSearch; + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith< _$RunStepDetailsToolCallsFileSearchObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -59763,6 +62873,8 @@ class __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsFunctionObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -59786,6 +62898,8 @@ class __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDetailsToolCallsFunctionCopyWith<$Res> get function { @@ -59836,11 +62950,13 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith< @@ -59968,19 +63084,21 @@ abstract class RunStepDetailsToolCallsFunctionObject Map json) = _$RunStepDetailsToolCallsFunctionObjectImpl.fromJson; - @override - /// The ID of the tool call object. - String get id; @override + String get id; /// Always `function`. + @override String get type; /// The definition of the function that was called. RunStepDetailsToolCallsFunction get function; + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith< _$RunStepDetailsToolCallsFunctionObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -60002,8 +63120,12 @@ mixin _$RunStepDetailsToolCallsFunction { /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. String? get output => throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsToolCallsFunction to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsToolCallsFunctionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -60030,6 +63152,8 @@ class _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -60076,6 +63200,8 @@ class __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsFunctionImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -60140,11 +63266,13 @@ class _$RunStepDetailsToolCallsFunctionImpl (identical(other.output, output) || other.output == output)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments, output); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsFunctionImplCopyWith< @@ -60171,20 +63299,22 @@ abstract class _RunStepDetailsToolCallsFunction factory _RunStepDetailsToolCallsFunction.fromJson(Map json) = _$RunStepDetailsToolCallsFunctionImpl.fromJson; - @override - /// The name of the function. - String get name; @override + String get name; /// The arguments passed to the function. - String get arguments; @override + String get arguments; /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. + @override String? get output; + + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsFunctionImplCopyWith< _$RunStepDetailsToolCallsFunctionImpl> get copyWith => throw _privateConstructorUsedError; @@ -60329,8 +63459,13 @@ mixin _$RunStepDeltaStepDetailsToolCalls { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDeltaStepDetailsToolCalls to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsToolCallsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -60358,6 +63493,8 @@ class _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -60414,6 +63551,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -60442,6 +63581,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>? 
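// Illustrative sketch (hand-written, hypothetical data): how the
// `RunStepDetailsToolCallsFunction` fields shown above are typically
// consumed. `arguments` is a JSON-encoded string that needs its own decode
// step, and `output` stays `null` until tool outputs are submitted, as the
// doc comment notes.
import 'dart:convert';

void main() {
  const toolCallJson =
      r'{"name": "get_weather", "arguments": "{\"city\": \"Oslo\"}", "output": null}';

  final call = jsonDecode(toolCallJson) as Map<String, dynamic>;
  // Second decode: the arguments field itself holds a JSON object as text.
  final args = jsonDecode(call['arguments'] as String) as Map<String, dynamic>;
  print('call ${call['name']} with $args');

  if (call['output'] == null) {
    print('outputs have not been submitted yet');
  }
}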
@@ -60509,12 +63650,14 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl other.codeInterpreter == codeInterpreter)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, id, type, codeInterpreter); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith< @@ -60680,26 +63823,28 @@ abstract class RunStepDeltaStepDetailsToolCallsCodeObject Map json) = _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl.fromJson; - @override - /// The index of the tool call in the tool calls array. - int get index; @override + int get index; /// The ID of the tool call. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// Always `code_interpreter`. + @override String get type; /// The Code Interpreter tool call definition. - outputs @JsonKey(name: 'code_interpreter', includeIfNull: false) RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? get codeInterpreter; + + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -60735,6 +63880,8 @@ class __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -60823,12 +63970,14 @@ class _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl .equals(other._fileSearch, _fileSearch)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, id, type, const DeepCollectionEquality().hash(_fileSearch)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< @@ -60994,25 +64143,27 @@ abstract class RunStepDeltaStepDetailsToolCallsFileSearchObject Map json) = _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl.fromJson; - @override - /// The index of the tool call in the tool calls array. - int get index; @override + int get index; /// The ID of the tool call object. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// The type of tool call. This is always going to be `file_search` for this type of tool call. + @override String get type; /// For now, this is always going to be an empty object. @JsonKey(name: 'file_search') Map get fileSearch; + + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -61049,6 +64200,8 @@ class __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -61077,6 +64230,8 @@ class __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>? get function { @@ -61141,11 +64296,13 @@ class _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< @@ -61311,25 +64468,27 @@ abstract class RunStepDeltaStepDetailsToolCallsFunctionObject Map json) = _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl.fromJson; - @override - /// The index of the tool call in the tool calls array. - int get index; @override + int get index; /// The ID of the tool call object. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// Always `function`. + @override String get type; /// The definition of the function that was called. @JsonKey(includeIfNull: false) RunStepDeltaStepDetailsToolCallsFunction? get function; + + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -61355,8 +64514,12 @@ mixin _$RunStepDeltaStepDetailsToolCallsFunction { @JsonKey(includeIfNull: false) String? get output => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaStepDetailsToolCallsFunction to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsToolCallsFunctionCopyWith< RunStepDeltaStepDetailsToolCallsFunction> get copyWith => throw _privateConstructorUsedError; @@ -61388,6 +64551,8 @@ class _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -61437,6 +64602,8 @@ class __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -61506,11 +64673,13 @@ class _$RunStepDeltaStepDetailsToolCallsFunctionImpl (identical(other.output, output) || other.output == output)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments, output); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith< @@ -61540,23 +64709,25 @@ abstract class _RunStepDeltaStepDetailsToolCallsFunction Map json) = _$RunStepDeltaStepDetailsToolCallsFunctionImpl.fromJson; - @override - /// The name of the function. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The arguments passed to the function. + @override @JsonKey(includeIfNull: false) String? get arguments; - @override /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. + @override @JsonKey(includeIfNull: false) String? get output; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith< _$RunStepDeltaStepDetailsToolCallsFunctionImpl> get copyWith => throw _privateConstructorUsedError; @@ -61630,8 +64801,13 @@ mixin _$RunStepDetailsToolCallsCodeOutput { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCallsCodeOutput to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsToolCallsCodeOutputCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -61658,6 +64834,8 @@ class _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -61696,6 +64874,8 @@ class __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -61749,11 +64929,13 @@ class _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl (identical(other.logs, logs) || other.logs == logs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, logs); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< @@ -61853,15 +65035,17 @@ abstract class RunStepDetailsToolCallsCodeOutputLogsObject Map json) = _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson; - @override - /// Always `logs`. + @override String get type; /// The text output from the Code Interpreter tool call. String get logs; + + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -61893,6 +65077,8 @@ class __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -61911,6 +65097,8 @@ class __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res> get image { @@ -61955,11 +65143,13 @@ class _$RunStepDetailsToolCallsCodeOutputImageObjectImpl (identical(other.image, image) || other.image == image)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, image); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< @@ -62060,15 +65250,17 @@ abstract class RunStepDetailsToolCallsCodeOutputImageObject Map json) = _$RunStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson; - @override - /// Always `image`. + @override String get type; /// Code interpreter image output. RunStepDetailsToolCallsCodeOutputImage get image; + + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -62172,8 +65364,13 @@ mixin _$RunStepDeltaStepDetailsToolCallsCodeOutput { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDeltaStepDetailsToolCallsCodeOutput to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith< RunStepDeltaStepDetailsToolCallsCodeOutput> get copyWith => throw _privateConstructorUsedError; @@ -62202,6 +65399,8 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -62251,6 +65450,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -62318,11 +65519,13 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl (identical(other.logs, logs) || other.logs == logs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, type, logs); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< @@ -62448,20 +65651,22 @@ abstract class RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject Map json) = _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson; - @override - /// The index of the output in the outputs array. - int get index; @override + int get index; /// Always `logs`. + @override String get type; /// The text output from the Code Interpreter tool call. @JsonKey(includeIfNull: false) String? get logs; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -62502,6 +65707,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -62525,6 +65732,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< )); } + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>? get image { @@ -62583,11 +65792,13 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl (identical(other.image, image) || other.image == image)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, type, image); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< @@ -62714,20 +65925,22 @@ abstract class RunStepDeltaStepDetailsToolCallsCodeOutputImageObject Map json) = _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson; - @override - /// The index of the output in the outputs array. - int get index; @override + int get index; /// Always `image`. + @override String get type; /// Code interpreter image output. @JsonKey(includeIfNull: false) RunStepDeltaStepDetailsToolCallsCodeOutputImage? get image; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -62793,8 +66006,13 @@ mixin _$ChunkingStrategyRequestParam { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChunkingStrategyRequestParam to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChunkingStrategyRequestParamCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -62821,6 +66039,8 @@ class _$ChunkingStrategyRequestParamCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -62857,6 +66077,8 @@ class __$$AutoChunkingStrategyRequestParamImplCopyWithImpl<$Res> $Res Function(_$AutoChunkingStrategyRequestParamImpl) _then) : super(_value, _then); + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -62899,11 +66121,13 @@ class _$AutoChunkingStrategyRequestParamImpl (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AutoChunkingStrategyRequestParamImplCopyWith< @@ -62991,12 +66215,14 @@ abstract class AutoChunkingStrategyRequestParam factory AutoChunkingStrategyRequestParam.fromJson(Map json) = _$AutoChunkingStrategyRequestParamImpl.fromJson; - @override - /// Always `auto`. + @override String get type; + + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AutoChunkingStrategyRequestParamImplCopyWith< _$AutoChunkingStrategyRequestParamImpl> get copyWith => throw _privateConstructorUsedError; @@ -63026,6 +66252,8 @@ class __$$StaticChunkingStrategyRequestParamImplCopyWithImpl<$Res> $Res Function(_$StaticChunkingStrategyRequestParamImpl) _then) : super(_value, _then); + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -63044,6 +66272,8 @@ class __$$StaticChunkingStrategyRequestParamImplCopyWithImpl<$Res> )); } + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $StaticChunkingStrategyCopyWith<$Res> get static { @@ -63087,11 +66317,13 @@ class _$StaticChunkingStrategyRequestParamImpl (identical(other.static, static) || other.static == static)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, static); - @JsonKey(ignore: true) + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$StaticChunkingStrategyRequestParamImplCopyWith< @@ -63182,15 +66414,17 @@ abstract class StaticChunkingStrategyRequestParam Map json) = _$StaticChunkingStrategyRequestParamImpl.fromJson; - @override - /// Always `static`. + @override String get type; /// Static chunking strategy StaticChunkingStrategy get static; + + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$StaticChunkingStrategyRequestParamImplCopyWith< _$StaticChunkingStrategyRequestParamImpl> get copyWith => throw _privateConstructorUsedError; @@ -63256,8 +66490,13 @@ mixin _$ChunkingStrategyResponseParam { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChunkingStrategyResponseParam to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChunkingStrategyResponseParamCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -63284,6 +66523,8 @@ class _$ChunkingStrategyResponseParamCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -63322,6 +66563,8 @@ class __$$StaticChunkingStrategyResponseParamImplCopyWithImpl<$Res> $Res Function(_$StaticChunkingStrategyResponseParamImpl) _then) : super(_value, _then); + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -63340,6 +66583,8 @@ class __$$StaticChunkingStrategyResponseParamImplCopyWithImpl<$Res> )); } + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $StaticChunkingStrategyCopyWith<$Res> get static { @@ -63383,11 +66628,13 @@ class _$StaticChunkingStrategyResponseParamImpl (identical(other.static, static) || other.static == static)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, static); - @JsonKey(ignore: true) + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$StaticChunkingStrategyResponseParamImplCopyWith< @@ -63478,15 +66725,17 @@ abstract class StaticChunkingStrategyResponseParam Map json) = _$StaticChunkingStrategyResponseParamImpl.fromJson; - @override - /// Always `static`. + @override String get type; /// Static chunking strategy StaticChunkingStrategy get static; + + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$StaticChunkingStrategyResponseParamImplCopyWith< _$StaticChunkingStrategyResponseParamImpl> get copyWith => throw _privateConstructorUsedError; @@ -63514,6 +66763,8 @@ class __$$OtherChunkingStrategyResponseParamImplCopyWithImpl<$Res> $Res Function(_$OtherChunkingStrategyResponseParamImpl) _then) : super(_value, _then); + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -63556,11 +66807,13 @@ class _$OtherChunkingStrategyResponseParamImpl (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$OtherChunkingStrategyResponseParamImplCopyWith< @@ -63649,12 +66902,14 @@ abstract class OtherChunkingStrategyResponseParam Map json) = _$OtherChunkingStrategyResponseParamImpl.fromJson; - @override - /// Always `other`. + @override String get type; + + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$OtherChunkingStrategyResponseParamImplCopyWith< _$OtherChunkingStrategyResponseParamImpl> get copyWith => throw _privateConstructorUsedError; @@ -63777,8 +67032,13 @@ mixin _$AssistantStreamEvent { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this AssistantStreamEvent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $AssistantStreamEventCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -63803,6 +67063,8 @@ class _$AssistantStreamEventCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -63838,6 +67100,8 @@ class __$$ThreadStreamEventImplCopyWithImpl<$Res> $Res Function(_$ThreadStreamEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -63856,6 +67120,8 @@ class __$$ThreadStreamEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ThreadObjectCopyWith<$Res> get data { @@ -63896,11 +67162,13 @@ class _$ThreadStreamEventImpl extends ThreadStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ThreadStreamEventImplCopyWith<_$ThreadStreamEventImpl> get copyWith => @@ -64033,16 +67301,18 @@ abstract class ThreadStreamEvent extends AssistantStreamEvent { factory ThreadStreamEvent.fromJson(Map json) = _$ThreadStreamEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a thread that contains [messages](https://platform.openai.com/docs/api-reference/messages). + @override ThreadObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ThreadStreamEventImplCopyWith<_$ThreadStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -64068,6 +67338,8 @@ class __$$RunStreamEventImplCopyWithImpl<$Res> _$RunStreamEventImpl _value, $Res Function(_$RunStreamEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -64086,6 +67358,8 @@ class __$$RunStreamEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $RunObjectCopyWith<$Res> get data { @@ -64126,11 +67400,13 @@ class _$RunStreamEventImpl extends RunStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStreamEventImplCopyWith<_$RunStreamEventImpl> get copyWith => @@ -64263,16 +67539,18 @@ abstract class RunStreamEvent extends AssistantStreamEvent { factory RunStreamEvent.fromJson(Map json) = _$RunStreamEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents an execution run on a [thread](https://platform.openai.com/docs/api-reference/threads). + @override RunObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStreamEventImplCopyWith<_$RunStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -64298,6 +67576,8 @@ class __$$RunStepStreamEventImplCopyWithImpl<$Res> $Res Function(_$RunStepStreamEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -64316,6 +67596,8 @@ class __$$RunStepStreamEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepObjectCopyWith<$Res> get data { @@ -64356,11 +67638,13 @@ class _$RunStepStreamEventImpl extends RunStepStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepStreamEventImplCopyWith<_$RunStepStreamEventImpl> get copyWith => @@ -64493,16 +67777,18 @@ abstract class RunStepStreamEvent extends AssistantStreamEvent { factory RunStepStreamEvent.fromJson(Map json) = _$RunStepStreamEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a step in execution of a run. + @override RunStepObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepStreamEventImplCopyWith<_$RunStepStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -64531,6 +67817,8 @@ class __$$RunStepStreamDeltaEventImplCopyWithImpl<$Res> $Res Function(_$RunStepStreamDeltaEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -64549,6 +67837,8 @@ class __$$RunStepStreamDeltaEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaObjectCopyWith<$Res> get data { @@ -64589,11 +67879,13 @@ class _$RunStepStreamDeltaEventImpl extends RunStepStreamDeltaEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepStreamDeltaEventImplCopyWith<_$RunStepStreamDeltaEventImpl> @@ -64726,16 +68018,18 @@ abstract class RunStepStreamDeltaEvent extends AssistantStreamEvent { factory RunStepStreamDeltaEvent.fromJson(Map json) = _$RunStepStreamDeltaEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a run step delta i.e. any changed fields on a run step during streaming. + @override RunStepDeltaObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepStreamDeltaEventImplCopyWith<_$RunStepStreamDeltaEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -64761,6 +68055,8 @@ class __$$MessageStreamEventImplCopyWithImpl<$Res> $Res Function(_$MessageStreamEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -64779,6 +68075,8 @@ class __$$MessageStreamEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageObjectCopyWith<$Res> get data { @@ -64819,11 +68117,13 @@ class _$MessageStreamEventImpl extends MessageStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageStreamEventImplCopyWith<_$MessageStreamEventImpl> get copyWith => @@ -64956,16 +68256,18 @@ abstract class MessageStreamEvent extends AssistantStreamEvent { factory MessageStreamEvent.fromJson(Map json) = _$MessageStreamEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a message within a [thread](https://platform.openai.com/docs/api-reference/threads). + @override MessageObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageStreamEventImplCopyWith<_$MessageStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -64994,6 +68296,8 @@ class __$$MessageStreamDeltaEventImplCopyWithImpl<$Res> $Res Function(_$MessageStreamDeltaEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -65012,6 +68316,8 @@ class __$$MessageStreamDeltaEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageDeltaObjectCopyWith<$Res> get data { @@ -65052,11 +68358,13 @@ class _$MessageStreamDeltaEventImpl extends MessageStreamDeltaEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageStreamDeltaEventImplCopyWith<_$MessageStreamDeltaEventImpl> @@ -65189,16 +68497,18 @@ abstract class MessageStreamDeltaEvent extends AssistantStreamEvent { factory MessageStreamDeltaEvent.fromJson(Map json) = _$MessageStreamDeltaEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a message delta i.e. any changed fields on a message during streaming. + @override MessageDeltaObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageStreamDeltaEventImplCopyWith<_$MessageStreamDeltaEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -65224,6 +68534,8 @@ class __$$ErrorEventImplCopyWithImpl<$Res> _$ErrorEventImpl _value, $Res Function(_$ErrorEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -65242,6 +68554,8 @@ class __$$ErrorEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ErrorCopyWith<$Res> get data { @@ -65281,11 +68595,13 @@ class _$ErrorEventImpl extends ErrorEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ErrorEventImplCopyWith<_$ErrorEventImpl> get copyWith => @@ -65417,16 +68733,18 @@ abstract class ErrorEvent extends AssistantStreamEvent { factory ErrorEvent.fromJson(Map json) = _$ErrorEventImpl.fromJson; - @override - /// The type of the event. 
- EventType get event; @override + EventType get event; /// Represents an error that occurred during an API request. + @override Error get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ErrorEventImplCopyWith<_$ErrorEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -65450,6 +68768,8 @@ class __$$DoneEventImplCopyWithImpl<$Res> _$DoneEventImpl _value, $Res Function(_$DoneEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -65499,11 +68819,13 @@ class _$DoneEventImpl extends DoneEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DoneEventImplCopyWith<_$DoneEventImpl> get copyWith => @@ -65635,16 +68957,18 @@ abstract class DoneEvent extends AssistantStreamEvent { factory DoneEvent.fromJson(Map json) = _$DoneEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// No Description + @override String get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DoneEventImplCopyWith<_$DoneEventImpl> get copyWith => throw _privateConstructorUsedError; } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 191f05e7..63581c97 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -13,17 +13,17 @@ _$CreateCompletionRequestImpl _$$CreateCompletionRequestImplFromJson( _$CreateCompletionRequestImpl( model: const _CompletionModelConverter().fromJson(json['model']), prompt: const _CompletionPromptConverter().fromJson(json['prompt']), - bestOf: json['best_of'] as int?, + bestOf: (json['best_of'] as num?)?.toInt(), echo: json['echo'] as bool? ?? false, frequencyPenalty: (json['frequency_penalty'] as num?)?.toDouble() ?? 0.0, logitBias: (json['logit_bias'] as Map?)?.map( - (k, e) => MapEntry(k, e as int), + (k, e) => MapEntry(k, (e as num).toInt()), ), - logprobs: json['logprobs'] as int?, - maxTokens: json['max_tokens'] as int? ?? 16, - n: json['n'] as int? ?? 1, + logprobs: (json['logprobs'] as num?)?.toInt(), + maxTokens: (json['max_tokens'] as num?)?.toInt() ?? 16, + n: (json['n'] as num?)?.toInt() ?? 1, presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 0.0, - seed: json['seed'] as int?, + seed: (json['seed'] as num?)?.toInt(), stop: const _CompletionStopConverter().fromJson(json['stop']), stream: json['stream'] as bool? ?? 
false, streamOptions: json['stream_options'] == null @@ -113,7 +113,8 @@ _$CompletionPromptListListIntImpl _$$CompletionPromptListListIntImplFromJson( Map json) => _$CompletionPromptListListIntImpl( (json['value'] as List) - .map((e) => (e as List).map((e) => e as int).toList()) + .map((e) => + (e as List).map((e) => (e as num).toInt()).toList()) .toList(), $type: json['runtimeType'] as String?, ); @@ -128,7 +129,7 @@ Map _$$CompletionPromptListListIntImplToJson( _$CompletionPromptListIntImpl _$$CompletionPromptListIntImplFromJson( Map json) => _$CompletionPromptListIntImpl( - (json['value'] as List).map((e) => e as int).toList(), + (json['value'] as List).map((e) => (e as num).toInt()).toList(), $type: json['runtimeType'] as String?, ); @@ -202,7 +203,7 @@ _$CreateCompletionResponseImpl _$$CreateCompletionResponseImplFromJson( choices: (json['choices'] as List) .map((e) => CompletionChoice.fromJson(e as Map)) .toList(), - created: json['created'] as int, + created: (json['created'] as num).toInt(), model: json['model'] as String, systemFingerprint: json['system_fingerprint'] as String?, object: @@ -243,7 +244,7 @@ _$CompletionChoiceImpl _$$CompletionChoiceImplFromJson( finishReason: $enumDecodeNullable( _$CompletionFinishReasonEnumMap, json['finish_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), - index: json['index'] as int, + index: (json['index'] as num).toInt(), logprobs: json['logprobs'] == null ? null : CompletionLogprobs.fromJson( @@ -270,7 +271,7 @@ _$CompletionLogprobsImpl _$$CompletionLogprobsImplFromJson( Map json) => _$CompletionLogprobsImpl( textOffset: (json['text_offset'] as List?) - ?.map((e) => e as int) + ?.map((e) => (e as num).toInt()) .toList(), tokenLogprobs: (json['token_logprobs'] as List?) ?.map((e) => (e as num?)?.toDouble()) @@ -310,18 +311,18 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( .toList(), frequencyPenalty: (json['frequency_penalty'] as num?)?.toDouble() ?? 0.0, logitBias: (json['logit_bias'] as Map?)?.map( - (k, e) => MapEntry(k, e as int), + (k, e) => MapEntry(k, (e as num).toInt()), ), logprobs: json['logprobs'] as bool?, - topLogprobs: json['top_logprobs'] as int?, - maxTokens: json['max_tokens'] as int?, - n: json['n'] as int? ?? 1, + topLogprobs: (json['top_logprobs'] as num?)?.toInt(), + maxTokens: (json['max_tokens'] as num?)?.toInt(), + n: (json['n'] as num?)?.toInt() ?? 1, presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 0.0, responseFormat: json['response_format'] == null ? 
null : ChatCompletionResponseFormat.fromJson( json['response_format'] as Map), - seed: json['seed'] as int?, + seed: (json['seed'] as num?)?.toInt(), serviceTier: $enumDecodeNullable( _$CreateChatCompletionRequestServiceTierEnumMap, json['service_tier'], unknownValue: JsonKey.nullForUndefinedEnumValue), @@ -427,6 +428,7 @@ const _$ChatCompletionModelsEnumMap = { ChatCompletionModels.gpt4VisionPreview: 'gpt-4-vision-preview', ChatCompletionModels.gpt4o: 'gpt-4o', ChatCompletionModels.gpt4o20240513: 'gpt-4o-2024-05-13', + ChatCompletionModels.gpt4o20240806: 'gpt-4o-2024-08-06', ChatCompletionModels.gpt4oMini: 'gpt-4o-mini', ChatCompletionModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', ChatCompletionModels.gpt35Turbo: 'gpt-3.5-turbo', @@ -717,7 +719,7 @@ _$CreateChatCompletionResponseImpl _$$CreateChatCompletionResponseImplFromJson( .map((e) => ChatCompletionResponseChoice.fromJson(e as Map)) .toList(), - created: json['created'] as int, + created: (json['created'] as num).toInt(), model: json['model'] as String, serviceTier: $enumDecodeNullable( _$ServiceTierEnumMap, json['service_tier'], @@ -761,7 +763,7 @@ _$ChatCompletionResponseChoiceImpl _$$ChatCompletionResponseChoiceImplFromJson( finishReason: $enumDecodeNullable( _$ChatCompletionFinishReasonEnumMap, json['finish_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), - index: json['index'] as int?, + index: (json['index'] as num?)?.toInt(), message: ChatCompletionAssistantMessage.fromJson( json['message'] as Map), logprobs: json['logprobs'] == null @@ -816,7 +818,9 @@ _$ChatCompletionTokenLogprobImpl _$$ChatCompletionTokenLogprobImplFromJson( _$ChatCompletionTokenLogprobImpl( token: json['token'] as String, logprob: (json['logprob'] as num).toDouble(), - bytes: (json['bytes'] as List?)?.map((e) => e as int).toList(), + bytes: (json['bytes'] as List?) + ?.map((e) => (e as num).toInt()) + .toList(), topLogprobs: (json['top_logprobs'] as List) .map((e) => ChatCompletionTokenTopLogprob.fromJson(e as Map)) @@ -837,8 +841,9 @@ _$ChatCompletionTokenTopLogprobImpl _$ChatCompletionTokenTopLogprobImpl( token: json['token'] as String, logprob: (json['logprob'] as num).toDouble(), - bytes: - (json['bytes'] as List?)?.map((e) => e as int).toList(), + bytes: (json['bytes'] as List?) 
+ ?.map((e) => (e as num).toInt()) + .toList(), ); Map _$$ChatCompletionTokenTopLogprobImplToJson( @@ -858,7 +863,7 @@ _$CreateChatCompletionStreamResponseImpl .map((e) => ChatCompletionStreamResponseChoice.fromJson( e as Map)) .toList(), - created: json['created'] as int?, + created: (json['created'] as num?)?.toInt(), model: json['model'] as String?, serviceTier: $enumDecodeNullable( _$ServiceTierEnumMap, json['service_tier'], @@ -904,7 +909,7 @@ _$ChatCompletionStreamResponseChoiceImpl finishReason: $enumDecodeNullable( _$ChatCompletionFinishReasonEnumMap, json['finish_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), - index: json['index'] as int?, + index: (json['index'] as num?)?.toInt(), ); Map _$$ChatCompletionStreamResponseChoiceImplToJson( @@ -1013,7 +1018,7 @@ _$ChatCompletionStreamMessageToolCallChunkImpl _$$ChatCompletionStreamMessageToolCallChunkImplFromJson( Map json) => _$ChatCompletionStreamMessageToolCallChunkImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), id: json['id'] as String?, type: $enumDecodeNullable( _$ChatCompletionStreamMessageToolCallChunkTypeEnumMap, @@ -1051,9 +1056,9 @@ const _$ChatCompletionStreamMessageToolCallChunkTypeEnumMap = { _$CompletionUsageImpl _$$CompletionUsageImplFromJson( Map json) => _$CompletionUsageImpl( - completionTokens: json['completion_tokens'] as int?, - promptTokens: json['prompt_tokens'] as int, - totalTokens: json['total_tokens'] as int, + completionTokens: (json['completion_tokens'] as num?)?.toInt(), + promptTokens: (json['prompt_tokens'] as num).toInt(), + totalTokens: (json['total_tokens'] as num).toInt(), ); Map _$$CompletionUsageImplToJson( @@ -1072,7 +1077,7 @@ _$CreateEmbeddingRequestImpl _$$CreateEmbeddingRequestImplFromJson( encodingFormat: $enumDecodeNullable( _$EmbeddingEncodingFormatEnumMap, json['encoding_format']) ?? 
EmbeddingEncodingFormat.float, - dimensions: json['dimensions'] as int?, + dimensions: (json['dimensions'] as num?)?.toInt(), user: json['user'] as String?, ); @@ -1139,7 +1144,8 @@ _$EmbeddingInputListListIntImpl _$$EmbeddingInputListListIntImplFromJson( Map json) => _$EmbeddingInputListListIntImpl( (json['value'] as List) - .map((e) => (e as List).map((e) => e as int).toList()) + .map((e) => + (e as List).map((e) => (e as num).toInt()).toList()) .toList(), $type: json['runtimeType'] as String?, ); @@ -1154,7 +1160,7 @@ Map _$$EmbeddingInputListListIntImplToJson( _$EmbeddingInputListIntImpl _$$EmbeddingInputListIntImplFromJson( Map json) => _$EmbeddingInputListIntImpl( - (json['value'] as List).map((e) => e as int).toList(), + (json['value'] as List).map((e) => (e as num).toInt()).toList(), $type: json['runtimeType'] as String?, ); @@ -1231,7 +1237,7 @@ const _$CreateEmbeddingResponseObjectEnumMap = { _$EmbeddingImpl _$$EmbeddingImplFromJson(Map json) => _$EmbeddingImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), embedding: const _EmbeddingVectorConverter().fromJson(json['embedding']), object: $enumDecode(_$EmbeddingObjectEnumMap, json['object']), ); @@ -1279,8 +1285,8 @@ Map _$$EmbeddingVectorStringImplToJson( _$EmbeddingUsageImpl _$$EmbeddingUsageImplFromJson(Map json) => _$EmbeddingUsageImpl( - promptTokens: json['prompt_tokens'] as int, - totalTokens: json['total_tokens'] as int, + promptTokens: (json['prompt_tokens'] as num).toInt(), + totalTokens: (json['total_tokens'] as num).toInt(), ); Map _$$EmbeddingUsageImplToJson( @@ -1305,7 +1311,7 @@ _$CreateFineTuningJobRequestImpl _$$CreateFineTuningJobRequestImplFromJson( ?.map( (e) => FineTuningIntegration.fromJson(e as Map)) .toList(), - seed: json['seed'] as int?, + seed: (json['seed'] as num?)?.toInt(), ); Map _$$CreateFineTuningJobRequestImplToJson( @@ -1348,6 +1354,7 @@ const _$FineTuningModelsEnumMap = { FineTuningModels.babbage002: 'babbage-002', FineTuningModels.davinci002: 'davinci-002', FineTuningModels.gpt35Turbo: 'gpt-3.5-turbo', + FineTuningModels.gpt4oMini: 'gpt-4o-mini', }; _$FineTuningModelStringImpl _$$FineTuningModelStringImplFromJson( @@ -1367,12 +1374,12 @@ Map _$$FineTuningModelStringImplToJson( _$FineTuningJobImpl _$$FineTuningJobImplFromJson(Map json) => _$FineTuningJobImpl( id: json['id'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), error: json['error'] == null ? null : FineTuningJobError.fromJson(json['error'] as Map), fineTunedModel: json['fine_tuned_model'] as String?, - finishedAt: json['finished_at'] as int?, + finishedAt: (json['finished_at'] as num?)?.toInt(), hyperparameters: FineTuningJobHyperparameters.fromJson( json['hyperparameters'] as Map), model: json['model'] as String, @@ -1382,7 +1389,7 @@ _$FineTuningJobImpl _$$FineTuningJobImplFromJson(Map json) => .map((e) => e as String) .toList(), status: $enumDecode(_$FineTuningJobStatusEnumMap, json['status']), - trainedTokens: json['trained_tokens'] as int?, + trainedTokens: (json['trained_tokens'] as num?)?.toInt(), trainingFile: json['training_file'] as String, validationFile: json['validation_file'] as String?, integrations: (json['integrations'] as List?) 
@@ -1528,7 +1535,7 @@ const _$FineTuningNEpochsOptionsEnumMap = { _$FineTuningNEpochsIntImpl _$$FineTuningNEpochsIntImplFromJson( Map json) => _$FineTuningNEpochsIntImpl( - json['value'] as int, + (json['value'] as num).toInt(), $type: json['runtimeType'] as String?, ); @@ -1632,7 +1639,7 @@ _$FineTuningJobEventImpl _$$FineTuningJobEventImplFromJson( Map json) => _$FineTuningJobEventImpl( id: json['id'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), level: $enumDecode(_$FineTuningJobEventLevelEnumMap, json['level']), message: json['message'] as String, object: $enumDecode(_$FineTuningJobEventObjectEnumMap, json['object']), @@ -1662,9 +1669,9 @@ _$FineTuningJobCheckpointImpl _$$FineTuningJobCheckpointImplFromJson( Map json) => _$FineTuningJobCheckpointImpl( id: json['id'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), fineTunedModelCheckpoint: json['fine_tuned_model_checkpoint'] as String, - stepNumber: json['step_number'] as int, + stepNumber: (json['step_number'] as num).toInt(), metrics: FineTuningJobCheckpointMetrics.fromJson( json['metrics'] as Map), fineTuningJobId: json['fine_tuning_job_id'] as String, @@ -1732,7 +1739,7 @@ _$CreateImageRequestImpl _$$CreateImageRequestImplFromJson( model: json['model'] == null ? const CreateImageRequestModelString('dall-e-2') : const _CreateImageRequestModelConverter().fromJson(json['model']), - n: json['n'] as int? ?? 1, + n: (json['n'] as num?)?.toInt() ?? 1, quality: $enumDecodeNullable(_$ImageQualityEnumMap, json['quality']) ?? ImageQuality.standard, responseFormat: $enumDecodeNullable( @@ -1831,7 +1838,7 @@ Map _$$CreateImageRequestModelStringImplToJson( _$ImagesResponseImpl _$$ImagesResponseImplFromJson(Map json) => _$ImagesResponseImpl( - created: json['created'] as int, + created: (json['created'] as num).toInt(), data: (json['data'] as List) .map((e) => Image.fromJson(e as Map)) .toList(), @@ -1867,7 +1874,7 @@ Map _$$ImageImplToJson(_$ImageImpl instance) { _$ModelImpl _$$ModelImplFromJson(Map json) => _$ModelImpl( id: json['id'] as String, - created: json['created'] as int, + created: (json['created'] as num).toInt(), object: $enumDecode(_$ModelObjectEnumMap, json['object']), ownedBy: json['owned_by'] as String, ); @@ -2109,7 +2116,7 @@ _$AssistantObjectImpl _$$AssistantObjectImplFromJson( _$AssistantObjectImpl( id: json['id'] as String, object: $enumDecode(_$AssistantObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), name: json['name'] as String?, description: json['description'] as String?, model: json['model'] as String, @@ -2277,6 +2284,7 @@ const _$AssistantModelsEnumMap = { AssistantModels.gpt4VisionPreview: 'gpt-4-vision-preview', AssistantModels.gpt4o: 'gpt-4o', AssistantModels.gpt4o20240513: 'gpt-4o-2024-05-13', + AssistantModels.gpt4o20240806: 'gpt-4o-2024-08-06', AssistantModels.gpt4oMini: 'gpt-4o-mini', AssistantModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', AssistantModels.gpt35Turbo: 'gpt-3.5-turbo', @@ -2545,7 +2553,7 @@ _$TruncationObjectImpl _$$TruncationObjectImplFromJson( Map json) => _$TruncationObjectImpl( type: $enumDecode(_$TruncationObjectTypeEnumMap, json['type']), - lastMessages: json['last_messages'] as int?, + lastMessages: (json['last_messages'] as num?)?.toInt(), ); Map _$$TruncationObjectImplToJson( @@ -2573,7 +2581,7 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => _$RunObjectImpl( id: json['id'] as String, 
object: $enumDecode(_$RunObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), threadId: json['thread_id'] as String, assistantId: json['assistant_id'] as String, status: $enumDecode(_$RunStatusEnumMap, json['status']), @@ -2584,11 +2592,11 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => lastError: json['last_error'] == null ? null : RunLastError.fromJson(json['last_error'] as Map), - expiresAt: json['expires_at'] as int?, - startedAt: json['started_at'] as int?, - cancelledAt: json['cancelled_at'] as int?, - failedAt: json['failed_at'] as int?, - completedAt: json['completed_at'] as int?, + expiresAt: (json['expires_at'] as num?)?.toInt(), + startedAt: (json['started_at'] as num?)?.toInt(), + cancelledAt: (json['cancelled_at'] as num?)?.toInt(), + failedAt: (json['failed_at'] as num?)?.toInt(), + completedAt: (json['completed_at'] as num?)?.toInt(), incompleteDetails: json['incomplete_details'] == null ? null : RunObjectIncompleteDetails.fromJson( @@ -2604,8 +2612,8 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => : RunCompletionUsage.fromJson(json['usage'] as Map), temperature: (json['temperature'] as num?)?.toDouble(), topP: (json['top_p'] as num?)?.toDouble(), - maxPromptTokens: json['max_prompt_tokens'] as int?, - maxCompletionTokens: json['max_completion_tokens'] as int?, + maxPromptTokens: (json['max_prompt_tokens'] as num?)?.toInt(), + maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), truncationStrategy: json['truncation_strategy'] == null ? null : TruncationObject.fromJson( @@ -2830,9 +2838,9 @@ Map _$$RunSubmitToolOutputsImplToJson( _$RunCompletionUsageImpl _$$RunCompletionUsageImplFromJson( Map json) => _$RunCompletionUsageImpl( - completionTokens: json['completion_tokens'] as int, - promptTokens: json['prompt_tokens'] as int, - totalTokens: json['total_tokens'] as int, + completionTokens: (json['completion_tokens'] as num).toInt(), + promptTokens: (json['prompt_tokens'] as num).toInt(), + totalTokens: (json['total_tokens'] as num).toInt(), ); Map _$$RunCompletionUsageImplToJson( @@ -2859,8 +2867,8 @@ _$CreateRunRequestImpl _$$CreateRunRequestImplFromJson( metadata: json['metadata'] as Map?, temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, topP: (json['top_p'] as num?)?.toDouble() ?? 1.0, - maxPromptTokens: json['max_prompt_tokens'] as int?, - maxCompletionTokens: json['max_completion_tokens'] as int?, + maxPromptTokens: (json['max_prompt_tokens'] as num?)?.toInt(), + maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), truncationStrategy: json['truncation_strategy'] == null ? null : TruncationObject.fromJson( @@ -2939,6 +2947,7 @@ const _$RunModelsEnumMap = { RunModels.gpt4VisionPreview: 'gpt-4-vision-preview', RunModels.gpt4o: 'gpt-4o', RunModels.gpt4o20240513: 'gpt-4o-2024-05-13', + RunModels.gpt4o20240806: 'gpt-4o-2024-08-06', RunModels.gpt4oMini: 'gpt-4o-mini', RunModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', RunModels.gpt35Turbo: 'gpt-3.5-turbo', @@ -3185,8 +3194,8 @@ _$CreateThreadAndRunRequestImpl _$$CreateThreadAndRunRequestImplFromJson( metadata: json['metadata'] as Map?, temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, topP: (json['top_p'] as num?)?.toDouble() ?? 
1.0, - maxPromptTokens: json['max_prompt_tokens'] as int?, - maxCompletionTokens: json['max_completion_tokens'] as int?, + maxPromptTokens: (json['max_prompt_tokens'] as num?)?.toInt(), + maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), truncationStrategy: json['truncation_strategy'] == null ? null : TruncationObject.fromJson( @@ -3265,6 +3274,7 @@ const _$ThreadAndRunModelsEnumMap = { ThreadAndRunModels.gpt4VisionPreview: 'gpt-4-vision-preview', ThreadAndRunModels.gpt4o: 'gpt-4o', ThreadAndRunModels.gpt4o20240513: 'gpt-4o-2024-05-13', + ThreadAndRunModels.gpt4o20240806: 'gpt-4o-2024-08-06', ThreadAndRunModels.gpt4oMini: 'gpt-4o-mini', ThreadAndRunModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', ThreadAndRunModels.gpt35Turbo: 'gpt-3.5-turbo', @@ -3377,7 +3387,7 @@ _$ThreadObjectImpl _$$ThreadObjectImplFromJson(Map json) => _$ThreadObjectImpl( id: json['id'] as String, object: $enumDecode(_$ThreadObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), toolResources: json['tool_resources'] == null ? null : ToolResources.fromJson( @@ -3597,7 +3607,7 @@ _$MessageObjectImpl _$$MessageObjectImplFromJson(Map json) => _$MessageObjectImpl( id: json['id'] as String, object: $enumDecode(_$MessageObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), threadId: json['thread_id'] as String, status: $enumDecodeNullable(_$MessageObjectStatusEnumMap, json['status'], unknownValue: JsonKey.nullForUndefinedEnumValue), @@ -3605,8 +3615,8 @@ _$MessageObjectImpl _$$MessageObjectImplFromJson(Map json) => ? null : MessageObjectIncompleteDetails.fromJson( json['incomplete_details'] as Map), - completedAt: json['completed_at'] as int?, - incompleteAt: json['incomplete_at'] as int?, + completedAt: (json['completed_at'] as num?)?.toInt(), + incompleteAt: (json['incomplete_at'] as num?)?.toInt(), role: $enumDecode(_$MessageRoleEnumMap, json['role']), content: (json['content'] as List) .map((e) => MessageContent.fromJson(e as Map)) @@ -3962,7 +3972,7 @@ _$MessageDeltaContentImageUrlObjectImpl _$$MessageDeltaContentImageUrlObjectImplFromJson( Map json) => _$MessageDeltaContentImageUrlObjectImpl( - index: json['index'] as int?, + index: (json['index'] as num?)?.toInt(), type: json['type'] as String?, imageUrl: json['image_url'] == null ? null @@ -4040,7 +4050,7 @@ _$RunStepObjectImpl _$$RunStepObjectImplFromJson(Map json) => _$RunStepObjectImpl( id: json['id'] as String, object: $enumDecode(_$RunStepObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), assistantId: json['assistant_id'] as String, threadId: json['thread_id'] as String, runId: json['run_id'] as String, @@ -4052,10 +4062,10 @@ _$RunStepObjectImpl _$$RunStepObjectImplFromJson(Map json) => ? null : RunStepLastError.fromJson( json['last_error'] as Map), - expiredAt: json['expired_at'] as int?, - cancelledAt: json['cancelled_at'] as int?, - failedAt: json['failed_at'] as int?, - completedAt: json['completed_at'] as int?, + expiredAt: (json['expired_at'] as num?)?.toInt(), + cancelledAt: (json['cancelled_at'] as num?)?.toInt(), + failedAt: (json['failed_at'] as num?)?.toInt(), + completedAt: (json['completed_at'] as num?)?.toInt(), metadata: json['metadata'] as Map?, usage: json['usage'] == null ? 
null @@ -4300,9 +4310,9 @@ Map _$RunStepCompletionUsageImpl _$$RunStepCompletionUsageImplFromJson( Map json) => _$RunStepCompletionUsageImpl( - completionTokens: json['completion_tokens'] as int, - promptTokens: json['prompt_tokens'] as int, - totalTokens: json['total_tokens'] as int, + completionTokens: (json['completion_tokens'] as num).toInt(), + promptTokens: (json['prompt_tokens'] as num).toInt(), + totalTokens: (json['total_tokens'] as num).toInt(), ); Map _$$RunStepCompletionUsageImplToJson( @@ -4318,7 +4328,7 @@ _$VectorStoreExpirationAfterImpl _$$VectorStoreExpirationAfterImplFromJson( _$VectorStoreExpirationAfterImpl( anchor: $enumDecode( _$VectorStoreExpirationAfterAnchorEnumMap, json['anchor']), - days: json['days'] as int, + days: (json['days'] as num).toInt(), ); Map _$$VectorStoreExpirationAfterImplToJson( @@ -4337,9 +4347,9 @@ _$VectorStoreObjectImpl _$$VectorStoreObjectImplFromJson( _$VectorStoreObjectImpl( id: json['id'] as String, object: json['object'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), name: json['name'] as String?, - usageBytes: json['usage_bytes'] as int, + usageBytes: (json['usage_bytes'] as num).toInt(), fileCounts: VectorStoreObjectFileCounts.fromJson( json['file_counts'] as Map), status: $enumDecode(_$VectorStoreObjectStatusEnumMap, json['status']), @@ -4347,8 +4357,8 @@ _$VectorStoreObjectImpl _$$VectorStoreObjectImplFromJson( ? null : VectorStoreExpirationAfter.fromJson( json['expires_after'] as Map), - expiresAt: json['expires_at'] as int?, - lastActiveAt: json['last_active_at'] as int?, + expiresAt: (json['expires_at'] as num?)?.toInt(), + lastActiveAt: (json['last_active_at'] as num?)?.toInt(), metadata: json['metadata'], ); @@ -4386,11 +4396,11 @@ const _$VectorStoreObjectStatusEnumMap = { _$VectorStoreObjectFileCountsImpl _$$VectorStoreObjectFileCountsImplFromJson( Map json) => _$VectorStoreObjectFileCountsImpl( - inProgress: json['in_progress'] as int, - completed: json['completed'] as int, - failed: json['failed'] as int, - cancelled: json['cancelled'] as int, - total: json['total'] as int, + inProgress: (json['in_progress'] as num).toInt(), + completed: (json['completed'] as num).toInt(), + failed: (json['failed'] as num).toInt(), + cancelled: (json['cancelled'] as num).toInt(), + total: (json['total'] as num).toInt(), ); Map _$$VectorStoreObjectFileCountsImplToJson( @@ -4509,8 +4519,8 @@ _$VectorStoreFileObjectImpl _$$VectorStoreFileObjectImplFromJson( _$VectorStoreFileObjectImpl( id: json['id'] as String, object: json['object'] as String, - usageBytes: json['usage_bytes'] as int, - createdAt: json['created_at'] as int, + usageBytes: (json['usage_bytes'] as num).toInt(), + createdAt: (json['created_at'] as num).toInt(), vectorStoreId: json['vector_store_id'] as String, status: $enumDecode(_$VectorStoreFileStatusEnumMap, json['status']), lastError: json['last_error'] == null @@ -4568,17 +4578,16 @@ Map _$$VectorStoreFileObjectLastErrorImplToJson( }; const _$VectorStoreFileObjectLastErrorCodeEnumMap = { - VectorStoreFileObjectLastErrorCode.internalError: 'internal_error', - VectorStoreFileObjectLastErrorCode.fileNotFound: 'file_not_found', - VectorStoreFileObjectLastErrorCode.parsingError: 'parsing_error', - VectorStoreFileObjectLastErrorCode.unhandledMimeType: 'unhandled_mime_type', + VectorStoreFileObjectLastErrorCode.serverError: 'server_error', + VectorStoreFileObjectLastErrorCode.unsupportedFile: 'unsupported_file', + VectorStoreFileObjectLastErrorCode.invalidFile: 'invalid_file', }; 
_$StaticChunkingStrategyImpl _$$StaticChunkingStrategyImplFromJson( Map json) => _$StaticChunkingStrategyImpl( - maxChunkSizeTokens: json['max_chunk_size_tokens'] as int, - chunkOverlapTokens: json['chunk_overlap_tokens'] as int, + maxChunkSizeTokens: (json['max_chunk_size_tokens'] as num).toInt(), + chunkOverlapTokens: (json['chunk_overlap_tokens'] as num).toInt(), ); Map _$$StaticChunkingStrategyImplToJson( @@ -4657,7 +4666,7 @@ _$VectorStoreFileBatchObjectImpl _$$VectorStoreFileBatchObjectImplFromJson( _$VectorStoreFileBatchObjectImpl( id: json['id'] as String, object: json['object'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), vectorStoreId: json['vector_store_id'] as String, status: $enumDecode( _$VectorStoreFileBatchObjectStatusEnumMap, json['status']), @@ -4687,11 +4696,11 @@ _$VectorStoreFileBatchObjectFileCountsImpl _$$VectorStoreFileBatchObjectFileCountsImplFromJson( Map json) => _$VectorStoreFileBatchObjectFileCountsImpl( - inProgress: json['in_progress'] as int, - completed: json['completed'] as int, - failed: json['failed'] as int, - cancelled: json['cancelled'] as int, - total: json['total'] as int, + inProgress: (json['in_progress'] as num).toInt(), + completed: (json['completed'] as num).toInt(), + failed: (json['failed'] as num).toInt(), + cancelled: (json['cancelled'] as num).toInt(), + total: (json['total'] as num).toInt(), ); Map _$$VectorStoreFileBatchObjectFileCountsImplToJson( @@ -4802,15 +4811,15 @@ _$BatchImpl _$$BatchImplFromJson(Map json) => _$BatchImpl( status: $enumDecode(_$BatchStatusEnumMap, json['status']), outputFileId: json['output_file_id'] as String?, errorFileId: json['error_file_id'] as String?, - createdAt: json['created_at'] as int, - inProgressAt: json['in_progress_at'] as int?, - expiresAt: json['expires_at'] as int?, - finalizingAt: json['finalizing_at'] as int?, - completedAt: json['completed_at'] as int?, - failedAt: json['failed_at'] as int?, - expiredAt: json['expired_at'] as int?, - cancellingAt: json['cancelling_at'] as int?, - cancelledAt: json['cancelled_at'] as int?, + createdAt: (json['created_at'] as num).toInt(), + inProgressAt: (json['in_progress_at'] as num?)?.toInt(), + expiresAt: (json['expires_at'] as num?)?.toInt(), + finalizingAt: (json['finalizing_at'] as num?)?.toInt(), + completedAt: (json['completed_at'] as num?)?.toInt(), + failedAt: (json['failed_at'] as num?)?.toInt(), + expiredAt: (json['expired_at'] as num?)?.toInt(), + cancellingAt: (json['cancelling_at'] as num?)?.toInt(), + cancelledAt: (json['cancelled_at'] as num?)?.toInt(), requestCounts: json['request_counts'] == null ? 
null : BatchRequestCounts.fromJson( @@ -4892,9 +4901,9 @@ Map _$$BatchErrorsImplToJson(_$BatchErrorsImpl instance) { _$BatchRequestCountsImpl _$$BatchRequestCountsImplFromJson( Map json) => _$BatchRequestCountsImpl( - total: json['total'] as int, - completed: json['completed'] as int, - failed: json['failed'] as int, + total: (json['total'] as num).toInt(), + completed: (json['completed'] as num).toInt(), + failed: (json['failed'] as num).toInt(), ); Map _$$BatchRequestCountsImplToJson( @@ -4911,7 +4920,7 @@ _$BatchErrorsDataInnerImpl _$$BatchErrorsDataInnerImplFromJson( code: json['code'] as String?, message: json['message'] as String?, param: json['param'] as String?, - line: json['line'] as int?, + line: (json['line'] as num?)?.toInt(), ); Map _$$BatchErrorsDataInnerImplToJson( @@ -5250,7 +5259,7 @@ _$AssistantToolsFileSearchFileSearchImpl _$$AssistantToolsFileSearchFileSearchImplFromJson( Map json) => _$AssistantToolsFileSearchFileSearchImpl( - maxNumResults: json['max_num_results'] as int?, + maxNumResults: (json['max_num_results'] as num?)?.toInt(), ); Map _$$AssistantToolsFileSearchFileSearchImplToJson( @@ -5315,7 +5324,7 @@ _$MessageDeltaContentImageFileObjectImpl _$$MessageDeltaContentImageFileObjectImplFromJson( Map json) => _$MessageDeltaContentImageFileObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, imageFile: json['image_file'] == null ? null @@ -5343,7 +5352,7 @@ Map _$$MessageDeltaContentImageFileObjectImplToJson( _$MessageDeltaContentTextObjectImpl _$$MessageDeltaContentTextObjectImplFromJson(Map json) => _$MessageDeltaContentTextObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, text: json['text'] == null ? null @@ -5376,8 +5385,8 @@ _$MessageContentTextAnnotationsFileCitationObjectImpl text: json['text'] as String, fileCitation: MessageContentTextAnnotationsFileCitation.fromJson( json['file_citation'] as Map), - startIndex: json['start_index'] as int, - endIndex: json['end_index'] as int, + startIndex: (json['start_index'] as num).toInt(), + endIndex: (json['end_index'] as num).toInt(), ); Map @@ -5399,8 +5408,8 @@ _$MessageContentTextAnnotationsFilePathObjectImpl text: json['text'] as String, filePath: MessageContentTextAnnotationsFilePath.fromJson( json['file_path'] as Map), - startIndex: json['start_index'] as int, - endIndex: json['end_index'] as int, + startIndex: (json['start_index'] as num).toInt(), + endIndex: (json['end_index'] as num).toInt(), ); Map _$$MessageContentTextAnnotationsFilePathObjectImplToJson( @@ -5430,15 +5439,15 @@ _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplFromJson( Map json) => _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, text: json['text'] as String?, fileCitation: json['file_citation'] == null ? 
null : MessageDeltaContentTextAnnotationsFileCitation.fromJson( json['file_citation'] as Map), - startIndex: json['start_index'] as int?, - endIndex: json['end_index'] as int?, + startIndex: (json['start_index'] as num?)?.toInt(), + endIndex: (json['end_index'] as num?)?.toInt(), ); Map @@ -5466,15 +5475,15 @@ _$MessageDeltaContentTextAnnotationsFilePathObjectImpl _$$MessageDeltaContentTextAnnotationsFilePathObjectImplFromJson( Map json) => _$MessageDeltaContentTextAnnotationsFilePathObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, text: json['text'] as String?, filePath: json['file_path'] == null ? null : MessageDeltaContentTextAnnotationsFilePathObjectFilePath .fromJson(json['file_path'] as Map), - startIndex: json['start_index'] as int?, - endIndex: json['end_index'] as int?, + startIndex: (json['start_index'] as num?)?.toInt(), + endIndex: (json['end_index'] as num?)?.toInt(), ); Map @@ -5683,7 +5692,7 @@ _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), id: json['id'] as String?, type: json['type'] as String, codeInterpreter: json['code_interpreter'] == null @@ -5714,7 +5723,7 @@ _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), id: json['id'] as String?, type: json['type'] as String, fileSearch: json['file_search'] as Map, @@ -5743,7 +5752,7 @@ _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), id: json['id'] as String?, type: json['type'] as String, function: json['function'] == null @@ -5831,7 +5840,7 @@ _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, logs: json['logs'] as String?, ); @@ -5858,7 +5867,7 @@ _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, image: json['image'] == null ? null diff --git a/packages/openai_dart/lib/src/generated/schema/thread_object.dart b/packages/openai_dart/lib/src/generated/schema/thread_object.dart index a5ae0ea8..20f2e014 100644 --- a/packages/openai_dart/lib/src/generated/schema/thread_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/thread_object.dart @@ -27,7 +27,9 @@ class ThreadObject with _$ThreadObject { /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources') required ToolResources? 
toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, }) = _ThreadObject; diff --git a/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart b/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart index 7105bd0c..d2ef2414 100644 --- a/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart @@ -22,7 +22,9 @@ class UpdateVectorStoreRequest with _$UpdateVectorStoreRequest { @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic metadata, }) = _UpdateVectorStoreRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart b/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart index b6c24133..3664758b 100644 --- a/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart @@ -140,12 +140,10 @@ class VectorStoreFileObjectLastError with _$VectorStoreFileObjectLastError { /// One of `server_error` or `rate_limit_exceeded`. enum VectorStoreFileObjectLastErrorCode { - @JsonValue('internal_error') - internalError, - @JsonValue('file_not_found') - fileNotFound, - @JsonValue('parsing_error') - parsingError, - @JsonValue('unhandled_mime_type') - unhandledMimeType, + @JsonValue('server_error') + serverError, + @JsonValue('unsupported_file') + unsupportedFile, + @JsonValue('invalid_file') + invalidFile, } diff --git a/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart b/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart index 836d8337..a3d49591 100644 --- a/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart @@ -47,7 +47,9 @@ class VectorStoreObject with _$VectorStoreObject { /// The Unix timestamp (in seconds) for when the vector store was last active. @JsonKey(name: 'last_active_at') required int? lastActiveAt, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required dynamic metadata, }) = _VectorStoreObject; diff --git a/packages/openai_dart/oas/main.dart b/packages/openai_dart/oas/main.dart index f1fbee08..5a62f6b1 100644 --- a/packages/openai_dart/oas/main.dart +++ b/packages/openai_dart/oas/main.dart @@ -1,3 +1,4 @@ +// ignore_for_file: avoid_print import 'dart:io'; import 'package:openapi_spec/openapi_spec.dart'; @@ -18,10 +19,12 @@ void main() async { enabled: true, ), ); - await Process.run( + final res = await Process.run( 'dart', ['run', 'build_runner', 'build', 'lib', '--delete-conflicting-outputs'], ); + print(res.stdout); + print(res.stderr); } String? _onSchemaName(final String schemaName) => switch (schemaName) { diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 00dbbe54..c349f64e 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -4,7 +4,7 @@ openapi: 3.0.0 info: title: OpenAI API description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. - version: "2.1.0" + version: "2.3.0" termsOfService: https://openai.com/policies/terms-of-use contact: name: OpenAI Support @@ -310,7 +310,7 @@ paths: schema: type: string # ideally this will be an actual ID, so this will always work from browser - example: gpt-3.5-turbo + example: gpt-4o-mini description: The ID of the model to use for this request responses: "200": @@ -330,7 +330,7 @@ paths: required: true schema: type: string - example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 + example: ft:gpt-4o-mini:acemeco:suffix:abc123 description: The model to delete responses: "200": @@ -1796,7 +1796,7 @@ components: model: title: ChatCompletionModel description: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. - example: "gpt-4-turbo" + example: "gpt-4o" anyOf: - type: string description: The ID of the model to use for this request. @@ -1820,6 +1820,7 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -2688,7 +2689,7 @@ components: description: | The name of the model to fine-tune. You can select one of the [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - example: "gpt-3.5-turbo" + example: "gpt-4o-mini" anyOf: - type: string description: The ID of the model to use for this request. @@ -2696,7 +2697,7 @@ components: title: FineTuningModels description: | Available fine-tuning models. Mind that the list may not be exhaustive nor up-to-date. - enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo" ] + enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] training_file: description: | The ID of an uploaded file that contains training data. @@ -2719,7 +2720,7 @@ components: description: | A string of up to 18 characters that will be added to your fine-tuned model name. - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. 
+ For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. type: string minLength: 1 maxLength: 40 @@ -3438,7 +3439,8 @@ components: nullable: true tools: description: &assistant_tools_param_description | - A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + types `code_interpreter`, `file_search`, or `function`. default: [ ] type: array maxItems: 128 @@ -3448,13 +3450,16 @@ components: $ref: "#/components/schemas/ToolResources" metadata: description: &metadata_description | - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + information about the object in a structured format. Keys can be a maximum of 64 characters long and values + can be a maxium of 512 characters long. type: object additionalProperties: true nullable: true temperature: description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. type: number minimum: 0 maximum: 2 @@ -3469,16 +3474,29 @@ components: example: 1 nullable: true description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + mass are considered. We generally recommend altering this or temperature but not both. response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + description: &assistant_response_format | + Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-4o-mini-1106`. - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
- **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + system or user message. Without this, the model may generate an unending stream of whitespace until the + generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + that the message content may be partially cut off if `finish_reason="length"`, which indicates the + generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: - type: string title: AssistantResponseFormatMode @@ -3528,6 +3546,7 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -3568,8 +3587,7 @@ components: additionalProperties: true nullable: true temperature: - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + description: *run_temperature_description type: number minimum: 0 maximum: 2 @@ -3583,17 +3601,9 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. + description: *run_top_p_description response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: CreateAssistantResponseFormatMode @@ -3663,17 +3673,9 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. + description: *run_top_p_description response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: ModifyAssistantResponseFormatMode @@ -3764,7 +3766,7 @@ components: minimum: 1 maximum: 50 description: | - The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. Note that the file search tool may output fewer than `max_num_results` results. See the [file search @@ -4000,12 +4002,7 @@ components: - $ref: "#/components/schemas/AssistantsNamedToolChoice" parallel_tool_calls: *parallel_tool_calls response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: RunObjectResponseFormatMode @@ -4092,6 +4089,7 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -4143,10 +4141,7 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
+ description: *run_top_p_description max_prompt_tokens: type: integer nullable: true @@ -4181,12 +4176,7 @@ components: - $ref: "#/components/schemas/AssistantsNamedToolChoice" parallel_tool_calls: *parallel_tool_calls response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: CreateRunRequestResponseFormatMode @@ -4336,6 +4326,7 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -4414,12 +4405,7 @@ components: - $ref: "#/components/schemas/AssistantsNamedToolChoice" parallel_tool_calls: *parallel_tool_calls response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: CreateThreadAndRunRequestResponseFormatMode @@ -5852,10 +5838,9 @@ components: description: One of `server_error` or `rate_limit_exceeded`. enum: [ - "internal_error", - "file_not_found", - "parsing_error", - "unhandled_mime_type", + "server_error", + "unsupported_file", + "invalid_file", ] message: type: string diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 1a91af5d..4d45fce2 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -2,7 +2,7 @@ openapi: 3.0.0 info: title: OpenAI API description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. 
- version: "2.1.0" + version: "2.3.0" termsOfService: https://openai.com/policies/terms-of-use contact: name: OpenAI Support @@ -143,7 +143,7 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "model": "gpt-4-turbo", + "model": "gpt-4o", "messages": [ { "role": "user", @@ -169,7 +169,7 @@ paths: client = OpenAI() response = client.chat.completions.create( - model="gpt-4-turbo", + model="gpt-4o", messages=[ { "role": "user", @@ -193,7 +193,7 @@ paths: async function main() { const response = await openai.chat.completions.create({ - model: "gpt-4-turbo", + model: "gpt-4o", messages: [ { role: "user", @@ -305,7 +305,7 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "model": "gpt-4-turbo", + "model": "gpt-4o", "messages": [ { "role": "user", @@ -399,7 +399,7 @@ paths: ]; const response = await openai.chat.completions.create({ - model: "gpt-4-turbo", + model: "gpt-4o", messages: messages, tools: tools, tool_choice: "auto", @@ -1973,7 +1973,7 @@ paths: -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "training_file": "file-BK7bzQj3FfZFXr7DbL6xJwfo", - "model": "gpt-3.5-turbo" + "model": "gpt-4o-mini" }' python: | from openai import OpenAI @@ -1981,7 +1981,7 @@ paths: client.fine_tuning.jobs.create( training_file="file-abc123", - model="gpt-3.5-turbo" + model="gpt-4o-mini" ) node.js: | import OpenAI from "openai"; @@ -2001,8 +2001,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2018,7 +2018,7 @@ paths: -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "training_file": "file-abc123", - "model": "gpt-3.5-turbo", + "model": "gpt-4o-mini", "hyperparameters": { "n_epochs": 2 } @@ -2029,7 +2029,7 @@ paths: client.fine_tuning.jobs.create( training_file="file-abc123", - model="gpt-3.5-turbo", + model="gpt-4o-mini", hyperparameters={ "n_epochs":2 } @@ -2042,7 +2042,7 @@ paths: async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", - model: "gpt-3.5-turbo", + model: "gpt-4o-mini", hyperparameters: { n_epochs: 2 } }); @@ -2054,8 +2054,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2073,7 +2073,7 @@ paths: -d '{ "training_file": "file-abc123", "validation_file": "file-abc123", - "model": "gpt-3.5-turbo" + "model": "gpt-4o-mini" }' python: | from openai import OpenAI @@ -2082,7 +2082,7 @@ paths: client.fine_tuning.jobs.create( training_file="file-abc123", validation_file="file-def456", - model="gpt-3.5-turbo" + model="gpt-4o-mini" ) node.js: | import OpenAI from "openai"; @@ -2103,8 +2103,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2121,7 +2121,7 @@ paths: -d '{ "training_file": "file-abc123", "validation_file": "file-abc123", - "model": "gpt-3.5-turbo", + "model": "gpt-4o-mini", "integrations": [ { "type": "wandb", @@ -2139,8 +2139,8 @@ paths: { "object": "fine_tuning.job", "id": 
"ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2380,7 +2380,7 @@ paths: { "object": "fine_tuning.job.event", "id": "ft-event-ddTJfwuMVpfLXseO0Am0Gqjm", - "created_at": 1692407401, + "created_at": 1721764800, "level": "info", "message": "Fine tuning job successfully completed", "data": null, @@ -2389,9 +2389,9 @@ paths: { "object": "fine_tuning.job.event", "id": "ft-event-tyiGuB72evQncpH87xe505Sv", - "created_at": 1692407400, + "created_at": 1721764800, "level": "info", - "message": "New fine-tuned model created: ft:gpt-3.5-turbo:openai::7p4lURel", + "message": "New fine-tuned model created: ft:gpt-4o-mini:openai::7p4lURel", "data": null, "type": "message" } @@ -2450,8 +2450,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1689376978, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2514,8 +2514,8 @@ paths: { "object": "fine_tuning.job.checkpoint", "id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", - "created_at": 1519129973, - "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:96olL566:ckpt-step-2000", + "created_at": 1721764867, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:96olL566:ckpt-step-2000", "metrics": { "full_valid_loss": 0.134, "full_valid_mean_token_accuracy": 0.874 @@ -2526,8 +2526,8 @@ paths: { "object": "fine_tuning.job.checkpoint", "id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", - "created_at": 1519129833, - "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", + "created_at": 1721764800, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", "metrics": { "full_valid_loss": 0.167, "full_valid_mean_token_accuracy": 0.781 @@ -2619,7 +2619,7 @@ paths: schema: type: string # ideally this will be an actual ID, so this will always work from browser - example: gpt-3.5-turbo + example: gpt-4o-mini description: The ID of the model to use for this request responses: "200": @@ -2672,7 +2672,7 @@ paths: required: true schema: type: string - example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 + example: ft:gpt-4o-mini:acemeco:suffix:abc123 description: The model to delete responses: "200": @@ -2688,28 +2688,28 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/models/ft:gpt-3.5-turbo:acemeco:suffix:abc123 \ + curl https://api.openai.com/v1/models/ft:gpt-4o-mini:acemeco:suffix:abc123 \ -X DELETE \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | from openai import OpenAI client = OpenAI() - client.models.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") + client.models.delete("ft:gpt-4o-mini:acemeco:suffix:abc123") node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); + const model = await openai.models.del("ft:gpt-4o-mini:acemeco:suffix:abc123"); console.log(model); } main(); response: | { - "id": "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "id": "ft:gpt-4o-mini:acemeco:suffix:abc123", "object": "model", "deleted": true } @@ -2822,7 +2822,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: 
&pagination_after_param_description | @@ -2888,7 +2888,7 @@ paths: "created_at": 1698982736, "name": "Coding Tutor", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "tool_resources": {}, @@ -2903,7 +2903,7 @@ paths: "created_at": 1698982718, "name": "My Assistant", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "tool_resources": {}, @@ -2918,7 +2918,7 @@ paths: "created_at": 1698982643, "name": null, "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "tools": [], "tool_resources": {}, @@ -2967,7 +2967,7 @@ paths: "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", "name": "Math Tutor", "tools": [{"type": "code_interpreter"}], - "model": "gpt-4-turbo" + "model": "gpt-4o" }' python: | @@ -2978,7 +2978,7 @@ paths: instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.", name="Math Tutor", tools=[{"type": "code_interpreter"}], - model="gpt-4-turbo", + model="gpt-4o", ) print(my_assistant) node.js: |- @@ -2992,7 +2992,7 @@ paths: "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", name: "Math Tutor", tools: [{ type: "code_interpreter" }], - model: "gpt-4-turbo", + model: "gpt-4o", }); console.log(myAssistant); @@ -3006,7 +3006,7 @@ paths: "created_at": 1698984975, "name": "Math Tutor", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", "tools": [ { @@ -3029,7 +3029,7 @@ paths: "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [{"type": "file_search"}], "tool_resources": {"file_search": {"vector_store_ids": ["vs_123"]}}, - "model": "gpt-4-turbo" + "model": "gpt-4o" }' python: | from openai import OpenAI @@ -3040,7 +3040,7 @@ paths: name="HR Helper", tools=[{"type": "file_search"}], tool_resources={"file_search": {"vector_store_ids": ["vs_123"]}}, - model="gpt-4-turbo" + model="gpt-4o" ) print(my_assistant) node.js: |- @@ -3059,7 +3059,7 @@ paths: vector_store_ids: ["vs_123"] } }, - model: "gpt-4-turbo" + model: "gpt-4o" }); console.log(myAssistant); @@ -3073,7 +3073,7 @@ paths: "created_at": 1699009403, "name": "HR Helper", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [ { @@ -3150,7 +3150,7 @@ paths: "created_at": 1699009709, "name": "HR Helper", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [ { @@ -3202,7 +3202,7 @@ paths: -d '{ "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", "tools": [{"type": "file_search"}], - "model": "gpt-4-turbo" + "model": "gpt-4o" }' python: | from openai import OpenAI @@ -3213,7 +3213,7 @@ paths: instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", name="HR Helper", tools=[{"type": "file_search"}], - model="gpt-4-turbo" + model="gpt-4o" ) print(my_updated_assistant) @@ -3230,7 +3230,7 @@ paths: "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", name: "HR Helper", tools: [{ type: "file_search" }], - model: "gpt-4-turbo" + model: "gpt-4o" } ); @@ -3245,7 +3245,7 @@ paths: "created_at": 1699009709, "name": "HR Helper", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", "tools": [ { @@ -3671,7 +3671,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -4203,7 +4203,7 @@ paths: "completed_at": null, "required_action": null, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a helpful assistant.", "tools": [], "tool_resources": {}, @@ -4282,13 +4282,13 @@ paths: data: {"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -4320,7 +4320,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + 
{"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: done data: [DONE] @@ -4451,13 +4451,13 @@ paths: data: {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} @@ -4483,7 +4483,7 @@ paths: data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} event: thread.run.requires_action - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -4514,7 +4514,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -4584,7 +4584,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4631,7 +4631,7 @@ paths: "failed_at": null, "completed_at": 1699063291, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4750,7 +4750,7 @@ paths: "failed_at": null, "completed_at": 1699063291, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4814,13 +4814,13 @@ paths: main(); response: | event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -4852,7 +4852,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -4969,13 +4969,13 @@ paths: main(); response: | event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -5007,7 +5007,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -5088,7 +5088,7 @@ paths: "failed_at": 
null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -5207,7 +5207,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -5351,7 +5351,7 @@ paths: "failed_at": null, "completed_at": null, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "tools": [ { @@ -5455,10 +5455,10 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} @@ -5496,7 +5496,7 @@ paths: data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -5578,7 +5578,7 @@ paths: "failed_at": null, "completed_at": null, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You summarize books.", "tools": [ { @@ -5631,7 +5631,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -5837,7 +5837,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -6212,7 +6212,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -6228,7 +6228,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -6774,7 +6774,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -6790,7 +6790,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -6890,7 +6890,7 @@ paths: description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. completion_window: type: string - enum: ["24h"] + enum: [ "24h" ] description: The time frame within which the batch should be processed. Currently only `24h` is supported. metadata: type: object @@ -7267,7 +7267,7 @@ components: properties: object: type: string - enum: [list] + enum: [ list ] data: type: array items: @@ -7298,7 +7298,7 @@ components: anyOf: - type: string - type: string - enum: ["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"] + enum: [ "gpt-3.5-turbo-instruct", "davinci-002", "babbage-002" ] x-oaiTypeLabel: string prompt: description: &completions_prompt_description | @@ -7510,7 +7510,7 @@ components: The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, or `content_filter` if content was omitted due to a flag from our content filters. 
- enum: ["stop", "length", "content_filter"] + enum: [ "stop", "length", "content_filter" ] index: type: integer logprobs: @@ -7552,7 +7552,7 @@ components: object: type: string description: The object type, which is always "text_completion" - enum: [text_completion] + enum: [ text_completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -7597,7 +7597,7 @@ components: properties: type: type: string - enum: ["image_url"] + enum: [ "image_url" ] description: The type of the content part. image_url: type: object @@ -7609,7 +7609,7 @@ components: detail: type: string description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - url @@ -7650,7 +7650,7 @@ components: type: string role: type: string - enum: ["system"] + enum: [ "system" ] description: The role of the messages author, in this case `system`. name: type: string @@ -7699,7 +7699,7 @@ components: The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. role: type: string - enum: ["assistant"] + enum: [ "assistant" ] description: The role of the messages author, in this case `assistant`. name: type: string @@ -7732,7 +7732,7 @@ components: properties: weight: type: integer - enum: [0, 1] + enum: [ 0, 1 ] description: "Controls whether the assistant message is trained against (0 or 1)" - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" required: @@ -7744,7 +7744,7 @@ components: properties: role: type: string - enum: ["tool"] + enum: [ "tool" ] description: The role of the messages author, in this case `tool`. content: type: string @@ -7764,7 +7764,7 @@ components: properties: role: type: string - enum: ["function"] + enum: [ "function" ] description: The role of the messages author, in this case `function`. content: nullable: true @@ -7814,7 +7814,7 @@ components: properties: type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: $ref: "#/components/schemas/FunctionObject" @@ -7851,7 +7851,7 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" x-oaiExpandable: true @@ -7861,7 +7861,7 @@ components: properties: type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7895,7 +7895,7 @@ components: description: The ID of the tool call. type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7925,7 +7925,7 @@ components: description: The ID of the tool call. type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7974,7 +7974,7 @@ components: $ref: "#/components/schemas/ChatCompletionMessageToolCalls" role: type: string - enum: ["assistant"] + enum: [ "assistant" ] description: The role of the author of this message. 
function_call: type: object @@ -8019,7 +8019,7 @@ components: $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" role: type: string - enum: ["system", "user", "assistant", "tool"] + enum: [ "system", "user", "assistant", "tool" ] description: The role of the author of this message. CreateChatCompletionRequest: @@ -8033,14 +8033,16 @@ components: $ref: "#/components/schemas/ChatCompletionRequestMessage" model: description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. - example: "gpt-4-turbo" + example: "gpt-4o" anyOf: - type: string - type: string enum: [ "gpt-4o", + "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", @@ -8150,7 +8152,7 @@ components: When this parameter is set, the response body will include the `service_tier` utilized. type: string - enum: ["auto", "default"] + enum: [ "auto", "default" ] nullable: true default: null stop: @@ -8218,7 +8220,7 @@ components: description: > `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" x-oaiExpandable: true functions: @@ -8297,7 +8299,7 @@ components: service_tier: description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. type: string - enum: ["scale", "default"] + enum: [ "scale", "default" ] example: "scale" nullable: true system_fingerprint: @@ -8309,7 +8311,7 @@ components: object: type: string description: The object type, which is always `chat.completion`. - enum: [chat.completion] + enum: [ chat.completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -8347,7 +8349,7 @@ components: &chat_completion_function_finish_reason_description | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. enum: - ["stop", "length", "function_call", "content_filter"] + [ "stop", "length", "function_call", "content_filter" ] index: type: integer description: The index of the choice in the list of choices. @@ -8368,7 +8370,7 @@ components: object: type: string description: The object type, which is always `chat.completion`. - enum: [chat.completion] + enum: [ chat.completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -8427,7 +8429,7 @@ components: type: boolean object: type: string - enum: [list] + enum: [ list ] required: - object - data @@ -8479,7 +8481,7 @@ components: service_tier: description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. type: string - enum: ["scale", "default"] + enum: [ "scale", "default" ] example: "scale" nullable: true system_fingerprint: @@ -8490,7 +8492,7 @@ components: object: type: string description: The object type, which is always `chat.completion.chunk`. 
- enum: [chat.completion.chunk] + enum: [ chat.completion.chunk ] usage: type: object description: | @@ -8540,7 +8542,7 @@ components: anyOf: - type: string - type: string - enum: ["dall-e-2", "dall-e-3"] + enum: [ "dall-e-2", "dall-e-3" ] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-3" @@ -8556,27 +8558,27 @@ components: description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. quality: type: string - enum: ["standard", "hd"] + enum: [ "standard", "hd" ] default: "standard" example: "standard" description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. response_format: &images_response_format type: string - enum: ["url", "b64_json"] + enum: [ "url", "b64_json" ] default: "url" example: "url" nullable: true description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. size: &images_size type: string - enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] + enum: [ "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792" ] default: "1024x1024" example: "1024x1024" nullable: true description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. style: type: string - enum: ["vivid", "natural"] + enum: [ "vivid", "natural" ] default: "vivid" example: "vivid" nullable: true @@ -8637,7 +8639,7 @@ components: anyOf: - type: string - type: string - enum: ["dall-e-2"] + enum: [ "dall-e-2" ] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-2" @@ -8653,7 +8655,7 @@ components: description: The number of images to generate. Must be between 1 and 10. size: &dalle2_images_size type: string - enum: ["256x256", "512x512", "1024x1024"] + enum: [ "256x256", "512x512", "1024x1024" ] default: "1024x1024" example: "1024x1024" nullable: true @@ -8675,7 +8677,7 @@ components: anyOf: - type: string - type: string - enum: ["dall-e-2"] + enum: [ "dall-e-2" ] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-2" @@ -8713,7 +8715,7 @@ components: anyOf: - type: string - type: string - enum: ["text-moderation-latest", "text-moderation-stable"] + enum: [ "text-moderation-latest", "text-moderation-stable" ] x-oaiTypeLabel: string required: - input @@ -8856,7 +8858,7 @@ components: $ref: "#/components/schemas/OpenAIFile" object: type: string - enum: [list] + enum: [ list ] required: - object - data @@ -8876,7 +8878,7 @@ components: Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). type: string - enum: ["assistants", "batch", "fine-tune", "vision"] + enum: [ "assistants", "batch", "fine-tune", "vision" ] required: - file - purpose @@ -8888,7 +8890,7 @@ components: type: string object: type: string - enum: [file] + enum: [ file ] deleted: type: boolean required: @@ -8910,7 +8912,7 @@ components: See the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). 
type: string - enum: ["assistants", "batch", "fine-tune", "vision"] + enum: [ "assistants", "batch", "fine-tune", "vision" ] bytes: description: | The number of bytes in the file you are uploading. @@ -8966,12 +8968,12 @@ components: model: description: | The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - example: "gpt-3.5-turbo" + [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + example: "gpt-4o-mini" anyOf: - type: string - type: string - enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] + enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] x-oaiTypeLabel: string training_file: description: | @@ -8996,7 +8998,7 @@ components: are updated less frequently, but with lower variance. oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: integer minimum: 1 maximum: 256 @@ -9007,7 +9009,7 @@ components: overfitting. oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: number minimum: 0 exclusiveMinimum: true @@ -9018,7 +9020,7 @@ components: through the training dataset. oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: integer minimum: 1 maximum: 50 @@ -9027,7 +9029,7 @@ components: description: | A string of up to 18 characters that will be added to your fine-tuned model name. - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. type: string minLength: 1 maxLength: 40 @@ -9063,7 +9065,7 @@ components: The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. oneOf: - type: string - enum: [wandb] + enum: [ wandb ] wandb: type: object description: | @@ -9120,7 +9122,7 @@ components: $ref: "#/components/schemas/FineTuningJobEvent" object: type: string - enum: [list] + enum: [ list ] required: - object - data @@ -9134,7 +9136,7 @@ components: $ref: "#/components/schemas/FineTuningJobCheckpoint" object: type: string - enum: [list] + enum: [ list ] first_id: type: string nullable: true @@ -9209,7 +9211,7 @@ components: example: "float" default: "float" type: string - enum: ["float", "base64"] + enum: [ "float", "base64" ] dimensions: description: | The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. @@ -9234,7 +9236,7 @@ components: object: type: string description: The object type, which is always "list". - enum: [list] + enum: [ list ] usage: type: object description: The usage information for the request. @@ -9271,7 +9273,7 @@ components: anyOf: - type: string - type: string - enum: ["whisper-1"] + enum: [ "whisper-1" ] x-oaiTypeLabel: string language: description: | @@ -9306,7 +9308,7 @@ components: enum: - word - segment - default: [segment] + default: [ segment ] required: - file - model @@ -9393,7 +9395,7 @@ components: type: number format: float description: End time of the word in seconds. - required: [word, start, end] + required: [ word, start, end ] CreateTranscriptionResponseVerboseJson: type: object @@ -9418,7 +9420,7 @@ components: description: Segments of the transcribed text and their corresponding details. 
items: $ref: "#/components/schemas/TranscriptionSegment" - required: [language, duration, text] + required: [ language, duration, text ] x-oaiMeta: name: The transcription object (Verbose JSON) group: audio @@ -9441,7 +9443,7 @@ components: anyOf: - type: string - type: string - enum: ["whisper-1"] + enum: [ "whisper-1" ] x-oaiTypeLabel: string prompt: description: | @@ -9487,7 +9489,7 @@ components: description: Segments of the translated text and their corresponding details. items: $ref: "#/components/schemas/TranscriptionSegment" - required: [language, duration, text] + required: [ language, duration, text ] CreateSpeechRequest: type: object @@ -9499,7 +9501,7 @@ components: anyOf: - type: string - type: string - enum: ["tts-1", "tts-1-hd"] + enum: [ "tts-1", "tts-1-hd" ] x-oaiTypeLabel: string input: type: string @@ -9508,12 +9510,12 @@ components: voice: description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). type: string - enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] + enum: [ "alloy", "echo", "fable", "onyx", "nova", "shimmer" ] response_format: description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." default: "mp3" type: string - enum: ["mp3", "opus", "aac", "flac", "wav", "pcm"] + enum: [ "mp3", "opus", "aac", "flac", "wav", "pcm" ] speed: description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." type: number @@ -9538,7 +9540,7 @@ components: object: type: string description: The object type, which is always "model". - enum: [model] + enum: [ model ] owned_by: type: string description: The organization that owns the model. @@ -9570,7 +9572,7 @@ components: object: type: string description: The object type, which is always `file`. - enum: ["file"] + enum: [ "file" ] purpose: type: string description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. @@ -9588,7 +9590,7 @@ components: type: string deprecated: true description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. - enum: ["uploaded", "processed", "error"] + enum: [ "uploaded", "processed", "error" ] status_details: type: string deprecated: true @@ -9636,14 +9638,14 @@ components: status: type: string description: The status of the Upload. - enum: ["pending", "completed", "cancelled", "expired"] + enum: [ "pending", "completed", "cancelled", "expired" ] expires_at: type: integer description: The Unix timestamp (in seconds) for when the Upload was created. object: type: string description: The object type, which is always "upload". - enum: [upload] + enum: [ upload ] file: $ref: "#/components/schemas/OpenAIFile" nullable: true @@ -9696,7 +9698,7 @@ components: object: type: string description: The object type, which is always `upload.part`. - enum: ['upload.part'] + enum: [ 'upload.part' ] required: - created_at - id @@ -9728,7 +9730,7 @@ components: object: type: string description: The object type, which is always "embedding". 
- enum: [embedding] + enum: [ embedding ] required: - index - object @@ -9793,15 +9795,15 @@ components: n_epochs: oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: integer minimum: 1 maximum: 50 default: auto description: - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. required: - n_epochs model: @@ -9810,7 +9812,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job". - enum: [fine_tuning.job] + enum: [ fine_tuning.job ] organization_id: type: string description: The organization that owns the fine-tuning job. @@ -9889,7 +9891,7 @@ components: type: type: string description: "The type of the integration being enabled for the fine-tuning job" - enum: ["wandb"] + enum: [ "wandb" ] wandb: type: object description: | @@ -9934,12 +9936,12 @@ components: type: integer level: type: string - enum: ["info", "warn", "error"] + enum: [ "info", "warn", "error" ] message: type: string object: type: string - enum: [fine_tuning.job.event] + enum: [ fine_tuning.job.event ] required: - id - object @@ -9999,7 +10001,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job.checkpoint". - enum: [fine_tuning.job.checkpoint] + enum: [ fine_tuning.job.checkpoint ] required: - created_at - fine_tuning_job_id @@ -10015,7 +10017,7 @@ components: "object": "fine_tuning.job.checkpoint", "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", "created_at": 1712211699, - "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom_suffix:9ABel2dg:ckpt-step-88", + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88", "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", "metrics": { "step": 88, @@ -10181,6 +10183,8 @@ components: description: | Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @@ -10215,7 +10219,7 @@ components: object: description: The object type, which is always `assistant`. 
type: string - enum: [assistant] + enum: [ assistant ] created_at: description: The Unix timestamp (in seconds) for when the assistant was created. type: integer @@ -10243,7 +10247,7 @@ components: tools: description: &assistant_tools_param_description | A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - default: [] + default: [ ] type: array maxItems: 128 items: @@ -10264,7 +10268,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10329,14 +10333,16 @@ components: properties: model: description: *model_description - example: "gpt-4-turbo" + example: "gpt-4o" anyOf: - type: string - type: string enum: [ "gpt-4o", + "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", @@ -10376,7 +10382,7 @@ components: maxLength: 256000 tools: description: *assistant_tools_param_description - default: [] + default: [ ] type: array maxItems: 128 items: @@ -10397,7 +10403,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10439,7 +10445,7 @@ components: type: type: string description: Always `auto`. - enum: ["auto"] + enum: [ "auto" ] required: - type - type: object @@ -10449,7 +10455,7 @@ components: type: type: string description: Always `static`. - enum: ["static"] + enum: [ "static" ] static: type: object additionalProperties: false @@ -10478,8 +10484,8 @@ components: Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. x-oaiTypeLabel: map oneOf: - - required: [vector_store_ids] - - required: [vector_stores] + - required: [ vector_store_ids ] + - required: [ vector_stores ] nullable: true metadata: description: *metadata_description @@ -10487,8 +10493,7 @@ components: x-oaiTypeLabel: map nullable: true temperature: - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + description: *run_temperature_description type: number minimum: 0 maximum: 2 @@ -10502,10 +10507,7 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
+ description: *run_top_p_description response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true @@ -10537,7 +10539,7 @@ components: maxLength: 256000 tools: description: *assistant_tools_param_description - default: [] + default: [ ] type: array maxItems: 128 items: @@ -10558,7 +10560,7 @@ components: type: array description: | Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10593,10 +10595,7 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. + description: *run_top_p_description response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true @@ -10610,7 +10609,7 @@ components: type: boolean object: type: string - enum: [assistant.deleted] + enum: [ assistant.deleted ] required: - id - object @@ -10653,7 +10652,7 @@ components: type: type: string description: "The type of tool being defined: `code_interpreter`" - enum: ["code_interpreter"] + enum: [ "code_interpreter" ] required: - type @@ -10664,7 +10663,7 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - enum: ["file_search"] + enum: [ "file_search" ] file_search: type: object description: Overrides for the file search tool. @@ -10674,7 +10673,7 @@ components: minimum: 1 maximum: 50 description: | - The maximum number of results the file search tool should output. The default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive. Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. required: @@ -10687,7 +10686,7 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - enum: ["file_search"] + enum: [ "file_search" ] required: - type @@ -10698,7 +10697,7 @@ components: type: type: string description: "The type of tool being defined: `function`" - enum: ["function"] + enum: [ "function" ] function: $ref: "#/components/schemas/FunctionObject" required: @@ -10713,7 +10712,7 @@ components: type: type: string description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: ["auto", "last_messages"] + enum: [ "auto", "last_messages" ] last_messages: type: integer description: The number of most recent messages from the thread when constructing the context for the run. @@ -10736,7 +10735,7 @@ components: `none` means the model will not call any tools and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" x-oaiExpandable: true @@ -10746,7 +10745,7 @@ components: properties: type: type: string - enum: ["function", "code_interpreter", "file_search"] + enum: [ "function", "code_interpreter", "file_search" ] description: The type of the tool. If type is `function`, the function name must be set function: type: object @@ -10770,7 +10769,7 @@ components: object: description: The object type, which is always `thread.run`. type: string - enum: ["thread.run"] + enum: [ "thread.run" ] created_at: description: The Unix timestamp (in seconds) for when the run was created. type: integer @@ -10803,7 +10802,7 @@ components: type: description: For now, this is always `submit_tool_outputs`. type: string - enum: ["submit_tool_outputs"] + enum: [ "submit_tool_outputs" ] submit_tool_outputs: type: object description: Details on the tool outputs needed for this run to continue. @@ -10827,7 +10826,7 @@ components: type: string description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. enum: - ["server_error", "rate_limit_exceeded", "invalid_prompt"] + [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] message: type: string description: A human-readable description of the error. @@ -10862,7 +10861,7 @@ components: reason: description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. type: string - enum: ["max_completion_tokens", "max_prompt_tokens"] + enum: [ "max_completion_tokens", "max_prompt_tokens" ] model: description: The model that the [assistant](/docs/api-reference/assistants) used for this run. type: string @@ -10871,7 +10870,7 @@ components: type: string tools: description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. - default: [] + default: [ ] type: array maxItems: 20 items: @@ -10961,7 +10960,7 @@ components: "failed_at": null, "completed_at": 1699073498, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], "metadata": {}, @@ -10992,14 +10991,16 @@ components: type: string model: description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - example: "gpt-4-turbo" + example: "gpt-4o" anyOf: - type: string - type: string enum: [ "gpt-4o", + "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", @@ -11068,10 +11069,7 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. + description: *run_top_p_description stream: type: boolean nullable: true @@ -11171,7 +11169,7 @@ components: type: type: string description: The type of tool call the output is required for. 
For now, this is always `function`. - enum: ["function"] + enum: [ "function" ] function: type: object description: The function definition. @@ -11202,14 +11200,16 @@ components: description: If no thread is provided, an empty thread will be created. model: description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - example: "gpt-4-turbo" + example: "gpt-4o" anyOf: - type: string - type: string enum: [ "gpt-4o", + "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", @@ -11259,7 +11259,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -11338,7 +11338,7 @@ components: object: description: The object type, which is always `thread`. type: string - enum: ["thread"] + enum: [ "thread" ] created_at: description: The Unix timestamp (in seconds) for when the thread was created. type: integer @@ -11354,7 +11354,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -11412,7 +11412,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -11454,7 +11454,7 @@ components: type: type: string description: Always `auto`. - enum: ["auto"] + enum: [ "auto" ] required: - type - type: object @@ -11464,7 +11464,7 @@ components: type: type: string description: Always `static`. - enum: ["static"] + enum: [ "static" ] static: type: object additionalProperties: false @@ -11494,8 +11494,8 @@ components: x-oaiTypeLabel: map x-oaiExpandable: true oneOf: - - required: [vector_store_ids] - - required: [vector_stores] + - required: [ vector_store_ids ] + - required: [ vector_stores ] nullable: true metadata: description: *metadata_description @@ -11519,7 +11519,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -11549,7 +11549,7 @@ components: type: boolean object: type: string - enum: [thread.deleted] + enum: [ thread.deleted ] required: - id - object @@ -11591,7 +11591,7 @@ components: object: description: The object type, which is always `thread.message`. type: string - enum: ["thread.message"] + enum: [ "thread.message" ] created_at: description: The Unix timestamp (in seconds) for when the message was created. type: integer @@ -11601,7 +11601,7 @@ components: status: description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. type: string - enum: ["in_progress", "incomplete", "completed"] + enum: [ "in_progress", "incomplete", "completed" ] incomplete_details: description: On an incomplete message, details about why the message is incomplete. 
type: object @@ -11631,7 +11631,7 @@ components: role: description: The entity that produced the message. One of `user` or `assistant`. type: string - enum: ["user", "assistant"] + enum: [ "user", "assistant" ] content: description: The content of the message in array of text and/or images. type: array @@ -11724,7 +11724,7 @@ components: object: description: The object type, which is always `thread.message.delta`. type: string - enum: ["thread.message.delta"] + enum: [ "thread.message.delta" ] delta: description: The delta containing the fields that have changed on the Message. type: object @@ -11732,7 +11732,7 @@ components: role: description: The entity that produced the message. One of `user` or `assistant`. type: string - enum: ["user", "assistant"] + enum: [ "user", "assistant" ] content: description: The content of the message in array of text and/or images. type: array @@ -11773,7 +11773,7 @@ components: properties: role: type: string - enum: ["user", "assistant"] + enum: [ "user", "assistant" ] description: | The role of the entity that is creating the message. Allowed values include: - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. @@ -11840,7 +11840,7 @@ components: type: boolean object: type: string - enum: [thread.message.deleted] + enum: [ thread.message.deleted ] required: - id - object @@ -11879,7 +11879,7 @@ components: type: description: Always `image_file`. type: string - enum: ["image_file"] + enum: [ "image_file" ] image_file: type: object properties: @@ -11889,7 +11889,7 @@ components: detail: type: string description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - file_id @@ -11908,7 +11908,7 @@ components: type: description: Always `image_file`. type: string - enum: ["image_file"] + enum: [ "image_file" ] image_file: type: object properties: @@ -11918,7 +11918,7 @@ components: detail: type: string description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - index @@ -11931,7 +11931,7 @@ components: properties: type: type: string - enum: ["image_url"] + enum: [ "image_url" ] description: The type of the content part. image_url: type: object @@ -11943,7 +11943,7 @@ components: detail: type: string description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - url @@ -11962,7 +11962,7 @@ components: type: description: Always `image_url`. type: string - enum: ["image_url"] + enum: [ "image_url" ] image_url: type: object properties: @@ -11972,7 +11972,7 @@ components: detail: type: string description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - index @@ -11986,7 +11986,7 @@ components: type: description: Always `text`. type: string - enum: ["text"] + enum: [ "text" ] text: type: object properties: @@ -12015,7 +12015,7 @@ components: type: description: Always `text`. 
type: string - enum: ["text"] + enum: [ "text" ] text: type: string description: Text content to be sent to the model @@ -12031,7 +12031,7 @@ components: type: description: Always `file_citation`. type: string - enum: ["file_citation"] + enum: [ "file_citation" ] text: description: The text in the message content that needs to be replaced. type: string @@ -12064,7 +12064,7 @@ components: type: description: Always `file_path`. type: string - enum: ["file_path"] + enum: [ "file_path" ] text: description: The text in the message content that needs to be replaced. type: string @@ -12100,7 +12100,7 @@ components: type: description: Always `text`. type: string - enum: ["text"] + enum: [ "text" ] text: type: object properties: @@ -12129,7 +12129,7 @@ components: type: description: Always `file_citation`. type: string - enum: ["file_citation"] + enum: [ "file_citation" ] text: description: The text in the message content that needs to be replaced. type: string @@ -12163,7 +12163,7 @@ components: type: description: Always `file_path`. type: string - enum: ["file_path"] + enum: [ "file_path" ] text: description: The text in the message content that needs to be replaced. type: string @@ -12195,7 +12195,7 @@ components: object: description: The object type, which is always `thread.run.step`. type: string - enum: ["thread.run.step"] + enum: [ "thread.run.step" ] created_at: description: The Unix timestamp (in seconds) for when the run step was created. type: integer @@ -12211,11 +12211,11 @@ components: type: description: The type of run step, which can be either `message_creation` or `tool_calls`. type: string - enum: ["message_creation", "tool_calls"] + enum: [ "message_creation", "tool_calls" ] status: description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. type: string - enum: ["in_progress", "cancelled", "failed", "completed", "expired"] + enum: [ "in_progress", "cancelled", "failed", "completed", "expired" ] step_details: type: object description: The details of the run step. @@ -12231,7 +12231,7 @@ components: code: type: string description: One of `server_error` or `rate_limit_exceeded`. - enum: ["server_error", "rate_limit_exceeded"] + enum: [ "server_error", "rate_limit_exceeded" ] message: type: string description: A human-readable description of the error. @@ -12295,7 +12295,7 @@ components: object: description: The object type, which is always `thread.run.step.delta`. type: string - enum: ["thread.run.step.delta"] + enum: [ "thread.run.step.delta" ] delta: description: The delta containing the fields that have changed on the run step. type: object @@ -12366,7 +12366,7 @@ components: type: description: Always `message_creation`. type: string - enum: ["message_creation"] + enum: [ "message_creation" ] message_creation: type: object properties: @@ -12387,7 +12387,7 @@ components: type: description: Always `message_creation`. type: string - enum: ["message_creation"] + enum: [ "message_creation" ] message_creation: type: object properties: @@ -12405,7 +12405,7 @@ components: type: description: Always `tool_calls`. type: string - enum: ["tool_calls"] + enum: [ "tool_calls" ] tool_calls: type: array description: | @@ -12428,7 +12428,7 @@ components: type: description: Always `tool_calls`. type: string - enum: ["tool_calls"] + enum: [ "tool_calls" ] tool_calls: type: array description: | @@ -12453,7 +12453,7 @@ components: type: type: string description: The type of tool call. 
This is always going to be `code_interpreter` for this type of tool call. - enum: ["code_interpreter"] + enum: [ "code_interpreter" ] code_interpreter: type: object description: The Code Interpreter tool call definition. @@ -12492,7 +12492,7 @@ components: type: type: string description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: ["code_interpreter"] + enum: [ "code_interpreter" ] code_interpreter: type: object description: The Code Interpreter tool call definition. @@ -12521,7 +12521,7 @@ components: type: description: Always `logs`. type: string - enum: ["logs"] + enum: [ "logs" ] logs: type: string description: The text output from the Code Interpreter tool call. @@ -12540,7 +12540,7 @@ components: type: description: Always `logs`. type: string - enum: ["logs"] + enum: [ "logs" ] logs: type: string description: The text output from the Code Interpreter tool call. @@ -12555,7 +12555,7 @@ components: type: description: Always `image`. type: string - enum: ["image"] + enum: [ "image" ] image: type: object properties: @@ -12578,7 +12578,7 @@ components: type: description: Always `image`. type: string - enum: ["image"] + enum: [ "image" ] image: type: object properties: @@ -12599,7 +12599,7 @@ components: type: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: ["file_search"] + enum: [ "file_search" ] file_search: type: object description: For now, this is always going to be an empty object. @@ -12622,7 +12622,7 @@ components: type: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: ["file_search"] + enum: [ "file_search" ] file_search: type: object description: For now, this is always going to be an empty object. @@ -12642,7 +12642,7 @@ components: type: type: string description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: ["function"] + enum: [ "function" ] function: type: object description: The definition of the function that was called. @@ -12679,7 +12679,7 @@ components: type: type: string description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: ["function"] + enum: [ "function" ] function: type: object description: The definition of the function that was called. @@ -12706,7 +12706,7 @@ components: anchor: description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." type: string - enum: ["last_active_at"] + enum: [ "last_active_at" ] days: description: The number of days after the anchor time that the vector store will expire. type: integer @@ -12727,7 +12727,7 @@ components: object: description: The object type, which is always `vector_store`. type: string - enum: ["vector_store"] + enum: [ "vector_store" ] created_at: description: The Unix timestamp (in seconds) for when the vector store was created. type: integer @@ -12764,7 +12764,7 @@ components: status: description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. 
type: string - enum: ["expired", "in_progress", "completed"] + enum: [ "expired", "in_progress", "completed" ] expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" expires_at: @@ -12892,7 +12892,7 @@ components: type: boolean object: type: string - enum: [vector_store.deleted] + enum: [ vector_store.deleted ] required: - id - object @@ -12909,7 +12909,7 @@ components: object: description: The object type, which is always `vector_store.file`. type: string - enum: ["vector_store.file"] + enum: [ "vector_store.file" ] usage_bytes: description: The total vector store usage in bytes. Note that this may be different from the original file size. type: integer @@ -12922,7 +12922,7 @@ components: status: description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. type: string - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] last_error: type: object description: The last error associated with this vector store file. Will be `null` if there are no errors. @@ -12933,10 +12933,9 @@ components: description: One of `server_error` or `rate_limit_exceeded`. enum: [ - "internal_error", - "file_not_found", - "parsing_error", - "unhandled_mime_type", + "server_error", + "unsupported_file", + "invalid_file", ] message: type: string @@ -12989,7 +12988,7 @@ components: type: type: string description: Always `other`. - enum: ["other"] + enum: [ "other" ] required: - type @@ -13001,7 +13000,7 @@ components: type: type: string description: Always `static`. - enum: ["static"] + enum: [ "static" ] static: $ref: "#/components/schemas/StaticChunkingStrategy" required: @@ -13036,7 +13035,7 @@ components: type: type: string description: Always `auto`. - enum: ["auto"] + enum: [ "auto" ] required: - type @@ -13048,7 +13047,7 @@ components: type: type: string description: Always `static`. - enum: ["static"] + enum: [ "static" ] static: $ref: "#/components/schemas/StaticChunkingStrategy" required: @@ -13109,7 +13108,7 @@ components: type: boolean object: type: string - enum: [vector_store.file.deleted] + enum: [ vector_store.file.deleted ] required: - id - object @@ -13126,7 +13125,7 @@ components: object: description: The object type, which is always `vector_store.file_batch`. type: string - enum: ["vector_store.files_batch"] + enum: [ "vector_store.files_batch" ] created_at: description: The Unix timestamp (in seconds) for when the vector store files batch was created. type: integer @@ -13136,7 +13135,7 @@ components: status: description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. 
type: string - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] file_counts: type: object properties: @@ -13241,7 +13240,7 @@ components: properties: event: type: string - enum: ["thread.created"] + enum: [ "thread.created" ] data: $ref: "#/components/schemas/ThreadObject" required: @@ -13257,7 +13256,7 @@ components: properties: event: type: string - enum: ["thread.run.created"] + enum: [ "thread.run.created" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13270,7 +13269,7 @@ components: properties: event: type: string - enum: ["thread.run.queued"] + enum: [ "thread.run.queued" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13283,7 +13282,7 @@ components: properties: event: type: string - enum: ["thread.run.in_progress"] + enum: [ "thread.run.in_progress" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13296,7 +13295,7 @@ components: properties: event: type: string - enum: ["thread.run.requires_action"] + enum: [ "thread.run.requires_action" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13309,7 +13308,7 @@ components: properties: event: type: string - enum: ["thread.run.completed"] + enum: [ "thread.run.completed" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13335,7 +13334,7 @@ components: properties: event: type: string - enum: ["thread.run.failed"] + enum: [ "thread.run.failed" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13348,7 +13347,7 @@ components: properties: event: type: string - enum: ["thread.run.cancelling"] + enum: [ "thread.run.cancelling" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13361,7 +13360,7 @@ components: properties: event: type: string - enum: ["thread.run.cancelled"] + enum: [ "thread.run.cancelled" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13374,7 +13373,7 @@ components: properties: event: type: string - enum: ["thread.run.expired"] + enum: [ "thread.run.expired" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13390,7 +13389,7 @@ components: properties: event: type: string - enum: ["thread.run.step.created"] + enum: [ "thread.run.step.created" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13403,7 +13402,7 @@ components: properties: event: type: string - enum: ["thread.run.step.in_progress"] + enum: [ "thread.run.step.in_progress" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13416,7 +13415,7 @@ components: properties: event: type: string - enum: ["thread.run.step.delta"] + enum: [ "thread.run.step.delta" ] data: $ref: "#/components/schemas/RunStepDeltaObject" required: @@ -13429,7 +13428,7 @@ components: properties: event: type: string - enum: ["thread.run.step.completed"] + enum: [ "thread.run.step.completed" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13442,7 +13441,7 @@ components: properties: event: type: string - enum: ["thread.run.step.failed"] + enum: [ "thread.run.step.failed" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13455,7 +13454,7 @@ components: properties: event: type: string - enum: ["thread.run.step.cancelled"] + enum: [ "thread.run.step.cancelled" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13468,7 +13467,7 @@ components: properties: event: type: string - enum: ["thread.run.step.expired"] + enum: [ "thread.run.step.expired" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13484,7 +13483,7 @@ components: properties: event: type: 
string - enum: ["thread.message.created"] + enum: [ "thread.message.created" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -13497,7 +13496,7 @@ components: properties: event: type: string - enum: ["thread.message.in_progress"] + enum: [ "thread.message.in_progress" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -13510,7 +13509,7 @@ components: properties: event: type: string - enum: ["thread.message.delta"] + enum: [ "thread.message.delta" ] data: $ref: "#/components/schemas/MessageDeltaObject" required: @@ -13523,7 +13522,7 @@ components: properties: event: type: string - enum: ["thread.message.completed"] + enum: [ "thread.message.completed" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -13536,7 +13535,7 @@ components: properties: event: type: string - enum: ["thread.message.incomplete"] + enum: [ "thread.message.incomplete" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -13551,7 +13550,7 @@ components: properties: event: type: string - enum: ["error"] + enum: [ "error" ] data: $ref: "#/components/schemas/Error" required: @@ -13566,10 +13565,10 @@ components: properties: event: type: string - enum: ["done"] + enum: [ "done" ] data: type: string - enum: ["[DONE]"] + enum: [ "[DONE]" ] required: - event - data @@ -13584,7 +13583,7 @@ components: type: string object: type: string - enum: [batch] + enum: [ batch ] description: The object type, which is always `batch`. endpoint: type: string @@ -13709,7 +13708,7 @@ components: description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. method: type: string - enum: ["POST"] + enum: [ "POST" ] description: The HTTP method to be used for the request. Currently only `POST` is supported. 
url: type: string @@ -13717,7 +13716,7 @@ components: x-oaiMeta: name: The request input object example: | - {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} + {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} BatchRequestOutput: type: object @@ -13756,7 +13755,7 @@ components: x-oaiMeta: name: The request output object example: | - {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-3.5-turbo", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null} + {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-4o-mini", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null} ListBatchesResponse: type: object @@ -13775,14 +13774,14 @@ components: type: boolean object: type: string - enum: [list] + enum: [ list ] required: - object - data - has_more security: - - ApiKeyAuth: [] + - ApiKeyAuth: [ ] x-oaiMeta: navigationGroups: @@ -13790,6 +13789,8 @@ x-oaiMeta: title: Endpoints - id: assistants title: Assistants + - id: administration + title: Administration - id: legacy title: Legacy groups: @@ -14038,6 +14039,8 @@ x-oaiMeta: - type: object key: CreateModerationResponse path: object + + - id: assistants title: Assistants beta: true @@ -14265,6 +14268,175 @@ x-oaiMeta: - type: object key: AssistantStreamEvent path: events + + - id: administration + title: Overview + description: | + Programmatically manage your organization. + + The Audit Logs endpoint provides a log of all actions taken in the + organization for security and monitoring purposes. + + To access these endpoints please generate an Admin API Key through the [API Platform Organization overview](/organization/admin-keys). Admin API keys cannot be used for non-administration endpoints. + + For best practices on setting up your organization, please refer to this [guide](/docs/guides/production-best-practices/setting-up-your-organization) + navigationGroup: administration + + - id: invite + title: Invites + description: Invite and manage invitations for an organization. Invited users are automatically added to the Default project. + navigationGroup: administration + sections: + - type: endpoint + key: list-invites + path: list + - type: endpoint + key: inviteUser + path: create + - type: endpoint + key: retrieve-invite + path: retrieve + - type: endpoint + key: delete-invite + path: delete + - type: object + key: Invite + path: object + + - id: users + title: Users + description: | + Manage users and their role in an organization. Users will be automatically added to the Default project. 
+ navigationGroup: administration + sections: + - type: endpoint + key: list-users + path: list + - type: endpoint + key: modify-user + path: modify + - type: endpoint + key: retrieve-user + path: retrieve + - type: endpoint + key: delete-user + path: delete + - type: object + key: User + path: object + + - id: projects + title: Projects + description: | + Manage the projects within an orgnanization includes creation, updating, and archiving or projects. + The Default project cannot be modified or archived. + navigationGroup: administration + sections: + - type: endpoint + key: list-projects + path: list + - type: endpoint + key: create-project + path: create + - type: endpoint + key: retrieve-project + path: retrieve + - type: endpoint + key: modify-project + path: modify + - type: endpoint + key: archive-project + path: archive + - type: object + key: Project + path: object + + - id: project-users + title: Project Users + description: | + Manage users within a project, including adding, updating roles, and removing users. + Users cannot be removed from the Default project, unless they are being removed from the organization. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-users + path: list + - type: endpoint + key: create-project-user + path: creeate + - type: endpoint + key: retrieve-project-user + path: retrieve + - type: endpoint + key: modify-project-user + path: modify + - type: endpoint + key: delete-project-user + path: delete + - type: object + key: ProjectUser + path: object + + - id: project-service-accounts + title: Project Service Accounts + description: | + Manage service accounts within a project. A service account is a bot user that is not associated with a user. + If a user leaves an organization, their keys and membership in projects will no longer work. Service accounts + do not have this limitation. However, service accounts can also be deleted from a project. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-service-accounts + path: list + - type: endpoint + key: create-project-service-account + path: create + - type: endpoint + key: retrieve-project-service-account + path: retrieve + - type: endpoint + key: delete-project-service-account + path: delete + - type: object + key: ProjectServiceAccount + path: object + + - id: project-api-keys + title: Project API Keys + description: | + Manage API keys for a given project. Supports listing and deleting keys for users. + This API does not allow issuing keys for users, as users need to authorize themselves to generate keys. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-api-keys + path: list + - type: endpoint + key: retrieve-project-api-key + path: retrieve + - type: endpoint + key: delete-project-api-key + path: delete + - type: object + key: ProjectApiKey + path: object + + - id: audit-logs + title: Audit Logs + description: | + Logs of user actions and configuration changes within this organization. + + To log events, you must activate logging in the [Organization Settings](/settings/organization/general). + Once activated, for security reasons, logging cannot be deactivated. 
+ navigationGroup: administration + sections: + - type: endpoint + key: list-audit-logs + path: list + - type: object + key: AuditLog + path: object + - id: completions title: Completions legacy: true From 7205932146f0baae76a27bf0faeeea5ad374e9c7 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 16 Aug 2024 15:51:17 +0200 Subject: [PATCH 210/251] build: Upgrade pubspec.lock files (#523) https://github.com/dart-lang/build/issues/3733#issuecomment-2272082820 --- .gitignore | 1 + examples/browser_summarizer/pubspec.lock | 52 ++++++++--------- .../browser_summarizer/pubspec_overrides.yaml | 2 +- examples/docs_examples/pubspec.lock | 40 ++++++------- examples/docs_examples/pubspec_overrides.yaml | 2 +- examples/hello_world_backend/pubspec.lock | 16 +++--- .../pubspec_overrides.yaml | 2 +- examples/hello_world_cli/pubspec.lock | 20 +++---- .../hello_world_cli/pubspec_overrides.yaml | 2 +- examples/hello_world_flutter/pubspec.lock | 28 +++++----- .../pubspec_overrides.yaml | 2 +- .../pubspec.lock | 24 ++++---- examples/wikivoyage_eu/pubspec.lock | 24 ++++---- examples/wikivoyage_eu/pubspec_overrides.yaml | 2 +- packages/anthropic_sdk_dart/pubspec.lock | 56 +++++++++---------- .../langchain_chroma/pubspec_overrides.yaml | 2 +- .../langchain_firebase/example/pubspec.lock | 44 +++++++-------- .../example/pubspec_overrides.yaml | 2 +- packages/langchain_firebase/pubspec.lock | 44 +++++++-------- .../langchain_google/pubspec_overrides.yaml | 2 +- .../pubspec_overrides.yaml | 2 +- .../langchain_ollama/pubspec_overrides.yaml | 2 +- .../langchain_openai/pubspec_overrides.yaml | 2 +- .../langchain_pinecone/pubspec_overrides.yaml | 2 +- .../langchain_supabase/pubspec_overrides.yaml | 3 +- 25 files changed, 185 insertions(+), 193 deletions(-) diff --git a/.gitignore b/.gitignore index dd509d78..cf493593 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ .dart_tool/ /pubspec.lock .vscode/ +.aider* diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index 21488e9b..fe72f39c 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: args - sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596 + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" url: "https://pub.dev" source: hosted - version: "2.4.2" + version: "2.5.0" async: dependency: transitive description: @@ -29,10 +29,10 @@ packages: dependency: transitive description: name: bloc - sha256: "3820f15f502372d979121de1f6b97bfcf1630ebff8fe1d52fb2b0bfa49be5b49" + sha256: "106842ad6569f0b60297619e9e0b1885c2fb9bf84812935490e6c5275777804e" url: "https://pub.dev" source: hosted - version: "8.1.2" + version: "8.1.4" characters: dependency: transitive description: @@ -109,10 +109,10 @@ packages: dependency: transitive description: name: ffi - sha256: "7bf0adc28a23d395f19f3f1eb21dd7cfd1dd9f8e1c50051c069122e6853bc878" + sha256: "16ed7b077ef01ad6170a3d0c57caa4a112a38d7a2ed5602e0aca9ca6f3d98da6" url: "https://pub.dev" source: hosted - version: "2.1.0" + version: "2.1.3" file: dependency: transitive description: @@ -267,10 +267,10 @@ packages: dependency: transitive description: name: markdown - sha256: acf35edccc0463a9d7384e437c015a3535772e09714cf60e07eeef3a15870dcd + sha256: ef2a1298144e3f985cc736b22e0ccdaf188b5b3970648f2d9dc13efd1d9df051 url: "https://pub.dev" source: hosted - version: "7.1.1" + version: "7.2.2" material_color_utilities: dependency: 
transitive description: @@ -354,10 +354,10 @@ packages: dependency: transitive description: name: path_provider_windows - sha256: "8bc9f22eee8690981c22aa7fc602f5c85b497a6fb2ceb35ee5a5e5ed85ad8170" + sha256: bd6f00dbd873bfb70d0761682da2b3a2c2fccc2b9e84c495821639601d81afe7 url: "https://pub.dev" source: hosted - version: "2.2.1" + version: "2.3.0" petitparser: dependency: transitive description: @@ -370,10 +370,10 @@ packages: dependency: transitive description: name: platform - sha256: "12220bb4b65720483f8fa9450b4332347737cf8213dd2840d8b2c823e47243ec" + sha256: "9b71283fc13df574056616011fb138fd3b793ea47cc509c189a6c3fa5f8a1a65" url: "https://pub.dev" source: hosted - version: "3.1.4" + version: "3.1.5" plugin_platform_interface: dependency: transitive description: @@ -386,10 +386,10 @@ packages: dependency: transitive description: name: provider - sha256: "9a96a0a19b594dbc5bf0f1f27d2bc67d5f95957359b461cd9feb44ed6ae75096" + sha256: c8a055ee5ce3fd98d6fc872478b03823ffdb448699c6ebdbbc71d59b596fd48c url: "https://pub.dev" source: hosted - version: "6.1.1" + version: "6.1.2" rfc_6901: dependency: transitive description: @@ -402,10 +402,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" shared_preferences: dependency: "direct main" description: @@ -426,10 +426,10 @@ packages: dependency: transitive description: name: shared_preferences_foundation - sha256: "776786cff96324851b656777648f36ac772d88bc4c669acff97b7fce5de3c849" + sha256: c4b35f6cb8f63c147312c054ce7c2254c8066745125264f0c88739c417fc9d9f url: "https://pub.dev" source: hosted - version: "2.5.1" + version: "2.5.2" shared_preferences_linux: dependency: transitive description: @@ -487,10 +487,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" tavily_dart: dependency: "direct overridden" description: @@ -534,18 +534,10 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" - url: "https://pub.dev" - source: hosted - version: "0.5.1" - win32: - dependency: transitive - description: - name: win32 - sha256: "464f5674532865248444b4c3daca12bd9bf2d7c47f759ce2617986e7229494a8" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "5.2.0" + version: "1.0.0" xdg_directories: dependency: transitive description: diff --git a/examples/browser_summarizer/pubspec_overrides.yaml b/examples/browser_summarizer/pubspec_overrides.yaml index 49be75a7..808fbc3a 100644 --- a/examples/browser_summarizer/pubspec_overrides.yaml +++ b/examples/browser_summarizer/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_community,tavily_dart +# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,langchain_openai,openai_dart,tavily_dart dependency_overrides: langchain: path: ../../packages/langchain diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 8aaa5dcd..caa950ab 100644 --- 
a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _discoveryapis_commons - sha256: f8bb1fdbd77f3d5c1d62b5b0eca75fbf1e41bf4f6c62628f880582e2182ae45d + sha256: "113c4100b90a5b70a983541782431b82168b3cae166ab130649c36eb3559d498" url: "https://pub.dev" source: hosted - version: "1.0.6" + version: "1.0.7" anthropic_sdk_dart: dependency: "direct overridden" description: @@ -20,10 +20,10 @@ packages: dependency: transitive description: name: args - sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596 + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" url: "https://pub.dev" source: hosted - version: "2.4.2" + version: "2.5.0" async: dependency: transitive description: @@ -59,10 +59,10 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" cross_file: dependency: transitive description: @@ -115,10 +115,10 @@ packages: dependency: transitive description: name: ffi - sha256: "493f37e7df1804778ff3a53bd691d8692ddf69702cf4c1c1096a2e41b4779e21" + sha256: "16ed7b077ef01ad6170a3d0c57caa4a112a38d7a2ed5602e0aca9ca6f3d98da6" url: "https://pub.dev" source: hosted - version: "2.1.2" + version: "2.1.3" fixnum: dependency: transitive description: @@ -163,10 +163,10 @@ packages: dependency: transitive description: name: google_identity_services_web - sha256: "9482364c9f8b7bd36902572ebc3a7c2b5c8ee57a9c93e6eb5099c1a9ec5265d8" + sha256: "5be191523702ba8d7a01ca97c17fca096822ccf246b0a9f11923a6ded06199b6" url: "https://pub.dev" source: hosted - version: "0.3.1+1" + version: "0.3.1+4" googleapis: dependency: transitive description: @@ -203,10 +203,10 @@ packages: dependency: transitive description: name: http_parser - sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "4.0.2" + version: "4.1.0" iregexp: dependency: transitive description: @@ -322,10 +322,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.15.0" mistralai_dart: dependency: "direct overridden" description: @@ -391,10 +391,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" source_span: dependency: transitive description: @@ -415,10 +415,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" tavily_dart: dependency: "direct overridden" description: @@ -469,9 +469,9 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: 
d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: dart: ">=3.4.0 <4.0.0" diff --git a/examples/docs_examples/pubspec_overrides.yaml b/examples/docs_examples/pubspec_overrides.yaml index 1c756a5e..4060d466 100644 --- a/examples/docs_examples/pubspec_overrides.yaml +++ b/examples/docs_examples/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: chromadb,langchain,langchain_chroma,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai,langchain_core,langchain_community,tavily_dart,anthropic_sdk_dart,langchain_anthropic +# melos_managed_dependency_overrides: anthropic_sdk_dart,chromadb,langchain,langchain_anthropic,langchain_chroma,langchain_community,langchain_core,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,tavily_dart,vertex_ai dependency_overrides: anthropic_sdk_dart: path: ../../packages/anthropic_sdk_dart diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index 97cf3b4b..b2934b90 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -138,10 +138,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.15.0" openai_dart: dependency: "direct overridden" description: @@ -161,10 +161,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" shelf: dependency: "direct main" description: @@ -217,10 +217,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: @@ -249,9 +249,9 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: dart: ">=3.4.0 <4.0.0" diff --git a/examples/hello_world_backend/pubspec_overrides.yaml b/examples/hello_world_backend/pubspec_overrides.yaml index 93b5421a..a52f79af 100644 --- a/examples/hello_world_backend/pubspec_overrides.yaml +++ b/examples/hello_world_backend/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_openai,openai_dart dependency_overrides: langchain: path: ../../packages/langchain diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 06a4acdb..40613637 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -85,10 +85,10 @@ packages: dependency: transitive description: name: http_parser - 
sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "4.0.2" + version: "4.1.0" json_annotation: dependency: transitive description: @@ -130,10 +130,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.15.0" openai_dart: dependency: "direct overridden" description: @@ -153,10 +153,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" source_span: dependency: transitive description: @@ -177,10 +177,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: @@ -209,9 +209,9 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: dart: ">=3.4.0 <4.0.0" diff --git a/examples/hello_world_cli/pubspec_overrides.yaml b/examples/hello_world_cli/pubspec_overrides.yaml index 93b5421a..a52f79af 100644 --- a/examples/hello_world_cli/pubspec_overrides.yaml +++ b/examples/hello_world_cli/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_openai,openai_dart dependency_overrides: langchain: path: ../../packages/langchain diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 94a94e96..02e61985 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _discoveryapis_commons - sha256: f8bb1fdbd77f3d5c1d62b5b0eca75fbf1e41bf4f6c62628f880582e2182ae45d + sha256: "113c4100b90a5b70a983541782431b82168b3cae166ab130649c36eb3559d498" url: "https://pub.dev" source: hosted - version: "1.0.6" + version: "1.0.7" args: dependency: transitive description: @@ -29,10 +29,10 @@ packages: dependency: transitive description: name: bloc - sha256: "3820f15f502372d979121de1f6b97bfcf1630ebff8fe1d52fb2b0bfa49be5b49" + sha256: "106842ad6569f0b60297619e9e0b1885c2fb9bf84812935490e6c5275777804e" url: "https://pub.dev" source: hosted - version: "8.1.2" + version: "8.1.4" characters: dependency: transitive description: @@ -146,10 +146,10 @@ packages: dependency: transitive description: name: google_identity_services_web - sha256: "9482364c9f8b7bd36902572ebc3a7c2b5c8ee57a9c93e6eb5099c1a9ec5265d8" + sha256: "5be191523702ba8d7a01ca97c17fca096822ccf246b0a9f11923a6ded06199b6" url: "https://pub.dev" source: hosted - version: "0.3.1+1" + version: "0.3.1+4" googleapis: dependency: transitive description: 
@@ -305,10 +305,10 @@ packages: dependency: transitive description: name: provider - sha256: "9a96a0a19b594dbc5bf0f1f27d2bc67d5f95957359b461cd9feb44ed6ae75096" + sha256: c8a055ee5ce3fd98d6fc872478b03823ffdb448699c6ebdbbc71d59b596fd48c url: "https://pub.dev" source: hosted - version: "6.1.1" + version: "6.1.2" retry: dependency: transitive description: @@ -321,10 +321,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" sky_engine: dependency: transitive description: flutter @@ -350,10 +350,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: @@ -397,10 +397,10 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: dart: ">=3.4.0 <4.0.0" flutter: ">=3.19.0" diff --git a/examples/hello_world_flutter/pubspec_overrides.yaml b/examples/hello_world_flutter/pubspec_overrides.yaml index d5192892..5c8d37f9 100644 --- a/examples/hello_world_flutter/pubspec_overrides.yaml +++ b/examples/hello_world_flutter/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_google,langchain_mistralai,langchain_ollama,mistralai_dart,ollama_dart,vertex_ai +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai dependency_overrides: langchain: path: ../../packages/langchain diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.lock b/examples/vertex_ai_matching_engine_setup/pubspec.lock index 653b4e81..c4bd2136 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.lock +++ b/examples/vertex_ai_matching_engine_setup/pubspec.lock @@ -5,18 +5,18 @@ packages: dependency: transitive description: name: _discoveryapis_commons - sha256: f8bb1fdbd77f3d5c1d62b5b0eca75fbf1e41bf4f6c62628f880582e2182ae45d + sha256: "113c4100b90a5b70a983541782431b82168b3cae166ab130649c36eb3559d498" url: "https://pub.dev" source: hosted - version: "1.0.6" + version: "1.0.7" args: dependency: transitive description: name: args - sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596 + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" url: "https://pub.dev" source: hosted - version: "2.4.2" + version: "2.5.0" async: dependency: transitive description: @@ -29,10 +29,10 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" crypto: dependency: transitive description: @@ -85,18 +85,18 @@ packages: dependency: transitive description: name: http_parser - sha256: 
"2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "4.0.2" + version: "4.1.0" meta: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.15.0" path: dependency: transitive description: @@ -125,10 +125,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index da1458be..2e50c6c0 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -29,10 +29,10 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" cross_file: dependency: transitive description: @@ -85,10 +85,10 @@ packages: dependency: transitive description: name: ffi - sha256: "493f37e7df1804778ff3a53bd691d8692ddf69702cf4c1c1096a2e41b4779e21" + sha256: "16ed7b077ef01ad6170a3d0c57caa4a112a38d7a2ed5602e0aca9ca6f3d98da6" url: "https://pub.dev" source: hosted - version: "2.1.2" + version: "2.1.3" fixnum: dependency: transitive description: @@ -133,10 +133,10 @@ packages: dependency: transitive description: name: http_parser - sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "4.0.2" + version: "4.1.0" iregexp: dependency: transitive description: @@ -264,10 +264,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" source_span: dependency: transitive description: @@ -288,10 +288,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" tavily_dart: dependency: "direct overridden" description: @@ -335,9 +335,9 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: dart: ">=3.4.0 <4.0.0" diff --git a/examples/wikivoyage_eu/pubspec_overrides.yaml b/examples/wikivoyage_eu/pubspec_overrides.yaml index 6f7e46d1..5814891d 100644 --- a/examples/wikivoyage_eu/pubspec_overrides.yaml +++ b/examples/wikivoyage_eu/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: 
langchain,langchain_core,langchain_community,langchain_ollama,ollama_dart,tavily_dart +# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,langchain_ollama,ollama_dart,tavily_dart dependency_overrides: langchain: path: ../../packages/langchain diff --git a/packages/anthropic_sdk_dart/pubspec.lock b/packages/anthropic_sdk_dart/pubspec.lock index ddbf9394..6e950ea1 100644 --- a/packages/anthropic_sdk_dart/pubspec.lock +++ b/packages/anthropic_sdk_dart/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _fe_analyzer_shared - sha256: f256b0c0ba6c7577c15e2e4e114755640a875e885099367bf6e012b19314c834 + sha256: "45cfa8471b89fb6643fe9bf51bd7931a76b8f5ec2d65de4fb176dba8d4f22c77" url: "https://pub.dev" source: hosted - version: "72.0.0" + version: "73.0.0" _macros: dependency: transitive description: dart @@ -18,10 +18,10 @@ packages: dependency: transitive description: name: analyzer - sha256: b652861553cd3990d8ed361f7979dc6d7053a9ac8843fa73820ab68ce5410139 + sha256: "4959fec185fe70cce007c57e9ab6983101dbe593d2bf8bbfb4453aaec0cf470a" url: "https://pub.dev" source: hosted - version: "6.7.0" + version: "6.8.0" args: dependency: transitive description: @@ -90,10 +90,10 @@ packages: dependency: transitive description: name: build_runner_core - sha256: "4ae8ffe5ac758da294ecf1802f2aff01558d8b1b00616aa7538ea9a8a5d50799" + sha256: f8126682b87a7282a339b871298cc12009cb67109cfa1614d6436fb0289193e0 url: "https://pub.dev" source: hosted - version: "7.3.0" + version: "7.3.2" built_collection: dependency: transitive description: @@ -138,10 +138,10 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" convert: dependency: transitive description: @@ -154,10 +154,10 @@ packages: dependency: transitive description: name: coverage - sha256: "3945034e86ea203af7a056d98e98e42a5518fff200d6e8e6647e1886b07e936e" + sha256: "576aaab8b1abdd452e0f656c3e73da9ead9d7880e15bdc494189d9c1a1baf0db" url: "https://pub.dev" source: hosted - version: "1.8.0" + version: "1.9.0" crypto: dependency: transitive description: @@ -242,10 +242,10 @@ packages: dependency: transitive description: name: graphs - sha256: aedc5a15e78fc65a6e23bcd927f24c64dd995062bcd1ca6eda65a3cff92a4d19 + sha256: "741bbf84165310a68ff28fe9e727332eef1407342fca52759cb21ad8177bb8d0" url: "https://pub.dev" source: hosted - version: "2.3.1" + version: "2.3.2" http: dependency: "direct main" description: @@ -266,10 +266,10 @@ packages: dependency: transitive description: name: http_parser - sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "4.0.2" + version: "4.1.0" intl: dependency: transitive description: @@ -403,10 +403,10 @@ packages: dependency: transitive description: name: pubspec_parse - sha256: c63b2876e58e194e4b0828fcb080ad0e06d051cb607a6be51a9e084f47cb9367 + sha256: c799b721d79eb6ee6fa56f00c04b472dcd44a30d258fac2174a6ec57302678f8 url: "https://pub.dev" source: hosted - version: "1.2.3" + version: "1.3.0" recase: dependency: transitive description: @@ -419,10 +419,10 @@ packages: dependency: transitive description: name: shelf - sha256: ad29c505aee705f41a4d8963641f91ac4cee3c8fad5947e033390a7bd8180fa4 + sha256: 
e7dd780a7ffb623c57850b33f43309312fc863fb6aa3d276a754bb299839ef12 url: "https://pub.dev" source: hosted - version: "1.4.1" + version: "1.4.2" shelf_packages_handler: dependency: transitive description: @@ -515,10 +515,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: @@ -571,10 +571,10 @@ packages: dependency: transitive description: name: vm_service - sha256: "7475cb4dd713d57b6f7464c0e13f06da0d535d8b2067e188962a59bac2cf280b" + sha256: f652077d0bdf60abe4c1f6377448e8655008eef28f128bc023f7b5e8dfeb48fc url: "https://pub.dev" source: hosted - version: "14.2.2" + version: "14.2.4" watcher: dependency: transitive description: @@ -587,26 +587,26 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" web_socket: dependency: transitive description: name: web_socket - sha256: "217f49b5213796cb508d6a942a5dc604ce1cb6a0a6b3d8cb3f0c314f0ecea712" + sha256: "3c12d96c0c9a4eec095246debcea7b86c0324f22df69893d538fcc6f1b8cce83" url: "https://pub.dev" source: hosted - version: "0.1.4" + version: "0.1.6" web_socket_channel: dependency: transitive description: name: web_socket_channel - sha256: a2d56211ee4d35d9b344d9d4ce60f362e4f5d1aafb988302906bd732bc731276 + sha256: "9f187088ed104edd8662ca07af4b124465893caf063ba29758f97af57e61da8f" url: "https://pub.dev" source: hosted - version: "3.0.0" + version: "3.0.1" webkit_inspection_protocol: dependency: transitive description: diff --git a/packages/langchain_chroma/pubspec_overrides.yaml b/packages/langchain_chroma/pubspec_overrides.yaml index 3470527c..d53c4efe 100644 --- a/packages/langchain_chroma/pubspec_overrides.yaml +++ b/packages/langchain_chroma/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: chromadb,langchain_openai,openai_dart,langchain_core,langchain_community,langchain,tavily_dart +# melos_managed_dependency_overrides: chromadb,langchain,langchain_community,langchain_core,langchain_openai,openai_dart,tavily_dart dependency_overrides: chromadb: path: ../chromadb diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 1896af9c..b1d7e459 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _flutterfire_internals - sha256: "0816f12bbbd9e21f72ea8592b11bce4a628d4e5cb7a81ff9f1eee4f3dc23206e" + sha256: b1595874fbc8f7a50da90f5d8f327bb0bfd6a95dc906c390efe991540c3b54aa url: "https://pub.dev" source: hosted - version: "1.3.37" + version: "1.3.40" args: dependency: transitive description: @@ -117,50 +117,50 @@ packages: dependency: transitive description: name: firebase_app_check - sha256: "8aedc3b274826f923f2cf2d61cddeb014c113fd8604373e2fe3a2068b3f496e7" + sha256: "8314938830d6b47217e369664567f6d8a1e77603448e1dbdaf4f7d8c2111ff5c" url: "https://pub.dev" source: hosted - version: "0.3.0+1" + version: "0.3.0+4" firebase_app_check_platform_interface: dependency: transitive description: name: 
firebase_app_check_platform_interface - sha256: "93e8aeeb5659c4926682299f175c033fd955fe3a2aa3b2c9a34c55af1ba25f10" + sha256: edefbd312d2f4c52ab6a62d4efca512012bcc580f152c856a5730bfabcf8a924 url: "https://pub.dev" source: hosted - version: "0.1.0+31" + version: "0.1.0+34" firebase_app_check_web: dependency: transitive description: name: firebase_app_check_web - sha256: c2933fc26b73d02b791291df00446a6dbf99d1b59e038bb55cbbec74fcb40c4a + sha256: "2c2377ecf922514c540c2d4a9c06e46830a0706fdfc3d59b7ade9b75843b81c5" url: "https://pub.dev" source: hosted - version: "0.1.2+9" + version: "0.1.2+12" firebase_auth: dependency: transitive description: name: firebase_auth - sha256: "3af60a78e92567af3d9a5e25d3955f0f6a3f7a33b900724c1c4c336ff5e44200" + sha256: "2457ac6cbc152fa464aad3fb35f98039b0c4ab8e9bedf476672508b291bdbc3a" url: "https://pub.dev" source: hosted - version: "5.1.0" + version: "5.1.4" firebase_auth_platform_interface: dependency: transitive description: name: firebase_auth_platform_interface - sha256: "6941c07a1d129a8b834f85b6673d3455f24102b6338346596c26ef3be2c106ce" + sha256: "0408e2ed74b1afa0490a93aa041fe90d7573af7ffc59a641edc6c5b5c1b8d2a4" url: "https://pub.dev" source: hosted - version: "7.4.0" + version: "7.4.3" firebase_auth_web: dependency: transitive description: name: firebase_auth_web - sha256: "5c3f6b45dc141cec858c050d6a6f07bdbfab45ab92a68b32be4b08805bdcadaa" + sha256: "7e0c6d0fa8c5c1b2ae126a78f2d1a206a77a913f78d20f155487bf746162dccc" url: "https://pub.dev" source: hosted - version: "5.12.2" + version: "5.12.5" firebase_core: dependency: "direct main" description: @@ -189,10 +189,10 @@ packages: dependency: transitive description: name: firebase_vertexai - sha256: a96bc9e8a6e1da0c4bbda2dd24f03b74e069449a3fa7940c87ec611bfc96633d + sha256: ad34f7a87d870949e92851f4c73b7e15f808fd4717ed899fa7b4813fffe74831 url: "https://pub.dev" source: hosted - version: "0.2.2" + version: "0.2.2+4" fixnum: dependency: transitive description: @@ -236,18 +236,18 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" + sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be url: "https://pub.dev" source: hosted - version: "0.4.3" + version: "0.4.4" http: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -361,10 +361,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" sky_engine: dependency: transitive description: flutter diff --git a/packages/langchain_firebase/example/pubspec_overrides.yaml b/packages/langchain_firebase/example/pubspec_overrides.yaml index 35cb2da2..fb671352 100644 --- a/packages/langchain_firebase/example/pubspec_overrides.yaml +++ b/packages/langchain_firebase/example/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_core,langchain_firebase,langchain +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_firebase dependency_overrides: langchain: path: ../../langchain diff --git 
a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 6c5ffbb4..593dfe9c 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _flutterfire_internals - sha256: "0816f12bbbd9e21f72ea8592b11bce4a628d4e5cb7a81ff9f1eee4f3dc23206e" + sha256: b1595874fbc8f7a50da90f5d8f327bb0bfd6a95dc906c390efe991540c3b54aa url: "https://pub.dev" source: hosted - version: "1.3.37" + version: "1.3.40" async: dependency: transitive description: @@ -101,50 +101,50 @@ packages: dependency: "direct main" description: name: firebase_app_check - sha256: "8aedc3b274826f923f2cf2d61cddeb014c113fd8604373e2fe3a2068b3f496e7" + sha256: "8314938830d6b47217e369664567f6d8a1e77603448e1dbdaf4f7d8c2111ff5c" url: "https://pub.dev" source: hosted - version: "0.3.0+1" + version: "0.3.0+4" firebase_app_check_platform_interface: dependency: transitive description: name: firebase_app_check_platform_interface - sha256: "93e8aeeb5659c4926682299f175c033fd955fe3a2aa3b2c9a34c55af1ba25f10" + sha256: edefbd312d2f4c52ab6a62d4efca512012bcc580f152c856a5730bfabcf8a924 url: "https://pub.dev" source: hosted - version: "0.1.0+31" + version: "0.1.0+34" firebase_app_check_web: dependency: transitive description: name: firebase_app_check_web - sha256: c2933fc26b73d02b791291df00446a6dbf99d1b59e038bb55cbbec74fcb40c4a + sha256: "2c2377ecf922514c540c2d4a9c06e46830a0706fdfc3d59b7ade9b75843b81c5" url: "https://pub.dev" source: hosted - version: "0.1.2+9" + version: "0.1.2+12" firebase_auth: dependency: "direct main" description: name: firebase_auth - sha256: "3af60a78e92567af3d9a5e25d3955f0f6a3f7a33b900724c1c4c336ff5e44200" + sha256: "2457ac6cbc152fa464aad3fb35f98039b0c4ab8e9bedf476672508b291bdbc3a" url: "https://pub.dev" source: hosted - version: "5.1.0" + version: "5.1.4" firebase_auth_platform_interface: dependency: transitive description: name: firebase_auth_platform_interface - sha256: "6941c07a1d129a8b834f85b6673d3455f24102b6338346596c26ef3be2c106ce" + sha256: "0408e2ed74b1afa0490a93aa041fe90d7573af7ffc59a641edc6c5b5c1b8d2a4" url: "https://pub.dev" source: hosted - version: "7.4.0" + version: "7.4.3" firebase_auth_web: dependency: transitive description: name: firebase_auth_web - sha256: "5c3f6b45dc141cec858c050d6a6f07bdbfab45ab92a68b32be4b08805bdcadaa" + sha256: "7e0c6d0fa8c5c1b2ae126a78f2d1a206a77a913f78d20f155487bf746162dccc" url: "https://pub.dev" source: hosted - version: "5.12.2" + version: "5.12.5" firebase_core: dependency: "direct main" description: @@ -173,10 +173,10 @@ packages: dependency: "direct main" description: name: firebase_vertexai - sha256: a96bc9e8a6e1da0c4bbda2dd24f03b74e069449a3fa7940c87ec611bfc96633d + sha256: ad34f7a87d870949e92851f4c73b7e15f808fd4717ed899fa7b4813fffe74831 url: "https://pub.dev" source: hosted - version: "0.2.2" + version: "0.2.2+4" fixnum: dependency: transitive description: @@ -204,18 +204,18 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" + sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be url: "https://pub.dev" source: hosted - version: "0.4.3" + version: "0.4.4" http: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: 
"1.2.2" http_parser: dependency: transitive description: @@ -299,10 +299,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" sky_engine: dependency: transitive description: flutter diff --git a/packages/langchain_google/pubspec_overrides.yaml b/packages/langchain_google/pubspec_overrides.yaml index 1fabfd3c..9844d8a9 100644 --- a/packages/langchain_google/pubspec_overrides.yaml +++ b/packages/langchain_google/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: vertex_ai,langchain_core +# melos_managed_dependency_overrides: langchain_core,vertex_ai dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_mistralai/pubspec_overrides.yaml b/packages/langchain_mistralai/pubspec_overrides.yaml index 4a44a89b..0bb3e94e 100644 --- a/packages/langchain_mistralai/pubspec_overrides.yaml +++ b/packages/langchain_mistralai/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: mistralai_dart,langchain_core +# melos_managed_dependency_overrides: langchain_core,mistralai_dart dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_ollama/pubspec_overrides.yaml b/packages/langchain_ollama/pubspec_overrides.yaml index 9090f50e..1cab36be 100644 --- a/packages/langchain_ollama/pubspec_overrides.yaml +++ b/packages/langchain_ollama/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: ollama_dart,langchain_core +# melos_managed_dependency_overrides: langchain_core,ollama_dart dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_openai/pubspec_overrides.yaml b/packages/langchain_openai/pubspec_overrides.yaml index d4293e4f..92ad1620 100644 --- a/packages/langchain_openai/pubspec_overrides.yaml +++ b/packages/langchain_openai/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: openai_dart,langchain_core,langchain_community,langchain,tavily_dart +# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,openai_dart,tavily_dart dependency_overrides: langchain: path: ../langchain diff --git a/packages/langchain_pinecone/pubspec_overrides.yaml b/packages/langchain_pinecone/pubspec_overrides.yaml index 8dd8d545..de62cfcc 100644 --- a/packages/langchain_pinecone/pubspec_overrides.yaml +++ b/packages/langchain_pinecone/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_openai,openai_dart,langchain_core +# melos_managed_dependency_overrides: langchain_core,langchain_openai,openai_dart dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_supabase/pubspec_overrides.yaml b/packages/langchain_supabase/pubspec_overrides.yaml index d5cb8df4..b03ffbc5 100644 --- a/packages/langchain_supabase/pubspec_overrides.yaml +++ b/packages/langchain_supabase/pubspec_overrides.yaml @@ -1,5 +1,4 @@ -# melos_managed_dependency_overrides: langchain_openai,openai_dart,langchain_core,langchain_community,tavily_dart -# melos_managed_dependency_overrides: langchain +# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,langchain_openai,openai_dart,tavily_dart dependency_overrides: langchain: path: ../langchain From 5542577c2f86dd96e612d990e46b5154e9c5cf6b Mon Sep 
17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 17 Aug 2024 11:07:28 +0200 Subject: [PATCH 211/251] feat: Add support for Structured Outputs in openai_dart (#525) --- packages/openai_dart/README.md | 51 +- .../generated/schema/assistant_object.dart | 27 +- .../src/generated/schema/assistant_tools.dart | 2 +- .../schema/assistants_response_format.dart | 53 - .../schema/chat_completion_logprobs.dart | 2 +- .../schema/chat_completion_message.dart | 5 +- .../chat_completion_message_content_part.dart | 21 +- ..._completion_message_content_part_type.dart | 2 + ...hat_completion_stream_response_choice.dart | 2 +- ...chat_completion_stream_response_delta.dart | 5 + .../schema/create_assistant_request.dart | 26 +- .../create_chat_completion_request.dart | 59 +- .../generated/schema/create_run_request.dart | 24 +- .../schema/create_thread_and_run_request.dart | 26 +- .../src/generated/schema/function_object.dart | 13 +- .../generated/schema/json_schema_object.dart | 62 + .../src/generated/schema/message_content.dart | 15 + .../schema/message_delta_content.dart | 42 + ...essage_delta_content_image_url_object.dart | 51 - .../schema/modify_assistant_request.dart | 26 +- .../src/generated/schema/response_format.dart | 71 + .../schema/response_format_type.dart | 19 + .../lib/src/generated/schema/run_object.dart | 25 +- .../lib/src/generated/schema/schema.dart | 5 +- .../src/generated/schema/schema.freezed.dart | 4091 ++++++++++++----- .../lib/src/generated/schema/schema.g.dart | 406 +- packages/openai_dart/oas/main.dart | 8 + packages/openai_dart/oas/openapi_curated.yaml | 239 +- .../openai_dart/oas/openapi_official.yaml | 244 +- .../test/openai_client_chat_test.dart | 59 +- 30 files changed, 3911 insertions(+), 1770 deletions(-) delete mode 100644 packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/json_schema_object.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/response_format.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/response_format_type.dart diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md index 2c003f8d..df9cc58b 100644 --- a/packages/openai_dart/README.md +++ b/packages/openai_dart/README.md @@ -165,7 +165,7 @@ await for (final res in stream) { // 789 ``` -**Multi-modal prompt:** +**Multi-modal prompt:** ([docs](https://platform.openai.com/docs/guides/vision)) ```dart final res = await client.createChatCompletion( @@ -198,7 +198,7 @@ print(res.choices.first.message.content); // The fruit in the image is an apple. ``` -**JSON mode:** +**JSON mode:** ([docs](https://platform.openai.com/docs/guides/structured-outputs/json-mode)) ```dart final res = await client.createChatCompletion( @@ -227,7 +227,52 @@ final res = await client.createChatCompletion( // { "names": ["John", "Mary", "Peter"] } ``` -**Tools:** +**Structured output: ([docs](https://platform.openai.com/docs/guides/structured-outputs))** + +```dart +final res = await client.createChatCompletion( + request: CreateChatCompletionRequest( + model: ChatCompletionModel.model( + ChatCompletionModels.gpt4oMini, + ), + messages: [ + ChatCompletionMessage.system( + content: + 'You are a helpful assistant. 
That extracts names from text.', + ), + ChatCompletionMessage.user( + content: ChatCompletionUserMessageContent.string( + 'John, Mary, and Peter.', + ), + ), + ], + temperature: 0, + responseFormat: ResponseFormat.jsonSchema( + jsonSchema: JsonSchemaObject( + name: 'Names', + description: 'A list of names', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'names': { + 'type': 'array', + 'items': { + 'type': 'string', + }, + }, + }, + 'additionalProperties': false, + 'required': ['names'], + }, + ), + ), + ), +); +// {"names":["John","Mary","Peter"]} +``` + +**Tools:** ([docs](https://platform.openai.com/docs/guides/function-calling)) ```dart const function = FunctionObject( diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart index 5784d1ed..f0e1f3b5 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart @@ -60,9 +60,10 @@ class AssistantObject with _$AssistantObject { /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -177,8 +178,6 @@ enum AssistantObjectObject { /// `auto` is the default value enum AssistantResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -187,9 +186,10 @@ enum AssistantResponseFormatMode { // CLASS: AssistantObjectResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-4o-mini-1106`. +/// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. 
Learn more in the @@ -209,14 +209,14 @@ sealed class AssistantObjectResponseFormat const AssistantObjectResponseFormat._(); /// `auto` is the default value - const factory AssistantObjectResponseFormat.enumeration( + const factory AssistantObjectResponseFormat.mode( AssistantResponseFormatMode value, ) = AssistantObjectResponseFormatEnumeration; /// No Description - const factory AssistantObjectResponseFormat.assistantsResponseFormat( - AssistantsResponseFormat value, - ) = AssistantObjectResponseFormatAssistantsResponseFormat; + const factory AssistantObjectResponseFormat.responseFormat( + ResponseFormat value, + ) = AssistantObjectResponseFormatResponseFormat; /// Object construction from a JSON representation factory AssistantObjectResponseFormat.fromJson(Map json) => @@ -243,8 +243,8 @@ class _AssistantObjectResponseFormatConverter } if (data is Map) { try { - return AssistantObjectResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return AssistantObjectResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -258,8 +258,7 @@ class _AssistantObjectResponseFormatConverter return switch (data) { AssistantObjectResponseFormatEnumeration(value: final v) => _$AssistantResponseFormatModeEnumMap[v]!, - AssistantObjectResponseFormatAssistantsResponseFormat(value: final v) => - v.toJson(), + AssistantObjectResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart index 5c0c2c47..e36cd8e6 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart @@ -84,7 +84,7 @@ class AssistantToolsFileSearchFileSearch /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. @JsonKey(name: 'max_num_results', includeIfNull: false) int? maxNumResults, }) = _AssistantToolsFileSearchFileSearch; diff --git a/packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart b/packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart deleted file mode 100644 index bc5f9c8b..00000000 --- a/packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart +++ /dev/null @@ -1,53 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: AssistantsResponseFormat -// ========================================== - -/// An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. -@freezed -class AssistantsResponseFormat with _$AssistantsResponseFormat { - const AssistantsResponseFormat._(); - - /// Factory constructor for AssistantsResponseFormat - const factory AssistantsResponseFormat({ - /// Must be one of `text` or `json_object`. 
- @Default(AssistantsResponseFormatType.text) - AssistantsResponseFormatType type, - }) = _AssistantsResponseFormat; - - /// Object construction from a JSON representation - factory AssistantsResponseFormat.fromJson(Map json) => - _$AssistantsResponseFormatFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['type']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'type': type, - }; - } -} - -// ========================================== -// ENUM: AssistantsResponseFormatType -// ========================================== - -/// Must be one of `text` or `json_object`. -enum AssistantsResponseFormatType { - @JsonValue('text') - text, - @JsonValue('json_object') - jsonObject, -} diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart index 8678903a..36c84a12 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart @@ -16,7 +16,7 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { /// Factory constructor for ChatCompletionLogprobs const factory ChatCompletionLogprobs({ /// A list of message content tokens with log probability information. - required List? content, + @JsonKey(includeIfNull: false) List? content, }) = _ChatCompletionLogprobs; /// Object construction from a JSON representation diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart index e546e524..93afcd9b 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart @@ -59,6 +59,9 @@ sealed class ChatCompletionMessage with _$ChatCompletionMessage { /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. @JsonKey(includeIfNull: false) String? content, + /// The refusal message by the assistant. + @JsonKey(includeIfNull: false) String? refusal, + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. @JsonKey(includeIfNull: false) String? name, @@ -140,7 +143,7 @@ sealed class ChatCompletionUserMessageContent List value, ) = ChatCompletionMessageContentParts; - /// The text contents of the message. + /// The text contents of the user message. const factory ChatCompletionUserMessageContent.string( String value, ) = ChatCompletionUserMessageContentString; diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart index e96bf346..6e38e239 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart @@ -18,7 +18,7 @@ sealed class ChatCompletionMessageContentPart // UNION: ChatCompletionMessageContentPartText // ------------------------------------------ - /// A text content part of a user message. + /// A text content part of a message. 
const factory ChatCompletionMessageContentPart.text({ /// The type of the content part, in this case `text`. @Default(ChatCompletionMessageContentPartType.text) @@ -32,8 +32,7 @@ sealed class ChatCompletionMessageContentPart // UNION: ChatCompletionMessageContentPartImage // ------------------------------------------ - /// Union constructor for [ChatCompletionMessageContentPartImage] - @FreezedUnionValue('image_url') + /// An image content part of a user message. const factory ChatCompletionMessageContentPart.image({ /// The type of the content part, in this case `image_url`. @Default(ChatCompletionMessageContentPartType.imageUrl) @@ -43,6 +42,20 @@ sealed class ChatCompletionMessageContentPart @JsonKey(name: 'image_url') required ChatCompletionMessageImageUrl imageUrl, }) = ChatCompletionMessageContentPartImage; + // ------------------------------------------ + // UNION: ChatCompletionMessageContentPartRefusal + // ------------------------------------------ + + /// A refusal content part of a message. + const factory ChatCompletionMessageContentPart.refusal({ + /// The type of the content part, in this case `refusal`. + @Default(ChatCompletionMessageContentPartType.refusal) + ChatCompletionMessageContentPartType type, + + /// The refusal message generated by the model. + required String refusal, + }) = ChatCompletionMessageContentPartRefusal; + /// Object construction from a JSON representation factory ChatCompletionMessageContentPart.fromJson( Map json) => @@ -58,6 +71,8 @@ enum ChatCompletionMessageContentPartEnumType { text, @JsonValue('image_url') imageUrl, + @JsonValue('refusal') + refusal, } // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart index 0b4409fb..1aeebe14 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart @@ -14,4 +14,6 @@ enum ChatCompletionMessageContentPartType { text, @JsonValue('image_url') imageUrl, + @JsonValue('refusal') + refusal, } diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart index 1b4f0705..39a46139 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart @@ -79,7 +79,7 @@ class ChatCompletionStreamResponseChoiceLogprobs /// Factory constructor for ChatCompletionStreamResponseChoiceLogprobs const factory ChatCompletionStreamResponseChoiceLogprobs({ /// A list of message content tokens with log probability information. - required List? content, + @JsonKey(includeIfNull: false) List? 
content, }) = _ChatCompletionStreamResponseChoiceLogprobs; /// Object construction from a JSON representation diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart index e676c18c..5cc5fa0d 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart @@ -19,6 +19,9 @@ class ChatCompletionStreamResponseDelta /// The contents of the chunk message. @JsonKey(includeIfNull: false) String? content, + /// The refusal message generated by the model. + @JsonKey(includeIfNull: false) String? refusal, + /// The name and arguments of a function that should be called, as generated by the model. @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? functionCall, @@ -43,6 +46,7 @@ class ChatCompletionStreamResponseDelta /// List of all property names of schema static const List propertyNames = [ 'content', + 'refusal', 'function_call', 'tool_calls', 'role' @@ -57,6 +61,7 @@ class ChatCompletionStreamResponseDelta Map toMap() { return { 'content': content, + 'refusal': refusal, 'function_call': functionCall, 'tool_calls': toolCalls, 'role': role, diff --git a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart index 0e849a85..f078f394 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart @@ -51,9 +51,10 @@ class CreateAssistantRequest with _$CreateAssistantRequest { /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -263,8 +264,6 @@ class _AssistantModelConverter /// `auto` is the default value enum CreateAssistantResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -273,9 +272,10 @@ enum CreateAssistantResponseFormatMode { // CLASS: CreateAssistantRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-4o-mini-1106`. +/// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. 
Learn more in the @@ -300,9 +300,9 @@ sealed class CreateAssistantRequestResponseFormat ) = CreateAssistantRequestResponseFormatEnumeration; /// No Description - const factory CreateAssistantRequestResponseFormat.format( - AssistantsResponseFormat value, - ) = CreateAssistantRequestResponseFormatAssistantsResponseFormat; + const factory CreateAssistantRequestResponseFormat.responseFormat( + ResponseFormat value, + ) = CreateAssistantRequestResponseFormatResponseFormat; /// Object construction from a JSON representation factory CreateAssistantRequestResponseFormat.fromJson( @@ -332,8 +332,8 @@ class _CreateAssistantRequestResponseFormatConverter } if (data is Map) { try { - return CreateAssistantRequestResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return CreateAssistantRequestResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -347,9 +347,7 @@ class _CreateAssistantRequestResponseFormatConverter return switch (data) { CreateAssistantRequestResponseFormatEnumeration(value: final v) => _$CreateAssistantResponseFormatModeEnumMap[v]!, - CreateAssistantRequestResponseFormatAssistantsResponseFormat( - value: final v - ) => + CreateAssistantRequestResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index fd24189a..8b6c5c52 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -55,13 +55,16 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { @Default(0.0) double? presencePenalty, - /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? responseFormat, + ResponseFormat? 
responseFormat, /// This feature is in Beta. /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. @@ -380,46 +383,6 @@ class _ChatCompletionModelConverter } } -// ========================================== -// CLASS: ChatCompletionResponseFormat -// ========================================== - -/// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. -/// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. -/// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. -@freezed -class ChatCompletionResponseFormat with _$ChatCompletionResponseFormat { - const ChatCompletionResponseFormat._(); - - /// Factory constructor for ChatCompletionResponseFormat - const factory ChatCompletionResponseFormat({ - /// Must be one of `text` or `json_object`. - @Default(ChatCompletionResponseFormatType.text) - ChatCompletionResponseFormatType type, - }) = _ChatCompletionResponseFormat; - - /// Object construction from a JSON representation - factory ChatCompletionResponseFormat.fromJson(Map json) => - _$ChatCompletionResponseFormatFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['type']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'type': type, - }; - } -} - // ========================================== // ENUM: CreateChatCompletionRequestServiceTier // ========================================== @@ -669,15 +632,3 @@ class _ChatCompletionFunctionCallConverter }; } } - -// ========================================== -// ENUM: ChatCompletionResponseFormatType -// ========================================== - -/// Must be one of `text` or `json_object`. -enum ChatCompletionResponseFormatType { - @JsonValue('text') - text, - @JsonValue('json_object') - jsonObject, -} diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 4c13ec8f..485869d0 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -79,9 +79,10 @@ class CreateRunRequest with _$CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? parallelToolCalls, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. 
Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -390,8 +391,6 @@ class _CreateRunRequestToolChoiceConverter /// `auto` is the default value enum CreateRunRequestResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -400,9 +399,10 @@ enum CreateRunRequestResponseFormatMode { // CLASS: CreateRunRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-4o-mini-1106`. +/// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -427,9 +427,9 @@ sealed class CreateRunRequestResponseFormat ) = CreateRunRequestResponseFormatEnumeration; /// No Description - const factory CreateRunRequestResponseFormat.format( - AssistantsResponseFormat value, - ) = CreateRunRequestResponseFormatAssistantsResponseFormat; + const factory CreateRunRequestResponseFormat.responseFormat( + ResponseFormat value, + ) = CreateRunRequestResponseFormatResponseFormat; /// Object construction from a JSON representation factory CreateRunRequestResponseFormat.fromJson(Map json) => @@ -458,8 +458,8 @@ class _CreateRunRequestResponseFormatConverter } if (data is Map) { try { - return CreateRunRequestResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return CreateRunRequestResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -473,7 +473,7 @@ class _CreateRunRequestResponseFormatConverter return switch (data) { CreateRunRequestResponseFormatEnumeration(value: final v) => _$CreateRunRequestResponseFormatModeEnumMap[v]!, - CreateRunRequestResponseFormatAssistantsResponseFormat(value: final v) => + CreateRunRequestResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index e69a2060..e52be3e1 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -78,9 +78,10 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? parallelToolCalls, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. 
Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -396,8 +397,6 @@ class _CreateThreadAndRunRequestToolChoiceConverter /// `auto` is the default value enum CreateThreadAndRunRequestResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -406,9 +405,10 @@ enum CreateThreadAndRunRequestResponseFormatMode { // CLASS: CreateThreadAndRunRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-4o-mini-1106`. +/// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -433,9 +433,9 @@ sealed class CreateThreadAndRunRequestResponseFormat ) = CreateThreadAndRunRequestResponseFormatEnumeration; /// No Description - const factory CreateThreadAndRunRequestResponseFormat.format( - AssistantsResponseFormat value, - ) = CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat; + const factory CreateThreadAndRunRequestResponseFormat.responseFormat( + ResponseFormat value, + ) = CreateThreadAndRunRequestResponseFormatResponseFormat; /// Object construction from a JSON representation factory CreateThreadAndRunRequestResponseFormat.fromJson( @@ -467,8 +467,8 @@ class _CreateThreadAndRunRequestResponseFormatConverter } if (data is Map) { try { - return CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return CreateThreadAndRunRequestResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -482,9 +482,7 @@ class _CreateThreadAndRunRequestResponseFormatConverter return switch (data) { CreateThreadAndRunRequestResponseFormatEnumeration(value: final v) => _$CreateThreadAndRunRequestResponseFormatModeEnumMap[v]!, - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat( - value: final v - ) => + CreateThreadAndRunRequestResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/function_object.dart b/packages/openai_dart/lib/src/generated/schema/function_object.dart index 647b4e0a..62426de3 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_object.dart @@ -15,7 +15,8 @@ class FunctionObject with _$FunctionObject { /// Factory constructor for FunctionObject const factory FunctionObject({ - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + /// maximum length of 64. 
required String name, /// A description of what the function does, used by the model to choose when and how to call the function. @@ -25,6 +26,12 @@ class FunctionObject with _$FunctionObject { /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) FunctionParameters? parameters, + + /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will + /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. Learn more about Structured Outputs in the + /// [function calling guide](docs/guides/function-calling). + @JsonKey(includeIfNull: false) @Default(false) bool? strict, }) = _FunctionObject; /// Object construction from a JSON representation @@ -35,7 +42,8 @@ class FunctionObject with _$FunctionObject { static const List propertyNames = [ 'name', 'description', - 'parameters' + 'parameters', + 'strict' ]; /// Perform validations on the schema property values @@ -49,6 +57,7 @@ class FunctionObject with _$FunctionObject { 'name': name, 'description': description, 'parameters': parameters, + 'strict': strict, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/json_schema_object.dart b/packages/openai_dart/lib/src/generated/schema/json_schema_object.dart new file mode 100644 index 00000000..32f20701 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/json_schema_object.dart @@ -0,0 +1,62 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: JsonSchemaObject +// ========================================== + +/// A JSON Schema object. +@freezed +class JsonSchemaObject with _$JsonSchemaObject { + const JsonSchemaObject._(); + + /// Factory constructor for JsonSchemaObject + const factory JsonSchemaObject({ + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + /// length of 64. + required String name, + + /// A description of what the response format is for, used by the model to determine how to respond in the + /// format. + @JsonKey(includeIfNull: false) String? description, + + /// The schema for the response format, described as a JSON Schema object. + required Map schema, + + /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always + /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + @JsonKey(includeIfNull: false) @Default(false) bool? strict, + }) = _JsonSchemaObject; + + /// Object construction from a JSON representation + factory JsonSchemaObject.fromJson(Map json) => + _$JsonSchemaObjectFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'name', + 'description', + 'schema', + 'strict' + ]; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'name': name, + 'description': description, + 'schema': schema, + 'strict': strict, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/message_content.dart b/packages/openai_dart/lib/src/generated/schema/message_content.dart index 14e23e22..46783eae 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_content.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_content.dart @@ -52,6 +52,19 @@ sealed class MessageContent with _$MessageContent { required MessageContentText text, }) = MessageContentTextObject; + // ------------------------------------------ + // UNION: MessageContentRefusalObject + // ------------------------------------------ + + /// The refusal content generated by the assistant. + const factory MessageContent.refusal({ + /// Always `refusal`. + required String type, + + /// No Description + required String refusal, + }) = MessageContentRefusalObject; + /// Object construction from a JSON representation factory MessageContent.fromJson(Map json) => _$MessageContentFromJson(json); @@ -68,4 +81,6 @@ enum MessageContentEnumType { imageUrl, @JsonValue('text') text, + @JsonValue('refusal') + refusal, } diff --git a/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart b/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart index f53291ee..738ab400 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart @@ -46,7 +46,49 @@ sealed class MessageDeltaContent with _$MessageDeltaContent { @JsonKey(includeIfNull: false) MessageDeltaContentText? text, }) = MessageDeltaContentTextObject; + // ------------------------------------------ + // UNION: MessageDeltaContentRefusalObject + // ------------------------------------------ + + /// The refusal content that is part of a message. + const factory MessageDeltaContent.refusal({ + /// The index of the refusal part in the message. + required int index, + + /// Always `refusal`. + required String type, + + /// The refusal content generated by the assistant. + @JsonKey(includeIfNull: false) String? refusal, + }) = MessageDeltaContentRefusalObject; + + // ------------------------------------------ + // UNION: MessageDeltaContentImageUrlObject + // ------------------------------------------ + + /// References an image URL in the content of a message. + const factory MessageDeltaContent.imageUrl({ + /// The index of the content part in the message. + required int index, + + /// Always `image_url`. + required String type, + + /// The image URL part of a message. + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? 
imageUrl, + }) = MessageDeltaContentImageUrlObject; + /// Object construction from a JSON representation factory MessageDeltaContent.fromJson(Map json) => _$MessageDeltaContentFromJson(json); } + +// ========================================== +// ENUM: MessageDeltaContentEnumType +// ========================================== + +enum MessageDeltaContentEnumType { + @JsonValue('refusal') + refusal, +} diff --git a/packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart b/packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart deleted file mode 100644 index 1008bbb0..00000000 --- a/packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart +++ /dev/null @@ -1,51 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: MessageDeltaContentImageUrlObject -// ========================================== - -/// References an image URL in the content of a message. -@freezed -class MessageDeltaContentImageUrlObject - with _$MessageDeltaContentImageUrlObject { - const MessageDeltaContentImageUrlObject._(); - - /// Factory constructor for MessageDeltaContentImageUrlObject - const factory MessageDeltaContentImageUrlObject({ - /// The index of the content part in the message. - @JsonKey(includeIfNull: false) int? index, - - /// Always `image_url`. - @JsonKey(includeIfNull: false) String? type, - - /// The image URL part of a message. - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl, - }) = _MessageDeltaContentImageUrlObject; - - /// Object construction from a JSON representation - factory MessageDeltaContentImageUrlObject.fromJson( - Map json) => - _$MessageDeltaContentImageUrlObjectFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['index', 'type', 'image_url']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'index': index, - 'type': type, - 'image_url': imageUrl, - }; - } -} diff --git a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart index 99c1f887..2b4d94d1 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart @@ -54,9 +54,10 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. 
Learn more in the @@ -157,8 +158,6 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { /// `auto` is the default value enum ModifyAssistantResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -167,9 +166,10 @@ enum ModifyAssistantResponseFormatMode { // CLASS: ModifyAssistantRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-4o-mini-1106`. +/// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -194,9 +194,9 @@ sealed class ModifyAssistantRequestResponseFormat ) = ModifyAssistantRequestResponseFormatEnumeration; /// No Description - const factory ModifyAssistantRequestResponseFormat.format( - AssistantsResponseFormat value, - ) = ModifyAssistantRequestResponseFormatAssistantsResponseFormat; + const factory ModifyAssistantRequestResponseFormat.responseFormat( + ResponseFormat value, + ) = ModifyAssistantRequestResponseFormatResponseFormat; /// Object construction from a JSON representation factory ModifyAssistantRequestResponseFormat.fromJson( @@ -226,8 +226,8 @@ class _ModifyAssistantRequestResponseFormatConverter } if (data is Map) { try { - return ModifyAssistantRequestResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return ModifyAssistantRequestResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -241,9 +241,7 @@ class _ModifyAssistantRequestResponseFormatConverter return switch (data) { ModifyAssistantRequestResponseFormatEnumeration(value: final v) => _$ModifyAssistantResponseFormatModeEnumMap[v]!, - ModifyAssistantRequestResponseFormatAssistantsResponseFormat( - value: final v - ) => + ModifyAssistantRequestResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/response_format.dart b/packages/openai_dart/lib/src/generated/schema/response_format.dart new file mode 100644 index 00000000..35b1f30d --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/response_format.dart @@ -0,0 +1,71 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ResponseFormat +// ========================================== + +/// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
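+///
+/// Illustrative sketch (not part of the upstream API description): constructing
+/// the `json_schema` variant with the `JsonSchemaObject` introduced in this
+/// patch. The schema contents below are hypothetical.
+///
+/// ```dart
+/// final format = ResponseFormat.jsonSchema(
+///   jsonSchema: JsonSchemaObject(
+///     name: 'weather_report', // hypothetical response-format name
+///     schema: {
+///       'type': 'object',
+///       'properties': {
+///         'city': {'type': 'string'},
+///         'temperature_c': {'type': 'number'},
+///       },
+///       'required': ['city', 'temperature_c'],
+///       'additionalProperties': false,
+///     },
+///     strict: true, // only a subset of JSON Schema is supported when strict
+///   ),
+/// );
+/// ```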
+/// +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class ResponseFormat with _$ResponseFormat { + const ResponseFormat._(); + + // ------------------------------------------ + // UNION: ResponseFormatText + // ------------------------------------------ + + /// The model should respond with plain text. + const factory ResponseFormat.text({ + /// The type of response format being defined. + @Default(ResponseFormatType.text) ResponseFormatType type, + }) = ResponseFormatText; + + // ------------------------------------------ + // UNION: ResponseFormatJsonObject + // ------------------------------------------ + + /// The model should respond with a JSON object. + const factory ResponseFormat.jsonObject({ + /// The type of response format being defined. + @Default(ResponseFormatType.jsonObject) ResponseFormatType type, + }) = ResponseFormatJsonObject; + + // ------------------------------------------ + // UNION: ResponseFormatJsonSchema + // ------------------------------------------ + + /// The model should respond with a JSON object that adheres to the specified schema. + const factory ResponseFormat.jsonSchema({ + /// The type of response format being defined. + @Default(ResponseFormatType.jsonSchema) ResponseFormatType type, + + /// A JSON Schema object. + @JsonKey(name: 'json_schema') required JsonSchemaObject jsonSchema, + }) = ResponseFormatJsonSchema; + + /// Object construction from a JSON representation + factory ResponseFormat.fromJson(Map json) => + _$ResponseFormatFromJson(json); +} + +// ========================================== +// ENUM: ResponseFormatEnumType +// ========================================== + +enum ResponseFormatEnumType { + @JsonValue('text') + text, + @JsonValue('json_object') + jsonObject, + @JsonValue('json_schema') + jsonSchema, +} diff --git a/packages/openai_dart/lib/src/generated/schema/response_format_type.dart b/packages/openai_dart/lib/src/generated/schema/response_format_type.dart new file mode 100644 index 00000000..da215209 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/response_format_type.dart @@ -0,0 +1,19 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// ENUM: ResponseFormatType +// ========================================== + +/// The type of response format being defined. 
+enum ResponseFormatType { + @JsonValue('text') + text, + @JsonValue('json_object') + jsonObject, + @JsonValue('json_schema') + jsonSchema, +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_object.dart b/packages/openai_dart/lib/src/generated/schema/run_object.dart index 73ffe897..351d140b 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_object.dart @@ -105,9 +105,10 @@ class RunObject with _$RunObject { /// during tool use. @JsonKey(name: 'parallel_tool_calls') required bool? parallelToolCalls, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -451,8 +452,6 @@ class _RunObjectToolChoiceConverter /// `auto` is the default value enum RunObjectResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -461,9 +460,10 @@ enum RunObjectResponseFormatMode { // CLASS: RunObjectResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-4o-mini-1106`. +/// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. 
Learn more in the @@ -487,9 +487,9 @@ sealed class RunObjectResponseFormat with _$RunObjectResponseFormat { ) = RunObjectResponseFormatEnumeration; /// No Description - const factory RunObjectResponseFormat.format( - AssistantsResponseFormat value, - ) = RunObjectResponseFormatAssistantsResponseFormat; + const factory RunObjectResponseFormat.responseFormat( + ResponseFormat value, + ) = RunObjectResponseFormatResponseFormat; /// Object construction from a JSON representation factory RunObjectResponseFormat.fromJson(Map json) => @@ -513,8 +513,8 @@ class _RunObjectResponseFormatConverter } if (data is Map) { try { - return RunObjectResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return RunObjectResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -528,8 +528,7 @@ class _RunObjectResponseFormatConverter return switch (data) { RunObjectResponseFormatEnumeration(value: final v) => _$RunObjectResponseFormatModeEnumMap[v]!, - RunObjectResponseFormatAssistantsResponseFormat(value: final v) => - v.toJson(), + RunObjectResponseFormatResponseFormat(value: final v) => v.toJson(), }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart index e4f6c023..028e108f 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -22,6 +22,8 @@ part 'chat_completion_message_function_call.dart'; part 'chat_completion_function_call_option.dart'; part 'function_object.dart'; part 'function_parameters.dart'; +part 'response_format_type.dart'; +part 'json_schema_object.dart'; part 'chat_completion_tool.dart'; part 'chat_completion_named_tool_choice.dart'; part 'chat_completion_message_tool_calls.dart'; @@ -73,7 +75,6 @@ part 'delete_assistant_response.dart'; part 'list_assistants_response.dart'; part 'assistants_named_tool_choice.dart'; part 'assistants_function_call_option.dart'; -part 'assistants_response_format.dart'; part 'truncation_object.dart'; part 'run_object.dart'; part 'run_completion_usage.dart'; @@ -106,7 +107,6 @@ part 'message_content_image_detail.dart'; part 'message_request_content_text_object.dart'; part 'message_content_text.dart'; part 'message_content_text_annotations_file_citation.dart'; -part 'message_delta_content_image_url_object.dart'; part 'message_delta_content_text.dart'; part 'message_delta_content_text_annotations_file_citation.dart'; part 'run_step_object.dart'; @@ -142,6 +142,7 @@ part 'batch.dart'; part 'list_batches_response.dart'; part 'chat_completion_message.dart'; part 'chat_completion_message_content_part.dart'; +part 'response_format.dart'; part 'assistant_tools.dart'; part 'message_content.dart'; part 'message_delta_content.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 36ff6d91..d3269d89 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -3574,14 +3574,16 @@ mixin _$CreateChatCompletionRequest { @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty => throw _privateConstructorUsedError; - /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. 
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? get responseFormat => - throw _privateConstructorUsedError; + ResponseFormat? get responseFormat => throw _privateConstructorUsedError; /// This feature is in Beta. /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. @@ -3706,7 +3708,7 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? responseFormat, + ResponseFormat? responseFormat, @JsonKey(includeIfNull: false) int? seed, @JsonKey( name: 'service_tier', @@ -3734,7 +3736,7 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @JsonKey(includeIfNull: false) List? functions}); $ChatCompletionModelCopyWith<$Res> get model; - $ChatCompletionResponseFormatCopyWith<$Res>? get responseFormat; + $ResponseFormatCopyWith<$Res>? get responseFormat; $ChatCompletionStopCopyWith<$Res>? get stop; $ChatCompletionStreamOptionsCopyWith<$Res>? get streamOptions; $ChatCompletionToolChoiceOptionCopyWith<$Res>? get toolChoice; @@ -3821,7 +3823,7 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable - as ChatCompletionResponseFormat?, + as ResponseFormat?, seed: freezed == seed ? _value.seed : seed // ignore: cast_nullable_to_non_nullable @@ -3891,13 +3893,12 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $ChatCompletionResponseFormatCopyWith<$Res>? get responseFormat { + $ResponseFormatCopyWith<$Res>? 
get responseFormat { if (_value.responseFormat == null) { return null; } - return $ChatCompletionResponseFormatCopyWith<$Res>(_value.responseFormat!, - (value) { + return $ResponseFormatCopyWith<$Res>(_value.responseFormat!, (value) { return _then(_value.copyWith(responseFormat: value) as $Val); }); } @@ -3985,7 +3986,7 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? responseFormat, + ResponseFormat? responseFormat, @JsonKey(includeIfNull: false) int? seed, @JsonKey( name: 'service_tier', @@ -4015,7 +4016,7 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @override $ChatCompletionModelCopyWith<$Res> get model; @override - $ChatCompletionResponseFormatCopyWith<$Res>? get responseFormat; + $ResponseFormatCopyWith<$Res>? get responseFormat; @override $ChatCompletionStopCopyWith<$Res>? get stop; @override @@ -4105,7 +4106,7 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable - as ChatCompletionResponseFormat?, + as ResponseFormat?, seed: freezed == seed ? _value.seed : seed // ignore: cast_nullable_to_non_nullable @@ -4282,14 +4283,17 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty; - /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @override @JsonKey(name: 'response_format', includeIfNull: false) - final ChatCompletionResponseFormat? responseFormat; + final ResponseFormat? responseFormat; /// This feature is in Beta. /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. 
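// --- Illustrative aside (not part of the generated file) --------------------
// A minimal sketch of the Structured Outputs additions in this patch: the new
// `strict` flag on `FunctionObject` and the `ResponseFormat` union that now
// backs `CreateChatCompletionRequest.responseFormat`. Type and field names come
// from the schema above; the function name and schema contents are hypothetical.
final strictWeatherFunction = FunctionObject(
  name: 'get_weather', // hypothetical function name
  description: 'Look up the current weather for a city.',
  parameters: {
    'type': 'object',
    'properties': {
      'city': {'type': 'string'},
    },
    'required': ['city'],
    'additionalProperties': false,
  },
  strict: true, // the model must follow the `parameters` schema exactly
);

// The new union can be passed wherever a `ResponseFormat` is accepted, e.g.
// `CreateChatCompletionRequest(..., responseFormat: ResponseFormat.jsonObject())`.
// -----------------------------------------------------------------------------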
@@ -4524,7 +4528,7 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty, @JsonKey(name: 'response_format', includeIfNull: false) - final ChatCompletionResponseFormat? responseFormat, + final ResponseFormat? responseFormat, @JsonKey(includeIfNull: false) final int? seed, @JsonKey( name: 'service_tier', @@ -4609,14 +4613,17 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; - /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @override @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? get responseFormat; + ResponseFormat? get responseFormat; /// This feature is in Beta. /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. @@ -5142,177 +5149,6 @@ abstract class ChatCompletionModelString extends ChatCompletionModel { get copyWith => throw _privateConstructorUsedError; } -ChatCompletionResponseFormat _$ChatCompletionResponseFormatFromJson( - Map json) { - return _ChatCompletionResponseFormat.fromJson(json); -} - -/// @nodoc -mixin _$ChatCompletionResponseFormat { - /// Must be one of `text` or `json_object`. - ChatCompletionResponseFormatType get type => - throw _privateConstructorUsedError; - - /// Serializes this ChatCompletionResponseFormat to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionResponseFormat - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $ChatCompletionResponseFormatCopyWith - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ChatCompletionResponseFormatCopyWith<$Res> { - factory $ChatCompletionResponseFormatCopyWith( - ChatCompletionResponseFormat value, - $Res Function(ChatCompletionResponseFormat) then) = - _$ChatCompletionResponseFormatCopyWithImpl<$Res, - ChatCompletionResponseFormat>; - @useResult - $Res call({ChatCompletionResponseFormatType type}); -} - -/// @nodoc -class _$ChatCompletionResponseFormatCopyWithImpl<$Res, - $Val extends ChatCompletionResponseFormat> - implements $ChatCompletionResponseFormatCopyWith<$Res> { - _$ChatCompletionResponseFormatCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of ChatCompletionResponseFormat - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ChatCompletionResponseFormatType, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$ChatCompletionResponseFormatImplCopyWith<$Res> - implements $ChatCompletionResponseFormatCopyWith<$Res> { - factory _$$ChatCompletionResponseFormatImplCopyWith( - _$ChatCompletionResponseFormatImpl value, - $Res Function(_$ChatCompletionResponseFormatImpl) then) = - __$$ChatCompletionResponseFormatImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({ChatCompletionResponseFormatType type}); -} - -/// @nodoc -class __$$ChatCompletionResponseFormatImplCopyWithImpl<$Res> - extends _$ChatCompletionResponseFormatCopyWithImpl<$Res, - _$ChatCompletionResponseFormatImpl> - implements _$$ChatCompletionResponseFormatImplCopyWith<$Res> { - __$$ChatCompletionResponseFormatImplCopyWithImpl( - _$ChatCompletionResponseFormatImpl _value, - $Res Function(_$ChatCompletionResponseFormatImpl) _then) - : super(_value, _then); - - /// Create a copy of ChatCompletionResponseFormat - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_$ChatCompletionResponseFormatImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ChatCompletionResponseFormatType, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ChatCompletionResponseFormatImpl extends _ChatCompletionResponseFormat { - const _$ChatCompletionResponseFormatImpl( - {this.type = ChatCompletionResponseFormatType.text}) - : super._(); - - factory _$ChatCompletionResponseFormatImpl.fromJson( - Map json) => - _$$ChatCompletionResponseFormatImplFromJson(json); - - /// Must be one of `text` or `json_object`. 
- @override - @JsonKey() - final ChatCompletionResponseFormatType type; - - @override - String toString() { - return 'ChatCompletionResponseFormat(type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ChatCompletionResponseFormatImpl && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, type); - - /// Create a copy of ChatCompletionResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$ChatCompletionResponseFormatImplCopyWith< - _$ChatCompletionResponseFormatImpl> - get copyWith => __$$ChatCompletionResponseFormatImplCopyWithImpl< - _$ChatCompletionResponseFormatImpl>(this, _$identity); - - @override - Map toJson() { - return _$$ChatCompletionResponseFormatImplToJson( - this, - ); - } -} - -abstract class _ChatCompletionResponseFormat - extends ChatCompletionResponseFormat { - const factory _ChatCompletionResponseFormat( - {final ChatCompletionResponseFormatType type}) = - _$ChatCompletionResponseFormatImpl; - const _ChatCompletionResponseFormat._() : super._(); - - factory _ChatCompletionResponseFormat.fromJson(Map json) = - _$ChatCompletionResponseFormatImpl.fromJson; - - /// Must be one of `text` or `json_object`. - @override - ChatCompletionResponseFormatType get type; - - /// Create a copy of ChatCompletionResponseFormat - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ChatCompletionResponseFormatImplCopyWith< - _$ChatCompletionResponseFormatImpl> - get copyWith => throw _privateConstructorUsedError; -} - ChatCompletionStop _$ChatCompletionStopFromJson(Map json) { switch (json['runtimeType']) { case 'listString': @@ -7075,7 +6911,8 @@ FunctionObject _$FunctionObjectFromJson(Map json) { /// @nodoc mixin _$FunctionObject { - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + /// maximum length of 64. String get name => throw _privateConstructorUsedError; /// A description of what the function does, used by the model to choose when and how to call the function. @@ -7088,6 +6925,13 @@ mixin _$FunctionObject { @JsonKey(includeIfNull: false) Map? get parameters => throw _privateConstructorUsedError; + /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will + /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. Learn more about Structured Outputs in the + /// [function calling guide](docs/guides/function-calling). + @JsonKey(includeIfNull: false) + bool? get strict => throw _privateConstructorUsedError; + /// Serializes this FunctionObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; @@ -7107,7 +6951,8 @@ abstract class $FunctionObjectCopyWith<$Res> { $Res call( {String name, @JsonKey(includeIfNull: false) String? description, - @JsonKey(includeIfNull: false) Map? parameters}); + @JsonKey(includeIfNull: false) Map? parameters, + @JsonKey(includeIfNull: false) bool? 
strict}); } /// @nodoc @@ -7128,6 +6973,7 @@ class _$FunctionObjectCopyWithImpl<$Res, $Val extends FunctionObject> Object? name = null, Object? description = freezed, Object? parameters = freezed, + Object? strict = freezed, }) { return _then(_value.copyWith( name: null == name @@ -7142,6 +6988,10 @@ class _$FunctionObjectCopyWithImpl<$Res, $Val extends FunctionObject> ? _value.parameters : parameters // ignore: cast_nullable_to_non_nullable as Map?, + strict: freezed == strict + ? _value.strict + : strict // ignore: cast_nullable_to_non_nullable + as bool?, ) as $Val); } } @@ -7157,7 +7007,8 @@ abstract class _$$FunctionObjectImplCopyWith<$Res> $Res call( {String name, @JsonKey(includeIfNull: false) String? description, - @JsonKey(includeIfNull: false) Map? parameters}); + @JsonKey(includeIfNull: false) Map? parameters, + @JsonKey(includeIfNull: false) bool? strict}); } /// @nodoc @@ -7176,6 +7027,7 @@ class __$$FunctionObjectImplCopyWithImpl<$Res> Object? name = null, Object? description = freezed, Object? parameters = freezed, + Object? strict = freezed, }) { return _then(_$FunctionObjectImpl( name: null == name @@ -7190,6 +7042,10 @@ class __$$FunctionObjectImplCopyWithImpl<$Res> ? _value._parameters : parameters // ignore: cast_nullable_to_non_nullable as Map?, + strict: freezed == strict + ? _value.strict + : strict // ignore: cast_nullable_to_non_nullable + as bool?, )); } } @@ -7200,14 +7056,16 @@ class _$FunctionObjectImpl extends _FunctionObject { const _$FunctionObjectImpl( {required this.name, @JsonKey(includeIfNull: false) this.description, - @JsonKey(includeIfNull: false) final Map? parameters}) + @JsonKey(includeIfNull: false) final Map? parameters, + @JsonKey(includeIfNull: false) this.strict = false}) : _parameters = parameters, super._(); factory _$FunctionObjectImpl.fromJson(Map json) => _$$FunctionObjectImplFromJson(json); - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + /// maximum length of 64. @override final String name; @@ -7234,9 +7092,17 @@ class _$FunctionObjectImpl extends _FunctionObject { return EqualUnmodifiableMapView(value); } + /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will + /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. Learn more about Structured Outputs in the + /// [function calling guide](docs/guides/function-calling). + @override + @JsonKey(includeIfNull: false) + final bool? 
strict; + @override String toString() { - return 'FunctionObject(name: $name, description: $description, parameters: $parameters)'; + return 'FunctionObject(name: $name, description: $description, parameters: $parameters, strict: $strict)'; } @override @@ -7248,13 +7114,14 @@ class _$FunctionObjectImpl extends _FunctionObject { (identical(other.description, description) || other.description == description) && const DeepCollectionEquality() - .equals(other._parameters, _parameters)); + .equals(other._parameters, _parameters) && + (identical(other.strict, strict) || other.strict == strict)); } @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, description, - const DeepCollectionEquality().hash(_parameters)); + const DeepCollectionEquality().hash(_parameters), strict); /// Create a copy of FunctionObject /// with the given fields replaced by the non-null parameter values. @@ -7275,16 +7142,18 @@ class _$FunctionObjectImpl extends _FunctionObject { abstract class _FunctionObject extends FunctionObject { const factory _FunctionObject( - {required final String name, - @JsonKey(includeIfNull: false) final String? description, - @JsonKey(includeIfNull: false) - final Map? parameters}) = _$FunctionObjectImpl; + {required final String name, + @JsonKey(includeIfNull: false) final String? description, + @JsonKey(includeIfNull: false) final Map? parameters, + @JsonKey(includeIfNull: false) final bool? strict}) = + _$FunctionObjectImpl; const _FunctionObject._() : super._(); factory _FunctionObject.fromJson(Map json) = _$FunctionObjectImpl.fromJson; - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + /// maximum length of 64. @override String get name; @@ -7300,6 +7169,14 @@ abstract class _FunctionObject extends FunctionObject { @JsonKey(includeIfNull: false) Map? get parameters; + /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will + /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. Learn more about Structured Outputs in the + /// [function calling guide](docs/guides/function-calling). + @override + @JsonKey(includeIfNull: false) + bool? get strict; + /// Create a copy of FunctionObject /// with the given fields replaced by the non-null parameter values. @override @@ -7308,6 +7185,275 @@ abstract class _FunctionObject extends FunctionObject { throw _privateConstructorUsedError; } +JsonSchemaObject _$JsonSchemaObjectFromJson(Map json) { + return _JsonSchemaObject.fromJson(json); +} + +/// @nodoc +mixin _$JsonSchemaObject { + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + /// length of 64. + String get name => throw _privateConstructorUsedError; + + /// A description of what the response format is for, used by the model to determine how to respond in the + /// format. + @JsonKey(includeIfNull: false) + String? get description => throw _privateConstructorUsedError; + + /// The schema for the response format, described as a JSON Schema object. + Map get schema => throw _privateConstructorUsedError; + + /// Whether to enable strict schema adherence when generating the output. 
If set to true, the model will always + /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + @JsonKey(includeIfNull: false) + bool? get strict => throw _privateConstructorUsedError; + + /// Serializes this JsonSchemaObject to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $JsonSchemaObjectCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $JsonSchemaObjectCopyWith<$Res> { + factory $JsonSchemaObjectCopyWith( + JsonSchemaObject value, $Res Function(JsonSchemaObject) then) = + _$JsonSchemaObjectCopyWithImpl<$Res, JsonSchemaObject>; + @useResult + $Res call( + {String name, + @JsonKey(includeIfNull: false) String? description, + Map schema, + @JsonKey(includeIfNull: false) bool? strict}); +} + +/// @nodoc +class _$JsonSchemaObjectCopyWithImpl<$Res, $Val extends JsonSchemaObject> + implements $JsonSchemaObjectCopyWith<$Res> { + _$JsonSchemaObjectCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = freezed, + Object? schema = null, + Object? strict = freezed, + }) { + return _then(_value.copyWith( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: freezed == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String?, + schema: null == schema + ? _value.schema + : schema // ignore: cast_nullable_to_non_nullable + as Map, + strict: freezed == strict + ? _value.strict + : strict // ignore: cast_nullable_to_non_nullable + as bool?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$JsonSchemaObjectImplCopyWith<$Res> + implements $JsonSchemaObjectCopyWith<$Res> { + factory _$$JsonSchemaObjectImplCopyWith(_$JsonSchemaObjectImpl value, + $Res Function(_$JsonSchemaObjectImpl) then) = + __$$JsonSchemaObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String name, + @JsonKey(includeIfNull: false) String? description, + Map schema, + @JsonKey(includeIfNull: false) bool? strict}); +} + +/// @nodoc +class __$$JsonSchemaObjectImplCopyWithImpl<$Res> + extends _$JsonSchemaObjectCopyWithImpl<$Res, _$JsonSchemaObjectImpl> + implements _$$JsonSchemaObjectImplCopyWith<$Res> { + __$$JsonSchemaObjectImplCopyWithImpl(_$JsonSchemaObjectImpl _value, + $Res Function(_$JsonSchemaObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = freezed, + Object? schema = null, + Object? strict = freezed, + }) { + return _then(_$JsonSchemaObjectImpl( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: freezed == description + ? 
_value.description + : description // ignore: cast_nullable_to_non_nullable + as String?, + schema: null == schema + ? _value._schema + : schema // ignore: cast_nullable_to_non_nullable + as Map, + strict: freezed == strict + ? _value.strict + : strict // ignore: cast_nullable_to_non_nullable + as bool?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$JsonSchemaObjectImpl extends _JsonSchemaObject { + const _$JsonSchemaObjectImpl( + {required this.name, + @JsonKey(includeIfNull: false) this.description, + required final Map schema, + @JsonKey(includeIfNull: false) this.strict = false}) + : _schema = schema, + super._(); + + factory _$JsonSchemaObjectImpl.fromJson(Map json) => + _$$JsonSchemaObjectImplFromJson(json); + + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + /// length of 64. + @override + final String name; + + /// A description of what the response format is for, used by the model to determine how to respond in the + /// format. + @override + @JsonKey(includeIfNull: false) + final String? description; + + /// The schema for the response format, described as a JSON Schema object. + final Map _schema; + + /// The schema for the response format, described as a JSON Schema object. + @override + Map get schema { + if (_schema is EqualUnmodifiableMapView) return _schema; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_schema); + } + + /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always + /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + @override + @JsonKey(includeIfNull: false) + final bool? strict; + + @override + String toString() { + return 'JsonSchemaObject(name: $name, description: $description, schema: $schema, strict: $strict)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$JsonSchemaObjectImpl && + (identical(other.name, name) || other.name == name) && + (identical(other.description, description) || + other.description == description) && + const DeepCollectionEquality().equals(other._schema, _schema) && + (identical(other.strict, strict) || other.strict == strict)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, name, description, + const DeepCollectionEquality().hash(_schema), strict); + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$JsonSchemaObjectImplCopyWith<_$JsonSchemaObjectImpl> get copyWith => + __$$JsonSchemaObjectImplCopyWithImpl<_$JsonSchemaObjectImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$JsonSchemaObjectImplToJson( + this, + ); + } +} + +abstract class _JsonSchemaObject extends JsonSchemaObject { + const factory _JsonSchemaObject( + {required final String name, + @JsonKey(includeIfNull: false) final String? description, + required final Map schema, + @JsonKey(includeIfNull: false) final bool? 
strict}) = + _$JsonSchemaObjectImpl; + const _JsonSchemaObject._() : super._(); + + factory _JsonSchemaObject.fromJson(Map json) = + _$JsonSchemaObjectImpl.fromJson; + + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + /// length of 64. + @override + String get name; + + /// A description of what the response format is for, used by the model to determine how to respond in the + /// format. + @override + @JsonKey(includeIfNull: false) + String? get description; + + /// The schema for the response format, described as a JSON Schema object. + @override + Map get schema; + + /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always + /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + @override + @JsonKey(includeIfNull: false) + bool? get strict; + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$JsonSchemaObjectImplCopyWith<_$JsonSchemaObjectImpl> get copyWith => + throw _privateConstructorUsedError; +} + ChatCompletionTool _$ChatCompletionToolFromJson(Map json) { return _ChatCompletionTool.fromJson(json); } @@ -8891,6 +9037,7 @@ ChatCompletionLogprobs _$ChatCompletionLogprobsFromJson( /// @nodoc mixin _$ChatCompletionLogprobs { /// A list of message content tokens with log probability information. + @JsonKey(includeIfNull: false) List? get content => throw _privateConstructorUsedError; @@ -8910,7 +9057,9 @@ abstract class $ChatCompletionLogprobsCopyWith<$Res> { $Res Function(ChatCompletionLogprobs) then) = _$ChatCompletionLogprobsCopyWithImpl<$Res, ChatCompletionLogprobs>; @useResult - $Res call({List? content}); + $Res call( + {@JsonKey(includeIfNull: false) + List? content}); } /// @nodoc @@ -8949,7 +9098,9 @@ abstract class _$$ChatCompletionLogprobsImplCopyWith<$Res> __$$ChatCompletionLogprobsImplCopyWithImpl<$Res>; @override @useResult - $Res call({List? content}); + $Res call( + {@JsonKey(includeIfNull: false) + List? content}); } /// @nodoc @@ -8982,7 +9133,8 @@ class __$$ChatCompletionLogprobsImplCopyWithImpl<$Res> @JsonSerializable() class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { const _$ChatCompletionLogprobsImpl( - {required final List? content}) + {@JsonKey(includeIfNull: false) + final List? content}) : _content = content, super._(); @@ -8994,6 +9146,7 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { /// A list of message content tokens with log probability information. @override + @JsonKey(includeIfNull: false) List? get content { final value = _content; if (value == null) return null; @@ -9039,7 +9192,8 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { abstract class _ChatCompletionLogprobs extends ChatCompletionLogprobs { const factory _ChatCompletionLogprobs( - {required final List? content}) = + {@JsonKey(includeIfNull: false) + final List? content}) = _$ChatCompletionLogprobsImpl; const _ChatCompletionLogprobs._() : super._(); @@ -9048,6 +9202,7 @@ abstract class _ChatCompletionLogprobs extends ChatCompletionLogprobs { /// A list of message content tokens with log probability information. @override + @JsonKey(includeIfNull: false) List? 
get content; /// Create a copy of ChatCompletionLogprobs @@ -10361,6 +10516,7 @@ ChatCompletionStreamResponseChoiceLogprobs /// @nodoc mixin _$ChatCompletionStreamResponseChoiceLogprobs { /// A list of message content tokens with log probability information. + @JsonKey(includeIfNull: false) List? get content => throw _privateConstructorUsedError; @@ -10383,7 +10539,9 @@ abstract class $ChatCompletionStreamResponseChoiceLogprobsCopyWith<$Res> { _$ChatCompletionStreamResponseChoiceLogprobsCopyWithImpl<$Res, ChatCompletionStreamResponseChoiceLogprobs>; @useResult - $Res call({List? content}); + $Res call( + {@JsonKey(includeIfNull: false) + List? content}); } /// @nodoc @@ -10424,7 +10582,9 @@ abstract class _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith<$Res> __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res>; @override @useResult - $Res call({List? content}); + $Res call( + {@JsonKey(includeIfNull: false) + List? content}); } /// @nodoc @@ -10458,7 +10618,8 @@ class __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res> class _$ChatCompletionStreamResponseChoiceLogprobsImpl extends _ChatCompletionStreamResponseChoiceLogprobs { const _$ChatCompletionStreamResponseChoiceLogprobsImpl( - {required final List? content}) + {@JsonKey(includeIfNull: false) + final List? content}) : _content = content, super._(); @@ -10471,6 +10632,7 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl /// A list of message content tokens with log probability information. @override + @JsonKey(includeIfNull: false) List? get content { final value = _content; if (value == null) return null; @@ -10520,7 +10682,8 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl abstract class _ChatCompletionStreamResponseChoiceLogprobs extends ChatCompletionStreamResponseChoiceLogprobs { const factory _ChatCompletionStreamResponseChoiceLogprobs( - {required final List? content}) = + {@JsonKey(includeIfNull: false) + final List? content}) = _$ChatCompletionStreamResponseChoiceLogprobsImpl; const _ChatCompletionStreamResponseChoiceLogprobs._() : super._(); @@ -10530,6 +10693,7 @@ abstract class _ChatCompletionStreamResponseChoiceLogprobs /// A list of message content tokens with log probability information. @override + @JsonKey(includeIfNull: false) List? get content; /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs @@ -10552,6 +10716,10 @@ mixin _$ChatCompletionStreamResponseDelta { @JsonKey(includeIfNull: false) String? get content => throw _privateConstructorUsedError; + /// The refusal message generated by the model. + @JsonKey(includeIfNull: false) + String? get refusal => throw _privateConstructorUsedError; + /// The name and arguments of a function that should be called, as generated by the model. @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? get functionCall => @@ -10587,6 +10755,7 @@ abstract class $ChatCompletionStreamResponseDeltaCopyWith<$Res> { @useResult $Res call( {@JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) @@ -10616,6 +10785,7 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, @override $Res call({ Object? content = freezed, + Object? refusal = freezed, Object? functionCall = freezed, Object? toolCalls = freezed, Object? 
role = freezed, @@ -10625,6 +10795,10 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, ? _value.content : content // ignore: cast_nullable_to_non_nullable as String?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String?, functionCall: freezed == functionCall ? _value.functionCall : functionCall // ignore: cast_nullable_to_non_nullable @@ -10667,6 +10841,7 @@ abstract class _$$ChatCompletionStreamResponseDeltaImplCopyWith<$Res> @useResult $Res call( {@JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) @@ -10696,6 +10871,7 @@ class __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl<$Res> @override $Res call({ Object? content = freezed, + Object? refusal = freezed, Object? functionCall = freezed, Object? toolCalls = freezed, Object? role = freezed, @@ -10705,6 +10881,10 @@ class __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl<$Res> ? _value.content : content // ignore: cast_nullable_to_non_nullable as String?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String?, functionCall: freezed == functionCall ? _value.functionCall : functionCall // ignore: cast_nullable_to_non_nullable @@ -10727,6 +10907,7 @@ class _$ChatCompletionStreamResponseDeltaImpl extends _ChatCompletionStreamResponseDelta { const _$ChatCompletionStreamResponseDeltaImpl( {@JsonKey(includeIfNull: false) this.content, + @JsonKey(includeIfNull: false) this.refusal, @JsonKey(name: 'function_call', includeIfNull: false) this.functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) final List? toolCalls, @@ -10746,6 +10927,11 @@ class _$ChatCompletionStreamResponseDeltaImpl @JsonKey(includeIfNull: false) final String? content; + /// The refusal message generated by the model. + @override + @JsonKey(includeIfNull: false) + final String? refusal; + /// The name and arguments of a function that should be called, as generated by the model. 
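  // Note (illustrative comment, not generated code): with the `refusal` field
  // added to this delta, streaming consumers can surface refusals explicitly
  // instead of treating them as ordinary content, e.g.
  //
  //   final text = delta.refusal ?? delta.content ?? '';
  //
  // where `delta` is the `ChatCompletionStreamResponseDelta` of a stream chunk.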
@override @JsonKey(name: 'function_call', includeIfNull: false) @@ -10773,7 +10959,7 @@ class _$ChatCompletionStreamResponseDeltaImpl @override String toString() { - return 'ChatCompletionStreamResponseDelta(content: $content, functionCall: $functionCall, toolCalls: $toolCalls, role: $role)'; + return 'ChatCompletionStreamResponseDelta(content: $content, refusal: $refusal, functionCall: $functionCall, toolCalls: $toolCalls, role: $role)'; } @override @@ -10782,6 +10968,7 @@ class _$ChatCompletionStreamResponseDeltaImpl (other.runtimeType == runtimeType && other is _$ChatCompletionStreamResponseDeltaImpl && (identical(other.content, content) || other.content == content) && + (identical(other.refusal, refusal) || other.refusal == refusal) && (identical(other.functionCall, functionCall) || other.functionCall == functionCall) && const DeepCollectionEquality() @@ -10791,7 +10978,7 @@ class _$ChatCompletionStreamResponseDeltaImpl @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, content, functionCall, + int get hashCode => Object.hash(runtimeType, content, refusal, functionCall, const DeepCollectionEquality().hash(_toolCalls), role); /// Create a copy of ChatCompletionStreamResponseDelta @@ -10816,6 +11003,7 @@ abstract class _ChatCompletionStreamResponseDelta extends ChatCompletionStreamResponseDelta { const factory _ChatCompletionStreamResponseDelta( {@JsonKey(includeIfNull: false) final String? content, + @JsonKey(includeIfNull: false) final String? refusal, @JsonKey(name: 'function_call', includeIfNull: false) final ChatCompletionStreamMessageFunctionCall? functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) @@ -10836,6 +11024,11 @@ abstract class _ChatCompletionStreamResponseDelta @JsonKey(includeIfNull: false) String? get content; + /// The refusal message generated by the model. + @override + @JsonKey(includeIfNull: false) + String? get refusal; + /// The name and arguments of a function that should be called, as generated by the model. @override @JsonKey(name: 'function_call', includeIfNull: false) @@ -23311,9 +23504,10 @@ mixin _$AssistantObject { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -23706,9 +23900,10 @@ class _$AssistantObjectImpl extends _AssistantObject { @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. 
/// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -23877,9 +24072,10 @@ abstract class _AssistantObject extends AssistantObject { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -23909,11 +24105,10 @@ abstract class _AssistantObject extends AssistantObject { AssistantObjectResponseFormat _$AssistantObjectResponseFormatFromJson( Map json) { switch (json['runtimeType']) { - case 'enumeration': + case 'mode': return AssistantObjectResponseFormatEnumeration.fromJson(json); - case 'assistantsResponseFormat': - return AssistantObjectResponseFormatAssistantsResponseFormat.fromJson( - json); + case 'responseFormat': + return AssistantObjectResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -23929,49 +24124,43 @@ mixin _$AssistantObjectResponseFormat { Object get value => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function(AssistantResponseFormatMode value) enumeration, - required TResult Function(AssistantsResponseFormat value) - assistantsResponseFormat, + required TResult Function(AssistantResponseFormatMode value) mode, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(AssistantResponseFormatMode value)? enumeration, - TResult? Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult? Function(AssistantResponseFormatMode value)? mode, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(AssistantResponseFormatMode value)? enumeration, - TResult Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult Function(AssistantResponseFormatMode value)? mode, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ required TResult Function(AssistantObjectResponseFormatEnumeration value) - enumeration, - required TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value) - assistantsResponseFormat, + mode, + required TResult Function(AssistantObjectResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult? Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult? Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult? Function(AssistantObjectResponseFormatResponseFormat value)? 
+ responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -24046,7 +24235,7 @@ class _$AssistantObjectResponseFormatEnumerationImpl extends AssistantObjectResponseFormatEnumeration { const _$AssistantObjectResponseFormatEnumerationImpl(this.value, {final String? $type}) - : $type = $type ?? 'enumeration', + : $type = $type ?? 'mode', super._(); factory _$AssistantObjectResponseFormatEnumerationImpl.fromJson( @@ -24061,7 +24250,7 @@ class _$AssistantObjectResponseFormatEnumerationImpl @override String toString() { - return 'AssistantObjectResponseFormat.enumeration(value: $value)'; + return 'AssistantObjectResponseFormat.mode(value: $value)'; } @override @@ -24090,31 +24279,30 @@ class _$AssistantObjectResponseFormatEnumerationImpl @override @optionalTypeArgs TResult when({ - required TResult Function(AssistantResponseFormatMode value) enumeration, - required TResult Function(AssistantsResponseFormat value) - assistantsResponseFormat, + required TResult Function(AssistantResponseFormatMode value) mode, + required TResult Function(ResponseFormat value) responseFormat, }) { - return enumeration(value); + return mode(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(AssistantResponseFormatMode value)? enumeration, - TResult? Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult? Function(AssistantResponseFormatMode value)? mode, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return enumeration?.call(value); + return mode?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(AssistantResponseFormatMode value)? enumeration, - TResult Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult Function(AssistantResponseFormatMode value)? mode, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (enumeration != null) { - return enumeration(value); + if (mode != null) { + return mode(value); } return orElse(); } @@ -24123,38 +24311,33 @@ class _$AssistantObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult map({ required TResult Function(AssistantObjectResponseFormatEnumeration value) - enumeration, - required TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value) - assistantsResponseFormat, + mode, + required TResult Function(AssistantObjectResponseFormatResponseFormat value) + responseFormat, }) { - return enumeration(this); + return mode(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult? Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult? Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult? Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, }) { - return enumeration?.call(this); + return mode?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantObjectResponseFormatEnumeration value)? 
- enumeration, - TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (enumeration != null) { - return enumeration(this); + if (mode != null) { + return mode(this); } return orElse(); } @@ -24190,33 +24373,28 @@ abstract class AssistantObjectResponseFormatEnumeration } /// @nodoc -abstract class _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$AssistantObjectResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith( - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl value, - $Res Function( - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl) + factory _$$AssistantObjectResponseFormatResponseFormatImplCopyWith( + _$AssistantObjectResponseFormatResponseFormatImpl value, + $Res Function(_$AssistantObjectResponseFormatResponseFormatImpl) then) = - __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< - $Res>; + __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl<$Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< - $Res> +class __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl<$Res> extends _$AssistantObjectResponseFormatCopyWithImpl<$Res, - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> + _$AssistantObjectResponseFormatResponseFormatImpl> implements - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< - $Res> { - __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl _value, - $Res Function(_$AssistantObjectResponseFormatAssistantsResponseFormatImpl) - _then) + _$$AssistantObjectResponseFormatResponseFormatImplCopyWith<$Res> { + __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl( + _$AssistantObjectResponseFormatResponseFormatImpl _value, + $Res Function(_$AssistantObjectResponseFormatResponseFormatImpl) _then) : super(_value, _then); /// Create a copy of AssistantObjectResponseFormat @@ -24226,11 +24404,11 @@ class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< $Res call({ Object? value = null, }) { - return _then(_$AssistantObjectResponseFormatAssistantsResponseFormatImpl( + return _then(_$AssistantObjectResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } @@ -24238,8 +24416,8 @@ class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -24247,35 +24425,33 @@ class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< /// @nodoc @JsonSerializable() -class _$AssistantObjectResponseFormatAssistantsResponseFormatImpl - extends AssistantObjectResponseFormatAssistantsResponseFormat { - const _$AssistantObjectResponseFormatAssistantsResponseFormatImpl(this.value, +class _$AssistantObjectResponseFormatResponseFormatImpl + extends AssistantObjectResponseFormatResponseFormat { + const _$AssistantObjectResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'assistantsResponseFormat', + : $type = $type ?? 'responseFormat', super._(); - factory _$AssistantObjectResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$AssistantObjectResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplFromJson( - json); + _$$AssistantObjectResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'AssistantObjectResponseFormat.assistantsResponseFormat(value: $value)'; + return 'AssistantObjectResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$AssistantObjectResponseFormatAssistantsResponseFormatImpl && + other is _$AssistantObjectResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } @@ -24288,41 +24464,40 @@ class _$AssistantObjectResponseFormatAssistantsResponseFormatImpl @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> + _$$AssistantObjectResponseFormatResponseFormatImplCopyWith< + _$AssistantObjectResponseFormatResponseFormatImpl> get copyWith => - __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl>( + __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl< + _$AssistantObjectResponseFormatResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(AssistantResponseFormatMode value) enumeration, - required TResult Function(AssistantsResponseFormat value) - assistantsResponseFormat, + required TResult Function(AssistantResponseFormatMode value) mode, + required TResult Function(ResponseFormat value) responseFormat, }) { - return assistantsResponseFormat(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(AssistantResponseFormatMode value)? enumeration, - TResult? Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult? Function(AssistantResponseFormatMode value)? mode, + TResult? Function(ResponseFormat value)? 
responseFormat, }) { - return assistantsResponseFormat?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(AssistantResponseFormatMode value)? enumeration, - TResult Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult Function(AssistantResponseFormatMode value)? mode, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (assistantsResponseFormat != null) { - return assistantsResponseFormat(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -24331,69 +24506,64 @@ class _$AssistantObjectResponseFormatAssistantsResponseFormatImpl @optionalTypeArgs TResult map({ required TResult Function(AssistantObjectResponseFormatEnumeration value) - enumeration, - required TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value) - assistantsResponseFormat, + mode, + required TResult Function(AssistantObjectResponseFormatResponseFormat value) + responseFormat, }) { - return assistantsResponseFormat(this); + return responseFormat(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult? Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult? Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult? Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, }) { - return assistantsResponseFormat?.call(this); + return responseFormat?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (assistantsResponseFormat != null) { - return assistantsResponseFormat(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$AssistantObjectResponseFormatAssistantsResponseFormatImplToJson( + return _$$AssistantObjectResponseFormatResponseFormatImplToJson( this, ); } } -abstract class AssistantObjectResponseFormatAssistantsResponseFormat +abstract class AssistantObjectResponseFormatResponseFormat extends AssistantObjectResponseFormat { - const factory AssistantObjectResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl; - const AssistantObjectResponseFormatAssistantsResponseFormat._() : super._(); + const factory AssistantObjectResponseFormatResponseFormat( + final ResponseFormat value) = + _$AssistantObjectResponseFormatResponseFormatImpl; + const AssistantObjectResponseFormatResponseFormat._() : super._(); - factory AssistantObjectResponseFormatAssistantsResponseFormat.fromJson( + factory AssistantObjectResponseFormatResponseFormat.fromJson( Map json) = - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl.fromJson; + _$AssistantObjectResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; + ResponseFormat get value; /// Create a copy of AssistantObjectResponseFormat /// with the given fields replaced by the non-null parameter values. 
@JsonKey(includeFromJson: false, includeToJson: false) - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> + _$$AssistantObjectResponseFormatResponseFormatImplCopyWith< + _$AssistantObjectResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -24447,9 +24617,10 @@ mixin _$CreateAssistantRequest { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -24813,9 +24984,10 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -24966,9 +25138,10 @@ abstract class _CreateAssistantRequest extends CreateAssistantRequest { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -25409,9 +25582,8 @@ CreateAssistantRequestResponseFormat switch (json['runtimeType']) { case 'mode': return CreateAssistantRequestResponseFormatEnumeration.fromJson(json); - case 'format': - return CreateAssistantRequestResponseFormatAssistantsResponseFormat - .fromJson(json); + case 'responseFormat': + return CreateAssistantRequestResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -25428,19 +25600,19 @@ mixin _$CreateAssistantRequestResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(CreateAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? 
Function(CreateAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -25450,26 +25622,24 @@ mixin _$CreateAssistantRequestResponseFormat { CreateAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + CreateAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -25594,7 +25764,7 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(CreateAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -25603,7 +25773,7 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -25612,7 +25782,7 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -25628,8 +25798,8 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl CreateAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + CreateAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -25639,9 +25809,8 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl TResult? mapOrNull({ TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateAssistantRequestResponseFormatResponseFormat value)? 
+ responseFormat, }) { return mode?.call(this); } @@ -25651,9 +25820,8 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl TResult maybeMap({ TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -25693,33 +25861,32 @@ abstract class CreateAssistantRequestResponseFormatEnumeration } /// @nodoc -abstract class _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl value, + factory _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith( + _$CreateAssistantRequestResponseFormatResponseFormatImpl value, $Res Function( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl) + _$CreateAssistantRequestResponseFormatResponseFormatImpl) then) = - __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< $Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< +class __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< $Res> extends _$CreateAssistantRequestResponseFormatCopyWithImpl<$Res, - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$CreateAssistantRequestResponseFormatResponseFormatImpl> implements - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< $Res> { - __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl _value, - $Res Function( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl) + __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl( + _$CreateAssistantRequestResponseFormatResponseFormatImpl _value, + $Res Function(_$CreateAssistantRequestResponseFormatResponseFormatImpl) _then) : super(_value, _then); @@ -25730,12 +25897,11 @@ class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi $Res call({ Object? value = null, }) { - return _then( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( + return _then(_$CreateAssistantRequestResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } @@ -25743,8 +25909,8 @@ class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -25752,36 +25918,33 @@ class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi /// @nodoc @JsonSerializable() -class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl - extends CreateAssistantRequestResponseFormatAssistantsResponseFormat { - const _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( - this.value, +class _$CreateAssistantRequestResponseFormatResponseFormatImpl + extends CreateAssistantRequestResponseFormatResponseFormat { + const _$CreateAssistantRequestResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 'responseFormat', super._(); - factory _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$CreateAssistantRequestResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( - json); + _$$CreateAssistantRequestResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'CreateAssistantRequestResponseFormat.format(value: $value)'; + return 'CreateAssistantRequestResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl && + other is _$CreateAssistantRequestResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } @@ -25794,40 +25957,40 @@ class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$CreateAssistantRequestResponseFormatResponseFormatImpl> get copyWith => - __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl>( + __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< + _$CreateAssistantRequestResponseFormatResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(CreateAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateAssistantResponseFormatMode value)? 
mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -25839,10 +26002,10 @@ class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl CreateAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + CreateAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @@ -25850,11 +26013,10 @@ class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl TResult? mapOrNull({ TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @@ -25862,46 +26024,43 @@ class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl TResult maybeMap({ TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( + return _$$CreateAssistantRequestResponseFormatResponseFormatImplToJson( this, ); } } -abstract class CreateAssistantRequestResponseFormatAssistantsResponseFormat +abstract class CreateAssistantRequestResponseFormatResponseFormat extends CreateAssistantRequestResponseFormat { - const factory CreateAssistantRequestResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl; - const CreateAssistantRequestResponseFormatAssistantsResponseFormat._() - : super._(); + const factory CreateAssistantRequestResponseFormatResponseFormat( + final ResponseFormat value) = + _$CreateAssistantRequestResponseFormatResponseFormatImpl; + const CreateAssistantRequestResponseFormatResponseFormat._() : super._(); - factory CreateAssistantRequestResponseFormatAssistantsResponseFormat.fromJson( + factory CreateAssistantRequestResponseFormatResponseFormat.fromJson( Map json) = - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl - .fromJson; + _$CreateAssistantRequestResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; + ResponseFormat get value; /// Create a copy of CreateAssistantRequestResponseFormat /// with the given fields replaced by the non-null parameter values. 
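With the union renamed, the assistants `response_format` now wraps the shared `ResponseFormat` type through the `responseFormat` case instead of the removed `AssistantsResponseFormat`. A rough sketch of constructing and pattern-matching the new shape, assuming the usual freezed named constructors corresponding to the `mode`/`responseFormat` callbacks above, and assuming `ResponseFormat.jsonObject()` as one of the shared union's variants:

import 'package:openai_dart/openai_dart.dart';

/// Pattern-matches the renamed union; the `mode` and `responseFormat`
/// callback names come from the generated `when` shown above.
String describeResponseFormat(CreateAssistantRequestResponseFormat format) {
  return format.when(
    mode: (mode) => 'mode: $mode',
    responseFormat: (responseFormat) => 'format: $responseFormat',
  );
}

void main() {
  // ResponseFormat.jsonObject() is an assumed variant of the shared union.
  print(
    describeResponseFormat(
      CreateAssistantRequestResponseFormat.responseFormat(
        ResponseFormat.jsonObject(),
      ),
    ),
  );
}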
@JsonKey(includeFromJson: false, includeToJson: false) - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$CreateAssistantRequestResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -25959,9 +26118,10 @@ mixin _$ModifyAssistantRequest { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -26338,9 +26498,10 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -26499,9 +26660,10 @@ abstract class _ModifyAssistantRequest extends ModifyAssistantRequest { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -26533,9 +26695,8 @@ ModifyAssistantRequestResponseFormat switch (json['runtimeType']) { case 'mode': return ModifyAssistantRequestResponseFormatEnumeration.fromJson(json); - case 'format': - return ModifyAssistantRequestResponseFormatAssistantsResponseFormat - .fromJson(json); + case 'responseFormat': + return ModifyAssistantRequestResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -26552,19 +26713,19 @@ mixin _$ModifyAssistantRequestResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(ModifyAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? 
whenOrNull({ TResult? Function(ModifyAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(ModifyAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -26574,26 +26735,24 @@ mixin _$ModifyAssistantRequestResponseFormat { ModifyAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + ModifyAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -26718,7 +26877,7 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(ModifyAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -26727,7 +26886,7 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(ModifyAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -26736,7 +26895,7 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(ModifyAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -26752,8 +26911,8 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl ModifyAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + ModifyAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -26763,9 +26922,8 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl TResult? mapOrNull({ TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(ModifyAssistantRequestResponseFormatResponseFormat value)? 
+ responseFormat, }) { return mode?.call(this); } @@ -26775,9 +26933,8 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl TResult maybeMap({ TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -26817,33 +26974,32 @@ abstract class ModifyAssistantRequestResponseFormatEnumeration } /// @nodoc -abstract class _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl value, + factory _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith( + _$ModifyAssistantRequestResponseFormatResponseFormatImpl value, $Res Function( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl) + _$ModifyAssistantRequestResponseFormatResponseFormatImpl) then) = - __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< $Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< +class __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< $Res> extends _$ModifyAssistantRequestResponseFormatCopyWithImpl<$Res, - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$ModifyAssistantRequestResponseFormatResponseFormatImpl> implements - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< $Res> { - __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl _value, - $Res Function( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl) + __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl( + _$ModifyAssistantRequestResponseFormatResponseFormatImpl _value, + $Res Function(_$ModifyAssistantRequestResponseFormatResponseFormatImpl) _then) : super(_value, _then); @@ -26854,12 +27010,11 @@ class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi $Res call({ Object? value = null, }) { - return _then( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( + return _then(_$ModifyAssistantRequestResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } @@ -26867,8 +27022,8 @@ class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -26876,36 +27031,33 @@ class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi /// @nodoc @JsonSerializable() -class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl - extends ModifyAssistantRequestResponseFormatAssistantsResponseFormat { - const _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( - this.value, +class _$ModifyAssistantRequestResponseFormatResponseFormatImpl + extends ModifyAssistantRequestResponseFormatResponseFormat { + const _$ModifyAssistantRequestResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 'responseFormat', super._(); - factory _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$ModifyAssistantRequestResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( - json); + _$$ModifyAssistantRequestResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'ModifyAssistantRequestResponseFormat.format(value: $value)'; + return 'ModifyAssistantRequestResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl && + other is _$ModifyAssistantRequestResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } @@ -26918,40 +27070,40 @@ class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$ModifyAssistantRequestResponseFormatResponseFormatImpl> get copyWith => - __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl>( + __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< + _$ModifyAssistantRequestResponseFormatResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(ModifyAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(ModifyAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(ModifyAssistantResponseFormatMode value)? 
mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -26963,10 +27115,10 @@ class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl ModifyAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + ModifyAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @@ -26974,11 +27126,10 @@ class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl TResult? mapOrNull({ TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @@ -26986,46 +27137,43 @@ class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl TResult maybeMap({ TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( + return _$$ModifyAssistantRequestResponseFormatResponseFormatImplToJson( this, ); } } -abstract class ModifyAssistantRequestResponseFormatAssistantsResponseFormat +abstract class ModifyAssistantRequestResponseFormatResponseFormat extends ModifyAssistantRequestResponseFormat { - const factory ModifyAssistantRequestResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl; - const ModifyAssistantRequestResponseFormatAssistantsResponseFormat._() - : super._(); + const factory ModifyAssistantRequestResponseFormatResponseFormat( + final ResponseFormat value) = + _$ModifyAssistantRequestResponseFormatResponseFormatImpl; + const ModifyAssistantRequestResponseFormatResponseFormat._() : super._(); - factory ModifyAssistantRequestResponseFormatAssistantsResponseFormat.fromJson( + factory ModifyAssistantRequestResponseFormatResponseFormat.fromJson( Map json) = - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl - .fromJson; + _$ModifyAssistantRequestResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; + ResponseFormat get value; /// Create a copy of ModifyAssistantRequestResponseFormat /// with the given fields replaced by the non-null parameter values. 
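Call sites that previously matched the removed `format` case can switch to the renamed `responseFormat` callback; `maybeWhen` with `orElse` keeps the handling tolerant of the `mode` case. A hypothetical helper (the function name is illustrative; the callback names come from the generated union above):

import 'package:openai_dart/openai_dart.dart';

/// Hypothetical helper: returns the wrapped ResponseFormat when the union
/// holds the `responseFormat` case, or null when it holds the `mode` case.
ResponseFormat? extractResponseFormat(
  ModifyAssistantRequestResponseFormat format,
) {
  return format.maybeWhen(
    responseFormat: (value) => value,
    orElse: () => null,
  );
}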
@JsonKey(includeFromJson: false, includeToJson: false) - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$ModifyAssistantRequestResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -27908,170 +28056,6 @@ abstract class _AssistantsFunctionCallOption get copyWith => throw _privateConstructorUsedError; } -AssistantsResponseFormat _$AssistantsResponseFormatFromJson( - Map json) { - return _AssistantsResponseFormat.fromJson(json); -} - -/// @nodoc -mixin _$AssistantsResponseFormat { - /// Must be one of `text` or `json_object`. - AssistantsResponseFormatType get type => throw _privateConstructorUsedError; - - /// Serializes this AssistantsResponseFormat to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of AssistantsResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $AssistantsResponseFormatCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $AssistantsResponseFormatCopyWith<$Res> { - factory $AssistantsResponseFormatCopyWith(AssistantsResponseFormat value, - $Res Function(AssistantsResponseFormat) then) = - _$AssistantsResponseFormatCopyWithImpl<$Res, AssistantsResponseFormat>; - @useResult - $Res call({AssistantsResponseFormatType type}); -} - -/// @nodoc -class _$AssistantsResponseFormatCopyWithImpl<$Res, - $Val extends AssistantsResponseFormat> - implements $AssistantsResponseFormatCopyWith<$Res> { - _$AssistantsResponseFormatCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of AssistantsResponseFormat - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormatType, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$AssistantsResponseFormatImplCopyWith<$Res> - implements $AssistantsResponseFormatCopyWith<$Res> { - factory _$$AssistantsResponseFormatImplCopyWith( - _$AssistantsResponseFormatImpl value, - $Res Function(_$AssistantsResponseFormatImpl) then) = - __$$AssistantsResponseFormatImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({AssistantsResponseFormatType type}); -} - -/// @nodoc -class __$$AssistantsResponseFormatImplCopyWithImpl<$Res> - extends _$AssistantsResponseFormatCopyWithImpl<$Res, - _$AssistantsResponseFormatImpl> - implements _$$AssistantsResponseFormatImplCopyWith<$Res> { - __$$AssistantsResponseFormatImplCopyWithImpl( - _$AssistantsResponseFormatImpl _value, - $Res Function(_$AssistantsResponseFormatImpl) _then) - : super(_value, _then); - - /// Create a copy of AssistantsResponseFormat - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_$AssistantsResponseFormatImpl( - type: null == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormatType, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$AssistantsResponseFormatImpl extends _AssistantsResponseFormat { - const _$AssistantsResponseFormatImpl( - {this.type = AssistantsResponseFormatType.text}) - : super._(); - - factory _$AssistantsResponseFormatImpl.fromJson(Map json) => - _$$AssistantsResponseFormatImplFromJson(json); - - /// Must be one of `text` or `json_object`. - @override - @JsonKey() - final AssistantsResponseFormatType type; - - @override - String toString() { - return 'AssistantsResponseFormat(type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$AssistantsResponseFormatImpl && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, type); - - /// Create a copy of AssistantsResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$AssistantsResponseFormatImplCopyWith<_$AssistantsResponseFormatImpl> - get copyWith => __$$AssistantsResponseFormatImplCopyWithImpl< - _$AssistantsResponseFormatImpl>(this, _$identity); - - @override - Map toJson() { - return _$$AssistantsResponseFormatImplToJson( - this, - ); - } -} - -abstract class _AssistantsResponseFormat extends AssistantsResponseFormat { - const factory _AssistantsResponseFormat( - {final AssistantsResponseFormatType type}) = - _$AssistantsResponseFormatImpl; - const _AssistantsResponseFormat._() : super._(); - - factory _AssistantsResponseFormat.fromJson(Map json) = - _$AssistantsResponseFormatImpl.fromJson; - - /// Must be one of `text` or `json_object`. - @override - AssistantsResponseFormatType get type; - - /// Create a copy of AssistantsResponseFormat - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$AssistantsResponseFormatImplCopyWith<_$AssistantsResponseFormatImpl> - get copyWith => throw _privateConstructorUsedError; -} - TruncationObject _$TruncationObjectFromJson(Map json) { return _TruncationObject.fromJson(json); } @@ -28374,9 +28358,10 @@ mixin _$RunObject { @JsonKey(name: 'parallel_tool_calls') bool? get parallelToolCalls => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -29109,9 +29094,10 @@ class _$RunObjectImpl extends _RunObject { @JsonKey(name: 'parallel_tool_calls') final bool? parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. 
Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -29410,9 +29396,10 @@ abstract class _RunObject extends RunObject { @JsonKey(name: 'parallel_tool_calls') bool? get parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -30466,8 +30453,8 @@ RunObjectResponseFormat _$RunObjectResponseFormatFromJson( switch (json['runtimeType']) { case 'mode': return RunObjectResponseFormatEnumeration.fromJson(json); - case 'format': - return RunObjectResponseFormatAssistantsResponseFormat.fromJson(json); + case 'responseFormat': + return RunObjectResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -30484,42 +30471,41 @@ mixin _$RunObjectResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(RunObjectResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(RunObjectResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(RunObjectResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ required TResult Function(RunObjectResponseFormatEnumeration value) mode, - required TResult Function( - RunObjectResponseFormatAssistantsResponseFormat value) - format, + required TResult Function(RunObjectResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(RunObjectResponseFormatEnumeration value)? mode, - TResult? Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(RunObjectResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(RunObjectResponseFormatEnumeration value)? mode, - TResult Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(RunObjectResponseFormatResponseFormat value)? 
+ responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -30636,7 +30622,7 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(RunObjectResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -30645,7 +30631,7 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(RunObjectResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -30654,7 +30640,7 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(RunObjectResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -30667,9 +30653,8 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult map({ required TResult Function(RunObjectResponseFormatEnumeration value) mode, - required TResult Function( - RunObjectResponseFormatAssistantsResponseFormat value) - format, + required TResult Function(RunObjectResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -30678,8 +30663,8 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult? mapOrNull({ TResult? Function(RunObjectResponseFormatEnumeration value)? mode, - TResult? Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(RunObjectResponseFormatResponseFormat value)? + responseFormat, }) { return mode?.call(this); } @@ -30688,8 +30673,8 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeMap({ TResult Function(RunObjectResponseFormatEnumeration value)? mode, - TResult Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(RunObjectResponseFormatResponseFormat value)? 
+ responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -30729,29 +30714,25 @@ abstract class RunObjectResponseFormatEnumeration } /// @nodoc -abstract class _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< - $Res> { - factory _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith( - _$RunObjectResponseFormatAssistantsResponseFormatImpl value, - $Res Function(_$RunObjectResponseFormatAssistantsResponseFormatImpl) - then) = - __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res>; +abstract class _$$RunObjectResponseFormatResponseFormatImplCopyWith<$Res> { + factory _$$RunObjectResponseFormatResponseFormatImplCopyWith( + _$RunObjectResponseFormatResponseFormatImpl value, + $Res Function(_$RunObjectResponseFormatResponseFormatImpl) then) = + __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl<$Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> +class __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl<$Res> extends _$RunObjectResponseFormatCopyWithImpl<$Res, - _$RunObjectResponseFormatAssistantsResponseFormatImpl> - implements - _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith<$Res> { - __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$RunObjectResponseFormatAssistantsResponseFormatImpl _value, - $Res Function(_$RunObjectResponseFormatAssistantsResponseFormatImpl) - _then) + _$RunObjectResponseFormatResponseFormatImpl> + implements _$$RunObjectResponseFormatResponseFormatImplCopyWith<$Res> { + __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl( + _$RunObjectResponseFormatResponseFormatImpl _value, + $Res Function(_$RunObjectResponseFormatResponseFormatImpl) _then) : super(_value, _then); /// Create a copy of RunObjectResponseFormat @@ -30761,11 +30742,11 @@ class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$RunObjectResponseFormatAssistantsResponseFormatImpl( + return _then(_$RunObjectResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } @@ -30773,8 +30754,8 @@ class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -30782,33 +30763,33 @@ class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$RunObjectResponseFormatAssistantsResponseFormatImpl - extends RunObjectResponseFormatAssistantsResponseFormat { - const _$RunObjectResponseFormatAssistantsResponseFormatImpl(this.value, +class _$RunObjectResponseFormatResponseFormatImpl + extends RunObjectResponseFormatResponseFormat { + const _$RunObjectResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 
'responseFormat', super._(); - factory _$RunObjectResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$RunObjectResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$RunObjectResponseFormatAssistantsResponseFormatImplFromJson(json); + _$$RunObjectResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'RunObjectResponseFormat.format(value: $value)'; + return 'RunObjectResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunObjectResponseFormatAssistantsResponseFormatImpl && + other is _$RunObjectResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } @@ -30821,40 +30802,38 @@ class _$RunObjectResponseFormatAssistantsResponseFormatImpl @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< - _$RunObjectResponseFormatAssistantsResponseFormatImpl> - get copyWith => - __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$RunObjectResponseFormatAssistantsResponseFormatImpl>( - this, _$identity); + _$$RunObjectResponseFormatResponseFormatImplCopyWith< + _$RunObjectResponseFormatResponseFormatImpl> + get copyWith => __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl< + _$RunObjectResponseFormatResponseFormatImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(RunObjectResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(RunObjectResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(RunObjectResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -30863,64 +30842,62 @@ class _$RunObjectResponseFormatAssistantsResponseFormatImpl @optionalTypeArgs TResult map({ required TResult Function(RunObjectResponseFormatEnumeration value) mode, - required TResult Function( - RunObjectResponseFormatAssistantsResponseFormat value) - format, + required TResult Function(RunObjectResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @optionalTypeArgs TResult? mapOrNull({ TResult? Function(RunObjectResponseFormatEnumeration value)? mode, - TResult? Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(RunObjectResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @optionalTypeArgs TResult maybeMap({ TResult Function(RunObjectResponseFormatEnumeration value)? 
mode, - TResult Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(RunObjectResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$RunObjectResponseFormatAssistantsResponseFormatImplToJson( + return _$$RunObjectResponseFormatResponseFormatImplToJson( this, ); } } -abstract class RunObjectResponseFormatAssistantsResponseFormat +abstract class RunObjectResponseFormatResponseFormat extends RunObjectResponseFormat { - const factory RunObjectResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$RunObjectResponseFormatAssistantsResponseFormatImpl; - const RunObjectResponseFormatAssistantsResponseFormat._() : super._(); + const factory RunObjectResponseFormatResponseFormat( + final ResponseFormat value) = _$RunObjectResponseFormatResponseFormatImpl; + const RunObjectResponseFormatResponseFormat._() : super._(); - factory RunObjectResponseFormatAssistantsResponseFormat.fromJson( + factory RunObjectResponseFormatResponseFormat.fromJson( Map json) = - _$RunObjectResponseFormatAssistantsResponseFormatImpl.fromJson; + _$RunObjectResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; + ResponseFormat get value; /// Create a copy of RunObjectResponseFormat /// with the given fields replaced by the non-null parameter values. @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< - _$RunObjectResponseFormatAssistantsResponseFormatImpl> + _$$RunObjectResponseFormatResponseFormatImplCopyWith< + _$RunObjectResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -31403,9 +31380,10 @@ mixin _$CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -31940,9 +31918,10 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) final bool? parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. 
Learn more in the @@ -32163,9 +32142,10 @@ abstract class _CreateRunRequest extends CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -33097,9 +33077,8 @@ CreateRunRequestResponseFormat _$CreateRunRequestResponseFormatFromJson( switch (json['runtimeType']) { case 'mode': return CreateRunRequestResponseFormatEnumeration.fromJson(json); - case 'format': - return CreateRunRequestResponseFormatAssistantsResponseFormat.fromJson( - json); + case 'responseFormat': + return CreateRunRequestResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -33116,19 +33095,19 @@ mixin _$CreateRunRequestResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(CreateRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -33137,24 +33116,22 @@ mixin _$CreateRunRequestResponseFormat { required TResult Function(CreateRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value) - format, + CreateRunRequestResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateRunRequestResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateRunRequestResponseFormatResponseFormat value)? 
+ responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -33275,7 +33252,7 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(CreateRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -33284,7 +33261,7 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -33293,7 +33270,7 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -33308,8 +33285,8 @@ class _$CreateRunRequestResponseFormatEnumerationImpl required TResult Function(CreateRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value) - format, + CreateRunRequestResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -33318,9 +33295,8 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateRunRequestResponseFormatResponseFormat value)? + responseFormat, }) { return mode?.call(this); } @@ -33329,9 +33305,8 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeMap({ TResult Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateRunRequestResponseFormatResponseFormat value)? 
+ responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -33371,34 +33346,28 @@ abstract class CreateRunRequestResponseFormatEnumeration } /// @nodoc -abstract class _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl value, - $Res Function( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl) + factory _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith( + _$CreateRunRequestResponseFormatResponseFormatImpl value, + $Res Function(_$CreateRunRequestResponseFormatResponseFormatImpl) then) = - __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - $Res>; + __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl<$Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - $Res> +class __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl<$Res> extends _$CreateRunRequestResponseFormatCopyWithImpl<$Res, - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> + _$CreateRunRequestResponseFormatResponseFormatImpl> implements - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - $Res> { - __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl _value, - $Res Function( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl) - _then) + _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith<$Res> { + __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl( + _$CreateRunRequestResponseFormatResponseFormatImpl _value, + $Res Function(_$CreateRunRequestResponseFormatResponseFormatImpl) _then) : super(_value, _then); /// Create a copy of CreateRunRequestResponseFormat @@ -33408,11 +33377,11 @@ class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl $Res call({ Object? value = null, }) { - return _then(_$CreateRunRequestResponseFormatAssistantsResponseFormatImpl( + return _then(_$CreateRunRequestResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } @@ -33420,8 +33389,8 @@ class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -33429,35 +33398,33 @@ class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl /// @nodoc @JsonSerializable() -class _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl - extends CreateRunRequestResponseFormatAssistantsResponseFormat { - const _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl(this.value, +class _$CreateRunRequestResponseFormatResponseFormatImpl + extends CreateRunRequestResponseFormatResponseFormat { + const _$CreateRunRequestResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 'responseFormat', super._(); - factory _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$CreateRunRequestResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplFromJson( - json); + _$$CreateRunRequestResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'CreateRunRequestResponseFormat.format(value: $value)'; + return 'CreateRunRequestResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl && + other is _$CreateRunRequestResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } @@ -33470,40 +33437,40 @@ class _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith< + _$CreateRunRequestResponseFormatResponseFormatImpl> get copyWith => - __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl>( + __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl< + _$CreateRunRequestResponseFormatResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(CreateRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? 
responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -33514,65 +33481,63 @@ class _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl required TResult Function(CreateRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value) - format, + CreateRunRequestResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateRunRequestResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @optionalTypeArgs TResult maybeMap({ TResult Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateRunRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplToJson( + return _$$CreateRunRequestResponseFormatResponseFormatImplToJson( this, ); } } -abstract class CreateRunRequestResponseFormatAssistantsResponseFormat +abstract class CreateRunRequestResponseFormatResponseFormat extends CreateRunRequestResponseFormat { - const factory CreateRunRequestResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl; - const CreateRunRequestResponseFormatAssistantsResponseFormat._() : super._(); + const factory CreateRunRequestResponseFormatResponseFormat( + final ResponseFormat value) = + _$CreateRunRequestResponseFormatResponseFormatImpl; + const CreateRunRequestResponseFormatResponseFormat._() : super._(); - factory CreateRunRequestResponseFormatAssistantsResponseFormat.fromJson( + factory CreateRunRequestResponseFormatResponseFormat.fromJson( Map json) = - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl.fromJson; + _$CreateRunRequestResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; + ResponseFormat get value; /// Create a copy of CreateRunRequestResponseFormat /// with the given fields replaced by the non-null parameter values. @JsonKey(includeFromJson: false, includeToJson: false) - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith< + _$CreateRunRequestResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -34926,9 +34891,10 @@ mixin _$CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. 
Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -35484,9 +35450,10 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) final bool? parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -35705,9 +35672,10 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -36662,9 +36630,9 @@ CreateThreadAndRunRequestResponseFormat switch (json['runtimeType']) { case 'mode': return CreateThreadAndRunRequestResponseFormatEnumeration.fromJson(json); - case 'format': - return CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - .fromJson(json); + case 'responseFormat': + return CreateThreadAndRunRequestResponseFormatResponseFormat.fromJson( + json); default: throw CheckedFromJsonException( @@ -36682,19 +36650,19 @@ mixin _$CreateThreadAndRunRequestResponseFormat { TResult when({ required TResult Function(CreateThreadAndRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? 
responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -36704,9 +36672,8 @@ mixin _$CreateThreadAndRunRequestResponseFormat { CreateThreadAndRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value) - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -36714,9 +36681,8 @@ mixin _$CreateThreadAndRunRequestResponseFormat { TResult? Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult? Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -36724,9 +36690,8 @@ mixin _$CreateThreadAndRunRequestResponseFormat { TResult Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -36857,7 +36822,7 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl TResult when({ required TResult Function(CreateThreadAndRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -36866,7 +36831,7 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -36875,7 +36840,7 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -36891,9 +36856,8 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl CreateThreadAndRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value) - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -36904,9 +36868,8 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl TResult? Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult? Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, }) { return mode?.call(this); } @@ -36917,9 +36880,8 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl TResult Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? 
+ responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -36959,35 +36921,32 @@ abstract class CreateThreadAndRunRequestResponseFormatEnumeration } /// @nodoc -abstract class _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - value, + factory _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith( + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl value, $Res Function( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl) + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl) then) = - __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl< $Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< +class __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl< $Res> extends _$CreateThreadAndRunRequestResponseFormatCopyWithImpl<$Res, - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl> implements - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< $Res> { - __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - _value, - $Res Function( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl) + __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl( + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl _value, + $Res Function(_$CreateThreadAndRunRequestResponseFormatResponseFormatImpl) _then) : super(_value, _then); @@ -36998,12 +36957,11 @@ class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCop $Res call({ Object? value = null, }) { - return _then( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl( + return _then(_$CreateThreadAndRunRequestResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } @@ -37011,8 +36969,8 @@ class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCop /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -37020,28 +36978,27 @@ class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCop /// @nodoc @JsonSerializable() -class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - extends CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat { - const _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl( - this.value, +class _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl + extends CreateThreadAndRunRequestResponseFormatResponseFormat { + const _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 'responseFormat', super._(); - factory _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplFromJson( + _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplFromJson( json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'CreateThreadAndRunRequestResponseFormat.format(value: $value)'; + return 'CreateThreadAndRunRequestResponseFormat.responseFormat(value: $value)'; } @override @@ -37049,7 +37006,7 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl return identical(this, other) || (other.runtimeType == runtimeType && other - is _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl && + is _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } @@ -37062,11 +37019,11 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl> get copyWith => - __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl>( + __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl< + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl>( this, _$identity); @override @@ -37074,29 +37031,29 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl TResult when({ required TResult Function(CreateThreadAndRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? 
responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -37108,11 +37065,10 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl CreateThreadAndRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value) - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @@ -37121,11 +37077,10 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl TResult? Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult? Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @@ -37134,46 +37089,43 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl TResult Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplToJson( + return _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplToJson( this, ); } } -abstract class CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat +abstract class CreateThreadAndRunRequestResponseFormatResponseFormat extends CreateThreadAndRunRequestResponseFormat { - const factory CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl; - const CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat._() - : super._(); + const factory CreateThreadAndRunRequestResponseFormatResponseFormat( + final ResponseFormat value) = + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl; + const CreateThreadAndRunRequestResponseFormatResponseFormat._() : super._(); - factory CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat.fromJson( + factory CreateThreadAndRunRequestResponseFormatResponseFormat.fromJson( Map json) = - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - .fromJson; + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; + ResponseFormat get value; /// Create a copy of CreateThreadAndRunRequestResponseFormat /// with the given fields replaced by the non-null parameter values. 
@JsonKey(includeFromJson: false, includeToJson: false) - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -43224,263 +43176,6 @@ abstract class _MessageContentTextAnnotationsFileCitation get copyWith => throw _privateConstructorUsedError; } -MessageDeltaContentImageUrlObject _$MessageDeltaContentImageUrlObjectFromJson( - Map json) { - return _MessageDeltaContentImageUrlObject.fromJson(json); -} - -/// @nodoc -mixin _$MessageDeltaContentImageUrlObject { - /// The index of the content part in the message. - @JsonKey(includeIfNull: false) - int? get index => throw _privateConstructorUsedError; - - /// Always `image_url`. - @JsonKey(includeIfNull: false) - String? get type => throw _privateConstructorUsedError; - - /// The image URL part of a message. - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? get imageUrl => throw _privateConstructorUsedError; - - /// Serializes this MessageDeltaContentImageUrlObject to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageDeltaContentImageUrlObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $MessageDeltaContentImageUrlObjectCopyWith - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $MessageDeltaContentImageUrlObjectCopyWith<$Res> { - factory $MessageDeltaContentImageUrlObjectCopyWith( - MessageDeltaContentImageUrlObject value, - $Res Function(MessageDeltaContentImageUrlObject) then) = - _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, - MessageDeltaContentImageUrlObject>; - @useResult - $Res call( - {@JsonKey(includeIfNull: false) int? index, - @JsonKey(includeIfNull: false) String? type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl}); - - $MessageContentImageUrlCopyWith<$Res>? get imageUrl; -} - -/// @nodoc -class _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, - $Val extends MessageDeltaContentImageUrlObject> - implements $MessageDeltaContentImageUrlObjectCopyWith<$Res> { - _$MessageDeltaContentImageUrlObjectCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of MessageDeltaContentImageUrlObject - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? index = freezed, - Object? type = freezed, - Object? imageUrl = freezed, - }) { - return _then(_value.copyWith( - index: freezed == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int?, - type: freezed == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String?, - imageUrl: freezed == imageUrl - ? _value.imageUrl - : imageUrl // ignore: cast_nullable_to_non_nullable - as MessageContentImageUrl?, - ) as $Val); - } - - /// Create a copy of MessageDeltaContentImageUrlObject - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $MessageContentImageUrlCopyWith<$Res>? 
get imageUrl { - if (_value.imageUrl == null) { - return null; - } - - return $MessageContentImageUrlCopyWith<$Res>(_value.imageUrl!, (value) { - return _then(_value.copyWith(imageUrl: value) as $Val); - }); - } -} - -/// @nodoc -abstract class _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> - implements $MessageDeltaContentImageUrlObjectCopyWith<$Res> { - factory _$$MessageDeltaContentImageUrlObjectImplCopyWith( - _$MessageDeltaContentImageUrlObjectImpl value, - $Res Function(_$MessageDeltaContentImageUrlObjectImpl) then) = - __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(includeIfNull: false) int? index, - @JsonKey(includeIfNull: false) String? type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl}); - - @override - $MessageContentImageUrlCopyWith<$Res>? get imageUrl; -} - -/// @nodoc -class __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res> - extends _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, - _$MessageDeltaContentImageUrlObjectImpl> - implements _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> { - __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl( - _$MessageDeltaContentImageUrlObjectImpl _value, - $Res Function(_$MessageDeltaContentImageUrlObjectImpl) _then) - : super(_value, _then); - - /// Create a copy of MessageDeltaContentImageUrlObject - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? index = freezed, - Object? type = freezed, - Object? imageUrl = freezed, - }) { - return _then(_$MessageDeltaContentImageUrlObjectImpl( - index: freezed == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int?, - type: freezed == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String?, - imageUrl: freezed == imageUrl - ? _value.imageUrl - : imageUrl // ignore: cast_nullable_to_non_nullable - as MessageContentImageUrl?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$MessageDeltaContentImageUrlObjectImpl - extends _MessageDeltaContentImageUrlObject { - const _$MessageDeltaContentImageUrlObjectImpl( - {@JsonKey(includeIfNull: false) this.index, - @JsonKey(includeIfNull: false) this.type, - @JsonKey(name: 'image_url', includeIfNull: false) this.imageUrl}) - : super._(); - - factory _$MessageDeltaContentImageUrlObjectImpl.fromJson( - Map json) => - _$$MessageDeltaContentImageUrlObjectImplFromJson(json); - - /// The index of the content part in the message. - @override - @JsonKey(includeIfNull: false) - final int? index; - - /// Always `image_url`. - @override - @JsonKey(includeIfNull: false) - final String? type; - - /// The image URL part of a message. - @override - @JsonKey(name: 'image_url', includeIfNull: false) - final MessageContentImageUrl? 
imageUrl; - - @override - String toString() { - return 'MessageDeltaContentImageUrlObject(index: $index, type: $type, imageUrl: $imageUrl)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$MessageDeltaContentImageUrlObjectImpl && - (identical(other.index, index) || other.index == index) && - (identical(other.type, type) || other.type == type) && - (identical(other.imageUrl, imageUrl) || - other.imageUrl == imageUrl)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, index, type, imageUrl); - - /// Create a copy of MessageDeltaContentImageUrlObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$MessageDeltaContentImageUrlObjectImplCopyWith< - _$MessageDeltaContentImageUrlObjectImpl> - get copyWith => __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl< - _$MessageDeltaContentImageUrlObjectImpl>(this, _$identity); - - @override - Map toJson() { - return _$$MessageDeltaContentImageUrlObjectImplToJson( - this, - ); - } -} - -abstract class _MessageDeltaContentImageUrlObject - extends MessageDeltaContentImageUrlObject { - const factory _MessageDeltaContentImageUrlObject( - {@JsonKey(includeIfNull: false) final int? index, - @JsonKey(includeIfNull: false) final String? type, - @JsonKey(name: 'image_url', includeIfNull: false) - final MessageContentImageUrl? imageUrl}) = - _$MessageDeltaContentImageUrlObjectImpl; - const _MessageDeltaContentImageUrlObject._() : super._(); - - factory _MessageDeltaContentImageUrlObject.fromJson( - Map json) = - _$MessageDeltaContentImageUrlObjectImpl.fromJson; - - /// The index of the content part in the message. - @override - @JsonKey(includeIfNull: false) - int? get index; - - /// Always `image_url`. - @override - @JsonKey(includeIfNull: false) - String? get type; - - /// The image URL part of a message. - @override - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? get imageUrl; - - /// Create a copy of MessageDeltaContentImageUrlObject - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageDeltaContentImageUrlObjectImplCopyWith< - _$MessageDeltaContentImageUrlObjectImpl> - get copyWith => throw _privateConstructorUsedError; -} - MessageDeltaContentText _$MessageDeltaContentTextFromJson( Map json) { return _MessageDeltaContentText.fromJson(json); @@ -53634,6 +53329,7 @@ mixin _$ChatCompletionMessage { required TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -53662,6 +53358,7 @@ mixin _$ChatCompletionMessage { TResult? Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -53690,6 +53387,7 @@ mixin _$ChatCompletionMessage { TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? 
name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -53901,6 +53599,7 @@ class _$ChatCompletionSystemMessageImpl extends ChatCompletionSystemMessage { required TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -53932,6 +53631,7 @@ class _$ChatCompletionSystemMessageImpl extends ChatCompletionSystemMessage { TResult? Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -53963,6 +53663,7 @@ class _$ChatCompletionSystemMessageImpl extends ChatCompletionSystemMessage { TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54197,6 +53898,7 @@ class _$ChatCompletionUserMessageImpl extends ChatCompletionUserMessage { required TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54228,6 +53930,7 @@ class _$ChatCompletionUserMessageImpl extends ChatCompletionUserMessage { TResult? Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54259,6 +53962,7 @@ class _$ChatCompletionUserMessageImpl extends ChatCompletionUserMessage { TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54372,6 +54076,7 @@ abstract class _$$ChatCompletionAssistantMessageImplCopyWith<$Res> $Res call( {ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54398,6 +54103,7 @@ class __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res> $Res call({ Object? role = null, Object? content = freezed, + Object? refusal = freezed, Object? name = freezed, Object? toolCalls = freezed, Object? functionCall = freezed, @@ -54411,6 +54117,10 @@ class __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res> ? _value.content : content // ignore: cast_nullable_to_non_nullable as String?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String?, name: freezed == name ? 
_value.name : name // ignore: cast_nullable_to_non_nullable @@ -54449,6 +54159,7 @@ class _$ChatCompletionAssistantMessageImpl const _$ChatCompletionAssistantMessageImpl( {this.role = ChatCompletionMessageRole.assistant, @JsonKey(includeIfNull: false) this.content, + @JsonKey(includeIfNull: false) this.refusal, @JsonKey(includeIfNull: false) this.name, @JsonKey(name: 'tool_calls', includeIfNull: false) final List? toolCalls, @@ -54470,6 +54181,11 @@ class _$ChatCompletionAssistantMessageImpl @JsonKey(includeIfNull: false) final String? content; + /// The refusal message by the assistant. + @override + @JsonKey(includeIfNull: false) + final String? refusal; + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. @override @JsonKey(includeIfNull: false) @@ -54496,7 +54212,7 @@ class _$ChatCompletionAssistantMessageImpl @override String toString() { - return 'ChatCompletionMessage.assistant(role: $role, content: $content, name: $name, toolCalls: $toolCalls, functionCall: $functionCall)'; + return 'ChatCompletionMessage.assistant(role: $role, content: $content, refusal: $refusal, name: $name, toolCalls: $toolCalls, functionCall: $functionCall)'; } @override @@ -54506,6 +54222,7 @@ class _$ChatCompletionAssistantMessageImpl other is _$ChatCompletionAssistantMessageImpl && (identical(other.role, role) || other.role == role) && (identical(other.content, content) || other.content == content) && + (identical(other.refusal, refusal) || other.refusal == refusal) && (identical(other.name, name) || other.name == name) && const DeepCollectionEquality() .equals(other._toolCalls, _toolCalls) && @@ -54515,7 +54232,7 @@ class _$ChatCompletionAssistantMessageImpl @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, role, content, name, + int get hashCode => Object.hash(runtimeType, role, content, refusal, name, const DeepCollectionEquality().hash(_toolCalls), functionCall); /// Create a copy of ChatCompletionMessage @@ -54543,6 +54260,7 @@ class _$ChatCompletionAssistantMessageImpl required TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54556,7 +54274,7 @@ class _$ChatCompletionAssistantMessageImpl ChatCompletionMessageRole role, String? content, String name) function, }) { - return assistant(role, content, name, toolCalls, functionCall); + return assistant(role, content, refusal, name, toolCalls, functionCall); } @override @@ -54574,6 +54292,7 @@ class _$ChatCompletionAssistantMessageImpl TResult? Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54587,7 +54306,8 @@ class _$ChatCompletionAssistantMessageImpl ChatCompletionMessageRole role, String? content, String name)? function, }) { - return assistant?.call(role, content, name, toolCalls, functionCall); + return assistant?.call( + role, content, refusal, name, toolCalls, functionCall); } @override @@ -54605,6 +54325,7 @@ class _$ChatCompletionAssistantMessageImpl TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? 
refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54620,7 +54341,7 @@ class _$ChatCompletionAssistantMessageImpl required TResult orElse(), }) { if (assistant != null) { - return assistant(role, content, name, toolCalls, functionCall); + return assistant(role, content, refusal, name, toolCalls, functionCall); } return orElse(); } @@ -54677,6 +54398,7 @@ abstract class ChatCompletionAssistantMessage extends ChatCompletionMessage { const factory ChatCompletionAssistantMessage( {final ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) final String? content, + @JsonKey(includeIfNull: false) final String? refusal, @JsonKey(includeIfNull: false) final String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) final List? toolCalls, @@ -54697,6 +54419,10 @@ abstract class ChatCompletionAssistantMessage extends ChatCompletionMessage { @JsonKey(includeIfNull: false) String? get content; + /// The refusal message by the assistant. + @JsonKey(includeIfNull: false) + String? get refusal; + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. @JsonKey(includeIfNull: false) String? get name; @@ -54839,6 +54565,7 @@ class _$ChatCompletionToolMessageImpl extends ChatCompletionToolMessage { required TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54870,6 +54597,7 @@ class _$ChatCompletionToolMessageImpl extends ChatCompletionToolMessage { TResult? Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54901,6 +54629,7 @@ class _$ChatCompletionToolMessageImpl extends ChatCompletionToolMessage { TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -55119,6 +54848,7 @@ class _$ChatCompletionFunctionMessageImpl required TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -55150,6 +54880,7 @@ class _$ChatCompletionFunctionMessageImpl TResult? Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -55181,6 +54912,7 @@ class _$ChatCompletionFunctionMessageImpl TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? 
toolCalls, @@ -55733,8 +55465,10 @@ ChatCompletionMessageContentPart _$ChatCompletionMessageContentPartFromJson( switch (json['type']) { case 'text': return ChatCompletionMessageContentPartText.fromJson(json); - case 'image_url': + case 'image': return ChatCompletionMessageContentPartImage.fromJson(json); + case 'refusal': + return ChatCompletionMessageContentPartRefusal.fromJson(json); default: throw CheckedFromJsonException( @@ -55758,6 +55492,9 @@ mixin _$ChatCompletionMessageContentPart { required TResult Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) image, + required TResult Function( + ChatCompletionMessageContentPartType type, String refusal) + refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -55767,6 +55504,9 @@ mixin _$ChatCompletionMessageContentPart { TResult? Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? image, + TResult? Function( + ChatCompletionMessageContentPartType type, String refusal)? + refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -55776,6 +55516,8 @@ mixin _$ChatCompletionMessageContentPart { TResult Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? image, + TResult Function(ChatCompletionMessageContentPartType type, String refusal)? + refusal, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -55784,18 +55526,22 @@ mixin _$ChatCompletionMessageContentPart { required TResult Function(ChatCompletionMessageContentPartText value) text, required TResult Function(ChatCompletionMessageContentPartImage value) image, + required TResult Function(ChatCompletionMessageContentPartRefusal value) + refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(ChatCompletionMessageContentPartText value)? text, TResult? Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(ChatCompletionMessageContentPartText value)? text, TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -55950,6 +55696,9 @@ class _$ChatCompletionMessageContentPartTextImpl required TResult Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) image, + required TResult Function( + ChatCompletionMessageContentPartType type, String refusal) + refusal, }) { return text(type, this.text); } @@ -55962,6 +55711,9 @@ class _$ChatCompletionMessageContentPartTextImpl TResult? Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? image, + TResult? Function( + ChatCompletionMessageContentPartType type, String refusal)? + refusal, }) { return text?.call(type, this.text); } @@ -55974,6 +55726,8 @@ class _$ChatCompletionMessageContentPartTextImpl TResult Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? image, + TResult Function(ChatCompletionMessageContentPartType type, String refusal)? 
+ refusal, required TResult orElse(), }) { if (text != null) { @@ -55988,6 +55742,8 @@ class _$ChatCompletionMessageContentPartTextImpl required TResult Function(ChatCompletionMessageContentPartText value) text, required TResult Function(ChatCompletionMessageContentPartImage value) image, + required TResult Function(ChatCompletionMessageContentPartRefusal value) + refusal, }) { return text(this); } @@ -55997,6 +55753,7 @@ class _$ChatCompletionMessageContentPartTextImpl TResult? mapOrNull({ TResult? Function(ChatCompletionMessageContentPartText value)? text, TResult? Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, }) { return text?.call(this); } @@ -56006,6 +55763,7 @@ class _$ChatCompletionMessageContentPartTextImpl TResult maybeMap({ TResult Function(ChatCompletionMessageContentPartText value)? text, TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal, required TResult orElse(), }) { if (text != null) { @@ -56168,6 +55926,9 @@ class _$ChatCompletionMessageContentPartImageImpl required TResult Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) image, + required TResult Function( + ChatCompletionMessageContentPartType type, String refusal) + refusal, }) { return image(type, imageUrl); } @@ -56180,6 +55941,9 @@ class _$ChatCompletionMessageContentPartImageImpl TResult? Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? image, + TResult? Function( + ChatCompletionMessageContentPartType type, String refusal)? + refusal, }) { return image?.call(type, imageUrl); } @@ -56192,6 +55956,8 @@ class _$ChatCompletionMessageContentPartImageImpl TResult Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? image, + TResult Function(ChatCompletionMessageContentPartType type, String refusal)? + refusal, required TResult orElse(), }) { if (image != null) { @@ -56206,6 +55972,8 @@ class _$ChatCompletionMessageContentPartImageImpl required TResult Function(ChatCompletionMessageContentPartText value) text, required TResult Function(ChatCompletionMessageContentPartImage value) image, + required TResult Function(ChatCompletionMessageContentPartRefusal value) + refusal, }) { return image(this); } @@ -56215,6 +55983,7 @@ class _$ChatCompletionMessageContentPartImageImpl TResult? mapOrNull({ TResult? Function(ChatCompletionMessageContentPartText value)? text, TResult? Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, }) { return image?.call(this); } @@ -56224,6 +55993,7 @@ class _$ChatCompletionMessageContentPartImageImpl TResult maybeMap({ TResult Function(ChatCompletionMessageContentPartText value)? text, TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(ChatCompletionMessageContentPartRefusal value)? 
refusal,
     required TResult orElse(),
   }) {
     if (image != null) {
@@ -56270,6 +56040,221 @@ abstract class ChatCompletionMessageContentPartImage
       get copyWith => throw _privateConstructorUsedError;
 }
 
+/// @nodoc
+abstract class _$$ChatCompletionMessageContentPartRefusalImplCopyWith<$Res>
+    implements $ChatCompletionMessageContentPartCopyWith<$Res> {
+  factory _$$ChatCompletionMessageContentPartRefusalImplCopyWith(
+          _$ChatCompletionMessageContentPartRefusalImpl value,
+          $Res Function(_$ChatCompletionMessageContentPartRefusalImpl) then) =
+      __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl<$Res>;
+  @override
+  @useResult
+  $Res call({ChatCompletionMessageContentPartType type, String refusal});
+}
+
+/// @nodoc
+class __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl<$Res>
+    extends _$ChatCompletionMessageContentPartCopyWithImpl<$Res,
+        _$ChatCompletionMessageContentPartRefusalImpl>
+    implements _$$ChatCompletionMessageContentPartRefusalImplCopyWith<$Res> {
+  __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl(
+      _$ChatCompletionMessageContentPartRefusalImpl _value,
+      $Res Function(_$ChatCompletionMessageContentPartRefusalImpl) _then)
+      : super(_value, _then);
+
+  /// Create a copy of ChatCompletionMessageContentPart
+  /// with the given fields replaced by the non-null parameter values.
+  @pragma('vm:prefer-inline')
+  @override
+  $Res call({
+    Object? type = null,
+    Object? refusal = null,
+  }) {
+    return _then(_$ChatCompletionMessageContentPartRefusalImpl(
+      type: null == type
+          ? _value.type
+          : type // ignore: cast_nullable_to_non_nullable
+              as ChatCompletionMessageContentPartType,
+      refusal: null == refusal
+          ? _value.refusal
+          : refusal // ignore: cast_nullable_to_non_nullable
+              as String,
+    ));
+  }
+}
+
+/// @nodoc
+@JsonSerializable()
+class _$ChatCompletionMessageContentPartRefusalImpl
+    extends ChatCompletionMessageContentPartRefusal {
+  const _$ChatCompletionMessageContentPartRefusalImpl(
+      {this.type = ChatCompletionMessageContentPartType.refusal,
+      required this.refusal})
+      : super._();
+
+  factory _$ChatCompletionMessageContentPartRefusalImpl.fromJson(
+          Map<String, dynamic> json) =>
+      _$$ChatCompletionMessageContentPartRefusalImplFromJson(json);
+
+  /// The type of the content part, in this case `refusal`.
+  @override
+  @JsonKey()
+  final ChatCompletionMessageContentPartType type;
+
+  /// The refusal message generated by the model.
+  @override
+  final String refusal;
+
+  @override
+  String toString() {
+    return 'ChatCompletionMessageContentPart.refusal(type: $type, refusal: $refusal)';
+  }
+
+  @override
+  bool operator ==(Object other) {
+    return identical(this, other) ||
+        (other.runtimeType == runtimeType &&
+            other is _$ChatCompletionMessageContentPartRefusalImpl &&
+            (identical(other.type, type) || other.type == type) &&
+            (identical(other.refusal, refusal) || other.refusal == refusal));
+  }
+
+  @JsonKey(includeFromJson: false, includeToJson: false)
+  @override
+  int get hashCode => Object.hash(runtimeType, type, refusal);
+
+  /// Create a copy of ChatCompletionMessageContentPart
+  /// with the given fields replaced by the non-null parameter values.
+  @JsonKey(includeFromJson: false, includeToJson: false)
+  @override
+  @pragma('vm:prefer-inline')
+  _$$ChatCompletionMessageContentPartRefusalImplCopyWith<
+      _$ChatCompletionMessageContentPartRefusalImpl>
+      get copyWith =>
+          __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl<
+              _$ChatCompletionMessageContentPartRefusalImpl>(this, _$identity);
+
+  @override
+  @optionalTypeArgs
+  TResult when<TResult extends Object?>({
+    required TResult Function(
+            ChatCompletionMessageContentPartType type, String text)
+        text,
+    required TResult Function(ChatCompletionMessageContentPartType type,
+            @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)
+        image,
+    required TResult Function(
+            ChatCompletionMessageContentPartType type, String refusal)
+        refusal,
+  }) {
+    return refusal(type, this.refusal);
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult? whenOrNull<TResult extends Object?>({
+    TResult? Function(ChatCompletionMessageContentPartType type, String text)?
+        text,
+    TResult? Function(ChatCompletionMessageContentPartType type,
+            @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)?
+        image,
+    TResult? Function(
+            ChatCompletionMessageContentPartType type, String refusal)?
+        refusal,
+  }) {
+    return refusal?.call(type, this.refusal);
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult maybeWhen<TResult extends Object?>({
+    TResult Function(ChatCompletionMessageContentPartType type, String text)?
+        text,
+    TResult Function(ChatCompletionMessageContentPartType type,
+            @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)?
+        image,
+    TResult Function(ChatCompletionMessageContentPartType type, String refusal)?
+        refusal,
+    required TResult orElse(),
+  }) {
+    if (refusal != null) {
+      return refusal(type, this.refusal);
+    }
+    return orElse();
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult map<TResult extends Object?>({
+    required TResult Function(ChatCompletionMessageContentPartText value) text,
+    required TResult Function(ChatCompletionMessageContentPartImage value)
+        image,
+    required TResult Function(ChatCompletionMessageContentPartRefusal value)
+        refusal,
+  }) {
+    return refusal(this);
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult? mapOrNull<TResult extends Object?>({
+    TResult? Function(ChatCompletionMessageContentPartText value)? text,
+    TResult? Function(ChatCompletionMessageContentPartImage value)? image,
+    TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal,
+  }) {
+    return refusal?.call(this);
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult maybeMap<TResult extends Object?>({
+    TResult Function(ChatCompletionMessageContentPartText value)? text,
+    TResult Function(ChatCompletionMessageContentPartImage value)? image,
+    TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal,
+    required TResult orElse(),
+  }) {
+    if (refusal != null) {
+      return refusal(this);
+    }
+    return orElse();
+  }
+
+  @override
+  Map<String, dynamic> toJson() {
+    return _$$ChatCompletionMessageContentPartRefusalImplToJson(
+      this,
+    );
+  }
+}
+
+abstract class ChatCompletionMessageContentPartRefusal
+    extends ChatCompletionMessageContentPart {
+  const factory ChatCompletionMessageContentPartRefusal(
+      {final ChatCompletionMessageContentPartType type,
+      required final String refusal}) =
+      _$ChatCompletionMessageContentPartRefusalImpl;
+  const ChatCompletionMessageContentPartRefusal._() : super._();
+
+  factory ChatCompletionMessageContentPartRefusal.fromJson(
+          Map<String, dynamic> json) =
+      _$ChatCompletionMessageContentPartRefusalImpl.fromJson;
+
+  /// The type of the content part, in this case `refusal`.
+  @override
+  ChatCompletionMessageContentPartType get type;
+
+  /// The refusal message generated by the model.
+ String get refusal; + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionMessageContentPartRefusalImplCopyWith< + _$ChatCompletionMessageContentPartRefusalImpl> + get copyWith => throw _privateConstructorUsedError; +} + ChatCompletionMessageImageUrl _$ChatCompletionMessageImageUrlFromJson( Map json) { return _ChatCompletionMessageImageUrl.fromJson(json); @@ -56465,6 +56450,692 @@ abstract class _ChatCompletionMessageImageUrl get copyWith => throw _privateConstructorUsedError; } +ResponseFormat _$ResponseFormatFromJson(Map json) { + switch (json['type']) { + case 'text': + return ResponseFormatText.fromJson(json); + case 'json_object': + return ResponseFormatJsonObject.fromJson(json); + case 'json_schema': + return ResponseFormatJsonSchema.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'type', 'ResponseFormat', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$ResponseFormat { + /// The type of response format being defined. + ResponseFormatType get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(ResponseFormatType type) text, + required TResult Function(ResponseFormatType type) jsonObject, + required TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) + jsonSchema, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ResponseFormatType type)? text, + TResult? Function(ResponseFormatType type)? jsonObject, + TResult? Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ResponseFormatType type)? text, + TResult Function(ResponseFormatType type)? jsonObject, + TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(ResponseFormatText value) text, + required TResult Function(ResponseFormatJsonObject value) jsonObject, + required TResult Function(ResponseFormatJsonSchema value) jsonSchema, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ResponseFormatText value)? text, + TResult? Function(ResponseFormatJsonObject value)? jsonObject, + TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ResponseFormatText value)? text, + TResult Function(ResponseFormatJsonObject value)? jsonObject, + TResult Function(ResponseFormatJsonSchema value)? jsonSchema, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + + /// Serializes this ResponseFormat to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $ResponseFormatCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ResponseFormatCopyWith<$Res> { + factory $ResponseFormatCopyWith( + ResponseFormat value, $Res Function(ResponseFormat) then) = + _$ResponseFormatCopyWithImpl<$Res, ResponseFormat>; + @useResult + $Res call({ResponseFormatType type}); +} + +/// @nodoc +class _$ResponseFormatCopyWithImpl<$Res, $Val extends ResponseFormat> + implements $ResponseFormatCopyWith<$Res> { + _$ResponseFormatCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ResponseFormatType, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ResponseFormatTextImplCopyWith<$Res> + implements $ResponseFormatCopyWith<$Res> { + factory _$$ResponseFormatTextImplCopyWith(_$ResponseFormatTextImpl value, + $Res Function(_$ResponseFormatTextImpl) then) = + __$$ResponseFormatTextImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ResponseFormatType type}); +} + +/// @nodoc +class __$$ResponseFormatTextImplCopyWithImpl<$Res> + extends _$ResponseFormatCopyWithImpl<$Res, _$ResponseFormatTextImpl> + implements _$$ResponseFormatTextImplCopyWith<$Res> { + __$$ResponseFormatTextImplCopyWithImpl(_$ResponseFormatTextImpl _value, + $Res Function(_$ResponseFormatTextImpl) _then) + : super(_value, _then); + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$ResponseFormatTextImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ResponseFormatType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ResponseFormatTextImpl extends ResponseFormatText { + const _$ResponseFormatTextImpl({this.type = ResponseFormatType.text}) + : super._(); + + factory _$ResponseFormatTextImpl.fromJson(Map json) => + _$$ResponseFormatTextImplFromJson(json); + + /// The type of response format being defined. + @override + @JsonKey() + final ResponseFormatType type; + + @override + String toString() { + return 'ResponseFormat.text(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ResponseFormatTextImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, type); + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ResponseFormatTextImplCopyWith<_$ResponseFormatTextImpl> get copyWith => + __$$ResponseFormatTextImplCopyWithImpl<_$ResponseFormatTextImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(ResponseFormatType type) text, + required TResult Function(ResponseFormatType type) jsonObject, + required TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) + jsonSchema, + }) { + return text(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ResponseFormatType type)? text, + TResult? Function(ResponseFormatType type)? jsonObject, + TResult? Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + }) { + return text?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ResponseFormatType type)? text, + TResult Function(ResponseFormatType type)? jsonObject, + TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + required TResult orElse(), + }) { + if (text != null) { + return text(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ResponseFormatText value) text, + required TResult Function(ResponseFormatJsonObject value) jsonObject, + required TResult Function(ResponseFormatJsonSchema value) jsonSchema, + }) { + return text(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ResponseFormatText value)? text, + TResult? Function(ResponseFormatJsonObject value)? jsonObject, + TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, + }) { + return text?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ResponseFormatText value)? text, + TResult Function(ResponseFormatJsonObject value)? jsonObject, + TResult Function(ResponseFormatJsonSchema value)? jsonSchema, + required TResult orElse(), + }) { + if (text != null) { + return text(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ResponseFormatTextImplToJson( + this, + ); + } +} + +abstract class ResponseFormatText extends ResponseFormat { + const factory ResponseFormatText({final ResponseFormatType type}) = + _$ResponseFormatTextImpl; + const ResponseFormatText._() : super._(); + + factory ResponseFormatText.fromJson(Map json) = + _$ResponseFormatTextImpl.fromJson; + + /// The type of response format being defined. + @override + ResponseFormatType get type; + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ResponseFormatTextImplCopyWith<_$ResponseFormatTextImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ResponseFormatJsonObjectImplCopyWith<$Res> + implements $ResponseFormatCopyWith<$Res> { + factory _$$ResponseFormatJsonObjectImplCopyWith( + _$ResponseFormatJsonObjectImpl value, + $Res Function(_$ResponseFormatJsonObjectImpl) then) = + __$$ResponseFormatJsonObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ResponseFormatType type}); +} + +/// @nodoc +class __$$ResponseFormatJsonObjectImplCopyWithImpl<$Res> + extends _$ResponseFormatCopyWithImpl<$Res, _$ResponseFormatJsonObjectImpl> + implements _$$ResponseFormatJsonObjectImplCopyWith<$Res> { + __$$ResponseFormatJsonObjectImplCopyWithImpl( + _$ResponseFormatJsonObjectImpl _value, + $Res Function(_$ResponseFormatJsonObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$ResponseFormatJsonObjectImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ResponseFormatType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ResponseFormatJsonObjectImpl extends ResponseFormatJsonObject { + const _$ResponseFormatJsonObjectImpl( + {this.type = ResponseFormatType.jsonObject}) + : super._(); + + factory _$ResponseFormatJsonObjectImpl.fromJson(Map json) => + _$$ResponseFormatJsonObjectImplFromJson(json); + + /// The type of response format being defined. + @override + @JsonKey() + final ResponseFormatType type; + + @override + String toString() { + return 'ResponseFormat.jsonObject(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ResponseFormatJsonObjectImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, type); + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ResponseFormatJsonObjectImplCopyWith<_$ResponseFormatJsonObjectImpl> + get copyWith => __$$ResponseFormatJsonObjectImplCopyWithImpl< + _$ResponseFormatJsonObjectImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(ResponseFormatType type) text, + required TResult Function(ResponseFormatType type) jsonObject, + required TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) + jsonSchema, + }) { + return jsonObject(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ResponseFormatType type)? text, + TResult? Function(ResponseFormatType type)? jsonObject, + TResult? Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + }) { + return jsonObject?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ResponseFormatType type)? text, + TResult Function(ResponseFormatType type)? jsonObject, + TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? 
+ jsonSchema, + required TResult orElse(), + }) { + if (jsonObject != null) { + return jsonObject(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ResponseFormatText value) text, + required TResult Function(ResponseFormatJsonObject value) jsonObject, + required TResult Function(ResponseFormatJsonSchema value) jsonSchema, + }) { + return jsonObject(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ResponseFormatText value)? text, + TResult? Function(ResponseFormatJsonObject value)? jsonObject, + TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, + }) { + return jsonObject?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ResponseFormatText value)? text, + TResult Function(ResponseFormatJsonObject value)? jsonObject, + TResult Function(ResponseFormatJsonSchema value)? jsonSchema, + required TResult orElse(), + }) { + if (jsonObject != null) { + return jsonObject(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ResponseFormatJsonObjectImplToJson( + this, + ); + } +} + +abstract class ResponseFormatJsonObject extends ResponseFormat { + const factory ResponseFormatJsonObject({final ResponseFormatType type}) = + _$ResponseFormatJsonObjectImpl; + const ResponseFormatJsonObject._() : super._(); + + factory ResponseFormatJsonObject.fromJson(Map json) = + _$ResponseFormatJsonObjectImpl.fromJson; + + /// The type of response format being defined. + @override + ResponseFormatType get type; + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ResponseFormatJsonObjectImplCopyWith<_$ResponseFormatJsonObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ResponseFormatJsonSchemaImplCopyWith<$Res> + implements $ResponseFormatCopyWith<$Res> { + factory _$$ResponseFormatJsonSchemaImplCopyWith( + _$ResponseFormatJsonSchemaImpl value, + $Res Function(_$ResponseFormatJsonSchemaImpl) then) = + __$$ResponseFormatJsonSchemaImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema}); + + $JsonSchemaObjectCopyWith<$Res> get jsonSchema; +} + +/// @nodoc +class __$$ResponseFormatJsonSchemaImplCopyWithImpl<$Res> + extends _$ResponseFormatCopyWithImpl<$Res, _$ResponseFormatJsonSchemaImpl> + implements _$$ResponseFormatJsonSchemaImplCopyWith<$Res> { + __$$ResponseFormatJsonSchemaImplCopyWithImpl( + _$ResponseFormatJsonSchemaImpl _value, + $Res Function(_$ResponseFormatJsonSchemaImpl) _then) + : super(_value, _then); + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? jsonSchema = null, + }) { + return _then(_$ResponseFormatJsonSchemaImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ResponseFormatType, + jsonSchema: null == jsonSchema + ? _value.jsonSchema + : jsonSchema // ignore: cast_nullable_to_non_nullable + as JsonSchemaObject, + )); + } + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @override + @pragma('vm:prefer-inline') + $JsonSchemaObjectCopyWith<$Res> get jsonSchema { + return $JsonSchemaObjectCopyWith<$Res>(_value.jsonSchema, (value) { + return _then(_value.copyWith(jsonSchema: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ResponseFormatJsonSchemaImpl extends ResponseFormatJsonSchema { + const _$ResponseFormatJsonSchemaImpl( + {this.type = ResponseFormatType.jsonSchema, + @JsonKey(name: 'json_schema') required this.jsonSchema}) + : super._(); + + factory _$ResponseFormatJsonSchemaImpl.fromJson(Map json) => + _$$ResponseFormatJsonSchemaImplFromJson(json); + + /// The type of response format being defined. + @override + @JsonKey() + final ResponseFormatType type; + + /// A JSON Schema object. + @override + @JsonKey(name: 'json_schema') + final JsonSchemaObject jsonSchema; + + @override + String toString() { + return 'ResponseFormat.jsonSchema(type: $type, jsonSchema: $jsonSchema)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ResponseFormatJsonSchemaImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.jsonSchema, jsonSchema) || + other.jsonSchema == jsonSchema)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, type, jsonSchema); + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ResponseFormatJsonSchemaImplCopyWith<_$ResponseFormatJsonSchemaImpl> + get copyWith => __$$ResponseFormatJsonSchemaImplCopyWithImpl< + _$ResponseFormatJsonSchemaImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(ResponseFormatType type) text, + required TResult Function(ResponseFormatType type) jsonObject, + required TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) + jsonSchema, + }) { + return jsonSchema(type, this.jsonSchema); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ResponseFormatType type)? text, + TResult? Function(ResponseFormatType type)? jsonObject, + TResult? Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + }) { + return jsonSchema?.call(type, this.jsonSchema); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ResponseFormatType type)? text, + TResult Function(ResponseFormatType type)? jsonObject, + TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + required TResult orElse(), + }) { + if (jsonSchema != null) { + return jsonSchema(type, this.jsonSchema); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ResponseFormatText value) text, + required TResult Function(ResponseFormatJsonObject value) jsonObject, + required TResult Function(ResponseFormatJsonSchema value) jsonSchema, + }) { + return jsonSchema(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ResponseFormatText value)? text, + TResult? Function(ResponseFormatJsonObject value)? jsonObject, + TResult? Function(ResponseFormatJsonSchema value)? 
jsonSchema, + }) { + return jsonSchema?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ResponseFormatText value)? text, + TResult Function(ResponseFormatJsonObject value)? jsonObject, + TResult Function(ResponseFormatJsonSchema value)? jsonSchema, + required TResult orElse(), + }) { + if (jsonSchema != null) { + return jsonSchema(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ResponseFormatJsonSchemaImplToJson( + this, + ); + } +} + +abstract class ResponseFormatJsonSchema extends ResponseFormat { + const factory ResponseFormatJsonSchema( + {final ResponseFormatType type, + @JsonKey(name: 'json_schema') + required final JsonSchemaObject jsonSchema}) = + _$ResponseFormatJsonSchemaImpl; + const ResponseFormatJsonSchema._() : super._(); + + factory ResponseFormatJsonSchema.fromJson(Map json) = + _$ResponseFormatJsonSchemaImpl.fromJson; + + /// The type of response format being defined. + @override + ResponseFormatType get type; + + /// A JSON Schema object. + @JsonKey(name: 'json_schema') + JsonSchemaObject get jsonSchema; + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ResponseFormatJsonSchemaImplCopyWith<_$ResponseFormatJsonSchemaImpl> + get copyWith => throw _privateConstructorUsedError; +} + AssistantTools _$AssistantToolsFromJson(Map json) { switch (json['type']) { case 'code_interpreter': @@ -57229,7 +57900,7 @@ mixin _$AssistantToolsFileSearchFileSearch { /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults => throw _privateConstructorUsedError; @@ -57341,7 +58012,7 @@ class _$AssistantToolsFileSearchFileSearchImpl /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. @override @JsonKey(name: 'max_num_results', includeIfNull: false) final int? maxNumResults; @@ -57397,7 +58068,7 @@ abstract class _AssistantToolsFileSearchFileSearch /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. @override @JsonKey(name: 'max_num_results', includeIfNull: false) int? 
get maxNumResults; @@ -57419,6 +58090,8 @@ MessageContent _$MessageContentFromJson(Map json) { return MessageContentImageUrlObject.fromJson(json); case 'text': return MessageContentTextObject.fromJson(json); + case 'refusal': + return MessageContentRefusalObject.fromJson(json); default: throw CheckedFromJsonException(json, 'type', 'MessageContent', @@ -57439,6 +58112,7 @@ mixin _$MessageContent { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) imageUrl, required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -57450,6 +58124,7 @@ mixin _$MessageContent { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -57461,6 +58136,7 @@ mixin _$MessageContent { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? refusal, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -57469,6 +58145,7 @@ mixin _$MessageContent { required TResult Function(MessageContentImageFileObject value) imageFile, required TResult Function(MessageContentImageUrlObject value) imageUrl, required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -57476,6 +58153,7 @@ mixin _$MessageContent { TResult? Function(MessageContentImageFileObject value)? imageFile, TResult? Function(MessageContentImageUrlObject value)? imageUrl, TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -57483,6 +58161,7 @@ mixin _$MessageContent { TResult Function(MessageContentImageFileObject value)? imageFile, TResult Function(MessageContentImageUrlObject value)? imageUrl, TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? refusal, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -57651,6 +58330,7 @@ class _$MessageContentImageFileObjectImpl @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) imageUrl, required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, }) { return imageFile(type, this.imageFile); } @@ -57665,6 +58345,7 @@ class _$MessageContentImageFileObjectImpl @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? refusal, }) { return imageFile?.call(type, this.imageFile); } @@ -57679,6 +58360,7 @@ class _$MessageContentImageFileObjectImpl @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? 
refusal, required TResult orElse(), }) { if (imageFile != null) { @@ -57693,6 +58375,7 @@ class _$MessageContentImageFileObjectImpl required TResult Function(MessageContentImageFileObject value) imageFile, required TResult Function(MessageContentImageUrlObject value) imageUrl, required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, }) { return imageFile(this); } @@ -57703,6 +58386,7 @@ class _$MessageContentImageFileObjectImpl TResult? Function(MessageContentImageFileObject value)? imageFile, TResult? Function(MessageContentImageUrlObject value)? imageUrl, TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, }) { return imageFile?.call(this); } @@ -57713,6 +58397,7 @@ class _$MessageContentImageFileObjectImpl TResult Function(MessageContentImageFileObject value)? imageFile, TResult Function(MessageContentImageUrlObject value)? imageUrl, TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? refusal, required TResult orElse(), }) { if (imageFile != null) { @@ -57875,6 +58560,7 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) imageUrl, required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, }) { return imageUrl(type, this.imageUrl); } @@ -57889,6 +58575,7 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? refusal, }) { return imageUrl?.call(type, this.imageUrl); } @@ -57903,6 +58590,7 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? refusal, required TResult orElse(), }) { if (imageUrl != null) { @@ -57917,6 +58605,7 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { required TResult Function(MessageContentImageFileObject value) imageFile, required TResult Function(MessageContentImageUrlObject value) imageUrl, required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, }) { return imageUrl(this); } @@ -57927,6 +58616,7 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { TResult? Function(MessageContentImageFileObject value)? imageFile, TResult? Function(MessageContentImageUrlObject value)? imageUrl, TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, }) { return imageUrl?.call(this); } @@ -57937,6 +58627,7 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { TResult Function(MessageContentImageFileObject value)? imageFile, TResult Function(MessageContentImageUrlObject value)? imageUrl, TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? 
refusal, required TResult orElse(), }) { if (imageUrl != null) { @@ -58090,6 +58781,7 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) imageUrl, required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, }) { return text(type, this.text); } @@ -58104,6 +58796,7 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? refusal, }) { return text?.call(type, this.text); } @@ -58118,6 +58811,7 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? refusal, required TResult orElse(), }) { if (text != null) { @@ -58132,6 +58826,7 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { required TResult Function(MessageContentImageFileObject value) imageFile, required TResult Function(MessageContentImageUrlObject value) imageUrl, required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, }) { return text(this); } @@ -58142,6 +58837,7 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { TResult? Function(MessageContentImageFileObject value)? imageFile, TResult? Function(MessageContentImageUrlObject value)? imageUrl, TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, }) { return text?.call(this); } @@ -58152,6 +58848,7 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { TResult Function(MessageContentImageFileObject value)? imageFile, TResult Function(MessageContentImageUrlObject value)? imageUrl, TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? refusal, required TResult orElse(), }) { if (text != null) { @@ -58192,12 +58889,223 @@ abstract class MessageContentTextObject extends MessageContent { get copyWith => throw _privateConstructorUsedError; } +/// @nodoc +abstract class _$$MessageContentRefusalObjectImplCopyWith<$Res> + implements $MessageContentCopyWith<$Res> { + factory _$$MessageContentRefusalObjectImplCopyWith( + _$MessageContentRefusalObjectImpl value, + $Res Function(_$MessageContentRefusalObjectImpl) then) = + __$$MessageContentRefusalObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String type, String refusal}); +} + +/// @nodoc +class __$$MessageContentRefusalObjectImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, + _$MessageContentRefusalObjectImpl> + implements _$$MessageContentRefusalObjectImplCopyWith<$Res> { + __$$MessageContentRefusalObjectImplCopyWithImpl( + _$MessageContentRefusalObjectImpl _value, + $Res Function(_$MessageContentRefusalObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? refusal = null, + }) { + return _then(_$MessageContentRefusalObjectImpl( + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + refusal: null == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageContentRefusalObjectImpl extends MessageContentRefusalObject { + const _$MessageContentRefusalObjectImpl( + {required this.type, required this.refusal}) + : super._(); + + factory _$MessageContentRefusalObjectImpl.fromJson( + Map json) => + _$$MessageContentRefusalObjectImplFromJson(json); + + /// Always `refusal`. + @override + final String type; + + /// No Description + @override + final String refusal; + + @override + String toString() { + return 'MessageContent.refusal(type: $type, refusal: $refusal)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageContentRefusalObjectImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.refusal, refusal) || other.refusal == refusal)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, type, refusal); + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$MessageContentRefusalObjectImplCopyWith<_$MessageContentRefusalObjectImpl> + get copyWith => __$$MessageContentRefusalObjectImplCopyWithImpl< + _$MessageContentRefusalObjectImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile) + imageFile, + required TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) + imageUrl, + required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, + }) { + return refusal(type, this.refusal); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult? Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? refusal, + }) { + return refusal?.call(type, this.refusal); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? refusal, + required TResult orElse(), + }) { + if (refusal != null) { + return refusal(type, this.refusal); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageContentImageFileObject value) imageFile, + required TResult Function(MessageContentImageUrlObject value) imageUrl, + required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, + }) { + return refusal(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? 
Function(MessageContentImageFileObject value)? imageFile, + TResult? Function(MessageContentImageUrlObject value)? imageUrl, + TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, + }) { + return refusal?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageContentImageFileObject value)? imageFile, + TResult Function(MessageContentImageUrlObject value)? imageUrl, + TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? refusal, + required TResult orElse(), + }) { + if (refusal != null) { + return refusal(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageContentRefusalObjectImplToJson( + this, + ); + } +} + +abstract class MessageContentRefusalObject extends MessageContent { + const factory MessageContentRefusalObject( + {required final String type, + required final String refusal}) = _$MessageContentRefusalObjectImpl; + const MessageContentRefusalObject._() : super._(); + + factory MessageContentRefusalObject.fromJson(Map json) = + _$MessageContentRefusalObjectImpl.fromJson; + + /// Always `refusal`. + @override + String get type; + + /// No Description + String get refusal; + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageContentRefusalObjectImplCopyWith<_$MessageContentRefusalObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + MessageDeltaContent _$MessageDeltaContentFromJson(Map json) { switch (json['type']) { case 'image_file': return MessageDeltaContentImageFileObject.fromJson(json); case 'text': return MessageDeltaContentTextObject.fromJson(json); + case 'refusal': + return MessageDeltaContentRefusalObject.fromJson(json); + case 'image_url': + return MessageDeltaContentImageUrlObject.fromJson(json); default: throw CheckedFromJsonException(json, 'type', 'MessageDeltaContent', @@ -58223,6 +59131,15 @@ mixin _$MessageDeltaContent { required TResult Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text) text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) + imageUrl, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -58236,6 +59153,15 @@ mixin _$MessageDeltaContent { TResult? Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -58249,6 +59175,15 @@ mixin _$MessageDeltaContent { TResult Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? 
+ imageUrl, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -58257,18 +59192,24 @@ mixin _$MessageDeltaContent { required TResult Function(MessageDeltaContentImageFileObject value) imageFile, required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(MessageDeltaContentImageFileObject value)? imageFile, TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -58460,6 +59401,15 @@ class _$MessageDeltaContentImageFileObjectImpl required TResult Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text) text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) + imageUrl, }) { return imageFile(index, type, this.imageFile); } @@ -58476,6 +59426,15 @@ class _$MessageDeltaContentImageFileObjectImpl TResult? Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, }) { return imageFile?.call(index, type, this.imageFile); } @@ -58492,6 +59451,15 @@ class _$MessageDeltaContentImageFileObjectImpl TResult Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, required TResult orElse(), }) { if (imageFile != null) { @@ -58506,6 +59474,8 @@ class _$MessageDeltaContentImageFileObjectImpl required TResult Function(MessageDeltaContentImageFileObject value) imageFile, required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, }) { return imageFile(this); } @@ -58515,6 +59485,8 @@ class _$MessageDeltaContentImageFileObjectImpl TResult? mapOrNull({ TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? 
imageUrl, }) { return imageFile?.call(this); } @@ -58524,6 +59496,8 @@ class _$MessageDeltaContentImageFileObjectImpl TResult maybeMap({ TResult Function(MessageDeltaContentImageFileObject value)? imageFile, TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, required TResult orElse(), }) { if (imageFile != null) { @@ -58709,6 +59683,15 @@ class _$MessageDeltaContentTextObjectImpl required TResult Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text) text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) + imageUrl, }) { return text(index, type, this.text); } @@ -58725,6 +59708,15 @@ class _$MessageDeltaContentTextObjectImpl TResult? Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, }) { return text?.call(index, type, this.text); } @@ -58741,6 +59733,15 @@ class _$MessageDeltaContentTextObjectImpl TResult Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, required TResult orElse(), }) { if (text != null) { @@ -58755,6 +59756,8 @@ class _$MessageDeltaContentTextObjectImpl required TResult Function(MessageDeltaContentImageFileObject value) imageFile, required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, }) { return text(this); } @@ -58764,6 +59767,8 @@ class _$MessageDeltaContentTextObjectImpl TResult? mapOrNull({ TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, }) { return text?.call(this); } @@ -58773,6 +59778,8 @@ class _$MessageDeltaContentTextObjectImpl TResult maybeMap({ TResult Function(MessageDeltaContentImageFileObject value)? imageFile, TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? 
imageUrl, required TResult orElse(), }) { if (text != null) { @@ -58821,6 +59828,552 @@ abstract class MessageDeltaContentTextObject extends MessageDeltaContent { get copyWith => throw _privateConstructorUsedError; } +/// @nodoc +abstract class _$$MessageDeltaContentRefusalObjectImplCopyWith<$Res> + implements $MessageDeltaContentCopyWith<$Res> { + factory _$$MessageDeltaContentRefusalObjectImplCopyWith( + _$MessageDeltaContentRefusalObjectImpl value, + $Res Function(_$MessageDeltaContentRefusalObjectImpl) then) = + __$$MessageDeltaContentRefusalObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {int index, String type, @JsonKey(includeIfNull: false) String? refusal}); +} + +/// @nodoc +class __$$MessageDeltaContentRefusalObjectImplCopyWithImpl<$Res> + extends _$MessageDeltaContentCopyWithImpl<$Res, + _$MessageDeltaContentRefusalObjectImpl> + implements _$$MessageDeltaContentRefusalObjectImplCopyWith<$Res> { + __$$MessageDeltaContentRefusalObjectImplCopyWithImpl( + _$MessageDeltaContentRefusalObjectImpl _value, + $Res Function(_$MessageDeltaContentRefusalObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? index = null, + Object? type = null, + Object? refusal = freezed, + }) { + return _then(_$MessageDeltaContentRefusalObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaContentRefusalObjectImpl + extends MessageDeltaContentRefusalObject { + const _$MessageDeltaContentRefusalObjectImpl( + {required this.index, + required this.type, + @JsonKey(includeIfNull: false) this.refusal}) + : super._(); + + factory _$MessageDeltaContentRefusalObjectImpl.fromJson( + Map json) => + _$$MessageDeltaContentRefusalObjectImplFromJson(json); + + /// The index of the refusal part in the message. + @override + final int index; + + /// Always `refusal`. + @override + final String type; + + /// The refusal content generated by the assistant. + @override + @JsonKey(includeIfNull: false) + final String? refusal; + + @override + String toString() { + return 'MessageDeltaContent.refusal(index: $index, type: $type, refusal: $refusal)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaContentRefusalObjectImpl && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type) && + (identical(other.refusal, refusal) || other.refusal == refusal)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, index, type, refusal); + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaContentRefusalObjectImplCopyWith< + _$MessageDeltaContentRefusalObjectImpl> + get copyWith => __$$MessageDeltaContentRefusalObjectImplCopyWithImpl< + _$MessageDeltaContentRefusalObjectImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile) + imageFile, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text) + text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) + imageUrl, + }) { + return refusal(index, type, this.refusal); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, + }) { + return refusal?.call(index, type, this.refusal); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, + required TResult orElse(), + }) { + if (refusal != null) { + return refusal(index, type, this.refusal); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageDeltaContentImageFileObject value) + imageFile, + required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, + }) { + return refusal(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, + }) { + return refusal?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? 
imageUrl, + required TResult orElse(), + }) { + if (refusal != null) { + return refusal(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageDeltaContentRefusalObjectImplToJson( + this, + ); + } +} + +abstract class MessageDeltaContentRefusalObject extends MessageDeltaContent { + const factory MessageDeltaContentRefusalObject( + {required final int index, + required final String type, + @JsonKey(includeIfNull: false) final String? refusal}) = + _$MessageDeltaContentRefusalObjectImpl; + const MessageDeltaContentRefusalObject._() : super._(); + + factory MessageDeltaContentRefusalObject.fromJson(Map json) = + _$MessageDeltaContentRefusalObjectImpl.fromJson; + + /// The index of the refusal part in the message. + @override + int get index; + + /// Always `refusal`. + @override + String get type; + + /// The refusal content generated by the assistant. + @JsonKey(includeIfNull: false) + String? get refusal; + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageDeltaContentRefusalObjectImplCopyWith< + _$MessageDeltaContentRefusalObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> + implements $MessageDeltaContentCopyWith<$Res> { + factory _$$MessageDeltaContentImageUrlObjectImplCopyWith( + _$MessageDeltaContentImageUrlObjectImpl value, + $Res Function(_$MessageDeltaContentImageUrlObjectImpl) then) = + __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl}); + + $MessageContentImageUrlCopyWith<$Res>? get imageUrl; +} + +/// @nodoc +class __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res> + extends _$MessageDeltaContentCopyWithImpl<$Res, + _$MessageDeltaContentImageUrlObjectImpl> + implements _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> { + __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl( + _$MessageDeltaContentImageUrlObjectImpl _value, + $Res Function(_$MessageDeltaContentImageUrlObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? index = null, + Object? type = null, + Object? imageUrl = freezed, + }) { + return _then(_$MessageDeltaContentImageUrlObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + imageUrl: freezed == imageUrl + ? _value.imageUrl + : imageUrl // ignore: cast_nullable_to_non_nullable + as MessageContentImageUrl?, + )); + } + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $MessageContentImageUrlCopyWith<$Res>? 
get imageUrl { + if (_value.imageUrl == null) { + return null; + } + + return $MessageContentImageUrlCopyWith<$Res>(_value.imageUrl!, (value) { + return _then(_value.copyWith(imageUrl: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaContentImageUrlObjectImpl + extends MessageDeltaContentImageUrlObject { + const _$MessageDeltaContentImageUrlObjectImpl( + {required this.index, + required this.type, + @JsonKey(name: 'image_url', includeIfNull: false) this.imageUrl}) + : super._(); + + factory _$MessageDeltaContentImageUrlObjectImpl.fromJson( + Map json) => + _$$MessageDeltaContentImageUrlObjectImplFromJson(json); + + /// The index of the content part in the message. + @override + final int index; + + /// Always `image_url`. + @override + final String type; + + /// The image URL part of a message. + @override + @JsonKey(name: 'image_url', includeIfNull: false) + final MessageContentImageUrl? imageUrl; + + @override + String toString() { + return 'MessageDeltaContent.imageUrl(index: $index, type: $type, imageUrl: $imageUrl)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaContentImageUrlObjectImpl && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type) && + (identical(other.imageUrl, imageUrl) || + other.imageUrl == imageUrl)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, index, type, imageUrl); + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaContentImageUrlObjectImplCopyWith< + _$MessageDeltaContentImageUrlObjectImpl> + get copyWith => __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl< + _$MessageDeltaContentImageUrlObjectImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile) + imageFile, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text) + text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) + imageUrl, + }) { + return imageUrl(index, type, this.imageUrl); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, + }) { + return imageUrl?.call(index, type, this.imageUrl); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? 
+ imageFile, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, + required TResult orElse(), + }) { + if (imageUrl != null) { + return imageUrl(index, type, this.imageUrl); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageDeltaContentImageFileObject value) + imageFile, + required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, + }) { + return imageUrl(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, + }) { + return imageUrl?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, + required TResult orElse(), + }) { + if (imageUrl != null) { + return imageUrl(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageDeltaContentImageUrlObjectImplToJson( + this, + ); + } +} + +abstract class MessageDeltaContentImageUrlObject extends MessageDeltaContent { + const factory MessageDeltaContentImageUrlObject( + {required final int index, + required final String type, + @JsonKey(name: 'image_url', includeIfNull: false) + final MessageContentImageUrl? imageUrl}) = + _$MessageDeltaContentImageUrlObjectImpl; + const MessageDeltaContentImageUrlObject._() : super._(); + + factory MessageDeltaContentImageUrlObject.fromJson( + Map json) = + _$MessageDeltaContentImageUrlObjectImpl.fromJson; + + /// The index of the content part in the message. + @override + int get index; + + /// Always `image_url`. + @override + String get type; + + /// The image URL part of a message. + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? get imageUrl; + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageDeltaContentImageUrlObjectImplCopyWith< + _$MessageDeltaContentImageUrlObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + MessageContentTextAnnotations _$MessageContentTextAnnotationsFromJson( Map json) { switch (json['type']) { diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 63581c97..77e7ee5d 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -320,7 +320,7 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 
0.0, responseFormat: json['response_format'] == null ? null - : ChatCompletionResponseFormat.fromJson( + : ResponseFormat.fromJson( json['response_format'] as Map), seed: (json['seed'] as num?)?.toInt(), serviceTier: $enumDecodeNullable( @@ -454,25 +454,6 @@ Map _$$ChatCompletionModelStringImplToJson( 'runtimeType': instance.$type, }; -_$ChatCompletionResponseFormatImpl _$$ChatCompletionResponseFormatImplFromJson( - Map json) => - _$ChatCompletionResponseFormatImpl( - type: $enumDecodeNullable( - _$ChatCompletionResponseFormatTypeEnumMap, json['type']) ?? - ChatCompletionResponseFormatType.text, - ); - -Map _$$ChatCompletionResponseFormatImplToJson( - _$ChatCompletionResponseFormatImpl instance) => - { - 'type': _$ChatCompletionResponseFormatTypeEnumMap[instance.type]!, - }; - -const _$ChatCompletionResponseFormatTypeEnumMap = { - ChatCompletionResponseFormatType.text: 'text', - ChatCompletionResponseFormatType.jsonObject: 'json_object', -}; - _$ChatCompletionStopListStringImpl _$$ChatCompletionStopListStringImplFromJson( Map json) => _$ChatCompletionStopListStringImpl( @@ -611,6 +592,7 @@ _$FunctionObjectImpl _$$FunctionObjectImplFromJson(Map json) => name: json['name'] as String, description: json['description'] as String?, parameters: json['parameters'] as Map?, + strict: json['strict'] as bool? ?? false, ); Map _$$FunctionObjectImplToJson( @@ -627,6 +609,34 @@ Map _$$FunctionObjectImplToJson( writeNotNull('description', instance.description); writeNotNull('parameters', instance.parameters); + writeNotNull('strict', instance.strict); + return val; +} + +_$JsonSchemaObjectImpl _$$JsonSchemaObjectImplFromJson( + Map json) => + _$JsonSchemaObjectImpl( + name: json['name'] as String, + description: json['description'] as String?, + schema: json['schema'] as Map, + strict: json['strict'] as bool? ?? 
false, + ); + +Map _$$JsonSchemaObjectImplToJson( + _$JsonSchemaObjectImpl instance) { + final val = { + 'name': instance.name, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('description', instance.description); + val['schema'] = instance.schema; + writeNotNull('strict', instance.strict); return val; } @@ -808,10 +818,18 @@ _$ChatCompletionLogprobsImpl _$$ChatCompletionLogprobsImplFromJson( ); Map _$$ChatCompletionLogprobsImplToJson( - _$ChatCompletionLogprobsImpl instance) => - { - 'content': instance.content?.map((e) => e.toJson()).toList(), - }; + _$ChatCompletionLogprobsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); + return val; +} _$ChatCompletionTokenLogprobImpl _$$ChatCompletionTokenLogprobImplFromJson( Map json) => @@ -942,16 +960,25 @@ _$ChatCompletionStreamResponseChoiceLogprobsImpl ); Map _$$ChatCompletionStreamResponseChoiceLogprobsImplToJson( - _$ChatCompletionStreamResponseChoiceLogprobsImpl instance) => - { - 'content': instance.content?.map((e) => e.toJson()).toList(), - }; + _$ChatCompletionStreamResponseChoiceLogprobsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); + return val; +} _$ChatCompletionStreamResponseDeltaImpl _$$ChatCompletionStreamResponseDeltaImplFromJson( Map json) => _$ChatCompletionStreamResponseDeltaImpl( content: json['content'] as String?, + refusal: json['refusal'] as String?, functionCall: json['function_call'] == null ? 
null : ChatCompletionStreamMessageFunctionCall.fromJson( @@ -976,6 +1003,7 @@ Map _$$ChatCompletionStreamResponseDeltaImplToJson( } writeNotNull('content', instance.content); + writeNotNull('refusal', instance.refusal); writeNotNull('function_call', instance.functionCall?.toJson()); writeNotNull( 'tool_calls', instance.toolCalls?.map((e) => e.toJson()).toList()); @@ -2185,22 +2213,19 @@ Map _$$AssistantObjectResponseFormatEnumerationImplToJson( }; const _$AssistantResponseFormatModeEnumMap = { - AssistantResponseFormatMode.none: 'none', AssistantResponseFormatMode.auto: 'auto', }; -_$AssistantObjectResponseFormatAssistantsResponseFormatImpl - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplFromJson( +_$AssistantObjectResponseFormatResponseFormatImpl + _$$AssistantObjectResponseFormatResponseFormatImplFromJson( Map json) => - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$AssistantObjectResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map _$$AssistantObjectResponseFormatAssistantsResponseFormatImplToJson( - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl instance) => +Map _$$AssistantObjectResponseFormatResponseFormatImplToJson( + _$AssistantObjectResponseFormatResponseFormatImpl instance) => { 'value': instance.value.toJson(), 'runtimeType': instance.$type, @@ -2328,27 +2353,24 @@ Map }; const _$CreateAssistantResponseFormatModeEnumMap = { - CreateAssistantResponseFormatMode.none: 'none', CreateAssistantResponseFormatMode.auto: 'auto', }; -_$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( +_$CreateAssistantRequestResponseFormatResponseFormatImpl + _$$CreateAssistantRequestResponseFormatResponseFormatImplFromJson( Map json) => - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$CreateAssistantRequestResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl - instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map _$$CreateAssistantRequestResponseFormatResponseFormatImplToJson( + _$CreateAssistantRequestResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$ModifyAssistantRequestImpl _$$ModifyAssistantRequestImplFromJson( Map json) => @@ -2421,27 +2443,24 @@ Map }; const _$ModifyAssistantResponseFormatModeEnumMap = { - ModifyAssistantResponseFormatMode.none: 'none', ModifyAssistantResponseFormatMode.auto: 'auto', }; -_$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( +_$ModifyAssistantRequestResponseFormatResponseFormatImpl + _$$ModifyAssistantRequestResponseFormatResponseFormatImplFromJson( Map json) => - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$ModifyAssistantRequestResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - 
_$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl - instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map _$$ModifyAssistantRequestResponseFormatResponseFormatImplToJson( + _$ModifyAssistantRequestResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$DeleteAssistantResponseImpl _$$DeleteAssistantResponseImplFromJson( Map json) => @@ -2530,25 +2549,6 @@ Map _$$AssistantsFunctionCallOptionImplToJson( 'name': instance.name, }; -_$AssistantsResponseFormatImpl _$$AssistantsResponseFormatImplFromJson( - Map json) => - _$AssistantsResponseFormatImpl( - type: $enumDecodeNullable( - _$AssistantsResponseFormatTypeEnumMap, json['type']) ?? - AssistantsResponseFormatType.text, - ); - -Map _$$AssistantsResponseFormatImplToJson( - _$AssistantsResponseFormatImpl instance) => - { - 'type': _$AssistantsResponseFormatTypeEnumMap[instance.type]!, - }; - -const _$AssistantsResponseFormatTypeEnumMap = { - AssistantsResponseFormatType.text: 'text', - AssistantsResponseFormatType.jsonObject: 'json_object', -}; - _$TruncationObjectImpl _$$TruncationObjectImplFromJson( Map json) => _$TruncationObjectImpl( @@ -2800,26 +2800,23 @@ Map _$$RunObjectResponseFormatEnumerationImplToJson( }; const _$RunObjectResponseFormatModeEnumMap = { - RunObjectResponseFormatMode.none: 'none', RunObjectResponseFormatMode.auto: 'auto', }; -_$RunObjectResponseFormatAssistantsResponseFormatImpl - _$$RunObjectResponseFormatAssistantsResponseFormatImplFromJson( +_$RunObjectResponseFormatResponseFormatImpl + _$$RunObjectResponseFormatResponseFormatImplFromJson( Map json) => - _$RunObjectResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$RunObjectResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - _$$RunObjectResponseFormatAssistantsResponseFormatImplToJson( - _$RunObjectResponseFormatAssistantsResponseFormatImpl instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map _$$RunObjectResponseFormatResponseFormatImplToJson( + _$RunObjectResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$RunSubmitToolOutputsImpl _$$RunSubmitToolOutputsImplFromJson( Map json) => @@ -3028,27 +3025,23 @@ Map _$$CreateRunRequestResponseFormatEnumerationImplToJson( }; const _$CreateRunRequestResponseFormatModeEnumMap = { - CreateRunRequestResponseFormatMode.none: 'none', CreateRunRequestResponseFormatMode.auto: 'auto', }; -_$CreateRunRequestResponseFormatAssistantsResponseFormatImpl - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplFromJson( +_$CreateRunRequestResponseFormatResponseFormatImpl + _$$CreateRunRequestResponseFormatResponseFormatImplFromJson( Map json) => - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$CreateRunRequestResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplToJson( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl - instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map 
_$$CreateRunRequestResponseFormatResponseFormatImplToJson( + _$CreateRunRequestResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$ListRunsResponseImpl _$$ListRunsResponseImplFromJson( Map json) => @@ -3361,27 +3354,24 @@ Map json) => - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplToJson( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplToJson( + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$ThreadObjectImpl _$$ThreadObjectImplFromJson(Map json) => _$ThreadObjectImpl( @@ -3968,34 +3958,6 @@ Map _$$MessageContentTextAnnotationsFileCitationImplToJson( 'file_id': instance.fileId, }; -_$MessageDeltaContentImageUrlObjectImpl - _$$MessageDeltaContentImageUrlObjectImplFromJson( - Map json) => - _$MessageDeltaContentImageUrlObjectImpl( - index: (json['index'] as num?)?.toInt(), - type: json['type'] as String?, - imageUrl: json['image_url'] == null - ? null - : MessageContentImageUrl.fromJson( - json['image_url'] as Map), - ); - -Map _$$MessageDeltaContentImageUrlObjectImplToJson( - _$MessageDeltaContentImageUrlObjectImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('index', instance.index); - writeNotNull('type', instance.type); - writeNotNull('image_url', instance.imageUrl?.toJson()); - return val; -} - _$MessageDeltaContentTextImpl _$$MessageDeltaContentTextImplFromJson( Map json) => _$MessageDeltaContentTextImpl( @@ -5038,6 +5000,7 @@ _$ChatCompletionAssistantMessageImpl _$ChatCompletionMessageRoleEnumMap, json['role']) ?? ChatCompletionMessageRole.assistant, content: json['content'] as String?, + refusal: json['refusal'] as String?, name: json['name'] as String?, toolCalls: (json['tool_calls'] as List?) ?.map((e) => ChatCompletionMessageToolCall.fromJson( @@ -5062,6 +5025,7 @@ Map _$$ChatCompletionAssistantMessageImplToJson( } writeNotNull('content', instance.content); + writeNotNull('refusal', instance.refusal); writeNotNull('name', instance.name); writeNotNull( 'tool_calls', instance.toolCalls?.map((e) => e.toJson()).toList()); @@ -5159,6 +5123,7 @@ Map _$$ChatCompletionMessageContentPartTextImplToJson( const _$ChatCompletionMessageContentPartTypeEnumMap = { ChatCompletionMessageContentPartType.text: 'text', ChatCompletionMessageContentPartType.imageUrl: 'image_url', + ChatCompletionMessageContentPartType.refusal: 'refusal', }; _$ChatCompletionMessageContentPartImageImpl @@ -5180,6 +5145,24 @@ Map _$$ChatCompletionMessageContentPartImageImplToJson( 'image_url': instance.imageUrl.toJson(), }; +_$ChatCompletionMessageContentPartRefusalImpl + _$$ChatCompletionMessageContentPartRefusalImplFromJson( + Map json) => + _$ChatCompletionMessageContentPartRefusalImpl( + type: $enumDecodeNullable( + _$ChatCompletionMessageContentPartTypeEnumMap, + json['type']) ?? 
+ ChatCompletionMessageContentPartType.refusal, + refusal: json['refusal'] as String, + ); + +Map _$$ChatCompletionMessageContentPartRefusalImplToJson( + _$ChatCompletionMessageContentPartRefusalImpl instance) => + { + 'type': _$ChatCompletionMessageContentPartTypeEnumMap[instance.type]!, + 'refusal': instance.refusal, + }; + _$ChatCompletionMessageImageUrlImpl _$$ChatCompletionMessageImageUrlImplFromJson(Map json) => _$ChatCompletionMessageImageUrlImpl( @@ -5202,6 +5185,54 @@ const _$ChatCompletionMessageImageDetailEnumMap = { ChatCompletionMessageImageDetail.high: 'high', }; +_$ResponseFormatTextImpl _$$ResponseFormatTextImplFromJson( + Map json) => + _$ResponseFormatTextImpl( + type: $enumDecodeNullable(_$ResponseFormatTypeEnumMap, json['type']) ?? + ResponseFormatType.text, + ); + +Map _$$ResponseFormatTextImplToJson( + _$ResponseFormatTextImpl instance) => + { + 'type': _$ResponseFormatTypeEnumMap[instance.type]!, + }; + +const _$ResponseFormatTypeEnumMap = { + ResponseFormatType.text: 'text', + ResponseFormatType.jsonObject: 'json_object', + ResponseFormatType.jsonSchema: 'json_schema', +}; + +_$ResponseFormatJsonObjectImpl _$$ResponseFormatJsonObjectImplFromJson( + Map json) => + _$ResponseFormatJsonObjectImpl( + type: $enumDecodeNullable(_$ResponseFormatTypeEnumMap, json['type']) ?? + ResponseFormatType.jsonObject, + ); + +Map _$$ResponseFormatJsonObjectImplToJson( + _$ResponseFormatJsonObjectImpl instance) => + { + 'type': _$ResponseFormatTypeEnumMap[instance.type]!, + }; + +_$ResponseFormatJsonSchemaImpl _$$ResponseFormatJsonSchemaImplFromJson( + Map json) => + _$ResponseFormatJsonSchemaImpl( + type: $enumDecodeNullable(_$ResponseFormatTypeEnumMap, json['type']) ?? + ResponseFormatType.jsonSchema, + jsonSchema: JsonSchemaObject.fromJson( + json['json_schema'] as Map), + ); + +Map _$$ResponseFormatJsonSchemaImplToJson( + _$ResponseFormatJsonSchemaImpl instance) => + { + 'type': _$ResponseFormatTypeEnumMap[instance.type]!, + 'json_schema': instance.jsonSchema.toJson(), + }; + _$AssistantToolsCodeInterpreterImpl _$$AssistantToolsCodeInterpreterImplFromJson(Map json) => _$AssistantToolsCodeInterpreterImpl( @@ -5320,6 +5351,20 @@ Map _$$MessageContentTextObjectImplToJson( 'text': instance.text.toJson(), }; +_$MessageContentRefusalObjectImpl _$$MessageContentRefusalObjectImplFromJson( + Map json) => + _$MessageContentRefusalObjectImpl( + type: json['type'] as String, + refusal: json['refusal'] as String, + ); + +Map _$$MessageContentRefusalObjectImplToJson( + _$MessageContentRefusalObjectImpl instance) => + { + 'type': instance.type, + 'refusal': instance.refusal, + }; + _$MessageDeltaContentImageFileObjectImpl _$$MessageDeltaContentImageFileObjectImplFromJson( Map json) => @@ -5377,6 +5422,61 @@ Map _$$MessageDeltaContentTextObjectImplToJson( return val; } +_$MessageDeltaContentRefusalObjectImpl + _$$MessageDeltaContentRefusalObjectImplFromJson( + Map json) => + _$MessageDeltaContentRefusalObjectImpl( + index: (json['index'] as num).toInt(), + type: json['type'] as String, + refusal: json['refusal'] as String?, + ); + +Map _$$MessageDeltaContentRefusalObjectImplToJson( + _$MessageDeltaContentRefusalObjectImpl instance) { + final val = { + 'index': instance.index, + 'type': instance.type, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('refusal', instance.refusal); + return val; +} + +_$MessageDeltaContentImageUrlObjectImpl + _$$MessageDeltaContentImageUrlObjectImplFromJson( + Map json) => + 
_$MessageDeltaContentImageUrlObjectImpl( + index: (json['index'] as num).toInt(), + type: json['type'] as String, + imageUrl: json['image_url'] == null + ? null + : MessageContentImageUrl.fromJson( + json['image_url'] as Map), + ); + +Map _$$MessageDeltaContentImageUrlObjectImplToJson( + _$MessageDeltaContentImageUrlObjectImpl instance) { + final val = { + 'index': instance.index, + 'type': instance.type, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('image_url', instance.imageUrl?.toJson()); + return val; +} + _$MessageContentTextAnnotationsFileCitationObjectImpl _$$MessageContentTextAnnotationsFileCitationObjectImplFromJson( Map json) => diff --git a/packages/openai_dart/oas/main.dart b/packages/openai_dart/oas/main.dart index 5a62f6b1..ab870afb 100644 --- a/packages/openai_dart/oas/main.dart +++ b/packages/openai_dart/oas/main.dart @@ -49,11 +49,15 @@ String? _onSchemaUnionFactoryName( 'ChatCompletionMessageContentParts' => 'parts', 'ChatCompletionMessageContentPartText' => 'text', 'ChatCompletionMessageContentPartImage' => 'image', + 'ChatCompletionMessageContentPartRefusal' => 'refusal', 'ChatCompletionToolChoiceOptionEnumeration' => 'mode', 'ChatCompletionToolChoiceOptionChatCompletionNamedToolChoice' => 'tool', 'ChatCompletionFunctionCallEnumeration' => 'mode', 'ChatCompletionFunctionCallChatCompletionFunctionCallOption' => 'function', + 'ResponseFormatText' => 'text', + 'ResponseFormatJsonObject' => 'jsonObject', + 'ResponseFormatJsonSchema' => 'jsonSchema', // Completion 'CompletionModelEnumeration' => 'model', 'CompletionModelString' => 'modelId', @@ -80,6 +84,7 @@ String? _onSchemaUnionFactoryName( // Assistant 'AssistantModelEnumeration' => 'model', 'AssistantModelString' => 'modelId', + 'AssistantObjectResponseFormatEnumeration' => 'mode', 'CreateAssistantRequestResponseFormatAssistantsResponseFormat' => 'format', 'CreateAssistantRequestResponseFormatEnumeration' => 'mode', @@ -101,7 +106,10 @@ String? _onSchemaUnionFactoryName( 'MessageContentTextAnnotationsFileCitationObject' => 'fileCitation', 'MessageContentTextAnnotationsFilePathObject' => 'filePath', 'MessageContentTextObject' => 'text', + 'MessageContentRefusalObject' => 'refusal', 'MessageDeltaContentImageFileObject' => 'imageFile', + 'MessageDeltaContentRefusalObject' => 'refusal', + 'MessageDeltaContentImageUrlObject' => 'imageUrl', 'MessageDeltaContentTextAnnotationsFileCitationObject' => 'fileCitation', 'MessageDeltaContentTextAnnotationsFilePathObject' => 'filePath', 'MessageDeltaContentTextObject' => 'text', diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index c349f64e..46282eab 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1887,21 +1887,7 @@ components: nullable: true description: *completions_presence_penalty_description response_format: - title: ChatCompletionResponseFormat - type: object - description: | - An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. 
Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - properties: - type: - type: string - enum: [ "text", "json_object" ] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. + $ref: "#/components/schemas/ResponseFormat" seed: type: integer # minimum: -9223372036854775808 # The value can't be represented exactly in JavaScript @@ -2069,11 +2055,10 @@ components: default: user description: The role of the messages author, in this case `user`. content: - # TODO extract to ChatCompletionMessageContent once generator bug fixed description: The contents of the user message. oneOf: - type: string - description: The text contents of the message. + description: The text contents of the user message. - type: array description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. items: @@ -2097,6 +2082,10 @@ components: type: string description: | The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + refusal: + nullable: true + type: string + description: The refusal message by the assistant. name: type: string description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. @@ -2148,11 +2137,12 @@ components: oneOf: - $ref: "#/components/schemas/ChatCompletionMessageContentPartText" - $ref: "#/components/schemas/ChatCompletionMessageContentPartImage" + - $ref: "#/components/schemas/ChatCompletionMessageContentPartRefusal" discriminator: propertyName: type ChatCompletionMessageContentPartText: type: object - description: A text content part of a user message. + description: A text content part of a message. properties: type: $ref: "#/components/schemas/ChatCompletionMessageContentPartType" @@ -2165,7 +2155,7 @@ components: - text ChatCompletionMessageContentPartImage: type: object - title: Image content part + description: An image content part of a user message. properties: type: $ref: "#/components/schemas/ChatCompletionMessageContentPartType" @@ -2190,9 +2180,25 @@ components: - url required: - image_url + ChatCompletionMessageContentPartRefusal: + type: object + description: A refusal content part of a message. + properties: + type: + $ref: "#/components/schemas/ChatCompletionMessageContentPartType" + default: refusal + description: The type of the content part, in this case `refusal`. + refusal: + type: string + description: The refusal message generated by the model. + required: + - refusal ChatCompletionMessageContentPartType: type: string - enum: [ "text", "image_url" ] + enum: + - text + - image_url + - refusal description: The type of the content part. ChatCompletionMessageRole: type: string @@ -2226,18 +2232,109 @@ components: properties: name: type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + description: | + The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + maximum length of 64. description: type: string - description: A description of what the function does, used by the model to choose when and how to call the function. + description: | + A description of what the function does, used by the model to choose when and how to call the function. parameters: $ref: "#/components/schemas/FunctionParameters" + strict: + type: boolean + nullable: true + default: false + description: | + Whether to enable strict schema adherence when generating the function call. If set to true, the model will + follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + `strict` is `true`. Learn more about Structured Outputs in the + [function calling guide](docs/guides/function-calling). required: - name FunctionParameters: type: object description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." additionalProperties: true + ResponseFormat: + type: object + description: | + An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - $ref: "#/components/schemas/ResponseFormatText" + - $ref: "#/components/schemas/ResponseFormatJsonObject" + - $ref: "#/components/schemas/ResponseFormatJsonSchema" + discriminator: + propertyName: type + ResponseFormatType: + type: string + enum: + - text + - json_object + - json_schema + description: The type of response format being defined. + ResponseFormatText: + type: object + description: "The model should respond with plain text." + properties: + type: + $ref: "#/components/schemas/ResponseFormatType" + default: "text" + ResponseFormatJsonObject: + type: object + description: "The model should respond with a JSON object." + properties: + type: + $ref: "#/components/schemas/ResponseFormatType" + default: "json_object" + ResponseFormatJsonSchema: + type: object + description: "The model should respond with a JSON object that adheres to the specified schema." 
+ properties: + type: + $ref: "#/components/schemas/ResponseFormatType" + default: "json_schema" + json_schema: + $ref: "#/components/schemas/JsonSchemaObject" + required: + - json_schema + JsonSchemaObject: + type: object + description: "A JSON Schema object." + properties: + name: + type: string + description: | + The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + length of 64. + description: + type: string + description: | + A description of what the response format is for, used by the model to determine how to respond in the + format. + schema: + type: object + description: | + The schema for the response format, described as a JSON Schema object. + additionalProperties: true + strict: + type: boolean + nullable: true + default: false + description: | + Whether to enable strict schema adherence when generating the output. If set to true, the model will always + follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + `strict` is `true`. To learn more, read the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + required: + - name + - schema ChatCompletionTool: type: object description: A tool the model may use. @@ -2387,8 +2484,12 @@ components: items: $ref: "#/components/schemas/ChatCompletionTokenLogprob" nullable: true - required: - - content + refusal: + description: A list of message refusal tokens with log probability information. + type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true ChatCompletionTokenLogprob: type: object description: Log probability information for a token. @@ -2491,6 +2592,10 @@ components: type: string description: The contents of the chunk message. nullable: true + refusal: + type: string + description: The refusal message generated by the model. + nullable: true function_call: $ref: "#/components/schemas/ChatCompletionStreamMessageFunctionCall" tool_calls: @@ -3481,9 +3586,10 @@ components: We generally recommend altering this or temperature but not both. response_format: description: &assistant_response_format | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + Specifies the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-4o-mini-1106`. + since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. 
Learn more in the @@ -3502,8 +3608,9 @@ components: title: AssistantResponseFormatMode description: > `auto` is the default value - enum: [ none, auto ] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" required: - id - object @@ -3609,8 +3716,9 @@ components: title: CreateAssistantResponseFormatMode description: > `auto` is the default value - enum: [ none, auto ] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" required: - model ModifyAssistantRequest: @@ -3681,8 +3789,9 @@ components: title: ModifyAssistantResponseFormatMode description: > `auto` is the default value - enum: [ none, auto ] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" DeleteAssistantResponse: type: object description: Represents a deleted response returned by the Delete assistant endpoint. @@ -3770,7 +3879,7 @@ components: and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. Note that the file search tool may output fewer than `max_num_results` results. See the [file search - tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. required: - type AssistantToolsFunction: @@ -3806,18 +3915,6 @@ components: description: The name of the function to call. required: - name - AssistantsResponseFormat: - type: object - description: | - An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. - properties: - type: - type: string - title: AssistantsResponseFormatType - enum: [ "text", "json_object" ] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. TruncationObject: type: object description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. 
@@ -4008,8 +4105,9 @@ components: title: RunObjectResponseFormatMode description: > `auto` is the default value - enum: [ none, auto ] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" required: - id - object @@ -4182,8 +4280,9 @@ components: title: CreateRunRequestResponseFormatMode description: > `auto` is the default value - enum: [ none, auto ] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" stream: type: boolean nullable: true @@ -4411,8 +4510,9 @@ components: title: CreateThreadAndRunRequestResponseFormatMode description: > `auto` is the default value - enum: [ none, auto ] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" stream: type: boolean nullable: true @@ -4722,6 +4822,7 @@ components: - $ref: "#/components/schemas/MessageContentImageFileObject" - $ref: "#/components/schemas/MessageContentImageUrlObject" - $ref: "#/components/schemas/MessageContentTextObject" + - $ref: "#/components/schemas/MessageContentRefusalObject" discriminator: propertyName: type MessageDeltaContent: @@ -4730,6 +4831,8 @@ components: oneOf: - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" - $ref: "#/components/schemas/MessageDeltaContentTextObject" + - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" + - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" discriminator: propertyName: type CreateMessageRequest: @@ -4974,6 +5077,20 @@ components: - file_path - start_index - end_index + MessageContentRefusalObject: + type: object + description: The refusal content generated by the assistant. + properties: + type: + description: Always `refusal`. + type: string + default: refusal + refusal: + type: string + nullable: false + required: + - type + - refusal MessageDeltaContentImageFileObject: type: object description: References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message. @@ -5013,6 +5130,9 @@ components: type: string image_url: $ref: "#/components/schemas/MessageContentImageUrl" + required: + - index + - type MessageDeltaContentTextObject: type: object description: The text content that is part of a message. @@ -5114,6 +5234,23 @@ components: required: - index - type + MessageDeltaContentRefusalObject: + type: object + description: The refusal content that is part of a message. + properties: + index: + type: integer + description: The index of the refusal part in the message. + type: + type: string + description: Always `refusal`. + default: refusal + refusal: + type: string + description: The refusal content generated by the assistant. 
+ required: + - index + - type RunStepObject: type: object description: | diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 4d45fce2..615f5614 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -7585,11 +7585,20 @@ components: } } - ChatCompletionRequestMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" - x-oaiExpandable: true + ChatCompletionRequestMessageContentPartText: + type: object + title: Text content part + properties: + type: + type: string + enum: [ "text" ] + description: The type of the content part. + text: + type: string + description: The text content. + required: + - type + - text ChatCompletionRequestMessageContentPartImage: type: object @@ -7617,20 +7626,20 @@ components: - type - image_url - ChatCompletionRequestMessageContentPartText: + ChatCompletionRequestMessageContentPartRefusal: type: object - title: Text content part + title: Refusal content part properties: type: type: string - enum: ["text"] + enum: [ "refusal" ] description: The type of the content part. - text: + refusal: type: string - description: The text content. + description: The refusal message generated by the model. required: - type - - text + - refusal ChatCompletionRequestMessage: oneOf: @@ -7641,13 +7650,44 @@ components: - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" x-oaiExpandable: true + ChatCompletionRequestSystemMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + + ChatCompletionRequestUserMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" + x-oaiExpandable: true + + ChatCompletionRequestAssistantMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartRefusal" + x-oaiExpandable: true + + ChatCompletionRequestToolMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + ChatCompletionRequestSystemMessage: type: object title: System message properties: content: description: The contents of the system message. - type: string + oneOf: + - type: string + description: The contents of the system message. + title: Text content + - type: array + description: An array of content parts with a defined type. For system messages, only type `text` is supported. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestSystemMessageContentPart" + minItems: 1 role: type: string enum: [ "system" ] @@ -7674,12 +7714,12 @@ components: description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. 
title: Array of content parts items: - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPart" + $ref: "#/components/schemas/ChatCompletionRequestUserMessageContentPart" minItems: 1 x-oaiExpandable: true role: type: string - enum: ["user"] + enum: [ "user" ] description: The role of the messages author, in this case `user`. name: type: string @@ -7694,9 +7734,22 @@ components: properties: content: nullable: true - type: string + oneOf: + - type: string + description: The contents of the assistant message. + title: Text content + - type: array + description: An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestAssistantMessageContentPart" + minItems: 1 description: | The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + refusal: + nullable: true + type: string + description: The refusal message by the assistant. role: type: string enum: [ "assistant" ] @@ -7747,7 +7800,16 @@ components: enum: [ "tool" ] description: The role of the messages author, in this case `tool`. content: - type: string + oneOf: + - type: string + description: The contents of the tool message. + title: Text content + - type: array + description: An array of content parts with a defined type. For tool messages, only type `text` is supported. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestToolMessageContentPart" + minItems: 1 description: The contents of the tool message. tool_call_id: type: string @@ -7833,9 +7895,69 @@ components: description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. parameters: $ref: "#/components/schemas/FunctionParameters" + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling). required: - name + ResponseFormatText: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `text`" + enum: [ "text" ] + required: + - type + + ResponseFormatJsonObject: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `json_object`" + enum: [ "json_object" ] + required: + - type + + ResponseFormatJsonSchemaSchema: + type: object + description: "The schema for the response format, described as a JSON Schema object." + additionalProperties: true + + ResponseFormatJsonSchema: + type: object + properties: + type: + type: string + description: 'The type of response format being defined: `json_schema`' + enum: [ 'json_schema' ] + json_schema: + type: object + properties: + description: + type: string + description: A description of what the response format is for, used by the model to determine how to respond in the format. + name: + type: string + description: The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 
+ schema: + $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). + required: + - type + - name + required: + - type + - json_schema + ChatCompletionToolChoiceOption: description: | Controls which (if any) tool is called by the model. @@ -7970,6 +8092,10 @@ components: type: string description: The contents of the message. nullable: true + refusal: + type: string + description: The refusal message generated by the model. + nullable: true tool_calls: $ref: "#/components/schemas/ChatCompletionMessageToolCalls" role: @@ -7993,6 +8119,7 @@ components: required: - role - content + - refusal ChatCompletionStreamResponseDelta: type: object @@ -8021,6 +8148,10 @@ components: type: string enum: [ "system", "user", "assistant", "tool" ] description: The role of the author of this message. + refusal: + type: string + description: The refusal message generated by the model. + nullable: true CreateChatCompletionRequest: type: object @@ -8040,7 +8171,6 @@ components: enum: [ "gpt-4o", - "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-mini", @@ -8118,20 +8248,19 @@ components: nullable: true description: *completions_presence_penalty_description response_format: - type: object description: | - An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - properties: - type: - type: string - enum: ["text", "json_object"] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. + oneOf: + - $ref: "#/components/schemas/ResponseFormatText" + - $ref: "#/components/schemas/ResponseFormatJsonObject" + - $ref: "#/components/schemas/ResponseFormatJsonSchema" + x-oaiExpandable: true seed: type: integer minimum: -9223372036854775808 @@ -8288,8 +8417,16 @@ components: items: $ref: "#/components/schemas/ChatCompletionTokenLogprob" nullable: true + refusal: + description: A list of message refusal tokens with log probability information. 
+ type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true required: - content + - refusal + created: type: integer description: The Unix timestamp (in seconds) of when the chat completion was created. @@ -10192,22 +10329,12 @@ components: - type: string description: > `auto` is the default value - enum: [none, auto] - - $ref: "#/components/schemas/AssistantsApiResponseFormat" + enum: [ auto ] + - $ref: '#/components/schemas/ResponseFormatText' + - $ref: '#/components/schemas/ResponseFormatJsonObject' + - $ref: '#/components/schemas/ResponseFormatJsonSchema' x-oaiExpandable: true - AssistantsApiResponseFormat: - type: object - description: | - An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. - properties: - type: - type: string - enum: ["text", "json_object"] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. - AssistantObject: type: object title: Assistant @@ -11640,6 +11767,7 @@ components: - $ref: "#/components/schemas/MessageContentImageFileObject" - $ref: "#/components/schemas/MessageContentImageUrlObject" - $ref: "#/components/schemas/MessageContentTextObject" + - $ref: "#/components/schemas/MessageContentRefusalObject" x-oaiExpandable: true assistant_id: description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. @@ -11740,6 +11868,7 @@ components: oneOf: - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" - $ref: "#/components/schemas/MessageDeltaContentTextObject" + - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" x-oaiExpandable: true required: @@ -12007,6 +12136,22 @@ components: - type - text + MessageContentRefusalObject: + title: Refusal + type: object + description: The refusal content generated by the assistant. + properties: + type: + description: Always `refusal`. + type: string + enum: [ "refusal" ] + refusal: + type: string + nullable: false + required: + - type + - refusal + MessageRequestContentTextObject: title: Text type: object @@ -12118,6 +12263,25 @@ components: - index - type + MessageDeltaContentRefusalObject: + title: Refusal + type: object + description: The refusal content that is part of a message. + properties: + index: + type: integer + description: The index of the refusal part in the message. + type: + description: Always `refusal`. + type: string + enum: [ "refusal" ] + refusal: + type: string + required: + - index + - type + + MessageDeltaContentTextAnnotationsFileCitationObject: title: File citation type: object diff --git a/packages/openai_dart/test/openai_client_chat_test.dart b/packages/openai_dart/test/openai_client_chat_test.dart index fa272bbe..ebfe8c44 100644 --- a/packages/openai_dart/test/openai_client_chat_test.dart +++ b/packages/openai_dart/test/openai_client_chat_test.dart @@ -389,12 +389,12 @@ void main() { test('Test jsonObject response format', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt41106Preview, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( content: - 'You are a helpful assistant. 
That extracts names from text ' + 'You are a helpful assistant that extracts names from text ' 'and returns them in a JSON array.', ), ChatCompletionMessage.user( @@ -404,8 +404,59 @@ void main() { ), ], temperature: 0, - responseFormat: ChatCompletionResponseFormat( - type: ChatCompletionResponseFormatType.jsonObject, + responseFormat: ResponseFormat.jsonObject(), + ); + final res = await client.createChatCompletion(request: request); + expect(res.choices, hasLength(1)); + final choice = res.choices.first; + final message = choice.message; + expect(message.role, ChatCompletionMessageRole.assistant); + final content = message.content; + final jsonContent = json.decode(content!) as Map; + final jsonName = jsonContent['names'] as List; + expect(jsonName, isList); + expect(jsonName, hasLength(3)); + expect(jsonName, contains('John')); + expect(jsonName, contains('Mary')); + expect(jsonName, contains('Peter')); + }); + + test('Test jsonSchema response format', () async { + const request = CreateChatCompletionRequest( + model: ChatCompletionModel.model( + ChatCompletionModels.gpt4oMini, + ), + messages: [ + ChatCompletionMessage.system( + content: + 'You are a helpful assistant. That extracts names from text.', + ), + ChatCompletionMessage.user( + content: ChatCompletionUserMessageContent.string( + 'John, Mary, and Peter.', + ), + ), + ], + temperature: 0, + responseFormat: ResponseFormat.jsonSchema( + jsonSchema: JsonSchemaObject( + name: 'Names', + description: 'A list of names', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'names': { + 'type': 'array', + 'items': { + 'type': 'string', + }, + }, + }, + 'additionalProperties': false, + 'required': ['names'], + }, + ), ), ); final res = await client.createChatCompletion(request: request); From 97322e928dbd563c76504e54ac12faa188eee130 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 17 Aug 2024 18:00:03 +0200 Subject: [PATCH 212/251] feat: Add support for Structured Outputs in ChatOpenAI (#526) --- docs/expression_language/primitives/mapper.md | 4 +- docs/expression_language/streaming.md | 4 +- .../models/chat_models/integrations/openai.md | 260 ++++++++++++++++-- docs/modules/model_io/output_parsers/json.md | 4 +- .../cookbook/streaming.dart | 8 +- .../primitives/mapper.dart | 4 +- .../chat_models/integrations/openai.dart | 4 +- .../modules/model_io/output_parsers/json.dart | 8 +- .../lib/src/chat_models/mappers.dart | 22 +- .../lib/src/chat_models/types.dart | 113 +++++++- .../test/chat_models/chat_openai_test.dart | 63 ++++- 11 files changed, 423 insertions(+), 71 deletions(-) diff --git a/docs/expression_language/primitives/mapper.md b/docs/expression_language/primitives/mapper.md index 2fb57295..bc599cb6 100644 --- a/docs/expression_language/primitives/mapper.md +++ b/docs/expression_language/primitives/mapper.md @@ -54,9 +54,7 @@ In the following example, the model streams the output in chunks and the output final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/docs/expression_language/streaming.md b/docs/expression_language/streaming.md index 25725045..dd04c9c6 100644 --- a/docs/expression_language/streaming.md +++ b/docs/expression_language/streaming.md @@ -124,9 +124,7 @@ Let’s see such a parser in action to understand what this means. 
final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/docs/modules/model_io/models/chat_models/integrations/openai.md b/docs/modules/model_io/models/chat_models/integrations/openai.md index df92b348..6b3ccbbc 100644 --- a/docs/modules/model_io/models/chat_models/integrations/openai.md +++ b/docs/modules/model_io/models/chat_models/integrations/openai.md @@ -1,25 +1,78 @@ # OpenAI -[OpenAI](https://platform.openai.com/docs/introduction) offers a spectrum of -chat models with different levels of power suitable for different tasks. +This notebook provides a quick overview for getting started with [OpenAI](https://platform.openai.com/docs/introduction) chat models. For detailed documentation of all `ChatOpenAI` features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest/langchain_openai/ChatOpenAI-class.html). -This example goes over how to use LangChain to interact with -OpenAI [models](https://platform.openai.com/docs/models) using the Chat API. +OpenAI has several chat models. You can find information about their latest models and their costs, context windows, and supported input types in the [OpenAI docs](https://platform.openai.com/docs/models). + +> Note that certain OpenAI models can also be accessed via the [Microsoft Azure platform](https://azure.microsoft.com/en-us/products/ai-services/openai-service). Check out the API reference for more information on how to use Azure with `ChatOpenAI`. + +## Setup + +To access OpenAI models you'll need to create an OpenAI account, get an API key, and install the [langchain_openai](https://pub.dev/packages/langchain_openai) integration package. + +### Credentials + +Head to the [OpenAI Platform](https://platform.openai.com), sign up and get your [API key](https://platform.openai.com/account/api-keys). + +### Installation + +The LangChain.dart OpenAI integration lives in the [langchain_openai](https://pub.dev/packages/langchain_openai) package: + +```bash +dart pub add langchain_openai +``` + +## Usage + +### Instantiation + +Now we can instantiate our model object and generate chat completions: ```dart final openaiApiKey = Platform.environment['OPENAI_API_KEY']; -final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), - (ChatMessageType.human, '{text}'), -]); - final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o', temperature: 0, + // ...other options ), ); +``` + +If you are using a proxy, you can override the base URL, headers, and other options: + +```dart +final client = ChatOpenAI( + baseUrl: 'https://my-proxy.com', + headers: {'x-my-proxy-header': 'value'}, +); +``` + +### Invocation + +Now you can generate completions by calling the `invoke` method: + +```dart +final messages = [ + ChatMessage.system('You are a helpful assistant that translates English to French.'), + ChatMessage.humanText('I love programming.'), +]; +final prompt = PromptValue.chat(messages); +final res = await chatModel.invoke(prompt); +// -> 'J'adore la programmation.' 
+``` + +### Chaining + +We can chain our model with a prompt template or output parser to create a more complex pipeline: + +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, '{text}'), +]); final chain = promptTemplate | chatModel | StringOutputParser(); @@ -32,15 +85,16 @@ print(res); // -> 'J'adore la programmation.' ``` -## Streaming +### Streaming + +OpenAI models support [streaming](/expression_language/streaming.md) the output of th model as it is generated. ```dart final openaiApiKey = Platform.environment['OPENAI_API_KEY']; final promptTemplate = ChatPromptTemplate.fromTemplates([ - ( - ChatMessageType.system, - 'You are a helpful assistant that replies only with numbers ' + (ChatMessageType.system, + 'You are a helpful assistant that replies only with numbers ' 'in order without any spaces or commas', ), (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), @@ -57,7 +111,91 @@ await stream.forEach(print); // 789 ``` -You can also stream OpenAI tool calls: +### Multimodal support + +OpenAI's models have [vision capabilities](https://platform.openai.com/docs/guides/vision), meaning the models can take in images and answer questions about them. + +You can send the image as a base64-encoded string: + +```dart +final prompt = PromptValue.chat([ + ChatMessage.system('You are a helpful assistant.'), + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: '/9j/4AAQSkZJRgABAQAAAQABAAD...Rdu1j//2Q==', // base64-encoded image + ), + ]), + ), +]); +``` + +Or you can send the URL where the image is hosted: + +```dart +final prompt = PromptValue.chat([ + ChatMessage.system('You are a helpful assistant.'), + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + data: 'https://upload.wikimedia.org/wikipedia/commons/9/92/95apple.jpeg', + ), + ]), + ), +]); +``` + +### Tool calling + +OpenAI has a [tool calling](/modules/model_io/models/chat_models/how_to/tools.md) (we use "tool calling" and "function calling" interchangeably here) API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. tool-calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally. + + +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + }, + 'required': ['location'], + }, +); + +final chatModel = ChatOpenAI( + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o', + temperature: 0, + tools: [tool], + ), +); + +final res = await chatModel.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +print(res.output.toolCalls); +// [AIChatMessageToolCall{ +// id: a621064b-03b3-4ca6-8278-f37504901034, +// name: get_current_weather, +// arguments: {location: Boston, US}, +// }, +// AIChatMessageToolCall{ +// id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, +// name: get_current_weather, +// arguments: {location: Madrid, ES}, +// }] +``` + +Notice that the returned `AIChatMessage` has a `toolCalls` field. This contains the tool calls in a standardized format that is model-provider agnostic. + +You can also stream OpenAI tool calls. `ToolsOutputParser` is a useful tool for this case, as it concatenates the chunks progressively and tries to complete the partial JSON into a valid one: ```dart const tool = ToolSpec( @@ -108,9 +246,76 @@ await for (final chunk in stream) { // {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} // } -## JSON mode +### Structured Outputs + +[Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) is a feature that ensures the model will always generate responses that adhere to your supplied JSON Schema, so you don't need to worry about the model omitting a required key, or hallucinating an invalid enum value. + +```dart +final prompt = PromptValue.chat([ + ChatMessage.system( + 'Extract the data of any companies mentioned in the ' + 'following statement. Return a JSON list.', + ), + ChatMessage.humanText( + 'Google was founded in the USA, while Deepmind was founded in the UK', + ), +]); +final chatModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o', + temperature: 0, + responseFormat: ChatOpenAIResponseFormat.jsonSchema( + ChatOpenAIJsonSchema( + name: 'Companies', + description: 'A list of companies', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'companies': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'origin': {'type': 'string'}, + }, + 'additionalProperties': false, + 'required': ['name', 'origin'], + }, + }, + }, + 'additionalProperties': false, + 'required': ['companies'], + }, + ), + ), + ), +); + +final res = await chatModel.invoke(prompt); +// { +// "companies": [ +// { +// "name": "Google", +// "origin": "USA" +// }, +// { +// "name": "Deepmind", +// "origin": "UK" +// } +// ] +// } +``` + +When you use `strict: true`, the model outputs will match the supplied schema exactly. Mind that the strict mode only supports a [subset of JSON schema](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas) for performance reasons. Under the hood, OpenAI uses a technique known as constrained sampling or constrained decoding. For each JSON Schema, they compute a grammar that represents that schema, and pre-process its components to make it easily accessible during model sampling. This is why the first request with a new schema incurs a latency penalty. Typical schemas take under 10 seconds to process on the first request, but more complex schemas may take up to a minute. + +### JSON mode -GPT-4 Turbo supports a new JSON mode, which ensures the model will respond with valid JSON. 
JSON mode is useful for developers generating JSON in the Chat Completions API outside of function calling. You can use it in combination with a `JsonOutputParser` to parse the response into a JSON map. +When [JSON mode](https://platform.openai.com/docs/guides/structured-outputs/json-mode) is turned on, the model's output is ensured to be valid JSON. You can use it in combination with a `JsonOutputParser` to parse the response into a JSON map. + +> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It is recommended that you use Structured Outputs if it is supported for your use case. ```dart final prompt = PromptValue.chat([ @@ -127,9 +332,7 @@ final llm = ChatOpenAI( defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', temperature: 0, - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final chain = llm.pipe(JsonOutputParser()); @@ -148,3 +351,22 @@ print(res); // ] // } ``` + +### Fine-tuning + +You can call [fine-tuned OpenAI models](https://platform.openai.com/docs/guides/fine-tuning) by passing in your corresponding modelName parameter. + +This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. For example: + +```dart +final chatModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: 'ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR' + ), +); +``` + +## API reference + +For detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest). 
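For reference, the Structured Outputs option described above can be combined with ordinary `dart:convert` decoding once the response arrives. The sketch below reuses the `Companies` schema from the Structured Outputs section; the `main` wrapper, variable names, and printed output format are illustrative assumptions rather than part of the documented API.

```dart
// Minimal sketch: decode a Structured Outputs response into plain Dart values.
// Assumes OPENAI_API_KEY is set in the environment.
import 'dart:convert';
import 'dart:io';

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
  final chatModel = ChatOpenAI(
    apiKey: openaiApiKey,
    defaultOptions: ChatOpenAIOptions(
      model: 'gpt-4o',
      temperature: 0,
      responseFormat: ChatOpenAIResponseFormat.jsonSchema(
        const ChatOpenAIJsonSchema(
          name: 'Companies',
          description: 'A list of companies',
          strict: true,
          schema: {
            'type': 'object',
            'properties': {
              'companies': {
                'type': 'array',
                'items': {
                  'type': 'object',
                  'properties': {
                    'name': {'type': 'string'},
                    'origin': {'type': 'string'},
                  },
                  'additionalProperties': false,
                  'required': ['name', 'origin'],
                },
              },
            },
            'additionalProperties': false,
            'required': ['companies'],
          },
        ),
      ),
    ),
  );

  final res = await chatModel.invoke(
    PromptValue.chat([
      ChatMessage.system(
        'Extract the data of any companies mentioned in the '
        'following statement. Return a JSON list.',
      ),
      ChatMessage.humanText(
        'Google was founded in the USA, while Deepmind was founded in the UK',
      ),
    ]),
  );

  // `strict: true` guarantees the output matches the schema, so the casts
  // below do not need defensive null or type checks.
  final data = jsonDecode(res.output.content) as Map<String, dynamic>;
  final companies = data['companies'] as List;
  for (final company in companies) {
    final map = company as Map<String, dynamic>;
    print('${map['name']} (${map['origin']})');
  }
  // Google (USA)
  // Deepmind (UK)
}
```

With plain JSON mode the same loop would need extra validation, since only the well-formedness of the JSON is guaranteed, not its shape.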
diff --git a/docs/modules/model_io/output_parsers/json.md b/docs/modules/model_io/output_parsers/json.md index 905b380b..06451f17 100644 --- a/docs/modules/model_io/output_parsers/json.md +++ b/docs/modules/model_io/output_parsers/json.md @@ -21,9 +21,7 @@ final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: ChatOpenAIOptions( model: 'gpt-4-turbo', - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/examples/docs_examples/bin/expression_language/cookbook/streaming.dart b/examples/docs_examples/bin/expression_language/cookbook/streaming.dart index 66e4a7a6..d6b8cdae 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/streaming.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/streaming.dart @@ -79,9 +79,7 @@ Future _inputStreams() async { final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); @@ -125,9 +123,7 @@ Future _inputStreamMapper() async { final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/examples/docs_examples/bin/expression_language/primitives/mapper.dart b/examples/docs_examples/bin/expression_language/primitives/mapper.dart index 818ed0d7..c9d0400a 100644 --- a/examples/docs_examples/bin/expression_language/primitives/mapper.dart +++ b/examples/docs_examples/bin/expression_language/primitives/mapper.dart @@ -63,9 +63,7 @@ Future _mapInputStream() async { final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart index 6d302daf..2b6ea9df 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart @@ -131,9 +131,7 @@ Future _chatOpenAIJsonMode() async { defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', temperature: 0, - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final chain = llm.pipe(JsonOutputParser()); diff --git a/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart b/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart index 8005f8d0..b921ec7d 100644 --- a/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart +++ b/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart @@ -22,9 +22,7 @@ Future _invoke() async { apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), 
+ responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); @@ -51,9 +49,7 @@ Future _streaming() async { apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart index 0c70fd73..5e9000c2 100644 --- a/packages/langchain_openai/lib/src/chat_models/mappers.dart +++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart @@ -248,15 +248,19 @@ extension CreateChatCompletionStreamResponseMapper } extension ChatOpenAIResponseFormatMapper on ChatOpenAIResponseFormat { - ChatCompletionResponseFormat toChatCompletionResponseFormat() { - return ChatCompletionResponseFormat( - type: switch (type) { - ChatOpenAIResponseFormatType.text => - ChatCompletionResponseFormatType.text, - ChatOpenAIResponseFormatType.jsonObject => - ChatCompletionResponseFormatType.jsonObject, - }, - ); + ResponseFormat toChatCompletionResponseFormat() { + return switch (this) { + ChatOpenAIResponseFormatText() => const ResponseFormat.text(), + ChatOpenAIResponseFormatJsonObject() => const ResponseFormat.jsonObject(), + final ChatOpenAIResponseFormatJsonSchema res => ResponseFormat.jsonSchema( + jsonSchema: JsonSchemaObject( + name: res.jsonSchema.name, + description: res.jsonSchema.description, + schema: res.jsonSchema.schema, + strict: res.jsonSchema.strict, + ), + ), + }; } } diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 988d27c0..5db3268f 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -261,24 +261,111 @@ class ChatOpenAIOptions extends ChatModelOptions { /// {@template chat_openai_response_format} /// An object specifying the format that the model must output. /// {@endtemplate} -class ChatOpenAIResponseFormat { - /// {@macro chat_openai_response_format} - const ChatOpenAIResponseFormat({ - required this.type, +sealed class ChatOpenAIResponseFormat { + const ChatOpenAIResponseFormat(); + + /// The model will respond with text. + static const text = ChatOpenAIResponseFormatText(); + + /// The model will respond with a valid JSON object. + static const jsonObject = ChatOpenAIResponseFormatJsonObject(); + + /// The model will respond with a valid JSON object that adheres to the + /// specified schema. + factory ChatOpenAIResponseFormat.jsonSchema( + final ChatOpenAIJsonSchema jsonSchema, + ) => + ChatOpenAIResponseFormatJsonSchema(jsonSchema: jsonSchema); +} + +/// {@template chat_openai_response_format_text} +/// The model will respond with text. +/// {@endtemplate} +class ChatOpenAIResponseFormatText extends ChatOpenAIResponseFormat { + /// {@macro chat_openai_response_format_text} + const ChatOpenAIResponseFormatText(); +} + +/// {@template chat_openai_response_format_json_object} +/// The model will respond with a valid JSON object. +/// {@endtemplate} +class ChatOpenAIResponseFormatJsonObject extends ChatOpenAIResponseFormat { + /// {@macro chat_openai_response_format_json_object} + const ChatOpenAIResponseFormatJsonObject(); +} + +/// {@template chat_openai_response_format_json_schema} +/// The model will respond with a valid JSON object that adheres to the +/// specified schema. 
+/// {@endtemplate} +@immutable +class ChatOpenAIResponseFormatJsonSchema extends ChatOpenAIResponseFormat { + /// {@macro chat_openai_response_format_json_schema} + const ChatOpenAIResponseFormatJsonSchema({ + required this.jsonSchema, }); - /// The format type. - final ChatOpenAIResponseFormatType type; + /// The JSON schema that the model must adhere to. + final ChatOpenAIJsonSchema jsonSchema; + + @override + bool operator ==(covariant ChatOpenAIResponseFormatJsonSchema other) { + return identical(this, other) || + runtimeType == other.runtimeType && jsonSchema == other.jsonSchema; + } + + @override + int get hashCode => jsonSchema.hashCode; } -/// Types of response formats. -enum ChatOpenAIResponseFormatType { - /// Standard text mode. - text, +/// {@template chat_openai_json_schema} +/// Specifies the schema for the response format. +/// {@endtemplate} +@immutable +class ChatOpenAIJsonSchema { + /// {@macro chat_openai_json_schema} + const ChatOpenAIJsonSchema({ + required this.name, + required this.schema, + this.description, + this.strict = false, + }); + + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain + /// underscores and dashes, with a maximum length of 64. + final String name; + + /// A description of what the response format is for, used by the model to + /// determine how to respond in the format. + final String? description; + + /// The schema for the response format, described as a JSON Schema object. + final Map schema; + + /// Whether to enable strict schema adherence when generating the output. + /// If set to true, the model will always follow the exact schema defined in + /// the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + final bool strict; - /// [ChatOpenAIResponseFormatType.jsonObject] enables JSON mode, which - /// guarantees the message the model generates is valid JSON. - jsonObject, + @override + bool operator ==(covariant ChatOpenAIJsonSchema other) { + return identical(this, other) || + runtimeType == other.runtimeType && + name == other.name && + description == other.description && + const MapEquality().equals(schema, other.schema) && + strict == other.strict; + } + + @override + int get hashCode { + return name.hashCode ^ + description.hashCode ^ + const MapEquality().hash(schema) ^ + strict.hashCode; + } } /// Specifies the latency tier to use for processing the request. 
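To illustrate how the sealed `ChatOpenAIResponseFormat` hierarchy introduced above is meant to be consumed, here is a rough usage sketch. It relies only on the types defined in this file; the model name and the schema contents are placeholder assumptions.

```dart
import 'package:langchain_openai/langchain_openai.dart';

void main() {
  // The three response-format variants exposed by the sealed class.
  const text = ChatOpenAIResponseFormat.text;
  const jsonMode = ChatOpenAIResponseFormat.jsonObject;
  final structured = ChatOpenAIResponseFormat.jsonSchema(
    const ChatOpenAIJsonSchema(
      name: 'Answer',
      description: 'A single string answer',
      strict: true,
      schema: {
        'type': 'object',
        'properties': {
          'answer': {'type': 'string'},
        },
        'additionalProperties': false,
        'required': ['answer'],
      },
    ),
  );

  // Because the base class is sealed, a switch over it is exhaustive.
  for (final format in [text, jsonMode, structured]) {
    final label = switch (format) {
      ChatOpenAIResponseFormatText() => 'plain text',
      ChatOpenAIResponseFormatJsonObject() => 'JSON mode',
      ChatOpenAIResponseFormatJsonSchema(:final jsonSchema) =>
        'structured output (${jsonSchema.name})',
    };
    print(label);
  }

  // Passing a format to the model options (placeholder model name).
  final options = ChatOpenAIOptions(
    model: 'gpt-4o',
    responseFormat: structured,
  );
  print(options.model);
}
```

The static `text` and `jsonObject` constants cover the parameter-less variants, while the `jsonSchema` factory wraps the schema-bearing one, mirroring the three `ResponseFormat` variants in the updated OpenAPI spec.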
diff --git a/packages/langchain_openai/test/chat_models/chat_openai_test.dart b/packages/langchain_openai/test/chat_models/chat_openai_test.dart index 7ba681c6..d56ba1f9 100644 --- a/packages/langchain_openai/test/chat_models/chat_openai_test.dart +++ b/packages/langchain_openai/test/chat_models/chat_openai_test.dart @@ -357,11 +357,68 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-1106-preview', + model: defaultModel, + temperature: 0, + seed: 9999, + responseFormat: ChatOpenAIResponseFormat.jsonObject, + ), + ); + + final res = await llm.invoke(prompt); + final outputMsg = res.output; + final outputJson = json.decode(outputMsg.content) as Map; + expect(outputJson['companies'], isNotNull); + final companies = outputJson['companies'] as List; + expect(companies, hasLength(2)); + final firstCompany = companies.first as Map; + expect(firstCompany['name'], 'Google'); + expect(firstCompany['origin'], 'USA'); + final secondCompany = companies.last as Map; + expect(secondCompany['name'], 'Deepmind'); + expect(secondCompany['origin'], 'UK'); + }); + + test('Test Structured Output', () async { + final prompt = PromptValue.chat([ + ChatMessage.system( + 'Extract the data of any companies mentioned in the ' + 'following statement. Return a JSON list.', + ), + ChatMessage.humanText( + 'Google was founded in the USA, while Deepmind was founded in the UK', + ), + ]); + final llm = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: defaultModel, temperature: 0, seed: 9999, - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, + responseFormat: ChatOpenAIResponseFormat.jsonSchema( + const ChatOpenAIJsonSchema( + name: 'Companies', + description: 'A list of companies', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'companies': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'origin': {'type': 'string'}, + }, + 'additionalProperties': false, + 'required': ['name', 'origin'], + }, + }, + }, + 'additionalProperties': false, + 'required': ['companies'], + }, + ), ), ), ); From 2f6dbdeaa47dcadab422ccebecaa95c64a780bb1 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 17 Aug 2024 18:14:33 +0200 Subject: [PATCH 213/251] feat: Add chatgpt-4o-latest to model catalog (#527) --- packages/langchain_openai/lib/src/chat_models/types.dart | 1 + .../src/generated/schema/create_chat_completion_request.dart | 2 ++ packages/openai_dart/lib/src/generated/schema/schema.g.dart | 1 + packages/openai_dart/oas/openapi_curated.yaml | 1 + packages/openai_dart/oas/openapi_official.yaml | 1 + 5 files changed, 6 insertions(+) diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 5db3268f..0c80184f 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -7,6 +7,7 @@ import 'package:meta/meta.dart'; /// Options to pass into the OpenAI Chat Model. 
/// /// Available [ChatOpenAIOptions.model]s: +/// - `chatgpt-4o-latest` /// - `gpt-4` /// - `gpt-4-32k` /// - `gpt-4-32k-0314` diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 8b6c5c52..0b6a5920 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -277,6 +277,8 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Available completion models. Mind that the list may not be exhaustive nor up-to-date. enum ChatCompletionModels { + @JsonValue('chatgpt-4o-latest') + chatgpt4oLatest, @JsonValue('gpt-4') gpt4, @JsonValue('gpt-4-32k') diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 77e7ee5d..01851e43 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -414,6 +414,7 @@ Map _$$ChatCompletionModelEnumerationImplToJson( }; const _$ChatCompletionModelsEnumMap = { + ChatCompletionModels.chatgpt4oLatest: 'chatgpt-4o-latest', ChatCompletionModels.gpt4: 'gpt-4', ChatCompletionModels.gpt432k: 'gpt-4-32k', ChatCompletionModels.gpt432k0314: 'gpt-4-32k-0314', diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 46282eab..ba1d409c 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1806,6 +1806,7 @@ components: Available completion models. Mind that the list may not be exhaustive nor up-to-date. 
enum: [ + "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 615f5614..02653404 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -8173,6 +8173,7 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", + "chatgpt-4o-latest", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", From bcba078d03cddec223292947e425c7f4192275e2 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 21 Aug 2024 09:24:54 +0200 Subject: [PATCH 214/251] feat: Add copyWith method to all RunnableOptions subclasses (#531) --- .../lib/src/chat_models/fake.dart | 13 +++++++ .../lib/src/chat_models/types.dart | 8 +++++ .../lib/src/language_models/types.dart | 6 ++++ .../langchain_core/lib/src/llms/fake.dart | 34 +++++++++++++++---- .../langchain_core/lib/src/llms/types.dart | 2 +- .../lib/src/retrievers/types.dart | 12 +++++++ .../test/runnables/binding_test.dart | 11 ++++++ .../lib/src/chat_models/vertex_ai/types.dart | 3 ++ .../lib/src/chat_models/types.dart | 3 ++ .../src/chat_models/chat_ollama/types.dart | 5 +++ 10 files changed, 90 insertions(+), 7 deletions(-) diff --git a/packages/langchain_core/lib/src/chat_models/fake.dart b/packages/langchain_core/lib/src/chat_models/fake.dart index b69868c3..bda1d6e3 100644 --- a/packages/langchain_core/lib/src/chat_models/fake.dart +++ b/packages/langchain_core/lib/src/chat_models/fake.dart @@ -2,6 +2,7 @@ import 'package:collection/collection.dart'; import '../../language_models.dart'; import '../prompts/types.dart'; +import '../tools/base.dart'; import 'base.dart'; import 'types.dart'; @@ -85,6 +86,8 @@ class FakeChatModelOptions extends ChatModelOptions { const FakeChatModelOptions({ super.model, this.metadata, + super.tools, + super.toolChoice, super.concurrencyLimit, }); @@ -95,11 +98,15 @@ class FakeChatModelOptions extends ChatModelOptions { FakeChatModelOptions copyWith({ final String? model, final Map? metadata, + final List? tools, + final ChatToolChoice? toolChoice, final int? concurrencyLimit, }) { return FakeChatModelOptions( model: model ?? this.model, metadata: metadata ?? this.metadata, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } @@ -223,6 +230,8 @@ class FakeEchoChatModelOptions extends ChatModelOptions { super.model, this.metadata, this.throwRandomError = false, + super.tools, + super.toolChoice, super.concurrencyLimit, }); @@ -237,12 +246,16 @@ class FakeEchoChatModelOptions extends ChatModelOptions { final String? model, final Map? metadata, final bool? throwRandomError, + final List? tools, + final ChatToolChoice? toolChoice, final int? concurrencyLimit, }) { return FakeEchoChatModelOptions( model: model ?? this.model, metadata: metadata ?? this.metadata, throwRandomError: throwRandomError ?? this.throwRandomError, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index f9c2aff3..f92fe4af 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -22,6 +22,14 @@ abstract class ChatModelOptions extends LanguageModelOptions { /// Controls which (if any) tool is called by the model. final ChatToolChoice? toolChoice; + + @override + ChatModelOptions copyWith({ + final String? model, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, + }); } /// {@template chat_result} diff --git a/packages/langchain_core/lib/src/language_models/types.dart b/packages/langchain_core/lib/src/language_models/types.dart index 3b52ee63..39e071bd 100644 --- a/packages/langchain_core/lib/src/language_models/types.dart +++ b/packages/langchain_core/lib/src/language_models/types.dart @@ -17,6 +17,12 @@ abstract class LanguageModelOptions extends BaseLangChainOptions { /// ID of the language model to use. /// Check the provider's documentation for available models. final String? model; + + @override + LanguageModelOptions copyWith({ + final String? model, + final int? concurrencyLimit, + }); } /// {@template language_model} diff --git a/packages/langchain_core/lib/src/llms/fake.dart b/packages/langchain_core/lib/src/llms/fake.dart index 0781e607..ffb64c00 100644 --- a/packages/langchain_core/lib/src/llms/fake.dart +++ b/packages/langchain_core/lib/src/llms/fake.dart @@ -7,11 +7,11 @@ import 'types.dart'; /// Fake LLM for testing. /// You can pass in a list of responses to return in order when called. /// {@endtemplate} -class FakeLLM extends SimpleLLM { +class FakeLLM extends SimpleLLM { /// {@macro fake_list_llm} FakeLLM({ required this.responses, - }) : super(defaultOptions: const LLMOptions()); + }) : super(defaultOptions: const FakeLLMOptions()); /// Responses to return in order when called. final List responses; @@ -60,13 +60,35 @@ class FakeLLM extends SimpleLLM { } } +/// {@template fake_llm_options} +/// Fake LLM options for testing. +/// {@endtemplate} +class FakeLLMOptions extends LLMOptions { + /// {@macro fake_llm_options} + const FakeLLMOptions({ + super.model, + super.concurrencyLimit, + }); + + @override + FakeLLMOptions copyWith({ + final String? model, + final int? concurrencyLimit, + }) { + return FakeLLMOptions( + model: model ?? this.model, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } +} + /// {@template fake_echo_llm} /// Fake LLM for testing. /// It just returns the prompt or streams it char by char. /// {@endtemplate} -class FakeEchoLLM extends BaseLLM { +class FakeEchoLLM extends BaseLLM { /// {@macro fake_echo_llm} - const FakeEchoLLM() : super(defaultOptions: const LLMOptions()); + const FakeEchoLLM() : super(defaultOptions: const FakeLLMOptions()); @override String get modelType => 'fake-echo'; @@ -122,11 +144,11 @@ class FakeEchoLLM extends BaseLLM { /// Fake LLM for testing. /// It returns the string returned by the [handler] function. /// {@endtemplate} -class FakeHandlerLLM extends SimpleLLM { +class FakeHandlerLLM extends SimpleLLM { /// {@macro fake_handler_llm} FakeHandlerLLM({ required this.handler, - }) : super(defaultOptions: const LLMOptions()); + }) : super(defaultOptions: const FakeLLMOptions()); /// Function called to generate the response. 
final String Function( diff --git a/packages/langchain_core/lib/src/llms/types.dart b/packages/langchain_core/lib/src/llms/types.dart index 02a506de..7a81a0ab 100644 --- a/packages/langchain_core/lib/src/llms/types.dart +++ b/packages/langchain_core/lib/src/llms/types.dart @@ -6,7 +6,7 @@ import '../language_models/types.dart'; /// Options to pass into the LLM. /// {@endtemplate} @immutable -class LLMOptions extends LanguageModelOptions { +abstract class LLMOptions extends LanguageModelOptions { /// {@macro llm_options} const LLMOptions({ super.model, diff --git a/packages/langchain_core/lib/src/retrievers/types.dart b/packages/langchain_core/lib/src/retrievers/types.dart index a80412e2..e3938296 100644 --- a/packages/langchain_core/lib/src/retrievers/types.dart +++ b/packages/langchain_core/lib/src/retrievers/types.dart @@ -21,10 +21,22 @@ class VectorStoreRetrieverOptions extends RetrieverOptions { /// {@macro vector_store_retriever_options} const VectorStoreRetrieverOptions({ this.searchType = const VectorStoreSimilaritySearch(), + super.concurrencyLimit, }); /// The type of search to perform, either: /// - [VectorStoreSearchType.similarity] (default) /// - [VectorStoreSearchType.mmr] final VectorStoreSearchType searchType; + + @override + VectorStoreRetrieverOptions copyWith({ + final VectorStoreSearchType? searchType, + final int? concurrencyLimit, + }) { + return VectorStoreRetrieverOptions( + searchType: searchType ?? this.searchType, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } } diff --git a/packages/langchain_core/test/runnables/binding_test.dart b/packages/langchain_core/test/runnables/binding_test.dart index 882bf164..e64f0042 100644 --- a/packages/langchain_core/test/runnables/binding_test.dart +++ b/packages/langchain_core/test/runnables/binding_test.dart @@ -4,6 +4,7 @@ import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/output_parsers.dart'; import 'package:langchain_core/prompts.dart'; import 'package:langchain_core/runnables.dart'; +import 'package:langchain_core/tools.dart'; import 'package:test/test.dart'; void main() { @@ -123,4 +124,14 @@ class _FakeOptionsChatModelOptions extends ChatModelOptions { const _FakeOptionsChatModelOptions(this.stop); final String stop; + + @override + ChatModelOptions copyWith({ + final String? model, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, + }) { + return _FakeOptionsChatModelOptions(stop); + } } diff --git a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart index 019ab64e..50249bf3 100644 --- a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart @@ -1,5 +1,6 @@ import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; import 'package:meta/meta.dart'; /// {@template chat_vertex_ai_options} @@ -108,6 +109,8 @@ class ChatVertexAIOptions extends ChatModelOptions { final List? stopSequences, final int? candidateCount, final List? examples, + final List? tools, + final ChatToolChoice? toolChoice, final int? 
concurrencyLimit, }) { return ChatVertexAIOptions( diff --git a/packages/langchain_mistralai/lib/src/chat_models/types.dart b/packages/langchain_mistralai/lib/src/chat_models/types.dart index e6ba07b9..d9a75761 100644 --- a/packages/langchain_mistralai/lib/src/chat_models/types.dart +++ b/packages/langchain_mistralai/lib/src/chat_models/types.dart @@ -1,4 +1,5 @@ import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; import 'package:meta/meta.dart'; /// {@template chat_mistral_ai_options} @@ -54,6 +55,8 @@ class ChatMistralAIOptions extends ChatModelOptions { final int? maxTokens, final bool? safePrompt, final int? randomSeed, + final List? tools, + final ChatToolChoice? toolChoice, final int? concurrencyLimit, }) { return ChatMistralAIOptions( diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart index 6e9c0f20..cf02b00c 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart @@ -1,5 +1,6 @@ import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; import 'package:meta/meta.dart'; import '../../../langchain_ollama.dart'; @@ -247,6 +248,8 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? useMmap, final bool? useMlock, final int? numThread, + final List? tools, + final ChatToolChoice? toolChoice, final int? concurrencyLimit, }) { return ChatOllamaOptions( @@ -283,6 +286,8 @@ class ChatOllamaOptions extends ChatModelOptions { useMmap: useMmap ?? this.useMmap, useMlock: useMlock ?? this.useMlock, numThread: numThread ?? this.numThread, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } From 989b3f170e93df638b9fe67aa9d918b9a381b81c Mon Sep 17 00:00:00 2001 From: Heinrich Date: Wed, 21 Aug 2024 21:54:37 +1000 Subject: [PATCH 215/251] feat: Add ToolsAgent for models with tool-calling support (#530) Co-authored-by: David Miguel --- docs/_sidebar.md | 3 +- .../modules/agents/agent_types/tools_agent.md | 190 +++++ .../models/chat_models/integrations/ollama.md | 15 +- .../agents/agent_types/tools_agent.dart | 160 ++++ .../chat_models/integrations/ollama.dart | 2 +- packages/langchain/lib/src/agents/agents.dart | 1 + packages/langchain/lib/src/agents/tools.dart | 304 ++++++++ packages/langchain/pubspec.yaml | 3 + packages/langchain/pubspec_overrides.yaml | 14 +- .../test/agents/assets/state_of_the_union.txt | 723 ++++++++++++++++++ .../langchain/test/agents/tools_test.dart | 224 ++++++ 11 files changed, 1628 insertions(+), 11 deletions(-) create mode 100644 docs/modules/agents/agent_types/tools_agent.md create mode 100644 examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart create mode 100644 packages/langchain/lib/src/agents/tools.dart create mode 100644 packages/langchain/test/agents/assets/state_of_the_union.txt create mode 100644 packages/langchain/test/agents/tools_test.dart diff --git a/docs/_sidebar.md b/docs/_sidebar.md index c51de21b..92d4f394 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -121,7 +121,8 @@ - [Memory](/modules/memory/memory.md) - [Agents](/modules/agents/agents.md) - [Agent types](/modules/agents/agent_types/agent_types.md) - - [OpenAI functions](/modules/agents/agent_types/openai_tools_agent.md) + - [Tools Agent](/modules/agents/agent_types/tools_agent.md) + - [OpenAI Tools Agent](/modules/agents/agent_types/openai_tools_agent.md) - [Tools](/modules/agents/tools/tools.md) - [Calculator](/modules/agents/tools/calculator.md) - [DALL-E Image Generator](/modules/agents/tools/openai_dall_e.md) diff --git a/docs/modules/agents/agent_types/tools_agent.md b/docs/modules/agents/agent_types/tools_agent.md new file mode 100644 index 00000000..7c0c9de8 --- /dev/null +++ b/docs/modules/agents/agent_types/tools_agent.md @@ -0,0 +1,190 @@ +# Tools Agent + +An agent powered by the [tool calling API](/modules/model_io/models/chat_models/how_to/tools.md). + +This agent is designed to work with any chat model that supports tool calling. It can interpret the model's output and decide when to call specific tools based on that output. + +**Supported models:** +You can use any chat model that supports tool calling, like `ChatOpenAI`, `ChatOllama`, `ChatAnthropic`, `ChatFirebaseVertexAI`, etc. Check the [tool calling docs](/modules/model_io/models/chat_models/how_to/tools.md) for a complete list. + +## Usage + +In the following example, we use `ChatOllama` with the `llama3.1` model and a calculator tool (included in `langchain_community`) to calculate the result of a mathematical expression. + +```dart +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +//... + +final llm = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + ), +); +final tool = CalculatorTool(); +final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); +final executor = AgentExecutor(agent: agent); +final res = await executor.run( + 'What is 40 raised to the power of 0.43? 
' + 'Return the result with 3 decimals.', +); +print(res); +// The result is: 4.885 +``` + +## Custom tools + +You can easily call your own functions by wrapping them in a `Tool`. You can also add memory to the agent by passing it when creating the agent. + +Let's see an example of how to do this. + +First, let's create a class that will be the input for our tool. + +```dart +@immutable +class SearchInput { + const SearchInput({ + required this.query, + required this.n, + }); + + final String query; + final int n; + + SearchInput.fromJson(final Map json) + : this( + query: json['query'] as String, + n: json['n'] as int, + ); +} +``` + +Now let's define the tool: + +```dart +final searchTool = Tool.fromFunction( + name: 'search', + description: 'Tool for searching the web.', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'query': { + 'type': 'string', + 'description': 'The query to search for', + }, + 'n': { + 'type': 'integer', + 'description': 'The number of results to return', + }, + }, + 'required': ['query'], + }, + func: callYourSearchFunction, + getInputFromJson: SearchInput.fromJson, +); +``` + +Notice that we need to provide a function that converts the JSON input that the model will send to our tool into the input class that we defined. + +The tool will call `callYourSearchFunction` function with the parsed input. For simplicity, we will just mock the search function. +```dart +String callYourSearchFunction(final SearchInput input) { + final n = input.n; + final res = List.generate( + n, + (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}', + ); + return 'Results:\n${res.join('\n')}'; +} +``` + +Now we can create the agent and run it: + +```dart +final llm = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3-groq-tool-use', + temperature: 0, + ), +); + +final memory = ConversationBufferMemory(returnMessages: true); +final agent = ToolsAgent.fromLLMAndTools( + llm: llm, + tools: [searchTool], + memory: memory, +); + +final executor = AgentExecutor(agent: agent); + +final res1 = await executor.run( + 'Search for cat names. Return only 3 results.', +); +print(res1); +// Here are the top 3 cat names I found: AAA, BBB, and CCC. +``` + +## Custom agent using LangChain Expression Language (LCEL) + +You can replicate the functionality of the Tools Agent by using the LangChain Expression Language (LCEL) directly. 
+ +```dart +final openAiKey = Platform.environment['OPENAI_API_KEY']; + +final prompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant'), + (ChatMessageType.human, '{input}'), + (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), +]); + +final tool = CalculatorTool(); + +final model = ChatOpenAI( + apiKey: openAiKey, + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o-mini', + temperature: 0, + tools: [tool], + ), +); + +const outputParser = ToolsAgentOutputParser(); + +List buildScratchpad(final List intermediateSteps) { + return intermediateSteps + .map((s) { + return s.action.messageLog + + [ + ChatMessage.tool( + toolCallId: s.action.id, + content: s.observation, + ), + ]; + }) + .expand((m) => m) + .toList(growable: false); +} + +final agent = Agent.fromRunnable( + Runnable.mapInput( + (AgentPlanInput planInput) => { + 'input': planInput.inputs['input'], + 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), + }, + ).pipe(prompt).pipe(model).pipe(outputParser), + tools: [tool], +); +final executor = AgentExecutor(agent: agent); + +final res = await executor.invoke({ + 'input': 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', +}); +print(res['output']); +// The result of 40 raised to the power of 0.43 is approximately 4.885. +``` + +In this way, you can create your own custom agents with full control over their behavior, while still leveraging the flexibility of the Tools Agent to work with various language models and tools. diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index 9c368d1b..e6cc5907 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -83,11 +83,9 @@ final chain = promptTemplate.pipe(chat).pipe(StringOutputParser()); final stream = chain.stream({'max_num': '9'}); await stream.forEach(print); -// 1 -// 2 -// 3 -// .. -// 9 +// 123 +// 456 +// 789 ``` ### Multimodal support @@ -120,12 +118,13 @@ print(res.output.content); ### Tool calling -`ChatOllama` now offers support for native tool calling. This enables a model to answer a given prompt using tool(s) it knows about, making it possible for models to perform more complex tasks or interact with the outside world. It follows the standard [LangChain.dart tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). +`ChatOllama` offers support for native tool calling. This enables a model to answer a given prompt using tool(s) it knows about, making it possible for models to perform more complex tasks or interact with the outside world. It follows the standard [LangChain.dart tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). **Notes:** - Tool calling requires [Ollama 0.3.0](https://github.com/ollama/ollama/releases/tag/v0.3.0) or newer. - Streaming tool calls is not supported at the moment. -- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1)). +- Not all models support tool calls. 
Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1) or [`llama3-groq-tool-use`](https://ollama.com/library/llama3-groq-tool-use)). +- At the moment, small models like `llama3.1` [cannot reliably maintain a conversation alongside tool calling definitions](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#llama-3.1-instruct). They can be used for zero-shot tool calling, but for multi-turn conversations it's recommended to use larger models like `llama3.1:70b` or `llama3.1:405b`. ```dart const tool = ToolSpec( @@ -420,7 +419,7 @@ We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `Ch ```dart // 1. Create a vector store and add documents to it final vectorStore = MemoryVectorStore( - embeddings: OllamaEmbeddings(model: 'llama3.1'), + embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), ); await vectorStore.addDocuments( documents: [ diff --git a/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart b/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart new file mode 100644 index 00000000..7554d8d4 --- /dev/null +++ b/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart @@ -0,0 +1,160 @@ +// ignore_for_file: avoid_print, unreachable_from_main +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; +import 'package:langchain_openai/langchain_openai.dart'; + +void main() async { + await _toolsAgent(); + await _toolsAgentCustomToolsMemory(); + await _toolsAgentLCEL(); +} + +Future _toolsAgent() async { + final llm = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + ), + ); + final tool = CalculatorTool(); + final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); + final executor = AgentExecutor(agent: agent); + final res = await executor.run( + 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + ); + print(res); + // The result is: 4.885 +} + +Future _toolsAgentCustomToolsMemory() async { + final tool = Tool.fromFunction( + name: 'search', + description: 'Tool for searching the web.', + inputJsonSchema: const { + 'type': 'object', + 'properties': { + 'query': { + 'type': 'string', + 'description': 'The query to search for', + }, + 'n': { + 'type': 'integer', + 'description': 'The number of results to return', + }, + }, + 'required': ['query'], + }, + func: callYourSearchFunction, + getInputFromJson: SearchInput.fromJson, + ); + + final llm = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3-groq-tool-use', + temperature: 0, + ), + ); + + final memory = ConversationBufferMemory(returnMessages: true); + final agent = ToolsAgent.fromLLMAndTools( + llm: llm, + tools: [tool], + memory: memory, + ); + + final executor = AgentExecutor(agent: agent); + + final res1 = await executor.run( + 'Search for cat names. Return only 3 results.', + ); + print(res1); + // Here are 3 search results for "cats": + // 1. Result 1 + // 2. Result 2 + // 3. 
Result 3 +} + +class SearchInput { + const SearchInput({ + required this.query, + required this.n, + }); + + final String query; + final int n; + + SearchInput.fromJson(final Map json) + : this( + query: json['query'] as String, + n: json['n'] as int, + ); +} + +String callYourSearchFunction(final SearchInput input) { + final n = input.n; + final res = List.generate( + n, + (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}', + ); + return 'Results:\n${res.join('\n')}'; +} + +Future _toolsAgentLCEL() async { + final openAiKey = Platform.environment['OPENAI_API_KEY']; + + final prompt = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are a helpful assistant'), + (ChatMessageType.human, '{input}'), + (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), + ]); + + final tool = CalculatorTool(); + + final model = ChatOpenAI( + apiKey: openAiKey, + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o-mini', + temperature: 0, + tools: [tool], + ), + ); + + const outputParser = ToolsAgentOutputParser(); + + List buildScratchpad(final List intermediateSteps) { + return intermediateSteps + .map((s) { + return s.action.messageLog + + [ + ChatMessage.tool( + toolCallId: s.action.id, + content: s.observation, + ), + ]; + }) + .expand((m) => m) + .toList(growable: false); + } + + final agent = Agent.fromRunnable( + Runnable.mapInput( + (AgentPlanInput planInput) => { + 'input': planInput.inputs['input'], + 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), + }, + ).pipe(prompt).pipe(model).pipe(outputParser), + tools: [tool], + ); + final executor = AgentExecutor(agent: agent); + + final res = await executor.invoke({ + 'input': 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + }); + print(res['output']); + // The result of 40 raised to the power of 0.43 is approximately 4.885. +} diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart index 0682326f..2d66b367 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart @@ -348,7 +348,7 @@ Future _flights() async { Future _rag() async { // 1. 
Create a vector store and add documents to it final vectorStore = MemoryVectorStore( - embeddings: OllamaEmbeddings(model: 'llama3.1'), + embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), ); await vectorStore.addDocuments( documents: [ diff --git a/packages/langchain/lib/src/agents/agents.dart b/packages/langchain/lib/src/agents/agents.dart index ec89c95c..cc12a558 100644 --- a/packages/langchain/lib/src/agents/agents.dart +++ b/packages/langchain/lib/src/agents/agents.dart @@ -1,3 +1,4 @@ export 'package:langchain_core/agents.dart'; export 'executor.dart'; +export 'tools.dart'; diff --git a/packages/langchain/lib/src/agents/tools.dart b/packages/langchain/lib/src/agents/tools.dart new file mode 100644 index 00000000..02a16284 --- /dev/null +++ b/packages/langchain/lib/src/agents/tools.dart @@ -0,0 +1,304 @@ +import 'package:langchain_core/agents.dart'; +import 'package:langchain_core/chains.dart'; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/exceptions.dart'; +import 'package:langchain_core/memory.dart'; +import 'package:langchain_core/output_parsers.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; + +const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( + prompt: PromptTemplate( + inputVariables: {}, + template: 'You are a helpful AI assistant', + ), +); + +/// {@template tools_agent} +/// An agent powered by the tool calling API. +/// +/// Example: +/// ```dart +/// final llm = ChatOllama( +/// defaultOptions: ChatOllamaOptions( +/// model: 'llama3-groq-tool-use', +/// temperature: 0, +/// ), +/// ); +/// final tools = [CalculatorTool()]; +/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +/// final executor = AgentExecutor(agent: agent); +/// final res = await executor.run('What is 40 raised to the 0.43 power? '); +/// ``` +/// +/// You can use any chat model that supports tools, like `ChatOpenAI`, +/// `ChatOllama`, `ChatAnthropic`, `ChatFirebaseVertexAI`, etc. Check the +/// [documentation](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) +/// for a complete list. +/// +/// You can easily add memory to the agent using the memory parameter from the +/// [ToolsAgent.fromLLMAndTools] constructor. Make sure you enable +/// [BaseChatMemory.returnMessages] on your memory, as the agent works with +/// [ChatMessage]s. The default prompt template already takes care of adding +/// the history to the prompt. For example: +/// ```dart +/// final memory = ConversationBufferMemory(returnMessages: true); +/// final agent = ToolsAgent.fromLLMAndTools( +/// llm: llm, +/// tools: tools, +/// memory: memory, +/// ); +/// ``` +/// +/// If you need to use your own [llmChain] make sure your prompt template +/// includes: +/// - `MessagePlaceholder(variableName: agentInputKey)`: the input to the agent. +/// - If you are using memory: +/// * `MessagesPlaceholder(variableName: '{memoryKey}')`: the history of chat +/// messages. +/// - If you are not using memory: +/// * `MessagesPlaceholder(variableName: BaseActionAgent.agentScratchpadInputKey)`: +/// the intermediary work of the agent (if you are using memory, the agent +/// uses the memory to store the intermediary work). 
+/// Example: +/// ```dart +/// ChatPromptTemplate.fromTemplates([ +/// (ChatMessageType.system, 'You are a helpful AI assistant'), +/// (ChatMessageType.messagesPlaceholder, 'history'), +/// (ChatMessageType.messagePlaceholder, 'input'), +/// ]); +/// ``` +/// +/// You can use [ToolsAgent.createPrompt] to build the prompt +/// template if you only need to customize the system message or add some +/// extra messages. +/// {@endtemplate} +class ToolsAgent extends BaseSingleActionAgent { + /// {@macro tools_agent} + ToolsAgent({ + required this.llmChain, + required super.tools, + }) : _parser = const ToolsAgentOutputParser(), + assert( + llmChain.memory != null || + llmChain.prompt.inputVariables + .contains(BaseActionAgent.agentScratchpadInputKey), + '`${BaseActionAgent.agentScratchpadInputKey}` should be one of the ' + 'variables in the prompt, got ${llmChain.prompt.inputVariables}', + ), + assert( + llmChain.memory == null || llmChain.memory!.returnMessages, + 'The memory must have `returnMessages` set to true', + ); + + /// Chain to use to call the LLM. + /// + /// If the chain does not have a memory, the prompt MUST include a variable + /// called [BaseActionAgent.agentScratchpadInputKey] where the agent can put + /// its intermediary work. + /// + /// If the chain has a memory, the agent will use the memory to store the + /// intermediary work. + /// + /// The memory must have [BaseChatMemory.returnMessages] set to true for + /// the agent to work properly. + final LLMChain llmChain; + + /// Parser to use to parse the output of the LLM. + final ToolsAgentOutputParser _parser; + + /// The key for the input to the agent. + static const agentInputKey = 'input'; + + @override + Set get inputKeys => {agentInputKey}; + + /// Construct an [ToolsAgent] from an [llm] and [tools]. + /// + /// - [llm] - The model to use for the agent. + /// - [tools] - The tools the agent has access to. You can omit this field if + /// you have already configured the tools in the [llm]. + /// - [memory] - The memory to use for the agent. + /// - [systemChatMessage] message to use as the system message that will be + /// the first in the prompt. Default: "You are a helpful AI assistant". + /// - [extraPromptMessages] prompt messages that will be placed between the + /// system message and the input from the agent. + factory ToolsAgent.fromLLMAndTools({ + required final BaseChatModel llm, + final List? tools, + final BaseChatMemory? memory, + final SystemChatMessagePromptTemplate systemChatMessage = + _systemChatMessagePromptTemplate, + final List? extraPromptMessages, + }) { + assert( + tools != null || llm.defaultOptions.tools != null, + 'Tools must be provided or configured in the llm', + ); + assert( + tools != null || llm.defaultOptions.tools!.every((tool) => tool is Tool), + 'All elements in `tools` must be of type `Tool` or its subclasses', + ); + + final actualTools = tools ?? 
llm.defaultOptions.tools!.cast(); + + return ToolsAgent( + llmChain: LLMChain( + llm: llm, + llmOptions: llm.defaultOptions.copyWith( + tools: actualTools, + ), + prompt: createPrompt( + systemChatMessage: systemChatMessage, + extraPromptMessages: extraPromptMessages, + memory: memory, + ), + memory: memory, + ), + tools: actualTools, + ); + } + + @override + Future> plan(final AgentPlanInput input) async { + final llmChainInputs = _constructLlmChainInputs( + input.intermediateSteps, + input.inputs, + ); + final ChainValues output = await llmChain.invoke(llmChainInputs); + final predictedMessage = output[LLMChain.defaultOutputKey] as AIChatMessage; + return _parser.parseChatMessage(predictedMessage); + } + + Map _constructLlmChainInputs( + final List intermediateSteps, + final InputValues inputs, + ) { + final dynamic agentInput; + + // If there is a memory, we pass the last agent step as a function message. + // Otherwise, we pass the input as a human message. + if (llmChain.memory != null && intermediateSteps.isNotEmpty) { + final lastStep = intermediateSteps.last; + final functionMsg = ChatMessage.tool( + toolCallId: lastStep.action.id, + content: lastStep.observation, + ); + agentInput = functionMsg; + } else { + agentInput = switch (inputs[agentInputKey]) { + final String inputStr => ChatMessage.humanText(inputStr), + final ChatMessage inputMsg => inputMsg, + final List inputMsgs => inputMsgs, + _ => throw LangChainException( + message: 'Agent expected a String or ChatMessage as input,' + ' got ${inputs[agentInputKey]}', + ), + }; + } + + return { + ...inputs, + agentInputKey: agentInput, + if (llmChain.memory == null) + BaseActionAgent.agentScratchpadInputKey: + _constructScratchPad(intermediateSteps), + }; + } + + List _constructScratchPad( + final List intermediateSteps, + ) { + return [ + ...intermediateSteps.map((final s) { + return s.action.messageLog + + [ + ChatMessage.tool( + toolCallId: s.action.id, + content: s.observation, + ), + ]; + }).expand((final m) => m), + ]; + } + + @override + String get agentType => 'tool-agent'; + + /// Creates prompt for this agent. + /// + /// It takes care of adding the necessary placeholders to handle the + /// intermediary work of the agent or the memory. + /// + /// - [systemChatMessage] message to use as the system message that will be + /// the first in the prompt. + /// - [extraPromptMessages] prompt messages that will be placed between the + /// system message and the new human input. + /// - [memory] optional memory to use for the agent. + static BasePromptTemplate createPrompt({ + final SystemChatMessagePromptTemplate systemChatMessage = + _systemChatMessagePromptTemplate, + final List? extraPromptMessages, + final BaseChatMemory? memory, + }) { + return ChatPromptTemplate.fromPromptMessages([ + systemChatMessage, + ...?extraPromptMessages, + for (final memoryKey in memory?.memoryKeys ?? {}) + MessagesPlaceholder(variableName: memoryKey), + const MessagePlaceholder(variableName: agentInputKey), + if (memory == null) + const MessagesPlaceholder( + variableName: BaseActionAgent.agentScratchpadInputKey, + ), + ]); + } +} + +/// {@template tools_agent_output_parser} +/// Parser for [ToolsAgent]. +/// +/// It parses the output of the LLM and returns the corresponding +/// [BaseAgentAction] to be executed. 
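+///
+/// A minimal usage sketch (here `result` is assumed to be a [ChatResult]
+/// obtained from a tool-calling chat model; it is not part of the example):
+/// ```dart
+/// const parser = ToolsAgentOutputParser();
+/// final actions = await parser.invoke(result);
+/// // Returns a list of [AgentAction]s if the model requested tool calls,
+/// // or a single [AgentFinish] with the final output otherwise.
+/// ```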
+/// {@endtemplate} +class ToolsAgentOutputParser extends BaseOutputParser> { + /// {@macro tools_agent_output_parser} + const ToolsAgentOutputParser() + : super(defaultOptions: const OutputParserOptions()); + + @override + Future> invoke( + final ChatResult input, { + final OutputParserOptions? options, + }) { + return parseChatMessage(input.output); + } + + /// Parses the [message] and returns the corresponding [BaseAgentAction]. + Future> parseChatMessage( + final AIChatMessage message, + ) async { + final toolCalls = message.toolCalls; + if (toolCalls.isNotEmpty) { + return toolCalls.map((final toolCall) { + return AgentAction( + id: toolCall.id, + tool: toolCall.name, + toolInput: toolCall.arguments, + log: 'Invoking: `${toolCall.name}` ' + 'with `${toolCall.arguments}`\n' + 'Responded: ${message.content}\n', + messageLog: [message], + ); + }).toList(growable: false); + } else { + return [ + AgentFinish( + returnValues: {'output': message.content}, + log: message.content, + ), + ]; + } + } +} diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 70437c8d..857d9c79 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -25,3 +25,6 @@ dependencies: dev_dependencies: test: ^1.25.8 + langchain_community: ^0.3.0 + langchain_openai: ^0.7.0 + langchain_ollama: ^0.3.0 diff --git a/packages/langchain/pubspec_overrides.yaml b/packages/langchain/pubspec_overrides.yaml index 3508ed77..65792891 100644 --- a/packages/langchain/pubspec_overrides.yaml +++ b/packages/langchain/pubspec_overrides.yaml @@ -1,4 +1,16 @@ -# melos_managed_dependency_overrides: langchain_core +# melos_managed_dependency_overrides: langchain_core,langchain_community,langchain_ollama,langchain_openai,ollama_dart,openai_dart,tavily_dart dependency_overrides: + langchain_community: + path: ../langchain_community langchain_core: path: ../langchain_core + langchain_ollama: + path: ../langchain_ollama + langchain_openai: + path: ../langchain_openai + ollama_dart: + path: ../ollama_dart + openai_dart: + path: ../openai_dart + tavily_dart: + path: ../tavily_dart diff --git a/packages/langchain/test/agents/assets/state_of_the_union.txt b/packages/langchain/test/agents/assets/state_of_the_union.txt new file mode 100644 index 00000000..d50175de --- /dev/null +++ b/packages/langchain/test/agents/assets/state_of_the_union.txt @@ -0,0 +1,723 @@ +Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. + +Last year COVID-19 kept us apart. This year we are finally together again. + +Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. + +With a duty to one another to the American people to the Constitution. + +And with an unwavering resolve that freedom will always triumph over tyranny. + +Six days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. + +He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. + +He met the Ukrainian people. + +From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. + +Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. 
+ +In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight. + +Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. + +Please rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. + +Throughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos. + +They keep moving. + +And the costs and the threats to America and the world keep rising. + +That’s why the NATO Alliance was created to secure peace and stability in Europe after World War 2. + +The United States is a member along with 29 other nations. + +It matters. American diplomacy matters. American resolve matters. + +Putin’s latest attack on Ukraine was premeditated and unprovoked. + +He rejected repeated efforts at diplomacy. + +He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. Putin was wrong. We were ready. Here is what we did. + +We prepared extensively and carefully. + +We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin. + +I spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression. + +We countered Russia’s lies with truth. + +And now that he has acted the free world is holding him accountable. + +Along with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland. + +We are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever. + +Together with our allies –we are right now enforcing powerful economic sanctions. + +We are cutting off Russia’s largest banks from the international financial system. + +Preventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless. + +We are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come. + +Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. + +The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. + +We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. + +And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value. + +The Russian stock market has lost 40% of its value and trading remains suspended. Russia’s economy is reeling and Putin alone is to blame. + +Together with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance. + +We are giving more than $1 Billion in direct assistance to Ukraine. + +And we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering. 
+ +Let me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine. + +Our forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies – in the event that Putin decides to keep moving west. + +For that purpose we’ve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia. + +As I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power. + +And we remain clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them. + +Putin has unleashed violence and chaos. But while he may make gains on the battlefield – he will pay a continuing high price over the long run. + +And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. + +To all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. + +And I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. + +Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. + +America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. + +These steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. + +But I want you to know that we are going to be okay. + +When the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. + +While it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly. + +We see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine. + +In the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. + +This is a real test. It’s going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people. + +To our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you. + +Putin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people. + +He will never extinguish their love of freedom. He will never weaken the resolve of the free world. + +We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. + +The pandemic has been punishing. + +And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. + +I understand. + +I remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. 
+ +That’s why one of the first things I did as President was fight to pass the American Rescue Plan. + +Because people were hurting. We needed to act, and we did. + +Few pieces of legislation have done more in a critical moment in our history to lift us out of crisis. + +It fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans. + +Helped put food on their table, keep a roof over their heads, and cut the cost of health insurance. + +And as my Dad used to say, it gave people a little breathing room. + +And unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind. + +And it worked. It created jobs. Lots of jobs. + +In fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year +than ever before in the history of America. + +Our economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasn’t worked for the working people of this nation for too long. + +For the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else. + +But that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. + +Vice President Harris and I ran for office with a new economic vision for America. + +Invest in America. Educate Americans. Grow the workforce. Build the economy from the bottom up +and the middle out, not from the top down. + +Because we know that when the middle class grows, the poor have a ladder up and the wealthy do very well. + +America used to have the best roads, bridges, and airports on Earth. + +Now our infrastructure is ranked 13th in the world. + +We won’t be able to compete for the jobs of the 21st Century if we don’t fix that. + +That’s why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history. + +This was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen. + +We’re done talking about infrastructure weeks. + +We’re going to have an infrastructure decade. + +It is going to transform America and put us on a path to win the economic competition of the 21st Century that we face with the rest of the world—particularly with China. + +As I’ve told Xi Jinping, it is never a good bet to bet against the American people. + +We’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. + +And we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice. + +We’ll build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities. + +4,000 projects have already been announced. + +And tonight, I’m announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair. + +When we use taxpayer dollars to rebuild America – we are going to Buy American: buy American products to support American jobs. 
+ +The federal government spends about $600 Billion a year to keep the country safe and secure. + +There’s been a law on the books for almost a century +to make sure taxpayers’ dollars support American jobs and businesses. + +Every Administration says they’ll do it, but we are actually doing it. + +We will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America. + +But to compete for the best jobs of the future, we also need to level the playing field with China and other competitors. + +That’s why it is so important to pass the Bipartisan Innovation Act sitting in Congress that will make record investments in emerging technologies and American manufacturing. + +Let me give you one example of why it’s so important to pass it. + +If you travel 20 miles east of Columbus, Ohio, you’ll find 1,000 empty acres of land. + +It won’t look like much, but if you stop and look closely, you’ll see a “Field of dreams,” the ground on which America’s future will be built. + +This is where Intel, the American company that helped build Silicon Valley, is going to build its $20 billion semiconductor “mega site”. + +Up to eight state-of-the-art factories in one place. 10,000 new good-paying jobs. + +Some of the most sophisticated manufacturing in the world to make computer chips the size of a fingertip that power the world and our everyday lives. + +Smartphones. The Internet. Technology we have yet to invent. + +But that’s just the beginning. + +Intel’s CEO, Pat Gelsinger, who is here tonight, told me they are ready to increase their investment from +$20 billion to $100 billion. + +That would be one of the biggest investments in manufacturing in American history. + +And all they’re waiting for is for you to pass this bill. + +So let’s not wait any longer. Send it to my desk. I’ll sign it. + +And we will really take off. + +And Intel is not alone. + +There’s something happening in America. + +Just look around and you’ll see an amazing story. + +The rebirth of the pride that comes from stamping products “Made In America.” The revitalization of American manufacturing. + +Companies are choosing to build new factories here, when just a few years ago, they would have built them overseas. + +That’s what is happening. Ford is investing $11 billion to build electric vehicles, creating 11,000 jobs across the country. + +GM is making the largest investment in its history—$7 billion to build electric vehicles, creating 4,000 jobs in Michigan. + +All told, we created 369,000 new manufacturing jobs in America just last year. + +Powered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight. + +As Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” + +It’s time. + +But with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills. + +Inflation is robbing them of the gains they might otherwise feel. + +I get it. That’s why my top priority is getting prices under control. + +Look, our economy roared back faster than most predicted, but the pandemic meant that businesses had a hard time hiring enough workers to keep up production in their factories. + +The pandemic also disrupted global supply chains. + +When factories close, it takes longer to make goods and get them from the warehouse to the store, and prices go up. + +Look at cars. 
+ +Last year, there weren’t enough semiconductors to make all the cars that people wanted to buy. + +And guess what, prices of automobiles went up. + +So—we have a choice. + +One way to fight inflation is to drive down wages and make Americans poorer. + +I have a better plan to fight inflation. + +Lower your costs, not your wages. + +Make more cars and semiconductors in America. + +More infrastructure and innovation in America. + +More goods moving faster and cheaper in America. + +More jobs where you can earn a good living in America. + +And instead of relying on foreign supply chains, let’s make it in America. + +Economists call it “increasing the productive capacity of our economy.” + +I call it building a better America. + +My plan to fight inflation will lower your costs and lower the deficit. + +17 Nobel laureates in economics say my plan will ease long-term inflationary pressures. Top business leaders and most Americans support my plan. And here’s the plan: + +First – cut the cost of prescription drugs. Just look at insulin. One in ten Americans has diabetes. In Virginia, I met a 13-year-old boy named Joshua Davis. + +He and his Dad both have Type 1 diabetes, which means they need insulin every day. Insulin costs about $10 a vial to make. + +But drug companies charge families like Joshua and his Dad up to 30 times more. I spoke with Joshua’s mom. + +Imagine what it’s like to look at your child who needs insulin and have no idea how you’re going to pay for it. + +What it does to your dignity, your ability to look your child in the eye, to be the parent you expect to be. + +Joshua is here with us tonight. Yesterday was his birthday. Happy birthday, buddy. + +For Joshua, and for the 200,000 other young people with Type 1 diabetes, let’s cap the cost of insulin at $35 a month so everyone can afford it. + +Drug companies will still do very well. And while we’re at it let Medicare negotiate lower prices for prescription drugs, like the VA already does. + +Look, the American Rescue Plan is helping millions of families on Affordable Care Act plans save $2,400 a year on their health care premiums. Let’s close the coverage gap and make those savings permanent. + +Second – cut energy costs for families an average of $500 a year by combatting climate change. + +Let’s provide investments and tax credits to weatherize your homes and businesses to be energy efficient and you get a tax credit; double America’s clean energy production in solar, wind, and so much more; lower the price of electric vehicles, saving you another $80 a month because you’ll never have to pay at the gas pump again. + +Third – cut the cost of child care. Many families pay up to $14,000 a year for child care per child. + +Middle-class and working families shouldn’t have to pay more than 7% of their income for care of young children. + +My plan will cut the cost in half for most families and help parents, including millions of women, who left the workforce during the pandemic because they couldn’t afford child care, to be able to get back to work. + +My plan doesn’t stop there. It also includes home and long-term care. More affordable housing. And Pre-K for every 3- and 4-year-old. + +All of these will lower costs. + +And under my plan, nobody earning less than $400,000 a year will pay an additional penny in new taxes. Nobody. + +The one thing all Americans agree on is that the tax system is not fair. We have to fix it. + +I’m not looking to punish anyone. 
But let’s make sure corporations and the wealthiest Americans start paying their fair share. + +Just last year, 55 Fortune 500 corporations earned $40 billion in profits and paid zero dollars in federal income tax. + +That’s simply not fair. That’s why I’ve proposed a 15% minimum tax rate for corporations. + +We got more than 130 countries to agree on a global minimum tax rate so companies can’t get out of paying their taxes at home by shipping jobs and factories overseas. + +That’s why I’ve proposed closing loopholes so the very wealthy don’t pay a lower tax rate than a teacher or a firefighter. + +So that’s my plan. It will grow the economy and lower costs for families. + +So what are we waiting for? Let’s get this done. And while you’re at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation. + +My plan will not only lower costs to give families a fair shot, it will lower the deficit. + +The previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted. + +But in my administration, the watchdogs have been welcomed back. + +We’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans. + +And tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. + +By the end of this year, the deficit will be down to less than half what it was before I took office. + +The only president ever to cut the deficit by more than one trillion dollars in a single year. + +Lowering your costs also means demanding more competition. + +I’m a capitalist, but capitalism without competition isn’t capitalism. + +It’s exploitation—and it drives up prices. + +When corporations don’t have to compete, their profits go up, your prices go up, and small businesses and family farmers and ranchers go under. + +We see it happening with ocean carriers moving goods in and out of America. + +During the pandemic, these foreign-owned companies raised prices by as much as 1,000% and made record profits. + +Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. + +And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. + +That ends on my watch. + +Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. + +We’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. + +Let’s pass the Paycheck Fairness Act and paid leave. + +Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. + +Let’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges. + +And let’s pass the PRO Act when a majority of workers want to form a union—they shouldn’t be stopped. + +When we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. + +For more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. + +And I know you’re tired, frustrated, and exhausted. 
+ +But I also know this. + +Because of the progress we’ve made, because of your resilience and the tools we have, tonight I can say +we are moving forward safely, back to more normal routines. + +We’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July. + +Just a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines. + +Under these new guidelines, most Americans in most of the country can now be mask free. + +And based on the projections, more of the country will reach that point across the next couple of weeks. + +Thanks to the progress we have made this past year, COVID-19 need no longer control our lives. + +I know some are talking about “living with COVID-19”. Tonight – I say that we will never just accept living with COVID-19. + +We will continue to combat the virus as we do other diseases. And because this is a virus that mutates and spreads, we will stay on guard. + +Here are four common sense steps as we move forward safely. + +First, stay protected with vaccines and treatments. We know how incredibly effective vaccines are. If you’re vaccinated and boosted you have the highest degree of protection. + +We will never give up on vaccinating more Americans. Now, I know parents with kids under 5 are eager to see a vaccine authorized for their children. + +The scientists are working hard to get that done and we’ll be ready with plenty of vaccines when they do. + +We’re also ready with anti-viral treatments. If you get COVID-19, the Pfizer pill reduces your chances of ending up in the hospital by 90%. + +We’ve ordered more of these pills than anyone in the world. And Pfizer is working overtime to get us 1 Million pills this month and more than double that next month. + +And we’re launching the “Test to Treat” initiative so people can get tested at a pharmacy, and if they’re positive, receive antiviral pills on the spot at no cost. + +If you’re immunocompromised or have some other vulnerability, we have treatments and free high-quality masks. + +We’re leaving no one behind or ignoring anyone’s needs as we move forward. + +And on testing, we have made hundreds of millions of tests available for you to order for free. + +Even if you already ordered free tests tonight, I am announcing that you can order more from covidtests.gov starting next week. + +Second – we must prepare for new variants. Over the past year, we’ve gotten much better at detecting new variants. + +If necessary, we’ll be able to deploy new vaccines within 100 days instead of many more months or years. + +And, if Congress provides the funds we need, we’ll have new stockpiles of tests, masks, and pills ready if needed. + +I cannot promise a new variant won’t come. But I can promise you we’ll do everything within our power to be ready if it does. + +Third – we can end the shutdown of schools and businesses. We have the tools we need. + +It’s time for Americans to get back to work and fill our great downtowns again. People working from home can feel safe to begin to return to the office. + +We’re doing that here in the federal government. The vast majority of federal workers will once again work in person. + +Our schools are open. Let’s keep it that way. Our kids need to be in school. + +And with 75% of adult Americans fully vaccinated and hospitalizations down by 77%, most Americans can remove their masks, return to work, stay in the classroom, and move forward safely. 
+ +We achieved this because we provided free vaccines, treatments, tests, and masks. + +Of course, continuing this costs money. + +I will soon send Congress a request. + +The vast majority of Americans have used these tools and may want to again, so I expect Congress to pass it quickly. + +Fourth, we will continue vaccinating the world. + +We’ve sent 475 Million vaccine doses to 112 countries, more than any other nation. + +And we won’t stop. + +We have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. + +Let’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. + +Let’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. + +We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. + +I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. + +They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. + +Officer Mora was 27 years old. + +Officer Rivera was 22. + +Both Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. + +I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. + +I’ve worked on these issues a long time. + +I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. + +So let’s not abandon our streets. Or choose between safety and equal justice. + +Let’s come together to protect our communities, restore trust, and hold law enforcement accountable. + +That’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. + +That’s why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope. + +We should all agree: The answer is not to Defund the police. The answer is to FUND the police with the resources and training they need to protect our communities. + +I ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe. + +And I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and can’t be traced. + +And I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon? + +Ban assault weapons and high-capacity magazines. + +Repeal the liability shield that makes gun manufacturers the only industry in America that can’t be sued. + +These laws don’t infringe on the Second Amendment. They save lives. + +The most fundamental right in America is the right to vote – and to have it counted. And it’s under assault. + +In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. + +We cannot let this happen. + +Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. 
And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. + +Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. + +One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. + +And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. + +A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. + +And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. + +We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. + +We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. + +We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. + +We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders. + +We can do all this while keeping lit the torch of liberty that has led generations of immigrants to this land—my forefathers and so many of yours. + +Provide a pathway to citizenship for Dreamers, those on temporary status, farm workers, and essential workers. + +Revise our laws so businesses have the workers they need and families don’t wait decades to reunite. + +It’s not only the right thing to do—it’s the economically smart thing to do. + +That’s why immigration reform is supported by everyone from labor unions to religious leaders to the U.S. Chamber of Commerce. + +Let’s get it done once and for all. + +Advancing liberty and justice also requires protecting the rights of women. + +The constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before. + +If we want to go forward—not backward—we must protect access to health care. Preserve a woman’s right to choose. And let’s continue to advance maternal health care in America. + +And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. + +As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. + +While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. + +And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. + +So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. + +First, beat the opioid epidemic. 
+ +There is so much we can do. Increase funding for prevention, treatment, harm reduction, and recovery. + +Get rid of outdated rules that stop doctors from prescribing treatments. And stop the flow of illicit drugs by working with state and local law enforcement to go after traffickers. + +If you’re suffering from addiction, know you are not alone. I believe in recovery, and I celebrate the 23 million Americans in recovery. + +Second, let’s take on mental health. Especially among our children, whose lives and education have been turned upside down. + +The American Rescue Plan gave schools money to hire teachers and help students make up for lost learning. + +I urge every parent to make sure your school does just that. And we can all play a part—sign up to be a tutor or a mentor. + +Children were also struggling before the pandemic. Bullying, violence, trauma, and the harms of social media. + +As Frances Haugen, who is here with us tonight, has shown, we must hold social media platforms accountable for the national experiment they’re conducting on our children for profit. + +It’s time to strengthen privacy protections, ban targeted advertising to children, demand tech companies stop collecting personal data on our children. + +And let’s get all Americans the mental health services they need. More people they can turn to for help, and full parity between physical and mental health care. + +Third, support our veterans. + +Veterans are the best of us. + +I’ve always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home. + +My administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free. + +Our troops in Iraq and Afghanistan faced many dangers. + +One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. + +When they came home, many of the world’s fittest and best trained warriors were never the same. + +Headaches. Numbness. Dizziness. + +A cancer that would put them in a flag-draped coffin. + +I know. + +One of those soldiers was my son Major Beau Biden. + +We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. + +But I’m committed to finding out everything we can. + +Committed to military families like Danielle Robinson from Ohio. + +The widow of Sergeant First Class Heath Robinson. + +He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. + +Stationed near Baghdad, just yards from burn pits the size of football fields. + +Heath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter. + +But cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. + +Danielle says Heath was a fighter to the very end. + +He didn’t know how to stop fighting, and neither did she. + +Through her pain she found purpose to demand we do better. + +Tonight, Danielle—we are. + +The VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits. + +And tonight, I’m announcing we’re expanding eligibility to veterans suffering from nine respiratory cancers. + +I’m also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve. 
+ +And fourth, let’s end cancer as we know it. + +This is personal to me and Jill, to Kamala, and to so many of you. + +Cancer is the #2 cause of death in America–second only to heart disease. + +Last month, I announced our plan to supercharge +the Cancer Moonshot that President Obama asked me to lead six years ago. + +Our goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases. + +More support for patients and families. + +To get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. + +It’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. + +ARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. + +A unity agenda for the nation. + +We can do this. + +My fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. + +In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. + +We have fought for freedom, expanded liberty, defeated totalitarianism and terror. + +And built the strongest, freest, and most prosperous nation the world has ever known. + +Now is the hour. + +Our moment of responsibility. + +Our test of resolve and conscience, of history itself. + +It is in this moment that our character is formed. Our purpose is found. Our future is forged. + +Well I know this nation. + +We will meet the test. + +To protect freedom and liberty, to expand fairness and opportunity. + +We will save democracy. + +As hard as these times have been, I am more optimistic about America today than I have been my whole life. + +Because I see the future that is within our grasp. + +Because I know there is simply nothing beyond our capacity. + +We are the only nation on Earth that has always turned every crisis we have faced into an opportunity. + +The only nation that can be defined by a single word: possibilities. + +So on this night, in our 245th year as a nation, I have come to report on the State of the Union. + +And my report is this: the State of the Union is strong—because you, the American people, are strong. + +We are stronger today than we were a year ago. + +And we will be stronger a year from now than we are today. + +Now is our moment to meet and overcome the challenges of our time. + +And we will, as one people. + +One America. + +The United States of America. + +May God bless you all. May God protect our troops. 
\ No newline at end of file diff --git a/packages/langchain/test/agents/tools_test.dart b/packages/langchain/test/agents/tools_test.dart new file mode 100644 index 00000000..8e2d7852 --- /dev/null +++ b/packages/langchain/test/agents/tools_test.dart @@ -0,0 +1,224 @@ +@TestOn('vm') +@Timeout(Duration(minutes: 50)) +library; // Uses dart:io + +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; +import 'package:langchain_openai/langchain_openai.dart'; +import 'package:meta/meta.dart'; +import 'package:test/test.dart'; + +void main() { + late BaseChatModel llm; + const defaultOllamaModel = 'llama3-groq-tool-use'; + const defaultOpenAIModel = 'gpt-4o-mini'; + + group('ChatToolsAgent using Ollama tests', () { + setUp(() async { + llm = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: defaultOllamaModel, + temperature: 0, + tools: [CalculatorTool(), searchTool], + ), + ); + }); + + test('Test ChatToolsAgent with calculator tool', () async { + await testAgentWithCalculator(llm); + }); + + test('Test ToolsAgent with messages memory', () async { + await testMemory(llm, returnMessages: true); + }); + + test('Test ToolsAgent with string memory throws error', () async { + expect( + () async => testMemory(llm, returnMessages: false), + throwsA(isA()), + ); + }); + + test('Test ToolsAgent LCEL equivalent using Ollama', () async { + final res = + await testLCDLEquivalent(llm: llm, tool: CalculatorTool()).invoke({ + 'input': 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + }); + expect(res['output'], contains('4.88')); + }); + }); + + group('ChatToolsAgent using OpenAi tests', () { + setUp(() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + llm = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: defaultOpenAIModel, + tools: [CalculatorTool(), searchTool], + ), + ); + }); + + test('Test ChatToolsAgent with calculator tool', () async { + await testAgentWithCalculator(llm); + }); + + test('Test ToolsAgent with messages memory', () async { + await testMemory(llm, returnMessages: true); + }); + + test('Test ToolsAgent with string memory throws error', () async { + expect( + () async => testMemory(llm, returnMessages: false), + throwsA(isA()), + ); + }); + + test('Test ToolsAgent LCEL equivalent using OpenAi', () async { + final res = + await testLCDLEquivalent(llm: llm, tool: CalculatorTool()).invoke({ + 'input': 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + }); + expect(res['output'], contains('4.88')); + }); + }); +} + +Future testAgentWithCalculator( + BaseChatModel llm, +) async { + final agent = ToolsAgent.fromLLMAndTools( + llm: llm, + ); + final executor = AgentExecutor(agent: agent); + final res = await executor.run( + 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + ); + expect(res, contains('4.885')); +} + +Future testMemory( + BaseChatModel llm, { + required final bool returnMessages, +}) async { + final memory = ConversationBufferMemory(returnMessages: returnMessages); + final agent = ToolsAgent.fromLLMAndTools( + llm: llm, + memory: memory, + ); + + final executor = AgentExecutor(agent: agent); + + final res1 = await executor.run( + 'Search for cat names. 
Return only 3 results.', + ); + + expect(res1, contains('AAA')); + expect(res1, contains('BBB')); + expect(res1, contains('CCC')); + expect(res1, isNot(contains('DDD'))); + + final res2 = await executor.run( + 'How many results did the search return? Respond with a number.', + ); + expect(res2, contains('3')); + expect(res2, isNot(contains('1'))); + expect(res2, isNot(contains('2'))); + expect(res2, isNot(contains('4'))); + + final res3 = await executor.run('What was the last result?'); + expect(res3, contains('CCC')); +} + +AgentExecutor testLCDLEquivalent({ + required BaseChatModel llm, + required Tool tool, +}) { + final prompt = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are a helpful assistant'), + (ChatMessageType.human, '{input}'), + (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), + ]); + + final agent = Agent.fromRunnable( + Runnable.mapInput( + (AgentPlanInput planInput) => { + 'input': planInput.inputs['input'], + 'agent_scratchpad': planInput.intermediateSteps + .map((s) { + return s.action.messageLog + + [ + ChatMessage.tool( + toolCallId: s.action.id, + content: s.observation, + ), + ]; + }) + .expand((m) => m) + .toList(growable: false), + }, + ).pipe(prompt).pipe(llm).pipe(const ToolsAgentOutputParser()), + tools: [tool], + ); + + return AgentExecutor(agent: agent); +} + +@immutable +class _SearchInput { + const _SearchInput({ + required this.query, + required this.n, + }); + + final String query; + final int n; + + _SearchInput.fromJson(final Map json) + : this( + query: json['query'] as String, + n: json['n'] as int, + ); + + @override + bool operator ==(covariant _SearchInput other) => + identical(this, other) || query == other.query && n == other.n; + + @override + int get hashCode => query.hashCode ^ n.hashCode; +} + +final searchTool = Tool.fromFunction<_SearchInput, String>( + name: 'search', + description: 'Tool for searching the web.', + inputJsonSchema: const { + 'type': 'object', + 'properties': { + 'query': { + 'type': 'string', + 'description': 'The query to search for', + }, + 'n': { + 'type': 'integer', + 'description': 'The number of results to return', + }, + }, + 'required': ['query'], + }, + func: (final _SearchInput toolInput) async { + final n = toolInput.n; + final res = List.generate( + n, + (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}', + ); + return 'Results:\n${res.join('\n')}'; + }, + getInputFromJson: _SearchInput.fromJson, +); From 7be6bfec8f805921fda38423b76b2522427c9bb5 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 21 Aug 2024 20:18:48 +0200 Subject: [PATCH 216/251] feat: Deprecate OpenAIToolsAgent in favour of ToolsAgent (#532) --- docs/_sidebar.md | 1 - .../agents/agent_types/openai_tools_agent.md | 173 ------------------ docs/modules/agents/agents.md | 4 +- docs/modules/agents/tools/calculator.md | 2 +- docs/modules/agents/tools/openai_dall_e.md | 2 +- .../agent_types/openai_tools_agent.dart | 150 --------------- .../bin/modules/agents/tools/calculator.dart | 2 +- .../modules/agents/tools/openai_dalle.dart | 2 +- ...{tools_test.dart => tools_agent_test.dart} | 4 +- .../lib/src/tools/calculator.dart | 2 +- .../langchain_core/lib/src/prompts/types.dart | 2 +- .../test/runnables/map_test.dart | 5 +- .../src/chat_models/google_ai/mappers.dart | 3 +- .../google_ai/google_ai_embeddings.dart | 3 +- packages/langchain_openai/README.md | 2 - .../langchain_openai/lib/fix_data/fix.yaml | 19 ++ .../lib/src/agents/tools.dart | 18 +- .../lib/src/tools/dall_e.dart | 2 +- 
.../test/agents/tools_test.dart | 1 + .../test/tools/dall_e_test.dart | 4 +- 20 files changed, 55 insertions(+), 346 deletions(-) delete mode 100644 docs/modules/agents/agent_types/openai_tools_agent.md delete mode 100644 examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart rename packages/langchain/test/agents/{tools_test.dart => tools_agent_test.dart} (98%) create mode 100644 packages/langchain_openai/lib/fix_data/fix.yaml diff --git a/docs/_sidebar.md b/docs/_sidebar.md index 92d4f394..a5572f4d 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -122,7 +122,6 @@ - [Agents](/modules/agents/agents.md) - [Agent types](/modules/agents/agent_types/agent_types.md) - [Tools Agent](/modules/agents/agent_types/tools_agent.md) - - [OpenAI Tools Agent](/modules/agents/agent_types/openai_tools_agent.md) - [Tools](/modules/agents/tools/tools.md) - [Calculator](/modules/agents/tools/calculator.md) - [DALL-E Image Generator](/modules/agents/tools/openai_dall_e.md) diff --git a/docs/modules/agents/agent_types/openai_tools_agent.md b/docs/modules/agents/agent_types/openai_tools_agent.md deleted file mode 100644 index db68921e..00000000 --- a/docs/modules/agents/agent_types/openai_tools_agent.md +++ /dev/null @@ -1,173 +0,0 @@ -# OpenAI tools - -Certain OpenAI models (like `gpt-3.5-turbo` and `gpt-4`) have been -fine-tuned to detect when a tool should to be called and respond with the -inputs that should be passed to the tool. In an API call, you can describe -tools and have the model intelligently choose to output a JSON object -containing arguments to call those tools. The goal of the OpenAI Function -APIs is to more reliably return valid and useful tool calls than a generic -text completion or chat API. - -The OpenAI Tools Agent is designed to work with these models. - -> **Note**: Must be used with an [OpenAI Tools](https://platform.openai.com/docs/guides/function-calling) model. - -```dart -final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - model: 'gpt-4-turbo', - temperature: 0, - ), -); -final tool = CalculatorTool(); -final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); -final executor = AgentExecutor(agent: agent); -final res = await executor.run('What is 40 raised to the 0.43 power? '); -print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' -``` - -You can easily call your own functions by wrapping them in a `Tool`. You can also add memory to the agent by passing it when creating the agent. - -Let's see an example of how to do this. - -First let's create a class that will be the input for our tool. - -```dart -class SearchInput { - const SearchInput({ - required this.query, - required this.n, - }); - - final String query; - final int n; - - SearchInput.fromJson(final Map json) - : this( - query: json['query'] as String, - n: json['n'] as int, - ); -} -``` - -Now let's define the tool: - -```dart -final tool = Tool.fromFunction( - name: 'search', - description: 'Tool for searching the web.', - inputJsonSchema: const { - 'type': 'object', - 'properties': { - 'query': { - 'type': 'string', - 'description': 'The query to search for', - }, - 'n': { - 'type': 'number', - 'description': 'The number of results to return', - }, - }, - 'required': ['query'], - }, - func: callYourSearchFunction, - getInputFromJson: SearchInput.fromJson, -); -``` - -Notice that we need to provide a function that converts the JSON input that the model will send to our tool into the input class that we defined. 
- -The tool will call `callYourSearchFunction` function with the parsed input. For simplicity, we will just mock the search function. -```dart -String callYourSearchFunction(final SearchInput input) { - return 'Results:\n${List.generate(input.n, (final i) => 'Result ${i + 1}').join('\n')}'; -} -``` - -Now we can create the agent and run it. - -```dart -final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions(temperature: 0), -); - -final memory = ConversationBufferMemory(returnMessages: true); -final agent = OpenAIToolsAgent.fromLLMAndTools( - llm: llm, - tools: [tool], - memory: memory, -); - -final executor = AgentExecutor(agent: agent); - -final res1 = await executor.run( - 'Search for cats. Return only 3 results.', -); -print(res1); -// Here are 3 search results for "cats": -// 1. Result 1 -// 2. Result 2 -// 3. Result 3 -``` - -## Using LangChain Expression Language (LCEL) - -You can replicate the functionality of the OpenAI Functions Agent by using the LangChain Expression Language (LCEL) directly. - -```dart -final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - -final prompt = ChatPromptTemplate.fromTemplates(const [ - (ChatMessageType.system, 'You are a helpful assistant'), - (ChatMessageType.human, '{input}'), - (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), -]); - -final tool = CalculatorTool(); - -final model = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - temperature: 0, - tools: [tool], - ), -); - -const outputParser = OpenAIToolsAgentOutputParser(); - -List buildScratchpad(final List intermediateSteps) { - return intermediateSteps - .map((final s) { - return s.action.messageLog + - [ - ChatMessage.tool( - toolCallId: s.action.id, - content: s.observation, - ), - ]; - }) - .expand((final m) => m) - .toList(growable: false); -} - -final agent = Agent.fromRunnable( - Runnable.mapInput( - (final AgentPlanInput planInput) => { - 'input': planInput.inputs['input'], - 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), - }, - ).pipe(prompt).pipe(model).pipe(outputParser), - tools: [tool], -); -final executor = AgentExecutor(agent: agent); - -final res = await executor.invoke({ - 'input': 'What is 40 raised to the 0.43 power?', -}); -print(res['output']); -// 40 raised to the power of 0.43 is approximately 4.88524. -``` - -In this way, you can create your own custom agents with full control over their behavior. diff --git a/docs/modules/agents/agents.md b/docs/modules/agents/agents.md index ab56353c..78004d19 100644 --- a/docs/modules/agents/agents.md +++ b/docs/modules/agents/agents.md @@ -75,7 +75,7 @@ First, let's load the language model we're going to use to control the agent. ```dart final llm = ChatOpenAI( apiKey: openAiKey, - defaultOptions: const ChatOpenAIOptions(temperature: 0), + defaultOptions: ChatOpenAIOptions(temperature: 0), ); ``` @@ -91,7 +91,7 @@ Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use. ```dart -final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); ``` Now let's create the agent executor and test it out! 
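Below is a minimal sketch of that final step, assuming the same `AgentExecutor` API used by the other examples in this patch (the full `agents.md` snippet lies outside this hunk's context, so the exact wording there may differ):

```dart
// Wrap the agent in an executor, which runs the tool-calling loop for us.
final executor = AgentExecutor(agent: agent);

// run() returns the agent's final answer as a String.
final res = await executor.run('What is 40 raised to the 0.43 power?');
print(res); // e.g. '40 raised to the power of 0.43 is approximately 4.8852'
```

As in the other examples, the agent decides which tool to call at each step and the executor keeps invoking it until a final answer is produced.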
diff --git a/docs/modules/agents/tools/calculator.md b/docs/modules/agents/tools/calculator.md index 0847f2eb..fe9f127c 100644 --- a/docs/modules/agents/tools/calculator.md +++ b/docs/modules/agents/tools/calculator.md @@ -14,7 +14,7 @@ final llm = ChatOpenAI( ), ); final tool = CalculatorTool(); -final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); +final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); final executor = AgentExecutor(agent: agent); final res = await executor.run('What is 40 raised to the 0.43 power? '); print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' diff --git a/docs/modules/agents/tools/openai_dall_e.md b/docs/modules/agents/tools/openai_dall_e.md index 9d30914b..426f4d89 100644 --- a/docs/modules/agents/tools/openai_dall_e.md +++ b/docs/modules/agents/tools/openai_dall_e.md @@ -18,7 +18,7 @@ final tools = [ CalculatorTool(), OpenAIDallETool(apiKey: openAiKey), ]; -final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); final executor = AgentExecutor(agent: agent); final res = await executor.run( 'Calculate the result of 40 raised to the power of 0.43 and generate a funny illustration with it. ' diff --git a/examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart b/examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart deleted file mode 100644 index 16d0b44f..00000000 --- a/examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart +++ /dev/null @@ -1,150 +0,0 @@ -// ignore_for_file: avoid_print, unreachable_from_main -import 'dart:io'; - -import 'package:langchain/langchain.dart'; -import 'package:langchain_community/langchain_community.dart'; -import 'package:langchain_openai/langchain_openai.dart'; - -void main() async { - await _openAIToolsAgent(); - await _openAIToolsAgentCustomToolsMemory(); - await _openAIToolsAgentLCEL(); -} - -Future _openAIToolsAgent() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', - temperature: 0, - ), - ); - final tool = CalculatorTool(); - final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); - final executor = AgentExecutor(agent: agent); - final res = await executor.run('What is 40 raised to the 0.43 power? '); - print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' -} - -Future _openAIToolsAgentCustomToolsMemory() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - - final tool = Tool.fromFunction( - name: 'search', - description: 'Tool for searching the web.', - inputJsonSchema: const { - 'type': 'object', - 'properties': { - 'query': { - 'type': 'string', - 'description': 'The query to search for', - }, - 'n': { - 'type': 'number', - 'description': 'The number of results to return', - }, - }, - 'required': ['query'], - }, - func: callYourSearchFunction, - getInputFromJson: SearchInput.fromJson, - ); - - final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions(temperature: 0), - ); - - final memory = ConversationBufferMemory(returnMessages: true); - final agent = OpenAIToolsAgent.fromLLMAndTools( - llm: llm, - tools: [tool], - memory: memory, - ); - - final executor = AgentExecutor(agent: agent); - - final res1 = await executor.run( - 'Search for cats. 
Return only 3 results.', - ); - print(res1); - // Here are 3 search results for "cats": - // 1. Result 1 - // 2. Result 2 - // 3. Result 3 -} - -class SearchInput { - const SearchInput({ - required this.query, - required this.n, - }); - - final String query; - final int n; - - SearchInput.fromJson(final Map json) - : this( - query: json['query'] as String, - n: json['n'] as int, - ); -} - -String callYourSearchFunction(final SearchInput input) { - return 'Results:\n${List.generate(input.n, (final i) => 'Result ${i + 1}').join('\n')}'; -} - -Future _openAIToolsAgentLCEL() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - - final prompt = ChatPromptTemplate.fromTemplates(const [ - (ChatMessageType.system, 'You are a helpful assistant'), - (ChatMessageType.human, '{input}'), - (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), - ]); - - final tool = CalculatorTool(); - - final model = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - temperature: 0, - tools: [tool], - ), - ); - - const outputParser = OpenAIToolsAgentOutputParser(); - - List buildScratchpad(final List intermediateSteps) { - return intermediateSteps - .map((final s) { - return s.action.messageLog + - [ - ChatMessage.tool( - toolCallId: s.action.id, - content: s.observation, - ), - ]; - }) - .expand((final m) => m) - .toList(growable: false); - } - - final agent = Agent.fromRunnable( - Runnable.mapInput( - (final AgentPlanInput planInput) => { - 'input': planInput.inputs['input'], - 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), - }, - ).pipe(prompt).pipe(model).pipe(outputParser), - tools: [tool], - ); - final executor = AgentExecutor(agent: agent); - - final res = await executor.invoke({ - 'input': 'What is 40 raised to the 0.43 power?', - }); - print(res['output']); - // 40 raised to the power of 0.43 is approximately 4.88524. -} diff --git a/examples/docs_examples/bin/modules/agents/tools/calculator.dart b/examples/docs_examples/bin/modules/agents/tools/calculator.dart index 5d92dc27..acab2d65 100644 --- a/examples/docs_examples/bin/modules/agents/tools/calculator.dart +++ b/examples/docs_examples/bin/modules/agents/tools/calculator.dart @@ -15,7 +15,7 @@ void main() async { ), ); final tool = CalculatorTool(); - final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); + final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); final executor = AgentExecutor(agent: agent); final res = await executor.run('What is 40 raised to the 0.43 power? '); print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' diff --git a/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart b/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart index f62d7bde..7144ea82 100644 --- a/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart +++ b/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart @@ -18,7 +18,7 @@ void main() async { CalculatorTool(), OpenAIDallETool(apiKey: openAiKey), ]; - final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); + final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); final executor = AgentExecutor(agent: agent); final res = await executor.run( 'Calculate the result of 40 raised to the power of 0.43 and generate a funny illustration with it. 
' diff --git a/packages/langchain/test/agents/tools_test.dart b/packages/langchain/test/agents/tools_agent_test.dart similarity index 98% rename from packages/langchain/test/agents/tools_test.dart rename to packages/langchain/test/agents/tools_agent_test.dart index 8e2d7852..e879ba88 100644 --- a/packages/langchain/test/agents/tools_test.dart +++ b/packages/langchain/test/agents/tools_agent_test.dart @@ -16,13 +16,15 @@ void main() { const defaultOllamaModel = 'llama3-groq-tool-use'; const defaultOpenAIModel = 'gpt-4o-mini'; - group('ChatToolsAgent using Ollama tests', () { + group('ChatToolsAgent using Ollama tests', + skip: Platform.environment.containsKey('CI'), () { setUp(() async { llm = ChatOllama( defaultOptions: ChatOllamaOptions( model: defaultOllamaModel, temperature: 0, tools: [CalculatorTool(), searchTool], + keepAlive: 1, ), ); }); diff --git a/packages/langchain_community/lib/src/tools/calculator.dart b/packages/langchain_community/lib/src/tools/calculator.dart index 9f41a130..26becb93 100644 --- a/packages/langchain_community/lib/src/tools/calculator.dart +++ b/packages/langchain_community/lib/src/tools/calculator.dart @@ -14,7 +14,7 @@ import 'package:math_expressions/math_expressions.dart'; /// temperature: 0, /// ); /// final tool = CalculatorTool(); -/// final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); +/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); /// final executor = AgentExecutor(agent: agent); /// final res = await executor.run('What is 40 raised to the 0.43 power? '); /// print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' diff --git a/packages/langchain_core/lib/src/prompts/types.dart b/packages/langchain_core/lib/src/prompts/types.dart index c2a9474b..c68f0979 100644 --- a/packages/langchain_core/lib/src/prompts/types.dart +++ b/packages/langchain_core/lib/src/prompts/types.dart @@ -145,7 +145,7 @@ class ChatPromptValue implements PromptValue { return message.concat(otherMessage); } }) - .whereNotNull() + .nonNulls .toList(growable: false), ), }; diff --git a/packages/langchain_core/test/runnables/map_test.dart b/packages/langchain_core/test/runnables/map_test.dart index 98a4a3ff..e65dc73a 100644 --- a/packages/langchain_core/test/runnables/map_test.dart +++ b/packages/langchain_core/test/runnables/map_test.dart @@ -1,5 +1,4 @@ // ignore_for_file: unused_element -import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/llms.dart'; import 'package:langchain_core/output_parsers.dart'; @@ -43,11 +42,11 @@ void main() { final left = streamList .map((final it) => it['left']) // - .whereNotNull() + .nonNulls .join(); final right = streamList .map((final it) => it['right']) // - .whereNotNull() + .nonNulls .join(); expect(left, 'Hello world!'); diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart index 106bf60b..521921ac 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart @@ -1,7 +1,6 @@ // ignore_for_file: public_member_api_docs import 'dart:convert'; -import 'package:collection/collection.dart'; import 'package:google_generative_ai/google_generative_ai.dart' as g; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/language_models.dart'; @@ -96,7 +95,7 @@ extension GenerateContentResponseMapper on 
g.GenerateContentResponse { _ => throw AssertionError('Unknown part type: $p'), }, ) - .whereNotNull() + .nonNulls .join('\n'), toolCalls: candidate.content.parts .whereType() diff --git a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart index b5996abd..385f1088 100644 --- a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart +++ b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart @@ -1,4 +1,3 @@ -import 'package:collection/collection.dart' show IterableNullableExtension; import 'package:google_generative_ai/google_generative_ai.dart' show Content, EmbedContentRequest, GenerativeModel, TaskType; import 'package:http/http.dart' as http; @@ -175,7 +174,7 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { ); return data.embeddings .map((final p) => p.values) - .whereNotNull() + .nonNulls .toList(growable: false); }), ); diff --git a/packages/langchain_openai/README.md b/packages/langchain_openai/README.md index b7a080da..39073b3b 100644 --- a/packages/langchain_openai/README.md +++ b/packages/langchain_openai/README.md @@ -20,8 +20,6 @@ OpenAI module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart * `OpenAIQAWithStructureChain` a chain that answer questions in the specified structure. * `OpenAIQAWithSourcesChain`: a chain that answer questions providing sources. -- Agents: - * `OpenAIToolsAgent`: an agent driven by OpenAIs Tools powered API. - Tools: * `OpenAIDallETool`: a tool that uses DallE to generate images from text. diff --git a/packages/langchain_openai/lib/fix_data/fix.yaml b/packages/langchain_openai/lib/fix_data/fix.yaml new file mode 100644 index 00000000..5db14fd0 --- /dev/null +++ b/packages/langchain_openai/lib/fix_data/fix.yaml @@ -0,0 +1,19 @@ +version: 1 + +transforms: + - title: "Migrate to 'ToolsAgent'" + date: 2024-08-21 + element: + uris: ['langchain_openai.dart', 'src/agents/tools.dart'] + class: 'OpenAIToolsAgent' + changes: + - kind: 'rename' + newName: 'ToolsAgent' + - title: "Migrate to 'ToolsAgentOutputParser'" + date: 2024-08-21 + element: + uris: ['langchain_openai.dart', 'src/agents/tools.dart'] + class: 'OpenAIToolsAgentOutputParser' + changes: + - kind: 'rename' + newName: 'ToolsAgentOutputParser' diff --git a/packages/langchain_openai/lib/src/agents/tools.dart b/packages/langchain_openai/lib/src/agents/tools.dart index 2867427d..a1a13583 100644 --- a/packages/langchain_openai/lib/src/agents/tools.dart +++ b/packages/langchain_openai/lib/src/agents/tools.dart @@ -1,3 +1,4 @@ +// ignore_for_file: deprecated_member_use_from_same_package import 'package:langchain_core/agents.dart'; import 'package:langchain_core/chains.dart'; import 'package:langchain_core/chat_models.dart'; @@ -17,6 +18,11 @@ const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( ); /// {@template openai_tools_agent} +/// > Note: This class is deprecated. Use `ToolsAgent` (from the `langchain` +/// > package) instead. It works with the same API as this class, but can be +/// > used with any provider that supports tool calling. +/// > You can run `dart fix --apply` to automatically update your code. +/// /// An Agent driven by OpenAI's Tools powered API.
/// /// Example: /// ```dart /// final llm = ChatOpenAI( /// apiKey: openaiApiKey, /// model: 'gpt-4-turbo', /// temperature: 0, /// ); /// final tools = [CalculatorTool()]; -/// final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); /// final executor = AgentExecutor(agent: agent); /// final res = await executor.run('What is 40 raised to the 0.43 power? '); /// ``` @@ -69,8 +75,10 @@ const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( /// template if you only need to customize the system message or add some /// extra messages. /// {@endtemplate} +@Deprecated('Use ToolsAgent instead') class OpenAIToolsAgent extends BaseSingleActionAgent { /// {@macro openai_functions_agent} + @Deprecated('Use ToolsAgent instead') OpenAIToolsAgent({ required this.llmChain, required super.tools, @@ -118,6 +126,7 @@ class OpenAIToolsAgent extends BaseSingleActionAgent { /// the first in the prompt. Default: "You are a helpful AI assistant". /// - [extraPromptMessages] prompt messages that will be placed between the /// system message and the input from the agent. + @Deprecated('Use ToolsAgent.fromLLMAndTools() instead') factory OpenAIToolsAgent.fromLLMAndTools({ required final ChatOpenAI llm, required final List tools, @@ -241,14 +250,21 @@ class OpenAIToolsAgent extends BaseSingleActionAgent { } /// {@template openai_tools_agent_output_parser} +/// > Note: This class is deprecated. Use `ToolsAgentOutputParser` (from the +/// > `langchain` package) instead. It is equivalent to this class, but +/// > designed to work with the `ToolsAgent`. +/// > You can run `dart fix --apply` to automatically update your code. +/// /// Parser for [OpenAIToolsAgent]. /// /// It parses the output of the LLM and returns the corresponding /// [BaseAgentAction] to be executed.
/// {@endtemplate} +@Deprecated('Use ToolsAgentOutputParser instead') class OpenAIToolsAgentOutputParser extends BaseOutputParser> { /// {@macro openai_tools_agent_output_parser} + @Deprecated('Use ToolsAgentOutputParser instead') const OpenAIToolsAgentOutputParser() : super(defaultOptions: const OutputParserOptions()); diff --git a/packages/langchain_openai/lib/src/tools/dall_e.dart b/packages/langchain_openai/lib/src/tools/dall_e.dart index 3ce66fd9..aefba7b9 100644 --- a/packages/langchain_openai/lib/src/tools/dall_e.dart +++ b/packages/langchain_openai/lib/src/tools/dall_e.dart @@ -34,7 +34,7 @@ export 'package:openai_dart/openai_dart.dart' /// ), /// ), /// ]; -/// final agent = OpenAIToolsAgent.fromLLMAndTools( +/// final agent = ToolsAgent.fromLLMAndTools( /// llm: llm, /// tools: tools, /// ); diff --git a/packages/langchain_openai/test/agents/tools_test.dart b/packages/langchain_openai/test/agents/tools_test.dart index 57342631..c08f4979 100644 --- a/packages/langchain_openai/test/agents/tools_test.dart +++ b/packages/langchain_openai/test/agents/tools_test.dart @@ -1,3 +1,4 @@ +// ignore_for_file: deprecated_member_use_from_same_package @TestOn('vm') library; // Uses dart:io diff --git a/packages/langchain_openai/test/tools/dall_e_test.dart b/packages/langchain_openai/test/tools/dall_e_test.dart index 5a9aba09..7a8a8407 100644 --- a/packages/langchain_openai/test/tools/dall_e_test.dart +++ b/packages/langchain_openai/test/tools/dall_e_test.dart @@ -4,7 +4,7 @@ library; // Uses dart:io import 'dart:io'; -import 'package:langchain/langchain.dart' show AgentExecutor; +import 'package:langchain/langchain.dart' show AgentExecutor, ToolsAgent; import 'package:langchain_community/langchain_community.dart'; import 'package:langchain_core/tools.dart'; import 'package:langchain_openai/langchain_openai.dart'; @@ -62,7 +62,7 @@ void main() { ), ]; - final agent = OpenAIToolsAgent.fromLLMAndTools( + final agent = ToolsAgent.fromLLMAndTools( llm: llm, tools: tools, ); From 335bdaa5362dd2c49bccc51d4d23a3ca964a83ba Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 21 Aug 2024 22:47:09 +0200 Subject: [PATCH 217/251] feat: Handle refusal in OpenAI's Structured Outputs API (#533) --- .../lib/src/chat_models/chat_openai.dart | 53 +------- .../lib/src/chat_models/mappers.dart | 128 ++++++++++++++---- .../lib/src/chat_models/types.dart | 22 +++ 3 files changed, 129 insertions(+), 74 deletions(-) diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index 0dc31168..c8a670f5 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -248,9 +248,10 @@ class ChatOpenAI extends BaseChatModel { final ChatOpenAIOptions? options, }) async { final completion = await _client.createChatCompletion( - request: _createChatCompletionRequest( + request: createChatCompletionRequest( input.toChatMessages(), options: options, + defaultOptions: defaultOptions, ), ); return completion.toChatResult(completion.id ?? 
_uuid.v4()); @@ -263,9 +264,10 @@ class ChatOpenAI extends BaseChatModel { }) { return _client .createChatCompletionStream( - request: _createChatCompletionRequest( + request: createChatCompletionRequest( input.toChatMessages(), options: options, + defaultOptions: defaultOptions, stream: true, ), ) @@ -275,53 +277,6 @@ class ChatOpenAI extends BaseChatModel { ); } - /// Creates a [CreateChatCompletionRequest] from the given input. - CreateChatCompletionRequest _createChatCompletionRequest( - final List messages, { - final ChatOpenAIOptions? options, - final bool stream = false, - }) { - final messagesDtos = messages.toChatCompletionMessages(); - final toolsDtos = - (options?.tools ?? defaultOptions.tools)?.toChatCompletionTool(); - final toolChoice = (options?.toolChoice ?? defaultOptions.toolChoice) - ?.toChatCompletionToolChoice(); - final responseFormatDto = - (options?.responseFormat ?? defaultOptions.responseFormat) - ?.toChatCompletionResponseFormat(); - final serviceTierDto = (options?.serviceTier ?? defaultOptions.serviceTier) - .toCreateChatCompletionRequestServiceTier(); - - return CreateChatCompletionRequest( - model: ChatCompletionModel.modelId( - options?.model ?? defaultOptions.model ?? defaultModel, - ), - messages: messagesDtos, - tools: toolsDtos, - toolChoice: toolChoice, - frequencyPenalty: - options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, - logitBias: options?.logitBias ?? defaultOptions.logitBias, - maxTokens: options?.maxTokens ?? defaultOptions.maxTokens, - n: options?.n ?? defaultOptions.n, - presencePenalty: - options?.presencePenalty ?? defaultOptions.presencePenalty, - responseFormat: responseFormatDto, - seed: options?.seed ?? defaultOptions.seed, - stop: (options?.stop ?? defaultOptions.stop) != null - ? ChatCompletionStop.listString(options?.stop ?? defaultOptions.stop!) - : null, - temperature: options?.temperature ?? defaultOptions.temperature, - topP: options?.topP ?? defaultOptions.topP, - parallelToolCalls: - options?.parallelToolCalls ?? defaultOptions.parallelToolCalls, - serviceTier: serviceTierDto, - user: options?.user ?? defaultOptions.user, - streamOptions: - stream ? const ChatCompletionStreamOptions(includeUsage: true) : null, - ); - } - /// Tokenizes the given prompt using tiktoken with the encoding used by the /// [model]. If an encoding model is specified in [encoding] field, that /// encoding is used instead. diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart index 5e9000c2..a2ea96f4 100644 --- a/packages/langchain_openai/lib/src/chat_models/mappers.dart +++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart @@ -6,8 +6,56 @@ import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/tools.dart'; import 'package:openai_dart/openai_dart.dart'; +import 'chat_openai.dart'; import 'types.dart'; +/// Creates a [CreateChatCompletionRequest] from the given input. +CreateChatCompletionRequest createChatCompletionRequest( + final List messages, { + required final ChatOpenAIOptions? options, + required final ChatOpenAIOptions defaultOptions, + final bool stream = false, +}) { + final messagesDtos = messages.toChatCompletionMessages(); + final toolsDtos = + (options?.tools ?? defaultOptions.tools)?.toChatCompletionTool(); + final toolChoice = (options?.toolChoice ?? defaultOptions.toolChoice) + ?.toChatCompletionToolChoice(); + final responseFormatDto = + (options?.responseFormat ?? 
defaultOptions.responseFormat) + ?.toChatCompletionResponseFormat(); + final serviceTierDto = (options?.serviceTier ?? defaultOptions.serviceTier) + .toCreateChatCompletionRequestServiceTier(); + + return CreateChatCompletionRequest( + model: ChatCompletionModel.modelId( + options?.model ?? defaultOptions.model ?? ChatOpenAI.defaultModel, + ), + messages: messagesDtos, + tools: toolsDtos, + toolChoice: toolChoice, + frequencyPenalty: + options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, + logitBias: options?.logitBias ?? defaultOptions.logitBias, + maxTokens: options?.maxTokens ?? defaultOptions.maxTokens, + n: options?.n ?? defaultOptions.n, + presencePenalty: options?.presencePenalty ?? defaultOptions.presencePenalty, + responseFormat: responseFormatDto, + seed: options?.seed ?? defaultOptions.seed, + stop: (options?.stop ?? defaultOptions.stop) != null + ? ChatCompletionStop.listString(options?.stop ?? defaultOptions.stop!) + : null, + temperature: options?.temperature ?? defaultOptions.temperature, + topP: options?.topP ?? defaultOptions.topP, + parallelToolCalls: + options?.parallelToolCalls ?? defaultOptions.parallelToolCalls, + serviceTier: serviceTierDto, + user: options?.user ?? defaultOptions.user, + streamOptions: + stream ? const ChatCompletionStreamOptions(includeUsage: true) : null, + ); +} + extension ChatMessageListMapper on List { List toChatCompletionMessages() { return map(_mapMessage).toList(growable: false); @@ -15,36 +63,36 @@ extension ChatMessageListMapper on List { ChatCompletionMessage _mapMessage(final ChatMessage msg) { return switch (msg) { - final SystemChatMessage systemChatMessage => ChatCompletionMessage.system( - content: systemChatMessage.content, - ), - final HumanChatMessage humanChatMessage => ChatCompletionMessage.user( - content: switch (humanChatMessage.content) { - final ChatMessageContentText c => _mapMessageContentString(c), - final ChatMessageContentImage c => - ChatCompletionUserMessageContent.parts( - [_mapMessageContentPartImage(c)], - ), - final ChatMessageContentMultiModal c => _mapMessageContentPart(c), - }, - ), - final AIChatMessage aiChatMessage => ChatCompletionMessage.assistant( - content: aiChatMessage.content, - toolCalls: aiChatMessage.toolCalls.isNotEmpty - ? 
aiChatMessage.toolCalls - .map(_mapMessageToolCall) - .toList(growable: false) - : null, - ), - final ToolChatMessage toolChatMessage => ChatCompletionMessage.tool( - toolCallId: toolChatMessage.toolCallId, - content: toolChatMessage.content, - ), + final SystemChatMessage msg => _mapSystemMessage(msg), + final HumanChatMessage msg => _mapHumanMessage(msg), + final AIChatMessage msg => _mapAIMessage(msg), + final ToolChatMessage msg => _mapToolMessage(msg), CustomChatMessage() => throw UnsupportedError('OpenAI does not support custom messages'), }; } + ChatCompletionMessage _mapSystemMessage( + final SystemChatMessage systemChatMessage, + ) { + return ChatCompletionMessage.system(content: systemChatMessage.content); + } + + ChatCompletionMessage _mapHumanMessage( + final HumanChatMessage humanChatMessage, + ) { + return ChatCompletionMessage.user( + content: switch (humanChatMessage.content) { + final ChatMessageContentText c => _mapMessageContentString(c), + final ChatMessageContentImage c => + ChatCompletionUserMessageContent.parts( + [_mapMessageContentPartImage(c)], + ), + final ChatMessageContentMultiModal c => _mapMessageContentPart(c), + }, + ); + } + ChatCompletionUserMessageContentString _mapMessageContentString( final ChatMessageContentText c, ) { @@ -105,6 +153,17 @@ extension ChatMessageListMapper on List { return ChatCompletionMessageContentParts(partsList); } + ChatCompletionMessage _mapAIMessage(final AIChatMessage aiChatMessage) { + return ChatCompletionMessage.assistant( + content: aiChatMessage.content, + toolCalls: aiChatMessage.toolCalls.isNotEmpty + ? aiChatMessage.toolCalls + .map(_mapMessageToolCall) + .toList(growable: false) + : null, + ); + } + ChatCompletionMessageToolCall _mapMessageToolCall( final AIChatMessageToolCall toolCall, ) { @@ -117,12 +176,26 @@ extension ChatMessageListMapper on List { ), ); } + + ChatCompletionMessage _mapToolMessage( + final ToolChatMessage toolChatMessage, + ) { + return ChatCompletionMessage.tool( + toolCallId: toolChatMessage.toolCallId, + content: toolChatMessage.content, + ); + } } extension CreateChatCompletionResponseMapper on CreateChatCompletionResponse { ChatResult toChatResult(final String id) { final choice = choices.first; final msg = choice.message; + + if (msg.refusal != null && msg.refusal!.isNotEmpty) { + throw OpenAIRefusalException(msg.refusal!); + } + return ChatResult( id: id, output: AIChatMessage( @@ -211,6 +284,11 @@ extension CreateChatCompletionStreamResponseMapper ChatResult toChatResult(final String id) { final choice = choices.firstOrNull; final delta = choice?.delta; + + if (delta?.refusal != null && delta!.refusal!.isNotEmpty) { + throw OpenAIRefusalException(delta.refusal!); + } + return ChatResult( id: id, output: AIChatMessage( diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 0c80184f..6713a56f 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -379,3 +379,25 @@ enum ChatOpenAIServiceTier { /// uptime SLA and no latency guarantee. vDefault, } + +/// {@template openai_refusal_exception} +/// Exception thrown when OpenAI Structured Outputs API returns a refusal. +/// +/// When using OpenAI's Structured Outputs API with user-generated input, the +/// model may occasionally refuse to fulfill the request for safety reasons. 
+/// +/// See here for more on refusals: +/// https://platform.openai.com/docs/guides/structured-outputs/refusals +/// {@endtemplate} +class OpenAIRefusalException implements Exception { + /// {@macro openai_refusal_exception} + const OpenAIRefusalException(this.message); + + /// The refusal message. + final String message; + + @override + String toString() { + return 'OpenAIRefusalException: $message'; + } +} From 7cc1966acc08d8c614f4ec5efb53f6de4dba52bf Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 21 Aug 2024 23:04:48 +0200 Subject: [PATCH 218/251] feat: Add log probabilities for refusal tokens in openai_dart (#534) --- .../schema/chat_completion_logprobs.dart | 6 +- ...hat_completion_stream_response_choice.dart | 6 +- .../src/generated/schema/function_object.dart | 2 +- .../src/generated/schema/schema.freezed.dart | 134 +++++++++++++++--- .../lib/src/generated/schema/schema.g.dart | 10 ++ packages/openai_dart/oas/openapi_curated.yaml | 24 ++-- 6 files changed, 144 insertions(+), 38 deletions(-) diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart index 36c84a12..4b4adc2c 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart @@ -17,6 +17,9 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { const factory ChatCompletionLogprobs({ /// A list of message content tokens with log probability information. @JsonKey(includeIfNull: false) List? content, + + /// A list of message refusal tokens with log probability information. + @JsonKey(includeIfNull: false) List? refusal, }) = _ChatCompletionLogprobs; /// Object construction from a JSON representation @@ -24,7 +27,7 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { _$ChatCompletionLogprobsFromJson(json); /// List of all property names of schema - static const List propertyNames = ['content']; + static const List propertyNames = ['content', 'refusal']; /// Perform validations on the schema property values String? validateSchema() { @@ -35,6 +38,7 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { Map toMap() { return { 'content': content, + 'refusal': refusal, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart index 39a46139..8d81379d 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart @@ -80,6 +80,9 @@ class ChatCompletionStreamResponseChoiceLogprobs const factory ChatCompletionStreamResponseChoiceLogprobs({ /// A list of message content tokens with log probability information. @JsonKey(includeIfNull: false) List? content, + + /// A list of message refusal tokens with log probability information. + @JsonKey(includeIfNull: false) List? 
refusal, }) = _ChatCompletionStreamResponseChoiceLogprobs; /// Object construction from a JSON representation @@ -88,7 +91,7 @@ class ChatCompletionStreamResponseChoiceLogprobs _$ChatCompletionStreamResponseChoiceLogprobsFromJson(json); /// List of all property names of schema - static const List propertyNames = ['content']; + static const List propertyNames = ['content', 'refusal']; /// Perform validations on the schema property values String? validateSchema() { @@ -99,6 +102,7 @@ class ChatCompletionStreamResponseChoiceLogprobs Map toMap() { return { 'content': content, + 'refusal': refusal, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/function_object.dart b/packages/openai_dart/lib/src/generated/schema/function_object.dart index 62426de3..ac87dc02 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_object.dart @@ -30,7 +30,7 @@ class FunctionObject with _$FunctionObject { /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when /// `strict` is `true`. Learn more about Structured Outputs in the - /// [function calling guide](docs/guides/function-calling). + /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). @JsonKey(includeIfNull: false) @Default(false) bool? strict, }) = _FunctionObject; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index d3269d89..76274966 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -6928,7 +6928,7 @@ mixin _$FunctionObject { /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when /// `strict` is `true`. Learn more about Structured Outputs in the - /// [function calling guide](docs/guides/function-calling). + /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). @JsonKey(includeIfNull: false) bool? get strict => throw _privateConstructorUsedError; @@ -7095,7 +7095,7 @@ class _$FunctionObjectImpl extends _FunctionObject { /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when /// `strict` is `true`. Learn more about Structured Outputs in the - /// [function calling guide](docs/guides/function-calling). + /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). @override @JsonKey(includeIfNull: false) final bool? strict; @@ -7172,7 +7172,7 @@ abstract class _FunctionObject extends FunctionObject { /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when /// `strict` is `true`. Learn more about Structured Outputs in the - /// [function calling guide](docs/guides/function-calling). + /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). 
@override @JsonKey(includeIfNull: false) bool? get strict; @@ -9041,6 +9041,11 @@ mixin _$ChatCompletionLogprobs { List? get content => throw _privateConstructorUsedError; + /// A list of message refusal tokens with log probability information. + @JsonKey(includeIfNull: false) + List? get refusal => + throw _privateConstructorUsedError; + /// Serializes this ChatCompletionLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; @@ -9058,8 +9063,9 @@ abstract class $ChatCompletionLogprobsCopyWith<$Res> { _$ChatCompletionLogprobsCopyWithImpl<$Res, ChatCompletionLogprobs>; @useResult $Res call( - {@JsonKey(includeIfNull: false) - List? content}); + {@JsonKey(includeIfNull: false) List? content, + @JsonKey(includeIfNull: false) + List? refusal}); } /// @nodoc @@ -9079,12 +9085,17 @@ class _$ChatCompletionLogprobsCopyWithImpl<$Res, @override $Res call({ Object? content = freezed, + Object? refusal = freezed, }) { return _then(_value.copyWith( content: freezed == content ? _value.content : content // ignore: cast_nullable_to_non_nullable as List?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } } @@ -9099,8 +9110,9 @@ abstract class _$$ChatCompletionLogprobsImplCopyWith<$Res> @override @useResult $Res call( - {@JsonKey(includeIfNull: false) - List? content}); + {@JsonKey(includeIfNull: false) List? content, + @JsonKey(includeIfNull: false) + List? refusal}); } /// @nodoc @@ -9119,12 +9131,17 @@ class __$$ChatCompletionLogprobsImplCopyWithImpl<$Res> @override $Res call({ Object? content = freezed, + Object? refusal = freezed, }) { return _then(_$ChatCompletionLogprobsImpl( content: freezed == content ? _value._content : content // ignore: cast_nullable_to_non_nullable as List?, + refusal: freezed == refusal + ? _value._refusal + : refusal // ignore: cast_nullable_to_non_nullable + as List?, )); } } @@ -9134,8 +9151,11 @@ class __$$ChatCompletionLogprobsImplCopyWithImpl<$Res> class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { const _$ChatCompletionLogprobsImpl( {@JsonKey(includeIfNull: false) - final List? content}) + final List? content, + @JsonKey(includeIfNull: false) + final List? refusal}) : _content = content, + _refusal = refusal, super._(); factory _$ChatCompletionLogprobsImpl.fromJson(Map json) => @@ -9155,9 +9175,23 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { return EqualUnmodifiableListView(value); } + /// A list of message refusal tokens with log probability information. + final List? _refusal; + + /// A list of message refusal tokens with log probability information. + @override + @JsonKey(includeIfNull: false) + List? 
get refusal { + final value = _refusal; + if (value == null) return null; + if (_refusal is EqualUnmodifiableListView) return _refusal; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + @override String toString() { - return 'ChatCompletionLogprobs(content: $content)'; + return 'ChatCompletionLogprobs(content: $content, refusal: $refusal)'; } @override @@ -9165,13 +9199,16 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { return identical(this, other) || (other.runtimeType == runtimeType && other is _$ChatCompletionLogprobsImpl && - const DeepCollectionEquality().equals(other._content, _content)); + const DeepCollectionEquality().equals(other._content, _content) && + const DeepCollectionEquality().equals(other._refusal, _refusal)); } @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_content)); + int get hashCode => Object.hash( + runtimeType, + const DeepCollectionEquality().hash(_content), + const DeepCollectionEquality().hash(_refusal)); /// Create a copy of ChatCompletionLogprobs /// with the given fields replaced by the non-null parameter values. @@ -9193,7 +9230,9 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { abstract class _ChatCompletionLogprobs extends ChatCompletionLogprobs { const factory _ChatCompletionLogprobs( {@JsonKey(includeIfNull: false) - final List? content}) = + final List? content, + @JsonKey(includeIfNull: false) + final List? refusal}) = _$ChatCompletionLogprobsImpl; const _ChatCompletionLogprobs._() : super._(); @@ -9205,6 +9244,11 @@ abstract class _ChatCompletionLogprobs extends ChatCompletionLogprobs { @JsonKey(includeIfNull: false) List? get content; + /// A list of message refusal tokens with log probability information. + @override + @JsonKey(includeIfNull: false) + List? get refusal; + /// Create a copy of ChatCompletionLogprobs /// with the given fields replaced by the non-null parameter values. @override @@ -10520,6 +10564,11 @@ mixin _$ChatCompletionStreamResponseChoiceLogprobs { List? get content => throw _privateConstructorUsedError; + /// A list of message refusal tokens with log probability information. + @JsonKey(includeIfNull: false) + List? get refusal => + throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamResponseChoiceLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; @@ -10540,8 +10589,9 @@ abstract class $ChatCompletionStreamResponseChoiceLogprobsCopyWith<$Res> { ChatCompletionStreamResponseChoiceLogprobs>; @useResult $Res call( - {@JsonKey(includeIfNull: false) - List? content}); + {@JsonKey(includeIfNull: false) List? content, + @JsonKey(includeIfNull: false) + List? refusal}); } /// @nodoc @@ -10562,12 +10612,17 @@ class _$ChatCompletionStreamResponseChoiceLogprobsCopyWithImpl<$Res, @override $Res call({ Object? content = freezed, + Object? refusal = freezed, }) { return _then(_value.copyWith( content: freezed == content ? _value.content : content // ignore: cast_nullable_to_non_nullable as List?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } } @@ -10583,8 +10638,9 @@ abstract class _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith<$Res> @override @useResult $Res call( - {@JsonKey(includeIfNull: false) - List? content}); + {@JsonKey(includeIfNull: false) List? content, + @JsonKey(includeIfNull: false) + List? 
refusal}); } /// @nodoc @@ -10603,12 +10659,17 @@ class __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res> @override $Res call({ Object? content = freezed, + Object? refusal = freezed, }) { return _then(_$ChatCompletionStreamResponseChoiceLogprobsImpl( content: freezed == content ? _value._content : content // ignore: cast_nullable_to_non_nullable as List?, + refusal: freezed == refusal + ? _value._refusal + : refusal // ignore: cast_nullable_to_non_nullable + as List?, )); } } @@ -10619,8 +10680,11 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl extends _ChatCompletionStreamResponseChoiceLogprobs { const _$ChatCompletionStreamResponseChoiceLogprobsImpl( {@JsonKey(includeIfNull: false) - final List? content}) + final List? content, + @JsonKey(includeIfNull: false) + final List? refusal}) : _content = content, + _refusal = refusal, super._(); factory _$ChatCompletionStreamResponseChoiceLogprobsImpl.fromJson( @@ -10641,9 +10705,23 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl return EqualUnmodifiableListView(value); } + /// A list of message refusal tokens with log probability information. + final List? _refusal; + + /// A list of message refusal tokens with log probability information. + @override + @JsonKey(includeIfNull: false) + List? get refusal { + final value = _refusal; + if (value == null) return null; + if (_refusal is EqualUnmodifiableListView) return _refusal; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + @override String toString() { - return 'ChatCompletionStreamResponseChoiceLogprobs(content: $content)'; + return 'ChatCompletionStreamResponseChoiceLogprobs(content: $content, refusal: $refusal)'; } @override @@ -10651,13 +10729,16 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl return identical(this, other) || (other.runtimeType == runtimeType && other is _$ChatCompletionStreamResponseChoiceLogprobsImpl && - const DeepCollectionEquality().equals(other._content, _content)); + const DeepCollectionEquality().equals(other._content, _content) && + const DeepCollectionEquality().equals(other._refusal, _refusal)); } @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_content)); + int get hashCode => Object.hash( + runtimeType, + const DeepCollectionEquality().hash(_content), + const DeepCollectionEquality().hash(_refusal)); /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs /// with the given fields replaced by the non-null parameter values. @@ -10683,7 +10764,9 @@ abstract class _ChatCompletionStreamResponseChoiceLogprobs extends ChatCompletionStreamResponseChoiceLogprobs { const factory _ChatCompletionStreamResponseChoiceLogprobs( {@JsonKey(includeIfNull: false) - final List? content}) = + final List? content, + @JsonKey(includeIfNull: false) + final List? refusal}) = _$ChatCompletionStreamResponseChoiceLogprobsImpl; const _ChatCompletionStreamResponseChoiceLogprobs._() : super._(); @@ -10696,6 +10779,11 @@ abstract class _ChatCompletionStreamResponseChoiceLogprobs @JsonKey(includeIfNull: false) List? get content; + /// A list of message refusal tokens with log probability information. + @override + @JsonKey(includeIfNull: false) + List? get refusal; + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs /// with the given fields replaced by the non-null parameter values. 
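Taken together with the non-streaming counterpart above, the generated schema now exposes a `refusal` list alongside `content` on both logprobs types. A minimal, hedged sketch of reading the new field through the public `openai_dart` client — the model id and the `logprobs: true` request flag are illustrative assumptions, not part of this patch:

```dart
import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  final client = OpenAIClient(apiKey: 'YOUR_OPENAI_API_KEY');

  final res = await client.createChatCompletion(
    request: CreateChatCompletionRequest(
      model: ChatCompletionModel.modelId('gpt-4o-mini'),
      messages: [
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string(
            'Reply with a short greeting.',
          ),
        ),
      ],
      logprobs: true, // ask the API to return log probabilities
    ),
  );

  final logprobs = res.choices.first.logprobs;
  // `content` carries log probabilities for regular message tokens;
  // the new `refusal` list carries them for refusal-message tokens
  // (it is null when the model did not refuse).
  for (final token
      in logprobs?.refusal ?? const <ChatCompletionTokenLogprob>[]) {
    print('${token.token} -> ${token.logprob}');
  }
}
```

The same `refusal` list is added to the streaming variant (`ChatCompletionStreamResponseChoiceLogprobs`), so streamed chunks can be inspected in the same way.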
@override diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 01851e43..d03e9a18 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -816,6 +816,10 @@ _$ChatCompletionLogprobsImpl _$$ChatCompletionLogprobsImplFromJson( ?.map((e) => ChatCompletionTokenLogprob.fromJson(e as Map)) .toList(), + refusal: (json['refusal'] as List?) + ?.map((e) => + ChatCompletionTokenLogprob.fromJson(e as Map)) + .toList(), ); Map _$$ChatCompletionLogprobsImplToJson( @@ -829,6 +833,7 @@ Map _$$ChatCompletionLogprobsImplToJson( } writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); + writeNotNull('refusal', instance.refusal?.map((e) => e.toJson()).toList()); return val; } @@ -958,6 +963,10 @@ _$ChatCompletionStreamResponseChoiceLogprobsImpl ?.map((e) => ChatCompletionTokenLogprob.fromJson( e as Map)) .toList(), + refusal: (json['refusal'] as List?) + ?.map((e) => ChatCompletionTokenLogprob.fromJson( + e as Map)) + .toList(), ); Map _$$ChatCompletionStreamResponseChoiceLogprobsImplToJson( @@ -971,6 +980,7 @@ Map _$$ChatCompletionStreamResponseChoiceLogprobsImplToJson( } writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); + writeNotNull('refusal', instance.refusal?.map((e) => e.toJson()).toList()); return val; } diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index ba1d409c..9c474cec 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -2233,7 +2233,7 @@ components: properties: name: type: string - description: | + description: | The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. description: @@ -2246,11 +2246,11 @@ components: type: boolean nullable: true default: false - description: | + description: | Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the - [function calling guide](docs/guides/function-calling). + [function calling guide](](https://platform.openai.com/docs/guides/function-calling). required: - name FunctionParameters: @@ -2311,12 +2311,12 @@ components: properties: name: type: string - description: | + description: | The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. description: type: string - description: | + description: | A description of what the response format is for, used by the model to determine how to respond in the format. schema: @@ -2328,7 +2328,7 @@ components: type: boolean nullable: true default: false - description: | + description: | Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the @@ -2485,12 +2485,12 @@ components: items: $ref: "#/components/schemas/ChatCompletionTokenLogprob" nullable: true - refusal: - description: A list of message refusal tokens with log probability information. 
- type: array - items: - $ref: "#/components/schemas/ChatCompletionTokenLogprob" - nullable: true + refusal: + description: A list of message refusal tokens with log probability information. + type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true ChatCompletionTokenLogprob: type: object description: Log probability information for a token. From 186c258140eb4d92eaa55e04103716f2f6c60927 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 21 Aug 2024 23:09:12 +0200 Subject: [PATCH 219/251] feat: Include logprobs in result metadata from ChatOpenAI (#535) --- packages/langchain_openai/lib/src/chat_models/mappers.dart | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart index a2ea96f4..3b23ee8c 100644 --- a/packages/langchain_openai/lib/src/chat_models/mappers.dart +++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart @@ -209,6 +209,7 @@ extension CreateChatCompletionResponseMapper on CreateChatCompletionResponse { 'model': model, 'created': created, 'system_fingerprint': systemFingerprint, + 'logprobs': choice.logprobs?.toMap(), }, usage: _mapUsage(usage), ); From 68964d4c0a947867baffe27af0147dafcf1cae03 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 21 Aug 2024 23:15:20 +0200 Subject: [PATCH 220/251] feat: Support OpenAI's strict mode for tool calling in ChatOpenAI (#536) --- .../langchain_core/lib/src/tools/base.dart | 40 ++++++++++++++++--- .../langchain_core/lib/src/tools/string.dart | 6 +++ 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/packages/langchain_core/lib/src/tools/base.dart b/packages/langchain_core/lib/src/tools/base.dart index f6e8dd29..079dbab7 100644 --- a/packages/langchain_core/lib/src/tools/base.dart +++ b/packages/langchain_core/lib/src/tools/base.dart @@ -18,6 +18,7 @@ class ToolSpec { required this.name, required this.description, required this.inputJsonSchema, + this.strict = false, }); /// The unique name of the tool that clearly communicates its purpose. @@ -50,18 +51,31 @@ class ToolSpec { /// ``` final Map inputJsonSchema; + /// Whether to enable strict schema adherence when generating the tool call. + /// If set to true, the model will follow the exact schema defined in the + /// [inputJsonSchema] field. + /// + /// This is only supported by some providers (e.g. OpenAI). Mind that when + /// enabled, only a subset of JSON Schema may be supported. Check out the + /// provider's tool calling documentation for more information. 
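As a hedged, end-to-end sketch of what this flag enables once it is forwarded by `ChatOpenAI` (#536) — the weather tool, its schema, and the model id below are illustrative assumptions, not code from this patch; the `metadata['logprobs']` read relies on the mapper change from #535 and is null unless the API returned log probabilities:

```dart
import 'dart:io';

import 'package:langchain_core/prompts.dart';
import 'package:langchain_core/tools.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  // Strict mode expects a closed schema: every property listed as required
  // and additionalProperties set to false.
  final weatherTool = Tool.fromFunction<Map<String, dynamic>, String>(
    name: 'get_weather',
    description: 'Returns the current weather for a given city.',
    inputJsonSchema: const {
      'type': 'object',
      'properties': {
        'city': {'type': 'string', 'description': 'The city name'},
      },
      'required': ['city'],
      'additionalProperties': false,
    },
    strict: true, // only honoured by providers that support it (e.g. OpenAI)
    func: (input) => 'It is sunny in ${input['city']}.',
    getInputFromJson: (json) => json,
  );

  final chatModel = ChatOpenAI(
    apiKey: Platform.environment['OPENAI_API_KEY'],
    defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o-mini'),
  ).bind(ChatOpenAIOptions(tools: [weatherTool]));

  final res = await chatModel.invoke(
    PromptValue.string('What is the weather like in Paris?'),
  );
  print(res.output.toolCalls); // tool invocation(s) proposed by the model
  print(res.metadata['logprobs']); // surfaced by #535 when logprobs are returned
}
```

The `strict` flag defaults to `false` throughout, so existing tool definitions keep their current behaviour.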
+ final bool strict; + @override bool operator ==(covariant final ToolSpec other) { final mapEquals = const DeepCollectionEquality().equals; return identical(this, other) || name == other.name && description == other.description && - mapEquals(inputJsonSchema, other.inputJsonSchema); + mapEquals(inputJsonSchema, other.inputJsonSchema) && + strict == other.strict; } @override int get hashCode => - name.hashCode ^ description.hashCode ^ inputJsonSchema.hashCode; + name.hashCode ^ + description.hashCode ^ + inputJsonSchema.hashCode ^ + strict.hashCode; @override String toString() { @@ -70,6 +84,7 @@ ToolSpec{ name: $name, description: $description, inputJsonSchema: $inputJsonSchema, + strict: $strict, } '''; } @@ -80,6 +95,7 @@ ToolSpec{ 'name': name, 'description': description, 'inputJsonSchema': inputJsonSchema, + 'strict': strict, }; } } @@ -102,6 +118,7 @@ abstract base class Tool inputJsonSchema; + @override + final bool strict; + /// Whether to return the tool's output directly. /// Setting this to true means that after the tool is called, /// the AgentExecutor will stop looping. @@ -132,7 +152,9 @@ abstract base class Tool inputJsonSchema, + final bool strict = false, required final FutureOr Function(Input input) func, Input Function(Map json)? getInputFromJson, final bool returnDirect = false, @@ -157,6 +180,7 @@ abstract base class Tool json['input'] as Input, returnDirect: returnDirect, @@ -217,12 +241,16 @@ abstract base class Tool - name.hashCode ^ description.hashCode ^ inputJsonSchema.hashCode; + name.hashCode ^ + description.hashCode ^ + inputJsonSchema.hashCode ^ + strict.hashCode; @override Map toJson() { @@ -230,6 +258,7 @@ abstract base class Tool required super.name, required super.description, required super.inputJsonSchema, + required super.strict, required FutureOr Function(Input input) function, required Input Function(Map json) getInputFromJson, super.returnDirect = false, diff --git a/packages/langchain_core/lib/src/tools/string.dart b/packages/langchain_core/lib/src/tools/string.dart index 3c9973d5..43e1e2e7 100644 --- a/packages/langchain_core/lib/src/tools/string.dart +++ b/packages/langchain_core/lib/src/tools/string.dart @@ -14,6 +14,7 @@ abstract base class StringTool required super.name, required super.description, final String inputDescription = 'The input to the tool', + super.strict = false, super.returnDirect = false, super.handleToolError, super.defaultOptions, @@ -36,6 +37,8 @@ abstract base class StringTool /// purpose. /// - [description] is used to tell the model how/when/why to use the tool. /// You can provide few-shot examples as a part of the description. + /// - [strict] whether to enable strict schema adherence when generating the + /// tool call (only supported by some providers). /// - [func] is the function that will be called when the tool is run. /// - [returnDirect] whether to return the tool's output directly. /// Setting this to true means that after the tool is called, @@ -46,6 +49,7 @@ abstract base class StringTool required final String name, required final String description, final String inputDescription = 'The input to the tool', + final bool strict = false, required final FutureOr Function(String input) func, final bool returnDirect = false, final String Function(ToolException)? 
handleToolError, @@ -54,6 +58,7 @@ abstract base class StringTool name: name, description: description, inputDescription: inputDescription, + strict: strict, func: func, returnDirect: returnDirect, handleToolError: handleToolError, @@ -84,6 +89,7 @@ final class _StringToolFunc required super.name, required super.description, super.inputDescription, + required super.strict, required FutureOr Function(String) func, super.returnDirect = false, super.handleToolError, From 4959d7163d0da587780831de384cfac368169b22 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 22 Aug 2024 18:38:30 +0200 Subject: [PATCH 221/251] build: Update analysis_options.yaml and fix linter issues (#537) --- analysis_options.yaml | 15 +++++++++++++-- .../browser_summarizer/lib/chrome/chrome_api.dart | 2 +- .../lib/anthropic_sdk_dart.dart | 2 +- packages/googleai_dart/lib/googleai_dart.dart | 2 +- .../lib/src/vector_stores/chroma.dart | 2 +- .../lib/src/document_loaders/csv.dart | 3 +-- packages/langchain_core/lib/agents.dart | 2 +- packages/langchain_core/lib/chains.dart | 2 +- packages/langchain_core/lib/chat_history.dart | 2 +- packages/langchain_core/lib/chat_models.dart | 2 +- packages/langchain_core/lib/document_loaders.dart | 2 +- packages/langchain_core/lib/documents.dart | 2 +- packages/langchain_core/lib/embeddings.dart | 2 +- packages/langchain_core/lib/exceptions.dart | 2 +- packages/langchain_core/lib/langchain.dart | 2 +- packages/langchain_core/lib/language_models.dart | 2 +- packages/langchain_core/lib/llms.dart | 2 +- packages/langchain_core/lib/memory.dart | 2 +- packages/langchain_core/lib/output_parsers.dart | 2 +- packages/langchain_core/lib/prompts.dart | 2 +- packages/langchain_core/lib/retrievers.dart | 2 +- packages/langchain_core/lib/runnables.dart | 2 +- .../langchain_core/lib/src/chat_models/utils.dart | 2 +- .../langchain_core/lib/src/prompts/types.dart | 2 +- packages/langchain_core/lib/stores.dart | 2 +- packages/langchain_core/lib/tools.dart | 2 +- packages/langchain_core/lib/utils.dart | 2 +- packages/langchain_core/lib/vector_stores.dart | 2 +- packages/langchain_firebase/example/pubspec.lock | 4 ++-- packages/langchain_firebase/pubspec.lock | 4 ++-- packages/mistralai_dart/lib/mistralai_dart.dart | 2 +- packages/ollama_dart/lib/ollama_dart.dart | 2 +- 32 files changed, 46 insertions(+), 36 deletions(-) diff --git a/analysis_options.yaml b/analysis_options.yaml index d8f55c71..9a364363 100644 --- a/analysis_options.yaml +++ b/analysis_options.yaml @@ -7,6 +7,11 @@ analyzer: missing_return: error todo: ignore sdk_version_since: ignore # TODO remove when fixed https://github.com/dart-lang/sdk/issues/52327 + exclude: + - "**/generated_plugin_registrant.dart" + - "**/generated/**" + - "**/*.gen.dart" + - "**/*.g.dart" linter: rules: # https://dart-lang.github.io/linter/lints/{rule}.html @@ -30,7 +35,7 @@ linter: - avoid_null_checks_in_equality_operators - avoid_positional_boolean_parameters - avoid_print - # - avoid_redundant_argument_values # Sometimes is useful to be explicit + # - avoid_redundant_argument_values # I prefer to be explicit sometimes - avoid_relative_lib_imports - avoid_renaming_method_parameters - avoid_return_types_on_setters @@ -65,6 +70,7 @@ linter: # - diagnostic_describe_all_properties # Disabled because it's very verbose - directives_ordering - discarded_futures + # - document_ignores # Disabled because it's very verbose - empty_catches - empty_constructor_bodies - empty_statements @@ -76,6 +82,7 @@ linter: - implementation_imports - implicit_call_tearoffs - 
invalid_case_patterns + - invalid_runtime_check_with_js_interop_types - iterable_contains_unrelated_type - join_return_with_assignment - leading_newlines_in_multiline_strings @@ -85,6 +92,7 @@ linter: - library_private_types_in_public_api - list_remove_unrelated_type - matching_super_parameters + - missing_code_block_language_in_doc_comment - missing_whitespace_between_adjacent_strings - no_adjacent_strings_in_list - no_default_cases @@ -94,6 +102,7 @@ linter: - no_literal_bool_comparisons - no_logic_in_create_state - no_runtimeType_toString + - no_wildcard_variable_uses - non_constant_identifier_names - noop_primitive_operations - null_check_on_nullable_type_parameter @@ -116,7 +125,7 @@ linter: - prefer_final_fields - prefer_final_in_for_each - prefer_final_locals - # - prefer_final_parameters # adds too much verbosity + # - prefer_final_parameters # Very verbose - prefer_for_elements_to_map_fromIterable - prefer_foreach - prefer_function_declarations_over_variables @@ -152,6 +161,7 @@ linter: - type_init_formals - type_literal_in_constant_pattern - unawaited_futures + - unintended_html_in_doc_comment - unnecessary_await_in_return - unnecessary_brace_in_string_interps - unnecessary_breaks @@ -161,6 +171,7 @@ linter: - unnecessary_lambdas - unnecessary_late - unnecessary_library_directive + - unnecessary_library_name - unnecessary_new - unnecessary_null_aware_assignments - unnecessary_null_aware_operator_on_extension_on_nullable diff --git a/examples/browser_summarizer/lib/chrome/chrome_api.dart b/examples/browser_summarizer/lib/chrome/chrome_api.dart index d60ac8b7..9ab8b8b4 100644 --- a/examples/browser_summarizer/lib/chrome/chrome_api.dart +++ b/examples/browser_summarizer/lib/chrome/chrome_api.dart @@ -1,6 +1,6 @@ // ignore_for_file: public_member_api_docs @JS('chrome') -library chrome; +library; import 'package:js/js.dart'; diff --git a/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart index 7a853ada..4cc40a27 100644 --- a/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart +++ b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Anthropic API (Claude 3.5 Sonnet, Opus, Haiku, etc.). -library anthropic_sdk_dart; +library; export 'src/client.dart'; export 'src/extensions.dart'; diff --git a/packages/googleai_dart/lib/googleai_dart.dart b/packages/googleai_dart/lib/googleai_dart.dart index e673e9d9..f0807211 100644 --- a/packages/googleai_dart/lib/googleai_dart.dart +++ b/packages/googleai_dart/lib/googleai_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Google AI API (Gemini Pro, Gemini Pro Vision, embeddings, etc.). -library googleai_dart; +library; export 'src/client.dart'; export 'src/generated/client.dart' show GoogleAIClientException; diff --git a/packages/langchain_chroma/lib/src/vector_stores/chroma.dart b/packages/langchain_chroma/lib/src/vector_stores/chroma.dart index 9ed252ba..dc170896 100644 --- a/packages/langchain_chroma/lib/src/vector_stores/chroma.dart +++ b/packages/langchain_chroma/lib/src/vector_stores/chroma.dart @@ -42,7 +42,7 @@ import 'types.dart'; /// If you are interacting with Chroma server from a web browser, /// you may need to configure the CORS policy. 
You can do this by /// passing the following environment variable: -/// ``` +/// ```sh /// docker run -p 8000:8000 -e 'CHROMA_SERVER_CORS_ALLOW_ORIGINS=["*"]' chromadb/chroma /// ``` /// The previous command will allow all origins to access the Chroma server diff --git a/packages/langchain_community/lib/src/document_loaders/csv.dart b/packages/langchain_community/lib/src/document_loaders/csv.dart index 155e520d..2a4a4872 100644 --- a/packages/langchain_community/lib/src/document_loaders/csv.dart +++ b/packages/langchain_community/lib/src/document_loaders/csv.dart @@ -17,7 +17,7 @@ import 'package:langchain_core/documents.dart'; /// and [eol]. /// /// The fields are added to the page content in the following format: -/// ``` +/// ```txt /// {field1Name}: {field1Value} /// {field2Name}: {field2Value} /// ... @@ -56,7 +56,6 @@ class CsvLoader extends BaseDocumentLoader { /// the page content of the document. /// /// If not provided, all row fields are extracted. - /// ``` final List? fields; /// Optional field to override the field names from the CSV file. diff --git a/packages/langchain_core/lib/agents.dart b/packages/langchain_core/lib/agents.dart index e99fdb9f..97382b62 100644 --- a/packages/langchain_core/lib/agents.dart +++ b/packages/langchain_core/lib/agents.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to agents. -library agents; +library; export 'src/agents/agents.dart'; diff --git a/packages/langchain_core/lib/chains.dart b/packages/langchain_core/lib/chains.dart index 3214cef2..a35484cd 100644 --- a/packages/langchain_core/lib/chains.dart +++ b/packages/langchain_core/lib/chains.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to chains. -library chains; +library; export 'src/chains/chains.dart'; diff --git a/packages/langchain_core/lib/chat_history.dart b/packages/langchain_core/lib/chat_history.dart index 316cbccc..726dbd3c 100644 --- a/packages/langchain_core/lib/chat_history.dart +++ b/packages/langchain_core/lib/chat_history.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to chat history. -library chat_history; +library; export 'src/chat_history/chat_history.dart'; diff --git a/packages/langchain_core/lib/chat_models.dart b/packages/langchain_core/lib/chat_models.dart index 803668df..259fa3c3 100644 --- a/packages/langchain_core/lib/chat_models.dart +++ b/packages/langchain_core/lib/chat_models.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to chat models. -library chat_models; +library; export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_core/lib/document_loaders.dart b/packages/langchain_core/lib/document_loaders.dart index 51fdbead..b8340c67 100644 --- a/packages/langchain_core/lib/document_loaders.dart +++ b/packages/langchain_core/lib/document_loaders.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to document loaders. -library document_loaders; +library; export 'src/document_loaders/document_loaders.dart'; diff --git a/packages/langchain_core/lib/documents.dart b/packages/langchain_core/lib/documents.dart index 24d340a4..a0f68ebd 100644 --- a/packages/langchain_core/lib/documents.dart +++ b/packages/langchain_core/lib/documents.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to documents. 
-library documents; +library; export 'src/documents/documents.dart'; diff --git a/packages/langchain_core/lib/embeddings.dart b/packages/langchain_core/lib/embeddings.dart index 829de2c7..b6c2bc82 100644 --- a/packages/langchain_core/lib/embeddings.dart +++ b/packages/langchain_core/lib/embeddings.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to embeddings. -library embeddings; +library; export 'src/embeddings/embeddings.dart'; diff --git a/packages/langchain_core/lib/exceptions.dart b/packages/langchain_core/lib/exceptions.dart index 4371a3a3..1e0d7fa0 100644 --- a/packages/langchain_core/lib/exceptions.dart +++ b/packages/langchain_core/lib/exceptions.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to exceptions. -library exceptions; +library; export 'src/exceptions/exceptions.dart'; diff --git a/packages/langchain_core/lib/langchain.dart b/packages/langchain_core/lib/langchain.dart index b30c4d14..cf5bb742 100644 --- a/packages/langchain_core/lib/langchain.dart +++ b/packages/langchain_core/lib/langchain.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to LangChain. -library langchain; +library; export 'src/langchain/langchain.dart'; diff --git a/packages/langchain_core/lib/language_models.dart b/packages/langchain_core/lib/language_models.dart index 7cabafc7..1fae54b5 100644 --- a/packages/langchain_core/lib/language_models.dart +++ b/packages/langchain_core/lib/language_models.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to language models. -library language_models; +library; export 'src/language_models/language_models.dart'; diff --git a/packages/langchain_core/lib/llms.dart b/packages/langchain_core/lib/llms.dart index 5b98240d..ed130b60 100644 --- a/packages/langchain_core/lib/llms.dart +++ b/packages/langchain_core/lib/llms.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to LLMs. -library llms; +library; export 'src/llms/llms.dart'; diff --git a/packages/langchain_core/lib/memory.dart b/packages/langchain_core/lib/memory.dart index b79467cf..7193923f 100644 --- a/packages/langchain_core/lib/memory.dart +++ b/packages/langchain_core/lib/memory.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to memory. -library memory; +library; export 'src/memory/memory.dart'; diff --git a/packages/langchain_core/lib/output_parsers.dart b/packages/langchain_core/lib/output_parsers.dart index 7f0d0d5f..2915a146 100644 --- a/packages/langchain_core/lib/output_parsers.dart +++ b/packages/langchain_core/lib/output_parsers.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to output parsers. -library output_parsers; +library; export 'src/output_parsers/output_parsers.dart'; diff --git a/packages/langchain_core/lib/prompts.dart b/packages/langchain_core/lib/prompts.dart index dbb7ef5b..b7873da5 100644 --- a/packages/langchain_core/lib/prompts.dart +++ b/packages/langchain_core/lib/prompts.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to prompts. -library prompts; +library; export 'src/prompts/prompts.dart'; diff --git a/packages/langchain_core/lib/retrievers.dart b/packages/langchain_core/lib/retrievers.dart index 5b1ec71e..5d1278bf 100644 --- a/packages/langchain_core/lib/retrievers.dart +++ b/packages/langchain_core/lib/retrievers.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to retrievers. 
-library retrievers; +library; export 'src/retrievers/retrievers.dart'; diff --git a/packages/langchain_core/lib/runnables.dart b/packages/langchain_core/lib/runnables.dart index e111eb58..72b67584 100644 --- a/packages/langchain_core/lib/runnables.dart +++ b/packages/langchain_core/lib/runnables.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to runnables. -library runnables; +library; export 'src/runnables/runnables.dart'; diff --git a/packages/langchain_core/lib/src/chat_models/utils.dart b/packages/langchain_core/lib/src/chat_models/utils.dart index 5c84a142..ebfc011c 100644 --- a/packages/langchain_core/lib/src/chat_models/utils.dart +++ b/packages/langchain_core/lib/src/chat_models/utils.dart @@ -1,6 +1,6 @@ import 'types.dart'; -/// Extensions on [List]. +/// Extensions on `List`. extension ChatMessagesX on List { /// This function is to get a string representation of the chat messages /// based on the message content and role. diff --git a/packages/langchain_core/lib/src/prompts/types.dart b/packages/langchain_core/lib/src/prompts/types.dart index c68f0979..3bd9756b 100644 --- a/packages/langchain_core/lib/src/prompts/types.dart +++ b/packages/langchain_core/lib/src/prompts/types.dart @@ -90,7 +90,7 @@ class StringPromptValue implements PromptValue { /// /// When [toString] is called, it returns the string representation of the /// messages using the following format: -/// ``` +/// ```txt /// System: /// Human: /// AI: diff --git a/packages/langchain_core/lib/stores.dart b/packages/langchain_core/lib/stores.dart index 2a234153..96eb406a 100644 --- a/packages/langchain_core/lib/stores.dart +++ b/packages/langchain_core/lib/stores.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to stores. -library stores; +library; export 'src/stores/stores.dart'; diff --git a/packages/langchain_core/lib/tools.dart b/packages/langchain_core/lib/tools.dart index 9d0b95aa..d829f7d5 100644 --- a/packages/langchain_core/lib/tools.dart +++ b/packages/langchain_core/lib/tools.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to tools. -library tools; +library; export 'src/tools/tools.dart'; diff --git a/packages/langchain_core/lib/utils.dart b/packages/langchain_core/lib/utils.dart index cdcc6670..7ceacd01 100644 --- a/packages/langchain_core/lib/utils.dart +++ b/packages/langchain_core/lib/utils.dart @@ -1,4 +1,4 @@ /// Contains core utilities. -library utils; +library; export 'src/utils/utils.dart'; diff --git a/packages/langchain_core/lib/vector_stores.dart b/packages/langchain_core/lib/vector_stores.dart index 35174345..129d296c 100644 --- a/packages/langchain_core/lib/vector_stores.dart +++ b/packages/langchain_core/lib/vector_stores.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to vector stores. 
-library vector_stores; +library; export 'src/vector_stores/vector_stores.dart'; diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index b1d7e459..eedcc6b5 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -454,10 +454,10 @@ packages: dependency: transitive description: name: vm_service - sha256: f652077d0bdf60abe4c1f6377448e8655008eef28f128bc023f7b5e8dfeb48fc + sha256: "5c5f338a667b4c644744b661f309fb8080bb94b18a7e91ef1dbd343bed00ed6d" url: "https://pub.dev" source: hosted - version: "14.2.4" + version: "14.2.5" web: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 593dfe9c..3f945900 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -392,10 +392,10 @@ packages: dependency: transitive description: name: vm_service - sha256: f652077d0bdf60abe4c1f6377448e8655008eef28f128bc023f7b5e8dfeb48fc + sha256: "5c5f338a667b4c644744b661f309fb8080bb94b18a7e91ef1dbd343bed00ed6d" url: "https://pub.dev" source: hosted - version: "14.2.4" + version: "14.2.5" web: dependency: transitive description: diff --git a/packages/mistralai_dart/lib/mistralai_dart.dart b/packages/mistralai_dart/lib/mistralai_dart.dart index 31efab90..05cfac61 100644 --- a/packages/mistralai_dart/lib/mistralai_dart.dart +++ b/packages/mistralai_dart/lib/mistralai_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Mistral AI API (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -library mistralai_dart; +library; export 'src/client.dart'; export 'src/generated/client.dart' show MistralAIClientException; diff --git a/packages/ollama_dart/lib/ollama_dart.dart b/packages/ollama_dart/lib/ollama_dart.dart index a62c32c4..1195c10c 100644 --- a/packages/ollama_dart/lib/ollama_dart.dart +++ b/packages/ollama_dart/lib/ollama_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Ollama API (run Llama 3, Code Llama, and other models locally). 
-library ollama_dart; +library; export 'src/client.dart'; export 'src/generated/client.dart' show OllamaClientException; From c16d013445b200124cd8b74dab2798ffb393d365 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 22 Aug 2024 18:47:44 +0200 Subject: [PATCH 222/251] docs: Add Code Assist AI in README and documentation (#538) --- docs/_sidebar.md | 1 + packages/langchain/README.md | 2 ++ 2 files changed, 3 insertions(+) diff --git a/docs/_sidebar.md b/docs/_sidebar.md index a5572f4d..d04533a7 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -1,3 +1,4 @@ +- [![Code Assist AI](https://img.shields.io/badge/AI-Code%20Assist-EB9FDA)](https://app.commanddash.io/agent?github=https://github.com/davidmigloz/langchain_dart) - [Get started](README.md) - [Installation](/get_started/installation.md) - [Quickstart](/get_started/quickstart.md) diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 1e4ad928..2dfacb97 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -5,6 +5,7 @@ [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) [![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) [![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) +[![Code Assist AI](https://img.shields.io/badge/AI-Code%20Assist-EB9FDA)](https://app.commanddash.io/agent?github=https://github.com/davidmigloz/langchain_dart) Build LLM-powered Dart/Flutter applications. @@ -221,6 +222,7 @@ print(res); ## Documentation - [LangChain.dart documentation](https://langchaindart.dev) +- [Code Assist AI](https://app.commanddash.io/agent?github=https://github.com/davidmigloz/langchain_dart) (Chatbot for LangChain.dart documentation) - [Sample apps](https://github.com/davidmigloz/langchain_dart/tree/main/examples) - [LangChain.dart blog](https://blog.langchaindart.dev) - [Project board](https://github.com/users/davidmigloz/projects/2/views/1) From 752916652627ea2387ca5d8e765672451554f167 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 22 Aug 2024 18:53:03 +0200 Subject: [PATCH 223/251] test: Update model used in OpenAI tests to gpt-4o-mini (#539) --- .../test/agents/tools_test.dart | 3 - .../test/chains/qa_with_sources_test.dart | 1 - .../test/chat_models/anyscale_test.dart | 111 ------------------ .../test/chat_models/chat_openai_test.dart | 7 +- .../embeddings/anyscale_embeddings_test.dart | 36 ------ 5 files changed, 3 insertions(+), 155 deletions(-) delete mode 100644 packages/langchain_openai/test/chat_models/anyscale_test.dart delete mode 100644 packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart diff --git a/packages/langchain_openai/test/agents/tools_test.dart b/packages/langchain_openai/test/agents/tools_test.dart index c08f4979..03b52dea 100644 --- a/packages/langchain_openai/test/agents/tools_test.dart +++ b/packages/langchain_openai/test/agents/tools_test.dart @@ -24,7 +24,6 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', temperature: 0, ), ); @@ -46,7 +45,6 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', temperature: 0, ), ); @@ -135,7 +133,6 @@ void main() { final model = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', temperature: 0, ), 
).bind(ChatOpenAIOptions(tools: [tool])); diff --git a/packages/langchain_openai/test/chains/qa_with_sources_test.dart b/packages/langchain_openai/test/chains/qa_with_sources_test.dart index 15a80431..c655af98 100644 --- a/packages/langchain_openai/test/chains/qa_with_sources_test.dart +++ b/packages/langchain_openai/test/chains/qa_with_sources_test.dart @@ -125,7 +125,6 @@ Question: {question} final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4o-mini', temperature: 0, ), ); diff --git a/packages/langchain_openai/test/chat_models/anyscale_test.dart b/packages/langchain_openai/test/chat_models/anyscale_test.dart deleted file mode 100644 index f0a99e88..00000000 --- a/packages/langchain_openai/test/chat_models/anyscale_test.dart +++ /dev/null @@ -1,111 +0,0 @@ -@TestOn('vm') -library; // Uses dart:io - -import 'dart:io'; - -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/language_models.dart'; -import 'package:langchain_core/prompts.dart'; -import 'package:langchain_openai/langchain_openai.dart'; -import 'package:test/test.dart'; - -void main() { - group('Anyscale tests', () { - late ChatOpenAI chatModel; - - setUp(() async { - chatModel = ChatOpenAI( - apiKey: Platform.environment['ANYSCALE_API_KEY'], - baseUrl: 'https://api.endpoints.anyscale.com/v1', - ); - }); - - tearDown(() { - chatModel.close(); - }); - - test('Test invoke Anyscale API with different models', () async { - final models = [ - 'meta-llama/Llama-2-70b-chat-hf', - 'codellama/CodeLlama-34b-Instruct-hf', - 'mistralai/Mistral-7B-Instruct-v0.1', - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - ]; - for (final model in models) { - final res = await chatModel.invoke( - PromptValue.string( - 'List the numbers from 1 to 9 in order. ' - 'Output ONLY the numbers in one line without any spaces or commas. ' - 'NUMBERS:', - ), - options: ChatOpenAIOptions(model: model), - ); - - expect(res.id, isNotEmpty); - expect( - res.finishReason, - isNot(FinishReason.unspecified), - reason: model, - ); - expect( - res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), - contains('123456789'), - reason: model, - ); - expect(res.metadata, isNotNull, reason: model); - expect(res.metadata['created'], greaterThan(0), reason: model); - expect(res.metadata['model'], isNotEmpty, reason: model); - await Future.delayed(const Duration(seconds: 1)); // Rate limit - } - }); - - test('Test stream Anyscale API with different models', () async { - final models = [ - 'meta-llama/Llama-2-70b-chat-hf', - 'codellama/CodeLlama-34b-Instruct-hf', - 'mistralai/Mistral-7B-Instruct-v0.1', - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - ]; - for (final model in models) { - final stream = chatModel.stream( - PromptValue.string( - 'List the numbers from 1 to 9 in order. ' - 'Output ONLY the numbers in one line without any spaces or commas. 
' - 'NUMBERS:', - ), - options: ChatOpenAIOptions(model: model), - ); - - String content = ''; - int count = 0; - await for (final res in stream) { - content += res.output.content.replaceAll(RegExp(r'[\s\n,]'), ''); - count++; - } - expect(count, greaterThan(1), reason: model); - expect(content, contains('123456789'), reason: model); - await Future.delayed(const Duration(seconds: 1)); // Rate limit - } - }); - - test('Test countTokens', () async { - final models = [ - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'mistralai/Mistral-7B-Instruct-v0.2', - 'NousResearch/Nous-Hermes-2-Yi-34B', - 'openchat/openchat-3.5-1210', - 'togethercomputer/llama-2-70b-chat', - 'togethercomputer/falcon-40b-instruct', - ]; - for (final model in models) { - const text = 'Hello, how are you?'; - - final numTokens = await chatModel.countTokens( - PromptValue.chat([ChatMessage.humanText(text)]), - options: ChatOpenAIOptions(model: model), - ); - expect(numTokens, 13, reason: model); - } - }); - }); -} diff --git a/packages/langchain_openai/test/chat_models/chat_openai_test.dart b/packages/langchain_openai/test/chat_models/chat_openai_test.dart index d56ba1f9..a0ea44fb 100644 --- a/packages/langchain_openai/test/chat_models/chat_openai_test.dart +++ b/packages/langchain_openai/test/chat_models/chat_openai_test.dart @@ -208,7 +208,6 @@ void main() { test('Test countTokens messages', () async { final models = [ - 'gpt-3.5-turbo-16k-0613', 'gpt-4-0314', 'gpt-4-0613', ]; @@ -328,7 +327,7 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', + model: defaultModel, temperature: 0, seed: 12345, ), @@ -455,7 +454,7 @@ void main() { final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', + model: defaultModel, ), ); @@ -482,7 +481,7 @@ void main() { final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', + model: defaultModel, ), ); diff --git a/packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart b/packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart deleted file mode 100644 index 988c7e4c..00000000 --- a/packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart +++ /dev/null @@ -1,36 +0,0 @@ -@TestOn('vm') -library; // Uses dart:io - -import 'dart:io'; - -import 'package:langchain_openai/langchain_openai.dart'; -import 'package:test/test.dart'; - -void main() { - group('Anyscale AI Embeddings tests', () { - late OpenAIEmbeddings embeddings; - - setUp(() async { - embeddings = OpenAIEmbeddings( - apiKey: Platform.environment['ANYSCALE_API_KEY'], - baseUrl: 'https://api.endpoints.anyscale.com/v1', - ); - }); - - tearDown(() { - embeddings.close(); - }); - - test('Test Anyscale Embeddings models', () async { - final models = [ - 'thenlper/gte-large', - ]; - for (final model in models) { - embeddings.model = model; - final res = await embeddings.embedQuery('Hello world'); - expect(res.length, greaterThan(0)); - await Future.delayed(const Duration(seconds: 1)); // Rate limit - } - }); - }); -} From 248596e9e389e95682fe11fb009372f345760a4e Mon Sep 17 00:00:00 2001 From: David Miguel Date: Thu, 22 Aug 2024 18:55:31 +0200 Subject: [PATCH 224/251] chore(release): publish packages - langchain@0.7.5 - langchain_community@0.3.1 - langchain_core@0.3.5 - langchain_google@0.6.2 - langchain_mistralai@0.2.3 - langchain_ollama@0.3.1 - langchain_openai@0.7.1 - ollama_dart@0.2.1 - openai_dart@0.4.1 - 
langchain_firebase@0.2.1+1 - langchain_supabase@0.1.1+2 - langchain_pinecone@0.1.0+8 - langchain_anthropic@0.1.1+1 - langchain_chroma@0.2.1+2 --- CHANGELOG.md | 92 ++++++++++++++++++- examples/browser_summarizer/pubspec.yaml | 6 +- examples/docs_examples/pubspec.yaml | 16 ++-- examples/hello_world_backend/pubspec.yaml | 4 +- examples/hello_world_cli/pubspec.yaml | 4 +- examples/hello_world_flutter/pubspec.yaml | 10 +- examples/wikivoyage_eu/pubspec.yaml | 6 +- melos.yaml | 2 +- packages/anthropic_sdk_dart/CHANGELOG.md | 4 +- packages/chromadb/CHANGELOG.md | 4 +- packages/googleai_dart/CHANGELOG.md | 4 +- packages/langchain/CHANGELOG.md | 10 +- packages/langchain/pubspec.yaml | 10 +- packages/langchain_anthropic/CHANGELOG.md | 8 +- packages/langchain_anthropic/pubspec.yaml | 4 +- packages/langchain_chroma/CHANGELOG.md | 8 +- packages/langchain_chroma/pubspec.yaml | 10 +- packages/langchain_community/CHANGELOG.md | 8 +- packages/langchain_community/pubspec.yaml | 6 +- packages/langchain_core/CHANGELOG.md | 10 +- packages/langchain_core/pubspec.yaml | 2 +- packages/langchain_firebase/CHANGELOG.md | 8 +- .../langchain_firebase/example/pubspec.yaml | 4 +- packages/langchain_firebase/pubspec.yaml | 4 +- packages/langchain_google/CHANGELOG.md | 8 +- packages/langchain_google/pubspec.yaml | 4 +- packages/langchain_mistralai/CHANGELOG.md | 8 +- packages/langchain_mistralai/pubspec.yaml | 4 +- packages/langchain_ollama/CHANGELOG.md | 9 +- packages/langchain_ollama/pubspec.yaml | 6 +- packages/langchain_openai/CHANGELOG.md | 14 ++- packages/langchain_openai/pubspec.yaml | 10 +- packages/langchain_pinecone/CHANGELOG.md | 8 +- packages/langchain_pinecone/pubspec.yaml | 6 +- packages/langchain_supabase/CHANGELOG.md | 8 +- packages/langchain_supabase/pubspec.yaml | 10 +- packages/mistralai_dart/CHANGELOG.md | 4 +- packages/ollama_dart/CHANGELOG.md | 8 +- packages/ollama_dart/pubspec.yaml | 2 +- packages/openai_dart/CHANGELOG.md | 11 ++- packages/openai_dart/pubspec.yaml | 2 +- packages/tavily_dart/CHANGELOG.md | 4 +- packages/vertex_ai/CHANGELOG.md | 4 +- 43 files changed, 287 insertions(+), 87 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 47d794f4..2bedbee2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,96 @@ # Change Log -Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 2024-08-22 + +### Changes + +--- + +Packages with breaking changes: + + - There are no breaking changes in this release. 
+ +Packages with other changes: + + - [`langchain` - `v0.7.5`](#langchain---v075) + - [`langchain_core` - `v0.3.5`](#langchain_core---v035) + - [`langchain_community` - `v0.3.1`](#langchain_community---v031) + - [`langchain_openai` - `v0.7.1`](#langchain_openai---v071) + - [`langchain_ollama` - `v0.3.1`](#langchain_ollama---v031) + - [`langchain_google` - `v0.6.2`](#langchain_google---v062) + - [`langchain_mistralai` - `v0.2.3`](#langchain_mistralai---v023) + - [`ollama_dart` - `v0.2.1`](#ollama_dart---v021) + - [`openai_dart` - `v0.4.1`](#openai_dart---v041) + - [`langchain_firebase` - `v0.2.1+1`](#langchain_firebase---v0211) + - [`langchain_supabase` - `v0.1.1+2`](#langchain_supabase---v0112) + - [`langchain_pinecone` - `v0.1.0+8`](#langchain_pinecone---v0108) + - [`langchain_anthropic` - `v0.1.1+1`](#langchain_anthropic---v0111) + - [`langchain_chroma` - `v0.2.1+2`](#langchain_chroma---v0212) + +Packages with dependency updates only: + +> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project. + + - `langchain_firebase` - `v0.2.1+1` + - `langchain_supabase` - `v0.1.1+2` + - `langchain_pinecone` - `v0.1.0+8` + - `langchain_anthropic` - `v0.1.1+1` + - `langchain_chroma` - `v0.2.1+2` + +--- + +#### `langchain` - `v0.7.5` + + - **FEAT**: Add ToolsAgent for models with tool-calling support ([#530](https://github.com/davidmigloz/langchain_dart/issues/530)). ([f3ee5b44](https://github.com/davidmigloz/langchain_dart/commit/f3ee5b44c4ffa378343ec4ee1e08d8e594a6cb36)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + - **DOCS**: Add Code Assist AI in README and documentation ([#538](https://github.com/davidmigloz/langchain_dart/issues/538)). ([e752464c](https://github.com/davidmigloz/langchain_dart/commit/e752464c0d2fc7e0ccc878933b0ef934c9527567)) + +#### `langchain_core` - `v0.3.5` + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + - **FEAT**: Support OpenAI's strict mode for tool calling in ChatOpenAI ([#536](https://github.com/davidmigloz/langchain_dart/issues/536)). ([71623f49](https://github.com/davidmigloz/langchain_dart/commit/71623f490289e63252165167305e00038d800be1)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + +#### `langchain_community` - `v0.3.1` + + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + +#### `langchain_openai` - `v0.7.1` + + - **FEAT**: Add support for Structured Outputs in ChatOpenAI ([#526](https://github.com/davidmigloz/langchain_dart/issues/526)). 
([c5387b5d](https://github.com/davidmigloz/langchain_dart/commit/c5387b5dd87fe2aac511c4eca2d4a497065db61f)) + - **FEAT**: Handle refusal in OpenAI's Structured Outputs API ([#533](https://github.com/davidmigloz/langchain_dart/issues/533)). ([f4c4ed99](https://github.com/davidmigloz/langchain_dart/commit/f4c4ed9902177560f13fa9f44b07f0a49c3fdf0a)) + - **FEAT**: Include logprobs in result metadata from ChatOpenAI ([#535](https://github.com/davidmigloz/langchain_dart/issues/535)). ([1834b3ad](https://github.com/davidmigloz/langchain_dart/commit/1834b3adb210b7d190a7e0574a304f069813486b)) + - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) + - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + - **REFACTOR**: Don't send OpenAI-Beta header in ChatOpenAI ([#511](https://github.com/davidmigloz/langchain_dart/issues/511)). ([0e532bab](https://github.com/davidmigloz/langchain_dart/commit/0e532bab84483bf9d77a0d745f1a591eea2ff7c8)) + +#### `langchain_ollama` - `v0.3.1` + + - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + +#### `langchain_google` - `v0.6.2` + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + +#### `langchain_mistralai` - `v0.2.3` + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + +#### `openai_dart` - `v0.4.1` + + - **FEAT**: Add support for Structured Outputs ([#525](https://github.com/davidmigloz/langchain_dart/issues/525)). ([c7574077](https://github.com/davidmigloz/langchain_dart/commit/c7574077195acfc96e9ca9d526cc050788c23c1d)) + - **FEAT**: Add log probabilities for refusal tokens ([#534](https://github.com/davidmigloz/langchain_dart/issues/534)). ([8470a24c](https://github.com/davidmigloz/langchain_dart/commit/8470a24cc42042e20ffffa4b67bc831e03efbc6c)) + - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) + - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). 
([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) + +#### `ollama_dart` - `v0.2.1` + + - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) + ## 2024-07-26 diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 326a41a5..f284843f 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -13,9 +13,9 @@ dependencies: flutter_bloc: ^8.1.6 flutter_markdown: ^0.7.3 js: ^0.7.1 - langchain: ^0.7.4 - langchain_community: 0.3.0 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_community: 0.3.1 + langchain_openai: ^0.7.1 shared_preferences: ^2.3.0 flutter: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 398e7e15..0136cd5a 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -7,11 +7,11 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.4 - langchain_anthropic: ^0.1.1 - langchain_chroma: ^0.2.1+1 - langchain_community: 0.3.0 - langchain_google: ^0.6.1 - langchain_mistralai: ^0.2.2 - langchain_ollama: ^0.3.0 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_anthropic: ^0.1.1+1 + langchain_chroma: ^0.2.1+2 + langchain_community: 0.3.1 + langchain_google: ^0.6.2 + langchain_mistralai: ^0.2.3 + langchain_ollama: ^0.3.1 + langchain_openai: ^0.7.1 diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index b7b5dd3a..55135704 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -7,7 +7,7 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.4 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_openai: ^0.7.1 shelf: ^1.4.2 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index d73e4928..26e63ed8 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -7,5 +7,5 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.4 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_openai: ^0.7.1 diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index fb83f0cc..15fd553e 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -12,11 +12,11 @@ dependencies: equatable: ^2.0.5 flutter_bloc: ^8.1.6 flutter_markdown: ^0.7.3 - langchain: ^0.7.4 - langchain_google: ^0.6.1 - langchain_mistralai: ^0.2.2 - langchain_ollama: ^0.3.0 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_google: ^0.6.2 + langchain_mistralai: ^0.2.3 + langchain_ollama: ^0.3.1 + langchain_openai: ^0.7.1 flutter: uses-material-design: true diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index ab9a51be..4d7ddfc5 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -7,6 +7,6 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.4 - langchain_ollama: ^0.3.0 - langchain_community: 0.3.0 + langchain: ^0.7.5 + langchain_ollama: ^0.3.1 + langchain_community: 0.3.1 diff --git a/melos.yaml b/melos.yaml index 69c804de..3a1c0092 100644 --- a/melos.yaml +++ b/melos.yaml @@ -14,7 +14,7 @@ command: 
branch: main changelogs: - path: CHANGELOG.md - description: "Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release." + description: "📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details." packageFilters: no-private: true bootstrap: diff --git a/packages/anthropic_sdk_dart/CHANGELOG.md b/packages/anthropic_sdk_dart/CHANGELOG.md index 600092c2..c9710913 100644 --- a/packages/anthropic_sdk_dart/CHANGELOG.md +++ b/packages/anthropic_sdk_dart/CHANGELOG.md @@ -1,4 +1,6 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- ## 0.1.0 diff --git a/packages/chromadb/CHANGELOG.md b/packages/chromadb/CHANGELOG.md index 171047ca..70f441bd 100644 --- a/packages/chromadb/CHANGELOG.md +++ b/packages/chromadb/CHANGELOG.md @@ -1,4 +1,6 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- ## 0.2.0+1 diff --git a/packages/googleai_dart/CHANGELOG.md b/packages/googleai_dart/CHANGELOG.md index 437f20b2..7bc6e29d 100644 --- a/packages/googleai_dart/CHANGELOG.md +++ b/packages/googleai_dart/CHANGELOG.md @@ -1,4 +1,6 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- ## 0.1.0+2 diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index 680dadd0..79614782 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -1,4 +1,12 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.7.5 + + - **FEAT**: Add ToolsAgent for models with tool-calling support ([#530](https://github.com/davidmigloz/langchain_dart/issues/530)). 
([f3ee5b44](https://github.com/davidmigloz/langchain_dart/commit/f3ee5b44c4ffa378343ec4ee1e08d8e594a6cb36)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + - **DOCS**: Add Code Assist AI in README and documentation ([#538](https://github.com/davidmigloz/langchain_dart/issues/538)). ([e752464c](https://github.com/davidmigloz/langchain_dart/commit/e752464c0d2fc7e0ccc878933b0ef934c9527567)) ## 0.7.4 diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 857d9c79..06b182e2 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain description: Build powerful LLM-based Dart and Flutter applications with LangChain.dart. -version: 0.7.4 +version: 0.7.5 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart @@ -19,12 +19,12 @@ dependencies: characters: ^1.3.0 collection: ^1.18.0 crypto: ^3.0.3 - langchain_core: 0.3.4 + langchain_core: 0.3.5 meta: ^1.11.0 uuid: ^4.4.2 dev_dependencies: test: ^1.25.8 - langchain_community: ^0.3.0 - langchain_openai: ^0.7.0 - langchain_ollama: ^0.3.0 + langchain_community: ^0.3.1 + langchain_openai: ^0.7.1 + langchain_ollama: ^0.3.1 diff --git a/packages/langchain_anthropic/CHANGELOG.md b/packages/langchain_anthropic/CHANGELOG.md index 03cf82f7..690821d1 100644 --- a/packages/langchain_anthropic/CHANGELOG.md +++ b/packages/langchain_anthropic/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.1.1+1 + + - Update a dependency to the latest release. ## 0.1.1 diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index c25c8e94..6aedbe71 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_anthropic description: Anthropic module for LangChain.dart (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.). 
-version: 0.1.1 +version: 0.1.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ dependencies: anthropic_sdk_dart: ^0.1.0 collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 rxdart: ">=0.27.7 <0.29.0" diff --git a/packages/langchain_chroma/CHANGELOG.md b/packages/langchain_chroma/CHANGELOG.md index cb464d8e..45eaafa2 100644 --- a/packages/langchain_chroma/CHANGELOG.md +++ b/packages/langchain_chroma/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.2.1+2 + + - Update a dependency to the latest release. ## 0.2.1+1 diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 391b329b..59aa28a5 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_chroma description: LangChain.dart integration module for Chroma open-source embedding database. -version: 0.2.1+1 +version: 0.2.1+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart @@ -19,12 +19,12 @@ environment: dependencies: chromadb: ^0.2.0+1 http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 meta: ^1.11.0 uuid: ^4.4.2 dev_dependencies: test: ^1.25.8 - langchain: ^0.7.4 - langchain_community: 0.3.0 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_community: 0.3.1 + langchain_openai: ^0.7.1 diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 01068b20..9add3205 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.3.1 + + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). 
([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) ## 0.3.0 diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index ebf10d32..de530389 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. -version: 0.3.0 +version: 0.3.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart @@ -22,7 +22,7 @@ dependencies: flat_buffers: ^23.5.26 http: ^1.2.2 json_path: ^0.7.4 - langchain_core: 0.3.4 + langchain_core: 0.3.5 math_expressions: ^2.6.0 meta: ^1.11.0 objectbox: ^4.0.1 @@ -31,7 +31,7 @@ dependencies: dev_dependencies: build_runner: ^2.4.11 - langchain_openai: ^0.7.0 + langchain_openai: ^0.7.1 objectbox_generator: ^4.0.1 test: ^1.25.8 diff --git a/packages/langchain_core/CHANGELOG.md b/packages/langchain_core/CHANGELOG.md index 9e750ed8..b7592ca0 100644 --- a/packages/langchain_core/CHANGELOG.md +++ b/packages/langchain_core/CHANGELOG.md @@ -1,4 +1,12 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.3.5 + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + - **FEAT**: Support OpenAI's strict mode for tool calling in ChatOpenAI ([#536](https://github.com/davidmigloz/langchain_dart/issues/536)). ([71623f49](https://github.com/davidmigloz/langchain_dart/commit/71623f490289e63252165167305e00038d800be1)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) ## 0.3.4 diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index 170363a7..d322abdc 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_core description: Contains core abstractions of LangChain.dart and the LangChain Expression Language (LCEL). 
-version: 0.3.4 +version: 0.3.5 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index d7aedb1f..11f4c2ea 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.2.1+1 + + - Update a dependency to the latest release. ## 0.2.1 diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index d63e1ccd..f63d336a 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -13,8 +13,8 @@ dependencies: flutter: sdk: flutter flutter_markdown: ^0.7.3 - langchain: 0.7.4 - langchain_firebase: 0.2.1 + langchain: 0.7.5 + langchain_firebase: 0.2.1+1 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index d838c00c..a64aae40 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). -version: 0.2.1 +version: 0.2.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart @@ -23,7 +23,7 @@ dependencies: firebase_auth: ^5.1.0 firebase_core: ^3.3.0 firebase_vertexai: ^0.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 meta: ^1.11.0 uuid: ^4.4.2 diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index fb087939..a4288382 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.6.2 + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). 
([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) ## 0.6.1 diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index e3c502ea..a4897bd3 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_google description: LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). -version: 0.6.1 +version: 0.6.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart @@ -24,7 +24,7 @@ dependencies: googleapis: ^12.0.0 googleapis_auth: ^1.6.0 http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 meta: ^1.11.0 uuid: ^4.4.2 vertex_ai: ^0.1.0+1 diff --git a/packages/langchain_mistralai/CHANGELOG.md b/packages/langchain_mistralai/CHANGELOG.md index acae7e78..a60fe14b 100644 --- a/packages/langchain_mistralai/CHANGELOG.md +++ b/packages/langchain_mistralai/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.2.3 + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) ## 0.2.2 diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 5d12387b..4a3583ed 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_mistralai description: LangChain.dart integration module for Mistral AI (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.2.2 +version: 0.2.3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_mistralai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_mistralai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ environment: dependencies: collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 mistralai_dart: ^0.0.3+3 diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index e8f7ae0e..b9795885 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -1,4 +1,11 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. 
+ +--- + +## 0.3.1 + + - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) ## 0.3.0 diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 53d659d0..51c98cd9 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_ollama description: LangChain.dart integration module for Ollama (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). -version: 0.3.0 +version: 0.3.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart @@ -19,10 +19,10 @@ environment: dependencies: collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.2.0 + ollama_dart: ^0.2.1 uuid: ^4.4.2 dev_dependencies: diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index 5b2c1ed7..ab160770 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -1,4 +1,16 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.7.1 + + - **FEAT**: Add support for Structured Outputs in ChatOpenAI ([#526](https://github.com/davidmigloz/langchain_dart/issues/526)). ([c5387b5d](https://github.com/davidmigloz/langchain_dart/commit/c5387b5dd87fe2aac511c4eca2d4a497065db61f)) + - **FEAT**: Handle refusal in OpenAI's Structured Outputs API ([#533](https://github.com/davidmigloz/langchain_dart/issues/533)). ([f4c4ed99](https://github.com/davidmigloz/langchain_dart/commit/f4c4ed9902177560f13fa9f44b07f0a49c3fdf0a)) + - **FEAT**: Include logprobs in result metadata from ChatOpenAI ([#535](https://github.com/davidmigloz/langchain_dart/issues/535)). ([1834b3ad](https://github.com/davidmigloz/langchain_dart/commit/1834b3adb210b7d190a7e0574a304f069813486b)) + - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) + - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). 
([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + - **REFACTOR**: Don't send OpenAI-Beta header in ChatOpenAI ([#511](https://github.com/davidmigloz/langchain_dart/issues/511)). ([0e532bab](https://github.com/davidmigloz/langchain_dart/commit/0e532bab84483bf9d77a0d745f1a591eea2ff7c8)) ## 0.7.0 diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 2e8e5ff6..bf0db409 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_openai description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). -version: 0.7.0 +version: 0.7.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,13 +19,13 @@ environment: dependencies: collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.4.0 + openai_dart: ^0.4.1 uuid: ^4.4.2 dev_dependencies: - langchain: ^0.7.4 - langchain_community: 0.3.0 + langchain: ^0.7.5 + langchain_community: 0.3.1 test: ^1.25.8 diff --git a/packages/langchain_pinecone/CHANGELOG.md b/packages/langchain_pinecone/CHANGELOG.md index 8925fcd8..e277b4a0 100644 --- a/packages/langchain_pinecone/CHANGELOG.md +++ b/packages/langchain_pinecone/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.1.0+8 + + - Update a dependency to the latest release. ## 0.1.0+7 diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index ffbd4c9a..8cbec927 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_pinecone description: LangChain.dart integration module for Pinecone fully-managed vector database. -version: 0.1.0+7 +version: 0.1.0+8 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart @@ -18,11 +18,11 @@ environment: dependencies: http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.4.2 dev_dependencies: test: ^1.25.8 - langchain_openai: ^0.7.0 + langchain_openai: ^0.7.1 diff --git a/packages/langchain_supabase/CHANGELOG.md b/packages/langchain_supabase/CHANGELOG.md index 3cd0af92..be6b7129 100644 --- a/packages/langchain_supabase/CHANGELOG.md +++ b/packages/langchain_supabase/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. 
+📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.1.1+2 + + - Update a dependency to the latest release. ## 0.1.1+1 diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index af7c072f..da03bb1c 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_supabase description: LangChain.dart integration module for Supabase (e.g. Supabase Vector). -version: 0.1.1+1 +version: 0.1.1+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart @@ -18,12 +18,12 @@ environment: dependencies: http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 meta: ^1.11.0 supabase: ^2.2.7 dev_dependencies: test: ^1.25.8 - langchain: ^0.7.4 - langchain_community: 0.3.0 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_community: 0.3.1 + langchain_openai: ^0.7.1 diff --git a/packages/mistralai_dart/CHANGELOG.md b/packages/mistralai_dart/CHANGELOG.md index 9a9234bb..ec5979cc 100644 --- a/packages/mistralai_dart/CHANGELOG.md +++ b/packages/mistralai_dart/CHANGELOG.md @@ -1,4 +1,6 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- ## 0.0.3+3 diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index af9e377b..c8b93090 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.2.1 + + - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) ## 0.2.0 diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 3f8d7f75..66bea8fc 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: ollama_dart description: Dart Client for the Ollama API (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). 
-version: 0.2.0 +version: 0.2.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index d1fafe5f..789ec5a7 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -1,4 +1,13 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.4.1 + + - **FEAT**: Add support for Structured Outputs ([#525](https://github.com/davidmigloz/langchain_dart/issues/525)). ([c7574077](https://github.com/davidmigloz/langchain_dart/commit/c7574077195acfc96e9ca9d526cc050788c23c1d)) + - **FEAT**: Add log probabilities for refusal tokens ([#534](https://github.com/davidmigloz/langchain_dart/issues/534)). ([8470a24c](https://github.com/davidmigloz/langchain_dart/commit/8470a24cc42042e20ffffa4b67bc831e03efbc6c)) + - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) + - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) ## 0.4.0 diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index 4126650f..91e131b4 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: openai_dart description: Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. -version: 0.4.0 +version: 0.4.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/tavily_dart/CHANGELOG.md b/packages/tavily_dart/CHANGELOG.md index 9abf1cdf..74cd20f8 100644 --- a/packages/tavily_dart/CHANGELOG.md +++ b/packages/tavily_dart/CHANGELOG.md @@ -1,4 +1,6 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. 
+ +--- ## 0.1.0 diff --git a/packages/vertex_ai/CHANGELOG.md b/packages/vertex_ai/CHANGELOG.md index 217a19e8..5c733127 100644 --- a/packages/vertex_ai/CHANGELOG.md +++ b/packages/vertex_ai/CHANGELOG.md @@ -1,4 +1,6 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- ## 0.1.0+1 From ab9d98d110e302308f7edc46ae8c18b113c735de Mon Sep 17 00:00:00 2001 From: David Miguel Date: Thu, 22 Aug 2024 19:16:42 +0200 Subject: [PATCH 225/251] build: Update pubspec.lock files --- examples/browser_summarizer/pubspec.lock | 10 ++++----- examples/docs_examples/pubspec.lock | 22 +++++++++---------- examples/hello_world_backend/pubspec.lock | 8 +++---- examples/hello_world_cli/pubspec.lock | 8 +++---- examples/hello_world_flutter/pubspec.lock | 16 +++++++------- examples/wikivoyage_eu/pubspec.lock | 10 ++++----- packages/langchain/pubspec_overrides.yaml | 2 +- .../langchain_firebase/example/pubspec.lock | 6 ++--- packages/langchain_firebase/pubspec.lock | 2 +- 9 files changed, 42 insertions(+), 42 deletions(-) diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index fe72f39c..6eada274 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -233,28 +233,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.0" + version: "0.3.1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.0" + version: "0.7.1" langchain_tiktoken: dependency: transitive description: @@ -325,7 +325,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.0" + version: "0.4.1" path: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index caa950ab..78752c5c 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -237,63 +237,63 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_anthropic: dependency: "direct main" description: path: "../../packages/langchain_anthropic" relative: true source: path - version: "0.1.1" + version: "0.1.1+1" langchain_chroma: dependency: "direct main" description: path: "../../packages/langchain_chroma" relative: true source: path - version: "0.2.1+1" + version: "0.2.1+2" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.0" + version: "0.3.1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_google: 
dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.1" + version: "0.6.2" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.2" + version: "0.2.3" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.3.0" + version: "0.3.1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.0" + version: "0.7.1" langchain_tiktoken: dependency: transitive description: @@ -347,14 +347,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.2.0" + version: "0.2.1" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.0" + version: "0.4.1" path: dependency: transitive description: diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index b2934b90..f7ad7603 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -111,21 +111,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.0" + version: "0.7.1" langchain_tiktoken: dependency: transitive description: @@ -148,7 +148,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.0" + version: "0.4.1" path: dependency: transitive description: diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 40613637..94af9a94 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -103,21 +103,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.0" + version: "0.7.1" langchain_tiktoken: dependency: transitive description: @@ -140,7 +140,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.0" + version: "0.4.1" path: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 02e61985..7eb4a4d8 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -196,42 +196,42 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.1" + version: "0.6.2" langchain_mistralai: 
dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.2" + version: "0.2.3" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.3.0" + version: "0.3.1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.0" + version: "0.7.1" langchain_tiktoken: dependency: transitive description: @@ -285,14 +285,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.2.0" + version: "0.2.1" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.0" + version: "0.4.1" path: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index 2e50c6c0..df3386b8 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -167,28 +167,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.0" + version: "0.3.1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.3.0" + version: "0.3.1" langchain_tiktoken: dependency: transitive description: @@ -235,7 +235,7 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.2.0" + version: "0.2.1" path: dependency: transitive description: diff --git a/packages/langchain/pubspec_overrides.yaml b/packages/langchain/pubspec_overrides.yaml index 65792891..d9c6fc7e 100644 --- a/packages/langchain/pubspec_overrides.yaml +++ b/packages/langchain/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_core,langchain_community,langchain_ollama,langchain_openai,ollama_dart,openai_dart,tavily_dart +# melos_managed_dependency_overrides: langchain_community,langchain_core,langchain_ollama,langchain_openai,ollama_dart,openai_dart,tavily_dart dependency_overrides: langchain_community: path: ../langchain_community diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index eedcc6b5..5a3fa013 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -262,21 +262,21 @@ packages: path: "../../langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_core: dependency: "direct overridden" description: path: "../../langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_firebase: dependency: "direct main" description: path: ".." 
relative: true source: path - version: "0.2.1" + version: "0.2.1+1" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 3f945900..de205b64 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -230,7 +230,7 @@ packages: path: "../langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" leak_tracker: dependency: transitive description: From 3f7cfdc26bd96fc4dfb65a448187c80e8ce7d2f1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2024 09:39:54 +0200 Subject: [PATCH 226/251] build(deps): bump bluefireteam/melos-action (#541) Bumps [bluefireteam/melos-action](https://github.com/bluefireteam/melos-action) from 6085791af7036f6366c9a4b9d55105c0ef9c6388 to 7e70fbe34bbd91a75eb505eeb4174b0ac9a1df52. - [Release notes](https://github.com/bluefireteam/melos-action/releases) - [Commits](https://github.com/bluefireteam/melos-action/compare/6085791af7036f6366c9a4b9d55105c0ef9c6388...7e70fbe34bbd91a75eb505eeb4174b0ac9a1df52) --- updated-dependencies: - dependency-name: bluefireteam/melos-action dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 0e6c4e20..b77c2ed8 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -38,7 +38,7 @@ jobs: run: flutter pub cache clean - name: Install Melos - uses: bluefireteam/melos-action@6085791af7036f6366c9a4b9d55105c0ef9c6388 + uses: bluefireteam/melos-action@7e70fbe34bbd91a75eb505eeb4174b0ac9a1df52 with: run-bootstrap: false From a2007f8c95600f15a10c1519da9ae98bbe27b035 Mon Sep 17 00:00:00 2001 From: Ganesh Date: Mon, 26 Aug 2024 15:53:59 +0530 Subject: [PATCH 227/251] feat: Add retry support for Runnables (#540) Co-authored-by: David Miguel --- docs/_sidebar.md | 1 + docs/expression_language/primitives.md | 1 + docs/expression_language/primitives/retry.md | 94 ++++++++++ .../expression_language/primitives/retry.dart | 177 ++++++++++++++++++ packages/langchain/lib/src/utils/utils.dart | 6 +- .../lib/src/runnables/retry.dart | 63 +++++++ .../lib/src/runnables/runnable.dart | 35 +++- .../lib/src/runnables/runnables.dart | 1 + .../lib/src/utils/retry_client.dart | 92 +++++++++ .../langchain_core/lib/src/utils/utils.dart | 1 + .../test/runnables/retry_test.dart | 87 +++++++++ 11 files changed, 556 insertions(+), 2 deletions(-) create mode 100644 docs/expression_language/primitives/retry.md create mode 100644 examples/docs_examples/bin/expression_language/primitives/retry.dart create mode 100644 packages/langchain_core/lib/src/runnables/retry.dart create mode 100644 packages/langchain_core/lib/src/utils/retry_client.dart create mode 100644 packages/langchain_core/test/runnables/retry_test.dart diff --git a/docs/_sidebar.md b/docs/_sidebar.md index d04533a7..ee2c472a 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -14,6 +14,7 @@ - [Function: Run custom logic](/expression_language/primitives/function.md) - [Binding: Configuring runnables](/expression_language/primitives/binding.md) - [Router: Routing inputs](/expression_language/primitives/router.md) + - [Retry: Retrying runnables](/expression_language/primitives/retry.md) - 
[Streaming](/expression_language/streaming.md)
   - [Fallbacks](/expression_language/fallbacks.md)
 - Cookbook
diff --git a/docs/expression_language/primitives.md b/docs/expression_language/primitives.md
index 89d618e4..aecaa93e 100644
--- a/docs/expression_language/primitives.md
+++ b/docs/expression_language/primitives.md
@@ -11,3 +11,4 @@ This section goes into greater depth on where and how some of these components a
 - [Function: Run custom logic](/expression_language/primitives/function.md)
 - [Binding: Configuring runnables](/expression_language/primitives/binding.md)
 - [Router: Routing inputs](/expression_language/primitives/router.md)
+- [Retry: Retrying Runnable](/expression_language/primitives/retry.md)
diff --git a/docs/expression_language/primitives/retry.md b/docs/expression_language/primitives/retry.md
new file mode 100644
index 00000000..ef6ae6c9
--- /dev/null
+++ b/docs/expression_language/primitives/retry.md
@@ -0,0 +1,94 @@
+# RunnableRetry: Retrying Runnables
+
+`RunnableRetry` wraps a `Runnable` and retries it if it fails. It can be created using `runnable.withRetry()`.
+
+By default, the runnable will be retried 3 times with an exponential backoff strategy.
+
+## Usage
+
+## Creating a RunnableRetry
+
+```dart
+final model = ChatOpenAI();
+final input = PromptValue.string('Explain why sky is blue in 2 lines');
+
+final modelWithRetry = model.withRetry();
+final res = await modelWithRetry.invoke(input);
+print(res);
+```
+
+## Retrying a chain
+
+`RunnableRetry` can be used to retry any `Runnable`, including a chain of `Runnable`s.
+
+Example:
+
+```dart
+final promptTemplate = ChatPromptTemplate.fromTemplate('tell me a joke about {topic}');
+final model = ChatOpenAI(
+  defaultOptions: ChatOpenAIOptions(model: 'gpt-4o'),
+);
+final chain = promptTemplate.pipe(model).withRetry();
+
+final res = await chain.batch(
+  [
+    {'topic': 'bears'},
+    {'topic': 'cats'},
+  ],
+);
+print(res);
+```
+
+> In general, it's best to keep the scope of the retry as small as possible.
+
+## Configuring the retry
+
+```dart
+// Passing a fake model to cause an exception
+final input = PromptValue.string('Explain why sky is blue in 2 lines');
+final model = ChatOpenAI(
+  defaultOptions: ChatOpenAIOptions(model: 'fake-model'),
+);
+final modelWithRetry = model.withRetry(
+  maxRetries: 3,
+  addJitter: true,
+);
+final res = await modelWithRetry.invoke(input);
+print(res);
+// Retried 3 times and then threw:
+// OpenAIClientException({
+//   "uri": "https://api.openai.com/v1/chat/completions",
+//   "method": "POST",
+//   "code": 404,
+//   "message": "Unsuccessful response",
+//   "body": {
+//     "error": {
+//       "message": "The model `fake-model` does not exist or you do not have access to it.",
+//       "type": "invalid_request_error",
+//       "param": null,
+//       "code": "model_not_found"
+//     }
+//   }
+// })
+```
+
+## Passing delay durations
+
+If you want to use custom delay durations for each retry attempt, you can pass a list of `Duration` objects to the `delayDurations` parameter.
+ +```dart +final input = PromptValue.string('Explain why sky is blue in 2 lines'); +final model = ChatOpenAI( + defaultOptions: ChatOpenAIOptions(model: 'fake-model'), +); +final modelWithRetry = model.withRetry( + maxRetries: 3, + delayDurations: [ + Duration(seconds: 1), + Duration(seconds: 2), + Duration(seconds: 3), + ], +); +final res = await modelWithRetry.invoke(input); +print(res); +``` diff --git a/examples/docs_examples/bin/expression_language/primitives/retry.dart b/examples/docs_examples/bin/expression_language/primitives/retry.dart new file mode 100644 index 00000000..917ac501 --- /dev/null +++ b/examples/docs_examples/bin/expression_language/primitives/retry.dart @@ -0,0 +1,177 @@ +// ignore_for_file: avoid_print +import 'dart:io'; +import 'package:langchain/langchain.dart'; +import 'package:langchain_openai/langchain_openai.dart'; + +void main() async { + await _modelWithRetry(); + await _chainWithRetry(); + await _withRetryOptions(); + await _withDelayDurations(); +} + +Future _modelWithRetry() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final model = ChatOpenAI(apiKey: openaiApiKey); + final input = PromptValue.string('Explain why sky is blue in 2 lines'); + + final modelWithRetry = model.withRetry(); + final res = await modelWithRetry.invoke(input); + print(res); + /* + ChatResult{ + id: chatcmpl-9zmFYnu19Pd6ss3zVFHlKN71DILtx, + output: AIChatMessage{ + content: The sky appears blue due to Rayleigh scattering, where shorter wavelengths of sunlight (blue light) are scattered more than longer wavelengths (red light) by the molecules in the Earth's atmosphere. This scattering effect is most prominent when the sun is high in the sky., + toolCalls: [], +}, + finishReason: FinishReason.stop, + metadata: {model: gpt-4o-mini-2024-07-18, created: 1724510508, system_fingerprint: fp_48196bc67a}, + usage: LanguageModelUsage{ + promptTokens: 16, + promptBillableCharacters: null, + responseTokens: 52, + responseBillableCharacters: null, + totalTokens: 68} +, + streaming: false +} +*/ +} + +Future _chainWithRetry() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final promptTemplate = + ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + final model = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), + ); + final chain = promptTemplate.pipe(model).withRetry(); + + final res = await chain.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], + ); + print(res); + /* + [ChatResult{ + id: chatcmpl-9zmjiMfHP2WP3PhM6YXdoHXS02ZAm, + output: AIChatMessage{ + content: Sure, here's a bear-themed joke for you: + +Why did the bear refuse to play cards? + +Because he was afraid he might get spotted—he couldn’t bear the tension! 🐻♠️, + toolCalls: [], +}, + finishReason: FinishReason.stop, + metadata: {model: gpt-4o-2024-05-13, created: 1724512378, system_fingerprint: fp_3aa7262c27}, + usage: LanguageModelUsage{ + promptTokens: 13, + promptBillableCharacters: null, + responseTokens: 41, + responseBillableCharacters: null, + totalTokens: 54} +, + streaming: false +}, ChatResult{ + id: chatcmpl-9zmji1gxCZ4yR3UtX7Af4TBrRhPP1, + output: AIChatMessage{ + content: Sure, here's one for you: + +Why did the cat sit on the computer? + +Because it wanted to keep an eye on the mouse! 
🐱🖱️, + toolCalls: [], +}, + finishReason: FinishReason.stop, + metadata: {model: gpt-4o-2024-05-13, created: 1724512378, system_fingerprint: fp_c9aa9c0491}, + usage: LanguageModelUsage{ + promptTokens: 13, + promptBillableCharacters: null, + responseTokens: 34, + responseBillableCharacters: null, + totalTokens: 47} +, + streaming: false +}] +*/ +} + +Future _withRetryOptions() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final input = PromptValue.string('Explain why sky is blue in 2 lines'); + final model = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'fake-model'), + ); + final modelWithRetry = model.withRetry( + maxRetries: 3, + addJitter: true, + ); + final res = await modelWithRetry.invoke(input); + print(res); + /* + retry attempt 0 with delay duration 0:00:01.082000 + retry attempt 1 with delay duration 0:00:02.073000 + retry attempt 2 with delay duration 0:00:04.074000 + Unhandled exception: + Exception: Function failed to return response and max retries exceeded, Error: OpenAIClientException({ + "uri": "https://api.openai.com/v1/chat/completions", + "method": "POST", + "code": 404, + "message": "Unsuccessful response", + "body": { + "error": { + "message": "The model `fake-model` does not exist or you do not have access to it.", + "type": "invalid_request_error", + "param": null, + "code": "model_not_found" + } + } +})*/ +} + +Future _withDelayDurations() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final input = PromptValue.string('Explain why sky is blue in 2 lines'); + final model = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'fake-model'), + ); + final modelWithRetry = model.withRetry( + maxRetries: 3, + addJitter: false, + delayDurations: const [ + Duration(seconds: 1), + Duration(seconds: 2), + Duration(seconds: 3), + ], + ); + final res = await modelWithRetry.invoke(input); + print(res); + // retried with delays provided in RetryOptions + /* +retry attempt 0 with delay duration 0:00:01.000000 +retry attempt 1 with delay duration 0:00:02.000000 +retry attempt 2 with delay duration 0:00:03.000000 +Unhandled exception: +Exception: Function failed to return response and max retries exceeded, Error: OpenAIClientException({ + "uri": "https://api.openai.com/v1/chat/completions", + "method": "POST", + "code": 401, + "message": "Unsuccessful response", + "body": { + "error": { + "message": "You didn't provide an API key. You need to provide your API key in an Authorization header using Bearer auth (i.e. Authorization: Bearer YOUR_KEY), or as the password field (with blank username) if you're accessing the API from your browser and are prompted for a username and password. 
You can obtain an API key from https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": null, + "code": null + } + } +}) +*/ +} diff --git a/packages/langchain/lib/src/utils/utils.dart b/packages/langchain/lib/src/utils/utils.dart index d41e35b9..28748719 100644 --- a/packages/langchain/lib/src/utils/utils.dart +++ b/packages/langchain/lib/src/utils/utils.dart @@ -1,2 +1,6 @@ export 'package:langchain_core/utils.dart' - show calculateSimilarity, cosineSimilarity, getIndexesMostSimilarEmbeddings; + show + RetryOptions, + calculateSimilarity, + cosineSimilarity, + getIndexesMostSimilarEmbeddings; diff --git a/packages/langchain_core/lib/src/runnables/retry.dart b/packages/langchain_core/lib/src/runnables/retry.dart new file mode 100644 index 00000000..e49c4d22 --- /dev/null +++ b/packages/langchain_core/lib/src/runnables/retry.dart @@ -0,0 +1,63 @@ +import 'dart:async'; +import '../utils/retry_client.dart'; +import 'runnables.dart'; + +/// {@template runnable_retry} +/// A [Runnable] that automatically retries the operation if it fails. +/// +/// You can create a [RunnableRetry] using [Runnable.withRetry], passing in the +/// [RetryOptions]. +/// +/// When [invoke] or [batch] is called on the runnable, if the initial attempt +/// fails, it will be retried according to the specified [RetryOptions]. +/// +/// Example usage: +/// ```dart +/// final model = ChatOpenAI(...); +/// final modelWithRetry = model.withRetry(maxRetries: 2); +/// final res = await modelWithRetry.invoke(...); +/// ``` +/// {@endtemplate} +class RunnableRetry + extends Runnable { + /// {@macro runnable_retry} + RunnableRetry({ + required this.runnable, + required super.defaultOptions, + required this.retryOptions, + }); + + /// Runnable that will be retried on error. + final Runnable runnable; + + /// Options to retry the runnable. + final RetryOptions retryOptions; + + @override + Future invoke( + RunInput input, { + RunnableOptions? options, + }) async { + return retryClient( + options: retryOptions, + fn: () => runnable.invoke( + input, + options: options, + ), + ); + } + + @override + Future> batch( + List inputs, { + List? options, + }) async { + return retryClient( + options: retryOptions, + fn: () => runnable.batch( + inputs, + options: options, + ), + ); + } +} diff --git a/packages/langchain_core/lib/src/runnables/runnable.dart b/packages/langchain_core/lib/src/runnables/runnable.dart index 7c020a50..05f828ca 100644 --- a/packages/langchain_core/lib/src/runnables/runnable.dart +++ b/packages/langchain_core/lib/src/runnables/runnable.dart @@ -8,6 +8,7 @@ import 'input_map.dart'; import 'input_stream_map.dart'; import 'map.dart'; import 'passthrough.dart'; +import 'retry.dart'; import 'router.dart'; import 'sequence.dart'; import 'types.dart'; @@ -292,7 +293,7 @@ abstract class Runnable withFallbacks( List> fallbacks, ) { @@ -302,6 +303,38 @@ abstract class Runnable withRetry({ + final int maxRetries = 3, + final FutureOr Function(Object e)? retryIf, + final List? delayDurations, + final bool addJitter = false, + }) { + return RunnableRetry( + runnable: this, + defaultOptions: defaultOptions, + retryOptions: RetryOptions( + maxRetries: maxRetries, + retryIf: retryIf, + delayDurations: delayDurations, + addJitter: addJitter, + ), + ); + } + /// Returns the given [options] if they are compatible with the [Runnable], /// otherwise returns `null`. CallOptions? 
getCompatibleOptions(
diff --git a/packages/langchain_core/lib/src/runnables/runnables.dart b/packages/langchain_core/lib/src/runnables/runnables.dart
index 146761d7..3cbec552 100644
--- a/packages/langchain_core/lib/src/runnables/runnables.dart
+++ b/packages/langchain_core/lib/src/runnables/runnables.dart
@@ -5,6 +5,7 @@ export 'input_map.dart';
 export 'input_stream_map.dart';
 export 'map.dart';
 export 'passthrough.dart';
+export 'retry.dart';
 export 'router.dart';
 export 'runnable.dart';
 export 'runnable_ext.dart';
diff --git a/packages/langchain_core/lib/src/utils/retry_client.dart b/packages/langchain_core/lib/src/utils/retry_client.dart
new file mode 100644
index 00000000..9cd15317
--- /dev/null
+++ b/packages/langchain_core/lib/src/utils/retry_client.dart
@@ -0,0 +1,92 @@
+import 'dart:async';
+import 'dart:math';
+
+/// {@template retry_options}
+/// Options to pass into [retryClient] to control the retry behavior.
+/// {@endtemplate}
+class RetryOptions {
+  /// {@macro retry_options}
+  RetryOptions({
+    required this.maxRetries,
+    required this.addJitter,
+    this.retryIf,
+    this.delayDurations,
+  });
+
+  /// The maximum number of attempts to retry.
+  final int maxRetries;
+
+  /// An evaluator function that can be used to decide if the function should
+  /// be retried based on the exception it throws.
+  ///
+  /// If you decide not to retry on a particular exception, [retryIf] can return
+  /// `false` and the retry won't happen. If [retryIf] is not provided, all
+  /// exceptions are retried.
+  final FutureOr Function(Object e)? retryIf;
+
+  /// The function will be retried based on an exponential backoff strategy
+  /// with a base delay of 1 second.
+  ///
+  /// But you can override this behavior by providing an optional list of
+  /// [delayDurations]. Each entry in the list corresponds to a specific
+  /// retry attempt, and the corresponding delay from the list will be used
+  /// instead of the default exponential delay.
+  ///
+  /// For example, if you provide a list of `[2, 4, 8]`, the delays between the
+  /// first three retries will be 2, 4, and 8 seconds, respectively.
+  final List? delayDurations;
+
+  /// Whether to add jitter to the exponential backoff.
+  ///
+  /// Jitter is a random value added to the delay to prevent multiple clients
+  /// from retrying at the same time.
+  final bool addJitter;
+}
+
+/// A client that handles retry logic for a given function.
+///
+/// This client takes [RetryOptions] and a function to execute. If the
+/// function fails, it will be retried according to the specified options.
+/// If it succeeds, the result of the function will be returned.
+FutureOr retryClient({
+  required RetryOptions options,
+  required FutureOr Function() fn,
+}) async {
+  const defaultDelay = Duration(seconds: 1);
+
+  for (int attempt = 0; attempt < options.maxRetries; attempt++) {
+    try {
+      return await fn();
+    } catch (e) {
+      final isLastAttempt = attempt == options.maxRetries - 1;
+      final shouldRetry = await options.retryIf?.call(e) ?? true;
+
+      if (isLastAttempt || !shouldRetry) {
+        rethrow;
+      }
+
+      final duration =
+          options.delayDurations?[attempt] ?? 
defaultDelay * pow(2, attempt);
+      await _delay(duration, attempt, options.addJitter);
+    }
+  }
+
+  // This line should never be reached
+  throw StateError('Exhausted all retry attempts');
+}
+
+Future _delay(
+  final Duration duration,
+  final int attempt,
+  final bool addJitter,
+) async {
+  final Duration delay;
+  if (addJitter) {
+    final random = Random();
+    final jitter = random.nextInt(100);
+    delay = Duration(milliseconds: duration.inMilliseconds + jitter);
+  } else {
+    delay = duration;
+  }
+  await Future.delayed(delay);
+}
diff --git a/packages/langchain_core/lib/src/utils/utils.dart b/packages/langchain_core/lib/src/utils/utils.dart
index d439ed98..57924640 100644
--- a/packages/langchain_core/lib/src/utils/utils.dart
+++ b/packages/langchain_core/lib/src/utils/utils.dart
@@ -1,3 +1,4 @@
 export 'chunk.dart';
 export 'reduce.dart';
+export 'retry_client.dart';
 export 'similarity.dart';
diff --git a/packages/langchain_core/test/runnables/retry_test.dart b/packages/langchain_core/test/runnables/retry_test.dart
new file mode 100644
index 00000000..f1e8f625
--- /dev/null
+++ b/packages/langchain_core/test/runnables/retry_test.dart
@@ -0,0 +1,87 @@
+import 'package:langchain_core/chat_models.dart';
+import 'package:langchain_core/prompts.dart';
+import 'package:langchain_core/runnables.dart';
+import 'package:test/test.dart';
+
+void main() {
+  group('Runnable Retry Test', () {
+    late FakeEchoChatModel model;
+    final input = PromptValue.string('why is the sky blue');
+    final promptTemplate =
+        ChatPromptTemplate.fromTemplate('tell me a joke about {topic}');
+
+    setUp(() {
+      model = const FakeEchoChatModel();
+    });
+
+    test('Runnable retry should return output for invoke', () async {
+      final modelWithRetry = model.withRetry(maxRetries: 2);
+      final res = await modelWithRetry.invoke(input);
+      expect(res.output.content, 'why is the sky blue');
+    });
+
+    test('Runnable retry should return output for batch', () async {
+      final chain = promptTemplate.pipe(model);
+      final chainWithRetry = chain.withRetry();
+      final res = await chainWithRetry.batch(
+        [
+          {'topic': 'bears'},
+          {'topic': 'cats'},
+        ],
+      );
+      expect(res[0].output.content, 'tell me a joke about bears');
+      expect(res[1].output.content, 'tell me a joke about cats');
+    });
+
+    test('Should retry based on RetryOptions, maxRetries = 2', () async {
+      final modelWithRetry = model.withRetry(maxRetries: 2);
+      expect(
+        () async => modelWithRetry.invoke(
+          input,
+          options: const FakeEchoChatModelOptions(throwRandomError: true),
+        ),
+        throwsException,
+      );
+    });
+
+    test('Should return the output after successful retry', () async {
+      int count = 0;
+      final modelWithRetry = model.pipe(
+        Runnable.fromFunction(
+          invoke: (input, opt) {
+            if (count++ < 1) {
+              throw Exception('Random error');
+            }
+            return input;
+          },
+        ),
+      ).withRetry(maxRetries: 2);
+      final res = await modelWithRetry.invoke(input);
+      expect(res.outputAsString, input.toString());
+      expect(count, 2);
+    });
+
+    test('Should not retry if retryIf returned false', () async {
+      late String error;
+      final modelWithRetry = model.withRetry(
+        maxRetries: 3,
+        retryIf: (e) {
+          if (e.toString() == 'Exception: Random error') {
+            return false;
+          } else {
+            return true;
+          }
+        },
+      );
+      try {
+        await modelWithRetry.invoke(
+          input,
+          options: const FakeEchoChatModelOptions(throwRandomError: true),
+        );
+      } catch (e) {
+        error = e.toString();
+      }
+      expect(error, 'Exception: Random error');
+    });
+  });
+}
From 274c7cb4e02d9c228f224c96b1d15c86244c8a9c Mon Sep 17 00:00:00 2001
From: David Miguel 
Lozano Date: Thu, 29 Aug 2024 00:06:31 +0200 Subject: [PATCH 228/251] feat: Option to include file search results in assistants API (#543) --- packages/openai_dart/README.md | 134 +- packages/openai_dart/lib/openai_dart.dart | 2 +- .../openai_dart/lib/src/generated/client.dart | 16 + .../schema/assistant_stream_event.dart | 4 +- .../src/generated/schema/assistant_tools.dart | 18 +- .../generated/schema/file_search_ranker.dart | 17 + .../schema/file_search_ranking_options.dart | 62 + .../schema/run_step_details_tool_calls.dart | 5 +- ...n_step_details_tool_calls_file_search.dart | 48 + ...ls_file_search_ranking_options_object.dart | 56 + ...tool_calls_file_search_result_content.dart | 46 + ..._tool_calls_file_search_result_object.dart | 71 + .../lib/src/generated/schema/schema.dart | 6 + .../src/generated/schema/schema.freezed.dart | 1415 ++++++++++++++++- .../lib/src/generated/schema/schema.g.dart | 141 +- packages/openai_dart/oas/openapi_curated.yaml | 117 +- .../openai_dart/oas/openapi_official.yaml | 140 +- packages/openai_dart/pubspec.yaml | 2 +- 18 files changed, 2171 insertions(+), 129 deletions(-) create mode 100644 packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md index df9cc58b..12f5b51f 100644 --- a/packages/openai_dart/README.md +++ b/packages/openai_dart/README.md @@ -20,7 +20,7 @@ Unofficial Dart client for [OpenAI](https://platform.openai.com/docs/api-referen **Supported endpoints:** -- Chat (with tools and streaming support) +- Chat (with structured outputs, tools and streaming support) - Completions (legacy) - Embeddings - Fine-tuning @@ -28,7 +28,7 @@ Unofficial Dart client for [OpenAI](https://platform.openai.com/docs/api-referen - Images - Models - Moderations -- Assistants v2 (with tools and streaming support) `beta` +- Assistants v2 (with structured outputs, tools and streaming support) `beta` * Threads * Messages * Runs @@ -97,14 +97,14 @@ final client = OpenAIClient( Given a list of messages comprising a conversation, the model will return a response. -Related guide: [Chat Completions](https://platform.openai.com/docs/guides/text-generation) +Related guide: [Chat Completions](https://platform.openai.com/docs/guides/chat-completions) **Create chat completion:** ```dart final res = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-4'), + model: ChatCompletionModel.modelId('gpt-4o'), messages: [ ChatCompletionMessage.system( content: 'You are a helpful assistant.', @@ -121,28 +121,28 @@ print(res.choices.first.message.content); ``` `ChatCompletionModel` is a sealed class that offers two ways to specify the model: -- `ChatCompletionModel.modelId('model-id')`: the model ID as string (e.g. `'gpt-4'` or your fine-tuned model ID). 
-- `ChatCompletionModel.model(ChatCompletionModels.gpt4)`: a value from `ChatCompletionModels` enum which lists all of the available models. +- `ChatCompletionModel.modelId('model-id')`: the model ID as string (e.g. `'gpt-4o'` or your fine-tuned model ID). +- `ChatCompletionModel.model(ChatCompletionModels.gpt4o)`: a value from `ChatCompletionModels` enum which lists all of the available models. `ChatCompletionMessage` is a sealed class that supports the following message types: - `ChatCompletionMessage.system()`: a system message. - `ChatCompletionMessage.user()`: a user message. - `ChatCompletionMessage.assistant()`: an assistant message. - `ChatCompletionMessage.tool()`: a tool message. -- `ChatCompletionMessage.function()`: a function message. +- `ChatCompletionMessage.function()`: a function message (deprecated in favor of tools). `ChatCompletionMessage.user()` takes a `ChatCompletionUserMessageContent` object that supports the following content types: - `ChatCompletionUserMessageContent.string('content')`: string content. - `ChatCompletionUserMessageContent.parts([...])`: multi-modal content (check the 'Multi-modal prompt' section below). * `ChatCompletionMessageContentPart.text('content')`: text content. - * `ChatCompletionMessageContentPart.image(imageUrl: ...)`: image content. + * `ChatCompletionMessageContentPart.image(...)`: image content (URL or base64-encoded image). **Stream chat completion:** ```dart final stream = client.createChatCompletionStream( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-4-turbo'), + model: ChatCompletionModel.modelId('gpt-4o'), messages: [ ChatCompletionMessage.system( content: @@ -167,6 +167,8 @@ await for (final res in stream) { **Multi-modal prompt:** ([docs](https://platform.openai.com/docs/guides/vision)) +You can either provide the image URL: + ```dart final res = await client.createChatCompletion( request: CreateChatCompletionRequest( @@ -198,37 +200,31 @@ print(res.choices.first.message.content); // The fruit in the image is an apple. ``` -**JSON mode:** ([docs](https://platform.openai.com/docs/guides/structured-outputs/json-mode)) - +Or provide the base64-encoded image: ```dart -final res = await client.createChatCompletion( - request: CreateChatCompletionRequest( - model: ChatCompletionModel.model( - ChatCompletionModels.gpt41106Preview, - ), - messages: [ - ChatCompletionMessage.system( - content: - 'You are a helpful assistant. That extracts names from text ' - 'and returns them in a JSON array.', +//... +ChatCompletionMessage.user( + content: ChatCompletionUserMessageContent.parts( + [ + ChatCompletionMessageContentPart.text( + text: 'What fruit is this?', ), - ChatCompletionMessage.user( - content: ChatCompletionUserMessageContent.string( - 'John, Mary, and Peter.', + ChatCompletionMessageContentPart.image( + imageUrl: ChatCompletionMessageImageUrl( + url: '/9j/4AAQSkZJRgABAQAAAQABAAD/2wB...P3s/XHQ8cE/nmiupbL0+fz/r/MjnSbsr69/Rdu1j//2Q==', + detail: ChatCompletionMessageImageDetail.high, ), ), ], - temperature: 0, - responseFormat: ChatCompletionResponseFormat( - type: ChatCompletionResponseFormatType.jsonObject, - ), ), -); -// { "names": ["John", "Mary", "Peter"] } +), +//... ``` **Structured output: ([docs](https://platform.openai.com/docs/guides/structured-outputs))** +Structured Outputs is a feature that ensures the model will always generate responses that adhere to your supplied JSON Schema. 
+
 ```dart
 final res = await client.createChatCompletion(
   request: CreateChatCompletionRequest(
@@ -237,8 +233,7 @@ final res = await client.createChatCompletion(
     ),
     messages: [
       ChatCompletionMessage.system(
-        content:
-            'You are a helpful assistant. That extracts names from text.',
+        content: 'You are a helpful assistant. That extracts names from text.',
       ),
       ChatCompletionMessage.user(
         content: ChatCompletionUserMessageContent.string(
@@ -272,8 +267,41 @@ final res = await client.createChatCompletion(
 // {"names":["John","Mary","Peter"]}
 ```
 
+**JSON mode:** ([docs](https://platform.openai.com/docs/guides/structured-outputs/json-mode))
+
+> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It is recommended to use Structured Outputs if it is supported for your use case.
+
+```dart
+final res = await client.createChatCompletion(
+  request: CreateChatCompletionRequest(
+    model: ChatCompletionModel.model(
+      ChatCompletionModels.gpt41106Preview,
+    ),
+    messages: [
+      ChatCompletionMessage.system(
+        content:
+            'You are a helpful assistant. That extracts names from text '
+            'and returns them in a JSON array.',
+      ),
+      ChatCompletionMessage.user(
+        content: ChatCompletionUserMessageContent.string(
+          'John, Mary, and Peter.',
+        ),
+      ),
+    ],
+    temperature: 0,
+    responseFormat: ChatCompletionResponseFormat(
+      type: ChatCompletionResponseFormatType.jsonObject,
+    ),
+  ),
+);
+// { "names": ["John", "Mary", "Peter"] }
+```
+
 **Tools:** ([docs](https://platform.openai.com/docs/guides/function-calling))
 
+Tool calling allows you to connect models to external tools and systems.
+
 ```dart
 const function = FunctionObject(
   name: 'get_current_weather',
@@ -301,7 +329,7 @@ const tool = ChatCompletionTool(
 
 final res1 = await client.createChatCompletion(
   request: CreateChatCompletionRequest(
-    model: const ChatCompletionModel.model(
+    model: ChatCompletionModel.model(
       ChatCompletionModels.gpt4oMini,
     ),
     messages: [
@@ -353,6 +381,8 @@ final answer = res2.choices.first.message.content;
 // The weather in Boston right now is sunny with a temperature of 22°C
 ```
 
+You can enable Structured Outputs for your tools by setting `strict: true` in your `FunctionObject` definition. Structured Outputs ensures that the arguments generated by the model for a tool call exactly match the JSON Schema you provided in the tool definition. 
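+
+As a minimal sketch, a strict version of the `get_current_weather` tool from the example above could look like the following (the `strictFunction`/`strictTool` names are purely illustrative). Note that strict schemas are expected to mark every property as required and to set `additionalProperties` to `false`:
+
+```dart
+// Same weather function as above, but with Structured Outputs enabled.
+const strictFunction = FunctionObject(
+  name: 'get_current_weather',
+  description: 'Get the current weather in a given location',
+  strict: true, // Arguments will strictly follow the JSON Schema below
+  parameters: {
+    'type': 'object',
+    'properties': {
+      'location': {
+        'type': 'string',
+        'description': 'The city and country, e.g. San Francisco, US',
+      },
+      'unit': {
+        'type': 'string',
+        'enum': ['celsius', 'fahrenheit'],
+      },
+    },
+    // Strict mode: all properties required, no extra keys allowed
+    'required': ['location', 'unit'],
+    'additionalProperties': false,
+  },
+);
+
+const strictTool = ChatCompletionTool(
+  type: ChatCompletionToolType.function,
+  function: strictFunction,
+);
+```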
+ **Function calling:** (deprecated in favor of tools) ```dart @@ -813,7 +843,7 @@ final res = await client.createThreadMessage( ), MessageContent.imageUrl( imageUrl: MessageContentImageUrl( - url: 'https://example.com/image.jpg', + url: 'https://example.com/image.jpg', // or base64-encoded image ), ), ]), @@ -867,6 +897,42 @@ final res = await client.createThreadRun( ); ``` +You can also use Structured Outputs to ensure that the model-generated responses adhere to a specific JSON schema: + +```dart + +final res = await client.createThreadRun( + threadId: threadId, + request: CreateRunRequest( + assistantId: assistantId, + instructions: 'You are a helpful assistant that extracts names from text.', + model: CreateRunRequestModel.modelId('gpt-4o'), + responseFormat: CreateRunRequestResponseFormat.responseFormat( + ResponseFormat.jsonSchema( + jsonSchema: JsonSchemaObject( + name: 'Names', + description: 'A list of names', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'names': { + 'type': 'array', + 'items': { + 'type': 'string', + }, + }, + }, + 'additionalProperties': false, + 'required': ['names'], + }, + ), + ) + ) + ), +); +``` + **Create run: (streaming)** ```dart diff --git a/packages/openai_dart/lib/openai_dart.dart b/packages/openai_dart/lib/openai_dart.dart index 7600ced2..57003125 100644 --- a/packages/openai_dart/lib/openai_dart.dart +++ b/packages/openai_dart/lib/openai_dart.dart @@ -1,4 +1,4 @@ -/// Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. +/// Dart client for the OpenAI API. Supports chat (GPT-4o, etc.), completions, embeddings, images (DALL·E 3), assistants (threads, runs, vector stores, etc.), batch, fine-tuning, etc. library; export 'src/client.dart'; diff --git a/packages/openai_dart/lib/src/generated/client.dart b/packages/openai_dart/lib/src/generated/client.dart index 828b26be..b58d7e15 100644 --- a/packages/openai_dart/lib/src/generated/client.dart +++ b/packages/openai_dart/lib/src/generated/client.dart @@ -1175,11 +1175,14 @@ class OpenAIClient { /// /// `threadId`: The ID of the thread to run. /// + /// `include`: A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + /// /// `request`: Request object for the Create run endpoint. /// /// `POST` `https://api.openai.com/v1/threads/{thread_id}/runs` Future createThreadRun({ required String threadId, + String? include, required CreateRunRequest request, }) async { final r = await makeRequest( @@ -1190,6 +1193,9 @@ class OpenAIClient { requestType: 'application/json', responseType: 'application/json', body: request, + queryParams: { + if (include != null) 'include': include, + }, ); return RunObject.fromJson(_jsonDecode(r)); } @@ -1324,6 +1330,8 @@ class OpenAIClient { /// /// `before`: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// + /// `include`: A list of additional fields to include in the response. 
Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + /// /// `GET` `https://api.openai.com/v1/threads/{thread_id}/runs/{run_id}/steps` Future listThreadRunSteps({ required String threadId, @@ -1332,6 +1340,7 @@ class OpenAIClient { String order = 'desc', String? after, String? before, + String? include, }) async { final r = await makeRequest( baseUrl: 'https://api.openai.com/v1', @@ -1345,6 +1354,7 @@ class OpenAIClient { 'order': order, if (after != null) 'after': after, if (before != null) 'before': before, + if (include != null) 'include': include, }, ); return ListRunStepsResponse.fromJson(_jsonDecode(r)); @@ -1362,11 +1372,14 @@ class OpenAIClient { /// /// `stepId`: The ID of the run step to retrieve. /// + /// `include`: A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + /// /// `GET` `https://api.openai.com/v1/threads/{thread_id}/runs/{run_id}/steps/{step_id}` Future getThreadRunStep({ required String threadId, required String runId, required String stepId, + String? include, }) async { final r = await makeRequest( baseUrl: 'https://api.openai.com/v1', @@ -1375,6 +1388,9 @@ class OpenAIClient { isMultipart: false, requestType: '', responseType: 'application/json', + queryParams: { + if (include != null) 'include': include, + }, ); return RunStepObject.fromJson(_jsonDecode(r)); } diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart b/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart index 348155db..0686da7b 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart @@ -61,7 +61,7 @@ sealed class AssistantStreamEvent with _$AssistantStreamEvent { // UNION: RunStepStreamEvent // ------------------------------------------ - /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. + /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. const factory AssistantStreamEvent.runStepStreamEvent({ /// The type of the event. required EventType event, @@ -74,7 +74,7 @@ sealed class AssistantStreamEvent with _$AssistantStreamEvent { // UNION: RunStepStreamDeltaEvent // ------------------------------------------ - /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. + /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. const factory AssistantStreamEvent.runStepStreamDeltaEvent({ /// The type of the event. 
required EventType event, diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart index e36cd8e6..920d2301 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart @@ -83,9 +83,17 @@ class AssistantToolsFileSearchFileSearch /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// - /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// Note that the file search tool may output fewer than `max_num_results` results. See the + /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. @JsonKey(name: 'max_num_results', includeIfNull: false) int? maxNumResults, + + /// The ranking options for the file search. + /// + /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? rankingOptions, }) = _AssistantToolsFileSearchFileSearch; /// Object construction from a JSON representation @@ -94,7 +102,10 @@ class AssistantToolsFileSearchFileSearch _$AssistantToolsFileSearchFileSearchFromJson(json); /// List of all property names of schema - static const List propertyNames = ['max_num_results']; + static const List propertyNames = [ + 'max_num_results', + 'ranking_options' + ]; /// Validation constants static const maxNumResultsMinValue = 1; @@ -115,6 +126,7 @@ class AssistantToolsFileSearchFileSearch Map toMap() { return { 'max_num_results': maxNumResults, + 'ranking_options': rankingOptions, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart b/packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart new file mode 100644 index 00000000..6dfc6218 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart @@ -0,0 +1,17 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// ENUM: FileSearchRanker +// ========================================== + +/// The ranker to use for the file search. If not specified will use the `auto` ranker. 
+enum FileSearchRanker { + @JsonValue('auto') + auto, + @JsonValue('default_2024_08_21') + default20240821, +} diff --git a/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart b/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart new file mode 100644 index 00000000..e60070f0 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart @@ -0,0 +1,62 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: FileSearchRankingOptions +// ========================================== + +/// The ranking options for the file search. +/// +/// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) +/// for more information. +@freezed +class FileSearchRankingOptions with _$FileSearchRankingOptions { + const FileSearchRankingOptions._(); + + /// Factory constructor for FileSearchRankingOptions + const factory FileSearchRankingOptions({ + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + FileSearchRanker? ranker, + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @JsonKey(name: 'score_threshold', includeIfNull: false) + double? scoreThreshold, + }) = _FileSearchRankingOptions; + + /// Object construction from a JSON representation + factory FileSearchRankingOptions.fromJson(Map json) => + _$FileSearchRankingOptionsFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['ranker', 'score_threshold']; + + /// Validation constants + static const scoreThresholdMinValue = 0.0; + static const scoreThresholdMaxValue = 1.0; + + /// Perform validations on the schema property values + String? validateSchema() { + if (scoreThreshold != null && scoreThreshold! < scoreThresholdMinValue) { + return "The value of 'scoreThreshold' cannot be < $scoreThresholdMinValue"; + } + if (scoreThreshold != null && scoreThreshold! > scoreThresholdMaxValue) { + return "The value of 'scoreThreshold' cannot be > $scoreThresholdMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'ranker': ranker, + 'score_threshold': scoreThreshold, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart index c4605b7b..327de9f5 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart @@ -42,8 +42,9 @@ sealed class RunStepDetailsToolCalls with _$RunStepDetailsToolCalls { /// The type of tool call. This is always going to be `file_search` for this type of tool call. required String type, - /// For now, this is always going to be an empty object. - @JsonKey(name: 'file_search') required Map fileSearch, + /// The definition of the file search that was called. 
+ @JsonKey(name: 'file_search') + required RunStepDetailsToolCallsFileSearch fileSearch, }) = RunStepDetailsToolCallsFileSearchObject; // ------------------------------------------ diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart new file mode 100644 index 00000000..16f72322 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart @@ -0,0 +1,48 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: RunStepDetailsToolCallsFileSearch +// ========================================== + +/// The definition of the file search that was called. +@freezed +class RunStepDetailsToolCallsFileSearch + with _$RunStepDetailsToolCallsFileSearch { + const RunStepDetailsToolCallsFileSearch._(); + + /// Factory constructor for RunStepDetailsToolCallsFileSearch + const factory RunStepDetailsToolCallsFileSearch({ + /// The ranking options for the file search. + @JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, + + /// The results of the file search. + @JsonKey(includeIfNull: false) + List? results, + }) = _RunStepDetailsToolCallsFileSearch; + + /// Object construction from a JSON representation + factory RunStepDetailsToolCallsFileSearch.fromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['ranking_options', 'results']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'ranking_options': rankingOptions, + 'results': results, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart new file mode 100644 index 00000000..61b2ff06 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart @@ -0,0 +1,56 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: RunStepDetailsToolCallsFileSearchRankingOptionsObject +// ========================================== + +/// The ranking options for the file search. +@freezed +class RunStepDetailsToolCallsFileSearchRankingOptionsObject + with _$RunStepDetailsToolCallsFileSearchRankingOptionsObject { + const RunStepDetailsToolCallsFileSearchRankingOptionsObject._(); + + /// Factory constructor for RunStepDetailsToolCallsFileSearchRankingOptionsObject + const factory RunStepDetailsToolCallsFileSearchRankingOptionsObject({ + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + required FileSearchRanker ranker, + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. 
+ @JsonKey(name: 'score_threshold') required double scoreThreshold, + }) = _RunStepDetailsToolCallsFileSearchRankingOptionsObject; + + /// Object construction from a JSON representation + factory RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['ranker', 'score_threshold']; + + /// Validation constants + static const scoreThresholdMinValue = 0.0; + static const scoreThresholdMaxValue = 1.0; + + /// Perform validations on the schema property values + String? validateSchema() { + if (scoreThreshold < scoreThresholdMinValue) { + return "The value of 'scoreThreshold' cannot be < $scoreThresholdMinValue"; + } + if (scoreThreshold > scoreThresholdMaxValue) { + return "The value of 'scoreThreshold' cannot be > $scoreThresholdMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'ranker': ranker, + 'score_threshold': scoreThreshold, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart new file mode 100644 index 00000000..3ba23a07 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart @@ -0,0 +1,46 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: RunStepDetailsToolCallsFileSearchResultContent +// ========================================== + +/// The content of the result that was found. +@freezed +class RunStepDetailsToolCallsFileSearchResultContent + with _$RunStepDetailsToolCallsFileSearchResultContent { + const RunStepDetailsToolCallsFileSearchResultContent._(); + + /// Factory constructor for RunStepDetailsToolCallsFileSearchResultContent + const factory RunStepDetailsToolCallsFileSearchResultContent({ + /// The type of the content. + @Default('text') String type, + + /// The text content of the file. + @JsonKey(includeIfNull: false) String? text, + }) = _RunStepDetailsToolCallsFileSearchResultContent; + + /// Object construction from a JSON representation + factory RunStepDetailsToolCallsFileSearchResultContent.fromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchResultContentFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['type', 'text']; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'type': type, + 'text': text, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart new file mode 100644 index 00000000..4b1a1de0 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart @@ -0,0 +1,71 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: RunStepDetailsToolCallsFileSearchResultObject +// ========================================== + +/// A result instance of the file search. +@freezed +class RunStepDetailsToolCallsFileSearchResultObject + with _$RunStepDetailsToolCallsFileSearchResultObject { + const RunStepDetailsToolCallsFileSearchResultObject._(); + + /// Factory constructor for RunStepDetailsToolCallsFileSearchResultObject + const factory RunStepDetailsToolCallsFileSearchResultObject({ + /// The ID of the file that result was found in. + @JsonKey(name: 'file_id') required String fileId, + + /// The name of the file that result was found in. + @JsonKey(name: 'file_name') required String fileName, + + /// The score of the result. All values must be a floating point number between 0 and 1. + required double score, + + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. + @JsonKey(includeIfNull: false) + List? content, + }) = _RunStepDetailsToolCallsFileSearchResultObject; + + /// Object construction from a JSON representation + factory RunStepDetailsToolCallsFileSearchResultObject.fromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchResultObjectFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'file_id', + 'file_name', + 'score', + 'content' + ]; + + /// Validation constants + static const scoreMinValue = 0.0; + static const scoreMaxValue = 1.0; + + /// Perform validations on the schema property values + String? 
validateSchema() { + if (score < scoreMinValue) { + return "The value of 'score' cannot be < $scoreMinValue"; + } + if (score > scoreMaxValue) { + return "The value of 'score' cannot be > $scoreMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'file_id': fileId, + 'file_name': fileName, + 'score': score, + 'content': content, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart index 028e108f..a48b094d 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -73,6 +73,8 @@ part 'create_assistant_request.dart'; part 'modify_assistant_request.dart'; part 'delete_assistant_response.dart'; part 'list_assistants_response.dart'; +part 'file_search_ranking_options.dart'; +part 'file_search_ranker.dart'; part 'assistants_named_tool_choice.dart'; part 'assistants_function_call_option.dart'; part 'truncation_object.dart'; @@ -119,6 +121,10 @@ part 'run_step_details_tool_calls_code_object_code_interpreter.dart'; part 'run_step_delta_step_details_tool_calls_code_object_code_interpreter.dart'; part 'run_step_details_tool_calls_code_output_image.dart'; part 'run_step_delta_step_details_tool_calls_code_output_image.dart'; +part 'run_step_details_tool_calls_file_search.dart'; +part 'run_step_details_tool_calls_file_search_ranking_options_object.dart'; +part 'run_step_details_tool_calls_file_search_result_object.dart'; +part 'run_step_details_tool_calls_file_search_result_content.dart'; part 'run_step_completion_usage.dart'; part 'vector_store_expiration_after.dart'; part 'vector_store_object.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 76274966..5753970f 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -27762,6 +27762,222 @@ abstract class _ListAssistantsResponse extends ListAssistantsResponse { get copyWith => throw _privateConstructorUsedError; } +FileSearchRankingOptions _$FileSearchRankingOptionsFromJson( + Map json) { + return _FileSearchRankingOptions.fromJson(json); +} + +/// @nodoc +mixin _$FileSearchRankingOptions { + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + FileSearchRanker? get ranker => throw _privateConstructorUsedError; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @JsonKey(name: 'score_threshold', includeIfNull: false) + double? get scoreThreshold => throw _privateConstructorUsedError; + + /// Serializes this FileSearchRankingOptions to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $FileSearchRankingOptionsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $FileSearchRankingOptionsCopyWith<$Res> { + factory $FileSearchRankingOptionsCopyWith(FileSearchRankingOptions value, + $Res Function(FileSearchRankingOptions) then) = + _$FileSearchRankingOptionsCopyWithImpl<$Res, FileSearchRankingOptions>; + @useResult + $Res call( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + FileSearchRanker? ranker, + @JsonKey(name: 'score_threshold', includeIfNull: false) + double? scoreThreshold}); +} + +/// @nodoc +class _$FileSearchRankingOptionsCopyWithImpl<$Res, + $Val extends FileSearchRankingOptions> + implements $FileSearchRankingOptionsCopyWith<$Res> { + _$FileSearchRankingOptionsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? ranker = freezed, + Object? scoreThreshold = freezed, + }) { + return _then(_value.copyWith( + ranker: freezed == ranker + ? _value.ranker + : ranker // ignore: cast_nullable_to_non_nullable + as FileSearchRanker?, + scoreThreshold: freezed == scoreThreshold + ? _value.scoreThreshold + : scoreThreshold // ignore: cast_nullable_to_non_nullable + as double?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$FileSearchRankingOptionsImplCopyWith<$Res> + implements $FileSearchRankingOptionsCopyWith<$Res> { + factory _$$FileSearchRankingOptionsImplCopyWith( + _$FileSearchRankingOptionsImpl value, + $Res Function(_$FileSearchRankingOptionsImpl) then) = + __$$FileSearchRankingOptionsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + FileSearchRanker? ranker, + @JsonKey(name: 'score_threshold', includeIfNull: false) + double? scoreThreshold}); +} + +/// @nodoc +class __$$FileSearchRankingOptionsImplCopyWithImpl<$Res> + extends _$FileSearchRankingOptionsCopyWithImpl<$Res, + _$FileSearchRankingOptionsImpl> + implements _$$FileSearchRankingOptionsImplCopyWith<$Res> { + __$$FileSearchRankingOptionsImplCopyWithImpl( + _$FileSearchRankingOptionsImpl _value, + $Res Function(_$FileSearchRankingOptionsImpl) _then) + : super(_value, _then); + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? ranker = freezed, + Object? scoreThreshold = freezed, + }) { + return _then(_$FileSearchRankingOptionsImpl( + ranker: freezed == ranker + ? _value.ranker + : ranker // ignore: cast_nullable_to_non_nullable + as FileSearchRanker?, + scoreThreshold: freezed == scoreThreshold + ? 
_value.scoreThreshold + : scoreThreshold // ignore: cast_nullable_to_non_nullable + as double?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$FileSearchRankingOptionsImpl extends _FileSearchRankingOptions { + const _$FileSearchRankingOptionsImpl( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.ranker, + @JsonKey(name: 'score_threshold', includeIfNull: false) + this.scoreThreshold}) + : super._(); + + factory _$FileSearchRankingOptionsImpl.fromJson(Map json) => + _$$FileSearchRankingOptionsImplFromJson(json); + + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @override + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final FileSearchRanker? ranker; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @override + @JsonKey(name: 'score_threshold', includeIfNull: false) + final double? scoreThreshold; + + @override + String toString() { + return 'FileSearchRankingOptions(ranker: $ranker, scoreThreshold: $scoreThreshold)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$FileSearchRankingOptionsImpl && + (identical(other.ranker, ranker) || other.ranker == ranker) && + (identical(other.scoreThreshold, scoreThreshold) || + other.scoreThreshold == scoreThreshold)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, ranker, scoreThreshold); + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$FileSearchRankingOptionsImplCopyWith<_$FileSearchRankingOptionsImpl> + get copyWith => __$$FileSearchRankingOptionsImplCopyWithImpl< + _$FileSearchRankingOptionsImpl>(this, _$identity); + + @override + Map toJson() { + return _$$FileSearchRankingOptionsImplToJson( + this, + ); + } +} + +abstract class _FileSearchRankingOptions extends FileSearchRankingOptions { + const factory _FileSearchRankingOptions( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final FileSearchRanker? ranker, + @JsonKey(name: 'score_threshold', includeIfNull: false) + final double? scoreThreshold}) = _$FileSearchRankingOptionsImpl; + const _FileSearchRankingOptions._() : super._(); + + factory _FileSearchRankingOptions.fromJson(Map json) = + _$FileSearchRankingOptionsImpl.fromJson; + + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @override + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + FileSearchRanker? get ranker; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @override + @JsonKey(name: 'score_threshold', includeIfNull: false) + double? get scoreThreshold; + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$FileSearchRankingOptionsImplCopyWith<_$FileSearchRankingOptionsImpl> + get copyWith => throw _privateConstructorUsedError; +} + AssistantsNamedToolChoice _$AssistantsNamedToolChoiceFromJson( Map json) { return _AssistantsNamedToolChoice.fromJson(json); @@ -46412,6 +46628,975 @@ abstract class _RunStepDeltaStepDetailsToolCallsCodeOutputImage get copyWith => throw _privateConstructorUsedError; } +RunStepDetailsToolCallsFileSearch _$RunStepDetailsToolCallsFileSearchFromJson( + Map json) { + return _RunStepDetailsToolCallsFileSearch.fromJson(json); +} + +/// @nodoc +mixin _$RunStepDetailsToolCallsFileSearch { + /// The ranking options for the file search. + @JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? get rankingOptions => + throw _privateConstructorUsedError; + + /// The results of the file search. + @JsonKey(includeIfNull: false) + List? get results => + throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCallsFileSearch to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsFileSearchCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $RunStepDetailsToolCallsFileSearchCopyWith<$Res> { + factory $RunStepDetailsToolCallsFileSearchCopyWith( + RunStepDetailsToolCallsFileSearch value, + $Res Function(RunStepDetailsToolCallsFileSearch) then) = + _$RunStepDetailsToolCallsFileSearchCopyWithImpl<$Res, + RunStepDetailsToolCallsFileSearch>; + @useResult + $Res call( + {@JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, + @JsonKey(includeIfNull: false) + List? results}); + + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>? + get rankingOptions; +} + +/// @nodoc +class _$RunStepDetailsToolCallsFileSearchCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFileSearch> + implements $RunStepDetailsToolCallsFileSearchCopyWith<$Res> { + _$RunStepDetailsToolCallsFileSearchCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? rankingOptions = freezed, + Object? results = freezed, + }) { + return _then(_value.copyWith( + rankingOptions: freezed == rankingOptions + ? _value.rankingOptions + : rankingOptions // ignore: cast_nullable_to_non_nullable + as RunStepDetailsToolCallsFileSearchRankingOptionsObject?, + results: freezed == results + ? _value.results + : results // ignore: cast_nullable_to_non_nullable + as List?, + ) as $Val); + } + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>? 
+ get rankingOptions { + if (_value.rankingOptions == null) { + return null; + } + + return $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>( + _value.rankingOptions!, (value) { + return _then(_value.copyWith(rankingOptions: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$RunStepDetailsToolCallsFileSearchImplCopyWith<$Res> + implements $RunStepDetailsToolCallsFileSearchCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchImplCopyWith( + _$RunStepDetailsToolCallsFileSearchImpl value, + $Res Function(_$RunStepDetailsToolCallsFileSearchImpl) then) = + __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, + @JsonKey(includeIfNull: false) + List? results}); + + @override + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>? + get rankingOptions; +} + +/// @nodoc +class __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsFileSearchCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFileSearchImpl> + implements _$$RunStepDetailsToolCallsFileSearchImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchImpl) _then) + : super(_value, _then); + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? rankingOptions = freezed, + Object? results = freezed, + }) { + return _then(_$RunStepDetailsToolCallsFileSearchImpl( + rankingOptions: freezed == rankingOptions + ? _value.rankingOptions + : rankingOptions // ignore: cast_nullable_to_non_nullable + as RunStepDetailsToolCallsFileSearchRankingOptionsObject?, + results: freezed == results + ? _value._results + : results // ignore: cast_nullable_to_non_nullable + as List?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$RunStepDetailsToolCallsFileSearchImpl + extends _RunStepDetailsToolCallsFileSearch { + const _$RunStepDetailsToolCallsFileSearchImpl( + {@JsonKey(name: 'ranking_options', includeIfNull: false) + this.rankingOptions, + @JsonKey(includeIfNull: false) + final List? results}) + : _results = results, + super._(); + + factory _$RunStepDetailsToolCallsFileSearchImpl.fromJson( + Map json) => + _$$RunStepDetailsToolCallsFileSearchImplFromJson(json); + + /// The ranking options for the file search. + @override + @JsonKey(name: 'ranking_options', includeIfNull: false) + final RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions; + + /// The results of the file search. + final List? _results; + + /// The results of the file search. + @override + @JsonKey(includeIfNull: false) + List? 
get results { + final value = _results; + if (value == null) return null; + if (_results is EqualUnmodifiableListView) return _results; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + @override + String toString() { + return 'RunStepDetailsToolCallsFileSearch(rankingOptions: $rankingOptions, results: $results)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$RunStepDetailsToolCallsFileSearchImpl && + (identical(other.rankingOptions, rankingOptions) || + other.rankingOptions == rankingOptions) && + const DeepCollectionEquality().equals(other._results, _results)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, rankingOptions, + const DeepCollectionEquality().hash(_results)); + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$RunStepDetailsToolCallsFileSearchImplCopyWith< + _$RunStepDetailsToolCallsFileSearchImpl> + get copyWith => __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchImpl>(this, _$identity); + + @override + Map toJson() { + return _$$RunStepDetailsToolCallsFileSearchImplToJson( + this, + ); + } +} + +abstract class _RunStepDetailsToolCallsFileSearch + extends RunStepDetailsToolCallsFileSearch { + const factory _RunStepDetailsToolCallsFileSearch( + {@JsonKey(name: 'ranking_options', includeIfNull: false) + final RunStepDetailsToolCallsFileSearchRankingOptionsObject? + rankingOptions, + @JsonKey(includeIfNull: false) + final List? results}) = + _$RunStepDetailsToolCallsFileSearchImpl; + const _RunStepDetailsToolCallsFileSearch._() : super._(); + + factory _RunStepDetailsToolCallsFileSearch.fromJson( + Map json) = + _$RunStepDetailsToolCallsFileSearchImpl.fromJson; + + /// The ranking options for the file search. + @override + @JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? get rankingOptions; + + /// The results of the file search. + @override + @JsonKey(includeIfNull: false) + List? get results; + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFileSearchImplCopyWith< + _$RunStepDetailsToolCallsFileSearchImpl> + get copyWith => throw _privateConstructorUsedError; +} + +RunStepDetailsToolCallsFileSearchRankingOptionsObject + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectFromJson( + Map json) { + return _RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson(json); +} + +/// @nodoc +mixin _$RunStepDetailsToolCallsFileSearchRankingOptionsObject { + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + FileSearchRanker get ranker => throw _privateConstructorUsedError; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @JsonKey(name: 'score_threshold') + double get scoreThreshold => throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCallsFileSearchRankingOptionsObject to a JSON map. 
+ Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith< + RunStepDetailsToolCallsFileSearchRankingOptionsObject> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith< + $Res> { + factory $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith( + RunStepDetailsToolCallsFileSearchRankingOptionsObject value, + $Res Function(RunStepDetailsToolCallsFileSearchRankingOptionsObject) + then) = + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl<$Res, + RunStepDetailsToolCallsFileSearchRankingOptionsObject>; + @useResult + $Res call( + {FileSearchRanker ranker, + @JsonKey(name: 'score_threshold') double scoreThreshold}); +} + +/// @nodoc +class _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFileSearchRankingOptionsObject> + implements + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res> { + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl( + this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? ranker = null, + Object? scoreThreshold = null, + }) { + return _then(_value.copyWith( + ranker: null == ranker + ? _value.ranker + : ranker // ignore: cast_nullable_to_non_nullable + as FileSearchRanker, + scoreThreshold: null == scoreThreshold + ? _value.scoreThreshold + : scoreThreshold // ignore: cast_nullable_to_non_nullable + as double, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< + $Res> + implements + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith( + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl value, + $Res Function( + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl) + then) = + __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl< + $Res>; + @override + @useResult + $Res call( + {FileSearchRanker ranker, + @JsonKey(name: 'score_threshold') double scoreThreshold}); +} + +/// @nodoc +class __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl< + $Res> + extends _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl< + $Res, _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl> + implements + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< + $Res> { + __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl) + _then) + : super(_value, _then); + + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? ranker = null, + Object? 
scoreThreshold = null, + }) { + return _then(_$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl( + ranker: null == ranker + ? _value.ranker + : ranker // ignore: cast_nullable_to_non_nullable + as FileSearchRanker, + scoreThreshold: null == scoreThreshold + ? _value.scoreThreshold + : scoreThreshold // ignore: cast_nullable_to_non_nullable + as double, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl + extends _RunStepDetailsToolCallsFileSearchRankingOptionsObject { + const _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl( + {required this.ranker, + @JsonKey(name: 'score_threshold') required this.scoreThreshold}) + : super._(); + + factory _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl.fromJson( + Map json) => + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplFromJson( + json); + + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @override + final FileSearchRanker ranker; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @override + @JsonKey(name: 'score_threshold') + final double scoreThreshold; + + @override + String toString() { + return 'RunStepDetailsToolCallsFileSearchRankingOptionsObject(ranker: $ranker, scoreThreshold: $scoreThreshold)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other + is _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl && + (identical(other.ranker, ranker) || other.ranker == ranker) && + (identical(other.scoreThreshold, scoreThreshold) || + other.scoreThreshold == scoreThreshold)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, ranker, scoreThreshold); + + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl> + get copyWith => + __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplToJson( + this, + ); + } +} + +abstract class _RunStepDetailsToolCallsFileSearchRankingOptionsObject + extends RunStepDetailsToolCallsFileSearchRankingOptionsObject { + const factory _RunStepDetailsToolCallsFileSearchRankingOptionsObject( + {required final FileSearchRanker ranker, + @JsonKey(name: 'score_threshold') + required final double scoreThreshold}) = + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl; + const _RunStepDetailsToolCallsFileSearchRankingOptionsObject._() : super._(); + + factory _RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson( + Map json) = + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl.fromJson; + + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @override + FileSearchRanker get ranker; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. 
+ @override + @JsonKey(name: 'score_threshold') + double get scoreThreshold; + + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +RunStepDetailsToolCallsFileSearchResultObject + _$RunStepDetailsToolCallsFileSearchResultObjectFromJson( + Map json) { + return _RunStepDetailsToolCallsFileSearchResultObject.fromJson(json); +} + +/// @nodoc +mixin _$RunStepDetailsToolCallsFileSearchResultObject { + /// The ID of the file that result was found in. + @JsonKey(name: 'file_id') + String get fileId => throw _privateConstructorUsedError; + + /// The name of the file that result was found in. + @JsonKey(name: 'file_name') + String get fileName => throw _privateConstructorUsedError; + + /// The score of the result. All values must be a floating point number between 0 and 1. + double get score => throw _privateConstructorUsedError; + + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. + @JsonKey(includeIfNull: false) + List? get content => + throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCallsFileSearchResultObject to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsFileSearchResultObjectCopyWith< + RunStepDetailsToolCallsFileSearchResultObject> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $RunStepDetailsToolCallsFileSearchResultObjectCopyWith<$Res> { + factory $RunStepDetailsToolCallsFileSearchResultObjectCopyWith( + RunStepDetailsToolCallsFileSearchResultObject value, + $Res Function(RunStepDetailsToolCallsFileSearchResultObject) then) = + _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl<$Res, + RunStepDetailsToolCallsFileSearchResultObject>; + @useResult + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(name: 'file_name') String fileName, + double score, + @JsonKey(includeIfNull: false) + List? content}); +} + +/// @nodoc +class _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFileSearchResultObject> + implements $RunStepDetailsToolCallsFileSearchResultObjectCopyWith<$Res> { + _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl( + this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? fileId = null, + Object? fileName = null, + Object? score = null, + Object? content = freezed, + }) { + return _then(_value.copyWith( + fileId: null == fileId + ? _value.fileId + : fileId // ignore: cast_nullable_to_non_nullable + as String, + fileName: null == fileName + ? _value.fileName + : fileName // ignore: cast_nullable_to_non_nullable + as String, + score: null == score + ? 
_value.score + : score // ignore: cast_nullable_to_non_nullable + as double, + content: freezed == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as List?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith< + $Res> + implements $RunStepDetailsToolCallsFileSearchResultObjectCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith( + _$RunStepDetailsToolCallsFileSearchResultObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsFileSearchResultObjectImpl) + then) = + __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(name: 'file_name') String fileName, + double score, + @JsonKey(includeIfNull: false) + List? content}); +} + +/// @nodoc +class __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFileSearchResultObjectImpl> + implements + _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchResultObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchResultObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? fileId = null, + Object? fileName = null, + Object? score = null, + Object? content = freezed, + }) { + return _then(_$RunStepDetailsToolCallsFileSearchResultObjectImpl( + fileId: null == fileId + ? _value.fileId + : fileId // ignore: cast_nullable_to_non_nullable + as String, + fileName: null == fileName + ? _value.fileName + : fileName // ignore: cast_nullable_to_non_nullable + as String, + score: null == score + ? _value.score + : score // ignore: cast_nullable_to_non_nullable + as double, + content: freezed == content + ? _value._content + : content // ignore: cast_nullable_to_non_nullable + as List?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$RunStepDetailsToolCallsFileSearchResultObjectImpl + extends _RunStepDetailsToolCallsFileSearchResultObject { + const _$RunStepDetailsToolCallsFileSearchResultObjectImpl( + {@JsonKey(name: 'file_id') required this.fileId, + @JsonKey(name: 'file_name') required this.fileName, + required this.score, + @JsonKey(includeIfNull: false) + final List? content}) + : _content = content, + super._(); + + factory _$RunStepDetailsToolCallsFileSearchResultObjectImpl.fromJson( + Map json) => + _$$RunStepDetailsToolCallsFileSearchResultObjectImplFromJson(json); + + /// The ID of the file that result was found in. + @override + @JsonKey(name: 'file_id') + final String fileId; + + /// The name of the file that result was found in. + @override + @JsonKey(name: 'file_name') + final String fileName; + + /// The score of the result. All values must be a floating point number between 0 and 1. + @override + final double score; + + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. + final List? _content; + + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. + @override + @JsonKey(includeIfNull: false) + List? 
get content { + final value = _content; + if (value == null) return null; + if (_content is EqualUnmodifiableListView) return _content; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + @override + String toString() { + return 'RunStepDetailsToolCallsFileSearchResultObject(fileId: $fileId, fileName: $fileName, score: $score, content: $content)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$RunStepDetailsToolCallsFileSearchResultObjectImpl && + (identical(other.fileId, fileId) || other.fileId == fileId) && + (identical(other.fileName, fileName) || + other.fileName == fileName) && + (identical(other.score, score) || other.score == score) && + const DeepCollectionEquality().equals(other._content, _content)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, fileId, fileName, score, + const DeepCollectionEquality().hash(_content)); + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchResultObjectImpl> + get copyWith => + __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchResultObjectImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$RunStepDetailsToolCallsFileSearchResultObjectImplToJson( + this, + ); + } +} + +abstract class _RunStepDetailsToolCallsFileSearchResultObject + extends RunStepDetailsToolCallsFileSearchResultObject { + const factory _RunStepDetailsToolCallsFileSearchResultObject( + {@JsonKey(name: 'file_id') required final String fileId, + @JsonKey(name: 'file_name') required final String fileName, + required final double score, + @JsonKey(includeIfNull: false) + final List? + content}) = _$RunStepDetailsToolCallsFileSearchResultObjectImpl; + const _RunStepDetailsToolCallsFileSearchResultObject._() : super._(); + + factory _RunStepDetailsToolCallsFileSearchResultObject.fromJson( + Map json) = + _$RunStepDetailsToolCallsFileSearchResultObjectImpl.fromJson; + + /// The ID of the file that result was found in. + @override + @JsonKey(name: 'file_id') + String get fileId; + + /// The name of the file that result was found in. + @override + @JsonKey(name: 'file_name') + String get fileName; + + /// The score of the result. All values must be a floating point number between 0 and 1. + @override + double get score; + + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. + @override + @JsonKey(includeIfNull: false) + List? get content; + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchResultObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +RunStepDetailsToolCallsFileSearchResultContent + _$RunStepDetailsToolCallsFileSearchResultContentFromJson( + Map json) { + return _RunStepDetailsToolCallsFileSearchResultContent.fromJson(json); +} + +/// @nodoc +mixin _$RunStepDetailsToolCallsFileSearchResultContent { + /// The type of the content. + String get type => throw _privateConstructorUsedError; + + /// The text content of the file. + @JsonKey(includeIfNull: false) + String? get text => throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCallsFileSearchResultContent to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsFileSearchResultContentCopyWith< + RunStepDetailsToolCallsFileSearchResultContent> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $RunStepDetailsToolCallsFileSearchResultContentCopyWith<$Res> { + factory $RunStepDetailsToolCallsFileSearchResultContentCopyWith( + RunStepDetailsToolCallsFileSearchResultContent value, + $Res Function(RunStepDetailsToolCallsFileSearchResultContent) then) = + _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl<$Res, + RunStepDetailsToolCallsFileSearchResultContent>; + @useResult + $Res call({String type, @JsonKey(includeIfNull: false) String? text}); +} + +/// @nodoc +class _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFileSearchResultContent> + implements $RunStepDetailsToolCallsFileSearchResultContentCopyWith<$Res> { + _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl( + this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? text = freezed, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + text: freezed == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith< + $Res> + implements $RunStepDetailsToolCallsFileSearchResultContentCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith( + _$RunStepDetailsToolCallsFileSearchResultContentImpl value, + $Res Function(_$RunStepDetailsToolCallsFileSearchResultContentImpl) + then) = + __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String type, @JsonKey(includeIfNull: false) String? 
text}); +} + +/// @nodoc +class __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFileSearchResultContentImpl> + implements + _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchResultContentImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchResultContentImpl) _then) + : super(_value, _then); + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? text = freezed, + }) { + return _then(_$RunStepDetailsToolCallsFileSearchResultContentImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + text: freezed == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$RunStepDetailsToolCallsFileSearchResultContentImpl + extends _RunStepDetailsToolCallsFileSearchResultContent { + const _$RunStepDetailsToolCallsFileSearchResultContentImpl( + {this.type = 'text', @JsonKey(includeIfNull: false) this.text}) + : super._(); + + factory _$RunStepDetailsToolCallsFileSearchResultContentImpl.fromJson( + Map json) => + _$$RunStepDetailsToolCallsFileSearchResultContentImplFromJson(json); + + /// The type of the content. + @override + @JsonKey() + final String type; + + /// The text content of the file. + @override + @JsonKey(includeIfNull: false) + final String? text; + + @override + String toString() { + return 'RunStepDetailsToolCallsFileSearchResultContent(type: $type, text: $text)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$RunStepDetailsToolCallsFileSearchResultContentImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.text, text) || other.text == text)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, type, text); + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith< + _$RunStepDetailsToolCallsFileSearchResultContentImpl> + get copyWith => + __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchResultContentImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$RunStepDetailsToolCallsFileSearchResultContentImplToJson( + this, + ); + } +} + +abstract class _RunStepDetailsToolCallsFileSearchResultContent + extends RunStepDetailsToolCallsFileSearchResultContent { + const factory _RunStepDetailsToolCallsFileSearchResultContent( + {final String type, + @JsonKey(includeIfNull: false) final String? text}) = + _$RunStepDetailsToolCallsFileSearchResultContentImpl; + const _RunStepDetailsToolCallsFileSearchResultContent._() : super._(); + + factory _RunStepDetailsToolCallsFileSearchResultContent.fromJson( + Map json) = + _$RunStepDetailsToolCallsFileSearchResultContentImpl.fromJson; + + /// The type of the content. 
+ @override + String get type; + + /// The text content of the file. + @override + @JsonKey(includeIfNull: false) + String? get text; + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith< + _$RunStepDetailsToolCallsFileSearchResultContentImpl> + get copyWith => throw _privateConstructorUsedError; +} + RunStepCompletionUsage _$RunStepCompletionUsageFromJson( Map json) { return _RunStepCompletionUsage.fromJson(json); @@ -57987,11 +59172,20 @@ mixin _$AssistantToolsFileSearchFileSearch { /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// - /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// Note that the file search tool may output fewer than `max_num_results` results. See the + /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults => throw _privateConstructorUsedError; + /// The ranking options for the file search. + /// + /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? get rankingOptions => + throw _privateConstructorUsedError; + /// Serializes this AssistantToolsFileSearchFileSearch to a JSON map. Map toJson() => throw _privateConstructorUsedError; @@ -58013,7 +59207,11 @@ abstract class $AssistantToolsFileSearchFileSearchCopyWith<$Res> { @useResult $Res call( {@JsonKey(name: 'max_num_results', includeIfNull: false) - int? maxNumResults}); + int? maxNumResults, + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? rankingOptions}); + + $FileSearchRankingOptionsCopyWith<$Res>? get rankingOptions; } /// @nodoc @@ -58033,14 +59231,34 @@ class _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, @override $Res call({ Object? maxNumResults = freezed, + Object? rankingOptions = freezed, }) { return _then(_value.copyWith( maxNumResults: freezed == maxNumResults ? _value.maxNumResults : maxNumResults // ignore: cast_nullable_to_non_nullable as int?, + rankingOptions: freezed == rankingOptions + ? _value.rankingOptions + : rankingOptions // ignore: cast_nullable_to_non_nullable + as FileSearchRankingOptions?, ) as $Val); } + + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $FileSearchRankingOptionsCopyWith<$Res>? 
get rankingOptions { + if (_value.rankingOptions == null) { + return null; + } + + return $FileSearchRankingOptionsCopyWith<$Res>(_value.rankingOptions!, + (value) { + return _then(_value.copyWith(rankingOptions: value) as $Val); + }); + } } /// @nodoc @@ -58054,7 +59272,12 @@ abstract class _$$AssistantToolsFileSearchFileSearchImplCopyWith<$Res> @useResult $Res call( {@JsonKey(name: 'max_num_results', includeIfNull: false) - int? maxNumResults}); + int? maxNumResults, + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? rankingOptions}); + + @override + $FileSearchRankingOptionsCopyWith<$Res>? get rankingOptions; } /// @nodoc @@ -58073,12 +59296,17 @@ class __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl<$Res> @override $Res call({ Object? maxNumResults = freezed, + Object? rankingOptions = freezed, }) { return _then(_$AssistantToolsFileSearchFileSearchImpl( maxNumResults: freezed == maxNumResults ? _value.maxNumResults : maxNumResults // ignore: cast_nullable_to_non_nullable as int?, + rankingOptions: freezed == rankingOptions + ? _value.rankingOptions + : rankingOptions // ignore: cast_nullable_to_non_nullable + as FileSearchRankingOptions?, )); } } @@ -58089,7 +59317,9 @@ class _$AssistantToolsFileSearchFileSearchImpl extends _AssistantToolsFileSearchFileSearch { const _$AssistantToolsFileSearchFileSearchImpl( {@JsonKey(name: 'max_num_results', includeIfNull: false) - this.maxNumResults}) + this.maxNumResults, + @JsonKey(name: 'ranking_options', includeIfNull: false) + this.rankingOptions}) : super._(); factory _$AssistantToolsFileSearchFileSearchImpl.fromJson( @@ -58099,15 +59329,24 @@ class _$AssistantToolsFileSearchFileSearchImpl /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// - /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// Note that the file search tool may output fewer than `max_num_results` results. See the + /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. @override @JsonKey(name: 'max_num_results', includeIfNull: false) final int? maxNumResults; + /// The ranking options for the file search. + /// + /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. + @override + @JsonKey(name: 'ranking_options', includeIfNull: false) + final FileSearchRankingOptions? 
rankingOptions; + @override String toString() { - return 'AssistantToolsFileSearchFileSearch(maxNumResults: $maxNumResults)'; + return 'AssistantToolsFileSearchFileSearch(maxNumResults: $maxNumResults, rankingOptions: $rankingOptions)'; } @override @@ -58116,12 +59355,14 @@ class _$AssistantToolsFileSearchFileSearchImpl (other.runtimeType == runtimeType && other is _$AssistantToolsFileSearchFileSearchImpl && (identical(other.maxNumResults, maxNumResults) || - other.maxNumResults == maxNumResults)); + other.maxNumResults == maxNumResults) && + (identical(other.rankingOptions, rankingOptions) || + other.rankingOptions == rankingOptions)); } @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, maxNumResults); + int get hashCode => Object.hash(runtimeType, maxNumResults, rankingOptions); /// Create a copy of AssistantToolsFileSearchFileSearch /// with the given fields replaced by the non-null parameter values. @@ -58144,8 +59385,11 @@ class _$AssistantToolsFileSearchFileSearchImpl abstract class _AssistantToolsFileSearchFileSearch extends AssistantToolsFileSearchFileSearch { const factory _AssistantToolsFileSearchFileSearch( - {@JsonKey(name: 'max_num_results', includeIfNull: false) - final int? maxNumResults}) = _$AssistantToolsFileSearchFileSearchImpl; + {@JsonKey(name: 'max_num_results', includeIfNull: false) + final int? maxNumResults, + @JsonKey(name: 'ranking_options', includeIfNull: false) + final FileSearchRankingOptions? rankingOptions}) = + _$AssistantToolsFileSearchFileSearchImpl; const _AssistantToolsFileSearchFileSearch._() : super._(); factory _AssistantToolsFileSearchFileSearch.fromJson( @@ -58155,12 +59399,21 @@ abstract class _AssistantToolsFileSearchFileSearch /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// - /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// Note that the file search tool may output fewer than `max_num_results` results. See the + /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. @override @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults; + /// The ranking options for the file search. + /// + /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. + @override + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? get rankingOptions; + /// Create a copy of AssistantToolsFileSearchFileSearch /// with the given fields replaced by the non-null parameter values. 
@override @@ -63844,8 +65097,11 @@ mixin _$RunStepDetailsToolCalls { @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) codeInterpreter, - required TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch) + required TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch) fileSearch, required TResult Function( String id, String type, RunStepDetailsToolCallsFunction function) @@ -63860,8 +65116,11 @@ mixin _$RunStepDetailsToolCalls { @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult? Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult? Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult? Function( String id, String type, RunStepDetailsToolCallsFunction function)? @@ -63876,8 +65135,11 @@ mixin _$RunStepDetailsToolCalls { @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult Function( String id, String type, RunStepDetailsToolCallsFunction function)? @@ -64096,8 +65358,11 @@ class _$RunStepDetailsToolCallsCodeObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) codeInterpreter, - required TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch) + required TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch) fileSearch, required TResult Function( String id, String type, RunStepDetailsToolCallsFunction function) @@ -64115,8 +65380,11 @@ class _$RunStepDetailsToolCallsCodeObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult? Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult? Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult? Function( String id, String type, RunStepDetailsToolCallsFunction function)? @@ -64134,8 +65402,11 @@ class _$RunStepDetailsToolCallsCodeObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult Function( String id, String type, RunStepDetailsToolCallsFunction function)? 
@@ -64241,7 +65512,10 @@ abstract class _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> $Res call( {String id, String type, - @JsonKey(name: 'file_search') Map fileSearch}); + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch}); + + $RunStepDetailsToolCallsFileSearchCopyWith<$Res> get fileSearch; } /// @nodoc @@ -64273,11 +65547,22 @@ class __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> : type // ignore: cast_nullable_to_non_nullable as String, fileSearch: null == fileSearch - ? _value._fileSearch + ? _value.fileSearch : fileSearch // ignore: cast_nullable_to_non_nullable - as Map, + as RunStepDetailsToolCallsFileSearch, )); } + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $RunStepDetailsToolCallsFileSearchCopyWith<$Res> get fileSearch { + return $RunStepDetailsToolCallsFileSearchCopyWith<$Res>(_value.fileSearch, + (value) { + return _then(_value.copyWith(fileSearch: value)); + }); + } } /// @nodoc @@ -64287,10 +65572,8 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl const _$RunStepDetailsToolCallsFileSearchObjectImpl( {required this.id, required this.type, - @JsonKey(name: 'file_search') - required final Map fileSearch}) - : _fileSearch = fileSearch, - super._(); + @JsonKey(name: 'file_search') required this.fileSearch}) + : super._(); factory _$RunStepDetailsToolCallsFileSearchObjectImpl.fromJson( Map json) => @@ -64304,17 +65587,10 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @override final String type; - /// For now, this is always going to be an empty object. - final Map _fileSearch; - - /// For now, this is always going to be an empty object. + /// The definition of the file search that was called. @override @JsonKey(name: 'file_search') - Map get fileSearch { - if (_fileSearch is EqualUnmodifiableMapView) return _fileSearch; - // ignore: implicit_dynamic_type - return EqualUnmodifiableMapView(_fileSearch); - } + final RunStepDetailsToolCallsFileSearch fileSearch; @override String toString() { @@ -64328,14 +65604,13 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl other is _$RunStepDetailsToolCallsFileSearchObjectImpl && (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && - const DeepCollectionEquality() - .equals(other._fileSearch, _fileSearch)); + (identical(other.fileSearch, fileSearch) || + other.fileSearch == fileSearch)); } @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash( - runtimeType, id, type, const DeepCollectionEquality().hash(_fileSearch)); + int get hashCode => Object.hash(runtimeType, id, type, fileSearch); /// Create a copy of RunStepDetailsToolCalls /// with the given fields replaced by the non-null parameter values. 
@@ -64357,8 +65632,11 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) codeInterpreter, - required TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch) + required TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch) fileSearch, required TResult Function( String id, String type, RunStepDetailsToolCallsFunction function) @@ -64376,8 +65654,11 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult? Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult? Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult? Function( String id, String type, RunStepDetailsToolCallsFunction function)? @@ -64395,8 +65676,11 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult Function( String id, String type, RunStepDetailsToolCallsFunction function)? @@ -64461,7 +65745,7 @@ abstract class RunStepDetailsToolCallsFileSearchObject {required final String id, required final String type, @JsonKey(name: 'file_search') - required final Map fileSearch}) = + required final RunStepDetailsToolCallsFileSearch fileSearch}) = _$RunStepDetailsToolCallsFileSearchObjectImpl; const RunStepDetailsToolCallsFileSearchObject._() : super._(); @@ -64477,9 +65761,9 @@ abstract class RunStepDetailsToolCallsFileSearchObject @override String get type; - /// For now, this is always going to be an empty object. + /// The definition of the file search that was called. @JsonKey(name: 'file_search') - Map get fileSearch; + RunStepDetailsToolCallsFileSearch get fileSearch; /// Create a copy of RunStepDetailsToolCalls /// with the given fields replaced by the non-null parameter values. @@ -64614,8 +65898,11 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) codeInterpreter, - required TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch) + required TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch) fileSearch, required TResult Function( String id, String type, RunStepDetailsToolCallsFunction function) @@ -64633,8 +65920,11 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult? Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult? Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult? Function( String id, String type, RunStepDetailsToolCallsFunction function)? 
@@ -64652,8 +65942,11 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult Function( String id, String type, RunStepDetailsToolCallsFunction function)? diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index d03e9a18..3ffb5c36 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -2516,6 +2516,34 @@ Map _$$ListAssistantsResponseImplToJson( 'has_more': instance.hasMore, }; +_$FileSearchRankingOptionsImpl _$$FileSearchRankingOptionsImplFromJson( + Map json) => + _$FileSearchRankingOptionsImpl( + ranker: $enumDecodeNullable(_$FileSearchRankerEnumMap, json['ranker'], + unknownValue: JsonKey.nullForUndefinedEnumValue), + scoreThreshold: (json['score_threshold'] as num?)?.toDouble(), + ); + +Map _$$FileSearchRankingOptionsImplToJson( + _$FileSearchRankingOptionsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('ranker', _$FileSearchRankerEnumMap[instance.ranker]); + writeNotNull('score_threshold', instance.scoreThreshold); + return val; +} + +const _$FileSearchRankerEnumMap = { + FileSearchRanker.auto: 'auto', + FileSearchRanker.default20240821: 'default_2024_08_21', +}; + _$AssistantsNamedToolChoiceImpl _$$AssistantsNamedToolChoiceImplFromJson( Map json) => _$AssistantsNamedToolChoiceImpl( @@ -4280,6 +4308,109 @@ Map return val; } +_$RunStepDetailsToolCallsFileSearchImpl + _$$RunStepDetailsToolCallsFileSearchImplFromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchImpl( + rankingOptions: json['ranking_options'] == null + ? null + : RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson( + json['ranking_options'] as Map), + results: (json['results'] as List?) 
+ ?.map((e) => + RunStepDetailsToolCallsFileSearchResultObject.fromJson( + e as Map)) + .toList(), + ); + +Map _$$RunStepDetailsToolCallsFileSearchImplToJson( + _$RunStepDetailsToolCallsFileSearchImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('ranking_options', instance.rankingOptions?.toJson()); + writeNotNull('results', instance.results?.map((e) => e.toJson()).toList()); + return val; +} + +_$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplFromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl( + ranker: $enumDecode(_$FileSearchRankerEnumMap, json['ranker']), + scoreThreshold: (json['score_threshold'] as num).toDouble(), + ); + +Map _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplToJson( + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl instance) => + { + 'ranker': _$FileSearchRankerEnumMap[instance.ranker]!, + 'score_threshold': instance.scoreThreshold, + }; + +_$RunStepDetailsToolCallsFileSearchResultObjectImpl + _$$RunStepDetailsToolCallsFileSearchResultObjectImplFromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchResultObjectImpl( + fileId: json['file_id'] as String, + fileName: json['file_name'] as String, + score: (json['score'] as num).toDouble(), + content: (json['content'] as List?) + ?.map((e) => + RunStepDetailsToolCallsFileSearchResultContent.fromJson( + e as Map)) + .toList(), + ); + +Map _$$RunStepDetailsToolCallsFileSearchResultObjectImplToJson( + _$RunStepDetailsToolCallsFileSearchResultObjectImpl instance) { + final val = { + 'file_id': instance.fileId, + 'file_name': instance.fileName, + 'score': instance.score, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); + return val; +} + +_$RunStepDetailsToolCallsFileSearchResultContentImpl + _$$RunStepDetailsToolCallsFileSearchResultContentImplFromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchResultContentImpl( + type: json['type'] as String? ?? 'text', + text: json['text'] as String?, + ); + +Map + _$$RunStepDetailsToolCallsFileSearchResultContentImplToJson( + _$RunStepDetailsToolCallsFileSearchResultContentImpl instance) { + final val = { + 'type': instance.type, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('text', instance.text); + return val; +} + _$RunStepCompletionUsageImpl _$$RunStepCompletionUsageImplFromJson( Map json) => _$RunStepCompletionUsageImpl( @@ -5302,6 +5433,10 @@ _$AssistantToolsFileSearchFileSearchImpl Map json) => _$AssistantToolsFileSearchFileSearchImpl( maxNumResults: (json['max_num_results'] as num?)?.toInt(), + rankingOptions: json['ranking_options'] == null + ? 
null + : FileSearchRankingOptions.fromJson( + json['ranking_options'] as Map), ); Map _$$AssistantToolsFileSearchFileSearchImplToJson( @@ -5315,6 +5450,7 @@ Map _$$AssistantToolsFileSearchFileSearchImplToJson( } writeNotNull('max_num_results', instance.maxNumResults); + writeNotNull('ranking_options', instance.rankingOptions?.toJson()); return val; } @@ -5754,7 +5890,8 @@ _$RunStepDetailsToolCallsFileSearchObjectImpl _$RunStepDetailsToolCallsFileSearchObjectImpl( id: json['id'] as String, type: json['type'] as String, - fileSearch: json['file_search'] as Map, + fileSearch: RunStepDetailsToolCallsFileSearch.fromJson( + json['file_search'] as Map), ); Map _$$RunStepDetailsToolCallsFileSearchObjectImplToJson( @@ -5762,7 +5899,7 @@ Map _$$RunStepDetailsToolCallsFileSearchObjectImplToJson( { 'id': instance.id, 'type': instance.type, - 'file_search': instance.fileSearch, + 'file_search': instance.fileSearch.toJson(), }; _$RunStepDetailsToolCallsFunctionObjectImpl diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 9c474cec..4fc465f5 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -796,6 +796,16 @@ paths: schema: type: string description: The ID of the thread to run. + - name: include + in: query + description: &include_param_description | + A list of additional fields to include in the response. Currently the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. + + See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + schema: + type: string requestBody: required: true content: @@ -968,6 +978,11 @@ paths: description: *pagination_before_param_description schema: type: string + - name: include + in: query + description: *include_param_description + schema: + type: string responses: "200": description: OK @@ -1000,6 +1015,11 @@ paths: schema: type: string description: The ID of the run step to retrieve. + - name: include + in: query + description: *include_param_description + schema: + type: string responses: "200": description: OK @@ -3879,10 +3899,32 @@ components: The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. - Note that the file search tool may output fewer than `max_num_results` results. See the [file search - tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + Note that the file search tool may output fewer than `max_num_results` results. See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + ranking_options: + $ref: "#/components/schemas/FileSearchRankingOptions" required: - type + FileSearchRankingOptions: + type: object + description: | + The ranking options for the file search. + + See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + properties: + ranker: + $ref: "#/components/schemas/FileSearchRanker" + score_threshold: + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. 
+ minimum: 0 + maximum: 1 + FileSearchRanker: + type: string + description: The ranker to use for the file search. If not specified will use the `auto` ranker. + enum: [ "auto", "default_2024_08_21" ] AssistantToolsFunction: type: object description: Function tool @@ -5653,13 +5695,74 @@ components: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. file_search: - type: object - description: For now, this is always going to be an empty object. - additionalProperties: true + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearch" required: - id - type - file_search + RunStepDetailsToolCallsFileSearch: + type: object + description: The definition of the file search that was called. + properties: + ranking_options: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject" + results: + type: array + description: The results of the file search. + items: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" + RunStepDetailsToolCallsFileSearchRankingOptionsObject: + type: object + description: The ranking options for the file search. + properties: + ranker: + $ref: "#/components/schemas/FileSearchRanker" + score_threshold: + type: number + description: | + The score threshold for the file search. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + required: + - ranker + - score_threshold + RunStepDetailsToolCallsFileSearchResultObject: + type: object + description: A result instance of the file search. + properties: + file_id: + type: string + description: The ID of the file that result was found in. + file_name: + type: string + description: The name of the file that result was found in. + score: + type: number + description: The score of the result. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + content: + type: array + description: | + The content of the result that was found. The content is only included if requested via the include + query parameter. + items: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultContent" + required: + - file_id + - file_name + - score + RunStepDetailsToolCallsFileSearchResultContent: + type: object + description: The content of the result that was found. + properties: + type: + type: string + description: The type of the content. + default: text + text: + type: string + description: The text content of the file. RunStepDeltaStepDetailsToolCallsFileSearchObject: type: object description: File search tool call @@ -6298,7 +6401,7 @@ components: - data RunStepStreamEvent: type: object - description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. + description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. properties: event: $ref: "#/components/schemas/EventType" @@ -6309,7 +6412,7 @@ components: - data RunStepStreamDeltaEvent: type: object - description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. + description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. 
properties: event: $ref: "#/components/schemas/EventType" diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 02653404..de7cd98a 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -4682,6 +4682,17 @@ paths: schema: type: string description: The ID of the thread to run. + - name: include[] + in: query + description: &include_param_description | + A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. + + See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + schema: + type: array + items: + type: string + enum: [ "step_details.tool_calls[*].file_search.results[*].content" ] requestBody: required: true content: @@ -5642,6 +5653,14 @@ paths: description: *pagination_before_param_description schema: type: string + - name: include[] + in: query + description: *include_param_description + schema: + type: array + items: + type: string + enum: [ "step_details.tool_calls[*].file_search.results[*].content" ] responses: "200": description: OK @@ -5653,7 +5672,7 @@ paths: name: List run steps group: threads beta: true - returns: A list of [run step](/docs/api-reference/runs/step-object) objects. + returns: A list of [run step](/docs/api-reference/run-steps/step-object) objects. examples: request: curl: | @@ -5745,6 +5764,14 @@ paths: schema: type: string description: The ID of the run step to retrieve. + - name: include[] + in: query + description: *include_param_description + schema: + type: array + items: + type: string + enum: [ "step_details.tool_calls[*].file_search.results[*].content" ] responses: "200": description: OK @@ -5756,7 +5783,7 @@ paths: name: Retrieve run step group: threads beta: true - returns: The [run step](/docs/api-reference/runs/step-object) object matching the specified ID. + returns: The [run step](/docs/api-reference/run-steps/step-object) object matching the specified ID. examples: request: curl: | @@ -10803,10 +10830,30 @@ components: description: | The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive. - Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + ranking_options: + $ref: "#/components/schemas/FileSearchRankingOptions" required: - type + FileSearchRankingOptions: + title: File search tool call ranking options + type: object + description: | + The ranking options for the file search. + + See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + properties: + ranker: + type: string + description: The ranker to use for the file search. If not specified will use the `auto` ranker. + enum: [ "auto", "default_2024_08_21" ] + score_threshold: + type: number + description: The score threshold for the file search. 
All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + AssistantToolsFileSearchTypeOnly: type: object title: FileSearch tool @@ -12769,11 +12816,72 @@ components: type: object description: For now, this is always going to be an empty object. x-oaiTypeLabel: map + properties: + ranking_options: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject" + results: + type: array + description: The results of the file search. + items: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" required: - id - type - file_search + RunStepDetailsToolCallsFileSearchRankingOptionsObject: + title: File search tool call ranking options + type: object + description: The ranking options for the file search. + properties: + ranker: + type: string + description: The ranker used for the file search. + enum: [ "default_2024_08_21" ] + score_threshold: + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + required: + - ranker + - score_threshold + + RunStepDetailsToolCallsFileSearchResultObject: + title: File search tool call result + type: object + description: A result instance of the file search. + x-oaiTypeLabel: map + properties: + file_id: + type: string + description: The ID of the file that result was found in. + file_name: + type: string + description: The name of the file that result was found in. + score: + type: number + description: The score of the result. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + content: + type: array + description: The content of the result that was found. The content is only included if requested via the include query parameter. + items: + type: object + properties: + type: + type: string + description: The type of the content. + enum: [ "text" ] + text: + type: string + description: The text content of the file. + required: + - file_id + - file_name + - score + RunStepDeltaStepDetailsToolCallsFileSearchObject: title: File search tool call type: object @@ -13560,9 +13668,9 @@ components: required: - event - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) is created. + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created. x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - type: object properties: event: @@ -13573,9 +13681,9 @@ components: required: - event - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state. + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - type: object properties: event: @@ -13586,7 +13694,7 @@ components: required: - event - data - description: Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed. + description: Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed. 
x-oaiMeta: dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" - type: object @@ -13599,9 +13707,9 @@ components: required: - event - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) is completed. + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed. x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - type: object properties: event: @@ -13612,9 +13720,9 @@ components: required: - event - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) fails. + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails. x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - type: object properties: event: @@ -13625,9 +13733,9 @@ components: required: - event - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled. + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled. x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - type: object properties: event: @@ -13638,9 +13746,9 @@ components: required: - event - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) expires. + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires. x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" MessageStreamEvent: oneOf: diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index 91e131b4..f98e6d9a 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,5 +1,5 @@ name: openai_dart -description: Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. +description: Dart client for the OpenAI API. Supports chat (GPT-4o, etc.), completions, embeddings, images (DALL·E 3), assistants (threads, runs, vector stores, etc.), batch, fine-tuning, etc. 
version: 0.4.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart From fe82d1dcee0863e0a270ab4058a598ae6d1c3579 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 31 Aug 2024 11:29:32 +0200 Subject: [PATCH 229/251] feat: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings (#544) --- .../lib/src/embeddings/google_ai/google_ai_embeddings.dart | 5 ++--- .../test/embeddings/google_ai/google_ai_embeddings_test.dart | 3 +-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart index 385f1088..93ec105a 100644 --- a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart +++ b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart @@ -138,7 +138,6 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { /// The number of dimensions the resulting output embeddings should have. /// Only supported in `text-embedding-004` and later models. - /// TODO https://github.com/google-gemini/generative-ai-dart/pull/149 int? dimensions; /// The maximum number of documents to embed in a single request. @@ -168,7 +167,7 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { Content.text(doc.pageContent), taskType: TaskType.retrievalDocument, title: doc.metadata[docTitleKey], - // outputDimensionality: dimensions, TODO + outputDimensionality: dimensions, ); }).toList(growable: false), ); @@ -187,7 +186,7 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { final data = await _googleAiClient.embedContent( Content.text(query), taskType: TaskType.retrievalQuery, - // outputDimensionality: dimensions, TODO + outputDimensionality: dimensions, ); return data.embedding.values; } diff --git a/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart b/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart index bc942e51..a2f88906 100644 --- a/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart +++ b/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart @@ -49,8 +49,7 @@ void main() { expect(res[1].length, 768); }); - // TODO https://github.com/google-gemini/generative-ai-dart/pull/149 - test('Test shortening embeddings', skip: true, () async { + test('Test shortening embeddings', () async { embeddings.dimensions = 256; final res = await embeddings.embedQuery('Hello world'); expect(res.length, 256); From a4cda8990ac249fb722db0bac5521cac5a8007ba Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 6 Sep 2024 23:47:17 +0200 Subject: [PATCH 230/251] docs: Update READMEs (#545) --- packages/langchain/README.md | 18 +- .../lib/langchain_firebase.dart | 2 +- packages/langchain_firebase/pubspec.yaml | 2 +- .../lib/langchain_google.dart | 2 +- packages/langchain_google/pubspec.yaml | 2 +- .../lib/langchain_openai.dart | 2 +- packages/langchain_openai/pubspec.yaml | 2 +- .../test/chat_models/github_models_test.dart | 181 ++++++++++++++++++ packages/openai_dart/README.md | 15 +- 9 files changed, 203 insertions(+), 23 deletions(-) create mode 100644 packages/langchain_openai/test/chat_models/github_models_test.dart diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 2dfacb97..e93bfdd1 
100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -104,15 +104,15 @@ The following integrations are available in LangChain.dart: ### Chat Models -| Chat model | Package | Streaming | Multi-modal | Tool-call | Description | -|-------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|-----------|-------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [ChatAnthropic](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/anthropic) | [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | ✔ | ✔ | ✔ | [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API) | -| [ChatFirebaseVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/firebase_vertex_ai) | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | ✔ | ✔ | ✔ | [Vertex AI for Firebase API](https://firebase.google.com/docs/vertex-ai) (aka Gemini API) | -| [ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) | -| [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://ollama.ai) | -| [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) | -| [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) 
| -| [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) | +| Chat model | Package | Streaming | Multi-modal | Tool-call | Description | +|-------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|-----------|-------------|-----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [ChatAnthropic](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/anthropic) | [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | ✔ | ✔ | ✔ | [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API) | +| [ChatFirebaseVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/firebase_vertex_ai) | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | ✔ | ✔ | ✔ | [Vertex AI for Firebase API](https://firebase.google.com/docs/vertex-ai) (aka Gemini API) | +| [ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) | +| [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://ollama.ai) | +| [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) | +| [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([GitHub Models](https://github.com/marketplace/models), [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) 
| +| [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) | ### LLMs diff --git a/packages/langchain_firebase/lib/langchain_firebase.dart b/packages/langchain_firebase/lib/langchain_firebase.dart index 0b76e587..45448a85 100644 --- a/packages/langchain_firebase/lib/langchain_firebase.dart +++ b/packages/langchain_firebase/lib/langchain_firebase.dart @@ -1,4 +1,4 @@ -/// LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). +/// LangChain.dart integration module for Firebase (Gemini, VertexAI for Firebase, Firestore, etc.). library; export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index a64aae40..35c94028 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,5 +1,5 @@ name: langchain_firebase -description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). +description: LangChain.dart integration module for Firebase (Gemini, VertexAI for Firebase, Firestore, etc.). version: 0.2.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase diff --git a/packages/langchain_google/lib/langchain_google.dart b/packages/langchain_google/lib/langchain_google.dart index 371e45ad..a4dd4908 100644 --- a/packages/langchain_google/lib/langchain_google.dart +++ b/packages/langchain_google/lib/langchain_google.dart @@ -1,4 +1,4 @@ -/// LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). +/// LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.). library; export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index a4897bd3..f39cf8bb 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,5 +1,5 @@ name: langchain_google -description: LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). +description: LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.). version: 0.6.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google diff --git a/packages/langchain_openai/lib/langchain_openai.dart b/packages/langchain_openai/lib/langchain_openai.dart index d2730a6b..77e92aa5 100644 --- a/packages/langchain_openai/lib/langchain_openai.dart +++ b/packages/langchain_openai/lib/langchain_openai.dart @@ -1,4 +1,4 @@ -/// LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). +/// LangChain.dart integration module for OpenAI (GPT-4o, Embeddings, DALL·E, etc.). 
library; export 'package:openai_dart/openai_dart.dart' show OpenAIClientException; diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index bf0db409..c367ac51 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,5 +1,5 @@ name: langchain_openai -description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). +description: LangChain.dart integration module for OpenAI (GPT-4o, Embeddings, DALL·E, etc.). version: 0.7.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai diff --git a/packages/langchain_openai/test/chat_models/github_models_test.dart b/packages/langchain_openai/test/chat_models/github_models_test.dart new file mode 100644 index 00000000..7eac34dd --- /dev/null +++ b/packages/langchain_openai/test/chat_models/github_models_test.dart @@ -0,0 +1,181 @@ +// ignore_for_file: avoid_print +@TestOn('vm') +library; // Uses dart:io + +import 'dart:convert'; +import 'dart:io'; + +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:langchain_openai/langchain_openai.dart'; +import 'package:test/test.dart'; + +void main() { + group('GitHub Models tests', () { + late ChatOpenAI chatModel; + + setUp(() async { + chatModel = ChatOpenAI( + apiKey: Platform.environment['GITHUB_TOKEN'], + baseUrl: 'https://models.inference.ai.azure.com', + ); + }); + + tearDown(() { + chatModel.close(); + }); + + test('Test invoke GitHub Models API with different models', () async { + final models = [ + 'gpt-4o', + 'AI21-Jamba-Instruct', + 'meta-llama-3.1-405b-instruct', + 'Mistral-large', + 'Phi-3.5-mini-instruct', + ]; + for (final model in models) { + print('Testing model: $model'); + final res = await chatModel.invoke( + PromptValue.string( + 'List the numbers from 1 to 9 in order. ' + 'Output ONLY the numbers in one line without any spaces or commas. ' + 'NUMBERS:', + ), + options: ChatOpenAIOptions( + model: model, + temperature: 0, + ), + ); + + expect(res.id, isNotEmpty); + expect( + res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), + contains('123456789'), + ); + expect(res.metadata, isNotEmpty, reason: model); + expect(res.metadata['created'], greaterThan(0), reason: model); + expect(res.metadata['model'], isNotEmpty, reason: model); + } + }); + + test('Test stream GitHub Models API with different models', () async { + final models = [ + 'gpt-4o', + 'AI21-Jamba-Instruct', + 'meta-llama-3.1-405b-instruct', + 'Phi-3.5-mini-instruct', + ]; + for (final model in models) { + print('Testing model: $model'); + final stream = chatModel.stream( + PromptValue.string( + 'List the numbers from 1 to 9 in order. ' + 'Output ONLY the numbers in one line without any spaces or commas. 
' + 'NUMBERS:', + ), + options: ChatOpenAIOptions( + model: model, + temperature: 0, + ), + ); + + String content = ''; + int count = 0; + await for (final res in stream) { + content += res.output.content.replaceAll(RegExp(r'[\s\n]'), ''); + count++; + } + expect(count, greaterThan(1), reason: model); + expect(content, contains('123456789'), reason: model); + } + }); + + test('Test countTokens', () async { + final models = [ + 'gpt-4o', + 'AI21-Jamba-Instruct', + 'meta-llama-3.1-405b-instruct', + 'Mistral-large', + 'Phi-3.5-mini-instruct', + ]; + for (final model in models) { + print('Testing model: $model'); + const text = 'Hello, how are you?'; + + final numTokens = await chatModel.countTokens( + PromptValue.chat([ChatMessage.humanText(text)]), + options: ChatOpenAIOptions(model: model), + ); + expect(numTokens, 13, reason: model); + } + }); + + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + final humanMessage = ChatMessage.humanText( + 'What’s the weather like in Boston right now?', + ); + final res1 = await chatModel.invoke( + PromptValue.chat([humanMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage1 = res1.output; + + expect(aiMessage1.content, isEmpty); + expect(aiMessage1.toolCalls, isNotEmpty); + final toolCall = aiMessage1.toolCalls.first; + + expect(toolCall.name, tool.name); + expect(toolCall.arguments.containsKey('location'), isTrue); + expect(toolCall.arguments['location'], contains('Boston')); + + final functionResult = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + final functionMessage = ChatMessage.tool( + toolCallId: toolCall.id, + content: json.encode(functionResult), + ); + + final res2 = await chatModel.invoke( + PromptValue.chat([humanMessage, aiMessage1, functionMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage2 = res2.output; + + expect(aiMessage2.toolCalls, isEmpty); + expect(aiMessage2.content, contains('22')); + }); + }); +} diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md index 12f5b51f..68a26356 100644 --- a/packages/openai_dart/README.md +++ b/packages/openai_dart/README.md @@ -16,7 +16,7 @@ Unofficial Dart client for [OpenAI](https://platform.openai.com/docs/api-referen - Custom base URL, headers and query params support (e.g. HTTP proxies) - Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) - Partial Azure OpenAI API support -- It can be used to consume OpenAI-compatible APIs like [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc. 
+- It can be used to consume OpenAI-compatible APIs like [GitHub Models](https://github.com/marketplace/models), [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc. **Supported endpoints:** @@ -900,7 +900,6 @@ final res = await client.createThreadRun( You can also use Structured Outputs to ensure that the model-generated responses adhere to a specific JSON schema: ```dart - final res = await client.createThreadRun( threadId: threadId, request: CreateRunRequest( @@ -1198,21 +1197,21 @@ final client = OpenAIClient( This client can be used to consume APIs that are compatible with the OpenAI API spec. -[TogetherAI](https://www.together.ai/): +[GitHub Models](https://github.com/marketplace/models): ```dart final client = OpenAIClient( - baseUrl: 'https://api.together.xyz/v1', - headers: { 'api-key': 'YOUR_TOGETHER_AI_API_KEY' }, + baseUrl: 'https://models.inference.ai.azure.com', + headers: { 'api-key': 'YOUR_GITHUB_TOKEN' }, ); ``` -[Anyscale](https://www.anyscale.com/): +[TogetherAI](https://www.together.ai/): ```dart final client = OpenAIClient( - baseUrl: 'https://api.endpoints.anyscale.com/v1', - headers: { 'api-key': 'YOUR_ANYSCALE_API_KEY' }, + baseUrl: 'https://api.together.xyz/v1', + headers: { 'api-key': 'YOUR_TOGETHER_AI_API_KEY' }, ); ``` From 395dbfc8ef2bb93f9c78fb22dd5329e0a6877145 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 6 Sep 2024 23:53:43 +0200 Subject: [PATCH 231/251] build: Update gcloud and googleapis dependencies (#546) --- examples/docs_examples/pubspec.lock | 8 ++++---- examples/hello_world_flutter/pubspec.lock | 8 ++++---- examples/vertex_ai_matching_engine_setup/pubspec.lock | 8 ++++---- examples/vertex_ai_matching_engine_setup/pubspec.yaml | 2 +- melos.yaml | 4 ++-- packages/langchain_google/pubspec.yaml | 4 ++-- packages/vertex_ai/pubspec.yaml | 2 +- 7 files changed, 18 insertions(+), 18 deletions(-) diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 78752c5c..a7a05f06 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -147,10 +147,10 @@ packages: dependency: transitive description: name: gcloud - sha256: e9501083036d5f94027ce5afddd8ddae9b04121cf2fc6036b2cdd5663b52fca7 + sha256: b8fbff52ff1cfdb2bb3c53eb039c0ee3745618632969b60ec25d55b31fbb36dd url: "https://pub.dev" source: hosted - version: "0.8.12" + version: "0.8.13" google_generative_ai: dependency: transitive description: @@ -171,10 +171,10 @@ packages: dependency: transitive description: name: googleapis - sha256: "8a8c311723162af077ca73f94b823b97ff68770d966e29614d20baca9fdb490a" + sha256: "864f222aed3f2ff00b816c675edf00a39e2aaf373d728d8abec30b37bee1a81c" url: "https://pub.dev" source: hosted - version: "12.0.0" + version: "13.2.0" googleapis_auth: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 7eb4a4d8..1fbbc8d3 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -130,10 +130,10 @@ packages: dependency: transitive description: name: gcloud - sha256: e9501083036d5f94027ce5afddd8ddae9b04121cf2fc6036b2cdd5663b52fca7 + sha256: b8fbff52ff1cfdb2bb3c53eb039c0ee3745618632969b60ec25d55b31fbb36dd url: 
"https://pub.dev" source: hosted - version: "0.8.12" + version: "0.8.13" google_generative_ai: dependency: transitive description: @@ -154,10 +154,10 @@ packages: dependency: transitive description: name: googleapis - sha256: "8a8c311723162af077ca73f94b823b97ff68770d966e29614d20baca9fdb490a" + sha256: "864f222aed3f2ff00b816c675edf00a39e2aaf373d728d8abec30b37bee1a81c" url: "https://pub.dev" source: hosted - version: "12.0.0" + version: "13.2.0" googleapis_auth: dependency: transitive description: diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.lock b/examples/vertex_ai_matching_engine_setup/pubspec.lock index c4bd2136..752608b4 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.lock +++ b/examples/vertex_ai_matching_engine_setup/pubspec.lock @@ -45,10 +45,10 @@ packages: dependency: "direct main" description: name: gcloud - sha256: e9501083036d5f94027ce5afddd8ddae9b04121cf2fc6036b2cdd5663b52fca7 + sha256: b8fbff52ff1cfdb2bb3c53eb039c0ee3745618632969b60ec25d55b31fbb36dd url: "https://pub.dev" source: hosted - version: "0.8.12" + version: "0.8.13" google_identity_services_web: dependency: transitive description: @@ -61,10 +61,10 @@ packages: dependency: transitive description: name: googleapis - sha256: "8a8c311723162af077ca73f94b823b97ff68770d966e29614d20baca9fdb490a" + sha256: "864f222aed3f2ff00b816c675edf00a39e2aaf373d728d8abec30b37bee1a81c" url: "https://pub.dev" source: hosted - version: "12.0.0" + version: "13.2.0" googleapis_auth: dependency: "direct main" description: diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.yaml b/examples/vertex_ai_matching_engine_setup/pubspec.yaml index e42414a8..4519fdbb 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.yaml +++ b/examples/vertex_ai_matching_engine_setup/pubspec.yaml @@ -7,7 +7,7 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - gcloud: ^0.8.12 + gcloud: ^0.8.13 googleapis_auth: ^1.6.0 http: ^1.2.2 vertex_ai: ^0.1.0+1 diff --git a/melos.yaml b/melos.yaml index 3a1c0092..b1835a9d 100644 --- a/melos.yaml +++ b/melos.yaml @@ -40,9 +40,9 @@ command: flutter_bloc: ^8.1.6 flutter_markdown: ^0.7.3 freezed_annotation: ^2.4.2 - gcloud: ^0.8.12 + gcloud: ^0.8.13 google_generative_ai: 0.4.4 - googleapis: ^12.0.0 + googleapis: ^13.0.0 googleapis_auth: ^1.6.0 http: ^1.2.2 js: ^0.7.1 diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index f39cf8bb..a20269ee 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -19,9 +19,9 @@ environment: dependencies: collection: ^1.18.0 fetch_client: ^1.1.2 - gcloud: ^0.8.12 + gcloud: ^0.8.13 google_generative_ai: 0.4.4 - googleapis: ^12.0.0 + googleapis: ^13.0.0 googleapis_auth: ^1.6.0 http: ^1.2.2 langchain_core: 0.3.5 diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index c612870d..5d7612aa 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -18,7 +18,7 @@ environment: dependencies: collection: ^1.18.0 - googleapis: ^12.0.0 + googleapis: ^13.0.0 googleapis_auth: ^1.6.0 http: ^1.2.2 meta: ^1.11.0 From 5edd1261719bc1e54781465df28b29a98ded14ed Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 21 Sep 2024 00:51:10 +0200 Subject: [PATCH 232/251] docs: Bootstrap docusaurus docs (#548) Co-authored-by: Douglas Bett --- .github/workflows/firebase-hosting-merge.yml | 21 + .../firebase-hosting-pull-request.yml | 26 + .github/workflows/test.yaml | 3 + docs_v2/.firebaserc | 5 + docs_v2/.gitignore | 21 + 
docs_v2/README.md | 41 + docs_v2/babel.config.js | 3 + docs_v2/docs/01-intro.md | 171 + docs_v2/docs/02-tutorials/01-llm_chain.md | 7 + docs_v2/docs/02-tutorials/index.mdx | 28 + docs_v2/docs/03-how_to/01-installation.md | 77 + .../docs/03-how_to/02-structured_output.md | 14 + docs_v2/docs/03-how_to/index.mdx | 149 + docs_v2/docs/04-concepts.mdx | 468 + docs_v2/docs/05-integrations/anthropic.md | 145 + docs_v2/docs/05-integrations/anyscale.md | 84 + .../05-integrations/firebase_vertex_ai.md | 190 + docs_v2/docs/05-integrations/gcp_vertex_ai.md | 116 + docs_v2/docs/05-integrations/googleai.md | 149 + docs_v2/docs/05-integrations/index.mdx | 56 + docs_v2/docs/05-integrations/mistralai.md | 76 + docs_v2/docs/05-integrations/ollama.md | 462 + docs_v2/docs/05-integrations/open_router.md | 157 + docs_v2/docs/05-integrations/openai.md | 372 + docs_v2/docs/05-integrations/prem.md | 24 + docs_v2/docs/05-integrations/together_ai.md | 84 + docs_v2/docs/05-integrations/tools/index.mdx | 5 + .../05-integrations/tools/tavily_search.md | 13 + docs_v2/docusaurus.config.js | 130 + docs_v2/firebase.json | 16 + docs_v2/package-lock.json | 14683 ++++++++++++++++ docs_v2/package.json | 44 + docs_v2/sidebars.js | 30 + .../src/components/HomepageFeatures/index.js | 64 + .../HomepageFeatures/styles.module.css | 11 + docs_v2/src/css/custom.css | 30 + docs_v2/src/pages/index.js | 7 + docs_v2/src/pages/index.module.css | 23 + docs_v2/src/pages/markdown-page.md | 7 + docs_v2/static/.nojekyll | 0 docs_v2/static/img/favicon.ico | Bin 0 -> 15406 bytes docs_v2/static/img/langchain.dart.png | Bin 0 -> 156015 bytes docs_v2/static/img/logo.svg | 1 + 43 files changed, 18013 insertions(+) create mode 100644 .github/workflows/firebase-hosting-merge.yml create mode 100644 .github/workflows/firebase-hosting-pull-request.yml create mode 100644 docs_v2/.firebaserc create mode 100644 docs_v2/.gitignore create mode 100644 docs_v2/README.md create mode 100644 docs_v2/babel.config.js create mode 100644 docs_v2/docs/01-intro.md create mode 100644 docs_v2/docs/02-tutorials/01-llm_chain.md create mode 100644 docs_v2/docs/02-tutorials/index.mdx create mode 100644 docs_v2/docs/03-how_to/01-installation.md create mode 100644 docs_v2/docs/03-how_to/02-structured_output.md create mode 100644 docs_v2/docs/03-how_to/index.mdx create mode 100644 docs_v2/docs/04-concepts.mdx create mode 100644 docs_v2/docs/05-integrations/anthropic.md create mode 100644 docs_v2/docs/05-integrations/anyscale.md create mode 100644 docs_v2/docs/05-integrations/firebase_vertex_ai.md create mode 100644 docs_v2/docs/05-integrations/gcp_vertex_ai.md create mode 100644 docs_v2/docs/05-integrations/googleai.md create mode 100644 docs_v2/docs/05-integrations/index.mdx create mode 100644 docs_v2/docs/05-integrations/mistralai.md create mode 100644 docs_v2/docs/05-integrations/ollama.md create mode 100644 docs_v2/docs/05-integrations/open_router.md create mode 100644 docs_v2/docs/05-integrations/openai.md create mode 100644 docs_v2/docs/05-integrations/prem.md create mode 100644 docs_v2/docs/05-integrations/together_ai.md create mode 100644 docs_v2/docs/05-integrations/tools/index.mdx create mode 100644 docs_v2/docs/05-integrations/tools/tavily_search.md create mode 100644 docs_v2/docusaurus.config.js create mode 100644 docs_v2/firebase.json create mode 100644 docs_v2/package-lock.json create mode 100644 docs_v2/package.json create mode 100644 docs_v2/sidebars.js create mode 100644 docs_v2/src/components/HomepageFeatures/index.js create mode 100644 
docs_v2/src/components/HomepageFeatures/styles.module.css create mode 100644 docs_v2/src/css/custom.css create mode 100644 docs_v2/src/pages/index.js create mode 100644 docs_v2/src/pages/index.module.css create mode 100644 docs_v2/src/pages/markdown-page.md create mode 100644 docs_v2/static/.nojekyll create mode 100644 docs_v2/static/img/favicon.ico create mode 100644 docs_v2/static/img/langchain.dart.png create mode 100644 docs_v2/static/img/logo.svg diff --git a/.github/workflows/firebase-hosting-merge.yml b/.github/workflows/firebase-hosting-merge.yml new file mode 100644 index 00000000..262b5a52 --- /dev/null +++ b/.github/workflows/firebase-hosting-merge.yml @@ -0,0 +1,21 @@ +name: Deploy docs_v2 + +on: + push: + branches: + - main + +jobs: + build_and_deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - run: npm ci && npm run build + working-directory: docs_v2 + - uses: FirebaseExtended/action-hosting-deploy@0cbcac4740c2bfb00d632f0b863b57713124eb5a + with: + repoToken: ${{ secrets.GITHUB_TOKEN }} + firebaseServiceAccount: ${{ secrets.FIREBASE_SERVICE_ACCOUNT_LANGCHAIN_DART }} + channelId: live + projectId: langchain-dart + entryPoint: docs_v2 diff --git a/.github/workflows/firebase-hosting-pull-request.yml b/.github/workflows/firebase-hosting-pull-request.yml new file mode 100644 index 00000000..2a2d9416 --- /dev/null +++ b/.github/workflows/firebase-hosting-pull-request.yml @@ -0,0 +1,26 @@ +name: Deploy docs_v2 on PR + +on: + pull_request: + paths: + - 'docs_v2/**' + +permissions: + checks: write + contents: read + pull-requests: write + +jobs: + build_and_preview: + if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - run: npm ci && npm run build + working-directory: docs_v2 + - uses: FirebaseExtended/action-hosting-deploy@0cbcac4740c2bfb00d632f0b863b57713124eb5a + with: + repoToken: ${{ secrets.GITHUB_TOKEN }} + firebaseServiceAccount: ${{ secrets.FIREBASE_SERVICE_ACCOUNT_LANGCHAIN_DART }} + projectId: langchain-dart + entryPoint: docs_v2 diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index b77c2ed8..06a8deb6 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -4,6 +4,9 @@ on: # pull_request_target is dangerous! 
Review external PRs code before approving to run the workflow # We need this to be able to access the secrets required by the workflow pull_request_target: + paths-ignore: + - 'docs/**' + - 'docs_v2/**' workflow_dispatch: # Cancel currently running workflow when a new one is triggered diff --git a/docs_v2/.firebaserc b/docs_v2/.firebaserc new file mode 100644 index 00000000..15e3b72b --- /dev/null +++ b/docs_v2/.firebaserc @@ -0,0 +1,5 @@ +{ + "projects": { + "default": "langchain-dart" + } +} diff --git a/docs_v2/.gitignore b/docs_v2/.gitignore new file mode 100644 index 00000000..0f21febf --- /dev/null +++ b/docs_v2/.gitignore @@ -0,0 +1,21 @@ +# Dependencies +/node_modules + +# Production +/build + +# Generated files +.docusaurus +.cache-loader + +# Misc +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local + +npm-debug.log* +yarn-debug.log* +yarn-error.log* +.firebase diff --git a/docs_v2/README.md b/docs_v2/README.md new file mode 100644 index 00000000..0c6c2c27 --- /dev/null +++ b/docs_v2/README.md @@ -0,0 +1,41 @@ +# Website + +This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator. + +### Installation + +``` +$ yarn +``` + +### Local Development + +``` +$ yarn start +``` + +This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. + +### Build + +``` +$ yarn build +``` + +This command generates static content into the `build` directory and can be served using any static contents hosting service. + +### Deployment + +Using SSH: + +``` +$ USE_SSH=true yarn deploy +``` + +Not using SSH: + +``` +$ GIT_USER= yarn deploy +``` + +If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. diff --git a/docs_v2/babel.config.js b/docs_v2/babel.config.js new file mode 100644 index 00000000..e00595da --- /dev/null +++ b/docs_v2/babel.config.js @@ -0,0 +1,3 @@ +module.exports = { + presets: [require.resolve('@docusaurus/core/lib/babel/preset')], +}; diff --git a/docs_v2/docs/01-intro.md b/docs_v2/docs/01-intro.md new file mode 100644 index 00000000..75428706 --- /dev/null +++ b/docs_v2/docs/01-intro.md @@ -0,0 +1,171 @@ +--- +sidebar_position: 0 +sidebar_class_name: hidden +--- + +# Introduction + +Build Dart/Flutter applications powered by Large Language Models. + +## What is LangChain.dart? + +LangChain.dart is an unofficial Dart port of the popular [LangChain](https://github.com/hwchase17/langchain) Python framework created by [Harrison Chase](https://www.linkedin.com/in/harrison-chase-961287118). LangChain is a framework for developing applications that are powered by large language models (LLMs). + +It comes with a set of components that make working with LLMs easy. +The components can be grouped into a few core modules: + +![LangChain.dart](https://raw.githubusercontent.com/davidmigloz/langchain_dart/main/docs/img/langchain.dart.png) + +- 📃 **Model I/O:** LangChain offers a unified API for interacting with various LLM providers (e.g. OpenAI, Google, Mistral, Ollama, etc.), allowing developers to switch between them with ease. Additionally, it provides tools for managing model inputs (prompt templates and example selectors) and parsing the resulting model outputs (output parsers). 
+- 📚 **Retrieval:** assists in loading user data (via document loaders), transforming it (with text splitters), extracting its meaning (using embedding models), storing (in vector stores) and retrieving it (through retrievers) so that it can be used to ground the model's responses (i.e. Retrieval-Augmented Generation or RAG). +- 🤖 **Agents:** "bots" that leverage LLMs to make informed decisions about which available tools (such as web search, calculators, database lookup, etc.) to use to accomplish the designated task. + +The different components can be composed together using the [LangChain Expression Language (LCEL)](https://langchaindart.dev/#/expression_language/get_started). + +## Motivation + +Large Language Models (LLMs) have revolutionized Natural Language Processing (NLP), serving as essential components in a wide range of applications, such as question-answering, summarization, translation, and text generation. + +The adoption of LLMs is creating a new tech stack in its wake. However, emerging libraries and tools are predominantly being developed for the Python and JavaScript ecosystems. As a result, the number of applications leveraging LLMs in these ecosystems has grown exponentially. + +In contrast, the Dart / Flutter ecosystem has not experienced similar growth, which can likely be attributed to the scarcity of Dart and Flutter libraries that streamline the complexities associated with working with LLMs. + +LangChain.dart aims to fill this gap by abstracting the intricacies of working with LLMs in Dart and Flutter, enabling developers to harness their combined potential effectively. + +## Packages + +LangChain.dart has a modular design that allows developers to import only the components they need. The ecosystem consists of several packages: + +### [`langchain_core`](https://pub.dev/packages/langchain_core) + +Contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. + +> Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. + +### [`langchain`](https://pub.dev/packages/langchain) + +Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. + +> Depend on this package to build LLM applications with LangChain.dart. +> +> This package exposes `langchain_core` so you don't need to depend on it explicitly. + +### [`langchain_community`](https://pub.dev/packages/langchain_community) + +Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. + +> Depend on this package if you want to use any of the integrations or components it provides. + +### Integration-specific packages + +Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), [`langchain_ollama`](https://pub.dev/packages/langchain_ollama), etc.) are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package. + +> Depend on an integration-specific package if you want to use the specific integration. + + +## Getting started + +To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. 
Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_community`, `langchain_openai`, `langchain_google`, etc.): + +```yaml +dependencies: + langchain: {version} + langchain_community: {version} + langchain_openai: {version} + langchain_google: {version} + ... +``` + +The most basic building block of LangChain.dart is calling an LLM on some prompt. LangChain.dart provides a unified interface for calling different LLMs. For example, we can use `ChatGoogleGenerativeAI` to call Google's Gemini model: + +```dart +final model = ChatGoogleGenerativeAI(apiKey: googleApiKey); +final prompt = PromptValue.string('Hello world!'); +final result = await model.invoke(prompt); +// Hello everyone! I'm new here and excited to be part of this community. +``` + +But the power of LangChain.dart comes from chaining together multiple components to implement complex use cases. For example, a RAG (Retrieval-Augmented Generation) pipeline that would accept a user query, retrieve relevant documents from a vector store, format them using prompt templates, invoke the model, and parse the output: + +```dart +// 1. Create a vector store and add documents to it +final vectorStore = MemoryVectorStore( + embeddings: OpenAIEmbeddings(apiKey: openaiApiKey), +); +await vectorStore.addDocuments( + documents: [ + Document(pageContent: 'LangChain was created by Harrison'), + Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'), + ], +); + +// 2. Define the retrieval chain +final retriever = vectorStore.asRetriever(); +final setupAndRetrieval = Runnable.fromMap({ + 'context': retriever.pipe( + Runnable.mapInput((docs) => docs.map((d) => d.pageContent).join('\n')), + ), + 'question': Runnable.passthrough(), +}); + +// 3. Construct a RAG prompt template +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'Answer the question based on only the following context:\n{context}'), + (ChatMessageType.human, '{question}'), +]); + +// 4. Define the final chain +final model = ChatOpenAI(apiKey: openaiApiKey); +const outputParser = StringOutputParser(); +final chain = setupAndRetrieval + .pipe(promptTemplate) + .pipe(model) + .pipe(outputParser); + +// 5. Run the pipeline +final res = await chain.invoke('Who created LangChain.dart?'); +print(res); +// David created LangChain.dart +``` + +## Documentation + +- [LangChain.dart documentation](https://langchaindart.dev) +- [Sample apps](https://github.com/davidmigloz/langchain_dart/tree/main/examples) +- [LangChain.dart blog](https://blog.langchaindart.dev) +- [Project board](https://github.com/users/davidmigloz/projects/2/views/1) + +## Community + +Stay up-to-date on the latest news and updates on the field, have great discussions, and get help in the official [LangChain.dart Discord server](https://discord.gg/x4qbhqecVR). + +[![LangChain.dart Discord server](https://invidget.switchblade.xyz/x4qbhqecVR?theme=light)](https://discord.gg/x4qbhqecVR) + +## Contribute + +| 📢 **Call for Collaborators** 📢 | +|-------------------------------------------------------------------------| +| We are looking for collaborators to join the core group of maintainers. | + +New contributors welcome! Check out our [Contributors Guide](https://github.com/davidmigloz/langchain_dart/blob/main/CONTRIBUTING.md) for help getting started. + +Join us on [Discord](https://discord.gg/x4qbhqecVR) to meet other maintainers. We'll help you get your first contribution in no time! 
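Returning to the RAG pipeline composed in the getting-started example above: every LCEL chain exposes `stream` in addition to `invoke`. The following is a minimal sketch, not part of the original patch — it assumes the `chain`, vector store, and API keys defined in that example, and the chunk values shown in the comment are illustrative only:

```dart
// Stream the RAG pipeline's answer instead of waiting for the full response.
// The chain above ends with a `StringOutputParser`, so each emitted chunk is
// a piece of the final answer string.
final stream = chain.stream('Who created LangChain.dart?');
await for (final chunk in stream) {
  print(chunk); // e.g. "David", " created", " LangChain.dart", ...
}
```

Because `stream` is defined on the shared `Runnable` interface, the same call works for any chain composed with LCEL, regardless of which model or retriever backs it.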
+ +## Related projects + +- [LangChain](https://github.com/langchain-ai/langchain): The original Python LangChain project. +- [LangChain.js](https://github.com/langchain-ai/langchainjs): A JavaScript port of LangChain. +- [LangChain.go](https://github.com/tmc/langchaingo): A Go port of LangChain. +- [LangChain.rb](https://github.com/andreibondarev/langchainrb): A Ruby port of LangChain. + +## Sponsors + +
    + +## License + +LangChain.dart is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). diff --git a/docs_v2/docs/02-tutorials/01-llm_chain.md b/docs_v2/docs/02-tutorials/01-llm_chain.md new file mode 100644 index 00000000..e40bbb77 --- /dev/null +++ b/docs_v2/docs/02-tutorials/01-llm_chain.md @@ -0,0 +1,7 @@ +--- +sidebar_position: 0 +sidebar_class_name: hidden +--- + + +# Build a Simple LLM Application with LCEL \ No newline at end of file diff --git a/docs_v2/docs/02-tutorials/index.mdx b/docs_v2/docs/02-tutorials/index.mdx new file mode 100644 index 00000000..82e56f9e --- /dev/null +++ b/docs_v2/docs/02-tutorials/index.mdx @@ -0,0 +1,28 @@ +--- +sidebar_position: 0 +sidebar_class_name: hidden +--- +# Tutorials + +New to LangChain or to LLM app development in general? Read this material to quickly get up and running. + +## Basics +- [Build a Simple LLM Application with LCEL](/docs/tutorials/llm_chain) +- [Build a Chatbot](/docs/tutorials/chatbot) +- [Build vector stores and retrievers](/docs/tutorials/retrievers) +- [Build an Agent](/docs/tutorials/agents) + +## Working with external knowledge +- [Build a Retrieval Augmented Generation (RAG) Application](/docs/tutorials/rag) +- [Build a Conversational RAG Application](/docs/tutorials/qa_chat_history) +- [Build a Question/Answering system over SQL data](/docs/tutorials/sql_qa) +- [Build a Query Analysis System](/docs/tutorials/query_analysis) +- [Build a local RAG application](/docs/tutorials/local_rag) +- [Build a Question Answering application over a Graph Database](/docs/tutorials/graph) +- [Build a PDF ingestion and Question/Answering system](/docs/tutorials/pdf_qa/) + +## Specialized tasks +- [Build an Extraction Chain](/docs/tutorials/extraction) +- [Generate synthetic data](/docs/tutorials/data_generation) +- [Classify text into labels](/docs/tutorials/classification) +- [Summarize text](/docs/tutorials/summarization) \ No newline at end of file diff --git a/docs_v2/docs/03-how_to/01-installation.md b/docs_v2/docs/03-how_to/01-installation.md new file mode 100644 index 00000000..44604573 --- /dev/null +++ b/docs_v2/docs/03-how_to/01-installation.md @@ -0,0 +1,77 @@ +# Installation +Langchain as a framework consists of a number of packages. They're split into different packages allowing you to choose exactly what pieces of the framework to install and use. + +## Installing essential Langchain.dart packages + +### [`langchain`](https://pub.dev/packages/langchain) +Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. + +> Depend on this package to build LLM applications with Langchain.dart. +> +> This package exposes `langchain_core` so you don't need to depend on it explicitly. + +```bash +dart pub add langchain +``` + +### [`langchain_core`](https://pub.dev/packages/langchain_core) +This package contains base abstractions of different components and ways to compose them together. +The interfaces for core components like LLMs, vector stores, retrievers and more are defined here. +> Depend on this package to build frameworks on top of Langchain.dart or to interoperate with it. + +To install this package in your Dart or Flutter project +```bash +dart pub add langchain_core +``` + +### [`langchain_community`](https://pub.dev/packages/langchain_community) +Contains third-party integrations and community-contributed components that are not part of the core Langchain.dart API. 
+> Depend on this package if you want to use any of the integrations or components it provides like CSV,JSON,Text or HTML loaders and more. + +```bash +dart pub add langchain langchain_community +``` + +## Integration packages +Certain integrations like OpenAI and Anthropic have their own packages. Any integrations that require their own package will be documented as such in the Integration docs. + + +Let's say you're using [OpenAI](https://platform.openai.com/), install the `langchain_openai` package. +```bash +dart pub add langchain langchain_community langchain_openai +``` + +Let's say you want Google integration to use (GoogleAI, VertexAI, Gemini etc), install the `langchain_google` package. +```bash +dart pub add langchain langchain_community langchain_google +``` +The following table contains the list of existing Langchain.dart integration packages. + +| Package | Version | Description | +|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | +| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | +| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components and utilities | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | +| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) 
| +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). | +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | +## Documentation + +Detailed documentation for various integrations can be found in the `/docs/05-integration/` directory: + +- [Anthropic](/docs/integrations/anthropic) +- [Anyscale](/docs/integrations/anyscale) +- [Firebase VertexAI](/docs/integrations/firebase_vertex_ai) +- [GCP VertexAI](/docs/integrations/gcp_vertex_ai) +- [GoogleAI](/docs/integrations/googleai) +- [MistralAI](/docs/integrations/mistralai) +- [Ollama](/docs/integrations/ollama) +- [OpenRouter](/docs/integrations/open_router) +- [OpenAI](/docs/integrations/openai) +- [PrEM](/docs/integrations/prem) +- [TogetherAI](/docs/integrations/together_ai) \ No newline at end of file diff --git a/docs_v2/docs/03-how_to/02-structured_output.md b/docs_v2/docs/03-how_to/02-structured_output.md new file mode 100644 index 00000000..95983cd6 --- /dev/null +++ b/docs_v2/docs/03-how_to/02-structured_output.md @@ -0,0 +1,14 @@ +--- +sidebar_position: 3 +keywords: [structured output, json, information extraction, with_structured_output] +--- +# How to return structured data from a model + +> This guide assumes familiarity with the following concepts: +> - [Chat models](/docs/concepts/#chat-models) +> - [Function/tool calling](/docs/concepts/#functiontool-calling) + + +It is often useful to have a model return output that matches a specific schema. One common use-case is extracting data from text to insert into a database or use with some other downstream system. This guide covers a few strategies for getting structured outputs from a model. + + diff --git a/docs_v2/docs/03-how_to/index.mdx b/docs_v2/docs/03-how_to/index.mdx new file mode 100644 index 00000000..81ea6bc7 --- /dev/null +++ b/docs_v2/docs/03-how_to/index.mdx @@ -0,0 +1,149 @@ +--- +sidebar_position: 0 +sidebar_class_name: hidden +--- + +# How-to guides + +Here you'll find answers to "How do I...?" types of questions. +These guides are *goal-oriented* and *concrete*; they're meant to help you complete a specific task. +For conceptual explanations see the [Conceptual guide](/docs/concepts/). +For end-to-end walkthroughs see [Tutorials](/docs/tutorials). +For comprehensive descriptions of every class and function see the [API Reference](https://pub.dev/documentation/langchain/latest/index.html). + + +## Installation + +- [How to: install LangChain packages](/docs/how_to/installation/) + +## Key features +This highlights functionality that is core to using LangChain. 
+ +- [How to: return structured data from a model](/docs/how_to/structured_output/) +- [How to: use a model to call tools](/docs/how_to/tool_calling) +- [How to: stream runnables](/docs/how_to/streaming) +- [How to: debug your LLM apps](/docs/how_to/debugging/) + +[LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel) is a way to create arbitrary custom chains. It is built on the [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) protocol. + +[**LCEL cheatsheet**](/docs/how_to/lcel_cheatsheet/): For a quick overview of how to use the main LCEL primitives. + +- [How to: chain runnables](/docs/how_to/sequence) +- [How to: stream runnables](/docs/how_to/streaming) +- [How to: invoke runnables in parallel](/docs/how_to/parallel/) +- [How to: add default invocation args to runnables](/docs/how_to/binding/) +- [How to: turn any function into a runnable](/docs/how_to/functions) +- [How to: pass through inputs from one chain step to the next](/docs/how_to/passthrough) +- [How to: configure runnable behavior at runtime](/docs/how_to/configure) +- [How to: add message history (memory) to a chain](/docs/how_to/message_history) +- [How to: route between sub-chains](/docs/how_to/routing) +- [How to: create a dynamic (self-constructing) chain](/docs/how_to/dynamic_chain/) +- [How to: inspect runnables](/docs/how_to/inspect) +- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks) +- [How to: migrate chains to LCEL](/docs/how_to/migrate_chains) +- [How to: pass runtime secrets to a runnable](/docs/how_to/runnable_runtime_secrets) + + +## Components + +These are the core building blocks you can use when building applications. + +### Prompt templates + +[Prompt Templates](/docs/concepts/#prompt-templates) are responsible for formatting user input into a format that can be passed to a language model. + +- [How to: use few shot examples](/docs/how_to/few_shot_examples) +- [How to: use few shot examples in chat models](/docs/how_to/few_shot_examples_chat/) +- [How to: partially format prompt templates](/docs/how_to/prompts_partial) +- [How to: compose prompts together](/docs/how_to/prompts_composition) + +### Chat models + +[Chat Models](/docs/concepts/#chat-models) are newer forms of language models that take messages in and output a message. 
+ +- [How to: do function/tool calling](/docs/how_to/tool_calling) +- [How to: get models to return structured output](/docs/how_to/structured_output) +- [How to: cache model responses](/docs/how_to/chat_model_caching) +- [How to: get log probabilities](/docs/how_to/logprobs) +- [How to: create a custom chat model class](/docs/how_to/custom_chat_model) +- [How to: stream a response back](/docs/how_to/chat_streaming) +- [How to: track token usage](/docs/how_to/chat_token_usage_tracking) +- [How to: track response metadata across providers](/docs/how_to/response_metadata) +- [How to: let your end users choose their model](/docs/how_to/chat_models_universal_init/) +- [How to: use chat model to call tools](/docs/how_to/tool_calling) +- [How to: stream tool calls](/docs/how_to/tool_streaming) +- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot) +- [How to: bind model-specific formatted tools](/docs/how_to/tools_model_specific) +- [How to: force a specific tool call](/docs/how_to/tool_choice) +- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/) + +### LLMs + +What LangChain calls [LLMs](/docs/concepts/#llms) are older forms of language models that take a string in and output a string. + +- [How to: cache model responses](/docs/how_to/llm_caching) +- [How to: create a custom LLM class](/docs/how_to/custom_llm) +- [How to: stream a response back](/docs/how_to/streaming_llm) +- [How to: track token usage](/docs/how_to/llm_token_usage_tracking) +- [How to: work with local LLMs](/docs/how_to/local_llms) + +### Document loaders + +[Document Loaders](/docs/concepts/#document-loaders) are responsible for loading documents from a variety of sources. + +- [How to: load CSV data](/docs/how_to/document_loader_csv) +- [How to: load data from a directory](/docs/how_to/document_loader_directory) +- [How to: load HTML data](/docs/how_to/document_loader_html) +- [How to: load JSON data](/docs/how_to/document_loader_json) +- [How to: load Markdown data](/docs/how_to/document_loader_markdown) +- [How to: load Microsoft Office data](/docs/how_to/document_loader_office_file) +- [How to: load PDF files](/docs/how_to/document_loader_pdf) +- [How to: write a custom document loader](/docs/how_to/document_loader_custom) + +### Text splitters + +[Text Splitters](/docs/concepts/#text-splitters) take a document and split into chunks that can be used for retrieval. + +- [How to: recursively split text](/docs/how_to/recursive_text_splitter) +- [How to: split by HTML headers](/docs/how_to/HTML_header_metadata_splitter) +- [How to: split by HTML sections](/docs/how_to/HTML_section_aware_splitter) +- [How to: split by character](/docs/how_to/character_text_splitter) +- [How to: split code](/docs/how_to/code_splitter) +- [How to: split Markdown by headers](/docs/how_to/markdown_header_metadata_splitter) +- [How to: recursively split JSON](/docs/how_to/recursive_json_splitter) +- [How to: split text into semantic chunks](/docs/how_to/semantic-chunker) +- [How to: split by tokens](/docs/how_to/split_by_token) + +### Vector stores + +[Vector stores](/docs/concepts/#vector-stores) are databases that can efficiently store and retrieve embeddings. + +- [How to: use a vector store to retrieve data](/docs/how_to/vectorstores) + +### Retrievers + +[Retrievers](/docs/concepts/#retrievers) are responsible for taking a query and returning relevant documents. 
+
+- [How to: use a vector store to retrieve data](/docs/how_to/vectorstore_retriever)
+- [How to: generate multiple queries to retrieve data for](/docs/how_to/MultiQueryRetriever)
+- [How to: use contextual compression to compress the data retrieved](/docs/how_to/contextual_compression)
+- [How to: write a custom retriever class](/docs/how_to/custom_retriever)
+- [How to: add similarity scores to retriever results](/docs/how_to/add_scores_retriever)
+- [How to: combine the results from multiple retrievers](/docs/how_to/ensemble_retriever)
+- [How to: reorder retrieved results to mitigate the "lost in the middle" effect](/docs/how_to/long_context_reorder)
+- [How to: generate multiple embeddings per document](/docs/how_to/multi_vector)
+- [How to: retrieve the whole document for a chunk](/docs/how_to/parent_document_retriever)
+- [How to: generate metadata filters](/docs/how_to/self_query)
+- [How to: create a time-weighted retriever](/docs/how_to/time_weighted_vectorstore)
+- [How to: use hybrid vector and keyword retrieval](/docs/how_to/hybrid)
+
+### Agents
+
+:::note
+
+For in-depth how-to guides for agents, please check out the [LangGraph](https://langchain-ai.github.io/langgraph/) documentation.
+
+:::
+
+- [How to: use legacy LangChain Agents (AgentExecutor)](/docs/how_to/agent_executor)
+- [How to: migrate from legacy LangChain agents to LangGraph](/docs/how_to/migrate_agent)
\ No newline at end of file
diff --git a/docs_v2/docs/04-concepts.mdx b/docs_v2/docs/04-concepts.mdx
new file mode 100644
index 00000000..fcd2335b
--- /dev/null
+++ b/docs_v2/docs/04-concepts.mdx
@@ -0,0 +1,468 @@
+# Conceptual guide
+
+This section contains introductions to key parts of LangChain.dart.
+
+## Architecture
+
+LangChain.dart as a framework consists of a number of packages.
+
+### [`langchain_core`](https://pub.dev/packages/langchain_core)
+This package contains base abstractions of different components and ways to compose them together.
+The interfaces for core components like LLMs, vector stores, retrievers and more are defined here.
+No third-party integrations are defined here.
+
+> Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it.
+
+### [`langchain`](https://pub.dev/packages/langchain)
+
+Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture.
+
+> Depend on this package to build LLM applications with LangChain.dart.
+>
+> This package exposes `langchain_core` so you don't need to depend on it explicitly.
+
+### [`langchain_community`](https://pub.dev/packages/langchain_community)
+
+Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API.
+
+> Depend on this package if you want to use any of the integrations or components it provides.
+
+### Integration-specific packages
+
+Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), [`langchain_ollama`](https://pub.dev/packages/langchain_ollama), etc.) are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package.
+
+> Depend on an integration-specific package if you want to use the specific integration.
+
+See [Integrations](/docs/integrations) to integrate with a specific package.
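+
+In practice, an application usually depends on `langchain` plus one or more integration packages and imports them side by side. The following is a rough sketch (assuming the OpenAI integration and a placeholder API key) of how components from different packages compose, since they all share the same base interfaces (see the LCEL section below):
+
+```dart
+import 'package:langchain/langchain.dart';
+import 'package:langchain_openai/langchain_openai.dart';
+
+Future<void> main() async {
+  // `PromptTemplate` and `StringOutputParser` come from the core abstractions
+  // (re-exported by `langchain`), while `ChatOpenAI` comes from the
+  // integration-specific `langchain_openai` package.
+  final prompt = PromptTemplate.fromTemplate('Tell me a joke about {topic}');
+  final model = ChatOpenAI(apiKey: 'your-api-key'); // placeholder key
+  final chain = prompt.pipe(model).pipe(const StringOutputParser());
+
+  final res = await chain.invoke({'topic': 'llamas'});
+  print(res);
+}
+```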
+ +## LangChain Expression Language (LCEL) + +LangChain Expression Language, or LCEL, is a declarative way to easily compose chains together. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains (we’ve seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL: + +- **First-class streaming support:** When you build your chains with LCEL you get the best possible time-to-first-token (time elapsed until the first chunk of output comes out). For some chains this means eg. we stream tokens straight from an LLM to a streaming output parser, and you get back parsed, incremental chunks of output at the same rate as the LLM provider outputs the raw tokens. +- **Optimized concurrent execution:** Whenever your LCEL chains have steps that can be executed concurrently (eg if you fetch documents from multiple retrievers) we automatically do it for the smallest possible latency. +- **Retries and fallbacks:** Configure retries and fallbacks for any part of your LCEL chain. This is a great way to make your chains more reliable at scale. +- **Access intermediate results:** For more complex chains it’s often very useful to access the results of intermediate steps even before the final output is produced. This can be used to let end-users know something is happening, or even just to debug your chain. + +### Runnable interface + +To make it as easy as possible to create custom chains, LangChain provides a `Runnable` interface that most components implement, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. + +This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way. The standard interface includes: + +- `invoke`: call the chain on an input and return the output. +- `stream`: call the chain on an input and stream the output. +- `batch`: call the chain on a list of inputs and return a list of outputs. + +The type of the input and output varies by component: + +| Component | Input Type | Output Type | +|-----------------------------|------------------------|------------------------| +| `PromptTemplate` | `Map` | `PromptValue` | +| `ChatMessagePromptTemplate` | `Map` | `PromptValue` | +| `LLM` | `PromptValue` | `LLMResult` | +| `ChatModel` | `PromptValue` | `ChatResult` | +| `OutputParser` | Any object | Parser output type | +| `Retriever` | `String` | `List` | +| `DocumentTransformer` | `List` | `List` | +| `Tool` | `Map` | `String` | +| `Chain` | `Map` | `Map` | + +## Components + +### Chat models +Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text). +These are traditionally newer models (older models are generally `LLMs`, see below). +Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages. + +Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input. This means you can easily use chat models in place of LLMs. + +When a string is passed in as input, it is converted to a `HumanMessage` and then passed to the underlying model. + +LangChain does not host any Chat Models, rather we rely on third party integrations. 
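+
+For example, here is a minimal sketch (assuming the `langchain_openai` integration and an `OPENAI_API_KEY` environment variable) of invoking a chat model with a plain string, which is wrapped in a human message under the hood:
+
+```dart
+final chatModel = ChatOpenAI(
+  apiKey: Platform.environment['OPENAI_API_KEY'],
+);
+
+// The string is converted to a HumanChatMessage before being sent to the model.
+final res = await chatModel.invoke(
+  PromptValue.string('Why do llamas hum?'),
+);
+print(res.output.content); // The AIChatMessage content returned by the model.
+```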
+ +We have some standardized parameters when constructing ChatModels: +- `model`: the name of the model +- `temperature`: the sampling temperature +- `timeout`: request timeout +- `maxTokens`: max tokens to generate +- `apiKey`: API key for the model provider +- `baseUrl`: endpoint to send requests to + +Some important things to note: +- standard params only apply to model providers that expose parameters with the intended functionality. For example, some providers do not expose a configuration for maximum output tokens, so max_tokens can't be supported on these. +- standard params are currently only enforced on integrations that have their own integration packages (e.g. `langchain-openai`, `langchain-anthropic`, etc.), they're not enforced on models in ``langchain-community``. + +ChatModels also accept other parameters that are specific to that integration. To find all the parameters supported by a ChatModel head to the API reference for that model. + +### LLMs +:::caution +Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/docs/concepts/#chat-models), +even for non-chat use cases. + +You are probably looking for [the section above instead](/docs/concepts/#chat-models). +::: + +Language models that takes a string as input and returns a string. +These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see above). + +Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input. +This gives them the same interface as [Chat Models](/docs/concepts/#chat-models). +When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model. + +LangChain.dart does not host any LLMs, rather we rely on third party integrations. See (/docs/integrations) + + + +### Messages +Some language models take a list of messages as input and return a message. + +LangChain provides several objects to easily distinguish between different roles: +#### HumanChatMessage +This represents a message from the user. + +#### AIChatMessage +This represents a message from the model. + +#### SystemChatMessage +This represents a system message, which tells the model how to behave. Not every model provider supports this. + +#### FunctionChatMessage / ToolChatMessage +These represent a decision from an language model to call a tool. They're a subclass of a AIChatMessage. FunctionChatMessage is a legacy message type corresponding to OpenAI's legacy function-calling API. + +### Prompt Templates + +Most LLM applications do not pass user input directly into an `LLM`. Usually they will add the user input to a larger piece of text, called a prompt template, that provides additional context on the specific task at hand. + +In the previous example, the text we passed to the model contained instructions to generate a company name. For our application, it would be great if the user only had to provide the description of a company/product, without having to worry about giving the model instructions. + +`PromptTemplates` help with exactly this! They bundle up all the logic for going from user input into a fully formatted prompt. 
This can start off very simple - for example, a prompt to produce the above string would just be: + +```dart +final prompt = PromptTemplate.fromTemplate( + 'What is a good name for a company that makes {product}?', +); +final res = prompt.format({'product': 'colorful socks'}); +print(res); +// 'What is a good name for a company that makes colorful socks?' +``` + +However, the advantages of using these over raw string formatting are several. You can "partial" out variables - e.g. you can format only some of the variables at a time. You can compose them together, easily combining different templates into a single prompt. + +For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates). + +`PromptTemplates` can also be used to produce a list of messages. In this case, the prompt not only contains information about the content, but also each message (its role, its position in the list, etc) Here, what happens most often is a `ChatPromptTemplate` is a list of `ChatMessagePromptTemplates`. Each `ChatMessagePromptTemplate` contains instructions for how to format that `ChatMessage` - its role, and then also its content. Let's take a look at this below: + +```dart +const template = 'You are a helpful assistant that translates {input_language} to {output_language}.'; +const humanTemplate = '{text}'; + +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, template), + (ChatMessageType.human, humanTemplate), +]); + +final res = chatPrompt.formatMessages({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// [ +// SystemChatMessage(content='You are a helpful assistant that translates English to French.'), +// HumanChatMessage(content='I love programming.') +// ] +``` + +`ChatPromptTemplates` can also be constructed in other ways - For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates). + +### Output parsers + + +:::note + +The information here refers to parsers that take a text output from a model try to parse it into a more structured representation. +More and more models are supporting function (or tool) calling, which handles this automatically. +It is recommended to use function/tool calling rather than output parsing. +See documentation for that [here](/docs/concepts/#function-tool-calling). + +::: +`OutputParsers` convert the raw output of an LLM into a format that can be used downstream. There are few main type of `OutputParsers`, including: + +- Convert text from LLM -> structured information (e.g. JSON). +- Convert a `ChatMessage` into just a string. +- Convert the extra information returned from a call besides the message (like OpenAI function invocation) into a string. + +For full information on this, see the section on [output parsers](/docs/how_to/#output-parsers). + +### Chat history +Most LLM applications have a conversational interface. An essential component of a conversation is being able to refer to information introduced earlier in the conversation. At bare minimum, a conversational system should be able to access some window of past messages directly. + +The concept of ChatHistory refers to a class in LangChain which can be used to wrap an arbitrary chain. This ChatHistory will keep track of inputs and outputs of the underlying chain, and append them as messages to a message database. 
Future interactions will then load those messages and pass them into the chain as part of the input. + +### Documents +A Document object in LangChain contains information about some data. It has two attributes: +- pageContent: `String` - The content of this document. +- metadata: `Map` - Arbitrary metadata associated with this document. Can track the document id, file name, etc. + +### Document loaders +Use document loaders to load data from a source as `Document`'s. For example, there are document loaders +for loading a simple .txt file, for loading the text contents of any web page, +or even for loading a transcript of a YouTube video. + +Document loaders expose two methods: + +- `lazyLoad()`: returns a `Stream` of `Document`'s. This is useful for loading + large amounts of data, as it allows you to process each `Document` as it is + loaded, rather than waiting for the entire data set to be loaded in memory. +- `load()`: returns a list of `Document`'s. Under the hood, this method calls + `lazyLoad()` and collects the results into a list. Use this method only with + small data sets. + +The simplest loader reads in a file as text and places it all into one +`Document`. + +```dart + +const filePath = 'example.txt'; +const loader = TextLoader(filePath); +final docs = await loader.load(); +``` + +### Text splitters +Once you've loaded documents, you'll often want to transform them to better suit +your application. The simplest example is you may want to split a long document +into smaller chunks that can fit into your model's context window. LangChain has +a number of built-in document transformers that make it easy to split, combine, +filter, and otherwise manipulate documents. + +## Text splitters + +When you want to deal with long pieces of text, it is necessary to split up that +text into chunks. As simple as this sounds, there is a lot of potential +complexity here. Ideally, you want to keep the semantically related pieces of +text together. What "semantically related" means could depend on the type of +text. This tutorial showcases several ways to do that. + +At a high level, text splitters work as following: + +1. Split the text up into small, semantically meaningful chunks (often + sentences). +2. Start combining these small chunks into a larger chunk until you reach a + certain size (as measured by some function). +3. Once you reach that size, make that chunk its own piece of text and then + start creating a new chunk of text with some overlap (to keep context between + chunks). + +That means there are two different axes along which you can customize your text +splitter: + +1. How the text is split. +2. How the chunk size is measured. + +The most basic text splitter is the `CharacterTextSplitter`. This splits based +on characters (by default `\n\n`) and measure chunk length by number of +characters. + +The default recommended text splitter is the `RecursiveCharacterTextSplitter`. This text splitter +takes a list of characters. It tries to create chunks based on splitting on the first character, +but if any chunks are too large it then moves onto the next character, and so forth. By default +the characters it tries to split on are `["\n\n", "\n", " ", ""]`. + +In addition to controlling which characters you can split on, you can also +control a few other things: + +- `lengthFunction`: how the length of chunks is calculated. Defaults to just + counting number of characters, but it's pretty common to pass a token counter + here. 
+- `chunkSize`: the maximum size of your chunks (as measured by the length + function). +- `chunkOverlap`: the maximum overlap between chunks. It can be nice to have + some overlap to maintain some continuity between chunks (eg do a sliding + window). +- `addStartIndex`: whether to include the starting position of each chunk within + the original document in the metadata. + +```dart +const filePath = 'state_of_the_union.txt'; +const loader = TextLoader(filePath); +final documents = await loader.load(); +const textSplitter = RecursiveCharacterTextSplitter( + chunkSize: 800, + chunkOverlap: 0, +); +final docs = textSplitter.splitDocuments(documents); +``` + +### Embedding models +Embedding models create a vector representation of a piece of text. You can think of a vector as an array of numbers that captures the semantic meaning of the text. By representing the text in this way, you can perform mathematical operations that allow you to do things like search for other pieces of text that are most similar in meaning. These natural language search capabilities underpin many of the context retrieval where we provide an LLM with relevant data it needs to effectively respond to a query. + +The Embeddings class is a class designed for interfacing with text embedding +models. There are lots of embedding model providers (OpenAI, Cohere, Hugging +Face, etc) - this class is designed to provide a standard interface for all of +them. + +Embeddings create a vector representation of a piece of text. This is useful +because it means we can think about text in the vector space, and do things like +semantic search where we look for pieces of text that are most similar in the +vector space. + +The base Embeddings class in LangChain exposes two methods: one for embedding +documents and one for embedding a query. The former takes as input multiple +texts, while the latter takes a single text. The reason for having these as two +separate methods is that some embedding providers have different embedding +methods for documents (to be searched over) vs queries (the search query +itself). + +For specifics on how to use embedding models, see the [relevant how-to guides here](/docs/how_to/#embedding-models). + +### Vector stores +One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding vectors, and then at query time to embed the unstructured query and retrieve the embedding vectors that are 'most similar' to the embedded query. A vector store takes care of storing embedded data and performing vector search for you. + +Most vector stores can also store metadata about embedded vectors and support filtering on that metadata before +similarity search, allowing you more control over returned documents. + +Vector stores can be converted to the retriever interface by doing: + +For specifics on how to use vector stores, see the [relevant how-to guides here](/docs/how_to/#vector-stores). + +### Retrievers +A retriever is an interface that returns documents given an unstructured query. +It is more general than a vector store. A retriever does not need to be able to +store documents, only to return (or retrieve) it. Vector stores can be used as +the backbone of a retriever, but there are other types of retrievers as well. + +Retrievers accept a string query as input and return a list of Document's as output. 
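+
+For example, a vector store can be exposed through the retriever interface and queried with a plain string. The following is a rough sketch assuming an in-memory vector store (`MemoryVectorStore`) and the `langchain_openai` embeddings; exact class and method names may vary slightly between versions:
+
+```dart
+final embeddings = OpenAIEmbeddings(
+  apiKey: Platform.environment['OPENAI_API_KEY'],
+);
+
+// Embed a couple of documents and keep the vectors in memory.
+final vectorStore = MemoryVectorStore(embeddings: embeddings);
+await vectorStore.addDocuments(
+  documents: [
+    Document(pageContent: 'LangChain.dart brings LangChain to Dart'),
+    Document(pageContent: 'Ollama runs open-source models locally'),
+  ],
+);
+
+// Expose the vector store through the retriever interface and query it.
+final retriever = vectorStore.asRetriever();
+final docs = await retriever.getRelevantDocuments('What is LangChain.dart?');
+print(docs.first.pageContent);
+```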
+ +The public API of the `BaseRetriever` class in LangChain is as follows: + +```dart +abstract interface class BaseRetriever { + Future> getRelevantDocuments(final String query); +} +``` + +For specifics on how to use retrievers, see the [relevant how-to guides here](/docs/how_to/#retrievers). + +### Tools +Tools are utilities designed to be called by a model. Their inputs are designed to be generated by models, and their outputs are designed to be passed back to models. Tools are needed whenever you want a model to control parts of your code or call out to external APIs. +A tool consists of: + +1. The name of the tool. +2. A description of what the tool does. +3. A JSON schema defining the inputs to the tool. +4. A function (and, optionally, an async variant of the function). + +When a tool is bound to a model, the name, description and JSON schema are provided as context to the model. +Given a list of tools and a set of instructions, a model can request to call one or more tools with specific inputs. + +To define a tool in dart, we use the `ToolSpec` class. +```dart +final openaiApiKey = Platform.environment['OPENAI_API_KEY']; +final model = ChatOpenAI(apiKey: openaiApiKey); + +final promptTemplate = ChatPromptTemplate.fromTemplate( + 'Tell me a joke about {foo}', +); + +const tool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline for the joke', + }, + }, + 'required': ['setup', 'punchline'], + }, +); + +final chain = promptTemplate | + model.bind( + ChatOpenAIOptions( + tools: const [tool], + toolChoice: ChatToolChoice.forced(name: tool.name), + ), + ); + +final res = await chain.invoke({'foo': 'bears'}); +print(res); +// ChatResult{ +// id: chatcmpl-9LBPyaZcFMgjmOvkD0JJKAyA4Cihb, +// output: AIChatMessage{ +// content: , +// toolCalls: [ +// AIChatMessageToolCall{ +// id: call_JIhyfu6jdIXaDHfYzbBwCKdb, +// name: joke, +// argumentsRaw: {"setup":"Why don't bears like fast food?","punchline":"Because they can't catch it!"}, +// arguments: { +// setup: Why don't bears like fast food?, +// punchline: Because they can't catch it! +// }, +// } +// ], +// }, +// finishReason: FinishReason.stop, +// metadata: { +// model: gpt-4o-mini, +// created: 1714835806, +// system_fingerprint: fp_3b956da36b +// }, +// usage: LanguageModelUsage{ +// promptTokens: 77, +// responseTokens: 24, +// totalTokens: 101 +// }, +// streaming: false +// } +``` + +When designing tools to be used by a model, it is important to keep in mind that: + +- Chat models that have explicit [tool-calling APIs](/docs/concepts/#functiontool-calling) will be better at tool calling than non-fine-tuned models. +- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas. This another form of prompt engineering. +- Simple, narrowly scoped tools are easier for models to use than complex tools. + +#### Related + +For specifics on how to use tools, see the [tools how-to guides](/docs/how_to/#tools). + +To use a pre-built tool, see the [tool integration docs](/docs/integrations/tools/). + +### Agents +By themselves, language models can't take actions - they just output text. +A big use case for LangChain is creating agents. Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be. 
+The results of those actions can then be fed back into the agent and it determine whether more actions are needed, or whether it is okay to finish. + +### Callbacks +TODO: + + +### Techniques + +#### Streaming + +#### Function/tool calling + +#### Structured Output +LLMs are capable of generating arbitrary text. This enables the model to respond appropriately to a wide range of inputs, but for some use-cases, it can be useful to constrain the LLM's output to a specific format or structure. This is referred to as structured output. + + +#### Few-shot prompting + +#### Retrieval + +#### Text splitting + +#### Evaluation + +#### Tracing +##### \ No newline at end of file diff --git a/docs_v2/docs/05-integrations/anthropic.md b/docs_v2/docs/05-integrations/anthropic.md new file mode 100644 index 00000000..b607ddc7 --- /dev/null +++ b/docs_v2/docs/05-integrations/anthropic.md @@ -0,0 +1,145 @@ +# ChatAnthropic + +Wrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API). + +## Setup + +The Anthropic API uses API keys for authentication. Visit your [API Keys](https://console.anthropic.com/settings/keys) page to retrieve the API key you'll use in your requests. + +The following models are available: +- `claude-3-5-sonnet-20240620` +- `claude-3-haiku-20240307` +- `claude-3-opus-20240229` +- `claude-3-sonnet-20240229` +- `claude-2.0` +- `claude-2.1` + +Mind that the list may not be up-to-date. See https://docs.anthropic.com/en/docs/about-claude/models for the updated list. + +## Usage + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, 'Text to translate:\n{text}'), +]); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'adore programmer.' +``` + +## Multimodal support + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), +); + +print(res.output.content); +// -> 'The fruit in the image is an apple.' 
+``` + +## Streaming + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser()); + +final stream = chain.stream({'max_num': '30'}); +await stream.forEach(print); +// 123 +// 456789101 +// 112131415161 +// 718192021222 +// 324252627282 +// 930 +``` + +## Tool calling + +`ChatAnthropic` supports tool calling. + +Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. + +Example: +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + }, + 'required': ['location'], + }, +); +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + tools: [tool], + ), +); + +final res = await model.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +``` diff --git a/docs_v2/docs/05-integrations/anyscale.md b/docs_v2/docs/05-integrations/anyscale.md new file mode 100644 index 00000000..9a71c30e --- /dev/null +++ b/docs_v2/docs/05-integrations/anyscale.md @@ -0,0 +1,84 @@ +# Anyscale + +[Anyscale](https://www.anyscale.com/) offers a unified OpenAI-compatible API for a broad range of [models](https://docs.endpoints.anyscale.com/guides/models/#chat-models) running serverless or on your own dedicated instances. + +It also allows to fine-tune models on your own data or train new models from scratch. + +You can consume Anyscale API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API. 
+ +The only difference is that you need to change the base URL to `https://api.endpoints.anyscale.com/v1`: + +```dart +final chatModel = ChatOpenAI( + apiKey: anyscaleApiKey, + baseUrl: 'https://api.endpoints.anyscale.com/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'meta-llama/Llama-2-70b-chat-hf', + ), +); +``` + +## Invoke + +```dart +final anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that translates {input_language} to {output_language}.', + ), + (ChatMessageType.human, '{text}'), +]); + +final chatModel = ChatOpenAI( + apiKey: anyscaleApiKey, + baseUrl: 'https://api.endpoints.anyscale.com/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'meta-llama/Llama-2-70b-chat-hf', + ), +); + +final chain = promptTemplate | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> "I love programming" se traduit en français sous la forme "J'aime passionnément la programmation" +``` + +## Stream + +```dart +final anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces or commas', + ), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatOpenAI( + apiKey: anyscaleApiKey, + baseUrl: 'https://api.endpoints.anyscale.com/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'mistralai/Mixtral-8x7B-Instruct-v0.1', + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '9'}); +await stream.forEach(print); +// 1 +// 2 +// 3 +// ... +// 9 +``` diff --git a/docs_v2/docs/05-integrations/firebase_vertex_ai.md b/docs_v2/docs/05-integrations/firebase_vertex_ai.md new file mode 100644 index 00000000..cd33daa2 --- /dev/null +++ b/docs_v2/docs/05-integrations/firebase_vertex_ai.md @@ -0,0 +1,190 @@ +# Vertex AI for Firebase + +The [Vertex AI Gemini API](https://firebase.google.com/docs/vertex-ai) gives you access to the latest generative AI models from Google: the Gemini models. If you need to call the Vertex AI Gemini API directly from your mobile or web app you can use the `ChatFirebaseVertexAI` class instead of the [`ChatVertexAI`](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md) class which is designed to be used on the server-side. + +`ChatFirebaseVertexAI` is built specifically for use with mobile and web apps, offering security options against unauthorized clients as well as integrations with other Firebase services. + +## Key capabilities + +- **Multimodal input**: The Gemini models are multimodal, so prompts sent to the Gemini API can include text, images (even PDFs), video, and audio. +- **Growing suite of capabilities**: You can call the Gemini API directly from your mobile or web app, build an AI chat experience, use function calling, and more. +- **Security for production apps**: Use Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients. 
+- **Robust infrastructure**: Take advantage of scalable infrastructure that's built for use with mobile and web apps, like managing structured data with Firebase database offerings (like Cloud Firestore) and dynamically setting run-time configurations with Firebase Remote Config. + +## Setup + +### 1. Set up a Firebase project + +Check the [Firebase documentation](https://firebase.google.com/docs/vertex-ai/get-started?platform=flutter) for the latest information on how to set up the Vertex AI for Firebase in your Firebase project. + +In summary, you need to: +1. Upgrade your billing plan to the Blaze pay-as-you-go pricing plan. +2. Enable the required APIs (`aiplatform.googleapis.com` and `firebaseml.googleapis.com`). +3. Integrate the Firebase SDK into your app (if you haven't already). +4. Recommended: Enable Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients. + +### 2. Add the LangChain.dart Google package + +Add the `langchain_google` package to your `pubspec.yaml` file. + +```yaml +dependencies: + langchain: {version} + langchain_google: {version} +``` + +Internally, `langchain_google` uses the [`firebase_vertexai`](https://pub.dev/packages/firebase_vertexai) SDK to interact with the Vertex AI for Firebase API. + +### 3. Initialize your Firebase app + +```yaml +await Firebase.initializeApp(); +``` + +### 4. Call the Vertex AI Gemini API + +```dart +final chatModel = ChatFirebaseVertexAI(); +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, 'Text to translate:\n{text}'), +]); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'adore programmer.' +``` + +> Check out the [sample project](https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase/example) to see a complete project using Vertex AI for Firebase. + +## Available models + +The following models are available: +- `gemini-1.5-flash`: + * text / image / audio -> text model + * Max input token: 1048576 + * Max output tokens: 8192 +- `gemini-1.5-pro`: + * text / image / audio -> text model + * Max input token: 1048576 + * Max output tokens: 8192 +- `gemini-1.0-pro-vision`: + * text / image -> text model + * Max input token: 12288 + * Max output tokens: 4096 +- `gemini-1.0-pro` + * text -> text model + * Max input token: 30720 + * Max output tokens: 2048 + +Mind that this list may not be up-to-date. Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) for the updated list. + +## Multimodal support + +```dart +final chatModel = ChatFirebaseVertexAI( + defaultOptions: ChatFirebaseVertexAIOptions( + model: 'gemini-1.5-pro', + ), +); +final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), +); +print(res.output.content); +// -> 'That is an apple.' 
+``` + +## Streaming + +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatFirebaseVertexAI( + defaultOptions: ChatFirebaseVertexAIOptions( + model: 'gemini-1.5-pro', + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '30'}); +await stream.forEach(print); +// 1 +// 2345678910111213 +// 1415161718192021 +// 222324252627282930 +``` + +## Tool calling + +`ChatGoogleGenerativeAI` supports tool calling. + +Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. + +Example: +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + }, + 'required': ['location'], + }, +); +final chatModel = ChatFirebaseVertexAI( + defaultOptions: ChatFirebaseVertexAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + tools: [tool], + ), +); +final res = await model.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +``` + +## Prevent abuse with Firebase App Check + +You can use Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients. Check the [Firebase documentation](https://firebase.google.com/docs/vertex-ai/app-check) for more information. + +## Locations + +When initializing the Vertex AI service, you can optionally specify a location in which to run the service and access a model. If you don't specify a location, the default is us-central1. See the list of [available locations](https://firebase.google.com/docs/vertex-ai/locations?platform=flutter#available-locations). + +```dart +final chatModel = ChatFirebaseVertexAI( + location: 'us-central1', +); +``` + +## Alternatives + +- [`ChatVertexAI`](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md): Use this class to call the Vertex AI Gemini API from the server-side. +- [`ChatGoogleGenerativeAI`](/modules/model_io/models/chat_models/integrations/googleai.md): Use this class to call the "Google AI" version of the Gemini API that provides free-of-charge access (within limits and where available). This API is not intended for production use but for experimentation and prototyping. After you're familiar with how a Gemini API works, migrate to the Vertex AI for Firebase, which have many additional features important for mobile and web apps, like protecting the API from abuse using Firebase App Check. diff --git a/docs_v2/docs/05-integrations/gcp_vertex_ai.md b/docs_v2/docs/05-integrations/gcp_vertex_ai.md new file mode 100644 index 00000000..5417aab5 --- /dev/null +++ b/docs_v2/docs/05-integrations/gcp_vertex_ai.md @@ -0,0 +1,116 @@ +# GCP Chat Vertex AI + +Wrapper around [GCP Vertex AI chat models](https://cloud.google.com/vertex-ai/docs/generative-ai/chat/test-chat-prompts) API (aka PaLM API for chat). + +## Set up your Google Cloud Platform project + +1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). +2. 
[Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project). +3. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com). +4. [Configure the Vertex AI location](https://cloud.google.com/vertex-ai/docs/general/locations). + +### Authentication + +To create an instance of `ChatVertexAI` you need to provide an HTTP client that handles authentication. The easiest way to do this is to use [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html) from the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package. + +To create an instance of `VertexAI` you need to provide an [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html) instance. + +There are several ways to obtain an `AuthClient` depending on your use case. Check out the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package documentation for more details. + +Example using a service account JSON: + +```dart +final serviceAccountCredentials = ServiceAccountCredentials.fromJson( + json.decode(serviceAccountJson), +); +final authClient = await clientViaServiceAccount( + serviceAccountCredentials, + [ChatVertexAI.cloudPlatformScope], +); +final chatVertexAi = ChatVertexAI( + httpClient: authClient, + project: 'your-project-id', +); +``` + +The service account should have the following [permission](https://cloud.google.com/vertex-ai/docs/general/iam-permissions): +- `aiplatform.endpoints.predict` + +The required [OAuth2 scope](https://developers.google.com/identity/protocols/oauth2/scopes) is: +- `https://www.googleapis.com/auth/cloud-platform` (you can use the constant `ChatVertexAI.cloudPlatformScope`) + +See: https://cloud.google.com/vertex-ai/docs/generative-ai/access-control + +### Available models + +- `chat-bison` + * Max input token: 4096 + * Max output tokens: 1024 + * Training data: Up to Feb 2023 + * Max turns: 2500 +- `chat-bison-32k` + * Max input and output tokens combined: 32k + * Training data: Up to Aug 2023 + * Max turns: 2500 + +The previous list of models may not be exhaustive or up-to-date. Check out the [Vertex AI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models) for the latest list of available models. + +### Model options + +You can define default options to use when calling the model (e.g. temperature, stop sequences, etc. ) using the `defaultOptions` parameter. + +The default options can be overridden when calling the model using the `options` parameter. + +Example: +```dart +final chatModel = ChatVertexAI( + httpClient: authClient, + project: 'your-project-id', + defaultOptions: ChatVertexAIOptions( + temperature: 0.9, + ), +); +final result = await chatModel( + [ChatMessage.humanText('Hello')], + options: ChatVertexAIOptions( + temperature: 0.5, + ), +); +``` + +### Full example + +```dart +import 'package:langchain/langchain.dart'; +import 'package:langchain_google/langchain_google.dart'; + +void main() async { + final chat = ChatVertexAI( + httpClient: await _getAuthHttpClient(), + project: _getProjectId(), + defaultOptions: const ChatVertexAIOptions( + temperature: 0, + ), + ); + while (true) { + stdout.write('> '); + final usrMsg = ChatMessage.humanText(stdin.readLineSync() ?? 
''); + final aiMsg = await chat([usrMsg]); + print(aiMsg.content); + } +} + +Future _getAuthHttpClient() async { + final serviceAccountCredentials = ServiceAccountCredentials.fromJson( + json.decode(Platform.environment['VERTEX_AI_SERVICE_ACCOUNT']!), + ); + return clientViaServiceAccount( + serviceAccountCredentials, + [VertexAI.cloudPlatformScope], + ); +} + +String _getProjectId() { + return Platform.environment['VERTEX_AI_PROJECT_ID']!; +} +``` diff --git a/docs_v2/docs/05-integrations/googleai.md b/docs_v2/docs/05-integrations/googleai.md new file mode 100644 index 00000000..033c7672 --- /dev/null +++ b/docs_v2/docs/05-integrations/googleai.md @@ -0,0 +1,149 @@ +# ChatGoogleGenerativeAI + +Wrapper around [Google AI for Developers](https://ai.google.dev/) API (aka Gemini API). + +## Setup + +To use `ChatGoogleGenerativeAI` you need to have an API key. You can get one [here](https://aistudio.google.com/app/apikey). + +The following models are available: +- `gemini-1.5-flash`: + * text / image / audio -> text model + * Max input token: 1048576 + * Max output tokens: 8192 +- `gemini-1.5-pro`: text / image -> text model + * text / image / audio -> text model + * Max input token: 1048576 + * Max output tokens: 8192 +- `gemini-pro-vision`: + * text / image -> text model + * Max input token: 12288 + * Max output tokens: 4096 +- `gemini-1.0-pro` (or `gemini-pro`): + * text -> text model + * Max input token: 30720 + * Max output tokens: 2048 + +Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/models) for the updated list. + +## Usage + +```dart +final apiKey = Platform.environment['GOOGLEAI_API_KEY']; + +final chatModel = ChatGoogleGenerativeAI( + apiKey: apiKey, + defaultOptions: ChatGoogleGenerativeAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + ), +); + +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, 'Text to translate:\n{text}'), +]); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'adore programmer.' +``` + +## Multimodal support + +```dart +final apiKey = Platform.environment['GOOGLEAI_API_KEY']; + +final chatModel = ChatGoogleGenerativeAI( + apiKey: apiKey, + defaultOptions: ChatGoogleGenerativeAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + ), +); +final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), +); +print(res.output.content); +// -> 'That is an apple.' 
+``` + +## Streaming + +```dart +final apiKey = Platform.environment['GOOGLEAI_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatGoogleGenerativeAI( + apiKey: apiKey, + defaultOptions: const ChatGoogleGenerativeAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '30'}); +await stream.forEach(print); +// 1 +// 2345678910111213 +// 1415161718192021 +// 222324252627282930 +``` + +## Tool calling + +`ChatGoogleGenerativeAI` supports tool calling. + +Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. + +Example: +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + }, + 'required': ['location'], + }, +); +final chatModel = ChatGoogleGenerativeAI( + defaultOptions: ChatGoogleGenerativeAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + tools: [tool], + ), +); +final res = await model.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +``` diff --git a/docs_v2/docs/05-integrations/index.mdx b/docs_v2/docs/05-integrations/index.mdx new file mode 100644 index 00000000..35f38dfc --- /dev/null +++ b/docs_v2/docs/05-integrations/index.mdx @@ -0,0 +1,56 @@ +--- +sidebar_position: 0 +index: auto +--- +# Integrations + +> If you'd like to write your own integration, see Extending Langchain. + +The following table contains the list of existing Langchain.dart integration packages. To install a specific integration, see [Installing Langchain components](/docs/03-how_to/01-installation.md) + + +

    + +

    + +| Package | Version | Description | +|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | +| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | +| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components and utilities | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | +| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | + +Functionality provided by each integration package: + +| Package | LLMs | Chat models | Embeddings | Vector stores | Chains | Agents | Tools | +|---------------------------------------------------------------------|------|-------------|------------|---------------|--------|--------|-------| +| [langchain_community](https://pub.dev/packages/langchain_community) | | | | ✔ | | | ✔ | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | | ✔ | ✔ | ✔ | +| [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | ✔ | | | | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | | ✔ | | | | | | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | | | | | +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | | ✔ | ✔ | | | | | +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | | | | ✔ | | | | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | | | | ✔ | | | | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | | | | ✔ | | | | + +The following packages are maintained (and used internally) by LangChain.dart, although they can also be used independently: + +| Package | Version | Description | +|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------| +| [anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | [Anthropic](https://docs.anthropic.com/en/api) API client | +| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | [Chroma DB](https://trychroma.com/) API client | +| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | [Google AI for Developers](https://ai.google.dev/) API client | +| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | [Mistral AI](https://docs.mistral.ai/api) API client | +| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | [Ollama](https://ollama.ai/) API client | +| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | [OpenAI](https://platform.openai.com/docs/api-reference) API 
client | +| [tavily_dart](https://pub.dev/packages/tavily_dart) | [![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) | [Tavily](https://tavily.com) API client | +| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | [GCP Vertex AI](https://cloud.google.com/vertex-ai) API client | + diff --git a/docs_v2/docs/05-integrations/mistralai.md b/docs_v2/docs/05-integrations/mistralai.md new file mode 100644 index 00000000..14f21fb1 --- /dev/null +++ b/docs_v2/docs/05-integrations/mistralai.md @@ -0,0 +1,76 @@ +# ChatMistralAI + +Wrapper around [Mistral AI](https://mistral.ai/) Chat Completions API. + +Mistral AI brings the strongest open generative models to the developers, along with efficient ways to deploy and customise them for production. + +> Note: Mistral AI API is currently in closed beta. You can request access [here](https://console.mistral.ai). + +## Setup + +To use `ChatMistralAI` you need to have a Mistral AI account and an API key. You can get one [here](https://console.mistral.ai/users/). + +The following models are available at the moment: +- `mistral-tiny`: Mistral 7B Instruct v0.2 (a minor release of Mistral 7B Instruct). It only works in English and obtains 7.6 on MT-Bench. +- `mistral-small`: Mixtral 8x7B. It masters English/French/Italian/German/Spanish and code and obtains 8.3 on MT-Bench. +- `mistral-medium`: a prototype model, that is currently among the top serviced models available based on standard benchmarks. It masters English/French/Italian/German/Spanish and code and obtains a score of 8.6 on MT-Bench. + +## Usage + +```dart +final chatModel = ChatMistralAI( + apiKey: 'apiKey', + defaultOptions: ChatMistralAIOptions( + model: 'mistral-small', + temperature: 0, + ), +); + +const template = 'You are a helpful assistant that translates {input_language} to {output_language}.'; +final systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(template); +const humanTemplate = '{text}'; +final humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(humanTemplate); +final chatPrompt = ChatPromptTemplate.fromPromptMessages( + [systemMessagePrompt, humanMessagePrompt], +); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'aime la programmation.' 
+```
+
+## Streaming
+
+```dart
+final promptTemplate = ChatPromptTemplate.fromPromptMessages([
+  SystemChatMessagePromptTemplate.fromTemplate(
+    'You are a helpful assistant that replies only with numbers '
+    'in order without any spaces or commas',
+  ),
+  HumanChatMessagePromptTemplate.fromTemplate(
+    'List the numbers from 1 to {max_num}',
+  ),
+]);
+final chat = ChatMistralAI(
+  apiKey: 'apiKey',
+  defaultOptions: ChatMistralAIOptions(
+    model: 'mistral-medium',
+    temperature: 0,
+  ),
+);
+
+final chain = promptTemplate.pipe(chat).pipe(StringOutputParser());
+
+final stream = chain.stream({'max_num': '9'});
+await stream.forEach(print);
+// 12
+// 345
+// 67
+// 89
+```
diff --git a/docs_v2/docs/05-integrations/ollama.md b/docs_v2/docs/05-integrations/ollama.md
new file mode 100644
index 00000000..e6cc5907
--- /dev/null
+++ b/docs_v2/docs/05-integrations/ollama.md
@@ -0,0 +1,462 @@
+# ChatOllama
+
+Wrapper around the [Ollama](https://ollama.ai) Completions API that enables you to interact with LLMs in a chat-like fashion.
+
+Ollama allows you to run open-source large language models, such as Llama 3.1 or Gemma 2, locally.
+
+Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage.
+
+## Setup
+
+Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:
+
+1. Download and install [Ollama](https://ollama.ai)
+2. Fetch a model via `ollama pull <model>`
+   * e.g., for Llama 3.1: `ollama pull llama3.1`
+3. Instantiate the `ChatOllama` class with the downloaded model.
+
+```dart
+final chatModel = ChatOllama(
+  defaultOptions: ChatOllamaOptions(
+    model: 'llama3.1',
+  ),
+);
+```
+
+For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).
+
+### Ollama base URL
+
+By default, `ChatOllama` uses `http://localhost:11434/api` as the base URL (the default Ollama API URL). If you are running Ollama on a different host, you can override it using the `baseUrl` parameter.
+
+```dart
+final chatModel = ChatOllama(
+  baseUrl: 'https://your-remote-server-where-ollama-is-running.com',
+  defaultOptions: ChatOllamaOptions(
+    model: 'llama3.1',
+  ),
+);
+```
+
+## Usage
+
+```dart
+final promptTemplate = ChatPromptTemplate.fromTemplates([
+  (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'),
+  (ChatMessageType.human, '{text}'),
+]);
+
+final chatModel = ChatOllama(
+  defaultOptions: ChatOllamaOptions(
+    model: 'llama3.1',
+    temperature: 0,
+  ),
+);
+
+final chain = promptTemplate | chatModel | StringOutputParser();
+
+final res = await chain.invoke({
+  'input_language': 'English',
+  'output_language': 'French',
+  'text': 'I love programming.',
+});
+print(res);
+// -> 'La traduction est : "J'aime le programming.'
+```
+
+### Streaming
+
+Ollama supports streaming the output as the model generates it.
+ +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); +final chat = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + ), +); +final chain = promptTemplate.pipe(chat).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '9'}); +await stream.forEach(print); +// 123 +// 456 +// 789 +``` + +### Multimodal support + +Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava). + +You can provide several base64-encoded `png` or `jpeg` images. Images up to 100MB in size are supported. + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llava', + temperature: 0, + ), +); +final prompt = ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), +); +final res = await chatModel.invoke(PromptValue.chat([prompt])); +print(res.output.content); +// -> 'An Apple' +``` + +### Tool calling + +`ChatOllama` offers support for native tool calling. This enables a model to answer a given prompt using tool(s) it knows about, making it possible for models to perform more complex tasks or interact with the outside world. It follows the standard [LangChain.dart tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). + +**Notes:** +- Tool calling requires [Ollama 0.3.0](https://github.com/ollama/ollama/releases/tag/v0.3.0) or newer. +- Streaming tool calls is not supported at the moment. +- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1) or [`llama3-groq-tool-use`](https://ollama.com/library/llama3-groq-tool-use)). +- At the moment, small models like `llama3.1` [cannot reliably maintain a conversation alongside tool calling definitions](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#llama-3.1-instruct). They can be used for zero-shot tool calling, but for multi-turn conversations it's recommended to use larger models like `llama3.1:70b` or `llama3.1:405b`. + +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + }, + 'required': ['location'], + }, +); + +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [tool], + ), +); + +final res = await chatModel.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +print(res.output.toolCalls); +// [AIChatMessageToolCall{ +// id: a621064b-03b3-4ca6-8278-f37504901034, +// name: get_current_weather, +// arguments: {location: Boston, US}, +// }, +// AIChatMessageToolCall{ +// id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, +// name: get_current_weather, +// arguments: {location: Madrid, ES}, +// }] +``` + +As you can see, `ChatOllama` support calling multiple tools in a single request. + +If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter: + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [tool], + toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), + ), +); +``` + +**Pro-tip:** You can improve tool-calling performance of small models by using few-shot prompting. You can find out how to do this [here](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools?id=few-shot-prompting) and in this [blog post](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance). + +### JSON mode + +You can force the model to produce JSON output that you can easily parse using `JsonOutputParser`, useful for extracting structured data. + +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are an assistant that respond question using JSON format.'), + (ChatMessageType.human, '{question}'), +]); +final chat = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + format: OllamaResponseFormat.json, + ), +); + +final chain = Runnable.getMapFromInput('question') + .pipe(promptTemplate) + .pipe(chat) + .pipe(JsonOutputParser()); + +final res = await chain.invoke( + 'What is the population of Spain, The Netherlands, and France?', +); +print(res); +// {Spain: 46735727, The Netherlands: 17398435, France: 65273538} +``` + +## Examples + +### Answering questions with data from an external API + +Imagine you have an API that provides flight times between two cities: + +```dart +// Simulates an API call to get flight times +// In a real application, this would fetch data from a live database or API +String getFlightTimes(String departure, String arrival) { + final flights = { + 'NYC-LAX': { + 'departure': '08:00 AM', + 'arrival': '11:30 AM', + 'duration': '5h 30m', + }, + 'LAX-NYC': { + 'departure': '02:00 PM', + 'arrival': '10:30 PM', + 'duration': '5h 30m', + }, + 'LHR-JFK': { + 'departure': '10:00 AM', + 'arrival': '01:00 PM', + 'duration': '8h 00m', + }, + 'JFK-LHR': { + 'departure': '09:00 PM', + 'arrival': '09:00 AM', + 'duration': '7h 00m', + }, + 'CDG-DXB': { + 'departure': '11:00 AM', + 'arrival': '08:00 PM', + 'duration': '6h 00m', + }, + 'DXB-CDG': { + 'departure': '03:00 AM', + 'arrival': '07:30 AM', + 'duration': '7h 30m', + }, + }; + + final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}'; + return jsonEncode(flights[key] ?? {'error': 'Flight not found'}); +} +``` + +Using the tool calling capabilities of Ollama, we can provide the model with the ability to call this API whenever it needs to get flight times to answer a question. 
+
+```dart
+const getFlightTimesTool = ToolSpec(
+  name: 'get_flight_times',
+  description: 'Get the flight times between two cities',
+  inputJsonSchema: {
+    'type': 'object',
+    'properties': {
+      'departure': {
+        'type': 'string',
+        'description': 'The departure city (airport code)',
+      },
+      'arrival': {
+        'type': 'string',
+        'description': 'The arrival city (airport code)',
+      },
+    },
+    'required': ['departure', 'arrival'],
+  },
+);
+
+final chatModel = ChatOllama(
+  defaultOptions: const ChatOllamaOptions(
+    model: 'llama3.1',
+    temperature: 0,
+    tools: [getFlightTimesTool],
+  ),
+);
+
+final messages = [
+  ChatMessage.humanText(
+    'What is the flight time from New York (NYC) to Los Angeles (LAX)?',
+  ),
+];
+
+// First API call: Send the query and function description to the model
+final response = await chatModel.invoke(PromptValue.chat(messages));
+
+messages.add(response.output);
+
+// Check if the model decided to use the provided function
+if (response.output.toolCalls.isEmpty) {
+  print("The model didn't use the function. Its response was:");
+  print(response.output.content);
+  return;
+}
+
+// Process function calls made by the model
+for (final toolCall in response.output.toolCalls) {
+  final functionResponse = getFlightTimes(
+    toolCall.arguments['departure'],
+    toolCall.arguments['arrival'],
+  );
+  // Add function response to the conversation
+  messages.add(
+    ChatMessage.tool(
+      toolCallId: toolCall.id,
+      content: functionResponse,
+    ),
+  );
+}
+
+// Second API call: Get final response from the model
+final finalResponse = await chatModel.invoke(PromptValue.chat(messages));
+print(finalResponse.output.content);
+// The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes.
+```
+
+### Extracting structured data with tools
+
+A useful application of tool calling is extracting structured data from unstructured text. In the following example, we use a tool to extract the names, heights, and hair colors of people mentioned in a passage.
+
+```dart
+const tool = ToolSpec(
+  name: 'information_extraction',
+  description: 'Extracts the relevant information from the passage',
+  inputJsonSchema: {
+    'type': 'object',
+    'properties': {
+      'people': {
+        'type': 'array',
+        'items': {
+          'type': 'object',
+          'properties': {
+            'name': {
+              'type': 'string',
+              'description': 'The name of a person',
+            },
+            'height': {
+              'type': 'number',
+              'description': 'The height of the person in cm',
+            },
+            'hair_color': {
+              'type': 'string',
+              'description': 'The hair color of the person',
+              'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'],
+            },
+          },
+          'required': ['name', 'height', 'hair_color'],
+        },
+      },
+    },
+    'required': ['people'],
+  },
+);
+
+final model = ChatOllama(
+  defaultOptions: ChatOllamaOptions(
+    model: 'llama3.1',
+    temperature: 0,
+    tools: [tool],
+    toolChoice: ChatToolChoice.forced(name: tool.name),
+  ),
+);
+
+final promptTemplate = ChatPromptTemplate.fromTemplate('''
+Extract and save the relevant entities mentioned in the following passage together with their properties.
+
+Passage:
+{input}''');
+
+final chain = Runnable.getMapFromInput()
+    .pipe(promptTemplate)
+    .pipe(model)
+    .pipe(ToolsOutputParser());
+
+final res = await chain.invoke(
+  'Alex is 5 feet tall. '
+  'Claudia is 1 foot taller than Alex and jumps higher than him. '
+  'Claudia has orange hair and Alex is blonde.',
+);
+final extractedData = res.first.arguments;
+print(extractedData);
+// {
+//   people: [
+//     {
+//       name: Alex,
+//       height: 152,
+//       hair_color: blonde
+//     },
+//     {
+//       name: Claudia,
+//       height: 183,
+//       hair_color: orange
+//     }
+//   ]
+// }
+```
+
+### RAG (Retrieval-Augmented Generation) pipeline
+
+We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `ChatOllama`.
+
+```dart
+// 1. Create a vector store and add documents to it
+final vectorStore = MemoryVectorStore(
+  embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'),
+);
+await vectorStore.addDocuments(
+  documents: [
+    Document(pageContent: 'LangChain was created by Harrison'),
+    Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),
+  ],
+);
+
+// 2. Construct a RAG prompt template
+final promptTemplate = ChatPromptTemplate.fromTemplates([
+  (ChatMessageType.system, 'Answer the question based on only the following context:\n{context}'),
+  (ChatMessageType.human, '{question}'),
+]);
+
+// 3. Define the model to use and the vector store retriever
+final chatModel = ChatOllama(
+  defaultOptions: ChatOllamaOptions(model: 'llama3.1'),
+);
+final retriever = vectorStore.asRetriever(
+  defaultOptions: VectorStoreRetrieverOptions(
+    searchType: VectorStoreSimilaritySearch(k: 1),
+  ),
+);
+
+// 4. Create a Runnable that combines the retrieved documents into a single string
+final docCombiner = Runnable.mapInput<List<Document>, String>((docs) {
+  return docs.map((final d) => d.pageContent).join('\n');
+});
+
+// 5. Define the RAG pipeline
+final chain = Runnable.fromMap({
+  'context': retriever.pipe(docCombiner),
+  'question': Runnable.passthrough(),
+}).pipe(promptTemplate).pipe(chatModel).pipe(StringOutputParser());
+
+// 6. Run the pipeline
+final res = await chain.invoke('Who created LangChain.dart?');
+print(res);
+// Based on the context provided, David created LangChain.dart.
+```
diff --git a/docs_v2/docs/05-integrations/open_router.md b/docs_v2/docs/05-integrations/open_router.md
new file mode 100644
index 00000000..c2d63555
--- /dev/null
+++ b/docs_v2/docs/05-integrations/open_router.md
@@ -0,0 +1,157 @@
+# OpenRouter
+
+[OpenRouter](https://openrouter.ai/) offers a unified OpenAI-compatible API for a broad range of [models](https://openrouter.ai/models).
+
+You can also let users pay for their own models via their [OAuth PKCE](https://openrouter.ai/docs#oauth) flow.
+
+You can consume the OpenRouter API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API.
+
+The only difference is that you need to change the base URL to `https://openrouter.ai/api/v1`:
+
+```dart
+final chatModel = ChatOpenAI(
+  apiKey: openRouterApiKey,
+  baseUrl: 'https://openrouter.ai/api/v1',
+  defaultOptions: const ChatOpenAIOptions(
+    model: 'mistralai/mistral-small',
+  ),
+);
+```
+
+OpenRouter allows you to specify an optional `HTTP-Referer` header to identify your app and make it discoverable to users on openrouter.ai. You can also include an optional `X-Title` header to set or modify the title of your app.
+ +```dart + final chatModel = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + headers: { + 'HTTP-Referer': 'com.myapp', + 'X-Title': 'OpenRouterTest', + }, + defaultOptions: const ChatOpenAIOptions( + model: 'mistralai/mistral-small', + ), +); +``` + +## Invoke + +```dart +final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that translates {input_language} to {output_language}.', + ), + (ChatMessageType.human, '{text}'), +]); + +final chatModel = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'mistralai/mistral-small', + ), +); + +final chain = promptTemplate | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'aime la programmation.' +``` + +## Stream + +```dart +final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces or commas', + ), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'mistralai/mistral-small', + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '9'}); +await stream.forEach(print); +// 123 +// 456789 +``` + +## Tool calling + +OpenRouter supports [tool calling](https://openrouter.ai/docs#tool-calls). + +Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. + +In the following example we use the `joke` tool to generate jokes. We stream the joke generation using the `ToolsOutputParser' which tries to "auto-complete" the partial json from each chunk into a valid state. 
+
+```dart
+final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];
+const tool = ToolSpec(
+  name: 'joke',
+  description: 'A joke',
+  inputJsonSchema: {
+    'type': 'object',
+    'properties': {
+      'setup': {
+        'type': 'string',
+        'description': 'The setup for the joke',
+      },
+      'punchline': {
+        'type': 'string',
+        'description': 'The punchline to the joke',
+      },
+    },
+    'required': ['setup', 'punchline'],
+  },
+);
+final promptTemplate = ChatPromptTemplate.fromTemplate(
+  'tell me a long joke about {foo}',
+);
+final chat = ChatOpenAI(
+  apiKey: openRouterApiKey,
+  baseUrl: 'https://openrouter.ai/api/v1',
+  defaultOptions: ChatOpenAIOptions(
+    model: 'openai/gpt-4o',
+    tools: [tool],
+    toolChoice: ChatToolChoice.forced(name: 'joke'),
+  ),
+);
+final outputParser = ToolsOutputParser();
+
+final chain = promptTemplate.pipe(chat).pipe(outputParser);
+
+final stream = chain.stream({'foo': 'bears'});
+await for (final chunk in stream) {
+  final args = chunk.first.arguments;
+  print(args);
+}
+// {}
+// {setup: }
+// {setup: Why don't}
+// {setup: Why don't bears}
+// {setup: Why don't bears like fast food}
+// {setup: Why don't bears like fast food?, punchline: }
+// {setup: Why don't bears like fast food?, punchline: Because}
+// {setup: Why don't bears like fast food?, punchline: Because they can't}
+// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!}
+```
diff --git a/docs_v2/docs/05-integrations/openai.md b/docs_v2/docs/05-integrations/openai.md
new file mode 100644
index 00000000..6b3ccbbc
--- /dev/null
+++ b/docs_v2/docs/05-integrations/openai.md
@@ -0,0 +1,372 @@
+# OpenAI
+
+This page provides a quick overview for getting started with [OpenAI](https://platform.openai.com/docs/introduction) chat models. For detailed documentation of all `ChatOpenAI` features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest/langchain_openai/ChatOpenAI-class.html).
+
+OpenAI has several chat models. You can find information about their latest models and their costs, context windows, and supported input types in the [OpenAI docs](https://platform.openai.com/docs/models).
+
+> Note that certain OpenAI models can also be accessed via the [Microsoft Azure platform](https://azure.microsoft.com/en-us/products/ai-services/openai-service). Check out the API reference for more information on how to use Azure OpenAI with `ChatOpenAI`.
+
+## Setup
+
+To access OpenAI models you'll need to create an OpenAI account, get an API key, and install the [langchain_openai](https://pub.dev/packages/langchain_openai) integration package.
+
+### Credentials
+
+Head to the [OpenAI Platform](https://platform.openai.com), sign up and get your [API key](https://platform.openai.com/account/api-keys).
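+
+The examples in the following sections read the key from an environment variable. A minimal sketch of that setup (the `OPENAI_API_KEY` variable name is just the convention used throughout these docs; adapt it to however you manage secrets):
+
+```dart
+import 'dart:io';
+
+void main() {
+  // Read the key exported in your shell, e.g. `export OPENAI_API_KEY="sk-..."`.
+  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
+  if (openaiApiKey == null) {
+    stderr.writeln('Please set the OPENAI_API_KEY environment variable.');
+    exit(1);
+  }
+  // The key is then passed to the ChatOpenAI constructor, as shown below.
+}
+```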
+
+### Installation
+
+The LangChain.dart OpenAI integration lives in the [langchain_openai](https://pub.dev/packages/langchain_openai) package:
+
+```bash
+dart pub add langchain_openai
+```
+
+## Usage
+
+### Instantiation
+
+Now we can instantiate our model object and generate chat completions:
+
+```dart
+final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
+
+final chatModel = ChatOpenAI(
+  apiKey: openaiApiKey,
+  defaultOptions: ChatOpenAIOptions(
+    model: 'gpt-4o',
+    temperature: 0,
+    // ...other options
+  ),
+);
+```
+
+If you are using a proxy, you can override the base URL, headers, and other options:
+
+```dart
+final client = ChatOpenAI(
+  baseUrl: 'https://my-proxy.com',
+  headers: {'x-my-proxy-header': 'value'},
+);
+```
+
+### Invocation
+
+Now you can generate completions by calling the `invoke` method:
+
+```dart
+final messages = [
+  ChatMessage.system('You are a helpful assistant that translates English to French.'),
+  ChatMessage.humanText('I love programming.'),
+];
+final prompt = PromptValue.chat(messages);
+final res = await chatModel.invoke(prompt);
+// -> 'J'adore la programmation.'
+```
+
+### Chaining
+
+We can chain our model with a prompt template or output parser to create a more complex pipeline:
+
+```dart
+final promptTemplate = ChatPromptTemplate.fromTemplates([
+  (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'),
+  (ChatMessageType.human, '{text}'),
+]);
+
+final chain = promptTemplate | chatModel | StringOutputParser();
+
+final res = await chain.invoke({
+  'input_language': 'English',
+  'output_language': 'French',
+  'text': 'I love programming.',
+});
+print(res);
+// -> 'J'adore la programmation.'
+```
+
+### Streaming
+
+OpenAI models support [streaming](/expression_language/streaming.md) the output of the model as it is generated.
+
+```dart
+final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
+
+final promptTemplate = ChatPromptTemplate.fromTemplates([
+  (
+    ChatMessageType.system,
+    'You are a helpful assistant that replies only with numbers '
+    'in order without any spaces or commas',
+  ),
+  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),
+]);
+
+final chat = ChatOpenAI(apiKey: openaiApiKey);
+
+final chain = promptTemplate.pipe(chat).pipe(StringOutputParser());
+
+final stream = chain.stream({'max_num': '9'});
+await stream.forEach(print);
+// 123
+// 456
+// 789
+```
+
+### Multimodal support
+
+OpenAI's models have [vision capabilities](https://platform.openai.com/docs/guides/vision), meaning the models can take in images and answer questions about them.
+
+You can send the image as a base64-encoded string:
+
+```dart
+final prompt = PromptValue.chat([
+  ChatMessage.system('You are a helpful assistant.'),
+  ChatMessage.human(
+    ChatMessageContent.multiModal([
+      ChatMessageContent.text('What fruit is this?'),
+      ChatMessageContent.image(
+        mimeType: 'image/jpeg',
+        data: '/9j/4AAQSkZJRgABAQAAAQABAAD...Rdu1j//2Q==', // base64-encoded image
+      ),
+    ]),
+  ),
+]);
+```
+
+Or you can send the URL where the image is hosted:
+
+```dart
+final prompt = PromptValue.chat([
+  ChatMessage.system('You are a helpful assistant.'),
+  ChatMessage.human(
+    ChatMessageContent.multiModal([
+      ChatMessageContent.text('What fruit is this?'),
+      ChatMessageContent.image(
+        data: 'https://upload.wikimedia.org/wikipedia/commons/9/92/95apple.jpeg',
+      ),
+    ]),
+  ),
+]);
+```
+
+### Tool calling
+
+OpenAI has a [tool calling](/modules/model_io/models/chat_models/how_to/tools.md) API (we use "tool calling" and "function calling" interchangeably here) that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. Tool calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally.
+
+```dart
+const tool = ToolSpec(
+  name: 'get_current_weather',
+  description: 'Get the current weather in a given location',
+  inputJsonSchema: {
+    'type': 'object',
+    'properties': {
+      'location': {
+        'type': 'string',
+        'description': 'The city and country, e.g. San Francisco, US',
+      },
+    },
+    'required': ['location'],
+  },
+);
+
+final chatModel = ChatOpenAI(
+  apiKey: openaiApiKey,
+  defaultOptions: ChatOpenAIOptions(
+    model: 'gpt-4o',
+    temperature: 0,
+    tools: [tool],
+  ),
+);
+
+final res = await chatModel.invoke(
+  PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'),
+);
+print(res.output.toolCalls);
+// [AIChatMessageToolCall{
+//   id: a621064b-03b3-4ca6-8278-f37504901034,
+//   name: get_current_weather,
+//   arguments: {location: Boston, US},
+// },
+// AIChatMessageToolCall{
+//   id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53,
+//   name: get_current_weather,
+//   arguments: {location: Madrid, ES},
+// }]
+```
+
+Notice that the returned `AIChatMessage` has a `toolCalls` field. This contains the tool calls in a standardized format that is model-provider agnostic.
+
+You can also stream OpenAI tool calls. `ToolsOutputParser` is a useful tool for this case, as it concatenates the chunks progressively and tries to complete the partial JSON into a valid one:
+
+```dart
+const tool = ToolSpec(
+  name: 'joke',
+  description: 'A joke',
+  inputJsonSchema: {
+    'type': 'object',
+    'properties': {
+      'setup': {
+        'type': 'string',
+        'description': 'The setup for the joke',
+      },
+      'punchline': {
+        'type': 'string',
+        'description': 'The punchline to the joke',
+      },
+    },
+    'required': ['setup', 'punchline'],
+  },
+);
+final promptTemplate = ChatPromptTemplate.fromTemplate(
+  'tell me a long joke about {foo}',
+);
+final chat = ChatOpenAI(
+  apiKey: openaiApiKey,
+  defaultOptions: ChatOpenAIOptions(
+    tools: [tool],
+    toolChoice: ChatToolChoice.forced(name: 'joke'),
+  ),
+);
+final outputParser = ToolsOutputParser();
+
+final chain = promptTemplate.pipe(chat).pipe(outputParser);
+
+final stream = chain.stream({'foo': 'bears'});
+await for (final chunk in stream) {
+  final args = chunk.first.arguments;
+  print(args);
+}
+// {}
+// {setup: }
+// {setup: Why don't}
+// {setup: Why don't bears}
+// {setup: Why don't bears like fast food}
+// {setup: Why don't bears like fast food?, punchline: }
+// {setup: Why don't bears like fast food?, punchline: Because}
+// {setup: Why don't bears like fast food?, punchline: Because they can't}
+// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!}
+```
+
+### Structured Outputs
+
+[Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) is a feature that ensures the model will always generate responses that adhere to your supplied JSON Schema, so you don't need to worry about the model omitting a required key, or hallucinating an invalid enum value.
+
+```dart
+final prompt = PromptValue.chat([
+  ChatMessage.system(
+    'Extract the data of any companies mentioned in the '
+    'following statement. Return a JSON list.',
+  ),
+  ChatMessage.humanText(
+    'Google was founded in the USA, while Deepmind was founded in the UK',
+  ),
+]);
+final chatModel = ChatOpenAI(
+  apiKey: openaiApiKey,
+  defaultOptions: ChatOpenAIOptions(
+    model: 'gpt-4o',
+    temperature: 0,
+    responseFormat: ChatOpenAIResponseFormat.jsonSchema(
+      ChatOpenAIJsonSchema(
+        name: 'Companies',
+        description: 'A list of companies',
+        strict: true,
+        schema: {
+          'type': 'object',
+          'properties': {
+            'companies': {
+              'type': 'array',
+              'items': {
+                'type': 'object',
+                'properties': {
+                  'name': {'type': 'string'},
+                  'origin': {'type': 'string'},
+                },
+                'additionalProperties': false,
+                'required': ['name', 'origin'],
+              },
+            },
+          },
+          'additionalProperties': false,
+          'required': ['companies'],
+        },
+      ),
+    ),
+  ),
+);
+
+final res = await chatModel.invoke(prompt);
+// {
+//   "companies": [
+//     {
+//       "name": "Google",
+//       "origin": "USA"
+//     },
+//     {
+//       "name": "Deepmind",
+//       "origin": "UK"
+//     }
+//   ]
+// }
+```
+
+When you use `strict: true`, the model outputs will match the supplied schema exactly. Note that strict mode only supports a [subset of JSON schema](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas) for performance reasons. Under the hood, OpenAI uses a technique known as constrained sampling or constrained decoding. For each JSON Schema, they compute a grammar that represents that schema, and pre-process its components to make it easily accessible during model sampling. This is why the first request with a new schema incurs a latency penalty.
Typical schemas take under 10 seconds to process on the first request, but more complex schemas may take up to a minute. + +### JSON mode + +When [JSON mode](https://platform.openai.com/docs/guides/structured-outputs/json-mode) is turned on, the model's output is ensured to be valid JSON. You can use it in combination with a `JsonOutputParser` to parse the response into a JSON map. + +> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It is recommended that you use Structured Outputs if it is supported for your use case. + +```dart +final prompt = PromptValue.chat([ + ChatMessage.system( + "Extract the 'name' and 'origin' of any companies mentioned in the " + 'following statement. Return a JSON list.', + ), + ChatMessage.humanText( + 'Google was founded in the USA, while Deepmind was founded in the UK', + ), +]); +final llm = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions( + model: 'gpt-4-turbo', + temperature: 0, + responseFormat: ChatOpenAIResponseFormat.jsonObject, + ), +); +final chain = llm.pipe(JsonOutputParser()); +final res = await chain.invoke(prompt); +print(res); +// { +// "companies": [ +// { +// "name": "Google", +// "origin": "USA" +// }, +// { +// "name": "Deepmind", +// "origin": "UK" +// } +// ] +// } +``` + +### Fine-tuning + +You can call [fine-tuned OpenAI models](https://platform.openai.com/docs/guides/fine-tuning) by passing in your corresponding modelName parameter. + +This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. For example: + +```dart +final chatModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: 'ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR' + ), +); +``` + +## API reference + +For detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest). diff --git a/docs_v2/docs/05-integrations/prem.md b/docs_v2/docs/05-integrations/prem.md new file mode 100644 index 00000000..65258f7c --- /dev/null +++ b/docs_v2/docs/05-integrations/prem.md @@ -0,0 +1,24 @@ +# Prem App + +You can easily run local models using [Prem app](https://www.premai.io/#PremApp). +It creates a local server that exposes a REST API with the same interface as +the OpenAI API. + +```dart +const localUrl = 'http://localhost:8000'; // Check Prem app for the actual URL +final chat = ChatOpenAI(baseUrl: localUrl); + +const template = 'You are a helpful assistant that translates {input_language} to {output_language}.'; +final systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(template); +const humanTemplate = '{text}'; +final humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(humanTemplate); + +final chatPrompt = ChatPromptTemplate.fromPromptMessages([systemMessagePrompt, humanMessagePrompt]); +final formattedPrompt = chatPrompt.formatPrompt({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.' 
+}).toChatMessages();
+
+final output = await chat.predictMessages(formattedPrompt);
+```
diff --git a/docs_v2/docs/05-integrations/together_ai.md b/docs_v2/docs/05-integrations/together_ai.md
new file mode 100644
index 00000000..10567455
--- /dev/null
+++ b/docs_v2/docs/05-integrations/together_ai.md
@@ -0,0 +1,84 @@
+# Together AI
+
+[Together AI](https://www.together.ai) offers a unified OpenAI-compatible API for a broad range of [models](https://api.together.xyz/playground) running serverless or on your own dedicated instances.
+
+It also allows you to fine-tune models on your own data or train new models from scratch.
+
+You can consume the Together AI API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API.
+
+The only difference is that you need to change the base URL to `https://api.together.xyz/v1`:
+
+```dart
+final chatModel = ChatOpenAI(
+  apiKey: togetherAiApiKey,
+  baseUrl: 'https://api.together.xyz/v1',
+  defaultOptions: const ChatOpenAIOptions(
+    model: 'mistralai/Mistral-7B-Instruct-v0.2',
+  ),
+);
+```
+
+## Invoke
+
+```dart
+final togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY'];
+
+final promptTemplate = ChatPromptTemplate.fromTemplates(const [
+  (
+    ChatMessageType.system,
+    'You are a helpful assistant that translates {input_language} to {output_language}.',
+  ),
+  (ChatMessageType.human, '{text}'),
+]);
+
+final chatModel = ChatOpenAI(
+  apiKey: togetherAiApiKey,
+  baseUrl: 'https://api.together.xyz/v1',
+  defaultOptions: const ChatOpenAIOptions(
+    model: 'mistralai/Mistral-7B-Instruct-v0.2',
+  ),
+);
+
+final chain = promptTemplate | chatModel | StringOutputParser();
+
+final res = await chain.invoke({
+  'input_language': 'English',
+  'output_language': 'French',
+  'text': 'I love programming.',
+});
+print(res);
+// -> 'J'aime programmer'
+```
+
+## Stream
+
+```dart
+final togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY'];
+
+final promptTemplate = ChatPromptTemplate.fromTemplates(const [
+  (
+    ChatMessageType.system,
+    'You are a helpful assistant that replies only with numbers '
+    'in order without any spaces or commas',
+  ),
+  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),
+]);
+
+final chatModel = ChatOpenAI(
+  apiKey: togetherAiApiKey,
+  baseUrl: 'https://api.together.xyz/v1',
+  defaultOptions: const ChatOpenAIOptions(
+    model: 'mistralai/Mistral-7B-Instruct-v0.2',
+  ),
+);
+
+final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser());
+
+final stream = chain.stream({'max_num': '9'});
+await stream.forEach(print);
+// 1
+// 2
+// 3
+// ...
+// 9
+```
diff --git a/docs_v2/docs/05-integrations/tools/index.mdx b/docs_v2/docs/05-integrations/tools/index.mdx
new file mode 100644
index 00000000..211b41de
--- /dev/null
+++ b/docs_v2/docs/05-integrations/tools/index.mdx
@@ -0,0 +1,5 @@
+---
+sidebar_position: 0
+index: auto
+---
+# Tools
\ No newline at end of file
diff --git a/docs_v2/docs/05-integrations/tools/tavily_search.md b/docs_v2/docs/05-integrations/tools/tavily_search.md
new file mode 100644
index 00000000..2f3d461d
--- /dev/null
+++ b/docs_v2/docs/05-integrations/tools/tavily_search.md
@@ -0,0 +1,13 @@
+# Tavily Search
+
+[Tavily's Search API](https://tavily.com) is a search engine built specifically for AI agents (LLMs), delivering real-time, accurate, and factual results at speed.
+
+## Setup
+
+The integration lives in the `langchain_community` package.
+ +```bash +dart pub add langchain langchain_community +``` + +We also need to set our Tavily API key. \ No newline at end of file diff --git a/docs_v2/docusaurus.config.js b/docs_v2/docusaurus.config.js new file mode 100644 index 00000000..1376cddc --- /dev/null +++ b/docs_v2/docusaurus.config.js @@ -0,0 +1,130 @@ +// @ts-check +// `@type` JSDoc annotations allow editor autocompletion and type checking +// (when paired with `@ts-check`). +// There are various equivalent ways to declare your Docusaurus config. +// See: https://docusaurus.io/docs/api/docusaurus-config + +import { themes as prismThemes } from 'prism-react-renderer'; + +/** @type {import('@docusaurus/types').Config} */ +const config = { + title: 'LangChain.dart', + tagline: 'LangChain.dart Docs', + favicon: 'img/favicon.ico', + + // Set the production url of your site here + url: 'https://beta.langchaindart.dev/', + // Set the // pathname under which your site is served + // For GitHub pages deployment, it is often '//' + baseUrl: '/', + + // GitHub pages deployment config. + // If you aren't using GitHub pages, you don't need these. + organizationName: 'davidmigloz', // Usually your GitHub org/user name. + projectName: 'langchain_dart', // Usually your repo name. + + onBrokenLinks: 'warn', + onBrokenMarkdownLinks: 'warn', + + // Even if you don't use internationalization, you can use this field to set + // useful metadata like html lang. For example, if your site is Chinese, you + // may want to replace "en" with "zh-Hans". + i18n: { + defaultLocale: 'en', + locales: ['en'], + }, + + presets: [ + [ + 'classic', + /** @type {import('@docusaurus/preset-classic').Options} */ + ({ + docs: { + sidebarPath: './sidebars.js', + // Please change this to your repo. + // Remove this to remove the "edit this page" links. 
+ editUrl: + 'https://github.com/davidmigloz/langchain_dart/tree/main/docs_v2/', + }, + theme: { + customCss: './src/css/custom.css', + }, + }), + ], + ], + + themeConfig: + /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ + ({ + // Replace with your project's social card + image: 'img/langchain.dart.png', + navbar: { + title: 'LangChain.dart', + logo: { + alt: 'LangChain Logo', + src: 'img/favicon.ico', + }, + items: [ + { + to: '/docs/integrations', + position: 'left', + label: 'Integrations', + }, + {to: 'https://blog.langchaindart.dev/blog', label: 'Blog', position: 'left'}, + { + href: 'https://github.com/davidmigloz/langchain_dart/', + label: 'GitHub', + position: 'right', + }, + ], + }, + footer: { + style: 'dark', + links: [ + { + title: 'Docs', + items: [ + { + label: 'Tutorial', + to: '/docs/intro', + }, + ], + }, + { + title: 'Community', + items: [ + { + label: 'Discord', + href: 'https://discord.gg/x4qbhqecVR', + }, + { + label: 'pub.dev', + href: 'https://pub.dev/packages/langchain', + }, + ], + }, + { + title: 'More', + items: [ + { + label: 'Blog', + to: '/blog', + }, + { + label: 'GitHub', + href: 'https://github.com/davidmigloz/langchain_dart', + }, + ], + }, + ], + copyright: `Made with 💙 by the LangChain.dart Community`, + }, + prism: { + theme: prismThemes.vsLight, + darkTheme: prismThemes.vsDark, + additionalLanguages: ['yaml','dart','bash'], + }, + }), +}; + +export default config; diff --git a/docs_v2/firebase.json b/docs_v2/firebase.json new file mode 100644 index 00000000..340ed5b7 --- /dev/null +++ b/docs_v2/firebase.json @@ -0,0 +1,16 @@ +{ + "hosting": { + "public": "build", + "ignore": [ + "firebase.json", + "**/.*", + "**/node_modules/**" + ], + "rewrites": [ + { + "source": "**", + "destination": "/index.html" + } + ] + } +} diff --git a/docs_v2/package-lock.json b/docs_v2/package-lock.json new file mode 100644 index 00000000..21e6a165 --- /dev/null +++ b/docs_v2/package-lock.json @@ -0,0 +1,14683 @@ +{ + "name": "langchain-dart", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "langchain-dart", + "version": "0.0.0", + "dependencies": { + "@docusaurus/core": "^3.5.2", + "@docusaurus/preset-classic": "^3.5.2", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "prism-react-renderer": "^2.3.0", + "react": "^18.0.0", + "react-dom": "^18.0.0" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "^3.5.2", + "@docusaurus/types": "^3.5.2" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@algolia/autocomplete-core": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.9.3.tgz", + "integrity": "sha512-009HdfugtGCdC4JdXUbVJClA0q0zh24yyePn+KUGk3rP7j8FEe/m5Yo/z65gn6nP/cM39PxpzqKrL7A6fP6PPw==", + "dependencies": { + "@algolia/autocomplete-plugin-algolia-insights": "1.9.3", + "@algolia/autocomplete-shared": "1.9.3" + } + }, + "node_modules/@algolia/autocomplete-plugin-algolia-insights": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.9.3.tgz", + "integrity": "sha512-a/yTUkcO/Vyy+JffmAnTWbr4/90cLzw+CC3bRbhnULr/EM0fGNvM13oQQ14f2moLMcVDyAx/leczLlAOovhSZg==", + "dependencies": { + "@algolia/autocomplete-shared": "1.9.3" + }, + "peerDependencies": { + "search-insights": ">= 1 < 3" + } + }, + "node_modules/@algolia/autocomplete-preset-algolia": { + "version": "1.9.3", + "resolved": 
"https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.9.3.tgz", + "integrity": "sha512-d4qlt6YmrLMYy95n5TB52wtNDr6EgAIPH81dvvvW8UmuWRgxEtY0NJiPwl/h95JtG2vmRM804M0DSwMCNZlzRA==", + "dependencies": { + "@algolia/autocomplete-shared": "1.9.3" + }, + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/autocomplete-shared": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.9.3.tgz", + "integrity": "sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ==", + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/cache-browser-local-storage": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.24.0.tgz", + "integrity": "sha512-t63W9BnoXVrGy9iYHBgObNXqYXM3tYXCjDSHeNwnsc324r4o5UiVKUiAB4THQ5z9U5hTj6qUvwg/Ez43ZD85ww==", + "dependencies": { + "@algolia/cache-common": "4.24.0" + } + }, + "node_modules/@algolia/cache-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.24.0.tgz", + "integrity": "sha512-emi+v+DmVLpMGhp0V9q9h5CdkURsNmFC+cOS6uK9ndeJm9J4TiqSvPYVu+THUP8P/S08rxf5x2P+p3CfID0Y4g==" + }, + "node_modules/@algolia/cache-in-memory": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.24.0.tgz", + "integrity": "sha512-gDrt2so19jW26jY3/MkFg5mEypFIPbPoXsQGQWAi6TrCPsNOSEYepBMPlucqWigsmEy/prp5ug2jy/N3PVG/8w==", + "dependencies": { + "@algolia/cache-common": "4.24.0" + } + }, + "node_modules/@algolia/client-account": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.24.0.tgz", + "integrity": "sha512-adcvyJ3KjPZFDybxlqnf+5KgxJtBjwTPTeyG2aOyoJvx0Y8dUQAEOEVOJ/GBxX0WWNbmaSrhDURMhc+QeevDsA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-account/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-account/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-analytics": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.24.0.tgz", + "integrity": "sha512-y8jOZt1OjwWU4N2qr8G4AxXAzaa8DBvyHTWlHzX/7Me1LX8OayfgHexqrsL4vSBcoMmVw2XnVW9MhL+Y2ZDJXg==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + 
"node_modules/@algolia/client-analytics/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-analytics/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-common": { + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.5.3.tgz", + "integrity": "sha512-3rdSdreBL2LGYu4DWmUGlMhaGy1vy36Xp42LdbTFsW/y3bhW5viptMHI5A3PKT0hPEMZUn+te1iM/EWvLUuVGQ==", + "peer": true, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-personalization": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.24.0.tgz", + "integrity": "sha512-l5FRFm/yngztweU0HdUzz1rC4yoWCFo3IF+dVIVTfEPg906eZg5BOd1k0K6rZx5JzyyoP4LdmOikfkfGsKVE9w==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-personalization/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-search": { + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-5.5.3.tgz", + "integrity": "sha512-qrokD+uoNxchbiF9aP8niQd/9SZ6BgYg4WaesFaubHhr9DFvwGm4IePEMha8vQcc3fSsY6uL+gOtKB3J6RF0NQ==", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.5.3", + "@algolia/requester-browser-xhr": "5.5.3", + "@algolia/requester-fetch": "5.5.3", + "@algolia/requester-node-http": "5.5.3" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/events": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz", + "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==" + }, + "node_modules/@algolia/logger-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.24.0.tgz", + "integrity": "sha512-LLUNjkahj9KtKYrQhFKCzMx0BY3RnNP4FEtO+sBybCjJ73E8jNdaKJ/Dd8A/VA4imVHP5tADZ8pn5B8Ga/wTMA==" + }, + "node_modules/@algolia/logger-console": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.24.0.tgz", + "integrity": "sha512-X4C8IoHgHfiUROfoRCV+lzSy+LHMgkoEEU1BbKcsfnV0i0S20zyy0NLww9dwVHUWNfPPxdMU+/wKmLGYf96yTg==", + "dependencies": { + "@algolia/logger-common": "4.24.0" + } + }, + "node_modules/@algolia/recommend": { + "version": "4.24.0", + "resolved": 
"https://registry.npmjs.org/@algolia/recommend/-/recommend-4.24.0.tgz", + "integrity": "sha512-P9kcgerfVBpfYHDfVZDvvdJv0lEoCvzNlOy2nykyt5bK8TyieYyiD0lguIJdRZZYGre03WIAFf14pgE+V+IBlw==", + "dependencies": { + "@algolia/cache-browser-local-storage": "4.24.0", + "@algolia/cache-common": "4.24.0", + "@algolia/cache-in-memory": "4.24.0", + "@algolia/client-common": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/logger-common": "4.24.0", + "@algolia/logger-console": "4.24.0", + "@algolia/requester-browser-xhr": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/requester-node-http": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/requester-browser-xhr": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz", + "integrity": "sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/requester-node-http": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", + "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/@algolia/requester-browser-xhr": { + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.5.3.tgz", + "integrity": "sha512-LsfUPokiXEpvlYF7SwNjyyjkUX7IoW7oIhH6WkDUD4PCfEZkFbVplGQA0UrCiWOAbpb25P7mmP6+ldwjwqW6Kg==", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.5.3" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/requester-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.24.0.tgz", + "integrity": "sha512-k3CXJ2OVnvgE3HMwcojpvY6d9kgKMPRxs/kVohrwF5WMr2fnqojnycZkxPoEg+bXm8fi5BBfFmOqgYztRtHsQA==" + }, + "node_modules/@algolia/requester-fetch": { + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/@algolia/requester-fetch/-/requester-fetch-5.5.3.tgz", + "integrity": "sha512-RKaliEFHtVeD/fMxwrApkcI6ZxR+mU6pZna29r3NwVMpCXTJWWtlMpQmbr1RHzUsaAlpfv9pfGJN4nYPE8XWEg==", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.5.3" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/requester-node-http": { + "version": "5.5.3", + "resolved": 
"https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.5.3.tgz", + "integrity": "sha512-2wU+HlTVrVce7BMW2b3Gd62btk8B0jBbuKYYzu3OFeBD/aZa88eHABtjcjQCdw3x+wvkIPEc56UsZx9eHYLebg==", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.5.3" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/transporter": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.24.0.tgz", + "integrity": "sha512-86nI7w6NzWxd1Zp9q3413dRshDqAzSbsQjhcDhPIatEFiZrL1/TjnHL8S7jVKFePlIMzDsZWXAXwXzcok9c5oA==", + "dependencies": { + "@algolia/cache-common": "4.24.0", + "@algolia/logger-common": "4.24.0", + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.7.tgz", + "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==", + "dependencies": { + "@babel/highlight": "^7.24.7", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.24.7.tgz", + "integrity": "sha512-qJzAIcv03PyaWqxRgO4mSU3lihncDT296vnyuE2O8uA4w3UHWI4S3hgeZd1L8W1Bft40w9JxJ2b412iDUFFRhw==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.24.7.tgz", + "integrity": "sha512-nykK+LEK86ahTkX/3TgauT0ikKoNCfKHEaZYTUVupJdTLzGNvrblu4u6fa7DhZONAltdf8e662t/abY8idrd/g==", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.24.7", + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helpers": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/template": "^7.24.7", + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.24.7.tgz", + "integrity": "sha512-oipXieGC3i45Y1A41t4tAqpnEZWgB/lC6Ehh6+rOviR5XWpTtMmLN+fGjz9vOiNRt0p6RtO6DtD0pdU3vpqdSA==", + "dependencies": { + "@babel/types": "^7.24.7", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + 
"version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.24.7.tgz", + "integrity": "sha512-BaDeOonYvhdKw+JoMVkAixAAJzG2jVPIwWoKBPdYuY9b452e2rPuI9QPYh3KpofZ3pW2akOmwZLOiOsHMiqRAg==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.24.7.tgz", + "integrity": "sha512-xZeCVVdwb4MsDBkkyZ64tReWYrLRHlMN72vP7Bdm3OUOuyFZExhsHUUnuWnm2/XOlAJzR0LfPpB56WXZn0X/lA==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.24.7.tgz", + "integrity": "sha512-ctSdRHBi20qWOfy27RUb4Fhp07KSJ3sXcuSvTrXrc4aG8NSYDo1ici3Vhg9bg69y5bj0Mr1lh0aeEgTvc12rMg==", + "dependencies": { + "@babel/compat-data": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "browserslist": "^4.22.2", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.24.7.tgz", + "integrity": "sha512-kTkaDl7c9vO80zeX1rJxnuRpEsD5tA81yh11X1gQo+PhSti3JS+7qeZo9U4RHobKRiFPKaGK3svUAeb8D0Q7eg==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-member-expression-to-functions": "^7.24.7", + "@babel/helper-optimise-call-expression": "^7.24.7", + "@babel/helper-replace-supers": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.24.7.tgz", + "integrity": "sha512-03TCmXy2FtXJEZfbXDTSqq1fRJArk7lX9DOFC/47VthYcxyIOx+eXQmdo6DOQvrbpIix+KfXwvuXdFDZHxt+rA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "regexpu-core": "^5.3.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + 
"node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.2.tgz", + "integrity": "sha512-LV76g+C502biUK6AyZ3LK10vDpDyCzZnhZFXkH1L75zHPj68+qc8Zfpx2th+gzwA2MzyK+1g/3EPl62yFnVttQ==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.6", + "@babel/helper-plugin-utils": "^7.22.5", + "debug": "^4.1.1", + "lodash.debounce": "^4.0.8", + "resolve": "^1.14.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/helper-environment-visitor": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.24.7.tgz", + "integrity": "sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.24.7.tgz", + "integrity": "sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==", + "dependencies": { + "@babel/template": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-hoist-variables": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.24.7.tgz", + "integrity": "sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.24.7.tgz", + "integrity": "sha512-LGeMaf5JN4hAT471eJdBs/GK1DoYIJ5GCtZN/EsL6KUiiDZOvO/eKE11AMZJa2zP4zk4qe9V2O/hxAmkRc8p6w==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz", + "integrity": "sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.24.7.tgz", + "integrity": "sha512-1fuJEwIrp+97rM4RWdO+qrRsZlAeL1lQJoPqtCYWv0NL115XM93hIH4CSRln2w52SqvmY5hqdtauB6QFCDiZNQ==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-simple-access": "^7.24.7", + "@babel/helper-split-export-declaration": 
"^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.24.7.tgz", + "integrity": "sha512-jKiTsW2xmWwxT1ixIdfXUZp+P5yURx2suzLZr5Hi64rURpDYdMW0pv+Uf17EYk2Rd428Lx4tLsnjGJzYKDM/6A==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.8.tgz", + "integrity": "sha512-FFWx5142D8h2Mgr/iPVGH5G7w6jDn4jUSpZTyDnQO0Yn7Ks2Kuz6Pci8H6MPCoUJegd/UZQ3tAvfLCxQSnWWwg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.24.7.tgz", + "integrity": "sha512-9pKLcTlZ92hNZMQfGCHImUpDOlAgkkpqalWEeftW5FBya75k8Li2ilerxkM/uBEj01iBZXcCIB/bwvDYgWyibA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-wrap-function": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.24.7.tgz", + "integrity": "sha512-qTAxxBM81VEyoAY0TtLrx1oAEJc09ZK67Q9ljQToqCnA+55eNwCORaxlKyu+rNfX86o8OXRUSNUnrtsAZXM9sg==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-member-expression-to-functions": "^7.24.7", + "@babel/helper-optimise-call-expression": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.24.7.tgz", + "integrity": "sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.24.7.tgz", + "integrity": "sha512-IO+DLT3LQUElMbpzlatRASEyQtfhSE0+m465v++3jyyXeBTBUjtVZg28/gHeV5mrTJqvEKhKroBGAvhW+qPHiQ==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.7.tgz", + "integrity": "sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.7.tgz", + "integrity": "sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz", + "integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.24.7.tgz", + "integrity": "sha512-yy1/KvjhV/ZCL+SM7hBrvnZJ3ZuT9OuZgIJAGpPEToANvc3iM6iDvBnRjtElWibHU6n8/LPR/EjX9EtIEYO3pw==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.24.7.tgz", + "integrity": "sha512-N9JIYk3TD+1vq/wn77YnJOqMtfWhNewNE+DJV4puD2X7Ew9J4JvrzrFDfTfyv5EgEXVy9/Wt8QiOErzEmv5Ifw==", + "dependencies": { + "@babel/helper-function-name": "^7.24.7", + "@babel/template": "^7.24.7", + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.24.7.tgz", + "integrity": "sha512-NlmJJtvcw72yRJRcnCmGvSi+3jDEg8qFu3z0AFoymmzLx5ERVWyzd9kVXr7Th9/8yIJi2Zc6av4Tqz3wFs8QWg==", + "dependencies": { + "@babel/template": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.7.tgz", + "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==", + "dependencies": { + "@babel/helper-validator-identifier": "^7.24.7", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/highlight/node_modules/color-name": { + "version": "1.1.3", + "resolved": 
"https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/highlight/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/parser": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.7.tgz", + "integrity": "sha512-9uUYRm6OqQrCqQdG1iCBwBPZgN8ciDBro2nIOFaiRz1/BCxaI7CNvQbDHvsArAC7Tw9Hda/B3U+6ui9u4HWXPw==", + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-firefox-class-in-computed-class-key": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.24.7.tgz", + "integrity": "sha512-TiT1ss81W80eQsN+722OaeQMY/G4yTb4G9JrqeiDADs3N8lbPMGldWi9x8tyqCW5NLx1Jh2AvkE6r6QvEltMMQ==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.24.7.tgz", + "integrity": "sha512-unaQgZ/iRu/By6tsjMZzpeBZjChYfLYry6HrEXPoz3KmfF0sVBQ1l8zKMQ4xRGLWVsjuvB8nQfjNP/DcfEOCsg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.24.7.tgz", + "integrity": "sha512-+izXIbke1T33mY4MSNnrqhPXDz01WYhEf3yF5NbnUtkiNnm+XBZJl3kNfoK6NKmYlz/D07+l2GWVK/QfDkNCuQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/plugin-transform-optional-chaining": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { + "version": "7.24.7", + 
"resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.24.7.tgz", + "integrity": "sha512-utA4HuR6F4Vvcr+o4DnjL8fCOlgRFGbeeBEGNg3ZTrLFw6VWG5XmUrvcQ0FjIYMU2ST4XcR2Wsp7t9qOAPnxMg==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-export-namespace-from": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", + "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.24.7.tgz", + "integrity": 
"sha512-Ec3NRUMoi8gskrkBe3fNmEQfxDvY8bgfQpz6jlk/41kX9eUjvpyqWU7PBP/pLAvMaSQjbMNKJmvX57jP+M6bPg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.24.7.tgz", + "integrity": "sha512-hbX+lKKeUMGihnK8nvKqmXBInriT3GVjzXKFriV3YC6APGxMbP8RZNFwy91+hocLXq90Mta+HshoB31802bb8A==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.24.7.tgz", + "integrity": "sha512-6ddciUPe/mpMnOKv/U+RSd2vvVy+Yw/JfBB0ZHYjEZt9NLHmCUylNYlsbqCCS1Bffjlb0fCwC9Vqz+sBz6PsiQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + 
"version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.24.7.tgz", + "integrity": "sha512-c/+fVeJBB0FeKsFvwytYiUD+LBvhHjGSI0g446PRGdSVGZLRNArBUno2PETbAly3tpiNAQR5XaZ+JslxkotsbA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.24.7.tgz", + 
"integrity": "sha512-Dt9LQs6iEY++gXUwY03DNFat5C2NbO48jj+j/bSAz6b3HgPs39qcPiYt77fDObIcFwj3/C2ICX9YMwGflUoSHQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.24.7.tgz", + "integrity": "sha512-o+iF77e3u7ZS4AoAuJvapz9Fm001PuD2V3Lp6OSE4FYQke+cSewYtnek+THqGRWyQloRCyvWL1OkyfNEl9vr/g==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-remap-async-to-generator": "^7.24.7", + "@babel/plugin-syntax-async-generators": "^7.8.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.24.7.tgz", + "integrity": "sha512-SQY01PcJfmQ+4Ash7NE+rpbLFbmqA2GPIgqzxfFTL4t1FKRq4zTms/7htKpoCUI9OcFYgzqfmCdH53s6/jn5fA==", + "dependencies": { + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-remap-async-to-generator": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.24.7.tgz", + "integrity": "sha512-yO7RAz6EsVQDaBH18IDJcMB1HnrUn2FJ/Jslc/WtPPWcjhpUJXU/rjbwmluzp7v/ZzWcEhTMXELnnsz8djWDwQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.24.7.tgz", + "integrity": "sha512-Nd5CvgMbWc+oWzBsuaMcbwjJWAcp5qzrbg69SZdHSP7AMY0AbWFqFO0WTFCA1jxhMCwodRwvRec8k0QUbZk7RQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.24.7.tgz", + "integrity": "sha512-vKbfawVYayKcSeSR5YYzzyXvsDFWU2mD8U5TFeXtbCPLFUqe7GyCgvO6XDHzje862ODrOwy6WCPmKeWHbCFJ4w==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.24.7.tgz", + "integrity": "sha512-HMXK3WbBPpZQufbMG4B46A90PkuuhN9vBCb5T8+VAHqvAqvcLi+2cKoukcpmUYkszLhScU3l1iudhrks3DggRQ==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.7", + 
"@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-class-static-block": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, + "node_modules/@babel/plugin-transform-classes": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.24.7.tgz", + "integrity": "sha512-CFbbBigp8ln4FU6Bpy6g7sE8B/WmCmzvivzUC6xDAdWVsjYTXijpuuGJmYkAaoWAzcItGKT3IOAbxRItZ5HTjw==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-replace-supers": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.24.7.tgz", + "integrity": "sha512-25cS7v+707Gu6Ds2oY6tCkUwsJ9YIDbggd9+cu9jzzDgiNq7hR/8dkzxWfKWnTic26vsI3EsCXNd4iEB6e8esQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/template": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.24.7.tgz", + "integrity": "sha512-19eJO/8kdCQ9zISOf+SEUJM/bAUIsvY3YDnXZTupUCQ8LgrWnsG/gFB9dvXqdXnRXMAM8fvt7b0CBKQHNGy1mw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.24.7.tgz", + "integrity": "sha512-ZOA3W+1RRTSWvyqcMJDLqbchh7U4NRGqwRfFSVbOLS/ePIP4vHB5e8T8eXcuqyN1QkgKyj5wuW0lcS85v4CrSw==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.24.7.tgz", + "integrity": "sha512-JdYfXyCRihAe46jUIliuL2/s0x0wObgwwiGxw/UbgJBr20gQBThrokO4nYKgWkD7uBaqM7+9x5TU7NkExZJyzw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.24.7.tgz", + "integrity": "sha512-sc3X26PhZQDb3JhORmakcbvkeInvxz+A8oda99lj7J60QRuPZvNAk9wQlTBS1ZynelDrDmTU4pw1tyc5d5ZMUg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-dynamic-import": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + 
"peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.24.7.tgz", + "integrity": "sha512-Rqe/vSc9OYgDajNIK35u7ot+KeCoetqQYFXM4Epf7M7ez3lWlOjrDjrwMei6caCVhfdw+mIKD4cgdGNy5JQotQ==", + "dependencies": { + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.24.7.tgz", + "integrity": "sha512-v0K9uNYsPL3oXZ/7F9NNIbAj2jv1whUEtyA6aujhekLs56R++JDQuzRcP2/z4WX5Vg/c5lE9uWZA0/iUoFhLTA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.24.7.tgz", + "integrity": "sha512-wo9ogrDG1ITTTBsy46oGiN1dS9A7MROBTcYsfS8DtsImMkHk9JXJ3EWQM6X2SUw4x80uGPlwj0o00Uoc6nEE3g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-function-name": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.24.7.tgz", + "integrity": "sha512-U9FcnA821YoILngSmYkW6FjyQe2TyZD5pHt4EVIhmcTkrJw/3KqcrRSxuOo5tFZJi7TE19iDyI1u+weTI7bn2w==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.24.7.tgz", + "integrity": "sha512-2yFnBGDvRuxAaE/f0vfBKvtnvvqU8tGpMHqMNpTN2oWMKIR3NqFkjaAgGwawhqK/pIN2T3XdjGPdaG0vDhOBGw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-json-strings": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.24.7.tgz", + "integrity": "sha512-vcwCbb4HDH+hWi8Pqenwnjy+UiklO4Kt1vfspcQYFhJdpthSnW8XvWGyDZWKNVrVbVViI/S7K9PDJZiUmP2fYQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.24.7.tgz", + "integrity": "sha512-4D2tpwlQ1odXmTEIFWy9ELJcZHqrStlzK/dAOWYyxX3zT0iXQB6banjgeOJQXzEc4S0E0a5A+hahxPaEFYftsw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.24.7.tgz", + "integrity": "sha512-T/hRC1uqrzXMKLQ6UCwMT85S3EvqaBXDGf0FaMf4446Qx9vKwlghvee0+uuZcDUCZU5RuNi4781UQ7R308zzBw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.24.7.tgz", + "integrity": "sha512-9+pB1qxV3vs/8Hdmz/CulFB8w2tuu6EB94JZFsjdqxQokwGa9Unap7Bo2gGBGIvPmDIVvQrom7r5m/TCDMURhg==", + "dependencies": { + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.24.7.tgz", + "integrity": "sha512-iFI8GDxtevHJ/Z22J5xQpVqFLlMNstcLXh994xifFwxxGslr2ZXXLWgtBeLctOD63UFDArdvN6Tg8RFw+aEmjQ==", + "dependencies": { + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-simple-access": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.24.7.tgz", + "integrity": "sha512-GYQE0tW7YoaN13qFh3O1NCY4MPkUiAH3fiF7UcV/I3ajmDKEdG3l+UOcbAm4zUE3gnvUU+Eni7XrVKo9eO9auw==", + "dependencies": { + "@babel/helper-hoist-variables": "^7.24.7", + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.24.7.tgz", + "integrity": "sha512-3aytQvqJ/h9z4g8AsKPLvD4Zqi2qT+L3j7XoFFu1XBlZWEl2/1kWnhmAbxpLgPrHSY0M6UA02jyTiwUVtiKR6A==", + "dependencies": { + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.24.7.tgz", + "integrity": "sha512-/jr7h/EWeJtk1U/uz2jlsCioHkZk1JJZVcc8oQsJ1dUlaJD83f4/6Zeh2aHt9BIFokHIsSeDfhUmju0+1GPd6g==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-new-target": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.24.7.tgz", + "integrity": "sha512-RNKwfRIXg4Ls/8mMTza5oPF5RkOW8Wy/WgMAp1/F1yZ8mMbtwXW+HDoJiOsagWrAhI5f57Vncrmr9XeT4CVapA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.24.7.tgz", + "integrity": "sha512-Ts7xQVk1OEocqzm8rHMXHlxvsfZ0cEF2yomUqpKENHWMF4zKk175Y4q8H5knJes6PgYad50uuRmt3UJuhBw8pQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.24.7.tgz", + "integrity": "sha512-e6q1TiVUzvH9KRvicuxdBTUj4AdKSRwzIyFFnfnezpCfP2/7Qmbb8qbU2j7GODbl4JMkblitCQjKYUaX/qkkwA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.24.7.tgz", + "integrity": "sha512-4QrHAr0aXQCEFni2q4DqKLD31n2DL+RxcwnNjDFkSG0eNQ/xCavnRkfCUjsyqGC2OviNJvZOF/mQqZBw7i2C5Q==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.24.7.tgz", + "integrity": "sha512-A/vVLwN6lBrMFmMDmPPz0jnE6ZGx7Jq7d6sT/Ev4H65RER6pZ+kczlf1DthF5N0qaPHBsI7UXiE8Zy66nmAovg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-replace-supers": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.24.7.tgz", + "integrity": 
"sha512-uLEndKqP5BfBbC/5jTwPxLh9kqPWWgzN/f8w6UwAIirAEqiIVJWWY312X72Eub09g5KF9+Zn7+hT7sDxmhRuKA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.24.7.tgz", + "integrity": "sha512-tK+0N9yd4j+x/4hxF3F0e0fu/VdcxU18y5SevtyM/PCFlQvXbR0Zmlo2eBrKtVipGNFzpq56o8WsIIKcJFUCRQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.24.7.tgz", + "integrity": "sha512-yGWW5Rr+sQOhK0Ot8hjDJuxU3XLRQGflvT4lhlSY0DFvdb3TwKaY26CJzHtYllU0vT9j58hc37ndFPsqT1SrzA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.24.7.tgz", + "integrity": "sha512-COTCOkG2hn4JKGEKBADkA8WNb35TGkkRbI5iT845dB+NyqgO8Hn+ajPbSnIQznneJTa3d30scb6iz/DhH8GsJQ==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.24.7.tgz", + "integrity": "sha512-9z76mxwnwFxMyxZWEgdgECQglF2Q7cFLm0kMf8pGwt+GSJsY0cONKj/UuO4bOH0w/uAel3ekS4ra5CEAyJRmDA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.24.7.tgz", + "integrity": "sha512-EMi4MLQSHfd2nrCqQEWxFdha2gBCqU4ZcCng4WBGZ5CJL4bBRW0ptdqqDdeirGZcpALazVVNJqRmsO8/+oNCBA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-constant-elements": { + "version": "7.25.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.25.1.tgz", + "integrity": "sha512-SLV/giH/V4SmloZ6Dt40HjTGTAIkxn33TVIHxNGNvo8ezMhrxBkzisj4op1KZYPIOHFLqhv60OHvX+YRu4xbmQ==", + "dependencies": { + 
"@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-display-name": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.24.7.tgz", + "integrity": "sha512-H/Snz9PFxKsS1JLI4dJLtnJgCJRoo0AUm3chP6NYr+9En1JMKloheEiLIhlp5MDVznWo+H3AAC1Mc8lmUEpsgg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.24.7.tgz", + "integrity": "sha512-+Dj06GDZEFRYvclU6k4bme55GKBEWUmByM/eoKuqg4zTNQHiApWRhQph5fxQB2wAEFvRzL1tOEj1RJ19wJrhoA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-jsx": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-development": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.24.7.tgz", + "integrity": "sha512-QG9EnzoGn+Qar7rxuW+ZOsbWOt56FvvI93xInqsZDC5fsekx1AlIO4KIJ5M+D0p0SqSH156EpmZyXq630B8OlQ==", + "dependencies": { + "@babel/plugin-transform-react-jsx": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-pure-annotations": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.24.7.tgz", + "integrity": "sha512-PLgBVk3fzbmEjBJ/u8kFzOqS9tUeDjiaWud/rRym/yjCo/M9cASPlnrd2ZmmZpQT40fOOrvR8jh+n8jikrOhNA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.24.7.tgz", + "integrity": "sha512-lq3fvXPdimDrlg6LWBoqj+r/DEWgONuwjuOuQCSYgRroXDH/IdM1C0IZf59fL5cHLpjEH/O6opIRBbqv7ELnuA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "regenerator-transform": "^0.15.2" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.24.7.tgz", + "integrity": "sha512-0DUq0pHcPKbjFZCfTss/pGkYMfy3vFWydkUBd9r0GHpIyfs2eCDENvqadMycRS9wZCXR41wucAfJHJmwA0UmoQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.24.7.tgz", + "integrity": "sha512-YqXjrk4C+a1kZjewqt+Mmu2UuV1s07y8kqcUf4qYLnoqemhR4gRQikhdAhSVJioMjVTu6Mo6pAbaypEA3jY6fw==", + "dependencies": { + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.10.1", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.24.7.tgz", + "integrity": "sha512-KsDsevZMDsigzbA09+vacnLpmPH4aWjcZjXdyFKGzpplxhbeB4wYtury3vglQkg6KM/xEPKt73eCjPPf1PgXBA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-spread": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.24.7.tgz", + "integrity": "sha512-x96oO0I09dgMDxJaANcRyD4ellXFLLiWhuwDxKZX5g2rWP1bTPkBSwCYv96VDXVT1bD9aPj8tppr5ITIh8hBng==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.24.7.tgz", + "integrity": "sha512-kHPSIJc9v24zEml5geKg9Mjx5ULpfncj0wRpYtxbvKyTtHCYDkVE3aHQ03FrpEo4gEe2vrJJS1Y9CJTaThA52g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.24.7.tgz", + "integrity": "sha512-AfDTQmClklHCOLxtGoP7HkeMw56k1/bTQjwsfhL6pppo/M4TOBSq+jjBUBLmV/4oeFg4GWMavIl44ZeCtmmZTw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.24.7.tgz", + "integrity": "sha512-VtR8hDy7YLB7+Pet9IarXjg/zgCMSF+1mNS/EQEiEaUPoFXCVsHG64SIxcaaI2zJgRiv+YmgaQESUfWAdbjzgg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typescript": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.24.7.tgz", + "integrity": "sha512-iLD3UNkgx2n/HrjBesVbYX6j0yqn/sJktvbtKKgcaLIQ4bTTQ8obAypc1VpyHPD2y4Phh9zHOaAt8e/L14wCpw==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-typescript": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.24.7.tgz", + "integrity": "sha512-U3ap1gm5+4edc2Q/P+9VrBNhGkfnf+8ZqppY71Bo/pzZmXhhLdqgaUl6cuB07O1+AQJtCLfaOmswiNbSQ9ivhw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.24.7.tgz", + "integrity": "sha512-uH2O4OV5M9FZYQrwc7NdVmMxQJOCCzFeYudlZSzUAHRFeOujQefa92E74TQDVskNHCzOXoigEuoyzHDhaEaK5w==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.24.7.tgz", + "integrity": "sha512-hlQ96MBZSAXUq7ltkjtu3FJCCSMx/j629ns3hA3pXnBXjanNP0LHi+JpPeA81zaWgVK1VGH95Xuy7u0RyQ8kMg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.24.7.tgz", + "integrity": "sha512-2G8aAvF4wy1w/AGZkemprdGMRg5o6zPNhbHVImRz3lss55TYCBd6xStN19rt8XJHq20sqV0JbyWjOWwQRwV/wg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.24.7.tgz", + "integrity": "sha512-1YZNsc+y6cTvWlDHidMBsQZrZfEFjRIo/BZCT906PMdzOyXtSLTgqGdrpcuTDCXyd11Am5uQULtDIcCfnTc8fQ==", + "dependencies": { + "@babel/compat-data": "^7.24.7", + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "@babel/plugin-bugfix-firefox-class-in-computed-class-key": "^7.24.7", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.24.7", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.24.7", + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.24.7", + 
"@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", + "@babel/plugin-syntax-import-assertions": "^7.24.7", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.24.7", + "@babel/plugin-transform-async-generator-functions": "^7.24.7", + "@babel/plugin-transform-async-to-generator": "^7.24.7", + "@babel/plugin-transform-block-scoped-functions": "^7.24.7", + "@babel/plugin-transform-block-scoping": "^7.24.7", + "@babel/plugin-transform-class-properties": "^7.24.7", + "@babel/plugin-transform-class-static-block": "^7.24.7", + "@babel/plugin-transform-classes": "^7.24.7", + "@babel/plugin-transform-computed-properties": "^7.24.7", + "@babel/plugin-transform-destructuring": "^7.24.7", + "@babel/plugin-transform-dotall-regex": "^7.24.7", + "@babel/plugin-transform-duplicate-keys": "^7.24.7", + "@babel/plugin-transform-dynamic-import": "^7.24.7", + "@babel/plugin-transform-exponentiation-operator": "^7.24.7", + "@babel/plugin-transform-export-namespace-from": "^7.24.7", + "@babel/plugin-transform-for-of": "^7.24.7", + "@babel/plugin-transform-function-name": "^7.24.7", + "@babel/plugin-transform-json-strings": "^7.24.7", + "@babel/plugin-transform-literals": "^7.24.7", + "@babel/plugin-transform-logical-assignment-operators": "^7.24.7", + "@babel/plugin-transform-member-expression-literals": "^7.24.7", + "@babel/plugin-transform-modules-amd": "^7.24.7", + "@babel/plugin-transform-modules-commonjs": "^7.24.7", + "@babel/plugin-transform-modules-systemjs": "^7.24.7", + "@babel/plugin-transform-modules-umd": "^7.24.7", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.24.7", + "@babel/plugin-transform-new-target": "^7.24.7", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.24.7", + "@babel/plugin-transform-numeric-separator": "^7.24.7", + "@babel/plugin-transform-object-rest-spread": "^7.24.7", + "@babel/plugin-transform-object-super": "^7.24.7", + "@babel/plugin-transform-optional-catch-binding": "^7.24.7", + "@babel/plugin-transform-optional-chaining": "^7.24.7", + "@babel/plugin-transform-parameters": "^7.24.7", + "@babel/plugin-transform-private-methods": "^7.24.7", + "@babel/plugin-transform-private-property-in-object": "^7.24.7", + "@babel/plugin-transform-property-literals": "^7.24.7", + "@babel/plugin-transform-regenerator": "^7.24.7", + "@babel/plugin-transform-reserved-words": "^7.24.7", + "@babel/plugin-transform-shorthand-properties": "^7.24.7", + "@babel/plugin-transform-spread": "^7.24.7", + "@babel/plugin-transform-sticky-regex": "^7.24.7", + "@babel/plugin-transform-template-literals": 
"^7.24.7", + "@babel/plugin-transform-typeof-symbol": "^7.24.7", + "@babel/plugin-transform-unicode-escapes": "^7.24.7", + "@babel/plugin-transform-unicode-property-regex": "^7.24.7", + "@babel/plugin-transform-unicode-regex": "^7.24.7", + "@babel/plugin-transform-unicode-sets-regex": "^7.24.7", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.10.4", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "core-js-compat": "^3.31.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-env/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/preset-react": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.24.7.tgz", + "integrity": "sha512-AAH4lEkpmzFWrGVlHaxJB7RLH21uPQ9+He+eFLWHmF9IuFQVugz8eAsamaW0DXRrTfco5zj1wWtpdcXJUOfsag==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "@babel/plugin-transform-react-display-name": "^7.24.7", + "@babel/plugin-transform-react-jsx": "^7.24.7", + "@babel/plugin-transform-react-jsx-development": "^7.24.7", + "@babel/plugin-transform-react-pure-annotations": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-typescript": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.24.7.tgz", + "integrity": "sha512-SyXRe3OdWwIwalxDg5UtJnJQO+YPcTfwiIY2B0Xlddh9o7jpWLvv8X1RthIeDOxQ+O1ML5BLPCONToObyVQVuQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "@babel/plugin-syntax-jsx": "^7.24.7", + "@babel/plugin-transform-modules-commonjs": "^7.24.7", + "@babel/plugin-transform-typescript": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" + }, + "node_modules/@babel/runtime": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.7.tgz", + "integrity": "sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw==", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime-corejs3": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.24.7.tgz", + "integrity": "sha512-eytSX6JLBY6PVAeQa2bFlDx/7Mmln/gaEpsit5a3WEvjGfiIytEsgAwuIXCPM0xvw0v0cJn3ilq0/TvXrW0kgA==", + "dependencies": { + "core-js-pure": "^3.30.2", + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.24.7.tgz", + "integrity": "sha512-jYqfPrU9JTF0PmPy1tLYHW4Mp4KlgxJD9l2nP9fD6yT/ICi554DmrWBAEYpIelzjHf1msDP3PxJIRt/nFNfBig==", + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.24.7.tgz", + "integrity": "sha512-yb65Ed5S/QAcewNPh0nZczy9JdYXkkAbIsEo+P7BE7yO3txAY30Y/oPa3QkQ5It3xVG2kpKMg9MsdxZaO31uKA==", + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-hoist-variables": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/types": "^7.24.7", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.24.7.tgz", + "integrity": "sha512-XEFXSlxiG5td2EJRe8vOmRbaXVgfcBlszKujvVmWIK/UpywWljQCfzAv3RQCGujWQ1RD4YYWEAqDXfuJiy8f5Q==", + "dependencies": { + "@babel/helper-string-parser": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@docsearch/css": { + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.6.1.tgz", + "integrity": "sha512-VtVb5DS+0hRIprU2CO6ZQjK2Zg4QU5HrDM1+ix6rT0umsYvFvatMAnf97NHZlVWDaaLlx7GRfR/7FikANiM2Fg==" + }, + "node_modules/@docsearch/react": { + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.6.1.tgz", + "integrity": "sha512-qXZkEPvybVhSXj0K7U3bXc233tk5e8PfhoZ6MhPOiik/qUQxYC+Dn9DnoS7CxHQQhHfCvTiN0eY9M12oRghEXw==", + "dependencies": { + "@algolia/autocomplete-core": "1.9.3", + "@algolia/autocomplete-preset-algolia": "1.9.3", + "@docsearch/css": "3.6.1", + "algoliasearch": "^4.19.1" + }, + "peerDependencies": { + "@types/react": ">= 16.8.0 < 19.0.0", + "react": ">= 16.8.0 < 19.0.0", + "react-dom": ">= 16.8.0 < 19.0.0", + "search-insights": ">= 1 < 3" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + }, + "search-insights": { + "optional": true + } + } + }, 
+ "node_modules/@docusaurus/core": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.5.2.tgz", + "integrity": "sha512-4Z1WkhCSkX4KO0Fw5m/Vuc7Q3NxBG53NE5u59Rs96fWkMPZVSrzEPP16/Nk6cWb/shK7xXPndTmalJtw7twL/w==", + "dependencies": { + "@babel/core": "^7.23.3", + "@babel/generator": "^7.23.3", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-transform-runtime": "^7.22.9", + "@babel/preset-env": "^7.22.9", + "@babel/preset-react": "^7.22.5", + "@babel/preset-typescript": "^7.22.5", + "@babel/runtime": "^7.22.6", + "@babel/runtime-corejs3": "^7.22.6", + "@babel/traverse": "^7.22.8", + "@docusaurus/cssnano-preset": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "autoprefixer": "^10.4.14", + "babel-loader": "^9.1.3", + "babel-plugin-dynamic-import-node": "^2.3.3", + "boxen": "^6.2.1", + "chalk": "^4.1.2", + "chokidar": "^3.5.3", + "clean-css": "^5.3.2", + "cli-table3": "^0.6.3", + "combine-promises": "^1.1.0", + "commander": "^5.1.0", + "copy-webpack-plugin": "^11.0.0", + "core-js": "^3.31.1", + "css-loader": "^6.8.1", + "css-minimizer-webpack-plugin": "^5.0.1", + "cssnano": "^6.1.2", + "del": "^6.1.1", + "detect-port": "^1.5.1", + "escape-html": "^1.0.3", + "eta": "^2.2.0", + "eval": "^0.1.8", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "html-minifier-terser": "^7.2.0", + "html-tags": "^3.3.1", + "html-webpack-plugin": "^5.5.3", + "leven": "^3.1.0", + "lodash": "^4.17.21", + "mini-css-extract-plugin": "^2.7.6", + "p-map": "^4.0.0", + "postcss": "^8.4.26", + "postcss-loader": "^7.3.3", + "prompts": "^2.4.2", + "react-dev-utils": "^12.0.1", + "react-helmet-async": "^1.3.0", + "react-loadable": "npm:@docusaurus/react-loadable@6.0.0", + "react-loadable-ssr-addon-v5-slorber": "^1.0.1", + "react-router": "^5.3.4", + "react-router-config": "^5.1.1", + "react-router-dom": "^5.3.4", + "rtl-detect": "^1.0.4", + "semver": "^7.5.4", + "serve-handler": "^6.1.5", + "shelljs": "^0.8.5", + "terser-webpack-plugin": "^5.3.9", + "tslib": "^2.6.0", + "update-notifier": "^6.0.2", + "url-loader": "^4.1.1", + "webpack": "^5.88.1", + "webpack-bundle-analyzer": "^4.9.0", + "webpack-dev-server": "^4.15.1", + "webpack-merge": "^5.9.0", + "webpackbar": "^5.0.2" + }, + "bin": { + "docusaurus": "bin/docusaurus.mjs" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@mdx-js/react": "^3.0.0", + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/cssnano-preset": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.5.2.tgz", + "integrity": "sha512-D3KiQXOMA8+O0tqORBrTOEQyQxNIfPm9jEaJoALjjSjc2M/ZAWcUfPQEnwr2JB2TadHw2gqWgpZckQmrVWkytA==", + "dependencies": { + "cssnano-preset-advanced": "^6.1.2", + "postcss": "^8.4.38", + "postcss-sort-media-queries": "^5.2.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/logger": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.5.2.tgz", + "integrity": "sha512-LHC540SGkeLfyT3RHK3gAMK6aS5TRqOD4R72BEU/DE2M/TY8WwEUAMY576UUc/oNJXv8pGhBmQB6N9p3pt8LQw==", + "dependencies": { + "chalk": "^4.1.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/mdx-loader": { + "version": "3.5.2", + "resolved": 
"https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.5.2.tgz", + "integrity": "sha512-ku3xO9vZdwpiMIVd8BzWV0DCqGEbCP5zs1iHfKX50vw6jX8vQo0ylYo1YJMZyz6e+JFJ17HYHT5FzVidz2IflA==", + "dependencies": { + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "@mdx-js/mdx": "^3.0.0", + "@slorber/remark-comment": "^1.0.0", + "escape-html": "^1.0.3", + "estree-util-value-to-estree": "^3.0.1", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "image-size": "^1.0.2", + "mdast-util-mdx": "^3.0.0", + "mdast-util-to-string": "^4.0.0", + "rehype-raw": "^7.0.0", + "remark-directive": "^3.0.0", + "remark-emoji": "^4.0.0", + "remark-frontmatter": "^5.0.0", + "remark-gfm": "^4.0.0", + "stringify-object": "^3.3.0", + "tslib": "^2.6.0", + "unified": "^11.0.3", + "unist-util-visit": "^5.0.0", + "url-loader": "^4.1.1", + "vfile": "^6.0.1", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/module-type-aliases": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.5.2.tgz", + "integrity": "sha512-Z+Xu3+2rvKef/YKTMxZHsEXp1y92ac0ngjDiExRdqGTmEKtCUpkbNYH8v5eXo5Ls+dnW88n6WTa+Q54kLOkwPg==", + "dependencies": { + "@docusaurus/types": "3.5.2", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "@types/react-router-dom": "*", + "react-helmet-async": "*", + "react-loadable": "npm:@docusaurus/react-loadable@6.0.0" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/@docusaurus/plugin-content-blog": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.5.2.tgz", + "integrity": "sha512-R7ghWnMvjSf+aeNDH0K4fjyQnt5L0KzUEnUhmf1e3jZrv3wogeytZNN6n7X8yHcMsuZHPOrctQhXWnmxu+IRRg==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "cheerio": "1.0.0-rc.12", + "feed": "^4.2.2", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "reading-time": "^1.5.0", + "srcset": "^4.0.0", + "tslib": "^2.6.0", + "unist-util-visit": "^5.0.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/plugin-content-docs": "*", + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-docs": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.5.2.tgz", + "integrity": "sha512-Bt+OXn/CPtVqM3Di44vHjE7rPCEsRCB/DMo2qoOuozB9f7+lsdrHvD0QCHdBs0uhz6deYJDppAr2VgqybKPlVQ==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "@types/react-router-config": "^5.0.7", + "combine-promises": "^1.1.0", + "fs-extra": "^11.1.1", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "tslib": "^2.6.0", + "utility-types": "^3.10.0", + 
"webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-pages": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.5.2.tgz", + "integrity": "sha512-WzhHjNpoQAUz/ueO10cnundRz+VUtkjFhhaQ9jApyv1a46FPURO4cef89pyNIOMny1fjDz/NUN2z6Yi+5WUrCw==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "fs-extra": "^11.1.1", + "tslib": "^2.6.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-debug": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.5.2.tgz", + "integrity": "sha512-kBK6GlN0itCkrmHuCS6aX1wmoWc5wpd5KJlqQ1FyrF0cLDnvsYSnh7+ftdwzt7G6lGBho8lrVwkkL9/iQvaSOA==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "fs-extra": "^11.1.1", + "react-json-view-lite": "^1.2.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-analytics": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.5.2.tgz", + "integrity": "sha512-rjEkJH/tJ8OXRE9bwhV2mb/WP93V441rD6XnM6MIluu7rk8qg38iSxS43ga2V2Q/2ib53PcqbDEJDG/yWQRJhQ==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-gtag": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.5.2.tgz", + "integrity": "sha512-lm8XL3xLkTPHFKKjLjEEAHUrW0SZBSHBE1I+i/tmYMBsjCcUB5UJ52geS5PSiOCFVR74tbPGcPHEV/gaaxFeSA==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "@types/gtag.js": "^0.0.12", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-tag-manager": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.5.2.tgz", + "integrity": "sha512-QkpX68PMOMu10Mvgvr5CfZAzZQFx8WLlOiUQ/Qmmcl6mjGK6H21WLT5x7xDmcpCoKA/3CegsqIqBR+nA137lQg==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-sitemap": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.5.2.tgz", + "integrity": "sha512-DnlqYyRAdQ4NHY28TfHuVk414ft2uruP4QWCH//jzpHjqvKyXjj2fmDtI8RPUBh9K8iZKFMHRnLtzJKySPWvFA==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + 
"@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "fs-extra": "^11.1.1", + "sitemap": "^7.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/preset-classic": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.5.2.tgz", + "integrity": "sha512-3ihfXQ95aOHiLB5uCu+9PRy2gZCeSZoDcqpnDvf3B+sTrMvMTr8qRUzBvWkoIqc82yG5prCboRjk1SVILKx6sg==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/plugin-content-blog": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/plugin-content-pages": "3.5.2", + "@docusaurus/plugin-debug": "3.5.2", + "@docusaurus/plugin-google-analytics": "3.5.2", + "@docusaurus/plugin-google-gtag": "3.5.2", + "@docusaurus/plugin-google-tag-manager": "3.5.2", + "@docusaurus/plugin-sitemap": "3.5.2", + "@docusaurus/theme-classic": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-search-algolia": "3.5.2", + "@docusaurus/types": "3.5.2" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-classic": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.5.2.tgz", + "integrity": "sha512-XRpinSix3NBv95Rk7xeMF9k4safMkwnpSgThn0UNQNumKvmcIYjfkwfh2BhwYh/BxMXQHJ/PdmNh22TQFpIaYg==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/plugin-content-blog": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/plugin-content-pages": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-translations": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "copy-text-to-clipboard": "^3.2.0", + "infima": "0.2.0-alpha.44", + "lodash": "^4.17.21", + "nprogress": "^0.2.0", + "postcss": "^8.4.26", + "prism-react-renderer": "^2.3.0", + "prismjs": "^1.29.0", + "react-router-dom": "^5.3.4", + "rtlcss": "^4.1.0", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-common": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.5.2.tgz", + "integrity": "sha512-QXqlm9S6x9Ibwjs7I2yEDgsCocp708DrCrgHgKwg2n2AY0YQ6IjU0gAK35lHRLOvAoJUfCKpQAwUykB0R7+Eew==", + "dependencies": { + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "clsx": "^2.0.0", + "parse-numeric-range": "^1.3.0", + "prism-react-renderer": "^2.3.0", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/plugin-content-docs": "*", + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-search-algolia": { + "version": "3.5.2", + "resolved": 
"https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.5.2.tgz", + "integrity": "sha512-qW53kp3VzMnEqZGjakaV90sst3iN1o32PH+nawv1uepROO8aEGxptcq2R5rsv7aBShSRbZwIobdvSYKsZ5pqvA==", + "dependencies": { + "@docsearch/react": "^3.5.2", + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-translations": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "algoliasearch": "^4.18.0", + "algoliasearch-helper": "^3.13.3", + "clsx": "^2.0.0", + "eta": "^2.2.0", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-translations": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.5.2.tgz", + "integrity": "sha512-GPZLcu4aT1EmqSTmbdpVrDENGR2yObFEX8ssEFYTCiAIVc0EihNSdOIBTazUvgNqwvnoU1A8vIs1xyzc3LITTw==", + "dependencies": { + "fs-extra": "^11.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/types": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.5.2.tgz", + "integrity": "sha512-N6GntLXoLVUwkZw7zCxwy9QiuEXIcTVzA9AkmNw16oc0AP3SXLrMmDMMBIfgqwuKWa6Ox6epHol9kMtJqekACw==", + "dependencies": { + "@mdx-js/mdx": "^3.0.0", + "@types/history": "^4.7.11", + "@types/react": "*", + "commander": "^5.1.0", + "joi": "^17.9.2", + "react-helmet-async": "^1.3.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1", + "webpack-merge": "^5.9.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/utils": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.5.2.tgz", + "integrity": "sha512-33QvcNFh+Gv+C2dP9Y9xWEzMgf3JzrpL2nW9PopidiohS1nDcyknKRx2DWaFvyVTTYIkkABVSr073VTj/NITNA==", + "dependencies": { + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@svgr/webpack": "^8.1.0", + "escape-string-regexp": "^4.0.0", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "github-slugger": "^1.5.0", + "globby": "^11.1.0", + "gray-matter": "^4.0.3", + "jiti": "^1.20.0", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "micromatch": "^4.0.5", + "prompts": "^2.4.2", + "resolve-pathname": "^3.0.0", + "shelljs": "^0.8.5", + "tslib": "^2.6.0", + "url-loader": "^4.1.1", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/types": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } + } + }, + "node_modules/@docusaurus/utils-common": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.5.2.tgz", + "integrity": "sha512-i0AZjHiRgJU6d7faQngIhuHKNrszpL/SHQPgF1zH4H+Ij6E9NBYGy6pkcGWToIv7IVPbs+pQLh1P3whn0gWXVg==", + "dependencies": { + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/types": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } + } + }, + "node_modules/@docusaurus/utils-validation": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.5.2.tgz", + "integrity": 
"sha512-m+Foq7augzXqB6HufdS139PFxDC5d5q2QKZy8q0qYYvGdI6nnlNsGH4cIGsgBnV7smz+mopl3g4asbSDvMV0jA==", + "dependencies": { + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "fs-extra": "^11.2.0", + "joi": "^17.9.2", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==" + }, + "node_modules/@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.6.tgz", + "integrity": "sha512-1ZJTZebgqllO79ue2bm3rIGud/bOe0pP5BjSRCRxxYkEZS8STV7zN84UBbiYu7jy+eCKSnVIUgoWWE/tt+shMQ==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": 
"sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@leichtgewicht/ip-codec": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz", + "integrity": "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==" + }, + "node_modules/@mdx-js/mdx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-3.0.1.tgz", + "integrity": "sha512-eIQ4QTrOWyL3LWEe/bu6Taqzq2HQvHcyTMaOrI95P2/LmJE7AsfPfgJGuFLPVqBUE1BC1rik3VIhU+s9u72arA==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdx": "^2.0.0", + "collapse-white-space": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-build-jsx": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-util-to-js": "^2.0.0", + "estree-walker": "^3.0.0", + "hast-util-to-estree": "^3.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "markdown-extensions": "^2.0.0", + "periscopic": "^3.0.0", + "remark-mdx": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "source-map": "^0.7.0", + "unified": "^11.0.0", + "unist-util-position-from-estree": "^2.0.0", + "unist-util-stringify-position": "^4.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@mdx-js/react": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.0.1.tgz", + "integrity": "sha512-9ZrPIU4MGf6et1m1ov3zKf+q9+deetI51zprKB1D/z3NOb+rUxxtEl3mCjW5wTGh6VhRdwPueh1oRzi6ezkA8A==", + "dependencies": { + "@types/mdx": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=16", + "react": ">=16" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pnpm/config.env-replace": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz", + 
"integrity": "sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==", + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz", + "integrity": "sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==", + "dependencies": { + "graceful-fs": "4.2.10" + }, + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file/node_modules/graceful-fs": { + "version": "4.2.10", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", + "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==" + }, + "node_modules/@pnpm/npm-conf": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/@pnpm/npm-conf/-/npm-conf-2.2.2.tgz", + "integrity": "sha512-UA91GwWPhFExt3IizW6bOeY/pQ0BkuNwKjk9iQW9KqxluGCrg4VenZ0/L+2Y0+ZOtme72EVvg6v0zo3AMQRCeA==", + "dependencies": { + "@pnpm/config.env-replace": "^1.1.0", + "@pnpm/network.ca-file": "^1.0.1", + "config-chain": "^1.1.11" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@polka/url": { + "version": "1.0.0-next.25", + "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.25.tgz", + "integrity": "sha512-j7P6Rgr3mmtdkeDGTe0E/aYyWEWVtc5yFXtHCRHs28/jptDEWfaVOc5T7cblqy1XKPPfCxJc/8DwQ5YgLOZOVQ==" + }, + "node_modules/@sideway/address": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.5.tgz", + "integrity": "sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@sideway/formula": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==" + }, + "node_modules/@sideway/pinpoint": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", + "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==" + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" + }, + "node_modules/@sindresorhus/is": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz", + "integrity": "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/@slorber/remark-comment": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@slorber/remark-comment/-/remark-comment-1.0.0.tgz", + "integrity": "sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA==", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.1.0", + "micromark-util-symbol": "^1.0.1" + } + }, + "node_modules/@svgr/babel-plugin-add-jsx-attribute": { + "version": "8.0.0", + "resolved": 
"https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz", + "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-8.0.0.tgz", + "integrity": "sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-dynamic-title": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-8.0.0.tgz", + "integrity": "sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-em-dimensions": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-8.0.0.tgz", + "integrity": "sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-react-native-svg": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-8.1.0.tgz", + "integrity": "sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@svgr/babel-plugin-transform-svg-component": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-8.0.0.tgz", + "integrity": "sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw==", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-preset": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-8.1.0.tgz", + "integrity": "sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug==", + "dependencies": { + "@svgr/babel-plugin-add-jsx-attribute": "8.0.0", + "@svgr/babel-plugin-remove-jsx-attribute": "8.0.0", + "@svgr/babel-plugin-remove-jsx-empty-expression": "8.0.0", + "@svgr/babel-plugin-replace-jsx-attribute-value": "8.0.0", + "@svgr/babel-plugin-svg-dynamic-title": "8.0.0", + "@svgr/babel-plugin-svg-em-dimensions": "8.0.0", + "@svgr/babel-plugin-transform-react-native-svg": "8.1.0", + "@svgr/babel-plugin-transform-svg-component": "8.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/core": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/core/-/core-8.1.0.tgz", + "integrity": "sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==", + "dependencies": { + "@babel/core": "^7.21.3", + "@svgr/babel-preset": "8.1.0", + "camelcase": "^6.2.0", + "cosmiconfig": "^8.1.3", + "snake-case": "^3.0.4" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/hast-util-to-babel-ast": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-8.0.0.tgz", + "integrity": "sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q==", + "dependencies": { + "@babel/types": "^7.21.3", + "entities": "^4.4.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/plugin-jsx": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-8.1.0.tgz", + "integrity": "sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA==", + "dependencies": { + "@babel/core": "^7.21.3", + "@svgr/babel-preset": "8.1.0", + "@svgr/hast-util-to-babel-ast": "8.0.0", + "svg-parser": "^2.0.4" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/plugin-svgo": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-8.1.0.tgz", + "integrity": "sha512-Ywtl837OGO9pTLIN/onoWLmDQ4zFUycI1g76vuKGEz6evR/ZTJlJuz3G/fIkb6OVBJ2g0o6CGJzaEjfmEo3AHA==", + "dependencies": { + "cosmiconfig": "^8.1.3", + "deepmerge": "^4.3.1", + "svgo": "^3.0.2" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + 
"peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/webpack": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-8.1.0.tgz", + "integrity": "sha512-LnhVjMWyMQV9ZmeEy26maJk+8HTIbd59cH4F2MJ439k9DqejRisfFNGAPvRYlKETuh9LrImlS8aKsBgKjMA8WA==", + "dependencies": { + "@babel/core": "^7.21.3", + "@babel/plugin-transform-react-constant-elements": "^7.21.3", + "@babel/preset-env": "^7.20.2", + "@babel/preset-react": "^7.18.6", + "@babel/preset-typescript": "^7.21.0", + "@svgr/core": "8.1.0", + "@svgr/plugin-jsx": "8.1.0", + "@svgr/plugin-svgo": "8.1.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@szmarczak/http-timer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz", + "integrity": "sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==", + "dependencies": { + "defer-to-connect": "^2.0.1" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/@trysound/sax": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", + "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/@types/acorn": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/@types/acorn/-/acorn-4.0.6.tgz", + "integrity": "sha512-veQTnWP+1D/xbxVrPC3zHnCZRjSrKfhbMUlEA43iMZLu7EsnTtkJklIuwrCPbOi8YkvDQAiW05VQQFvvz9oieQ==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.5", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.5.tgz", + "integrity": "sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==", + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/bonjour": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.13.tgz", + "integrity": "sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.38", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", + "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect-history-api-fallback": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.4.tgz", + "integrity": "sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==", + "dependencies": { + "@types/express-serve-static-core": "*", + "@types/node": "*" + } + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/eslint": { + "version": "8.56.10", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.10.tgz", + "integrity": 
"sha512-Shavhk87gCtY2fhXDctcfS3e6FdxWkCx1iUZ9eEUbh7rTqlZT0/IzOkCOVt0fCjcFuZ9FPYfuezTBImfHCDBGQ==", + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.7", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", + "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/express": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.21.tgz", + "integrity": "sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==", + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.33", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "4.19.5", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.19.5.tgz", + "integrity": "sha512-y6W03tvrACO72aijJ5uF02FRq5cgDR9lUxddQ8vyF+GvmjJQqbzDcJngEjURc+ZsG31VI3hODNZJ2URj86pzmg==", + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/gtag.js": { + "version": "0.0.12", + "resolved": "https://registry.npmjs.org/@types/gtag.js/-/gtag.js-0.0.12.tgz", + "integrity": "sha512-YQV9bUsemkzG81Ea295/nF/5GijnD2Af7QhEofh7xu+kvCN6RdodgNwwGWXB5GMI3NoyvQo0odNctoH/qLMIpg==" + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/history": { + "version": "4.7.11", + "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", + "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==" + }, + "node_modules/@types/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==" + }, + "node_modules/@types/http-cache-semantics": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz", + "integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==" + }, + "node_modules/@types/http-errors": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.4.tgz", + "integrity": 
"sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==" + }, + "node_modules/@types/http-proxy": { + "version": "1.17.14", + "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.14.tgz", + "integrity": "sha512-SSrD0c1OQzlFX7pGu1eXxSEjemej64aaNPRhhVYUGqXh0BtldAAx37MG8btcumvpgKyZp1F5Gn3JkktdxiFv6w==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==" + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdx": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", + "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==" + }, + "node_modules/@types/mime": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz", + "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==" + }, + "node_modules/@types/ms": { + "version": "0.7.34", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz", + "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==" + }, + "node_modules/@types/node": { + "version": "20.14.7", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.7.tgz", + "integrity": "sha512-uTr2m2IbJJucF3KUxgnGOZvYbN0QgkGyWxG6973HCpMYFy2KfcgYuIwkJQMQkt1VbBMlvWRbpshFTLxnxCZjKQ==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@types/node-forge": { + "version": "1.3.11", + "resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-1.3.11.tgz", + "integrity": "sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/parse-json": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", + "integrity": 
"sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==" + }, + "node_modules/@types/prismjs": { + "version": "1.26.4", + "resolved": "https://registry.npmjs.org/@types/prismjs/-/prismjs-1.26.4.tgz", + "integrity": "sha512-rlAnzkW2sZOjbqZ743IHUhFcvzaGbqijwOu8QZnZCjfQzBqFE3s4lOTJEsxikImav9uzz/42I+O7YUs1mWgMlg==" + }, + "node_modules/@types/prop-types": { + "version": "15.7.12", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.12.tgz", + "integrity": "sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q==" + }, + "node_modules/@types/qs": { + "version": "6.9.15", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.15.tgz", + "integrity": "sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg==" + }, + "node_modules/@types/range-parser": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", + "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==" + }, + "node_modules/@types/react": { + "version": "18.3.3", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.3.tgz", + "integrity": "sha512-hti/R0pS0q1/xx+TsI73XIqk26eBsISZ2R0wUijXIngRK9R/e7Xw/cXVxQK7R5JjW+SV4zGcn5hXjudkN/pLIw==", + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-router": { + "version": "5.1.20", + "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", + "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*" + } + }, + "node_modules/@types/react-router-config": { + "version": "5.0.11", + "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.11.tgz", + "integrity": "sha512-WmSAg7WgqW7m4x8Mt4N6ZyKz0BubSj/2tVUMsAHp+Yd2AMwcSbeFq9WympT19p5heCFmF97R9eD5uUR/t4HEqw==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "^5.1.0" + } + }, + "node_modules/@types/react-router-dom": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.3.tgz", + "integrity": "sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "*" + } + }, + "node_modules/@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" + }, + "node_modules/@types/sax": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.7.tgz", + "integrity": "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/send": { + "version": "0.17.4", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.4.tgz", + "integrity": "sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==", + "dependencies": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "node_modules/@types/serve-index": { + "version": "1.9.4", + "resolved": 
"https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.4.tgz", + "integrity": "sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug==", + "dependencies": { + "@types/express": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "1.15.7", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.7.tgz", + "integrity": "sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==", + "dependencies": { + "@types/http-errors": "*", + "@types/node": "*", + "@types/send": "*" + } + }, + "node_modules/@types/sockjs": { + "version": "0.3.36", + "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.36.tgz", + "integrity": "sha512-MK9V6NzAS1+Ud7JV9lJLFqW85VbC9dq3LmwZCuBe4wBDgKC0Kj/jd8Xl+nSviU+Qc3+m7umHHyHg//2KSa0a0Q==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==" + }, + "node_modules/@types/ws": { + "version": "8.5.10", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.10.tgz", + "integrity": "sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/yargs": { + "version": "17.0.32", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.32.tgz", + "integrity": "sha512-xQ67Yc/laOG5uMfX/093MRlGGCIBzZMarVa+gfNKJxWAIgykYpVGkBdbqEzGDDfCrVUj6Hiff4mTZ5BA6TmAog==", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==" + }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==" + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.12.1.tgz", + "integrity": "sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg==", + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", + "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==" + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", + "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==" + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz", + "integrity": 
"sha512-nzJwQw99DNDKr9BVCOZcLuJJUlqkJh+kVzVl6Fmq/tI5ZtEyWT1KZMyOXltXLZJmDtvLCDgwsyrkohEtopTXCw==" + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", + "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", + "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==" + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz", + "integrity": "sha512-Jif4vfB6FJlUlSbgEMHUyk1j234GTNG9dBJ4XJdOySoj518Xj0oGsNi59cUQF4RRMS9ouBUxDDdyBVfPTypa5g==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-buffer": "1.12.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/wasm-gen": "1.12.1" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", + "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "node_modules/@webassemblyjs/leb128": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", + "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", + "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==" + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.12.1.tgz", + "integrity": "sha512-1DuwbVvADvS5mGnXbE+c9NfA8QRcZ6iKquqjjmR10k6o+zzsRVesil54DKexiowcFCPdr/Q0qaMgB01+SQ1u6g==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-buffer": "1.12.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/helper-wasm-section": "1.12.1", + "@webassemblyjs/wasm-gen": "1.12.1", + "@webassemblyjs/wasm-opt": "1.12.1", + "@webassemblyjs/wasm-parser": "1.12.1", + "@webassemblyjs/wast-printer": "1.12.1" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.12.1.tgz", + "integrity": "sha512-TDq4Ojh9fcohAw6OIMXqiIcTq5KUXTGRkVxbSo1hQnSy6lAM5GSdfwWeSxpAo0YzgsgF182E/U0mDNhuA0tW7w==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.12.1", + "resolved": 
"https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.12.1.tgz", + "integrity": "sha512-Jg99j/2gG2iaz3hijw857AVYekZe2SAskcqlWIZXjji5WStnOpVoat3gQfT/Q5tb2djnCjBtMocY/Su1GfxPBg==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-buffer": "1.12.1", + "@webassemblyjs/wasm-gen": "1.12.1", + "@webassemblyjs/wasm-parser": "1.12.1" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.12.1.tgz", + "integrity": "sha512-xikIi7c2FHXysxXe3COrVUPSheuBtpcfhbpFj4gmu7KRLYOzANztwUU0IbsqvMqzuNK2+glRGWCEqZo1WCLyAQ==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.12.1.tgz", + "integrity": "sha512-+X4WAlOisVWQMikjbcvY2e0rwPsKQ9F688lksZhBcPycBBuii3O7m8FACbDMWDojpAqvjIncrG8J0XHKyQfVeA==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.12.0.tgz", + "integrity": "sha512-RTvkC4w+KNXrM39/lWCUaG0IbRkWdCv7W/IOW9oU6SawyxulvkQy5HQPVTKxEjczcUvapcrw3cFx/60VN/NRNw==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-attributes": { + "version": "1.9.5", + "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz", + "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==", + "peerDependencies": { + "acorn": "^8" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": 
"https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.3", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.3.tgz", + "integrity": "sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw==", + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/address": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz", + "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ajv": { + "version": "8.16.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.16.0.tgz", + "integrity": "sha512-F0twR8U1ZU67JIEtekUcLkXkoO5mMMmgGD8sK/xUFzJ805jxHQl92hImFAqqXMyMYjSPOyUPAwHYhB72g5sTXw==", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/algoliasearch": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.24.0.tgz", + "integrity": "sha512-bf0QV/9jVejssFBmz2HQLxUadxk574t4iwjCKp5E7NBzwKkrDEhKPISIIjAU/p6K5qDx3qoeh4+26zWN1jmw3g==", + "dependencies": { + "@algolia/cache-browser-local-storage": "4.24.0", + "@algolia/cache-common": "4.24.0", + "@algolia/cache-in-memory": "4.24.0", + "@algolia/client-account": "4.24.0", + "@algolia/client-analytics": "4.24.0", + "@algolia/client-common": "4.24.0", + "@algolia/client-personalization": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/logger-common": "4.24.0", + "@algolia/logger-console": "4.24.0", + "@algolia/recommend": "4.24.0", + "@algolia/requester-browser-xhr": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/requester-node-http": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/algoliasearch-helper": { + "version": "3.22.5", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.22.5.tgz", + "integrity": 
"sha512-lWvhdnc+aKOKx8jyA3bsdEgHzm/sglC4cYdMG4xSQyRiPLJVJtH/IVYZG3Hp6PkTEhQqhyVYkeP9z2IlcHJsWw==", + "dependencies": { + "@algolia/events": "^4.0.1" + }, + "peerDependencies": { + "algoliasearch": ">= 3.1 < 6" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/requester-browser-xhr": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz", + "integrity": "sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/requester-node-http": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", + "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-align/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/ansi-align/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-html-community": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", + "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", + "engines": [ + "node >= 0.8.0" + ], + "bin": { + "ansi-html": "bin/ansi-html" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": 
"4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/astring": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz", + "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==", + "bin": { + "astring": "bin/astring" + } + }, + "node_modules/at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/autoprefixer": { + "version": "10.4.20", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz", + "integrity": "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "browserslist": "^4.23.3", + "caniuse-lite": "^1.0.30001646", + "fraction.js": "^4.3.7", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/babel-loader": { + "version": "9.1.3", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.1.3.tgz", + "integrity": "sha512-xG3ST4DglodGf8qSwv0MdeWLhrDsw/32QMdTO5T1ZIp9gQur0HkCyFs7Awskr10JKXFXwpAhiCuYX5oGXnRGbw==", + "dependencies": { + "find-cache-dir": "^4.0.0", + "schema-utils": "^4.0.0" + 
}, + "engines": { + "node": ">= 14.15.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0", + "webpack": ">=5" + } + }, + "node_modules/babel-plugin-dynamic-import-node": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", + "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", + "dependencies": { + "object.assign": "^4.1.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.11", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.11.tgz", + "integrity": "sha512-sMEJ27L0gRHShOh5G54uAAPaiCOygY/5ratXuiyb2G46FmlSpc9eFCzYVyDiPxfNbwzA7mYahmjQc5q+CZQ09Q==", + "dependencies": { + "@babel/compat-data": "^7.22.6", + "@babel/helper-define-polyfill-provider": "^0.6.2", + "semver": "^6.3.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.4.tgz", + "integrity": "sha512-25J6I8NGfa5YkCDogHRID3fVCadIR8/pGl1/spvCkzb6lVn6SR3ojpx9nOn9iEBcUsjY24AmdKm5khcfKdylcg==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.1", + "core-js-compat": "^3.36.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.2.tgz", + "integrity": "sha512-2R25rQZWP63nGwaAswvDazbPXfrM3HwVoBXK6HcqeKrSrL/JqcC/rDcf95l4r7LXLyxDXc8uQDa064GubtCABg==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/batch": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", + "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==" + }, + "node_modules/big.js": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", + "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", + "engines": { + "node": "*" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": 
"sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/body-parser": { + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.11.0", + "raw-body": "2.5.2", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/body-parser/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/body-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/body-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/bonjour-service": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.2.1.tgz", + "integrity": "sha512-oSzCS2zV14bh2kji6vNe7vrpJYCHGvcZnlffFQ1MEoX/WOeQ/teD8SYWKR942OI3INjq8OMNJlbPK5LLLUxFDw==", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "multicast-dns": "^7.2.5" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==" + }, + "node_modules/boxen": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz", + "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^6.2.0", + "chalk": "^4.1.2", + "cli-boxes": "^3.0.0", + "string-width": "^5.0.1", + "type-fest": "^2.5.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.0.1" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/browserslist": { + "version": "4.23.3", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz", + "integrity": "sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "caniuse-lite": "^1.0.30001646", + "electron-to-chromium": "^1.5.4", + "node-releases": "^2.0.18", + "update-browserslist-db": "^1.1.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" + }, + "node_modules/bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/cacheable-lookup": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz", + "integrity": "sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==", + "engines": { + "node": ">=14.16" + } + }, + "node_modules/cacheable-request": { + "version": "10.2.14", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-10.2.14.tgz", + "integrity": "sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==", + "dependencies": { + "@types/http-cache-semantics": "^4.0.2", + "get-stream": "^6.0.1", + "http-cache-semantics": "^4.1.1", + "keyv": "^4.5.3", + "mimic-response": "^4.0.0", + "normalize-url": "^8.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/camel-case": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", + "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", + "dependencies": { + "pascal-case": "^3.1.2", + "tslib": "^2.0.3" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": 
"sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caniuse-api": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", + "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "dependencies": { + "browserslist": "^4.0.0", + "caniuse-lite": "^1.0.0", + "lodash.memoize": "^4.1.2", + "lodash.uniq": "^4.5.0" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001662", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001662.tgz", + "integrity": "sha512-sgMUVwLmGseH8ZIrm1d51UbrhqMCH3jvS7gF/M6byuHOnKyLOBL7W8yz5V02OHwgLGA36o/AFhWzzh4uc5aqTA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "engines": { + "node": ">=10" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": 
"sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/cheerio": { + "version": "1.0.0-rc.12", + "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz", + "integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==", + "dependencies": { + "cheerio-select": "^2.1.0", + "dom-serializer": "^2.0.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "htmlparser2": "^8.0.1", + "parse5": "^7.0.0", + "parse5-htmlparser2-tree-adapter": "^7.0.0" + }, + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/cheeriojs/cheerio?sponsor=1" + } + }, + "node_modules/cheerio-select": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz", + "integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==", + "dependencies": { + "boolbase": "^1.0.0", + "css-select": "^5.1.0", + "css-what": "^6.1.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chrome-trace-event": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.4.tgz", + "integrity": "sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==", + "engines": { + "node": ">=6.0" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/clean-css": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.3.tgz", + "integrity": "sha512-D5J+kHaVb/wKSFcyyV75uCn8fiY4sV38XJoe4CUyGQ+mOU/fMVYUdH1hJC+CJQ5uY3EnW27SbJYS4X8BiLrAFg==", + "dependencies": { + "source-map": "~0.6.0" + }, + "engines": { + "node": ">= 10.0" + } + }, + "node_modules/clean-css/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/cli-boxes": { + 
"version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-table3": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz", + "integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==", + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "@colors/colors": "1.5.0" + } + }, + "node_modules/cli-table3/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/cli-table3/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/clone-deep": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "dependencies": { + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/collapse-white-space": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz", + "integrity": "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/colord": { + "version": "2.9.3", + "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==" + }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==" + }, + 
"node_modules/combine-promises": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.2.0.tgz", + "integrity": "sha512-VcQB1ziGD0NXrhKxiwyNbCDmRzs/OShMs2GqW2DlU2A/Sd0nQxE1oWDAE5O0ygSx5mgQOn9eIFh7yKPgFRVkPQ==", + "engines": { + "node": ">=10" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/common-path-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/common-path-prefix/-/common-path-prefix-3.0.0.tgz", + "integrity": "sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w==" + }, + "node_modules/compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "dependencies": { + "mime-db": ">= 1.43.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compressible/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compression": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", + "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", + "dependencies": { + "accepts": "~1.3.5", + "bytes": "3.0.0", + "compressible": "~2.0.16", + "debug": "2.6.9", + "on-headers": "~1.0.2", + "safe-buffer": "5.1.2", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/compression/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/compression/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/compression/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "node_modules/config-chain": { + "version": "1.1.13", + "resolved": 
"https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", + "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", + "dependencies": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "node_modules/configstore": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/configstore/-/configstore-6.0.0.tgz", + "integrity": "sha512-cD31W1v3GqUlQvbBCGcXmd2Nj9SvLDOP1oQ0YFuLETufzSPaKp11rYBsSOm7rCsW3OnIRAFM3OxRhceaXNYHkA==", + "dependencies": { + "dot-prop": "^6.0.1", + "graceful-fs": "^4.2.6", + "unique-string": "^3.0.0", + "write-file-atomic": "^3.0.3", + "xdg-basedir": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/yeoman/configstore?sponsor=1" + } + }, + "node_modules/connect-history-api-fallback": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", + "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/consola": { + "version": "2.15.3", + "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", + "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==" + }, + "node_modules/content-disposition": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", + "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==" + }, + "node_modules/cookie": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" + }, + "node_modules/copy-text-to-clipboard": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz", + "integrity": "sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", + "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", + "dependencies": { + "fast-glob": "^3.2.11", + "glob-parent": "^6.0.1", + "globby": 
"^13.1.1", + "normalize-path": "^3.0.0", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/globby": { + "version": "13.2.2", + "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", + "integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", + "dependencies": { + "dir-glob": "^3.0.1", + "fast-glob": "^3.3.0", + "ignore": "^5.2.4", + "merge2": "^1.4.1", + "slash": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin/node_modules/slash": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", + "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/core-js": { + "version": "3.37.1", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.37.1.tgz", + "integrity": "sha512-Xn6qmxrQZyB0FFY8E3bgRXei3lWDJHhvI+u0q9TKIYM49G8pAr0FgnnrFRAmsbptZL1yxRADVXn+x5AGsbBfyw==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-compat": { + "version": "3.37.1", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.37.1.tgz", + "integrity": "sha512-9TNiImhKvQqSUkOvk/mMRZzOANTiEVC7WaBNhHcKM7x+/5E1l5NvsysR19zuDQScE8k+kfQXWRN3AtS/eOSHpg==", + "dependencies": { + "browserslist": "^4.23.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-pure": { + "version": "3.37.1", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.37.1.tgz", + "integrity": "sha512-J/r5JTHSmzTxbiYYrzXg9w1VpqrYt+gexenBE9pugeyhwPZTAEJddyiReJWsLO6uNQ8xJZFbod6XC7KKwatCiA==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "node_modules/cosmiconfig": { + "version": "8.3.6", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", + "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", + "dependencies": { + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + 
"peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/crypto-random-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-4.0.0.tgz", + "integrity": "sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA==", + "dependencies": { + "type-fest": "^1.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/crypto-random-string/node_modules/type-fest": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", + "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/css-declaration-sorter": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-7.2.0.tgz", + "integrity": "sha512-h70rUM+3PNFuaBDTLe8wF/cdWu+dOZmb7pJt8Z2sedYbAcQVQV/tEchueg3GWxwqS0cxtbxmaHEdkNACqcvsow==", + "engines": { + "node": "^14 || ^16 || >=18" + }, + "peerDependencies": { + "postcss": "^8.0.9" + } + }, + "node_modules/css-loader": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.11.0.tgz", + "integrity": "sha512-CTJ+AEQJjq5NzLga5pE39qdiSV56F8ywCIsqNIRF0r7BDgWsN25aazToqAFg7ZrtA/U016xudB3ffgweORxX7g==", + "dependencies": { + "icss-utils": "^5.1.0", + "postcss": "^8.4.33", + "postcss-modules-extract-imports": "^3.1.0", + "postcss-modules-local-by-default": "^4.0.5", + "postcss-modules-scope": "^3.2.0", + "postcss-modules-values": "^4.0.0", + "postcss-value-parser": "^4.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/css-minimizer-webpack-plugin": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-5.0.1.tgz", + "integrity": "sha512-3caImjKFQkS+ws1TGcFn0V1HyDJFq1Euy589JlD6/3rV2kj+w7r5G9WDMgSHvpvXHNZ2calVypZWuEDQd9wfLg==", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "cssnano": "^6.0.1", + "jest-worker": "^29.4.3", + "postcss": "^8.4.24", + "schema-utils": "^4.0.1", + "serialize-javascript": "^6.0.1" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@parcel/css": { + "optional": true + }, + "@swc/css": { + "optional": true + }, + "clean-css": { + "optional": true + }, + "csso": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "lightningcss": { + "optional": true + } + } + }, + 
"node_modules/css-select": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", + "integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.1.0", + "domhandler": "^5.0.2", + "domutils": "^3.0.1", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/css-tree": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", + "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", + "dependencies": { + "mdn-data": "2.0.30", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "node_modules/css-what": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", + "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==", + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cssnano": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-6.1.2.tgz", + "integrity": "sha512-rYk5UeX7VAM/u0lNqewCdasdtPK81CgX8wJFLEIXHbV2oldWRgJAsZrdhRXkV1NJzA2g850KiFm9mMU2HxNxMA==", + "dependencies": { + "cssnano-preset-default": "^6.1.2", + "lilconfig": "^3.1.1" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/cssnano" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-preset-advanced": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-6.1.2.tgz", + "integrity": "sha512-Nhao7eD8ph2DoHolEzQs5CfRpiEP0xa1HBdnFZ82kvqdmbwVBUr2r1QuQ4t1pi+D1ZpqpcO4T+wy/7RxzJ/WPQ==", + "dependencies": { + "autoprefixer": "^10.4.19", + "browserslist": "^4.23.0", + "cssnano-preset-default": "^6.1.2", + "postcss-discard-unused": "^6.0.5", + "postcss-merge-idents": "^6.0.3", + "postcss-reduce-idents": "^6.0.3", + "postcss-zindex": "^6.0.2" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-preset-default": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-6.1.2.tgz", + "integrity": "sha512-1C0C+eNaeN8OcHQa193aRgYexyJtU8XwbdieEjClw+J9d94E41LwT6ivKH0WT+fYwYWB0Zp3I3IZ7tI/BbUbrg==", + "dependencies": { + "browserslist": "^4.23.0", + "css-declaration-sorter": "^7.2.0", + "cssnano-utils": "^4.0.2", + "postcss-calc": "^9.0.1", + "postcss-colormin": "^6.1.0", + "postcss-convert-values": "^6.1.0", + "postcss-discard-comments": "^6.0.2", + "postcss-discard-duplicates": "^6.0.3", + "postcss-discard-empty": "^6.0.3", + "postcss-discard-overridden": "^6.0.2", + "postcss-merge-longhand": "^6.0.5", + "postcss-merge-rules": "^6.1.1", + "postcss-minify-font-values": "^6.1.0", + "postcss-minify-gradients": "^6.0.3", + "postcss-minify-params": "^6.1.0", + 
"postcss-minify-selectors": "^6.0.4", + "postcss-normalize-charset": "^6.0.2", + "postcss-normalize-display-values": "^6.0.2", + "postcss-normalize-positions": "^6.0.2", + "postcss-normalize-repeat-style": "^6.0.2", + "postcss-normalize-string": "^6.0.2", + "postcss-normalize-timing-functions": "^6.0.2", + "postcss-normalize-unicode": "^6.1.0", + "postcss-normalize-url": "^6.0.2", + "postcss-normalize-whitespace": "^6.0.2", + "postcss-ordered-values": "^6.0.2", + "postcss-reduce-initial": "^6.1.0", + "postcss-reduce-transforms": "^6.0.2", + "postcss-svgo": "^6.0.3", + "postcss-unique-selectors": "^6.0.4" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-utils": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-4.0.2.tgz", + "integrity": "sha512-ZR1jHg+wZ8o4c3zqf1SIUSTIvm/9mU343FMR6Obe/unskbvpGhZOo1J6d/r8D1pzkRQYuwbcH3hToOuoA2G7oQ==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/csso": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/csso/-/csso-5.0.5.tgz", + "integrity": "sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ==", + "dependencies": { + "css-tree": "~2.2.0" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/csso/node_modules/css-tree": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.2.1.tgz", + "integrity": "sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA==", + "dependencies": { + "mdn-data": "2.0.28", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/csso/node_modules/mdn-data": { + "version": "2.0.28", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.28.tgz", + "integrity": "sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g==" + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" + }, + "node_modules/debounce": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz", + "integrity": "sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==" + }, + "node_modules/debug": { + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.5.tgz", + "integrity": "sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", + "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/decompress-response": { + "version": "6.0.0", + 
"resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/decompress-response/node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/default-gateway": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", + "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", + "dependencies": { + "execa": "^5.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "engines": { + "node": ">=10" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-lazy-prop": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "engines": { + "node": ">=8" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/del": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", + "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", + "dependencies": { + 
"globby": "^11.0.1", + "graceful-fs": "^4.2.4", + "is-glob": "^4.0.1", + "is-path-cwd": "^2.2.0", + "is-path-inside": "^3.0.2", + "p-map": "^4.0.0", + "rimraf": "^3.0.2", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/detect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", + "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" + }, + "node_modules/detect-port": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.6.1.tgz", + "integrity": "sha512-CmnVc+Hek2egPx1PeTFVta2W78xy2K/9Rkf6cC4T59S50tVnzKj+tnx5mmx5lwvCkujZ4uRrpRSuV+IVs3f90Q==", + "dependencies": { + "address": "^1.0.1", + "debug": "4" + }, + "bin": { + "detect": "bin/detect-port.js", + "detect-port": "bin/detect-port.js" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/detect-port-alt": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", + "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", + "dependencies": { + "address": "^1.0.1", + "debug": "^2.6.0" + }, + "bin": { + "detect": "bin/detect-port", + "detect-port": "bin/detect-port" + }, + "engines": { + "node": ">= 4.2.1" + } + }, + "node_modules/detect-port-alt/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/detect-port-alt/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + 
"node": ">=8" + } + }, + "node_modules/dns-packet": { + "version": "5.6.1", + "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", + "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", + "dependencies": { + "@leichtgewicht/ip-codec": "^2.0.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/dom-converter": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", + "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "dependencies": { + "utila": "~0.4" + } + }, + "node_modules/dom-serializer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", + "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ] + }, + "node_modules/domhandler": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", + "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "dependencies": { + "domelementtype": "^2.3.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", + "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", + "dependencies": { + "dom-serializer": "^2.0.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/dot-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", + "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/dot-prop": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-6.0.1.tgz", + "integrity": "sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==", + "dependencies": { + "is-obj": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/dot-prop/node_modules/is-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/duplexer": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", + "integrity": 
"sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.26", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.26.tgz", + "integrity": "sha512-Z+OMe9M/V6Ep9n/52+b7lkvYEps26z4Yz3vjWL1V61W0q+VLF1pOHhMY17sa4roz4AWmULSI8E6SAojZA5L0YQ==" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + }, + "node_modules/emojilib": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/emojilib/-/emojilib-2.4.0.tgz", + "integrity": "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==" + }, + "node_modules/emojis-list": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/emoticon": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.1.0.tgz", + "integrity": "sha512-VWZfnxqwNcc51hIy/sbOdEem6D+cVtpPzEEtVAFdaas30+1dgkyaOQ4sQ6Bp0tOMqWO1v+HQfYaoodOkdhK6SQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.17.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.0.tgz", + "integrity": "sha512-dwDPwZL0dmye8Txp2gzFmA6sxALaSvdRDjPH0viLcKrtlOL3tw62nWWweVD1SdILDTJrbrL6tdWVN58Wo6U3eA==", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": 
"sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.5.3.tgz", + "integrity": "sha512-i1gCgmR9dCl6Vil6UKPI/trA69s08g/syhiDK9TG0Nf1RJjjFI+AzoWW7sPufzkgYAn861skuCwJa0pIIHYxvg==" + }, + "node_modules/escalade": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-goat": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-4.0.0.tgz", + "integrity": "sha512-2Sd4ShcWxbx6OY1IHyla/CVNwvg7XwZVoXZHcSu9w9SReNP1EzzD5T8NWKIR38fIqEns9kDWKUQTXXAmlDrdPg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": 
"sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-util-attach-comments": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz", + "integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-build-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz", + "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-walker": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-to-js": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz", + "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-value-to-estree": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.1.2.tgz", + "integrity": "sha512-S0gW2+XZkmsx00tU2uJ4L9hUT7IFabbml9pHh2WQqFmAbxit++YGZne0sKJbNwkj9Wvg9E4uqWl4nCIFQMmfag==", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/remcohaszing" + } + }, + "node_modules/estree-util-visit": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz", + "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eta": { + "version": "2.2.0", + "resolved": 
"https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", + "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==", + "engines": { + "node": ">=6.0.0" + }, + "funding": { + "url": "https://github.com/eta-dev/eta?sponsor=1" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/eval": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", + "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", + "dependencies": { + "@types/node": "*", + "require-like": ">= 0.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/express": { + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", + "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.2", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.6.0", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.2.0", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.7", + "qs": "6.11.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.18.0", + "serve-static": "1.15.0", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/express/node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + 
"node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/express/node_modules/path-to-regexp": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", + "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + }, + "node_modules/express/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, + "node_modules/fast-url-parser": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", + "integrity": "sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==", + "dependencies": { + "punycode": "^1.3.2" + } + }, + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fault": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/fault/-/fault-2.0.1.tgz", + "integrity": 
"sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==", + "dependencies": { + "format": "^0.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/faye-websocket": { + "version": "0.11.4", + "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", + "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", + "dependencies": { + "websocket-driver": ">=0.5.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/feed": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz", + "integrity": "sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==", + "dependencies": { + "xml-js": "^1.6.11" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/file-loader": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", + "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/file-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/file-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/file-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/file-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/filesize": { + "version": "8.0.7", + "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz", + "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": 
"sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", + "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/finalhandler/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/find-cache-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-4.0.0.tgz", + "integrity": "sha512-9ZonPT4ZAK4a+1pUPVPZJapbi7O5qbbJPdYw/NOQWZZbVLdDTYM3A4R9z/DpAM08IDaFGsvPgiGZ82WEwUDWjg==", + "dependencies": { + "common-path-prefix": "^3.0.0", + "pkg-dir": "^7.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/find-up": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz", + "integrity": "sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==", + "dependencies": { + "locate-path": "^7.1.0", + "path-exists": "^5.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.6", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", + "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz", + "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==", + "dependencies": { + "@babel/code-frame": "^7.8.3", + "@types/json-schema": "^7.0.5", + "chalk": "^4.1.0", + "chokidar": "^3.4.2", + "cosmiconfig": "^6.0.0", + "deepmerge": "^4.2.2", + "fs-extra": "^9.0.0", + "glob": "^7.1.6", + "memfs": "^3.1.2", + "minimatch": "^3.0.4", + "schema-utils": "2.7.0", + "semver": "^7.3.2", + "tapable": "^1.0.0" + }, + "engines": 
{ + "node": ">=10", + "yarn": ">=1.0.0" + }, + "peerDependencies": { + "eslint": ">= 6", + "typescript": ">= 2.7", + "vue-template-compiler": "*", + "webpack": ">= 4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + }, + "vue-template-compiler": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", + "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.7.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", + "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", + "dependencies": { + "@types/json-schema": "^7.0.4", + "ajv": "^6.12.2", + "ajv-keywords": "^3.4.1" + }, + "engines": { + "node": ">= 8.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", + "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/form-data-encoder": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-2.1.4.tgz", + "integrity": 
"sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==", + "engines": { + "node": ">= 14.17" + } + }, + "node_modules/format": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", + "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fraction.js": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", + "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fs-extra": { + "version": "11.2.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz", + "integrity": "sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/fs-monkey": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.6.tgz", + "integrity": "sha512-b1FMfwetIKymC0eioW7mTywihSQE4oLzQn1dB6rZB5fx/3NpNEdAWeCSMB+60/AeT0TCXsxzAlcYVEFCTAksWg==" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": 
"^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-own-enumerable-property-symbols": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", + "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/github-slugger": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" + }, + "node_modules/global-dirs": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", + "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", + "dependencies": { + "ini": "2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/global-dirs/node_modules/ini": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", + "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "dependencies": { + "global-prefix": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": 
"sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "dependencies": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix/node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/got": { + "version": "12.6.1", + "resolved": "https://registry.npmjs.org/got/-/got-12.6.1.tgz", + "integrity": "sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==", + "dependencies": { + "@sindresorhus/is": "^5.2.0", + "@szmarczak/http-timer": "^5.0.1", + "cacheable-lookup": "^7.0.0", + "cacheable-request": "^10.2.8", + "decompress-response": "^6.0.0", + "form-data-encoder": "^2.1.2", + "get-stream": "^6.0.1", + "http2-wrapper": "^2.1.10", + "lowercase-keys": "^3.0.0", + "p-cancelable": "^3.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/got?sponsor=1" + } + }, + "node_modules/got/node_modules/@sindresorhus/is": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-5.6.0.tgz", + "integrity": "sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + }, + "node_modules/gray-matter": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", + "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", + "dependencies": { + "js-yaml": "^3.13.1", + "kind-of": "^6.0.2", + "section-matter": "^1.0.0", + "strip-bom-string": "^1.0.0" + }, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/gray-matter/node_modules/argparse": { + 
"version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/gray-matter/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/gzip-size": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", + "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "dependencies": { + "duplexer": "^0.1.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/handle-thing": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", + "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-yarn": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-3.0.0.tgz", + "integrity": "sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hast-util-from-parse5": { + "version": "8.0.1", + "resolved": 
"https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.1.tgz", + "integrity": "sha512-Er/Iixbc7IEa7r/XLtuG52zoqn/b3Xng/w6aZQ0xGVxzhw5xUFxcRqdPzP6yFi/4HBYRaifaI5fQ1RH8n0ZeOQ==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "hastscript": "^8.0.0", + "property-information": "^6.0.0", + "vfile": "^6.0.0", + "vfile-location": "^5.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", + "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw": { + "version": "9.0.4", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.0.4.tgz", + "integrity": "sha512-LHE65TD2YiNsHD3YuXcKPHXPLuYh/gjp12mOfU8jxSrm1f/yJpsb0F/KKljS6U9LJoP0Ux+tCe8iJ2AsPzTdgA==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "@ungap/structured-clone": "^1.0.0", + "hast-util-from-parse5": "^8.0.0", + "hast-util-to-parse5": "^8.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "parse5": "^7.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-estree": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.0.tgz", + "integrity": "sha512-lfX5g6hqVh9kjS/B9E2gSkvHH4SZNiQFiqWS0x9fENzEl+8W12RqdRxX6d/Cwxi30tPQs3bIO+aolQJNp1bIyw==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-attach-comments": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^0.4.0", + "unist-util-position": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.0.tgz", + "integrity": "sha512-H/y0+IWPdsLLS738P8tDnrQ8Z+dj12zQQ6WC11TIM21C8WFVoIxcqWXf2H3hiTVZjF1AWqoimGwrTWecWrnmRQ==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + 
"url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime/node_modules/inline-style-parser": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.4.tgz", + "integrity": "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q==" + }, + "node_modules/hast-util-to-jsx-runtime/node_modules/style-to-object": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.8.tgz", + "integrity": "sha512-xT47I/Eo0rwJmaXC4oilDGDWLohVhR6o/xAQcPQN8q6QBuZVL8qMYL85kLmST5cPjAorwvqIA4qXTRQoYHaL6g==", + "dependencies": { + "inline-style-parser": "0.2.4" + } + }, + "node_modules/hast-util-to-parse5": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz", + "integrity": "sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-8.0.0.tgz", + "integrity": "sha512-dMOtzCEd3ABUeSIISmrETiKuyydk1w0pa+gE/uormcTpSYuaNJPbX1NU3JLyscSLjwAQM8bWMhhIlnCqnRvDTw==", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^4.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "bin": { + "he": "bin/he" + } + }, + "node_modules/history": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", + "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", + "dependencies": { + "@babel/runtime": "^7.1.2", + "loose-envify": "^1.2.0", + "resolve-pathname": "^3.0.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0", + "value-equal": "^1.0.1" + } + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/hpack.js": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", + "integrity": 
"sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", + "dependencies": { + "inherits": "^2.0.1", + "obuf": "^1.0.0", + "readable-stream": "^2.0.1", + "wbuf": "^1.1.0" + } + }, + "node_modules/hpack.js/node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "node_modules/hpack.js/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/hpack.js/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/hpack.js/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/html-entities": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.5.2.tgz", + "integrity": "sha512-K//PSRMQk4FZ78Kyau+mZurHn3FH0Vwr+H36eE0rPbeYkRRi9YxceYPhuN60UwWorxyKHhqoAJl2OFKa4BVtaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/mdevils" + }, + { + "type": "patreon", + "url": "https://patreon.com/mdevils" + } + ] + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==" + }, + "node_modules/html-minifier-terser": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-7.2.0.tgz", + "integrity": "sha512-tXgn3QfqPIpGl9o+K5tpcj3/MN4SfLtsx2GWwBC3SSd0tXQGyF3gsSqad8loJgKZGM3ZxbYDd5yhiBIdWpmvLA==", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "~5.3.2", + "commander": "^10.0.0", + "entities": "^4.4.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.15.1" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": "^14.13.1 || >=16.0.0" + } + }, + "node_modules/html-minifier-terser/node_modules/commander": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "engines": { + "node": ">=14" + } + }, + "node_modules/html-tags": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/html-webpack-plugin": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.6.0.tgz", + "integrity": "sha512-iwaY4wzbe48AfKLZ/Cc8k0L+FKG6oSNRaZ8x5A/T/IVDGyXcbHncM9TdDa93wn0FsSm82FhTKW7f3vS61thXAw==", + "dependencies": { + "@types/html-minifier-terser": "^6.0.0", + "html-minifier-terser": "^6.0.2", + "lodash": "^4.17.21", + "pretty-error": "^4.0.0", + "tapable": "^2.0.0" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/html-webpack-plugin" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.20.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/html-webpack-plugin/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "engines": { + "node": ">= 12" + } + }, + "node_modules/html-webpack-plugin/node_modules/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "^5.2.2", + "commander": "^8.3.0", + "he": "^1.2.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.10.0" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/htmlparser2": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", + "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "entities": "^4.4.0" + } + }, + "node_modules/http-cache-semantics": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" + }, + "node_modules/http-deceiver": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", + "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==" + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" 
+ } + }, + "node_modules/http-parser-js": { + "version": "0.5.8", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz", + "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==" + }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-proxy-middleware": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", + "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", + "dependencies": { + "@types/http-proxy": "^1.17.8", + "http-proxy": "^1.18.1", + "is-glob": "^4.0.1", + "is-plain-obj": "^3.0.0", + "micromatch": "^4.0.2" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "@types/express": "^4.17.13" + }, + "peerDependenciesMeta": { + "@types/express": { + "optional": true + } + } + }, + "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", + "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/http2-wrapper": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz", + "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==", + "dependencies": { + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.2.0" + }, + "engines": { + "node": ">=10.19.0" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/icss-utils": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", + "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/ignore": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/image-size": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.1.1.tgz", + "integrity": 
"sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ==", + "dependencies": { + "queue": "6.0.2" + }, + "bin": { + "image-size": "bin/image-size.js" + }, + "engines": { + "node": ">=16.x" + } + }, + "node_modules/immer": { + "version": "9.0.21", + "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", + "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/immer" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-lazy": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", + "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/infima": { + "version": "0.2.0-alpha.44", + "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.44.tgz", + "integrity": "sha512-tuRkUSO/lB3rEhLJk25atwAjgLuzq070+pOW8XcvpHky/YbENnRRdPd85IBkyeTgttmOy5ah+yHYsK1HhUd4lQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" + }, + "node_modules/inline-style-parser": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz", + "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" + }, + "node_modules/interpret": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", + "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/ipaddr.js": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.2.0.tgz", + "integrity": "sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-ci": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", + "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", + "dependencies": { + "ci-info": "^3.2.0" + }, + "bin": { + "is-ci": "bin.js" + } + }, + 
"node_modules/is-core-module": { + "version": "2.14.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.14.0.tgz", + "integrity": "sha512-a5dFJih5ZLYlRtDc0dZWP7RiKr6xIKzmn/oAYCDvdLThadVgyJwlaoQPmRtMSpz+rk0OGAgIu+TcM9HUF0fk1A==", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-installed-globally": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", + "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", + "dependencies": { + "global-dirs": "^3.0.0", + "is-path-inside": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-npm": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-6.0.0.tgz", + "integrity": "sha512-JEjxbSmtPSt1c8XTkVrlujcXdKV1/tvuQ7GwKcAlyiVLeYFQ2VHat8xfrDJsIkhCdF/tZ7CiIR3sy141c6+gPQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", + "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-path-cwd": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", + "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-reference": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", + "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/is-regexp": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", + "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-root": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", + "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" + }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": 
"sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-yarn-global": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.4.1.tgz", + "integrity": "sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jiti": { + "version": "1.21.6", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.6.tgz", + "integrity": "sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/joi": { + "version": "17.13.3", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.3.tgz", + "integrity": "sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==", + "dependencies": { + "@hapi/hoek": "^9.3.0", + "@hapi/topo": "^5.1.0", + "@sideway/address": "^4.1.5", + "@sideway/formula": "^3.0.1", + "@sideway/pinpoint": "^2.0.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + 
"node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "engines": { + "node": ">=6" + } + }, + "node_modules/latest-version": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-7.0.0.tgz", + "integrity": "sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==", + "dependencies": { + "package-json": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/launch-editor": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.8.0.tgz", + "integrity": 
"sha512-vJranOAJrI/llyWGRQqiDM+adrw+k83fvmmx3+nV47g3+36xM15jE+zyZ6Ffel02+xSvuM0b2GDRosXZkbb6wA==", + "dependencies": { + "picocolors": "^1.0.0", + "shell-quote": "^1.8.1" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz", + "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" + }, + "node_modules/loader-runner": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", + "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "engines": { + "node": ">=6.11.5" + } + }, + "node_modules/loader-utils": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + }, + "engines": { + "node": ">=8.9.0" + } + }, + "node_modules/locate-path": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", + "integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==", + "dependencies": { + "p-locate": "^6.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==" + }, + "node_modules/lodash.uniq": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + 
} + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lower-case": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", + "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "dependencies": { + "tslib": "^2.0.3" + } + }, + "node_modules/lowercase-keys": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", + "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/markdown-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", + "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/markdown-table": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz", + "integrity": "sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mdast-util-directive": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.0.0.tgz", + "integrity": "sha512-JUpYOqKI4mM3sZcNxmF/ox04XYFFkNwr0CFlrQIkCwbvH0xzMCqkMqAde9wRd80VAhaUrwFwKm2nxretdT1h7Q==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz", + "integrity": "sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": 
"sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.1.tgz", + "integrity": "sha512-aJEUyzZ6TzlsX2s5B4Of7lN7EQtAxvtradMMglCQDyaTFgse6CmtmdJ15ElnVRlCg1vpNyVtbem0PWzlNieZsA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-frontmatter": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz", + "integrity": "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "escape-string-regexp": "^5.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-frontmatter/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz", + "integrity": "sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": 
"sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz", + "integrity": "sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + 
"mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", + "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.3.tgz", + "integrity": "sha512-bfOjvNt+1AcbPLTFMFWY149nJz0OjmewJs3LQQ5pIyVGxP4CdOqNVJL6kTaM5c68p8q82Xv3nCyFfUnuEcH3UQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", + "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": 
"^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.0.tgz", + "integrity": "sha512-SR2VnIEdVNCJbP6y7kVTJgPLifdr8WEU440fQec7qHoHOUz/oJ2jmNRqdDQ3rbiStOXb2mCDGTuwsK5OPUgYlQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdn-data": { + "version": "2.0.30", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", + "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==" + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/memfs": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", + "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + "dependencies": { + "fs-monkey": "^1.0.4" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/micromark": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.0.tgz", + "integrity": "sha512-o/sd0nMof8kYff+TqcDx3VSrgBTcZpSvYcAHIfHhv5VAuNmisCxjhx6YmxS8PFEpb9z5WKWKPdzf0jM23ro3RQ==", + "funding": [ + { + 
"type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.1.tgz", + "integrity": "sha512-CUQyKr1e///ZODyD1U3xit6zXwy1a8q2a1S1HKtIlmgvurrEpaw/Y9y6KSIbF8P59cn/NjzHyO+Q2fAyYLQrAA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": 
"sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-directive": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-3.0.2.tgz", + "integrity": "sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "parse-entities": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-frontmatter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-frontmatter/-/micromark-extension-frontmatter-2.0.0.tgz", + "integrity": "sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg==", + "dependencies": { + "fault": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": 
"sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + 
"resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": 
"OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.0.tgz", + "integrity": "sha512-Ub2ncQv+fwD70/l4ou27b4YzfNaCJOvyX4HxXU15m7mpYY+rjuWzsLIPZHJL253Z643RpbcP1oeIJlQ/SKW67g==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-mdx-expression": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.0.tgz", + "integrity": "sha512-sI0nwhUDz97xyzqJAbHQhp5TfaxEvZZZ2JDqUo+7NvyIYG6BZ5CPPqj2ogUoPJlmXHBnyZUzISg9+oUmU6tUjQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": 
"sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-mdx-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.1.tgz", + "integrity": "sha512-vNuFb9czP8QCtAQcEJn0UJQJZA8Dk6DXKBqx+bg/w0WGuSxDxNr7hErW89tHUY31dUW4NqEOWwmEUNhjTFmHkg==", + "dependencies": { + "@types/acorn": "^4.0.0", + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-mdx-md": { + "version": "2.0.0", + 
"resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz", + "integrity": "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz", + "integrity": "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==", + "dependencies": { + "acorn": "^8.0.0", + "acorn-jsx": "^5.0.0", + "micromark-extension-mdx-expression": "^3.0.0", + "micromark-extension-mdx-jsx": "^3.0.0", + "micromark-extension-mdx-md": "^2.0.0", + "micromark-extension-mdxjs-esm": "^3.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz", + "integrity": "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.0.tgz", + "integrity": "sha512-j9DGrQLm/Uhl2tCzcbLhy5kXsgkHUrjJHg4fFAeoMRwJmJerT9aw4FEhIbZStWN8A3qMwOp1uzHr4UL8AInxtA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + 
"micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-destination/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-destination/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-label": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.0.tgz", + "integrity": "sha512-RR3i96ohZGde//4WSe/dJsxOX6vxIg9TimLAS3i4EhBAFx8Sm5SmqVfR8E87DPSR31nEAjZfbt91OMZWcNgdZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-mdx-expression": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.2.tgz", + "integrity": "sha512-5E5I2pFzJyg2CtemqAbcyCktpHXuJbABnsb32wX2U8IQKhhVFBqkcZR5LRm1WVoFqa4kTueZK4abep7wdo9nrw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + 
"micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-space": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz", + "integrity": "sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-space/node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-title": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.0.tgz", + "integrity": "sha512-jY8CSxmpWLOxS+t8W+FG3Xigc0RDQA9bKMY/EwILvsesiRniiVMejYTE4wumNc2f4UbAa4WsHqe3J1QS1sli+A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": 
"https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.0.tgz", + "integrity": "sha512-28kbwaBjc5yAI1XadbdPYHX/eDnqaUFVikLwrO7FDnKG7lpgxnvk/XGRhX/PN0mOZ+dBSZ+LgunHS+6tYQAzhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": 
"sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-character": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.2.0.tgz", + "integrity": "sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-character/node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.0.tgz", + "integrity": "sha512-anK8SWmNphkXdaKgz5hJvGa7l00qmcaUQoMYsBwDlSKFKjc6gjGXPDw3FNL3Nbwq5L8gE+RCbGqTw49FK5Qyvg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.0.tgz", + "integrity": "sha512-S0ze2R9GH+fu41FA7pbSqNWObo/kzwf8rN/+IGlW/4tC6oACOs8B++bh+i9bVyNnwCcuksbFwsBme5OCKXCwIw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + 
"micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.0.tgz", + "integrity": "sha512-vZZio48k7ON0fVS3CUgFatWHoKbbLTK/rT7pzpJ4Bjp5JjkZeasRfrS9wsBdDJK2cJLHMckXZdzPSSr1B8a4oQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.1.tgz", + "integrity": "sha512-bmkNc7z8Wn6kgjZmVHOX3SowGmVdhYS7yBpMnuMnPzDq/6xwVA604DuOXMZTO1lvq01g+Adfa0pE2UKGlxL1XQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.0.tgz", + "integrity": "sha512-r4Sc6leeUTn3P6gk20aFMj2ntPwn6qpDZqWvYmAG6NgvFTIlj4WtrAudLi65qYoaGdXYViXYw2pkmn7QnIFasA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + 
"decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-encode": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.0.tgz", + "integrity": "sha512-pS+ROfCXAGLWCOc8egcBvT0kf27GoWMqtdarNfDcjb6YLuV5cM3ioG45Ys2qOVqeqSbjaKg72vU+Wby3eddPsA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-events-to-acorn": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.2.tgz", + "integrity": "sha512-Fk+xmBrOv9QZnEDguL9OI9/NQQp6Hz4FuQ4YmCb/5V7+9eAh1s6AYSvL20kHkD67YIg7EpE54TiSlcsf3vyZgA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/acorn": "^4.0.0", + "@types/estree": "^1.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-util-events-to-acorn/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.0.tgz", + "integrity": "sha512-xNn4Pqkj2puRhKdKTm8t1YHC/BAjx6CEwRFXntTaRf/x16aqka6ouVoutm+QdkISTlT7e2zU7U4ZdlDLJd2Mcw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": 
"https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.0.tgz", + "integrity": "sha512-2xhYT0sfo85FMrUPtHcPo2rrp1lwbDEEzpx7jiH2xXJLqBuy4H0GgXk5ToU8IEwoROtXuL8ND0ttVa4rNqYK3w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-normalize-identifier/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.0.tgz", + "integrity": "sha512-6KU6qO7DZ7GJkaCgwBNtplXCvGkJToU86ybBAUdavvgsCiG8lSSvYxr9MhwmQ+udpzywHsl4RpGJsYWG1pDOcA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.0.tgz", + "integrity": "sha512-WhYv5UEcZrbAtlsnPuChHUAsu/iBPOVaEVsntLBIdpibO0ddy8OzavZz3iL2xVvBZOpolujSliP65Kq0/7KIYw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.1.tgz", + "integrity": "sha512-jZNtiFl/1aY73yS3UGQkutD0UbhTt68qnRpw2Pifmz5wV9h8gOVsN70v+Lq/f1rKaU/W8pxRe8y8Q9FX1AOe1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-symbol": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz", + "integrity": "sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-types": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.0.tgz", + "integrity": "sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub 
Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromatch": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.7.tgz", + "integrity": "sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "dependencies": { + "mime-db": "~1.33.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/mimic-response": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz", + "integrity": "sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mini-css-extract-plugin": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.9.0.tgz", + "integrity": "sha512-Zs1YsZVfemekSZG+44vBsYTLQORkPMwnlv+aehcxK/NLKC+EGhDB39/YePYYqx/sTk6NnYpuqikhSn7+JIevTA==", + "dependencies": { + "schema-utils": "^4.0.0", + "tapable": "^2.2.1" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + } + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/mrmime": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.0.tgz", + "integrity": "sha512-eu38+hdgojoyq63s+yTpN4XMBdt5l8HhMhc4VKLO9KM5caLIBvUm4thi7fFaxyTmCKeNnXZ5pAlBwCUnhA09uw==", + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/multicast-dns": { + "version": "7.2.5", + "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", + "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", + "dependencies": { + "dns-packet": "^5.2.2", + "thunky": "^1.0.2" + }, + "bin": { + "multicast-dns": "cli.js" + } + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" + }, + "node_modules/no-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", + "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", + "dependencies": { + "lower-case": "^2.0.2", + "tslib": "^2.0.3" + } + }, + "node_modules/node-emoji": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-2.1.3.tgz", + "integrity": "sha512-E2WEOVsgs7O16zsURJ/eH8BqhF029wGpEOnv7Urwdo2wmQanOACwJQh0devF9D9RhoZru0+9JXIS0dBXIAz+lA==", + "dependencies": { + "@sindresorhus/is": "^4.6.0", + "char-regex": "^1.0.2", + "emojilib": "^2.4.0", + "skin-tone": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/node-forge": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", + "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", + "engines": { + "node": ">= 6.13.0" + } + }, + "node_modules/node-releases": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", + "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-range": { + "version": "0.1.2", + "resolved": 
"https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", + "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-url": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-8.0.1.tgz", + "integrity": "sha512-IO9QvjUMWxPQQhs60oOu10CRkWCiZzSUkzbXGGV9pviYl1fXYcvkzQ5jV9z8Y6un8ARoVRl4EtC6v6jNqbaJ/w==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nprogress": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", + "integrity": "sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==" + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz", + "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==", + "dependencies": { + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/obuf": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", + "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + 
"node_modules/on-headers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/open": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", + "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", + "dependencies": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/opener": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", + "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "bin": { + "opener": "bin/opener-bin.js" + } + }, + "node_modules/p-cancelable": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-3.0.0.tgz", + "integrity": "sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==", + "engines": { + "node": ">=12.20" + } + }, + "node_modules/p-limit": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", + "integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", + "dependencies": { + "yocto-queue": "^1.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-6.0.0.tgz", + "integrity": "sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==", + "dependencies": { + "p-limit": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-retry": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", + "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", + "dependencies": { + "@types/retry": "0.12.0", + "retry": "^0.13.1" + }, + "engines": { 
+ "node": ">=8" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/package-json": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/package-json/-/package-json-8.1.1.tgz", + "integrity": "sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA==", + "dependencies": { + "got": "^12.1.0", + "registry-auth-token": "^5.0.1", + "registry-url": "^6.0.0", + "semver": "^7.3.7" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/param-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", + "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-entities": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.1.tgz", + "integrity": "sha512-SWzvYcSJh4d/SGLIOQfZ/CoNv6BTlI6YEQ7Nj82oDVnRpwe/Z/F1EMx42x3JAOwGBlCjeCH0BRJQbQ/opHL17w==", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==" + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse-numeric-range": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz", + "integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==" + }, + "node_modules/parse5": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", + "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==", + "dependencies": { + "entities": "^4.4.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + 
"node_modules/parse5-htmlparser2-tree-adapter": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz", + "integrity": "sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==", + "dependencies": { + "domhandler": "^5.0.2", + "parse5": "^7.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/pascal-case": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", + "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/path-exists": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz", + "integrity": "sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==" + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" + }, + "node_modules/path-to-regexp": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", + "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", + "dependencies": { + "isarray": "0.0.1" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/periscopic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/periscopic/-/periscopic-3.1.0.tgz", + "integrity": "sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^3.0.0", + "is-reference": "^3.0.0" + } + }, + "node_modules/picocolors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", + 
"integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-dir": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-7.0.0.tgz", + "integrity": "sha512-Ie9z/WINcxxLp27BKOCHGde4ITq9UklYKDzVo1nhk5sqGEXU3FpkwP5GM2voTGJkGd9B3Otl+Q4uwSOeSUtOBA==", + "dependencies": { + "find-up": "^6.3.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-up": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz", + "integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==", + "dependencies": { + "find-up": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-up/node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-up/node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss": { + "version": "8.4.38", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz", + "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": 
"^1.0.0", + "source-map-js": "^1.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-calc": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-9.0.1.tgz", + "integrity": "sha512-TipgjGyzP5QzEhsOZUaIkeO5mKeMFpebWzRogWG/ysonUlnHcq5aJe0jOjpfzUU8PeSaBQnrE8ehR0QA5vs8PQ==", + "dependencies": { + "postcss-selector-parser": "^6.0.11", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.2.2" + } + }, + "node_modules/postcss-colormin": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-6.1.0.tgz", + "integrity": "sha512-x9yX7DOxeMAR+BgGVnNSAxmAj98NX/YxEMNFP+SDCEeNLb2r3i6Hh1ksMsnW8Ub5SLCpbescQqn9YEbE9554Sw==", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0", + "colord": "^2.9.3", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-convert-values": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-6.1.0.tgz", + "integrity": "sha512-zx8IwP/ts9WvUM6NkVSkiU902QZL1bwPhaVaLynPtCsOTqp+ZKbNi+s6XJg3rfqpKGA/oc7Oxk5t8pOQJcwl/w==", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-comments": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-6.0.2.tgz", + "integrity": "sha512-65w/uIqhSBBfQmYnG92FO1mWZjJ4GL5b8atm5Yw2UgrwD7HiNiSSNwJor1eCFGzUgYnN/iIknhNRVqjrrpuglw==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-duplicates": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-6.0.3.tgz", + "integrity": "sha512-+JA0DCvc5XvFAxwx6f/e68gQu/7Z9ud584VLmcgto28eB8FqSFZwtrLwB5Kcp70eIoWP/HXqz4wpo8rD8gpsTw==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-empty": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-6.0.3.tgz", + "integrity": "sha512-znyno9cHKQsK6PtxL5D19Fj9uwSzC2mB74cpT66fhgOadEUPyXFkbgwm5tvc3bt3NAy8ltE5MrghxovZRVnOjQ==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-overridden": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-6.0.2.tgz", + "integrity": "sha512-j87xzI4LUggC5zND7KdjsI25APtyMuynXZSujByMaav2roV6OZX+8AaCUcZSWqckZpjAjRyFDdpqybgjFO0HJQ==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-unused": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-6.0.5.tgz", + "integrity": "sha512-wHalBlRHkaNnNwfC8z+ppX57VhvS+HWgjW508esjdaEYr3Mx7Gnn2xA4R/CKf5+Z9S5qsqC+Uzh4ueENWwCVUA==", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { 
+ "postcss": "^8.4.31" + } + }, + "node_modules/postcss-loader": { + "version": "7.3.4", + "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.4.tgz", + "integrity": "sha512-iW5WTTBSC5BfsBJ9daFMPVrLT36MrNiC6fqOZTTaHjBNX6Pfd5p+hSBqe/fEeNd7pc13QiAyGt7VdGMw4eRC4A==", + "dependencies": { + "cosmiconfig": "^8.3.5", + "jiti": "^1.20.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "postcss": "^7.0.0 || ^8.0.1", + "webpack": "^5.0.0" + } + }, + "node_modules/postcss-merge-idents": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-6.0.3.tgz", + "integrity": "sha512-1oIoAsODUs6IHQZkLQGO15uGEbK3EAl5wi9SS8hs45VgsxQfMnxvt+L+zIr7ifZFIH14cfAeVe2uCTa+SPRa3g==", + "dependencies": { + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-merge-longhand": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-6.0.5.tgz", + "integrity": "sha512-5LOiordeTfi64QhICp07nzzuTDjNSO8g5Ksdibt44d+uvIIAE1oZdRn8y/W5ZtYgRH/lnLDlvi9F8btZcVzu3w==", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "stylehacks": "^6.1.1" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-merge-rules": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-6.1.1.tgz", + "integrity": "sha512-KOdWF0gju31AQPZiD+2Ar9Qjowz1LTChSjFFbS+e2sFgc4uHOp3ZvVX4sNeTlk0w2O31ecFGgrFzhO0RSWbWwQ==", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0", + "cssnano-utils": "^4.0.2", + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-font-values": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-6.1.0.tgz", + "integrity": "sha512-gklfI/n+9rTh8nYaSJXlCo3nOKqMNkxuGpTn/Qm0gstL3ywTr9/WRKznE+oy6fvfolH6dF+QM4nCo8yPLdvGJg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-gradients": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-6.0.3.tgz", + "integrity": "sha512-4KXAHrYlzF0Rr7uc4VrfwDJ2ajrtNEpNEuLxFgwkhFZ56/7gaE4Nr49nLsQDZyUe+ds+kEhf+YAUolJiYXF8+Q==", + "dependencies": { + "colord": "^2.9.3", + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-params": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-6.1.0.tgz", + "integrity": "sha512-bmSKnDtyyE8ujHQK0RQJDIKhQ20Jq1LYiez54WiaOoBtcSuflfK3Nm596LvbtlFcpipMjgClQGyGr7GAs+H1uA==", + "dependencies": { + "browserslist": "^4.23.0", + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + 
} + }, + "node_modules/postcss-minify-selectors": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-6.0.4.tgz", + "integrity": "sha512-L8dZSwNLgK7pjTto9PzWRoMbnLq5vsZSTu8+j1P/2GB8qdtGQfn+K1uSvFgYvgh83cbyxT5m43ZZhUMTJDSClQ==", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-modules-extract-imports": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.1.0.tgz", + "integrity": "sha512-k3kNe0aNFQDAZGbin48pL2VNidTF0w4/eASDsxlyspobzU3wZQLOGj7L9gfRe0Jo9/4uud09DsjFNH7winGv8Q==", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-local-by-default": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.5.tgz", + "integrity": "sha512-6MieY7sIfTK0hYfafw1OMEG+2bg8Q1ocHCpoWLqOKj3JXlKu4G7btkmM/B7lFubYkYWmRSPLZi5chid63ZaZYw==", + "dependencies": { + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.1.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-scope": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.2.0.tgz", + "integrity": "sha512-oq+g1ssrsZOsx9M96c5w8laRmvEu9C3adDSjI8oTcbfkrTE8hx/zfyobUoWIxaKPO8bt6S62kxpw5GqypEw1QQ==", + "dependencies": { + "postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-values": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", + "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", + "dependencies": { + "icss-utils": "^5.0.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-normalize-charset": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-6.0.2.tgz", + "integrity": "sha512-a8N9czmdnrjPHa3DeFlwqst5eaL5W8jYu3EBbTTkI5FHkfMhFZh1EGbku6jhHhIzTA6tquI2P42NtZ59M/H/kQ==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-display-values": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-6.0.2.tgz", + "integrity": "sha512-8H04Mxsb82ON/aAkPeq8kcBbAtI5Q2a64X/mnRRfPXBq7XeogoQvReqxEfc0B4WPq1KimjezNC8flUtC3Qz6jg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-positions": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-6.0.2.tgz", + "integrity": "sha512-/JFzI441OAB9O7VnLA+RtSNZvQ0NCFZDOtp6QPFo1iIyawyXg0YI3CYM9HBy1WvwCRHnPep/BvI1+dGPKoXx/Q==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + 
"engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-repeat-style": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-6.0.2.tgz", + "integrity": "sha512-YdCgsfHkJ2jEXwR4RR3Tm/iOxSfdRt7jplS6XRh9Js9PyCR/aka/FCb6TuHT2U8gQubbm/mPmF6L7FY9d79VwQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-string": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-6.0.2.tgz", + "integrity": "sha512-vQZIivlxlfqqMp4L9PZsFE4YUkWniziKjQWUtsxUiVsSSPelQydwS8Wwcuw0+83ZjPWNTl02oxlIvXsmmG+CiQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-timing-functions": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-6.0.2.tgz", + "integrity": "sha512-a+YrtMox4TBtId/AEwbA03VcJgtyW4dGBizPl7e88cTFULYsprgHWTbfyjSLyHeBcK/Q9JhXkt2ZXiwaVHoMzA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-unicode": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-6.1.0.tgz", + "integrity": "sha512-QVC5TQHsVj33otj8/JD869Ndr5Xcc/+fwRh4HAsFsAeygQQXm+0PySrKbr/8tkDKzW+EVT3QkqZMfFrGiossDg==", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-url": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-6.0.2.tgz", + "integrity": "sha512-kVNcWhCeKAzZ8B4pv/DnrU1wNh458zBNp8dh4y5hhxih5RZQ12QWMuQrDgPRw3LRl8mN9vOVfHl7uhvHYMoXsQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-whitespace": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-6.0.2.tgz", + "integrity": "sha512-sXZ2Nj1icbJOKmdjXVT9pnyHQKiSAyuNQHSgRCUgThn2388Y9cGVDR+E9J9iAYbSbLHI+UUwLVl1Wzco/zgv0Q==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-ordered-values": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-6.0.2.tgz", + "integrity": "sha512-VRZSOB+JU32RsEAQrO94QPkClGPKJEL/Z9PCBImXMhIeK5KAYo6slP/hBYlLgrCjFxyqvn5VC81tycFEDBLG1Q==", + "dependencies": { + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-reduce-idents": { + "version": "6.0.3", + "resolved": 
"https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-6.0.3.tgz", + "integrity": "sha512-G3yCqZDpsNPoQgbDUy3T0E6hqOQ5xigUtBQyrmq3tn2GxlyiL0yyl7H+T8ulQR6kOcHJ9t7/9H4/R2tv8tJbMA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-reduce-initial": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-6.1.0.tgz", + "integrity": "sha512-RarLgBK/CrL1qZags04oKbVbrrVK2wcxhvta3GCxrZO4zveibqbRPmm2VI8sSgCXwoUHEliRSbOfpR0b/VIoiw==", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-reduce-transforms": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-6.0.2.tgz", + "integrity": "sha512-sB+Ya++3Xj1WaT9+5LOOdirAxP7dJZms3GRcYheSPi1PiTMigsxHAdkrbItHxwYHr4kt1zL7mmcHstgMYT+aiA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.0.tgz", + "integrity": "sha512-UMz42UD0UY0EApS0ZL9o1XnLhSTtvvvLe5Dc2H2O56fvRZi+KulDyf5ctDhhtYJBGKStV2FL1fy6253cmLgqVQ==", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-sort-media-queries": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-5.2.0.tgz", + "integrity": "sha512-AZ5fDMLD8SldlAYlvi8NIqo0+Z8xnXU2ia0jxmuhxAU+Lqt9K+AlmLNJ/zWEnE9x+Zx3qL3+1K20ATgNOr3fAA==", + "dependencies": { + "sort-css-media-queries": "2.2.0" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.4.23" + } + }, + "node_modules/postcss-svgo": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-6.0.3.tgz", + "integrity": "sha512-dlrahRmxP22bX6iKEjOM+c8/1p+81asjKT+V5lrgOH944ryx/OHpclnIbGsKVd3uWOXFLYJwCVf0eEkJGvO96g==", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "svgo": "^3.2.0" + }, + "engines": { + "node": "^14 || ^16 || >= 18" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-unique-selectors": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-6.0.4.tgz", + "integrity": "sha512-K38OCaIrO8+PzpArzkLKB42dSARtC2tmG6PvD4b1o1Q2E9Os8jzfWFfSy/rixsHwohtsDdFtAWGjFVFUdwYaMg==", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" + }, + "node_modules/postcss-zindex": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-6.0.2.tgz", + "integrity": 
"sha512-5BxW9l1evPB/4ZIc+2GobEBoKC+h8gPGCMi+jxsYvd2x0mjq7wazk6DrP71pStqxE9Foxh5TVnonbWpFZzXaYg==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/pretty-error": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", + "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", + "dependencies": { + "lodash": "^4.17.20", + "renderkid": "^3.0.0" + } + }, + "node_modules/pretty-time": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", + "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/prism-react-renderer": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.3.1.tgz", + "integrity": "sha512-Rdf+HzBLR7KYjzpJ1rSoxT9ioO85nZngQEoFIhL07XhtJHlCU3SOz0GJ6+qvMyQe0Se+BV3qpe6Yd/NmQF5Juw==", + "dependencies": { + "@types/prismjs": "^1.26.0", + "clsx": "^2.0.0" + }, + "peerDependencies": { + "react": ">=16.0.0" + } + }, + "node_modules/prismjs": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", + "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/property-information": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz", + "integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/proto-list": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", + "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==" + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/proxy-addr/node_modules/ipaddr.js": { + "version": "1.9.1", + 
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==" + }, + "node_modules/pupa": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pupa/-/pupa-3.1.0.tgz", + "integrity": "sha512-FLpr4flz5xZTSJxSeaheeMKN/EDzMdK7b8PTOC6a5PYFKTucWbdqjgqaEyH0shFiSJrVB1+Qqi4Tk19ccU6Aug==", + "dependencies": { + "escape-goat": "^4.0.0" + }, + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/qs": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/queue": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz", + "integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==", + "dependencies": { + "inherits": "~2.0.3" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/range-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/raw-body/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": 
"sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dev-utils": { + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz", + "integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==", + "dependencies": { + "@babel/code-frame": "^7.16.0", + "address": "^1.1.2", + "browserslist": "^4.18.1", + "chalk": "^4.1.2", + "cross-spawn": "^7.0.3", + "detect-port-alt": "^1.1.6", + "escape-string-regexp": "^4.0.0", + "filesize": "^8.0.6", + "find-up": "^5.0.0", + "fork-ts-checker-webpack-plugin": "^6.5.0", + "global-modules": "^2.0.0", + "globby": "^11.0.4", + "gzip-size": "^6.0.0", + "immer": "^9.0.7", + "is-root": "^2.1.0", + "loader-utils": "^3.2.0", + "open": "^8.4.0", + "pkg-up": "^3.1.0", + "prompts": "^2.4.2", + "react-error-overlay": "^6.0.11", + "recursive-readdir": "^2.2.2", + "shell-quote": "^1.7.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/react-dev-utils/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/loader-utils": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.3.1.tgz", + "integrity": "sha512-FMJTLMXfCLMLfJxcX9PFqX5qD88Z5MRGaZCVzfuqeZSPsyiBzs+pahDQjbIWz2QIzPZz0NX9Zy4FX3lmK6YHIg==", + "engines": { + "node": ">= 12.13.0" + } + }, + "node_modules/react-dev-utils/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + 
"integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/react-dev-utils/node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/react-error-overlay": { + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz", + "integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg==" + }, + "node_modules/react-fast-compare": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", + "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==" + }, + "node_modules/react-helmet-async": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz", + "integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==", + "dependencies": { + "@babel/runtime": "^7.12.5", + "invariant": "^2.2.4", + "prop-types": "^15.7.2", + "react-fast-compare": "^3.2.0", + "shallowequal": "^1.1.0" + }, + "peerDependencies": { + "react": "^16.6.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + }, + "node_modules/react-json-view-lite": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/react-json-view-lite/-/react-json-view-lite-1.5.0.tgz", + "integrity": "sha512-nWqA1E4jKPklL2jvHWs6s+7Na0qNgw9HCP6xehdQJeg6nPBTFZgGwyko9Q0oj+jQWKTTVRS30u0toM5wiuL3iw==", + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "react": "^16.13.1 || ^17.0.0 || ^18.0.0" + } + }, + 
"node_modules/react-loadable": { + "name": "@docusaurus/react-loadable", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-6.0.0.tgz", + "integrity": "sha512-YMMxTUQV/QFSnbgrP3tjDzLHRg7vsbMn8e9HAa8o/1iXoiomo48b7sk/kkmWEuWNDPJVlKSJRB6Y2fHqdJk+SQ==", + "dependencies": { + "@types/react": "*" + }, + "peerDependencies": { + "react": "*" + } + }, + "node_modules/react-loadable-ssr-addon-v5-slorber": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz", + "integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==", + "dependencies": { + "@babel/runtime": "^7.10.3" + }, + "engines": { + "node": ">=10.13.0" + }, + "peerDependencies": { + "react-loadable": "*", + "webpack": ">=4.41.1 || 5.x" + } + }, + "node_modules/react-router": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", + "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "hoist-non-react-statics": "^3.1.0", + "loose-envify": "^1.3.1", + "path-to-regexp": "^1.7.0", + "prop-types": "^15.6.2", + "react-is": "^16.6.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/react-router-config": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz", + "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==", + "dependencies": { + "@babel/runtime": "^7.1.2" + }, + "peerDependencies": { + "react": ">=15", + "react-router": ">=5" + } + }, + "node_modules/react-router-dom": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz", + "integrity": "sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "loose-envify": "^1.3.1", + "prop-types": "^15.6.2", + "react-router": "5.3.4", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/reading-time": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/reading-time/-/reading-time-1.5.0.tgz", + "integrity": "sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg==" + }, + "node_modules/rechoir": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", + 
"integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==", + "dependencies": { + "resolve": "^1.1.6" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/recursive-readdir": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz", + "integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==", + "dependencies": { + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/regenerate": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" + }, + "node_modules/regenerate-unicode-properties": { + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz", + "integrity": "sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==", + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" + }, + "node_modules/regenerator-transform": { + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", + "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", + "dependencies": { + "@babel/runtime": "^7.8.4" + } + }, + "node_modules/regexpu-core": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", + "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", + "dependencies": { + "@babel/regjsgen": "^0.8.0", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.1.0", + "regjsparser": "^0.9.1", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/registry-auth-token": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-5.0.2.tgz", + "integrity": "sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ==", + "dependencies": { + "@pnpm/npm-conf": "^2.1.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/registry-url": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-6.0.1.tgz", + "integrity": "sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q==", + "dependencies": { + "rc": "1.2.8" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/regjsparser": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", + "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "dependencies": { + "jsesc": "~0.5.0" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/regjsparser/node_modules/jsesc": { + "version": 
"0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", + "bin": { + "jsesc": "bin/jsesc" + } + }, + "node_modules/rehype-raw": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz", + "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-raw": "^9.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/relateurl": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", + "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/remark-directive": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/remark-directive/-/remark-directive-3.0.0.tgz", + "integrity": "sha512-l1UyWJ6Eg1VPU7Hm/9tt0zKtReJQNOA4+iDMAxTyZNWnJnFlbS/7zhiel/rogTLQ2vMYwDzSJa4BiVNqGlqIMA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-directive": "^3.0.0", + "micromark-extension-directive": "^3.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-emoji": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-4.0.1.tgz", + "integrity": "sha512-fHdvsTR1dHkWKev9eNyhTo4EFwbUvJ8ka9SgeWkMPYFX4WoI7ViVBms3PjlQYgw5TLvNQso3GUB/b/8t3yo+dg==", + "dependencies": { + "@types/mdast": "^4.0.2", + "emoticon": "^4.0.1", + "mdast-util-find-and-replace": "^3.0.1", + "node-emoji": "^2.1.0", + "unified": "^11.0.4" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/remark-frontmatter": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/remark-frontmatter/-/remark-frontmatter-5.0.0.tgz", + "integrity": "sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-frontmatter": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-gfm": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.0.tgz", + "integrity": "sha512-U92vJgBPkbw4Zfu/IiW2oTZLSL3Zpv+uI7My2eq8JxKgqraFdU8YUGicEJCEgSbeaG+QDFqIcwwfMTOEelPxuA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-mdx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-3.0.1.tgz", + "integrity": "sha512-3Pz3yPQ5Rht2pM5R+0J2MrGoBSrzf+tJG94N+t/ilfdh8YLyyKYtidAYwTveB20BoHAcwIopOUqhcmh2F7hGYA==", + "dependencies": { + "mdast-util-mdx": "^3.0.0", + "micromark-extension-mdxjs": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + 
"resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.1", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.1.tgz", + "integrity": "sha512-g/osARvjkBXb6Wo0XvAeXQohVta8i84ACbenPpoSsxTOQH/Ae0/RGP4WZgnMH5pMLpsj4FG7OHmcIcXxpza8eQ==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/renderkid": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", + "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", + "dependencies": { + "css-select": "^4.1.3", + "dom-converter": "^0.2.0", + "htmlparser2": "^6.1.0", + "lodash": "^4.17.21", + "strip-ansi": "^6.0.1" + } + }, + "node_modules/renderkid/node_modules/css-select": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", + "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/renderkid/node_modules/dom-serializer": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", + "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domhandler": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", + "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "dependencies": { + "dom-serializer": "^1.0.1", + 
"domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/htmlparser2": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", + "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-like": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz", + "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==", + "engines": { + "node": "*" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + }, + "node_modules/resolve": { + "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-alpn": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", + "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==" + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve-pathname": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", + "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" + }, + "node_modules/responselike": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz", + "integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==", + "dependencies": { + "lowercase-keys": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/retry": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", + "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rtl-detect": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.1.2.tgz", + "integrity": "sha512-PGMBq03+TTG/p/cRB7HCLKJ1MgDIi07+QU1faSjiYRfmY5UsAttV9Hs08jDAHVwcOwmVLcSJkpwyfXszVjWfIQ==" + }, + "node_modules/rtlcss": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-4.3.0.tgz", + "integrity": "sha512-FI+pHEn7Wc4NqKXMXFM+VAYKEj/mRIcW4h24YVwVtyjI+EqGrLc2Hx/Ny0lrZ21cBWU2goLy36eqMcNj3AQJig==", + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0", + "postcss": "^8.4.21", + "strip-json-comments": "^3.1.1" + }, + "bin": { + "rtlcss": "bin/rtlcss.js" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/sax": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.1.tgz", + "integrity": "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==" + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + 
"dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/schema-utils": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/search-insights": { + "version": "2.17.2", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.17.2.tgz", + "integrity": "sha512-zFNpOpUO+tY2D85KrxJ+aqwnIfdEGi06UH2+xEb+Bp9Mwznmauqc9djbnBibJO5mpfUPPa8st6Sx65+vbeO45g==", + "peer": true + }, + "node_modules/section-matter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", + "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", + "dependencies": { + "extend-shallow": "^2.0.1", + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/select-hose": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", + "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==" + }, + "node_modules/selfsigned": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.4.1.tgz", + "integrity": "sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q==", + "dependencies": { + "@types/node-forge": "^1.3.0", + "node-forge": "^1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-4.0.0.tgz", + "integrity": "sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA==", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/send": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", + "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/send/node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/send/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/serve-handler": { + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz", + "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==", + "dependencies": { + "bytes": "3.0.0", + "content-disposition": "0.5.2", + "fast-url-parser": "1.1.3", + "mime-types": "2.1.18", + "minimatch": "3.1.2", + "path-is-inside": "1.0.2", + "path-to-regexp": "2.2.1", + "range-parser": "1.2.0" + } + }, + "node_modules/serve-handler/node_modules/path-to-regexp": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", + "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==" + }, + "node_modules/serve-index": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", + "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==", + "dependencies": { + "accepts": "~1.3.4", + "batch": "0.6.1", + "debug": "2.6.9", + "escape-html": "~1.0.3", + "http-errors": "~1.6.2", + "mime-types": "~2.1.17", + "parseurl": "~1.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/serve-index/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/serve-index/node_modules/depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/http-errors": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", + "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", + "dependencies": { + "depd": "~1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.0", + "statuses": ">= 1.4.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": 
"sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + }, + "node_modules/serve-index/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/serve-index/node_modules/setprototypeof": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", + "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" + }, + "node_modules/serve-index/node_modules/statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-static": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", + "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "dependencies": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.18.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/shallow-clone": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "dependencies": { + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/shell-quote": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz", + "integrity": 
"sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/shelljs": { + "version": "0.8.5", + "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz", + "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==", + "dependencies": { + "glob": "^7.0.0", + "interpret": "^1.0.0", + "rechoir": "^0.6.2" + }, + "bin": { + "shjs": "bin/shjs" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/side-channel": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" + }, + "node_modules/sirv": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/sirv/-/sirv-2.0.4.tgz", + "integrity": "sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ==", + "dependencies": { + "@polka/url": "^1.0.0-next.24", + "mrmime": "^2.0.0", + "totalist": "^3.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + }, + "node_modules/sitemap": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.2.tgz", + "integrity": "sha512-ARCqzHJ0p4gWt+j7NlU5eDlIO9+Rkr/JhPFZKKQ1l5GCus7rJH4UdrlVAh0xC/gDS/Qir2UMxqYNHtsKr2rpCw==", + "dependencies": { + "@types/node": "^17.0.5", + "@types/sax": "^1.2.1", + "arg": "^5.0.0", + "sax": "^1.2.4" + }, + "bin": { + "sitemap": "dist/cli.js" + }, + "engines": { + "node": ">=12.0.0", + "npm": ">=5.6.0" + } + }, + "node_modules/sitemap/node_modules/@types/node": { + "version": "17.0.45", + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", + "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==" + }, + "node_modules/skin-tone": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/skin-tone/-/skin-tone-2.0.0.tgz", + "integrity": "sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA==", + "dependencies": { + "unicode-emoji-modifier-base": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/snake-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/snake-case/-/snake-case-3.0.4.tgz", + "integrity": "sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==", + "dependencies": 
{ + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/sockjs": { + "version": "0.3.24", + "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", + "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", + "dependencies": { + "faye-websocket": "^0.11.3", + "uuid": "^8.3.2", + "websocket-driver": "^0.7.4" + } + }, + "node_modules/sort-css-media-queries": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.2.0.tgz", + "integrity": "sha512-0xtkGhWCC9MGt/EzgnvbbbKhqWjl1+/rncmhTh5qCpbYguXh6S/qwePfv/JQ8jePXXmqingylxoC49pCkSPIbA==", + "engines": { + "node": ">= 6.3.0" + } + }, + "node_modules/source-map": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz", + "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/source-map-js": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/source-map-support/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/spdy": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", + "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", + "dependencies": { + "debug": "^4.1.0", + "handle-thing": "^2.0.0", + "http-deceiver": "^1.2.7", + "select-hose": "^2.0.0", + "spdy-transport": "^3.0.0" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/spdy-transport": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", + "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", + "dependencies": { + "debug": "^4.1.0", + "detect-node": "^2.0.4", + "hpack.js": "^2.1.6", + "obuf": "^1.1.2", + "readable-stream": "^3.0.6", + "wbuf": "^1.7.3" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" + }, + "node_modules/srcset": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/srcset/-/srcset-4.0.0.tgz", + "integrity": "sha512-wvLeHgcVHKO8Sc/H/5lkGreJQVeYMm9rlmt8PuR1xE31rIuXhuzznUUqAt8MqLhB3MqJdFzlNAfpcWnxiFUcPw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/std-env": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz", + "integrity": "sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==" + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/stringify-object": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", + "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", + "dependencies": { + "get-own-enumerable-property-symbols": "^3.0.0", + "is-obj": "^1.0.1", + "is-regexp": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + 
"dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom-string": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", + "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-to-object": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.4.4.tgz", + "integrity": "sha512-HYNoHZa2GorYNyqiCaBgsxvcJIn7OHq6inEga+E6Ke3m5JkoqpQbnFssk4jwe+K7AhGa2fcha4wSOf1Kn01dMg==", + "dependencies": { + "inline-style-parser": "0.1.1" + } + }, + "node_modules/stylehacks": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-6.1.1.tgz", + "integrity": "sha512-gSTTEQ670cJNoaeIp9KX6lZmm8LJ3jPB5yJmX8Zq/wQxOsAFXV3qjWzHas3YYk1qesuVIyYWWUpZ0vSE/dTSGg==", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/svg-parser": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", + "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==" + }, + "node_modules/svgo": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-3.3.2.tgz", + "integrity": "sha512-OoohrmuUlBs8B8o6MB2Aevn+pRIH9zDALSR+6hhqVfa6fRwG/Qw9VUMSMW9VNg2CFc/MTIfabtdOVl9ODIJjpw==", + "dependencies": { + "@trysound/sax": "0.2.0", + "commander": "^7.2.0", + "css-select": "^5.1.0", + "css-tree": "^2.3.1", + "css-what": "^6.1.0", + "csso": "^5.0.5", + "picocolors": "^1.0.0" + }, + "bin": { + "svgo": "bin/svgo" + }, + "engines": { + "node": ">=14.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/svgo" + } + }, + "node_modules/svgo/node_modules/commander": { + "version": "7.2.0", + "resolved": 
"https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/terser": { + "version": "5.31.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.31.1.tgz", + "integrity": "sha512-37upzU1+viGvuFtBo9NPufCb9dwM0+l9hMxYyWfBA+fbwrPqNJAhbZ6W47bBFnZHKHTUBnMvi87434qq+qnxOg==", + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.8.2", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.3.10", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz", + "integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.20", + "jest-worker": "^27.4.5", + "schema-utils": "^3.1.1", + "serialize-javascript": "^6.0.1", + "terser": "^5.26.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/terser-webpack-plugin/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/terser-webpack-plugin/node_modules/jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/terser-webpack-plugin/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/terser-webpack-plugin/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": 
"sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/terser-webpack-plugin/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==" + }, + "node_modules/thunky": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", + "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==" + }, + "node_modules/tiny-warning": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", + "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/totalist": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz", + "integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + 
"funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/tslib": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz", + "integrity": "sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==" + }, + "node_modules/type-fest": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "dependencies": { + "is-typedarray": "^1.0.0" + } + }, + "node_modules/typescript": { + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.2.tgz", + "integrity": "sha512-NcRtPEOsPFFWjobJEtfihkLCZCXZt/os3zf8nTxjVH3RvTSxjrCamJpbExGvYOF+tFHc3pA65qpdwPbzjohhew==", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", + "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-emoji-modifier-base": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz", + 
"integrity": "sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "dependencies": { + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-value-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", + "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unique-string": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-3.0.0.tgz", + "integrity": "sha512-VGXBUVwxKMBUznyffQweQABPRRW1vHZAbadFZud4pLFAqRGvv/96vafgjWFqzourzr8YonlQiPgH0YCJfawoGQ==", + "dependencies": { + "crypto-random-string": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", + "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz", + "integrity": "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + 
"url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", + "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz", + "integrity": "sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.1.2", + "picocolors": "^1.0.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/update-notifier": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-6.0.2.tgz", + "integrity": "sha512-EDxhTEVPZZRLWYcJ4ZXjGFN0oP7qYvbXWzEgRm/Yql4dHX5wDbvh89YHP6PK1lzZJYrMtXUuZZz8XGK+U6U1og==", + "dependencies": { + "boxen": "^7.0.0", + "chalk": "^5.0.1", + "configstore": "^6.0.0", + "has-yarn": "^3.0.0", + "import-lazy": "^4.0.0", + "is-ci": "^3.0.1", + "is-installed-globally": "^0.4.0", + "is-npm": "^6.0.0", + "is-yarn-global": "^0.4.0", + "latest-version": "^7.0.0", + "pupa": "^3.1.0", + "semver": "^7.3.7", + "semver-diff": "^4.0.0", + "xdg-basedir": "^5.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/yeoman/update-notifier?sponsor=1" + } + }, + 
"node_modules/update-notifier/node_modules/boxen": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-7.1.1.tgz", + "integrity": "sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^7.0.1", + "chalk": "^5.2.0", + "cli-boxes": "^3.0.0", + "string-width": "^5.1.2", + "type-fest": "^2.13.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/camelcase": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz", + "integrity": "sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/chalk": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz", + "integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/uri-js/node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/url-loader": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz", + "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==", + "dependencies": { + "loader-utils": "^2.0.0", + "mime-types": "^2.1.27", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "file-loader": "*", + "webpack": "^4.0.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "file-loader": { + "optional": true + } + } + }, + "node_modules/url-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/url-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/url-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": 
"https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/url-loader/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/utila": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", + "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==" + }, + "node_modules/utility-types": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.11.0.tgz", + "integrity": "sha512-6Z7Ma2aVEWisaL6TvBCy7P8rm2LQoPv6dJ7ecIaIixHcwfbJ0x7mWdbcwlIM5IGQxPZSFYeqRCqlOOeKoJYMkw==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/value-equal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", + "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==" + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "dependencies": { + "@types/unist": "^3.0.0", + 
"vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-location": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz", + "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.2.tgz", + "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/watchpack": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.1.tgz", + "integrity": "sha512-8wrBCMtVhqcXP2Sup1ctSkga6uc2Bx0IIvKyT7yTFier5AXHooSI+QyQQAtTb7+E0IUCCKyTFmXqdqgum2XWGg==", + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/wbuf": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", + "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", + "dependencies": { + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/web-namespaces": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", + "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/webpack": { + "version": "5.92.1", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.92.1.tgz", + "integrity": "sha512-JECQ7IwJb+7fgUFBlrJzbyu3GEuNBcdqr1LD7IbSzwkSmIevTm8PF+wej3Oxuz/JFBUZ6O1o43zsPkwm1C4TmA==", + "dependencies": { + "@types/eslint-scope": "^3.7.3", + "@types/estree": "^1.0.5", + "@webassemblyjs/ast": "^1.12.1", + "@webassemblyjs/wasm-edit": "^1.12.1", + "@webassemblyjs/wasm-parser": "^1.12.1", + "acorn": "^8.7.1", + "acorn-import-attributes": "^1.9.5", + "browserslist": "^4.21.10", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.17.0", + "es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.11", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^3.2.0", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.3.10", + "watchpack": "^2.4.1", + "webpack-sources": "^3.2.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-bundle-analyzer": { + "version": "4.10.2", + "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.2.tgz", + "integrity": 
"sha512-vJptkMm9pk5si4Bv922ZbKLV8UTT4zib4FPgXMhgzUny0bfDDkLXAVQs3ly3fS4/TN9ROFtb0NFrm04UXFE/Vw==", + "dependencies": { + "@discoveryjs/json-ext": "0.5.7", + "acorn": "^8.0.4", + "acorn-walk": "^8.0.0", + "commander": "^7.2.0", + "debounce": "^1.2.1", + "escape-string-regexp": "^4.0.0", + "gzip-size": "^6.0.0", + "html-escaper": "^2.0.2", + "opener": "^1.5.2", + "picocolors": "^1.0.0", + "sirv": "^2.0.3", + "ws": "^7.3.1" + }, + "bin": { + "webpack-bundle-analyzer": "lib/bin/analyzer.js" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/webpack-bundle-analyzer/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/webpack-dev-middleware": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", + "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", + "dependencies": { + "colorette": "^2.0.10", + "memfs": "^3.4.3", + "mime-types": "^2.1.31", + "range-parser": "^1.2.1", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-server": { + "version": "4.15.2", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz", + "integrity": "sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==", + "dependencies": { + "@types/bonjour": "^3.5.9", + "@types/connect-history-api-fallback": "^1.3.5", + "@types/express": "^4.17.13", + "@types/serve-index": "^1.9.1", + "@types/serve-static": "^1.13.10", + "@types/sockjs": "^0.3.33", + "@types/ws": "^8.5.5", + "ansi-html-community": "^0.0.8", + "bonjour-service": "^1.0.11", + "chokidar": "^3.5.3", + "colorette": "^2.0.10", + "compression": "^1.7.4", + "connect-history-api-fallback": "^2.0.0", + "default-gateway": "^6.0.3", + "express": "^4.17.3", + "graceful-fs": "^4.2.6", + "html-entities": "^2.3.2", + "http-proxy-middleware": "^2.0.3", + "ipaddr.js": "^2.0.1", + "launch-editor": "^2.6.0", + "open": "^8.0.9", + "p-retry": "^4.5.0", + "rimraf": "^3.0.2", + "schema-utils": "^4.0.0", + "selfsigned": 
"^2.1.1", + "serve-index": "^1.9.1", + "sockjs": "^0.3.24", + "spdy": "^4.0.2", + "webpack-dev-middleware": "^5.3.4", + "ws": "^8.13.0" + }, + "bin": { + "webpack-dev-server": "bin/webpack-dev-server.js" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.37.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "webpack": { + "optional": true + }, + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-dev-server/node_modules/ws": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/webpack-merge": { + "version": "5.10.0", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.10.0.tgz", + "integrity": "sha512-+4zXKdx7UnO+1jaN4l2lHVD+mFvnlZQP/6ljaJVb4SZiwIKeUnrT5l0gkT8z+n4hKpC+jpOv6O9R+gLtag7pSA==", + "dependencies": { + "clone-deep": "^4.0.1", + "flat": "^5.0.2", + "wildcard": "^2.0.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/webpack-sources": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", + "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/webpack/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/webpack/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/webpack/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/webpack/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + 
"node_modules/webpack/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/webpackbar": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz", + "integrity": "sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==", + "dependencies": { + "chalk": "^4.1.0", + "consola": "^2.15.3", + "pretty-time": "^1.1.0", + "std-env": "^3.0.1" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "webpack": "3 || 4 || 5" + } + }, + "node_modules/websocket-driver": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", + "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", + "dependencies": { + "http-parser-js": ">=0.5.1", + "safe-buffer": ">=5.1.0", + "websocket-extensions": ">=0.1.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/websocket-extensions": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", + "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/widest-line": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", + "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", + "dependencies": { + "string-width": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wildcard": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz", + "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==" + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "dependencies": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, + "node_modules/ws": { + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", + "engines": { + "node": ">=8.3.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": "^5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xdg-basedir": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-5.1.0.tgz", + "integrity": "sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/xml-js": { + "version": "1.6.11", + "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz", + "integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==", + "dependencies": { + "sax": "^1.2.4" + }, + "bin": { + "xml-js": "bin/cli.js" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" + }, + "node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/yocto-queue": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", + "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + 
} + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/docs_v2/package.json b/docs_v2/package.json new file mode 100644 index 00000000..64b6fe3b --- /dev/null +++ b/docs_v2/package.json @@ -0,0 +1,44 @@ +{ + "name": "langchain-dart", + "version": "0.0.0", + "private": true, + "scripts": { + "docusaurus": "docusaurus", + "start": "docusaurus start", + "build": "docusaurus build", + "swizzle": "docusaurus swizzle", + "deploy": "docusaurus deploy", + "clear": "docusaurus clear", + "serve": "docusaurus serve", + "write-translations": "docusaurus write-translations", + "write-heading-ids": "docusaurus write-heading-ids" + }, + "dependencies": { + "@docusaurus/core": "^3.5.2", + "@docusaurus/preset-classic": "^3.5.2", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "prism-react-renderer": "^2.3.0", + "react": "^18.0.0", + "react-dom": "^18.0.0" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "^3.5.2", + "@docusaurus/types": "^3.5.2" + }, + "browserslist": { + "production": [ + ">0.5%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 3 chrome version", + "last 3 firefox version", + "last 5 safari version" + ] + }, + "engines": { + "node": ">=18.0" + } +} diff --git a/docs_v2/sidebars.js b/docs_v2/sidebars.js new file mode 100644 index 00000000..72e4f826 --- /dev/null +++ b/docs_v2/sidebars.js @@ -0,0 +1,30 @@ +/** + * Creating a sidebar enables you to: + - create an ordered group of docs + - render a sidebar for each doc of that group + - provide next/previous navigation + + The sidebars can be generated from the filesystem, or explicitly defined here. + + Create as many sidebars as you want. + */ + +// @ts-check + +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const sidebars = { + // By default, Docusaurus generates a sidebar from the docs folder structure + tutorialSidebar: [{type: 'autogenerated', dirName: '.'}], + integrations: [{type: 'autogenerated', dirName: '.'}] + + // tutorialSidebar: [ + // 'intro', + // 'tutorials/index', + // 'how_to/index', + // 'concepts', + // 'integrations/index', + // ], + +}; + +export default sidebars; \ No newline at end of file diff --git a/docs_v2/src/components/HomepageFeatures/index.js b/docs_v2/src/components/HomepageFeatures/index.js new file mode 100644 index 00000000..acc76219 --- /dev/null +++ b/docs_v2/src/components/HomepageFeatures/index.js @@ -0,0 +1,64 @@ +import clsx from 'clsx'; +import Heading from '@theme/Heading'; +import styles from './styles.module.css'; + +const FeatureList = [ + { + title: 'Easy to Use', + Svg: require('@site/static/img/undraw_docusaurus_mountain.svg').default, + description: ( + <> + Docusaurus was designed from the ground up to be easily installed and + used to get your website up and running quickly. + + ), + }, + { + title: 'Focus on What Matters', + Svg: require('@site/static/img/undraw_docusaurus_tree.svg').default, + description: ( + <> + Docusaurus lets you focus on your docs, and we'll do the chores. Go + ahead and move your docs into the docs directory. + + ), + }, + { + title: 'Powered by React', + Svg: require('@site/static/img/undraw_docusaurus_react.svg').default, + description: ( + <> + Extend or customize your website layout by reusing React. 
Docusaurus can
+        be extended while reusing the same header and footer.
+      </>
+    ),
+  },
+];
+
+function Feature({Svg, title, description}) {
+  return (
+    <div className={clsx('col col--4')}>
+      <div className="text--center">
+        <Svg className={styles.featureSvg} role="img" />
+      </div>
+      <div className="text--center padding-horiz--md">
+        <Heading as="h3">{title}</Heading>
+        <p>{description}</p>
+      </div>
+    </div>
+  );
+}
+
+export default function HomepageFeatures() {
+  return (
+    <section className={styles.features}>
+      <div className="container">
+        <div className="row">
+          {FeatureList.map((props, idx) => (
+            <Feature key={idx} {...props} />
+          ))}
+        </div>
+      </div>
+    </section>
    + ); +} diff --git a/docs_v2/src/components/HomepageFeatures/styles.module.css b/docs_v2/src/components/HomepageFeatures/styles.module.css new file mode 100644 index 00000000..b248eb2e --- /dev/null +++ b/docs_v2/src/components/HomepageFeatures/styles.module.css @@ -0,0 +1,11 @@ +.features { + display: flex; + align-items: center; + padding: 2rem 0; + width: 100%; +} + +.featureSvg { + height: 200px; + width: 200px; +} diff --git a/docs_v2/src/css/custom.css b/docs_v2/src/css/custom.css new file mode 100644 index 00000000..2bc6a4cf --- /dev/null +++ b/docs_v2/src/css/custom.css @@ -0,0 +1,30 @@ +/** + * Any CSS included here will be global. The classic template + * bundles Infima by default. Infima is a CSS framework designed to + * work well for content-centric websites. + */ + +/* You can override the default Infima variables here. */ +:root { + --ifm-color-primary: #2e8555; + --ifm-color-primary-dark: #29784c; + --ifm-color-primary-darker: #277148; + --ifm-color-primary-darkest: #205d3b; + --ifm-color-primary-light: #33925d; + --ifm-color-primary-lighter: #359962; + --ifm-color-primary-lightest: #3cad6e; + --ifm-code-font-size: 95%; + --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1); +} + +/* For readability concerns, you should choose a lighter palette in dark mode. */ +[data-theme='dark'] { + --ifm-color-primary: #25c2a0; + --ifm-color-primary-dark: #21af90; + --ifm-color-primary-darker: #1fa588; + --ifm-color-primary-darkest: #1a8870; + --ifm-color-primary-light: #29d5b0; + --ifm-color-primary-lighter: #32d8b4; + --ifm-color-primary-lightest: #4fddbf; + --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3); +} diff --git a/docs_v2/src/pages/index.js b/docs_v2/src/pages/index.js new file mode 100644 index 00000000..176f838c --- /dev/null +++ b/docs_v2/src/pages/index.js @@ -0,0 +1,7 @@ +import { Redirect } from "@docusaurus/router"; +import useBaseUrl from "@docusaurus/useBaseUrl"; +import React from "react"; + +export default function Home() { + return ; +} diff --git a/docs_v2/src/pages/index.module.css b/docs_v2/src/pages/index.module.css new file mode 100644 index 00000000..9f71a5da --- /dev/null +++ b/docs_v2/src/pages/index.module.css @@ -0,0 +1,23 @@ +/** + * CSS files with the .module.css suffix will be treated as CSS modules + * and scoped locally. + */ + +.heroBanner { + padding: 4rem 0; + text-align: center; + position: relative; + overflow: hidden; +} + +@media screen and (max-width: 996px) { + .heroBanner { + padding: 2rem; + } +} + +.buttons { + display: flex; + align-items: center; + justify-content: center; +} diff --git a/docs_v2/src/pages/markdown-page.md b/docs_v2/src/pages/markdown-page.md new file mode 100644 index 00000000..9756c5b6 --- /dev/null +++ b/docs_v2/src/pages/markdown-page.md @@ -0,0 +1,7 @@ +--- +title: Markdown page example +--- + +# Markdown page example + +You don't need React to write simple standalone pages. 
diff --git a/docs_v2/static/.nojekyll b/docs_v2/static/.nojekyll
new file mode 100644
index 00000000..e69de29b
diff --git a/docs_v2/static/img/favicon.ico b/docs_v2/static/img/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..4c29611109064a93f09866b3b89d4486b8db1866
GIT binary patch
literal 15406
[binary patch data for docs_v2/static/img/favicon.ico (15406 bytes) omitted]
zU815Y?4a~eF-jOs#n_Q0>NZh-W*T@zGVauds17`cw=ixq zXfNI&s>SWICH{5_UD49&oPf0W>TG=_(ndcI`D z!))yCgRJ1)ON8Suo`vMs$5Ge)DmfPgCIG!7V6p#w>F&Kq6XPkMG~Uefz$i za!p~SSA@jK^X<>0zCfbMXs)&M&PMW8D9-#JcY!$0=)ycEjCG`Vfya5R8ZvIgC!KX7 znGd4uESV&Q5?E8O- zNJ{Rt!gE9BlDr9X;D8zrDf4zDks)F`OeNrh|5LT}YC=eYQAzYyU;tV2=u7i+ls7p9 zbG=-4%{2x(m;F8lqo~@mJD3fO?-Y<;3pKt!Zs1V}_wadVb?8Wv%H`eBx2$ezCa}u_ zft~IScTFVG-`hiyq5>iHO?L#TLsqM9K9X$j?wvcgN(1ppN5MCUZkxFNK)TwUN1X)U zvLx0B&t3*uL6f%nS&5RnYX(0}6@*qEJ~!svr{(?2pDp9u`|{kQ0GTI)&+ai_TN1F+ z=eA6~^9)eQs6}9RwHuFkCG9-w4I~24S#4?OsN)s|SUzh{1RF-SAq`8JK6OfQ@jBw9 zyfw99l7GkLCw1lKgB@N$D;Q}s2$FVNY>yJd=EpDp2#d8E^~0M4 zb~J2vkp`4ZkXtO}@Z`C`MV>RoQs#S&Y`Fx=zKQOhO9Xb-;zOlQuOa6PTNF0XR>ke8 zz>UmXbH+{a6YRxgIu?FobPeGb(il zn?|$#-&cX;Is)mNO+3Qv=`8p*Vz{}t#UtjvJKVf4nI5zIs=4pKw(H$#nH_WRXlkNd zvr;+mpw;3IhdxT4xt$XgkB;nr^BXbqX?}hO`wX_3meDpBKKkUFZ|n&&ylpo3Rr9jX zmKGizYr%257jON6)O4i6qb>2x_(KK}D)eDV3TtHADR3y-dJ;89PPT|pHDj(GYb zc#7L#^AJd{_om?5kLXh;lf$KU5L7)QqY;zF_zP`_!!$uEbi!$E*#%Z%z|Ghd-l`m| zHbbTYU$p!m%0jhGCG28UYLplNo$|p^vPZxufU1LF;*tDz&Kv8@@`$&WQjHXnPa5{` ze(!tV|NalY`$LZFVBP(w^5!A117{9qSM`kIk?(nebykjy%-dJ}b8|z@o9Redipnx1 z&;zWYs&{~0Xe}UBJ@NMr$VAo<| zG=lH>+WT3vKDW7J`4`zB^Om2MHqh-m8Vkc;+^|S`e5td*jyg!~-b!1db_^gD0z-ZX zIB&CRuzBiATH|8zPLekw$-eN=%!H>`$BfGh=~-6VIbJJV;!py_lV?<8r00%|Z5kz1 zM9j*2O0$}Jb46kV-!$hLMFCRX)af68_b2cF@W&%xBp0Jcn&`nzVoa(I;i~?411lYm z?GdpYR1{8M?u*g)fBLhZ>+_4Be(#slNaTtXY>~&4)M4aml_?1)JUihD#&KAnehV*} z8VWWQOE)+`;K3n=h-eNujc5_DQ^gjyTy(w?Wuz2dt%wpBs+GF}mmYP9p{gH+pL=D5 zcLCDEcnPu1;ipF5-r;0_Oij)YH#hwlITp_LbcFK31pPbr~2-3W# zeOpqaqZzfakamKlkuFTUWsx*@YD~emow)ukyFr&DwY9xMZS-q+biE7sND6x8T!{}J zQUEGIB_VzF6)p~R~n{`eE6$h|N1w-IdbIJzxwS@NGU}gR{*3Wi&J!lLOCjDN5wnq zuhM`=Nbtk&{o?08|Jm<;_xnHm;rGA)-S2+(^Pm0khsjV4x9c0lvsW%POu&LBcA3=| zN($-;v{%C7rIk|*!=XzQk0^%V({suxnq*2PQCzCUt;yy?csM5teI2aO}y9dM<* zx=~#su#3;53Zaym{ebkX?mRlyg+~a!s}I>e-%3>AGdz-3-LQT81n6nHo6!l{A@SMT zpvIt}UjEtB(tK~<8#}g6t%i|K96dR10+Ja8NJkgfssF^;?mTK1d|#~z>{u(!UIr3CJYB2dI6mr zbXY2)j?Pg4Q;P))!h2#2xY%gLU2jMZ`gDI$80DSz%W49KOf6H@+hx3w`i^RO5}AQi zgku`n_Uk|W>G07%m!IGK=GVXaamp(4&Zzp38idMoNb2ZBi{RTxBWgP5>7V@K=Rf=X zU;g^H^7Gff{`D_^`2FvG_Orhiisg@xZ3hKobhBFG7pO6E6UZa5L#8iNktoew5Rk0` zMV25>U?CklK4+i3&mrq~=A7UBX0X=i zY6NznEfE3c{Xl{q+1XfAITdCfuN4H{`}Du?FSc>?&;2jntDi@Qn#R~K+Qshf zscuXSBl!04$R-wh14#%ZyFtMI+58lhVmx518dz(l8jmhIA)+2{etQ?w(_;A}%ALsi zX2YEU%lU*zw(Ca_=*Ha8OERP`g_6QOA#QUGJ z1CU$|u@1pEc5Y`V1;%)JWM5C9rJmb;TKnA}L#OOYe4(%r zK&On_c@)+aQ*GIXLvFCa<&rwOSnx9%R^Ukx4Wm*e#3# z;g0olwd=4jg8ncrExHSNppI*jJikt!_ zC$e6LZ&uVsB7$kI=1OtdaQHS8kRs3<M!5;RBALdPb2T^eBjNR8ogPQJW}Y_KrHA zc8o73?=d;W38aS-kUTtsn#`jxW5%z0^Z!r(rGMpL`Ir9F?l#+zZXR)!O!{v>@rh6T z4<+r_iYc$dcNdR38P|FFLjY-vX*HVM%+#2$ z33D=c6o#Op3t85np)fMuh8V#s>|`7(`8DyCFswp_9B@;uAEMVKc6g)#Qjh}JP^qxyvgq#it~9(L&6%k@_uG9dL3e0Qk0YaSi;g;Mq5=aF4Ddq4ad zKFbU6^T;Ex>*f)(4_+R*fizYFNl~wc9GVHSVi%xd_aSA+YjLUx#RmbyOwEWKx|S(2 zEiCjS`FDpmoj@a%I)l=F1xB`ZA?0I{c1ayih#EjnN!h&wMbc!$^P@dW zue|!#Fa6}7Y_5LlrJ6|Jt%3B~>r=R$qy=ZGLnBgsMi)}qQWe(m;~y?`myazx^T zJR*6VbN+0?%r17;uN*4uw8WsV`{kYqNF&bM@1)dy1=1&df^SCy5(IWGbb$sA!FNZO zE`R_}p)Ys0sbZ~5eBc(?L8js3QFoi{XNCY$^5^Nuym6wqw37USc7Bfkj-2uX92l&q zs<0u|PC13hDhVRe{~*&sw2=(luy=uS))gCE<8Yl!4l_U@D0@~#b6zr^l;AS)Q_IRP z;3@eA!_xT1)=M>zKJ`ytM$Yc1Z@hjWzX!)lpch7<5uhmC%&IG(C6^Kx_ncWcI|z?P zYaESlZSO3ttt~IDY$hUIICy4$YFZ+d-jd=^<)+UBmzibGt|^S2E%7B4zz!=NceTp~Bz&a%hXIo9!nY3oc$iWi+=meJl>SGhc+6nJOh3?e z$Rq3=mHfM#60QbH?3^{>gQ%V9&7BR!23&t=lnY=HQiotoCQMN;+vyDfzx%~hQEb0( zjwYv<>L_LVrSE?CC!c!x`OKpMkY1n5GC~yt;Rf2#PAo@js3=F>TFoHT0eg-uoDJ3V zft`!18(S+&S7v85%Jiu#6PxP;Bwd&|vv`~?;aT%2gkiAC4lQ=GFzb`VohX7CtStH| 
zboU&u8>ly+P@9gb8dVIsFng&1aj;5Fh(s`=)(~0)2i`-?V(bMYGmn{^rq0(amRQK9 z@a$s(Y19v-d)a{0DfsR@tBXfyK=QET7n@2c?;?c)3wDA4_$-yJ9N}-Dg$fG#~-+mrp zx&udBeCuw4hQl_AXqFHAdE_%+eIFhT2PAgkTSEneLK11bjQTe-doY+$Lkx5(iJ&LM zIPV}ahXzvU!bq`j`{rz?s7qc5b`C;irdZ>>+D7Fz&tp{-h^gSB!-;&bRj{2(1;N`j zj<&V|qXCcVUFis-T671u~1+ z;t-|4fz^wvYMCyg_>}?&?_}z&iYXY;WFGCmZARO2r=t)dY64^#9Ue%XJOUCtQz#(# z_VdW!faE%-xlS9$crVG_fP_Mur%;wY(hmiYkOu1^_~xQL?s)?ec!cANDQ9;Vf!*Eu zlH87}-GYom4shb?hXYbzgwgJMzoVy6OrhIK*qY4)jU|XJinuK>w*pNY8dcln^Rx)P zAAVcYjtzQhyuf&b0-y@UaGgQ>;ls#rXow|ArUYtfD3gd(Y9W#elB5_3Pef8{r|_6k1yCi&u&^TnxyqTettqrl zg|tbOOd5=b4Pc2#o@Gymz!2_;$E`(%ZR-l?nnnH@AMoyg^l`u7yPGj1$oM&U7vw}!2ju1lqdHab$Gz;6z- zLzVNB>@H~@Q|K#vFAX?P z`0xK%2S^Q&u1;c%IwbPwxnfmG1slD|Ng)L#0w?sNiANV}9$lM@cMQCk+!))aL!{BQ zxquO_A_6(2=KG7Sq3kJfmY4(#0V4Ec3I!wNaS~sPhMZ;OltMVFMpOO5mu7}D251Tq zx7cne#Jre^CG6+T4{V@HDWFx+V^X}e?xU@&QZTBqasdeggzT2;w6z=QFZlK}#6s}x zo60S7eP_wEC)NoP}Tpb2P;8rV~6`J<2v%ax8KEA&3!+0*X5i>FXcGO8!6zxc6V9#LPew?7i%)&D@kFH%@-CUUx)bp&7#>Dve zAVgX_5Q}D73_fMBgh&lDh|rsyWsA*|f=Y^0G32)SZw$qoG?534ftj5|NalG-M(@|0 zM#Wfcqq1cN(wl}@Wx&Oe=R{M$qLI!iXlhN{ZOPB1@ND~(>=H;1_Z575H-FeZ^lLzJ z2pUG*f^R>M@Kzp`1m69j4+D_8Lj)IOP^&rISMcrUkwatQ7ko1X>7XppqjH2*E5T~%-uHZIVTI13$~oe*Kb zFjosORzAV^UWefOo;4tG)qbS+A4uGPGY)%By5sqtZBQg z&K0o?OQETBI6RPsnt(Lo6?}so-Dmya8<4yjbU%;$f^YV8-G3krE%^4;YOv4y1>e4g z*uxHuMGkN5s$KRA>^d&hHF1JG8Zu?4fKcG35TNrmB^lOd*hXu`%=x!Y%!To}EswrbtpkV&DoKA3O`xu~Kr3#SnU#)3nISQH79 z4Uh(DyX!xD{q^VHNXMHQNw0q~0Ye4GYj`m{RXZWsESDVD(bLaXvOLf;ETQ&H(+Q`P%H*1zK6Chz1jUg2igTT$!Js{8M9YAaVch#Ehd$6cZ;# z?qdVeh@%1N9tpl5_6xrEeprBnJk;cCJJbS;t0SUQ9hl(4N9ae4au;DOrVI z#cF^5YpHtK#@^D5YHMLhy>7=>C~Ho7hwGhYHd}q+%=p&i%ElmXcjM+eS8D_fP?Vum zBkAqOvrk?W43`R32PE6(uSsVCo%`wMn*O8pt+nX}AfT!#)XDPJ_Qq!I*Kj<8Jz&g{ zWa?9FC!98bhhuZE8!FRRvWhYEn2XRzxd{UgeEaFAhkEVu^m>B39bK zS+`8WwS*zJTLg2kho~q5Up#MYSRnZukopV0x!At}iPzLq@a^Ogw0w@&<32wWK=Sjb zYxB1}vWK5XSoJi-cB|%G;SDI$iCBm9!6#_I8ZK%`Ah7|yy$^|T7v;l<;S3PVQShA; z`%>fvT1?0=HwpS*oHUpO5xOx`mJC}GW~mF}oQ}R0Q*5$39>jJ={tN5qh>}(cnCLNJ zRfTjau=`vTncdkwa%A$_&d$z_8#k{Hkd$x)BE3)(NmVr64kqEkC6-7r3bYiEf!k6i z?2eq9xVSntdbvTM;h7W(dkv(`8b~|mFz{R{&@NFZY!&cZBZUrtGIE=E9`<%nLG$^n z1)eb(6m3GSb|eHJbx@(X3gpj9jyFRFXG1K*VfD&Zr6?t2WyFWs=d{kXb{eMS)m#c5 zmw)dkSUl}KI2VDNr>EQI;r@beACE>nJi;~g7kqc{$SY{DNyP4FK6t-?RAnG_heA#s zVXTnD0dH8r_has6pB{qmZWaTG z4UnKB%Rly+oeGnqr;R-!uoBh+?z5lg5rQ@`CS%P8Sg0LD&vR;EcE1!J;arNN1uyjAjogQz9xi9=X8| z_Arn6L+qd*NW)E>=r8yl>Cu4HSMc4x0f}pb-}^uF{sYO+BRtgs6kC9@&vaFaz%DP3 z4tc^GEV~4Dc&3Y|-8?$9TOd{1;rIou6_jbEPLIACm=MrX2E%_aMj&Dk67y@^!Bnv0 zE0TH|%uE40jTU4xGl~=(K^~1i=|e6)2ogiss;xVxH91nP%-~yvez|O@F`r0~oeHy= z3}3pQLZliArAyt*I!^hGi1DY4<`O!1yXVwM@yUDb<3V;?h z(!qLpWqYiikaX#Ily(oR?OPVHGD)c7B*C)Cyir?9az`ZF_>n=uphi28uDjh9kp2V z{Rh$q$s>p0yMF_cU-12yhev7E!_#gRIC7Y~1yYD*IseFM*wSNZK#8Zg z?bt3PICz`XOaVrOpxLxKhBOEVywW@!+e9%*XqWznr?y?|)G7ffQzr*tBNH0^76fqC zRJ!f@z*>;;ppDm?oE-ur0I4R@3y%}@m*QiwKh zd1`d&;7SdoQMF~saoAI8N$W1bH=+*5-rqglp&N1*2zJ>o z_#T3%j<+8QNNfNJ?f-M*v27`3f1#*yZ;yc`Y5KxZxWKV%78rO6SR=`C8?|_W%gpUl z)X}~q)`h6GQE9se7gGcYSIwK-fR>2cI5V|SwYGEN;^taq<@DyO6hT** z2tLjn!2*G_ZOW~&lyIQH#480*AUqMs36da!VKRmDknghugn=O{782H5ND@m!@-4d>f46{ zNZsi1H6T@oeLQjqz8wv*-GcAK4)An2s^XoTJo4ACI`rnB90Evz`ZwEDWPv2c*MMTX zfcZ8EThJJbEE&XhMfPTQVcSNZ<0*xdfa3|QK^_xQcr0rEMD0!(#cn|G?nM74nt4b| zX(bhdl{P@NrCM68P@osB&JBu4wW^^85{QH_>DITQg2$&vZ32P>c&bBSCv(T<7q&;& zb}rQtyX}(;^GBx*#H^Xy<@xz5%R3h@j!$MdS}sv+mt`IS0uoc@7h}+^(x6?C76ULi zHbN9c!o>=u5x0HI3bCD?*{Yf|olyWCbJ?NfO%-y%s0MT9>oH_kRRz^>FQrr4Ov`Wg z&Pg>4kX#K&$YOO1zVCellG99~gOPi^6DP3f;1L#`f(ShG!vds3{)rQZoPq`mg#LnW z22E~(-C>vD8@J@2nC{_`)tlSYs9)4T6%y6Srdk61Gq)N|l_G)406i!i{bsy3ZFWj6 
zFl;7>ld46PIktefg>y|qd%U(4*lAI6o1_gV$_dT{xi$JgK{>#QDN7MLtfj^t6;V?j znZ2HLc5jeK43eIt37nOG$I`$Wp#KApjx8)ab#`KBV*Ap?i(4lL$Kr9I9e0Z5<&`Y_zICgewHrLX_4%~Nh6eF?XN49J zDhy02+H-Va;p~g&Wnz1KpjOyEd2)2IM$-J{Vh9Hbg2cJgXX4CYG{vYL5n`3`6szP3 zL&+Yt1fW)YaEBzagx&_~z@zpR5NPGJW zzU^^ef%Jh0zCDINhlX!JaslD4SwNSd0fW>X`NF&K2&=mUk{Pcc2{-IPsWcYf1yO9m z7)cGo9}~9ZC#5n(0!4A*&J#+d0E6*U5()=;+YocmM&!R{+)(}vxiGni@Fu7j-iWp* zvm!gl^N83XrlN3~fC@v!m{Y)a?$p~&&MpzDMp6x>!G8w-tslPdXBbIqW<;2Y#=ft^{t+5(N<15<-r;Z-0k+c{CT6aO!*)ylVKYebX#5t0!y9&}%kgEazMj_rspe{JM ziI*jRd6v6;AAG2wV7gV187ey`kQ`FDzJhP?$hn{It^o

    %3HiLOmLg9(D`9@lN*^ zNYh{Y!e>8!Pk`j*(IIyOQg^=XaDN_sK!Wd19vyOqj9A?*kOKAC%vL1|iC955R%4_= zCZ3+$dsHGV)v8jgm(<@Mv3aQRtyVj#HXs-Yr7(~OEuk$_#CS-fj~`F4S}>Tz8nu`a ziATjyY`iP;P^z|niDyMk3$02yBwJ3su~%Pz14L?$zfEX*;c;|l35)ZgC8@@^@R`#! zW2#dJMz_Ys#>Z+4hLyqG^2>V@XapM`PCB(d3c1_vit3a0TMI{ zodP>*f+-%bVmYX-FghIDoZSjQBsR8~RlEWG=0Yet3wCkV&aGVB6gHGDs-o7(lg!Bz zcR0)kSu~){xSW>amQgY*CSIh;NDJ)C-$}_MWlB-)<)svr zUOsyI?D;C3o7mVGczADaHb8oLb9?mo9}I}Jc!c5rmE#Mi=bGVt8*7x*2|g-}OI6fy zpo+tG4vheIdHO1-^*N99W`$FGKp1VMDtKP7!Kn#V$#|j?8bhl1XvBaKOjs85zcs;i zz&Y$3bPPG91cQ?uTWb4at~$x$sUJw>k;hTh@~FMfdwJ9?rR$EGDyP8C%cBv0L+oBR zkM`aLa(#jH=vP14I{x(0;eg~2d>?jg-9z%|&~Ob%4vX!2)g$;$lyUONWuVtR&kr{> ztf+x$9^!@qTgmZCgj=X`->^crGSvvWEp5-;h6q6fq*1ZDU9v|m z4V#|SN;t46idA7CRJ(la^mBE(Zu!E-*2>t}c0Jl+qjpW*URfPmxi+(BK%_H!zzd1z z20S`7NZ75+8cCg2YExji#Nv@B8m%oH9!cK85hbG!Rbdw=O5^R)`K5Lq1>B^Jn9X*u zs>#dRSw+a(DbEDh1d+-R6CXZg`1PVFsn%{v2gVu{llnrbZV|dq@ZCMX$^ZV{ zIP45B>$e{gNbCfK3XJI=g$6i<2T>$%QDT6BR2>%;ohPNJE-LwrL`6g1=P?Nz81%`IR{h}Hi3Y01Q$hm$`BYC0BnnkryRf;kGInt8%0_+E zK-xJmxwUn2`NW=Q&n}#tgYZCV9?ewKQ#Fsq&Y9ho`nmShD#`k3v2zn?d?6Hu*oM=f z3V9&~_ZTSx)Ntw&21X!93O{w|TAHUbyhV6J6@$4Zl1+r-2Zb^`a}r~A@jbU&r+F`` zYA(N|><$bMBm@~=f!zo<|NcBOwhg`!uiXN>k$cvF4qREm_s|HLN8YdSk+;e6ZzN(bwklaNrN-~l7=22 zzqJUWjiN^|H4Gaq2#J@vs&K0s8UF|!Wz!#p@~#XuRN+klEU8E>vl~>At`1yQ6OjNT z954Iq(ST$!q!21-$TRM!_%8mg^x`05MHDQu zzT3(;n!hX*-w^B~Lc&afoBIXa7K z?e4j=e!;h&M;`9ssY3w?7_u^F%bWJzj(^~Uq&!_tA@auP8*J$$j^t)BHc(t zdj9pAK|lHt{yX^3clPi@#kpt=d+hWN8Xj#=Y%Qhy;+4(K_4Rrd?8?q;JifCyH(fpV z^y#U=NiNTyoS&_xCkOG-wTP)x2x~cY7X>AFKfBT3gfLOVtjm?e7PQ2?{zXw$m73Bo7#wIwhXW?pf=#QOFZC%|rop)2dbC;DZ_p zNDtVgmw%Vu{X{kNE%*jH z?y=z8`A&$sx(7x$ba#i`JUZmz5ptDx=h41lfF#_++xrL=S7z{;K+vGpb|AO>6ILOZ z0=a2t#hi^dg2<+T?^^yKOa%C&Qqh3IIBn40y?~NXS+Djef4g`qROi`hc-L`X`d5F(G(7>UAAgPlOXKv z6g1>O1tJ0CsHy-DHb96HM1lfCn!M%ggjgF>MUV}3a#4ZU&RE2R7&)#DbB$P1Z_+^6 z#OydyBkbzfv$DxV5@Ar0uK1WxU5aTNe)|^#A`L>Mnn*wU+3P=iO*7j|^n`m(A3xkp?T?W(SdcBes3`%3Fa;M7s{&CNAfwP{ z=TX?7KnB%{+*(F>ItSvTkTsJUB`pU?L>PJ^&=1`;wPv)1w}$ipR1HXa`E<dw2VonA*?|tu)7;Iu8b~(=K)U&hOr+=Q<43>xRsG-k_)-0z=l|kVRADv{S;ZiF z`s{QiYXcq~NI&%(^et^|50ZAHi#uZz2j^oo{g0krJb3ct!ZE342M%`QYxokZh8;12 z`BXDwBxc!Q6$PJb6to5r#xwz>LKd*G2r zcgGtYzE=vbckl?>Z~*BMe+>x|TllL4GYQV@=XP%wYdZj+wmik4Jv#?_w5SCLs7WKf zVx@}eVw3^W1Jo_BWoNHd@JwOVP31Y(O%pC0+|2K7%Dn;`qWI5D2Qe6cqz~^e^7wWl z(mN#551+50^s99IdhnO$ztjHka2qF%J$=3k%NMqHw$5d%C21`3cDA?IS8F1z9UR+S ztE)yoI6J?6a{l{AaVna-z0@sgZO zvv?nNONg}U#X^jhThAS#6!@2xF##kcF%hI8ZU#jVFgMF(Y!an(c9O06x3g`D2s(-#B>WD?oWvejgW7S8#;dq2II0WB^-8^y`xv5i}kj2pW!Fy09}g z*?gAJGsl^&mF@wvgoiOsD8$oi3TqzK~?R8j&KQd=2xbRxVU z6#Sytd90A(scO)eAXMs5z!}vbrs8PF?i%}7tNnvy5mIzf>hD!p$upf$Dl`dKdWrK^ zf{m}6dkZ$wf}zLoKpLJ$9?zVSu40dWu8^bm6qh;T9GC&}_*#FR0=sT<5q-q|?plz( z+3_de$PKa%pgr<_b^dOkc!&4-;v2NN?pC082!W78cjx5M@B+JT9#K2u2U6N#gWWJG zbMD^T(;wtv;uxlfLJcgxX?4Jw6yh1PuM5V*r6(YMLG)yJfFJRgQ)-JoEU&|821vmU zih>xV!6>y5XF#6OW^<|qr0mB1JLWR~%+`QNH`_$2kKfcl`b`a_-wgiq7p(Yj(4WrN z2|GPl^JrO8#}hW>cy4@ayH++_m^ip{?Zv8AH7p-IdbFO%SC@}guB6Md(rLQ?Na@~f zNDnhqAV%ZuLq4o1F5>%yG@@x3OIijFo(D&a?gT_LauDl~hu&$W!>tvrpm;`ug`#s* zErmp!K&k74A8&-nQ(y=W!8iO`d8bI(1%DCgt_?^dejfD_*mc%=d|N*R4UTjpl5no4 z_H|Q8p=k!Oiq)#ik-^q7}?_z!)W_d~);z)t=Zv5@G};W>&) zk5tSvZNMDc)5$3pXAThGL_h>aZ}P-lQBs-VytS1;gyaTNeZY&`=cxMd@C~tCy0?OF z2pZf5C~`<+l$LUj2e^mvKRV!FP|;8aUw)*a3Zx z)?dF15_n9JogYZphHVWZPvlckIHd)J&KoAF5WGoX4$i2@sR~Z^cTh`9Q_J9Xs1=(@ zjQ?hl;0b;sBR_-e6J`{FJTRKcO--}{rLCJ`ikikFqZcz%DiU!tCcBzIT4b9($)yQd zdbdraU;g-Kzy0ldKm5%PYaG=`s)_WQ_g88FaU(J^Q2v2QPKP;H~ge8H9+t!im6cC z(lg;0^Oz{t3Kc741VvgCCv01>?Tcki&x$h{s2g{9cue7Qhs+_Eu_zep1UwU+X5yE> 
zhGeoyNO<`K-}jhDdv`7PhM>X8BfQ@6R6ma#`n(5s2_!Vde&sIpp=94M?NBv*01Zff zp7a-d`+0;L_2dh?1$NFziYKMW&JQGvK%>+u`LEFBM*ttOzl^i0ioVNIp?#GyEEw^m z(sh8GT#Z{HgngBa<(=J<|clNU|aekbOitt&`7D_c|P zqef8;WtVf?HG|ey7tdGIFP>SxR7XeKYbVewN;HUpgvml?uG9y~ppvK0hbFp!xj~U- z<0DiR=DCu{F0^$m$j(|lib4ii3BNw8ys%)v)Dzgdigo{iCsg`U6>2(|gHz_f+sEDK z_d?K1U}C_MUuB07=_6fX19nu8_`uJjj(6(IqleuMNd0-_Z$QE``-trJ{7J){FLeLu zk*_)&gc=}y+B`s9zAWHl{q0E>`J1v;XK$3VsS+emI5*2o#XbJ9+sp3UbuSX=r zkO78wMxdven;43#p@`6t7E`o}#zu5@`Jg3I9VGqu$G@$Q_ui}j3nIPr?^r`q9gHkL zGhLlOGqExO+jJ4w-=uD->2n9iUtT>Es_Ey}-oCJPaB9vX$SO>Xwt=dHOl*#MRe?MW z(}|Q$4K&fUhDoWfIB6r*%uveBWTBN24drjpbKBlj`!A1{4qOo|h#5bh-T+U8zFSI; zk%g{e_$0Q{7|NGk`hd&qba>f$L!=r=t~3^kJ$-oOw1vZl?h)9H__JvK%w!K0I&O;E z`dzEO{{$Ml&f_b|QOJ?AJLG^2YAwE}x_N@9?$%rAP=6k|1>gQDg@i0iNPZw8l0cG- zeH+|JOw5BP;1BS_HiJVTzlMm4CS-~%Uhd-wWsr7(AEd(ooSMG<0AXCqs0PnK*o4kj zBf`KSK{WX$rhssG@7Ufvx#8z6Ruu*GMq`ear#xt(ZjUuYdbcK0%_JbH#?h;Fh?HgI zn&Elm*mHGpX#U`($y^Z0F_%<=C{JZ}a^<-JwysUAT`A_~CP_y5x1H-MiB2Lc?xB8$ zj||@eO)F@kOF2sxjB6gDs4xJ>CkxdbVI9PydYG3c;^NBf5al9h8S z!DAg&$tl$1qmXm@hPRfFj||#2QYEM_%?$cMQ>0Eug+TG zK$BfEdChsubOSX;0_;@k$b)HiW>6%vVnzp?)}p4$)$)xNk$#bpR1@iyCO~@S)tCOi zN=zm+&dt|qg%{_q?abJdB22=y&n-S%KWlMk=Qy{0w!T9CZA68bwXV&)g}|E=Ew^Mx zENX{~Qjx<&fxIOu7|ig5!UaP~e33AL0*}}+gxsHU&be(|1`UmZA5}*)AqzizN~Kdo zFptCt;03pDu1>u`LWuOx2f(9;osOj4+5?AbF0DEHc>yWk?7?4i_B;>X{Ji?kU>ghHE_j@V08)EzO$XUN?=lhzkp>hMs$65pxaAtrzTudtr5m00~aQ)o|NtsCR{-im3fhw})!t2NIOuy#(K#1*pAlft_1$J>nb6)SYg;E06jJzDFF-YCtdDU?{omJU>vSu)smqx?{^fp|U`VV!V96ymll6c-H&OHs2o zN3q(8c9SpP0CQoH=0e>;1*tv!21ji}v9rS#O*5BOs+sL~;UaXsCQ?SyD-DYhlD<;~ z)~dqsfm-3YGfR_$%dl4KW)`YWF8rWY;vKwjX=-pwrc8p`5N}V-gb)>;htuykcSuBY zh^gpqjRg-`OfB^R;kzJHsOIaZ7=~^PHWSBLR`|$FbDLlk2N-xrx6|7QH|ifV=$16_82!{}-HyS+QNYA+jOxVL zAxto6jr_)wa)BH9GK!?|ax1nAYJ}k$h2uQaD!c{QHzSmPqv#_hg%?q{GA7h?_|_-| z9dx53QecQfF>-(%LEwxFYul*gLO` zN)`Um(@O`Wp%ez2CVTW;R$F|!j-rlUy12O}n0RZ#$6y`oj9^@oZXcnoXpTZuICse8 z$O6@e5wQez{Dv0z6dT_Jju5B)fku3FRv5!+i@^dZDuAU3Qt_5)F{twLa@|egKgi1# z*t)Yv%6<@y05g{W11C6fg!xb*k{yfG4VgX$4XFLQ1a{qv-SwM(4M=Vtxm6O-L-Y@j zu-hf&!sDHq2PE_yvId91&YAJLdz)xafn8^S(k+|oirPFpLe%BBQSaE>PwoOp?M5VQ z&Mb6;Bf=1-*`pv^OMhYq!WK6&b5d2afeGGJEC{2~Um;SV{9BO%WZMfg-_|RScGOVF zH9BhyD@XZ6I!j)r?qJ3^1X%ruhI09Wn6j))-gx)jmun>5yj}z8=8ZaSm-2S63|LhE zxBkyRhU?z|NC&l!ke*tc)G7*Yp?OoBb)W$Ht;DV|?dZtCyi^1*DYOk7 zcNg%Ys6>YrND6I~a+rdMnB33BpVnNXz&TQC3rW!Go7g$G@D|S3hqe`+#3fb(zO-@& zA+u6)XD^D8%6TLzRBrM@Gmz^4`a>jcn*F!w$~`AH*+lNPcy#haf`HOfBMblkH9Lgtike#i~eDGCyQeloOxnETw#Ukr%!ZWAI6Vx&R9G(gfT z0~|HSUj$JMY`QWr*UXies#NgST2u=vy64!!_opjey12SIRrY(M9yE0km3*Uuq#ptp zQR+FYBvC~X>S<(DY@2zKY5||Dk;S$264oV%(romHC83>A?T5J_StbXKOFB%P$Hjnu z0BexrORuFuLPAtr4`Ta{0Ze`%Aw&X^JUnuET>EP+{(%=B!y(8i+~Dl)2lhD*?R9lE z@ssT{4yoHj_1o!>d{Nf^-^C-ZmxE`Qc1B_jF9#=&eC02^yeE$i-|^w@8yZN)TFed$ zP$~_Y_yrCHd7D8-3%R)@FA&?OsuU7H;ER}qy|{)TT{Av0>q!lsZ z7Wh3!GslSD+;oA<9j`{Y4gXUO33=w@v%R^&%Dk;W(R}8bgq!f>7SUkbCNQ#3fe|zU zuyGn`MT`JJlyAOg!Bw~W0$?5F1rkCe?3+XIXyhLA2$XQ1vmej!POebS3n_c2Hb8oy zCy=_O3t#cbzL8YIrg^A0kA7EM&hX?r^uh7Skp;v>Ia;N-+ASIJ11So2j1WZ#ogk*~ zNbrvk6_>qDML8|jdZMyelZX@wmLi@=Pf<3f+NLNx1*t4}+r&yJwi7Z^6lcn%_n7{? 
zLxaU3qL4VNHn35+5!msk?QUESQw^mh>iG2=HwH+Wcze)|)PzV0NrSeeOr)3Q3gM@( zEl=03sizmN2o>L2g2EGL7HUDmx29?yZO@o+XaHP`4I2#PdJOx>ShM2`ZNV^GL*)dd zLffUK3W+My~+vTx(+(@Fq1St|!kX&h}iqL!sZyN~6I7mX1=B=3>7M``27$BE%?A1VvK~s=NbUiIYIDS^BOs?gOXWipelapump0yYhKi zr3}70Mo4}l<>RB>(}KJ_+IzPGj6c9|$0t2_gw&opl=Cezi-*XwM-LLya@}(#9)AMs zPOaYlyNc1g4Y7EpGe9}qztrT?VY_&Q#qPIqOVhbZ$w@UdkVLHkHiWdMg~xCtFhvs4 zkPmSd^uMiUo)FnCQnZ`L)Wn>Vlj+041g<+pdXuMf&e zzig27>I-v{&pWd^M=JH4z8_+=-75mTJUZ-Zb;9f2C+%j(E_l@6`*q(VmPc+N z)!B60MQ=3)+EU1+2T2!Y2m2_E!b6_#W@n2AmB23gypsuRI_V%_svc^^ea0=^sKpdJ zCbSc|)w7YF^HOvE3Z6G&;vaK!mBDfGmCo^n62uY^4>mYxN*6c{>LRjKSB$RDMV)D6 z-#6Z_fIc)64(dna z2ahB%fLK8__3i;gLIWwIyQWZ6OokYqDmC&#`L}9RqYpp>5*+^`a3)fohjI69K?4wn z$Ou783jNTDZ0A|f@f-3%wZnX|j8YJ8y-dDpu8+M^qGBNg*&ZRh3m`pUi1c7LdOQ^v ze_N1Wd*Ge>+PMgx^x%6*zhX0y{sDCaLq~v;a{60)0mUiIgU>9Y#tsZH3vQc3?Sxp|-b3 z6jMaa=9PmjE488@D}pvA+2HAetYEq@&0;$NLA42hKqN!kJ03!xoNiP(S{wUfprQ?Y z5E0&HvpG+$-q@ZTJbkT6-aWY|%wCq}*7oc3)H!?c51`3cbRNPrHjyom5=FG*6d?ty4Ff_$*J7~uvI7E!Z=(nb(0*D z-Tyf(AdlxJ(r3Ip@|nFks!9%?`CVR}D)d9$i}y~N4jlQzBMYP;%FG-c;gJ5v22r$k zwu=_?#BINb6|+%nXU9cg@K7!sVZ#VgiUq+E1J4Ng2et@WT}AB#B(?Pel$Wrj)`vYO zt5If-A8HygVu7qOxE$OQlwT>NI&vg$?W3F5PBa&AV(rE+ZoEA`9Z&7qBQffp`fta_ z-@Wn9TUTFy^UXJEA~pG>$<^)2BMp&GE*_neJ#`9VcIM2&>1WPY)y!z!e>A={E4UrF zXdC%AC2RtXz~Jdh4T=_AYGB`&=m@ zxequRl1Bj2fgC}o!fZQ)h{k>+ndLqRgG^Y2G5wXcDLP`am$GfAROg?o zl?^8s=Fc2EHov%V`fQ!9lLHeM*Eh%3rrDm%*-lKiQ301#2$JqjA!dsRDRU%*s84g= zkzzZ=Nh3UwNN`m_2v(w=EFuVPnH_*(epG-rq8w=ju^ix`#+98$hxumk9sns2L*y}? zE2^jqDndbYK|!G5Z;b6$HZXIywG|1X>i$K3?q_nw@9Bxrbj#CW2R_>=c~-<}Y+y3umAFqqUzHvt3>5iy#BXu44@TQ~LMNOO zxfQo)!U>c8K%q<{d0laXJ*S> zE9~Gxw1%jfQ)eQ1R};9(fiP9HDJX@=yEHMXaTrojfs(Ae#BO?USU`A`wjZd=Fd?b< z?WJ?JQ#)-Xw4#@(?2nSJC1`RAN>zO+Q?@6!SJpSiHaBZ3t!|8MY;KN^ZynqoU*B9` zUERL6vUTmiY~GRg?5SoZ2ChO^>pIe#Pp00`*|F8}i34>Y6`ppUlBS>6;(027qH93P@$L*jz+DUhKikqFxC+CCov|N9qL%8ia$+Z3 zQqfg}`ZXa$_dHI25UU6-RRk4WG^+U;@*Iq$6@v^9X|532(n<}qYTp42Xi>N^6 zC;2o&ms7nmC)YN|SJ&6Ku1(EM)$vgcrp>XMP^&M$y!!I$`o`82lx;PUs+p;`uU>!W z&0BB0{`Hh4o7-C5*czRjMZZKiaeR4aY<+!meCG<Dlet|0U+DLZtb@u0CgftG?YW9@^eG^A479qF~)yH%;;q7jBG!Gma!?Y|7vaZP5n z2(DitK(0x>wUJp{^wtzxZ#`ozqot%a2)kE`x!rTet-1C5rJeyNSpy+O}c|A^NRuu zMr$MfrnbkHuN>c#r~h59 z%Sl?DntAfcxz9iOohP#-;?%@yEvDNZU7kF5u5P1U+S%G%U*D+9N3$uwvzD|*{@vVj z5@k>iAiOzHB18f|4wk8jz*en-=n8U3cA+3(fH(PsyscUk;|Z}Gu;(Y&v{raP`1i~VqfgV_06)jqzp?;BtE%>J)G`fX(M`XXug zdm5JhPi6s$QrdEDcY*a zY@=cun9!_`He3jiququyQq^aME}&u|5t$$~-r(XmL>(uB&<}G9Q}t?2Pc%m@y*qpE2kue%QI1`JjxWfy^$Cuddbz8Xwyl8?Q5e>jNH5oNF*_ z%u0=xL_-#Q+fX$W_X_C071M%RgFFxkq2|xLYRDUX&{Q-sl)#w!V0L`;6(^ZIXu~fq zF97Z-;Dx|Kr7}Q*VWP>$1rRtwv`-a28vrzCo3YL}Rw)58JYDmG07$z&dLileM-Lof zGC6u-zf|4xW8ePW21LJS(f-}T$fnYbkPSN^J!M!^j!W&&;Y+2 zKq*_Y%>0MrMlNI_5~s}5R=ipx%k4AgSFGw5?!k zUKY47snTCYlh`h8n}!zF{{{IO2qFSIoj|CVum?n{2&?nFQY#URP;r?H3MayaGG}RYFOGMSz=w>r5d@)0A5xoWYPtOue;`ES7jTK@Xx3;gtxS z;6Wk0FCol%7owRqQqF*6E?%CaPJC#V!g+~xEqvo4B6FJpz!LZpr_nSq!^^Jk@xX&M zmg?j8X!52UA57)5zPNb8Y8Sp;eNabI%eAFvhjw))$LoK807x98XH~bsioeUy85L1Q z6t+hsc4F}&E2g>6b=QD?)|a158sA6 z9vtR)=$;*SdxzVuu6})J=tgHl##kH<`zBe6th#Ux2s~3{vbpgGPtCNnTZ2V-rlC8U#-SEp` zj?8gBR^+{<{7pB9ajmsOcdX=^>~CjOm6nBWS%`G^?Z*?ow^ak!sRu_@(hC`v9pQ`= z4@RMkwUWhfZ5<{c@m=H8aIcrTJ`c?sg7=fGyFyabFlt951<{YA2`^0La(j9Vd^NnV zfrQp!4U`p%)2sWX%%8(0+vx%oHB=wgv9zC6vt2O?BTPLe53%uD%4CZc8+q5x!SzN% zt+Vr#esRGTzrl%A(?OCB*;82PkisKRnqOK@voO`wTbxtrY^xI>$Buyz2C1V7xP86l zwLMdnk*+9sDuqkw{3r?h=H}=zED`XSdE&~lwNZ=_%V%v$L3>T;^1U+r!fj2RMTd5? 
z@Pq!1!r;g`BI7khLQ)D7>UaO@Upye)>q|e{bBtdc_j(dQ`iSVBY+K97LNb)!7&};f z*`6>9cM_>(cbBXQGax47(T@n211G>Um8I$Tz*%m>aO`7`g&I+fNM^eX#T=n=i9)T# zM^Kb$ZFwW$;sxw})TBC4bTOu=Jsl(B7Flt}#;dNbv`sa!PTr%fPP9 zWI@)jHvQY$6DE8R+$C)!bRE`waH=IfV_!3WBjz_0LZWCfsl<4EW_>BLN$t!=^A(tDWZbIKj|% z|F#tV5)+)Os0^&bVE`cw_TZ5OeY)CR-LqvgscWwT!4q?}9}V2D^zZHeGc@w*IspU2 zq^W)?nA<^rlQ8g8nr%za_7@w@+6o{G#1_}2(pERJZ1c25^(uxzOPCxSa6;_mVH$M4X{|w&gs+-$Wn;s{Qoy_r(+Fr> zKGy6*LWGqaMzY*%_I!GC>qRhhSU@9391%h;40wRX>XvQ23P{yP{W)T*mO4tnDI_Cb zJ*`udB;t+cHASr(D{pTDh_2+h&mm;{Xsl+IZS@pxrXj1%Z>*6z!Ahr`(;<|i zBHQKCY~P#?6O)DITKzj|5iBgt=w6GAz*xmNC_JijE!mT(H^8L=F!o7#H`!zmP43*Y zP5i`pKL9Sv+r?1r2qfQRyAGN+bdQ}d)r<2ul}(6qmu?B)brusg5|S_(LPVBh&T>Qe z&RQ!o_?1&JSx*K?_xkEL?@`k8wd|N7;g_bM9$1^pE#U-{oMNYdlTo(EKe{Ccqo5+H z)wv~Et@hqWw0+w6d+ol=AcYudMAX0lN%pMLIL%(YyTP>%*m76$vd`IvoHW9qtoO2Z zsJ6EZi0)LM;+UEbkp_N69n|(&X;L%D(k=y{7aStqLvou+NI@mb#wMH?d_8LF?McLz=T^)%I=v}s@2nB9xY$Z zPuZ8HuE-RaVD9*P*yFW=2;c&49|eauIat#wHe_lV`5_!mnJ~B#kd!cr`$IJRtlf0P zn|jb=_-jc)gAkx?{AqVxLqB}DJ+NJTW^CtnNW%axKC*Jxru>y{t$C9*gztj3UR|ir z(#pGey*}WbM2&UqRsGLQQl=Sbc$Q`$8$1N6Se(Tr5_g-xowJ%?(KayFZxTEhMaZ%)|)V+n>0THE%cQOcum3 zb%_i*#m?<>KLq7v^Gb>{YdzEsHFX_j?4zzHGkTEsW&}HTm37q?W*PJ8NGgNbrTEwIBw3(MdXZ~QhFZ~TDx^SDU`f!n z-6fZN37XTWnu<$7Svec=@XoS0y;2_Y2V*-YA4+-XKy^CUr}MC{DqB8mlE70T<}Q_z2wc8J6zc%(N+GNy^}7QzrV%!m92dk=w@GRO~k6b1pA zD#@$W<%fD@4$c(19JtsE9MvXl{|frjf)qsX$B z%Mr0lujDWFD#4?}gD_x44o(8IDIs)YS7+r`^IXWJ?kmDVva@L?IhErt$o& zq=tKae~prU{{7=+t{afDm8JNv5far_!*^OBq}O&*v(|X7RLXYf;b``@Yn;JX(2*T+~$*770j) z$SG_Cf9;c_fHQU2b89LK+1kx1hwa=z2Rw4dQycQVQAD1zmDdQ1bfzQ;IB9c1q`Y!i zU?WC0Ul_j@>FV=6YjG;e`}*!VSGNP@2Hk5t=wL^iFwUdHwY2XDb6USGOYm5*D0#Yd zukZfkg%?Tp3@LmJ1mPAdfgm|Swv$s_pYCsw7;zrVrn0|lxvy~-3Pggk0EDbC2ragQ zcPO|6~6yD0@KJPK={VIkG5T zsnC0_&Dh(LL3oE-GX8QzkTXZ)P$D5*KK6Bh{ylR(!*OW!)fSgq774rR^qsv9*b38gcGU{f%x88R8xLBO2b;9cZVIE{$N z3>|o~mrsLa&97`QyHdM7}XDn)5iNe zeTEc{spW~HXl3GgcHwOE%s~7}kYuQU7n|K$(=2c+ooa7Ddg}Gk4I_EiNb0LS zsjD*?+;%u;cR_KBJeoe?r&|jslycIsoU^Vo8HI^!<%4lnYU3k139bhSO3i0koBnNO zbBbSEMxtJ1yP6tl$}vpo(yIJ=9;5#;F57E+tS)$n>IpUveX)Yw##^}`$adJyEy6dp za|4ntm}*XfSv$?TQ$FVDGQ!|Mg)3#!Zv!2~N+Cd^MHqd!kqNj4K*Abvkm^xmYo~!X zbU6t1o{M5JSCZ&)g6#~_0+8Aqw>`UHAIM*H+u^#k$+w6;jxg4Q&ey@*J

    (#NMMJTGKRrYwHIZp*G}5>lDxMh+d|HQWDkX`-D1IXW7sj}JCPJ(sSw2gE;8&4tEkI0n z_Lb<-%BP9+qPip=zXpUYB|TB?55DoicL^ZfMba0J7q{*_BnB>Xw4$b)4GBsvkWs#R z?R0}zTC66oQNLTs!mU>H-Q*CAq~sC39N^EA_;*rwRjwYlSJ89=bKEE;&G zmWz=$``KA_#eif?=`xOo?+34Lsv%k3nDAh3W3)==1 zxr=0`adWWys#`j{T?pkPfG0n7wmG3%aQv$glsK^!3)E0oAQ&ef1V~7Ewf&?l zr^d}X4yV=l2qNiRN}UAbmhC<#S~uFw~Y*=2X~?t(!p_d-_ncrZVMx z9g&X5B!fZwf{t1#ay4F=>DFdP~P-X+r|LP?a^sDrL;BW`DR&ON>59C!BY+v z@4T<9-gL^KNu~<-TpxVnts9WuMMcsNkC(GsX z6<;_gd9$Ta%52CIWzyt5@YwGSm}p>#+S`98$%hQt8#v2SVv+5_yw%(8v%}cb2A_jypAKU zh^Dtkjbu}4wobZqSMdMBw7eCDtlWB+c}c2 zU3*M)1HyZ^HjdPC<_33UE4@>XvS6g0$Lh7&ViX`DhosUY!#q?&N?P1{be6jZ;JJ17N~x-4&zuGG8wfk*DHYtXhs$yG~`OpN zGoQ(^3$j`t|L)(xToYOJT^-DU_M@#-*_IQK0J$1`h8kzDP^=g*?b?~VdH4`=*6GP} z*`hIrmsF@_AB`lgtwHV2qCd=#>bTd_udke%q%R*YV%>oB`~TnJHEfd>dx5%mZ1si= z@7c?CGG#t)9h=g9;t9N}^9u*DkEHNaXDY{2McCbt?N&Vlt*i(Rjhwpq0ytB9{VI9v zxPNmMydXrl-=Q_4)KYq_(j#*xook+GNkYX%>mI=LoPUcZ3>QoHs~DJ zGEzO*N__SNzxa74@fU(igW|gOg*R<@Q!1< zjim0_hWG5n5td{Z32nvfIiLEohd1!D@JvilN^Jy8v$>mm*u}+`+=`%r+?`~mV!)Y& zfz0`~X(JI8(8zjJoqnt_*{~g{*gzFj4TeqF*r+)_HO2zfZV$X}rlFiTs6q8*24ryI zLi6sXu1Ic%HvJomJG}7z{r`L8ck567%b^CW=*|Y*@@|x%(|UAQ`bwPC5ChL}P@vf7 z4aIhFju!J|d^8r+4qm&%4}=oS)yk?8tX!-vv?lD%kbLN_Q4ot)8=S(Hce5${mfTN z_W9A?$e^xBd`R}Ag`tl(yTD>@HMnoZjOV|;@rU(S+}icos{n>%}dYRPKt#-0__P7sqnA1b#csa=^Pa^5pBtBZfbHy}Mv1@ws@{P=H^ zOz~pY4M>j?k?Pt^U_nk|UII$E7bmV-vXyxbd$TUmW#L7XT<1Pzn?BG$n5=oWMZk1| z5CCGgTR5i9Nz3L$hcq71>}Jy!p5!(zdb*XrQ+bVhxFG)RZ%-!UNcOy4k2F}U>vfY3`b2io1{2NnNQWW0 z$E2%nKzbf0)t~=0($^n<;fH|L%USmf>47lSY3n^E44Z~FyCoPl8%;{++hzh1j+yF` z_ak!64+a&sJ#%~QNV3zGdR0TuH$G)>`?d+(DqZUVpM{pqN>}QU(+)(hPV_j4`oeX3E zW1n6PTKG>+K>B|Za_f_5!p#ZuxeJ>`SD&8zu2kNc6c&9o4?QqS6_e(tc_(NJWfgg~ z$ijf$v*4>%5>Ds_rXLZf>P*Mb4Mm+?#8lOqoume$Q?0sscV{-Qf_%G*{Cq(NwS@>X96YA5?|Y*Wwn^<86h$zLz`@8 zZ`N6UD_RUG?X+`5Mfg0n5jlFZi8?K0PO8u%?)|n9d?Y{=CUYeKP4M>PQt~6YC9qbsk zXw`rc5Vv3vFh{m>BF{EayS0NMbr-pDv}`h{z+0Jb1|%~wvIvje1h`}>D5{oPajjG3 z8Lw4$tmhH`KxH7&|*$ul~LEE{lgPvrar0#d%Ed1d2KYQ=-!qz=Qx=8zp3!glj z$MA=1o{?q`H~ixj3X8r`4yso5qg83^`dlwtl3MJvuC2_kZPD$WRPFG^yKbYRYjh%; zSImJj*u0h9Ir*@)Mo7>L1A>+HpY52s!!T@9-PD}3y4Dnm?Rg5BFtu*%6clBNT7q1- zdwalzkNCySROH*-AZuOM)<*`A{-3>Hs9txc1augNy|H)VLiE-YE1>7!f8-;-w)EiE zC%xJR%zt`vkuw;rH=20SkK`$&IFjK!Djey%p3gey2Bde0J^Iw|fA*u}g{`|~S0U}! z#lcz}i><0M%*~d%+Y^U?W^)GPJz$UphAR+b7!B@!|7*?9PiOAJfVABOQ#u#`ZIY^! 
zc$T39+9KE1BK=8}9CNB}>q@p0cGx@c!8uZo`j74Zd?3gFw9_pBlk*{1Xh$3NuHAgt zoBk~=usfY~IxUFeJRuY@^)wS+j(u71njm+i)JE!oyz=yPJnmfR%G zp*(a;rh4?blg`w06ChFb!Y|-69|`b)1R=f%Pyr;tudsaEv`!!2Ke7# zs(A{~&ou(2K z0S84IP3<^kRwjXl5s}q@SV*C!41h%BvG0FM)5{-lf}|jq6D!CFNsEZPWk=wG7&|f3 zXxcww6bK`MQGucqe;_+WAiyKCFsCLgh<0tR8cmI=nxIKy6Cm(KqcK_QaKfOKq z>Fsg*HsD1)zE?o8XT6@ot5z{w`j#%Dez%dvMP$^Lm4verwi z#H76_1XfSfOz5AD@^{LLUKb2O2o;_ zjXloq)&$}~(nE)HccWJ5>Qcc6!gMlz1n7)?$Moe^0agmlLxuWp^Gi}Ijy3uCdw^%HivT;^}x|PNIVgl5cd^0hnYbL#h_tJz?fBWvLXzD}aafj>uMtSjQca zNjOB7bbE|d2$w8>mym=It<-%B&(PerrQk!d(~}7iA%oeeIIsix)HqPD<<*`ZSvGsR4pb9zd%-%jXYWAycfTQ7 zTmvV1W`MK@|8si?RCk~S4AKMf>pN*Jv>1;lR=y#>yDj%Pp&gWVVisARX2kpOzXCLA zqNE%n{1OR$Ux(?=u`Y7QO^KLtpI&wy`uqgu{IVIgeFUufvI~X3s{iqKK&$^mH2MXV z?gNk*e_ixz7?m;`4q0V)s}>7R^dIX(NyX}62_a0ygo9W!rx_jGVwu>sya<(n(-`I_0CUdgpglEu zdrARBqYDM`E2bu0j{&)(Y2bXMLc;nTFC2um?{Q5d9ZK}~`7mqbu}kGCanp9n9Nddf zgewpgkROF~0?(8bfV34x*K_y>-A|1JMx|Y$yb3mpc7){zRfk)j-p;-zN-p9E+R?Ia zK+!KaG<DD2)XqAF3J~Pgv0p5;hIeDOQf?yh;;H znj?sx%g<99M}zfH>vgF8j2}=Eb_^CyfHp3Tc4W&MZqR3HNQzm9D^FJVj3ZMem}gEW zAY{KLFOCUE*j@rg@T_U?YOys-r3Z;GuP3$>6M?DeoYj%fgR}}K+p0W&VG?Lk4lne1 z9~f@ndnx~pyGNUP@wquVwF97sB%lHvNiGJgp;M+o=El<@AZWMxWC7u->fyh2A4dS-%dCrMS7u`=^0d!|BLQNR6Kn4*=Nh@gZDch95|L%#D)6W?U_E5LVWe~=`*~H z_m4Opk_brZh~Ynb6JkS?z6vCerev%ZF^WTL zB|a-iJo<=ft|>Va#cv9}{n{ed$)rRf5y{T5&~SV5X=PhaIlEVC;d!@Eg%O1r{Q=S` zYmwHaL`!Z?!lnEhb_{!km0gRNMuEeWT(Kq-i7x3-g{aBDZ9=X^?JyR0vf(Ma0tlrb z1_cWtLCF)@sfLZh4auAxP8%S8%s3#qABe^YS^bH8Y6#7ni58rr`jeSAdtEgB2E>_+ zF)k!>J^>(&S(y7Mi5{*l`szWeS!>^Hanh&#D`_uY3N z$`1bYr$2qT^WRtO{P)#6m+th3>h1YEJpK1SlqY&$WvfBby*R5bhuDAq?7u$w5_HM_ z`georQ4HDd^kdJSKK*L>(E85FS8?y~os*|ecWZarB@S@=^Dn-r>?|PJveNo90cDm~ z`mAC3Po6X(QoFfouvf_`BzmY+$gHDJ3=%Z=+8lvZJXe`dL)hr9jS`7Kzm}kBKwMF_ z8d2NK;6(IDxPAGd0x265e7HL%2NG>*!itln+X8XPih4OB9&`i|k9<3um#=OtwL9n~ zO7GcMkRhaSdH}s5Uy={!#Yxv|I(I-4 zoS|YiPjqm{CkDY2P{qtBg^?LbhW=C!0@5ZCM^y@#_J^l?dlA+jJ6pY{nBu(H3&1;4 z=m^FfQ3&^E4QUh{A^Frr;O~ht?a&i9?i_q1V}IrR`p&l_b^>Et?sT_&eg&1@+>`9P zvVHwu|K_*9`^}Sq^UI%9zzBfQ038U^PnY)%!LwbQJ_02gSGrq8zHxB+?2FF>Q+oWf z84|&R6BSr5h%JiB(g7>UwDv*Ko<>1}b7^pzvI2}a2|9l9G{(coetmPF;r^0JSjG-E(Iu(ZVOIOC53q zB$3b_3$k(Gc%hC6KCnRxeOngTMfrC@>EIP^>XVeRJoiW0;~9Y)DW{~1Iv`2EQpeE_ zsjxmNEG!GzoNlB0))%N@LKcfnY`pc9AHlWMx2%6Dss{;jd_5_t+UeLEt?$%TcH`ja zThajr-@$-@^D8@lXa@wp{dnl{+c!hcfQ$a=PxlTQ+psq{DAT^3NiSG zHqSXAQjV2<6%ppM(5G^4wFimRfz={VGZmrCSuEb-Qap_10>4s8kRiu|nYG>|Yj)Gg zYbP#ez6cn!*xCx!q2>-TMNM`5%u?juISYEf^@k!ZO9)l2dF9?Rj>FPvCOoW$ENk zrk7G4ZAxjkOB2n>WzI>sR9A^Jc+z*A8L^at6}CE@!E019rvf=6+X+>p!}mrw*G`c< z)g)RZYqwe5@YK+Nxl9`llrl1iq?h6nvz$z9=h%0-hrcJ|Ig!l7pM7^B1tOt;2ND!y zxsLqFbiysV#Sus|lp*v-J z2W7kE2_bMPxQ}z~CwLYBX=5lLeOo~z1mEwQ6K;2nfP#H(fN<9ihYiq!og?#oKT=cc zL(*78dR&AHpMCxHKd%I&-!|hB;AlBaH&7Yu-EHF{z_SKGX~&~+MJ=qpvbTLFpXo_N zq~UOJhhXR~5e$ zfQyq;b5`mbK+QtDL=6IwPX!&eb_hcI{u#sRTDSj^oQ6!7=aHsRX5_p)MIBzN^u^Zl zN37PY>nOH^<18=^+Xg9vOFSk%)q{rWqOS86mTGM7VeV-scX|@`2-KnkBxtVP0o@&J zvk$HpTNEF~g_@5RpRU+mVjf9Q=nKG8Y0picB6SR;r8pa{NuayaX-2rjs-V&YL)!eA z={mv@@#yV+6P&w7{q>iB`47MU^?>sF6T(qjK#zFBy*PF_p5e&d z+L7&2yr*OFs9ieo)0x4?RYYoMNP#*;m$JmELGF-kLSnbP%uz=G#RE71hwgct28=+c zyblc)RbG%ln3fI^i7q@;hKpF=4)zoQfdT_!$CQX&N3Ho)MX?T1I4209NTg(eCh)_? 
z`if#gTy5|wS;R|u&Fe~(xMjmUl1-uD8(LCB;(s4^+~E)p6Q(wqJCem`rAjoih!Uv;|E`$rc1!!6UT_PeD&Nyb!fg zh`UgL$fZf-2;h6&n&jZ(m9^oPwdX{7C{6&XGsV5)pE|!H%cJ5X2c&ft_p+}8znQ|s zObLd0%GnEer_6#-%D103Y#V#(B|ighw}oH@GKt`8LzQ6DhZ|PnRHLentk^Pp)YWVa z10?^LZ^WZ|L~dhE*w@HK8VL?=--btbjpZ*QUppSvNYc0C2O|Ay6@6;7SABxuSz|3D z@MzMGNA+}Hdls!q+N$T{nOg8@+|DPUr^5j$3$Y~UVEQ7Jwtf6o4aU}4A-1FT+R8gA z3uz8!yP?(t0%Mi?eIF2zOm5zbvx9o|IG^DUB^`OpA`)_fISZKH6aoydeIb|^Btfg^)jy2Bkz|7$Ch_m0Y?*NFJf1emfYC zM!;bM^t^4)_ubW3-^#Dv+)L69%I+0?v%jtaO;hLuN;Je?99qq|weqWax^J97u3aBk z$D?Os^?jcYMI-={c<2exQ?Y7rl>@TaE7ExDo`+#g$L;f3P!XdTx!M@xDLOxW`P|!U=0DkJn~Jf;NbbPO!7V(gpeK- z$$M;KEyWg*_`cNT-!Awr4~72(JnHeIiS(?Rg!wVC@NIc6cMDWI+DsJ6Et6(3MuivY zfOOMWu#jxgH=L$Lno{I=1w0OqF4nKFuC{KcH-~V`UA-;VcXG;^vVTjZ`eIcf_)fH@ zU3R?-wJe>eXY!D!48!CJs}jdT3R89;ACE{Tn(?Tm_Z4{b{kX2A;RT@ac;v_aX?)U- zRt-PeH~XvMc*HbVD<0L4ZRJ-0l<}-(aKa7yTBYgF2Hn1Uv7ZbdVa_N-JhI#!fl3^v zyzs9APa6DIuV-@xlJ;fPA|73mtE$%}%vvfc1PjJER#l@kUA4bZg)+eiiVD85%c6QT z1dB63%3`TLhG4i(Tp5GN$zP8KLH1PGfi*9CwoQ4VrUWcWOoQnVKk-ni6x$`kDG~Vo zP^ius90gwdMfd^a$K&w@{cMLP8YPO{7+AZ-8oJmAgM zPGXI1dAr=YIGQEq<>DYMw8b(r4DeJ1^a2?{vFY&56f3kGi8Fs$X^X~Ht|S};NW`O- z=-Un`$b+?=UQf+6-o9NA@pgp34yO+=QF?O^NxQ#X&5#H+pS7^8C#@*q`yzDJkA2pt zh5NX8)NrRNm1@VMRnPRq-)uV|#XOCpM*txyGXQLSj{=XZ&du1Mp5+mo?~3qp`qBrh z$~8(aXshJ(02PSsqW@+R8&sew&T;4oo>(&gvboSZ&K=LJgj<=5-9zOmwbW?#tk@x$ z#uEjQ%$9T~XSI{in^a(kh)qWUC>7%?08T>wopSPLg2pZOGD&Ft4Q!9jKWI?&G~XZZf6A`O$D~=Vfmz`Dmo#|YIYRrH?Soe ze`V>x_+Q+-(!q|UFq|aqDLW@ft$2js+XOQ;F3~IvZ@WEqwyu6G9@VMTit5=PM^hxs zDgVp*-{ANC`xb)K7Q5?sw7UwAnhl3`9~h77Qo!;0zE6Ni!x$2kR~q9EGx6+cp`hF6 zw+4|o@@PEFztUOF!1cXnK9}syWy+Fz(sQMLE(pGHW~r|bf9!gE3<9fSh@fn}IzK7j@c{b>*x7h_{^{ifz5!f#mc6xAeZ~{;hO?4b={b^|Xc6V;O<{qvCr)Z2 zdLKC+aXYO8(jM3qPPaFJ*&7n-aT19jBA^3he4v!i^ zt{snXXd`J_t?z@Z7zo zhNr+)6|r{V-|`T8$(%73F`1>bc%de&amX+R#pL?aU$DfoW6THiXWInmq$o*>|{-SBnzCFHA$qFVN6KBk=sB=U;Gj zqvECbR9>Aov~mjF@w?*H<_*d&>#mYZwZX1<^W_QU(b3j)orf4Bi2y6I>lXYB*~gI!v}UAxRkLqD;smls)`Ylj6G}_A|)0 zfX9$1ANRUOq8X3s1f`*V)qqFi^?mIWiGb8M@Tb8HCr! z!A#OJ#?5Gq6Nc`kT)a=|G^(_bnO}WSm>?i|$5WP#A~5LSM?=s&6@@aUbFJ6gKG5l1<_c)vVjF7xtmo_Rg`V0Aai zge>@QfCJL$YGwEJ$(}Vf9wfik!iIBjnJl_Oq9z|ngI2K(H<})=2-z}p^6#u zc}tFrYhADN2!eDs(l{G?>v+`Cd|F3@I!3lZ4=}FL>+bCsJn|=H8ENuRTV16GKxp6s zT}ZDdw;CIe>UdP;B2CBxm}ppkxl`jx+pUdwD3f-d47Cmd#4KA!Q!VStIZ>M-Asn5@ zHcG-43rvdm>divg4L%c;#}}rBhKft1r->F*T{+j`cJ;_IO*pBGn`ebTb?l%QG zI^ryI%yoKUvKURyS~$+66w4H_3}7Xb=ArH^i+nQhD4Sw3^z8-|;=KYOZKMH!)LiFp zU?{+edab3E;NVs*c+{vDA7Su-Yu^1FfAeoYV$QDJdZ?k+gME#H@24XgkeXEn5zZP6 zVQY74pi(3C6re~00V(1Lf75HlJZ~)F2K`fO7W*78ptOd?f$vY7_Kx0Um?|Q+8?3-QwF|~{3pm6@cEn99;&g;!b?#G# zUKt1ik9LTjYaN!kvum|)bZcG&>w+X7hRe>T7du5lCj_KpnM_{1>Jvq!*p7;9is19? 
z^h5j;y#k~cZ03mc?b_o|b9RF>cJ<`c?s&m>9XfWKG<)q{;H}t!JJqi`5|83Dp&4ly zAZ7O8bE<<^mBeGQM#Zs&c6kA!WbvF;@@P`50@yPTncIPdL`Oi;o?kQ~YR(A9we8r) z3nUnoB2uc#o`V=IcxbFWUkbi2_iXJxH}Gtxg7v9l$OVOmHlawfBJ5roAcRYw&$CeT zzB12swgW|Uk$_LSS2REG0H}bvf$O&DxXx|6t%B`QzQ~QhMH!p zR<}lZ2GByp2~x*~;2}ymOYPDTx>0cTz_`V~^4Q~hj_8|cYj%(uu-woKYTR(jS z9__XO!eBfC6&ulj#Ag~Pv?CyCCjKa$=fq=%myDxxfcBWtS)R81vb|D_DtZn~HZ3TM$b17p%DTRs zAY*|p2i96}Q2Qj2g(E=qsfKy+v(>AcBg`lBFIP4)9UEg2&FT3GUh9or^wj>DqH9OW zz0NPBgE{D45;DVy(gXz_k*|ZR`N(?LscW&zfMF02N%)}6fP`rIrp;*dhmVd&Opdjh zMpUhZK91nKLZzBs$sZpyX9t4RfJb8m-@B7$f!$a;6ecy9mY_n48sD#7JeF@sfuvasP`!zmdJHx1rMmG5}22j8;WH4*2CobQNg}s zJ95v0TGSNHc^2^qY>eItjHkZc3h_2i5wANGqG*^y7cyq<&|vSXJWL^gr%Nyh5k;1O zN1GS3t^me45Z7qZ^%f=&p<6JzePc+(BW^c^OA?XnZEMc+vj8#Yc05JnSI&9UEUfON zTV3+i6#_#GL(kYP9swip_Fmt1ubl&Y0nQg1qg((A6@5NhX%&UoLOV?#{X2QI{-L$0JxmoI>Pw1Ri(m3pTJ> z?_N5bAsFJbLUM>VeK46k-@Dj&dHyPkLDQz!U3}dy_buU-xw<_maG&Auot$mCodZtN zo3$favf@o2d*guEys7N^Z&$!b&QvGhlDsdWN5z@}V%HHzLG=qsScv}|2uKaX_ZrCr zu&c55U)RFn;08Pba5Ws+Ztze?!-0bD3PpPJfEl~~h}bF~wKX7tOpL>$<^oRR81>4b zc&64{?bb1_P2Y!?+z3c(pKcJ3dj6Vy!#8*#WF$`%U$DCLzx|GEy)l1ib48mhdvi9- ze@D~`1Bqfgj>G7p&b|u|JexONns_RTetp>l4iJxCle6rg($&YK%QMOaj$XaH;BD3$ zoNA6giP=kqkq^CXKxPb(4jqpOnfP)iu3o)HFOP`VgHB78-h7b=NLTRC+xbxRegs5% z#qi77eJ~8VAS$kLNQWj{360j5S2t0Nz}n*KY#tWovd#_WmerYRgL~1cMnHo5u9^)) zxgceAO7`iMFio~oG<@$w_d@Y95gwl~)c3+t$Fpf0AR$(MJKVOLhqlNZfJZHg>$_$= z`k@(mr~)4@_$D6RwF|x*Uh+ehkPxNY8)6#;->rDmpn7X3DD^BFPH&}Bt27$T^{YPz z=r;mV-@wk`#b*n#QBf_85?uvCT_C#AsifYta4`=v#guA}v^(!W4I(y^6fpY-<@cp& zvtI`#@0Q1VsGjX?V@AXym4lZqF;6{h^V$0TEKOeT+%DFIEMy@sOw^DasYNW{r;J1; z9-n}eDtJW5oRg~{%av6o`pU6Ey7n0d_VS1u#c$OYO*~SfGS^72)e_qCxnb^pPI#pv z`QSX{DRHwKLQetT(r@OgJA%dqUmI%_FZ3I73Du~y$*GG26DLIR3x)=M2E-|v`6gFu zvUMq#he){^r;#vkYe$54@*V>1qHB$S^u|BlJtDW`QKP`F4qSCD9H`jvu}H*@D(Pxm z{a8Gz&mR0=4+ltPF5hzSZEA?c#EDjcUBl_9>a@BpGz-4#txoN?svp>1zxo0%xe<`e z1&qEiw68&@yb@O&w}KMR@yHkudTQWoXVncYd9MOhiJ6?Oaz9A%0x;6mO(xsat1;rL zJa^bRSERb|h259dy_&1gnQptgdx0I~v0w_&kv?MCBh!vwbW4N4&Td~GK|`fo0FSR< zI36u-wtB(*`G)<$E#d^3M^GWO_}Fl|;{uA0iZXjiFMgsTlWeI-RNstZAfc!yd>qLvD&NS7Nf9@QHhTq5o9hl1pSdkcYt`2*ud~h_-6AmbQ(zs3upXn!G)liUW*cXH zJUZ++9Wdy$Tj!=nbtK~OjZLI_ZgKRLed|M!jDYWTc6`=DXb|c!@3VsoZ~XC|sLR(* zN&AYqBTSqy7ahRKUJ2ameLO10-c~QeM0R==p^@p-u8iEP3Ln$9d6w7^dpXwtNWInJ zC22QD(f;yW#ixJ}4UiiYU%~}1YJU#K9wMRzOfb8?C-Y=7ICj)ZA>JCN0=px9gfo_+ zpkOS3wA~CyKk4??GQO^%0jWXo4LoXTb8Xd&k7z)`-Rt?nc2~YX;4MEqAl1dub-{PD zBA`xX`mviB_0@=3No|5}EUD>M!||wkK8i@~fJDPEeqs_OygSjIlZD`+IbpPsPbBCC z4`&c214ybJc{-s{fKA;qQpHB4j5{7P1c&I7Vr%Hx-2JFkQE@U>rCkx|(K%*GOeX3b zG;2OO114wKgkE;*=)28?X27s{NsFdQ7M!^t@tEf%oaMKesLxkAV(?6gBgj@Xsi^f49Wf$Xa7%47@r1hIuoa^Dcb>Ovjx*lJ2* zj#KWH70HAq0X+74igt1b`x#IJDvWO+tWcv+GxsbNdG*APCtj7Ux4miW4p)%mH3QPO z%-9i+MhLz^97f<#t^U*?_{NdNEG?rJJUq=)lq=mqq4JZe`2wDm7F;Sr## zouJe)cDHqy?SKZPXRGk2yynO4fW(^bvb2FN&#(}~;307zJG1_@GfEL8L;@B57EI48 zr!Bx(c%o*n(nwNCbbnc}dM+$(Gh;-lp0Mn6vm}Tvlh;qt5;{@YUb4aycKmJ{t;#7` z)V`XlaGI0J!?Gyo2B;Fz0th7Kgbfed2T|cb2f@Yi&>%sZlnU|ex+Dy*dY(y`>@){v zkxg`x16wWbS^V!N_DcEuK}dMm^IC;1>+cMf|ZMqkT>V#`(@`5KZk#BPT6@0iFh=^BO8HwOhYVoH^{g@ zIv%0=HNs>TJAwP(JecY`M$`dL0piH|MOy^lyA8u^hZ>AeR^btF|H&Xg zBBZc7&B6f6JLE}Xv_NbB+mC^HIGu->pf_Rk4KsFI)LcPqXXvKxsaKHU>L9j$&OY$} zG}W-mT1@6i?rb>4>M@dKVqtcZ9XKe!P{+1aq*y>F6)j#vuwjE4Y-S@(7K_isBLu=@ zXGK2c98V}Ir=2DBgSw-tX;pAHjzKdW4GIK68Se6NP{PX+@L4B6OIiJ*WE#=dl6S=zwE zEg<@&IYnz?7OI{mN%JHzQ?;((UJwoNV(MuZYZ8LStvhPtgO1lk(lQOZQ%;4nCG5RX zO$*MWK=KD30c&TT%1im+6%lDqym%tYdGF@;CyA3Zcwwl;jIEvECxquJrH1msK%rZ* zh?JGRKC^CbNVKidT?it*Q^sjb^($Ou&%glf$VqbM>p4ATHA?diSJ&OEdpcho;a!X- znri^0BKqQDrE>|emka7;4?=07C@Y>82gv{lb}*BO_zPL 
zE?{gBAccrNt^o;wdbr>lM>YxUu(KJDS_R*AJbF7u@XeiX9te>7J)65D@o2ZbgQzaB zYgA$H)&XGv9__Z@s<{CPyDR((fb_|*8dAbXC5I;{SQE=a934t7#Hp=0*bI5)ShA_) zG?*~MPJ7MWdOD^+S(PBrOI9TJIHUq&&2vsMmE_eR#hCA7Cla%aiWg(^MG9s zKzAXis?R|}F_lm8SpnR?=s>DSC5T6h=WF;Jd@fQG##md{{tw;&?Qo0SUReMm%Z}*a0L)2)^r_Z;;@-UEjC* z^|b@i#;5C4*;${f5_xEH76f)0ZRfHX0=X%;U^F#y`$#_Fbk2MbAs}s);2Z?sTF4Ti zjk@TY?$Gxa*(Y~7bqj}^QX@w@DNrgaq8FWpuJO%ocsS9LV|>0AAZ4c`b5Y2*Ih~pN zfmy4$PmKtxPtep^4wPDf4Q9&DS-qE|Ga}3tZ*2PD*&08UPvfLV{K1!~KP&60}6 z>9y(fsC0I%S%oPpoV0hy;tnT|UivWCxVlp5+ws+%a&>zh<4hua;1{{PK^A&IWBE^d zL_=5XjB_;LSquMn6|ztA92a!+`u+YTQS?J=Rzi*)?}R%ga;9eMAn1|XlUw5(kU+ZX zWTdgpwNdc>{aC^GU0qMnF8IdIKR!GlA^5j8AT=ujcAExPjBY@xV^@vST-;fRVujmyn_+V z$-?lg-0Sf6j6wAvpwBer651AoD{!W=rpt0l$~trEv$)ukEc2uF$Obka6=g@y(e?6p zrkD_$x<06h;CJ^%;v7I0&%`ZA4_vkfl;6MhRpi;GdyY?12DMBZ_&X}`2F)toJ$u6F zj%lg~9k`tx85q!!Ppn;vEbf+6#VIQ=c5f@J?!Z@flgsMZF}v5XFB4lkBC!l)45@FU8XF9my;IIO|C@*2Z`;;p2481V@A$6x0YjYg?Xuf~6 zwsUg4b+z&G9jmx_oi(dcgK#v{+0i=C1dXyU7EX7xi&PXfpY3ZHH)@Hxxu_b@;J8Zq zuY9E2fkdx;IoRJhS-)Pq?A)-ocEoyn&U*zUrM>5z`nlN|F<7$foQCUW(Y=>(5QV~3 z`_uu+(Q&)n6r^;|sb;4*IXWi*Ol03mSOrOsUiOFX&9UO=lZnLD<+baux)+k?VRcBc z*`8@;|Bo}_=)3*gkPEELbgV1sWi#5Q_30A zT00=E#-n;fZ>tLdV%h|DU=s~(uB~R=?E<@YJZjg#bLYbY(qmRY+63RNcvSDGZNwu0 zUo##xRE2;?t=>khz*U$1@zCLb=8}GsD!8pGGz5_Vg7xhvco&p zw>^mMj)LG14~;jnV5)G-A+T4S+34I{L&fR>eL13AZc@%f<9VmEH|e70vcgxTF$anc za2=WPnWp=DJ46{e&@D)>()Fsp2mSZoP2Cc4>hcvH%B8ouaO2|6r$C6UPp# zv-4tuk&y*dTvV(IgegzNrllE?aLDrx>u9NHAhtFPklGuN>UabPyD<$&mHN{v)^q=O zRJSLuHy{lYd_OQCO#nz=wF~U(gUDNhcclRdbHrOoOS|B^%K5%(71*tMq}@73uZD91 ze=8s*q1#I|@K-omvg=*5gv44vA<<>Q1M_@{E-DyIE{Y^gaZ_m67JJ#|RD6I|dnMw{ z&g0~vlrs(FNm!wBr(RAhaM>0@Xw-ZPkj)P7P%~p6zQ>;ew-ik+@ zmyA=;SZ_TqaFdS>K2nwo0zy|^Nt0BDFJOT;UBYS>h+mSNS3KwGto){m6}i z7jbph2O`cx7Xrbz8X_r1M41fxjHuduKy0{DKU46n#EV3gCEJXI&;#im)Hc{j7hV8Q zkLAZ1DFm@rKtgTf`>_)zuzR%NyP>V9E?pQa_^zM{5{?aMT;CZ_e^5XwkT4F9R)qc*=|InmvdceoMC-E;R}5K%!BalOqHMgVl%--)cpxMZSvY`L^_)ZP`EV2 zuD$v(h6KbWAf3M`NXD0xaHn}%p7Y8SSsAUGFTzuwJnu04#8^lc^ZR*~MGa;(XW<~( zjyoL1-mI0cdHoS)hvZF);a98z*si7Fz>1}jT^zaZZ3mRw@c(x&yQ#DR((2*l?^!O86)Q8^OJZv=@}E#K3I2Bbzj8V*Qo z(4Z8~D*&NKHjDCW6EbZG>?lH}Fi()@Q*j83c|K&CTDD`+$)Xm=bJtbl<$mfMW*Z$~ zC4u2|K?tV)*1DzdZ>+lEX^Eq$46_i?!;6*Z^-4u@&p!OP2)={i#W4$02bJp_=YmI> zet3-6s=7xXRnnQMPE5pHCbqx0EKEf+{qorRG^w((3)57R_-v`xSC`7#Cd;c+FPhD$ z+ZIi!sQ4pgt{@#Uo9Z34fF0VU5|F&9$Uk*BEGh9=j49R#KmH%Y@_C^#c1IflX{By# zbOTaDVvl5Gc$+JatfSQ3&lh~(b$Ikcqu{$ib@#A|M%YJsv8^d4$AU zWj?e>R#ug)eKCvW^Jxq&H=9j>v^9B;NPR%q-b+0tZ<;z(z5mPX^y);j5LS^*+`|FI zpj+3DPhXwktjnw0bwjiqhC!*?xYq6E(7t&)6Cr?a;ba%sO)PdEd-4C~ysz0v$cpCJ zR7AoPD<0PK9tfKcCP!P?0u> z>Oeq3m>uq}GhXlwGSVind;6a#`2L|q@ZF9_=tFuSKw7O>ZlX;;&N}|?-3|UBVoxI; zRY^;;;5!}~1$H9@-@v1WvKF3xBp}hik`uz+x>{3&#EpvGoRVx#D-xRdDBWC!cIjKB)J)0dK zs$@0)@i2A63S^q4qaRy~SLfXl7F*Lj6g|+|nHGh7NLKnyAYvYNXuAFW=c9ak?u|_X z=WJo@%qK57M7$$svqh1_sTe~LsvdJjP<|)tIJ?%4@vKE8?@pU-^ibv&;KnbC*=4ux zVm-GWRyS9nq*7#Qb(0B{if4yXmg1^Q(Z26C7ZqnzN!CQ84{tzX4lg){h=O!W1}3qC z+ozP^mdR7hAGXxtcYAtJ~;S_x=w>E_adK5TQ)t6%%E;>%x-C{_I$?mnY-PQpoPI(=62-<`he|1C80c ztWGr%k>d9VySeS;)C5Qflz;f4z4-qT1mDeg^pP5n?wT|ftCaC=qun1CkeY%thm*G@ z*LZ>7zq`TT{b&PHJ>ND~@cpbR_#TZ%Ewl~0MgbB{6$nc#0Q3i>c|K0uL#%lHB#AEF z6zwT43!l808B~SpiP#oJ6iGGB_SXbSK9y1=z+Z)5;A#!Klf!wr9C(K%a=^;c(s#0` zAi$&Sape>XLrdvq4n>!UsM>>w05Zf|ikYwLdrj-!#r7I_7FkutXz)%5{auD^eqctZ zcz95P(i@5w;DUVXDJ@*fyBq%NT20i04oF0O*tN0N%biSh^fgG+VQBu7#|jp#^BMjpHOW7%e(7;R-G<^ zlbiBwfUW^}^i`wayNZW(h;0*mvs$&CqvQo2P!zQ1S0rS!15ptNBODHcBWf}z@nMokPNDMg8XPfv4 zhC-;2p?;$xhtuoD;(EvFhbtzPJ6(o?eGr~VKqLlv({f8r>^rq2Zi+_mwj>RM^fILB z*v_tZ4oy~<0CU;;zO?)WNWbK^V8&2O1NGHsNLo@;@%s4xkQO5Zn^HzLPjNUH 
z8U$D6MqE<~U2&xxMr8FKOmEeUa4CQg=}?pQnikHX!DBPb8D=B_pH-E0Bs>&~9?FuN zFl|@xB4-mR3m@Os$Kc747a5g%44x80D|KnJLQ#*{FG014ItprYP{X0v!eWZ_rhyP8 zq98^2E6ehigDBQ2h29atqK?3qq~UDCJ}G*cz3X>)e||azDt5156JXBpnJbETZUjoz zbDQp1r;-hQ5{VLC9~3qfTdWZb3Bh+P9yNsNw^hM+8_EDq;zfr~y~WOo#-b6A8sv%p z=K<2n*!!$qam_=&es}%9Z4i7nG{k;Yg~R(7d=G0tdfJ3XUp52MJ4pK9%$?a@8%vVL z^%j1dtOQK&-Pkfd7%YLX*b!I~v(H2H+|Kpv`*PLmQ}jyj-7hj!ei{D^Vd*&Q$o!@? z)9oTvmC7p7@h2j_h(tDroy+8CVvI7nM7XFv^kfL$c0ecJ*G$8{j#k6T*e6r2v=3d; ztY(RB;!d1U{vOY3k^K@Zl>H%E7v<5h1F0^T&BNSgll+%7+_8((B4my!axs_57aQR< z*_hk}eUdn>wY5yqq5P;ujuxIwxly_eFjuZS;(OAX#0vXLP^#IYLdND}c=$x0y`A0K z?3SjdO2xAVd|E8(>x`-**&z*YW6&3P^9(7JM@(jok?G zLWcsR^ZDS$f#WF!-#5FX*~Mr)s476JXj{H4)Af?)Tmz(7ycuhc>;-m_tpI{$Exf>P zu{Wz!wQjVrZOdaxhoJNo#Bz;(bl{1Ni_LhO6|ntHPFo2;kV+1N&yq)ZP7$S`<$@5~ zDR#vEc`g9hx^YCtT0rCUDJrDpp>5y5eA0Xk=gNXfh^R$i=b!jh<7?<3M0p4}PCh>; z1tC|H{ewVsz4Kx?_7_|XZK>0sVw9P$Spc>`Nxviz7l>i==ybP5lrTUvk}LGJne;jz z=|mhDX2)$SC{-|4V!H)D7eL#KKq^2oSIuTilJ!svqz z+xu7jtLw|bQJvuXeCuLcuiKhis%R6t>^XG;QYiaIM59J@0=uG$qm2*|IoieWhM72x z%A#cbfUlzkf>O)n$QeNw=YZkZG^T)X4fh(^z=amC0oy50$aY8QA$XAgB_c(!DU#;+ zYqMIKjI2-k2r7d&VrI14z&&KI0t@}9AFYC$asW-6Coqz0aXC9NEW(T^Pbl*(LN0ll z@lp2An`z^W9+c759((b5+?hZ~>OMJ`oP&Lk!)-f$*wXCT!V#I9d}1i4lT8fpt->(Noqqx(-N_-3#ZkfxiL z*H>G^+uPyR)ph@Jb1*->*```)Ko0bF%!ylWkt(PCPHfXZbMYo^o9TVW6(IJ%7X^p#xM)NVMrtK6C9HKpWx7z>?_ksC9KQD0w4T|!62q< z9j^jfGi1WaeKQ=2T|R>^2w5&jCS6nwKeTP+d;kz7L?lxouZ}D{c#y}Gy(DZaV5La7 ziKaT#O@(}4q)swIc_3E3C8umbF@Pjn3=lhzc@X*{wkXRjFRgJ5bLbM>JeuTE1}~}B z8AW7idTj+|@wL2)1V>kHYixAW?w6&3W=;gj)5Gsv(!g8vqYsEADJDJF{N$&C^|Gt5 zecH*iibr(?NWXl70;FFqPb%F%K>-putvVHX1)+83@3awO)$3-oz3g>yGTNCOk9UU$ z50p9^49@yj!|{&Z>sJ7&f;@HIef;2Qir6iSxb-hs`RKGvd7P@2&NC$@X87M9%ZE94 z^KS1H<3F-mr#J5lU@kTv3BJ!p>EkZTO^JNG=3NBedldypjK1muBpnJPzOtR164qZe z^7LPhE+>-&W+qn#je9dT5f$UO7(@x{mK^M$}+6N70`nnqS1|0|NVV|FT` zg?IlqLrQb$rbT+vSRy^KT$a?ZSYH`n`bd36l!}RUe9!!9bI;v?RAEsobP8Ps-}k?* zFF>jneAgHGE-gR;s1+)SdMRHgAl+_veqB`nsiO3?PVnsqhsjY{Qf!nAG_O7%^dem_ zwl_}o>9629()+RQ7O_~M~H`rYX9U;feg~5_Rw63`OE!@K>b)4heO|${Z?uDlA-_rs5?-A{M@c_OPW&_YBcV!>LcpB4Y9 z8<2b`NoKmjB14}w1mBfBTAozGm8lWfRji4eR^8n%SKX~D-}*@hAf0qUQMHEzPbxj3 zl?pXKxIb&5;=6g1T1rH@x;#AJZGD-kP1*jmb?ZsG_xk-#aeDmqyL&yiLHJ4G(aBbS zesifiL(p(pT(Sd8_Uw<+=_!iW`e37O1xT-}Ht$O(enB7wJIFtX0!r)Czu%?05q&w8 z6kFr?UDtTu(54De9b@;hM?+-~WYfhMRH@T6e{Q~0qI)4MRz;!6a~tQt2AR?9f&CL@ zv8Bs^yciNmitX{Pu4^dUK@}LFo$uw>#Apnl=YLZLn>uU+%2Ev_|Q@EeafNi}||rt%(=CS_ws6Ow2Zuot2cf@S%*&aq)8 z{mGQrrX_5+1s5-0vp?Qp354r+r#q0as1-a~Q}9i)=DubhiTf2nJWX0rxtq?cSQ9y| zdck*f`PNT60O@UK9-T}pfP}QW!V{`gTtDl#PR$a*_gR9=E3!e`Lz{94dah^f#qE@% zZ3m?D;b?d&FHy2MVA9GY5_GnJl&((>Uf*U&KpO1qyn9&Fb;9CJa zBGdFmfJD8E{0FvI^Q9n*60SfoimZypWiH6ZM^8B!&{Te8`m7={55epmO<`Q54RENq zG|M@$4v7}J2m1Mv;MS-f__Sh_Sh<{-XN#$oG5@+v8Xkj5S(j}*9lnp^(C&+q_ zK)92-TcuD|$K}eAmvu{sIA%&VX|C3rVEIHdb($JfG|T+RlJMgp1+6s|Zqd>}4yNQ5 zjkF|8q;oWv^N&rnNWf5wz^#rtvNE-}1jm;02p)PK)e62jvzx#U#Qe64YOhm&^e#Y}?vBT|x8w2g+vk&= zozcn3?BZe-ipacF0cpkFdtC~U7(~*rwL~J9{Fpz{u`qFyeUYJYV~`MX@-XF{ys6Fo z-s$xqab}WYdL!l=$eG%Ez2oAO!q|28j}G;Swr+aoXV=rTfa|=D?a@}huN>8jPJ-{f zszjSk+!QIRfb@;9(C5eZAS=0ZABBs-8G<6Vi*}(#gJDM#Teu|`w17Fsm=LM8`LIg2 zsI)DWg7>ArdR08*$~0wo8&MPgEu?Hru;^3CyV%A+`-YH6OVLKk1_OpTK!J>LR{}e) z35#Nt=E189@{yQR^EX&z=fCK&HrA-9aZ-nfs#^J7U z-Re(k>yWUhHTzK1@Mzvn{+>}@i8==a-<0i9Y-|jVdixg-|7JPB_IN+vWDAy2@)F6It;6RUsmu9$Sn2($S-Fxb5SGi zfR0tTg5{w?Mpg@%LkyBngE@RMMWtG%Gz!$n^T{S1aFh&9uQW*rmU%Ltialy&(YP_1 z^W@lIC`}gfhN%B*fi%h5SZAp%qESIUiD+f?h>_NyvADOwjNf7F5e+;oCd@R7f= z7T)w_%HJ&%QGBu2eCR*M$IA13jFeC1ba-TZbqc0*1k!4YdS8v1dcpUK0;GDu_x%&_ 
zh#2Ch6?|(HAbsN3vkrx^pRL^duAWD4J^;_j64Sj?xN$Qx`GR~A#Umw|nWlC-GJ1ii>Mb~pk8hfl+)%IY?{Nn)0=yK$GXbC+tn3MdCUcUq9j~$`3I>CfkIF$LI*42A z*mefI5FdFmPc6Hbvft8HfGCNtlXZvmnTk$O;cI5`I=teMXR6-W=3pXniF(+~*A^#h zA$XEZiceT%jZJ4Mt-e~v(bv2o`^!l69qNe%dtazX^}MIo2}l(Mu@wbK zs|0q-cR;Ead{^>_Gi$Wj6){A3vL25#DM0#v?9`AfcQudRRJ1LaNSn>6c5w>=abq9l z+nui7kw28)Yy}Q4Ur)R$W1bi?Oz7DbUy`ktCw5o7y)C?+_pi-g8X1qq>5cu%#Ib{! zoq1i$BhXt>fTZD$KCAjmi^!C>JzMFT^BSnUX5Q5qEELDqE5)_=i z7=ay*O?Krd5*AYpKf(yT9As=aycCS7T-g!%BlURjmRDD0T;hag8a6~uV=zyLA}1%d z3kWfSWEk1OBrVq9+LopIaIQ_NQ;=}vk2jdNUo?MJ3@9GMWz)Z!dWSTkyDR_ZgW7;K*CAu}2 zF%Oj65cENbi!0!)4ji!UPIax1qp%f%Z-0Q5UTl%33{B6A_+XI~&N`3m49!DrD)x-~ zcCH`Q)^p#?(ypK|7`g%}AOihGWz+SB){MZ8UJE)``apvYn=_F-bGdnMoFiz%rhDVvt&@^P_V>lHZCLbh?HcvC&lS z5DA(+W`{)G)L`3*`XU#butu+P&^X;PT*(@}?7mjfso^I8>9_R-u|(&p1a{p8-wbvW zd{^x1#NsOib`=Fk6>@o<`?NrsV~ke=34vWr0aERDuBt=5n%}gHIGVSmy0=#k=bg0B zneS)kWOmr6bIwnLbQPM)$$*9NsA2g zx2fOOGf?Cr>ux!DlJ6A2Ak~TuCY9Ma)EUv!Op-U3A0d;S5k{^Ca(h7#Zn7tboU!6V8#yGXPxbC6?tt(rrZQ0<>1cgK{9U+KmqX7;Vp)eJB;jdPwP zj4Q%zZj|SX{dS$YT{kFQSMp*XRPzE1Ap%aj;GDj?P!L1uRMW&zl5)<$GBJ$uQh4M7 zEvkEJu>HJn%TLNm09sIawr(qE@JkgQ596Bt)_kOkF|umkf}DCV4McK0_nbvM=)g_!S|8^q@Vx(VdL6W z_S|2-zTkTq)l}HSaR6f z_&XiD^x@$lF*|i(G`jZ?Go4C{Zt>F8ej<@!Ed}e;#WDK6=TdIgl z>Qy25hEmMX2}oa$#5NYt!=c2>v=^fu2C*GoK+RhbaZKt)k&E6S;Zdrez>Jm=b&&_9 z4kKlr-ug=;AXiJ~S`sV^K55Z3=gH;#0#bVY^SAXR+Cxi`KwB4WRBSKth*DFw#*=|o z7T)*k3946;@s)Me)*H~gkhzhbE;4}LgdM->T@<{_s`ZExB-e6YiwG9J5>hmRfp!a` zE$GrNhj_QHnu$K&0bf{`>#2Lj2D?*}7r1z4w3ZhzQ$6#F;*TO}_Zt~9NJZq*1*&uq zv_%r4(-(@!qAQT@*J{wv>nQlu+fkLzU+weN3mTpt zNIm%pp9dsa)GCYore-@=h_dW8aa|LRr?T-P(nZ@_Zpf4`vz!6=C|RVBhcG*HbsXC5 zsS}Xy_D}_}`O^6VmKPSYBS^?WtKD&eMz58LHpaUF=^M?G{wf^Hx;yQ)%zzmNbD?Ba zY+4_q^T{ADDUWJX8l_c}k_{C=WV0k36*`KxC!))vA6#qjbZv;DlZFsyX|=W;Ye-wP z@%rxH{*w;AW@2G;+Q4*c?*e^GjVUOVy;I^IWVF-r&J!~DL^fM+vXwaS^g%LjQ#GzW zKkUiwIxwNUP6y`dDFR4mJ0vQGxks)r9Xj|^13{cPP-v?(;gV=>uu(FA`Ya8nA>V79 zv*=;V!wZpP<`3ChB!4MLx)!bMq~&Q_Di}Hg2}X1+h{Z~G7kvA{U;h*!E#(nseq8YV zv!DL-ch5TV2u9WjzN>lUfcE202&C!t+nw>%p&ybdzF7?<1a=jB($#F|%HZx|BUm)u zrxT%RU^agLHm8wJ2X1ro;r3{n?rDB`H9Y7Kk}l3Z{$}_v`Dk~Z&fOi&2JMNr!qIiB z8BBL|l?s=Vq~mn+dh5mRyFZzjzTh~yWv-*@&1V04|DyuC*R=&mZicN+IV7i-LLpbu z1;SHef*E%wT%aw37uW@-lF;A%j|_FTjxZ*+R~M-l+ZT8%=b}q$98E`>c4dBN3kd;a z!A+j0ZIRI9p{jfI`kvHU3VsQ7uH&l~C$6!=>@IPpHC^*wh4C^??s^z-wbt*Wr9KT$ zc*-V~Fhp-lgMpwCM~618L@PB6y3Bl=my#f7Sd5XUAEF*>6eHc2h%&x>x9t9&J;}%< z)-R99p#S^Ct1$4${ewiZAV=D5Y_3jHbUOE*nsf6Vc$=;vyLAd;HB>G5US5Ed4EFE7 z@QxFHtljA0Zy2o>*gb*Zn;ZRcmY)6M?iT-?-3j}C6HrVBVd zF_GV?9JRam>FW8`|1o!F*>N1nn$B~yh+K|^h2|c0#M>f3&8m@9Ywo9uu0`;fPd%Zh z1@u06ND3f900aT9qnTmx`)BEhD0&Eat19mx(GlS>fbuWR?YG}Bdf00b*fI1*9xVdW zzsn)mP>h>cyfS;IKHn1t?1zl#0&St4{%l&-7i+sUJGNq)1 zdnwZ#?wI*GKiKr2|94HJ5P`dLS#D-}A0hZ=m&;29Q4txNX}yI*K%~SBft}TYY`Hkqt_{`~OP=cQP9S2$ zW^@#Z$i_YUkJEsp2SWrR=6!84p>`)R52@i5ku_BbFD$);u0X=aRs)_*(=7S?kcT!Or3mpmzPCHk78*PRwesZOFnCh2)Y-uyQ-b!f4>As ziwa^-{sjcz7sG9&!S;?5HDoRWNUefzigcIri0jV?zIP9#od5sm&bC4L&+|aqYb}Uv z5q!g=+f!pV;L+Z9kq&C%QGF@GyDy1VNJJs}`y3wKK=2KEmjmg8;v%i>B}*=b-~wkrG1in2YvriU{*;7t)yk$WhZ>c9>P zwhetd1%L`=2wQ?%u5zFF1I~6`$Uo%Wlh8oHvzrs!<_Obkqm5p=hGdSAmDlk{RFAq3 zI$jjq5cndt53Yt41gR#poz!zvkAy|A?4jr)&&9*3p&dxg1+ncsnvObV3>vP6<@%_h z0O{F0WD<~$XMML@igbe~6J@ULw%_n*w?O)G+f=n`q4ebxkl4&?Gzx9V@@vO!Zft%^ zz%jYZe@z35NqrwYn3DpIy_7yJlK5@i#ogck@uq@r(ED3~^nTr&&sI|%l47C41A!36 zQ%GbUjefMzN2VH&XoL!g@zkhI<)&d0&{4TVN2Zuvy(~r|cvs}#ah`HEs%b6m2r4HH zQ|b@41EhZr1=^^$AgD4!f>uO+S}?a1T;A*HQA`gHZzjicp)Gmc6}cFhFzZXQL|4bb zx@fzd0wjK6fpdxNR1p=Cp$I^$D#9MMctA#?=aNvtoUJxj+AMBGj|6U4?rDB3;nO#m zj2jFch`p*vmw)fGn<9PoE&Hoe7h9LxHj5g5mF&AEqfa!@EckBVkpd)c<9vzW8y*0p 
zC#T0pM`ve8SI6r&GP0RR=W~MZ2}m))&VWRK#HVc9-IJJL8c1)q7sR#+?DnUY?nOL; z2$^Via|&tx^5O73SI_%xh`n{_kLvz+_uY_!1dnbYu;Wpe18Fq?DQ~=Ycm#B?LuKqRAxi&@z>GgNiOc@_z~c1wG& zyQtGVoTQtYzeffOZgr;D$J@Yzh;Yozh zVwRhKvc44CG4yckP|^La&)Inyb9>e^IK6^O_93?eFWYSk< zN29zd?zq(U%DW;M;^LVQfycR#gh5geyN|KpmvvqbB3ux7)#bo?Xe95!F#ONei9!>> z9-P0oyu2BTuqUD%g=n5aG9P|E@|*{%%zyCV=#?ZKfz)KsFxfrtMg>Tl&!gccD2$yX&zpI4a(H}o z`4SfC(cW-R4IP*?+O7g;c&9)bU3R`)eHHaM?{1Uon?@it79ic6N2^&LO_xRe#|&9! zZQAhT_P-;Y)?R=F`&tFxHv&>Bcfs<;oobUhwM~m`K`hYZ+0Km|avw)*t|5Ili}ki$ zfX2a}r+Rz^3nh7CzCu#qiq7ynh=jLaGykDUZNF9~=|k;5jwvXq0V5Sed2VTuN+L`; zFfV_IaF4`VfJ5CioVaIAtBMMiFtt>qgr zvPy8k2|JYT>D2csV(EapdHkS6zx>Y7CZL2!v8;{`zNm5R z_gyI^CExP3elQKB#sZ}Mb{_4|jLkPZCuq7xzVGjSzEYLz+pT)@Yr`LZ`jZqM-?cP# zY7p4PbsDaJJKU1qZUmCd7l?%-F38TyYg|!$q+zjdRlcy|EBmY*YQ;&A#2g=@bKHjdhRlfGNx=JY%U8 zLCXs2QT?ii&P{Q7r-TSN))FgrGC~^V9kIKa-Qx=Aux14qie~%XseHm&0Y}v2fNNF@ z?5h49I7j8~;K3eLeDP*R`FE78R=p``!k#mI@uR3gE+C`IH74RWUI8cT)V15#509F? zSx2S*VjwjXAf4Y(@IBc9>F`&8w7BC0V^1m;U7en+)1i;`^Q+wn>`od4b~^-;F!X8Z z_(Kxuqd6eGb8Vfxy3xL!XqTX%gBwd;7GQW>qtCYR0=ah?apI_o*iQX1cwuSf zHV+__NTO3aTPML|Nf!r$Q6-plH~>laPy#@lXXPU+jXdZKk2%y2B*0SYpC*2$ppQ_9 zWxlXOn05_I`q+8yLBaBGIf8M-GHSw}=b*CJT=8;ZSg{w(evlREvZH`Sjd1*c5G$3mO<1 zn3LKVhrL|4y4~5QONOSM_*m{Eq6&l`hG1fR+_42y*SoH$bz1MbBW9I}EiQPXL;xEz zThdGAs%pbWux9vjU_o9q^MN+W0+|(sT@EByy(n*G$2WdW(GCMD=ZM_1u!T`8o`~6j zB2~{>U)4gH<%Sfy0MbdbOkoMRstAX&qV+}Shr^8?JPGZ> zRp93ALIU#gIo{gLyPOx;5uYvR(UicB({}`<&&eFU`yMUqXb02)r2S=r?@wBQut@N| z2V}VUp;5%=IP%S1el=Tw6b=@zn_g^CeJ{pZ%dgrURd3Hd>N|_J?DR=M%0`@7O{?}m z{Fk3MbW}^rdbaIxmy8Xt6R;wsGs_z5dS}dKc*+VX&pIoDZ|mR!4Gp|^dA^b%3U0Db zWzj5~+&Z zNn$&Lk}CYlt{C~}9--#QVcLS|4FxO34lL8-DYMx^96}5hhX?S~FC;`L>T?t))Xn)O zDqh)|>w3?P0?qk*CI08`#Mj+|ZTXhnLLMQ5zA2BMg&_KLbre7nj}GoXdbr*`GHuDd zI@pZ9#?15J(Qxki>f-YFWOZ^?1L?4wck$%%?CK;7>?XB*Q;*bc`^^MQx+5S(qwZMt zqP*tLmfGQ+1|T&SAd%(Yz2N)WoS&s06W=VmJec^CDvd(+F`WR~TPpZ&5S&M=sb(O3 z_!>y81lY+LBw#zKtT!F?B3ctr>e|>!J6VJ$G4Dt&vP2&9un$#Lo+w%gdH6 zBkEGe^-6H0>!ujNEaY;eVHnJ>*6ZW&k_&W7<%N@=M=xB$oKQIs_0<05>^tk1&ljwPKw?2{_eotKu$$r0 z`Ds+d4nTS&J;2G?;I(=57Bnil(;cB4iYp_Ed(maqB7a%b@4+IWu=vOgaK=j9Cr*LbbzK;T=cT&rX_Kp)w zyIk;{4_yMJe}~BdE2?a~Q$!bw$K!z4AwN@`9U1VD~12 z1>Q-s3Or9hB(*Cb-IGPbt*yeN@9$18hXCmxr2uJvav$x-|5Th5{=D?^?8DtFy(!+g zvEcjmbRbzLz(Cis2&uOuLb4+Gb}DIDlIo^%@=*wlfYw>31g1wcr$yKdxp->2Ml>ep zRQaKqhaa@Y)xM1r1_+22=4j9?Z?AFxL5XrQxiB|Nw3X~3S2Lx{mZdOQtVEdgvX~87 z9E8>JRJ3Z8-aQtyB~fP{qek973(rR|TNc~Fqw)0Mn(Hu+(U29=As8(j(MB$$Rjn0| zE}D-&ia%rES9LS0nv<(Z31Yj*dzFUiC{e8EWF|s9o@%FysRmL8887j-__xWvpEbz1 zAy2d5`?|~_c(fj(m;0*`NW<6z)CreGU^igs;(6JDTR$DZb9q{hJ=5jySH9uv<1s=y zof7>u6d)~9-A%XVxH*u>|3|*%oh^~wm_NDmCLrCOEkN485s#*&2NQwJT;JNg&;KBl zG+zGukG{TF=Y{V4Fxu69|F;r+gRVtDO15((iCDxDALK8mhO?VAYkkB~T`a#rG)KH( zJ&oj?sEgH=D%^ljP^WAlMs%een~;LhAgJV5!g^G4bggiRu>u}Wp;17Hb{k@wAmyWA z&CB)>c{N(z8e%)jL`S%Jd6C3F2oo_6_25{M4!T|iDXD5jwiLd0Py%w6KD?4>^oHQj zMbLO|F*q*HJ{P|io=V0{5@E1OMs0Gu2|`i|ehHVeU}A{KS2Y;vHbO)<(I9GM4Rv%q zMv9(hB6<36mjVgF_hKFqcuWhvr~61;MI0oL?u{PcWKZzczwQc1pHF$&{eq449FWj4H}FVA z+26U~yLJpB!uaX;2-l}B&mao;?>S!){MNy?L zJsv^=G_}R@vuAg!ucxK4`Oq$hX0-7gyhBLs#wMOt^g(rHn6Yiyl(H0AZ!)9P17%z-lhCFUc=gZHCbw2M&g<~8bA*#>{>WOXMX|sG&qf&PRpFRhN7N;#B{JYzfeM4d zB(tQAR*0dv15-I8f5z5Zj#>oo$hfdlkXZ>FMFsE5YT_;mDZSeY!so>*?^1{MiV;PZ2l_ z+QyD8FAlTEnA8!FRLm_q;M5Annj!^;_~v#7p((UDnCHB^@Ui-I?Hf ze{ZgXQN!gK1W2DxXT_EazNZCtP3YCo;s}W{m6VRaDvdm`7KB*1*&1Te(*Yj4UmerZ z*n9wk*ACuztIC{|I^ChALNLsGsuFNqZqi34cWeg@)KRjeb3p0^#fztd04H z(b1h&HR znYYujOE_N)q!u0#buPA45zPud0F49){B^JrH|_+(45DMXqKttk#Z1mKB-}FENMR^`i+L%n^5`SpYi%b1 zLgyW9$Fl&@eNd5gT7}i@wj2HzWf)zXsJkJrLt7}0K!figoyMHUi3PS|N_Sekc7Q5~ 
[binary image data omitted]

literal 0
HcmV?d00001

diff --git a/docs_v2/static/img/logo.svg b/docs_v2/static/img/logo.svg
new file mode 100644
index 00000000..9db6d0d0
--- /dev/null
+++ b/docs_v2/static/img/logo.svg
@@ -0,0 +1 @@
+
\ No newline at end of file

From 4001ef95c7543b1424555c3c88a8cd076a933587 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sat, 21 Sep 2024 00:58:54 +0200
Subject: [PATCH 233/251] build(deps): bump the npm_and_yarn group across 1
 directory with 7 updates (#547)

Bumps the npm_and_yarn group with 4 updates in the /docs_v2 directory:
[body-parser](https://github.com/expressjs/body-parser),
[express](https://github.com/expressjs/express),
[micromatch](https://github.com/micromatch/micromatch) and
[webpack](https://github.com/webpack/webpack).
Updates `body-parser` from 1.20.2 to 1.20.3 - [Release notes](https://github.com/expressjs/body-parser/releases) - [Changelog](https://github.com/expressjs/body-parser/blob/master/HISTORY.md) - [Commits](https://github.com/expressjs/body-parser/compare/1.20.2...1.20.3) Updates `express` from 4.19.2 to 4.21.0 - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/4.21.0/History.md) - [Commits](https://github.com/expressjs/express/compare/4.19.2...4.21.0) Updates `express` from 4.19.2 to 4.21.0 - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/4.21.0/History.md) - [Commits](https://github.com/expressjs/express/compare/4.19.2...4.21.0) Updates `path-to-regexp` from 0.1.7 to 0.1.10 - [Release notes](https://github.com/pillarjs/path-to-regexp/releases) - [Changelog](https://github.com/pillarjs/path-to-regexp/blob/master/History.md) - [Commits](https://github.com/pillarjs/path-to-regexp/compare/v0.1.7...v0.1.10) Updates `micromatch` from 4.0.7 to 4.0.8 - [Release notes](https://github.com/micromatch/micromatch/releases) - [Changelog](https://github.com/micromatch/micromatch/blob/master/CHANGELOG.md) - [Commits](https://github.com/micromatch/micromatch/compare/4.0.7...4.0.8) Updates `send` from 0.18.0 to 0.19.0 - [Release notes](https://github.com/pillarjs/send/releases) - [Changelog](https://github.com/pillarjs/send/blob/master/HISTORY.md) - [Commits](https://github.com/pillarjs/send/compare/0.18.0...0.19.0) Updates `serve-static` from 1.15.0 to 1.16.2 - [Release notes](https://github.com/expressjs/serve-static/releases) - [Changelog](https://github.com/expressjs/serve-static/blob/v1.16.2/HISTORY.md) - [Commits](https://github.com/expressjs/serve-static/compare/v1.15.0...v1.16.2) Updates `webpack` from 5.92.1 to 5.94.0 - [Release notes](https://github.com/webpack/webpack/releases) - [Commits](https://github.com/webpack/webpack/compare/v5.92.1...v5.94.0) --- updated-dependencies: - dependency-name: body-parser dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: express dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: express dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: path-to-regexp dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: micromatch dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: send dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: serve-static dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: webpack dependency-type: indirect dependency-group: npm_and_yarn ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs_v2/package-lock.json | 139 ++++++++++++++++++-------------------- 1 file changed, 67 insertions(+), 72 deletions(-) diff --git a/docs_v2/package-lock.json b/docs_v2/package-lock.json index 21e6a165..ebb2ff9e 100644 --- a/docs_v2/package-lock.json +++ b/docs_v2/package-lock.json @@ -3405,24 +3405,6 @@ "@types/ms": "*" } }, - "node_modules/@types/eslint": { - "version": "8.56.10", - "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.10.tgz", - "integrity": "sha512-Shavhk87gCtY2fhXDctcfS3e6FdxWkCx1iUZ9eEUbh7rTqlZT0/IzOkCOVt0fCjcFuZ9FPYfuezTBImfHCDBGQ==", - "dependencies": { - "@types/estree": "*", - "@types/json-schema": "*" - } - }, - "node_modules/@types/eslint-scope": { - "version": "3.7.7", - "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", - "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", - "dependencies": { - "@types/eslint": "*", - "@types/estree": "*" - } - }, "node_modules/@types/estree": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", @@ -4299,9 +4281,9 @@ } }, "node_modules/body-parser": { - "version": "1.20.2", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", - "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", "dependencies": { "bytes": "3.1.2", "content-type": "~1.0.5", @@ -4311,7 +4293,7 @@ "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.11.0", + "qs": "6.13.0", "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" @@ -5820,17 +5802,17 @@ } }, "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", "engines": { "node": ">= 0.8" } }, "node_modules/enhanced-resolve": { - "version": "5.17.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.0.tgz", - "integrity": "sha512-dwDPwZL0dmye8Txp2gzFmA6sxALaSvdRDjPH0viLcKrtlOL3tw62nWWweVD1SdILDTJrbrL6tdWVN58Wo6U3eA==", + "version": "5.17.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz", + "integrity": "sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==", "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" @@ -6125,36 +6107,36 @@ } }, "node_modules/express": { - "version": "4.19.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", - "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.0.tgz", + "integrity": "sha512-VqcNGcj/Id5ZT1LZ/cfihi3ttTn+NJmkli2eZADigjq29qTlWi/hAQ43t/VLPq8+UX06FCEx3ByOYet6ZFblng==", "dependencies": 
{ "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.2", + "body-parser": "1.20.3", "content-disposition": "0.5.4", "content-type": "~1.0.4", "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", - "finalhandler": "1.2.0", + "finalhandler": "1.3.1", "fresh": "0.5.2", "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", + "merge-descriptors": "1.0.3", "methods": "~1.1.2", "on-finished": "2.4.1", "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", + "path-to-regexp": "0.1.10", "proxy-addr": "~2.0.7", - "qs": "6.11.0", + "qs": "6.13.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", + "send": "0.19.0", + "serve-static": "1.16.2", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", @@ -6190,9 +6172,9 @@ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" }, "node_modules/express/node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + "version": "0.1.10", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", + "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==" }, "node_modules/express/node_modules/range-parser": { "version": "1.2.1", @@ -6377,12 +6359,12 @@ } }, "node_modules/finalhandler": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", - "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", "dependencies": { "debug": "2.6.9", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "on-finished": "2.4.1", "parseurl": "~1.3.3", @@ -8645,9 +8627,12 @@ } }, "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, "node_modules/merge-stream": { "version": "2.0.0", @@ -10363,9 +10348,9 @@ ] }, "node_modules/micromatch": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.7.tgz", - "integrity": "sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==", + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" @@ -10620,9 +10605,12 @@ } }, "node_modules/object-inspect": { - "version": "1.13.1", - "resolved": 
"https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", - "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz", + "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==", + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -11758,11 +11746,11 @@ } }, "node_modules/qs": { - "version": "6.11.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", - "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", "dependencies": { - "side-channel": "^1.0.4" + "side-channel": "^1.0.6" }, "engines": { "node": ">=0.6" @@ -12763,9 +12751,9 @@ } }, "node_modules/send": { - "version": "0.18.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", - "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", "dependencies": { "debug": "2.6.9", "depd": "2.0.0", @@ -12798,6 +12786,14 @@ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" }, + "node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/send/node_modules/ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", @@ -12910,14 +12906,14 @@ } }, "node_modules/serve-static": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", "dependencies": { - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "parseurl": "~1.3.3", - "send": "0.18.0" + "send": "0.19.0" }, "engines": { "node": ">= 0.8.0" @@ -14175,11 +14171,10 @@ } }, "node_modules/webpack": { - "version": "5.92.1", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.92.1.tgz", - "integrity": "sha512-JECQ7IwJb+7fgUFBlrJzbyu3GEuNBcdqr1LD7IbSzwkSmIevTm8PF+wej3Oxuz/JFBUZ6O1o43zsPkwm1C4TmA==", + "version": "5.94.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.94.0.tgz", + "integrity": "sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg==", "dependencies": { - "@types/eslint-scope": "^3.7.3", "@types/estree": "^1.0.5", "@webassemblyjs/ast": "^1.12.1", "@webassemblyjs/wasm-edit": "^1.12.1", @@ -14188,7 +14183,7 @@ 
"acorn-import-attributes": "^1.9.5", "browserslist": "^4.21.10", "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.17.0", + "enhanced-resolve": "^5.17.1", "es-module-lexer": "^1.2.1", "eslint-scope": "5.1.1", "events": "^3.2.0", From 511f1b7f5df24ca3c738bce9bff72a3733630b4b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 15:29:31 +0200 Subject: [PATCH 234/251] build(deps): bump bluefireteam/melos-action (#549) Bumps [bluefireteam/melos-action](https://github.com/bluefireteam/melos-action) from 7e70fbe34bbd91a75eb505eeb4174b0ac9a1df52 to c7dcb921b23cc520cace360b95d02b37bf09cdaa. - [Release notes](https://github.com/bluefireteam/melos-action/releases) - [Commits](https://github.com/bluefireteam/melos-action/compare/7e70fbe34bbd91a75eb505eeb4174b0ac9a1df52...c7dcb921b23cc520cace360b95d02b37bf09cdaa) --- updated-dependencies: - dependency-name: bluefireteam/melos-action dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 06a8deb6..c4e0c410 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -41,7 +41,7 @@ jobs: run: flutter pub cache clean - name: Install Melos - uses: bluefireteam/melos-action@7e70fbe34bbd91a75eb505eeb4174b0ac9a1df52 + uses: bluefireteam/melos-action@c7dcb921b23cc520cace360b95d02b37bf09cdaa with: run-bootstrap: false From c1b2d1d7f2f30563be79bad54a12e6e48a7536d5 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 25 Sep 2024 20:30:37 +0200 Subject: [PATCH 235/251] docs: Update Google's models in documentation (#551) --- .../integrations/firebase_vertex_ai.md | 12 ++++++------ .../chat_models/integrations/googleai.md | 18 +++++++++--------- .../text_embedding/integrations/google_ai.md | 2 -- .../vertex_ai/chat_firebase_vertex_ai.dart | 12 ++++++------ .../google_ai/chat_google_generative_ai.dart | 19 ++++++++++--------- .../google_ai/google_ai_embeddings.dart | 2 -- 6 files changed, 31 insertions(+), 34 deletions(-) diff --git a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md index cd33daa2..167ffe13 100644 --- a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md +++ b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md @@ -72,18 +72,18 @@ The following models are available: * Max output tokens: 8192 - `gemini-1.5-pro`: * text / image / audio -> text model - * Max input token: 1048576 + * Max input token: 2097152 * Max output tokens: 8192 - `gemini-1.0-pro-vision`: * text / image -> text model - * Max input token: 12288 - * Max output tokens: 4096 + * Max input token: 16384 + * Max output tokens: 2048 - `gemini-1.0-pro` * text -> text model - * Max input token: 30720 - * Max output tokens: 2048 + * Max input token: 32760 + * Max output tokens: 8192 -Mind that this list may not be up-to-date. Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) for the updated list. +Mind that this list may not be up-to-date. Refer to the [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models) for the updated list. 
## Multimodal support diff --git a/docs/modules/model_io/models/chat_models/integrations/googleai.md b/docs/modules/model_io/models/chat_models/integrations/googleai.md index 033c7672..12ff5f2c 100644 --- a/docs/modules/model_io/models/chat_models/integrations/googleai.md +++ b/docs/modules/model_io/models/chat_models/integrations/googleai.md @@ -11,20 +11,20 @@ The following models are available: * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 -- `gemini-1.5-pro`: text / image -> text model +- `gemini-1.5-pro`: * text / image / audio -> text model - * Max input token: 1048576 + * Max input token: 2097152 * Max output tokens: 8192 -- `gemini-pro-vision`: - * text / image -> text model - * Max input token: 12288 - * Max output tokens: 4096 - `gemini-1.0-pro` (or `gemini-pro`): * text -> text model - * Max input token: 30720 - * Max output tokens: 2048 + * Max input token: 32760 + * Max output tokens: 8192 +- `aqa`: + * text -> text model + * Max input token: 7168 + * Max output tokens: 1024 -Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/models) for the updated list. +Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/gemini-api/docs/models/gemini) for the updated list. ## Usage diff --git a/docs/modules/retrieval/text_embedding/integrations/google_ai.md b/docs/modules/retrieval/text_embedding/integrations/google_ai.md index 6d84e8a1..657d7f6d 100644 --- a/docs/modules/retrieval/text_embedding/integrations/google_ai.md +++ b/docs/modules/retrieval/text_embedding/integrations/google_ai.md @@ -6,8 +6,6 @@ The embedding service in the [Gemini API](https://ai.google.dev/docs/embeddings_ - `text-embedding-004` * Dimensions: 768 (with support for reduced dimensionality) -- `embedding-001` - * Dimensions: 768 The previous list of models may not be exhaustive or up-to-date. Check out the [Google AI documentation](https://ai.google.dev/models/gemini) for the latest list of available models. diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 20b2b520..83ac8d8c 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -42,19 +42,19 @@ import 'types.dart'; /// * Max output tokens: 8192 /// - `gemini-1.5-pro`: /// * text / image / audio -> text model -/// * Max input token: 1048576 +/// * Max input token: 2097152 /// * Max output tokens: 8192 /// - `gemini-1.0-pro-vision`: /// * text / image -> text model -/// * Max input token: 12288 -/// * Max output tokens: 4096 +/// * Max input token: 16384 +/// * Max output tokens: 2048 /// - `gemini-1.0-pro` /// * text -> text model -/// * Max input token: 30720 -/// * Max output tokens: 2048 +/// * Max input token: 32760 +/// * Max output tokens: 8192 /// /// Mind that this list may not be up-to-date. -/// Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) +/// Refer to the [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models) /// for the updated list. 
/// /// ### Call options diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart index 0fde4b9f..5b41f34d 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart @@ -35,21 +35,22 @@ import 'types.dart'; /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 -/// - `gemini-1.5-pro`: text / image -> text model +/// - `gemini-1.5-pro`: /// * text / image / audio -> text model -/// * Max input token: 1048576 +/// * Max input token: 2097152 /// * Max output tokens: 8192 -/// - `gemini-pro-vision`: -/// * text / image -> text model -/// * Max input token: 12288 -/// * Max output tokens: 4096 /// - `gemini-1.0-pro` (or `gemini-pro`): /// * text -> text model -/// * Max input token: 30720 -/// * Max output tokens: 2048 +/// * Max input token: 32760 +/// * Max output tokens: 8192 +/// - `aqa`: +/// * text -> text model +/// * Max input token: 7168 +/// * Max output tokens: 1024 /// /// Mind that this list may not be up-to-date. -/// Refer to the [documentation](https://ai.google.dev/models) for the updated list. +/// Refer to the [documentation](https://ai.google.dev/gemini-api/docs/models/gemini) +/// for the updated list. /// /// #### Tuned models /// diff --git a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart index 93ec105a..263a2c44 100644 --- a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart +++ b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart @@ -24,8 +24,6 @@ import '../../utils/https_client/http_client.dart'; /// /// - `text-embedding-004` /// * Dimensions: 768 (with support for reduced dimensionality) -/// - `embedding-001` -/// * Dimensions: 768 /// /// The previous list of models may not be exhaustive or up-to-date. Check out /// the [Google AI documentation](https://ai.google.dev/models/gemini) From edfb728f6af17859bc40750a41f5ee46ec589d21 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 25 Sep 2024 22:06:55 +0200 Subject: [PATCH 236/251] feat: Add support for deleteWhere in ObjectBoxVectorStore (#552) --- .../vector_stores/integrations/objectbox.md | 87 +++++++++++++- .../objectbox/base_objectbox.dart | 108 +++++++++++++++++- .../vector_stores/objectbox/objectbox.dart | 2 +- .../objectbox/objectbox_test.dart | 31 +++++ .../lib/src/vector_stores/base.dart | 2 - 5 files changed, 221 insertions(+), 9 deletions(-) diff --git a/docs/modules/retrieval/vector_stores/integrations/objectbox.md b/docs/modules/retrieval/vector_stores/integrations/objectbox.md index 0ac3dd9b..ee035833 100644 --- a/docs/modules/retrieval/vector_stores/integrations/objectbox.md +++ b/docs/modules/retrieval/vector_stores/integrations/objectbox.md @@ -119,6 +119,14 @@ To delete documents, you can use the `delete` method passing the ids of the docu await vectorStore.delete(ids: ['9999']); ``` +You can also use `deleteWhere` to delete documents based on a condition. 
+ +```dart +await vectorStore.deleteWhere( + ObjectBoxDocumentProps.metadata.contains('cat'), +); +``` + ## Example: Building a Fully Local RAG App with ObjectBox and Ollama This example demonstrates how to build a fully local RAG (Retrieval-Augmented Generation) app using ObjectBox and Ollama. The app retrieves blog posts, splits them into chunks, and stores them in an ObjectBox vector store. It then uses the stored information to generate responses to user questions. @@ -250,7 +258,7 @@ Check out the [Wikivoyage EU example](https://github.com/davidmigloz/langchain_d ### BaseObjectBoxVectorStore -If you need more control over the entity (e.g. if you need to persist custom fields), you can use the `BaseObjectBoxVectorStore` class instead of `ObjectBoxVectorStore`. +If you need more control over the entity (e.g. if you are using ObjectBox to store other entities, or if you need to customize the Document entity class.), you can use the `BaseObjectBoxVectorStore` class instead of `ObjectBoxVectorStore`. `BaseObjectBoxVectorStore` requires the following parameters: - `embeddings`: The embeddings model to use. @@ -260,4 +268,79 @@ If you need more control over the entity (e.g. if you need to persist custom fie - `getIdProperty`: A function that returns the ID property of the entity. - `getEmbeddingProperty`: A function that returns the embedding property of the entity. -You can check how `ObjectBoxVectorStore` is implemented to see how to use `BaseObjectBoxVectorStore`. +Here is an example of how to use this class: + +First, you can define our own Document entity class instead of using the one provided by the [ObjectBoxVectorStore]. In this way, you can customize the entity to your needs. You will need to define the mapping logic between the entity and the LangChain [Document] model. + +```dart +@Entity() +class MyDocumentEntity { + MyDocumentEntity({ + required this.id, + required this.content, + required this.metadata, + required this.embedding, + }); + @Id() + int internalId = 0; + @Unique(onConflict: ConflictStrategy.replace) + String id; + String content; + String metadata; + @HnswIndex( + dimensions: 768, + distanceType: VectorDistanceType.cosine, + ) + @Property(type: PropertyType.floatVector) + List embedding; + factory MyDocumentEntity.fromModel( + Document doc, List embedding, + ) => MyDocumentEntity( + id: doc.id ?? '', + content: doc.pageContent, + metadata: jsonEncode(doc.metadata), + embedding: embedding, + ); + Document toModel() => Document( + id: id, + pageContent: content, + metadata: jsonDecode(metadata), + ); +} +``` + +After defining the entity class, you will need to run the ObjectBox generator: + +```sh +dart run build_runner build --delete-conflicting-outputs +``` + +Then, you just need to create your custom vector store class that extends [BaseObjectBoxVectorStore] and wire everything up: + +```dart +class MyCustomVectorStore extends BaseObjectBoxVectorStore { + MyCustomVectorStore({ + required super.embeddings, + required Store store, + }) : super( + box: store.box(), + createEntity: ( + String id, + String content, + String metadata, + List embedding, + ) => + MyDocumentEntity( + id: id, + content: content, + metadata: metadata, + embedding: embedding, + ), + createDocument: (MyDocumentEntity docDto) => docDto.toModel(), + getIdProperty: () => MyDocumentEntity_.id, + getEmbeddingProperty: () => MyDocumentEntity_.embedding, + ); +} +``` + +Now you can use the [MyCustomVectorStore] class to store and search documents. 
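For illustration, here is a minimal end-to-end sketch of using the custom store described in the documentation above. It assumes the `MyCustomVectorStore` and `MyDocumentEntity` classes from that example, the `openStore` helper from the generated `objectbox.g.dart`, and an embeddings model such as `OllamaEmbeddings`; the model name and store directory are placeholders.

```dart
// Minimal usage sketch for the custom ObjectBox-backed vector store.
// Assumes `MyCustomVectorStore` / `MyDocumentEntity` from the example above
// and the `objectbox.g.dart` generated by `dart run build_runner build`.
import 'package:langchain_core/documents.dart';
import 'package:langchain_ollama/langchain_ollama.dart';

import 'objectbox.g.dart';

Future<void> main() async {
  final store = await openStore(directory: 'obx-example'); // placeholder path
  final vectorStore = MyCustomVectorStore(
    embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'),
    store: store,
  );

  // Embeddings are computed and persisted automatically.
  await vectorStore.addDocuments(
    documents: const [
      Document(id: '1', pageContent: 'ObjectBox is a fast on-device database'),
      Document(id: '2', pageContent: 'LangChain.dart brings LangChain to Dart'),
    ],
  );

  // Query the store.
  final results = await vectorStore.similaritySearch(query: 'local database');
  print(results.map((doc) => doc.id).toList());

  store.close();
}
```

Because `MyCustomVectorStore` extends `BaseObjectBoxVectorStore`, the standard `VectorStore` methods (`addDocuments`, `similaritySearch`, `delete`) work unchanged.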
diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart index 7e065c4a..84658107 100644 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart @@ -14,8 +14,101 @@ import 'package:uuid/uuid.dart'; /// {@template base_object_box_vector_store} /// Base class for ObjectBox vector store. /// -/// Use this class if you need more control over the ObjectBox store. -/// Otherwise, use [ObjectBoxVectorStore] which is a pre-configured version. +/// The [ObjectBoxVectorStore] class is a pre-configured version of this class, +/// but it can only be used if you don't use ObjectBox for anything else. +/// +/// If you need more control over the ObjectBox store, use this class instead. +/// For example, if you are using ObjectBox to store other entities, or if you +/// need to customize the Document entity class. +/// +/// Here is an example of how to use this class: +/// +/// First, you can define our own Document entity class instead of using the +/// one provided by the [ObjectBoxVectorStore]. In this way, you can customize +/// the entity to your needs. You will need to define the mapping logic between +/// the entity and the LangChain [Document] model. +/// +/// ```dart +/// @Entity() +/// class MyDocumentEntity { +/// MyDocumentEntity({ +/// required this.id, +/// required this.content, +/// required this.metadata, +/// required this.embedding, +/// }); +/// +/// @Id() +/// int internalId = 0; +/// +/// @Unique(onConflict: ConflictStrategy.replace) +/// String id; +/// +/// String content; +/// +/// String metadata; +/// +/// @HnswIndex( +/// dimensions: 768, +/// distanceType: VectorDistanceType.cosine, +/// ) +/// @Property(type: PropertyType.floatVector) +/// List embedding; +/// +/// factory MyDocumentEntity.fromModel( +/// Document doc, List embedding, +/// ) => MyDocumentEntity( +/// id: doc.id ?? '', +/// content: doc.pageContent, +/// metadata: jsonEncode(doc.metadata), +/// embedding: embedding, +/// ); +/// +/// Document toModel() => Document( +/// id: id, +/// pageContent: content, +/// metadata: jsonDecode(metadata), +/// ); +/// } +/// ``` +/// +/// After defining the entity class, you will need to run the ObjectBox +/// generator: +/// +/// ```sh +/// dart run build_runner build --delete-conflicting-outputs +/// ``` +/// +/// Then, you just need to create your custom vector store class that +/// extends [BaseObjectBoxVectorStore] and wire everything up: +/// +/// ```dart +/// class MyCustomVectorStore extends BaseObjectBoxVectorStore { +/// MyCustomVectorStore({ +/// required super.embeddings, +/// required Store store, +/// }) : super( +/// box: store.box(), +/// createEntity: ( +/// String id, +/// String content, +/// String metadata, +/// List embedding, +/// ) => +/// MyDocumentEntity( +/// id: id, +/// content: content, +/// metadata: metadata, +/// embedding: embedding, +/// ), +/// createDocument: (MyDocumentEntity docDto) => docDto.toModel(), +/// getIdProperty: () => MyDocumentEntity_.id, +/// getEmbeddingProperty: () => MyDocumentEntity_.embedding, +/// ); +/// } +/// ``` +/// +/// Now you can use the [MyCustomVectorStore] class to store and search documents. 
/// {@endtemplate} class BaseObjectBoxVectorStore extends VectorStore { /// {@macro base_object_box_vector_store} @@ -87,8 +180,15 @@ class BaseObjectBoxVectorStore extends VectorStore { } @override - Future delete({required final List ids}) async { - _box.query(_getIdProperty().oneOf(ids)).build().remove(); + Future delete({required final List ids}) { + return _box.query(_getIdProperty().oneOf(ids)).build().removeAsync(); + } + + /// Delete by condition. + /// + /// - [condition] is the condition to delete by. + Future deleteWhere(final Condition condition) { + return _box.query(condition).build().removeAsync(); } @override diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart index 94457e54..5889b920 100644 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart @@ -26,7 +26,7 @@ import 'types.dart'; /// /// This vector stores creates a [Store] with an [ObjectBoxDocument] entity /// that persists LangChain [Document]s along with their embeddings. If you -/// need more control over the entity, you can use the +/// need more control over the entity or the storeo, you can use the /// [BaseObjectBoxVectorStore] class instead. /// /// See documentation for more details: diff --git a/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart b/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart index 740a06d7..fdce5a1b 100644 --- a/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart +++ b/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart @@ -135,6 +135,37 @@ void main() async { ); expect(res2.length, 0); }); + + test('Test delete where', () async { + await vectorStore.addDocuments( + documents: [ + const Document( + id: '9999', + pageContent: 'This document will be deleted', + metadata: {'cat': 'xxx'}, + ), + ], + ); + final res1 = await vectorStore.similaritySearch( + query: 'Deleted doc', + config: ObjectBoxSimilaritySearch( + filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), + ), + ); + expect(res1.length, 1); + expect(res1.first.id, '9999'); + + await vectorStore.deleteWhere( + ObjectBoxDocumentProps.metadata.contains('xxx'), + ); + final res2 = await vectorStore.similaritySearch( + query: 'Deleted doc', + config: ObjectBoxSimilaritySearch( + filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), + ), + ); + expect(res2.length, 0); + }); }); group('ObjectBoxSimilaritySearch', () { diff --git a/packages/langchain_core/lib/src/vector_stores/base.dart b/packages/langchain_core/lib/src/vector_stores/base.dart index 9ef54df3..3a5ecb51 100644 --- a/packages/langchain_core/lib/src/vector_stores/base.dart +++ b/packages/langchain_core/lib/src/vector_stores/base.dart @@ -45,8 +45,6 @@ abstract class VectorStore { /// Delete by vector ID. /// /// - [ids] is a list of ids to delete. - /// - /// Returns true if the delete was successful. Future delete({required final List ids}); /// Returns docs most similar to query using specified search type. 
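Taken together, the two deletion paths of the updated store look like this in practice (a sketch, assuming a populated `ObjectBoxVectorStore`; the `'draft'` metadata value is purely illustrative):

```dart
// Delete by id: resolves when the underlying removeAsync call completes;
// no success flag is returned any more.
await vectorStore.delete(ids: ['9999']);

// Delete by an arbitrary ObjectBox condition over the persisted entity.
await vectorStore.deleteWhere(
  ObjectBoxDocumentProps.metadata.contains('draft'),
);
```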
From 4fc0e7a5766ca2c56a74cfa1a366499a8ff43b9e Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 25 Sep 2024 22:46:03 +0200 Subject: [PATCH 237/251] refactor: Add stubs for ObjectBox on web platform (#553) --- .../objectbox/base_objectbox_stub.dart | 40 ++++++++++++++ .../lib/src/vector_stores/objectbox/ob.dart | 7 +++ .../src/vector_stores/objectbox/ob_io.dart | 3 ++ .../src/vector_stores/objectbox/ob_stub.dart | 3 ++ .../objectbox/objectbox_stub.dart | 53 +++++++++++++++++++ .../vector_stores/objectbox/types_stub.dart | 11 ++++ .../lib/src/vector_stores/vector_stores.dart | 5 +- 7 files changed, 118 insertions(+), 4 deletions(-) create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart new file mode 100644 index 00000000..308e7da0 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart @@ -0,0 +1,40 @@ +// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters +import 'package:langchain_core/documents.dart'; +import 'package:langchain_core/vector_stores.dart'; + +// This is a stub class +class BaseObjectBoxVectorStore extends VectorStore { + BaseObjectBoxVectorStore({ + required super.embeddings, + required final Object? box, + required final Object? createEntity, + required final Object? createDocument, + required final Object? getIdProperty, + required final Object? 
getEmbeddingProperty, + }); + + @override + Future> addVectors({ + required List> vectors, + required List documents, + }) { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } + + @override + Future delete({required List ids}) { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } + + Future deleteWhere(final Object condition) { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } + + @override + Future> similaritySearchByVectorWithScores({ + required List embedding, + VectorStoreSimilaritySearch config = const VectorStoreSimilaritySearch(), + }) { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } +} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart new file mode 100644 index 00000000..63b1f86d --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart @@ -0,0 +1,7 @@ +export 'ob_io.dart' if (dart.library.js_interop) 'ob_stub.dart' + show + BaseObjectBoxVectorStore, + ObjectBoxDocument, + ObjectBoxDocumentProps, + ObjectBoxSimilaritySearch, + ObjectBoxVectorStore; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart new file mode 100644 index 00000000..db6546e3 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart @@ -0,0 +1,3 @@ +export 'base_objectbox.dart'; +export 'objectbox.dart'; +export 'types.dart'; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart new file mode 100644 index 00000000..87329806 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart @@ -0,0 +1,3 @@ +export 'base_objectbox_stub.dart'; +export 'objectbox_stub.dart'; +export 'types_stub.dart'; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart new file mode 100644 index 00000000..7763f9cf --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart @@ -0,0 +1,53 @@ +// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters +import 'base_objectbox_stub.dart'; + +// This is a stub class +class ObjectBoxVectorStore extends BaseObjectBoxVectorStore { + ObjectBoxVectorStore({ + required super.embeddings, + required int dimensions, + final String? directory, + final int? maxDBSizeInKB, + final int? maxDataSizeInKB, + final int? fileMode, + final int? maxReaders, + final bool queriesCaseSensitiveDefault = true, + final String? 
macosApplicationGroup, + }) : super( + box: null, + createEntity: null, + createDocument: null, + getIdProperty: null, + getEmbeddingProperty: null, + ); + + void close() { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } +} + +// This is a stub class +class ObjectBoxDocument { + ObjectBoxDocument( + this.internalId, + this.id, + this.content, + this.metadata, + this.embedding, + ); + + int internalId = 0; + String id; + String content; + String metadata; + List embedding; +} + +// This is a stub class +class ObjectBoxDocumentProps { + static const internalId = null; + static const id = null; + static const content = null; + static const metadata = null; + static const embedding = null; +} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart new file mode 100644 index 00000000..4b1aa144 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart @@ -0,0 +1,11 @@ +// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters +import 'package:langchain_core/vector_stores.dart'; + +// This is a stub class +class ObjectBoxSimilaritySearch extends VectorStoreSimilaritySearch { + ObjectBoxSimilaritySearch({ + super.k = 0, + super.scoreThreshold, + Object? filterCondition, + }) : super(filter: null); +} diff --git a/packages/langchain_community/lib/src/vector_stores/vector_stores.dart b/packages/langchain_community/lib/src/vector_stores/vector_stores.dart index 753d8168..d9da952b 100644 --- a/packages/langchain_community/lib/src/vector_stores/vector_stores.dart +++ b/packages/langchain_community/lib/src/vector_stores/vector_stores.dart @@ -1,4 +1 @@ -export 'objectbox/base_objectbox.dart' show BaseObjectBoxVectorStore; -export 'objectbox/objectbox.dart' - show ObjectBoxDocument, ObjectBoxDocumentProps, ObjectBoxVectorStore; -export 'objectbox/types.dart' show ObjectBoxSimilaritySearch; +export 'objectbox/ob.dart'; From 364842c8236539f81cb74b5dbda05282f0b859a5 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 25 Sep 2024 22:50:46 +0200 Subject: [PATCH 238/251] feat: Update Ollama default model to llama-3.2 (#554) --- docs/expression_language/primitives/router.md | 6 ++-- .../modules/agents/agent_types/tools_agent.md | 4 +-- .../models/chat_models/integrations/ollama.md | 28 +++++++++---------- .../models/llms/integrations/ollama.md | 6 ++-- .../text_embedding/integrations/ollama.md | 2 +- .../vector_stores/integrations/objectbox.md | 2 +- .../expression_language/cookbook/routing.dart | 6 ++-- .../agents/agent_types/tools_agent.dart | 2 +- .../chat_models/integrations/ollama.dart | 14 +++++----- .../models/llms/integrations/ollama.dart | 4 +-- .../vector_stores/integrations/objectbox.dart | 2 +- .../lib/home/bloc/providers.dart | 2 +- examples/wikivoyage_eu/README.md | 6 ++-- examples/wikivoyage_eu/bin/wikivoyage_eu.dart | 2 +- examples/wikivoyage_eu/pubspec.yaml | 2 +- packages/langchain/README.md | 2 +- .../vector_stores/objectbox/objectbox.dart | 2 +- .../chat_models/chat_ollama/chat_ollama.dart | 10 +++---- .../lib/src/embeddings/ollama_embeddings.dart | 6 ++-- .../langchain_ollama/lib/src/llms/ollama.dart | 10 +++---- packages/langchain_ollama/pubspec.yaml | 2 +- .../test/chat_models/chat_ollama_test.dart | 2 +- .../test/embeddings/ollama_test.dart | 2 +- .../test/llms/ollama_test.dart | 2 +- packages/ollama_dart/README.md | 6 ++-- .../example/ollama_dart_example.dart | 10 +++---- 
packages/ollama_dart/oas/ollama-curated.yaml | 20 ++++++------- packages/ollama_dart/pubspec.yaml | 2 +- .../test/ollama_dart_chat_test.dart | 2 +- 29 files changed, 83 insertions(+), 83 deletions(-) diff --git a/docs/expression_language/primitives/router.md b/docs/expression_language/primitives/router.md index effd5f66..da5a59c6 100644 --- a/docs/expression_language/primitives/router.md +++ b/docs/expression_language/primitives/router.md @@ -12,7 +12,7 @@ First, let’s create a chain that will identify incoming questions as being abo ```dart final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: ChatOllamaOptions(model: 'llama3.2'), ); final classificationChain = PromptTemplate.fromTemplate(''' @@ -131,7 +131,7 @@ Here is a question: {query} '''; -final embeddings = OllamaEmbeddings(model: 'llama3.1'); +final embeddings = OllamaEmbeddings(model: 'llama3.2'); final promptTemplates = [physicsTemplate, historyTemplate]; final promptEmbeddings = await embeddings.embedDocuments( promptTemplates.map((final pt) => Document(pageContent: pt)).toList(), @@ -146,7 +146,7 @@ final chain = Runnable.fromMap({'query': Runnable.passthrough()}) | return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]); }) | ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ) | StringOutputParser(); diff --git a/docs/modules/agents/agent_types/tools_agent.md b/docs/modules/agents/agent_types/tools_agent.md index 7c0c9de8..45a14352 100644 --- a/docs/modules/agents/agent_types/tools_agent.md +++ b/docs/modules/agents/agent_types/tools_agent.md @@ -9,7 +9,7 @@ You can use any chat model that supports tool calling, like `ChatOpenAI`, `ChatO ## Usage -In the following example, we use `ChatOllama` with the `llama3.1` model and a calculator tool (included in `langchain_community`) to calculate the result of a mathematical expression. +In the following example, we use `ChatOllama` with the `llama3.2` model and a calculator tool (included in `langchain_community`) to calculate the result of a mathematical expression. ```dart import 'package:langchain/langchain.dart'; @@ -20,7 +20,7 @@ import 'package:langchain_ollama/langchain_ollama.dart'; final llm = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), ); diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index e6cc5907..f612616e 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -2,7 +2,7 @@ Wrapper around [Ollama](https://ollama.ai) Completions API that enables to interact with the LLMs in a chat-like fashion. -Ollama allows you to run open-source large language models, such as Llama 3.1 or Gemma 2, locally. +Ollama allows you to run open-source large language models, such as Llama 3.2 or Gemma 2, locally. Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage. @@ -12,13 +12,13 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull llama3.1` + * e.g., for Llama 3: `ollama pull llama3.2` 3. 
Instantiate the `ChatOllama` class with the downloaded model. ```dart final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', ), ); ``` @@ -33,7 +33,7 @@ By default, `ChatOllama` uses 'http://localhost:11434/api' as base URL (default final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( baseUrl: 'https://your-remote-server-where-ollama-is-running.com', - model: 'llama3.1', + model: 'llama3.2', ), ); ``` @@ -48,7 +48,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), ); @@ -75,7 +75,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ ]); final chat = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), ); @@ -123,8 +123,8 @@ print(res.output.content); **Notes:** - Tool calling requires [Ollama 0.3.0](https://github.com/ollama/ollama/releases/tag/v0.3.0) or newer. - Streaming tool calls is not supported at the moment. -- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1) or [`llama3-groq-tool-use`](https://ollama.com/library/llama3-groq-tool-use)). -- At the moment, small models like `llama3.1` [cannot reliably maintain a conversation alongside tool calling definitions](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#llama-3.1-instruct). They can be used for zero-shot tool calling, but for multi-turn conversations it's recommended to use larger models like `llama3.1:70b` or `llama3.1:405b`. +- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.2`](https://ollama.com/library/llama3.2) or [`llama3-groq-tool-use`](https://ollama.com/library/llama3-groq-tool-use)). +- At the moment, small models like `llama3.2` [cannot reliably maintain a conversation alongside tool calling definitions](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#llama-3.1-instruct). They can be used for zero-shot tool calling, but for multi-turn conversations it's recommended to use larger models like `llama3.2:70b` or `llama3.2:405b`. 
```dart const tool = ToolSpec( @@ -144,7 +144,7 @@ const tool = ToolSpec( final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, tools: [tool], ), @@ -173,7 +173,7 @@ If you want to customize how the model should respond to tool calls, you can use ```dart final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, tools: [tool], toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), @@ -194,7 +194,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ ]); final chat = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, format: OllamaResponseFormat.json, ), @@ -284,7 +284,7 @@ const getFlightTimesTool = ToolSpec( final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, tools: [getFlightTimesTool], ), @@ -370,7 +370,7 @@ const tool = ToolSpec( final model = ChatOllama( defaultOptions: ChatOllamaOptions( options: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), tools: [tool], @@ -436,7 +436,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ // 3. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: ChatOllamaOptions(model: 'llama3.2'), ); final retriever = vectorStore.asRetriever( defaultOptions: VectorStoreRetrieverOptions( diff --git a/docs/modules/model_io/models/llms/integrations/ollama.md b/docs/modules/model_io/models/llms/integrations/ollama.md index c139e7d9..25f6806e 100644 --- a/docs/modules/model_io/models/llms/integrations/ollama.md +++ b/docs/modules/model_io/models/llms/integrations/ollama.md @@ -16,7 +16,7 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull llama3.1` + * e.g., for Llama 3: `ollama pull llama3.2` ## Usage @@ -26,7 +26,7 @@ final prompt = PromptTemplate.fromTemplate( ); final llm = Ollama( defaultOptions: OllamaOptions( - model: 'llama3.1', + model: 'llama3.2', ), ); final chain = prompt | llm | StringOutputParser(); @@ -43,7 +43,7 @@ final promptTemplate = PromptTemplate.fromTemplate( ); final llm = Ollama( defaultOptions: OllamaOptions( - model: 'llama3.1', + model: 'llama3.2', ), ); final chain = promptTemplate | llm | StringOutputParser(); diff --git a/docs/modules/retrieval/text_embedding/integrations/ollama.md b/docs/modules/retrieval/text_embedding/integrations/ollama.md index fc83bbb5..b13ddd28 100644 --- a/docs/modules/retrieval/text_embedding/integrations/ollama.md +++ b/docs/modules/retrieval/text_embedding/integrations/ollama.md @@ -1,7 +1,7 @@ # OllamaEmbeddings ```dart -final embeddings = OllamaEmbeddings(model: 'llama3.1'); +final embeddings = OllamaEmbeddings(model: 'llama3.2'); const text = 'This is a test document.'; final res = await embeddings.embedQuery(text); final res = await embeddings.embedDocuments([text]); diff --git a/docs/modules/retrieval/vector_stores/integrations/objectbox.md b/docs/modules/retrieval/vector_stores/integrations/objectbox.md index ee035833..08a07bc2 100644 --- a/docs/modules/retrieval/vector_stores/integrations/objectbox.md +++ b/docs/modules/retrieval/vector_stores/integrations/objectbox.md @@ -213,7 +213,7 @@ Sources: // 6. 
Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: ChatOllamaOptions(model: 'llama3.2'), ); final retriever = vectorStore.asRetriever(); diff --git a/examples/docs_examples/bin/expression_language/cookbook/routing.dart b/examples/docs_examples/bin/expression_language/cookbook/routing.dart index 79bbd348..1f2232a0 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/routing.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/routing.dart @@ -9,7 +9,7 @@ void main(final List arguments) async { Future _runnableRouter() async { final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ); final classificationChain = PromptTemplate.fromTemplate(''' @@ -114,7 +114,7 @@ Here is a question: '''; final embeddings = OllamaEmbeddings( - model: 'llama3.1', + model: 'llama3.2', ); final promptTemplates = [physicsTemplate, historyTemplate]; final promptEmbeddings = await embeddings.embedDocuments( @@ -132,7 +132,7 @@ Here is a question: return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]); }) | ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ) | const StringOutputParser(); diff --git a/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart b/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart index 7554d8d4..b934c0a7 100644 --- a/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart +++ b/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart @@ -15,7 +15,7 @@ void main() async { Future _toolsAgent() async { final llm = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), ); diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart index 2d66b367..3473a738 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart @@ -27,7 +27,7 @@ Future _chatOllama() async { final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), ); @@ -54,7 +54,7 @@ Future _chatOllamaStreaming() async { ]); final chat = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), ); @@ -109,7 +109,7 @@ Future _chatOllamaToolCalling() async { final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, tools: [tool], ), @@ -143,7 +143,7 @@ Future _chatOllamaJsonMode() async { ]); final chat = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, format: OllamaResponseFormat.json, ), @@ -197,7 +197,7 @@ Future _extraction() async { final model = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, tools: const [tool], toolChoice: ChatToolChoice.forced(name: tool.name), @@ -300,7 +300,7 @@ Future _flights() async { final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 
'llama3.2', temperature: 0, tools: [getFlightTimesTool], ), @@ -370,7 +370,7 @@ Future _rag() async { // 3. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ); final retriever = vectorStore.asRetriever( defaultOptions: const VectorStoreRetrieverOptions( diff --git a/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart index eb019a6b..aae53fa7 100644 --- a/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart @@ -13,7 +13,7 @@ Future _ollama() async { ); final llm = Ollama( defaultOptions: const OllamaOptions( - model: 'llama3.1', + model: 'llama3.2', ), ); @@ -29,7 +29,7 @@ Future _ollamaStreaming() async { ); final llm = Ollama( defaultOptions: const OllamaOptions( - model: 'llama3.1', + model: 'llama3.2', ), ); const stringOutputParser = StringOutputParser(); diff --git a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart index 6c66d5dc..92d419c9 100644 --- a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart +++ b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart @@ -66,7 +66,7 @@ Sources: // 6. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ); final retriever = vectorStore.asRetriever(); diff --git a/examples/hello_world_flutter/lib/home/bloc/providers.dart b/examples/hello_world_flutter/lib/home/bloc/providers.dart index 1445bec3..4d9c364b 100644 --- a/examples/hello_world_flutter/lib/home/bloc/providers.dart +++ b/examples/hello_world_flutter/lib/home/bloc/providers.dart @@ -21,7 +21,7 @@ enum Provider { ), ollama( name: 'Ollama', - defaultModel: 'llama3.1', + defaultModel: 'llama3.2', defaultBaseUrl: 'http://localhost:11434/api', isRemote: false, ); diff --git a/examples/wikivoyage_eu/README.md b/examples/wikivoyage_eu/README.md index cc573899..74bbd8e2 100644 --- a/examples/wikivoyage_eu/README.md +++ b/examples/wikivoyage_eu/README.md @@ -17,11 +17,11 @@ This example demonstrates how to build a fully local Retrieval Augmented Generat - For this example we will be using the following models: * Embedding model: [`jina/jina-embeddings-v2-small-en`](https://ollama.com/jina/jina-embeddings-v2-small-en) - * LLM: [`llama3.1`](https://ollama.com/library/llama3.1) + * LLM: [`llama3.2`](https://ollama.com/library/llama3.2) - Open your terminal and run: ```bash ollama pull jina/jina-embeddings-v2-small-en -ollama run llama3.1 +ollama run llama3.2 ``` ### 3. Setup ObjectBox @@ -73,7 +73,7 @@ The chatbot script implements the RAG pipeline. It does the following: 2. Uses the `jina/jina-embeddings-v2-small-en` model to create an embedding for the query. 3. Retrieves the 5 most similar documents from the ObjectBox database. 4. Builds a prompt using the retrieved documents and the query. -5. Uses the `llama3.1` model to generate a response to the prompt. +5. Uses the `llama3.2` model to generate a response to the prompt. 
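In LangChain.dart terms, those five steps wire up as a single runnable chain. The following condensed sketch assumes `vectorStore` and `promptTemplate` are the ObjectBox store and prompt template defined in the script:

```dart
// Retrieve the 5 most similar documents for each incoming question.
final retriever = vectorStore.asRetriever(
  defaultOptions: const VectorStoreRetrieverOptions(
    searchType: VectorStoreSimilaritySearch(k: 5),
  ),
);
final chain = Runnable.fromMap<String>({
      'context': retriever |
          Runnable.mapInput(
            (docs) => docs.map((d) => d.pageContent).join('\n'),
          ),
      'question': Runnable.passthrough(),
    }) |
    promptTemplate |
    ChatOllama(defaultOptions: const ChatOllamaOptions(model: 'llama3.2')) |
    const StringOutputParser();
final answer = await chain.invoke('What is the best time to visit Lisbon?');
print(answer);
```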
You can run the script using: ```bash diff --git a/examples/wikivoyage_eu/bin/wikivoyage_eu.dart b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart index 8123c262..9fb076eb 100644 --- a/examples/wikivoyage_eu/bin/wikivoyage_eu.dart +++ b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart @@ -51,7 +51,7 @@ Do not provide any other suggestion if the question is not about Europe. final model = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', ), ); const outputParser = StringOutputParser(); diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index 4d7ddfc5..a95f4820 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -1,5 +1,5 @@ name: wikivoyage_eu -description: Wikivoyage EU chatbot using llama3.1 and ObjectBox. +description: Wikivoyage EU chatbot using llama3.2 and ObjectBox. version: 1.0.0 publish_to: none diff --git a/packages/langchain/README.md b/packages/langchain/README.md index e93bfdd1..7443cae5 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -72,7 +72,7 @@ Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/pack | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | | [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3.1, Gemma 2, Phi-3, Mistral nemo, WizardLM-2, CodeGemma, Command R, LLaVA, DBRX, Qwen, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, WizardLM-2, CodeGemma, Command R, LLaVA, DBRX, Qwen, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | | [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) 
| | [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | | [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart index 5889b920..22ddeee4 100644 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart @@ -20,7 +20,7 @@ import 'types.dart'; /// Vector store for the [ObjectBox](https://objectbox.io/) on-device database. /// /// ```dart -/// final embeddings = OllamaEmbeddings(model: 'llama3.1'); +/// final embeddings = OllamaEmbeddings(model: 'llama3.2'); /// final vectorStore = ObjectBoxVectorStore(embeddings: embeddings); /// ``` /// diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart index 4b0e9c75..190170d6 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart @@ -12,7 +12,7 @@ import 'types.dart'; /// to interact with the LLMs in a chat-like fashion. /// /// Ollama allows you to run open-source large language models, -/// such as Llama 3.1, Gemma 2 or LLaVA, locally. +/// such as Llama 3.2, Gemma 2 or LLaVA, locally. /// /// For a complete list of supported models and model variants, see the /// [Ollama model library](https://ollama.ai/library). @@ -34,7 +34,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for Llama 3: `ollama pull llama3.1` +/// * e.g., for Llama 3: `ollama pull llama3.2` /// /// ### Ollama base URL /// @@ -55,7 +55,7 @@ import 'types.dart'; /// ```dart /// final chatModel = ChatOllama( /// defaultOptions: const ChatOllamaOptions( -/// model: 'llama3.1', +/// model: 'llama3.2', /// temperature: 0, /// format: 'json', /// ), @@ -87,7 +87,7 @@ import 'types.dart'; /// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ -/// 'q1': prompt1 | chatModel.bind(const ChatOllamaOptions(model: 'llama3.1')) | outputParser, +/// 'q1': prompt1 | chatModel.bind(const ChatOllamaOptions(model: 'llama3.2')) | outputParser, /// 'q2': prompt2| chatModel.bind(const ChatOllamaOptions(model: 'mistral')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); @@ -176,7 +176,7 @@ class ChatOllama extends BaseChatModel { String get modelType => 'chat-ollama'; /// The default model to use unless another is specified. 
- static const defaultModel = 'llama3.1'; + static const defaultModel = 'llama3.2'; @override Future invoke( diff --git a/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart b/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart index bd40cf60..ffef2882 100644 --- a/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart +++ b/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart @@ -13,7 +13,7 @@ import 'package:ollama_dart/ollama_dart.dart'; /// /// Example: /// ```dart -/// final embeddings = OllamaEmbeddings(model: 'llama3.1'); +/// final embeddings = OllamaEmbeddings(model: 'llama3.2'); /// final res = await embeddings.embedQuery('Hello world'); /// ``` /// @@ -23,7 +23,7 @@ import 'package:ollama_dart/ollama_dart.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for `Llama-7b`: `ollama pull llama3.1` +/// * e.g., for `Llama-7b`: `ollama pull llama3.2` /// /// ### Advance /// @@ -76,7 +76,7 @@ class OllamaEmbeddings implements Embeddings { /// - `client`: the HTTP client to use. You can set your own HTTP client if /// you need further customization (e.g. to use a Socks5 proxy). OllamaEmbeddings({ - this.model = 'llama3.1', + this.model = 'llama3.2', this.keepAlive, final String baseUrl = 'http://localhost:11434/api', final Map? headers, diff --git a/packages/langchain_ollama/lib/src/llms/ollama.dart b/packages/langchain_ollama/lib/src/llms/ollama.dart index db352184..9be8ed12 100644 --- a/packages/langchain_ollama/lib/src/llms/ollama.dart +++ b/packages/langchain_ollama/lib/src/llms/ollama.dart @@ -20,7 +20,7 @@ import 'types.dart'; /// ```dart /// final llm = Ollama( /// defaultOption: const OllamaOptions( -/// model: 'llama3.1', +/// model: 'llama3.2', /// temperature: 1, /// ), /// ); @@ -49,7 +49,7 @@ import 'types.dart'; /// ```dart /// final llm = Ollama( /// defaultOptions: const OllamaOptions( -/// model: 'llama3.1', +/// model: 'llama3.2', /// temperature: 0, /// format: 'json', /// ), @@ -83,7 +83,7 @@ import 'types.dart'; /// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ -/// 'q1': prompt1 | llm.bind(const OllamaOptions(model: 'llama3.1')) | outputParser, +/// 'q1': prompt1 | llm.bind(const OllamaOptions(model: 'llama3.2')) | outputParser, /// 'q2': prompt2| llm.bind(const OllamaOptions(model: 'mistral')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); @@ -93,7 +93,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for `Llama-7b`: `ollama pull llama3.1` +/// * e.g., for `Llama-7b`: `ollama pull llama3.2` /// /// ### Advance /// @@ -178,7 +178,7 @@ class Ollama extends BaseLLM { String get modelType => 'ollama'; /// The default model to use unless another is specified. - static const defaultModel = 'llama3.1'; + static const defaultModel = 'llama3.2'; @override Future invoke( diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 51c98cd9..f6e9e066 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,5 +1,5 @@ name: langchain_ollama -description: LangChain.dart integration module for Ollama (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). 
+description: LangChain.dart integration module for Ollama (run Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, Qwen2 and other models locally). version: 0.3.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart index fcceacdb..9aac4640 100644 --- a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart +++ b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart @@ -13,7 +13,7 @@ import 'package:test/test.dart'; void main() { group('ChatOllama tests', skip: Platform.environment.containsKey('CI'), () { late ChatOllama chatModel; - const defaultModel = 'llama3.1'; + const defaultModel = 'llama3.2'; const visionModel = 'llava:latest'; setUp(() async { diff --git a/packages/langchain_ollama/test/embeddings/ollama_test.dart b/packages/langchain_ollama/test/embeddings/ollama_test.dart index ac8f999e..5363d47c 100644 --- a/packages/langchain_ollama/test/embeddings/ollama_test.dart +++ b/packages/langchain_ollama/test/embeddings/ollama_test.dart @@ -8,7 +8,7 @@ void main() { group('OllamaEmbeddings tests', skip: Platform.environment.containsKey('CI'), () { late OllamaEmbeddings embeddings; - const defaultModel = 'llama3.1'; + const defaultModel = 'llama3.2'; setUp(() async { embeddings = OllamaEmbeddings( diff --git a/packages/langchain_ollama/test/llms/ollama_test.dart b/packages/langchain_ollama/test/llms/ollama_test.dart index d21d0e56..7426b0c6 100644 --- a/packages/langchain_ollama/test/llms/ollama_test.dart +++ b/packages/langchain_ollama/test/llms/ollama_test.dart @@ -10,7 +10,7 @@ import 'package:test/test.dart'; void main() { group('Ollama tests', skip: Platform.environment.containsKey('CI'), () { late Ollama llm; - const defaultModel = 'llama3.1'; + const defaultModel = 'llama3.2'; setUp(() async { llm = Ollama( diff --git a/packages/ollama_dart/README.md b/packages/ollama_dart/README.md index 46ad88a3..dc637664 100644 --- a/packages/ollama_dart/README.md +++ b/packages/ollama_dart/README.md @@ -153,7 +153,7 @@ Tool calling allows a model to respond to a given prompt by generating output th **Notes:** - Tool calling requires Ollama 0.2.8 or newer. - Streaming tool calls is not supported at the moment. -- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1)). +- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.2`](https://ollama.com/library/llama3.2)). 
```dart const tool = Tool( @@ -185,7 +185,7 @@ const userMsg = Message( final res1 = await client.generateChatCompletion( request: GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [userMsg], tools: [tool], ), @@ -211,7 +211,7 @@ const toolResult = '{"location": "Barcelona, ES", "temperature": 20, "unit": "ce // Submit the response of the tool call to the model final res2 = await client.generateChatCompletion( request: GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [ userMsg, res1.message, diff --git a/packages/ollama_dart/example/ollama_dart_example.dart b/packages/ollama_dart/example/ollama_dart_example.dart index b1e9361f..53dc2abf 100644 --- a/packages/ollama_dart/example/ollama_dart_example.dart +++ b/packages/ollama_dart/example/ollama_dart_example.dart @@ -70,7 +70,7 @@ Future _generateCompletionStream(final OllamaClient client) async { Future _generateChatCompletion(final OllamaClient client) async { final generated = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [ Message( role: MessageRole.system, @@ -95,7 +95,7 @@ Future _generateChatCompletionWithHistory( ) async { final generated = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [ Message( role: MessageRole.user, @@ -118,7 +118,7 @@ Future _generateChatCompletionWithHistory( Future _generateChatCompletionStream(final OllamaClient client) async { final stream = client.generateChatCompletionStream( request: const GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [ Message( role: MessageRole.system, @@ -168,7 +168,7 @@ Future _generateChatToolCalling(final OllamaClient client) async { final res1 = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [userMsg], tools: [tool], keepAlive: 1, @@ -196,7 +196,7 @@ Future _generateChatToolCalling(final OllamaClient client) async { // Submit the response of the tool call to the model final res2 = await client.generateChatCompletion( request: GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [ userMsg, res1.message, diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index 0939dfb3..05b3f593 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -273,7 +273,7 @@ components: The model name. Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - example: llama3.1 + example: llama3.2 prompt: type: string description: The prompt to generate a response. 
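For reference, those two fields are all a minimal completion call needs through the generated Dart client; a sketch assuming a local Ollama server with `llama3.2` already pulled:

```dart
final client = OllamaClient();
final res = await client.generateCompletion(
  request: const GenerateCompletionRequest(
    model: 'llama3.2',
    prompt: 'Why is the sky blue?',
  ),
);
print(res.response);
```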
@@ -539,7 +539,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 created_at: type: string format: date-time @@ -596,7 +596,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 messages: type: array description: The messages of the chat, this can be used to keep a chat memory @@ -628,7 +628,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 created_at: type: string format: date-time @@ -769,7 +769,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 prompt: type: string description: Text to generate embeddings for. @@ -846,7 +846,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 modified_at: type: string format: date-time @@ -923,7 +923,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 size: type: integer format: int64 @@ -951,7 +951,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 required: - model ModelInfo: @@ -999,7 +999,7 @@ components: source: type: string description: Name of the model to copy. - example: llama3.1 + example: llama3.2 destination: type: string description: Name of the new model. @@ -1024,7 +1024,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 insecure: type: boolean description: | diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 66bea8fc..0bbd9916 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,5 +1,5 @@ name: ollama_dart -description: Dart Client for the Ollama API (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). +description: Dart Client for the Ollama API (run Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, Qwen2 and other models locally). 
version: 0.2.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart diff --git a/packages/ollama_dart/test/ollama_dart_chat_test.dart b/packages/ollama_dart/test/ollama_dart_chat_test.dart index 3ed66209..3e8afd82 100644 --- a/packages/ollama_dart/test/ollama_dart_chat_test.dart +++ b/packages/ollama_dart/test/ollama_dart_chat_test.dart @@ -7,7 +7,7 @@ void main() { group('Ollama Generate Completions API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3.1'; + const defaultModel = 'llama3.2'; const visionModel = 'llava'; setUp(() async { From 8ffbe8c722b16ba5b924ea254aa57f5d7154c9c1 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 25 Sep 2024 23:24:12 +0200 Subject: [PATCH 239/251] feat: Add OpenAI o1-preview and o1-mini to model catalog (#555) --- packages/langchain/README.md | 22 +- .../lib/src/chat_models/types.dart | 5 + packages/langchain_openai/pubspec.yaml | 2 +- .../schema/create_assistant_request.dart | 12 + .../create_chat_completion_request.dart | 10 + .../generated/schema/create_run_request.dart | 12 + .../schema/create_thread_and_run_request.dart | 12 + .../lib/src/generated/schema/schema.g.dart | 19 + packages/openai_dart/oas/openapi_curated.yaml | 27 +- .../openai_dart/oas/openapi_official.yaml | 13260 +++++++++------- packages/openai_dart/pubspec.yaml | 2 +- 11 files changed, 7983 insertions(+), 5400 deletions(-) diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 7443cae5..d01e9ccd 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -65,17 +65,17 @@ Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/pack > Depend on an integration-specific package if you want to use the specific integration. -| Package | Version | Description | -|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | [![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic) | Anthopic integration (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.) | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | -| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | -| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) 
| -| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, WizardLM-2, CodeGemma, Command R, LLaVA, DBRX, Qwen, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | -| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | -| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | -| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | +| Package | Version | Description | +|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | [![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic) | Anthopic integration (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.) | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | +| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, WizardLM-2, CodeGemma, Command R, LLaVA, DBRX, Qwen, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-4o, o1, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration |
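The newly catalogued o1 models plug into `ChatOpenAI` like any other model id. A minimal sketch (access to `o1-preview` depends on your OpenAI account, and `dart:io` is needed for `Platform`):

```dart
final chatModel = ChatOpenAI(
  apiKey: Platform.environment['OPENAI_API_KEY'],
  defaultOptions: const ChatOpenAIOptions(model: 'o1-preview'),
);
final res = await chatModel.invoke(
  PromptValue.string('In one sentence, what is retrieval-augmented generation?'),
);
print(res.output.content);
```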

    diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 6713a56f..3173e293 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -23,6 +23,7 @@ import 'package:meta/meta.dart'; /// - `gpt-4o` /// - `gpt-4o-2024-05-13` /// - `gpt-4o-2024-08-06` +/// - `gpt-4o-2024-08-06` /// - `gpt-4o-mini` /// - `gpt-4o-mini-2024-07-18` /// - `gpt-3.5-turbo` @@ -32,6 +33,10 @@ import 'package:meta/meta.dart'; /// - `gpt-3.5-turbo-0301` /// - `gpt-3.5-turbo-0613` /// - `gpt-3.5-turbo-1106` +/// - `o1-mini` +/// - `o1-mini-2024-09-12` +/// - `o1-preview` +/// - `o1-preview-2024-09-12` /// /// Mind that the list may be outdated. /// See https://platform.openai.com/docs/models for the latest list. diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index c367ac51..37c0b6ca 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,5 +1,5 @@ name: langchain_openai -description: LangChain.dart integration module for OpenAI (GPT-4o, Embeddings, DALL·E, etc.). +description: LangChain.dart integration module for OpenAI (GPT-4o, o1, Embeddings, DALL·E, etc.). version: 0.7.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai diff --git a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart index f078f394..cb7f5b82 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart @@ -153,6 +153,8 @@ class CreateAssistantRequest with _$CreateAssistantRequest { /// Available assistant models. Mind that the list may not be exhaustive nor up-to-date. 
enum AssistantModels { + @JsonValue('chatgpt-4o-latest') + chatgpt4oLatest, @JsonValue('gpt-4') gpt4, @JsonValue('gpt-4-32k') @@ -183,6 +185,8 @@ enum AssistantModels { gpt4o20240513, @JsonValue('gpt-4o-2024-08-06') gpt4o20240806, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -201,6 +205,14 @@ enum AssistantModels { gpt35Turbo0613, @JsonValue('gpt-3.5-turbo-1106') gpt35Turbo1106, + @JsonValue('o1-mini') + o1Mini, + @JsonValue('o1-mini-2024-09-12') + o1Mini20240912, + @JsonValue('o1-preview') + o1Preview, + @JsonValue('o1-preview-2024-09-12') + o1Preview20240912, } // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 0b6a5920..47280735 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -309,6 +309,8 @@ enum ChatCompletionModels { gpt4o20240513, @JsonValue('gpt-4o-2024-08-06') gpt4o20240806, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -327,6 +329,14 @@ enum ChatCompletionModels { gpt35Turbo0613, @JsonValue('gpt-3.5-turbo-1106') gpt35Turbo1106, + @JsonValue('o1-mini') + o1Mini, + @JsonValue('o1-mini-2024-09-12') + o1Mini20240912, + @JsonValue('o1-preview') + o1Preview, + @JsonValue('o1-preview-2024-09-12') + o1Preview20240912, } // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 485869d0..6fe86422 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -191,6 +191,8 @@ class CreateRunRequest with _$CreateRunRequest { /// Available models. Mind that the list may not be exhaustive nor up-to-date. enum RunModels { + @JsonValue('chatgpt-4o-latest') + chatgpt4oLatest, @JsonValue('gpt-4') gpt4, @JsonValue('gpt-4-32k') @@ -221,6 +223,8 @@ enum RunModels { gpt4o20240513, @JsonValue('gpt-4o-2024-08-06') gpt4o20240806, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -239,6 +243,14 @@ enum RunModels { gpt35Turbo0613, @JsonValue('gpt-3.5-turbo-1106') gpt35Turbo1106, + @JsonValue('o1-mini') + o1Mini, + @JsonValue('o1-mini-2024-09-12') + o1Mini20240912, + @JsonValue('o1-preview') + o1Preview, + @JsonValue('o1-preview-2024-09-12') + o1Preview20240912, } // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index e52be3e1..1d9c82ee 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -190,6 +190,8 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { /// Available models. Mind that the list may not be exhaustive nor up-to-date. 
 enum ThreadAndRunModels {
+  @JsonValue('chatgpt-4o-latest')
+  chatgpt4oLatest,
   @JsonValue('gpt-4')
   gpt4,
   @JsonValue('gpt-4-32k')
@@ -238,6 +240,14 @@ enum ThreadAndRunModels {
   gpt35Turbo0613,
   @JsonValue('gpt-3.5-turbo-1106')
   gpt35Turbo1106,
+  @JsonValue('o1-mini')
+  o1Mini,
+  @JsonValue('o1-mini-2024-09-12')
+  o1Mini20240912,
+  @JsonValue('o1-preview')
+  o1Preview,
+  @JsonValue('o1-preview-2024-09-12')
+  o1Preview20240912,
 }
 
 // ==========================================
diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart
index 3ffb5c36..092425e5 100644
--- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart
+++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart
@@ -439,6 +439,10 @@ const _$ChatCompletionModelsEnumMap = {
   ChatCompletionModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301',
   ChatCompletionModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613',
   ChatCompletionModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106',
+  ChatCompletionModels.o1Mini: 'o1-mini',
+  ChatCompletionModels.o1Mini20240912: 'o1-mini-2024-09-12',
+  ChatCompletionModels.o1Preview: 'o1-preview',
+  ChatCompletionModels.o1Preview20240912: 'o1-preview-2024-09-12',
 };
 
 _$ChatCompletionModelStringImpl _$$ChatCompletionModelStringImplFromJson(
@@ -2306,6 +2310,7 @@ Map _$$AssistantModelEnumerationImplToJson(
     };
 
 const _$AssistantModelsEnumMap = {
+  AssistantModels.chatgpt4oLatest: 'chatgpt-4o-latest',
   AssistantModels.gpt4: 'gpt-4',
   AssistantModels.gpt432k: 'gpt-4-32k',
   AssistantModels.gpt432k0314: 'gpt-4-32k-0314',
@@ -2330,6 +2335,10 @@ const _$AssistantModelsEnumMap = {
   AssistantModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301',
   AssistantModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613',
   AssistantModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106',
+  AssistantModels.o1Mini: 'o1-mini',
+  AssistantModels.o1Mini20240912: 'o1-mini-2024-09-12',
+  AssistantModels.o1Preview: 'o1-preview',
+  AssistantModels.o1Preview20240912: 'o1-preview-2024-09-12',
 };
 
 _$AssistantModelStringImpl _$$AssistantModelStringImplFromJson(
@@ -2969,6 +2978,7 @@ Map _$$CreateRunRequestModelEnumerationImplToJson(
     };
 
 const _$RunModelsEnumMap = {
+  RunModels.chatgpt4oLatest: 'chatgpt-4o-latest',
   RunModels.gpt4: 'gpt-4',
   RunModels.gpt432k: 'gpt-4-32k',
   RunModels.gpt432k0314: 'gpt-4-32k-0314',
@@ -2993,6 +3003,10 @@ const _$RunModelsEnumMap = {
   RunModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301',
   RunModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613',
   RunModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106',
+  RunModels.o1Mini: 'o1-mini',
+  RunModels.o1Mini20240912: 'o1-mini-2024-09-12',
+  RunModels.o1Preview: 'o1-preview',
+  RunModels.o1Preview20240912: 'o1-preview-2024-09-12',
 };
 
 _$CreateRunRequestModelStringImpl _$$CreateRunRequestModelStringImplFromJson(
@@ -3292,6 +3306,7 @@ Map _$$ThreadAndRunModelEnumerationImplToJson(
     };
 
 const _$ThreadAndRunModelsEnumMap = {
+  ThreadAndRunModels.chatgpt4oLatest: 'chatgpt-4o-latest',
   ThreadAndRunModels.gpt4: 'gpt-4',
   ThreadAndRunModels.gpt432k: 'gpt-4-32k',
   ThreadAndRunModels.gpt432k0314: 'gpt-4-32k-0314',
@@ -3316,6 +3331,10 @@ const _$ThreadAndRunModelsEnumMap = {
   ThreadAndRunModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301',
   ThreadAndRunModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613',
   ThreadAndRunModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106',
+
ThreadAndRunModels.o1Mini: 'o1-mini', + ThreadAndRunModels.o1Mini20240912: 'o1-mini-2024-09-12', + ThreadAndRunModels.o1Preview: 'o1-preview', + ThreadAndRunModels.o1Preview20240912: 'o1-preview-2024-09-12', }; _$ThreadAndRunModelStringImpl _$$ThreadAndRunModelStringImplFromJson( diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 4fc465f5..793e696e 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1842,6 +1842,7 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -1851,6 +1852,10 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "o1-mini", + "o1-mini-2024-09-12", + "o1-preview", + "o1-preview-2024-09-12", ] messages: description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). @@ -3660,6 +3665,7 @@ components: Available assistant models. Mind that the list may not be exhaustive nor up-to-date. enum: [ + "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -3675,6 +3681,7 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -3684,6 +3691,10 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "o1-mini", + "o1-mini-2024-09-12", + "o1-preview", + "o1-preview-2024-09-12", ] name: description: *assistant_name_param_description @@ -4216,6 +4227,7 @@ components: Available models. Mind that the list may not be exhaustive nor up-to-date. enum: [ + "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -4231,6 +4243,7 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -4240,6 +4253,10 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "o1-mini", + "o1-mini-2024-09-12", + "o1-preview", + "o1-preview-2024-09-12", ] instructions: description: Overrides the [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. @@ -4454,6 +4471,7 @@ components: Available models. Mind that the list may not be exhaustive nor up-to-date. enum: [ + "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -4469,6 +4487,7 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -4478,6 +4497,10 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "o1-mini", + "o1-mini-2024-09-12", + "o1-preview", + "o1-preview-2024-09-12", ] instructions: description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. @@ -5719,7 +5742,7 @@ components: $ref: "#/components/schemas/FileSearchRanker" score_threshold: type: number - description: | + description: | The score threshold for the file search. All values must be a floating point number between 0 and 1. minimum: 0 maximum: 1 @@ -5743,7 +5766,7 @@ components: maximum: 1 content: type: array - description: | + description: | The content of the result that was found. The content is only included if requested via the include query parameter. 
items: diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index de7cd98a..96e64e32 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -37,6 +37,8 @@ tags: description: List and describe the various models available in the API. - name: Moderations description: Given a input text, outputs if the model classifies it as potentially harmful. + - name: Audit Logs + description: List user actions and configuration changes within this organization. paths: # Note: When adding an endpoint, make sure you also add it in the `groups` section, in the end of this file, # under the appropriate group @@ -177,7 +179,9 @@ paths: {"type": "text", "text": "What's in this image?"}, { "type": "image_url", - "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + } }, ], } @@ -201,9 +205,10 @@ paths: { type: "text", text: "What's in this image?" }, { type: "image_url", - image_url: - "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", - }, + image_url: { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + }, + } ], }, ], @@ -2766,7 +2771,7 @@ paths: response: &moderation_example | { "id": "modr-XXXXX", - "model": "text-moderation-005", + "model": "text-moderation-007", "results": [ { "flagged": true, @@ -7254,169 +7259,2450 @@ paths: } } -components: - securitySchemes: - ApiKeyAuth: - type: http - scheme: "bearer" + # Organization + # Audit Logs List + /organization/audit_logs: + get: + summary: List user actions and configuration changes within this organization. + operationId: list-audit-logs + tags: + - Audit Logs + parameters: + - name: effective_at + in: query + description: Return only events whose `effective_at` (Unix seconds) is in this range. + required: false + schema: + type: object + properties: + gt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater than this value. + gte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater than or equal to this value. + lt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than this value. + lte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than or equal to this value. + - name: project_ids[] + in: query + description: Return only events for these projects. + required: false + schema: + type: array + items: + type: string + - name: event_types[] + in: query + description: Return only events with a `type` in one of these values. For example, `project.created`. For all options, see the documentation for the [audit log object](/docs/api-reference/audit-logs/object). + required: false + schema: + type: array + items: + $ref: "#/components/schemas/AuditLogEventType" + - name: actor_ids[] + in: query + description: Return only events performed by these actors. Can be a user ID, a service account ID, or an api key tracking ID. 
+ required: false + schema: + type: array + items: + type: string + - name: actor_emails[] + in: query + description: Return only events performed by users with these emails. + required: false + schema: + type: array + items: + type: string + - name: resource_ids[] + in: query + description: Return only events performed on these targets. For example, a project ID updated. + required: false + schema: + type: array + items: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: Audit logs listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ListAuditLogsResponse" + x-oaiMeta: + name: List audit logs + group: audit-logs + returns: A list of paginated [Audit Log](/docs/api-reference/audit-logs/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/audit_logs \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + response: | + { + "object": "list", + "data": [ + { + "id": "audit_log-xxx_yyyymmdd", + "type": "project.archived", + "effective_at": 1722461446, + "actor": { + "type": "api_key", + "api_key": { + "type": "user", + "user": { + "id": "user-xxx", + "email": "user@example.com" + } + } + }, + "project.archived": { + "id": "proj_abc" + }, + }, + { + "id": "audit_log-yyy__20240101", + "type": "api_key.updated", + "effective_at": 1720804190, + "actor": { + "type": "session", + "session": { + "user": { + "id": "user-xxx", + "email": "user@example.com" + }, + "ip_address": "127.0.0.1", + "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + } + }, + "api_key.updated": { + "id": "key_xxxx", + "data": { + "scopes": ["resource_2.operation_2"] + } + }, + } + ], + "first_id": "audit_log-xxx__20240101", + "last_id": "audit_log_yyy__20240101", + "has_more": true + } + /organization/invites: + get: + summary: Returns a list of invites in the organization. + operationId: list-invites + tags: + - Invites + parameters: + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Invites listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/InviteListResponse' + x-oaiMeta: + name: List invites + group: administration + returns: A list of [Invite](/docs/api-reference/invite/object) objects. 
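The organization endpoints being added to `openapi_official.yaml` here (audit logs, invites, users, projects) are only tracked in the vendored spec; this patch does not surface them in the generated `openai_dart` client. Until it does, the audit-log listing described above can be exercised with plain HTTP. A sketch using `package:http`, assuming an admin key exported as `OPENAI_ADMIN_KEY`:

```dart
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  // Admin key with organization scope; assumed to be provided by the caller.
  final adminKey = Platform.environment['OPENAI_ADMIN_KEY'];

  // GET /v1/organization/audit_logs?limit=20, as described in the spec above.
  final response = await http.get(
    Uri.https('api.openai.com', '/v1/organization/audit_logs', {'limit': '20'}),
    headers: {
      'Authorization': 'Bearer $adminKey',
      'Content-Type': 'application/json',
    },
  );

  final body = jsonDecode(response.body) as Map<String, dynamic>;
  for (final event in body['data'] as List<dynamic>? ?? const []) {
    print('${event['type']} (effective_at: ${event['effective_at']})');
  }
}
```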
+ examples: + request: + curl: | + curl https://api.openai.com/v1/organization/invites?after=invite-abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + ], + "first_id": "invite-abc", + "last_id": "invite-abc", + "has_more": false + } - schemas: - Error: - type: object - properties: - code: - type: string - nullable: true - message: - type: string - nullable: false - param: - type: string - nullable: true - type: - type: string - nullable: false - required: - - type - - message - - param - - code - ErrorResponse: - type: object - properties: - error: - $ref: "#/components/schemas/Error" - required: - - error + post: + summary: Create an invite for a user to the organization. The invite must be accepted by the user before they have access to the organization. + operationId: inviteUser + tags: + - Invites + requestBody: + description: The invite request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/InviteRequest' + responses: + "200": + description: User invited successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Invite' + x-oaiMeta: + name: Create invite + group: administration + returns: The created [Invite](/docs/api-reference/invite/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/invites \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "email": "user@example.com", + "role": "owner" + }' + response: + content: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": null + } - ListModelsResponse: - type: object - properties: - object: - type: string - enum: [ list ] - data: - type: array - items: - $ref: "#/components/schemas/Model" - required: - - object - - data - DeleteModelResponse: - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - required: - - id - - object - - deleted + /organization/invites/{invite_id}: + get: + summary: Retrieves an invite. + operationId: retrieve-invite + tags: + - Invites + parameters: + - in: path + name: invite_id + required: true + schema: + type: string + description: The ID of the invite to retrieve. + responses: + "200": + description: Invite retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Invite' + x-oaiMeta: + name: Retrieve invite + group: administration + returns: The [Invite](/docs/api-reference/invite/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/invites/invite-abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + delete: + summary: Delete an invite. If the invite has already been accepted, it cannot be deleted. 
+ operationId: delete-invite + tags: + - Invites + parameters: + - in: path + name: invite_id + required: true + schema: + type: string + description: The ID of the invite to delete. + responses: + "200": + description: Invite deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/InviteDeleteResponse' + x-oaiMeta: + name: Delete invite + group: administration + returns: Confirmation that the invite has been deleted + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/invites/invite-abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.invite.deleted", + "id": "invite-abc", + "deleted": true + } - CreateCompletionRequest: - type: object - properties: - model: - description: &model_description | - ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - anyOf: - - type: string - - type: string - enum: [ "gpt-3.5-turbo-instruct", "davinci-002", "babbage-002" ] - x-oaiTypeLabel: string - prompt: - description: &completions_prompt_description | - The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. - - Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. - default: "<|endoftext|>" - nullable: true - oneOf: - - type: string - default: "" - example: "This is a test." + /organization/users: + get: + summary: Lists all of the users in the organization. + operationId: list-users + tags: + - Users + parameters: + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Users listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/UserListResponse' + x-oaiMeta: + name: List users + group: administration + returns: A list of [User](/docs/api-reference/users/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/users?after=user_abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + ], + "first_id": "user-abc", + "last_id": "user-xyz", + "has_more": false + } + + /organization/users/{user_id}: + get: + summary: Retrieves a user by their identifier. + operationId: retrieve-user + tags: + - Users + parameters: + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: User retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/User' + x-oaiMeta: + name: Retrieve user + group: administration + returns: The [User](/docs/api-reference/users/object) object matching the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + post: + summary: Modifies a user's role in the organization. + operationId: modify-user + tags: + - Users + requestBody: + description: The new user role to modify. This must be one of `owner` or `member`. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UserRoleUpdateRequest' + responses: + "200": + description: User role updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/User' + x-oaiMeta: + name: Modify user + group: administration + returns: The updated [User](/docs/api-reference/users/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "role": "owner" + }' + response: + content: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + delete: + summary: Deletes a user from the organization. + operationId: delete-user + tags: + - Users + parameters: + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: User deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/UserDeleteResponse' + x-oaiMeta: + name: Delete user + group: administration + returns: Confirmation of the deleted user + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.user.deleted", + "id": "user_abc", + "deleted": true + } + /organization/projects: + get: + summary: Returns a list of projects. + operationId: list-projects + tags: + - Projects + parameters: + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + - name: include_archived + in: query + schema: + type: boolean + default: false + description: If `true` returns all projects including those that have been `archived`. Archived projects are not included by default. + responses: + "200": + description: Projects listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectListResponse' + x-oaiMeta: + name: List projects + group: administration + returns: A list of [Project](/docs/api-reference/projects/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects?after=proj_abc&limit=20&include_archived=false \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + ], + "first_id": "proj-abc", + "last_id": "proj-xyz", + "has_more": false + } + + post: + summary: Create a new project in the organization. Projects can be created and archived, but cannot be deleted. + operationId: create-project + tags: + - Projects + requestBody: + description: The project create request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectCreateRequest' + responses: + "200": + description: Project created successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + x-oaiMeta: + name: Create project + group: administration + returns: The created [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Project ABC", + "app_use_case": "Your project use case here", + "business_website": "https://example.com" + }' + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project ABC", + "created_at": 1711471533, + "archived_at": null, + "status": "active", + "app_use_case": "Your project use case here", + "business_website": "https://example.com" + } + + /organization/projects/{project_id}: + get: + summary: Retrieves a project. + operationId: retrieve-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + responses: + "200": + description: Project retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + x-oaiMeta: + name: Retrieve project + group: administration + description: Retrieve a project. + returns: The [Project](/docs/api-reference/projects/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + + post: + summary: Modifies a project in the organization. + operationId: modify-project + tags: + - Projects + requestBody: + description: The project update request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUpdateRequest' + responses: + "200": + description: Project updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + "400": + description: Error response when updating the default project. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Modify project + group: administration + returns: The updated [Project](/docs/api-reference/projects/object) object. 
+ examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Project DEF", + "app_use_case": "Your project use case here", + "business_website": "https://example.com" + }' + + /organization/projects/{project_id}/archive: + post: + summary: Archives a project in the organization. Archived projects cannot be used or updated. + operationId: archive-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + responses: + "200": + description: Project archived successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + x-oaiMeta: + name: Archive project + group: administration + returns: The archived [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/archive \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project DEF", + "created_at": 1711471533, + "archived_at": 1711471533, + "status": "archived" + } + + + /organization/projects/{project_id}/users: + get: + summary: Returns a list of users in the project. + operationId: list-project-users + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Project users listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserListResponse' + "400": + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: List project users + group: administration + returns: A list of [ProjectUser](/docs/api-reference/project-users/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/users?after=user_abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + ], + "first_id": "user-abc", + "last_id": "user-xyz", + "has_more": false + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + + post: + summary: Adds a user to the project. Users must already be members of the organization to be added to a project. + operationId: create-project-user + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + tags: + - Projects + requestBody: + description: The project user create request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserCreateRequest' + responses: + "200": + description: User added to project successfully. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUser' + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Create project user + group: administration + returns: The created [ProjectUser](/docs/api-reference/project-users/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "user_id": "user_abc", + "role": "member" + }' + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + + /organization/projects/{project_id}/users/{user_id}: + get: + summary: Retrieves a user in the project. + operationId: retrieve-project-user + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: Project user retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUser' + x-oaiMeta: + name: Retrieve project user + group: administration + returns: The [ProjectUser](/docs/api-reference/project-users/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + post: + summary: Modifies a user's role in the project. + operationId: modify-project-user + tags: + - Projects + requestBody: + description: The project user update request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserUpdateRequest' + responses: + "200": + description: Project user's role updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUser' + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Modify project user + group: administration + returns: The updated [ProjectUser](/docs/api-reference/project-users/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "role": "owner" + }' + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + delete: + summary: Deletes a user from the project. + operationId: delete-project-user + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: user_id + in: path + description: The ID of the user. 
+ required: true + schema: + type: string + responses: + "200": + description: Project user deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserDeleteResponse' + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Delete project user + group: administration + returns: Confirmation that project has been deleted or an error in case of an archived project, which has no users + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.user.deleted", + "id": "user_abc", + "deleted": true + } + + /organization/projects/{project_id}/service_accounts: + get: + summary: Returns a list of service accounts in the project. + operationId: list-project-service-accounts + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Project service accounts listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountListResponse' + "400": + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: List project service accounts + group: administration + returns: A list of [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts?after=custom_id&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + ], + "first_id": "svc_acct_abc", + "last_id": "svc_acct_xyz", + "has_more": false + } + + post: + summary: Creates a new service account in the project. This also returns an unredacted API key for the service account. + operationId: create-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + requestBody: + description: The project service account create request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountCreateRequest' + responses: + "200": + description: Project service account created successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountCreateResponse' + "400": + description: Error response when project is archived. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Create project service account + group: administration + returns: The created [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/service_accounts \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Production App" + }' + response: + content: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Production App", + "role": "member", + "created_at": 1711471533, + "api_key": { + "object": "organization.project.service_account.api_key", + "value": "sk-abcdefghijklmnop123", + "name": "Secret Key", + "created_at": 1711471533, + "id": "key_abc" + } + } + + /organization/projects/{project_id}/service_accounts/{service_account_id}: + get: + summary: Retrieves a service account in the project. + operationId: retrieve-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: service_account_id + in: path + description: The ID of the service account. + required: true + schema: + type: string + responses: + "200": + description: Project service account retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccount' + x-oaiMeta: + name: Retrieve project service account + group: administration + returns: The [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + + delete: + summary: Deletes a service account from the project. + operationId: delete-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: service_account_id + in: path + description: The ID of the service account. + required: true + schema: + type: string + responses: + "200": + description: Project service account deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountDeleteResponse' + x-oaiMeta: + name: Delete project service account + group: administration + returns: Confirmation of service account being deleted, or an error in case of an archived project, which has no service accounts + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.service_account.deleted", + "id": "svc_acct_abc", + "deleted": true + } + + /organization/projects/{project_id}/api_keys: + get: + summary: Returns a list of API keys in the project. + operationId: list-project-api-keys + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. 
+ required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Project API keys listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectApiKeyListResponse' + + x-oaiMeta: + name: List project API keys + group: administration + returns: A list of [ProjectApiKey](/docs/api-reference/project-api-keys/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys?after=key_abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + } + } + ], + "first_id": "key_abc", + "last_id": "key_xyz", + "has_more": false + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + + /organization/projects/{project_id}/api_keys/{key_id}: + get: + summary: Retrieves an API key in the project. + operationId: retrieve-project-api-key + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. + required: true + schema: + type: string + responses: + "200": + description: Project API key retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectApiKey' + x-oaiMeta: + name: Retrieve project API key + group: administration + returns: The [ProjectApiKey](/docs/api-reference/project-api-keys/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + } + } + + delete: + summary: Deletes an API key from the project. + operationId: delete-project-api-key + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. + required: true + schema: + type: string + responses: + "200": + description: Project API key deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectApiKeyDeleteResponse' + "400": + description: Error response for various conditions. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Delete project API key + group: administration + returns: Confirmation of the key's deletion or an error if the key belonged to a service account + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.api_key.deleted", + "id": "key_abc", + "deleted": true + } + error_response: + content: | + { + "code": 400, + "message": "API keys cannot be deleted for service accounts, please delete the service account" + } + +components: + securitySchemes: + ApiKeyAuth: + type: http + scheme: "bearer" + + schemas: + Error: + type: object + properties: + code: + type: string + nullable: true + message: + type: string + nullable: false + param: + type: string + nullable: true + type: + type: string + nullable: false + required: + - type + - message + - param + - code + ErrorResponse: + type: object + properties: + error: + $ref: "#/components/schemas/Error" + required: + - error + + ListModelsResponse: + type: object + properties: + object: + type: string + enum: [ list ] + data: + type: array + items: + $ref: "#/components/schemas/Model" + required: + - object + - data + DeleteModelResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + required: + - id + - object + - deleted + + CreateCompletionRequest: + type: object + properties: + model: + description: &model_description | + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + anyOf: + - type: string + - type: string + enum: [ "gpt-3.5-turbo-instruct", "davinci-002", "babbage-002" ] + x-oaiTypeLabel: string + prompt: + description: &completions_prompt_description | + The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + default: "<|endoftext|>" + nullable: true + oneOf: + - type: string + default: "" + example: "This is a test." + - type: array + items: + type: string + default: "" + example: "This is a test." + - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + best_of: + type: integer + default: 1 + minimum: 0 + maximum: 20 + nullable: true + description: &completions_best_of_description | + Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
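The `best_of`/`n` interplay documented above is easy to misconfigure: `best_of` candidates are generated server-side and only `n` of them are returned, so `best_of` must be greater than `n`, and both multiply token consumption. A small sketch with the `openai_dart` legacy completions API (the camelCase `bestOf`, `n` and `maxTokens` field names are assumed from the generator's conventions):

```dart
import 'dart:io';

import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  final client = OpenAIClient(
    apiKey: Platform.environment['OPENAI_API_KEY'],
  );

  final res = await client.createCompletion(
    request: CreateCompletionRequest(
      model: CompletionModel.modelId('gpt-3.5-turbo-instruct'),
      prompt: CompletionPrompt.string('Write a haiku about unified diffs.'),
      maxTokens: 32,
      // Sample 4 candidates server-side and return 2 of them (best_of > n).
      bestOf: 4,
      n: 2,
    ),
  );
  for (final choice in res.choices) {
    print('choice ${choice.index}: ${choice.text}');
  }

  client.endSession();
}
```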
+ echo: + type: boolean + default: false + nullable: true + description: &completions_echo_description > + Echo back the prompt in addition to the completion + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_frequency_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + logit_bias: &completions_logit_bias + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: &completions_logit_bias_description | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + logprobs: &completions_logprobs_configuration + type: integer + minimum: 0 + maximum: 5 + default: null + nullable: true + description: &completions_logprobs_description | + Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + + The maximum value for `logprobs` is 5. + max_tokens: + type: integer + minimum: 0 + default: 16 + example: 16 + nullable: true + description: &completions_max_tokens_description | + The maximum number of [tokens](/tokenizer) that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: &completions_completions_description | + How many completions to generate for each prompt. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_presence_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
+ + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + seed: &completions_seed_param + type: integer + minimum: -9223372036854775808 + maximum: 9223372036854775807 + nullable: true + description: | + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + stop: + description: &completions_stop_description > + Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + default: null + nullable: true + oneOf: + - type: string + default: <|endoftext|> + example: "\n" + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + example: '["\n"]' + stream: + description: > + Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean + nullable: true + default: false + stream_options: + $ref: "#/components/schemas/ChatCompletionStreamOptions" + suffix: + description: | + The suffix that comes after a completion of inserted text. + + This parameter is only supported for `gpt-3.5-turbo-instruct`. + default: null + nullable: true + type: string + example: "test." + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: &completions_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &completions_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + user: &end_user_param_configuration + type: string + example: user-1234 + description: | + A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + required: + - model + - prompt + + CreateCompletionResponse: + type: object + description: | + Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + properties: + id: + type: string + description: A unique identifier for the completion. + choices: + type: array + description: The list of completion choices the model generated for the input prompt. + items: + type: object + required: + - finish_reason + - index + - logprobs + - text + properties: + finish_reason: + type: string + description: &completion_finish_reason_description | + The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + or `content_filter` if content was omitted due to a flag from our content filters. + enum: [ "stop", "length", "content_filter" ] + index: + type: integer + logprobs: + type: object + nullable: true + properties: + text_offset: + type: array + items: + type: integer + token_logprobs: + type: array + items: + type: number + tokens: + type: array + items: + type: string + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: number + text: + type: string + created: + type: integer + description: The Unix timestamp (in seconds) of when the completion was created. + model: + type: string + description: The model used for completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always "text_completion" + enum: [ text_completion ] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - id + - object + - created + - model + - choices + x-oaiMeta: + name: The completion object + legacy: true + example: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "gpt-4-turbo", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + + ChatCompletionRequestMessageContentPartText: + type: object + title: Text content part + properties: + type: + type: string + enum: [ "text" ] + description: The type of the content part. + text: + type: string + description: The text content. + required: + - type + - text + + ChatCompletionRequestMessageContentPartImage: + type: object + title: Image content part + properties: + type: + type: string + enum: [ "image_url" ] + description: The type of the content part. + image_url: + type: object + properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + detail: + type: string + description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). + enum: [ "auto", "low", "high" ] + default: "auto" + required: + - url + required: + - type + - image_url + + ChatCompletionRequestMessageContentPartRefusal: + type: object + title: Refusal content part + properties: + type: + type: string + enum: [ "refusal" ] + description: The type of the content part. + refusal: + type: string + description: The refusal message generated by the model. 
+ required: + - type + - refusal + + ChatCompletionRequestMessage: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + + ChatCompletionRequestSystemMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + + ChatCompletionRequestUserMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" + x-oaiExpandable: true + + ChatCompletionRequestAssistantMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartRefusal" + x-oaiExpandable: true + + ChatCompletionRequestToolMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + + ChatCompletionRequestSystemMessage: + type: object + title: System message + properties: + content: + description: The contents of the system message. + oneOf: + - type: string + description: The contents of the system message. + title: Text content - type: array + description: An array of content parts with a defined type. For system messages, only type `text` is supported. + title: Array of content parts items: - type: string - default: "" - example: "This is a test." + $ref: "#/components/schemas/ChatCompletionRequestSystemMessageContentPart" + minItems: 1 + role: + type: string + enum: [ "system" ] + description: The role of the messages author, in this case `system`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + required: + - content + - role + + ChatCompletionRequestUserMessage: + type: object + title: User message + properties: + content: + description: | + The contents of the user message. + oneOf: + - type: string + description: The text contents of the message. + title: Text content + - type: array + description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestUserMessageContentPart" + minItems: 1 + x-oaiExpandable: true + role: + type: string + enum: [ "user" ] + description: The role of the messages author, in this case `user`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + required: + - content + - role + + ChatCompletionRequestAssistantMessage: + type: object + title: Assistant message + properties: + content: + nullable: true + oneOf: + - type: string + description: The contents of the assistant message. + title: Text content + - type: array + description: An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. 
+ title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestAssistantMessageContentPart" + minItems: 1 + description: | + The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + refusal: + nullable: true + type: string + description: The refusal message by the assistant. + role: + type: string + enum: [ "assistant" ] + description: The role of the messages author, in this case `assistant`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + function_call: + type: object + deprecated: true + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + nullable: true + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - arguments + - name + required: + - role + + FineTuneChatCompletionRequestAssistantMessage: + allOf: + - type: object + title: Assistant message + deprecated: false + properties: + weight: + type: integer + enum: [ 0, 1 ] + description: "Controls whether the assistant message is trained against (0 or 1)" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + required: + - role + + ChatCompletionRequestToolMessage: + type: object + title: Tool message + properties: + role: + type: string + enum: [ "tool" ] + description: The role of the messages author, in this case `tool`. + content: + oneOf: + - type: string + description: The contents of the tool message. + title: Text content - type: array - minItems: 1 + description: An array of content parts with a defined type. For tool messages, only type `text` is supported. + title: Array of content parts items: - type: integer - example: "[1212, 318, 257, 1332, 13]" - - type: array + $ref: "#/components/schemas/ChatCompletionRequestToolMessageContentPart" minItems: 1 - items: - type: array - minItems: 1 - items: - type: integer - example: "[[1212, 318, 257, 1332, 13]]" - best_of: + description: The contents of the tool message. + tool_call_id: + type: string + description: Tool call that this message is responding to. + required: + - role + - content + - tool_call_id + + ChatCompletionRequestFunctionMessage: + type: object + title: Function message + deprecated: true + properties: + role: + type: string + enum: [ "function" ] + description: The role of the messages author, in this case `function`. + content: + nullable: true + type: string + description: The contents of the function message. + name: + type: string + description: The name of the function to call. + required: + - role + - content + - name + + FunctionParameters: + type: object + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." 
+ additionalProperties: true + + ChatCompletionFunctions: + type: object + deprecated: true + properties: + description: + type: string + description: A description of what the function does, used by the model to choose when and how to call the function. + name: + type: string + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + required: + - name + + ChatCompletionFunctionCallOption: + type: object + description: > + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + properties: + name: + type: string + description: The name of the function to call. + required: + - name + + ChatCompletionTool: + type: object + properties: + type: + type: string + enum: [ "function" ] + description: The type of the tool. Currently, only `function` is supported. + function: + $ref: "#/components/schemas/FunctionObject" + required: + - type + - function + + FunctionObject: + type: object + properties: + description: + type: string + description: A description of what the function does, used by the model to choose when and how to call the function. + name: + type: string + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling). + required: + - name + + ResponseFormatText: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `text`" + enum: [ "text" ] + required: + - type + + ResponseFormatJsonObject: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `json_object`" + enum: [ "json_object" ] + required: + - type + + ResponseFormatJsonSchemaSchema: + type: object + description: "The schema for the response format, described as a JSON Schema object." + additionalProperties: true + + ResponseFormatJsonSchema: + type: object + properties: + type: + type: string + description: 'The type of response format being defined: `json_schema`' + enum: [ 'json_schema' ] + json_schema: + type: object + properties: + description: + type: string + description: A description of what the response format is for, used by the model to determine how to respond in the format. + name: + type: string + description: The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + schema: + $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). 
+ required: + - type + - name + required: + - type + - json_schema + + ChatCompletionToolChoiceOption: + description: | + Controls which (if any) tool is called by the model. + `none` means the model will not call any tool and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools. + Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools are present. + oneOf: + - type: string + description: > + `none` means the model will not call any tool and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools. + enum: [ none, auto, required ] + - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" + x-oaiExpandable: true + + ChatCompletionNamedToolChoice: + type: object + description: Specifies a tool the model should use. Use to force the model to call a specific function. + properties: + type: + type: string + enum: [ "function" ] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + required: + - type + - function + + ParallelToolCalls: + description: Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + type: boolean + default: true + + ChatCompletionMessageToolCalls: + type: array + description: The tool calls generated by the model, such as function calls. + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCall" + + ChatCompletionMessageToolCall: + type: object + properties: + # TODO: index included when streaming + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: [ "function" ] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + description: The function that the model called. + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + required: + - name + - arguments + required: + - id + - type + - function + + ChatCompletionMessageToolCallChunk: + type: object + properties: + index: type: integer - default: 1 - minimum: 0 - maximum: 20 - nullable: true - description: &completions_best_of_description | - Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. - - When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. - - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - echo: + id: + type: string + description: The ID of the tool call. 
+ type: + type: string + enum: [ "function" ] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + required: + - index + + # Note, this isn't referenced anywhere, but is kept as a convenience to record all possible roles in one place. + ChatCompletionRole: + type: string + description: The role of the author of a message + enum: + - system + - user + - assistant + - tool + - function + + ChatCompletionStreamOptions: + description: | + Options for streaming response. Only set this when you set `stream: true`. + type: object + nullable: true + default: null + properties: + include_usage: type: boolean - default: false + description: | + If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. + + ChatCompletionResponseMessage: + type: object + description: A chat completion message generated by the model. + properties: + content: + type: string + description: The contents of the message. nullable: true - description: &completions_echo_description > - Echo back the prompt in addition to the completion + refusal: + type: string + description: The refusal message generated by the model. + nullable: true + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + role: + type: string + enum: [ "assistant" ] + description: The role of the author of this message. + function_call: + type: object + deprecated: true + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - name + - arguments + required: + - role + - content + - refusal + + ChatCompletionStreamResponseDelta: + type: object + description: A chat completion delta generated by streamed model responses. + properties: + content: + type: string + description: The contents of the chunk message. + nullable: true + function_call: + deprecated: true + type: object + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. 
+ tool_calls: + type: array + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" + role: + type: string + enum: [ "system", "user", "assistant", "tool" ] + description: The role of the author of this message. + refusal: + type: string + description: The refusal message generated by the model. + nullable: true + + CreateChatCompletionRequest: + type: object + properties: + messages: + description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + type: array + minItems: 1 + items: + $ref: "#/components/schemas/ChatCompletionRequestMessage" + model: + description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + example: "gpt-4o" + anyOf: + - type: string + - type: string + enum: + [ + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "chatgpt-4o-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string frequency_penalty: type: number default: 0 minimum: -2 maximum: 2 nullable: true - description: &completions_frequency_penalty_description | - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - - [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) - logit_bias: &completions_logit_bias + description: *completions_frequency_penalty_description + logit_bias: type: object x-oaiTypeLabel: map default: null nullable: true additionalProperties: type: integer - description: &completions_logit_bias_description | + description: | Modify the likelihood of specified tokens appearing in the completion. - Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - - As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. - logprobs: &completions_logprobs_configuration + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. 
+ logprobs: + description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + type: boolean + default: false + nullable: true + top_logprobs: + description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. type: integer minimum: 0 - maximum: 5 - default: null + maximum: 20 nullable: true - description: &completions_logprobs_description | - Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - - The maximum value for `logprobs` is 5. max_tokens: + description: | + The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. type: integer - minimum: 0 - default: 16 - example: 16 nullable: true - description: &completions_max_tokens_description | - The maximum number of [tokens](/tokenizer) that can be generated in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: type: integer minimum: 1 @@ -7424,63 +9710,72 @@ components: default: 1 example: 1 nullable: true - description: &completions_completions_description | - How many completions to generate for each prompt. - - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. presence_penalty: type: number default: 0 minimum: -2 maximum: 2 nullable: true - description: &completions_presence_penalty_description | - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + description: *completions_presence_penalty_description + response_format: + description: | + An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) - seed: &completions_seed_param + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
+ + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - $ref: "#/components/schemas/ResponseFormatText" + - $ref: "#/components/schemas/ResponseFormatJsonObject" + - $ref: "#/components/schemas/ResponseFormatJsonSchema" + x-oaiExpandable: true + seed: type: integer minimum: -9223372036854775808 maximum: 9223372036854775807 nullable: true description: | + This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + service_tier: + description: | + Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: + - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. + - When not set, the default behavior is 'auto'. + + When this parameter is set, the response body will include the `service_tier` utilized. + type: string + enum: [ "auto", "default" ] + nullable: true + default: null stop: - description: &completions_stop_description > - Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + description: | + Up to 4 sequences where the API will stop generating further tokens. default: null - nullable: true oneOf: - type: string - default: <|endoftext|> - example: "\n" nullable: true - type: array minItems: 1 maxItems: 4 items: type: string - example: '["\n"]' stream: description: > - Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). type: boolean nullable: true default: false stream_options: $ref: "#/components/schemas/ChatCompletionStreamOptions" - suffix: - description: | - The suffix that comes after a completion of inserted text. - - This parameter is only supported for `gpt-3.5-turbo-instruct`. - default: null - nullable: true - type: string - example: "test." temperature: type: number minimum: 0 @@ -7488,10 +9783,7 @@ components: default: 1 example: 1 nullable: true - description: &completions_temperature_description | - What sampling temperature to use, between 0 and 2.
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + description: *completions_temperature_description top_p: type: number minimum: 0 @@ -7499,77 +9791,125 @@ components: default: 1 example: 1 nullable: true - description: &completions_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + description: *completions_top_p_description + tools: + type: array + description: > + A list of tools the model may call. Currently, only functions are supported as a tool. + Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + items: + $ref: "#/components/schemas/ChatCompletionTool" + tool_choice: + $ref: "#/components/schemas/ChatCompletionToolChoiceOption" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + user: *end_user_param_configuration + function_call: + deprecated: true + description: | + Deprecated in favor of `tool_choice`. - We generally recommend altering this or `temperature` but not both. - user: &end_user_param_configuration - type: string - example: user-1234 + Controls which (if any) function is called by the model. + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + + `none` is the default when no functions are present. `auto` is the default if functions are present. + oneOf: + - type: string + description: > + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + enum: [ none, auto ] + - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" + x-oaiExpandable: true + functions: + deprecated: true description: | - A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + required: - model - - prompt + - messages - CreateCompletionResponse: + CreateChatCompletionResponse: type: object - description: | - Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + description: Represents a chat completion response returned by model, based on the provided input. properties: id: type: string - description: A unique identifier for the completion. + description: A unique identifier for the chat completion. choices: type: array - description: The list of completion choices the model generated for the input prompt. + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. 
items: type: object required: - finish_reason - index + - message - logprobs - - text properties: finish_reason: type: string - description: &completion_finish_reason_description | + description: &chat_completion_finish_reason_description | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, - or `content_filter` if content was omitted due to a flag from our content filters. - enum: [ "stop", "length", "content_filter" ] + `content_filter` if content was omitted due to a flag from our content filters, + `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] index: type: integer - logprobs: + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + logprobs: &chat_completion_response_logprobs + description: Log probability information for the choice. type: object nullable: true properties: - text_offset: - type: array - items: - type: integer - token_logprobs: - type: array - items: - type: number - tokens: + content: + description: A list of message content tokens with log probability information. type: array items: - type: string - top_logprobs: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + refusal: + description: A list of message refusal tokens with log probability information. type: array items: - type: object - additionalProperties: - type: number - text: - type: string + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + required: + - content + - refusal + created: type: integer - description: The Unix timestamp (in seconds) of when the completion was created. + description: The Unix timestamp (in seconds) of when the chat completion was created. model: type: string - description: The model used for completion. + description: The model used for the chat completion. + service_tier: + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + type: string + enum: [ "scale", "default" ] + example: "scale" + nullable: true system_fingerprint: type: string description: | @@ -7578,1611 +9918,1489 @@ components: Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. object: type: string - description: The object type, which is always "text_completion" - enum: [ text_completion ] + description: The object type, which is always `chat.completion`. 
+ enum: [ chat.completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: - - id - - object + - choices - created + - id - model - - choices + - object x-oaiMeta: - name: The completion object - legacy: true - example: | - { - "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", - "object": "text_completion", - "created": 1589478378, - "model": "gpt-4-turbo", - "choices": [ - { - "text": "\n\nThis is indeed a test", - "index": 0, - "logprobs": null, - "finish_reason": "length" - } - ], - "usage": { - "prompt_tokens": 5, - "completion_tokens": 7, - "total_tokens": 12 - } - } + name: The chat completion object + group: chat + example: *chat_completion_example - ChatCompletionRequestMessageContentPartText: + CreateChatCompletionFunctionResponse: type: object - title: Text content part + description: Represents a chat completion response returned by model, based on the provided input. properties: - type: + id: type: string - enum: [ "text" ] - description: The type of the content part. - text: + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: + &chat_completion_function_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. + enum: + [ "stop", "length", "function_call", "content_filter" ] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: type: string - description: The text content. - required: - - type - - text - - ChatCompletionRequestMessageContentPartImage: - type: object - title: Image content part - properties: - type: + description: The model used for the chat completion. + system_fingerprint: type: string - enum: [ "image_url" ] - description: The type of the content part. - image_url: - type: object - properties: - url: - type: string - description: Either a URL of the image or the base64 encoded image data. - format: uri - detail: - type: string - description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). - enum: [ "auto", "low", "high" ] - default: "auto" - required: - - url + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion`. 
+ enum: [ chat.completion ] + usage: + $ref: "#/components/schemas/CompletionUsage" required: - - type - - image_url + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_function_example - ChatCompletionRequestMessageContentPartRefusal: + ChatCompletionTokenLogprob: type: object - title: Refusal content part properties: - type: - type: string - enum: [ "refusal" ] - description: The type of the content part. - refusal: + token: &chat_completion_response_logprobs_token + description: The token. type: string - description: The refusal message generated by the model. + logprob: &chat_completion_response_logprobs_token_logprob + description: The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. + type: number + bytes: &chat_completion_response_logprobs_bytes + description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + type: array + items: + type: integer + nullable: true + top_logprobs: + description: List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. + type: array + items: + type: object + properties: + token: *chat_completion_response_logprobs_token + logprob: *chat_completion_response_logprobs_token_logprob + bytes: *chat_completion_response_logprobs_bytes + required: + - token + - logprob + - bytes required: - - type - - refusal - - ChatCompletionRequestMessage: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" - - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" - - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" - - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" - - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" - x-oaiExpandable: true - - ChatCompletionRequestSystemMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - x-oaiExpandable: true - - ChatCompletionRequestUserMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" - x-oaiExpandable: true - - ChatCompletionRequestAssistantMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartRefusal" - x-oaiExpandable: true - - ChatCompletionRequestToolMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - x-oaiExpandable: true + - token + - logprob + - bytes + - top_logprobs - ChatCompletionRequestSystemMessage: + ListPaginatedFineTuningJobsResponse: type: object - title: System message properties: - content: - description: The contents of the system message. - oneOf: - - type: string - description: The contents of the system message. - title: Text content - - type: array - description: An array of content parts with a defined type. For system messages, only type `text` is supported. 
- title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestSystemMessageContentPart" - minItems: 1 - role: - type: string - enum: [ "system" ] - description: The role of the messages author, in this case `system`. - name: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJob" + has_more: + type: boolean + object: type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + enum: [ list ] required: - - content - - role + - object + - data + - has_more - ChatCompletionRequestUserMessage: + CreateChatCompletionStreamResponse: type: object - title: User message + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. properties: - content: + id: + type: string + description: A unique identifier for the chat completion. Each chunk has the same ID. + choices: + type: array description: | - The contents of the user message. - oneOf: - - type: string - description: The text contents of the message. - title: Text content - - type: array - description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. - title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestUserMessageContentPart" - minItems: 1 - x-oaiExpandable: true - role: + A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the + last chunk if you set `stream_options: {"include_usage": true}`. + items: + type: object + required: + - delta + - finish_reason + - index + properties: + delta: + $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" + logprobs: *chat_completion_response_logprobs + finish_reason: + type: string + description: *chat_completion_finish_reason_description + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] + nullable: true + index: + type: integer + description: The index of the choice in the list of choices. + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + model: type: string - enum: [ "user" ] - description: The role of the messages author, in this case `user`. - name: + description: The model to generate the completion. + service_tier: + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - required: - - content - - role - - ChatCompletionRequestAssistantMessage: - type: object - title: Assistant message - properties: - content: - nullable: true - oneOf: - - type: string - description: The contents of the assistant message. - title: Text content - - type: array - description: An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. - title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessageContentPart" - minItems: 1 - description: | - The contents of the assistant message. 
Required unless `tool_calls` or `function_call` is specified. - refusal: + enum: [ "scale", "default" ] + example: "scale" nullable: true + system_fingerprint: type: string - description: The refusal message by the assistant. - role: - type: string - enum: [ "assistant" ] - description: The role of the messages author, in this case `assistant`. - name: + description: | + This fingerprint represents the backend configuration that the model runs with. + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - tool_calls: - $ref: "#/components/schemas/ChatCompletionMessageToolCalls" - function_call: + description: The object type, which is always `chat.completion.chunk`. + enum: [ chat.completion.chunk ] + usage: type: object - deprecated: true - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - nullable: true - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - required: - - arguments - - name - required: - - role - - FineTuneChatCompletionRequestAssistantMessage: - allOf: - - type: object - title: Assistant message - deprecated: false + description: | + An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. + When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request. properties: - weight: + completion_tokens: type: integer - enum: [ 0, 1 ] - description: "Controls whether the assistant message is trained against (0 or 1)" - - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens required: - - role + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: *chat_completion_chunk_example - ChatCompletionRequestToolMessage: + CreateChatCompletionImageResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: *chat_completion_image_example + + CreateImageRequest: type: object - title: Tool message properties: - role: + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. type: string - enum: [ "tool" ] - description: The role of the messages author, in this case `tool`. - content: - oneOf: + example: "A cute baby sea otter" + model: + anyOf: - type: string - description: The contents of the tool message. 
- title: Text content - - type: array - description: An array of content parts with a defined type. For tool messages, only type `text` is supported. - title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestToolMessageContentPart" - minItems: 1 - description: The contents of the tool message. - tool_call_id: + - type: string + enum: [ "dall-e-2", "dall-e-3" ] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-3" + nullable: true + description: The model to use for image generation. + n: &images_n + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. + quality: type: string - description: Tool call that this message is responding to. - required: - - role - - content - - tool_call_id - - ChatCompletionRequestFunctionMessage: - type: object - title: Function message - deprecated: true - properties: - role: + enum: [ "standard", "hd" ] + default: "standard" + example: "standard" + description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. + response_format: &images_response_format type: string - enum: [ "function" ] - description: The role of the messages author, in this case `function`. - content: + enum: [ "url", "b64_json" ] + default: "url" + example: "url" nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. + size: &images_size type: string - description: The contents of the function message. - name: + enum: [ "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792" ] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + style: type: string - description: The name of the function to call. + enum: [ "vivid", "natural" ] + default: "vivid" + example: "vivid" + nullable: true + description: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + user: *end_user_param_configuration required: - - role - - content - - name - - FunctionParameters: - type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." - additionalProperties: true + - prompt - ChatCompletionFunctions: - type: object - deprecated: true + ImagesResponse: properties: - description: - type: string - description: A description of what the function does, used by the model to choose when and how to call the function. - name: - type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 
- parameters: - $ref: "#/components/schemas/FunctionParameters" + created: + type: integer + data: + type: array + items: + $ref: "#/components/schemas/Image" required: - - name + - created + - data - ChatCompletionFunctionCallOption: + Image: type: object - description: > - Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + description: Represents the url or the content of an image generated by the OpenAI API. properties: - name: + b64_json: type: string - description: The name of the function to call. - required: - - name + description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + url: + type: string + description: The URL of the generated image, if `response_format` is `url` (default). + revised_prompt: + type: string + description: The prompt that was used to generate the image, if there was any revision to the prompt. + x-oaiMeta: + name: The image object + example: | + { + "url": "...", + "revised_prompt": "..." + } - ChatCompletionTool: + CreateImageEditRequest: type: object properties: - type: + image: + description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. type: string - enum: [ "function" ] - description: The type of the tool. Currently, only `function` is supported. - function: - $ref: "#/components/schemas/FunctionObject" + format: binary + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters. + type: string + example: "A cute baby sea otter wearing a beret" + mask: + description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: [ "dall-e-2" ] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. + size: &dalle2_images_size + type: string + enum: [ "256x256", "512x512", "1024x1024" ] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + response_format: *images_response_format + user: *end_user_param_configuration required: - - type - - function + - prompt + - image - FunctionObject: + CreateImageVariationRequest: type: object properties: - description: - type: string - description: A description of what the function does, used by the model to choose when and how to call the function. - name: + image: + description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 
- parameters: - $ref: "#/components/schemas/FunctionParameters" - strict: - type: boolean + format: binary + model: + anyOf: + - type: string + - type: string + enum: [ "dall-e-2" ] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" nullable: true - default: false - description: Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling). + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: *images_n + response_format: *images_response_format + size: *dalle2_images_size + user: *end_user_param_configuration required: - - name + - image - ResponseFormatText: + CreateModerationRequest: type: object properties: - type: - type: string - description: "The type of response format being defined: `text`" - enum: [ "text" ] + input: + description: The input text to classify + oneOf: + - type: string + default: "" + example: "I want to kill them." + - type: array + items: + type: string + default: "" + example: "I want to kill them." + model: + description: | + Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + + The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + nullable: false + default: "text-moderation-latest" + example: "text-moderation-stable" + anyOf: + - type: string + - type: string + enum: [ "text-moderation-latest", "text-moderation-stable" ] + x-oaiTypeLabel: string required: - - type + - input - ResponseFormatJsonObject: + CreateModerationResponse: type: object + description: Represents if a given text input is potentially harmful. properties: - type: + id: type: string - description: "The type of response format being defined: `json_object`" - enum: [ "json_object" ] - required: - - type - - ResponseFormatJsonSchemaSchema: - type: object - description: "The schema for the response format, described as a JSON Schema object." - additionalProperties: true - - ResponseFormatJsonSchema: - type: object - properties: - type: + description: The unique identifier for the moderation request. + model: type: string - description: 'The type of response format being defined: `json_schema`' - enum: [ 'json_schema' ] - json_schema: - type: object - properties: - description: - type: string - description: A description of what the response format is for, used by the model to determine how to respond in the format. - name: - type: string - description: The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - schema: - $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' - strict: - type: boolean - nullable: true - default: false - description: Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). 
- required: - - type - - name + description: The model used to generate the moderation results. + results: + type: array + description: A list of moderation objects. + items: + type: object + properties: + flagged: + type: boolean + description: Whether any of the below categories are flagged. + categories: + type: object + description: A list of the categories, and whether they are flagged or not. + properties: + hate: + type: boolean + description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. + hate/threatening: + type: boolean + description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + harassment: + type: boolean + description: Content that expresses, incites, or promotes harassing language towards any target. + harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm towards any target. + self-harm: + type: boolean + description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/intent: + type: boolean + description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/instructions: + type: boolean + description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + sexual: + type: boolean + description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic detail. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + category_scores: + type: object + description: A list of the categories along with their scores as predicted by model. + properties: + hate: + type: number + description: The score for the category 'hate'. + hate/threatening: + type: number + description: The score for the category 'hate/threatening'. + harassment: + type: number + description: The score for the category 'harassment'. + harassment/threatening: + type: number + description: The score for the category 'harassment/threatening'. + self-harm: + type: number + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + description: The score for the category 'self-harm/instructions'. + sexual: + type: number + description: The score for the category 'sexual'. + sexual/minors: + type: number + description: The score for the category 'sexual/minors'. 
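To make the `CreateModerationRequest` / `CreateModerationResponse` pair above concrete, a hedged sketch of a single-string classification call follows, showing where `flagged`, `categories`, and `category_scores` sit in the result. The `/v1/moderations` path and API-key handling are assumptions, not something this hunk defines.

```dart
// Minimal sketch of CreateModerationRequest / CreateModerationResponse usage.
// The endpoint path and API-key handling are assumptions.
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY'];
  final res = await http.post(
    Uri.parse('https://api.openai.com/v1/moderations'), // assumed path
    headers: {
      'Authorization': 'Bearer $apiKey',
      'Content-Type': 'application/json',
    },
    body: jsonEncode({
      'input': 'Some text to classify',       // string or array of strings
      'model': 'text-moderation-latest',      // optional; this is the default
    }),
  );
  final body = jsonDecode(res.body) as Map<String, dynamic>;
  final result = body['results'][0] as Map<String, dynamic>;
  print(result['flagged']);                       // bool
  print(result['category_scores']['harassment']); // one score per category
}
```

Each entry in `results` carries the full set of category booleans and scores listed in the schema, so downstream code can threshold individual categories rather than relying only on `flagged`.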
+ violence: + type: number + description: The score for the category 'violence'. + violence/graphic: + type: number + description: The score for the category 'violence/graphic'. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + required: + - flagged + - categories + - category_scores required: - - type - - json_schema - - ChatCompletionToolChoiceOption: - description: | - Controls which (if any) tool is called by the model. - `none` means the model will not call any tool and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools. - Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools are present. - oneOf: - - type: string - description: > - `none` means the model will not call any tool and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools. - enum: [ none, auto, required ] - - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" - x-oaiExpandable: true + - id + - model + - results + x-oaiMeta: + name: The moderation object + example: *moderation_example - ChatCompletionNamedToolChoice: + ListFilesResponse: type: object - description: Specifies a tool the model should use. Use to force the model to call a specific function. properties: - type: + data: + type: array + items: + $ref: "#/components/schemas/OpenAIFile" + object: type: string - enum: [ "function" ] - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - required: - - name + enum: [ list ] required: - - type - - function - - ParallelToolCalls: - description: Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. - type: boolean - default: true - - ChatCompletionMessageToolCalls: - type: array - description: The tool calls generated by the model, such as function calls. - items: - $ref: "#/components/schemas/ChatCompletionMessageToolCall" + - object + - data - ChatCompletionMessageToolCall: + CreateFileRequest: type: object + additionalProperties: false properties: - # TODO: index included when streaming - id: - type: string - description: The ID of the tool call. - type: + file: + description: | + The File object (not file name) to be uploaded. type: string - enum: [ "function" ] - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - description: The function that the model called. - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - required: - - name - - arguments + format: binary + purpose: + description: | + The intended purpose of the uploaded file. 
+ + Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). + type: string + enum: [ "assistants", "batch", "fine-tune", "vision" ] required: - - id - - type - - function + - file + - purpose - ChatCompletionMessageToolCallChunk: + DeleteFileResponse: type: object properties: - index: - type: integer id: type: string - description: The ID of the tool call. - type: + object: type: string - enum: [ "function" ] - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + enum: [ file ] + deleted: + type: boolean required: - - index - - # Note, this isn't referenced anywhere, but is kept as a convenience to record all possible roles in one place. - ChatCompletionRole: - type: string - description: The role of the author of a message - enum: - - system - - user - - assistant - - tool - - function + - id + - object + - deleted - ChatCompletionStreamOptions: - description: | - Options for streaming response. Only set this when you set `stream: true`. + CreateUploadRequest: type: object - nullable: true - default: null + additionalProperties: false properties: - include_usage: - type: boolean + filename: description: | - If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. - - ChatCompletionResponseMessage: - type: object - description: A chat completion message generated by the model. - properties: - content: + The name of the file to upload. type: string - description: The contents of the message. - nullable: true - refusal: + purpose: + description: | + The intended purpose of the uploaded file. + + See the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). type: string - description: The refusal message generated by the model. - nullable: true - tool_calls: - $ref: "#/components/schemas/ChatCompletionMessageToolCalls" - role: + enum: [ "assistants", "batch", "fine-tune", "vision" ] + bytes: + description: | + The number of bytes in the file you are uploading. + type: integer + mime_type: + description: | + The MIME type of the file. + + This must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision. type: string - enum: [ "assistant" ] - description: The role of the author of this message. - function_call: - type: object - deprecated: true - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - required: - - name - - arguments required: - - role - - content - - refusal + - filename + - purpose + - bytes + - mime_type - ChatCompletionStreamResponseDelta: + AddUploadPartRequest: type: object - description: A chat completion delta generated by streamed model responses. + additionalProperties: false properties: - content: + data: + description: | + The chunk of bytes for this Part. type: string - description: The contents of the chunk message. - nullable: true - function_call: - deprecated: true - type: object - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - tool_calls: + format: binary + required: + - data + + CompleteUploadRequest: + type: object + additionalProperties: false + properties: + part_ids: type: array + description: | + The ordered list of Part IDs. items: - $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" - role: - type: string - enum: [ "system", "user", "assistant", "tool" ] - description: The role of the author of this message. - refusal: + type: string + md5: + description: | + The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect. type: string - description: The refusal message generated by the model. - nullable: true + required: + - part_ids - CreateChatCompletionRequest: + CancelUploadRequest: + type: object + additionalProperties: false + + CreateFineTuningJobRequest: type: object properties: - messages: - description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). - type: array - minItems: 1 - items: - $ref: "#/components/schemas/ChatCompletionRequestMessage" model: - description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. - example: "gpt-4o" + description: | + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned). 
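The upload schemas added just above (`CreateUploadRequest`, `AddUploadPartRequest`, `CompleteUploadRequest`) describe a three-step flow: declare the upload, send byte chunks as Parts, then complete with the ordered Part IDs. The sketch below illustrates that flow under stated assumptions: the `/v1/uploads*` paths, the MIME type value, and the single-part chunking are not taken from this hunk.

```dart
// Hedged sketch of the upload-in-parts flow described by CreateUploadRequest,
// AddUploadPartRequest and CompleteUploadRequest. The /v1/uploads* paths and
// the MIME type value are assumptions; the body fields come from the schemas.
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY'];
  final auth = {'Authorization': 'Bearer $apiKey'};

  // 1. CreateUploadRequest: declare filename, purpose, bytes and mime_type.
  final bytes = await File('training_examples.jsonl').readAsBytes();
  final created = await http.post(
    Uri.parse('https://api.openai.com/v1/uploads'), // assumed path
    headers: {...auth, 'Content-Type': 'application/json'},
    body: jsonEncode({
      'filename': 'training_examples.jsonl',
      'purpose': 'fine-tune',
      'bytes': bytes.length,
      'mime_type': 'text/jsonl', // assumed MIME type for the fine-tune purpose
    }),
  );
  final uploadId = jsonDecode(created.body)['id'] as String;

  // 2. AddUploadPartRequest: send each chunk as the binary `data` field.
  final partReq = http.MultipartRequest(
    'POST',
    Uri.parse('https://api.openai.com/v1/uploads/$uploadId/parts'), // assumed
  )
    ..headers.addAll(auth)
    ..files.add(http.MultipartFile.fromBytes('data', bytes));
  final part = await http.Response.fromStream(await partReq.send());
  final partId = jsonDecode(part.body)['id'] as String;

  // 3. CompleteUploadRequest: pass the ordered list of Part IDs.
  await http.post(
    Uri.parse('https://api.openai.com/v1/uploads/$uploadId/complete'), // assumed
    headers: {...auth, 'Content-Type': 'application/json'},
    body: jsonEncode({'part_ids': [partId]}),
  );
}
```

A real client would split large files into multiple Parts and preserve their order in `part_ids`; the optional `md5` field can be added to verify the assembled bytes.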
+ example: "gpt-4o-mini" anyOf: - type: string - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "chatgpt-4o-latest", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - frequency_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: *completions_frequency_penalty_description - logit_bias: - type: object - x-oaiTypeLabel: map - default: null - nullable: true - additionalProperties: - type: integer - description: | - Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - logprobs: - description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. - type: boolean - default: false - nullable: true - top_logprobs: - description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. - type: integer - minimum: 0 - maximum: 20 - nullable: true - max_tokens: - description: | - The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. - - The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. - type: integer - nullable: true - n: - type: integer - minimum: 1 - maximum: 128 - default: 1 - example: 1 - nullable: true - description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. - presence_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: *completions_presence_penalty_description - response_format: + enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] + x-oaiTypeLabel: string + training_file: description: | - An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + The ID of an uploaded file that contains training data. - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). 
+ See [upload file](/docs/api-reference/files/create) for how to upload a file. - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - oneOf: - - $ref: "#/components/schemas/ResponseFormatText" - - $ref: "#/components/schemas/ResponseFormatJsonObject" - - $ref: "#/components/schemas/ResponseFormatJsonSchema" - x-oaiExpandable: true - seed: - type: integer - minimum: -9223372036854775808 - maximum: 9223372036854775807 - nullable: true - description: | - This feature is in Beta. - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - x-oaiMeta: - beta: true - service_tier: - description: | - Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - - When not set, the default behavior is 'auto'. + The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format. - When this parameter is set, the response body will include the `service_tier` utilized. + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string - enum: [ "auto", "default" ] - nullable: true - default: null - stop: + example: "file-abc123" + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. + properties: + batch_size: + description: | + Number of examples in each batch. A larger batch size means that model parameters + are updated less frequently, but with lower variance. + oneOf: + - type: string + enum: [ auto ] + - type: integer + minimum: 1 + maximum: 256 + default: auto + learning_rate_multiplier: + description: | + Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + overfitting. + oneOf: + - type: string + enum: [ auto ] + - type: number + minimum: 0 + exclusiveMinimum: true + default: auto + n_epochs: + description: | + The number of epochs to train the model for. An epoch refers to one full cycle + through the training dataset. + oneOf: + - type: string + enum: [ auto ] + - type: integer + minimum: 1 + maximum: 50 + default: auto + suffix: description: | - Up to 4 sequences where the API will stop generating further tokens. + A string of up to 18 characters that will be added to your fine-tuned model name. 
+ + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + type: string + minLength: 1 + maxLength: 40 default: null - oneOf: - - type: string - nullable: true - - type: array - minItems: 1 - maxItems: 4 - items: - type: string - stream: - description: > - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). - type: boolean - nullable: true - default: false - stream_options: - $ref: "#/components/schemas/ChatCompletionStreamOptions" - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *completions_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 nullable: true - description: *completions_top_p_description - tools: - type: array - description: > - A list of tools the model may call. Currently, only functions are supported as a tool. - Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. - items: - $ref: "#/components/schemas/ChatCompletionTool" - tool_choice: - $ref: "#/components/schemas/ChatCompletionToolChoiceOption" - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - user: *end_user_param_configuration - function_call: - deprecated: true + validation_file: description: | - Deprecated in favor of `tool_choice`. + The ID of an uploaded file that contains validation data. - Controls which (if any) function is called by the model. - `none` means the model will not call a function and instead generates a message. - `auto` means the model can pick between generating a message or calling a function. - Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + If you provide this file, the data is used to generate validation + metrics periodically during fine-tuning. These metrics can be viewed in + the fine-tuning results file. + The same data should not be present in both train and validation files. - `none` is the default when no functions are present. `auto` is the default if functions are present. - oneOf: - - type: string - description: > - `none` means the model will not call a function and instead generates a message. - `auto` means the model can pick between generating a message or calling a function. - enum: [ none, auto ] - - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" - x-oaiExpandable: true - functions: - deprecated: true - description: | - Deprecated in favor of `tools`. + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - A list of functions the model may generate JSON inputs for. - type: array - minItems: 1 - maxItems: 128 - items: - $ref: "#/components/schemas/ChatCompletionFunctions" - - required: - - model - - messages - - CreateChatCompletionResponse: - type: object - description: Represents a chat completion response returned by model, based on the provided input. - properties: - id: + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string - description: A unique identifier for the chat completion. 
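Pulling together the `CreateFineTuningJobRequest` fields introduced above (model, `training_file`, `hyperparameters`, `suffix`, `seed`), a minimal request body could look like the sketch below. The `/v1/fine_tuning/jobs` path and the `file-abc123` ID are assumptions used only for illustration.

```dart
// Hedged sketch of a CreateFineTuningJobRequest body. The job-creation path
// and the training file ID are assumptions; field names mirror the schema.
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY'];
  final res = await http.post(
    Uri.parse('https://api.openai.com/v1/fine_tuning/jobs'), // assumed path
    headers: {
      'Authorization': 'Bearer $apiKey',
      'Content-Type': 'application/json',
    },
    body: jsonEncode({
      'model': 'gpt-4o-mini',
      'training_file': 'file-abc123', // JSONL file uploaded with purpose "fine-tune"
      'suffix': 'custom-model-name',
      'hyperparameters': {
        'n_epochs': 'auto',           // "auto" or an integer 1..50
        'batch_size': 'auto',
        'learning_rate_multiplier': 'auto',
      },
      'seed': 42,                     // optional, for reproducibility
    }),
  );
  print(jsonDecode(res.body)['id']);  // the fine_tuning.job identifier
}
```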
- choices: + nullable: true + example: "file-abc123" + integrations: type: array - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + description: A list of integrations to enable for your fine-tuning job. + nullable: true items: type: object required: - - finish_reason - - index - - message - - logprobs + - type + - wandb properties: - finish_reason: - type: string - description: &chat_completion_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, - `length` if the maximum number of tokens specified in the request was reached, - `content_filter` if content was omitted due to a flag from our content filters, - `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. - enum: - [ - "stop", - "length", - "tool_calls", - "content_filter", - "function_call", - ] - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: "#/components/schemas/ChatCompletionResponseMessage" - logprobs: &chat_completion_response_logprobs - description: Log probability information for the choice. + type: + description: | + The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. + oneOf: + - type: string + enum: [ wandb ] + wandb: type: object - nullable: true + description: | + The settings for your integration with Weights and Biases. This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + required: + - project properties: - content: - description: A list of message content tokens with log probability information. - type: array - items: - $ref: "#/components/schemas/ChatCompletionTokenLogprob" + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" + name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. nullable: true - refusal: - description: A list of message refusal tokens with log probability information. + type: string + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true + type: string + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". type: array items: - $ref: "#/components/schemas/ChatCompletionTokenLogprob" - nullable: true - required: - - content - - refusal + type: string + example: "custom-tag" - created: + seed: + description: | + The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. + If a seed is not specified, one will be generated for you. type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. - service_tier: - description: The service tier used for processing the request. 
This field is only included if the `service_tier` parameter is specified in the request. - type: string - enum: [ "scale", "default" ] - example: "scale" nullable: true - system_fingerprint: - type: string - description: | - This fingerprint represents the backend configuration that the model runs with. - - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. - object: - type: string - description: The object type, which is always `chat.completion`. - enum: [ chat.completion ] - usage: - $ref: "#/components/schemas/CompletionUsage" + minimum: 0 + maximum: 2147483647 + example: 42 required: - - choices - - created - - id - model - - object - x-oaiMeta: - name: The chat completion object - group: chat - example: *chat_completion_example + - training_file - CreateChatCompletionFunctionResponse: + ListFineTuningJobEventsResponse: type: object - description: Represents a chat completion response returned by model, based on the provided input. properties: - id: - type: string - description: A unique identifier for the chat completion. - choices: + data: type: array - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. items: - type: object - required: - - finish_reason - - index - - message - - logprobs - properties: - finish_reason: - type: string - description: - &chat_completion_function_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. - enum: - [ "stop", "length", "function_call", "content_filter" ] - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: "#/components/schemas/ChatCompletionResponseMessage" - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. - system_fingerprint: - type: string - description: | - This fingerprint represents the backend configuration that the model runs with. - - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + $ref: "#/components/schemas/FineTuningJobEvent" object: type: string - description: The object type, which is always `chat.completion`. - enum: [ chat.completion ] - usage: - $ref: "#/components/schemas/CompletionUsage" + enum: [ list ] required: - - choices - - created - - id - - model - object - x-oaiMeta: - name: The chat completion object - group: chat - example: *chat_completion_function_example - - ChatCompletionTokenLogprob: - type: object - properties: - token: &chat_completion_response_logprobs_token - description: The token. - type: string - logprob: &chat_completion_response_logprobs_token_logprob - description: The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. - type: number - bytes: &chat_completion_response_logprobs_bytes - description: A list of integers representing the UTF-8 bytes representation of the token. 
Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - type: array - items: - type: integer - nullable: true - top_logprobs: - description: List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. - type: array - items: - type: object - properties: - token: *chat_completion_response_logprobs_token - logprob: *chat_completion_response_logprobs_token_logprob - bytes: *chat_completion_response_logprobs_bytes - required: - - token - - logprob - - bytes - required: - - token - - logprob - - bytes - - top_logprobs + - data - ListPaginatedFineTuningJobsResponse: + ListFineTuningJobCheckpointsResponse: type: object properties: data: type: array items: - $ref: "#/components/schemas/FineTuningJob" - has_more: - type: boolean + $ref: "#/components/schemas/FineTuningJobCheckpoint" object: type: string enum: [ list ] + first_id: + type: string + nullable: true + last_id: + type: string + nullable: true + has_more: + type: boolean required: - object - data - has_more - CreateChatCompletionStreamResponse: + CreateEmbeddingRequest: type: object - description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + additionalProperties: false properties: - id: - type: string - description: A unique identifier for the chat completion. Each chunk has the same ID. - choices: - type: array + input: description: | - A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the - last chunk if you set `stream_options: {"include_usage": true}`. - items: - type: object - required: - - delta - - finish_reason - - index - properties: - delta: - $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" - logprobs: *chat_completion_response_logprobs - finish_reason: + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + example: "The quick brown fox jumped over the lazy dog" + oneOf: + - type: string + title: string + description: The string that will be turned into an embedding. + default: "" + example: "This is a test." + - type: array + title: array + description: The array of strings that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: type: string - description: *chat_completion_finish_reason_description - enum: - [ - "stop", - "length", - "tool_calls", - "content_filter", - "function_call", - ] - nullable: true - index: + default: "" + example: "['This is a test.']" + - type: array + title: array + description: The array of integers that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: type: integer - description: The index of the choice in the list of choices. - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. 
+ example: "[1212, 318, 257, 1332, 13]" + - type: array + title: array + description: The array of arrays containing integers that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + x-oaiExpandable: true model: + description: *model_description + example: "text-embedding-3-small" + anyOf: + - type: string + - type: string + enum: + [ + "text-embedding-ada-002", + "text-embedding-3-small", + "text-embedding-3-large", + ] + x-oaiTypeLabel: string + encoding_format: + description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." + example: "float" + default: "float" type: string - description: The model to generate the completion. - service_tier: - description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. - type: string - enum: [ "scale", "default" ] - example: "scale" - nullable: true - system_fingerprint: + enum: [ "float", "base64" ] + dimensions: + description: | + The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. + type: integer + minimum: 1 + user: *end_user_param_configuration + required: + - model + - input + + CreateEmbeddingResponse: + type: object + properties: + data: + type: array + description: The list of embeddings generated by the model. + items: + $ref: "#/components/schemas/Embedding" + model: type: string - description: | - This fingerprint represents the backend configuration that the model runs with. - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + description: The name of the model used to generate the embedding. object: type: string - description: The object type, which is always `chat.completion.chunk`. - enum: [ chat.completion.chunk ] + description: The object type, which is always "list". + enum: [ list ] usage: type: object - description: | - An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. - When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request. + description: The usage information for the request. properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. prompt_tokens: type: integer - description: Number of tokens in the prompt. + description: The number of tokens used by the prompt. total_tokens: type: integer - description: Total number of tokens used in the request (prompt + completion). + description: The total number of tokens used by the request. required: - prompt_tokens - - completion_tokens - total_tokens required: - - choices - - created - - id - - model - object - x-oaiMeta: - name: The chat completion chunk object - group: chat - example: *chat_completion_chunk_example - - CreateChatCompletionImageResponse: - type: object - description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. 
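As a companion to the `CreateEmbeddingRequest` / `CreateEmbeddingResponse` schemas above, here is a hedged round-trip sketch using a single string input. The `/v1/embeddings` path is an assumption; the body fields (`input`, `model`, `encoding_format`, `dimensions`) mirror the schema.

```dart
// Hedged sketch of an embeddings request and response. The endpoint path is
// an assumption; field names come from CreateEmbeddingRequest above.
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY'];
  final res = await http.post(
    Uri.parse('https://api.openai.com/v1/embeddings'), // assumed path
    headers: {
      'Authorization': 'Bearer $apiKey',
      'Content-Type': 'application/json',
    },
    body: jsonEncode({
      'input': 'The quick brown fox jumped over the lazy dog',
      'model': 'text-embedding-3-small',
      'encoding_format': 'float', // or "base64"
      'dimensions': 256,          // only for text-embedding-3 and later models
    }),
  );
  final body = jsonDecode(res.body) as Map<String, dynamic>;
  final embedding = (body['data'][0]['embedding'] as List).cast<num>();
  print('${embedding.length} floats, '
      '${body['usage']['prompt_tokens']} prompt tokens');
}
```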
- x-oaiMeta: - name: The chat completion chunk object - group: chat - example: *chat_completion_image_example + - model + - data + - usage - CreateImageRequest: + CreateTranscriptionRequest: type: object + additionalProperties: false properties: - prompt: - description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. + file: + description: | + The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. type: string - example: "A cute baby sea otter" + x-oaiTypeLabel: file + format: binary model: + description: | + ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. + example: whisper-1 anyOf: - type: string - type: string - enum: [ "dall-e-2", "dall-e-3" ] + enum: [ "whisper-1" ] x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-3" - nullable: true - description: The model to use for image generation. - n: &images_n - type: integer - minimum: 1 - maximum: 10 - default: 1 - example: 1 - nullable: true - description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. - quality: + language: + description: | + The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. type: string - enum: [ "standard", "hd" ] - default: "standard" - example: "standard" - description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. - response_format: &images_response_format + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. type: string - enum: [ "url", "b64_json" ] - default: "url" - example: "url" - nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. - size: &images_size + response_format: + description: | + The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. type: string - enum: [ "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792" ] - default: "1024x1024" - example: "1024x1024" - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. - style: + enum: + - json + - text + - srt + - verbose_json + - vtt + default: json + temperature: + description: | + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + type: number + default: 0 + timestamp_granularities[]: + description: | + The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. 
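For the `CreateImageRequest` schema earlier in this hunk (prompt, model, `n`, `quality`, `size`, `style`, `response_format`), a hedged generation sketch is shown below. The `/v1/images/generations` path is an assumption and the parameter values are only examples.

```dart
// Hedged sketch of a CreateImageRequest body for the dall-e-3 fields shown
// earlier in this hunk. The endpoint path is an assumption.
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY'];
  final res = await http.post(
    Uri.parse('https://api.openai.com/v1/images/generations'), // assumed path
    headers: {
      'Authorization': 'Bearer $apiKey',
      'Content-Type': 'application/json',
    },
    body: jsonEncode({
      'prompt': 'A cute baby sea otter',
      'model': 'dall-e-3',
      'n': 1,                   // dall-e-3 only supports n=1
      'quality': 'hd',
      'size': '1024x1024',
      'style': 'natural',
      'response_format': 'url', // URLs expire 60 minutes after generation
    }),
  );
  // ImagesResponse.data entries may include a revised_prompt for dall-e-3.
  print(jsonDecode(res.body)['data'][0]['url']);
}
```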
Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. + type: array + items: + type: string + enum: + - word + - segment + default: [ segment ] + required: + - file + - model + + # Note: This does not currently support the non-default response format types. + CreateTranscriptionResponseJson: + type: object + description: Represents a transcription response returned by model, based on the provided input. + properties: + text: type: string - enum: [ "vivid", "natural" ] - default: "vivid" - example: "vivid" - nullable: true - description: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. - user: *end_user_param_configuration + description: The transcribed text. required: - - prompt + - text + x-oaiMeta: + name: The transcription object (JSON) + group: audio + example: *basic_transcription_response_example - ImagesResponse: + TranscriptionSegment: + type: object properties: - created: + id: type: integer - data: + description: Unique identifier of the segment. + seek: + type: integer + description: Seek offset of the segment. + start: + type: number + format: float + description: Start time of the segment in seconds. + end: + type: number + format: float + description: End time of the segment in seconds. + text: + type: string + description: Text content of the segment. + tokens: type: array items: - $ref: "#/components/schemas/Image" + type: integer + description: Array of token IDs for the text content. + temperature: + type: number + format: float + description: Temperature parameter used for generating the segment. + avg_logprob: + type: number + format: float + description: Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. + compression_ratio: + type: number + format: float + description: Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. + no_speech_prob: + type: number + format: float + description: Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. required: - - created - - data + - id + - seek + - start + - end + - text + - tokens + - temperature + - avg_logprob + - compression_ratio + - no_speech_prob - Image: + TranscriptionWord: type: object - description: Represents the url or the content of an image generated by the OpenAI API. properties: - b64_json: - type: string - description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. - url: - type: string - description: The URL of the generated image, if `response_format` is `url` (default). - revised_prompt: + word: type: string - description: The prompt that was used to generate the image, if there was any revision to the prompt. - x-oaiMeta: - name: The image object - example: | - { - "url": "...", - "revised_prompt": "..." - } + description: The text content of the word. + start: + type: number + format: float + description: Start time of the word in seconds. + end: + type: number + format: float + description: End time of the word in seconds. 
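The `CreateTranscriptionRequest` schema above is a multipart form, so a request carrying `timestamp_granularities[]` and `verbose_json` output could look like the sketch below. The `/v1/audio/transcriptions` path and the local audio file name are assumptions.

```dart
// Hedged sketch of a CreateTranscriptionRequest as multipart form data,
// requesting verbose_json with word-level timestamps. The endpoint path and
// file name are assumptions.
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY'];
  final request = http.MultipartRequest(
    'POST',
    Uri.parse('https://api.openai.com/v1/audio/transcriptions'), // assumed path
  )
    ..headers['Authorization'] = 'Bearer $apiKey'
    ..fields['model'] = 'whisper-1'
    ..fields['language'] = 'en'
    ..fields['response_format'] = 'verbose_json'
    // timestamp_granularities[] requires response_format=verbose_json.
    ..fields['timestamp_granularities[]'] = 'word'
    ..files.add(await http.MultipartFile.fromPath('file', 'meeting.mp3'));
  final response = await http.Response.fromStream(await request.send());
  final body = jsonDecode(response.body) as Map<String, dynamic>;
  // CreateTranscriptionResponseVerboseJson: text plus words/segments arrays.
  print(body['text']);
  print((body['words'] as List?)?.take(3));
}
```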
+ required: [ word, start, end ] - CreateImageEditRequest: + CreateTranscriptionResponseVerboseJson: type: object + description: Represents a verbose json transcription response returned by model, based on the provided input. properties: - image: - description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. + language: type: string - format: binary - prompt: - description: A text description of the desired image(s). The maximum length is 1000 characters. + description: The language of the input audio. + duration: type: string - example: "A cute baby sea otter wearing a beret" - mask: - description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + description: The duration of the input audio. + text: + type: string + description: The transcribed text. + words: + type: array + description: Extracted words and their corresponding timestamps. + items: + $ref: "#/components/schemas/TranscriptionWord" + segments: + type: array + description: Segments of the transcribed text and their corresponding details. + items: + $ref: "#/components/schemas/TranscriptionSegment" + required: [ language, duration, text ] + x-oaiMeta: + name: The transcription object (Verbose JSON) + group: audio + example: *verbose_transcription_response_example + + CreateTranslationRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. type: string + x-oaiTypeLabel: file format: binary model: + description: | + ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. + example: whisper-1 anyOf: - type: string - type: string - enum: [ "dall-e-2" ] + enum: [ "whisper-1" ] x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-2" - nullable: true - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - n: - type: integer - minimum: 1 - maximum: 10 - default: 1 - example: 1 - nullable: true - description: The number of images to generate. Must be between 1 and 10. - size: &dalle2_images_size + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. type: string - enum: [ "256x256", "512x512", "1024x1024" ] - default: "1024x1024" - example: "1024x1024" - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - response_format: *images_response_format - user: *end_user_param_configuration + response_format: + description: | + The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. + type: string + default: json + temperature: + description: | + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. 
+ type: number + default: 0 required: - - prompt - - image + - file + - model - CreateImageVariationRequest: + # Note: This does not currently support the non-default response format types. + CreateTranslationResponseJson: type: object properties: - image: - description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. + text: type: string - format: binary - model: - anyOf: - - type: string - - type: string - enum: [ "dall-e-2" ] - x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-2" - nullable: true - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - n: *images_n - response_format: *images_response_format - size: *dalle2_images_size - user: *end_user_param_configuration required: - - image + - text - CreateModerationRequest: + CreateTranslationResponseVerboseJson: type: object properties: - input: - description: The input text to classify - oneOf: - - type: string - default: "" - example: "I want to kill them." - - type: array - items: - type: string - default: "" - example: "I want to kill them." + language: + type: string + description: The language of the output translation (always `english`). + duration: + type: string + description: The duration of the input audio. + text: + type: string + description: The translated text. + segments: + type: array + description: Segments of the translated text and their corresponding details. + items: + $ref: "#/components/schemas/TranscriptionSegment" + required: [ language, duration, text ] + + CreateSpeechRequest: + type: object + additionalProperties: false + properties: model: description: | - Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. - - The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. - nullable: false - default: "text-moderation-latest" - example: "text-moderation-stable" + One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` anyOf: - type: string - type: string - enum: [ "text-moderation-latest", "text-moderation-stable" ] + enum: [ "tts-1", "tts-1-hd" ] x-oaiTypeLabel: string - required: - - input - - CreateModerationResponse: - type: object - description: Represents if a given text input is potentially harmful. - properties: - id: + input: type: string - description: The unique identifier for the moderation request. - model: + description: The text to generate audio for. The maximum length is 4096 characters. + maxLength: 4096 + voice: + description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). type: string - description: The model used to generate the moderation results. - results: - type: array - description: A list of moderation objects. - items: - type: object - properties: - flagged: - type: boolean - description: Whether any of the below categories are flagged. - categories: - type: object - description: A list of the categories, and whether they are flagged or not. 
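The `CreateSpeechRequest` schema introduced above requires `model`, `input`, and `voice` (its `response_format` and `speed` fields appear later in this hunk). A hedged sketch follows; the `/v1/audio/speech` path and the assumption that the response body is raw audio bytes are not defined by this hunk.

```dart
// Hedged sketch of a CreateSpeechRequest call that writes the audio bytes to
// disk. The endpoint path and binary-response handling are assumptions.
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final apiKey = Platform.environment['OPENAI_API_KEY'];
  final res = await http.post(
    Uri.parse('https://api.openai.com/v1/audio/speech'), // assumed path
    headers: {
      'Authorization': 'Bearer $apiKey',
      'Content-Type': 'application/json',
    },
    body: jsonEncode({
      'model': 'tts-1',
      'input': 'The quick brown fox jumped over the lazy dog.',
      'voice': 'alloy',
    }),
  );
  // The response is audio data rather than JSON; save it as-is.
  await File('speech.mp3').writeAsBytes(res.bodyBytes);
}
```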
- properties: - hate: - type: boolean - description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. - hate/threatening: - type: boolean - description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. - harassment: - type: boolean - description: Content that expresses, incites, or promotes harassing language towards any target. - harassment/threatening: - type: boolean - description: Harassment content that also includes violence or serious harm towards any target. - self-harm: - type: boolean - description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. - self-harm/intent: - type: boolean - description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. - self-harm/instructions: - type: boolean - description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. - sexual: - type: boolean - description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). - sexual/minors: - type: boolean - description: Sexual content that includes an individual who is under 18 years old. - violence: - type: boolean - description: Content that depicts death, violence, or physical injury. - violence/graphic: - type: boolean - description: Content that depicts death, violence, or physical injury in graphic detail. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - category_scores: - type: object - description: A list of the categories along with their scores as predicted by model. - properties: - hate: - type: number - description: The score for the category 'hate'. - hate/threatening: - type: number - description: The score for the category 'hate/threatening'. - harassment: - type: number - description: The score for the category 'harassment'. - harassment/threatening: - type: number - description: The score for the category 'harassment/threatening'. - self-harm: - type: number - description: The score for the category 'self-harm'. - self-harm/intent: - type: number - description: The score for the category 'self-harm/intent'. - self-harm/instructions: - type: number - description: The score for the category 'self-harm/instructions'. - sexual: - type: number - description: The score for the category 'sexual'. - sexual/minors: - type: number - description: The score for the category 'sexual/minors'. - violence: - type: number - description: The score for the category 'violence'. - violence/graphic: - type: number - description: The score for the category 'violence/graphic'. 
- required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - required: - - flagged - - categories - - category_scores + enum: [ "alloy", "echo", "fable", "onyx", "nova", "shimmer" ] + response_format: + description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." + default: "mp3" + type: string + enum: [ "mp3", "opus", "aac", "flac", "wav", "pcm" ] + speed: + description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." + type: number + default: 1.0 + minimum: 0.25 + maximum: 4.0 required: - - id - model - - results - x-oaiMeta: - name: The moderation object - example: *moderation_example + - input + - voice - ListFilesResponse: - type: object + Model: + title: Model + description: Describes an OpenAI model offering that can be used with the API. properties: - data: - type: array - items: - $ref: "#/components/schemas/OpenAIFile" + id: + type: string + description: The model identifier, which can be referenced in the API endpoints. + created: + type: integer + description: The Unix timestamp (in seconds) when the model was created. object: type: string - enum: [ list ] + description: The object type, which is always "model". + enum: [ model ] + owned_by: + type: string + description: The organization that owns the model. required: + - id - object - - data + - created + - owned_by + x-oaiMeta: + name: The model object + example: *retrieve_model_response - CreateFileRequest: - type: object - additionalProperties: false + OpenAIFile: + title: OpenAIFile + description: The `File` object represents a document that has been uploaded to OpenAI. properties: - file: - description: | - The File object (not file name) to be uploaded. + id: type: string - format: binary + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + description: The size of the file, in bytes. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the file was created. + filename: + type: string + description: The name of the file. + object: + type: string + description: The object type, which is always `file`. + enum: [ "file" ] purpose: - description: | - The intended purpose of the uploaded file. - - Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). type: string - enum: [ "assistants", "batch", "fine-tune", "vision" ] - required: - - file - - purpose - - DeleteFileResponse: - type: object - properties: - id: + description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. + enum: + [ + "assistants", + "assistants_output", + "batch", + "batch_output", + "fine-tune", + "fine-tune-results", + "vision", + ] + status: type: string - object: + deprecated: true + description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. + enum: [ "uploaded", "processed", "error" ] + status_details: type: string - enum: [ file ] - deleted: - type: boolean + deprecated: true + description: Deprecated. 
For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. required: - id - object - - deleted - - CreateUploadRequest: + - bytes + - created_at + - filename + - purpose + - status + x-oaiMeta: + name: The file object + example: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "salesOverview.pdf", + "purpose": "assistants", + } + Upload: type: object - additionalProperties: false + title: Upload + description: | + The Upload object can accept byte chunks in the form of Parts. properties: + id: + type: string + description: The Upload unique identifier, which can be referenced in API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload was created. filename: - description: | - The name of the file to upload. type: string + description: The name of the file to be uploaded. + bytes: + type: integer + description: The intended number of bytes to be uploaded. purpose: - description: | - The intended purpose of the uploaded file. - - See the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). type: string - enum: [ "assistants", "batch", "fine-tune", "vision" ] - bytes: - description: | - The number of bytes in the file you are uploading. + description: The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values. + status: + type: string + description: The status of the Upload. + enum: [ "pending", "completed", "cancelled", "expired" ] + expires_at: type: integer - mime_type: - description: | - The MIME type of the file. - - This must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision. + description: The Unix timestamp (in seconds) for when the Upload was created. + object: type: string + description: The object type, which is always "upload". + enum: [ upload ] + file: + $ref: "#/components/schemas/OpenAIFile" + nullable: true + description: The ready File object after the Upload is completed. required: + - bytes + - created_at + - expires_at - filename + - id - purpose - - bytes - - mime_type - - AddUploadPartRequest: + - status + - step_number + x-oaiMeta: + name: The upload object + example: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "completed", + "expires_at": 1719127296, + "file": { + "id": "file-xyz321", + "object": "file", + "bytes": 2147483648, + "created_at": 1719186911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + } + } + UploadPart: type: object - additionalProperties: false + title: UploadPart + description: | + The upload Part represents a chunk of bytes we can add to an Upload object. properties: - data: + id: + type: string + description: The upload Part unique identifier, which can be referenced in API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Part was created. + upload_id: + type: string + description: The ID of the Upload object that this Part was added to. + object: + type: string + description: The object type, which is always `upload.part`. 
+ enum: [ 'upload.part' ] + required: + - created_at + - id + - object + - upload_id + x-oaiMeta: + name: The upload part object + example: | + { + "id": "part_def456", + "object": "upload.part", + "created_at": 1719186911, + "upload_id": "upload_abc123" + } + Embedding: + type: object + description: | + Represents an embedding vector returned by embedding endpoint. + properties: + index: + type: integer + description: The index of the embedding in the list of embeddings. + embedding: + type: array description: | - The chunk of bytes for this Part. + The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). + items: + type: number + object: type: string - format: binary + description: The object type, which is always "embedding". + enum: [ embedding ] required: - - data + - index + - object + - embedding + x-oaiMeta: + name: The embedding object + example: | + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } - CompleteUploadRequest: + FineTuningJob: type: object - additionalProperties: false + title: FineTuningJob + description: | + The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. properties: - part_ids: - type: array - description: | - The ordered list of Part IDs. - items: - type: string - md5: - description: | - The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect. + id: type: string - required: - - part_ids - - CancelUploadRequest: - type: object - additionalProperties: false - - CreateFineTuningJobRequest: - type: object - properties: - model: - description: | - The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned). - example: "gpt-4o-mini" - anyOf: - - type: string - - type: string - enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] - x-oaiTypeLabel: string - training_file: - description: | - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/create) for how to upload a file. - - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. - - The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + description: The object identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + error: + type: object + nullable: true + description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + param: + type: string + description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. 
+ nullable: true + required: + - code + - message + - param + fine_tuned_model: type: string - example: "file-abc123" + nullable: true + description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + finished_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. hyperparameters: type: object - description: The hyperparameters used for the fine-tuning job. + description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. properties: - batch_size: - description: | - Number of examples in each batch. A larger batch size means that model parameters - are updated less frequently, but with lower variance. - oneOf: - - type: string - enum: [ auto ] - - type: integer - minimum: 1 - maximum: 256 - default: auto - learning_rate_multiplier: - description: | - Scaling factor for the learning rate. A smaller learning rate may be useful to avoid - overfitting. - oneOf: - - type: string - enum: [ auto ] - - type: number - minimum: 0 - exclusiveMinimum: true - default: auto n_epochs: - description: | - The number of epochs to train the model for. An epoch refers to one full cycle - through the training dataset. oneOf: - type: string enum: [ auto ] @@ -9190,1227 +11408,1463 @@ components: minimum: 1 maximum: 50 default: auto - suffix: - description: | - A string of up to 18 characters that will be added to your fine-tuned model name. - - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + description: + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + required: + - n_epochs + model: type: string - minLength: 1 - maxLength: 40 - default: null + description: The base model that is being fine-tuned. + object: + type: string + description: The object type, which is always "fine_tuning.job". + enum: [ fine_tuning.job ] + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: + type: array + description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). + items: + type: string + example: file-abc123 + status: + type: string + description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + enum: + [ + "validating_files", + "queued", + "running", + "succeeded", + "failed", + "cancelled", + ] + trained_tokens: + type: integer nullable: true + description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + training_file: + type: string + description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). validation_file: - description: | - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation - metrics periodically during fine-tuning. 
These metrics can be viewed in - the fine-tuning results file. - The same data should not be present in both train and validation files. - - Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string nullable: true - example: "file-abc123" + description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). integrations: type: array - description: A list of integrations to enable for your fine-tuning job. nullable: true + description: A list of integrations to enable for this fine-tuning job. + maxItems: 5 items: - type: object - required: - - type - - wandb - properties: - type: - description: | - The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. - oneOf: - - type: string - enum: [ wandb ] - wandb: - type: object - description: | - The settings for your integration with Weights and Biases. This payload specifies the project that - metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags - to your run, and set a default entity (team, username, etc) to be associated with your run. - required: - - project - properties: - project: - description: | - The name of the project that the new run will be created under. - type: string - example: "my-wandb-project" - name: - description: | - A display name to set for the run. If not set, we will use the Job ID as the name. - nullable: true - type: string - entity: - description: | - The entity to use for the run. This allows you to set the team or username of the WandB user that you would - like associated with the run. If not set, the default entity for the registered WandB API key is used. - nullable: true - type: string - tags: - description: | - A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some - default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". - type: array - items: - type: string - example: "custom-tag" - + oneOf: + - $ref: "#/components/schemas/FineTuningIntegration" + x-oaiExpandable: true seed: - description: | - The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. - If a seed is not specified, one will be generated for you. + type: integer + description: The seed used for the fine-tuning job. + estimated_finish: type: integer nullable: true - minimum: 0 - maximum: 2147483647 - example: 42 + description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. required: + - created_at + - error + - finished_at + - fine_tuned_model + - hyperparameters + - id - model + - object + - organization_id + - result_files + - status + - trained_tokens - training_file + - validation_file + - seed + x-oaiMeta: + name: The fine-tuning job object + example: *fine_tuning_example + + FineTuningIntegration: + type: object + title: Fine-Tuning Job Integration + required: + - type + - wandb + properties: + type: + type: string + description: "The type of the integration being enabled for the fine-tuning job" + enum: [ "wandb" ] + wandb: + type: object + description: | + The settings for your integration with Weights and Biases. 
This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + required: + - project + properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" + name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. + nullable: true + type: string + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true + type: string + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: "custom-tag" - ListFineTuningJobEventsResponse: + FineTuningJobEvent: type: object + description: Fine-tuning job event object properties: - data: - type: array - items: - $ref: "#/components/schemas/FineTuningJobEvent" + id: + type: string + created_at: + type: integer + level: + type: string + enum: [ "info", "warn", "error" ] + message: + type: string object: type: string - enum: [ list ] + enum: [ fine_tuning.job.event ] required: + - id - object - - data + - created_at + - level + - message + x-oaiMeta: + name: The fine-tuning job event object + example: | + { + "object": "fine_tuning.job.event", + "id": "ftevent-abc123" + "created_at": 1677610602, + "level": "info", + "message": "Created fine-tuning job" + } - ListFineTuningJobCheckpointsResponse: + FineTuningJobCheckpoint: type: object + title: FineTuningJobCheckpoint + description: | + The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. properties: - data: - type: array - items: - $ref: "#/components/schemas/FineTuningJobCheckpoint" - object: + id: type: string - enum: [ list ] - first_id: + description: The checkpoint identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the checkpoint was created. + fine_tuned_model_checkpoint: type: string - nullable: true - last_id: + description: The name of the fine-tuned checkpoint model that is created. + step_number: + type: integer + description: The step number that the checkpoint was created at. + metrics: + type: object + description: Metrics at the step number during the fine-tuning job. + properties: + step: + type: number + train_loss: + type: number + train_mean_token_accuracy: + type: number + valid_loss: + type: number + valid_mean_token_accuracy: + type: number + full_valid_loss: + type: number + full_valid_mean_token_accuracy: + type: number + fine_tuning_job_id: type: string - nullable: true - has_more: - type: boolean + description: The name of the fine-tuning job that this checkpoint was created from. + object: + type: string + description: The object type, which is always "fine_tuning.job.checkpoint". 
+ enum: [ fine_tuning.job.checkpoint ] required: + - created_at + - fine_tuning_job_id + - fine_tuned_model_checkpoint + - id + - metrics - object - - data - - has_more + - step_number + x-oaiMeta: + name: The fine-tuning job checkpoint object + example: | + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", + "created_at": 1712211699, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88", + "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", + "metrics": { + "step": 88, + "train_loss": 0.478, + "train_mean_token_accuracy": 0.924, + "valid_loss": 10.112, + "valid_mean_token_accuracy": 0.145, + "full_valid_loss": 0.567, + "full_valid_mean_token_accuracy": 0.944 + }, + "step_number": 88 + } - CreateEmbeddingRequest: + FinetuneChatRequestInput: type: object - additionalProperties: false + description: The per-line training example of a fine-tuning input file for chat models properties: - input: - description: | - Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. - example: "The quick brown fox jumped over the lazy dog" - oneOf: - - type: string - title: string - description: The string that will be turned into an embedding. - default: "" - example: "This is a test." - - type: array - title: array - description: The array of strings that will be turned into an embedding. - minItems: 1 - maxItems: 2048 - items: - type: string - default: "" - example: "['This is a test.']" - - type: array - title: array - description: The array of integers that will be turned into an embedding. - minItems: 1 - maxItems: 2048 - items: - type: integer - example: "[1212, 318, 257, 1332, 13]" - - type: array - title: array - description: The array of arrays containing integers that will be turned into an embedding. - minItems: 1 - maxItems: 2048 - items: - type: array - minItems: 1 - items: - type: integer - example: "[[1212, 318, 257, 1332, 13]]" - x-oaiExpandable: true - model: - description: *model_description - example: "text-embedding-3-small" - anyOf: - - type: string - - type: string - enum: - [ - "text-embedding-ada-002", - "text-embedding-3-small", - "text-embedding-3-large", - ] - x-oaiTypeLabel: string - encoding_format: - description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." - example: "float" - default: "float" - type: string - enum: [ "float", "base64" ] - dimensions: - description: | - The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. 
- type: integer - minimum: 1 - user: *end_user_param_configuration - required: - - model - - input + messages: + type: array + minItems: 1 + items: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/FineTuneChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + tools: + type: array + description: A list of tools the model may generate JSON inputs for. + items: + $ref: "#/components/schemas/ChatCompletionTool" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + functions: + deprecated: true + description: + A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + x-oaiMeta: + name: Training format for chat models + example: | + { + "messages": [ + { "role": "user", "content": "What is the weather in San Francisco?" }, + { + "role": "assistant", + "tool_calls": [ + { + "id": "call_id", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\"location\": \"San Francisco, USA\", \"format\": \"celsius\"}" + } + } + ] + } + ], + "parallel_tool_calls": false, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and country, eg. San Francisco, USA" + }, + "format": { "type": "string", "enum": ["celsius", "fahrenheit"] } + }, + "required": ["location", "format"] + } + } + } + ] + } - CreateEmbeddingResponse: + FinetuneCompletionRequestInput: type: object + description: The per-line training example of a fine-tuning input file for completions models properties: - data: - type: array - description: The list of embeddings generated by the model. - items: - $ref: "#/components/schemas/Embedding" - model: + prompt: type: string - description: The name of the model used to generate the embedding. - object: + description: The input prompt for this training example. + completion: type: string - description: The object type, which is always "list". - enum: [ list ] - usage: - type: object - description: The usage information for the request. - properties: - prompt_tokens: - type: integer - description: The number of tokens used by the prompt. - total_tokens: - type: integer - description: The total number of tokens used by the request. - required: - - prompt_tokens - - total_tokens - required: - - object - - model - - data - - usage + description: The desired completion for this training example. + x-oaiMeta: + name: Training format for completions models + example: | + { + "prompt": "What is the answer to 2+2", + "completion": "4" + } - CreateTranscriptionRequest: + CompletionUsage: type: object - additionalProperties: false + description: Usage statistics for the completion request. properties: - file: - description: | - The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - type: string - x-oaiTypeLabel: file - format: binary - model: - description: | - ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. 
- example: whisper-1 - anyOf: - - type: string - - type: string - enum: [ "whisper-1" ] - x-oaiTypeLabel: string - language: - description: | - The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. - type: string - prompt: - description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - type: string - response_format: - description: | - The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. - type: string - enum: - - json - - text - - srt - - verbose_json - - vtt - default: json - temperature: - description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. - type: number - default: 0 - timestamp_granularities[]: - description: | - The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. - type: array - items: - type: string - enum: - - word - - segment - default: [ segment ] + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). required: - - file - - model + - prompt_tokens + - completion_tokens + - total_tokens - # Note: This does not currently support the non-default response format types. - CreateTranscriptionResponseJson: + RunCompletionUsage: type: object - description: Represents a transcription response returned by model, based on the provided input. + description: Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). properties: - text: - type: string - description: The transcribed text. + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). required: - - text - x-oaiMeta: - name: The transcription object (JSON) - group: audio - example: *basic_transcription_response_example + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true - TranscriptionSegment: + RunStepCompletionUsage: type: object + description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. properties: - id: + completion_tokens: type: integer - description: Unique identifier of the segment. - seek: + description: Number of completion tokens used over the course of the run step. + prompt_tokens: type: integer - description: Seek offset of the segment. 
- start: - type: number - format: float - description: Start time of the segment in seconds. - end: - type: number - format: float - description: End time of the segment in seconds. - text: - type: string - description: Text content of the segment. - tokens: - type: array - items: - type: integer - description: Array of token IDs for the text content. - temperature: - type: number - format: float - description: Temperature parameter used for generating the segment. - avg_logprob: - type: number - format: float - description: Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. - compression_ratio: - type: number - format: float - description: Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. - no_speech_prob: - type: number - format: float - description: Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). required: - - id - - seek - - start - - end - - text - - tokens - - temperature - - avg_logprob - - compression_ratio - - no_speech_prob + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true - TranscriptionWord: - type: object - properties: - word: - type: string - description: The text content of the word. - start: - type: number - format: float - description: Start time of the word in seconds. - end: - type: number - format: float - description: End time of the word in seconds. - required: [ word, start, end ] + AssistantsApiResponseFormatOption: + description: | + Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - type: string + description: > + `auto` is the default value + enum: [ auto ] + - $ref: '#/components/schemas/ResponseFormatText' + - $ref: '#/components/schemas/ResponseFormatJsonObject' + - $ref: '#/components/schemas/ResponseFormatJsonSchema' + x-oaiExpandable: true - CreateTranscriptionResponseVerboseJson: + AssistantObject: type: object - description: Represents a verbose json transcription response returned by model, based on the provided input. + title: Assistant + description: Represents an `assistant` that can call the model and use tools. properties: - language: + id: + description: The identifier, which can be referenced in API endpoints. 
type: string - description: The language of the input audio. - duration: + object: + description: The object type, which is always `assistant`. type: string - description: The duration of the input audio. - text: + enum: [ assistant ] + created_at: + description: The Unix timestamp (in seconds) for when the assistant was created. + type: integer + name: + description: &assistant_name_param_description | + The name of the assistant. The maximum length is 256 characters. type: string - description: The transcribed text. - words: - type: array - description: Extracted words and their corresponding timestamps. - items: - $ref: "#/components/schemas/TranscriptionWord" - segments: + maxLength: 256 + nullable: true + description: + description: &assistant_description_param_description | + The description of the assistant. The maximum length is 512 characters. + type: string + maxLength: 512 + nullable: true + model: + description: *model_description + type: string + instructions: + description: &assistant_instructions_param_description | + The system instructions that the assistant uses. The maximum length is 256,000 characters. + type: string + maxLength: 256000 + nullable: true + tools: + description: &assistant_tools_param_description | + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + default: [ ] type: array - description: Segments of the transcribed text and their corresponding details. + maxItems: 128 items: - $ref: "#/components/schemas/TranscriptionSegment" - required: [ language, duration, text ] + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: + type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. + default: [ ] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: &metadata_description | + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: &run_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
+ type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - id + - object + - created_at + - name + - description + - model + - instructions + - tools + - metadata x-oaiMeta: - name: The transcription object (Verbose JSON) - group: audio - example: *verbose_transcription_response_example + name: The assistant object + beta: true + example: *create_assistants_example - CreateTranslationRequest: + CreateAssistantRequest: type: object additionalProperties: false properties: - file: - description: | - The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - type: string - x-oaiTypeLabel: file - format: binary model: - description: | - ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. - example: whisper-1 + description: *model_description + example: "gpt-4o" anyOf: - type: string - type: string - enum: [ "whisper-1" ] + enum: + [ + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] x-oaiTypeLabel: string - prompt: - description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + name: + description: *assistant_name_param_description type: string - response_format: - description: | - The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. + nullable: true + maxLength: 256 + description: + description: *assistant_description_param_description type: string - default: json + nullable: true + maxLength: 512 + instructions: + description: *assistant_instructions_param_description + type: string + nullable: true + maxLength: 256000 + tools: + description: *assistant_tools_param_description + default: [ ] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: + type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. 
+ properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [ ] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + vector_stores: + type: array + description: | + A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. + maxItems: 10000 + items: + type: string + chunking_strategy: + # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: [ "auto" ] + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true + metadata: + type: object + description: | + Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + oneOf: + - required: [ vector_store_ids ] + - required: [ vector_stores ] + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true temperature: - description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. 
+ description: *run_temperature_description type: number - default: 0 + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - - file - model - # Note: This does not currently support the non-default response format types. - CreateTranslationResponseJson: - type: object - properties: - text: - type: string - required: - - text - - CreateTranslationResponseVerboseJson: - type: object - properties: - language: - type: string - description: The language of the output translation (always `english`). - duration: - type: string - description: The duration of the input audio. - text: - type: string - description: The translated text. - segments: - type: array - description: Segments of the translated text and their corresponding details. - items: - $ref: "#/components/schemas/TranscriptionSegment" - required: [ language, duration, text ] - - CreateSpeechRequest: + ModifyAssistantRequest: type: object additionalProperties: false properties: model: - description: | - One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + description: *model_description anyOf: - type: string - - type: string - enum: [ "tts-1", "tts-1-hd" ] - x-oaiTypeLabel: string - input: + name: + description: *assistant_name_param_description type: string - description: The text to generate audio for. The maximum length is 4096 characters. - maxLength: 4096 - voice: - description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + nullable: true + maxLength: 256 + description: + description: *assistant_description_param_description type: string - enum: [ "alloy", "echo", "fable", "onyx", "nova", "shimmer" ] - response_format: - description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." - default: "mp3" + nullable: true + maxLength: 512 + instructions: + description: *assistant_instructions_param_description type: string - enum: [ "mp3", "opus", "aac", "flac", "wav", "pcm" ] - speed: - description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." + nullable: true + maxLength: 256000 + tools: + description: *assistant_tools_param_description + default: [ ] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: + type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. 
+ default: [ ] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: *run_temperature_description type: number - default: 1.0 - minimum: 0.25 - maximum: 4.0 - required: - - model - - input - - voice - - Model: - title: Model - description: Describes an OpenAI model offering that can be used with the API. - properties: - id: - type: string - description: The model identifier, which can be referenced in the API endpoints. - created: - type: integer - description: The Unix timestamp (in seconds) when the model was created. - object: - type: string - description: The object type, which is always "model". - enum: [ model ] - owned_by: - type: string - description: The organization that owns the model. - required: - - id - - object - - created - - owned_by - x-oaiMeta: - name: The model object - example: *retrieve_model_response - - OpenAIFile: - title: OpenAIFile - description: The `File` object represents a document that has been uploaded to OpenAI. - properties: - id: - type: string - description: The file identifier, which can be referenced in the API endpoints. - bytes: - type: integer - description: The size of the file, in bytes. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the file was created. - filename: - type: string - description: The name of the file. - object: - type: string - description: The object type, which is always `file`. - enum: [ "file" ] - purpose: - type: string - description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. - enum: - [ - "assistants", - "assistants_output", - "batch", - "batch_output", - "fine-tune", - "fine-tune-results", - "vision", - ] - status: + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + + DeleteAssistantResponse: + type: object + properties: + id: type: string - deprecated: true - description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. - enum: [ "uploaded", "processed", "error" ] - status_details: + deleted: + type: boolean + object: type: string - deprecated: true - description: Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. + enum: [ assistant.deleted ] required: - id - object - - bytes - - created_at - - filename - - purpose - - status - x-oaiMeta: - name: The file object - example: | - { - "id": "file-abc123", - "object": "file", - "bytes": 120000, - "created_at": 1677610602, - "filename": "salesOverview.pdf", - "purpose": "assistants", - } - Upload: + - deleted + + ListAssistantsResponse: type: object - title: Upload - description: | - The Upload object can accept byte chunks in the form of Parts. 
properties: - id: + object: type: string - description: The Upload unique identifier, which can be referenced in API endpoints. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload was created. - filename: + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/AssistantObject" + first_id: type: string - description: The name of the file to be uploaded. - bytes: - type: integer - description: The intended number of bytes to be uploaded. - purpose: + example: "asst_abc123" + last_id: type: string - description: The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values. - status: + example: "asst_abc456" + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + x-oaiMeta: + name: List assistants response object + group: chat + example: *list_assistants_example + + AssistantToolsCode: + type: object + title: Code interpreter tool + properties: + type: type: string - description: The status of the Upload. - enum: [ "pending", "completed", "cancelled", "expired" ] - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload was created. - object: + description: "The type of tool being defined: `code_interpreter`" + enum: [ "code_interpreter" ] + required: + - type + + AssistantToolsFileSearch: + type: object + title: FileSearch tool + properties: + type: type: string - description: The object type, which is always "upload". - enum: [ upload ] - file: - $ref: "#/components/schemas/OpenAIFile" - nullable: true - description: The ready File object after the Upload is completed. + description: "The type of tool being defined: `file_search`" + enum: [ "file_search" ] + file_search: + type: object + description: Overrides for the file search tool. + properties: + max_num_results: + type: integer + minimum: 1 + maximum: 50 + description: | + The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + ranking_options: + $ref: "#/components/schemas/FileSearchRankingOptions" required: - - bytes - - created_at - - expires_at - - filename - - id - - purpose - - status - - step_number - x-oaiMeta: - name: The upload object - example: | - { - "id": "upload_abc123", - "object": "upload", - "bytes": 2147483648, - "created_at": 1719184911, - "filename": "training_examples.jsonl", - "purpose": "fine-tune", - "status": "completed", - "expires_at": 1719127296, - "file": { - "id": "file-xyz321", - "object": "file", - "bytes": 2147483648, - "created_at": 1719186911, - "filename": "training_examples.jsonl", - "purpose": "fine-tune", - } - } - UploadPart: + - type + + FileSearchRankingOptions: + title: File search tool call ranking options type: object - title: UploadPart description: | - The upload Part represents a chunk of bytes we can add to an Upload object. + The ranking options for the file search. + + See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. 
properties: - id: + ranker: type: string - description: The upload Part unique identifier, which can be referenced in API endpoints. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Part was created. - upload_id: + description: The ranker to use for the file search. If not specified will use the `auto` ranker. + enum: [ "auto", "default_2024_08_21" ] + score_threshold: + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + + AssistantToolsFileSearchTypeOnly: + type: object + title: FileSearch tool + properties: + type: type: string - description: The ID of the Upload object that this Part was added to. - object: + description: "The type of tool being defined: `file_search`" + enum: [ "file_search" ] + required: + - type + + AssistantToolsFunction: + type: object + title: Function tool + properties: + type: type: string - description: The object type, which is always `upload.part`. - enum: [ 'upload.part' ] + description: "The type of tool being defined: `function`" + enum: [ "function" ] + function: + $ref: "#/components/schemas/FunctionObject" required: - - created_at - - id - - object - - upload_id - x-oaiMeta: - name: The upload part object - example: | - { - "id": "part_def456", - "object": "upload.part", - "created_at": 1719186911, - "upload_id": "upload_abc123" - } - Embedding: + - type + - function + + TruncationObject: type: object - description: | - Represents an embedding vector returned by embedding endpoint. + title: Thread Truncation Controls + description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. properties: - index: - type: integer - description: The index of the embedding in the list of embeddings. - embedding: - type: array - description: | - The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). - items: - type: number - object: + type: type: string - description: The object type, which is always "embedding". - enum: [ embedding ] + description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. + enum: [ "auto", "last_messages" ] + last_messages: + type: integer + description: The number of most recent messages from the thread when constructing the context for the run. + minimum: 1 + nullable: true required: - - index - - object - - embedding - x-oaiMeta: - name: The embedding object - example: | - { - "object": "embedding", - "embedding": [ - 0.0023064255, - -0.009327292, - .... (1536 floats total for ada-002) - -0.0028842222, - ], - "index": 0 - } + - type + + AssistantsApiToolChoiceOption: + description: | + Controls which (if any) tool is called by the model. + `none` means the model will not call any tools and instead generates a message. + `auto` is the default value and means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools before responding to the user. + Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
- FineTuningJob: + oneOf: + - type: string + description: > + `none` means the model will not call any tools and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools before responding to the user. + enum: [ none, auto, required ] + - $ref: "#/components/schemas/AssistantsNamedToolChoice" + x-oaiExpandable: true + + AssistantsNamedToolChoice: type: object - title: FineTuningJob - description: | - The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. + description: Specifies a tool the model should use. Use to force the model to call a specific tool. properties: - id: + type: type: string - description: The object identifier, which can be referenced in the API endpoints. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - error: + enum: [ "function", "code_interpreter", "file_search" ] + description: The type of the tool. If type is `function`, the function name must be set + function: type: object - nullable: true - description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. properties: - code: - type: string - description: A machine-readable error code. - message: - type: string - description: A human-readable error message. - param: + name: type: string - description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. - nullable: true - required: - - code - - message - - param - fine_tuned_model: - type: string - nullable: true - description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. - finished_at: - type: integer - nullable: true - description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. - hyperparameters: - type: object - description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - properties: - n_epochs: - oneOf: - - type: string - enum: [ auto ] - - type: integer - minimum: 1 - maximum: 50 - default: auto - description: - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + description: The name of the function to call. required: - - n_epochs - model: + - name + required: + - type + + RunObject: + type: object + title: A run on a thread + description: Represents an execution run on a [thread](/docs/api-reference/threads). + properties: + id: + description: The identifier, which can be referenced in API endpoints. type: string - description: The base model that is being fine-tuned. object: + description: The object type, which is always `thread.run`. type: string - description: The object type, which is always "fine_tuning.job". - enum: [ fine_tuning.job ] - organization_id: + enum: [ "thread.run" ] + created_at: + description: The Unix timestamp (in seconds) for when the run was created. + type: integer + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. 
+ type: string + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. type: string - description: The organization that owns the fine-tuning job. - result_files: - type: array - description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). - items: - type: string - example: file-abc123 status: + description: The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. type: string - description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. enum: [ - "validating_files", "queued", - "running", - "succeeded", - "failed", + "in_progress", + "requires_action", + "cancelling", "cancelled", + "failed", + "completed", + "incomplete", + "expired", ] - trained_tokens: - type: integer - nullable: true - description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. - training_file: - type: string - description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). - validation_file: - type: string - nullable: true - description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). - integrations: - type: array - nullable: true - description: A list of integrations to enable for this fine-tuning job. - maxItems: 5 - items: - oneOf: - - $ref: "#/components/schemas/FineTuningIntegration" - x-oaiExpandable: true - seed: - type: integer - description: The seed used for the fine-tuning job. - estimated_finish: - type: integer - nullable: true - description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. - required: - - created_at - - error - - finished_at - - fine_tuned_model - - hyperparameters - - id - - model - - object - - organization_id - - result_files - - status - - trained_tokens - - training_file - - validation_file - - seed - x-oaiMeta: - name: The fine-tuning job object - example: *fine_tuning_example - - FineTuningIntegration: - type: object - title: Fine-Tuning Job Integration - required: - - type - - wandb - properties: - type: - type: string - description: "The type of the integration being enabled for the fine-tuning job" - enum: [ "wandb" ] - wandb: + required_action: type: object - description: | - The settings for your integration with Weights and Biases. This payload specifies the project that - metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags - to your run, and set a default entity (team, username, etc) to be associated with your run. - required: - - project + description: Details on the action required to continue the run. Will be `null` if no action is required. + nullable: true properties: - project: - description: | - The name of the project that the new run will be created under. + type: + description: For now, this is always `submit_tool_outputs`. type: string - example: "my-wandb-project" - name: - description: | - A display name to set for the run. If not set, we will use the Job ID as the name. 
- nullable: true + enum: [ "submit_tool_outputs" ] + submit_tool_outputs: + type: object + description: Details on the tool outputs needed for this run to continue. + properties: + tool_calls: + type: array + description: A list of the relevant tool calls. + items: + $ref: "#/components/schemas/RunToolCallObject" + required: + - tool_calls + required: + - type + - submit_tool_outputs + last_error: + type: object + description: The last error associated with this run. Will be `null` if there are no errors. + nullable: true + properties: + code: type: string - entity: - description: | - The entity to use for the run. This allows you to set the team or username of the WandB user that you would - like associated with the run. If not set, the default entity for the registered WandB API key is used. - nullable: true + description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. + enum: + [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] + message: type: string - tags: - description: | - A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some - default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". - type: array - items: - type: string - example: "custom-tag" - - FineTuningJobEvent: - type: object - description: Fine-tuning job event object - properties: - id: - type: string - created_at: + description: A human-readable description of the error. + required: + - code + - message + expires_at: + description: The Unix timestamp (in seconds) for when the run will expire. + type: integer + nullable: true + started_at: + description: The Unix timestamp (in seconds) for when the run was started. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run was cancelled. type: integer - level: - type: string - enum: [ "info", "warn", "error" ] - message: - type: string - object: - type: string - enum: [ fine_tuning.job.event ] - required: - - id - - object - - created_at - - level - - message - x-oaiMeta: - name: The fine-tuning job event object - example: | - { - "object": "fine_tuning.job.event", - "id": "ftevent-abc123" - "created_at": 1677610602, - "level": "info", - "message": "Created fine-tuning job" - } - - FineTuningJobCheckpoint: - type: object - title: FineTuningJobCheckpoint - description: | - The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. - properties: - id: - type: string - description: The checkpoint identifier, which can be referenced in the API endpoints. - created_at: + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run failed. type: integer - description: The Unix timestamp (in seconds) for when the checkpoint was created. - fine_tuned_model_checkpoint: - type: string - description: The name of the fine-tuned checkpoint model that is created. - step_number: + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run was completed. type: integer - description: The step number that the checkpoint was created at. - metrics: + nullable: true + incomplete_details: + description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. type: object - description: Metrics at the step number during the fine-tuning job. 
+ nullable: true properties: - step: - type: number - train_loss: - type: number - train_mean_token_accuracy: - type: number - valid_loss: - type: number - valid_mean_token_accuracy: - type: number - full_valid_loss: - type: number - full_valid_mean_token_accuracy: - type: number - fine_tuning_job_id: + reason: + description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. + type: string + enum: [ "max_completion_tokens", "max_prompt_tokens" ] + model: + description: The model that the [assistant](/docs/api-reference/assistants) used for this run. type: string - description: The name of the fine-tuning job that this checkpoint was created from. - object: + instructions: + description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. type: string - description: The object type, which is always "fine_tuning.job.checkpoint". - enum: [ fine_tuning.job.checkpoint ] + tools: + description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + default: [ ] + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunCompletionUsage" + temperature: + description: The sampling temperature used for this run. If not set, defaults to 1. + type: number + nullable: true + top_p: + description: The nucleus sampling value used for this run. If not set, defaults to 1. + type: number + nullable: true + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens specified to have been used over the course of the run. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens specified to have been used over the course of the run. 
+ minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - - created_at - - fine_tuning_job_id - - fine_tuned_model_checkpoint - id - - metrics - object - - step_number + - created_at + - thread_id + - assistant_id + - status + - required_action + - last_error + - expires_at + - started_at + - cancelled_at + - failed_at + - completed_at + - model + - instructions + - tools + - metadata + - usage + - incomplete_details + - max_prompt_tokens + - max_completion_tokens + - truncation_strategy + - tool_choice + - parallel_tool_calls + - response_format x-oaiMeta: - name: The fine-tuning job checkpoint object + name: The run object + beta: true example: | { - "object": "fine_tuning.job.checkpoint", - "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", - "created_at": 1712211699, - "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88", - "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", - "metrics": { - "step": 88, - "train_loss": 0.478, - "train_mean_token_accuracy": 0.924, - "valid_loss": 10.112, - "valid_mean_token_accuracy": 0.145, - "full_valid_loss": 0.567, - "full_valid_mean_token_accuracy": 0.944 + "id": "run_abc123", + "object": "thread.run", + "created_at": 1698107661, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699073476, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699073498, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], + "metadata": {}, + "incomplete_details": null, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 }, - "step_number": 88 + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true } - - FinetuneChatRequestInput: + CreateRunRequest: type: object - description: The per-line training example of a fine-tuning input file for chat models + additionalProperties: false properties: - messages: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + type: string + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. 
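Because a run's `status` only leaves `queued`, `in_progress` and `cancelling` asynchronously, clients typically poll the run object until it reaches a state they can act on. A minimal polling sketch, assuming the usual `GET /v1/threads/{thread_id}/runs/{run_id}` endpoint (declared elsewhere in this spec), the `OpenAI-Beta: assistants=v2` header, placeholder IDs, and `package:http` as a dependency:

```dart
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

/// Polls a run until it either finishes or needs tool outputs.
Future<Map<String, dynamic>> waitForRun(String threadId, String runId) async {
  final headers = {
    'Authorization': 'Bearer ${Platform.environment['OPENAI_API_KEY']}',
    'OpenAI-Beta': 'assistants=v2',
  };
  // Terminal states plus `requires_action`, which needs client input.
  const stopStates = {
    'completed', 'failed', 'cancelled', 'expired', 'incomplete',
    'requires_action',
  };
  while (true) {
    final res = await http.get(
      Uri.parse('https://api.openai.com/v1/threads/$threadId/runs/$runId'),
      headers: headers,
    );
    final run = jsonDecode(res.body) as Map<String, dynamic>;
    if (stopStates.contains(run['status'])) return run;
    await Future<void>.delayed(const Duration(seconds: 1));
  }
}
```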
+ example: "gpt-4o" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + nullable: true + instructions: + description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. + type: string + nullable: true + additional_instructions: + description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + type: string + nullable: true + additional_messages: + description: Adds additional messages to the thread before creating the run. type: array - minItems: 1 items: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" - - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" - - $ref: "#/components/schemas/FineTuneChatCompletionRequestAssistantMessage" - - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" - - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" - x-oaiExpandable: true + $ref: "#/components/schemas/CreateMessageRequest" + nullable: true tools: + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + nullable: true type: array - description: A list of tools the model may generate JSON inputs for. + maxItems: 20 items: - $ref: "#/components/schemas/ChatCompletionTool" + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *run_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. 
If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true parallel_tool_calls: $ref: "#/components/schemas/ParallelToolCalls" - functions: - deprecated: true - description: - A list of functions the model may generate JSON inputs for. - type: array - minItems: 1 - maxItems: 128 - items: - $ref: "#/components/schemas/ChatCompletionFunctions" - x-oaiMeta: - name: Training format for chat models - example: | - { - "messages": [ - { "role": "user", "content": "What is the weather in San Francisco?" }, - { - "role": "assistant", - "tool_calls": [ - { - "id": "call_id", - "type": "function", - "function": { - "name": "get_current_weather", - "arguments": "{\"location\": \"San Francisco, USA\", \"format\": \"celsius\"}" - } - } - ] - } - ], - "parallel_tool_calls": false, - "tools": [ - { - "type": "function", - "function": { - "name": "get_current_weather", - "description": "Get the current weather", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and country, eg. San Francisco, USA" - }, - "format": { "type": "string", "enum": ["celsius", "fahrenheit"] } - }, - "required": ["location", "format"] - } - } - } - ] - } - - FinetuneCompletionRequestInput: + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - thread_id + - assistant_id + ListRunsResponse: type: object - description: The per-line training example of a fine-tuning input file for completions models properties: - prompt: + object: type: string - description: The input prompt for this training example. - completion: + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/RunObject" + first_id: type: string - description: The desired completion for this training example. - x-oaiMeta: - name: Training format for completions models - example: | - { - "prompt": "What is the answer to 2+2", - "completion": "4" - } - - CompletionUsage: - type: object - description: Usage statistics for the completion request. - properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). + example: "run_abc123" + last_id: + type: string + example: "run_abc456" + has_more: + type: boolean + example: false required: - - prompt_tokens - - completion_tokens - - total_tokens - - RunCompletionUsage: + - object + - data + - first_id + - last_id + - has_more + ModifyRunRequest: type: object - description: Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). + additionalProperties: false properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). 
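The sketch below shows a run being created against an existing thread while exercising the per-run overrides that `CreateRunRequest` defines above (`model`, `instructions`, the token budgets and `truncation_strategy`). It is illustrative only: the `POST /v1/threads/{thread_id}/runs` path is declared elsewhere in the spec, the IDs are placeholders, and `package:http` is assumed to be available.

```dart
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final res = await http.post(
    Uri.parse('https://api.openai.com/v1/threads/thread_abc123/runs'),
    headers: {
      'Authorization': 'Bearer ${Platform.environment['OPENAI_API_KEY']}',
      'Content-Type': 'application/json',
      'OpenAI-Beta': 'assistants=v2',
    },
    body: jsonEncode({
      'assistant_id': 'asst_abc123', // placeholder
      // Optional per-run overrides from CreateRunRequest.
      'model': 'gpt-4o',
      'instructions': 'Answer in one short paragraph.',
      'max_prompt_tokens': 1000,
      'max_completion_tokens': 1000,
      'truncation_strategy': {'type': 'auto'},
    }),
  );
  print(jsonDecode(res.body)['status']); // e.g. "queued"
}
```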
- required: - - prompt_tokens - - completion_tokens - - total_tokens - nullable: true - - RunStepCompletionUsage: + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + SubmitToolOutputsRunRequest: type: object - description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. + additionalProperties: false properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run step. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run step. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). - required: - - prompt_tokens - - completion_tokens - - total_tokens - nullable: true - - AssistantsApiResponseFormatOption: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - oneOf: - - type: string - description: > - `auto` is the default value - enum: [ auto ] - - $ref: '#/components/schemas/ResponseFormatText' - - $ref: '#/components/schemas/ResponseFormatJsonObject' - - $ref: '#/components/schemas/ResponseFormatJsonSchema' - x-oaiExpandable: true + tool_outputs: + description: A list of tools for which the outputs are being submitted. + type: array + items: + type: object + properties: + tool_call_id: + type: string + description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. + output: + type: string + description: The output of the tool call to be submitted to continue the run. + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + required: + - tool_outputs - AssistantObject: + RunToolCallObject: type: object - title: Assistant - description: Represents an `assistant` that can call the model and use tools. + description: Tool call objects properties: id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `assistant`. type: string - enum: [ assistant ] - created_at: - description: The Unix timestamp (in seconds) for when the assistant was created. - type: integer - name: - description: &assistant_name_param_description | - The name of the assistant. The maximum length is 256 characters. 
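When a run stops in `requires_action`, its `required_action.submit_tool_outputs.tool_calls` array carries the `RunToolCallObject` entries (continued just below) that the client must resolve and send back via `SubmitToolOutputsRunRequest`. A hedged sketch, assuming the submit-tool-outputs endpoint referenced in the descriptions above and using an echoed string in place of a real function dispatcher:

```dart
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> submitToolOutputs(
    String threadId, String runId, Map<String, dynamic> run) async {
  final toolCalls =
      run['required_action']['submit_tool_outputs']['tool_calls'] as List;
  // Each entry pairs a tool_call_id with the output of that call; the string
  // below is a stand-in for actually executing the named function.
  final outputs = [
    for (final call in toolCalls)
      {
        'tool_call_id': call['id'],
        'output': 'result for ${call['function']['name']}',
      },
  ];
  await http.post(
    Uri.parse('https://api.openai.com/v1/threads/$threadId/runs/$runId'
        '/submit_tool_outputs'),
    headers: {
      'Authorization': 'Bearer ${Platform.environment['OPENAI_API_KEY']}',
      'Content-Type': 'application/json',
      'OpenAI-Beta': 'assistants=v2',
    },
    body: jsonEncode({'tool_outputs': outputs}),
  );
}
```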
+ description: The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + type: type: string - maxLength: 256 - nullable: true - description: - description: &assistant_description_param_description | - The description of the assistant. The maximum length is 512 characters. + description: The type of tool call the output is required for. For now, this is always `function`. + enum: [ "function" ] + function: + type: object + description: The function definition. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments that the model expects you to pass to the function. + required: + - name + - arguments + required: + - id + - type + - function + + CreateThreadAndRunRequest: + type: object + additionalProperties: false + properties: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. type: string - maxLength: 512 - nullable: true + thread: + $ref: "#/components/schemas/CreateThreadRequest" + description: If no thread is provided, an empty thread will be created. model: - description: *model_description - type: string + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + example: "gpt-4o" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + nullable: true instructions: - description: &assistant_instructions_param_description | - The system instructions that the assistant uses. The maximum length is 256,000 characters. + description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. type: string - maxLength: 256000 nullable: true tools: - description: &assistant_tools_param_description | - A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - default: [ ] + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + nullable: true type: array - maxItems: 128 + maxItems: 20 items: oneOf: - $ref: "#/components/schemas/AssistantToolsCode" - $ref: "#/components/schemas/AssistantToolsFileSearch" - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true tool_resources: type: object description: | @@ -10422,7 +12876,7 @@ components: file_ids: type: array description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. 
There can be a maximum of 20 files associated with the tool. default: [ ] maxItems: 20 items: @@ -10439,20 +12893,18 @@ components: type: string nullable: true metadata: - description: &metadata_description | - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + description: *metadata_description type: object x-oaiTypeLabel: map nullable: true temperature: - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. type: number minimum: 0 maximum: 2 default: 1 example: 1 nullable: true + description: *run_temperature_description top_p: type: number minimum: 0 @@ -10460,96 +12912,116 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. + description: *run_top_p_description + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true required: - - id - - object - - created_at - - name - - description - - model - - instructions - - tools - - metadata - x-oaiMeta: - name: The assistant object - beta: true - example: *create_assistants_example + - thread_id + - assistant_id - CreateAssistantRequest: + ThreadObject: type: object - additionalProperties: false + title: Thread + description: Represents a thread that contains [messages](/docs/api-reference/messages). 
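`CreateThreadAndRunRequest` folds thread creation and run creation into a single call. A sketch of that combined request, assuming the `POST /v1/threads/runs` endpoint declared elsewhere in the spec, a placeholder assistant ID, and the message shape from the referenced `CreateThreadRequest`:

```dart
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final res = await http.post(
    Uri.parse('https://api.openai.com/v1/threads/runs'),
    headers: {
      'Authorization': 'Bearer ${Platform.environment['OPENAI_API_KEY']}',
      'Content-Type': 'application/json',
      'OpenAI-Beta': 'assistants=v2',
    },
    body: jsonEncode({
      'assistant_id': 'asst_abc123', // placeholder
      // Inline thread; if omitted, an empty thread would be created.
      'thread': {
        'messages': [
          {'role': 'user', 'content': 'Summarize the attached report.'},
        ],
      },
      'tool_choice': 'auto',
    }),
  );
  final run = jsonDecode(res.body) as Map<String, dynamic>;
  print('${run['id']} on ${run['thread_id']}');
}
```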
properties: - model: - description: *model_description - example: "gpt-4o" - anyOf: - - type: string - - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - name: - description: *assistant_name_param_description + id: + description: The identifier, which can be referenced in API endpoints. type: string - nullable: true - maxLength: 256 - description: - description: *assistant_description_param_description + object: + description: The object type, which is always `thread`. type: string + enum: [ "thread" ] + created_at: + description: The Unix timestamp (in seconds) for when the thread was created. + type: integer + tool_resources: + type: object + description: | + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [ ] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. + maxItems: 1 + items: + type: string nullable: true - maxLength: 512 - instructions: - description: *assistant_instructions_param_description - type: string + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map nullable: true - maxLength: 256000 - tools: - description: *assistant_tools_param_description - default: [ ] + required: + - id + - object + - created_at + - tool_resources + - metadata + x-oaiMeta: + name: The thread object + beta: true + example: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1698107661, + "metadata": {} + } + + CreateThreadRequest: + type: object + additionalProperties: false + properties: + messages: + description: A list of [messages](/docs/api-reference/messages) to start the thread with. type: array - maxItems: 128 items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true + $ref: "#/components/schemas/CreateMessageRequest" tool_resources: type: object description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. 
For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: code_interpreter: type: object @@ -10568,14 +13040,14 @@ components: vector_store_ids: type: array description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. maxItems: 1 items: type: string vector_stores: type: array description: | - A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant. + A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. maxItems: 1 items: type: object @@ -10638,6 +13110,7 @@ components: description: | Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. x-oaiTypeLabel: map + x-oaiExpandable: true oneOf: - required: [ vector_store_ids ] - required: [ vector_stores ] @@ -10647,66 +13120,15 @@ components: type: object x-oaiTypeLabel: map nullable: true - temperature: - description: *run_temperature_description - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *run_top_p_description - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true - required: - - model - ModifyAssistantRequest: + ModifyThreadRequest: type: object additionalProperties: false properties: - model: - description: *model_description - anyOf: - - type: string - name: - description: *assistant_name_param_description - type: string - nullable: true - maxLength: 256 - description: - description: *assistant_description_param_description - type: string - nullable: true - maxLength: 512 - instructions: - description: *assistant_instructions_param_description - type: string - nullable: true - maxLength: 256000 - tools: - description: *assistant_tools_param_description - default: [ ] - type: array - maxItems: 128 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true tool_resources: type: object description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. 
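A thread can also be created on its own with seed messages and thread-level tool resources, per `CreateThreadRequest` above. The sketch below is illustrative: the `POST /v1/threads` path is defined elsewhere in the spec, and the vector store ID is a placeholder.

```dart
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final res = await http.post(
    Uri.parse('https://api.openai.com/v1/threads'),
    headers: {
      'Authorization': 'Bearer ${Platform.environment['OPENAI_API_KEY']}',
      'Content-Type': 'application/json',
      'OpenAI-Beta': 'assistants=v2',
    },
    body: jsonEncode({
      'messages': [
        {
          'role': 'user',
          'content': 'What does the onboarding doc say about vacation policy?',
        },
      ],
      // At most one vector store may be attached to a thread.
      'tool_resources': {
        'file_search': {
          'vector_store_ids': ['vs_abc123'], // placeholder
        },
      },
    }),
  );
  print(jsonDecode(res.body)['id']); // e.g. "thread_abc123"
}
```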
properties: code_interpreter: type: object @@ -10714,7 +13136,7 @@ components: file_ids: type: array description: | - Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. default: [ ] maxItems: 20 items: @@ -10725,7 +13147,7 @@ components: vector_store_ids: type: array description: | - Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. maxItems: 1 items: type: string @@ -10735,27 +13157,8 @@ components: type: object x-oaiTypeLabel: map nullable: true - temperature: - description: *run_temperature_description - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *run_top_p_description - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true - DeleteAssistantResponse: + DeleteThreadResponse: type: object properties: id: @@ -10764,14 +13167,13 @@ components: type: boolean object: type: string - enum: [ assistant.deleted ] + enum: [ thread.deleted ] required: - id - object - deleted - ListAssistantsResponse: - type: object + ListThreadsResponse: properties: object: type: string @@ -10779,505 +13181,292 @@ components: data: type: array items: - $ref: "#/components/schemas/AssistantObject" - first_id: - type: string - example: "asst_abc123" - last_id: - type: string - example: "asst_abc456" - has_more: - type: boolean - example: false - required: - - object - - data - - first_id - - last_id - - has_more - x-oaiMeta: - name: List assistants response object - group: chat - example: *list_assistants_example - - AssistantToolsCode: - type: object - title: Code interpreter tool - properties: - type: - type: string - description: "The type of tool being defined: `code_interpreter`" - enum: [ "code_interpreter" ] - required: - - type - - AssistantToolsFileSearch: - type: object - title: FileSearch tool - properties: - type: - type: string - description: "The type of tool being defined: `file_search`" - enum: [ "file_search" ] - file_search: - type: object - description: Overrides for the file search tool. - properties: - max_num_results: - type: integer - minimum: 1 - maximum: 50 - description: | - The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive. - - Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. - ranking_options: - $ref: "#/components/schemas/FileSearchRankingOptions" - required: - - type - - FileSearchRankingOptions: - title: File search tool call ranking options - type: object - description: | - The ranking options for the file search. - - See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. 
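`ModifyThreadRequest` only carries `tool_resources` and `metadata`, so updating a thread is a small request. A sketch of a metadata update, assuming the modify-thread endpoint (`POST /v1/threads/{thread_id}`, declared elsewhere in the spec) and a placeholder thread ID:

```dart
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  final res = await http.post(
    Uri.parse('https://api.openai.com/v1/threads/thread_abc123'),
    headers: {
      'Authorization': 'Bearer ${Platform.environment['OPENAI_API_KEY']}',
      'Content-Type': 'application/json',
      'OpenAI-Beta': 'assistants=v2',
    },
    // Up to 16 key-value pairs; keys up to 64 chars, values up to 512 chars.
    body: jsonEncode({
      'metadata': {'user': 'abc123', 'topic': 'billing'},
    }),
  );
  print(jsonDecode(res.body)['metadata']);
}
```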
- properties: - ranker: - type: string - description: The ranker to use for the file search. If not specified will use the `auto` ranker. - enum: [ "auto", "default_2024_08_21" ] - score_threshold: - type: number - description: The score threshold for the file search. All values must be a floating point number between 0 and 1. - minimum: 0 - maximum: 1 - - AssistantToolsFileSearchTypeOnly: - type: object - title: FileSearch tool - properties: - type: - type: string - description: "The type of tool being defined: `file_search`" - enum: [ "file_search" ] - required: - - type - - AssistantToolsFunction: - type: object - title: Function tool - properties: - type: - type: string - description: "The type of tool being defined: `function`" - enum: [ "function" ] - function: - $ref: "#/components/schemas/FunctionObject" - required: - - type - - function - - TruncationObject: - type: object - title: Thread Truncation Controls - description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. - properties: - type: - type: string - description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: [ "auto", "last_messages" ] - last_messages: - type: integer - description: The number of most recent messages from the thread when constructing the context for the run. - minimum: 1 - nullable: true - required: - - type - - AssistantsApiToolChoiceOption: - description: | - Controls which (if any) tool is called by the model. - `none` means the model will not call any tools and instead generates a message. - `auto` is the default value and means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools before responding to the user. - Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - - oneOf: - - type: string - description: > - `none` means the model will not call any tools and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools before responding to the user. - enum: [ none, auto, required ] - - $ref: "#/components/schemas/AssistantsNamedToolChoice" - x-oaiExpandable: true - - AssistantsNamedToolChoice: - type: object - description: Specifies a tool the model should use. Use to force the model to call a specific tool. - properties: - type: + $ref: "#/components/schemas/ThreadObject" + first_id: type: string - enum: [ "function", "code_interpreter", "file_search" ] - description: The type of the tool. If type is `function`, the function name must be set - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - required: - - name + example: "asst_abc123" + last_id: + type: string + example: "asst_abc456" + has_more: + type: boolean + example: false required: - - type + - object + - data + - first_id + - last_id + - has_more - RunObject: + MessageObject: type: object - title: A run on a thread - description: Represents an execution run on a [thread](/docs/api-reference/threads). 
+ title: The message object + description: Represents a message within a [thread](/docs/api-reference/threads). properties: id: description: The identifier, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `thread.run`. + description: The object type, which is always `thread.message`. type: string - enum: [ "thread.run" ] + enum: [ "thread.message" ] created_at: - description: The Unix timestamp (in seconds) for when the run was created. + description: The Unix timestamp (in seconds) for when the message was created. type: integer thread_id: - description: The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. - type: string - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + description: The [thread](/docs/api-reference/threads) ID that this message belongs to. type: string status: - description: The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. + description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. type: string - enum: - [ - "queued", - "in_progress", - "requires_action", - "cancelling", - "cancelled", - "failed", - "completed", - "incomplete", - "expired", - ] - required_action: - type: object - description: Details on the action required to continue the run. Will be `null` if no action is required. - nullable: true - properties: - type: - description: For now, this is always `submit_tool_outputs`. - type: string - enum: [ "submit_tool_outputs" ] - submit_tool_outputs: - type: object - description: Details on the tool outputs needed for this run to continue. - properties: - tool_calls: - type: array - description: A list of the relevant tool calls. - items: - $ref: "#/components/schemas/RunToolCallObject" - required: - - tool_calls - required: - - type - - submit_tool_outputs - last_error: - type: object - description: The last error associated with this run. Will be `null` if there are no errors. - nullable: true - properties: - code: - type: string - description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - enum: - [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] - message: - type: string - description: A human-readable description of the error. - required: - - code - - message - expires_at: - description: The Unix timestamp (in seconds) for when the run will expire. - type: integer - nullable: true - started_at: - description: The Unix timestamp (in seconds) for when the run was started. - type: integer - nullable: true - cancelled_at: - description: The Unix timestamp (in seconds) for when the run was cancelled. - type: integer - nullable: true - failed_at: - description: The Unix timestamp (in seconds) for when the run failed. - type: integer - nullable: true - completed_at: - description: The Unix timestamp (in seconds) for when the run was completed. - type: integer - nullable: true + enum: [ "in_progress", "incomplete", "completed" ] incomplete_details: - description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. - type: object - nullable: true - properties: - reason: - description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. 
- type: string - enum: [ "max_completion_tokens", "max_prompt_tokens" ] - model: - description: The model that the [assistant](/docs/api-reference/assistants) used for this run. - type: string - instructions: - description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. - type: string - tools: - description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. - default: [ ] - type: array - maxItems: 20 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true - metadata: - description: *metadata_description + description: On an incomplete message, details about why the message is incomplete. type: object - x-oaiTypeLabel: map - nullable: true - usage: - $ref: "#/components/schemas/RunCompletionUsage" - temperature: - description: The sampling temperature used for this run. If not set, defaults to 1. - type: number - nullable: true - top_p: - description: The nucleus sampling value used for this run. If not set, defaults to 1. - type: number - nullable: true - max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens specified to have been used over the course of the run. - minimum: 256 - max_completion_tokens: - type: integer - nullable: true - description: | - The maximum number of completion tokens specified to have been used over the course of the run. - minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" - nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" - nullable: true - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true - required: - - id - - object - - created_at - - thread_id - - assistant_id - - status - - required_action - - last_error - - expires_at - - started_at - - cancelled_at - - failed_at - - completed_at - - model - - instructions - - tools - - metadata - - usage - - incomplete_details - - max_prompt_tokens - - max_completion_tokens - - truncation_strategy - - tool_choice - - parallel_tool_calls - - response_format - x-oaiMeta: - name: The run object - beta: true - example: | - { - "id": "run_abc123", - "object": "thread.run", - "created_at": 1698107661, - "assistant_id": "asst_abc123", - "thread_id": "thread_abc123", - "status": "completed", - "started_at": 1699073476, - "expires_at": null, - "cancelled_at": null, - "failed_at": null, - "completed_at": 1699073498, - "last_error": null, - "model": "gpt-4o", - "instructions": null, - "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], - "metadata": {}, - "incomplete_details": null, - "usage": { - "prompt_tokens": 123, - "completion_tokens": 456, - "total_tokens": 579 - }, - "temperature": 1.0, - "top_p": 1.0, - "max_prompt_tokens": 1000, - "max_completion_tokens": 1000, - "truncation_strategy": { - "type": "auto", - "last_messages": null - }, - "response_format": "auto", - "tool_choice": "auto", - "parallel_tool_calls": true - } - CreateRunRequest: - type: object - additionalProperties: false - properties: - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. - type: string - model: - description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. 
If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - example: "gpt-4o" - anyOf: - - type: string - - type: string + properties: + reason: + type: string + description: The reason the message is incomplete. enum: [ - "gpt-4o", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", + "content_filter", + "max_tokens", + "run_cancelled", + "run_expired", + "run_failed", ] - x-oaiTypeLabel: string nullable: true - instructions: - description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. - type: string + required: + - reason + completed_at: + description: The Unix timestamp (in seconds) for when the message was completed. + type: integer nullable: true - additional_instructions: - description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. - type: string + incomplete_at: + description: The Unix timestamp (in seconds) for when the message was marked as incomplete. + type: integer nullable: true - additional_messages: - description: Adds additional messages to the thread before creating the run. + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: [ "user", "assistant" ] + content: + description: The content of the message in array of text and/or images. type: array items: - $ref: "#/components/schemas/CreateMessageRequest" + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageContentTextObject" + - $ref: "#/components/schemas/MessageContentRefusalObject" + x-oaiExpandable: true + assistant_id: + description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. + type: string nullable: true - tools: - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + run_id: + description: The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. + type: string nullable: true + attachments: type: array - maxItems: 20 items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. 
+ type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they were added to. + nullable: true metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *run_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *run_top_p_description - stream: - type: boolean - nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. - max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - max_completion_tokens: - type: integer - nullable: true + required: + - id + - object + - created_at + - thread_id + - status + - incomplete_details + - completed_at + - incomplete_at + - role + - content + - assistant_id + - run_id + - attachments + - metadata + x-oaiMeta: + name: The message object + beta: true + example: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1698983503, + "thread_id": "thread_abc123", + "role": "assistant", + "content": [ + { + "type": "text", + "text": { + "value": "Hi! How can I help you today?", + "annotations": [] + } + } + ], + "assistant_id": "asst_abc123", + "run_id": "run_abc123", + "attachments": [], + "metadata": {} + } + + MessageDeltaObject: + type: object + title: Message delta object + description: | + Represents a message delta i.e. any changed fields on a message during streaming. + properties: + id: + description: The identifier of the message, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.message.delta`. + type: string + enum: [ "thread.message.delta" ] + delta: + description: The delta containing the fields that have changed on the Message. + type: object + properties: + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: [ "user", "assistant" ] + content: + description: The content of the message in array of text and/or images. 
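Because `MessageObject.content` is an array of typed parts rather than a plain string, readers have to filter by `type` before they can get at the text. A small helper sketch over a decoded message map, fed with an abbreviated version of the example JSON from the schema above:

```dart
import 'dart:convert';

/// Extracts the plain-text values from a decoded MessageObject, skipping
/// image_file, image_url and refusal parts.
List<String> textParts(Map<String, dynamic> message) => [
      for (final part in message['content'] as List)
        if (part['type'] == 'text') part['text']['value'] as String,
    ];

void main() {
  // Abbreviated from the x-oaiMeta example of the message object.
  final message = jsonDecode('''
  {"id": "msg_abc123",
   "content": [{"type": "text",
                "text": {"value": "Hi! How can I help you today?",
                         "annotations": []}}]}
  ''') as Map<String, dynamic>;
  print(textParts(message)); // [Hi! How can I help you today?]
}
```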
+ type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" + - $ref: "#/components/schemas/MessageDeltaContentTextObject" + - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" + - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" + x-oaiExpandable: true + required: + - id + - object + - delta + x-oaiMeta: + name: The message delta object + beta: true + example: | + { + "id": "msg_123", + "object": "thread.message.delta", + "delta": { + "content": [ + { + "index": 0, + "type": "text", + "text": { "value": "Hello", "annotations": [] } + } + ] + } + } + + CreateMessageRequest: + type: object + additionalProperties: false + required: + - role + - content + properties: + role: + type: string + enum: [ "user", "assistant" ] description: | - The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" + The role of the entity that is creating the message. Allowed values include: + - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. + - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation. + content: + oneOf: + - type: string + description: The text contents of the message. + title: Text content + - type: array + description: An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview). + title: Array of content parts + items: + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageRequestContentTextObject" + x-oaiExpandable: true + minItems: 1 + x-oaiExpandable: true + attachments: + type: array + items: + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they should be added to. 
+ required: + - file_id + - tools nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map nullable: true - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + + ModifyMessageRequest: + type: object + additionalProperties: false + properties: + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map nullable: true - required: - - thread_id - - assistant_id - ListRunsResponse: + + DeleteMessageResponse: type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: [ thread.message.deleted ] + required: + - id + - object + - deleted + + ListMessagesResponse: properties: object: type: string @@ -11285,13 +13474,13 @@ components: data: type: array items: - $ref: "#/components/schemas/RunObject" + $ref: "#/components/schemas/MessageObject" first_id: type: string - example: "run_abc123" + example: "msg_abc123" last_id: type: string - example: "run_abc456" + example: "msg_abc123" has_more: type: boolean example: false @@ -11301,729 +13490,505 @@ components: - first_id - last_id - has_more - ModifyRunRequest: - type: object - additionalProperties: false - properties: - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - SubmitToolOutputsRunRequest: - type: object - additionalProperties: false - properties: - tool_outputs: - description: A list of tools for which the outputs are being submitted. - type: array - items: - type: object - properties: - tool_call_id: - type: string - description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. - output: - type: string - description: The output of the tool call to be submitted to continue the run. - stream: - type: boolean - nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. - required: - - tool_outputs - RunToolCallObject: + MessageContentImageFileObject: + title: Image file type: object - description: Tool call objects + description: References an image [File](/docs/api-reference/files) in the content of a message. properties: - id: - type: string - description: The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. type: + description: Always `image_file`. type: string - description: The type of tool call the output is required for. For now, this is always `function`. - enum: [ "function" ] - function: + enum: [ "image_file" ] + image_file: type: object - description: The function definition. properties: - name: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. type: string - description: The name of the function. - arguments: + detail: type: string - description: The arguments that the model expects you to pass to the function. 
- required: - - name - - arguments - required: - - id - - type - - function - - CreateThreadAndRunRequest: - type: object - additionalProperties: false - properties: - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. - type: string - thread: - $ref: "#/components/schemas/CreateThreadRequest" - description: If no thread is provided, an empty thread will be created. - model: - description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - example: "gpt-4o" - anyOf: - - type: string - - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - nullable: true - instructions: - description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. - type: string - nullable: true - tools: - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - nullable: true - type: array - maxItems: 20 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - tool_resources: - type: object - description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *run_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *run_top_p_description - stream: - type: boolean - nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. 
- max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - max_completion_tokens: - type: integer - nullable: true - description: | - The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" - nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" - nullable: true - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true + description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + enum: [ "auto", "low", "high" ] + default: "auto" + required: + - file_id required: - - thread_id - - assistant_id + - type + - image_file - ThreadObject: + MessageDeltaContentImageFileObject: + title: Image file type: object - title: Thread - description: Represents a thread that contains [messages](/docs/api-reference/messages). + description: References an image [File](/docs/api-reference/files) in the content of a message. properties: - id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `thread`. - type: string - enum: [ "thread" ] - created_at: - description: The Unix timestamp (in seconds) for when the thread was created. + index: type: integer - tool_resources: + description: The index of the content part in the message. + type: + description: Always `image_file`. + type: string + enum: [ "image_file" ] + image_file: type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. 
+ type: string + detail: + type: string + description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + enum: [ "auto", "low", "high" ] + default: "auto" required: - - id - - object - - created_at - - tool_resources - - metadata - x-oaiMeta: - name: The thread object - beta: true - example: | - { - "id": "thread_abc123", - "object": "thread", - "created_at": 1698107661, - "metadata": {} - } + - index + - type - CreateThreadRequest: + MessageContentImageUrlObject: + title: Image URL type: object - additionalProperties: false + description: References an image URL in the content of a message. properties: - messages: - description: A list of [messages](/docs/api-reference/messages) to start the thread with. - type: array - items: - $ref: "#/components/schemas/CreateMessageRequest" - tool_resources: + type: + type: string + enum: [ "image_url" ] + description: The type of the content part. + image_url: type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: string - vector_stores: - type: array - description: | - A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. - maxItems: 10000 - items: - type: string - chunking_strategy: - # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly - type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - oneOf: - - type: object - title: Auto Chunking Strategy - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - additionalProperties: false - properties: - type: - type: string - description: Always `auto`. - enum: [ "auto" ] - required: - - type - - type: object - title: Static Chunking Strategy - additionalProperties: false - properties: - type: - type: string - description: Always `static`. - enum: [ "static" ] - static: - type: object - additionalProperties: false - properties: - max_chunk_size_tokens: - type: integer - minimum: 100 - maximum: 4096 - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: | - The number of tokens that overlap between chunks. 
The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - required: - - type - - static - x-oaiExpandable: true - metadata: - type: object - description: | - Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - x-oaiExpandable: true - oneOf: - - required: [ vector_store_ids ] - - required: [ vector_stores ] - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + url: + type: string + description: "The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." + format: uri + detail: + type: string + description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` + enum: [ "auto", "low", "high" ] + default: "auto" + required: + - url + required: + - type + - image_url - ModifyThreadRequest: + MessageDeltaContentImageUrlObject: + title: Image URL type: object - additionalProperties: false + description: References an image URL in the content of a message. properties: - tool_resources: - type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `image_url`. + type: string + enum: [ "image_url" ] + image_url: type: object - x-oaiTypeLabel: map - nullable: true + properties: + url: + description: "The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." + type: string + detail: + type: string + description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. + enum: [ "auto", "low", "high" ] + default: "auto" + required: + - index + - type - DeleteThreadResponse: + MessageContentTextObject: + title: Text type: object + description: The text content that is part of a message. properties: - id: - type: string - deleted: - type: boolean - object: + type: + description: Always `text`. type: string - enum: [ thread.deleted ] + enum: [ "text" ] + text: + type: object + properties: + value: + description: The data that makes up the text. 
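An illustrative `image_url` content part matching the Image URL schema defined above; the URL is a placeholder and must point to one of the supported image types (jpeg, jpg, png, gif, webp):

    {
      "type": "image_url",
      "image_url": {
        "url": "https://example.com/diagram.png",
        "detail": "low"
      }
    }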
+ type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - value + - annotations required: - - id - - object - - deleted + - type + - text - ListThreadsResponse: + MessageContentRefusalObject: + title: Refusal + type: object + description: The refusal content generated by the assistant. properties: - object: - type: string - example: "list" - data: - type: array - items: - $ref: "#/components/schemas/ThreadObject" - first_id: + type: + description: Always `refusal`. type: string - example: "asst_abc123" - last_id: + enum: [ "refusal" ] + refusal: type: string - example: "asst_abc456" - has_more: - type: boolean - example: false + nullable: false required: - - object - - data - - first_id - - last_id - - has_more + - type + - refusal - MessageObject: + MessageRequestContentTextObject: + title: Text type: object - title: The message object - description: Represents a message within a [thread](/docs/api-reference/threads). + description: The text content that is part of a message. properties: - id: - description: The identifier, which can be referenced in API endpoints. + type: + description: Always `text`. type: string - object: - description: The object type, which is always `thread.message`. + enum: [ "text" ] + text: type: string - enum: [ "thread.message" ] - created_at: - description: The Unix timestamp (in seconds) for when the message was created. - type: integer - thread_id: - description: The [thread](/docs/api-reference/threads) ID that this message belongs to. + description: Text content to be sent to the model + required: + - type + - text + + MessageContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + properties: + type: + description: Always `file_citation`. type: string - status: - description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. + enum: [ "file_citation" ] + text: + description: The text in the message content that needs to be replaced. type: string - enum: [ "in_progress", "incomplete", "completed" ] - incomplete_details: - description: On an incomplete message, details about why the message is incomplete. + file_citation: type: object properties: - reason: + file_id: + description: The ID of the specific File the citation is from. type: string - description: The reason the message is incomplete. - enum: - [ - "content_filter", - "max_tokens", - "run_cancelled", - "run_expired", - "run_failed", - ] - nullable: true required: - - reason - completed_at: - description: The Unix timestamp (in seconds) for when the message was completed. + - file_id + start_index: type: integer - nullable: true - incomplete_at: - description: The Unix timestamp (in seconds) for when the message was marked as incomplete. + minimum: 0 + end_index: type: integer - nullable: true - role: - description: The entity that produced the message. One of `user` or `assistant`. - type: string - enum: [ "user", "assistant" ] - content: - description: The content of the message in array of text and/or images. 
- type: array - items: - oneOf: - - $ref: "#/components/schemas/MessageContentImageFileObject" - - $ref: "#/components/schemas/MessageContentImageUrlObject" - - $ref: "#/components/schemas/MessageContentTextObject" - - $ref: "#/components/schemas/MessageContentRefusalObject" - x-oaiExpandable: true - assistant_id: - description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. + minimum: 0 + required: + - type + - text + - file_citation + - start_index + - end_index + + MessageContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + properties: + type: + description: Always `file_path`. type: string - nullable: true - run_id: - description: The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. + enum: [ "file_path" ] + text: + description: The text in the message content that needs to be replaced. type: string - nullable: true - attachments: - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - description: The tools to add this file to. - type: array - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" - x-oaiExpandable: true - description: A list of files attached to the message, and the tools they were added to. - nullable: true - metadata: - description: *metadata_description + file_path: type: object - x-oaiTypeLabel: map - nullable: true + properties: + file_id: + description: The ID of the file that was generated. + type: string + required: + - file_id + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 required: - - id - - object - - created_at - - thread_id - - status - - incomplete_details - - completed_at - - incomplete_at - - role - - content - - assistant_id - - run_id - - attachments - - metadata - x-oaiMeta: - name: The message object - beta: true - example: | - { - "id": "msg_abc123", - "object": "thread.message", - "created_at": 1698983503, - "thread_id": "thread_abc123", - "role": "assistant", - "content": [ - { - "type": "text", - "text": { - "value": "Hi! How can I help you today?", - "annotations": [] - } - } - ], - "assistant_id": "asst_abc123", - "run_id": "run_abc123", - "attachments": [], - "metadata": {} - } + - type + - text + - file_path + - start_index + - end_index - MessageDeltaObject: + MessageDeltaContentTextObject: + title: Text type: object - title: Message delta object - description: | - Represents a message delta i.e. any changed fields on a message during streaming. + description: The text content that is part of a message. properties: - id: - description: The identifier of the message, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `thread.message.delta`. + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `text`. type: string - enum: [ "thread.message.delta" ] - delta: - description: The delta containing the fields that have changed on the Message. + enum: [ "text" ] + text: type: object properties: - role: - description: The entity that produced the message. 
One of `user` or `assistant`. + value: + description: The data that makes up the text. type: string - enum: [ "user", "assistant" ] - content: - description: The content of the message in array of text and/or images. + annotations: type: array items: oneOf: - - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" - - $ref: "#/components/schemas/MessageDeltaContentTextObject" - - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" - - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" x-oaiExpandable: true required: - - id - - object - - delta - x-oaiMeta: - name: The message delta object - beta: true - example: | - { - "id": "msg_123", - "object": "thread.message.delta", - "delta": { - "content": [ - { - "index": 0, - "type": "text", - "text": { "value": "Hello", "annotations": [] } - } - ] - } - } + - index + - type - CreateMessageRequest: + MessageDeltaContentRefusalObject: + title: Refusal type: object - additionalProperties: false + description: The refusal content that is part of a message. + properties: + index: + type: integer + description: The index of the refusal part in the message. + type: + description: Always `refusal`. + type: string + enum: [ "refusal" ] + refusal: + type: string required: - - role - - content + - index + - type + + + MessageDeltaContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. properties: - role: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_citation`. type: string - enum: [ "user", "assistant" ] - description: | - The role of the entity that is creating the message. Allowed values include: - - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. - - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation. - content: + enum: [ "file_citation" ] + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. + type: string + quote: + description: The specific quote in the file. + type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + + MessageDeltaContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_path`. + type: string + enum: [ "file_path" ] + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. 
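A sketch of a streamed text content part carrying a `file_citation` annotation, following the delta annotation schemas above; the citation marker text and the index values are illustrative, since the exact marker format is not pinned down in this hunk:

    {
      "index": 0,
      "type": "text",
      "text": {
        "value": "See the quarterly report[1] for details.",
        "annotations": [
          {
            "index": 0,
            "type": "file_citation",
            "text": "[1]",
            "file_citation": { "file_id": "file-abc123" },
            "start_index": 24,
            "end_index": 27
          }
        ]
      }
    }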
+ type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + + RunStepObject: + type: object + title: Run steps + description: | + Represents a step in execution of a run. + properties: + id: + description: The identifier of the run step, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.run.step`. + type: string + enum: [ "thread.run.step" ] + created_at: + description: The Unix timestamp (in seconds) for when the run step was created. + type: integer + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + type: string + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was run. + type: string + run_id: + description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + type: string + type: + description: The type of run step, which can be either `message_creation` or `tool_calls`. + type: string + enum: [ "message_creation", "tool_calls" ] + status: + description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. + type: string + enum: [ "in_progress", "cancelled", "failed", "completed", "expired" ] + step_details: + type: object + description: The details of the run step. oneOf: - - type: string - description: The text contents of the message. - title: Text content - - type: array - description: An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview). - title: Array of content parts - items: - oneOf: - - $ref: "#/components/schemas/MessageContentImageFileObject" - - $ref: "#/components/schemas/MessageContentImageUrlObject" - - $ref: "#/components/schemas/MessageRequestContentTextObject" - x-oaiExpandable: true - minItems: 1 + - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" x-oaiExpandable: true - attachments: - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - description: The tools to add this file to. - type: array - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" - x-oaiExpandable: true - description: A list of files attached to the message, and the tools they should be added to. + last_error: + type: object + description: The last error associated with this run step. Will be `null` if there are no errors. + nullable: true + properties: + code: + type: string + description: One of `server_error` or `rate_limit_exceeded`. + enum: [ "server_error", "rate_limit_exceeded" ] + message: + type: string + description: A human-readable description of the error. required: - - file_id - - tools + - code + - message + expired_at: + description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + type: integer nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map + cancelled_at: + description: The Unix timestamp (in seconds) for when the run step was cancelled. 
+ type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run step failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run step completed. + type: integer nullable: true - - ModifyMessageRequest: - type: object - additionalProperties: false - properties: metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true + usage: + $ref: "#/components/schemas/RunStepCompletionUsage" + required: + - id + - object + - created_at + - assistant_id + - thread_id + - run_id + - type + - status + - step_details + - last_error + - expired_at + - cancelled_at + - failed_at + - completed_at + - metadata + - usage + x-oaiMeta: + name: The run step object + beta: true + example: *run_step_object_example - DeleteMessageResponse: + RunStepDeltaObject: type: object + title: Run step delta object + description: | + Represents a run step delta i.e. any changed fields on a run step during streaming. properties: id: + description: The identifier of the run step, which can be referenced in API endpoints. type: string - deleted: - type: boolean object: + description: The object type, which is always `thread.run.step.delta`. type: string - enum: [ thread.message.deleted ] + enum: [ "thread.run.step.delta" ] + delta: + description: The delta containing the fields that have changed on the run step. + type: object + properties: + step_details: + type: object + description: The details of the run step. + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" + x-oaiExpandable: true required: - id - object - - deleted + - delta + x-oaiMeta: + name: The run step delta object + beta: true + example: | + { + "id": "step_123", + "object": "thread.run.step.delta", + "delta": { + "step_details": { + "type": "tool_calls", + "tool_calls": [ + { + "index": 0, + "id": "call_123", + "type": "code_interpreter", + "code_interpreter": { "input": "", "outputs": [] } + } + ] + } + } + } - ListMessagesResponse: + ListRunStepsResponse: properties: object: type: string @@ -12031,13 +13996,13 @@ components: data: type: array items: - $ref: "#/components/schemas/MessageObject" + $ref: "#/components/schemas/RunStepObject" first_id: type: string - example: "msg_abc123" + example: "step_abc123" last_id: type: string - example: "msg_abc123" + example: "step_abc456" has_more: type: boolean example: false @@ -12048,504 +14013,784 @@ components: - last_id - has_more - MessageContentImageFileObject: - title: Image file + RunStepDetailsMessageCreationObject: + title: Message creation type: object - description: References an image [File](/docs/api-reference/files) in the content of a message. + description: Details of the message creation by the run step. properties: type: - description: Always `image_file`. + description: Always `message_creation`. type: string - enum: [ "image_file" ] - image_file: + enum: [ "message_creation" ] + message_creation: type: object properties: - file_id: - description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. + message_id: type: string - detail: + description: The ID of the message that was created by this run step. 
+ required: + - message_id + required: + - type + - message_creation + + RunStepDeltaStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. + properties: + type: + description: Always `message_creation`. + type: string + enum: [ "message_creation" ] + message_creation: + type: object + properties: + message_id: type: string - description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] - default: "auto" + description: The ID of the message that was created by this run step. + required: + - type + + RunStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: [ "tool_calls" ] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + - tool_calls + + RunStepDeltaStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: [ "tool_calls" ] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + + RunStepDetailsToolCallsCodeObject: + title: Code Interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + enum: [ "code_interpreter" ] + code_interpreter: + type: object + description: The Code Interpreter tool call definition. required: - - file_id + - input + - outputs + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true required: + - id - type - - image_file + - code_interpreter - MessageDeltaContentImageFileObject: - title: Image file + RunStepDeltaStepDetailsToolCallsCodeObject: + title: Code interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. 
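For reference, a `step_details` value for a Code Interpreter tool call, mirroring the shape of the run step delta example earlier in this hunk; the call ID and program text are placeholders, and the `logs` output type is the one defined just below:

    {
      "type": "tool_calls",
      "tool_calls": [
        {
          "id": "call_abc123",
          "type": "code_interpreter",
          "code_interpreter": {
            "input": "print(2 + 2)",
            "outputs": [ { "type": "logs", "logs": "4" } ]
          }
        }
      ]
    }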
+ properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + enum: [ "code_interpreter" ] + code_interpreter: + type: object + description: The Code Interpreter tool call definition. + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true + required: + - index + - type + + RunStepDetailsToolCallsCodeOutputLogsObject: + title: Code Interpreter log output + type: object + description: Text output from the Code Interpreter tool call as part of a run step. + properties: + type: + description: Always `logs`. + type: string + enum: [ "logs" ] + logs: + type: string + description: The text output from the Code Interpreter tool call. + required: + - type + - logs + + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: + title: Code interpreter log output type: object - description: References an image [File](/docs/api-reference/files) in the content of a message. + description: Text output from the Code Interpreter tool call as part of a run step. properties: index: type: integer - description: The index of the content part in the message. + description: The index of the output in the outputs array. type: - description: Always `image_file`. + description: Always `logs`. type: string - enum: [ "image_file" ] - image_file: - type: object - properties: - file_id: - description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. - type: string - detail: - type: string - description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] - default: "auto" + enum: [ "logs" ] + logs: + type: string + description: The text output from the Code Interpreter tool call. required: - index - type - MessageContentImageUrlObject: - title: Image URL + RunStepDetailsToolCallsCodeOutputImageObject: + title: Code Interpreter image output type: object - description: References an image URL in the content of a message. properties: type: + description: Always `image`. type: string - enum: [ "image_url" ] - description: The type of the content part. - image_url: + enum: [ "image" ] + image: type: object properties: - url: - type: string - description: "The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." - format: uri - detail: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. type: string - description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. 
Default value is `auto` - enum: [ "auto", "low", "high" ] - default: "auto" required: - - url + - file_id required: - type - - image_url + - image - MessageDeltaContentImageUrlObject: - title: Image URL + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: + title: Code interpreter image output type: object - description: References an image URL in the content of a message. properties: index: type: integer - description: The index of the content part in the message. + description: The index of the output in the outputs array. type: - description: Always `image_url`. + description: Always `image`. type: string - enum: [ "image_url" ] - image_url: + enum: [ "image" ] + image: type: object properties: - url: - description: "The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." - type: string - detail: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. type: string - description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] - default: "auto" required: - index - type - MessageContentTextObject: - title: Text + RunStepDetailsToolCallsFileSearchObject: + title: File search tool call type: object - description: The text content that is part of a message. properties: + id: + type: string + description: The ID of the tool call object. type: - description: Always `text`. type: string - enum: [ "text" ] - text: + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + enum: [ "file_search" ] + file_search: type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map properties: - value: - description: The data that makes up the text. - type: string - annotations: + ranking_options: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject" + results: type: array + description: The results of the file search. items: - oneOf: - - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" - - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" - x-oaiExpandable: true - required: - - value - - annotations + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" required: + - id - type - - text + - file_search - MessageContentRefusalObject: - title: Refusal + RunStepDetailsToolCallsFileSearchRankingOptionsObject: + title: File search tool call ranking options type: object - description: The refusal content generated by the assistant. + description: The ranking options for the file search. properties: - type: - description: Always `refusal`. - type: string - enum: [ "refusal" ] - refusal: + ranker: type: string - nullable: false + description: The ranker used for the file search. + enum: [ "default_2024_08_21" ] + score_threshold: + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 required: - - type - - refusal + - ranker + - score_threshold - MessageRequestContentTextObject: - title: Text + RunStepDetailsToolCallsFileSearchResultObject: + title: File search tool call result type: object - description: The text content that is part of a message. + description: A result instance of the file search. + x-oaiTypeLabel: map properties: - type: - description: Always `text`. 
+ file_id: type: string - enum: [ "text" ] - text: + description: The ID of the file that result was found in. + file_name: type: string - description: Text content to be sent to the model + description: The name of the file that result was found in. + score: + type: number + description: The score of the result. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + content: + type: array + description: The content of the result that was found. The content is only included if requested via the include query parameter. + items: + type: object + properties: + type: + type: string + description: The type of the content. + enum: [ "text" ] + text: + type: string + description: The text content of the file. required: - - type - - text + - file_id + - file_name + - score - MessageContentTextAnnotationsFileCitationObject: - title: File citation + RunStepDeltaStepDetailsToolCallsFileSearchObject: + title: File search tool call type: object - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. properties: - type: - description: Always `file_citation`. + index: + type: integer + description: The index of the tool call in the tool calls array. + id: type: string - enum: [ "file_citation" ] - text: - description: The text in the message content that needs to be replaced. + description: The ID of the tool call object. + type: type: string - file_citation: + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + enum: [ "file_search" ] + file_search: type: object - properties: - file_id: - description: The ID of the specific File the citation is from. - type: string - required: - - file_id - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map required: + - index - type - - text - - file_citation - - start_index - - end_index + - file_search - MessageContentTextAnnotationsFilePathObject: - title: File path + RunStepDetailsToolCallsFunctionObject: type: object - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + title: Function tool call properties: - type: - description: Always `file_path`. + id: type: string - enum: [ "file_path" ] - text: - description: The text in the message content that needs to be replaced. + description: The ID of the tool call object. + type: type: string - file_path: + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: [ "function" ] + function: type: object + description: The definition of the function that was called. properties: - file_id: - description: The ID of the file that was generated. + name: + type: string + description: The name of the function. + arguments: type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. 
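An illustrative `file_search` tool call combining the ranking options and result schemas defined above; the IDs, file name, and scores are placeholders (scores must be floating point numbers between 0 and 1):

    {
      "id": "call_abc123",
      "type": "file_search",
      "file_search": {
        "ranking_options": {
          "ranker": "default_2024_08_21",
          "score_threshold": 0.5
        },
        "results": [
          {
            "file_id": "file-abc123",
            "file_name": "notes.txt",
            "score": 0.87
          }
        ]
      }
    }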
+ nullable: true required: - - file_id - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + - name + - arguments + - output required: + - id - type - - text - - file_path - - start_index - - end_index - - MessageDeltaContentTextObject: - title: Text + - function + + RunStepDeltaStepDetailsToolCallsFunctionObject: type: object - description: The text content that is part of a message. + title: Function tool call properties: index: type: integer - description: The index of the content part in the message. + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. type: - description: Always `text`. type: string - enum: [ "text" ] - text: + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: [ "function" ] + function: type: object + description: The definition of the function that was called. properties: - value: - description: The data that makes up the text. + name: type: string - annotations: - type: array - items: - oneOf: - - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject" - - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" - x-oaiExpandable: true + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + nullable: true required: - index - type - MessageDeltaContentRefusalObject: - title: Refusal + VectorStoreExpirationAfter: type: object - description: The refusal content that is part of a message. + title: Vector store expiration policy + description: The expiration policy for a vector store. properties: - index: - type: integer - description: The index of the refusal part in the message. - type: - description: Always `refusal`. - type: string - enum: [ "refusal" ] - refusal: + anchor: + description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." type: string + enum: [ "last_active_at" ] + days: + description: The number of days after the anchor time that the vector store will expire. + type: integer + minimum: 1 + maximum: 365 required: - - index - - type - + - anchor + - days - MessageDeltaContentTextAnnotationsFileCitationObject: - title: File citation + VectorStoreObject: type: object - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + title: Vector store + description: A vector store is a collection of processed files can be used by the `file_search` tool. properties: - index: - type: integer - description: The index of the annotation in the text content part. - type: - description: Always `file_citation`. + id: + description: The identifier, which can be referenced in API endpoints. type: string - enum: [ "file_citation" ] - text: - description: The text in the message content that needs to be replaced. + object: + description: The object type, which is always `vector_store`. type: string - file_citation: + enum: [ "vector_store" ] + created_at: + description: The Unix timestamp (in seconds) for when the vector store was created. + type: integer + name: + description: The name of the vector store. 
+ type: string + usage_bytes: + description: The total number of bytes used by the files in the vector store. + type: integer + file_counts: type: object properties: - file_id: - description: The ID of the specific File the citation is from. - type: string - quote: - description: The specific quote in the file. - type: string - start_index: + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been successfully processed. + type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that were cancelled. + type: integer + total: + description: The total number of files. + type: integer + required: + - in_progress + - completed + - failed + - cancelled + - total + status: + description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. + type: string + enum: [ "expired", "in_progress", "completed" ] + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + expires_at: + description: The Unix timestamp (in seconds) for when the vector store will expire. type: integer - minimum: 0 - end_index: + nullable: true + last_active_at: + description: The Unix timestamp (in seconds) for when the vector store was last active. type: integer - minimum: 0 + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - - index - - type + - id + - object + - usage_bytes + - created_at + - status + - last_active_at + - name + - file_counts + - metadata + x-oaiMeta: + name: The vector store object + beta: true + example: | + { + "id": "vs_123", + "object": "vector_store", + "created_at": 1698107661, + "usage_bytes": 123456, + "last_active_at": 1698107661, + "name": "my_vector_store", + "status": "completed", + "file_counts": { + "in_progress": 0, + "completed": 100, + "cancelled": 0, + "failed": 0, + "total": 100 + }, + "metadata": {}, + "last_used_at": 1698107661 + } - MessageDeltaContentTextAnnotationsFilePathObject: - title: File path + CreateVectorStoreRequest: type: object - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + additionalProperties: false properties: - index: - type: integer - description: The index of the annotation in the text content part. - type: - description: Always `file_path`. + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + type: array + maxItems: 500 + items: + type: string + name: + description: The name of the vector store. type: string - enum: [ "file_path" ] - text: - description: The text in the message content that needs to be replaced. + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + chunking_strategy: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. 
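A minimal CreateVectorStoreRequest body consistent with the fields listed here; the name and file IDs are placeholders, and the expiration policy follows the VectorStoreExpirationAfter schema above (anchor `last_active_at`, between 1 and 365 days):

    {
      "name": "Support FAQ",
      "file_ids": [ "file-abc123", "file-abc456" ],
      "expires_after": { "anchor": "last_active_at", "days": 7 }
    }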
+ oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + + UpdateVectorStoreRequest: + type: object + additionalProperties: false + properties: + name: + description: The name of the vector store. + type: string + nullable: true + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + + ListVectorStoresResponse: + properties: + object: + type: string + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/VectorStoreObject" + first_id: + type: string + example: "vs_abc123" + last_id: + type: string + example: "vs_abc456" + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + + DeleteVectorStoreResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: type: string - file_path: - type: object - properties: - file_id: - description: The ID of the file that was generated. - type: string - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + enum: [ vector_store.deleted ] required: - - index - - type + - id + - object + - deleted - RunStepObject: + VectorStoreFileObject: type: object - title: Run steps - description: | - Represents a step in execution of a run. + title: Vector store files + description: A list of files attached to a vector store. properties: id: - description: The identifier of the run step, which can be referenced in API endpoints. + description: The identifier, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `thread.run.step`. + description: The object type, which is always `vector_store.file`. type: string - enum: [ "thread.run.step" ] + enum: [ "vector_store.file" ] + usage_bytes: + description: The total vector store usage in bytes. Note that this may be different from the original file size. + type: integer created_at: - description: The Unix timestamp (in seconds) for when the run step was created. + description: The Unix timestamp (in seconds) for when the vector store file was created. type: integer - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. - type: string - thread_id: - description: The ID of the [thread](/docs/api-reference/threads) that was run. - type: string - run_id: - description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. - type: string - type: - description: The type of run step, which can be either `message_creation` or `tool_calls`. + vector_store_id: + description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. type: string - enum: [ "message_creation", "tool_calls" ] status: - description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. + description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. 
type: string - enum: [ "in_progress", "cancelled", "failed", "completed", "expired" ] - step_details: - type: object - description: The details of the run step. - oneOf: - - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" - x-oaiExpandable: true + enum: [ "in_progress", "completed", "cancelled", "failed" ] last_error: type: object - description: The last error associated with this run step. Will be `null` if there are no errors. + description: The last error associated with this vector store file. Will be `null` if there are no errors. nullable: true properties: code: type: string description: One of `server_error` or `rate_limit_exceeded`. - enum: [ "server_error", "rate_limit_exceeded" ] + enum: + [ + "server_error", + "unsupported_file", + "invalid_file", + ] message: type: string description: A human-readable description of the error. required: - code - message - expired_at: - description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. - type: integer - nullable: true - cancelled_at: - description: The Unix timestamp (in seconds) for when the run step was cancelled. - type: integer - nullable: true - failed_at: - description: The Unix timestamp (in seconds) for when the run step failed. - type: integer - nullable: true - completed_at: - description: The Unix timestamp (in seconds) for when the run step completed. - type: integer - nullable: true - metadata: - description: *metadata_description + chunking_strategy: type: object - x-oaiTypeLabel: map - nullable: true - usage: - $ref: "#/components/schemas/RunStepCompletionUsage" + description: The strategy used to chunk the file. + oneOf: + - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" + - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" + x-oaiExpandable: true required: - id - object + - usage_bytes - created_at - - assistant_id - - thread_id - - run_id - - type + - vector_store_id - status - - step_details - last_error - - expired_at - - cancelled_at - - failed_at - - completed_at - - metadata - - usage x-oaiMeta: - name: The run step object + name: The vector store file object beta: true - example: *run_step_object_example + example: | + { + "id": "file-abc123", + "object": "vector_store.file", + "usage_bytes": 1234, + "created_at": 1698107661, + "vector_store_id": "vs_abc123", + "status": "completed", + "last_error": null, + "chunking_strategy": { + "type": "static", + "static": { + "max_chunk_size_tokens": 800, + "chunk_overlap_tokens": 400 + } + } + } - RunStepDeltaObject: + OtherChunkingStrategyResponseParam: type: object - title: Run step delta object - description: | - Represents a run step delta i.e. any changed fields on a run step during streaming. + title: Other Chunking Strategy + description: This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. + additionalProperties: false properties: - id: - description: The identifier of the run step, which can be referenced in API endpoints. + type: type: string - object: - description: The object type, which is always `thread.run.step.delta`. + description: Always `other`. 
+ enum: [ "other" ] + required: + - type + + StaticChunkingStrategyResponseParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: type: string - enum: [ "thread.run.step.delta" ] - delta: - description: The delta containing the fields that have changed on the run step. - type: object - properties: - step_details: - type: object - description: The details of the run step. - oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" - x-oaiExpandable: true + description: Always `static`. + enum: [ "static" ] + static: + $ref: "#/components/schemas/StaticChunkingStrategy" required: - - id - - object - - delta - x-oaiMeta: - name: The run step delta object - beta: true - example: | - { - "id": "step_123", - "object": "thread.run.step.delta", - "delta": { - "step_details": { - "type": "tool_calls", - "tool_calls": [ - { - "index": 0, - "id": "call_123", - "type": "code_interpreter", - "code_interpreter": { "input": "", "outputs": [] } - } - ] - } - } - } + - type + - static + + StaticChunkingStrategy: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + + AutoChunkingStrategyRequestParam: + type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: [ "auto" ] + required: + - type + + StaticChunkingStrategyRequestParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + + ChunkingStrategyRequestParam: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true + + CreateVectorStoreFileRequest: + type: object + additionalProperties: false + properties: + file_id: + description: A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. 
+ type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" + required: + - file_id - ListRunStepsResponse: + ListVectorStoreFilesResponse: properties: object: type: string @@ -12553,13 +14798,13 @@ components: data: type: array items: - $ref: "#/components/schemas/RunStepObject" + $ref: "#/components/schemas/VectorStoreFileObject" first_id: type: string - example: "step_abc123" + example: "file-abc123" last_id: type: string - example: "step_abc456" + example: "file-abc456" has_more: type: boolean example: false @@ -12570,801 +14815,1250 @@ components: - last_id - has_more - RunStepDetailsMessageCreationObject: - title: Message creation + DeleteVectorStoreFileResponse: type: object - description: Details of the message creation by the run step. properties: - type: - description: Always `message_creation`. + id: type: string - enum: [ "message_creation" ] - message_creation: + deleted: + type: boolean + object: + type: string + enum: [ vector_store.file.deleted ] + required: + - id + - object + - deleted + + VectorStoreFileBatchObject: + type: object + title: Vector store file batch + description: A batch of files attached to a vector store. + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `vector_store.file_batch`. + type: string + enum: [ "vector_store.files_batch" ] + created_at: + description: The Unix timestamp (in seconds) for when the vector store files batch was created. + type: integer + vector_store_id: + description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. + type: string + status: + description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. + type: string + enum: [ "in_progress", "completed", "cancelled", "failed" ] + file_counts: type: object properties: - message_id: + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been processed. + type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that where cancelled. + type: integer + total: + description: The total number of files. + type: integer + required: + - in_progress + - completed + - cancelled + - failed + - total + required: + - id + - object + - created_at + - vector_store_id + - status + - file_counts + x-oaiMeta: + name: The vector store files batch object + beta: true + example: | + { + "id": "vsfb_123", + "object": "vector_store.files_batch", + "created_at": 1698107661, + "vector_store_id": "vs_abc123", + "status": "completed", + "file_counts": { + "in_progress": 0, + "completed": 100, + "failed": 0, + "cancelled": 0, + "total": 100 + } + } + + CreateVectorStoreFileBatchRequest: + type: object + additionalProperties: false + properties: + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + type: array + minItems: 1 + maxItems: 500 + items: + type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" + required: + - file_ids + + AssistantStreamEvent: + description: | + Represents an event emitted when streaming a Run. 
+ + Each event in a server-sent events stream has an `event` and `data` property: + + ``` + event: thread.created + data: {"id": "thread_123", "object": "thread", ...} + ``` + + We emit events whenever a new object is created, transitions to a new state, or is being + streamed in parts (deltas). For example, we emit `thread.run.created` when a new run + is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses + to create a message during a run, we emit a `thread.message.created event`, a + `thread.message.in_progress` event, many `thread.message.delta` events, and finally a + `thread.message.completed` event. + + We may add additional events over time, so we recommend handling unknown events gracefully + in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to + integrate the Assistants API with streaming. + oneOf: + - $ref: "#/components/schemas/ThreadStreamEvent" + - $ref: "#/components/schemas/RunStreamEvent" + - $ref: "#/components/schemas/RunStepStreamEvent" + - $ref: "#/components/schemas/MessageStreamEvent" + - $ref: "#/components/schemas/ErrorEvent" + - $ref: "#/components/schemas/DoneEvent" + x-oaiMeta: + name: Assistant stream events + beta: true + + ThreadStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: [ "thread.created" ] + data: + $ref: "#/components/schemas/ThreadObject" + required: + - event + - data + description: Occurs when a new [thread](/docs/api-reference/threads/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" + + RunStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: [ "thread.run.created" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a new [run](/docs/api-reference/runs/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.queued" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.in_progress" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.requires_action" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.completed" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is completed. 
+ x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.incomplete" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.failed" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) fails. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: type: string - description: The ID of the message that was created by this run step. + enum: [ "thread.run.cancelling" ] + data: + $ref: "#/components/schemas/RunObject" required: - - message_id - required: - - type - - message_creation - - RunStepDeltaStepDetailsMessageCreationObject: - title: Message creation - type: object - description: Details of the message creation by the run step. - properties: - type: - description: Always `message_creation`. - type: string - enum: [ "message_creation" ] - message_creation: - type: object + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object properties: - message_id: + event: type: string - description: The ID of the message that was created by this run step. - required: - - type - - RunStepDetailsToolCallsObject: - title: Tool calls - type: object - description: Details of the tool call. - properties: - type: - description: Always `tool_calls`. - type: string - enum: [ "tool_calls" ] - tool_calls: - type: array - description: | - An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - items: - oneOf: - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" - x-oaiExpandable: true - required: - - type - - tool_calls - - RunStepDeltaStepDetailsToolCallsObject: - title: Tool calls - type: object - description: Details of the tool call. - properties: - type: - description: Always `tool_calls`. - type: string - enum: [ "tool_calls" ] - tool_calls: - type: array - description: | - An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - items: - oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" - x-oaiExpandable: true - required: - - type - - RunStepDetailsToolCallsCodeObject: - title: Code Interpreter tool call - type: object - description: Details of the Code Interpreter tool call the run step was involved in. - properties: - id: - type: string - description: The ID of the tool call. - type: - type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. 
- enum: [ "code_interpreter" ] - code_interpreter: - type: object - description: The Code Interpreter tool call definition. + enum: [ "thread.run.cancelled" ] + data: + $ref: "#/components/schemas/RunObject" required: - - input - - outputs + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object properties: - input: + event: type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. - items: - type: object - oneOf: - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" - x-oaiExpandable: true - required: - - id - - type - - code_interpreter + enum: [ "thread.run.expired" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - RunStepDeltaStepDetailsToolCallsCodeObject: - title: Code interpreter tool call - type: object - description: Details of the Code Interpreter tool call the run step was involved in. - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call. - type: - type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: [ "code_interpreter" ] - code_interpreter: - type: object - description: The Code Interpreter tool call definition. + RunStepStreamEvent: + oneOf: + - type: object properties: - input: + event: type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. - items: - type: object - oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject" - x-oaiExpandable: true - required: - - index - - type - - RunStepDetailsToolCallsCodeOutputLogsObject: - title: Code Interpreter log output - type: object - description: Text output from the Code Interpreter tool call as part of a run step. - properties: - type: - description: Always `logs`. - type: string - enum: [ "logs" ] - logs: - type: string - description: The text output from the Code Interpreter tool call. - required: - - type - - logs - - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: - title: Code interpreter log output - type: object - description: Text output from the Code Interpreter tool call as part of a run step. - properties: - index: - type: integer - description: The index of the output in the outputs array. - type: - description: Always `logs`. - type: string - enum: [ "logs" ] - logs: - type: string - description: The text output from the Code Interpreter tool call. 
- required: - - index - - type - - RunStepDetailsToolCallsCodeOutputImageObject: - title: Code Interpreter image output - type: object - properties: - type: - description: Always `image`. - type: string - enum: [ "image" ] - image: - type: object + enum: [ "thread.run.step.created" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.step.in_progress" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.step.delta" ] + data: + $ref: "#/components/schemas/RunStepDeltaObject" + required: + - event + - data + description: Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" + - type: object properties: - file_id: - description: The [file](/docs/api-reference/files) ID of the image. + event: type: string + enum: [ "thread.run.step.completed" ] + data: + $ref: "#/components/schemas/RunStepObject" required: - - file_id - required: - - type - - image - - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: - title: Code interpreter image output - type: object - properties: - index: - type: integer - description: The index of the output in the outputs array. - type: - description: Always `image`. - type: string - enum: [ "image" ] - image: - type: object + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object properties: - file_id: - description: The [file](/docs/api-reference/files) ID of the image. + event: type: string - required: - - index - - type - - RunStepDetailsToolCallsFileSearchObject: - title: File search tool call - type: object - properties: - id: - type: string - description: The ID of the tool call object. - type: - type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: [ "file_search" ] - file_search: - type: object - description: For now, this is always going to be an empty object. - x-oaiTypeLabel: map + enum: [ "thread.run.step.failed" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object properties: - ranking_options: - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject" - results: - type: array - description: The results of the file search. 
- items: - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" - required: - - id - - type - - file_search - - RunStepDetailsToolCallsFileSearchRankingOptionsObject: - title: File search tool call ranking options - type: object - description: The ranking options for the file search. - properties: - ranker: - type: string - description: The ranker used for the file search. - enum: [ "default_2024_08_21" ] - score_threshold: - type: number - description: The score threshold for the file search. All values must be a floating point number between 0 and 1. - minimum: 0 - maximum: 1 - required: - - ranker - - score_threshold - - RunStepDetailsToolCallsFileSearchResultObject: - title: File search tool call result - type: object - description: A result instance of the file search. - x-oaiTypeLabel: map - properties: - file_id: - type: string - description: The ID of the file that result was found in. - file_name: - type: string - description: The name of the file that result was found in. - score: - type: number - description: The score of the result. All values must be a floating point number between 0 and 1. - minimum: 0 - maximum: 1 - content: - type: array - description: The content of the result that was found. The content is only included if requested via the include query parameter. - items: - type: object - properties: - type: - type: string - description: The type of the content. - enum: [ "text" ] - text: - type: string - description: The text content of the file. - required: - - file_id - - file_name - - score - - RunStepDeltaStepDetailsToolCallsFileSearchObject: - title: File search tool call - type: object - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call object. - type: - type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: [ "file_search" ] - file_search: - type: object - description: For now, this is always going to be an empty object. - x-oaiTypeLabel: map - required: - - index - - type - - file_search + event: + type: string + enum: [ "thread.run.step.cancelled" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.step.expired" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - RunStepDetailsToolCallsFunctionObject: - type: object - title: Function tool call - properties: - id: - type: string - description: The ID of the tool call object. - type: - type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: [ "function" ] - function: - type: object - description: The definition of the function that was called. + MessageStreamEvent: + oneOf: + - type: object properties: - name: + event: type: string - description: The name of the function. 
- arguments: + enum: [ "thread.message.created" ] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: type: string - description: The arguments passed to the function. - output: + enum: [ "thread.message.in_progress" ] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: type: string - description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. - nullable: true + enum: [ "thread.message.delta" ] + data: + $ref: "#/components/schemas/MessageDeltaObject" required: - - name - - arguments - - output - required: - - id - - type - - function + - event + - data + description: Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.message.completed" ] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.message.incomplete" ] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - RunStepDeltaStepDetailsToolCallsFunctionObject: + ErrorEvent: type: object - title: Function tool call properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call object. - type: + event: type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: [ "function" ] - function: - type: object - description: The definition of the function that was called. - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. - nullable: true + enum: [ "error" ] + data: + $ref: "#/components/schemas/Error" required: - - index - - type + - event + - data + description: Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + x-oaiMeta: + dataDescription: "`data` is an [error](/docs/guides/error-codes/api-errors)" - VectorStoreExpirationAfter: + DoneEvent: type: object - title: Vector store expiration policy - description: The expiration policy for a vector store. 
properties: - anchor: - description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." + event: type: string - enum: [ "last_active_at" ] - days: - description: The number of days after the anchor time that the vector store will expire. - type: integer - minimum: 1 - maximum: 365 + enum: [ "done" ] + data: + type: string + enum: [ "[DONE]" ] required: - - anchor - - days + - event + - data + description: Occurs when a stream ends. + x-oaiMeta: + dataDescription: "`data` is `[DONE]`" - VectorStoreObject: + Batch: type: object - title: Vector store - description: A vector store is a collection of processed files can be used by the `file_search` tool. properties: id: - description: The identifier, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `vector_store`. type: string - enum: [ "vector_store" ] + enum: [ batch ] + description: The object type, which is always `batch`. + endpoint: + type: string + description: The OpenAI API endpoint used by the batch. + + errors: + type: object + properties: + object: + type: string + description: The object type, which is always `list`. + data: + type: array + items: + type: object + properties: + code: + type: string + description: An error code identifying the error type. + message: + type: string + description: A human-readable message providing more details about the error. + param: + type: string + description: The name of the parameter that caused the error, if applicable. + nullable: true + line: + type: integer + description: The line number of the input file where the error occurred, if applicable. + nullable: true + input_file_id: + type: string + description: The ID of the input file for the batch. + completion_window: + type: string + description: The time frame within which the batch should be processed. + status: + type: string + description: The current status of the batch. + enum: + - validating + - failed + - in_progress + - finalizing + - completed + - expired + - cancelling + - cancelled + output_file_id: + type: string + description: The ID of the file containing the outputs of successfully executed requests. + error_file_id: + type: string + description: The ID of the file containing the outputs of requests with errors. created_at: - description: The Unix timestamp (in seconds) for when the vector store was created. type: integer - name: - description: The name of the vector store. - type: string - usage_bytes: - description: The total number of bytes used by the files in the vector store. + description: The Unix timestamp (in seconds) for when the batch was created. + in_progress_at: type: integer - file_counts: + description: The Unix timestamp (in seconds) for when the batch started processing. + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch will expire. + finalizing_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started finalizing. + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was completed. + failed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch failed. + expired_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch expired. + cancelling_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started cancelling. 
+ cancelled_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was cancelled. + request_counts: type: object properties: - in_progress: - description: The number of files that are currently being processed. + total: type: integer + description: Total number of requests in the batch. completed: - description: The number of files that have been successfully processed. type: integer + description: Number of requests that have been completed successfully. failed: - description: The number of files that have failed to process. - type: integer - cancelled: - description: The number of files that were cancelled. - type: integer - total: - description: The total number of files. type: integer + description: Number of requests that have failed. required: - - in_progress - - completed - - failed - - cancelled - total - status: - description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. - type: string - enum: [ "expired", "in_progress", "completed" ] - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - expires_at: - description: The Unix timestamp (in seconds) for when the vector store will expire. - type: integer - nullable: true - last_active_at: - description: The Unix timestamp (in seconds) for when the vector store was last active. - type: integer - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - required: - - id - - object - - usage_bytes - - created_at - - status - - last_active_at - - name - - file_counts - - metadata - x-oaiMeta: - name: The vector store object - beta: true - example: | - { - "id": "vs_123", - "object": "vector_store", - "created_at": 1698107661, - "usage_bytes": 123456, - "last_active_at": 1698107661, - "name": "my_vector_store", - "status": "completed", - "file_counts": { - "in_progress": 0, - "completed": 100, - "cancelled": 0, - "failed": 0, - "total": 100 - }, - "metadata": {}, - "last_used_at": 1698107661 - } - - CreateVectorStoreRequest: - type: object - additionalProperties: false - properties: - file_ids: - description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - type: array - maxItems: 500 - items: - type: string - name: - description: The name of the vector store. - type: string - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - chunking_strategy: - type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. - oneOf: - - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" - - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" - x-oaiExpandable: true + - completed + - failed + description: The request counts for different statuses within the batch. metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true + required: + - id + - object + - endpoint + - input_file_id + - completion_window + - status + - created_at + x-oaiMeta: + name: The batch object + example: *batch_object - UpdateVectorStoreRequest: + BatchRequestInput: type: object - additionalProperties: false + description: The per-line object of the batch input file properties: - name: - description: The name of the vector store. 
+ custom_id: type: string + description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. + method: + type: string + enum: [ "POST" ] + description: The HTTP method to be used for the request. Currently only `POST` is supported. + url: + type: string + description: The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. + x-oaiMeta: + name: The request input object + example: | + {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} + + BatchRequestOutput: + type: object + description: The per-line object of the batch output and error files + properties: + id: + type: string + custom_id: + type: string + description: A developer-provided per-request id that will be used to match outputs to inputs. + response: + type: object nullable: true - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - nullable: true - metadata: - description: *metadata_description + properties: + status_code: + type: integer + description: The HTTP status code of the response + request_id: + type: string + description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. + body: + type: object + x-oaiTypeLabel: map + description: The JSON body of the response + error: type: object - x-oaiTypeLabel: map nullable: true + description: For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + x-oaiMeta: + name: The request output object + example: | + {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-4o-mini", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null} - ListVectorStoresResponse: + ListBatchesResponse: + type: object properties: - object: - type: string - example: "list" data: type: array items: - $ref: "#/components/schemas/VectorStoreObject" + $ref: "#/components/schemas/Batch" first_id: type: string - example: "vs_abc123" + example: "batch_abc123" last_id: type: string - example: "vs_abc456" + example: "batch_abc456" has_more: type: boolean - example: false + object: + type: string + enum: [ list ] required: - object - data - - first_id - - last_id - has_more - DeleteVectorStoreResponse: + AuditLogActorServiceAccount: type: object + description: The service account that performed the audit logged action. properties: id: type: string - deleted: - type: boolean - object: + description: The service account id. + + AuditLogActorUser: + type: object + description: The user who performed the audit logged action. + properties: + id: type: string - enum: [ vector_store.deleted ] - required: - - id - - object - - deleted + description: The user id. + email: + type: string + description: The user email. 
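As a reference point (assumed client-side usage, not defined by the spec itself): a minimal sketch of writing `BatchRequestInput` lines to a JSONL file and matching the returned `BatchRequestOutput` lines back by `custom_id`. The field names mirror the schemas and examples above; the file names, model, and message content are illustrative only.

```python
import json

# One BatchRequestInput object per line of the input file.
batch_requests = [
    {
        "custom_id": "request-1",   # must be unique within the batch
        "method": "POST",           # only POST is currently supported
        "url": "/v1/chat/completions",
        "body": {
            "model": "gpt-4o-mini",
            "messages": [{"role": "user", "content": "What is 2+2?"}],
        },
    },
]

with open("batch_input.jsonl", "w") as f:
    for request in batch_requests:
        f.write(json.dumps(request) + "\n")

# Once the batch completes, each output line carries the same custom_id,
# plus either a "response" object or an "error" object.
outputs = {}
with open("batch_output.jsonl") as f:
    for raw in f:
        if raw.strip():
            out = json.loads(raw)
            outputs[out["custom_id"]] = out

for custom_id, out in outputs.items():
    if out.get("error") is not None:
        print(custom_id, "failed:", out["error"]["message"])
    else:
        print(custom_id, "->", out["response"]["status_code"])
```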
- VectorStoreFileObject: + AuditLogActorApiKey: type: object - title: Vector store files - description: A list of files attached to a vector store. + description: The API Key used to perform the audit logged action. properties: id: - description: The identifier, which can be referenced in API endpoints. type: string - object: - description: The object type, which is always `vector_store.file`. + description: The tracking id of the API key. + type: type: string - enum: [ "vector_store.file" ] - usage_bytes: - description: The total vector store usage in bytes. Note that this may be different from the original file size. - type: integer - created_at: - description: The Unix timestamp (in seconds) for when the vector store file was created. - type: integer - vector_store_id: - description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. + description: The type of API key. Can be either `user` or `service_account`. + enum: [ "user", "service_account" ] + user: + $ref: "#/components/schemas/AuditLogActorUser" + service_account: + $ref: "#/components/schemas/AuditLogActorServiceAccount" + + AuditLogActorSession: + type: object + description: The session in which the audit logged action was performed. + properties: + user: + $ref: "#/components/schemas/AuditLogActorUser" + ip_address: type: string - status: - description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. + description: The IP address from which the action was performed. + + AuditLogActor: + type: object + description: The actor who performed the audit logged action. + properties: + type: type: string - enum: [ "in_progress", "completed", "cancelled", "failed" ] - last_error: + description: The type of actor. Is either `session` or `api_key`. + enum: [ "session", "api_key" ] + session: type: object - description: The last error associated with this vector store file. Will be `null` if there are no errors. - nullable: true + $ref: "#/components/schemas/AuditLogActorSession" + api_key: + type: object + $ref: "#/components/schemas/AuditLogActorApiKey" + + + AuditLogEventType: + type: string + description: The event type. + x-oaiExpandable: true + enum: + - api_key.created + - api_key.updated + - api_key.deleted + - invite.sent + - invite.accepted + - invite.deleted + - login.succeeded + - login.failed + - logout.succeeded + - logout.failed + - organization.updated + - project.created + - project.updated + - project.archived + - service_account.created + - service_account.updated + - service_account.deleted + - user.added + - user.updated + - user.deleted + + AuditLog: + type: object + description: A log of a user action or configuration change within this organization. + properties: + id: + type: string + description: The ID of this log. + type: + $ref: "#/components/schemas/AuditLogEventType" + + effective_at: + type: integer + description: The Unix timestamp (in seconds) of the event. + project: + type: object + description: The project that the action was scoped to. Absent for actions not scoped to projects. properties: - code: + id: type: string - description: One of `server_error` or `rate_limit_exceeded`. - enum: - [ - "server_error", - "unsupported_file", - "invalid_file", - ] - message: + description: The project ID. + name: + type: string + description: The project title. 
+ actor: + $ref: "#/components/schemas/AuditLogActor" + api_key.created: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + data: + type: object + description: The payload used to create the API key. + properties: + scopes: + type: array + items: + type: string + description: A list of scopes allowed for the API key, e.g. `["api.model.request"]` + api_key.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + changes_requested: + type: object + description: The payload used to update the API key. + properties: + scopes: + type: array + items: + type: string + description: A list of scopes allowed for the API key, e.g. `["api.model.request"]` + api_key.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + invite.sent: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + data: + type: object + description: The payload used to create the invite. + properties: + email: + type: string + description: The email invited to the organization. + role: + type: string + description: The role the email was invited to be. Is either `owner` or `member`. + invite.accepted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + invite.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + login.failed: + type: object + description: The details for events with this `type`. + properties: + error_code: + type: string + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. + logout.failed: + type: object + description: The details for events with this `type`. + properties: + error_code: + type: string + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. + organization.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The organization ID. + changes_requested: + type: object + description: The payload used to update the organization settings. + properties: + title: + type: string + description: The organization title. + description: + type: string + description: The organization description. + name: + type: string + description: The organization name. + settings: + type: object + properties: + threads_ui_visibility: + type: string + description: Visibility of the threads page which shows messages created with the Assistants API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`. + usage_dashboard_visibility: + type: string + description: Visibility of the usage dashboard which shows activity and costs for your organization. One of `ANY_ROLE` or `OWNERS`. + project.created: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + data: + type: object + description: The payload used to create the project. + properties: + name: + type: string + description: The project name. 
+ title: + type: string + description: The title of the project as seen on the dashboard. + project.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + changes_requested: + type: object + description: The payload used to update the project. + properties: + title: + type: string + description: The title of the project as seen on the dashboard. + project.archived: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + service_account.created: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The service account ID. + data: + type: object + description: The payload used to create the service account. + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + service_account.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The service account ID. + changes_requested: + type: object + description: The payload used to updated the service account. + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + service_account.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The service account ID. + user.added: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The user ID. + data: + type: object + description: The payload used to add the user to the project. + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + user.updated: + type: object + description: The details for events with this `type`. + properties: + id: type: string - description: A human-readable description of the error. - required: - - code - - message - chunking_strategy: + description: The project ID. + changes_requested: + type: object + description: The payload used to update the user. + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + user.deleted: type: object - description: The strategy used to chunk the file. - oneOf: - - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" - - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" - x-oaiExpandable: true + description: The details for events with this `type`. + properties: + id: + type: string + description: The user ID. 
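For clarity (plain dicts with illustrative values, not part of the spec): the `AuditLog` object nests its event-specific details under a property named after its `type`, as the schema above and the spec's own example object below show. A minimal sketch of reading those details:

```python
# Illustrative helper: the event payload of an audit log entry lives under
# a key equal to the entry's "type" (e.g. "project.created").
def audit_log_details(entry):
    """Return the event-specific payload of an audit log entry, if any."""
    return entry.get(entry["type"])

entry = {
    "id": "audit_log-abc123",   # illustrative ID
    "type": "project.created",
    "effective_at": 1711471533,
    "actor": {"type": "api_key", "api_key": {"id": "key_abc", "type": "service_account"}},
    "project.created": {"id": "proj_abc", "data": {"name": "Project example"}},
}

print(audit_log_details(entry)["data"]["name"])  # -> Project example
```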
required: - id - - object - - usage_bytes - - created_at - - vector_store_id - - status - - last_error + - type + - effective_at + - actor x-oaiMeta: - name: The vector store file object - beta: true + name: The audit log object example: | { - "id": "file-abc123", - "object": "vector_store.file", - "usage_bytes": 1234, - "created_at": 1698107661, - "vector_store_id": "vs_abc123", - "status": "completed", - "last_error": null, - "chunking_strategy": { - "type": "static", - "static": { - "max_chunk_size_tokens": 800, - "chunk_overlap_tokens": 400 + "id": "req_xxx_20240101", + "type": "api_key.created", + "effective_at": 1720804090, + "actor": { + "type": "session", + "session": { + "user": { + "id": "user-xxx", + "email": "user@example.com" + }, + "ip_address": "127.0.0.1", + "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + } + }, + "api_key.created": { + "id": "key_xxxx", + "data": { + "scopes": ["resource.operation"] + } } - } } - OtherChunkingStrategyResponseParam: + ListAuditLogsResponse: type: object - title: Other Chunking Strategy - description: This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. - additionalProperties: false properties: - type: + object: type: string - description: Always `other`. - enum: [ "other" ] - required: - - type - - StaticChunkingStrategyResponseParam: - type: object - title: Static Chunking Strategy - additionalProperties: false - properties: - type: + enum: [ list ] + data: + type: array + items: + $ref: "#/components/schemas/AuditLog" + first_id: type: string - description: Always `static`. - enum: [ "static" ] - static: - $ref: "#/components/schemas/StaticChunkingStrategy" + example: "audit_log-defb456h8dks" + last_id: + type: string + example: "audit_log-hnbkd8s93s" + has_more: + type: boolean + required: - - type - - static + - object + - data + - first_id + - last_id + - has_more - StaticChunkingStrategy: + Invite: type: object - additionalProperties: false + description: Represents an individual `invite` to the organization. properties: - max_chunk_size_tokens: + object: + type: string + enum: [ organization.invite ] + description: The object type, which is always `organization.invite` + id: + type: string + description: The identifier, which can be referenced in API endpoints + email: + type: string + description: The email address of the individual to whom the invite was sent + role: + type: string + enum: [ owner, reader ] + description: "`owner` or `reader`" + status: + type: string + enum: [ accepted, expired, pending ] + description: "`accepted`,`expired`, or `pending`" + invited_at: type: integer - minimum: 100 - maximum: 4096 - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: + description: The Unix timestamp (in seconds) of when the invite was sent. + expires_at: type: integer - description: | - The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. + description: The Unix timestamp (in seconds) of when the invite expires. + accepted_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was accepted. 
+ required: - - max_chunk_size_tokens - - chunk_overlap_tokens + - object + - id + - email + - role + - status + - invited_at + - expires_at + x-oaiMeta: + name: The invite object + example: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } - AutoChunkingStrategyRequestParam: + InviteListResponse: type: object - title: Auto Chunking Strategy - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - additionalProperties: false properties: - type: + object: type: string - description: Always `auto`. - enum: [ "auto" ] + enum: [ list ] + description: The object type, which is always `list` + data: + type: array + items: + $ref: '#/components/schemas/Invite' + first_id: + type: string + description: The first `invite_id` in the retrieved `list` + last_id: + type: string + description: The last `invite_id` in the retrieved `list` + has_more: + type: boolean + description: The `has_more` property is used for pagination to indicate there are additional results. required: - - type + - object + - data - StaticChunkingStrategyRequestParam: + InviteRequest: type: object - title: Static Chunking Strategy - additionalProperties: false properties: - type: + email: type: string - description: Always `static`. - enum: [ "static" ] - static: - $ref: "#/components/schemas/StaticChunkingStrategy" + description: "Send an email to this address" + role: + type: string + enum: [ reader, owner ] + description: "`owner` or `reader`" required: - - type - - static + - email + - role - ChunkingStrategyRequestParam: + InviteDeleteResponse: type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - oneOf: - - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" - - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" - x-oaiExpandable: true + properties: + object: + type: string + enum: [ organization.invite.deleted ] + description: The object type, which is always `organization.invite.deleted` + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted - CreateVectorStoreFileRequest: + User: type: object - additionalProperties: false + description: Represents an individual `user` within an organization. properties: - file_id: - description: A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. + object: type: string - chunking_strategy: - $ref: "#/components/schemas/ChunkingStrategyRequestParam" + enum: [ organization.user ] + description: The object type, which is always `organization.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: [ owner, reader ] + description: "`owner` or `reader`" + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the user was added. 
required: - - file_id + - object + - id + - name + - email + - role + - added_at + x-oaiMeta: + name: The user object + example: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } - ListVectorStoreFilesResponse: + UserListResponse: + type: object properties: object: type: string - example: "list" + enum: [ list ] data: type: array items: - $ref: "#/components/schemas/VectorStoreFileObject" + $ref: '#/components/schemas/User' first_id: type: string - example: "file-abc123" last_id: type: string - example: "file-abc456" has_more: type: boolean - example: false required: - object - data @@ -13372,687 +16066,483 @@ components: - last_id - has_more - DeleteVectorStoreFileResponse: + UserRoleUpdateRequest: + type: object + properties: + role: + type: string + enum: [ owner,reader ] + description: "`owner` or `reader`" + required: + - role + + UserDeleteResponse: type: object properties: + object: + type: string + enum: [ organization.user.deleted ] id: type: string deleted: type: boolean - object: - type: string - enum: [ vector_store.file.deleted ] required: - - id - object + - id - deleted - VectorStoreFileBatchObject: + Project: type: object - title: Vector store file batch - description: A batch of files attached to a vector store. + description: Represents an individual project. properties: id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `vector_store.file_batch`. - type: string - enum: [ "vector_store.files_batch" ] - created_at: - description: The Unix timestamp (in seconds) for when the vector store files batch was created. - type: integer - vector_store_id: - description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. - type: string - status: - description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. - type: string - enum: [ "in_progress", "completed", "cancelled", "failed" ] - file_counts: - type: object - properties: - in_progress: - description: The number of files that are currently being processed. - type: integer - completed: - description: The number of files that have been processed. - type: integer - failed: - description: The number of files that have failed to process. - type: integer - cancelled: - description: The number of files that where cancelled. - type: integer - total: - description: The total number of files. - type: integer - required: - - in_progress - - completed - - cancelled - - failed - - total + type: string + description: The identifier, which can be referenced in API endpoints + object: + type: string + enum: [ organization.project ] + description: The object type, which is always `organization.project` + name: + type: string + description: The name of the project. This appears in reporting. + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was created. + archived_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) of when the project was archived or `null`. + status: + type: string + enum: [ active, archived ] + description: "`active` or `archived`" + app_use_case: + type: string + description: A description of your business, project, or use case. 
[Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). + business_website: + type: string + description: Your business URL, or if you don't have one yet, a URL to your LinkedIn or other social media. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). required: - id - object + - name - created_at - - vector_store_id - status - - file_counts x-oaiMeta: - name: The vector store files batch object - beta: true + name: The project object example: | { - "id": "vsfb_123", - "object": "vector_store.files_batch", - "created_at": 1698107661, - "vector_store_id": "vs_abc123", - "status": "completed", - "file_counts": { - "in_progress": 0, - "completed": 100, - "failed": 0, - "cancelled": 0, - "total": 100 - } + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active", + "app_use_case": "Your project use case here", + "business_website": "https://example.com" } - CreateVectorStoreFileBatchRequest: + ProjectListResponse: type: object - additionalProperties: false properties: - file_ids: - description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + object: + type: string + enum: [ list ] + data: type: array - minItems: 1 - maxItems: 500 items: - type: string - chunking_strategy: - $ref: "#/components/schemas/ChunkingStrategyRequestParam" + $ref: '#/components/schemas/Project' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean required: - - file_ids + - object + - data + - first_id + - last_id + - has_more - AssistantStreamEvent: - description: | - Represents an event emitted when streaming a Run. - - Each event in a server-sent events stream has an `event` and `data` property: - - ``` - event: thread.created - data: {"id": "thread_123", "object": "thread", ...} - ``` - - We emit events whenever a new object is created, transitions to a new state, or is being - streamed in parts (deltas). For example, we emit `thread.run.created` when a new run - is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses - to create a message during a run, we emit a `thread.message.created event`, a - `thread.message.in_progress` event, many `thread.message.delta` events, and finally a - `thread.message.completed` event. - - We may add additional events over time, so we recommend handling unknown events gracefully - in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to - integrate the Assistants API with streaming. - oneOf: - - $ref: "#/components/schemas/ThreadStreamEvent" - - $ref: "#/components/schemas/RunStreamEvent" - - $ref: "#/components/schemas/RunStepStreamEvent" - - $ref: "#/components/schemas/MessageStreamEvent" - - $ref: "#/components/schemas/ErrorEvent" - - $ref: "#/components/schemas/DoneEvent" - x-oaiMeta: - name: Assistant stream events - beta: true + ProjectCreateRequest: + type: object + properties: + name: + type: string + description: The friendly name of the project, this name appears in reports. + app_use_case: + type: string + description: A description of your business, project, or use case. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). 
+ business_website: + type: string + description: Your business URL, or if you don't have one yet, a URL to your LinkedIn or other social media. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). + required: + - name - ThreadStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: [ "thread.created" ] - data: - $ref: "#/components/schemas/ThreadObject" - required: - - event - - data - description: Occurs when a new [thread](/docs/api-reference/threads/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" + ProjectUpdateRequest: + type: object + properties: + name: + type: string + description: The updated name of the project, this name appears in reports. + app_use_case: + type: string + description: A description of your business, project, or use case. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). + business_website: + type: string + description: Your business URL, or if you don't have one yet, a URL to your LinkedIn or other social media. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). + required: + - name - RunStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: [ "thread.run.created" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a new [run](/docs/api-reference/runs/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.queued" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.in_progress" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.requires_action" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.completed" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) is completed. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.incomplete" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`. 
- x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.failed" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) fails. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.cancelling" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.cancelled" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.expired" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) expires. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + DefaultProjectErrorResponse: + type: object + properties: + code: + type: integer + message: + type: string + required: + - code + - message - RunStepStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.created" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.in_progress" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.delta" ] - data: - $ref: "#/components/schemas/RunStepDeltaObject" - required: - - event - - data - description: Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed. - x-oaiMeta: - dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.completed" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.failed" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails. 
- x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.cancelled" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.expired" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + ProjectUser: + type: object + description: Represents an individual user in a project. + properties: + object: + type: string + enum: [ organization.project.user ] + description: The object type, which is always `organization.project.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: [ owner, member ] + description: "`owner` or `member`" + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was added. - MessageStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: [ "thread.message.created" ] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.message.in_progress" ] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.message.delta" ] - data: - $ref: "#/components/schemas/MessageDeltaObject" - required: - - event - - data - description: Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed. - x-oaiMeta: - dataDescription: "`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.message.completed" ] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) is completed. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.message.incomplete" ] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed. 
- x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + required: + - object + - id + - name + - email + - role + - added_at + x-oaiMeta: + name: The project user object + example: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } - ErrorEvent: + ProjectUserListResponse: + type: object + properties: + object: + type: string + data: + type: array + items: + $ref: '#/components/schemas/ProjectUser' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + + ProjectUserCreateRequest: + type: object + properties: + user_id: + type: string + description: The ID of the user. + role: + type: string + enum: [ owner, member ] + description: "`owner` or `member`" + required: + - user_id + - role + + ProjectUserUpdateRequest: + type: object + properties: + role: + type: string + enum: [ owner, member ] + description: "`owner` or `member`" + required: + - role + + ProjectUserDeleteResponse: + type: object + properties: + object: + type: string + enum: [ organization.project.user.deleted ] + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + + ProjectServiceAccount: + type: object + description: Represents an individual service account in a project. + properties: + object: + type: string + enum: [ organization.project.service_account ] + description: The object type, which is always `organization.project.service_account` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the service account + role: + type: string + enum: [ owner, member ] + description: "`owner` or `member`" + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the service account was created + required: + - object + - id + - name + - role + - created_at + x-oaiMeta: + name: The project service account object + example: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + + ProjectServiceAccountListResponse: type: object properties: - event: + object: type: string - enum: [ "error" ] + enum: [ list ] data: - $ref: "#/components/schemas/Error" + type: array + items: + $ref: '#/components/schemas/ProjectServiceAccount' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean required: - - event + - object - data - description: Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. - x-oaiMeta: - dataDescription: "`data` is an [error](/docs/guides/error-codes/api-errors)" + - first_id + - last_id + - has_more - DoneEvent: + ProjectServiceAccountCreateRequest: type: object properties: - event: - type: string - enum: [ "done" ] - data: + name: type: string - enum: [ "[DONE]" ] + description: The name of the service account being created. required: - - event - - data - description: Occurs when a stream ends. - x-oaiMeta: - dataDescription: "`data` is `[DONE]`" + - name - Batch: + ProjectServiceAccountCreateResponse: type: object properties: - id: - type: string object: type: string - enum: [ batch ] - description: The object type, which is always `batch`. 
- endpoint: + enum: [ organization.project.service_account ] + id: type: string - description: The OpenAI API endpoint used by the batch. - - errors: - type: object - properties: - object: - type: string - description: The object type, which is always `list`. - data: - type: array - items: - type: object - properties: - code: - type: string - description: An error code identifying the error type. - message: - type: string - description: A human-readable message providing more details about the error. - param: - type: string - description: The name of the parameter that caused the error, if applicable. - nullable: true - line: - type: integer - description: The line number of the input file where the error occurred, if applicable. - nullable: true - input_file_id: + name: type: string - description: The ID of the input file for the batch. - completion_window: + role: type: string - description: The time frame within which the batch should be processed. - status: + enum: [ member ] + description: Service accounts can only have one role of type `member` + created_at: + type: integer + api_key: + $ref: '#/components/schemas/ProjectServiceAccountApiKey' + required: + - object + - id + - name + - role + - created_at + - api_key + + ProjectServiceAccountApiKey: + type: object + properties: + object: type: string - description: The current status of the batch. - enum: - - validating - - failed - - in_progress - - finalizing - - completed - - expired - - cancelling - - cancelled - output_file_id: + enum: [ organization.project.service_account.api_key ] + description: The object type, which is always `organization.project.service_account.api_key` + + value: type: string - description: The ID of the file containing the outputs of successfully executed requests. - error_file_id: + name: type: string - description: The ID of the file containing the outputs of requests with errors. created_at: type: integer - description: The Unix timestamp (in seconds) for when the batch was created. - in_progress_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started processing. - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch will expire. - finalizing_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started finalizing. - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was completed. - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch failed. - expired_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch expired. - cancelling_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started cancelling. - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was cancelled. - request_counts: - type: object - properties: - total: - type: integer - description: Total number of requests in the batch. - completed: - type: integer - description: Number of requests that have been completed successfully. - failed: - type: integer - description: Number of requests that have failed. - required: - - total - - completed - - failed - description: The request counts for different statuses within the batch. 
- metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + id: + type: string required: - - id - object - - endpoint - - input_file_id - - completion_window - - status + - value + - name - created_at - x-oaiMeta: - name: The batch object - example: *batch_object + - id - BatchRequestInput: + ProjectServiceAccountDeleteResponse: type: object - description: The per-line object of the batch input file properties: - custom_id: - type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. - method: + object: type: string - enum: [ "POST" ] - description: The HTTP method to be used for the request. Currently only `POST` is supported. - url: + enum: [ organization.project.service_account.deleted ] + id: type: string - description: The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - x-oaiMeta: - name: The request input object - example: | - {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} + deleted: + type: boolean + required: + - object + - id + - deleted - BatchRequestOutput: + ProjectApiKey: type: object - description: The per-line object of the batch output and error files + description: Represents an individual API key in a project. properties: - id: + object: type: string - custom_id: + enum: [ organization.project.api_key ] + description: The object type, which is always `organization.project.api_key` + redacted_value: type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. - response: - type: object - nullable: true - properties: - status_code: - type: integer - description: The HTTP status code of the response - request_id: - type: string - description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. - body: - type: object - x-oaiTypeLabel: map - description: The JSON body of the response - error: + description: The redacted value of the API key + name: + type: string + description: The name of the API key + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the API key was created + id: + type: string + description: The identifier, which can be referenced in API endpoints + owner: type: object - nullable: true - description: For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure. properties: - code: - type: string - description: A machine-readable error code. - message: + type: type: string - description: A human-readable error message. 
+ enum: [ user, service_account ] + description: "`user` or `service_account`" + user: + $ref: '#/components/schemas/ProjectUser' + service_account: + $ref: '#/components/schemas/ProjectServiceAccount' + required: + - object + - redacted_value + - name + - created_at + - id + - owner x-oaiMeta: - name: The request output object + name: The project API key object example: | - {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-4o-mini", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null} + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "created_at": 1711471533 + } + } + } - ListBatchesResponse: + ProjectApiKeyListResponse: type: object properties: + object: + type: string + enum: [ list ] data: type: array items: - $ref: "#/components/schemas/Batch" + $ref: '#/components/schemas/ProjectApiKey' first_id: type: string - example: "batch_abc123" last_id: type: string - example: "batch_abc456" has_more: type: boolean - object: - type: string - enum: [ list ] required: - object - data + - first_id + - last_id - has_more + ProjectApiKeyDeleteResponse: + type: object + properties: + object: + type: string + enum: [ organization.project.api_key.deleted ] + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + security: - ApiKeyAuth: [ ] diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index f98e6d9a..cff9352b 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,5 +1,5 @@ name: openai_dart -description: Dart client for the OpenAI API. Supports chat (GPT-4o, etc.), completions, embeddings, images (DALL·E 3), assistants (threads, runs, vector stores, etc.), batch, fine-tuning, etc. +description: Dart client for the OpenAI API. Supports chat (GPT-4o, o1, etc.), completions, embeddings, images (DALL·E 3), assistants (threads,, vector stores, etc.), batch, fine-tuning, etc. 
version: 0.4.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart From 0d0997b33e3072248cebec4e4c7a94b4b5b5968b Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 25 Sep 2024 23:37:44 +0200 Subject: [PATCH 240/251] feat: Add support for maxCompletionTokens and reasoningTokens in openai_dart (#556) --- .../generated/schema/assistant_object.dart | 8 +- .../src/generated/schema/assistant_tools.dart | 3 +- .../schema/completion_tokens_details.dart | 41 ++ .../generated/schema/completion_usage.dart | 8 +- .../schema/create_assistant_request.dart | 10 +- .../create_chat_completion_request.dart | 101 ++- .../create_fine_tuning_job_request.dart | 4 +- .../generated/schema/create_run_request.dart | 10 +- .../schema/create_thread_and_run_request.dart | 10 +- .../schema/file_search_ranking_options.dart | 10 +- .../schema/modify_assistant_request.dart | 8 +- .../src/generated/schema/response_format.dart | 19 +- .../lib/src/generated/schema/run_object.dart | 8 +- .../lib/src/generated/schema/schema.dart | 1 + .../src/generated/schema/schema.freezed.dart | 645 ++++++++++++++---- .../lib/src/generated/schema/schema.g.dart | 53 +- packages/openai_dart/oas/openapi_curated.yaml | 118 +++- .../openai_dart/oas/openapi_official.yaml | 65 +- .../test/openai_client_chat_test.dart | 8 +- 19 files changed, 853 insertions(+), 277 deletions(-) create mode 100644 packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart index f0e1f3b5..4c7ba8df 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart @@ -65,11 +65,11 @@ class AssistantObject with _$AssistantObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -191,11 +191,11 @@ enum AssistantResponseFormatMode { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
/// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart index 920d2301..30a5cacc 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart @@ -88,7 +88,8 @@ class AssistantToolsFileSearchFileSearch /// for more information. @JsonKey(name: 'max_num_results', includeIfNull: false) int? maxNumResults, - /// The ranking options for the file search. + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + /// a score_threshold of 0. /// /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) /// for more information. diff --git a/packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart b/packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart new file mode 100644 index 00000000..14fe08a8 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart @@ -0,0 +1,41 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: CompletionTokensDetails +// ========================================== + +/// Breakdown of tokens used in a completion. +@freezed +class CompletionTokensDetails with _$CompletionTokensDetails { + const CompletionTokensDetails._(); + + /// Factory constructor for CompletionTokensDetails + const factory CompletionTokensDetails({ + /// Tokens generated by the model for reasoning. + @JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? reasoningTokens, + }) = _CompletionTokensDetails; + + /// Object construction from a JSON representation + factory CompletionTokensDetails.fromJson(Map json) => + _$CompletionTokensDetailsFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['reasoning_tokens']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'reasoning_tokens': reasoningTokens, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/completion_usage.dart b/packages/openai_dart/lib/src/generated/schema/completion_usage.dart index 17826175..86877b8e 100644 --- a/packages/openai_dart/lib/src/generated/schema/completion_usage.dart +++ b/packages/openai_dart/lib/src/generated/schema/completion_usage.dart @@ -23,6 +23,10 @@ class CompletionUsage with _$CompletionUsage { /// Total number of tokens used in the request (prompt + completion). @JsonKey(name: 'total_tokens') required int totalTokens, + + /// Breakdown of tokens used in a completion. + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? 
completionTokensDetails, }) = _CompletionUsage; /// Object construction from a JSON representation @@ -33,7 +37,8 @@ class CompletionUsage with _$CompletionUsage { static const List propertyNames = [ 'completion_tokens', 'prompt_tokens', - 'total_tokens' + 'total_tokens', + 'completion_tokens_details' ]; /// Perform validations on the schema property values @@ -47,6 +52,7 @@ class CompletionUsage with _$CompletionUsage { 'completion_tokens': completionTokens, 'prompt_tokens': promptTokens, 'total_tokens': totalTokens, + 'completion_tokens_details': completionTokensDetails, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart index cb7f5b82..312d8f5c 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart @@ -56,11 +56,11 @@ class CreateAssistantRequest with _$CreateAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -185,8 +185,6 @@ enum AssistantModels { gpt4o20240513, @JsonValue('gpt-4o-2024-08-06') gpt4o20240806, - @JsonValue('gpt-4o-2024-08-06') - gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -289,11 +287,11 @@ enum CreateAssistantResponseFormatMode { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. 
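A minimal sketch of how the new `completion_tokens_details` breakdown added above surfaces through the generated Dart classes. The token counts are made-up illustrative values; the class names, JSON keys, and nullability follow `CompletionUsage` and `CompletionTokensDetails` as defined in this patch.

```dart
// Sketch: reading the new `completion_tokens_details` breakdown from a
// chat completion `usage` payload, using the generated schema classes.
import 'package:openai_dart/openai_dart.dart';

void main() {
  // A usage payload as returned for a reasoning-model completion
  // (the numbers here are illustrative only).
  final usage = CompletionUsage.fromJson({
    'completion_tokens': 320,
    'prompt_tokens': 80,
    'total_tokens': 400,
    'completion_tokens_details': {'reasoning_tokens': 256},
  });

  // `completionTokensDetails` is nullable: models without a reasoning
  // breakdown simply omit the field.
  final reasoning = usage.completionTokensDetails?.reasoningTokens ?? 0;
  print('$reasoning of ${usage.completionTokens} completion tokens were reasoning tokens');
}
```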
/// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 47280735..3d59ae2b 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -15,10 +15,12 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Factory constructor for CreateChatCompletionRequest const factory CreateChatCompletionRequest({ - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. @_ChatCompletionModelConverter() required ChatCompletionModel model, - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). required List messages, /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @@ -30,22 +32,37 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. @JsonKey(name: 'logit_bias', includeIfNull: false) Map? logitBias, - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of + /// each output token returned in the `content` of `message`. @JsonKey(includeIfNull: false) bool? logprobs, - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
+ /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs, - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + /// via API. /// - /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, - /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// An upper bound for the number of tokens that can be generated for a completion, including visible output + /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? maxCompletionTokens, + + /// How many chat completion choices to generate for each input message. Note that you will be charged based on + /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @JsonKey(includeIfNull: false) @Default(1) int? n, /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. @@ -55,27 +72,43 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { @Default(0.0) double? presencePenalty, - /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + /// than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + /// will match your supplied JSON schema. + /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
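A hedged end-to-end sketch of the new `maxCompletionTokens` request parameter (serialized as `max_completion_tokens`) that this patch adds alongside the now-deprecated `maxTokens`. The API key and the `o1-preview` model id are placeholders; the surrounding calls (`OpenAIClient`, `createChatCompletion`, the `ChatCompletionModel.modelId`, `ChatCompletionMessage.user`, and `ChatCompletionUserMessageContent.string` factories) are the package's existing chat API, assumed unchanged by this patch.

```dart
// Sketch: using `maxCompletionTokens` instead of the deprecated `maxTokens`
// when targeting an o1-style reasoning model.
import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  final client = OpenAIClient(apiKey: 'YOUR_API_KEY'); // placeholder key

  final res = await client.createChatCompletion(
    request: const CreateChatCompletionRequest(
      // Model id passed as a raw string; swap in whichever model you target.
      model: ChatCompletionModel.modelId('o1-preview'),
      messages: [
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string(
            'Summarize the main idea of dynamic programming.',
          ),
        ),
      ],
      // Upper bound covering both visible output and reasoning tokens.
      maxCompletionTokens: 1024,
    ),
  );

  print(res.choices.first.message.content);
  client.endSession();
}
```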
/// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + /// valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + /// or user message. Without this, the model may generate an unending stream of whitespace until the generation + /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + /// `max_tokens` or the conversation exceeded the max context length. /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @JsonKey(name: 'response_format', includeIfNull: false) ResponseFormat? responseFormat, /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests + /// with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + /// monitor changes in the backend. @JsonKey(includeIfNull: false) int? seed, /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers /// subscribed to the scale tier service: - /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - /// - If set to 'default', the request will be processed using the default service tier with a lower - /// uptime SLA and no latency guarantee. + /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + /// until they are exhausted. + /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + /// default service tier with a lower uptime SLA and no latency guarantee. + /// - If set to 'default', the request will be processed using the default service tier with a lower uptime + /// SLA and no latency guarantee. /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @@ -91,7 +124,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { @JsonKey(includeIfNull: false) ChatCompletionStop? stop, - /// If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @JsonKey(includeIfNull: false) @Default(false) bool? stream, /// Options for streaming response. Only set this when you set `stream: true`. @@ -108,14 +141,17 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// We generally recommend altering this or `temperature` but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. @JsonKey(includeIfNull: false) List? tools, /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + /// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @_ChatCompletionToolChoiceOptionConverter() @@ -135,7 +171,8 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + /// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. 
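A sketch of the JSON-mode behaviour described in the `response_format` doc comments above, pairing `{ "type": "json_object" }` with the system-message instruction those comments require. It assumes the generated `ResponseFormat.jsonObject()` union factory (the patch only shows the `ResponseFormatText`/`ResponseFormatJsonObject`/`ResponseFormatJsonSchema` union, not the factory names) and the package's existing message factories; the prompt, model choice, and key are illustrative.

```dart
// Sketch: enabling JSON mode via `responseFormat`, together with the
// explicit "produce JSON" instruction the doc comment above insists on.
import 'dart:convert';

import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  final client = OpenAIClient(apiKey: 'YOUR_API_KEY'); // placeholder key

  final res = await client.createChatCompletion(
    request: const CreateChatCompletionRequest(
      model: ChatCompletionModel.model(ChatCompletionModels.gpt4o),
      messages: [
        // JSON mode requires telling the model to produce JSON explicitly.
        ChatCompletionMessage.system(
          content: 'Reply with a JSON object with keys "city" and "country".',
        ),
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string(
            'Where is the Eiffel Tower?',
          ),
        ),
      ],
      responseFormat: ResponseFormat.jsonObject(),
    ),
  );

  // The assistant message content should now be parseable JSON.
  final decoded = jsonDecode(res.choices.first.message.content ?? '{}');
  print(decoded);
  client.endSession();
}
```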
@_ChatCompletionFunctionCallConverter() @@ -161,6 +198,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'logprobs', 'top_logprobs', 'max_tokens', + 'max_completion_tokens', 'n', 'presence_penalty', 'response_format', @@ -251,6 +289,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'logprobs': logprobs, 'top_logprobs': topLogprobs, 'max_tokens': maxTokens, + 'max_completion_tokens': maxCompletionTokens, 'n': n, 'presence_penalty': presencePenalty, 'response_format': responseFormat, @@ -309,8 +348,6 @@ enum ChatCompletionModels { gpt4o20240513, @JsonValue('gpt-4o-2024-08-06') gpt4o20240806, - @JsonValue('gpt-4o-2024-08-06') - gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -343,7 +380,8 @@ enum ChatCompletionModels { // CLASS: ChatCompletionModel // ========================================== -/// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. +/// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) +/// table for details on which models work with the Chat API. @freezed sealed class ChatCompletionModel with _$ChatCompletionModel { const ChatCompletionModel._(); @@ -401,9 +439,12 @@ class _ChatCompletionModelConverter /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers /// subscribed to the scale tier service: -/// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. -/// - If set to 'default', the request will be processed using the default service tier with a lower -/// uptime SLA and no latency guarantee. +/// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits +/// until they are exhausted. +/// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the +/// default service tier with a lower uptime SLA and no latency guarantee. +/// - If set to 'default', the request will be processed using the default service tier with a lower uptime +/// SLA and no latency guarantee. /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @@ -491,7 +532,8 @@ enum ChatCompletionToolChoiceMode { /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. -/// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. +/// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the +/// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @freezed @@ -579,7 +621,8 @@ enum ChatCompletionFunctionCallMode { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. -/// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. 
+/// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that +/// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. @freezed diff --git a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart index 3da0a42e..863ffb57 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart @@ -37,7 +37,7 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) FineTuningJobHyperparameters? hyperparameters, - /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// A string of up to 64 characters that will be added to your fine-tuned model name. /// /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @JsonKey(includeIfNull: false) String? suffix, @@ -80,7 +80,7 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { /// Validation constants static const suffixMinLengthValue = 1; - static const suffixMaxLengthValue = 40; + static const suffixMaxLengthValue = 64; static const seedMinValue = 0; static const seedMaxValue = 2147483647; diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 6fe86422..3698ed7c 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -84,11 +84,11 @@ class CreateRunRequest with _$CreateRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -223,8 +223,6 @@ enum RunModels { gpt4o20240513, @JsonValue('gpt-4o-2024-08-06') gpt4o20240806, - @JsonValue('gpt-4o-2024-08-06') - gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -416,11 +414,11 @@ enum CreateRunRequestResponseFormatMode { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. 
Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index 1d9c82ee..d58474f8 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -83,11 +83,11 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -222,8 +222,6 @@ enum ThreadAndRunModels { gpt4o20240513, @JsonValue('gpt-4o-2024-08-06') gpt4o20240806, - @JsonValue('gpt-4o-2024-08-06') - gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -422,11 +420,11 @@ enum CreateThreadAndRunRequestResponseFormatMode { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. 
/// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a diff --git a/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart b/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart index e60070f0..03533c56 100644 --- a/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart +++ b/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart @@ -8,7 +8,8 @@ part of open_a_i_schema; // CLASS: FileSearchRankingOptions // ========================================== -/// The ranking options for the file search. +/// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and +/// a score_threshold of 0. /// /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) /// for more information. @@ -26,8 +27,7 @@ class FileSearchRankingOptions with _$FileSearchRankingOptions { FileSearchRanker? ranker, /// The score threshold for the file search. All values must be a floating point number between 0 and 1. - @JsonKey(name: 'score_threshold', includeIfNull: false) - double? scoreThreshold, + @JsonKey(name: 'score_threshold') required double scoreThreshold, }) = _FileSearchRankingOptions; /// Object construction from a JSON representation @@ -43,10 +43,10 @@ class FileSearchRankingOptions with _$FileSearchRankingOptions { /// Perform validations on the schema property values String? validateSchema() { - if (scoreThreshold != null && scoreThreshold! < scoreThresholdMinValue) { + if (scoreThreshold < scoreThresholdMinValue) { return "The value of 'scoreThreshold' cannot be < $scoreThresholdMinValue"; } - if (scoreThreshold != null && scoreThreshold! > scoreThresholdMaxValue) { + if (scoreThreshold > scoreThresholdMaxValue) { return "The value of 'scoreThreshold' cannot be > $scoreThresholdMaxValue"; } return null; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart index 2b4d94d1..5bd7ad65 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart @@ -59,11 +59,11 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -171,11 +171,11 @@ enum ModifyAssistantResponseFormatMode { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. 
/// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a diff --git a/packages/openai_dart/lib/src/generated/schema/response_format.dart b/packages/openai_dart/lib/src/generated/schema/response_format.dart index 35b1f30d..7b975680 100644 --- a/packages/openai_dart/lib/src/generated/schema/response_format.dart +++ b/packages/openai_dart/lib/src/generated/schema/response_format.dart @@ -8,13 +8,24 @@ part of open_a_i_schema; // CLASS: ResponseFormat // ========================================== -/// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. +/// An object specifying the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer +/// than `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model +/// will match your supplied JSON schema. +/// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is +/// valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system +/// or user message. Without this, the model may generate an unending stream of whitespace until the generation +/// reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note that the message +/// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded +/// `max_tokens` or the conversation exceeded the max context length. @Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) sealed class ResponseFormat with _$ResponseFormat { const ResponseFormat._(); diff --git a/packages/openai_dart/lib/src/generated/schema/run_object.dart b/packages/openai_dart/lib/src/generated/schema/run_object.dart index 351d140b..98fd5f0c 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_object.dart @@ -110,11 +110,11 @@ class RunObject with _$RunObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -465,11 +465,11 @@ enum RunObjectResponseFormatMode { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. 
/// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart index a48b094d..265649d4 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -42,6 +42,7 @@ part 'chat_completion_stream_response_delta.dart'; part 'chat_completion_stream_message_function_call.dart'; part 'chat_completion_stream_message_tool_call_chunk.dart'; part 'completion_usage.dart'; +part 'completion_tokens_details.dart'; part 'create_embedding_request.dart'; part 'create_embedding_response.dart'; part 'embedding.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 5753970f..af25caaf 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -3530,11 +3530,13 @@ CreateChatCompletionRequest _$CreateChatCompletionRequestFromJson( /// @nodoc mixin _$CreateChatCompletionRequest { - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. @_ChatCompletionModelConverter() ChatCompletionModel get model => throw _privateConstructorUsedError; - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). List get messages => throw _privateConstructorUsedError; @@ -3546,25 +3548,40 @@ mixin _$CreateChatCompletionRequest { /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias => throw _privateConstructorUsedError; - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. 
If true, returns the log probabilities of + /// each output token returned in the `content` of `message`. @JsonKey(includeIfNull: false) bool? get logprobs => throw _privateConstructorUsedError; - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @JsonKey(name: 'top_logprobs', includeIfNull: false) int? get topLogprobs => throw _privateConstructorUsedError; - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + /// via API. /// - /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). @JsonKey(name: 'max_tokens', includeIfNull: false) int? get maxTokens => throw _privateConstructorUsedError; - /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// An upper bound for the number of tokens that can be generated for a completion, including visible output + /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? get maxCompletionTokens => throw _privateConstructorUsedError; + + /// How many chat completion choices to generate for each input message. Note that you will be charged based on + /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @JsonKey(includeIfNull: false) int? get n => throw _privateConstructorUsedError; @@ -3574,28 +3591,44 @@ mixin _$CreateChatCompletionRequest { @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty => throw _privateConstructorUsedError; - /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + /// than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. 
Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + /// will match your supplied JSON schema. + /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + /// valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + /// or user message. Without this, the model may generate an unending stream of whitespace until the generation + /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + /// `max_tokens` or the conversation exceeded the max context length. /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @JsonKey(name: 'response_format', includeIfNull: false) ResponseFormat? get responseFormat => throw _privateConstructorUsedError; /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests + /// with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + /// monitor changes in the backend. @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers /// subscribed to the scale tier service: - /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - /// - If set to 'default', the request will be processed using the default service tier with a lower - /// uptime SLA and no latency guarantee. + /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + /// until they are exhausted. + /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + /// default service tier with a lower uptime SLA and no latency guarantee. + /// - If set to 'default', the request will be processed using the default service tier with a lower uptime + /// SLA and no latency guarantee. 
/// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @@ -3611,7 +3644,7 @@ mixin _$CreateChatCompletionRequest { @JsonKey(includeIfNull: false) ChatCompletionStop? get stop => throw _privateConstructorUsedError; - /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; @@ -3632,7 +3665,9 @@ mixin _$CreateChatCompletionRequest { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. @JsonKey(includeIfNull: false) List? get tools => throw _privateConstructorUsedError; @@ -3640,7 +3675,8 @@ mixin _$CreateChatCompletionRequest { /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + /// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @_ChatCompletionToolChoiceOptionConverter() @@ -3662,7 +3698,8 @@ mixin _$CreateChatCompletionRequest { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + /// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. @_ChatCompletionFunctionCallConverter() @@ -3704,6 +3741,8 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @JsonKey(includeIfNull: false) bool? logprobs, @JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) int? 
maxTokens, + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? maxCompletionTokens, @JsonKey(includeIfNull: false) int? n, @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, @@ -3766,6 +3805,7 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, Object? logprobs = freezed, Object? topLogprobs = freezed, Object? maxTokens = freezed, + Object? maxCompletionTokens = freezed, Object? n = freezed, Object? presencePenalty = freezed, Object? responseFormat = freezed, @@ -3812,6 +3852,10 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ? _value.maxTokens : maxTokens // ignore: cast_nullable_to_non_nullable as int?, + maxCompletionTokens: freezed == maxCompletionTokens + ? _value.maxCompletionTokens + : maxCompletionTokens // ignore: cast_nullable_to_non_nullable + as int?, n: freezed == n ? _value.n : n // ignore: cast_nullable_to_non_nullable @@ -3982,6 +4026,8 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @JsonKey(includeIfNull: false) bool? logprobs, @JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? maxCompletionTokens, @JsonKey(includeIfNull: false) int? n, @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, @@ -4049,6 +4095,7 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> Object? logprobs = freezed, Object? topLogprobs = freezed, Object? maxTokens = freezed, + Object? maxCompletionTokens = freezed, Object? n = freezed, Object? presencePenalty = freezed, Object? responseFormat = freezed, @@ -4095,6 +4142,10 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.maxTokens : maxTokens // ignore: cast_nullable_to_non_nullable as int?, + maxCompletionTokens: freezed == maxCompletionTokens + ? _value.maxCompletionTokens + : maxCompletionTokens // ignore: cast_nullable_to_non_nullable + as int?, n: freezed == n ? _value.n : n // ignore: cast_nullable_to_non_nullable @@ -4176,6 +4227,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(includeIfNull: false) this.logprobs, @JsonKey(name: 'top_logprobs', includeIfNull: false) this.topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) this.maxTokens, + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + this.maxCompletionTokens, @JsonKey(includeIfNull: false) this.n = 1, @JsonKey(name: 'presence_penalty', includeIfNull: false) this.presencePenalty = 0.0, @@ -4213,15 +4266,18 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { Map json) => _$$CreateChatCompletionRequestImplFromJson(json); - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. @override @_ChatCompletionModelConverter() final ChatCompletionModel model; - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. 
+ /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). final List _messages; - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). @override List get messages { if (_messages is EqualUnmodifiableListView) return _messages; @@ -4238,12 +4294,20 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. final Map? _logitBias; /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. @override @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias { @@ -4254,24 +4318,36 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { return EqualUnmodifiableMapView(value); } - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of + /// each output token returned in the `content` of `message`. @override @JsonKey(includeIfNull: false) final bool? logprobs; - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
+ /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @override @JsonKey(name: 'top_logprobs', includeIfNull: false) final int? topLogprobs; - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + /// via API. /// - /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). @override @JsonKey(name: 'max_tokens', includeIfNull: false) final int? maxTokens; - /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// An upper bound for the number of tokens that can be generated for a completion, including visible output + /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + @override + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + final int? maxCompletionTokens; + + /// How many chat completion choices to generate for each input message. Note that you will be charged based on + /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @override @JsonKey(includeIfNull: false) final int? n; @@ -4283,30 +4359,46 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty; - /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + /// than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + /// will match your supplied JSON schema. + /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + /// valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + /// or user message. Without this, the model may generate an unending stream of whitespace until the generation + /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + /// `max_tokens` or the conversation exceeded the max context length. /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @override @JsonKey(name: 'response_format', includeIfNull: false) final ResponseFormat? responseFormat; /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests + /// with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + /// monitor changes in the backend. @override @JsonKey(includeIfNull: false) final int? seed; /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers /// subscribed to the scale tier service: - /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - /// - If set to 'default', the request will be processed using the default service tier with a lower - /// uptime SLA and no latency guarantee. + /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + /// until they are exhausted. + /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + /// default service tier with a lower uptime SLA and no latency guarantee. + /// - If set to 'default', the request will be processed using the default service tier with a lower uptime + /// SLA and no latency guarantee. /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @@ -4323,7 +4415,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(includeIfNull: false) final ChatCompletionStop? stop; - /// If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @override @JsonKey(includeIfNull: false) final bool? stream; @@ -4347,10 +4439,14 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. final List? _tools; - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. @override @JsonKey(includeIfNull: false) List? get tools { @@ -4365,7 +4461,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + /// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @override @@ -4389,7 +4486,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + /// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. 
@override @@ -4417,7 +4515,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @override String toString() { - return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, serviceTier: $serviceTier, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, user: $user, functionCall: $functionCall, functions: $functions)'; + return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, maxCompletionTokens: $maxCompletionTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, serviceTier: $serviceTier, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, user: $user, functionCall: $functionCall, functions: $functions)'; } @override @@ -4437,6 +4535,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { other.topLogprobs == topLogprobs) && (identical(other.maxTokens, maxTokens) || other.maxTokens == maxTokens) && + (identical(other.maxCompletionTokens, maxCompletionTokens) || + other.maxCompletionTokens == maxCompletionTokens) && (identical(other.n, n) || other.n == n) && (identical(other.presencePenalty, presencePenalty) || other.presencePenalty == presencePenalty) && @@ -4475,6 +4575,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { logprobs, topLogprobs, maxTokens, + maxCompletionTokens, n, presencePenalty, responseFormat, @@ -4524,6 +4625,8 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'top_logprobs', includeIfNull: false) final int? topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) final int? maxTokens, + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + final int? maxCompletionTokens, @JsonKey(includeIfNull: false) final int? n, @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty, @@ -4561,12 +4664,14 @@ abstract class _CreateChatCompletionRequest factory _CreateChatCompletionRequest.fromJson(Map json) = _$CreateChatCompletionRequestImpl.fromJson; - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. @override @_ChatCompletionModelConverter() ChatCompletionModel get model; - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). 
@override List get messages; @@ -4579,29 +4684,45 @@ abstract class _CreateChatCompletionRequest /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. @override @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias; - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of + /// each output token returned in the `content` of `message`. @override @JsonKey(includeIfNull: false) bool? get logprobs; - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @override @JsonKey(name: 'top_logprobs', includeIfNull: false) int? get topLogprobs; - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + /// via API. /// - /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). @override @JsonKey(name: 'max_tokens', includeIfNull: false) int? get maxTokens; - /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// An upper bound for the number of tokens that can be generated for a completion, including visible output + /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + @override + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? get maxCompletionTokens; + + /// How many chat completion choices to generate for each input message. 
Note that you will be charged based on + /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @override @JsonKey(includeIfNull: false) int? get n; @@ -4613,30 +4734,46 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; - /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + /// than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + /// will match your supplied JSON schema. + /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + /// valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + /// or user message. Without this, the model may generate an unending stream of whitespace until the generation + /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + /// `max_tokens` or the conversation exceeded the max context length. /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @override @JsonKey(name: 'response_format', includeIfNull: false) ResponseFormat? get responseFormat; /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. 
+ /// If specified, our system will make a best effort to sample deterministically, such that repeated requests + /// with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + /// monitor changes in the backend. @override @JsonKey(includeIfNull: false) int? get seed; /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers /// subscribed to the scale tier service: - /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - /// - If set to 'default', the request will be processed using the default service tier with a lower - /// uptime SLA and no latency guarantee. + /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + /// until they are exhausted. + /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + /// default service tier with a lower uptime SLA and no latency guarantee. + /// - If set to 'default', the request will be processed using the default service tier with a lower uptime + /// SLA and no latency guarantee. /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @@ -4653,7 +4790,7 @@ abstract class _CreateChatCompletionRequest @JsonKey(includeIfNull: false) ChatCompletionStop? get stop; - /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @override @JsonKey(includeIfNull: false) bool? get stream; @@ -4677,7 +4814,9 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. @override @JsonKey(includeIfNull: false) List? get tools; @@ -4686,7 +4825,8 @@ abstract class _CreateChatCompletionRequest /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
+ /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + /// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @override @@ -4710,7 +4850,8 @@ abstract class _CreateChatCompletionRequest /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + /// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. @override @@ -11675,6 +11816,11 @@ mixin _$CompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; + /// Breakdown of tokens used in a completion. + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? get completionTokensDetails => + throw _privateConstructorUsedError; + /// Serializes this CompletionUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; @@ -11694,7 +11840,11 @@ abstract class $CompletionUsageCopyWith<$Res> { $Res call( {@JsonKey(name: 'completion_tokens') int? completionTokens, @JsonKey(name: 'prompt_tokens') int promptTokens, - @JsonKey(name: 'total_tokens') int totalTokens}); + @JsonKey(name: 'total_tokens') int totalTokens, + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? completionTokensDetails}); + + $CompletionTokensDetailsCopyWith<$Res>? get completionTokensDetails; } /// @nodoc @@ -11715,6 +11865,7 @@ class _$CompletionUsageCopyWithImpl<$Res, $Val extends CompletionUsage> Object? completionTokens = freezed, Object? promptTokens = null, Object? totalTokens = null, + Object? completionTokensDetails = freezed, }) { return _then(_value.copyWith( completionTokens: freezed == completionTokens @@ -11729,8 +11880,27 @@ class _$CompletionUsageCopyWithImpl<$Res, $Val extends CompletionUsage> ? _value.totalTokens : totalTokens // ignore: cast_nullable_to_non_nullable as int, + completionTokensDetails: freezed == completionTokensDetails + ? _value.completionTokensDetails + : completionTokensDetails // ignore: cast_nullable_to_non_nullable + as CompletionTokensDetails?, ) as $Val); } + + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $CompletionTokensDetailsCopyWith<$Res>? get completionTokensDetails { + if (_value.completionTokensDetails == null) { + return null; + } + + return $CompletionTokensDetailsCopyWith<$Res>( + _value.completionTokensDetails!, (value) { + return _then(_value.copyWith(completionTokensDetails: value) as $Val); + }); + } } /// @nodoc @@ -11744,7 +11914,12 @@ abstract class _$$CompletionUsageImplCopyWith<$Res> $Res call( {@JsonKey(name: 'completion_tokens') int? completionTokens, @JsonKey(name: 'prompt_tokens') int promptTokens, - @JsonKey(name: 'total_tokens') int totalTokens}); + @JsonKey(name: 'total_tokens') int totalTokens, + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? completionTokensDetails}); + + @override + $CompletionTokensDetailsCopyWith<$Res>? 
get completionTokensDetails; } /// @nodoc @@ -11763,6 +11938,7 @@ class __$$CompletionUsageImplCopyWithImpl<$Res> Object? completionTokens = freezed, Object? promptTokens = null, Object? totalTokens = null, + Object? completionTokensDetails = freezed, }) { return _then(_$CompletionUsageImpl( completionTokens: freezed == completionTokens @@ -11777,6 +11953,10 @@ class __$$CompletionUsageImplCopyWithImpl<$Res> ? _value.totalTokens : totalTokens // ignore: cast_nullable_to_non_nullable as int, + completionTokensDetails: freezed == completionTokensDetails + ? _value.completionTokensDetails + : completionTokensDetails // ignore: cast_nullable_to_non_nullable + as CompletionTokensDetails?, )); } } @@ -11787,7 +11967,9 @@ class _$CompletionUsageImpl extends _CompletionUsage { const _$CompletionUsageImpl( {@JsonKey(name: 'completion_tokens') required this.completionTokens, @JsonKey(name: 'prompt_tokens') required this.promptTokens, - @JsonKey(name: 'total_tokens') required this.totalTokens}) + @JsonKey(name: 'total_tokens') required this.totalTokens, + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + this.completionTokensDetails}) : super._(); factory _$CompletionUsageImpl.fromJson(Map json) => @@ -11808,9 +11990,14 @@ class _$CompletionUsageImpl extends _CompletionUsage { @JsonKey(name: 'total_tokens') final int totalTokens; + /// Breakdown of tokens used in a completion. + @override + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + final CompletionTokensDetails? completionTokensDetails; + @override String toString() { - return 'CompletionUsage(completionTokens: $completionTokens, promptTokens: $promptTokens, totalTokens: $totalTokens)'; + return 'CompletionUsage(completionTokens: $completionTokens, promptTokens: $promptTokens, totalTokens: $totalTokens, completionTokensDetails: $completionTokensDetails)'; } @override @@ -11823,13 +12010,16 @@ class _$CompletionUsageImpl extends _CompletionUsage { (identical(other.promptTokens, promptTokens) || other.promptTokens == promptTokens) && (identical(other.totalTokens, totalTokens) || - other.totalTokens == totalTokens)); + other.totalTokens == totalTokens) && + (identical( + other.completionTokensDetails, completionTokensDetails) || + other.completionTokensDetails == completionTokensDetails)); } @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); + int get hashCode => Object.hash(runtimeType, completionTokens, promptTokens, + totalTokens, completionTokensDetails); /// Create a copy of CompletionUsage /// with the given fields replaced by the non-null parameter values. @@ -11852,8 +12042,10 @@ abstract class _CompletionUsage extends CompletionUsage { const factory _CompletionUsage( {@JsonKey(name: 'completion_tokens') required final int? completionTokens, @JsonKey(name: 'prompt_tokens') required final int promptTokens, - @JsonKey(name: 'total_tokens') - required final int totalTokens}) = _$CompletionUsageImpl; + @JsonKey(name: 'total_tokens') required final int totalTokens, + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + final CompletionTokensDetails? + completionTokensDetails}) = _$CompletionUsageImpl; const _CompletionUsage._() : super._(); factory _CompletionUsage.fromJson(Map json) = @@ -11874,6 +12066,11 @@ abstract class _CompletionUsage extends CompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens; + /// Breakdown of tokens used in a completion. 
+ @override + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? get completionTokensDetails; + /// Create a copy of CompletionUsage /// with the given fields replaced by the non-null parameter values. @override @@ -11882,6 +12079,178 @@ abstract class _CompletionUsage extends CompletionUsage { throw _privateConstructorUsedError; } +CompletionTokensDetails _$CompletionTokensDetailsFromJson( + Map json) { + return _CompletionTokensDetails.fromJson(json); +} + +/// @nodoc +mixin _$CompletionTokensDetails { + /// Tokens generated by the model for reasoning. + @JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? get reasoningTokens => throw _privateConstructorUsedError; + + /// Serializes this CompletionTokensDetails to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $CompletionTokensDetailsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CompletionTokensDetailsCopyWith<$Res> { + factory $CompletionTokensDetailsCopyWith(CompletionTokensDetails value, + $Res Function(CompletionTokensDetails) then) = + _$CompletionTokensDetailsCopyWithImpl<$Res, CompletionTokensDetails>; + @useResult + $Res call( + {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? reasoningTokens}); +} + +/// @nodoc +class _$CompletionTokensDetailsCopyWithImpl<$Res, + $Val extends CompletionTokensDetails> + implements $CompletionTokensDetailsCopyWith<$Res> { + _$CompletionTokensDetailsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? reasoningTokens = freezed, + }) { + return _then(_value.copyWith( + reasoningTokens: freezed == reasoningTokens + ? _value.reasoningTokens + : reasoningTokens // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CompletionTokensDetailsImplCopyWith<$Res> + implements $CompletionTokensDetailsCopyWith<$Res> { + factory _$$CompletionTokensDetailsImplCopyWith( + _$CompletionTokensDetailsImpl value, + $Res Function(_$CompletionTokensDetailsImpl) then) = + __$$CompletionTokensDetailsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? reasoningTokens}); +} + +/// @nodoc +class __$$CompletionTokensDetailsImplCopyWithImpl<$Res> + extends _$CompletionTokensDetailsCopyWithImpl<$Res, + _$CompletionTokensDetailsImpl> + implements _$$CompletionTokensDetailsImplCopyWith<$Res> { + __$$CompletionTokensDetailsImplCopyWithImpl( + _$CompletionTokensDetailsImpl _value, + $Res Function(_$CompletionTokensDetailsImpl) _then) + : super(_value, _then); + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? reasoningTokens = freezed, + }) { + return _then(_$CompletionTokensDetailsImpl( + reasoningTokens: freezed == reasoningTokens + ? 
_value.reasoningTokens + : reasoningTokens // ignore: cast_nullable_to_non_nullable + as int?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CompletionTokensDetailsImpl extends _CompletionTokensDetails { + const _$CompletionTokensDetailsImpl( + {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) + this.reasoningTokens}) + : super._(); + + factory _$CompletionTokensDetailsImpl.fromJson(Map json) => + _$$CompletionTokensDetailsImplFromJson(json); + + /// Tokens generated by the model for reasoning. + @override + @JsonKey(name: 'reasoning_tokens', includeIfNull: false) + final int? reasoningTokens; + + @override + String toString() { + return 'CompletionTokensDetails(reasoningTokens: $reasoningTokens)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CompletionTokensDetailsImpl && + (identical(other.reasoningTokens, reasoningTokens) || + other.reasoningTokens == reasoningTokens)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, reasoningTokens); + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$CompletionTokensDetailsImplCopyWith<_$CompletionTokensDetailsImpl> + get copyWith => __$$CompletionTokensDetailsImplCopyWithImpl< + _$CompletionTokensDetailsImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CompletionTokensDetailsImplToJson( + this, + ); + } +} + +abstract class _CompletionTokensDetails extends CompletionTokensDetails { + const factory _CompletionTokensDetails( + {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) + final int? reasoningTokens}) = _$CompletionTokensDetailsImpl; + const _CompletionTokensDetails._() : super._(); + + factory _CompletionTokensDetails.fromJson(Map json) = + _$CompletionTokensDetailsImpl.fromJson; + + /// Tokens generated by the model for reasoning. + @override + @JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? get reasoningTokens; + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$CompletionTokensDetailsImplCopyWith<_$CompletionTokensDetailsImpl> + get copyWith => throw _privateConstructorUsedError; +} + CreateEmbeddingRequest _$CreateEmbeddingRequestFromJson( Map json) { return _CreateEmbeddingRequest.fromJson(json); @@ -14581,7 +14950,7 @@ mixin _$CreateFineTuningJobRequest { FineTuningJobHyperparameters? get hyperparameters => throw _privateConstructorUsedError; - /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// A string of up to 64 characters that will be added to your fine-tuned model name. /// /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @JsonKey(includeIfNull: false) @@ -14853,7 +15222,7 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) final FineTuningJobHyperparameters? hyperparameters; - /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// A string of up to 64 characters that will be added to your fine-tuned model name. 
/// /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @override @@ -14991,7 +15360,7 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) FineTuningJobHyperparameters? get hyperparameters; - /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// A string of up to 64 characters that will be added to your fine-tuned model name. /// /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @override @@ -23597,11 +23966,11 @@ mixin _$AssistantObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -23993,11 +24362,11 @@ class _$AssistantObjectImpl extends _AssistantObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -24165,11 +24534,11 @@ abstract class _AssistantObject extends AssistantObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. 
/// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -24710,11 +25079,11 @@ mixin _$CreateAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -25077,11 +25446,11 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -25231,11 +25600,11 @@ abstract class _CreateAssistantRequest extends CreateAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -26211,11 +26580,11 @@ mixin _$ModifyAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. 
Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -26591,11 +26960,11 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -26753,11 +27122,11 @@ abstract class _ModifyAssistantRequest extends ModifyAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -27775,8 +28144,8 @@ mixin _$FileSearchRankingOptions { FileSearchRanker? get ranker => throw _privateConstructorUsedError; /// The score threshold for the file search. All values must be a floating point number between 0 and 1. - @JsonKey(name: 'score_threshold', includeIfNull: false) - double? get scoreThreshold => throw _privateConstructorUsedError; + @JsonKey(name: 'score_threshold') + double get scoreThreshold => throw _privateConstructorUsedError; /// Serializes this FileSearchRankingOptions to a JSON map. Map toJson() => throw _privateConstructorUsedError; @@ -27799,8 +28168,7 @@ abstract class $FileSearchRankingOptionsCopyWith<$Res> { includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) FileSearchRanker? ranker, - @JsonKey(name: 'score_threshold', includeIfNull: false) - double? scoreThreshold}); + @JsonKey(name: 'score_threshold') double scoreThreshold}); } /// @nodoc @@ -27820,17 +28188,17 @@ class _$FileSearchRankingOptionsCopyWithImpl<$Res, @override $Res call({ Object? ranker = freezed, - Object? scoreThreshold = freezed, + Object? 
scoreThreshold = null, }) { return _then(_value.copyWith( ranker: freezed == ranker ? _value.ranker : ranker // ignore: cast_nullable_to_non_nullable as FileSearchRanker?, - scoreThreshold: freezed == scoreThreshold + scoreThreshold: null == scoreThreshold ? _value.scoreThreshold : scoreThreshold // ignore: cast_nullable_to_non_nullable - as double?, + as double, ) as $Val); } } @@ -27849,8 +28217,7 @@ abstract class _$$FileSearchRankingOptionsImplCopyWith<$Res> includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) FileSearchRanker? ranker, - @JsonKey(name: 'score_threshold', includeIfNull: false) - double? scoreThreshold}); + @JsonKey(name: 'score_threshold') double scoreThreshold}); } /// @nodoc @@ -27869,17 +28236,17 @@ class __$$FileSearchRankingOptionsImplCopyWithImpl<$Res> @override $Res call({ Object? ranker = freezed, - Object? scoreThreshold = freezed, + Object? scoreThreshold = null, }) { return _then(_$FileSearchRankingOptionsImpl( ranker: freezed == ranker ? _value.ranker : ranker // ignore: cast_nullable_to_non_nullable as FileSearchRanker?, - scoreThreshold: freezed == scoreThreshold + scoreThreshold: null == scoreThreshold ? _value.scoreThreshold : scoreThreshold // ignore: cast_nullable_to_non_nullable - as double?, + as double, )); } } @@ -27892,8 +28259,7 @@ class _$FileSearchRankingOptionsImpl extends _FileSearchRankingOptions { includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) this.ranker, - @JsonKey(name: 'score_threshold', includeIfNull: false) - this.scoreThreshold}) + @JsonKey(name: 'score_threshold') required this.scoreThreshold}) : super._(); factory _$FileSearchRankingOptionsImpl.fromJson(Map json) => @@ -27907,8 +28273,8 @@ class _$FileSearchRankingOptionsImpl extends _FileSearchRankingOptions { /// The score threshold for the file search. All values must be a floating point number between 0 and 1. @override - @JsonKey(name: 'score_threshold', includeIfNull: false) - final double? scoreThreshold; + @JsonKey(name: 'score_threshold') + final double scoreThreshold; @override String toString() { @@ -27952,8 +28318,8 @@ abstract class _FileSearchRankingOptions extends FileSearchRankingOptions { includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) final FileSearchRanker? ranker, - @JsonKey(name: 'score_threshold', includeIfNull: false) - final double? scoreThreshold}) = _$FileSearchRankingOptionsImpl; + @JsonKey(name: 'score_threshold') + required final double scoreThreshold}) = _$FileSearchRankingOptionsImpl; const _FileSearchRankingOptions._() : super._(); factory _FileSearchRankingOptions.fromJson(Map json) = @@ -27967,8 +28333,8 @@ abstract class _FileSearchRankingOptions extends FileSearchRankingOptions { /// The score threshold for the file search. All values must be a floating point number between 0 and 1. @override - @JsonKey(name: 'score_threshold', includeIfNull: false) - double? get scoreThreshold; + @JsonKey(name: 'score_threshold') + double get scoreThreshold; /// Create a copy of FileSearchRankingOptions /// with the given fields replaced by the non-null parameter values. @@ -28667,11 +29033,11 @@ mixin _$RunObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. 
/// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -29403,11 +29769,11 @@ class _$RunObjectImpl extends _RunObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -29705,11 +30071,11 @@ abstract class _RunObject extends RunObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -31689,11 +32055,11 @@ mixin _$CreateRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. 
/// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -32227,11 +32593,11 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -32451,11 +32817,11 @@ abstract class _CreateRunRequest extends CreateRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -35200,11 +35566,11 @@ mixin _$CreateThreadAndRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -35759,11 +36125,11 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. 
Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -35981,11 +36347,11 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -59178,7 +59544,8 @@ mixin _$AssistantToolsFileSearchFileSearch { @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults => throw _privateConstructorUsedError; - /// The ranking options for the file search. + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + /// a score_threshold of 0. /// /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) /// for more information. @@ -59336,7 +59703,8 @@ class _$AssistantToolsFileSearchFileSearchImpl @JsonKey(name: 'max_num_results', includeIfNull: false) final int? maxNumResults; - /// The ranking options for the file search. + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + /// a score_threshold of 0. /// /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) /// for more information. @@ -59406,7 +59774,8 @@ abstract class _AssistantToolsFileSearchFileSearch @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults; - /// The ranking options for the file search. + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + /// a score_threshold of 0. /// /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) /// for more information. 
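For orientation, a rough Dart usage sketch of the regenerated models above: the client call shape is assumed from the openai_dart README (`OpenAIClient`, `createChatCompletion`, the `ChatCompletionMessage`/`ChatCompletionModel` factories), while `maxCompletionTokens` and `completionTokensDetails` come from the generated code in this diff; the API key, model id, prompt, and token limit are placeholders, not values taken from the patch.

import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  // Placeholder key; in practice this would come from the environment.
  final client = OpenAIClient(apiKey: 'OPENAI_API_KEY');

  final res = await client.createChatCompletion(
    request: CreateChatCompletionRequest(
      model: ChatCompletionModel.modelId('o1-preview'),
      messages: [
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string(
            'Summarize the change log in one sentence.',
          ),
        ),
      ],
      // New in this patch: upper bound covering visible output and reasoning tokens.
      maxCompletionTokens: 1024,
    ),
  );

  // New in this patch: per-completion token breakdown, when the API returns it.
  final reasoning = res.usage?.completionTokensDetails?.reasoningTokens;
  print('reasoning tokens used: $reasoning');
}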
diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 092425e5..c57effb3 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -316,6 +316,7 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( logprobs: json['logprobs'] as bool?, topLogprobs: (json['top_logprobs'] as num?)?.toInt(), maxTokens: (json['max_tokens'] as num?)?.toInt(), + maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), n: (json['n'] as num?)?.toInt() ?? 1, presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 0.0, responseFormat: json['response_format'] == null @@ -366,6 +367,7 @@ Map _$$CreateChatCompletionRequestImplToJson( writeNotNull('logprobs', instance.logprobs); writeNotNull('top_logprobs', instance.topLogprobs); writeNotNull('max_tokens', instance.maxTokens); + writeNotNull('max_completion_tokens', instance.maxCompletionTokens); writeNotNull('n', instance.n); writeNotNull('presence_penalty', instance.presencePenalty); writeNotNull('response_format', instance.responseFormat?.toJson()); @@ -1102,15 +1104,50 @@ _$CompletionUsageImpl _$$CompletionUsageImplFromJson( completionTokens: (json['completion_tokens'] as num?)?.toInt(), promptTokens: (json['prompt_tokens'] as num).toInt(), totalTokens: (json['total_tokens'] as num).toInt(), + completionTokensDetails: json['completion_tokens_details'] == null + ? null + : CompletionTokensDetails.fromJson( + json['completion_tokens_details'] as Map), ); Map _$$CompletionUsageImplToJson( - _$CompletionUsageImpl instance) => - { - 'completion_tokens': instance.completionTokens, - 'prompt_tokens': instance.promptTokens, - 'total_tokens': instance.totalTokens, - }; + _$CompletionUsageImpl instance) { + final val = { + 'completion_tokens': instance.completionTokens, + 'prompt_tokens': instance.promptTokens, + 'total_tokens': instance.totalTokens, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull( + 'completion_tokens_details', instance.completionTokensDetails?.toJson()); + return val; +} + +_$CompletionTokensDetailsImpl _$$CompletionTokensDetailsImplFromJson( + Map json) => + _$CompletionTokensDetailsImpl( + reasoningTokens: (json['reasoning_tokens'] as num?)?.toInt(), + ); + +Map _$$CompletionTokensDetailsImplToJson( + _$CompletionTokensDetailsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('reasoning_tokens', instance.reasoningTokens); + return val; +} _$CreateEmbeddingRequestImpl _$$CreateEmbeddingRequestImplFromJson( Map json) => @@ -2530,7 +2567,7 @@ _$FileSearchRankingOptionsImpl _$$FileSearchRankingOptionsImplFromJson( _$FileSearchRankingOptionsImpl( ranker: $enumDecodeNullable(_$FileSearchRankerEnumMap, json['ranker'], unknownValue: JsonKey.nullForUndefinedEnumValue), - scoreThreshold: (json['score_threshold'] as num?)?.toDouble(), + scoreThreshold: (json['score_threshold'] as num).toDouble(), ); Map _$$FileSearchRankingOptionsImplToJson( @@ -2544,7 +2581,7 @@ Map _$$FileSearchRankingOptionsImplToJson( } writeNotNull('ranker', _$FileSearchRankerEnumMap[instance.ranker]); - writeNotNull('score_threshold', instance.scoreThreshold); + val['score_threshold'] = instance.scoreThreshold; return val; } diff --git a/packages/openai_dart/oas/openapi_curated.yaml 
b/packages/openai_dart/oas/openapi_curated.yaml index 793e696e..b7333f2c 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1815,7 +1815,9 @@ components: properties: model: title: ChatCompletionModel - description: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + description: | + ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + table for details on which models work with the Chat API. example: "gpt-4o" anyOf: - type: string @@ -1842,7 +1844,6 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", - "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -1858,7 +1859,9 @@ components: "o1-preview-2024-09-12", ] messages: - description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + description: | + A list of messages comprising the conversation so far. + [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). type: array minItems: 1 items: @@ -1879,22 +1882,39 @@ components: description: | Modify the likelihood of specified tokens appearing in the completion. - Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + relevant token. logprobs: - description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + description: | + Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of + each output token returned in the `content` of `message`. type: boolean nullable: true top_logprobs: - description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + description: | + An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. type: integer minimum: 0 maximum: 20 nullable: true max_tokens: description: | - The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. - - The total length of input tokens and generated tokens is limited by the model's context length. 
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + via API. + + This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). + type: integer + nullable: true + max_completion_tokens: + description: | + An upper bound for the number of tokens that can be generated for a completion, including visible output + tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). type: integer nullable: true n: @@ -1904,7 +1924,9 @@ components: default: 1 example: 1 nullable: true - description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + description: | + How many chat completion choices to generate for each input message. Note that you will be charged based on + the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. presence_penalty: type: number default: 0 @@ -1921,15 +1943,20 @@ components: nullable: true description: | This feature is in Beta. - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + If specified, our system will make a best effort to sample deterministically, such that repeated requests + with the same `seed` and parameters should return the same result. + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + monitor changes in the backend. service_tier: description: | Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - - If set to 'default', the request will be processed using the default service tier with a lower - uptime SLA and no latency guarantee. + - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + default service tier with a lower uptime SLA and no latency guarantee. + - If set to 'default', the request will be processed using the default service tier with a lower uptime + SLA and no latency guarantee. - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. @@ -1954,8 +1981,10 @@ components: type: string stream: description: > - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
+ If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. + [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). type: boolean nullable: true default: false @@ -1979,9 +2008,10 @@ components: description: *completions_top_p_description tools: type: array - description: > + description: | A list of tools the model may call. Currently, only functions are supported as a tool. - Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + supported. items: $ref: "#/components/schemas/ChatCompletionTool" tool_choice: @@ -1991,7 +2021,8 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + model to call that tool. `none` is the default when no tools are present. `auto` is the default if tools are present. oneOf: @@ -2020,7 +2051,8 @@ components: Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. - Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + function. `none` is the default when no functions are present. `auto` is the default if functions are present. oneOf: @@ -2285,13 +2317,24 @@ components: ResponseFormat: type: object description: | - An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + An object specifying the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + than `gpt-3.5-turbo-1106`. - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + will match your supplied JSON schema. + Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
- Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + valid JSON. - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + or user message. Without this, the model may generate an unending stream of whitespace until the generation + reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + `max_tokens` or the conversation exceeded the max context length. oneOf: - $ref: "#/components/schemas/ResponseFormatText" - $ref: "#/components/schemas/ResponseFormatJsonObject" @@ -2671,10 +2714,19 @@ components: total_tokens: type: integer description: Total number of tokens used in the request (prompt + completion). + completion_tokens_details: + $ref: "#/components/schemas/CompletionTokensDetails" required: - prompt_tokens - completion_tokens - total_tokens + CompletionTokensDetails: + type: object + description: Breakdown of tokens used in a completion. + properties: + reasoning_tokens: + type: integer + description: Tokens generated by the model for reasoning. CreateEmbeddingRequest: type: object description: Request object for the Create embedding endpoint. @@ -2849,12 +2901,12 @@ components: $ref: "#/components/schemas/FineTuningJobHyperparameters" suffix: description: | - A string of up to 18 characters that will be added to your fine-tuned model name. + A string of up to 64 characters that will be added to your fine-tuned model name. For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. type: string minLength: 1 - maxLength: 40 + maxLength: 64 default: null nullable: true validation_file: @@ -3617,11 +3669,11 @@ components: [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. 
**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -3681,7 +3733,6 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", - "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -3920,7 +3971,8 @@ components: FileSearchRankingOptions: type: object description: | - The ranking options for the file search. + The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + a score_threshold of 0. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. @@ -3932,6 +3984,8 @@ components: description: The score threshold for the file search. All values must be a floating point number between 0 and 1. minimum: 0 maximum: 1 + required: + - score_threshold FileSearchRanker: type: string description: The ranker to use for the file search. If not specified will use the `auto` ranker. @@ -4243,7 +4297,6 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", - "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -4487,7 +4540,6 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", - "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 96e64e32..d9b16b55 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -9698,11 +9698,18 @@ components: nullable: true max_tokens: description: | - The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API. - The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o1 series models](/docs/guides/reasoning). type: integer nullable: true + deprecated: true + max_completion_tokens: + description: | + An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). + type: integer + nullable: true + n: type: integer minimum: 1 @@ -9722,9 +9729,9 @@ components: description: | An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
+ Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: @@ -9746,7 +9753,8 @@ components: service_tier: description: | Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. @@ -10635,12 +10643,12 @@ components: default: auto suffix: description: | - A string of up to 18 characters that will be added to your fine-tuned model name. + A string of up to 64 characters that will be added to your fine-tuned model name. For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. type: string minLength: 1 - maxLength: 40 + maxLength: 64 default: null nullable: true validation_file: @@ -10892,16 +10900,7 @@ components: An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. type: string response_format: - description: | - The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. - type: string - enum: - - json - - text - - srt - - verbose_json - - vtt - default: json + $ref: "#/components/schemas/AudioResponseFormat" temperature: description: | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. @@ -11034,6 +11033,18 @@ components: group: audio example: *verbose_transcription_response_example + AudioResponseFormat: + description: | + The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. + type: string + enum: + - json + - text + - srt + - verbose_json + - vtt + default: json + CreateTranslationRequest: type: object additionalProperties: false @@ -11058,10 +11069,7 @@ components: An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. type: string response_format: - description: | - The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. 
- type: string - default: json + $ref: "#/components/schemas/AudioResponseFormat" temperature: description: | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. @@ -11744,6 +11752,13 @@ components: total_tokens: type: integer description: Total number of tokens used in the request (prompt + completion). + completion_tokens_details: + type: object + description: Breakdown of tokens used in a completion. + properties: + reasoning_tokens: + type: integer + description: Tokens generated by the model for reasoning. required: - prompt_tokens - completion_tokens @@ -11791,9 +11806,9 @@ components: description: | Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: @@ -12283,7 +12298,7 @@ components: title: File search tool call ranking options type: object description: | - The ranking options for the file search. + The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. properties: @@ -12296,6 +12311,8 @@ components: description: The score threshold for the file search. All values must be a floating point number between 0 and 1. 
            minimum: 0
            maximum: 1
+          required:
+            - score_threshold
 
   AssistantToolsFileSearchTypeOnly:
     type: object
diff --git a/packages/openai_dart/test/openai_client_chat_test.dart b/packages/openai_dart/test/openai_client_chat_test.dart
index ebfe8c44..96c57c2a 100644
--- a/packages/openai_dart/test/openai_client_chat_test.dart
+++ b/packages/openai_dart/test/openai_client_chat_test.dart
@@ -115,7 +115,7 @@ void main() {
             content: ChatCompletionUserMessageContent.string('Tell me a joke'),
           ),
         ],
-        maxTokens: 2,
+        maxCompletionTokens: 2,
       );
       final res = await client.createChatCompletion(request: request);
       expect(res.choices, isNotEmpty);
@@ -123,6 +123,10 @@ void main() {
         res.choices.first.finishReason,
         ChatCompletionFinishReason.length,
       );
+      expect(
+        res.usage?.completionTokensDetails?.reasoningTokens,
+        isNotNull,
+      );
     });
 
     test('Test call chat completions API with other parameters', () async {
@@ -138,7 +142,7 @@ void main() {
             content: ChatCompletionUserMessageContent.string('Tell me a joke'),
           ),
         ],
-        maxTokens: 2,
+        maxCompletionTokens: 2,
         presencePenalty: 0.6,
         frequencyPenalty: 0.6,
         logitBias: {'50256': -100},

From ccf34f7542e780ce3f11128a59b466321af9c490 Mon Sep 17 00:00:00 2001
From: David Miguel Lozano
Date: Wed, 25 Sep 2024 23:47:16 +0200
Subject: [PATCH 241/251] refactor: Migrate ChatOpenAI to maxCompletionTokens (#557)

---
 packages/langchain_openai/lib/src/chat_models/mappers.dart | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart
index 3b23ee8c..ad8bec0b 100644
--- a/packages/langchain_openai/lib/src/chat_models/mappers.dart
+++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart
@@ -37,7 +37,7 @@ CreateChatCompletionRequest createChatCompletionRequest(
     frequencyPenalty:
         options?.frequencyPenalty ?? defaultOptions.frequencyPenalty,
     logitBias: options?.logitBias ?? defaultOptions.logitBias,
-    maxTokens: options?.maxTokens ?? defaultOptions.maxTokens,
+    maxCompletionTokens: options?.maxTokens ?? defaultOptions.maxTokens,
     n: options?.n ?? defaultOptions.n,
     presencePenalty: options?.presencePenalty ??
defaultOptions.presencePenalty, responseFormat: responseFormatDto, From d762f550a7e3cdb91475328486432584799a5a04 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Wed, 25 Sep 2024 23:47:45 +0200 Subject: [PATCH 242/251] chore(release): publish packages - langchain@0.7.6 - langchain_community@0.3.2 - langchain_core@0.3.6 - langchain_firebase@0.2.1+2 - langchain_google@0.6.3 - langchain_ollama@0.3.2 - langchain_openai@0.7.2 - ollama_dart@0.2.2 - openai_dart@0.4.2 - langchain_supabase@0.1.1+3 - langchain_pinecone@0.1.0+9 - langchain_anthropic@0.1.1+2 - langchain_chroma@0.2.1+3 - langchain_mistralai@0.2.3+1 --- CHANGELOG.md | 81 +++++++++++++++++++ examples/browser_summarizer/pubspec.yaml | 6 +- examples/docs_examples/pubspec.yaml | 16 ++-- examples/hello_world_backend/pubspec.yaml | 4 +- examples/hello_world_cli/pubspec.yaml | 4 +- examples/hello_world_flutter/pubspec.yaml | 10 +-- examples/wikivoyage_eu/pubspec.yaml | 6 +- packages/langchain/CHANGELOG.md | 4 + packages/langchain/pubspec.yaml | 10 +-- packages/langchain_anthropic/CHANGELOG.md | 4 + packages/langchain_anthropic/pubspec.yaml | 4 +- packages/langchain_chroma/CHANGELOG.md | 4 + packages/langchain_chroma/pubspec.yaml | 10 +-- packages/langchain_community/CHANGELOG.md | 5 ++ packages/langchain_community/pubspec.yaml | 6 +- packages/langchain_core/CHANGELOG.md | 4 + packages/langchain_core/pubspec.yaml | 2 +- packages/langchain_firebase/CHANGELOG.md | 4 + .../langchain_firebase/example/pubspec.yaml | 4 +- packages/langchain_firebase/pubspec.yaml | 4 +- packages/langchain_google/CHANGELOG.md | 5 ++ packages/langchain_google/pubspec.yaml | 4 +- packages/langchain_mistralai/CHANGELOG.md | 4 + packages/langchain_mistralai/pubspec.yaml | 4 +- packages/langchain_ollama/CHANGELOG.md | 4 + packages/langchain_ollama/pubspec.yaml | 6 +- packages/langchain_openai/CHANGELOG.md | 5 ++ packages/langchain_openai/pubspec.yaml | 10 +-- packages/langchain_pinecone/CHANGELOG.md | 4 + packages/langchain_pinecone/pubspec.yaml | 6 +- packages/langchain_supabase/CHANGELOG.md | 4 + packages/langchain_supabase/pubspec.yaml | 10 +-- packages/ollama_dart/CHANGELOG.md | 4 + packages/ollama_dart/pubspec.yaml | 2 +- packages/openai_dart/CHANGELOG.md | 6 ++ packages/openai_dart/pubspec.yaml | 2 +- 36 files changed, 207 insertions(+), 65 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2bedbee2..7a91910b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,87 @@ 📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +## 2024-09-25 + +### Changes + +--- + +Packages with breaking changes: + +- There are no breaking changes in this release. 
+ +Packages with other changes: + +- [`langchain` - `v0.7.6`](#langchain---v076) +- [`langchain_core` - `v0.3.6`](#langchain_core---v036) +- [`langchain_community` - `v0.3.2`](#langchain_community---v032) +- [`langchain_firebase` - `v0.2.1+2`](#langchain_firebase---v0212) +- [`langchain_google` - `v0.6.3`](#langchain_google---v063) +- [`langchain_ollama` - `v0.3.2`](#langchain_ollama---v032) +- [`langchain_openai` - `v0.7.2`](#langchain_openai---v072) +- [`ollama_dart` - `v0.2.2`](#ollama_dart---v022) +- [`openai_dart` - `v0.4.2`](#openai_dart---v042) +- [`langchain_supabase` - `v0.1.1+3`](#langchain_supabase---v0113) +- [`langchain_pinecone` - `v0.1.0+9`](#langchain_pinecone---v0109) +- [`langchain_anthropic` - `v0.1.1+2`](#langchain_anthropic---v0112) +- [`langchain_chroma` - `v0.2.1+3`](#langchain_chroma---v0213) +- [`langchain_mistralai` - `v0.2.3+1`](#langchain_mistralai---v0231) + +Packages with dependency updates only: + +> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project. + +- `langchain_supabase` - `v0.1.1+3` +- `langchain_pinecone` - `v0.1.0+9` +- `langchain_anthropic` - `v0.1.1+2` +- `langchain_chroma` - `v0.2.1+3` +- `langchain_mistralai` - `v0.2.3+1` + +--- + +#### `langchain` - `v0.7.6` + +- **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) + +#### `langchain_core` - `v0.3.6` + +- **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) + +#### `langchain_community` - `v0.3.2` + +- **FEAT**: Add support for deleteWhere in ObjectBoxVectorStore ([#552](https://github.com/davidmigloz/langchain_dart/issues/552)). ([90918bba](https://github.com/davidmigloz/langchain_dart/commit/90918bbac411ccfe4823ae195de6a50a46575573)) +- **REFACTOR**: Add stubs for ObjectBox on web platform ([#553](https://github.com/davidmigloz/langchain_dart/issues/553)). ([41caed92](https://github.com/davidmigloz/langchain_dart/commit/41caed924bf24382567758be4590d5ddff31e839)) + +#### `langchain_firebase` - `v0.2.1+2` + +- **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) + +#### `langchain_google` - `v0.6.3` + +- **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). ([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981)) +- **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) + +#### `langchain_ollama` - `v0.3.2` + +- **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). 
([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) + +#### `langchain_openai` - `v0.7.2` + +- **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) +- **REFACTOR**: Migrate ChatOpenAI to maxCompletionTokens ([#557](https://github.com/davidmigloz/langchain_dart/issues/557)). ([08057a5b](https://github.com/davidmigloz/langchain_dart/commit/08057a5b6e08ee2633c6be6144be1619e902bbc5)) + +#### `ollama_dart` - `v0.2.2` + +- **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). ([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) + +#### `openai_dart` - `v0.4.2` + +- **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) +- **FEAT**: Add support for maxCompletionTokens and reasoningTokens in openai_dart ([#556](https://github.com/davidmigloz/langchain_dart/issues/556)). ([37d75b61](https://github.com/davidmigloz/langchain_dart/commit/37d75b612b0f42bbf8d092bdd81c554278716582)) +- **FEAT**: Option to include file search results in assistants API ([#543](https://github.com/davidmigloz/langchain_dart/issues/543)). ([e916ad3c](https://github.com/davidmigloz/langchain_dart/commit/e916ad3c0c4e322319cedac8b06b5908f1c31935)) + + ## 2024-08-22 ### Changes diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index f284843f..42a5999e 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -13,9 +13,9 @@ dependencies: flutter_bloc: ^8.1.6 flutter_markdown: ^0.7.3 js: ^0.7.1 - langchain: ^0.7.5 - langchain_community: 0.3.1 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_community: 0.3.2 + langchain_openai: ^0.7.2 shared_preferences: ^2.3.0 flutter: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 0136cd5a..eb228cde 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -7,11 +7,11 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.5 - langchain_anthropic: ^0.1.1+1 - langchain_chroma: ^0.2.1+2 - langchain_community: 0.3.1 - langchain_google: ^0.6.2 - langchain_mistralai: ^0.2.3 - langchain_ollama: ^0.3.1 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_anthropic: ^0.1.1+2 + langchain_chroma: ^0.2.1+3 + langchain_community: 0.3.2 + langchain_google: ^0.6.3 + langchain_mistralai: ^0.2.3+1 + langchain_ollama: ^0.3.2 + langchain_openai: ^0.7.2 diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index 55135704..883ecefc 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -7,7 +7,7 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.5 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_openai: ^0.7.2 shelf: ^1.4.2 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index 26e63ed8..55291147 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -7,5 
+7,5 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.5 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_openai: ^0.7.2 diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index 15fd553e..1bb0485e 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -12,11 +12,11 @@ dependencies: equatable: ^2.0.5 flutter_bloc: ^8.1.6 flutter_markdown: ^0.7.3 - langchain: ^0.7.5 - langchain_google: ^0.6.2 - langchain_mistralai: ^0.2.3 - langchain_ollama: ^0.3.1 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_google: ^0.6.3 + langchain_mistralai: ^0.2.3+1 + langchain_ollama: ^0.3.2 + langchain_openai: ^0.7.2 flutter: uses-material-design: true diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index a95f4820..5782a40f 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -7,6 +7,6 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.5 - langchain_ollama: ^0.3.1 - langchain_community: 0.3.1 + langchain: ^0.7.6 + langchain_ollama: ^0.3.2 + langchain_community: 0.3.2 diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index 79614782..b5ee86a2 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.7.6 + + - **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) + ## 0.7.5 - **FEAT**: Add ToolsAgent for models with tool-calling support ([#530](https://github.com/davidmigloz/langchain_dart/issues/530)). ([f3ee5b44](https://github.com/davidmigloz/langchain_dart/commit/f3ee5b44c4ffa378343ec4ee1e08d8e594a6cb36)) diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 06b182e2..48657423 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain description: Build powerful LLM-based Dart and Flutter applications with LangChain.dart. -version: 0.7.5 +version: 0.7.6 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart @@ -19,12 +19,12 @@ dependencies: characters: ^1.3.0 collection: ^1.18.0 crypto: ^3.0.3 - langchain_core: 0.3.5 + langchain_core: 0.3.6 meta: ^1.11.0 uuid: ^4.4.2 dev_dependencies: test: ^1.25.8 - langchain_community: ^0.3.1 - langchain_openai: ^0.7.1 - langchain_ollama: ^0.3.1 + langchain_community: ^0.3.2 + langchain_openai: ^0.7.2 + langchain_ollama: ^0.3.2 diff --git a/packages/langchain_anthropic/CHANGELOG.md b/packages/langchain_anthropic/CHANGELOG.md index 690821d1..167d8c93 100644 --- a/packages/langchain_anthropic/CHANGELOG.md +++ b/packages/langchain_anthropic/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.1.1+2 + + - Update a dependency to the latest release. + ## 0.1.1+1 - Update a dependency to the latest release. diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 6aedbe71..7b23e44a 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_anthropic description: Anthropic module for LangChain.dart (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.). 
-version: 0.1.1+1 +version: 0.1.1+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ dependencies: anthropic_sdk_dart: ^0.1.0 collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 rxdart: ">=0.27.7 <0.29.0" diff --git a/packages/langchain_chroma/CHANGELOG.md b/packages/langchain_chroma/CHANGELOG.md index 45eaafa2..7e458f37 100644 --- a/packages/langchain_chroma/CHANGELOG.md +++ b/packages/langchain_chroma/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.2.1+3 + + - Update a dependency to the latest release. + ## 0.2.1+2 - Update a dependency to the latest release. diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 59aa28a5..e216f998 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_chroma description: LangChain.dart integration module for Chroma open-source embedding database. -version: 0.2.1+2 +version: 0.2.1+3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart @@ -19,12 +19,12 @@ environment: dependencies: chromadb: ^0.2.0+1 http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 meta: ^1.11.0 uuid: ^4.4.2 dev_dependencies: test: ^1.25.8 - langchain: ^0.7.5 - langchain_community: 0.3.1 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_community: 0.3.2 + langchain_openai: ^0.7.2 diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 9add3205..0336c13e 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -2,6 +2,11 @@ --- +## 0.3.2 + + - **FEAT**: Add support for deleteWhere in ObjectBoxVectorStore ([#552](https://github.com/davidmigloz/langchain_dart/issues/552)). ([90918bba](https://github.com/davidmigloz/langchain_dart/commit/90918bbac411ccfe4823ae195de6a50a46575573)) + - **REFACTOR**: Add stubs for ObjectBox on web platform ([#553](https://github.com/davidmigloz/langchain_dart/issues/553)). ([41caed92](https://github.com/davidmigloz/langchain_dart/commit/41caed924bf24382567758be4590d5ddff31e839)) + ## 0.3.1 - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index de530389..9fd5f428 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. 
-version: 0.3.1 +version: 0.3.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart @@ -22,7 +22,7 @@ dependencies: flat_buffers: ^23.5.26 http: ^1.2.2 json_path: ^0.7.4 - langchain_core: 0.3.5 + langchain_core: 0.3.6 math_expressions: ^2.6.0 meta: ^1.11.0 objectbox: ^4.0.1 @@ -31,7 +31,7 @@ dependencies: dev_dependencies: build_runner: ^2.4.11 - langchain_openai: ^0.7.1 + langchain_openai: ^0.7.2 objectbox_generator: ^4.0.1 test: ^1.25.8 diff --git a/packages/langchain_core/CHANGELOG.md b/packages/langchain_core/CHANGELOG.md index b7592ca0..382d3dd3 100644 --- a/packages/langchain_core/CHANGELOG.md +++ b/packages/langchain_core/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.3.6 + + - **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) + ## 0.3.5 - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index d322abdc..69e8bac9 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_core description: Contains core abstractions of LangChain.dart and the LangChain Expression Language (LCEL). -version: 0.3.5 +version: 0.3.6 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index 11f4c2ea..ab291e7e 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.2.1+2 + + - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) + ## 0.2.1+1 - Update a dependency to the latest release. diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index f63d336a..2c34d324 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -13,8 +13,8 @@ dependencies: flutter: sdk: flutter flutter_markdown: ^0.7.3 - langchain: 0.7.5 - langchain_firebase: 0.2.1+1 + langchain: 0.7.6 + langchain_firebase: 0.2.1+2 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 35c94028..74f496b2 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (Gemini, VertexAI for Firebase, Firestore, etc.). 
-version: 0.2.1+1 +version: 0.2.1+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart @@ -23,7 +23,7 @@ dependencies: firebase_auth: ^5.1.0 firebase_core: ^3.3.0 firebase_vertexai: ^0.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 meta: ^1.11.0 uuid: ^4.4.2 diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index a4288382..a7b8814d 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -2,6 +2,11 @@ --- +## 0.6.3 + + - **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). ([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981)) + - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) + ## 0.6.2 - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index a20269ee..92903540 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_google description: LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.). -version: 0.6.2 +version: 0.6.3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart @@ -24,7 +24,7 @@ dependencies: googleapis: ^13.0.0 googleapis_auth: ^1.6.0 http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 meta: ^1.11.0 uuid: ^4.4.2 vertex_ai: ^0.1.0+1 diff --git a/packages/langchain_mistralai/CHANGELOG.md b/packages/langchain_mistralai/CHANGELOG.md index a60fe14b..99b6c0e2 100644 --- a/packages/langchain_mistralai/CHANGELOG.md +++ b/packages/langchain_mistralai/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.2.3+1 + + - Update a dependency to the latest release. + ## 0.2.3 - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 4a3583ed..4b29f8a0 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_mistralai description: LangChain.dart integration module for Mistral AI (Mistral-7B, Mixtral 8x7B, embeddings, etc.). 
-version: 0.2.3 +version: 0.2.3+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_mistralai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_mistralai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ environment: dependencies: collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 mistralai_dart: ^0.0.3+3 diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index b9795885..f83459af 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.3.2 + + - **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). ([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) + ## 0.3.1 - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index f6e9e066..eb2c1fc8 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_ollama description: LangChain.dart integration module for Ollama (run Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, Qwen2 and other models locally). -version: 0.3.1 +version: 0.3.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart @@ -19,10 +19,10 @@ environment: dependencies: collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.2.1 + ollama_dart: ^0.2.2 uuid: ^4.4.2 dev_dependencies: diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index ab160770..a7a12549 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -2,6 +2,11 @@ --- +## 0.7.2 + + - **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) + - **REFACTOR**: Migrate ChatOpenAI to maxCompletionTokens ([#557](https://github.com/davidmigloz/langchain_dart/issues/557)). ([08057a5b](https://github.com/davidmigloz/langchain_dart/commit/08057a5b6e08ee2633c6be6144be1619e902bbc5)) + ## 0.7.1 - **FEAT**: Add support for Structured Outputs in ChatOpenAI ([#526](https://github.com/davidmigloz/langchain_dart/issues/526)). ([c5387b5d](https://github.com/davidmigloz/langchain_dart/commit/c5387b5dd87fe2aac511c4eca2d4a497065db61f)) diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 37c0b6ca..1161ee71 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_openai description: LangChain.dart integration module for OpenAI (GPT-4o, o1, Embeddings, DALL·E, etc.). 
-version: 0.7.1 +version: 0.7.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,13 +19,13 @@ environment: dependencies: collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.4.1 + openai_dart: ^0.4.2 uuid: ^4.4.2 dev_dependencies: - langchain: ^0.7.5 - langchain_community: 0.3.1 + langchain: ^0.7.6 + langchain_community: 0.3.2 test: ^1.25.8 diff --git a/packages/langchain_pinecone/CHANGELOG.md b/packages/langchain_pinecone/CHANGELOG.md index e277b4a0..f616d549 100644 --- a/packages/langchain_pinecone/CHANGELOG.md +++ b/packages/langchain_pinecone/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.1.0+9 + + - Update a dependency to the latest release. + ## 0.1.0+8 - Update a dependency to the latest release. diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index 8cbec927..82e39fa2 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_pinecone description: LangChain.dart integration module for Pinecone fully-managed vector database. -version: 0.1.0+8 +version: 0.1.0+9 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart @@ -18,11 +18,11 @@ environment: dependencies: http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.4.2 dev_dependencies: test: ^1.25.8 - langchain_openai: ^0.7.1 + langchain_openai: ^0.7.2 diff --git a/packages/langchain_supabase/CHANGELOG.md b/packages/langchain_supabase/CHANGELOG.md index be6b7129..bd6956b4 100644 --- a/packages/langchain_supabase/CHANGELOG.md +++ b/packages/langchain_supabase/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.1.1+3 + + - Update a dependency to the latest release. + ## 0.1.1+2 - Update a dependency to the latest release. diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index da03bb1c..9b5530ad 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_supabase description: LangChain.dart integration module for Supabase (e.g. Supabase Vector). -version: 0.1.1+2 +version: 0.1.1+3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart @@ -18,12 +18,12 @@ environment: dependencies: http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 meta: ^1.11.0 supabase: ^2.2.7 dev_dependencies: test: ^1.25.8 - langchain: ^0.7.5 - langchain_community: 0.3.1 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_community: 0.3.2 + langchain_openai: ^0.7.2 diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index c8b93090..d6f79865 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.2.2 + + - **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). 
([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) + ## 0.2.1 - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 0bbd9916..52b3b896 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: ollama_dart description: Dart Client for the Ollama API (run Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, Qwen2 and other models locally). -version: 0.2.1 +version: 0.2.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index 789ec5a7..aa3ac2cc 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -2,6 +2,12 @@ --- +## 0.4.2 + + - **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) + - **FEAT**: Add support for maxCompletionTokens and reasoningTokens in openai_dart ([#556](https://github.com/davidmigloz/langchain_dart/issues/556)). ([37d75b61](https://github.com/davidmigloz/langchain_dart/commit/37d75b612b0f42bbf8d092bdd81c554278716582)) + - **FEAT**: Option to include file search results in assistants API ([#543](https://github.com/davidmigloz/langchain_dart/issues/543)). ([e916ad3c](https://github.com/davidmigloz/langchain_dart/commit/e916ad3c0c4e322319cedac8b06b5908f1c31935)) + ## 0.4.1 - **FEAT**: Add support for Structured Outputs ([#525](https://github.com/davidmigloz/langchain_dart/issues/525)). ([c7574077](https://github.com/davidmigloz/langchain_dart/commit/c7574077195acfc96e9ca9d526cc050788c23c1d)) diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index cff9352b..afff8726 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: openai_dart description: Dart client for the OpenAI API. Supports chat (GPT-4o, o1, etc.), completions, embeddings, images (DALL·E 3), assistants (threads,, vector stores, etc.), batch, fine-tuning, etc. 
-version: 0.4.1 +version: 0.4.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart From 7504e27613ac1b882fe9c23b8ad5e11aeb4962a3 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Wed, 25 Sep 2024 23:59:13 +0200 Subject: [PATCH 243/251] build: Update pubspec.lock files --- examples/browser_summarizer/pubspec.lock | 10 ++++----- examples/docs_examples/pubspec.lock | 22 +++++++++---------- examples/hello_world_backend/pubspec.lock | 8 +++---- examples/hello_world_cli/pubspec.lock | 8 +++---- examples/hello_world_flutter/pubspec.lock | 16 +++++++------- examples/wikivoyage_eu/pubspec.lock | 10 ++++----- .../langchain_firebase/example/pubspec.lock | 6 ++--- packages/langchain_firebase/pubspec.lock | 2 +- 8 files changed, 41 insertions(+), 41 deletions(-) diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index 6eada274..138544a4 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -233,28 +233,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_tiktoken: dependency: transitive description: @@ -325,7 +325,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.1" + version: "0.4.2" path: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index a7a05f06..40ee1fd3 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -237,63 +237,63 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_anthropic: dependency: "direct main" description: path: "../../packages/langchain_anthropic" relative: true source: path - version: "0.1.1+1" + version: "0.1.1+2" langchain_chroma: dependency: "direct main" description: path: "../../packages/langchain_chroma" relative: true source: path - version: "0.2.1+2" + version: "0.2.1+3" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.2" + version: "0.6.3" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.3" + version: "0.2.3+1" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_openai: 
dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_tiktoken: dependency: transitive description: @@ -347,14 +347,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.2.1" + version: "0.2.2" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.1" + version: "0.4.2" path: dependency: transitive description: diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index f7ad7603..cbfd6954 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -111,21 +111,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_tiktoken: dependency: transitive description: @@ -148,7 +148,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.1" + version: "0.4.2" path: dependency: transitive description: diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 94af9a94..52a95a74 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -103,21 +103,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_tiktoken: dependency: transitive description: @@ -140,7 +140,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.1" + version: "0.4.2" path: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 1fbbc8d3..7ef29219 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -196,42 +196,42 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.2" + version: "0.6.3" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.3" + version: "0.2.3+1" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_tiktoken: 
dependency: transitive description: @@ -285,14 +285,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.2.1" + version: "0.2.2" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.1" + version: "0.4.2" path: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index df3386b8..f242c95d 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -167,28 +167,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_tiktoken: dependency: transitive description: @@ -235,7 +235,7 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.2.1" + version: "0.2.2" path: dependency: transitive description: diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 5a3fa013..b08dde36 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -262,21 +262,21 @@ packages: path: "../../langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_core: dependency: "direct overridden" description: path: "../../langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_firebase: dependency: "direct main" description: path: ".." relative: true source: path - version: "0.2.1+1" + version: "0.2.1+2" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index de205b64..eb451c0d 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -230,7 +230,7 @@ packages: path: "../langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" leak_tracker: dependency: transitive description: From 978296951087d3aef6446c99be0243ca4cb2f4d5 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Thu, 26 Sep 2024 00:04:20 +0200 Subject: [PATCH 244/251] docs: Update README.md --- packages/vertex_ai/pubspec.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index 5d7612aa..e4a35224 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -1,5 +1,5 @@ name: vertex_ai -description: GCP Vertex AI ML platform API client (PaLM, Matching Engine, etc.). +description: GCP Vertex AI ML platform API client (PaLM, Vector Search, etc.). 
version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/vertex_ai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:vertex_ai @@ -11,7 +11,6 @@ topics: - nlp - llms - palm - - matching-engine environment: sdk: ">=3.4.0 <4.0.0" From 94e730d62a62e298810a7ced483967e6a054df65 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Thu, 26 Sep 2024 00:04:32 +0200 Subject: [PATCH 245/251] chore(release): publish packages - vertex_ai@0.1.0+2 - langchain_google@0.6.3+1 --- CHANGELOG.md | 5 +++-- examples/docs_examples/pubspec.lock | 4 ++-- examples/docs_examples/pubspec.yaml | 2 +- examples/hello_world_flutter/pubspec.lock | 4 ++-- examples/hello_world_flutter/pubspec.yaml | 2 +- examples/vertex_ai_matching_engine_setup/pubspec.lock | 2 +- examples/vertex_ai_matching_engine_setup/pubspec.yaml | 2 +- packages/langchain_google/CHANGELOG.md | 2 +- packages/langchain_google/pubspec.yaml | 6 ++---- packages/vertex_ai/CHANGELOG.md | 4 ++++ packages/vertex_ai/pubspec.yaml | 2 +- 11 files changed, 19 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a91910b..59691103 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,7 +18,7 @@ Packages with other changes: - [`langchain_core` - `v0.3.6`](#langchain_core---v036) - [`langchain_community` - `v0.3.2`](#langchain_community---v032) - [`langchain_firebase` - `v0.2.1+2`](#langchain_firebase---v0212) -- [`langchain_google` - `v0.6.3`](#langchain_google---v063) +- [`langchain_google` - `v0.6.3+1`](#langchain_google---v0631) - [`langchain_ollama` - `v0.3.2`](#langchain_ollama---v032) - [`langchain_openai` - `v0.7.2`](#langchain_openai---v072) - [`ollama_dart` - `v0.2.2`](#ollama_dart---v022) @@ -38,6 +38,7 @@ Packages with dependency updates only: - `langchain_anthropic` - `v0.1.1+2` - `langchain_chroma` - `v0.2.1+3` - `langchain_mistralai` - `v0.2.3+1` +- `vertex_ai` - `v0.1.0+2` --- @@ -58,7 +59,7 @@ Packages with dependency updates only: - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) -#### `langchain_google` - `v0.6.3` +#### `langchain_google` - `v0.6.3+1` - **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). ([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981)) - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). 
([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 40ee1fd3..1a928bf7 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -272,7 +272,7 @@ packages: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.3" + version: "0.6.3+1" langchain_mistralai: dependency: "direct main" description: @@ -464,7 +464,7 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0+1" + version: "0.1.0+2" web: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index eb228cde..985f1d64 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -11,7 +11,7 @@ dependencies: langchain_anthropic: ^0.1.1+2 langchain_chroma: ^0.2.1+3 langchain_community: 0.3.2 - langchain_google: ^0.6.3 + langchain_google: ^0.6.3+1 langchain_mistralai: ^0.2.3+1 langchain_ollama: ^0.3.2 langchain_openai: ^0.7.2 diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 7ef29219..feb099a5 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -210,7 +210,7 @@ packages: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.3" + version: "0.6.3+1" langchain_mistralai: dependency: "direct main" description: @@ -392,7 +392,7 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0+1" + version: "0.1.0+2" web: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index 1bb0485e..f9fe1384 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -13,7 +13,7 @@ dependencies: flutter_bloc: ^8.1.6 flutter_markdown: ^0.7.3 langchain: ^0.7.6 - langchain_google: ^0.6.3 + langchain_google: ^0.6.3+1 langchain_mistralai: ^0.2.3+1 langchain_ollama: ^0.3.2 langchain_openai: ^0.7.2 diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.lock b/examples/vertex_ai_matching_engine_setup/pubspec.lock index 752608b4..b3a0f0ae 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.lock +++ b/examples/vertex_ai_matching_engine_setup/pubspec.lock @@ -151,7 +151,7 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0+1" + version: "0.1.0+2" web: dependency: transitive description: diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.yaml b/examples/vertex_ai_matching_engine_setup/pubspec.yaml index 4519fdbb..c37f6c30 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.yaml +++ b/examples/vertex_ai_matching_engine_setup/pubspec.yaml @@ -10,4 +10,4 @@ dependencies: gcloud: ^0.8.13 googleapis_auth: ^1.6.0 http: ^1.2.2 - vertex_ai: ^0.1.0+1 + vertex_ai: ^0.1.0+2 diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index a7b8814d..36d0882e 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -2,7 +2,7 @@ --- -## 0.6.3 +## 0.6.3+1 - **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). 
([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981)) - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 92903540..a047e563 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_google description: LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.). -version: 0.6.3 +version: 0.6.3+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart @@ -27,9 +27,7 @@ dependencies: langchain_core: 0.3.6 meta: ^1.11.0 uuid: ^4.4.2 - vertex_ai: ^0.1.0+1 - langchain_firebase: ^0.1.0 - firebase_core: ^2.31.0 + vertex_ai: ^0.1.0+2 dev_dependencies: test: ^1.25.8 diff --git a/packages/vertex_ai/CHANGELOG.md b/packages/vertex_ai/CHANGELOG.md index 5c733127..372ba2dc 100644 --- a/packages/vertex_ai/CHANGELOG.md +++ b/packages/vertex_ai/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.1.0+2 + + - Update a dependency to the latest release. + ## 0.1.0+1 - Update a dependency to the latest release. diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index e4a35224..9e25d858 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -1,6 +1,6 @@ name: vertex_ai description: GCP Vertex AI ML platform API client (PaLM, Vector Search, etc.). -version: 0.1.0+1 +version: 0.1.0+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/vertex_ai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:vertex_ai homepage: https://github.com/davidmigloz/langchain_dart From e236025e8dfc9456beeed2d1f8b8ba37af3f2866 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 26 Sep 2024 00:28:05 +0200 Subject: [PATCH 246/251] refactor: Update deprecated UUID constant (#558) --- packages/langchain/lib/src/embeddings/cache.dart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/langchain/lib/src/embeddings/cache.dart b/packages/langchain/lib/src/embeddings/cache.dart index 270ae124..3a67cd39 100644 --- a/packages/langchain/lib/src/embeddings/cache.dart +++ b/packages/langchain/lib/src/embeddings/cache.dart @@ -135,7 +135,7 @@ class EmbeddingsByteStoreEncoder @override String encodeKey(final String key) { final keyHash = sha1.convert(utf8.encode(key)).toString(); - return uuid.v5(Uuid.NAMESPACE_URL, keyHash); + return uuid.v5(Namespace.URL, keyHash); } @override From a7bcd8fcfd9dfbf8380413033e92fefa23699aa0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 08:53:21 +0200 Subject: [PATCH 247/251] build(deps): bump actions/checkout from 4.1.7 to 4.2.0 (#559) Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.7 to 4.2.0. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/692973e3d937129bcbf40652eb9f2f61becf3332...d632683dd7b4114ad314bca15554477dd762a938) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docs.yaml | 2 +- .github/workflows/firebase-hosting-merge.yml | 2 +- .github/workflows/firebase-hosting-pull-request.yml | 2 +- .github/workflows/test.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 98f80b82..448ca45f 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -27,7 +27,7 @@ jobs: timeout-minutes: 30 steps: - name: Checkout repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 - name: Setup Pages uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b diff --git a/.github/workflows/firebase-hosting-merge.yml b/.github/workflows/firebase-hosting-merge.yml index 262b5a52..35567de1 100644 --- a/.github/workflows/firebase-hosting-merge.yml +++ b/.github/workflows/firebase-hosting-merge.yml @@ -9,7 +9,7 @@ jobs: build_and_deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 - run: npm ci && npm run build working-directory: docs_v2 - uses: FirebaseExtended/action-hosting-deploy@0cbcac4740c2bfb00d632f0b863b57713124eb5a diff --git a/.github/workflows/firebase-hosting-pull-request.yml b/.github/workflows/firebase-hosting-pull-request.yml index 2a2d9416..0086d2e1 100644 --- a/.github/workflows/firebase-hosting-pull-request.yml +++ b/.github/workflows/firebase-hosting-pull-request.yml @@ -15,7 +15,7 @@ jobs: if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 - run: npm ci && npm run build working-directory: docs_v2 - uses: FirebaseExtended/action-hosting-deploy@0cbcac4740c2bfb00d632f0b863b57713124eb5a diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index c4e0c410..1141bb2d 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -22,7 +22,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 with: ref: "${{ github.event.pull_request.base.sha }}" # Required for pull_request_target fetch-depth: 0 From ce95cebc259b8a12a1184d5ee7e3a3ac84982fb4 Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Tue, 1 Oct 2024 21:15:13 +0200 Subject: [PATCH 248/251] fixed dependency errors --- examples/browser_summarizer/pubspec.lock | 8 +- examples/hello_world_flutter/pubspec.lock | 8 +- packages/anthropic_sdk_dart/pubspec.lock | 24 +++--- .../Flutter/GeneratedPluginRegistrant.swift | 2 - .../langchain_firebase/example/pubspec.lock | 48 +++-------- packages/langchain_firebase/pubspec.lock | 80 ++++++++++--------- packages/langchain_firebase/pubspec.yaml | 2 + 
.../example/firestore_memory.dart | 3 +- packages/langchain_google/pubspec.yaml | 5 +- 9 files changed, 84 insertions(+), 96 deletions(-) diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index 138544a4..c6a87ff2 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -275,10 +275,10 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec + sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" url: "https://pub.dev" source: hosted - version: "0.11.1" + version: "0.8.0" math_expressions: dependency: transitive description: @@ -299,10 +299,10 @@ packages: dependency: transitive description: name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.15.0" + version: "1.12.0" nested: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index feb099a5..4c5f7717 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -252,18 +252,18 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec + sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" url: "https://pub.dev" source: hosted - version: "0.11.1" + version: "0.8.0" meta: dependency: transitive description: name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.15.0" + version: "1.12.0" mistralai_dart: dependency: "direct overridden" description: diff --git a/packages/anthropic_sdk_dart/pubspec.lock b/packages/anthropic_sdk_dart/pubspec.lock index 6e950ea1..c719386a 100644 --- a/packages/anthropic_sdk_dart/pubspec.lock +++ b/packages/anthropic_sdk_dart/pubspec.lock @@ -5,23 +5,23 @@ packages: dependency: transitive description: name: _fe_analyzer_shared - sha256: "45cfa8471b89fb6643fe9bf51bd7931a76b8f5ec2d65de4fb176dba8d4f22c77" + sha256: "5aaf60d96c4cd00fe7f21594b5ad6a1b699c80a27420f8a837f4d68473ef09e3" url: "https://pub.dev" source: hosted - version: "73.0.0" + version: "68.0.0" _macros: dependency: transitive description: dart source: sdk - version: "0.3.2" + version: "0.1.0" analyzer: dependency: transitive description: name: analyzer - sha256: "4959fec185fe70cce007c57e9ab6983101dbe593d2bf8bbfb4453aaec0cf470a" + sha256: "21f1d3720fd1c70316399d5e2bccaebb415c434592d778cce8acb967b8578808" url: "https://pub.dev" source: hosted - version: "6.8.0" + version: "6.5.0" args: dependency: transitive description: @@ -82,18 +82,18 @@ packages: dependency: "direct dev" description: name: build_runner - sha256: dd09dd4e2b078992f42aac7f1a622f01882a8492fef08486b27ddde929c19f04 + sha256: "644dc98a0f179b872f612d3eb627924b578897c629788e858157fa5e704ca0c7" url: "https://pub.dev" source: hosted - version: "2.4.12" + version: "2.4.11" build_runner_core: dependency: transitive description: name: build_runner_core - sha256: f8126682b87a7282a339b871298cc12009cb67109cfa1614d6436fb0289193e0 + sha256: e3c79f69a64bdfcd8a776a3c28db4eb6e3fb5356d013ae5eb2e52007706d5dbe url: "https://pub.dev" source: 
hosted - version: "7.3.2" + version: "7.3.1" built_collection: dependency: transitive description: @@ -322,10 +322,10 @@ packages: dependency: transitive description: name: macros - sha256: "0acaed5d6b7eab89f63350bccd82119e6c602df0f391260d0e32b5e23db79536" + sha256: "12e8a9842b5a7390de7a781ec63d793527582398d16ea26c60fed58833c9ae79" url: "https://pub.dev" source: hosted - version: "0.1.2-main.4" + version: "0.1.0-main.0" matcher: dependency: transitive description: @@ -624,4 +624,4 @@ packages: source: hosted version: "3.1.2" sdks: - dart: ">=3.5.0-259.0.dev <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift b/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift index a2fafff9..c6c180db 100644 --- a/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift +++ b/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift @@ -5,13 +5,11 @@ import FlutterMacOS import Foundation -import cloud_firestore import firebase_app_check import firebase_auth import firebase_core func RegisterGeneratedPlugins(registry: FlutterPluginRegistry) { - FLTFirebaseFirestorePlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseFirestorePlugin")) FLTFirebaseAppCheckPlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseAppCheckPlugin")) FLTFirebaseAuthPlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseAuthPlugin")) FLTFirebaseCorePlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseCorePlugin")) diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index b08dde36..02cc232c 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -49,30 +49,6 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.1" - cloud_firestore: - dependency: transitive - description: - name: cloud_firestore - sha256: e461ea9ab23959102a780efcbccfe33c2ac46269928bc57093bbc0b526afc801 - url: "https://pub.dev" - source: hosted - version: "4.17.3" - cloud_firestore_platform_interface: - dependency: transitive - description: - name: cloud_firestore_platform_interface - sha256: "2e0b8db9a759ffc71086019f1bd27237e5e888ab1e99c507067ff8616acdfa24" - url: "https://pub.dev" - source: hosted - version: "6.2.3" - cloud_firestore_web: - dependency: transitive - description: - name: cloud_firestore_web - sha256: "37b6974bef5b0a7ecd31037ffb7d7bfe6bb9d2ac6c064fbea395411ef0a64d55" - url: "https://pub.dev" - source: hosted - version: "3.12.3" collection: dependency: transitive description: @@ -281,18 +257,18 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "3f87a60e8c63aecc975dda1ceedbc8f24de75f09e4856ea27daf8958f2f0ce05" + sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" url: "https://pub.dev" source: hosted - version: "10.0.5" + version: "10.0.4" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: "932549fb305594d82d7183ecd9fa93463e9914e1b67cacc34bc40906594a1806" + sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" url: "https://pub.dev" source: hosted - version: "3.0.5" + version: "3.0.3" leak_tracker_testing: dependency: transitive description: @@ -329,18 +305,18 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: 
f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec + sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" url: "https://pub.dev" source: hosted - version: "0.11.1" + version: "0.8.0" meta: dependency: transitive description: name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.15.0" + version: "1.12.0" path: dependency: transitive description: @@ -422,10 +398,10 @@ packages: dependency: transitive description: name: test_api - sha256: "5b8a98dafc4d5c4c9c72d8b31ab2b23fc13422348d2997120294d3bac86b4ddb" + sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" url: "https://pub.dev" source: hosted - version: "0.7.2" + version: "0.7.0" typed_data: dependency: transitive description: @@ -454,10 +430,10 @@ packages: dependency: transitive description: name: vm_service - sha256: "5c5f338a667b4c644744b661f309fb8080bb94b18a7e91ef1dbd343bed00ed6d" + sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" url: "https://pub.dev" source: hosted - version: "14.2.5" + version: "14.2.1" web: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index eb451c0d..0d8a2a30 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _flutterfire_internals - sha256: b1595874fbc8f7a50da90f5d8f327bb0bfd6a95dc906c390efe991540c3b54aa + sha256: "5534e701a2c505fed1f0799e652dd6ae23bd4d2c4cf797220e5ced5764a7c1c2" url: "https://pub.dev" source: hosted - version: "1.3.40" + version: "1.3.44" async: dependency: transitive description: @@ -45,26 +45,26 @@ packages: dependency: "direct main" description: name: cloud_firestore - sha256: e461ea9ab23959102a780efcbccfe33c2ac46269928bc57093bbc0b526afc801 + sha256: a08d0f4aae40e63e7a57102de890d5d3c93d719ce38985b2a36c2672283af7d2 url: "https://pub.dev" source: hosted - version: "4.17.3" + version: "5.4.3" cloud_firestore_platform_interface: dependency: transitive description: name: cloud_firestore_platform_interface - sha256: "2e0b8db9a759ffc71086019f1bd27237e5e888ab1e99c507067ff8616acdfa24" + sha256: "884fa34c6be2d9c7c1f4af86f90f36c0a3b3afef585a12b350a5d15368e7ec7a" url: "https://pub.dev" source: hosted - version: "6.2.3" + version: "6.4.3" cloud_firestore_web: dependency: transitive description: name: cloud_firestore_web - sha256: "37b6974bef5b0a7ecd31037ffb7d7bfe6bb9d2ac6c064fbea395411ef0a64d55" + sha256: "6e621bbcc999f32db0bc6bfcb18d9991617ec20f8d6bf51b6a1571f5c324fafd" url: "https://pub.dev" source: hosted - version: "3.12.3" + version: "4.3.2" collection: dependency: "direct main" description: @@ -109,18 +109,18 @@ packages: dependency: transitive description: name: firebase_app_check_platform_interface - sha256: edefbd312d2f4c52ab6a62d4efca512012bcc580f152c856a5730bfabcf8a924 + sha256: "8dbb826d99c67512212331331461ee142e46645740f1c1209706ca2f72958e57" url: "https://pub.dev" source: hosted - version: "0.1.0+34" + version: "0.1.0+38" firebase_app_check_web: dependency: transitive description: name: firebase_app_check_web - sha256: "2c2377ecf922514c540c2d4a9c06e46830a0706fdfc3d59b7ade9b75843b81c5" + sha256: "0d889be9adf525fea6791ab8b137ee3c152c28d126115408768bd83ab5e7e46d" url: "https://pub.dev" source: hosted - version: "0.1.2+12" + version: "0.1.3+2" 
firebase_auth: dependency: "direct main" description: @@ -133,42 +133,42 @@ packages: dependency: transitive description: name: firebase_auth_platform_interface - sha256: "0408e2ed74b1afa0490a93aa041fe90d7573af7ffc59a641edc6c5b5c1b8d2a4" + sha256: "78966c2ef774f5bf2a8381a307222867e9ece3509110500f7a138c115926aa65" url: "https://pub.dev" source: hosted - version: "7.4.3" + version: "7.4.7" firebase_auth_web: dependency: transitive description: name: firebase_auth_web - sha256: "7e0c6d0fa8c5c1b2ae126a78f2d1a206a77a913f78d20f155487bf746162dccc" + sha256: "77ad3b252badedd3f08dfa21a4c7fe244be96c6da3a4067f253b13ea5d32424c" url: "https://pub.dev" source: hosted - version: "5.12.5" + version: "5.13.2" firebase_core: dependency: "direct main" description: name: firebase_core - sha256: "3187f4f8e49968573fd7403011dca67ba95aae419bc0d8131500fae160d94f92" + sha256: "51dfe2fbf3a984787a2e7b8592f2f05c986bfedd6fdacea3f9e0a7beb334de96" url: "https://pub.dev" source: hosted - version: "3.3.0" + version: "3.6.0" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: "3c3a1e92d6f4916c32deea79c4a7587aa0e9dbbe5889c7a16afcf005a485ee02" + sha256: e30da58198a6d4b49d5bce4e852f985c32cb10db329ebef9473db2b9f09ce810 url: "https://pub.dev" source: hosted - version: "5.2.0" + version: "5.3.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: e8d1e22de72cb21cdcfc5eed7acddab3e99cd83f3b317f54f7a96c32f25fd11e + sha256: f967a7138f5d2ffb1ce15950e2a382924239eaa521150a8f144af34e68b3b3e5 url: "https://pub.dev" source: hosted - version: "2.17.4" + version: "2.18.1" firebase_vertexai: dependency: "direct main" description: @@ -224,6 +224,14 @@ packages: url: "https://pub.dev" source: hosted version: "4.0.2" + langchain: + dependency: "direct main" + description: + name: langchain + sha256: "98fc0517bf806390938ef3d8388a7a550f78b2b0da3a5cd6571a8907343e7150" + url: "https://pub.dev" + source: hosted + version: "0.7.6" langchain_core: dependency: "direct main" description: @@ -235,18 +243,18 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "3f87a60e8c63aecc975dda1ceedbc8f24de75f09e4856ea27daf8958f2f0ce05" + sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" url: "https://pub.dev" source: hosted - version: "10.0.5" + version: "10.0.4" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: "932549fb305594d82d7183ecd9fa93463e9914e1b67cacc34bc40906594a1806" + sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" url: "https://pub.dev" source: hosted - version: "3.0.5" + version: "3.0.3" leak_tracker_testing: dependency: transitive description: @@ -267,18 +275,18 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec + sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" url: "https://pub.dev" source: hosted - version: "0.11.1" + version: "0.8.0" meta: dependency: "direct main" description: name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.15.0" + version: "1.12.0" path: dependency: transitive description: @@ -360,10 +368,10 @@ packages: dependency: transitive description: name: test_api - sha256: 
"5b8a98dafc4d5c4c9c72d8b31ab2b23fc13422348d2997120294d3bac86b4ddb" + sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" url: "https://pub.dev" source: hosted - version: "0.7.2" + version: "0.7.0" typed_data: dependency: transitive description: @@ -392,18 +400,18 @@ packages: dependency: transitive description: name: vm_service - sha256: "5c5f338a667b4c644744b661f309fb8080bb94b18a7e91ef1dbd343bed00ed6d" + sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" url: "https://pub.dev" source: hosted - version: "14.2.5" + version: "14.2.1" web: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: cd3543bd5798f6ad290ea73d210f423502e71900302dde696f8bff84bf89a1cb url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.1.0" sdks: dart: ">=3.4.0 <4.0.0" flutter: ">=3.22.0" diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 74f496b2..46ef3e21 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -23,6 +23,8 @@ dependencies: firebase_auth: ^5.1.0 firebase_core: ^3.3.0 firebase_vertexai: ^0.2.2 + cloud_firestore: ^5.4.3 + langchain: ^0.7.6 langchain_core: 0.3.6 meta: ^1.11.0 uuid: ^4.4.2 diff --git a/packages/langchain_google/example/firestore_memory.dart b/packages/langchain_google/example/firestore_memory.dart index 11c08946..65f24c66 100644 --- a/packages/langchain_google/example/firestore_memory.dart +++ b/packages/langchain_google/example/firestore_memory.dart @@ -1,7 +1,8 @@ import 'package:fake_cloud_firestore/fake_cloud_firestore.dart'; import 'package:firebase_core/firebase_core.dart'; -import 'package:langchain/langchain.dart'; +import 'package:langchain_core/llms.dart'; import 'package:langchain_firebase/langchain_firebase.dart'; +import 'package:langchain/langchain.dart'; Future main() async { await _history(); diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index a047e563..810dbf62 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -28,6 +28,9 @@ dependencies: meta: ^1.11.0 uuid: ^4.4.2 vertex_ai: ^0.1.0+2 + firebase_core: ^3.6.0 dev_dependencies: - test: ^1.25.8 + test: ^1.25.2 + langchain: ^0.7.6 + fake_cloud_firestore: ^3.0.3 From 590b86d7a4fd6f97071da3859da87111796507fc Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Tue, 1 Oct 2024 21:18:39 +0200 Subject: [PATCH 249/251] fix uuid namespace pass as string --- packages/langchain/lib/src/embeddings/cache.dart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/langchain/lib/src/embeddings/cache.dart b/packages/langchain/lib/src/embeddings/cache.dart index 3a67cd39..fe0c9435 100644 --- a/packages/langchain/lib/src/embeddings/cache.dart +++ b/packages/langchain/lib/src/embeddings/cache.dart @@ -135,7 +135,7 @@ class EmbeddingsByteStoreEncoder @override String encodeKey(final String key) { final keyHash = sha1.convert(utf8.encode(key)).toString(); - return uuid.v5(Namespace.URL, keyHash); + return uuid.v5(Namespace.url.name, keyHash); } @override From 50e6237d330e5c49dec02fa8dfc20e786905dd9d Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Tue, 1 Oct 2024 21:20:25 +0200 Subject: [PATCH 250/251] langchain dep error --- packages/langchain_firebase/pubspec.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/langchain_firebase/pubspec.yaml 
b/packages/langchain_firebase/pubspec.yaml index 46ef3e21..e34da117 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -24,7 +24,6 @@ dependencies: firebase_core: ^3.3.0 firebase_vertexai: ^0.2.2 cloud_firestore: ^5.4.3 - langchain: ^0.7.6 langchain_core: 0.3.6 meta: ^1.11.0 uuid: ^4.4.2 From 85b2c6e2ba45a4aab3823c0988805e8ea00024e0 Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Tue, 1 Oct 2024 21:20:58 +0200 Subject: [PATCH 251/251] lock update --- packages/langchain_firebase/pubspec.lock | 8 -------- 1 file changed, 8 deletions(-) diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 0d8a2a30..deafd770 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -224,14 +224,6 @@ packages: url: "https://pub.dev" source: hosted version: "4.0.2" - langchain: - dependency: "direct main" - description: - name: langchain - sha256: "98fc0517bf806390938ef3d8388a7a550f78b2b0da3a5cd6571a8907343e7150" - url: "https://pub.dev" - source: hosted - version: "0.7.6" langchain_core: dependency: "direct main" description:

zD1BnuyBFNno>OdV@4Cn{$E!eY-}od{6U%gL4r1tNAmLENa#m*hwkt|QjZr~pFLvmY z0Dm$LXHHJ8VuIM)qdsrLi6tH+ow7^BhoIN81$=Lx)!}1&DXBgDCD+Oz8lmh|88rdK z@=MX0;*}7}n<<~dihEuG(PH%gJTTjZ2yC09drQpH`IH3XP?Ll8r19s@{RR@mHG;09 z3n^b=$gb&tT(ht?!a|q8}V`!KzYBCW6`Urb<+{l_YBdyJ?s1rB}VO^U;7bS`sc=mwXOt0LBuJi$e%y+^xY@3QgtEWg?FbF|jEUEY*n2He} zU`NpTn3WhW+n9clF~9;u)@8&>Wi==M63b5UC~|V2P755+m<>NKgOYIvBB`em*q$#3 zqbWhr{#2KeeE0js=aR;LM~iHR{FWqT)n}L9@Ua0h#I=|Jq862C*6e0BWoI3=h6b?H z8Wia@N5QSbPE}J3daN?%@2qOcNPHZOz7PxYNkqM^WikK@lt&nN95%C=5fg({nw^v3 zOx^6#u(5}@@3)+2h!q2bCqN)U)%a4}P6Hd>2<+q4400k>U?T>ZT?@0M*g-LS{*D%J z%hvUZ`>7_~OHed$fv~bSKctod!$cdP-q}QJUKn5UkU4naK9*P^)>=)-obV_zE&zJW zPDW;s{OZeORPL9E8EaPh`YWa~P1 zhdQTU7a9=BBYi=W$L{_iHqy8;cd?lX)BqB_gDq~8W;!ivcN1-nwvnR{-8k&4*b+wC zUaFiL@Fl^wJ-O}G;Qx4LfTK!e@Vn=vSo!;?Z&rmIWH_Zji0I}s-Ba)MS$5;GdJEyE z>*+RIOYnCh7{8X-ImDOslp5z6W7q0y+IRg!?DZ6%SZu5Pe0BNetQO(bzOF<}U`E?_ zO_3&aX6nB4%dxz-<6Y0)e#%nE0Zzw#na?Tg3JH(Rto6~s(PV^Q3>jkHqJ|T%GO&Qt z@r&x3%zLeGJe7NeFIZ$H!yt5IkM}>MP5baoi~6uqljX79)qiyVOV3yJXGoysymt9y z9B?|`usG9{38($GC{XZu^bB9yt>BxBA+z5<+lfJ1PLO4>nJ`4^ITwnL!lMuSP89Er zMK&2IupR+UM_K?HmN0j&P1&JZ4h=$?5nK z#?Nsn$W8pm-<^()1i)Cf^nK~Zp5d{aI}KWIAt_wq5Sj|SlkJ7J{0;jo-=_~?DMW7p?8`+UW7O}&m;T(pMJNI7E8b)y*A^ky+i^bvB zI}thNFdk=0cDsO5f)Si@46jtk614B2 z)EHiAC?)9MiM@9xR)rx!?@7BK_-NalAp@bHanGHPJf+5oih_}4&@47_xUYhk~OuBaGx24tBc>UnVzg8^zW zIi-|0iqMd}7lQac5)IkW+p|sCbxtZNipV>Pn8hL%J(6Dy&~_pOG4Z*@;rQ&N zJaz~*8cb{+H%%W=f=iYW)sRAkF{lL`7n&V4 zNVt;e$&iuyJTp-VRc4p+Z9Fs4Fmovd=wxR?%QFYoGY>j5x9?;vgk~~@W-X9qXI;s_ zAElg+XYF6fUc8fi%#Z;KODFEi+JBf_jv+QIBkeDOJEO=$JHr6U)bGAB2&rfeeHMy% zRR%M|u$ER+nw3!kU0jcBc$E^+j7^DpPR7Sbo}WUjSPm3)DbEa|lkiA8=XC5HQfFYn zZw{mPjMzYvTg~Oa=S93MBV{oPe*z{Ed=pU;f*5Nk@Xn6F7L(a!!_`DdOGQZa%t?ga zL^h$p;^Zlibr?WHOhUoB(WIm?(XC_w;hOo>DWplx5ih#rc5cG`vGHSsIrDJO#3n3> zB3W`ChGf-)?9oJY5{7J=EZkQ)A=F%T0!?bdiOLHrQaCEwxlHMghTDXa`#VSFl#wob zkjWn*vqX#9_TV|9s6x-g7ox=%M&8&kP?U?JDvaJ%rM`tUmelgS$@feQ<0~oo{$>aI z_Ji_UXn8S_#ap8JlDbE41{ft9$%|V&-?j6hI>SmP_)s&Vs5$a?MMkA>l}k9=5gyqf zP7stF4|QdrfT5^&LP+G7!q{p^*+Mhm=1@*7XzDT|M-#+e_MW^9JjjmN!4Q#S^MENk zIS9&w1;rZ10=;h@5R@7RO2;Q{;E5@H5NHhSWhlTZ1ffAtVoZEi zG0@0Mz0XKNivY_Q4~uy7~B2@Sdmf{J`0lAQw!X1_P%1{z-(N{90$xfOeVpQ&$ISh&whe#1Yakfhy5a=AZm$cgN1UZ-uhN3b% z?|`9LqLr9Wa5Tuw9I?S}M2rQurx3e7uJQ;M_dJ3^vEZh%s+_C97#tq(7QW(K9eRQU zB!m$^s@I&!C}JuWc#WVSl7*trZfQvwDy+mQRkjp0KH)XKS2;`GR+mjyub{tt`~j%+9f^_?&`1po zSlX{@Y8SzJl2Segaz+b!Xo8r-5Ik7ul0hV(t@LG(c;l9-W`q?8 zq=?UWg$DV9G62(H&5tm8bJ*otaOe?y0{hbW}$>UhQ85n$i(rl)HI)aiTD z1wnOdp}RkNwVyoh&Z_M$Q0*$J?RdM<(Yn#Kf$8i#P3hifU9g4q^dN90-8oa;gTK0m z#e2r=dtTM{mZ^SrNA-W81XZf`^)dIZigy*Nb`PE@L7DfetEj;wP(7AtT1HI`JFjp^?tn zAR)duhPzP{SWwMLdN>;V(>4Z{*7l4n*-<6=WtTs;Sc{`UZVgRJ1A;1sBAn4+$uc-( z9)`dZK`aoSy`zvFBI+38=(lhY_7>02lxR(`e|Az42*Tt{7X5YK8S>>_l*$|^;0!s@0Hp?zgucpZY*MREXJE`f@QLT@DAK_ zHu70*G&5tgj%Cc>bS(JYM03V;b0oO6ZaOYwY9VsmwQfA?+BD!F%s;Ds8cI&!f@qXc zvUNtzqL6x;m4IX~4-JMpBzrhR=rLf5GEf!+ximb!wr=1K^EcZbBC>9Ta9JGh8PPT< zMsA`>YafZgEl`w^{wgD4KTG%OlKVvpWkQ1ySP*G=i@bMJcuXS;U)13nP~35)wAwIn zOms!s@Iu*e%}Lt_7;+wyl+!Q>Y!vb|M+nX%PrL%wP?F~?E8nlgu4LChT~^K=-aJOc z!$j+es1T14F_x_H7eW!wqE_lBR^~Ocy;)Z+1y&rCR`%^yJo{E$2v89+-wjdWi1i4wHQ4;RL*FBLHJ@@U3H#jK+w}s# z_&Jpa-$ud65y|}1#N}mhXY+Z3cSGCeFlrQnZF62Qf@qO6S@I;7eK=aZ?gRHVKs9JS zSq7I72L~B00s|d5hG^8COuP@VqB%tVd4#kfXQBh)jsMhNOm^$t(oqLuVWvr=Z+Hu{ zNsVjLRZn7?gPDOKZx|%aGDkLnk;)BZIrcZ~_mCYyj3XW(OAgfpJMkgt8@hLzp{y8= z(->k>G+fXQ1VDXruRwslx&-C7pyxFN+yF5jCyo&QO~o9!fJN$MgU0X($(X@y_NXRo zIg45JEeWL1S;j9EIj6?fiU4BOIccH{WQP;pjUm>;Z4zTicAat{L={hlj~Fnbo~StOkYNtjwiZxtwR!9ApMz$$Luq`^NyYv z2yn^jm4TX0k;G+Se-yb&*{8BAV-)q2lK5_lO%S@Qm>m}fDFGqK}L`Q$hj;Jk8g6_l>Q 
z8bhJZ&XAlrL{1q6{ZAfnH)tcH&F%ht&ST{$x+p z7dtu|xl$*X8&aOxL1bMFD?U_MKyyZ%HqJ#F-ncCOE2ras24^}x&$Pa%o^~gdsmk+C zABm+6myee)ea*2f?0wo2Nn0`&M@UZGm;4rPnsz4dX{pm; zYpNpDO@Di7f%GGnYUGK%Q>0;eto(!K46B8OrohGYnyRIR(EkX|tnIBJP|*I^_@Lr^ zE0@CBTx+S0+hZZ4zW#Zi05t2WfcU4>MOpf)8h@w1fiqi5DX8G$nLF~#Pk)0mXO+>VPGv_bX>@K@Xz3%*{|=mqZl-yvD%kzM24~7M zx$S8wUQTe;{@;Q#-So{~?Eg=}nZJykn`xh!0N~666W2lAXQu9xZU?3wiwV!nJb!#V zF!S0Ses&SV~71n6fX^X-dRa}Dv(&=$jMbTiTh>+yCh&agk zB``5e*kC2=wz?*k==z5zo?B!a_y>MTSvV6n!FtchCo|U8Jmk=gnjlY}ivL5aFb#!{>hP1>}I$NTmKD&+<%j;jOEbG0E*Wl~D9tLPh5(B%y7sWH8^H zme9aBO=+^=3^^K7>&zbx%S32)O=)4g|8}T`@8Vz^ODyTSStkyqTQdDwy~VFTz3f za>xdn30~vp3u{H5B;h~QCy zJp5pCc4vUZbR(Kg6MO5O76@*tC?_5j_7aL|;+jMIGBKJ%{YhId5aZ-rRAs=jq;d?! z!qLwT^8$1$vY_V9p()p=ccc)+|HCBp_E<5EVe*?L2GKd(D6Lv|?g9j|C zyZVgW>^A^IKW>mxqOoA+_~p1}<^&Og7NjV(x zwJg0ZXnIAe9|S=VW2J>Z)92N1l&1a2*AeN+R)Tw6lI51B65A|(TO@Mt));8^?%AEU zKIiv<@Yhxf?x`8Tt&>AxZ7aiatQjJ&lS^G-D|gLPE8?L}9@Da|!rkNllhZNMQzxHW z_rGyE7Uhi29w_^lQ^0qohv|?uRacNYl|UfiJ?gQceXbW=Deh z^%In`#{=Q_)2zn6d|S6;gCKi!f>H^s$6O!$Q#QtQ^wMILc+2dH?GsRGV(H}Y4aGOk4mrTH^fR+m*~&^SES7dDujaQ~2(@y?DZ&8RI`_|yB3%aiVpRnl5N zR+O?ZJ-O4Jy}*5-SOR;O;PKY5i|nAPM$N(Q+SSX0iCWd|@ayIkUkqj+1Xtz-GQ7C) zVxmxM%eFso<5>Zd+2+cpYvY@a-UBaY2tK!K*mE7-3Ia?Q-q1D1+?{Yc=$f5}Bv(zk zl{n@YPcn2qYdSl7@6R;vq*AC?6xJ)ZwbH`bu>WXJQd*8-#d zNF6=G<(WR^;>E|wtt^RUkMzZ@h^hz|s~e`B10-t~Cm?s)@41(SycA!&x?|A6TE89p z-piEjMM@`wyFvW_u1=DG#?er}fl?&<7ZCHCl>C*wrOo26W5+ajYUKvVj#K51`> zquu!Z^vs9nO#CZaStso~`?rf#-pxL+WNtce-+u#^m`?gyGtfVBSY_eh;`8*^=XbjA zpHzjL^$7E>(PVy(kcZE#;Z5oi)kN<@)#eT!8mz0R3pm=PtsJF#O>K;n+JDa6*9>M^ zec0vZHNrCaIsQz}=Hl_hq*=oWW`(g7RosOrg_X#JfF9~M=K${W+X2wAHF;H;~ktzmef+4oYkN{4{2n<>JZ>J+g zErzlO1E0bmHZaIj42n{UidpIczZA8&6pgABt%($!z0^f7Df$R0hIFYrog_@PjgcKB zjHiiA8;z_}QVu&(ms+IQYZEVtH*r)ZUS^iA#7XhkQ?Q%{^NLgOq&Ho$m%ifFR9Pl< zWuxh8zVvky3IWw7jG;7lOCw*Uw7{tpflpTYqjNKVK(h!<6ZfeU2WPXmMbjN288MTl zyF<;wOENbpWkq`$rCMaf2V|s7WaX`zCGA^o1jtItwuouTD%HxWn#f3b$tvZyDEG*! 
z^~kCd$!W*SXhg(m^S5ekH0#i`-YaR;BWl%+Xi|%i!+5pcE|*gkYEdeY6SbE!k(DzA z08~pdW_!&RwawT6ELp9MYQ~1;pJ{m46~D z^Mq32A-}8xbDO1Ai(7!Kt7`jWF9lOhSx4q}v-B1Zduj!7c{8GR(J2}GDQV6P1*}l} zbJ>pTQ|-63+A#Jqk4!rFHk!2ZI|#sQ{q6XUtF`h9wQ@lLG|{IWmSjq?{B*DQl^&!h z#G54EFi}eKQc8~KR59%MV%y17t|Ze#kv@eAE>RK)l})9QX(Uzt%&vUphH}H05|dU} zoq1Pki&8#ObD>vr&6rXle?(D4_}h48zth%|fbe%qN|8gI?*pPUE5pmYE@bm7N18Oh z;qNYOQRWv?speE+RZ@8up!ii&zP6=XGNSu)Sy#;l#n%Aoynwa{aoJkUPDUj~f2-~$ zD`cBjSK}1bJu8ZO9fdNl9%iAQ3QpA)O4a7%o}AO3s*s)mA+;LP)-F}mK-aEL8dWz= zwHq4>eVmjNl&XWOGPg?P8dOyYzC$hPJsqO8GV_#*e*EgT_T8iQN@)Sgz4>iR{C%G* z71w3^CQQ`Zth$8k+pl}|{TS-*jY!XTDOAqMXwo-du$c9?8m+YQb|Eg3Km zPj5RD!jUd_k$uE163HaQYXnp(oQZ3+yS4}Ss9Txzl?rLmv}*Y7DPSwxDHGJQH#*jY z`Yt5k=9dQAX}moEZ_>j$;=P(zbX8?cwGEkd z(eOTn{XWfW?RU-;vmviqJ>!;vhml$iEF(^vBahVDtTu-XOh%kd zbsuI7yA6-{@{gKx=_|_V-BKHU>OG=!D*s|xS2s{!bAMEJdQ=Z);QO&fWmETw+}P7r zoK%6HIkUd|M_s2SJ%Ua8$n%Ur`)gxQ(sk^o^&-7>V@wTf@(n_->4tKRNAJsd2j~U9 zQ+TdB9`n(__1e&r48!PxaZk>%6DgW-o^o#k46%#cBmnlU1B zGQDo1Dq%ACu5o(rXkp-_wYNfT-FRuFLACc-E{n}(UcorG3^e5jm*%Rc`p`oxT)3!OK7uc`g+jmMij%+TGcxp?;o#nfZb)^pI9qyavfg6Y=YN`dmiRI`?l?_H)BD zh+*2KzXs+jJGQ4gC|NC1PX|Le=V>CL<#w@I1Ho& zEQ-sj9k)>3iHet#Kwf^WDsz583Sm`EqpBulCDFJ98;Mo&iC1up24^aZhFYcYTMu7e z&gaB6ld7otEY>kc=$%u9Zd}mui8hk&2Ca1JmM$C1$D4i9>|`Fi&$`&V7o#rGYeBqZ zDxt|Ax=be3D^J&T`O`ve{*u*}<`*Jbl%ui@BI4yTh0Ug=HC?|2-KyovoLSVv5~~H5 z5jDh)C0uJ2z13Y*FgVO_GkR(*I-=&4c>!-fC?!90S-@PH?%T(LF6wKGHxSATn?pev zS|ZCT1Yz!#>%Y2?^@@J=GZ-#)8VT)(0+w9+^M`I7KbNMBZ+)Y6oMii@M^&(br(BNN zGe4&>ubR@h(Wf0NeE%Tb=BZ9!$jmUrbT+TcX3>&$ zT3G_NTJQ%va5IC1nUcOXF9|)^*7sw3i}uQO3$Di*J_|b;1Eq)=wBw?Gsn(0W!OCCX zFUgsUdp~mTwFwc{6zF}F`cP-JckcY|I$UmceYyAW-4EJjZKhVE55lbq8IKY(hpRtr za1H3rs@eC-8y#O8*|F~H)U}`J9ojgXEmE5eWtlwUTEF*k7RNe+aGwIb8;?EvLD6?l zY+s(x(Pw;KV19pdW7JgRE|=M)gYB%N{^>1;+8}#%_tv)I+0QVe=(~<})1#^0CS5So z_ioce*BmzF9VX6=vmb7(t!zwkZ#(r-^>69pea3ekwpDs3Et$$XcP4Db%i(MG9huyn=4Xc9Pj3}knitu(OIWTrdj5_Ih@3Ss$KQUEs*3mm(9-` zgk7Qbr!B*FpZoNsU;ga&9#vd*qMO})t>&`TW;(*^O2($hchU9h`K0pwVVUc$aZxh? znH{h7+=r(xlhTkeN>(rZEOxQLobXB)D)1P(Q;?zAW=xdYL(Q{IF%osP? 
zo?2#cdzj@sTe{Yi>EIvW81BAtbM>i(+Hi-v1MSR2<%;`#o$l*rYkqojDfPBI@{g$9 zEyK)g>9_|hZue8^TC)kvbC=plM!Ma|m^a$KPs2U%Tz`O?Rp*o0#%JO6x2)#L?wbTJ z9-FsI4@l(}n{_w5kaO`f&*miNKk1EsPd~i+t~XY4QKx^P-0ML0*E*co0xesDE2H2K9KOSR`NOS3G$>=vM6Vxc~=#qCT#tQM6yKV`>?C|XCgIC#}j*E zo9f%E0%ogqsxRK{ojBXuSgm;8bn?u4_OLN{x#fCOh7*o!WGx^{@xb*7czGRv>%<}G zS8OJ3>dfZtiq{<7Mnmv}P0GjgAD!hj9D@$Dt&naD>JB!A1A6@qkXxrLUb}v#-UN-W z+iz!GAG0}+1pUybakXD{#n?Pm(r_~2JeT5eXGz-9{_0WjPJZR9b5h-=$VG-|i=8S3IBo2mAG( zTs_HxHs4C@cDeh~MW6lLSl{KrWlQsrdns1VDTc{jHWN1zTJMNayCW)Vp#5OrjYb6r zS7+?{V34My`*i9pJA*vacE_oS0lujSnbmLa9v=wI)OsHL-28EH-8_dqvX6#ATm*0a zJ_vc!apUmDQpYQvd!-R$kJNh-w*}m5j&82uauwnPfU5bo%G+Nve|`Ry|9#|x&0wj+ ziLm3Vzs_ZDfsmlb5UKw`p|*N}{_k5up#IL)@&Nwwj8SB89AgYMq-{}hUita}4Ts#c z4d*G#5uiF&{kN@QhO#cZW{^txg$*6t>TnP%jMbW<^6~J$`CSG}3{6q?)-hYx^AXR< zmx`ZlD5ZN&le4N;16zInQ6bkr>gx}Dz0P4ie{KzD$%n{dZz^ZS!L*GTe$h4FN%`$} zsc|#vhsx-v{@MnqaFss&0r*{R%Z!uDFfr%EHg*w_+^5X{@deoG<1{{D=50~asmw-mPXGu$71~!5tDS&H6j(`CvqMr|BJstrvkBu(L7|J(G{i*ByQx-|{-WH)ORp%^st3H%?k-uyz%Pls;()ST ziMg)ozRnf5GtQ~`g3#g|n+SJ7gD-IA|3OYlvUw}A-}N`Zf;g@)&}zX6tafr1Nk!qhKW#C*A@gY)EPXD4rA$UJc>%RPRD0~pp{LTNgt%zQN5iEkB(WM=YZcmSrny4r?dZl)U!Bw1L+FT+B8F!0 zck)0sBdfZHEjPO<*z)ZapiZ{AqCeCu=5XQ(P^XwN zg0wmM1a*UTIHoH2Wjy8~MEmd&V?06xcwTpu)|F-i3Pp_Zm z107Wt4tBC3KSHKugbQQF=P$vD8k@<^80kpLkK|$W z5Sd^hZFUXPQ0hoxxq!|H6;b-Y);^I|VofZ2DmsKI+gHv{b;pVZ1!ZAG*JPC15ch?XSxOM0Wtoy%d@+h>l!#D1ilE53L*z$f=V%ot zwA4alfD;X8@VN4lbc9`}zvk0R2lqP8b3gP3*}7g*FzFya z*Vm50##s{-{W?XJ`r>tCJ#VnUxQ6UN?&JZ~WsjTLVw&ZstNF=U59fg97L2URocQu( zKeDZ4TvaUk%3*`8G*$#9(fjrlDey`V!i##pvKRwfUz8HhPP8_Xckzp3xL7_%VL;=Y zp2cg^o64j7vK|bHRa}qyxHk@LOqin(;{+x3e0gLfuZ$A& zqG97cgw|FbBT4px13{{!Mm!I+0?<1gVHIgTWBIWAgb^BFlVcLD%EM?R=`XfCyqw!c zOvsIgP{yYJkG%K(YN~Cxbwd(TfHZn1(iKERK$;MGm7+)oLlsaUDj-cws0lUnE&(hE zmQV$;(2Jln3u2>56Euh-Jtz8>?|JsO&pz)sd+eX~U%-Gd7Hi$}p7Xk@u#m1_lpYSl zG96mG8HJc!?blRpPHHzOXruPCe-~tqi(WceeBVww9tm6Z4dftNWp#Dj<+gi&+ySJOgwMAe3+Ui=~H|mn8Rm(5;dZ&GCW+juS_$aii0`_85SQJOpQ@J zy>XHU_q+x-SOZrq8yv}+?8>UmBga2h@h#mmk_@*vqI#q|j6pOrKlvE_;qka@;_E&g zq7IteqjjLcA9ePc^|KV73 z(4pt&&eZ&R6tnxKXEi$H9i+Jpn!a*wyZFCrpxB}`lb*zw5)yNs`&l`~of&seV&z1x}7c)BU)^x|U4wVin!O3QLu|3`($ z?t+=k!_oW~KW=J2UNp#f`0^I3Q0vM9oen*G{ouvVd(pdJ@Tk`DUKZ%Icz4;~rgh@Y zi}j{f7U(pi_5Hl{uZQn;zeWtTeqL!~$H~ zMa07Jed+QH+V@_xzT$9ZL|45yewhwLT%j4g$!3GE5q4yB(8-bK*xF}OLgc%xbZFb)?P0027U*`2AdX{woV!Ews<%dDc6#cU*&4rF!b zusG5H09Z?fSPc^$nw9QBp@IO=`DQjtEV9uQJYpI-JsD-Zf$)__hB0AKBWlt$Q(y<3 zYXE!oUJ}>?2X^+JmuTfhqXdMnU$}H)Qe;N-s?5eiAfo199UGbQuSaqow!wM^RpqSw`di3DA_u z)G8_@0L@uHpQ<;UFojFou1a5#S0bCG<;+L<_fg4Cnb}9t7z#2c3F+I1tO9_>+o*_u zRMdVYN+0#!Lb9}T#G3-MxZbS0(ejG{e9I3k7v_w_3$Ptq5nVFD&yGw7eX4 zzEac^gg9UP25$748Cs6J?t#QqfNP^JZi#N+c6l0x-flVKp@r5~$4n5BU;E2MJgXEe zFcq69L!axp)l$=}(w){#z2u7K$P;G{V08M>waMsrR5T9<-yQXuyMg;kwJ;qPw4S4P z-qqD`cveVO|G${%|Bw1zQZ7AwTicQqjQzLYXKUYFoP3QEcYt+i&UB^#Is`PCd9;4U3?@ zfwkUcS{8nsU10muJ*Z&@KS47`GWpt36j2z0QZ5VaiAhJ z>M9n91tj+LfzQLB6kbkc97J-G9r~6WN&qTQ+4XRc6;n_I*1?s`(PIWazX9z^LZ;wA z=f@Muu}N4w$ifF?ltcCbfE0*e=uD&;86-&rOBA;wR-dTQSoK-NcpG^Foyr2Yj^vV^ zG+?P42vr=omJSoaWxSOKMJRKu!qNqa&<+5U7aKiA0N$o>U>Rv+1fU(OcppJyvmhsg zV7Wq+*>Wby2vH=l1-KNz4in@wqXWknQGz&dJ^_xUvmaUL79KZJ8MVWJsJ9`V`bd@(qz;;Wtu0eylAVD@n#nsPx?K(efL13599S@D zEL@nt^+*HusSl~ca#eAFLX}Z(tKbY=fiwWRhJV&eB>51ls&~-eYNEcWa_B_#q~Ho3 zoFihfa8YxJI5qm2sh5*r-nE51x#3P?KWI=CsW9A$z1YXW^7jn!PJRlwZqHh9xa3o zqdBtW7|@kpDCCDh4B|QOE?Aig@xVR`wu&sFLR4u{z}mr7$I-TeQOmYbCB&G@v9Z%` z(dvXT#i9hQ-O**+l!uCj-|(nM;{`@y*L3jg$2yU2y9k+sgGo9lM)4`PXpnTZqh2fEM0$h*AR@sKM+5lIfIrMB_P?Xt1S!;^%gasCU8qHaF=b9Y>ScK-Z zqyxtpQF&rxCAn|G&yYcMw);EJB$=9hpBEzK7#ZSp(|1B2lo{X-BGMK180raCAaoAP 
zJM`kflGssYya#jg9ji+34Dec@!c>Ppy_U|osz zdxEOm?!@;#(RdOz9Kj_qBKAdHuzcwGG6skr*AEinF zJ!K(>rK9&SKPTrC9^K=9Mn}2$p)X%U$9kasrX?XGsLX2%xlhn)Z7|>z0>lQpMOqX- zzW}L49bqg$Gm*^U`E6Sy*DAK+V<2pwz!qAHIgEst<7z!H#~9cjM?ji%BXIdAh^IYp zp8#}qf%xPLbV`=1e2HtsVlW*oZ;O64wG7fneo$Kx^^|*mk6XH^hIUUBT#K69T|tb1 z)NZ1+>FD0ySN?63`!A52-+`VD5cVGg zdVRH0CT{;t*qn73#=q@&$M1OD(bG%+UdW3@jqpD&R!6;%{;xpqzsyB=g*cPee+ruq zb;in=9mgs(5dOf!Y7UWJNQ+nf10H57D>dHP@f1ViQScaSKHO=--YtK`kD50#P;lhz z>%f-D$>%h~6!F~)kj1gz@Gx_HqKYF_h->-L!={yYRloSJ$NpZ(SDY5RzMG-JnCZ)| zg&#zC6J)7i~Xlg(8^DlXH(*3wfDKG?uXW@%C@{X4#)WZ)5Gt zr$2$-?}a=O%_=42>D_^b}owxeGzW_{Bfe`USjymVuHQ%Z*P z{kBs@nNeLrd6LRQ|B6hUa6m;i#O3ylW0J`MUa7&)7Spdmif)%xq!)SLzH)8iOm+1~ zW?yy1z3=d|cO*X>PTjfc@qsKu;?`rIx_#pw`gUq zlDi8&zdh9Sl3SZf;08$EGkCTX|6EUaXRR~0iWht^ZuErF!H2-d`)q;1M7j5R^XUq< zZ4em%Lyid)y>nCN*y@l9uy$rh56R`j3OhiWAx0vdj`xjv(h1*izc{4O5lkHm+qRBl zMMe*Rj|nOrOdST~S-;AiY%ZYho$oj#vvb+N3qq080tMmXaWD8c4jc8Qr=rUygNoXp zFyA-!;<#b)-8f$IFPjeLOZYO#`A};0NqtmMH(Uf3pVZ3i+Koq8DGr8cfrfH@?lG4s5=P-KGYnre6S9YNY^zl+ z?U4>$b)Y5Sw4I8d_uu;Tet1c-QGB2)LEI?1Cjf1)b4^-tl)7w;+(n7^j`h~O1fsy# zW_Gk;W%$~p1>ZDRxN@37)31zs2x+$BI_IUECi|Pz;w3f#?O2aqB@&j+bn#^0H`P(y zZ_6Q)@4U#hRSCyP#B?mAxCdvln!zEMLSiLPC{{-IrqC*OrUq}rR0-FJO=VyQr=hT@ za5xDDVuO^IrC|9FnybyiQaSPTy(v=cMu!VH((}GN{aBZeH?QiBDack`pN+##4(Zy{ zDxHXy=kG#FqFE&Og2R6M0Z6svcQ^xow1wb8yzNoSNEcR{uRENzX{QQ z((cfN?7Hi(#;cXge>h@1_uewC2G@W|Z}x(Vm5)#2r1c>x5p1Y02ECH}q;$KzI59gq zNPF@Cy3t|1?#eN4?K~nI`Es0?6_HJBln544^cJ_n-|*Gvgocwr_Zb^WE~`b(Qq1mY zsG0xDnKCNm%Ir_5&!j2;9p$&=V{FJZa<@nW33;w z6?ue%3e|x+ktRue$^alwiWG<3Mmx#@kZ?(Q8ahBFr9I`GM$atNI|JHL1Qr4f4{CVK zm#v}O%07KX>ah*kKsbWoA{(JEIsogv)VLZ4;n^HC=^0V6ZiAoqpJ9q<&_TL+lkDM- zBA+e$Gjb(b!Pc_lWQW{v_Fwm^n$khbF;Kz88Xe-#CD*Mf{)C7oXqFAXrt_e(LL$ zc=0aK@oA9>Rw=wLe`SFrU)=Y}48&zH<)SJf^0sL&sSchMMAF%A9eHBp-S}bo>`>MF zcS>e0kKKLz=5ycns2@$8{ty(bo%ilV_?^bP2Sa>nqH|XL9_W8vS&Ch2{p8G{(NTMy zk>GlJ3fcSY@$CwS0sgLOOp@)@(i5tW{dV+pE}E4#Tu99jkmm<73yJ&LFU0aW`3^A! zJy3A&1V}-No_zj5ScnHrC)pPW#w_dM+tzl;FJ?f8i4Mwb7IVRM3v62?*kHSW5HMKM zK1nMq0+(m|?4p#j9(f^xf)09Q_@!u*+rZlP@a@z()~%;`|308u!x`CSsB?uqBOOD2 zx2v%*-eNFZyLyu$w$GUJ{ip4V5vkefF|g`f&;rl zjA3z;HxV8i3C%a-?yJQcIfZzQ$N9)7HbJ~zCHf6P;x5zKq_IFr0#F>!-j7io-$gjJrkPAVowD0t2x? 
za~o&4k24?%e(B@r^c3njVlE`04bCK_djLSHILOEi_|@U$T87)(1t$s}Hi3r*Fha%r zQ>TZD5*0jPpavthU|0H7i?Z($r{8O^~u4YICZD%$)8F1D(jyko$KBWgftcMO*vT* z2VW*~_}`AGCxhkaO4CHJG+N`kcQ&@o{}UE8pX(+v3|*#k2@~CJG_j5Fq4ZeCV1|ni zmNH+(_AM+&aMT0n^^0~~Gwv{FXgXEt(q z_JM5B5zE7cWw{V6Aeez?H@fAa*M{`lgkaH;LjI{LnyJ%7WYr{F)S_QgKXL&F7snYc zv+O1WAQ#8cFn$hzi`%kEX{tlnkWi_nY55uhD$F|Z&s^oMuJ>u4>z5H0KhRCVvKNoYO+j(me2po zeSprUxj%b-(zDfCDYcBkxnLEftn&Y`hYF(_3u zwp4yj!C`1ZGQuV?XB>^%B;L-#$`)#BC~i5&5!p!3SX)o<#h>7B-<{VNb9oCY1+nn* zHh770`hqftFfIRa6Wev?+dEYbW`E{>1T2t^Khyf5!!i-dI}Gu3Ng^FVSoFZ-a3H=m z|NU{SlQ+jz94ijV|S=bvHyQGhJXPsjmu4ge&H2L%y; zAGc5;0FcgN+Al#)Vxe~tI)ca!lBU7a!!1IAB!q)o-un4bVG%&pN!TFIUV)Wl6x*7mmfElp&4_c847d4~* zowxc|jx~lgH+-U%EV`&%)@dnyvicur#orw3MRZ=DqpGR3ce4RjH=9YE6a5!&HEmx30>A%WBU(|wR7gMBDZFA<`*0q8YIoTjz+%w18cM70U8uD z6?Fy~-w>Y?rO_O_w@BDbc_vP|z1FB*pCjqW*^h&T-;(0vp|a$cg%!Ew*>L(dC%KO8&ruY{U@#k)Gw;K-@sFt>vp7Nnf2qXuA72}0;&MF zLl&_V)Dx!E#+HOQx#n)o@1tl(12WreBR^03^aD`6ppl1iS|rI)&2jkfS3cHJG%}57 z3x_<%VKA}hZ>H&U!hXba0lQ!K=sOQ!zmbv_sIcBv#dEImSTctuMojyQkyv>H?@T)tkR1;p+?3j<%+`Y^W zL*8fQch@H)Mg)rNbxQU}Qw(y9vp0)C1l>s$sz|sZDXcpo9BR18)gMc zC!7h!hn)EQ4(C6cfH2|_b7cVBMb{gWMsa$qyL2vpX{0dKOjI2k`@n!i(%p$WCy9q@ zbzwNsX0c+@(IRKVi5DGQe~~%Y?~u$?MBZaAfOI z#V=fSL%pGzWZ4CJSSz@qqBA@`p*36C-QIJW+3qS#=8{<#HS#v23Ziw`?tvVFo7IO) zjUw7^wfI6qJn;#aavZ>N8aH4L%7{=!7Eq5);`1u^48N}bcstcN`jb8!9dA&Ew0tkA zqs*}zOnbWL3nQA&-CN_`Xafrwg@Se@;GEK;Sb7RBe0fabZ1V&0*KJs>{pfI5C0dUG zklUN`63h;BqSm3y~|o!6Y#Ga zLdO&6frDy6^x1Z8JKDl3edQ;JB!fv#l%SxJwrx2nrmhnyNCX{lISvUrX#jeJyq%{O zb2cG03yXCEx*1P8fBa4h1@Eucm?Ve@wbi+^zJB$aE{jO}?3(~m*fBUjoazq1?Gvt7 zauyh;xEz1Y)n|di#h}|nDd~du5 zWwVQ_5?ek%p529)sWRsfl3`UBk@?-VQN!k5Ka!*?ZbABe-$;1XYlW05rl9o?4_V9B z*I(ZM*HUW}3-nF^?E9a7c*HvF*-c-+UD2H)c;w%pcLy#R>X`g#`M-X6nDcy=Clv+$ z$-r%L=rJ}?_*HZOYI$jDsZwZBwn z|2Vh*No>>n>4oc0bXU+$)5q~nm%ZiE((;?|%}@Lakjo0AdfN9F#h)k&3)8O3TA$rx zps3%jhDaN(u?Iy3Exi`;KJ6MtE44dy`6~05DBq8_BfXZ(ep)VrPu<7r5iv4zEoa9w zYtfYW?;CoGPSV`t{!Y#GBJan$kdF>?BWglXz?BQ50hPN_ao#{NIT|~2nHGnWH@;O2 z4DqS#)>GQBWh0-mqbG=EluLhP2Sw>xYC*nz}=eyq0%!(%Z|GVBt@_MXhzYlY}oCCdv&T54QS6 z0goU;<2i3q4g=FNDRv+9j`$e9D1l#*^K(F4nH#_`{q0Jx7Bwu&x(Ol!gt2JY#!!9{ z?4^kW|GlQ7Ltk(F$e@+MW;<~d9FqqQPM+J~OLz5@hom|>2YN>%Z0A#AOcIw&)Q96= zB}pGo%tq7=lUGUAUW)tCNR6GbHtx6a0W~Mmu0RIl&FKyScAp@NqL^WQo@#iev{!6E zWgng-=DBOA*!VVL5~SGyGkig*jL^R6!qovpT%W&b?-2;f9quu2U2al(xLtA~0VJ%#KT^EdNj=C}aAYL=|rC!*&uX>0A+gmla zQuAYKg%?ok_BCN~mg2#_32tXEIoN04ef#lr3jg{ivRt~vhgeou*E3a5Y11{J@a4vA zfu2l`V9t2zk9k^>($>QD{NSy{s$2K8Mp2Dlx4zJO@1b>=ZQk1}e*{#Ietl&LsGx@) ztu%J>{N>WAfz1?f;hzxok=>d2z?}9t7|#K>K6?2dh`Op^%4l8$(-t1r^Iv9#-^Mph z@v$HGKUd}aG1iRD+)`9JyCl7M_LdFxm|o+~ukW|)(vM|qacMPFjg~TLGM6o~1FWmn zGMt99f(|$oI8d$}*L-D7i4xxm&0T8rbFCYW_c*omC8+OuSouofR0c9Fq+>aqPw4e+ zyLPeD>&qN9-q$&HZjuDAs~1e6ja%x}?0}=Ld;>UVC7V5HkDnOf z*DW&sSK#6*A|An1AuQL`Rbc{)o}ez2}BW)-xNQLBS^O zU_0eRjHi3#T>MwzCw87s#-Yx(=ciqfg`|`+Q6YysUE#U4LKy)&-y3X&NHPydnW7bbNoaWR7vv6ys1Ans(Pp;JXNn_r*blX zPktc-kwBed({z=m!Za&)yi1h_-Tg@-vj>H;$-S8NQ|_|X{nt&Ub~Z&Sd=zX2s zh|@G{_BO1*lCg&Nt`XQlOR002$JKS0(bn27kLa1DYou^DZYH_*cFv()$`q%r6*K#d z2mtL_c}%$=TY-zBMF%2{VD;TlvCH&s)z+QLBGf_itmBd^mm_Xv-}~tMSr33+JVV|w z2c%s!x1DU0QU4nF0=_p{OH!YXfx6eaviQK3=F<9IdM?y8n}%0F-M|0tU&B-=nYE^w{1_rl}(}MKZgM7ShfzZAf9Ef7&zx=&+}4ZYV*N~ z$_UnXlH51|Dz2_mPWg1oc<%@P$m8!QnS3`Djsr#hu^gXY>~e0gM%~EG1yV}-)>7#0 z7h7Mn;mGY}TaJWGlZJU+p35aHX2S0Wj%BP7_+xx~Dx&y=>#tIiCf1{O{-2MYX}9p- z582=Z_pM)t9Gbk}{#|M!ZE~79SFUAJ`!`vXtKYWVoJ4i0W*kQlJg91fW_;RiC$i!8 z^9t+A4y%W}RWEDa#`wlXojP!;%%N55xPkq-m-nrnb0icUJTaihOg5NV%267jBmm!5l-8Nq5OFdO31ec#^EIPG_B@l4Sv zlc+N@ULTw3ozZT)1RhjpyAjgjU@l`wZl`v`D?X~xN!ob&)rPd)XR>3VmIjHA(3+(h 
zPFUj_rA}&C8wkq8dtQFySlG*!WqjNw{(IIb*9LD{t*M=pDC04+tkU8n1OMWRvwLyZ z+3)yPQn*wj0Vi3y_5`4q7sai`uL}KI!`|*AZL{q!)Qp1Cs<$?SYJEJYLw$uujv+tkeO$7@mK&;1Nn9)ID^Lz^H4na4-r z7e{LQrY*}J??Ds^0gK3U^atP)j!2%#X>If z{`)HoU8EoT=cT|XYmY-nS%_}Xr@KO)|(57%EFjlD7VQ~-$ZO94;usB-egESu3}-kso72{ofvzNl(D zM-i8Pwr<1OLI~zwerD4C{K;i6tvsm|^c|Tn#?pcft$Er(B5%j^jl9uxa3B1LdGr+d*kL_~)BfM5*5iF{M*|VC|56w< zvL4vmZ1_Kamb&iB4nk=O73Ug ze=f>`Jq7d|{<`_1SMPR*=kE4(ov5GN8*AQ=bPIO71wJ&Yb?8xID!TQibH`o~zP~(r zN}KaqZb|IQ_1yGBh`}>%THi-Z3xc;2Q-sbqbY6tIB;3B3&aSw4eGP>*q)eqauejwHaj$e8J)ICo(wNS%KhPtAX93U*G^eT}_@j@A zjz~vSc0dSCrVs0|=2a!-1JPY3;*YuR95LjUDA>=ZDY0zlmpmH^yK(sxf1tdV<3qKn ze7Qi)+@l`XWTg2QUgo_}SS#POIrj5W`5E@;+0G$;nV0oRz$Y)WkAsMc5hI1nN6Zmx zn$4Ang}cokbLCwP?%8?f$qs@SU9%lrUfd{j!1vyHi(MGdGu<)lGM;T1mo5s%6?9>? zmsv0UdXB)e_h$3Hyu5c74sYXUp|H+fPfc_V$fG}d<_5`(shG`jvDfaf`kYE#vE@?? z=+Jx8AQhZ5r@lo#vygRd}xfAPpJw&-Vs8KLPy#YD_7m##^mcq)JHKvce!|* zCoS_oqV3wDN6b1JIaLc8J=GLy_VMH)3*d2(POI3jxsaj9d9$%VL&_5ib7%pKFVfi7txOxj9|R-15nwvvbrO;eUN{U7b9d_N$HNZxktygAp0l?Cws zEj#Ln!babJVn=o2&7#jp)C~N?j{3h$#Tkr7-04#r>VFab zaIKUP>Z^6E?nT5OyW70R{99&b)YrO#S3bRBPcZ`oT4lk6vb=uxKHG4zVK&FBTSjsg zBe97*1)s>>dXE?=jh#5!y`3JFNqXul&(6wMKVmHeR?|XCepK?M-L^WzVR7X`S6Qo$ ztk5VS(^oN+2J>Ao!cf zSYV}Bdxv4?V`jKTe+C#FHW+@j+s>IISgo#-Y=2Yhl&IbYROjnPM(lgO8X`hbr?DDy zFY!WH&5r|!JW`saMq{ONTPM$+r&am{8^l)miyn1yM2{=?o+aO+`=L6=UN3JU>8fhH zNG2+1NZeCyh+k~B#`*IALaSv1P!lD7t7Io5@Q7`j*V;?x|4u% zTH}|KH-4h7J@Kr*8gw`Ofx`1QUeZlPccc%fr(mRaKzt1{HyoM!Qg7~gV-|+xF?obH zmK#lo34D@iWHja+z`oKF0X@}y_-=~g6wUsI;w6DWyl2#drv5k5KnL?ZD zT*D`5-<&E}fhCwd#Ux*yeN{Q()T655e0)9Gu%p*#$(XLqaXudO0Ku}O`fg5M?Q#&E zDQ(kR*v?QV6vhE@-aBb2I-bJg8&H&sR-FJd z4$kyHAa~-?v{>sV^Lfp3t*37)#wKi>gNJ3`Rj#wQOOE4a#Yy)26<7s_Ny!{mh!isb z%6oFOT5o(s_G#@IP!d1J~Qu16rjDETd#YAICJuNB1N zW<^X&&^&HFUOUe4ZEek-mOsf`wc=s@Vr-*@=Ti+<5!#J8F7zpp*75trAaH zCG(`88`({J%+(b0=r2%a`nm~}Yhhq`SUabSRUVsv0FRpWzCJ2YuCC6iEbg5ASWv?7 zVBye%hV3U95M>5R6$h0kI=O-Tka`aFDTK=cBKoVmBO$q`mWPeV>`yyV7B- zk&-~I@Asou-`ZbA=Q`R^1FahaH|3Do38q)(&#`Twutbb*w;hXGu9NG?#DmVke19X% zN9$|_^h0!PX z=Ohz{*QQ_?fgI=0UVnzHj0en}ikBM>6MGZFGs&hKMq(1#m{^}$yp>KEP=VlM#sG;> ziIQy*o2TUb)=?50XTVoYuA=4RBLVrkGGFZHxo#}>U%En5wAsH zkUvo{Y|6N@gHaz9MnBV&L(Jpl$Tv@ao0p}a933tcBgkA$R+=mE&BbI<3aTT#4R7Lm zf7tTU!I!}4MZK6cV!=9;P;ZfCy%_bfxI?)wxICrtIytE>!5pQ ztcqxWJ2zb@y_Y*QOGbQhr(}>cKMw&<4>d?+p8%2H(P}W#>WroXmSM{kiJ27$lUlGX zr=PxX$v#}!A?8%}G!fdO!9mpYzOf(4Yv#O6yY^unAxyoR1}{si^~j&hnL=}}n8~YUE+1-BgcFkO2VJp$C@CLPW zlliWOE{SsjzK8Q2m+`h|;SOU&fFlb^%rXG53K~z%seNYW8IEiYOX^O*C}rcaw%S4=}||*t=t8? z2k3zOz{?qnD={z!WpAJ!;;!F!g0b36yQU5=O+jWputc5ZCh#~|`J{4kCSSql3Dy|3lgRjy$0r{gDX6sO99%&^kgLK9N zPIM;;uX?&p-1*i`3m7gD4YQA7I(RN%bA%@DKL2sI_Ij#~X5PI^n2s&GB{K9I1I0mQ zkMqx~PrAya2AteN`LrR0!}6H)E0`kNDO7T;_&XbmJgfE4>VnT{tGoi%ZoSh?EGcRs~$U-7*smfP@eDka@}jQ#Z~51wr73| zAUAXx?Kj7;tubV?QARzKMZUwg8s2UV=rJ!mYc-{ez`@TJr#QKpuO)U>_MTjZlCkkw&-8wF6Jh>R*Eo^0R>%?|a;KFeS>)4$8fuZoT0#y%8n7QIC3K-uGVF=_QKx z#p?9MyY(f;^pQ*YlKCI?rM~YAEn%16K%qj7bKJySH&K7R$NFnjP^mbU_&-JkTuDo1 zf4j%}YgBN;r#bwS*S|&ZzoUXPgW(-6zulz&5EdV!tcbMSu+7anDe%uNhx^12iI%^M z;I+eQ{zL@{6F(N;w+h~#{_uOtaW{nk(c$Qk~n0N9GQhH^i@$f`wO2^)7=R3{Bho*cXz#7-Y?xb$!ugCx@5laY^=s~aA)?HHGgNwPVz{8 zx6$OgS4Ws`*K|5os%9iSmv9&DwCly1-lHD^VkKyLt+BWCSbNP*|)x07FJ{V3z znGEktxZuiKj@XcIp?-|(HGw%=ysX+ylY<;^335PqgJ1E|BWG7>{>U{ONr=)WD%E~! 
z(;n^)vB=6#$P29YvQ7&e5&gA)fiM44H`8ghV#yvrcI*zQunVF^@l}ROjNDqf-Nmmk zTD~tcZmvd*Pe6wCD>;d4@#HV^xO4TE8Z%ZUN1VUT(IERFj?0T{^L|3lQ#1TW)>@m)Lhz= zic5~(@Ef_Cs>`0n?wmX@*)GphOz+UEJcoWGjX%^3UD>7fKE{zL`^(mA9`WYmWq}vtpU~XF8`BB<_Gr_orZ4hm zkOoSdpD*mMLHd_m`nvg-d#rNj^vxw#F~jtu^j@Xy<%T!G|GLL=O&0s|*I51j&iBEi z-**190?p8b&ez`i&KeUhLzuf8^Y@v1JHNiMTEtxc5EctFh%hS}P~acJ;-A~5IylMn z0JGUx!9g12XkR-riV=t9DuKhb7tn!eF>z-2YrY0wK@sbMS~xSE3J zsub)UM=M>3PgW^O6n?y*f9NYgc))f>f84A`i-iYPRGl#yh8YHtXELU-eb8NbGXr&Y z>L+>fzSX`y6RitzQ7>T|4jC zBrj{s$$z_z@TB6(mGAwqMtV#hX<5{FFZHUdfoK)JLyyWpxtn)D^vpU{`vNdR0Q*vb_FKl)6N!AV`s+&U zH9m5d{mx%&^JrYXhnt2o4~j~O7O26DzCd54a-HItFq4zhwXi;C_9O9mYWLCPt&9iE zy&2RR%sA-5!*^SD>krxx<*a$rYwJYA2mDht#yI+$lz3nRg0yfruN}@kNh+C`%XAvAso`8* zG*>f$oQsdEP+rKB?h~@lnj4i|sKNs{Wjl305aK&Mt%nI%27@-wa;8#r#g7E|^Jy_4?491Kja zFNy5)J6tKvOSo2l?N^B@CVDLUlOWh6|2$V`pk&h6WW2G*SB5IL`noqMKqx8n4gu+)A_7K03B5`aktWhqP!Lc= zK$Jk}HS~`3j)vYb6e-ezpwg70AR=lgB0V>{)?RDvea_u?oiWZi;~nGu&IdkB{>=H5 z-*X-UHvq1AtVlj@%%M>dFSm9jd=vOa<%R-Ez#;^D;tVerqPhTjcOo5om)BO$5Cvle zSWMFz74W%YIk+mLLu8w!H9bXU8XQv!5A3h(fKs*luJvSQlFin|jFYy$5o6(dm3Gf&3DX?vu_k05jb zCTwpZu$}6OQ{?gdeix^OCs8DD_Yb!iZS}Yk9rkrF%8N--%UT6vZW($mPvFq^t7IzRtbzrF zsu=_&~J83 zYZzNH+V8^z?bJ2}69Zes>4lK-K`kNv??em+Wem9Sn02`@6h4rn^K!6=f!{0RY%eA( zN!vxlfQ!bsa0|{+M7KtO`PVc0S%mBNT~@UZVY+DbtOP1{Nj2cInb5rU@ujFAUiUP^ zLj#-+|}83SSpB{$LjR<3msfiYd}O5||7jhBGCn zMXcmrRid`%45dmZzfNE!>L>4*FtJj1Pk2mm-k;$#e~U8Lzy8RQ%AI=wV2N~!6AeV3 zjWo7{O_RcxL|~houteR&8e#6 zaZfyy>3%b#leiJbSPF*;M4&l{BlHHXHxrMz^E4*mtX$+bh?Tg40e$*KnH@Nm0NbnA zbtd0*^%MZ+~lGs<=umKTdfJ|r6_&tQ^K z!Z6k3i;Fn(OgYEf7OIaq-A~;|grq&F)J!AWQPtPGjFeCXnMc6GU|zfTDbqt59`|~h zDeN%tWiqWvS0RA2n4!ha7M8A_E)MHwGZAf0D-HK#_Y6LJ?_!q|?eo06{-ldMbo;5sF|~w>Ta| z?!Bz?{E|Qm?x8V2N#3W~60)ORN_O%)Uv5)i2HC>esMG@rCrr5muc}ZRuMjR5Oysa8 zGp!dwhuCzY2%y0&xVatEDNYu4)d!q{FuR-PWqQRD*FBs{0xH5iLtf;%)0bRfRbRj- z9E1VPDG#cQ9=tTbqzi^muPC{1(cO1tMn=yT17lOAvpH*^BNG+*q3T%%hLb67kpvdk zu|9j&ol`KE9RPY!gkj*s${0V`j06-U>G>2xy)Xb65=dEMDlCEOTZBVN4}Qp9C|)c4Qedup5cgQ9hZoZr9!jo*EEDx7>lwRpb=;bG7f4W4h5IxypIg+`)+NOL8>~2aOn6o6;AU?w~FlN7UjDWx-`wavc(UKE;~8XU)|M6lIBr| zTd7U5-u9Zl)$Yr*b?nmh6sGM>qwspjd?=G?gnf)|$u$iln$%RIGfh|qJvZy?6XqJp zGrGYHOyAY%c9$D{&07ML)B}~&4{qEEDSxW=-HYy&)1bXM)a0g5cru@v3b#y*%p28e z>#Ks`%Px~G7q(hplJ)9Kjjf-Jd4w5H@-W?+@qaPXGSnI>0B(n=Hoa@R)dCLJT*z#f zU8-5m>=CRJKBFn#Y+Gx6TmhE8h-*L zi$LE-V4Nku4hZ{E&Cy7<(aGx3YwIzX?J+#) zG3I@4s{Gu{=69(;HH-QV=U;MiZGb|mTmYEj{RdHz53TzQ|L={MpE)@OkujuN&Ci_N zKLt~3i-$8&cYpK#6IId<-uDPcCI53U^*8VTS1`5W4dsJg@_P`xaOyAKKbac*XCwhe zb&tvzbmxzx#=j}*R6BOWA=Uq?Gt~T3hUo6{=lp1l1PNucNqSKGbDdij1qV8dBB3ilhUf9eeSIR;a! 
zXS^eS7Im6yZ(vEBm-#QW)KQobVgVEgEG3e_`>{CZLMw$C(R9Cy5=m(M_NvB{LBO!B zX&v7ZAa!-5t$A#|kHU;FJoLWvakBQ-+*gF+ocp{cmdrMaQr;oQ_FY!8ZPqFcCW{SCYMRWj88p2u%?YXGY@Sc;D59h$j z$S;aqI#EAFFCogsprpYl3s=rKD2P!Y_n3;Mgo`R>rr7>0>d*)7vW8xcaOsv?0JjwK z$!;+{S6RS=MEJ=TCf8W?iR+`tPPX)qbPt}8_r%14Zh}7NMN%NJPSI)<2&}E1Umj-B zzW{m#WO#2%giIoJt2nK2ypUa!21zN&&3DEr_~7r-K^KNjQ{G0KBR|xWDX;<_IYMV_ zPSaP(Z*BhH>DStKPKdi!1ABpe%nAg%Zf1|Qfv08;N43!kc?arrN`10(8SF9)s z0SH5pO7}+L3YeQE@jC1;V9bVHq~J$>FF$V#`b=eh8Q*CtIbo7@2!|-S8abPy({V(4?GU)Q?YDI3;}saV{5uD`tHMFC?YNt zwL;WWZ^`i4$JUBQs_Q0d*r?A!`Np!6ZN&I^FV=*mfZ|4Jxs5Dbz z0ZS~pn($tsbj3dse5u#)qI|$5hl?ULW>=RVqu7~eTDKaL9N9?f)A+}SB$^JIm-_xo zQMur8wSqy$MF9ni(6cAPgFT#&@rp-{%c}KC(kPOG#l0B8VicWWPs4Onwx90kR5!Me z6pHd6qQ64X8LYqt-yT39WcuW8A?{Px&r@`Ur5Zt=~`tmTcPhuviD zmrL2`^?>NazLm8%%~j(+@vi!0tiO}fMpK_&13vCd4Uo0Sxl;1we5B>9kXcG~$k(&i z;*XekEOr`L;g8~zP3^-T2}um}8ox|vk`_R)2m!fZV@Zbg+T9k{2t_>(3|;eY1QbH~ zgR@K0(Kjt3d#Zu=$EcMElVkEzIKAvPmz<~DO6rA+>AJe*F1<&nd_&m3O*I*P6E;L z%+o5KD0o~e%)obz`DI=Q04_?z@a5$v(;TwI`OHg7@Go&Fva5!c;tPtJ5=0ai1qmvE zvV!Y3>27HN}w*5a~SDP&V1bzY2@MgoTFM+4HVDfKcmKs30^+MP(lTZw}d=N@Bs>hBVw zhlCmPMjD5pC6vKIDteTS_<&0%HhjbuwT06+mmVB<`oPVIeTuXmn1z6 zS>%(`jN=dn#OZ6;b8%!{zwFEQdSj2|PsVg>6YoURKSq{YIRLdW})wLcvXIt_GdSZxZjp2_cY_=z;mUd zTV(F1gI&rs^Mz-+=7$XJYtTLGv7z^F@k67YEBAE5g|h63kFp%|Ew7=3hu-sX%sNO{ zdUJ7FEbvW-Pt!zdz77a>;NRTo1cSHWAmvWpHJ``QkBtEjM5jI1j9-at9m5$sH9;40 zQ%O&><`wZ4+mb~c(VJpMb3Uk%^TR<{#8I91(;=g{=9Ky?~W~TG)B!n z*7y(&J`zlRHL2=m_5L;yT2P^qF(6YZsp^y@e7- zzvq7g&zof{*EFzOFi*oT3>|-UuhCleYDnXxTT=ok`73R0LzZ4Di%G5eNQ29zC) zy>V$Ejz(w9tp0ecg@4mM#5pnLN@J(_Jco~TWZHUw06jH=?=*}gKajl0Cbs=@R-AA>?1X{*4pfaBMXub{biw5WykmBrE6x!kAfS8oV<*#H6L zoR*iL4Z%K~8&%cLY0q1p$an|v$=VCFZhc?UaoVl8?Io4=h~RRuk%Qr@-{uxs>+=bq zx0uy8GD_m{#S4-`8RH&rZ|;^{ABa`KpX6=h5C!r-e(rIhLxKkb7Qe@(Q7piQmrd8A zBa<9>WaJ0!RAz{Gh}0?HnQ&}F9kvD{eGYP=Dwxd$ zr4RFPS(Krtwxhnt>Ijj6RFhzZ4xlIxy1fQQD=fgOkdF2AEzJZ-#%ao^tQRNR#WaOF z0jDWCF2f@O$wQy&d=6u2GJo>sKG2gcNk+r`Jpjl=?m$J#h_u^Ov<&mq+vFm;RV=LX7y?VUeKnD09g%p_ zn~Q=};Ot{ntW~PNuPNtY@r`3om+3O^wSjq1v?Pv z4p+cDRoHI)ciDu5DG?Da_Bo;`l3CHUk5%9UtehFPjQ2jfq+0r#6`~8Q{irZNBg`wL zZ-4d{_Zz)5U%}B7mof{=w=2BbgUfK0b3DfD134q6Oo?1}Mjd<3R-;!>GiRoRf2AB|XUY*L_<}%*f&D}6bkM~S> z06b=uWFMW%MTI{`Oy@-i=5cf9!+P@gt@4+<9}8vVzf;c_ozA}v$d}|UaO%!KVO78y zoG+VEa6-L6VY)#56lR&JP))H=!>UlruTUqWP_L!XV7kz7uh5vg$W*b&%&N%TugD^@ zPvZw$yOxm&AwXR7)4tC2i4LcbueuqebjL2wdZBc&vMV!uX)r&d;} zR?fOsf&8Erm07FUTB|%$dw#zb%~PkQRQG>_z@C+Yts5gIWuVgk9s;}H;K9>)N2$@v zy3zYVqfcg|Uu&cPOyi^dMz@Ok%dI!87@9&KG=*n2MYc9Y&ossEH{p1i6O@{htedZ7 zqC+>~JGs!5ndY4R=3JhZe5IB`>z3jNEu~ToZ$jZ^Gc8Z{Tkt%sHA<~@)~yZu_0t%5 z#)H(+9xWYn#}Y&ne~=4&*;{k(bv_y9we;ubTTV8_B27*rKx)FIG|*E@R)}?ijE>)$=SRo%`fZ z)F?{}dT~LSd5gy{X&$SAZAlU#ST7e6SOwqfaWIg}`yzj~<<3y!Gzk=d0l#&iH6YSn zOoYgwUD;R?11<>zq(POY!l^i66J;HDq8$lAz1&N8UE1FyH)4eZviSjjh=i%kpP04- z#TO6?Y=&R^4^R4cZtgb*%R&N(b`=}L`5WLlyo@|My)34qfj1Et9~VU!jF5(9y6RXW zsHy6bgy&Bf;i!4L!0rS&4g9lDVW10l%qFsiA=yOwcA~TOiw6&0_+X$@LgG)cFn8qD zWOcd%j98WwD8p`W5XpGyB4Is?#=uU;JU7A`4Hl>YyCH@$SYi4krC@Ttjl2R>=pm7@ zeO&qNINcYbpOn8M1?xRw7j_0hNgv7@%YPAqoe}C!E`d_8>VqC)S-D-2ZD8?3aOXiM zJw(R{F|LCiS0NrZCxOQCjBYaYv}@zH4_|w{?M1(>w_>P0zFz~WY$>1hUDT!xn8WGyYg;;`N@k3xs=oFbdGUHwd~q zQ`2~Xu%aa1B_n=6Yle83HRr+D^Y(Sc!?%;!ZwF_lrLrIzWV{{7Rif42&q8$CO>eSB`a>g`_p+`-${*%%5FK9!yZ6d?9aWBWcQj)s3? 
zj`7_*Y-D`)5PGIY5?+eN=A&KCLBueCsG@QB(E^fx5jFI78c&&p6U$PXsm;#g@ah*j zT9n~`&pbN~R#s=gcG1SqR5`wtefR$S(ffY=_pkG1nqm-&Ph^zeHT3Z{>_C4(U~?LU zZO@3`o|QD55qP<%JJJ9tY#H=mG~r)%IKOPuF>I>@Wwr$f%RpMP=Pq3_k23CZ$b3y6 ztXa|jc!HO~+^*M7pD+HJDC{}P#RUW)8JU)8M$K7vaZ&+~unB6JH%MxhgP z`ZJp#q}*|=@R>{9HO-AvA?H^MzrH6QW?5SxXbce{_@xtTiYDl2a|FYJhe5Rr#Dvrn z6yosuoX++S`;FrZ=E&Yu%hldTYp_SF{sptuZcF$>)%bTcSC1i(M9fG5hO`5xw?qM9 zKyzHbbyOGjk`&d6Gt*zGpU1APc6_a$mPF5Vq7kk3QY z{SX&A6)ub2fHE|gW9yWl@bweqjW^Qk9^|O53^&0imFq%ICw3=|^>#?X>v7-)^~U>y>eA5W!1qmgdILrXtjM7Zm`aD}(}pZK`wP&t-tskSHRi@ziePxM$9*97Jx=1&^6?WjcF*}udBLJ%%sC{^^|M9`hm%ZXPnOpMk z=j9Cb&n$+anx+Ob4`Y{18PV)y;I|j#!})KmxY33VTYv{aI{BmOJ-FuMuU*b_AHMM) zZK#mzm5%f}sDXiixg*tXu%g6V!|qwtV?P$8G0mK}o_m6XiB&d$Sv3Ia z+#F?18N}*=1~SNz&|nrb4^nc zx<8fc9(J%MaR2+Q64e}q-<9j%1$7s78?qlTihiqPyu#v7LH&2yvGaKOh0iZmr++Kc zH9mP8Rf&k?bvBrSN*J>5V{dXCIzckFRzW$;%;{8FhX~nzvLd)9C z5`gNau$OM@)?V?N%E5eryXyM&o?Goyw9@#L_b^vdCq(R@=gTg;u0Q9zQMumBdw*@c z4;d!C(J!3pO2In(oG+7ZT>E{#Ozw^A<}m7G<>rX;kG0KFH0|*%qQ=kpG99t1t(OL8 zzHE&fYajnUVRqT=`zwnZRo`FR-2d|Zjor`rGN)9x?J4Jys_kjl#xD&;ouPLTweDA= zW6?ie^VP`s-6BhmoB^8b8-c**s?l>?<1wgSR>W2xT1wj1liKKj1*WvAXLv^r=yp1O zF+;$*Q)N3Etf(6HqZOgx8+NUB=i}2!Z`ogR{>K1n z^wHktV=8DVnpTe(#J!aZxlpRc@q!qtxRr;9Dg9Zdi?rIx=NvB8I`d*I)^DqTvAwJL zi0LoxB#f$%Vgq9T&M`GP))xD3ZNSypX5t`w&OdCxU&nM60UIwdF3`*VS9kKyWBR)d z_!AvZIi~-x0q1@-lYh4Xf1>06VFNb*Y$pF^1O7awQNP=OzqTMLHXxq7l=+(t_-hOD z*D?Lg2BgG|f7yVPWBStuZ2xsk8MIOE*-gK;AO(y%DCqc=@n5{Ye}j&P2qg-=_;>d!66)}k_?);gSbEx6l|hin0Ohjfx= z=5L|^HY1DjaDm#W7*l#Fr3~pi5gbX<;fB&~g-nC@qofnUIr97+X}R`aaO!h=tVw+N zxdrJFJ3+G(%%QSYHVn5t*vv-z9H@uloou9t;%mYia6WzIz>&h_6aBFlop7o_!X%H4 zQ686m#Gtad&CF?Lr>!=|wlmtQ8aeP^$5fm16h=e1rU$vXwcdjy<7>ki7l&$xRJ!od zqlE?G>fJ({2J~gEC%RR^N(UD4&y`D)9#-(}TG&&c-$an(HP4@h0pUy(el-jhu`<22 z7=7j|>Y)+&$EXk%vCg!lF_fsqI`vLcS6WU+M?nFU=QmJwd=R3|+T~}j*&s;13*_I{ zSfI`}vXg=_M;tkV;HBGnJMSqJ59RgveeK{p1-s*heVfEUa244@h>>y5La$rg=~jWv zU`qKDS!HXB&NvR8o;@(%wna6y(#i+Jk1E(9ab46n@6WigN|t{BnSfnB8Vg-%|H7Hb zLd&Nn2IH9w2HYAa%hmH2Cf}e7NYfT4i&MjuWdtpfc4*7DMPf}39O=yQfPs{C za9~~Dl`=tg9gZAR5S+PXMFZaEzoVAm2()@Se*thG&~WU3Y#S(=k1z=t~7{ zAJ6Hodm24gq_kMOpHNFg8}5SQip=W8z3kL^>N1WAvWcpb?69~P?sx_Iv;7vnCvo?G z9#gtPzI;a}9TY&dO&iC8v}Ym+{W_+Lhj2c__-sZeB*Og2F%_6c#p_@>=)k?1Hm1j~ zl&}-0=31jHD-EWTL`N(L{yL_aM}1&==|#Rr2aW>I0r13goxIaOk7-%ahY&|<@(#b+ zhut`}n$4V@p&Js}VDbf@m*dE-IA}MChj}JL&qpO$WYot;oEbUpHq3cH zkLh!kvs_nzATb>cv2%sc#6xxF*U?NFL&0T6#~m5GPsb@JE^Z2XDerKDmTFgi>R zVNou|>x1tSN?8cWtG>q-g$MGpzCIOL7^o~O%LS$AkNU0vyWE3vJKC9N$}b1<9(*X^ zA6Hil3@3n^9Kwdt6vA;~B%eD1Lf1yHUZ`5%&@tZW7pJEQ zr&byY1nZHjyCWo9tI*3^_4=HK7NomeOlU%!BzJ{lR9YhXM7-%M#UIVk!GcFefx&eY zbRV-*A0oXX#&=>{Nxf2_f|BAhcGj=VOAGUKEYkDn)r%Mop~Q`)BtI?QFm>^u6|UB* z5ftS}AbVgMtS&(mzg$A{oOYb8(Gp{n?&RxbhFt!nzd-+NpyS7GuS#3))v9Z6o~>b= zMrme;x((HN#_hD(T$JKKJhKg)AAj~FMg>x77%D-nbUXL@IiJc+skF-8mWN?3}@@{CIG64A=32*M#Hrz=|G5RZH$AWZO>ASfgpvsi~=oW%ScbiAlfka${e3Nfh!mfQw6utf zrid)g@C2QRY|hAhRAiw=WU)_V`c6bhcx2^d zY>J!=hYlLYF8jnT@5HP&#jZ`pKJ$rD=m$fq!Cx)nwteDuC}@FF%+>b@`@}dZE*$VI z&bKKpZX%9R5ZFi1J~xRu*Tm?v6GE?pW3`NDmx!b(gHrxc!K!f-c@Q2E+ea0z<*j=(CZLA0o$V4rvn=p=ypGMH{-AROSCNZDI zo=!`W=S-iGoy04WR`Ezon}B6QazxK(M5*-(*{jx zZi2V5wSmm#Nd&Bqiq=*J5?2`Sf}oE=)3{q=xg%)XBj`0hWn|WlG=rp*K6cr;T#q--KAwj}dU7VZ z>Sh1|k3B!Jj#Ej;O-KR*B|Gw=E(DO?7EPSfX&GZ{4G)K^I}|EEa49_@_dP%Zll$se z4jYD=oh$xK7OhFDpf!j-00cr1rf~q~Rb7*H=KJ*-mOjQuA zKsi*_j(UM)DTQa0ItJ3mr0H$(rRD-YcfoSRV6q>s-p-H|YI!_|G`GZySsbHde^o%S z0i|{e7{^lfB?@=RiTWxSNgYy(u4FNio?2GeJQc62yG2XcSW}l%)77J0!6ivf9cLGY ziGqp&a>X*aryzCIrD(cRq}-gbbskQo55cgTcuKAms*ng-MA(*at1c##%T7o>B;|!t zY(RRP%qN5nrcgC9QKMXP!B}+0_qd~;>J3Tr;RtgwK84pMQTJw1;hwy^ohqsYP_9>| 
zeht?8TCakOL5B(`i=YM+NVc`)lBp_1$FmTAvMV?Ul9(mC3K*EKq}YIKJC){z@hi@; znZdXNi4+a194oG*m#cY5ZtAy^mog`4n`#)lkc?K%)h{v~jqvl@hqLIIGjBAtY2H=W)-d_XV7h++fsM326xfvH|(i^{m`OP7SE zOCU5YDHh9;!1p5oRJFUHW= zJj9U|Hi-dk-KEQUL;I#Yv86Ye2M3u$zzoM|vT_q`J&L1dnvxWoDK=nRTKs$RR6QcT zS-m`wj06^ORY5*HLD?0HrJORFKQ~Em275Vt-fxkxY}FtYy4cnqShxE!wr4 zNgGnnj3oMcn^SMt^xlnjkcjBDq%@N&AKl3s z-D?{?m>oSj7^UJPE=EHs?fsM>*l#u<<69!^kci+LV?H;=dS#6L;TUK37}RU{r@uN;RIc;KGM?e6Js? zJ|iRkBCkNHK2Wr3_21-`!zd1;e;@w-CwV2kI=9o;w$gP#?XThQKT_8JdHDNy%kxN% zz}@9`-SOhezlOj6y}S~A2FjcAD`ovp@(Rmt;J?Z%j)T9atpAq0a*MLe*ZSkXB(MCh zF7y3QYt`ke<=#)aM=J2W#gd;Z>t5gdT-8KLSwCsprlhP1G*WB$9`;*n)%~IsYc)d( zpMT0LQeWyO%&pf{jgWa?8fLvee`%Z#rQmx%q}=-2yi#28wPmg0^Ve2Vuhh4;t=G4{ zweKufeCs&a{`~D3)wBH`l>-tBoXgTDkMvVsA$23PrPq5{|Atn5KTDGv_|F9a6DlLn z>%XH6{?7MRUU#?c@ZHq-%aX?*%)*GV-e>;g9cT}2Qpq38!vCNQ-nu9RI^)Bq-|+jR z`r_{Ja-s7}S?0!hPe0z5(?Z%Vc<$nPY zlp67)xPYBwG`yybtA2k45&RC+KKKXUH_qYw-v<%=ZnJFD4%fz_{$C)1KW&yjK?E{^ zYdX5s9qXOU8UKmRqJ9)_9Q+w+554~v5Wz2-Wh`KB^5Xf}Um$`%Y?h;MV&+4C+APXK zn_W7+vKi~q&ISeC&rrScE$cBJ(FMF8b+l<4s{gQA6xL%O{bR|aCJ>`sD8w;90TH~6 ziSREJ{>^3yi*KXYECU0Ee#}AX2Zg_E7F*e*-SN<$Hp``q-)xrda6M1uJVB$hq7(NA z25o7Wk{jukFZgVV!0CC3*x}LCX8IglGHmf=C@B{*5(D3 zy3(IHoBy;~ZZJOmgt*j21#UhNP}$)S+I*<66@Z3cM?+k$^QFc7 z$*dL58|ObV(bGd5`A3~-mHT&1J^Skd6ro~9zN;7WX_5o9p9`oc92>t@*IBfyf79$? z%Xr++QOWODb-42Nm?4FQhvPI*Te~3FS)+hyAFPZ+JPnE$6!4HjAIOmj56K7d_#nH5 zOm{;qjD?_fnxBzdevBxCZgY2t<`D@==d~ik-Y_(?ysQ0S5&? zy?KlwKo6{>raI4$Cz0_6clVne0*t9(xXDAT#(2n-}I}+SG7gSHi^~93%VnoC|+pSl=shnW41z7LCs2286V;piKi%e3B zqkv3=uHXc{CO8SAU0R7S)H_foSFaF*qa2wcEl{a^Hw;gckE*G?!6!hstvjeIQ~rO4j3QrLlD8-b9Iw&~3h& zU`XP`AU(gTg1^2Y9rp4tW#mZlb%ljDrOOi~qLUG)$NU>)`lBX1{eeY&I zdL$p@2#pDmO`2?aE&uf4-e(JENA7WAXa=C;na2SLmIX_y)xP4&tFJ^^Sdr>tJ2L$_ zX#!jbHtLK~@!aI*yGcGD8a3X2)ugrQ#YSxN5JQ;(J^J8X75Ho5?vEwrA-^nb@9$M_ zTt9qQA{nuziokwW;po9eY!-9Ys9!}k3ofz zCE3Z+3v#nQ{QmZ*a!mqNB6%Jx`;4~oCr^d?pV0tad3Pr5(hE96=b&edC4fwm3V>{n zP=sW$y7;4WD+qXd)*96oyXZ+oDYGg#D!1o&JEf0)pvaJy8@$*(0E!G%lMM-%xJt9& zYUZ3vnJWZC|%<5}{>ad`SYGv|#0nBGEC{pdH*weVToAuHB_D%FJ+> z7s6Y#A=@4&@6icUUFyE!Pv3;OW~RS)SD)Ywq>M^3>mY_3jT_WJid4`K7-(g%3NLNY zQ?{V<*Q7)sLHEPV7SZ-vVSx=Vofl**Pdb~O@(C()auwFGRhYcsoE*eRQ3lgAVd3Ve z5|23!pMwIZED`jMNRdTu9u}w%76XivO{kt<0DBp$Hig*qYn3mf*QjVcJp++V!SAR{ zM3XcE6Kt?WpGkAbp@R+9+6E^2AgyqPoneE!i$-~;cu|TT zlm}((M8EclL9@otZ9xq%)Jw?7NzT~CNwpysV~R4kk`}v~7CSi)T|&bU^|2eMxUJum z!L+!&rnrO2xTBrll|is2j>Z=UNypJQ;~1xKuw5L&Hx3=h{8Jh9jps~{=dhILo{Hz+ zjYo1N2%Sw3woDN9O%P8{ko=_#p4d&0;YyS}n zolVlPOw#gA(n(L!YfdtlN;2F{GUiG)J)3N1nQZQxY>}R9)tqcIm2A74Y{!-2a5ly1 zmojMX>+o07mmHM|l`^pRZ@*wGJ=sZxUmd{xOEFk5CSd1im&d23_|FFJU%m~YN1UaC z<*$CrF?|>9QekWQ-Fg@J0vQVX)xiD7x8Z#^UIQ$J^z{t0-$3`vdcQs;^wWBG#2qkq zL>>Hsf-{+(C`J9sG5rVY{r~IVhRYGy_pvAPy5Gk^21v$2X!cLW(!dKcuh3%AP9GBK z1nHKN*dx$O$q)$=TnWO3SxN;TB7@G_rRXBy*NY{LS?RLpF&UZ(=?I9&*Kk7=H1EA* z_T?A4#54YCW$)1#n*Xh-y#JXO?Aop2KBUSe=RU1!kdd-@-zKY)O@hsYkZlj|>4alq z_5DH*T6kHNbh2=vL#F&rn&QXbti_LkU09yWS{KJ_4F<0ouqYY$*vAc!Acat+Ip@vYYz@7)!`$S*ct0$sQk1w14YySK*DN8$dN!b|>(2QAuIi>ksFDYcc*Kr$J?!R@Zd}L4X(+E7i3?Neiw&4!i!2Om zEu5N8t-46e!ro1GQVm6w)@&Ue+W6D}7 z6Q@Z)BiAQwfV%i-c1oFue+cr5y)X7YUR_(PDL4l=AL8SD%zq6M1~o*2I430x}g3;nfSlX+M zl(Z@?1Qq!{N!aW?-CGC<7THacewRnkR|mnjKm{Wa1?kqp27P4`_^6z?->M11yrr=; zwv-)I$H3T6Ca0J(5BikrO40TQ>b$v?OA-wE1s7p@Nl-v1d(SDTtDQRUG%`>$LpNCA z5s1da=P{olhTfAg|6c0k(&KqCvWgoK&a;LOg4QuHa3WQ;l}Wq`FLUW~m;&8U#J6&2 z9HjHChnozk3w(+;&LSL>{*-Y^zR_|p{t-(SJ=04{u)9z02tp+(dg@_L}& z!7WPZIEllVi!yKFIRtetUiCXVXf;;DW=2s0hQ{1(S~ka6n=xktisaNFg^I;mAQybR zP}fvLJYNX-7$MQ$i**AdUxpCTw8S*6vn7Dix;#^!cTO`B&E6xYy)RUuHD^?eRR=HT zuw=h;qE%>nJzSp*Ep>UoW7{4P^J7W=>B7o&EBV%`22l+jSz_Rs>G;?6w^OQ$&sCuz 
zp8^MR8fu@)f3kY!sW-h^^SuER;^uNLvySL@jJM(Kb(gz?Q*ZLUxjYCL-D0F{mSK&} z#Cu=wzIrheh%v7dDER8-Suy?gwnTj|^;0h;sdtNaQVU-_%yh5SpIvFV-#Q1L@Of)( zQ@5H@xDZoyzv96acl>>0(!*~Ln$ON{q@=d(U8-^!uc-e{^QNsP``e@RTUY4>EV>|C zZhrgwS0Q)rBY$X~3}=g8puhNoz+hVuVKn_(m2Th}yy07TJ>B9h3MU-0PKvzczX)OB z>{Ix<933_^AGm^vu*-r(u>j}!3z9P#E$_tMvT+LIJv$&d<(|xOZ1Fg)$X{Z-3RH(w zSEyKimqz!s@VSIP_M?L5`1LgUVL8uz){Ixcq$??Sr%Y2lOSI>p+?F(5@E{_z# z4!ab6^!Y{n;belAmbkZ$4(_ZHjxUGJqs^5w_*+EsQ=R^ysB73EcAmk>c=E0)W$h#Y zYp3x^;uuq#gv}ujzyGB%xb&>Aj36H%yWtw*9C4z`h_PN3t|J*pfazj*EUJJ&G7{1Ve?QBN6)h`7KRrBU zl%T@eWr$LU4k^ty@Ch7|UGSjSPUmvCtL7qC^GQ>goOK2c_X#K08n~gDZa#-|F;_!! zTlYSViOAxyMfpB5tEHj11dc3)Jaaz@sQC#YbsuacA`11;7zk%B(5hp<^D*R(n&4)k zJ;+^*5ID*sXLl@?FHd+!8z=1CJz|XAz#Ze2!|Z|Lum|(;d^)5E*98AN10a}8VIYjT zHFOi?FpcQ!zUtL8pkZq?X|k-^hp@-Gw;E!>j7F1oedy&4gr-mG0WwLLoO?PP62L5& zj&hkNU^%u=1c9rKiBqU`OR28d^AwsR7q`D_g6Zd*Rq};g)1;D#K48a>g>Zqdjc8c| zbz*18cvX`cw#UJ6F;qPkKbP^b>i#mV@21bYoCmk@3;sDB2&H9z*r(6AeXyq0(r&|j z{lma5;&Myt^;V7MA76=carBh^C?s1PCB|!1=)cMLS9$j=75V>g_vZ0X@BiQbnB6jq zUDmOMEZO(W43m8=Wofe&S{NlM%8VI|Y=a6VjU`kndy>jdb|s0*n!O@I!d#;|=XB2Z zd_Ldvxo*GP^}D{;-~MoWzm0i4U(e^`dA|d80T4f{&$S0WjGD0ijq?W0g~Rv&aUzfQ+E%|z6AP}o_HUktGw?yMVVl?$|M!HTInfI&Ca~k5g$b|@+OTFYepTeiK$=hYq)4E6{mf| z3vy7nH4!6srwH2bZf@c>7-oK)FZTQY#*6ybq{PUhtzlkXeMMRBeEPyMeZB@Y><6WU z1Fslz3&zv*bPWjpSPR*h2X3>Nv?XNoTd`ryWl}dnL7DC8VQ8NreoL*g8K^V zF)Rd0ruJn0$y%NCzBz>7zDuQ4u~~WAhz>;+{0@6{4b;S|w?T^1eF2xTjRa*HlUXG8SlieU% zp+iB`>CRMdH~HSH>c;qbZZvy@nA4JTxK#gkl!ZedV?L&<)sfBXS+~ z#GF?Yf~l1FO2@8CwOw7+Z6QI`d~Qxmf095pi?{ZIVypxZm+a$#cNOia9IsU~@P|5L z5E)FzMIn33ZF&gOK@LaNM@UG6ivG337r##?imVSQ7;dKM*X}n8!+wDim9ZUT0|J1o z4<{7BWMBK$eWuM4jOznm{`}nj8^JhVsz0WRET;2cxJAr%McVdFoRV4wJw61rYCK(k2*zKRC}lGaC|+rz>jn&D`C0 zR4Sb@yp)?x_v4jW+aHO$IC3xgA`hyZpxl--|waGbqyprhsuRTUwqCkw0IhwpRobob_+bu1caw=J6qO{~`-|Q9 zsoi6bmLWBR_TQ2Y+wJ)HM$tblbu673hb^Z5lnp})^G*Qb(E;S_~^zwJ@2VhrnCU35ZIu$CLXUu;??kK z64*-5g3?Gh!tHTRkkHNv#8@^Tto=*>54Na)GoZP!@#%V$V&AIVlmDn&^V1OAxhbA6FZ2lkmr?>C)s${X9+9^Z~Z1l zP3Tq3;YCC@PQUg)x8E?C8{uKTQ(rA1?v2!jv;0%>^L{{cwmZb%Y;VaN@{LKunh6>hN@qzW^NK4lSOH& z7R@+_l81PyXtQC4J)18N;Yy8Y?xegLNn(F3C&(ORP(Z_jhVZ;8)pcN1Of_-qob-#H ztvcQhnH@WF=R_u1$giPGozy~U@m3kQLF!aZxYQV6ax24<@GMqN;S&G z%KMOj)~u`V5QF5)Rm%}Zm)Cyh7J)Ym6ehCr$hbXhTMO$%i-Li2rSnQe1W>EalHdsY z+@q~z5`NH;By-_tl&!Dlwe#*~O6BylFgH)mPdslO?`%T{tqsFK*P{>wfMU07Eg(BM zN^g9AC(Gls#ka8bI=Gl?yOJmO-uydywcH7E))>-Z$fOHrII=AMBCqfo$PsZaTA3Lv z$v0{RlGAT}5g)#U@ZSN~mrslsi0_OkZ@4=NWsCCfljOXnd>gv88f?oj2}Ee#o75!> zzrOeKMiGW`gkP9*`|)8O_BS$?aWeY4N<(*3Qb~rMHStzLfXB=}0Mr&hVV57`nRf;Q zye4VLNg&HM3KB}H$>hLMXRBuUA#3F z04M0O2`!cYRgTDUvCwF|y46r&AAs!Y*<1EX0I(E^3>214mK!^b1_i&udZ+;mtJ&*> z#|t&<@FqNcZlUpy{Q*oh1i0Eg^AZsN_hA5h)d~Taq3u9J`j+6-y-bYqg`yZ){ZZ~G znE=gHVgw>QUQQkWmBUpl3>T337oFIy0$!nn-6#laz=MLg+8Y*bq}>-)5S*kMj;dH> zFlM+@4pRdtW;j7KR(L=SnvBOvM!G{Tov-c=1I=+1NX5>;^FDwu{z3@xL_%_g32YnM z9)7`@&0dHE!d49d*!)PqO7u<8g+hDu{xmVWYQS+CF+6R^58#DsGJI}Na&|ik!p=QY z=bJkmr0ts8>P+GvB9P?@DZpzUW+K5;J3;2*O!BXQXmU!EG9;))C(R33bo8$BOk z_NJK`Y$m*dSO_cGJtSQZs)(@gJ9VuTz}&h5==z30xsjgqrA_E*jGWj~DAgQmEUEhn8%2*v0otcF$)5mF9Jk{{CaZCS+vV@njR2aVxp_f2njqWOkkT+wLG=B z=V~SnaPCtj_{8XGD&K>ZB<`-$-zvqIikEenHJo!%TPzogWbzH+qPQsk^_sy<(hdk+ zbz!O##NHdGp&x$#`1X9uEG!W?Xbj8O*_JtX$WYsOtxQpx>bH31Y&HROD%ZUh5aLS$ zqzxO?G62@9mLWQpgt@>JR)W+GK&eK(A=pz$fGf4gn>xmlLY&gh;Rj$aNd7sin}F*h z`q8SWeT9HbG}S2AR$^Jdx*VHD0m-2e2!>-YiEY@P?cfm7cNd!-E@Bo#PA&GMFgW1= zUqJINO)r>`4+)N8Um(Gts%EFC09|`!Fd1P!1ek*{$Q4U4J|wf7@(z)CKbho}1J`UNC}0?K(IHQfNb)sdVen7n+9VqdsoghEen^a&bl$&Hfcj!Yp&uAw1J zIekzB7($Fgs?37O5j1ZfXCFXR{iZ6iM4U8VGD1qCa1BE^abYElX#iQJ5=$hSIdp7=!9kOj6X*Nr(9M1MBv(O(D%DdyV!ED@^4 
zn@NWvRf(6EQ9)Y}ED-Hcr%aOu)r5qr)Kqf-aa$dANxX~R1O9-15(vm-Frwzyml zW=}ND4<>xpAUrv{W&fng(b|Nn%|Up` zpb3n)g=!Q=)w^H}b7DE(AkBaS zgOILghff;JlEOx$EjiqhzBFD{X++8afcF-WlXIaFm*J_rs5K%xL^WeC128~5DKd3A z>UB79;)Jwo)<_ALrc4@ao6|xeT=LvWf*Z_gkKaA78xdNlpgA_Pir^b8bk)$sRVd&b z-20BX2??sgQWzzzm`dw!La~T$faF{3%d*$)h-We}cDA_;?2v6H{pNJwSu2c@R5@iI zJHQtc%X1innNzm(f~2d$bxc*bya3OF*sWCoF|-p6I*7_3_Amg@tU8(03D0`4lf9CTV4ktFX$)$+JfAF z@ldA_Cz90{CbDC2b}!Z0Z1iIqCU-+gphuNJv0hK88>mU2?FBt^p%k85$p&o#NBRaU zJKwCOXY!HQo?is_2%}soZ!SaR)@AQv0aRk~g`3k5UR92eCQzm-{A-0_76r_U!3jnJ z0#*g=NpvjnFnS9~W)amy1ge2^_IQ)FDWDHB4vCZYLK7jK;!FTEhzs&>J1#cDHl5-}>K-d5RNsItI4)7|Es?Z*5=d zKanPK3lDp8Hm6$X4DoF!rd&r_Z1V=dKMCi$xcej|8)SR88FYiu*cL z2@!dO>cK{7+CTZa2bB1*RnbP*!o*U+CTon$(Z!D(62W!fd8DC*9+Z0Wfwv0e^+ZGS zNd!Nxv32Y8OXJLEhCG>wl)}=dQ$`>vdQ}&lO}eEsV7P7GNSJ%ay0#-nQOX;qLP758 zb9~%;HTgd6e#(VNTk0`DfE4aHQ`YjoA?0@&&eKi9RE=s%#qj5)5V$3Ep7)#smsTBqyV8meVU zv#ZnLyOm_@>dkpHs>aZbCA>05!_k7gD4$rFUq_?==Kg zq(8_Q|K`{In;)TAoj?bW;-5Pz+`02fh3+N+NNJ8WQ+j;;zm!w|;AyPQcY0h6o9tru z(jj&UL>cN$U(#Ka+L>mayzN&K^wHfiG2q&7%gTv05}MEm-W6y^HQ~}bg6Ja3qvjg> z-E<%6xlU)Y?<$&03;ovN&QG2O%Q(fWxHY!7)N8$IBxt(nwp*4C)i_>6&sm)=HA8e-8i}ctsgP{A<_6*7AF1Ag9FTRI z7U3IaI!t408N9Z9^U=94c&vmM3F*l{GRgy!2W&aU1n@h>#`fwK*k0PN+q`uw1=qKx z^}6g?(6QMEJj$G#8Ra`SLfVlzckwe)J@+2YmCJTiEjIqhC~uEjTzbUHC~tOD9KY<_ z{*$NS=`~k__?}T7_%>I?|DC6iaJHyIEJ~@^4U=%ZxJ2;AcbO(l&Q4X1yaBT*2B>`3*9W9MrpMbcC({mA}rs6a@yo+P^L(gxNierSUipE zhfeyPPcDAF$Dv5-4rK4n$m24jSGJ zdhYbp68SQg>Mj2DQOYrvaVp{ra&-`MET!ckNPN6m#6rQaIU!U7V(Vv>xczEsygW}ce zkc#=gWl;RjY3W}1w?H318QUAA&rZDuf$J`N8MKV14lGfKiZs3|vrQo+(;nBQmVCrA zLl3l$K|>*T8B_VA7E);0oLtLyH=sqgr|-Nvz*MK}-s32Z#my2pX6|+SF1VQVSJllF zwr|1M9}HY8nZ%uC|LVi+?_i4Iyn!GAC2mFsawaYoN#%Jt@L?c;4QRM z>KkLXox#n7&rKptjBIvZLxMV5EYOkJE|ntl4K*EIm1!Xb%-r6EiHQH)C~=c>(M%+L zO}b4$vx?k~oeiTPyO2BHA1%oq)Md;A=7C7bWQo;cfRjn| zxG*#u%%5axKwM7HnPZ^#1(R_!x7%S|7(T^VhEV@}g52>Mq`{Sk?h(ws zCLLjUv@l#+d`vfddw&twvOrptAOzv(WkdNl2MBK`L5)JWqtD@_**{cckj-icVFpnA zgSoNH5O;*mtaOqSbtE#YtM0g0wvzRVnL#t2YS&Z8qBz5NlW<_ANjFqNB}R`J6>pCw zi;h;C@{F5Pt!;_I>gKiJGjr^LYm)4)UGK2EHF&P)B+)06vUfPK} zn}hOcog%|n<7nir3qbLpW}yC)BC62}U35(TEktAm5m4`uDE;QJf#pMHyuUSFRDEuz zUNKwe$?64O0K2F=`G$-)cRu_x$Up`hsa z<87DWBQun}*_c{?FEXWRs7AA&4WaDwCHw=LBDNacUgrAJI3pdZ-K+b=fA+r8aB2aw zPz^yYNX!7cczZ{ynh9Uz$*4Hiq!~|pbQ&&`$T%_NrL&pEM$bB@|-9LP8@56domB3SX_HihuR0`D0N)%@zhgDuxlqjkM$0@#> zUd_MpUTb>_?#{cV%H_v%arZ+NdJQJ)r6s9X{gXxS3@R%yo;taLNigdnTFT(+6(VG%4udSBnOTqUh{W%ew=KHp)HWjUXr)0l zz9`n)dQ_OjpwR6oia*mfD*kPy35P7EM_G?aiF|D~w=GUeX&c+RT!9(vGS$tse!q>C zG) zqJenxP$vml%7EES8jGxU(QHez1luP~4OYA9@ufL(HdB`SR-dP`7!*3~Q??Z@vGd0~{ztcS?A zzGmC<;*|DH2F2^H`11RwiQwR#F^PGS*^ct|4}Kvm1_km#MH7obaqZiiNfv|RRr|*v z7K36g{z28m-!Le2tCrR1q5B8wakWvkycD9OvZ%zbXFig3>97a6{^io~gxh@TPz4n& zZJBOfl=Uw)x10KzEpMx)ufee$hZ^rC96l+^MtK zD6+LGFZIi(P66@_W;ev~)1jQO=?)HRd4k)K643iAys0M|p7s{K8Q>G-an ziFf6Z?b${5Gd_8n`*wp=;Q^V4AJRKF&ZJ@V%iFF?upoXvxKlA}cdjv3o@c#0YzsS~ ztv3xl3lDWBzQG=Pvp#BL{AElF?GHef?i88U`pKR0Y+IL34FO$V>c^)=!?^bWGfnJ0 zz(rb?J0sS24^x%OCr4QNnP3MWe~X+R5hB9zO4OqUQ*a!+^6)hiFt=o?Dm;2pBtcz! zYLh$VvLy`e;Ctor7IzDKM<&h3K9FB|>-$u%%`@L6da74v&?gP;jgCG`*Y)Z3owy!6 zuE2MVXZ~SgI<))N1cNH@NBFgpml1EeZ7S!|-lJ1DQ;*!9%1sZNp33{qoyxg#boy3) zE^9HepnQ6owHV3#j}{~UlkGANYJxV?XY}W#;~#IAnZ4O=^fkAjIOZ!nr><4?`=#R_ zL;Q9uav4=GC-i=9mbU*Jh~K{-613h~Ma$`xi=Qso_yL;#Xq*cZlDQOUDvwjMHF-)2(Ro5?H9p z4XS~L|E9U!ymUxUhHn`&Ckf5&X(i49)4mM-3F6mN!?mBdCG_}qifYbFfrA*3#3Y8Q z*ZikT2NNq#6|WeQ*ncsT=TI3{un$c&{tofmDY1luYdL2L?0Z=+x5T)(sksg5qbT{! z$L5`@K{9k%myUpohY8uDJKxufT$qnzLHzjrRhx{0ETKU?*%;Q|BUbqHE{gpqY`~c! 
zOi+WWp|hpG1XD~!KPM%fdo5PyOc7ahgYT%QkvYGD!pA0EJX3|2dFx|pUHREw{#z7R zuWq9-d{|-?4Ofn&Q*4&hVWDeg+s~)f*{G<))KcrPy+n`_JUrFIx=S(7A|Gjd>SE|D z3~%d5*(0?8)kDK&9&e$9UDAMR0e~DWX%u0{VP%PmIQjc}4JUUkWSl)m(UN`51d&5o z%O|?+>LLz}qMEF=UHK-`MmJ4bw-{r_sKvFIN0`d6p+PPth{><$2PbJasi=JD(TNi=Bz0vBI(KIeYAqZ)7+{!r$&>vb>38fxjfs)efdyg`7#X}H+(Sp2p{1$L*kzx)8%o~@t&dsA@SQSY z8c|iBB_`e6&&QQ!Nbf|rN77NibqD%(9j3ICiPGq-k3PsZpi2O9c$9u*hwNA!USWIB zHN2L=b@@(i*#n7O%mV4|=b(UaPz^+ur@SwQr$`mQ#!(8!AF*ThEE_wD)NAlk)_GU3 zUN7#-A7wi4;OX@b@G2Tfo0jRY6H$PDHNNJ}S`$do$=Po*=o9&MB0tY|YLe9#+&rTH zY@+{fk7yQW7bV}{pK!a5`|ZEx>|&1?nGgG8FU>9fVxorrQ ztf8gjQgIYfPVyozpMw{Kt4x2qzk0di-Daz{$7TEs%Td(E#AmpRy>4G zk=#~%xu8@^dI{62C2$hHBfmy61P9rdOM}|&f6sl;A>kZ+3~mfKtR2w>I=z++>*te_ zkC3N|!UJc|*De zjgZw~k|BJmBNAGvHTrQb6xZ@o1V5oM!V7puSfB^kfJnw^FC|Fg956db{o)(k(K4kc zQx#9#>g;*lM81?Q@dORhT?l>_lO)e!&@5{(lNlk_<_5C}9>oUfg2`uHBj3}9Q138 zyv60K5d~X^qvLlxBc8nU9Oa&H8G%x6cs7HavyZeWG-Xj@fOQ ztiGkFe7EVONT>6~vNDkt*p|RT(2j98#5W>^Ge{Nqgy+O9jV?TvC&0xYuvo!#u6e4x zcmE=AN#f?Z88)@DX~vbeQgozABDSJQ5*K$Lbk8G3#%f-3vIHcbjXL0!zg~djk2KG>$0%pHVr1>rz5!pJd(9MXA$f`A4|p=^rYOy>n;ZV^M2t+)dpYv5-)d-@IdyQs~)% z#@4F6+~Mz#K1wLfDr5Ju64sB|iVTceC5_v=>X+-e5wIIdQ(W&o#a#0jZ<-;-MBVUW z=^sn;?v~r^dUURSnXg%TPh6(@U~u03jxzpJMDXE7Pl?ia6(a^Y!eiR?l^!FUMff_7 zgBh5tz+pBC>qnhCGDX|On+tASmkeIh_2)7c~@y*oVAd>(FjSyj2bL{}!) z=HfKpy+_Y@k39JahxTcbHcfVoUG9u}o(Gy==Yy89DgdT1dz%?laj)3HnDJ|qAm6@B z4Ydqe{p2avtj`s|-7zB=+SKjLh!NUFc zr1gf~)9ZxHpI4L_MJ#aai7#IT+75#IU>v{Z^x4ZYQF>pNqjG2OKTcLC7sV-SWMTQQ4!{lA`rW@-J_yl`^4RJ?}@tS zcR-KPW86pSjscTbBxfHo?4)ji?_bq056r`r;=Xa%^j*3&bc>1-Y4q&f-gD7k)M~%H zg{jmt>AJFGQNgn1olGF`AH4x2YZ$c)1Z4xjnScHR{!#Ug;KTmiFp9r%p!vTHqg+*Q z82@<~)#5;Lh;jI381-I0o;8e;6G}q+{4$I>5b4WTJQMuok6{$6R6^<(PQCx@FiI(a zR2};JFbXcm#f2!@`U89P__@~iVbr_zWUcR2uf&rVI{$AEqyDe0djCZ1`aiZ`kPNkZ zd90kZujgeh)uDqOCTqo5DdExjFvQ6h>b`Co*`#fdq{4%(~wzwSvAQNoDT7smVN#EUYk-F0_G#rxAtM1AVmjG5y{bdK-8Ckm1@ z;~3_CNY?e|yGbzMmHh5R$+dT_}Ywc6)-@isurwhI4;jj#J{I z!d~F^y`j;YC)Q2;fetbf5;+KtqX8H`KDaiA3;Kj8^;F_G%(Z19w z^Ze+Q$8X*%^Lh{tH6tb6ILF7mQYsFr@7{@x9Y+;uTT?GV{72>C6D{gCWIpwPK^YqA z`ZFnC(^HoYse9{m-;zCjFZsIjNjbe?1=+LDHLRAZCN6)z#q9hl1C?IJsPVec(Hq_ViY4pYW1jks7DEM+##~kWcKN# zKJnyY&bR_6&m<2v0%Mz{y2oyA$ zx{LivrOSNQt{rw-b;WxMj=VHKxV|m1cH3Ub#IgdMzM|{-oZhm}`FH(3I5;X_NjTL1 za&xKTCP$IPn#`PHRT77+cYQ)_>3@b#HJ^5p)!zZpg?G{;Jhe?{}j z&6}58eYyXVdMAwOZ|(NcrVuxw|H_3^ju3Hen4(>>UI!yB)gYmN45JWnZ)FU1*j;H< z`Q=#giH*^RCe42gqdqQjC*h`dv9L$+<1!Z&PKZzFt-^uqo5QGMwjmFC%!qY4E+52% zt5zk-Si`6th}k)PSI!R@jr;d`?afeZeRc6crJq8;%CqNQzbtC!&QSi~$9b*FLMU9M z@U7S(f06!owrBF&MLycmuNjy%Gb#^@9rMi=cdT>+=B=KpA9@p*SC@=1 zGw%;bH6aa(N$z$!J8wE1=@9kc{YigYqmd%RtGr;&v;I*yw*zSChUfG!?fnhT5vX9F zC@lMeRiZiv(9?t2%R^$dqUBH1vHNXTz-dhU!TAIN4a?csV;XiO#1V`DCx#@_Q6vV$p($|j zfhdRI2_`NG%ngYnpt-<-ize7c9iJz}Gl~x&FRviKqS+$_!%)qlpP?tuHsR;B!5pQa zM41o}+p$jmkdz}Kh1%HoD`4m9kh!J<%(*iPsiG3FlO|ncUGLBVfe^7&U&U0a1uWt@ zKXu?GvEPYWH>~XdKRG5Bz==QBwfCUVGRiM6qMnQjK1E&VjbxY}+))kxLS!M*Mg3@} zUkDsTm{CvbimuQROa{p5OcXWMcTg4$?TczkM6CdJhEtqr^n73AK4Ob%RA4@>CK2_G zPD|{Ij&wdC{|dCC9d&~oUC)ggD}-@RMi=D!8p-J_7YgU5Mt&LMTunJ+)eYK#qsVE; zJR_r)M@cE#e8sQeEp%j#JX-60%$tdz4?~D(UB0d%#I&kT3Ja0mEV@?q9f-aXH^jmo z>BiUb{6dmNvTzBzK&Sq47vosV{i81ZmjlzU+!?LGN}et0zvDQcmE?8aFkm4SDi3oT zpFli~o~a0Q(+v+I{VM9=A5D-T_R|v=6IfmrX2wQ|y4{;yDX4C3FS!)A1Mb+qv@cR~ zC-5bwA8~A2&%UQLJ7`|q*x>oTXc0aRJ7M=(m37j5)bLa76B`f3WE28-lPS&ng=OYC zv4PAvIMoK4FcB8i?6WG^$`P$;{!M=U%&qc?o$0%l548Q(jr~Z6iv_$WRcOG2ki5cG zDSb@3jLH>+`aYudH*b*XRcAj)61gadf)h4n-W^)p&W+HwGAv>RnyV#^CHmW<%yf%L z_5xxAtWr2Gg?Oc`H`(p@Wz)x|E76LeLnwJ33i>N;D# 
zg9KA54$Jk~Ba}%;a~cgdlQ{IT=C$$K+SJde3kyEH(%RznwVm?O)t=(|Gxb1l{^HZiH+Z_XSXh&9zL$ixMqD+9VKeVSU_;A&Y`_TX4eX73_q56 z1CixxVj@2|H#DBibTa{>bmt1glUS?d1gRqt#-Pnra-fNJsL3$ag~7Cl@-pYD?Klzi zlO%hLKv?5O(Ft?Dlp92XIP@>TIM;_H`~fE89Q7-L^VlLPWk0?dApR~N*n-XN> z`skAGFe^T*53uSDK6FJyT!MZv=19d~vLJAsu zsGhiRb*!8j|83@bO0#EOC2<<2`~@Md*n&KdFWv6l^`-5*^=x2DZ+ZB;^=v`->|d;B z-v_36y-&Y%V};R*WIe_o15?aS0nDcL><=90+@A)f|H^vyYf6*(TT1iK)-y5Vd;dGu zvsXT7k*W6yq~CCypXoH~pE%CXiS*dtaGXD^XPY?A->hermI7X!d(Hom_3S&2v)MZL zH|yDxu~yvpd}OBcKe3+0<{3^{be`+q6!oyIXRN(f?KTt4%1RU#`VXyV7{o1eGt1{m zR~K{SZEl$^u-TK_MsKRd-Lm$!d~vmZF;}VLUqA9vd^&+_GM0WAsV!&BJemY4U=eYxX?xZ~Vp*;{<|OM#Eg9oJVaz2%i( z?h@nf?4PiFRr!~?^zjVeKd4Jxct4MpTu0pX*{+@toT@Fp0-yA8y5?NK;dO6e&P?hc zi#@_5j7WT|j@l2)+S9vwC364UZO7N=VZ6<5;KXJ#KghR%@k-+TBISEX(=Pb?Y3=(R z?J69ZiUWe8nb2nk?j0<%I*6_;zduj3*fE_rFyryy!TY%T5YV+fp{2|BKezjXr($0A z4U)|p)Rv6CdJOq6dRD*tZgCdeX1mCxEfj5^(+TJ$2blAC03_1VJ zXx5GU+DfR53Nga2ZRF$do(cuz9pXM~&+!K52Ca^w8B5VO3(&nyxPx!w0z}88nAU~F zh=7Dc$5|n%{X7X4xAg; z{-gFca9iWhe497V)Y+=@NKM*C>;b$;r3v-j=S)Gwb;66wtdLZE z_NefbrGfWzi}KQ3eu}TQ|LTkAsX$k}&Cz;Vf5A=LGIv|$y?Y+5supxi-|IB&DMBhp0z48TR@0zZUffbyhU$ zbo>`l51A$E(X&N|lGv^LXb;=GZ5OT%tc~mbD(b21in3kI68Tr69(>rtqs-xxB=_|h zH`JrPCflXrYwNQu%8Yf7dPhwJHGx z{Jo2HYl9FtW?iu{IS+s1FD}+wCi@J|FB#^)_)g>aBf9-NBz*l<3#O-x_#49zJyP$^^Mk+8gYS*xZhl?caA&S$EtfmeSf-G6ZQy}a{X|zdSCzP zVr`XCHA%Ze;exgZe4i1wi3sKZzv~0tpXyhv9a^3p+H|o>I5k(>R`?_z+H|qrlSf}* z<}E+obg@#bLsb{Bhkv+OO-uh@apAV5f0`UCE;!Ko{2VI3FjxFeC8kS)zwp?hhrq<@E_UEB+nkpoX6ANW5Zv+Z6Nva)LN%8e*MVa8 z>jMtu4bbR$p9nGeB5x*;ASl2H%X@O**<3%?gC?(}CWD(Rp7XW?UZMr)1( zCwFS{Hcoef^hC3|A&)7G?biDq1m3r~zZ~dKF#B&@tp7g7^`Ah(e~EGZ&5YPd0ckGG z>$rw(+LsgX8Kfo=nm3m*W=POu?p|M@x3CzN5|6D-joJMJxt5|YGB#wYK$45G}i_CC@ge)kpWsb2dF2jLE>mx8}kvLO%?k)Gw zHcJ|C6!YNons6Cn7+*~_Hunm*gfM|6G^c65o3E4SuMT6^BZCZ+AQtlVRK6`4Fk}*z z9*dG9=6hUfSwYDOgTb-^H%#9Ia|aYU#06B2;=8yXXLrp<8q|AcPwGC79a5FvN@dJ* zC^VZCB_;~tn+n0eXQtvv5yR4-knrEClK;fT`q!$YD|QnS{tsQOf2m5IG5=#1>z}I< zOQ)9SS1K=Ey_{|?d`O=m$wNqBu9)Ea$aR?KnmY+Y@31Sf1WFZ~tOEuvE*k#g#$LJ5 z#cxqf8v*(Q5xA%iZ~tgFMf7<|;X3Rwpp`Dm?73X}+QNCONERZX%U#k} zC<7E-7!h858Ur(gH!YcMFZY{^sA%Y@uDnRU%j4z6$z5<7RtvhcUWXJgAy==?!)73dFs>GA-IZ|{By7wFnm>%~|0Fv$~7=e~tQ}%^8#?bEz zcVdF%vn=kFIVz6b(f`cT9K@dGnf#e9b~q;sx0kNLyv_uPAr7M!eSmrpEy4HMDBW%pM){6KSPFD)90`db^~`Ls*f&%{V1J&$HaTBoR42P z(4BsGAGcvapR}cEvqXf3Px#_|-m!Z$U8%Ulw;#9EsaBSp@0#r~do@;+x|ci8#3suj zd9^3sw(Pc(nCwKu=&v!Ze-hpPUX=)5YxC+07c8&---vEkR1Z8pq24y?RuFC?!uKxn zFZ#fLQI&N3MIZPsAR@2Gn|in`is*Moe`Zr3_@}CbGbJIn>F5;S(aN5)jVLetI(~P` zD%L&`9^iQ0#p#fJ@3jupRuW5MhBh@!6aZYM(BI9r`z@ogCxG8%_nzuo?ELDUwUx=G38{!!(U6BayE4~0e}Fj@lv-9L9$7@~fv$B=Dka*H+vEof8N3rM4cdz&H87tD~(oljv%m z_1q_w52=jlm^a33*|C4JdF!qT?1izUiu}aUNTiS8X^%I83eeqo(^*MMwC#uXTt)h(1YjD^8C7e03|u*s!+mntSy)tGX%16L5e_;g!Ept-n>Q>274nEwe!!Lm33J)2Qg8xIx8z=#Ckt& zOI98sM{^oyV1b_>Y ztYGEv(@?aSWgxsLt2SDQ)3qQ~ZL~3TWo7+ZuxWpB;jhic}_bPLbU-`~BTRbFjunMXmStDqBKV;r;mrK@4Q>o3SFjHgDx_3=lq z`rNo&FT)x~=l!9tTR4iIp>=Z(93h7ZF!yQ9`8kF<%{%Zq*+f`a!~&;W^r4R#LDs4t9c=Yj{})p0A%1)f!{KX%^Gx*B^9IYY8sJ^sPN+4_ zE&ARSU4Bm_XT#QZX>7>V4?J0_m`{%5(pt2qaJtf^cb%^|P&R~>;Uc^;X1(P&q}mv9 zh|3SBk#3H;cnPKLA8U%%6y|r#)G_l)H9<;OF(Oz(JBqF0BwadI%(dDr++L%84s{W{ zQ|1l!OB=TdmsZteEnH=iQHRuTn+s=GX|un{X?*0;fTHu8YA@uDU~LnGMvlu7q*?`? 
[base85-encoded binary patch data]
z5fp+Qq>;EQJa^zq0dciR)(zQ+gCUd?i3FG#f?`{fcT1+Dxv|5MnHG$RjvF^7VsV;q z7N-he6;6H=d?M{)QOShVFvZI>(4vJ>fZtiixKDe%N0`kb1|(RHuu%dWcHEk;jb^n= z?6z^|i(n`lEK-T5bW)ih3D<=|$69XE`SbmQ3|^q(k^lIRDx#S6yKhFnnz%~uehqoY-$Rp`|5$n5+MVty_KpzzY6~oa-Mp@FAl?t;D&;mSY!!+zK5g<^ zR2CiFBx6Bv@*0%dY*DSNrBF~bJe1yDzn&=Zpr1w~saPi;8b|)zc*0I4f}K|2jQ8u^ z(fA#vL~BgTB<+QM5$#B0qJ_* zE|I(LP6Dc6yXn)jrAV)*9l2S&NjBN4{YE}&TCS`yp*O|^@fEfbDR&VBU zo_R_{8Tqcths^A7^G$J>C~&_#XLs_M^$7m1IP`;0c0wykRtD;fLSJ9K7Q`|VY+Q-Q zs<%jUwjzITq0QMK_0IF!!`Tw|ww(&I(=SZ>5!~A}_6q0B{A-sqhFO+Eba51M$>|&| zz@zGnCSQj_*BEduLuqc&EQMp=O4rk7=6QvB%#8oj4&yJnSR|kwU<)w+?FTqF;Bm9~ zdA5CtO0Y@jPXj%*Fj59>s?GI+z9e!LBEAx36gXMLTAyaK+)^V$N{*^fwE`KL4RwcX zA74=%E#Tqh84RoASM${V?sxmvWC&JQaxcu!`@7n<1jK;ppN|6nR<-1>3A-c66`$<} zVD`#b0e|9(7v}_IJW_9fu89mY{|=aZq$S8K;q3Lm3bRBZYgk%%ihldzTzx@mB`hJy z(B*;h!A5lg+HRKxV!1V2;fo$wkU8`I6w+a^0Y9%gSzDfMReN4xb^c+4qV<3$i&e%0 zUEReN5S+44Aze^W^omr|ZFx8-%DmP(gz(X!~6 zU*`y-J!&_g;16A_Uh$L5R~n`Q2(tGwVV6&FSmmd$kVzPmf_TX%2y2Qghn>3ez&m`@ zF(M1?T^xyF{?>f=Fj=bYjF@B0@tKg&-1J3sg884+^7m+CHpkdJ{(ZDx$fVvsG#Ive z!Hqjy8h4|3^l3#4ezfi*^v~Xke-Sl*@uzQ?jAr~JqgkY?m^(22IA-PkEUPO5pTMdE zo1CQ!24wv{7QsxB=&nc(mLwGPxNj+na+y$JGMev}u#MwrmR34Q<;|J>!CF%`z3<^G z{sT~7D6#8>peArx(sg$Z#aTSaJDNh?SY2ChN5W=_E*Ev4i8B`_#v;c>0{e;d5_TF= za@e6X%^En0O}7O;Uut%lEo@R~9GZ{AwL0bDxW0P@qK9L*^SnhwDb!^FkVGvW1S{#Y z#&f~sc($Q7_Vu6}5BJ-HG&W0`Yq8r?Wo*F&&xlsEbW4|i;H5?v2b{Jq* zTOs8XqO|HMS|+}M?@2sVDBGN&s=Asyg3#sCQ-7aPQuNFh+zIOapR}T z^%t-IfB#S7fII*e&g0)cI73Ps6~C~1_|37?N*Vkq4>K>k`478?%iUg4!giw=gY7?d z5BWRcyLlZh#+cnh|9@Nv{H4Km1-K0Fl=u&W?ca9~@36xe|HfeZ;r?yBKMl46syEv! zezUjwINNmaqA0ty@zW23?OZ?b?*`kljr;2ugYEA!Nedg3wcq9aWjuEle)EKrwclI+ zVX!S{{w1ri`MbgPqk+ATojzJ{RwED74%OixfL;CjM?Va8@wE1z<4gbix`9L^xP19O+SQ5;L;NOA(n?XQg7aAWta&~(?lkdc%~Ewmbt*e zhYUA)g=iHb4$|1JWNxk|>1swSNAnLeOx|@7kD?9gbTR-%_VX1KrmaE=kum|Rk#FI0 z7foak3#TRRgd``CA;b1(N5N8 zVPX?bZA^aVlILk{`OLj|*$6#mmz=3n92BCEUslw^;O(l2-9hB+vek$oXnnSfi7C_~APjnX^@89=g7mWjG%U_#uwV!Z5OFci?qwQSd31WZM-aX6Ubxvnj`v7Ml;|S+Y;*WY)(LUz#5&wHDjIZkY#fWs z1L~FDrS(1$*r$w_1bcXyhmeLAE1v!(dBCW~Cc9ddhaS)xtI~nnE9&)&nrb?x*J01@ zSz4I#T{82wwU&58TyCMPH1i1K*qJ?Yh7RrE@1GVWu?6AlMlq>SJx|$NK~XrY^Poq$EPEr6;Kr~C2`#; zp=Vk^m`lZ`Q1qy)D?$iKl~2_O1?jOZ_}_TMawySS*Q>mc>e5Nu2^SNPPIGn>O?o74 z(dN@D-1d?0jv2~=-KekMMesUMD=3(VK|(5xOOGh3iQ&@yRdR6=fV;210rm4SN%1cq z@LSCXvLaQYr_Ys$Szc)=BsPYAQK4qd2BsIZrx1#kBd$MyhKM9i_j?2YRdA;ZA!`N0 zH+(oV!Cnz{-9$VfD*&&Z5d!(u(OmzC0HD}Vu!IM_u^!70)LU(hAWclO z(WRHI>eaGtUXu8z$bHMO2mTf-D#naGI&R&Bma;wk-khebo?1pqt6qgQ71`T@P(Xa* z^oY)UbgALoQUx*oS3gjx-Ys)|gjZO*R=+w2xs@t{^Mccc1_CHhwEHW+kVQICTbP6-t?ViQq9)!Q7QWcuG zN!D%=^DvQ<(yOae-8uPIG+KZ?>!Cn;uEi+@xB9ZyFcfFCguQcEnyx1E3=wB8D(&p_ z@lrojEncd+L=F+_*Py<@HOQ{xeLMPDAlau=b3A(HHF1juw;smZ9K;-=_sBM-mIZIa zXnl;a05G75GG6L%pcBeJXgVq3f->l=f5XG`Esr!TJOp>=o_GB`J{Kc*n0qy+hZ;73 z?F>i1bEuu!=XOY<8wl{0-B5i&q$eN1Nov;4d3zGCT4WJbrZI7gaSteb7sa>0#u?k@ zTAkpA?zo*|7t7k+sWjQ9M@x+iVdjXv-6#1iJ`uG9kbG>gaW4$6amM*F&|N@sn(1zu zRKacZVz+4u@+!Ym2by`$A&XX>&N6Lkisf0Bd6lXm7k5k7fR({BDkFPrUYtl4!>Re5 zDR)(dvo74u#c%~anp*k}T-Fw=tqiw1Ymbwo@k#6&v%xZDm~PigTn+ks8Y2W>tqJqG z!z|T{ubd&k8`is^5I{HMdPkz7&0ag@v-JDq&XEWjr~pU;-n$AJf!gf0D*>u_#+2(q zn07}~K#SGO8oC9nyQ8WK#Lx3Dy?({W?;VA8QGMI2nloz;<`5^5>cna=GL9T>L@LkI z=^va-nDb?nZha*4pnpY&15%O^rIotv;;6reH*~Pph!FCHih+15QM~UE9;dwVb)vM_C>pL~GwT_r%{`gttD{fBY zMhlTL0a^_o_iEIwCgmaP_odzXFUl-*yln2kugaDr5DIK4YDm5FzYe|k!G{YQFgZWf zDrfIl3d&hfw1dRw4IlHW+|FoDWqC^gbkjtOi_~&4Mt(hU_T!2t#qBFMt*&}{m3gX` zYs0|k-G}8~Z)avrKa$lEZ2dJ;bp6qO(k zh%n)ReX~mX_$?Va2xkRG?f?S3*n=)0l<~&wZQMjX4X?C@aH=hHbZ`*bi+hYo>divM z`M^GbSb)2#4-Cg_<9P_9);v~4i~zYJ=TU6K?nSosH)L02G 
z;YT#wv9Z8b4DvmDOU5p8|9a8oaf2&f1;+IDPLL)S$y6%?bs)d3r)A`wZw;LJB7j?V zUg>B_fJv%Tn$^{;h|pByOPYRx{Xw*NDAIa-;Rz7wauM%WMe`0S3fBBEBX<)b_+>J{9P}O<^&81Q9N?p}tbmL?SPL0+2CjNdf+#`Vwg$tpwUO zUcf}f5If0FIq{BrlFd?*s7Z35Vp2eL63tT%f8|&*N4O|9{8BS*E}gNJLb7T`3J)yV z6eE+g#b|z6aI?p{<)hOxgVNyccyc9k8}4J^FB-GeKE;9^v^rW$UAAyJDbkC zIL-qwA+aGyTvOx*qd0yhlDGv)I)fxXK?0djR1g%6DT*!t#gK_&YC*Bgpx91OAf|i{ zNIsWoK2JbCUuM2QOTOStzVJ!D2vdP5q(I!XKr)~}DzgC6QXo51aP6c3%2cQbDO5Hs zR1GLp&n(nzDZD;YsC`nX%k)Ga^5mxJ6T^Tf#+gq{Tb`KDJh^l7#Db~F3Q}ZaT4Wbc z^0?3Y;_&{F(xrugwmaS&4p3{nzmS`r>m5}8>N z-BJ>Zo+*K!lpvU%CPJPjn?6kqc$%L1G_&Pt_RQ1Vlcz|g(tJp1p=oJRKxs*4X=zJo z`Ali$Nof^RSq-GD&a|u{psXpgtfi%_ZKmwSNm)Bnc_*a&m1%i*KzVOwd4Eg!;7s}O zN%<&K#hX$ji$uj_Kn0+bcB-XfPBYJZtYVR=a(kL~*|c)KpKmpw@&j?@R!ik>K*h(C z%Fo0V`;cdc{pIfio}HAIt39bWJ$cqctOJm#dU4N))FRSEp^7-GtcXfPx`&yBxw@4Y zbL!>tR-l^Uewoy$7jb$un@ri~ch#3(-?~lz z?Zaok+-yaHDpDt6=&yyVO{56`G; z-?=co!Jg?Y8a$j8VaEaI9PW68-@7()dqWOw&TIheMe14huzPGf^S;NMwib7YVSZ6O z3+MfL9xnlS3qWJL|Hzm5b7stS-_PSOaT9J@J2tShMt%V5rW2EZ+afmw#eD zZjgtbT+Gn?aaa8h`SOqEi}$`E=LY_c8S2{BgV_S~{Jn_vKfS9~y8PXI95bEq++O(i zUG=ZuRc8qNk9AQhEdr?F%8IZTSZtbHId1u(clQr&*$Ail5ha6@Y(?RteLrLNPG0F8mqV-o)d=7rH3RR|e#(dp*ZVRTKI;ar1Yp(65u`z8}B7 z``s$k`Ec5yxZ-!KP`4gANk+|g&fn(!zRwaLeyjcX?R^k|@X>Epp^?0mKdeHJRuF2! z$E(RkF2`%>_Lax$*}l8S8^{P@j8$lw%gI(raplQ}^2XhhZH!gu=}z4{m(!0;8Hmi3RTsM8;04+QAN z_iN`LbgvL{PDoNHzSehNWzjl5jSN*-Vfoxqaehxv5+Cc#k$ug(=ka1Th1nXOmXyK9 zvZ~2nM%0G#pwx;j-#nIb85NQQ>h~j2pkTFR&GWwRd5mquLj$Y7-(P~a@5lP!4RE>^ zN{zamMVUvN=d{8j%h_0|WTyV{!+FH@OS~Bma7ZQxT=y1UhOc}$u_tk{s%yRQe_;NO z!?wnL=k~&&Nonq|5%d1|8O5?Z7cxm-V`{+WL*)^h9GY|;0#m4ejC8XgK3nBJCs-K) z4OGd;EMMvobJlc16VIFvK0Mc2yUP_0 zeyBLFW&FvgjTn#qPPUPdBKUe9euZLlv{Jv~)s2r@1neWIR6LHc>rKRo|;AHu1Pro&v}5IE5ZweMBZr6hi= zhS58Ayh^T+GY!(^iX5U`mVXXd3WYkVOX>>AuUmz%Tf7zl>MRV=WL~>$5{XN*tmmId z13lwgfC>P`3!&F9sFecIP>uJtczxiTcOg_?R^+b@O7li0#D0yJTMLpJz2OCmD#~QR zKcL{3tM)D+yDGbegeH=aa%Y6AIz}{?9x^yxddYXoi8U{@|9O5Yf$Ty=Dk5#iozL14}MbD5mAG1)A|vz8+BnLjs)!2F+ddm zj@p%+caHu1#RIzU1nng(Z!R8s>826e#Mj;32s_Mm!pk{)_cQe#g*j5n!Y2Epxc>qa z?W1JsOZsmgDZTCc!w3AIG81CB2y>hM9*SbF?tU4H{;??hHx&J_S@7#n^nb}u#XA7A z=fpZ2AC})$uOoXE#y z*dFjpan}|029_m{$)vHo5rQ*Kwn2U^?hl@WY_!HeGZNUKR$d$3;@7_x_rXX!$lEY= z8R zWkxY9%+t;Q`A`t2ROZAFK^n2)WFVApso+OqQ7l4hiU$=v28+SMR=OvpQ`}{M>*fe7 zc?nO?i4&q)Y~ zAVJ|pl+aWN|9*I-CR<5d`FGh$mb8{5yv%k8mf{MSWc(Mxe$us56wy=o(~t{ioX~T! 
zw^*>IhuB&05BExiV?q|yt7|?yxIKN>VW$demqti_u`GQHP-0LMhz}!N+y_`heaQ?{!}R#Ij|VpBUV(&%#o3`*&i7R-lp=+ns$Yj^8S z#N6gz<6|7S>prL~4bk+mg4uOk>m{Bqo$+QvIOzq0;=^vfL)4&Nj=ES$K{AKix3p#8 z$gR;UPfpaYY#fSA7@ab5dNHz`@GO0b_t27}OKb9_s||1Z9=OhvK!a0AQSs#QD>nyE zL0)W%m0vz@?Rjp3z5P%k=M}X#3vgqO6S@&+mX={9pRBK7T(JB2YF85v15aA!K)=Md zg`&EqW{$M9DJhZ-OoCz^p4K%d^~VxC14xm{$V5!dN(#3~Yn17fMDvUSku$w5h};79 z)NPf*m9U-FHra=M%E6y~4}5*dQ)0f3kkf8Gr*U(u%g^Rf-k|DSaIzdox_ne4yV_$w z&-XRn`&Z8^Ev8LI6v2t#=}k_FU+5ZW6@f9k_a6gbeNmT#<*zTWWB~#55C2Zre3n-_ zJPPvxKI3((Ao*E9S*Qa z)dRn^j_Cwgax!f2CR=}IjwOc%HiPM}I0Gzd24QY2ZySCnO&8t|0I>0fSZ_%?JS*M{ z>QoIRG|N2)rpCT-{M^`|^F`xNx+Zx22Fa&7Yq@jhz@WX4zjVP_ZhS{ir&pImxuphL z9DS&>DmIBOZTqTWYjuIXcs94k?_G?0Cf)Og?JM|VxssKW!SnJHKjkX5B!0sTE%)8L7rrieCqamd6=qrmXiGY)30*CEnge ziXZ)yyUy#!%Oz;32k}^#Cc@cOyaLB0biq;@vvKCW=_iBOB+c8%;L~bbOwK%P%|}Qf z0MX|*0YF)RMI`dZ*|}W5+Z_Uu0sMdtT-t8mG@@wM_k-OxPWiX6ueAh<4#ebp_KoYp zD1jV*#XAcn$=YJ?^t+w_<{ep7P!zW(qK^CG=`-m)`QtM>XC1!;C>}q1lM^tHn@Ik= z@LT5Cx!6x#7Nqj1UEJOCvkOk7^I^i3;G<9X_Rqs!Z0Ub_*rokt=|YED*^t_CWkazhdZSL;!uXN460EU?6Ctag+AIsVbHdhS_th4!xuK5Xo zgS&Wj=s+sSP|2a}C|UK{J@p{5`%B-uw0r}S^bRY9Fr6G-QzczK!l4%O@|$30a0NXu zSj*H%XY}Z6q`BJV7aD14lv|u?MvNNyjQhyL!PHOdLtgb?R?)uM3;>*bTQdDqK>N=c z`3nH1{{+BRGi(3z0Qg+wefx>B{~iGT0ow%6w5|0!U))_E@~z(87`ggejXbnRo5{ZT z&j5I#Fzb$>^TaZp&csv1#DQ*N5u-)}t&3Rpk>RT+5z?DY9ERJ)#Q%O2fed%C4rzhy zTO3@K2%^&&)+vlAf+iJN{%mraqW($(h9Rb@K)>Gax{~P9RU-V1{Q6+{N|HN6Ow*2j zqmj4rYZL)XYmCG^x?TTcnv5mBRB8pqo(seuQhe941?E@476lOF?`6k{2q>J9|DLv# z*9LQ9AxHU5{w;H?OJ3f*<@ zwy@FkekA)rl|@acq~~IcX8x#Zh2dMg@Oc|N7huxcgr}I!eR{t`9&4C_P$iRP0#8r@ z@F>WzscGLeleXez$xCsqRPc_7Y5i~yRs@9sidL8&(!<0k z3UC1w{0>zYv4IpaO8XpD&{x9?($XEF1PsEYk^;oKtw|5k;>?}|5>76(fajrT$KJrV zT3(zLqlWu!pXp28(lE_^vA}^y4P;x0&w`!}RY=|r;=L_pDn(ytm+Kz}xpZNn1lqi! z_>{wg?aS&r#qqX~d4be4nyff9gRB+r{i84_g3#u`wG zM^W<5OJ4AM3XwdJkrA9bR1^J_kzkZXYV@Q ze>-*`!2S&Kt|eR`{bVe9vYsAlDxt++pnd%G#ug$sYY!_yicxQV+8ryC_VwrA}SQp!BdctUI@|8IXQpoJ{aCWTY z3DA#Wal0u4*Yy4t`O6Z4v+Zze&aW3v06yTEpFL|oS&U2n1pO7@YBJbsRz@b`@3RlT z7jF(t>q-oQL1P2|xp-fKw*BhDA(l@nE^f`x_%kq6;yeq(!?yBWDcYf+ZELx?k;mev z^!|xI`nJ>W>@|w()K6QPUjXijTqKa|GWF^2?DeSljf3Nj(X8AICG6pLMSdY($ADh| zu6F`>*KMpTY~e;v5fb!z|4);~N{2r@=%eJC{_M4!@S%bzrWG%6+|o7;YKv23I@i&j zh8jvR{-bz{_*703B(5w>#z6_nQw$|6dP-ICc4djT@`Ht` zq{DBNr<1h?{HD>oBbC#so*m7<74I36sqopEG_;~9)WA}Ek)eb&Q<=?lbUHhm<>FIS zZbb`N^d_C;$G2o(gsj?XbdaiUr~2`gHRT2V>`emla(y)EZpirpsR0j6o{5r7G3W*}t69$gsC0)wU?!UHYwfKcbi&`1-it>3?}( z=>J>x`u^H`#(d*{hrRx%;{AV;z5cy;pRPN)ZBx=3>DK}ljB*N(H?h%KK0UxIEHe^_ zIY2mcO^ zb!X;=$^-QTl}*m*vdYZRCSU_fV6i(CalxIq!wp!t1s*OkxLTnnxGR zu9pb~ULmz|g~Q`?T6Et;Q7OF6jx8{B&}&h&N2>qQ8?g(A%}%sofKD`N(ZIU4CS?R? zXke?D?qsSF#vSx2-V1!+oz0ho6Ho-d)-H2r+l(#Zq0^vyl-f9OV2iLNO-2tI&v%Ig zkxEC;?!Q05XNRAV{_zQCz^~4F0#9X+i8fcGW33e04g>5l1khJ!izf6WyHit)p!~ZI z@q4Y3`8in4krkpw4NlUl4k0VgOaR}z$ll^^4rZI8TWQtdBufrs<$`KB3_O^5sZ2P- z#k0Lp4e0{1UhjFD5UJuco1X1bcuG@!R9B^;?f;rs63pI7C7{?=_#`II zr0A_z?%vk7Pyfu8K_$uR`E|T^7<0cxhu#*Bok+c=ixZe3tAK+Sj8)*ZH&O*#VBc!3 zbVgWtbptsJ#}eW8a67ImN9VQWf!&dr%@5w^H*4psN(an+9XdIToSoXts4@u|W$`mT*PtI zZ>kPa9cu$B5a?bwSMG7h511g|(B6|8Jt=2sgpT1bNl5@k^QhaoGwr=gjcM|Pu9MK0 z^&awGH5KbinkxVvc!i=$^lgPH;}|H3@Y&^v<)wHWz{gaZ4YC2p?8~hx4HcoU$=k2e0eb2G%n!zYokJnl45@4lh+1gOvD=4r*7#4_Ih?tDcwt zd_vlSZ{Pj(TIaT}GIp9HCt@}p@4Nl&N^kwx=BsCFhqjiZK@AfewzL-UJ!<@aog-v4 zyO@~z{^s6VrR~el&b|M$IYP$Eqgdj>BO*K$;;Xfaf65V}|A#q3QL7ouQnVZnseuW? 
z-Veq-KqQ()HyYp)%^n&J%!%fzi{=@M=A%V}MX>z3SV0%8a41$J2P;;Gl^DWG(XddF z82&oO-$v0VK*~8W`|DyID5q#tl7e2viL}BrC2fYBsy+jLM3kA&v^42$TKd(!= zpqtpSAaFetv`YbP7lGc#Cx0Ld492Gn52XarQZyj(_t2@A{8A4E#fOqpRVP!!fW-YW z#AP&5;S6yzo+wvL{6Qu{Cy8_*NkTn^<#;S$3&L?cj&l*hZ4t*i4DqKWf-GVo&ml3o ziNeq0_EkZI^x`CQA=1xNWx^oRRcRMp(nZwMRjSetbfh;dq$_e}Tv5&tMr0hJpUn`$ zW<03O(CNtVS;&BKW_l`TYFcET@yj$%$UIq@d8i}v#6qSlXV!7$EGdgDC%-JVge<$t zEWVDcQ=0P3`t|%KX87(+W`E*`9b*pFeF7LN?80pK=+4vpTdetvL{%o>*l{i zU;Km(e+|(6)g1n}0lE~l|Nn*FC?;Mef5Ha8sRZRz5tBsKrwrKO`;~b*x%)hFI;CYe zemd1~<9E()hIBFv`4cu&&A@q7|AYa8tn#+|c5jK{f#o3&QgfLQ;*X^)u zy)$a%{_mAEep&hrHblwpXySn9pbF&25?r+eofgJ8U@n9)d?5?4d!+E3 zHTSI=UPH_)7M{O@i-VgeJ7DUbk5Fx&a1CpK^;w?3!_?9FDd zUetHkWFe`El3t|AiLj$P z5a3P$c8sAvFdjL)^bkq<#4>p_@GU)`{*?!D<;HcA-e;~?>(~|l?l~R;Nb8w3%k2BP zQRt_cI&l&mXZtiSBS4Rw(O?KRbgxuJKZ$*$LK=wU2C|BVa<|nD2$Gu1pRw0kQQoo# z@7WsAq`U%W+8V7B7p~=$Rx1bOL4jZH)3eE9{q$Dq4BLIXI`LZiqqr9@_Ib(e`*cbb zBkm3?;mx+K{?a{sfbRKL%*#4`rPbKHdV=C1)}fI7Ezta(Q+6#?A>gt%GEVPt?JFkX zZfg*}2tlcRb@=sB=)V5bNfDdcnxZ5b>3(R^&SoPx@o({?U(QPN_@Z0IK2rCe%lX7x ztMx>3n;O@k!VRXc;8N?nNirG@WC%U&{&{sTF4o0UHcjq&w*c<(q~jjQ2k~hL|B^x2W24uch?f>)We^OuUtFj?b;vn&hH^M=MSCby5iWYsKJ{N>e4vcZsfRB_iFGW;&hk zQp49up}bXSbIbSXd240LN2@HIx9zYL5bxoXDr;ZMzPyiXw{^R#Y_4|p6>P6nz*7cinmKy0lvkJj`3a z=Xdn3>tN@=(--Ua15@t0&07w>__+QcwEOO<&CbDB+eaR($X1fRA%$>khqRI%2eHJ!t;5~S8bdoskkFKLp&Aa#!ZqpvWbxH9zW(Xq7_P*oG1;{V$ncSi__`l_Xg@3m+P#|h zeN8H@^BID?8)qnM2asCO*@xd<#>IRhMviz&Y|dg2(uzU!{WQd8V2#lmFlXdYY0JI> z@R@al$8}xZHrbvUoLce}P@&G@@8yfe#Kda-SNWP~6jx-}uKX>V;2+JDOB|=(-L$X} zICH`!)v%nriC|^Z;(`PV1vB11wM~rjb*|?ETLqtLAITe8%kAsPGx_9C zMgG;hnb@lwojdWuR;}giRmZFypZ(FYvd+$6fNmL`0DrJKl8oZjKN!y^d*;m_$xUZ* zqxu)!O12AYqB4cXPSnke`(SUd4VZB`LF@aPeq+7ry2t1m=5S=7vZ<=qUez(+#iG?% zb9$P_g0uUVfj57!USY%&JTEL7orBlnLVJE^y>2yfenX|^Lutul-DpI-L9Pay?p%}>y@HA$6&qwDqnxFUR5*c*8h(6n!Vw6WpOs! zouD$8`E95^^NTr`P{iT*-pL-9s#Gqh2AEEAk&FGjyzTq*iWa@woUpV(5{pGf2 z5|8;Br4>e42>#ZvFebnS)&{mA#daxQE~YK|Oq)Ugt5h^>cJFQ=kv4*})pMp|0@XGh zpHW{Mo4C~oFUlvcwChZ*9tE-KyeGbfR#=y|77t+n{HSDHO{Z4xP_NLzRd7ewk|9YEf@Q|E?&RDd0Xs^MBU{CKjR%vX)Xm^42>1d+4 z&A3q}@}M62*YD;9Ln8@cQu^b&X^XS{sm$l(HziXGO=9-Aw6!WJg*qX$Z$}~)K)3i0 z>?-QZFYvxi28U?bw9WS(jh8bDh!3IlO5Tvd05Cfx7e=8^YIE)WQ?~K}R6JS#u-iVf{N?oN$rmsNPvCwr{3UqtTC1 zcWqTnqMo{c&+)8YK54I!O7&~&rJ8CbgqcpY_bwx@T1Gs6X*T>2-lQ2JsFzV)Gap5* zp<%jmPQ!Zl_!SP?&$xLG_2VV%42Q?QG)nn84Et7p9}l$m0S>(GeRev@h5uQKSFcu! zh0c{tzvjA{zTPn(CS{0W#p83m30hVcB414!sknL_y9k4f=w2VQm0qf_9bYcm1XPa0 zetmgG%!io|Fe(3a=rCR$U`2*;=uE;Rz;u-mbgfKVlE49ft@xFxH$)Z#55SgPnR&X9 z!8VTLkep=_|H`ObIe^%eEx583Oz(?`{ylZ_UH{@2ey)Sat1g!1 zt&wTL((SBEMFx}?d{@2Go3&yVrat=G~59G4-VLy&EBc@r?`?2!;#*xAPytHGxpE~^W|@xV}Q{J{-=?mb=3E{K^Sz?0es$v3w)`Ex zl4{7EtYVUxr4RDg1&?5Q$Yjf0{?~#-5fJvj8%X}+9Lv9m2YuRqH;{bv#~e$7?{6T< zX6g1%14*M?lYdjt{t1$}viAScTbT`Y|Fg<4CIe#nzkMFH3hHP)AHm9fPy_*;XpWyw z_gqk`^?a5@M@9Z=AjzJ1;s26+KoxOAvxkpkp%x`GM5rRUsNG;wOD`SP(#6yUQKP)G z&2oxic)FetE`F`M9i0F%4j`ydGBhOKAt8g*R$`uT?XaS;(>AV$m*E?Xl;0UzokfK! ze)LJ-ljpDC^_}q~8pp{pbP{G`rFc-xXM4Z$8>c!e4}1I9_LhYb?o_7jS@SD_QX=S# z*-7fc@iJH_nTZWlaCA?dRVi)Ly;i;8c@{0 zqfahG&Jd;41$nqzphw=9!HO$uzDSIYor6fp00p~XD`We4Dh&P7K$ViBJ0O_FDFY=? 
z{o3?bvyzYJJUzxnNU&S09$1vL!MS$vBP z;tB}Qr^5OA?&=tQZ8e^kbr{?#6uwhx?K`HW*a@?ok<^$5ob<%-?AJZt#>*~rRC|GI zSb(!1BJ_@|doNg!^DAq&+Yh+^sZ}!E7f}7=AbCa(Lw>O=1)%^Gsv8hJn*C4UzkY&+ z%&N?)Ob7n@1dCu%Y(GCi(nRyM9{%DpgEb{6EXPaEhb;2=M7PavCne5;B@4tgq9yb^ z6Cmvy2jl{L~4BuW~dFYzugqc-u&!=0?Qk1^g(ndmDmA?{V$9Cw?q# z^>eJMv{q_H)0FMy)19ulKG$ErgDK}#o2RWjY8RSweadRV8}T;tXP=Ypg}@YU!tDuG z{2Rn&X8d_nB(suYD1ZymmT^FPEPHsWp$Abls!XnyUg$fu0tvz8L0=8loTj(>majJr z)h(YnvqLMKsi^a8&9JW{>-JZJrmkEqJuDPfZaXb7(0!6TNDWYG2rB348uWh5J?(e} z_c?2UMyUkHcRfvx(tZ!dYt^K|KiqoPK?;E;SS}6Zy2ud$hL7Ae)GZ_q$ z*Dkt(Mr}?oLMT}g5KH&q$?5j%tW^W-9_tQ^4&NfHh&?R%EPKJ#q57RO-dJ~-v zeHFF94_Rt{=$$O3zBVF$JiW{yOEUt9e_x7*NEs)=Yh~`9Zp@RE%G*O7KR-4B&4AdA>T^S8o! zhkf$=1hUW=Jw~bl8<^#+dQPC?ZibaV-7hWGhr07fFo4*lOVJT5Mx_X*> zim?j4W0gO41K$HPQDsAM)4>+$##+Mt+(h0i3yiF#pjZ19mk+dT-+Ul0)np7K*C`ez z+z-q~ueT;Sv6cG(uGs8NA9rVc{(R8$!qJ&wI9ob+YX}5xb<+ zrrvw*P{Z8h@kB?Ycen)GG;YDh(De! zDP7!q1pSb$cf6IzlcoC@Qra$r9tDgsjxk9otT~Uc%us1OuQX|U_e4i}(S3qpB6Lf~ z!5yQlUA;Cn>@bmx)dpxMt}Tx|k3oe3)I3(-8Qcc#eia>>9?y^Z!Tw!S*>*rw3SIY6 zrku}#d5toJM=q0qPgDBvd@rEH;|<-n z?^!Nah44M>f_wEJA8Xa1hHShZNeHLYdjjjJL3Fc>sGLK);RM<}`t-_JG0ccwv{UL@ zSX_opS&PFUS|sdl2(Qa%xg(#EDkgB8obndO=|gASu7D5HKnAaNSIJkYUUg7oqG{IY zD+{VJXacBwI$uz8!ZbIA{Wa;Nu04++@=(fIU2tSi*edaozof7X;xfu)94@_qC(6=k zR9ldtmPfDa&VyLzWm_FC&QFWhO5}yDY-_!Iwb!*O_cw>HA3ms$)8)JHEI)Y!5=y+eIqGcm(z__ zQ`#P-&Q5UMOp`H0DA0!&f=qk$qg3dett~Y`q5Wu*pel>TT%#!~V=UDdJ=hqu)pP3y z$GOxSF*3_rJ6CTPy&|g1ZY5`4EHa9*7Z+A(X~i_wu(9PTKL?*jNO^{t>YpI>f2?Bf zJBzh)8Zg4MW=$N4uNe_~==x^yi%#9NQAGIs%yc_3XgY55+so;Si$Oyc8V7Y62CGAc zVvS9SJ%*&b&&{V#>x>9Y#V@5g?XA~JVx`oyYBG#ey-pJBp?`zP2-uQyNxfk*fjT&*lDb44B3x@E*=x%JKegcU;qN=`io0x#?lt)%_tfP($Z*O(4k&9!e8f<23aJ5j_LTHCt$a1LgtbzMaDJCO@fg4r9U?fX1#R7eN#nyLiyHAk>q z+?C^c0yWfPO|VZ`>pgW;isgJ+7>7VghaBrBA{JGH5=%?)X%Y&3aCf|rg-F%a{8kMQ z^W-s>9CC0!(@GcdAMQjZHN0kB<W--I7|zvkh9(%c5j7v2g;Hdhxlr* z7)uq#EWmsUO;Sbhn`S|Dz>l3l(?z0kjUBpMbzAD3Ui4pxe(fWy0V1j>+?M&p8J%_N8AtVWi#9R^7GlX zrNCQ6XT^jM);#O-9uMrjOews~q##V>E5=b!Gw!RE;_<>rmj*}+PKqA(=kE{rL@%C=q zU#rlR>ps`#`E*|zUgCxAx7Q*?i;ndXjknhx-AoeQKhT?zv{;-D2Mb$J2*Z%aUKzjy zXS^5e$XSa0Lu(Q*Nvl zleHRDGf~5I{Ae)w9n?4$a9wpR&5XZ3Rbr(IujIu}ielX9AbzFo5TkFHgN{}2$E8G# z0(Pd=NnHFm^#?=S9Ck_$1+<;v^p!)r_s+4l@AhT+6czreCzPe&7<; z&8={V83$^Aj-#EO@8uc3@ohx#a&tpHA1r?pR3? 
zJ;aT2@`cPXA|0{2nW3l8?~z#CL8zrxP{Ph@wqkkb#}puJ<8e-D>c=bwYp8R<$Kzsj zH`}Ps_${4LqcY~FH#0vT?*K{Up@?pXB&Dg<>)(Z_Xi)d7p=d>RU*XSAeYtO0 z26V&=Z$%t{7`j;dA4DZM8hnFwKlns~^^XL$K@3f0tC4#}r2t0S-%ZDfO%9A~t#&}aLG`#xhA@5CD(=wO?zB6g}CV(`+t z_aq)6!W4hqz%Q6J7~PSdr_KA{Qoumc#xX#Mm=+^Cs3ne<%{X7AcEXeZIdt%z---t+ zg+pAbpA7M`NS8f&+jJ4*ZFPfhw*#-te@fPVY~p+}y1Ok|+&hF2Kwgw4iG*yg-(9#^ zlyYKwEl9#;W&~CRN#ZhGC62#OGzVHriluexMGC#-gbZUkd9aQfmyZsGxD%sZ$ZnV3 zHf1N($L&|l3z`9DR-LnE(qnK{`4Jlw*)=80H)*nlvMWh$wbtBfWNoWdm(-rR?(J7* z4OeXoU-oa_9&_jnzti+lf|cbjql~A_2xcKh2nZ9ftG8Spnkubq6kX0i|*tfDMc=^;D?B0%$AwSXyc9X_6xAunf zEl-T+n?+Vv$^e6ai;!4Ur${xHBe)N8GJE+$u!Bpxee;xYsVePIzq28!<*+mR!}FhK z5er>ze1pc$`ep~f(vJLC`LLN?-gIr3e^ARhP<=Z3ug8AKY{e|V#LIYW!f%g_fxcoq zHb3LB&zKOvD|(p=hh$A-_Vy-&ec$H3CeY`K1#SRS(!t^1)_*MY=JLzvc#(CdrY)aG zZmMp7x-uQ#D`%AWc<95$Sya^01;A{xV^VR@cCxHssBL0~W=hQN+fCcTSNRTAuO9gl z3y(i!RZw&s?!D_A$F|&&#qIoS&dhyg8)kkcF2Rajv+T|VF3?p5x=Yr=DBKAz36+x9ry3KaV0*2>ZS0_vJ8Q+(jPykr zwQ1TdYYPXD#!DM(>(dsi<1Zu8SNV^rtc*35?+2Oe?gRg|9%and%zVH7=;@z7s?)($ zAGHbCFStPbm;$yR7`3Wn^b!@|sY8_XZ&I%k$L^6A?Bvs5M$+sfC1@b~B=iog*zq~P z%>J;QwXm|Pz!Kx@yI1bG=?2MmXL7Siq4FFT-`(a4+vENvzOIPbl$noifgAxONp8X_|+;wCiRt!5~=Zy>BVK4hKb2H z$u;9yvisW?O2bGQDYCvjiy866GiB%v8t==v>dNNG-8HrIpB*P&Uc=_{_(?RQiPIiXFbQ?gn4=+Cm;^34T@QjlD|Jk-M3-s5gidS07d4 zIR(h)1iP}cV;T95Yqx?0?!tU!!tR-ht)EX5t``CI_nBCg25*a9LB9UJ2#WwNFN$|V z)|#(X+D)I341aQNfa7$%newNQr9ob{9$}GRmii|)mt~S+{HMQL$ zjWJ+3?KK@+kqRUCvXQ#tG9EFDmI;f@P|68eRk&B>Li5A&6cOVGXpgHGXW&r_)R7$K znfP8QR18-v%i}l@dec8${)v2BudHDSJ1R#0j-#@6==zt6es@^`BkXtO_X`70f~I?J z)w7@DbChyjTAb#3H!f9R9!}K_J}1GTWm&ktwO<^0pq{q3arfH3cw$l7(#j+Aux|rz zvz1m$Phh}9a@vpce;l}a?tK~EX8e=>voG*7hS%{MjFTAs0X%tT<+$nE%z!qIE+9Bn zICid4k9%K_oXe%bx91BwoDH_oR{@uK@fTj!Z!(6At~x)HHKrMEj&5zo7*v_sH53}d7&bHbFnCNJ+EqAZ891e?cr`Rfn6y0#t3i!>m? z7L=LsX4Daf9-$lGi%@!G#S?eov!}e2G>vAs&oy4%p6=#d-4jSKyF&25)wq%c?$(!p zy+G!>69)pGjZb;LhMgUog2=050*KEZ*$3n9qw?jxnk|QRwd(Eb%>!m@cR}u(hnID+ zs_xDho&%}n?10{iZvCcZIm`juhAzn&Z${%R#WKTZh??eWO|7CEj@>H8e&eORT@qV; z3<4|Tymraiafiq6#GeP_h8)Rhnwz)$2Cv&b5pSM0ZAQ_~UnXL8Y(o2%mhN91PFh_E zosit}v>LK^F#a-2^qc`H$w)LKZWr`?ZO{aMfHS#;{E~MI8v*W6gF2v4m5@M z&OCbf`J0m#NZ8!;J~wP<;$(x_9xxaQywSO{xhi?NG>*3tL^MKgCTsG2u#W0Y8{J)) zZrBn(`|$z%UxDfVUs)f%OIwkLaU?>bi% zM{Npo9WV%#Xntk<_N!y!XX%z0`#XDYoeu|1k~EABtSg>FjYCXK;;#Tz*lh}EFBXGd zhEE#A5Wz}qS3w`1e7vBbBT8>guNn0FUnkS@&jxqbf6G6 ziI!=F7*PXxVffL7lvKm-U{2CDT(J;Fm6yeXP(5deMiw7|`7pIhF(_-4wuX1sVllE) z%o5M!4MI^T%8hp1vd3Ix@MO?6pY>c@14jlPBFjEG1WSMaWqTBlZ$;U$L1}(n7+*?wX`fQiE}!y zf66NG`@}WTEpJsf)(tyXE~1%%D3s|PS+1T`qIuKw<~V+Y2#~Dex`q|lJ5FN z-0lM&ylVt?1)t6Kxn(CpCNnCSc5HmXdO7!NSuc2sBP#18>cLDOZv1v+X-)3XCnZ4z zL z^OzEUMY|ivp(J{qJP8vQ4k?BQ&#ZzH#J1#;IW=?<=E_ftSV4wk7*;zILfsrlN_E{G z$7MX?-o}gR1jiuiDSiU_I+YCEifZa_%|0y!aj7ce4Y47(ztFa1E!cMB^?) 
zjdoGT8H-rS6u_!&zUbalSJ=)_MsM-fP%H6DQu?zzW0ILz)%8tB|Elr_zc!Rg{fUr`Y1~u)fqdA z83;-rKzONOa^TYJRTvc)Q%3v3kZo!;u7}(x$11*SkbP*pF^KNId%%_Y&WHqBQR6Ld zdDDpgvA5!2P*d0x^bjBm5s`cEOz>D8D_tdzH9cWj<~%5pP;~q{-)iE^!RkWeCO|78 z08&+pc%3Awbkb!;>{J&{hr2-oV}x;w}%#bZ-!#Yb9907D8?pvp}O`yI3aqbSKcz@kwf6rlcyx8CK-y-0SaNy1KJ%HVJtp^z!q z>st97LqZMO0lC0gHsV9o(zcPmz1Z&JxshD@LBbap@mJGjZB3ahdu?|fP}Jf3m-1dU$+{nnTAKucD}01OuK^;NdrM^bzm>F zoo2oSR<;J+EUWzh&R>_VV3I9UFPX4Hj5~@&1HLov5V^K7a74=iB!}?BVAkHR+ZE+S zDYtCH%oxi!4!h3!Zts!v+jC-1Y}+?*pFjn}F;-c7^BA}l0QSADt}<-Ov) z$X`3%4(OA_TIVh_mS$dmB}ZF0J=w*xyv2PakPk*t9o9pr)g|)|#HhDk@Ov8vH+Uqv z{e@lr*~R{EkymHbsL3xGz`py(*L$0dnyizY0%h{q^TQ>UdpA983t@0YAN({0=@fbx zyN4guea~ouMHZ?|->mSOc!J$5TZ?X)wmUvh#?vo!DWR>YFo@cG#a_OhcP&)_CHQVU zs^+Y^4WljPfx}~x1qwa;yD0B)5XN;xUz+gR<~rbr3+AADrY_V=wje|r8Q$iHN#qH0 zz{pFE3Rs+C`8or#qVCAy&p!)ubm3;d-^XwY^3%Z>`|9}|HvKc1aB!aO zCbeV`eq$7hx%e@SGxi{{yfw?VTIe0nJND}(_B4)_!|kwrEm!11SQu{B1?Mk8+Z^UodxFGwmR7|D4f%9{coA+Hp+;oQAwA|jqe z1(zfp!gqjXC?b|2m5mQFB{p5#FCE!d(H?h0Ry=qXPg@x^xHjFhTc0)8Mah?5UccFy zX46=~t}yoL#S<9}R$KN=_kQ*H(pqGYzGKqUpDMs$hFnGaf@Q7qSCge9)MTTV(O*CE zjHEdbbMS>@xcQ!wCtD)v#g9AyWTERsf!6v-2JwAuSL4}w`!@nZeo{tLX7P4gXag)~ zE!yEZ4qXpBLtm{hXA_iq5y(1cN-Bg~yb9COI4E}iW*j`6O4H#x{OW(O_a077wf(oZ zgqBcDLT}QMCLN?mNJ0k{0-_+GbOAx6NtfPRf>f2>ZGeiRAkwQyS3m)&5~LV_x=3lJu~M$^Uj=^^A}_%d*|BM^)edG8RSNUwj+wBkq8BOm$f~i@{|B(|&c*PD$Trgf-)<2V>gY zpv~c}Xyyn?FMVreJcDqr515Gsf(%=1H}|J}bb<=4prYM@6xVRd_v4{p6|j!20jFKs znz($8tKw*W6q&}afJtPw|FocWNbtYxBQab0Ii1l&{Le1d<3H$`6B2)(oI;2+rxlM4 zAW>kUxh>}ppBv*i6iDguuzVONnZI$X;`stPf#voIVB?k8opM^qdwWSY1}a9G*PNV? zCZu5(R5dOpU(;0*svhk9u5LYzEmCe+_%42HS|_-Mj_ZMhk6oKS8RxymnI%viM(9j^ zC3@vq=WT@S!NZl;GdkSl0I>V70X}h~)gNpNk}KbIBK9)6PiQVp&DIk!9ZJWO5+OwD zc(8TdS9mPxbdQRC8}5B*?x%an0zndc=CwwwR^}pYJkgglofh$uW5E=kUf`ZkJQRfxJ^Y5$t%W z`B(d(n7h*P(2ESkbu)WdVFBT z-WuRl<`|7nR?o|1H&dzF#um!3^;}j>Q!j&G*LlZ#zvZya zDVvk6T~r(T=d1azr{GV=AUcvrICU%;wM0Ws8Ro{r1ho@6_$()Lp3gU>@E~qbK}>~p z?hy}*=$8gcweLw}TD+~O5vb4NMXRODYjul*YY)VB=SR2 zb=1ef{^no{n(xe&{yVQDoL3~2H{0se+ z)-JFY*=<}{8sXD9&&jb|KnUwiwa!w$&X1i~q&A#YyKV-KjAAT}bDBOjidsUx3QVpT`Z%u;*z9B0h(PL`U8>QSa)T~aq3}oSMnD6qT57d{jNM$g%jM0}$ zm^{;t@uyVMEYXT3$o}?;_M=F`2Do#@3}%!1@kc_XsI%>!F}zDh^B=P)7q5In%2mqd z`Sc$NeTxOcPwe$v32`PuUoNs$Xj|4UO)14q_;u!kmO(SwEG^?{jt9+uUybE~Kl#5E zLGbMl`4K?Mvsrw#!21};QBj(JmT^-sOyQ*OIA$IhmT-edb$D*5F_c%p+czg|l!0H* zJ(;rt>eL%8sjA5${~|ui>u}rdU4pFWhh&|^+ zl}WoOBYC!1E|u;&J!i^+afK=jpHEC_B>CJ{NYS}7oIxQ0Jw8IB1^NnaYHM-?~?f00p~3UjN`TJ zG-2rR=Fh(e%tTcbs=oDR37kJTHrygY4?O0cis8Q*a9j3oP=2{B_JNmNdpPfGTyU|1 zbxK^|%}*O3L!Xsn z;|+O}%LA-yo9328JqJ{BPfvx-3z8r91LV?L z({r3iKU&_WJ^uH-fu99Q|0<}7(2=L@ytpr9HrGKmc;WT%ZL`OxDClHkuBy|==_Y2s zcGk9A0UMMQ3T}bCr74(;Ql=*d?gfcOlinAwUwiHfk10~w!YUIA=Rby~U-N}JO2N!{ zZIrDy23XfuyG+$glzgKN_X4_x4L2PtG2Umd#$5`@X#tKWL@ib<(l$_bz3=H}CwI>u7zz7FWjR?WfaCzT)6*`ZuyyTOmKWu1|iNwC(&? 
z=Y~_py0qJL-~W-~t56_mA)v&7bSTuvTe>R7q(LrBk7re`#OWgUz>s_VhJ7<_aI^BZ z(sZ7QVr{V5E`|y!)je#CwD17z(RznlWo#*{@~Il;Sy$7L+KlaO$TUEHM|z$Wch(8c zRmDkzCLEy13kUm4+TgPN0VOgAopW)LX*3QB%DXg(NW4M^D5S8`M-@Cfo}!O91AQoA zmEOX1=-t}-AhAkgLeOvMSq7c3Ji6;_+YI=;Y^C-|T`F zJbm8Ho&N4wudJ2(tv-o@^FANHO{>57s8-$gvVV6sSH2JQ&$l?&34Y*l#{X8-YJMAk z#1*?Sg-v02cZg=xv6AYxfZM15WvHa5Sh4uzfqP zE<5yi4l{C62NJ|I%i>Svsh{g_(;Dn-w(9Or`5+2?gbuK}akyo&@GaMBp5_Pl6ZvJ7 zA}aW>#MINhsg8>i5MH&{ntT}NcSmn3pXmbJw3Wp3L_Up>h3&&A)12AaE-bIoS)zpl zGW75?dr0z6Uh9vss(hd0F)v+)3XPF8cQHzCpWwL_;Pt#E8gm_Eq_ z-C81!y@s@jW#QW5 zeCW-Sygbv`6#j9&>;V3KCOYO6uTidr^gK40Pnk{PzFd1R-=7-uQ{XGG{T2r!0dcTR z4`bUB0b-U=)UUDA3Txdxj9R)qi;CkoT7yt%_X>HkGTXOVlun&M_RpvSVM5SPCuun1kgyX%;zDw-)V5X(-z z%k5@-6Isp1{)6Wn+pRCPYLEWxuzv+y7SoB7z+>eJ{IdVWKT4d5ubFL)VT5W}<7MXB zV_Ly#xpg>pl(8tkL= zghLMwKKWYo_m~7r2ywmh|9PtEiD8*h5Gc zOV&;^no&q1SO z!4xyzm=hVfSJE_Dtgw(4Wsl3xyg5 z-3b=pqZ5CBboC4>f|{B4LN|=BHI`05LKt6N+>}5R_7Ze*6MV-l?>On{OyE-!LIqhr zANx6t40|2UBQD>WuaQksiBV}Sfj2#r8S0+lJXTQg8AN=`{Fi}Xk~X81_vwO**;am= zV!hRi2qUK;mYzMc!lvN2f)gu0HorC29rd1!{<=BS^7?O=_ksdR!3ng^|6AmoL4|;i zNBXiwfWLrRO`8Kg*B;GEFSzY5=g{JbKlEzHd(Gr?2e`L1=sg!kvufl_t?bnZi$)-7 z$4B%=77=GOf*3}-UX&sYL%*!7uT-M3Zyt%+b; z*974HuydNsDnwdoMo;2Q?n~T|YB=;hOAPFsZOlnhxS z_j+QwT$JFoboN|6VDj;5&+ASH@dejm#c3gpe};ao7a!6bdC^hC=y8RLx*EMR@U>OuM+CTVvYX*Gi*;mn#abz|E>zGJvF&7c(|l4u znb@<&L-Uc(K;KlwHSd@s100}6rkZqXD1+;PES&zvs}`IZc+YlMjFeT`;z6no@G~~? zB8W_9$3IAZl3XV3?|`96$x#Q}Q&tHCljJtb_mIgx+=Gm$wAG`Bk7{hVc=gA3rMJW6JOp8Crvd+lDNDLRrLHi-eUuVW$fqU^Q~VGehk-IO8L^HMAqbY zcbqEmx^7wfa&daTJ4I62gF}7OfkKUv|mD`IH+Ur)avyE--~o<)aCJrGUP0 ze8Cktv6^cNrE-}6aZ`>rd>8&Vz^UzjgHzYy|8m=D{spJ1{WoyxaK?YZsjvQoQ>}0M zZT%<%G7MffhEuX9^AoCP+B|#YwqxIHSay*4&;6I%j;#HExZ4gdWJV%xm7FJG98z#v zq}}7BPg=DDo6IGEx;{2gQ3IKL}w{*#Zu8*JGTrd_iXYJ`GuH-P6XiOB8=r8Jy7@B|V_qu%>;5ICwgDFg17xVKH-6yR_>u zH@soJRkB)V71*@0hV1%O>zLxL+Ep@0ZfVU+EbcYh`kEWiGY4lbqzOxaN8{UBAU;%` zL_@hjDW3Df22jw1-2js49&OzFFH=gH~PdB|C9K=DK^zp64bXrI2BuvhSz zQ#UjG*vIc0H=qK&ajW_6@<=TeP+t}QViTJITyQ$)ez!6H z9n#t$i0fV3hD}Y`G8U9C9*_x{@a}o8&`4t8&{t%`p&mwz@}u1hYU+et_BHzN=DN42 zbOG#ZCU0osE@6MXG09*kmM4CgF&tHdP;UQ|8uiDtxx?=I*AWGeMVN75y3Xqv4^MGh?H^ISL;+G32O z-u-)4ZpX9o86?KlE-rFuyi%~d^O+dTf$|PpzEk?l2O!7L(jW3Kz#JaSzX1fk zcOJQfj23SlTh549(!N%AO$l&tVG>^+*RoZP*i)0(e(k&fkH7%_bc^;*IAZm%%*8DGW#rcr4;!8*8HaD+qM(@E^MwG3`lZBplZIDOeES0acXNfSnw?p^uL|S|xKit(2 z(~u<>5>QtPMAgM9Q%3lVl^KKw!cR>_QD2YQimGHVQ|~?0P(NTm7u!S>Oi=Bz2~!1jwCvD)jP9JVx5%s6+>=@kM`7S?pC# z&eexIKtRixn=!^2>P`U3uEV%6pvT9kxR^o4Jh~SWv|vvU(8}|SO2q*$hqCP-k3wbm zm=->>Qj?e?g^7UgIai`1b50Y%L$g?r-;$maPOj0#l}Pbqvlli03z2eUE2nG5)1 z5b2=_C5c6mS8eRcDlV8uB|#9E|9B?;17P}oYz?t%3G62owQNE^FpArf3`}>SMF58y zA-lRU(1k?IPz6!F#a$DO66WlLy{Qs9AJv-CEo-sB#}uK?@x9()kgrV_G?4eJj^#5_ ziux8x@L=E@FEhx2ZPXlB&}eD$h}3pdUJM;d^@}rE`1Ru!H^!ew=_@tVCh526HK~`O zdG~|@8HYv0umbt8h^xBwv@%{_1=VX={4UgfW97z8JT5!j$Zx{P@w+{*yT7I9XP#Ke zJ9fKfIo4Np3xJbeA=N4GH1ZNvX;^rDh|B3oHp zDfnPi;D7g7oy`4rV*|mu^c)j((!O9XO+yFKo^jqyIc7Nd4zL7_tPk<|9}aQX=h z%#T82)7X`GagiD7sr1?AA{Ccok}c~EmBAqQsq7+an$YNhc%eDHxX|flN@#)z?~)yZ z!jVrD(-h8y*_qhwps%IZJ@O!>TtFWK*O+dbb9)7k&S2BQqzzzEx?P`8^SB$18JnN{ zoRX+`4gQH8{_So7Mjis`FEliQ1f;U;4y3RO=mM*HF+doFLbcee@fR~P!{j+SiT(?G zrT%aojSB(u1<(k+LYC3}>aHe98K<+tF);$zFf(2?6S3ODgh(Ay%Ym{q->0zpVcC$; z4DmJ}2K0~gJ$?F?OwHMd`z@O`Y5G)a@OS2e)1y@&YQ)YyfDgye^YJpeCP~@66J+Mv zK25dJCMi!1Ib*u;U>*QVxIhLlJ~a`k=hvkJp!ZP1z~KLaA|~n65KRf`>t$Fz6Q9Ox zt91O$nfsB`9z-!fTbP`p>|tE63*@heh+5syGQwcnV4x_I>_8~}`Wgdvn>@2&W$Pd1 z`{>sSjpO0{d1Q+MAS0=>1|E=x*!4yDko{kWocTicRz>;HE3dku*r-TAs9sdh5?1QX zhHV_;pfv8Y6zxG<0s}yzfRTr#{``?1l`wnR!*Q)_S>?BP1bK~ES}5g(hwtR0YJ2<# 
z&W$zC*~EgpyvKbtjxnPn&)4|@J`w)ddzT)(f!RDV$S6f0q5S5we7zz+qayC^<8EHR z%bCLnBQYo5r`}1|jRsN24CLATDB(t%u*sB=g*Z|^Q>poQG9c~<3>h$2PV{B=V#9;ZO$8;7I+0+zN|6>C4mx%P|y516a zyslTm3SNcVQ5TGpB_oSw*B~FjXw#sJVAQZYwMJ`bwd`5+M)RIQZ z#%)W=sFSRp91djlsB*b&Iw=F9U_VN{SmzonpXD=|@ch!nMLG9~-f*zG0^p5i#n>*f z#NQverQQZnlkDTnW8xE5{;IoHy_P0uNfLQ4MNnnoO(SkZDsVll-w`z1ooG8VSdh<= zsX7(FJ$=2Y7BuFyYo3~#6>^A=;&a#AQ7{VMV^y3%^-bOxKp2Yzn8I;0J2R1Vq!CT@ zYuorQ=RvOs-It16{4V;DbDvM&C1%81+CQKUFiw(waW)lkSff9G3;LtPA!)KlNhn3> zg|09T&1P0=@z2+-+EyuEIt47PrHW@ZtHIEyv#AU9s}~FV&S|qVgNz3o zE4mH8{3CQamw$1EZXvfG(Y{zhXKBCKUub-MJO4?Hr~&$QjQk+~KC5Fks+D*m zJrKg>c*1) zn5E{d?7W4QL{0;APLAQ1(`8Y98CiB2H&tBoy8byE{<|TgC6F*P`>SrS5D<7cfudL1 zZCr-9OD^Pi}Pew&YWQ}b#=-L(#V~D-loU{s@~wz#R?R+YineC9L;iD^+{y! zuPz5$dMzpE!zVX0Y^5Xb2+=Wj*U7YahX)=tEWtj&%VVkK*#bly1zXjtbo_`Gwxu1E z>=F|VZ+)*w{JNkQGWv7ADUb^Ja`sT){ezXSz3jmA?7*=O*@pSG}nHQMV^N^AmDb*mF0W_0B<=+@w4B1Fq{_I!GQ8T zt9QvqW8xk%AewLkszcT3_`o6(FoJpPHb=2FjL%<=r_6+F!P5qvv6dMJZIDy*1ffS7 z2vdoPRJqr&_VMR--#mV#6ASw1`AqU3dvWmWzrb+{!BA=Lh;i+JJOEus4E!(feu_c+wv-i3ZdD2)>6Ti{mF ze6pCEbw)ADvbExyy2T`a6az3HiMmcc@yLc#qjvu^)C}hYBy6=GTXJ)O(TFY`;ORCm zrTCz_d+2i7`(UaBtAQkSI~AQ->$JL^!<%ho{O8NzSTzy<VN@GO=PGM}WdX`* zm@5V^)`??LxUs!dJ=+n5Udisu4Q|*Vnf7 zLX?b0TZUI|T17TeNuSETbGoD9ieYXWJX-Nleg*N z?UPNR_bGx;O8IcR_OA1jO9X#zca4IsHoV4HbHf0?$H&+B)m`MdhA8LGINjK0eKHq) zQ)S40lL3%cFfx}5&_(2E!D@{`;*Y!aPcEf1GaY^NTQ|JX-tSzE-}EGp(oc3wakKsW zPP?0z*Ij9s0iqCtiL)uua;S|KQew?padVwW;wk5uu;M=cXt?gAr%Ay5eFb1Ax6Lo; z4CMTDyOUPok*0u;dJd@$7M_Ss-)~D`aljPKbNW#ETy*P!PiU zW8{JMGB5g9p5H*GxzV2vj=unx|F25*%Nt$)7Zd(xW6}QvaGA%-&qYCT(hygmw&=01 z@I8a#+J&@>4}g{1BulXjciA$~DNFF0t8lhJV2^MPFXLO0`+WCRMIP|y_1w_&c;91^ z>wUBKMjm%yLPehMxl7#Iw!4(TIO!f~VvXuy8YKOj;^-}$ zw{iJ-j~_5mvhC2t%3}G2kBp$5!4HT%+qd_BXqui7z_HD5Gp{_QD{|05AK+g~R@3Y| zJwfd%(m~Zp>Qn+Er6)?SN|570EB`L^1qLx<2LjK%gwbXu_~P0_TRA8O(DHo6X<$g z(v0OIM?eJ1CYyryWQ(5?Zx1l=BZ5#roV{lIu!Ps$fR!vhMY3_4(F|X`Fo2~LV3pGR z6w8dD917gJ@$oCrQTKCkxHU@R69?t(Nun|(V_!A3U9W;!G%Ys9VSA@xZ;St3@KGm1 zFbR`{xEo?3O*L6v&|RZV~e@}lqicR^eMQ0 zE6{nsx7v;~`mOk!+*V%Jtqj{^F;wU9_y}03-lKEHhJaxaKs1W`%LsNwlk{WoCec7z zDDgtRwpfDGd8xkj**0b7Ss?)U`w}4hQnWoZ&7F!h=R5a69>`zwqQzz!BM`kKyE10{ z*gT&uX*0gv*k-Kyy6BgrQW2Z0c`hTSN-kI%d6F=BT}B2#p9u{Na{bY zxPU?nV7Hlp`;c@jE-u{$t{8N*zZHA3NXyja#>~F|fSsnDf+KOzw5qt{I6xL}r}Y_u zJ$WSZgCeGjGxP`}=;(iJ7-%@S%R&_EyeKulKKn8r04UgW0O2t1wpeRoE*?=%0vi z0U*9Ye(S#1j%i&$!$XC&o`yKk{+w>n<_sr>`wkQ#)fG~=XFgSbYYH~i+!pgTvO&g0 z_NojCVhi*_hzA?#&OaBBA*|;}FCj0BjYE{}=wie@D+dd8BLq*~lzr4v_BTDX08pCg zerxS^z#U=1-=0B*O+SsVHz)At!{kfH)$nUHI3X#=ofRO|?;%VtG52{FcD~8lDX8{M z$G~-Di0rSOo5I+F_rbD1mq#nAzcq%*`go5*6vZ_1WU09rZ+eQqk1#6GR8o`u!#hz3 zBpkzj|IVq$|3{VImNdwL)>3D0V>F$C6yOg8Ou&#Mh&;uDK9+Z0T5!8Au0)U9xpSvY z47b3*uaU@-;D{~I$Rh8)Q@7Xppc;DC{G>JzCSnZ0(ZF5b2Am(HdV_=rGx1AV#zk4@ z(|uhpRb1Twv|rEY72m-d3bX5#TR+x%Eplheou?n9@a17J>%(QZXuq?AhWkXdy^P7w3 z{MW9OxS1t*)5+nwnydi5zQnN3rm3E9V>Zfp(FPi^PnvkC_h}u|=I8Wdr*6HwxkWRs zlZE4@w|-;L{T@jbPGNLk(9fy0rMKJ2!OVmQpIo0`>*(C7e>~&!NQBzJ&dPWR_=R zi!g1EWfT>?OxNDU%*)27r%9Se!+J2hI`_V)F#VWrOjZv(m}hCzO;fF_%41ep=X6h|g%F>=}-M#V509sxlB;S48Z(twCWpl14h;FVfYjb2%WepSjfCId~J zaNfJpd4 z8WI=nNd--xlYFlI?*O89ZY|(6C(mO7q`QolKp%-H1G&w&?bW@JvRzMc$b?dBZTK(3 z){A}y@#WLqDFg>-zxK~bIhaJj(B|Mp?ge8N(>{!XyrIH1CA7PJntXTK_`G-j>_ao?}TKlzj6bzSU_1L(15@px#g0xQsp1B^e<=VH?pVmX*K zqQ%@}v0(@*4T1agPwo?@k#~1* z=85N&f5SwH(gQ-D?C_;ZFteJJ{wMsWdy7>JwP`B_W7WoMMMqrA(-W`g;B<={#=Mis zKfxM5>UKOd!-@`nI4a}1lv?DTJnI zkNhm^tnX==obmD1>oURFYr@8s8e}`;I_f1-i3deia zdLsq3TeLWdbON^Ik%(1RW_6m6au4tBaoD#->QIfJzfST6U%EmrZ0kV|ac*DN^gy&y zLOH(70Q-C}kUR`i)9U3lB$um<4N9N1YE@ zr`2_AR?`UeE7o^O+cwQBQXq>nV8zWos*$FDe{c6W+F1{LWtVpId6NeK7chg3hw_zz 
z`sw`q2d5aA1;X(kCfNDxBFskTGUT`7--Xt$1#wsE>>*_lg)DDrtZU@?L2EGW5)d8s z+JVGZRuL`Rk_eLKW%80-oQ+R|i{ptY$w9vFyy}SsHc3GDK;&L2i@0}VfnRMp*b%Uu z5GZ+t&l3D&16%IIq%vusipG#eBHI)2UmJw(kZU5tQ%JK_1xvJVJ5ZbvH#IVIN%QdK zG>QRI+t12*Gp#>Ssz|7V;Z1_eAc8bwFBo?AMn@V7wY)iq0Dq3>Ffl!(oI=KNbdRC; z28SY1>O2%Z=kgHI#^E6Ja}l)M+|sBisDofEwyNFEq)%+t>P4!bJjRle!9!uUZhTIX zpY~h?-0U4s$DdiiLN#YbtO^wlh7TdQ7!1+QTnLHwW0FQj?kEc(rYvt z!||ugFg$=XOztcCpxOzcaxphLYFk1HZKgkRVq81;bA2eFkop@f0r)1%ia#nrRPryX$R#)$;@^7Zrge!B>%5*f9W zl{B{X)(x={X`~&i9J$I5af?;sIJ``5P1wb#X=VdfEJU_-eZu$PCpWr~0M36$QL2aa zvhSI;+J_pXc~KvU-192!<_tdQymw?CJu@6(5ps;2+1S1%2X(*#bf495-+i^}b7p+G zK@t>b&5gg!WW$>RWEYQ3h@XnuJDgRWB5v456rCo!Z5so}hASb!u_0yaRzzzEX8q`# zSDN4w@s!57vfT&HNR+p#<@({t%EQLu@82l2J{Hw@n!h=sGUq_R*6j0~sH+ie&}HYhn>65faS zj)pRd$YY?DCehcqZbC9X8PF;TP6snz6d5!##{;Dl_`~^+Nb|P>IB&{ND#I?+9fhC( z-9BYZq_CQkv2cuBdwPtHV&vE@OT~6Ar>+zyXr8l+m#&jDA@+g>LQ@=*6nn=%r@Y4C z?;~m%;0gG5g9u<<2aJ^Pxwh<@S+w0rA(GV_jG$&%!xkXoNZ#)5)|H6^%&{t_j$+7QrRVbPqB^>Rk}ll!YbFa{uHJS1L|XAHjRPi%`MeA8WsmDm z^+g~H0vG~{o4h&Ew!-1I7_pUcxFrc&b863}tt#~!>cY)ec@6OEq*^xF~K-0$d&X>a4Ax>opzw7Nl<5jvDRp?ayt@u!F+er zAoThdOqr3-2_1xJJ5#RlEWG=0S9_YwLhtSv#t+1vE^ivUlgH<@T#n#WQ{kW%A)QD{ zFVIX7kSaWjzCuJ{+!0450_g69_8tbkL#+;E`CY068+~iA#g(a@B_k`4$#bk$E)2-E zuiHO6D8+8*+kFvRD+^;OqB76bJ1V8Pk2;&9j4>kly1+x@SvRoEzTna0Nc&4Oyqf%G z4 zg2x$16`x%6Lu@7KOw|aJ$f`4qmQ>tqd|GyNYtJVGeWk2kLpEC4yoF~c^ZhqYU(n99 zvuy!QNi17g%uYqwF|!rDsFp+5kY*-&S@&(G!qUz_eYyKnm^jV1r+j`HbKoKmEfZt? z)tssZia7<%yxKdZCqPc%(aZ*0F;)=wa(c)!VkDhUCYwT^=3UAb1tN@p?AQ9Q zvUf)rYE7nWy!Cm}crJLOWMaw%S|GR`5o8aJzT7mk8CJxgNE;Fm&5x0FhESP@QEd$J zASvyJ`0PNgUZ2Crp2L(OwMUd4DA?!BdjSdH3ae1UevJUS;91EjwIqnIZUCEWnvtC; z*sLAdEnE9V=zw+fs=fllC|HL$$qa!~?i}|%Nk!u{^(;$;)2R$LQ&A*_eXMe>Y_y%R zIbHy!MhM@=ih1;z1Ea)k-;@}tj>1+q^BUS{2F1V}t6WyOatEQ$tTa4%jHi9+L&nrM z2ROf*pgTBIV9KyU!jKp&I(6|hw`a=V2SER9vOt9ar~Ze(bB!69XT^^rBh<<3=t(@R z0EGr=RpQa|u|=Urir&_$vS~0*J<_R&-$V<5JzVO!H~%40J%FVsQh2F9pPL!5iaGUH zxh-uj0z(?H;n*$0nNyXEpWi7NPk z$`QGRew+aAOkUZ8=Cdr#Z{KN}D~ZeUUbyei2k0CNg3ha)+VVCQePKSZ=vX??^6)8f zR|=H^Ef)sI!XUEBRsq}Q30f=~M)Ju=y*Qy~c!NyP$_FYg_y>c_V@{7CQ^Gren1KLg zPEjjFoE7@s)5|>0(_lHSq?3*FRGy~7l|UuSWPqiNnsjVG(shS0a61Dsx`14EjAl`a zVq{4OB_~_pbYp8}2jNABQ7~=ZYhqOXl;R25R%F0FAZjmIdqcC7|$8x*E`JfXSxPVZs|85$3FmB)M9++~hs?_*m;M^fGBIYBM<)n`yt8uqL=DrV2Q_ zp9T^xA3rD`R;lTe(|by8SJRgr<4|0(XFW-E8r}B2%rq8ymB!@%ZKru0HUvMg0#cB* z;)o=j3}H-9N|<49Aw;BasxglLRPp)I8UThum<=xP$(5e>=Wr%+0KUVthdQa_0i)Fg zA8@ZT4j&rSfHT9)5(yT)^A9w#kf-S61I%Vro{V`YaiwDm1}dqK*5piNhucCA0H5g? 
z)0q}dt)#;d5u8n1en3(EDSf}0S8;Ii6t{XnHOe%WnK@ZZ=lOhC!VGDCV_fQ75$NNU zX}9@;#*aYTU6s14uUUx81wE;)>Nx99HDPlPF9y$Kj)PoR00Q&ua8@c>ecY%8vudTwv?7;-N4zP!B|f2$<;xwBE@%>5x-VUxYO)WV5sEOQSof_k!X zp3>pM$%_}g>9r6aX-ri!#f$PT8!NHUp&ekAIjr}NqWZ3zQ?q3%n|6Ehqi0#QyHLsq z!8AJzkPzxl9!#4=HF7x4BJrM)I%rYv?>L3&!zi3A`6-b11v({w5xV*Ij&ag2xNjkx z3UYM>X*rlVu7`D*9Zvs*@V)MzDWHgpUjh(!A#&~(Cz=qG1x5Qg-T$t2>g?ZN{{QI} z{mqvE^)g?L;5)-vaY&To5p_$x z3}ifd-Kq5E$LfRDI9JVa->OtMS8uI}i7kH#I|8koZ>W1RTM|J@2FzR~voU(?-0jSB z&z&NsJ*?m7>f5G$ytWka`iy&L^IdyAp9RGlIP&dd?gQrbt%8wpo}O-E7bWL8a7ppAfyn-u=gM=SlEKD0mTG9E-h_Ru2Vj9E`9L!fh&rUy5{$NiE-X2yo#?LveUr*hfBFE3 z_c_l;X%bpnrcHApN-B+EL2fWM9V2c|w<_nE3f#O+z}>l0z!s@vug(;3TR=lu0eo{J z^Jl^PNE`CRlRKSGaBko2%a_IUJDanp+T^1zN9(24JM=yi$q{qw4GX4C9kCxA#?lJbz~@k;w2w`TG9Ug;>N7dMN4Gp3W>V%TL=$CWhR8?K+iUk?A0|B34PHu z6T!q~VH?UUsA8u_r&d=KK601CHeS$=!#d&gcOZQ3WhQ>(t=G6Ir(Jm?VAO6q8o9F*k_?dMUKsmNu?jgy|BOj42wg9hA)~BH10NS6hIDuK_ zRQ~gmGe9ySrU$M;Bx&x;Z zka#Z!u8vFWm3WML`~{y6bp;yDYOecWD=WVQNoo@gFQu}*+boPhG~bjslgl!zUa z9X5XHp+W!sA#VJH8+_iHd-Wy9j*kw#k2L_-j0cH7)qP#q3Pm2oH;BFEIy!jNxc9zc z2ysMMdHR_5$k6Qx72jdnGrFjD&o*8J!$vm5_`*gF+wFGOPPmrUpY*)~pp`->YbpMV z9rUz~p<_OMc*&whpd|`I%?ngR{N!~Q9e+B zilRGZ=lw{VA9~B!r*NVnoRa?feY@j6TCKh^x^P0=nLzZp6|GF2MXn<5<>El$?DgJm zVnO*}<)yAO7hx}djMX?eXV?<>cqiWYMBOTGsJs0tD2#sKmg}1hz$7MV)Bo=6Rv}CN z`7rB{`kj^Wmu_8&GX=#-RX__NpYi*5?p@zUkmVoKULEW`-l~r?ZjuffW|JHJCZq7C zV_)d*;y2Wlqe~oN;8Z2sKl*4{uDJYf?4vDnZ@=KWnAvwZ;ctDknW*pgU1+il{@zC` zdVg;6(NccM?($MWIE@6pFq-WqzNlb73SS%|VF;s7*8JRu{CFYJQohGA`h01w{aA7q z83yAj&&yI11)faXA*u4$_15K=b;(;$`9cv&1~m3ii2h8~?gJhf#;G9qGwjtLE6+zD z4<%fhIg;Elfg1Gevb}WePi{Xnd?MvuS$<_vj`Lc>+8bPgA-wWc^c9A;tL3Ltd=6eo z{&e1GeVv4A!9;hwlTK@vG2iQry--DFJ9D*p<+@q2bOyCMP<0sb%lLiuvk$WE9A7VA zXa%M9ZaCjRDJ+TZ;1TlbkIL80k19(~xBAuhR<{OFv{KuH z+U&QshxA3CZ4X~iUfUkQ=t=F2nwj6)`DEezZ0EDJ|Ju%&ZJgBZxMTLM-3gb9XS-kA z>(+L^dUi?eP5O@A+M5bkdbT$mvPWLq`xZ_sO`M5l_aM&3i&hcml9ktq^Ef@}{e=v3 zkNw3Q=c@hhx&G_>ONDXL2X|(Yr^_iA=N#&@Q%$#ZwSMqq)1!l*di+{@14MJE)pt!8 z{A`ZfxkzYdzkRsTEn0oJ*{A&TaBEOc=4gA={Pxk#xO4T`bFrHAs=p!-6kIqm#WF6!@Zwm@(G5*6SVrxATB~U_(CCwy;}w@ONf9a z7gCAF7gL%LA~{$-+>&>x@$kf5<2@_{>*=>LJ;0n5P%ol0=SZg8jXiL8WH-lzi#Wk->8*5x7!`Yd`yWIfZdqm+_;C?roNhWanC; zocahoq-i@YGCExuJOS4}y?G(2eO^e^pvTa1^IlG0>1pMj9;2Ym^g@O*F+GD`(+8Uw zl^4n+%zJvT)Nf|iB$u6WHuzxqWi#tdUzxOj&xh*=o7v3_<+5=GefC^iIo%h^<+FSG zZm4bDA51P+tT6a^({k&m zx>OH?-T{f=wCzynZhkULh=-g|bfNw#56GB@O`CJKR3D^mm&#>mdV+Ey_MA+yZia7! 
zu!0Nt9Aq;t6Lv3+)in-TB&HLQp_q$SFO{)3!Ksk6Q{^XIc$G~}B2ZX=BTWUdzPe%?l;kS(JfI9V#(`V&Lo-FF8|5m5oLnr(K`zf(F zR{)}Di-zdxKfC!yoqA?CwJcd*lXK7InoD}In5o4qjQ|D>jXgg($F=vSd#wd|@5AJG zHUBqfJUrW5LsSdy1mHH&vKJ zfdo$C3>d*7!?gS18|h%L>BLFdaI(M*h}PoWSv-aCI^1Uepq(!`hXO|6zD{XHY~|>$ ziD`3-(vux;QQU~*7Y>Q0b0fBk8F@t{4bFxta@AP+!&57b@0oWj%0h@ z!zkLz(yb~F-YupPg~)Kz_j1rm**H~~|A)Qz3~I7b+qLf$k^mtIMN9x8^rje)4nio> zMQlj11VN=JAhrMsflxvZy+bI{g;1o60Tn?JK~ZU9C`u6!#6pvNJkMI|owfG2XZAPq z&Fp!9ygwO+Kbe~|SFT*gafbAPyiH|Z+>N-=2YN+LyCJhVn4p>cl;_g{zi5A7MBs{v z#5Vm1{{WICo0V7R z56kRx>xeik7dPv?wfTPgFhuAoz!h{>dO*?sP{qkV{K3N)VGqtXsH`VO2tQ?UPZN(I zzfh2RVg9JM+SL0?iLmoI(gMk5p(;{?G%x%#@(QtAW8mDvxnai-)9hzhg(Xk7xuStS zZ%uhOJJ&Pj#u9xNRKWlYRr)ELort(WSYMsB{`y4x*8p7Y%c6VW3{_%-Zf>9^&W*mQ zE8xUgZ$A9O2p(j;^$FzDxZ8rfq;_&r4D4@y@XY{3Tx=Yp$uyE#SA~h@6iJk=ga~&J zCa4NcPV3J;JKT^9yLYyT>*j#Dj1wCgw#8tF2K|U}t>)i+bN1~el~8d%mxRC{u3xF~ zrsy!+CGl7ji8CukNWc4$;#FdZKn{H0DFAvewg`T6?zyztz^%)7_bpu#Z#C$)y&b#m z%HDW(o!@DIDvdRk_>9>cn>F~MUPcnz_U+=oN}>ho7s5Az=X}3{5<2hP6H{VPzt$|> zVzvd0Cf976<>bzZN>7V9xr1g8O;Fz(c8z{u{F>h~isWw~3J?7@S_E+r3m3-3n?L;F zNRtz``d<3)y|_1M|9ZmtGCbcOh3C?qo#>zeNvKUJnvUn)wnJ5v zqQ3&r0|fV*1HR0(z_wXvf-uh+z+d6E$IFtSz1PtW?4V09-cp!HJLSUR6~y)c`a8p` zB%SyB0Pi2bPyY_|JDty*Ng28fQPW1L(r-=ALK1vEBiMfO_MY)DUlDhIIs+`6;o@=| z>OqG&5zkKE=W<;+cYGLPN4anc!NtA>6|#qB3ZJ)Qg**0yZ`^_&Zw|}WyjglXOk|Kx zqx9nbG!k3m{D$m#ed2kSjL6A7mlriWrovr?&HcR{_yb4~&kUEDcNacCyb#BsK~Hwm zgt1W~zdTFLXyJ31ub4=!G@AWwH?8!b%nbfV?2vqNG)ePXx^}e4DuPCfX3lv=vd~9* zqH&$1>ba=e=9oXvJg~ogGTdYMLSp8-W4hNbxCnD&?}TaSSRTq*6W5Z?9@v93O3=pcES&bgkR)@_4W8G z1b8CD>(6iZQCd9qI(miZeo(=0gHGpZp(97=s0}&>mneYqi#Lk_dM|@p_!r91Vk3#- z8;N*al9W!8jAN3Nr8_n=QM{A1eyIQGY$OwL zDaJY}rj99QEn11GDON2hHX|vv8!2|U)ZHD;DaTZM|5S&}RHv5Ivm>eJH&RKsw2L}v zZjNb}{nOkt(>z=tCE-*imB?VnE0Ob=*DzdMp1w2@B1WrXTvggItJ z_-D{EGoo8EVn;IKH!|qB%p{%66vxb{jkL7P%&eBooRQ4DjZ6kEt57Ga*fFcbKP%jW zZ_i*_)ks$LMivv7U8j@X;F#U$pWU39-8!GTXV9TzBb$ZG>C(x0>X_5*pVO0>^P(lE zZzQLGBWD1Y`$i{s$T9c5f9`N*?r2Ny_(<-Aqr;<(+$oMu-i%}37yrDu%)Et`yv32c z<&C_R`Rqwt{tw6eU;g>)nfaS7`P(D;J36^*8~I>e2GofG3t%9!7(AP~<|7Q$CIcf< zAfQ_y=s-s1!MnxbzVrKxg!azM{L16&k#w=!B z;HK(8u_2C|}9FM!<@!2=Vtk1&8>^Toe` z?*0t7D*^~W>VU2_0DxG4m@fd308iaAX8_O!Aj?igy9a}TYz3kmv#6r-S1as+G#CIN zW)+p@JdoSE+z@6B4g=!aGRZ%}$4{z(`vT^k;7~ z{>`bVRucvqmCVNR(ACz`^Nf1P3TzTUYj_nkFcfx3T+6MnaRA+ghe9ww8e2h*@KA1r zm*UA~7LAAoqj4C3KrvbMg`4x-=Md0x0KiY+UZ9}vFlvtambj(Y*|nB&C@3%A`z|XF z(HOuPaHHVP}kV7ag3vpUPI_tK1tE$9U`C1SEkh#+RGXOajC}*G*d;; znC$zh)^)iQW)u-tjB!k6)K^wE_!BCIh)tlq)et&}Kmik0Kts{RRtxB2pjbOQ#SH-N zUayAx!Wpe8)CI1qmBnC}Y8L9lmn=k4MYSg{fZ}{D-&v`#!8k6k01zG; z_p*JO7cgP9OTH`*cW*oC%+&P)&Jjz$1U!N>xpuxjb^{O?A_6meQm_`e=L#@8H7qvnI89XU3O5_H(IA0&{3Z26B z9@~y`mE(DeSx{ZQwuaW4rO_w9MoZ$Yc6Wz0O96l}qnhf}esZO<)4F2EuHBeWQW*h& zd|SnkfcVu?$#GpTHh>eY!QpxAy^0LRtM6L3^`+IF7j>MVbZAqUfvm<{B2O`$=hirP zA)SY6g{E5ZmA?eZwv`=20#eA%BHhkC+nxO!yW;YJ@_pN#&R0w42DrT`5HqJ|(V-B4 z#MMB2UQT*WK3ky{+mR!JW>mmH1fbNbx{y$DGrG_QhxQ>AUtC1@XM=neN`+!yO2Gi+ z%7Y-0&ihvAgsNv@RSh;CykgAW#AgkkV+u1ZiU9m$YBWmB3k(TC&5bqfuS7Tx)I>31 zm0OOhlny)Hj`G;XcXwI#7{@eGzz5&V!83&`6`pN7Ixk-JveQGnY|ti@Q3w^5TaKMp zur5k>9}F$lR!0qZlpRn?y$W2TpjIg;Z+dM;B?O=Vg?LP}Cjenp%;j_j50v%;kdtFQ zRHvF=Ui2!P?;w9on5ZqsxfRearS~$Tz`2ht4%?j< z*d2`1HP@mcF4m72yzQI106zwV(*ux85hOj}(7oCt0Zb|n+LKj9d;tmsD)#C_;iMuM z9z2`#6ku|V6QH=O57Mu_cVj)k?FZ+K>RxRf+F`>$1N~7;u-I20vi@{-;YaZI0Bu5l z)tn>B$T5`%J%mS2Fxyjk+>|$w!m%T-V^JzRBP5=x$JfU1MmIqV%HHG-53zY)uC%?n zhU!}3t>IK9UwTnDd3ZcSx2Nhs%H`lBun`elJ^aaHlCRjV;`_Bv zDuWZ-e?9@?Y_I`)TI$onVJzIcbnBb|cRxR0hftVq8Lzlx;C9Ot{58Y`+|$QW2@{&A&jOs`$be$e2FA>8Lre z=YRobg;-ihdEneREkE{2#sCwD@2)W8vp~(L8%!>4WoeEkaj(4ljwc|V0gG}k_Fi1y#m<{Q;q0& 
z&fOEFRyc(6+2bYPXd{rm7r4k(qN&moyFC4BU2q2vB`};N2KsdwbGD#%^QAAMlYF80 z**&Ug2m|W9TsrRj5%Yd--z3_KF?$GvMpM9iA>ihAe#FWwN)=GG!OZzWAjAi{z=Q1g z#ZbF>4_h8Ia}h!UtM0g6Uqo)@o(skIIAZ{7mA#Ly0v7uxuDoa$h@U_ujsmQg)CE2W zsp|4QV6Ao8`9;cd?l+!l_N|I#%S6vIrz=oKF=EqYN*eD_J~W zUhE6h#4;a`>eh(^mWl|3-73!~3?Q*G7vFb6f1>5tBi)>HAbfZ7{Z&QIr*E=`-(`-i zF2sLp^aV!pzKQsJCmzHEdGdlVD1~op8z}UyBT$s{%~FROPAr!yIp(tYjZ^_Ses@jr z!cU!p-^E|mHuJRuD}a91+Ik-7dHh7Q07$?PLMG#WSc#%QE1xxAcWCEN zHezbKSbW78(0srTc~0lO3ngUd?~6gz&YL9*=hsM+Xay!zg#K%Y2$QRtzFWOPW|rpA z5fvmZXU6ZhxuCJpnjH)ZM*Id2K|Ofg?aqXHQ|h~bX}1Aro@!agC9W<4cgF(XL&Gv( zm7YiWh})jL_DpC6k%zM4*zpdUvr<-!N4$8_1zP-h3Lt&_9V;*I6%Q zf&;OLv5VoSx`KHhsE1e#OdA_@RXNzqp(pZWox@~JXU#IQWec~tou**tEIpmm7BgoD zrQ0FiHk;BZjarro*&)3oAmexIF zWy{kv{0TT_Gd9cj#EIa3=%%3A<=I!U*69Gx*Vk!u2z6tMzva1#V}kFWhwZI5NZmPl zv#CGK%xHzCaQ5}wUi?>q_D1OCt2$>2Fp_6e7sfe<2J*{Hi!WxG&UZVVeoy!imiz~O z9tIpGY&wI9)N4K$((bxc5|)HLrQVaEDSom>u=smjy!f6^{wJJ~H9LE&7u+(u{v7x6 zBlhIQ%BPYut#KtbSy#W)k`r0qdnK@HJ#+frFuZg@y?`UQpg+Z8Ms~~sr>Gbzz<)&O zAttM)vAPjYjn%MUyRu(dyO$<}6$|YV-c~CNZya3jvZ}P~`o3z8rwaL5;MaJF85(M= zc|fQUOAPC>01=_eNQ$JiG8j`3g^b_tT>$EH1jGAba~6^}krayzkQ6gC1Ua-SpQsqg z0s(9GP!?2D4NKw5RK*1M1N{9sxGpXQ-B$EbZ)dZj+rIegmip>LF$}vOG&(a~icl0IAV@I)I zKn@9EB(Fh1jMiA~K{g<^lQG{bO9EI)Yx^Iq#k=zFap4vOn&+B9RRkBHyv*Yy?T*i| z8lX0Nhy-?e++a*y+lgWu1y|V#*w{iCKk`9go6oZW*8P)VlI{R;EZ7~v7z5mN2Y7&p zfr9ohIcD7pq!|?+5qE^fQh$R#N(%*f8&6ZhORBzG5w7>BH6~aXO#&iQ8G@8!`yh_A zldrZSK|g2i6OVH)zmCxWJ8>J!4^iRc<1Hs<`_KD<^A9uSOO(`Gp$z95} zP<~VW*N*O2vVP^GVeurur>wK{g$ANEFbXk~!tC&k!)4RVk3H_&offCAPG0ToR~Ko~P~`i6_%)An&r=B= zDWC%%moije2o!fooYF-jfm;kqEa~RWByQnAZHmN>5v^EJs=zIcmAy`dpua~@`&~m7 zH>c4WPYQ*7uHeI;`p9PKqj5HDbimBAeiw~$3QMex+((p?Hht=E>z?#`-2_ft;}%C8 zf$_^cg-Vfmcn=nKW2b=7;?5_K^)3K*w%D@Cg$>wW(7kL;l2Oe!9 zw6Nd?sC?&w?NpjZVz%FRGm#eCSBx$V%$Ga zRw%REux)rswQQ~3MRtM!6g)K(3uZ7J<4#?ad3HM)K7Qj!(Iz6Bb-x^>{~03D!;eD} zbAplA)I$|M@*Prx!p4gezhpiI&8X|TV1fOOi!*!Hv|A6`0s6H@(qh)@R-9+2t4#w~ zyB)A0Dva4P*D=Rk1|$($SBBQx?eUnol~|g=aJ<7lfQXWh1~tP##v4mF}*;j>$VyLxFuT@t&135y5U`JdV4o8nvS=SmD4S1dI{p2@KJ z6SPk0$iD)wka(jWz`oyNq&I`OM0$E4a)=cB)|eYjxXZ`A8sU--AxI8+#SKs%j**c9 z)O#SXA~%3vF|>)NcO~LbVaUOu55WDm2emfaB85O4lHP4Go)y3QnA&?9M;|Qis>^`$ z_uhP$o@j>HT$P{Nt*(`aG+Tty+anPSU;PRpQ{(v%fOx8~9WmVRK2E9kZ2ba^UC+P! 
z8{Zc~t|qJypJoH3DjJ(EYVylmTZ&E5czW?>tU7fhVQ& zfD2H>6L(E}YdkenIO4;PiV!#@RQ4_#;H+9Kl_^DcSq`8z6)zW+NYN908_46=3BdOq zBQykiK*5B+UI*o?GQ3+U*%VNh&^>ont){`zXSc9!s&+MjCN%_&BGN@YVob509Vlc8 z|HLcovvOi^CN6YOM-fI#fBcc#cQY7p0=3?EftA>JV>1K@bR*$S9ETir~fHu9DsvT7X3TB1@1Xv|EK(lKcoKK#$ z-jkf61a0o(w5~-Z^YTjd;eKKZE+9;5Zlo9(NBZBmuZRhKqD#j+VUk`V4icG4IkCz& zwZq8=wK6n<2Pldwk)Ctvs~`=ZbvT4*LUunaP8ETbp&(nuz6grMW~3<*aZdEUa&|I4 zg`lk_7K{O_oJCz&D2`lYVRv<)J^H`swvGy|i*;Bqa>YaXq_z2^-5oanuH1P@;PLnbfB zoLf%W-F&(y@dARPxdOjrp#!OWFgU1Gp_g!{*szgR9?SyW-=aZ7dd#d;LR8ZRiI7z) zT#x|B^$9MVUw6wFoBs9|YOyC?k~ap`E@L{Rh*SXi_C`@lAIm6cRxY($7TKaKA< ze|Q=e2ng*(V_>H-A=*k5+y3acbZ6jB~9}u0Q%Z#mKg%+NSkB~h=XEsIRm*jpydb* zmkSXDBb!+hDEE|OKi-5iV!}dRBI+uO+|(j$YinPfHC2H^Rr|Q5htextmH()TX}Q1p zfGkRbQtqlT>(;c6U?RQ{5nTmbE~HFs3{!(p@?0p)lPGkcEmwnGCtuKFX8FLruuB#b zf$viNIG!Baldmffa>0VDK1I6C6s}C9+;EdTxSqF=ln#DGg*f+xtZ z6EsRnFHL_`%RCBl6B7=&%hDXdPp(H8@<%*_q*(4h_5<@6P62t6VRkyg!l5AN71)!T z0E-5k@oIo$DF*(yr>AVZawM_t!Yc$}(=ze5I!9a_xCQRP1V76jPPF~iC2*+V*p*O1 zV4!8?_UXBX+~DC@{S`Yh5ecJF?7Ik-$M@-shTc^zfJ;hspG2tH3EU(B2}Xj~9@ONz zQ4Z25#|sXR`9cKQwFg-7l6Gd+yYB*QfSQ;*K%hK~rHJ{%)CjuAbkVoSxeyw#|0F`6 z&Fp1R-i&}7zenV%Q_#SX->Seqd>hwwB@UDb$b+GQ2Qt3pE%~&w_5*QVypbMu5VZp_SA~Ip2QoC~Fw4 zebp%8*kUor-9~K_fg7_g94-hhOA0=ayXcUrGB;wGQ5Z`&v*ygdu$)Py)b*Fq@`P5|v_=uAhNn0l#_ZLN7(72Y50MRy$79 zsRdUt-uQahM6ylEyV}SDyQ=@9wvg67H zY0SVAp$^p3BZp2pLzHzK?>t0Lk#aiUyM;1!KG&oE(J+2&96 zkC4wsLRK6&5=N1O%!}c8?a{**PwE+nG{*Ikc$M>bpXZKac1X zxFq>DiN* zM442->*9lOowk@vAXzw5E+6{kkj!%1`tv+?VA52IwmL;J>2SMYF_o|m_iP^06#E?R zpxSa=yr27$P{fR~n`?T}XTkn2Zly755#N*B%pYRNFbpD4L=X zlAqnr`6eCbI$0K52*3c7h${Hs2C3 z-@G|TahZpB%RZn59fl0^yy+Le_C{r?A>#QJ>hs>)S2HK(zpb%8XkHu1yEb}xarixHsCudR z{58&q{k4xf*VriUNpbHf74K;S?-^U~&*#0rcze$VdC$dr&*ynBRC|Bz@LufqUYhV; zUiSXBMQe*XG{oKvDD5jlj=I0LAMlKiHw6BHh81afTfb3w?KcVCWLcHL6 zr4gk|T)z|PbWe1tTZ^!-;GKwfm!nt)zPSABB3dXOe7eR*dsGl5e3e_M$5Tst0?~{J z&@TO~z=N#gvecn~Xt}<+y(svQ^&24unXQ-J223LdtEt6 zt5;nxASkUhsq(s3@Qq);;QDrzzDi$LEwmulQ{Wj&r#$ve8X*)%3_GxS@{u5y6_|%* z>e}eShyJB9gS4^RH!btsk84v*Lw{)YUHgPe8f);g#XL4IJ$KyvYh(A#>W*vY8gJxh z^jM~UvwB{Bw8keQ?`Go!5|`{_9%&&gU2*jF&8l{gi^Y$fh#MCV-d?bMgyfl@F3UP% zD)K&yKpqAg*M{OYDX^fvOT1J8YCtw4@cg!JS6nM&W#M3pHq{0=!taGl%0`7lWHWLMmt~9M;#bAum4HB@=0vAk! z016sIn$rYEGS;(Lw)v)ofol_p!o3bW$Luvups_u+P8flj_G9~6L`zqbJ11+<>&eGX zD=v!UlyIl_cBX_ihtzh0TqtmTS%|0TDrN6Q-ir7(lk`V}Ac-Gox6epFfhFJA&{`|s zIur~*GC<9}p5c_u`~d(?c^FL9j!kRAVfjKYKmamDQ0fPh(Z&g#Gh8{P>0NL8@C8B$ zYiFBrLK3^jhc&0=;S@}ynp)Bi|A^4g!hTzY^8i^TeXo*8f}pbj-#9_@Uw9!3cw1`? 
z)NO?&ZL~T8M5M`(n+0LmP*ZYg_YIkF!r&5jK*DZ^n2h8Y7y${~-hj9Pd5_`t2wSU} zV-A2d^n>a8?}PCNLfuT)x2_mNje{@1$X{u$F@`5VuubPK{#;@x?A?Z77fYPT#xR;Z!T_Fxabt0)6d@>UZ>KDL+ff%lpMyd}op|Q4H^r&dXlb{b$%CJ|<0Ymy z>i1AmKI}GG;L3zPT!acjPxE+{hv4-g&PQ+xEgU$&h=L3}`zb#MOq;?7`S2!8L5iij z;mxYvnd--uFhrvph3c$+~}y;RfWe&Ogkh z!Cj0S-;g*AG35+lzsxCbc<;Mjz`1#{TNq$HJObF@rP5tNRfFB4)=(m!QCIsU3@O#K zr&DByd8tbv_YW20BD+!l{8X__Gl}XuI1m=KV|RlH^Zs!w-FJmRq>6e~O?G2!9u}vc zRof|y2X#4sAvlwD!A|>4?nA)YtAm`kd6D>N@}($caYkm)B&e4qy6^f8qWqaI=5vui zEd654PO}RT{u&FU2TdXwIMlg~?n#=$ec8PzMF9mGXz6AcG3x5le1a5gU3=b)NoST~q(%sLW z00N?6Lxxf|A!#%}9AB1EQuj&cV+T>}4A82B(6GeLdMFVZ2OB6cjFaNSck%zYr96K9 zVvoHO0EI4x|86lZ|)=P|X6?SQ% z)VUGO<(p=>;P5oVlHcy3pvQQ_&or}=tz`drV_$+QWvgOGiiHWOMmcLEdprV_bWY{_ zV$ecp=hRc}&rU(P1&EejVG34nO)~H~hM{#o(D(@=bgxtOlL^5=KfVKbc1Y$AISQ`K zQ|f500uBzSyx=q=-IGn3@SuwvxC3DJGaODggTgBlj1=U-wGB^iDs3Ez1hX;9j7U5T zd)oci-VpZPI6v|h9$});#h=6sd)_<2xVq{EQZAKL#`J}E)5~-}+ zqZ~DDaW4!+kwi@6UYl@l)HSq#W@aVSNksWH$yDEMX)u8tp=C;e2u9%{5fE5>pmLRw^` zARF9N&`d9&t`P%^lRN?i>jKIR&)sAyU(49M#oc7%8EC-HD$NhTHus3iA<7UzLhu`t z3~st$r`AK+*ARMv=;!YMog<&o)Vs>1hr78hZ4$@-K5ETcf4Uf} zb3kP^reJz2E{vfCuKh0<<1f#`Uv)*hJ;1wuU}>wIf4~?AY{?ETr021ClV~x^y5ins zg_9NkgfR|x=UCR4yvqKo3bA~x=qR85UoghsRft?DOV2_1x21MPe^nu}A=K*?_CMx{ zl^<$$VGOR3J%w*;-T#6y(iF=>kZhiY?`W=<`rv7voQ7G6tj1j!Lp=7qfbpZ7Cq*u= zr!%K_VGL{Avfrf7y%~r8RfQ;BsoT8n7G7U)vhw`VVZMGI;*9{8qiu~#Y1PN`cJCvb zzJ0QBeG;47$?#2k7P1RtoGp0~r(U!36|wKu+Sg%O=Q}NbK6Nik8g6pJ@*cHstnDg= zFZ^F|AWbI_VT!wMNnwwfL<_m{nW$&`O_4^CIX7P>of0XwwzMS;6&nwhrMyf*;7)v| z#F={79eU+c0RadCO61yu~x-S*CGGBoo^gm9(yyGMZ$W)=` zl#bUqnNln?kRm5)IEyEgLxeH1`>mfZ4t-|!@eElX*F^kKFaZ^(%o{4HTf3DC`V&u- z3xL*MR}1Z{98eyeXf8?Bd6h_KvgKq{%%}=bFQZ;eR`uN`N^iGkvVc&p>9|rrVkh+6 zE`qVun&F|IUD)us4w)Ha_Y|RM!{5#Iag2GlB}kY_d?}Z_zm(_b=;t1;>%1rG_6P6J ze`&%mc*t^7#nioD9-ZRTuXH^2;DFl8(o)jltxwZ+>MySJ_%@_~5Hr32s~%FgJI&~`x)RpO>rNOAt8W zjuDipH8JU98>%Z{l)jh9d@N%VFE(TNJdel7=&%fXR!zfw6Y;O=FYZ0vg`zgBN~ktW zDPfX`HyP5h6J-oRK7kN5O*gbpRkR3B;7BC$I{_+LB*EoJjT~Q!fg)s6Zk~#e2SYpOUHQQgiXqtaQ4`3rxD35QW&VN;f|7Gz)12*TN-b zCy{U9-<&NrYBWMQBPhJ;gCsC)sxN{zg-=qjloLF`u{i7P%WFO#fkAXx-Zh=c6I{Kb zwWsiva~gl7HovK{8UTclPv8%CKDo!Kj_e}^O&aY z-_v^pSJ-_is3B<24~1iAt>~A;W+I1Ziu<_LY(mC`q{Q`Uu<|mbcy)u^u?JHJETcNh zG)*bkQ%c$%6}DwxuHz@h&4lwe--TISFFQTHU=C@1x0QNYfqM5V@Hy)pvhHE!{zTh|I*27&QznHY6RTp<*jBeduwe+|uH~Ig7G5Q03)id}WxSLy#zRCL4 zP08yf58}YM{5E;yqx&e`uovJT+Q8C>rb1bzdP1n)dcY)3NfOyE{GHVhA|?v`X*6<>re2gTAI1dltA`hFh*_k znZ7CEw)JO(_}bVjHq#Oi zO=_l}3{5XWh`#;f*hGBcf$k`OZyt)s zG0|R+d|dwKc|q}}BK(4#-wp|5cYbMTuVZs<&Ap^RTYb6w>iFg(B_H!I1S8&?M^Qm% z_br5opvxy8=7P_Svrozi7UQ$=A0I==z?Lasw9s)OC~#~s>(jx* zWvDy9|J`QW06GBz1G{I)2-sgX+r9rb-Tcq5{^!;B@9=C$?M~UUyWhC{3wi!)%7!X$ zS28gFF=Zd}9Pgr4Meo)7o>Ds*y#<@E`F6^{RN z%C0z_X2Jg^b}kYhUilaD{ExzM>sHJELZ0=8pj~o%H3mMuY}&OKQy}m?iHDS*)`SWw zpYLm~b#1vX{!b$Ry1e_&hduSPuXm*+;zy6yySypeow8qz*7dz-y1vXinmxl6SL5|{ z;?-YMHjBzDVcY&U@@#!hSD^O$>_CyVIk!=9%=|9${Hgy@+wX7eqaF8CFdhHrDceVa zJ@k@-kSimZx9^{0Q|CVelVL*k+IBLKeJ+y`Vz=ieBk>UmQ#6^h|IoNgjEx~sbXBsKZ5L^>w;vZbN?P> z?<)UxH$41nT@baLlX<;rq+U;P3i)3_X5Cc%v5ma8a4-7gfzihlt`|8iO@Fx?{t;x; z&u@94P&OkaPs8Z{wJtC*krS=0o2TCA7yi?<+3v7d^qBfiOKMG|z?d z5}49}&>j7^g)w(@IVsL$(>nWJ!_X{xCzSuk!q*xWOnH&{RM=~=wnwgVv93?^>tg+Y znetM@kiFN^!{N)7OO4~Vzb-YgBSe>>f^H-*pEmM%R6<}i!sS{+mk3(QWPlL@LCjQs z>)8JF_1j|rzJG-U-FtPV6Cq!<(uLGoTzP^q-@p1)=*-pCXV@!MtKDLE7Oy`>x)YyZ zXp7CWQOx`HmBiGcn1c|)^Zjdm>L0GI{eN^f90H>lD+#i-cwL8%l;A!J8AgYpGBgy}^ zE;zplKk6$jHT#XnTa23Z_F^auaW+o#^`qxv?fp*<*|mS&T^Hz&JwN#Oy5QV~VFRnk zKf&~Nki7>Ib@MCqd1-DZT!FD-1nd5^-(cfQ(Wmy6M+p~pHs;ND-3%f@uIXV{oPd`g54y~akg<*r(J|{l_czryWt;=0`6|*i4IQe?~ns?0EjSb47ijW 
z{CWRD)KTQrO5U)Zq=2GfV}5FBeSm~_V!>(CjC?4iKe?)C6fz=i6tB8U=T4z(dd92RqxaVwUvId=3$ z0Prok`Ds=GgWE@lt$0lbbnH!1tUXy8B1Z_5yUV`13Hki;_Q*@?U3Y`++fVQ*V$Z8T zs=ls?h-Hg17z*!q8ONAG4xXte8~f#H*3TY(Y0f$3yLrPeb0}mFJ?bFeHj~?gH3ctJ zB+Bl%2Ez}?joSIX2+k0IaU9$M4_~`|Y+hK;b^#=)d_V$a#1PmcegwTqQIle1p@$0v z&TN`^?hNo?HOeeIpATNpG>YUPE9|1=ZM2ZZ`d^?kzlI!@*S1Y=FWoKeGr+&LdLLV{ zb++IYGwZYv@@HktnxtC7NLb*X6$n$XO98iVBIFIl$DYAhQ73VU;CHV(H7>!L%cy9? zP>k5MF6Vpx&*0m182*~ERi<`G9&F&gVXOyEa`X(GWn0^zue=z+qOefHRSdoZf`%;- zrD<;yRx@mLj|`tGM#)q>|5{-mr|4Y6d1K#L_-bkVsmRHYLX9l<2ba0#0!&qcSWNzL zDU&N-o@hz^h<<#RvgZ)9S3-@IQWm>xoG@%A+i|c+`_&l%2-Y#v^Jo!=khjOR^&|WI z^>&T|!+X~2`PH;Wk+61gyA%=Q_d7l1(Bn^(T%Db>-@R{C*CX89+umr?HQ3C1cn%|A zH69Vf0QcBPt7YFg^UUe(6I~L~?b!lw@&ut(<=WiE%x1f`3M}aJ0crB19susZALEwl zt3R!D0k(w=9@#cF%4xl@GvOv>s+&M`dmeWXNxppc)q;Jcu5;EA|9r!KwIQ*qlTv<7 zXOY9z;e2DzUX}Eg=Q%47xvJrGr?(uyWvDocTKbXi)2+;dXUq0p*nc)gx8a#bn=1Kn zY=@55#=sur_oTJTmi)5!YBoQklJb?>g0(YKyWY+klzW*R{gnK+@29$qQQX{#uY4`& zU)~xo_VJQW_pQrsd=zmGZIt!bXPn8%e{y0f<&Sw_P8@W?oEVr z0T=A67Atz4rp{+?j%I8Z(-mLMNm1#hiOW$*Es}a?!Y6wV1zl817igU2 zYxrW|P|GQ!?BTZ}4*RcWfWqd`ujUSlPG_e)?5o8Y1zaSlU^4OVMjvnI9;9$g;t-$b zm=NCQk%6nha%xD>cg7suT8!`OY$Ey^PBJGrL~ETPp0|b_lRHqv~(+cIoW4>#nN1~Bxyw^ zQt#2c!qe`eeL<$9KPs`q<&=&^|s_ zJEz;Xen(x9q0Z#zM2;Vt4IH>%_DkXXQtWn@qY$-bb4uc9j>ztxYS;yo#QAK9XnRk^ z9lQ5|Yecl$LZ`*`;FkGb_?IU6-<(5vtGwPrcQ@Z1dpr%uJqj-8{ONxwhuc|=S)IW5Hh5+)1x^mZW`u}f z)xV$N`}rFwI83SkeQ*1Wc(Yc}o;j|2O(FY-LsiI>kr^&=TIlcRejjJ=)q+D*;L1%3F_OV51AQql+D) zOUTh>8POHZ(N)9I)$7qrY)qYYOoKyAB8|{cW_W%D8PZG{rv#U4gz_2l691c!ua1DxA&IoJcqxOt;N!3tthf%ExQ#+=s8MWEH#NqB_c1+owfSG}h7r2C z9e5xS)<@#~I2RX|5xjYaraK%g)I;OOCGNT#VlKgnZUc z`Zsq2PV5-BoLuT{8K7YQ}~-x3}|#) z%hZ3m8#ZF?N8%1frA(Ej*frCR`ltQV-GCGG*hu(D{SS9T<7)P<076VB z`>A7AcT{%YKim!eg8jJcP7i(^3%V7@F;}xC_iuMY&d2hwcy0?UD5GQ}^>b$4U+#v? zjITPhrA(1;ipev$wBIW`3Mv)-rcy)OtR2^e=aVwg)4yH~r{v{xo;`d+xv8 z4V&=-t%-v=SmDuBqy$f{WEP{Lg@M)0SJW+1cPe^QTe#&Rh6ClxdKMXM z77;5%wWAs7s)YpIOq@uuRco=02-burUWzAkhMcW7T5KP1-@yr^{Gt#$nf}%>Ta}05 zs9WO3Q|xS2+^br6G9c&DXo+`p;T4|yDx3GcvWk5IO34e1dQ9no7x^bV>7^c}{<>vh zR{3|W3W5qsC`$PkIZkEqn`J$gNRBVhejG2eJnXDej-FGF3r~68X8A%dGL0t(pCzD% z%Re<*#%Qgmiq0<#D0fsWmAYPWc(kH1pfcRCoLP~W86eOwTHY*D)#a2{x`NJQR2Gj` z%B@yfINg7;SvAm_oJWAwWL3HI2=s}R^@}_hAAKNa`JiYs=ffud$cwUxfa*D`Bz9|M zou$C%tcrP&n(sUbL(z%vl`17ytFP+TY>(EwaHv__%hS{!-fg zk9+{Fd;rU43OUyfBtrWKm=lf_3I#Qxp0#+QI4o80%aXXS7Z`yS^S&uhv%k!bw29?SZadgQ=IWz9>^nkE!X*JTlf`t0!#kRYzN> zR^+WY7WmL15OZ9w{-mCOkzTE%XroTsLj%2P@zsYHvm1|DH)eP??h9%36m2qj*;s2; zKjZXpOQ|WqtM=y0O7EBazHLpRymj}+s?WzXN$534>(v|-t+&{!Kh@SC(AJzXR)ZgC zK0MZl4s1%QtV`W$S#@d#w^h-U8yJDDT}rK%)~&f-55a@TGSRm3ugxdCD((ly*JZbL zD)Gu}H3zrx8^yG>>a{;`g7s7A565botlRr|D_#(HLI3u%UTqJQle)&*hn>sZ@JYRu z4W!Yk51i~rQ-P%}tVeGv+uy~|C-piOtxG;D|39^z`8yQ++pxO}k+m#Yh6t5H$dYA< z>}Iiyb!ITO5+eyI+g(Y@Sj)bZREkKEZH#?4jAg7dW8Y`&yI~&p_jta~5AX53$NRiL zJpaV!IIin+p4VC9UR`;w8cT!Mu6W_ry~;N;aJ4^iJBPKpOPs6A)feCBqS-NU@WE-w)eUL(w zi(!6U!!{_%NwHh_Nb5y)5E&&=f0ZrQ@ji1;{)n%KEGqd!uRfZf*CiJG^ z%BJ|>3J6EDO=hMgy;}GroK9+POKVo+z#37T-7?diy&G;Ln%%K2sZU#OB7Sqp=E!ul z_=8e?KsAp*pC2PyL$p$!U~o^pYeKb7?)|h1VDq1W+TJn{DUqbQi`dp^Ks$7#4Gd^o zy3wAhmHZx1nT)AVqqkdBw!=v6@f;mr5Q(|;#=MY@!pwKyI2sE`9ltP%#T>11TAdY` zjy-t8!>-Db%+5Med>OT~9MIJQh-<{w)`oO-VPo4v8ahc`eRNDuW=pRYez*`rlx=yC ziKlc)jA5Hy>G&CMiAn4?GNOAaQ*^#C|G7-tGN5O)w>$ixJ4Lo<&s%g0l+m2oa|{qY z1bvO-=wToT`16FRr+AhcLiKaPC3V7X7vTb#P)%(JBK7cQ_5PV8K#j9M7xs#C3Ib?O z=S2IifQ2OGjHg5TWJ3jHj(8BHJ|($6eg?YneivRE*slTRy-DLytm(gl<^_^@bVLUX zV|ec3c??1aKtLXo8Xhz90O)Aon1^VI9)O{Vwwwd@HGObkKQgP^=|?{*tKXH}=N{90 zucqIVvo{+%h(Pvhb96r798w|=?$8GVIS2iaO__+H;LyIHnD0!Y!}rNUAo`HcQEw!g 
z@RHUW6FL+HZ2rzMoRHNUC;AB!IGt3pWu}dAdyh)QjQSOhisMJ$ zP)AecMp3feE^=eX9AmFy#$v%^YUr`g__5`}vGa1Iue7n6!meaAX&pp*h9_mkkS^8? z6GBOAot^zzLm5aio=O^H;K{92@`NaP2;KWPmNcD(pUR@(<;e43G8IT#32hq{9p6Aw zRI^9~3}px3wM-r#r;nd1qHNOI)_@aCvEz@?oo8bw`ei4$C}d{GcDm@~Z;nX;pDw}d z$vW&LAhuKNc=8uu>T-97l;hNwkg03O?bmTrsiY}+v33Qq>9<iLlD>agSa5|M=$ zVO0^13ptG7g(yhnTin9iv4vRqig>ZbXPS%e#mbYz7Ckc-)5ZQ|QWotF7e5@A=4vm2 z;Y(j2rTN)Q8r4gMvA>Ismt;kjf64zUbzBw?UaoK~slqL@jVImI3=3X|f9S4nX{XX6r z3|X^$SfUrcy_L4@5SNV@-=5pwb}r6x(b=&8ZQn+3dpJ?=bCVv_?zB;M@{V?VYpH&# z-4RGqK-~5dr=F(_QZTf{{nB2I`d&nBMr8P2W!m1`o^;Ik-uL~z_?)yvo&6O1{bZ-K z)SUgO%KglTso9MEK+-N9u$>!D%ezU?Ie8Vb57@xFg+1RtUOEh;9&pzjltO8L#(UGV z4(BxvaZbr~m+0x(Ln+R~CdNujabI~BeHuqspb@(tt`JuH`*e;L9gp-wkA@jNE2G5& zr17H;=-M03jf$JczZgg6qQ?t5`Hkbpn)}CVs~t;DM5+$s;dRDdapFNOqm+ZE_Wseq zHdRP1Vu|m2^pHZfPSkm6NS>WVZetKTQDVj;exgo+p&u({-#26OUdFn}ZKQA3ELG9H zJI}s<4xFhTurf2!KM(z&`-)52VPL@`9mRA(xy?e-FxuiXq$xk)gz1q;@R=PYF5CV@ zHOETB2dVa@o^jVj?be(dUYURGF0%`mB;LCe?rA+XwCYs%oSFLyVt5Tz=beU|-5HQT zw!uUl45Ij@T)NU!9?p?QHt!L#`KtqjcOSV81oBT2JJq(lhD+RgzA%tuJLA3J$?NZ< zg?}x@ByD=sAW3_VX4;c(GBPttrXz8ZXYPSB_XDUSWe?{mp`SPVuu!ucGh~q0>LRiD z-Z-VkX{}l)_UcVyNb6+E`0D)lI3xU&#oJ=V{@j9x-w$QSZrouJ@n6$ry(hv`^_r{n z;jilM-|~7~np+HX1#k7% z3e6Yv8nz4Pa_DS@!Te0FWGKtGQKGP{%@gq?f{E9u zZ~+wFguMX1J^lW1_iFNGFz(uj|6>DvuI5>Yk*ullVp7RtE8Ywd3#$Skd#3k-1c zw=}&GKi&!cZ6M~3@Y%9}em-e=A@_7ILwUC< zzCTazW$QLpViQ`aPw(f3+U%=exJ6W!gs90Mc;>4d3e__{jAITS6dtA@=%p6;Yk2)^ zRP|cU_ZK+yE)BnY*i^)wbsJqVz2iv}DcFJeU^gL43u_VnLUz^TgTcNH%idn@=aN*b zeOoraWaKuBq-fDvqun&-imwh~*E+s>iwvcyT@G{D?fTbS z2DQ!(pN|U;NfgM$EZ@7c&0YhuC0k}&Cbjs@q@R+}Rxh$lb~BlkMYFwkpjf78w$I9kdcJc! zwoFxIn^TBsPjnHpN*jDIJ1#EN${}x+Ugv#9_zZ)s&eNmco)|H$!k!QI?UqWLRGz!t zE(3a;-Jh}u`zk*&0)gXrvwyvx*Rr;U2B%-h+2@TTVo4Du)=Ov8Mq!-V{NMTCFTo`2 ztPA9M3hY%TY$}EC6o(~+qxcJs+8k{5viN^2SuH!X580GI<1TtsMnNndXjM~Go<1es zSl=5R8b9LyS;0$ncCxi={K8f8hM(*rRG`ji)wc0VNRpkt2DfiO3jBW7PxV+IwC`!* zEdBTypk~Hz(&zE0^c(-!1N#Ea{;G3j#R0iqp1~?(6oChoMD&`=q0+`3I zeD+ZVzEkxRyRI@EvS_AoNEve4C*C|HMa%8&T#TUe`(jxU&ihBZ@V^XyGINuh{eglF zdsaUy1+`Etrh<)QeQS{gL=-oay-C8*`PEne>cZ~BCYhDwH;b=a1Tu)thc6ys`ZZji z6Ng*8n;$j!W2eh^1YHPQ;_>bwWZV%$v1Ux}SHeTg%t)nQ`{ZoVyP&U>zU-?Vz957y}aC29UXw8ys}VdbB6H+0w{ z1>N^G+xz>7&C-?MRegW6eENU_d)W7k5-b8ea?>!&2(>|Cz_R~O)Y%8FDQqL{l*ykG zh*0iI zsjcP%mY;TlDf@H1xc$(kXHM(m47<(UIwZ{Z(v#MdO59O~l0C<b>BLEB&ukKV?68x3pY#@aU7zCi6640l6#J z^F}Q2Q(P9+mQkfLQnT}GRTyt(bT~HIy%U#1ZV;%|^uK|l;ii!*&%bo)G-U>UJ&UVE zzs5{**KQl!z`5ED9qI#adg9E&Hf{^FPtuB>*3M_GB-heE6EiM`yc7#DQ)CU))lTW& z+~4|ga%SOd-@#q8n#(1v46Ith`-9PpxNS^Y^@>bGaQ~&nT}f`-+Cyka1N8G&`)ld7 zjM{N`t=fmhuaBk+7#u!MIiUjt?c#dKv(?ur0zKw0_ke#`Hpaj0wHVb>&G2EnPrNxB zbpda>Z=Ro-R=)3|(ph(RHrZe7x7Q-)DsX75(brPGv zxF8n&y_e_Q1a*xgIA)^u@5Y=8siT>`S`(>%wEqGoh+TMxYHKpL0$S_l0Jm052fpya~N0v3@l|0th5YZ zAOmZJfsMC;ZHR###=t()z@g9pjy14^v7vaFb?-Ns{N&x|y~__^yR@9HDaER}7@_I= z(vWoeUWnoS5W^d(h7Sr2y|9MfU503qp%2yYA>GiI!{`yf$WPYjv6hiP$S43|6zFaA zB*Z8PWArrB=vkprFxJQiV;Jh+V(9-;>u-J32j<%m4YYT9r=(iAK>7xG%#nG_@Sm?> z#>|!{#(#br1H!|M=`zL?GR0DjlSEmQcUmUzK_*EElVop`ln|3tj7eIi zNqV742G%6A%Os0rl1(+qp__c58^;V7M^hu-VA@`L8%F|+Nu9t0P3?%%#_J`Can5Z~ zdB)Ks_l z@MwhgxwN|)P56k+>~Aw*H<+n|I26wZJ;XxkUC<*E^q2}|(4nU|&HoZLKMjN&ya#Ur z^!YGd=~mEig*SjtiQlkYlr8XQEpraMIVahii)PM!WX{8BabDEof}8~}(1H(a!H={M zKwDf4wGfQ45X!O;{$U|4EHlQo252RVB`xDMG_baHhu+@N+M2Whb*7jM}4nM5nHP#5c zHIi)YNV9f2vWC90wv+Btc+V}NPlQ<$Np3_N=|Nk5oBJ^~o>?{ze%N@`*m&bl3~L)7 zn$5!_8(&V_N20cVa<-3ww*Fw-0Hkdo+SWT@&_#u4CC!}_q&PA0{?4nRdjYm#G~4ha z+rK&Oo{QSOkh6OUw2J`SJt5k<%=XeW2M_XXgOkh|Apv%gHFg-hT@2YSmSz`sWEanA zpCD?VC};l;X#XB;pMn_}@bXZ2MGU+$3tsgDUR?vn;^8$}W8Z;;Uomf!hc$C(@DF5oBM{L9Ml>T4 
diff --git a/examples/hello_world_flutter/hello_world_flutter_openai.gif b/examples/hello_world_flutter/hello_world_flutter_openai.gif
deleted file mode 100644
index d0547b0897f8dbcb7fac1ab12dcb3ece0533b5d3..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 119360
zq^+g^$t}{*kWB=^6e5X5_`MUBn5$(Ery&1DdESo(@618W0IJFwsxqE5TqiyOD9Lw1 z0E+ipA}xQ|P#X4W&rYf-MH%}zd30h}jj zndn2?X7;(~l%WRBu#Ra=)0rw&Ni>5~p~k`#cze98WpS!A%2pP-=+&A!Cwf$NCNv15 zO)V5bFk69Yl!xL}r7UG9OMwp8tPRNO0$=-9;;Pkg@~otP%8rLto#YfByx<5oH@OOKz!JBoR2+{_z!#RW zm94tw~zDTO9WMb}*uqZt2y*~F7Ue%;%%g_{vKJ-$%Rr)~4n_ zs8NNp5>ne#CFoC*_6vfPJKJQvLb9L-&LLI(b!9`VQp)mFf)$SpNumZ?r~$TcpI42% z4R_c=AQth6xl18tc=*JRg)BJ(S-@}dy1e9NwXol3D_R=a0tFr5Bc&+I``I(C1=F&T zg$zoe8o7s#e%wI=xWpWwm68LK9dMhSxg|Er$j>IB07Z?+B^pwHXifC6=Z!j7+xo*P z4e?2~yN>SO`jV>c8jMRQ@IDfJ#bKtm!fWP22!K+C4?iUd9e%`4B67tJ|5wH}{z)Td zQsbbcS^Bm)#Zq#z;t>CE9OfVn@`bxRon>b1nGah%l&qQ7wf2RH0WQu4Y_*(e8=}v_ z8FUp69q0*v`Mk4u!NJQt@Cv)D5sM!fHP$GX}FZ}j0MkOPQMJW-Q=H18>% z1Epm=;V1vP4R^!sGmrJY=kD*j=biGPXI->sOt8?KetFUr!!&$xZ!{xz^GzbLtyzzE zOrHMsxX-=rA;(D7vp)0Km3!=QE_2#{e)q^vzVeqZwA8P@^{#&=NjP73yE9`8$7jCw zx6gguOLq&;hkpO?Vc)sVvmX1l@4oui&wiJGfAzn|x0A9ved-r~7T4#%1lrI4{`;TL z6959>2MqBSebN^a{MLE+rv(jg01o(o5Ey|HIDr&cffjgy7?^JsCIEG|chGuw% zXNUqZPyq}R0TB=bAP@s6fC6Bs13vHt(tryE2zPN;f5V4!vxk3OkO{W13x;@zh?t0q zxQL9{h>rjGh>#eGk~oQ!Sc#T+iI|v)nz)Ia*omI_iJ%yYqBx4ASc;~2il~^1r$`H@ zunC(G2vUFpIM4#LSc|q;i$8z{gm4IlunDK|i=lW5UvLb1xPxg{683k0z90skzzemY z3%YO%(m0LOSdG?rjo6rt+PIC}*p1%!jo=uL;y8}vSdQj+j_8<<>bQ>V*pBY_j_??d z@;Hz5SdZ`M3a*d~m#~j;Km;aG0x1xXB`}Z#8IUPZ0yqE$fe;A$$O!o83bz1_5h;=4 zs0*%O2FYLxd&r0SCKAaQh|A~+yKoD?P?9Ejk|>#yD!Gy@*^(~#k}w&QGC7kpS(7$- zlQ{pGlRCMRJlT^z`IA5yltMX_L|K$Zd6YS>NKlu#5@CF&Vk>lrw<@bX?7?Qh?3&XIMY}uA>`Ic}QmvT9mbXk{nd6#&ZmwLIE zeA$n(mck$lvS13G;FN731S}8&oY|S4 z*_kXL1Z_|ViqHw2U<#B83xHV+&HxKwsgZj)mLp+*wq|#;fR<`m47$0SyxE(+`J2EQ zoWePr#95rid7Q|ZoXWYJ%-NjI`JB)hozgj-)LEU@d7apqo!Ysb+}WMgDGcBVo|OM- z3YLkPn)#XN$(f-!nx#pelqn1037*L*49*}6ulakxH-8<8fU`z-#dns>xC_5f48~BP z26~_fnxG20pbXle4*H-F8le(8p%hx77J8u=nxPuHp&Z(w9{Qmm8loaPq9j_PCVHYM z8lv$znUz_dPkEl{`I+lUnx~1H@VTNBN}u)_ma%z^WGQ=g2cRN3peb6UMtY=3nxsm) zq)ghRPWq%!8l_S?r3o6Jy)d3Gnwgs!qo8R5qDiCfd86?up*nh>J*uBQcys1=n*mCs zRQjfH8mDqPr*vAUc6z6Hnx_`JqE}j(rfHsA8lwxq1eV~X>{+9fxuU`lp=JNdqsCZs zv)P|(ik5DwrY>D1tbpp7rBw?DxljsuKxP302{CZJFs_Ju6}B)TH2XRfU3<<1QGBG{;&up0I#f?sPnp@^-8Vx z`VsjmtNU811RJsKLGPkO&Cp)x6i=g0Hv`Cw@pBkzMd#*4L4F2#6ci;{FFb_Xa1lrK7@oKSSs-Qe8 ze~{{tYKpCt+OJA`wspFwB&rN-s|;xSws32wOlz44tC=j20#Hj3^iTyq&<^=<4|j00 z&w8;3`n6~lw){D3LCddk+qhIJ3~bvAy-*Bn`=KqWn$Iu{j=Q;>yQFhFs#+S5ckl|M z@B=?Ev)(Wd`4G6PinGvKxb+E^v3jP6i?3t*pC42klB=PTtG49Y3)aiK+FPNYTeo-nj}HGJvjlMmKadakV6&|| zxLs?YhKo|Q%dxjRdqT>%+graR>bqWG1W^DE;7|y%P`MZCyTT9+f?x$baKLbYkH4U~ zblMBX(6-8Ovb7*=xTZytCy>w}2XuMc}v1K(%X71!&L$@0z~y zy1vHHzC38Hic7Y*`oTC%u2J9xiqHo9+X{`an|rDZzzYpl&;)>R2xrg)8{h=b2(ku> z!A%OD&Y%p)%L=_f!46!Yy$}a+&f~^|J%UI{!e0vLb+BoGCi zkh#8#0SaIWnLMGYtN;Ws2ARvDSsVoc;04bh46n?n%sdRu%*Y|zy@1LBIM56cOG72l zviIN*h~URuE5U)hqlBEF?|aDctFL7%&Ft);SsVu>-~v?82A~iLji3gJz?v4?3!Ll% zfxrv2@QkjY3zy&mJ#Y#nxvlR^05(99&}Y#!yF9@oU1ckrI5?EUO)oG z{Lt-OveZnf)~pTw@Bu{0M0pkv@)!p`)Rv1jJxYx)Lsp{X>0~kAexa- z){W2!lv|kK&E1ZfG)o?X*sJlHku49M&S&~VpZEwV?gr6y1WV9)~Gyt;Sb55b_vE*#F`Dahpv zwuamh9Xr2@yxT?n3(=qpRv-tFaKXw@4FCT-p};!^R=^9pPzw&Nqhs&_n{dhuYzzzi z0&wlurfm#WjJ#RQ#-9zw&S2l0&AwIqyY%he4@{qez{^x%hyh&# z1ck8UZCebjz~cz24Da~{E+7SZunB$5;fnAAyAa>0oYw>Z-zeR$h8+hsK+Csm3d>xd zHedst5Y)CD1$Yk4^tsBlYy)!82{!*=1Z&CK1n>c;P~H#z3(z143cvuD01Xk&2v$(b zcfQO8D$8|l%SMpP$Ghi+kPFWM&to76Hc;qtu+voC6{cT2}otpriK1}abm@X*J}UDeFp z;!-T4BqI@JJNc+*bGkqL$J~LC=5100AT>o4!xH8 z7zhI$;U_ThylnBg0J&Zu%oP7G^M#PP5q}A~u;Pv0@WM<0A>G~xY6FW+@r}*{1mFa` z;0h})^Ba!=mtga}%;1Y12yd{_3~==yzz8Uv?gLBhEbz4ckPrR<3=qKW{-6y=zO^|U z?)KV;3ea!~ zH$dK*-QMp#l79Wp8~p~ZKnsDu;9&p_zYy^XUXmkMcOK%ed`g#aX_@c-r-=m@%9=b?)TZ)8|j1L4^(_TGZ%Kq)C-7W!luJ 
zFlDl$Sg|rDQK42@vlc6+Y^O3-&z=E+!|O|hFNZwAJ5;L^v|Kk1@uHP$*iD?hoW&vV zV%M#^Iia!|qi7ecTGhe?08;CgEDQvk&`k^^K$Bh2yb$={>ToSxJP2$!Rm&6x1&ns- z`UTZ%v0=Y{J%j%&nQ}GDm~)wuBZ$_n#Bci6$^{E`AX=I^Z{n5sk^%;d01wkC%!UFg z)eu*EZd(^`-@vO0e?i*uNJ;Q#$EHkwV%K zf`t@JD53=%)bYogA0ns%o=FHY2qA?SdMKiaD!Qnpl~OusL=s0#X{DB4@+GF3YQk$j z7-N)iMjC6hu_s}i@xq8Pcx>cHAY0+etDJgy2AUxlp=%QnULb=Gcr*coI~SCU&nBJp z;>$0Gb&~# zm4yG)eDzq7k@0SPpa z1sF`Q#Gha!oF^Y`1~S3IY%`Q)7;lpS*CKJZ3Gt(ID>v~Y6f;th#dp1=`*YAk7kwvT zo{_@_E2!Xshb7c-eTha4#Yq;p%%BU?Uv9ZXiYDNBgoZPC`OGfQHRiJ891|gdfSX+y z!UdvH2D&b;`I<8{0>YR%M?r4R`Gc5u;rUBtoN>mOsHUgc`qesFzWL|-G8@#?o+iMG zx1AA&YCw7BUI7g7$u6by0;_J}KIVPEdC+@-*)%1-yReN{ZyQ|&EqKA7*i9ICTh{;H zQUHTs_@f!~@P{^9P=rZL;tq(z&_aaKxW|2HTpKBuZM0XKGQ4XUOpdn4*p_dI1(ML?JAYQJJrv!M->Wzy_CyPc;-F zKi2%oe)t>Jm-t7$ynscLHp!6@(8G&FE^<4MEZf;6i7^Sft&&1yWh>{UK@RHeKrASN zHbT;j7OWv3A{hk>wUw-Ig$qRFqTCIQnM0Onj$JEqmy2BaOlU@vC#4(12Y~-Tg%`Xa z150GV5TNs zXmn2j+JlDal!h}r3e*hMXbqv9Ax8|Df;K=iB>Qdwvp3u+8fR)?B&*nLA`=58wGyu!c2m4>_F+q=3yATp$B$ZNoQ}VG8Zc zMNkGa1}nJJ19lDrj9H53DRRM(UIf->Fl$0-a^VG~ove)&B^sGT!vOyv^az078`U!& z8-`q@loeh;%@`a*NH1s2!aGQFa_5(^8q*jh85RQ zz+|vj+Pa*UX*eb6P4ps17u->#Vr6b~gA!C_$r3?_Gr=eT*8&QF;kv#IP#7F1%o%nB zt68nwR+Y)a&N)}T>%CF=oS_ICyul3D41|H!x&$lA;urWhLwug$j9%C^7cPJVuJ)R- zLqdlEBV#ZI1Oca#onQ#40iR+i`bHr9rJX}48372u13@r=G^26`EMUeN9wbcxL+~PK z89OIvbdNJY_`uDE>(NaWFr*_5L@l5pM*@ri8ow|x^IX7#k=g%%Rl1O|HEUesOPR8` z#?Xr#66gY0=#RZyW-fGx>Op6P5Dh;FMittD%M`Exs@?r=Rl{3D@}A4QE?Fk$oXKT8 z=h-Q&=maYg5d~McVhDk#252JDx=6qRrGjvUHS%x?Sg^twjs$|mwiXmEVc-J{;Piq+ z)#F=KlL62%gBb`w1U8c31EJlE0#N-ZXh7oO5_l01S{qAQ&Km`F#KbIVM2VDrueQBu5UKzQlT-0Sh3E`euQc)nslF<4q5M z0by`9v{^%fY1idN!?41pqXQT{=Np<{?%;t!z=CKYq6+^ayh9aA;4C#8UJxDqU|cxhqa98;zkgCL?&ja}?QF$8b{Vniy8Uqr(d zfq(-X0wHuSlFI0~>!Z?PeG8y(;xk&4jVr7Ui(j|`G{X=^F<>EgZ=1sH>)~20fWER_ z>>PK{_<7LRj=Q*n#)rco{2?~snE7N}@)qQ`bXkA{Z}>wU^zg?WBG7^qgy7#$^{y?z zD>-69JmPZ3oVw22)$~qY_80GkfPSF}GxWd=fe8PEE85x4LKI?gaSuZiO5KDL0NYc< zD~dC2F)8Rz>Wjn31@v!m{am5+`tHPuGs;v%rHvubxe!ETFtf_oJpbu;9Ftw{;{2^~ z#Tedir(S=d3(#QyQ?V(NT~=PEa+*H?q`POh1y|9l^uxa~xxW324F$x$?Td|Hs6U;6 zjgs5H*~>uCsXU^hrC^|kc_4%qXn}(hz0os>(mTC}1Bul`m)6sv*RwN=(?A+@E@u#n z=R*uy2s;Fn4J(3_t@{E%IECAh51aFelk!0&bSK!5Kp#ZFVmK+idJ!osohT?7O+yW; zBRbc@3HzHs0OY&|Ab1NC6xuf@OF_8Hz;l znnX%Oi5E;JIzz8Jqege+L}BDbUIa#1v_DX^BD#SMXvn!u;FHgIl&|{5cN|E9JPK<> z5NFv&ZWO(5Tn1oJ!PH~Hb3{jv$ejOmguQkwNR6x@VoXI>oI=pCz~_^`>?4?sJV}%s zii2Fpg@l132!@A*#1)*#%c)4$yGR*K#gyF19}>ur#6vY45+dPAqU6byWJwcrNtlF4 zNt8&MBneAo#~Cb2s;tUb97e0mN|dx4WQi`KLQ2t7%9tF-nS9EUh)Rr{O08T=wroqc zq(QEXrLROvv1H0}bV{=f3AD_~x9m&5{7b-GucK_qmmEuni^#l$%5>x^z6?yqd`!rU z%nBOJMXXDiM9jp*p~Z|rOq5K|{7ldc%^IQ1uq;fwgtNrl%gz){)@)7Jd`*?K8@b#Y zZnR9Y#LUd}2i5c{*z8T;{LTN+q)gj9%%;puz1&QkTutCyPUdV*wmeE07*4CYOXJi{ zbWZKuPVU^um5ff)yv*V>&fSc??mSQQOwSDb&hWg=@yyNg%t-Z|Px`D+^@6Sd zktHGc#-$Wb>LkzWyhJg8gqqMG13gd#O;810PzG&K2YpZojZg`lPztS33%yVb%}@>9 zP!8=-5B*RO4N(ytQ4%dt6QxjwNS6F$7XCEM=|s)E+)bg&ta_?Z8@*8+%~2iQQ6BA4 zAN^4v4N@T;QX(xVjZ!I{QYx)dE4@-Iz0vcz8vrqd;ERM7 zJZqeGr$IZzz6?*SW`A_Q#XB6IE_;|ol`okQ#-v=Jk3)*-BUj8Q$PJvKn+wu z9aKUsR6{*fL`_shT~tPGR7ZVONVU^%nABPTM;CpwF^$nOWu`7LhkM|MeF#-i9aT~- zRZ~4xR83V?T~$_XRabpgSdCR#omE<`Ra?DPT+LNo-Bn)gRbTy8U=3Db9ads3R$~=a zdq`GgP1bk72Yf(>cQ}SGScaJJPZ%{z)$D?8@P~Wo2XFmWa1B>+9anNKS93jAbWK-v zT~~H(S9g6^c#T(iomYCTS9`rze9c#V-B*6?SAYFifDKrI9oTb)2ZL<~cvuE8pjI(8 zM>4fZ7^E{~;MD(Z%?FFUSd7hBjony|?O2cfSda}_ksVo*Em@O2S(Hs#m0ekuZCRIn zS(uGknVnghty!DBS)9#To!!}vy$7F-2cU(Ac#sA$U?ywLRBR2ka-CWZ>UDQoo)m>fICEU&}+s5VF$EC^0ZC%^FUEIxG z-Q8W@Rn&^yrC!NJUlu3<1PSr?XF7@kPt z4P*Z_{@Bj-gC~ImRRD%%h+{dPV>+&5JHBH)&SO2^<2>dDI)DTVK4C!~WJ1p1bU21g 
zu!A%3gKkjaG=Ag+2Hx+*RQMHN08Y*shGa-~T1OBALO6zYXa{rX25uby=ww3JB~9+-U6N$?rDO)SWCtGIOUz_n24jCnhcmDP zX-J1|zy@)cW@@fxYrbY|&Sq`iW^V3gZ+?eEfCO$;-DPHDFHYu19^>~N<`}+Y2u5c# z?p!f&1Zc(vY4&D(&S!nz=X?$aa2Ds*^@mKrf<}htFji+_9$p!)UxSw5Wfp@>NC*FW zw&rlCXp6pRjLv9{-e`{QXpjDAkN#&{MqPifg)cbghGt=eCdb(|Dv`(F^FlE24??FTX#NYh8F4p_6J9>1BtfjY36CEo@%PjX`lXS zKOh8{R_gp+>9REHVxC`Q=IXBQ1~HI^Xuj#HUTd~y>#HVNr`6z#jfcUxTXztHf+lMM z_G;Yp+n3hsz0PWhR_nITXmKb9b|{C!*670)hl_4(s}AR__FZ5A0R*`0%YJ}H?uG<7 zgUPnpFScPv@Pp3=?DOU8_w?(dZkMq(ZS_qDGB}2`cI=&2Y{m9yb_j=EC@}wV$nC|h zXvemNYlv#cPKI!x?T>zIk@W{M000_z?&l7IMy3Vy8E5nDqx>SOb(cS!PZL z*VbRu?%RhA>i|yb@jmTTpo76i?wl@%WOxBUxb2NDheN0U0vLe4@C4spY<9SU0Js8n zaBOfMfJ1nP<4$hL7Gp6efQlsvX%GM{2yKp40TtNpH2?rxC~e=h16AUCTyIR?-u|70_W&**o0++gG2a&Wh;dOe}{5-hj(Cy|MuroR`81UaUlP3 zix!7tsDJ>dfMZbXchG|XfRd;V=>~^uF%SU2pEB8HBbd}aPg0ZaTzag_lj+n(|PkOBmUhijPfDu;(F?`Y)? zS%27az>Nn9KmbB004xv#0MP1=E%S{fgd+$7JBSCT^#^Idf*=qBINv2;PIGKl)uz}u$9Ch5hcf_%Zg_Q#w(p7-bOOKUbLWKtm;pkc<_ahPCRaBG zI062Kgv9;=062jY0Dv!m^bHt*4G@8R&+T&10S2&z0`LKI@Mr%jxAH>Q=vW8YS?6*U zCIkT}fXfB|8Tf9E1@;K`hcf^G4L|_^2!dUbh6fM;87Ke&2=Pn+00i&>0eApu@CQ2p z0BSecWj+J31$Xnk^Ga@K^u}cM7W(Cl2QpX&ZkYIsE(cpcZgr1$s4oXk81QzW=7b*r z;VysyfCP9bg#i!&c*y!I@Kyomg>pCn1}Fu#2!Jnebz2w!C+LSG@Op7zb%|$ifYx%1 zPjg2QfCF%OWA|8*?_OH~03-;B4cF=%5P%Nvh6b1dfz~B|Pz3-W?Vay=lZ^*42zsU0 zUZQ7bm+tnp{r1tt*aQZE0|l@8YQAr@clG~i?)s@803G;qbMS!y;07`X z03Eo3E6@P|&;xcTcp*OqyJv@Y0E7V018$&l=4Xe9*L}ZN{fn<(e|Q7{Kmm{-b2FD* zodk1Jb#(C#S20R^T81#*h5%7QpjR0)?*#Q6y zbwq#Z&cs9N{5kaK(x)HQ$k8K6kt9vdjOqQm_n9?s=IrQ;CvxiQ*RyZ$Ug#QCW+y{^ zS879mFCw;fl}!JsI57lpNI&er0|x*{z;g@(6^PT1I2CMQ4jll{kxxI^lyVF6bIeE|f(l~;Pw#||WjeQ^j~43uI{G6e`QgC05n@P%6^3~)Ov(s?er>2e7sb|H<) zT}o%l8(vD|ja1%FZ>qa5zx`6@Pbzc}=IV>~ZQw+I1iu;%JDZ$9zyJh@uopIK`QXE0 zQh3Du0 z6o84jG#pR>(zg1;jJ5)3Ae%sI9DM*8`S>%71_eAI1_DC*QGmD-5vK~y{+fL@+L>(> zrn}pfROY;Kvy^5@Yqp7A+Ij1}_d@BAaZEW1|HqCkfeQL+Q@nV>i#ScO!=MxJ*kTJi zfr}HkDvWn^%rz`~#kt|)T(fdGD+gM0tYUYg$}EmJinIU+I5LYW(%>8q?zs!JQa9aL zJ&-@~bYtW1!v{}~XU$c#TsnL^|GeJpe$@6hWX3Bu_H^5HFPrA!4L$hbt7|7A=G6R@ z<%ciaI6M87b4&m}nDb%i?AQPQmF4Lhp1%95HJWxuu<6vvJwUEh$RJC zAZikr5PvwLT;jVR2H_+pVU8@?CHHU?#z{Nj`RRmcFc_3co-|!KJd8#ysOHuP7QU z81a|RJYg+62)A5*XO~lqI(Tsi zJ$#|8W<4uf)2jd0wX(IXZhb3UzaWOW(zUL3y(?bxs@J{pwXb|V!>v>S%Zqxou+X8Z zJgs`wmsm5OhJ9>5^Kp-Uz@xI3y)0%ktJ%$RwzHo7ENFd-)X|c*w5CO>)d&mO)fNS@ z9-QZ5UD8;QvbMIiy)ABYD>gY!&yrP*Elg(HOWhK;xW+whS6w?z;I2ft-$O2RqbuF% zzEQcjWUg+qIzrk~x4Yi`E_h4ITMowawTi{AbEz2K^|H6U?oHu%nfP1m&P}@^9qxPc ztKa?dcUi*Z3<{fc%34G58<9EhB{xOiB*J1&` zILJmmGLpwvV`*Mj!zJeBiIXhlDN`A~*XgZ|m+NF5gOtZnt}>XzEasmKnZ8AC$UM@~ zyEU`9&2D}(oZ~F#In%k$cD^&7^Q`AR^SRG{{xhHhE$BfLy3mF`G@=u&=tVQS(T?_W zI_B|^GC!EhS~anH6Ecr(aO2UQ{xqmVE$UH|y40pVHL6ps>Q%E^&z(M}LMXjun(P?U zZA?d}Io;}A^Sal*{xz_JE$m?vyV#57M)NYVlf2~h zruee^P2`J9SRooWIm}}&^O@7U<~F}M&J~XGeakn%{8b6R)V)(&}j)3dJit#iHWUjI76pFYPdH`(Gqzweh3f_1R7z3py)JKW=r zbg^TZ$qt@%fvp~JqRYMSe*Zh*121@|>l^B1zq{F~erYIYTJ3xvJme!U`N>m0?sWg> z_k34=b}u*nAZxEY=tD31(Ubn;m+!gMHLv)fr`_|9FFovIFZzy9{WKmNh({>;Ol{K>!i`Q@Jg4j=*I-~RcW z>LDM%nP2bS8#YKAHauXxMc}nbV75`9wMC!?RvQLdTLTs#2!@~ta>D_pUjMBg+I8Oi z1t2$=L+{Mbw7Fn6z@W6rAPs6;H?%`L`-WGP@7fM?gdSMV+n>j3l9elwR*ifg< zn>l#GH#J}!(!nxhTO6*zG0>qWtN|VTp&i%)4f=r}prI|GfE}a*6iy){Hlp)Yq4-^4 z{Hb6;V4XSG!U$Y}6QF<~ULp!y!4+_#Ie4NIgrXqgp)L48Uj)GjRKOAJAqR59BFZ8J zX5a(PA~yIy*C?Va`b`A@!wphfE+T**@FF(&%>#&lGD?65KmrV2j1k&`1keF5#v(0F z+blYwHg4nKK_cRf9}04w@&#ZjDx(j4;THOUIih1ZioiK)8#V-i2KfI0HgtnDtU(1# zLGO*CIqYCN++Yrl;ts}OKl)<>VnaK4f-)Wf(H-L&hyXC0Awnvo2rwk4Iiv{K0yaoP zDbRojsKWdlK@hxQ4BCPSBqBESBN+apO4eWx#^6hGqfE}E+j%4PNuvKPAhf-nIg|nl zpnz)p#Ul{A5s~07zzFI`}~aK*pM-!xu- 
z1|*_6sKOLHRX2QPSdOJR_yG%XnmG_cWYVN%UZ&RBB#vRB<9+|&B~F11sD=zoK^IQJ z42ULa0t0BG<|(3~enezFrerBdz#6ET6NtbJ%)k*a6Mo;)C4V5WS|=iC+E*iGMNPU1L5;|Pd=YIGtd`lnHjVu1clq~ zIdlUSR6r5Mp$0fXH?U^~6k}6ns3IPL2spwLe!Z$!lt!t@ zY39@^Aja)v5OxDFTmc=d!4&MFmQI1OkY?{3gP76*nP!_0j3nP_=LjrmH%x&DcctSg*q!akS@Cd3WC?bnaC$*Wwp(g5}GU`M6A(~cz z6Vzijd?$~ZcY;J0v4ka_O

    -DJe&bU9;lSIs;h1sl_p@V{omf5p$yI-436Rp z=IX7oDj7O~U$p6uu2Y-_D?+Zp({zJJ{^UEKx}qXiw40AltLzCLMf=i z7(%Odro*&WYdX+@1QgH_Ug$$=q%>f|gAyIPmZS(wff)4V#B!`E zd~C>~BR9;z7f7SNJ}JOHEz}YoLsT3%672PL=3MHa)^07=a_t57qy`+pG5#Ph=l}{t zWSpL8Wi7+S9HV~N0`9Z}Ayj29MyttEE3PGIJtCWl5<)w`Y_;hvrv)mG{_R8NWt&Qy zNP_1)rb7@6>OYFX)5HTj$nBya$a}zvaToGAs(IqaQcBG*ue~BXracb1fT#N_`z@5!8sfzZZ3%v5ThcJ>@g}TJd{EO zJZJAL!w4XO&1!(7BBb*|?_y4G^~$BDPFoP{C>=^89mInz%m62*19{GX@gi?Zx~l}N zlhb}K{noFz2`uSat^8Hu`4O)&{x1Lr@c(LnUE(0_60ipNK>5_*75D%h^ydhyLGPrb zC#WNC`hg2_LlEfWF*?Escmh1UC5g(SIfwx#t_TXiA_TUB2;X7}pRgXJ@WCR1MUL+X z7^Wv=A_e;Z0nY#i*WwjuKq(Ze{TBbR5#O(U)?N60BTkkstukP>U1DxdF%?&F70WWGhqNF!tg)I6t~rD+{T(qRLo)v2-K^>)?*XzNXR;=5@+Ony z1yrn zOWPzLUnO7ZC3|3>d0WG}G8s$(Nyaj}u^_%3?6&do5%?~f1+z9wauQ=60uF337cAE1 zUj*LjHlK454|COq^VPZ@HLw5QE2FbK)2})=vHnsbK|Jz2?=wFSUpJ$k6(v`yzUPzSZ?Np$h$r$vi1K?}7~ zFLlW6bT0MuKB+oKt5KR11VuS2b66HN0Il9%XetY_(UHwON}RSU-_ill57* zwOeyrT7%G9lk{8HwOwahTxZWh2OPpOtY7~%Un;JwqYMOVkfp@FE(R0wqrjw zWJk7SPc~&&wq;*7W@rDlW^XoUcXq;Z!#xl+epYEnpYOgkHx3+7)Hf+bX zY|l1r*S2lnHg4y(Ztpg4_qK2UHgE^GaQ}8*L(yI@L_Dkk87wz*H@9;?H*`n0bWb;R zSGRRvH+E;Yc5gR#cei(cH+YA)c#k)Em$!MJH+rYHdapNox3_!0cX*2dbBOj5-(*EQ zu_YtKJY3};_P2lkH-HDYfDbr<7r22RID#j*f-g9OH@Jg8ID|*IgiknySGa{=IEH7q zhHp5BcesatIEYWUc+&U#fppb9@vPvOimy0}x44VHIE=@*jL$fY*SL+}IF9Ey2o-k- z9d|?A1CR%~kPrVkkr%mFR23GVKF_|{ry?{A-d@7-s;-RB;oM?dr&HEUMY{};{78^hfcBg`8s-4v_D z8>i6}r_UR2(iCsao8Z`#;K7?1(3BX#o231a>r{)Be82VQF0rmDrNe1F(@CVHHln8~ zZQUtFrA#n^D`2xJ1JdMo@Zh#?mjTTJu1xe=4v$O-6iB+ zapdXq6*xNoGJaICZZ3%6E26+F{90F-&|F;M{5{S2^V{9^f#y=_xRMgi(h0uuJ!h`j zW}WrsatL3gN_48qb@uk1BqDz`4S(3hiUj*b=GB9EZc>#Zf1O54oj!lPNlU%8OOfPK z61&UCnA@P52T@C{a$El9f|lkA{+0$vOH0R7RvRc{FRB8im%N9+eXphcjKAZqr6U1U zJ=Q`!`}DbIQ5P)G9h23WF{A^r?-q8|WEMyU3TTVB_OaLXy5Qu5*sIRm^mz!VnYapm zZ#A>18}JbrMyVT|K^ZzT8SW6!s1Q)E6A>fWtcl?C-m~=CbMl>lyhYo>UCk^;VikqM zLPXnAj@@Dm{@DEC(m)$mgIjo-Th4s#%AOnLjNsdm!TK`uy1bwO6XOi)%R1P74E}Xl zoRwtB6ifYdS`Bqe>2;c}ebo-qI@DpObm-+#0ZoRV21mlMGtT@c;q}heE!jozD9*J@ z<%)G$l#PBH;Cq6&wpIH*%dB^YbBkPHE*s9=q~Sd|{1?*Gr}tu8r$BLoeU$Cfl=dsT z)IZqHV;ZC`!6I}EIfQ5L%b7NXZ?oTD=bcBA$)g*g;Zky*Wy*DLpfx)bh&mJsrSMZ&snC=033YRVs{- z_vf|g$F*JuU<0pFW90~!2fQZw@oluWLK16LUJXT7EU(q7Q8I-@e|#c^zNG{E-u_@< zR`IBVw$wlnHs8pvAKG6BAT#e+S&aGoocm%5m}j^Q_fk6tbwdDvx&=A=fGSH%q~Bv=j%hsPaD0utc{A*?!v4f?E=< zp_y#EnSwGgi`I^m_dJAvo)j+qL?W{*NMU0t#~ThQN(7FENFaw!nfyREJg1F(c zc2pm(t}A9gzHw?)KLqr>l~rmB*&D5KQ3mUPYtu&Cb>U0$8HmUpvZ@NLC zPiW03>s~1G919JxF#S0Nv7KsEXZWw@ob4|A=I81T3JIHBy7K3%=uoSr3tvI)>5r0d z{EzGfqLAw+?~zX&o!NAh%KHfEU-K(_##u*7gTkMdc)uZlJ!OxOQ>Uxo;lbBB{fJ=I zUd&llHQl&bbzQf2bDved{vahWw%eCpW&}mguQDE@Wv?r1MbEjWx}wTqnN&`M?+pUI zO07JnJUgr?PbyLzTwWT_@h%8M3OoZe(b?S6?|9eRK>}M^^;Z zZyInp%W<2PfJDUf)2-{>1hewKMkrpyY$;KY)Qmgv7lc zOtV|Dp21C>bk_dzN_gH0Uod49>!y>Gt*fsmup8wqiPe}K($>UEYxgN?@8|XDE`HWb z6|P0PPhwWA0vnxVTy|qo_+@jx2y~nKMCFn$OCB#Giu;VyT+}qXEV1CPrH_5+l=Ovm z-%D3N{1S(VA(LqPa8lk1oQ2+z5Ir5&t*1YoiZT&pk*KX9$n@bkHeDo9(8SNp8Oxz> z{`!rxW^%-Px1z;1ro-*Ni8Ms{zLXu$b-KzQnTMR@oOBj&4Tl;$q$JpUg++!{$Rg7j z08IA`c$TB0eW4mK$4=>fo`lP4dB;dxGn07tc|i=X9q7;cjC%hihHWWQN#h;w`pEPh zPjXd+>}gJrB0VVrACWDZ?}ijVi$95(cG_tXc8#`%>9UK&THv7 z7qbZ=Cs>q`7OJ-9z*$EdDd-`-Ee@daTB-D(5n^tykSE9Y0!S1=Q!-wlK06PwDnnIR zO&TJAJY#fSGof$qv*=a;-8i&}_)0d?uqvIOz2&OX=?uD5rK4b4`M07ed2^A);@dB+Y{y%H$#Wdu<#n zYDM0DX}bq3JIYvdxYZzDsYg5-0vzfWoG<)lS}G}Ps%dAnhUK5X2{|A3r>FE5jNL+b zvA2op7^DPoKTX-?U90|a=I8yOvf-64)IC7UmzDZffNn&hS#WC2mD^--%6-w$668T0@{kk~J^P%EE{8!hyI``yy@KE-Jk8R^9h@ zoLehdVvhB5@6LV=Y3mUh-c_%>lvxlL$c`h=w_e1ZxgL~&u7_3H#*8Z-(~pKA=W}?S z(6}s~XC+I|R=q9JS7)9CD+Y)Y4V_DZHjZ~F%V$jYT?BLI{sDxWe@J+`M1EV?^Xh!T 
z&S=ahxupwaI9}WE#_MB^zX+Ww+FGk_B=7uT>v8j-4}#ziijpLSHKeQq9p^YR-d#o~ ztr%%wI(4&HTnN5f`AVZtFrq2-E;i>lf}%%b#1Qu?(L{~0M-#V0_mwSlXyqGsM-%5a zbg+QqlOctYW}(^~gX^z36JZF!q!;dWCX%kHI2be)q<)=EuxctT44RILzs{x8HIvr| z%_PlT=ku=Z%WN{EwtAmp!ue`n{uaB z3w;P^p$qr6GDz3*E0}O`Nd2}tY1Pt1m~d$-o~x>f=np459f<{LPkqzscWV#Al}+5c z#vxrRy9~nB1NFP+#Z@cE4#KsI_`B8vU2B(h!u5x_yY`1wYYzzj+7}*i0Xe}TzKZ*C z-uk0(=F%Leh0Q?Bg!^7PJ=+j{qHTit`+nXv+XxS$9WuO!L0P>YF&RXE=rkUNHP?P5 zbP(;bCOnLqg*|%VmNP?Tg|)P6p}O=YWY1WHlbAlh*7DVB40kPkGBu zKRT4?6CY{LKh8C+IaGKM9~wkqM}*i7kZ0{q;MbjN=H9kH_nA-V6udp!@* zcijeqABN_iPaM)+(n}uuh7(?|i}c-&^ubSy^FrrI8M^$!9@i%)FSkSb?pGP$mjeyR zqxQO+sy5-Hz&FVAfxgGnIvDbR(f%}p3OT=O*t}MPLX3n$nS{cKgnF3hl(4?*WuAb+ zfTmD}p^1cHn1s<(f)<~EfPhenXE4G*IB{h-X&X59^Cw87#i5KcyhbFv_9Xn!1e_x} z{5CU`{v?7m5YbK<@gp-rnl%DsqFcZQF(48UG6{%?q;jqt?2Lf*0m4K9k*6>y;K)ge zCy^_FC^gC`4Us4n!1IjP4=X;+bQD@E{)xQw9Zy|?gs%|Nx??s{?O`@Lx zF|H;N!#9uzp|HX?NnDlC5I;VQMnXGSJ_9~t=2=s*86qEV!+VNg(L`Y}Okpu&VzYn5 z=B~n`m_q$Y2E_Y_6Ey+k24aGhfzlt}eVqV%8X|uqMF`A9q_hQ?PvKf);@N$~bBw}s znZmQXK(N|C4krW4e18`)b-Qa>kZg_QQHdmfh6IWF2vV5@6$0QFIXEmMRY zm_$7vi3XyG+TOkh3V@z)&Vs!X6%2FVRa@T(*~y10DqLXU?hw5TF_ngT;GNxQ0W ztvxa4rbsZ@uH-(FcmY8wm}KDSi{~zk_&mgWR#Ijx3ieCF08i4VE_^a9O0W{y(W7bDIF>(bIzz;s%@sp0QQ;W0Z`N-)3gIo zh@Mbj^J$utXr3R@NRFRgQ={q8ywSf@QAK=WXoyCH>7ppXVno{_p0b2iyf{;4Wt^D? zH+Lc4PEk2QF^#Y?F=H_~i7*6g5GJy{hooT9r+uWiuwizY1_Q_t=s!_7Z6K{}pp_ah z^?bx+@?sj;AYAKYBD`W@e%r>8u@qG^C!wHm&rlZ_;09q)IAsFQ4Z-(@U~^IGo+!j? zLvX1l$4e93CCQ-~{GElz( zYIYWg{5QA5CR(8<|Air5w}=2lz6SSRlzs0?FbMlD`GSXl4O5MS@}&zKnOzt<8j;Bh z{HqH!849513D6UfW`<_+Fa*IrajZL$iLw24`}}U3h_+HeoHVunM6#Pd14+ zG9?b!@RVx>jju^JW9oKd6c(qzM51u#>=c++gm5qO*@ zh=rX>Jq7^fnh&BEIC`C^_gNqvDD>jr@X4Ow=q$w4E5*B5!5MpA%~*m7ATRMJl?YqK ziVRzDV~7A8xnkIz`ZDudmX8t+NZF0=xgspXw+!s-$_5Pt5nnx7de!dsu@UD2}!F^ z>r-hKp}P9`o}J>tC=v3(~zKG|33-z4X1`cyZBvLUwWA{$M1d8}PqGC7#Imj$#4lo1nl>wWlitWJhDLgN2JHrYa|}9Y=Zq zic=-_d)2Jvx;5D>?t6ATf|pI89+c%DHoCD+)ItL+izwu75iGR?ENw@J4o<=V5iW}< zWNKDNGfaSLgZ&Vz^qr`hsD_72 zk?Y+ZU~$t)|C3w8G*t!^R~Me=Y^8_ez6V|-z>&-iEzvvdhp?ZcC0QaW;k_%#XPs12=@R7UlV|ww!vnmha@Wj>)P}4mlo)#h?_>r)hk%&o=C`l0m z8)!^k{Bc~IEsf!h%Hc;r;ot|r84r7oogBMn40lot|6+_VK`gJPnbKzk$HZuBmgs<9 zAEhSvGknPh==iUi@g_;}7K`!L1POLY@$4$hngr1rjf^q#eooT~44P-><%ucyiBS!H z5T`S{7=q*+UO1H`8EVbs3W5~0$m9cLsSMtf4uaGR>lDtBgHnlS*m_24Hd-l4wOyt zNb)BsbD}n#3Y5d%e1WgkLFDYj3Cb1MI_Cj(F=^#0f%5cyvfWs|$8qzjfbu^k=hBVl z==0@UYvq_|4V!~hZI=qjv1D4sC-+id@s5DXsH}>2CBHy zs@O~hP(h13|->P2pc*U>y7yvT$WQE zQwrX$d7lw9a<^mz5H>lu3{HKy@5mk_2DxYT`LYHr{Uu3ApwO=;gIOkfp3-+h!X@Hi@n@ zlDYP(xJt=C18cO~)rk5F;`+ueTkr(>ty@XhQ`>A)2QYv3`!)4{Y#j(#84zq$(H0nF zNF7XY1v)(UB@qpG+z+HQwSE^Et{_56BN}NCK&wj~=@1wxa4n9}>FHV-)tDTa&>6F3 z8Jk}j)22I=YTuwL)#{Ogy>5JgiK>>ZTm&)SW#K!4pr7L{4fi^bFTHWk^AW@gXs3e^v({lhqQ#6fM8{PR z7viOo)yprdOEtt-mAcCG^5le(+RY3d8BtJ}JHK52GE zu5B2)YgaEuxvO49y6acOYY^QvSiOzav<(!!RSc5NG2Kl(y%oZ>%{JmK3cY2T^sTbh zEoPD>cD?PWv~B*^d0~3`%`|LDKYeAV0KPTRF;pSC91 z1MBWN3Qf7J?ZFf8`>ag_r0?IX>_@nd$LJlbryiuZk7bY?PUsvKkc^hB9X1gi)sT!d zq#xz19Cf4*_2?Z3r5=y84@{7p*y^0j3-vFrov0C=ZoBtF_R>#zS5D7fyRY=ls8Y|K z+Pfem=a@R@h#s9N>*p_o7udobco`Q5%NOAFHVXaAsgz3w{Z?l1Rg?A=_j)t``c)3$ zwYYGTbjEeS^0m@>gNpu*UCNDiM!i1xR#*Gh1X5tJerpT5vlGsD%(zooy7PeK`sm+_ zC*Ow%7d*82ybwRk=skGw%%o%-Why_Z$bqcE?Rc>2ZH|iT^7xFzQ8<|; z5dsnn<|vxZQ%K=+L*^|0orll);~MaZaww6x#rXvE*U*#D?O=zOvuvr(a=F_Pp@bKjpv)aA=p)b}4nJniEW2uy1NC>x!1`la>27ct>GGBtI z*?wmIsQZ3D>zz%EyA{MXF7q{v3n6aHm4JNkO9=7dZI%x=x~x%@)LP=U7wye{j%+4t zq5j)*XIbMobuF0<<4d|BqZl?P09Bwyxa_y2Z}st%Zt_2oW+Ep2=ysBRjLVv&{m5(D zeoHvr`QpQ}gk+lOjjn~0K624WBkv|9Ynl@-gj^)*U!F*7WtEI)lACNbY?c&b{@F|} zBzV!RC_i0myCBg2aH23N8`+|?8m6!~y|nppueAAexa4PDbu(7^?;APGs{ZZ-V@vH~ 
zq|&?(-Va-q10UqS*M+=5mMlvW5Ryvj*4$A}wHCM-j@>uYL#&!%welGK1C~JMO`nS8 zt=n$dIG8Fg*G8;6UXf$Z%9gtqPdX65&)+*Q(22^L0K%xO704RR*1fp;L`S_r>Y=s+ zB+|tf=|mn1KZbt#kM0gqSBzf9(K;6W7-5?THL4yuxVNfb5<;Qunx00rpQLIZH5wHI zhuM2d+?m=>e-Lh4n`Y!fbC^}vptuxnA3|;FL!^Qb8NRv=Tl<#c*U$?K76P=!sv{+ur@ihBVL=t>xBDa>3Oe`3472d0ui+F z#tYfFboCcOHb2+HWhA}mZs-CQm-y84Z$Kmh#cZO0TGR?zDAF>*jvvhp@qE`&tP?Bo zivg+}*Q8f$fjy*h3IRMRWPOyhy_6sezcY(0tPCEBpR1wrnd0O)6^X3mpQ3csd$aN6 z`y^lyPD6}VAgcJnfL^xZ3nQ&(B0P)zUd~}#Ws~Pkq`g3CwpmOc$I%>cQLQxLt8L_0 zWFsP^AQ_SRDR(vBZG3E`A##GtsHjq-V%($}smI8O_~RX_ynSA&cH0>5w^5Wd$53zD zk2oATL6i-oecW8pFz3oyXF===woXRvW&jZ6%src5D@a?+o(ru&O zvPFk*UAF8pK26aGLl%i0%HPJgxs)Ophq36*7-7Q_h2nnR1&N&@X#!v0F1g9YNy=Tj zH&xL}L4?g?1E)V35!X@3DDmM$mDj`zU((9yFJZ@_$7Z8&(kWQWVJ3Zu&3jiz_rW6= zFD#tHnCcIuazrq5R(Wjh`y0AX8NRaB`XQh( zC_4;K<%4ERl_V|HhMd*vf2d{h8`INa(9LIns#z*Vk2GR#q8^QZ#8T}XF}={Mcl!UR zd<5agJ_awubno&@>d=2=zF+8(y3L7+Q!+Lgo=FPCtWN1ORg3J3>SI$k@JxYc_tex1 zP?rQ1y<=39cA5`k$wBQ4eJ}Dd6~;dKGuXWN$auL)`y0pahNtTQj&ahozP0#Do7EG> z_z}p8%OfnlSz!71_ZIWr&PM2|omM~h4k8p5vRJ(0$bLm=&pfmA09{$moYoI8gn`~| z>F>+OALsd4A7S3<$_%*)55ue0%JY_dXWY_rS%nq;4Xy|JaNs^`PEM<~wr}|29y1u) zd?CXg>>PGncUhtF3nH*ggbfsTRrFZsLc^0nFmAZl57MfmFt)h}F>F;xf4jTe35DPv zXS9M=qs&Ka=!5$$Dgq+sG^|=)9ajGpXbD+_})93iqO@Uu=B!tl~E*Ra9$z4KLMrSOYjCK`6E5b!8TU zemY0a^R1_K6&`5EZ;6Wwy-f905n;zh3@(eKR`oR*XeXxfE=#}5>+34QPAn~4mRC-> zs;tqhK2>C)+xPb|hFX4cf~-BSgWaYY`$9*re5RFts@W|J-tL!wUII3j3iI%5THax*gS?lLx3y-Ijv-Z0VjMwuJkLMlc zj)y)Ew-E;6u~F&EU0$%;L5$zytWw9zSp?+y$OH0x)&YUMg(J{hp8u@pCQ=3>gdz+YgKY1VzyX#mEH3 znFb~N3J5F=qE@#9eb(_BwDOS&6fzA=F%2#V3@$1RE*T6i+lK^K079y0LuzC~>P$l# z0z;Y#Ls|wy+V(>_0HIy9p*^%A{{KirHH`Pu`s~-gA4)0{Iz<~cBNH}j8a5vowpbXp zJQ%jRAJ#Q!G_fD5R%n%s7_3qsIw}*sH5h)f|90~#;(|8fN+#mQG~zBW;-N6&X)xkt zKLP@Xgr2IZ7~$ZEDA3uil8Wpa43rSAPS5W zO-2_@(G-Ov8dl09O6mLiu|5Y}g z`5|^}AR1UCaPTYs@N>L1Qi3gAf}L!FgIR)OP=XU(%y*}Fm7-|v-2~O41RvQ%KeNPu zpv0h}#E_xHu!F=1q@*aiq!`(xIJ2aLproXtq?Dngw1cD!q-0axIP)Ps6@tIAKZ}Zz zONNrm4w5U7QmW`uYGhODkmAH8lXtNbKaeNZAEb03rFPM!_QW4 z%DAA*xRTAdG0V6M%6KTscpA!haZ2ArVt=J8&4iW9gg4Jb49-L<&O{l`L_5sHFwcZS z_5jLdy))0k3(g`a&LSMnB0kJIqRZO9&!UjarZUf_3C^Y~&Ss#`AUn)vM$TcS&taF# z;WW?T4$k2%&fy=<5j@NhM$Q$b&lQ);l{C+l4$hS=&Xpg|RXog9LeBe0pQj?1r)Hj~ z5uB%4oTojUr+b(ud5EGjkL*o(N}A4fP-VtOk54b_?Lxqj4nzPgME1YMqU##H=e@FWaG0BzOTpnL`MfC z;t@$AR?3Qi1_%w)6dm~sK@snxc{^F|phzgH1QZZr{1@PW3zsPR$#W+`5*FEBO!OU} z#JVI6ZY-)ZfM8+s4RH7z&44Ut)GO-!TmvnN3W!b8&*p6ey25VaOG((}<;k1azsn6_ zXu>yLiQz%FM;3Jk#D*Af{RKD>t#W@1f|tC-AeBZ^jS-h2sil(CyJ?IJg=-_73O2eu z*vlwkh1(U0hX+te(`0G_?PRGW2&Dk8SCpBt>Y#jMBs#uHY*8npZyG8{IFgcyuQTLg zp;Asj9sC0)U`G5K;9wCjrD`N4p@11MnTf1vSS-gh$>PckqP4Ka>> z&$NR$_oxvHu@tKnUod4ddZOQnMG}?z+DlXe^}62+z2GRn6W^ru6Y0CE%LpPEH6LZx z%3+7dFW0YcTaAZ&on;XO{_8UdI`cQcVL7h#UAq3({{`Ssb~C5rcycpu5b*wX!6@PT z?V@SH{{!H#S9Z7NdIveVTlYj{y5I1{v%25>C*Z(+dcPeh{ZGK*;ZLGT`NM9i+>n?^p6VVee%adf@~6{QYc-d%PDJKs*$xbASMfe@#b1EK+4I$; zH1o^#Zw>30n+21Km)jM`vzNP#0Or^GodoOGhy8+z*T>_Av)8Bd0cOba^}IFY<$kXM z^7?#t_NK9*W)acEG!ub)unXP!+=mo3 z3m`4ijg@@vORJEDtTEV)+kEcF5}Ji-BGUs}I``)p%|dq^>>+tP4-i4k20{X4ddWc- zfieo&SP6r@)O;5~%Awgf1u}i~S{K2eN3(Go2K$(tFG37ZbMOab`q`2%LQNHNK=XtB zT+J6@HlaC0dolxjOBdn4MsrB+1_y*5FCsiqb4d|p2gTMHiTw_wxQnx87{7><+HZU! 
z$7{m0!jXW5undTR4kR(3VUgl{`zcV^hNC}Uf(sO6=;UQ1Rng(F5LgizG@besaC)PY zpQZU}i{xT=uqeuL2I+k0fO?;%qWc8`X^N-ALu$V!0UrK{0LVrexndZEIw{GKqU9H5 zYhV(7cQGJs`FKf0lO|FEs00tBgk^k_2_D2mx3VHF)-F;ZHr;??7|3`?WlUJS0D@Sf z7aFLvHywdjT0*r;QCzGS9VTj5tezLB$YxJ^em@}m6x3&=?n^lKyeF|CI^~o4g8)f0 zm!HsEiOD&Zw3cIlRe~PqsHcK}j|q*8wFMi?mMfn^36Ma!oTg9zk&!DQBhM^08XTEL z22|V=jRaMB9sdrlH40SvI5ewmbrY|6WAqU|de+aTCt6?9n0|&2XeJRAL&phV9Y~I> zp|(TA!k1AwFpo6ColP|PZjQaFrRcEsgEWV-RQ%W(Xi)npabe@gFsONXxU1eCArHTCV5_svhzHzL~%5h|Q?eVsu z6Rq4PKz?Nlbl2!+51j`C^Y=AF1gZ;q66$|mGX|@+g^h;^^e3EJ@s;R*qdz#s|3ZH_c}g?c{66LX6a5KR=y*ea>Pt1fG$#H+f8LL0 znjx+I_y_uf*0c2&`lH(hSMvC_X4t42x1qkFKMVUc{`jE3qd!TEKI56hY8w@3iMKq5`iQ}tS zf5a3U4o1Nws+4}1)3R=YhgOKf1_UvvbR^56R)|cK#-7XK@S9)Myv>5K2N9xHiu;g^ zpQ91jk5#*}c78rj^rPO$vwz?guDxJm7cZ_JM$USkXhButs1O~=9JP}WV_)K!QG#5R z_56WR7#M-$C9>+p$ReuC)Rtcbazkkla*5sH`u+gIH7(zYn~CGxWk5s!XC>4^e}aaA zf_bY1gtzYL8Sih6)&EaEf_nqHjQ$05{jI;lp{@9zfv$hm>K7fEe*#_Pv3E-O6B!~t z$bSR6IE<9i-hi&Y#oB-Mcf)@HU2+A%#3|UNi#4V*{{*^fjkbqR9_s%$(B*iv(qYUn zoz_zRpFr34KqSTAYxN&M*MHP1!NeG#(LpSPTjjsiD)YMUNpqutU)SG(u6CEri>)7Y zVgZJ9XLq8ITUkn!9Y#>{oNBB{bZ+q0vniF?Q$4V)aMBM z`h|V#2)x#Q`_|ueteqjIzwTAeXWYGbHcUXC|LX7la6_ZDj*@zI<6U;gQhHf#g9|^e z9^EFidn=w>V|#YOF&2t;8BfB%*N7k@zV&xoOujzbB5SGue%#YBzrzyJHG@K-$CnA; zzxq2cH#C8W4Kc>=nHWef9zFs+vlRr&>y;}JmP&OqNI1|FhZW#Ww_=DH(kPDkS0;@? zB&-ZjoMap-fFd;$=I3%2XnvpP6tf$RV}}s#$420L4k(mxWp6y*1_ZIf4R<0>Pb?xY zKf%i|SZz{Y3RO>O!B;eYTB96F8r~r!Pfvt!-mO_;EN=pwdwPBE!RyGE8qbJz`qBlFy7OH2V1QioBBB7-SY^*{)n_-c+0*@Fh z8h#c$TcIdAhX_*9XK~m@C{#ucPuSnBWG{S)UDt8g*18-!4Be&STli_XvT66jnPB!4}iLWg*)9H7gcGP?pKiOb8OQ^6d z-$G^C!8Qe;*sqP$-9_(A2~l^k6djp9VbMs~s{^GXUS)DMNaO6a8YkjRttWCdpE~+) zGHi~?jS(JCKUGV^1Si-oT%k>bcOs<4YdXX{4aHuC&7ufIojWwKP%wEU@IdgI1sXc{yonY zwy0@7g?A>Li54kfafRA}Y;v3vb-Kew_f&;u@`IRy0a_8@JN;p%FP{W&TheD(+|frQ z0uFY_n1v~m+sRYYV6q01+YPfPJfl*Bw`Z#nLKkV>(Ki#e#Uvt;n&cVU9V!0gVEVyx z4(gVnZu?74tf|RB!1N-L5<85NLRGNz@|WN4OfVsdzM&@1T7)VF9w7X$R6j?zLHD;) zjc;KOqURVa4A3dQ_cu}cxPJ=iMd2TJ9JE($#N|P12;qTTXyeYSN;?}+93mWv4d)ol z^(ycS_ONgy$8(SElln!4CXSGnb?A^gsD;t<2|$+#qbvD>Pwh|DCbW5-C;q|s&}(6- z(Wx=&-D!hhnKZ63Gn`+}FqybFg-Yg7cp1)^@t-+MbkQsf#*^oo@_fq}&d;J}!W+FH z9QAEECkoO)mu9)ht~OW#xSiq(bcazmTsFgi0_UAGl+)g(f)82zANPnXo^Nm)gR~pc8pWP z0Y1OZ{iG*@9(BvFxVt||o`+7gqUD|`qg!PXbL*H4qbDfT8!M6KPKi*e_2@{tUlZrp zSRuJL;&-;ez?Rtz(tPSfD)(zBxJ9(nxImUmTO!|gJIng)UGks-O#^U0_oESoz@wi% zT{58m`J{IhHDBom<&$}FU@dZpglR%44i$|(C~8+1v`e(|u|p@*;OCCTtr7GIC2Wd` z-a;SzZa%lyZ|BuNikP=jV{wLZnMxL!61x4WNAvFCP%v=&4!hjmJaDd-(l(S|e)cEW zU{v8-44_g-F^}z;{la#{Zh&==c0-YbcFVNoh=(R>5T=1b%!P92G_Vkn@|OGMF6yXy ze}gnD3A-Q(5G7Ssy785pgd)W^aKceUJAZqfXhkYwEeF(<%9Fr0FoN!fC+~&7Er14j z+MRMLCn+Ny^jW_lUh~&TI+3DwYX0^{s*Z(z7QpZHq{=6%N}u+1R2vm#AnKm-_(4=g z-o3Cs@HFHhRoVWt9n5DcLB61jl}FqxUQzdB(=etmVyfyT0d ziBtCFSN~Oi`!#wY#19sRu@{=uQ{g5LhCUNok1}+FGIa2oy&nLBCIYKd39q9Z_PY_D zC=n}l6+1`eihJyk4A7Omp5QE1XxG^`NSV+ws9D|Dn!}jf=#XJ!?^4+RT zRVvk?2`6D=S)yGsLpaY>6!s$MA9(Ms_{P~4kJv?zC}alI^fDrmlOUe{I7Z^+H$p-L zF$kYy{=lOe87RF!5N2JB!Lg5(k#%Qyh{c50)Sg6po2Ju!b_E(nB0AwyWk+_C1G5IX zKkY!F*aUymGP#|1*;*g%Wi#}hAF!`DJn+W7u-^4%IRRc=ZUxOP`_VAPL`j=oYG z@Me_Vjde{*G{>>gVnJ(HjvHl({G1dw*A=8AOO8B*8o`S)3XK2q5Qz><&Tjhj<99rs zO>*HuR3#-emPi8Sd9si?v`|iBZ4=M$*bfJ>yp2IA8fq!+n%Qnnzx={rTy&;js0nyJWd zu9unXuqM84QDnpcO3iudW)RQIH(_|V0Gq(rKMxsDpiH@6>7y(tS7xaii|O!AuJ0Fu zMv;8phTjk18F%NIA=Yhdl*_c`x?NIRaP@(iO|0mF;s+FhioTrMc$rYS~ z*_{41PZx5|25x4OPp2{lBTJeqB30&FF6CSOH=xV8xxjU)!2Pkn6IAHUSLmx%=>K+} zlUx|wTo?*jDhz)tM2ZY(X-aXzFA8~7{Jeou)R}k4o2_qdhB8c}jg*ntlzknPV~Pw9 z*?`raE-rs8t^}1-^Oe*hzoP}dBki(Dpr9)E%eOcJM-Q$q$PX7jn?Z@r%+&X>!>|z~Vc${QbwwL+9c#Q0c5%1^QDZkgy7i 
zzp8wxq8M13TAZ`RS8i|!%nmL+;L9x>u8b6^db{OLn^H}GRNTWtHgS&ewGt)Ivx>L4 zDhi@mN_?bW^)_ z%ZGGrE*IU8bn$ApqlxBY2vqm9v{Qz5^Q89h9apYB0RWaAj4LhhmtDgA)fEig+^s!w zt-T7aEd?7r!aDWQ^5p`ey&4w1m;&93t$jKxeQMJ^(x|-?{7q9wRq89voR&R$t^JlO z{is`Qk(KQPt{q~bm~5@tWQ<7W$NjD=1MWm%8^!^K)&`@|P9eTxvE+6$NNJzP%3%2O z;3v#M;p5imqi*9-Y~r8gCGu@ftpkzILzzUwaAZSvMA>mGU1+F{YAtA_p?#2&0a?c3 zYJm|qOZXJvfT6-re#kIi$e?%1fIlfxmBL7`&L}S<3LG>_edtJ@K);brWU)>iduydM zBT|>ZXn*S1l0erWBXHPtNT9DZbZE-8z0|V!Ghx>p@pI>0GYL|gh_{`!5^Sa%_gcq#h? z%%iMh`Tlpx`#H|V;y#i3stMj?TTO?PuM__Ww+G@ z-0zr?Y12I4=Q>tb#s%@Z6<0!ar~F)}5L3n_1r~T;R#sM5l_8j`b>CNJ!&c_s&zFrY zucfUW>SAx`uKZSs>az z|9%7YbQNxG4k3IwO>sSQbpuyu2_t+7vwZR;Y?YvF1Im4qJbbfvWA zZL2bEv%YPUDs7pBWHIApi$`y8+ zON!*rPqghVwDq0$>tgLIG8;&r*8XTK{-Igh5&1B?D7aH~jH;Ku%ZIir#I#8)wEnGp z_ec8-56d$3+Ah)Q&a2|)D$Abx>y$9?yW{J;lh7_w`$`|=axWNtN_qp(wX*Fiw2dva z6V$#RPck91vG3!)?3cdvS#LMc{UG~wTxN1Sa{M6rbyqX|cbw2+`G=9b*ZoK0gA3yM z61};y*TZJ?;i~onHoc=aDy6jjsP}bfdGe^>^+?0&s5|_q@AWv$^*CSXs1W_w=yfv- z{bXr;VDQ7q@aqEo+A%}g(X!sD_R5L8`^1;igVFGl9;?%H_tTm5({Qve8KB12GoZRD#>%%!M`05N&a``L$5EWx@k?D#rqm4u9XoTsU zS^tt>_BL z{1P9su8MJM8gX-VdiJ&BT&H7CFXQ%yQo|ShGq2SfUg0}ujCu!;m5tL&^f!%9_+EGU zZu{l#6ZoD*={~fg*5CS1AjWkt^L3d1gY?rqnf~py?oIsq14hWh(b{ED#)Zk*L%wiT zrqV5i@J*`6W4T9J(b+9@#&t}`b%n>%73c}K;=wlJsU73VBIEpf$5V;^Qy%1LP`IqQ ze#+yelJ7-E|0M(x@v_!IwFG{sReCIgyzE+&ZGvy2m0tJO zU)7^u9Wm~l&R(ze$9T3bPkQj7eA&+e|-{onmy8}H0%qf(!R|b^@%oL8~tngnJXGG9*5_+ zI-ReSvqbdyJX5X43m$`7XW`tiH|QNRb!a>_O%YE9m#%rj>Bp8|PKRk!b=GSAS&|Xd zx=Yt)zl)Uk)lrg8b{d4<>oY-;A58SpTnZi~3zzP!H@f^VX!KTCj_L|2Ta49tnNF5$ zIr+y{lbB~a`s=Ls)>fY!&kLhk)Ys>MT*TmlOI1#~G9ffogw7k~SV zb7k203^%WeEL)2r5Za%M0$`8MICoR!%>{3xlf!H7f59?}0mX4_C9ve}O}j4*ESTC* z<|!hJ?$D2wMX@bvFQka<;L!gUac|WYXV+%!5+ry*0)&v@5G+XWLV~+naCfJ$3J$^D z-QC?Cg1b8e*93R3Rmt=AEB$pJUw0pCt^NV`M%Bg~*BtX2=jh!T3qhxPVdgr?_k(9x zMwC28$xfW2Y4fau$mD%x8>|7XLOVvvjGZKX%jJ0%<(FCz6>gDl6enKA#i=CQvcrY+ z>+aG>VWxtaiO(z*npiSC50@9?N86XSy)YWJiqsHp4y70ozW0uo@?sGwMXZpXT*Y#x zlTs#G_^?z41vx<+N2LUK1SrzWk0oY6ri%RPi>g7~6{%tl+IclenWf{nn(Nioyqv`c z>{-q)vzJoBSlq8wjZ+-srSfg{Q;wfT;->Lxj;=Kwre{x< z1wwiqzi16VDQUUT^maYgqW;b}hzC5c?=;vu zrQSlt3 z{$+1mt?Gq8q%5_(5lPvt|5g8frU5IQ>!2y2{lTVlW8`7KZG0|fvJ*>1bNIXS8;;dt z+yu9C^7k3P^z%CM-R#E%Yw>H!nMGUerzCa08l9`x;~4fA_;XuL8zlT_9}nwlwHY;; zcfZ*%k2to?(A#r&+H}7lD1t^3+njb&WDjrAv@p`G_evVt8D~DG)oA5tu&0=BRN#6( zR%UiQf$RsN^$RuC1t2-I9RFqYooN$@$DuUs?HyDCEJMEGw_r3x%kn!-G(y^svho zny9lseO1}c-mV6=(SYSczddBXd}0@2nJWv;cGZ9P1F4>M?j+PEcWZWYpqq1-(c-Ja zCjQl}Xv$fXp%?k~1Uz1o00mPdpHkY3k6@1Ht5^QPx!c_Uymkp=1^ooc zVseyJs2ap1l+44T5JHLONe4uD^A+Mpm_EEVahBAXtqyD4{!r?GH>~}VS+%4V`zBBf zNCr?y91fjjpd6GkeI87j%%xZjgi!au37p5-xf!#z-Q}5iD5koj>;Xp?^X<|V(=;Xa zIDa(dg4-3-IfD1NkIb@NRuwbSTPPkocZSfqs#6}w_YYnsaw3%lK38&QHDo}OMWiB)kr_3TRP<`=U`4dT99I7-0>4wXEWFt<+*V$j%YfCB}4%Rh zeq$B>(JWRLxJ*l7x!r(rIx0&N{SbdsU6KUh7gjfB8ezAc4IRQ|j4y_im;u-nnFQsZ zze&FO@tpJi;8xUutZQXg;Gzjluk_`9C+546_-05VKX|7}E68oWwdZr;^`n_)bGdSo z=Eh5dyee(??0J%fIC_t94Ja8dA!`s_()j6N?YjLWM*{soO4LQS@h5H6QnI*mHP~vUEpIn5U%H(prifPRqfS$T+8h&o)5?yT2i`~# z_C=z&nlZEI>ru10?c_yGap=Bylx@5{$$_Q`-zwg?%gjakuLTp{j#m@kWVSMqKZ^(w zHHSdi2{?E^_NjQ@O-aOF=DMqzNp4|IC&h2%bF>M_dM5j3y>Ubo*D^;aP*bj9Ud zxe1SW*7hR&1fbVye$CU@hT2{gLf~ehxmGj#oqi)GBB--jKY9UauFhKz#R#9WC1zNA zud--VzyMWsDZBL=SoXu}%KiPy7S>H;g^IO32ee#FR=F|IiNGp_~=%@nnVPISnj)jg(V^Y4ntq!jefNyZaNBlpwY-+RvtC@L@ z!q%cUNitRj#M^-}8QiwiDje&otLH6k_`BwZPO%L~6G;tCyOYnr2K#jJB#5RZKQ1T! 
z*W+K9vQ7IrORF*khSNo3EG0TxrfTz?V~wj}xn zbk3!_((C;XPv5N}Tk{9jOf73^2F~*>eYdMOflmad@xBjtUZ0)#aS3|?qHb9@l``^F2wMu|u*- zNH@=UHw9J*=cq-?iyk6~Mkl=)AL7buq|i=^Bt8`10+^X9)Da@o6+ZFvTYX?h!zL*mZm>@aeGd{Wh!VH2<<> z2cu9EgoeAFH5Z@JO>i?%kfXJT&9I%5wuuYAiQBN7$*ta?rfI#Vou~FaC#}#dP)Gs+p}qwnnUx|j@*Y{YW?rdcZNW~0+Aevt_p+M8 z;?%uh1K?J-po(FyoKm-XPqPMmzNTfe@}V&DqVNu^NClRNzF~?EcV8ish*7PusuZ1? zCJ&EgUzvf3c~7!ot4PIwh?T=@i$f7DH<9WPk^OFA&`OI9cYo%Fh~s6VgJplE1)Cak zH{W)XTB?W>dGgm0(O5lE4?|?nN6|PWv2TaP?z978Bx1fZ8PLkDP7ARBa&B7|UOORP zh<50JtGrk!JaX8|K()76#E4iVn)t+f@o4s0sqPT&!C7YU1h>Ih?#{XrZuL;{ z*jQ1pk4R{XsA!LP(#&90&Omx>Uk1FyypMRUTYR{t#Iu}4LGFjb&>?bB+IHe zd?EYRA?+%O3PYqyG)cd=lGQB3l|qsgO2Z|FlEn_g(EaU#*x|ff$=q7W%pS?qnc>83 z$%M<{7rIA2GsQ`x&KVK=|*b(nssc*F-o;^|?Gb3)>Qcjm6 zV0dXq>`{AiX%Ne(wUD%x(x`=@w7J8mv9GjI?5IJmw0^C$ZqKOp%&6w}sQRTeLnp1c;6 zzfzPxGnhQFmp}HI{1qdAkTbbkBfs4}xiKxjwl(?VLVg8qY7t9efoy7ySz%UiYFbfY z(qL-bUSZ5^m8k5)=z z95ZZTz1g&)B7QSsQ8Ug`N-i}@0=+Y$lQU`f{StKpvIw&xuV%Aor@0A~l+dR?)B@bx zzQ{!>rK-+q$RoL0lB#`{P)GlA)AWV!N0R5l7v+O598a@K=yUQPB(#JDxwFDG+5q1B z5=KW@j08yYm~;HL(*`cexf&=*Ny@BMa~6IvmcoVgKpgiZR1l7m1&N9kU7#eiwJT4> z$|%I~>6HncGA5SF3h_LAk&3G$)<>KncY=8E=iskL3SRCQ-=5H9fU^*9qZsl5cwd}s zKUNe^R^`CYsX=86@=Xh&z28NDssdRhk~3AKj$TI_O-o-XH}k1E)zxW#K$7u-?K58# zOM(p@ofqE`&(2&_)+$T+f=acnCYJ=8s)cIJs+LE(WE-XYc~EUdL0z;8fyz`JBBq+; zsKUR3UHk(_W&&2cQ(e>+o_1Z`-6*_bN44lEsp<$*hRTP7Z&{?uhgo;|{8U{cF2J8| zX?+WxM@+r-DTaF8C*w&m>~p4{YH`30cD1y3$!ATVI0CuqN;zFe7#)f~l7@EPGKa3( zaN8^LN;qh17o1^QKf(`#v{y2n-mFww>}qhdovMlM>Mp5^o$jw7Ik_mlRB%gA(X4#1 zq9ETn-4!)5iB2mmQ`RLsY9y-6m5sPgYT#C6x!o|OUm(fK~ ze(};;7u!h}DI8kRrHfLoi`xF56m;?EVaw>@XzRVP*2DGCdmFBY_htk7Eke5<-X~oG z!gUw`EV&#kZKcnX-&((%c41i2IKqz!Eo`5+(0x*Po^?3hL?4byI4)fuR#hJsDj(+k zP5OOUMkE8q30S5+SY{q*L6<))tF{4~wE??_0WkdE6?7T!92@XH8}I=Pe;0Jo8w&Cm z3dtA>Ya5DKZ*zzki1ooT&A~Fh@nJaLWTx_A8PI2|)Tg=jp`!48!r7@E_+f0cx(lsW zt6RLCSbGZl-4({xxye*$ME|_W0Dwjbwwb|(I?qsswXq(du|B=A0gthvjIoinu`$n< z7RA=5z8(5~eGP3NwR#^_z|K>z3hX-xB^mwsuqe1Y?T)vbRJ%T^={sumJL>VqnjXeZ zt47A@CNAYBuI(m&3c5@@j!l5!CSKq@2ID;@ITN;lT`KS<>FSD}U%me+a6eBO;=;OT zFJt0EVfyV^-|OEBx*`E)QG{mE^ky+UX0bA6aoT3_)@BJFW{KfuN$F} zp~tY^Z2La>oA$dtB)i6rd29FX`&v6+?C4Ddk<3GsO}y!S(meK3%gsyL%}dA3%T~?H zkIgHd%_{*GRnP;cy!u}){yPQ!=IzH8 zeS}t1^j6b6Rx>hIv)Wd3)>iW#Rtw=)i|JNNRx4vxKUS?EtH)N;fFtkg1J!h& zOl8yhV~qwL^F|NtsvR`=Bot_?TI-la+e>TkwbkL6_0g*J@v-&Ev-K&!=8Vwhoc?60 z-dbbsSH{bq_H4)7alvpch0SSOyRrF9=>1 zgrEa@X#+y^1R+I$kTXCi6(H0O5ZX8heeD!R;M13!_20D}V*Q7k0uAgtpcV&fp{=^z&2AfDkM zQQ;s->F_qdvX0O^+xnv5NSVUqQ&3|8fTWq6&3p#2kjU&+gnx|&WD*zZgz&ti34%w%0EATE^{u2n>? 
z)eNpRysovfu5~&ODSP*>{WsgD7KJABB(I%|BbaKEE{t(+zsWl1gQ3I?x9>!5T?}sB zyl&7LvscHh&&I9a(`_KaZ7{=asKRZy!);{TZFJ3TOy|D2+@j3Jew!`A`ZL&YPb;L* z#dh|=Yww{Q;zIx)m25}z>cpAreiXnPxDtMR{eVUhL%m_c&VfI6m<>sdzrXfJ{U@)Kff7={#+JoewQ^DN(WA zE3kx8BMvY;@1d4#hv(zC=hK?!^NA+}5lQ?%E9fHa3w_6KcesB>HV_HV5k;%^8fPfB z(74L-;Mb0%F_V#XiJ3jYSSoPm>~Qdc=yyRE%GH5lflLaU$&fE{jge#=jkH1;%{k3n zsd|Im(a)=o3zho)(R3E|#061b=Ix@BU(-kyFecHI1+f%NrVHI1k1k)(k2NnRGR6FY zpwde=C@eBrAKo%<4@6TcmRsI2?T#d~+aDj^G4D@gi^kAf-Lw3h$;K%W;vW8__uXs$ z?rHD_Jh60cj7p)YTK=o26sLsV`Vn}(J(R#~bNr{EOBO=~|FKSUt@_2Uz9RzDqs3qF z7!;Pbpwqt!x}HBj-k$GGSJ*&!o*(aTE>2D$yk0L*fW+Sr-XX0y?wK_ZeD!d@NweOJ z+FQ2i)WV(phG*VL5t>grgg`B!JDvISf$nY*~;~B&$Ym1vICMKI_>VSSHEc9Lf7zPN#and z>t}OGY+Kjuuf{E8RQjyd<_G%Lvmq{qwyPyqhW5Lkk_;^}!Cc1`886}gv4l+y^AY9) z+|GX~j*tV5un7kLiLiyx$P{b-ZN+&gRs63N=kk@jnu347;{3NbLYhR~Kd(6JcK?ne zSPF_6|82#&+H`Fw?N1zmaqCYUVXedc3YPFsgiX2(6FaoA>i3Fsg&}QI_0Di2GsD~; zlOJq`o{QF8doW#Cl6^1AS@%1RaAQf=Qh)R(!uBt5gw5KBk2+(m*5|tuIg%MNZOvD| z7HTZk{%OVeew45pIW?Uue=hsA%R(<;@+K;A9Wudj~=|7!km`O~0wvQ#hYW+htw zAfFoz2Pk z+SoVz*pZz!rRc{RqcN~1;npI!sK;=OHGQ0RB!aQV_I5zlO+fdMa>M=ng7W*Hr47gT zzy8k^=Z8NLw$mP5o5!;O%Ku7)jmLgv-HpfRpn@Ue9syknljLO`T*m`kc__r~p4o8h z`B6L(9)g$uB2(|iH7KE&rK#I94em~hXsJ`dmed0B`t zWr4ZaC`H6by;v-$G(z66lbvuX`vh&Za>OR41BeKli2f|6_$=7tNc?tGQLoU*O+_C2 zvAqWf%r0e#+2ND`fU`8a)cR58QW=2NZn~y6~t_ zH7E(0F=k?&y~T83`xt+2(VvF>Gdaa#A0Z)hW?x?IADtaX?!kwu7G3<_6bvdjp6J+A*bKL zi9Qy@!{Jv%GQ?Hyk+7HLn!(6`QOQ$O0`TT@(#NY0r~DmgJ0@S)Nva zr*{QAZ(CN(A+fK|TVkQGuM~ovkFNd}a^mBGIhO%$#KuTrq?`(h zqKUeZ_+ivnLf=Q2-8Q;$HaiUvQB+Kwef5U-D^c-{mEP({Bg8oyCvS51r+%s5??esO ziNzY@TM@HG3YK2bUqK5ykVE0NfIq|F>Ks`Y_YxGLImnGw4pw5{+&|p|^&Dg(@&*Kg zhPd{{ULbPkB`(MUldtW?&{|I+-I5r2s^WndJBI>7OgjV4VZa4Yw;EUEerI@j>yUFO zaKkN2Fq)b$iG>v`4xLwRafeXv` zLoOGwGXNQ5=b4`c=rqQLV_!1|j(jK9-VduK6&9Xsr*dmB^}F50GPca==BJ+5{ltFQ zoNtpswP)&wz5u?{cVmp4z!!-43NTS+!zaM3fZAWXr59t|D#4?;q@6EpQtJa@f<9lZ zrZBV%eR@4GUAz9V7xYpPmj0Er*d#orUZ&z$PlvLlOqyChEduB3UzLN@E#!CrF7gCH zIyO-;em}hZKt)P?S!PJr0P6`psc>5BKD8x(01XDI<5979uEtO!}QmmNd15=grO|<^O?-|^e0~ErM(9W8{Mo`Tv87`82K|*FP8Gx`e#CqgvgcFP3q?JYHr$xZh|rV>ja4b z!^ag6q{}wY6Q>+M4+wADbCvv9CXqkiDR9t6n5bs@sy z2T&b}4IiGJ#W#)^3L@}w_mc-Hpi z>h=DjeAwn>HkEXdgzR}(Ytm&NcpFPdoJKw|+93}(S|r1|Ipt4ZoBNcTC#6ND(0qHbLz!UPubZEDQQyHmmFV)nRG*;12~IW^LibWb zO3B3nnEL;EFc$Kk24e@L%k2_|S9U$#kg|o9I}}L%_^E!=yf9i0ZW#V?>~hnxj$Yx^ zFRA?#Oh$werNTZxyn5Yy({>tG;kGBacE5Dfemh#>aWlO3e1Fpc1E};ulv;;&pxsNaJ}s&22=%#N^QPQx$U8nsR~vZ*?iM- z+xscJD%3=3>)rBgA8^bzIIT0nwPy>(@gXrXKx&)x{oR0=Om%d^$TkJn-Jo1}b!>su z4vog$km^`+{ zCjDk)kLTfT+zU{fi734<@cw=xP^LB;cXVHb>wYp4;J|zTV#8BusVQ;HL6OkSOvd$o zIulS=C@TF^A?1FiP^PX}W%Q?V%l&L+cwMQ9^e^@0`?g(W z%=Sl#b^ld~Ux76i=zd2S5$v|!{O3vxo$n>XQFUJTh%l0|B?0ZBO8ocvUdgBdeAGOP zW>&60^6kfnINd_zy1X}5$X{{eir@2n$Q1r3+4S(L7G%35I`3gGfKv>UwFoQK3H)UI@xj!!(G5?4UtiIU$Gv8}bnh7v+X9?o& z6IFO__EbT*^5d7K?fc`aastD48P8Z)bq=y99k~Q*dP&$!h6BR2imt%IzTCV1!W4tXB@8g=US){Yz)#1xe4YGDzw{&S!=4}5wdlO zLu|wFbOH+kvO}4=c5F8AUkq}wwfmE#{}ngp1ds5#p}=Dhd^^tWW_LHggUXSmT`-nR z>qP|iPUBl+j((O6WFF(hZx~zww2S2IM`#HntV;Oa0It~Yj78XMt8~k#?>EW(u;sgK2e^*>6=k|1PRrc` zLUNN8Q5nhe06fM}G+(^^$3VJ=ucgB3;~y5fQ5mQhn(VM3l|BdT&=uz9pwHCoi;)-s zB-yispr_Y3pK`irD^bR!d zR&z1b&>iI(R*(7pgqJ2F{r)$i!b5cy{^>G-G)#gPU7=Bb_@JZ0Lu^VP-51?tsnVB&tUeq)tnfI2^28v|{kNhV>m+`B>s#;C zu;%WSe)dS8^UBG58n0z0S4{3e-IT0LhXzI@~eqAE{!6WgkUUo z;}O<{OlhZ1=H%1cS4!AtWcrV$y?n-Kg1##`KeL^=Lm_mNAQD zAcq`sE`H_+O2Ckq2#&sysyf#Q^%gv%v_ez>b?Wv96Y*hn7dn}&&;`}=#(ssLQ}Lb9 za9}{wYqi%Gi389GW5VEwG3Q0nL})%sf%vGo`bF}>NIqM`;HWiCF3l=h0kHoraXa!N zbziE0bAE6P+i}c%(0-l>e$hgOEB}OmgLzI~Ces`IPBwZ+gJ2c@B zjWA+{6$<@UV)e`Hx1)t3>_d~`E|)pPbOZ+qQ|Xb(m$?Lbff6c1Q}NB0d7r|Hq)a5H 
zlb0^@e=G6Vq3QJd%K|?1Vz~f`ne2C0g<{gh3Q#4^cmw~Q(IH2vKw`F7{i;}Xv{<=e zXtvztszevPM71BP#K~8srqU(q^Fwp>%~xfhuoBHZ36#E4GKT15Le*G^>97nr)-aJ3?A0a9<`XOxJ37;Afxo`C% z{vD#?-;|j1rfK3oQDO#1$YjK~%dOUZ=?a(smJ&a%eOrOc!wmlRB+ri+yCm22je_DAQnsY_`rp3%`0|@C|A_4#=YyII*;7p> zE|#fpt-(ZK_!!!={5Nk?K18Im?|l1_`w|g(lkj(4!b|*SP?X-s#Bwpkm!x5`DJ~xa z*pN`3ZNn8?I2jLa1CZdOgMtgYNU}WY$~9hh2lsH2q`e75%CG#TpV9LQ?x2ou4)Yc0 zxf_7_{jxAj`Y1Zk3+-Lfm5w1@lcCti6kmW?Cep!@5fcBf?#!!a1`09W6TTlfN~HU?`I7KG*F-9v7)#UNtUIR* zg|PRyB&y$5MxxGMg{MdhAT<%(DafP(?OhzuxGy6J@R;DLphDimC{@jT75q1nh|U-$ z5CDiAflYWpZ>mV#;{79>V6tj3%UBkYd@5majGF4X_V#HDVf>csTQY$b@=e7`buIdI z_Gsznx|@^r?hpu31q#rMYv5ZsiBBG^S z?1ZrTpX|}IJ4wtV2xVGxbU5~d%ntGrjf)Q?O!5`wvk|JOBMBLi;Dz3Lb22L0enxY^ z=K}RWkj*(zf-jbNg);BQ895Uf_jyw?W2CSK6VEpC|00R>-9*rE;!6(m3q9u)#K0D* zgkyapH~2|U^?ExVkj*5}%Px*0WM%}SGW*4wUMS&*cN@6nYd72`Uc$EqKlq^r29fr( zbMt_4A}QOSQ?6w8LQcHuGvg33Pi+`(0CZhv?^P4_p@qsF3`i=7>eE?81JpB`YSAJ#+OhZs1YS+ie zp@l})3F=U}ehU06t&1^M(Pw6vi9hry9%OTcMr7MTo!OsnVh!@*p0_?RkRgsYkuU$D zPrYpllUsf1IIksC_eE*B<5iqsQbQn?HIzODM`&bZ?k^IzlJti@rISano%dg`?jTr- zzpcAg<$i|$Z|GB+Z!}2%MxWY7=4OJ@rzpsY|6!#4{|Q3TgsFoefgyvQC#ipUEC!k1 zeUPwdG;d{5pjQRN_K-$7j9%Xsc)Mg!N(h0V5fv z*02vlRh+AdqHbdfoQ^g24YS^Oh=6=N7FGEBo;;M8Z1`ZgV#VT@W%{EujusQ;+7L5z zt~ravGLx$OyyiEi%PBS*eXJZe23ongqr1W_2^ABSIy$fbZlHx;xewewp_Kk-e~vs% z6HMs~|9?F;p+9q=^8bLkFlIM~|J#elZ*Nj5a5R;YAbr7I^KUO6KUeNBH8?pbFI!Rj zYYHcSdy_i{(_^S2g2QxwJ2u^6jeJj*q)Pw1c(CQnVQIF-Ljg=>%atb69`1i$JSexo zX(0_2YaL#=Kfk)L{O*;p2_x5BD2M@(;^9*{Q%iv7k<&0xz#Sm`yw(H53t&t}AzxH7J6mYekpD`X7X3H}*Qw1s-BOk=+c_VKoXb0ozB%9bZQ>WQ=9po3f9a9&iJ723! zWPGc4G0h6}h7|6{)Nlsxl${ADQ-3-|5>M1UnqcgNvBXcsgWpz{`Uq_l;&Z zG9KEUh;qr$LCi@41Djq8m(1~!tn`!@jVSfBN*_(mi>OaT|Y=c7#t!RfATsiHL zk$eR802D15`;_fdS?4CB(d?=Tn7LqvX%th=2N~u=i5i&%SIS1Y#v{i_)}DqGI7Q)! zVbJlmAZ6@p8Q(7uVBkV0q}dmcWEwXDZ{G2}z!3Afm5mTx1ro$bT35p$V8ftoPT3uE z>_-MZb2Pj`jSQ-q%WrRv>31N6nfuYB-FetIN6$6a`Q;M~j7cZlJGOQ>2Dg(AL=1Iy zW+a$S?0)7G?#qn3wy$f>Iy&yxUX(me8v(8zPPf56p3b{_g5}QpX>ijF59u8`E_=q= ztCkcf4IZA)d5z_j#|=4YN1#3O%@49pfgjV)UpsZ~u%0+ix)NV0kn?Zc`MNGVX!H&B z1J9DieFz*<@25(|8qvaW3!3PDV`N| z{ai6anH!%Eav+O|K9h;yx4=&V4vinE$+}@d6e8l3aZaAj9uC}RVhWxN%x4a=cqrr+ zRv$O86ih$mruKkWmy18f%0P59<9oPMNGeBvn;%OlgGwVGFlLv{dqo`>E1cJaQsjDyS<8YEXmv19 zl{*yPWW*5A8q<}iwY*a5ou&!MN6ZJL%qop^{#K>AYVb6bEr32MH>Em4D}|pQ9R(F-sGMk z=gZ&Tyf!n}e(fhs%WUpU_N4r{n7yzOh*tndCW54fSPj+;GpZ&39u6Xe(XJf*49 z&j%_q(?bub{&^$fe>PS8zy1{~Ob!e@obTVpltYTLzfEKR!I<*zo>27<-^gYTJ57bt zrTl$N`G;`}z1{uO6Kc-iZg(OP;`SVvQ1;wkiy8mMo@29MFP~ca`|a-Mk73*rymHr=L|9KN8X#lG5E> zFHe>kGV}<($hjX0?6At1a>BBvwk#~`~u=I!+5j&FIWPsc?6{R zcB{M$v7~uG;`{-fH0Bp%F?3X84WZDkBoGo?CrB^=<3tZ`5REzZd^}ds%{G+IB~T89 zD=L2a1G@*jZK!{}L%&IICyd}UV}5$k&*IY8amGqKZjkR|1)DuX0CIk}$s9rj8AcyK zG`({V4!1ydR+dcUMo(2hHaju9ikmG0e!oa@7LapaPo_3GJs^yT=TW>H&w=x-gm58t z6OMtJkRLIj?Dt{@?dsbUnbXkY$k%N06Io)bE-bAXN@5e&eVSC+{3_bNi;Okk2}fyF!#Mon^LwtjF3vraEzt+ zDXPfJmvk&)W-`t+q5M<6VcxE=v>(_l_T@fBZ^mx*;Bx8M4v>iAf0wCg_^B(|%WR?MQRXDqg~h8Q;?|P ztv6D+R|m~gHzKEQ!gsy`9{c?)Ljd$UT{Akk}6jUphqhJUn;qI zx;;EKD<;NWtebfuMAD=e;z!32RvG%iYL1EnV22=7k3E0`;(&Y8*Q2uooJd7_k*}*Q z7`*~oFNmkWq7U+s9OzjbHuernnEaV6Se##z4?hZBzsCR09YNV#&!7f4auLYk39Ma(*6+lywcNf4|`nX+miVbVZO0j*xGinn|POIUUuBd zSiFgZ_>H`Z&TwP7bgc%~@l;O>^9vhrNeRoj!gT8}@v&R7tL9#FL)(H?B;SrJ%f0%% z>`fYjN0#KOadtxU`HNHV9-cOA1bgR8WgC~#;n5q4`R~JiCq7rj8gs2EMPgS$$~M&- z8_(Ss`dxhif``aliN%r&RA)i+CHlO=;{YOe+t6tQ16+gdH()ux+k9UGf&jGsjfW}% z??X!B6DPdRQy6~X71DNj_V)=d^D{`oZ`8BDBqTV({{xCbFykVqU zm^s+Bb?g0JBV^R_4O7$&iy_6w#W9&{tk`A2o#e|T3eR-pcm(OdbvtMN0BxJ>#ka=T z5lOvaJoaJKnj15xV9u2ZAHFe|(cje~9+(5ziG$Sg%*?&8$O4)Ri9f&O+!GaFD-sB@ 
zS*W#a57OUJeX={!CqUXR(>kxy@N}-%`Gx&n6cdSDChZWysIge^?9F!wBTXjVGM)U4 zSRfsLWc9IVIg_n!P~a&-K0;%~QV6D0>j>GgU){E$zK+k7z$WQXow#|ouI=hGmL6Qo zTHm#DO#&Ur6nABDHTs&jFzwWW-ECvxnWf=Fd9EC4=L(MK*P>zfpKRqRy%@K^_kz#X z*gVBkZIC7h#aL|kgvjO{SiO9X{K_PQk=ETGsy>oluVPY9TK70{k;a+BPZBLxvPCG_ zr_|^$ML`F%5YT-(JShy9gTYTo;r%Q`$g$+>;0MD6&N2Ac*A+2OKbbaAmJ+hSG2gi* zt-m_27aXJ$+Sc15|-OIkZ1qbrIeN1qy zux~rp{~Gj`i6L?{<%uX=wncJo6+DSycnmUWS~=6cXscMd^1`_1^*_E@gnQcaZ9uS* z<9Q#)Y$TEY*iS4&@VMbRyagHPJJZT`>-`ax6z$r+mD_P*SMhEzyZyPX0Q6+U!ezLc zaRc$J{O0F5p!=qu9&$Hr<8kRKdFq_@`Z40Or_JI9(d$m3iz?gSbt<h$rg=OZQNy@}_iM$elY3`Qh3STQfSMmLNB2ef^kQC#=eRK7)`zBr9|tpy@% z-oC2*RGpckiV8GxVpImqnw@ffqs-<5vx>ObDXCl$=B2n?8Xc3~YkTp=8qoM>g z@XVs#Q)>_eMNOGT;BZ8fszsAHM@tFHBHu;P21No6qUZ;sB>2tX@ZPnn#Ed`$V}Lcv2q-7O3t_nYH?6VoSAK$`eK~6 zbL8i{IDL+I4ZQfb;CR!dc=M)s%f)!>yLb>@f*nVKgIWUEIl(C@!KEp|Z85>)F2M^g z(VHU?L0n70F)=VHF}Nu)bTKjfE-?}>DVifGRxK&sIVmwIDY+>rbulUZE-4c)Ih!Ln zS1mc;Ie7tuaku`O1rwga z*_3{`n0|bheu|fI&XIAcmT~QzahsHJ-<0vVnDP9ZJ(tlPomO&yaR#64);-`FMJbSdj=-T#z57x}IznzJZYy(r$LC^5Mxxw$BHsVM!vDDz!$ zHYb!KSDf!sT$o&3++19`^iS+L^^$s*lE&nc=H`;trIPmhlFoOfU7V#o>ZN@yr31;O zL(Qd7oZQ%b=|9+WE@cbJWlPOvD@$do_hswv%AuUNZT0e9m-7AO@}JG+hfC$he`n9B zS6sVP+$LAtH&;9^LE9%PVBS~4a#g}>RK9erL`tbdX{kh8uKYWD4qKz@jce80l&W_v zRrt$Qgb!83@2g3wB~QVX&IcqN7s;M>27GJ-`nybcYc1gY{3Euj;@hwY#6{?LX=t zt>?Q*HqbLRE_BvFD(O@rnHX^!-G^~}S{ki1n&O8GY<|I?bOKU80xok{MI*U}I|018 za0=@@r~!h^7Z^wDh>a}WzX3qH|h{048oA&LA-e??2%kRur8;7ymffa zPNddO07?#Gzy#bd6C=(#yh|tQcXG^KK16v3Bn1#G=oEmz#d8!Xe7xN88S{hDFS3*g zxav)01!9DNNVG!`>q#XaGaq8;1e`H3;L;T#qBDfSI02zBbfTVSA~IqEPKc$Tee0he0fU{wuq$xK-2gB# z>*EE07r*B;^W%mt#(XEe+9Lv|CF=`Kl%uWgluuoPnmwE=aC;9@l%-vu&^8@RIE6~+ z@+T_c3?My|cS8;Us-!)HK~&Hc8q92!o`7cnAw5De5f#>j5ScLJYmj$zg`_b%TVZ;4 zhzu}382d0X=`H;a-2l)>zzOsLmIV$3LJ6ouE{H@E(}rdSzoJsgIS zuI{8oV8DF2FE?^12iVWyP3HoR(y|_QqATdWJn6*fuVfTl8TfL6@l1=rc8I+D0O-er z+tUE3(*mSVF-~+baMRFsKuF;*7*8-r!Vt|N85o&7%R%`|z-ecPz{mii0zewnet$UL zayZ_IKOynCx^o4d4+N*QKKU9A;_*NJ@tY5)vZk*7Zy(40WB>gea2#_#cU<@Noagzwl8Za34{C-0 zb!`dY56QM_2{!K)AXC@`Omv=!52G1~gfxlHQId&%T%q z9#zaGiUAN}V_?3vk_O~zZY>Z$g6JRtgw4ngZvccgqP;nCV#d@{TQ#IvZ%GZot&3n# z!0@M%T*AfyP{r)0hglyY5AdG`_Y!&&c@DqJf+usmE4RpQ>J@ zRpF{)f+-fuUMWwfkVslGm_U%MVi{PAD>bYg1SRuNE-qo5u zo_EXY)=Qe(RBg%AFCMPI&R6t3R<2!JCAV0dujw%G7&pl2ix-+V%vrSstr=m1lpH!`50>>#a z!f2E`IuM2yPC_HBmmXc)_(6hx?6H(MhrT|rtnO|aYrT>_xEh(Tk`G(S9~`;^+bFl* zP<*sH)@*`d`Fy8qJ?~v{_~1w)TS@Jsakqetiu2EB0_#r#(5=>6moIO<&)F(Z*y^3z z@*Lb6WZS+0+8(vup4Q!-$l2D4-~KeW?bW|M$M)qD>6azzFDcqz)^fhwAnjKj#;vaY zvg6Unm~Xc1hxQ7Gjy8u*L5I%I4)3%ay3HSYU=BUmkGvI*;5J9TK}UYi zjsjYa0_Tq&VUB{?k3;p2u53~rKHiC%e-VkHF!^+xfH_98pFCDLNwzsj4LV7Gc9Pk0 zqP9u(JJsVta0Z^7335rfUH!ZD zqd;q$c!Pa3kIh(jJO*C=XWuf!QS~@Iq+QQ6)Ql@1qS;NT%nF+WWnJkQ?BFL}&iTbe zi=y7IT+qm6AkV46!JROE-de3jxS7q-4jK;UT;s%-UFBd6S*t$%Avm01)s{;82Q$O* z_Nn;MUB*%VHxG5}Twh-znZ-q%E`LUECk1H;l3vw}__ld#CUDq=kO8gY;R`3i)W#n4 z>`KnaZ|Ku9*Z(;B$;>do%d1v=2&n>~1{E~k5EqhoV~nto=Ro3cU2sXBey{!*xkMWK zHL{N&>2lO<*!{Ovhly6c=9j>tIk}2N(z2DiiM*Uqn)9)u7k$fH(Ne>VoC&tH(i%}C z@rsX*^H}c63hr}-_&oF@kg8PU8zyTl4ORBhI~P}bjPgW1W)0NEp%HDum+7F{$fCr% zJ91C+0{OhL_LMhIu{#l{>lG#w21zS27RSRuDZl}0h$y^A&Xc*GZ&$+dn9RbHDQ+W2 z7<_M1Np$!KvF?_zqR2|Mz&T~a>!pRMcQ(s9wt0{QzIbR&*}iQ|@r*)Ec{z)r{H z4&_^&*nUx{gMZ_F?TUU+&hd6+TG&?tngo0Emi%6_V{lyozci6AIxOu@PDC=nZTRxD z7e{tu=~1&WuDe|fw`XV!p<)k&-r~OCU21ygKf}-32hHa(gxfl}w|J5}c1Ne(GZX8= z=P2To5}4E(HfACeIv7?R>KrhDb4sSke7G~AE1e=)WgbeQM!tW?$~b=9z|{2Nx1o`n z-{24O>3%ZN+>=#cJkG;cD2TrqYO5*np=Jn;PhYWNN->fd0Zaqg1r9uqRYYyIyt(Xt8{ULI2fG3+6ubP74?KlG@}?j<4P3b}c&V+E&s zkJI}Ld89vlfCcP5$t5e|)41Min7Nl+s#7Fz^+Y*jWjUoLu1N4gaQbNvYyBt^zW<@m 
z`ukqmPr>Q+etV|>EI4)P$8NuW7n}y{KV7&GoL(Qiuzvi{2~MATcF3}lmwmb>F#6bj z%SK16Oe@?e!MCB7RR__FL#{lU@~M^wBBZ8E@$yEdZNW_&a5qeWWb`Q$?WNh{P>O0# zeBP|Z0)@;CJ#B;RA$SK1{Cjs|bpW{X&Nnk$|4)ieTF8pg4=k=bcsf@pjY`>&r8eI- z4c28JkE0h^Cy6wm=Nt+7sCK#@U?PRz3sWD#0-9D^Ch3JbvZ`^KJ5g7M(?rLpd{9eD zZx$nDf0@{^Zg+#S=M*(6tEz%mozPZ>XhxI~va~BYn zED&-X=!3kVd9ACcFdEfZYxqtmQCtlnw`vVB9IZsS$Vd2SzryBRHlqZC1O~T277Wn~ zOverC)O>Gj1+~p6;%xE2KsyTRmX0>%@JtGr(zGs^1M|F1jZjYK4Fyz!quM4vRmutPn)i%7vCT63*I)$WR-OZVL8bje>6It@KUTDuRq>S6HQet`hY_$lw0|A; zLfp^8ReOp9dfgfs{c3B*`NWnU&DYI8$Fh2(^Hb+x&8sq8gLuIXB@zeUop{2kW$&&0 zu}QNRG;?0U@MAN=4}gQhr*0csKNj#qAU*DkaUh`^e$=|da50`Y6oJF}lYsYx5sRBx z_ZQd5UG+$sP(ctsGNbFbrd#^XZoDJp6I%$NU2>Ck#aobpwHwmTzqM-k4{EmdY!})s9kTBohM=@Tb@I3g zB{<@>lf`Mu$-3ncLPhOa@~)~w7z{wxxmGNz(6DItMYq0^GlocK(YcWJm1@(QgsQEC z*PMTVylM(q86(cDrwlAcM&H#1;NNxr7EAtV-+}%vosfH<0Z1YZ@h$(}Re9$M)BZ8K z{Vj0R=g4Odd^&TUTdX&2*i4571uirDlUHG!qbZWf_9ah7*;}B6^DDdiKoA4$+*+l= zafC#54F%q|o_rS6|Gb5zA}{DLl4b?lgId2ieRT7_#v>JcQ$o#vYGVh$F@=~$;O8`( zPQqYSwLsh}E)_VYE4*MfRPd*?;F?QDJR$zrfL?Z?iZ$+9;Ve0rh^KD9f#>Ngd0M`jm*ML{KC!B!*4c(TcW~Y zyAoI)hCMXG(JaErFTy!J;!Z<^8!E!%IKq=5(i<8HH;eT3i}Xv63}}c9L`6P2jtpjq z3WY|&(OPCt0;AHSVj802P*K=oF$&2L{TLdZY!;pB7oDCSo!Jnb^%LWq%MgWx&S2$90;;z4wd5 z!kK#;;`&i>gU4~h4DqAT_;Is1RS5nmfOTCBZw3`VcO1XKkgxQEhryzGY*w&eTQUQlBx!5Zkvj zPcbP=J{pd(<_kAx{EdV6%f0mt?8V*0y}8tdLlLOs&@as4+D zfS$!8?|1!${=om z)5;(EA1%Yoi0I|}Yux950q9OD80s<3wlZmqeJzycq}J8i4iN5;7~bM)<^vnqaRNrn z(<`FX?jcHOlRsdQGLPx_LKa*Q&oNUnb#(iR#&lyv5%Ky`vkgEE2GFl`hHY64XTy8K z`XIhiYHqx=T!)ZblUW)JpB7IyfYc@5vYstu?EHd7em`JSGLwj87cGlNGCv};;vuIi z`tXtI5g%lC$nJ(MrLhv2?x)1tdFesdB@HmE024RLZdoar7J;$!9fqUGRW0N<9A7S4 zG-mK!z#^|h$h{WRvTQNV3ntP^mPufDpQ)q->ZV2s{V0N64P?Cf1z^~5Q<5z`mJ*)M zI*~*>t!Qv>neYK_cKC1*S(n*bP9PQ*seA9E@q|C8;Fzwo%9RW{yV0;xh9^73odbb4 zO=9R&T<~A?7YF%nZj|IYC8ydFHkE>lsiuheONgr|H!EtefZ@-IPd&aHR$*phT$L&b z2sV^RtC}_|e}hH-pQJM!e*W6>Q{s(B|K9@8ReC$%7d?T=iFOO__e)G-0mIPs3m^4Q ziTBT9n%@0jM3cAbU{ptX`(R98L+IPM5xo_d3g-Ba0J{GYFnp*maOM-i@ZSRr-%VKW z{U1wb_}>-N7&F)JM?*KH*NAdu8Vae;Uw5#aYv&rdwJBPlTp&1jMsfu9gw{2PAd-Gt zt)^XkR?R$O)nHuN&OA0BXgejfD)}P_yfm^F2@&d4mv&PkWIXR6erl@$b!<=u-r0$y zzS2#LIS5X0#GWf$G=y>qd8%IN%cM4Zgv0|c;v8xmAX<+0SUXGHtr`^;C7c4qRzEf@ zc9>o13YQ$p4y1<;j$R^)Wa-#dyX?&sw^hn1$A{l}?PeI|55I1?-Zqe?0gM(9>AbiwJTK50ffA0MT#Xz9(x)iB`7n5GGG{2WC16dfn|xw z2H8%O7u9*=IGBD>6qjYrE|#s8gK_ehkYPZHh;@gP(Fv_2=HVA*{78x76dEqqA%lTg zH&C9`VyEML&^yog8wa-ZhAUj+(sz*i5^ZFVt6g%S zMLhT$2YTcwAV|Px&M85cQuB!k$q7^E>QFegtbxF(zk<69p}i&u%MeN@ z$3PJS$3PY!<#lVw zOX6lIzX@)$F~2Bh$p;7!dL=f&i<^dh(ahDtR1O>n?xcX9bqi-9VSJcR%dGjekyooj z44#6FSzdIi%k}Jp-xoA0J@y20ZS7L7!)-3iul>88byC=tha<>-6hqbksqGbrT@g!e&P|Y$c zmtZ7h;O0l~l?+2AN=LEa8b{gl@k+C~efyMB=j^%0TEB_1oS*=GuLXY=EkSpz0c?Vc z%Dm*m3_tNlCXI!1pM(!OPBJDe-43ld(ZGxX7LBXjLCLtxURmEtRaiI8hHl1`oA7#m z>~|rTI$#eGihRlY+QbQinCdq z=A1L5t0-Nko2HgiX_}S?zyT#c=D3uk-yIEnee~nUJbyq6x_O?Ys+454W3(SYqeewt;EN@`vBU^?iXU|%5kGNG~m zHf%q0`g9FC)Hry5a6jkA>AE6C({Lp0AfNdRt*O^Enl^Y)EO)k{pV%~>^bY9<4kI!f zYMQ7Y{8s(o?6WOJGpZYQ`10x5ma|^-r^&&?y5_TO&&1}Lb=XnU^w}4`q2{@x!J|6% zFe0J54OhQ=B?h%38beH>K0lI<-^=B^4yX7n?d5-5`}i0C5DLJ-p#!jE+lv7|HJ-yW zEsA*50xUfPGigDGbXup~RZA&6o(p_}ye~MV^+LcfBXrVia}w#*7}9R4OG=5feO_>W zaIr?Fq`P;=bpu|#Y&kFB*~SfLgG`n9PuxvL{5r*Y*>lM^DL0J@HLQ5LJ|(*vmCO5B z-(osk?aaMB!bA3d{L_^H02~$oW1kv)R zN2{p?VgfATerVPLdQT>6W|ti+ZFOOW#aBI}9P;YCj@EtH+#_fX@WpGnWa|0BAXLpq zLB3?zNWs+W%ub-LXsNF1Z zrLHRyhummxj)(!^!EtuKisvEV0eMq}_r`(fAE~W1-DwYyGoI2QXBpacQNO z7EeO9BD}#=5ucb!pob0IITZWx+2RN#lHaO+epbEkU3}VW51gVvTzU`Y`-{tOF*p=6 zZ~Q!ZM@sriVSXHSa5xFKYML-u&3$Q>l)dAydEteJb+!s#4%j;Ivvx)?A}dJ1Z|IEU zV*;|`iJ|gtmJ$)nqF6teTT&KXKW9rU+JQI)0V2z^2O;3X)uX6 
zo;=|2o45+&?5IE00=Ly09o%BD{lhoR@>hTmoLGFaZ~{4zIFanYY>yZK9-5K{smL4X z5b=D`j!}`1x>%~>Vyr4s4Qz`SUSI%SoiQ6WC%AQwR{gp#&81Wzzdu+CMMm@6<>xP6=D+vI0H6q{ z!CCli3j8v-{j=Lt-W7-5WJYSf%7PC+Q>lJs-O)ZW;;A(4fBHujhFhfP%cd|kzo{q}AhOaJeQ$l1E<6`3-im)(B zO==7nrxQG%J65F}VtE557LeYi*{F#3_)8N>9v4jJ>l-dGSP}iC8hXsbJ=nGwic~^@4ofyyFoKx_#q zFWQf9MMl9$i?V<90ZLK0Pmg=NB{sN0<_MBPwW`#PNH1?8Ms*d=_WP8MX5z=xY_R3K zW_pL@xMI-R5bkKF!?zJ9_F#bMo%;iIQ+0)JeaCGcC~J<{)7(^)nox?oS~uzBFfuUV z2-&Hh0EtSjZZ-sY#8Rx0R0u;lsaw3H*yiYKjes!>;tdtwiWB9d;#R^0c9%EYCwG z&r_KF!gHb4y4Bg7OVQa4L7wN=DT&rupR+A8WZNFVSeg-&!>kTruv?J8#cNcmX|HCQ zRd_wtxEX{?pk6y@ysol+_ImHt2Coimb_{7AHL+Iq&2IvOmB42A)zm?O34}#jhWL+^ z1%$&|O?0}lvkSBag9nJUSk#<-1?)PVj;#u%R<`~MqR~N#+P7jD_OE97Ej9KacojUf zYp&et0badp!UFYK9LmB6fvqxf=Nc%GaqoXn2uqQ}El4|k<1FeuJkQV|Il3IrVTZDp zmxM(9&6{iM4Zjr@m~4hPG)AjLR`cI11I7lBX~^yHb*#mv@L<}&d!4Z19-6}8+T5prwN zNU_ycMNdn-_WJ`1a&fDMpJ<#DlVCYt$zn(aB$iq{w+>iXyLZ^Ti2zXcj*F|B78ASXWcvbNM8xg|CNyhb9yd6O5k~;9zij+<&1Yi}VD?0q zl^bA6^zqtNQSW+23aXYOgSLa^@r{=*>3QVbnZG1K*K#_CC9~d313g3q2E8*bF+iJ)kP#zD7jdH)& zm77J`$4WG)IZxk==AVeaH7mhmr$zVp7S_aCyGm!-admCUYTGYgMMw}dPNTe(i!e}e zysAo#gmJ(htEg~6>AR!zTGF%Scw z6pY+?hP_<%Yv#S&u*r=fzqj+f8;uVZPy{Mf60I9Oxr6R)^S66%EQG|&UBU(K&vMYG zoj+_gF|cfK@O+o&pwS(!RYcpE@-)W7QcjjPCnC02_l}I0sh(*2eO;Ex>X}x1R*OS7 zQ@4$Y$M~^eY@(|DdebU_sfR__T20>e(|UKQ*3grsTduwk5=349?6>LT++fHC4}EnY zIHcAihTpVP43O^F9(P%mKbTD7&RZh(IJElHX?qmWS)um2Mwo!|Q>C0fg1aOD+ee{X zIvUei6qLJLd|!Q$%xJx3NJ+y(s z7uyXwPoAGEcmejL^-b}&c_ASrX9#jB4Bo%?VjQqT%<#ur>iT1dF;Ngfv?HV`D1c6@ zb3H8SZDgG9tSCY+U+JnUu+6_MDv1@(qk%PLUrc+-^82bRs2?0A_?8Zq+gWDRXyx}Cc-}(RH5nuwhvQCkGE%Ehaa6kItM1jSF1k+p zj~etH+j%Sv=k3eFZBZc_Wu}#X-D2_6rxU^!Z|)M?Pis$#EVSYh?pHma-%)UjUZwLO zlggHeV-PzxiBwx1b*f)rx+E_)J;uC&Zxg}qD*-IPFEs)*U^j8M)wN+uVu+y4GFsJtMRrlPD42JP@e_vDFSGWg>VcF&c;>m*OvW&Je3c@ibb4RSP6<_1fdQ61!^;{U#J1^&Z6gLa@j=Xz4DRhvnohNQ?chk;kfd{S{OvLS7L)YYrhP83{39Ys~CfvMa?v&=DzW3H8!^67c|q*HfQ{*LQtYJ;kCi2z4GkwwTPT9%9_#&D;?8c?@eg^eqsm zv6!lM#)Nm(#ceCpJB@S|#bEEJc->8}A3jKhNV`33Ap%U0?<%whe64fuCZ9{g>0Zdt zp8R5FUAuNJ!%zEF8^`#Dla9f+ulq`XaxDrZq%AU-q&$<3Cn}#=UJOuNh*;-sW#sjw zc*H2I9$W>VEu>8k4Xkg)Nf>OS77zA<W-()YiPcJQ(@aA((U&o--xj2c`QTY2zWtS)P++CoDP7t;N^zYZ(%heWxFb@b#m1VzqmmmlkOtB4B& z&n}mNS!FFS3uJHVh+#UWbi@fiB84p*yuZ#-p3x_;7o;C^XP)v_mm7yH^|OY4-?mrs zz%)d&T6|%z^M%8(^?1^~sZW@jb1awAF0$-Ig!-t`pC#0yU^XJaGe} z3t^}w`wpkNU7OAj!gg_bbGPP!jZd$evrx8WzB$@f8D57=y2@?AuO8Et(88gM@-WXP zd@x2rrB+`fk@$?a3Y7yd8KtNIC0&LcpPW(uYjqhC`}Y2 zE*x@R;Vf9rwMKp`S)Zwq9AKK(>KdGPJm^M>Z$*s6orx;FCy!PGXg?eB!dh>B%Sd3Z z^Vm_13O*94R8*L4nv&hiRYpo0R;DsH@MH_I6M_4|)amQCAxwM4Dj}R!v#)^&2r~6P zUW~`+7D;kzC?KkSk(Ez2+>a04eEYO^}-jYL242l4JI4> z?6jwi9<9OAgWB25UT$MmrX>Qh7565sU)eJ^Q@!6+Dg$;|U3T>>Q^_YzVG1k-pDDQU z{)?vz65HgX{n_Nh0HRNsHIcSZj~OES4wD)?l`vwARU;=XO9hTAeI^%GGcCp{TW!wW z(~}2{VpDpgBE2`?9I*`ZJuN}>^rn&vrK(&X3o6rizC&R57# zb|chtG0FuG$Z+~pM(btaJ;&=J_^Ji4uudbuGElbCfL1L`tePd*{)y+9wfixki?P{} zt<#uvoTc8Q)#zceT)jno#|9m>FZ%e3^oIxh7BCW8w>J1jgcw20r7@?99Acp7<3axQ z5yc`I^{6JV)sEL8JXhsk?OWzWrcrKOImy%aGVQnQ+tNoXcr6ZxUzR@N+g`_F8$~p& z9t=m}(w_rKe`_{oVTXn4k72oqi`5O~XjH^yTvAUJ#&4Hk*jv*HLESkV583DLjZujOSx&^~ z>*gz9Z~#u#w(UVA57%gVYHge0YYRnVcZ_b{@Ci%kPtV_EAFm>+xhG!F6CA~bPvG}B z|G=iqL-%r&CjC$LHSKgIyhm6V6ILMXFdlQV7UAo4m_OOB@Ku=rka; zF1#UN{mz5PX-9XUin)#icLLf@wLp3&?KN}eja3F5;FZxpWd9WTs=aMpVbK#4#@dgK zvCRfwoA>$e=ZV#`vExU7(@IZW<&kCO^T_gxSec3xm-Wh1d~&EX-SS-o@@Gdd7dvLu zKgaCDMZXw|WUdcE)lgcR4d=FAHtmdd;2X~E^5}!38+c$GDzOAt?!1ZcSkgdpMh9NL zPRU*Gfq|Dg`tRBIXYkNBh4K5`f`!W6{q7cR$fujF_JHkDln_5GW-SX;gc>X!eMUMU zxq6bE{|s&sZ_kRV+R}bOrdG*6y}e}|gP91vH=U9f=fly(W=WLWXIXuN@ug5~DyKzL zNCq3ho8(;c&iKpumgjf@ZGhQo-tnSG?n{@g)Wr*{rNhqvS%gKn?q&B 
zY1ZLU+eNJoTJaO|&eg22khT8s_)kdYI^VDal-SY4NYZQ5d1as{0h^SMIKcvK^uDb$ zC0|pWVN5dukKZcDKo+Gh0ED z<@%v!hzQEVntnR@^U726srsRwAM+`qSNqPmZM<_ET=())k#=izT{*XV%|2UCZI+TbSL{oyq9vS46_O9#KBYAzvD6h>hctPsmz5;0fw2?)1 z1S|fd+Wk<$0`8thzYPo5vPISo`d-J{2(jBW|4f~EmnM!>wE7z$Db+feNrEXd6 z)aO5YyvV9N)v!7dA&Dhk-X?4>C65t zXFmDz5`mZz$~^Bfz5+v=#Ew7)cM9OLGyL{!MnF6YMGk&25zYaBukMS%+QMSiRgtN>sdBT zuhxJHt(x<{%u?d7oxgpu&>r;r@BWKb{TGjhv7`03dIoz4^;{g>80<*-)z~UUm{vzo zM-Jz8h0LaI412}ftHe^^JIx%EXKRE&RL(#1zzCBb^J0o5_+GARKVet0uO${=@5&T@ zK&8_t&NrAR8!{Is^9pVFYzAv}I!;_SNCpkO7AMqmm5bL)s(U4__A!Og)Xn@BlVmnE zfJzljq~B)pq{i$;Q3p^_N7fGxL?^w3FZPB8l#oH|s-9K>ZL$_prhK(>h(O$#m~t#~>cfhl73Zz*iKY`3SRq+PnjNbx+9t2+8;@iHw) zQ=aam^nE$>xcNjgaqgY5$y@uio9>k$${2c5>}^CQqK?u2DZ?wyfROe18;_pv*eL#a zVE@H1{(Fz6u!H~Wi!tV+0{FSx;&pLYIuAqId~@J__9bAujtJZrMFl>}L(QIKBC@9? zyThdF?0x1m{>YX0)Nwmj6;R|Y?RXZ4K}&u^2rOO9g`{?4zX+CxZ%#MBn7rOqqLqZvZB~l`Q(XVJDuX~k#RQP>etpCFxWB0*aC2VQRxbnVc zu?=xC!;hZlbwvW3g~sxhnbczG+3&1iXE+oIQi~Z=DqOjqjA#53PAtCG=5g5~voe}; zy+2#@!CE1sQSo3JU-S&34ZbWFe<%s->H; zi;wNvh!+DY1cZa1RoMWHz*m65iEqUL{kcC9)usJz19YyZfrtTuanFhPKCYl!C{YVZ zl(~19RRiYpUkL` zXTFSpaESq6-19|w3q{W-7=4y&h*oGe@P*|MGo=L=8DH$;v9owDbhXZ5`OJcaz=aZ`?&rKVji~5uhr$~NX_c~5~DGn-@mhy_F0oCrjQkL`S3-uM4;>0Jpc75S+_m& z8$#q*Q2{!vz)+KmH7p(q7FI=u0iz3O!^t_5|8a`{d%MqH{1pcKuE2jU125K$q8o|~ zo8LUVkaBsc^UOih;MaAIIx^nAE0(b(k9z%uowS#1R1{Wuyzn9UvPF|>nRj7-kcjKc z!YEOWfpoGL_Yj04*^LgO?!s7Z!bFA$Jorpoz$;J2m=*{D>$lyY&fb>Ie0l|PLs|o0 zHPGs0h(+2BHc{9?Fr1j6*^!>g(4|$x8LLYM_EZqilyw7XXRv zZ0Y&s9lmS{zZ(ElXrxhzfW>H&o%ni`&Yd_}_o?dxU~$cCj0BO@Lp%WMsF4fcY5%|s z4bG$B&+_-HpZa{P%j&Q`Iju3y8q>IT6qUU>5UUyh;1J^1$DYBH>U1mC*RpOn|d-A`L+@>R~xA4Ye#^^tY zwx20peovDw%+sIyC;X~FOY(!ztO8=@<)5N$EK=`p$!*wx*?Q0A-;>+wpHF+3l79c2 z1M+Xk4X#%Mk;XrfdaiT7lG}b5(y{zR>iv`%NG^|vTKy?A2(S8;+(ycN-XwOB+=i7I zutQcuojYwjYKTkl=q?VYu;3Y(#K4=oP@Uf8{oS8#iHbiDt0UV-k> zoX2)8wT0zu`YT!&D-MPyGGw4qi1b=*$9x!#Nl#AD{fvDI?gT*xh~+GcL!U_>V4qgB z6)tWg-I{D4D})M+%14x=>=xspYtF0@_wM<;Cjy?S^hBETM-3pr-nRznF@gp5vc9Lw zv{G1=U%rez-bo@mrTvxRB$U~=p(J(Mjobw1;<7b2`QC6{IUr^J5+haVXhPr`%W|H{ z8sHVL^qv>4PTbxLJ|z2nW&TxcED|SDXzk4j1=h7lT_muhv#Ta+Tc^^KPQd1n{EWc< zRh&sk^+j@<^~9Q`>?sC!C=?TjlgO(i8r`m7V#w&2{s`|%xMGf~z#8M52*z7F7Hua( zG`BrTN*M6kTr}iWKTamFUt2jDV%ip@=4Rba&c`jBVcES^ae>sceIDx9uaGdq$n$ao zip}|cbh=C|-xG5@^xEM2qN@_qmmi5jvwSz5boR^l%3{XeKS>ZeXn!K4Gd(K$)tK1f z2FcqEv?7+y6&3uYS+IU<&*I$Ne8EJFnwX>Amhq-w?gsh_vY@-r!MJva3K-qAqdGNE zrCV3ft55`2!DZd3eN}1OD;4dy4)VmB!67YtNS_+G1-t5-&U??#+t3mJ?VGtIn>*AKxDVTcZJ{S$hta%S18RD z$kJ0WwXX^(T(gvVlE%DyUwnem9K1-gA&Y(?q;Z@VFx#;HMJ!;=Hn6LJ?EAS~;akjd zCoNy)jIo@KFfK(e(=p}HQbyO`R;ZlPQ6Fq*h1rblu&wd8$=^__MTR)mle zD(hSlx!cg{?@ylrZ(Nq2McYOmmg}p9REt*9Y*yq1ZFdN^v>I^dk$XgZm3u4e3#P0u9L~yB4r9Wz;5KKOjs=!-Io$R zA9;834YX+$2HgeGbQ28G)Zt8Mbx24$!_`cDoU5q5lMhicpp`0M6#XQHkHB&E$^qjt zx_KoqqyP~KeER&S`Vb3L9&-kfPMnxsB+t8D$RpI0K1&h^#HcB{<{8FyPL#*wjs#sH zDPYjDtMvC@^3}pRAa^@dEy%RB`I-t9i;>qE6-8qS%QJ^_@Q7&y<01WGg!1ad97a-= zifYmEGBIM>VMg)h#@CGmuT$qneiTzLb?e?I8jVk4ZG%3+A~(7_LGEr*X0VJz~W94O0EWd8fdQgKVfjduM zApv6I_$%$mv*6hZvo4CYG(^{PafS_+t`9}XO33UanTK2(&y98}$`^m|6S+}yd^;OL z&ade;HS#!fJnx(s`4hR3gGFvw@dTzW5-VLGH{O=VB^mS`EXp4?{qiQH&1e*CB?t5lZPY)q!;rSEDMTE4>kRilYV`L`;! 
z@Cu7p{1Z(%AFJVSE38*z%IgkOp3fe*+TuK&XkhZKUYo%pD|=C0vQm|yLW1@cn`{_I zRl&C_T23xJ9GC8;zOstecUI`*H5%=&Es9^ijD-i{Mkl;}eou(cVY+e(GniUyK2>P~ ze`+z~ji?9jRXJak`?MrL{F+|PblFta}uL&DvnKjfrKk6%Ft*%s^+h1``R`Z$9B&G_e6q41Z=+p zJ1BZTQ-?+ymEmjOX9NjTg{72mDvKI6p=v}CSnyw!qH^NRlbV4~eRJQ~dO8C$@5XRY z+7OlbwHy|S1mT{}pHHN}$Bh^cp?>4Q$)Da%?c|BPQfbF|AIifrm><>n(vboI<>3=H zOt$K`|Nbt$SE%~=BT`0(Q{RkMX(C zSxFCmxv#L}_Fk^fFw{{XM-fPlE{M{G0Io6T6FRm9*}k|I{|IivscXOs{tN@ZtZfl> z2kAa&x8Cj4w941M;ZCPoMeYG|Et~PNtOzN&qB9FR+Ao_lrXZ`W?zZyr!*0aVVbx_V<3 z)UcI%)Re)WV^c$CH-&4{yAX5rVb`sun(iY<$r5`$yqH%KAd*_}THCo=wy?wuZRbC9 zq7akUCvT{J1Q}%pm0iQA05Te$de2JDid_q7f9;U=!>OmV-m8HlA@*lSENB7m2Q%R?&-|g;FI4ya{MW7Y z#HNc3fc~nCIG98Y&vK-jMqnL48aE_snXk_IRDo^Cubb+QPc4U#2TXI`*&a6n;5de8?BtMLmC9 z&UfC#=Iq~!AH6pNh0|eU;5K~Teb1Tqky3vC@C&=$j|xmAW|M=aajc`0<$2^dHo5H* z(TkSDKDN`krFkcga^>@O%)`YLKe zmr5h=6LCC1g5NiejC$N-ooE*Q#1E3H8J!jt zWxX1jj_uX$MrTGruqK$oo~R;JR7w)KT$8DiA-W?e9Yf(0Tbmwh)f3&& z5M3S>-AWYO4vj(du!fs4Xugai>xv^DjT=6W8|}F{q8V>(V2vte7<7)GIgXy~iJo`n z8AHdf5G549h+pMPSWJ&y^NX8Giu;0fK&G?pKrbAS(*}vh7Y<04?SO`#tR2z!j&XCtON2o!wC|``R$)NARh~#JQihq@}~n5^LGa% zv7sw9#gO=4Iw1WolG|=JrdVQ=+fGt!8B?*zZI0%tPGu<=2!UBJ{*B@PI=L;(JUzny zKPI<@e&8G>9`Z1|Lwm>ZW}kxp775`Wn_P9%$}M0 zH|hz!B6?Z-}6P8{>|hzlYk<#%%Xpu+?H8DJ+0DrK-nV=rv1$4(E%VkhUeeEKM&0Woozk`Skwe+U3zahLnmFM%Vnpi%E~=$BR2R!7b(05Q~>iH?4V|zI2SK6qtT#*;^^h{K|@{ zO3dPwIknbHsi&_@wQ6LiUm5hix>iyCd(TV~fco>Ec<+bCg}(2CxAM>A=>GsomiFc% z5-IwRkYw>?#w&^l64pQD=zoJG|1II$?57<4ZwcQMn`z?vTf^oTAd>%dPsD;q{!IA( zeheV6aT*)y(Bk6VDnc}PHAyD_MTX``G>i; zvUcLm=ca`L*!0S{Hf!b-ezoqhK<-)AsUa5xXdd`x1S_W3IGKsjcQiS;FxbpCnSSC@-{65Mk%rdP^}DO$YZ5Rlj&h+_M-gyV?|ola0y@3ZPK<2*#_ut=D6q zDUp|U)Bp1p4AiXr`_hsEd;(yAem`3 zAHRb|qotm7QC_~DM^};&As~={B@P=x_50GgCh+;srDd40_0N{p4|*w8=;iI&aTC|= z*C?lo?YbG?&+h4Vp3MK6@+%xi49DRoIP2u6ClP7;4*Na-*G7&2aS8v>Sc_3)r^M3k zqjq{|#gGYJM#sROo+o_LUz()7)qB%K89^uamt%UOkoKE1jXT2%sifNt*pDJ;a&@w^ zhAo5IvcUopZJBH>V_Rgona$E=P2P&hCm|&Er&`$AR(Vp4)hl4rr0P7G_g)ooRa=F; zKK76Cs()Va?*7pkzT16~#k$7xshbZ_mDTU>3n}yb@b$LzNS)gJ5NAAc?E#SET)e$; z<4IVvNAh`QV$$YEIa1=8v|-7u>|za)*aeh0T3n<2-cPvK;=|2{BO zb${sgVJnBeaI?8rp9t{!vAQI1`!FJWZfajo(&?CfS^hZk!8R;j89I z2L}c)x}bnQ5M5VDTjFQ4TxsKT$O$}CQ>Bi>(^M^wKVBU!<-4Ur%!1ho*z zZAps$FZSNEt?6&w7N!IU5J+eO(n1pur7J}s2?4=E5fD(3uAnGgI>Zo4KuYKcNN;vf z6r}f}ND-tcy#}R(UXz^Qe-+l+``Y_i&v|=3!EYvW&by3p&oOu_&2yd?jVx7C9*ejp+Jlq1TZM0=hp`TtC_UW=D3q0t$576rlW&D0) zWPqjU%}$r7XKsreu+K7k3<9Do*2E?}UobtTiux zB{r-%%x3!B35Ax4bh5#u$zy&CNwQWZuxh=j=|wEcoB1|fvS*fmX|eZDSSsFAY#C6C zyaFA6O@BzSB_zl3N7-2VMR!bif?>Oaov&NNj&F(Eqdu#|ok_42Q8P~(X7rd?K#?V0 zs#ql*5ZYx?`}0GtWa50qyea&joXTHU_W0Y_zg41;sWQ~w$v>KnI+(kpt!m$1ruH}} z2m2nwnWHPqeDUhv(k1DO*!!X{F6TXq3i97e=(%W8D$&!PrWLdz_1g#&v|+C9LR~P;B7M)A(FmI|{G?Mv&Ro$N1S-JJ$SS6Z>I*91Ws*Qml)_>NGRsC@i1s*XL zeoy|jmN=lIrM>kRopUsm{f{!;=K23DXnCI3LS|0elP>NsK%%?w>)&6E{)uOF=}SKl z4`@fELZ7}Xf)~-{vi{R%_47ILdW;%VxIl^MQW@HsHGvH%A*@~q_{6xGJM&romOd6# zBhvevqRpJ?^$$8n^Gsz97B#hhlK((>q@=oh>(3c(+BR`}l#p7?1ZLF@FdV5n4QxVtHh< zs)?X!Porz)xuO5C%yTdvu(xH3=!A35z`9~gdW8XHyu4ZD0aNj`ziNq#p_vqsoA!Z+ zbYVjY8FXdlx3cMCI9Po=nUofu(%*;@3!iqliT_ikV}{|M1@efn$i_FcXf~7- zLNi47%pV8!BSFhoYi;~KOKa`Wh%@Up12GTQJ0*)>t$#kEbH4n4*WekOtTr}(_`$F5 z&VQo8Q%pXKd+%QRVrlX}(cqaQk0OmY{xc082B8z`G^%XGl?%Yj`t3Pb#$bxi*xw)m|5B1gSa95)62|~o( zdGSY2zZz__Wspl+VNH|dC|xbjltqw}-w!D^X(dnS%2Yo>Me4ahlxL=|n1m{q6x~@S z0C0}8$%WO5J+S!@&sX#s*1@)$!9y0(i2n3gK8H^HVr31#My!Tq+E z2I3u20EL0uWVb=Co!wV(ZRg={R`Ln%F-6W1x41e_aEck=5injvN0UE6wPD#_R^rO& zukDt~Y9+n+bhoGx5j`mCv>tf8zH(HcC%R*G8u%`n?L|vK`&X`osh4}7#aVo+Z zH{k|Ebw|_^yFs&;*5c>Y9K{TFbwJU}U5+s=iy1wj@*Fym2u(gU(ccHUIPg~|U{I?d 
zZrd$iVvJAQI9>U8Iz2}bVW6cwOHBp*t3CKp3Sq`=FaFr?R=B;qAM?k4mm+Cb z>+5}Fze_>A0+a4phg1OnhI#=Ow6gx7UN+(!XP$g|e%GK}gDrz7Z2|2Ulre6_%Z~dP!2P0f zJO6u-@~}-mi`;OUXfKz!kpWKiCo8B+0r*sL4@S$Jw7)Pm%Y0RNL`#H5Zq)64*VIq~ zYrqQay}Dg@#Be-VosiZaD}=C=Zw9Tr5pDt@`1@G}B)cX7a?6JtEu50@Z{5GFu{jtX z#IRjATX4C3H>ZC=Kg-sj#Q4{fU)o=O27O0*XbLa8dA6+&DUNLiKBiG9U-4;GZDPZh z(koPcVdW~NJzq45`Jr#i^i3{8!f2B50lzKN9gaqBlDz8Y!e&+xLg?98xeJ~U8G(5> z8s6zCAge#cj6pICXez9Hp#cL?EY+DKd)Y+z=Jawj0GrsZO3+C7UI7$#@X>^M0)#Ak z8F7yp1OjbXeSPid+aE{3zxA=o;~ztCz=;ocsiM6G44@05wCdlkF>z8g^D9YOf;!#e zWHZ-(%^lF$W1z z*lE3kEqj>W-^g7=2vf9~34R-3KOF*RXeNqcWy&%8Z{(V#N)nPCaapH}O4SVu@{^KG z9MqhQt~?mFqc6!oaB@8D)ZZ`Sd!`g|V4!lDrMn@C+#swz2&eTOwdc)}V>km=X+!a& zeI;w`AtZM&ch;1kBQQj@?VvRNUbk*vLqb%Y&y1>wAhNpwi2WTteh% z`5+dRxnj7H4a38a0K*TscnSRFdDL`#Bie?ig=UKl!q(1t*pKlCiw$h z@=L6lo1gl^gWkpyV>vHR7wBCdE64b@cxJ3g4zVZ;$_NmgZY$5@HC?JC@`m}S#XWkvR%IZNr>4=*%A7ee5 zb)M;Ux^n?N^3W$d+kMVc%$|L^W&{s*+lOqq zaS>N%MfmnQ^bFo5q;}1oy1;8VsqrSM`0Cv0TYH}^y5FT#bj`^G?0vCeeV^8TbzUK3 zugl5cJw=ah{(SWw`Kw#(`^<&tyDpizGf zvHz?6uJ;?qv^c+ETbBPH_PZX6IMy|)53ue3T+)t{Tn>i^#U*4M&PtLEKYcduUQek$ zoRf?DUnTAKyLB|z4gV%-S8PD>BqUOV`uJ9|VHYRr_+GpatENxk0 zsV=IosPF!it6e7f-UV4v0i*~sf)1|!GhDR4Di1LE+R!GJriQp+8`4q+`0B#k3Rta$ z3aCEWD1NzTd*HXuicv(n{CPGk$~1qo*zf=be|S(eY4vLdAEB}B?pMJ*vD+TuzkYxq zPF4{2Tp~#WhB7`&Z*>kPv#ZEEt?Zbbh|_dmtKgwV~7AY#I-m?v?Js!KI9TLW=H~~sE0Xl#OR?h_lq&I9hkcd z7#J|rP9juVFVxyGR011%yEv4!BlOlns4;bzxkMO1FHFcO%nTj2R2*it8}{GpL#sGg z9@mHNz|h7Y(}$Lf2vEU>`(pp55AF0QEMxee`p`Sd5NPUuuMf?p3>kp3yc&*rvm5mv z>qDzV59CDkS;P$b#tf&${HTmMt`FUy5<`QI`geWk%19cvW4Ql$szWL&$}_8f)~j$% zM-Trb)CMIr|+J8;`MtW3z11Hh-cR2fR zKFpgFN$5qO%q=`-3j2b_`C_Z&dm9|;mh=jL_50@|?~g>o{=nJd%ZC>dJ8coVLtHOrz>k$PmNF zFAwC#VKtyKDEZRDv&r;8njSLqI5C?;jDCd7M~)Mz=>JJq_olnPp3;2NcYJdIxF@oT ziQ1e(8_)z53L#${dWP|^_$?qI&gvGu&XS;0xp40X$(Kw8n4G2~C5uYz9y<``b@jb~ zJ~t5xa|QgolWen7aElr|hc)L3hcCQ{5a3mB=~K^V6Rw``G*dhGs9E34#Ko!C!i+b` zz~%bgR*od_+>pNMsMm-=D%mTtDDjV&aB#w7lYcQ{!mGF zPwzxs=g33| zyh9In#(WoE?~DiTOfBzBJU;R>`FHq-FTH0LujBHmDammq4wmX;;5MJwjsz-a2Dnt! 
z=N!ynfoi?-Ym22b%>HV+_+DCw_R@NfM%j*m?A(hdZU+$LXhVla&A}V>n%q>8R+bj< znilj>tp0H8*IgGws5${&!|Vo)qredcGDfgYqbVXpUG7`l)@-}bipFYwzUG?p1ChGJ zLkwS!1|HY6(^MK4yt(>#TuV?5+d!)`ng19VJ^9hMA`-r2FZ4YzhyMK|aIg&_h!14@ z0Y3zWL=i&!Lxh;7*pJ=%Dr$MkvvIM%-TJOn!z0j_f&ENc=~Pn~3x{TCNAHw2$_o#P z-d)Xw!6fbQpW5AXV1|-j5I?pOq4u!R%uTBG)r+06dxvF-b{j%*x5?ABUFO|T@abK> zlrC=84xF$$Cta=IYHy72#qgZp#Yy%sDMLTYUkeE{wSm=EZ~^VK$0>rQ(*~gw6{*=H@mSW>o-7@AA|?oZh@N-^oC9e;<9r zQycZ=OnsSSmFsgJ?L1ccAROpH+50O+7ZNIOuJk{(0rO&3D{OshsI7lg#JN~&iC{r~ zd2#0+wj;2T6GPknMZAiyO$8PfKE72^l<{!)u}YYb2^jo6p3(F6-1Y58bOx-)#0g$d zpQ9F~k~kJ88&P}gf|eZwo2CdRzRPETYq|A-9Ewb#_@3-?>`A_xp%r(-ivxGG3^t2T z>qQIdk3UaJ>&XX;XuHT&rFzUarQ@uqcHZRH|xa^F?U0w7W{{s~H2~vTyG4 zudl`oVy3ZycRC)PX)d72PSfbHUv83jp`+p8^T{bjTaE1>th+A0tb=%QSILL3qjmMlow;4NM-GO-AAUh4-z41}z2-~!wWxkOQrI6Dr@XtgoaJoL zEA)(4?J-q6O?%>NueIq%iSr%ZY-}2q0X=84)Em7Jlga}StMP6QhE;yDSO{#U z%`xs}g%fNzTf`3|33!3k&_+`N;4>fbd0Zkt?7@*-CtFNl+kwqk zsD1QW3x@{N`jvCLY3J=d&!rFdBoLSKe;* zMfM5h@+xZcSNX)-x6<;y%5a&!h^+V6O#C9gxStv0Uo&)Mu!~|9**hT>r*3?Sh{OQMjg4n+ps&AdS_P zOQV&&LZ!TF*viNKhNA6>fVU7~g-|BV;8WK;3V1L;obdPULCq3wl1 z+cw+kyBiCz92z3q3qN~MU8;C0(}T}kzv=^Y{W-seDZLylzWt1}!Q+AD3cBQXU;p<& zc7D1#XL9m|ip82D6266}+Hmm(W~>9}t~Ed26JXvleWuI&cNwp|Roc{$thKs?M!0%k zMbDBgdW#2j=pyeV7BdH;;1-cjlXwzdNZRQHM4kOn0$yA1{p{ zl(gNbY`CKaxEb?eA$DMWBSf_qeIi#K* z&0JWV_KuP&?rs(jfHR3c*`pP)z$F>)e1XZUnb{aW)u~WIq>Ywv7Bkf|+|S*ii4;5K z5GJ#`G-v{qWVtqPyyFzwBMxAlopqxL63ukp#us<+q-pwC#WB=MjwYWKnO(eb@{^f> z_7~pc^0&Fva+I?m{r9s#;T>Tpx}&KQ++<_G7hwFoq0&4ZG(+{?(>!Dgv(?VZZT@?5 z0s=zv*peEy5^GqznC`wwn0Dx8DAJ`8_&!dkD~*e|hsebM4Fr+nqx*v!61`!hCCItY z_FZR#cKz1__L6J|c-8ztydF|7H3Bp~c|Ww+4Cg9xtf=#7-j*+)vZ8Mfje$D8FBjX7G^dWO_{*yqm#abwrM~(b6+$vTA zk}-41%=}+M@V{>|97QwJdI1m4ZH#~j&2tQY?N66AFVOq69(MKl9im87(ypvPt~4#W z#L|ye4=m;XIVN)tESM{*XiUS~^uGABu#CZ2wJt~AYf*WB&}<`SAyMLDwA{n{bA0tU z9mclziHSMMs826~nj=c48R0RP*8IFC9nwY~G%T28mrx zS8^QC@UsnJhG9O#qLgtFmDv9_TNWa?f-?L@&geneZG8Q?)t(LW2RRJ;(tKOc^I8{I z=~LJV3bWB90V^k9%0Sam??ULcs4y~I;RnnEYd*#$vPlS2*<5BY?nl7U_p^86?fXvH}r>x68-mMqv$-N;pwxel|Wt zR9c&%8POEk3UfR;+gwQQIyag_`xMBni8fU;2{&wbPIjaSNs=WX%xc`5z7InO`vVtBqp;| zp!+iuM}0thoRCeJJZbM0eU;60aH%MGU{7|aZjhei3(1k8Vs)hc)|LL;A53*sjTh8J zu(GOV3beKBKbI{hQ-A5K-ZmvJpZ5-1%Pe2s+PR$mOlHFvgT5>9eMYNN@c@2`2VUmori7Uf0T zx-AyW7)F3nQoGqq8ZdT&ymRwELGo#9-5P@IUL7oe+ljVOpi7=DX4;hvdUhPAW<%iW z57*<60O_CTdJK60<>gzKtsF~nnYs?Dr8 zSN`AA!@r#X-;E<&f?!>T+DP#e@tlj|6fz+7?fEQa!B8wn)~h?K9WXXfWZGKGa(+kH z+GUccf*TetNP`B}tQj&q^Oj|ysm|-2ua_XZk=`?EFVx@O2~pd$?~Tf{oB&Ms@AQ1< z%cr%L+vmPskj4YZwq&n#oqz`u0eLU46LfMag%`Va861EHZypq$I>+%1%v4A7lISUI zF$t;nSvz6+XjrNawSAI|ln85|5&piCEO5_Q5h)etXH=$qGv`?*`CYnTn(Phh4&u!n zPT$4#2k4#khgDNP(Y2#A0z?sA;K6!E0AK=b;FP zGvB|5(mj*FiSV$OgY?N3eN*raxk|GjCUz}LY@KZySY4mz(IuGV`E4Vk93n^mgS}1P z87*VTX1A;6b_Wjj5t^10qqE-u5tTi(Z-E(y&fr{F3i~PIQ*-@syy{o)^;^L1KF^fc z1DHD?(LCi>RCSAx9c+p%y)75=UUwEDl>F?js9o@$>AbkcX3HJpe_orJi)qUS2aIKP z2$P=U-TQ`%;o1dTrZBbX@rc~$`w9P)%xh}t^66Uch*e3rpCX-}AInqde2o>?;9#9bJcq zK367fmq?dC>8(UfY(_tcc-&@y6Ue{+WenLONyvbI`8wul2s_~MN#oo+r!?skaV|b5 zgoeFk0wXhL(g#!T(#=KsADZ-sYt&MUAjj~~UJg+iub$911-=jg*-RyRPZ_3Ju=2U{ z<(ICR<rBMYs+{qFXH{w~P!rK`uUvvM8()ciIVf#Cht5El>)Cfe~jo)h&VG%A> zXdBHFY9sYUPYfdWVwXA^W*0j$AjAAruNNc>PcT$>zt77qDkP(Q6FL#e%cq8p?!z&g z4l6#(&22T|v5Kyd56`2i&Fz5o4L5^*ZL6^9@VL*BW2y9LH8Di6;QxFe8Ug_`#0|$% z3jGd9Ro(Z{$%zvsZywTmgY(+M7lGHKsaqzQLkK*fmKMnTi%Ck2O`qSvLlgx2O7#f<&zJVhhjB z-rw+Y+U4uwc!pLO^y*8EuHwYW+G4h!v#u!gDbfC&y)+#6FZ8 zl)9CjhhkzZ+CXS4Y8PE3916!Rf`GJG(H(zly z&A}AFDYlPdSlK)sPOvtA!Z+Mu$NAn%S>>KMgTS>yOPE#gy#d(tsL6arP_5Y)+b+fd zzy-H--UVkgP`npnYm-*|^lfffkGLyvmI9gKl<`^VQaonoQbVulo4c@H&ePalnwiW8 
zULNWxP9FsRy7uoS#J`3HIEp;Y*!AZm&}~p3=q`L=6Ec2dDLixUsTLN>r^dcv*uso~ z(`(srD>$fM&?L3;q^(cjLXa+$kziKaE11IA7U+5$J_e-@JSo4cb@^QYjA8#h#%@v{ z{pmj2t8K$;&B$wH{SQ&sh3bOn_J}4glP6gei8tp(6=$sD_{^AoV6cPBhFXspeg*ih z3L-;@y<8xGM_yyCEx9Π#pwEoMSmxOwxEeV}N8I z>(ZQG#@8g;(J{$2P~pK{v(VGuiE`a%N;>7HHy(694E$iICASzIvMrS=L%7l$=v+ye z4CEw^C(m29Uf@^kt?`c-rGF=+w-?m>i<|}V(s%o?@1=$<%7(8EIA)u+Lxj`0cC{A- zjH~(aU^%9BXoSWgq_~|~YEHyf3b}^69ZptJz?qYGCB{totK1m9jHo?_(6o=5nMG|) zY3;SF8{EGr2RxUwf*lOMH-x=x?KgxSI;8d3PLI8xO(4xFiYFNUFt@O6ez@n5qI7X{ zF3l+X(tH{@iT7^0qfGg!49DQTQ5< z^sa@>K>y}Me-6QSG#AYHhRTq1Q=AMS>vr1E zYx6Wb(~6zNuCqzpD$uy|;w6OKx%xD+U(H%iM}3O{U&JXDi<~!~{mi-5 z&$)HoMyK52cF-kcEHqUyHzA(clOt}|GzXf2w@5I4~NwD`g|+K!+@L&QRyggivPg=crz#LSP{ zK$(t>bo*V8CUH|d+l7=QLKBL_Bh9Sa&_xrcj4N|GG`CH$&ycHf>t7?J&zGCS+@AA@ zB6l`Tgt5vp<=lwl7w$Esp@;qhA??z2vq6OH0Voxln{X3FD(`$|iCkVQmXp_NR8l@4 z5zDU;!v7Q}()omRn5^W-o(uB`U3oo#T@%$tjlHQ+@mjTNd&(lT|Na~@B9>b)QLSsJ z`w|$6Ou9@=?XmtOX!4$YR7~!^S)nGE!v1NGRISqv8cm6Lr8j>>Kjy4r=h$5^jwWNYYC=CT;Y9oCU7}029(rhAb7iB15Iv|35+t1s^ma$HP zQL(gb54$6uDE~-QJ`v0fv>p$BM7uNF4kpeA5Jyofs`Ai(4y0F8a4_~i$mGz`iyBEe zYfZ|Fnv+UL@OuOcgJvdA*`#(jm#oYyr!^7X7zR33F*AW9h+!ueXG9!c&p4i@c zIUJKC63O!Owr~jr4Gv%=5q9;>;^(aiBu%nl?~op7p#A^bJDZxS+&D4 z2%v&mk0RN{Nx7bRe}6rMq2oBE-eHuBM1%M=`FZ50wJ}_523<~T!n%>Hun<$cp+jB7 zC~)imFDH;IDxyAj>xO$yea51?0I!~yB`p0Uowv!OQ&_AZ?QP5HAyO>t%nqxnIW4a9|pGtUEiw6&LU;IA1^4`bY1{NH!EWa#G z7@r7+t+L4kzFnJs{Ld|VM{Pu5NI;ZjKl-z5HwxBB$8kd4*T_~Y97GoO0+!h|qt+C> z6YqYp(?P{L@UKl#PO7&c?>! z_rkSF4c~3TrB=sPArma!>elb}x6-g2Hm+~{(geH4V=H-0Z9>t}Vx+9(HWit8g}Wyk zfu`A*LrNNTv8feSN@Z3W<Ic5CF=y6ZeAvd$Rp(iRv%bW-1c$ z8hkr>Y8xx!PIW|)SGImS#$|ub28I=>M=3pTPd|TLcJ~GKHEI?rX37^SW-P>wg(G%| zR^ZM_EagKkigzugwq&z)FeH3e?^=l7r{JA+-mBc&fOz5P-8E*vH6djUc**2XhHrLX z0w!K8&zScYH3aPJuQ@DkNOVM>^|AHa% z1pTZqyi=F0EHFOf*dx)K!+OB05k<)Qny<&872}VSYI%V;{#$dYbts?apnN*!=+g@n zNE%H#Y)$kgvhIih42I@B1-|ChI+G$d9+<9Sn%5!1px{Z&^Qz~(->fGyx#l$ z?JNQnK`GSb?^=J!7{!)k?rjtaK2v`+>FI}i#>s7FIpX}Zhf>TJPEP(@cz^%;OXBVb zr9R6ZNj>*GUBmg&%#RP86HN~#cbrE*anijb{%d|yqO1@~S>eXfNmP-`f`tIdd+$Bz zteViwjxD?2-dZiuZoE3dOYcL4JNfIfyqHs0=vo0GC}dSu%h&iZNVrD!t(8O+rb*v-^@*L} z_qJ1F8+Jgjc{)d(QIwghmQA3ag`cFH)mP;T!qbashNIOL5*u^*&)y2b@~SxIt93Jd z-KMG%g@j5hRe#S{3!QhQxC%8Ly9Y;kUg95maz}JdJ%)lMa`^UIA7P1tnGDg8XR_w8 z{1hxPy7&l7R2u#YW)~2#qPo$!-ugt)cr%HQw@*l23tnUfA&%1gV`>O91sm{Vsiy};# z8VcjJK6hW-p4Y&|Vo`oCg(C&-WtQ56j|Ji(Y0m2UGnorN7*_-rm(Y7}mJWSh)}hef zdUwE&$cWia5KXP^4fM)uc_S1kHnoSFW(_hGMr$XTz(K>38yBwhS{LUDJpqX0e5p}p z6A6h_>{gx_P85DZH|H7W2nb1wiz#2$6AivV&pR6jXId1Er{R4HP5|(BI>hU;AAc74 zR~7azZ>&%o6#v0Wx$K5JSsj*p60H%5C@;YHMtf;~qgQCGua`#@pKcszhIm1NVg-tZ z1IDTHXvL8>94xG=l^fPqGw5eVR7)he5kX{2;_pkizWM-Gjrjxi{7Lb^gKe%Q3CQi& zA+j{?I+u#YDP(i>=^94@dn4c7rAJ-L1H}#ZJz4Vju;+X0%Rnd^4ZS;y6w+v=OrXoc zMpY_;0?JgRO;xnfZHQlZvTa=dnc=Nncg-UGP3c`tjxMDtdsxS_>)_09`RAIEvIakC z)d{aOUw>q5!hD^{c%UUh8rzjn_C;uX@~iw+tTqGVruF-|{Z|LJy;f=;AUi5WkqPA3 ztcdQ|k>74x$Uwzedv4JX-TT)xH3tHtu1W9(6O`7*ICeXfg&s%ggjuW83#Lq>m}RgN zFj88x5V%CjVk(e(-~I}CqIN19E~JL%_Yr|KVnn9TZcl~rBuw4DDgVt02OHaki-5n+ zmdyA+ZGpEy>}}3X;()iO1i=*!h0kJ1RQX6y1o6CJBAt7FJB!>3{mE3rAk6m^2X?Qx z$5)E>lI)U?J9Hdv;_2gX3bfIeX5&vO4MUOFsXev--ZP2HWobn~8P(YPAy~K+Za55Z zuf4sVhdu(BwCCEvA|=h6qNafBsOKa59>FP2`shv%rci9tWCrR3eegl*mH6ZkW!AY( zN5{4S6XMGJJvsPIeRP+iK&sbSj~ zyKr6KyD@;I&Jt*E{^3`^&hFDK{nFcs=-L>e)uGjK-FfmP6;Z;x%4dn0>nVQAg}(-V zO9P;(PaIQ6NIP{_Gu#N)e26xjSDroZ79AY^g@TaqBoHfl{ps1u!-1LNX*bnysyxOI z=uan&!|L@UA;R*zUdf>(uY6iI?aG{7*u16uG{kIICpUlu9~OqFo4)UPG9QL-iB`7^ ziPJcoOtDuF1@zIfDb=mnseU(NI_zuksmD>Y2jy|q8hc^1aY;@>xqG9samUhSsJrw9w>dD&D7K0P@x^Ae#}W=h#lAs+h!hEKnracPoGXIl@I%D}4#)10YCF;l-Z31ZLw-R#*fyM~ 
zzXfBgv*A<}c#1l6n|0K&G`j;XNTf!h9|7Ym0iT$N3yH#7s4Tm~{5-FRx^Dq&zce`M zvu}ssOCYBHt~pk)U?CWeLf^Tb2pTj#UV~PR0=IL~P=|85CDB|} zTWg_=zQ?{Q%dpU`p=yDsk5d8I)OIH^tZFS#b41nBjLp^ghCU&%`U}aqB$$RU-@Tn2 zPe!i0Ym;|Ut)`Pk6JoX$pK450uD9`-f!11hjtCXR%!d;v?+_&;d$IynOr~9YPM^Z( z42Rvt=f)hr-+yhfG^LdI`{R@bU*Mnnom-FEd(5`KGlvCBnUmtBKD^4+4UcW%2*LjX8viZ5#apOA_hQ^;rSN+0%raxe6iTHEj!<$>B#Nl5xX6BP)0Xx6y zM5Wz^+N-Y~oJ{&T_<9T4g)F6LE(AOO}WcZx<~+LcB) z*f)w_I7Hd#8mrCI#>9AJ;h(KDiZ??)x-I`rH)0fS5`mWlOzV!RG(OXP+&I+~3FrS7 zr*)aG%*soyiIxGZ#Nv+OtWMfUc7811M2k}zgNF)c4J#O_eVZII3ZNixJu;?Ls=dsz zOS?UWpzp@qHmd;kAD0o2PQ;~5R5tM=F7-vu!iZfvalH1RbC#Zd!Hz~~BH-rDW4+c< z{FXjWn;k(Rf39$DL*U-wqmB3T6I>FSnLu@~u3TLVMD)czcfKwHDodiMG_y{%hYO2( zZy+2f06CJhr7{4X{t_ZD+_n+#q1R2pyl8IhC))M~78(Q{6uIWeWYUAr(Vgs{&bP?>~HZ^4(2i|-et(s*!=*l3VW16ALN%h`TV0hDmW;@6pAZFf3>QX!m&-Z zz?8=xXZkMBqEaQv+Lce_xdPHrDO@VEKkI99`GR@zr?;*cN|70VEc}>xmriaQ@W#jS zt;py2$Zif9M8Ku#hZx9d!DulTkO{BsbwGoVpwU+(4|z5CQBp8*p4) zb7Ktouts2c2zWRR1)Jq4T3e1Cnq-AzJL$mTU=otYVVN`3(2c>*qxqU`xznK6@7fkU z#E|+xk`lQ~&ggE=v*A$)(j|rQ63wR^AeLMS;DnJaFGBo3^FGf>pbgV~dK3*n%H)DX zyHk!v_aL#6`r$ueZD%)I&f>;)5O33aWp4P*P%neyZwMIr$F zj7In)IOWZU*225TA(+34uw(bzrVc~1Q7HXKn}W=Gc~jzSM?A`(pZW=~EFT%kKzyE# zn4Nfnj_C5URE0QbQbI3;S_=YXQz2Nq0Wg{{#0vu=K%WqysRnV-)wO_~)dUe~&=!9f zeto<)nD$ZFUc0>w^py#3RhMIOPA$riejUj9G51QWbmK>j%VE&B0c-Q`x+vyby(rV( z#b+mDp+6hdRt4Cd9I!Smx@3iY3GR2z0ee?(vP$bfci^~CRC zcQUcQYtuJxeEaAROj0C_CqISvZdLD`%!ozr0`^ygH~C=G1(b{4f53#B@40j^<9#Uf zXwKjbnCx!F`_2nJK^{BsAS%|c7x-}m%7;cs2miWs;bck4BxTNUbLR)l>(Im)(SRTP z?)>!jfRWXW+LTD3IN~}^LW!{)e9yWC+AChm*zy%kzDhr46YGml)wH`L-s_t}XFCZC z$!vvOc*e3hY@^5-DumeCwZ9Vncv{MDDyhJ#g0^} zjWx_7CXR-KZ-mXkWz(iET_{XW>4#TcmpXNY$bYJrMPiY<6E?QF*K^U^5^>U$`Ti*! z>lm9?4%hcddwYw!zS`QCly~pkxz#)68OenTy95E7%{JxTvAN!i74eA9J&ov-158*F zam*uqFEPa7zDV%#`2W}WkUAFs8w2-9!dIP+*bzGJ%i4G_W!RfZLd0^0!Ad6@%6Na1 z;lV|aR0b&s6nV|QPFBN<%CFCX8=1l_zW>`>8vudQ@=CQlH4&+~Z49h?+#Kh0Et1T0 zL)fY9-j&cb(wL&8*URfql-I(9^?tt`c~0pD1+d*Bow5xzZ@AKA=U{?n1g=e8y--%E z!ub<5!i0oD^v;`2==!v2eXu7-3eA1D;y6L%`ucB)p)(F>WF5=qf)qWQpzG4Z6R!x& zQ>ZFD=@thqrFb~leRP+&`t=to3JrRm5>PR6N1J{HlgZ_YpQ|CeJM+jc)=TW>yzgds z8`&U1JGD5no28-VvpU3Z_pHQEQ zbDne|G|AgNEX3#Mdxd48JUv{UI${&>ve%9$glW-^afpZ-dhJ`Us2_D|vNW3XUbtI0 z13>HpH+gUG`5NcKZ5y1vb1H&DCy=*lPq7;){xAn#w5BALCs5UCBvKy%0Q1w`h!EAH zotqIxu-&`^Wpi8d=z^^|h{uRJbXnkRwZGf-vLx-v&A~Meks}<)o8rkjFrvLpV8|9j zJlbu&O(fG$>9i3luk3h1mrEJ)JwtW+_-^q0T6_tIF|RDxXAmf_SSr2|_B<`r$GdpEJAPavfuFc{WvCBYBdU!Z1=_!z zFw6lHALVm|W?Z3wCM{3}3N{*in3ZryVI`t@MK-YlX)-`IsJhIubCsEW-8*?ND(aAx z$w6Uz!pO{!>s&*ixj)sO`EP6W5M5H^*Kqb?r%+`98b4D} zyi-mN>VWk99LO<88ogr^CHc(Vt5vp!m*#hwUui=}BU_WR2!RnH)R}+95$oz-=SBN$ z=Df?sD=+6?I;oLhG+E&w#H353wjnK2j_`JuZ5(1HYKG+=o3EFR5t!exysr!G^b=zwy=s`Qj8I=Hz}wBWK` zFY{?8hr%a-8}8GWHScRrBow;0LBRH-BIC*|cXxW6`_4#CJR80AuRxF@Wj`hVzMm?+ zLX@Fx%213&FPj_&BzjpL-vf>kT_TNTbhzijE=W>_RzqCu+Y{-QyL$WVfjEbqt9eU$ zPU7)=W}C}15}xyyGS!y1e)}n8Ukf9BW8qKm%+|=#B=bIdafCC#eN8<-yO#7kx7iR_ znn$)bzR|uAPIslZ8xcs;pJyp;W>Y7vjXkX9Iww3J`hJCMmWI4tTf&h6y~>Zgmo4FK7nQsH#v#z#7lB zTvAv0I0>5oi1gu71MPb>lO8&xN#BjOorBqT78JVM5~xlr zH4(tLM;C$lPSjc~s6|3<;tqN(6ByXEx6u$_zR$YX=K6ZE|1GoG6MeZA>mP;mOnF3C zUN*G7FRG?7V@mVKD7J;z-XUt$!#q{9>*(-lG;lbV+hIZFf_M0*?0{=KZ*YB^=xO!) 
zZ&m6v8n@igODDh7)gDB3V*O9K){}RlJ}vd9qX4nGY@(?uh1O`gn*(5&oxEO-j`Uf{ z+@Lc-Ubd)<4}G&u1@B5&VL1OiwVj|kp4#d$8%(Fg(pD#GU9vg?BP&X6n!Q3vi;%UD zj_QD&(@ded_zsweBzkoCyR3uGEnT}!h)QV0*DHtnXS)_0FCwH}U4oh`42v64@=hea z4MU-8!K$~}%V!chVo;w5jO7)wFSN9-GydQkHvR>_F3EXcz{!jP#GleR|4L6c6fQWS zeR|2v?X^{iCRrxhhi$8pONW8_#VEydCdu6INPS$YDFJNbE>=*3VwCl%BcAbN4!(feEdl5$xBLUd@*RIZbCeY6;vO-g09 z6QZ+i)uxp8@&_z%*Q1H@KX-vB7~cGTg)0;mtj6*!a?NnA{tC#Nl`;mQ^F8g>|6%Vu zqnh5`lAt9kiFQG_R10n`Q2)&3H1OdSk0g4aW|P^2jk5a}Xf zLqr8ZqzFwQa%stPqY588_6cC%fW>^y8I@-mQJf z&t0;$h>K2jfBtv(z|O5W2vYL5tc9hVupsW=ch>)$wXl;gDj`V~HT_c*+g}n!uSyBr zbw)*3^!^cp=&mLI4>5?kQF@KQ{jxtru~8mHKPF1bKC9kg*9!fw{WW3KRw49(@9Itz zTSL`{R^GntzeN}Q#;#3v|G$Yrd=RtiWkQ?IO(!;BN2hAPTuIp9fkZM?peTJ@NT0kk zBZTunk3?7(dFDg-s>15AeJxNTIT8#iId`;eNsb;NYvUTL+Cd+oQ~QXkPor@D?Gy1c z{*jYZnLeb|Ui2Q5aT3?I-OwJap$S<@%V_pJ0sO}GWrA6rb7AtS0OL=`)zu0dqqsDd z6Oxv$#Gb^^1Uonw|4wu9aow7`rLb#UcJ9&p5J+M#b{Lvu??|j;gc2<(G3t;fMBt zCralkhYfXc%2g?Sbi&Ewp>qaxD`gkGhA!?_(3jDI&ey!K+2SsL<>HYAzP!79pxJTuclPFbs$pNADPdbt5&6c&FU-f@_u=X$6wSN{}^l#aUh&}&~U7HAd zP9~nYy62iDCE?QA8`tN%u8sc2uGx7g@%?z~b6xHFJHHz;@uV+P?0?3t8SmYgO1Aaf z_>_8~a$`F4`r^iCM#SFDnS#5Xo3kawm78<_wdf+_eZRiFu)Vw!UDVe3L}!HW{;jpq zbba!{cbzX<^gk*kbFKHKoK4z;z0}sC$SI~z-u$x9eO_lja%x-evq;a6rR&@h$f+CX z4VvIe@g6rds31z00>Jc;(LeVEqQ6*M>=jH1^MVA6G*Q5?8tpJ2CmQSu7^T{i&%<8; z=3RbdG=7~F#USGsD3W)_j3dHX`gnR{Bqv*`rh|rO(lp0zLxhfZT>Vxh8Ba06$*UUB zD!y_;n$Gd~hlT|yFH7)HBE_~<3{$2$gB;XS;d}E8gEKFqcurD;zdGrFwnM1kb|>Av zsZq3Ku+*hY>8i5z1a5Sv=%=msSRtv7NVOWV1rr(YlV@F`G^S)6pG-jg^^RbZ$r}RD ziQRYfw2xLs+$_9lDr=c!X4SlMhZ$R_e8IHeesm?JaiDPT_1^w7n=5x&=psUd>42l~ zYHH7piQ;bWfQ#m8+E8qfTCwR9cl*`!k%1!hhTbQh0jn9~=wf2G>7aM|YUYf2vF7XE z!E4Q{Sxd3STAxjy28^y|uMZS!|LA2u4cuJK0Wc*bgxL^H_!|Rxv_ww;RV{v8hK}B4 zA;j^Ke5?o(uW<^Akw%GmRvbfsXB~AH+n;@kU6Cl@iIiSzNRm#jQ+M(~LJ>7)QQwve zJ5S136tA``Z%-D;2G3X=eBQSX@+~nrkUj?jVfB#g30*XkRe-1EHN|MvZ6eLXmF&&3SYx94#*v@MZ_P3a$- z*8I^l@C)HneB|An-VdZL9wHNeCE*R%5Bb_BoqRrcngB5IaOP-L!0Z0^LCha5OL1Rs zJn;xW?epV--ZFwvL z9301*;JUH?2+vg=X>9(HC&-U=5a^IxR5^!w+9`1@HJD3PnZo;ay^A<_B@%WMV#FMI zthyW_;XD>1Jioq^wV)aqF*F%iC)lIBY^)G@(Ruey)&fx+jI~&HlCSENm_^dKqP3vP zJ6Q`d(;(c^v-A7%JJ}LGOPB9FGykl)ipN=8Tg`Yd0TFgcLr~SPFcQ$N;zRz(53aS@ z#vieIos1r{>2}`tOK>H$eP?tdG`D*x?x5i2aAK>0&StgiZF3Ooypbvv6`XKu$w}hA zV26&u0OZz}N2t@7x{HIR_!HnarkY+zEF*Fx@3|X(fk*@+I}fb1N9u`; zB10pV%{gyB{U;GfcJ5`G=d3|^#X5FdKfTR(>`xhpdV4CCI2hIQyfrO9HK*YoG6SuDUN}Q42 zoe&&;PYWsl1n)$ObszG?Mc%O6hNlJv4`z^{A3ua}lg1;6CSNC?Z{7Ot<)_`s)^@p9 zocX@K?J(PDEY5wmvA%@0SMb*xT%jNLe|-P4q+w+X6v5lb;Q#g&q%)EW5g^dE*Ilq9 z#kT@~V5UFaawz5ulM9^e#%MBcl*#(OW+8*=t`8{Je^IcpB+e}+_ADOk-i?hQb8anh z?XM5|CFfpj4BaA7W2u)b>p6~7sl=kI-Cb1vD_C0~*mN4THH59I=VZg6u}dNAWXu`` zzK_Kz@*4Y;|C#|6`mxS*&@}t{^3$|lox^H0N5m0tf z1h@+VD~d?6bGp+*x$AmfQy?(Xl{>%-66)#*n?48cI#=ily=oL)n&MHK>OJUkG5mED zuAM8!?|iP^1)DW!eOFYC9rptux=}Fpg8JnxAf_dfI~IuTXpAlEa$j2t>GtDRPKj;k ziG3A$DH`viBN+2+9rwJ%>$PA!9`1UV5qEGTZdbcw@6Z*Be0&Ge)roQG@b#S=>G&Uj z#}_--#YmU;>hK1Z&s)0$;dV!hNUzHT_%Zr#CJOW$I@gYgLMV|ZjqbDrzstBJHsk(H ze&=DLfBxD<1A!zdjienTo~(b8ylIlydaPA# zob8PqK=|(_ice8F$Yut5l)<&hzzOH_Xy)?S=dP79_|tQRn{!1+bHz7v@xpoFxjQoU zd9nd{^67bs&3XUSM4@>2{F{735L!>OfQ82_kDn%u@-8m{u%QBrXh4_+GfpqisxE+0 z0iGqmJ^%m%fIO4S#H>(-8;ijMf#cXk9Cm)3)6@$9@bGiOr>>OYC>-1!pK!jLxa)m5 z|54nH%_L_O5K3@3X9@x1K~}=2*mSNJJFlR!l6QE_Ja6$b0b8>R`;Jh`sp2q;FJlYJISRXr0)eQoj>3zo{qUOm@LM8Hvvjxz6H{Y&-$tbA z9ln-6j~yo0N;nkXJOY87s}siG-^ODlIg1M%YF*yCI#N+JcWMv}RH6f7LlG19Hp-?9 zq{;xR0$>hGEycdlwy){j8yG+^j7zDy3e=t3D0LWWpeW+Ljbn%2f_5@f5MVlx+28WK zylR!zAaMlS{+(%xcbH`r=#D>pedzu(r4$MuP;H-JV^$9)SC5y$;zwIP#sGF+EQB&# z6%R-g%4ZoM_*;-Y3CTKA>FCu~o?e4wHC@2h5SJV&d{lmXZhP^gGeV=#Y3 
z1G--2IRWit{Jk=-!hvzDfGG4v*~OVYjw+81`LT9oE!+iw2`UG<#MJwhcm8T_)H)R& z?2e?o!C>)#8nrrbs3DVzD#Q1busG7FsGPpm0`?K4;}A$)sZ1cVOFpAY^h>+^L-d2_ z2fNXLoOz||&0yeoZL9Y7=1OL?tk6N zuQBfz%-~XXFo>})id8C(-`O%pkQZ1F+IM4b+$^%{?+n-~#ST2FRRWTal(6yDa)%sV zik`~(;(38!BshP+>_%%Xwfco3{8WFN@`KtOO~f9N@|1vf1RXw$$Ide0em9ym0D!~G zsy|@ot1SPn+;Nj!+K-1g$2`cfXCm1I#GO%$8L8gyVMFH4E@0_N{7{wSkS=>1S&J@? zQEU)h;<#^!mfiyx{VdaX*aqRoN{`o03S&z%tAwclh}qG8AC1KUFalVW@CsnSLepE+ zm`^ZE_t;z2?RU8F6ef1)<<1zYdrR}-qm_#2uG$mK2s4N`Cm=>{{Zz(;;Xypq1{mpC zRT(H@&%JnNxxiNGG8z6Mpiqp0s1<&tO9hSw0MewVwrId^sZOe*vgq(@N1$q46I>l# z5N`I;SQ)S)mAq(v6O%P6uU!8ipim8XD#~$+^PEGuFm{;A`3|TnN4e3ilP9b%WmAeD zxX+cn?k`@ym4C+kgQGAXJL@fIFW)-`>h!5@7#HuamT|7_ah&)B&msZE)prfWzm~-> zWKnsoY@DZhLUj4P_+l|${G$u;qfzjOtzTnG;veKIi}wY7tll-LX!+4h^L>mamTLlW zDue49wHC#V4XrC722R2fxeP6koeTVZYm=CXNzxhIG6i81dxM47mz_ZLc}rpAWNjXL!#|#)dBA>i&W-eo-aSY5L- z{+=*t{IX>w`E=u^@2r+G=Dfqq=p_V<^;wh~>k{2DiwE8WeCFZCT6q;pJ%r2XHp8ed z)_(#nM~XG@kQxi#KKOGEMrEo+AC|teBVh6A9OIi+BWHjgr*Q3q)!v=Oj+8;&MeGGC z;L-^9Fwjl4s4QN@wq^?uZ05Bmu&Zajnol7tPw;HhAsU217=HFUr;z|B8aXuNjq1Bb z07%+c#L}#Ce8=~<<4eE#Gs;lrqF;r9rt&gyPwU5}cT4`lU4lvcJxi*k{XRk2O--xX%2~SrNl{R-(f*nXJ7ux2R_ZHIzH_R^=>*ChdZvsh+ z2onPA;0M5yaaLN(!73hVN2$=Wna_*|_;hgb1eg`s<{ASP>IGFTK$$RM2iYGgovBa{ zY8{J&7{+_mjr4o>|a z+Wb5UHdY(V(L4v|xi;rzWs#B;kLh2lau`mYLm~*S0uw}PszN=I7Hd5G%J|T-Y>@+R zXM!S>^Vc&2Z&@*Y3IZKJCHQnO@c;`~;^@l9krKxKNlY(s$8?`39Nb*JO({2)@d(_` zS9o#^m&2Q-cbGVY6YM?k4JhQ{kq3Z*t4vIvEquy-5#L>S$x=n0Bupu6XDP1)7K)-hk7UIR@mS< zY`(O6%09(Q8Vtw7+Z=gHOq7Je1WyDc#pP|hn(+{-hMT#{j}%eNG)@$y=4r23FGDJZ ziD0E2dv6_#qwJNvOVNSs=xDsd`+x+Z%#2+`ex-fV0I%NhG;mDr&pb>FYdh*|fs(bN zO3_-K@M0ZXdukb<-rPQ?=td{uk-;)oWmMy-8v3%?OhInRuyKcFlO!5>PqmMU6Ot34 z3FcCB#VM1j&~TI^AVbFCdBcColZpm$Oed|NBG=pDY~Uvm35L2N^#CC?laIlOE-AM( zuuSSl5GGIxO68_8K!9QIkz%SjQWqEsG8&?h`It*bLcm3S&+%?3S1McvFri7hag_j3 z+&DUg0e(fjd@P6v@{Qg}PWhMm_-@!bfu9=%COu=l7TcKAa3) zU7?3!x-$ zY@+y-h#)8mOa@+1LVcwmR8gtJB(jrt)8=g@7{L(Hic0fWADamG<}}C?0)4 zo)>7IEutd)zV8~lkiRhWP60}OqQ=LZo?!_55<17(=B-5G51N{t0g9$1t!lh5%eJbk z$UAUQ;Q>Div^!+@8#V6r9hN09IAaBGQww*@Sau}=VG6hrfmHK@FG<;+Cg+ihtXBXP zE(Fj+$rJv&NZ+4_=1{{e5w|iOE^F|d0rQD3%ORlyoOu4ASD;)Q3>Q*48^VJZ8Rl9| zNhybl-mUn`T{J|ZuDaddHR#*DXKl;}q~yr^K-AKeRT{Eq??EsjQb&7N!ucV*{dw1H zAh4!|V;y#OZT|FU*yso-4<1~A$minI)&h4e&Oz|$ZAI3)eVl}|v0 zaE})B(>_f#GFqC5M3PzDDkDtk5~-7&KHJ(=y0#8 zJdrvVZoBPXi5a(u)6=0|fYUh4vx0DT0#c;`j6jHDiQ?e0X|P3e@#TA1qVPVvyXP)VND=U%S(VA>`RZt z)%&|OrxgjO6D3UfAEDduB;R*PCxr9)7%v^H_?kcxTm0TjyGeVDgX7ZRk5*j3jKemhK?9~hrlX=4U0S3ugup+G zbdIH92tN~S`}=X3Jua!=?(efgN&>fFy|j=@Ave1MiD7)5@%cGCy%=*5RA zA|&|_{h*}U)S5+sKX*n2=RUXg&W&`9fy{Jb%cXiv2vLvi?(eQWdeZaiFUcrKL5TQ+ z%10~1|5cI=fTsaR%?+!2Bc1n(__14^;I7)dL zCAoU}LDos;}bdc)Q!jc_{3hYr_?pt?dJ8LmCQtI7Z9uE$L5h zml2p|N(8>~g#b^9oPXMiD-@Y7efHpUV+pZPD3ZzS0kx% z!pMpkVgheynN3NL#AWf`5DF+0u|of(-UnqI5Ryc3$ZN~^@0ygBv5*IH_N%t}?I>Fg zj8uovj~r5FDGj7G$@fe4jx$p<4u4BKd|$u%oMyUJ4Mts<H)Oa}oZ~5zU5%NyUg+IEdO+ULNm+F3ll_g@U6MrJ~)+j4(Y7 z?n%_nNB26AMf47u(p9CUStO`LZrL?DhhFiP^Wn4zh{A-qu~;lpZ4@i7yoobHBT`#U z2g(#xj(nt@t6vF#gzz%#JRwjNHGjU}U?GIdYGBurMnortYoS&S#AM4A%*>L2JVGX_=F%$48jSSq=|%DHah$(uFnQiAnLIc&TuFh^t7w@D~9j85|1UE#4{m z>Kt-cPs0I5=T*Kwsc#4OL;E`$E4M_rgvk9Xl#)|!47E(Xqx<9h$i^5Z@t`TXen7Q9 zE#I_{7}Nu5S5WPr4R78s?v9o`pZLJi08EM0on^{l$n|5IK&nID{+aL!XeCs+9E$9G zDjlWZ3a}l3a7$40+YstR3H8;K>HVK*Jt5G^URmQMBp|r&>uqN1NT$&6E= z+!Gr3@orx_|BlP*y%TE8^5_Xy>Sdwgi8SQ2-_!M}+hs`{(~WeQGHvLR?AaNSE;sU7 zKXTM_0K@IIsPMGBPq$s|g?DAXb>lW3@vIfHlsd7F4Ph40a*k z$(xG2aXi@CZ%Epn3)D#GJRQ&TC>o3l@d1?2a|rAqaAw$-3po%&$U9HxVAft%Wi`DU zmcgB*;F}qbW}I(JUVN~iNeC+^uREuB5+Nf$BP`p&XOZdO-ILvVLgB8mX)FA(9lgf) 
z(N2cp8p^HQf*@xT7}v;viUAFIpA!X*4ptH1dI21)08RmQeoq8?EzxZ5IW&j-u3kn6 zwlG+RSiqgiTRGnCN3`pmfd>Xq_tJr8zfi0yt&yHkjQWfY?=Uu50hvTe?njEC+`K?d{iK-wJqakc8^U;1* z63XEv`gq*FGc_C`C{@5)WIX@24VJQExsL-^l+s?nEoc>MD6?BL$&XouH3alVwV~t8 zOUq$VOR8-PVEENgxYtRk%|{cwtSmo3dsZy=`_cGXRkrqq-Rcb>=Ee}#kk*_O`~#%i zITH?N*faK+QmFPPc$Hj+q%BJI9w5O58EK-r!OK?rn^8OB!mzJ0Wt&zO;%lnjF0Xo*>fynXcKz@1<&xdGkF?-3rl1gp<7`ex!XZpd( zT0?5qFD2$-WVJ88re$2BOco&89#hH%lY2Gh-fp_R43ZB?v*M!ksMC`wuy;zvNP-+J z+UtSFOqzM`5_g$3UHdR;y%8UekLiz!&_0Y|UmsT%k{oz_hy_VTN3V12x52puOe%+) z@6UAV$sEa=;q<5H9DJo|-q_GLF$^9vbzVqSy zYn-twoV}3<7rd&b4VOO!{+^BwAWo&cQ0TRbFkMt9Tssd5mkuGE?-&vgq$=37U8A^+Gs!avt>LP?wDs(2aa>glbssloW}$PIqw|bv%(>b41qI zOBO3}(m7QNF?hT-D>dxun&Yz?r%&`LMiJQt1pNwB8A57CC!sk>jnx-1eRf@Z34Z1U@=W^f*?lT4b zk{+emqKSQRev${S&BZV9QvGDH_ZDYAk*y2s=O0+i-QRYvmz{5kpKm?mUX?Q6zBZrH z=`rZ+-aGWTj{i&PUXT3k`4&%)!o~RMtF?_bT-~y_hcM6c>piM0E&qnw#KIZ}O2=%Y#B|Oiw zrFVau^<_@VUH%+sgOk7T^}?A2jT&$EBbVjYBIKcUZ>B6~>GBm~!d2W4d^02O{h5WK zH3#7WGKhXkm_?la{r*>;YVgfzjUZn;+vODW*R7Cc`+f6itQd`Se$ zCnK)B7{|Ip()3;LK z-scuWV?uVP&Uj;DIFG|nS1@`+S`U)GTOpQnBXXF8iKq_SBNUl?*fxj^a-1z4o3lUi z^In_Wt%NEar4J*V16|Radp-2=_fyRH~+tfuj;T$E#omE{;42c%_o&z}q z{3=P-ORIc;ozLXiPI7T10z15MXy-NsBLsX|UyTS2*Kq)d2fjfV8Erwr>KfQq~kR))_pRxLOFyfOApY)U= zAM0v0AR?2!9yv8(Cj|d@2EL^1_A;)7V8gVzm3Hi$lCxE};+&i=23>Zlm7W>nEJa zp1=wqN|qATkVD99!{@!(PBt{r=kR@uH}f8z#mZ^(xSctMej^2hdJOgC z*~2O5U!MgG)Af) zgs`2-gEuC61sKu@`SpZi%soj^`L%o1aQ?dBVyMSwQ!3DUB}$T&%!6dQ-s3sm^Gx`_ zGn|2@$fXC5^Y7rX*G!?!P_2@B5g@XI^PwBOx+u?!eX4L5ax_8etj1q@Fd za5e_BolO%j@lckJY4X$=Cz;+4zF-WxfZU@7RykjzXso%Ni1 z)RjHP1JLc^BB?-okCLHXEZ*w}dOxXLH}3Y$mRJ&JJ^!$O>uSX@AFe~1vxOOfnscRJ zg)KqPPn+ZUVSE~3@YqqpQXR-q*Y+O9H(wX5R`Tmo&-4=& z{XFnq%R28m_aln7BZEo8=b4FSmu_91>gKZ}<%S&^Z&3z}{G8EWQ*?X$(h*K*}K#er#OziR02>uA;7L*kGKaFfz_YS z;`X>w&kAnj*zep$LGZvX2@$*OdXTQZ7D@L(`H6%4kvqv#5Kzpd$)prm*ZBZ5Zp`vA z=%Z9MxAiB$2JHlu>*3ZLdVHa5OBxSR=+Wh=Irv$D6tt}y=~>{Uf|(G%u10;}^~)TX zON86JA_I-Sl+bY+794M!a^cn@$n+Eh>a8b#w?bdsKPj@Dy&?^|=k<8^fk#rv{j{*# z-=?HND7`(ubiLFYBu6AXFsB^hO#G^+FV&AbJ0E8tTFX zr#_M@C7K-f!Dq%y;IT75*z2e>{tlO>UMNOYuwBKelV?Ug41O)dimRGG>Cl}^(q5=H z&j@+qL8SIm%hZg0P*N_tSO4(*3`t#t4$s*Lfs<#Zw_aGa&z4Y3X61o|R*sS_8Sc~4 zT934UO~64Vu3$4)q$-N?INtkt24mStGDPu3F3ANz(JEo{8j)}Qh4R@ zm=+L2_NGe|5_Axm?0oXl?;IbU6P#35&KZMzRD4KB&#PB z@GJ{Oi1sL+fjH>O66)W*Gy-c-j8L2mDIr%t@GT?QL~$nE2j7qe0SoXngJKb}Lx#es zur(I1#Qp;^VpzJa%~z59Zc}L_(DOO^JqP|J5iFRM%T4I~7*q8)(L{R5P?28q-1}LB z%}s-QmisFvFjpH(%5uxqP01uLx!KX(g=Cq4sc zL&cyAS=91Z1aN#o2;4dpcyf_fmPd!=LviQ!RD)Z`6->~O^yLxJ;3~)|(>**r<(EFH zgmKC;bQ}o@G5p`twc{15=I6j*6hIp>2ZhtQ7ZU44?*@s^YtFA5!{)rQ;abxdco_GL zoQKZyi>DclEk8O?d;3XsX&!j}IE_}M@)!}R3-<9g5a4h*z@^({Q|3p)q?JffhPXh| zL|$_VDs{hC0Vmbl0OlOW45w~_WYt_@U%I#{taU(DI{{r13ER2L5R{jv9-m5tD%PJ+ zx34^B@=@;##Du|qZu4-pghRH9>-K}OY3(0XgU{nIrRU$dgPv$gUd@OlB#{idkcVq_ zUmhAW`ao}guisvaxjt#M2bpfqy2_j2C!i%hOn&-svK=kH`e0&I#I#j6|EvG{R&G5{ zuO^a9=+@WIvBKE>509uH2q|)&Xc1ZO`;}EeyMQIlR9x2rJjp>{Ma{l75__xpGEzDx zKRV0!ui*z1qxDRKWAd+GJ{u?%j!+gGu{xM{qziq=7&&v_JK&Oh{9^U(mM<=^<67Pc zFz zP2gHp+u_BlRa@^kk9$Tkq0o^#p_eo`2>A=m3kL%YS2Q{+Y*A)4Ir# zaW|+#Gx{Icnj`csM;dw~DX%(MGVI-7*&5V>hB+P*(n8{&TCcJlE@GQ=yf9pvJ(jLt z>;AjPG`ZPZXczJbqc(CdkfSi|gax`u%}9#iv1g26+m z4j*4vawYh2xb%^1bAS0d8VdRltjf>)T2Zq-Q+G-I2&_+vu*N4;zyyKj|&?_%JB;u{-EO!a?>| zi`Iw!5q3|@&pr+GU0Pe3?EkTK#(8sNeRY0cBj-fbR;h0N2P(Ad{wZBUjGDM%U0C-p z4JvkgJe1GIbtkOHrDP&p^xDis1U_8;W28*VA6}@B(L27Ak1>QU`N>$dKeXT_lkr+> zGm{AXj3rc<3$+@{l9i%O@{ z|L{U}DeRiGr5tnWixrP>T@&s|Ctwx@xSjeweeWE zKjKop@S7HVhph?!T?_89_?yRcM+<(f7o+#C?#QGH{IL@5;Mcx>DfLY*H{&){9`F)t zNL-f?Z};K`Gd^2c%c+$sr>Cmrr_oqm3;#c8!T%4jHK(pGe1A=e*t7P#7wU~`@x8U* 
z*_!{^3-#>(K3j94>et%X^{>BvjQ^npzr4LMQ(U#Z`K96O|B@D5Uxo?OTnfgSQ?%j7 zgH;iT} zj_$1TUt@p%0LOoi1;I(1Wt#lwjz69~cBJ1Oe}um|sjtqB{e_c6iJASzN$S4FpYE&s zi<8=&kMkcnNwFipA)mq;jK4Ukf3M}0?;h+psek7re|J)wH!>&w>iGLV!%5zBxs#Ztm3bL*n{NEkC!`dEY*L+Q$Cy zYa1p{IaQ2?j#H7ce*AK5(US2HT;M0lf2`%whrv6wd{D;mH^<-q)mq*>8cO4zv-wl( z&%gL_>H_@0IGBHD=@Tii3|6=Lot-@srY6|~?$krA;$sq15{Ym=n zr+?NvMn7cyb?MFSyB{@!DB1l@NB*n$+|JVfP4DRS=cPaN4E%?VT>0g`-)qb>wo`{gYG~b&X5A|e>h$EW~%;OiTR5`mf+-g%H)p{BlYdav7&z{F^Z@A)SQoH zk@L+Szp48(g>P0IEtbLirxH`=JlVIfQUBM|<@)?s+j*toETyXd)zd}j=@Vq5uU^bS ztN!ex4|Y0*#;fyxDKYy#gf28UQo`DAE1eyD*tGh)LALeA#PUaP1*zhl)8)t4@kcRi ziDPG)^%h1RTbPfm=S+Uv=@fE~wYC1*+UXQNJpIie3oCu~-hu~2BnKmQPiF_C)n|52 z7bE$JKTa1;ML!&E`<}q~x#Rbw|0|t>MuByt(9}erD3^1p?rz7O+`zwe3Za&P@(D@Y zvke8LV|&;6(#{&4c>niK!O^71-zELL_{h{vCmL}`IcZ4+Arksu>J+jy5fVuQdh{Ih z0-NJQw!i9r1RwYrW4`j?+T45<>oY&z-e@@~14q&`U`naGGg_?>Wf8@MQQo$Grj8&uTQ2776Q}|CPd=I%H zDoWRR#XU{>Ld5v%0cfxuzt z2B_&AIYjgu1%7a(9g@BjdOd{1Ia++826HuY7ZhTNyzu)@<_-*Faee@T*<_|^^hUv8h6mz zWFES&q@5tU9A{(>K$97v)w55}J3&DF-gH2SA{p-TNc!!PNlyxFm>yB*1femYoW#oW zY%+-7nW8H&Yza@;%Ex17wFI@3;2K`}V%)vG!X3-WCC-_mC&&>$jt&xL&Yd+-b2fz6 zoP+WR;*ldWkN0oW@48qAzwKFnysu~_6-dtKq0s?OpQS|H5NrSS5~y|=1*PJX2`A#s z?(4F^PFqDHdvIO5y;+IzTP~2iH&juzn)5hDjyR}EeV6tA*w?WW4?9fRgXZBiAYO_L z5-jxOLM3nJ7lR@_0W+cojfMI8z|pb5gg4rRYyeS!AiPTrFu-bzupom|XYMh5^lAQM zXsLow-|)D?exydK-hu8ekmla!U^dXsSFzC@R5Dcb&Dv3$Eg7QFgD)O22QU}YL#p?@ z%<}aqmVVkWkP1H$@w!d{DkG$Cq=!!m8c-C2m>K79=NI{)^-6_rA?_Iksjd(@C z#i|pc`EJD-{&IeO>1RG#Y+QcyTH0}^$~b1&OZ;10hFzoPtHP@lMThUOg5g8l(9cdq z7XHGIQl|lLEBQPw4FXJ9246IwTn3j(gWx)ola8DKV?q^P_OORZaG!_nc)6t)g5C!} zK0g(0gGNW;5H?*|J_5ljYq#br}#FW=B z2P9saK5^0KwTI?;#ffq?Njr5K-@kCSeC{!3wha{tAGLz1i>x;^8b;fZz~ z(Szfw%a*0-r$mk`yfHos7pPEqQA+EcK2cDg5)t<;pJv3ThS z?ACB3D({(R`kt1KiVFeH9ysxCja=RLSdO2d^t=w<-uml~43cZi`tu2^g2lSe8(-eM z7;kO>8SeQ=4N`i9e0!|8Xu%u3Z{Q_|NL8z9otyn(VS66-I{S&Nfr*Q0)3vg9U|DOm7c|A8}Z0pe94$$^1%h+EeB559cbK zZfi-nJbih8G3S!j@N#;czx>fJwTGf-+D+6t_p})(+x~LiP;+j(i|>VM zCKw$;f3fb~l$iJgWbG@`bMo?`vBgf=<9DdNP=YGr&UxpC`4W$dB3jfoi)+koK>Dt!e&$!9nz+}~~J zG^vd}{;Zh$o3Pr}aC_+0Cy@X#&xTlI}!pE5qIdS8`C zyn4Fbr~S)Q@S+Z4Wovu&t;9|5MYu}I-Yc`m*n4FdO&u8`TH@;ftU&mk7;q2yGWd_7Fp zmAZE%NLuxxp{u_@u>S$O9V{Z)+=OOn7w+O0?shOtS3cCv&zEg{i%X7bVnRLWM;$GO zNfSV>jS)9TB3`S5@T4#nD}3jv#+&@CcF+ELd1D6_ zIwUgqQX^M#gZAl}uz5ykd=Gk;3B+MIc5pp*NHA`g8219XYd>xmHx9yP#K{AKnoB!L z1(-%sWVm36(M+%@GQ4*L_HrbCaXo%XFk$B*|4J}~UClY}hn_^nhM9y$ng|%D#?KKs zTgk!S{OHKE-zj`k$p$7)mLSlK=Xar_>q(KnX2M0*69mu2steMfZgkAwDEv3Emnn&Y zvk5Qma*LP7$J^1f=}CX3@MmKm6C?+huuA@kQ>kH>O)e5lL%NYWvu;TTnr{Dv!uLZs%np_CqttQRMO8;efmxI^6wNrStI8XF?J?6=YzuS zy4f6LIs?^52htP4WnxgXh(wJ{PWxQ`fZUfOjLNiZCU@=`p>RZWu9Rk;OthqMbhZd6 z{k=ji-Yj?bW*$K}AMnom-kZBSSWvk+O;RymYcyZmUQ}(9F+Y(@f0H-7k#CY-V3y9S zJDTxhHWjRqYusF5ZC_|(mjAssOHr7vp_pp3Sx8AQu$wEG<0w4coPRF8$i0lwLBiD# zik?K_*M$m=nv1Rl6eCBAz{^E_z3HEuvMQ#Mw_P@Z64$=a+!W-$32;oV+Uy>pe))m4acCZ9t!a2(n{&dfkn zZ=$No+^hdb07w_N=pY-i|ADcCXss(Du_BwYDtjL=Pz1qHviKUS<65xsy0SL^d$Yyi zvM~Fw`5F=^i?cu*v}B>P0_&P5>#@Hov`V|QIYG20+o?#~tV}z#R7(&}8?)CMu~Dn7 zRok^ed$l|(v+qi?AM3SdE3{y{u{{eCKYO-ri?eBKs%!fZZTq%%o3e4+u5;@Vb$hpd zTd{dNurnK+wEDM(OR+8cvPBE1y}Gshdbp5#t|5@PJFB*0tF>gCw2`~H^BTBD3$>XG zmXN@?q$}=JzKGfd z-HWwHo4nXdzU-T(=4-s@3%b-hq3hee_FJXz8@(LsvyNN8_xry;+70>Zy|UZC0DQnm zD!}I(wpq)&^V_2Si@*|Gq5~Yi$os&B8o?91!53P=44l3ge7zhz!XxUzmfOJmtG*<> z!WL@6x{JbWYrrf#!wBladb_(R9KzyD!#up7HY~VftHa9M!$ka^Km55DOs5%K#7r!n zM~u1!tinw^#nk!4(mS|HEW%X0#l~sH`y0bj%*A4Sn_j%P9kH((JjQBVnPrT_U@XLG zyvA}|m~9NhbSuMioX3+{#~_@=JFLfmOqhHuzkWQ#fqck#Imk)>e5Qts$c}uLiu}EB z{K%ACmXRFAlU&J~ER~l$y@eddn;gm~*~wVU$e=9BsN9gG9K@fT%C4M}tBkps{K~XE 
zlCkW-tz65xypXq?!@0c6z)X+5{Kmf=%*OnV!+ghKe9X*DjmgZnc-+j=42{ox#?d^@ z*sLYh%n@ju&EBld3w*+TY|7tU&Q`L`8_~_?yv`Vc&T~r6>>SS&;?8DDs9G$~`1~OB z?5MMx&;AS``}~&u{Lciv9{~-V#az$|eIEzSrS|;C3mwt)(a?ihq)RN(7%d+Z{cs7L z(H;#L8!Zv){Lv)+7a=`M*Id#nJrySn5hK0QFdY>w{SYtz9n&^_6Ep1)HGR`Otq?ig z5Ifz|LR}C)?GHgc)JR>_M~&1%t<)*a)I;sm%M8^?9nLR2&JW$lR9)5Z`@w_^&s)va z`fJAA{M2D>(^5UqXMNLY-O+0u(`^mbZ_UDP*_(2NbKj{VpK9ogkN*_7?imQBBztwpMBAx?a`w> z*rhGerw!4m9nq_;)vdkIuZ`KUUCy&z+p2BYNG!>TjobKq+rACix(&siEZ4!U*~5Ls ztZdxJ{oBd?#mcPK%^lCcJ>B_S-PSG7*qz<%yxrXYZO-2P-QOJE;+@UpUEb4t-ss)T z>b>5_+}`dT%;nC`kl)B-QS}8-vGYJ0zTlCT;K-&$OxX`h`ito z9>@;<;CdY45J-r_9$ z;xJCaGCt!RT;n!A!8o4d2)yGw4!}PCHWItoIbCf{^{u&>Y{$Gq+aUWdg`bSt*XB2&f4m(F08N~>$p1Wv~H`me(S87 z>$)zgyx!}e`s=_xslq<&iCXN&eyGTv?035C%)X}1{_O7i++>~3W-aZT8|~QsrP{vj z_)G0>9NoR`?S|X!zsL}O@owwy9PghV|MC18 z@**FfBwzAIobkNP+$gWKC*Shp`SLLTAD%Kl^VV7OHebamkICbn^NYUo#l7)9AGA0> z^j!?}r7ZMDU+74`+)BUnh~D&w&GJz1=u(f%RA2Rxe)Ybb^;%EqTu;ni|Miz1_ToPF zWUuLFkJr+j_6l3{Zhx9^ANQp>_jGTWc7OMjnfH2sn0(*&biDQh-}ZnnuYX_od3pGV zf0v5C_;uO%jt|L#U)BU4`P~ZnmQR+LpZQ|B`JB(mlW*;n|M{;T`tSVoq|fT6U(Xd! z+o>O_p8xtg8T+!IleAy^H+lQG56i31&pp5UzTW!+{rkWl?81-GXkYx4s{6`MlFZ-y zs(kzo-TKg9>(XD*ja~h>`uy7epOD<&{RjE|;6IPzKmPGp{^pO&*U!<(pZ?R{@K;Uo z+3)_#zy2b9?eq`r_K(t|pZ{tK5M6xD^5u&evuz32EnFwhp*(*PB~GMR(c;91@))j5 zxb0v;f?fWNBw5nrNt7v7u4LKLN`s(4|)vssY-u-*O;dM85Sbq9%)z;I)=ilG|e*gna zNxt3cv(G&Is`E%V%LH`rK?oz1Fg60|gXqAi&eN~K2|M)gLl8p@t3v1u#L%t;(c@4= z7F%@jMHmOvYBBLlJFy}S7rZV;9((lh$Nxwi&%_%eb7(Rh_XBcCCYyA!I3aB;QlcCy zi~Mz1WRq1^r($nBc3Ehn zl~zWkMw_ohJV%Xm%v-1R_FG|_J@Hv^%QY8QaWN$KTz1?2G~EN&b$4ER6NT44dF$18 z-!$(nFJFEG7Whejr2}|igcFXj;NA{a_+f}WWcW0PBewWr04MhCNZ<~26=RS?=1=3t zDi--KJIXxg|C)2q zgZ5l-#zQxK=+QIh_H@>xPTg&nHE(@((d*9b?ng0){pZzb_dVy=k(B*+;!h6#?cR%5 zKIG$LPkwpmBW|8|q@%YU;pwj~{d(@*&3^Ij!{_(=$;CH+Uh;=OfBkRMf1Z8#rFFmh z`0G!0{1(hBp|8;xV-`<@KOg>G9e0Gh$RNTP=<4g;S6nfrW)Q*hg!J__OxjrgJ>9#M%QO5zfoIH4v!QHq#Sp@m9V!6{}@LQ&LW7vIB0FNRS)U=(8+ zw?jrVrg1rFRAU>B!$vp8aW-(2V;x;XM?2<`GkDZvA1}j4KL)Z(Rm9d6JD5K~CbBt% zBq0m2s7OdIvRb#Zq9YIKzer|sFo4u#C%MK*DRFRj+Vf;7HwnrO`s$IUWF=cT*-BUr z<(09drB7yAOI+sUmbv65O?KH!V7}y+!6c?ihFMHxmgJbpOy)9~Sxjd>GnmnoW-qD# zSedmoZ&R(ILYZrbDmR^=~QPV+1XA-z7w8;l;=GCSWkQE5uf?A z<39N*M}Pj)jR6&?8VOoZGaeM7VpQltz1UEPY7wFlo#I3(`b3Lfbcq?&=n*;E(II{m zq(2ntNOxFLlim=eDV^a;S^7ekz7&Nql_>~mTGI>O6sHc<=}r~cQ=b+Ps6hqbP>H%f zqaM|MNmXk6nA%kKJr%0wlj>B(S5>Qa535+Rg4YZ*}9V*!*O5weav=}w*1XX#-0j}1Fqm?c2T5HHr zg0Hqxr0s5H)?44E47kBH-fbns9Rl#T^G9w)9!Y$ z$6Rf}7J=UVV0XzIF!P>wU+GnEz1Z7c^no{T;)QQZ<%?S6d9c1Yz3*$0tG@m6cajkd zrGHba-vBr8y$LSYf*IVt{u;}<4_0u5oi$+!C(ywej@E`byetoY_AisQ9n7CY6&_=0haqdMb%)!4>V#j(M3yyL6(xM4pAvQ~vmF(McLxvNLs*nd^4 z8|El8xnxO3b6VBRGAARr&CtX0 zoI6!#J6j6Rd8X8!`MfAU|2c$XUSV_3ENHO4*=B4;H0T1|Xg@s~(sYvaq~%m;OZQ9B zJ&QA#GcDIm8|~1aW^1UO#%NL(4%4aT(yCd_LsM6+)0lkqUSsVwRM%Ruw=UbLb?s?7;cj*~W%;;$|J&Y4few*=|&|BX{lbavQSV*4?ma zE$+sWyM^e^+P2v(B6q(VMDdol#MGVZdW-hn($4p|30+A;gS+4V+TM4|NICGw5i%pL zHM7BWi}21Vyv!HI&%q(?@3&A=f()m)!;^dQ$x1wx4Tqh@K^`^|EgZKcfB4BQEAlRr z{LANddD2oo@e)_O1xGV zn2)(d{()qlQE&2~w=OdL4*0+eUhD^+1v8+|`s_;M^^DK|yyj=0-`ba98@Qj@8FWu0 z(wE-!zjxkS%0T?$3&(C2`nls#-)Ux)VHVI&88FfyeZb><`D#a=UYIclPH-P(nXrTT zr{DC)Y@YL5*AX)e!3Xu9%nyWce@Cu`_V_zJ^7}GWxV`+_7cM}BjEkVyqY9E>KI$=u zF4(>Uyck;$18G=5E>k|udqBA>yn^5aEvUeY!GcUchNcKV@tX(_96$w05@uk7IUqrZ zu>&{AKx7!bLJL3v{J>5th%%T#8bla7I0Ik+1`SNU>gz!HJGE^Qj)GtVEug?5+!riZ zgCop173@70Y(Y5dysXd)Nf3iB@B>Z&hAKQ4V8{gjKM(^ka0FjK!X#9}(~(gk_H#gh!+gQUz`PEJjP^PMrw2fimb^0iZld_%t(#gNRI4CkNilG3`vn3Ns=r{ zlRQb3Oi7hoNtSF$mwZW>j7gcCNt&!lo4iS!%t@Wx$(yuDpQOfR+(%XLM}Zv3%Tq`I zwY+q_NO%+1+)nQ7PVf9q@C;A!98dBrPxCxa^h{6ngwB90O3wt&w(Jd*fXmV} 
z&C^s)U|diA+)w`OPyhT+01Z$99Z&)-Py;8;&fHKH_06?(P|ys`;UrEcOc0mJLktCp70po{ z-BBLxQ6K$LAPrI>9a17KQX@T5Bu!E!T~a1(QYU>;SA5M^ece}n?N@*OSAY#zfgMND13Wr=yE*JVXmiJe&gimh0Sy;zLRSdHCSj_p{F{aBC|UqAgmZJz9i~*%wX9ctutf zTe#Fy+Q>s%s;ye9y;`izTCLq$uI*Z{tyrm5TBw~_selQfgW9l7TeV$VwryLteOtJV zTe+QEx~*Hgy<5D^TfN;|zU^DT{ae5dT)`b&!Yy3GP1~_0TbgwVl0)0WeO$y%Cs= z&0g)@UheH)@BLoz4PWscU-B(q^F3emO<(oJ-aM&^m0MrZVHR#-7k*(Fj$s*|VH&Ps71jwG&S4$iVIJ;bAO2w= z4q_o5Vj>1&o+x4@PGTiqVkT~4Cw^imj$#=02`R2(E52eZ&SEXzVlM7txT9h(4r4JM wV=^vdGd^P@G%nkpNMkl`V>f`ZMJkDc1-eW12iGTnAI}E}iX8-^I diff --git a/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist b/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist index 9625e105..7c569640 100644 --- a/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist +++ b/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist @@ -21,6 +21,6 @@ CFBundleVersion 1.0 MinimumOSVersion - 11.0 + 12.0 diff --git a/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj b/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj index d030af56..a50a737e 100644 --- a/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj +++ b/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj @@ -168,7 +168,7 @@ 97C146E61CF9000F007C117D /* Project object */ = { isa = PBXProject; attributes = { - LastUpgradeCheck = 1300; + LastUpgradeCheck = 1510; ORGANIZATIONNAME = ""; TargetAttributes = { 331C8080294A63A400263BE5 = { @@ -344,7 +344,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 11.0; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = iphoneos; SUPPORTED_PLATFORMS = iphoneos; @@ -472,7 +472,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 11.0; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; MTL_ENABLE_DEBUG_INFO = YES; ONLY_ACTIVE_ARCH = YES; SDKROOT = iphoneos; @@ -521,7 +521,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 11.0; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = iphoneos; SUPPORTED_PLATFORMS = iphoneos; diff --git a/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme b/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme index e42adcb3..8e3ca5df 100644 --- a/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme +++ b/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme @@ -1,6 +1,6 @@ { - HomeScreenCubit() : super(const HomeScreenState()); + HomeScreenCubit() : super(const HomeScreenState()) { + _updateChain(); + } + + RunnableSequence? 
chain; - void onClientTypeChanged(final ClientType clientType) { - emit(state.copyWith(clientType: clientType, response: '')); + void onProviderChanged(final Provider provider) { + emit( + state.copyWith( + status: HomeScreenStatus.idle, + provider: provider, + response: '', + ), + ); + _updateChain(); } - void onOpenAIKeyChanged(final String openAIKey) { - emit(state.copyWith(openAIKey: openAIKey)); + void onModelChanged(final String model) { + final newModel = { + ...state.model, + state.provider: model, + }; + emit(state.copyWith(model: newModel)); + _updateChain(); } - void onLocalUrlChanged(final String localUrl) { - emit(state.copyWith(localUrl: localUrl)); + void onApiKeyChanged(final String apiKey) { + final newApiKey = { + ...state.apiKey, + state.provider: apiKey, + }; + emit(state.copyWith(apiKey: newApiKey)); + _updateChain(); + } + + void onBaseUrlChanged(final String baseUrl) { + final newBaseUrl = { + ...state.baseUrl, + state.provider: baseUrl, + }; + emit(state.copyWith(baseUrl: newBaseUrl)); + _updateChain(); } void onQueryChanged(final String query) { @@ -27,68 +62,106 @@ class HomeScreenCubit extends Cubit { } Future onSubmitPressed() async { - final config = _getClientConfig(); - if (config == null) { - return; - } - final (apiKey, baseUrl) = config; + if (!_validateInput()) return; + emit(state.copyWith(status: HomeScreenStatus.generating, response: '')); - final query = state.query; - if (query == null || query.isEmpty) { + assert(chain != null); + final stream = chain!.stream(state.query).handleError(_onErrorGenerating); + await for (final result in stream) { emit( state.copyWith( status: HomeScreenStatus.idle, - error: HomeScreenError.queryEmpty, + response: (state.response) + result, ), ); - return; } + } - emit(state.copyWith(status: HomeScreenStatus.generating, response: '')); + bool _validateInput() { + final provider = state.provider; + if (provider.isRemote && (state.apiKey[provider] ?? '').isEmpty) { + emit( + state.copyWith( + status: HomeScreenStatus.idle, + error: HomeScreenError.apiKeyEmpty, + ), + ); + return false; + } - final llm = ChatOpenAI( - apiKey: apiKey, - baseUrl: baseUrl ?? '', - ); + if (state.query.isEmpty) { + emit( + state.copyWith( + status: HomeScreenStatus.idle, + error: HomeScreenError.queryEmpty, + ), + ); + return false; + } - final result = await llm([ChatMessage.humanText(query)]); - emit( - state.copyWith( - status: HomeScreenStatus.idle, - response: result.content.trim(), - ), - ); + return true; } - (String? apiKey, String? baseUrl)? _getClientConfig() { - final clientType = state.clientType; + void _updateChain() { + try { + final provider = state.provider; + final model = state.model; + final apiKey = state.apiKey; - if (clientType == ClientType.openAI) { - final openAIKey = state.openAIKey; - if (openAIKey == null || openAIKey.isEmpty) { - emit( - state.copyWith( - status: HomeScreenStatus.idle, - error: HomeScreenError.openAIKeyEmpty, + final chatModel = switch (provider) { + Provider.googleAI => ChatGoogleGenerativeAI( + apiKey: apiKey[provider] ?? '', + baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl, + defaultOptions: ChatGoogleGenerativeAIOptions( + model: model[provider] ?? provider.defaultModel, + ), + ), + Provider.mistral => ChatMistralAI( + apiKey: apiKey[provider] ?? '', + baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl, + defaultOptions: ChatMistralAIOptions( + model: model[provider] ?? provider.defaultModel, + ), + ), + Provider.openAI => ChatOpenAI( + apiKey: apiKey[provider] ?? 
'', + baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl, + defaultOptions: ChatOpenAIOptions( + model: model[provider] ?? provider.defaultModel, + ), ), - ); - return null; - } - - return (openAIKey, null); - } else { - final localUrl = state.localUrl; - if (localUrl == null || localUrl.isEmpty) { - emit( - state.copyWith( - status: HomeScreenStatus.idle, - error: HomeScreenError.localUrlEmpty, + Provider.ollama => ChatOllama( + baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl, + defaultOptions: ChatOllamaOptions( + model: model[provider] ?? provider.defaultModel, + ), ), - ); - return null; - } + } as BaseChatModel; - return (null, localUrl); + chain?.close(); + chain = Runnable.getMapFromInput('query') + .pipe( + ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'Your are a helpful assistant. Reply to the user using Markdown.', + ), + (ChatMessageType.human, '{query}'), + ]), + ) + .pipe(chatModel) + .pipe(const StringOutputParser()); + } catch (_) { + // Ignore invalid base URL exceptions } } + + void _onErrorGenerating(final Object error) { + emit( + state.copyWith( + status: HomeScreenStatus.idle, + error: HomeScreenError.generationError, + ), + ); + } } diff --git a/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart b/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart index d76e34dd..c5a95466 100644 --- a/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart +++ b/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart @@ -6,36 +6,40 @@ class HomeScreenState extends Equatable { const HomeScreenState({ this.status = HomeScreenStatus.idle, this.error, - this.clientType = ClientType.openAI, - this.openAIKey, - this.localUrl, - this.query, - this.response, + this.provider = Provider.ollama, + this.model = const {}, + this.apiKey = const {}, + this.baseUrl = const {}, + this.query = '', + this.response = '', }); final HomeScreenStatus status; final HomeScreenError? error; - final ClientType clientType; - final String? openAIKey; - final String? localUrl; - final String? query; - final String? response; + final Provider provider; + final Map model; + final Map apiKey; + final Map baseUrl; + final String query; + final String response; HomeScreenState copyWith({ final HomeScreenStatus? status, final HomeScreenError? error, - final ClientType? clientType, - final String? openAIKey, - final String? localUrl, + final Provider? provider, + final Map? model, + final Map? apiKey, + final Map? baseUrl, final String? query, final String? response, }) { return HomeScreenState( status: status ?? this.status, error: error, - clientType: clientType ?? this.clientType, - openAIKey: openAIKey ?? this.openAIKey, - localUrl: localUrl ?? this.localUrl, + provider: provider ?? this.provider, + model: model ?? this.model, + apiKey: apiKey ?? this.apiKey, + baseUrl: baseUrl ?? this.baseUrl, query: query ?? this.query, response: response ?? 
this.response, ); @@ -45,9 +49,10 @@ class HomeScreenState extends Equatable { List get props => [ status, error, - clientType, - openAIKey, - localUrl, + provider, + model, + apiKey, + baseUrl, query, response, ]; @@ -59,12 +64,9 @@ enum HomeScreenStatus { } enum HomeScreenError { - openAIKeyEmpty, - localUrlEmpty, + modelEmpty, + apiKeyEmpty, + baseUrlEmpty, queryEmpty, -} - -enum ClientType { - openAI, - local, + generationError, } diff --git a/examples/hello_world_flutter/lib/home/bloc/providers.dart b/examples/hello_world_flutter/lib/home/bloc/providers.dart new file mode 100644 index 00000000..c92b87af --- /dev/null +++ b/examples/hello_world_flutter/lib/home/bloc/providers.dart @@ -0,0 +1,40 @@ +// ignore_for_file: public_member_api_docs + +enum Provider { + googleAI( + name: 'GoogleAI', + defaultModel: 'gemini-1.5-pro', + defaultBaseUrl: 'https://generativelanguage.googleapis.com/v1beta', + isRemote: true, + ), + mistral( + name: 'Mistral', + defaultModel: 'mistral-small', + defaultBaseUrl: 'https://api.mistral.ai/v1', + isRemote: true, + ), + openAI( + name: 'OpenAI', + defaultModel: 'gpt-4o', + defaultBaseUrl: 'https://api.openai.com/v1', + isRemote: true, + ), + ollama( + name: 'Ollama', + defaultModel: 'llama3', + defaultBaseUrl: 'http://localhost:11434/api', + isRemote: false, + ); + + const Provider({ + required this.name, + required this.defaultModel, + required this.defaultBaseUrl, + required this.isRemote, + }); + + final String name; + final String defaultModel; + final String defaultBaseUrl; + final bool isRemote; +} diff --git a/examples/hello_world_flutter/lib/home/home_screen.dart b/examples/hello_world_flutter/lib/home/home_screen.dart index 2b46a017..5b117845 100644 --- a/examples/hello_world_flutter/lib/home/home_screen.dart +++ b/examples/hello_world_flutter/lib/home/home_screen.dart @@ -1,8 +1,10 @@ // ignore_for_file: public_member_api_docs import 'package:flutter/material.dart'; import 'package:flutter_bloc/flutter_bloc.dart'; +import 'package:flutter_markdown/flutter_markdown.dart'; import 'bloc/home_screen_cubit.dart'; +import 'bloc/providers.dart'; class HomeScreen extends StatelessWidget { const HomeScreen({super.key}); @@ -27,10 +29,7 @@ class _Scaffold extends StatelessWidget { backgroundColor: theme.colorScheme.inversePrimary, title: const Text('🦜️🔗 LangChain.dart'), ), - body: const Padding( - padding: EdgeInsets.all(16), - child: _Body(), - ), + body: const _Body(), ); } } @@ -38,146 +37,203 @@ class _Scaffold extends StatelessWidget { class _Body extends StatelessWidget { const _Body(); - @override - Widget build(final BuildContext context) { - return BlocBuilder( - buildWhen: (final previous, final current) => - previous.clientType != current.clientType, - builder: (final context, final state) { - return Column( - mainAxisSize: MainAxisSize.min, - crossAxisAlignment: CrossAxisAlignment.start, - children: [ - _ClientTypeSelector(state.clientType), - const SizedBox(height: 16), - if (state.clientType == ClientType.openAI) - const _OpenAIKeyTextField() - else - const _LocalUrlTextField(), - const SizedBox(height: 16), - const _QueryTextField(), - const SizedBox(height: 16), - const _SubmitButton(), - const SizedBox(height: 12), - const Divider(), - const SizedBox(height: 16), - const _Response(), - ], - ); - }, - ); - } -} - -class _ClientTypeSelector extends StatelessWidget { - const _ClientTypeSelector(this.selected); - - final ClientType selected; - @override Widget build(final BuildContext context) { final cubit = context.read(); - return Center( - 
child: SegmentedButton( - segments: const >[ - ButtonSegment( - value: ClientType.openAI, - label: Text('OpenAI'), - icon: Icon(Icons.cloud_outlined), - ), - ButtonSegment( - value: ClientType.local, - label: Text('Local'), - icon: Icon(Icons.install_desktop_outlined), + return BlocListener( + listenWhen: (final previous, final current) => + previous.error != current.error, + listener: (final context, final state) { + if (state.error == HomeScreenError.generationError) { + ScaffoldMessenger.of(context).showSnackBar( + const SnackBar( + content: Text( + 'An error occurred while generating the response', + ), + ), + ); + } + }, + child: SingleChildScrollView( + child: Padding( + padding: const EdgeInsets.all(16), + child: Column( + mainAxisSize: MainAxisSize.min, + crossAxisAlignment: CrossAxisAlignment.start, + children: [ + const _ProviderSelector(), + const SizedBox(height: 16), + Row( + crossAxisAlignment: CrossAxisAlignment.start, + children: [ + Expanded(child: _ApiKeyTextField(cubit)), + const SizedBox(width: 16), + Expanded(child: _BaseUrlTextField(cubit)), + ], + ), + const SizedBox(height: 16), + _ModelTextField(cubit), + const SizedBox(height: 16), + _QueryTextField(cubit), + const SizedBox(height: 16), + const _SubmitButton(), + const SizedBox(height: 12), + const Divider(), + const SizedBox(height: 16), + const _Response(), + ], ), - ], - selected: {selected}, - onSelectionChanged: (final Set newSelection) { - cubit.onClientTypeChanged(newSelection.first); - }, + ), ), ); } } -class _OpenAIKeyTextField extends StatelessWidget { - const _OpenAIKeyTextField(); +class _ProviderSelector extends StatelessWidget { + const _ProviderSelector(); @override Widget build(final BuildContext context) { final cubit = context.read(); return BlocBuilder( buildWhen: (final previous, final current) => - previous.error != current.error, + previous.provider != current.provider, builder: (final context, final state) { - return TextField( - controller: TextEditingController(text: state.openAIKey), - decoration: InputDecoration( - prefixIcon: const Icon(Icons.password), - labelText: 'OpenAI API key', - filled: true, - errorText: state.error == HomeScreenError.openAIKeyEmpty - ? 'OpenAI API key cannot be empty' - : null, + return Center( + child: SegmentedButton( + segments: Provider.values + .map( + (final provider) => ButtonSegment( + value: provider, + label: Text(provider.name), + icon: Icon( + provider.isRemote + ? Icons.cloud_outlined + : Icons.install_desktop_outlined, + ), + ), + ) + .toList(), + selected: {state.provider}, + onSelectionChanged: (final Set newSelection) { + cubit.onProviderChanged(newSelection.first); + }, ), - obscureText: true, - onChanged: cubit.onOpenAIKeyChanged, ); }, ); } } -class _LocalUrlTextField extends StatelessWidget { - const _LocalUrlTextField(); +class _ModelTextField extends _BaseTextField { + const _ModelTextField(this.cubit); + + final HomeScreenCubit cubit; @override - Widget build(final BuildContext context) { - final cubit = context.read(); - return BlocBuilder( - buildWhen: (final previous, final current) => - previous.error != current.error, - builder: (final context, final state) { - return TextField( - controller: TextEditingController(text: state.localUrl), - decoration: InputDecoration( - prefixIcon: const Icon(Icons.link), - labelText: 'Local URL', - filled: true, - errorText: state.error == HomeScreenError.localUrlEmpty - ? 
'Local URL cannot be empty' - : null, - ), - onChanged: cubit.onLocalUrlChanged, - ); - }, - ); - } + String get labelText => 'Model name'; + + @override + bool get obscureText => false; + + @override + IconData get prefixIcon => Icons.link; + + @override + HomeScreenError get errorType => HomeScreenError.modelEmpty; + + @override + String get errorText => 'Model name cannot be empty'; + + @override + String onProviderChanged(final HomeScreenState state) => + state.model[state.provider] ?? state.provider.defaultModel; + + @override + void onTextChanged(final String value) => cubit.onModelChanged(value); } -class _QueryTextField extends StatelessWidget { - const _QueryTextField(); +class _ApiKeyTextField extends _BaseTextField { + const _ApiKeyTextField(this.cubit); + + final HomeScreenCubit cubit; @override - Widget build(final BuildContext context) { - final cubit = context.read(); - return BlocBuilder( - buildWhen: (final previous, final current) => - previous.error != current.error, - builder: (final context, final state) { - return TextField( - decoration: InputDecoration( - labelText: 'Enter question', - filled: true, - errorText: state.error == HomeScreenError.queryEmpty - ? 'Question cannot be empty' - : null, - ), - onChanged: cubit.onQueryChanged, - ); - }, - ); - } + String get labelText => 'API key'; + + @override + bool get obscureText => true; + + @override + IconData get prefixIcon => Icons.password; + + @override + HomeScreenError get errorType => HomeScreenError.apiKeyEmpty; + + @override + String get errorText => 'Api API key cannot be empty'; + + @override + String onProviderChanged(final HomeScreenState state) => + state.apiKey[state.provider] ?? ''; + + @override + void onTextChanged(final String value) => cubit.onApiKeyChanged(value); +} + +class _BaseUrlTextField extends _BaseTextField { + const _BaseUrlTextField(this.cubit); + + final HomeScreenCubit cubit; + + @override + String get labelText => 'Base URL'; + + @override + bool get obscureText => false; + + @override + IconData get prefixIcon => Icons.language; + + @override + HomeScreenError get errorType => HomeScreenError.baseUrlEmpty; + + @override + String get errorText => 'Base URL cannot be empty'; + + @override + String onProviderChanged(final HomeScreenState state) => + state.baseUrl[state.provider] ?? state.provider.defaultBaseUrl; + + @override + void onTextChanged(final String value) => cubit.onBaseUrlChanged(value); +} + +class _QueryTextField extends _BaseTextField { + const _QueryTextField(this.cubit); + + final HomeScreenCubit cubit; + + @override + String get labelText => 'Enter question'; + + @override + bool get obscureText => false; + + @override + IconData get prefixIcon => Icons.question_answer; + + @override + HomeScreenError get errorType => HomeScreenError.queryEmpty; + + @override + String get errorText => 'Question cannot be empty'; + + @override + String onProviderChanged(final HomeScreenState state) => ''; + + @override + void onTextChanged(final String value) => cubit.onQueryChanged(value); } class _SubmitButton extends StatelessWidget { @@ -211,7 +267,7 @@ class _Response extends StatelessWidget { return BlocBuilder( builder: (final context, final state) { final response = state.response; - if (response == null || response.isEmpty) { + if (response.isEmpty) { return const SizedBox.shrink(); } @@ -224,8 +280,10 @@ class _Response extends StatelessWidget { 'Response', style: theme.textTheme.headlineSmall, ), - SelectableText( - state.response ?? 
'', + Markdown( + data: state.response, + shrinkWrap: true, + padding: EdgeInsets.zero, ), ], ); @@ -233,3 +291,64 @@ class _Response extends StatelessWidget { ); } } + +abstract class _BaseTextField extends StatefulWidget { + const _BaseTextField(); + + String get labelText; + + bool get obscureText; + + IconData get prefixIcon; + + HomeScreenError get errorType; + + String get errorText; + + String onProviderChanged(final HomeScreenState state); + + void onTextChanged(final String value); + + @override + _BaseTextFieldState createState() => _BaseTextFieldState(); +} + +class _BaseTextFieldState extends State<_BaseTextField> { + late TextEditingController _controller; + + @override + void initState() { + super.initState(); + _controller = TextEditingController(); + } + + @override + Widget build(BuildContext context) { + return BlocBuilder( + buildWhen: (previous, current) => + previous.provider != current.provider || + previous.error != current.error, + builder: (context, state) { + _controller.text = widget.onProviderChanged(state); + return TextField( + controller: _controller, + obscureText: widget.obscureText, + decoration: InputDecoration( + prefixIcon: Icon(widget.prefixIcon), + labelText: widget.labelText, + filled: true, + errorText: + state.error == widget.errorType ? widget.errorText : null, + ), + onChanged: widget.onTextChanged, + ); + }, + ); + } + + @override + void dispose() { + _controller.dispose(); + super.dispose(); + } +} diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index ecb15bcc..a402383b 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -1,6 +1,22 @@ # Generated by pub # See https://dart.dev/tools/pub/glossary#lockfile packages: + _discoveryapis_commons: + dependency: transitive + description: + name: _discoveryapis_commons + sha256: f8bb1fdbd77f3d5c1d62b5b0eca75fbf1e41bf4f6c62628f880582e2182ae45d + url: "https://pub.dev" + source: hosted + version: "1.0.6" + args: + dependency: transitive + description: + name: args + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" + url: "https://pub.dev" + source: hosted + version: "2.5.0" async: dependency: transitive description: @@ -94,6 +110,14 @@ packages: url: "https://pub.dev" source: hosted version: "8.1.5" + flutter_markdown: + dependency: "direct main" + description: + name: flutter_markdown + sha256: "04c4722cc36ec5af38acc38ece70d22d3c2123c61305d555750a091517bbe504" + url: "https://pub.dev" + source: hosted + version: "0.6.23" freezed_annotation: dependency: transitive description: @@ -102,6 +126,46 @@ packages: url: "https://pub.dev" source: hosted version: "2.4.1" + gcloud: + dependency: transitive + description: + name: gcloud + sha256: e9501083036d5f94027ce5afddd8ddae9b04121cf2fc6036b2cdd5663b52fca7 + url: "https://pub.dev" + source: hosted + version: "0.8.12" + google_generative_ai: + dependency: transitive + description: + name: google_generative_ai + sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be + url: "https://pub.dev" + source: hosted + version: "0.4.0" + google_identity_services_web: + dependency: transitive + description: + name: google_identity_services_web + sha256: "9482364c9f8b7bd36902572ebc3a7c2b5c8ee57a9c93e6eb5099c1a9ec5265d8" + url: "https://pub.dev" + source: hosted + version: "0.3.1+1" + googleapis: + dependency: transitive + description: + name: googleapis + sha256: "8a8c311723162af077ca73f94b823b97ff68770d966e29614d20baca9fdb490a" + 
url: "https://pub.dev" + source: hosted + version: "12.0.0" + googleapis_auth: + dependency: transitive + description: + name: googleapis_auth + sha256: befd71383a955535060acde8792e7efc11d2fccd03dd1d3ec434e85b68775938 + url: "https://pub.dev" + source: hosted + version: "1.6.0" http: dependency: transitive description: @@ -148,6 +212,27 @@ packages: relative: true source: path version: "0.3.1" + langchain_google: + dependency: "direct main" + description: + path: "../../packages/langchain_google" + relative: true + source: path + version: "0.5.0" + langchain_mistralai: + dependency: "direct main" + description: + path: "../../packages/langchain_mistralai" + relative: true + source: path + version: "0.2.0+1" + langchain_ollama: + dependency: "direct main" + description: + path: "../../packages/langchain_ollama" + relative: true + source: path + version: "0.2.1+1" langchain_openai: dependency: "direct main" description: @@ -163,6 +248,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.1" + markdown: + dependency: transitive + description: + name: markdown + sha256: ef2a1298144e3f985cc736b22e0ccdaf188b5b3970648f2d9dc13efd1d9df051 + url: "https://pub.dev" + source: hosted + version: "7.2.2" material_color_utilities: dependency: transitive description: @@ -179,6 +272,13 @@ packages: url: "https://pub.dev" source: hosted version: "1.12.0" + mistralai_dart: + dependency: "direct overridden" + description: + path: "../../packages/mistralai_dart" + relative: true + source: path + version: "0.0.3+1" nested: dependency: transitive description: @@ -187,6 +287,13 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.0" + ollama_dart: + dependency: "direct overridden" + description: + path: "../../packages/ollama_dart" + relative: true + source: path + version: "0.1.0+1" openai_dart: dependency: "direct overridden" description: @@ -210,6 +317,14 @@ packages: url: "https://pub.dev" source: hosted version: "6.1.1" + retry: + dependency: transitive + description: + name: retry + sha256: "822e118d5b3aafed083109c72d5f484c6dc66707885e07c0fbcb8b986bba7efc" + url: "https://pub.dev" + source: hosted + version: "3.1.2" rxdart: dependency: transitive description: @@ -279,6 +394,13 @@ packages: url: "https://pub.dev" source: hosted version: "2.1.4" + vertex_ai: + dependency: "direct overridden" + description: + path: "../../packages/vertex_ai" + relative: true + source: path + version: "0.1.0" web: dependency: transitive description: @@ -289,4 +411,4 @@ packages: version: "0.5.1" sdks: dart: ">=3.3.0 <4.0.0" - flutter: ">=1.16.0" + flutter: ">=3.19.0" diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index 9fc3a925..6d125283 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -11,7 +11,11 @@ dependencies: sdk: flutter equatable: ^2.0.5 flutter_bloc: ^8.1.5 + flutter_markdown: ^0.6.22 langchain: ^0.7.1 + langchain_google: ^0.5.0 + langchain_mistralai: ^0.2.0+1 + langchain_ollama: ^0.2.1+1 langchain_openai: ^0.6.1+1 flutter: diff --git a/examples/hello_world_flutter/pubspec_overrides.yaml b/examples/hello_world_flutter/pubspec_overrides.yaml index 93b5421a..d5192892 100644 --- a/examples/hello_world_flutter/pubspec_overrides.yaml +++ b/examples/hello_world_flutter/pubspec_overrides.yaml @@ -1,10 +1,22 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core +# melos_managed_dependency_overrides: 
langchain,langchain_openai,openai_dart,langchain_core,langchain_google,langchain_mistralai,langchain_ollama,mistralai_dart,ollama_dart,vertex_ai dependency_overrides: langchain: path: ../../packages/langchain langchain_core: path: ../../packages/langchain_core + langchain_google: + path: ../../packages/langchain_google + langchain_mistralai: + path: ../../packages/langchain_mistralai + langchain_ollama: + path: ../../packages/langchain_ollama langchain_openai: path: ../../packages/langchain_openai + mistralai_dart: + path: ../../packages/mistralai_dart + ollama_dart: + path: ../../packages/ollama_dart openai_dart: path: ../../packages/openai_dart + vertex_ai: + path: ../../packages/vertex_ai diff --git a/examples/hello_world_flutter/web/flutter_bootstrap.js b/examples/hello_world_flutter/web/flutter_bootstrap.js new file mode 100644 index 00000000..8ce49d8a --- /dev/null +++ b/examples/hello_world_flutter/web/flutter_bootstrap.js @@ -0,0 +1,12 @@ +{{flutter_js}} +{{flutter_build_config}} + +_flutter.loader.load({ + serviceWorkerSettings: { + serviceWorkerVersion: {{flutter_service_worker_version}}, + }, + onEntrypointLoaded: async function(engineInitializer) { + const appRunner = await engineInitializer.initializeEngine({useColorEmoji: true}); + await appRunner.runApp(); + }, +}); diff --git a/examples/hello_world_flutter/web/index.html b/examples/hello_world_flutter/web/index.html index add98e6a..68ffe01a 100644 --- a/examples/hello_world_flutter/web/index.html +++ b/examples/hello_world_flutter/web/index.html @@ -1,59 +1,25 @@ - + + + + - This is a placeholder for base href that will be replaced by the value of - the `--base-href` argument provided to `flutter build`. - --> - + + - - - - - - - - - - - - - - hello_world_flutter - - - - - + Hello World Flutter + - + diff --git a/examples/hello_world_flutter/web/manifest.json b/examples/hello_world_flutter/web/manifest.json index ab44f4f1..2332c807 100644 --- a/examples/hello_world_flutter/web/manifest.json +++ b/examples/hello_world_flutter/web/manifest.json @@ -1,11 +1,11 @@ { "name": "hello_world_flutter", - "short_name": "hello_world_flutter", + "short_name": "Hello World Flutter", "start_url": ".", "display": "standalone", "background_color": "#0175C2", "theme_color": "#0175C2", - "description": "A new Flutter project.", + "description": "A sample Flutter app integrating LangChain.", "orientation": "portrait-primary", "prefer_related_applications": false, "icons": [ From edccc1abf5cd69baf3fa56fbae86b5f84316d08f Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 29 May 2024 17:51:03 +0200 Subject: [PATCH 029/251] refactor: Reformat OpenAI OpenAPI specs (#443) --- packages/openai_dart/oas/openapi_curated.yaml | 78 ++-- .../openai_dart/oas/openapi_official.yaml | 382 +++++++++--------- 2 files changed, 230 insertions(+), 230 deletions(-) diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index b1a945bc..f3eb8a26 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1027,7 +1027,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -1153,7 +1153,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -1169,7 +1169,7 @@ paths: description: "Filter by file status. 
One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -1371,7 +1371,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -1387,7 +1387,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -1963,7 +1963,7 @@ components: `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - + `none` is the default when no tools are present. `auto` is the default if tools are present. oneOf: - type: string @@ -1972,7 +1972,7 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" user: *end_user_param_configuration function_call: @@ -2808,7 +2808,7 @@ components: type: string description: | The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. - enum: ["wandb"] + enum: [ "wandb" ] wandb: id: FineTuningIntegrationWandB type: object @@ -2943,7 +2943,7 @@ components: object: type: string description: The object type, which is always "list". - enum: [list] + enum: [ list ] first_id: type: string description: The ID of the first checkpoint in the list. @@ -3036,7 +3036,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job.checkpoint". - enum: [fine_tuning.job.checkpoint] + enum: [ fine_tuning.job.checkpoint ] required: - created_at - fine_tuning_job_id @@ -3442,7 +3442,7 @@ components: title: AssistantResponseFormatMode description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsResponseFormat" required: - id @@ -3555,7 +3555,7 @@ components: title: CreateAssistantResponseFormatMode description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsResponseFormat" required: - model @@ -3635,7 +3635,7 @@ components: title: ModifyAssistantResponseFormatMode description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsResponseFormat" DeleteAssistantResponse: type: object @@ -3730,7 +3730,7 @@ components: type: type: string title: AssistantsToolType - enum: ["function", "code_interpreter", "file_search"] + enum: [ "function", "code_interpreter", "file_search" ] description: The type of the tool. 
If type is `function`, the function name must be set function: $ref: "#/components/schemas/AssistantsFunctionCallOption" @@ -3752,7 +3752,7 @@ components: type: type: string title: AssistantsResponseFormatType - enum: ["text", "json_object"] + enum: [ "text", "json_object" ] example: "json_object" default: "text" description: Must be one of `text` or `json_object`. @@ -3764,7 +3764,7 @@ components: type: string name: TruncationStrategy description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: ["auto", "last_messages"] + enum: [ "auto", "last_messages" ] last_messages: type: integer description: The number of most recent messages from the thread when constructing the context for the run. @@ -3842,7 +3842,7 @@ components: code: type: string description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - enum: ["server_error", "rate_limit_exceeded", "invalid_prompt"] + enum: [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] message: type: string description: A human-readable description of the error. @@ -3877,7 +3877,7 @@ components: reason: description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. type: string - enum: ["max_completion_tokens", "max_prompt_tokens"] + enum: [ "max_completion_tokens", "max_prompt_tokens" ] model: description: The model that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. type: string @@ -3936,21 +3936,21 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" response_format: description: | Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: - type: string title: RunObjectResponseFormatMode description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsResponseFormat" required: - id @@ -4113,7 +4113,7 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" response_format: description: | @@ -4127,7 +4127,7 @@ components: title: CreateRunRequestResponseFormatMode description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsResponseFormat" stream: type: boolean @@ -4343,7 +4343,7 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" response_format: description: | @@ -4357,7 +4357,7 @@ components: title: CreateThreadAndRunRequestResponseFormatMode description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsResponseFormat" stream: type: boolean @@ -4436,7 +4436,7 @@ components: type: array description: | A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -4544,7 +4544,7 @@ components: description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. type: string nullable: true - enum: ["in_progress", "incomplete", "completed"] + enum: [ "in_progress", "incomplete", "completed" ] incomplete_details: id: MessageIncompleteDetails type: object @@ -4641,7 +4641,7 @@ components: object: description: The object type, which is always `thread.message.delta`. type: string - enum: ["thread.message.delta"] + enum: [ "thread.message.delta" ] delta: $ref: "#/components/schemas/MessageDelta" required: @@ -5166,7 +5166,7 @@ components: object: description: The object type, which is always `thread.run.step.delta`. type: string - enum: ["thread.run.step.delta"] + enum: [ "thread.run.step.delta" ] delta: $ref: "#/components/schemas/RunStepDelta" required: @@ -5580,7 +5580,7 @@ components: anchor: description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." type: string - enum: ["last_active_at"] + enum: [ "last_active_at" ] days: description: The number of days after the anchor time that the vector store will expire. type: integer @@ -5639,7 +5639,7 @@ components: description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. type: string name: VectorStoreStatus - enum: ["expired", "in_progress", "completed"] + enum: [ "expired", "in_progress", "completed" ] expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" expires_at: @@ -5772,7 +5772,7 @@ components: description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. 
type: string title: VectorStoreFileStatus - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] last_error: id: VectorStoreFileLastError type: object @@ -5879,7 +5879,7 @@ components: status: description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. type: string - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] file_counts: type: object description: The number of files per status. @@ -6128,11 +6128,11 @@ components: nullable: true BatchEndpoint: type: string - enum: ["/v1/chat/completions", "/v1/embeddings", "/v1/completions"] + enum: [ "/v1/chat/completions", "/v1/embeddings", "/v1/completions" ] description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. BatchCompletionWindow: type: string - enum: ["24h"] + enum: [ "24h" ] description: The time frame within which the batch should be processed. Currently only `24h` is supported. Batch: type: object @@ -6142,7 +6142,7 @@ components: type: string object: type: string - enum: [batch] + enum: [ batch ] description: The object type, which is always `batch`. endpoint: $ref: "#/components/schemas/BatchEndpoint" @@ -6262,7 +6262,7 @@ components: description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. method: type: string - enum: ["POST"] + enum: [ "POST" ] description: The HTTP method to be used for the request. Currently only `POST` is supported. url: type: string @@ -6327,7 +6327,7 @@ components: object: type: string description: The object type, which is always `list`. - enum: [list] + enum: [ list ] required: - object - data diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index fa38d7f7..2f18ad09 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -2608,7 +2608,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: &pagination_after_param_description | @@ -3457,7 +3457,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -4300,7 +4300,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -5409,7 +5409,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -5615,7 +5615,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -5990,7 +5990,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -6006,7 +6006,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." 
schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -6552,7 +6552,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -6568,7 +6568,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -6659,11 +6659,11 @@ paths: Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/requestInput), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. endpoint: type: string - enum: ["/v1/chat/completions", "/v1/embeddings", "/v1/completions"] + enum: [ "/v1/chat/completions", "/v1/embeddings", "/v1/completions" ] description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. completion_window: type: string - enum: ["24h"] + enum: [ "24h" ] description: The time frame within which the batch should be processed. Currently only `24h` is supported. metadata: type: object @@ -7040,7 +7040,7 @@ components: properties: object: type: string - enum: [list] + enum: [ list ] data: type: array items: @@ -7071,7 +7071,7 @@ components: anyOf: - type: string - type: string - enum: ["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"] + enum: [ "gpt-3.5-turbo-instruct", "davinci-002", "babbage-002" ] x-oaiTypeLabel: string prompt: description: &completions_prompt_description | @@ -7283,7 +7283,7 @@ components: The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, or `content_filter` if content was omitted due to a flag from our content filters. - enum: ["stop", "length", "content_filter"] + enum: [ "stop", "length", "content_filter" ] index: type: integer logprobs: @@ -7325,7 +7325,7 @@ components: object: type: string description: The object type, which is always "text_completion" - enum: [text_completion] + enum: [ text_completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -7370,7 +7370,7 @@ components: properties: type: type: string - enum: ["image_url"] + enum: [ "image_url" ] description: The type of the content part. image_url: type: object @@ -7382,7 +7382,7 @@ components: detail: type: string description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - url @@ -7396,7 +7396,7 @@ components: properties: type: type: string - enum: ["text"] + enum: [ "text" ] description: The type of the content part. text: type: string @@ -7423,7 +7423,7 @@ components: type: string role: type: string - enum: ["system"] + enum: [ "system" ] description: The role of the messages author, in this case `system`. 
name: type: string @@ -7452,7 +7452,7 @@ components: x-oaiExpandable: true role: type: string - enum: ["user"] + enum: [ "user" ] description: The role of the messages author, in this case `user`. name: type: string @@ -7472,7 +7472,7 @@ components: The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. role: type: string - enum: ["assistant"] + enum: [ "assistant" ] description: The role of the messages author, in this case `assistant`. name: type: string @@ -7502,7 +7502,7 @@ components: properties: role: type: string - enum: ["tool"] + enum: [ "tool" ] description: The role of the messages author, in this case `tool`. content: type: string @@ -7522,7 +7522,7 @@ components: properties: role: type: string - enum: ["function"] + enum: [ "function" ] description: The role of the messages author, in this case `function`. content: nullable: true @@ -7572,7 +7572,7 @@ components: properties: type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: $ref: "#/components/schemas/FunctionObject" @@ -7609,7 +7609,7 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" x-oaiExpandable: true @@ -7619,7 +7619,7 @@ components: properties: type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7648,7 +7648,7 @@ components: description: The ID of the tool call. type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7678,7 +7678,7 @@ components: description: The ID of the tool call. type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7727,7 +7727,7 @@ components: $ref: "#/components/schemas/ChatCompletionMessageToolCalls" role: type: string - enum: ["assistant"] + enum: [ "assistant" ] description: The role of the author of this message. function_call: type: object @@ -7772,7 +7772,7 @@ components: $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" role: type: string - enum: ["system", "user", "assistant", "tool"] + enum: [ "system", "user", "assistant", "tool" ] description: The role of the author of this message. CreateChatCompletionRequest: @@ -7877,7 +7877,7 @@ components: properties: type: type: string - enum: ["text", "json_object"] + enum: [ "text", "json_object" ] example: "json_object" default: "text" description: Must be one of `text` or `json_object`. @@ -7955,7 +7955,7 @@ components: description: > `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" x-oaiExpandable: true functions: @@ -8040,7 +8040,7 @@ components: object: type: string description: The object type, which is always `chat.completion`. 
- enum: [chat.completion] + enum: [ chat.completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -8077,7 +8077,7 @@ components: description: &chat_completion_function_finish_reason_description | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. - enum: ["stop", "length", "function_call", "content_filter"] + enum: [ "stop", "length", "function_call", "content_filter" ] index: type: integer description: The index of the choice in the list of choices. @@ -8098,7 +8098,7 @@ components: object: type: string description: The object type, which is always `chat.completion`. - enum: [chat.completion] + enum: [ chat.completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -8157,7 +8157,7 @@ components: type: boolean object: type: string - enum: [list] + enum: [ list ] required: - object - data @@ -8214,7 +8214,7 @@ components: object: type: string description: The object type, which is always `chat.completion.chunk`. - enum: [chat.completion.chunk] + enum: [ chat.completion.chunk ] usage: type: object description: | @@ -8264,7 +8264,7 @@ components: anyOf: - type: string - type: string - enum: ["dall-e-2", "dall-e-3"] + enum: [ "dall-e-2", "dall-e-3" ] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-3" @@ -8280,27 +8280,27 @@ components: description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. quality: type: string - enum: ["standard", "hd"] + enum: [ "standard", "hd" ] default: "standard" example: "standard" description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. response_format: &images_response_format type: string - enum: ["url", "b64_json"] + enum: [ "url", "b64_json" ] default: "url" example: "url" nullable: true description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. size: &images_size type: string - enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] + enum: [ "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792" ] default: "1024x1024" example: "1024x1024" nullable: true description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. style: type: string - enum: ["vivid", "natural"] + enum: [ "vivid", "natural" ] default: "vivid" example: "vivid" nullable: true @@ -8361,7 +8361,7 @@ components: anyOf: - type: string - type: string - enum: ["dall-e-2"] + enum: [ "dall-e-2" ] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-2" @@ -8377,7 +8377,7 @@ components: description: The number of images to generate. Must be between 1 and 10. 
size: &dalle2_images_size type: string - enum: ["256x256", "512x512", "1024x1024"] + enum: [ "256x256", "512x512", "1024x1024" ] default: "1024x1024" example: "1024x1024" nullable: true @@ -8399,7 +8399,7 @@ components: anyOf: - type: string - type: string - enum: ["dall-e-2"] + enum: [ "dall-e-2" ] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-2" @@ -8437,7 +8437,7 @@ components: anyOf: - type: string - type: string - enum: ["text-moderation-latest", "text-moderation-stable"] + enum: [ "text-moderation-latest", "text-moderation-stable" ] x-oaiTypeLabel: string required: - input @@ -8580,7 +8580,7 @@ components: $ref: "#/components/schemas/OpenAIFile" object: type: string - enum: [list] + enum: [ list ] required: - object - data @@ -8600,7 +8600,7 @@ components: Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). type: string - enum: ["assistants", "batch", "fine-tune"] + enum: [ "assistants", "batch", "fine-tune" ] required: - file - purpose @@ -8612,7 +8612,7 @@ components: type: string object: type: string - enum: [file] + enum: [ file ] deleted: type: boolean required: @@ -8631,7 +8631,7 @@ components: anyOf: - type: string - type: string - enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] + enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo" ] x-oaiTypeLabel: string training_file: description: | @@ -8654,7 +8654,7 @@ components: are updated less frequently, but with lower variance. oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: integer minimum: 1 maximum: 256 @@ -8665,7 +8665,7 @@ components: overfitting. oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: number minimum: 0 exclusiveMinimum: true @@ -8676,7 +8676,7 @@ components: through the training dataset. oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: integer minimum: 1 maximum: 50 @@ -8721,7 +8721,7 @@ components: The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. oneOf: - type: string - enum: [wandb] + enum: [ wandb ] wandb: type: object description: | @@ -8778,7 +8778,7 @@ components: $ref: "#/components/schemas/FineTuningJobEvent" object: type: string - enum: [list] + enum: [ list ] required: - object - data @@ -8792,7 +8792,7 @@ components: $ref: "#/components/schemas/FineTuningJobCheckpoint" object: type: string - enum: [list] + enum: [ list ] first_id: type: string nullable: true @@ -8867,7 +8867,7 @@ components: example: "float" default: "float" type: string - enum: ["float", "base64"] + enum: [ "float", "base64" ] dimensions: description: | The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. @@ -8892,7 +8892,7 @@ components: object: type: string description: The object type, which is always "list". - enum: [list] + enum: [ list ] usage: type: object description: The usage information for the request. @@ -8929,7 +8929,7 @@ components: anyOf: - type: string - type: string - enum: ["whisper-1"] + enum: [ "whisper-1" ] x-oaiTypeLabel: string language: description: | @@ -8964,7 +8964,7 @@ components: enum: - word - segment - default: [segment] + default: [ segment ] required: - file - model @@ -9051,7 +9051,7 @@ components: type: number format: float description: End time of the word in seconds. 
- required: [word, start, end] + required: [ word, start, end ] CreateTranscriptionResponseVerboseJson: type: object @@ -9076,7 +9076,7 @@ components: description: Segments of the transcribed text and their corresponding details. items: $ref: "#/components/schemas/TranscriptionSegment" - required: [language, duration, text] + required: [ language, duration, text ] x-oaiMeta: name: The transcription object group: audio @@ -9099,7 +9099,7 @@ components: anyOf: - type: string - type: string - enum: ["whisper-1"] + enum: [ "whisper-1" ] x-oaiTypeLabel: string prompt: description: | @@ -9145,7 +9145,7 @@ components: description: Segments of the translated text and their corresponding details. items: $ref: "#/components/schemas/TranscriptionSegment" - required: [language, duration, text] + required: [ language, duration, text ] CreateSpeechRequest: type: object @@ -9157,7 +9157,7 @@ components: anyOf: - type: string - type: string - enum: ["tts-1", "tts-1-hd"] + enum: [ "tts-1", "tts-1-hd" ] x-oaiTypeLabel: string input: type: string @@ -9166,12 +9166,12 @@ components: voice: description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). type: string - enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] + enum: [ "alloy", "echo", "fable", "onyx", "nova", "shimmer" ] response_format: description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." default: "mp3" type: string - enum: ["mp3", "opus", "aac", "flac", "wav", "pcm"] + enum: [ "mp3", "opus", "aac", "flac", "wav", "pcm" ] speed: description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." type: number @@ -9196,7 +9196,7 @@ components: object: type: string description: The object type, which is always "model". - enum: [model] + enum: [ model ] owned_by: type: string description: The organization that owns the model. @@ -9228,7 +9228,7 @@ components: object: type: string description: The object type, which is always `file`. - enum: ["file"] + enum: [ "file" ] purpose: type: string description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. @@ -9246,7 +9246,7 @@ components: type: string deprecated: true description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. - enum: ["uploaded", "processed", "error"] + enum: [ "uploaded", "processed", "error" ] status_details: type: string deprecated: true @@ -9287,7 +9287,7 @@ components: object: type: string description: The object type, which is always "embedding". - enum: [embedding] + enum: [ embedding ] required: - index - object @@ -9352,15 +9352,15 @@ components: n_epochs: oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: integer minimum: 1 maximum: 50 default: auto description: - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. 
If setting the number manually, we support any number between 1 and 50 epochs. required: - n_epochs model: @@ -9369,7 +9369,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job". - enum: [fine_tuning.job] + enum: [ fine_tuning.job ] organization_id: type: string description: The organization that owns the fine-tuning job. @@ -9448,7 +9448,7 @@ components: type: type: string description: "The type of the integration being enabled for the fine-tuning job" - enum: ["wandb"] + enum: [ "wandb" ] wandb: type: object description: | @@ -9493,12 +9493,12 @@ components: type: integer level: type: string - enum: ["info", "warn", "error"] + enum: [ "info", "warn", "error" ] message: type: string object: type: string - enum: [fine_tuning.job.event] + enum: [ fine_tuning.job.event ] required: - id - object @@ -9558,7 +9558,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job.checkpoint". - enum: [fine_tuning.job.checkpoint] + enum: [ fine_tuning.job.checkpoint ] required: - created_at - fine_tuning_job_id @@ -9655,7 +9655,7 @@ components: - type: string description: > `auto` is the default value - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/AssistantsApiResponseFormat" x-oaiExpandable: true @@ -9666,7 +9666,7 @@ components: properties: type: type: string - enum: ["text", "json_object"] + enum: [ "text", "json_object" ] example: "json_object" default: "text" description: Must be one of `text` or `json_object`. @@ -9682,7 +9682,7 @@ components: object: description: The object type, which is always `assistant`. type: string - enum: [assistant] + enum: [ assistant ] created_at: description: The Unix timestamp (in seconds) for when the assistant was created. type: integer @@ -9710,7 +9710,7 @@ components: tools: description: &assistant_tools_param_description | A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - default: [] + default: [ ] type: array maxItems: 128 items: @@ -9731,7 +9731,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -9841,7 +9841,7 @@ components: maxLength: 256000 tools: description: *assistant_tools_param_description - default: [] + default: [ ] type: array maxItems: 128 items: @@ -9862,7 +9862,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -9897,8 +9897,8 @@ components: Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
x-oaiTypeLabel: map oneOf: - - required: [vector_store_ids] - - required: [vector_stores] + - required: [ vector_store_ids ] + - required: [ vector_stores ] nullable: true metadata: description: *metadata_description @@ -9956,7 +9956,7 @@ components: maxLength: 256000 tools: description: *assistant_tools_param_description - default: [] + default: [ ] type: array maxItems: 128 items: @@ -9977,7 +9977,7 @@ components: type: array description: | Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10029,7 +10029,7 @@ components: type: boolean object: type: string - enum: [assistant.deleted] + enum: [ assistant.deleted ] required: - id - object @@ -10072,7 +10072,7 @@ components: type: type: string description: "The type of tool being defined: `code_interpreter`" - enum: ["code_interpreter"] + enum: [ "code_interpreter" ] required: - type @@ -10083,7 +10083,7 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - enum: ["file_search"] + enum: [ "file_search" ] required: - type @@ -10094,7 +10094,7 @@ components: type: type: string description: "The type of tool being defined: `function`" - enum: ["function"] + enum: [ "function" ] function: $ref: "#/components/schemas/FunctionObject" required: @@ -10109,7 +10109,7 @@ components: type: type: string description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: ["auto", "last_messages"] + enum: [ "auto", "last_messages" ] last_messages: type: integer description: The number of most recent messages from the thread when constructing the context for the run. @@ -10132,7 +10132,7 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" x-oaiExpandable: true @@ -10142,7 +10142,7 @@ components: properties: type: type: string - enum: ["function", "code_interpreter", "file_search"] + enum: [ "function", "code_interpreter", "file_search" ] description: The type of the tool. If type is `function`, the function name must be set function: type: object @@ -10166,7 +10166,7 @@ components: object: description: The object type, which is always `thread.run`. type: string - enum: ["thread.run"] + enum: [ "thread.run" ] created_at: description: The Unix timestamp (in seconds) for when the run was created. type: integer @@ -10199,7 +10199,7 @@ components: type: description: For now, this is always `submit_tool_outputs`. type: string - enum: ["submit_tool_outputs"] + enum: [ "submit_tool_outputs" ] submit_tool_outputs: type: object description: Details on the tool outputs needed for this run to continue. @@ -10222,7 +10222,7 @@ components: code: type: string description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. 
- enum: ["server_error", "rate_limit_exceeded", "invalid_prompt"] + enum: [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] message: type: string description: A human-readable description of the error. @@ -10257,7 +10257,7 @@ components: reason: description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. type: string - enum: ["max_completion_tokens", "max_prompt_tokens"] + enum: [ "max_completion_tokens", "max_prompt_tokens" ] model: description: The model that the [assistant](/docs/api-reference/assistants) used for this run. type: string @@ -10266,7 +10266,7 @@ components: type: string tools: description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. - default: [] + default: [ ] type: array maxItems: 20 items: @@ -10558,7 +10558,7 @@ components: type: type: string description: The type of tool call the output is required for. For now, this is always `function`. - enum: ["function"] + enum: [ "function" ] function: type: object description: The function definition. @@ -10644,7 +10644,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10721,7 +10721,7 @@ components: object: description: The object type, which is always `thread`. type: string - enum: ["thread"] + enum: [ "thread" ] created_at: description: The Unix timestamp (in seconds) for when the thread was created. type: integer @@ -10737,7 +10737,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10795,7 +10795,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10830,8 +10830,8 @@ components: Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. x-oaiTypeLabel: map oneOf: - - required: [vector_store_ids] - - required: [vector_stores] + - required: [ vector_store_ids ] + - required: [ vector_stores ] nullable: true metadata: description: *metadata_description @@ -10855,7 +10855,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10885,7 +10885,7 @@ components: type: boolean object: type: string - enum: [thread.deleted] + enum: [ thread.deleted ] required: - id - object @@ -10927,7 +10927,7 @@ components: object: description: The object type, which is always `thread.message`. type: string - enum: ["thread.message"] + enum: [ "thread.message" ] created_at: description: The Unix timestamp (in seconds) for when the message was created. 
type: integer @@ -10937,7 +10937,7 @@ components: status: description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. type: string - enum: ["in_progress", "incomplete", "completed"] + enum: [ "in_progress", "incomplete", "completed" ] incomplete_details: description: On an incomplete message, details about why the message is incomplete. type: object @@ -10967,7 +10967,7 @@ components: role: description: The entity that produced the message. One of `user` or `assistant`. type: string - enum: ["user", "assistant"] + enum: [ "user", "assistant" ] content: description: The content of the message in array of text and/or images. type: array @@ -11060,7 +11060,7 @@ components: object: description: The object type, which is always `thread.message.delta`. type: string - enum: ["thread.message.delta"] + enum: [ "thread.message.delta" ] delta: description: The delta containing the fields that have changed on the Message. type: object @@ -11068,7 +11068,7 @@ components: role: description: The entity that produced the message. One of `user` or `assistant`. type: string - enum: ["user", "assistant"] + enum: [ "user", "assistant" ] content: description: The content of the message in array of text and/or images. type: array @@ -11109,7 +11109,7 @@ components: properties: role: type: string - enum: ["user", "assistant"] + enum: [ "user", "assistant" ] description: | The role of the entity that is creating the message. Allowed values include: - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. @@ -11176,7 +11176,7 @@ components: type: boolean object: type: string - enum: [thread.message.deleted] + enum: [ thread.message.deleted ] required: - id - object @@ -11215,7 +11215,7 @@ components: type: description: Always `image_file`. type: string - enum: ["image_file"] + enum: [ "image_file" ] image_file: type: object properties: @@ -11225,7 +11225,7 @@ components: detail: type: string description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - file_id @@ -11244,7 +11244,7 @@ components: type: description: Always `image_file`. type: string - enum: ["image_file"] + enum: [ "image_file" ] image_file: type: object properties: @@ -11254,7 +11254,7 @@ components: detail: type: string description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - index @@ -11267,7 +11267,7 @@ components: properties: type: type: string - enum: ["image_url"] + enum: [ "image_url" ] description: The type of the content part. image_url: type: object @@ -11279,7 +11279,7 @@ components: detail: type: string description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - url @@ -11298,7 +11298,7 @@ components: type: description: Always `image_url`. type: string - enum: ["image_url"] + enum: [ "image_url" ] image_url: type: object properties: @@ -11308,7 +11308,7 @@ components: detail: type: string description: Specifies the detail level of the image. 
`low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - index @@ -11322,7 +11322,7 @@ components: type: description: Always `text`. type: string - enum: ["text"] + enum: [ "text" ] text: type: object properties: @@ -11351,7 +11351,7 @@ components: type: description: Always `text`. type: string - enum: ["text"] + enum: [ "text" ] text: type: string description: Text content to be sent to the model @@ -11367,7 +11367,7 @@ components: type: description: Always `file_citation`. type: string - enum: ["file_citation"] + enum: [ "file_citation" ] text: description: The text in the message content that needs to be replaced. type: string @@ -11404,7 +11404,7 @@ components: type: description: Always `file_path`. type: string - enum: ["file_path"] + enum: [ "file_path" ] text: description: The text in the message content that needs to be replaced. type: string @@ -11440,7 +11440,7 @@ components: type: description: Always `text`. type: string - enum: ["text"] + enum: [ "text" ] text: type: object properties: @@ -11469,7 +11469,7 @@ components: type: description: Always `file_citation`. type: string - enum: ["file_citation"] + enum: [ "file_citation" ] text: description: The text in the message content that needs to be replaced. type: string @@ -11503,7 +11503,7 @@ components: type: description: Always `file_path`. type: string - enum: ["file_path"] + enum: [ "file_path" ] text: description: The text in the message content that needs to be replaced. type: string @@ -11535,7 +11535,7 @@ components: object: description: The object type, which is always `thread.run.step`. type: string - enum: ["thread.run.step"] + enum: [ "thread.run.step" ] created_at: description: The Unix timestamp (in seconds) for when the run step was created. type: integer @@ -11551,11 +11551,11 @@ components: type: description: The type of run step, which can be either `message_creation` or `tool_calls`. type: string - enum: ["message_creation", "tool_calls"] + enum: [ "message_creation", "tool_calls" ] status: description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. type: string - enum: ["in_progress", "cancelled", "failed", "completed", "expired"] + enum: [ "in_progress", "cancelled", "failed", "completed", "expired" ] step_details: type: object description: The details of the run step. @@ -11571,7 +11571,7 @@ components: code: type: string description: One of `server_error` or `rate_limit_exceeded`. - enum: ["server_error", "rate_limit_exceeded"] + enum: [ "server_error", "rate_limit_exceeded" ] message: type: string description: A human-readable description of the error. @@ -11635,7 +11635,7 @@ components: object: description: The object type, which is always `thread.run.step.delta`. type: string - enum: ["thread.run.step.delta"] + enum: [ "thread.run.step.delta" ] delta: description: The delta containing the fields that have changed on the run step. type: object @@ -11706,7 +11706,7 @@ components: type: description: Always `message_creation`. type: string - enum: ["message_creation"] + enum: [ "message_creation" ] message_creation: type: object properties: @@ -11727,7 +11727,7 @@ components: type: description: Always `message_creation`. type: string - enum: ["message_creation"] + enum: [ "message_creation" ] message_creation: type: object properties: @@ -11745,7 +11745,7 @@ components: type: description: Always `tool_calls`. 
type: string - enum: ["tool_calls"] + enum: [ "tool_calls" ] tool_calls: type: array description: | @@ -11768,7 +11768,7 @@ components: type: description: Always `tool_calls`. type: string - enum: ["tool_calls"] + enum: [ "tool_calls" ] tool_calls: type: array description: | @@ -11793,7 +11793,7 @@ components: type: type: string description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: ["code_interpreter"] + enum: [ "code_interpreter" ] code_interpreter: type: object description: The Code Interpreter tool call definition. @@ -11832,7 +11832,7 @@ components: type: type: string description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: ["code_interpreter"] + enum: [ "code_interpreter" ] code_interpreter: type: object description: The Code Interpreter tool call definition. @@ -11861,7 +11861,7 @@ components: type: description: Always `logs`. type: string - enum: ["logs"] + enum: [ "logs" ] logs: type: string description: The text output from the Code Interpreter tool call. @@ -11880,7 +11880,7 @@ components: type: description: Always `logs`. type: string - enum: ["logs"] + enum: [ "logs" ] logs: type: string description: The text output from the Code Interpreter tool call. @@ -11895,7 +11895,7 @@ components: type: description: Always `image`. type: string - enum: ["image"] + enum: [ "image" ] image: type: object properties: @@ -11918,7 +11918,7 @@ components: type: description: Always `image`. type: string - enum: ["image"] + enum: [ "image" ] image: type: object properties: @@ -11939,7 +11939,7 @@ components: type: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: ["file_search"] + enum: [ "file_search" ] file_search: type: object description: For now, this is always going to be an empty object. @@ -11962,7 +11962,7 @@ components: type: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: ["file_search"] + enum: [ "file_search" ] file_search: type: object description: For now, this is always going to be an empty object. @@ -11982,7 +11982,7 @@ components: type: type: string description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: ["function"] + enum: [ "function" ] function: type: object description: The definition of the function that was called. @@ -12019,7 +12019,7 @@ components: type: type: string description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: ["function"] + enum: [ "function" ] function: type: object description: The definition of the function that was called. @@ -12046,7 +12046,7 @@ components: anchor: description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." type: string - enum: ["last_active_at"] + enum: [ "last_active_at" ] days: description: The number of days after the anchor time that the vector store will expire. type: integer @@ -12067,7 +12067,7 @@ components: object: description: The object type, which is always `vector_store`. type: string - enum: ["vector_store"] + enum: [ "vector_store" ] created_at: description: The Unix timestamp (in seconds) for when the vector store was created. 
type: integer @@ -12104,7 +12104,7 @@ components: status: description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. type: string - enum: ["expired", "in_progress", "completed"] + enum: [ "expired", "in_progress", "completed" ] expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" expires_at: @@ -12225,7 +12225,7 @@ components: type: boolean object: type: string - enum: [vector_store.deleted] + enum: [ vector_store.deleted ] required: - id - object @@ -12242,7 +12242,7 @@ components: object: description: The object type, which is always `vector_store.file`. type: string - enum: ["vector_store.file"] + enum: [ "vector_store.file" ] usage_bytes: description: The total vector store usage in bytes. Note that this may be different from the original file size. type: integer @@ -12255,7 +12255,7 @@ components: status: description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. type: string - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] last_error: type: object description: The last error associated with this vector store file. Will be `null` if there are no errors. @@ -12343,7 +12343,7 @@ components: type: boolean object: type: string - enum: [vector_store.file.deleted] + enum: [ vector_store.file.deleted ] required: - id - object @@ -12360,7 +12360,7 @@ components: object: description: The object type, which is always `vector_store.file_batch`. type: string - enum: ["vector_store.files_batch"] + enum: [ "vector_store.files_batch" ] created_at: description: The Unix timestamp (in seconds) for when the vector store files batch was created. type: integer @@ -12370,7 +12370,7 @@ components: status: description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. 
type: string - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] file_counts: type: object properties: @@ -12473,7 +12473,7 @@ components: properties: event: type: string - enum: ["thread.created"] + enum: [ "thread.created" ] data: $ref: "#/components/schemas/ThreadObject" required: @@ -12489,7 +12489,7 @@ components: properties: event: type: string - enum: ["thread.run.created"] + enum: [ "thread.run.created" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12502,7 +12502,7 @@ components: properties: event: type: string - enum: ["thread.run.queued"] + enum: [ "thread.run.queued" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12515,7 +12515,7 @@ components: properties: event: type: string - enum: ["thread.run.in_progress"] + enum: [ "thread.run.in_progress" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12528,7 +12528,7 @@ components: properties: event: type: string - enum: ["thread.run.requires_action"] + enum: [ "thread.run.requires_action" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12541,7 +12541,7 @@ components: properties: event: type: string - enum: ["thread.run.completed"] + enum: [ "thread.run.completed" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12554,7 +12554,7 @@ components: properties: event: type: string - enum: ["thread.run.failed"] + enum: [ "thread.run.failed" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12567,7 +12567,7 @@ components: properties: event: type: string - enum: ["thread.run.cancelling"] + enum: [ "thread.run.cancelling" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12580,7 +12580,7 @@ components: properties: event: type: string - enum: ["thread.run.cancelled"] + enum: [ "thread.run.cancelled" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12593,7 +12593,7 @@ components: properties: event: type: string - enum: ["thread.run.expired"] + enum: [ "thread.run.expired" ] data: $ref: "#/components/schemas/RunObject" required: @@ -12609,7 +12609,7 @@ components: properties: event: type: string - enum: ["thread.run.step.created"] + enum: [ "thread.run.step.created" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -12622,7 +12622,7 @@ components: properties: event: type: string - enum: ["thread.run.step.in_progress"] + enum: [ "thread.run.step.in_progress" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -12635,7 +12635,7 @@ components: properties: event: type: string - enum: ["thread.run.step.delta"] + enum: [ "thread.run.step.delta" ] data: $ref: "#/components/schemas/RunStepDeltaObject" required: @@ -12648,7 +12648,7 @@ components: properties: event: type: string - enum: ["thread.run.step.completed"] + enum: [ "thread.run.step.completed" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -12661,7 +12661,7 @@ components: properties: event: type: string - enum: ["thread.run.step.failed"] + enum: [ "thread.run.step.failed" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -12674,7 +12674,7 @@ components: properties: event: type: string - enum: ["thread.run.step.cancelled"] + enum: [ "thread.run.step.cancelled" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -12687,7 +12687,7 @@ components: properties: event: type: string - enum: ["thread.run.step.expired"] + enum: [ "thread.run.step.expired" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -12703,7 +12703,7 @@ components: properties: event: type: 
string - enum: ["thread.message.created"] + enum: [ "thread.message.created" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -12716,7 +12716,7 @@ components: properties: event: type: string - enum: ["thread.message.in_progress"] + enum: [ "thread.message.in_progress" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -12729,7 +12729,7 @@ components: properties: event: type: string - enum: ["thread.message.delta"] + enum: [ "thread.message.delta" ] data: $ref: "#/components/schemas/MessageDeltaObject" required: @@ -12742,7 +12742,7 @@ components: properties: event: type: string - enum: ["thread.message.completed"] + enum: [ "thread.message.completed" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -12755,7 +12755,7 @@ components: properties: event: type: string - enum: ["thread.message.incomplete"] + enum: [ "thread.message.incomplete" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -12770,7 +12770,7 @@ components: properties: event: type: string - enum: ["error"] + enum: [ "error" ] data: $ref: "#/components/schemas/Error" required: @@ -12785,10 +12785,10 @@ components: properties: event: type: string - enum: ["done"] + enum: [ "done" ] data: type: string - enum: ["[DONE]"] + enum: [ "[DONE]" ] required: - event - data @@ -12803,7 +12803,7 @@ components: type: string object: type: string - enum: [batch] + enum: [ batch ] description: The object type, which is always `batch`. endpoint: type: string @@ -12928,7 +12928,7 @@ components: description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. method: type: string - enum: ["POST"] + enum: [ "POST" ] description: The HTTP method to be used for the request. Currently only `POST` is supported. url: type: string @@ -12994,14 +12994,14 @@ components: type: boolean object: type: string - enum: [list] + enum: [ list ] required: - object - data - has_more security: - - ApiKeyAuth: [] + - ApiKeyAuth: [ ] x-oaiMeta: navigationGroups: From f1109fca7a171e860f4c36f8d715c1709d282bf1 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 29 May 2024 17:55:23 +0200 Subject: [PATCH 030/251] feat: Support FastChat OpenAI-compatible API (#444) --- packages/openai_dart/README.md | 2 +- ...reate_chat_completion_stream_response.dart | 4 +- .../src/generated/schema/function_object.dart | 2 +- .../generated/schema/function_parameters.dart | 2 +- .../src/generated/schema/schema.freezed.dart | 66 +- .../lib/src/generated/schema/schema.g.dart | 8 +- packages/openai_dart/oas/openapi_curated.yaml | 13 +- .../openai_dart/oas/openapi_official.yaml | 1074 +++++++++-------- 8 files changed, 601 insertions(+), 570 deletions(-) diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md index f020d128..76dcd335 100644 --- a/packages/openai_dart/README.md +++ b/packages/openai_dart/README.md @@ -16,7 +16,7 @@ Unofficial Dart client for [OpenAI](https://platform.openai.com/docs/api-referen - Custom base URL, headers and query params support (e.g. HTTP proxies) - Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) - Partial Azure OpenAI API support -- It can be used to consume OpenAI-compatible APIs like [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), etc. 
+- It can be used to consume OpenAI-compatible APIs like [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc. **Supported endpoints:** diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart index 18cab5fa..724f4066 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart @@ -24,7 +24,7 @@ class CreateChatCompletionStreamResponse required List choices, /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - required int created, + @JsonKey(includeIfNull: false) int? created, /// The model to generate the completion. @JsonKey(includeIfNull: false) String? model, @@ -36,7 +36,7 @@ class CreateChatCompletionStreamResponse String? systemFingerprint, /// The object type, which is always `chat.completion.chunk`. - required String object, + @JsonKey(includeIfNull: false) String? object, /// Usage statistics for the completion request. @JsonKey(includeIfNull: false) CompletionUsage? usage, diff --git a/packages/openai_dart/lib/src/generated/schema/function_object.dart b/packages/openai_dart/lib/src/generated/schema/function_object.dart index 8049253e..647b4e0a 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_object.dart @@ -21,7 +21,7 @@ class FunctionObject with _$FunctionObject { /// A description of what the function does, used by the model to choose when and how to call the function. @JsonKey(includeIfNull: false) String? description, - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) FunctionParameters? parameters, diff --git a/packages/openai_dart/lib/src/generated/schema/function_parameters.dart b/packages/openai_dart/lib/src/generated/schema/function_parameters.dart index abd11036..2429f8ba 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_parameters.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_parameters.dart @@ -8,7 +8,7 @@ part of open_a_i_schema; // TYPE: FunctionParameters // ========================================== -/// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. 
+/// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. typedef FunctionParameters = Map; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 1395bc5a..16efa483 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -6695,7 +6695,7 @@ mixin _$FunctionObject { @JsonKey(includeIfNull: false) String? get description => throw _privateConstructorUsedError; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) @@ -6821,12 +6821,12 @@ class _$FunctionObjectImpl extends _FunctionObject { @JsonKey(includeIfNull: false) final String? description; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. final Map? _parameters; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @override @@ -6898,7 +6898,7 @@ abstract class _FunctionObject extends FunctionObject { String? get description; @override - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. 
+ /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) @@ -9004,7 +9004,8 @@ mixin _$CreateChatCompletionStreamResponse { throw _privateConstructorUsedError; /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - int get created => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + int? get created => throw _privateConstructorUsedError; /// The model to generate the completion. @JsonKey(includeIfNull: false) @@ -9017,7 +9018,8 @@ mixin _$CreateChatCompletionStreamResponse { String? get systemFingerprint => throw _privateConstructorUsedError; /// The object type, which is always `chat.completion.chunk`. - String get object => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? get object => throw _privateConstructorUsedError; /// Usage statistics for the completion request. @JsonKey(includeIfNull: false) @@ -9041,11 +9043,11 @@ abstract class $CreateChatCompletionStreamResponseCopyWith<$Res> { $Res call( {@JsonKey(includeIfNull: false) String? id, List choices, - int created, + @JsonKey(includeIfNull: false) int? created, @JsonKey(includeIfNull: false) String? model, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, - String object, + @JsonKey(includeIfNull: false) String? object, @JsonKey(includeIfNull: false) CompletionUsage? usage}); $CompletionUsageCopyWith<$Res>? get usage; @@ -9067,10 +9069,10 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, $Res call({ Object? id = freezed, Object? choices = null, - Object? created = null, + Object? created = freezed, Object? model = freezed, Object? systemFingerprint = freezed, - Object? object = null, + Object? object = freezed, Object? usage = freezed, }) { return _then(_value.copyWith( @@ -9082,10 +9084,10 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ? _value.choices : choices // ignore: cast_nullable_to_non_nullable as List, - created: null == created + created: freezed == created ? _value.created : created // ignore: cast_nullable_to_non_nullable - as int, + as int?, model: freezed == model ? _value.model : model // ignore: cast_nullable_to_non_nullable @@ -9094,10 +9096,10 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable as String?, - object: null == object + object: freezed == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as String, + as String?, usage: freezed == usage ? _value.usage : usage // ignore: cast_nullable_to_non_nullable @@ -9130,11 +9132,11 @@ abstract class _$$CreateChatCompletionStreamResponseImplCopyWith<$Res> $Res call( {@JsonKey(includeIfNull: false) String? id, List choices, - int created, + @JsonKey(includeIfNull: false) int? created, @JsonKey(includeIfNull: false) String? model, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, - String object, + @JsonKey(includeIfNull: false) String? object, @JsonKey(includeIfNull: false) CompletionUsage? 
usage}); @override @@ -9156,10 +9158,10 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> $Res call({ Object? id = freezed, Object? choices = null, - Object? created = null, + Object? created = freezed, Object? model = freezed, Object? systemFingerprint = freezed, - Object? object = null, + Object? object = freezed, Object? usage = freezed, }) { return _then(_$CreateChatCompletionStreamResponseImpl( @@ -9171,10 +9173,10 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> ? _value._choices : choices // ignore: cast_nullable_to_non_nullable as List, - created: null == created + created: freezed == created ? _value.created : created // ignore: cast_nullable_to_non_nullable - as int, + as int?, model: freezed == model ? _value.model : model // ignore: cast_nullable_to_non_nullable @@ -9183,10 +9185,10 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable as String?, - object: null == object + object: freezed == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as String, + as String?, usage: freezed == usage ? _value.usage : usage // ignore: cast_nullable_to_non_nullable @@ -9202,11 +9204,11 @@ class _$CreateChatCompletionStreamResponseImpl const _$CreateChatCompletionStreamResponseImpl( {@JsonKey(includeIfNull: false) this.id, required final List choices, - required this.created, + @JsonKey(includeIfNull: false) this.created, @JsonKey(includeIfNull: false) this.model, @JsonKey(name: 'system_fingerprint', includeIfNull: false) this.systemFingerprint, - required this.object, + @JsonKey(includeIfNull: false) this.object, @JsonKey(includeIfNull: false) this.usage}) : _choices = choices, super._(); @@ -9235,7 +9237,8 @@ class _$CreateChatCompletionStreamResponseImpl /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. @override - final int created; + @JsonKey(includeIfNull: false) + final int? created; /// The model to generate the completion. @override @@ -9251,7 +9254,8 @@ class _$CreateChatCompletionStreamResponseImpl /// The object type, which is always `chat.completion.chunk`. @override - final String object; + @JsonKey(includeIfNull: false) + final String? object; /// Usage statistics for the completion request. @override @@ -9311,11 +9315,11 @@ abstract class _CreateChatCompletionStreamResponse const factory _CreateChatCompletionStreamResponse( {@JsonKey(includeIfNull: false) final String? id, required final List choices, - required final int created, + @JsonKey(includeIfNull: false) final int? created, @JsonKey(includeIfNull: false) final String? model, @JsonKey(name: 'system_fingerprint', includeIfNull: false) final String? systemFingerprint, - required final String object, + @JsonKey(includeIfNull: false) final String? object, @JsonKey(includeIfNull: false) final CompletionUsage? usage}) = _$CreateChatCompletionStreamResponseImpl; const _CreateChatCompletionStreamResponse._() : super._(); @@ -9337,7 +9341,8 @@ abstract class _CreateChatCompletionStreamResponse @override /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - int get created; + @JsonKey(includeIfNull: false) + int? get created; @override /// The model to generate the completion. @@ -9353,7 +9358,8 @@ abstract class _CreateChatCompletionStreamResponse @override /// The object type, which is always `chat.completion.chunk`. 
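The hunks above relax `created` and `object` on `CreateChatCompletionStreamResponse` from required to nullable, so that streamed chunks from OpenAI-compatible backends such as FastChat, which omit those fields for some models, still deserialize. A minimal sketch of consuming such a stream with `openai_dart`, also illustrating the custom `baseUrl` usage the README mentions for OpenAI-compatible APIs; the local FastChat URL and model name are placeholders, and the constructor and method names follow the package's documented usage rather than anything guaranteed by this diff, so treat them as assumptions:

```dart
import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  // Point the client at an OpenAI-compatible server; the URL below assumes a
  // local FastChat deployment and is only illustrative.
  final client = OpenAIClient(
    apiKey: 'unused-by-fastchat',
    baseUrl: 'http://localhost:8000/v1',
  );

  final stream = client.createChatCompletionStream(
    request: CreateChatCompletionRequest(
      model: ChatCompletionModel.modelId('vicuna-7b-v1.5'),
      messages: [
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string('Hello!'),
        ),
      ],
    ),
  );

  await for (final chunk in stream) {
    if (chunk.choices.isEmpty) continue;
    // `created` and `object` are nullable after this change, so fall back to
    // sensible defaults instead of assuming they are present.
    final created = chunk.created ?? 0;
    final object = chunk.object ?? 'chat.completion.chunk';
    final delta = chunk.choices.first.delta.content ?? '';
    print('[$object @ $created] $delta');
  }

  client.endSession();
}
```

Falling back to defaults like this keeps code that logs or re-serializes chunks working unchanged against both the official API and FastChat-style servers.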
- String get object; + @JsonKey(includeIfNull: false) + String? get object; @override /// Usage statistics for the completion request. diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 4062dc95..8b4963d6 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -835,10 +835,10 @@ _$CreateChatCompletionStreamResponseImpl .map((e) => ChatCompletionStreamResponseChoice.fromJson( e as Map)) .toList(), - created: json['created'] as int, + created: json['created'] as int?, model: json['model'] as String?, systemFingerprint: json['system_fingerprint'] as String?, - object: json['object'] as String, + object: json['object'] as String?, usage: json['usage'] == null ? null : CompletionUsage.fromJson(json['usage'] as Map), @@ -856,10 +856,10 @@ Map _$$CreateChatCompletionStreamResponseImplToJson( writeNotNull('id', instance.id); val['choices'] = instance.choices.map((e) => e.toJson()).toList(); - val['created'] = instance.created; + writeNotNull('created', instance.created); writeNotNull('model', instance.model); writeNotNull('system_fingerprint', instance.systemFingerprint); - val['object'] = instance.object; + writeNotNull('object', instance.object); writeNotNull('usage', instance.usage?.toJson()); return val; } diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index f3eb8a26..07b38bb8 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -2212,7 +2212,7 @@ components: - name FunctionParameters: type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." additionalProperties: true ChatCompletionTool: type: object @@ -2426,10 +2426,10 @@ components: $ref: "#/components/schemas/CompletionUsage" required: - choices - - created + # - created # Made nullable to support FastChat API which doesn't return this field with some models # - id # Made nullable to support OpenRouter API which doesn't return this field with some models # - model # Made nullable to support TogetherAI API which doesn't return this field with some models - - object + # - object # Made nullable to support FastChat API which doesn't return this field with some models ChatCompletionStreamResponseChoice: type: object description: A choice the model generated for the input prompt. @@ -6128,7 +6128,12 @@ components: nullable: true BatchEndpoint: type: string - enum: [ "/v1/chat/completions", "/v1/embeddings", "/v1/completions" ] + enum: + [ + "/v1/chat/completions", + "/v1/embeddings", + "/v1/completions", + ] description: The endpoint to be used for all requests in the batch. 
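As the curated `FunctionParameters` description above says, a function's `parameters` are just a JSON-Schema object (a plain map), and omitting `parameters` declares a function with an empty parameter list. A short, hedged sketch of building a tool from the `FunctionObject` and `ChatCompletionTool` schemas in this spec; the weather function mirrors the example used elsewhere in the official spec and is only illustrative:

```dart
import 'package:openai_dart/openai_dart.dart';

// A tool whose `parameters` field is a plain JSON-Schema map
// (FunctionParameters is a typedef for Map<String, dynamic>).
final weatherTool = ChatCompletionTool(
  type: ChatCompletionToolType.function,
  function: FunctionObject(
    name: 'get_current_weather',
    description: 'Get the current weather in a given location',
    parameters: {
      'type': 'object',
      'properties': {
        'location': {
          'type': 'string',
          'description': 'The city and state, e.g. San Francisco, CA',
        },
        'unit': {
          'type': 'string',
          'enum': ['celsius', 'fahrenheit'],
        },
      },
      'required': ['location'],
    },
  ),
);

// Omitting `parameters` entirely defines a zero-argument function.
final pingTool = ChatCompletionTool(
  type: ChatCompletionToolType.function,
  function: FunctionObject(name: 'ping'),
);
```

Both tools can then be passed through the `tools` list of a `CreateChatCompletionRequest`.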
Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. BatchCompletionWindow: type: string diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 2f18ad09..395d6481 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -87,7 +87,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -95,22 +95,22 @@ paths: {"role": "user", "content": "Hello!"} ] ) - + print(completion.choices[0].message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ messages: [{ role: "system", content: "You are a helpful assistant." }], model: "VAR_model_id", }); - + console.log(completion.choices[0]); } - + main(); response: &chat_completion_example | { @@ -163,9 +163,9 @@ paths: }' python: | from openai import OpenAI - + client = OpenAI() - + response = client.chat.completions.create( model="gpt-4-turbo", messages=[ @@ -182,13 +182,13 @@ paths: ], max_tokens=300, ) - + print(response.choices[0]) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.chat.completions.create({ model: "gpt-4-turbo", @@ -254,7 +254,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -263,15 +263,15 @@ paths: ], stream=True ) - + for chunk in completion: print(chunk.choices[0].delta) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ model: "VAR_model_id", @@ -281,20 +281,20 @@ paths: ], stream: true, }); - + for await (const chunk of completion) { console.log(chunk.choices[0].delta.content); } } - + main(); response: &chat_completion_chunk_example | {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} - + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} - + .... 
- + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} - title: Functions request: @@ -338,7 +338,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -366,13 +366,13 @@ paths: tools=tools, tool_choice="auto" ) - + print(completion) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]; const tools = [ @@ -395,17 +395,17 @@ paths: } } ]; - + const response = await openai.chat.completions.create({ model: "gpt-4-turbo", messages: messages, tools: tools, tool_choice: "auto", }); - + console.log(response); } - + main(); response: &chat_completion_function_example | { @@ -460,7 +460,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -469,14 +469,14 @@ paths: logprobs=True, top_logprobs=2 ) - + print(completion.choices[0].message) print(completion.choices[0].logprobs) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ messages: [{ role: "user", content: "Hello!" }], @@ -484,10 +484,10 @@ paths: logprobs: true, top_logprobs: 2, }); - + console.log(completion.choices[0]); } - + main(); response: | { @@ -716,7 +716,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.completions.create( model="VAR_model_id", prompt="Say this is a test", @@ -725,9 +725,9 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.completions.create({ model: "VAR_model_id", @@ -735,7 +735,7 @@ paths: max_tokens: 7, temperature: 0, }); - + console.log(completion); } main(); @@ -776,7 +776,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + for chunk in client.completions.create( model="VAR_model_id", prompt="Say this is a test", @@ -787,16 +787,16 @@ paths: print(chunk.choices[0].text) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.completions.create({ model: "VAR_model_id", prompt: "Say this is a test.", stream: true, }); - + for await (const chunk of stream) { console.log(chunk.choices[0].text) } @@ -857,7 +857,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.images.generate( model="dall-e-3", prompt="A cute baby sea otter", @@ -866,12 +866,12 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.generate({ model: "dall-e-3", prompt: "A cute baby sea otter" }); - + console.log(image.data); } main(); @@ -923,7 +923,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.images.edit( image=open("otter.png", "rb"), mask=open("mask.png", "rb"), @@ -934,16 +934,16 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.edit({ image: fs.createReadStream("otter.png"), mask: fs.createReadStream("mask.png"), prompt: "A cute baby sea otter wearing a beret", }); - + console.log(image.data); } main(); @@ -993,7 
+993,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.images.create_variation( image=open("image_edit_original.png", "rb"), n=2, @@ -1002,14 +1002,14 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.createVariation({ image: fs.createReadStream("otter.png"), }); - + console.log(image.data); } main(); @@ -1063,7 +1063,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.embeddings.create( model="text-embedding-ada-002", input="The food was delicious and the waiter...", @@ -1071,19 +1071,19 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const embedding = await openai.embeddings.create({ model: "text-embedding-ada-002", input: "The quick brown fox jumped over the lazy dog", encoding_format: "float", }); - + console.log(embedding); } - + main(); response: | { @@ -1151,7 +1151,7 @@ paths: python: | from pathlib import Path import openai - + speech_file_path = Path(__file__).parent / "speech.mp3" response = openai.audio.speech.create( model="tts-1", @@ -1163,11 +1163,11 @@ paths: import fs from "fs"; import path from "path"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + const speechFile = path.resolve("./speech.mp3"); - + async function main() { const mp3 = await openai.audio.speech.create({ model: "tts-1", @@ -1216,7 +1216,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( model="whisper-1", @@ -1225,15 +1225,15 @@ paths: node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), model: "whisper-1", }); - + console.log(transcription.text); } main(); @@ -1254,7 +1254,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( file=audio_file, @@ -1262,14 +1262,14 @@ paths: response_format="verbose_json", timestamp_granularities=["word"] ) - + print(transcript.words) node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), @@ -1277,7 +1277,7 @@ paths: response_format: "verbose_json", timestamp_granularities: ["word"] }); - + console.log(transcription.text); } main(); @@ -1314,7 +1314,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( file=audio_file, @@ -1322,14 +1322,14 @@ paths: response_format="verbose_json", timestamp_granularities=["segment"] ) - + print(transcript.words) node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), @@ -1337,7 +1337,7 @@ paths: response_format: "verbose_json", timestamp_granularities: ["segment"] }); - + console.log(transcription.text); } main(); @@ -1401,7 +1401,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = 
client.audio.translations.create( model="whisper-1", @@ -1410,15 +1410,15 @@ paths: node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const translation = await openai.audio.translations.create({ file: fs.createReadStream("speech.mp3"), model: "whisper-1", }); - + console.log(translation.text); } main(); @@ -1459,21 +1459,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.files.list(); - + for await (const file of list) { console.log(file); } } - + main(); response: | { @@ -1503,13 +1503,13 @@ paths: - Files summary: | Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB. - + The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details. - + The Fine-tuning API only supports `.jsonl` files. - + The Batch API only supports `.jsonl` files up to 100 MB in size. - + Please [contact us](https://help.openai.com/) if you need to increase these storage limits. requestBody: required: true @@ -1538,7 +1538,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.create( file=open("mydata.jsonl", "rb"), purpose="fine-tune" @@ -1546,18 +1546,18 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.create({ file: fs.createReadStream("mydata.jsonl"), purpose: "fine-tune", }); - + console.log(file); } - + main(); response: | { @@ -1601,19 +1601,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.delete("file-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.del("file-abc123"); - + console.log(file); } - + main(); response: | { @@ -1652,19 +1652,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.retrieve("file-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.retrieve("file-abc123"); - + console.log(file); } - + main(); response: | { @@ -1707,19 +1707,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + content = client.files.content("file-abc123") node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.content("file-abc123"); - + console.log(file); } - + main(); /fine_tuning/jobs: @@ -1729,9 +1729,9 @@ paths: - Fine-tuning summary: | Creates a fine-tuning job which begins the process of creating a new model from a given dataset. - + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
- + [Learn more about fine-tuning](/docs/guides/fine-tuning) requestBody: required: true @@ -1764,24 +1764,24 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", model="gpt-3.5-turbo" ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123" }); - + console.log(fineTune); } - + main(); response: | { @@ -1812,7 +1812,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", model="gpt-3.5-turbo", @@ -1822,19 +1822,19 @@ paths: ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", model: "gpt-3.5-turbo", hyperparameters: { n_epochs: 2 } }); - + console.log(fineTune); } - + main(); response: | { @@ -1864,7 +1864,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", validation_file="file-def456", @@ -1872,18 +1872,18 @@ paths: ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", validation_file: "file-abc123" }); - + console.log(fineTune); } - + main(); response: | { @@ -1983,21 +1983,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.fineTuning.jobs.list(); - + for await (const fineTune of list) { console.log(fineTune); } } - + main(); response: | { @@ -2023,7 +2023,7 @@ paths: - Fine-tuning summary: | Get info about a fine-tuning job. 
- + [Learn more about fine-tuning](/docs/guides/fine-tuning) parameters: - in: path @@ -2053,19 +2053,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.retrieve("ftjob-abc123") node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.retrieve("ftjob-abc123"); - + console.log(fineTune); } - + main(); response: &fine_tuning_example | { @@ -2140,24 +2140,24 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.list_events( fine_tuning_job_id="ftjob-abc123", limit=2 ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.fineTuning.list_events(id="ftjob-abc123", limit=2); - + for await (const fineTune of list) { console.log(fineTune); } } - + main(); response: | { @@ -2219,16 +2219,16 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.cancel("ftjob-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.cancel("ftjob-abc123"); - + console.log(fineTune); } main(); @@ -2352,16 +2352,16 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.models.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.models.list(); - + for await (const model of list) { console.log(model); } @@ -2426,19 +2426,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.models.retrieve("VAR_model_id") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const model = await openai.models.retrieve("VAR_model_id"); - + console.log(model); } - + main(); response: &retrieve_model_response | { @@ -2480,16 +2480,16 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.models.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); - + console.log(model); } main(); @@ -2535,17 +2535,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + moderation = client.moderations.create(input="I want to kill them.") print(moderation) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const moderation = await openai.moderations.create({ input: "I want to kill them." }); - + console.log(moderation); } main(); @@ -2643,7 +2643,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistants = client.beta.assistants.list( order="desc", limit="20", @@ -2651,18 +2651,18 @@ paths: print(my_assistants.data) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistants = await openai.beta.assistants.list({ order: "desc", limit: "20", }); - + console.log(myAssistants.data); } - + main(); response: &list_assistants_example | { @@ -2759,7 +2759,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.create( instructions="You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.", name="Math Tutor", @@ -2769,9 +2769,9 @@ paths: print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.create({ instructions: @@ -2780,10 +2780,10 @@ paths: tools: [{ type: "code_interpreter" }], model: "gpt-4-turbo", }); - + console.log(myAssistant); } - + main(); response: &create_assistants_example | { @@ -2820,7 +2820,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.create( instructions="You are an HR bot, and you have access to files to answer employee questions about company policies.", name="HR Helper", @@ -2831,9 +2831,9 @@ paths: print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.create({ instructions: @@ -2847,10 +2847,10 @@ paths: }, model: "gpt-4-turbo" }); - + console.log(myAssistant); } - + main(); response: | { @@ -2912,22 +2912,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.retrieve("asst_abc123") print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.retrieve( "asst_abc123" ); - + console.log(myAssistant); } - + main(); response: | { @@ -2993,7 +2993,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_updated_assistant = client.beta.assistants.update( "asst_abc123", instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", @@ -3001,13 +3001,13 @@ paths: tools=[{"type": "file_search"}], model="gpt-4-turbo" ) - + print(my_updated_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myUpdatedAssistant = await openai.beta.assistants.update( "asst_abc123", @@ -3019,10 +3019,10 @@ paths: model: "gpt-4-turbo" } ); - + console.log(myUpdatedAssistant); } - + main(); response: | { @@ -3083,17 +3083,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.beta.assistants.delete("asst_abc123") print(response) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.beta.assistants.del("asst_abc123"); - + console.log(response); } main(); @@ -3139,20 +3139,20 @@ paths: python: | from openai import OpenAI client = OpenAI() - + empty_thread = client.beta.threads.create() print(empty_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const emptyThread = await openai.beta.threads.create(); - + console.log(emptyThread); } - + main(); response: | { @@ -3181,7 +3181,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message_thread = client.beta.threads.create( messages=[ { @@ -3194,13 +3194,13 @@ paths: }, ] ) - + print(message_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const messageThread = await openai.beta.threads.create({ messages: [ @@ -3214,10 +3214,10 @@ paths: }, ], }); - + console.log(messageThread); } - + main(); response: | { @@ -3263,22 +3263,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_thread = client.beta.threads.retrieve("thread_abc123") print(my_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myThread = await openai.beta.threads.retrieve( "thread_abc123" ); - + console.log(myThread); } - + main(); response: | { @@ -3338,7 +3338,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_updated_thread = client.beta.threads.update( "thread_abc123", metadata={ @@ -3349,9 +3349,9 @@ paths: print(my_updated_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const updatedThread = await openai.beta.threads.update( "thread_abc123", @@ -3359,10 +3359,10 @@ paths: metadata: { modified: "true", user: "abc123" }, } ); - + console.log(updatedThread); } - + main(); response: | { @@ -3410,17 +3410,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.beta.threads.delete("thread_abc123") print(response) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.beta.threads.del("thread_abc123"); - + console.log(response); } main(); @@ -3496,22 +3496,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + thread_messages = client.beta.threads.messages.list("thread_abc123") print(thread_messages.data) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const threadMessages = await openai.beta.threads.messages.list( "thread_abc123" ); - + console.log(threadMessages.data); } - + main(); response: | { @@ -3606,7 +3606,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + thread_message = 
client.beta.threads.messages.create( "thread_abc123", role="user", @@ -3615,18 +3615,18 @@ paths: print(thread_message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const threadMessages = await openai.beta.threads.messages.create( "thread_abc123", { role: "user", content: "How does AI work? Explain it in simple terms." } ); - + console.log(threadMessages); } - + main(); response: | { @@ -3691,7 +3691,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message = client.beta.threads.messages.retrieve( message_id="msg_abc123", thread_id="thread_abc123", @@ -3699,18 +3699,18 @@ paths: print(message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const message = await openai.beta.threads.messages.retrieve( "thread_abc123", "msg_abc123" ); - + console.log(message); } - + main(); response: | { @@ -3785,7 +3785,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message = client.beta.threads.messages.update( message_id="msg_abc12", thread_id="thread_abc123", @@ -3797,9 +3797,9 @@ paths: print(message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const message = await openai.beta.threads.messages.update( "thread_abc123", @@ -3875,7 +3875,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_message = client.beta.threads.messages.delete( message_id="msg_abc12", thread_id="thread_abc123", @@ -3883,15 +3883,15 @@ paths: print(deleted_message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const deletedMessage = await openai.beta.threads.messages.del( "thread_abc123", "msg_abc123" ); - + console.log(deletedMessage); } response: | @@ -3901,7 +3901,6 @@ paths: "deleted": true } - /threads/runs: post: operationId: createThreadAndRun @@ -3945,7 +3944,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.create_and_run( assistant_id="asst_abc123", thread={ @@ -3954,13 +3953,13 @@ paths: ] } ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.createAndRun({ assistant_id: "asst_abc123", @@ -3970,10 +3969,10 @@ paths: ], }, }); - + console.log(run); } - + main(); response: | { @@ -4028,7 +4027,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.create_and_run( assistant_id="asst_123", thread={ @@ -4038,14 +4037,14 @@ paths: }, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.createAndRun({ assistant_id: "asst_123", @@ -4056,58 +4055,58 @@ paths: }, stream: true }); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.created data: {"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} - + event: thread.run.created data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} - + event: thread.run.in_progress data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} - + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], "metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], "metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... 
- + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! How can I assist you today?","annotations":[]}}], "metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"} - + event: done data: [DONE] @@ -4153,7 +4152,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -4174,7 +4173,7 @@ paths: } } ] - + stream = client.beta.threads.create_and_run( thread={ "messages": [ @@ -4185,14 +4184,14 @@ paths: tools=tools, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + const tools = [ { "type": "function", @@ -4213,7 +4212,7 @@ paths: } } ]; - + async function main() { const stream = await openai.beta.threads.createAndRun({ assistant_id: "asst_123", @@ -4225,52 +4224,52 @@ paths: tools: tools, stream: true }); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.created data: {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} - + event: thread.run.created data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.in_progress data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"","output":null}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"{\""}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"location"}}]}}} - + ... - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"ahrenheit"}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} - + event: thread.run.requires_action data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"}} - + event: done data: [DONE] @@ -4333,25 +4332,25 @@ paths: python: | from openai import OpenAI client = OpenAI() - + runs = client.beta.threads.runs.list( "thread_abc123" ) - + print(runs) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const runs = await openai.beta.threads.runs.list( "thread_abc123" ); - + console.log(runs); } - + main(); response: | { @@ -4498,27 +4497,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.create( thread_id="thread_abc123", assistant_id="asst_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.create( "thread_abc123", { assistant_id: "asst_abc123" } ); - + console.log(run); } - + main(); response: &run_object_example | { @@ -4569,74 +4568,74 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.runs.create( thread_id="thread_123", assistant_id="asst_123", stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.runs.create( "thread_123", { assistant_id: "asst_123", stream: true } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.created data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.in_progress data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710330642,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! 
How can I assist you today?","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} - + event: done data: [DONE] @@ -4677,7 +4676,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -4698,21 +4697,21 @@ paths: } } ] - + stream = client.beta.threads.runs.create( thread_id="thread_abc123", assistant_id="asst_abc123", tools=tools, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + const tools = [ { "type": "function", @@ -4733,7 +4732,7 @@ paths: } } ]; - + async function main() { const stream = await openai.beta.threads.runs.create( "thread_abc123", @@ -4743,55 +4742,55 @@ paths: stream: true } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.created data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.in_progress data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! 
How can I assist you today?","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} - + event: done data: [DONE] @@ -4835,27 +4834,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.retrieve( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.retrieve( "thread_abc123", "run_abc123" ); - + console.log(run); } - + main(); response: | { @@ -4947,19 +4946,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.update( thread_id="thread_abc123", run_id="run_abc123", metadata={"user_id": "user_abc123"}, ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.update( "thread_abc123", @@ -4970,10 +4969,10 @@ paths: }, } ); - + console.log(run); } - + main(); response: | { @@ -5082,7 +5081,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.submit_tool_outputs( thread_id="thread_123", run_id="run_123", @@ -5093,13 +5092,13 @@ paths: } ] ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.submitToolOutputs( "thread_123", @@ -5113,10 +5112,10 @@ paths: ], } ); - + console.log(run); } - + main(); response: | { @@ -5190,7 +5189,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.runs.submit_tool_outputs( thread_id="thread_123", run_id="run_123", @@ -5202,14 +5201,14 @@ paths: ], stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.runs.submitToolOutputs( "thread_123", @@ -5223,61 +5222,61 @@ paths: ], } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.step.completed data: 
{"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.in_progress data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.step.created data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} - + event: thread.message.created data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"The","annotations":[]}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" current"}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" weather"}}]}} - + ... 
- + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" sunny"}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}} - + event: thread.message.completed data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710352477,"role":"assistant","content":[{"type":"text","text":{"value":"The current weather in San Francisco, CA is 70 degrees Fahrenheit and sunny.","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}} - + event: thread.run.completed data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} - + event: done data: [DONE] @@ -5322,27 +5321,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.cancel( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.cancel( "thread_abc123", "run_abc123" ); - + console.log(run); } - + main(); response: | { @@ -5442,17 +5441,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run_steps = client.beta.threads.runs.steps.list( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run_steps) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const runStep = await openai.beta.threads.runs.steps.list( "thread_abc123", @@ -5460,7 +5459,7 @@ paths: ); console.log(runStep); } - + main(); response: | { @@ -5545,18 +5544,18 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run_step = client.beta.threads.runs.steps.retrieve( thread_id="thread_abc123", run_id="run_abc123", step_id="step_abc123" ) - + print(run_step) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const runStep = await openai.beta.threads.runs.steps.retrieve( "thread_abc123", @@ -5565,7 +5564,7 @@ paths: ); console.log(runStep); } - + main(); response: &run_step_object_example | { @@ -5648,18 +5647,18 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_stores = client.beta.vector_stores.list() print(vector_stores) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStores = await openai.beta.vectorStores.list(); console.log(vectorStores); } - + main(); response: | { @@ -5734,7 +5733,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.create( name="Support FAQ" ) @@ -5742,14 +5741,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.create({ name: "Support FAQ" }); console.log(vectorStore); } - + main(); response: | { @@ -5802,7 +5801,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.retrieve( vector_store_id="vs_abc123" ) @@ -5810,14 +5809,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.retrieve( "vs_abc123" ); console.log(vectorStore); } - + main(); response: | { @@ -5868,7 +5867,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.update( vector_store_id="vs_abc123", name="Support FAQ" @@ -5877,7 +5876,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.update( "vs_abc123", @@ -5887,7 +5886,7 @@ paths: ); console.log(vectorStore); } - + main(); response: | { @@ -5940,7 +5939,7 @@ 
paths: python: | from openai import OpenAI client = OpenAI() - + deleted_vector_store = client.beta.vector_stores.delete( vector_store_id="vs_abc123" ) @@ -5948,14 +5947,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const deletedVectorStore = await openai.beta.vectorStores.del( "vs_abc123" ); console.log(deletedVectorStore); } - + main(); response: | { @@ -6029,7 +6028,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_files = client.beta.vector_stores.files.list( vector_store_id="vs_abc123" ) @@ -6037,14 +6036,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFiles = await openai.beta.vectorStores.files.list( "vs_abc123" ); console.log(vectorStoreFiles); } - + main(); response: | { @@ -6112,7 +6111,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file = client.beta.vector_stores.files.create( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6121,7 +6120,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const myVectorStoreFile = await openai.beta.vectorStores.files.create( "vs_abc123", @@ -6131,7 +6130,7 @@ paths: ); console.log(myVectorStoreFile); } - + main(); response: | { @@ -6187,7 +6186,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file = client.beta.vector_stores.files.retrieve( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6196,7 +6195,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFile = await openai.beta.vectorStores.files.retrieve( "vs_abc123", @@ -6204,7 +6203,7 @@ paths: ); console.log(vectorStoreFile); } - + main(); response: | { @@ -6256,7 +6255,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_vector_store_file = client.beta.vector_stores.files.delete( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6265,7 +6264,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const deletedVectorStoreFile = await openai.beta.vectorStores.files.del( "vs_abc123", @@ -6273,7 +6272,7 @@ paths: ); console.log(deletedVectorStoreFile); } - + main(); response: | { @@ -6328,7 +6327,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file_batch = client.beta.vector_stores.file_batches.create( vector_store_id="vs_abc123", file_ids=["file-abc123", "file-abc456"] @@ -6337,7 +6336,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create( "vs_abc123", @@ -6347,7 +6346,7 @@ paths: ); console.log(myVectorStoreFileBatch); } - + main(); response: | { @@ -6408,7 +6407,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file_batch = client.beta.vector_stores.file_batches.retrieve( vector_store_id="vs_abc123", batch_id="vsfb_abc123" @@ -6417,7 +6416,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve( "vs_abc123", @@ -6425,7 +6424,7 @@ paths: ); console.log(vectorStoreFileBatch); } - + main(); response: | { @@ -6485,7 +6484,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + 
deleted_vector_store_file_batch = client.beta.vector_stores.file_batches.cancel( vector_store_id="vs_abc123", file_batch_id="vsfb_abc123" @@ -6494,7 +6493,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const deletedVectorStoreFileBatch = await openai.vector_stores.fileBatches.cancel( "vs_abc123", @@ -6502,7 +6501,7 @@ paths: ); console.log(deletedVectorStoreFileBatch); } - + main(); response: | { @@ -6591,7 +6590,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_files = client.beta.vector_stores.file_batches.list_files( vector_store_id="vs_abc123", batch_id="vsfb_abc123" @@ -6600,7 +6599,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles( "vs_abc123", @@ -6608,7 +6607,7 @@ paths: ); console.log(vectorStoreFiles); } - + main(); response: | { @@ -6653,13 +6652,18 @@ paths: type: string description: | The ID of an uploaded file that contains requests for the new batch. - + See [upload file](/docs/api-reference/files/create) for how to upload a file. - + Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/requestInput), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. endpoint: type: string - enum: [ "/v1/chat/completions", "/v1/embeddings", "/v1/completions" ] + enum: + [ + "/v1/chat/completions", + "/v1/embeddings", + "/v1/completions", + ] description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. 
completion_window: type: string @@ -6696,7 +6700,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.create( input_file_id="file-abc123", endpoint="/v1/chat/completions", @@ -6704,19 +6708,19 @@ paths: ) node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.create({ input_file_id: "file-abc123", endpoint: "/v1/chat/completions", completion_window: "24h" }); - + console.log(batch); } - + main(); response: | { @@ -6787,21 +6791,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.list() node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.batches.list(); - + for await (const batch of list) { console.log(batch); } } - + main(); response: | { @@ -6876,19 +6880,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.retrieve("batch_abc123") node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.retrieve("batch_abc123"); - + console.log(batch); } - + main(); response: &batch_object | { @@ -6955,19 +6959,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.cancel("batch_abc123") node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.cancel("batch_abc123"); - + console.log(batch); } - + main(); response: | { @@ -7076,7 +7080,7 @@ components: prompt: description: &completions_prompt_description | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. - + Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. default: "<|endoftext|>" nullable: true @@ -7110,9 +7114,9 @@ components: nullable: true description: &completions_best_of_description | Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. - + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. - + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. echo: type: boolean @@ -7128,7 +7132,7 @@ components: nullable: true description: &completions_frequency_penalty_description | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) logit_bias: &completions_logit_bias type: object @@ -7139,9 +7143,9 @@ components: type: integer description: &completions_logit_bias_description | Modify the likelihood of specified tokens appearing in the completion. - + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. logprobs: &completions_logprobs_configuration type: integer @@ -7151,7 +7155,7 @@ components: nullable: true description: &completions_logprobs_description | Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - + The maximum value for `logprobs` is 5. max_tokens: type: integer @@ -7161,7 +7165,7 @@ components: nullable: true description: &completions_max_tokens_description | The maximum number of [tokens](/tokenizer) that can be generated in the completion. - + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: type: integer @@ -7172,7 +7176,7 @@ components: nullable: true description: &completions_completions_description | How many completions to generate for each prompt. - + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. presence_penalty: type: number @@ -7182,7 +7186,7 @@ components: nullable: true description: &completions_presence_penalty_description | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) seed: &completions_seed_param type: integer @@ -7191,7 +7195,7 @@ components: nullable: true description: | If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: description: &completions_stop_description > @@ -7221,7 +7225,7 @@ components: suffix: description: | The suffix that comes after a completion of inserted text. - + This parameter is only supported for `gpt-3.5-turbo-instruct`. default: null nullable: true @@ -7236,7 +7240,7 @@ components: nullable: true description: &completions_temperature_description | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - + We generally recommend altering this or `top_p` but not both. top_p: type: number @@ -7247,7 +7251,7 @@ components: nullable: true description: &completions_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or `temperature` but not both. 
user: &end_user_param_configuration type: string @@ -7320,7 +7324,7 @@ components: type: string description: | This fingerprint represents the backend configuration that the model runs with. - + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. object: type: string @@ -7483,6 +7487,7 @@ components: type: object deprecated: true description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + nullable: true properties: arguments: type: string @@ -7538,7 +7543,7 @@ components: FunctionParameters: type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." additionalProperties: true ChatCompletionFunctions: @@ -7601,7 +7606,7 @@ components: `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - + `none` is the default when no tools are present. `auto` is the default if tools are present. oneOf: - type: string @@ -7831,7 +7836,7 @@ components: type: integer description: | Modify the likelihood of specified tokens appearing in the completion. - + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. logprobs: description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. @@ -7847,7 +7852,7 @@ components: max_tokens: description: | The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. - + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. type: integer nullable: true @@ -7870,9 +7875,9 @@ components: type: object description: | An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. 
Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. properties: type: @@ -7943,12 +7948,12 @@ components: deprecated: true description: | Deprecated in favor of `tool_choice`. - + Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. - + `none` is the default when no functions are present. `auto` is the default if functions are present. oneOf: - type: string @@ -7962,7 +7967,7 @@ components: deprecated: true description: | Deprecated in favor of `tools`. - + A list of functions the model may generate JSON inputs for. type: array minItems: 1 @@ -8035,7 +8040,7 @@ components: type: string description: | This fingerprint represents the backend configuration that the model runs with. - + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. object: type: string @@ -8077,7 +8082,8 @@ components: description: &chat_completion_function_finish_reason_description | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. - enum: [ "stop", "length", "function_call", "content_filter" ] + enum: + [ "stop", "length", "function_call", "content_filter" ] index: type: integer description: The index of the choice in the list of choices. @@ -8093,7 +8099,7 @@ components: type: string description: | This fingerprint represents the backend configuration that the model runs with. - + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. object: type: string @@ -8429,7 +8435,7 @@ components: model: description: | Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. - + The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. nullable: false default: "text-moderation-latest" @@ -8597,10 +8603,10 @@ components: purpose: description: | The intended purpose of the uploaded file. - + Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). 
type: string - enum: [ "assistants", "batch", "fine-tune" ] + enum: [ "assistants", "batch", "fine-tune", "vision" ] required: - file - purpose @@ -8636,11 +8642,11 @@ components: training_file: description: | The ID of an uploaded file that contains training data. - + See [upload file](/docs/api-reference/files/create) for how to upload a file. - + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. - + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string example: "file-abc123" @@ -8684,7 +8690,7 @@ components: suffix: description: | A string of up to 18 characters that will be added to your fine-tuned model name. - + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. type: string minLength: 1 @@ -8694,14 +8700,14 @@ components: validation_file: description: | The ID of an uploaded file that contains validation data. - + If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should not be present in both train and validation files. - + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string nullable: true @@ -8980,7 +8986,7 @@ components: required: - text x-oaiMeta: - name: The transcription object + name: The transcription object (JSON) group: audio example: *basic_transcription_response_example @@ -9078,7 +9084,7 @@ components: $ref: "#/components/schemas/TranscriptionSegment" required: [ language, duration, text ] x-oaiMeta: - name: The transcription object + name: The transcription object (Verbose JSON) group: audio example: *verbose_transcription_response_example @@ -9240,7 +9246,7 @@ components: "batch_output", "fine-tune", "fine-tune-results", - "vision" + "vision", ] status: type: string @@ -9647,9 +9653,9 @@ components: AssistantsApiResponseFormatOption: description: | Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: - type: string @@ -9770,7 +9776,7 @@ components: nullable: true description: &run_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or temperature but not both. 
response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" @@ -9923,7 +9929,7 @@ components: nullable: true description: &run_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or temperature but not both. response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" @@ -10014,7 +10020,7 @@ components: nullable: true description: &run_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or temperature but not both. response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" @@ -10222,7 +10228,8 @@ components: code: type: string description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - enum: [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] + enum: + [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] message: type: string description: A human-readable description of the error. @@ -10459,7 +10466,7 @@ components: nullable: true description: &run_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or temperature but not both. stream: type: boolean @@ -12438,21 +12445,21 @@ components: AssistantStreamEvent: description: | Represents an event emitted when streaming a Run. - + Each event in a server-sent events stream has an `event` and `data` property: - + ``` event: thread.created data: {"id": "thread_123", "object": "thread", ...} ``` - + We emit events whenever a new object is created, transitions to a new state, or is being streamed in parts (deltas). For example, we emit `thread.run.created` when a new run is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses to create a message during a run, we emit a `thread.message.created event`, a `thread.message.in_progress` event, many `thread.message.delta` events, and finally a `thread.message.completed` event. - + We may add additional events over time, so we recommend handling unknown events gracefully in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to integrate the Assistants API with streaming. @@ -12550,6 +12557,19 @@ components: description: Occurs when a [run](/docs/api-reference/runs/object) is completed. x-oaiMeta: dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.incomplete" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - type: object properties: event: @@ -13037,7 +13057,7 @@ x-oaiMeta: title: Audio description: | Learn how to turn audio into text or text into audio. 
- + Related guide: [Speech to text](/docs/guides/speech-to-text) navigationGroup: endpoints sections: @@ -13060,7 +13080,7 @@ x-oaiMeta: title: Chat description: | Given a list of messages comprising a conversation, the model will return a response. - + Related guide: [Chat Completions](/docs/guides/text-generation) navigationGroup: endpoints sections: @@ -13077,7 +13097,7 @@ x-oaiMeta: title: Embeddings description: | Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. - + Related guide: [Embeddings](/docs/guides/embeddings) navigationGroup: endpoints sections: @@ -13091,7 +13111,7 @@ x-oaiMeta: title: Fine-tuning description: | Manage fine-tuning jobs to tailor a model to your specific training data. - + Related guide: [Fine-tune models](/docs/guides/fine-tuning) navigationGroup: endpoints sections: @@ -13126,7 +13146,7 @@ x-oaiMeta: title: Batch description: | Create large batches of API requests for asynchronous processing. The Batch API returns completions within 24 hours for a 50% discount. - + Related guide: [Batch](/docs/guides/batch) navigationGroup: endpoints sections: @@ -13179,7 +13199,7 @@ x-oaiMeta: title: Images description: | Given a prompt and/or an input image, the model will generate a new image. - + Related guide: [Image generation](/docs/guides/images) navigationGroup: endpoints sections: @@ -13217,7 +13237,7 @@ x-oaiMeta: title: Moderations description: | Given some input text, outputs if the model classifies it as potentially harmful across several categories. - + Related guide: [Moderations](/docs/guides/moderation) navigationGroup: endpoints sections: @@ -13232,7 +13252,7 @@ x-oaiMeta: beta: true description: | Build assistants that can call models and use tools to perform tasks. - + [Get started with the Assistants API](/docs/assistants) navigationGroup: assistants sections: @@ -13259,7 +13279,7 @@ x-oaiMeta: beta: true description: | Create threads that assistants can interact with. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13283,7 +13303,7 @@ x-oaiMeta: beta: true description: | Create messages within threads - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13310,7 +13330,7 @@ x-oaiMeta: beta: true description: | Represents an execution run on a thread. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13343,7 +13363,7 @@ x-oaiMeta: beta: true description: | Represents the steps (model and tool calls) taken during the run. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13361,7 +13381,7 @@ x-oaiMeta: beta: true description: | Vector stores are used to store files for use by the `file_search` tool. - + Related guide: [File Search](/docs/assistants/tools/file-search) navigationGroup: assistants sections: @@ -13388,7 +13408,7 @@ x-oaiMeta: beta: true description: | Vector store files represent files inside a vector store. - + Related guide: [File Search](/docs/assistants/tools/file-search) navigationGroup: assistants sections: @@ -13412,7 +13432,7 @@ x-oaiMeta: beta: true description: | Vector store file batches represent operations to add multiple files to a vector store. 
- + Related guide: [File Search](/docs/assistants/tools/file-search) navigationGroup: assistants sections: @@ -13436,11 +13456,11 @@ x-oaiMeta: beta: true description: | Stream the result of executing a Run or resuming a Run after submitting tool outputs. - + You can stream events from the [Create Thread and Run](/docs/api-reference/runs/createThreadAndRun), [Create Run](/docs/api-reference/runs/createRun), and [Submit Tool Outputs](/docs/api-reference/runs/submitToolOutputs) endpoints by passing `"stream": true`. The response will be a [Server-Sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) stream. - + Our Node and Python SDKs provide helpful utilities to make streaming easy. Reference the [Assistants API quickstart](/docs/assistants/overview) to learn more. navigationGroup: assistants From 16a335d92d4fb8c02093c008a0d66d48685b7f62 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 29 May 2024 21:59:16 +0200 Subject: [PATCH 031/251] feat: Support buffered stream responses in ollama_dart (#445) --- packages/ollama_dart/lib/src/client.dart | 53 +++++++++++++----------- 1 file changed, 28 insertions(+), 25 deletions(-) diff --git a/packages/ollama_dart/lib/src/client.dart b/packages/ollama_dart/lib/src/client.dart index 2bb5a7be..c5dded40 100644 --- a/packages/ollama_dart/lib/src/client.dart +++ b/packages/ollama_dart/lib/src/client.dart @@ -1,4 +1,5 @@ // ignore_for_file: use_super_parameters +import 'dart:async'; import 'dart:convert'; import 'package:http/http.dart' as http; @@ -56,11 +57,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => GenerateCompletionResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => GenerateCompletionResponse.fromJson(json.decode(d))); } // ------------------------------------------ @@ -85,11 +84,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => GenerateChatCompletionResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => GenerateChatCompletionResponse.fromJson(json.decode(d))); } // ------------------------------------------ @@ -114,11 +111,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => CreateModelResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => CreateModelResponse.fromJson(json.decode(d))); } // ------------------------------------------ @@ -143,11 +138,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => PullModelResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => PullModelResponse.fromJson(json.decode(d))); } // ------------------------------------------ @@ -172,11 +165,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => 
PushModelResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => PushModelResponse.fromJson(json.decode(d))); } @override @@ -184,3 +175,15 @@ Future<http.BaseRequest> onRequest(final http.BaseRequest request) { return onRequestHandler(request); } } + +class _OllamaStreamTransformer + extends StreamTransformerBase<List<int>, String> { + const _OllamaStreamTransformer(); + + @override + Stream<String> bind(final Stream<List<int>> stream) { + return stream // + .transform(utf8.decoder) // + .transform(const LineSplitter()); + } +} From 43005ecdf7253d57c5a24f546860b9d8066de879 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 31 May 2024 08:45:57 +0200 Subject: [PATCH 032/251] docs: Add WikivoyageEU example, a fully local RAG with Llama3 and ObjectBox (#446) --- examples/wikivoyage_eu/.gitignore | 3 + examples/wikivoyage_eu/README.md | 89 +++++ examples/wikivoyage_eu/analysis_options.yaml | 1 + examples/wikivoyage_eu/bin/injestion.dart | 21 ++ examples/wikivoyage_eu/bin/wikivoyage_eu.dart | 82 ++++ .../bin/wikivoyage_eu_dataset.csv | 161 ++++++++ examples/wikivoyage_eu/pubspec.lock | 350 ++++++++++++++++++ examples/wikivoyage_eu/pubspec.yaml | 12 + examples/wikivoyage_eu/pubspec_overrides.yaml | 16 + examples/wikivoyage_eu/rag.png | Bin 0 -> 18434 bytes examples/wikivoyage_eu/wikivoyage_eu.gif | Bin 0 -> 171257 bytes 11 files changed, 735 insertions(+) create mode 100644 examples/wikivoyage_eu/.gitignore create mode 100644 examples/wikivoyage_eu/README.md create mode 100644 examples/wikivoyage_eu/analysis_options.yaml create mode 100644 examples/wikivoyage_eu/bin/injestion.dart create mode 100644 examples/wikivoyage_eu/bin/wikivoyage_eu.dart create mode 100644 examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv create mode 100644 examples/wikivoyage_eu/pubspec.lock create mode 100644 examples/wikivoyage_eu/pubspec.yaml create mode 100644 examples/wikivoyage_eu/pubspec_overrides.yaml create mode 100644 examples/wikivoyage_eu/rag.png create mode 100644 examples/wikivoyage_eu/wikivoyage_eu.gif diff --git a/examples/wikivoyage_eu/.gitignore b/examples/wikivoyage_eu/.gitignore new file mode 100644 index 00000000..3a857904 --- /dev/null +++ b/examples/wikivoyage_eu/.gitignore @@ -0,0 +1,3 @@ +# https://dart.dev/guides/libraries/private-files +# Created by `dart pub` +.dart_tool/ diff --git a/examples/wikivoyage_eu/README.md b/examples/wikivoyage_eu/README.md new file mode 100644 index 00000000..ca686dcb --- /dev/null +++ b/examples/wikivoyage_eu/README.md @@ -0,0 +1,89 @@ +# Wikivoyage EU + +This example demonstrates how to build a fully local Retrieval Augmented Generation (RAG) pipeline with Llama 3 and ObjectBox using LangChain.dart and Ollama. + +> This example is adapted from [Ashmi Banerjee](https://ashmibanerjee.com)'s workshop "[Building a RAG using Google Gemma and MongoDB](https://colab.research.google.com/drive/1CviSVwnwl73ph-AhTB0Z8vYcOQrjityk)". + +![RAG Pipeline](rag.png) +*Figure 1: RAG Architecture (source: [Ashmi Banerjee](https://colab.research.google.com/drive/1CviSVwnwl73ph-AhTB0Z8vYcOQrjityk))* + +## Setup + +### 1. Install Ollama + +- Go to the [Ollama](https://ollama.ai/) website and download the latest version of the Ollama app. + +### 2.
Download models + +- For this example we will be using the following models: + * Embedding model: [`jina/jina-embeddings-v2-small-en`](https://ollama.com/jina/jina-embeddings-v2-small-en) + * LLM: [`llama3:8b`](https://ollama.com/library/llama3) +- Open your terminal and run: +```bash +ollama pull jina/jina-embeddings-v2-small-en +ollama run llama3:8b +``` + +### 3. Setup ObjectBox + +- We will be using [ObjectBox](https://objectbox.io) for our vector store. +- In order to use ObjectBox, we need to download the ObjectBox C library. You can find more information on how to do this [here](https://docs.objectbox.io/getting-started). +```bash +bash <(curl -s https://raw.githubusercontent.com/objectbox/objectbox-dart/main/install.sh) +``` + +### 4. Get dependencies + +```bash +dart pub get +``` + +## How it works + +The example has two scripts: +1. `injestion.dart`: This script reads the Wikivoyage dataset, creates embeddings from the data and stores them in the ObjectBox database. +2. `wikivoyage_eu.dart`: This script implements the chatbot that runs the RAG pipeline. + +### Ingestion + +We will be using data from [Wikivoyage](https://wikivoyage.org), a freely accessible online travel guide authored by volunteers. + +The `wikivoyage_eu_dataset.csv` file contains data from 160 European cities, including the city name, country, coordinates, population and a brief description: + +| city | country | lat | lng | population | abstract | +|-----------|-------------|---------|--------|------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Amsterdam | Netherlands | 52.3728 | 4.8936 | 1459402.0 | Amsterdam is the capital of the Netherlands. It is known for the canals that cross the city, its impressive architecture, museums and art gallerie, its notorious red light district, and more than 1,500 bridges. | + +The script does the following: +1. It uses LangChain.dart's `CsvLoader` to load the `wikivoyage_eu_dataset.csv` dataset. +2. It uses the `jina/jina-embeddings-v2-small-en` model to create embeddings for each city's data. The generated embeddings have 512 dimensions. + + *As the data for each city is not very large, we won't be chunking it into smaller parts, but you could easily do that using the `RecursiveCharacterTextSplitter` class (a minimal sketch of this optional chunking step is included after the dataset file below).* +3. It stores the embeddings in the ObjectBox vector database. + +You can run the script using: +```bash +$ dart run bin/injestion.dart +Added 160 documents to the vector store. +``` + +### Chatbot + +The chatbot script implements the RAG pipeline. It does the following: +1. Takes a user query as input. +2. Uses the `jina/jina-embeddings-v2-small-en` model to create an embedding for the query. +3. Retrieves the 5 most similar documents from the ObjectBox database. +4. Builds a prompt using the retrieved documents and the query. +5. Uses the `llama3:8b` model to generate a response to the prompt. + +You can run the script using: +```bash +$ dart run bin/wikivoyage_eu.dart +``` + +![Wikivoyage EU](wikivoyage_eu.gif) + +## Conclusion + +This example demonstrates how to build a simple RAG pipeline that can run locally on your machine. You can easily extend this example to build more complex RAG pipelines with more advanced retrieval and generation techniques. Check out the [LangChain.dart](https://langchaindart.dev/) documentation for more information. + +For simplicity, this example is a CLI application.
However, you can easily adapt this code to work in a Flutter app. To get started with ObjectBox in Flutter, refer to the [ObjectBox documentation](https://docs.objectbox.io/getting-started). diff --git a/examples/wikivoyage_eu/analysis_options.yaml b/examples/wikivoyage_eu/analysis_options.yaml new file mode 100644 index 00000000..f04c6cf0 --- /dev/null +++ b/examples/wikivoyage_eu/analysis_options.yaml @@ -0,0 +1 @@ +include: ../../analysis_options.yaml diff --git a/examples/wikivoyage_eu/bin/injestion.dart b/examples/wikivoyage_eu/bin/injestion.dart new file mode 100644 index 00000000..6aa7eaa3 --- /dev/null +++ b/examples/wikivoyage_eu/bin/injestion.dart @@ -0,0 +1,21 @@ +// ignore_for_file: avoid_print +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +void main() async { + final loader = CsvLoader('bin/wikivoyage_eu_dataset.csv'); + final docs = await loader.load(); + + final embeddings = OllamaEmbeddings( + model: 'jina/jina-embeddings-v2-small-en', + ); + final vectorStore = ObjectBoxVectorStore( + embeddings: embeddings, + dimensions: 512, + ); + + final ids = await vectorStore.addDocuments(documents: docs); + print('Added ${ids.length} documents to the vector store.'); + + embeddings.close(); +} diff --git a/examples/wikivoyage_eu/bin/wikivoyage_eu.dart b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart new file mode 100644 index 00000000..b1f82689 --- /dev/null +++ b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart @@ -0,0 +1,82 @@ +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +void main() async { + final vectorStore = ObjectBoxVectorStore( + embeddings: OllamaEmbeddings( + model: 'jina/jina-embeddings-v2-small-en', + ), + dimensions: 512, + ); + + final retriever = vectorStore.asRetriever( + defaultOptions: VectorStoreRetrieverOptions( + searchType: ObjectBoxSimilaritySearch(k: 5), + ), + ); + final setupAndRetrieval = Runnable.fromMap({ + 'context': retriever.pipe( + Runnable.mapInput( + (docs) => docs.map((d) => d.pageContent).join('\n---\n'), + ), + ), + 'question': Runnable.passthrough(), + }); + + final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + ''' +Here is some data from Wikivoyage about travel destinations in Europe: + + +{context} + + +Please read the Wikivoyage data carefully and consider how you can best answer the user's question using only the information provided. + +Use ANSI escape codes instead of Markdown to format your answer. +For example, `\x1B[1m\x1B[0m` will make "" bold. + +If the user's question is not about Europe, just respond with: +"I can only help you with vacation planning in Europe." +Do not provide any other suggestion if the question is not about Europe. +''' + ), + (ChatMessageType.human, '{question}'), + ]); + + final model = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3', + ), + ); + const outputParser = StringOutputParser(); + final chain = setupAndRetrieval // + .pipe(promptTemplate) + .pipe(model) + .pipe(outputParser); + + stdout.writeln( + 'Hello! Ask me anything about your vacation plans in Europe, ' + 'and I will provide you with the best itinerary.', + ); + + while (true) { + stdout.write('> '); + final query = stdin.readLineSync() ?? 
''; + + if (query.toLowerCase() == 'q') { + break; + } + + final stream = chain.stream(query); + await stream.forEach(stdout.write); + stdout.write('\n\n'); + } + + chain.close(); +} diff --git a/examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv b/examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv new file mode 100644 index 00000000..0e775870 --- /dev/null +++ b/examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv @@ -0,0 +1,161 @@ +city,country,lat,lng,population,abstract +Aalborg,Denmark,57.05,9.9167,143598.0,"Aalborg is the largest city in North Jutland, Denmark. Its population, as of 2016, is 134,672, making it the fourth largest city in Denmark." +Adana,Turkey,37.0,35.3213,1765981.0,"Adana is a city on the Cilician Plains of central Turkey, on the Seyhan River about 50 km from the Mediterranean coast. It's industrial and mostly modern but with several places of interest in its historic centre." +Amsterdam,Netherlands,52.3728,4.8936,1459402.0,"Amsterdam is the capital of the Netherlands. It is known for the canals that cross the city, its impressive architecture, museums and art gallerie, its notorious red light district, and more than 1,500 bridges." +Ancona,Italy,43.6169,13.5167,100924.0,Ancona is the capital of the Italian region called the Marches and an important port city on the coast of the Adriatic Sea. +Ankara,Turkey,39.93,32.85,5503985.0,"Ankara is the capital of Turkey, central within the country on the plateau of Central Anatolia. It's a sprawling modern place around an ancient citadel, and in 2022 had a population of almost 5." +Antalya,Turkey,36.8874,30.7075,2426356.0,"Antalya is a city in Pamphylia on the Turkish Mediterranean coast, and the chief resort of the ""Turkish Riviera"". It's a metropolis with a population of 2." +Arad,Romania,46.175,21.3125,159074.0,There is more than one place in the world called Arad. You might be looking for: +Arkhangelsk,Russia,64.55,40.5333,351488.0,"Arkhangelsk (population 350,000 in 2018) is a regional center in Northwestern Russia, located on both banks of Northern Dvina river near its mouth on the White Sea, about 1250 km by road to the north of Moscow and about 1100 km northeast of Saint Petersburg. It is part of the Silver Ring of cultural and historical centers of Northwestern Russia." +Astrakhan,Russia,46.35,48.035,532504.0,Astrakhan (Russian: А́страхань AH-struh-khun) is a city in Russia. +Baia Mare,Romania,47.6667,23.5833,123738.0,Baia Mare is a city in north-western Romania. +Baku,Azerbaijan,40.3667,49.8352,2300500.0,Baku (Azeri: Bakı) is the capital of Azerbaijan and is the largest city in the Caucasus. Baku's Old Town has UNESCO World Heritage status. +Barcelona,Spain,41.3825,2.1769,4800000.0,"Barcelona is Spain's second largest city, with a population of nearly two million people, and the capital of Catalonia. A major port on the northeastern Mediterranean coast of Spain, Barcelona has a wide variety of attractions that bring in tourists from around the globe." +Bari,Italy,41.1253,16.8667,323370.0,"Bari (Bari dialect: Bare) is the capital of the Apulia region of Italy, on the Adriatic Sea. With a population of 317,000 (in 2019), it's the second largest city in Southern Italy after Naples." +Batman,Turkey,37.887,41.132,447106.0,"Batman (pronounced as baat-maan, not like the name of the superhero; Kurdish: Iluh) is a city in southeastern Anatolia. It is the capital of an important oil producing province." 
+Belgrade,Serbia,44.82,20.46,1378682.0,"Belgrade (Serbian: Београд, Beograd) is the capital of the Republic of Serbia and the country's largest city. Belgrade has been re-emerging as a tourist destination in the past years." +Bergen,Norway,60.3894,5.33,267117.0,"Bergen is Norway's second largest city and the most popular gateway to the fjords of West Norway. The city is renowned for its great location amidst mountains, fjords, and the ocean." +Berlin,Germany,52.52,13.405,4473101.0,"Berlin is Germany's capital and biggest city. Within the city limits, Berlin in 2022 had a population of 3." +Bologna,Italy,44.4939,11.3428,392564.0,"Bologna (Emilian: Bulåggna) is a beautiful and historic city in the Emilia-Romagna region of Northeast Italy. It has the oldest university in the Western world, a lively student population, excellent food, a striking brick terracotta-roofed cityscape, and lots to see and do." +Bordeaux,France,44.84,-0.58,260958.0,"Bordeaux is a city in the Gironde region of southwest France, standing on the River Garonne. It's the country's fifth largest city, with a population of 259,809 in 2020, and another million living in its associated towns." +Braga,Portugal,41.5503,-8.42,181494.0,"Braga is one of the five largest cities of Portugal, situated in the Minho region in the North of the country. It is known for its abundance of churches and thus called the ""city of archbishops""." +Bratislava,Slovakia,48.1439,17.1097,475503.0,"Bratislava (Hungarian: Pozsony, German: Pressburg, known as Prešporok before 1919), is the capital and largest city of Slovakia. It has a population of more than 475,000 (2021), and is the administrative, cultural and economic centre of the country." +Bremen,Germany,53.0833,8.8,566573.0,"The Free Hanseatic City of Bremen is a city in northern Germany with a major port on the River Weser. The population is 567,000 (2020)." +Brest,Belarus,52.1347,23.6569,340723.0,There is more than one place called Brest: +Brno,Czechia,49.1925,16.6083,382405.0,"Brno (pronounced Bruhno) (German: Brünn, Štatl in the local dialect) is the major city of Moravia (a historical region in the Czech Republic). It is the largest city in Moravia and the second-largest city in the Czech Republic by population and area." +Brussels,Belgium,50.8467,4.3525,1743000.0,"Brussels (French: Bruxelles, Dutch: Brussel) is the capital of Belgium and one of the three administrative regions within the country, together with Flanders and Wallonia. Apart from its role within its country, it is also an internationally important city, hosting numerous international institutions, and in particular the headquarters of NATO and the core institutions of the European Union." +Budapest,Hungary,47.4925,19.0514,2997958.0,"Budapest is the capital city of Hungary. With a unique, youthful atmosphere, world-class classical music scene, a pulsating nightlife increasingly appreciated among European youth, and last but not least, an exceptional offer of natural thermal baths, Budapest is one of Europe's most delightful and enjoyable cities." +Burgas,Bulgaria,42.503,27.4702,210813.0,Burgas (also Bourgas) is a city on the Black Sea coast of Bulgaria. It is a large industrial centre with many tourist attractions in the region. +Bursa,Turkey,40.1833,29.05,2901396.0,"Bursa is a large city in the Southern Marmara region of Turkey, 20 km inland from the Marmara coast. It's the country's fourth-largest city, with a population of 2,161,990 in 2021, and with another million living in the wider metro area." 
+Bydgoszcz,Poland,53.1219,18.0003,346739.0,"Bydgoszcz (German: Bromberg) is a major city of 360,000 in Poland and with suburban area the agglomeration has nearly 500,000. It has well preserved 19th-century architecture and was known as Little Berlin before the world wars." +Cagliari,Italy,39.2278,9.1111,154106.0,"Cagliari (Sardinian: Casteddu, ""castle""; Latin: Caralis) is the capital city of the Italian island of Sardinia." +Cheboksary,Russia,56.1333,47.25,489498.0,"Cheboksary (Russian: Чебокса́ры chee-bahk-SAH-ree) is the capital of Chuvashia in the Volga Region of the Russian Federation. About 600,000 people live here and in the nearby satellite city Novocheboksarsk." +Chelyabinsk,Russia,55.15,61.4,1202371.0,"Chelyabinsk (Russian: Челя́бинск cheel-YAH-beensk) is a big city, with more than a million inhabitants, the capital of Chelyabinsk Oblast in the European part of Russia." +Cluj-Napoca,Romania,46.7667,23.5833,324576.0,"Cluj-Napoca (Romanian), Kolozsvár (Hungarian) or Klausenburg (German) is the capital of Cluj county and the unofficial capital of the historical region of Transylvania. The city, with about 320,000 people (2016), is very pleasant, and it is a great experience for those who want to see urban Transylvanian life at its best." +Coimbra,Portugal,40.2111,-8.4292,143396.0,"Coimbra is the traditional capital city of Central Portugal's historic Beira Litoral region. With over 140,000 inhabitants (2021), it is the largest municipality there and one of Portugal's four largest metropolises." +Copenhagen,Denmark,55.6761,12.5683,1366301.0,"Copenhagen (Danish: København) is the capital city of Denmark and forms the moderate conurbation that one million Danes call home. It is big enough to form a small Danish metropolis, with shopping, culture and nightlife par excellence, yet small enough still to feel intimate and be safe." +Cork,Ireland,51.8972,-8.47,222333.0,"Cork is the principal city of County Cork in southwest Ireland. It was already the second-largest city in Ireland when in 2019 its boundaries were extended, to have a population of 210,000." +Craiova,Romania,44.3333,23.8167,269506.0,"Craiova with 306,000 inhabitants (2016), is one of the five largest cities of Romania. Craiova is in the southwestern region of the country and hosts the administrative buildings of the Dolj County and of the Oltenia district." +Debrecen,Hungary,47.53,21.6392,328642.0,[a Nagytemplom télen.jpg|thumb|400px|The Great Church of Debrecen in winter] +Denizli,Turkey,37.7667,29.0833,1027782.0,"Denizli is a city in the Southern Aegean region of Turkey, which most visitors simply transit to reach Pamukkale 20 km north. It's a typical modern Turkish city, far from picturesque, but does have enough sights of its own if your schedule allows." +Dijon,France,47.3167,5.0167,158002.0,"Dijon is the largest city in the eastern French region of Bourgogne-Franche-Comté. Dijon is best known for its mustard (named after the town), which is no longer produced in its metropolitan area, but it is still one of the most beautiful cities in France, and its historic buildings and byways were not heavily damaged by bombing in World War II and are largely intact." +Donetsk,Ukraine,48.0028,37.8053,929063.0,"Donetsk (Ukrainian: Донецьк, Russian: Доне́цк) is a city in the Donetsk People's Republic, on the banks of the River Kalmius." +Dresden,Germany,51.05,13.74,561922.0,"Dresden is the capital of Saxony (Sachsen). 
It's often referred to locally as Elbflorenz, or ""Florence on the Elbe"", reflecting its location on the Elbe river and its historical role as a centre for the arts and beautiful architecture - much like Florence in Italy." +Dublin,Ireland,53.35,-6.2603,1173179.0,"Dublin (Irish: Baile Átha Cliath, ""Town of the Hurdled Ford"") is the capital city of Ireland. Its vibrancy, nightlife and tourist attractions are world renowned and it's the most popular entry point for international visitors to Ireland." +Erfurt,Germany,50.9833,11.0333,213835.0,Erfurt is the capital of the German state of Thuringia (Thüringen). The city is the largest one in that province and likewise a major transportation hub. +Erzincan,Turkey,39.7464,39.4914,157452.0,"Erzincan is a city in Eastern Anatolia. It's modern, on a grid pattern, as its predecessor was destroyed by an earthquake in 1939." +Erzurum,Turkey,39.9086,41.2769,767848.0,"Erzurum is a city in Eastern Anatolia, and is the hub for visiting eastern Turkey." +Gaziantep,Turkey,37.0628,37.3792,2028563.0,"Gaziantep is a city in Southeastern Anatolia. Although it is a major city in Turkey (counting almost 2 million inhabitants) and known as the Turkish capital of gastronomy, it counts very few international tourists." +Geneva,Switzerland,46.2017,6.1469,201818.0,thumb|200px|right|The old town of Geneva in the winter +Hamburg,Germany,53.55,10.0,2484800.0,"The Free and Hanseatic City of Hamburg (Freie und Hansestadt Hamburg) is Germany's second-largest city and, at the same time, one of Germany's 16 federal states or Bundesländer. Prior to the formation of the modern German state, Hamburg for centuries enjoyed a status as de facto independent city state and regional power and trade hub in the North Sea." +Helsinki,Finland,60.1708,24.9375,1268296.0,Helsinki (Helsingfors in Swedish) is Finland's capital and largest city. Helsinki combines modern and historic architectural styles with beautiful open spaces. +Innsbruck,Austria,47.2683,11.3933,132493.0,"Innsbruck is the fifth-largest city in Austria and the provincial capital of Tyrol, as well as one of the largest cities in the Alps. It is in a valley of the river Inn between mountain ranges of above 2000 m above sea level, halfway between Bavaria and northern Italy, and is a hub of a region popular for skiing and other mountain-related activities and a busy tourist destination." +Ioannina,Greece,39.6636,20.8522,113094.0,"Ioannina (Ιωάννινα) (population: 112,486 (2011)) is a beautiful city in Northern Greece whose old town is surrounded by tall defensive walls." +Isparta,Turkey,37.7647,30.5567,258375.0,"Isparta (Greek: Σπάρτη, Baris) is a city of 220,000 inhabitants in the Lakes District of Mediterranean Turkey." +Istanbul,Turkey,41.0136,28.955,16079000.0,"Istanbul (Turkish: İstanbul) is a very large city of fantastic history, culture and beauty. Called Byzantium in ancient times, the city's name was changed to Constantinople in 324 CE when it was rebuilt by the first Christian Roman Emperor, Constantine." +Ivano-Frankivsk,Ukraine,48.9228,24.7106,238196.0,"Ivano-Frankivsk (Ukrainian: Івано-Франківськ, also transliterated Ivano-Frankovsk from Russian: Ивано-Франковск) (formerly in Polish: Stanisławów, German: Stanislau) is a city in the Ukrainian part of East Galicia." 
+Izmir,Turkey,38.42,27.14,4320519.0,"thumb|270px|Clock tower in Konak Square, iconic symbol of the city" +Kahramanmaras,Turkey,37.5833,36.9333,443575.0,"Kahramanmaraş, which used to be known as Maraş, is a city in Turkey, located on the crossroad of southern, eastern and southeastern Turkey." +Kaliningrad,Russia,54.7003,20.4531,475056.0,"Kaliningrad (Russian: Калинингра́д kuh-leen-een-GRAHD) , also known by its original German name, Königsberg, is the capital city of Kaliningrad Oblast in Russia. It has about 475,000 inhabitants (2018)." +Kars,Turkey,40.6078,43.0958,115891.0,"Kars is a city in Eastern Anatolia. It is most frequently visited as a jumping off point for travelers going to Ani, but it is a viable destination in its own right for its 19th-century Russian imperial buildings, and, of course, its role as the setting for Orhan Pamuk's famous novel Snow." +Kaunas,Lithuania,54.8972,23.8861,381007.0,"Kaunas is the second-largest city in Lithuania, with a population of some 288,000 people. The main reason to visit is its charming Old Town, connected to the 19th century New Town ranged along Laisvės alėja." +Kayseri,Turkey,38.7225,35.4875,1389680.0,"Kayseri is a city in Central Anatolia, 350 km southeast of Ankara. In 2021 the population was 1." +Kazan,Russia,55.7964,49.1089,1243500.0,Kazan (Russian: Каза́нь kuh-ZAHN) is the capital of Russia's republic of Tatarstan and the center of the world Tatar culture. +Kharkiv,Ukraine,49.9925,36.2311,1446107.0,"Kharkiv (Ukrainian: Харків, also transliterated Kharkov from Russian: Харьков) is a major city in the Kharkiv region of Ukraine and is the second largest city in Ukraine with a population of over 1.5 million inhabitants." +Kiel,Germany,54.3233,10.1394,246601.0,"Kiel is the capital city of the German state of Schleswig-Holstein and has a population of roughly 248,000 (2018). It is located on the Baltic Sea at the end of the ""Kieler Förde""." +Kirov,Russia,58.6,49.65,501468.0,"Kirov (Russian: Ки́ров KEE-ruhf) is the capital city of Kirov Oblast, Russia." +Klagenfurt,Austria,46.6167,14.3,101403.0,Klagenfurt (Slovenian: Celovec) is the capital of Carinthia in Austria. It was one of the eight host cities in the 2008 European Football Championships. +Konya,Turkey,37.8667,32.4833,2232374.0,"Konya is a city in Central Anatolia in Turkey, known as the city of ""whirling dervishes"" and for its outstanding Seljuk architecture. In 2021 Konya metropolis had a population of 2,277,017, the sixth largest in Turkey, but the area of most interest is compact." +Krasnodar,Russia,45.0333,38.9667,948827.0,"Krasnodar is the capital of Krasnodar Krai in southern Russia, with a popolulation in 2018 of just under 900,000. Its main industries are based on agriculture and food." +Kutaisi,Georgia,42.25,42.7,147900.0,"Kutaisi is a city in the Rioni Region of Georgia. The city itself is very cinematographic and charming, and a visit to Kutaisi is almost mandatory to see the Bagrati Cathedral and Gelati Monastery, which are UNESCO World Heritage sites and offer views from the mountain slopes over the city and the Rioni River." +Lille,France,50.6278,3.0583,234475.0,"Lille (Dutch: Rijsel) is the capital of the Hauts-de-France region in northern France and the core of one of the largest metropolitan agglomerations in the country. Historically, it has also been the capital of Flanders, and later an industrial powerhouse, thanks to which it now boasts a large and handsome historic centre." 
+Ljubljana,Slovenia,46.0514,14.5061,286745.0,"Ljubljana (""lee-oo-blee-AH-nuh"") is the small but delightful capital of Slovenia. While the city's population had grown to 295,500 in 2020, the sights and amenities are concentrated in the charming old centre." +London,United Kingdom,51.5072,-0.1275,11262000.0,"Noisy, vibrant and truly multicultural, London is a megalopolis of people, ideas and frenetic energy. The capital and largest city of the United Kingdom sits on the River Thames in South-East England, Greater London has a population of a little over 9 million." +Luxembourg,Luxembourg,49.6117,6.1319,132780.0,"The Grand Duchy of Luxembourg (Luxembourgish: Groussherzogtum Lëtzebuerg, French: Grand-Duché de Luxembourg, German: Großherzogtum Luxemburg), is a landlocked Benelux country at the crossroads of Germanic and Latin cultures." +Lviv,Ukraine,49.8425,24.0322,724314.0,"Lviv (also spelled L'viv; Ukrainian: Львів; Polish: Lwów, German: Lemberg, Russian: Львов), formerly known as Lvov after its Russian name, is in Western Ukraine and used to be the capital of East Galicia. It's the biggest city of the region and a major Ukrainian cultural centre on the UNESCO World Heritage List." +Lyon,France,45.76,4.84,522969.0,"Lyon is the capital of the French administrative region of Auvergne-Rhône-Alpes. A city of half a million, Lyon alone is the country's third-largest city, but its metropolitan area is only second in population to Paris." +Maastricht,Netherlands,50.85,5.6833,277721.0,"By many considered to be the most beautiful city of the country, Maastricht is the southernmost city in the Netherlands. It's the capital of the province of Limburg and famous for what the Dutch call the ""Burgundian"" way of life." +Madrid,Spain,40.4169,-3.7033,6211000.0,"Madrid is Spain's capital and largest city. A city that has been marked by Spain's varied and tumultuous history, Madrid has some of Europe's most impressive cultural and architectural heritage, which includes grand avenues, plazas, buildings and monuments, world-class art galleries and museums, highly popular football teams, and cultural events of international fame for everyone." +Magdeburg,Germany,52.1317,11.6392,236188.0,"Magdeburg is the capital city of the Bundesland of Saxony-Anhalt, Germany, with a population of 240,000 (2018). Magdeburg has become a modern city with numerous interesting sights of high importance and uniqueness, as well as many parks, which make Magdeburg the third greenest city in Germany." +Malatya,Turkey,38.3486,38.3194,426381.0,thumb|350px|New Mosque at the central square +Milan,Italy,45.4669,9.19,1366180.0,"Milan (Italian: Milano; Milanese: Milan) is financially the most important city in Italy, and home to the Borsa Italiana stock exchange. It is the second most populous city proper in the country, but sits at the centre of Italy's largest urban and metropolitan area." +Minsk,Belarus,53.9,27.5667,2009786.0,"Minsk (Belarusian: Мінск, Russian: Минск) is the capital and largest city of the Republic of Belarus. Its population is about two million people in 2021." +Miskolc,Hungary,48.0833,20.6667,150695.0,"Miskolc, with population of about 157,000 (2017), is the third largest city in Hungary, located in the north-east of the country, east of Bükk mountains." +Moscow,Russia,55.7558,37.6178,17332000.0,"Since its founding in 1147, Moscow (Russian: Москва, Moskva) has been at the crossroads of history as the capital of empires and a frequent target for invaders. 
As the capital of the Russian Empire, the Soviet Union, and, today, the Russian Federation, it has played a central role in the development of the largest country in the world." +Munich,Germany,48.1375,11.575,2606021.0,"Munich (German: München, Bavarian: Minga) is the capital of the federal state of Bavaria in the south of Germany. Within the city limits, Munich in 2021 had a population of just under 1." +Murcia,Spain,37.9861,-1.1303,672773.0,You could be looking for: +Murmansk,Russia,68.9706,33.075,298096.0,"Murmansk (Russian: Му́рманск) is a city in the extreme northwest of Russia and the world's largest city north of the Arctic Circle. It lies in the Kola Bay on the Kola Peninsula, by the Barents Sea." +Mykolaiv,Ukraine,46.975,31.995,498748.0,"Mykolaiv (Ukrainian: Миколаїв, also transliterated Nikolaev or Nikolayev from Russian: Николаев) is a city in Southern Ukraine. It is an important shipbuilding centre and transportation hub for Ukraine, and has a large military presence." +Nalchik,Russia,43.4833,43.6167,265162.0,"Nalchik is the capital city of Kabardino-Balkaria, a republic located in the very south of the Russian Federation." +Nantes,France,47.2181,-1.5528,318808.0,"Nantes (Breton: Naoned) is the capital of Pays de la Loire region in northwest France. Historically it was part of Brittany, whose dukes built up its castle and made the town their capital." +Naples,Italy,40.8333,14.25,966144.0,"Naples (Italian: Napoli; Neapolitan: Napule) in Italy, an ancient port on the Mediterranean sea. With just short of a million citizens, is the third most populous municipality." +Nevsehir,Turkey,38.6264,34.7139,153117.0,"Nevşehir is one of the major cities in Cappadoccia Region, which displays a beautiful combination of nature and history. The traditional main sources of income of the city, carpet weaving and viticulture, have been overtaken by tourism, because of its proximity to the underground shelters, the fairy chimneys, monasteries, caravanserais and the famous rock-hewn churches of Göreme." +Nicosia,Cyprus,35.1725,33.365,330000.0,Nicosia (Greek: Λευκωσία; Turkish: Lefkoşa) is the capital of Cyprus and is the largest city by far. +Novi Sad,Serbia,45.2542,19.8425,380000.0,thumb|right|350px|Freedom square (Trg Slobode) +Oradea,Romania,47.0722,21.9211,196367.0,"Oradea is one the few undiscovered gems of Romania's tourism. Despite being one of the largest and most important cities in Transylvania, and having a high degree of administrative, economic and commercial importance, it is often overlooked by tourists in favor of other Transylvanian cities such as Brasov, Sibiu, Sighisoara or Cluj-Napoca." +Orenburg,Russia,51.7667,55.1,564773.0,"Orenburg (Russian: Оренб'ург, Uh-rehn-BOORK) is the capital of Orenburg Oblast. Every citizen will point you the sign at the bridge across the Ural river, supposedly landmarking the geographical border between Europe and Asia (the actual boundary is further east)." +Pamplona,Spain,42.8167,-1.65,203418.0,"Pamplona (Basque: Iruña) is a city in Navarra, Spain. It is most famous world-wide for its San Fermín festival, held each year from July 6-14." +Paris,France,48.8567,2.3522,11060000.0,thumb|300px|The Eiffel Tower and the river Seine +Penza,Russia,53.2,45.0,523726.0,There's more than one place called Penza: +Perm,Russia,58.0139,56.2489,1048005.0,"Perm (Russian: Пермь p`yehr`m`) is a city in Perm Krai, Russia." +Perugia,Italy,43.1122,12.3889,165683.0,"Perugia is a city in the Italian region of Umbria. 
It has an important university that attracts many foreign students, is a major center of medieval art, has a stunningly beautiful central area and is home of the Umbria Jazz Festival." +Petrozavodsk,Russia,61.7833,34.35,278551.0,thumb|350 px|Old and New Petrozavodsk +Plovdiv,Bulgaria,42.15,24.75,383540.0,thumb|Old Plovdiv +Podgorica,Montenegro,42.4413,19.2629,150977.0,"Podgorica (Montenegrin: Подгорица) is the capital of Montenegro. While not a typical European eye candy, the city is definitely worth visiting, owing to its interesting mix of old and new, its café culture and nightlife, and its laid back Mediterranean atmosphere." +Porto,Portugal,41.1621,-8.622,1278210.0,"Porto is Portugal's second largest city and the capital of the Northern region, and a busy industrial and commercial centre. The city isn't very populous (about 238,000 inhabitants in 2024), but the Porto metropolitan area has some 1." +Prague,Czechia,50.0875,14.4214,1335084.0,"Prague (Czech: Praha) is the capital and largest city of the Czech Republic. The city's historic buildings and narrow, winding streets are testament to its centuries-old role as capital of the historic region of Bohemia." +Pristina,Kosovo,42.6633,21.1622,161751.0,"Pristina (Albanian: Prishtinë, Serbian: Priština), the capital city of Kosovo, is not beautiful: it is messy, with centuries-old Ottoman heritage competing with communist designs and post-communist architectural monstrosities. However, there is a powerful draw to this city of 162,000 people (2011), offering much to passing visitors." +Pskov,Russia,57.8167,28.3333,209840.0,"Pskov is the largest city and administrative capital of Pskov Oblast. One of the oldest cities in the country, it has preserved many unique architectural monuments of the 12th-16th centuries." +Rennes,France,48.1147,-1.6794,220488.0,"Rennes is the chief city of Brittany in northwest France. It's mostly modern and industrial, but has many grand 18th and 19th century buildings, and survivors of earlier times." +Riga,Latvia,56.9489,24.1064,920643.0,"Riga is the financial, creative, and cultural centre of Latvia. It is the capital and the largest city in Latvia, it is also the largest city in the Baltic States." +Rijeka,Croatia,45.3272,14.4411,191293.0,"Rijeka (literally ""River"" in Croatian language) is a city in Kvarner Bay, a northern inlet of the Adriatic Sea in Croatia. It is the principal seaport of the country." +Rivne,Ukraine,50.6192,26.2519,246574.0,"Rivne (Ukrainian: Рівне, also transliterated Rovno from Russian: Ровно) (Polish: Równe) is a city in Western Ukraine." +Rome,Italy,41.8931,12.4828,2872800.0,"Rome (Italian and Latin: Roma), the 'Eternal City', is the capital and largest city of Italy and of the Lazio region. It's the famed city of the Roman Empire, the Seven Hills, La Dolce Vita, the Vatican City and Three Coins in the Fountain." +Rouen,France,49.4428,1.0886,112321.0,"Rouen is the capital of the French region of Upper Normandy on the River Seine, 135 km (approximately 90 minutes drive) northwest from the centre of Paris. The city has a population of 110,000 and its metropolitan area includes some 666,000 inhabitants (2017)." +Saint Petersburg,Russia,59.95,30.3167,5384342.0,"Saint Petersburg (Russian: Са́нкт-Петербу́рг Sankt-Peterburg), known as Petrograd (Петроград) in 1914-1924 and Leningrad (Ленинград) in 1924-1991, is the second largest city of Russia, with 5.6 million inhabitants (2021), and the former capital of the Russian Empire." 
+Salzburg,Austria,47.8,13.045,155021.0,"Salzburg is a city in Austria, near the border with Germany's Bavaria state, with a population of 157,000 (2020). It was the setting for the 1965 movie The Sound of Music, so you may think you know all there is to see in Salzburg if you have seen the movie." +Samara,Russia,53.2028,50.1408,1169719.0,thumb|300px|Iversky Convent +Samsun,Turkey,41.2903,36.3336,1335716.0,"Samsun, in the Central Karadeniz region of Turkey, is the largest city on the Turkish Black Sea coast." +Santander,Spain,43.4628,-3.805,172221.0,"Santander is the capital and largest city of the province of Cantabria in Spain. It's on the north coast, with many beaches, ferries from Britain, and a small historic centre." +Sarajevo,Bosnia and Herzegovina,43.8564,18.4131,419957.0,"Sarajevo is the capital of Bosnia and Herzegovina, and its largest city, with 420,000 citizens in its urban area (2013). Sarajevo metropolitan area that has a population of 555,000 also includes some neighbourhoods of ""East Sarajevo"" that are a part of Republika Srpska." +Saratov,Russia,51.5333,46.0167,845300.0,Saratov (Russian: Сара́тов suh-RAH-tuhf) is a city in the Volga region of Russia. +Satu Mare,Romania,47.79,22.89,102411.0,"Satu Mare is a city in the Maramureș region of Romania. As of 2021, it had a population of 91,520." +Sibiu,Romania,45.7928,24.1519,147245.0,"Sibiu is a town in southern Transylvania, Romania, 280 km by road from Bucharest. The old town centre is very attractive." +Siirt,Turkey,37.925,41.9458,166332.0,Siirt is a city in Southeastern Anatolia. +Simferopol,Ukraine,44.9484,34.1,341799.0,"Simferopol (Russian: Симферополь, Ukrainian: Сімферополь) is the capital city of the Crimea." +Sivas,Turkey,39.75,37.0167,377561.0,"Sivas is a city in Central Anatolia, with a population in 2020 of 335,570. By road it's 450 km east of Ankara, and stands at 1278 m elevation." +Skopje,Macedonia,41.9961,21.4317,640000.0,"Skopje (Macedonian: Скопје, Albanian: Shkup, Turkish: Üsküp) is the capital and largest city of the Republic of North Macedonia. Skopje is city of many cultures and many centuries." +Sofia,Bulgaria,42.7,23.33,1547779.0,Sofia (София) is the capital of Bulgaria. It is also the biggest city in the country with about 2 million citizens (including suburbs). +Stavanger,Norway,58.97,5.7314,237369.0,"Stavanger is Norway's fourth largest city, at 145,000 citizens (2021). It is the largest city in, and the administrative centre of, Rogaland county in West Norway." +Stavropol,Russia,45.05,41.9833,450680.0,Stravropol (Ставрополь) is a city in Russia. +Stockholm,Sweden,59.3294,18.0686,1611776.0,"Stockholm is Sweden's capital and largest city, with nearly a million inhabitants in the city, and 2.4 million within Stockholm County (as of 2021)." +Strasbourg,France,48.5833,7.7458,290576.0,"thumb|300px|Strasbourg railway station, known for the sky dome" +Stuttgart,Germany,48.7775,9.18,2787724.0,"Stuttgart is the capital of the Bundesland of Baden-Württemberg in Germany. With a population of approximately 632,000 in the immediate city (2017) and more than 5." +Syktyvkar,Russia,61.6667,50.8167,245313.0,thumb|300px|Street scene in Syktyvkar. +Szczecin,Poland,53.4325,14.5481,403833.0,"Szczecin, (pronounced Shchetsin, German: Stettin, Latin: Stetinum) is a maritime port city and the capital of Zachodniopomorskie in Poland. The city has a population of over 400,000, with almost 780,000 living in its metro area (2019)." +Tallinn,Estonia,59.4372,24.7453,438341.0,"Tallinn is Estonia's capital and largest city. 
Tallinn is an important port of the Baltic Sea, with the busy passenger section of the port reaching the foothill of the picturesque medieval Old Town, which has been astonishingly well preserved and was inscribed on the UNESCO World Heritage List in 1997." +Tampere,Finland,61.4981,23.76,334112.0,thumb|350px|View to Näsinneula tower in Tampere +Tbilisi,Georgia,41.7225,44.7925,1118035.0,"Tbilisi (Georgian: , Russian: ), is the capital city of the country of Georgia, lying on the banks of the Mtkvari River. The metropolitan area covers 726 km² (280 mi²) and has a population of approximately 1." +Thessaloniki,Greece,40.6403,22.9347,824676.0,"Thessaloniki (Greek: Θεσσαλονίκη, Albanian, Turkish: Selanik, Serbian, Bulgarian, Macedonian: Солун, Solun, Judaeo-Spanish: סאלוניקו / Saloniko, Romanian: Salonic, Aromanian: Sãrunã, French: Salonique) is the capital of the administrative region of Central Macedonia and the whole historical region of Macedonia, Greece, and is, at about one million inhabitants (2011), the second largest city in the country. More importantly, it is a city with a continuous 3,000-year history, preserving relics of its Roman, Byzantine and Ottoman past and of its formerly dominant Jewish population." +Tirana,Albania,41.3289,19.8178,418495.0,"Tirana (Albanian: Tiranë) is the bustling and relatively modernised capital of Albania. It is the most important economic, financial, political and trade centre in the country." +Toulouse,France,43.6045,1.444,493465.0,"Toulouse is the chief city of Haute-Garonne in the Occitanie region of France. It stands north of the Pyrenees on the River Garonne, halfway between the Atlantic and the Mediterranean." +Trabzon,Turkey,41.005,39.7225,426882.0,"Trabzon (formerly Trebizond) is the largest city in the Eastern Karadeniz region of Turkey. Trabzon functioned as an independent state or empire during several periods in its long history, ruling over a vast area from Sinop in the west to Georgia in the east, even including territory in Crimea." +Turku,Finland,60.45,22.2667,252468.0,"Turku (Swedish: Åbo) is Finland's oldest city and the biggest one until the mid 1800s. Believed to have been founded in the early 13th century, it is the cradle of modern Finnish culture and has extensively influenced Finnish history." +Ufa,Russia,54.7261,55.9475,1115560.0,"Ufa (Russian: Уфа́ oo-FAH, Bashkirː ӨФӨ oe-FOE), the capital of Bashkortostan, is a large, interesting, and rapidly developing city, with a population of over 1.1 million in 2018." +Uzhhorod,Ukraine,48.6239,22.295,114897.0,"Uzhhorod (Ukrainian: Ужгород, also transliterated Uzhgorod from Russian: Ужгород; Hungarian: Ungvár, German: Uschhorod) is a city in Western Ukraine, the administrative center of Zakarpatska Oblast (Transcarpthian Region). The population of Uzhhorod is multiethnic." +Valencia,Spain,39.47,-0.3764,792492.0,"Valencia (València in Catalan/Valencian) is a charming old city and the capital of the Valencian Community. With just over 830,000 inhabitants in 2023, it is Spain’s third-largest city and, after Barcelona, the most significant cultural centre along the Spanish Mediterranean coast." +Valladolid,Spain,41.6528,-4.7236,297775.0,You may be looking for: +Van,Turkey,38.4942,43.38,353419.0,"Van (pronounced vahn in Turkish, wahn in Kurdish) is a city in Eastern Anatolia, Turkey. For Turks from the other regions of Turkey, it has a surprising beach resort feel in an area where their country is farthest from the sea." 
+Varna,Bulgaria,43.2167,27.9167,348668.0,"Varna (Варна) is a large city on the Black Sea coast in the northeast of Bulgaria. It's the larger of the country's two major sea ports (the other one is Burgas), and a gateway to the seaside resorts on the northern part of the coast." +Vienna,Austria,48.2083,16.3725,1973403.0,"Vienna (German: Wien; Austro-Bavarian: Wean) is the capital of Austria and by far its most populous city, with an urban population of 2 million and a metropolitan population of 2.9 million (2023)." +Vilnius,Lithuania,54.6872,25.28,708203.0,"Vilnius is the capital and largest city of Lithuania. It has a beautiful baroque Old Town, listed as a , and excellent tourist facilities in all price ranges." +Vinnytsia,Ukraine,49.2333,28.4833,371855.0,"Vinnytsia (Ukrainian: Вінниця, also transliterated Vinnitsa from Russian: Винница) is a city in Central Ukraine, the administrative center of the Vinnytsia region. 267 km southwest of Kyiv, it has been known since the Middle Ages, and is home to a former Soviet Cold War airbase." +Vitoria-Gasteiz,Spain,42.85,-2.6833,253672.0,"Vitoria-Gasteiz (Spanish: Vitoria, Basque: Gasteiz) is in the heart of the Basque Country in Spain. The old town has some of the best preserved medieval streets and plazas in the region and it is one of very few cities with two cathedrals." +Vladikavkaz,Russia,43.04,44.6775,306978.0,Vladikavkaz is the capital city of North Ossetia and a major transit hub for the North Caucasus region. Its position on the Georgian Military Highway makes it a staging post for journeys to both Georgia and South Ossetia. +Volgograd,Russia,48.7086,44.5147,1015586.0,"Volgograd (Russian: Волгогра́д vuhl-gah-GRAHD) is a large city along the west bank of the Volga River in Southern Russia. It used to be known as Stalingrad, a name which the city is still known as on several war-related dates each year (according to local legislation)." +Voronezh,Russia,51.6717,39.2106,1050602.0,[of the Annunciation] +Warsaw,Poland,52.23,21.0111,1860281.0,Warsaw (Polish: Warszawa) is Poland's capital and largest city. Warsaw is a bustling metropolis and one of the European Union's fastest-developing capitals and the Union's ninth most populous urban centre. +Zagreb,Croatia,45.8167,15.9833,809268.0,thumb|350px|right|Ban Jelačić Square +Zaporizhzhia,Ukraine,47.85,35.1175,741717.0,"Zaporizhzhia (Ukrainian: Запоріжжя, also transliterated Zaporozhye from Russian: Запорожье) is a city in Ukraine." +Zaragoza,Spain,41.65,-0.8833,675301.0,"Zaragoza is the capital and largest city of Aragon in Spain, and one of Spain's five largest cities, but it is one of the least known outside of Spain. Founded on the river Ebro during the Roman Empire as Cesaraugusta, Zaragoza now holds a large cultural and architectural heritage attesting to 2,000 years of affluence and importance." +Zurich,Switzerland,47.3744,8.5411,436332.0,"Zurich (German: Zürich, Swiss German: Züri) is the largest city in Switzerland, with a population of some 435,000 (2018) in the city, and 1.3 million (2009) in the metro area." 
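The ingestion notes in the README above point out that each city abstract is small enough to be embedded whole, but that larger documents could first be split with the `RecursiveCharacterTextSplitter` class. The following is a minimal sketch of that optional chunking step, reusing the same loader, embedding model, and vector store as `injestion.dart`; the `chunkSize` and `chunkOverlap` values are illustrative assumptions rather than settings taken from the example.

```dart
// ignore_for_file: avoid_print
import 'package:langchain/langchain.dart';
import 'package:langchain_community/langchain_community.dart';
import 'package:langchain_ollama/langchain_ollama.dart';

void main() async {
  // Load the same CSV dataset used by injestion.dart.
  final loader = CsvLoader('bin/wikivoyage_eu_dataset.csv');
  final docs = await loader.load();

  // Optional chunking step: split long documents into overlapping chunks so
  // that each embedded passage stays reasonably small. The sizes below are
  // assumptions for illustration only.
  final splitter = RecursiveCharacterTextSplitter(
    chunkSize: 500,
    chunkOverlap: 50,
  );
  final chunks = splitter.splitDocuments(docs);

  // Embed and store the chunks, mirroring injestion.dart.
  final embeddings = OllamaEmbeddings(
    model: 'jina/jina-embeddings-v2-small-en',
  );
  final vectorStore = ObjectBoxVectorStore(
    embeddings: embeddings,
    dimensions: 512,
  );

  final ids = await vectorStore.addDocuments(documents: chunks);
  print('Added ${ids.length} chunks to the vector store.');

  embeddings.close();
}
```

For this 160-row dataset the un-chunked ingestion in `injestion.dart` is simpler and sufficient; chunking mainly pays off when individual documents are much longer than the embedding model comfortably handles.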
diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock new file mode 100644 index 00000000..f132728e --- /dev/null +++ b/examples/wikivoyage_eu/pubspec.lock @@ -0,0 +1,350 @@ +# Generated by pub +# See https://dart.dev/tools/pub/glossary#lockfile +packages: + async: + dependency: transitive + description: + name: async + sha256: "947bfcf187f74dbc5e146c9eb9c0f10c9f8b30743e341481c1e2ed3ecc18c20c" + url: "https://pub.dev" + source: hosted + version: "2.11.0" + beautiful_soup_dart: + dependency: transitive + description: + name: beautiful_soup_dart + sha256: "57e23946c85776dd9515a4e9a14263fff37dbedbd559bc4412bf565886e12b10" + url: "https://pub.dev" + source: hosted + version: "0.3.0" + characters: + dependency: transitive + description: + name: characters + sha256: "04a925763edad70e8443c99234dc3328f442e811f1d8fd1a72f1c8ad0f69a605" + url: "https://pub.dev" + source: hosted + version: "1.3.0" + collection: + dependency: transitive + description: + name: collection + sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + url: "https://pub.dev" + source: hosted + version: "1.18.0" + cross_file: + dependency: transitive + description: + name: cross_file + sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + url: "https://pub.dev" + source: hosted + version: "0.3.4+1" + crypto: + dependency: transitive + description: + name: crypto + sha256: ff625774173754681d66daaf4a448684fb04b78f902da9cb3d308c19cc5e8bab + url: "https://pub.dev" + source: hosted + version: "3.0.3" + csslib: + dependency: transitive + description: + name: csslib + sha256: "706b5707578e0c1b4b7550f64078f0a0f19dec3f50a178ffae7006b0a9ca58fb" + url: "https://pub.dev" + source: hosted + version: "1.0.0" + csv: + dependency: transitive + description: + name: csv + sha256: c6aa2679b2a18cb57652920f674488d89712efaf4d3fdf2e537215b35fc19d6c + url: "https://pub.dev" + source: hosted + version: "6.0.0" + fetch_api: + dependency: transitive + description: + name: fetch_api + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" + url: "https://pub.dev" + source: hosted + version: "2.2.0" + fetch_client: + dependency: transitive + description: + name: fetch_client + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" + url: "https://pub.dev" + source: hosted + version: "1.1.2" + ffi: + dependency: transitive + description: + name: ffi + sha256: "493f37e7df1804778ff3a53bd691d8692ddf69702cf4c1c1096a2e41b4779e21" + url: "https://pub.dev" + source: hosted + version: "2.1.2" + fixnum: + dependency: transitive + description: + name: fixnum + sha256: "25517a4deb0c03aa0f32fd12db525856438902d9c16536311e76cdc57b31d7d1" + url: "https://pub.dev" + source: hosted + version: "1.1.0" + flat_buffers: + dependency: transitive + description: + name: flat_buffers + sha256: "380bdcba5664a718bfd4ea20a45d39e13684f5318fcd8883066a55e21f37f4c3" + url: "https://pub.dev" + source: hosted + version: "23.5.26" + freezed_annotation: + dependency: transitive + description: + name: freezed_annotation + sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + url: "https://pub.dev" + source: hosted + version: "2.4.1" + html: + dependency: transitive + description: + name: html + sha256: "3a7812d5bcd2894edf53dfaf8cd640876cf6cef50a8f238745c8b8120ea74d3a" + url: "https://pub.dev" + source: hosted + version: "0.15.4" + http: + dependency: transitive + description: + name: http + sha256: 
"761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + url: "https://pub.dev" + source: hosted + version: "1.2.1" + http_parser: + dependency: transitive + description: + name: http_parser + sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + url: "https://pub.dev" + source: hosted + version: "4.0.2" + iregexp: + dependency: transitive + description: + name: iregexp + sha256: "143859dcaeecf6f683102786762d70a47ef8441a0d2287a158172d32d38799cf" + url: "https://pub.dev" + source: hosted + version: "0.1.2" + json_annotation: + dependency: transitive + description: + name: json_annotation + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" + url: "https://pub.dev" + source: hosted + version: "4.9.0" + json_path: + dependency: transitive + description: + name: json_path + sha256: "149d32ceb7dc22422ea6d09e401fd688f54e1343bc9ff8c3cb1900ca3b1ad8b1" + url: "https://pub.dev" + source: hosted + version: "0.7.1" + langchain: + dependency: "direct main" + description: + path: "../../packages/langchain" + relative: true + source: path + version: "0.7.1" + langchain_community: + dependency: "direct main" + description: + path: "../../packages/langchain_community" + relative: true + source: path + version: "0.2.0+1" + langchain_core: + dependency: "direct overridden" + description: + path: "../../packages/langchain_core" + relative: true + source: path + version: "0.3.1" + langchain_ollama: + dependency: "direct main" + description: + path: "../../packages/langchain_ollama" + relative: true + source: path + version: "0.2.1+1" + langchain_openai: + dependency: "direct overridden" + description: + path: "../../packages/langchain_openai" + relative: true + source: path + version: "0.6.1+1" + langchain_tiktoken: + dependency: transitive + description: + name: langchain_tiktoken + sha256: c1804f4b3e56574ca67e562305d9f11e3eabe3c8aa87fea8635992f7efc66674 + url: "https://pub.dev" + source: hosted + version: "1.0.1" + math_expressions: + dependency: transitive + description: + name: math_expressions + sha256: db0b72d867491c4e53a1c773e2708d5d6e94bbe06be07080fc9f896766b9cd3d + url: "https://pub.dev" + source: hosted + version: "2.5.0" + maybe_just_nothing: + dependency: transitive + description: + name: maybe_just_nothing + sha256: "0c06326e26d08f6ed43247404376366dc4d756cef23a4f1db765f546224c35e0" + url: "https://pub.dev" + source: hosted + version: "0.5.3" + meta: + dependency: transitive + description: + name: meta + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + url: "https://pub.dev" + source: hosted + version: "1.15.0" + objectbox: + dependency: transitive + description: + name: objectbox + sha256: "70ff2a7538f6f8bb56136734d574f5bdc1cf29c50cd7207a14ea0c641ecb88ca" + url: "https://pub.dev" + source: hosted + version: "4.0.1" + ollama_dart: + dependency: "direct overridden" + description: + path: "../../packages/ollama_dart" + relative: true + source: path + version: "0.1.0+1" + openai_dart: + dependency: "direct overridden" + description: + path: "../../packages/openai_dart" + relative: true + source: path + version: "0.3.2+1" + path: + dependency: transitive + description: + name: path + sha256: "087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" + url: "https://pub.dev" + source: hosted + version: "1.9.0" + petitparser: + dependency: transitive + description: + name: petitparser + sha256: c15605cd28af66339f8eb6fbe0e541bfe2d1b72d5825efc6598f3e0a31b9ad27 + url: "https://pub.dev" + source: hosted + version: 
"6.0.2" + rfc_6901: + dependency: transitive + description: + name: rfc_6901 + sha256: df1bbfa3d023009598f19636d6114c6ac1e0b7bb7bf6a260f0e6e6ce91416820 + url: "https://pub.dev" + source: hosted + version: "0.2.0" + rxdart: + dependency: transitive + description: + name: rxdart + sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + url: "https://pub.dev" + source: hosted + version: "0.27.7" + source_span: + dependency: transitive + description: + name: source_span + sha256: "53e943d4206a5e30df338fd4c6e7a077e02254531b138a15aec3bd143c1a8b3c" + url: "https://pub.dev" + source: hosted + version: "1.10.0" + sprintf: + dependency: transitive + description: + name: sprintf + sha256: "1fc9ffe69d4df602376b52949af107d8f5703b77cda567c4d7d86a0693120f23" + url: "https://pub.dev" + source: hosted + version: "7.0.0" + string_scanner: + dependency: transitive + description: + name: string_scanner + sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + url: "https://pub.dev" + source: hosted + version: "1.2.0" + term_glyph: + dependency: transitive + description: + name: term_glyph + sha256: a29248a84fbb7c79282b40b8c72a1209db169a2e0542bce341da992fe1bc7e84 + url: "https://pub.dev" + source: hosted + version: "1.2.1" + typed_data: + dependency: transitive + description: + name: typed_data + sha256: facc8d6582f16042dd49f2463ff1bd6e2c9ef9f3d5da3d9b087e244a7b564b3c + url: "https://pub.dev" + source: hosted + version: "1.3.2" + uuid: + dependency: transitive + description: + name: uuid + sha256: "814e9e88f21a176ae1359149021870e87f7cddaf633ab678a5d2b0bff7fd1ba8" + url: "https://pub.dev" + source: hosted + version: "4.4.0" + vector_math: + dependency: transitive + description: + name: vector_math + sha256: "80b3257d1492ce4d091729e3a67a60407d227c27241d6927be0130c98e741803" + url: "https://pub.dev" + source: hosted + version: "2.1.4" + web: + dependency: transitive + description: + name: web + sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + url: "https://pub.dev" + source: hosted + version: "0.5.1" +sdks: + dart: ">=3.3.0 <4.0.0" diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml new file mode 100644 index 00000000..7b4ce9a2 --- /dev/null +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -0,0 +1,12 @@ +name: wikivoyage_eu +description: Wikivoyage EU chatbot using llama3 and ObjectBox. 
+version: 1.0.0
+publish_to: none
+
+environment:
+  sdk: ">=3.0.0 <4.0.0"
+
+dependencies:
+  langchain: ^0.7.1
+  langchain_ollama: ^0.2.1+1
+  langchain_community: 0.2.0+1
diff --git a/examples/wikivoyage_eu/pubspec_overrides.yaml b/examples/wikivoyage_eu/pubspec_overrides.yaml
new file mode 100644
index 00000000..8b4fec3e
--- /dev/null
+++ b/examples/wikivoyage_eu/pubspec_overrides.yaml
@@ -0,0 +1,16 @@
+# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_community,langchain_ollama,ollama_dart
+dependency_overrides:
+  langchain:
+    path: ../../packages/langchain
+  langchain_community:
+    path: ../../packages/langchain_community
+  langchain_core:
+    path: ../../packages/langchain_core
+  langchain_ollama:
+    path: ../../packages/langchain_ollama
+  langchain_openai:
+    path: ../../packages/langchain_openai
+  ollama_dart:
+    path: ../../packages/ollama_dart
+  openai_dart:
+    path: ../../packages/openai_dart
diff --git a/examples/wikivoyage_eu/rag.png b/examples/wikivoyage_eu/rag.png
new file mode 100644
index 0000000000000000000000000000000000000000..ca46092d1d31d894f03a4eeb8070787bd5c66e28
GIT binary patch
literal 18434
[base85-encoded binary image data for examples/wikivoyage_eu/rag.png omitted]
zym*^Q2Q2#e%JLcH4M(yj%Lbv99%I{Dv51VjK`j4Xon8?=j~4GYO@}rxkLf>4pUxn9t?{tITii zy>DD~9_jN?y5;#?Agsk7r?suRY2%HFKbJV}!_v8rZEryUt`EX(kAgi$p9jxL|0%rb z-OnJG$lo6ExZN#2umqT$bR;9&Z)-fcP1xjhs<*vo(&r<`TVdRrF~Ot`yQ=mZQ`-G; zOE4d58+D;$zC=fP>@TPlYnOIf$Nf`YG?a#cg@UwzueD4ZD>>KQ)I6wD9}eHRy3#}P z#AC>5Lw|-xYGW7sADPHhsG7ou+5MNJ>3^HuwX$>mBhZkhYw7Ra-7DRC%RSj;-6_9x#0;*VRd*M4vW8M+S7|I0x|s zbhJbfV{Y3L<=q@ASuoFGrEP3YTEbWCAqsEXN(`pe`x}QG+!1fJO_LHuqS52FYRwQ=;&V_7KIH!D{ZJr~_Cac^q)Nlm_{+Q{F-ytH0ydd3 zGkD*@ZNEGHO9gh8q`ZG!Vt21>T<#I%o{utgLkxM4g?jwvrNb7P5^u)~_B8<+$~GIH zWvb*H7kR5^l@n1l3c3}beomxn6g;}_e{Q4se;#P~_Yqa9$8&Sme&_9@TCdve@>v%7 zqNe;8Vhl`|VLWxn+qr6%URiLlE)02nZRLg9E4{Oq>N`k(9ZlW$UTD$YBOhgyqxcVv z(|@u2`QQE{Di8~7f&X(qQpJBSOC>X?jWu`EDN?n|Wm;kPR{!0P*Tzqd(2vJ+qRIa+poDqK!DdRfj<@9VerA73z)#oayDmMOha z-lhc~zTE0UI=+4jm8Dk2IUUr!>vR{qRTLPy^-XYGX9m@oIN-3Q-h`JRi!%>fOV0XA zwU-3xLw+R3DWp>w&s=DG>YTx3_(kKjOz*PYR+9llCI|4Jm>*>x!hdQWCONKH`PlvR zN{G@OcKETM)P7&yhGL(c2mRd7MqhY+Y}@}DG%auK?Ek4VKEb9lz-Lb36Tzlmd%baz zL30_bXv5=fdJAY>`K(kzWD;SqGKM3`bB8^K+HJyi6H6)N+T%X-dvSdFUIugWYV0Fh z-mTB884=!PGNm2rcS%$8>SW3?DPi?$y8wIi2^XI2;L9PFvv|~%XXBO-e}b+n6|y0c zG#vNzHsZyxW1aUpbqGiG9pa?vS4bbb!KnUlW+EetaW+Z)rT<3yhWeke>+|4u zDIfO@rkrCwg0K8p$hOOvYWDgu<1cK}j5Dggi`5biKhrmJ$pfXPCFLC)6vJ!VlLDU3 zm{(Y)%OiK}KGa~EjGE!|CxjDL#I=VBWUZEVt>#B|!B#8_*F7kK$bU-f(;!DO?e=X> zLWY%t-FH?94fL+8Wwu{r+55XCPn$fw=ZAd@c$TfaHQ$)jmC77~cU;@%t!wzQW>K$b znef2qQUAs#7T(^kIn7V-?{cD>OInsN7tY9);?J#R$`&1Hf=3(HZ*&l#obUQws}A2L zfyeTWGq{MbA!RqRG6+kE(6=q}MOr$adt)t5-}aC#MeFNWySo88dnen zem7;s`ZRq5G7-{F!7KRa&B95=SY;&s!B>>KmA3SB9!N5{Dg}-p$SA%*mgX+?2s|gf zsN7>AEoaFXb>edQu7;h9X|`651h)Blm0pUJLUPHi^pS97eXanFZ-QsMv{1F^8%2vq`16hy$2Y{kbT~~U zMAy@a^G0y$yM=%XiAFV$(Uwhg4I{WBMRMOlx@clpjv8h{@l)F!bz)kEJak@%I4*YG z(ZuKtof^GCh_&xwIlx$_Mlwra2JWKuxgQTPbD2wOP7=WjQ>F45WWWtnp5E22+2-8lZ1X+`18b_%(BJPA(-re-SW(oZ z{pOE91EmSk=8i-kE@(eDj%jdd0UDsf+b|s;Jtmg>OynTESx8DsD6C+5zFZQGi0RLe zP(~rI5`$tH09*$W8_=G2NQAh0C(1;dQxMfd24go&EtWY?tv$uo9IQOB)Q2h>9~#k_ zv%KqGs~9jjlqcA&2up{=RRAmdK91d*h!J4VG-SZp!9?uz6E2DOW|ag1yVi^Xehn2^ z>)Zg^osj^A9s&>5(XgTlWX}lF7t6oEUgoOlJsy$@Xho=FMNu477$NBE8{`M!F2F5% z#`8#2^AguZ3OTbaNieB6HGLU)!z_#o>EsJXy6vUHfP}Nbgijh$COKeM@?LFK&N;KX z`80e`eV0ZJ5J5m0EG&h|c*?)0>c4MmbdPS^tYPu$!$4X8 zAMzCfC-fLDB)AvwBdITc>V;*~(*b zg{c*LcYmEfqKK9L=Q~1UtJ_xmqP~Jff&-q(#p=nsJU14(X{X6 zN~XP1^a(oMSy;Q({$D?+)9DiM=b^RC*AAPNNm?5o`BLv6=|wt@8f>6ak4j0cE`R7N zxTEczEXQ#5eb^o6j=d84m50c_0M~T+c|Kk%^D-Rq|GF4YgD!`m|K)NhkbzsXXnhUi z4zj8g_y2tBRrG01IHs$bQpen&x87f+roT@gJ^qmrt+4(M_yAJL65M`cl-Ui&Su z_#N3Ci9E$Yo&UPey?IqnruQ$D!gVpi_tM`Yn??`02z4?sNnU%?HCFLUT|IH6F5@eW_Xpbcprte1HSoQmIc&Aw^@Q0H2_(rUx z&YnQY=te=n`rntsGrKQ)AvO}oyNOS4jG6WD0HQXZA9^05@U9Ae8i zZ<^H|F5U9~ySLuIRgB+EDVNznJlFp9&797k!`g9c{-6*){c+3CMR87GubpDVLmW28 z(a8pOLX)f37kJ&rCoHmyH+ijC*h;&r6W)7Yn?Z(4?e#CD?^T!roscQ_{^*q$(Rc%P z#vGHY)Z5F0ddyOCEYi|^yO6@Q((OLC0XsDc`Z2ewpmhCS_kmQkQ;i^4=E8hzbl6vD za~3W)W@~a{sHj=~#;SOh3Fs^0RQ&v1Xki*)<29nfEv7NDn@f z*aZ+DsGR11zEAM;9G|Gx2BqF4Ja>FBewh9~rpxAn7xSgm(U)`RI<=1nJ;r6auh&KO z+Y_Iy^GSF%BW1rlY@_+^k@F<{f_#)tq+V=l*qStOm_Kq{SbaX16surFJ9Nt zp>PFC!wZDCuk)H+@{fK+Sl+zTj`w{0!}b35Mb44XN%n244#&lvg<*#UCbsw8q_kt< z7Y|Qu-7K$ws^6HQa9SkAxUl(dNEH!RUWibMaFdZ(>}b0$%ssI3gjUD#e+t%2UdRoF zz?!{oJ*FoY@*+K}RiE_rti7?2&&sQ&ymjlfdA3lH)L*UfrLTA0_k}{XObr#izRyv0 zk;e_m>%$F8>F%#Na#SXesIz;u>;CRzB}Rd$0Br@gog0G6mvx)gh1#R-oJz*w8uXKN zu#`e9?a)5uapleWpznKjTixtZUv#wj^TK>uOPu1Pz6JM%M;=r(@^{^;;bh^IUNq2w4;F~MG<8vOKNFO+_ zEWP#d&e?~fDO7mu*G&B{jalcBYv5aydxn#6{y9vL(97+ieV*_;4Ueytt=`?W)yn^> z&B}9zmp6H=OlQl!*A4yYsSaK08e3dKVPGX2Ac-9Rlmm8QyA5Ht&hs#2_7%)#3|#6I 
zZ&4+Xjb#A9Y7$`s&KDTs_Jn@IaSu|$uGuVI`X=Le&!{hh($(iY2yQ-!5|W{R60Jce^Q0Q0;m&oe0)CgYwssMmHA9l)N?7#I(tBUx;GVneX}?)~rNiDA-7Ayp*qsE1kV z*iMQSRrLetYTKwq2q1%)DNM5+7*^arTPi2E>VT)nL};xEtuuLJoG(3L+rl$gVUq7< zyPw2HHaW^;lG5!AeAyQLlX5U3(k@5DmT#{n!HL}_26j0~V{Cw1-m8vb9ZinwfX!0Z z!n;lKkc^e>1JM1W;&3*=bJPen01ex>u(#D`um&Ps&tCmp2$fD^Fl}9SRrM?o2qk~` z3IGNR1+q`bund-~t!Stqfj5~o5QzYC8XzD=GiE`Ol>Qb2;11fsU_Wlr;b%HjkX94w zd(;xHZjL@`{vn~~<}cTE8prd);hWo(dU|Z#hPXPMCRuTxSf7LNhG>~)W{Sx2P>)e{++DwbE) zco%d43Cn^J_Hhtu1FVoQ8bjV~s?}U}T1|7b7Zb*(#zU%93jA`xsJ5s+J@a1#Ik}=a&}20h(;88A?7p4ZQ4#TGP;9Mp(}UV%ol}DjH^Fn>b z<965xU4{{GFg+a1g(s+QPe2?3d=M!)Mq=$b1nU=KB|nZFNbo&y;RFEQOhz@~&w0E0 z1>y6?hzfPdG7=EW$D}>OB!Xy9X7DT@lZ_2-UCK|c!D`UN=c`%MVWL%0V%mZOWh@CO zL`=ZR3kkz(@Q#CIM8(gNauvDnL{x8!L`tR;^2Viq8De{k5l$SMSWS>~7<7g!iU@N9 zk0i#lXp{+pT?6%*Gt~Pg>BRK$^uRhZcjS>bUW%~nEj@Em3nv;Grd=&tHf>0%3-S=e zp zUKDopZ-F(3DtTo~XtBiFO@FHg*<3Do^gve}F?i;SbL-GbnDm^Z@ik_M(ATK`^E$?> z2dov}B&94rnW+R}ryG-(*XD&m^DFw0OM|Chc48qo-O^y?$z?R$zBg^jPkb z>VQY*B~I?Lk`^`?ez3W_vi;a@aWBr2ZrUz(LS)4^8IkUS&Qt%H(<+{}?tI9b-lK<8 zKdtyC4-n%!tt3zG`ahW9G!2_XiqgLyAx)kIhb|tVjk|4g} zuUY}ZwxH2ux!vWXDMWDlNd*IIV`-}Y+BZ4=SFHdTSo?qT_5#*}{NI&sr&wPR*l~^erJ2^cnk3FX zkzM??P3LvX<}6kRgwA(rUcNftrE}}U{3E?~orP}0XQ2x{ziS1kUq3AL(U5CD^BJ=H zKKI*cRDFKD$@t^v0jBNR#X-02`xc+L@2OgR>K*lQrB-0=m!ZJZ`@TF6x?J_;Md+=M zUtWf_ul+h4`E1|USJ4+DnXer`314mPj1#{5x^49!Hf-dlVp+qO%TXFna~b3uj;7tZ zwKN9#^}b=v&-(Q-Zv{TRkP2FHAjt!6j+$E1yKmuqY?6M32yO=yC+ z0!eG$E1RGu{ZE_=ZP$~kuv@cx1h%c!-|&M=&K|q@zlo&70In@+w)O@x$B0a_J8>ye*Y_IA!J{qe2#*2tneO ztU<$9_Mz6&!iAtV3y>0ZDC`7DnfQ=ugOhYuWyRw7EhYosB;%#(6+g1}Am5tw?L6GC z)ah+yo`PdJe&|oAUSmmZ&1C_G0ZLEu+YMx1@KX=>vFGho+=Sb?>t zeq4S|wIU+_9c$N3n9MXS7%&Xap_70kMqm?wO?YNBu^zS1@Jtl!8@zC-R`IJglupyn z3Mx1#SLhyZHbT$vYQU{>Ik_exKefq`x2RZ2iATMoyFIoBnf3)pll$M`#73)P4HD8c z&H;-iT3trOwEYAn&fPjdZQes0(?XnklZb5$GJO=cjdij))I}F+9NmN559CV|XW>OX zuhGyikdf~)4b{y}Ax_x%ZS_xYJisX;k{(y&ArLB*qK@{kp{kkMbq)NUp||4D%FL73 z$iqL--NZH~PfMqdlQc{OH@7$XXcZlu&3yAn^V5N1obj9vp`Q(cH8QtX0c-T`A z&-+KtI@j~8?&t6QbazOa6#S~24c$C|uqDUF(P=+Sl|I_Yo zg9(;6HZ*KxdD_qN*Um&}wd1$lf2U%gZW;@KqE};q!l+hL#Pw@9f)K2sOo)MsiRh+Pb7}CKC>6@% z#DeDU&}U7qn$^E*ni22@4Gs*-z-J$t{W);q$oH+b=V_}yv~xoM<~$oVMZf&}v!%Q# zgvpimArt{=>e(Z_^GdT<CsIP1Qp8cs->p_4f~$W1^)|RQIfpXaXn=sQT2DAp=9B6y;eR%u`oGo} zq^-24>Vzej&Lk~3QTLI+u5y#h8$jTWSZJyfWiCje2{+SdjLKp zxscr~qYV$K2|#tTUKG6Lp&Re=%Bz1p_1aIBMaw2ccTGbtk7;3a~An!kNgJ;mE^Tjz` zK-$8yXdklV=}nU!+i5Rc&82M^U5d%1S~Gdq+C&g97H4_Lb;}Ndq@Hy?=gmutp%o;fGnI)zMI-Qli9j!<4{r`Ez~Pc;IemxZEhO?E(F7}j8rt*Iny&UVy_XeY8x$=9Hmrf?!ThF5|@!>j=~PV#G03vOdsD)|z&39lNa zl-L0^;g8ltUJa0lV70H6Vs)5s$i#gOFvMqpp1evZ`b7rUEbFaV|5frT=cxqKlKxAG zJ&-z zF*>?&jRJ+nQ+(0ia9F&VRm}p^&NV467p;i%LG8rXhlLf9)b}VvfmPWH`qjf%eKDVc zhD}9AfpN~5wV=FcR>1%j+yqnfE6r1XqnzV{^+kL#B+*0tdJh{~k~Byu)Mp^Z@X3hW zfRyiHfS$86^u&6Np|lxnJC1P2Nsq_lStukjkq= z4)I(2bGB(NHOckrrQ9uPl1O=`(U&bq`ca%A7P8c;dFfMxM8bBnjVWsh?htvIVf@Ga zg0-UZHOaw6V@f~VFHh#`DyPq71u)0pZ#prWUq;iQBkG`K-!9n6YL|M|vo6=(Ig41b z!kVM%hAlPoyHT`JMd4C+(CgBU`vUlf!Gpp}i=;NaF`UGp6c@#%WO<2kis?37Kzu#% z82biGU`jF%Jjx7s8laer?V!Ex$3!&_kf2l7y49WPE*baSu(qWUD?>DQD;wU98X*B8 zX+99pk3j1l6|v~Ts~PsEm{^a9=5yg^nRmfxKD1LUINYjfvvxGF$>Un?)eLh3!R{TMeSp@mlqMkmt;1h!(JxKs}VLdBWDBQsp-Z9B|uT!)>^)P z&K}iP|6KGoZB6MMpIv)h3Bq=QijB+Yrko#d^`3F^0wVfAeG_x5dF*a!R6>-YhociK zRY`_*C7_H9+1QvI!dK?u(-Mi8u!M&=esUs$CC*m`p3Y)fE)B3?8BYb3{Z1^$!ItPk zd5-~SZf~MmsGqi(0-&4A_Npfi>-XJ#t+5clZrERGPA*x-s4gPLtR#z$kCn&iWyB08 zNcreJZgh@HI-0UR7Dq$s6kTQS1VI4X9C_;=_oqF3s0+wgj^L+EUL?fnMs6zfe)-zQ z98!Jlb5)q_h9$qrdp~}CAZfSsMG1dU(6pnxRUo0VEYmkN zsu*rSz8LURhh1{elOHi?r!73cV}Y?7tBbhbW{O;h3u)b1O& 
z&a+VaI1-=2VCVIbpdq(R54p1%(7PdG3%H&lNPGkwa>f8qGL$S6HMUhEa pk9WD zi&{20mL9wJ){$3+bYLP*>4;@}oh7HWnS_0bq{ zUgfu{#dA6^?zC$ngV%a6F(|FoJ&sSH%LW^H6zuY>%H8&{ z&11_KNeSy5$#_qAle_A?QlO2$u11ddbI#7(tbS>~Rq+an1=`INO zdkAVahYi!1e_X8Vqr*#^v$g+Ov$@tS&SSiwn*V>c1c@jn?tZYe~J# ztZN$#3+K9JdHyl{KCcTJxRjKRmL5b2HO~c^Ebipj(Gz2r1@x+&@p=z-Yks@+yV6yA zhqoRSnh};0baYu~`T>e?=T~+EAL5;kkqdc3qWcD2x3vCU1sU>;RU3D-R*>S;;1Dy} zRw43UG0{m?SSs+mED^tRo0HZbSy!=?6DUXD>EneCGs+?-3ZJ6&D0PIoxhsl9G=a2Y>tXWY%UxH;J6 zmTU42p=wrqvaof+dS$O##NcwxD~IhgUOKAZ8F_PuYt z@ptP)n#diM+}tZKOgZ||aqvv?H5=`jppFhsw(mC(LRmC*~MpIiGk%eZhnN<_rq>)WMoeW=awSu9xhNGF6-(_wK)r@w0# zLOP^u-`*DNx%O?c=yw%ls_yrxveWy&PnTc523FzT`t*IKx?T6jhx%vx ze|&8CT?P5|(~sZ7ub@@-zmauawN?0Y>D^A#gzsOEPr1exKdbznVDJDdSo4egimIw& z$S`6WBFe>J`10E-EBQQmY2gks4V}l9gL)uA-Uv-HVZYo}l~md!2@72wTdj@FR@!8D z#|C70_jK5ZsK`|Xi6==RHa!XuYDyJi9&^G4<#qSkubLV?daukL!lkZ zX2;g+aGjJ?Hys~?52@v`*s`S5Tw57oKl?Zw!0slX%$w~@XjpiXeWMk8WXqW#6Besd zFU#y21*@bUPLeMRm`JE41|L;d(W3)|J}IljO@m-JHYyZsdw3OY7jT7SB;8$&j9#kvz5eizP;h+n3(+xH$UTqq)Boc&98R&*{*yxwnRts z%jz-BDcZ#M9J3B106|Z9u@{id{bg0QkF0_ zDAQ17SD<@eKFkDqNYoLFbdL(LMFp;!Sjy3;o)g(|V>>fz@3!r9+1jf4t27204FEXR zTi8{OZ<<_PkEy4BSF;PM*(NXe@|KgGcKx2}@EWMi1e|3WzfWc!zI1H_UKD=7L2uLR zt49ysgN5tJ7?Aw07l^0$d+mRssZ(|t;0NLJN7T)0?aNY<+N`WT>VHqLf$pzOL&StX z+aRUU%f*K}11e(EZSIlp#Y1qNBa91IZ)vAW>)=wHct^z^2B5C0{(=#b^BPyLR)yiG{`xlNnoVFRK^F8OWDknc$7o-06YHvEZQ z8+nzz<~p{d0UJ7i2i2#kLLqNySzwqbd8PoKX2gi)vOd!kjNke|&M}grYD(^Gl}2 zXlwc|+eTdK1r20}Cp=ot-&-Y2k*SiOA7dtOSN80wG#pK3uWry?zBAz87yHGXp+?!N zia=si@;BKjH!~Uz-qi&o2Kyg2_~Ygmf2_OCIIqpGRO~AL$jcNq+w5A7zsjpVX+d$v zS3Nf{7rXBd#wMGKcAmoLVH_dtHN9NSwO-RBd*1Sp1{Q5P@??+}%uNvz z3)pJ`R_dx$rG$cEkZjCiB|)!GN##EVRWhbsJyg1w3Rr{GwWy2*y>Q8ilpgWpsN)ed_yoMvyJ3H z_xIlNr(l7GnNDWYMKmpYkIMC-)c3^PB3MhBl@Hv*@T`-0RihEIVPY`A(Wk6Ck!`vt zE>##REW-9yFECtTP?mTmn>u-|zt>ctL;Q3OWfU-5-DExPRQJBoN)%d=eyhz?d|hKN zPROluqRey{q1 zZi-q(VD*~{e3E$UW@t|~nY}m`*8EH(e_Il@j1-ncts;drL*MSW36R>0z&fh=VJ1(1 z8YF(G++qly{MNeARo*?u)&{Ns*aH=X4Qr?7gFF!P{-1R=)R501ikO~y2o1MjSn>rq zH9STeLcfiQne@4yxcR}1p(_hNkAm_f1Zn%vbPxF$^(nl3o5doHk)+s^sPYJ_RdZCF ztZ6lY38SVF1kMK46cY9c+?PN%yd%S05Ebp7>opZy?ZSKO5%k7d)}?0OCmJqJze}@~ zw*ZGMzOb~HeNjp?d`-`=A^MqBrt@&R^o(t-phq5nQ`sCAo@<2-Q?{aZa9FE8szNDY zE4GUqNB11)RCV&AdJ-UUkYoCh&wvnVAQ##fBapKuE6lOI2)uv7xM3}{}n&bbd}+i%&mn@z@}K6Z-u%-SLK3zc)|4Ce9C+neEr4JjyJ zmkmjlZ}jDYo3H18iflL|sYVu@lq}k7)81hN`2>2|1IJHh(@2LBG&uwNxMnbqc*=|T zv~9cqIh60~Un(5-HY_WHXx3qBg9EeMbp{9&Z%L2@AXRBc2J?{eD3UFWlee!jNet0} z%Z7qTZ)^Tz2`MJ9g=gjEJi>~p)_*We0AyyDYyl^l@Sg3Lbkjixvhh8Y`*#prK!4e< z(-Q}L{HDeAA+fwSiDsZK&Zo>#pvDhUI$J+Gky?VF(9(WW`+{EFqrCIpf}rUROFdIlsVVYk&u>(h+|M*mXh&OMRiCNclU0b&HDQ ziXoKdtv7D4L#f4L*Aj$O%*7bGfW09ULw(v=Qg)JDE==|>roiI|2avox0^8=e(*+aOQJyt4FoeJSonNk(+& z92?(VeE7xP7Slp&bbOJoV!;yB zy@9;0Y+CoObe7eg=}nStSKagyn!3uCp6ou>6LVL#YayrHYIjF#SJ#&CZmZ=uyrsrN zK8arMNVqq0iK=Gis$a1~eRJ?d(xk?@U72OtJ=0F_%0!rR_2stb^36_sA1uGtwe=rQ zMQ}hLNP_14x%YOHR^Yn-{Gt69N1s3|%)dDL{Ka^pB_?E5)BbX5*H?50IQ@0>(HHgq zn^$24t_$wH!d%DSjHj@in;DL`e#3RaPR8F(?ca>2Klk1hufku9r?-y^3_||!Du9e9 zrOkKhr&qiRAov<&Jgs;Y{%~qXfQ+Z#UWFC#bz{Ta`2WFpI&O=7{q5kryHT8K@S*wc zv~l`wvJW$4bHjZwl_wZ?&Sr+0AN{tpAjU}NJoqI9ME;{r#$N+F{^nKiv&49Vd((f- ztKg~OpZ8=}b=jU@ou5@t;H{?%wr$(u;d?48oE9H!eb4aXsnX)lZ)t?|CCG94IHp__ z^61EJdyurBLq-`+z|TN?f=B(H|7^K%0eNv82U!HKkHzNJ)4+w8Ukh*1P=r9UI1Un4 zK}Q80dSh#e>KZzuj@XN>h*NQEp&L^+SeFo`g#u@4=8aO0*_tXcvrJgD9a|!tDy*b8 zqus9VY)~+iG@T;m>s-QL)ZpWVOcg#g7Eqr-_9!sA9Db=)^F`x!AK_ol!i9<29{xrC zbZr?ylRGWs8+6}Mm)6%|6ap&P+r_`gGX+OHel*?1%-!MZeWDu3*Xt`c&fdsE!<_>@ zZ&97K5TR~(b^*0HUe`{RtUA;BX~&hJHt4rCdDMLb!!ExeIX7PWGo|=S+Xz*HL>IK5 
z%p_ZC)b9-l^iJAL>Cucl=<6)T2pOCWT3>5BzZ@`Q(L#7zO?xlyxS!raU{pTp__BsW zMpZFuY>eW#PlmQ5ZGuHETyxOltFIM4fB!nx$8Ne`{-cg&Yg{-j=q%Kpb;&N}>*>8B zp`T7O+oH2YDR2IV_g;p@sw9(5zx?dwictkWoV!mQfAG`AZ^P$u=d*7X-`<>kCN}fk z-KXLF18O+;B#I=M<7c#1Sq_~l5NB4B3M^MawNBh9H{VUgi#tNcqVSuS zt^LJZc8cUkmDecRvRY8>sC&)vKd^z+>}pcjomPy}W0%Yo%Z{;f3n-OR z3HKF_sd*UTihb`aUO&F{NO3{W-*vr>aS>xWKDoFL-%h9h~MG8(KACw3UpjCgD2v?p;!|V=s%vyU1Zdfu_MJ7K9u9x zqXOUG1)s`2xaG574p=rl#KtIFxR1X%8{aI{y3h+gl~DFY? zV4#>T<`3Gsj`~bJrM0S|MpID5C@|ELx0vU>c0FM{%kXK?|DvtyzsMo?#AV(89_VJn z6A8I$8@=yFa80|2bdy6tGZKdoQhIG+Mn-Ri=_y@qwJ3Nae z(W_y$5CUP2UPWZ^N(L=eKej1Xg)yN7Tv!3!6Ni3!6Lw=_U6%7W}uyv{q#Kx`N4dlKR8;Fdhi`c`3>gxn5j zNcU>wd4pp$!D4ZtoGEzG>+?^qiBy@=>rNk9z`xE_f_S&M2%0+O^N-zDmwpwtO&5O8 zV3zj&lE{8bikzXOIkG1rQ}F6rXeD)G+3Fsx5=C&{!0I1B^wb+!aA1J#&X0@`-bSC= z?-*hZjqmb__Nz3R%KQP%eM2YC z?YSf2wpOC~C!uEVWOVIay_@&(u|t1mSrRm8$B-ot;jx^C!6C7d36|ySld4tZrwVUogFG5|Vf0EoO4};Hy_!ivQIpixsxT`q-E#>=N&qn2Zm4cs{6)*Hm8zQ*y{%xfnn~2 z1N;G~;pfYzMfVzvHQjP)#&9MqwxMMO#8jhhpEW9Fu?VX%o0TKyd&+&Oy+Nxmy z8$wIM!s#jZe@R!L6n$?n<^l%@bftbfn(kCQI~FwhS-JyS@98R z5wTj_Z@8D{DRo9hX@ZnWsf9?|j&POweBYP`x+g{Q$x4fyc14-$gaE#k(&vJ8h+2Jc z?(-#l3xdf{&aq>-Fr`@<(jpH1ig!N=+QUf=Ne!3W0_Vwm32IX)@sngQNW^~4=G_l- z0oovWRR|@;JOyNV4_mdhW3{1cCZFiIuTxxLJ)|ncC&n0-`c1i~!m#0z?hIdvAvpv@ zBwR8fdrT=?lV&&;4u`Uug-Rh)mga_YnNh>VE#xmD>6?onguVc!iR?BBabVmGpkWsq z;NWQ}eH+g*1{0KIt_fio4JLL}Ri_reWafD3uqc}^*=p}s#xPu;ci}*I_ETKE*&tc* z0f4xJL&N~y{g*@*UoW4`AbBHv(~<^NkLv6 z5!G&hn1CsqP##$^_ganryZGtYZU`i#z*bk+funBw4I*qvG*_sBX$!5M%#iq+FZ3mk z@Drt=oP(x=xo=!3zCHUy9-6-(?eV@fF4|^uv$j9MYWFx7(Ei4%uju&%1)7y^vV6^W zYo}cR-jfy~*q}XmH0TIV1+rHgqm5bTT2vAz^gqgp9h((Iu z2CnS@Gtz#NG~RIOrK(h&ppC&W{!zKU=p$qSF54{YtGq~cq^YjH8v{PlJ=Ty+eI1@? zG}JQc%rkg$Wyk3&U(d+RQlQ=@)xcJeK=47PW9^u}WGL5)2;XZ>9n;S`JW-;?)%#-B z_~iO%TdDNZ!bB<&j0%OWx@*nip^r7i7DYjzn2h_zwLT31=k)T)62As(1fNG4PQ@qhHs;^UJpc@(kuagZt_ETMFg(0)mG1zf`b_A zAvVCbR~(Mqu;5G)z%@AG1BBzvPten4d)~^b3^8mG$`(5gt>5uvokM`3r|Wt|kioe` ze6Y2p>IAT8?X2xb*j~4}EG)kBRQztb6VXoUv#Qi=y%E6*=uJ-Cb}Rn*pzO=R&2J(T z83l>9Q*w`6lQtJ5cH}Z$DKdPx@jqR9A$ou$_yeFoqtnk)8a+`YUIej**dXw2P8PCh ze`J$|3BKNT3I3O}l?#08FNJw>v$3x7!Y%@=HfL^!!uRc__-YlDR0Tc2FtI#jb={@@aC z^h4`yIV}pX_m;m;)^Xtu@0ZR`yc#fCq_7bZ^dSnRyTzS8+1iJ)U%RE_YE1lV)KLwo z!$-ziQfm%sB<@YgmYTX9DcZd8M?C1HjJ#Z_DChMtG=4Rf_2OMQZ}3j_t^w&d=B~MI&A3>#D`rwWr)Z0QJ)~tiA7M4abkR-Ty$IuRx z^@dT4+%u_u@gy6jG9*`Y5TgS}*~K$qD%AlIaNgtIMBMxmIrF1peverxgX}De*>r;j zS*sEw1HG>3&Pxf+6+w}fv?f?U9~A}wZPwubEQ}kKLt+T@PPITLmM)X9Jcdz8o0{vB zNF1&%RdR--ItXdrTQtGaW@rb|m}S?b5Exn9Nj$T;QdBZmmULZzw|<`@5XNVF zS@wjdD@`8N>%dz;^ODaU+)3tjOO+`&NU+oaW%UF?SbH5*&jJCd z3~NUdPiLa+oQPE&FGVwW#^Zr3lULtt4XfvJK#&Ba#SW>m(%BKohVGL?XqLp^K8|Hc zeWYZ*%c8GhC_ z@I|c-l7ghP^mxHjVG#DYFfi4vT<3(1NO9-@C_=)(m zDez!tYk<;C6pNhHaQd*fivKJV0UZSSCIuQvTwV&rx0rXlpclCOT--`V zAg+p5g+5>*H#Em*?*2FJE8 zZ%5P52B|g0Itu+*1~r3ww>nz_>0{Tv?wXdc6|Hbk{}!!XL1q#_XZb5tZfaMRj!tqs zKPj@?5rtIY+wcy~l3gf&LpWG(ZF#$?R-9zDMeZ^w#G~B; z4zz162S8ZSwx)%}o3K7}Vh~Nh6im(9@W$bY5uTZ5{Ka)?TZz2sAV4mO4bN-KtKtQU zl+fAyd=02ThZ%;YOEQRl2XnVf5uS3`QtGT4b^}a)O>{Mxm7o-*$3wdp+Aah(`OJ|X z67TaEXj7*&QNY@~6^Dry5@C$gL#HSkN$k0`IZEsTsHk&T^D(rIVik%fJ5`K^M*4we zmCe5l!?y}@OO!G)Y%HEwvSU$W0V4W+Xt3~6gW0?vUXVcDKbrKO-bwARZdW6ALOQSc zDNb-zgV{iii=3~nhxQdItHW8UFQ7{JkX8$oFl0jdxcr3sUJ15x!=6o%82HR}no~7< zR;e9wb85TU3tWz0D?129gKHy7kX!3V)7bhbgcG0br|K)GJqhSwhplBQqATu_ANt$P zu{9__%xs#ip~~XG=PI%SN+~IU$PtUG25yb+3QK7+=(lNT@xONFkzj)E%rS}%6bv44 z2hGkk3|}F;eGp@*JY^qIiKyqhh1dYBp5403ZWQg{RsZU znWwl85ve{Yi23Qs4)j%ru+M-6WL+y2y*~3?^UR*QW%6_AxBLr6TE#)eKUQm#km;}H zb|l$(JwIUj=+=X^6>VIFrxs`o={>I3L3{3b`G%aj(Lu!Q{W}}j%DP_SWSJ{h!Dnwj 
z3Qi{o1Tx-+X<Lh2>_D1Nbi+OKHl;2=t9CY7hk60_aRG8fL?_IbYrcV01xWxp*0RU?MuZIRuaH4j^ zx3vCKDdj))FFZP}2YS-v=TLv-2ygd~HA|`+2>vRi{AI&D=UzqCI#?F;4z_Z)W-PtP9l(h+vBj-45Ka~I4J_IXDLIxQSN!O@5KZ+C73 zh$g*M{ul;2T` z;yUjLNA4YVe(>wz)W*yq<2z$*>BniosppuhuU5Z_N1Z})EjL=99!bR7XpScRZ{`RY z#J~|(Z`v8-6V}o{$GM60?^;-HRBzcHI#Gw0&lPGK50u_fJ3Vy~F%8*yXu}QLCZh2u zi$lsMRw!Cp1}6kNCP}Ln(n=nG9eio=dFl}8LcWB@P?0^i);K*JbyJ|1VUw<~lhB5J z7Ab3g(G+7#vabFYzRMg{$p@)L2o z2oUpLLDW}-MgCYD`H6NLYxrqX<0&=f2I%VN&uK5eoRu;Wb}UF97RnXj%|-5cPTVRq z=|01>raf%B3r~KQ2Ln3}bn=LUH2!hu$r=5~HMYJxx`btowT_c-J+yNBMR)BG!F-?^ z*fHbL)BUKe1kC}BU8JYQ%sq>~hzSLOe4-79{%=uW+*#i=ZvD?CarM4DY}$p~#Zx?y63m6TjSl*!ei3+_+zHvmJjYNsFIMo^gYj_cRUym- z%h2AwSX7rOqb2gZd8%y5r;}rD-(|l`cK4R93ugXO8BEIg$#>f&ij@aFD(x?M@bMQn z2!n!ikG1WcscM+4oVnKc<^9Zco{08`>QcYCW_6_HH#LPjA2x0axhv>J&o=7a zJIlHEdGgBaee~$v=ZBY-rmycNFtK`7U837zl}M!4%NEhQZ4iWZ2U*g8euFo;Rq{&Y zrn9+R$S}Dbw_Rjg+e?qGc-Rqt>G=+|vyJ4gFEXK=UeV2sEEpo<7Eht1i`axT z6boEn!@@sc5tZIjP5Jq}E&Ma9i*B=&hKK{*3b8(lO~gf()E8t>sl=%l9%7L%+qYl& z=<#BoYtmxZ%i^Qh7X)wN<1XsrF!%FglP}LByTViikGzFUPOrY0T{5_A#f|WY?aP)q zEJ0M9{R`u^Xn4>%wsw949yiu5wtc zgegnxy#DCAJbL_q_)#!LG)Em}VP{Fru@Y%P^AC3su(T$#?8slva35wu2mbUqJ(NnC zxQZPPah;V$wgwKiLVddX8?XtqP4KW0l*6Xc^~3_$^2q7rr`CZE#z9HylfHWBtMTGK z1tZfsDD--9GI60gb-p0AG{){@n8cX^>_~qafGUZ_@F1qcV839{SWs~c@?EVAggF?X zILm5R5MslUBI~UbKbEI{al&SUl37d|K+^?TNAM}Sk~58me_DeH2UR)S%r>*9xtJpp zP{<>^*I$?yLK;v1wK66$Um`l6+iuFA!-OVKkeq>`B#$uU(K?($|l?sOdUT`!diq(!s3B)2PU|Nl7OWzol zfT~wf@^u`y*jNqFh~scTE}6Q2gyq**ibBbWC<|-YeiSb1qo5Rpl(kc^E>+ZsmgAMA zvb=v;5pRdFV8>QCCu0YE@0RUT{{o?N{0Y<1ZBwD~LTe;=MG#QpVf%p`%{#Rjv7EQi zS{!ESt)-H++-?ZGm|+;ZI|jGZtZ75fiXs*R?P)e}G(Rn>Xar`e0|uOx*%k-u&aSsa zQdYOsCEKkl4hcf4xGiLE%mCRXtS4&tumo_mR_Y_!m=)@G_dS7OF%$?C$dF4=Q31et z|B-*Nd1#;kafp;MLgXjM)cZqb_6|=I=GWVHSP@&XskUN#@ldyZoVvA^nrX|H(`fu9 zjWxj(kw4}i0 ztf}AS*tRX!LXC9ulz8b1+%@ZwN%R7Sy<5986F!j75~hf5z_@u<365Smny_12c z>pkV?7{q=-2)^ZrH8Js~lUu}rk;=PdshdN0CE0@KhSW8(dqUdC2Vss|s1BO4`er8vy;exzezXw7RuA43 z*_X*`5V7Ve-cj2%!n%#oVPWrzAEoWPg%=ISlALb`aFnph%~)RGXxR3f*&X%QEsX@3 z{iZXTpL@4_q$Fk2wUTV%ytAeRN!_ z#a;onGqd!K17J;Li5QT9ed|c$A_|xP`h1MwDHAVs9 zuUDm)7EOfhipReZsgf|UOpw|K0 z9AV+*T@o*(w9PMFUUg*He*+>^>A5egA(?V5vV3~0;W2fCWodh?5MWuN!z^Sc%Ok*6 zGI~MsS?mj}t!)DYEetR)4YC%Cv``h7I#D3EdT(d?rmPaeWxI!KJm)gpyGvwlMQdsv z#MwA5`)6X+ZRBoT%Q9L|RB%jU8%+kK34@APStUs_uZDs!!+p16`Z zLDw_sS}K@7Q6AT&=A~66?p>Qdl96_!?4g5?O33Ne;WV=BdPmP*br^vQ=(?l>=s@i= z%A>SKPj#U^e)mI zpWS$T96JKbceh$I;{ia291B&{f}NGS3IiAcC8POIiMqu|40A*z0W59~g`gNH2&6sQ zM#fMddCbNay;APAqZwkp34$%0vUJI`2DV&nfk1-brcS=j>#yFZ`uM(CO>-0;^$Nh+ z(%@=IBi9s2mRPV$`2G7BxI@qw=A*-mU!DDv#~gCvNcUTMin3(na6Z(D*>3uJlzm88 zA0PpjYcp#FZ@Dc0%d9k{$zVTQi!uV6jz*fiR?aaY8Y{2X1T-i7hT%mlZu`(j)Az;c z=MqYk-h&{PFvpZ=E}1xMmx`4Uz%!m$NgHO-WWo4wINvG}$6tHk=-rOTc{YAM?z`_0 zt5af7B|sF2=wo}qqs3LA&w6IfGFU9X`j#ux4b}jb#P%Zrw^w=5%?jiBNzq$T(*^AY z*nVwMS!KxWAlvKv>FEH4rr54YlbwXT(dur&84~xHdeW_+-TlYqbmG)teV zIlgru>;K~J&Eui`_y7NEHnSMl%-G2?_O%+ajHS%j$ug2cDoGM8Bg&F$1|f+dB5h-- zkZOvMwy~5YGPJ2wV@V`6woBzUO>zzvXZK!MI-6>+yU%@Ar$V z9v5T6Ay>7qW^a)FIVfIbFN?)*cjN+rY8Pw{KzcRW4)LA|zzL?pl$4TQ2B!jtsPBvE zX#XcLlw!+qEz$3{&2$NP12Nki8L0~Gbfa^vLX{yA@-#F<&-<2*$@|(F`1)O*kMEYM z5z~gOz9~B|;g%>Yy|Xnx?1Y%jT78i9WQ^}@!Rx_Mvzvgr%fD6kj- z9LuehC*vecoNtZ(wq2*4@@BScXbBG-CA-*+L^TCG3w%;)Aq6kl^tS4NuvbK-!>A(A z9Xb9amdLs}f9q=Wn{TS=yvfS7=xuH`DeyWn3QlAu$B~nOE(|*O^p9*+Xhj9)Uz;`1 zlZs5Fy&MdTvvwr`v8ph%q&T+4_8JX{z7W_q1|A(4CJBw6O=G9(56J|ZWG|uaKz*X# zSDs-akj5vU=B3vmcsW^1s)|+Xs5@;mJ#Drv=F86#TVxJsWb3VtQ%fF&?@|?|cqnWl zPQ~3XIT`t?LPm&5D0Hw%Ie&bwLO2OMb%&*u%2w~7NnKD!|B+Wmshw`G};{?xw&v>O#ham=deWdJUT$(WZ 
zYItl_<8%=Q&>S4d+FOHA8x^vM*EW5N?dyRxa}%|U33I*uf&3*_jWYErKjf-_0KNJ* zXzMlI2W{B_l=Y3`H))bEcl-TiUINsTY7pJoaNS{TaMk$b7vJXIeXh&m98#`Y z{2g)5w97^Ld|BYwssj}@?&vReHux8rHe3qKbsgyX%QsRzN-Bp zU@>ZSh1pWidTUvTWGF~gzUKoI`GB*j5%t#AsPJ>XOS)20wzMtUum>4S!6XQgMIz)! zdl66F)#Cj584~62AFggon zVA3n&7FE@6c`kq?#svCf{0;zc5p9NX(bFQ7kcwX~l9sZ-^nHe5Dfm$#00?knWX}da zBq+cu`0h;tAlI|xL#Lxp{1;XAfwDxYB0(^;R7VwJ%4j&qhYaV3_c5_78l-ClHth|5 zmLxSO2&*F#ocOT=nwZ8|#78p7C1dQhIS98!sIP<`0OtY_yjh4^@g95^nZW%*h-68P zlL=$o0I?_{iM2z5BsC$->0{#U%_6~bUh75p_xyx2af^S<=H4M4!vq|25@4p8h(%Aq zA_m|DN#a<*mUoyh6407Fll0q2*1Th0*YeG7gf{?_AcmDOS4+_I0u59?IY;%;j%gY! zKpS0XxPCc+#m{tt6k-(29xNdy4S}L8z#qL?Qp21aNUgWqfng&d>BI@%0^xgT*gJk9 z$zK_mz3Rj9tG$4hF46A-52L zLL!NY2%zOX*DQRar1B&spIz;)UQTS+re1x->UKPUdoL|+SE+yGJ)@n3@gWY@B)|U3 zy#lkT3=P3i?b=FUgz{qTqUARJB~vCP`WhJTpb_ zIFF8G<$KexM`;1FGzsL8pEnC*Q)HH7SaD)<&2Jkz2Ef<@NxhB|A_0o#6z;4`7!2N6 zE0Xq>-|$IbLLvs$J^^!LvC{&7St2-0A=nyTsQ;$?>GS$QZi2yiw4}&80SBqiw|G5Y zYqS=)L?kRvh$ImsByzy4Xv7$8pScLEqyfvGU?zhJ9OcW^ldz3U$!Q|wi9=qIfwjEA z*HQ3wOoX$%IfjXzFWtmLj2fEmxgDJ~Y^ z;)WUZ6VOM!35Mj7^Ig7;t5lVhz&e%wwtqtbVjwqw z$A3B1`l}n@FEwhd19N=NupOxJ_n5Jf2Y8e!K8Qv<*9B$re_W?^oc#-R{)!okSa}-z zD&QYCP9ABtxQlx4o{LzZW7b^`s+Xic|6QXt#VS6QtpBxf;^3k8P@}ZU^{Ynx zz`{9w+4_+MtN59W>5WHw7B)^)d-eWj8z+Bvoqne7edpHK>Cs0}je3EODH?ik*8A=P zX<>SNp+*fko&U>?lfTR4{ihTEpkt!%BoAiXym5c5sfwXWwkDckWM(J7Jn%`FOv9|@mzuW*j(~h}qId3JK9Z@?r za+2GKcz0^qq~p8OaU&J)&Ln<*eo7Z{Lnr-g%8C?Par%MbH6@uVQbz3l_HX?!Ml%bk z3K5R|m8vK(6zBf?(agN-0HQHR|L;Ere}$y}8qNIiG5G&ts#3cwH2u~=avCdDAnSS!!h~697_qODQ~p8rmTna(|KRdkSFBoOf$+9Dv91* zgUkxOJ@3$DXCse2q00?e=1jjj@5|2H(-T587pr{DPAd1D&c>NA+ZBw9jU9C(_vp`* z73~~j5KRN;6i0SclO5izb2KfpkL;XBjL?GvC*Q5xrujI02i-WL03+8P{sNU7_Dvc+ zxs9rO0UcjC76{iZ9NMPBq>hv`&s7>;hqnXk37e6+9s2%@!Q*__x(*kYo z)9ty1(aikk@lSUFw6TbfRM;+R#2hQ+MPFHh52{#XZG5Vd=w2m{CT{dIv0t>VDt|wU z9K5DkE%x4EGRBE>b?wsQL2@WPg)%QXH8cN>-MH*9_kn*;wLnHw9Npx%*l4~RCpPot zKeY8Wzh~E%d7ZC}f0dMl@UWDUYZuEhB-k?O|t%<25vmtwE*0>BK|r5WkD{F z=K^R~K3rME!gvI)ojT7y8_JK8%EO{16zB!q33aVXp~q+{$10qon>>d`rE6p#EH70< z1$aZ(lE&lZ$?V0a+D(2&RmaGd?UL|Whxh5tOvX$!Qc~pdn1{C=o{;I4tjTp3WU3t{arz z_Q2yQ@{@n%>eDo$OT9K)lX=)DXev*g^VVLzLVO?MAIg4bBH=4&-up(Pk5^ci)29qp zt{L&Q|CsVlXHk;dn)h8NXM+clDJP)bVUxT}C?|zUNiRAPD%%6u(=Qun9HVNKRu60G z7qO+>9m&Pt`rn~@O|px0%*ry4gxi_~2=-Ot%6*5mmIMUXY1|cklJ4VvvDa zo)v|6apY@b$Sj1)+O^BYR5*R#uu*7hC6SNYdve#?%C+y5*1X;@bNq8~gmq3niWZD# zwpOvp`Y^&e08X-}@lDS3pgcPpS4{s_MD*&0f;*_XN+U$J78y>A=4hi;n3-W5nCu%l z4W%}o#C%pXSVT+FHeeuWDvhM0<#gmu%B%Mq)?)5IIjMFA5E>7^L$DNERQu#LR%j+k zdjTmj7%ncxc0l^!Q5p$J&xp;mC#ILP5lEgyoX4ixf=*GYloc*cz|DlN^%W5?KSsbW5pn49>z_WqVk?pY`@OsvL5Dot4+RVn!qfpi*rA+70b^BR^Na|rtziJZV4F-t~39b)u*xC<2SJe@4z66ktS@ z#JgMF9ek0@H5;ddiGnXkkO%)1ykE>sN}}w?L+%Cum$3)`oaw8}mg)kcM$!(vZ#@7A z52$m1Sb(s72$-ppxFV+^NkNN*{QVOy#NbTJh9M(lX7&PR~R*3o_ zmM83>>|gKC_H=|9QZbJ#HoBz)l(vLr{>hg5Nhe*xp5?M8dc2Nf2(d&Ud~?E5#pF*c zOqIUbckQ$Vh8_j|NMcP%EE+B%%;ZafknE5V-V(b^g#qM|36{iAnYlzJ^^jlOe#82V zmwCy@6k%@ZbR=ihwL?HOiv?rGBr*4m@nJhLX?$0ScvnDSZ(5JuIs|*|>q7Nu+4PWf z;4aTkf}d38>X29mf*e3nfD#ELu>3<@+F=vFqi~M8!(2v{!jbB5k0P|2cH!dZTy>v4 zL7$lTNG95n2P{bk*2uH^IA8!zVuC4o#lmTjzx93|>bd+ce4khdG*bx49Mo-z%r0W; z`4s2}AmlNSWx)i7B4JfM{E43FYD{h{1F>!fHo@b(Py~hN|1g$wyiE>Eon4a!T@*(Z z527ZKQ%uPn6m)W3Kpjsafr+r>=WqCWe0=0!^;^hLxL_i0=7CfBM0E-}l#w1w$v>ID zdyN}gRZGLitpMT`>bR}fKLM`i=PprVmbx>) z5*uk}qU%87`J=vDiM0S>d@4itO10(kLIa+^DX+HJ1%Zu7#Ec~$V91#kVY*3 zr3pkt6!O_jiBX4!QE<+cdnK92%21oi+$u{oukD$kpe2cAE*%=gcV$*FaE}g{RY!@( zyAt^TqCz1G)<@i+e7>*-j+c%Cub#8aIA_ve;mvnDVU{HJU#wqLqHxY>sD^o^=0f*s zv%#%qPFfnt-!wD#2OgF!){Q8sjmXx0*!U#4_|aQR#xs zsmO@PE=kWxv0l9Y{Ypjp%B${9*Q}+dXOMXU%q9amS4sL@jf;6OA?I=BqgAfkhvlbb 
zO=*t}6SWD`w2~JgZhJ|qWQXPVDlKUZRO=6Q*7Ujf_HPv#3kbxsaL%6>bDw=^ZMf8>8lFC(|K|H=OjUx}7Pel+_hbM7Z$mPyuDogegBz@`F~nlab)FwgOS3};foe;l-J$j~`+Yx2L_4N7p3e zBC!6I+gAF{PiN9*w;R59ohbXtJ_UKhBOLXrC}Wkph<3+-s+y<&u^XAj69WMW=>x~z z@{wSD$`cXu9^dh)ML;*Qlh3Zuc6Y23HQ_g2U^Ww7@TbdA^N%uH6t{j*K`F<6m~17d z8Beuo7yQyz%x^tu2q6#yRqoy@wKl!0-%W!*u9|)ralL6lTXEZ<%>F^)H0^}^ZA}CB zX^(y%St{rSKGV8tZe|1b`tAb`TjzOPWwp}Q(y}`=qce?c@PObkn{B<6m6UgLueYj#=UKusJZuc9%68`I!LBe$!c88>YNK^&R#quc1+ zlW6hn^V}1w<=tyDYM?jiXyk?PC(@Y`S6->^`E+eRbgdzsv)bUW*QfK_#+0RbDqBoc zi>=N%yQn0Jxgr?x9Nk=Ve%-PQscOrIeAKtis8B<5`<|YYbOBG-qu1Z^B`lxCBYNPz zoza~;NgUJQ<&)$jK50xKct_W!R;m0Jn8{TgF@pP};Yw73Zo; z6jORTrgU60_A!2JBysf+V%hKXVK<-1Enf`11#gm=MM-Qr> z60LkPo&E>~_fFS(VIm^G?6i51`ngI=_GQLQlU`;ZqhCyAG|RwW5p!u*?? z9wr^|j|+}mR}MKX>7Vh7mdsNq&9p2v^9-ma32TCbu9ZRC7@J&= zQbZ(1$XMwW*E`IT zlNU*OGyRlzLx;wHs+_gi3TK?raD5$4s>@XV2xUvC=Vy#`xv_q1XaYjge}%oEXQ&*r zf1P~7k*P;Tg=dk-KYs=Pdyg+9blUHSKmYTzUwC|x!sZG#ZK8uJYPzRfb2R8u%Er#~ zX;({Hlcko1$00^`Nqt#;PSPwK_=u&3>0u&8MtgHgF~h-RX;_NmwX`OV2RIii`v8pHY0X1mj& zQ(bh9Xr>X)Cxy29N@IuN^d~cu1dP$1Lg5k0ef^=##Q|H6p*E~v({ z)>&<)VfBrnh5U6>Wz|FPjde`+pq;}!+a6y@7c{z~yyd@i}_&a&dwIhU-qq=YrV ztrA7|6t`AQ-dFj@)BfL;RGq|o|96tAKWQcszWBcaj~@milZ^f?;PK71S1MH!@1ZPy z-YjhH4&vKASF@XyIZ?clsFO(ekl3hEd+Oe7VD~c47gUX@K<7qHzbzd}E>W?*8I@hN z7_qqJr5zYb-0(n_W8$7kYp@#$!L;AEYrAUdLL83NvCp2IFW?p@l3^(JSrO8nucX0Z zhuF8EQu>(KNwTooA6QAJFJNj%VQ8rdTvPe%t>gPQg;>AMcC3>0?EuR1q; zJ4nvyTzyw+Q{&2{4+M?TpkkY!B(Y=BSC>LK%+WT5axL-a<^q%k|L5bi zq{?yvY3cS7Z9iT9LCjXouf!AIfT+A(gSm=G_91Kk-Kr9~BK1f`S1sf!hvV_r$lnuN zEga6Lm2Y3FjCj6PQRyqeG1Pa9CDE0!{xe&dFRkGMgf)_HmGI`^8_Zw%#W(SI*DhCud0q^B*73BqA*y7j)UeS|gju|Chux#Hi(T`j zi*8pgF?=->`A%5r*I0C!m+GWyA^qKoXS!xLQ|_37`pasva^AJVuBtH02hSfSe6Hq7 zT?%(u`J%Vzb4|JDrHHi;UOa91e4Tgn()O(@U;eUv)OTIl8T;Vn>u;ay_)?ddX)F5& zmA}-td0yUK@SuOh`pXT$(aSLlwvWIs4Sijg_trgl^)cbgO`+74IQE$T(@9y*+x4&R zz_(p}`Gj9S>g2Ys+4Sl4C>Gp^lV)2J_{OXw_Ri9=Pr{Guk`o?lmBUvI9qjo=!Z}*7 zkz=3YH|GXtB$723onRg{43cw=pi+<)l!HI&g)~AZp;85y~Kl~Q~DNx0q(6(@d5Jnl{DjS98yzZ z4t$s2q!_>jj-dyK*+KE`#KU}@&vnC?(&yr?Jw8)*5tK}GDl5QUtvZaC7?9pSAQ_T+ zjl4Z)EA_%f?MO^_;nMlA4zq;rL%P#weIUc<18VVZi*zpbRoF5+Wz8Jj_R^l&Z7VF} zdkezv*@8i#E1nYCn}cXbB7=1skAxw^)le7)lA$fvOlsSMlqZkGX2Jms@fzX|E!zSK zz-r~&;5e?^BS+SYki9gs79SSeY-wAcS_YzA>U6rWL1HQ#2^Yj~41892!0O?wBl9%4 z4q){mSxz=dKX%|^zP~yuOEW8>vqD`QpMJd0)WtHlvIT{2kV=-Y5{R3MKWSRMV(Xok z13=+Dv;)q&CKdLQTL_}<7pv5FOzd*U6u6ThSw_DnebL;U zV)tm0?g%Zv?AZG8RRIVcAS&s$u;AT#W-D15;FE_~?*f8@enR0qqijkemX=~&QjB&9 zr301F3ilEiCU}TX$}4Qje)osqIk^xviF6Da#M)=kehHps55XhJ$uFK~wKp9bQ0!sy zl~bbMoPYNgfn=bp9&d>6f)-`+>QhF~*-JDnQah#xq%7#9SuPMMHM~V9oUrftmHkV2 z+26#JCXMMepK{bL`Cqt~G>`hDqgig685cUF+De?Uc}geHM7Q5Ve%4{$!Ii1tI1z;U zPti1BqX-;jfm0kv(R*+TV1G>m5*TcvB4`9`UOey<7n~CMk|5zL4IHN=*`bm@ioi)UArAm5iG(vQgj$N6RXsRJ zB&;|@RG$MyJWwRSVHgM)mGy~(l4K!9h2Q|0P!t^13M6T|CXr~^86Jq3!=sqkX(7^B z7YbbWt2p8&Y2bSX!AB7mO(sn6z-cCSyJCui7=w%bjGrQduIULoNTC6&lrh!;-zdC@ znB=IK7M%`$VoAMi-LyrP;KY@uvky8-@3Xc>pj!cs=C?;Fb&MM>50H*X5us;3D9t#L@-)P!NfwZRxdA*Ug6S;GLeG!519cVX7U6%=Vd{r555$k~wKSml*I=QtRswQ2_b_*y zi-%uu+X4#Dh&jObLZFm(Vx;xdvJ@avc>HXSL@9vl5rBFepgDd=5(`uTQU?KC5(DAC zjYtxqN-4M&-iZmTqAc2pX{Ft*WN?~${KpBGpgt6V8LIdUH7Sq?3M+sKF;g5Viwvn) z{uWswQUqXvbPHRVJMtKr=~!I*Ui{JLC=pPq#|3lUu=DZo!zWH$X{X|>I7bcv>fc~` z0O2TSb1etN0H?KEV53Y)NNFSD`a$xW8{3FCze++18-4&1MLAXCOxjw+k$irN;CLQn zNrfwceKdSnfdc(Ao~mk=VVzYzj)C7)>6Q=86$G>K>W;0?jC z6=N4bVG;c~=Q&ilWN~o~{uxp5!a4kVBEi7FIBx(jGXz^`hh3NzCk)S?>`>AQEQxG) z=>qbMlndNnNF?f&s&{xNy2~P22oVzv4_1D6xMF2M8HxyV{gyQ)kbo0m<1`$=J+H?A zR77A;`HADB%($Y8m7hVVEGFXyVgwSCA|zM##I}aqC(48`;l+KtEI0-8iHX4Qxaw^1 z5)n8>N`&SZ-Cv6^yh<2AZ;jvI!^=F|Q?8t%@J$g!1q0ajypqEe>V{P{=>THw=0Ohl 
zF?j1;u|ncF5eDa9(VhXTPh8IXT1i`lNC*cku}2!*Kq#RHpoX02qS2zN3vNfS0=#xa zNjcSd5+LkqFDdfbgKLY0hEFgKY6_6NU0a6bprTorNg6JiCLzg_Af-!v$v_nrhFOA@xlSM8IP3X&|?fA!8p@K$K%P{cc> zo{_@i=hX&Z)K?_jrO2%p;1U3wV-Z-u*?yNO^{F15vVoN5(j)=KrM_Y&8a+Y0q9SBB zK?4dNnEw?CIhE!tSI2UbKk-(6$-hay0hUFE39azZcx9MopOfJ$M+$xW9h?+mJVtMV z+$LT;d{M)pSYeu?B?_TWD5U{LJaCsau{OAQg1cf#W<@%=x#1Z&$&wl%meEI$(_Czs zyMjHo$(4$cztD7c4u5})?e0OW3~rtzO8xwPeWREQikMkrOytm9Q)JMINa6kGEGd)p z>j!3FmdsH9Mwk&-8aKv-MB+bOrHwcp6`2apbPJV~I^ZT9&^$$!h448ft0zL{AMV&; z(qYiD(a8tt#E>OvgnHA8?KC@u&#$U0z-m2YNdphm8mSdk_CXyFR*_&%6qLH4f&Q6r z(U@o?xccxE>gFl?^JWh=o_w$}tP3NoZ8l!K<1_MthpZ9zfl%eaa3Ugrb6W4)b#)Q4 z<`%r?)I(>v&=2zjDlIg?$XQ2Mp*b{c=23*N38jUZGPZYXIqZNdY10bl9$6EGd$+c# zwS+jB^jbRiwy5-|Pwr`J@7-40yO{1s+CQn}08`dn-qUs%yzz``x_{&I(B-tnkBX;G zqcwU#`3HE<5Xq;#>Yo?ou`ukq7OCZ%-pxKH^aw^*J=r`9ODA4gt-9$xA|gPN1TpkK zbKCdN2&g7IFszGqo0;uV8^1!o;#1JOzfAo3C%UCuJ0R9bXh=o#=AnGE%)dn(EX-f{ zF#g{sGJijR32DMB{oJBb$WEq7DgTVGalQ7(p5e7~5bk4L$ZCw~pN%;0Qz|PIJ3m+d z1;za$*CFK~Ngi{`Aiv_t#f3e?2$@uxY0zI3R9g{y@HJtzmtI2J@Ksg5f*jf+q@640 z7m7QqSPGB*Rg4TcV>ma&#WtZX>YVYovHPv7l$-woihGkjlbE#5b)`_fd!ZOiL-eDtumeK@T(WC!w-7{3X?vwAo zP>h`OVevg5AC}WCDBsUKM^`&)b+QO`)G;h%yV{qxC-Z$wRre&%Pd}u`cIrCPeQlkH z`Y~%py!KAcmwFYa`OS7?;WsLjEhK+5jczPcjKMMo6;oW7z~z{D3sPr%MbK!KmNfii zhU~T|M=5cPhuMXnd%~TIX!$Hp?{t*+rDJ;nITh6FuxDsLEUs#!Ic6r(XKRSuyK7PF z-Cjv?4)B_2FE2m7RQNbhelZi2&FiTj_`lnT)2o8!FP0xf(EP=CvPE|7_Q~IgjCP3= zu|d$QyRA9vwdFNB5|f!uxBn5{;0!Hd-#tucLLG7q4kH`I(gYB;Wf((DVENW8*oWoz5?p1*^^esC0gmwB!GP zf=Vk!2fcH}ncJ&gAH0(H7_PIQ_;qmU^<{wajiNh#uZp~hd$Jh0adTqgw*wyU&Bi}G zgKt}8_4DdA3|`A;yzRvI&uPc-t`i2FYqE%fL7-V`%PurMjRwpSYCMTL5i+iCSG47k za)}0R-=EqYKVBgtuj2#X7GN`@O%&e{>5+%{gFC~IEsMIcAJM2zkcuB{*7&R)Xk(`7 z@wc%4f06XY!6$)pf1O6n7icu||AFH_N&Z`5j@&9Przg1W7*waN@-MyLm_(5(3UaGH z@0q2dU&YbebGTjAZJxtQs-e|r9C>xYE?b`{`>nV(w(2BMD}3cHE%In*@>=ie?|RPl4sQyz0GWO zeOp*cTyV&}ti_wF0_CbVcW7<#e# zUMogiq^h_iX&^a{B-{4=7#t1<@9a)zu=q(Mfd1|1;Qc~49HSpBj#)uT^ty%Wv|P#{ zf3&;x=7bHAxLA4a&i2M5t?q2TtS9vYS{>H8cqYiY7U0ni@@i!0vGBJo8`!~Pj!&6l z6`7+^rm8bA@^8g5Z_NS=uty;>fKlSMC~?alishlMVyvcR=4#}Y8?fCeh~I*nnK@Wa zUXZ;#(c`JD^T??b>y^1-QWyHX&L3*2&xGd6h)Yr)mN!o+feD7GArWF8n z&8)qH;B8?fp%A0vJF_L{5Nm9YM<8Ns<}cJfNg*rCW}v0CeNA*rHE1>4XV2O@!LQD; z>G6NkX*=4MoJB<9HM5Cem6({V1p`VVDhD3(_`iXG1q*KHiInOr+h{T~v$Uk?Q3u>I zT_(HcJ?84!Gz`l+JLAGR-w#)#9Sa2Lp8BD3YY_v^GJKTbkiA&T>gq%5UQPRAruP6O zhf(p;A)@s7(~>#Ud^lk6DUqXUYbne0u*M=;ZHxV!0CyWbZD-hDd^7^Gi;f^*_(u=D$AhQ4vEwS6r|f1Y65;UJP6r9V1^>wDB*%w>wq1GleAYwT z@BM{a)Va|Q5K1z%4%+}(fxE3GQ6y;%0)h9+#~)1rUtb_`-L1t$)ovNdP?@Zwb#0|r zVs@e>V(%`41;xV)tSETfzLaFoWf0#KraxNa(7o73i;2DO=fh~Ro=|y|X|$d zNBP?^5gLKZo)2VwVIxOjERd*b(ZZGrp&m*wsu%bY8R{Q&GInBkxQ}AUk~33i?CD)k zhvS6Qv$8aqaV9MeN5jVVlL1tCt3Jfki<4yo<}9>k1d0#eFI26?0Dy81rI#II)hc)% zQ__?98G}T-&;a-u;x)42Fnv8hX0M>wQ$vPQaC8?VIA3n)Try$_k_^WyvXCFDc32tb z(T02Qjz1*Ruh9X`QFeOSb%(F&hH$5^IDtxL>R%6AdC9v!-W!sRj!Rlp@CYv> zv2;N%lY`+|xW0$bEutdZkD?SM3yF=2*f%DLbK>?r_@sU4t0-IfC+PqU-=?~CrnC5jWG(1;)Z4E#*J;ls! 
zLG3fw2OGFoQcnTA^m|5L-(PTU0c$L%KWI|-W!Rb?Z1cj$#T!BfuQ)w2-K+N> zxa7a2B&hDO^9`#CnD0(k4malM4Jgia$!n6qV;!|#Kyj|kZo~y`Dlr5xYGxV#-2!6!UVf#kmzs52dlP4v49>#4t&C zp^LjN!(W$&6bPW#RNZoVauea==Cx%^K`YJXTJ-q&{%>#6P&rb)uRT=;h6me@O&{7J zXy-e19C>Dnp*GBpZ}$uM?johMcUZ&vAT&7KP;qo>VSndfulcY2oo9!A7xs59-R%CK z?C<=iYeIk7-#NZ*e*`(PmQKv7Ob ziC15ROGt6V`Xv>Y#s1Au6+6zQO8v5+xALAGC&#*egJL$-pfQj~v z@h!`#jQ`HCM#V6aSfRH3V%i?$>KJ&fA{Z-BQD1(tVked;yW|G_&dZ%K{Y^&(rO?Z- zGRm=&D__@qwr(4zZCju6fi~e^wvC@4??ZHgW+CIO5s1PTjZ)WM?R;#>_P?(Q{TqWr zqxSI&V;I-7cj-H*qHDwu+^L4hIr+~GR`irHU-<3YAFl6wGDDZ>{OUe#_eMfIN`xmY zim5x4{q)GdP2>|9pK{sGJXjwNb;j?KK|G#I7OxJP?jvAB=X9_GHXJps(DCGrX#WkX z#|)p_3mO6GmDx0V%nXdbXzz~(TD5oi9F6qdy^c0Hx--7IXzA>CyG!B25Tobe#oA=j zmHowckWz>e*y-e*jO)lQ$XV7E{cvr|biaLj1KMIK1Ez9HwT z0-5ug_Vii$!SEz<9{-If!JhO=6-h*}yT1K|5il=ylE)-Z4*!h4m^8ZMz|(yK26?Ln z3_q;cVv6~KE4iJVVzl}_oJ+&$bHak0_DB>X2Y-^-IaQ3c_`x+ev5r}u99^iso{CW# zWE0bjg@_?TI4Di4G)SQsDLrT-ZKo1VG5V`pX>A|U*~MXg!|9Th3O@#QkApbJeaXQg zPz!(Y{3)*ZMc?{GZOUDP_l=Yt*A6vcKg*U~!S{0$pxlqRtfm7GGcmrSq!3*rXX3lu zP!g|%p%7$pa}<5a?FhmBK7|szF_E6sgWcbf>$I}i5E1Ygx4*KW_($kb&)4%|F&X*~ z&2p+wje1Few8vWo_V4BoF0iBs`~Bc(%%ApVE?avu6}*bL+&S6p7pxjF#Yc7 z|F7UfHZPE)+!Xl;eHT!rQs2D~`*_qNcaz>|N!|g+#!?jZMY-*^S`CO}J3W%b(=8^S4z>Bi$RXpW!9he~;MPWG7SbKy>eI81J(G_zKH02W7DX z+tb3SZt|Y~g@5B|yYRb-0ObSVG4yXYgUs=@LqsfVIn5*x~Mg5LI+Z ztMh(gwk)x9Ib=$c}+t~S2P2wMss)RYs+btL6olc1j>!+mKf#9F9AN!66>In0;I zJnWIvb3{6w--u3kZ8NRRrByld$sg!gI5Su(lE&8=$;pa`^dC1nQ*E=zEFU3)h$J^# zkK{D3bY#Jk>5V zzEuauIvS_K2DrRdRSd*>j2S=-v(oAwlL(F&O*IG!qtFXGvS>jAlkP(DJ;6ZUxTvcX zJZfQ?kfgI}M(rrTSN%|o%SyQ)j7? zgwAGf^%_Cys*)orRG8C*2?lO_UC!%AxT{Vb7zd%p2Z=5USrkGDqY6N}@)cf_vY9Ck zYT>U8?4P|qTMKVe<6&r|7L&{yUQ?G*aj9+y(UFAyM%e0nBk+tUTa)jgLSzC4sgp22 zalZo*m}?4n)ZyEqo1QOAgm2!Jb2PDE6DX=jYr>h&fXqR)Un&rv6l8ysT62Ah&C`KP z7r`NgBQdnyrBT@3{M@=@vl9mdim>I}&1#~#ds6z=1Ug0tPAfXWc2Y9qIvrG#SbV#k z0(>0Dm!P>1ZZT|<&5VKe!1T94A|o(_yY{x^Hx1Qn;u>}$SHkowZ>J{jMY{a8mct#P zk=F---9Lt~&bBb{e&0I0g6m;11Bdpaa&}M59X*ujl_6QM*|o>9l|+S^M{Xa2{SfH- z^U!SE=WBZ?;r5a(b?Y{zch^Gx@WWRaZZp^oL@hjs0ZsmL%o%>hUB^0ou zI`S_*{VT}t-1?hm*8g^04rO?b`~Bg%EEsgZPXCTm^H?}bS6Ttie>qFnm2nj#f){AO zuRFh9;$ZR*?)~k$bbjV}I9Tbo>+(+d$2U0kC8ECk@?U4^M#4Vc_uZle*X0|VU2&6d zAsX;muvT*UyMqy0(sz}b3q)$ z5)raC@D`)o4Gc){og&+GJ@~J#%YlD-bFqvPHKJAUjzgK|FQ{5{(_YL_Hf^w# zjzp57Wg*a!1;AWzHN$Z6<2D#-r8RLl-@izKu28@jbVT~RH$jn$V(2Gm39Q|%dFoWA zgH}-|oqjy#snRPBS5W#8GFx2bW5;eeJhYkW-1L?*89M#YMBR~d&*oE^ueXusi)~}) zZok^?27u)b9&q)tNtP%3iRWlmArKAt_p3uv;_KsiL+|x3RH#ui7qT7@ziW;$c zWMGHI^Wfy87qi;5K$F2+r8|zwc7C0>3w*i9My&6IibyasC~Z)$TILQ@cRUPeeuA*s zH^u2Po-m-FhE z=_b`17CD{M*Si$sA9sJ+%WObpnpR#=ek2l^Mf(OC?>JBl+K!nW^CH3b6Cc{FC*PiV zv3;!^4D_0>zD%1^8s_5y`h(wc)NRFAliF@vCa2upp|G>w*Egx1Fkhkbkt`eGcJ0a>{fC-5EKk~p6N>!lb%T~GOZk-e^_?X5peDCAsa#XJRq?(F4<>0a zpzjVc9gR9hv~DbCYAk#4dUkJ$5zZDyv@~3oU7z1*aY;kDD=t+{Uc^R?!q~0h2oy$e z>z$J~i@H5K^XKFty&3O#IP4xpTw;WH06+=zar4OSwS#sFUpcbyEpw>lu5^|~*MQ>P@-!`ZTC?~SC5@%h z&5?)t=zWljtZgGbeFhm9LkHd;=ARrKE(=OW>^7pM5F*bXb{2A3Zu7tiiKwJpxlq>t zZi?Wu`ZLt1XPl ziy*oUS>nPi=lvRLJD7Z|3d+FkdJK?}ioxBFhd7(JqwQXiq?Aw@Dj)Up5>n~j@hYX!b%U-huJ5AIsY;=)>(g76YiN|a*(@8y>&~Rp&-1)QSx3V1O2hLXqVF(| z#J6mg^{&@+_p3Z}(2*PXED3ELeHC=9Nmg8A&9*t9@nAa_q2XZNq~qF?5r>DbMr|9^ zABmE5FOy0v2zSZSuLd_;@#Q;0o?;(A%39e!qyCM*#asxv$%TS>BC zrB_Y0<$4A9HQie>d7o=44k_LZ{0<;bnb(SbcuLZvvh~QC>e0cQ6zU%9%`#Xji^ib^ zTan~$ZT=~5!H5m9u_A8jGQcbM2Mi6@OMxv|b5|M+lA*t3)~S+Q`L7|r{o%$TWZizW zPRukR4aH({zS9CEiNNG@${p&NF3yM$Gs_Awr>XcW3}5XfK%C+U;Dv2AEjT8;TD3F- zu;}eMWL`5QzeguMbW9^%Yq&)1|KjewqnggwegAJ72`vRePeSM*2tg^4A_*<12*C;p z3W6F1H8d%TrqD|Wih_z7ipnS|C?b~8P!vW{gCb%X4Mjx|2vt-h_lwT#J+tpQbNBh} 
zbI!fz_ji`Ar7J6+r@WunOJnh7xL{OZPvEvS9@BOuL}%E0{v;3Fln0#K*$oeDGup9I ziQzOX0=xI^#OJJW+YQHh-J8-=?3b$TyettM+W90>M$70D&^YUJ=Rv*_JRv8*m<3pu z_%mM??!;c$tx`@5dsk2{fnay$IW+@a}rjxRe{J6E1<*dd-sHS4l(jZKEOQtIC9cL`H^NoXzc zBcIfL>X%}c*vL|JKcdkww8l)b-V*ie$9?){G4{!4Xk%KpIs6aVSJkMShK{xdTa3gn zaiPZh>zY6^^Y`jbUcD=a4Ott$GMZgKb(b-F{`EKOL}RRf(l2k$j~z}>?y=5awBe6C zHY~sfqM(1dg$IiS)kUHIXn9?!IV1XWkzljE?GIOwzir`@v5+TcsLI$jKqF#dl;Xnb zh@dOy)>b$Z(}}GyAD>+&EV7z@FFm=k7XRU!v$x~}R0*n8m3IHC&EqR!r@jzaBFeU_=c<33{>RC zpM0f6E*0+m{^6~k_muq{>#Tj_bPhG?!y6D!wEx}3tFL|<2M>r)=1k3RCjAFPmDJWW zx!t>_)v5H;xA9QE_Lj@v_A9=$rFUZgO{8`<6ki%Hmo6GuoRKIhFebfq)r*4>88u=(V!XQ6t@skA=b)EzGhFZ3Rx zjpHXmxPioT+^HI2R`9YrU%SKIWT&ZT9u{>rG)9?U z;T@G6Yq;@rZ&M|!-N9oXR2U2A$0E7toM&_diX$^82s=8y-S{(D^LKkf2uE-nPRP5zprroW%_!M?kr zPom+Fe=F#Tn}HR?1n!kGe%-?2GFHAf1G^#NL9<$6@P=yf=Qcsq-8~l<%`TgQ`)fCQ zpG-V9d(@SAFd2w3^;-tl41A@8nE$DScVIJ?yA(6q#HGQ*K>svhcfSEkmn92h8wglE zS>~rJK^=8nf6tEtuKzJQYReTGQ+FHU^rr6VA#~k~8*0Q)nI6?G4wfI8Vh4@+46O^~ zd>2b5Wk(G$bH!AbCjkpYAtWvBB?U^Rn?xEjkj?TqsqI#-3{J#`^l)@#oL;G8FrEo6_1IV z8@ef|cFot&%!gB&$0bM7dLwdW2((4SjYtnV}`0R(3G|N_7{Kue7YH)pWE4kAC)E^M<+Sv#^CuDmz&zRicK@WeLMBj3F-c&x0X z*G1h9Oo3pLWL7;!PigYOzeh?xJ zUwlF@$8(?{Ik{$^Eha|9Cy4-ntah!HU97SL9q8fQ3CRyS+ho=s$e?!O@*QqgY2@x@ zI2?`_Vz3;%jGW;#5SCN4D2zCWWw>DJR?H5M@3t0HwL(%=Y05BwfL`hr&^H}agKC4* ztN;ux;-dmz1b5kKi(*Kn`p4};%)h0}IRRDS>yvKk#}mRhElD{$D{Q8BV?`@&pLdl{>dz zPO-NFJldm`krca~ayHbZZYtlbl7D!J$#Cw(LJCaDP?JeL$c&=oi^+v8Efy^Kx=*=V z;VMzdnJu`sF9PrSRW*uKh!Z@3!=7n`gz;OhzbI8auqFJItlu3<6+Z`=sWn^d=dEZ@ z2NpE*RTO)oKp1x}TaFJ!%e1|WbVmj2@nZJfRbEcBT;N!?gI+hy(`T!_N=y- zQ(60qV>}H!q8{rM)lJ&%B|=5Um#h^#qfq_tVI`AQ8$N%Te|rE&D|ic#!!;pkQ_7HJ zP*6i)^6y?>b$g%X(aoz(QHl6&{oUUfOXx2VNZ}gw(@!tL)`(twjwVAdoU)X5wg=tZ z!UqyEL5nrL$D7(kZdl%eBye6TFv^KtD5Qg+<^rp^SErf9SeSio`Q;1Y_}kx*K@SEF zVVGjY(XONWtWcPH8&E$NG5wcly;$}6yy}i&jsEDHn=YN-Lc29$=mS@9e7WC>T=O-A zyB`9Pz5|tlcM(_`*HJkeAd7UmXuwk9v9!aQ)CYMXL)`>gx?UW>lU1~r>R|wiEwB{+-7=x76djQ@U`~;LC zgcHd)K9ZY3ZBKaBkec~wMDfdXmsuq`svNsr3;#L) zOI^7#nuvhTw&x~=@(KF{jH8xT)hHJ_=_;#LV6x`&bt7Gd_8pJ~zECN`h!`tr!torCp3QuGtem5V zU}N*wKfC`@dN`xrp96S;pT-tD9#)?M*|<%_G3JNfY8ON_a2yYncsOhB;l-1AWy{tQ zi!vbj5sSmf3@qbmeuF5@X%jxAG7KnuII$bpyAM_hsJy8W#FvIw;IzH%fZZ9pBgVBt z7}1AnY+LER{qSZE^5qC~c|*&z_OBQAJY~?|*jMxtN2`y`2dU%tTEexP8|lsfcTjn2 z%HZJGJtcu{Yut>m%VqyalJNvrqe==NpB#1z#?))14o*g2c0D}5|KO95`tx4X+LI`n zLNd8A`ua)N7s~Ejb?Lq-Q+{pQURT6>>k5{CeM5`OnpCS*>=bJKmutw0*lvt(X?ce8 z@xe!fU!L{OuC*6G8aaubNypv)TtEsLxI#Fd(QtnQ;j*pH3d*l{t-H$>x78^~ntjxb zIon#WZ=nmddo&>76x8T~?Xh9%Wey~m&AD>w6E)8AUXJbM#kVq;nEKdBCgv#vXCxNz)7Dmye@`yG3L{Eslh*J8@dib%Lhz|^eMePZg|E!U`ltn;3Rg1X1 zH$4D0_}Ji4nI7e`rRH3W@egVmD%f`V{vX>;P!Rol+qt2S{O9m@_=Yy}@3ZM&E^mFL zg4_GI+4S~`pMKx--jg-C9e^!a@GV1d_Q#%gF!~%#2}(podHx80JH*Q>$m_0s-}A1G zxWRu^cX#j)dtTdBG&u28yzjM3Z|^Tq4t;HP1@-wxB=oH~wPX@)sX5K?d|mXv06biOJPp{O44|oY~YGB<}qE z)B*G|T5eJNyHcsJ%sI{A*zZcEBNwKhS^b`BaOgdM=?ArAep}_vAE>Ece`LrUO?kJz zqo)2L)vydabpXHZ-#0PWz#$9Bfc~+G`KNl9gO(c~&Ye2UZDPPw!{on=nj&u*@*S0? 
z-Zr6ir&ZhYzV|Nw_9o^#eBvK^7u%=sKB1@Pw!Q+i6K8J7@=K5tqP@4Ta76%_xpvhS z#g&W8jU!GS4(;jiD~=dg#i3ah{`Agoyb5GMv+r80(L%Ocyg#n#MG+oQP|SLNQtb*| zc#@;J4p*9Ps-#wy?J&?^F7%%Fp~Bqup82WbKP#0Msx1^2R?NXCs_F&?{@dZd|ID{N z*k-wDWX7eyji;@;?uDi>VZqjeUQZr4ZftH~IInDOK*DnyADJU0!Iwj*O;yZCs1&iC zAMM;@e@jFOJ8lCtRy676XH$EHt}HuJ(78_USIM&g|B%V2J6&C|5AmL4N9cSbJmgn} z2XyaWHSfrY4bc79erCcqo?d?CY1Fy+T-shaf1!B`cKp5xl}ejJ`i*?F)5(;I?VsVQ zxR0DI@xyV?px<)8(DA6z`;?YfyjCQhB}jVYR&+idE+E8u!u9dw$?mLkX@2fh#>#x> z(Jl|cdjkUw4SqwN;_K><6~8}H&YH#`@xCK8$X;GP@EOIGr@D;Q8u`1nOx&bF;zkJj z;Jxcf4YpHzAud2UQ?Do6QLDt>hy*^ol#ksgWeo^U&E3@4R8gC z9Ui1iGEMVwKz?#tlV+ZCH?A*Q1rA8nCbd&xg^c{XF49JG!1i4` zdLPz5E9ROG;;2FXvS35v6xI_0ZiB2#I0RWGT}y$Rr>x|TB~(Jl3N?bbzz2}XOolTY zlARpd1W}HX*r&z947DxAmezm`JNNY{fldcVLIagq-~Oei-=w5m}A2 zU02}gdpIrYXkUc(+xAoH73IW2mZ!r&y8_f@!JfTGH#kLomSd1YF6lN_toXt}fQf*S z0tjB_yTJC5+D#log}YcA2(39~8coG56eSSosNx}le`9#>prexr2Am?h_kr+d%_>os z3eNB4L0&25$5na|y1S0~Ec=W(7d8wZsD7*X3lp=RDusKQLy?ViCpfc_#1HMK33V_r zcJ5gJ%$HiPOWWA2!t%-bG}vNc2P(pjH15*^^9k@fB)9`f*2OR1kPM}F8EGl9-#byk z^Hi6HZTMD}*24*m5@>k(`tdz<)}m8Y#rQOYbH{z#X-1M=IiLuTpzESBbU><1LYxcS zqu`rdMc4ccsytr|thplns55TPbH)X7*2}xXwToCSPTOS|cx5B0Tl?;Ob6R@13XC+O zeV4MG*3mv21*xcKP zmZ#NPm1Zh_3A@>gLP(ljytx2@z$Ogn(} zw8T8hJKK%Z=#@h%d+kaXE`n}Bur|5ql1S#Z6OwO@m8u0_>3i{7@35ZlYO-B7O~J1k zN^BWZkd}*8Pq3i_*Q(E$$+jKXC5$+C=fI`t8x1%=a$U!A`(FghkDn-E!j!7S+s#5_ z=!t{_7*9@##i`NNf!Vzki!Ly$*9^b^^v;T^WM`c(eextD+VQE&ET7HZLRf_ociNpx zvRZOZvHaFL2BQ&Q~}Fbo_eB+`(<_)TYaA#duJE4PJ4SzoKx5z;rnxs zEEZ4!AKB%A8#o|jgJ>)S!qCbmw7XG7ejQ)(Skmp5JfP5Mnb=ZxmuE&uw3>d{Tf=22 z91cchhB~E@a#VD+!=%>>3T^s}$A5HVdiEbm-T(dZ zpNnCbK0qw6dGS3!J~y}=`UP_OVX4o-yDn$vN?p2g;6&EAKD09MvRW=p$oU?_2nme) z!I=zEuJOJX27RkIaj$OpAzOawy8y!bw7hegcyj?)yd_=nmJt{r_h$->`16-B&w`xE zpbaLKj&I+!%{~6aCzqa{JnwVPx>6u5IDanuzNt@oDDH3}a^I1kbuZExvP9*zKi6*= z?LJpyu+i_Rdz*&itVX4(Bo$j50;hwi*xHBa!-&-1jDSIBU<%Gr7sE0|55cF`; zsb92u=cl5wKOo-sDiLD5+D}r2Pae9BvMx!SOfM|ybAh$5>^^!G>)Gbck-QCG?b;cB zpH_&DFwf@Az=~*R3%_h~?y`^{=}O&vXI!!yX`$6y7E404xb~WRl-Dzmj*gCW7(%(6gxR)Mdzu1TH#_pY} z`8eh;xMeloX7?;=`~men$eDEc^l^?eIe$XpskLLG-N)?w#A9E_p@}C<#{9{SRcm)l zJ`MbfNaNH?cFX)vui~EV`1EVy-^o)Pr1js(Qx~RtavlFA^3>N6G16v6)~vN_X0+Ap z;>_5O4U2y2=)mlWF0+q15&FGI`{4VNceZS{xivMkeAkoT>Y1D8u6z#6vzc8d+YjI8 zH5r?FYu>lo$NR)j^_BuB4c*3E(_s4bCnu~AyXAR?Zo-xV@#bYTuJ5k>-(x)fYs!C#j4}|EvLgJ8nnPhyT!Bv#lP2Nt~_5fJvE{`+U zgddIEr`o+1svc7VABzm0nYSzeTkjd>RkO^$mW&XD=4|y0N24nZp%#>E7NJ+mOT_tH zh`JLQP_O{yo8iJ1d2`L%_k_3u=2P97(8MfmkY{Gm3yH!!Z;|qCY@^#*bE3&u^?tny zEvx#4g>jyyFce!nO}GR=$s)mHk1)8BOb5*r0SGn+Z$#rjJwvl!jT)PucLz%Fcb0V} zw%7=2(vd4#3YA41({!9_QbhM5L>$}8olYwAKE z!fqnY{tdyr6e)C)D&$F;2_@morL3|d=p2o{`dChY6wZ-&8nJ1T=p8s5r`RLT_nN|j zIQkjSnHGWq4jqkx;v&oe@@u++C$&rgKTU&g2YL2FI^ddX{BzVDENZIn#5Ns;#dbA- zv2}&cI4#3PLT?${D+106GpLX0nDu=U#lb-2q8@H0lE?e*I&77Ro6C$y161hReYkv( zrLK)4q#C`lRiQI`P$=q|W9E>2xZFDwJxVntAs6H}-0Rie{-DwMbwk%0AMDiw8X9ZU zyU~eN9xEO(KeA5(4VcPt;-CmgRugrG-}E5#i;(6=mkPsFv~G;B5D)-|iAyyibqCTt z+PekpRv50F(}%gZX8z?+k9|tECg`K>3IINglt&X^kub88;!1(?OeFeu^)~B)+s^3U zb&0q>x)Pi~C!I%$34QGd0G!wDm_|CifM60Tnuy28%2uz*AtI#?%4b&{gmqFdbUYZK ztmgyp=v;Lm6hBr^hX@J;AbS>f)%>2KCXl^5@fXNS^98^fiVEex{bhcM8Kg#S$ham% z4r!8IBFm#6?k|zTQM5}&C`v|BfA=pV(*R&yAXwl3DKxoZ%uo7kV@-3pav1*2#PD0@ z3ZWsXNES|?+PVZ@)!mM2^`>IEo0IUR&OGuR^T)V6Xl-|U$A?C~-YA+uEyImTJiq!$ z(97c(=bgNP{A=UAWWDC=wTmI~FA6^F-y4Q6Co8y|U;-;zn=}hKwFpJYTl-(d#2z4x zyfgG8AA~5$0>|fk3M}x7Z=~xYjUdb2>WhWU4XriVyaYRUpvi|}q}#5P5W&|UZ(F;) zP_4D0TD+m>@;S|%YQgTgC{61GFR54%{|G`7-t~5_#-XF^jGWr#oR)=|?Sd`Zp{nGo zmjR!w!Kf~RUV`W$vN(iO<|%qD_W^G1nHjaNSiKyfef{_zBCxb=&J~Vy5n{cWU`{fx z!>CGAhim2HK^$cvg^#LMBm#!HXjXbmDsk7?jEnB>yO&9)hd3JIavWS+!NrFp2)65$ 
zKypTzYHSa_os-p+b{g;URi5F;sp`Va7(hu9xjZ2&V=#2HUY~6m6JrFVhjC%onSB79 zHe^%Oq7}SEk&Xp$IZNIiA8H*}XLEk-^5FpH?Ys4x%8#QHOXG2neD#SLVj&rDTBpi! z7MG)NiH)ji%puTB4#fe0$qO8$>6N$)RS%K7_=J7tRjVOo%uDd6w|eWAJ%Y5eyyu-? z@X74e#4tqLn|k;IXAQ6+Y@U~TAz%ZiL)b-{BUsGTdlL~BBWbZ}T8k z#+il1vzeLL?R=u(t&j@sT8aW=Oj=q`#yw~f6D3-v7HY|V`i zwH10m(o;5$nI!#G#Q)4#R0~SnQi5$=^RqyN;St}|bXdFD>58TK74*$PPGjQ`_vcuR z2&}@S<(X?o`(ln_yN^;J$7-@@CoR^VT}g|vzdsO@*L*c=h~w_JovaagyTsxD#aKlF>IBTW><5@o(+L{kk1U z{?ixV{c_-J(I&n1J4WSypDivbp#Sx3(f-ZV3*q;FTujw(_2^*RffyBVlCJOf18n=B zJzJFWvEFkj??3tF{?pnE5Tg>j8}!SKO?C?1!+BZjc0p6vl7Q_yQBMNk#frhl%nf<` zlIG>MWllpXa11?R_KDevQ}aaVv5 z^m*?VX%*La=6I>mPkCpyWN+Wv*DwE2R(|nuU1LW07AXg&mbY&3qFBN5V|BA;?Z9$MW9oZ#LWicyycXn+xOLO#Ek7NIo^wCx-!rZedDi@d#}*p2X^s4enp}cqR*hv5qnV;VkKI;N))qk zKVs26)Us&|I*XgG*}X+JN*o9yG`;b1dK;nv17q1DDA8R-l|pFh`FY-fuKTp>awv!Ny+lM;!iV=L;*$k+hbH$}E;Vd7?8BAGKM zmjcXzae@UrROw)`oMJL>brOEfb+yBi*gi#tQbqr7O__NT551ilTr1RX>2VrlL4Rp( z5CH#j`jos9VVz1E(9I~(Rz)G1oF21V-vIKL= zJV-s{)pL=Jg0sD-l!*;+QZ_m<0(7Yr!wj1Z1au}(Wt&wdSn@tTaKVGz*9a0R z9i`YqEY;45e7#H<=|eHY8VnyStMedj6wy|{X$NcgCSrRaVDhdzH9Z@qXEAx0y3|V; zFKapA=o9|wajk%PJe2qVL_+i#pnz9WVjeisJ!*>pKs8hXSxkHf9*LpKNLt3LU6#hr z;~Ps9$0Ts*bqa=omue3sB3Hd>SF+Hp$c*H)FgsQ)z0q?8C>|PAmA0eca&|RPBy3Sv z5@jVq$osf6_!q$RPowjPRcqOb;NAGj7mtfOYY(3b1lEf@1XQwD zkp6Q7M4cvtQ!c=!*WP7|nuyQ5R^sj=lz1I5w8{YXOP6Mv{!8%!P{=Oo8z3SNxB_KhqJMHiM2+rcv#MpfMgdXIM{n;4%!*9GvYH3 z<3n=+TS3DBKk*we^mc=r;Kal!$8R43@(t+gAQ({ShP~;O{`6(LAgQt^$DWnWDvx&E z|H$KVRfyPRz*gz_%4GppR}?P0S9VgJ8d?YsqHlH#@nHCuq6TAsQLkJ61v59^y?3c! zy%D0axLaXR9reM)aF5QTS^WxssiNPTDW0cPVI`F-_ha%ksQVGyWBS}m_Rl+FWJvN7 zPu87ZZTe<8C3&S)vRr5Wp2EpAZ2NoP3fm)JPUlW2?EAvs__?7{ICiGy*wp&lpB`oj zY%`eTeunde2kvdGyK@2#(W4d0hJ$mnPbQ0J@BJ#0(cQ;L~v*!l|hOtxZ(1TA1^uCCAAq|E{=_b!g=Z6W&1W0i4PKtFbq zK2~*Hhz1$1Cs~+M^X$;Dj8S+%o&>GWhCPrd08IFC4Il@BLU2&y5>!SjG_*3uUmNnE z6VW6=@ucVz8887&sRD~RiUoAuC+&~VkyK_ruEbw?7PXPGBTtGZFySBzxP`59#6%#& zuBFP*SBrB#OsAs70-O+d`xZz~gh80Vdrd&L8#V_EmE^Zi=Ld#mBs&NZpwNJ(=tTp$ zm5Ok7Mx7L1N5eeK%u!!bu%du=yq*Pmp$DqBO?Az6JU*W=Z&psDLD`qthx~azXdT<_UHdNJ?3wl|H3ufKm6n7 zzvA%rfuAb>AKd(xtWY3jl+14>pV19E?(ks>T=3VZob=Iu?zyDFT3#OHVc+{KO=OXA zy2afw=jIbd)ya=R5QV2?y8!2mh{*K|H#jBi zbG~(bx23si(udmHx7H27OHbgC_^R*0ZjN)e!PA{zY1h{l&P3gO#>{thM2%39SNIWN zh?`uy*BxE!hZ}ggR%4dl>)-rUX|h9oMwYsg1{vxM-wo4Qgz1}gCkVOxPil*#H26%; zJ-K#_TW*O>%95~+Em7_ZxS$l&vX=%&j6rUE=tZOIUwo(G%P#UF#Vxi3@_|F=RkrF+ zw<7o9^0w3(ZdEH0G5lJE+Vdzpi; z=kMhJXUPmrJ4j|Rcpp3NqJwxLQe5iNef(t<%p4fys=Hh~oa@oz*2hIb0C!S9IGJyZ zhC!J<#35me*6R=8xHO#D&tWWVWkAfT!ikeCFUY(ugeaR&YLT5F(&?D^n=n!YClum6 z${@P0B#hF|IkB-2g0Pq9gt%j%eCcU&qDEEh{8nU{0hsTG1yQtxu z!mvw~J)S3b5X?ogiSN2ma|M1f4QPJ{?Dv1q;o;(mPlg&+dAJG#~Bd_S;uEu6bFieRhK=2y{r zjyf5@O-$c0nQaWumkqX5iDdRxoPJ)D>(Ft8Hze}> zbhO)ds1$v@QNzcm6)aWzzmVJKrtuB0P`O1_mEj-(o0AvcYDDz%5!+UWwHUjxBil1u z>W=KeR()F}akRD*@>ASbhIbOmv7-KU>L3tDNJb}F&?%PG?FLN=_ z9iiEMAfs1SD|`^93<*{JC5Qp6DsKwZCBOv$MnH~PRY=|LfgKXAPmrJ+9I!1|%n*e) zCQ<_b5ku8my4flb&larJ#0*n-G7#2JhIa#)g9s&y3>O?liJGZuS*c@54jK`n^`?}m z+L-63JgUKqTG`GQNTpHoddn`}wS#^|00?MA+dt*%Pj7ApFrb3MZ5*>=8ZFjm~tUK*?!F+z!{;Sa-MQ!Wb`8%+Dr=YrRa@slV_iBH%=;3LzB|No%$o}b>!j& z9UfS-YG|QPR?lvzfAxYFr`Fmtvn)mIf@pXMGf=5CIOo|4QrL!*t4)8yxnkg zhVu@S#L7xnxp--y0}@*cwPq_7W! 
zAM6YhX7wH{zpVGi0r#AqO&MzWk5lZwbH99QdYV*psO;&h?PV?h!2OCbN#kqV*Zpyd z{kDLoll-?l_@y`|1=_M-8GCcp?{)H_`YpdDWM7VH*6jbKxZnq?B+5&CeA!%`Ja20^ zNLjqN@u~zB^Z9!oeC@AHy}%*;OzljgxjNanp1a)=WR+Z7=)V2Q?COHSYqcMqq;j!# z+_~$%9ZSS>Q|y3Tyq!wNjyYC|9e+&Wf5H@74!37Ae7zS553X4H3*CFQ;gZdVgLP{+ zgOtUjUV<5RqM7EG^}O5}zKD#X2a!eZ1oVc5Z)uKgKfOO;RTm&}-b-3S6D{^L5IUur zo(1)6UmRDv>)U||*?+xGhI$Q{6t;e0+;zL>aPEzF{_M(BTH8`*(eW2OT0?XM*3S1T zWP#Vty$du7mmPfn@Vzl2rtaLygU{}L-RL~^?rJ;IvWm>S4fWzQ9oe=*xaHPxfUL6N z>Fg6qBbHGjRA^fAI}H3^K12Th9FmzF`t;^M7?OECb@lX~jsh4`ufwVR`K`&#m+-E! zzkUq5OBz`Z=ECl~1?^?L9W*{R*i75E$;`8`8JvWzFBitC#PR1D3+|6@dJ@cNf4^+$ zfMK*H6KXN^@YaSb3>wXKbs*JWuG-{6@|K25qWD6xpXP}(1O ztAEi<|HVfNfIW~6wfg@1k@es!<3D~K{72T&KVl5(e9!#eXD2U&{YGiM!csUF@|#TH zzxO)$KRsdp=RUjgw}ibB7h&jW`29M#QwRT4OeZ- zUo;wsHF18yDy72jBuY$ZBrgSezn#TfzabG7=`J#VPm2nn)B>4=s70HZvf9etV%B5) zBe&4yT&5=ceAs$GA#FUr;EKJ6gr;zfZva(r4VUC^8}2i6pJLTF(%m(;KPi(}WtL@{ zs15Z07xmfm3!3I}lTCewRNr%QOxGakt~Q$GC%tl zby=-?=T;6E_jw>bzC5W;(e&gVP-18sH7{`pjs|7U|Dbm)2`o4L!42F?I-#&+U~FH} zt8gK(@O@Qm5FFnz{&3nAQp=fC`F3)?x7qpHRVp_BBSLom5!)s@zB;5#W(VYco4OOE z(fjCC28ML6$MLBh9Tx7n?=w9ltKjxWJ(J5`1Tn#Fl~fM9Pv^hV`{2G{{hM#eF{$2F zS!6GMrbIjDXNc}*E_&}$g?9|>kpMOm=eaa@LFnxa6y@Lh{m3m<**f?8Axo#Wm7>c( zX;4xq>r`tcP4e2OO_--5g=b%~)eZ%o(7%uh!v}I*`f8v`R9w)q;GQ)XSh{HTs6I28 z?m7?vvs_`B;~UX|ylJj(b7|BN&w1n1$r3ugXciEIj7Yc@x1Cc)$+&u{dn5I(*SCT| zR)o5{AqPNu()L(oaf}d(1?C6n8%^jOorCZtN2oM`UXU0{Smfiq0)P}op%5j)G(Xc+ z;UuLZAv1u{77u%X#IVnryvoqE?MyLt&_ z-evDey?00~kexy;0~}&{?TfCpgl~9~W2W7Vo6+VjtB5SQr)?fLQ_2kTt(t4e zXmdcsss+5K@ZNB9sCnn-vL)~EKf#N)d1#6{^3-VA$tW?jM_V--F9IA8;cyg_v)oDT z3?t=~v618{F+f^1dvu#$lQD0&U(ZGvL zt9in><$&$Ti&mZ$2S{dlQY|E_;I&r*stY&A_Dx`26W_ttd<@G{k^%{GDiqihVvYu7 ze&y~3DiPrL;#CXQmD7V}_HjrrWYxOl9z}E@Oy{~NYf-Z~6iRM#K8;)>$0U5swo_R!#` zMIh&jeEQyTDA}$(t#Q~K8dTh*F23)J#BD_1!`!An|bV1Ar1@EXb{FQ3qtHaaE;2NHORZtl4-)vjJzYW`!QMG{bCE3p;Qs z6&qm{XnS`-Mce2aj}FMus18wuz3%t4p?qB!a5diaMUZ4he>n@7Ka&>Q(6Z;ZdoG;Nh^m>m+ z93QcJJ_Hjj3V+0nYX*am=p+~ZeFYuw({H15SFf2Op4r)=i=yNv)a!QQz9YB<@xBcZ zGoOs|HwO6|UyQ)u;#%tC0L*$O85%1DFT-u~m%+!hlU|QQ1qRy_t6-pqP^{XF!z7=B zAvM=jtM~bC^(2dK)^S=)bZs-y>DyGtS*b9)Jv?abCeq8(8CM(T?~IAuw5*Apabr@0 zYtsxPb{Aj9o;|XTvd3(Hfqc;$x4CWTUh%xpQd-2DAwgWd2NpVg$7!dO6IRQY+_C1t z1l?%9O{nTvKeV7X8u=jGh>%d3aa)d?fY0>2m$PZYYd4r?tsd&A{zjKu*LVpkO-B`0FwzikB~d*lH#rlAcci{GKOo>d>JYnz3V#%n))LHE=(I8pEUGl(4gp<(}!IR#N zNb_h@%x$(ATB7oKEszOdzLdouGTlZ~z(_P8{eUup36pV9iceIk0bh~=hVK~HUcDwy zhHmUb7t+>0z^Wc8S|hh@qjO!c`KPmrKRwDnin zDvI14XT|tAy_*Dqra^X4lq4N7 z6wv0zGPmJn5M|mn_a;a`XP=`u=B#FF!lk7$043SNlXCz8jSngc1)QZet?8kwfG3qU zA2axJ5npELyI4E#iFWD`GpVLaDYOG!9tzOA_8K03iRGY{_A(hz)YJND$ zGT~bT|vUv$t z3a$*z?^2LB1iiUxSY*Dh8)tp6fMUjmvQ8&C$^(HBEl3wz1wjV*GPENz^m7aLWh!b! zhN(T2*iHrAkSaqQSixhJZf5e0;`mq@8WdmTdZzh{4}Fj*!P&XV8-S^bR6?t zB~M+7;{L6{0D{3sLi%|mIDz2OnLTuv)@$S(h~U@mhy%tj`#VIj;N zio(>wM5rXFkegK)Gkd>YU~D<74h=Pk)Dm!iuYkRp?0C z*U-fk$g>g!8*?9}bd>095mpmYC_DGbMuK6^PQMPCZHm)clLF4*6I(r!Q< z7F@DlkBy6mZFzQDiJJ(<2;lKhS7k5H98aaYyWL|~IZ7_H6z=F3LsoJw1zm;ui)@4o zF2&ax+Dk5Lq{2eThxijaPU=|Lt+?!lXV|kZDT^Tpky6OlE0$`ej+8npWpAc(ts}XX zK(0Nbw6OL0CM{)RTaeEB9NUktO656Y zY`L)+4V2|#hC|Z^VP2<}8SUy=vh;E0{ynzdOMq&o$-x^&#|G(|;cM}~nAb+e9Qj(a zrh|R^Sp2aFqnN_$@V68W+^GB6(Kj`h4i@;(3;hkZ2DrU`yLjy?Kdpic0U71KR_)z= ztkPiwN6IL;&)C^VAKz0a)4TP{Hq4ZC!`5cgLwf3H;hOnj>y~L4=fX7<3AY05yeNz2 zd&Mz^_Nc*5si#cfwS3haMFG#pewIbwcJ|az|ETlF*_}Ed0#cy)|Nf5KT*$w}ZO$~9 zAyZ&d@(0fb#&Kb^Jz2&|`^?a`dS>I=O3)Y?2Kx!_L$&S4J ze%<~}t;3bSME#c04>Fg|_4{w7sq~hb*umEiZcx(pGuRu9Tk#1abDUwf0mpH-USz4w zfBd?A=FKd^oV@R`D##fge!!RVRL&N8{V9Q3FWjNKyT2Pup!SEs4G*(6ZSX4r$y^b? 
zcqsl4AahmyH?SO*o#QGX6kZvxgy{uuZcy@93~dTtpi*<7#dpRy3xNo=Jvgco$Dl_> zRrT==V~?zhHTl}wv0R>bcrsW|EODgkE~w0aNEGO&*Px| z=jzS2u=nSW#Enf~?c1os9Z^=Br%if9^VjjZ^qK>oV`!s>;a=V^Q59bFoCy3f-uHC1 zqTfPh0(6o0igl*;LJbS(YKl-<$AsHGN1fZ0ymY2~xrceon#Td;EfXS-w!l-3ZoyPm zFHOu2$Lh*`MteYV!-u|bV-p->EXI40la-#VM~68bIz&>H_ig{FwsZTrD5{hFApPuJxTXKk zujl>$G+piR^P*d-aj)79$)DOY37%P3;@-%IKBc9|zm3~Ke~A@#Lw_}#?$MYxD={x8 z>osWQs;g>3&QepCzb(Yf>S_0UaGWfkq}K<$RuYJ=s` zAAKD6Zoly;*EY3lRd5E9972>QgGu2-9|-4EF6( z#b(9eac21SXKCDyasz=Ls%JRHonOmd7`7+m%YqE_9!|PSm4VOyV(&bonpzt@zmx2c z5JIv;7byWmKrBH}LJ@Z8HVA?UHbPNRECCS^Q9}<(35tT)LQw%lqat7>Gyw$-76dFO z6pO}pxXK|gjQldfi(ap?= z()zNYLCU3tJ61m3*fDmx zqUqA&h{mT|r^m8{#LKj#l}-NYsE-%e$Y}#%)enymX+3d>7=3N`dbK06NFE4Lw z-WxT3R!qD?uV48rG-v!D(SJI|FHALEvH994eSTzm{30M<2;)|XV$~=3__c+0s!v3T zmJ_*9YN3O{s+MCLC-T&q3!TiLw4_B%359Q^zqFbBk*(i~D@%om{0xge5j_&*E0vypH8KYAJx9&Qz z9X}5UQ&zGhI75}!VQLI<_iY&Url>U@bJX)%_VSac;#JFibiOTc>T1n=8Z~*FFJBVm zyt?B>&SYuf+LB!xo_4g}o4g}PE!n+e_1g|<$7ESWbIIO_r*9|1!guB<_wGBT;LI_X8p-Cyx!#z##LX6?`Kb;&?&Ln2v09GwSV)cv3cy4!@DCtja_ZG7ThcDb2csw zemA^0l&Z~gi4L$sJyI!nupa9#)?l@t7MB<6civb-7i=kt?s-9>015%T&{)h}kM>Lch~Yeyu?+Cu!x01YLgHJfv6fzQV2{31L@Y znSt-{;|tbWMrJtYaq@7R+!eTUgMDxmibr*qLQap;Yz@_5GQXp9ANg9mOaCb%ts$KVsqv_YteAsK@qy zT<-g=Ib^aSYj88m+Li$sPz8B%4|?XADl|*#O1JlEbT206$F3qBK)tyCL2-tn#O}Nq zgFSKF<>!>Hwl_Stmm3DAmyhOfc9fkG@ezyRgsV9)9pas!T?}1S}Stz z8Wd;GC2T`B@H8Bca;1`2fUG*+`7(^n3z9bkx7jmbP$O48e=6#Y5^3!EHX;%g;{Yvq z=Q36JGkFQCgu+DH6+G<0Kn8$7+Wj9Y5=2{dx!j-sZTikdI|`K9C)e<LLZnB`d(M}N8N$*W{!tB z005Wkj5CsetIX{1hwq}OX&n42So4-S96ukr8LQ}_3GMIOQg;zJX0h2{la-9by;|@Z z3(sPrvVW#1dMc*U z0~s{|;uGcVVwNGbi+Zk;uOTg|==cP);G(|X?RSRi=9faf@1J1q66hKMGxN3LCU60euOLll%CM! zV4RsB$&}DA@u_XmQhKgK&&Pgh?S5AVKRpYXp4bB326lR zD&mn0pqL&$NqH)?UlNNXF7}H7JEZ_u0clV|8m7er!@zzjc}PGSXUlRkNQ1~{DTn0m zL>gq0o5gYC24rGF&21wtktP8gHaAp_WGn`@NJv9e(gd5B@dbqy zgbV`YK{2U?d9Xy7AmPbx$8WLjsQQaW-54NkLzELmc9}46x-;9H5Iv#Iw=; z0t;gi$w$qc=nhI~cnTYB>>_(t44`Plr3It{01(i~ZsIr=Mczj(c)SnqLML+|ga|}- zA6<3d^NH`J3paEvdD7h%W6c%qCi2oq$G^+t$n z&ID>WB+@V4D|x>x{tBsxk!C5pv2PdGC=B<+i1VZFM$f|H<8kUOh;hiS& zE+849C?vD=4K)I#r^1N#Dtqnr;S>iTQ$%tpLuRhYu0r2)2ea&xEc#iY5fItnjC?06 zx|~XO6koV$PUx2qpZ1;;?UZX~0;)N=b2kF!5>hb-7=j5o;xpt<61{4HE(6L`q4{iF zEUtjM3+G@)FC_WN^|O*kMKZE%^q}NiP=frLY|=%PiYq|oh~p@smWBvV zqmz%khAu?PVvV-Tv2hY{WFK4Z&aq3hX_PEbVvsC9%#rWp$k&RjTG+wVb|NG}4bccP z2rm=5Wvk!hK9P#<-P5gNdCy04u__Q9%vfMKsLs;Q@D( zFKe^TVwvDjGT;Vh-n0i=58}|UOPh#@hkz^z$-g%T#ZlXT=knr#C@z5e1$Pxv|V3US$LW# zT)b(AYl*^`kU>56=jx>A%4v95_KOJbCWv{WCO-(vsCE!Q3D&wWwqG4Fwg`2IF3J?_ z=YCwd_ilatn7yY$CQ4*A8^@C(c|6JaK@kBZ!A=NR zc6{7^nAC}=vmv?X2;@{4@Zcox9Uxt#m@7e~J`pySN^W5STqZdaDeN=JS-b!|jqnxO za8^O79+KNb!e^L2*buCa?Uh-dHD4=XHUuTabF~IZg6$5I4(Pi zoC9Zn%~38q{cv6`wCB_k&uQ|9sl~-_DSD%*N`O2+`zl2Yb_pNXvoWVDEN!?XpF`;2 z2gif@0rQnHh$dIbJl{@3diJ%-(_n~{h@90Yv=J?S%}x`qyv1!;rw&rx6p=Db?x7Ko zN$UcD>?n%O<~9xq(EG)tp`y!iJhmH5;EM5iO_}e^uil$Osy&EKG|cE1pE9kG@hhs4 zXP|EFh}~~R47>wTL^6wAVzMjH5fajKL0s%K*%2b`o+0sR#lD*`^@8KiscBKP_z_rl z*O{yyam|ph2niLoyWwCrHNUjr(h87 zov91exkJeZmKca{91UMA>rzaif^LFX-FQxEqb>Tj1Sb<1q5o8YSG|+ry472PPL56FKXTs$q{DA9O8FPkC zV?w}-lo#$!gkkXuYB<{Y2YH3OuiZm**fm^WB)E|1BV~hIScjkx|BWR9OX^>=axd-w zf^uBz;z+|9_noCfF3h3qM8a!FW+H}yYKOhf8K;`{LX?RYax3h>0tB4oae!c>7J6z66iqQMSy|#^ z*^#lfxJcfL!#Y95SyFiHISv^&$DkYqYm>xwWp{_VI<@xsXpkmRSJxpB18e zpXdD!pk5_!B8ZbI`6aJT1|l(WY#cj8T3T-4W);yQ2 zvrhT}D6JFf#U8=&r$3(Qqk5)0vGSy^pA~JW)8j3eYXPX9EYkoNl(_bGB{h)q!qHl#4gcq(k%$EK zZ;M7=@R0ctrRC9=S3i$!N4v0fdSI7(k#7IB`n|6S*iP#GHF0gXA?xz*@h&6!KYl*f zxQsr!4vpS&_L*JrT(@<%pB-1a1mDFMB;RYf8rB3y;T;&1_M@fxLzwoT=@>!MKKCmk z>B&da{|-V6@maO5HzY6M#)2_h>AK$O{g<9lOM+NspdFD&0yoUpz>g5eh%qz9vD#eV 
[base85-encoded GIT binary patch data omitted]
z-Fa4IIrb7Bh&_*i3lVc9;4=9Q z)PWQl39~>#!UZ=ftEyxMG4FwEQIvdatr$`cmq$XziE`>fo=e<&DlyNL90RW3*Fk~g zU>BjpM}Rd!#JYi>Kt@?>+MP?f@o6vWvvhH_bT}8lh2w!-5-x*-=qKXdEZ$laE4c%xBJEW0YbM88NAa=jLcBB^t1*C`25_+gK!$`E z6JqB`7z6`)AHdDvan0Ofgp zLiyiu&n2jWp~^=^mAO};o<7|U?_wV&bib^=Lgtx%`%Vo`gT?JImSMWB;GIkvcf*VA z{^fkcnwQDar{PEUxoYA~Y^sgdj5pzbNq%6vY`24bl+iB~`_+ATxXn(txSbXCvMj%w7<;4z&KvE!b!^jjN_sA1wT+(-Di;m zKoQdm=_yltNAwyn;y>we|1~P>*Ek-$3Hlkw{~uva zW-GhgyYtg?3f=_W{r1zP{U@H&;4Zt Date: Fri, 31 May 2024 10:46:54 +0200 Subject: [PATCH 033/251] docs: Update README.md --- examples/wikivoyage_eu/pubspec.lock | 14 ----- examples/wikivoyage_eu/pubspec_overrides.yaml | 6 +- packages/langchain/README.md | 61 ++++++++++++------- 3 files changed, 41 insertions(+), 40 deletions(-) diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index f132728e..c05174d0 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -189,13 +189,6 @@ packages: relative: true source: path version: "0.2.1+1" - langchain_openai: - dependency: "direct overridden" - description: - path: "../../packages/langchain_openai" - relative: true - source: path - version: "0.6.1+1" langchain_tiktoken: dependency: transitive description: @@ -243,13 +236,6 @@ packages: relative: true source: path version: "0.1.0+1" - openai_dart: - dependency: "direct overridden" - description: - path: "../../packages/openai_dart" - relative: true - source: path - version: "0.3.2+1" path: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec_overrides.yaml b/examples/wikivoyage_eu/pubspec_overrides.yaml index 8b4fec3e..075ddc4f 100644 --- a/examples/wikivoyage_eu/pubspec_overrides.yaml +++ b/examples/wikivoyage_eu/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_community,langchain_ollama,ollama_dart +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_community,langchain_ollama,ollama_dart dependency_overrides: langchain: path: ../../packages/langchain @@ -8,9 +8,5 @@ dependency_overrides: path: ../../packages/langchain_core langchain_ollama: path: ../../packages/langchain_ollama - langchain_openai: - path: ../../packages/langchain_openai ollama_dart: path: ../../packages/ollama_dart - openai_dart: - path: ../../packages/openai_dart diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 561f7d7d..b86c0eae 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -10,9 +10,9 @@ Build LLM-powered Dart/Flutter applications. ## What is LangChain.dart? -LangChain.dart is a Dart port of the popular [LangChain](https://github.com/hwchase17/langchain) Python framework created by [Harrison Chase](https://www.linkedin.com/in/harrison-chase-961287118). +LangChain.dart is an unofficial Dart port of the popular [LangChain](https://github.com/hwchase17/langchain) Python framework created by [Harrison Chase](https://www.linkedin.com/in/harrison-chase-961287118). -LangChain provides a set of ready-to-use components for working with language models and a standard interface for chaining them together to formulate more advanced use cases (e.g. chatbots, Q&A with RAG, agents, summarization, extraction, etc.). +LangChain provides a set of ready-to-use components for working with language models and a standard interface for chaining them together to formulate more advanced use cases (e.g. 
chatbots, Q&A with RAG, agents, summarization, translation, extraction, recsys, etc.). The components can be grouped into a few core modules: @@ -22,7 +22,7 @@ The components can be grouped into a few core modules: - 📚 **Retrieval:** assists in loading user data (via document loaders), transforming it (with text splitters), extracting its meaning (using embedding models), storing (in vector stores) and retrieving it (through retrievers) so that it can be used to ground the model's responses (i.e. Retrieval-Augmented Generation or RAG). - 🤖 **Agents:** "bots" that leverage LLMs to make informed decisions about which available tools (such as web search, calculators, database lookup, etc.) to use to accomplish the designated task. -The different components can be composed together using the LangChain Expression Language (LCEL). +The different components can be composed together using the [LangChain Expression Language (LCEL)](https://langchaindart.dev/#/expression_language/get_started). ## Motivation @@ -37,15 +37,32 @@ LangChain.dart aims to fill this gap by abstracting the intricacies of working w ## Packages LangChain.dart has a modular design that allows developers to import only the components they need. The ecosystem consists of several packages: -- [`langchain_core`](https://pub.dev/packages/langchain_core): contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. - > Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. -- [`langchain`](https://pub.dev/packages/langchain): contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. - > Depend on this package to build LLM applications with LangChain.dart. - > This package exposes `langchain_core` so you don't need to depend on it explicitly. -- [`langchain_community`](https://pub.dev/packages/langchain_community): contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. - > Depend on this package if you want to use any of the integrations or components it provides. -- Integration-specific packages (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), etc.): popular third-party integrations are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package. - > Depend on an integration-specific package if you want to use the specific integration. + +### [`langchain_core`](https://pub.dev/packages/langchain_core) + +Contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. + +> Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. + +### [`langchain`](https://pub.dev/packages/langchain) + +Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. + +> Depend on this package to build LLM applications with LangChain.dart. +> +> This package exposes `langchain_core` so you don't need to depend on it explicitly. + +### [`langchain_community`](https://pub.dev/packages/langchain_community) + +Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. 
+ +> Depend on this package if you want to use any of the integrations or components it provides. + +### Integration-specific packages + +Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), [`langchain_ollama`](https://pub.dev/packages/langchain_ollama), etc.) are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package. + +> Depend on an integration-specific package if you want to use the specific integration.
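As a rough illustration of how these packages fit together, the sketch below composes a prompt, a chat model and an output parser with LCEL. It assumes `langchain` and `langchain_openai` are added as dependencies; the prompt text, the placeholder API key and the reliance on `ChatOpenAI` defaults are illustrative assumptions rather than prescribed usage.

```dart
import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  // Core abstractions (prompt templates, output parsers) are re-exported
  // by the `langchain` package.
  final promptTemplate = ChatPromptTemplate.fromTemplate(
    'Tell me a joke about {topic}',
  );
  final outputParser = StringOutputParser<ChatResult>();

  // The model wrapper comes from the integration-specific package.
  final chatModel = ChatOpenAI(apiKey: 'OPENAI_API_KEY'); // placeholder key

  // LCEL: pipe the runnables together into a single chain and run it.
  final chain = promptTemplate.pipe(chatModel).pipe(outputParser);
  final joke = await chain.invoke({'topic': 'dependency management'});
  print(joke);

  // Release the model's resources (Runnable.close(), added in
  // langchain_core 0.3.2 per the release notes further down).
  chatModel.close();
}
```

The same chain shape works with any other integration package (e.g. swapping `ChatOpenAI` for `ChatOllama`), which is the point of keeping model wrappers outside the core package.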

    @@ -81,22 +98,24 @@ Functionality provided by each integration package: The following packages are maintained (and used internally) by LangChain.dart, although they can also be used independently: -| Package | Version | Description | -|-----------------------------------------------------------|---------------------------------------------------------------------------------------------------------------|-----------------------------------------------| -| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | Chroma DB API client | -| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | Google AI for Developers (Gemini API) client | -| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | Mistral AI API client | -| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | Ollama API client | -| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | OpenAI API client | -| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | GCP Vertex AI API client | +| Package | Version | Description | +|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|----------------------------------------------| +| [anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | Anthropic (Claude API) client | +| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | Chroma DB API client | +| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | Google AI for Developers (Gemini API) client | +| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | Mistral AI API client | +| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | Ollama API client | +| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | OpenAI API client | +| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | GCP Vertex AI API client | ## Getting started -To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_openai` or `langchain_google`): +To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. 
Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_community`, `langchain_openai`, `langchain_google`, etc.): ```yaml dependencies: langchain: {version} + langchain_community: {version} langchain_openai: {version} langchain_google: {version} ... From 7971ec46a82e5346e4e0f8ee82ea23f584615edd Mon Sep 17 00:00:00 2001 From: David Miguel Date: Sat, 1 Jun 2024 00:16:24 +0200 Subject: [PATCH 034/251] chore(release): publish packages - langchain@0.7.2 - langchain_chroma@0.2.0+5 - langchain_community@0.2.1 - langchain_core@0.3.2 - langchain_firebase@0.1.0+2 - langchain_google@0.5.1 - langchain_mistralai@0.2.1 - langchain_ollama@0.2.2 - langchain_openai@0.6.2 - langchain_pinecone@0.1.0+5 - langchain_supabase@0.1.0+5 - anthropic_sdk_dart@0.0.1 - chromadb@0.2.0+1 - googleai_dart@0.1.0+1 - mistralai_dart@0.0.3+2 - ollama_dart@0.1.1 - openai_dart@0.3.3 - vertex_ai@0.1.0+1 --- CHANGELOG.md | 84 +++++++++++++++---- examples/browser_summarizer/pubspec.yaml | 6 +- examples/docs_examples/pubspec.yaml | 14 ++-- examples/hello_world_backend/pubspec.yaml | 4 +- examples/hello_world_cli/pubspec.yaml | 4 +- examples/hello_world_flutter/pubspec.yaml | 10 +-- .../pubspec.yaml | 2 +- examples/wikivoyage_eu/pubspec.yaml | 6 +- melos.yaml | 2 +- packages/anthropic_sdk_dart/CHANGELOG.md | 6 +- packages/anthropic_sdk_dart/pubspec.yaml | 2 +- packages/chromadb/CHANGELOG.md | 4 + packages/chromadb/pubspec.yaml | 2 +- packages/googleai_dart/CHANGELOG.md | 4 + packages/googleai_dart/pubspec.yaml | 2 +- packages/langchain/CHANGELOG.md | 8 ++ packages/langchain/pubspec.yaml | 4 +- packages/langchain_chroma/CHANGELOG.md | 4 + packages/langchain_chroma/pubspec.yaml | 12 +-- packages/langchain_community/CHANGELOG.md | 5 ++ packages/langchain_community/pubspec.yaml | 6 +- packages/langchain_core/CHANGELOG.md | 6 ++ packages/langchain_core/pubspec.yaml | 2 +- packages/langchain_firebase/CHANGELOG.md | 4 + .../langchain_firebase/example/pubspec.yaml | 4 +- packages/langchain_firebase/pubspec.yaml | 4 +- packages/langchain_google/CHANGELOG.md | 4 + packages/langchain_google/pubspec.yaml | 6 +- packages/langchain_mistralai/CHANGELOG.md | 4 + packages/langchain_mistralai/pubspec.yaml | 6 +- packages/langchain_ollama/CHANGELOG.md | 4 + packages/langchain_ollama/pubspec.yaml | 6 +- packages/langchain_openai/CHANGELOG.md | 5 ++ packages/langchain_openai/pubspec.yaml | 10 +-- packages/langchain_pinecone/CHANGELOG.md | 4 + packages/langchain_pinecone/pubspec.yaml | 6 +- packages/langchain_supabase/CHANGELOG.md | 4 + packages/langchain_supabase/pubspec.yaml | 10 +-- packages/mistralai_dart/CHANGELOG.md | 4 + packages/mistralai_dart/pubspec.yaml | 2 +- packages/ollama_dart/CHANGELOG.md | 5 ++ packages/ollama_dart/pubspec.yaml | 2 +- packages/openai_dart/CHANGELOG.md | 6 ++ packages/openai_dart/pubspec.yaml | 2 +- packages/vertex_ai/CHANGELOG.md | 4 + packages/vertex_ai/pubspec.yaml | 2 +- 46 files changed, 220 insertions(+), 87 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 079c8450..e1699211 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,73 @@ Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. 
+## 2024-06-01 + +### Changes + +--- + +New packages: + + - [`anthropic_sdk_dart` - `v0.0.1`](#anthropic_sdk_dart---v001) + +Packages with other changes: + + - [`langchain` - `v0.7.2`](#langchain---v072) + - [`langchain_core` - `v0.3.2`](#langchain_core---v032) + - [`langchain_community` - `v0.2.1`](#langchain_community---v021) + - [`langchain_chroma` - `v0.2.0+5`](#langchain_chroma---v0205) + - [`langchain_firebase` - `v0.1.0+2`](#langchain_firebase---v0102) + - [`langchain_google` - `v0.5.1`](#langchain_google---v051) + - [`langchain_mistralai` - `v0.2.1`](#langchain_mistralai---v021) + - [`langchain_ollama` - `v0.2.2`](#langchain_ollama---v022) + - [`langchain_openai` - `v0.6.2`](#langchain_openai---v062) + - [`langchain_pinecone` - `v0.1.0+5`](#langchain_pinecone---v0105) + - [`langchain_supabase` - `v0.1.0+5`](#langchain_supabase---v0105) + - [`chromadb` - `v0.2.0+1`](#chromadb---v0201) + - [`googleai_dart` - `v0.1.0+1`](#googleai_dart---v0101) + - [`mistralai_dart` - `v0.0.3+2`](#mistralai_dart---v0032) + - [`ollama_dart` - `v0.1.1`](#ollama_dart---v011) + - [`openai_dart` - `v0.3.3`](#openai_dart---v033) + - [`vertex_ai` - `v0.1.0+1`](#vertex_ai---v0101) + +--- + +#### `langchain` - `v0.7.2` + + - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) + - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). ([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) + +#### `langchain_core` - `v0.3.2` + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + - **FIX**: Stream errors are not propagated by StringOutputParser ([#440](https://github.com/davidmigloz/langchain_dart/issues/440)). ([496b11cc](https://github.com/davidmigloz/langchain_dart/commit/496b11cca9bbf9892c425e49138562537398bc70)) + +#### `langchain_community` - `v0.2.1` + + - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) + +#### `langchain_openai` - `v0.6.2` + + - **DOCS**: Document tool calling with OpenRouter ([#437](https://github.com/davidmigloz/langchain_dart/issues/437)). ([47986592](https://github.com/davidmigloz/langchain_dart/commit/47986592a674322fe2f69aff7166a3e594756ace)) + +#### `anthropic_sdk_dart` - `v0.0.1` + + - **FEAT**: Implement anthropic_sdk_dart, a Dart client for Anthropic API ([#433](https://github.com/davidmigloz/langchain_dart/issues/433)). ([e5412b](https://github.com/davidmigloz/langchain_dart/commit/e5412bdedc7de911f7de88eb51e9d41cd85ab4ae)) + +#### `ollama_dart` - `v0.1.1` + + - **FEAT**: Support buffered stream responses ([#445](https://github.com/davidmigloz/langchain_dart/issues/445)). 
([ce2ef30c](https://github.com/davidmigloz/langchain_dart/commit/ce2ef30c9a9a0dfe8f3059988b7007c94c45b9bd)) + +#### `openai_dart` - `v0.3.3` + + - **FEAT**: Support FastChat OpenAI-compatible API ([#444](https://github.com/davidmigloz/langchain_dart/issues/444)). ([ddaf1f69](https://github.com/davidmigloz/langchain_dart/commit/ddaf1f69d8262210637999367690bf362f2dc5c3)) + - **FIX**: Make vector store name optional ([#436](https://github.com/davidmigloz/langchain_dart/issues/436)). ([29a46c7f](https://github.com/davidmigloz/langchain_dart/commit/29a46c7fa645439e8f4acc10a16da904e7cf14ff)) + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + + ## 2024-05-20 ### Changes @@ -2422,20 +2489,3 @@ Packages with changes: - **FIX**: OpenAIQAWithSourcesChain throws exception. ([45c6cb9d](https://github.com/davidmigloz/langchain_dart/commit/45c6cb9d32be670902dd2fe4cb92597765590d85)) - **FEAT**: Support estimating the number of tokens for a given prompt ([#3](https://github.com/davidmigloz/langchain_dart/issues/3)). ([e22f22c8](https://github.com/davidmigloz/langchain_dart/commit/e22f22c89f188a019b96a7c0003dbd26471bebb7)) - -## 2023-07-02 - -### Changes - -#### `langchain` - `v0.0.1` - - - Initial public release. -t/commit/e22f22c89f188a019b96a7c0003dbd26471bebb7)) - -## 2023-07-02 - -### Changes - -#### `langchain` - `v0.0.1` - - - Initial public release. diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 2ab1aff4..fcb8dfa3 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -13,9 +13,9 @@ dependencies: flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 js: ^0.7.1 - langchain: ^0.7.1 - langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_community: 0.2.1 + langchain_openai: ^0.6.2 shared_preferences: ^2.2.2 flutter: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 716c7270..09e311f0 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -7,10 +7,10 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.1 - langchain_chroma: ^0.2.0+4 - langchain_community: 0.2.0+1 - langchain_google: ^0.5.0 - langchain_mistralai: ^0.2.0+1 - langchain_ollama: ^0.2.1+1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_chroma: ^0.2.0+5 + langchain_community: 0.2.1 + langchain_google: ^0.5.1 + langchain_mistralai: ^0.2.1 + langchain_ollama: ^0.2.2 + langchain_openai: ^0.6.2 diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index 4c7f0059..665ba178 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -7,7 +7,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_openai: ^0.6.2 shelf: ^1.4.1 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index d814f7c4..28872dc6 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -7,5 +7,5 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_openai: ^0.6.2 diff --git a/examples/hello_world_flutter/pubspec.yaml 
b/examples/hello_world_flutter/pubspec.yaml index 6d125283..4f9f4c56 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -12,11 +12,11 @@ dependencies: equatable: ^2.0.5 flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 - langchain: ^0.7.1 - langchain_google: ^0.5.0 - langchain_mistralai: ^0.2.0+1 - langchain_ollama: ^0.2.1+1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_google: ^0.5.1 + langchain_mistralai: ^0.2.1 + langchain_ollama: ^0.2.2 + langchain_openai: ^0.6.2 flutter: uses-material-design: true diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.yaml b/examples/vertex_ai_matching_engine_setup/pubspec.yaml index 34b972bf..9de8254f 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.yaml +++ b/examples/vertex_ai_matching_engine_setup/pubspec.yaml @@ -10,4 +10,4 @@ dependencies: gcloud: ^0.8.12 googleapis_auth: ^1.5.1 http: ^1.1.0 - vertex_ai: ^0.1.0 + vertex_ai: ^0.1.0+1 diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index 7b4ce9a2..1c81fb76 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -7,6 +7,6 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.1 - langchain_ollama: ^0.2.1+1 - langchain_community: 0.2.0+1 + langchain: ^0.7.2 + langchain_ollama: ^0.2.2 + langchain_community: 0.2.1 diff --git a/melos.yaml b/melos.yaml index b39bb2a1..4912fe7c 100644 --- a/melos.yaml +++ b/melos.yaml @@ -14,7 +14,7 @@ command: branch: main changelogs: - path: CHANGELOG.md - description: Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. + description: "Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release." packageFilters: no-private: true bootstrap: diff --git a/packages/anthropic_sdk_dart/CHANGELOG.md b/packages/anthropic_sdk_dart/CHANGELOG.md index 90f8e244..de958be3 100644 --- a/packages/anthropic_sdk_dart/CHANGELOG.md +++ b/packages/anthropic_sdk_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.0.1 + + - **FEAT**: Implement anthropic_sdk_dart, a Dart client for Anthropic API ([#433](https://github.com/davidmigloz/langchain_dart/issues/433)). ([e5412b](https://github.com/davidmigloz/langchain_dart/commit/e5412bdedc7de911f7de88eb51e9d41cd85ab4ae)) + ## 0.0.1-dev.1 -- Bootstrap package. + - Bootstrap package. diff --git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml index 5beab57e..164ba95f 100644 --- a/packages/anthropic_sdk_dart/pubspec.yaml +++ b/packages/anthropic_sdk_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: anthropic_sdk_dart description: Dart Client for the Anthropic API (Claude 3 Opus, Sonnet, Haiku, etc.). -version: 0.0.1-dev.1 +version: 0.0.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/anthropic_sdk_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:anthropic_sdk_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/chromadb/CHANGELOG.md b/packages/chromadb/CHANGELOG.md index 899efe6f..7f7724ef 100644 --- a/packages/chromadb/CHANGELOG.md +++ b/packages/chromadb/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.0+1 + + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). 
([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + ## 0.2.0 - **FIX**: Have the == implementation use Object instead of dynamic ([#334](https://github.com/davidmigloz/langchain_dart/issues/334)). ([89f7b0b9](https://github.com/davidmigloz/langchain_dart/commit/89f7b0b94144c216de19ec7244c48f3c34c2c635)) diff --git a/packages/chromadb/pubspec.yaml b/packages/chromadb/pubspec.yaml index 40252b6b..f11b20ea 100644 --- a/packages/chromadb/pubspec.yaml +++ b/packages/chromadb/pubspec.yaml @@ -1,6 +1,6 @@ name: chromadb description: Dart Client for the Chroma open-source embedding database API. -version: 0.2.0 +version: 0.2.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/chromadb issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/googleai_dart/CHANGELOG.md b/packages/googleai_dart/CHANGELOG.md index 8277d0d5..7a6ca6b8 100644 --- a/packages/googleai_dart/CHANGELOG.md +++ b/packages/googleai_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+1 + + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + ## 0.1.0 - **REFACTOR**: Minor changes ([#407](https://github.com/davidmigloz/langchain_dart/issues/407)). ([fa4b5c37](https://github.com/davidmigloz/langchain_dart/commit/fa4b5c376a191fea50c3f8b1d6b07cef0480a74e)) diff --git a/packages/googleai_dart/pubspec.yaml b/packages/googleai_dart/pubspec.yaml index 2ed4d004..2006a059 100644 --- a/packages/googleai_dart/pubspec.yaml +++ b/packages/googleai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: googleai_dart description: Dart Client for the Google AI API (Gemini Pro, Gemini Pro Vision, embeddings, etc.). -version: 0.1.0 +version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/googleai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:googleai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index 9d255b21..cc6953da 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -1,3 +1,11 @@ +## 0.7.2 + +> Note: ObjectBox Vector DB integration (`ObjectBoxVectorStore`) is now available in the [`langchain_community`](https://pub.dev/packages/langchain_community) package + + - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) + - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). ([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) + ## 0.7.1 > Note: VertexAI for Firebase (`ChatFirebaseVertexAI`) is available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package. 
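Since the `langchain` 0.7.2 notes above introduce `ObjectBoxVectorStore` and point to its documentation, a rough usage sketch follows. The constructor parameters, the Ollama embedding model and the 512-dimension value are assumptions for illustration and should be checked against the linked ObjectBoxVectorStore docs.

```dart
import 'package:langchain/langchain.dart';
import 'package:langchain_community/langchain_community.dart';
import 'package:langchain_ollama/langchain_ollama.dart';

Future<void> main() async {
  // Any Embeddings implementation can back the store; a local Ollama
  // embedding model is assumed here.
  final embeddings = OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en');

  // On-device vector store persisted by ObjectBox. The vector dimension
  // must match the embedding model's output size (512 assumed).
  final vectorStore = ObjectBoxVectorStore(
    embeddings: embeddings,
    dimensions: 512,
  );

  await vectorStore.addDocuments(
    documents: [
      Document(pageContent: 'LangChain.dart now ships an ObjectBox-backed vector store.'),
      Document(pageContent: 'LCEL composes prompts, models and parsers into chains.'),
    ],
  );

  final results = await vectorStore.similaritySearch(
    query: 'Which vector stores does LangChain.dart support?',
  );
  print(results);
}
```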
diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 1483d1f5..a92d1e9c 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain description: Build powerful LLM-based Dart and Flutter applications with LangChain.dart. -version: 0.7.1 +version: 0.7.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ dependencies: characters: ^1.3.0 collection: ">=1.17.0 <1.19.0" crypto: ^3.0.3 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_chroma/CHANGELOG.md b/packages/langchain_chroma/CHANGELOG.md index 266080ac..8e785534 100644 --- a/packages/langchain_chroma/CHANGELOG.md +++ b/packages/langchain_chroma/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.0+5 + + - Update a dependency to the latest release. + ## 0.2.0+4 - Update a dependency to the latest release. diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 84e24303..4ce07684 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_chroma description: LangChain.dart integration module for Chroma open-source embedding database. -version: 0.2.0+4 +version: 0.2.0+5 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart @@ -17,14 +17,14 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - chromadb: ^0.2.0 + chromadb: ^0.2.0+1 http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 meta: ^1.11.0 uuid: ^4.3.3 dev_dependencies: test: ^1.25.2 - langchain: ^0.7.1 - langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_community: 0.2.1 + langchain_openai: ^0.6.2 diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 7f48bd87..63111604 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.2.1 + + - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) + ## 0.2.0+1 - Update a dependency to the latest release. diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index caa994db..c7083a98 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. 
-version: 0.2.0+1 +version: 0.2.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart @@ -21,7 +21,7 @@ dependencies: csv: ^6.0.0 http: ^1.1.0 json_path: ^0.7.1 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 math_expressions: ^2.4.0 meta: ^1.11.0 objectbox: ^4.0.1 @@ -29,7 +29,7 @@ dependencies: dev_dependencies: build_runner: ^2.4.9 - langchain_openai: ^0.6.1+1 + langchain_openai: ^0.6.2 objectbox_generator: ^4.0.0 test: ^1.25.2 diff --git a/packages/langchain_core/CHANGELOG.md b/packages/langchain_core/CHANGELOG.md index 25cf9ffd..dd637cd5 100644 --- a/packages/langchain_core/CHANGELOG.md +++ b/packages/langchain_core/CHANGELOG.md @@ -1,3 +1,9 @@ +## 0.3.2 + + - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). ([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) + - **FIX**: Stream errors are not propagated by StringOutputParser ([#440](https://github.com/davidmigloz/langchain_dart/issues/440)). ([496b11cc](https://github.com/davidmigloz/langchain_dart/commit/496b11cca9bbf9892c425e49138562537398bc70)) + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + ## 0.3.1 - **FEAT**: Add equals to ChatToolChoiceForced ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f)) diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index d6f04b41..b682b76a 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_core description: Contains core abstractions of LangChain.dart and the LangChain Expression Language (LCEL). -version: 0.3.1 +version: 0.3.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index a7350a9b..60c41358 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+2 + + - Update a dependency to the latest release. + ## 0.1.0+1 - **DOCS**: Fix lint issues in langchain_firebase example. 
([f85a6ad7](https://github.com/davidmigloz/langchain_dart/commit/f85a6ad755e00c513bd4349663e33d40be8a696c)) diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index a4857f0d..8c912278 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -13,8 +13,8 @@ dependencies: flutter: sdk: flutter flutter_markdown: ^0.6.22 - langchain: 0.7.1 - langchain_firebase: 0.1.0+1 + langchain: 0.7.2 + langchain_firebase: 0.1.0+2 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 413e85c3..357f102d 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). -version: 0.1.0+1 +version: 0.1.0+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart @@ -23,7 +23,7 @@ dependencies: firebase_core: ^2.31.0 cloud_firestore: ^4.17.0 firebase_vertexai: ^0.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index b61c71d8..c2d95eed 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.5.1 + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + ## 0.5.0 > Note: `ChatGoogleGenerativeAI` and `GoogleGenerativeAIEmbeddings` now use the version `v1beta` of the Gemini API (instead of `v1`) which support the latest models (`gemini-1.5-pro-latest` and `gemini-1.5-flash-latest`). diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 67a75cff..0f07b091 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_google description: LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). -version: 0.5.0 +version: 0.5.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart @@ -24,10 +24,10 @@ dependencies: googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 meta: ^1.11.0 uuid: ^4.3.3 - vertex_ai: ^0.1.0 + vertex_ai: ^0.1.0+1 langchain_firebase: ^0.1.0 firebase_core: ^2.31.0 diff --git a/packages/langchain_mistralai/CHANGELOG.md b/packages/langchain_mistralai/CHANGELOG.md index c87fd2db..d5d9ca46 100644 --- a/packages/langchain_mistralai/CHANGELOG.md +++ b/packages/langchain_mistralai/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.1 + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). 
([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + ## 0.2.0+1 - Update a dependency to the latest release. diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 964397d3..2eda0275 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_mistralai description: LangChain.dart integration module for Mistral AI (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.2.0+1 +version: 0.2.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_mistralai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_mistralai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,10 +19,10 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - mistralai_dart: ^0.0.3+1 + mistralai_dart: ^0.0.3+2 dev_dependencies: test: ^1.25.2 diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index 81bb56d2..a72f229e 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.2 + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + ## 0.2.1+1 - Update a dependency to the latest release. diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index aea5e9ee..ae7adb8d 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_ollama description: LangChain.dart integration module for Ollama (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). -version: 0.2.1+1 +version: 0.2.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart @@ -19,10 +19,10 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.1.0+1 + ollama_dart: ^0.1.1 uuid: ^4.3.3 dev_dependencies: diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index ae115e6d..4daab488 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.6.2 + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + - **DOCS**: Document tool calling with OpenRouter ([#437](https://github.com/davidmigloz/langchain_dart/issues/437)). ([47986592](https://github.com/davidmigloz/langchain_dart/commit/47986592a674322fe2f69aff7166a3e594756ace)) + ## 0.6.1+1 - Update a dependency to the latest release. 
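The `langchain_openai` 0.6.2 entry above mentions documentation for tool calling through OpenRouter's OpenAI-compatible endpoint. The sketch below shows one way that setup can look; the API key, the model id and the `get_current_weather` tool definition are illustrative assumptions, not part of the release notes.

```dart
import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

// Illustrative tool definition; the name and JSON schema are assumptions.
const weatherTool = ToolSpec(
  name: 'get_current_weather',
  description: 'Get the current weather for a given city',
  inputJsonSchema: {
    'type': 'object',
    'properties': {
      'city': {'type': 'string', 'description': 'The city name'},
    },
    'required': ['city'],
  },
);

Future<void> main() async {
  // ChatOpenAI can target any OpenAI-compatible endpoint via `baseUrl`;
  // here it is pointed at OpenRouter with a placeholder key and model id.
  final chatModel = ChatOpenAI(
    apiKey: 'OPENROUTER_API_KEY',
    baseUrl: 'https://openrouter.ai/api/v1',
    defaultOptions: const ChatOpenAIOptions(
      model: 'openai/gpt-4o',
      tools: [weatherTool],
    ),
  );

  final res = await chatModel.invoke(
    PromptValue.string("What's the weather like in Barcelona?"),
  );
  // If the model decided to call the tool, the call is exposed here.
  print(res.output.toolCalls);

  chatModel.close();
}
```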
diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index efab060a..5d31a856 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_openai description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). -version: 0.6.1+1 +version: 0.6.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,13 +19,13 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.3.2+1 + openai_dart: ^0.3.3 uuid: ^4.3.3 dev_dependencies: - langchain: ^0.7.1 - langchain_community: 0.2.0+1 + langchain: ^0.7.2 + langchain_community: 0.2.1 test: ^1.25.2 diff --git a/packages/langchain_pinecone/CHANGELOG.md b/packages/langchain_pinecone/CHANGELOG.md index 276d2616..6e3c39e3 100644 --- a/packages/langchain_pinecone/CHANGELOG.md +++ b/packages/langchain_pinecone/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+5 + + - Update a dependency to the latest release. + ## 0.1.0+4 - Update a dependency to the latest release. diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index 479b441e..b943bde0 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_pinecone description: LangChain.dart integration module for Pinecone fully-managed vector database. -version: 0.1.0+4 +version: 0.1.0+5 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart @@ -18,11 +18,11 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.3.3 dev_dependencies: test: ^1.25.2 - langchain_openai: ^0.6.1+1 + langchain_openai: ^0.6.2 diff --git a/packages/langchain_supabase/CHANGELOG.md b/packages/langchain_supabase/CHANGELOG.md index d98b5fe3..00a141c5 100644 --- a/packages/langchain_supabase/CHANGELOG.md +++ b/packages/langchain_supabase/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+5 + + - Update a dependency to the latest release. + ## 0.1.0+4 - Update a dependency to the latest release. diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index 91340307..d6e0e622 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_supabase description: LangChain.dart integration module for Supabase (e.g. Supabase Vector). 
-version: 0.1.0+4 +version: 0.1.0+5 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart @@ -18,12 +18,12 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.3.1 + langchain_core: ^0.3.2 meta: ^1.11.0 supabase: ^2.0.8 dev_dependencies: test: ^1.25.2 - langchain: ^0.7.1 - langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1+1 + langchain: ^0.7.2 + langchain_community: 0.2.1 + langchain_openai: ^0.6.2 diff --git a/packages/mistralai_dart/CHANGELOG.md b/packages/mistralai_dart/CHANGELOG.md index fcb706a7..d1426493 100644 --- a/packages/mistralai_dart/CHANGELOG.md +++ b/packages/mistralai_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.0.3+2 + + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + ## 0.0.3+1 - **FIX**: Have the == implementation use Object instead of dynamic ([#334](https://github.com/davidmigloz/langchain_dart/issues/334)). ([89f7b0b9](https://github.com/davidmigloz/langchain_dart/commit/89f7b0b94144c216de19ec7244c48f3c34c2c635)) diff --git a/packages/mistralai_dart/pubspec.yaml b/packages/mistralai_dart/pubspec.yaml index 27b81ed4..a2aad311 100644 --- a/packages/mistralai_dart/pubspec.yaml +++ b/packages/mistralai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: mistralai_dart description: Dart Client for the Mistral AI API (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.0.3+1 +version: 0.0.3+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/mistralai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:mistralai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index f7c943f9..21ceb1cf 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.1.1 + + - **FEAT**: Support buffered stream responses in ollama_dart ([#445](https://github.com/davidmigloz/langchain_dart/issues/445)). ([ce2ef30c](https://github.com/davidmigloz/langchain_dart/commit/ce2ef30c9a9a0dfe8f3059988b7007c94c45b9bd)) + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + ## 0.1.0+1 - **FIX**: digest path param in Ollama blob endpoints ([#430](https://github.com/davidmigloz/langchain_dart/issues/430)). ([2e9e935a](https://github.com/davidmigloz/langchain_dart/commit/2e9e935aefd74e5e9e09a23188a6c77ce535661d)) diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index ab538c0d..81f9fd49 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: ollama_dart description: Dart Client for the Ollama API (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). 
-version: 0.1.0+1 +version: 0.1.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index 6e366631..0a0e4085 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -1,3 +1,9 @@ +## 0.3.3 + + - **FEAT**: Support FastChat OpenAI-compatible API ([#444](https://github.com/davidmigloz/langchain_dart/issues/444)). ([ddaf1f69](https://github.com/davidmigloz/langchain_dart/commit/ddaf1f69d8262210637999367690bf362f2dc5c3)) + - **FIX**: Make vector store name optional in openai_dart ([#436](https://github.com/davidmigloz/langchain_dart/issues/436)). ([29a46c7f](https://github.com/davidmigloz/langchain_dart/commit/29a46c7fa645439e8f4acc10a16da904e7cf14ff)) + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + ## 0.3.2+1 - **FIX**: Rename CreateRunRequestModel factories names ([#429](https://github.com/davidmigloz/langchain_dart/issues/429)). ([fd15793b](https://github.com/davidmigloz/langchain_dart/commit/fd15793b3c4ac94dfc90567b4a709e1458f4e0e8)) diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index ee8442e2..f617c8f0 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: openai_dart description: Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. -version: 0.3.2+1 +version: 0.3.3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/vertex_ai/CHANGELOG.md b/packages/vertex_ai/CHANGELOG.md index f081d3a9..18902a6a 100644 --- a/packages/vertex_ai/CHANGELOG.md +++ b/packages/vertex_ai/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+1 + + - Update a dependency to the latest release. + ## 0.1.0 - **REFACTOR**: Minor changes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96)) diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index 703fb145..3454b32d 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -1,6 +1,6 @@ name: vertex_ai description: GCP Vertex AI ML platform API client (PaLM, Matching Engine, etc.). 
-version: 0.1.0 +version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/vertex_ai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:vertex_ai homepage: https://github.com/davidmigloz/langchain_dart From 32d56da621bd838852bdd092c2ff7c2aaa1ab4df Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 1 Jun 2024 00:58:21 +0200 Subject: [PATCH 035/251] fix: Add missing dependency in langchain_community package (#448) --- melos.yaml | 1 + packages/langchain_community/pubspec.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/melos.yaml b/melos.yaml index 4912fe7c..8390e622 100644 --- a/melos.yaml +++ b/melos.yaml @@ -35,6 +35,7 @@ command: firebase_app_check: ^0.2.2+5 firebase_core: ^2.31.0 firebase_vertexai: ^0.1.0 + flat_buffers: ^23.5.26 flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 freezed_annotation: ^2.4.1 diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index c7083a98..17a07668 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -19,6 +19,7 @@ dependencies: beautiful_soup_dart: ^0.3.0 cross_file: ^0.3.4+1 csv: ^6.0.0 + flat_buffers: ^23.5.26 http: ^1.1.0 json_path: ^0.7.1 langchain_core: ^0.3.2 From 6d905628bbcd958890e08f6a29905aa36a970d60 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Sat, 1 Jun 2024 00:58:56 +0200 Subject: [PATCH 036/251] chore(release): publish packages - langchain_community@0.2.1+1 - langchain_firebase@0.1.0+3 --- examples/browser_summarizer/pubspec.lock | 10 +++---- examples/browser_summarizer/pubspec.yaml | 2 +- examples/docs_examples/pubspec.lock | 26 +++++++++---------- examples/docs_examples/pubspec.yaml | 2 +- examples/hello_world_backend/pubspec.lock | 8 +++--- examples/hello_world_cli/pubspec.lock | 8 +++--- examples/hello_world_flutter/pubspec.lock | 20 +++++++------- .../pubspec.lock | 2 +- examples/wikivoyage_eu/pubspec.lock | 10 +++---- examples/wikivoyage_eu/pubspec.yaml | 2 +- packages/langchain_chroma/pubspec.yaml | 2 +- packages/langchain_community/CHANGELOG.md | 4 +++ packages/langchain_community/pubspec.yaml | 2 +- packages/langchain_firebase/CHANGELOG.md | 4 +++ .../langchain_firebase/example/pubspec.lock | 6 ++--- .../langchain_firebase/example/pubspec.yaml | 2 +- packages/langchain_firebase/pubspec.lock | 2 +- packages/langchain_firebase/pubspec.yaml | 2 +- packages/langchain_openai/pubspec.yaml | 2 +- packages/langchain_supabase/pubspec.yaml | 2 +- 20 files changed, 63 insertions(+), 55 deletions(-) diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index ca9c5503..17d43f80 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -233,28 +233,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.0+1" + version: "0.2.1+1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1+1" + version: "0.6.2" langchain_tiktoken: dependency: transitive description: @@ -325,7 +325,7 @@ packages: path: "../../packages/openai_dart" 
relative: true source: path - version: "0.3.2+1" + version: "0.3.3" path: dependency: transitive description: diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index fcb8dfa3..0e729f8d 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -14,7 +14,7 @@ dependencies: flutter_markdown: ^0.6.22 js: ^0.7.1 langchain: ^0.7.2 - langchain_community: 0.2.1 + langchain_community: 0.2.1+1 langchain_openai: ^0.6.2 shared_preferences: ^2.2.2 diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index bc3d8b13..6199523b 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -47,7 +47,7 @@ packages: path: "../../packages/chromadb" relative: true source: path - version: "0.2.0" + version: "0.2.0+1" collection: dependency: transitive description: @@ -238,56 +238,56 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_chroma: dependency: "direct main" description: path: "../../packages/langchain_chroma" relative: true source: path - version: "0.2.0+4" + version: "0.2.0+5" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.0+1" + version: "0.2.1+1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.5.0" + version: "0.5.1" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.0+1" + version: "0.2.1" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.1+1" + version: "0.2.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1+1" + version: "0.6.2" langchain_tiktoken: dependency: transitive description: @@ -326,7 +326,7 @@ packages: path: "../../packages/mistralai_dart" relative: true source: path - version: "0.0.3+1" + version: "0.0.3+2" objectbox: dependency: transitive description: @@ -341,14 +341,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.0+1" + version: "0.1.1" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2+1" + version: "0.3.3" path: dependency: transitive description: @@ -451,7 +451,7 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0" + version: "0.1.0+1" web: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 09e311f0..3044b6d2 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -9,7 +9,7 @@ environment: dependencies: langchain: ^0.7.2 langchain_chroma: ^0.2.0+5 - langchain_community: 0.2.1 + langchain_community: 0.2.1+1 langchain_google: ^0.5.1 langchain_mistralai: ^0.2.1 langchain_ollama: ^0.2.2 diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index 9c8a5ba4..dc3ac458 100644 --- 
a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -119,21 +119,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1+1" + version: "0.6.2" langchain_tiktoken: dependency: transitive description: @@ -156,7 +156,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2+1" + version: "0.3.3" path: dependency: transitive description: diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index df156ea2..8fc27717 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -111,21 +111,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1+1" + version: "0.6.2" langchain_tiktoken: dependency: transitive description: @@ -148,7 +148,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2+1" + version: "0.3.3" path: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index a402383b..d9c9c29f 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -204,42 +204,42 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.5.0" + version: "0.5.1" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.0+1" + version: "0.2.1" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.1+1" + version: "0.2.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1+1" + version: "0.6.2" langchain_tiktoken: dependency: transitive description: @@ -278,7 +278,7 @@ packages: path: "../../packages/mistralai_dart" relative: true source: path - version: "0.0.3+1" + version: "0.0.3+2" nested: dependency: transitive description: @@ -293,14 +293,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.0+1" + version: "0.1.1" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2+1" + version: "0.3.3" path: dependency: transitive description: @@ -400,7 +400,7 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: 
"0.1.0" + version: "0.1.0+1" web: dependency: transitive description: diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.lock b/examples/vertex_ai_matching_engine_setup/pubspec.lock index a29715a0..99209b09 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.lock +++ b/examples/vertex_ai_matching_engine_setup/pubspec.lock @@ -151,7 +151,7 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0" + version: "0.1.0+1" web: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index c05174d0..49dc9df4 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -167,28 +167,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.0+1" + version: "0.2.1+1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.1+1" + version: "0.2.2" langchain_tiktoken: dependency: transitive description: @@ -235,7 +235,7 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.0+1" + version: "0.1.1" path: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index 1c81fb76..198686c0 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -9,4 +9,4 @@ environment: dependencies: langchain: ^0.7.2 langchain_ollama: ^0.2.2 - langchain_community: 0.2.1 + langchain_community: 0.2.1+1 diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 4ce07684..3c96bacb 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -26,5 +26,5 @@ dependencies: dev_dependencies: test: ^1.25.2 langchain: ^0.7.2 - langchain_community: 0.2.1 + langchain_community: 0.2.1+1 langchain_openai: ^0.6.2 diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 63111604..5c3aaba2 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.1+1 + + - **FIX**: Add missing dependency in langchain_community package ([#448](https://github.com/davidmigloz/langchain_dart/issues/448)). ([70ffd027](https://github.com/davidmigloz/langchain_dart/commit/70ffd027cb41c5c5058bb266966734894f773330)) + ## 0.2.1 - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 17a07668..e2286be4 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. 
-version: 0.2.1 +version: 0.2.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index 60c41358..d5128425 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+3 + + - Update a dependency to the latest release. + ## 0.1.0+2 - Update a dependency to the latest release. diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 3b051b3a..87d91077 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -238,21 +238,21 @@ packages: path: "../../langchain" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_core: dependency: "direct overridden" description: path: "../../langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_firebase: dependency: "direct main" description: path: ".." relative: true source: path - version: "0.1.0+1" + version: "0.1.0+3" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index 8c912278..5b635cc5 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -14,7 +14,7 @@ dependencies: sdk: flutter flutter_markdown: ^0.6.22 langchain: 0.7.2 - langchain_firebase: 0.1.0+2 + langchain_firebase: 0.1.0+3 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 89e38672..eee61f63 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -206,7 +206,7 @@ packages: path: "../langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.2" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 357f102d..eec70b30 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). 
-version: 0.1.0+2 +version: 0.1.0+3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 5d31a856..c0ccb98d 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -27,5 +27,5 @@ dependencies: dev_dependencies: langchain: ^0.7.2 - langchain_community: 0.2.1 + langchain_community: 0.2.1+1 test: ^1.25.2 diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index d6e0e622..ffb0656d 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -25,5 +25,5 @@ dependencies: dev_dependencies: test: ^1.25.2 langchain: ^0.7.2 - langchain_community: 0.2.1 + langchain_community: 0.2.1+1 langchain_openai: ^0.6.2 From 423e79cc57487a20c2539d9b85538b1aef10f84d Mon Sep 17 00:00:00 2001 From: David Miguel Date: Sat, 1 Jun 2024 11:02:11 +0200 Subject: [PATCH 037/251] docs: Update ObjectBox docs --- .../vector_stores/integrations/objectbox.md | 21 ++++++++++++------- .../vector_stores/integrations/objectbox.dart | 4 ++-- examples/wikivoyage_eu/README.md | 2 +- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/docs/modules/retrieval/vector_stores/integrations/objectbox.md b/docs/modules/retrieval/vector_stores/integrations/objectbox.md index af4bb6c6..9c165306 100644 --- a/docs/modules/retrieval/vector_stores/integrations/objectbox.md +++ b/docs/modules/retrieval/vector_stores/integrations/objectbox.md @@ -13,6 +13,7 @@ ObjectBox features: - Low memory footprint: ObjectBox itself just takes a few MB of memory. The entire binary is only about 3 MB (compressed around 1 MB) - Scales with hardware: efficient resource usage is also an advantage when running on more capable devices like the latest phones, desktops and servers +Official ObjectBox resources: - [ObjectBox Vector Search docs](https://docs.objectbox.io/ann-vector-search) - [The first On-Device Vector Database: ObjectBox 4.0](https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0) - [On-device Vector Database for Dart/Flutter](https://objectbox.io/on-device-vector-database-for-dart-flutter) @@ -42,21 +43,21 @@ dependencies: ### 3. Instantiate the ObjectBox vector store ```dart -final embeddings = OllamaEmbeddings(model: 'mxbai-embed-large:335m'); +final embeddings = OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'); final vectorStore = ObjectBoxVectorStore( embeddings: embeddings, - dimensions: 1024, + dimensions: 512, ); ``` -The dimensions parameter specifies the number of dimensions of the embeddings. It will depend on the embeddings model you are using. In this example, we are using the `mxbai-embed-large:335m` model, which has 1024 dimensions. +The dimensions parameter specifies the number of dimensions of the embeddings. It will depend on the embeddings model you are using. In this example, we are using the [jina/jina-embeddings-v2-small-en](https://ollama.com/jina/jina-embeddings-v2-small-en) model, which has 512 dimensions. The `ObjectBoxVectorStore` constructor allows you to customize the ObjectBox store that is created under the hood. 
For example, you can change the directory where the database is stored: ```dart final vectorStore = ObjectBoxVectorStore( embeddings: embeddings, - dimensions: 1024, + dimensions: 512, directory: 'path/to/db', ); ``` @@ -129,7 +130,7 @@ This example demonstrates how to build a fully local RAG (Retrieval-Augmented Ge Before running the example, make sure you have the following: - Ollama installed (see the [Ollama documentation](https://ollama.com/) for installation instructions). -- [mxbai-embed-large:335m](https://ollama.com/library/mxbai-embed-large:335m) and [`llama3:8b`](https://ollama.com/library/llama3:8b) models downloaded. +- [jina/jina-embeddings-v2-small-en](https://ollama.com/jina/jina-embeddings-v2-small-en) and [llama3:8b](https://ollama.com/library/llama3:8b) models downloaded. #### Steps @@ -137,7 +138,7 @@ Before running the example, make sure you have the following: 1. Retrieve several posts from the ObjectBox blog using a `WebBaseLoader` document loader. 2. Split the retrieved posts into chunks using a `RecursiveCharacterTextSplitter`. -3. Create embeddings from the document chunks using the `mxbai-embed-large:335m` embeddings model via `OllamaEmbeddings`. +3. Create embeddings from the document chunks using the `jina/jina-embeddings-v2-small-en` embeddings model via `OllamaEmbeddings`. 4. Add the document chunks and their corresponding embeddings to the `ObjectBoxVectorStore`. > Note: this step only needs to be executed once (unless the documents change). The stored documents can be used for multiple queries. @@ -151,8 +152,8 @@ Before running the example, make sure you have the following: ```dart // 1. Instantiate vector store final vectorStore = ObjectBoxVectorStore( - embeddings: OllamaEmbeddings(model: 'mxbai-embed-large:335m'), - dimensions: 1024, + embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), + dimensions: 512, ); // 2. Load documents @@ -241,6 +242,10 @@ await stream.forEach(stdout.write); // [2] https://objectbox.io/on-device-vector-database-for-dart-flutter/ ``` +## Example: Wikivoyage EU + +Check out the [Wikivoyage EU example](https://github.com/davidmigloz/langchain_dart/tree/main/examples/wikivoyage_eu), to see how to build a fully local chatbot that uses RAG to plan vacation plans in Europe. + ## Advance ### BaseObjectBoxVectorStore diff --git a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart index 4a8950b7..cd558d1b 100644 --- a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart +++ b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart @@ -11,8 +11,8 @@ void main() async { Future _rag() async { // 1. Instantiate vector store final vectorStore = ObjectBoxVectorStore( - embeddings: OllamaEmbeddings(model: 'mxbai-embed-large:335m'), - dimensions: 1024, + embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), + dimensions: 512, directory: 'bin/modules/retrieval/vector_stores/integrations', ); diff --git a/examples/wikivoyage_eu/README.md b/examples/wikivoyage_eu/README.md index ca686dcb..07bc5073 100644 --- a/examples/wikivoyage_eu/README.md +++ b/examples/wikivoyage_eu/README.md @@ -70,7 +70,7 @@ Added 160 documents to the vector store. The chatbot script implements the RAG pipeline. It does the following: 1. Takes a user query as input. -2. Uses the `mxbai-embed-large:335m` model to create an embedding for the query. 
+2. Uses the `jina/jina-embeddings-v2-small-en` model to create an embedding for the query. 3. Retrieves the 5 most similar documents from the ObjectBox database. 4. Builds a prompt using the retrieved documents and the query. 5. Uses the `llama3:8b` model to generate a response to the prompt. From 360ab66846e247ad1628ee7cc9d68df436d5297a Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Mon, 10 Jun 2024 22:24:57 +0200 Subject: [PATCH 038/251] refactor: Migrate conditional imports to js_interop (#453) --- .../anthropic_sdk_dart/lib/src/http_client/http_client.dart | 3 +-- packages/googleai_dart/lib/src/http_client/http_client.dart | 3 +-- .../lib/src/utils/https_client/http_client.dart | 3 +-- packages/mistralai_dart/lib/src/http_client/http_client.dart | 3 +-- packages/ollama_dart/lib/src/http_client/http_client.dart | 3 +-- packages/openai_dart/lib/src/http_client/http_client.dart | 3 +-- 6 files changed, 6 insertions(+), 12 deletions(-) diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- a/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart +++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/googleai_dart/lib/src/http_client/http_client.dart b/packages/googleai_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- a/packages/googleai_dart/lib/src/http_client/http_client.dart +++ b/packages/googleai_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/langchain_google/lib/src/utils/https_client/http_client.dart b/packages/langchain_google/lib/src/utils/https_client/http_client.dart index 479d2164..6b9ed76c 100644 --- a/packages/langchain_google/lib/src/utils/https_client/http_client.dart +++ b/packages/langchain_google/lib/src/utils/https_client/http_client.dart @@ -2,8 +2,7 @@ import 'package:http/http.dart' as http; export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; /// {@template custom_http_client} /// Custom HTTP client that wraps the base HTTP client and allows to override diff --git a/packages/mistralai_dart/lib/src/http_client/http_client.dart b/packages/mistralai_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- a/packages/mistralai_dart/lib/src/http_client/http_client.dart +++ b/packages/mistralai_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/ollama_dart/lib/src/http_client/http_client.dart b/packages/ollama_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- 
a/packages/ollama_dart/lib/src/http_client/http_client.dart +++ b/packages/ollama_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/openai_dart/lib/src/http_client/http_client.dart b/packages/openai_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- a/packages/openai_dart/lib/src/http_client/http_client.dart +++ b/packages/openai_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; From 5668fda6696e40a402b902bc599490dee7b03f97 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 13 Jun 2024 08:55:56 +0200 Subject: [PATCH 039/251] feat: Bootstrap `langgraph` package (#454) --- packages/langgraph/.gitignore | 7 +++++++ packages/langgraph/CHANGELOG.md | 3 +++ packages/langgraph/LICENSE | 21 +++++++++++++++++++ packages/langgraph/README.md | 17 +++++++++++++++ packages/langgraph/analysis_options.yaml | 1 + .../langgraph/example/langgraph_example.dart | 3 +++ packages/langgraph/lib/langgraph.dart | 2 ++ packages/langgraph/pubspec.yaml | 16 ++++++++++++++ 8 files changed, 70 insertions(+) create mode 100644 packages/langgraph/.gitignore create mode 100644 packages/langgraph/CHANGELOG.md create mode 100644 packages/langgraph/LICENSE create mode 100644 packages/langgraph/README.md create mode 100644 packages/langgraph/analysis_options.yaml create mode 100644 packages/langgraph/example/langgraph_example.dart create mode 100644 packages/langgraph/lib/langgraph.dart create mode 100644 packages/langgraph/pubspec.yaml diff --git a/packages/langgraph/.gitignore b/packages/langgraph/.gitignore new file mode 100644 index 00000000..3cceda55 --- /dev/null +++ b/packages/langgraph/.gitignore @@ -0,0 +1,7 @@ +# https://dart.dev/guides/libraries/private-files +# Created by `dart pub` +.dart_tool/ + +# Avoid committing pubspec.lock for library packages; see +# https://dart.dev/guides/libraries/private-files#pubspeclock. +pubspec.lock diff --git a/packages/langgraph/CHANGELOG.md b/packages/langgraph/CHANGELOG.md new file mode 100644 index 00000000..90f8e244 --- /dev/null +++ b/packages/langgraph/CHANGELOG.md @@ -0,0 +1,3 @@ +## 0.0.1-dev.1 + +- Bootstrap package. diff --git a/packages/langgraph/LICENSE b/packages/langgraph/LICENSE new file mode 100644 index 00000000..f407ffdd --- /dev/null +++ b/packages/langgraph/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 David Miguel Lozano + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/packages/langgraph/README.md b/packages/langgraph/README.md new file mode 100644 index 00000000..70fc2aae --- /dev/null +++ b/packages/langgraph/README.md @@ -0,0 +1,17 @@ +# 🦜🕸️LangGraph + +[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) +[![langgraph](https://img.shields.io/pub/v/langgraph.svg)](https://pub.dev/packages/langgraph) +[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) +[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) + +⚡ Building language agents as graphs ⚡ + +## Overview + +TODO + +## License + +LangChain.dart is licensed under the +[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). diff --git a/packages/langgraph/analysis_options.yaml b/packages/langgraph/analysis_options.yaml new file mode 100644 index 00000000..f04c6cf0 --- /dev/null +++ b/packages/langgraph/analysis_options.yaml @@ -0,0 +1 @@ +include: ../../analysis_options.yaml diff --git a/packages/langgraph/example/langgraph_example.dart b/packages/langgraph/example/langgraph_example.dart new file mode 100644 index 00000000..21f3e9f2 --- /dev/null +++ b/packages/langgraph/example/langgraph_example.dart @@ -0,0 +1,3 @@ +void main() { + // TODO +} diff --git a/packages/langgraph/lib/langgraph.dart b/packages/langgraph/lib/langgraph.dart new file mode 100644 index 00000000..790b457d --- /dev/null +++ b/packages/langgraph/lib/langgraph.dart @@ -0,0 +1,2 @@ +/// Build resilient language agents as graphs. +library; diff --git a/packages/langgraph/pubspec.yaml b/packages/langgraph/pubspec.yaml new file mode 100644 index 00000000..2b4ebaaf --- /dev/null +++ b/packages/langgraph/pubspec.yaml @@ -0,0 +1,16 @@ +name: langgraph +description: Build resilient language agents as graphs. 
+version: 0.0.1-dev.1 +repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langgraph +issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langgraph +homepage: https://github.com/davidmigloz/langchain_dart +documentation: https://langchaindart.dev + +topics: + - ai + - nlp + - llms + - langchain + +environment: + sdk: ">=3.0.0 <4.0.0" From 45b1a22d1781bc53209e90170b9815faf3c6d626 Mon Sep 17 00:00:00 2001 From: Konstantin S Date: Thu, 13 Jun 2024 11:58:01 +0400 Subject: [PATCH 040/251] feat: Add support for listing running Ollama models (#451) Co-authored-by: David Miguel --- packages/ollama_dart/README.md | 10 + .../ollama_dart/lib/src/generated/client.dart | 23 +- .../src/generated/schema/process_model.dart | 69 +++ .../generated/schema/process_response.dart | 40 ++ .../lib/src/generated/schema/schema.dart | 2 + .../src/generated/schema/schema.freezed.dart | 482 ++++++++++++++++++ .../lib/src/generated/schema/schema.g.dart | 52 ++ packages/ollama_dart/oas/ollama-curated.yaml | 56 +- .../test/ollama_dart_models_test.dart | 13 + 9 files changed, 742 insertions(+), 5 deletions(-) create mode 100644 packages/ollama_dart/lib/src/generated/schema/process_model.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/process_response.dart diff --git a/packages/ollama_dart/README.md b/packages/ollama_dart/README.md index 27895b5b..5b750447 100644 --- a/packages/ollama_dart/README.md +++ b/packages/ollama_dart/README.md @@ -31,6 +31,7 @@ Unofficial Dart client for [Ollama](https://ollama.ai/) API. * [Models](#models) + [Create model](#create-model) + [List models](#list-models) + + [List running models](#list-running-models) + [Show Model Information](#show-model-information) + [Pull a Model](#pull-a-model) + [Push a Model](#push-a-model) @@ -192,6 +193,15 @@ final res = await client.listModels(); print(res.models); ``` +#### List running models + +Lists models currently loaded and their memory footprint. + +```dart +final res = await client.listRunningModels(); +print(res.models); +``` + #### Show Model Information Show details about a model including modelfile, template, parameters, license, and system prompt. diff --git a/packages/ollama_dart/lib/src/generated/client.dart b/packages/ollama_dart/lib/src/generated/client.dart index a22d8729..6c00d36f 100644 --- a/packages/ollama_dart/lib/src/generated/client.dart +++ b/packages/ollama_dart/lib/src/generated/client.dart @@ -477,6 +477,25 @@ class OllamaClient { return ModelsResponse.fromJson(_jsonDecode(r)); } + // ------------------------------------------ + // METHOD: listRunningModels + // ------------------------------------------ + + /// List models that are running. 
+ /// + /// `GET` `http://localhost:11434/api/ps` + Future listRunningModels() async { + final r = await makeRequest( + baseUrl: 'http://localhost:11434/api', + path: '/ps', + method: HttpMethod.get, + isMultipart: false, + requestType: '', + responseType: 'application/json', + ); + return ProcessResponse.fromJson(_jsonDecode(r)); + } + // ------------------------------------------ // METHOD: showModelInfo // ------------------------------------------ @@ -567,7 +586,7 @@ class OllamaClient { method: HttpMethod.post, isMultipart: false, requestType: 'application/json', - responseType: 'application/json', + responseType: 'application/x-ndjson', body: request, ); return PullModelResponse.fromJson(_jsonDecode(r)); @@ -593,7 +612,7 @@ class OllamaClient { method: HttpMethod.post, isMultipart: false, requestType: 'application/json', - responseType: 'application/json', + responseType: 'application/x-ndjson', body: request, ); return PushModelResponse.fromJson(_jsonDecode(r)); diff --git a/packages/ollama_dart/lib/src/generated/schema/process_model.dart b/packages/ollama_dart/lib/src/generated/schema/process_model.dart new file mode 100644 index 00000000..dad453f0 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/process_model.dart @@ -0,0 +1,69 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ProcessModel +// ========================================== + +/// A model that is currently loaded. +@freezed +class ProcessModel with _$ProcessModel { + const ProcessModel._(); + + /// Factory constructor for ProcessModel + const factory ProcessModel({ + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @JsonKey(includeIfNull: false) String? model, + + /// Size of the model on disk. + @JsonKey(includeIfNull: false) int? size, + + /// The model's digest. + @JsonKey(includeIfNull: false) String? digest, + + /// Details about a model. + @JsonKey(includeIfNull: false) ModelDetails? details, + + /// No Description + @JsonKey(name: 'expires_at', includeIfNull: false) String? expiresAt, + + /// Size of the model on disk. + @JsonKey(name: 'size_vram', includeIfNull: false) int? sizeVram, + }) = _ProcessModel; + + /// Object construction from a JSON representation + factory ProcessModel.fromJson(Map json) => + _$ProcessModelFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'model', + 'size', + 'digest', + 'details', + 'expires_at', + 'size_vram' + ]; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'model': model, + 'size': size, + 'digest': digest, + 'details': details, + 'expires_at': expiresAt, + 'size_vram': sizeVram, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/process_response.dart b/packages/ollama_dart/lib/src/generated/schema/process_response.dart new file mode 100644 index 00000000..6261a813 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/process_response.dart @@ -0,0 +1,40 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ProcessResponse +// ========================================== + +/// Response class for the list running models endpoint. +@freezed +class ProcessResponse with _$ProcessResponse { + const ProcessResponse._(); + + /// Factory constructor for ProcessResponse + const factory ProcessResponse({ + /// List of running models. + @JsonKey(includeIfNull: false) List? models, + }) = _ProcessResponse; + + /// Object construction from a JSON representation + factory ProcessResponse.fromJson(Map json) => + _$ProcessResponseFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['models']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'models': models, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.dart b/packages/ollama_dart/lib/src/generated/schema/schema.dart index 5c8eb964..ed6b2733 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.dart @@ -26,6 +26,8 @@ part 'create_model_status.dart'; part 'models_response.dart'; part 'model.dart'; part 'model_details.dart'; +part 'process_response.dart'; +part 'process_model.dart'; part 'model_info_request.dart'; part 'model_info.dart'; part 'copy_model_request.dart'; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index ab02ac2b..88e82b13 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -4857,6 +4857,488 @@ abstract class _ModelDetails extends ModelDetails { throw _privateConstructorUsedError; } +ProcessResponse _$ProcessResponseFromJson(Map json) { + return _ProcessResponse.fromJson(json); +} + +/// @nodoc +mixin _$ProcessResponse { + /// List of running models. + @JsonKey(includeIfNull: false) + List? get models => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ProcessResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ProcessResponseCopyWith<$Res> { + factory $ProcessResponseCopyWith( + ProcessResponse value, $Res Function(ProcessResponse) then) = + _$ProcessResponseCopyWithImpl<$Res, ProcessResponse>; + @useResult + $Res call({@JsonKey(includeIfNull: false) List? 
models}); +} + +/// @nodoc +class _$ProcessResponseCopyWithImpl<$Res, $Val extends ProcessResponse> + implements $ProcessResponseCopyWith<$Res> { + _$ProcessResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? models = freezed, + }) { + return _then(_value.copyWith( + models: freezed == models + ? _value.models + : models // ignore: cast_nullable_to_non_nullable + as List?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ProcessResponseImplCopyWith<$Res> + implements $ProcessResponseCopyWith<$Res> { + factory _$$ProcessResponseImplCopyWith(_$ProcessResponseImpl value, + $Res Function(_$ProcessResponseImpl) then) = + __$$ProcessResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(includeIfNull: false) List? models}); +} + +/// @nodoc +class __$$ProcessResponseImplCopyWithImpl<$Res> + extends _$ProcessResponseCopyWithImpl<$Res, _$ProcessResponseImpl> + implements _$$ProcessResponseImplCopyWith<$Res> { + __$$ProcessResponseImplCopyWithImpl( + _$ProcessResponseImpl _value, $Res Function(_$ProcessResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? models = freezed, + }) { + return _then(_$ProcessResponseImpl( + models: freezed == models + ? _value._models + : models // ignore: cast_nullable_to_non_nullable + as List?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ProcessResponseImpl extends _ProcessResponse { + const _$ProcessResponseImpl( + {@JsonKey(includeIfNull: false) final List? models}) + : _models = models, + super._(); + + factory _$ProcessResponseImpl.fromJson(Map json) => + _$$ProcessResponseImplFromJson(json); + + /// List of running models. + final List? _models; + + /// List of running models. + @override + @JsonKey(includeIfNull: false) + List? get models { + final value = _models; + if (value == null) return null; + if (_models is EqualUnmodifiableListView) return _models; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + @override + String toString() { + return 'ProcessResponse(models: $models)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ProcessResponseImpl && + const DeepCollectionEquality().equals(other._models, _models)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_models)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ProcessResponseImplCopyWith<_$ProcessResponseImpl> get copyWith => + __$$ProcessResponseImplCopyWithImpl<_$ProcessResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ProcessResponseImplToJson( + this, + ); + } +} + +abstract class _ProcessResponse extends ProcessResponse { + const factory _ProcessResponse( + {@JsonKey(includeIfNull: false) final List? models}) = + _$ProcessResponseImpl; + const _ProcessResponse._() : super._(); + + factory _ProcessResponse.fromJson(Map json) = + _$ProcessResponseImpl.fromJson; + + @override + + /// List of running models. + @JsonKey(includeIfNull: false) + List? 
get models; + @override + @JsonKey(ignore: true) + _$$ProcessResponseImplCopyWith<_$ProcessResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ProcessModel _$ProcessModelFromJson(Map json) { + return _ProcessModel.fromJson(json); +} + +/// @nodoc +mixin _$ProcessModel { + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @JsonKey(includeIfNull: false) + String? get model => throw _privateConstructorUsedError; + + /// Size of the model on disk. + @JsonKey(includeIfNull: false) + int? get size => throw _privateConstructorUsedError; + + /// The model's digest. + @JsonKey(includeIfNull: false) + String? get digest => throw _privateConstructorUsedError; + + /// Details about a model. + @JsonKey(includeIfNull: false) + ModelDetails? get details => throw _privateConstructorUsedError; + + /// No Description + @JsonKey(name: 'expires_at', includeIfNull: false) + String? get expiresAt => throw _privateConstructorUsedError; + + /// Size of the model on disk. + @JsonKey(name: 'size_vram', includeIfNull: false) + int? get sizeVram => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ProcessModelCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ProcessModelCopyWith<$Res> { + factory $ProcessModelCopyWith( + ProcessModel value, $Res Function(ProcessModel) then) = + _$ProcessModelCopyWithImpl<$Res, ProcessModel>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? model, + @JsonKey(includeIfNull: false) int? size, + @JsonKey(includeIfNull: false) String? digest, + @JsonKey(includeIfNull: false) ModelDetails? details, + @JsonKey(name: 'expires_at', includeIfNull: false) String? expiresAt, + @JsonKey(name: 'size_vram', includeIfNull: false) int? sizeVram}); + + $ModelDetailsCopyWith<$Res>? get details; +} + +/// @nodoc +class _$ProcessModelCopyWithImpl<$Res, $Val extends ProcessModel> + implements $ProcessModelCopyWith<$Res> { + _$ProcessModelCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = freezed, + Object? size = freezed, + Object? digest = freezed, + Object? details = freezed, + Object? expiresAt = freezed, + Object? sizeVram = freezed, + }) { + return _then(_value.copyWith( + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + size: freezed == size + ? _value.size + : size // ignore: cast_nullable_to_non_nullable + as int?, + digest: freezed == digest + ? _value.digest + : digest // ignore: cast_nullable_to_non_nullable + as String?, + details: freezed == details + ? _value.details + : details // ignore: cast_nullable_to_non_nullable + as ModelDetails?, + expiresAt: freezed == expiresAt + ? _value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable + as String?, + sizeVram: freezed == sizeVram + ? _value.sizeVram + : sizeVram // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ModelDetailsCopyWith<$Res>? 
get details { + if (_value.details == null) { + return null; + } + + return $ModelDetailsCopyWith<$Res>(_value.details!, (value) { + return _then(_value.copyWith(details: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$ProcessModelImplCopyWith<$Res> + implements $ProcessModelCopyWith<$Res> { + factory _$$ProcessModelImplCopyWith( + _$ProcessModelImpl value, $Res Function(_$ProcessModelImpl) then) = + __$$ProcessModelImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? model, + @JsonKey(includeIfNull: false) int? size, + @JsonKey(includeIfNull: false) String? digest, + @JsonKey(includeIfNull: false) ModelDetails? details, + @JsonKey(name: 'expires_at', includeIfNull: false) String? expiresAt, + @JsonKey(name: 'size_vram', includeIfNull: false) int? sizeVram}); + + @override + $ModelDetailsCopyWith<$Res>? get details; +} + +/// @nodoc +class __$$ProcessModelImplCopyWithImpl<$Res> + extends _$ProcessModelCopyWithImpl<$Res, _$ProcessModelImpl> + implements _$$ProcessModelImplCopyWith<$Res> { + __$$ProcessModelImplCopyWithImpl( + _$ProcessModelImpl _value, $Res Function(_$ProcessModelImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = freezed, + Object? size = freezed, + Object? digest = freezed, + Object? details = freezed, + Object? expiresAt = freezed, + Object? sizeVram = freezed, + }) { + return _then(_$ProcessModelImpl( + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + size: freezed == size + ? _value.size + : size // ignore: cast_nullable_to_non_nullable + as int?, + digest: freezed == digest + ? _value.digest + : digest // ignore: cast_nullable_to_non_nullable + as String?, + details: freezed == details + ? _value.details + : details // ignore: cast_nullable_to_non_nullable + as ModelDetails?, + expiresAt: freezed == expiresAt + ? _value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable + as String?, + sizeVram: freezed == sizeVram + ? _value.sizeVram + : sizeVram // ignore: cast_nullable_to_non_nullable + as int?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ProcessModelImpl extends _ProcessModel { + const _$ProcessModelImpl( + {@JsonKey(includeIfNull: false) this.model, + @JsonKey(includeIfNull: false) this.size, + @JsonKey(includeIfNull: false) this.digest, + @JsonKey(includeIfNull: false) this.details, + @JsonKey(name: 'expires_at', includeIfNull: false) this.expiresAt, + @JsonKey(name: 'size_vram', includeIfNull: false) this.sizeVram}) + : super._(); + + factory _$ProcessModelImpl.fromJson(Map json) => + _$$ProcessModelImplFromJson(json); + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @override + @JsonKey(includeIfNull: false) + final String? model; + + /// Size of the model on disk. + @override + @JsonKey(includeIfNull: false) + final int? size; + + /// The model's digest. + @override + @JsonKey(includeIfNull: false) + final String? digest; + + /// Details about a model. + @override + @JsonKey(includeIfNull: false) + final ModelDetails? details; + + /// No Description + @override + @JsonKey(name: 'expires_at', includeIfNull: false) + final String? expiresAt; + + /// Size of the model on disk. + @override + @JsonKey(name: 'size_vram', includeIfNull: false) + final int? 
sizeVram; + + @override + String toString() { + return 'ProcessModel(model: $model, size: $size, digest: $digest, details: $details, expiresAt: $expiresAt, sizeVram: $sizeVram)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ProcessModelImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.size, size) || other.size == size) && + (identical(other.digest, digest) || other.digest == digest) && + (identical(other.details, details) || other.details == details) && + (identical(other.expiresAt, expiresAt) || + other.expiresAt == expiresAt) && + (identical(other.sizeVram, sizeVram) || + other.sizeVram == sizeVram)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, model, size, digest, details, expiresAt, sizeVram); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ProcessModelImplCopyWith<_$ProcessModelImpl> get copyWith => + __$$ProcessModelImplCopyWithImpl<_$ProcessModelImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ProcessModelImplToJson( + this, + ); + } +} + +abstract class _ProcessModel extends ProcessModel { + const factory _ProcessModel( + {@JsonKey(includeIfNull: false) final String? model, + @JsonKey(includeIfNull: false) final int? size, + @JsonKey(includeIfNull: false) final String? digest, + @JsonKey(includeIfNull: false) final ModelDetails? details, + @JsonKey(name: 'expires_at', includeIfNull: false) + final String? expiresAt, + @JsonKey(name: 'size_vram', includeIfNull: false) + final int? sizeVram}) = _$ProcessModelImpl; + const _ProcessModel._() : super._(); + + factory _ProcessModel.fromJson(Map json) = + _$ProcessModelImpl.fromJson; + + @override + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @JsonKey(includeIfNull: false) + String? get model; + @override + + /// Size of the model on disk. + @JsonKey(includeIfNull: false) + int? get size; + @override + + /// The model's digest. + @JsonKey(includeIfNull: false) + String? get digest; + @override + + /// Details about a model. + @JsonKey(includeIfNull: false) + ModelDetails? get details; + @override + + /// No Description + @JsonKey(name: 'expires_at', includeIfNull: false) + String? get expiresAt; + @override + + /// Size of the model on disk. + @JsonKey(name: 'size_vram', includeIfNull: false) + int? get sizeVram; + @override + @JsonKey(ignore: true) + _$$ProcessModelImplCopyWith<_$ProcessModelImpl> get copyWith => + throw _privateConstructorUsedError; +} + ModelInfoRequest _$ModelInfoRequestFromJson(Map json) { return _ModelInfoRequest.fromJson(json); } diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index f5548646..3443737b 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -476,6 +476,58 @@ Map _$$ModelDetailsImplToJson(_$ModelDetailsImpl instance) { return val; } +_$ProcessResponseImpl _$$ProcessResponseImplFromJson( + Map json) => + _$ProcessResponseImpl( + models: (json['models'] as List?) 
+ ?.map((e) => ProcessModel.fromJson(e as Map)) + .toList(), + ); + +Map _$$ProcessResponseImplToJson( + _$ProcessResponseImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('models', instance.models?.map((e) => e.toJson()).toList()); + return val; +} + +_$ProcessModelImpl _$$ProcessModelImplFromJson(Map json) => + _$ProcessModelImpl( + model: json['model'] as String?, + size: json['size'] as int?, + digest: json['digest'] as String?, + details: json['details'] == null + ? null + : ModelDetails.fromJson(json['details'] as Map), + expiresAt: json['expires_at'] as String?, + sizeVram: json['size_vram'] as int?, + ); + +Map _$$ProcessModelImplToJson(_$ProcessModelImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('model', instance.model); + writeNotNull('size', instance.size); + writeNotNull('digest', instance.digest); + writeNotNull('details', instance.details?.toJson()); + writeNotNull('expires_at', instance.expiresAt); + writeNotNull('size_vram', instance.sizeVram); + return val; +} + _$ModelInfoRequestImpl _$$ModelInfoRequestImplFromJson( Map json) => _$ModelInfoRequestImpl( diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index 876bab50..7ade34a7 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -109,6 +109,19 @@ paths: application/json: schema: $ref: '#/components/schemas/ModelsResponse' + /ps: + get: + operationId: listRunningModels + tags: + - Models + summary: List models that are running. + responses: + '200': + description: Successful operation. + content: + application/json: + schema: + $ref: '#/components/schemas/ProcessResponse' /show: post: operationId: showModelInfo @@ -171,7 +184,7 @@ paths: '200': description: Successful operation. content: - application/json: + application/x-ndjson: schema: $ref: '#/components/schemas/PullModelResponse' /push: @@ -190,7 +203,7 @@ paths: '200': description: Successful operation. content: - application/json: + application/x-ndjson: schema: $ref: '#/components/schemas/PushModelResponse' /blobs/{digest}: @@ -760,6 +773,43 @@ components: quantization_level: type: string description: The quantization level of the model. + ProcessResponse: + type: object + description: Response class for the list running models endpoint. + properties: + models: + type: array + description: List of running models. + items: + $ref: '#/components/schemas/ProcessModel' + ProcessModel: + type: object + description: A model that is currently loaded. + properties: + model: + type: string + description: *model_name + example: llama3:8b + size: + type: integer + format: int64 + description: Size of the model on disk. + example: 7323310500 + digest: + type: string + description: The model's digest. + example: 'sha256:bc07c81de745696fdf5afca05e065818a8149fb0c77266fb584d9b2cba3711a' + details: + $ref: '#/components/schemas/ModelDetails' + expires_at: + type: string + format: date-time + example: 2023-08-02T17:02:23.713454393-07:00 + size_vram: + type: integer + format: int64 + description: Size of the model on disk. + example: 7323310500 ModelInfoRequest: description: Request class for the show model info endpoint. type: object @@ -805,7 +855,7 @@ components: nullable: true description: The default messages for the model. 
items: - $ref: '#/components/schemas/Message' + $ref: '#/components/schemas/Message' CopyModelRequest: description: Request class for copying a model. type: object diff --git a/packages/ollama_dart/test/ollama_dart_models_test.dart b/packages/ollama_dart/test/ollama_dart_models_test.dart index abb3cef3..e511bff4 100644 --- a/packages/ollama_dart/test/ollama_dart_models_test.dart +++ b/packages/ollama_dart/test/ollama_dart_models_test.dart @@ -65,6 +65,19 @@ void main() { expect(res.models?.any((final m) => m.model == defaultModel), isTrue); }); + test('Test list running models', () async { + await client.generateCompletion( + request: const GenerateCompletionRequest( + model: defaultModel, + prompt: 'You are a llama', + options: RequestOptions(numPredict: 1), + ), + ); + + final res = await client.listRunningModels(); + expect(res.models?.any((final m) => m.model == defaultModel), isTrue); + }); + test('Test show model info', () async { final res = await client.showModelInfo( request: const ModelInfoRequest(model: defaultModel), From abbc724414d7b53dd3441aa68de72e0f23e17dec Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 14 Jun 2024 22:35:05 +0200 Subject: [PATCH 041/251] feat: Bootstrap `tavily_dart` package (#455) --- packages/tavily_dart/.gitignore | 7 +++++++ packages/tavily_dart/CHANGELOG.md | 3 +++ packages/tavily_dart/LICENSE | 21 +++++++++++++++++++ packages/tavily_dart/README.md | 16 ++++++++++++++ packages/tavily_dart/analysis_options.yaml | 1 + .../example/tavily_dart_example.dart | 3 +++ packages/tavily_dart/lib/tavily_dart.dart | 2 ++ packages/tavily_dart/pubspec.yaml | 16 ++++++++++++++ 8 files changed, 69 insertions(+) create mode 100644 packages/tavily_dart/.gitignore create mode 100644 packages/tavily_dart/CHANGELOG.md create mode 100644 packages/tavily_dart/LICENSE create mode 100644 packages/tavily_dart/README.md create mode 100644 packages/tavily_dart/analysis_options.yaml create mode 100644 packages/tavily_dart/example/tavily_dart_example.dart create mode 100644 packages/tavily_dart/lib/tavily_dart.dart create mode 100644 packages/tavily_dart/pubspec.yaml diff --git a/packages/tavily_dart/.gitignore b/packages/tavily_dart/.gitignore new file mode 100644 index 00000000..3cceda55 --- /dev/null +++ b/packages/tavily_dart/.gitignore @@ -0,0 +1,7 @@ +# https://dart.dev/guides/libraries/private-files +# Created by `dart pub` +.dart_tool/ + +# Avoid committing pubspec.lock for library packages; see +# https://dart.dev/guides/libraries/private-files#pubspeclock. +pubspec.lock diff --git a/packages/tavily_dart/CHANGELOG.md b/packages/tavily_dart/CHANGELOG.md new file mode 100644 index 00000000..90f8e244 --- /dev/null +++ b/packages/tavily_dart/CHANGELOG.md @@ -0,0 +1,3 @@ +## 0.0.1-dev.1 + +- Bootstrap package. 
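The new `/ps` route, `ProcessResponse`/`ProcessModel` schemas and the `listRunningModels` test added above translate into a straightforward client call. A minimal usage sketch follows; the `OllamaClient` constructor arguments and the example model name are assumptions (not shown in this patch), while `generateCompletion`, `listRunningModels()` and the `ProcessModel` fields come directly from the changes above.

```dart
import 'package:ollama_dart/ollama_dart.dart';

Future<void> main() async {
  // Assumed: a default OllamaClient, configured the same way as in the tests.
  final client = OllamaClient();

  // A model only appears in /ps while it is loaded, so trigger a tiny
  // completion first, mirroring the new test in ollama_dart_models_test.dart.
  await client.generateCompletion(
    request: const GenerateCompletionRequest(
      model: 'llama3:8b', // example model name taken from the curated OpenAPI spec
      prompt: 'You are a llama',
      options: RequestOptions(numPredict: 1),
    ),
  );

  final res = await client.listRunningModels();
  for (final m in res.models ?? const <ProcessModel>[]) {
    // ProcessModel fields added in this patch: model, size, digest, details,
    // expiresAt and sizeVram.
    print('${m.model} (VRAM: ${m.sizeVram} bytes, expires at ${m.expiresAt})');
  }
}
```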
diff --git a/packages/tavily_dart/LICENSE b/packages/tavily_dart/LICENSE new file mode 100644 index 00000000..f407ffdd --- /dev/null +++ b/packages/tavily_dart/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 David Miguel Lozano + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/packages/tavily_dart/README.md b/packages/tavily_dart/README.md new file mode 100644 index 00000000..bf452982 --- /dev/null +++ b/packages/tavily_dart/README.md @@ -0,0 +1,16 @@ +# Tavily Dart Client + +[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) +[![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) +[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) +[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) + +Dart Client for the [Tavily](https://tavily.com) API (a search engine optimized for LLMs and RAG). + +## Features + +TODO + +## License + +Ollama Dart Client is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). diff --git a/packages/tavily_dart/analysis_options.yaml b/packages/tavily_dart/analysis_options.yaml new file mode 100644 index 00000000..f04c6cf0 --- /dev/null +++ b/packages/tavily_dart/analysis_options.yaml @@ -0,0 +1 @@ +include: ../../analysis_options.yaml diff --git a/packages/tavily_dart/example/tavily_dart_example.dart b/packages/tavily_dart/example/tavily_dart_example.dart new file mode 100644 index 00000000..21f3e9f2 --- /dev/null +++ b/packages/tavily_dart/example/tavily_dart_example.dart @@ -0,0 +1,3 @@ +void main() { + // TODO +} diff --git a/packages/tavily_dart/lib/tavily_dart.dart b/packages/tavily_dart/lib/tavily_dart.dart new file mode 100644 index 00000000..c894f0f7 --- /dev/null +++ b/packages/tavily_dart/lib/tavily_dart.dart @@ -0,0 +1,2 @@ +/// Dart Client for the Tavily API (a search engine optimized for LLMs and RAG). +library; diff --git a/packages/tavily_dart/pubspec.yaml b/packages/tavily_dart/pubspec.yaml new file mode 100644 index 00000000..24fccdcb --- /dev/null +++ b/packages/tavily_dart/pubspec.yaml @@ -0,0 +1,16 @@ +name: tavily_dart +description: Dart Client for the Tavily API (a search engine optimized for LLMs and RAG). 
+version: 0.0.1-dev.1 +repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/tavily_dart +issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:tavily_dart +homepage: https://github.com/davidmigloz/langchain_dart +documentation: https://langchaindart.dev + +topics: + - ai + - llms + - search + - rag + +environment: + sdk: ">=3.0.0 <4.0.0" From 65613e7cd10fc0f2a3c5b4c3e0e95e677da4d9ef Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 14 Jun 2024 23:37:06 +0200 Subject: [PATCH 042/251] feat: Implement tavily_dart, a Dart client for Tavily API (#456) --- packages/tavily_dart/README.md | 119 +- packages/tavily_dart/build.yaml | 13 + .../example/tavily_dart_example.dart | 29 +- .../tavily_dart/lib/src/generated/client.dart | 382 ++++++ .../lib/src/generated/schema/schema.dart | 15 + .../src/generated/schema/schema.freezed.dart | 1027 +++++++++++++++++ .../lib/src/generated/schema/schema.g.dart | 116 ++ .../src/generated/schema/search_request.dart | 103 ++ .../src/generated/schema/search_response.dart | 68 ++ .../src/generated/schema/search_result.dart | 62 + packages/tavily_dart/lib/tavily_dart.dart | 3 + packages/tavily_dart/oas/main.dart | 23 + packages/tavily_dart/oas/tavily_openapi.yaml | 156 +++ packages/tavily_dart/pubspec.yaml | 18 + packages/tavily_dart/test/tavily_test.dart | 45 + 15 files changed, 2175 insertions(+), 4 deletions(-) create mode 100644 packages/tavily_dart/build.yaml create mode 100644 packages/tavily_dart/lib/src/generated/client.dart create mode 100644 packages/tavily_dart/lib/src/generated/schema/schema.dart create mode 100644 packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart create mode 100644 packages/tavily_dart/lib/src/generated/schema/schema.g.dart create mode 100644 packages/tavily_dart/lib/src/generated/schema/search_request.dart create mode 100644 packages/tavily_dart/lib/src/generated/schema/search_response.dart create mode 100644 packages/tavily_dart/lib/src/generated/schema/search_result.dart create mode 100644 packages/tavily_dart/oas/main.dart create mode 100644 packages/tavily_dart/oas/tavily_openapi.yaml create mode 100644 packages/tavily_dart/test/tavily_test.dart diff --git a/packages/tavily_dart/README.md b/packages/tavily_dart/README.md index bf452982..a7cd6afd 100644 --- a/packages/tavily_dart/README.md +++ b/packages/tavily_dart/README.md @@ -9,8 +9,123 @@ Dart Client for the [Tavily](https://tavily.com) API (a search engine optimized ## Features -TODO +- Fully type-safe, [documented](https://pub.dev/documentation/tavily_dart/latest) and tested +- All platforms supported +- Custom base URL, headers and query params support (e.g. HTTP proxies) +- Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) + +**Supported endpoints:** +- Search + +## Table of contents + +- [Usage](#usage) + * [Authentication](#authentication) + * [Search](#search) +- [Advance Usage](#advance-usage) + * [Custom HTTP client](#custom-http-client) + * [Using a proxy](#using-a-proxy) + + [HTTP proxy](#http-proxy) + + [SOCKS5 proxy](#socks5-proxy) +- [Acknowledgements](#acknowledgements) +- [License](#license) + +## Usage + +Refer to the [documentation](https://docs.tavily.com) for more information about the API. + +### Authentication + +The Tavily API uses API keys for authentication. Visit the [Tavily console](https://app.tavily.com/) to retrieve the API key you'll use in your requests. 
+ +> **Remember that your API key is a secret!** +> Do not share it with others or expose it in any client-side code (browsers, apps). Production requests must be routed through your own backend server where your API key can be securely loaded from an environment variable or key management service. + +```dart +final apiKey = Platform.environment['TAVILY_API_KEY']; +final client = TavilyClient(); +``` + +### Search + +Search for data based on a query. + +**Basic search:** + +```dart +final res = await client.search( + request: SearchRequest( + apiKey: apiKey, + query: 'Should I invest in Apple right now?', + ), +); +print(res); +``` + +**Advanced search:** + +```dart +final res = await client.search( + request: SearchRequest( + apiKey: apiKey, + query: 'Should I invest in Apple right now?', + searchDepth: SearchRequestSearchDepth.advanced, + ), +); +print(res); +``` + +See the API documentation for more information on all supported search parameters. + +## Advance Usage + +### Custom HTTP client + +You can always provide your own implementation of `http.Client` for further customization: + +```dart +final client = TavilyClient( + client: MyHttpClient(), +); +``` + +### Using a proxy + +#### HTTP proxy + +You can use your own HTTP proxy by overriding the `baseUrl` and providing your required `headers`: + +```dart +final client = TavilyClient( + baseUrl: 'https://my-proxy.com', + headers: { + 'x-my-proxy-header': 'value', + }, +); +``` + +If you need further customization, you can always provide your own `http.Client`. + +#### SOCKS5 proxy + +To use a SOCKS5 proxy, you can use the [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package: + +```dart +final baseHttpClient = HttpClient(); +SocksTCPClient.assignToHttpClient(baseHttpClient, [ + ProxySettings(InternetAddress.loopbackIPv4, 1080), +]); +final httpClient = IOClient(baseClient); + +final client = TavilyClient( + client: httpClient, +); +``` + +## Acknowledgements + +The generation of this client was made possible by the [openapi_spec](https://github.com/tazatechnology/openapi_spec) package. ## License -Ollama Dart Client is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). +Tavily Dart Client is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). 
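Beyond printing the raw response as in the README examples, the generated models added in this patch give typed access to the payload: `SearchResponse` exposes `answer`, `responseTime`, `followUpQuestions` and a list of `SearchResult`s with `title`, `url`, `content`, `rawContent` and `score`. A minimal sketch of consuming them (the query string and parameter values are illustrative; the field and parameter names come from the schemas below):

```dart
import 'dart:io';

import 'package:tavily_dart/tavily_dart.dart';

Future<void> main() async {
  final apiKey = Platform.environment['TAVILY_API_KEY']!;
  final client = TavilyClient();

  final res = await client.search(
    request: SearchRequest(
      apiKey: apiKey,
      query: 'Should I invest in Apple right now?',
      includeAnswer: true, // ask for a synthesized answer (off by default)
      maxResults: 3, // the SearchRequest schema defaults to 5
    ),
  );

  // `answer` is only expected to be populated when includeAnswer is set.
  print(res.answer);

  for (final r in res.results) {
    // SearchResult fields: title, url, content, rawContent, score.
    print('${r.score.toStringAsFixed(2)}  ${r.title}  ${r.url}');
  }

  client.endSession();
}
```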
diff --git a/packages/tavily_dart/build.yaml b/packages/tavily_dart/build.yaml new file mode 100644 index 00000000..dee719ac --- /dev/null +++ b/packages/tavily_dart/build.yaml @@ -0,0 +1,13 @@ +targets: + $default: + builders: + source_gen|combining_builder: + options: + ignore_for_file: + - prefer_final_parameters + - require_trailing_commas + - non_constant_identifier_names + - unnecessary_null_checks + json_serializable: + options: + explicit_to_json: true diff --git a/packages/tavily_dart/example/tavily_dart_example.dart b/packages/tavily_dart/example/tavily_dart_example.dart index 21f3e9f2..652564b2 100644 --- a/packages/tavily_dart/example/tavily_dart_example.dart +++ b/packages/tavily_dart/example/tavily_dart_example.dart @@ -1,3 +1,28 @@ -void main() { - // TODO +// ignore_for_file: avoid_print +import 'dart:io'; + +import 'package:tavily_dart/tavily_dart.dart'; + +void main() async { + final apiKey = Platform.environment['TAVILY_API_KEY']!; + final client = TavilyClient(); + + // Basic search + final res1 = await client.search( + request: SearchRequest( + apiKey: apiKey, + query: 'Should I invest in Apple right now?', + ), + ); + print(res1); + + // Advanced search + final res2 = await client.search( + request: SearchRequest( + apiKey: apiKey, + query: 'Should I invest in Apple right now?', + searchDepth: SearchRequestSearchDepth.advanced, + ), + ); + print(res2); } diff --git a/packages/tavily_dart/lib/src/generated/client.dart b/packages/tavily_dart/lib/src/generated/client.dart new file mode 100644 index 00000000..f6fb0439 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/client.dart @@ -0,0 +1,382 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target, unused_import + +import 'dart:convert'; +import 'dart:typed_data'; + +import 'package:http/http.dart' as http; +import 'package:http/retry.dart'; +import 'package:meta/meta.dart'; + +import 'schema/schema.dart'; + +/// Enum of HTTP methods +enum HttpMethod { get, put, post, delete, options, head, patch, trace } + +// ========================================== +// CLASS: TavilyClientException +// ========================================== + +/// HTTP exception handler for TavilyClient +class TavilyClientException implements Exception { + TavilyClientException({ + required this.message, + required this.uri, + required this.method, + this.code, + this.body, + }); + + final String message; + final Uri uri; + final HttpMethod method; + final int? code; + final Object? body; + + @override + String toString() { + Object? data; + try { + data = body is String ? jsonDecode(body as String) : body.toString(); + } catch (e) { + data = body.toString(); + } + final s = JsonEncoder.withIndent(' ').convert({ + 'uri': uri.toString(), + 'method': method.name.toUpperCase(), + 'code': code, + 'message': message, + 'body': data, + }); + return 'TavilyClientException($s)'; + } +} + +// ========================================== +// CLASS: TavilyClient +// ========================================== + +/// Client for Tavily API (v.1.0.0) +/// +/// Tavily Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience. +class TavilyClient { + /// Creates a new TavilyClient instance. 
+ /// + /// - [TavilyClient.baseUrl] Override base URL (default: server url defined in spec) + /// - [TavilyClient.headers] Global headers to be sent with every request + /// - [TavilyClient.queryParams] Global query parameters to be sent with every request + /// - [TavilyClient.client] Override HTTP client to use for requests + TavilyClient({ + this.baseUrl, + this.headers = const {}, + this.queryParams = const {}, + http.Client? client, + }) : assert( + baseUrl == null || baseUrl.startsWith('http'), + 'baseUrl must start with http', + ), + assert( + baseUrl == null || !baseUrl.endsWith('/'), + 'baseUrl must not end with /', + ), + client = RetryClient(client ?? http.Client()); + + /// Override base URL (default: server url defined in spec) + final String? baseUrl; + + /// Global headers to be sent with every request + final Map headers; + + /// Global query parameters to be sent with every request + final Map queryParams; + + /// HTTP client for requests + final http.Client client; + + // ------------------------------------------ + // METHOD: endSession + // ------------------------------------------ + + /// Close the HTTP client and end session + void endSession() => client.close(); + + // ------------------------------------------ + // METHOD: onRequest + // ------------------------------------------ + + /// Middleware for HTTP requests (user can override) + /// + /// The request can be of type [http.Request] or [http.MultipartRequest] + Future onRequest(http.BaseRequest request) { + return Future.value(request); + } + + // ------------------------------------------ + // METHOD: onStreamedResponse + // ------------------------------------------ + + /// Middleware for HTTP streamed responses (user can override) + Future onStreamedResponse( + final http.StreamedResponse response, + ) { + return Future.value(response); + } + + // ------------------------------------------ + // METHOD: onResponse + // ------------------------------------------ + + /// Middleware for HTTP responses (user can override) + Future onResponse(http.Response response) { + return Future.value(response); + } + + // ------------------------------------------ + // METHOD: _jsonDecode + // ------------------------------------------ + + dynamic _jsonDecode(http.Response r) { + return json.decode(utf8.decode(r.bodyBytes)); + } + + // ------------------------------------------ + // METHOD: _request + // ------------------------------------------ + + /// Reusable request method + @protected + Future _request({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + // Override with the user provided baseUrl + baseUrl = this.baseUrl ?? 
baseUrl; + + // Ensure a baseUrl is provided + assert( + baseUrl.isNotEmpty, + 'baseUrl is required, but none defined in spec or provided by user', + ); + + // Add global query parameters + queryParams = {...queryParams, ...this.queryParams}; + + // Ensure query parameters are strings or iterable of strings + queryParams = queryParams.map((key, value) { + if (value is Iterable) { + return MapEntry(key, value.map((v) => v.toString())); + } else { + return MapEntry(key, value.toString()); + } + }); + + // Build the request URI + Uri uri = Uri.parse(baseUrl + path); + if (queryParams.isNotEmpty) { + uri = uri.replace(queryParameters: queryParams); + } + + // Build the headers + Map headers = {...headerParams}; + + // Define the request type being sent to server + if (requestType.isNotEmpty) { + headers['content-type'] = requestType; + } + + // Define the response type expected to receive from server + if (responseType.isNotEmpty) { + headers['accept'] = responseType; + } + + // Add global headers + headers.addAll(this.headers); + + // Build the request object + http.BaseRequest request; + if (isMultipart) { + // Handle multipart request + request = http.MultipartRequest(method.name, uri); + request = request as http.MultipartRequest; + if (body is List) { + request.files.addAll(body); + } else { + request.files.add(body as http.MultipartFile); + } + } else { + // Handle normal request + request = http.Request(method.name, uri); + request = request as http.Request; + try { + if (body != null) { + request.body = json.encode(body); + } + } catch (e) { + // Handle request encoding error + throw TavilyClientException( + uri: uri, + method: method, + message: 'Could not encode: ${body.runtimeType}', + body: e, + ); + } + } + + // Add request headers + request.headers.addAll(headers); + + // Handle user request middleware + request = await onRequest(request); + + // Submit request + return await client.send(request); + } + + // ------------------------------------------ + // METHOD: makeRequestStream + // ------------------------------------------ + + /// Reusable request stream method + @protected + Future makeRequestStream({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + final uri = Uri.parse((this.baseUrl ?? 
baseUrl) + path); + late http.StreamedResponse response; + try { + response = await _request( + baseUrl: baseUrl, + path: path, + method: method, + queryParams: queryParams, + headerParams: headerParams, + requestType: requestType, + responseType: responseType, + body: body, + ); + // Handle user response middleware + response = await onStreamedResponse(response); + } catch (e) { + // Handle request and response errors + throw TavilyClientException( + uri: uri, + method: method, + message: 'Response error', + body: e, + ); + } + + // Check for successful response + if ((response.statusCode ~/ 100) == 2) { + return response; + } + + // Handle unsuccessful response + throw TavilyClientException( + uri: uri, + method: method, + message: 'Unsuccessful response', + code: response.statusCode, + body: (await http.Response.fromStream(response)).body, + ); + } + + // ------------------------------------------ + // METHOD: makeRequest + // ------------------------------------------ + + /// Reusable request method + @protected + Future makeRequest({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + final uri = Uri.parse((this.baseUrl ?? baseUrl) + path); + late http.Response response; + try { + final streamedResponse = await _request( + baseUrl: baseUrl, + path: path, + method: method, + queryParams: queryParams, + headerParams: headerParams, + requestType: requestType, + responseType: responseType, + body: body, + ); + response = await http.Response.fromStream(streamedResponse); + // Handle user response middleware + response = await onResponse(response); + } catch (e) { + // Handle request and response errors + throw TavilyClientException( + uri: uri, + method: method, + message: 'Response error', + body: e, + ); + } + + // Check for successful response + if ((response.statusCode ~/ 100) == 2) { + return response; + } + + // Handle unsuccessful response + throw TavilyClientException( + uri: uri, + method: method, + message: 'Unsuccessful response', + code: response.statusCode, + body: response.body, + ); + } + + // ------------------------------------------ + // METHOD: search + // ------------------------------------------ + + /// Search for data based on a query. + /// + /// `request`: The search request object. 
+ /// + /// `POST` `https://api.tavily.com/search` + Future search({ + required SearchRequest request, + }) async { + final r = await makeRequest( + baseUrl: 'https://api.tavily.com', + path: '/search', + method: HttpMethod.post, + isMultipart: false, + requestType: 'application/json', + responseType: 'application/json', + body: request, + ); + return SearchResponse.fromJson(_jsonDecode(r)); + } +} diff --git a/packages/tavily_dart/lib/src/generated/schema/schema.dart b/packages/tavily_dart/lib/src/generated/schema/schema.dart new file mode 100644 index 00000000..4b3ba505 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/schema.dart @@ -0,0 +1,15 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target + +library tavily_schema; + +import 'package:freezed_annotation/freezed_annotation.dart'; + +part 'schema.g.dart'; +part 'schema.freezed.dart'; + +part 'search_request.dart'; +part 'search_response.dart'; +part 'search_result.dart'; diff --git a/packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart b/packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart new file mode 100644 index 00000000..cc459594 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart @@ -0,0 +1,1027 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: unused_element, deprecated_member_use, deprecated_member_use_from_same_package, use_function_type_syntax_for_parameters, unnecessary_const, avoid_init_to_null, invalid_override_different_default_values_named, prefer_expression_function_bodies, annotate_overrides, invalid_annotation_target, unnecessary_question_mark + +part of 'schema.dart'; + +// ************************************************************************** +// FreezedGenerator +// ************************************************************************** + +T _$identity(T value) => value; + +final _privateConstructorUsedError = UnsupportedError( + 'It seems like you constructed your class using `MyClass._()`. This constructor is only meant to be used by freezed and you are not supposed to need it nor use it.\nPlease check the documentation here for more information: https://github.com/rrousselGit/freezed#adding-getters-and-methods-to-our-models'); + +SearchRequest _$SearchRequestFromJson(Map json) { + return _SearchRequest.fromJson(json); +} + +/// @nodoc +mixin _$SearchRequest { + /// Your unique API key. + @JsonKey(name: 'api_key') + String get apiKey => throw _privateConstructorUsedError; + + /// The search query string. + String get query => throw _privateConstructorUsedError; + + /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. + @JsonKey(name: 'search_depth') + SearchRequestSearchDepth get searchDepth => + throw _privateConstructorUsedError; + + /// Include a list of query related images in the response. Default is False. + @JsonKey(name: 'include_images') + bool get includeImages => throw _privateConstructorUsedError; + + /// Include answers in the search results. Default is False. + @JsonKey(name: 'include_answer') + bool get includeAnswer => throw _privateConstructorUsedError; + + /// Include raw content in the search results. Default is False. + @JsonKey(name: 'include_raw_content') + bool get includeRawContent => throw _privateConstructorUsedError; + + /// The number of maximum search results to return. Default is 5. 
+ @JsonKey(name: 'max_results') + int get maxResults => throw _privateConstructorUsedError; + + /// A list of domains to specifically include in the search results. Default is None. + @JsonKey(name: 'include_domains', includeIfNull: false) + List? get includeDomains => throw _privateConstructorUsedError; + + /// A list of domains to specifically exclude from the search results. Default is None. + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? get excludeDomains => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $SearchRequestCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $SearchRequestCopyWith<$Res> { + factory $SearchRequestCopyWith( + SearchRequest value, $Res Function(SearchRequest) then) = + _$SearchRequestCopyWithImpl<$Res, SearchRequest>; + @useResult + $Res call( + {@JsonKey(name: 'api_key') String apiKey, + String query, + @JsonKey(name: 'search_depth') SearchRequestSearchDepth searchDepth, + @JsonKey(name: 'include_images') bool includeImages, + @JsonKey(name: 'include_answer') bool includeAnswer, + @JsonKey(name: 'include_raw_content') bool includeRawContent, + @JsonKey(name: 'max_results') int maxResults, + @JsonKey(name: 'include_domains', includeIfNull: false) + List? includeDomains, + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? excludeDomains}); +} + +/// @nodoc +class _$SearchRequestCopyWithImpl<$Res, $Val extends SearchRequest> + implements $SearchRequestCopyWith<$Res> { + _$SearchRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? apiKey = null, + Object? query = null, + Object? searchDepth = null, + Object? includeImages = null, + Object? includeAnswer = null, + Object? includeRawContent = null, + Object? maxResults = null, + Object? includeDomains = freezed, + Object? excludeDomains = freezed, + }) { + return _then(_value.copyWith( + apiKey: null == apiKey + ? _value.apiKey + : apiKey // ignore: cast_nullable_to_non_nullable + as String, + query: null == query + ? _value.query + : query // ignore: cast_nullable_to_non_nullable + as String, + searchDepth: null == searchDepth + ? _value.searchDepth + : searchDepth // ignore: cast_nullable_to_non_nullable + as SearchRequestSearchDepth, + includeImages: null == includeImages + ? _value.includeImages + : includeImages // ignore: cast_nullable_to_non_nullable + as bool, + includeAnswer: null == includeAnswer + ? _value.includeAnswer + : includeAnswer // ignore: cast_nullable_to_non_nullable + as bool, + includeRawContent: null == includeRawContent + ? _value.includeRawContent + : includeRawContent // ignore: cast_nullable_to_non_nullable + as bool, + maxResults: null == maxResults + ? _value.maxResults + : maxResults // ignore: cast_nullable_to_non_nullable + as int, + includeDomains: freezed == includeDomains + ? _value.includeDomains + : includeDomains // ignore: cast_nullable_to_non_nullable + as List?, + excludeDomains: freezed == excludeDomains + ? 
_value.excludeDomains + : excludeDomains // ignore: cast_nullable_to_non_nullable + as List?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$SearchRequestImplCopyWith<$Res> + implements $SearchRequestCopyWith<$Res> { + factory _$$SearchRequestImplCopyWith( + _$SearchRequestImpl value, $Res Function(_$SearchRequestImpl) then) = + __$$SearchRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'api_key') String apiKey, + String query, + @JsonKey(name: 'search_depth') SearchRequestSearchDepth searchDepth, + @JsonKey(name: 'include_images') bool includeImages, + @JsonKey(name: 'include_answer') bool includeAnswer, + @JsonKey(name: 'include_raw_content') bool includeRawContent, + @JsonKey(name: 'max_results') int maxResults, + @JsonKey(name: 'include_domains', includeIfNull: false) + List? includeDomains, + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? excludeDomains}); +} + +/// @nodoc +class __$$SearchRequestImplCopyWithImpl<$Res> + extends _$SearchRequestCopyWithImpl<$Res, _$SearchRequestImpl> + implements _$$SearchRequestImplCopyWith<$Res> { + __$$SearchRequestImplCopyWithImpl( + _$SearchRequestImpl _value, $Res Function(_$SearchRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? apiKey = null, + Object? query = null, + Object? searchDepth = null, + Object? includeImages = null, + Object? includeAnswer = null, + Object? includeRawContent = null, + Object? maxResults = null, + Object? includeDomains = freezed, + Object? excludeDomains = freezed, + }) { + return _then(_$SearchRequestImpl( + apiKey: null == apiKey + ? _value.apiKey + : apiKey // ignore: cast_nullable_to_non_nullable + as String, + query: null == query + ? _value.query + : query // ignore: cast_nullable_to_non_nullable + as String, + searchDepth: null == searchDepth + ? _value.searchDepth + : searchDepth // ignore: cast_nullable_to_non_nullable + as SearchRequestSearchDepth, + includeImages: null == includeImages + ? _value.includeImages + : includeImages // ignore: cast_nullable_to_non_nullable + as bool, + includeAnswer: null == includeAnswer + ? _value.includeAnswer + : includeAnswer // ignore: cast_nullable_to_non_nullable + as bool, + includeRawContent: null == includeRawContent + ? _value.includeRawContent + : includeRawContent // ignore: cast_nullable_to_non_nullable + as bool, + maxResults: null == maxResults + ? _value.maxResults + : maxResults // ignore: cast_nullable_to_non_nullable + as int, + includeDomains: freezed == includeDomains + ? _value._includeDomains + : includeDomains // ignore: cast_nullable_to_non_nullable + as List?, + excludeDomains: freezed == excludeDomains + ? _value._excludeDomains + : excludeDomains // ignore: cast_nullable_to_non_nullable + as List?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$SearchRequestImpl extends _SearchRequest { + const _$SearchRequestImpl( + {@JsonKey(name: 'api_key') required this.apiKey, + required this.query, + @JsonKey(name: 'search_depth') + this.searchDepth = SearchRequestSearchDepth.basic, + @JsonKey(name: 'include_images') this.includeImages = false, + @JsonKey(name: 'include_answer') this.includeAnswer = false, + @JsonKey(name: 'include_raw_content') this.includeRawContent = false, + @JsonKey(name: 'max_results') this.maxResults = 5, + @JsonKey(name: 'include_domains', includeIfNull: false) + final List? includeDomains, + @JsonKey(name: 'exclude_domains', includeIfNull: false) + final List? 
excludeDomains}) + : _includeDomains = includeDomains, + _excludeDomains = excludeDomains, + super._(); + + factory _$SearchRequestImpl.fromJson(Map json) => + _$$SearchRequestImplFromJson(json); + + /// Your unique API key. + @override + @JsonKey(name: 'api_key') + final String apiKey; + + /// The search query string. + @override + final String query; + + /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. + @override + @JsonKey(name: 'search_depth') + final SearchRequestSearchDepth searchDepth; + + /// Include a list of query related images in the response. Default is False. + @override + @JsonKey(name: 'include_images') + final bool includeImages; + + /// Include answers in the search results. Default is False. + @override + @JsonKey(name: 'include_answer') + final bool includeAnswer; + + /// Include raw content in the search results. Default is False. + @override + @JsonKey(name: 'include_raw_content') + final bool includeRawContent; + + /// The number of maximum search results to return. Default is 5. + @override + @JsonKey(name: 'max_results') + final int maxResults; + + /// A list of domains to specifically include in the search results. Default is None. + final List? _includeDomains; + + /// A list of domains to specifically include in the search results. Default is None. + @override + @JsonKey(name: 'include_domains', includeIfNull: false) + List? get includeDomains { + final value = _includeDomains; + if (value == null) return null; + if (_includeDomains is EqualUnmodifiableListView) return _includeDomains; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// A list of domains to specifically exclude from the search results. Default is None. + final List? _excludeDomains; + + /// A list of domains to specifically exclude from the search results. Default is None. + @override + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? 
get excludeDomains { + final value = _excludeDomains; + if (value == null) return null; + if (_excludeDomains is EqualUnmodifiableListView) return _excludeDomains; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + @override + String toString() { + return 'SearchRequest(apiKey: $apiKey, query: $query, searchDepth: $searchDepth, includeImages: $includeImages, includeAnswer: $includeAnswer, includeRawContent: $includeRawContent, maxResults: $maxResults, includeDomains: $includeDomains, excludeDomains: $excludeDomains)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$SearchRequestImpl && + (identical(other.apiKey, apiKey) || other.apiKey == apiKey) && + (identical(other.query, query) || other.query == query) && + (identical(other.searchDepth, searchDepth) || + other.searchDepth == searchDepth) && + (identical(other.includeImages, includeImages) || + other.includeImages == includeImages) && + (identical(other.includeAnswer, includeAnswer) || + other.includeAnswer == includeAnswer) && + (identical(other.includeRawContent, includeRawContent) || + other.includeRawContent == includeRawContent) && + (identical(other.maxResults, maxResults) || + other.maxResults == maxResults) && + const DeepCollectionEquality() + .equals(other._includeDomains, _includeDomains) && + const DeepCollectionEquality() + .equals(other._excludeDomains, _excludeDomains)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + apiKey, + query, + searchDepth, + includeImages, + includeAnswer, + includeRawContent, + maxResults, + const DeepCollectionEquality().hash(_includeDomains), + const DeepCollectionEquality().hash(_excludeDomains)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$SearchRequestImplCopyWith<_$SearchRequestImpl> get copyWith => + __$$SearchRequestImplCopyWithImpl<_$SearchRequestImpl>(this, _$identity); + + @override + Map toJson() { + return _$$SearchRequestImplToJson( + this, + ); + } +} + +abstract class _SearchRequest extends SearchRequest { + const factory _SearchRequest( + {@JsonKey(name: 'api_key') required final String apiKey, + required final String query, + @JsonKey(name: 'search_depth') final SearchRequestSearchDepth searchDepth, + @JsonKey(name: 'include_images') final bool includeImages, + @JsonKey(name: 'include_answer') final bool includeAnswer, + @JsonKey(name: 'include_raw_content') final bool includeRawContent, + @JsonKey(name: 'max_results') final int maxResults, + @JsonKey(name: 'include_domains', includeIfNull: false) + final List? includeDomains, + @JsonKey(name: 'exclude_domains', includeIfNull: false) + final List? excludeDomains}) = _$SearchRequestImpl; + const _SearchRequest._() : super._(); + + factory _SearchRequest.fromJson(Map json) = + _$SearchRequestImpl.fromJson; + + @override + + /// Your unique API key. + @JsonKey(name: 'api_key') + String get apiKey; + @override + + /// The search query string. + String get query; + @override + + /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. + @JsonKey(name: 'search_depth') + SearchRequestSearchDepth get searchDepth; + @override + + /// Include a list of query related images in the response. Default is False. + @JsonKey(name: 'include_images') + bool get includeImages; + @override + + /// Include answers in the search results. Default is False. 
+ @JsonKey(name: 'include_answer') + bool get includeAnswer; + @override + + /// Include raw content in the search results. Default is False. + @JsonKey(name: 'include_raw_content') + bool get includeRawContent; + @override + + /// The number of maximum search results to return. Default is 5. + @JsonKey(name: 'max_results') + int get maxResults; + @override + + /// A list of domains to specifically include in the search results. Default is None. + @JsonKey(name: 'include_domains', includeIfNull: false) + List? get includeDomains; + @override + + /// A list of domains to specifically exclude from the search results. Default is None. + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? get excludeDomains; + @override + @JsonKey(ignore: true) + _$$SearchRequestImplCopyWith<_$SearchRequestImpl> get copyWith => + throw _privateConstructorUsedError; +} + +SearchResponse _$SearchResponseFromJson(Map json) { + return _SearchResponse.fromJson(json); +} + +/// @nodoc +mixin _$SearchResponse { + /// The answer to your search query. + @JsonKey(includeIfNull: false) + String? get answer => throw _privateConstructorUsedError; + + /// Your search query. + String get query => throw _privateConstructorUsedError; + + /// Your search result response time. + @JsonKey(name: 'response_time') + double get responseTime => throw _privateConstructorUsedError; + + /// A list of query related image urls. + @JsonKey(includeIfNull: false) + List? get images => throw _privateConstructorUsedError; + + /// A list of suggested research follow up questions related to original query. + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? get followUpQuestions => throw _privateConstructorUsedError; + + /// A list of search results. + List get results => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $SearchResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $SearchResponseCopyWith<$Res> { + factory $SearchResponseCopyWith( + SearchResponse value, $Res Function(SearchResponse) then) = + _$SearchResponseCopyWithImpl<$Res, SearchResponse>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? answer, + String query, + @JsonKey(name: 'response_time') double responseTime, + @JsonKey(includeIfNull: false) List? images, + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? followUpQuestions, + List results}); +} + +/// @nodoc +class _$SearchResponseCopyWithImpl<$Res, $Val extends SearchResponse> + implements $SearchResponseCopyWith<$Res> { + _$SearchResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? answer = freezed, + Object? query = null, + Object? responseTime = null, + Object? images = freezed, + Object? followUpQuestions = freezed, + Object? results = null, + }) { + return _then(_value.copyWith( + answer: freezed == answer + ? _value.answer + : answer // ignore: cast_nullable_to_non_nullable + as String?, + query: null == query + ? _value.query + : query // ignore: cast_nullable_to_non_nullable + as String, + responseTime: null == responseTime + ? _value.responseTime + : responseTime // ignore: cast_nullable_to_non_nullable + as double, + images: freezed == images + ? 
_value.images + : images // ignore: cast_nullable_to_non_nullable + as List?, + followUpQuestions: freezed == followUpQuestions + ? _value.followUpQuestions + : followUpQuestions // ignore: cast_nullable_to_non_nullable + as List?, + results: null == results + ? _value.results + : results // ignore: cast_nullable_to_non_nullable + as List, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$SearchResponseImplCopyWith<$Res> + implements $SearchResponseCopyWith<$Res> { + factory _$$SearchResponseImplCopyWith(_$SearchResponseImpl value, + $Res Function(_$SearchResponseImpl) then) = + __$$SearchResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? answer, + String query, + @JsonKey(name: 'response_time') double responseTime, + @JsonKey(includeIfNull: false) List? images, + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? followUpQuestions, + List results}); +} + +/// @nodoc +class __$$SearchResponseImplCopyWithImpl<$Res> + extends _$SearchResponseCopyWithImpl<$Res, _$SearchResponseImpl> + implements _$$SearchResponseImplCopyWith<$Res> { + __$$SearchResponseImplCopyWithImpl( + _$SearchResponseImpl _value, $Res Function(_$SearchResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? answer = freezed, + Object? query = null, + Object? responseTime = null, + Object? images = freezed, + Object? followUpQuestions = freezed, + Object? results = null, + }) { + return _then(_$SearchResponseImpl( + answer: freezed == answer + ? _value.answer + : answer // ignore: cast_nullable_to_non_nullable + as String?, + query: null == query + ? _value.query + : query // ignore: cast_nullable_to_non_nullable + as String, + responseTime: null == responseTime + ? _value.responseTime + : responseTime // ignore: cast_nullable_to_non_nullable + as double, + images: freezed == images + ? _value._images + : images // ignore: cast_nullable_to_non_nullable + as List?, + followUpQuestions: freezed == followUpQuestions + ? _value._followUpQuestions + : followUpQuestions // ignore: cast_nullable_to_non_nullable + as List?, + results: null == results + ? _value._results + : results // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$SearchResponseImpl extends _SearchResponse { + const _$SearchResponseImpl( + {@JsonKey(includeIfNull: false) this.answer, + required this.query, + @JsonKey(name: 'response_time') required this.responseTime, + @JsonKey(includeIfNull: false) final List? images, + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + final List? followUpQuestions, + required final List results}) + : _images = images, + _followUpQuestions = followUpQuestions, + _results = results, + super._(); + + factory _$SearchResponseImpl.fromJson(Map json) => + _$$SearchResponseImplFromJson(json); + + /// The answer to your search query. + @override + @JsonKey(includeIfNull: false) + final String? answer; + + /// Your search query. + @override + final String query; + + /// Your search result response time. + @override + @JsonKey(name: 'response_time') + final double responseTime; + + /// A list of query related image urls. + final List? _images; + + /// A list of query related image urls. + @override + @JsonKey(includeIfNull: false) + List? 
get images { + final value = _images; + if (value == null) return null; + if (_images is EqualUnmodifiableListView) return _images; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// A list of suggested research follow up questions related to original query. + final List? _followUpQuestions; + + /// A list of suggested research follow up questions related to original query. + @override + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? get followUpQuestions { + final value = _followUpQuestions; + if (value == null) return null; + if (_followUpQuestions is EqualUnmodifiableListView) + return _followUpQuestions; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// A list of search results. + final List _results; + + /// A list of search results. + @override + List get results { + if (_results is EqualUnmodifiableListView) return _results; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_results); + } + + @override + String toString() { + return 'SearchResponse(answer: $answer, query: $query, responseTime: $responseTime, images: $images, followUpQuestions: $followUpQuestions, results: $results)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$SearchResponseImpl && + (identical(other.answer, answer) || other.answer == answer) && + (identical(other.query, query) || other.query == query) && + (identical(other.responseTime, responseTime) || + other.responseTime == responseTime) && + const DeepCollectionEquality().equals(other._images, _images) && + const DeepCollectionEquality() + .equals(other._followUpQuestions, _followUpQuestions) && + const DeepCollectionEquality().equals(other._results, _results)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + answer, + query, + responseTime, + const DeepCollectionEquality().hash(_images), + const DeepCollectionEquality().hash(_followUpQuestions), + const DeepCollectionEquality().hash(_results)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$SearchResponseImplCopyWith<_$SearchResponseImpl> get copyWith => + __$$SearchResponseImplCopyWithImpl<_$SearchResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$SearchResponseImplToJson( + this, + ); + } +} + +abstract class _SearchResponse extends SearchResponse { + const factory _SearchResponse( + {@JsonKey(includeIfNull: false) final String? answer, + required final String query, + @JsonKey(name: 'response_time') required final double responseTime, + @JsonKey(includeIfNull: false) final List? images, + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + final List? followUpQuestions, + required final List results}) = _$SearchResponseImpl; + const _SearchResponse._() : super._(); + + factory _SearchResponse.fromJson(Map json) = + _$SearchResponseImpl.fromJson; + + @override + + /// The answer to your search query. + @JsonKey(includeIfNull: false) + String? get answer; + @override + + /// Your search query. + String get query; + @override + + /// Your search result response time. + @JsonKey(name: 'response_time') + double get responseTime; + @override + + /// A list of query related image urls. + @JsonKey(includeIfNull: false) + List? get images; + @override + + /// A list of suggested research follow up questions related to original query. 
+ @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? get followUpQuestions; + @override + + /// A list of search results. + List get results; + @override + @JsonKey(ignore: true) + _$$SearchResponseImplCopyWith<_$SearchResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + +SearchResult _$SearchResultFromJson(Map json) { + return _SearchResult.fromJson(json); +} + +/// @nodoc +mixin _$SearchResult { + /// The title of the search result url. + String get title => throw _privateConstructorUsedError; + + /// The url of the search result. + String get url => throw _privateConstructorUsedError; + + /// The most query related content from the scraped url. + String get content => throw _privateConstructorUsedError; + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + @JsonKey(name: 'raw_content', includeIfNull: false) + String? get rawContent => throw _privateConstructorUsedError; + + /// The relevance score of the search result. + double get score => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $SearchResultCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $SearchResultCopyWith<$Res> { + factory $SearchResultCopyWith( + SearchResult value, $Res Function(SearchResult) then) = + _$SearchResultCopyWithImpl<$Res, SearchResult>; + @useResult + $Res call( + {String title, + String url, + String content, + @JsonKey(name: 'raw_content', includeIfNull: false) String? rawContent, + double score}); +} + +/// @nodoc +class _$SearchResultCopyWithImpl<$Res, $Val extends SearchResult> + implements $SearchResultCopyWith<$Res> { + _$SearchResultCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? title = null, + Object? url = null, + Object? content = null, + Object? rawContent = freezed, + Object? score = null, + }) { + return _then(_value.copyWith( + title: null == title + ? _value.title + : title // ignore: cast_nullable_to_non_nullable + as String, + url: null == url + ? _value.url + : url // ignore: cast_nullable_to_non_nullable + as String, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String, + rawContent: freezed == rawContent + ? _value.rawContent + : rawContent // ignore: cast_nullable_to_non_nullable + as String?, + score: null == score + ? _value.score + : score // ignore: cast_nullable_to_non_nullable + as double, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$SearchResultImplCopyWith<$Res> + implements $SearchResultCopyWith<$Res> { + factory _$$SearchResultImplCopyWith( + _$SearchResultImpl value, $Res Function(_$SearchResultImpl) then) = + __$$SearchResultImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String title, + String url, + String content, + @JsonKey(name: 'raw_content', includeIfNull: false) String? rawContent, + double score}); +} + +/// @nodoc +class __$$SearchResultImplCopyWithImpl<$Res> + extends _$SearchResultCopyWithImpl<$Res, _$SearchResultImpl> + implements _$$SearchResultImplCopyWith<$Res> { + __$$SearchResultImplCopyWithImpl( + _$SearchResultImpl _value, $Res Function(_$SearchResultImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? title = null, + Object? url = null, + Object? 
content = null, + Object? rawContent = freezed, + Object? score = null, + }) { + return _then(_$SearchResultImpl( + title: null == title + ? _value.title + : title // ignore: cast_nullable_to_non_nullable + as String, + url: null == url + ? _value.url + : url // ignore: cast_nullable_to_non_nullable + as String, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String, + rawContent: freezed == rawContent + ? _value.rawContent + : rawContent // ignore: cast_nullable_to_non_nullable + as String?, + score: null == score + ? _value.score + : score // ignore: cast_nullable_to_non_nullable + as double, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$SearchResultImpl extends _SearchResult { + const _$SearchResultImpl( + {required this.title, + required this.url, + required this.content, + @JsonKey(name: 'raw_content', includeIfNull: false) this.rawContent, + required this.score}) + : super._(); + + factory _$SearchResultImpl.fromJson(Map json) => + _$$SearchResultImplFromJson(json); + + /// The title of the search result url. + @override + final String title; + + /// The url of the search result. + @override + final String url; + + /// The most query related content from the scraped url. + @override + final String content; + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + @override + @JsonKey(name: 'raw_content', includeIfNull: false) + final String? rawContent; + + /// The relevance score of the search result. + @override + final double score; + + @override + String toString() { + return 'SearchResult(title: $title, url: $url, content: $content, rawContent: $rawContent, score: $score)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$SearchResultImpl && + (identical(other.title, title) || other.title == title) && + (identical(other.url, url) || other.url == url) && + (identical(other.content, content) || other.content == content) && + (identical(other.rawContent, rawContent) || + other.rawContent == rawContent) && + (identical(other.score, score) || other.score == score)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, title, url, content, rawContent, score); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$SearchResultImplCopyWith<_$SearchResultImpl> get copyWith => + __$$SearchResultImplCopyWithImpl<_$SearchResultImpl>(this, _$identity); + + @override + Map toJson() { + return _$$SearchResultImplToJson( + this, + ); + } +} + +abstract class _SearchResult extends SearchResult { + const factory _SearchResult( + {required final String title, + required final String url, + required final String content, + @JsonKey(name: 'raw_content', includeIfNull: false) + final String? rawContent, + required final double score}) = _$SearchResultImpl; + const _SearchResult._() : super._(); + + factory _SearchResult.fromJson(Map json) = + _$SearchResultImpl.fromJson; + + @override + + /// The title of the search result url. + String get title; + @override + + /// The url of the search result. + String get url; + @override + + /// The most query related content from the scraped url. + String get content; + @override + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + @JsonKey(name: 'raw_content', includeIfNull: false) + String? get rawContent; + @override + + /// The relevance score of the search result. 
+ double get score; + @override + @JsonKey(ignore: true) + _$$SearchResultImplCopyWith<_$SearchResultImpl> get copyWith => + throw _privateConstructorUsedError; +} diff --git a/packages/tavily_dart/lib/src/generated/schema/schema.g.dart b/packages/tavily_dart/lib/src/generated/schema/schema.g.dart new file mode 100644 index 00000000..f9214d02 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/schema.g.dart @@ -0,0 +1,116 @@ +// GENERATED CODE - DO NOT MODIFY BY HAND + +// ignore_for_file: prefer_final_parameters, require_trailing_commas, non_constant_identifier_names, unnecessary_null_checks + +part of 'schema.dart'; + +// ************************************************************************** +// JsonSerializableGenerator +// ************************************************************************** + +_$SearchRequestImpl _$$SearchRequestImplFromJson(Map json) => + _$SearchRequestImpl( + apiKey: json['api_key'] as String, + query: json['query'] as String, + searchDepth: $enumDecodeNullable( + _$SearchRequestSearchDepthEnumMap, json['search_depth']) ?? + SearchRequestSearchDepth.basic, + includeImages: json['include_images'] as bool? ?? false, + includeAnswer: json['include_answer'] as bool? ?? false, + includeRawContent: json['include_raw_content'] as bool? ?? false, + maxResults: (json['max_results'] as num?)?.toInt() ?? 5, + includeDomains: (json['include_domains'] as List?) + ?.map((e) => e as String) + .toList(), + excludeDomains: (json['exclude_domains'] as List?) + ?.map((e) => e as String) + .toList(), + ); + +Map _$$SearchRequestImplToJson(_$SearchRequestImpl instance) { + final val = { + 'api_key': instance.apiKey, + 'query': instance.query, + 'search_depth': _$SearchRequestSearchDepthEnumMap[instance.searchDepth]!, + 'include_images': instance.includeImages, + 'include_answer': instance.includeAnswer, + 'include_raw_content': instance.includeRawContent, + 'max_results': instance.maxResults, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('include_domains', instance.includeDomains); + writeNotNull('exclude_domains', instance.excludeDomains); + return val; +} + +const _$SearchRequestSearchDepthEnumMap = { + SearchRequestSearchDepth.basic: 'basic', + SearchRequestSearchDepth.advanced: 'advanced', +}; + +_$SearchResponseImpl _$$SearchResponseImplFromJson(Map json) => + _$SearchResponseImpl( + answer: json['answer'] as String?, + query: json['query'] as String, + responseTime: (json['response_time'] as num).toDouble(), + images: + (json['images'] as List?)?.map((e) => e as String).toList(), + followUpQuestions: (json['follow_up_questions'] as List?) 
+ ?.map((e) => e as String) + .toList(), + results: (json['results'] as List) + .map((e) => SearchResult.fromJson(e as Map)) + .toList(), + ); + +Map _$$SearchResponseImplToJson( + _$SearchResponseImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('answer', instance.answer); + val['query'] = instance.query; + val['response_time'] = instance.responseTime; + writeNotNull('images', instance.images); + writeNotNull('follow_up_questions', instance.followUpQuestions); + val['results'] = instance.results.map((e) => e.toJson()).toList(); + return val; +} + +_$SearchResultImpl _$$SearchResultImplFromJson(Map json) => + _$SearchResultImpl( + title: json['title'] as String, + url: json['url'] as String, + content: json['content'] as String, + rawContent: json['raw_content'] as String?, + score: (json['score'] as num).toDouble(), + ); + +Map _$$SearchResultImplToJson(_$SearchResultImpl instance) { + final val = { + 'title': instance.title, + 'url': instance.url, + 'content': instance.content, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('raw_content', instance.rawContent); + val['score'] = instance.score; + return val; +} diff --git a/packages/tavily_dart/lib/src/generated/schema/search_request.dart b/packages/tavily_dart/lib/src/generated/schema/search_request.dart new file mode 100644 index 00000000..c0d16e7a --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/search_request.dart @@ -0,0 +1,103 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of tavily_schema; + +// ========================================== +// CLASS: SearchRequest +// ========================================== + +/// The search request object. +@freezed +class SearchRequest with _$SearchRequest { + const SearchRequest._(); + + /// Factory constructor for SearchRequest + const factory SearchRequest({ + /// Your unique API key. + @JsonKey(name: 'api_key') required String apiKey, + + /// The search query string. + required String query, + + /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. + @JsonKey(name: 'search_depth') + @Default(SearchRequestSearchDepth.basic) + SearchRequestSearchDepth searchDepth, + + /// Include a list of query related images in the response. Default is False. + @JsonKey(name: 'include_images') @Default(false) bool includeImages, + + /// Include answers in the search results. Default is False. + @JsonKey(name: 'include_answer') @Default(false) bool includeAnswer, + + /// Include raw content in the search results. Default is False. + @JsonKey(name: 'include_raw_content') + @Default(false) + bool includeRawContent, + + /// The number of maximum search results to return. Default is 5. + @JsonKey(name: 'max_results') @Default(5) int maxResults, + + /// A list of domains to specifically include in the search results. Default is None. + @JsonKey(name: 'include_domains', includeIfNull: false) + List? includeDomains, + + /// A list of domains to specifically exclude from the search results. Default is None. + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? 
excludeDomains, + }) = _SearchRequest; + + /// Object construction from a JSON representation + factory SearchRequest.fromJson(Map json) => + _$SearchRequestFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'api_key', + 'query', + 'search_depth', + 'include_images', + 'include_answer', + 'include_raw_content', + 'max_results', + 'include_domains', + 'exclude_domains' + ]; + + /// Validation constants + static const maxResultsDefaultValue = 5; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'api_key': apiKey, + 'query': query, + 'search_depth': searchDepth, + 'include_images': includeImages, + 'include_answer': includeAnswer, + 'include_raw_content': includeRawContent, + 'max_results': maxResults, + 'include_domains': includeDomains, + 'exclude_domains': excludeDomains, + }; + } +} + +// ========================================== +// ENUM: SearchRequestSearchDepth +// ========================================== + +/// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. +enum SearchRequestSearchDepth { + @JsonValue('basic') + basic, + @JsonValue('advanced') + advanced, +} diff --git a/packages/tavily_dart/lib/src/generated/schema/search_response.dart b/packages/tavily_dart/lib/src/generated/schema/search_response.dart new file mode 100644 index 00000000..473db9c1 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/search_response.dart @@ -0,0 +1,68 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of tavily_schema; + +// ========================================== +// CLASS: SearchResponse +// ========================================== + +/// The response data from the search query. +@freezed +class SearchResponse with _$SearchResponse { + const SearchResponse._(); + + /// Factory constructor for SearchResponse + const factory SearchResponse({ + /// The answer to your search query. + @JsonKey(includeIfNull: false) String? answer, + + /// Your search query. + required String query, + + /// Your search result response time. + @JsonKey(name: 'response_time') required double responseTime, + + /// A list of query related image urls. + @JsonKey(includeIfNull: false) List? images, + + /// A list of suggested research follow up questions related to original query. + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? followUpQuestions, + + /// A list of search results. + required List results, + }) = _SearchResponse; + + /// Object construction from a JSON representation + factory SearchResponse.fromJson(Map json) => + _$SearchResponseFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'answer', + 'query', + 'response_time', + 'images', + 'follow_up_questions', + 'results' + ]; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'answer': answer, + 'query': query, + 'response_time': responseTime, + 'images': images, + 'follow_up_questions': followUpQuestions, + 'results': results, + }; + } +} diff --git a/packages/tavily_dart/lib/src/generated/schema/search_result.dart b/packages/tavily_dart/lib/src/generated/schema/search_result.dart new file mode 100644 index 00000000..cfb75690 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/search_result.dart @@ -0,0 +1,62 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of tavily_schema; + +// ========================================== +// CLASS: SearchResult +// ========================================== + +/// The search result object. +@freezed +class SearchResult with _$SearchResult { + const SearchResult._(); + + /// Factory constructor for SearchResult + const factory SearchResult({ + /// The title of the search result url. + required String title, + + /// The url of the search result. + required String url, + + /// The most query related content from the scraped url. + required String content, + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + @JsonKey(name: 'raw_content', includeIfNull: false) String? rawContent, + + /// The relevance score of the search result. + required double score, + }) = _SearchResult; + + /// Object construction from a JSON representation + factory SearchResult.fromJson(Map json) => + _$SearchResultFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'title', + 'url', + 'content', + 'raw_content', + 'score' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'title': title, + 'url': url, + 'content': content, + 'raw_content': rawContent, + 'score': score, + }; + } +} diff --git a/packages/tavily_dart/lib/tavily_dart.dart b/packages/tavily_dart/lib/tavily_dart.dart index c894f0f7..272b33ce 100644 --- a/packages/tavily_dart/lib/tavily_dart.dart +++ b/packages/tavily_dart/lib/tavily_dart.dart @@ -1,2 +1,5 @@ /// Dart Client for the Tavily API (a search engine optimized for LLMs and RAG). library; + +export 'src/generated/client.dart'; +export 'src/generated/schema/schema.dart'; diff --git a/packages/tavily_dart/oas/main.dart b/packages/tavily_dart/oas/main.dart new file mode 100644 index 00000000..bf08264b --- /dev/null +++ b/packages/tavily_dart/oas/main.dart @@ -0,0 +1,23 @@ +import 'dart:io'; + +import 'package:openapi_spec/openapi_spec.dart'; + +/// Generates Tavily API client Dart code from the OpenAPI spec. 
+/// https://docs.tavily.com/docs/tavily-api/rest_api +void main() async { + final spec = OpenApi.fromFile(source: 'oas/tavily_openapi.yaml'); + + await spec.generate( + package: 'Tavily', + destination: 'lib/src/generated/', + replace: true, + clientOptions: const ClientGeneratorOptions( + enabled: true, + ), + ); + + await Process.run( + 'dart', + ['run', 'build_runner', 'build', 'lib', '--delete-conflicting-outputs'], + ); +} diff --git a/packages/tavily_dart/oas/tavily_openapi.yaml b/packages/tavily_dart/oas/tavily_openapi.yaml new file mode 100644 index 00000000..250fa447 --- /dev/null +++ b/packages/tavily_dart/oas/tavily_openapi.yaml @@ -0,0 +1,156 @@ +openapi: 3.0.3 + +info: + title: Tavily API + description: Tavily Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience. + version: 1.0.0 + contact: + name: Tavily Support + url: https://tavily.com + +servers: + - url: https://api.tavily.com + +paths: + /search: + post: + summary: Search for data based on a query. + operationId: search + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/SearchRequest' + responses: + '200': + description: Successful search response. + content: + application/json: + schema: + $ref: '#/components/schemas/SearchResponse' + '400': + description: Bad Request — Your request is invalid. + '401': + description: Unauthorized — Your API key is wrong. + '403': + description: Forbidden — The endpoint requested is hidden for administrators only. + '404': + description: Not Found — The specified endpoint could not be found. + '405': + description: Method Not Allowed — You tried to access an endpoint with an invalid method. + '429': + description: Too Many Requests — You're requesting too many results! Slow down! + '500': + description: Internal Server Error — We had a problem with our server. Try again later. + '503': + description: Service Unavailable — We're temporarily offline for maintenance. Please try again later. + '504': + description: Gateway Timeout — We're temporarily offline for maintenance. Please try again later. + +components: + schemas: + SearchRequest: + type: object + description: The search request object. + properties: + api_key: + type: string + description: Your unique API key. + example: "your api key" + query: + type: string + description: The search query string. + example: "your search query" + search_depth: + type: string + description: The depth of the search. It can be 'basic' or advanced. Default is 'basic'. + enum: + - basic + - advanced + default: basic + include_images: + type: boolean + description: Include a list of query related images in the response. Default is False. + default: false + include_answer: + type: boolean + description: Include answers in the search results. Default is False. + default: false + include_raw_content: + type: boolean + description: Include raw content in the search results. Default is False. + default: false + max_results: + type: integer + description: The number of maximum search results to return. Default is 5. + default: 5 + include_domains: + type: array + items: + type: string + description: A list of domains to specifically include in the search results. Default is None. + exclude_domains: + type: array + items: + type: string + description: A list of domains to specifically exclude from the search results. Default is None. 
+ required: + - api_key + - query + SearchResponse: + type: object + description: The response data from the search query. + properties: + answer: + type: string + description: The answer to your search query. + query: + type: string + description: Your search query. + response_time: + type: number + description: Your search result response time. + images: + type: array + items: + type: string + description: A list of query related image urls. + follow_up_questions: + type: array + items: + type: string + description: A list of suggested research follow up questions related to original query. + results: + type: array + description: A list of search results. + items: + $ref: '#/components/schemas/SearchResult' + required: + - query + - response_time + - results + SearchResult: + type: object + description: The search result object. + properties: + title: + type: string + description: The title of the search result url. + url: + type: string + description: The url of the search result. + content: + type: string + description: The most query related content from the scraped url. + raw_content: + type: string + description: The parsed and cleaned HTML of the site. For now includes parsed text only. + score: + type: number + description: The relevance score of the search result. + required: + - title + - url + - content + - score diff --git a/packages/tavily_dart/pubspec.yaml b/packages/tavily_dart/pubspec.yaml index 24fccdcb..216e0b0d 100644 --- a/packages/tavily_dart/pubspec.yaml +++ b/packages/tavily_dart/pubspec.yaml @@ -14,3 +14,21 @@ topics: environment: sdk: ">=3.0.0 <4.0.0" + +dependencies: + fetch_client: ^1.0.2 + freezed_annotation: ^2.4.1 + http: ^1.1.0 + json_annotation: ^4.8.1 + meta: ^1.11.0 + +dev_dependencies: + build_runner: ^2.4.9 + freezed: ^2.4.7 + json_serializable: ^6.7.1 + # openapi_spec: ^0.7.8 + openapi_spec: + git: + url: https://github.com/davidmigloz/openapi_spec.git + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 + test: ^1.25.2 diff --git a/packages/tavily_dart/test/tavily_test.dart b/packages/tavily_dart/test/tavily_test.dart new file mode 100644 index 00000000..0df02cb8 --- /dev/null +++ b/packages/tavily_dart/test/tavily_test.dart @@ -0,0 +1,45 @@ +@TestOn('vm') +library; // Uses dart:io + +import 'dart:io'; + +import 'package:tavily_dart/tavily_dart.dart'; +import 'package:test/test.dart'; + +void main() { + group('Tavily API tests', () { + late TavilyClient client; + + setUp(() async { + client = TavilyClient(); + }); + + tearDown(() { + client.endSession(); + }); + + test('Test call search API', () async { + final res = await client.search( + request: SearchRequest( + apiKey: Platform.environment['TAVILY_API_KEY']!, + query: 'Should I invest in Apple right now?', + includeAnswer: true, + includeImages: true, + includeRawContent: true, + maxResults: 3, + ), + ); + expect(res.answer, isNotEmpty); + expect(res.query, 'Should I invest in Apple right now?'); + expect(res.responseTime, greaterThan(0)); + expect(res.images, isNotEmpty); + expect(res.results, hasLength(3)); + final result = res.results.first; + expect(result.title, isNotEmpty); + expect(result.url, isNotEmpty); + expect(result.content, isNotEmpty); + expect(result.rawContent, isNotEmpty); + expect(result.score, greaterThan(0)); + }); + }); +} From d7dac3d1d761a48dfdf570a5c9cec3061070ce1a Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 09:56:10 +0200 Subject: [PATCH 043/251] feat: Add support for usage metadata in ChatFirebaseVertexAI (#457) --- melos.yaml | 6 +- 
.../Flutter/GeneratedPluginRegistrant.swift | 2 + .../langchain_firebase/example/pubspec.lock | 60 +++++++++++++------ .../langchain_firebase/example/pubspec.yaml | 2 +- .../src/chat_models/vertex_ai/mappers.dart | 10 ++-- packages/langchain_firebase/pubspec.lock | 60 +++++++++++++------ packages/langchain_firebase/pubspec.yaml | 6 +- 7 files changed, 98 insertions(+), 48 deletions(-) diff --git a/melos.yaml b/melos.yaml index 8390e622..0f524a0a 100644 --- a/melos.yaml +++ b/melos.yaml @@ -32,9 +32,9 @@ command: csv: ^6.0.0 equatable: ^2.0.5 fetch_client: ^1.0.2 - firebase_app_check: ^0.2.2+5 - firebase_core: ^2.31.0 - firebase_vertexai: ^0.1.0 + firebase_app_check: ^0.3.0 + firebase_core: ^3.1.0 + firebase_vertexai: ^0.2.2 flat_buffers: ^23.5.26 flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 diff --git a/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift b/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift index 2884d031..a2fafff9 100644 --- a/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift +++ b/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift @@ -7,10 +7,12 @@ import Foundation import cloud_firestore import firebase_app_check +import firebase_auth import firebase_core func RegisterGeneratedPlugins(registry: FlutterPluginRegistry) { FLTFirebaseFirestorePlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseFirestorePlugin")) FLTFirebaseAppCheckPlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseAppCheckPlugin")) + FLTFirebaseAuthPlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseAuthPlugin")) FLTFirebaseCorePlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseCorePlugin")) } diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 87d91077..4b481fb2 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _flutterfire_internals - sha256: "2350805d7afefb0efe7acd325cb19d3ae8ba4039b906eade3807ffb69938a01f" + sha256: "0816f12bbbd9e21f72ea8592b11bce4a628d4e5cb7a81ff9f1eee4f3dc23206e" url: "https://pub.dev" source: hosted - version: "1.3.33" + version: "1.3.37" args: dependency: transitive description: @@ -117,58 +117,82 @@ packages: dependency: transitive description: name: firebase_app_check - sha256: f66b67027de548b6f8b61c7aa752a24804104e7f463723d202ae9c6d9520fa6b + sha256: "8aedc3b274826f923f2cf2d61cddeb014c113fd8604373e2fe3a2068b3f496e7" url: "https://pub.dev" source: hosted - version: "0.2.2+5" + version: "0.3.0+1" firebase_app_check_platform_interface: dependency: transitive description: name: firebase_app_check_platform_interface - sha256: c06fcb2381ff2566f4ebe1a1df2e1798de3d0fad531f673e8539368f33cf6b34 + sha256: "93e8aeeb5659c4926682299f175c033fd955fe3a2aa3b2c9a34c55af1ba25f10" url: "https://pub.dev" source: hosted - version: "0.1.0+27" + version: "0.1.0+31" firebase_app_check_web: dependency: transitive description: name: firebase_app_check_web - sha256: "88c8d792d429a43767461312f29baa5d3b76cd0453cf48dd008f8f94393221c1" + sha256: c2933fc26b73d02b791291df00446a6dbf99d1b59e038bb55cbbec74fcb40c4a url: "https://pub.dev" source: hosted - version: "0.1.2+5" + version: "0.1.2+9" + firebase_auth: + dependency: transitive + description: + name: firebase_auth + sha256: "3af60a78e92567af3d9a5e25d3955f0f6a3f7a33b900724c1c4c336ff5e44200" 
+ url: "https://pub.dev" + source: hosted + version: "5.1.0" + firebase_auth_platform_interface: + dependency: transitive + description: + name: firebase_auth_platform_interface + sha256: "6941c07a1d129a8b834f85b6673d3455f24102b6338346596c26ef3be2c106ce" + url: "https://pub.dev" + source: hosted + version: "7.4.0" + firebase_auth_web: + dependency: transitive + description: + name: firebase_auth_web + sha256: "5c3f6b45dc141cec858c050d6a6f07bdbfab45ab92a68b32be4b08805bdcadaa" + url: "https://pub.dev" + source: hosted + version: "5.12.2" firebase_core: dependency: "direct main" description: name: firebase_core - sha256: "372d94ced114b9c40cb85e18c50ac94a7e998c8eec630c50d7aec047847d27bf" + sha256: fae4ab4317c2a7afb13d44ef1e3f9f28a630e10016bc5cfe761e8e6a0ed7816a url: "https://pub.dev" source: hosted - version: "2.31.0" + version: "3.1.0" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: c437ae5d17e6b5cc7981cf6fd458a5db4d12979905f9aafd1fea930428a9fe63 + sha256: "1003a5a03a61fc9a22ef49f37cbcb9e46c86313a7b2e7029b9390cf8c6fc32cb" url: "https://pub.dev" source: hosted - version: "5.0.0" + version: "5.1.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: "43d9e951ac52b87ae9cc38ecdcca1e8fa7b52a1dd26a96085ba41ce5108db8e9" + sha256: "6643fe3dbd021e6ccfb751f7882b39df355708afbdeb4130fc50f9305a9d1a3d" url: "https://pub.dev" source: hosted - version: "2.17.0" + version: "2.17.2" firebase_vertexai: dependency: transitive description: name: firebase_vertexai - sha256: "6e61f6717bee3ab563e8e506e0fed98761f98c181626c62d924d06598786e95e" + sha256: a96bc9e8a6e1da0c4bbda2dd24f03b74e069449a3fa7940c87ec611bfc96633d url: "https://pub.dev" source: hosted - version: "0.1.0" + version: "0.2.2" fixnum: dependency: transitive description: @@ -212,10 +236,10 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be + sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "0.4.3" http: dependency: transitive description: diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index 5b635cc5..ff8593ef 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -9,7 +9,7 @@ environment: dependencies: cupertino_icons: ^1.0.6 - firebase_core: ^2.31.0 + firebase_core: ^3.1.0 flutter: sdk: flutter flutter_markdown: ^0.6.22 diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart index 05840e8f..3d649592 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart @@ -134,11 +134,11 @@ extension GenerateContentResponseMapper on f.GenerateContentResponse { .toList(growable: false), 'finish_message': candidate.finishMessage, }, - usage: const LanguageModelUsage( - // promptTokens: usageMetadata?.promptTokenCount, // not yet supported - // responseTokens: usageMetadata?.candidatesTokenCount, - // totalTokens: usageMetadata?.totalTokenCount, - ), + usage: LanguageModelUsage( + promptTokens: usageMetadata?.promptTokenCount, + responseTokens: usageMetadata?.candidatesTokenCount, + totalTokens: usageMetadata?.totalTokenCount, + ), ); } diff 
--git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index eee61f63..1c5e494a 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _flutterfire_internals - sha256: "2350805d7afefb0efe7acd325cb19d3ae8ba4039b906eade3807ffb69938a01f" + sha256: "0816f12bbbd9e21f72ea8592b11bce4a628d4e5cb7a81ff9f1eee4f3dc23206e" url: "https://pub.dev" source: hosted - version: "1.3.33" + version: "1.3.37" async: dependency: transitive description: @@ -101,58 +101,82 @@ packages: dependency: "direct main" description: name: firebase_app_check - sha256: f66b67027de548b6f8b61c7aa752a24804104e7f463723d202ae9c6d9520fa6b + sha256: "8aedc3b274826f923f2cf2d61cddeb014c113fd8604373e2fe3a2068b3f496e7" url: "https://pub.dev" source: hosted - version: "0.2.2+5" + version: "0.3.0+1" firebase_app_check_platform_interface: dependency: transitive description: name: firebase_app_check_platform_interface - sha256: c06fcb2381ff2566f4ebe1a1df2e1798de3d0fad531f673e8539368f33cf6b34 + sha256: "93e8aeeb5659c4926682299f175c033fd955fe3a2aa3b2c9a34c55af1ba25f10" url: "https://pub.dev" source: hosted - version: "0.1.0+27" + version: "0.1.0+31" firebase_app_check_web: dependency: transitive description: name: firebase_app_check_web - sha256: "88c8d792d429a43767461312f29baa5d3b76cd0453cf48dd008f8f94393221c1" + sha256: c2933fc26b73d02b791291df00446a6dbf99d1b59e038bb55cbbec74fcb40c4a url: "https://pub.dev" source: hosted - version: "0.1.2+5" + version: "0.1.2+9" + firebase_auth: + dependency: transitive + description: + name: firebase_auth + sha256: "3af60a78e92567af3d9a5e25d3955f0f6a3f7a33b900724c1c4c336ff5e44200" + url: "https://pub.dev" + source: hosted + version: "5.1.0" + firebase_auth_platform_interface: + dependency: transitive + description: + name: firebase_auth_platform_interface + sha256: "6941c07a1d129a8b834f85b6673d3455f24102b6338346596c26ef3be2c106ce" + url: "https://pub.dev" + source: hosted + version: "7.4.0" + firebase_auth_web: + dependency: transitive + description: + name: firebase_auth_web + sha256: "5c3f6b45dc141cec858c050d6a6f07bdbfab45ab92a68b32be4b08805bdcadaa" + url: "https://pub.dev" + source: hosted + version: "5.12.2" firebase_core: dependency: "direct main" description: name: firebase_core - sha256: "372d94ced114b9c40cb85e18c50ac94a7e998c8eec630c50d7aec047847d27bf" + sha256: fae4ab4317c2a7afb13d44ef1e3f9f28a630e10016bc5cfe761e8e6a0ed7816a url: "https://pub.dev" source: hosted - version: "2.31.0" + version: "3.1.0" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: c437ae5d17e6b5cc7981cf6fd458a5db4d12979905f9aafd1fea930428a9fe63 + sha256: "1003a5a03a61fc9a22ef49f37cbcb9e46c86313a7b2e7029b9390cf8c6fc32cb" url: "https://pub.dev" source: hosted - version: "5.0.0" + version: "5.1.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: "43d9e951ac52b87ae9cc38ecdcca1e8fa7b52a1dd26a96085ba41ce5108db8e9" + sha256: "6643fe3dbd021e6ccfb751f7882b39df355708afbdeb4130fc50f9305a9d1a3d" url: "https://pub.dev" source: hosted - version: "2.17.0" + version: "2.17.2" firebase_vertexai: dependency: "direct main" description: name: firebase_vertexai - sha256: "6e61f6717bee3ab563e8e506e0fed98761f98c181626c62d924d06598786e95e" + sha256: a96bc9e8a6e1da0c4bbda2dd24f03b74e069449a3fa7940c87ec611bfc96633d url: "https://pub.dev" source: hosted - version: "0.1.0" + version: "0.2.2" fixnum: 
dependency: transitive description: @@ -180,10 +204,10 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be + sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "0.4.3" http: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index eec70b30..3dda1869 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -19,10 +19,10 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" - firebase_app_check: ^0.2.2+5 - firebase_core: ^2.31.0 + firebase_app_check: ^0.3.0 + firebase_core: ^3.1.0 cloud_firestore: ^4.17.0 - firebase_vertexai: ^0.1.0 + firebase_vertexai: ^0.2.2 langchain_core: ^0.3.2 meta: ^1.11.0 uuid: ^4.3.3 From cad03c51c3f61d36ab8e4c8ad038a5d318f97eb7 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 10:09:57 +0200 Subject: [PATCH 044/251] feat!: Update ChatFirebaseVertexAI default model to gemini-1.5-flash (#458) --- .../integrations/firebase_vertex_ai.md | 26 ++++----- .../langchain_firebase/example/lib/main.dart | 2 +- .../example/web/flutter_bootstrap.js | 12 +++++ .../langchain_firebase/example/web/index.html | 54 ++++--------------- .../vertex_ai/chat_firebase_vertex_ai.dart | 22 ++++---- .../lib/src/chat_models/vertex_ai/types.dart | 2 +- 6 files changed, 47 insertions(+), 71 deletions(-) create mode 100644 packages/langchain_firebase/example/web/flutter_bootstrap.js diff --git a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md index ef8e03d0..cd33daa2 100644 --- a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md +++ b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md @@ -66,22 +66,22 @@ print(res); ## Available models The following models are available: -- `gemini-1.0-pro` - * text -> text model - * Max input token: 30720 - * Max output tokens: 2048 -- `gemini-1.0-pro-vision`: - * text / image -> text model - * Max input token: 12288 - * Max output tokens: 4096 -- `gemini-1.5-pro-preview-0514`: +- `gemini-1.5-flash`: * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 -- `gemini-1.5-flash-preview-0514`: +- `gemini-1.5-pro`: * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 +- `gemini-1.0-pro-vision`: + * text / image -> text model + * Max input token: 12288 + * Max output tokens: 4096 +- `gemini-1.0-pro` + * text -> text model + * Max input token: 30720 + * Max output tokens: 2048 Mind that this list may not be up-to-date. Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) for the updated list. @@ -90,7 +90,7 @@ Mind that this list may not be up-to-date. 
Refer to the [documentation](https:// ```dart final chatModel = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro-preview-0514', + model: 'gemini-1.5-pro', ), ); final res = await chatModel.invoke( @@ -122,7 +122,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ final chatModel = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro-preview-0514', + model: 'gemini-1.5-pro', ), ); @@ -160,7 +160,7 @@ const tool = ToolSpec( ); final chatModel = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro-preview-0514', + model: 'gemini-1.5-pro', temperature: 0, tools: [tool], ), diff --git a/packages/langchain_firebase/example/lib/main.dart b/packages/langchain_firebase/example/lib/main.dart index 44e019e9..f9d5db92 100644 --- a/packages/langchain_firebase/example/lib/main.dart +++ b/packages/langchain_firebase/example/lib/main.dart @@ -155,7 +155,7 @@ class _ChatWidgetState extends State { _model = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro-preview-0514', + model: 'gemini-1.5-pro', tools: [exchangeRateTool], ), // location: 'us-central1', diff --git a/packages/langchain_firebase/example/web/flutter_bootstrap.js b/packages/langchain_firebase/example/web/flutter_bootstrap.js new file mode 100644 index 00000000..8ce49d8a --- /dev/null +++ b/packages/langchain_firebase/example/web/flutter_bootstrap.js @@ -0,0 +1,12 @@ +{{flutter_js}} +{{flutter_build_config}} + +_flutter.loader.load({ + serviceWorkerSettings: { + serviceWorkerVersion: {{flutter_service_worker_version}}, + }, + onEntrypointLoaded: async function(engineInitializer) { + const appRunner = await engineInitializer.initializeEngine({useColorEmoji: true}); + await appRunner.runApp(); + }, +}); diff --git a/packages/langchain_firebase/example/web/index.html b/packages/langchain_firebase/example/web/index.html index 27ef6265..cce674b5 100644 --- a/packages/langchain_firebase/example/web/index.html +++ b/packages/langchain_firebase/example/web/index.html @@ -1,61 +1,25 @@ - - + - - - - + + + + - - - example - + - - - + VertexAI for Firebase in LangChain.dart + - + diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 1a3863b4..3d58f8ea 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -36,22 +36,22 @@ import 'types.dart'; /// ### Available models /// /// The following models are available: -/// - `gemini-1.0-pro` -/// * text -> text model -/// * Max input token: 30720 -/// * Max output tokens: 2048 -/// - `gemini-1.0-pro-vision`: -/// * text / image -> text model -/// * Max input token: 12288 -/// * Max output tokens: 4096 -/// - `gemini-1.5-pro-preview-0514`: +/// - `gemini-1.5-flash`: /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 -/// - `gemini-1.5-flash-preview-0514`: +/// - `gemini-1.5-pro`: /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 +/// - `gemini-1.0-pro-vision`: +/// * text / image -> text model +/// * Max input token: 12288 +/// * Max output tokens: 4096 +/// - `gemini-1.0-pro` +/// * text -> text model +/// * Max input token: 30720 +/// * Max output tokens: 
2048 /// /// Mind that this list may not be up-to-date. /// Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) @@ -132,7 +132,7 @@ import 'types.dart'; /// ); /// final chatModel = ChatFirebaseVertexAI( /// defaultOptions: ChatFirebaseVertexAIOptions( -/// model: 'gemini-1.5-pro-preview-0514', +/// model: 'gemini-1.5-pro', /// temperature: 0, /// tools: [tool], /// ), diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart index d41e4032..7a0ddbdb 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart @@ -6,7 +6,7 @@ import 'package:langchain_core/chat_models.dart'; class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// {@macro chat_firebase_vertex_ai_options} const ChatFirebaseVertexAIOptions({ - this.model = 'gemini-1.0-pro', + this.model = 'gemini-1.5-flash', this.topP, this.topK, this.candidateCount, From 4ce7310e221b5012467c9044cd123c32e7a27b86 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 10:27:18 +0200 Subject: [PATCH 045/251] refactor: Simplify how tools are passed to the internal Firebase client (#459) --- .../vertex_ai/chat_firebase_vertex_ai.dart | 40 +++++-------------- 1 file changed, 11 insertions(+), 29 deletions(-) diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 3d58f8ea..47661d68 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -4,7 +4,6 @@ import 'package:firebase_core/firebase_core.dart'; import 'package:firebase_vertexai/firebase_vertexai.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/prompts.dart'; -import 'package:langchain_core/tools.dart'; import 'package:uuid/uuid.dart'; import 'mappers.dart'; @@ -193,24 +192,20 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// The current system instruction set in [_firebaseClient]; String? _currentSystemInstruction; - /// The current tools set in [_firebaseClient]; - List? _currentTools; - - /// The current tool choice set in [_firebaseClient]; - ChatToolChoice? _currentToolChoice; - @override Future invoke( final PromptValue input, { final ChatFirebaseVertexAIOptions? options, }) async { final id = _uuid.v4(); - final (model, prompt, safetySettings, generationConfig) = + final (model, prompt, safetySettings, generationConfig, tools, toolConfig) = _generateCompletionRequest(input.toChatMessages(), options: options); final completion = await _firebaseClient.generateContent( prompt, safetySettings: safetySettings, generationConfig: generationConfig, + tools: tools, + toolConfig: toolConfig, ); return completion.toChatResult(id, model); } @@ -221,13 +216,15 @@ class ChatFirebaseVertexAI extends BaseChatModel { final ChatFirebaseVertexAIOptions? 
options, }) { final id = _uuid.v4(); - final (model, prompt, safetySettings, generationConfig) = + final (model, prompt, safetySettings, generationConfig, tools, toolConfig) = _generateCompletionRequest(input.toChatMessages(), options: options); return _firebaseClient .generateContentStream( prompt, safetySettings: safetySettings, generationConfig: generationConfig, + tools: tools, + toolConfig: toolConfig, ) .map((final completion) => completion.toChatResult(id, model)); } @@ -238,6 +235,8 @@ class ChatFirebaseVertexAI extends BaseChatModel { Iterable prompt, List? safetySettings, GenerationConfig? generationConfig, + List? tools, + ToolConfig? toolConfig, ) _generateCompletionRequest( final List messages, { final ChatFirebaseVertexAIOptions? options, @@ -260,6 +259,8 @@ class ChatFirebaseVertexAI extends BaseChatModel { topP: options?.topP ?? defaultOptions.topP, topK: options?.topK ?? defaultOptions.topK, ), + (options?.tools ?? defaultOptions.tools)?.toToolList(), + (options?.toolChoice ?? defaultOptions.toolChoice)?.toToolConfig(), ); } @@ -288,8 +289,6 @@ class ChatFirebaseVertexAI extends BaseChatModel { GenerativeModel _createFirebaseClient( final String model, { final String? systemInstruction, - final List? tools, - final ChatToolChoice? toolChoice, }) { return FirebaseVertexAI.instanceFor( app: app, @@ -300,8 +299,6 @@ class ChatFirebaseVertexAI extends BaseChatModel { model: model, systemInstruction: systemInstruction != null ? Content.system(systemInstruction) : null, - tools: tools?.toToolList(), - toolConfig: toolChoice?.toToolConfig(), ); } @@ -309,14 +306,10 @@ class ChatFirebaseVertexAI extends BaseChatModel { void _recreateFirebaseClient( final String model, final String? systemInstruction, - final List? tools, - final ChatToolChoice? toolChoice, ) { _firebaseClient = _createFirebaseClient( model, systemInstruction: systemInstruction, - tools: tools, - toolChoice: toolChoice, ); } @@ -332,9 +325,6 @@ class ChatFirebaseVertexAI extends BaseChatModel { ? messages.firstOrNull?.contentAsString : null; - final tools = options?.tools ?? defaultOptions.tools; - final toolChoice = options?.toolChoice ?? 
defaultOptions.toolChoice; - bool recreate = false; if (model != _currentModel) { _currentModel = model; @@ -344,17 +334,9 @@ class ChatFirebaseVertexAI extends BaseChatModel { _currentSystemInstruction = systemInstruction; recreate = true; } - if (!const ListEquality().equals(tools, _currentTools)) { - _currentTools = tools; - recreate = true; - } - if (toolChoice != _currentToolChoice) { - _currentToolChoice = toolChoice; - recreate = true; - } if (recreate) { - _recreateFirebaseClient(model, systemInstruction, tools, toolChoice); + _recreateFirebaseClient(model, systemInstruction); } } } From 348a58bc541206836cc58fa87d0cbe37c7a14ac9 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 10:36:29 +0200 Subject: [PATCH 046/251] feat: Add support for Firebase Auth in ChatFirebaseVertexAI (#460) --- melos.yaml | 1 + .../vertex_ai/chat_firebase_vertex_ai.dart | 5 +++++ .../src/chat_models/vertex_ai/mappers.dart | 22 ++++++------------- packages/langchain_firebase/pubspec.lock | 2 +- packages/langchain_firebase/pubspec.yaml | 1 + 5 files changed, 15 insertions(+), 16 deletions(-) diff --git a/melos.yaml b/melos.yaml index 0f524a0a..0d9dde51 100644 --- a/melos.yaml +++ b/melos.yaml @@ -33,6 +33,7 @@ command: equatable: ^2.0.5 fetch_client: ^1.0.2 firebase_app_check: ^0.3.0 + firebase_auth: ^5.1.0 firebase_core: ^3.1.0 firebase_vertexai: ^0.2.2 flat_buffers: ^23.5.26 diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 47661d68..251f3913 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -1,5 +1,6 @@ import 'package:collection/collection.dart'; import 'package:firebase_app_check/firebase_app_check.dart'; +import 'package:firebase_auth/firebase_auth.dart'; import 'package:firebase_core/firebase_core.dart'; import 'package:firebase_vertexai/firebase_vertexai.dart'; import 'package:langchain_core/chat_models.dart'; @@ -157,6 +158,7 @@ class ChatFirebaseVertexAI extends BaseChatModel { ), this.app, this.appCheck, + this.auth, this.options, this.location, }) : _currentModel = defaultOptions.model ?? '' { @@ -171,6 +173,9 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// The optional [FirebaseAppCheck] to use to protect the project from abuse. final FirebaseAppCheck? appCheck; + /// The optional [FirebaseAuth] to use for authentication. + final FirebaseAuth? auth; + /// Configuration parameters for sending requests to Firebase. final RequestOptions? 
options; diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart index 3d649592..d256b815 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart @@ -217,45 +217,38 @@ extension ChatToolListMapper on List { switch (type) { case 'string': if (enumValues != null) { - return f.Schema( - f.SchemaType.string, + return f.Schema.enumString( enumValues: enumValues, description: description, nullable: nullable, - format: 'enum', ); } else { - return f.Schema( - f.SchemaType.string, + return f.Schema.string( description: description, nullable: nullable, ); } case 'number': - return f.Schema( - f.SchemaType.number, + return f.Schema.number( description: description, nullable: nullable, format: format, ); case 'integer': - return f.Schema( - f.SchemaType.integer, + return f.Schema.integer( description: description, nullable: nullable, format: format, ); case 'boolean': - return f.Schema( - f.SchemaType.boolean, + return f.Schema.boolean( description: description, nullable: nullable, ); case 'array': if (items != null) { final itemsSchema = _mapJsonSchemaToSchema(items); - return f.Schema( - f.SchemaType.array, + return f.Schema.array( description: description, nullable: nullable, items: itemsSchema, @@ -267,8 +260,7 @@ extension ChatToolListMapper on List { final propertiesSchema = properties.map( (key, value) => MapEntry(key, _mapJsonSchemaToSchema(value)), ); - return f.Schema( - f.SchemaType.object, + return f.Schema.object( properties: propertiesSchema, requiredProperties: requiredProperties, description: description, diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 1c5e494a..62232007 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -122,7 +122,7 @@ packages: source: hosted version: "0.1.2+9" firebase_auth: - dependency: transitive + dependency: "direct main" description: name: firebase_auth sha256: "3af60a78e92567af3d9a5e25d3955f0f6a3f7a33b900724c1c4c336ff5e44200" diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 3dda1869..ec1265df 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -20,6 +20,7 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" firebase_app_check: ^0.3.0 + firebase_auth: ^5.1.0 firebase_core: ^3.1.0 cloud_firestore: ^4.17.0 firebase_vertexai: ^0.2.2 From cae46db9338e21720cc842cda67baf7cddc7d0cd Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 11:13:53 +0200 Subject: [PATCH 047/251] feat: Support response MIME type and schema in ChatGoogleGenerativeAI (#461) --- melos.yaml | 2 +- .../google_ai/chat_google_generative_ai.dart | 5 +++ .../src/chat_models/google_ai/mappers.dart | 14 +++++--- .../lib/src/chat_models/google_ai/types.dart | 35 +++++++++++++++++++ packages/langchain_google/pubspec.yaml | 2 +- 5 files changed, 52 insertions(+), 6 deletions(-) diff --git a/melos.yaml b/melos.yaml index 0d9dde51..56d0555c 100644 --- a/melos.yaml +++ b/melos.yaml @@ -41,7 +41,7 @@ command: flutter_markdown: ^0.6.22 freezed_annotation: ^2.4.1 gcloud: ^0.8.12 - google_generative_ai: 0.4.0 + google_generative_ai: 0.4.3 googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 diff --git 
a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart index b6835b89..3b8ebc1b 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart @@ -322,6 +322,11 @@ class ChatGoogleGenerativeAI temperature: options?.temperature ?? defaultOptions.temperature, topP: options?.topP ?? defaultOptions.topP, topK: options?.topK ?? defaultOptions.topK, + responseMimeType: + options?.responseMimeType ?? defaultOptions.responseMimeType, + responseSchema: + (options?.responseSchema ?? defaultOptions.responseSchema) + ?.toSchema(), ), (options?.tools ?? defaultOptions.tools)?.toToolList(), (options?.toolChoice ?? defaultOptions.toolChoice)?.toToolConfig(), diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart index 8623a2c1..8bf41f84 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart @@ -198,14 +198,17 @@ extension ChatToolListMapper on List { (tool) => g.FunctionDeclaration( tool.name, tool.description, - _mapJsonSchemaToSchema(tool.inputJsonSchema), + tool.inputJsonSchema.toSchema(), ), ).toList(growable: false), ), ]; } +} - g.Schema _mapJsonSchemaToSchema(final Map jsonSchema) { +extension SchemaMapper on Map { + g.Schema toSchema() { + final jsonSchema = this; final type = jsonSchema['type'] as String; final description = jsonSchema['description'] as String?; final nullable = jsonSchema['nullable'] as bool?; @@ -248,7 +251,7 @@ extension ChatToolListMapper on List { ); case 'array': if (items != null) { - final itemsSchema = _mapJsonSchemaToSchema(items); + final itemsSchema = items.toSchema(); return g.Schema.array( items: itemsSchema, description: description, @@ -259,7 +262,10 @@ extension ChatToolListMapper on List { case 'object': if (properties != null) { final propertiesSchema = properties.map( - (key, value) => MapEntry(key, _mapJsonSchemaToSchema(value)), + (key, value) => MapEntry( + key, + (value as Map).toSchema(), + ), ); return g.Schema.object( properties: propertiesSchema, diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart index b3553cab..2971a22b 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart @@ -13,6 +13,8 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { this.maxOutputTokens, this.temperature, this.stopSequences, + this.responseMimeType, + this.responseSchema, this.safetySettings, super.tools, super.toolChoice, @@ -68,6 +70,39 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// The stop sequence will not be included as part of the response. final List? stopSequences; + /// Output response mimetype of the generated candidate text. + /// + /// Supported mimetype: + /// - `text/plain`: (default) Text output. + /// - `application/json`: JSON response in the candidates. + final String? responseMimeType; + + /// Output response schema of the generated candidate text. + /// Following the [JSON Schema specification](https://json-schema.org). 
+ /// + /// - Note: This only applies when the specified ``responseMIMEType`` supports + /// a schema; currently this is limited to `application/json`. + /// + /// Example: + /// ```json + /// { + /// 'type': 'object', + /// 'properties': { + /// 'answer': { + /// 'type': 'string', + /// 'description': 'The answer to the question being asked', + /// }, + /// 'sources': { + /// 'type': 'array', + /// 'items': {'type': 'string'}, + /// 'description': 'The sources used to answer the question', + /// }, + /// }, + /// 'required': ['answer', 'sources'], + /// }, + /// ``` + final Map? responseSchema; + /// A list of unique [ChatGoogleGenerativeAISafetySetting] instances for blocking /// unsafe content. /// diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 0f07b091..a2d2670a 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -20,7 +20,7 @@ dependencies: collection: ">=1.17.0 <1.19.0" fetch_client: ^1.0.2 gcloud: ^0.8.12 - google_generative_ai: 0.4.0 + google_generative_ai: 0.4.3 googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 From 214e8ba3278a3cc34a49d58abfd39792ba5ddb9b Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 11:22:44 +0200 Subject: [PATCH 048/251] feat!: Update ChatGoogleGenerativeAI default model to gemini-1.5-flash (#462) --- .../chat_models/integrations/googleai.md | 28 +++++++++---------- .../vertex_ai/chat_firebase_vertex_ai.dart | 2 +- .../google_ai/chat_google_generative_ai.dart | 22 +++++++-------- .../lib/src/chat_models/google_ai/types.dart | 2 +- 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/docs/modules/model_io/models/chat_models/integrations/googleai.md b/docs/modules/model_io/models/chat_models/integrations/googleai.md index 87a43755..033c7672 100644 --- a/docs/modules/model_io/models/chat_models/integrations/googleai.md +++ b/docs/modules/model_io/models/chat_models/integrations/googleai.md @@ -7,22 +7,22 @@ Wrapper around [Google AI for Developers](https://ai.google.dev/) API (aka Gemin To use `ChatGoogleGenerativeAI` you need to have an API key. You can get one [here](https://aistudio.google.com/app/apikey). The following models are available: -- `gemini-1.0-pro` (or `gemini-pro`): - * text -> text model - * Max input token: 30720 - * Max output tokens: 2048 -- `gemini-pro-vision`: - * text / image -> text model - * Max input token: 12288 - * Max output tokens: 4096 -- `gemini-1.5-pro-latest`: text / image -> text model +- `gemini-1.5-flash`: * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 -- `gemini-1.5-flash-latest`: +- `gemini-1.5-pro`: text / image -> text model * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 +- `gemini-pro-vision`: + * text / image -> text model + * Max input token: 12288 + * Max output tokens: 4096 +- `gemini-1.0-pro` (or `gemini-pro`): + * text -> text model + * Max input token: 30720 + * Max output tokens: 2048 Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/models) for the updated list. 
@@ -34,7 +34,7 @@ final apiKey = Platform.environment['GOOGLEAI_API_KEY']; final chatModel = ChatGoogleGenerativeAI( apiKey: apiKey, defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro-latest', + model: 'gemini-1.5-pro', temperature: 0, ), ); @@ -63,7 +63,7 @@ final apiKey = Platform.environment['GOOGLEAI_API_KEY']; final chatModel = ChatGoogleGenerativeAI( apiKey: apiKey, defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro-latest', + model: 'gemini-1.5-pro', temperature: 0, ), ); @@ -99,7 +99,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ final chatModel = ChatGoogleGenerativeAI( apiKey: apiKey, defaultOptions: const ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro-latest', + model: 'gemini-1.5-pro', temperature: 0, ), ); @@ -138,7 +138,7 @@ const tool = ToolSpec( ); final chatModel = ChatGoogleGenerativeAI( defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro-latest', + model: 'gemini-1.5-pro', temperature: 0, tools: [tool], ), diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 251f3913..94da974d 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -154,7 +154,7 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// - [ChatFirebaseVertexAI.location] ChatFirebaseVertexAI({ super.defaultOptions = const ChatFirebaseVertexAIOptions( - model: 'gemini-1.0-pro', + model: 'gemini-1.5-flash', ), this.app, this.appCheck, diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart index 3b8ebc1b..58934755 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart @@ -31,22 +31,22 @@ import 'types.dart'; /// ### Available models /// /// The following models are available: -/// - `gemini-1.0-pro` (or `gemini-pro`): -/// * text -> text model -/// * Max input token: 30720 -/// * Max output tokens: 2048 -/// - `gemini-pro-vision`: -/// * text / image -> text model -/// * Max input token: 12288 -/// * Max output tokens: 4096 -/// - `gemini-1.5-pro-latest`: text / image -> text model +/// - `gemini-1.5-flash`: /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 -/// - `gemini-1.5-flash-latest`: +/// - `gemini-1.5-pro`: text / image -> text model /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 +/// - `gemini-pro-vision`: +/// * text / image -> text model +/// * Max input token: 12288 +/// * Max output tokens: 4096 +/// - `gemini-1.0-pro` (or `gemini-pro`): +/// * text -> text model +/// * Max input token: 30720 +/// * Max output tokens: 2048 /// /// Mind that this list may not be up-to-date. /// Refer to the [documentation](https://ai.google.dev/models) for the updated list. @@ -211,7 +211,7 @@ class ChatGoogleGenerativeAI final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatGoogleGenerativeAIOptions( - model: 'gemini-pro', + model: 'gemini-1.5-flash', ), }) : _currentModel = defaultOptions.model ?? 
'', _httpClient = createDefaultHttpClient( diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart index 2971a22b..c86c80a5 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart @@ -6,7 +6,7 @@ import 'package:langchain_core/chat_models.dart'; class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// {@macro chat_google_generative_ai_options} const ChatGoogleGenerativeAIOptions({ - this.model = 'gemini-pro', + this.model = 'gemini-1.5-flash', this.topP, this.topK, this.candidateCount, From cf7559e0f8a4e4458927b094a12d70d5c34d66a6 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 15 Jun 2024 11:26:23 +0200 Subject: [PATCH 049/251] feat: Support response MIME type in ChatFirebaseVertexAI (#461) (#463) --- .../vertex_ai/chat_firebase_vertex_ai.dart | 6 ++++++ .../lib/src/chat_models/vertex_ai/mappers.dart | 14 ++++++++++---- .../lib/src/chat_models/vertex_ai/types.dart | 8 ++++++++ 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 94da974d..77ce67d6 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -263,6 +263,12 @@ class ChatFirebaseVertexAI extends BaseChatModel { temperature: options?.temperature ?? defaultOptions.temperature, topP: options?.topP ?? defaultOptions.topP, topK: options?.topK ?? defaultOptions.topK, + responseMimeType: + options?.responseMimeType ?? defaultOptions.responseMimeType, + // responseSchema not supported yet + // responseSchema: + // (options?.responseSchema ?? defaultOptions.responseSchema) + // ?.toSchema(), ), (options?.tools ?? defaultOptions.tools)?.toToolList(), (options?.toolChoice ?? 
defaultOptions.toolChoice)?.toToolConfig(), diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart index d256b815..41517a64 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart @@ -197,14 +197,17 @@ extension ChatToolListMapper on List { (tool) => f.FunctionDeclaration( tool.name, tool.description, - _mapJsonSchemaToSchema(tool.inputJsonSchema), + tool.inputJsonSchema.toSchema(), ), ).toList(growable: false), ), ]; } +} - f.Schema _mapJsonSchemaToSchema(final Map jsonSchema) { +extension SchemaMapper on Map { + f.Schema toSchema() { + final jsonSchema = this; final type = jsonSchema['type'] as String; final description = jsonSchema['description'] as String?; final nullable = jsonSchema['nullable'] as bool?; @@ -247,7 +250,7 @@ extension ChatToolListMapper on List { ); case 'array': if (items != null) { - final itemsSchema = _mapJsonSchemaToSchema(items); + final itemsSchema = items.toSchema(); return f.Schema.array( description: description, nullable: nullable, @@ -258,7 +261,10 @@ extension ChatToolListMapper on List { case 'object': if (properties != null) { final propertiesSchema = properties.map( - (key, value) => MapEntry(key, _mapJsonSchemaToSchema(value)), + (key, value) => MapEntry( + key, + (value as Map).toSchema(), + ), ); return f.Schema.object( properties: propertiesSchema, diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart index 7a0ddbdb..d2aee55d 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart @@ -13,6 +13,7 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { this.maxOutputTokens, this.temperature, this.stopSequences, + this.responseMimeType, this.safetySettings, super.tools, super.toolChoice, @@ -69,6 +70,13 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// The stop sequence will not be included as part of the response. final List? stopSequences; + /// Output response mimetype of the generated candidate text. + /// + /// Supported mimetype: + /// - `text/plain`: (default) Text output. + /// - `application/json`: JSON response in the candidates. + final String? responseMimeType; + /// A list of unique [ChatFirebaseVertexAISafetySetting] instances for blocking /// unsafe content. /// From cec82309c0931ba34d30edf54b8682fb4d06c765 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jun 2024 10:48:59 +0200 Subject: [PATCH 050/251] build(deps): bump actions/checkout from 4.1.6 to 4.1.7 (#464) Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.6 to 4.1.7. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/a5ac7e51b41094c92402da3b24376905380afc29...692973e3d937129bcbf40652eb9f2f61becf3332) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docs.yaml | 2 +- .github/workflows/test.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 5520d768..98f80b82 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -27,7 +27,7 @@ jobs: timeout-minutes: 30 steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 - name: Setup Pages uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 2b4ff0c5..0e6c4e20 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: "${{ github.event.pull_request.base.sha }}" # Required for pull_request_target fetch-depth: 0 From 37bf5655705be61253bd6ab92f162c1b57b138d1 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 20 Jun 2024 23:39:56 +0200 Subject: [PATCH 051/251] feat: Add support for TavilySearchResultsTool and TavilyAnswerTool (#467) --- examples/browser_summarizer/pubspec.lock | 7 + .../browser_summarizer/pubspec_overrides.yaml | 4 +- examples/docs_examples/pubspec.lock | 11 +- examples/docs_examples/pubspec_overrides.yaml | 4 +- examples/hello_world_flutter/pubspec.lock | 4 +- examples/wikivoyage_eu/pubspec.lock | 7 + examples/wikivoyage_eu/pubspec_overrides.yaml | 4 +- packages/langchain/README.md | 47 ++++--- .../langchain_chroma/pubspec_overrides.yaml | 4 +- packages/langchain_community/README.md | 4 + .../lib/src/tools/tavily/mappers.dart | 21 +++ .../lib/src/tools/tavily/tavily.dart | 3 + .../lib/src/tools/tavily/tavily_answer.dart | 102 ++++++++++++++ .../tools/tavily/tavily_search_results.dart | 130 +++++++++++++++++ .../lib/src/tools/tavily/types.dart | 131 ++++++++++++++++++ .../lib/src/tools/tools.dart | 1 + packages/langchain_community/pubspec.yaml | 1 + .../pubspec_overrides.yaml | 4 +- .../test/tools/tavily_test.dart | 31 +++++ .../langchain_core/lib/src/tools/base.dart | 1 - .../langchain_openai/pubspec_overrides.yaml | 4 +- .../langchain_supabase/pubspec_overrides.yaml | 4 +- 22 files changed, 494 insertions(+), 35 deletions(-) create mode 100644 packages/langchain_community/lib/src/tools/tavily/mappers.dart create mode 100644 packages/langchain_community/lib/src/tools/tavily/tavily.dart create mode 100644 packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart create mode 100644 packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart create mode 100644 packages/langchain_community/lib/src/tools/tavily/types.dart create mode 100644 packages/langchain_community/test/tools/tavily_test.dart diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index 17d43f80..c32f085f 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -491,6 +491,13 @@ packages: url: "https://pub.dev" source: hosted version: "1.2.0" + tavily_dart: + dependency: "direct overridden" + description: + path: "../../packages/tavily_dart" + relative: true + source: path + version: "0.0.1-dev.1" term_glyph: dependency: transitive 
description: diff --git a/examples/browser_summarizer/pubspec_overrides.yaml b/examples/browser_summarizer/pubspec_overrides.yaml index 3947b2ae..49be75a7 100644 --- a/examples/browser_summarizer/pubspec_overrides.yaml +++ b/examples/browser_summarizer/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_community +# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_community,tavily_dart dependency_overrides: langchain: path: ../../packages/langchain @@ -10,3 +10,5 @@ dependency_overrides: path: ../../packages/langchain_openai openai_dart: path: ../../packages/openai_dart + tavily_dart: + path: ../../packages/tavily_dart diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 6199523b..2a5b086d 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -148,10 +148,10 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be + sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "0.4.3" google_identity_services_web: dependency: transitive description: @@ -413,6 +413,13 @@ packages: url: "https://pub.dev" source: hosted version: "1.2.0" + tavily_dart: + dependency: "direct overridden" + description: + path: "../../packages/tavily_dart" + relative: true + source: path + version: "0.0.1-dev.1" term_glyph: dependency: transitive description: diff --git a/examples/docs_examples/pubspec_overrides.yaml b/examples/docs_examples/pubspec_overrides.yaml index e02da308..cc3f10d6 100644 --- a/examples/docs_examples/pubspec_overrides.yaml +++ b/examples/docs_examples/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: chromadb,langchain,langchain_chroma,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai,langchain_core,langchain_community +# melos_managed_dependency_overrides: chromadb,langchain,langchain_chroma,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai,langchain_core,langchain_community,tavily_dart dependency_overrides: chromadb: path: ../../packages/chromadb @@ -24,5 +24,7 @@ dependency_overrides: path: ../../packages/ollama_dart openai_dart: path: ../../packages/openai_dart + tavily_dart: + path: ../../packages/tavily_dart vertex_ai: path: ../../packages/vertex_ai diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index d9c9c29f..9bbfa0f2 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -138,10 +138,10 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be + sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "0.4.3" google_identity_services_web: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index 49dc9df4..18f2890b 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -292,6 +292,13 @@ packages: url: "https://pub.dev" source: hosted version: 
"1.2.0" + tavily_dart: + dependency: "direct overridden" + description: + path: "../../packages/tavily_dart" + relative: true + source: path + version: "0.0.1-dev.1" term_glyph: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec_overrides.yaml b/examples/wikivoyage_eu/pubspec_overrides.yaml index 075ddc4f..6f7e46d1 100644 --- a/examples/wikivoyage_eu/pubspec_overrides.yaml +++ b/examples/wikivoyage_eu/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_core,langchain_community,langchain_ollama,ollama_dart +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_community,langchain_ollama,ollama_dart,tavily_dart dependency_overrides: langchain: path: ../../packages/langchain @@ -10,3 +10,5 @@ dependency_overrides: path: ../../packages/langchain_ollama ollama_dart: path: ../../packages/ollama_dart + tavily_dart: + path: ../../packages/tavily_dart diff --git a/packages/langchain/README.md b/packages/langchain/README.md index b86c0eae..83608f5a 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -68,25 +68,25 @@ Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/pack

    -| Package | Version | Description | -|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | -| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | -| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components | -| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4 Turbo, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | -| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | -| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | -| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| -| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | -| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | +| Package | Version | Description | +|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | +| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | +| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components and utilities | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | +| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | Functionality provided by each integration package: | Package | LLMs | Chat models | Embeddings | Vector stores | Chains | Agents | Tools | |---------------------------------------------------------------------|------|-------------|------------|---------------|--------|--------|-------| -| [langchain_community](https://pub.dev/packages/langchain_community) | | | | | | | | +| [langchain_community](https://pub.dev/packages/langchain_community) | | | | ✔ | | | ✔ | | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | | ✔ | ✔ | ✔ | | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | ✔ | | | | | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | | ✔ | | | | | | @@ -98,15 +98,16 @@ Functionality provided by each integration package: The following packages are maintained (and used internally) by LangChain.dart, although they can also be used independently: -| Package | Version | Description | -|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|----------------------------------------------| -| [anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | Anthropic (Claude API) client | -| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | Chroma DB API client | -| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | Google AI for Developers (Gemini API) client | -| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | Mistral AI API client | -| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | Ollama API client | -| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | OpenAI API client | -| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | GCP Vertex AI API client | +| Package | Version | Description | +|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------| +| 
[anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | [Anthropic](https://docs.anthropic.com/en/api) API client | +| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | [Chroma DB](https://trychroma.com/) API client | +| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | [Google AI for Developers](https://ai.google.dev/) API client | +| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | [Mistral AI](https://docs.mistral.ai/api) API client | +| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | [Ollama](https://ollama.ai/) API client | +| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | [OpenAI](https://platform.openai.com/docs/api-reference) API client | +| [tavily_dart](https://pub.dev/packages/tavily_dart) | [![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) | [Tavily](https://tavily.com) API client | +| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | [GCP Vertex AI](https://cloud.google.com/vertex-ai) API client | ## Getting started diff --git a/packages/langchain_chroma/pubspec_overrides.yaml b/packages/langchain_chroma/pubspec_overrides.yaml index 4583d481..3470527c 100644 --- a/packages/langchain_chroma/pubspec_overrides.yaml +++ b/packages/langchain_chroma/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: chromadb,langchain_openai,openai_dart,langchain_core,langchain_community,langchain +# melos_managed_dependency_overrides: chromadb,langchain_openai,openai_dart,langchain_core,langchain_community,langchain,tavily_dart dependency_overrides: chromadb: path: ../chromadb @@ -12,3 +12,5 @@ dependency_overrides: path: ../langchain_openai openai_dart: path: ../openai_dart + tavily_dart: + path: ../tavily_dart diff --git a/packages/langchain_community/README.md b/packages/langchain_community/README.md index b76ee3c3..1dcb80e3 100644 --- a/packages/langchain_community/README.md +++ b/packages/langchain_community/README.md @@ -27,6 +27,10 @@ The most popular third-party integrations have their own packages (e.g. [langcha * `WebBaseLoader`: for web pages. - Tools: * `CalculatorTool`: to calculate math expressions. + * `TavilySearchResultsTool`: returns a list of results for a query using the [Tavily](https://tavily.com) search engine. + * `TavilyAnswerTool`: returns an answer for a query using the [Tavily](https://tavily.com) search engine. +- Vector stores: + * `ObjectBoxVectorStore`: [ObjectBox](https://objectbox.io/) on-device vector database. Check out the [API reference](https://pub.dev/documentation/langchain_community/latest) for more details. 
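As a quick usage sketch for the two Tavily tools listed above, the snippet below is assembled from the doc comments and the test added later in this commit; it assumes a `TAVILY_API_KEY` environment variable is set and is only an illustration, not part of the patch.

```dart
import 'dart:io';

import 'package:langchain_community/langchain_community.dart';

Future<void> main() async {
  final apiKey = Platform.environment['TAVILY_API_KEY']!;

  // Returns a list of search results; its toString() is JSON-encoded
  // (see the TavilySearchResultsTool sources added below).
  final searchTool = TavilySearchResultsTool(apiKey: apiKey);
  final results = await searchTool.invoke('What is the weather like in New York?');
  print(results);

  // Returns a direct answer to the query instead of a result list.
  final answerTool = TavilyAnswerTool(apiKey: apiKey);
  final answer = await answerTool.invoke('What is the weather like in New York?');
  print(answer);

  searchTool.close();
  answerTool.close();
}
```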
diff --git a/packages/langchain_community/lib/src/tools/tavily/mappers.dart b/packages/langchain_community/lib/src/tools/tavily/mappers.dart new file mode 100644 index 00000000..21e907e5 --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/mappers.dart @@ -0,0 +1,21 @@ +// ignore_for_file: public_member_api_docs +import 'package:tavily_dart/tavily_dart.dart'; + +import 'types.dart'; + +extension TavilySearchDepthX on TavilySearchDepth { + SearchRequestSearchDepth toSearchRequestSearchDepth() => switch (this) { + TavilySearchDepth.basic => SearchRequestSearchDepth.basic, + TavilySearchDepth.advanced => SearchRequestSearchDepth.advanced, + }; +} + +extension TavilySearchResultX on SearchResult { + TavilySearchResult toTavilySearchResult() => TavilySearchResult( + title: title, + url: url, + content: content, + rawContent: rawContent, + score: score, + ); +} diff --git a/packages/langchain_community/lib/src/tools/tavily/tavily.dart b/packages/langchain_community/lib/src/tools/tavily/tavily.dart new file mode 100644 index 00000000..64f26c5d --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/tavily.dart @@ -0,0 +1,3 @@ +export 'tavily_answer.dart'; +export 'tavily_search_results.dart'; +export 'types.dart'; diff --git a/packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart b/packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart new file mode 100644 index 00000000..a5ad637f --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart @@ -0,0 +1,102 @@ +import 'dart:async'; + +import 'package:http/http.dart' as http; +import 'package:langchain_core/tools.dart'; +import 'package:tavily_dart/tavily_dart.dart'; + +import 'mappers.dart'; +import 'tavily_search_results.dart'; +import 'types.dart'; + +/// Tool that queries the [Tavily Search API](https://tavily.com) and +/// gets an answer to the search query. +/// +/// The Tavily API uses API keys for authentication. Visit the +/// [Tavily console](https://app.tavily.com/) to retrieve the API key you'll +/// use in your requests. +/// +/// If you want to get a list of search results instead, use the +/// [TavilySearchResultsTool] instead. +/// +/// Example: +/// ```dart +/// final tool = TavilyAnswerTool( +/// apiKey: Platform.environment['TAVILY_API_KEY']!, +/// ); +/// final res = await tool.invoke('What is the weather like in New York?'); +/// print(res); +/// // The current weather in New York is clear with a temperature of 22.8°C (73.0°F)... +/// ``` +final class TavilyAnswerTool extends StringTool { + /// Creates a [TavilyAnswerTool] instance. + /// + /// Main configuration options: + /// - `apiKey`: your Tavily API key. You can find your API key in the + /// [Tavily console](https://app.tavily.com/). + /// + /// Advance configuration options: + /// - `baseUrl`: the base URL to use. Defaults to Tavily's API URL. You can + /// override this to use a different API URL, or to use a proxy. + /// - `headers`: global headers to send with every request. You can use + /// this to set custom headers, or to override the default headers. + /// - `queryParams`: global query parameters to send with every request. You + /// can use this to set custom query parameters (e.g. Azure OpenAI API + /// required to attach a `version` query parameter to every request). + /// - `client`: the HTTP client to use. You can set your own HTTP client if + /// you need further customization (e.g. to use a Socks5 proxy). + TavilyAnswerTool({ + required this.apiKey, + final String? 
baseUrl, + final Map headers = const {}, + final Map queryParams = const {}, + final http.Client? client, + super.defaultOptions = const TavilyAnswerToolOptions(), + }) : _client = TavilyClient( + baseUrl: baseUrl, + headers: headers, + queryParams: queryParams, + client: client, + ), + super( + name: 'tavily_answer', + description: + 'A search engine optimized for comprehensive, accurate, and trusted answers. ' + 'Useful for when you need to answer questions about current events. ' + 'The tool returns an answer to the search query - not the search results.', + inputDescription: 'The search query to get an answer to. ' + 'Eg: "What is the weather like in New York?"', + ); + + /// A client for interacting with Tavily API. + final TavilyClient _client; + + /// Your Tavily API key. + String apiKey; + + @override + Future invokeInternal( + final String toolInput, { + final TavilyAnswerToolOptions? options, + }) async { + final res = await _client.search( + request: SearchRequest( + apiKey: apiKey, + query: toolInput, + includeAnswer: true, + searchDepth: (options?.searchDepth ?? defaultOptions.searchDepth) + .toSearchRequestSearchDepth(), + maxResults: options?.maxResults ?? defaultOptions.maxResults, + includeDomains: + options?.includeDomains ?? defaultOptions.includeDomains, + excludeDomains: + options?.excludeDomains ?? defaultOptions.excludeDomains, + ), + ); + return res.answer ?? ''; + } + + @override + void close() { + _client.endSession(); + } +} diff --git a/packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart b/packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart new file mode 100644 index 00000000..7e5693c7 --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart @@ -0,0 +1,130 @@ +import 'dart:async'; + +import 'package:http/http.dart' as http; +import 'package:langchain_core/tools.dart'; +import 'package:tavily_dart/tavily_dart.dart'; + +import 'mappers.dart'; +import 'tavily_answer.dart'; +import 'types.dart'; + +/// Tool that queries the [Tavily Search API](https://tavily.com) and +/// gets back a list of search results. +/// +/// The Tavily API uses API keys for authentication. Visit the +/// [Tavily console](https://app.tavily.com/) to retrieve the API key you'll +/// use in your requests. +/// +/// If you want to get directly an answer to a search query, use the +/// [TavilyAnswerTool] instead. +/// +/// Example: +/// ```dart +/// final tool = TavilySearchResultsTool( +/// apiKey: Platform.environment['TAVILY_API_KEY']!, +/// ); +/// final res = await tool.invoke('What is the weather like in New York?'); +/// print(res); +/// // [ +/// // { +/// // "title": "Weather in New York", +/// // "url": "https://www.weatherapi.com/", +/// // "content": "{'location': {'lat': 40.71, 'lon': -74.01}, 'current': {'last_updated': '2024-06-20 17:00', 'temp_c': 31.1, 'condition': {'text': 'Sunny', 'icon': '//cdn.weatherapi.com/weather/64x64/day/113.png'}, 'wind_mph': 2.2, 'wind_kph': 3.6, 'wind_degree': 161, 'wind_dir': 'SSE', 'pressure_mb': 1025.0, 'pressure_in': 30.26, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 48, 'cloud': 0, 'feelslike_c': 33.1, 'feelslike_f': 91.6, 'windchill_c': 29.5, 'windchill_f': 85.0, 'heatindex_c': 30.6, 'heatindex_f': 87.0, 'dewpoint_c': 17.7, 'dewpoint_f': 63.8, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 7.0, 'gust_mph': 16.4, 'gust_kph': 26.4}}", +/// // "score": 0.98855 +/// // }, +/// // ... 
+/// // ] +/// ``` +final class TavilySearchResultsTool + extends Tool { + /// Creates a [TavilySearchResultsTool] instance. + /// + /// Main configuration options: + /// - `apiKey`: your Tavily API key. You can find your API key in the + /// [Tavily console](https://app.tavily.com/). + /// + /// Advance configuration options: + /// - `baseUrl`: the base URL to use. Defaults to Tavily's API URL. You can + /// override this to use a different API URL, or to use a proxy. + /// - `headers`: global headers to send with every request. You can use + /// this to set custom headers, or to override the default headers. + /// - `queryParams`: global query parameters to send with every request. You + /// can use this to set custom query parameters (e.g. Azure OpenAI API + /// required to attach a `version` query parameter to every request). + /// - `client`: the HTTP client to use. You can set your own HTTP client if + /// you need further customization (e.g. to use a Socks5 proxy). + TavilySearchResultsTool({ + required this.apiKey, + final String? baseUrl, + final Map headers = const {}, + final Map queryParams = const {}, + final http.Client? client, + super.defaultOptions = const TavilySearchResultsToolOptions(), + }) : _client = TavilyClient( + baseUrl: baseUrl, + headers: headers, + queryParams: queryParams, + client: client, + ), + super( + name: 'tavily_search_results', + description: + 'A search engine optimized for comprehensive, accurate, and trusted results. ' + 'Useful for when you need to answer questions about current events. ' + 'The tool returns a JSON object with search results.', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'query': { + 'type': 'string', + 'description': 'The search query to look up. ' + 'Eg: "What is the weather like in New York?"', + }, + }, + 'required': ['query'], + }, + ); + + /// A client for interacting with Tavily API. + final TavilyClient _client; + + /// Your Tavily API key. + String apiKey; + + @override + Future invokeInternal( + final String input, { + final TavilySearchResultsToolOptions? options, + }) async { + final res = await _client.search( + request: SearchRequest( + apiKey: apiKey, + query: input, + searchDepth: (options?.searchDepth ?? defaultOptions.searchDepth) + .toSearchRequestSearchDepth(), + maxResults: options?.maxResults ?? defaultOptions.maxResults, + includeRawContent: + options?.includeRawContent ?? defaultOptions.includeRawContent, + includeDomains: + options?.includeDomains ?? defaultOptions.includeDomains, + excludeDomains: + options?.excludeDomains ?? defaultOptions.excludeDomains, + ), + ); + return TavilySearchResults( + results: res.results + .map((r) => r.toTavilySearchResult()) + .toList(growable: false), + ); + } + + @override + String getInputFromJson(final Map json) { + return json['query'] as String; + } + + @override + void close() { + _client.endSession(); + } +} diff --git a/packages/langchain_community/lib/src/tools/tavily/types.dart b/packages/langchain_community/lib/src/tools/tavily/types.dart new file mode 100644 index 00000000..d6dc9134 --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/types.dart @@ -0,0 +1,131 @@ +import 'dart:convert'; + +import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; + +import 'tavily_answer.dart'; +import 'tavily_search_results.dart'; + +/// The depth of the search. +enum TavilySearchDepth { + /// Basic search depth. + basic, + + /// Advanced search depth. 
+ advanced, +} + +/// {@template tavily_search_results} +/// A search results from the Tavily search engine. +/// {@endtemplate} +@immutable +class TavilySearchResults { + /// {@macro tavily_search_results} + const TavilySearchResults({ + required this.results, + }); + + /// The search results. + final List results; + + @override + String toString() { + return json.encode( + results + .map( + (result) => { + 'title': result.title, + 'url': result.url, + 'content': result.content, + 'rawContent': result.rawContent, + 'score': result.score, + }, + ) + .toList(growable: false), + ); + } +} + +/// {@template tavily_search_result} +/// A search result from the Tavily search engine. +/// {@endtemplate} +@immutable +class TavilySearchResult { + /// {@macro tavily_search_result} + const TavilySearchResult({ + required this.title, + required this.url, + required this.content, + this.rawContent, + required this.score, + }); + + /// The title of the search result url. + final String title; + + /// The url of the search result. + final String url; + + /// The most query related content from the scraped url. + final String content; + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + final String? rawContent; + + /// The relevance score of the search result. + final double score; +} + +/// {@template tavily_search_results_tool_options} +/// Generation options to pass into the [TavilySearchResultsTool]. +/// {@endtemplate} +class TavilySearchResultsToolOptions extends ToolOptions { + /// {@macro tavily_search_results_tool_options} + const TavilySearchResultsToolOptions({ + this.maxResults = 5, + this.searchDepth = TavilySearchDepth.basic, + this.includeRawContent = false, + this.includeDomains, + this.excludeDomains, + }); + + /// The number of maximum search results to return. + final int maxResults; + + /// The depth of the search. + final TavilySearchDepth searchDepth; + + /// Include raw content in the search results. + final bool includeRawContent; + + /// A list of domains to specifically include in the search results. + final List? includeDomains; + + /// A list of domains to specifically exclude from the search results. + final List? excludeDomains; +} + +/// {@template tavily_answer_tool_options} +/// Generation options to pass into the [TavilyAnswerTool]. +/// {@endtemplate} +class TavilyAnswerToolOptions extends ToolOptions { + /// {@macro tavily_answer_tool_options} + const TavilyAnswerToolOptions({ + this.maxResults = 5, + this.searchDepth = TavilySearchDepth.basic, + this.includeDomains, + this.excludeDomains, + }); + + /// The number of maximum search results to return. + final int maxResults; + + /// The depth of the search. + final TavilySearchDepth searchDepth; + + /// A list of domains to specifically include in the search results. + final List? includeDomains; + + /// A list of domains to specifically exclude from the search results. + final List? 
excludeDomains; +} diff --git a/packages/langchain_community/lib/src/tools/tools.dart b/packages/langchain_community/lib/src/tools/tools.dart index 9601880a..4aa306f8 100644 --- a/packages/langchain_community/lib/src/tools/tools.dart +++ b/packages/langchain_community/lib/src/tools/tools.dart @@ -1 +1,2 @@ export 'calculator.dart'; +export 'tavily/tavily.dart'; diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index e2286be4..b386fb9d 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -26,6 +26,7 @@ dependencies: math_expressions: ^2.4.0 meta: ^1.11.0 objectbox: ^4.0.1 + tavily_dart: ^0.0.1-dev.1 uuid: ^4.3.3 dev_dependencies: diff --git a/packages/langchain_community/pubspec_overrides.yaml b/packages/langchain_community/pubspec_overrides.yaml index de62cfcc..19febce5 100644 --- a/packages/langchain_community/pubspec_overrides.yaml +++ b/packages/langchain_community/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_core,langchain_openai,openai_dart +# melos_managed_dependency_overrides: langchain_core,langchain_openai,openai_dart,tavily_dart dependency_overrides: langchain_core: path: ../langchain_core @@ -6,3 +6,5 @@ dependency_overrides: path: ../langchain_openai openai_dart: path: ../openai_dart + tavily_dart: + path: ../tavily_dart diff --git a/packages/langchain_community/test/tools/tavily_test.dart b/packages/langchain_community/test/tools/tavily_test.dart new file mode 100644 index 00000000..85214c6c --- /dev/null +++ b/packages/langchain_community/test/tools/tavily_test.dart @@ -0,0 +1,31 @@ +import 'dart:convert'; +import 'dart:io'; + +import 'package:langchain_community/langchain_community.dart'; +import 'package:test/test.dart'; + +void main() { + group('TavilySearchResultsTool tests', () { + test('Calculate expressions', () async { + final tool = TavilySearchResultsTool( + apiKey: Platform.environment['TAVILY_API_KEY']!, + ); + final res = await tool.invoke('What is the weather like in New York?'); + expect(res.results, isNotEmpty); + final jsonString = res.toString(); + expect(() => json.decode(jsonString), returnsNormally); + tool.close(); + }); + }); + + group('TavilyAnswerTool tests', () { + test('Invoke TavilyAnswerTool', () async { + final tool = TavilyAnswerTool( + apiKey: Platform.environment['TAVILY_API_KEY']!, + ); + final res = await tool.invoke('What is the weather like in New York?'); + expect(res, isNotEmpty); + tool.close(); + }); + }); +} diff --git a/packages/langchain_core/lib/src/tools/base.dart b/packages/langchain_core/lib/src/tools/base.dart index e676fc6b..37f9f9d2 100644 --- a/packages/langchain_core/lib/src/tools/base.dart +++ b/packages/langchain_core/lib/src/tools/base.dart @@ -12,7 +12,6 @@ import 'types.dart'; /// {@template tool_spec} /// The specification of a LangChain tool without the actual implementation. 
/// {@endtemplate} -@immutable class ToolSpec { /// {@macro tool_spec} const ToolSpec({ diff --git a/packages/langchain_openai/pubspec_overrides.yaml b/packages/langchain_openai/pubspec_overrides.yaml index 18a3bcaa..d4293e4f 100644 --- a/packages/langchain_openai/pubspec_overrides.yaml +++ b/packages/langchain_openai/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: openai_dart,langchain_core,langchain_community,langchain +# melos_managed_dependency_overrides: openai_dart,langchain_core,langchain_community,langchain,tavily_dart dependency_overrides: langchain: path: ../langchain @@ -8,3 +8,5 @@ dependency_overrides: path: ../langchain_core openai_dart: path: ../openai_dart + tavily_dart: + path: ../tavily_dart diff --git a/packages/langchain_supabase/pubspec_overrides.yaml b/packages/langchain_supabase/pubspec_overrides.yaml index 5eb34624..d5cb8df4 100644 --- a/packages/langchain_supabase/pubspec_overrides.yaml +++ b/packages/langchain_supabase/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_openai,openai_dart,langchain_core,langchain_community +# melos_managed_dependency_overrides: langchain_openai,openai_dart,langchain_core,langchain_community,tavily_dart # melos_managed_dependency_overrides: langchain dependency_overrides: langchain: @@ -11,3 +11,5 @@ dependency_overrides: path: ../langchain_openai openai_dart: path: ../openai_dart + tavily_dart: + path: ../tavily_dart From 937cec0724de9945adf234343fe70b9bc34e3798 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 22 Jun 2024 15:48:14 +0200 Subject: [PATCH 052/251] feat: Add support for tool use in anthropic_sdk_dart client (#469) --- packages/anthropic_sdk_dart/README.md | 171 +- .../example/anthropic_sdk_dart_example.dart | 200 + .../lib/anthropic_sdk_dart.dart | 2 +- .../anthropic_sdk_dart/lib/src/client.dart | 7 +- .../lib/src/extensions.dart | 59 + .../lib/src/generated/schema/block.dart | 100 + .../lib/src/generated/schema/block_delta.dart | 56 + .../schema/create_message_request.dart | 95 +- .../schema/message_stream_event.dart | 10 +- .../lib/src/generated/schema/schema.dart | 5 +- .../src/generated/schema/schema.freezed.dart | 4063 +++++++++++++---- .../lib/src/generated/schema/schema.g.dart | 190 +- .../lib/src/generated/schema/stop_reason.dart | 2 + .../generated/schema/text_block_delta.dart | 44 - .../lib/src/generated/schema/tool.dart | 59 + .../lib/src/generated/schema/tool_choice.dart | 54 + .../generated/schema/tool_choice_type.dart | 24 + .../oas/anthropic_openapi_curated.yaml | 230 +- packages/anthropic_sdk_dart/oas/main.dart | 4 + packages/anthropic_sdk_dart/pubspec.yaml | 2 +- .../test/messages_test.dart | 180 +- 21 files changed, 4492 insertions(+), 1065 deletions(-) create mode 100644 packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart create mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart diff --git a/packages/anthropic_sdk_dart/README.md b/packages/anthropic_sdk_dart/README.md index 6ccb7d3a..bc8b7208 100644 --- a/packages/anthropic_sdk_dart/README.md +++ b/packages/anthropic_sdk_dart/README.md @@ -16,7 +16,7 @@ Unofficial Dart 
client for [Anthropic](https://docs.anthropic.com/en/api) API (a **Supported endpoints:** -- Messages (with streaming support) +- Messages (with tools and streaming support) ## Table of contents @@ -57,57 +57,188 @@ Send a structured list of input messages with text and/or image content, and the ```dart final res = await client.createMessage( request: CreateMessageRequest( - model: Model.model(Models.claude3Opus20240229), + model: Model.model(Models.claude35Sonnet20240620), maxTokens: 1024, messages: [ Message( role: MessageRole.user, - content: 'Hello, Claude', + content: MessageContent.text('Hello, Claude'), ), ], ), ); print(res.content.text); -// Hi there! How can I help you today? +// Hello! It's nice to meet you. How are you doing today? ``` `Model` is a sealed class that offers two ways to specify the model: -- `Model.modelId('model-id')`: the model ID as string (e.g. `'claude-3-haiku-20240307'`). -- `Model.model(Models.claude3Opus20240229)`: a value from `Models` enum which lists all the available models. +- `Model.modelId('model-id')`: the model ID as string (e.g. `'claude-instant-1.2'`). +- `Model.model(Models.claude35Sonnet20240620)`: a value from `Models` enum which lists all the available models. Mind that this list may not be up-to-date. Refer to the [documentation](https://docs.anthropic.com/en/docs/models-overview) for the updated list. **Streaming messages:** ```dart -final stream = await client.createMessageStream( +final stream = client.createMessageStream( request: CreateMessageRequest( - model: Model.model(Models.claude3Opus20240229), + model: Model.model(Models.claude35Sonnet20240620), maxTokens: 1024, messages: [ Message( role: MessageRole.user, - content: 'Hello, Claude', + content: MessageContent.text('Hello, Claude'), ), ], ), ); -String text = ''; await for (final res in stream) { res.map( - messageStart: (e) {}, - messageDelta: (e) {}, - messageStop: (e) {}, - contentBlockStart: (e) {}, - contentBlockDelta: (e) { - text += e.delta.text; + messageStart: (MessageStartEvent e) {}, + messageDelta: (MessageDeltaEvent e) {}, + messageStop: (MessageStopEvent e) {}, + contentBlockStart: (ContentBlockStartEvent e) {}, + contentBlockDelta: (ContentBlockDeltaEvent e) { + stdout.write(e.delta.text); + }, + contentBlockStop: (ContentBlockStopEvent e) {}, + ping: (PingEvent e) {}, + ); +} +// Hello! It's nice to meet you. How are you doing today? +``` + +### Tool use + +Claude is capable of interacting with external client-side tools and functions, allowing you to equip Claude with your own custom tools to perform a wider variety of tasks. + +Refer to the [official documentation](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) for more information. + +In the following example, we want the model to be able to use our function that return the current weather in a given city: + +```dart +Map _getCurrentWeather( + final String location, + final String unit, +) { + const temperature = 22; + const weather = 'Sunny'; + return { + 'temperature': unit == 'celsius' ? temperature : (temperature * 9 / 5) + 32, + 'unit': unit, + 'description': weather, + }; +} +``` + +To do that, we need to provide the definition of the tool: +```dart +const tool = Tool( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. 
San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], }, - contentBlockStop: (e) {}, - ping: (e) {}, + }, + 'required': ['location'], + }, +); +``` + +Then we can use the tool in the message request: +```dart +final request1 = CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, +); +final aiMessage1 = await client.createMessage(request: request1); + +final toolUse = aiMessage1.content.blocks.firstOrNull; +if (toolUse == null || toolUse is! ToolUseBlock) { + return; +} + +// Call your tool here with the given input +final toolResult = _getCurrentWeather( + toolUse.input['location'], + toolUse.input['unit'], +); + +final request2 = CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now in Fahrenheit?', + ), + ), + Message( + role: MessageRole.assistant, + content: aiMessage1.content, + ), + Message( + role: MessageRole.user, + content: MessageContent.blocks([ + Block.toolResult( + toolUseId: toolUse.id, + content: ToolResultBlockContent.text(json.encode(toolResult)), + ), + ]), + ), + ], + tools: [tool], + maxTokens: 1024, +); +final aiMessage2 = await client.createMessage(request: request2); + +print(aiMessage2.content.text); +// Based on the current weather information for Boston, here's what it's like right now: +// +// The temperature in Boston is 71.6°F (Fahrenheit). +// The weather conditions are described as sunny. +``` + +You can also stream the input for a tool: + +```dart +final stream = client.createMessageStream(request: request); +await for (final res in stream) { + res.map( + messageStart: (MessageStartEvent v) {}, + messageDelta: (MessageDeltaEvent v) {}, + messageStop: (MessageStopEvent v) {}, + contentBlockStart: (ContentBlockStartEvent v) {}, + contentBlockDelta: (ContentBlockDeltaEvent v) { + stdout.write(v.delta.inputJson); + }, + contentBlockStop: (ContentBlockStopEvent v) {}, + ping: (PingEvent v) {}, ); } -print(text); -// Hi there! How can I help you today? 
+// {"location": "Boston, MA", "unit": "fahrenheit"} ``` ## Advance Usage diff --git a/packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart b/packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart new file mode 100644 index 00000000..0a576196 --- /dev/null +++ b/packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart @@ -0,0 +1,200 @@ +// ignore_for_file: avoid_print +import 'dart:async'; +import 'dart:convert'; +import 'dart:io'; + +import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; + +Future main() async { + final client = AnthropicClient( + apiKey: Platform.environment['ANTHROPIC_API_KEY'], + ); + + await _createMessage(client); + await _createMessageStream(client); + await _toolUse(client); + await _toolUseStreaming(client); + + client.endSession(); +} + +Future _createMessage(final AnthropicClient client) async { + final res = await client.createMessage( + request: const CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + maxTokens: 1024, + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text('Hello, Claude'), + ), + ], + ), + ); + print(res.content.text); + // Hello! It's nice to meet you. How are you doing today? +} + +Future _createMessageStream(final AnthropicClient client) async { + final stream = client.createMessageStream( + request: const CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + maxTokens: 1024, + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text('Hello, Claude'), + ), + ], + ), + ); + await for (final res in stream) { + res.map( + messageStart: (MessageStartEvent e) {}, + messageDelta: (MessageDeltaEvent e) {}, + messageStop: (MessageStopEvent e) {}, + contentBlockStart: (ContentBlockStartEvent e) {}, + contentBlockDelta: (ContentBlockDeltaEvent e) { + stdout.write(e.delta.text); + }, + contentBlockStop: (ContentBlockStopEvent e) {}, + ping: (PingEvent e) {}, + ); + } + // Hello! It's nice to meet you. How are you doing today? +} + +Future _toolUse(final AnthropicClient client) async { + final request1 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, + ); + + final aiMessage1 = await client.createMessage(request: request1); + + final toolUse = aiMessage1.content.blocks.firstOrNull; + if (toolUse == null || toolUse is! 
ToolUseBlock) { + return; + } + + // Call your tool here with the given input + final toolResult = _getCurrentWeather( + toolUse.input['location'], + toolUse.input['unit'], + ); + + final request2 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now in Fahrenheit?', + ), + ), + Message( + role: MessageRole.assistant, + content: aiMessage1.content, + ), + Message( + role: MessageRole.user, + content: MessageContent.blocks([ + Block.toolResult( + toolUseId: toolUse.id, + content: ToolResultBlockContent.text(json.encode(toolResult)), + ), + ]), + ), + ], + tools: [tool], + maxTokens: 1024, + ); + final aiMessage2 = await client.createMessage(request: request2); + print(aiMessage2.content.text); + // Based on the current weather information for Boston, here's what it's like right now: + // + // The temperature in Boston is 71.6°F (Fahrenheit). + // The weather conditions are described as sunny. +} + +Future _toolUseStreaming(final AnthropicClient client) async { + final request = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now in Fahrenheit?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, + ); + + final stream = client.createMessageStream(request: request); + await for (final res in stream) { + res.map( + messageStart: (MessageStartEvent v) {}, + messageDelta: (MessageDeltaEvent v) {}, + messageStop: (MessageStopEvent v) {}, + contentBlockStart: (ContentBlockStartEvent v) {}, + contentBlockDelta: (ContentBlockDeltaEvent v) { + stdout.write(v.delta.inputJson); + }, + contentBlockStop: (ContentBlockStopEvent v) {}, + ping: (PingEvent v) {}, + ); + } + // {"location": "Boston, MA", "unit": "fahrenheit"} +} + +Map _getCurrentWeather( + final String location, + final String unit, +) { + const temperature = 22; + const weather = 'Sunny'; + return { + 'temperature': unit == 'celsius' ? temperature : (temperature * 9 / 5) + 32, + 'unit': unit, + 'description': weather, + }; +} + +const tool = Tool( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, +); diff --git a/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart index 65378d70..7a853ada 100644 --- a/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart +++ b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart @@ -1,4 +1,4 @@ -/// Dart Client for the Anthropic API (Claude 3 Opus, Sonnet, Haiku, etc.). +/// Dart Client for the Anthropic API (Claude 3.5 Sonnet, Opus, Haiku, etc.). 
library anthropic_sdk_dart; export 'src/client.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/client.dart b/packages/anthropic_sdk_dart/lib/src/client.dart index 17c4e2a1..3d02a34b 100644 --- a/packages/anthropic_sdk_dart/lib/src/client.dart +++ b/packages/anthropic_sdk_dart/lib/src/client.dart @@ -76,8 +76,11 @@ class AnthropicClient extends g.AnthropicClient { yield* r.stream .transform(const _AnthropicStreamTransformer()) // .map( - (final d) => MessageStreamEvent.fromJson(json.decode(d)), - ); + (final d) { + final j = json.decode(d) as Map; + return MessageStreamEvent.fromJson(j); + }, + ); } @override diff --git a/packages/anthropic_sdk_dart/lib/src/extensions.dart b/packages/anthropic_sdk_dart/lib/src/extensions.dart index 749979e5..58b178a2 100644 --- a/packages/anthropic_sdk_dart/lib/src/extensions.dart +++ b/packages/anthropic_sdk_dart/lib/src/extensions.dart @@ -10,4 +10,63 @@ extension MessageContentX on MessageContent { blocks.value.whereType().map((t) => t.text).join('\n'), ); } + + /// Returns the blocks of the message. + List get blocks { + return map( + text: (text) => [Block.text(text: text.value)], + blocks: (blocks) => blocks.value, + ); + } +} + +/// Extension methods for [Block]. +extension BlockX on Block { + /// Returns the text content of the block. + String get text { + return mapOrNull( + text: (text) => text.text, + ) ?? + ''; + } + + /// Returns the image source of the block. + ImageBlock? get image { + return mapOrNull( + image: (image) => image, + ); + } + + /// Returns the tool use block. + ToolUseBlock? get toolUse { + return mapOrNull( + toolUse: (toolUse) => toolUse, + ); + } + + /// Returns the tool result block. + ToolResultBlock? get toolResult { + return mapOrNull( + toolResult: (toolResult) => toolResult, + ); + } +} + +/// Extension methods for [BlockDelta]. +extension BlockDeltaX on BlockDelta { + /// Returns the text content of the block delta. + String get text { + return map( + textDelta: (text) => text.text, + inputJsonDelta: (inputJson) => '', + ); + } + + /// Returns the type of the block delta. + String get inputJson { + return map( + textDelta: (text) => '', + inputJsonDelta: (inputJson) => inputJson.partialJson ?? '', + ); + } } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart index 36fcbaae..959a5ecb 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart @@ -39,6 +39,47 @@ sealed class Block with _$Block { @Default('image') String type, }) = ImageBlock; + // ------------------------------------------ + // UNION: ToolUseBlock + // ------------------------------------------ + + /// The tool the model wants to use. + const factory Block.toolUse({ + /// A unique identifier for this particular tool use block. + /// This will be used to match up the tool results later. + required String id, + + /// The name of the tool being used. + required String name, + + /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. + required Map input, + + /// The type of content block. + @Default('tool_use') String type, + }) = ToolUseBlock; + + // ------------------------------------------ + // UNION: ToolResultBlock + // ------------------------------------------ + + /// The result of using a tool. + const factory Block.toolResult({ + /// The `id` of the tool use request this is a result for. 
+ @JsonKey(name: 'tool_use_id') required String toolUseId, + + /// The result of the tool, as a string (e.g. `"content": "15 degrees"`) + /// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). + /// These content blocks can use the text or image types. + @_ToolResultBlockContentConverter() required ToolResultBlockContent content, + + /// Set to `true` if the tool execution resulted in an error. + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + + /// The type of content block. + @Default('tool_result') String type, + }) = ToolResultBlock; + /// Object construction from a JSON representation factory Block.fromJson(Map json) => _$BlockFromJson(json); } @@ -52,4 +93,63 @@ enum BlockEnumType { text, @JsonValue('image') image, + @JsonValue('tool_use') + toolUse, + @JsonValue('tool_result') + toolResult, +} + +// ========================================== +// CLASS: ToolResultBlockContent +// ========================================== + +/// The result of the tool, as a string (e.g. `"content": "15 degrees"`) +/// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). +/// These content blocks can use the text or image types. +@freezed +sealed class ToolResultBlockContent with _$ToolResultBlockContent { + const ToolResultBlockContent._(); + + /// An array of content blocks. + const factory ToolResultBlockContent.blocks( + List value, + ) = ToolResultBlockContentListBlock; + + /// A single text block. + const factory ToolResultBlockContent.text( + String value, + ) = ToolResultBlockContentString; + + /// Object construction from a JSON representation + factory ToolResultBlockContent.fromJson(Map json) => + _$ToolResultBlockContentFromJson(json); +} + +/// Custom JSON converter for [ToolResultBlockContent] +class _ToolResultBlockContentConverter + implements JsonConverter { + const _ToolResultBlockContentConverter(); + + @override + ToolResultBlockContent fromJson(Object? data) { + if (data is List && data.every((item) => item is Map)) { + return ToolResultBlockContentListBlock(data + .map((i) => Block.fromJson(i as Map)) + .toList(growable: false)); + } + if (data is String) { + return ToolResultBlockContentString(data); + } + throw Exception( + 'Unexpected value for ToolResultBlockContent: $data', + ); + } + + @override + Object? toJson(ToolResultBlockContent data) { + return switch (data) { + ToolResultBlockContentListBlock(value: final v) => v, + ToolResultBlockContentString(value: final v) => v, + }; + } } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart new file mode 100644 index 00000000..d107a864 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart @@ -0,0 +1,56 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: BlockDelta +// ========================================== + +/// A delta in a streaming message. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class BlockDelta with _$BlockDelta { + const BlockDelta._(); + + // ------------------------------------------ + // UNION: TextBlockDelta + // ------------------------------------------ + + /// A delta in a streaming text block. + const factory BlockDelta.textDelta({ + /// The text delta. 
+ required String text, + + /// The type of content block. + required String type, + }) = TextBlockDelta; + + // ------------------------------------------ + // UNION: InputJsonBlockDelta + // ------------------------------------------ + + /// A delta in a streaming input JSON. + const factory BlockDelta.inputJsonDelta({ + /// The partial JSON delta. + @JsonKey(name: 'partial_json', includeIfNull: false) String? partialJson, + + /// The type of content block. + required String type, + }) = InputJsonBlockDelta; + + /// Object construction from a JSON representation + factory BlockDelta.fromJson(Map json) => + _$BlockDeltaFromJson(json); +} + +// ========================================== +// ENUM: BlockDeltaEnumType +// ========================================== + +enum BlockDeltaEnumType { + @JsonValue('text_delta') + textDelta, + @JsonValue('input_json_delta') + inputJsonDelta, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart index 2f06233e..df2f1b5b 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart @@ -36,6 +36,9 @@ class CreateMessageRequest with _$CreateMessageRequest { /// continue immediately from the content in that message. This can be used to /// constrain part of the model's response. /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// /// Example with a single `user` message: /// /// ```json @@ -149,6 +152,84 @@ class CreateMessageRequest with _$CreateMessageRequest { /// deterministic. @JsonKey(includeIfNull: false) double? temperature, + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @JsonKey(name: 'tool_choice', includeIfNull: false) ToolChoice? toolChoice, + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." 
+ /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + @JsonKey(includeIfNull: false) List? tools, + /// Only sample from the top K options for each subsequent token. /// /// Used to remove "long tail" low probability responses. @@ -189,6 +270,8 @@ class CreateMessageRequest with _$CreateMessageRequest { 'stop_sequences', 'system', 'temperature', + 'tool_choice', + 'tools', 'top_k', 'top_p', 'stream' @@ -209,6 +292,8 @@ class CreateMessageRequest with _$CreateMessageRequest { 'stop_sequences': stopSequences, 'system': system, 'temperature': temperature, + 'tool_choice': toolChoice, + 'tools': tools, 'top_k': topK, 'top_p': topP, 'stream': stream, @@ -222,16 +307,18 @@ class CreateMessageRequest with _$CreateMessageRequest { /// Available models. Mind that the list may not be exhaustive nor up-to-date. enum Models { + @JsonValue('claude-3-5-sonnet-20240620') + claude35Sonnet20240620, + @JsonValue('claude-3-haiku-20240307') + claude3Haiku20240307, @JsonValue('claude-3-opus-20240229') claude3Opus20240229, @JsonValue('claude-3-sonnet-20240229') claude3Sonnet20240229, - @JsonValue('claude-3-haiku-20240307') - claude3Haiku20240307, - @JsonValue('claude-2.1') - claude21, @JsonValue('claude-2.0') claude20, + @JsonValue('claude-2.1') + claude21, @JsonValue('claude-instant-1.2') claudeInstant12, } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart index 73dac3c3..46ef88ba 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart @@ -69,8 +69,9 @@ sealed class MessageStreamEvent with _$MessageStreamEvent { /// A start event in a streaming content block. const factory MessageStreamEvent.contentBlockStart({ - /// A block of text content. - @JsonKey(name: 'content_block') required TextBlock contentBlock, + /// A block of content in a message. + /// Any of: [TextBlock], [ImageBlock], [ToolUseBlock], [ToolResultBlock] + @JsonKey(name: 'content_block') required Block contentBlock, /// The index of the content block. required int index, @@ -85,8 +86,9 @@ sealed class MessageStreamEvent with _$MessageStreamEvent { /// A delta event in a streaming content block. const factory MessageStreamEvent.contentBlockDelta({ - /// A delta in a streaming text block. - required TextBlockDelta delta, + /// A delta in a streaming message. 
+ /// Any of: [TextBlockDelta], [InputJsonBlockDelta] + required BlockDelta delta, /// The index of the content block. required int index, diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart index 1953d0e4..b9d2ef26 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart @@ -12,14 +12,17 @@ part 'schema.freezed.dart'; part 'create_message_request.dart'; part 'create_message_request_metadata.dart'; +part 'tool_choice.dart'; +part 'tool_choice_type.dart'; part 'message.dart'; part 'message_role.dart'; +part 'tool.dart'; part 'image_block_source.dart'; part 'stop_reason.dart'; part 'usage.dart'; part 'message_stream_event_type.dart'; part 'message_delta.dart'; part 'message_delta_usage.dart'; -part 'text_block_delta.dart'; part 'block.dart'; part 'message_stream_event.dart'; +part 'block_delta.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart index a014e0e8..528c9b30 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart @@ -42,6 +42,9 @@ mixin _$CreateMessageRequest { /// continue immediately from the content in that message. This can be used to /// constrain part of the model's response. /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// /// Example with a single `user` message: /// /// ```json @@ -160,6 +163,86 @@ mixin _$CreateMessageRequest { @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @JsonKey(name: 'tool_choice', includeIfNull: false) + ToolChoice? get toolChoice => throw _privateConstructorUsedError; + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." 
+ /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + @JsonKey(includeIfNull: false) + List? get tools => throw _privateConstructorUsedError; + /// Only sample from the top K options for each subsequent token. /// /// Used to remove "long tail" low probability responses. @@ -209,12 +292,16 @@ abstract class $CreateMessageRequestCopyWith<$Res> { List? stopSequences, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'tool_choice', includeIfNull: false) + ToolChoice? toolChoice, + @JsonKey(includeIfNull: false) List? tools, @JsonKey(name: 'top_k', includeIfNull: false) int? topK, @JsonKey(name: 'top_p', includeIfNull: false) double? topP, bool stream}); $ModelCopyWith<$Res> get model; $CreateMessageRequestMetadataCopyWith<$Res>? get metadata; + $ToolChoiceCopyWith<$Res>? get toolChoice; } /// @nodoc @@ -238,6 +325,8 @@ class _$CreateMessageRequestCopyWithImpl<$Res, Object? stopSequences = freezed, Object? system = freezed, Object? temperature = freezed, + Object? toolChoice = freezed, + Object? tools = freezed, Object? topK = freezed, Object? topP = freezed, Object? stream = null, @@ -271,6 +360,14 @@ class _$CreateMessageRequestCopyWithImpl<$Res, ? _value.temperature : temperature // ignore: cast_nullable_to_non_nullable as double?, + toolChoice: freezed == toolChoice + ? _value.toolChoice + : toolChoice // ignore: cast_nullable_to_non_nullable + as ToolChoice?, + tools: freezed == tools + ? _value.tools + : tools // ignore: cast_nullable_to_non_nullable + as List?, topK: freezed == topK ? _value.topK : topK // ignore: cast_nullable_to_non_nullable @@ -306,6 +403,18 @@ class _$CreateMessageRequestCopyWithImpl<$Res, return _then(_value.copyWith(metadata: value) as $Val); }); } + + @override + @pragma('vm:prefer-inline') + $ToolChoiceCopyWith<$Res>? get toolChoice { + if (_value.toolChoice == null) { + return null; + } + + return $ToolChoiceCopyWith<$Res>(_value.toolChoice!, (value) { + return _then(_value.copyWith(toolChoice: value) as $Val); + }); + } } /// @nodoc @@ -325,6 +434,9 @@ abstract class _$$CreateMessageRequestImplCopyWith<$Res> List? stopSequences, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'tool_choice', includeIfNull: false) + ToolChoice? toolChoice, + @JsonKey(includeIfNull: false) List? tools, @JsonKey(name: 'top_k', includeIfNull: false) int? topK, @JsonKey(name: 'top_p', includeIfNull: false) double? 
topP, bool stream}); @@ -333,6 +445,8 @@ abstract class _$$CreateMessageRequestImplCopyWith<$Res> $ModelCopyWith<$Res> get model; @override $CreateMessageRequestMetadataCopyWith<$Res>? get metadata; + @override + $ToolChoiceCopyWith<$Res>? get toolChoice; } /// @nodoc @@ -353,6 +467,8 @@ class __$$CreateMessageRequestImplCopyWithImpl<$Res> Object? stopSequences = freezed, Object? system = freezed, Object? temperature = freezed, + Object? toolChoice = freezed, + Object? tools = freezed, Object? topK = freezed, Object? topP = freezed, Object? stream = null, @@ -386,6 +502,14 @@ class __$$CreateMessageRequestImplCopyWithImpl<$Res> ? _value.temperature : temperature // ignore: cast_nullable_to_non_nullable as double?, + toolChoice: freezed == toolChoice + ? _value.toolChoice + : toolChoice // ignore: cast_nullable_to_non_nullable + as ToolChoice?, + tools: freezed == tools + ? _value._tools + : tools // ignore: cast_nullable_to_non_nullable + as List?, topK: freezed == topK ? _value.topK : topK // ignore: cast_nullable_to_non_nullable @@ -414,11 +538,14 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { final List? stopSequences, @JsonKey(includeIfNull: false) this.system, @JsonKey(includeIfNull: false) this.temperature, + @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, + @JsonKey(includeIfNull: false) final List? tools, @JsonKey(name: 'top_k', includeIfNull: false) this.topK, @JsonKey(name: 'top_p', includeIfNull: false) this.topP, this.stream = false}) : _messages = messages, _stopSequences = stopSequences, + _tools = tools, super._(); factory _$CreateMessageRequestImpl.fromJson(Map json) => @@ -447,6 +574,9 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { /// continue immediately from the content in that message. This can be used to /// constrain part of the model's response. /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// /// Example with a single `user` message: /// /// ```json @@ -534,6 +664,9 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { /// continue immediately from the content in that message. This can be used to /// constrain part of the model's response. /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// /// Example with a single `user` message: /// /// ```json @@ -678,6 +811,164 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { @JsonKey(includeIfNull: false) final double? temperature; + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @override + @JsonKey(name: 'tool_choice', includeIfNull: false) + final ToolChoice? toolChoice; + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. 
You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." + /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + final List? _tools; + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." 
+ /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + @override + @JsonKey(includeIfNull: false) + List? get tools { + final value = _tools; + if (value == null) return null; + if (_tools is EqualUnmodifiableListView) return _tools; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + /// Only sample from the top K options for each subsequent token. /// /// Used to remove "long tail" low probability responses. @@ -712,7 +1003,7 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { @override String toString() { - return 'CreateMessageRequest(model: $model, messages: $messages, maxTokens: $maxTokens, metadata: $metadata, stopSequences: $stopSequences, system: $system, temperature: $temperature, topK: $topK, topP: $topP, stream: $stream)'; + return 'CreateMessageRequest(model: $model, messages: $messages, maxTokens: $maxTokens, metadata: $metadata, stopSequences: $stopSequences, system: $system, temperature: $temperature, toolChoice: $toolChoice, tools: $tools, topK: $topK, topP: $topP, stream: $stream)'; } @override @@ -731,6 +1022,9 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { (identical(other.system, system) || other.system == system) && (identical(other.temperature, temperature) || other.temperature == temperature) && + (identical(other.toolChoice, toolChoice) || + other.toolChoice == toolChoice) && + const DeepCollectionEquality().equals(other._tools, _tools) && (identical(other.topK, topK) || other.topK == topK) && (identical(other.topP, topP) || other.topP == topP) && (identical(other.stream, stream) || other.stream == stream)); @@ -747,6 +1041,8 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { const DeepCollectionEquality().hash(_stopSequences), system, temperature, + toolChoice, + const DeepCollectionEquality().hash(_tools), topK, topP, stream); @@ -778,6 +1074,9 @@ abstract class _CreateMessageRequest extends CreateMessageRequest { final List? stopSequences, @JsonKey(includeIfNull: false) final String? system, @JsonKey(includeIfNull: false) final double? temperature, + @JsonKey(name: 'tool_choice', includeIfNull: false) + final ToolChoice? toolChoice, + @JsonKey(includeIfNull: false) final List? tools, @JsonKey(name: 'top_k', includeIfNull: false) final int? topK, @JsonKey(name: 'top_p', includeIfNull: false) final double? 
topP, final bool stream}) = _$CreateMessageRequestImpl; @@ -811,6 +1110,9 @@ abstract class _CreateMessageRequest extends CreateMessageRequest { /// continue immediately from the content in that message. This can be used to /// constrain part of the model's response. /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// /// Example with a single `user` message: /// /// ```json @@ -934,6 +1236,88 @@ abstract class _CreateMessageRequest extends CreateMessageRequest { double? get temperature; @override + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @JsonKey(name: 'tool_choice', includeIfNull: false) + ToolChoice? get toolChoice; + @override + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." + /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + @JsonKey(includeIfNull: false) + List? get tools; + @override + /// Only sample from the top K options for each subsequent token. /// /// Used to remove "long tail" low probability responses. 
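The generated documentation above describes the three `tool_choice` modes (`auto`, `any`, `tool`) and the shape of `tools`. As a quick illustration of how those request fields fit together with the types added in this patch, here is a minimal sketch; `buildRequest` is a hypothetical helper, and `ToolChoiceType.auto` / `ToolChoiceType.any` are assumed to mirror the documented `auto` / `any` values.

```dart
// Sketch only: wiring `tools` and `tool_choice` into a request.
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

CreateMessageRequest buildRequest(final Tool weatherTool) {
  return CreateMessageRequest(
    model: const Model.model(Models.claude35Sonnet20240620),
    maxTokens: 1024,
    messages: const [
      Message(
        role: MessageRole.user,
        content: MessageContent.text('What is the weather in Boston?'),
      ),
    ],
    tools: [weatherTool],
    // Let the model decide:  ToolChoice(type: ToolChoiceType.auto)
    // Require *some* tool:   ToolChoice(type: ToolChoiceType.any)
    // Force this exact tool:
    toolChoice: ToolChoice(
      type: ToolChoiceType.tool,
      name: weatherTool.name,
    ),
  );
}
```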
@@ -1524,6 +1908,191 @@ abstract class _CreateMessageRequestMetadata get copyWith => throw _privateConstructorUsedError; } +ToolChoice _$ToolChoiceFromJson(Map json) { + return _ToolChoice.fromJson(json); +} + +/// @nodoc +mixin _$ToolChoice { + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + ToolChoiceType get type => throw _privateConstructorUsedError; + + /// The name of the tool to use. + @JsonKey(includeIfNull: false) + String? get name => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolChoiceCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolChoiceCopyWith<$Res> { + factory $ToolChoiceCopyWith( + ToolChoice value, $Res Function(ToolChoice) then) = + _$ToolChoiceCopyWithImpl<$Res, ToolChoice>; + @useResult + $Res call({ToolChoiceType type, @JsonKey(includeIfNull: false) String? name}); +} + +/// @nodoc +class _$ToolChoiceCopyWithImpl<$Res, $Val extends ToolChoice> + implements $ToolChoiceCopyWith<$Res> { + _$ToolChoiceCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? name = freezed, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ToolChoiceType, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ToolChoiceImplCopyWith<$Res> + implements $ToolChoiceCopyWith<$Res> { + factory _$$ToolChoiceImplCopyWith( + _$ToolChoiceImpl value, $Res Function(_$ToolChoiceImpl) then) = + __$$ToolChoiceImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ToolChoiceType type, @JsonKey(includeIfNull: false) String? name}); +} + +/// @nodoc +class __$$ToolChoiceImplCopyWithImpl<$Res> + extends _$ToolChoiceCopyWithImpl<$Res, _$ToolChoiceImpl> + implements _$$ToolChoiceImplCopyWith<$Res> { + __$$ToolChoiceImplCopyWithImpl( + _$ToolChoiceImpl _value, $Res Function(_$ToolChoiceImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? name = freezed, + }) { + return _then(_$ToolChoiceImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ToolChoiceType, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolChoiceImpl extends _ToolChoice { + const _$ToolChoiceImpl( + {required this.type, @JsonKey(includeIfNull: false) this.name}) + : super._(); + + factory _$ToolChoiceImpl.fromJson(Map json) => + _$$ToolChoiceImplFromJson(json); + + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. 
+ /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @override + final ToolChoiceType type; + + /// The name of the tool to use. + @override + @JsonKey(includeIfNull: false) + final String? name; + + @override + String toString() { + return 'ToolChoice(type: $type, name: $name)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolChoiceImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.name, name) || other.name == name)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type, name); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolChoiceImplCopyWith<_$ToolChoiceImpl> get copyWith => + __$$ToolChoiceImplCopyWithImpl<_$ToolChoiceImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ToolChoiceImplToJson( + this, + ); + } +} + +abstract class _ToolChoice extends ToolChoice { + const factory _ToolChoice( + {required final ToolChoiceType type, + @JsonKey(includeIfNull: false) final String? name}) = _$ToolChoiceImpl; + const _ToolChoice._() : super._(); + + factory _ToolChoice.fromJson(Map json) = + _$ToolChoiceImpl.fromJson; + + @override + + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + ToolChoiceType get type; + @override + + /// The name of the tool to use. + @JsonKey(includeIfNull: false) + String? get name; + @override + @JsonKey(ignore: true) + _$$ToolChoiceImplCopyWith<_$ToolChoiceImpl> get copyWith => + throw _privateConstructorUsedError; +} + Message _$MessageFromJson(Map json) { return _Message.fromJson(json); } @@ -2407,20 +2976,259 @@ abstract class MessageContentString extends MessageContent { get copyWith => throw _privateConstructorUsedError; } -ImageBlockSource _$ImageBlockSourceFromJson(Map json) { - return _ImageBlockSource.fromJson(json); +Tool _$ToolFromJson(Map json) { + return _Tool.fromJson(json); } /// @nodoc -mixin _$ImageBlockSource { - /// The base64-encoded image data. - String get data => throw _privateConstructorUsedError; +mixin _$Tool { + /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + String get name => throw _privateConstructorUsedError; - /// The media type of the image. - @JsonKey(name: 'media_type') - ImageBlockSourceMediaType get mediaType => throw _privateConstructorUsedError; + /// Description of what this tool does. + /// + /// Tool descriptions should be as detailed as possible. The more information that + /// the model has about what the tool is and how to use it, the better it will + /// perform. You can use natural language descriptions to reinforce important + /// aspects of the tool input JSON schema. + @JsonKey(includeIfNull: false) + String? get description => throw _privateConstructorUsedError; - /// The type of image source. + /// [JSON schema](https://json-schema.org/) for this tool's input. 
+ /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. + @JsonKey(name: 'input_schema') + Map get inputSchema => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolCopyWith<$Res> { + factory $ToolCopyWith(Tool value, $Res Function(Tool) then) = + _$ToolCopyWithImpl<$Res, Tool>; + @useResult + $Res call( + {String name, + @JsonKey(includeIfNull: false) String? description, + @JsonKey(name: 'input_schema') Map inputSchema}); +} + +/// @nodoc +class _$ToolCopyWithImpl<$Res, $Val extends Tool> + implements $ToolCopyWith<$Res> { + _$ToolCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = freezed, + Object? inputSchema = null, + }) { + return _then(_value.copyWith( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: freezed == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String?, + inputSchema: null == inputSchema + ? _value.inputSchema + : inputSchema // ignore: cast_nullable_to_non_nullable + as Map, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ToolImplCopyWith<$Res> implements $ToolCopyWith<$Res> { + factory _$$ToolImplCopyWith( + _$ToolImpl value, $Res Function(_$ToolImpl) then) = + __$$ToolImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String name, + @JsonKey(includeIfNull: false) String? description, + @JsonKey(name: 'input_schema') Map inputSchema}); +} + +/// @nodoc +class __$$ToolImplCopyWithImpl<$Res> + extends _$ToolCopyWithImpl<$Res, _$ToolImpl> + implements _$$ToolImplCopyWith<$Res> { + __$$ToolImplCopyWithImpl(_$ToolImpl _value, $Res Function(_$ToolImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = freezed, + Object? inputSchema = null, + }) { + return _then(_$ToolImpl( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: freezed == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String?, + inputSchema: null == inputSchema + ? _value._inputSchema + : inputSchema // ignore: cast_nullable_to_non_nullable + as Map, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolImpl extends _Tool { + const _$ToolImpl( + {required this.name, + @JsonKey(includeIfNull: false) this.description, + @JsonKey(name: 'input_schema') + required final Map inputSchema}) + : _inputSchema = inputSchema, + super._(); + + factory _$ToolImpl.fromJson(Map json) => + _$$ToolImplFromJson(json); + + /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + @override + final String name; + + /// Description of what this tool does. + /// + /// Tool descriptions should be as detailed as possible. The more information that + /// the model has about what the tool is and how to use it, the better it will + /// perform. You can use natural language descriptions to reinforce important + /// aspects of the tool input JSON schema. + @override + @JsonKey(includeIfNull: false) + final String? 
description; + + /// [JSON schema](https://json-schema.org/) for this tool's input. + /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. + final Map _inputSchema; + + /// [JSON schema](https://json-schema.org/) for this tool's input. + /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. + @override + @JsonKey(name: 'input_schema') + Map get inputSchema { + if (_inputSchema is EqualUnmodifiableMapView) return _inputSchema; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_inputSchema); + } + + @override + String toString() { + return 'Tool(name: $name, description: $description, inputSchema: $inputSchema)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolImpl && + (identical(other.name, name) || other.name == name) && + (identical(other.description, description) || + other.description == description) && + const DeepCollectionEquality() + .equals(other._inputSchema, _inputSchema)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, name, description, + const DeepCollectionEquality().hash(_inputSchema)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolImplCopyWith<_$ToolImpl> get copyWith => + __$$ToolImplCopyWithImpl<_$ToolImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ToolImplToJson( + this, + ); + } +} + +abstract class _Tool extends Tool { + const factory _Tool( + {required final String name, + @JsonKey(includeIfNull: false) final String? description, + @JsonKey(name: 'input_schema') + required final Map inputSchema}) = _$ToolImpl; + const _Tool._() : super._(); + + factory _Tool.fromJson(Map json) = _$ToolImpl.fromJson; + + @override + + /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + String get name; + @override + + /// Description of what this tool does. + /// + /// Tool descriptions should be as detailed as possible. The more information that + /// the model has about what the tool is and how to use it, the better it will + /// perform. You can use natural language descriptions to reinforce important + /// aspects of the tool input JSON schema. + @JsonKey(includeIfNull: false) + String? get description; + @override + + /// [JSON schema](https://json-schema.org/) for this tool's input. + /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. + @JsonKey(name: 'input_schema') + Map get inputSchema; + @override + @JsonKey(ignore: true) + _$$ToolImplCopyWith<_$ToolImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ImageBlockSource _$ImageBlockSourceFromJson(Map json) { + return _ImageBlockSource.fromJson(json); +} + +/// @nodoc +mixin _$ImageBlockSource { + /// The base64-encoded image data. + String get data => throw _privateConstructorUsedError; + + /// The media type of the image. + @JsonKey(name: 'media_type') + ImageBlockSourceMediaType get mediaType => throw _privateConstructorUsedError; + + /// The type of image source. 
ImageBlockSourceType get type => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @@ -3182,37 +3990,115 @@ abstract class _MessageDeltaUsage extends MessageDeltaUsage { throw _privateConstructorUsedError; } -TextBlockDelta _$TextBlockDeltaFromJson(Map json) { - return _TextBlockDelta.fromJson(json); +Block _$BlockFromJson(Map json) { + switch (json['type']) { + case 'text': + return TextBlock.fromJson(json); + case 'image': + return ImageBlock.fromJson(json); + case 'tool_use': + return ToolUseBlock.fromJson(json); + case 'tool_result': + return ToolResultBlock.fromJson(json); + + default: + throw CheckedFromJsonException( + json, 'type', 'Block', 'Invalid union type "${json['type']}"!'); + } } /// @nodoc -mixin _$TextBlockDelta { - /// The text delta. - String get text => throw _privateConstructorUsedError; - +mixin _$Block { /// The type of content block. String get type => throw _privateConstructorUsedError; - + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + TResult Function( + String id, String name, Map input, String type)? + toolUse, + TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + required TResult Function(ToolUseBlock value) toolUse, + required TResult Function(ToolResultBlock value) toolResult, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + TResult? Function(ToolUseBlock value)? toolUse, + TResult? Function(ToolResultBlock value)? toolResult, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + TResult Function(ToolUseBlock value)? toolUse, + TResult Function(ToolResultBlock value)? 
toolResult, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $TextBlockDeltaCopyWith get copyWith => - throw _privateConstructorUsedError; + $BlockCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $TextBlockDeltaCopyWith<$Res> { - factory $TextBlockDeltaCopyWith( - TextBlockDelta value, $Res Function(TextBlockDelta) then) = - _$TextBlockDeltaCopyWithImpl<$Res, TextBlockDelta>; +abstract class $BlockCopyWith<$Res> { + factory $BlockCopyWith(Block value, $Res Function(Block) then) = + _$BlockCopyWithImpl<$Res, Block>; @useResult - $Res call({String text, String type}); + $Res call({String type}); } /// @nodoc -class _$TextBlockDeltaCopyWithImpl<$Res, $Val extends TextBlockDelta> - implements $TextBlockDeltaCopyWith<$Res> { - _$TextBlockDeltaCopyWithImpl(this._value, this._then); +class _$BlockCopyWithImpl<$Res, $Val extends Block> + implements $BlockCopyWith<$Res> { + _$BlockCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -3222,14 +4108,9 @@ class _$TextBlockDeltaCopyWithImpl<$Res, $Val extends TextBlockDelta> @pragma('vm:prefer-inline') @override $Res call({ - Object? text = null, Object? type = null, }) { return _then(_value.copyWith( - text: null == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable @@ -3239,22 +4120,21 @@ class _$TextBlockDeltaCopyWithImpl<$Res, $Val extends TextBlockDelta> } /// @nodoc -abstract class _$$TextBlockDeltaImplCopyWith<$Res> - implements $TextBlockDeltaCopyWith<$Res> { - factory _$$TextBlockDeltaImplCopyWith(_$TextBlockDeltaImpl value, - $Res Function(_$TextBlockDeltaImpl) then) = - __$$TextBlockDeltaImplCopyWithImpl<$Res>; +abstract class _$$TextBlockImplCopyWith<$Res> implements $BlockCopyWith<$Res> { + factory _$$TextBlockImplCopyWith( + _$TextBlockImpl value, $Res Function(_$TextBlockImpl) then) = + __$$TextBlockImplCopyWithImpl<$Res>; @override @useResult $Res call({String text, String type}); } /// @nodoc -class __$$TextBlockDeltaImplCopyWithImpl<$Res> - extends _$TextBlockDeltaCopyWithImpl<$Res, _$TextBlockDeltaImpl> - implements _$$TextBlockDeltaImplCopyWith<$Res> { - __$$TextBlockDeltaImplCopyWithImpl( - _$TextBlockDeltaImpl _value, $Res Function(_$TextBlockDeltaImpl) _then) +class __$$TextBlockImplCopyWithImpl<$Res> + extends _$BlockCopyWithImpl<$Res, _$TextBlockImpl> + implements _$$TextBlockImplCopyWith<$Res> { + __$$TextBlockImplCopyWithImpl( + _$TextBlockImpl _value, $Res Function(_$TextBlockImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -3263,7 +4143,7 @@ class __$$TextBlockDeltaImplCopyWithImpl<$Res> Object? text = null, Object? type = null, }) { - return _then(_$TextBlockDeltaImpl( + return _then(_$TextBlockImpl( text: null == text ? 
_value.text : text // ignore: cast_nullable_to_non_nullable @@ -3278,31 +4158,31 @@ class __$$TextBlockDeltaImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$TextBlockDeltaImpl extends _TextBlockDelta { - const _$TextBlockDeltaImpl({required this.text, required this.type}) - : super._(); +class _$TextBlockImpl extends TextBlock { + const _$TextBlockImpl({required this.text, this.type = 'text'}) : super._(); - factory _$TextBlockDeltaImpl.fromJson(Map json) => - _$$TextBlockDeltaImplFromJson(json); + factory _$TextBlockImpl.fromJson(Map json) => + _$$TextBlockImplFromJson(json); - /// The text delta. + /// The text content. @override final String text; /// The type of content block. @override + @JsonKey() final String type; @override String toString() { - return 'TextBlockDelta(text: $text, type: $type)'; + return 'Block.text(text: $text, type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$TextBlockDeltaImpl && + other is _$TextBlockImpl && (identical(other.text, text) || other.text == text) && (identical(other.type, type) || other.type == type)); } @@ -3314,218 +4194,25 @@ class _$TextBlockDeltaImpl extends _TextBlockDelta { @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => - __$$TextBlockDeltaImplCopyWithImpl<_$TextBlockDeltaImpl>( - this, _$identity); + _$$TextBlockImplCopyWith<_$TextBlockImpl> get copyWith => + __$$TextBlockImplCopyWithImpl<_$TextBlockImpl>(this, _$identity); @override - Map toJson() { - return _$$TextBlockDeltaImplToJson( - this, - ); - } -} - -abstract class _TextBlockDelta extends TextBlockDelta { - const factory _TextBlockDelta( - {required final String text, - required final String type}) = _$TextBlockDeltaImpl; - const _TextBlockDelta._() : super._(); - - factory _TextBlockDelta.fromJson(Map json) = - _$TextBlockDeltaImpl.fromJson; - - @override - - /// The text delta. - String get text; - @override - - /// The type of content block. - String get type; - @override - @JsonKey(ignore: true) - _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => - throw _privateConstructorUsedError; -} - -Block _$BlockFromJson(Map json) { - switch (json['type']) { - case 'text': - return TextBlock.fromJson(json); - case 'image': - return ImageBlock.fromJson(json); - - default: - throw CheckedFromJsonException( - json, 'type', 'Block', 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$Block { - /// The type of content block. - String get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(String text, String type) text, - required TResult Function(ImageBlockSource source, String type) image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String text, String type)? text, - TResult? Function(ImageBlockSource source, String type)? image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String text, String type)? text, - TResult Function(ImageBlockSource source, String type)? image, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(TextBlock value) text, - required TResult Function(ImageBlock value) image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? 
Function(TextBlock value)? text, - TResult? Function(ImageBlock value)? image, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(TextBlock value)? text, - TResult Function(ImageBlock value)? image, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $BlockCopyWith get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $BlockCopyWith<$Res> { - factory $BlockCopyWith(Block value, $Res Function(Block) then) = - _$BlockCopyWithImpl<$Res, Block>; - @useResult - $Res call({String type}); -} - -/// @nodoc -class _$BlockCopyWithImpl<$Res, $Val extends Block> - implements $BlockCopyWith<$Res> { - _$BlockCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$TextBlockImplCopyWith<$Res> implements $BlockCopyWith<$Res> { - factory _$$TextBlockImplCopyWith( - _$TextBlockImpl value, $Res Function(_$TextBlockImpl) then) = - __$$TextBlockImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({String text, String type}); -} - -/// @nodoc -class __$$TextBlockImplCopyWithImpl<$Res> - extends _$BlockCopyWithImpl<$Res, _$TextBlockImpl> - implements _$$TextBlockImplCopyWith<$Res> { - __$$TextBlockImplCopyWithImpl( - _$TextBlockImpl _value, $Res Function(_$TextBlockImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? text = null, - Object? type = null, - }) { - return _then(_$TextBlockImpl( - text: null == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$TextBlockImpl extends TextBlock { - const _$TextBlockImpl({required this.text, this.type = 'text'}) : super._(); - - factory _$TextBlockImpl.fromJson(Map json) => - _$$TextBlockImplFromJson(json); - - /// The text content. - @override - final String text; - - /// The type of content block. 
- @override - @JsonKey() - final String type; - - @override - String toString() { - return 'Block.text(text: $text, type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$TextBlockImpl && - (identical(other.text, text) || other.text == text) && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, text, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$TextBlockImplCopyWith<_$TextBlockImpl> get copyWith => - __$$TextBlockImplCopyWithImpl<_$TextBlockImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(String text, String type) text, - required TResult Function(ImageBlockSource source, String type) image, - }) { - return text(this.text, type); + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, + }) { + return text(this.text, type); } @override @@ -3533,6 +4220,15 @@ class _$TextBlockImpl extends TextBlock { TResult? whenOrNull({ TResult? Function(String text, String type)? text, TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, }) { return text?.call(this.text, type); } @@ -3542,6 +4238,15 @@ class _$TextBlockImpl extends TextBlock { TResult maybeWhen({ TResult Function(String text, String type)? text, TResult Function(ImageBlockSource source, String type)? image, + TResult Function( + String id, String name, Map input, String type)? + toolUse, + TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, required TResult orElse(), }) { if (text != null) { @@ -3555,6 +4260,8 @@ class _$TextBlockImpl extends TextBlock { TResult map({ required TResult Function(TextBlock value) text, required TResult Function(ImageBlock value) image, + required TResult Function(ToolUseBlock value) toolUse, + required TResult Function(ToolResultBlock value) toolResult, }) { return text(this); } @@ -3564,6 +4271,8 @@ class _$TextBlockImpl extends TextBlock { TResult? mapOrNull({ TResult? Function(TextBlock value)? text, TResult? Function(ImageBlock value)? image, + TResult? Function(ToolUseBlock value)? toolUse, + TResult? Function(ToolResultBlock value)? toolResult, }) { return text?.call(this); } @@ -3573,6 +4282,8 @@ class _$TextBlockImpl extends TextBlock { TResult maybeMap({ TResult Function(TextBlock value)? text, TResult Function(ImageBlock value)? image, + TResult Function(ToolUseBlock value)? toolUse, + TResult Function(ToolResultBlock value)? 
toolResult, required TResult orElse(), }) { if (text != null) { @@ -3703,6 +4414,15 @@ class _$ImageBlockImpl extends ImageBlock { TResult when({ required TResult Function(String text, String type) text, required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, }) { return image(source, type); } @@ -3712,6 +4432,15 @@ class _$ImageBlockImpl extends ImageBlock { TResult? whenOrNull({ TResult? Function(String text, String type)? text, TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, }) { return image?.call(source, type); } @@ -3721,6 +4450,15 @@ class _$ImageBlockImpl extends ImageBlock { TResult maybeWhen({ TResult Function(String text, String type)? text, TResult Function(ImageBlockSource source, String type)? image, + TResult Function( + String id, String name, Map input, String type)? + toolUse, + TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, required TResult orElse(), }) { if (image != null) { @@ -3734,6 +4472,8 @@ class _$ImageBlockImpl extends ImageBlock { TResult map({ required TResult Function(TextBlock value) text, required TResult Function(ImageBlock value) image, + required TResult Function(ToolUseBlock value) toolUse, + required TResult Function(ToolResultBlock value) toolResult, }) { return image(this); } @@ -3743,6 +4483,8 @@ class _$ImageBlockImpl extends ImageBlock { TResult? mapOrNull({ TResult? Function(TextBlock value)? text, TResult? Function(ImageBlock value)? image, + TResult? Function(ToolUseBlock value)? toolUse, + TResult? Function(ToolResultBlock value)? toolResult, }) { return image?.call(this); } @@ -3752,6 +4494,8 @@ class _$ImageBlockImpl extends ImageBlock { TResult maybeMap({ TResult Function(TextBlock value)? text, TResult Function(ImageBlock value)? image, + TResult Function(ToolUseBlock value)? toolUse, + TResult Function(ToolResultBlock value)? 
toolResult, required TResult orElse(), }) { if (image != null) { @@ -3789,54 +4533,1468 @@ abstract class ImageBlock extends Block { throw _privateConstructorUsedError; } -MessageStreamEvent _$MessageStreamEventFromJson(Map json) { - switch (json['type']) { - case 'message_start': - return MessageStartEvent.fromJson(json); - case 'message_delta': - return MessageDeltaEvent.fromJson(json); - case 'message_stop': - return MessageStopEvent.fromJson(json); - case 'content_block_start': - return ContentBlockStartEvent.fromJson(json); - case 'content_block_delta': - return ContentBlockDeltaEvent.fromJson(json); - case 'content_block_stop': - return ContentBlockStopEvent.fromJson(json); - case 'ping': - return PingEvent.fromJson(json); +/// @nodoc +abstract class _$$ToolUseBlockImplCopyWith<$Res> + implements $BlockCopyWith<$Res> { + factory _$$ToolUseBlockImplCopyWith( + _$ToolUseBlockImpl value, $Res Function(_$ToolUseBlockImpl) then) = + __$$ToolUseBlockImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String id, String name, Map input, String type}); +} - default: - throw CheckedFromJsonException(json, 'type', 'MessageStreamEvent', - 'Invalid union type "${json['type']}"!'); +/// @nodoc +class __$$ToolUseBlockImplCopyWithImpl<$Res> + extends _$BlockCopyWithImpl<$Res, _$ToolUseBlockImpl> + implements _$$ToolUseBlockImplCopyWith<$Res> { + __$$ToolUseBlockImplCopyWithImpl( + _$ToolUseBlockImpl _value, $Res Function(_$ToolUseBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? name = null, + Object? input = null, + Object? type = null, + }) { + return _then(_$ToolUseBlockImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + input: null == input + ? _value._input + : input // ignore: cast_nullable_to_non_nullable + as Map, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); } } /// @nodoc -mixin _$MessageStreamEvent { - /// The type of a streaming event. - MessageStreamEventType get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, - required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, +@JsonSerializable() +class _$ToolUseBlockImpl extends ToolUseBlock { + const _$ToolUseBlockImpl( + {required this.id, + required this.name, + required final Map input, + this.type = 'tool_use'}) + : _input = input, + super._(); + + factory _$ToolUseBlockImpl.fromJson(Map json) => + _$$ToolUseBlockImplFromJson(json); + + /// A unique identifier for this particular tool use block. + /// This will be used to match up the tool results later. + @override + final String id; + + /// The name of the tool being used. + @override + final String name; + + /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. 
+ final Map _input; + + /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. + @override + Map get input { + if (_input is EqualUnmodifiableMapView) return _input; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_input); + } + + /// The type of content block. + @override + @JsonKey() + final String type; + + @override + String toString() { + return 'Block.toolUse(id: $id, name: $name, input: $input, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolUseBlockImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.name, name) || other.name == name) && + const DeepCollectionEquality().equals(other._input, _input) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, id, name, const DeepCollectionEquality().hash(_input), type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolUseBlockImplCopyWith<_$ToolUseBlockImpl> get copyWith => + __$$ToolUseBlockImplCopyWithImpl<_$ToolUseBlockImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, + }) { + return toolUse(id, name, input, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + }) { + return toolUse?.call(id, name, input, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + TResult Function( + String id, String name, Map input, String type)? + toolUse, + TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + required TResult orElse(), + }) { + if (toolUse != null) { + return toolUse(id, name, input, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + required TResult Function(ToolUseBlock value) toolUse, + required TResult Function(ToolResultBlock value) toolResult, + }) { + return toolUse(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + TResult? Function(ToolUseBlock value)? toolUse, + TResult? Function(ToolResultBlock value)? 
toolResult,
+  }) {
+    return toolUse?.call(this);
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult maybeMap<TResult extends Object?>({
+    TResult Function(TextBlock value)? text,
+    TResult Function(ImageBlock value)? image,
+    TResult Function(ToolUseBlock value)? toolUse,
+    TResult Function(ToolResultBlock value)? toolResult,
+    required TResult orElse(),
+  }) {
+    if (toolUse != null) {
+      return toolUse(this);
+    }
+    return orElse();
+  }
+
+  @override
+  Map<String, dynamic> toJson() {
+    return _$$ToolUseBlockImplToJson(
+      this,
+    );
+  }
+}
+
+abstract class ToolUseBlock extends Block {
+  const factory ToolUseBlock(
+      {required final String id,
+      required final String name,
+      required final Map<String, dynamic> input,
+      final String type}) = _$ToolUseBlockImpl;
+  const ToolUseBlock._() : super._();
+
+  factory ToolUseBlock.fromJson(Map<String, dynamic> json) =
+      _$ToolUseBlockImpl.fromJson;
+
+  /// A unique identifier for this particular tool use block.
+  /// This will be used to match up the tool results later.
+  String get id;
+
+  /// The name of the tool being used.
+  String get name;
+
+  /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`.
+  Map<String, dynamic> get input;
+  @override
+
+  /// The type of content block.
+  String get type;
+  @override
+  @JsonKey(ignore: true)
+  _$$ToolUseBlockImplCopyWith<_$ToolUseBlockImpl> get copyWith =>
+      throw _privateConstructorUsedError;
+}
+
+/// @nodoc
+abstract class _$$ToolResultBlockImplCopyWith<$Res>
+    implements $BlockCopyWith<$Res> {
+  factory _$$ToolResultBlockImplCopyWith(_$ToolResultBlockImpl value,
+          $Res Function(_$ToolResultBlockImpl) then) =
+      __$$ToolResultBlockImplCopyWithImpl<$Res>;
+  @override
+  @useResult
+  $Res call(
+      {@JsonKey(name: 'tool_use_id') String toolUseId,
+      @_ToolResultBlockContentConverter() ToolResultBlockContent content,
+      @JsonKey(name: 'is_error', includeIfNull: false) bool? isError,
+      String type});
+
+  $ToolResultBlockContentCopyWith<$Res> get content;
+}
+
+/// @nodoc
+class __$$ToolResultBlockImplCopyWithImpl<$Res>
+    extends _$BlockCopyWithImpl<$Res, _$ToolResultBlockImpl>
+    implements _$$ToolResultBlockImplCopyWith<$Res> {
+  __$$ToolResultBlockImplCopyWithImpl(
+      _$ToolResultBlockImpl _value, $Res Function(_$ToolResultBlockImpl) _then)
+      : super(_value, _then);
+
+  @pragma('vm:prefer-inline')
+  @override
+  $Res call({
+    Object? toolUseId = null,
+    Object? content = null,
+    Object? isError = freezed,
+    Object? type = null,
+  }) {
+    return _then(_$ToolResultBlockImpl(
+      toolUseId: null == toolUseId
+          ? _value.toolUseId
+          : toolUseId // ignore: cast_nullable_to_non_nullable
+              as String,
+      content: null == content
+          ? _value.content
+          : content // ignore: cast_nullable_to_non_nullable
+              as ToolResultBlockContent,
+      isError: freezed == isError
+          ? _value.isError
+          : isError // ignore: cast_nullable_to_non_nullable
+              as bool?,
+      type: null == type
+          ?
_value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } + + @override + @pragma('vm:prefer-inline') + $ToolResultBlockContentCopyWith<$Res> get content { + return $ToolResultBlockContentCopyWith<$Res>(_value.content, (value) { + return _then(_value.copyWith(content: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolResultBlockImpl extends ToolResultBlock { + const _$ToolResultBlockImpl( + {@JsonKey(name: 'tool_use_id') required this.toolUseId, + @_ToolResultBlockContentConverter() required this.content, + @JsonKey(name: 'is_error', includeIfNull: false) this.isError, + this.type = 'tool_result'}) + : super._(); + + factory _$ToolResultBlockImpl.fromJson(Map json) => + _$$ToolResultBlockImplFromJson(json); + + /// The `id` of the tool use request this is a result for. + @override + @JsonKey(name: 'tool_use_id') + final String toolUseId; + + /// The result of the tool, as a string (e.g. `"content": "15 degrees"`) + /// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). + /// These content blocks can use the text or image types. + @override + @_ToolResultBlockContentConverter() + final ToolResultBlockContent content; + + /// Set to `true` if the tool execution resulted in an error. + @override + @JsonKey(name: 'is_error', includeIfNull: false) + final bool? isError; + + /// The type of content block. + @override + @JsonKey() + final String type; + + @override + String toString() { + return 'Block.toolResult(toolUseId: $toolUseId, content: $content, isError: $isError, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolResultBlockImpl && + (identical(other.toolUseId, toolUseId) || + other.toolUseId == toolUseId) && + (identical(other.content, content) || other.content == content) && + (identical(other.isError, isError) || other.isError == isError) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, toolUseId, content, isError, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolResultBlockImplCopyWith<_$ToolResultBlockImpl> get copyWith => + __$$ToolResultBlockImplCopyWithImpl<_$ToolResultBlockImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, + }) { + return toolResult(toolUseId, content, isError, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? 
+        toolResult,
+  }) {
+    return toolResult?.call(toolUseId, content, isError, type);
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult maybeWhen<TResult extends Object?>({
+    TResult Function(String text, String type)? text,
+    TResult Function(ImageBlockSource source, String type)? image,
+    TResult Function(
+            String id, String name, Map<String, dynamic> input, String type)?
+        toolUse,
+    TResult Function(
+            @JsonKey(name: 'tool_use_id') String toolUseId,
+            @_ToolResultBlockContentConverter() ToolResultBlockContent content,
+            @JsonKey(name: 'is_error', includeIfNull: false) bool? isError,
+            String type)?
+        toolResult,
+    required TResult orElse(),
+  }) {
+    if (toolResult != null) {
+      return toolResult(toolUseId, content, isError, type);
+    }
+    return orElse();
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult map<TResult extends Object?>({
+    required TResult Function(TextBlock value) text,
+    required TResult Function(ImageBlock value) image,
+    required TResult Function(ToolUseBlock value) toolUse,
+    required TResult Function(ToolResultBlock value) toolResult,
+  }) {
+    return toolResult(this);
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult? mapOrNull<TResult extends Object?>({
+    TResult? Function(TextBlock value)? text,
+    TResult? Function(ImageBlock value)? image,
+    TResult? Function(ToolUseBlock value)? toolUse,
+    TResult? Function(ToolResultBlock value)? toolResult,
+  }) {
+    return toolResult?.call(this);
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult maybeMap<TResult extends Object?>({
+    TResult Function(TextBlock value)? text,
+    TResult Function(ImageBlock value)? image,
+    TResult Function(ToolUseBlock value)? toolUse,
+    TResult Function(ToolResultBlock value)? toolResult,
+    required TResult orElse(),
+  }) {
+    if (toolResult != null) {
+      return toolResult(this);
+    }
+    return orElse();
+  }
+
+  @override
+  Map<String, dynamic> toJson() {
+    return _$$ToolResultBlockImplToJson(
+      this,
+    );
+  }
+}
+
+abstract class ToolResultBlock extends Block {
+  const factory ToolResultBlock(
+      {@JsonKey(name: 'tool_use_id') required final String toolUseId,
+      @_ToolResultBlockContentConverter()
+      required final ToolResultBlockContent content,
+      @JsonKey(name: 'is_error', includeIfNull: false) final bool? isError,
+      final String type}) = _$ToolResultBlockImpl;
+  const ToolResultBlock._() : super._();
+
+  factory ToolResultBlock.fromJson(Map<String, dynamic> json) =
+      _$ToolResultBlockImpl.fromJson;
+
+  /// The `id` of the tool use request this is a result for.
+  @JsonKey(name: 'tool_use_id')
+  String get toolUseId;
+
+  /// The result of the tool, as a string (e.g. `"content": "15 degrees"`)
+  /// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`).
+  /// These content blocks can use the text or image types.
+  @_ToolResultBlockContentConverter()
+  ToolResultBlockContent get content;
+
+  /// Set to `true` if the tool execution resulted in an error.
+  @JsonKey(name: 'is_error', includeIfNull: false)
+  bool? get isError;
+  @override
+
+  /// The type of content block.
+ String get type; + @override + @JsonKey(ignore: true) + _$$ToolResultBlockImplCopyWith<_$ToolResultBlockImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ToolResultBlockContent _$ToolResultBlockContentFromJson( + Map json) { + switch (json['runtimeType']) { + case 'blocks': + return ToolResultBlockContentListBlock.fromJson(json); + case 'text': + return ToolResultBlockContentString.fromJson(json); + + default: + throw CheckedFromJsonException( + json, + 'runtimeType', + 'ToolResultBlockContent', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$ToolResultBlockContent { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(ToolResultBlockContentListBlock value) blocks, + required TResult Function(ToolResultBlockContentString value) text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ToolResultBlockContentListBlock value)? blocks, + TResult? Function(ToolResultBlockContentString value)? text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ToolResultBlockContentListBlock value)? blocks, + TResult Function(ToolResultBlockContentString value)? text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolResultBlockContentCopyWith<$Res> { + factory $ToolResultBlockContentCopyWith(ToolResultBlockContent value, + $Res Function(ToolResultBlockContent) then) = + _$ToolResultBlockContentCopyWithImpl<$Res, ToolResultBlockContent>; +} + +/// @nodoc +class _$ToolResultBlockContentCopyWithImpl<$Res, + $Val extends ToolResultBlockContent> + implements $ToolResultBlockContentCopyWith<$Res> { + _$ToolResultBlockContentCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$ToolResultBlockContentListBlockImplCopyWith<$Res> { + factory _$$ToolResultBlockContentListBlockImplCopyWith( + _$ToolResultBlockContentListBlockImpl value, + $Res Function(_$ToolResultBlockContentListBlockImpl) then) = + __$$ToolResultBlockContentListBlockImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class __$$ToolResultBlockContentListBlockImplCopyWithImpl<$Res> + extends _$ToolResultBlockContentCopyWithImpl<$Res, + _$ToolResultBlockContentListBlockImpl> + implements _$$ToolResultBlockContentListBlockImplCopyWith<$Res> { + __$$ToolResultBlockContentListBlockImplCopyWithImpl( + _$ToolResultBlockContentListBlockImpl _value, + $Res Function(_$ToolResultBlockContentListBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ToolResultBlockContentListBlockImpl( + null == value + ? 
_value._value + : value // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolResultBlockContentListBlockImpl + extends ToolResultBlockContentListBlock { + const _$ToolResultBlockContentListBlockImpl(final List value, + {final String? $type}) + : _value = value, + $type = $type ?? 'blocks', + super._(); + + factory _$ToolResultBlockContentListBlockImpl.fromJson( + Map json) => + _$$ToolResultBlockContentListBlockImplFromJson(json); + + final List _value; + @override + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ToolResultBlockContent.blocks(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolResultBlockContentListBlockImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolResultBlockContentListBlockImplCopyWith< + _$ToolResultBlockContentListBlockImpl> + get copyWith => __$$ToolResultBlockContentListBlockImplCopyWithImpl< + _$ToolResultBlockContentListBlockImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) { + return blocks(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) { + return blocks?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? text, + required TResult orElse(), + }) { + if (blocks != null) { + return blocks(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ToolResultBlockContentListBlock value) blocks, + required TResult Function(ToolResultBlockContentString value) text, + }) { + return blocks(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ToolResultBlockContentListBlock value)? blocks, + TResult? Function(ToolResultBlockContentString value)? text, + }) { + return blocks?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ToolResultBlockContentListBlock value)? blocks, + TResult Function(ToolResultBlockContentString value)? 
text, + required TResult orElse(), + }) { + if (blocks != null) { + return blocks(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ToolResultBlockContentListBlockImplToJson( + this, + ); + } +} + +abstract class ToolResultBlockContentListBlock extends ToolResultBlockContent { + const factory ToolResultBlockContentListBlock(final List value) = + _$ToolResultBlockContentListBlockImpl; + const ToolResultBlockContentListBlock._() : super._(); + + factory ToolResultBlockContentListBlock.fromJson(Map json) = + _$ToolResultBlockContentListBlockImpl.fromJson; + + @override + List get value; + @JsonKey(ignore: true) + _$$ToolResultBlockContentListBlockImplCopyWith< + _$ToolResultBlockContentListBlockImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ToolResultBlockContentStringImplCopyWith<$Res> { + factory _$$ToolResultBlockContentStringImplCopyWith( + _$ToolResultBlockContentStringImpl value, + $Res Function(_$ToolResultBlockContentStringImpl) then) = + __$$ToolResultBlockContentStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$ToolResultBlockContentStringImplCopyWithImpl<$Res> + extends _$ToolResultBlockContentCopyWithImpl<$Res, + _$ToolResultBlockContentStringImpl> + implements _$$ToolResultBlockContentStringImplCopyWith<$Res> { + __$$ToolResultBlockContentStringImplCopyWithImpl( + _$ToolResultBlockContentStringImpl _value, + $Res Function(_$ToolResultBlockContentStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ToolResultBlockContentStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { + const _$ToolResultBlockContentStringImpl(this.value, {final String? $type}) + : $type = $type ?? 'text', + super._(); + + factory _$ToolResultBlockContentStringImpl.fromJson( + Map json) => + _$$ToolResultBlockContentStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ToolResultBlockContent.text(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolResultBlockContentStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolResultBlockContentStringImplCopyWith< + _$ToolResultBlockContentStringImpl> + get copyWith => __$$ToolResultBlockContentStringImplCopyWithImpl< + _$ToolResultBlockContentStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) { + return text(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) { + return text?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? 
text, + required TResult orElse(), + }) { + if (text != null) { + return text(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ToolResultBlockContentListBlock value) blocks, + required TResult Function(ToolResultBlockContentString value) text, + }) { + return text(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ToolResultBlockContentListBlock value)? blocks, + TResult? Function(ToolResultBlockContentString value)? text, + }) { + return text?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ToolResultBlockContentListBlock value)? blocks, + TResult Function(ToolResultBlockContentString value)? text, + required TResult orElse(), + }) { + if (text != null) { + return text(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ToolResultBlockContentStringImplToJson( + this, + ); + } +} + +abstract class ToolResultBlockContentString extends ToolResultBlockContent { + const factory ToolResultBlockContentString(final String value) = + _$ToolResultBlockContentStringImpl; + const ToolResultBlockContentString._() : super._(); + + factory ToolResultBlockContentString.fromJson(Map json) = + _$ToolResultBlockContentStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$ToolResultBlockContentStringImplCopyWith< + _$ToolResultBlockContentStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +MessageStreamEvent _$MessageStreamEventFromJson(Map json) { + switch (json['type']) { + case 'message_start': + return MessageStartEvent.fromJson(json); + case 'message_delta': + return MessageDeltaEvent.fromJson(json); + case 'message_stop': + return MessageStopEvent.fromJson(json); + case 'content_block_start': + return ContentBlockStartEvent.fromJson(json); + case 'content_block_delta': + return ContentBlockDeltaEvent.fromJson(json); + case 'content_block_stop': + return ContentBlockStopEvent.fromJson(json); + case 'ping': + return PingEvent.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'type', 'MessageStreamEvent', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$MessageStreamEvent { + /// The type of a streaming event. + MessageStreamEventType get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? 
+ contentBlockStart, + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageStreamEventCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageStreamEventCopyWith<$Res> { + factory $MessageStreamEventCopyWith( + MessageStreamEvent value, $Res Function(MessageStreamEvent) then) = + _$MessageStreamEventCopyWithImpl<$Res, MessageStreamEvent>; + @useResult + $Res call({MessageStreamEventType type}); +} + +/// @nodoc +class _$MessageStreamEventCopyWithImpl<$Res, $Val extends MessageStreamEvent> + implements $MessageStreamEventCopyWith<$Res> { + _$MessageStreamEventCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? 
type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$MessageStartEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$MessageStartEventImplCopyWith(_$MessageStartEventImpl value, + $Res Function(_$MessageStartEventImpl) then) = + __$$MessageStartEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({Message message, MessageStreamEventType type}); + + $MessageCopyWith<$Res> get message; +} + +/// @nodoc +class __$$MessageStartEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStartEventImpl> + implements _$$MessageStartEventImplCopyWith<$Res> { + __$$MessageStartEventImplCopyWithImpl(_$MessageStartEventImpl _value, + $Res Function(_$MessageStartEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? message = null, + Object? type = null, + }) { + return _then(_$MessageStartEventImpl( + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as Message, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } + + @override + @pragma('vm:prefer-inline') + $MessageCopyWith<$Res> get message { + return $MessageCopyWith<$Res>(_value.message, (value) { + return _then(_value.copyWith(message: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageStartEventImpl extends MessageStartEvent { + const _$MessageStartEventImpl({required this.message, required this.type}) + : super._(); + + factory _$MessageStartEventImpl.fromJson(Map json) => + _$$MessageStartEventImplFromJson(json); + + /// A message in a chat conversation. + @override + final Message message; + + /// The type of a streaming event. 
+ @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.messageStart(message: $message, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageStartEventImpl && + (identical(other.message, message) || other.message == message) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, message, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => + __$$MessageStartEventImplCopyWithImpl<_$MessageStartEventImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return messageStart(message, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return messageStart?.call(message, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? 
ping, + required TResult orElse(), + }) { + if (messageStart != null) { + return messageStart(message, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return messageStart(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return messageStart?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (messageStart != null) { + return messageStart(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageStartEventImplToJson( + this, + ); + } +} + +abstract class MessageStartEvent extends MessageStreamEvent { + const factory MessageStartEvent( + {required final Message message, + required final MessageStreamEventType type}) = _$MessageStartEventImpl; + const MessageStartEvent._() : super._(); + + factory MessageStartEvent.fromJson(Map json) = + _$MessageStartEventImpl.fromJson; + + /// A message in a chat conversation. + Message get message; + @override + + /// The type of a streaming event. + MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$MessageDeltaEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$MessageDeltaEventImplCopyWith(_$MessageDeltaEventImpl value, + $Res Function(_$MessageDeltaEventImpl) then) = + __$$MessageDeltaEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {MessageDelta delta, + MessageStreamEventType type, + MessageDeltaUsage usage}); + + $MessageDeltaCopyWith<$Res> get delta; + $MessageDeltaUsageCopyWith<$Res> get usage; +} + +/// @nodoc +class __$$MessageDeltaEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageDeltaEventImpl> + implements _$$MessageDeltaEventImplCopyWith<$Res> { + __$$MessageDeltaEventImplCopyWithImpl(_$MessageDeltaEventImpl _value, + $Res Function(_$MessageDeltaEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? delta = null, + Object? type = null, + Object? 
usage = null, + }) { + return _then(_$MessageDeltaEventImpl( + delta: null == delta + ? _value.delta + : delta // ignore: cast_nullable_to_non_nullable + as MessageDelta, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + usage: null == usage + ? _value.usage + : usage // ignore: cast_nullable_to_non_nullable + as MessageDeltaUsage, + )); + } + + @override + @pragma('vm:prefer-inline') + $MessageDeltaCopyWith<$Res> get delta { + return $MessageDeltaCopyWith<$Res>(_value.delta, (value) { + return _then(_value.copyWith(delta: value)); + }); + } + + @override + @pragma('vm:prefer-inline') + $MessageDeltaUsageCopyWith<$Res> get usage { + return $MessageDeltaUsageCopyWith<$Res>(_value.usage, (value) { + return _then(_value.copyWith(usage: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaEventImpl extends MessageDeltaEvent { + const _$MessageDeltaEventImpl( + {required this.delta, required this.type, required this.usage}) + : super._(); + + factory _$MessageDeltaEventImpl.fromJson(Map json) => + _$$MessageDeltaEventImplFromJson(json); + + /// A delta in a streaming message. + @override + final MessageDelta delta; + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. 
+ @override + final MessageDeltaUsage usage; + + @override + String toString() { + return 'MessageStreamEvent.messageDelta(delta: $delta, type: $type, usage: $usage)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaEventImpl && + (identical(other.delta, delta) || other.delta == delta) && + (identical(other.type, type) || other.type == type) && + (identical(other.usage, usage) || other.usage == usage)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, delta, type, usage); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => + __$$MessageDeltaEventImplCopyWithImpl<_$MessageDeltaEventImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, required TResult Function(int index, MessageStreamEventType type) contentBlockStop, required TResult Function(MessageStreamEventType type) ping, - }) => - throw _privateConstructorUsedError; + }) { + return messageDelta(delta, type, usage); + } + + @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(Message message, MessageStreamEventType type)? @@ -3845,16 +6003,18 @@ mixin _$MessageStreamEvent { MessageDeltaUsage usage)? messageDelta, TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, TResult? Function(MessageStreamEventType type)? ping, - }) => - throw _privateConstructorUsedError; + }) { + return messageDelta?.call(delta, type, usage); + } + + @override @optionalTypeArgs TResult maybeWhen({ TResult Function(Message message, MessageStreamEventType type)? @@ -3863,17 +6023,22 @@ mixin _$MessageStreamEvent { MessageDeltaUsage usage)? messageDelta, TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult Function(int index, MessageStreamEventType type)? contentBlockStop, TResult Function(MessageStreamEventType type)? 
ping, required TResult orElse(), - }) => - throw _privateConstructorUsedError; + }) { + if (messageDelta != null) { + return messageDelta(delta, type, usage); + } + return orElse(); + } + + @override @optionalTypeArgs TResult map({ required TResult Function(MessageStartEvent value) messageStart, @@ -3883,8 +6048,11 @@ mixin _$MessageStreamEvent { required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, required TResult Function(ContentBlockStopEvent value) contentBlockStop, required TResult Function(PingEvent value) ping, - }) => - throw _privateConstructorUsedError; + }) { + return messageDelta(this); + } + + @override @optionalTypeArgs TResult? mapOrNull({ TResult? Function(MessageStartEvent value)? messageStart, @@ -3894,8 +6062,11 @@ mixin _$MessageStreamEvent { TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, TResult? Function(ContentBlockStopEvent value)? contentBlockStop, TResult? Function(PingEvent value)? ping, - }) => - throw _privateConstructorUsedError; + }) { + return messageDelta?.call(this); + } + + @override @optionalTypeArgs TResult maybeMap({ TResult Function(MessageStartEvent value)? messageStart, @@ -3906,107 +6077,97 @@ mixin _$MessageStreamEvent { TResult Function(ContentBlockStopEvent value)? contentBlockStop, TResult Function(PingEvent value)? ping, required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $MessageStreamEventCopyWith get copyWith => - throw _privateConstructorUsedError; -} + }) { + if (messageDelta != null) { + return messageDelta(this); + } + return orElse(); + } -/// @nodoc -abstract class $MessageStreamEventCopyWith<$Res> { - factory $MessageStreamEventCopyWith( - MessageStreamEvent value, $Res Function(MessageStreamEvent) then) = - _$MessageStreamEventCopyWithImpl<$Res, MessageStreamEvent>; - @useResult - $Res call({MessageStreamEventType type}); + @override + Map toJson() { + return _$$MessageDeltaEventImplToJson( + this, + ); + } } -/// @nodoc -class _$MessageStreamEventCopyWithImpl<$Res, $Val extends MessageStreamEvent> - implements $MessageStreamEventCopyWith<$Res> { - _$MessageStreamEventCopyWithImpl(this._value, this._then); +abstract class MessageDeltaEvent extends MessageStreamEvent { + const factory MessageDeltaEvent( + {required final MessageDelta delta, + required final MessageStreamEventType type, + required final MessageDeltaUsage usage}) = _$MessageDeltaEventImpl; + const MessageDeltaEvent._() : super._(); - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + factory MessageDeltaEvent.fromJson(Map json) = + _$MessageDeltaEventImpl.fromJson; - @pragma('vm:prefer-inline') + /// A delta in a streaming message. + MessageDelta get delta; @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, - ) as $Val); - } + + /// The type of a streaming event. + MessageStreamEventType get type; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. 
As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + MessageDeltaUsage get usage; + @override + @JsonKey(ignore: true) + _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageStartEventImplCopyWith<$Res> +abstract class _$$MessageStopEventImplCopyWith<$Res> implements $MessageStreamEventCopyWith<$Res> { - factory _$$MessageStartEventImplCopyWith(_$MessageStartEventImpl value, - $Res Function(_$MessageStartEventImpl) then) = - __$$MessageStartEventImplCopyWithImpl<$Res>; + factory _$$MessageStopEventImplCopyWith(_$MessageStopEventImpl value, + $Res Function(_$MessageStopEventImpl) then) = + __$$MessageStopEventImplCopyWithImpl<$Res>; @override @useResult - $Res call({Message message, MessageStreamEventType type}); - - $MessageCopyWith<$Res> get message; + $Res call({MessageStreamEventType type}); } /// @nodoc -class __$$MessageStartEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStartEventImpl> - implements _$$MessageStartEventImplCopyWith<$Res> { - __$$MessageStartEventImplCopyWithImpl(_$MessageStartEventImpl _value, - $Res Function(_$MessageStartEventImpl) _then) +class __$$MessageStopEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStopEventImpl> + implements _$$MessageStopEventImplCopyWith<$Res> { + __$$MessageStopEventImplCopyWithImpl(_$MessageStopEventImpl _value, + $Res Function(_$MessageStopEventImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? message = null, Object? type = null, }) { - return _then(_$MessageStartEventImpl( - message: null == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as Message, + return _then(_$MessageStopEventImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as MessageStreamEventType, )); } - - @override - @pragma('vm:prefer-inline') - $MessageCopyWith<$Res> get message { - return $MessageCopyWith<$Res>(_value.message, (value) { - return _then(_value.copyWith(message: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$MessageStartEventImpl extends MessageStartEvent { - const _$MessageStartEventImpl({required this.message, required this.type}) - : super._(); - - factory _$MessageStartEventImpl.fromJson(Map json) => - _$$MessageStartEventImplFromJson(json); +class _$MessageStopEventImpl extends MessageStopEvent { + const _$MessageStopEventImpl({required this.type}) : super._(); - /// A message in a chat conversation. - @override - final Message message; + factory _$MessageStopEventImpl.fromJson(Map json) => + _$$MessageStopEventImplFromJson(json); /// The type of a streaming event. 
@override @@ -4014,27 +6175,26 @@ class _$MessageStartEventImpl extends MessageStartEvent { @override String toString() { - return 'MessageStreamEvent.messageStart(message: $message, type: $type)'; + return 'MessageStreamEvent.messageStop(type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageStartEventImpl && - (identical(other.message, message) || other.message == message) && + other is _$MessageStopEventImpl && (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, message, type); + int get hashCode => Object.hash(runtimeType, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => - __$$MessageStartEventImplCopyWithImpl<_$MessageStartEventImpl>( + _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => + __$$MessageStopEventImplCopyWithImpl<_$MessageStopEventImpl>( this, _$identity); @override @@ -4047,18 +6207,18 @@ class _$MessageStartEventImpl extends MessageStartEvent { messageDelta, required TResult Function(MessageStreamEventType type) messageStop, required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, + @JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type) contentBlockStart, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) + BlockDelta delta, int index, MessageStreamEventType type) contentBlockDelta, required TResult Function(int index, MessageStreamEventType type) contentBlockStop, required TResult Function(MessageStreamEventType type) ping, }) { - return messageStart(message, type); + return messageStop(type); } @override @@ -4070,16 +6230,15 @@ class _$MessageStartEventImpl extends MessageStartEvent { MessageDeltaUsage usage)? messageDelta, TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, TResult? Function(MessageStreamEventType type)? ping, }) { - return messageStart?.call(message, type); + return messageStop?.call(type); } @override @@ -4091,18 +6250,17 @@ class _$MessageStartEventImpl extends MessageStartEvent { MessageDeltaUsage usage)? messageDelta, TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult Function(int index, MessageStreamEventType type)? contentBlockStop, TResult Function(MessageStreamEventType type)? 
ping, required TResult orElse(), }) { - if (messageStart != null) { - return messageStart(message, type); + if (messageStop != null) { + return messageStop(type); } return orElse(); } @@ -4118,7 +6276,7 @@ class _$MessageStartEventImpl extends MessageStartEvent { required TResult Function(ContentBlockStopEvent value) contentBlockStop, required TResult Function(PingEvent value) ping, }) { - return messageStart(this); + return messageStop(this); } @override @@ -4132,7 +6290,7 @@ class _$MessageStartEventImpl extends MessageStartEvent { TResult? Function(ContentBlockStopEvent value)? contentBlockStop, TResult? Function(PingEvent value)? ping, }) { - return messageStart?.call(this); + return messageStop?.call(this); } @override @@ -4147,164 +6305,148 @@ class _$MessageStartEventImpl extends MessageStartEvent { TResult Function(PingEvent value)? ping, required TResult orElse(), }) { - if (messageStart != null) { - return messageStart(this); + if (messageStop != null) { + return messageStop(this); } return orElse(); } @override Map toJson() { - return _$$MessageStartEventImplToJson( + return _$$MessageStopEventImplToJson( this, ); } } -abstract class MessageStartEvent extends MessageStreamEvent { - const factory MessageStartEvent( - {required final Message message, - required final MessageStreamEventType type}) = _$MessageStartEventImpl; - const MessageStartEvent._() : super._(); +abstract class MessageStopEvent extends MessageStreamEvent { + const factory MessageStopEvent({required final MessageStreamEventType type}) = + _$MessageStopEventImpl; + const MessageStopEvent._() : super._(); - factory MessageStartEvent.fromJson(Map json) = - _$MessageStartEventImpl.fromJson; + factory MessageStopEvent.fromJson(Map json) = + _$MessageStopEventImpl.fromJson; - /// A message in a chat conversation. - Message get message; @override /// The type of a streaming event. 
MessageStreamEventType get type; @override @JsonKey(ignore: true) - _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => + _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageDeltaEventImplCopyWith<$Res> +abstract class _$$ContentBlockStartEventImplCopyWith<$Res> implements $MessageStreamEventCopyWith<$Res> { - factory _$$MessageDeltaEventImplCopyWith(_$MessageDeltaEventImpl value, - $Res Function(_$MessageDeltaEventImpl) then) = - __$$MessageDeltaEventImplCopyWithImpl<$Res>; + factory _$$ContentBlockStartEventImplCopyWith( + _$ContentBlockStartEventImpl value, + $Res Function(_$ContentBlockStartEventImpl) then) = + __$$ContentBlockStartEventImplCopyWithImpl<$Res>; @override @useResult $Res call( - {MessageDelta delta, - MessageStreamEventType type, - MessageDeltaUsage usage}); + {@JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type}); - $MessageDeltaCopyWith<$Res> get delta; - $MessageDeltaUsageCopyWith<$Res> get usage; + $BlockCopyWith<$Res> get contentBlock; } /// @nodoc -class __$$MessageDeltaEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageDeltaEventImpl> - implements _$$MessageDeltaEventImplCopyWith<$Res> { - __$$MessageDeltaEventImplCopyWithImpl(_$MessageDeltaEventImpl _value, - $Res Function(_$MessageDeltaEventImpl) _then) +class __$$ContentBlockStartEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStartEventImpl> + implements _$$ContentBlockStartEventImplCopyWith<$Res> { + __$$ContentBlockStartEventImplCopyWithImpl( + _$ContentBlockStartEventImpl _value, + $Res Function(_$ContentBlockStartEventImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? delta = null, + Object? contentBlock = null, + Object? index = null, Object? type = null, - Object? usage = null, }) { - return _then(_$MessageDeltaEventImpl( - delta: null == delta - ? _value.delta - : delta // ignore: cast_nullable_to_non_nullable - as MessageDelta, + return _then(_$ContentBlockStartEventImpl( + contentBlock: null == contentBlock + ? _value.contentBlock + : contentBlock // ignore: cast_nullable_to_non_nullable + as Block, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, - usage: null == usage - ? 
_value.usage - : usage // ignore: cast_nullable_to_non_nullable - as MessageDeltaUsage, + as MessageStreamEventType, )); } @override @pragma('vm:prefer-inline') - $MessageDeltaCopyWith<$Res> get delta { - return $MessageDeltaCopyWith<$Res>(_value.delta, (value) { - return _then(_value.copyWith(delta: value)); - }); - } - - @override - @pragma('vm:prefer-inline') - $MessageDeltaUsageCopyWith<$Res> get usage { - return $MessageDeltaUsageCopyWith<$Res>(_value.usage, (value) { - return _then(_value.copyWith(usage: value)); + $BlockCopyWith<$Res> get contentBlock { + return $BlockCopyWith<$Res>(_value.contentBlock, (value) { + return _then(_value.copyWith(contentBlock: value)); }); } } /// @nodoc @JsonSerializable() -class _$MessageDeltaEventImpl extends MessageDeltaEvent { - const _$MessageDeltaEventImpl( - {required this.delta, required this.type, required this.usage}) +class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { + const _$ContentBlockStartEventImpl( + {@JsonKey(name: 'content_block') required this.contentBlock, + required this.index, + required this.type}) : super._(); - factory _$MessageDeltaEventImpl.fromJson(Map json) => - _$$MessageDeltaEventImplFromJson(json); + factory _$ContentBlockStartEventImpl.fromJson(Map json) => + _$$ContentBlockStartEventImplFromJson(json); - /// A delta in a streaming message. + /// A block of content in a message. + /// Any of: [TextBlock], [ImageBlock], [ToolUseBlock], [ToolResultBlock] @override - final MessageDelta delta; + @JsonKey(name: 'content_block') + final Block contentBlock; - /// The type of a streaming event. + /// The index of the content block. @override - final MessageStreamEventType type; + final int index; - /// Billing and rate-limit usage. - /// - /// Anthropic's API bills and rate-limits by token counts, as tokens represent the - /// underlying cost to our systems. - /// - /// Under the hood, the API transforms requests into a format suitable for the - /// model. The model's output then goes through a parsing stage before becoming an - /// API response. As a result, the token counts in `usage` will not match one-to-one - /// with the exact visible content of an API request or response. - /// - /// For example, `output_tokens` will be non-zero, even for an empty string response - /// from Claude. + /// The type of a streaming event. 
@override - final MessageDeltaUsage usage; + final MessageStreamEventType type; @override String toString() { - return 'MessageStreamEvent.messageDelta(delta: $delta, type: $type, usage: $usage)'; + return 'MessageStreamEvent.contentBlockStart(contentBlock: $contentBlock, index: $index, type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageDeltaEventImpl && - (identical(other.delta, delta) || other.delta == delta) && - (identical(other.type, type) || other.type == type) && - (identical(other.usage, usage) || other.usage == usage)); + other is _$ContentBlockStartEventImpl && + (identical(other.contentBlock, contentBlock) || + other.contentBlock == contentBlock) && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, delta, type, usage); + int get hashCode => Object.hash(runtimeType, contentBlock, index, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => - __$$MessageDeltaEventImplCopyWithImpl<_$MessageDeltaEventImpl>( - this, _$identity); + _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> + get copyWith => __$$ContentBlockStartEventImplCopyWithImpl< + _$ContentBlockStartEventImpl>(this, _$identity); @override @optionalTypeArgs @@ -4316,18 +6458,18 @@ class _$MessageDeltaEventImpl extends MessageDeltaEvent { messageDelta, required TResult Function(MessageStreamEventType type) messageStop, required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, + @JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type) contentBlockStart, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) + BlockDelta delta, int index, MessageStreamEventType type) contentBlockDelta, required TResult Function(int index, MessageStreamEventType type) contentBlockStop, required TResult Function(MessageStreamEventType type) ping, }) { - return messageDelta(delta, type, usage); + return contentBlockStart(contentBlock, index, type); } @override @@ -4339,16 +6481,15 @@ class _$MessageDeltaEventImpl extends MessageDeltaEvent { MessageDeltaUsage usage)? messageDelta, TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, TResult? Function(MessageStreamEventType type)? ping, }) { - return messageDelta?.call(delta, type, usage); + return contentBlockStart?.call(contentBlock, index, type); } @override @@ -4360,18 +6501,17 @@ class _$MessageDeltaEventImpl extends MessageDeltaEvent { MessageDeltaUsage usage)? messageDelta, TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? 
contentBlockStart, - TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult Function(int index, MessageStreamEventType type)? contentBlockStop, TResult Function(MessageStreamEventType type)? ping, required TResult orElse(), }) { - if (messageDelta != null) { - return messageDelta(delta, type, usage); + if (contentBlockStart != null) { + return contentBlockStart(contentBlock, index, type); } return orElse(); } @@ -4387,7 +6527,7 @@ class _$MessageDeltaEventImpl extends MessageDeltaEvent { required TResult Function(ContentBlockStopEvent value) contentBlockStop, required TResult Function(PingEvent value) ping, }) { - return messageDelta(this); + return contentBlockStart(this); } @override @@ -4401,7 +6541,7 @@ class _$MessageDeltaEventImpl extends MessageDeltaEvent { TResult? Function(ContentBlockStopEvent value)? contentBlockStop, TResult? Function(PingEvent value)? ping, }) { - return messageDelta?.call(this); + return contentBlockStart?.call(this); } @override @@ -4416,96 +6556,121 @@ class _$MessageDeltaEventImpl extends MessageDeltaEvent { TResult Function(PingEvent value)? ping, required TResult orElse(), }) { - if (messageDelta != null) { - return messageDelta(this); + if (contentBlockStart != null) { + return contentBlockStart(this); } return orElse(); } @override Map toJson() { - return _$$MessageDeltaEventImplToJson( + return _$$ContentBlockStartEventImplToJson( this, ); } } -abstract class MessageDeltaEvent extends MessageStreamEvent { - const factory MessageDeltaEvent( - {required final MessageDelta delta, - required final MessageStreamEventType type, - required final MessageDeltaUsage usage}) = _$MessageDeltaEventImpl; - const MessageDeltaEvent._() : super._(); +abstract class ContentBlockStartEvent extends MessageStreamEvent { + const factory ContentBlockStartEvent( + {@JsonKey(name: 'content_block') required final Block contentBlock, + required final int index, + required final MessageStreamEventType type}) = + _$ContentBlockStartEventImpl; + const ContentBlockStartEvent._() : super._(); - factory MessageDeltaEvent.fromJson(Map json) = - _$MessageDeltaEventImpl.fromJson; + factory ContentBlockStartEvent.fromJson(Map json) = + _$ContentBlockStartEventImpl.fromJson; - /// A delta in a streaming message. - MessageDelta get delta; + /// A block of content in a message. + /// Any of: [TextBlock], [ImageBlock], [ToolUseBlock], [ToolResultBlock] + @JsonKey(name: 'content_block') + Block get contentBlock; + + /// The index of the content block. + int get index; @override /// The type of a streaming event. MessageStreamEventType get type; - - /// Billing and rate-limit usage. - /// - /// Anthropic's API bills and rate-limits by token counts, as tokens represent the - /// underlying cost to our systems. - /// - /// Under the hood, the API transforms requests into a format suitable for the - /// model. The model's output then goes through a parsing stage before becoming an - /// API response. As a result, the token counts in `usage` will not match one-to-one - /// with the exact visible content of an API request or response. - /// - /// For example, `output_tokens` will be non-zero, even for an empty string response - /// from Claude. 
- MessageDeltaUsage get usage; @override @JsonKey(ignore: true) - _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => - throw _privateConstructorUsedError; + _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageStopEventImplCopyWith<$Res> +abstract class _$$ContentBlockDeltaEventImplCopyWith<$Res> implements $MessageStreamEventCopyWith<$Res> { - factory _$$MessageStopEventImplCopyWith(_$MessageStopEventImpl value, - $Res Function(_$MessageStopEventImpl) then) = - __$$MessageStopEventImplCopyWithImpl<$Res>; + factory _$$ContentBlockDeltaEventImplCopyWith( + _$ContentBlockDeltaEventImpl value, + $Res Function(_$ContentBlockDeltaEventImpl) then) = + __$$ContentBlockDeltaEventImplCopyWithImpl<$Res>; @override @useResult - $Res call({MessageStreamEventType type}); + $Res call({BlockDelta delta, int index, MessageStreamEventType type}); + + $BlockDeltaCopyWith<$Res> get delta; } /// @nodoc -class __$$MessageStopEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStopEventImpl> - implements _$$MessageStopEventImplCopyWith<$Res> { - __$$MessageStopEventImplCopyWithImpl(_$MessageStopEventImpl _value, - $Res Function(_$MessageStopEventImpl) _then) +class __$$ContentBlockDeltaEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockDeltaEventImpl> + implements _$$ContentBlockDeltaEventImplCopyWith<$Res> { + __$$ContentBlockDeltaEventImplCopyWithImpl( + _$ContentBlockDeltaEventImpl _value, + $Res Function(_$ContentBlockDeltaEventImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ + Object? delta = null, + Object? index = null, Object? type = null, }) { - return _then(_$MessageStopEventImpl( + return _then(_$ContentBlockDeltaEventImpl( + delta: null == delta + ? _value.delta + : delta // ignore: cast_nullable_to_non_nullable + as BlockDelta, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as MessageStreamEventType, )); } + + @override + @pragma('vm:prefer-inline') + $BlockDeltaCopyWith<$Res> get delta { + return $BlockDeltaCopyWith<$Res>(_value.delta, (value) { + return _then(_value.copyWith(delta: value)); + }); + } } /// @nodoc @JsonSerializable() -class _$MessageStopEventImpl extends MessageStopEvent { - const _$MessageStopEventImpl({required this.type}) : super._(); +class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { + const _$ContentBlockDeltaEventImpl( + {required this.delta, required this.index, required this.type}) + : super._(); - factory _$MessageStopEventImpl.fromJson(Map json) => - _$$MessageStopEventImplFromJson(json); + factory _$ContentBlockDeltaEventImpl.fromJson(Map json) => + _$$ContentBlockDeltaEventImplFromJson(json); + + /// A delta in a streaming message. + /// Any of: [TextBlockDelta], [InputJsonBlockDelta] + @override + final BlockDelta delta; + + /// The index of the content block. + @override + final int index; /// The type of a streaming event. 
@override @@ -4513,27 +6678,29 @@ class _$MessageStopEventImpl extends MessageStopEvent { @override String toString() { - return 'MessageStreamEvent.messageStop(type: $type)'; + return 'MessageStreamEvent.contentBlockDelta(delta: $delta, index: $index, type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageStopEventImpl && + other is _$ContentBlockDeltaEventImpl && + (identical(other.delta, delta) || other.delta == delta) && + (identical(other.index, index) || other.index == index) && (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type); + int get hashCode => Object.hash(runtimeType, delta, index, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => - __$$MessageStopEventImplCopyWithImpl<_$MessageStopEventImpl>( - this, _$identity); + _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> + get copyWith => __$$ContentBlockDeltaEventImplCopyWithImpl< + _$ContentBlockDeltaEventImpl>(this, _$identity); @override @optionalTypeArgs @@ -4545,18 +6712,18 @@ class _$MessageStopEventImpl extends MessageStopEvent { messageDelta, required TResult Function(MessageStreamEventType type) messageStop, required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, + @JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type) contentBlockStart, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) + BlockDelta delta, int index, MessageStreamEventType type) contentBlockDelta, required TResult Function(int index, MessageStreamEventType type) contentBlockStop, required TResult Function(MessageStreamEventType type) ping, }) { - return messageStop(type); + return contentBlockDelta(delta, index, type); } @override @@ -4568,16 +6735,15 @@ class _$MessageStopEventImpl extends MessageStopEvent { MessageDeltaUsage usage)? messageDelta, TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, TResult? Function(MessageStreamEventType type)? ping, }) { - return messageStop?.call(type); + return contentBlockDelta?.call(delta, index, type); } @override @@ -4589,18 +6755,17 @@ class _$MessageStopEventImpl extends MessageStopEvent { MessageDeltaUsage usage)? messageDelta, TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult Function(int index, MessageStreamEventType type)? contentBlockStop, TResult Function(MessageStreamEventType type)? 
ping, required TResult orElse(), }) { - if (messageStop != null) { - return messageStop(type); + if (contentBlockDelta != null) { + return contentBlockDelta(delta, index, type); } return orElse(); } @@ -4616,7 +6781,7 @@ class _$MessageStopEventImpl extends MessageStopEvent { required TResult Function(ContentBlockStopEvent value) contentBlockStop, required TResult Function(PingEvent value) ping, }) { - return messageStop(this); + return contentBlockDelta(this); } @override @@ -4630,7 +6795,7 @@ class _$MessageStopEventImpl extends MessageStopEvent { TResult? Function(ContentBlockStopEvent value)? contentBlockStop, TResult? Function(PingEvent value)? ping, }) { - return messageStop?.call(this); + return contentBlockDelta?.call(this); } @override @@ -4645,74 +6810,74 @@ class _$MessageStopEventImpl extends MessageStopEvent { TResult Function(PingEvent value)? ping, required TResult orElse(), }) { - if (messageStop != null) { - return messageStop(this); + if (contentBlockDelta != null) { + return contentBlockDelta(this); } return orElse(); } @override Map toJson() { - return _$$MessageStopEventImplToJson( + return _$$ContentBlockDeltaEventImplToJson( this, ); } } -abstract class MessageStopEvent extends MessageStreamEvent { - const factory MessageStopEvent({required final MessageStreamEventType type}) = - _$MessageStopEventImpl; - const MessageStopEvent._() : super._(); +abstract class ContentBlockDeltaEvent extends MessageStreamEvent { + const factory ContentBlockDeltaEvent( + {required final BlockDelta delta, + required final int index, + required final MessageStreamEventType type}) = + _$ContentBlockDeltaEventImpl; + const ContentBlockDeltaEvent._() : super._(); - factory MessageStopEvent.fromJson(Map json) = - _$MessageStopEventImpl.fromJson; + factory ContentBlockDeltaEvent.fromJson(Map json) = + _$ContentBlockDeltaEventImpl.fromJson; + + /// A delta in a streaming message. + /// Any of: [TextBlockDelta], [InputJsonBlockDelta] + BlockDelta get delta; + /// The index of the content block. + int get index; @override /// The type of a streaming event. 
MessageStreamEventType get type; @override @JsonKey(ignore: true) - _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => - throw _privateConstructorUsedError; + _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ContentBlockStartEventImplCopyWith<$Res> +abstract class _$$ContentBlockStopEventImplCopyWith<$Res> implements $MessageStreamEventCopyWith<$Res> { - factory _$$ContentBlockStartEventImplCopyWith( - _$ContentBlockStartEventImpl value, - $Res Function(_$ContentBlockStartEventImpl) then) = - __$$ContentBlockStartEventImplCopyWithImpl<$Res>; + factory _$$ContentBlockStopEventImplCopyWith( + _$ContentBlockStopEventImpl value, + $Res Function(_$ContentBlockStopEventImpl) then) = + __$$ContentBlockStopEventImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(name: 'content_block') TextBlock contentBlock, - int index, - MessageStreamEventType type}); + $Res call({int index, MessageStreamEventType type}); } /// @nodoc -class __$$ContentBlockStartEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStartEventImpl> - implements _$$ContentBlockStartEventImplCopyWith<$Res> { - __$$ContentBlockStartEventImplCopyWithImpl( - _$ContentBlockStartEventImpl _value, - $Res Function(_$ContentBlockStartEventImpl) _then) +class __$$ContentBlockStopEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStopEventImpl> + implements _$$ContentBlockStopEventImplCopyWith<$Res> { + __$$ContentBlockStopEventImplCopyWithImpl(_$ContentBlockStopEventImpl _value, + $Res Function(_$ContentBlockStopEventImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? contentBlock = freezed, Object? index = null, Object? type = null, }) { - return _then(_$ContentBlockStartEventImpl( - contentBlock: freezed == contentBlock - ? _value.contentBlock - : contentBlock // ignore: cast_nullable_to_non_nullable - as TextBlock, + return _then(_$ContentBlockStopEventImpl( index: null == index ? _value.index : index // ignore: cast_nullable_to_non_nullable @@ -4727,20 +6892,12 @@ class __$$ContentBlockStartEventImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { - const _$ContentBlockStartEventImpl( - {@JsonKey(name: 'content_block') required this.contentBlock, - required this.index, - required this.type}) +class _$ContentBlockStopEventImpl extends ContentBlockStopEvent { + const _$ContentBlockStopEventImpl({required this.index, required this.type}) : super._(); - factory _$ContentBlockStartEventImpl.fromJson(Map json) => - _$$ContentBlockStartEventImplFromJson(json); - - /// A block of text content. - @override - @JsonKey(name: 'content_block') - final TextBlock contentBlock; + factory _$ContentBlockStopEventImpl.fromJson(Map json) => + _$$ContentBlockStopEventImplFromJson(json); /// The index of the content block. 
@override @@ -4752,31 +6909,28 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { @override String toString() { - return 'MessageStreamEvent.contentBlockStart(contentBlock: $contentBlock, index: $index, type: $type)'; + return 'MessageStreamEvent.contentBlockStop(index: $index, type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ContentBlockStartEventImpl && - const DeepCollectionEquality() - .equals(other.contentBlock, contentBlock) && + other is _$ContentBlockStopEventImpl && (identical(other.index, index) || other.index == index) && (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, - const DeepCollectionEquality().hash(contentBlock), index, type); + int get hashCode => Object.hash(runtimeType, index, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> - get copyWith => __$$ContentBlockStartEventImplCopyWithImpl< - _$ContentBlockStartEventImpl>(this, _$identity); + _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> + get copyWith => __$$ContentBlockStopEventImplCopyWithImpl< + _$ContentBlockStopEventImpl>(this, _$identity); @override @optionalTypeArgs @@ -4788,18 +6942,18 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { messageDelta, required TResult Function(MessageStreamEventType type) messageStop, required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, + @JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type) contentBlockStart, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) + BlockDelta delta, int index, MessageStreamEventType type) contentBlockDelta, required TResult Function(int index, MessageStreamEventType type) contentBlockStop, required TResult Function(MessageStreamEventType type) ping, }) { - return contentBlockStart(contentBlock, index, type); + return contentBlockStop(index, type); } @override @@ -4811,16 +6965,15 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { MessageDeltaUsage usage)? messageDelta, TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, TResult? Function(MessageStreamEventType type)? ping, }) { - return contentBlockStart?.call(contentBlock, index, type); + return contentBlockStop?.call(index, type); } @override @@ -4832,18 +6985,17 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { MessageDeltaUsage usage)? messageDelta, TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? 
+ TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult Function(int index, MessageStreamEventType type)? contentBlockStop, TResult Function(MessageStreamEventType type)? ping, required TResult orElse(), }) { - if (contentBlockStart != null) { - return contentBlockStart(contentBlock, index, type); + if (contentBlockStop != null) { + return contentBlockStop(index, type); } return orElse(); } @@ -4859,7 +7011,7 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { required TResult Function(ContentBlockStopEvent value) contentBlockStop, required TResult Function(PingEvent value) ping, }) { - return contentBlockStart(this); + return contentBlockStop(this); } @override @@ -4873,7 +7025,7 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { TResult? Function(ContentBlockStopEvent value)? contentBlockStop, TResult? Function(PingEvent value)? ping, }) { - return contentBlockStart?.call(this); + return contentBlockStop?.call(this); } @override @@ -4888,34 +7040,29 @@ class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { TResult Function(PingEvent value)? ping, required TResult orElse(), }) { - if (contentBlockStart != null) { - return contentBlockStart(this); + if (contentBlockStop != null) { + return contentBlockStop(this); } return orElse(); } @override Map toJson() { - return _$$ContentBlockStartEventImplToJson( + return _$$ContentBlockStopEventImplToJson( this, ); } } -abstract class ContentBlockStartEvent extends MessageStreamEvent { - const factory ContentBlockStartEvent( - {@JsonKey(name: 'content_block') required final TextBlock contentBlock, - required final int index, - required final MessageStreamEventType - type}) = _$ContentBlockStartEventImpl; - const ContentBlockStartEvent._() : super._(); - - factory ContentBlockStartEvent.fromJson(Map json) = - _$ContentBlockStartEventImpl.fromJson; +abstract class ContentBlockStopEvent extends MessageStreamEvent { + const factory ContentBlockStopEvent( + {required final int index, + required final MessageStreamEventType type}) = + _$ContentBlockStopEventImpl; + const ContentBlockStopEvent._() : super._(); - /// A block of text content. - @JsonKey(name: 'content_block') - TextBlock get contentBlock; + factory ContentBlockStopEvent.fromJson(Map json) = + _$ContentBlockStopEventImpl.fromJson; /// The index of the content block. 
int get index; @@ -4925,82 +7072,50 @@ abstract class ContentBlockStartEvent extends MessageStreamEvent { MessageStreamEventType get type; @override @JsonKey(ignore: true) - _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> + _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ContentBlockDeltaEventImplCopyWith<$Res> +abstract class _$$PingEventImplCopyWith<$Res> implements $MessageStreamEventCopyWith<$Res> { - factory _$$ContentBlockDeltaEventImplCopyWith( - _$ContentBlockDeltaEventImpl value, - $Res Function(_$ContentBlockDeltaEventImpl) then) = - __$$ContentBlockDeltaEventImplCopyWithImpl<$Res>; + factory _$$PingEventImplCopyWith( + _$PingEventImpl value, $Res Function(_$PingEventImpl) then) = + __$$PingEventImplCopyWithImpl<$Res>; @override @useResult - $Res call({TextBlockDelta delta, int index, MessageStreamEventType type}); - - $TextBlockDeltaCopyWith<$Res> get delta; + $Res call({MessageStreamEventType type}); } /// @nodoc -class __$$ContentBlockDeltaEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockDeltaEventImpl> - implements _$$ContentBlockDeltaEventImplCopyWith<$Res> { - __$$ContentBlockDeltaEventImplCopyWithImpl( - _$ContentBlockDeltaEventImpl _value, - $Res Function(_$ContentBlockDeltaEventImpl) _then) +class __$$PingEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$PingEventImpl> + implements _$$PingEventImplCopyWith<$Res> { + __$$PingEventImplCopyWithImpl( + _$PingEventImpl _value, $Res Function(_$PingEventImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? delta = null, - Object? index = null, Object? type = null, }) { - return _then(_$ContentBlockDeltaEventImpl( - delta: null == delta - ? _value.delta - : delta // ignore: cast_nullable_to_non_nullable - as TextBlockDelta, - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$PingEventImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as MessageStreamEventType, )); } - - @override - @pragma('vm:prefer-inline') - $TextBlockDeltaCopyWith<$Res> get delta { - return $TextBlockDeltaCopyWith<$Res>(_value.delta, (value) { - return _then(_value.copyWith(delta: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { - const _$ContentBlockDeltaEventImpl( - {required this.delta, required this.index, required this.type}) - : super._(); - - factory _$ContentBlockDeltaEventImpl.fromJson(Map json) => - _$$ContentBlockDeltaEventImplFromJson(json); - - /// A delta in a streaming text block. - @override - final TextBlockDelta delta; +class _$PingEventImpl extends PingEvent { + const _$PingEventImpl({required this.type}) : super._(); - /// The index of the content block. - @override - final int index; + factory _$PingEventImpl.fromJson(Map json) => + _$$PingEventImplFromJson(json); /// The type of a streaming event. 
@override @@ -5008,29 +7123,26 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { @override String toString() { - return 'MessageStreamEvent.contentBlockDelta(delta: $delta, index: $index, type: $type)'; + return 'MessageStreamEvent.ping(type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ContentBlockDeltaEventImpl && - (identical(other.delta, delta) || other.delta == delta) && - (identical(other.index, index) || other.index == index) && + other is _$PingEventImpl && (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, delta, index, type); + int get hashCode => Object.hash(runtimeType, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> - get copyWith => __$$ContentBlockDeltaEventImplCopyWithImpl< - _$ContentBlockDeltaEventImpl>(this, _$identity); + _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => + __$$PingEventImplCopyWithImpl<_$PingEventImpl>(this, _$identity); @override @optionalTypeArgs @@ -5042,18 +7154,18 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { messageDelta, required TResult Function(MessageStreamEventType type) messageStop, required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, + @JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type) contentBlockStart, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) + BlockDelta delta, int index, MessageStreamEventType type) contentBlockDelta, required TResult Function(int index, MessageStreamEventType type) contentBlockStop, required TResult Function(MessageStreamEventType type) ping, }) { - return contentBlockDelta(delta, index, type); + return ping(type); } @override @@ -5065,16 +7177,15 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { MessageDeltaUsage usage)? messageDelta, TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, TResult? Function(MessageStreamEventType type)? ping, }) { - return contentBlockDelta?.call(delta, index, type); + return ping?.call(type); } @override @@ -5086,18 +7197,17 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { MessageDeltaUsage usage)? messageDelta, TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, int index, MessageStreamEventType type)? contentBlockStart, - TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? contentBlockDelta, TResult Function(int index, MessageStreamEventType type)? contentBlockStop, TResult Function(MessageStreamEventType type)? 
ping, required TResult orElse(), }) { - if (contentBlockDelta != null) { - return contentBlockDelta(delta, index, type); + if (ping != null) { + return ping(type); } return orElse(); } @@ -5113,7 +7223,7 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { required TResult Function(ContentBlockStopEvent value) contentBlockStop, required TResult Function(PingEvent value) ping, }) { - return contentBlockDelta(this); + return ping(this); } @override @@ -5127,7 +7237,7 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { TResult? Function(ContentBlockStopEvent value)? contentBlockStop, TResult? Function(PingEvent value)? ping, }) { - return contentBlockDelta?.call(this); + return ping?.call(this); } @override @@ -5142,193 +7252,263 @@ class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { TResult Function(PingEvent value)? ping, required TResult orElse(), }) { - if (contentBlockDelta != null) { - return contentBlockDelta(this); + if (ping != null) { + return ping(this); } return orElse(); } @override Map toJson() { - return _$$ContentBlockDeltaEventImplToJson( + return _$$PingEventImplToJson( this, ); } } -abstract class ContentBlockDeltaEvent extends MessageStreamEvent { - const factory ContentBlockDeltaEvent( - {required final TextBlockDelta delta, - required final int index, - required final MessageStreamEventType type}) = - _$ContentBlockDeltaEventImpl; - const ContentBlockDeltaEvent._() : super._(); - - factory ContentBlockDeltaEvent.fromJson(Map json) = - _$ContentBlockDeltaEventImpl.fromJson; +abstract class PingEvent extends MessageStreamEvent { + const factory PingEvent({required final MessageStreamEventType type}) = + _$PingEventImpl; + const PingEvent._() : super._(); - /// A delta in a streaming text block. - TextBlockDelta get delta; + factory PingEvent.fromJson(Map json) = + _$PingEventImpl.fromJson; - /// The index of the content block. - int get index; @override /// The type of a streaming event. MessageStreamEventType get type; @override @JsonKey(ignore: true) - _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> - get copyWith => throw _privateConstructorUsedError; + _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +BlockDelta _$BlockDeltaFromJson(Map json) { + switch (json['type']) { + case 'text_delta': + return TextBlockDelta.fromJson(json); + case 'input_json_delta': + return InputJsonBlockDelta.fromJson(json); + + default: + throw CheckedFromJsonException( + json, 'type', 'BlockDelta', 'Invalid union type "${json['type']}"!'); + } } /// @nodoc -abstract class _$$ContentBlockStopEventImplCopyWith<$Res> - implements $MessageStreamEventCopyWith<$Res> { - factory _$$ContentBlockStopEventImplCopyWith( - _$ContentBlockStopEventImpl value, - $Res Function(_$ContentBlockStopEventImpl) then) = - __$$ContentBlockStopEventImplCopyWithImpl<$Res>; +mixin _$BlockDelta { + /// The type of content block. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) textDelta, + required TResult Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type) + inputJsonDelta, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? textDelta, + TResult? Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? 
+ inputJsonDelta, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? textDelta, + TResult Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlockDelta value) textDelta, + required TResult Function(InputJsonBlockDelta value) inputJsonDelta, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlockDelta value)? textDelta, + TResult? Function(InputJsonBlockDelta value)? inputJsonDelta, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlockDelta value)? textDelta, + TResult Function(InputJsonBlockDelta value)? inputJsonDelta, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $BlockDeltaCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $BlockDeltaCopyWith<$Res> { + factory $BlockDeltaCopyWith( + BlockDelta value, $Res Function(BlockDelta) then) = + _$BlockDeltaCopyWithImpl<$Res, BlockDelta>; + @useResult + $Res call({String type}); +} + +/// @nodoc +class _$BlockDeltaCopyWithImpl<$Res, $Val extends BlockDelta> + implements $BlockDeltaCopyWith<$Res> { + _$BlockDeltaCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$TextBlockDeltaImplCopyWith<$Res> + implements $BlockDeltaCopyWith<$Res> { + factory _$$TextBlockDeltaImplCopyWith(_$TextBlockDeltaImpl value, + $Res Function(_$TextBlockDeltaImpl) then) = + __$$TextBlockDeltaImplCopyWithImpl<$Res>; @override @useResult - $Res call({int index, MessageStreamEventType type}); + $Res call({String text, String type}); } /// @nodoc -class __$$ContentBlockStopEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStopEventImpl> - implements _$$ContentBlockStopEventImplCopyWith<$Res> { - __$$ContentBlockStopEventImplCopyWithImpl(_$ContentBlockStopEventImpl _value, - $Res Function(_$ContentBlockStopEventImpl) _then) +class __$$TextBlockDeltaImplCopyWithImpl<$Res> + extends _$BlockDeltaCopyWithImpl<$Res, _$TextBlockDeltaImpl> + implements _$$TextBlockDeltaImplCopyWith<$Res> { + __$$TextBlockDeltaImplCopyWithImpl( + _$TextBlockDeltaImpl _value, $Res Function(_$TextBlockDeltaImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, + Object? text = null, Object? type = null, }) { - return _then(_$ContentBlockStopEventImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$TextBlockDeltaImpl( + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, type: null == type ? 
_value.type : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, + as String, )); } } /// @nodoc @JsonSerializable() -class _$ContentBlockStopEventImpl extends ContentBlockStopEvent { - const _$ContentBlockStopEventImpl({required this.index, required this.type}) +class _$TextBlockDeltaImpl extends TextBlockDelta { + const _$TextBlockDeltaImpl({required this.text, required this.type}) : super._(); - factory _$ContentBlockStopEventImpl.fromJson(Map json) => - _$$ContentBlockStopEventImplFromJson(json); + factory _$TextBlockDeltaImpl.fromJson(Map json) => + _$$TextBlockDeltaImplFromJson(json); - /// The index of the content block. + /// The text delta. @override - final int index; + final String text; - /// The type of a streaming event. + /// The type of content block. @override - final MessageStreamEventType type; + final String type; @override String toString() { - return 'MessageStreamEvent.contentBlockStop(index: $index, type: $type)'; + return 'BlockDelta.textDelta(text: $text, type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ContentBlockStopEventImpl && - (identical(other.index, index) || other.index == index) && + other is _$TextBlockDeltaImpl && + (identical(other.text, text) || other.text == text) && (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, index, type); + int get hashCode => Object.hash(runtimeType, text, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> - get copyWith => __$$ContentBlockStopEventImplCopyWithImpl< - _$ContentBlockStopEventImpl>(this, _$identity); + _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => + __$$TextBlockDeltaImplCopyWithImpl<_$TextBlockDeltaImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, + required TResult Function(String text, String type) textDelta, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, - required TResult Function(int index, MessageStreamEventType type) - contentBlockStop, - required TResult Function(MessageStreamEventType type) ping, + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type) + inputJsonDelta, }) { - return contentBlockStop(index, type); + return textDelta(text, type); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(Message message, MessageStreamEventType type)? - messageStart, - TResult? Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, + TResult? Function(String text, String type)? textDelta, TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? 
- contentBlockDelta, - TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult? Function(MessageStreamEventType type)? ping, + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, }) { - return contentBlockStop?.call(index, type); + return textDelta?.call(text, type); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(Message message, MessageStreamEventType type)? - messageStart, - TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, + TResult Function(String text, String type)? textDelta, TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult Function(MessageStreamEventType type)? ping, + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, required TResult orElse(), }) { - if (contentBlockStop != null) { - return contentBlockStop(index, type); + if (textDelta != null) { + return textDelta(text, type); } return orElse(); } @@ -5336,213 +7516,188 @@ class _$ContentBlockStopEventImpl extends ContentBlockStopEvent { @override @optionalTypeArgs TResult map({ - required TResult Function(MessageStartEvent value) messageStart, - required TResult Function(MessageDeltaEvent value) messageDelta, - required TResult Function(MessageStopEvent value) messageStop, - required TResult Function(ContentBlockStartEvent value) contentBlockStart, - required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, - required TResult Function(ContentBlockStopEvent value) contentBlockStop, - required TResult Function(PingEvent value) ping, + required TResult Function(TextBlockDelta value) textDelta, + required TResult Function(InputJsonBlockDelta value) inputJsonDelta, }) { - return contentBlockStop(this); + return textDelta(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageStartEvent value)? messageStart, - TResult? Function(MessageDeltaEvent value)? messageDelta, - TResult? Function(MessageStopEvent value)? messageStop, - TResult? Function(ContentBlockStartEvent value)? contentBlockStart, - TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult? Function(ContentBlockStopEvent value)? contentBlockStop, - TResult? Function(PingEvent value)? ping, + TResult? Function(TextBlockDelta value)? textDelta, + TResult? Function(InputJsonBlockDelta value)? inputJsonDelta, }) { - return contentBlockStop?.call(this); + return textDelta?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageStartEvent value)? messageStart, - TResult Function(MessageDeltaEvent value)? messageDelta, - TResult Function(MessageStopEvent value)? messageStop, - TResult Function(ContentBlockStartEvent value)? contentBlockStart, - TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult Function(ContentBlockStopEvent value)? contentBlockStop, - TResult Function(PingEvent value)? ping, + TResult Function(TextBlockDelta value)? textDelta, + TResult Function(InputJsonBlockDelta value)? 
inputJsonDelta, required TResult orElse(), }) { - if (contentBlockStop != null) { - return contentBlockStop(this); + if (textDelta != null) { + return textDelta(this); } return orElse(); } @override Map toJson() { - return _$$ContentBlockStopEventImplToJson( + return _$$TextBlockDeltaImplToJson( this, ); } } -abstract class ContentBlockStopEvent extends MessageStreamEvent { - const factory ContentBlockStopEvent( - {required final int index, - required final MessageStreamEventType type}) = - _$ContentBlockStopEventImpl; - const ContentBlockStopEvent._() : super._(); +abstract class TextBlockDelta extends BlockDelta { + const factory TextBlockDelta( + {required final String text, + required final String type}) = _$TextBlockDeltaImpl; + const TextBlockDelta._() : super._(); - factory ContentBlockStopEvent.fromJson(Map json) = - _$ContentBlockStopEventImpl.fromJson; + factory TextBlockDelta.fromJson(Map json) = + _$TextBlockDeltaImpl.fromJson; - /// The index of the content block. - int get index; + /// The text delta. + String get text; @override - /// The type of a streaming event. - MessageStreamEventType get type; + /// The type of content block. + String get type; @override @JsonKey(ignore: true) - _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> - get copyWith => throw _privateConstructorUsedError; + _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$PingEventImplCopyWith<$Res> - implements $MessageStreamEventCopyWith<$Res> { - factory _$$PingEventImplCopyWith( - _$PingEventImpl value, $Res Function(_$PingEventImpl) then) = - __$$PingEventImplCopyWithImpl<$Res>; +abstract class _$$InputJsonBlockDeltaImplCopyWith<$Res> + implements $BlockDeltaCopyWith<$Res> { + factory _$$InputJsonBlockDeltaImplCopyWith(_$InputJsonBlockDeltaImpl value, + $Res Function(_$InputJsonBlockDeltaImpl) then) = + __$$InputJsonBlockDeltaImplCopyWithImpl<$Res>; @override @useResult - $Res call({MessageStreamEventType type}); + $Res call( + {@JsonKey(name: 'partial_json', includeIfNull: false) String? partialJson, + String type}); } /// @nodoc -class __$$PingEventImplCopyWithImpl<$Res> - extends _$MessageStreamEventCopyWithImpl<$Res, _$PingEventImpl> - implements _$$PingEventImplCopyWith<$Res> { - __$$PingEventImplCopyWithImpl( - _$PingEventImpl _value, $Res Function(_$PingEventImpl) _then) +class __$$InputJsonBlockDeltaImplCopyWithImpl<$Res> + extends _$BlockDeltaCopyWithImpl<$Res, _$InputJsonBlockDeltaImpl> + implements _$$InputJsonBlockDeltaImplCopyWith<$Res> { + __$$InputJsonBlockDeltaImplCopyWithImpl(_$InputJsonBlockDeltaImpl _value, + $Res Function(_$InputJsonBlockDeltaImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ + Object? partialJson = freezed, Object? type = null, }) { - return _then(_$PingEventImpl( + return _then(_$InputJsonBlockDeltaImpl( + partialJson: freezed == partialJson + ? _value.partialJson + : partialJson // ignore: cast_nullable_to_non_nullable + as String?, type: null == type ? 
_value.type : type // ignore: cast_nullable_to_non_nullable - as MessageStreamEventType, + as String, )); } } /// @nodoc @JsonSerializable() -class _$PingEventImpl extends PingEvent { - const _$PingEventImpl({required this.type}) : super._(); +class _$InputJsonBlockDeltaImpl extends InputJsonBlockDelta { + const _$InputJsonBlockDeltaImpl( + {@JsonKey(name: 'partial_json', includeIfNull: false) this.partialJson, + required this.type}) + : super._(); - factory _$PingEventImpl.fromJson(Map json) => - _$$PingEventImplFromJson(json); + factory _$InputJsonBlockDeltaImpl.fromJson(Map json) => + _$$InputJsonBlockDeltaImplFromJson(json); - /// The type of a streaming event. + /// The partial JSON delta. @override - final MessageStreamEventType type; + @JsonKey(name: 'partial_json', includeIfNull: false) + final String? partialJson; + + /// The type of content block. + @override + final String type; @override String toString() { - return 'MessageStreamEvent.ping(type: $type)'; + return 'BlockDelta.inputJsonDelta(partialJson: $partialJson, type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$PingEventImpl && + other is _$InputJsonBlockDeltaImpl && + (identical(other.partialJson, partialJson) || + other.partialJson == partialJson) && (identical(other.type, type) || other.type == type)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type); + int get hashCode => Object.hash(runtimeType, partialJson, type); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => - __$$PingEventImplCopyWithImpl<_$PingEventImpl>(this, _$identity); + _$$InputJsonBlockDeltaImplCopyWith<_$InputJsonBlockDeltaImpl> get copyWith => + __$$InputJsonBlockDeltaImplCopyWithImpl<_$InputJsonBlockDeltaImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(Message message, MessageStreamEventType type) - messageStart, - required TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage) - messageDelta, - required TResult Function(MessageStreamEventType type) messageStop, - required TResult Function( - @JsonKey(name: 'content_block') TextBlock contentBlock, - int index, - MessageStreamEventType type) - contentBlockStart, + required TResult Function(String text, String type) textDelta, required TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type) - contentBlockDelta, - required TResult Function(int index, MessageStreamEventType type) - contentBlockStop, - required TResult Function(MessageStreamEventType type) ping, + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type) + inputJsonDelta, }) { - return ping(type); + return inputJsonDelta(partialJson, type); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(Message message, MessageStreamEventType type)? - messageStart, - TResult? Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult? Function(MessageStreamEventType type)? messageStop, - TResult? Function(@JsonKey(name: 'content_block') TextBlock contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, + TResult? Function(String text, String type)? textDelta, TResult? Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult? 
Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult? Function(MessageStreamEventType type)? ping, + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, }) { - return ping?.call(type); + return inputJsonDelta?.call(partialJson, type); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(Message message, MessageStreamEventType type)? - messageStart, - TResult Function(MessageDelta delta, MessageStreamEventType type, - MessageDeltaUsage usage)? - messageDelta, - TResult Function(MessageStreamEventType type)? messageStop, - TResult Function(@JsonKey(name: 'content_block') TextBlock contentBlock, - int index, MessageStreamEventType type)? - contentBlockStart, + TResult Function(String text, String type)? textDelta, TResult Function( - TextBlockDelta delta, int index, MessageStreamEventType type)? - contentBlockDelta, - TResult Function(int index, MessageStreamEventType type)? contentBlockStop, - TResult Function(MessageStreamEventType type)? ping, + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, required TResult orElse(), }) { - if (ping != null) { - return ping(type); + if (inputJsonDelta != null) { + return inputJsonDelta(partialJson, type); } return orElse(); } @@ -5550,71 +7705,61 @@ class _$PingEventImpl extends PingEvent { @override @optionalTypeArgs TResult map({ - required TResult Function(MessageStartEvent value) messageStart, - required TResult Function(MessageDeltaEvent value) messageDelta, - required TResult Function(MessageStopEvent value) messageStop, - required TResult Function(ContentBlockStartEvent value) contentBlockStart, - required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, - required TResult Function(ContentBlockStopEvent value) contentBlockStop, - required TResult Function(PingEvent value) ping, + required TResult Function(TextBlockDelta value) textDelta, + required TResult Function(InputJsonBlockDelta value) inputJsonDelta, }) { - return ping(this); + return inputJsonDelta(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageStartEvent value)? messageStart, - TResult? Function(MessageDeltaEvent value)? messageDelta, - TResult? Function(MessageStopEvent value)? messageStop, - TResult? Function(ContentBlockStartEvent value)? contentBlockStart, - TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult? Function(ContentBlockStopEvent value)? contentBlockStop, - TResult? Function(PingEvent value)? ping, + TResult? Function(TextBlockDelta value)? textDelta, + TResult? Function(InputJsonBlockDelta value)? inputJsonDelta, }) { - return ping?.call(this); + return inputJsonDelta?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageStartEvent value)? messageStart, - TResult Function(MessageDeltaEvent value)? messageDelta, - TResult Function(MessageStopEvent value)? messageStop, - TResult Function(ContentBlockStartEvent value)? contentBlockStart, - TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, - TResult Function(ContentBlockStopEvent value)? contentBlockStop, - TResult Function(PingEvent value)? ping, + TResult Function(TextBlockDelta value)? textDelta, + TResult Function(InputJsonBlockDelta value)? 
inputJsonDelta, required TResult orElse(), }) { - if (ping != null) { - return ping(this); + if (inputJsonDelta != null) { + return inputJsonDelta(this); } return orElse(); } @override Map toJson() { - return _$$PingEventImplToJson( + return _$$InputJsonBlockDeltaImplToJson( this, ); } } -abstract class PingEvent extends MessageStreamEvent { - const factory PingEvent({required final MessageStreamEventType type}) = - _$PingEventImpl; - const PingEvent._() : super._(); +abstract class InputJsonBlockDelta extends BlockDelta { + const factory InputJsonBlockDelta( + {@JsonKey(name: 'partial_json', includeIfNull: false) + final String? partialJson, + required final String type}) = _$InputJsonBlockDeltaImpl; + const InputJsonBlockDelta._() : super._(); - factory PingEvent.fromJson(Map json) = - _$PingEventImpl.fromJson; + factory InputJsonBlockDelta.fromJson(Map json) = + _$InputJsonBlockDeltaImpl.fromJson; + /// The partial JSON delta. + @JsonKey(name: 'partial_json', includeIfNull: false) + String? get partialJson; @override - /// The type of a streaming event. - MessageStreamEventType get type; + /// The type of content block. + String get type; @override @JsonKey(ignore: true) - _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => + _$$InputJsonBlockDeltaImplCopyWith<_$InputJsonBlockDeltaImpl> get copyWith => throw _privateConstructorUsedError; } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart index b08b072f..f3dded29 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart @@ -25,6 +25,12 @@ _$CreateMessageRequestImpl _$$CreateMessageRequestImplFromJson( .toList(), system: json['system'] as String?, temperature: (json['temperature'] as num?)?.toDouble(), + toolChoice: json['tool_choice'] == null + ? null + : ToolChoice.fromJson(json['tool_choice'] as Map), + tools: (json['tools'] as List?) + ?.map((e) => Tool.fromJson(e as Map)) + .toList(), topK: (json['top_k'] as num?)?.toInt(), topP: (json['top_p'] as num?)?.toDouble(), stream: json['stream'] as bool? ?? 
false, @@ -48,6 +54,8 @@ Map _$$CreateMessageRequestImplToJson( writeNotNull('stop_sequences', instance.stopSequences); writeNotNull('system', instance.system); writeNotNull('temperature', instance.temperature); + writeNotNull('tool_choice', instance.toolChoice?.toJson()); + writeNotNull('tools', instance.tools?.map((e) => e.toJson()).toList()); writeNotNull('top_k', instance.topK); writeNotNull('top_p', instance.topP); val['stream'] = instance.stream; @@ -69,11 +77,12 @@ Map _$$ModelEnumerationImplToJson( }; const _$ModelsEnumMap = { + Models.claude35Sonnet20240620: 'claude-3-5-sonnet-20240620', + Models.claude3Haiku20240307: 'claude-3-haiku-20240307', Models.claude3Opus20240229: 'claude-3-opus-20240229', Models.claude3Sonnet20240229: 'claude-3-sonnet-20240229', - Models.claude3Haiku20240307: 'claude-3-haiku-20240307', - Models.claude21: 'claude-2.1', Models.claude20: 'claude-2.0', + Models.claude21: 'claude-2.1', Models.claudeInstant12: 'claude-instant-1.2', }; @@ -109,6 +118,33 @@ Map _$$CreateMessageRequestMetadataImplToJson( return val; } +_$ToolChoiceImpl _$$ToolChoiceImplFromJson(Map json) => + _$ToolChoiceImpl( + type: $enumDecode(_$ToolChoiceTypeEnumMap, json['type']), + name: json['name'] as String?, + ); + +Map _$$ToolChoiceImplToJson(_$ToolChoiceImpl instance) { + final val = { + 'type': _$ToolChoiceTypeEnumMap[instance.type]!, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('name', instance.name); + return val; +} + +const _$ToolChoiceTypeEnumMap = { + ToolChoiceType.auto: 'auto', + ToolChoiceType.any: 'any', + ToolChoiceType.tool: 'tool', +}; + _$MessageImpl _$$MessageImplFromJson(Map json) => _$MessageImpl( id: json['id'] as String?, @@ -153,6 +189,7 @@ const _$StopReasonEnumMap = { StopReason.endTurn: 'end_turn', StopReason.maxTokens: 'max_tokens', StopReason.stopSequence: 'stop_sequence', + StopReason.toolUse: 'tool_use', }; _$MessageContentListBlockImpl _$$MessageContentListBlockImplFromJson( @@ -185,6 +222,28 @@ Map _$$MessageContentStringImplToJson( 'runtimeType': instance.$type, }; +_$ToolImpl _$$ToolImplFromJson(Map json) => _$ToolImpl( + name: json['name'] as String, + description: json['description'] as String?, + inputSchema: json['input_schema'] as Map, + ); + +Map _$$ToolImplToJson(_$ToolImpl instance) { + final val = { + 'name': instance.name, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('description', instance.description); + val['input_schema'] = instance.inputSchema; + return val; +} + _$ImageBlockSourceImpl _$$ImageBlockSourceImplFromJson( Map json) => _$ImageBlockSourceImpl( @@ -257,19 +316,6 @@ Map _$$MessageDeltaUsageImplToJson( 'output_tokens': instance.outputTokens, }; -_$TextBlockDeltaImpl _$$TextBlockDeltaImplFromJson(Map json) => - _$TextBlockDeltaImpl( - text: json['text'] as String, - type: json['type'] as String, - ); - -Map _$$TextBlockDeltaImplToJson( - _$TextBlockDeltaImpl instance) => - { - 'text': instance.text, - 'type': instance.type, - }; - _$TextBlockImpl _$$TextBlockImplFromJson(Map json) => _$TextBlockImpl( text: json['text'] as String, @@ -294,6 +340,81 @@ Map _$$ImageBlockImplToJson(_$ImageBlockImpl instance) => 'type': instance.type, }; +_$ToolUseBlockImpl _$$ToolUseBlockImplFromJson(Map json) => + _$ToolUseBlockImpl( + id: json['id'] as String, + name: json['name'] as String, + input: json['input'] as Map, + type: json['type'] as String? ?? 
'tool_use', + ); + +Map _$$ToolUseBlockImplToJson(_$ToolUseBlockImpl instance) => + { + 'id': instance.id, + 'name': instance.name, + 'input': instance.input, + 'type': instance.type, + }; + +_$ToolResultBlockImpl _$$ToolResultBlockImplFromJson( + Map json) => + _$ToolResultBlockImpl( + toolUseId: json['tool_use_id'] as String, + content: + const _ToolResultBlockContentConverter().fromJson(json['content']), + isError: json['is_error'] as bool?, + type: json['type'] as String? ?? 'tool_result', + ); + +Map _$$ToolResultBlockImplToJson( + _$ToolResultBlockImpl instance) { + final val = { + 'tool_use_id': instance.toolUseId, + 'content': + const _ToolResultBlockContentConverter().toJson(instance.content), + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('is_error', instance.isError); + val['type'] = instance.type; + return val; +} + +_$ToolResultBlockContentListBlockImpl + _$$ToolResultBlockContentListBlockImplFromJson(Map json) => + _$ToolResultBlockContentListBlockImpl( + (json['value'] as List) + .map((e) => Block.fromJson(e as Map)) + .toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$ToolResultBlockContentListBlockImplToJson( + _$ToolResultBlockContentListBlockImpl instance) => + { + 'value': instance.value.map((e) => e.toJson()).toList(), + 'runtimeType': instance.$type, + }; + +_$ToolResultBlockContentStringImpl _$$ToolResultBlockContentStringImplFromJson( + Map json) => + _$ToolResultBlockContentStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$ToolResultBlockContentStringImplToJson( + _$ToolResultBlockContentStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + _$MessageStartEventImpl _$$MessageStartEventImplFromJson( Map json) => _$MessageStartEventImpl( @@ -350,7 +471,7 @@ _$ContentBlockStartEventImpl _$$ContentBlockStartEventImplFromJson( Map json) => _$ContentBlockStartEventImpl( contentBlock: - TextBlock.fromJson(json['content_block'] as Map), + Block.fromJson(json['content_block'] as Map), index: (json['index'] as num).toInt(), type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), ); @@ -366,7 +487,7 @@ Map _$$ContentBlockStartEventImplToJson( _$ContentBlockDeltaEventImpl _$$ContentBlockDeltaEventImplFromJson( Map json) => _$ContentBlockDeltaEventImpl( - delta: TextBlockDelta.fromJson(json['delta'] as Map), + delta: BlockDelta.fromJson(json['delta'] as Map), index: (json['index'] as num).toInt(), type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), ); @@ -402,3 +523,38 @@ Map _$$PingEventImplToJson(_$PingEventImpl instance) => { 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, }; + +_$TextBlockDeltaImpl _$$TextBlockDeltaImplFromJson(Map json) => + _$TextBlockDeltaImpl( + text: json['text'] as String, + type: json['type'] as String, + ); + +Map _$$TextBlockDeltaImplToJson( + _$TextBlockDeltaImpl instance) => + { + 'text': instance.text, + 'type': instance.type, + }; + +_$InputJsonBlockDeltaImpl _$$InputJsonBlockDeltaImplFromJson( + Map json) => + _$InputJsonBlockDeltaImpl( + partialJson: json['partial_json'] as String?, + type: json['type'] as String, + ); + +Map _$$InputJsonBlockDeltaImplToJson( + _$InputJsonBlockDeltaImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('partial_json', instance.partialJson); + val['type'] = instance.type; + return val; +} diff --git 
a/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart index 331c6207..d1950d33 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart @@ -25,4 +25,6 @@ enum StopReason { maxTokens, @JsonValue('stop_sequence') stopSequence, + @JsonValue('tool_use') + toolUse, } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart deleted file mode 100644 index fa05ffce..00000000 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/text_block_delta.dart +++ /dev/null @@ -1,44 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of anthropic_schema; - -// ========================================== -// CLASS: TextBlockDelta -// ========================================== - -/// A delta in a streaming text block. -@freezed -class TextBlockDelta with _$TextBlockDelta { - const TextBlockDelta._(); - - /// Factory constructor for TextBlockDelta - const factory TextBlockDelta({ - /// The text delta. - required String text, - - /// The type of content block. - required String type, - }) = _TextBlockDelta; - - /// Object construction from a JSON representation - factory TextBlockDelta.fromJson(Map json) => - _$TextBlockDeltaFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['text', 'type']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'text': text, - 'type': type, - }; - } -} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart new file mode 100644 index 00000000..578701a9 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart @@ -0,0 +1,59 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: Tool +// ========================================== + +/// A tool the model may use. +@freezed +class Tool with _$Tool { + const Tool._(); + + /// Factory constructor for Tool + const factory Tool({ + /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + required String name, + + /// Description of what this tool does. + /// + /// Tool descriptions should be as detailed as possible. The more information that + /// the model has about what the tool is and how to use it, the better it will + /// perform. You can use natural language descriptions to reinforce important + /// aspects of the tool input JSON schema. + @JsonKey(includeIfNull: false) String? description, + + /// [JSON schema](https://json-schema.org/) for this tool's input. + /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. 
+ @JsonKey(name: 'input_schema') required Map inputSchema, + }) = _Tool; + + /// Object construction from a JSON representation + factory Tool.fromJson(Map json) => _$ToolFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'name', + 'description', + 'input_schema' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'name': name, + 'description': description, + 'input_schema': inputSchema, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart new file mode 100644 index 00000000..cb3d65eb --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart @@ -0,0 +1,54 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: ToolChoice +// ========================================== + +/// How the model should use the provided tools. The model can use a specific tool, +/// any available tool, or decide by itself. +/// +/// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. +/// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. +/// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. +@freezed +class ToolChoice with _$ToolChoice { + const ToolChoice._(); + + /// Factory constructor for ToolChoice + const factory ToolChoice({ + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + required ToolChoiceType type, + + /// The name of the tool to use. + @JsonKey(includeIfNull: false) String? name, + }) = _ToolChoice; + + /// Object construction from a JSON representation + factory ToolChoice.fromJson(Map json) => + _$ToolChoiceFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['type', 'name']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'type': type, + 'name': name, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart new file mode 100644 index 00000000..22b88c4d --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart @@ -0,0 +1,24 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// ENUM: ToolChoiceType +// ========================================== + +/// How the model should use the provided tools. 
The model can use a specific tool, +/// any available tool, or decide by itself. +/// +/// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. +/// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. +/// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. +enum ToolChoiceType { + @JsonValue('auto') + auto, + @JsonValue('any') + any, + @JsonValue('tool') + tool, +} diff --git a/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml b/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml index a3f60e70..5ad1f3db 100644 --- a/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml +++ b/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml @@ -57,7 +57,7 @@ components: See [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. - example: "claude-3-opus-20240229" + example: "claude-3-5-sonnet-20240620" anyOf: - type: string description: The ID of the model to use for this request. @@ -66,11 +66,12 @@ components: description: | Available models. Mind that the list may not be exhaustive nor up-to-date. enum: + - claude-3-5-sonnet-20240620 + - claude-3-haiku-20240307 - claude-3-opus-20240229 - claude-3-sonnet-20240229 - - claude-3-haiku-20240307 - - claude-2.1 - claude-2.0 + - claude-2.1 - claude-instant-1.2 messages: type: array @@ -89,6 +90,9 @@ components: If the final message uses the `assistant` role, the response content will continue immediately from the content in that message. This can be used to constrain part of the model's response. + + See [message content](https://docs.anthropic.com/en/api/messages-content) for + details on how to construct valid message objects. Example with a single `user` message: @@ -208,6 +212,81 @@ components: Note that even with `temperature` of `0.0`, the results will not be fully deterministic. + tool_choice: + $ref: '#/components/schemas/ToolChoice' + tools: + type: array + description: | + Definitions of tools that the model may use. + + If you include `tools` in your API request, the model may return `tool_use` + content blocks that represent the model's use of those tools. You can then run + those tools using the tool input generated by the model and then optionally + return results back to the model using `tool_result` content blocks. + + Each tool definition includes: + + - `name`: Name of the tool. + - `description`: Optional, but strongly-recommended description of the tool. + - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + shape that the model will produce in `tool_use` output content blocks. + + For example, if you defined `tools` as: + + ```json + [ + { + "name": "get_stock_price", + "description": "Get the current stock price for a given ticker symbol.", + "input_schema": { + "type": "object", + "properties": { + "ticker": { + "type": "string", + "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." 
+ } + }, + "required": ["ticker"] + } + } + ] + ``` + + And then asked the model "What's the S&P 500 at today?", the model might produce + `tool_use` content blocks in the response like this: + + ```json + [ + { + "type": "tool_use", + "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + "name": "get_stock_price", + "input": { "ticker": "^GSPC" } + } + ] + ``` + + You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + input, and return the following back to the model in a subsequent `user` + message: + + ```json + [ + { + "type": "tool_result", + "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + "content": "259.75 USD" + } + ] + ``` + + Tools can be used for workflows that include running client-side tools and + functions, or more generally whenever you want the model to produce a particular + JSON structure of output. + + See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + items: + $ref: '#/components/schemas/Tool' top_k: type: integer description: | @@ -254,6 +333,36 @@ components: This should be a uuid, hash value, or other opaque identifier. Anthropic may use this id to help detect abuse. Do not include any identifying information such as name, email address, or phone number. + ToolChoice: + type: object + description: | + How the model should use the provided tools. The model can use a specific tool, + any available tool, or decide by itself. + + - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + properties: + type: + $ref: "#/components/schemas/ToolChoiceType" + name: + type: string + description: The name of the tool to use. + required: + - type + ToolChoiceType: + type: string + description: | + How the model should use the provided tools. The model can use a specific tool, + any available tool, or decide by itself. + + - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + enum: + - auto + - any + - tool Message: type: object description: A message in a chat conversation. @@ -302,13 +411,42 @@ components: type: string description: The role of the messages author. enum: - - user - - assistant + - user + - assistant + Tool: + type: object + description: A tool the model may use. + properties: + name: + type: string + description: The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + description: + type: string + description: | + Description of what this tool does. + + Tool descriptions should be as detailed as possible. The more information that + the model has about what the tool is and how to use it, the better it will + perform. You can use natural language descriptions to reinforce important + aspects of the tool input JSON schema. + input_schema: + type: object + description: | + [JSON schema](https://json-schema.org/) for this tool's input. + + This defines the shape of the `input` that your tool accepts and that the model + will produce. + additionalProperties: true + required: + - name + - input_schema Block: description: A block of content in a message. 
oneOf: - $ref: "#/components/schemas/TextBlock" - $ref: "#/components/schemas/ImageBlock" + - $ref: "#/components/schemas/ToolUseBlock" + - $ref: "#/components/schemas/ToolResultBlock" discriminator: propertyName: type TextBlock: @@ -360,6 +498,61 @@ components: - data - media_type - type + ToolUseBlock: + type: object + description: The tool the model wants to use. + properties: + id: + type: string + description: | + A unique identifier for this particular tool use block. + This will be used to match up the tool results later. + example: toolu_01A09q90qw90lq917835lq9 + name: + type: string + description: The name of the tool being used. + example: get_weather + input: + type: object + description: An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. + additionalProperties: true + type: + type: string + description: The type of content block. + default: tool_use + required: + - id + - name + - input + ToolResultBlock: + type: object + description: The result of using a tool. + properties: + tool_use_id: + type: string + description: The `id` of the tool use request this is a result for. + content: + description: | + The result of the tool, as a string (e.g. `"content": "15 degrees"`) + or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). + These content blocks can use the text or image types. + oneOf: + - type: string + description: A single text block. + - type: array + description: An array of content blocks. + items: + $ref: "#/components/schemas/Block" + is_error: + type: boolean + description: Set to `true` if the tool execution resulted in an error. + type: + type: string + description: The type of content block. + default: tool_result + required: + - tool_use_id + - content StopReason: type: string description: | @@ -373,10 +566,12 @@ components: In non-streaming mode this value is always non-null. In streaming mode, it is null in the `message_start` event and non-null otherwise. + nullable: true enum: - end_turn - max_tokens - stop_sequence + - tool_use Usage: type: object description: | @@ -498,7 +693,7 @@ components: description: A start event in a streaming content block. properties: content_block: - $ref: "#/components/schemas/TextBlock" + $ref: "#/components/schemas/Block" index: type: integer description: The index of the content block. @@ -513,7 +708,7 @@ components: description: A delta event in a streaming content block. properties: delta: - $ref: "#/components/schemas/TextBlockDelta" + $ref: "#/components/schemas/BlockDelta" index: type: integer description: The index of the content block. @@ -523,6 +718,13 @@ components: - delta - index - type + BlockDelta: + description: A delta in a streaming message. + oneOf: + - $ref: "#/components/schemas/TextBlockDelta" + - $ref: "#/components/schemas/InputJsonBlockDelta" + discriminator: + propertyName: type TextBlockDelta: type: object description: A delta in a streaming text block. @@ -537,6 +739,20 @@ components: required: - text - type + InputJsonBlockDelta: + type: object + description: A delta in a streaming input JSON. + properties: + partial_json: + type: string + description: The partial JSON delta. + type: + type: string + description: The type of content block. + default: input_json_delta + required: + - text + - type ContentBlockStopEvent: type: object description: A stop event in a streaming content block. 
diff --git a/packages/anthropic_sdk_dart/oas/main.dart b/packages/anthropic_sdk_dart/oas/main.dart index cdeaa32c..9aa7a39b 100644 --- a/packages/anthropic_sdk_dart/oas/main.dart +++ b/packages/anthropic_sdk_dart/oas/main.dart @@ -34,6 +34,10 @@ String? _onSchemaUnionFactoryName( 'ModelString' => 'modelId', 'MessageContentListBlock' => 'blocks', 'MessageContentString' => 'text', + 'ToolResultBlockContentListBlock' => 'blocks', + 'ToolResultBlockContentString' => 'text', + 'TextBlockDelta' => 'textDelta', + 'InputJsonBlockDelta' => 'inputJsonDelta', 'MessageStartEvent' => 'messageStart', 'MessageDeltaEvent' => 'messageDelta', 'MessageStopEvent' => 'messageStop', diff --git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml index 164ba95f..650ac782 100644 --- a/packages/anthropic_sdk_dart/pubspec.yaml +++ b/packages/anthropic_sdk_dart/pubspec.yaml @@ -1,5 +1,5 @@ name: anthropic_sdk_dart -description: Dart Client for the Anthropic API (Claude 3 Opus, Sonnet, Haiku, etc.). +description: Dart Client for the Anthropic API (Claude 3.5 Sonnet, Opus, Haiku, etc.). version: 0.0.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/anthropic_sdk_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:anthropic_sdk_dart diff --git a/packages/anthropic_sdk_dart/test/messages_test.dart b/packages/anthropic_sdk_dart/test/messages_test.dart index 63bbb01e..648ad7f4 100644 --- a/packages/anthropic_sdk_dart/test/messages_test.dart +++ b/packages/anthropic_sdk_dart/test/messages_test.dart @@ -1,6 +1,8 @@ +// ignore_for_file: avoid_print @TestOn('vm') library; // Uses dart:io +import 'dart:convert'; import 'dart:io'; import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; @@ -20,9 +22,11 @@ void main() { client.endSession(); }); - test('Test call messages API', () async { + test('Test call messages API', timeout: const Timeout(Duration(minutes: 5)), + () async { const models = Models.values; for (final model in models) { + print('Testing model: ${model.name}'); final res = await client.createMessage( request: CreateMessageRequest( model: Model.model(model), @@ -62,7 +66,8 @@ void main() { } }); - test('Test call messages streaming API', () async { + test('Test call messages streaming API', + timeout: const Timeout(Duration(minutes: 5)), () async { final stream = client.createMessageStream( request: const CreateMessageRequest( model: Model.model(Models.claudeInstant12), @@ -107,7 +112,7 @@ void main() { contentBlockStart: (v) { expect(res.type, MessageStreamEventType.contentBlockStart); expect(v.index, 0); - expect(v.contentBlock.text, isEmpty); + expect(v.contentBlock.text, isNotNull); expect(v.contentBlock.type, 'text'); }, contentBlockDelta: (v) { @@ -115,13 +120,16 @@ void main() { expect(v.index, greaterThanOrEqualTo(0)); expect(v.delta.text, isNotEmpty); expect(v.delta.type, 'text_delta'); - text += v.delta.text.replaceAll(RegExp(r'[\s\n]'), ''); + text += v.delta + .mapOrNull(textDelta: (v) => v.text) + ?.replaceAll(RegExp(r'[\s\n]'), '') ?? 
+ ''; }, contentBlockStop: (v) { expect(res.type, MessageStreamEventType.contentBlockStop); expect(v.index, greaterThanOrEqualTo(0)); }, - ping: (v) { + ping: (PingEvent v) { expect(res.type, MessageStreamEventType.ping); }, ); @@ -146,5 +154,167 @@ void main() { final res = await client.createMessage(request: request); expect(res.stopReason, StopReason.maxTokens); }); + + const tool = Tool( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + test('Test tool use', () async { + final request1 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, + ); + final aiMessage1 = await client.createMessage(request: request1); + expect(aiMessage1.role, MessageRole.assistant); + + var toolUse = aiMessage1.content.blocks.first; + expect(toolUse, isA()); + toolUse = toolUse as ToolUseBlock; + + expect(toolUse.name, tool.name); + expect(toolUse.input, isNotEmpty); + expect(toolUse.input.containsKey('location'), isTrue); + expect(toolUse.input['location'], contains('Boston')); + + final toolResult = json.encode({ + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }); + + final request2 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now?', + ), + ), + Message( + role: MessageRole.assistant, + content: aiMessage1.content, + ), + Message( + role: MessageRole.user, + content: MessageContent.blocks([ + Block.toolResult( + toolUseId: toolUse.id, + content: ToolResultBlockContent.text(toolResult), + ), + ]), + ), + ], + tools: [tool], + maxTokens: 1024, + ); + final aiMessage2 = await client.createMessage(request: request2); + + expect(aiMessage2.role, MessageRole.assistant); + expect(aiMessage2.content.text, contains('22')); + }); + + test('Test tool use streaming', + timeout: const Timeout(Duration(minutes: 5)), () async { + final request1 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now in Celsius?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, + ); + final stream = client.createMessageStream( + request: request1, + ); + String inputJson = ''; + await for (final res in stream) { + res.map( + messageStart: (v) { + expect(res.type, MessageStreamEventType.messageStart); + expect(v.message.id, isNotEmpty); + expect(v.message.role, MessageRole.assistant); + expect( + v.message.model?.replaceAll(RegExp(r'[-.]'), ''), + Models.claude35Sonnet20240620.name.toLowerCase(), + ); + expect(v.message.stopReason, isNull); + expect(v.message.stopSequence, isNull); + expect(v.message.usage?.inputTokens, greaterThan(0)); + 
expect(v.message.usage?.outputTokens, greaterThan(0)); + }, + messageDelta: (v) { + expect(res.type, MessageStreamEventType.messageDelta); + expect(v.delta.stopReason, StopReason.toolUse); + expect(v.usage.outputTokens, greaterThan(0)); + }, + messageStop: (v) { + expect(res.type, MessageStreamEventType.messageStop); + }, + contentBlockStart: (v) { + expect(res.type, MessageStreamEventType.contentBlockStart); + expect(v.index, 0); + expect(v.contentBlock.type, 'tool_use'); + expect(v.contentBlock.toolUse, isNotNull); + expect(v.contentBlock.toolUse!.id, isNotEmpty); + expect(v.contentBlock.toolUse!.name, tool.name); + }, + contentBlockDelta: (v) { + expect(res.type, MessageStreamEventType.contentBlockDelta); + expect(v.index, greaterThanOrEqualTo(0)); + expect(v.delta.type, 'input_json_delta'); + inputJson += v.delta.inputJson; + }, + contentBlockStop: (v) { + expect(res.type, MessageStreamEventType.contentBlockStop); + expect(v.index, greaterThanOrEqualTo(0)); + }, + ping: (PingEvent v) { + expect(res.type, MessageStreamEventType.ping); + }, + ); + } + final input = json.decode(inputJson) as Map; + expect(input['location'], contains('Boston')); + expect(input['unit'], 'celsius'); + }); }); } From 45e776911213fcde5e1498791eafcfa91e2f9c02 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Jun 2024 09:35:00 +0200 Subject: [PATCH 053/251] build(deps): bump melos from 6.0.0 to 6.1.0 (#470) Bumps [melos](https://github.com/invertase/melos/tree/main/packages) from 6.0.0 to 6.1.0. - [Release notes](https://github.com/invertase/melos/releases) - [Changelog](https://github.com/invertase/melos/blob/main/CHANGELOG.md) - [Commits](https://github.com/invertase/melos/commits/melos-v6.1.0/packages) --- updated-dependencies: - dependency-name: melos dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pubspec.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pubspec.yaml b/pubspec.yaml index 70fc02f6..8373da6a 100644 --- a/pubspec.yaml +++ b/pubspec.yaml @@ -4,4 +4,4 @@ environment: sdk: ">=3.0.0 <4.0.0" dev_dependencies: - melos: 6.0.0 + melos: 6.1.0 From 5473a175448977f57a08c343a0c51f33d0ba5a6a Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 27 Jun 2024 17:36:24 +0200 Subject: [PATCH 054/251] feat: Add support for ChatToolChoiceRequired (#474) --- .../lib/src/chat_models/types.dart | 19 ++++++++++++++++--- .../lib/src/language_models/types.dart | 10 ++++++++++ .../src/chat_models/vertex_ai/mappers.dart | 5 +++++ .../src/chat_models/google_ai/mappers.dart | 5 +++++ .../lib/src/chat_models/mappers.dart | 3 +++ 5 files changed, 39 insertions(+), 3 deletions(-) diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index 1ada3bbe..b0ec2aa9 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -423,6 +423,8 @@ class AIChatMessageToolCall { }); /// The id of the tool to call. + /// + /// This is used to match up the tool results later. final String id; /// The name of the tool to call. @@ -716,7 +718,7 @@ class ChatMessageContentImage extends ChatMessageContent { /// Depending on the model, this can be either: /// - The base64 encoded image data - /// - A URL of the image. 
+ /// - A URL of the image (only supported by some providers) final String data; /// The IANA standard MIME type of the source data. @@ -818,9 +820,12 @@ sealed class ChatToolChoice { /// The model does not call a tool, and responds to the end-user. static const none = ChatToolChoiceNone(); - /// The model can pick between an end-user or calling a tool. + /// The model can pick between responding to the end-user or calling a tool. static const auto = ChatToolChoiceAuto(); + /// The model must call at least one tool, but doesn’t force a particular tool. + static const required = ChatToolChoiceRequired(); + /// The model is forced to to call the specified tool. factory ChatToolChoice.forced({required final String name}) => ChatToolChoiceForced(name: name); @@ -835,13 +840,21 @@ final class ChatToolChoiceNone extends ChatToolChoice { } /// {@template chat_tool_choice_auto} -/// The model can pick between an end-user or calling a tool. +/// The model can pick between responding to the end-user or calling a tool. /// {@endtemplate} final class ChatToolChoiceAuto extends ChatToolChoice { /// {@macro chat_tool_choice_auto} const ChatToolChoiceAuto(); } +/// {@template chat_tool_choice_required} +/// The model must call at least one tool, but doesn’t force a particular tool. +/// {@endtemplate} +final class ChatToolChoiceRequired extends ChatToolChoice { + /// {@macro chat_tool_choice_none} + const ChatToolChoiceRequired(); +} + /// {@template chat_tool_choice_forced} /// The model is forced to to call the specified tool. /// {@endtemplate} diff --git a/packages/langchain_core/lib/src/language_models/types.dart b/packages/langchain_core/lib/src/language_models/types.dart index 8112ab37..f1475ad2 100644 --- a/packages/langchain_core/lib/src/language_models/types.dart +++ b/packages/langchain_core/lib/src/language_models/types.dart @@ -99,12 +99,16 @@ class LanguageModelUsage { }); /// The number of tokens in the prompt. + /// + /// Some providers call this "input_tokens". final int? promptTokens; /// The total number of billable characters in the prompt if applicable. final int? promptBillableCharacters; /// The number of tokens in the completion. + /// + /// Some providers call this "output_tokens". final int? responseTokens; /// The total number of billable characters in the completion if applicable. @@ -172,9 +176,13 @@ LanguageModelUsage{ /// The reason the model stopped generating tokens. enum FinishReason { /// The model hit a natural stop point or a provided stop sequence. + /// + /// Some providers call this "end_turn". stop, /// The maximum number of tokens specified in the request was reached. + /// + /// Some providers call this "max_tokens". length, /// The content was flagged for content filter reasons. @@ -184,6 +192,8 @@ enum FinishReason { recitation, /// The model called a tool. + /// + /// Some providers call this "tool_use". toolCalls, /// The finish reason is unspecified. 
diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart index 41517a64..9c55d409 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart @@ -293,6 +293,11 @@ extension ChatToolChoiceMapper on ChatToolChoice { mode: f.FunctionCallingMode.auto, ), ), + ChatToolChoiceRequired() => f.ToolConfig( + functionCallingConfig: f.FunctionCallingConfig( + mode: f.FunctionCallingMode.any, + ), + ), final ChatToolChoiceForced t => f.ToolConfig( functionCallingConfig: f.FunctionCallingConfig( mode: f.FunctionCallingMode.any, diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart index 8bf41f84..106bf60b 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart @@ -294,6 +294,11 @@ extension ChatToolChoiceMapper on ChatToolChoice { mode: g.FunctionCallingMode.auto, ), ), + ChatToolChoiceRequired() => g.ToolConfig( + functionCallingConfig: g.FunctionCallingConfig( + mode: g.FunctionCallingMode.any, + ), + ), final ChatToolChoiceForced t => g.ToolConfig( functionCallingConfig: g.FunctionCallingConfig( mode: g.FunctionCallingMode.any, diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart index 78054bed..6b434109 100644 --- a/packages/langchain_openai/lib/src/chat_models/mappers.dart +++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart @@ -193,6 +193,9 @@ extension ChatToolChoiceMapper on ChatToolChoice { ChatToolChoiceAuto _ => const ChatCompletionToolChoiceOption.mode( ChatCompletionToolChoiceMode.auto, ), + ChatToolChoiceRequired() => const ChatCompletionToolChoiceOption.mode( + ChatCompletionToolChoiceMode.required, + ), final ChatToolChoiceForced t => ChatCompletionToolChoiceOption.tool( ChatCompletionNamedToolChoice( type: ChatCompletionNamedToolChoiceType.function, From 82d104ca204518df3d501366018b7301410304a3 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 27 Jun 2024 17:42:14 +0200 Subject: [PATCH 055/251] refactor: Improve schemas names in anthropic_sdk_dart (#475) --- .../lib/src/generated/schema/block.dart | 12 +- .../schema/create_message_request.dart | 12 +- .../lib/src/generated/schema/message.dart | 12 +- .../src/generated/schema/schema.freezed.dart | 445 +++++++++--------- .../lib/src/generated/schema/schema.g.dart | 60 ++- packages/anthropic_sdk_dart/oas/main.dart | 23 +- 6 files changed, 283 insertions(+), 281 deletions(-) diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart index 959a5ecb..e15126a3 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart @@ -113,12 +113,12 @@ sealed class ToolResultBlockContent with _$ToolResultBlockContent { /// An array of content blocks. const factory ToolResultBlockContent.blocks( List value, - ) = ToolResultBlockContentListBlock; + ) = ToolResultBlockContentBlocks; /// A single text block. 
const factory ToolResultBlockContent.text( String value, - ) = ToolResultBlockContentString; + ) = ToolResultBlockContentText; /// Object construction from a JSON representation factory ToolResultBlockContent.fromJson(Map json) => @@ -133,12 +133,12 @@ class _ToolResultBlockContentConverter @override ToolResultBlockContent fromJson(Object? data) { if (data is List && data.every((item) => item is Map)) { - return ToolResultBlockContentListBlock(data + return ToolResultBlockContentBlocks(data .map((i) => Block.fromJson(i as Map)) .toList(growable: false)); } if (data is String) { - return ToolResultBlockContentString(data); + return ToolResultBlockContentText(data); } throw Exception( 'Unexpected value for ToolResultBlockContent: $data', @@ -148,8 +148,8 @@ class _ToolResultBlockContentConverter @override Object? toJson(ToolResultBlockContent data) { return switch (data) { - ToolResultBlockContentListBlock(value: final v) => v, - ToolResultBlockContentString(value: final v) => v, + ToolResultBlockContentBlocks(value: final v) => v, + ToolResultBlockContentText(value: final v) => v, }; } } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart index df2f1b5b..e310adff 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart @@ -338,12 +338,12 @@ sealed class Model with _$Model { /// Available models. Mind that the list may not be exhaustive nor up-to-date. const factory Model.model( Models value, - ) = ModelEnumeration; + ) = ModelCatalog; /// The ID of the model to use for this request. const factory Model.modelId( String value, - ) = ModelString; + ) = ModelId; /// Object construction from a JSON representation factory Model.fromJson(Map json) => _$ModelFromJson(json); @@ -356,14 +356,14 @@ class _ModelConverter implements JsonConverter { @override Model fromJson(Object? data) { if (data is String && _$ModelsEnumMap.values.contains(data)) { - return ModelEnumeration( + return ModelCatalog( _$ModelsEnumMap.keys.elementAt( _$ModelsEnumMap.values.toList().indexOf(data), ), ); } if (data is String) { - return ModelString(data); + return ModelId(data); } throw Exception( 'Unexpected value for Model: $data', @@ -373,8 +373,8 @@ class _ModelConverter implements JsonConverter { @override Object? toJson(Model data) { return switch (data) { - ModelEnumeration(value: final v) => _$ModelsEnumMap[v]!, - ModelString(value: final v) => v, + ModelCatalog(value: final v) => _$ModelsEnumMap[v]!, + ModelId(value: final v) => v, }; } } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart index e8e0b298..2444ac92 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart @@ -120,12 +120,12 @@ sealed class MessageContent with _$MessageContent { /// An array of content blocks. const factory MessageContent.blocks( List value, - ) = MessageContentListBlock; + ) = MessageContentBlocks; /// A single text block. 
const factory MessageContent.text( String value, - ) = MessageContentString; + ) = MessageContentText; /// Object construction from a JSON representation factory MessageContent.fromJson(Map json) => @@ -140,12 +140,12 @@ class _MessageContentConverter @override MessageContent fromJson(Object? data) { if (data is List && data.every((item) => item is Map)) { - return MessageContentListBlock(data + return MessageContentBlocks(data .map((i) => Block.fromJson(i as Map)) .toList(growable: false)); } if (data is String) { - return MessageContentString(data); + return MessageContentText(data); } throw Exception( 'Unexpected value for MessageContent: $data', @@ -155,8 +155,8 @@ class _MessageContentConverter @override Object? toJson(MessageContent data) { return switch (data) { - MessageContentListBlock(value: final v) => v, - MessageContentString(value: final v) => v, + MessageContentBlocks(value: final v) => v, + MessageContentText(value: final v) => v, }; } } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart index 528c9b30..4045606f 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart @@ -1356,9 +1356,9 @@ abstract class _CreateMessageRequest extends CreateMessageRequest { Model _$ModelFromJson(Map json) { switch (json['runtimeType']) { case 'model': - return ModelEnumeration.fromJson(json); + return ModelCatalog.fromJson(json); case 'modelId': - return ModelString.fromJson(json); + return ModelId.fromJson(json); default: throw CheckedFromJsonException(json, 'runtimeType', 'Model', @@ -1390,20 +1390,20 @@ mixin _$Model { throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(ModelEnumeration value) model, - required TResult Function(ModelString value) modelId, + required TResult Function(ModelCatalog value) model, + required TResult Function(ModelId value) modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ModelEnumeration value)? model, - TResult? Function(ModelString value)? modelId, + TResult? Function(ModelCatalog value)? model, + TResult? Function(ModelId value)? modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(ModelEnumeration value)? model, - TResult Function(ModelString value)? modelId, + TResult Function(ModelCatalog value)? model, + TResult Function(ModelId value)? 
modelId, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -1428,20 +1428,20 @@ class _$ModelCopyWithImpl<$Res, $Val extends Model> } /// @nodoc -abstract class _$$ModelEnumerationImplCopyWith<$Res> { - factory _$$ModelEnumerationImplCopyWith(_$ModelEnumerationImpl value, - $Res Function(_$ModelEnumerationImpl) then) = - __$$ModelEnumerationImplCopyWithImpl<$Res>; +abstract class _$$ModelCatalogImplCopyWith<$Res> { + factory _$$ModelCatalogImplCopyWith( + _$ModelCatalogImpl value, $Res Function(_$ModelCatalogImpl) then) = + __$$ModelCatalogImplCopyWithImpl<$Res>; @useResult $Res call({Models value}); } /// @nodoc -class __$$ModelEnumerationImplCopyWithImpl<$Res> - extends _$ModelCopyWithImpl<$Res, _$ModelEnumerationImpl> - implements _$$ModelEnumerationImplCopyWith<$Res> { - __$$ModelEnumerationImplCopyWithImpl(_$ModelEnumerationImpl _value, - $Res Function(_$ModelEnumerationImpl) _then) +class __$$ModelCatalogImplCopyWithImpl<$Res> + extends _$ModelCopyWithImpl<$Res, _$ModelCatalogImpl> + implements _$$ModelCatalogImplCopyWith<$Res> { + __$$ModelCatalogImplCopyWithImpl( + _$ModelCatalogImpl _value, $Res Function(_$ModelCatalogImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -1449,7 +1449,7 @@ class __$$ModelEnumerationImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$ModelEnumerationImpl( + return _then(_$ModelCatalogImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable @@ -1460,13 +1460,13 @@ class __$$ModelEnumerationImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$ModelEnumerationImpl extends ModelEnumeration { - const _$ModelEnumerationImpl(this.value, {final String? $type}) +class _$ModelCatalogImpl extends ModelCatalog { + const _$ModelCatalogImpl(this.value, {final String? $type}) : $type = $type ?? 'model', super._(); - factory _$ModelEnumerationImpl.fromJson(Map json) => - _$$ModelEnumerationImplFromJson(json); + factory _$ModelCatalogImpl.fromJson(Map json) => + _$$ModelCatalogImplFromJson(json); @override final Models value; @@ -1483,7 +1483,7 @@ class _$ModelEnumerationImpl extends ModelEnumeration { bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ModelEnumerationImpl && + other is _$ModelCatalogImpl && (identical(other.value, value) || other.value == value)); } @@ -1494,9 +1494,8 @@ class _$ModelEnumerationImpl extends ModelEnumeration { @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ModelEnumerationImplCopyWith<_$ModelEnumerationImpl> get copyWith => - __$$ModelEnumerationImplCopyWithImpl<_$ModelEnumerationImpl>( - this, _$identity); + _$$ModelCatalogImplCopyWith<_$ModelCatalogImpl> get copyWith => + __$$ModelCatalogImplCopyWithImpl<_$ModelCatalogImpl>(this, _$identity); @override @optionalTypeArgs @@ -1532,8 +1531,8 @@ class _$ModelEnumerationImpl extends ModelEnumeration { @override @optionalTypeArgs TResult map({ - required TResult Function(ModelEnumeration value) model, - required TResult Function(ModelString value) modelId, + required TResult Function(ModelCatalog value) model, + required TResult Function(ModelId value) modelId, }) { return model(this); } @@ -1541,8 +1540,8 @@ class _$ModelEnumerationImpl extends ModelEnumeration { @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ModelEnumeration value)? model, - TResult? Function(ModelString value)? modelId, + TResult? Function(ModelCatalog value)? model, + TResult? Function(ModelId value)? 
modelId, }) { return model?.call(this); } @@ -1550,8 +1549,8 @@ class _$ModelEnumerationImpl extends ModelEnumeration { @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ModelEnumeration value)? model, - TResult Function(ModelString value)? modelId, + TResult Function(ModelCatalog value)? model, + TResult Function(ModelId value)? modelId, required TResult orElse(), }) { if (model != null) { @@ -1562,41 +1561,41 @@ class _$ModelEnumerationImpl extends ModelEnumeration { @override Map toJson() { - return _$$ModelEnumerationImplToJson( + return _$$ModelCatalogImplToJson( this, ); } } -abstract class ModelEnumeration extends Model { - const factory ModelEnumeration(final Models value) = _$ModelEnumerationImpl; - const ModelEnumeration._() : super._(); +abstract class ModelCatalog extends Model { + const factory ModelCatalog(final Models value) = _$ModelCatalogImpl; + const ModelCatalog._() : super._(); - factory ModelEnumeration.fromJson(Map json) = - _$ModelEnumerationImpl.fromJson; + factory ModelCatalog.fromJson(Map json) = + _$ModelCatalogImpl.fromJson; @override Models get value; @JsonKey(ignore: true) - _$$ModelEnumerationImplCopyWith<_$ModelEnumerationImpl> get copyWith => + _$$ModelCatalogImplCopyWith<_$ModelCatalogImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ModelStringImplCopyWith<$Res> { - factory _$$ModelStringImplCopyWith( - _$ModelStringImpl value, $Res Function(_$ModelStringImpl) then) = - __$$ModelStringImplCopyWithImpl<$Res>; +abstract class _$$ModelIdImplCopyWith<$Res> { + factory _$$ModelIdImplCopyWith( + _$ModelIdImpl value, $Res Function(_$ModelIdImpl) then) = + __$$ModelIdImplCopyWithImpl<$Res>; @useResult $Res call({String value}); } /// @nodoc -class __$$ModelStringImplCopyWithImpl<$Res> - extends _$ModelCopyWithImpl<$Res, _$ModelStringImpl> - implements _$$ModelStringImplCopyWith<$Res> { - __$$ModelStringImplCopyWithImpl( - _$ModelStringImpl _value, $Res Function(_$ModelStringImpl) _then) +class __$$ModelIdImplCopyWithImpl<$Res> + extends _$ModelCopyWithImpl<$Res, _$ModelIdImpl> + implements _$$ModelIdImplCopyWith<$Res> { + __$$ModelIdImplCopyWithImpl( + _$ModelIdImpl _value, $Res Function(_$ModelIdImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -1604,7 +1603,7 @@ class __$$ModelStringImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$ModelStringImpl( + return _then(_$ModelIdImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable @@ -1615,13 +1614,13 @@ class __$$ModelStringImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$ModelStringImpl extends ModelString { - const _$ModelStringImpl(this.value, {final String? $type}) +class _$ModelIdImpl extends ModelId { + const _$ModelIdImpl(this.value, {final String? $type}) : $type = $type ?? 
'modelId', super._(); - factory _$ModelStringImpl.fromJson(Map json) => - _$$ModelStringImplFromJson(json); + factory _$ModelIdImpl.fromJson(Map json) => + _$$ModelIdImplFromJson(json); @override final String value; @@ -1638,7 +1637,7 @@ class _$ModelStringImpl extends ModelString { bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ModelStringImpl && + other is _$ModelIdImpl && (identical(other.value, value) || other.value == value)); } @@ -1649,8 +1648,8 @@ class _$ModelStringImpl extends ModelString { @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ModelStringImplCopyWith<_$ModelStringImpl> get copyWith => - __$$ModelStringImplCopyWithImpl<_$ModelStringImpl>(this, _$identity); + _$$ModelIdImplCopyWith<_$ModelIdImpl> get copyWith => + __$$ModelIdImplCopyWithImpl<_$ModelIdImpl>(this, _$identity); @override @optionalTypeArgs @@ -1686,8 +1685,8 @@ class _$ModelStringImpl extends ModelString { @override @optionalTypeArgs TResult map({ - required TResult Function(ModelEnumeration value) model, - required TResult Function(ModelString value) modelId, + required TResult Function(ModelCatalog value) model, + required TResult Function(ModelId value) modelId, }) { return modelId(this); } @@ -1695,8 +1694,8 @@ class _$ModelStringImpl extends ModelString { @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ModelEnumeration value)? model, - TResult? Function(ModelString value)? modelId, + TResult? Function(ModelCatalog value)? model, + TResult? Function(ModelId value)? modelId, }) { return modelId?.call(this); } @@ -1704,8 +1703,8 @@ class _$ModelStringImpl extends ModelString { @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ModelEnumeration value)? model, - TResult Function(ModelString value)? modelId, + TResult Function(ModelCatalog value)? model, + TResult Function(ModelId value)? 
modelId, required TResult orElse(), }) { if (modelId != null) { @@ -1716,23 +1715,22 @@ class _$ModelStringImpl extends ModelString { @override Map toJson() { - return _$$ModelStringImplToJson( + return _$$ModelIdImplToJson( this, ); } } -abstract class ModelString extends Model { - const factory ModelString(final String value) = _$ModelStringImpl; - const ModelString._() : super._(); +abstract class ModelId extends Model { + const factory ModelId(final String value) = _$ModelIdImpl; + const ModelId._() : super._(); - factory ModelString.fromJson(Map json) = - _$ModelStringImpl.fromJson; + factory ModelId.fromJson(Map json) = _$ModelIdImpl.fromJson; @override String get value; @JsonKey(ignore: true) - _$$ModelStringImplCopyWith<_$ModelStringImpl> get copyWith => + _$$ModelIdImplCopyWith<_$ModelIdImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2581,9 +2579,9 @@ abstract class _Message extends Message { MessageContent _$MessageContentFromJson(Map json) { switch (json['runtimeType']) { case 'blocks': - return MessageContentListBlock.fromJson(json); + return MessageContentBlocks.fromJson(json); case 'text': - return MessageContentString.fromJson(json); + return MessageContentText.fromJson(json); default: throw CheckedFromJsonException(json, 'runtimeType', 'MessageContent', @@ -2615,20 +2613,20 @@ mixin _$MessageContent { throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(MessageContentListBlock value) blocks, - required TResult Function(MessageContentString value) text, + required TResult Function(MessageContentBlocks value) blocks, + required TResult Function(MessageContentText value) text, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentListBlock value)? blocks, - TResult? Function(MessageContentString value)? text, + TResult? Function(MessageContentBlocks value)? blocks, + TResult? Function(MessageContentText value)? text, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentListBlock value)? blocks, - TResult Function(MessageContentString value)? text, + TResult Function(MessageContentBlocks value)? blocks, + TResult Function(MessageContentText value)? 
text, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -2654,22 +2652,20 @@ class _$MessageContentCopyWithImpl<$Res, $Val extends MessageContent> } /// @nodoc -abstract class _$$MessageContentListBlockImplCopyWith<$Res> { - factory _$$MessageContentListBlockImplCopyWith( - _$MessageContentListBlockImpl value, - $Res Function(_$MessageContentListBlockImpl) then) = - __$$MessageContentListBlockImplCopyWithImpl<$Res>; +abstract class _$$MessageContentBlocksImplCopyWith<$Res> { + factory _$$MessageContentBlocksImplCopyWith(_$MessageContentBlocksImpl value, + $Res Function(_$MessageContentBlocksImpl) then) = + __$$MessageContentBlocksImplCopyWithImpl<$Res>; @useResult $Res call({List value}); } /// @nodoc -class __$$MessageContentListBlockImplCopyWithImpl<$Res> - extends _$MessageContentCopyWithImpl<$Res, _$MessageContentListBlockImpl> - implements _$$MessageContentListBlockImplCopyWith<$Res> { - __$$MessageContentListBlockImplCopyWithImpl( - _$MessageContentListBlockImpl _value, - $Res Function(_$MessageContentListBlockImpl) _then) +class __$$MessageContentBlocksImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, _$MessageContentBlocksImpl> + implements _$$MessageContentBlocksImplCopyWith<$Res> { + __$$MessageContentBlocksImplCopyWithImpl(_$MessageContentBlocksImpl _value, + $Res Function(_$MessageContentBlocksImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -2677,7 +2673,7 @@ class __$$MessageContentListBlockImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$MessageContentListBlockImpl( + return _then(_$MessageContentBlocksImpl( null == value ? _value._value : value // ignore: cast_nullable_to_non_nullable @@ -2688,15 +2684,15 @@ class __$$MessageContentListBlockImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$MessageContentListBlockImpl extends MessageContentListBlock { - const _$MessageContentListBlockImpl(final List value, +class _$MessageContentBlocksImpl extends MessageContentBlocks { + const _$MessageContentBlocksImpl(final List value, {final String? $type}) : _value = value, $type = $type ?? 
'blocks', super._(); - factory _$MessageContentListBlockImpl.fromJson(Map json) => - _$$MessageContentListBlockImplFromJson(json); + factory _$MessageContentBlocksImpl.fromJson(Map json) => + _$$MessageContentBlocksImplFromJson(json); final List _value; @override @@ -2718,7 +2714,7 @@ class _$MessageContentListBlockImpl extends MessageContentListBlock { bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentListBlockImpl && + other is _$MessageContentBlocksImpl && const DeepCollectionEquality().equals(other._value, _value)); } @@ -2730,9 +2726,10 @@ class _$MessageContentListBlockImpl extends MessageContentListBlock { @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageContentListBlockImplCopyWith<_$MessageContentListBlockImpl> - get copyWith => __$$MessageContentListBlockImplCopyWithImpl< - _$MessageContentListBlockImpl>(this, _$identity); + _$$MessageContentBlocksImplCopyWith<_$MessageContentBlocksImpl> + get copyWith => + __$$MessageContentBlocksImplCopyWithImpl<_$MessageContentBlocksImpl>( + this, _$identity); @override @optionalTypeArgs @@ -2768,8 +2765,8 @@ class _$MessageContentListBlockImpl extends MessageContentListBlock { @override @optionalTypeArgs TResult map({ - required TResult Function(MessageContentListBlock value) blocks, - required TResult Function(MessageContentString value) text, + required TResult Function(MessageContentBlocks value) blocks, + required TResult Function(MessageContentText value) text, }) { return blocks(this); } @@ -2777,8 +2774,8 @@ class _$MessageContentListBlockImpl extends MessageContentListBlock { @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentListBlock value)? blocks, - TResult? Function(MessageContentString value)? text, + TResult? Function(MessageContentBlocks value)? blocks, + TResult? Function(MessageContentText value)? text, }) { return blocks?.call(this); } @@ -2786,8 +2783,8 @@ class _$MessageContentListBlockImpl extends MessageContentListBlock { @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentListBlock value)? blocks, - TResult Function(MessageContentString value)? text, + TResult Function(MessageContentBlocks value)? blocks, + TResult Function(MessageContentText value)? 
text, required TResult orElse(), }) { if (blocks != null) { @@ -2798,42 +2795,42 @@ class _$MessageContentListBlockImpl extends MessageContentListBlock { @override Map toJson() { - return _$$MessageContentListBlockImplToJson( + return _$$MessageContentBlocksImplToJson( this, ); } } -abstract class MessageContentListBlock extends MessageContent { - const factory MessageContentListBlock(final List value) = - _$MessageContentListBlockImpl; - const MessageContentListBlock._() : super._(); +abstract class MessageContentBlocks extends MessageContent { + const factory MessageContentBlocks(final List value) = + _$MessageContentBlocksImpl; + const MessageContentBlocks._() : super._(); - factory MessageContentListBlock.fromJson(Map json) = - _$MessageContentListBlockImpl.fromJson; + factory MessageContentBlocks.fromJson(Map json) = + _$MessageContentBlocksImpl.fromJson; @override List get value; @JsonKey(ignore: true) - _$$MessageContentListBlockImplCopyWith<_$MessageContentListBlockImpl> + _$$MessageContentBlocksImplCopyWith<_$MessageContentBlocksImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageContentStringImplCopyWith<$Res> { - factory _$$MessageContentStringImplCopyWith(_$MessageContentStringImpl value, - $Res Function(_$MessageContentStringImpl) then) = - __$$MessageContentStringImplCopyWithImpl<$Res>; +abstract class _$$MessageContentTextImplCopyWith<$Res> { + factory _$$MessageContentTextImplCopyWith(_$MessageContentTextImpl value, + $Res Function(_$MessageContentTextImpl) then) = + __$$MessageContentTextImplCopyWithImpl<$Res>; @useResult $Res call({String value}); } /// @nodoc -class __$$MessageContentStringImplCopyWithImpl<$Res> - extends _$MessageContentCopyWithImpl<$Res, _$MessageContentStringImpl> - implements _$$MessageContentStringImplCopyWith<$Res> { - __$$MessageContentStringImplCopyWithImpl(_$MessageContentStringImpl _value, - $Res Function(_$MessageContentStringImpl) _then) +class __$$MessageContentTextImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, _$MessageContentTextImpl> + implements _$$MessageContentTextImplCopyWith<$Res> { + __$$MessageContentTextImplCopyWithImpl(_$MessageContentTextImpl _value, + $Res Function(_$MessageContentTextImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -2841,7 +2838,7 @@ class __$$MessageContentStringImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$MessageContentStringImpl( + return _then(_$MessageContentTextImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable @@ -2852,13 +2849,13 @@ class __$$MessageContentStringImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$MessageContentStringImpl extends MessageContentString { - const _$MessageContentStringImpl(this.value, {final String? $type}) +class _$MessageContentTextImpl extends MessageContentText { + const _$MessageContentTextImpl(this.value, {final String? $type}) : $type = $type ?? 
'text', super._(); - factory _$MessageContentStringImpl.fromJson(Map json) => - _$$MessageContentStringImplFromJson(json); + factory _$MessageContentTextImpl.fromJson(Map json) => + _$$MessageContentTextImplFromJson(json); @override final String value; @@ -2875,7 +2872,7 @@ class _$MessageContentStringImpl extends MessageContentString { bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentStringImpl && + other is _$MessageContentTextImpl && (identical(other.value, value) || other.value == value)); } @@ -2886,10 +2883,9 @@ class _$MessageContentStringImpl extends MessageContentString { @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$MessageContentStringImplCopyWith<_$MessageContentStringImpl> - get copyWith => - __$$MessageContentStringImplCopyWithImpl<_$MessageContentStringImpl>( - this, _$identity); + _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => + __$$MessageContentTextImplCopyWithImpl<_$MessageContentTextImpl>( + this, _$identity); @override @optionalTypeArgs @@ -2925,8 +2921,8 @@ class _$MessageContentStringImpl extends MessageContentString { @override @optionalTypeArgs TResult map({ - required TResult Function(MessageContentListBlock value) blocks, - required TResult Function(MessageContentString value) text, + required TResult Function(MessageContentBlocks value) blocks, + required TResult Function(MessageContentText value) text, }) { return text(this); } @@ -2934,8 +2930,8 @@ class _$MessageContentStringImpl extends MessageContentString { @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentListBlock value)? blocks, - TResult? Function(MessageContentString value)? text, + TResult? Function(MessageContentBlocks value)? blocks, + TResult? Function(MessageContentText value)? text, }) { return text?.call(this); } @@ -2943,8 +2939,8 @@ class _$MessageContentStringImpl extends MessageContentString { @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentListBlock value)? blocks, - TResult Function(MessageContentString value)? text, + TResult Function(MessageContentBlocks value)? blocks, + TResult Function(MessageContentText value)? 
text, required TResult orElse(), }) { if (text != null) { @@ -2955,25 +2951,25 @@ class _$MessageContentStringImpl extends MessageContentString { @override Map toJson() { - return _$$MessageContentStringImplToJson( + return _$$MessageContentTextImplToJson( this, ); } } -abstract class MessageContentString extends MessageContent { - const factory MessageContentString(final String value) = - _$MessageContentStringImpl; - const MessageContentString._() : super._(); +abstract class MessageContentText extends MessageContent { + const factory MessageContentText(final String value) = + _$MessageContentTextImpl; + const MessageContentText._() : super._(); - factory MessageContentString.fromJson(Map json) = - _$MessageContentStringImpl.fromJson; + factory MessageContentText.fromJson(Map json) = + _$MessageContentTextImpl.fromJson; @override String get value; @JsonKey(ignore: true) - _$$MessageContentStringImplCopyWith<_$MessageContentStringImpl> - get copyWith => throw _privateConstructorUsedError; + _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => + throw _privateConstructorUsedError; } Tool _$ToolFromJson(Map json) { @@ -5048,9 +5044,9 @@ ToolResultBlockContent _$ToolResultBlockContentFromJson( Map json) { switch (json['runtimeType']) { case 'blocks': - return ToolResultBlockContentListBlock.fromJson(json); + return ToolResultBlockContentBlocks.fromJson(json); case 'text': - return ToolResultBlockContentString.fromJson(json); + return ToolResultBlockContentText.fromJson(json); default: throw CheckedFromJsonException( @@ -5085,20 +5081,20 @@ mixin _$ToolResultBlockContent { throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(ToolResultBlockContentListBlock value) blocks, - required TResult Function(ToolResultBlockContentString value) text, + required TResult Function(ToolResultBlockContentBlocks value) blocks, + required TResult Function(ToolResultBlockContentText value) text, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ToolResultBlockContentListBlock value)? blocks, - TResult? Function(ToolResultBlockContentString value)? text, + TResult? Function(ToolResultBlockContentBlocks value)? blocks, + TResult? Function(ToolResultBlockContentText value)? text, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(ToolResultBlockContentListBlock value)? blocks, - TResult Function(ToolResultBlockContentString value)? text, + TResult Function(ToolResultBlockContentBlocks value)? blocks, + TResult Function(ToolResultBlockContentText value)? 
text, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -5125,23 +5121,23 @@ class _$ToolResultBlockContentCopyWithImpl<$Res, } /// @nodoc -abstract class _$$ToolResultBlockContentListBlockImplCopyWith<$Res> { - factory _$$ToolResultBlockContentListBlockImplCopyWith( - _$ToolResultBlockContentListBlockImpl value, - $Res Function(_$ToolResultBlockContentListBlockImpl) then) = - __$$ToolResultBlockContentListBlockImplCopyWithImpl<$Res>; +abstract class _$$ToolResultBlockContentBlocksImplCopyWith<$Res> { + factory _$$ToolResultBlockContentBlocksImplCopyWith( + _$ToolResultBlockContentBlocksImpl value, + $Res Function(_$ToolResultBlockContentBlocksImpl) then) = + __$$ToolResultBlockContentBlocksImplCopyWithImpl<$Res>; @useResult $Res call({List value}); } /// @nodoc -class __$$ToolResultBlockContentListBlockImplCopyWithImpl<$Res> +class __$$ToolResultBlockContentBlocksImplCopyWithImpl<$Res> extends _$ToolResultBlockContentCopyWithImpl<$Res, - _$ToolResultBlockContentListBlockImpl> - implements _$$ToolResultBlockContentListBlockImplCopyWith<$Res> { - __$$ToolResultBlockContentListBlockImplCopyWithImpl( - _$ToolResultBlockContentListBlockImpl _value, - $Res Function(_$ToolResultBlockContentListBlockImpl) _then) + _$ToolResultBlockContentBlocksImpl> + implements _$$ToolResultBlockContentBlocksImplCopyWith<$Res> { + __$$ToolResultBlockContentBlocksImplCopyWithImpl( + _$ToolResultBlockContentBlocksImpl _value, + $Res Function(_$ToolResultBlockContentBlocksImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -5149,7 +5145,7 @@ class __$$ToolResultBlockContentListBlockImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$ToolResultBlockContentListBlockImpl( + return _then(_$ToolResultBlockContentBlocksImpl( null == value ? _value._value : value // ignore: cast_nullable_to_non_nullable @@ -5160,17 +5156,16 @@ class __$$ToolResultBlockContentListBlockImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$ToolResultBlockContentListBlockImpl - extends ToolResultBlockContentListBlock { - const _$ToolResultBlockContentListBlockImpl(final List value, +class _$ToolResultBlockContentBlocksImpl extends ToolResultBlockContentBlocks { + const _$ToolResultBlockContentBlocksImpl(final List value, {final String? $type}) : _value = value, $type = $type ?? 
'blocks', super._(); - factory _$ToolResultBlockContentListBlockImpl.fromJson( + factory _$ToolResultBlockContentBlocksImpl.fromJson( Map json) => - _$$ToolResultBlockContentListBlockImplFromJson(json); + _$$ToolResultBlockContentBlocksImplFromJson(json); final List _value; @override @@ -5192,7 +5187,7 @@ class _$ToolResultBlockContentListBlockImpl bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ToolResultBlockContentListBlockImpl && + other is _$ToolResultBlockContentBlocksImpl && const DeepCollectionEquality().equals(other._value, _value)); } @@ -5204,10 +5199,10 @@ class _$ToolResultBlockContentListBlockImpl @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ToolResultBlockContentListBlockImplCopyWith< - _$ToolResultBlockContentListBlockImpl> - get copyWith => __$$ToolResultBlockContentListBlockImplCopyWithImpl< - _$ToolResultBlockContentListBlockImpl>(this, _$identity); + _$$ToolResultBlockContentBlocksImplCopyWith< + _$ToolResultBlockContentBlocksImpl> + get copyWith => __$$ToolResultBlockContentBlocksImplCopyWithImpl< + _$ToolResultBlockContentBlocksImpl>(this, _$identity); @override @optionalTypeArgs @@ -5243,8 +5238,8 @@ class _$ToolResultBlockContentListBlockImpl @override @optionalTypeArgs TResult map({ - required TResult Function(ToolResultBlockContentListBlock value) blocks, - required TResult Function(ToolResultBlockContentString value) text, + required TResult Function(ToolResultBlockContentBlocks value) blocks, + required TResult Function(ToolResultBlockContentText value) text, }) { return blocks(this); } @@ -5252,8 +5247,8 @@ class _$ToolResultBlockContentListBlockImpl @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ToolResultBlockContentListBlock value)? blocks, - TResult? Function(ToolResultBlockContentString value)? text, + TResult? Function(ToolResultBlockContentBlocks value)? blocks, + TResult? Function(ToolResultBlockContentText value)? text, }) { return blocks?.call(this); } @@ -5261,8 +5256,8 @@ class _$ToolResultBlockContentListBlockImpl @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ToolResultBlockContentListBlock value)? blocks, - TResult Function(ToolResultBlockContentString value)? text, + TResult Function(ToolResultBlockContentBlocks value)? blocks, + TResult Function(ToolResultBlockContentText value)? 
text, required TResult orElse(), }) { if (blocks != null) { @@ -5273,46 +5268,46 @@ class _$ToolResultBlockContentListBlockImpl @override Map toJson() { - return _$$ToolResultBlockContentListBlockImplToJson( + return _$$ToolResultBlockContentBlocksImplToJson( this, ); } } -abstract class ToolResultBlockContentListBlock extends ToolResultBlockContent { - const factory ToolResultBlockContentListBlock(final List value) = - _$ToolResultBlockContentListBlockImpl; - const ToolResultBlockContentListBlock._() : super._(); +abstract class ToolResultBlockContentBlocks extends ToolResultBlockContent { + const factory ToolResultBlockContentBlocks(final List value) = + _$ToolResultBlockContentBlocksImpl; + const ToolResultBlockContentBlocks._() : super._(); - factory ToolResultBlockContentListBlock.fromJson(Map json) = - _$ToolResultBlockContentListBlockImpl.fromJson; + factory ToolResultBlockContentBlocks.fromJson(Map json) = + _$ToolResultBlockContentBlocksImpl.fromJson; @override List get value; @JsonKey(ignore: true) - _$$ToolResultBlockContentListBlockImplCopyWith< - _$ToolResultBlockContentListBlockImpl> + _$$ToolResultBlockContentBlocksImplCopyWith< + _$ToolResultBlockContentBlocksImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ToolResultBlockContentStringImplCopyWith<$Res> { - factory _$$ToolResultBlockContentStringImplCopyWith( - _$ToolResultBlockContentStringImpl value, - $Res Function(_$ToolResultBlockContentStringImpl) then) = - __$$ToolResultBlockContentStringImplCopyWithImpl<$Res>; +abstract class _$$ToolResultBlockContentTextImplCopyWith<$Res> { + factory _$$ToolResultBlockContentTextImplCopyWith( + _$ToolResultBlockContentTextImpl value, + $Res Function(_$ToolResultBlockContentTextImpl) then) = + __$$ToolResultBlockContentTextImplCopyWithImpl<$Res>; @useResult $Res call({String value}); } /// @nodoc -class __$$ToolResultBlockContentStringImplCopyWithImpl<$Res> +class __$$ToolResultBlockContentTextImplCopyWithImpl<$Res> extends _$ToolResultBlockContentCopyWithImpl<$Res, - _$ToolResultBlockContentStringImpl> - implements _$$ToolResultBlockContentStringImplCopyWith<$Res> { - __$$ToolResultBlockContentStringImplCopyWithImpl( - _$ToolResultBlockContentStringImpl _value, - $Res Function(_$ToolResultBlockContentStringImpl) _then) + _$ToolResultBlockContentTextImpl> + implements _$$ToolResultBlockContentTextImplCopyWith<$Res> { + __$$ToolResultBlockContentTextImplCopyWithImpl( + _$ToolResultBlockContentTextImpl _value, + $Res Function(_$ToolResultBlockContentTextImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @@ -5320,7 +5315,7 @@ class __$$ToolResultBlockContentStringImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$ToolResultBlockContentStringImpl( + return _then(_$ToolResultBlockContentTextImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable @@ -5331,14 +5326,14 @@ class __$$ToolResultBlockContentStringImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { - const _$ToolResultBlockContentStringImpl(this.value, {final String? $type}) +class _$ToolResultBlockContentTextImpl extends ToolResultBlockContentText { + const _$ToolResultBlockContentTextImpl(this.value, {final String? $type}) : $type = $type ?? 
'text', super._(); - factory _$ToolResultBlockContentStringImpl.fromJson( + factory _$ToolResultBlockContentTextImpl.fromJson( Map json) => - _$$ToolResultBlockContentStringImplFromJson(json); + _$$ToolResultBlockContentTextImplFromJson(json); @override final String value; @@ -5355,7 +5350,7 @@ class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ToolResultBlockContentStringImpl && + other is _$ToolResultBlockContentTextImpl && (identical(other.value, value) || other.value == value)); } @@ -5366,10 +5361,9 @@ class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ToolResultBlockContentStringImplCopyWith< - _$ToolResultBlockContentStringImpl> - get copyWith => __$$ToolResultBlockContentStringImplCopyWithImpl< - _$ToolResultBlockContentStringImpl>(this, _$identity); + _$$ToolResultBlockContentTextImplCopyWith<_$ToolResultBlockContentTextImpl> + get copyWith => __$$ToolResultBlockContentTextImplCopyWithImpl< + _$ToolResultBlockContentTextImpl>(this, _$identity); @override @optionalTypeArgs @@ -5405,8 +5399,8 @@ class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { @override @optionalTypeArgs TResult map({ - required TResult Function(ToolResultBlockContentListBlock value) blocks, - required TResult Function(ToolResultBlockContentString value) text, + required TResult Function(ToolResultBlockContentBlocks value) blocks, + required TResult Function(ToolResultBlockContentText value) text, }) { return text(this); } @@ -5414,8 +5408,8 @@ class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ToolResultBlockContentListBlock value)? blocks, - TResult? Function(ToolResultBlockContentString value)? text, + TResult? Function(ToolResultBlockContentBlocks value)? blocks, + TResult? Function(ToolResultBlockContentText value)? text, }) { return text?.call(this); } @@ -5423,8 +5417,8 @@ class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ToolResultBlockContentListBlock value)? blocks, - TResult Function(ToolResultBlockContentString value)? text, + TResult Function(ToolResultBlockContentBlocks value)? blocks, + TResult Function(ToolResultBlockContentText value)? 
text, required TResult orElse(), }) { if (text != null) { @@ -5435,25 +5429,24 @@ class _$ToolResultBlockContentStringImpl extends ToolResultBlockContentString { @override Map toJson() { - return _$$ToolResultBlockContentStringImplToJson( + return _$$ToolResultBlockContentTextImplToJson( this, ); } } -abstract class ToolResultBlockContentString extends ToolResultBlockContent { - const factory ToolResultBlockContentString(final String value) = - _$ToolResultBlockContentStringImpl; - const ToolResultBlockContentString._() : super._(); +abstract class ToolResultBlockContentText extends ToolResultBlockContent { + const factory ToolResultBlockContentText(final String value) = + _$ToolResultBlockContentTextImpl; + const ToolResultBlockContentText._() : super._(); - factory ToolResultBlockContentString.fromJson(Map json) = - _$ToolResultBlockContentStringImpl.fromJson; + factory ToolResultBlockContentText.fromJson(Map json) = + _$ToolResultBlockContentTextImpl.fromJson; @override String get value; @JsonKey(ignore: true) - _$$ToolResultBlockContentStringImplCopyWith< - _$ToolResultBlockContentStringImpl> + _$$ToolResultBlockContentTextImplCopyWith<_$ToolResultBlockContentTextImpl> get copyWith => throw _privateConstructorUsedError; } diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart index f3dded29..dc8d9833 100644 --- a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart @@ -62,15 +62,13 @@ Map _$$CreateMessageRequestImplToJson( return val; } -_$ModelEnumerationImpl _$$ModelEnumerationImplFromJson( - Map json) => - _$ModelEnumerationImpl( +_$ModelCatalogImpl _$$ModelCatalogImplFromJson(Map json) => + _$ModelCatalogImpl( $enumDecode(_$ModelsEnumMap, json['value']), $type: json['runtimeType'] as String?, ); -Map _$$ModelEnumerationImplToJson( - _$ModelEnumerationImpl instance) => +Map _$$ModelCatalogImplToJson(_$ModelCatalogImpl instance) => { 'value': _$ModelsEnumMap[instance.value]!, 'runtimeType': instance.$type, @@ -86,13 +84,13 @@ const _$ModelsEnumMap = { Models.claudeInstant12: 'claude-instant-1.2', }; -_$ModelStringImpl _$$ModelStringImplFromJson(Map json) => - _$ModelStringImpl( +_$ModelIdImpl _$$ModelIdImplFromJson(Map json) => + _$ModelIdImpl( json['value'] as String, $type: json['runtimeType'] as String?, ); -Map _$$ModelStringImplToJson(_$ModelStringImpl instance) => +Map _$$ModelIdImplToJson(_$ModelIdImpl instance) => { 'value': instance.value, 'runtimeType': instance.$type, @@ -192,31 +190,31 @@ const _$StopReasonEnumMap = { StopReason.toolUse: 'tool_use', }; -_$MessageContentListBlockImpl _$$MessageContentListBlockImplFromJson( +_$MessageContentBlocksImpl _$$MessageContentBlocksImplFromJson( Map json) => - _$MessageContentListBlockImpl( + _$MessageContentBlocksImpl( (json['value'] as List) .map((e) => Block.fromJson(e as Map)) .toList(), $type: json['runtimeType'] as String?, ); -Map _$$MessageContentListBlockImplToJson( - _$MessageContentListBlockImpl instance) => +Map _$$MessageContentBlocksImplToJson( + _$MessageContentBlocksImpl instance) => { 'value': instance.value.map((e) => e.toJson()).toList(), 'runtimeType': instance.$type, }; -_$MessageContentStringImpl _$$MessageContentStringImplFromJson( +_$MessageContentTextImpl _$$MessageContentTextImplFromJson( Map json) => - _$MessageContentStringImpl( + _$MessageContentTextImpl( json['value'] as String, $type: json['runtimeType'] as String?, ); -Map 
_$$MessageContentStringImplToJson( - _$MessageContentStringImpl instance) => +Map _$$MessageContentTextImplToJson( + _$MessageContentTextImpl instance) => { 'value': instance.value, 'runtimeType': instance.$type, @@ -385,31 +383,31 @@ Map _$$ToolResultBlockImplToJson( return val; } -_$ToolResultBlockContentListBlockImpl - _$$ToolResultBlockContentListBlockImplFromJson(Map json) => - _$ToolResultBlockContentListBlockImpl( - (json['value'] as List) - .map((e) => Block.fromJson(e as Map)) - .toList(), - $type: json['runtimeType'] as String?, - ); - -Map _$$ToolResultBlockContentListBlockImplToJson( - _$ToolResultBlockContentListBlockImpl instance) => +_$ToolResultBlockContentBlocksImpl _$$ToolResultBlockContentBlocksImplFromJson( + Map json) => + _$ToolResultBlockContentBlocksImpl( + (json['value'] as List) + .map((e) => Block.fromJson(e as Map)) + .toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$ToolResultBlockContentBlocksImplToJson( + _$ToolResultBlockContentBlocksImpl instance) => { 'value': instance.value.map((e) => e.toJson()).toList(), 'runtimeType': instance.$type, }; -_$ToolResultBlockContentStringImpl _$$ToolResultBlockContentStringImplFromJson( +_$ToolResultBlockContentTextImpl _$$ToolResultBlockContentTextImplFromJson( Map json) => - _$ToolResultBlockContentStringImpl( + _$ToolResultBlockContentTextImpl( json['value'] as String, $type: json['runtimeType'] as String?, ); -Map _$$ToolResultBlockContentStringImplToJson( - _$ToolResultBlockContentStringImpl instance) => +Map _$$ToolResultBlockContentTextImplToJson( + _$ToolResultBlockContentTextImpl instance) => { 'value': instance.value, 'runtimeType': instance.$type, diff --git a/packages/anthropic_sdk_dart/oas/main.dart b/packages/anthropic_sdk_dart/oas/main.dart index 9aa7a39b..316cc26c 100644 --- a/packages/anthropic_sdk_dart/oas/main.dart +++ b/packages/anthropic_sdk_dart/oas/main.dart @@ -12,6 +12,7 @@ void main() async { destination: 'lib/src/generated/', replace: true, schemaOptions: const SchemaGeneratorOptions( + onSchemaName: _onSchemaName, onSchemaUnionFactoryName: _onSchemaUnionFactoryName, ), clientOptions: const ClientGeneratorOptions( @@ -25,17 +26,27 @@ void main() async { ); } +String? _onSchemaName(final String schemaName) => switch (schemaName) { + 'ModelEnumeration' => 'ModelCatalog', + 'ModelString' => 'ModelId', + 'MessageContentString' => 'MessageContentText', + 'MessageContentListBlock' => 'MessageContentBlocks', + 'ToolResultBlockContentListBlock' => 'ToolResultBlockContentBlocks', + 'ToolResultBlockContentString' => 'ToolResultBlockContentText', + _ => schemaName, + }; + String? 
_onSchemaUnionFactoryName( final String union, final String unionSubclass, ) => switch (unionSubclass) { - 'ModelEnumeration' => 'model', - 'ModelString' => 'modelId', - 'MessageContentListBlock' => 'blocks', - 'MessageContentString' => 'text', - 'ToolResultBlockContentListBlock' => 'blocks', - 'ToolResultBlockContentString' => 'text', + 'ModelCatalog' => 'model', + 'ModelId' => 'modelId', + 'MessageContentText' => 'text', + 'MessageContentBlocks' => 'blocks', + 'ToolResultBlockContentBlocks' => 'blocks', + 'ToolResultBlockContentText' => 'text', 'TextBlockDelta' => 'textDelta', 'InputJsonBlockDelta' => 'inputJsonDelta', 'MessageStartEvent' => 'messageStart', From c257b8a2b9becee32cf47e81b42214ed529b868e Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Tue, 2 Jul 2024 22:15:48 +0200 Subject: [PATCH 056/251] feat: Add extensions on ToolResultBlockContent in anthropic_sdk_dart (#476) --- .../lib/src/extensions.dart | 20 +++++++++++++++++++ packages/langchain_anthropic/README.md | 15 ++++++++++++-- 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/packages/anthropic_sdk_dart/lib/src/extensions.dart b/packages/anthropic_sdk_dart/lib/src/extensions.dart index 58b178a2..ddafbab9 100644 --- a/packages/anthropic_sdk_dart/lib/src/extensions.dart +++ b/packages/anthropic_sdk_dart/lib/src/extensions.dart @@ -20,6 +20,26 @@ extension MessageContentX on MessageContent { } } +/// Extension methods for [ToolResultBlockContent]. +extension ToolResultBlockContentX on ToolResultBlockContent { + /// Returns the text content of the tool result block content. + String get text { + return map( + text: (ToolResultBlockContentText t) => t.value, + blocks: (b) => + b.value.whereType().map((t) => t.text).join('\n'), + ); + } + + /// Returns the blocks of the tool result block content. + List get blocks { + return map( + text: (t) => [Block.text(text: t.value)], + blocks: (b) => b.value, + ); + } +} + /// Extension methods for [Block]. extension BlockX on Block { /// Returns the text content of the block. diff --git a/packages/langchain_anthropic/README.md b/packages/langchain_anthropic/README.md index 2d9f50a0..85d07866 100644 --- a/packages/langchain_anthropic/README.md +++ b/packages/langchain_anthropic/README.md @@ -1,6 +1,17 @@ -# 🦜️🔗 LangChain.dart +# 🦜️🔗 LangChain.dart / Anthropic -Anthropic module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart). +[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) +[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment) +[![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic) +[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) +[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) + +[Anthropic](https://anthropic.com) module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart). + +## Features + +- Chat models: + * `ChatAnthropic`: wrapper around [Anthropic Messages](https://docs.anthropic.com/en/api/messages) API (Claude). 
## License From 59f23407750d89527058e1ee6b91187bac915741 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Tue, 2 Jul 2024 22:24:12 +0200 Subject: [PATCH 057/251] feat: Add ChatAnthropic integration (#477) --- docs/_sidebar.md | 1 + .../models/chat_models/how_to/tools.md | 1 + .../chat_models/integrations/anthropic.md | 145 ++++++ .../chat_models/integrations/anthropic.dart | 109 +++++ examples/docs_examples/pubspec.lock | 14 + examples/docs_examples/pubspec.yaml | 1 + examples/docs_examples/pubspec_overrides.yaml | 6 +- packages/anthropic_sdk_dart/README.md | 1 + .../example/langchain_anthropic_example.dart | 42 +- .../lib/langchain_anthropic.dart | 2 + .../lib/src/chat_models/chat_anthropic.dart | 238 ++++++++++ .../lib/src/chat_models/chat_models.dart | 2 + .../lib/src/chat_models/mappers.dart | 431 ++++++++++++++++++ .../lib/src/chat_models/types.dart | 116 +++++ packages/langchain_anthropic/pubspec.yaml | 14 +- .../pubspec_overrides.yaml | 6 + .../test/chat_models/assets/apple.jpeg | Bin 0 -> 66803 bytes .../test/chat_models/chat_anthropic_test.dart | 293 ++++++++++++ .../lib/src/chat_models/types.dart | 2 +- 19 files changed, 1418 insertions(+), 6 deletions(-) create mode 100644 docs/modules/model_io/models/chat_models/integrations/anthropic.md create mode 100644 examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart create mode 100644 packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart create mode 100644 packages/langchain_anthropic/lib/src/chat_models/chat_models.dart create mode 100644 packages/langchain_anthropic/lib/src/chat_models/mappers.dart create mode 100644 packages/langchain_anthropic/lib/src/chat_models/types.dart create mode 100644 packages/langchain_anthropic/pubspec_overrides.yaml create mode 100644 packages/langchain_anthropic/test/chat_models/assets/apple.jpeg create mode 100644 packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart diff --git a/docs/_sidebar.md b/docs/_sidebar.md index 6ce757ba..2637ce9b 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -56,6 +56,7 @@ - [Tool calling](/modules/model_io/models/chat_models/how_to/tools.md) - [LLMChain](/modules/model_io/models/chat_models/how_to/llm_chain.md) - Integrations + - [Anthropic](/modules/model_io/models/chat_models/integrations/anthropic.md) - [OpenAI](/modules/model_io/models/chat_models/integrations/openai.md) - [Firebase Vertex AI](/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md) - [GCP Vertex AI](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md) diff --git a/docs/modules/model_io/models/chat_models/how_to/tools.md b/docs/modules/model_io/models/chat_models/how_to/tools.md index 16c12081..0303be9c 100644 --- a/docs/modules/model_io/models/chat_models/how_to/tools.md +++ b/docs/modules/model_io/models/chat_models/how_to/tools.md @@ -3,6 +3,7 @@ > We use the term "tool calling" interchangeably with "function calling". Although function calling is sometimes meant to refer to invocations of a single function, we treat all models as though they can return multiple tool or function calls in each message. 
> Tool calling is currently supported by: +> - [`ChatAnthropic`](/modules/model_io/models/chat_models/integrations/anthropic.md) > - [`ChatOpenAI`](/modules/model_io/models/chat_models/integrations/openai.md) > - [`ChatFirebaseVertexAI`](/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md) > - [`ChatGoogleGenerativeAI`](/modules/model_io/models/chat_models/integrations/googleai.md) diff --git a/docs/modules/model_io/models/chat_models/integrations/anthropic.md b/docs/modules/model_io/models/chat_models/integrations/anthropic.md new file mode 100644 index 00000000..b3e99c84 --- /dev/null +++ b/docs/modules/model_io/models/chat_models/integrations/anthropic.md @@ -0,0 +1,145 @@ +# ChatAnthropic + +Wrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API). + +## Setup + +The Anthropic API uses API keys for authentication. Visit your [API Keys](https://console.anthropic.com/settings/keys) page to retrieve the API key you'll use in your requests. + +The following models are available: +- `claude-3-5-sonnet-20240620` +- `claude-3-haiku-20240307` +- `claude-3-opus-20240229` +- `claude-3-sonnet-20240229` +- `claude-2.0` +- `claude-2.1` + +Mind that the list may not be up-to-date. See https://docs.anthropic.com/en/docs/about-claude/models for the updated list. + +## Usage + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, 'Text to translate:\n{text}'), +]); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'adore programmer.' +``` + +## Multimodal support + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), +); + +print(res.output.content); +// -> 'The fruit in the image is an apple.' +``` + +## Streaming + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser()); + +final stream = chain.stream({'max_num': '30'}); +await stream.forEach(print); +// 123 +// 456789101 +// 112131415161 +// 718192021222 +// 324252627282 +// 930 +``` + +## Tool calling + +`ChatAnthropic` supports tool calling. 
+ +Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. + +Example: +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + }, + 'required': ['location'], + }, +); +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + tools: [tool], + ), +); + +final res = await model.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +``` diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart new file mode 100644 index 00000000..45c1cd55 --- /dev/null +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart @@ -0,0 +1,109 @@ +// ignore_for_file: avoid_print +import 'dart:convert'; +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_anthropic/langchain_anthropic.dart'; + +void main(final List arguments) async { + await _invokeModel(); + await _multiModal(); + await _streaming(); +} + +Future _invokeModel() async { + final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + + final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: const ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), + ); + + final chatPrompt = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that translates {input_language} to {output_language}.' + ), + (ChatMessageType.human, 'Text to translate:\n{text}'), + ]); + + final chain = chatPrompt | chatModel | const StringOutputParser(); + + final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', + }); + print(res); + // -> 'J'adore programmer.' + + chatModel.close(); +} + +Future _multiModal() async { + final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + + final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: const ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), + ); + final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), + ); + print(res.output.content); + // -> 'The fruit in the image is an apple.' 
+ + chatModel.close(); +} + +Future _streaming() async { + final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + + final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces or commas.', + ), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), + ]); + + final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: const ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), + ); + + final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser()); + + final stream = chain.stream({'max_num': '30'}); + await stream.forEach(print); + // 123 + // 456789101 + // 112131415161 + // 718192021222 + // 324252627282 + // 930 + + chatModel.close(); +} diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 2a5b086d..47e6b5b7 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -9,6 +9,13 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.6" + anthropic_sdk_dart: + dependency: "direct overridden" + description: + path: "../../packages/anthropic_sdk_dart" + relative: true + source: path + version: "0.0.1" args: dependency: transitive description: @@ -239,6 +246,13 @@ packages: relative: true source: path version: "0.7.2" + langchain_anthropic: + dependency: "direct main" + description: + path: "../../packages/langchain_anthropic" + relative: true + source: path + version: "0.0.1-dev.1" langchain_chroma: dependency: "direct main" description: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 3044b6d2..2f6b0f37 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -8,6 +8,7 @@ environment: dependencies: langchain: ^0.7.2 + langchain_anthropic: ^0.0.1-dev.1 langchain_chroma: ^0.2.0+5 langchain_community: 0.2.1+1 langchain_google: ^0.5.1 diff --git a/examples/docs_examples/pubspec_overrides.yaml b/examples/docs_examples/pubspec_overrides.yaml index cc3f10d6..1c756a5e 100644 --- a/examples/docs_examples/pubspec_overrides.yaml +++ b/examples/docs_examples/pubspec_overrides.yaml @@ -1,9 +1,13 @@ -# melos_managed_dependency_overrides: chromadb,langchain,langchain_chroma,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai,langchain_core,langchain_community,tavily_dart +# melos_managed_dependency_overrides: chromadb,langchain,langchain_chroma,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai,langchain_core,langchain_community,tavily_dart,anthropic_sdk_dart,langchain_anthropic dependency_overrides: + anthropic_sdk_dart: + path: ../../packages/anthropic_sdk_dart chromadb: path: ../../packages/chromadb langchain: path: ../../packages/langchain + langchain_anthropic: + path: ../../packages/langchain_anthropic langchain_chroma: path: ../../packages/langchain_chroma langchain_community: diff --git a/packages/anthropic_sdk_dart/README.md b/packages/anthropic_sdk_dart/README.md index bc8b7208..dc51d776 100644 --- a/packages/anthropic_sdk_dart/README.md +++ b/packages/anthropic_sdk_dart/README.md @@ -23,6 +23,7 @@ Unofficial Dart client for [Anthropic](https://docs.anthropic.com/en/api) API (a - [Usage](#usage) * [Authentication](#authentication) * [Messages](#messages) + * [Tool use](#tool-use) - [Advance 
Usage](#advance-usage) * [Default HTTP client](#default-http-client) * [Custom HTTP client](#custom-http-client) diff --git a/packages/langchain_anthropic/example/langchain_anthropic_example.dart b/packages/langchain_anthropic/example/langchain_anthropic_example.dart index 21f3e9f2..fabef4bd 100644 --- a/packages/langchain_anthropic/example/langchain_anthropic_example.dart +++ b/packages/langchain_anthropic/example/langchain_anthropic_example.dart @@ -1,3 +1,41 @@ -void main() { - // TODO +// ignore_for_file: avoid_print, unused_element +import 'dart:io'; + +import 'package:langchain_anthropic/langchain_anthropic.dart'; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/prompts.dart'; + +/// Check the docs for more examples: +/// https://langchaindart.dev +void main() async { + // Uncomment the example you want to run: + await _example1(); + // await _example2(); +} + +/// The most basic example of LangChain is calling a model on some input +Future _example1() async { + final openAiApiKey = Platform.environment['ANTHROPIC_API_KEY']; + final llm = ChatAnthropic( + apiKey: openAiApiKey, + defaultOptions: const ChatAnthropicOptions(temperature: 1), + ); + final ChatResult res = await llm.invoke( + PromptValue.string('Tell me a joke'), + ); + print(res); +} + +/// Instead of waiting for the full response from the model, you can stream it +/// while it's being generated +Future _example2() async { + final openAiApiKey = Platform.environment['ANTHROPIC_API_KEY']; + final llm = ChatAnthropic( + apiKey: openAiApiKey, + defaultOptions: const ChatAnthropicOptions(temperature: 1), + ); + final Stream stream = llm.stream( + PromptValue.string('Tell me a joke'), + ); + await stream.forEach((final chunk) => stdout.write(chunk.output.content)); } diff --git a/packages/langchain_anthropic/lib/langchain_anthropic.dart b/packages/langchain_anthropic/lib/langchain_anthropic.dart index d8becc4d..78ee6803 100644 --- a/packages/langchain_anthropic/lib/langchain_anthropic.dart +++ b/packages/langchain_anthropic/lib/langchain_anthropic.dart @@ -1,2 +1,4 @@ /// Anthropic module for LangChain.dart. library; + +export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart b/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart new file mode 100644 index 00000000..13a687a3 --- /dev/null +++ b/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart @@ -0,0 +1,238 @@ +import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart' as a; +import 'package:http/http.dart' as http; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_tiktoken/langchain_tiktoken.dart'; + +import 'mappers.dart'; +import 'types.dart'; + +/// Wrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) +/// (aka Claude API). +/// +/// Example: +/// ```dart +/// final chatModel = ChatAnthropic(apiKey: '...'); +/// final messages = [ +/// ChatMessage.system('You are a helpful assistant that translates English to French.'), +/// ChatMessage.humanText('I love programming.'), +/// ]; +/// final prompt = PromptValue.chat(messages); +/// final res = await llm.invoke(prompt); +/// ``` +/// +/// - Docs: https://docs.anthropic.com +/// +/// ### Authentication +/// +/// The Anthropic API uses API keys for authentication. 
+/// Visit your [API Keys](https://console.anthropic.com/settings/keys) page to
+/// retrieve the API key you'll use in your requests.
+///
+/// ### Available models
+///
+/// The following models are available:
+/// - `claude-3-5-sonnet-20240620`
+/// - `claude-3-haiku-20240307`
+/// - `claude-3-opus-20240229`
+/// - `claude-3-sonnet-20240229`
+/// - `claude-2.0`
+/// - `claude-2.1`
+///
+/// Mind that the list may not be up-to-date.
+/// See https://docs.anthropic.com/en/docs/about-claude/models for the updated list.
+///
+/// ### Call options
+///
+/// You can configure the parameters that will be used when calling the
+/// Messages API in several ways:
+///
+/// **Default options:**
+///
+/// Use the [defaultOptions] parameter to set the default options. These
+/// options will be used unless you override them when generating completions.
+///
+/// ```dart
+/// final chatModel = ChatAnthropic(
+///   apiKey: anthropicApiKey,
+///   defaultOptions: const ChatAnthropicOptions(
+///     temperature: 0.9,
+///     maxTokens: 100,
+///   ),
+/// );
+/// ```
+///
+/// **Call options:**
+///
+/// You can override the default options when invoking the model:
+///
+/// ```dart
+/// final res = await chatModel.invoke(
+///   prompt,
+///   options: const ChatAnthropicOptions(temperature: 0.5),
+/// );
+/// ```
+///
+/// **Bind:**
+///
+/// You can also change the options in a [Runnable] pipeline using the bind
+/// method.
+///
+/// In this example, we are using two different models for each question:
+///
+/// ```dart
+/// final chatModel = ChatAnthropic(apiKey: anthropicApiKey);
+/// const outputParser = StringOutputParser();
+/// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?');
+/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?');
+/// final chain = Runnable.fromMap({
+///   'q1': prompt1 | chatModel.bind(const ChatAnthropicOptions(model: 'claude-3-5-sonnet-20240620')) | outputParser,
+///   'q2': prompt2 | chatModel.bind(const ChatAnthropicOptions(model: 'claude-3-sonnet-20240229')) | outputParser,
+/// });
+/// final res = await chain.invoke({'name': 'David'});
+/// ```
+///
+/// ### Advanced
+///
+/// #### Custom HTTP client
+///
+/// You can always provide your own implementation of `http.Client` for further
+/// customization:
+///
+/// ```dart
+/// final client = ChatAnthropic(
+///   apiKey: 'ANTHROPIC_API_KEY',
+///   client: MyHttpClient(),
+/// );
+/// ```
+///
+/// #### Using a proxy
+///
+/// ##### HTTP proxy
+///
+/// You can use your own HTTP proxy by overriding the `baseUrl` and providing
+/// your required `headers`:
+///
+/// ```dart
+/// final client = ChatAnthropic(
+///   baseUrl: 'https://my-proxy.com',
+///   headers: {'x-my-proxy-header': 'value'},
+/// );
+/// ```
+///
+/// If you need further customization, you can always provide your own
+/// `http.Client`.
+///
+/// ##### SOCKS5 proxy
+///
+/// To use a SOCKS5 proxy, you can use the
+/// [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package and a
+/// custom `http.Client`.
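+///
+/// A rough sketch (it assumes the `socks5_proxy` and `package:http` `IOClient`
+/// APIs; check their documentation for the exact usage):
+///
+/// ```dart
+/// // Route a dart:io HttpClient through the SOCKS5 proxy.
+/// final baseClient = HttpClient();
+/// SocksTCPClient.assignToHttpClient(baseClient, [
+///   ProxySettings(InternetAddress.loopbackIPv4, 1080),
+/// ]);
+/// // Wrap it in a package:http client and pass it to ChatAnthropic.
+/// final chatModel = ChatAnthropic(
+///   apiKey: 'ANTHROPIC_API_KEY',
+///   client: IOClient(baseClient),
+/// );
+/// ```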
+class ChatAnthropic extends BaseChatModel<ChatAnthropicOptions> {
+  /// Create a new [ChatAnthropic] instance.
+  ///
+  /// Main configuration options:
+  /// - `apiKey`: your Anthropic API key. You can find your API key in the
+  ///   [Anthropic dashboard](https://console.anthropic.com/settings/keys).
+  /// - [ChatAnthropic.encoding]
+  /// - [ChatAnthropic.defaultOptions]
+  ///
+  /// Advanced configuration options:
+  /// - `baseUrl`: the base URL to use. Defaults to Anthropic's API URL. You can
+  ///   override this to use a different API URL, or to use a proxy.
+  /// - `headers`: global headers to send with every request. You can use
+  ///   this to set custom headers, or to override the default headers.
+  /// - `queryParams`: global query parameters to send with every request. You
+  ///   can use this to set custom query parameters.
+  /// - `client`: the HTTP client to use. You can set your own HTTP client if
+  ///   you need further customization (e.g. to use a SOCKS5 proxy).
+  ChatAnthropic({
+    final String? apiKey,
+    final String baseUrl = 'https://api.anthropic.com/v1',
+    final Map<String, String>? headers,
+    final Map<String, dynamic>? queryParams,
+    final http.Client? client,
+    super.defaultOptions = const ChatAnthropicOptions(
+      model: 'claude-3-5-sonnet-20240620',
+    ),
+    this.encoding = 'cl100k_base',
+  }) : _client = a.AnthropicClient(
+          apiKey: apiKey ?? '',
+          baseUrl: baseUrl,
+          headers: headers,
+          queryParams: queryParams,
+          client: client,
+        );
+
+  /// A client for interacting with the Anthropic API.
+  final a.AnthropicClient _client;
+
+  /// The encoding used by tiktoken when [tokenize] is called.
+  ///
+  /// Anthropic does not provide any API to count tokens, so we use tiktoken
+  /// to get an estimation of the number of tokens in a prompt.
+  String encoding;
+
+  @override
+  String get modelType => 'anthropic-chat';
+
+  @override
+  Future<ChatResult> invoke(
+    final PromptValue input, {
+    final ChatAnthropicOptions? options,
+  }) async {
+    final completion = await _client.createMessage(
+      request: createMessageRequest(
+        input.toChatMessages(),
+        options: options,
+        defaultOptions: defaultOptions,
+        throwNullModelError: throwNullModelError,
+      ),
+    );
+    return completion.toChatResult();
+  }
+
+  @override
+  Stream<ChatResult> stream(
+    final PromptValue input, {
+    final ChatAnthropicOptions? options,
+  }) {
+    return _client
+        .createMessageStream(
+          request: createMessageRequest(
+            input.toChatMessages(),
+            options: options,
+            defaultOptions: defaultOptions,
+            stream: true,
+            throwNullModelError: throwNullModelError,
+          ),
+        )
+        .transform(MessageStreamEventTransformer());
+  }
+
+  /// Tokenizes the given prompt using tiktoken.
+  ///
+  /// Currently Anthropic does not provide a tokenizer for the models it
+  /// supports, so we use tiktoken with the [encoding] model to get an
+  /// approximation of the token count. Mind that the actual tokens used by
+  /// the Anthropic model may differ.
+  ///
+  /// If an encoding model is specified in the [encoding] field, that
+  /// encoding is used instead.
+  ///
+  /// - [promptValue] The prompt to tokenize.
+  @override
+  Future<List<int>> tokenize(
+    final PromptValue promptValue, {
+    final ChatAnthropicOptions?
options, + }) async { + final encoding = getEncoding(this.encoding); + return encoding.encode(promptValue.toString()); + } + + @override + void close() { + _client.endSession(); + } +} diff --git a/packages/langchain_anthropic/lib/src/chat_models/chat_models.dart b/packages/langchain_anthropic/lib/src/chat_models/chat_models.dart new file mode 100644 index 00000000..1a011d3c --- /dev/null +++ b/packages/langchain_anthropic/lib/src/chat_models/chat_models.dart @@ -0,0 +1,2 @@ +export 'chat_anthropic.dart'; +export 'types.dart'; diff --git a/packages/langchain_anthropic/lib/src/chat_models/mappers.dart b/packages/langchain_anthropic/lib/src/chat_models/mappers.dart new file mode 100644 index 00000000..002df82c --- /dev/null +++ b/packages/langchain_anthropic/lib/src/chat_models/mappers.dart @@ -0,0 +1,431 @@ +// ignore_for_file: public_member_api_docs +import 'dart:async'; +import 'dart:convert'; + +import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart' as a; +import 'package:collection/collection.dart' show IterableExtension; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/language_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:rxdart/rxdart.dart' show WhereNotNullExtension; + +import 'types.dart'; + +/// Creates a [CreateMessageRequest] from the given input. +a.CreateMessageRequest createMessageRequest( + final List messages, { + required final ChatAnthropicOptions? options, + required final ChatAnthropicOptions defaultOptions, + final bool stream = false, + required Never Function() throwNullModelError, +}) { + final systemMsg = messages.firstOrNull is SystemChatMessage + ? messages.firstOrNull?.contentAsString + : null; + + final messagesDtos = messages.toMessages(); + final toolChoice = options?.toolChoice ?? defaultOptions.toolChoice; + final toolChoiceDto = toolChoice?.toToolChoice(); + final toolsDtos = + (options?.tools ?? defaultOptions.tools)?.toTool(toolChoice); + + return a.CreateMessageRequest( + model: a.Model.modelId( + options?.model ?? defaultOptions.model ?? throwNullModelError(), + ), + messages: messagesDtos, + maxTokens: options?.maxTokens ?? defaultOptions.maxTokens ?? 1024, + stopSequences: options?.stopSequences ?? defaultOptions.stopSequences, + system: systemMsg, + temperature: options?.temperature ?? defaultOptions.temperature, + topK: options?.topK ?? defaultOptions.topK, + topP: options?.topP ?? defaultOptions.topP, + metadata: a.CreateMessageRequestMetadata( + userId: options?.userId ?? 
defaultOptions.userId, + ), + tools: toolsDtos, + toolChoice: toolChoiceDto, + stream: stream, + ); +} + +extension ChatMessageListMapper on List { + List toMessages() { + final List result = []; + final List consecutiveToolMessages = []; + + void flushToolMessages() { + if (consecutiveToolMessages.isNotEmpty) { + result.add(_mapToolChatMessages(consecutiveToolMessages)); + consecutiveToolMessages.clear(); + } + } + + for (final message in this) { + switch (message) { + case SystemChatMessage(): + flushToolMessages(); + continue; // System message set in request params + case final HumanChatMessage msg: + flushToolMessages(); + final res = _mapHumanChatMessage(msg); + result.add(res); + case final AIChatMessage msg: + flushToolMessages(); + final res = _mapAIChatMessage(msg); + result.add(res); + case final ToolChatMessage msg: + consecutiveToolMessages.add(msg); + case CustomChatMessage(): + throw UnsupportedError('Anthropic does not support custom messages'); + } + } + + flushToolMessages(); // Flush any remaining tool messages + return result; + } + + a.Message _mapHumanChatMessage(final HumanChatMessage msg) { + return a.Message( + role: a.MessageRole.user, + content: switch (msg.content) { + final ChatMessageContentText t => a.MessageContent.text(t.text), + final ChatMessageContentImage i => a.MessageContent.blocks([ + _mapHumanChatMessageContentImage(i), + ]), + final ChatMessageContentMultiModal mm => a.MessageContent.blocks( + mm.parts + .map( + (final part) => switch (part) { + final ChatMessageContentText t => + a.Block.text(text: t.text), + final ChatMessageContentImage i => + _mapHumanChatMessageContentImage(i), + ChatMessageContentMultiModal() => throw ArgumentError( + 'Cannot have multimodal content in multimodal content', + ), + }, + ) + .toList(growable: false), + ), + }, + ); + } + + a.Block _mapHumanChatMessageContentImage(ChatMessageContentImage i) { + return a.Block.image( + source: a.ImageBlockSource( + type: a.ImageBlockSourceType.base64, + mediaType: switch (i.mimeType) { + 'image/jpeg' => a.ImageBlockSourceMediaType.imageJpeg, + 'image/png' => a.ImageBlockSourceMediaType.imagePng, + 'image/gif' => a.ImageBlockSourceMediaType.imageGif, + 'image/webp' => a.ImageBlockSourceMediaType.imageWebp, + _ => + throw AssertionError('Unsupported image MIME type: ${i.mimeType}'), + }, + data: i.data.startsWith('http') + ? throw AssertionError( + 'Anthropic only supports base64-encoded images', + ) + : i.data, + ), + ); + } + + a.Message _mapAIChatMessage(final AIChatMessage msg) { + if (msg.toolCalls.isEmpty) { + return a.Message( + role: a.MessageRole.assistant, + content: a.MessageContent.text(msg.content), + ); + } else { + return a.Message( + role: a.MessageRole.assistant, + content: a.MessageContent.blocks( + msg.toolCalls + .map( + (final toolCall) => a.Block.toolUse( + id: toolCall.id, + name: toolCall.name, + input: toolCall.arguments, + ), + ) + .toList(growable: false), + ), + ); + } + } + + a.Message _mapToolChatMessages(final List msgs) { + return a.Message( + role: a.MessageRole.user, + content: a.MessageContent.blocks( + msgs + .map( + (msg) => a.Block.toolResult( + toolUseId: msg.toolCallId, + content: a.ToolResultBlockContent.text(msg.content), + ), + ) + .toList(growable: false), + ), + ); + } +} + +extension MessageMapper on a.Message { + ChatResult toChatResult() { + final (content, toolCalls) = _mapMessageContent(this.content); + return ChatResult( + id: id ?? 
'', + output: AIChatMessage( + content: content, + toolCalls: toolCalls, + ), + finishReason: _mapFinishReason(stopReason), + metadata: { + 'model': model, + 'stop_sequence': stopSequence, + }, + usage: _mapUsage(usage), + ); + } +} + +class MessageStreamEventTransformer + extends StreamTransformerBase { + MessageStreamEventTransformer(); + + String? lastMessageId; + String? lastToolCallId; + + @override + Stream bind(final Stream stream) { + return stream + .map( + (event) => switch (event) { + final a.MessageStartEvent e => _mapMessageStartEvent(e), + final a.MessageDeltaEvent e => _mapMessageDeltaEvent(e), + final a.ContentBlockStartEvent e => _mapContentBlockStartEvent(e), + final a.ContentBlockDeltaEvent e => _mapContentBlockDeltaEvent(e), + final a.ContentBlockStopEvent e => _mapContentBlockStopEvent(e), + final a.MessageStopEvent e => _mapMessageStopEvent(e), + a.PingEvent() => null, + }, + ) + .whereNotNull(); + } + + ChatResult _mapMessageStartEvent(final a.MessageStartEvent e) { + final msg = e.message; + + final msgId = msg.id ?? lastMessageId ?? ''; + lastMessageId = msgId; + final (content, toolCalls) = _mapMessageContent(e.message.content); + + return ChatResult( + id: msgId, + output: AIChatMessage( + content: content, + toolCalls: toolCalls, + ), + finishReason: _mapFinishReason(e.message.stopReason), + metadata: { + if (e.message.model != null) 'model': e.message.model, + if (e.message.stopSequence != null) + 'stop_sequence': e.message.stopSequence, + }, + usage: _mapUsage(e.message.usage), + streaming: true, + ); + } + + ChatResult _mapMessageDeltaEvent(final a.MessageDeltaEvent e) { + return ChatResult( + id: lastMessageId ?? '', + output: const AIChatMessage(content: ''), + finishReason: _mapFinishReason(e.delta.stopReason), + metadata: { + if (e.delta.stopSequence != null) 'stop_sequence': e.delta.stopSequence, + }, + usage: _mapMessageDeltaUsage(e.usage), + streaming: true, + ); + } + + ChatResult _mapContentBlockStartEvent(final a.ContentBlockStartEvent e) { + final (content, toolCall) = _mapContentBlock(e.contentBlock); + if (toolCall != null) { + lastToolCallId = toolCall.id; + } + + return ChatResult( + id: lastMessageId ?? '', + output: AIChatMessage( + content: content, + toolCalls: [if (toolCall != null) toolCall], + ), + finishReason: FinishReason.unspecified, + metadata: const {}, + usage: const LanguageModelUsage(), + streaming: true, + ); + } + + ChatResult _mapContentBlockDeltaEvent(final a.ContentBlockDeltaEvent e) { + final (content, toolCals) = _mapContentBlockDelta(lastToolCallId, e.delta); + return ChatResult( + id: lastMessageId ?? '', + output: AIChatMessage( + content: content, + toolCalls: toolCals, + ), + finishReason: FinishReason.unspecified, + metadata: { + 'index': e.index, + }, + usage: const LanguageModelUsage(), + streaming: true, + ); + } + + ChatResult? _mapContentBlockStopEvent(final a.ContentBlockStopEvent e) { + lastToolCallId = null; + return null; + } + + ChatResult? _mapMessageStopEvent(final a.MessageStopEvent e) { + lastMessageId = null; + return null; + } +} + +(String content, List toolCalls) _mapMessageContent( + final a.MessageContent content, +) => + switch (content) { + final a.MessageContentText t => ( + t.value, + const [] + ), + final a.MessageContentBlocks b => ( + b.text, + b.value + .whereType() + .map( + (toolUse) => AIChatMessageToolCall( + id: toolUse.id, + name: toolUse.name, + argumentsRaw: toolUse.input.isNotEmpty + ? 
json.encode(toolUse.input) + : '', + arguments: toolUse.input, + ), + ) + .toList(growable: false), + ), + }; + +(String content, AIChatMessageToolCall? toolCall) _mapContentBlock( + final a.Block contentBlock, +) => + switch (contentBlock) { + final a.TextBlock t => (t.text, null), + final a.ImageBlock i => (i.source.data, null), + final a.ToolUseBlock tu => ( + '', + AIChatMessageToolCall( + id: tu.id, + name: tu.name, + argumentsRaw: tu.input.isNotEmpty ? json.encode(tu.input) : '', + arguments: tu.input, + ), + ), + final a.ToolResultBlock tr => (tr.content.text, null), + }; + +(String content, List toolCalls) _mapContentBlockDelta( + final String? lastToolId, + final a.BlockDelta blockDelta, +) => + switch (blockDelta) { + final a.TextBlockDelta t => (t.text, const []), + final a.InputJsonBlockDelta jb => ( + '', + [ + AIChatMessageToolCall( + id: lastToolId ?? '', + name: '', + argumentsRaw: jb.partialJson ?? '', + arguments: const {}, + ), + ], + ), + }; + +extension ToolSpecListMapper on List { + List toTool(final ChatToolChoice? toolChoice) { + if (toolChoice is ChatToolChoiceNone) { + return const []; + } + + if (toolChoice is ChatToolChoiceForced) { + final tool = firstWhereOrNull((final t) => t.name == toolChoice.name); + return [if (tool != null) _mapTool(tool)]; + } + + return map(_mapTool).toList(growable: false); + } + + a.Tool _mapTool(final ToolSpec tool) { + return a.Tool( + name: tool.name, + description: tool.description, + inputSchema: tool.inputJsonSchema, + ); + } +} + +extension ChatToolChoiceMapper on ChatToolChoice { + a.ToolChoice toToolChoice() { + return switch (this) { + ChatToolChoiceNone _ => const a.ToolChoice(type: a.ToolChoiceType.auto), + ChatToolChoiceAuto _ => const a.ToolChoice(type: a.ToolChoiceType.auto), + ChatToolChoiceRequired _ => + const a.ToolChoice(type: a.ToolChoiceType.any), + final ChatToolChoiceForced t => a.ToolChoice( + type: a.ToolChoiceType.tool, + name: t.name, + ), + }; + } +} + +FinishReason _mapFinishReason( + final a.StopReason? reason, +) => + switch (reason) { + a.StopReason.endTurn => FinishReason.stop, + a.StopReason.maxTokens => FinishReason.length, + a.StopReason.stopSequence => FinishReason.stop, + a.StopReason.toolUse => FinishReason.toolCalls, + null => FinishReason.unspecified, + }; + +LanguageModelUsage _mapUsage(final a.Usage? usage) { + return LanguageModelUsage( + promptTokens: usage?.inputTokens, + responseTokens: usage?.outputTokens, + totalTokens: usage?.inputTokens != null && usage?.outputTokens != null + ? usage!.inputTokens + usage.outputTokens + : null, + ); +} + +LanguageModelUsage _mapMessageDeltaUsage(final a.MessageDeltaUsage? usage) { + return LanguageModelUsage( + responseTokens: usage?.outputTokens, + totalTokens: usage?.outputTokens, + ); +} diff --git a/packages/langchain_anthropic/lib/src/chat_models/types.dart b/packages/langchain_anthropic/lib/src/chat_models/types.dart new file mode 100644 index 00000000..4374c820 --- /dev/null +++ b/packages/langchain_anthropic/lib/src/chat_models/types.dart @@ -0,0 +1,116 @@ +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; + +/// {@template chat_anthropic_options} +/// Options to pass into the Anthropic Chat Model. 
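+///
+/// A minimal usage sketch (the values below are illustrative, not
+/// recommendations):
+///
+/// ```dart
+/// const options = ChatAnthropicOptions(
+///   model: 'claude-3-5-sonnet-20240620',
+///   temperature: 0,
+///   maxTokens: 256,
+/// );
+/// final chatModel = ChatAnthropic(apiKey: '...', defaultOptions: options);
+/// ```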
+/// {@endtemplate} +class ChatAnthropicOptions extends ChatModelOptions { + /// {@macro chat_anthropic_options} + const ChatAnthropicOptions({ + this.model = 'claude-3-5-sonnet-20240620', + this.maxTokens = 1024, + this.stopSequences, + this.temperature, + this.topK, + this.topP, + this.userId, + super.tools, + super.toolChoice, + super.concurrencyLimit, + }); + + /// ID of the model to use (e.g. 'claude-3-5-sonnet-20240620'). + /// + /// Available models: + /// - `claude-3-5-sonnet-20240620` + /// - `claude-3-haiku-20240307` + /// - `claude-3-opus-20240229` + /// - `claude-3-sonnet-20240229` + /// - `claude-2.0` + /// - `claude-2.1` + /// + /// Mind that the list may be outdated. + /// See https://docs.anthropic.com/en/docs/about-claude/models for the latest list. + final String? model; + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + final int? maxTokens; + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Anthropic models will normally stop when they have naturally completed + /// their turn. If you want the model to stop generating when it encounters + /// custom strings of text, you can use the `stopSequences` parameter. + final List? stopSequences; + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + final double? temperature; + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + final int? topK; + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + final double? topP; + + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + final String? userId; + + /// Creates a copy of this [ChatAnthropicOptions] object with the given fields + /// replaced with the new values. + ChatAnthropicOptions copyWith({ + String? model, + int? maxTokens, + List? stopSequences, + double? temperature, + int? topK, + double? topP, + String? userId, + List? tools, + ChatToolChoice? toolChoice, + int? concurrencyLimit, + }) { + return ChatAnthropicOptions( + model: model ?? 
this.model,
+      maxTokens: maxTokens ?? this.maxTokens,
+      stopSequences: stopSequences ?? this.stopSequences,
+      temperature: temperature ?? this.temperature,
+      topK: topK ?? this.topK,
+      topP: topP ?? this.topP,
+      userId: userId ?? this.userId,
+      tools: tools ?? this.tools,
+      toolChoice: toolChoice ?? this.toolChoice,
+      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,
+    );
+  }
+}
diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml
index 6ed5624f..7a581267 100644
--- a/packages/langchain_anthropic/pubspec.yaml
+++ b/packages/langchain_anthropic/pubspec.yaml
@@ -1,11 +1,10 @@
 name: langchain_anthropic
-description: Anthropic module for LangChain.dart.
+description: Anthropic module for LangChain.dart (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.).
 version: 0.0.1-dev.1
 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic
 issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic
 homepage: https://github.com/davidmigloz/langchain_dart
 documentation: https://langchaindart.dev
-publish_to: none # Remove when the package is ready to be published

 topics:
   - ai
@@ -15,3 +14,14 @@ environment:
   sdk: ">=3.0.0 <4.0.0"
+
+dependencies:
+  anthropic_sdk_dart: ^0.0.1
+  collection: '>=1.17.0 <1.19.0'
+  http: ^1.1.0
+  langchain_core: ^0.3.2
+  langchain_tiktoken: ^1.0.1
+  rxdart: ^0.27.7
+
+dev_dependencies:
+  test: ^1.25.2
diff --git a/packages/langchain_anthropic/pubspec_overrides.yaml b/packages/langchain_anthropic/pubspec_overrides.yaml
new file mode 100644
index 00000000..4d7afffa
--- /dev/null
+++ b/packages/langchain_anthropic/pubspec_overrides.yaml
@@ -0,0 +1,6 @@
+# melos_managed_dependency_overrides: anthropic_sdk_dart,langchain_core
+dependency_overrides:
+  anthropic_sdk_dart:
+    path: ../anthropic_sdk_dart
+  langchain_core:
+    path: ../langchain_core
diff --git a/packages/langchain_anthropic/test/chat_models/assets/apple.jpeg b/packages/langchain_anthropic/test/chat_models/assets/apple.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..62d7ca92657957e3ab45fc3ec7332a9938465b68
GIT binary patch
literal 66803
zq~}MqTg93@%gAc})p?#=nwvN+y?1z_^0E6wYS{NkI>*!!WB{;;Fgcm>-(~Fkx9S(0 zW9IIEzjX1J!UfLR;q(LS!=kg{P5Ikn(9BjwOaM~%GUlQZpBWUCs^x3^s-=XxY8r5Q zM|0_D@uH}E1gSgSbRlDBulM!@ICa(`Q%=J7KJ3ujU9dKw8S%Z}<;}mF6;^*9tc|>X zrd!x&JL_aW!^?-eZ4~`Pn;p95WUX z&r~OaMG9X(Gu}MfB^VXg>fDs3np=;ae|XT4`f$-ElQt~*_pFsxDs(v*JG;*MUA;gu z!aA{lF?%%pEHEX>EgeB}qd$Gj2+LX%T3Z2j_n;+Q&YJK1^sH z_DCzmcWF18RPe-c!`}1pR}MI))CrCFp-er|$kB8K_FTTR4cHnOr_(BN%Xvm7Gk5fr zyur=~pN*;%Pdz&D^=_JXkH=hoVdhev*YU96vo#dLUg;!wu!$H;K4#e=|odMCi zBNLO~udc5yi)w)FOHFHX=Y3W;y`@733ZLgkC=J)j`~{3|9A+IPu53{?xqBql>APUF zGV4g^2?@moGafMOEezNgj)qGqNLe;Is-g!x2V8|80UC-TUj}t1l+5PkhNg611~)?}Rvmj_j0u zRd|E@?&Pz8Z;OG+D}q!&Ir(vRN=N*r>Q=sH^-+8B_I&;2+zUxfR_rT^ zZ=_ZTnxler>BE}B{+B`G9ox>2&Dqsa(nkt+lDCb;6ctC#e*3q>M*_6o6l{lV8y#kivXMza=m0LO0-8Xo#=s}54O*6aqi zMS`9z<%Pld#`l(l_y`G658mWxgu0vC&zjg*?DTP`#%K*wr@*2oeZ?ueIwMxS??fyaZ7{d!(MFuhANixAT!DdDOx$`sD~ z$5H7e&;3Ua?=1elcYz~(Fp0H1=WIJ*+gVO{H99fw^aIWkvO2*}8mQaf3a1YP8ENW2 zui8@{kNyEq5(c zUYod>L$htGY;M+MoMavQ&h2|VG44hC#3@+#zN6jF^PrG+;!o^*Zr3f&?d4~yM(q!( zH{&%Ies5?Ny^k8#tgzRPDcGXrGgUekuRKd@Q+~Mn2=k~0;eL(AQN*G4iqWz_`fj8k z^&(}$@JqXb@$O}1T}J)O6RWq9q!#0So;`Lq?t;w> z3JSE1_jfd=-EuwqC~ow_4%eMqy%%*9u%cuWSloV_QvS%@$U$7$>LoFh^LM%kid`u+ zT41zqYI+wYk{R5HonlZm)r7u}#6X6-)(88qP&XDs*&kt4mvYDDUSDX?S5}pBP^Sv< zY^*}f*$dYlB`-;Q`|;P1St!?meXjCv$Ex&ob5EHAv{l$1!>`t=MekO>t(s5$uoYVy zBxh}QsLtC9mmt451I}y4&!~Kp7#)|ASu@$o;NUpGt~WM+XXEwKk=VYcSq`ol*p{Ds>K8@?y{l}q2+ zggpDgjyRCPGZNJT30ZP~?ppk*^rjM@1e zsUIW)qdrcQbzVt}1Wp+lZk%QB*2-w{;;M}9WIeQz_|0der%LyBuXSEn_)^}KG8^#Y z%zr9Tj~r)3jM?3kXPjH;s|*gvxg0;`5$SGxhKxzIxd@YC@p0;XuYhLN_L2K9k#oij- zcDjTuY~NcgliBbIqUm3&!<(@YlxAbsH*BXx&pTI@T$>!k=DD4Gg&}^`_Jo93{7QoJ zuTjN=(RA2FwF0xSQ`)NbMW@e}j_!(e&=*Pb=I*@u;C=sFC+L!B!(ZzSb1%M0%6=I% z-!j+19-S{Rb#FV2IQiH|V^cNb(W(Mp}i{n25 zKH56qY&oB8Ghq580h$S%q1_`ju9!IvaPcdrKF*WQe~y=$c8Wh^`s1(Mza6D!Jn@i` zfU`%=`}xyxGuJ5~lepM7i?0z(nlTevDOC$`3%ya{S%t7x+3^rx`w!jDgDsDHZt?Ks|1Ng%iX z+o9@6a&)+X{7;W|GH~On8U;4%;J3zvy_8GX&|Ys2ft0m6eHZ@u5?k;kydxgk+w3$uJ1=Pe(M7mgi;n8CU=QI(7j^?#$4%!^((tzNmkI)P}Y=LW=aXoels5({k#0#jxTvfTi9DhJu zzhUrlrOB%hN@Qik52xNQ6>_UmLKc~jX^wkYe%p~wr1%mU%I-0Iuzx;o>uq~CGbaU; zUEHx0PzXT~;z*jBO?Id?BCGdsv&;00OMH`}!=IJ5{h~|cnwX9OWDMOlP;O?`ctMJI z1wV|;gzKWM79CDDap4|bPO2Mv3?-h!>uNoY+#*WxcCC#cSh&(}KTx<_DQ>{K!|(QH`|_lUKrwPYpX}F%FytzokvB!bM;bdfThXz-1p>gDmR^cBg4Ue@s41# z`X2heqG~S>rSDb9rhdyaNAYW(=NcDV2aTBUm7HDD5i?Bk2ZLNd;?GgK82di!*_ zJ%iLOLk{l`X3PE#-qkFkLL@eOCUCpriD5yaaO3H%SbT}v$Ccz*uKB+m!KJwTNFey% z4moh92d>ioUecv)Dc3>>Fx$6iHy`Iy%tjOm+w9fV?4cX3fBtS>pq6wvk{n6IyFyg^t?@0eeuh+G0_R_df0Oa05* ze2s`?%{Gyk4P9@++DAbkxqK~yyr70P3V29}Oyg6$c>O1yL41s{oyfCY^?Vvi`43|B z7tVXwF;7zI-QF8G&+v%cdndGIfd+5QV~k|WPLJsbzs@F}OzFA#ik9Z8G&uzGC%v3?Gk+m5 z)a;6Qqjbu~{v3jdU+#F$lsr+jB?SH;`&i?V-{MeC-QGOb65%to8Ed6U2SQMEx^w?p zQt>ztGze%wi+aJ)nA-Y+B=2%k(_k*sO@0IUUl#d7aQ1)vM~8@mv}V2^o-j=0mV|fS zZ^Xn9%REE%-atcIkc9kVro!pFZyxxWU<)DmB8^3GPYp0W=VX?zC!~=aQQ)j!Y3$;G zCN`T7R6n03={=coJ1$3jCDt|Jzkfa{!VGSFDqs7?2EM*}8h5jqpf8j@J*L>Rp{=Z~ z)>uR6oL)s%Rqoq3f?6C`uhM@abE~NOD*J7zs+@vA0X4QlGx92Ir=jFMJ|*N`I0_@M z-IfP#8)zTKeni_TcbkG`r5n^d^t*b8lN5qFz(JQKe$j5lg%$0k8T(9TgY;sMQ8tD- z`a;V!E9g<+HZDCPf(3;}v0||vA%)&u5Z@s=pg~H1QR7v;cnS~->(dDgJwpO{s^uCs zITHxKhL3!$ie-ndf6vIjVfLk^)TOT{Z2 zTWgqJ!*uJ)HV74IFGRZabMDK$VPZ82VtuM|=h<=2`nM!6;6G*Q%Ql(Ux1*X(rR;*= zFnT7(@MomAGc%i>$UJ#dTybcIA7}nEt1SDTr}c4x8~+j7XaA2(nv>1}z4;rxmRgO+ zp;Ob{2V`f~oCVZBR={8NzmA}bg+?QzMtij4kwa7_8>3a}J{NoSCIs~0_ z{BE~K{yzMfc`)3g7a-F_j=o(BrE84K-Y_w89yHK4nhr%nLDZjtgy3;&Nql&CgootG zZ1kII1=T9(z0JC9u|P1N?zXIaz_ps6GuyuRMLweVzk+kwDA_ani|#~7;nHlaKQ1Io ztQi?~Ak&SUSh1N|cMirQpSKO)u-GS11`veMyD1HzIQ=%(M3Hu09vp35alqC?`_Q4= 
z59mC=895}ir(syWH1z0#iWWW6pLOX@JW4XU)DbINk}Fc9OuAV8)#dj#?-}%9L@tK= zOIxE@a5Xhl$3w4~j~3`1r-6`7gFrk!0OVT}3PzP{C11H;h4 znQ%wVakU~$v{1m;SQ4DX`YgmII9K?Eb4(UPVUCbi6_p{qg=RDeDG8GFYVX33mS^9V zjLQxCI>=H4bd91=Gu{)EQ+I$>$ikxCS56`_fR>UChT&A^0{cwFGhPMZ5*Q1u>?&{u zfU@=Sr1Ao8k7m7Y%zJlNqdj+cNMmxDvL=nEeyxwPsg_O{3i-i>W98K1e+{7I_1?ix zQ>%R!7-|Or`(ISaxf)@3tuiElBa&tW)>}%rYn`BXx}M=nK)t>7@%F{0rVov2a==Y0 zAApbUGxRT>igl7Sf+W6X>o&t;z&4(G4Gz=UUCcOt(tcC<$4=qZdQ1gqWtE4HEP9kB zkf)=L(^ah9a>HZgo*@JBogxMIFCpW&P35xjj%yM0F~D@U%H{9TyjtyR%*5=Kw-`a_PU9xV&?u~&<7?ayuk2y)lFb!gTh_G{#4+$}ri54c$wZ-3wH<^|VSRBnx&;^^Kq^@GpW#Px_9#CjB$}SpwM$r>=ih)$^%IQ z{xBz;Dgfj2^40dkEnTXZNHA&){1^t5dN1WM48DuO%LXS&e%B0`A%=VW+hJs{t@4?! z`yC)#U)@z1IRK(xd4@p>z?gU)La=C@)Gzm3eIA^UXDex+qn)d`9|m~qy%^UeN|SfH zb(IXOM>fG>O{3M%yCB_mB9KAS49Wk+%L<(<#G(80H3o(NqwFIu<{-}ArG$G@a-JvBqkn7c@poknm^ul zh|^0`c&E46gb5D*LOq$i?y=!CcWdtr5`0~ceYX*|UYV6-{(`(sx;n>hDG_w5|nf=TDVJE-oET;kZ~eax6ro_;gnbru&+!trST=v zg3d&E>>+^Ct1N2JLcKAdLenG`oXNArw86LnC=?3!W?pd^2Cu0Xz?-RAcT9PnUOKJN z3t2$gFMj(T!LBtq`SzS4^dUUyps!xltxnY86>+tCpVHk#`vPNGG#F!LM6&e_#TIv^MSuB`gFj~h0 zoZS%cs6nqP6Na|0tOCpXU?sEjh)sUhptY^t%HLMfcQ5S=$|3m?&8Yf#}l> z&wJT6EmiCWS&;S{$WYjhZcFJIye$x~mgjV{p~(nraxzDWi{UXH9WgnyUM&<`Uw{H0 zOyk8O=T>YrA^JHbrjv7dC*DdU#l;^VM7kE*7)!z=&i!3+3kE~F`SNfZ5BcDX zCcWDYhlS68EVpIged$!47jaG|HWY1+)Oog~dK7CBx?1H5XyGeA(pKa5@Rb)%ggJze zo|_?i0>aRn@R1WG=!8Z0kcT_i)IQ+?Qpre1=CDvzG)q=@16=bXhM*%q#_eOc)6eB}jl2i!M z--i;_i0&@fGM|XU>SkAJ!4&Y9=dUvxQ8J_)ySz$(sdHl%#4yB*2@0oD>D5)XNGTV# zj~3+1kV0+a;#bO^=R<5P%B`7%nP3OgArkk*31K*?opA%%XMrAdo$%&0CZo zt??GRF2UdUL}pSZqEO80xUO`RPyT)u_?g$cDy2c{z2YkTwaHBAce*2XA1a@K*Ef~d za~>cOKwltwHJRLLzfZh1(xTk-4Dkhn8{U1>7Nyfzp^Uvit~gXDFm#1>=S9+xwI?j) z3ElbbGl8nwf!&lTLg0diz6jo>*6`NasS9qcpN@Yedry)VNbkfTZrDl*(J*w;7vIoR zeV|rgt$8gb#A6y#s%>yG%kroLA8uGNZUmDEIZ$F9-gmQ$4<9l$8+tPgO(NOfUJ725 z+@hAqA*&UIj49nASdy0=MQ16cuRSB%yZZ*)PPU`UPMXriZBwvts;|zdlSr;{Q4qmf zdh=cF6uqu89{i(vF+MaYvWr&|%&?xeZxQa{=h*(9=YZIU>FR(ul1r4pcv%N8ifZNi zS^VP3kA^wJ5ppGHCy09!VWHCL6^on6B%CS2t^V5q!ajhY&oQhACs4H(_FD(Ve70|a zUTDr7&sjGD3BG1g6P=jX@N2S2&d1S3mRo-j5;8(tsEdHb32q$CdM zq}(Q_D#tmg8si>;_8eU)<8|gWzRda%#FnpY1xqfr?XU3luap6OoU0lwg)SLs&lDlY zb@ZY$NnE$B(g-b+wr%XK5ussAR}t| z*Z>^<@Otucu#jBUvF-vwz;Xa9Mio(CR&z|5Yu6|WJ;1GoyVVb6DFMPb%=)BCj{f-vqq z?z_3G23vjce=#nXw+sKW`-JBYTTbJ85U)j?5>P(@Z6y6=I-sC~u!ZoL=YJ|wZHl?d zHR6afePf~aCK9O@^qhIY-PaBgz58n1i~%v{O2(ge?2-4>^Aowe4L@R|t}5a?OXe?l z#Mxaz8(KdtKEj1N=piT1LG^V7MVbn59pX!sY89;~S~)ZPqrttU*pe++$;F&G$^BB7 zSE;YJnmj8hTYJ>@X|iompXCBCtfX|2y&p8&dt~>F(%>N_U%CbNmlG5*{j8O1<*?b|P@s<)H7xyOOgZ?}2fYrU3d+mGI^l!E(Y)gFZ zGm)WDh98~ywCDZ)xSsFcKsT>$io&50PX)h1;O%I*vzWV5)U>dm#bQ;Y8S8L=U8lo0xjh*bN_NRw% zGY_teIsUD0UUw~CN~}92?liP$HjYfzX`iLk*l)6~xQVq9|Lu76{brmaDpdnoWih2G z>jdz`!u!XbUO0JM$gn*#o@W9j7HF<+AKy&{N2JYQwqLsu6B6Nmt{}4)?(; z;QcBV4kbCP;7X?`WIJE^D&OwUFPA98a-H?i)-%EXcCf0_(_4@}3MeW~tzfX>+$BxP zybnAQ@8Smy9H!^f^(^@T@Z_Cal`8$c!NS=O@7Vai5lg?p9n}YP#OQt~Z^nFczL^2x zzjvudU~>M+`oaW@3(=);Pre!_eXg6cU8ui^wwBFp%5JbefMkOsO=G&vuk9H>KkoPa zZu7ay1cr(}E$#e^rQ7Qkob(P{zW$RMUO1;iM!S0C+6&t7*MPGF07?OuPpEN=?|y<0 z$wo@MRI7j>5Ln7V(Rz5**m=mU#zi{m%jG`kNe0%8l4 zDTZJ=1It{RFJt5xgc>GR+ioh&#q}aCRT?%ImBv=y$BJkK%?F z#Dj;w^ql$c^+W52Q%D;X*TZN#r!LZZypj&~pbIHc!B7N>F3qO|*XOHw{7zMH8#_K9 zTw3G?G21(wqf_&Oc3CWJsW|>qB)l~SQ9t=RWX=IN-DO`*dnL=W&RubKX8zd!^!NGQ zRHp1~dU~UWZ3+HH*UP$#*95sLsaWZ*;wenG@zHEnr@*3V9Tt`kE_-T@RP*y^1_hHD zEdUup@SXi0y6D6um&5M7MMi(MDzQ)3ve}09xzBD&ykCP+5`oD0ZmB&_S(ilVVvB(} zw!wMU7aliY-hEEX7@85FO>`7Bnc5JPKisu9Q1><8PLjGb`!`AAO486AZAoYcQ>+zX zdekLd!KdtAN`1*CbhY%wM0DM7h*XT1+X3Se{S6svw#b-4O52u0fQ&Lxrhzr&cknx9 
zqR17x;s<&+JDtlpU>w`>DIMUPTg~7|Q_>7JXt|&QP;V~mJL?NY8FYM-5xDEzaR*8= z2^_1&AFMviF}#XLezsiA)vg?I*?P2%S;ti@mY_{shaFqp`aOs7SOcV4N%*yAGH=dS zkbq9Af#S)U(Z|tBA>83fz9S^W*@UfP8NBg?H5I#!TbEr0)>6JpN#hr3dk<-X_@o>U zRCjeX=sQb@%|AR{s5&2MW8*wV3B`H#KuR=bTnPH$iv;yz+McGm(?SNQ;9UR1oYHxl z`g?Z?&6sL&T|@^G%GEb8OfCqD6XfaJD?^JJ9x=dM6$%^L7ENXfo){L3jpJyev7Li= z#hl>sQ3A$D$R&X9=1qZ-8`dh8aB*;D@UF=?umYTfEjAl#v8aEw2KW4?v}jvKxKCx( zv!-&<-Z9jtG~B!n;ihbc%$fQ6==m(~i0t);@%CT(j&@4aaI0F~#;P9cV-vf%w z(V43D(A8jOwB!j3TUpsvIeL-8EGfRuI&|`Q4tD=+GsRWq45@)g$GLD$ zPAL8T^`gOIvg416QwKr1H^E^e%4V=&-Zu8@`ROl@_JN!*uMhX;?dlgf1H6#BMA(!e zjy%V3hjNi`1Y$x9&!(90&nshAmfrL*i6-l7dljCjMtE4q=cSO`jIr^g7u`)@ow*#2 zJO&7?Mi`%XqG5Z4UU|2mgCD@<_Jyz!0*YKIRwYSF?f53Z&%45rmJ4RMDsc>HfvhK2 zBcawOzG+JL+=)n3sPXA3+RzA1K7pw0U>JLi*7&X5@0KHdAFX>8lbox7aRUUX_{#=% z^-{lZ$@yIw0B8rqw#EWdUx>&bRQw?YE@OJ+ zRDeS=8@h6aS3;79eojB5U4?s~)!baQ-7}9+^0{0@SD_&M2MSDR=Xko93n!h`6V!Z=c!3fJeB}waK8ZS}>xr%d6!@B11X$-qr-Si}!Tu8NVPmid ztn$U8wGUh5?z{o4N7}yBfPoBEHS8JXe7DyNmaE#5b?9<4YytW1Y-mGupEp<#o;cR^2OlbK(*dfkYUlv9d${e2@d_j8V>xXFiPvR1X}VHI9Z zUjxu6PjGpVFuKdHc>(2Z+sz5EW4XUH$VMXlL5a`F>%xNrA zHc+zyoXn{ccs0JUAC#VK_qg99?Mt$^itMv78Cg7Al3p>k&T*>0g-L!1-X2!P7RDOY z;tH;^=w6M>hDj_$(G8L;pjz{VJlJT<_fnq8xYC%4%D9*@8lHV`jz;wBZtD<(h%ih@ z=53AH9t7c4cayO3?c)YdU30{^u*7?d&-6zzblQXovhrusE|RK5_)%BK^LdBI@1jbG zM^fu+&~Cgg46hwCj4i{*6XG_Yd@IL5h-IQtQ*Ycv}t$u0|xkuCE~-s9gy;q9?Y$pVVx%c1;Jv( z>GyI@AHYR3>8ih{yYnwav2BuMp4ITM_0IWGU_wcF#+oCKPKv<7(U_6oMAm=};*GX$ z^*I(Hw&47)Rhy#+_Q|+?zua_FCh!X6o{WDv#AQGz)88QYq4fA0!9&UcIz$L~i*O6& z=vuRdqI`{vR0nZaFkjJL-o@+#NN~o@?&8H*r+e_JJse@8XjW7{bRKi7FAk(%qI8CK zyN9c}ek4922RsC%+ks2(H)oidED+iHF`A2j4HOXOabgu3+@mm-XZ|f>3~e^rH5M{@ zrxGH#3BVPFH8}pxt^&#u$+VEXlCm-&#ITe&uN+J48f7GQMb&Hv?`IY8ATFJDxX+oipRqGTK7@D0XH(u8>1Y-$AEy%R#m z+aBoR#Z9h+pT--(jA9HK8@Y-%n1^8KcJqT<1!#R*cW~#0$WN7dC9_3G16Az^fuRT# z?dqjRbG&Ittm4Xm2Qt*K5PQ+p)FX||L?%^fb}`L_xTx5(P~`y6h)n|m|#A>)MgOVf^=4a$Ss$g{vL8}F_!vwl4di>gJRsr(z_oCloiYYh9#aiO60qzx$7!3d_ZP; zdO8_8CzDfSuXLLLgJROlp|;t5pJ{Q3*x;zj+V}gw`LjKn9;FR(cVHXF*{`%qHRCK; zYXpC!Bl(G{SO0QYy=PYkum4<^?&}&d9XCRl5_j_&UvYM%h0sLB8}a7G2}eA^m)-69 zhUR)y{%$p#7~Zu~0?yHMfG!F4M#zR*lb|jyl;iLfDQ-w5CZCo(cS#Vrgw?K++>~?6 z-N?eriVJx;Pas9WBw2DR#ADeJp6)M1 zSbECWd*ZOheGW7WQ4LE7oXBn`fjsmoC1ptY*LDTBXZJn7y;9cZP+H^Q>=2hodlQGA zxg3$ly5E(d^NFFdL5k~rqAyQn&XYTKwNvhVR%;)=%Q1DTcm8&09eJ8?R13;Ej8!4u zPYn3`>`+sZECG}4pftLbClGf)4a+%1aLKFhDpQw z_}2`a0+~ACOP?6V+SbLP635!V;!b0tDK`-H(KF{64wne({iP9bofT8l-_<@zYchl+ zSyk207IW$7ryWtU(L6gL*YHy_X{UsPs$OMfUT(9VO=H?Du1vm)aKm0^)$S2xLiuU(mxkVSg?GVBEZ{TP->wzu>f@r0y z)-$h7gq|sf;v<%jBnW|y<{Z8i`28wzGJVEJsn$O`S578l_U7xZMhY(1RqlHkwWOL2 zST*e&m#f7Wf#DYUy6V)Yg(zyMr6iRqBn`XLiX6(v)||jT5bnmkN&^&q4IkA-5lMkR zEgqx8juN!JJeUU1k6>>9(0Ao>&?N&_mkSGEW@*uh9A~hs{=%Bw!$~VZ;@degjzDle z%@(o7yfs~o;~c{T-tgcPnK$wl`5@~kfMI%uwrUd)D^O05Thlh~OX{(kVe{bnao9@! zqmzN7neRO^%>dV4X&gXqI5>lE|Me&ZaZCtL$(1Zl;L%)1MfS7$05OoQgarO)sCS8f zUafc4kG?~u>fsqBhTbN+cwhN-M5kh~8j6f+ft%Cg~4@U-|?ZJ=}4 zds(lZPmD9P%s?Vj6A`;;9v~IqWTi&VYt=wE<6*DBk-7sBDO3?=1ds)QT`Z*;SIsy? 
z9GNfT%vKyuIyf{^9~6Y9;av|ljY%}t+gt&zlI=ER+j`wGmUc2eDOoT`srrM1we?qHp7s3Edz8%f&{QNCll9^G1WStSo~EZ zhRi)0Qa{e1Abc?AK@}h-{%ju~81D(r%n1?kGfBUBy~C{7r6+EW&7vJAHNwTmfn@l zR3K#OF|~$|giH=`k-1>dT?*g zQLo^QHo{R+d}8Wrdz7P%7>t6_l;v56>^IRFNbhD(OK2Vf&ZX=jWuMG$<~37GwYuI^ zd!)xqzA~<^mMyrO_?LQ2AaYwRgP9@nr zT_{c3EcE7PHiyTk98S@dyZ)(%d?8FSCHwm~V1(x{&zYAL&2#!bN*%4xN%EOI1Zsj9 zohhJ))*Vd`OlI))o5weKY2N`?O~A->B$E0mV<%FSk5Om6&|;6f0XJvqAu?-6Xs6L& zw|y7F2Wum1N%-()^3gl!#-0Xp_B3u6y%Es-Atw#JiWVsY6K*?(mWV?siSIV4Hj zC{biFwqt@uIGEM4)CD^fjz={5W}{D!BKuOaCZ1PG3r{c5@ITPuByyS-St05qJo0}d zl$ph!PEW#QaLxQOI(dJ^kQ6$%rZBXY%r&6N^PEJy7{&qx(EwK^sF^kBB;Q1K$N5LV8hw(lU0~dlH22Ti& z$d8eW!W?7YNR(sWNRmcGyCy#*ghR;)#=IgikLeMNZFGo6H{ow=w|h&&AM<7p{A|?! a03jIsq5l9AA*g-~@9twCwR;f7xBuDQJ7seK literal 0 HcmV?d00001 diff --git a/packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart b/packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart new file mode 100644 index 00000000..6023d581 --- /dev/null +++ b/packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart @@ -0,0 +1,293 @@ +// ignore_for_file: avoid_redundant_argument_values, avoid_print +@TestOn('vm') +library; // Uses dart:io + +import 'dart:convert'; +import 'dart:io'; + +import 'package:langchain_anthropic/langchain_anthropic.dart'; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/language_models.dart'; +import 'package:langchain_core/output_parsers.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:test/test.dart'; + +void main() { + group('ChatAnthropic tests', () { + const defaultModel = 'claude-3-5-sonnet-20240620'; + + late ChatAnthropic chatModel; + + setUp(() async { + chatModel = ChatAnthropic( + apiKey: Platform.environment['ANTHROPIC_API_KEY'], + defaultOptions: const ChatAnthropicOptions( + model: defaultModel, + ), + ); + }); + + tearDown(() { + chatModel.close(); + }); + + test('Test Text-only input with different models', () async { + final models = [ + 'claude-3-5-sonnet-20240620', + 'claude-3-haiku-20240307', + 'claude-3-opus-20240229', + 'claude-3-sonnet-20240229', + ]; + for (final model in models) { + print('Testing model: $model'); + final res = await chatModel.invoke( + PromptValue.string( + 'List the numbers from 1 to 9 in order ' + 'without any spaces, commas or additional explanations.', + ), + options: ChatAnthropicOptions( + model: model, + temperature: 0, + ), + ); + expect(res.id, isNotEmpty); + expect(res.finishReason, isNot(FinishReason.unspecified)); + expect(res.metadata['model'], contains(model.toLowerCase())); + expect( + res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), + contains('123456789'), + ); + await Future.delayed(const Duration(seconds: 5)); + } + }); + + test('Text-and-image input', () async { + final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./test/chat_models/assets/apple.jpeg') + .readAsBytes(), + ), + ), + ]), + ), + ]), + ); + + expect(res.output.content.toLowerCase(), contains('apple')); + }); + + test('Test stop sequence', () async { + final res = await chatModel.invoke( + PromptValue.string( + 'List the numbers from 1 to 9 in order ' + 'without any spaces, commas or additional explanations.', + ), + options: const ChatAnthropicOptions( + model: defaultModel, + stopSequences: ['4'], + ), + ); + 
final text = res.output.content; + expect(text, contains('123')); + expect(text, isNot(contains('456789'))); + expect(res.finishReason, FinishReason.stop); + }); + + test('Test max tokens', () async { + final res = await chatModel.invoke( + PromptValue.string('Tell me a joke'), + options: const ChatAnthropicOptions( + model: defaultModel, + maxTokens: 10, + ), + ); + expect(res.output.content.length, lessThan(50)); + expect(res.finishReason, FinishReason.length); + }); + + test('Test Multi-turn conversations', () async { + final prompt = PromptValue.chat([ + ChatMessage.humanText( + 'List the numbers from 1 to 9 in order ' + 'without any spaces, commas or additional explanations.', + ), + ChatMessage.ai('123456789'), + ChatMessage.humanText( + 'Remove the number 4 from the list', + ), + ]); + final res = await chatModel.invoke( + prompt, + options: const ChatAnthropicOptions( + model: defaultModel, + temperature: 0, + ), + ); + expect( + res.output.content, + contains('12356789'), + ); + }); + + test('Test streaming', () async { + final stream = chatModel.stream( + PromptValue.string( + 'List the numbers from 1 to 100 in order ' + 'without any spaces, commas or additional explanations.', + ), + ); + + String content = ''; + int count = 0; + await for (final res in stream) { + content += res.output.content; + count++; + } + expect(count, greaterThan(1)); + expect(content, contains('123456789')); + }); + + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + final model = chatModel.bind( + const ChatAnthropicOptions( + model: defaultModel, + tools: [tool], + ), + ); + + final humanMessage = ChatMessage.humanText( + "What's the weather like in Boston and Madrid right now in celsius?", + ); + final res1 = await model.invoke(PromptValue.chat([humanMessage])); + + final aiMessage1 = res1.output; + expect(aiMessage1.toolCalls, hasLength(2)); + + final toolCall1 = aiMessage1.toolCalls.first; + expect(toolCall1.name, tool.name); + expect(toolCall1.arguments.containsKey('location'), isTrue); + expect(toolCall1.arguments['location'], contains('Boston')); + expect(toolCall1.arguments['unit'], 'celsius'); + + final toolCall2 = aiMessage1.toolCalls.last; + expect(toolCall2.name, tool.name); + expect(toolCall2.arguments.containsKey('location'), isTrue); + expect(toolCall2.arguments['location'], contains('Madrid')); + expect(toolCall2.arguments['unit'], 'celsius'); + + final functionResult1 = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + final functionMessage1 = ChatMessage.tool( + toolCallId: toolCall1.id, + content: json.encode(functionResult1), + ); + + final functionResult2 = { + 'temperature': '25', + 'unit': 'celsius', + 'description': 'Cloudy', + }; + final functionMessage2 = ChatMessage.tool( + toolCallId: toolCall2.id, + content: json.encode(functionResult2), + ); + + final res2 = await model.invoke( + PromptValue.chat([ + humanMessage, + aiMessage1, + functionMessage1, + functionMessage2, + ]), + ); + + final aiMessage2 = res2.output; + + expect(aiMessage2.toolCalls, isEmpty); + 
expect(aiMessage2.content, contains('22')); + expect(aiMessage2.content, contains('25')); + }); + + test('Test streaming with tools', + timeout: const Timeout(Duration(minutes: 5)), () async { + const tool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', + }, + }, + 'required': ['location', 'punchline'], + }, + ); + + final promptTemplate = ChatPromptTemplate.fromTemplate( + 'tell me a long joke about {foo}', + ); + final chat = chatModel.bind( + ChatAnthropicOptions( + model: defaultModel, + tools: const [tool], + toolChoice: ChatToolChoice.forced(name: 'joke'), + ), + ); + final jsonOutputParser = ToolsOutputParser(); + + final chain = promptTemplate.pipe(chat).pipe(jsonOutputParser); + + final stream = chain.stream({'foo': 'bears'}); + + List lastResult = []; + int count = 0; + await for (final res in stream) { + print(res); + lastResult = res; + count++; + } + + expect(count, greaterThan(1)); + expect(lastResult, hasLength(1)); + final toolCall = lastResult.first; + expect(toolCall.arguments['setup'], isNotEmpty); + expect(toolCall.arguments['punchline'], isNotEmpty); + }); + }); +} diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index b0ec2aa9..fa0bc0fc 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -44,7 +44,7 @@ class ChatResult extends LanguageModelResult { final LanguageModelResult other, ) { return ChatResult( - id: other.id, + id: other.id.isNotEmpty ? other.id : id, output: output.concat(other.output), finishReason: finishReason != FinishReason.unspecified && other.finishReason == FinishReason.unspecified From b581d7c4766d283ab39478298fcbc92aff7bb7b7 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Tue, 2 Jul 2024 22:26:09 +0200 Subject: [PATCH 058/251] docs: Update README.md --- packages/langchain/README.md | 96 ++++++++++++++----- .../lib/src/chat_models/chat_ollama.dart | 2 +- 2 files changed, 71 insertions(+), 27 deletions(-) diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 83608f5a..652ef1be 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -38,13 +38,13 @@ LangChain.dart aims to fill this gap by abstracting the intricacies of working w LangChain.dart has a modular design that allows developers to import only the components they need. The ecosystem consists of several packages: -### [`langchain_core`](https://pub.dev/packages/langchain_core) +### [`langchain_core`](https://pub.dev/packages/langchain_core) [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) Contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. > Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. -### [`langchain`](https://pub.dev/packages/langchain) +### [`langchain`](https://pub.dev/packages/langchain) [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. 
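For readers skimming this patch, the following is a minimal, self-contained sketch of the `ChatAnthropic` surface exercised by the new test file above. It only reuses calls that appear in the diff (the `ChatAnthropic` constructor, `ChatAnthropicOptions`, `PromptValue.string`, `invoke`, `stream`, `close`); the `main()` wrapper, prompt texts, and error handling are illustrative assumptions, not part of the patch.

```dart
// Sketch only: mirrors the API usage shown in chat_anthropic_test.dart above.
import 'dart:io';

import 'package:langchain_anthropic/langchain_anthropic.dart';
import 'package:langchain_core/prompts.dart';

Future<void> main() async {
  final chatModel = ChatAnthropic(
    apiKey: Platform.environment['ANTHROPIC_API_KEY'],
    defaultOptions: const ChatAnthropicOptions(
      model: 'claude-3-5-sonnet-20240620', // same default as the test above
      temperature: 0,
    ),
  );

  // Single-turn invocation, as in the "Text-only input" test.
  final res = await chatModel.invoke(
    PromptValue.string('List the numbers from 1 to 9 without spaces.'),
  );
  print(res.output.content);

  // Token-by-token streaming, as in the "Test streaming" test.
  final stream = chatModel.stream(
    PromptValue.string('Tell me a short story.'),
  );
  await for (final chunk in stream) {
    stdout.write(chunk.output.content);
  }

  chatModel.close();
}
```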
@@ -52,7 +52,7 @@ Contains higher-level and use-case specific chains, agents, and retrieval algori > > This package exposes `langchain_core` so you don't need to depend on it explicitly. -### [`langchain_community`](https://pub.dev/packages/langchain_community) +### [`langchain_community`](https://pub.dev/packages/langchain_community) [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. @@ -64,40 +64,28 @@ Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/pack > Depend on an integration-specific package if you want to use the specific integration. -

    - -

    - | Package | Version | Description | |---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | -| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | -| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components and utilities | -| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | -| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | +| [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | [![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic) | Anthopic integration (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.) | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | | [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | | [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | -Functionality provided by each integration package: +

    + +

    -| Package | LLMs | Chat models | Embeddings | Vector stores | Chains | Agents | Tools | -|---------------------------------------------------------------------|------|-------------|------------|---------------|--------|--------|-------| -| [langchain_community](https://pub.dev/packages/langchain_community) | | | | ✔ | | | ✔ | -| [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | | ✔ | ✔ | ✔ | -| [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | ✔ | | | | -| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | | ✔ | | | | | | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | | | | | -| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | | ✔ | ✔ | | | | | -| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | | | | ✔ | | | | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | | | | ✔ | | | | -| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | | | | ✔ | | | | +### API clients packages The following packages are maintained (and used internally) by LangChain.dart, although they can also be used independently: +> Depend on an API client package if you just want to consume the API of a specific provider directly without using LangChain.dart abstractions. + | Package | Version | Description | |-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------| | [anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | [Anthropic](https://docs.anthropic.com/en/api) API client | @@ -109,6 +97,62 @@ The following packages are maintained (and used internally) by LangChain.dart, a | [tavily_dart](https://pub.dev/packages/tavily_dart) | [![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) | [Tavily](https://tavily.com) API client | | [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | [GCP Vertex AI](https://cloud.google.com/vertex-ai) API client | +## Integrations + +The following integrations are available in LangChain.dart: + +### Chat Models + +| Chat model | Package | Streaming | Multi-modal | Tool-call | Description | +|-------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|-----------|-------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [ChatAnthropic](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/anthropic) | [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | ✔ | ✔ | ✔ | [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API) | +| 
[ChatFirebaseVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/firebase_vertex_ai) | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | ✔ | ✔ | ✔ | [Vertex AI for Firebase API](https://firebase.google.com/docs/vertex-ai) (aka Gemini API) | +| [ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) | +| [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://ollama.ai) | +| [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | | [Ollama Chat API](https://ollama.ai) | +| [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) | +| [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) | + +### LLMs + +_Note: Prefer using Chat Models over LLMs as many providers have deprecated them._ + +| LLM | Package | Streaming | Description | +|-------------------------------------------------------------------------------------------------|---------------------------------------------------------------|-----------|--------------------------------------------------------------------------------------| +| [Ollama](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | [Ollama Completions API](https://ollama.ai) | +| [OpenAI](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | [OpenAI Completions API](https://platform.openai.com/docs/api-reference/completions) | +| [VertexAI](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | [GCP Vertex AI Text API](https://cloud.google.com/vertex-ai) | + +### Embedding Models + +| Embedding model | Package | Description | +|---------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|------------------------------------------------------------------------------------| +| [GoogleGenerativeAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/google_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | [Google AI Embeddings API](https://ai.google.dev) | +| 
[MistralAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [Mistral Embeddings API](https://docs.mistral.ai) | +| [OllamaEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [Ollama Embeddings API](https://ollama.ai) | +| [OpenAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | [OpenAI Embeddings API](https://platform.openai.com/docs/api-reference/embeddings) | +| [VertexAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | [GCP Vertex AI Embeddings API](https://cloud.google.com/vertex-ai) | + +### Vector Stores + +| Vector store | Package | Description | +|--------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------| +| [Chroma](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/chroma) | [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [Chroma](https://trychroma.com/) integration | +| [MemoryVectorStore](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/memory) | [langchain](https://pub.dev/packages/langchain) | In-memory vector store for prototype and testing | +| [ObjectBoxVectorStore](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox) | [langchain_community](https://pub.dev/packages/langchain_community) | [ObjectBox](https://objectbox.io/) integration | +| [Pinecone](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/pinecone) | [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [Pinecone](https://pinecone.io/) integration | +| [Supabase](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/supabase) | [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [Supabase Vector](https://supabase.com/vector) integration | +| [VertexAIMatchingEngine](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | [Vertex AI Vector Search](https://cloud.google.com/vertex-ai/docs/vector-search/overview) (former Matching Engine) integration | + +### Tools + +| Tool | Package | Description | +|-----------------------------------------------------------------------------------|---------------------------------------------------------------------|--------------------------------------------------------------------------------------------| +| [CalculatorTool](https://langchaindart.dev/#/modules/agents/tools/calculator) | [langchain_community](https://pub.dev/packages/langchain_community) | To calculate math expressions | +| [OpenAIDallETool](https://langchaindart.dev/#/modules/agents/tools/openai_dall_e) | [langchain_openai](https://pub.dev/packages/langchain_openai) | [OpenAI's DALL-E Image Generator](https://platform.openai.com/docs/api-reference/images) | +| TavilyAnswerTool | [langchain_community](https://pub.dev/packages/langchain_community) | Returns an 
answer for a query using the [Tavily](https://tavily.com) search engine | +| TavilySearchResultsTool | [langchain_community](https://pub.dev/packages/langchain_community) | Returns a list of results for a query using the [Tavily](https://tavily.com) search engine | + ## Getting started To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_community`, `langchain_openai`, `langchain_google`, etc.): diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart index 2e8fe5f6..64a5cdae 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart @@ -9,7 +9,7 @@ import '../llms/mappers.dart'; import 'mappers.dart'; import 'types.dart'; -/// Wrapper around [Ollama](https://ollama.ai) Completions API that enables +/// Wrapper around [Ollama](https://ollama.ai) Chat API that enables /// to interact with the LLMs in a chat-like fashion. /// /// Ollama allows you to run open-source large language models, From d8973d4d876a68d6189f6938570d6caeba330980 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Tue, 2 Jul 2024 22:35:17 +0200 Subject: [PATCH 059/251] chore(release): publish packages - langchain@0.7.3 - langchain_core@0.3.3 - langchain_community@0.2.2 - langchain_anthropic@0.1.0 - langchain_chroma@0.2.1 - langchain_firebase@0.2.0 - langchain_google@0.6.0 - langchain_mistralai@0.2.1 - langchain_ollama@0.2.2+1 - langchain_openai@0.6.3 - langchain_pinecone@0.1.0+6 - langchain_supabase@0.1.1 - anthropic_sdk_dart@0.1.0 - googleai_dart@0.1.0+2 - mistralai_dart@0.0.3+3 - ollama_dart@0.1.2 - openai_dart@0.3.3+1 - tavily_dart@0.1.0 --- CHANGELOG.md | 127 ++++++++++++++++++ examples/browser_summarizer/pubspec.lock | 12 +- examples/browser_summarizer/pubspec.yaml | 6 +- examples/docs_examples/pubspec.lock | 26 ++-- examples/docs_examples/pubspec.yaml | 14 +- examples/hello_world_backend/pubspec.lock | 8 +- examples/hello_world_backend/pubspec.yaml | 4 +- examples/hello_world_cli/pubspec.lock | 8 +- examples/hello_world_cli/pubspec.yaml | 4 +- examples/hello_world_flutter/pubspec.lock | 16 +-- examples/hello_world_flutter/pubspec.yaml | 8 +- examples/wikivoyage_eu/pubspec.lock | 12 +- examples/wikivoyage_eu/pubspec.yaml | 6 +- packages/anthropic_sdk_dart/CHANGELOG.md | 7 + packages/anthropic_sdk_dart/pubspec.yaml | 2 +- packages/googleai_dart/CHANGELOG.md | 4 + packages/googleai_dart/pubspec.yaml | 2 +- packages/langchain/CHANGELOG.md | 11 +- packages/langchain/pubspec.yaml | 4 +- packages/langchain_anthropic/CHANGELOG.md | 4 + packages/langchain_anthropic/pubspec.yaml | 6 +- packages/langchain_chroma/CHANGELOG.md | 4 + packages/langchain_chroma/pubspec.yaml | 10 +- packages/langchain_community/CHANGELOG.md | 4 + packages/langchain_community/pubspec.yaml | 8 +- packages/langchain_core/CHANGELOG.md | 5 + packages/langchain_core/pubspec.yaml | 2 +- packages/langchain_firebase/CHANGELOG.md | 11 ++ .../langchain_firebase/example/pubspec.lock | 6 +- .../langchain_firebase/example/pubspec.yaml | 4 +- packages/langchain_firebase/pubspec.lock | 2 +- packages/langchain_firebase/pubspec.yaml | 4 +- packages/langchain_google/CHANGELOG.md | 9 ++ packages/langchain_google/pubspec.yaml | 4 +- packages/langchain_mistralai/CHANGELOG.md | 4 + packages/langchain_mistralai/pubspec.yaml | 4 +- packages/langchain_ollama/CHANGELOG.md | 4 + 
packages/langchain_ollama/pubspec.yaml | 6 +- packages/langchain_openai/CHANGELOG.md | 4 + packages/langchain_openai/pubspec.yaml | 10 +- packages/langchain_pinecone/CHANGELOG.md | 4 + packages/langchain_pinecone/pubspec.yaml | 6 +- packages/langchain_supabase/CHANGELOG.md | 4 + packages/langchain_supabase/pubspec.yaml | 10 +- packages/mistralai_dart/CHANGELOG.md | 4 + packages/mistralai_dart/pubspec.yaml | 2 +- packages/ollama_dart/CHANGELOG.md | 5 + packages/ollama_dart/pubspec.yaml | 2 +- packages/openai_dart/CHANGELOG.md | 4 + packages/openai_dart/pubspec.yaml | 2 +- packages/tavily_dart/CHANGELOG.md | 4 + packages/tavily_dart/pubspec.yaml | 2 +- 52 files changed, 332 insertions(+), 113 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e1699211..aead8730 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,133 @@ Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. +## 2024-07-02 + +### Changes + +--- + +New packages: + +- [`langchain_anthropic` - `v0.1.0`](#langchain_anthropic---v010) +- [`tavily_dart` - `v0.1.0`](#tavily_dart---v010) + +Packages with breaking changes: + +- [`langchain_firebase` - `v0.2.0`](#langchain_firebase---v020) +- [`langchain_google` - `v0.6.0`](#langchain_google---v060) + +Packages with other changes: + +- [`langchain` - `v0.7.3`](#langchain---v073) +- [`langchain_core` - `v0.3.3`](#langchain_core---v033) +- [`langchain_community` - `v0.2.2`](#langchain_community---v022) +- [`langchain_chroma` - `v0.2.1`](#langchain_chroma---v021) +- [`langchain_mistralai` - `v0.2.1`](#langchain_mistralai---v021) +- [`langchain_ollama` - `v0.2.2+1`](#langchain_ollama---v0221) +- [`langchain_openai` - `v0.6.3`](#langchain_openai---v063) +- [`langchain_pinecone` - `v0.1.0+6`](#langchain_pinecone---v0106) +- [`langchain_supabase` - `v0.1.1`](#langchain_supabase---v011) +- [`anthropic_sdk_dart` - `v0.1.0`](#anthropic_sdk_dart---v010) +- [`googleai_dart` - `v0.1.0+2`](#googleai_dart---v0102) +- [`mistralai_dart` - `v0.0.3+3`](#mistralai_dart---v0033) +- [`ollama_dart` - `v0.1.2`](#ollama_dart---v012) +- [`openai_dart` - `v0.3.3+1`](#openai_dart---v0331) + +--- + +#### `langchain` - `v0.7.3` + +> Note: Anthropic integration (`ChatAnthropic`) is available in the new [`langchain_anthropic`](https://pub.dev/packages/langchain_anthropic) package. + +- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) +- **DOCS**: Document existing integrations in README.md. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) + +#### `langchain_core` - `v0.3.3` + +- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) +- **FEAT**: Update ChatResult.id concat logic ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) + +#### `langchain_community` - `v0.2.2` + +- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). 
([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) + +#### `langchain_anthropic` - `v0.1.0` + +- **FEAT**: Add ChatAnthropic integration ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) + +#### `langchain_firebase` - `v0.2.0` + +> Note: `ChatFirebaseVertexAI` now uses `gemini-1.5-flash` model by default. + +- **BREAKING** **FEAT**: Update ChatFirebaseVertexAI default model to gemini-1.5-flash ([#458](https://github.com/davidmigloz/langchain_dart/issues/458)). ([d3c96c52](https://github.com/davidmigloz/langchain_dart/commit/d3c96c52e95e889ba6955e3de80a83978b27618b)) +- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) +- **FEAT**: Support response MIME type in ChatFirebaseVertexAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)) ([#463](https://github.com/davidmigloz/langchain_dart/issues/463)). ([c3452721](https://github.com/davidmigloz/langchain_dart/commit/c3452721c78ba3071ed2510a243f9c824a291c34)) +- **FEAT**: Add support for Firebase Auth in ChatFirebaseVertexAI ([#460](https://github.com/davidmigloz/langchain_dart/issues/460)). ([6d137290](https://github.com/davidmigloz/langchain_dart/commit/6d137290ca0f56c9fcc725e6211e838a3e3c6d16)) +- **FEAT**: Add support for usage metadata in ChatFirebaseVertexAI ([#457](https://github.com/davidmigloz/langchain_dart/issues/457)). ([2587f9e2](https://github.com/davidmigloz/langchain_dart/commit/2587f9e2bcbcc2bf5e2295dce409e92a89bf3c44)) +- **REFACTOR**: Simplify how tools are passed to the internal Firebase client ([#459](https://github.com/davidmigloz/langchain_dart/issues/459)). ([7f772396](https://github.com/davidmigloz/langchain_dart/commit/7f77239601fb216a01ec9d25680ec4d3dc4b97c7)) + +#### `langchain_google` - `v0.6.0` + +> Note: `ChatGoogleGenerativeAI` now uses `gemini-1.5-flash` model by default. + +- **BREAKING** **FEAT**: Update ChatGoogleGenerativeAI default model to gemini-1.5-flash ([#462](https://github.com/davidmigloz/langchain_dart/issues/462)). ([c8b30c90](https://github.com/davidmigloz/langchain_dart/commit/c8b30c906a17751547cc340f987b6670fbd67e69)) +- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) +- **FEAT**: Support response MIME type and schema in ChatGoogleGenerativeAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)). ([e258399e](https://github.com/davidmigloz/langchain_dart/commit/e258399e03437e8abe25417a14671dfb719cb273)) +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `langchain_openai` - `v0.6.3` + +- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + +#### `langchain_ollama` - `v0.2.2+1` + +- **DOCS**: Update ChatOllama API docs. 
([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) + +#### `langchain_chroma` - `v0.2.1` + +- Update a dependency to the latest release. + +#### `langchain_mistralai` - `v0.2.1` + +- Update a dependency to the latest release. + +#### `langchain_pinecone` - `v0.1.0+6` + +- Update a dependency to the latest release. + +#### `langchain_supabase` - `v0.1.1` + +- Update a dependency to the latest release. + +#### `anthropic_sdk_dart` - `v0.1.0` + +- **FEAT**: Add support for tool use in anthropic_sdk_dart client ([#469](https://github.com/davidmigloz/langchain_dart/issues/469)). ([81896cfd](https://github.com/davidmigloz/langchain_dart/commit/81896cfdfce116b010dd51391994251d2a836333)) +- **FEAT**: Add extensions on ToolResultBlockContent in anthropic_sdk_dart ([#476](https://github.com/davidmigloz/langchain_dart/issues/476)). ([8d92d9b0](https://github.com/davidmigloz/langchain_dart/commit/8d92d9b008755ff9b9ca3545eb26fc49a296a909)) +- **REFACTOR**: Improve schemas names in anthropic_sdk_dart ([#475](https://github.com/davidmigloz/langchain_dart/issues/475)). ([8ebeacde](https://github.com/davidmigloz/langchain_dart/commit/8ebeacded02ab92885354c9447b1a55e024b56d1)) +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `ollama_dart` - `v0.1.2` + +- **FEAT**: Add support for listing running Ollama models ([#451](https://github.com/davidmigloz/langchain_dart/issues/451)). ([cfaa31fb](https://github.com/davidmigloz/langchain_dart/commit/cfaa31fb8ce1dc128570c95d403809f71e0199d9)) +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `tavily_dart` - `v0.1.0` + +- **FEAT**: Implement tavily_dart, a Dart client for Tavily API ([#456](https://github.com/davidmigloz/langchain_dart/issues/456)). ([fbfb79ba](https://github.com/davidmigloz/langchain_dart/commit/fbfb79bad81dbbd5844a90938fda79b201f20047)) + +#### `googleai_dart` - `v0.1.0+2` + +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `mistralai_dart` - `v0.0.3+3` + +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `openai_dart` - `v0.3.3+1` + +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). 
([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 2024-06-01 ### Changes diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index c32f085f..621fd8c2 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -233,28 +233,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.1+1" + version: "0.2.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.2" + version: "0.6.3" langchain_tiktoken: dependency: transitive description: @@ -325,7 +325,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3" + version: "0.3.3+1" path: dependency: transitive description: @@ -497,7 +497,7 @@ packages: path: "../../packages/tavily_dart" relative: true source: path - version: "0.0.1-dev.1" + version: "0.1.0" term_glyph: dependency: transitive description: diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 0e729f8d..3868b314 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -13,9 +13,9 @@ dependencies: flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 js: ^0.7.1 - langchain: ^0.7.2 - langchain_community: 0.2.1+1 - langchain_openai: ^0.6.2 + langchain: ^0.7.3 + langchain_community: 0.2.2 + langchain_openai: ^0.6.3 shared_preferences: ^2.2.2 flutter: diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 47e6b5b7..806b30f6 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -15,7 +15,7 @@ packages: path: "../../packages/anthropic_sdk_dart" relative: true source: path - version: "0.0.1" + version: "0.1.0" args: dependency: transitive description: @@ -245,42 +245,42 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_anthropic: dependency: "direct main" description: path: "../../packages/langchain_anthropic" relative: true source: path - version: "0.0.1-dev.1" + version: "0.1.0" langchain_chroma: dependency: "direct main" description: path: "../../packages/langchain_chroma" relative: true source: path - version: "0.2.0+5" + version: "0.2.1" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.1+1" + version: "0.2.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.5.1" + version: "0.6.0" langchain_mistralai: dependency: "direct main" description: @@ -294,14 +294,14 @@ packages: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.2" + version: "0.2.2+1" langchain_openai: dependency: "direct main" description: path: 
"../../packages/langchain_openai" relative: true source: path - version: "0.6.2" + version: "0.6.3" langchain_tiktoken: dependency: transitive description: @@ -340,7 +340,7 @@ packages: path: "../../packages/mistralai_dart" relative: true source: path - version: "0.0.3+2" + version: "0.0.3+3" objectbox: dependency: transitive description: @@ -355,14 +355,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.1" + version: "0.1.2" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3" + version: "0.3.3+1" path: dependency: transitive description: @@ -433,7 +433,7 @@ packages: path: "../../packages/tavily_dart" relative: true source: path - version: "0.0.1-dev.1" + version: "0.1.0" term_glyph: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 2f6b0f37..4888329b 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -7,11 +7,11 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.2 - langchain_anthropic: ^0.0.1-dev.1 - langchain_chroma: ^0.2.0+5 - langchain_community: 0.2.1+1 - langchain_google: ^0.5.1 + langchain: ^0.7.3 + langchain_anthropic: ^0.1.0 + langchain_chroma: ^0.2.1 + langchain_community: 0.2.2 + langchain_google: ^0.6.0 langchain_mistralai: ^0.2.1 - langchain_ollama: ^0.2.2 - langchain_openai: ^0.6.2 + langchain_ollama: ^0.2.2+1 + langchain_openai: ^0.6.3 diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index dc3ac458..1acce35a 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -119,21 +119,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.2" + version: "0.6.3" langchain_tiktoken: dependency: transitive description: @@ -156,7 +156,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3" + version: "0.3.3+1" path: dependency: transitive description: diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index 665ba178..c091ef7c 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -7,7 +7,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.2 - langchain_openai: ^0.6.2 + langchain: ^0.7.3 + langchain_openai: ^0.6.3 shelf: ^1.4.1 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 8fc27717..45f69561 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -111,21 +111,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.2" + version: "0.6.3" 
langchain_tiktoken: dependency: transitive description: @@ -148,7 +148,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3" + version: "0.3.3+1" path: dependency: transitive description: diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index 28872dc6..0e070b1d 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -7,5 +7,5 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.2 - langchain_openai: ^0.6.2 + langchain: ^0.7.3 + langchain_openai: ^0.6.3 diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 9bbfa0f2..9802cb30 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -204,21 +204,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.5.1" + version: "0.6.0" langchain_mistralai: dependency: "direct main" description: @@ -232,14 +232,14 @@ packages: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.2" + version: "0.2.2+1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.2" + version: "0.6.3" langchain_tiktoken: dependency: transitive description: @@ -278,7 +278,7 @@ packages: path: "../../packages/mistralai_dart" relative: true source: path - version: "0.0.3+2" + version: "0.0.3+3" nested: dependency: transitive description: @@ -293,14 +293,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.1" + version: "0.1.2" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3" + version: "0.3.3+1" path: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index 4f9f4c56..786c1edd 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -12,11 +12,11 @@ dependencies: equatable: ^2.0.5 flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 - langchain: ^0.7.2 - langchain_google: ^0.5.1 + langchain: ^0.7.3 + langchain_google: ^0.6.0 langchain_mistralai: ^0.2.1 - langchain_ollama: ^0.2.2 - langchain_openai: ^0.6.2 + langchain_ollama: ^0.2.2+1 + langchain_openai: ^0.6.3 flutter: uses-material-design: true diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index 18f2890b..da33efe5 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -167,28 +167,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.1+1" + version: "0.2.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_ollama: dependency: "direct main" description: path: 
"../../packages/langchain_ollama" relative: true source: path - version: "0.2.2" + version: "0.2.2+1" langchain_tiktoken: dependency: transitive description: @@ -235,7 +235,7 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.1" + version: "0.1.2" path: dependency: transitive description: @@ -298,7 +298,7 @@ packages: path: "../../packages/tavily_dart" relative: true source: path - version: "0.0.1-dev.1" + version: "0.1.0" term_glyph: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index 198686c0..a591713f 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -7,6 +7,6 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.2 - langchain_ollama: ^0.2.2 - langchain_community: 0.2.1+1 + langchain: ^0.7.3 + langchain_ollama: ^0.2.2+1 + langchain_community: 0.2.2 diff --git a/packages/anthropic_sdk_dart/CHANGELOG.md b/packages/anthropic_sdk_dart/CHANGELOG.md index de958be3..85fb6080 100644 --- a/packages/anthropic_sdk_dart/CHANGELOG.md +++ b/packages/anthropic_sdk_dart/CHANGELOG.md @@ -1,3 +1,10 @@ +## 0.1.0 + + - **FEAT**: Add support for tool use in anthropic_sdk_dart client ([#469](https://github.com/davidmigloz/langchain_dart/issues/469)). ([81896cfd](https://github.com/davidmigloz/langchain_dart/commit/81896cfdfce116b010dd51391994251d2a836333)) + - **FEAT**: Add extensions on ToolResultBlockContent in anthropic_sdk_dart ([#476](https://github.com/davidmigloz/langchain_dart/issues/476)). ([8d92d9b0](https://github.com/davidmigloz/langchain_dart/commit/8d92d9b008755ff9b9ca3545eb26fc49a296a909)) + - **REFACTOR**: Improve schemas names in anthropic_sdk_dart ([#475](https://github.com/davidmigloz/langchain_dart/issues/475)). ([8ebeacde](https://github.com/davidmigloz/langchain_dart/commit/8ebeacded02ab92885354c9447b1a55e024b56d1)) + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 0.0.1 - **FEAT**: Implement anthropic_sdk_dart, a Dart client for Anthropic API ([#433](https://github.com/davidmigloz/langchain_dart/issues/433)). ([e5412b](https://github.com/davidmigloz/langchain_dart/commit/e5412bdedc7de911f7de88eb51e9d41cd85ab4ae)) diff --git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml index 650ac782..160596dc 100644 --- a/packages/anthropic_sdk_dart/pubspec.yaml +++ b/packages/anthropic_sdk_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: anthropic_sdk_dart description: Dart Client for the Anthropic API (Claude 3.5 Sonnet, Opus, Haiku, etc.). -version: 0.0.1 +version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/anthropic_sdk_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:anthropic_sdk_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/googleai_dart/CHANGELOG.md b/packages/googleai_dart/CHANGELOG.md index 7a6ca6b8..e1d53bc8 100644 --- a/packages/googleai_dart/CHANGELOG.md +++ b/packages/googleai_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+2 + + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). 
([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 0.1.0+1 - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) diff --git a/packages/googleai_dart/pubspec.yaml b/packages/googleai_dart/pubspec.yaml index 2006a059..22370975 100644 --- a/packages/googleai_dart/pubspec.yaml +++ b/packages/googleai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: googleai_dart description: Dart Client for the Google AI API (Gemini Pro, Gemini Pro Vision, embeddings, etc.). -version: 0.1.0+1 +version: 0.1.0+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/googleai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:googleai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index cc6953da..219c9dc5 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -1,6 +1,13 @@ +## 0.7.3 + +> Note: Anthropic integration (`ChatAnthropic`) is now available in the new [`langchain_anthropic`](https://pub.dev/packages/langchain_anthropic) package. + +- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) +- **DOCS**: Document existing integrations in README.md. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) + ## 0.7.2 -> Note: ObjectBox Vector DB integration (`ObjectBoxVectorStore`) is now available in the [`langchain_community`](https://pub.dev/packages/langchain_community) package +> Note: ObjectBox Vector DB integration (`ObjectBoxVectorStore`) is now available in the [`langchain_community`](https://pub.dev/packages/langchain_community) package. - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) @@ -8,7 +15,7 @@ ## 0.7.1 -> Note: VertexAI for Firebase (`ChatFirebaseVertexAI`) is available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package. +> Note: VertexAI for Firebase (`ChatFirebaseVertexAI`) is now available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package. - **DOCS**: Add docs for ChatFirebaseVertexAI ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f)) - **DOCS**: Update ChatOllama docs ([#417](https://github.com/davidmigloz/langchain_dart/issues/417)). 
([9d30b1a1](https://github.com/davidmigloz/langchain_dart/commit/9d30b1a1c811d73cfa27110b8c3c10b10da1801e)) diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index a92d1e9c..e53a3859 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain description: Build powerful LLM-based Dart and Flutter applications with LangChain.dart. -version: 0.7.2 +version: 0.7.3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ dependencies: characters: ^1.3.0 collection: ">=1.17.0 <1.19.0" crypto: ^3.0.3 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_anthropic/CHANGELOG.md b/packages/langchain_anthropic/CHANGELOG.md index 6df81faa..fe3d0a4d 100644 --- a/packages/langchain_anthropic/CHANGELOG.md +++ b/packages/langchain_anthropic/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0 + +- **FEAT**: Add ChatAnthropic integration ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) + ## 0.0.1-dev.1 - Bootstrap project. diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 7a581267..700b6559 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_anthropic description: Anthropic module for LangChain.dart (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.). -version: 0.0.1-dev.1 +version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic homepage: https://github.com/davidmigloz/langchain_dart @@ -16,10 +16,10 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - anthropic_sdk_dart: ^0.0.1 + anthropic_sdk_dart: ^0.1.0 collection: '>=1.17.0 <1.19.0' http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 langchain_tiktoken: ^1.0.1 rxdart: ^0.27.7 diff --git a/packages/langchain_chroma/CHANGELOG.md b/packages/langchain_chroma/CHANGELOG.md index 8e785534..218a218c 100644 --- a/packages/langchain_chroma/CHANGELOG.md +++ b/packages/langchain_chroma/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.1 + + - Update a dependency to the latest release. + ## 0.2.0+5 - Update a dependency to the latest release. diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 3c96bacb..8d7684ba 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_chroma description: LangChain.dart integration module for Chroma open-source embedding database. 
-version: 0.2.0+5 +version: 0.2.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart @@ -19,12 +19,12 @@ environment: dependencies: chromadb: ^0.2.0+1 http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 meta: ^1.11.0 uuid: ^4.3.3 dev_dependencies: test: ^1.25.2 - langchain: ^0.7.2 - langchain_community: 0.2.1+1 - langchain_openai: ^0.6.2 + langchain: ^0.7.3 + langchain_community: 0.2.2 + langchain_openai: ^0.6.3 diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 5c3aaba2..3e7be761 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.2 + + - **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) + ## 0.2.1+1 - **FIX**: Add missing dependency in langchain_community package ([#448](https://github.com/davidmigloz/langchain_dart/issues/448)). ([70ffd027](https://github.com/davidmigloz/langchain_dart/commit/70ffd027cb41c5c5058bb266966734894f773330)) diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index b386fb9d..4e2c6f7a 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. -version: 0.2.1+1 +version: 0.2.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart @@ -22,16 +22,16 @@ dependencies: flat_buffers: ^23.5.26 http: ^1.1.0 json_path: ^0.7.1 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 math_expressions: ^2.4.0 meta: ^1.11.0 objectbox: ^4.0.1 - tavily_dart: ^0.0.1-dev.1 + tavily_dart: ^0.1.0 uuid: ^4.3.3 dev_dependencies: build_runner: ^2.4.9 - langchain_openai: ^0.6.2 + langchain_openai: ^0.6.3 objectbox_generator: ^4.0.0 test: ^1.25.2 diff --git a/packages/langchain_core/CHANGELOG.md b/packages/langchain_core/CHANGELOG.md index dd637cd5..7757bae9 100644 --- a/packages/langchain_core/CHANGELOG.md +++ b/packages/langchain_core/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.3.3 + + - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + - **FEAT**: Update ChatResult.id concat logic ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) + ## 0.3.2 - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). 
([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index b682b76a..38081129 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_core description: Contains core abstractions of LangChain.dart and the LangChain Expression Language (LCEL). -version: 0.3.2 +version: 0.3.3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index d5128425..a0eb0aa4 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,3 +1,14 @@ +## 0.2.0 + +> Note: `ChatFirebaseVertexAI` now uses `gemini-1.5-flash` model by default. + + - **BREAKING** **FEAT**: Update ChatFirebaseVertexAI default model to gemini-1.5-flash ([#458](https://github.com/davidmigloz/langchain_dart/issues/458)). ([d3c96c52](https://github.com/davidmigloz/langchain_dart/commit/d3c96c52e95e889ba6955e3de80a83978b27618b)) + - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + - **FEAT**: Support response MIME type in ChatFirebaseVertexAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)) ([#463](https://github.com/davidmigloz/langchain_dart/issues/463)). ([c3452721](https://github.com/davidmigloz/langchain_dart/commit/c3452721c78ba3071ed2510a243f9c824a291c34)) + - **FEAT**: Add support for Firebase Auth in ChatFirebaseVertexAI ([#460](https://github.com/davidmigloz/langchain_dart/issues/460)). ([6d137290](https://github.com/davidmigloz/langchain_dart/commit/6d137290ca0f56c9fcc725e6211e838a3e3c6d16)) + - **FEAT**: Add support for usage metadata in ChatFirebaseVertexAI ([#457](https://github.com/davidmigloz/langchain_dart/issues/457)). ([2587f9e2](https://github.com/davidmigloz/langchain_dart/commit/2587f9e2bcbcc2bf5e2295dce409e92a89bf3c44)) + - **REFACTOR**: Simplify how tools are passed to the internal Firebase client ([#459](https://github.com/davidmigloz/langchain_dart/issues/459)). ([7f772396](https://github.com/davidmigloz/langchain_dart/commit/7f77239601fb216a01ec9d25680ec4d3dc4b97c7)) + ## 0.1.0+3 - Update a dependency to the latest release. diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 4b481fb2..03e1dab8 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -262,21 +262,21 @@ packages: path: "../../langchain" relative: true source: path - version: "0.7.2" + version: "0.7.3" langchain_core: dependency: "direct overridden" description: path: "../../langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" langchain_firebase: dependency: "direct main" description: path: ".." 
relative: true source: path - version: "0.1.0+3" + version: "0.2.0" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index ff8593ef..f1618ec8 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -13,8 +13,8 @@ dependencies: flutter: sdk: flutter flutter_markdown: ^0.6.22 - langchain: 0.7.2 - langchain_firebase: 0.1.0+3 + langchain: 0.7.3 + langchain_firebase: 0.2.0 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 62232007..2927c037 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -230,7 +230,7 @@ packages: path: "../langchain_core" relative: true source: path - version: "0.3.2" + version: "0.3.3" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index ec1265df..3b01a394 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). -version: 0.1.0+3 +version: 0.2.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart @@ -24,7 +24,7 @@ dependencies: firebase_core: ^3.1.0 cloud_firestore: ^4.17.0 firebase_vertexai: ^0.2.2 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index c2d95eed..9964d000 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -1,3 +1,12 @@ +## 0.6.0 + +> Note: `ChatGoogleGenerativeAI` now uses `gemini-1.5-flash` model by default. + + - **BREAKING** **FEAT**: Update ChatGoogleGenerativeAI default model to gemini-1.5-flash ([#462](https://github.com/davidmigloz/langchain_dart/issues/462)). ([c8b30c90](https://github.com/davidmigloz/langchain_dart/commit/c8b30c906a17751547cc340f987b6670fbd67e69)) + - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + - **FEAT**: Support response MIME type and schema in ChatGoogleGenerativeAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)). ([e258399e](https://github.com/davidmigloz/langchain_dart/commit/e258399e03437e8abe25417a14671dfb719cb273)) + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 0.5.1 - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). 
([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index a2d2670a..eeec07e2 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_google description: LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). -version: 0.5.1 +version: 0.6.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart @@ -24,7 +24,7 @@ dependencies: googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 meta: ^1.11.0 uuid: ^4.3.3 vertex_ai: ^0.1.0+1 diff --git a/packages/langchain_mistralai/CHANGELOG.md b/packages/langchain_mistralai/CHANGELOG.md index d5d9ca46..7b74ab46 100644 --- a/packages/langchain_mistralai/CHANGELOG.md +++ b/packages/langchain_mistralai/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.1 + + - Update a dependency to the latest release. + ## 0.2.1 - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 2eda0275..b8025bfe 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -19,10 +19,10 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - mistralai_dart: ^0.0.3+2 + mistralai_dart: ^0.0.3+3 dev_dependencies: test: ^1.25.2 diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index a72f229e..4d475188 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.2.2+1 + + - **DOCS**: Update ChatOllama API docs. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) + ## 0.2.2 - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index ae7adb8d..273a1594 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_ollama description: LangChain.dart integration module for Ollama (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). 
-version: 0.2.2 +version: 0.2.2+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart @@ -19,10 +19,10 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.1.1 + ollama_dart: ^0.1.2 uuid: ^4.3.3 dev_dependencies: diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index 4daab488..c1503886 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.6.3 + + - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + ## 0.6.2 - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index c0ccb98d..4679404e 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_openai description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). -version: 0.6.2 +version: 0.6.3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,13 +19,13 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.3.3 + openai_dart: ^0.3.3+1 uuid: ^4.3.3 dev_dependencies: - langchain: ^0.7.2 - langchain_community: 0.2.1+1 + langchain: ^0.7.3 + langchain_community: 0.2.2 test: ^1.25.2 diff --git a/packages/langchain_pinecone/CHANGELOG.md b/packages/langchain_pinecone/CHANGELOG.md index 6e3c39e3..9faacd04 100644 --- a/packages/langchain_pinecone/CHANGELOG.md +++ b/packages/langchain_pinecone/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0+6 + + - Update a dependency to the latest release. + ## 0.1.0+5 - Update a dependency to the latest release. diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index b943bde0..282943fd 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_pinecone description: LangChain.dart integration module for Pinecone fully-managed vector database. 
-version: 0.1.0+5 +version: 0.1.0+6 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart @@ -18,11 +18,11 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.3.3 dev_dependencies: test: ^1.25.2 - langchain_openai: ^0.6.2 + langchain_openai: ^0.6.3 diff --git a/packages/langchain_supabase/CHANGELOG.md b/packages/langchain_supabase/CHANGELOG.md index 00a141c5..bb20d19b 100644 --- a/packages/langchain_supabase/CHANGELOG.md +++ b/packages/langchain_supabase/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.1 + + - Update a dependency to the latest release. + ## 0.1.0+5 - Update a dependency to the latest release. diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index ffb0656d..80ab7e11 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_supabase description: LangChain.dart integration module for Supabase (e.g. Supabase Vector). -version: 0.1.0+5 +version: 0.1.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart @@ -18,12 +18,12 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.3.2 + langchain_core: ^0.3.3 meta: ^1.11.0 supabase: ^2.0.8 dev_dependencies: test: ^1.25.2 - langchain: ^0.7.2 - langchain_community: 0.2.1+1 - langchain_openai: ^0.6.2 + langchain: ^0.7.3 + langchain_community: 0.2.2 + langchain_openai: ^0.6.3 diff --git a/packages/mistralai_dart/CHANGELOG.md b/packages/mistralai_dart/CHANGELOG.md index d1426493..df84e9cc 100644 --- a/packages/mistralai_dart/CHANGELOG.md +++ b/packages/mistralai_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.0.3+3 + + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 0.0.3+2 - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) diff --git a/packages/mistralai_dart/pubspec.yaml b/packages/mistralai_dart/pubspec.yaml index a2aad311..970d1403 100644 --- a/packages/mistralai_dart/pubspec.yaml +++ b/packages/mistralai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: mistralai_dart description: Dart Client for the Mistral AI API (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.0.3+2 +version: 0.0.3+3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/mistralai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:mistralai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index 21ceb1cf..5dfee162 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.1.2 + + - **FEAT**: Add support for listing running Ollama models ([#451](https://github.com/davidmigloz/langchain_dart/issues/451)). 
([cfaa31fb](https://github.com/davidmigloz/langchain_dart/commit/cfaa31fb8ce1dc128570c95d403809f71e0199d9)) + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 0.1.1 - **FEAT**: Support buffered stream responses in ollama_dart ([#445](https://github.com/davidmigloz/langchain_dart/issues/445)). ([ce2ef30c](https://github.com/davidmigloz/langchain_dart/commit/ce2ef30c9a9a0dfe8f3059988b7007c94c45b9bd)) diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 81f9fd49..954eb772 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: ollama_dart description: Dart Client for the Ollama API (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). -version: 0.1.1 +version: 0.1.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index 0a0e4085..70e28286 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.3.3+1 + + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + ## 0.3.3 - **FEAT**: Support FastChat OpenAI-compatible API ([#444](https://github.com/davidmigloz/langchain_dart/issues/444)). ([ddaf1f69](https://github.com/davidmigloz/langchain_dart/commit/ddaf1f69d8262210637999367690bf362f2dc5c3)) diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index f617c8f0..4c449cc2 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: openai_dart description: Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. -version: 0.3.3 +version: 0.3.3+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/tavily_dart/CHANGELOG.md b/packages/tavily_dart/CHANGELOG.md index 90f8e244..897f0558 100644 --- a/packages/tavily_dart/CHANGELOG.md +++ b/packages/tavily_dart/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.1.0 + + - **FEAT**: Implement tavily_dart, a Dart client for Tavily API ([#456](https://github.com/davidmigloz/langchain_dart/issues/456)). ([fbfb79ba](https://github.com/davidmigloz/langchain_dart/commit/fbfb79bad81dbbd5844a90938fda79b201f20047)) + ## 0.0.1-dev.1 - Bootstrap package. diff --git a/packages/tavily_dart/pubspec.yaml b/packages/tavily_dart/pubspec.yaml index 216e0b0d..5694d98f 100644 --- a/packages/tavily_dart/pubspec.yaml +++ b/packages/tavily_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: tavily_dart description: Dart Client for the Tavily API (a search engine optimized for LLMs and RAG). 
-version: 0.0.1-dev.1 +version: 0.1.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/tavily_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:tavily_dart homepage: https://github.com/davidmigloz/langchain_dart From ae5926cf99dc78c768f41db563b8dbd48ad3102f Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 5 Jul 2024 17:20:07 +0200 Subject: [PATCH 060/251] docs: Update Ollama request options default values in API docs (#479) --- .../lib/src/chat_models/types.dart | 4 +- .../src/generated/schema/request_options.dart | 60 ++++-- .../src/generated/schema/schema.freezed.dart | 183 ++++++++++++------ packages/ollama_dart/oas/ollama-curated.yaml | 61 ++++-- 4 files changed, 200 insertions(+), 108 deletions(-) diff --git a/packages/langchain_ollama/lib/src/chat_models/types.dart b/packages/langchain_ollama/lib/src/chat_models/types.dart index 3f14d2a2..4db56920 100644 --- a/packages/langchain_ollama/lib/src/chat_models/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/types.dart @@ -141,7 +141,7 @@ class ChatOllamaOptions extends ChatModelOptions { final double? mirostatEta; /// Penalize newlines in the output. - /// (Default: false) + /// (Default: true) final bool? penalizeNewline; /// Sequences where the API will stop generating further tokens. The returned @@ -172,7 +172,7 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? lowVram; /// Enable f16 key/value. - /// (Default: false) + /// (Default: true) final bool? f16KV; /// Enable logits all. diff --git a/packages/ollama_dart/lib/src/generated/schema/request_options.dart b/packages/ollama_dart/lib/src/generated/schema/request_options.dart index a83df364..b6288d57 100644 --- a/packages/ollama_dart/lib/src/generated/schema/request_options.dart +++ b/packages/ollama_dart/lib/src/generated/schema/request_options.dart @@ -18,68 +18,84 @@ class RequestOptions with _$RequestOptions { /// Number of tokens to keep from the prompt. @JsonKey(name: 'num_keep', includeIfNull: false) int? numKeep, - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model + /// generate the same text for the same prompt. (Default: 0) @JsonKey(includeIfNull: false) int? seed, - /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. + /// (Default: 128, -1 = infinite generation, -2 = fill context) @JsonKey(name: 'num_predict', includeIfNull: false) int? numPredict, - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? topK, - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? topP, - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, - /// Typical p is used to reduce the impact of less probable tokens from the output. + /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) @JsonKey(name: 'typical_p', includeIfNull: false) double? typicalP, - /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. + /// (Default: 64, 0 = disabled, -1 = num_ctx) @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? repeatLastN, - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. + /// (Default: 0.8) @JsonKey(includeIfNull: false) double? temperature, - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @JsonKey(name: 'repeat_penalty', includeIfNull: false) double? repeatPenalty, - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the + /// model's likelihood to talk about new topics. (Default: 0) @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + /// model's likelihood to repeat the same line verbatim. (Default: 0) @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? frequencyPenalty, - /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. + /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @JsonKey(includeIfNull: false) int? mirostat, - /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more + /// focused and coherent text. (Default: 5.0) @JsonKey(name: 'mirostat_tau', includeIfNull: false) double? 
mirostatTau, - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + /// (Default: 0.1) @JsonKey(name: 'mirostat_eta', includeIfNull: false) double? mirostatEta, - /// Penalize newlines in the output. (Default: false) + /// Penalize newlines in the output. (Default: true) @JsonKey(name: 'penalize_newline', includeIfNull: false) bool? penalizeNewline, - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. @JsonKey(includeIfNull: false) List? stop, /// Enable NUMA support. (Default: false) @JsonKey(includeIfNull: false) bool? numa, - /// Sets the size of the context window used to generate the next token. + /// Sets the size of the context window used to generate the next token. (Default: 2048) @JsonKey(name: 'num_ctx', includeIfNull: false) int? numCtx, - /// Sets the number of batches to use for generation. (Default: 1) + /// Sets the number of batches to use for generation. (Default: 512) @JsonKey(name: 'num_batch', includeIfNull: false) int? numBatch, - /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). + /// On macOS it defaults to 1 to enable metal support, 0 to disable. @JsonKey(name: 'num_gpu', includeIfNull: false) int? numGpu, /// The GPU to use for the main model. Default is 0. @@ -88,7 +104,7 @@ class RequestOptions with _$RequestOptions { /// Enable low VRAM mode. (Default: false) @JsonKey(name: 'low_vram', includeIfNull: false) bool? lowVram, - /// Enable f16 key/value. (Default: false) + /// Enable f16 key/value. (Default: true) @JsonKey(name: 'f16_kv', includeIfNull: false) bool? f16Kv, /// Enable logits all. (Default: false) @@ -103,7 +119,9 @@ class RequestOptions with _$RequestOptions { /// Enable mlock. (Default: false) @JsonKey(name: 'use_mlock', includeIfNull: false) bool? useMlock, - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + /// performance. It is recommended to set this value to the number of physical CPU cores your system has + /// (as opposed to the logical number of cores). @JsonKey(name: 'num_thread', includeIfNull: false) int? numThread, }) = _RequestOptions; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index 88e82b13..db00fd66 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -567,67 +567,82 @@ mixin _$RequestOptions { @JsonKey(name: 'num_keep', includeIfNull: false) int? 
get numKeep => throw _privateConstructorUsedError; - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model + /// generate the same text for the same prompt. (Default: 0) @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; - /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. + /// (Default: 128, -1 = infinite generation, -2 = fill context) @JsonKey(name: 'num_predict', includeIfNull: false) int? get numPredict => throw _privateConstructorUsedError; - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? get topK => throw _privateConstructorUsedError; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? get tfsZ => throw _privateConstructorUsedError; - /// Typical p is used to reduce the impact of less probable tokens from the output. + /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) @JsonKey(name: 'typical_p', includeIfNull: false) double? get typicalP => throw _privateConstructorUsedError; - /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. + /// (Default: 64, 0 = disabled, -1 = num_ctx) @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? get repeatLastN => throw _privateConstructorUsedError; - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. + /// (Default: 0.8) @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. 
A higher value (e.g., 1.5) will penalize repetitions more + /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @JsonKey(name: 'repeat_penalty', includeIfNull: false) double? get repeatPenalty => throw _privateConstructorUsedError; - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the + /// model's likelihood to talk about new topics. (Default: 0) @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty => throw _privateConstructorUsedError; - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + /// model's likelihood to repeat the same line verbatim. (Default: 0) @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty => throw _privateConstructorUsedError; - /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. + /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @JsonKey(includeIfNull: false) int? get mirostat => throw _privateConstructorUsedError; - /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more + /// focused and coherent text. (Default: 5.0) @JsonKey(name: 'mirostat_tau', includeIfNull: false) double? get mirostatTau => throw _privateConstructorUsedError; - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + /// (Default: 0.1) @JsonKey(name: 'mirostat_eta', includeIfNull: false) double? get mirostatEta => throw _privateConstructorUsedError; - /// Penalize newlines in the output. (Default: false) + /// Penalize newlines in the output. (Default: true) @JsonKey(name: 'penalize_newline', includeIfNull: false) bool? get penalizeNewline => throw _privateConstructorUsedError; - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. @JsonKey(includeIfNull: false) List? get stop => throw _privateConstructorUsedError; @@ -635,15 +650,16 @@ mixin _$RequestOptions { @JsonKey(includeIfNull: false) bool? get numa => throw _privateConstructorUsedError; - /// Sets the size of the context window used to generate the next token. + /// Sets the size of the context window used to generate the next token. (Default: 2048) @JsonKey(name: 'num_ctx', includeIfNull: false) int? get numCtx => throw _privateConstructorUsedError; - /// Sets the number of batches to use for generation. 
(Default: 1) + /// Sets the number of batches to use for generation. (Default: 512) @JsonKey(name: 'num_batch', includeIfNull: false) int? get numBatch => throw _privateConstructorUsedError; - /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). + /// On macOS it defaults to 1 to enable metal support, 0 to disable. @JsonKey(name: 'num_gpu', includeIfNull: false) int? get numGpu => throw _privateConstructorUsedError; @@ -655,7 +671,7 @@ mixin _$RequestOptions { @JsonKey(name: 'low_vram', includeIfNull: false) bool? get lowVram => throw _privateConstructorUsedError; - /// Enable f16 key/value. (Default: false) + /// Enable f16 key/value. (Default: true) @JsonKey(name: 'f16_kv', includeIfNull: false) bool? get f16Kv => throw _privateConstructorUsedError; @@ -675,7 +691,9 @@ mixin _$RequestOptions { @JsonKey(name: 'use_mlock', includeIfNull: false) bool? get useMlock => throw _privateConstructorUsedError; - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + /// performance. It is recommended to set this value to the number of physical CPU cores your system has + /// (as opposed to the logical number of cores). @JsonKey(name: 'num_thread', includeIfNull: false) int? get numThread => throw _privateConstructorUsedError; @@ -1144,85 +1162,101 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'num_keep', includeIfNull: false) final int? numKeep; - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model + /// generate the same text for the same prompt. (Default: 0) @override @JsonKey(includeIfNull: false) final int? seed; - /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. + /// (Default: 128, -1 = infinite generation, -2 = fill context) @override @JsonKey(name: 'num_predict', includeIfNull: false) final int? numPredict; - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @override @JsonKey(name: 'top_k', includeIfNull: false) final int? topK; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @override @JsonKey(name: 'tfs_z', includeIfNull: false) final double? tfsZ; - /// Typical p is used to reduce the impact of less probable tokens from the output. + /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) @override @JsonKey(name: 'typical_p', includeIfNull: false) final double? typicalP; - /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. + /// (Default: 64, 0 = disabled, -1 = num_ctx) @override @JsonKey(name: 'repeat_last_n', includeIfNull: false) final int? repeatLastN; - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. + /// (Default: 0.8) @override @JsonKey(includeIfNull: false) final double? temperature; - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @override @JsonKey(name: 'repeat_penalty', includeIfNull: false) final double? repeatPenalty; - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the + /// model's likelihood to talk about new topics. (Default: 0) @override @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty; - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + /// model's likelihood to repeat the same line verbatim. (Default: 0) @override @JsonKey(name: 'frequency_penalty', includeIfNull: false) final double? frequencyPenalty; - /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. + /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @override @JsonKey(includeIfNull: false) final int? mirostat; - /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more + /// focused and coherent text. (Default: 5.0) @override @JsonKey(name: 'mirostat_tau', includeIfNull: false) final double? mirostatTau; - /// Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + /// (Default: 0.1) @override @JsonKey(name: 'mirostat_eta', includeIfNull: false) final double? mirostatEta; - /// Penalize newlines in the output. (Default: false) + /// Penalize newlines in the output. (Default: true) @override @JsonKey(name: 'penalize_newline', includeIfNull: false) final bool? penalizeNewline; - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. final List? _stop; - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. @override @JsonKey(includeIfNull: false) List? get stop { @@ -1238,17 +1272,18 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(includeIfNull: false) final bool? numa; - /// Sets the size of the context window used to generate the next token. + /// Sets the size of the context window used to generate the next token. (Default: 2048) @override @JsonKey(name: 'num_ctx', includeIfNull: false) final int? numCtx; - /// Sets the number of batches to use for generation. (Default: 1) + /// Sets the number of batches to use for generation. (Default: 512) @override @JsonKey(name: 'num_batch', includeIfNull: false) final int? numBatch; - /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). + /// On macOS it defaults to 1 to enable metal support, 0 to disable. @override @JsonKey(name: 'num_gpu', includeIfNull: false) final int? numGpu; @@ -1263,7 +1298,7 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'low_vram', includeIfNull: false) final bool? lowVram; - /// Enable f16 key/value. (Default: false) + /// Enable f16 key/value. (Default: true) @override @JsonKey(name: 'f16_kv', includeIfNull: false) final bool? f16Kv; @@ -1288,7 +1323,9 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'use_mlock', includeIfNull: false) final bool? useMlock; - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + /// performance. It is recommended to set this value to the number of physical CPU cores your system has + /// (as opposed to the logical number of cores). @override @JsonKey(name: 'num_thread', includeIfNull: false) final int? numThread; @@ -1451,82 +1488,97 @@ abstract class _RequestOptions extends RequestOptions { int? get numKeep; @override - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. 
Setting this to a specific number will make the model + /// generate the same text for the same prompt. (Default: 0) @JsonKey(includeIfNull: false) int? get seed; @override - /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. + /// (Default: 128, -1 = infinite generation, -2 = fill context) @JsonKey(name: 'num_predict', includeIfNull: false) int? get numPredict; @override - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? get topK; @override - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; @override - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? get tfsZ; @override - /// Typical p is used to reduce the impact of less probable tokens from the output. + /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) @JsonKey(name: 'typical_p', includeIfNull: false) double? get typicalP; @override - /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. + /// (Default: 64, 0 = disabled, -1 = num_ctx) @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? get repeatLastN; @override - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. + /// (Default: 0.8) @JsonKey(includeIfNull: false) double? get temperature; @override - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @JsonKey(name: 'repeat_penalty', includeIfNull: false) double? get repeatPenalty; @override - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
+ /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the + /// model's likelihood to talk about new topics. (Default: 0) @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; @override - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + /// model's likelihood to repeat the same line verbatim. (Default: 0) @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty; @override - /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. + /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @JsonKey(includeIfNull: false) int? get mirostat; @override - /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more + /// focused and coherent text. (Default: 5.0) @JsonKey(name: 'mirostat_tau', includeIfNull: false) double? get mirostatTau; @override - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + /// (Default: 0.1) @JsonKey(name: 'mirostat_eta', includeIfNull: false) double? get mirostatEta; @override - /// Penalize newlines in the output. (Default: false) + /// Penalize newlines in the output. (Default: true) @JsonKey(name: 'penalize_newline', includeIfNull: false) bool? get penalizeNewline; @override - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. @JsonKey(includeIfNull: false) List? get stop; @override @@ -1536,17 +1588,18 @@ abstract class _RequestOptions extends RequestOptions { bool? get numa; @override - /// Sets the size of the context window used to generate the next token. + /// Sets the size of the context window used to generate the next token. (Default: 2048) @JsonKey(name: 'num_ctx', includeIfNull: false) int? get numCtx; @override - /// Sets the number of batches to use for generation. (Default: 1) + /// Sets the number of batches to use for generation. (Default: 512) @JsonKey(name: 'num_batch', includeIfNull: false) int? get numBatch; @override - /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). + /// On macOS it defaults to 1 to enable metal support, 0 to disable. @JsonKey(name: 'num_gpu', includeIfNull: false) int? get numGpu; @override @@ -1561,7 +1614,7 @@ abstract class _RequestOptions extends RequestOptions { bool? get lowVram; @override - /// Enable f16 key/value. (Default: false) + /// Enable f16 key/value. 
(Default: true) @JsonKey(name: 'f16_kv', includeIfNull: false) bool? get f16Kv; @override @@ -1586,7 +1639,9 @@ abstract class _RequestOptions extends RequestOptions { bool? get useMlock; @override - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + /// performance. It is recommended to set this value to the number of physical CPU cores your system has + /// (as opposed to the logical number of cores). @JsonKey(name: 'num_thread', includeIfNull: false) int? get numThread; @override diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index 7ade34a7..12159978 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -326,90 +326,106 @@ components: type: integer nullable: true description: | - Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + Sets the random number seed to use for generation. Setting this to a specific number will make the model + generate the same text for the same prompt. (Default: 0) num_predict: type: integer nullable: true description: | - Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + Maximum number of tokens to predict when generating text. + (Default: 128, -1 = infinite generation, -2 = fill context) top_k: type: integer nullable: true description: | - Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + while a lower value (e.g. 10) will be more conservative. (Default: 40) top_p: type: number format: float nullable: true description: | - Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) tfs_z: type: number format: float nullable: true description: | - Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) typical_p: type: number format: float nullable: true description: | - Typical p is used to reduce the impact of less probable tokens from the output. + Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) repeat_last_n: type: integer nullable: true description: | - Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + Sets how far back for the model to look back to prevent repetition. 
+ (Default: 64, 0 = disabled, -1 = num_ctx) temperature: type: number format: float nullable: true description: | - The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + The temperature of the model. Increasing the temperature will make the model answer more creatively. + (Default: 0.8) repeat_penalty: type: number format: float nullable: true description: | - Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) presence_penalty: type: number format: float nullable: true description: | - Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + Positive values penalize new tokens based on whether they appear in the text so far, increasing the + model's likelihood to talk about new topics. (Default: 0) frequency_penalty: type: number format: float nullable: true description: | - Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + model's likelihood to repeat the same line verbatim. (Default: 0) mirostat: type: integer nullable: true description: | - Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + Enable Mirostat sampling for controlling perplexity. + (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) mirostat_tau: type: number format: float nullable: true description: | - Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + Controls the balance between coherence and diversity of the output. A lower value will result in more + focused and coherent text. (Default: 5.0) mirostat_eta: type: number format: float nullable: true description: | - Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + (Default: 0.1) penalize_newline: type: boolean nullable: true description: | - Penalize newlines in the output. (Default: false) + Penalize newlines in the output. (Default: true) stop: type: array nullable: true - description: Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + description: | + Sequences where the API will stop generating further tokens. The returned text will not contain the stop + sequence. items: type: string numa: @@ -421,17 +437,18 @@ components: type: integer nullable: true description: | - Sets the size of the context window used to generate the next token. + Sets the size of the context window used to generate the next token. 
(Default: 2048) num_batch: type: integer nullable: true description: | - Sets the number of batches to use for generation. (Default: 1) + Sets the number of batches to use for generation. (Default: 512) num_gpu: type: integer nullable: true description: | - The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + The number of layers to send to the GPU(s). + On macOS it defaults to 1 to enable metal support, 0 to disable. main_gpu: type: integer nullable: true @@ -446,7 +463,7 @@ components: type: boolean nullable: true description: | - Enable f16 key/value. (Default: false) + Enable f16 key/value. (Default: true) logits_all: type: boolean nullable: true @@ -471,7 +488,9 @@ components: type: integer nullable: true description: | - Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + performance. It is recommended to set this value to the number of physical CPU cores your system has + (as opposed to the logical number of cores). ResponseFormat: type: string description: | From 85f610f19c4c3ca44525aa257aa29d247c229d3a Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 10 Jul 2024 08:08:56 +0200 Subject: [PATCH 061/251] refactor: Depend on exact versions for internal 1st party dependencies (#484) --- packages/langchain/pubspec.yaml | 2 +- packages/langchain_anthropic/pubspec.yaml | 2 +- packages/langchain_chroma/pubspec.yaml | 2 +- packages/langchain_community/pubspec.yaml | 2 +- packages/langchain_firebase/pubspec.yaml | 2 +- packages/langchain_google/pubspec.yaml | 2 +- packages/langchain_mistralai/pubspec.yaml | 2 +- packages/langchain_ollama/pubspec.yaml | 2 +- packages/langchain_openai/pubspec.yaml | 2 +- packages/langchain_pinecone/pubspec.yaml | 2 +- packages/langchain_supabase/pubspec.yaml | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index e53a3859..1dfd6338 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -19,7 +19,7 @@ dependencies: characters: ^1.3.0 collection: ">=1.17.0 <1.19.0" crypto: ^3.0.3 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 700b6559..9444cd96 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -19,7 +19,7 @@ dependencies: anthropic_sdk_dart: ^0.1.0 collection: '>=1.17.0 <1.19.0' http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 rxdart: ^0.27.7 diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 8d7684ba..395c1ca6 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -19,7 +19,7 @@ environment: dependencies: chromadb: ^0.2.0+1 http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 4e2c6f7a..55b71a63 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -22,7 +22,7 @@ 
dependencies: flat_buffers: ^23.5.26 http: ^1.1.0 json_path: ^0.7.1 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 math_expressions: ^2.4.0 meta: ^1.11.0 objectbox: ^4.0.1 diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 3b01a394..8f83a8c4 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -24,7 +24,7 @@ dependencies: firebase_core: ^3.1.0 cloud_firestore: ^4.17.0 firebase_vertexai: ^0.2.2 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index eeec07e2..24e330ac 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -24,7 +24,7 @@ dependencies: googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 meta: ^1.11.0 uuid: ^4.3.3 vertex_ai: ^0.1.0+1 diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index b8025bfe..2dfd84a2 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -19,7 +19,7 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 mistralai_dart: ^0.0.3+3 diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 273a1594..13b206cb 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -19,7 +19,7 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 ollama_dart: ^0.1.2 diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 4679404e..7df76216 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -19,7 +19,7 @@ environment: dependencies: collection: ">=1.17.0 <1.19.0" http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 openai_dart: ^0.3.3+1 diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index 282943fd..987d270b 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -18,7 +18,7 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.3.3 diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index 80ab7e11..8650f10c 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -18,7 +18,7 @@ environment: dependencies: http: ^1.1.0 - langchain_core: ^0.3.3 + langchain_core: 0.3.3 meta: ^1.11.0 supabase: ^2.0.8 From cb382c5c29ef23b90c5482e8a99f2e0b30a54b68 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 10 Jul 2024 08:13:21 +0200 Subject: [PATCH 062/251] build: Expand collection package version constraints to 1.20.0 (#485) --- melos.yaml | 2 +- packages/langchain/pubspec.yaml | 2 +- packages/langchain_anthropic/pubspec.yaml | 2 +- packages/langchain_core/pubspec.yaml | 2 +- packages/langchain_firebase/pubspec.yaml | 2 +- packages/langchain_google/pubspec.yaml | 2 +- packages/langchain_mistralai/pubspec.yaml | 2 +- packages/langchain_ollama/pubspec.yaml | 2 +- 
packages/langchain_openai/pubspec.yaml | 2 +- packages/vertex_ai/pubspec.yaml | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/melos.yaml b/melos.yaml index 56d0555c..164a7618 100644 --- a/melos.yaml +++ b/melos.yaml @@ -26,7 +26,7 @@ command: async: ^2.11.0 beautiful_soup_dart: ^0.3.0 characters: ^1.3.0 - collection: '>=1.17.0 <1.19.0' + collection: '>=1.17.0 <1.20.0' cross_file: ^0.3.4+1 crypto: ^3.0.3 csv: ^6.0.0 diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 1dfd6338..9217303a 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -17,7 +17,7 @@ environment: dependencies: characters: ^1.3.0 - collection: ">=1.17.0 <1.19.0" + collection: ">=1.17.0 <1.20.0" crypto: ^3.0.3 langchain_core: 0.3.3 meta: ^1.11.0 diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 9444cd96..33e625f7 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -17,7 +17,7 @@ environment: dependencies: anthropic_sdk_dart: ^0.1.0 - collection: '>=1.17.0 <1.19.0' + collection: ">=1.17.0 <1.20.0" http: ^1.1.0 langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index 38081129..b1f7f159 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -17,7 +17,7 @@ environment: dependencies: async: ^2.11.0 - collection: '>=1.17.0 <1.19.0' + collection: ">=1.17.0 <1.20.0" cross_file: ^0.3.4+1 crypto: ^3.0.3 meta: ^1.11.0 diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 8f83a8c4..34805499 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -18,7 +18,7 @@ environment: flutter: ">=3.19.0" dependencies: - collection: ">=1.17.0 <1.19.0" + collection: ">=1.17.0 <1.20.0" firebase_app_check: ^0.3.0 firebase_auth: ^5.1.0 firebase_core: ^3.1.0 diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 24e330ac..da0082a6 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -17,7 +17,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.19.0" + collection: ">=1.17.0 <1.20.0" fetch_client: ^1.0.2 gcloud: ^0.8.12 google_generative_ai: 0.4.3 diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 2dfd84a2..3c725d01 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -17,7 +17,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.19.0" + collection: ">=1.17.0 <1.20.0" http: ^1.1.0 langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 13b206cb..0214a6c7 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -17,7 +17,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.19.0" + collection: ">=1.17.0 <1.20.0" http: ^1.1.0 langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 7df76216..4b628c54 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -17,7 +17,7 @@ environment: sdk: ">=3.0.0 
<4.0.0" dependencies: - collection: ">=1.17.0 <1.19.0" + collection: ">=1.17.0 <1.20.0" http: ^1.1.0 langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index 3454b32d..ccfa07c8 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -17,7 +17,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - collection: '>=1.17.0 <1.19.0' + collection: ">=1.17.0 <1.20.0" googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 From 69b66403bd5b663052e5bd61c180135c6a133718 Mon Sep 17 00:00:00 2001 From: Ganesh Date: Fri, 12 Jul 2024 03:08:26 +0530 Subject: [PATCH 063/251] feat: Add support for ChatOllamaTools (#288) Co-authored-by: David Miguel --- docs/_sidebar.md | 1 + .../chat_models/integrations/ollama_tools.md | 273 ++++++++++++++++ .../integrations/ollama_tools.dart | 226 ++++++++++++++ .../langchain_core/lib/src/tools/base.dart | 18 ++ .../lib/src/chat_models/chat_models.dart | 6 +- .../{ => chat_ollama}/chat_ollama.dart | 8 +- .../{ => chat_ollama}/mappers.dart | 0 .../chat_models/{ => chat_ollama}/types.dart | 3 +- .../chat_ollama_tools/chat_ollama_tools.dart | 294 ++++++++++++++++++ .../chat_ollama_tools/mappers.dart | 1 + .../chat_models/chat_ollama_tools/types.dart | 70 +++++ .../chat_models/chat_ollama_tools_test.dart | 207 ++++++++++++ 12 files changed, 1101 insertions(+), 6 deletions(-) create mode 100644 docs/modules/model_io/models/chat_models/integrations/ollama_tools.md create mode 100644 examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart rename packages/langchain_ollama/lib/src/chat_models/{ => chat_ollama}/chat_ollama.dart (97%) rename packages/langchain_ollama/lib/src/chat_models/{ => chat_ollama}/mappers.dart (100%) rename packages/langchain_ollama/lib/src/chat_models/{ => chat_ollama}/types.dart (99%) create mode 100644 packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart create mode 100644 packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart create mode 100644 packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart create mode 100644 packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart diff --git a/docs/_sidebar.md b/docs/_sidebar.md index 2637ce9b..5296fd96 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -62,6 +62,7 @@ - [GCP Vertex AI](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md) - [Google AI](/modules/model_io/models/chat_models/integrations/googleai.md) - [Ollama](/modules/model_io/models/chat_models/integrations/ollama.md) + - [OllamaTools](/modules/model_io/models/chat_models/integrations/ollama_tools.md) - [Mistral AI](/modules/model_io/models/chat_models/integrations/mistralai.md) - [OpenRouter](/modules/model_io/models/chat_models/integrations/open_router.md) - [Together AI](/modules/model_io/models/chat_models/integrations/together_ai.md) diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama_tools.md b/docs/modules/model_io/models/chat_models/integrations/ollama_tools.md new file mode 100644 index 00000000..17334a5b --- /dev/null +++ b/docs/modules/model_io/models/chat_models/integrations/ollama_tools.md @@ -0,0 +1,273 @@ +# ChatOllamaTools + +LangChain.dart offers an experimental wrapper around open source models run locally via [Ollama](https://ollama.ai) that enables [tool calling capabilities](/modules/model_io/models/chat_models/how_to/tools.md). 
+ +> Warning: This is an experimental wrapper that attempts to add tool calling support to models that do not support it natively. Use with caution. + +More powerful and capable models will perform better with complex schema and/or multiple tools. For a complete list of supported models, see the [Ollama model library](https://ollama.ai/library). The examples below use Google's [Gemma2 9B model](https://ollama.com/library/gemma2) running locally. + +## Setup + +Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance: + +1. Download and install [Ollama](https://ollama.ai) +2. Fetch a model via `ollama pull <model>` + * e.g., for Gemma 2: `ollama pull gemma2` + +## Usage + +You can use `ChatOllamaTools` in a similar way to a regular `ChatOllama` wrapper. The main difference is that `ChatOllamaToolsOptions` accepts: +- `options`: the usual `ChatOllamaOptions` options +- `tools`: the list with the definition of the tools the model can call +- `toolChoice`: how the model should respond to tool calls +- `toolsSystemPromptTemplate`: the prompt template used to inform the model about the available tools + +`ChatOllamaTools` follows the standard [LangChain tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). + +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, +); +final model = ChatOllamaTools( + defaultOptions: ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: 'gemma2', + temperature: 0, + ), + tools: [tool], + ), +); +final res = await model.invoke( + PromptValue.string("What's the weather in Barcelona?"), +); +print(res); +// ChatResult{ +// id: 51ede9a1-7ab7-4c81-803e-cfe237eb92ae, +// output: AIChatMessage{ +// content: , +// toolCalls: [ +// AIChatMessageToolCall{ +// id: 42139039-9251-4e1b-9f47-21f24da65be9, +// name: get_current_weather, +// arguments: {location: Barcelona, ES, unit: celsius}, +// } +// ], +// }, +// finishReason: FinishReason.stop, +// metadata: { +// model: gemma2, +// created_at: 2024-07-11T15:44:56.893216Z, +// done: true, +// total_duration: 2900101792, +// load_duration: 41286000, +// prompt_eval_count: 327, +// prompt_eval_duration: 453557000, +// eval_count: 57, +// eval_duration: 2401129000 +// }, +// usage: LanguageModelUsage{ +// promptTokens: 327, +// promptBillableCharacters: null, +// responseTokens: 57, +// responseBillableCharacters: null, +// totalTokens: 384 +// } +// } +``` + +If you want to extract only the tool calls, you can use the `ToolsOutputParser`: + +```dart +final chain = model.pipe(ToolsOutputParser()); +final res2 = await chain.invoke( + PromptValue.string("What's the weather in Barcelona and Amsterdam?"), +); +print(res2); +// [ +// ParsedToolCall{ +// id: b62a9051-0193-4115-9bac-362005c40c2d, +// name: get_current_weather, +// arguments: {location: Barcelona, ES, unit: celsius}, +// }, +// ParsedToolCall{ +// id: 442ff44c-2a8e-4e16-9fc5-ddaf586a37ce, +// name: get_current_weather, +// arguments: {location: Amsterdam, NL, unit: celsius},
+// } +// ] + +As you can see, `ChatOllamaTools` supports calling multiple tools in a single request. + +If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter: + +```dart +final res3 = await chain.invoke( + PromptValue.string("What's the weather in Barcelona and Amsterdam?"), + options: ChatOllamaToolsOptions( + toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), + ), +); +``` + +Note: streaming is not supported at the moment. + +## Customizing the system prompt template + +Behind the scenes, `ChatOllamaTools` uses Ollama's JSON mode to restrict output to JSON, and passes tool schemas to the prompt as JSON schemas. + +You can find the default system prompt in `ChatOllamaToolsOptions.defaultToolsSystemPromtTemplate`. + +Because different models have different strengths, it may be helpful to pass in your own system prompt. Here's an example of how you can customize the system prompt template: + +```dart +const toolSystemPromptTemplate = ''' +You have access to these tools: +{tools} + +Based on the user input, select {tool_choice} from the available tools. + +Respond with a JSON containing a list of tool call objects. +The tool call objects should have two properties: +- "tool_name": The name of the selected tool (string) +- "tool_input": A JSON string with the input for the tool matching the tool's input schema + +Example response format: +```json +{{ + "tool_calls": [ + {{ + "tool_name": "tool_name", + "tool_input": "{{"param1":"value1","param2":"value2"}}" + }} + ] +}} + +Ensure your response is valid JSON and follows this exact format.'''; + +final model = ChatOllamaTools( + defaultOptions: const ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: 'gemma2', + temperature: 0, + ), + tools: [tool], + toolsSystemPromptTemplate: toolSystemPromptTemplate, + ), +); +``` + +Your prompt template should contain the following placeholders: +- `{tools}`: where the list of available tools will be inserted +- `{tool_choice}`: where the instruction to select a certain tool will be inserted + +The model should return a JSON like: +```json +{ + "tool_calls": [ + { + "tool_name": "tool_name", + "tool_input": "{\"param1\":\"value1\",\"param2\":\"value2\"}" + } + ] +} +``` + +## Example: extracting structured data + +A useful application of tool calling is extracting structured data from unstructured text. In the following example, we use a tool to extract the names, heights, and hair colors of people mentioned in a passage.
+ +```dart +const tool = ToolSpec( + name: 'information_extraction', + description: 'Extracts the relevant information from the passage', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'people': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'description': 'The name of a person', + }, + 'height': { + 'type': 'number', + 'description': 'The height of the person in cm', + }, + 'hair_color': { + 'type': 'string', + 'description': 'The hair color of the person', + 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], + }, + }, + 'required': ['name', 'height', 'hair_color'], + }, + }, + }, + 'required': ['people'], + }, +); + +final model = ChatOllamaTools( + defaultOptions: ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: 'gemma2', + temperature: 0, + ), + tools: [tool], + toolChoice: ChatToolChoice.forced(name: tool.name), + ), +); + +final promptTemplate = ChatPromptTemplate.fromTemplate(''' +Extract and save the relevant entities mentioned in the following passage together with their properties. + +Passage: +{input}'''); + +final chain = Runnable.getMapFromInput() + .pipe(promptTemplate) + .pipe(model) + .pipe(ToolsOutputParser()); + +final res = await chain.invoke( + 'Alex is 5 feet tall. ' + 'Claudia is 1 foot taller than Alex and jumps higher than him. ' + 'Claudia has orange hair and Alex is blonde.', +); +final extractedData = res.first.arguments; +print(extractedData); +// { +// people: [ +// { +// name: Alex, +// height: 152, +// hair_color: blonde +// }, +// { +// name: Claudia, +// height: 183, +// hair_color: orange +// } +// ] +// } +``` diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart new file mode 100644 index 00000000..486b8c1b --- /dev/null +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart @@ -0,0 +1,226 @@ +// ignore_for_file: avoid_print, avoid_redundant_argument_values +import 'package:langchain/langchain.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +void main(final List arguments) async { + await _tools(); + await _customizingSystemPrompt(); + await _extraction(); +} + +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, +); + +Future _tools() async { + final model = ChatOllamaTools( + defaultOptions: const ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: 'gemma2', + temperature: 0, + ), + tools: [tool], + ), + ); + + final res = await model.invoke( + PromptValue.string("What's the weather in Barcelona?"), + ); + print(res); + // ChatResult{ + // id: 51ede9a1-7ab7-4c81-803e-cfe237eb92ae, + // output: AIChatMessage{ + // content: , + // toolCalls: [ + // AIChatMessageToolCall{ + // id: 42139039-9251-4e1b-9f47-21f24da65be9, + // name: get_current_weather, + // arguments: {location: Barcelona, ES, unit: celsius}, + // } + // ], + // }, + // finishReason: FinishReason.stop, + // metadata: { + // model: gemma2, + // created_at: 2024-07-11T15:44:56.893216Z, + // done: true, + // total_duration: 2900101792, + // load_duration: 41286000, + // prompt_eval_count: 327, + // prompt_eval_duration: 453557000, + // eval_count: 57, + // eval_duration: 2401129000 + // }, + // usage: LanguageModelUsage{ + // promptTokens: 327, + // promptBillableCharacters: null, + // responseTokens: 57, + // responseBillableCharacters: null, + // totalTokens: 384 + // } + // } + + final chain = model.pipe(ToolsOutputParser()); + final res2 = await chain.invoke( + PromptValue.string("What's the weather in Barcelona and Amsterdam?"), + ); + print(res2); + // [ + // ParsedToolCall{ + // id: b62a9051-0193-4115-9bac-362005c40c2d, + // name: get_current_weather, + // arguments: {location: Barcelona, ES, unit: celsius}, + // }, + // ParsedToolCall{ + // id: 442ff44c-2a8e-4e16-9fc5-ddaf586a37ce, + // name: get_current_weather, + // arguments: {location: Amsterdam, NL, unit: celsius}, + // } + // ] + + final res3 = await chain.invoke( + PromptValue.string("What's the weather in Barcelona and Amsterdam?"), + options: ChatOllamaToolsOptions( + toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), + ), + ); + print(res3); +} + +Future _customizingSystemPrompt() async { + const toolSystemPromptTemplate = ''' +You have access to these tools: +{tools} + +Based on the user input, select {tool_choice} from the available tools. + +Respond with a JSON containing a list of tool call objects. 
+The tool call objects should have two properties: +- "tool_name": The name of the selected tool (string) +- "tool_input": A JSON string with the input for the tool matching the tool's input schema + +Example response format: +```json +{{ + "tool_calls": [ + {{ + "tool_name": "tool_name", + "tool_input": "{{"param1":"value1","param2":"value2"}}" + }} + ] +}} +``` + +Ensure your response is valid JSON and follows this exact format.'''; + + final model = ChatOllamaTools( + defaultOptions: const ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: 'gemma2', + temperature: 0, + ), + tools: [tool], + toolsSystemPromptTemplate: toolSystemPromptTemplate, + ), + ); + + final res = await model.invoke( + PromptValue.string("What's the weather in Barcelona?"), + ); + print(res); +} + +Future _extraction() async { + const tool = ToolSpec( + name: 'information_extraction', + description: 'Extracts the relevant information from the passage', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'people': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'description': 'The name of a person', + }, + 'height': { + 'type': 'number', + 'description': 'The height of the person in cm', + }, + 'hair_color': { + 'type': 'string', + 'description': 'The hair color of the person', + 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], + }, + }, + 'required': ['name', 'height', 'hair_color'], + }, + }, + }, + 'required': ['people'], + }, + ); + + final model = ChatOllamaTools( + defaultOptions: ChatOllamaToolsOptions( + options: const ChatOllamaOptions( + model: 'gemma2', + temperature: 0, + ), + tools: const [tool], + toolChoice: ChatToolChoice.forced(name: tool.name), + ), + ); + + final promptTemplate = ChatPromptTemplate.fromTemplate(''' +Extract and save the relevant entities mentioned in the following passage together with their properties. + +Passage: +{input}'''); + + final chain = Runnable.getMapFromInput() + .pipe(promptTemplate) + .pipe(model) + .pipe(ToolsOutputParser()); + + final res = await chain.invoke( + 'Alex is 5 feet tall. ' + 'Claudia is 1 foot taller than Alex and jumps higher than him. ' + 'Claudia has orange hair and Alex is blonde.', + ); + final extractedData = res.first.arguments; + print(extractedData); + // { + // people: [ + // { + // name: Alex, + // height: 152, + // hair_color: blonde + // }, + // { + // name: Claudia, + // height: 183, + // hair_color: orange + // } + // ] + // } +} diff --git a/packages/langchain_core/lib/src/tools/base.dart b/packages/langchain_core/lib/src/tools/base.dart index 37f9f9d2..f6e8dd29 100644 --- a/packages/langchain_core/lib/src/tools/base.dart +++ b/packages/langchain_core/lib/src/tools/base.dart @@ -73,6 +73,15 @@ ToolSpec{ } '''; } + + /// Converts the tool spec to a JSON-serializable map. 
+ Map toJson() { + return { + 'name': name, + 'description': description, + 'inputJsonSchema': inputJsonSchema, + }; + } } /// {@template tool} @@ -214,6 +223,15 @@ abstract base class Tool name.hashCode ^ description.hashCode ^ inputJsonSchema.hashCode; + + @override + Map toJson() { + return { + 'name': name, + 'description': description, + 'inputJsonSchema': inputJsonSchema, + }; + } } /// {@template tool_func} diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_models.dart b/packages/langchain_ollama/lib/src/chat_models/chat_models.dart index 731f4e59..4b826ef4 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_models.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_models.dart @@ -1,2 +1,4 @@ -export 'chat_ollama.dart'; -export 'types.dart'; +export 'chat_ollama/chat_ollama.dart'; +export 'chat_ollama/types.dart'; +export 'chat_ollama_tools/chat_ollama_tools.dart'; +export 'chat_ollama_tools/types.dart' hide conversationalResponseTool; diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart similarity index 97% rename from packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart rename to packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart index 64a5cdae..7dbed939 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart @@ -5,7 +5,7 @@ import 'package:langchain_tiktoken/langchain_tiktoken.dart'; import 'package:ollama_dart/ollama_dart.dart'; import 'package:uuid/uuid.dart'; -import '../llms/mappers.dart'; +import '../../llms/mappers.dart'; import 'mappers.dart'; import 'types.dart'; @@ -31,6 +31,8 @@ import 'types.dart'; /// /// - [Ollama API docs](https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion) /// +/// If you need to use tools, consider using the [ChatOllamaTools] instead. +/// /// ### Setup /// /// 1. Download and install [Ollama](https://ollama.ai) @@ -218,8 +220,8 @@ class ChatOllama extends BaseChatModel { return GenerateChatCompletionRequest( model: options?.model ?? defaultOptions.model ?? throwNullModelError(), messages: messages.toMessages(), - format: options?.format?.toResponseFormat(), - keepAlive: options?.keepAlive, + format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), + keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, stream: stream, options: RequestOptions( numKeep: options?.numKeep ?? 
defaultOptions.numKeep, diff --git a/packages/langchain_ollama/lib/src/chat_models/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart similarity index 100% rename from packages/langchain_ollama/lib/src/chat_models/mappers.dart rename to packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart diff --git a/packages/langchain_ollama/lib/src/chat_models/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart similarity index 99% rename from packages/langchain_ollama/lib/src/chat_models/types.dart rename to packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart index 4db56920..67598acb 100644 --- a/packages/langchain_ollama/lib/src/chat_models/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart @@ -1,6 +1,7 @@ import 'package:langchain_core/chat_models.dart'; -import '../llms/types.dart'; +import '../../../langchain_ollama.dart'; +import '../../llms/types.dart'; /// {@template chat_ollama_options} /// Options to pass into ChatOllama. diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart new file mode 100644 index 00000000..889e7c87 --- /dev/null +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart @@ -0,0 +1,294 @@ +import 'dart:convert'; + +import 'package:collection/collection.dart' show IterableExtension; +import 'package:http/http.dart' as http; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_tiktoken/langchain_tiktoken.dart'; +import 'package:ollama_dart/ollama_dart.dart'; +import 'package:uuid/uuid.dart'; + +import 'mappers.dart'; +import 'types.dart'; + +/// Wrapper around [Ollama](https://ollama.ai) Chat API that enables tool +/// calling capabilities. +/// +/// Warning: This is an experimental wrapper that attempts to add tool calling +/// support to models that do not support it natively. More powerful and +/// capable models will perform better with complex schema and/or multiple +/// tools. Use with caution. +/// +/// Example: +/// ```dart +/// const tool = ToolSpec( +/// name: 'get_current_weather', +/// description: 'Get the current weather in a given location', +/// inputJsonSchema: { +/// 'type': 'object', +/// 'properties': { +/// 'location': { +/// 'type': 'string', +/// 'description': 'The city and state, e.g. San Francisco, CA', +/// }, +/// 'unit': { +/// 'type': 'string', +/// 'enum': ['celsius', 'fahrenheit'], +/// }, +/// }, +/// 'required': ['location'], +/// }, +/// ); +/// final chatModel = ChatOllamaTools( +/// defaultOptions: ChatOllamaToolsOptions( +/// options: ChatOllamaOptions(model: 'llama3:8b'), +/// tools: [tool], +/// ), +/// ); +/// final prompt = PromptValue.string("What's the weather in Bangalore, India?"); +/// final res = await chatModel.invoke(prompt); +/// ``` +/// +/// If you don't need to use tools, use [ChatOllama] instead. +/// +/// ### Setup +/// +/// 1. Download and install [Ollama](https://ollama.ai) +/// 2. Fetch a model via `ollama pull <model>` +/// * e.g., for Llama 3: `ollama pull llama3` +/// +/// ### Ollama base URL +/// +/// By default, [ChatOllamaTools] uses 'http://localhost:11434/api' as base URL +/// (default Ollama API URL). But if you are running Ollama on a different +/// one, you can override it using the [baseUrl] parameter.
+class ChatOllamaTools extends BaseChatModel<ChatOllamaToolsOptions> {
+  /// Create a new [ChatOllamaTools] instance.
+  ///
+  /// Main configuration options:
+  /// - `baseUrl`: the base URL of Ollama API.
+  /// - [ChatOllamaTools.defaultOptions]
+  ///
+  /// Advanced configuration options:
+  /// - `headers`: global headers to send with every request. You can use
+  ///   this to set custom headers, or to override the default headers.
+  /// - `queryParams`: global query parameters to send with every request. You
+  ///   can use this to set custom query parameters.
+  /// - `client`: the HTTP client to use. You can set your own HTTP client if
+  ///   you need further customization (e.g. to use a Socks5 proxy).
+  /// - [ChatOllama.encoding]
+  ChatOllamaTools({
+    final String baseUrl = 'http://localhost:11434/api',
+    final Map<String, String>? headers,
+    final Map<String, dynamic>? queryParams,
+    final http.Client? client,
+    super.defaultOptions = const ChatOllamaToolsOptions(
+      options: ChatOllamaOptions(model: 'llama3'),
+    ),
+    this.encoding = 'cl100k_base',
+  }) : _client = OllamaClient(
+          baseUrl: baseUrl,
+          headers: headers,
+          queryParams: queryParams,
+          client: client,
+        );
+
+  /// A client for interacting with Ollama API.
+  final OllamaClient _client;
+
+  /// The encoding to use by tiktoken when [tokenize] is called.
+  ///
+  /// Ollama does not provide any API to count tokens, so we use tiktoken
+  /// to get an estimation of the number of tokens in a prompt.
+  String encoding;
+
+  /// A UUID generator.
+  late final Uuid _uuid = const Uuid();
+
+  @override
+  String get modelType => 'chat-ollama-tools';
+
+  @override
+  Future<ChatResult> invoke(
+    PromptValue input, {
+    ChatOllamaToolsOptions? options,
+  }) async {
+    final id = _uuid.v4();
+    final completion = await _client.generateChatCompletion(
+      request: _generateCompletionRequest(input, options),
+    );
+    final result = completion.toChatResult(id);
+    return _parseResult(result);
+  }
+
+  /// Creates a [GenerateChatCompletionRequest] from the given input.
+  GenerateChatCompletionRequest _generateCompletionRequest(
+    final PromptValue input,
+    final ChatOllamaToolsOptions? toolsOptions, {
+    final bool stream = false,
+  }) {
+    final messages = _formatPrompt(input, toolsOptions).toChatMessages();
+    final options = toolsOptions?.options;
+    final defaultOptions =
+        this.defaultOptions.options ?? const ChatOllamaOptions();
+    return GenerateChatCompletionRequest(
+      model: options?.model ?? defaultOptions.model ?? throwNullModelError(),
+      messages: messages.toMessages(),
+      format: ResponseFormat.json,
+      keepAlive: options?.keepAlive ?? defaultOptions.keepAlive,
+      stream: stream,
+      options: RequestOptions(
+        numKeep: options?.numKeep ?? defaultOptions.numKeep,
+        seed: options?.seed ?? defaultOptions.seed,
+        numPredict: options?.numPredict ?? defaultOptions.numPredict,
+        topK: options?.topK ?? defaultOptions.topK,
+        topP: options?.topP ?? defaultOptions.topP,
+        tfsZ: options?.tfsZ ?? defaultOptions.tfsZ,
+        typicalP: options?.typicalP ?? defaultOptions.typicalP,
+        repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN,
+        temperature: options?.temperature ?? defaultOptions.temperature,
+        repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty,
+        presencePenalty:
+            options?.presencePenalty ?? defaultOptions.presencePenalty,
+        frequencyPenalty:
+            options?.frequencyPenalty ?? defaultOptions.frequencyPenalty,
+        mirostat: options?.mirostat ?? defaultOptions.mirostat,
+        mirostatTau: options?.mirostatTau ?? defaultOptions.mirostatTau,
+        mirostatEta: options?.mirostatEta ??
defaultOptions.mirostatEta, + penalizeNewline: + options?.penalizeNewline ?? defaultOptions.penalizeNewline, + stop: options?.stop ?? defaultOptions.stop, + numa: options?.numa ?? defaultOptions.numa, + numCtx: options?.numCtx ?? defaultOptions.numCtx, + numBatch: options?.numBatch ?? defaultOptions.numBatch, + numGpu: options?.numGpu ?? defaultOptions.numGpu, + mainGpu: options?.mainGpu ?? defaultOptions.mainGpu, + lowVram: options?.lowVram ?? defaultOptions.lowVram, + f16Kv: options?.f16KV ?? defaultOptions.f16KV, + logitsAll: options?.logitsAll ?? defaultOptions.logitsAll, + vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly, + useMmap: options?.useMmap ?? defaultOptions.useMmap, + useMlock: options?.useMlock ?? defaultOptions.useMlock, + numThread: options?.numThread ?? defaultOptions.numThread, + ), + ); + } + + PromptValue _formatPrompt( + final PromptValue input, + final ChatOllamaToolsOptions? options, + ) { + final toolsSystemPromptTemplate = options?.toolsSystemPromptTemplate ?? + defaultOptions.toolsSystemPromptTemplate ?? + ChatOllamaToolsOptions.defaultToolsSystemPromtTemplate; + final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, toolsSystemPromptTemplate), + (ChatMessageType.messagesPlaceholder, 'input'), + ]); + final toolChoice = options?.toolChoice ?? defaultOptions.toolChoice; + final availableTools = options?.tools ?? defaultOptions.tools; + final tools = switch (toolChoice) { + // If toolChoice is auto, we include all the tools + ChatToolChoiceAuto() || null => [ + ...?availableTools, + conversationalResponseTool, + ], + // If toolChoice is none, we include only the conversational response tool + ChatToolChoiceNone() => [conversationalResponseTool], + // If toolChoice is required, we include only the user specified tools + ChatToolChoiceRequired() => availableTools!, + // If toolChoice is forced, we include only the forced tool + final ChatToolChoiceForced f => [ + availableTools!.firstWhere((t) => t.name == f.name), + ] + }; + final toolsJsonMap = json.encode( + tools.map((tool) => tool.toJson()).toList(growable: false), + ); + final toolChoiceInstructions = switch (toolChoice) { + ChatToolChoiceNone() => '`${conversationalResponseTool.name}` tool', + ChatToolChoiceAuto() || + ChatToolChoiceRequired() || + null => + 'one or more tools', + final ChatToolChoiceForced f => '`${f.name}` tool', + }; + return promptTemplate.formatPrompt({ + 'tools': toolsJsonMap, + 'tool_choice': toolChoiceInstructions, + 'input': input.toChatMessages(), + }); + } + + Future _parseResult(final ChatResult result) async { + try { + final output = result.output.content; + final outputMap = json.decode(output) as Map; + final toolCalls = (outputMap['tool_calls'] as List).map((t) { + final tool = t as Map; + final toolInput = tool['tool_input']; + final toolInputMap = json.decode(toolInput) as Map; + return AIChatMessageToolCall( + id: _uuid.v4(), + name: tool['tool_name'].toString(), + arguments: toolInputMap, + argumentsRaw: toolInput, + ); + }).toList(growable: false); + + final conversationalResponseToolCall = toolCalls + .firstWhereOrNull((t) => t.name == conversationalResponseTool.name); + final content = conversationalResponseToolCall != null + ? 
await conversationalResponseTool.invoke( + conversationalResponseTool.getInputFromJson( + conversationalResponseToolCall.arguments, + ), + ) + : ''; + final otherToolCalls = toolCalls + .where((t) => t.name != conversationalResponseTool.name) + .toList(growable: false); + + return ChatResult( + id: result.id, + output: AIChatMessage( + content: content, + toolCalls: otherToolCalls, + ), + finishReason: result.finishReason, + metadata: result.metadata, + usage: result.usage, + ); + } catch (e) { + throw Exception( + 'Model did not respond in valid json string format, ' + 'try improving your prompt, instruct to "respond in JSON"', + ); + } + } + + /// Tokenizes the given prompt using tiktoken. + /// + /// Currently Ollama does not provide a tokenizer for the models it supports. + /// So we use tiktoken and [encoding] model to get an approximation + /// for counting tokens. Mind that the actual tokens will be totally + /// different from the ones used by the Ollama model. + /// + /// If an encoding model is specified in [encoding] field, that + /// encoding is used instead. + /// + /// - [promptValue] The prompt to tokenize. + @override + Future> tokenize( + PromptValue promptValue, { + ChatOllamaToolsOptions? options, + }) async { + final encoding = getEncoding(this.encoding); + return encoding.encode(promptValue.toString()); + } + + @override + void close() { + _client.endSession(); + } +} diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart new file mode 100644 index 00000000..3a9ebb5a --- /dev/null +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart @@ -0,0 +1 @@ +export '../chat_ollama/mappers.dart'; diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart new file mode 100644 index 00000000..9447a51f --- /dev/null +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart @@ -0,0 +1,70 @@ +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; + +import '../chat_ollama/types.dart'; +import 'chat_ollama_tools.dart'; + +export '../chat_ollama/types.dart'; + +/// {@template chat_ollama_tools_options} +/// Options to pass into [ChatOllamaTools]. +/// {@endtemplate} +class ChatOllamaToolsOptions extends ChatModelOptions { + /// {@macro chat_ollama_tools_options} + const ChatOllamaToolsOptions({ + this.options, + super.tools, + super.toolChoice, + this.toolsSystemPromptTemplate, + }); + + /// [ChatOllamaOptions] to pass into Ollama. + final ChatOllamaOptions? options; + + /// Prompt template for the system message where the model is instructed to + /// use the tools. + /// + /// The following placeholders can be used: + /// - `{tools}`: The list of tools available to the model. + /// - `{tool_choice}`: the tool choice the model must always select. + /// + /// If not provided, [defaultToolsSystemPromtTemplate] will be used. + final String? toolsSystemPromptTemplate; + + /// Default [toolsSystemPromptTemplate]. + static const String defaultToolsSystemPromtTemplate = ''' +You have access to these tools: +{tools} + +Based on the user input, select {tool_choice} from the available tools. + +Respond with a JSON containing a list of tool call objects. 
+The tool call objects should have two properties: +- "tool_name": The name of the selected tool (string) +- "tool_input": A JSON string with the input for the tool matching the tool's input schema + +Example response format: +```json +{{ + "tool_calls": [ + {{ + "tool_name": "tool_name", + "tool_input": "{{"param1":"value1","param2":"value2"}}" + }} + ] +}} +``` + +Ensure your response is valid JSON and follows this exact format. +'''; +} + +/// Default tool called if model decides no other tools should be called +/// for a given query. +final conversationalResponseTool = StringTool.fromFunction( + name: '_conversational_response', + description: + 'Respond conversationally if no other tools should be called for a given query.', + inputDescription: 'Conversational response to the user', + func: (input) => input, +); diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart new file mode 100644 index 00000000..7204591a --- /dev/null +++ b/packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart @@ -0,0 +1,207 @@ +import 'dart:io'; + +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/language_models.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; +import 'package:test/test.dart'; + +void main() { + group( + 'ChatOllamaTools tests', + skip: Platform.environment.containsKey('CI'), + () { + const defaultModel = 'gemma2'; + late ChatOllamaTools model; + const tool1 = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + const tool2 = ToolSpec( + name: 'get_historic_weather', + description: 'Get the historic weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + setUp(() async { + model = ChatOllamaTools( + defaultOptions: const ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: defaultModel, + keepAlive: 2, + ), + tools: [tool1, tool2], + ), + ); + }); + + tearDown(() { + model.close(); + }); + + test('Test single tool call', () async { + final res = await model.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + ); + expect(res.output.toolCalls, hasLength(1)); + final toolCall = res.output.toolCalls.first; + expect(toolCall.name, 'get_current_weather'); + expect(toolCall.argumentsRaw, isNotEmpty); + expect(toolCall.arguments, isNotEmpty); + expect(toolCall.arguments['location'], contains('Vellore')); + expect(toolCall.arguments['unit'], 'celsius'); + expect(res.finishReason, FinishReason.stop); + }); + + test('Test multi tool call', () async { + final res = await model.invoke( + PromptValue.string( + "What's the weather in Vellore, India and in Barcelona, Spain?", + ), + ); + expect(res.output.toolCalls, hasLength(2)); + final toolCall1 = res.output.toolCalls.first; + expect(toolCall1.name, 'get_current_weather'); + expect(toolCall1.argumentsRaw, isNotEmpty); + expect(toolCall1.arguments, isNotEmpty); + expect(toolCall1.arguments['location'], 'Vellore, India'); + expect(toolCall1.arguments['unit'], 'celsius'); + final toolCall2 = res.output.toolCalls.last; + expect(toolCall2.name, 'get_current_weather'); + expect(toolCall2.argumentsRaw, isNotEmpty); + expect(toolCall2.arguments, isNotEmpty); + expect(toolCall2.arguments['location'], 'Barcelona, Spain'); + expect(toolCall2.arguments['unit'], 'celsius'); + expect(res.finishReason, FinishReason.stop); + }); + + test('Test ChatToolChoice.none', () async { + final res = await model.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: const ChatOllamaToolsOptions( + options: ChatOllamaOptions(model: defaultModel), + tools: [tool1], + toolChoice: ChatToolChoice.none, + ), + ); + expect(res.output.toolCalls, isEmpty); + expect(res.output.content, isNotEmpty); + }); + + test('Test ChatToolChoice.forced', () async { + final res = await model.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: ChatOllamaToolsOptions( + options: const ChatOllamaOptions(model: defaultModel), + tools: const [tool1, tool2], + toolChoice: ChatToolChoice.forced(name: tool2.name), + ), + ); + expect(res.output.toolCalls, hasLength(1)); + final toolCall = res.output.toolCalls.first; + expect(toolCall.name, tool2.name); + }); + + test( + 'Should throw exception if model did not respond in right JSON string format', + () async { + const toolOptions = ChatOllamaToolsOptions( + options: ChatOllamaOptions(model: defaultModel), + tools: [tool1], + toolsSystemPromptTemplate: + 'You have access to the following tools: {tools} ' + 'You must always select one of the above tools ' + 'and respond in plain text.', + ); + + expect( + () async => model.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: toolOptions, + ), + throwsException, + ); + }, + ); + + test( + 'Should return content if no other tools should be called for a given query', + () async { + const toolOptions = ChatOllamaToolsOptions( + options: ChatOllamaOptions(model: defaultModel), + tools: [], + ); + final res = await model.invoke( + PromptValue.string('Do you know the weather in Vellore, India?'), + options: 
toolOptions, + ); + expect(res.output.toolCalls, isEmpty); + expect(res.output.content, isNotEmpty); + }); + + test( + 'Should throw error if toolSystemPromptTemplate not in right format', + () async { + const toolOptions = ChatOllamaToolsOptions( + options: ChatOllamaOptions(model: defaultModel), + toolsSystemPromptTemplate: + 'You have access to the following tools: tools} ' + 'You must always select one of the above tools', + ); + expect( + () async => model.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: toolOptions, + ), + throwsException, + ); + }, + ); + + test('Test ChatOllamaToolsOptions', () async { + const toolOptions = ChatOllamaToolsOptions( + options: ChatOllamaOptions( + model: defaultModel, + ), + tools: [tool1], + toolsSystemPromptTemplate: 'toolSystemPromptTemplate', + ); + + expect(toolOptions.options?.model, defaultModel); + expect( + toolOptions.toolsSystemPromptTemplate, + 'toolSystemPromptTemplate', + ); + expect(toolOptions.tools![0], tool1); + }); + }, + ); +} From 079b02a65413bb8f67736932bd093eea8a8813e7 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Thu, 11 Jul 2024 23:42:21 +0200 Subject: [PATCH 064/251] docs: Update README.md --- packages/langchain/README.md | 1 + packages/langchain_ollama/README.md | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 652ef1be..51793fa8 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -110,6 +110,7 @@ The following integrations are available in LangChain.dart: | [ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) | | [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://ollama.ai) | | [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | | [Ollama Chat API](https://ollama.ai) | +| [ChatOllamaTools](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama_tools) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) with tool-calling capabilities | | [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) 
| | [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) | diff --git a/packages/langchain_ollama/README.md b/packages/langchain_ollama/README.md index e6d6d884..0c37e80f 100644 --- a/packages/langchain_ollama/README.md +++ b/packages/langchain_ollama/README.md @@ -13,7 +13,8 @@ - LLMs: * `Ollama`: wrapper around Ollama Completions API. - Chat models: - * `ChatOllama`: wrapper around Ollama Completions API in a chat-like fashion. + * `ChatOllama`: wrapper around Ollama Chat API in a chat-like fashion. + * `ChatOllamaTools`: Wrapper around Ollama Chat API that enables tool calling capabilities. - Embeddings: * `OllamaEmbeddings`: wrapper around Ollama Embeddings API. From be1cf3c977c8a35a16c70dae4788cd1a69955ad4 Mon Sep 17 00:00:00 2001 From: Konstantin S Date: Sat, 13 Jul 2024 01:45:52 +0400 Subject: [PATCH 065/251] feat: Add support for Ollama version and model info (#488) Co-authored-by: David Miguel --- packages/ollama_dart/README.md | 10 + .../ollama_dart/lib/src/generated/client.dart | 21 + .../lib/src/generated/schema/model_info.dart | 6 + .../generated/schema/model_information.dart | 61 +++ .../lib/src/generated/schema/schema.dart | 2 + .../src/generated/schema/schema.freezed.dart | 459 +++++++++++++++++- .../lib/src/generated/schema/schema.g.dart | 52 ++ .../generated/schema/version_response.dart | 40 ++ packages/ollama_dart/oas/ollama-curated.yaml | 42 ++ .../test/ollama_dart_chat_test.dart | 8 +- .../test/ollama_dart_completions_test.dart | 13 +- .../test/ollama_dart_embeddings_test.dart | 4 +- .../test/ollama_dart_models_test.dart | 24 +- .../test/ollama_dart_version_test.dart | 24 + 14 files changed, 748 insertions(+), 18 deletions(-) create mode 100644 packages/ollama_dart/lib/src/generated/schema/model_information.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/version_response.dart create mode 100644 packages/ollama_dart/test/ollama_dart_version_test.dart diff --git a/packages/ollama_dart/README.md b/packages/ollama_dart/README.md index 5b750447..cf822953 100644 --- a/packages/ollama_dart/README.md +++ b/packages/ollama_dart/README.md @@ -36,6 +36,7 @@ Unofficial Dart client for [Ollama](https://ollama.ai/) API. + [Pull a Model](#pull-a-model) + [Push a Model](#push-a-model) + [Check if a Blob Exists](#check-if-a-blob-exists) + * [Version](#version) - [Advance Usage](#advance-usage) * [Default HTTP client](#default-http-client) * [Custom HTTP client ](#custom-http-client) @@ -271,6 +272,15 @@ await client.checkBlob( If the blob doesn't exist, an `OllamaClientException` exception will be thrown. +### Version + +Get the version of the Ollama server. + +```dart +final res = await client.getVersion(); +print(res.version); +``` + ## Advance Usage ### Default HTTP client diff --git a/packages/ollama_dart/lib/src/generated/client.dart b/packages/ollama_dart/lib/src/generated/client.dart index 6c00d36f..0a530915 100644 --- a/packages/ollama_dart/lib/src/generated/client.dart +++ b/packages/ollama_dart/lib/src/generated/client.dart @@ -356,6 +356,27 @@ class OllamaClient { ); } + // ------------------------------------------ + // METHOD: getVersion + // ------------------------------------------ + + /// Returns the version of the Ollama server. + /// + /// This endpoint returns the version of the Ollama server. 
+ /// + /// `GET` `http://localhost:11434/api/version` + Future getVersion() async { + final r = await makeRequest( + baseUrl: 'http://localhost:11434/api', + path: '/version', + method: HttpMethod.get, + isMultipart: false, + requestType: '', + responseType: 'application/json', + ); + return VersionResponse.fromJson(_jsonDecode(r)); + } + // ------------------------------------------ // METHOD: generateCompletion // ------------------------------------------ diff --git a/packages/ollama_dart/lib/src/generated/schema/model_info.dart b/packages/ollama_dart/lib/src/generated/schema/model_info.dart index cb212131..30c2a949 100644 --- a/packages/ollama_dart/lib/src/generated/schema/model_info.dart +++ b/packages/ollama_dart/lib/src/generated/schema/model_info.dart @@ -33,6 +33,10 @@ class ModelInfo with _$ModelInfo { /// Details about a model. @JsonKey(includeIfNull: false) ModelDetails? details, + /// Details about a model. + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? modelInfo, + /// The default messages for the model. @JsonKey(includeIfNull: false) List? messages, }) = _ModelInfo; @@ -49,6 +53,7 @@ class ModelInfo with _$ModelInfo { 'template', 'system', 'details', + 'model_info', 'messages' ]; @@ -66,6 +71,7 @@ class ModelInfo with _$ModelInfo { 'template': template, 'system': system, 'details': details, + 'model_info': modelInfo, 'messages': messages, }; } diff --git a/packages/ollama_dart/lib/src/generated/schema/model_information.dart b/packages/ollama_dart/lib/src/generated/schema/model_information.dart new file mode 100644 index 00000000..d10848f8 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/model_information.dart @@ -0,0 +1,61 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ModelInformation +// ========================================== + +/// Details about a model. +@freezed +class ModelInformation with _$ModelInformation { + const ModelInformation._(); + + /// Factory constructor for ModelInformation + const factory ModelInformation({ + /// The architecture of the model. + @JsonKey(name: 'general.architecture', includeIfNull: false) + String? generalArchitecture, + + /// The file type of the model. + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? generalFileType, + + /// The number of parameters in the model. + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? generalParameterCount, + + /// The number of parameters in the model. + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? generalQuantizationVersion, + }) = _ModelInformation; + + /// Object construction from a JSON representation + factory ModelInformation.fromJson(Map json) => + _$ModelInformationFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'general.architecture', + 'general.file_type', + 'general.parameter_count', + 'general.quantization_version' + ]; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'general.architecture': generalArchitecture, + 'general.file_type': generalFileType, + 'general.parameter_count': generalParameterCount, + 'general.quantization_version': generalQuantizationVersion, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.dart b/packages/ollama_dart/lib/src/generated/schema/schema.dart index ed6b2733..dae6d4fb 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.dart @@ -13,6 +13,7 @@ part 'schema.freezed.dart'; part 'generate_completion_request.dart'; part 'request_options.dart'; part 'response_format.dart'; +part 'version_response.dart'; part 'generate_completion_response.dart'; part 'generate_chat_completion_request.dart'; part 'generate_chat_completion_response.dart'; @@ -26,6 +27,7 @@ part 'create_model_status.dart'; part 'models_response.dart'; part 'model.dart'; part 'model_details.dart'; +part 'model_information.dart'; part 'process_response.dart'; part 'process_model.dart'; part 'model_info_request.dart'; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index db00fd66..83c14bb1 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -1650,6 +1650,154 @@ abstract class _RequestOptions extends RequestOptions { throw _privateConstructorUsedError; } +VersionResponse _$VersionResponseFromJson(Map json) { + return _VersionResponse.fromJson(json); +} + +/// @nodoc +mixin _$VersionResponse { + /// The version of the Ollama server. + @JsonKey(includeIfNull: false) + String? get version => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $VersionResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $VersionResponseCopyWith<$Res> { + factory $VersionResponseCopyWith( + VersionResponse value, $Res Function(VersionResponse) then) = + _$VersionResponseCopyWithImpl<$Res, VersionResponse>; + @useResult + $Res call({@JsonKey(includeIfNull: false) String? version}); +} + +/// @nodoc +class _$VersionResponseCopyWithImpl<$Res, $Val extends VersionResponse> + implements $VersionResponseCopyWith<$Res> { + _$VersionResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? version = freezed, + }) { + return _then(_value.copyWith( + version: freezed == version + ? _value.version + : version // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$VersionResponseImplCopyWith<$Res> + implements $VersionResponseCopyWith<$Res> { + factory _$$VersionResponseImplCopyWith(_$VersionResponseImpl value, + $Res Function(_$VersionResponseImpl) then) = + __$$VersionResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(includeIfNull: false) String? 
version}); +} + +/// @nodoc +class __$$VersionResponseImplCopyWithImpl<$Res> + extends _$VersionResponseCopyWithImpl<$Res, _$VersionResponseImpl> + implements _$$VersionResponseImplCopyWith<$Res> { + __$$VersionResponseImplCopyWithImpl( + _$VersionResponseImpl _value, $Res Function(_$VersionResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? version = freezed, + }) { + return _then(_$VersionResponseImpl( + version: freezed == version + ? _value.version + : version // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$VersionResponseImpl extends _VersionResponse { + const _$VersionResponseImpl({@JsonKey(includeIfNull: false) this.version}) + : super._(); + + factory _$VersionResponseImpl.fromJson(Map json) => + _$$VersionResponseImplFromJson(json); + + /// The version of the Ollama server. + @override + @JsonKey(includeIfNull: false) + final String? version; + + @override + String toString() { + return 'VersionResponse(version: $version)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$VersionResponseImpl && + (identical(other.version, version) || other.version == version)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, version); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$VersionResponseImplCopyWith<_$VersionResponseImpl> get copyWith => + __$$VersionResponseImplCopyWithImpl<_$VersionResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$VersionResponseImplToJson( + this, + ); + } +} + +abstract class _VersionResponse extends VersionResponse { + const factory _VersionResponse( + {@JsonKey(includeIfNull: false) final String? version}) = + _$VersionResponseImpl; + const _VersionResponse._() : super._(); + + factory _VersionResponse.fromJson(Map json) = + _$VersionResponseImpl.fromJson; + + @override + + /// The version of the Ollama server. + @JsonKey(includeIfNull: false) + String? get version; + @override + @JsonKey(ignore: true) + _$$VersionResponseImplCopyWith<_$VersionResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + GenerateCompletionResponse _$GenerateCompletionResponseFromJson( Map json) { return _GenerateCompletionResponse.fromJson(json); @@ -4912,6 +5060,266 @@ abstract class _ModelDetails extends ModelDetails { throw _privateConstructorUsedError; } +ModelInformation _$ModelInformationFromJson(Map json) { + return _ModelInformation.fromJson(json); +} + +/// @nodoc +mixin _$ModelInformation { + /// The architecture of the model. + @JsonKey(name: 'general.architecture', includeIfNull: false) + String? get generalArchitecture => throw _privateConstructorUsedError; + + /// The file type of the model. + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? get generalFileType => throw _privateConstructorUsedError; + + /// The number of parameters in the model. + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? get generalParameterCount => throw _privateConstructorUsedError; + + /// The number of parameters in the model. + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? 
get generalQuantizationVersion => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ModelInformationCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModelInformationCopyWith<$Res> { + factory $ModelInformationCopyWith( + ModelInformation value, $Res Function(ModelInformation) then) = + _$ModelInformationCopyWithImpl<$Res, ModelInformation>; + @useResult + $Res call( + {@JsonKey(name: 'general.architecture', includeIfNull: false) + String? generalArchitecture, + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? generalFileType, + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? generalParameterCount, + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? generalQuantizationVersion}); +} + +/// @nodoc +class _$ModelInformationCopyWithImpl<$Res, $Val extends ModelInformation> + implements $ModelInformationCopyWith<$Res> { + _$ModelInformationCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? generalArchitecture = freezed, + Object? generalFileType = freezed, + Object? generalParameterCount = freezed, + Object? generalQuantizationVersion = freezed, + }) { + return _then(_value.copyWith( + generalArchitecture: freezed == generalArchitecture + ? _value.generalArchitecture + : generalArchitecture // ignore: cast_nullable_to_non_nullable + as String?, + generalFileType: freezed == generalFileType + ? _value.generalFileType + : generalFileType // ignore: cast_nullable_to_non_nullable + as int?, + generalParameterCount: freezed == generalParameterCount + ? _value.generalParameterCount + : generalParameterCount // ignore: cast_nullable_to_non_nullable + as int?, + generalQuantizationVersion: freezed == generalQuantizationVersion + ? _value.generalQuantizationVersion + : generalQuantizationVersion // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ModelInformationImplCopyWith<$Res> + implements $ModelInformationCopyWith<$Res> { + factory _$$ModelInformationImplCopyWith(_$ModelInformationImpl value, + $Res Function(_$ModelInformationImpl) then) = + __$$ModelInformationImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'general.architecture', includeIfNull: false) + String? generalArchitecture, + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? generalFileType, + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? generalParameterCount, + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? generalQuantizationVersion}); +} + +/// @nodoc +class __$$ModelInformationImplCopyWithImpl<$Res> + extends _$ModelInformationCopyWithImpl<$Res, _$ModelInformationImpl> + implements _$$ModelInformationImplCopyWith<$Res> { + __$$ModelInformationImplCopyWithImpl(_$ModelInformationImpl _value, + $Res Function(_$ModelInformationImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? generalArchitecture = freezed, + Object? generalFileType = freezed, + Object? generalParameterCount = freezed, + Object? generalQuantizationVersion = freezed, + }) { + return _then(_$ModelInformationImpl( + generalArchitecture: freezed == generalArchitecture + ? 
_value.generalArchitecture + : generalArchitecture // ignore: cast_nullable_to_non_nullable + as String?, + generalFileType: freezed == generalFileType + ? _value.generalFileType + : generalFileType // ignore: cast_nullable_to_non_nullable + as int?, + generalParameterCount: freezed == generalParameterCount + ? _value.generalParameterCount + : generalParameterCount // ignore: cast_nullable_to_non_nullable + as int?, + generalQuantizationVersion: freezed == generalQuantizationVersion + ? _value.generalQuantizationVersion + : generalQuantizationVersion // ignore: cast_nullable_to_non_nullable + as int?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModelInformationImpl extends _ModelInformation { + const _$ModelInformationImpl( + {@JsonKey(name: 'general.architecture', includeIfNull: false) + this.generalArchitecture, + @JsonKey(name: 'general.file_type', includeIfNull: false) + this.generalFileType, + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + this.generalParameterCount, + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + this.generalQuantizationVersion}) + : super._(); + + factory _$ModelInformationImpl.fromJson(Map json) => + _$$ModelInformationImplFromJson(json); + + /// The architecture of the model. + @override + @JsonKey(name: 'general.architecture', includeIfNull: false) + final String? generalArchitecture; + + /// The file type of the model. + @override + @JsonKey(name: 'general.file_type', includeIfNull: false) + final int? generalFileType; + + /// The number of parameters in the model. + @override + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + final int? generalParameterCount; + + /// The number of parameters in the model. + @override + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + final int? generalQuantizationVersion; + + @override + String toString() { + return 'ModelInformation(generalArchitecture: $generalArchitecture, generalFileType: $generalFileType, generalParameterCount: $generalParameterCount, generalQuantizationVersion: $generalQuantizationVersion)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModelInformationImpl && + (identical(other.generalArchitecture, generalArchitecture) || + other.generalArchitecture == generalArchitecture) && + (identical(other.generalFileType, generalFileType) || + other.generalFileType == generalFileType) && + (identical(other.generalParameterCount, generalParameterCount) || + other.generalParameterCount == generalParameterCount) && + (identical(other.generalQuantizationVersion, + generalQuantizationVersion) || + other.generalQuantizationVersion == + generalQuantizationVersion)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, generalArchitecture, + generalFileType, generalParameterCount, generalQuantizationVersion); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModelInformationImplCopyWith<_$ModelInformationImpl> get copyWith => + __$$ModelInformationImplCopyWithImpl<_$ModelInformationImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ModelInformationImplToJson( + this, + ); + } +} + +abstract class _ModelInformation extends ModelInformation { + const factory _ModelInformation( + {@JsonKey(name: 'general.architecture', includeIfNull: false) + final String? generalArchitecture, + @JsonKey(name: 'general.file_type', includeIfNull: false) + final int? 
generalFileType, + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + final int? generalParameterCount, + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + final int? generalQuantizationVersion}) = _$ModelInformationImpl; + const _ModelInformation._() : super._(); + + factory _ModelInformation.fromJson(Map json) = + _$ModelInformationImpl.fromJson; + + @override + + /// The architecture of the model. + @JsonKey(name: 'general.architecture', includeIfNull: false) + String? get generalArchitecture; + @override + + /// The file type of the model. + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? get generalFileType; + @override + + /// The number of parameters in the model. + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? get generalParameterCount; + @override + + /// The number of parameters in the model. + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? get generalQuantizationVersion; + @override + @JsonKey(ignore: true) + _$$ModelInformationImplCopyWith<_$ModelInformationImpl> get copyWith => + throw _privateConstructorUsedError; +} + ProcessResponse _$ProcessResponseFromJson(Map json) { return _ProcessResponse.fromJson(json); } @@ -5573,6 +5981,10 @@ mixin _$ModelInfo { @JsonKey(includeIfNull: false) ModelDetails? get details => throw _privateConstructorUsedError; + /// Details about a model. + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? get modelInfo => throw _privateConstructorUsedError; + /// The default messages for the model. @JsonKey(includeIfNull: false) List? get messages => throw _privateConstructorUsedError; @@ -5595,9 +6007,12 @@ abstract class $ModelInfoCopyWith<$Res> { @JsonKey(includeIfNull: false) String? template, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) ModelDetails? details, + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? modelInfo, @JsonKey(includeIfNull: false) List? messages}); $ModelDetailsCopyWith<$Res>? get details; + $ModelInformationCopyWith<$Res>? get modelInfo; } /// @nodoc @@ -5619,6 +6034,7 @@ class _$ModelInfoCopyWithImpl<$Res, $Val extends ModelInfo> Object? template = freezed, Object? system = freezed, Object? details = freezed, + Object? modelInfo = freezed, Object? messages = freezed, }) { return _then(_value.copyWith( @@ -5646,6 +6062,10 @@ class _$ModelInfoCopyWithImpl<$Res, $Val extends ModelInfo> ? _value.details : details // ignore: cast_nullable_to_non_nullable as ModelDetails?, + modelInfo: freezed == modelInfo + ? _value.modelInfo + : modelInfo // ignore: cast_nullable_to_non_nullable + as ModelInformation?, messages: freezed == messages ? _value.messages : messages // ignore: cast_nullable_to_non_nullable @@ -5664,6 +6084,18 @@ class _$ModelInfoCopyWithImpl<$Res, $Val extends ModelInfo> return _then(_value.copyWith(details: value) as $Val); }); } + + @override + @pragma('vm:prefer-inline') + $ModelInformationCopyWith<$Res>? get modelInfo { + if (_value.modelInfo == null) { + return null; + } + + return $ModelInformationCopyWith<$Res>(_value.modelInfo!, (value) { + return _then(_value.copyWith(modelInfo: value) as $Val); + }); + } } /// @nodoc @@ -5681,10 +6113,14 @@ abstract class _$$ModelInfoImplCopyWith<$Res> @JsonKey(includeIfNull: false) String? template, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) ModelDetails? details, + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? 
modelInfo, @JsonKey(includeIfNull: false) List? messages}); @override $ModelDetailsCopyWith<$Res>? get details; + @override + $ModelInformationCopyWith<$Res>? get modelInfo; } /// @nodoc @@ -5704,6 +6140,7 @@ class __$$ModelInfoImplCopyWithImpl<$Res> Object? template = freezed, Object? system = freezed, Object? details = freezed, + Object? modelInfo = freezed, Object? messages = freezed, }) { return _then(_$ModelInfoImpl( @@ -5731,6 +6168,10 @@ class __$$ModelInfoImplCopyWithImpl<$Res> ? _value.details : details // ignore: cast_nullable_to_non_nullable as ModelDetails?, + modelInfo: freezed == modelInfo + ? _value.modelInfo + : modelInfo // ignore: cast_nullable_to_non_nullable + as ModelInformation?, messages: freezed == messages ? _value._messages : messages // ignore: cast_nullable_to_non_nullable @@ -5749,6 +6190,7 @@ class _$ModelInfoImpl extends _ModelInfo { @JsonKey(includeIfNull: false) this.template, @JsonKey(includeIfNull: false) this.system, @JsonKey(includeIfNull: false) this.details, + @JsonKey(name: 'model_info', includeIfNull: false) this.modelInfo, @JsonKey(includeIfNull: false) final List? messages}) : _messages = messages, super._(); @@ -5786,6 +6228,11 @@ class _$ModelInfoImpl extends _ModelInfo { @JsonKey(includeIfNull: false) final ModelDetails? details; + /// Details about a model. + @override + @JsonKey(name: 'model_info', includeIfNull: false) + final ModelInformation? modelInfo; + /// The default messages for the model. final List? _messages; @@ -5802,7 +6249,7 @@ class _$ModelInfoImpl extends _ModelInfo { @override String toString() { - return 'ModelInfo(license: $license, modelfile: $modelfile, parameters: $parameters, template: $template, system: $system, details: $details, messages: $messages)'; + return 'ModelInfo(license: $license, modelfile: $modelfile, parameters: $parameters, template: $template, system: $system, details: $details, modelInfo: $modelInfo, messages: $messages)'; } @override @@ -5819,6 +6266,8 @@ class _$ModelInfoImpl extends _ModelInfo { other.template == template) && (identical(other.system, system) || other.system == system) && (identical(other.details, details) || other.details == details) && + (identical(other.modelInfo, modelInfo) || + other.modelInfo == modelInfo) && const DeepCollectionEquality().equals(other._messages, _messages)); } @@ -5832,6 +6281,7 @@ class _$ModelInfoImpl extends _ModelInfo { template, system, details, + modelInfo, const DeepCollectionEquality().hash(_messages)); @JsonKey(ignore: true) @@ -5856,6 +6306,8 @@ abstract class _ModelInfo extends ModelInfo { @JsonKey(includeIfNull: false) final String? template, @JsonKey(includeIfNull: false) final String? system, @JsonKey(includeIfNull: false) final ModelDetails? details, + @JsonKey(name: 'model_info', includeIfNull: false) + final ModelInformation? modelInfo, @JsonKey(includeIfNull: false) final List? messages}) = _$ModelInfoImpl; const _ModelInfo._() : super._(); @@ -5895,6 +6347,11 @@ abstract class _ModelInfo extends ModelInfo { ModelDetails? get details; @override + /// Details about a model. + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? get modelInfo; + @override + /// The default messages for the model. @JsonKey(includeIfNull: false) List? 
get messages; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index 3443737b..1ad66a40 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -133,6 +133,26 @@ Map _$$RequestOptionsImplToJson( return val; } +_$VersionResponseImpl _$$VersionResponseImplFromJson( + Map json) => + _$VersionResponseImpl( + version: json['version'] as String?, + ); + +Map _$$VersionResponseImplToJson( + _$VersionResponseImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('version', instance.version); + return val; +} + _$GenerateCompletionResponseImpl _$$GenerateCompletionResponseImplFromJson( Map json) => _$GenerateCompletionResponseImpl( @@ -476,6 +496,33 @@ Map _$$ModelDetailsImplToJson(_$ModelDetailsImpl instance) { return val; } +_$ModelInformationImpl _$$ModelInformationImplFromJson( + Map json) => + _$ModelInformationImpl( + generalArchitecture: json['general.architecture'] as String?, + generalFileType: json['general.file_type'] as int?, + generalParameterCount: json['general.parameter_count'] as int?, + generalQuantizationVersion: json['general.quantization_version'] as int?, + ); + +Map _$$ModelInformationImplToJson( + _$ModelInformationImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('general.architecture', instance.generalArchitecture); + writeNotNull('general.file_type', instance.generalFileType); + writeNotNull('general.parameter_count', instance.generalParameterCount); + writeNotNull( + 'general.quantization_version', instance.generalQuantizationVersion); + return val; +} + _$ProcessResponseImpl _$$ProcessResponseImplFromJson( Map json) => _$ProcessResponseImpl( @@ -550,6 +597,10 @@ _$ModelInfoImpl _$$ModelInfoImplFromJson(Map json) => details: json['details'] == null ? null : ModelDetails.fromJson(json['details'] as Map), + modelInfo: json['model_info'] == null + ? null + : ModelInformation.fromJson( + json['model_info'] as Map), messages: (json['messages'] as List?) ?.map((e) => Message.fromJson(e as Map)) .toList(), @@ -570,6 +621,7 @@ Map _$$ModelInfoImplToJson(_$ModelInfoImpl instance) { writeNotNull('template', instance.template); writeNotNull('system', instance.system); writeNotNull('details', instance.details?.toJson()); + writeNotNull('model_info', instance.modelInfo?.toJson()); writeNotNull('messages', instance.messages?.map((e) => e.toJson()).toList()); return val; } diff --git a/packages/ollama_dart/lib/src/generated/schema/version_response.dart b/packages/ollama_dart/lib/src/generated/schema/version_response.dart new file mode 100644 index 00000000..21d3259e --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/version_response.dart @@ -0,0 +1,40 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: VersionResponse +// ========================================== + +/// The response class for the version endpoint. +@freezed +class VersionResponse with _$VersionResponse { + const VersionResponse._(); + + /// Factory constructor for VersionResponse + const factory VersionResponse({ + /// The version of the Ollama server. 
+    @JsonKey(includeIfNull: false) String? version,
+  }) = _VersionResponse;
+
+  /// Object construction from a JSON representation
+  factory VersionResponse.fromJson(Map<String, dynamic> json) =>
+      _$VersionResponseFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = ['version'];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'version': version,
+    };
+  }
+}
diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml
index 12159978..08ec6845 100644
--- a/packages/ollama_dart/oas/ollama-curated.yaml
+++ b/packages/ollama_dart/oas/ollama-curated.yaml
@@ -20,6 +20,18 @@ tags:
     description: List and describe the various models available.
 
 paths:
+  /version:
+    get:
+      operationId: getVersion
+      summary: Returns the version of the Ollama server.
+      description: This endpoint returns the version of the Ollama server.
+      responses:
+        '200':
+          description: Successful operation.
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/VersionResponse'
   /generate:
     post:
       operationId: generateCompletion
@@ -501,6 +513,13 @@ components:
         Note: it's important to instruct the model to use JSON in the prompt. Otherwise, the model may generate large amounts whitespace.
       enum:
        - json
+    VersionResponse:
+      type: object
+      description: The response class for the version endpoint.
+      properties:
+        version:
+          type: string
+          description: The version of the Ollama server.
     GenerateCompletionResponse:
       type: object
       description: The response class for the generate endpoint.
@@ -792,6 +811,27 @@
         quantization_level:
           type: string
          description: The quantization level of the model.
+    ModelInformation:
+      type: object
+      description: Details about a model.
+      properties:
+        general.architecture:
+          type: string
+          description: The architecture of the model.
+        general.file_type:
+          type: integer
+          nullable: true
+          description: The file type of the model.
+        general.parameter_count:
+          type: integer
+          format: int64
+          nullable: true
+          description: The number of parameters in the model.
+        general.quantization_version:
+          type: integer
+          nullable: true
+          description: The quantization version of the model.
+
     ProcessResponse:
       type: object
       description: Response class for the list running models endpoint.
@@ -869,6 +909,8 @@
           description: The system prompt for the model.
details: $ref: '#/components/schemas/ModelDetails' + model_info: + $ref: '#/components/schemas/ModelInformation' messages: type: array nullable: true diff --git a/packages/ollama_dart/test/ollama_dart_chat_test.dart b/packages/ollama_dart/test/ollama_dart_chat_test.dart index af90c448..807e1b67 100644 --- a/packages/ollama_dart/test/ollama_dart_chat_test.dart +++ b/packages/ollama_dart/test/ollama_dart_chat_test.dart @@ -7,19 +7,19 @@ void main() { group('Ollama Generate Completions API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3:latest'; - const visionModel = 'llava:latest'; + const defaultModel = 'gemma2'; + const visionModel = 'llava'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model == defaultModel), + res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), isNotNull, ); expect( - res.models?.firstWhere((final m) => m.model == visionModel), + res.models?.firstWhere((final m) => m.model!.startsWith(visionModel)), isNotNull, ); }); diff --git a/packages/ollama_dart/test/ollama_dart_completions_test.dart b/packages/ollama_dart/test/ollama_dart_completions_test.dart index 5c4b2981..5a134b37 100644 --- a/packages/ollama_dart/test/ollama_dart_completions_test.dart +++ b/packages/ollama_dart/test/ollama_dart_completions_test.dart @@ -7,20 +7,19 @@ void main() { group('Ollama Generate Completions API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3:latest'; - const visionModel = 'llava:latest'; + const defaultModel = 'gemma2'; + const visionModel = 'llava'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model == defaultModel), + res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), isNotNull, ); - expect( - res.models?.firstWhere((final m) => m.model == visionModel), + res.models?.firstWhere((final m) => m.model!.startsWith(visionModel)), isNotNull, ); }); @@ -76,9 +75,9 @@ void main() { }); test('Test call completions API with raw mode', () async { - const testPrompt = '[INST] List the numbers from 1 to 9 in order. ' + const testPrompt = 'List the numbers from 1 to 9 in order. ' 'Output ONLY the numbers in one line without any spaces or commas. 
' - 'NUMBERS: [/INST]'; + 'NUMBERS:'; final res = await client.generateCompletion( request: const GenerateCompletionRequest( diff --git a/packages/ollama_dart/test/ollama_dart_embeddings_test.dart b/packages/ollama_dart/test/ollama_dart_embeddings_test.dart index c32701a8..e6ff8b6f 100644 --- a/packages/ollama_dart/test/ollama_dart_embeddings_test.dart +++ b/packages/ollama_dart/test/ollama_dart_embeddings_test.dart @@ -7,14 +7,14 @@ void main() { group('Ollama Generate Embeddings API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3:latest'; + const defaultModel = 'mxbai-embed-large:335m'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model == defaultModel), + res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), isNotNull, ); }); diff --git a/packages/ollama_dart/test/ollama_dart_models_test.dart b/packages/ollama_dart/test/ollama_dart_models_test.dart index e511bff4..b94698ad 100644 --- a/packages/ollama_dart/test/ollama_dart_models_test.dart +++ b/packages/ollama_dart/test/ollama_dart_models_test.dart @@ -7,14 +7,14 @@ void main() { group('Ollama Models API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3:latest'; + const defaultModel = 'gemma2'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model == defaultModel), + res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), isNotNull, ); }); @@ -62,7 +62,10 @@ void main() { test('Test list models', () async { final res = await client.listModels(); - expect(res.models?.any((final m) => m.model == defaultModel), isTrue); + expect( + res.models?.any((final m) => m.model!.startsWith(defaultModel)), + isTrue, + ); }); test('Test list running models', () async { @@ -75,7 +78,10 @@ void main() { ); final res = await client.listRunningModels(); - expect(res.models?.any((final m) => m.model == defaultModel), isTrue); + expect( + res.models?.any((final m) => m.model!.startsWith(defaultModel)), + isTrue, + ); }); test('Test show model info', () async { @@ -84,7 +90,17 @@ void main() { ); expect(res.license, isNotEmpty); expect(res.modelfile, isNotEmpty); + expect(res.parameters, isNotEmpty); expect(res.template, isNotEmpty); + expect(res.details?.format, isNotEmpty); + expect(res.details?.family, isNotEmpty); + expect(res.details?.families, isNotEmpty); + expect(res.details?.parameterSize, isNotEmpty); + expect(res.details?.quantizationLevel, isNotEmpty); + expect(res.modelInfo?.generalArchitecture, isNotEmpty); + expect(res.modelInfo?.generalFileType, greaterThan(0)); + expect(res.modelInfo?.generalParameterCount, greaterThan(0)); + expect(res.modelInfo?.generalQuantizationVersion, greaterThan(0)); }); test('Test copy model', () async { diff --git a/packages/ollama_dart/test/ollama_dart_version_test.dart b/packages/ollama_dart/test/ollama_dart_version_test.dart new file mode 100644 index 00000000..002f8167 --- /dev/null +++ b/packages/ollama_dart/test/ollama_dart_version_test.dart @@ -0,0 +1,24 @@ +import 'dart:io'; + +import 'package:ollama_dart/ollama_dart.dart'; +import 'package:test/test.dart'; + +void main() { + group('Ollama Version API tests', + skip: Platform.environment.containsKey('CI'), () { + late OllamaClient client; + + setUp(() async { + client 
= OllamaClient(); + }); + + tearDown(() { + client.endSession(); + }); + + test('Test get version', () async { + final res = await client.getVersion(); + expect(res.version, isNotEmpty); + }); + }); +} From 4f17878fa5c2711612c7d4267280cec6ef64f931 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 13 Jul 2024 00:05:55 +0200 Subject: [PATCH 066/251] refactor!: Change Ollama push model status type from enum to String (#489) --- .../generated/schema/push_model_response.dart | 6 +-- .../generated/schema/push_model_status.dart | 21 ---------- .../lib/src/generated/schema/schema.dart | 1 - .../src/generated/schema/schema.freezed.dart | 39 ++++++------------- .../lib/src/generated/schema/schema.g.dart | 12 +----- packages/ollama_dart/oas/ollama-curated.yaml | 11 +----- .../test/ollama_dart_models_test.dart | 6 +-- 7 files changed, 20 insertions(+), 76 deletions(-) delete mode 100644 packages/ollama_dart/lib/src/generated/schema/push_model_status.dart diff --git a/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart b/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart index bdfb3574..d3bb5142 100644 --- a/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart +++ b/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart @@ -16,11 +16,7 @@ class PushModelResponse with _$PushModelResponse { /// Factory constructor for PushModelResponse const factory PushModelResponse({ /// Status pushing the model. - @JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue, - ) - PushModelStatus? status, + @JsonKey(includeIfNull: false) String? status, /// the model's digest @JsonKey(includeIfNull: false) String? digest, diff --git a/packages/ollama_dart/lib/src/generated/schema/push_model_status.dart b/packages/ollama_dart/lib/src/generated/schema/push_model_status.dart deleted file mode 100644 index c043c843..00000000 --- a/packages/ollama_dart/lib/src/generated/schema/push_model_status.dart +++ /dev/null @@ -1,21 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of ollama_schema; - -// ========================================== -// ENUM: PushModelStatus -// ========================================== - -/// Status pushing the model. -enum PushModelStatus { - @JsonValue('retrieving manifest') - retrievingManifest, - @JsonValue('starting upload') - startingUpload, - @JsonValue('pushing manifest') - pushingManifest, - @JsonValue('success') - success, -} diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.dart b/packages/ollama_dart/lib/src/generated/schema/schema.dart index dae6d4fb..5ed7214c 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.dart @@ -39,4 +39,3 @@ part 'pull_model_response.dart'; part 'pull_model_status.dart'; part 'push_model_request.dart'; part 'push_model_response.dart'; -part 'push_model_status.dart'; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index 83c14bb1..bf7cf75c 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -7485,9 +7485,8 @@ PushModelResponse _$PushModelResponseFromJson(Map json) { /// @nodoc mixin _$PushModelResponse { /// Status pushing the model. 
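A minimal caller-side sketch of this refactor, assuming the generated `pushModel` client method and an illustrative model name (neither appears in this hunk): callers now match the `status` field against plain strings such as 'success' instead of the removed `PushModelStatus` enum.

import 'package:ollama_dart/ollama_dart.dart';

Future<void> main() async {
  final client = OllamaClient();
  // Assumed non-streaming push call; the model name is illustrative only.
  final res = await client.pushModel(
    request: const PushModelRequest(model: 'mattw/pygmalion:latest'),
  );
  // After this change, `status` is a plain String rather than an enum value.
  if (res.status == 'success') {
    print('Model pushed successfully');
  }
  client.endSession();
}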
- @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - PushModelStatus? get status => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? get status => throw _privateConstructorUsedError; /// the model's digest @JsonKey(includeIfNull: false) @@ -7514,10 +7513,7 @@ abstract class $PushModelResponseCopyWith<$Res> { _$PushModelResponseCopyWithImpl<$Res, PushModelResponse>; @useResult $Res call( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - PushModelStatus? status, + {@JsonKey(includeIfNull: false) String? status, @JsonKey(includeIfNull: false) String? digest, @JsonKey(includeIfNull: false) int? total, @JsonKey(includeIfNull: false) int? completed}); @@ -7545,7 +7541,7 @@ class _$PushModelResponseCopyWithImpl<$Res, $Val extends PushModelResponse> status: freezed == status ? _value.status : status // ignore: cast_nullable_to_non_nullable - as PushModelStatus?, + as String?, digest: freezed == digest ? _value.digest : digest // ignore: cast_nullable_to_non_nullable @@ -7571,10 +7567,7 @@ abstract class _$$PushModelResponseImplCopyWith<$Res> @override @useResult $Res call( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - PushModelStatus? status, + {@JsonKey(includeIfNull: false) String? status, @JsonKey(includeIfNull: false) String? digest, @JsonKey(includeIfNull: false) int? total, @JsonKey(includeIfNull: false) int? completed}); @@ -7600,7 +7593,7 @@ class __$$PushModelResponseImplCopyWithImpl<$Res> status: freezed == status ? _value.status : status // ignore: cast_nullable_to_non_nullable - as PushModelStatus?, + as String?, digest: freezed == digest ? _value.digest : digest // ignore: cast_nullable_to_non_nullable @@ -7621,10 +7614,7 @@ class __$$PushModelResponseImplCopyWithImpl<$Res> @JsonSerializable() class _$PushModelResponseImpl extends _PushModelResponse { const _$PushModelResponseImpl( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - this.status, + {@JsonKey(includeIfNull: false) this.status, @JsonKey(includeIfNull: false) this.digest, @JsonKey(includeIfNull: false) this.total, @JsonKey(includeIfNull: false) this.completed}) @@ -7635,9 +7625,8 @@ class _$PushModelResponseImpl extends _PushModelResponse { /// Status pushing the model. @override - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final PushModelStatus? status; + @JsonKey(includeIfNull: false) + final String? status; /// the model's digest @override @@ -7693,10 +7682,7 @@ class _$PushModelResponseImpl extends _PushModelResponse { abstract class _PushModelResponse extends PushModelResponse { const factory _PushModelResponse( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final PushModelStatus? status, + {@JsonKey(includeIfNull: false) final String? status, @JsonKey(includeIfNull: false) final String? digest, @JsonKey(includeIfNull: false) final int? total, @JsonKey(includeIfNull: false) final int? completed}) = @@ -7709,9 +7695,8 @@ abstract class _PushModelResponse extends PushModelResponse { @override /// Status pushing the model. - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - PushModelStatus? get status; + @JsonKey(includeIfNull: false) + String? 
get status; @override /// the model's digest diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index 1ad66a40..a4aee619 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -749,8 +749,7 @@ Map _$$PushModelRequestImplToJson( _$PushModelResponseImpl _$$PushModelResponseImplFromJson( Map json) => _$PushModelResponseImpl( - status: $enumDecodeNullable(_$PushModelStatusEnumMap, json['status'], - unknownValue: JsonKey.nullForUndefinedEnumValue), + status: json['status'] as String?, digest: json['digest'] as String?, total: json['total'] as int?, completed: json['completed'] as int?, @@ -766,16 +765,9 @@ Map _$$PushModelResponseImplToJson( } } - writeNotNull('status', _$PushModelStatusEnumMap[instance.status]); + writeNotNull('status', instance.status); writeNotNull('digest', instance.digest); writeNotNull('total', instance.total); writeNotNull('completed', instance.completed); return val; } - -const _$PushModelStatusEnumMap = { - PushModelStatus.retrievingManifest: 'retrieving manifest', - PushModelStatus.startingUpload: 'starting upload', - PushModelStatus.pushingManifest: 'pushing manifest', - PushModelStatus.success: 'success', -}; diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index 08ec6845..9d3a507e 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -1037,7 +1037,8 @@ components: description: Response class for pushing a model. properties: status: - $ref: '#/components/schemas/PushModelStatus' + type: string + description: Status pushing the model. digest: type: string description: the model's digest @@ -1052,11 +1053,3 @@ components: format: int64 description: Total bytes transferred. example: 2142590208 - PushModelStatus: - type: string - description: Status pushing the model. - enum: - - retrieving manifest - - starting upload - - pushing manifest - - success diff --git a/packages/ollama_dart/test/ollama_dart_models_test.dart b/packages/ollama_dart/test/ollama_dart_models_test.dart index b94698ad..03086e4b 100644 --- a/packages/ollama_dart/test/ollama_dart_models_test.dart +++ b/packages/ollama_dart/test/ollama_dart_models_test.dart @@ -158,7 +158,7 @@ void main() { request: const PushModelRequest(model: 'mattw/pygmalion:latest'), ); - expect(res.status, PushModelStatus.success); + expect(res.status, equals('success')); }); test('Test push model stream', skip: true, () async { @@ -167,13 +167,13 @@ void main() { ); int count = 0; - PushModelStatus? lastStatus; + String? 
lastStatus; await for (final res in stream) { lastStatus = res.status; count++; } expect(count, greaterThan(1)); - expect(lastStatus, equals(PushModelStatus.success)); + expect(lastStatus, equals('success')); }); test('Test check blob', skip: true, () async { From 3412c4c8542112973bd678c1ddcdd0fc98591dce Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 18 Jul 2024 23:24:59 +0200 Subject: [PATCH 067/251] feat: Add support for overrides in the file search tool in openai_dart (#491) --- .../openai_dart/lib/src/generated/client.dart | 4 +- .../src/generated/schema/assistant_tools.dart | 57 ++- .../schema/create_batch_request.dart | 2 +- .../create_fine_tuning_job_request.dart | 7 +- ...ontent_text_annotations_file_citation.dart | 6 +- .../src/generated/schema/schema.freezed.dart | 384 +++++++++++++++--- .../lib/src/generated/schema/schema.g.dart | 64 ++- packages/openai_dart/oas/openapi_curated.yaml | 35 +- .../openai_dart/oas/openapi_official.yaml | 224 ++++++++-- 9 files changed, 637 insertions(+), 146 deletions(-) diff --git a/packages/openai_dart/lib/src/generated/client.dart b/packages/openai_dart/lib/src/generated/client.dart index aca8f85f..66c918d1 100644 --- a/packages/openai_dart/lib/src/generated/client.dart +++ b/packages/openai_dart/lib/src/generated/client.dart @@ -58,7 +58,7 @@ class OpenAIClientException implements Exception { // CLASS: OpenAIClient // ========================================== -/// Client for OpenAI API (v.2.0.0) +/// Client for OpenAI API (v.2.1.0) /// /// The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. class OpenAIClient { @@ -1846,7 +1846,7 @@ class OpenAIClient { // METHOD: cancelBatch // ------------------------------------------ - /// Cancels an in-progress batch. + /// Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file. /// /// `batchId`: The ID of the batch to cancel. /// diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart index 6e45f715..043a7d9a 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart @@ -30,7 +30,11 @@ sealed class AssistantTools with _$AssistantTools { /// FileSearch tool const factory AssistantTools.fileSearch({ /// The type of tool being defined: `file_search` - @Default('file_search') String type, + required String type, + + /// Overrides for the file search tool. + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch, }) = AssistantToolsFileSearch; // ------------------------------------------ @@ -63,3 +67,54 @@ enum AssistantToolsEnumType { @JsonValue('function') function, } + +// ========================================== +// CLASS: AssistantToolsFileSearchFileSearch +// ========================================== + +/// Overrides for the file search tool. +@freezed +class AssistantToolsFileSearchFileSearch + with _$AssistantToolsFileSearchFileSearch { + const AssistantToolsFileSearchFileSearch._(); + + /// Factory constructor for AssistantToolsFileSearchFileSearch + const factory AssistantToolsFileSearchFileSearch({ + /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// and 5 for gpt-3.5-turbo. 
This number should be between 1 and 50 inclusive. + /// + /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search + /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + @JsonKey(name: 'max_num_results', includeIfNull: false) int? maxNumResults, + }) = _AssistantToolsFileSearchFileSearch; + + /// Object construction from a JSON representation + factory AssistantToolsFileSearchFileSearch.fromJson( + Map json) => + _$AssistantToolsFileSearchFileSearchFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['max_num_results']; + + /// Validation constants + static const maxNumResultsMinValue = 1; + static const maxNumResultsMaxValue = 50; + + /// Perform validations on the schema property values + String? validateSchema() { + if (maxNumResults != null && maxNumResults! < maxNumResultsMinValue) { + return "The value of 'maxNumResults' cannot be < $maxNumResultsMinValue"; + } + if (maxNumResults != null && maxNumResults! > maxNumResultsMaxValue) { + return "The value of 'maxNumResults' cannot be > $maxNumResultsMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'max_num_results': maxNumResults, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart b/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart index 5014b4f1..b7a86f72 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart @@ -19,7 +19,7 @@ class CreateBatchRequest with _$CreateBatchRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. + /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. @JsonKey(name: 'input_file_id') required String inputFileId, /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. diff --git a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart index 14929898..17b649aa 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart @@ -23,7 +23,12 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + /// `fine-tune`. 
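A brief usage sketch of the file search override introduced above, assuming the generated schema classes are exported from package:openai_dart as usual: the tool's `type` is now required, and the optional `file_search` object caps how many chunks the tool may return (between 1 and 50).

import 'package:openai_dart/openai_dart.dart';

void main() {
  // `type` must now be passed explicitly; `fileSearch` is the new override.
  const tool = AssistantTools.fileSearch(
    type: 'file_search',
    fileSearch: AssistantToolsFileSearchFileSearch(maxNumResults: 5),
  );
  // Serializes to {type: file_search, file_search: {max_num_results: 5}}.
  print(tool.toJson());
}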
+ /// + /// The contents of the file should differ depending on if the model uses the + /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. @JsonKey(name: 'training_file') required String trainingFile, diff --git a/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart b/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart index 1e6807c9..c8d3a8f1 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart @@ -18,9 +18,6 @@ class MessageContentTextAnnotationsFileCitation const factory MessageContentTextAnnotationsFileCitation({ /// The ID of the specific File the citation is from. @JsonKey(name: 'file_id') required String fileId, - - /// The specific quote in the file. - @JsonKey(includeIfNull: false) String? quote, }) = _MessageContentTextAnnotationsFileCitation; /// Object construction from a JSON representation @@ -29,7 +26,7 @@ class MessageContentTextAnnotationsFileCitation _$MessageContentTextAnnotationsFileCitationFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_id', 'quote']; + static const List propertyNames = ['file_id']; /// Perform validations on the schema property values String? validateSchema() { @@ -40,7 +37,6 @@ class MessageContentTextAnnotationsFileCitation Map toMap() { return { 'file_id': fileId, - 'quote': quote, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 16efa483..0bc9015f 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -13428,7 +13428,12 @@ mixin _$CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + /// `fine-tune`. + /// + /// The contents of the file should differ depending on if the model uses the + /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. @JsonKey(name: 'training_file') @@ -13682,7 +13687,12 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + /// `fine-tune`. 
+ /// + /// The contents of the file should differ depending on if the model uses the + /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. @override @@ -13815,7 +13825,12 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + /// `fine-tune`. + /// + /// The contents of the file should differ depending on if the model uses the + /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. @JsonKey(name: 'training_file') @@ -40381,10 +40396,6 @@ mixin _$MessageContentTextAnnotationsFileCitation { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; - /// The specific quote in the file. - @JsonKey(includeIfNull: false) - String? get quote => throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $MessageContentTextAnnotationsFileCitationCopyWith< @@ -40400,9 +40411,7 @@ abstract class $MessageContentTextAnnotationsFileCitationCopyWith<$Res> { _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, MessageContentTextAnnotationsFileCitation>; @useResult - $Res call( - {@JsonKey(name: 'file_id') String fileId, - @JsonKey(includeIfNull: false) String? quote}); + $Res call({@JsonKey(name: 'file_id') String fileId}); } /// @nodoc @@ -40421,17 +40430,12 @@ class _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, @override $Res call({ Object? fileId = null, - Object? quote = freezed, }) { return _then(_value.copyWith( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, - quote: freezed == quote - ? _value.quote - : quote // ignore: cast_nullable_to_non_nullable - as String?, ) as $Val); } } @@ -40445,9 +40449,7 @@ abstract class _$$MessageContentTextAnnotationsFileCitationImplCopyWith<$Res> __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(name: 'file_id') String fileId, - @JsonKey(includeIfNull: false) String? quote}); + $Res call({@JsonKey(name: 'file_id') String fileId}); } /// @nodoc @@ -40464,17 +40466,12 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> @override $Res call({ Object? fileId = null, - Object? quote = freezed, }) { return _then(_$MessageContentTextAnnotationsFileCitationImpl( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, - quote: freezed == quote - ? 
_value.quote - : quote // ignore: cast_nullable_to_non_nullable - as String?, )); } } @@ -40484,8 +40481,7 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> class _$MessageContentTextAnnotationsFileCitationImpl extends _MessageContentTextAnnotationsFileCitation { const _$MessageContentTextAnnotationsFileCitationImpl( - {@JsonKey(name: 'file_id') required this.fileId, - @JsonKey(includeIfNull: false) this.quote}) + {@JsonKey(name: 'file_id') required this.fileId}) : super._(); factory _$MessageContentTextAnnotationsFileCitationImpl.fromJson( @@ -40497,14 +40493,9 @@ class _$MessageContentTextAnnotationsFileCitationImpl @JsonKey(name: 'file_id') final String fileId; - /// The specific quote in the file. - @override - @JsonKey(includeIfNull: false) - final String? quote; - @override String toString() { - return 'MessageContentTextAnnotationsFileCitation(fileId: $fileId, quote: $quote)'; + return 'MessageContentTextAnnotationsFileCitation(fileId: $fileId)'; } @override @@ -40512,13 +40503,12 @@ class _$MessageContentTextAnnotationsFileCitationImpl return identical(this, other) || (other.runtimeType == runtimeType && other is _$MessageContentTextAnnotationsFileCitationImpl && - (identical(other.fileId, fileId) || other.fileId == fileId) && - (identical(other.quote, quote) || other.quote == quote)); + (identical(other.fileId, fileId) || other.fileId == fileId)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, fileId, quote); + int get hashCode => Object.hash(runtimeType, fileId); @JsonKey(ignore: true) @override @@ -40541,8 +40531,7 @@ class _$MessageContentTextAnnotationsFileCitationImpl abstract class _MessageContentTextAnnotationsFileCitation extends MessageContentTextAnnotationsFileCitation { const factory _MessageContentTextAnnotationsFileCitation( - {@JsonKey(name: 'file_id') required final String fileId, - @JsonKey(includeIfNull: false) final String? quote}) = + {@JsonKey(name: 'file_id') required final String fileId}) = _$MessageContentTextAnnotationsFileCitationImpl; const _MessageContentTextAnnotationsFileCitation._() : super._(); @@ -40556,11 +40545,6 @@ abstract class _MessageContentTextAnnotationsFileCitation @JsonKey(name: 'file_id') String get fileId; @override - - /// The specific quote in the file. - @JsonKey(includeIfNull: false) - String? get quote; - @override @JsonKey(ignore: true) _$$MessageContentTextAnnotationsFileCitationImplCopyWith< _$MessageContentTextAnnotationsFileCitationImpl> @@ -48068,7 +48052,7 @@ mixin _$CreateBatchRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. + /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. @JsonKey(name: 'input_file_id') String get inputFileId => throw _privateConstructorUsedError; @@ -48214,7 +48198,7 @@ class _$CreateBatchRequestImpl extends _CreateBatchRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. 
+ /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. @override @JsonKey(name: 'input_file_id') final String inputFileId; @@ -48300,7 +48284,7 @@ abstract class _CreateBatchRequest extends CreateBatchRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. + /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. @JsonKey(name: 'input_file_id') String get inputFileId; @override @@ -52790,21 +52774,33 @@ mixin _$AssistantTools { @optionalTypeArgs TResult when({ required TResult Function(String type) codeInterpreter, - required TResult Function(String type) fileSearch, + required TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch) + fileSearch, required TResult Function(String type, FunctionObject function) function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(String type)? codeInterpreter, - TResult? Function(String type)? fileSearch, + TResult? Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult? Function(String type, FunctionObject function)? function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(String type)? codeInterpreter, - TResult Function(String type)? fileSearch, + TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult Function(String type, FunctionObject function)? function, required TResult orElse(), }) => @@ -52952,7 +52948,11 @@ class _$AssistantToolsCodeInterpreterImpl @optionalTypeArgs TResult when({ required TResult Function(String type) codeInterpreter, - required TResult Function(String type) fileSearch, + required TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch) + fileSearch, required TResult Function(String type, FunctionObject function) function, }) { return codeInterpreter(type); @@ -52962,7 +52962,11 @@ class _$AssistantToolsCodeInterpreterImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(String type)? codeInterpreter, - TResult? Function(String type)? fileSearch, + TResult? Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult? Function(String type, FunctionObject function)? function, }) { return codeInterpreter?.call(type); @@ -52972,7 +52976,11 @@ class _$AssistantToolsCodeInterpreterImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(String type)? codeInterpreter, - TResult Function(String type)? fileSearch, + TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult Function(String type, FunctionObject function)? 
function, required TResult orElse(), }) { @@ -53053,7 +53061,12 @@ abstract class _$$AssistantToolsFileSearchImplCopyWith<$Res> __$$AssistantToolsFileSearchImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type}); + $Res call( + {String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch}); + + $AssistantToolsFileSearchFileSearchCopyWith<$Res>? get fileSearch; } /// @nodoc @@ -53069,32 +53082,57 @@ class __$$AssistantToolsFileSearchImplCopyWithImpl<$Res> @override $Res call({ Object? type = null, + Object? fileSearch = freezed, }) { return _then(_$AssistantToolsFileSearchImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, + fileSearch: freezed == fileSearch + ? _value.fileSearch + : fileSearch // ignore: cast_nullable_to_non_nullable + as AssistantToolsFileSearchFileSearch?, )); } + + @override + @pragma('vm:prefer-inline') + $AssistantToolsFileSearchFileSearchCopyWith<$Res>? get fileSearch { + if (_value.fileSearch == null) { + return null; + } + + return $AssistantToolsFileSearchFileSearchCopyWith<$Res>(_value.fileSearch!, + (value) { + return _then(_value.copyWith(fileSearch: value)); + }); + } } /// @nodoc @JsonSerializable() class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { - const _$AssistantToolsFileSearchImpl({this.type = 'file_search'}) : super._(); + const _$AssistantToolsFileSearchImpl( + {required this.type, + @JsonKey(name: 'file_search', includeIfNull: false) this.fileSearch}) + : super._(); factory _$AssistantToolsFileSearchImpl.fromJson(Map json) => _$$AssistantToolsFileSearchImplFromJson(json); /// The type of tool being defined: `file_search` @override - @JsonKey() final String type; + /// Overrides for the file search tool. + @override + @JsonKey(name: 'file_search', includeIfNull: false) + final AssistantToolsFileSearchFileSearch? fileSearch; + @override String toString() { - return 'AssistantTools.fileSearch(type: $type)'; + return 'AssistantTools.fileSearch(type: $type, fileSearch: $fileSearch)'; } @override @@ -53102,12 +53140,14 @@ class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { return identical(this, other) || (other.runtimeType == runtimeType && other is _$AssistantToolsFileSearchImpl && - (identical(other.type, type) || other.type == type)); + (identical(other.type, type) || other.type == type) && + (identical(other.fileSearch, fileSearch) || + other.fileSearch == fileSearch)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, type); + int get hashCode => Object.hash(runtimeType, type, fileSearch); @JsonKey(ignore: true) @override @@ -53120,32 +53160,44 @@ class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { @optionalTypeArgs TResult when({ required TResult Function(String type) codeInterpreter, - required TResult Function(String type) fileSearch, + required TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch) + fileSearch, required TResult Function(String type, FunctionObject function) function, }) { - return fileSearch(type); + return fileSearch(type, this.fileSearch); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(String type)? codeInterpreter, - TResult? Function(String type)? fileSearch, + TResult? Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? 
+ fileSearch, TResult? Function(String type, FunctionObject function)? function, }) { - return fileSearch?.call(type); + return fileSearch?.call(type, this.fileSearch); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(String type)? codeInterpreter, - TResult Function(String type)? fileSearch, + TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult Function(String type, FunctionObject function)? function, required TResult orElse(), }) { if (fileSearch != null) { - return fileSearch(type); + return fileSearch(type, this.fileSearch); } return orElse(); } @@ -53194,7 +53246,10 @@ class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { } abstract class AssistantToolsFileSearch extends AssistantTools { - const factory AssistantToolsFileSearch({final String type}) = + const factory AssistantToolsFileSearch( + {required final String type, + @JsonKey(name: 'file_search', includeIfNull: false) + final AssistantToolsFileSearchFileSearch? fileSearch}) = _$AssistantToolsFileSearchImpl; const AssistantToolsFileSearch._() : super._(); @@ -53205,6 +53260,10 @@ abstract class AssistantToolsFileSearch extends AssistantTools { /// The type of tool being defined: `file_search` String get type; + + /// Overrides for the file search tool. + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? get fileSearch; @override @JsonKey(ignore: true) _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> @@ -53310,7 +53369,11 @@ class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { @optionalTypeArgs TResult when({ required TResult Function(String type) codeInterpreter, - required TResult Function(String type) fileSearch, + required TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch) + fileSearch, required TResult Function(String type, FunctionObject function) function, }) { return function(type, this.function); @@ -53320,7 +53383,11 @@ class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { @optionalTypeArgs TResult? whenOrNull({ TResult? Function(String type)? codeInterpreter, - TResult? Function(String type)? fileSearch, + TResult? Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult? Function(String type, FunctionObject function)? function, }) { return function?.call(type, this.function); @@ -53330,7 +53397,11 @@ class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { @optionalTypeArgs TResult maybeWhen({ TResult Function(String type)? codeInterpreter, - TResult Function(String type)? fileSearch, + TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, TResult Function(String type, FunctionObject function)? function, required TResult orElse(), }) { @@ -53405,6 +53476,187 @@ abstract class AssistantToolsFunction extends AssistantTools { get copyWith => throw _privateConstructorUsedError; } +AssistantToolsFileSearchFileSearch _$AssistantToolsFileSearchFileSearchFromJson( + Map json) { + return _AssistantToolsFileSearchFileSearch.fromJson(json); +} + +/// @nodoc +mixin _$AssistantToolsFileSearchFileSearch { + /// The maximum number of results the file search tool should output. 
The default is 20 for gpt-4* models + /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + /// + /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search + /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + @JsonKey(name: 'max_num_results', includeIfNull: false) + int? get maxNumResults => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $AssistantToolsFileSearchFileSearchCopyWith< + AssistantToolsFileSearchFileSearch> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $AssistantToolsFileSearchFileSearchCopyWith<$Res> { + factory $AssistantToolsFileSearchFileSearchCopyWith( + AssistantToolsFileSearchFileSearch value, + $Res Function(AssistantToolsFileSearchFileSearch) then) = + _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, + AssistantToolsFileSearchFileSearch>; + @useResult + $Res call( + {@JsonKey(name: 'max_num_results', includeIfNull: false) + int? maxNumResults}); +} + +/// @nodoc +class _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, + $Val extends AssistantToolsFileSearchFileSearch> + implements $AssistantToolsFileSearchFileSearchCopyWith<$Res> { + _$AssistantToolsFileSearchFileSearchCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? maxNumResults = freezed, + }) { + return _then(_value.copyWith( + maxNumResults: freezed == maxNumResults + ? _value.maxNumResults + : maxNumResults // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$AssistantToolsFileSearchFileSearchImplCopyWith<$Res> + implements $AssistantToolsFileSearchFileSearchCopyWith<$Res> { + factory _$$AssistantToolsFileSearchFileSearchImplCopyWith( + _$AssistantToolsFileSearchFileSearchImpl value, + $Res Function(_$AssistantToolsFileSearchFileSearchImpl) then) = + __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'max_num_results', includeIfNull: false) + int? maxNumResults}); +} + +/// @nodoc +class __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl<$Res> + extends _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, + _$AssistantToolsFileSearchFileSearchImpl> + implements _$$AssistantToolsFileSearchFileSearchImplCopyWith<$Res> { + __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl( + _$AssistantToolsFileSearchFileSearchImpl _value, + $Res Function(_$AssistantToolsFileSearchFileSearchImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? maxNumResults = freezed, + }) { + return _then(_$AssistantToolsFileSearchFileSearchImpl( + maxNumResults: freezed == maxNumResults + ? 
_value.maxNumResults + : maxNumResults // ignore: cast_nullable_to_non_nullable + as int?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$AssistantToolsFileSearchFileSearchImpl + extends _AssistantToolsFileSearchFileSearch { + const _$AssistantToolsFileSearchFileSearchImpl( + {@JsonKey(name: 'max_num_results', includeIfNull: false) + this.maxNumResults}) + : super._(); + + factory _$AssistantToolsFileSearchFileSearchImpl.fromJson( + Map json) => + _$$AssistantToolsFileSearchFileSearchImplFromJson(json); + + /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + /// + /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search + /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + @override + @JsonKey(name: 'max_num_results', includeIfNull: false) + final int? maxNumResults; + + @override + String toString() { + return 'AssistantToolsFileSearchFileSearch(maxNumResults: $maxNumResults)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$AssistantToolsFileSearchFileSearchImpl && + (identical(other.maxNumResults, maxNumResults) || + other.maxNumResults == maxNumResults)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, maxNumResults); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$AssistantToolsFileSearchFileSearchImplCopyWith< + _$AssistantToolsFileSearchFileSearchImpl> + get copyWith => __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl< + _$AssistantToolsFileSearchFileSearchImpl>(this, _$identity); + + @override + Map toJson() { + return _$$AssistantToolsFileSearchFileSearchImplToJson( + this, + ); + } +} + +abstract class _AssistantToolsFileSearchFileSearch + extends AssistantToolsFileSearchFileSearch { + const factory _AssistantToolsFileSearchFileSearch( + {@JsonKey(name: 'max_num_results', includeIfNull: false) + final int? maxNumResults}) = _$AssistantToolsFileSearchFileSearchImpl; + const _AssistantToolsFileSearchFileSearch._() : super._(); + + factory _AssistantToolsFileSearchFileSearch.fromJson( + Map json) = + _$AssistantToolsFileSearchFileSearchImpl.fromJson; + + @override + + /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + /// + /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search + /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + @JsonKey(name: 'max_num_results', includeIfNull: false) + int? 
get maxNumResults; + @override + @JsonKey(ignore: true) + _$$AssistantToolsFileSearchFileSearchImplCopyWith< + _$AssistantToolsFileSearchFileSearchImpl> + get copyWith => throw _privateConstructorUsedError; +} + MessageContent _$MessageContentFromJson(Map json) { switch (json['type']) { case 'image_file': diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 8b4963d6..5795dcc1 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -3906,24 +3906,13 @@ _$MessageContentTextAnnotationsFileCitationImpl Map json) => _$MessageContentTextAnnotationsFileCitationImpl( fileId: json['file_id'] as String, - quote: json['quote'] as String?, ); Map _$$MessageContentTextAnnotationsFileCitationImplToJson( - _$MessageContentTextAnnotationsFileCitationImpl instance) { - final val = { - 'file_id': instance.fileId, - }; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('quote', instance.quote); - return val; -} + _$MessageContentTextAnnotationsFileCitationImpl instance) => + { + 'file_id': instance.fileId, + }; _$MessageDeltaContentImageUrlObjectImpl _$$MessageDeltaContentImageUrlObjectImplFromJson( @@ -5114,14 +5103,28 @@ Map _$$AssistantToolsCodeInterpreterImplToJson( _$AssistantToolsFileSearchImpl _$$AssistantToolsFileSearchImplFromJson( Map json) => _$AssistantToolsFileSearchImpl( - type: json['type'] as String? ?? 'file_search', + type: json['type'] as String, + fileSearch: json['file_search'] == null + ? null + : AssistantToolsFileSearchFileSearch.fromJson( + json['file_search'] as Map), ); Map _$$AssistantToolsFileSearchImplToJson( - _$AssistantToolsFileSearchImpl instance) => - { - 'type': instance.type, - }; + _$AssistantToolsFileSearchImpl instance) { + final val = { + 'type': instance.type, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('file_search', instance.fileSearch?.toJson()); + return val; +} _$AssistantToolsFunctionImpl _$$AssistantToolsFunctionImplFromJson( Map json) => @@ -5138,6 +5141,27 @@ Map _$$AssistantToolsFunctionImplToJson( 'function': instance.function.toJson(), }; +_$AssistantToolsFileSearchFileSearchImpl + _$$AssistantToolsFileSearchFileSearchImplFromJson( + Map json) => + _$AssistantToolsFileSearchFileSearchImpl( + maxNumResults: json['max_num_results'] as int?, + ); + +Map _$$AssistantToolsFileSearchFileSearchImplToJson( + _$AssistantToolsFileSearchFileSearchImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('max_num_results', instance.maxNumResults); + return val; +} + _$MessageContentImageFileObjectImpl _$$MessageContentImageFileObjectImplFromJson(Map json) => _$MessageContentImageFileObjectImpl( diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 07b38bb8..bb054143 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -4,7 +4,7 @@ openapi: 3.0.0 info: title: OpenAI API description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. 
- version: "2.0.0" + version: "2.1.0" termsOfService: https://openai.com/policies/terms-of-use contact: name: OpenAI Support @@ -1465,7 +1465,7 @@ paths: operationId: cancelBatch tags: - Batch - summary: Cancels an in-progress batch. + summary: Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file. parameters: - in: path name: batch_id @@ -2668,8 +2668,13 @@ components: See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + `fine-tune`. + The contents of the file should differ depending on if the model uses the + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. + See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. type: string example: "file-abc123" @@ -3710,7 +3715,23 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - default: "file_search" + default: file_search + file_search: + type: object + description: Overrides for the file search tool. + properties: + max_num_results: + type: integer + minimum: 1 + maximum: 50 + description: | + The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than `max_num_results` results. See the [file search + tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + required: + - type AssistantToolsFunction: type: object description: Function tool @@ -4885,12 +4906,8 @@ components: file_id: description: The ID of the specific File the citation is from. type: string - quote: - description: The specific quote in the file. - type: string required: - file_id - # - quote # https://github.com/openai/openai-openapi/issues/263 MessageContentTextAnnotationsFilePathObject: type: object description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. @@ -6115,7 +6132,7 @@ components: See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. + Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. endpoint: $ref: "#/components/schemas/BatchEndpoint" completion_window: diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 395d6481..6763b140 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -2,7 +2,7 @@ openapi: 3.0.0 info: title: OpenAI API description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. 
- version: "2.0.0" + version: "2.1.0" termsOfService: https://openai.com/policies/terms-of-use contact: name: OpenAI Support @@ -16,7 +16,7 @@ tags: - name: Assistants description: Build Assistants that can call models and use tools. - name: Audio - description: Learn how to turn audio into text or text into audio. + description: Turn audio into text or text into audio. - name: Chat description: Given a list of messages comprising a conversation, the model will return a response. - name: Completions @@ -1506,9 +1506,9 @@ paths: The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details. - The Fine-tuning API only supports `.jsonl` files. + The Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models. - The Batch API only supports `.jsonl` files up to 100 MB in size. + The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input). Please [contact us](https://help.openai.com/) if you need to increase these storage limits. requestBody: @@ -4005,7 +4005,8 @@ paths: "incomplete_details": null, "usage": null, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } - title: Streaming @@ -4067,13 +4068,13 @@ paths: data: {"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} + data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -4105,7 +4106,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"} + 
{"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: done data: [DONE] @@ -4236,13 +4237,13 @@ paths: data: {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} @@ -4268,7 +4269,7 @@ paths: data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} event: thread.run.requires_action - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -4400,7 +4401,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true }, { "id": "run_abc456", @@ -4446,7 +4448,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } ], "first_id": "run_abc123", @@ -4552,7 +4555,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } - title: Streaming request: @@ -4596,13 +4600,13 @@ paths: main(); response: | event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -4634,7 +4638,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -4751,13 +4755,13 @@ paths: main(); response: | event: thread.run.created - data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -4789,7 +4793,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -4893,7 +4897,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } post: operationId: modifyRun @@ -5021,7 +5026,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: @@ -5167,7 +5173,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } - title: Streaming @@ -5234,10 +5241,10 @@ paths: data: 
{"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} @@ -5275,7 +5282,7 @@ paths: data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -5373,7 +5380,9 @@ paths: "usage": null, "temperature": 1.0, "top_p": 1.0, - "response_format": "auto" + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true } /threads/{thread_id}/runs/{run_id}/steps: @@ -6655,7 +6664,7 @@ paths: See [upload file](/docs/api-reference/files/create) for how to upload a file. - Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/requestInput), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. endpoint: type: string enum: @@ -6930,7 +6939,7 @@ paths: operationId: cancelBatch tags: - Batch - summary: Cancels an in-progress batch. + summary: Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file. parameters: - in: path name: batch_id @@ -7501,6 +7510,20 @@ components: required: - role + FineTuneChatCompletionRequestAssistantMessage: + allOf: + - type: object + title: Assistant message + deprecated: false + properties: + weight: + type: integer + enum: [ 0, 1 ] + description: "Controls whether the assistant message is trained against (0 or 1)" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + required: + - role + ChatCompletionRequestToolMessage: type: object title: Tool message @@ -8647,6 +8670,8 @@ components: Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format. + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. 
type: string example: "file-abc123" @@ -9594,6 +9619,98 @@ components: "step_number": 88 } + FinetuneChatRequestInput: + type: object + description: The per-line training example of a fine-tuning input file for chat models + properties: + messages: + type: array + minItems: 1 + items: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/FineTuneChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + tools: + type: array + description: A list of tools the model may generate JSON inputs for. + items: + $ref: "#/components/schemas/ChatCompletionTool" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + functions: + deprecated: true + description: + A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + x-oaiMeta: + name: Training format for chat models + example: | + { + "messages": [ + { "role": "user", "content": "What is the weather in San Francisco?" }, + { + "role": "assistant", + "tool_calls": [ + { + "id": "call_id", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\"location\": \"San Francisco, USA\", \"format\": \"celsius\"}" + } + } + ] + } + ], + "parallel_tool_calls": false, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and country, eg. San Francisco, USA" + }, + "format": { "type": "string", "enum": ["celsius", "fahrenheit"] } + }, + "required": ["location", "format"] + } + } + } + ] + } + + FinetuneCompletionRequestInput: + type: object + description: The per-line training example of a fine-tuning input file for completions models + properties: + prompt: + type: string + description: The input prompt for this training example. + completion: + type: string + description: The desired completion for this training example. + x-oaiMeta: + name: Training format for completions models + example: | + { + "prompt": "What is the answer to 2+2", + "completion": "4" + } + CompletionUsage: type: object description: Usage statistics for the completion request. @@ -10083,6 +10200,29 @@ components: - type AssistantToolsFileSearch: + type: object + title: FileSearch tool + properties: + type: + type: string + description: "The type of tool being defined: `file_search`" + enum: [ "file_search" ] + file_search: + type: object + description: Overrides for the file search tool. + properties: + max_num_results: + type: integer + minimum: 1 + maximum: 50 + description: | + The maximum number of results the file search tool should output. The default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. 
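As a concrete reference for the `AssistantToolsFileSearch` schema above, a `file_search` tool entry that uses the new optional `max_num_results` override might be shaped like the sketch below. It is shown as a Dart map literal to stay in the package's language; the value is illustrative and not part of this diff.

// Illustrative JSON shape for AssistantToolsFileSearch with the new
// max_num_results override (must be between 1 and 50; the default is 20 for
// gpt-4* models and 5 for gpt-3.5-turbo, as described above).
const fileSearchTool = <String, dynamic>{
  'type': 'file_search',
  'file_search': {
    'max_num_results': 10, // example value; omit to use the default
  },
};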
+ required: + - type + + AssistantToolsFileSearchTypeOnly: type: object title: FileSearch tool properties: @@ -11006,7 +11146,7 @@ components: items: oneOf: - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" x-oaiExpandable: true description: A list of files attached to the message, and the tools they were added to. nullable: true @@ -11151,7 +11291,7 @@ components: items: oneOf: - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" x-oaiExpandable: true description: A list of files attached to the message, and the tools they should be added to. required: @@ -11384,12 +11524,8 @@ components: file_id: description: The ID of the specific File the citation is from. type: string - quote: - description: The specific quote in the file. - type: string required: - file_id - - quote start_index: type: integer minimum: 0 @@ -13133,6 +13269,12 @@ x-oaiMeta: - type: endpoint key: cancelFineTuningJob path: cancel + - type: object + key: FinetuneChatRequestInput + path: chat-input + - type: object + key: FinetuneCompletionRequestInput + path: completions-input - type: object key: FineTuningJob path: object @@ -13167,10 +13309,10 @@ x-oaiMeta: path: object - type: object key: BatchRequestInput - path: requestInput + path: request-input - type: object key: BatchRequestOutput - path: requestOutput + path: request-output - id: files title: Files description: | From e20ba18fe4b6158a8bf31ef73dd599317f1e699a Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 18 Jul 2024 23:48:20 +0200 Subject: [PATCH 068/251] feat: Add support for disabling parallel tool calls in openai_dart (#492) --- .../create_chat_completion_request.dart | 8 + .../generated/schema/create_run_request.dart | 8 + .../schema/create_thread_and_run_request.dart | 8 + .../lib/src/generated/schema/run_object.dart | 6 + .../src/generated/schema/schema.freezed.dart | 157 +++++++++++++++++- .../lib/src/generated/schema/schema.g.dart | 8 + packages/openai_dart/oas/openapi_curated.yaml | 11 ++ .../openai_dart/oas/openapi_official.yaml | 7 + 8 files changed, 209 insertions(+), 4 deletions(-) diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 997af317..8c740fde 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -104,6 +104,12 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? toolChoice, + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + @Default(true) + bool? parallelToolCalls, + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). @JsonKey(includeIfNull: false) String? 
user, @@ -149,6 +155,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'top_p', 'tools', 'tool_choice', + 'parallel_tool_calls', 'user', 'function_call', 'functions' @@ -237,6 +244,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'top_p': topP, 'tools': tools, 'tool_choice': toolChoice, + 'parallel_tool_calls': parallelToolCalls, 'user': user, 'function_call': functionCall, 'functions': functions, diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 95ad74a8..375ea8a0 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -69,6 +69,12 @@ class CreateRunRequest with _$CreateRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? toolChoice, + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + @Default(true) + bool? parallelToolCalls, + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -101,6 +107,7 @@ class CreateRunRequest with _$CreateRunRequest { 'max_completion_tokens', 'truncation_strategy', 'tool_choice', + 'parallel_tool_calls', 'response_format', 'stream' ]; @@ -155,6 +162,7 @@ class CreateRunRequest with _$CreateRunRequest { 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, 'tool_choice': toolChoice, + 'parallel_tool_calls': parallelToolCalls, 'response_format': responseFormat, 'stream': stream, }; diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index 5f7692df..ff5013d5 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -68,6 +68,12 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? toolChoice, + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + @Default(true) + bool? parallelToolCalls, + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
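To illustrate the new `parallel_tool_calls` flag added in this commit, a minimal sketch of a chat completion request with parallel tool calls disabled might look like the following. Only the `parallelToolCalls` field comes from this diff; the `OpenAIClient` call, the message and tool factories, and the model id are assumed from the existing openai_dart API and are shown here purely for illustration.

import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  final client = OpenAIClient(apiKey: 'OPENAI_API_KEY');

  final res = await client.createChatCompletion(
    request: CreateChatCompletionRequest(
      model: ChatCompletionModel.modelId('gpt-4o'),
      messages: [
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string(
            'What is the weather in Boston and in Madrid?',
          ),
        ),
      ],
      tools: [
        ChatCompletionTool(
          type: ChatCompletionToolType.function,
          function: FunctionObject(
            name: 'get_current_weather',
            description: 'Get the current weather in a given location',
            parameters: {
              'type': 'object',
              'properties': {
                'location': {'type': 'string'},
              },
              'required': ['location'],
            },
          ),
        ),
      ],
      // Added in this patch: defaults to true; false forces at most one
      // tool call per assistant turn.
      parallelToolCalls: false,
    ),
  );

  print(res.choices.first.message);
}

Omitting `parallelToolCalls` keeps the server default of `true`, matching the `@Default(true)` annotation in the generated schema above.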
@@ -100,6 +106,7 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { 'max_completion_tokens', 'truncation_strategy', 'tool_choice', + 'parallel_tool_calls', 'response_format', 'stream' ]; @@ -154,6 +161,7 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, 'tool_choice': toolChoice, + 'parallel_tool_calls': parallelToolCalls, 'response_format': responseFormat, 'stream': stream, }; diff --git a/packages/openai_dart/lib/src/generated/schema/run_object.dart b/packages/openai_dart/lib/src/generated/schema/run_object.dart index e34403a8..d3e7dcf5 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_object.dart @@ -99,6 +99,10 @@ class RunObject with _$RunObject { @JsonKey(name: 'tool_choice') required RunObjectToolChoice? toolChoice, + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls') required bool? parallelToolCalls, + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -140,6 +144,7 @@ class RunObject with _$RunObject { 'max_completion_tokens', 'truncation_strategy', 'tool_choice', + 'parallel_tool_calls', 'response_format' ]; @@ -187,6 +192,7 @@ class RunObject with _$RunObject { 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, 'tool_choice': toolChoice, + 'parallel_tool_calls': parallelToolCalls, 'response_format': responseFormat, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 0bc9015f..68caea69 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -3500,6 +3500,11 @@ mixin _$CreateChatCompletionRequest { ChatCompletionToolChoiceOption? get toolChoice => throw _privateConstructorUsedError; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls => throw _privateConstructorUsedError; + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; @@ -3565,6 +3570,8 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @JsonKey(includeIfNull: false) String? 
user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -3611,6 +3618,7 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, Object? topP = freezed, Object? tools = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? user = freezed, Object? functionCall = freezed, Object? functions = freezed, @@ -3688,6 +3696,10 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as ChatCompletionToolChoiceOption?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, user: freezed == user ? _value.user : user // ignore: cast_nullable_to_non_nullable @@ -3813,6 +3825,8 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @JsonKey(includeIfNull: false) String? user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -3864,6 +3878,7 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> Object? topP = freezed, Object? tools = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? user = freezed, Object? functionCall = freezed, Object? functions = freezed, @@ -3941,6 +3956,10 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as ChatCompletionToolChoiceOption?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, user: freezed == user ? _value.user : user // ignore: cast_nullable_to_non_nullable @@ -3985,6 +4004,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + this.parallelToolCalls = true, @JsonKey(includeIfNull: false) this.user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -4142,6 +4163,12 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) final ChatCompletionToolChoiceOption? toolChoice; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @override + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls; + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). 
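The same flag applies to the Assistants API types changed by this patch (`CreateRunRequest`, `CreateThreadAndRunRequest`, `RunObject`). A minimal sketch of creating a run with parallel tool calls disabled follows; the `createThreadRun` method name and the placeholder ids are assumptions for illustration, while `parallelToolCalls` on the request and on `RunObject` come from this diff.

import 'package:openai_dart/openai_dart.dart';

Future<void> runWithoutParallelToolCalls(OpenAIClient client) async {
  final run = await client.createThreadRun(
    threadId: 'thread_abc123', // placeholder thread id
    request: CreateRunRequest(
      assistantId: 'asst_abc123', // placeholder assistant id
      // Omit to keep the server default (true).
      parallelToolCalls: false,
    ),
  );

  // RunObject now surfaces the value the server applied.
  print(run.parallelToolCalls);
}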
@override @JsonKey(includeIfNull: false) @@ -4180,7 +4207,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @override String toString() { - return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, user: $user, functionCall: $functionCall, functions: $functions)'; + return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, user: $user, functionCall: $functionCall, functions: $functions)'; } @override @@ -4216,6 +4243,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { const DeepCollectionEquality().equals(other._tools, _tools) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && + (identical(other.parallelToolCalls, parallelToolCalls) || + other.parallelToolCalls == parallelToolCalls) && (identical(other.user, user) || other.user == user) && (identical(other.functionCall, functionCall) || other.functionCall == functionCall) && @@ -4245,6 +4274,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { topP, const DeepCollectionEquality().hash(_tools), toolChoice, + parallelToolCalls, user, functionCall, const DeepCollectionEquality().hash(_functions) @@ -4297,6 +4327,8 @@ abstract class _CreateChatCompletionRequest @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) final ChatCompletionToolChoiceOption? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls, @JsonKey(includeIfNull: false) final String? user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -4426,6 +4458,12 @@ abstract class _CreateChatCompletionRequest ChatCompletionToolChoiceOption? get toolChoice; @override + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls; + @override + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). @JsonKey(includeIfNull: false) String? get user; @@ -26742,6 +26780,11 @@ mixin _$RunObject { @JsonKey(name: 'tool_choice') RunObjectToolChoice? get toolChoice => throw _privateConstructorUsedError; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls') + bool? get parallelToolCalls => throw _privateConstructorUsedError; + /// Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -26793,6 +26836,7 @@ abstract class $RunObjectCopyWith<$Res> { @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') RunObjectToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls') bool? parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat responseFormat}); @@ -26844,6 +26888,7 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = null, }) { return _then(_value.copyWith( @@ -26947,6 +26992,10 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as RunObjectToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: null == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -27075,6 +27124,7 @@ abstract class _$$RunObjectImplCopyWith<$Res> @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') RunObjectToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls') bool? parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat responseFormat}); @@ -27131,6 +27181,7 @@ class __$$RunObjectImplCopyWithImpl<$Res> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = null, }) { return _then(_$RunObjectImpl( @@ -27234,6 +27285,10 @@ class __$$RunObjectImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as RunObjectToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: null == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -27273,6 +27328,7 @@ class _$RunObjectImpl extends _RunObject { @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') required this.toolChoice, + @JsonKey(name: 'parallel_tool_calls') required this.parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') required this.responseFormat}) @@ -27421,6 +27477,12 @@ class _$RunObjectImpl extends _RunObject { @JsonKey(name: 'tool_choice') final RunObjectToolChoice? toolChoice; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @override + @JsonKey(name: 'parallel_tool_calls') + final bool? parallelToolCalls; + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
/// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -27433,7 +27495,7 @@ class _$RunObjectImpl extends _RunObject { @override String toString() { - return 'RunObject(id: $id, object: $object, createdAt: $createdAt, threadId: $threadId, assistantId: $assistantId, status: $status, requiredAction: $requiredAction, lastError: $lastError, expiresAt: $expiresAt, startedAt: $startedAt, cancelledAt: $cancelledAt, failedAt: $failedAt, completedAt: $completedAt, incompleteDetails: $incompleteDetails, model: $model, instructions: $instructions, tools: $tools, metadata: $metadata, usage: $usage, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat)'; + return 'RunObject(id: $id, object: $object, createdAt: $createdAt, threadId: $threadId, assistantId: $assistantId, status: $status, requiredAction: $requiredAction, lastError: $lastError, expiresAt: $expiresAt, startedAt: $startedAt, cancelledAt: $cancelledAt, failedAt: $failedAt, completedAt: $completedAt, incompleteDetails: $incompleteDetails, model: $model, instructions: $instructions, tools: $tools, metadata: $metadata, usage: $usage, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, responseFormat: $responseFormat)'; } @override @@ -27483,6 +27545,8 @@ class _$RunObjectImpl extends _RunObject { other.truncationStrategy == truncationStrategy) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && + (identical(other.parallelToolCalls, parallelToolCalls) || + other.parallelToolCalls == parallelToolCalls) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat)); } @@ -27516,6 +27580,7 @@ class _$RunObjectImpl extends _RunObject { maxCompletionTokens, truncationStrategy, toolChoice, + parallelToolCalls, responseFormat ]); @@ -27566,6 +27631,8 @@ abstract class _RunObject extends RunObject { @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') required final RunObjectToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls') + required final bool? parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') required final RunObjectResponseFormat responseFormat}) = _$RunObjectImpl; @@ -27698,6 +27765,12 @@ abstract class _RunObject extends RunObject { RunObjectToolChoice? get toolChoice; @override + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls') + bool? get parallelToolCalls; + @override + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -29562,6 +29635,11 @@ mixin _$CreateRunRequest { CreateRunRequestToolChoice? 
get toolChoice => throw _privateConstructorUsedError; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls => throw _privateConstructorUsedError; + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -29611,6 +29689,8 @@ abstract class $CreateRunRequestCopyWith<$Res> { @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? responseFormat, @@ -29648,6 +29728,7 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -29704,6 +29785,10 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateRunRequestToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -29797,6 +29882,8 @@ abstract class _$$CreateRunRequestImplCopyWith<$Res> @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? responseFormat, @@ -29836,6 +29923,7 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -29892,6 +29980,10 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateRunRequestToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: freezed == responseFormat ? 
_value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -29930,6 +30022,8 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + this.parallelToolCalls = true, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @@ -30043,6 +30137,12 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateRunRequestToolChoice? toolChoice; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @override + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls; + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -30060,7 +30160,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @override String toString() { - return 'CreateRunRequest(assistantId: $assistantId, model: $model, instructions: $instructions, additionalInstructions: $additionalInstructions, additionalMessages: $additionalMessages, tools: $tools, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat, stream: $stream)'; + return 'CreateRunRequest(assistantId: $assistantId, model: $model, instructions: $instructions, additionalInstructions: $additionalInstructions, additionalMessages: $additionalMessages, tools: $tools, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, responseFormat: $responseFormat, stream: $stream)'; } @override @@ -30090,6 +30190,8 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { other.truncationStrategy == truncationStrategy) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && + (identical(other.parallelToolCalls, parallelToolCalls) || + other.parallelToolCalls == parallelToolCalls) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat) && (identical(other.stream, stream) || other.stream == stream)); @@ -30112,6 +30214,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { maxCompletionTokens, truncationStrategy, toolChoice, + parallelToolCalls, responseFormat, stream); @@ -30154,6 +30257,8 @@ abstract class _CreateRunRequest extends CreateRunRequest { @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) final CreateRunRequestResponseFormat? 
responseFormat, @@ -30239,6 +30344,12 @@ abstract class _CreateRunRequest extends CreateRunRequest { CreateRunRequestToolChoice? get toolChoice; @override + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls; + @override + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -32834,6 +32945,11 @@ mixin _$CreateThreadAndRunRequest { CreateThreadAndRunRequestToolChoice? get toolChoice => throw _privateConstructorUsedError; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls => throw _privateConstructorUsedError; + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -32882,6 +32998,8 @@ abstract class $CreateThreadAndRunRequestCopyWith<$Res> { @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -32922,6 +33040,7 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -32978,6 +33097,10 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateThreadAndRunRequestToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -33095,6 +33218,8 @@ abstract class _$$CreateThreadAndRunRequestImplCopyWith<$Res> @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -33140,6 +33265,7 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? 
toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -33196,6 +33322,10 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateThreadAndRunRequestToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -33230,6 +33360,8 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + this.parallelToolCalls = true, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @@ -33332,6 +33464,12 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateThreadAndRunRequestToolChoice? toolChoice; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @override + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls; + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
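The `copyWith` plumbing added above means an existing thread-and-run request can be cloned with parallel tool calling switched off. A minimal sketch (illustrative, not taken from the repository), assuming a `CreateThreadAndRunRequest` named `baseRequest` built elsewhere:

// Illustrative only: `baseRequest` is a hypothetical CreateThreadAndRunRequest
// constructed elsewhere; the generated copyWith above accepts the new flag.
final sequentialRequest = baseRequest.copyWith(parallelToolCalls: false);
assert(sequentialRequest.parallelToolCalls == false);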
@@ -33349,7 +33487,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @override String toString() { - return 'CreateThreadAndRunRequest(assistantId: $assistantId, thread: $thread, model: $model, instructions: $instructions, tools: $tools, toolResources: $toolResources, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat, stream: $stream)'; + return 'CreateThreadAndRunRequest(assistantId: $assistantId, thread: $thread, model: $model, instructions: $instructions, tools: $tools, toolResources: $toolResources, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, responseFormat: $responseFormat, stream: $stream)'; } @override @@ -33378,6 +33516,8 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { other.truncationStrategy == truncationStrategy) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && + (identical(other.parallelToolCalls, parallelToolCalls) || + other.parallelToolCalls == parallelToolCalls) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat) && (identical(other.stream, stream) || other.stream == stream)); @@ -33400,6 +33540,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { maxCompletionTokens, truncationStrategy, toolChoice, + parallelToolCalls, responseFormat, stream); @@ -33441,6 +33582,8 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateThreadAndRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) final CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -33526,6 +33669,12 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { CreateThreadAndRunRequestToolChoice? get toolChoice; @override + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls; + @override + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
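The generated JSON converters in schema.g.dart (next diff) default `parallel_tool_calls` to `true` when the key is missing and only emit it when the value is non-null. A rough sketch of the resulting behavior (illustrative, not taken from the repository), assuming the generated `CreateRunRequest` API; the assistant id is a placeholder:

// Illustrative only: 'asst_abc123' is a placeholder assistant id.
final runRequest = CreateRunRequest(
  assistantId: 'asst_abc123',
  parallelToolCalls: false, // defaults to true when omitted
);
print(runRequest.toJson()['parallel_tool_calls']); // false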
diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 5795dcc1..e52d47f7 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -335,6 +335,7 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( .toList(), toolChoice: const _ChatCompletionToolChoiceOptionConverter() .fromJson(json['tool_choice']), + parallelToolCalls: json['parallel_tool_calls'] as bool? ?? true, user: json['user'] as String?, functionCall: const _ChatCompletionFunctionCallConverter() .fromJson(json['function_call']), @@ -376,6 +377,7 @@ Map _$$CreateChatCompletionRequestImplToJson( 'tool_choice', const _ChatCompletionToolChoiceOptionConverter() .toJson(instance.toolChoice)); + writeNotNull('parallel_tool_calls', instance.parallelToolCalls); writeNotNull('user', instance.user); writeNotNull( 'function_call', @@ -2583,6 +2585,7 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => json['truncation_strategy'] as Map), toolChoice: const _RunObjectToolChoiceConverter().fromJson(json['tool_choice']), + parallelToolCalls: json['parallel_tool_calls'] as bool?, responseFormat: const _RunObjectResponseFormatConverter() .fromJson(json['response_format']), ); @@ -2623,6 +2626,7 @@ Map _$$RunObjectImplToJson(_$RunObjectImpl instance) { val['truncation_strategy'] = instance.truncationStrategy?.toJson(); val['tool_choice'] = _$JsonConverterToJson( instance.toolChoice, const _RunObjectToolChoiceConverter().toJson); + val['parallel_tool_calls'] = instance.parallelToolCalls; val['response_format'] = const _RunObjectResponseFormatConverter().toJson(instance.responseFormat); return val; @@ -2836,6 +2840,7 @@ _$CreateRunRequestImpl _$$CreateRunRequestImplFromJson( json['truncation_strategy'] as Map), toolChoice: const _CreateRunRequestToolChoiceConverter() .fromJson(json['tool_choice']), + parallelToolCalls: json['parallel_tool_calls'] as bool? ?? true, responseFormat: const _CreateRunRequestResponseFormatConverter() .fromJson(json['response_format']), stream: json['stream'] as bool?, @@ -2868,6 +2873,7 @@ Map _$$CreateRunRequestImplToJson( writeNotNull('truncation_strategy', instance.truncationStrategy?.toJson()); writeNotNull('tool_choice', const _CreateRunRequestToolChoiceConverter().toJson(instance.toolChoice)); + writeNotNull('parallel_tool_calls', instance.parallelToolCalls); writeNotNull( 'response_format', const _CreateRunRequestResponseFormatConverter() @@ -3158,6 +3164,7 @@ _$CreateThreadAndRunRequestImpl _$$CreateThreadAndRunRequestImplFromJson( json['truncation_strategy'] as Map), toolChoice: const _CreateThreadAndRunRequestToolChoiceConverter() .fromJson(json['tool_choice']), + parallelToolCalls: json['parallel_tool_calls'] as bool? ?? 
true, responseFormat: const _CreateThreadAndRunRequestResponseFormatConverter() .fromJson(json['response_format']), stream: json['stream'] as bool?, @@ -3191,6 +3198,7 @@ Map _$$CreateThreadAndRunRequestImplToJson( 'tool_choice', const _CreateThreadAndRunRequestToolChoiceConverter() .toJson(instance.toolChoice)); + writeNotNull('parallel_tool_calls', instance.parallelToolCalls); writeNotNull( 'response_format', const _CreateThreadAndRunRequestResponseFormatConverter() diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index bb054143..31fe55f9 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1974,6 +1974,13 @@ components: `required` means the model must call one or more tools. enum: [ none, auto, required ] - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" + parallel_tool_calls: ¶llel_tool_calls + description: | + Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + during tool use. + type: boolean + default: true + nullable: true user: *end_user_param_configuration function_call: title: ChatCompletionFunctionCall @@ -3959,6 +3966,7 @@ components: `required` means the model must call one or more tools before responding to the user. enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" + parallel_tool_calls: *parallel_tool_calls response_format: description: | Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. @@ -3997,6 +4005,7 @@ components: - max_completion_tokens - truncation_strategy - tool_choice + - parallel_tool_calls - response_format RunCompletionUsage: type: object @@ -4136,6 +4145,7 @@ components: `required` means the model must call one or more tools before responding to the user. enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" + parallel_tool_calls: *parallel_tool_calls response_format: description: | Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. @@ -4366,6 +4376,7 @@ components: `required` means the model must call one or more tools before responding to the user. enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" + parallel_tool_calls: *parallel_tool_calls response_format: description: | Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 6763b140..1b0f8c0c 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -7661,6 +7661,11 @@ components: - type - function + ParallelToolCalls: + description: Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. 
+ type: boolean + default: true + ChatCompletionMessageToolCalls: type: array description: The tool calls generated by the model, such as function calls. @@ -7966,6 +7971,8 @@ components: $ref: "#/components/schemas/ChatCompletionTool" tool_choice: $ref: "#/components/schemas/ChatCompletionToolChoiceOption" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" user: *end_user_param_configuration function_call: deprecated: true From 89b06b8d40e816a1b496ae3d77a1a8dc121b65e9 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 18 Jul 2024 23:56:40 +0200 Subject: [PATCH 069/251] feat: Add support for disabling parallel tool calls in ChatOpenAI (#493) --- .../langchain_openai/lib/src/chat_models/chat_openai.dart | 2 ++ packages/langchain_openai/lib/src/chat_models/types.dart | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index 83bb8cd5..fa03bf4c 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -305,6 +305,8 @@ class ChatOpenAI extends BaseChatModel { : null, temperature: options?.temperature ?? defaultOptions.temperature, topP: options?.topP ?? defaultOptions.topP, + parallelToolCalls: + options?.parallelToolCalls ?? defaultOptions.parallelToolCalls, user: options?.user ?? defaultOptions.user, streamOptions: stream ? const ChatCompletionStreamOptions(includeUsage: true) : null, diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index a82ab9a1..859c5cdf 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -18,6 +18,7 @@ class ChatOpenAIOptions extends ChatModelOptions { this.stop, this.temperature, this.topP, + this.parallelToolCalls, this.user, super.tools, super.toolChoice, @@ -123,6 +124,13 @@ class ChatOpenAIOptions extends ChatModelOptions { /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p final double? topP; + /// Whether to enable parallel tool calling during tool use. + /// By default, it is enabled. + /// + /// + /// Ref: https://platform.openai.com/docs/guides/function-calling/parallel-function-calling + final bool? parallelToolCalls; + /// A unique identifier representing your end-user, which can help OpenAI to /// monitor and detect abuse. 
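In practice the new option is passed through `ChatOpenAIOptions`. A rough usage sketch (illustrative, not taken from the repository), assuming langchain's `ToolSpec`, `ChatOpenAI`, and `invoke` APIs, a placeholder API key, and a hypothetical weather tool:

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  // Placeholder API key; the weather tool below is hypothetical.
  final chatModel = ChatOpenAI(
    apiKey: 'OPENAI_API_KEY',
    defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'),
  );

  const weatherTool = ToolSpec(
    name: 'get_weather',
    description: 'Returns the current weather for a city',
    inputJsonSchema: {
      'type': 'object',
      'properties': {
        'city': {'type': 'string'},
      },
      'required': ['city'],
    },
  );

  final result = await chatModel.invoke(
    PromptValue.string('What is the weather in Paris and in Rome?'),
    options: const ChatOpenAIOptions(
      tools: [weatherTool],
      // With parallel tool calling disabled, the model emits at most one
      // tool call per assistant turn instead of batching both cities.
      parallelToolCalls: false,
    ),
  );
  print(result.output.toolCalls);
}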
/// From 94bdd3a9cd011ceaef34d459bcaa552880b7404e Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 19 Jul 2024 00:19:05 +0200 Subject: [PATCH 070/251] feat: Add support for service tier in openai_dart (#494) --- .../create_chat_completion_request.dart | 34 +++ .../create_chat_completion_response.dart | 11 + ...reate_chat_completion_stream_response.dart | 11 + .../lib/src/generated/schema/schema.dart | 1 + .../src/generated/schema/schema.freezed.dart | 199 +++++++++++++++++- .../lib/src/generated/schema/schema.g.dart | 23 ++ .../src/generated/schema/service_tier.dart | 18 ++ packages/openai_dart/oas/openapi_curated.yaml | 25 +++ .../openai_dart/oas/openapi_official.yaml | 33 ++- packages/openai_dart/pubspec.yaml | 2 +- 10 files changed, 352 insertions(+), 5 deletions(-) create mode 100644 packages/openai_dart/lib/src/generated/schema/service_tier.dart diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 8c740fde..657f7268 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -68,6 +68,20 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. @JsonKey(includeIfNull: false) int? seed, + /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers + /// subscribed to the scale tier service: + /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + /// - If set to 'default', the request will be processed using the default service tier with a lower + /// uptime SLA and no latency guarantee. + /// + /// When this parameter is set, the response body will include the `service_tier` utilized. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + CreateChatCompletionRequestServiceTier? serviceTier, + /// Up to 4 sequences where the API will stop generating further tokens. @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) @@ -148,6 +162,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'presence_penalty', 'response_format', 'seed', + 'service_tier', 'stop', 'stream', 'stream_options', @@ -237,6 +252,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'presence_penalty': presencePenalty, 'response_format': responseFormat, 'seed': seed, + 'service_tier': serviceTier, 'stop': stop, 'stream': stream, 'stream_options': streamOptions, @@ -398,6 +414,24 @@ class ChatCompletionResponseFormat with _$ChatCompletionResponseFormat { } } +// ========================================== +// ENUM: CreateChatCompletionRequestServiceTier +// ========================================== + +/// Specifies the latency tier to use for processing the request. This parameter is relevant for customers +/// subscribed to the scale tier service: +/// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. +/// - If set to 'default', the request will be processed using the default service tier with a lower +/// uptime SLA and no latency guarantee. +/// +/// When this parameter is set, the response body will include the `service_tier` utilized. 
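A rough usage sketch of the new request parameter (illustrative, not taken from the repository), assuming the generated openai_dart request API; the model id and message content are placeholders:

// Illustrative only: placeholder model id and message.
final chatRequest = CreateChatCompletionRequest(
  model: ChatCompletionModel.modelId('gpt-4o'),
  messages: [
    ChatCompletionMessage.user(
      content: ChatCompletionUserMessageContent.string('Hello!'),
    ),
  ],
  // 'auto' consumes scale tier credits while available; the response's
  // service_tier field reports which tier actually served the request.
  serviceTier: CreateChatCompletionRequestServiceTier.auto,
);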
+enum CreateChatCompletionRequestServiceTier { + @JsonValue('auto') + auto, + @JsonValue('default') + vDefault, +} + // ========================================== // CLASS: ChatCompletionStop // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart index 95771ce0..9a9687d7 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart @@ -27,6 +27,15 @@ class CreateChatCompletionResponse with _$CreateChatCompletionResponse { /// The model used for the chat completion. required String model, + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + ServiceTier? serviceTier, + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. @@ -50,6 +59,7 @@ class CreateChatCompletionResponse with _$CreateChatCompletionResponse { 'choices', 'created', 'model', + 'service_tier', 'system_fingerprint', 'object', 'usage' @@ -67,6 +77,7 @@ class CreateChatCompletionResponse with _$CreateChatCompletionResponse { 'choices': choices, 'created': created, 'model': model, + 'service_tier': serviceTier, 'system_fingerprint': systemFingerprint, 'object': object, 'usage': usage, diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart index 724f4066..cc0341fc 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart @@ -29,6 +29,15 @@ class CreateChatCompletionStreamResponse /// The model to generate the completion. @JsonKey(includeIfNull: false) String? model, + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + ServiceTier? serviceTier, + /// This fingerprint represents the backend configuration that the model runs with. 
/// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact @@ -53,6 +62,7 @@ class CreateChatCompletionStreamResponse 'choices', 'created', 'model', + 'service_tier', 'system_fingerprint', 'object', 'usage' @@ -70,6 +80,7 @@ class CreateChatCompletionStreamResponse 'choices': choices, 'created': created, 'model': model, + 'service_tier': serviceTier, 'system_fingerprint': systemFingerprint, 'object': object, 'usage': usage, diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart index 6d9b2613..793315da 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -30,6 +30,7 @@ part 'chat_completion_stream_options.dart'; part 'create_chat_completion_response.dart'; part 'chat_completion_response_choice.dart'; part 'chat_completion_finish_reason.dart'; +part 'service_tier.dart'; part 'chat_completion_logprobs.dart'; part 'chat_completion_token_logprob.dart'; part 'chat_completion_token_top_logprob.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 68caea69..7737861a 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -3458,6 +3458,20 @@ mixin _$CreateChatCompletionRequest { @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; + /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers + /// subscribed to the scale tier service: + /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + /// - If set to 'default', the request will be processed using the default service tier with a lower + /// uptime SLA and no latency guarantee. + /// + /// When this parameter is set, the response body will include the `service_tier` utilized. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateChatCompletionRequestServiceTier? get serviceTier => + throw _privateConstructorUsedError; + /// Up to 4 sequences where the API will stop generating further tokens. @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) @@ -3558,6 +3572,11 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @JsonKey(name: 'response_format', includeIfNull: false) ChatCompletionResponseFormat? responseFormat, @JsonKey(includeIfNull: false) int? seed, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateChatCompletionRequestServiceTier? serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? stop, @@ -3611,6 +3630,7 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, Object? presencePenalty = freezed, Object? responseFormat = freezed, Object? seed = freezed, + Object? serviceTier = freezed, Object? stop = freezed, Object? stream = freezed, Object? streamOptions = freezed, @@ -3668,6 +3688,10 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ? _value.seed : seed // ignore: cast_nullable_to_non_nullable as int?, + serviceTier: freezed == serviceTier + ? 
_value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as CreateChatCompletionRequestServiceTier?, stop: freezed == stop ? _value.stop : stop // ignore: cast_nullable_to_non_nullable @@ -3813,6 +3837,11 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @JsonKey(name: 'response_format', includeIfNull: false) ChatCompletionResponseFormat? responseFormat, @JsonKey(includeIfNull: false) int? seed, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateChatCompletionRequestServiceTier? serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? stop, @@ -3871,6 +3900,7 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> Object? presencePenalty = freezed, Object? responseFormat = freezed, Object? seed = freezed, + Object? serviceTier = freezed, Object? stop = freezed, Object? stream = freezed, Object? streamOptions = freezed, @@ -3928,6 +3958,10 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.seed : seed // ignore: cast_nullable_to_non_nullable as int?, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as CreateChatCompletionRequestServiceTier?, stop: freezed == stop ? _value.stop : stop // ignore: cast_nullable_to_non_nullable @@ -3995,6 +4029,11 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @JsonKey(includeIfNull: false) this.seed, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) this.stop, @JsonKey(includeIfNull: false) this.stream = false, @JsonKey(name: 'stream_options', includeIfNull: false) this.streamOptions, @@ -4107,6 +4146,20 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(includeIfNull: false) final int? seed; + /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers + /// subscribed to the scale tier service: + /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + /// - If set to 'default', the request will be processed using the default service tier with a lower + /// uptime SLA and no latency guarantee. + /// + /// When this parameter is set, the response body will include the `service_tier` utilized. + @override + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final CreateChatCompletionRequestServiceTier? serviceTier; + /// Up to 4 sequences where the API will stop generating further tokens. 
@override @_ChatCompletionStopConverter() @@ -4207,7 +4260,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @override String toString() { - return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, user: $user, functionCall: $functionCall, functions: $functions)'; + return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, serviceTier: $serviceTier, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, user: $user, functionCall: $functionCall, functions: $functions)'; } @override @@ -4233,6 +4286,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat) && (identical(other.seed, seed) || other.seed == seed) && + (identical(other.serviceTier, serviceTier) || + other.serviceTier == serviceTier) && (identical(other.stop, stop) || other.stop == stop) && (identical(other.stream, stream) || other.stream == stream) && (identical(other.streamOptions, streamOptions) || @@ -4267,6 +4322,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { presencePenalty, responseFormat, seed, + serviceTier, stop, stream, streamOptions, @@ -4315,6 +4371,11 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'response_format', includeIfNull: false) final ChatCompletionResponseFormat? responseFormat, @JsonKey(includeIfNull: false) final int? seed, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final CreateChatCompletionRequestServiceTier? serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) final ChatCompletionStop? stop, @@ -4411,6 +4472,20 @@ abstract class _CreateChatCompletionRequest int? get seed; @override + /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers + /// subscribed to the scale tier service: + /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + /// - If set to 'default', the request will be processed using the default service tier with a lower + /// uptime SLA and no latency guarantee. + /// + /// When this parameter is set, the response body will include the `service_tier` utilized. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateChatCompletionRequestServiceTier? get serviceTier; + @override + /// Up to 4 sequences where the API will stop generating further tokens. @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) @@ -7741,6 +7816,14 @@ mixin _$CreateChatCompletionResponse { /// The model used for the chat completion. 
String get model => throw _privateConstructorUsedError; + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? get serviceTier => throw _privateConstructorUsedError; + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. @@ -7773,6 +7856,11 @@ abstract class $CreateChatCompletionResponseCopyWith<$Res> { List choices, int created, String model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, String object, @@ -7799,6 +7887,7 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, Object? choices = null, Object? created = null, Object? model = null, + Object? serviceTier = freezed, Object? systemFingerprint = freezed, Object? object = null, Object? usage = freezed, @@ -7820,6 +7909,10 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, ? _value.model : model // ignore: cast_nullable_to_non_nullable as String, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable @@ -7862,6 +7955,11 @@ abstract class _$$CreateChatCompletionResponseImplCopyWith<$Res> List choices, int created, String model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, String object, @@ -7888,6 +7986,7 @@ class __$$CreateChatCompletionResponseImplCopyWithImpl<$Res> Object? choices = null, Object? created = null, Object? model = null, + Object? serviceTier = freezed, Object? systemFingerprint = freezed, Object? object = null, Object? usage = freezed, @@ -7909,6 +8008,10 @@ class __$$CreateChatCompletionResponseImplCopyWithImpl<$Res> ? _value.model : model // ignore: cast_nullable_to_non_nullable as String, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable @@ -7933,6 +8036,11 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { required final List choices, required this.created, required this.model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) this.systemFingerprint, required this.object, @@ -7968,6 +8076,15 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { @override final String model; + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. 
+ @override + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ServiceTier? serviceTier; + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. @@ -7986,7 +8103,7 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { @override String toString() { - return 'CreateChatCompletionResponse(id: $id, choices: $choices, created: $created, model: $model, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; + return 'CreateChatCompletionResponse(id: $id, choices: $choices, created: $created, model: $model, serviceTier: $serviceTier, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; } @override @@ -7998,6 +8115,8 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { const DeepCollectionEquality().equals(other._choices, _choices) && (identical(other.created, created) || other.created == created) && (identical(other.model, model) || other.model == model) && + (identical(other.serviceTier, serviceTier) || + other.serviceTier == serviceTier) && (identical(other.systemFingerprint, systemFingerprint) || other.systemFingerprint == systemFingerprint) && (identical(other.object, object) || other.object == object) && @@ -8012,6 +8131,7 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { const DeepCollectionEquality().hash(_choices), created, model, + serviceTier, systemFingerprint, object, usage); @@ -8039,6 +8159,11 @@ abstract class _CreateChatCompletionResponse required final List choices, required final int created, required final String model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) final String? systemFingerprint, required final String object, @@ -8068,6 +8193,15 @@ abstract class _CreateChatCompletionResponse String get model; @override + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? get serviceTier; + @override + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. @@ -9049,6 +9183,14 @@ mixin _$CreateChatCompletionStreamResponse { @JsonKey(includeIfNull: false) String? get model => throw _privateConstructorUsedError; + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? get serviceTier => throw _privateConstructorUsedError; + /// This fingerprint represents the backend configuration that the model runs with. 
/// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact @@ -9083,6 +9225,11 @@ abstract class $CreateChatCompletionStreamResponseCopyWith<$Res> { List choices, @JsonKey(includeIfNull: false) int? created, @JsonKey(includeIfNull: false) String? model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, @JsonKey(includeIfNull: false) String? object, @@ -9109,6 +9256,7 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, Object? choices = null, Object? created = freezed, Object? model = freezed, + Object? serviceTier = freezed, Object? systemFingerprint = freezed, Object? object = freezed, Object? usage = freezed, @@ -9130,6 +9278,10 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ? _value.model : model // ignore: cast_nullable_to_non_nullable as String?, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable @@ -9172,6 +9324,11 @@ abstract class _$$CreateChatCompletionStreamResponseImplCopyWith<$Res> List choices, @JsonKey(includeIfNull: false) int? created, @JsonKey(includeIfNull: false) String? model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, @JsonKey(includeIfNull: false) String? object, @@ -9198,6 +9355,7 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> Object? choices = null, Object? created = freezed, Object? model = freezed, + Object? serviceTier = freezed, Object? systemFingerprint = freezed, Object? object = freezed, Object? usage = freezed, @@ -9219,6 +9377,10 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> ? _value.model : model // ignore: cast_nullable_to_non_nullable as String?, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable @@ -9244,6 +9406,11 @@ class _$CreateChatCompletionStreamResponseImpl required final List choices, @JsonKey(includeIfNull: false) this.created, @JsonKey(includeIfNull: false) this.model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) this.systemFingerprint, @JsonKey(includeIfNull: false) this.object, @@ -9283,6 +9450,15 @@ class _$CreateChatCompletionStreamResponseImpl @JsonKey(includeIfNull: false) final String? model; + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @override + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ServiceTier? serviceTier; + /// This fingerprint represents the backend configuration that the model runs with. 
/// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact @@ -9302,7 +9478,7 @@ class _$CreateChatCompletionStreamResponseImpl @override String toString() { - return 'CreateChatCompletionStreamResponse(id: $id, choices: $choices, created: $created, model: $model, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; + return 'CreateChatCompletionStreamResponse(id: $id, choices: $choices, created: $created, model: $model, serviceTier: $serviceTier, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; } @override @@ -9314,6 +9490,8 @@ class _$CreateChatCompletionStreamResponseImpl const DeepCollectionEquality().equals(other._choices, _choices) && (identical(other.created, created) || other.created == created) && (identical(other.model, model) || other.model == model) && + (identical(other.serviceTier, serviceTier) || + other.serviceTier == serviceTier) && (identical(other.systemFingerprint, systemFingerprint) || other.systemFingerprint == systemFingerprint) && (identical(other.object, object) || other.object == object) && @@ -9328,6 +9506,7 @@ class _$CreateChatCompletionStreamResponseImpl const DeepCollectionEquality().hash(_choices), created, model, + serviceTier, systemFingerprint, object, usage); @@ -9355,6 +9534,11 @@ abstract class _CreateChatCompletionStreamResponse required final List choices, @JsonKey(includeIfNull: false) final int? created, @JsonKey(includeIfNull: false) final String? model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) final String? systemFingerprint, @JsonKey(includeIfNull: false) final String? object, @@ -9388,6 +9572,15 @@ abstract class _CreateChatCompletionStreamResponse String? get model; @override + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? get serviceTier; + @override + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index e52d47f7..9ed1ff90 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -322,6 +322,9 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( : ChatCompletionResponseFormat.fromJson( json['response_format'] as Map), seed: json['seed'] as int?, + serviceTier: $enumDecodeNullable( + _$CreateChatCompletionRequestServiceTierEnumMap, json['service_tier'], + unknownValue: JsonKey.nullForUndefinedEnumValue), stop: const _ChatCompletionStopConverter().fromJson(json['stop']), stream: json['stream'] as bool? ?? 
false, streamOptions: json['stream_options'] == null @@ -366,6 +369,8 @@ Map _$$CreateChatCompletionRequestImplToJson( writeNotNull('presence_penalty', instance.presencePenalty); writeNotNull('response_format', instance.responseFormat?.toJson()); writeNotNull('seed', instance.seed); + writeNotNull('service_tier', + _$CreateChatCompletionRequestServiceTierEnumMap[instance.serviceTier]); writeNotNull( 'stop', const _ChatCompletionStopConverter().toJson(instance.stop)); writeNotNull('stream', instance.stream); @@ -388,6 +393,11 @@ Map _$$CreateChatCompletionRequestImplToJson( return val; } +const _$CreateChatCompletionRequestServiceTierEnumMap = { + CreateChatCompletionRequestServiceTier.auto: 'auto', + CreateChatCompletionRequestServiceTier.vDefault: 'default', +}; + _$ChatCompletionModelEnumerationImpl _$$ChatCompletionModelEnumerationImplFromJson(Map json) => _$ChatCompletionModelEnumerationImpl( @@ -707,6 +717,9 @@ _$CreateChatCompletionResponseImpl _$$CreateChatCompletionResponseImplFromJson( .toList(), created: json['created'] as int, model: json['model'] as String, + serviceTier: $enumDecodeNullable( + _$ServiceTierEnumMap, json['service_tier'], + unknownValue: JsonKey.nullForUndefinedEnumValue), systemFingerprint: json['system_fingerprint'] as String?, object: json['object'] as String, usage: json['usage'] == null @@ -728,12 +741,18 @@ Map _$$CreateChatCompletionResponseImplToJson( val['choices'] = instance.choices.map((e) => e.toJson()).toList(); val['created'] = instance.created; val['model'] = instance.model; + writeNotNull('service_tier', _$ServiceTierEnumMap[instance.serviceTier]); writeNotNull('system_fingerprint', instance.systemFingerprint); val['object'] = instance.object; writeNotNull('usage', instance.usage?.toJson()); return val; } +const _$ServiceTierEnumMap = { + ServiceTier.scale: 'scale', + ServiceTier.vDefault: 'default', +}; + _$ChatCompletionResponseChoiceImpl _$$ChatCompletionResponseChoiceImplFromJson( Map json) => _$ChatCompletionResponseChoiceImpl( @@ -839,6 +858,9 @@ _$CreateChatCompletionStreamResponseImpl .toList(), created: json['created'] as int?, model: json['model'] as String?, + serviceTier: $enumDecodeNullable( + _$ServiceTierEnumMap, json['service_tier'], + unknownValue: JsonKey.nullForUndefinedEnumValue), systemFingerprint: json['system_fingerprint'] as String?, object: json['object'] as String?, usage: json['usage'] == null @@ -860,6 +882,7 @@ Map _$$CreateChatCompletionStreamResponseImplToJson( val['choices'] = instance.choices.map((e) => e.toJson()).toList(); writeNotNull('created', instance.created); writeNotNull('model', instance.model); + writeNotNull('service_tier', _$ServiceTierEnumMap[instance.serviceTier]); writeNotNull('system_fingerprint', instance.systemFingerprint); writeNotNull('object', instance.object); writeNotNull('usage', instance.usage?.toJson()); diff --git a/packages/openai_dart/lib/src/generated/schema/service_tier.dart b/packages/openai_dart/lib/src/generated/schema/service_tier.dart new file mode 100644 index 00000000..8a01afc5 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/service_tier.dart @@ -0,0 +1,18 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// ENUM: ServiceTier +// ========================================== + +/// The service tier used for processing the request. 
This field is only included if the `service_tier` parameter +/// is specified in the request. +enum ServiceTier { + @JsonValue('scale') + scale, + @JsonValue('default') + vDefault, +} diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 31fe55f9..cb469506 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1908,6 +1908,19 @@ components: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + service_tier: + description: | + Specifies the latency tier to use for processing the request. This parameter is relevant for customers + subscribed to the scale tier service: + - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + - If set to 'default', the request will be processed using the default service tier with a lower + uptime SLA and no latency guarantee. + + When this parameter is set, the response body will include the `service_tier` utilized. + type: string + enum: [ "auto", "default" ] + nullable: true + default: null stop: title: ChatCompletionStop description: | @@ -2297,6 +2310,8 @@ components: model: type: string description: The model used for the chat completion. + service_tier: + $ref: "#/components/schemas/ServiceTier" system_fingerprint: type: string description: | @@ -2349,6 +2364,14 @@ components: "content_filter", "function_call", ] + ServiceTier: + description: | + The service tier used for processing the request. This field is only included if the `service_tier` parameter + is specified in the request. + type: string + enum: [ "scale", "default" ] + example: "scale" + nullable: true ChatCompletionLogprobs: &chat_completion_response_logprobs description: Log probability information for the choice. type: object @@ -2419,6 +2442,8 @@ components: model: type: string description: The model to generate the completion. + service_tier: + $ref: "#/components/schemas/ServiceTier" system_fingerprint: type: string description: | diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 1b0f8c0c..fd863f50 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -7925,6 +7925,17 @@ components: Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. x-oaiMeta: beta: true + service_tier: + description: | + Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: + - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + + When this parameter is set, the response body will include the `service_tier` utilized. + type: string + enum: [ "auto", "default" ] + nullable: true + default: null stop: description: | Up to 4 sequences where the API will stop generating further tokens. @@ -8066,6 +8077,12 @@ components: model: type: string description: The model used for the chat completion. 
+ service_tier: + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + type: string + enum: [ "scale", "default" ] + example: "scale" + nullable: true system_fingerprint: type: string description: | @@ -8242,6 +8259,12 @@ components: model: type: string description: The model to generate the completion. + service_tier: + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + type: string + enum: [ "scale", "default" ] + example: "scale" + nullable: true system_fingerprint: type: string description: | @@ -10462,6 +10485,8 @@ components: tool_choice: $ref: "#/components/schemas/AssistantsApiToolChoiceOption" nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true @@ -10489,6 +10514,7 @@ components: - max_completion_tokens - truncation_strategy - tool_choice + - parallel_tool_calls - response_format x-oaiMeta: name: The run object @@ -10526,7 +10552,8 @@ components: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } CreateRunRequest: type: object @@ -10638,6 +10665,8 @@ components: tool_choice: $ref: "#/components/schemas/AssistantsApiToolChoiceOption" nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true @@ -10857,6 +10886,8 @@ components: tool_choice: $ref: "#/components/schemas/AssistantsApiToolChoiceOption" nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index 4c449cc2..5b2fef22 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 test: ^1.25.2 From 309b3b014654118c8795848e871c0b3839ed73de Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 20 Jul 2024 10:56:54 +0200 Subject: [PATCH 071/251] feat: Add support for service tier in ChatOpenAI (#495) --- .../lib/src/chat_models/chat_openai.dart | 17 +++++++++------- .../lib/src/chat_models/mappers.dart | 11 ++++++++++ .../lib/src/chat_models/types.dart | 20 +++++++++++++++++++ 3 files changed, 41 insertions(+), 7 deletions(-) diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index fa03bf4c..e218637a 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -276,13 +276,15 @@ class ChatOpenAI extends BaseChatModel { final bool stream = false, }) { final messagesDtos = messages.toChatCompletionMessages(); - final toolsDtos = options?.tools?.toChatCompletionTool() ?? - defaultOptions.tools?.toChatCompletionTool(); - final toolChoice = options?.toolChoice?.toChatCompletionToolChoice() ?? 
- defaultOptions.toolChoice?.toChatCompletionToolChoice(); - final responseFormat = - options?.responseFormat ?? defaultOptions.responseFormat; - final responseFormatDto = responseFormat?.toChatCompletionResponseFormat(); + final toolsDtos = + (options?.tools ?? defaultOptions.tools)?.toChatCompletionTool(); + final toolChoice = (options?.toolChoice ?? defaultOptions.toolChoice) + ?.toChatCompletionToolChoice(); + final responseFormatDto = + (options?.responseFormat ?? defaultOptions.responseFormat) + ?.toChatCompletionResponseFormat(); + final serviceTierDto = (options?.serviceTier ?? defaultOptions.serviceTier) + .toCreateChatCompletionRequestServiceTier(); return CreateChatCompletionRequest( model: ChatCompletionModel.modelId( @@ -307,6 +309,7 @@ class ChatOpenAI extends BaseChatModel { topP: options?.topP ?? defaultOptions.topP, parallelToolCalls: options?.parallelToolCalls ?? defaultOptions.parallelToolCalls, + serviceTier: serviceTierDto, user: options?.user ?? defaultOptions.user, streamOptions: stream ? const ChatCompletionStreamOptions(includeUsage: true) : null, diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart index 6b434109..0c70fd73 100644 --- a/packages/langchain_openai/lib/src/chat_models/mappers.dart +++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart @@ -260,6 +260,17 @@ extension ChatOpenAIResponseFormatMapper on ChatOpenAIResponseFormat { } } +extension ChatOpenAIServiceTierX on ChatOpenAIServiceTier? { + CreateChatCompletionRequestServiceTier? + toCreateChatCompletionRequestServiceTier() => switch (this) { + ChatOpenAIServiceTier.auto => + CreateChatCompletionRequestServiceTier.auto, + ChatOpenAIServiceTier.vDefault => + CreateChatCompletionRequestServiceTier.vDefault, + null => null, + }; +} + FinishReason _mapFinishReason( final ChatCompletionFinishReason? reason, ) => diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 859c5cdf..0526f937 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -19,6 +19,7 @@ class ChatOpenAIOptions extends ChatModelOptions { this.temperature, this.topP, this.parallelToolCalls, + this.serviceTier, this.user, super.tools, super.toolChoice, @@ -131,6 +132,10 @@ class ChatOpenAIOptions extends ChatModelOptions { /// Ref: https://platform.openai.com/docs/guides/function-calling/parallel-function-calling final bool? parallelToolCalls; + /// Specifies the latency tier to use for processing the request. + /// This is relevant for customers subscribed to the scale tier service. + final ChatOpenAIServiceTier? serviceTier; + /// A unique identifier representing your end-user, which can help OpenAI to /// monitor and detect abuse. /// @@ -151,6 +156,8 @@ class ChatOpenAIOptions extends ChatModelOptions { final List? stop, final double? temperature, final double? topP, + final bool? parallelToolCalls, + final ChatOpenAIServiceTier? serviceTier, final String? user, final List? tools, final ChatToolChoice? toolChoice, @@ -167,6 +174,8 @@ class ChatOpenAIOptions extends ChatModelOptions { stop: stop ?? this.stop, temperature: temperature ?? this.temperature, topP: topP ?? this.topP, + parallelToolCalls: parallelToolCalls ?? this.parallelToolCalls, + serviceTier: serviceTier ?? this.serviceTier, user: user ?? this.user, tools: tools ?? this.tools, toolChoice: toolChoice ?? 
this.toolChoice, @@ -196,3 +205,14 @@ enum ChatOpenAIResponseFormatType { /// guarantees the message the model generates is valid JSON. jsonObject, } + +/// Specifies the latency tier to use for processing the request. +/// This is relevant for customers subscribed to the scale tier service. +enum ChatOpenAIServiceTier { + /// The system will utilize scale tier credits until they are exhausted. + auto, + + /// The request will be processed using the default service tier with a lower + /// uptime SLA and no latency guarantee. + vDefault, +} From 28f6a5cf3ec6787e822e43d03ff175e7d88f91fd Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 20 Jul 2024 11:00:12 +0200 Subject: [PATCH 072/251] feat: Support chunking strategy in file_search tool in openai_dart (#496) --- .../chunking_strategy_request_param.dart | 54 + .../chunking_strategy_response_param.dart | 55 + ...reate_vector_store_file_batch_request.dart | 8 +- .../create_vector_store_file_request.dart | 8 +- .../schema/create_vector_store_request.dart | 7 + .../lib/src/generated/schema/schema.dart | 3 + .../src/generated/schema/schema.freezed.dart | 1455 ++++++++++++++++- .../lib/src/generated/schema/schema.g.dart | 160 +- .../schema/static_chunking_strategy.dart | 60 + ...ol_resources_file_search_vector_store.dart | 12 +- .../schema/vector_store_file_object.dart | 9 +- packages/openai_dart/oas/openapi_curated.yaml | 101 ++ .../openai_dart/oas/openapi_official.yaml | 203 ++- 13 files changed, 2088 insertions(+), 47 deletions(-) create mode 100644 packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart diff --git a/packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart new file mode 100644 index 00000000..a8f0c03d --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart @@ -0,0 +1,54 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ChunkingStrategyRequestParam +// ========================================== + +/// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class ChunkingStrategyRequestParam with _$ChunkingStrategyRequestParam { + const ChunkingStrategyRequestParam._(); + + // ------------------------------------------ + // UNION: AutoChunkingStrategyRequestParam + // ------------------------------------------ + + /// Auto Chunking Strategy, the default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` + /// and `chunk_overlap_tokens` of `400`. + const factory ChunkingStrategyRequestParam.auto({ + /// Always `auto`. + required String type, + }) = AutoChunkingStrategyRequestParam; + + // ------------------------------------------ + // UNION: StaticChunkingStrategyRequestParam + // ------------------------------------------ + + /// Static chunking strategy + const factory ChunkingStrategyRequestParam.static({ + /// Always `static`. 
+ required String type, + + /// Static chunking strategy + required StaticChunkingStrategy static, + }) = StaticChunkingStrategyRequestParam; + + /// Object construction from a JSON representation + factory ChunkingStrategyRequestParam.fromJson(Map json) => + _$ChunkingStrategyRequestParamFromJson(json); +} + +// ========================================== +// ENUM: ChunkingStrategyRequestParamEnumType +// ========================================== + +enum ChunkingStrategyRequestParamEnumType { + @JsonValue('auto') + auto, + @JsonValue('static') + static, +} diff --git a/packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart new file mode 100644 index 00000000..c706df60 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart @@ -0,0 +1,55 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ChunkingStrategyResponseParam +// ========================================== + +/// The chunking strategy used to chunk the file(s). +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class ChunkingStrategyResponseParam + with _$ChunkingStrategyResponseParam { + const ChunkingStrategyResponseParam._(); + + // ------------------------------------------ + // UNION: StaticChunkingStrategyResponseParam + // ------------------------------------------ + + /// Static Chunking Strategy. + const factory ChunkingStrategyResponseParam.static({ + /// Always `static`. + required String type, + + /// Static chunking strategy + required StaticChunkingStrategy static, + }) = StaticChunkingStrategyResponseParam; + + // ------------------------------------------ + // UNION: OtherChunkingStrategyResponseParam + // ------------------------------------------ + + /// Other Chunking Strategy. This is returned when the chunking strategy is unknown. Typically, this is because + /// the file was indexed before the `chunking_strategy` concept was introduced in the API. + const factory ChunkingStrategyResponseParam.other({ + /// Always `other`. + required String type, + }) = OtherChunkingStrategyResponseParam; + + /// Object construction from a JSON representation + factory ChunkingStrategyResponseParam.fromJson(Map json) => + _$ChunkingStrategyResponseParamFromJson(json); +} + +// ========================================== +// ENUM: ChunkingStrategyResponseParamEnumType +// ========================================== + +enum ChunkingStrategyResponseParamEnumType { + @JsonValue('static') + static, + @JsonValue('other') + other, +} diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart index 6a607eae..3111c855 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart @@ -18,6 +18,11 @@ class CreateVectorStoreFileBatchRequest const factory CreateVectorStoreFileBatchRequest({ /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. 
@JsonKey(name: 'file_ids') required List fileIds, + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, }) = _CreateVectorStoreFileBatchRequest; /// Object construction from a JSON representation @@ -26,7 +31,7 @@ class CreateVectorStoreFileBatchRequest _$CreateVectorStoreFileBatchRequestFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_ids']; + static const List propertyNames = ['file_ids', 'chunking_strategy']; /// Perform validations on the schema property values String? validateSchema() { @@ -37,6 +42,7 @@ class CreateVectorStoreFileBatchRequest Map toMap() { return { 'file_ids': fileIds, + 'chunking_strategy': chunkingStrategy, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart index 742fae3b..c18eadee 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart @@ -17,6 +17,11 @@ class CreateVectorStoreFileRequest with _$CreateVectorStoreFileRequest { const factory CreateVectorStoreFileRequest({ /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_id') required String fileId, + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, }) = _CreateVectorStoreFileRequest; /// Object construction from a JSON representation @@ -24,7 +29,7 @@ class CreateVectorStoreFileRequest with _$CreateVectorStoreFileRequest { _$CreateVectorStoreFileRequestFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_id']; + static const List propertyNames = ['file_id', 'chunking_strategy']; /// Perform validations on the schema property values String? validateSchema() { @@ -35,6 +40,7 @@ class CreateVectorStoreFileRequest with _$CreateVectorStoreFileRequest { Map toMap() { return { 'file_id': fileId, + 'chunking_strategy': chunkingStrategy, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart index bb9e83d7..61e87095 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart @@ -25,6 +25,11 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic metadata, }) = _CreateVectorStoreRequest; @@ -38,6 +43,7 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { 'name', 'file_ids', 'expires_after', + 'chunking_strategy', 'metadata' ]; @@ -52,6 +58,7 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { 'name': name, 'file_ids': fileIds, 'expires_after': expiresAfter, + 'chunking_strategy': chunkingStrategy, 'metadata': metadata, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart index 793315da..e4f6c023 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -127,6 +127,7 @@ part 'update_vector_store_request.dart'; part 'list_vector_stores_response.dart'; part 'delete_vector_store_response.dart'; part 'vector_store_file_object.dart'; +part 'static_chunking_strategy.dart'; part 'create_vector_store_file_request.dart'; part 'list_vector_store_files_response.dart'; part 'delete_vector_store_file_response.dart'; @@ -152,4 +153,6 @@ part 'run_step_details_tool_calls.dart'; part 'run_step_delta_step_details_tool_calls.dart'; part 'run_step_details_tool_calls_code_output.dart'; part 'run_step_delta_step_details_tool_calls_code_output.dart'; +part 'chunking_strategy_request_param.dart'; +part 'chunking_strategy_response_param.dart'; part 'assistant_stream_event.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 7737861a..d1f911c1 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -36641,6 +36641,12 @@ mixin _$ToolResourcesFileSearchVectorStore { @JsonKey(name: 'file_ids', includeIfNull: false) List? get fileIds => throw _privateConstructorUsedError; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy => + throw _privateConstructorUsedError; + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; @@ -36662,7 +36668,11 @@ abstract class $ToolResourcesFileSearchVectorStoreCopyWith<$Res> { @useResult $Res call( {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) dynamic metadata}); + + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -36680,6 +36690,7 @@ class _$ToolResourcesFileSearchVectorStoreCopyWithImpl<$Res, @override $Res call({ Object? fileIds = freezed, + Object? chunkingStrategy = freezed, Object? 
metadata = freezed, }) { return _then(_value.copyWith( @@ -36687,12 +36698,29 @@ class _$ToolResourcesFileSearchVectorStoreCopyWithImpl<$Res, ? _value.fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, metadata: freezed == metadata ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable as dynamic, ) as $Val); } + + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, + (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } } /// @nodoc @@ -36706,7 +36734,12 @@ abstract class _$$ToolResourcesFileSearchVectorStoreImplCopyWith<$Res> @useResult $Res call( {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) dynamic metadata}); + + @override + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -36723,6 +36756,7 @@ class __$$ToolResourcesFileSearchVectorStoreImplCopyWithImpl<$Res> @override $Res call({ Object? fileIds = freezed, + Object? chunkingStrategy = freezed, Object? metadata = freezed, }) { return _then(_$ToolResourcesFileSearchVectorStoreImpl( @@ -36730,6 +36764,10 @@ class __$$ToolResourcesFileSearchVectorStoreImplCopyWithImpl<$Res> ? _value._fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, metadata: freezed == metadata ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable @@ -36745,6 +36783,8 @@ class _$ToolResourcesFileSearchVectorStoreImpl const _$ToolResourcesFileSearchVectorStoreImpl( {@JsonKey(name: 'file_ids', includeIfNull: false) final List? fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy, @JsonKey(includeIfNull: false) this.metadata}) : _fileIds = fileIds, super._(); @@ -36767,6 +36807,12 @@ class _$ToolResourcesFileSearchVectorStoreImpl return EqualUnmodifiableListView(value); } + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy; + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
@override @JsonKey(includeIfNull: false) @@ -36774,7 +36820,7 @@ class _$ToolResourcesFileSearchVectorStoreImpl @override String toString() { - return 'ToolResourcesFileSearchVectorStore(fileIds: $fileIds, metadata: $metadata)'; + return 'ToolResourcesFileSearchVectorStore(fileIds: $fileIds, chunkingStrategy: $chunkingStrategy, metadata: $metadata)'; } @override @@ -36783,6 +36829,8 @@ class _$ToolResourcesFileSearchVectorStoreImpl (other.runtimeType == runtimeType && other is _$ToolResourcesFileSearchVectorStoreImpl && const DeepCollectionEquality().equals(other._fileIds, _fileIds) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy) && const DeepCollectionEquality().equals(other.metadata, metadata)); } @@ -36791,6 +36839,7 @@ class _$ToolResourcesFileSearchVectorStoreImpl int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_fileIds), + chunkingStrategy, const DeepCollectionEquality().hash(metadata)); @JsonKey(ignore: true) @@ -36814,6 +36863,8 @@ abstract class _ToolResourcesFileSearchVectorStore const factory _ToolResourcesFileSearchVectorStore( {@JsonKey(name: 'file_ids', includeIfNull: false) final List? fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) final dynamic metadata}) = _$ToolResourcesFileSearchVectorStoreImpl; const _ToolResourcesFileSearchVectorStore._() : super._(); @@ -36829,6 +36880,12 @@ abstract class _ToolResourcesFileSearchVectorStore List? get fileIds; @override + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy; + @override + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata; @@ -45270,6 +45327,12 @@ mixin _$CreateVectorStoreRequest { VectorStoreExpirationAfter? get expiresAfter => throw _privateConstructorUsedError; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy => + throw _privateConstructorUsedError; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; @@ -45291,9 +45354,12 @@ abstract class $CreateVectorStoreRequestCopyWith<$Res> { @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) dynamic metadata}); $VectorStoreExpirationAfterCopyWith<$Res>? 
get expiresAfter; + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -45313,6 +45379,7 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, Object? name = freezed, Object? fileIds = freezed, Object? expiresAfter = freezed, + Object? chunkingStrategy = freezed, Object? metadata = freezed, }) { return _then(_value.copyWith( @@ -45328,6 +45395,10 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, ? _value.expiresAfter : expiresAfter // ignore: cast_nullable_to_non_nullable as VectorStoreExpirationAfter?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, metadata: freezed == metadata ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable @@ -45347,6 +45418,19 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, return _then(_value.copyWith(expiresAfter: value) as $Val); }); } + + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, + (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } } /// @nodoc @@ -45363,10 +45447,14 @@ abstract class _$$CreateVectorStoreRequestImplCopyWith<$Res> @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) dynamic metadata}); @override $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + @override + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -45385,6 +45473,7 @@ class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> Object? name = freezed, Object? fileIds = freezed, Object? expiresAfter = freezed, + Object? chunkingStrategy = freezed, Object? metadata = freezed, }) { return _then(_$CreateVectorStoreRequestImpl( @@ -45400,6 +45489,10 @@ class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> ? _value.expiresAfter : expiresAfter // ignore: cast_nullable_to_non_nullable as VectorStoreExpirationAfter?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, metadata: freezed == metadata ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable @@ -45416,6 +45509,8 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { @JsonKey(name: 'file_ids', includeIfNull: false) final List? fileIds, @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy, @JsonKey(includeIfNull: false) this.metadata}) : _fileIds = fileIds, super._(); @@ -45447,6 +45542,12 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { @JsonKey(name: 'expires_after', includeIfNull: false) final VectorStoreExpirationAfter? expiresAfter; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? 
chunkingStrategy; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) @@ -45454,7 +45555,7 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { @override String toString() { - return 'CreateVectorStoreRequest(name: $name, fileIds: $fileIds, expiresAfter: $expiresAfter, metadata: $metadata)'; + return 'CreateVectorStoreRequest(name: $name, fileIds: $fileIds, expiresAfter: $expiresAfter, chunkingStrategy: $chunkingStrategy, metadata: $metadata)'; } @override @@ -45466,6 +45567,8 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { const DeepCollectionEquality().equals(other._fileIds, _fileIds) && (identical(other.expiresAfter, expiresAfter) || other.expiresAfter == expiresAfter) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy) && const DeepCollectionEquality().equals(other.metadata, metadata)); } @@ -45476,6 +45579,7 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { name, const DeepCollectionEquality().hash(_fileIds), expiresAfter, + chunkingStrategy, const DeepCollectionEquality().hash(metadata)); @JsonKey(ignore: true) @@ -45500,6 +45604,8 @@ abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { final List? fileIds, @JsonKey(name: 'expires_after', includeIfNull: false) final VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) final dynamic metadata}) = _$CreateVectorStoreRequestImpl; const _CreateVectorStoreRequest._() : super._(); @@ -45524,6 +45630,12 @@ abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { VectorStoreExpirationAfter? get expiresAfter; @override + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy; + @override + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata; @@ -46275,6 +46387,12 @@ mixin _$VectorStoreFileObject { VectorStoreFileObjectLastError? get lastError => throw _privateConstructorUsedError; + /// The chunking strategy used to chunk the file(s). + /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? get chunkingStrategy => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $VectorStoreFileObjectCopyWith get copyWith => @@ -46294,9 +46412,12 @@ abstract class $VectorStoreFileObjectCopyWith<$Res> { @JsonKey(name: 'created_at') int createdAt, @JsonKey(name: 'vector_store_id') String vectorStoreId, VectorStoreFileStatus status, - @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? 
lastError}); + @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? chunkingStrategy}); $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError; + $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -46320,6 +46441,7 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, Object? vectorStoreId = null, Object? status = null, Object? lastError = freezed, + Object? chunkingStrategy = freezed, }) { return _then(_value.copyWith( id: null == id @@ -46350,6 +46472,10 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, ? _value.lastError : lastError // ignore: cast_nullable_to_non_nullable as VectorStoreFileObjectLastError?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyResponseParam?, ) as $Val); } @@ -46365,6 +46491,19 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, return _then(_value.copyWith(lastError: value) as $Val); }); } + + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyResponseParamCopyWith<$Res>( + _value.chunkingStrategy!, (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } } /// @nodoc @@ -46383,10 +46522,14 @@ abstract class _$$VectorStoreFileObjectImplCopyWith<$Res> @JsonKey(name: 'created_at') int createdAt, @JsonKey(name: 'vector_store_id') String vectorStoreId, VectorStoreFileStatus status, - @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError}); + @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? chunkingStrategy}); @override $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError; + @override + $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -46408,6 +46551,7 @@ class __$$VectorStoreFileObjectImplCopyWithImpl<$Res> Object? vectorStoreId = null, Object? status = null, Object? lastError = freezed, + Object? chunkingStrategy = freezed, }) { return _then(_$VectorStoreFileObjectImpl( id: null == id @@ -46438,6 +46582,10 @@ class __$$VectorStoreFileObjectImplCopyWithImpl<$Res> ? _value.lastError : lastError // ignore: cast_nullable_to_non_nullable as VectorStoreFileObjectLastError?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyResponseParam?, )); } } @@ -46452,7 +46600,9 @@ class _$VectorStoreFileObjectImpl extends _VectorStoreFileObject { @JsonKey(name: 'created_at') required this.createdAt, @JsonKey(name: 'vector_store_id') required this.vectorStoreId, required this.status, - @JsonKey(name: 'last_error') required this.lastError}) + @JsonKey(name: 'last_error') required this.lastError, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy}) : super._(); factory _$VectorStoreFileObjectImpl.fromJson(Map json) => @@ -46490,9 +46640,15 @@ class _$VectorStoreFileObjectImpl extends _VectorStoreFileObject { @JsonKey(name: 'last_error') final VectorStoreFileObjectLastError? lastError; + /// The chunking strategy used to chunk the file(s). 
+ /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyResponseParam? chunkingStrategy; + @override String toString() { - return 'VectorStoreFileObject(id: $id, object: $object, usageBytes: $usageBytes, createdAt: $createdAt, vectorStoreId: $vectorStoreId, status: $status, lastError: $lastError)'; + return 'VectorStoreFileObject(id: $id, object: $object, usageBytes: $usageBytes, createdAt: $createdAt, vectorStoreId: $vectorStoreId, status: $status, lastError: $lastError, chunkingStrategy: $chunkingStrategy)'; } @override @@ -46510,13 +46666,15 @@ class _$VectorStoreFileObjectImpl extends _VectorStoreFileObject { other.vectorStoreId == vectorStoreId) && (identical(other.status, status) || other.status == status) && (identical(other.lastError, lastError) || - other.lastError == lastError)); + other.lastError == lastError) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy)); } @JsonKey(ignore: true) @override int get hashCode => Object.hash(runtimeType, id, object, usageBytes, - createdAt, vectorStoreId, status, lastError); + createdAt, vectorStoreId, status, lastError, chunkingStrategy); @JsonKey(ignore: true) @override @@ -46542,7 +46700,9 @@ abstract class _VectorStoreFileObject extends VectorStoreFileObject { @JsonKey(name: 'vector_store_id') required final String vectorStoreId, required final VectorStoreFileStatus status, @JsonKey(name: 'last_error') - required final VectorStoreFileObjectLastError? lastError}) = + required final VectorStoreFileObjectLastError? lastError, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyResponseParam? chunkingStrategy}) = _$VectorStoreFileObjectImpl; const _VectorStoreFileObject._() : super._(); @@ -46582,6 +46742,12 @@ abstract class _VectorStoreFileObject extends VectorStoreFileObject { @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? get lastError; @override + + /// The chunking strategy used to chunk the file(s). + /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? get chunkingStrategy; + @override @JsonKey(ignore: true) _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -46768,6 +46934,204 @@ abstract class _VectorStoreFileObjectLastError get copyWith => throw _privateConstructorUsedError; } +StaticChunkingStrategy _$StaticChunkingStrategyFromJson( + Map json) { + return _StaticChunkingStrategy.fromJson(json); +} + +/// @nodoc +mixin _$StaticChunkingStrategy { + /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + /// maximum value is `4096`. + @JsonKey(name: 'max_chunk_size_tokens') + int get maxChunkSizeTokens => throw _privateConstructorUsedError; + + /// The number of tokens that overlap between chunks. The default value is `400`. + /// + /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. 
+ @JsonKey(name: 'chunk_overlap_tokens') + int get chunkOverlapTokens => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $StaticChunkingStrategyCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $StaticChunkingStrategyCopyWith<$Res> { + factory $StaticChunkingStrategyCopyWith(StaticChunkingStrategy value, + $Res Function(StaticChunkingStrategy) then) = + _$StaticChunkingStrategyCopyWithImpl<$Res, StaticChunkingStrategy>; + @useResult + $Res call( + {@JsonKey(name: 'max_chunk_size_tokens') int maxChunkSizeTokens, + @JsonKey(name: 'chunk_overlap_tokens') int chunkOverlapTokens}); +} + +/// @nodoc +class _$StaticChunkingStrategyCopyWithImpl<$Res, + $Val extends StaticChunkingStrategy> + implements $StaticChunkingStrategyCopyWith<$Res> { + _$StaticChunkingStrategyCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? maxChunkSizeTokens = null, + Object? chunkOverlapTokens = null, + }) { + return _then(_value.copyWith( + maxChunkSizeTokens: null == maxChunkSizeTokens + ? _value.maxChunkSizeTokens + : maxChunkSizeTokens // ignore: cast_nullable_to_non_nullable + as int, + chunkOverlapTokens: null == chunkOverlapTokens + ? _value.chunkOverlapTokens + : chunkOverlapTokens // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$StaticChunkingStrategyImplCopyWith<$Res> + implements $StaticChunkingStrategyCopyWith<$Res> { + factory _$$StaticChunkingStrategyImplCopyWith( + _$StaticChunkingStrategyImpl value, + $Res Function(_$StaticChunkingStrategyImpl) then) = + __$$StaticChunkingStrategyImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'max_chunk_size_tokens') int maxChunkSizeTokens, + @JsonKey(name: 'chunk_overlap_tokens') int chunkOverlapTokens}); +} + +/// @nodoc +class __$$StaticChunkingStrategyImplCopyWithImpl<$Res> + extends _$StaticChunkingStrategyCopyWithImpl<$Res, + _$StaticChunkingStrategyImpl> + implements _$$StaticChunkingStrategyImplCopyWith<$Res> { + __$$StaticChunkingStrategyImplCopyWithImpl( + _$StaticChunkingStrategyImpl _value, + $Res Function(_$StaticChunkingStrategyImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? maxChunkSizeTokens = null, + Object? chunkOverlapTokens = null, + }) { + return _then(_$StaticChunkingStrategyImpl( + maxChunkSizeTokens: null == maxChunkSizeTokens + ? _value.maxChunkSizeTokens + : maxChunkSizeTokens // ignore: cast_nullable_to_non_nullable + as int, + chunkOverlapTokens: null == chunkOverlapTokens + ? _value.chunkOverlapTokens + : chunkOverlapTokens // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$StaticChunkingStrategyImpl extends _StaticChunkingStrategy { + const _$StaticChunkingStrategyImpl( + {@JsonKey(name: 'max_chunk_size_tokens') required this.maxChunkSizeTokens, + @JsonKey(name: 'chunk_overlap_tokens') required this.chunkOverlapTokens}) + : super._(); + + factory _$StaticChunkingStrategyImpl.fromJson(Map json) => + _$$StaticChunkingStrategyImplFromJson(json); + + /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + /// maximum value is `4096`. 
+ @override + @JsonKey(name: 'max_chunk_size_tokens') + final int maxChunkSizeTokens; + + /// The number of tokens that overlap between chunks. The default value is `400`. + /// + /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. + @override + @JsonKey(name: 'chunk_overlap_tokens') + final int chunkOverlapTokens; + + @override + String toString() { + return 'StaticChunkingStrategy(maxChunkSizeTokens: $maxChunkSizeTokens, chunkOverlapTokens: $chunkOverlapTokens)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$StaticChunkingStrategyImpl && + (identical(other.maxChunkSizeTokens, maxChunkSizeTokens) || + other.maxChunkSizeTokens == maxChunkSizeTokens) && + (identical(other.chunkOverlapTokens, chunkOverlapTokens) || + other.chunkOverlapTokens == chunkOverlapTokens)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, maxChunkSizeTokens, chunkOverlapTokens); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$StaticChunkingStrategyImplCopyWith<_$StaticChunkingStrategyImpl> + get copyWith => __$$StaticChunkingStrategyImplCopyWithImpl< + _$StaticChunkingStrategyImpl>(this, _$identity); + + @override + Map toJson() { + return _$$StaticChunkingStrategyImplToJson( + this, + ); + } +} + +abstract class _StaticChunkingStrategy extends StaticChunkingStrategy { + const factory _StaticChunkingStrategy( + {@JsonKey(name: 'max_chunk_size_tokens') + required final int maxChunkSizeTokens, + @JsonKey(name: 'chunk_overlap_tokens') + required final int chunkOverlapTokens}) = _$StaticChunkingStrategyImpl; + const _StaticChunkingStrategy._() : super._(); + + factory _StaticChunkingStrategy.fromJson(Map json) = + _$StaticChunkingStrategyImpl.fromJson; + + @override + + /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + /// maximum value is `4096`. + @JsonKey(name: 'max_chunk_size_tokens') + int get maxChunkSizeTokens; + @override + + /// The number of tokens that overlap between chunks. The default value is `400`. + /// + /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. + @JsonKey(name: 'chunk_overlap_tokens') + int get chunkOverlapTokens; + @override + @JsonKey(ignore: true) + _$$StaticChunkingStrategyImplCopyWith<_$StaticChunkingStrategyImpl> + get copyWith => throw _privateConstructorUsedError; +} + CreateVectorStoreFileRequest _$CreateVectorStoreFileRequestFromJson( Map json) { return _CreateVectorStoreFileRequest.fromJson(json); @@ -46779,6 +47143,12 @@ mixin _$CreateVectorStoreFileRequest { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? 
get chunkingStrategy => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $CreateVectorStoreFileRequestCopyWith @@ -46793,7 +47163,12 @@ abstract class $CreateVectorStoreFileRequestCopyWith<$Res> { _$CreateVectorStoreFileRequestCopyWithImpl<$Res, CreateVectorStoreFileRequest>; @useResult - $Res call({@JsonKey(name: 'file_id') String fileId}); + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy}); + + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -46811,14 +47186,32 @@ class _$CreateVectorStoreFileRequestCopyWithImpl<$Res, @override $Res call({ Object? fileId = null, + Object? chunkingStrategy = freezed, }) { return _then(_value.copyWith( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, ) as $Val); } + + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, + (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } } /// @nodoc @@ -46830,7 +47223,13 @@ abstract class _$$CreateVectorStoreFileRequestImplCopyWith<$Res> __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(name: 'file_id') String fileId}); + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy}); + + @override + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -46847,12 +47246,17 @@ class __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res> @override $Res call({ Object? fileId = null, + Object? chunkingStrategy = freezed, }) { return _then(_$CreateVectorStoreFileRequestImpl( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, )); } } @@ -46861,7 +47265,9 @@ class __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res> @JsonSerializable() class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { const _$CreateVectorStoreFileRequestImpl( - {@JsonKey(name: 'file_id') required this.fileId}) + {@JsonKey(name: 'file_id') required this.fileId, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy}) : super._(); factory _$CreateVectorStoreFileRequestImpl.fromJson( @@ -46873,9 +47279,15 @@ class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { @JsonKey(name: 'file_id') final String fileId; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? 
chunkingStrategy; + @override String toString() { - return 'CreateVectorStoreFileRequest(fileId: $fileId)'; + return 'CreateVectorStoreFileRequest(fileId: $fileId, chunkingStrategy: $chunkingStrategy)'; } @override @@ -46883,12 +47295,14 @@ class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { return identical(this, other) || (other.runtimeType == runtimeType && other is _$CreateVectorStoreFileRequestImpl && - (identical(other.fileId, fileId) || other.fileId == fileId)); + (identical(other.fileId, fileId) || other.fileId == fileId) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, fileId); + int get hashCode => Object.hash(runtimeType, fileId, chunkingStrategy); @JsonKey(ignore: true) @override @@ -46909,7 +47323,9 @@ class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { abstract class _CreateVectorStoreFileRequest extends CreateVectorStoreFileRequest { const factory _CreateVectorStoreFileRequest( - {@JsonKey(name: 'file_id') required final String fileId}) = + {@JsonKey(name: 'file_id') required final String fileId, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy}) = _$CreateVectorStoreFileRequestImpl; const _CreateVectorStoreFileRequest._() : super._(); @@ -46922,6 +47338,12 @@ abstract class _CreateVectorStoreFileRequest @JsonKey(name: 'file_id') String get fileId; @override + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy; + @override @JsonKey(ignore: true) _$$CreateVectorStoreFileRequestImplCopyWith< _$CreateVectorStoreFileRequestImpl> @@ -48012,6 +48434,12 @@ mixin _$CreateVectorStoreFileBatchRequest { @JsonKey(name: 'file_ids') List get fileIds => throw _privateConstructorUsedError; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $CreateVectorStoreFileBatchRequestCopyWith @@ -48026,7 +48454,12 @@ abstract class $CreateVectorStoreFileBatchRequestCopyWith<$Res> { _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, CreateVectorStoreFileBatchRequest>; @useResult - $Res call({@JsonKey(name: 'file_ids') List fileIds}); + $Res call( + {@JsonKey(name: 'file_ids') List fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy}); + + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -48044,14 +48477,32 @@ class _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, @override $Res call({ Object? fileIds = null, + Object? chunkingStrategy = freezed, }) { return _then(_value.copyWith( fileIds: null == fileIds ? _value.fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List, + chunkingStrategy: freezed == chunkingStrategy + ? 
_value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, ) as $Val); } + + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, + (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } } /// @nodoc @@ -48063,7 +48514,13 @@ abstract class _$$CreateVectorStoreFileBatchRequestImplCopyWith<$Res> __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(name: 'file_ids') List fileIds}); + $Res call( + {@JsonKey(name: 'file_ids') List fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy}); + + @override + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -48080,12 +48537,17 @@ class __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res> @override $Res call({ Object? fileIds = null, + Object? chunkingStrategy = freezed, }) { return _then(_$CreateVectorStoreFileBatchRequestImpl( fileIds: null == fileIds ? _value._fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, )); } } @@ -48095,7 +48557,9 @@ class __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res> class _$CreateVectorStoreFileBatchRequestImpl extends _CreateVectorStoreFileBatchRequest { const _$CreateVectorStoreFileBatchRequestImpl( - {@JsonKey(name: 'file_ids') required final List fileIds}) + {@JsonKey(name: 'file_ids') required final List fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy}) : _fileIds = fileIds, super._(); @@ -48115,9 +48579,15 @@ class _$CreateVectorStoreFileBatchRequestImpl return EqualUnmodifiableListView(_fileIds); } + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? 
chunkingStrategy; + @override String toString() { - return 'CreateVectorStoreFileBatchRequest(fileIds: $fileIds)'; + return 'CreateVectorStoreFileBatchRequest(fileIds: $fileIds, chunkingStrategy: $chunkingStrategy)'; } @override @@ -48125,13 +48595,15 @@ class _$CreateVectorStoreFileBatchRequestImpl return identical(this, other) || (other.runtimeType == runtimeType && other is _$CreateVectorStoreFileBatchRequestImpl && - const DeepCollectionEquality().equals(other._fileIds, _fileIds)); + const DeepCollectionEquality().equals(other._fileIds, _fileIds) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy)); } @JsonKey(ignore: true) @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_fileIds)); + int get hashCode => Object.hash(runtimeType, + const DeepCollectionEquality().hash(_fileIds), chunkingStrategy); @JsonKey(ignore: true) @override @@ -48152,7 +48624,9 @@ class _$CreateVectorStoreFileBatchRequestImpl abstract class _CreateVectorStoreFileBatchRequest extends CreateVectorStoreFileBatchRequest { const factory _CreateVectorStoreFileBatchRequest( - {@JsonKey(name: 'file_ids') required final List fileIds}) = + {@JsonKey(name: 'file_ids') required final List fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy}) = _$CreateVectorStoreFileBatchRequestImpl; const _CreateVectorStoreFileBatchRequest._() : super._(); @@ -48166,6 +48640,12 @@ abstract class _CreateVectorStoreFileBatchRequest @JsonKey(name: 'file_ids') List get fileIds; @override + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy; + @override @JsonKey(ignore: true) _$$CreateVectorStoreFileBatchRequestImplCopyWith< _$CreateVectorStoreFileBatchRequestImpl> @@ -62244,6 +62724,933 @@ abstract class RunStepDeltaStepDetailsToolCallsCodeOutputImageObject get copyWith => throw _privateConstructorUsedError; } +ChunkingStrategyRequestParam _$ChunkingStrategyRequestParamFromJson( + Map json) { + switch (json['type']) { + case 'auto': + return AutoChunkingStrategyRequestParam.fromJson(json); + case 'static': + return StaticChunkingStrategyRequestParam.fromJson(json); + + default: + throw CheckedFromJsonException( + json, + 'type', + 'ChunkingStrategyRequestParam', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$ChunkingStrategyRequestParam { + /// Always `auto`. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String type) auto, + required TResult Function(String type, StaticChunkingStrategy static) + static, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type)? auto, + TResult? Function(String type, StaticChunkingStrategy static)? static, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type)? auto, + TResult Function(String type, StaticChunkingStrategy static)? 
static, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(AutoChunkingStrategyRequestParam value) auto, + required TResult Function(StaticChunkingStrategyRequestParam value) static, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(AutoChunkingStrategyRequestParam value)? auto, + TResult? Function(StaticChunkingStrategyRequestParam value)? static, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(AutoChunkingStrategyRequestParam value)? auto, + TResult Function(StaticChunkingStrategyRequestParam value)? static, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ChunkingStrategyRequestParamCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChunkingStrategyRequestParamCopyWith<$Res> { + factory $ChunkingStrategyRequestParamCopyWith( + ChunkingStrategyRequestParam value, + $Res Function(ChunkingStrategyRequestParam) then) = + _$ChunkingStrategyRequestParamCopyWithImpl<$Res, + ChunkingStrategyRequestParam>; + @useResult + $Res call({String type}); +} + +/// @nodoc +class _$ChunkingStrategyRequestParamCopyWithImpl<$Res, + $Val extends ChunkingStrategyRequestParam> + implements $ChunkingStrategyRequestParamCopyWith<$Res> { + _$ChunkingStrategyRequestParamCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$AutoChunkingStrategyRequestParamImplCopyWith<$Res> + implements $ChunkingStrategyRequestParamCopyWith<$Res> { + factory _$$AutoChunkingStrategyRequestParamImplCopyWith( + _$AutoChunkingStrategyRequestParamImpl value, + $Res Function(_$AutoChunkingStrategyRequestParamImpl) then) = + __$$AutoChunkingStrategyRequestParamImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String type}); +} + +/// @nodoc +class __$$AutoChunkingStrategyRequestParamImplCopyWithImpl<$Res> + extends _$ChunkingStrategyRequestParamCopyWithImpl<$Res, + _$AutoChunkingStrategyRequestParamImpl> + implements _$$AutoChunkingStrategyRequestParamImplCopyWith<$Res> { + __$$AutoChunkingStrategyRequestParamImplCopyWithImpl( + _$AutoChunkingStrategyRequestParamImpl _value, + $Res Function(_$AutoChunkingStrategyRequestParamImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$AutoChunkingStrategyRequestParamImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$AutoChunkingStrategyRequestParamImpl + extends AutoChunkingStrategyRequestParam { + const _$AutoChunkingStrategyRequestParamImpl({required this.type}) + : super._(); + + factory _$AutoChunkingStrategyRequestParamImpl.fromJson( + Map json) => + _$$AutoChunkingStrategyRequestParamImplFromJson(json); + + /// Always `auto`. 
+ @override + final String type; + + @override + String toString() { + return 'ChunkingStrategyRequestParam.auto(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$AutoChunkingStrategyRequestParamImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$AutoChunkingStrategyRequestParamImplCopyWith< + _$AutoChunkingStrategyRequestParamImpl> + get copyWith => __$$AutoChunkingStrategyRequestParamImplCopyWithImpl< + _$AutoChunkingStrategyRequestParamImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String type) auto, + required TResult Function(String type, StaticChunkingStrategy static) + static, + }) { + return auto(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type)? auto, + TResult? Function(String type, StaticChunkingStrategy static)? static, + }) { + return auto?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type)? auto, + TResult Function(String type, StaticChunkingStrategy static)? static, + required TResult orElse(), + }) { + if (auto != null) { + return auto(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(AutoChunkingStrategyRequestParam value) auto, + required TResult Function(StaticChunkingStrategyRequestParam value) static, + }) { + return auto(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(AutoChunkingStrategyRequestParam value)? auto, + TResult? Function(StaticChunkingStrategyRequestParam value)? static, + }) { + return auto?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(AutoChunkingStrategyRequestParam value)? auto, + TResult Function(StaticChunkingStrategyRequestParam value)? static, + required TResult orElse(), + }) { + if (auto != null) { + return auto(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$AutoChunkingStrategyRequestParamImplToJson( + this, + ); + } +} + +abstract class AutoChunkingStrategyRequestParam + extends ChunkingStrategyRequestParam { + const factory AutoChunkingStrategyRequestParam({required final String type}) = + _$AutoChunkingStrategyRequestParamImpl; + const AutoChunkingStrategyRequestParam._() : super._(); + + factory AutoChunkingStrategyRequestParam.fromJson(Map json) = + _$AutoChunkingStrategyRequestParamImpl.fromJson; + + @override + + /// Always `auto`. 
+ String get type; + @override + @JsonKey(ignore: true) + _$$AutoChunkingStrategyRequestParamImplCopyWith< + _$AutoChunkingStrategyRequestParamImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$StaticChunkingStrategyRequestParamImplCopyWith<$Res> + implements $ChunkingStrategyRequestParamCopyWith<$Res> { + factory _$$StaticChunkingStrategyRequestParamImplCopyWith( + _$StaticChunkingStrategyRequestParamImpl value, + $Res Function(_$StaticChunkingStrategyRequestParamImpl) then) = + __$$StaticChunkingStrategyRequestParamImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String type, StaticChunkingStrategy static}); + + $StaticChunkingStrategyCopyWith<$Res> get static; +} + +/// @nodoc +class __$$StaticChunkingStrategyRequestParamImplCopyWithImpl<$Res> + extends _$ChunkingStrategyRequestParamCopyWithImpl<$Res, + _$StaticChunkingStrategyRequestParamImpl> + implements _$$StaticChunkingStrategyRequestParamImplCopyWith<$Res> { + __$$StaticChunkingStrategyRequestParamImplCopyWithImpl( + _$StaticChunkingStrategyRequestParamImpl _value, + $Res Function(_$StaticChunkingStrategyRequestParamImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? static = null, + }) { + return _then(_$StaticChunkingStrategyRequestParamImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + static: null == static + ? _value.static + : static // ignore: cast_nullable_to_non_nullable + as StaticChunkingStrategy, + )); + } + + @override + @pragma('vm:prefer-inline') + $StaticChunkingStrategyCopyWith<$Res> get static { + return $StaticChunkingStrategyCopyWith<$Res>(_value.static, (value) { + return _then(_value.copyWith(static: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$StaticChunkingStrategyRequestParamImpl + extends StaticChunkingStrategyRequestParam { + const _$StaticChunkingStrategyRequestParamImpl( + {required this.type, required this.static}) + : super._(); + + factory _$StaticChunkingStrategyRequestParamImpl.fromJson( + Map json) => + _$$StaticChunkingStrategyRequestParamImplFromJson(json); + + /// Always `static`. + @override + final String type; + + /// Static chunking strategy + @override + final StaticChunkingStrategy static; + + @override + String toString() { + return 'ChunkingStrategyRequestParam.static(type: $type, static: $static)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$StaticChunkingStrategyRequestParamImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.static, static) || other.static == static)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type, static); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$StaticChunkingStrategyRequestParamImplCopyWith< + _$StaticChunkingStrategyRequestParamImpl> + get copyWith => __$$StaticChunkingStrategyRequestParamImplCopyWithImpl< + _$StaticChunkingStrategyRequestParamImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String type) auto, + required TResult Function(String type, StaticChunkingStrategy static) + static, + }) { + return static(type, this.static); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type)? auto, + TResult? 
Function(String type, StaticChunkingStrategy static)? static, + }) { + return static?.call(type, this.static); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type)? auto, + TResult Function(String type, StaticChunkingStrategy static)? static, + required TResult orElse(), + }) { + if (static != null) { + return static(type, this.static); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(AutoChunkingStrategyRequestParam value) auto, + required TResult Function(StaticChunkingStrategyRequestParam value) static, + }) { + return static(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(AutoChunkingStrategyRequestParam value)? auto, + TResult? Function(StaticChunkingStrategyRequestParam value)? static, + }) { + return static?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(AutoChunkingStrategyRequestParam value)? auto, + TResult Function(StaticChunkingStrategyRequestParam value)? static, + required TResult orElse(), + }) { + if (static != null) { + return static(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$StaticChunkingStrategyRequestParamImplToJson( + this, + ); + } +} + +abstract class StaticChunkingStrategyRequestParam + extends ChunkingStrategyRequestParam { + const factory StaticChunkingStrategyRequestParam( + {required final String type, + required final StaticChunkingStrategy static}) = + _$StaticChunkingStrategyRequestParamImpl; + const StaticChunkingStrategyRequestParam._() : super._(); + + factory StaticChunkingStrategyRequestParam.fromJson( + Map json) = + _$StaticChunkingStrategyRequestParamImpl.fromJson; + + @override + + /// Always `static`. + String get type; + + /// Static chunking strategy + StaticChunkingStrategy get static; + @override + @JsonKey(ignore: true) + _$$StaticChunkingStrategyRequestParamImplCopyWith< + _$StaticChunkingStrategyRequestParamImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ChunkingStrategyResponseParam _$ChunkingStrategyResponseParamFromJson( + Map json) { + switch (json['type']) { + case 'static': + return StaticChunkingStrategyResponseParam.fromJson(json); + case 'other': + return OtherChunkingStrategyResponseParam.fromJson(json); + + default: + throw CheckedFromJsonException( + json, + 'type', + 'ChunkingStrategyResponseParam', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$ChunkingStrategyResponseParam { + /// Always `static`. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String type, StaticChunkingStrategy static) + static, + required TResult Function(String type) other, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type, StaticChunkingStrategy static)? static, + TResult? Function(String type)? other, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type, StaticChunkingStrategy static)? static, + TResult Function(String type)? other, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(StaticChunkingStrategyResponseParam value) static, + required TResult Function(OtherChunkingStrategyResponseParam value) other, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? 
Function(StaticChunkingStrategyResponseParam value)? static, + TResult? Function(OtherChunkingStrategyResponseParam value)? other, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(StaticChunkingStrategyResponseParam value)? static, + TResult Function(OtherChunkingStrategyResponseParam value)? other, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ChunkingStrategyResponseParamCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChunkingStrategyResponseParamCopyWith<$Res> { + factory $ChunkingStrategyResponseParamCopyWith( + ChunkingStrategyResponseParam value, + $Res Function(ChunkingStrategyResponseParam) then) = + _$ChunkingStrategyResponseParamCopyWithImpl<$Res, + ChunkingStrategyResponseParam>; + @useResult + $Res call({String type}); +} + +/// @nodoc +class _$ChunkingStrategyResponseParamCopyWithImpl<$Res, + $Val extends ChunkingStrategyResponseParam> + implements $ChunkingStrategyResponseParamCopyWith<$Res> { + _$ChunkingStrategyResponseParamCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$StaticChunkingStrategyResponseParamImplCopyWith<$Res> + implements $ChunkingStrategyResponseParamCopyWith<$Res> { + factory _$$StaticChunkingStrategyResponseParamImplCopyWith( + _$StaticChunkingStrategyResponseParamImpl value, + $Res Function(_$StaticChunkingStrategyResponseParamImpl) then) = + __$$StaticChunkingStrategyResponseParamImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String type, StaticChunkingStrategy static}); + + $StaticChunkingStrategyCopyWith<$Res> get static; +} + +/// @nodoc +class __$$StaticChunkingStrategyResponseParamImplCopyWithImpl<$Res> + extends _$ChunkingStrategyResponseParamCopyWithImpl<$Res, + _$StaticChunkingStrategyResponseParamImpl> + implements _$$StaticChunkingStrategyResponseParamImplCopyWith<$Res> { + __$$StaticChunkingStrategyResponseParamImplCopyWithImpl( + _$StaticChunkingStrategyResponseParamImpl _value, + $Res Function(_$StaticChunkingStrategyResponseParamImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? static = null, + }) { + return _then(_$StaticChunkingStrategyResponseParamImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + static: null == static + ? 
_value.static + : static // ignore: cast_nullable_to_non_nullable + as StaticChunkingStrategy, + )); + } + + @override + @pragma('vm:prefer-inline') + $StaticChunkingStrategyCopyWith<$Res> get static { + return $StaticChunkingStrategyCopyWith<$Res>(_value.static, (value) { + return _then(_value.copyWith(static: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$StaticChunkingStrategyResponseParamImpl + extends StaticChunkingStrategyResponseParam { + const _$StaticChunkingStrategyResponseParamImpl( + {required this.type, required this.static}) + : super._(); + + factory _$StaticChunkingStrategyResponseParamImpl.fromJson( + Map json) => + _$$StaticChunkingStrategyResponseParamImplFromJson(json); + + /// Always `static`. + @override + final String type; + + /// Static chunking strategy + @override + final StaticChunkingStrategy static; + + @override + String toString() { + return 'ChunkingStrategyResponseParam.static(type: $type, static: $static)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$StaticChunkingStrategyResponseParamImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.static, static) || other.static == static)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type, static); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$StaticChunkingStrategyResponseParamImplCopyWith< + _$StaticChunkingStrategyResponseParamImpl> + get copyWith => __$$StaticChunkingStrategyResponseParamImplCopyWithImpl< + _$StaticChunkingStrategyResponseParamImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String type, StaticChunkingStrategy static) + static, + required TResult Function(String type) other, + }) { + return static(type, this.static); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type, StaticChunkingStrategy static)? static, + TResult? Function(String type)? other, + }) { + return static?.call(type, this.static); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type, StaticChunkingStrategy static)? static, + TResult Function(String type)? other, + required TResult orElse(), + }) { + if (static != null) { + return static(type, this.static); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(StaticChunkingStrategyResponseParam value) static, + required TResult Function(OtherChunkingStrategyResponseParam value) other, + }) { + return static(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(StaticChunkingStrategyResponseParam value)? static, + TResult? Function(OtherChunkingStrategyResponseParam value)? other, + }) { + return static?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(StaticChunkingStrategyResponseParam value)? static, + TResult Function(OtherChunkingStrategyResponseParam value)? 
other, + required TResult orElse(), + }) { + if (static != null) { + return static(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$StaticChunkingStrategyResponseParamImplToJson( + this, + ); + } +} + +abstract class StaticChunkingStrategyResponseParam + extends ChunkingStrategyResponseParam { + const factory StaticChunkingStrategyResponseParam( + {required final String type, + required final StaticChunkingStrategy static}) = + _$StaticChunkingStrategyResponseParamImpl; + const StaticChunkingStrategyResponseParam._() : super._(); + + factory StaticChunkingStrategyResponseParam.fromJson( + Map json) = + _$StaticChunkingStrategyResponseParamImpl.fromJson; + + @override + + /// Always `static`. + String get type; + + /// Static chunking strategy + StaticChunkingStrategy get static; + @override + @JsonKey(ignore: true) + _$$StaticChunkingStrategyResponseParamImplCopyWith< + _$StaticChunkingStrategyResponseParamImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$OtherChunkingStrategyResponseParamImplCopyWith<$Res> + implements $ChunkingStrategyResponseParamCopyWith<$Res> { + factory _$$OtherChunkingStrategyResponseParamImplCopyWith( + _$OtherChunkingStrategyResponseParamImpl value, + $Res Function(_$OtherChunkingStrategyResponseParamImpl) then) = + __$$OtherChunkingStrategyResponseParamImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String type}); +} + +/// @nodoc +class __$$OtherChunkingStrategyResponseParamImplCopyWithImpl<$Res> + extends _$ChunkingStrategyResponseParamCopyWithImpl<$Res, + _$OtherChunkingStrategyResponseParamImpl> + implements _$$OtherChunkingStrategyResponseParamImplCopyWith<$Res> { + __$$OtherChunkingStrategyResponseParamImplCopyWithImpl( + _$OtherChunkingStrategyResponseParamImpl _value, + $Res Function(_$OtherChunkingStrategyResponseParamImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$OtherChunkingStrategyResponseParamImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$OtherChunkingStrategyResponseParamImpl + extends OtherChunkingStrategyResponseParam { + const _$OtherChunkingStrategyResponseParamImpl({required this.type}) + : super._(); + + factory _$OtherChunkingStrategyResponseParamImpl.fromJson( + Map json) => + _$$OtherChunkingStrategyResponseParamImplFromJson(json); + + /// Always `other`. 
+ @override + final String type; + + @override + String toString() { + return 'ChunkingStrategyResponseParam.other(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$OtherChunkingStrategyResponseParamImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$OtherChunkingStrategyResponseParamImplCopyWith< + _$OtherChunkingStrategyResponseParamImpl> + get copyWith => __$$OtherChunkingStrategyResponseParamImplCopyWithImpl< + _$OtherChunkingStrategyResponseParamImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String type, StaticChunkingStrategy static) + static, + required TResult Function(String type) other, + }) { + return other(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type, StaticChunkingStrategy static)? static, + TResult? Function(String type)? other, + }) { + return other?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type, StaticChunkingStrategy static)? static, + TResult Function(String type)? other, + required TResult orElse(), + }) { + if (other != null) { + return other(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(StaticChunkingStrategyResponseParam value) static, + required TResult Function(OtherChunkingStrategyResponseParam value) other, + }) { + return other(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(StaticChunkingStrategyResponseParam value)? static, + TResult? Function(OtherChunkingStrategyResponseParam value)? other, + }) { + return other?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(StaticChunkingStrategyResponseParam value)? static, + TResult Function(OtherChunkingStrategyResponseParam value)? other, + required TResult orElse(), + }) { + if (other != null) { + return other(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$OtherChunkingStrategyResponseParamImplToJson( + this, + ); + } +} + +abstract class OtherChunkingStrategyResponseParam + extends ChunkingStrategyResponseParam { + const factory OtherChunkingStrategyResponseParam( + {required final String type}) = _$OtherChunkingStrategyResponseParamImpl; + const OtherChunkingStrategyResponseParam._() : super._(); + + factory OtherChunkingStrategyResponseParam.fromJson( + Map json) = + _$OtherChunkingStrategyResponseParamImpl.fromJson; + + @override + + /// Always `other`. + String get type; + @override + @JsonKey(ignore: true) + _$$OtherChunkingStrategyResponseParamImplCopyWith< + _$OtherChunkingStrategyResponseParamImpl> + get copyWith => throw _privateConstructorUsedError; +} + AssistantStreamEvent _$AssistantStreamEventFromJson(Map json) { switch (json['event']) { case 'thread_stream_event': diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 9ed1ff90..ede4fed4 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -3520,6 +3520,10 @@ _$ToolResourcesFileSearchVectorStoreImpl fileIds: (json['file_ids'] as List?) 
?.map((e) => e as String) .toList(), + chunkingStrategy: json['chunking_strategy'] == null + ? null + : ChunkingStrategyRequestParam.fromJson( + json['chunking_strategy'] as Map), metadata: json['metadata'], ); @@ -3534,6 +3538,7 @@ Map _$$ToolResourcesFileSearchVectorStoreImplToJson( } writeNotNull('file_ids', instance.fileIds); + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); writeNotNull('metadata', instance.metadata); return val; } @@ -4401,6 +4406,10 @@ _$CreateVectorStoreRequestImpl _$$CreateVectorStoreRequestImplFromJson( ? null : VectorStoreExpirationAfter.fromJson( json['expires_after'] as Map), + chunkingStrategy: json['chunking_strategy'] == null + ? null + : ChunkingStrategyRequestParam.fromJson( + json['chunking_strategy'] as Map), metadata: json['metadata'], ); @@ -4417,6 +4426,7 @@ Map _$$CreateVectorStoreRequestImplToJson( writeNotNull('name', instance.name); writeNotNull('file_ids', instance.fileIds); writeNotNull('expires_after', instance.expiresAfter?.toJson()); + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); writeNotNull('metadata', instance.metadata); return val; } @@ -4499,19 +4509,33 @@ _$VectorStoreFileObjectImpl _$$VectorStoreFileObjectImplFromJson( ? null : VectorStoreFileObjectLastError.fromJson( json['last_error'] as Map), + chunkingStrategy: json['chunking_strategy'] == null + ? null + : ChunkingStrategyResponseParam.fromJson( + json['chunking_strategy'] as Map), ); Map _$$VectorStoreFileObjectImplToJson( - _$VectorStoreFileObjectImpl instance) => - { - 'id': instance.id, - 'object': instance.object, - 'usage_bytes': instance.usageBytes, - 'created_at': instance.createdAt, - 'vector_store_id': instance.vectorStoreId, - 'status': _$VectorStoreFileStatusEnumMap[instance.status]!, - 'last_error': instance.lastError?.toJson(), - }; + _$VectorStoreFileObjectImpl instance) { + final val = { + 'id': instance.id, + 'object': instance.object, + 'usage_bytes': instance.usageBytes, + 'created_at': instance.createdAt, + 'vector_store_id': instance.vectorStoreId, + 'status': _$VectorStoreFileStatusEnumMap[instance.status]!, + 'last_error': instance.lastError?.toJson(), + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); + return val; +} const _$VectorStoreFileStatusEnumMap = { VectorStoreFileStatus.inProgress: 'in_progress', @@ -4542,17 +4566,45 @@ const _$VectorStoreFileObjectLastErrorCodeEnumMap = { VectorStoreFileObjectLastErrorCode.unhandledMimeType: 'unhandled_mime_type', }; +_$StaticChunkingStrategyImpl _$$StaticChunkingStrategyImplFromJson( + Map json) => + _$StaticChunkingStrategyImpl( + maxChunkSizeTokens: json['max_chunk_size_tokens'] as int, + chunkOverlapTokens: json['chunk_overlap_tokens'] as int, + ); + +Map _$$StaticChunkingStrategyImplToJson( + _$StaticChunkingStrategyImpl instance) => + { + 'max_chunk_size_tokens': instance.maxChunkSizeTokens, + 'chunk_overlap_tokens': instance.chunkOverlapTokens, + }; + _$CreateVectorStoreFileRequestImpl _$$CreateVectorStoreFileRequestImplFromJson( Map json) => _$CreateVectorStoreFileRequestImpl( fileId: json['file_id'] as String, + chunkingStrategy: json['chunking_strategy'] == null + ? 
null + : ChunkingStrategyRequestParam.fromJson( + json['chunking_strategy'] as Map), ); Map _$$CreateVectorStoreFileRequestImplToJson( - _$CreateVectorStoreFileRequestImpl instance) => - { - 'file_id': instance.fileId, - }; + _$CreateVectorStoreFileRequestImpl instance) { + final val = { + 'file_id': instance.fileId, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); + return val; +} _$ListVectorStoreFilesResponseImpl _$$ListVectorStoreFilesResponseImplFromJson( Map json) => @@ -4651,13 +4703,27 @@ _$CreateVectorStoreFileBatchRequestImpl fileIds: (json['file_ids'] as List) .map((e) => e as String) .toList(), + chunkingStrategy: json['chunking_strategy'] == null + ? null + : ChunkingStrategyRequestParam.fromJson( + json['chunking_strategy'] as Map), ); Map _$$CreateVectorStoreFileBatchRequestImplToJson( - _$CreateVectorStoreFileBatchRequestImpl instance) => - { - 'file_ids': instance.fileIds, - }; + _$CreateVectorStoreFileBatchRequestImpl instance) { + final val = { + 'file_ids': instance.fileIds, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); + return val; +} _$ErrorImpl _$$ErrorImplFromJson(Map json) => _$ErrorImpl( code: json['code'] as String?, @@ -5810,6 +5876,64 @@ Map return val; } +_$AutoChunkingStrategyRequestParamImpl + _$$AutoChunkingStrategyRequestParamImplFromJson( + Map json) => + _$AutoChunkingStrategyRequestParamImpl( + type: json['type'] as String, + ); + +Map _$$AutoChunkingStrategyRequestParamImplToJson( + _$AutoChunkingStrategyRequestParamImpl instance) => + { + 'type': instance.type, + }; + +_$StaticChunkingStrategyRequestParamImpl + _$$StaticChunkingStrategyRequestParamImplFromJson( + Map json) => + _$StaticChunkingStrategyRequestParamImpl( + type: json['type'] as String, + static: StaticChunkingStrategy.fromJson( + json['static'] as Map), + ); + +Map _$$StaticChunkingStrategyRequestParamImplToJson( + _$StaticChunkingStrategyRequestParamImpl instance) => + { + 'type': instance.type, + 'static': instance.static.toJson(), + }; + +_$StaticChunkingStrategyResponseParamImpl + _$$StaticChunkingStrategyResponseParamImplFromJson( + Map json) => + _$StaticChunkingStrategyResponseParamImpl( + type: json['type'] as String, + static: StaticChunkingStrategy.fromJson( + json['static'] as Map), + ); + +Map _$$StaticChunkingStrategyResponseParamImplToJson( + _$StaticChunkingStrategyResponseParamImpl instance) => + { + 'type': instance.type, + 'static': instance.static.toJson(), + }; + +_$OtherChunkingStrategyResponseParamImpl + _$$OtherChunkingStrategyResponseParamImplFromJson( + Map json) => + _$OtherChunkingStrategyResponseParamImpl( + type: json['type'] as String, + ); + +Map _$$OtherChunkingStrategyResponseParamImplToJson( + _$OtherChunkingStrategyResponseParamImpl instance) => + { + 'type': instance.type, + }; + _$ThreadStreamEventImpl _$$ThreadStreamEventImplFromJson( Map json) => _$ThreadStreamEventImpl( diff --git a/packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart b/packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart new file mode 100644 index 00000000..aa67e062 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart @@ -0,0 +1,60 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: 
type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: StaticChunkingStrategy +// ========================================== + +/// Static chunking strategy +@freezed +class StaticChunkingStrategy with _$StaticChunkingStrategy { + const StaticChunkingStrategy._(); + + /// Factory constructor for StaticChunkingStrategy + const factory StaticChunkingStrategy({ + /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + /// maximum value is `4096`. + @JsonKey(name: 'max_chunk_size_tokens') required int maxChunkSizeTokens, + + /// The number of tokens that overlap between chunks. The default value is `400`. + /// + /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. + @JsonKey(name: 'chunk_overlap_tokens') required int chunkOverlapTokens, + }) = _StaticChunkingStrategy; + + /// Object construction from a JSON representation + factory StaticChunkingStrategy.fromJson(Map json) => + _$StaticChunkingStrategyFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'max_chunk_size_tokens', + 'chunk_overlap_tokens' + ]; + + /// Validation constants + static const maxChunkSizeTokensMinValue = 100; + static const maxChunkSizeTokensMaxValue = 4096; + + /// Perform validations on the schema property values + String? validateSchema() { + if (maxChunkSizeTokens < maxChunkSizeTokensMinValue) { + return "The value of 'maxChunkSizeTokens' cannot be < $maxChunkSizeTokensMinValue"; + } + if (maxChunkSizeTokens > maxChunkSizeTokensMaxValue) { + return "The value of 'maxChunkSizeTokens' cannot be > $maxChunkSizeTokensMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'max_chunk_size_tokens': maxChunkSizeTokens, + 'chunk_overlap_tokens': chunkOverlapTokens, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart b/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart index 63247873..cc01299d 100644 --- a/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart +++ b/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart @@ -19,6 +19,11 @@ class ToolResourcesFileSearchVectorStore /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
@JsonKey(includeIfNull: false) dynamic metadata, }) = _ToolResourcesFileSearchVectorStore; @@ -29,7 +34,11 @@ class ToolResourcesFileSearchVectorStore _$ToolResourcesFileSearchVectorStoreFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_ids', 'metadata']; + static const List propertyNames = [ + 'file_ids', + 'chunking_strategy', + 'metadata' + ]; /// Perform validations on the schema property values String? validateSchema() { @@ -40,6 +49,7 @@ class ToolResourcesFileSearchVectorStore Map toMap() { return { 'file_ids': fileIds, + 'chunking_strategy': chunkingStrategy, 'metadata': metadata, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart b/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart index 53e6f928..b6c24133 100644 --- a/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart @@ -36,6 +36,11 @@ class VectorStoreFileObject with _$VectorStoreFileObject { /// The last error associated with this vector store file. Will be `null` if there are no errors. @JsonKey(name: 'last_error') required VectorStoreFileObjectLastError? lastError, + + /// The chunking strategy used to chunk the file(s). + /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? chunkingStrategy, }) = _VectorStoreFileObject; /// Object construction from a JSON representation @@ -50,7 +55,8 @@ class VectorStoreFileObject with _$VectorStoreFileObject { 'created_at', 'vector_store_id', 'status', - 'last_error' + 'last_error', + 'chunking_strategy' ]; /// Perform validations on the schema property values @@ -68,6 +74,7 @@ class VectorStoreFileObject with _$VectorStoreFileObject { 'vector_store_id': vectorStoreId, 'status': status, 'last_error': lastError, + 'chunking_strategy': chunkingStrategy, }; } } diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index cb469506..141e9071 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -4527,6 +4527,8 @@ components: maxItems: 10000 items: type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" metadata: type: object description: | @@ -5733,6 +5735,8 @@ components: type: string expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" metadata: description: *metadata_description type: object @@ -5848,6 +5852,8 @@ components: required: - code - message + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyResponseParam" required: - id - object @@ -5856,6 +5862,97 @@ components: - vector_store_id - status - last_error + ChunkingStrategyRequestParam: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + discriminator: + propertyName: type + AutoChunkingStrategyRequestParam: + type: object + description: | + Auto Chunking Strategy, the default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` + and `chunk_overlap_tokens` of `400`. 
+ additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + default: auto + required: + - type + StaticChunkingStrategyRequestParam: + type: object + description: Static chunking strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + default: static + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + StaticChunkingStrategy: + type: object + description: Static chunking strategy + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: | + The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + ChunkingStrategyResponseParam: + type: object + description: The chunking strategy used to chunk the file(s). + oneOf: + - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" + - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" + discriminator: + propertyName: type + OtherChunkingStrategyResponseParam: + type: object + description: | + Other Chunking Strategy. This is returned when the chunking strategy is unknown. Typically, this is because + the file was indexed before the `chunking_strategy` concept was introduced in the API. + additionalProperties: false + properties: + type: + type: string + description: Always `other`. + default: other + required: + - type + StaticChunkingStrategyResponseParam: + type: object + description: Static Chunking Strategy. + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + default: static + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static CreateVectorStoreFileRequest: type: object description: Request object for the Create vector store file endpoint. @@ -5863,6 +5960,8 @@ components: file_id: description: A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" required: - file_id ListVectorStoreFilesResponse: @@ -5976,6 +6075,8 @@ components: maxItems: 500 items: type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" required: - file_ids AssistantStreamEvent: diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index fd863f50..0dd10f12 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -10044,6 +10044,52 @@ components: maxItems: 10000 items: type: string + chunking_strategy: + # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. 
+ additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: [ "auto" ] + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true metadata: type: object description: | @@ -11009,11 +11055,58 @@ components: maxItems: 10000 items: type: string + chunking_strategy: + # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: [ "auto" ] + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true metadata: type: object description: | Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. x-oaiTypeLabel: map + x-oaiExpandable: true oneOf: - required: [ vector_store_ids ] - required: [ vector_stores ] @@ -12349,6 +12442,13 @@ components: type: string expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" + chunking_strategy: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true metadata: description: *metadata_description type: object @@ -12458,6 +12558,13 @@ components: required: - code - message + chunking_strategy: + type: object + description: The strategy used to chunk the file. 
+ oneOf: + - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" + - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" + x-oaiExpandable: true required: - id - object @@ -12477,9 +12584,99 @@ components: "created_at": 1698107661, "vector_store_id": "vs_abc123", "status": "completed", - "last_error": null + "last_error": null, + "chunking_strategy": { + "type": "static", + "static": { + "max_chunk_size_tokens": 800, + "chunk_overlap_tokens": 400 + } + } } + OtherChunkingStrategyResponseParam: + type: object + title: Other Chunking Strategy + description: This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. + additionalProperties: false + properties: + type: + type: string + description: Always `other`. + enum: [ "other" ] + required: + - type + + StaticChunkingStrategyResponseParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + + StaticChunkingStrategy: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + + AutoChunkingStrategyRequestParam: + type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: [ "auto" ] + required: + - type + + StaticChunkingStrategyRequestParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + + ChunkingStrategyRequestParam: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true + CreateVectorStoreFileRequest: type: object additionalProperties: false @@ -12487,6 +12684,8 @@ components: file_id: description: A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. 
type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" required: - file_id @@ -12613,6 +12812,8 @@ components: maxItems: 500 items: type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" required: - file_ids From f6136c737d921bab9a699868facc8de11ebfe303 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 20 Jul 2024 11:19:52 +0200 Subject: [PATCH 073/251] feat: Add GPT-4o-mini to model catalog (#497) --- .../lib/src/chat_models/types.dart | 16 +- .../schema/chat_completion_message.dart | 2 +- .../schema/create_assistant_request.dart | 4 + .../create_chat_completion_request.dart | 6 + .../generated/schema/create_run_request.dart | 4 + .../schema/create_thread_and_run_request.dart | 4 + .../fine_tuning_job_hyperparameters.dart | 12 +- .../src/generated/schema/schema.freezed.dart | 21 +- .../lib/src/generated/schema/schema.g.dart | 8 + packages/openai_dart/oas/openapi_curated.yaml | 29 +- .../openai_dart/oas/openapi_official.yaml | 836 +++++++++++++----- 11 files changed, 701 insertions(+), 241 deletions(-) diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 0526f937..299902fe 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -30,23 +30,23 @@ class ChatOpenAIOptions extends ChatModelOptions { /// /// Available models: /// - `gpt-4` - /// - `gpt-4-0314` - /// - `gpt-4-0613` /// - `gpt-4-32k` /// - `gpt-4-32k-0314` /// - `gpt-4-32k-0613` - /// - `gpt-4-turbo-preview` - /// - `gpt-4-1106-preview` /// - `gpt-4-0125-preview` + /// - `gpt-4-0314` + /// - `gpt-4-0613` + /// - `gpt-4-1106-preview` + /// - `gpt-4-turbo` + /// - `gpt-4-turbo-2024-04-09` + /// - `gpt-4-turbo-preview` /// - `gpt-4-vision-preview` /// - `gpt-4o` /// - `gpt-4o-2024-05-13` + /// - `gpt-4o-mini` + /// - `gpt-4o-mini-2024-07-18` /// - `gpt-3.5-turbo` /// - `gpt-3.5-turbo-16k` - /// - `gpt-3.5-turbo-0301` - /// - `gpt-3.5-turbo-0613` - /// - `gpt-3.5-turbo-1106` - /// - `gpt-3.5-turbo-16k-0613` /// /// Mind that the list may be outdated. /// See https://platform.openai.com/docs/models for the latest list. diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart index 65e9b1d8..e546e524 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart @@ -135,7 +135,7 @@ sealed class ChatCompletionUserMessageContent with _$ChatCompletionUserMessageContent { const ChatCompletionUserMessageContent._(); - /// An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-vision-preview` model. + /// An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. 
const factory ChatCompletionUserMessageContent.parts( List value, ) = ChatCompletionMessageContentParts; diff --git a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart index 16db2e01..3b9086d3 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart @@ -163,6 +163,10 @@ enum AssistantModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-mini') + gpt4oMini, + @JsonValue('gpt-4o-mini-2024-07-18') + gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 657f7268..6e7e429a 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -73,6 +73,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. /// - If set to 'default', the request will be processed using the default service tier with a lower /// uptime SLA and no latency guarantee. + /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @JsonKey( @@ -302,6 +303,10 @@ enum ChatCompletionModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-mini') + gpt4oMini, + @JsonValue('gpt-4o-mini-2024-07-18') + gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') @@ -423,6 +428,7 @@ class ChatCompletionResponseFormat with _$ChatCompletionResponseFormat { /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. /// - If set to 'default', the request will be processed using the default service tier with a lower /// uptime SLA and no latency guarantee. +/// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. 
enum CreateChatCompletionRequestServiceTier { diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 375ea8a0..83c04dc1 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -203,6 +203,10 @@ enum RunModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-mini') + gpt4oMini, + @JsonValue('gpt-4o-mini-2024-07-18') + gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index ff5013d5..67b921cb 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -202,6 +202,10 @@ enum ThreadAndRunModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-mini') + gpt4oMini, + @JsonValue('gpt-4o-mini-2024-07-18') + gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') diff --git a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart index 51d89b60..409aa1d7 100644 --- a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart +++ b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart @@ -15,8 +15,10 @@ class FineTuningJobHyperparameters with _$FineTuningJobHyperparameters { /// Factory constructor for FineTuningJobHyperparameters const factory FineTuningJobHyperparameters({ - /// The number of epochs to train the model for. An epoch refers to one - /// full cycle through the training dataset. + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + /// manually, we support any number between 1 and 50 epochs. @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') required FineTuningNEpochs nEpochs, @@ -56,8 +58,10 @@ enum FineTuningNEpochsOptions { // CLASS: FineTuningNEpochs // ========================================== -/// The number of epochs to train the model for. An epoch refers to one -/// full cycle through the training dataset. +/// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. +/// +/// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number +/// manually, we support any number between 1 and 50 epochs. @freezed sealed class FineTuningNEpochs with _$FineTuningNEpochs { const FineTuningNEpochs._(); diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index d1f911c1..75973e85 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -3463,6 +3463,7 @@ mixin _$CreateChatCompletionRequest { /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. 
/// - If set to 'default', the request will be processed using the default service tier with a lower /// uptime SLA and no latency guarantee. + /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @JsonKey( @@ -4151,6 +4152,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. /// - If set to 'default', the request will be processed using the default service tier with a lower /// uptime SLA and no latency guarantee. + /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @override @@ -4477,6 +4479,7 @@ abstract class _CreateChatCompletionRequest /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. /// - If set to 'default', the request will be processed using the default service tier with a lower /// uptime SLA and no latency guarantee. + /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @JsonKey( @@ -15766,8 +15769,10 @@ FineTuningJobHyperparameters _$FineTuningJobHyperparametersFromJson( /// @nodoc mixin _$FineTuningJobHyperparameters { - /// The number of epochs to train the model for. An epoch refers to one - /// full cycle through the training dataset. + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + /// manually, we support any number between 1 and 50 epochs. @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') FineTuningNEpochs get nEpochs => throw _privateConstructorUsedError; @@ -15882,8 +15887,10 @@ class _$FineTuningJobHyperparametersImpl extends _FineTuningJobHyperparameters { Map json) => _$$FineTuningJobHyperparametersImplFromJson(json); - /// The number of epochs to train the model for. An epoch refers to one - /// full cycle through the training dataset. + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + /// manually, we support any number between 1 and 50 epochs. @override @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') @@ -15936,8 +15943,10 @@ abstract class _FineTuningJobHyperparameters @override - /// The number of epochs to train the model for. An epoch refers to one - /// full cycle through the training dataset. + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + /// manually, we support any number between 1 and 50 epochs. 
@_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') FineTuningNEpochs get nEpochs; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index ede4fed4..95ffa209 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -427,6 +427,8 @@ const _$ChatCompletionModelsEnumMap = { ChatCompletionModels.gpt4VisionPreview: 'gpt-4-vision-preview', ChatCompletionModels.gpt4o: 'gpt-4o', ChatCompletionModels.gpt4o20240513: 'gpt-4o-2024-05-13', + ChatCompletionModels.gpt4oMini: 'gpt-4o-mini', + ChatCompletionModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', ChatCompletionModels.gpt35Turbo: 'gpt-3.5-turbo', ChatCompletionModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', ChatCompletionModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -2275,6 +2277,8 @@ const _$AssistantModelsEnumMap = { AssistantModels.gpt4VisionPreview: 'gpt-4-vision-preview', AssistantModels.gpt4o: 'gpt-4o', AssistantModels.gpt4o20240513: 'gpt-4o-2024-05-13', + AssistantModels.gpt4oMini: 'gpt-4o-mini', + AssistantModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', AssistantModels.gpt35Turbo: 'gpt-3.5-turbo', AssistantModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', AssistantModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -2935,6 +2939,8 @@ const _$RunModelsEnumMap = { RunModels.gpt4VisionPreview: 'gpt-4-vision-preview', RunModels.gpt4o: 'gpt-4o', RunModels.gpt4o20240513: 'gpt-4o-2024-05-13', + RunModels.gpt4oMini: 'gpt-4o-mini', + RunModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', RunModels.gpt35Turbo: 'gpt-3.5-turbo', RunModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', RunModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -3259,6 +3265,8 @@ const _$ThreadAndRunModelsEnumMap = { ThreadAndRunModels.gpt4VisionPreview: 'gpt-4-vision-preview', ThreadAndRunModels.gpt4o: 'gpt-4o', ThreadAndRunModels.gpt4o20240513: 'gpt-4o-2024-05-13', + ThreadAndRunModels.gpt4oMini: 'gpt-4o-mini', + ThreadAndRunModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', ThreadAndRunModels.gpt35Turbo: 'gpt-3.5-turbo', ThreadAndRunModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', ThreadAndRunModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 141e9071..420c7cf4 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1820,6 +1820,8 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -1915,6 +1917,7 @@ components: - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. + - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. type: string @@ -2071,7 +2074,7 @@ components: - type: string description: The text contents of the message. - type: array - description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-vision-preview` model. 
+ description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. items: $ref: "#/components/schemas/ChatCompletionMessageContentPart" minItems: 1 @@ -2918,8 +2921,10 @@ components: n_epochs: title: FineTuningNEpochs description: | - The number of epochs to train the model for. An epoch refers to one - full cycle through the training dataset. + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + manually, we support any number between 1 and 50 epochs. oneOf: - type: string title: FineTuningNEpochsOptions @@ -3523,6 +3528,8 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -4085,6 +4092,8 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -4327,6 +4336,8 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -5872,7 +5883,7 @@ components: propertyName: type AutoChunkingStrategyRequestParam: type: object - description: | + description: | Auto Chunking Strategy, the default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. additionalProperties: false @@ -5906,7 +5917,7 @@ components: type: integer minimum: 100 maximum: 4096 - description: | + description: | The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. chunk_overlap_tokens: @@ -5922,13 +5933,13 @@ components: type: object description: The chunking strategy used to chunk the file(s). oneOf: - - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" - - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" + - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" + - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" discriminator: - propertyName: type + propertyName: type OtherChunkingStrategyResponseParam: type: object - description: | + description: | Other Chunking Strategy. This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. additionalProperties: false diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 0dd10f12..1a91af5d 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -29,6 +29,8 @@ tags: description: Create large batches of API requests to run asynchronously. - name: Files description: Files are used to upload documents that can be used with features like Assistants and Fine-tuning. + - name: Uploads + description: Use Uploads to upload large files in multiple parts. - name: Images description: Given a prompt and/or an input image, the model will generate a new image. 
- name: Models @@ -117,7 +119,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1677652288, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, @@ -214,7 +216,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1677652288, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, @@ -289,13 +291,13 @@ paths: main(); response: &chat_completion_chunk_example | - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} .... - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} - title: Functions request: curl: | @@ -412,7 +414,7 @@ paths: "id": "chatcmpl-abc123", "object": "chat.completion", "created": 1699896916, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "choices": [ { "index": 0, @@ -494,7 +496,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1702685778, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "choices": [ { "index": 0, @@ -1721,6 +1723,218 @@ paths: } main(); + /uploads: + post: + operationId: createUpload + tags: + - Uploads + summary: | + Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it. + + Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. + + For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case: + - [Assistants](/docs/assistants/tools/file-search/supported-files) + + For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateUploadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Create upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status `pending`. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "purpose": "fine-tune", + "filename": "training_examples.jsonl", + "bytes": 2147483648, + "mime_type": "text/jsonl" + }' + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "pending", + "expires_at": 1719127296 + } + + /uploads/{upload_id}/parts: + post: + operationId: addUploadPart + tags: + - Uploads + summary: | + Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. + + Each Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB. + + It is possible to add multiple Parts in parallel. You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete). + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/AddUploadPartRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/UploadPart" + x-oaiMeta: + name: Add upload part + group: uploads + returns: The upload [Part](/docs/api-reference/uploads/part-object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/parts + -F data="aHR0cHM6Ly9hcGkub3BlbmFpLmNvbS92MS91cGxvYWRz..." + response: | + { + "id": "part_def456", + "object": "upload.part", + "created_at": 1719185911, + "upload_id": "upload_abc123" + } + + /uploads/{upload_id}/complete: + post: + operationId: completeUpload + tags: + - Uploads + summary: | + Completes the [Upload](/docs/api-reference/uploads/object). + + Within the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform. + + You can specify the order of the Parts by passing in an ordered list of the Part IDs. + + The number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed. + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CompleteUploadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Complete upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status `completed` with an additional `file` property containing the created usable File object. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/complete + -d '{ + "part_ids": ["part_def456", "part_ghi789"] + }' + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "completed", + "expires_at": 1719127296, + "file": { + "id": "file-xyz321", + "object": "file", + "bytes": 2147483648, + "created_at": 1719186911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + } + } + + /uploads/{upload_id}/cancel: + post: + operationId: cancelUpload + tags: + - Uploads + summary: | + Cancels the Upload. No Parts may be added after an Upload is cancelled. + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Cancel upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status `cancelled`. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/cancel + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "cancelled", + "expires_at": 1719127296 + } /fine_tuning/jobs: post: @@ -2608,7 +2822,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: &pagination_after_param_description | @@ -3457,7 +3671,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -3680,7 +3894,7 @@ paths: name: Retrieve message group: threads beta: true - returns: The [message](/docs/api-reference/threads/messages/object) object matching the specified ID. + returns: The [message](/docs/api-reference/messages/object) object matching the specified ID. examples: request: curl: | @@ -3768,7 +3982,7 @@ paths: name: Modify message group: threads beta: true - returns: The modified [message](/docs/api-reference/threads/messages/object) object. + returns: The modified [message](/docs/api-reference/messages/object) object. examples: request: curl: | @@ -4300,7 +4514,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -5417,7 +5631,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -5623,7 +5837,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -5998,7 +6212,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -6014,7 +6228,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." 
schema: type: string - enum: [ "in_progress", "completed", "failed", "cancelled" ] + enum: ["in_progress", "completed", "failed", "cancelled"] responses: "200": description: OK @@ -6560,7 +6774,7 @@ paths: schema: type: string default: desc - enum: [ "asc", "desc" ] + enum: ["asc", "desc"] - name: after in: query description: *pagination_after_param_description @@ -6576,7 +6790,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: [ "in_progress", "completed", "failed", "cancelled" ] + enum: ["in_progress", "completed", "failed", "cancelled"] responses: "200": description: OK @@ -6676,7 +6890,7 @@ paths: description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. completion_window: type: string - enum: [ "24h" ] + enum: ["24h"] description: The time frame within which the batch should be processed. Currently only `24h` is supported. metadata: type: object @@ -7053,7 +7267,7 @@ components: properties: object: type: string - enum: [ list ] + enum: [list] data: type: array items: @@ -7084,7 +7298,7 @@ components: anyOf: - type: string - type: string - enum: [ "gpt-3.5-turbo-instruct", "davinci-002", "babbage-002" ] + enum: ["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"] x-oaiTypeLabel: string prompt: description: &completions_prompt_description | @@ -7296,7 +7510,7 @@ components: The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, or `content_filter` if content was omitted due to a flag from our content filters. - enum: [ "stop", "length", "content_filter" ] + enum: ["stop", "length", "content_filter"] index: type: integer logprobs: @@ -7338,7 +7552,7 @@ components: object: type: string description: The object type, which is always "text_completion" - enum: [ text_completion ] + enum: [text_completion] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -7383,7 +7597,7 @@ components: properties: type: type: string - enum: [ "image_url" ] + enum: ["image_url"] description: The type of the content part. image_url: type: object @@ -7395,7 +7609,7 @@ components: detail: type: string description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). - enum: [ "auto", "low", "high" ] + enum: ["auto", "low", "high"] default: "auto" required: - url @@ -7409,7 +7623,7 @@ components: properties: type: type: string - enum: [ "text" ] + enum: ["text"] description: The type of the content part. text: type: string @@ -7436,7 +7650,7 @@ components: type: string role: type: string - enum: [ "system" ] + enum: ["system"] description: The role of the messages author, in this case `system`. name: type: string @@ -7457,7 +7671,7 @@ components: description: The text contents of the message. title: Text content - type: array - description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-visual-preview` model. 
+ description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. title: Array of content parts items: $ref: "#/components/schemas/ChatCompletionRequestMessageContentPart" @@ -7465,7 +7679,7 @@ components: x-oaiExpandable: true role: type: string - enum: [ "user" ] + enum: ["user"] description: The role of the messages author, in this case `user`. name: type: string @@ -7485,7 +7699,7 @@ components: The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. role: type: string - enum: [ "assistant" ] + enum: ["assistant"] description: The role of the messages author, in this case `assistant`. name: type: string @@ -7518,7 +7732,7 @@ components: properties: weight: type: integer - enum: [ 0, 1 ] + enum: [0, 1] description: "Controls whether the assistant message is trained against (0 or 1)" - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" required: @@ -7530,7 +7744,7 @@ components: properties: role: type: string - enum: [ "tool" ] + enum: ["tool"] description: The role of the messages author, in this case `tool`. content: type: string @@ -7550,7 +7764,7 @@ components: properties: role: type: string - enum: [ "function" ] + enum: ["function"] description: The role of the messages author, in this case `function`. content: nullable: true @@ -7600,7 +7814,7 @@ components: properties: type: type: string - enum: [ "function" ] + enum: ["function"] description: The type of the tool. Currently, only `function` is supported. function: $ref: "#/components/schemas/FunctionObject" @@ -7637,7 +7851,7 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - enum: [ none, auto, required ] + enum: [none, auto, required] - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" x-oaiExpandable: true @@ -7647,7 +7861,7 @@ components: properties: type: type: string - enum: [ "function" ] + enum: ["function"] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7681,7 +7895,7 @@ components: description: The ID of the tool call. type: type: string - enum: [ "function" ] + enum: ["function"] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7711,7 +7925,7 @@ components: description: The ID of the tool call. type: type: string - enum: [ "function" ] + enum: ["function"] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7760,7 +7974,7 @@ components: $ref: "#/components/schemas/ChatCompletionMessageToolCalls" role: type: string - enum: [ "assistant" ] + enum: ["assistant"] description: The role of the author of this message. function_call: type: object @@ -7805,7 +8019,7 @@ components: $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" role: type: string - enum: [ "system", "user", "assistant", "tool" ] + enum: ["system", "user", "assistant", "tool"] description: The role of the author of this message. 
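The request schema that follows is the one behind the new `gpt-4o-mini` entries added throughout this patch (see the `ChatCompletionModels.gpt4oMini` mapping in schema.g.dart above). As a hedged sketch of what selecting the new model looks like from Dart: the enum value comes from this patch, while the client and constructor names (`OpenAIClient`, `createChatCompletion`, `ChatCompletionModel.model`, `ChatCompletionMessage.system`/`.user`, `ChatCompletionUserMessageContent.string`) are assumed to match the package's existing generated API rather than being defined here.

import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  // Client construction and method names are assumed from openai_dart;
  // only the gpt-4o-mini enum value below comes from this patch.
  final client = OpenAIClient(apiKey: 'YOUR_OPENAI_API_KEY');

  final res = await client.createChatCompletion(
    request: CreateChatCompletionRequest(
      // Maps to 'gpt-4o-mini' via _$ChatCompletionModelsEnumMap in schema.g.dart.
      model: ChatCompletionModel.model(ChatCompletionModels.gpt4oMini),
      messages: [
        ChatCompletionMessage.system(
          content: 'You are a helpful assistant.',
        ),
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string('Hello!'),
        ),
      ],
      temperature: 0,
    ),
  );

  print(res.choices.first.message.content);
}

If a regenerated enum is not available on a given version, passing the raw id should be equivalent, since the `model` property in the spec accepts either a plain string or one of the enumerated ids.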
CreateChatCompletionRequest: @@ -7827,6 +8041,8 @@ components: [ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -7910,7 +8126,7 @@ components: properties: type: type: string - enum: [ "text", "json_object" ] + enum: ["text", "json_object"] example: "json_object" default: "text" description: Must be one of `text` or `json_object`. @@ -7930,10 +8146,11 @@ components: Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. type: string - enum: [ "auto", "default" ] + enum: ["auto", "default"] nullable: true default: null stop: @@ -8001,7 +8218,7 @@ components: description: > `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. - enum: [ none, auto ] + enum: [none, auto] - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" x-oaiExpandable: true functions: @@ -8080,7 +8297,7 @@ components: service_tier: description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. type: string - enum: [ "scale", "default" ] + enum: ["scale", "default"] example: "scale" nullable: true system_fingerprint: @@ -8092,7 +8309,7 @@ components: object: type: string description: The object type, which is always `chat.completion`. - enum: [ chat.completion ] + enum: [chat.completion] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -8130,7 +8347,7 @@ components: &chat_completion_function_finish_reason_description | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. enum: - [ "stop", "length", "function_call", "content_filter" ] + ["stop", "length", "function_call", "content_filter"] index: type: integer description: The index of the choice in the list of choices. @@ -8151,7 +8368,7 @@ components: object: type: string description: The object type, which is always `chat.completion`. - enum: [ chat.completion ] + enum: [chat.completion] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -8210,7 +8427,7 @@ components: type: boolean object: type: string - enum: [ list ] + enum: [list] required: - object - data @@ -8262,7 +8479,7 @@ components: service_tier: description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. type: string - enum: [ "scale", "default" ] + enum: ["scale", "default"] example: "scale" nullable: true system_fingerprint: @@ -8273,7 +8490,7 @@ components: object: type: string description: The object type, which is always `chat.completion.chunk`. 
- enum: [ chat.completion.chunk ] + enum: [chat.completion.chunk] usage: type: object description: | @@ -8323,7 +8540,7 @@ components: anyOf: - type: string - type: string - enum: [ "dall-e-2", "dall-e-3" ] + enum: ["dall-e-2", "dall-e-3"] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-3" @@ -8339,27 +8556,27 @@ components: description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. quality: type: string - enum: [ "standard", "hd" ] + enum: ["standard", "hd"] default: "standard" example: "standard" description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. response_format: &images_response_format type: string - enum: [ "url", "b64_json" ] + enum: ["url", "b64_json"] default: "url" example: "url" nullable: true description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. size: &images_size type: string - enum: [ "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792" ] + enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] default: "1024x1024" example: "1024x1024" nullable: true description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. style: type: string - enum: [ "vivid", "natural" ] + enum: ["vivid", "natural"] default: "vivid" example: "vivid" nullable: true @@ -8420,7 +8637,7 @@ components: anyOf: - type: string - type: string - enum: [ "dall-e-2" ] + enum: ["dall-e-2"] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-2" @@ -8436,7 +8653,7 @@ components: description: The number of images to generate. Must be between 1 and 10. size: &dalle2_images_size type: string - enum: [ "256x256", "512x512", "1024x1024" ] + enum: ["256x256", "512x512", "1024x1024"] default: "1024x1024" example: "1024x1024" nullable: true @@ -8458,7 +8675,7 @@ components: anyOf: - type: string - type: string - enum: [ "dall-e-2" ] + enum: ["dall-e-2"] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-2" @@ -8496,7 +8713,7 @@ components: anyOf: - type: string - type: string - enum: [ "text-moderation-latest", "text-moderation-stable" ] + enum: ["text-moderation-latest", "text-moderation-stable"] x-oaiTypeLabel: string required: - input @@ -8639,7 +8856,7 @@ components: $ref: "#/components/schemas/OpenAIFile" object: type: string - enum: [ list ] + enum: [list] required: - object - data @@ -8659,7 +8876,7 @@ components: Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). type: string - enum: [ "assistants", "batch", "fine-tune", "vision" ] + enum: ["assistants", "batch", "fine-tune", "vision"] required: - file - purpose @@ -8671,7 +8888,7 @@ components: type: string object: type: string - enum: [ file ] + enum: [file] deleted: type: boolean required: @@ -8679,6 +8896,70 @@ components: - object - deleted + CreateUploadRequest: + type: object + additionalProperties: false + properties: + filename: + description: | + The name of the file to upload. 
+ type: string + purpose: + description: | + The intended purpose of the uploaded file. + + See the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). + type: string + enum: ["assistants", "batch", "fine-tune", "vision"] + bytes: + description: | + The number of bytes in the file you are uploading. + type: integer + mime_type: + description: | + The MIME type of the file. + + This must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision. + type: string + required: + - filename + - purpose + - bytes + - mime_type + + AddUploadPartRequest: + type: object + additionalProperties: false + properties: + data: + description: | + The chunk of bytes for this Part. + type: string + format: binary + required: + - data + + CompleteUploadRequest: + type: object + additionalProperties: false + properties: + part_ids: + type: array + description: | + The ordered list of Part IDs. + items: + type: string + md5: + description: | + The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect. + type: string + required: + - part_ids + + CancelUploadRequest: + type: object + additionalProperties: false + CreateFineTuningJobRequest: type: object properties: @@ -8690,7 +8971,7 @@ components: anyOf: - type: string - type: string - enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo" ] + enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] x-oaiTypeLabel: string training_file: description: | @@ -8715,7 +8996,7 @@ components: are updated less frequently, but with lower variance. oneOf: - type: string - enum: [ auto ] + enum: [auto] - type: integer minimum: 1 maximum: 256 @@ -8726,7 +9007,7 @@ components: overfitting. oneOf: - type: string - enum: [ auto ] + enum: [auto] - type: number minimum: 0 exclusiveMinimum: true @@ -8737,7 +9018,7 @@ components: through the training dataset. oneOf: - type: string - enum: [ auto ] + enum: [auto] - type: integer minimum: 1 maximum: 50 @@ -8782,7 +9063,7 @@ components: The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. oneOf: - type: string - enum: [ wandb ] + enum: [wandb] wandb: type: object description: | @@ -8839,7 +9120,7 @@ components: $ref: "#/components/schemas/FineTuningJobEvent" object: type: string - enum: [ list ] + enum: [list] required: - object - data @@ -8853,7 +9134,7 @@ components: $ref: "#/components/schemas/FineTuningJobCheckpoint" object: type: string - enum: [ list ] + enum: [list] first_id: type: string nullable: true @@ -8928,7 +9209,7 @@ components: example: "float" default: "float" type: string - enum: [ "float", "base64" ] + enum: ["float", "base64"] dimensions: description: | The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. @@ -8953,7 +9234,7 @@ components: object: type: string description: The object type, which is always "list". - enum: [ list ] + enum: [list] usage: type: object description: The usage information for the request. @@ -8990,7 +9271,7 @@ components: anyOf: - type: string - type: string - enum: [ "whisper-1" ] + enum: ["whisper-1"] x-oaiTypeLabel: string language: description: | @@ -9025,7 +9306,7 @@ components: enum: - word - segment - default: [ segment ] + default: [segment] required: - file - model @@ -9112,7 +9393,7 @@ components: type: number format: float description: End time of the word in seconds. 
- required: [ word, start, end ] + required: [word, start, end] CreateTranscriptionResponseVerboseJson: type: object @@ -9137,7 +9418,7 @@ components: description: Segments of the transcribed text and their corresponding details. items: $ref: "#/components/schemas/TranscriptionSegment" - required: [ language, duration, text ] + required: [language, duration, text] x-oaiMeta: name: The transcription object (Verbose JSON) group: audio @@ -9160,7 +9441,7 @@ components: anyOf: - type: string - type: string - enum: [ "whisper-1" ] + enum: ["whisper-1"] x-oaiTypeLabel: string prompt: description: | @@ -9206,7 +9487,7 @@ components: description: Segments of the translated text and their corresponding details. items: $ref: "#/components/schemas/TranscriptionSegment" - required: [ language, duration, text ] + required: [language, duration, text] CreateSpeechRequest: type: object @@ -9218,7 +9499,7 @@ components: anyOf: - type: string - type: string - enum: [ "tts-1", "tts-1-hd" ] + enum: ["tts-1", "tts-1-hd"] x-oaiTypeLabel: string input: type: string @@ -9227,12 +9508,12 @@ components: voice: description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). type: string - enum: [ "alloy", "echo", "fable", "onyx", "nova", "shimmer" ] + enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] response_format: description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." default: "mp3" type: string - enum: [ "mp3", "opus", "aac", "flac", "wav", "pcm" ] + enum: ["mp3", "opus", "aac", "flac", "wav", "pcm"] speed: description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." type: number @@ -9257,7 +9538,7 @@ components: object: type: string description: The object type, which is always "model". - enum: [ model ] + enum: [model] owned_by: type: string description: The organization that owns the model. @@ -9289,7 +9570,7 @@ components: object: type: string description: The object type, which is always `file`. - enum: [ "file" ] + enum: ["file"] purpose: type: string description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. @@ -9307,7 +9588,7 @@ components: type: string deprecated: true description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. - enum: [ "uploaded", "processed", "error" ] + enum: ["uploaded", "processed", "error"] status_details: type: string deprecated: true @@ -9331,6 +9612,105 @@ components: "filename": "salesOverview.pdf", "purpose": "assistants", } + Upload: + type: object + title: Upload + description: | + The Upload object can accept byte chunks in the form of Parts. + properties: + id: + type: string + description: The Upload unique identifier, which can be referenced in API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload was created. + filename: + type: string + description: The name of the file to be uploaded. + bytes: + type: integer + description: The intended number of bytes to be uploaded. + purpose: + type: string + description: The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values. 
+ status: + type: string + description: The status of the Upload. + enum: ["pending", "completed", "cancelled", "expired"] + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload was created. + object: + type: string + description: The object type, which is always "upload". + enum: [upload] + file: + $ref: "#/components/schemas/OpenAIFile" + nullable: true + description: The ready File object after the Upload is completed. + required: + - bytes + - created_at + - expires_at + - filename + - id + - purpose + - status + - step_number + x-oaiMeta: + name: The upload object + example: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "completed", + "expires_at": 1719127296, + "file": { + "id": "file-xyz321", + "object": "file", + "bytes": 2147483648, + "created_at": 1719186911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + } + } + UploadPart: + type: object + title: UploadPart + description: | + The upload Part represents a chunk of bytes we can add to an Upload object. + properties: + id: + type: string + description: The upload Part unique identifier, which can be referenced in API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Part was created. + upload_id: + type: string + description: The ID of the Upload object that this Part was added to. + object: + type: string + description: The object type, which is always `upload.part`. + enum: ['upload.part'] + required: + - created_at + - id + - object + - upload_id + x-oaiMeta: + name: The upload part object + example: | + { + "id": "part_def456", + "object": "upload.part", + "created_at": 1719186911, + "upload_id": "upload_abc123" + } Embedding: type: object description: | @@ -9348,7 +9728,7 @@ components: object: type: string description: The object type, which is always "embedding". - enum: [ embedding ] + enum: [embedding] required: - index - object @@ -9413,15 +9793,15 @@ components: n_epochs: oneOf: - type: string - enum: [ auto ] + enum: [auto] - type: integer minimum: 1 maximum: 50 default: auto description: - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. required: - n_epochs model: @@ -9430,7 +9810,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job". - enum: [ fine_tuning.job ] + enum: [fine_tuning.job] organization_id: type: string description: The organization that owns the fine-tuning job. 
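The new `/uploads` endpoints and the `Upload`, `UploadPart` and create/add/complete request schemas above describe a three-call flow: create the Upload, send its bytes as Parts, then complete it with the ordered Part IDs. The openai_dart client in this patch does not yet wrap these endpoints, so the hedged sketch below drives the REST API directly with package:http; the URLs, field names (`filename`, `purpose`, `bytes`, `mime_type`, `data`, `part_ids`) and size limits are taken from the spec above, the `purpose` and MIME type mirror the spec's own curl example, and the helper name is only illustrative.

import 'dart:convert';
import 'dart:math';

import 'package:http/http.dart' as http;

/// Illustrative sketch of the Uploads flow documented above: create the
/// Upload, send the bytes as Parts of at most 64 MB each, then complete it.
Future<void> uploadInParts(String apiKey, String filename, List<int> bytes) async {
  final jsonHeaders = {
    'Authorization': 'Bearer $apiKey',
    'Content-Type': 'application/json',
  };

  // 1. Create the Upload (accepts at most 8 GB in total, expires after an hour).
  final createRes = await http.post(
    Uri.parse('https://api.openai.com/v1/uploads'),
    headers: jsonHeaders,
    body: jsonEncode({
      'filename': filename,
      'purpose': 'fine-tune',
      'bytes': bytes.length,
      'mime_type': 'text/jsonl',
    }),
  );
  final uploadId = jsonDecode(createRes.body)['id'] as String;

  // 2. Add Parts (each at most 64 MB; Parts may also be sent in parallel).
  const partSize = 64 * 1024 * 1024;
  final partIds = <String>[];
  for (var offset = 0; offset < bytes.length; offset += partSize) {
    final chunk = bytes.sublist(offset, min(offset + partSize, bytes.length));
    final partReq = http.MultipartRequest(
      'POST',
      Uri.parse('https://api.openai.com/v1/uploads/$uploadId/parts'),
    )
      ..headers['Authorization'] = 'Bearer $apiKey'
      ..files.add(http.MultipartFile.fromBytes('data', chunk));
    final partRes = await http.Response.fromStream(await partReq.send());
    partIds.add(jsonDecode(partRes.body)['id'] as String);
  }

  // 3. Complete the Upload; the response embeds a regular File object that
  //    can then be used anywhere a File ID is accepted.
  final completeRes = await http.post(
    Uri.parse('https://api.openai.com/v1/uploads/$uploadId/complete'),
    headers: jsonHeaders,
    body: jsonEncode({'part_ids': partIds}),
  );
  print(jsonDecode(completeRes.body)['status']); // "completed" on success
}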
@@ -9509,7 +9889,7 @@ components: type: type: string description: "The type of the integration being enabled for the fine-tuning job" - enum: [ "wandb" ] + enum: ["wandb"] wandb: type: object description: | @@ -9554,12 +9934,12 @@ components: type: integer level: type: string - enum: [ "info", "warn", "error" ] + enum: ["info", "warn", "error"] message: type: string object: type: string - enum: [ fine_tuning.job.event ] + enum: [fine_tuning.job.event] required: - id - object @@ -9619,7 +9999,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job.checkpoint". - enum: [ fine_tuning.job.checkpoint ] + enum: [fine_tuning.job.checkpoint] required: - created_at - fine_tuning_job_id @@ -9808,7 +10188,7 @@ components: - type: string description: > `auto` is the default value - enum: [ none, auto ] + enum: [none, auto] - $ref: "#/components/schemas/AssistantsApiResponseFormat" x-oaiExpandable: true @@ -9819,7 +10199,7 @@ components: properties: type: type: string - enum: [ "text", "json_object" ] + enum: ["text", "json_object"] example: "json_object" default: "text" description: Must be one of `text` or `json_object`. @@ -9835,7 +10215,7 @@ components: object: description: The object type, which is always `assistant`. type: string - enum: [ assistant ] + enum: [assistant] created_at: description: The Unix timestamp (in seconds) for when the assistant was created. type: integer @@ -9863,7 +10243,7 @@ components: tools: description: &assistant_tools_param_description | A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - default: [ ] + default: [] type: array maxItems: 128 items: @@ -9884,7 +10264,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + default: [] maxItems: 20 items: type: string @@ -9957,6 +10337,8 @@ components: [ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -9994,7 +10376,7 @@ components: maxLength: 256000 tools: description: *assistant_tools_param_description - default: [ ] + default: [] type: array maxItems: 128 items: @@ -10015,7 +10397,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + default: [] maxItems: 20 items: type: string @@ -10057,7 +10439,7 @@ components: type: type: string description: Always `auto`. - enum: [ "auto" ] + enum: ["auto"] required: - type - type: object @@ -10067,7 +10449,7 @@ components: type: type: string description: Always `static`. - enum: [ "static" ] + enum: ["static"] static: type: object additionalProperties: false @@ -10096,8 +10478,8 @@ components: Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
x-oaiTypeLabel: map oneOf: - - required: [ vector_store_ids ] - - required: [ vector_stores ] + - required: [vector_store_ids] + - required: [vector_stores] nullable: true metadata: description: *metadata_description @@ -10155,7 +10537,7 @@ components: maxLength: 256000 tools: description: *assistant_tools_param_description - default: [ ] + default: [] type: array maxItems: 128 items: @@ -10176,7 +10558,7 @@ components: type: array description: | Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + default: [] maxItems: 20 items: type: string @@ -10228,7 +10610,7 @@ components: type: boolean object: type: string - enum: [ assistant.deleted ] + enum: [assistant.deleted] required: - id - object @@ -10271,7 +10653,7 @@ components: type: type: string description: "The type of tool being defined: `code_interpreter`" - enum: [ "code_interpreter" ] + enum: ["code_interpreter"] required: - type @@ -10282,7 +10664,7 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - enum: [ "file_search" ] + enum: ["file_search"] file_search: type: object description: Overrides for the file search tool. @@ -10305,7 +10687,7 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - enum: [ "file_search" ] + enum: ["file_search"] required: - type @@ -10316,7 +10698,7 @@ components: type: type: string description: "The type of tool being defined: `function`" - enum: [ "function" ] + enum: ["function"] function: $ref: "#/components/schemas/FunctionObject" required: @@ -10331,7 +10713,7 @@ components: type: type: string description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: [ "auto", "last_messages" ] + enum: ["auto", "last_messages"] last_messages: type: integer description: The number of most recent messages from the thread when constructing the context for the run. @@ -10354,7 +10736,7 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [ none, auto, required ] + enum: [none, auto, required] - $ref: "#/components/schemas/AssistantsNamedToolChoice" x-oaiExpandable: true @@ -10364,7 +10746,7 @@ components: properties: type: type: string - enum: [ "function", "code_interpreter", "file_search" ] + enum: ["function", "code_interpreter", "file_search"] description: The type of the tool. If type is `function`, the function name must be set function: type: object @@ -10388,7 +10770,7 @@ components: object: description: The object type, which is always `thread.run`. type: string - enum: [ "thread.run" ] + enum: ["thread.run"] created_at: description: The Unix timestamp (in seconds) for when the run was created. type: integer @@ -10421,7 +10803,7 @@ components: type: description: For now, this is always `submit_tool_outputs`. type: string - enum: [ "submit_tool_outputs" ] + enum: ["submit_tool_outputs"] submit_tool_outputs: type: object description: Details on the tool outputs needed for this run to continue. 
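Both the assistant-level `vector_stores.chunking_strategy` shown above and the standalone chunking-strategy schemas earlier in this patch impose the same numeric constraints on the `static` variant: `max_chunk_size_tokens` must be between 100 and 4096 (default 800), and `chunk_overlap_tokens` (default 400) must not exceed half of `max_chunk_size_tokens`. The following self-contained Dart sketch builds the corresponding request payload and enforces those rules; the helper name is made up for illustration.

/// Illustrative helper: builds the JSON body for a `static` chunking
/// strategy, enforcing the limits documented in the spec above.
Map<String, dynamic> staticChunkingStrategy({
  int maxChunkSizeTokens = 800, // spec default
  int chunkOverlapTokens = 400, // spec default
}) {
  if (maxChunkSizeTokens < 100 || maxChunkSizeTokens > 4096) {
    throw ArgumentError('max_chunk_size_tokens must be between 100 and 4096.');
  }
  if (chunkOverlapTokens > maxChunkSizeTokens ~/ 2) {
    throw ArgumentError(
        'chunk_overlap_tokens must not exceed half of max_chunk_size_tokens.');
  }
  return {
    'type': 'static',
    'static': {
      'max_chunk_size_tokens': maxChunkSizeTokens,
      'chunk_overlap_tokens': chunkOverlapTokens,
    },
  };
}

void main() {
  // Example: smaller chunks with proportionally smaller overlap, e.g. for the
  // `chunking_strategy` field of a vector store file request.
  print(staticChunkingStrategy(
    maxChunkSizeTokens: 400,
    chunkOverlapTokens: 200,
  ));
}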
@@ -10445,7 +10827,7 @@ components: type: string description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. enum: - [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] + ["server_error", "rate_limit_exceeded", "invalid_prompt"] message: type: string description: A human-readable description of the error. @@ -10480,7 +10862,7 @@ components: reason: description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. type: string - enum: [ "max_completion_tokens", "max_prompt_tokens" ] + enum: ["max_completion_tokens", "max_prompt_tokens"] model: description: The model that the [assistant](/docs/api-reference/assistants) used for this run. type: string @@ -10489,7 +10871,7 @@ components: type: string tools: description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. - default: [ ] + default: [] type: array maxItems: 20 items: @@ -10618,6 +11000,8 @@ components: [ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -10787,7 +11171,7 @@ components: type: type: string description: The type of tool call the output is required for. For now, this is always `function`. - enum: [ "function" ] + enum: ["function"] function: type: object description: The function definition. @@ -10826,6 +11210,8 @@ components: [ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -10873,7 +11259,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + default: [] maxItems: 20 items: type: string @@ -10952,7 +11338,7 @@ components: object: description: The object type, which is always `thread`. type: string - enum: [ "thread" ] + enum: ["thread"] created_at: description: The Unix timestamp (in seconds) for when the thread was created. type: integer @@ -10968,7 +11354,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + default: [] maxItems: 20 items: type: string @@ -11026,7 +11412,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] + default: [] maxItems: 20 items: type: string @@ -11068,7 +11454,7 @@ components: type: type: string description: Always `auto`. - enum: [ "auto" ] + enum: ["auto"] required: - type - type: object @@ -11078,7 +11464,7 @@ components: type: type: string description: Always `static`. - enum: [ "static" ] + enum: ["static"] static: type: object additionalProperties: false @@ -11108,8 +11494,8 @@ components: x-oaiTypeLabel: map x-oaiExpandable: true oneOf: - - required: [ vector_store_ids ] - - required: [ vector_stores ] + - required: [vector_store_ids] + - required: [vector_stores] nullable: true metadata: description: *metadata_description @@ -11133,7 +11519,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. 
- default: [ ] + default: [] maxItems: 20 items: type: string @@ -11163,7 +11549,7 @@ components: type: boolean object: type: string - enum: [ thread.deleted ] + enum: [thread.deleted] required: - id - object @@ -11205,7 +11591,7 @@ components: object: description: The object type, which is always `thread.message`. type: string - enum: [ "thread.message" ] + enum: ["thread.message"] created_at: description: The Unix timestamp (in seconds) for when the message was created. type: integer @@ -11215,7 +11601,7 @@ components: status: description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. type: string - enum: [ "in_progress", "incomplete", "completed" ] + enum: ["in_progress", "incomplete", "completed"] incomplete_details: description: On an incomplete message, details about why the message is incomplete. type: object @@ -11245,7 +11631,7 @@ components: role: description: The entity that produced the message. One of `user` or `assistant`. type: string - enum: [ "user", "assistant" ] + enum: ["user", "assistant"] content: description: The content of the message in array of text and/or images. type: array @@ -11338,7 +11724,7 @@ components: object: description: The object type, which is always `thread.message.delta`. type: string - enum: [ "thread.message.delta" ] + enum: ["thread.message.delta"] delta: description: The delta containing the fields that have changed on the Message. type: object @@ -11346,7 +11732,7 @@ components: role: description: The entity that produced the message. One of `user` or `assistant`. type: string - enum: [ "user", "assistant" ] + enum: ["user", "assistant"] content: description: The content of the message in array of text and/or images. type: array @@ -11387,7 +11773,7 @@ components: properties: role: type: string - enum: [ "user", "assistant" ] + enum: ["user", "assistant"] description: | The role of the entity that is creating the message. Allowed values include: - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. @@ -11454,7 +11840,7 @@ components: type: boolean object: type: string - enum: [ thread.message.deleted ] + enum: [thread.message.deleted] required: - id - object @@ -11493,7 +11879,7 @@ components: type: description: Always `image_file`. type: string - enum: [ "image_file" ] + enum: ["image_file"] image_file: type: object properties: @@ -11503,7 +11889,7 @@ components: detail: type: string description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] + enum: ["auto", "low", "high"] default: "auto" required: - file_id @@ -11522,7 +11908,7 @@ components: type: description: Always `image_file`. type: string - enum: [ "image_file" ] + enum: ["image_file"] image_file: type: object properties: @@ -11532,7 +11918,7 @@ components: detail: type: string description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] + enum: ["auto", "low", "high"] default: "auto" required: - index @@ -11545,7 +11931,7 @@ components: properties: type: type: string - enum: [ "image_url" ] + enum: ["image_url"] description: The type of the content part. image_url: type: object @@ -11557,7 +11943,7 @@ components: detail: type: string description: Specifies the detail level of the image. 
`low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` - enum: [ "auto", "low", "high" ] + enum: ["auto", "low", "high"] default: "auto" required: - url @@ -11576,7 +11962,7 @@ components: type: description: Always `image_url`. type: string - enum: [ "image_url" ] + enum: ["image_url"] image_url: type: object properties: @@ -11586,7 +11972,7 @@ components: detail: type: string description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] + enum: ["auto", "low", "high"] default: "auto" required: - index @@ -11600,7 +11986,7 @@ components: type: description: Always `text`. type: string - enum: [ "text" ] + enum: ["text"] text: type: object properties: @@ -11629,7 +12015,7 @@ components: type: description: Always `text`. type: string - enum: [ "text" ] + enum: ["text"] text: type: string description: Text content to be sent to the model @@ -11645,7 +12031,7 @@ components: type: description: Always `file_citation`. type: string - enum: [ "file_citation" ] + enum: ["file_citation"] text: description: The text in the message content that needs to be replaced. type: string @@ -11678,7 +12064,7 @@ components: type: description: Always `file_path`. type: string - enum: [ "file_path" ] + enum: ["file_path"] text: description: The text in the message content that needs to be replaced. type: string @@ -11714,7 +12100,7 @@ components: type: description: Always `text`. type: string - enum: [ "text" ] + enum: ["text"] text: type: object properties: @@ -11743,7 +12129,7 @@ components: type: description: Always `file_citation`. type: string - enum: [ "file_citation" ] + enum: ["file_citation"] text: description: The text in the message content that needs to be replaced. type: string @@ -11777,7 +12163,7 @@ components: type: description: Always `file_path`. type: string - enum: [ "file_path" ] + enum: ["file_path"] text: description: The text in the message content that needs to be replaced. type: string @@ -11809,7 +12195,7 @@ components: object: description: The object type, which is always `thread.run.step`. type: string - enum: [ "thread.run.step" ] + enum: ["thread.run.step"] created_at: description: The Unix timestamp (in seconds) for when the run step was created. type: integer @@ -11825,11 +12211,11 @@ components: type: description: The type of run step, which can be either `message_creation` or `tool_calls`. type: string - enum: [ "message_creation", "tool_calls" ] + enum: ["message_creation", "tool_calls"] status: description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. type: string - enum: [ "in_progress", "cancelled", "failed", "completed", "expired" ] + enum: ["in_progress", "cancelled", "failed", "completed", "expired"] step_details: type: object description: The details of the run step. @@ -11845,7 +12231,7 @@ components: code: type: string description: One of `server_error` or `rate_limit_exceeded`. - enum: [ "server_error", "rate_limit_exceeded" ] + enum: ["server_error", "rate_limit_exceeded"] message: type: string description: A human-readable description of the error. @@ -11909,7 +12295,7 @@ components: object: description: The object type, which is always `thread.run.step.delta`. type: string - enum: [ "thread.run.step.delta" ] + enum: ["thread.run.step.delta"] delta: description: The delta containing the fields that have changed on the run step. 
type: object @@ -11980,7 +12366,7 @@ components: type: description: Always `message_creation`. type: string - enum: [ "message_creation" ] + enum: ["message_creation"] message_creation: type: object properties: @@ -12001,7 +12387,7 @@ components: type: description: Always `message_creation`. type: string - enum: [ "message_creation" ] + enum: ["message_creation"] message_creation: type: object properties: @@ -12019,7 +12405,7 @@ components: type: description: Always `tool_calls`. type: string - enum: [ "tool_calls" ] + enum: ["tool_calls"] tool_calls: type: array description: | @@ -12042,7 +12428,7 @@ components: type: description: Always `tool_calls`. type: string - enum: [ "tool_calls" ] + enum: ["tool_calls"] tool_calls: type: array description: | @@ -12067,7 +12453,7 @@ components: type: type: string description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: [ "code_interpreter" ] + enum: ["code_interpreter"] code_interpreter: type: object description: The Code Interpreter tool call definition. @@ -12106,7 +12492,7 @@ components: type: type: string description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: [ "code_interpreter" ] + enum: ["code_interpreter"] code_interpreter: type: object description: The Code Interpreter tool call definition. @@ -12135,7 +12521,7 @@ components: type: description: Always `logs`. type: string - enum: [ "logs" ] + enum: ["logs"] logs: type: string description: The text output from the Code Interpreter tool call. @@ -12154,7 +12540,7 @@ components: type: description: Always `logs`. type: string - enum: [ "logs" ] + enum: ["logs"] logs: type: string description: The text output from the Code Interpreter tool call. @@ -12169,7 +12555,7 @@ components: type: description: Always `image`. type: string - enum: [ "image" ] + enum: ["image"] image: type: object properties: @@ -12192,7 +12578,7 @@ components: type: description: Always `image`. type: string - enum: [ "image" ] + enum: ["image"] image: type: object properties: @@ -12213,7 +12599,7 @@ components: type: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: [ "file_search" ] + enum: ["file_search"] file_search: type: object description: For now, this is always going to be an empty object. @@ -12236,7 +12622,7 @@ components: type: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: [ "file_search" ] + enum: ["file_search"] file_search: type: object description: For now, this is always going to be an empty object. @@ -12256,7 +12642,7 @@ components: type: type: string description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: [ "function" ] + enum: ["function"] function: type: object description: The definition of the function that was called. @@ -12293,7 +12679,7 @@ components: type: type: string description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: [ "function" ] + enum: ["function"] function: type: object description: The definition of the function that was called. @@ -12320,7 +12706,7 @@ components: anchor: description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." 
type: string - enum: [ "last_active_at" ] + enum: ["last_active_at"] days: description: The number of days after the anchor time that the vector store will expire. type: integer @@ -12341,7 +12727,7 @@ components: object: description: The object type, which is always `vector_store`. type: string - enum: [ "vector_store" ] + enum: ["vector_store"] created_at: description: The Unix timestamp (in seconds) for when the vector store was created. type: integer @@ -12378,7 +12764,7 @@ components: status: description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. type: string - enum: [ "expired", "in_progress", "completed" ] + enum: ["expired", "in_progress", "completed"] expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" expires_at: @@ -12506,7 +12892,7 @@ components: type: boolean object: type: string - enum: [ vector_store.deleted ] + enum: [vector_store.deleted] required: - id - object @@ -12523,7 +12909,7 @@ components: object: description: The object type, which is always `vector_store.file`. type: string - enum: [ "vector_store.file" ] + enum: ["vector_store.file"] usage_bytes: description: The total vector store usage in bytes. Note that this may be different from the original file size. type: integer @@ -12536,7 +12922,7 @@ components: status: description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. type: string - enum: [ "in_progress", "completed", "cancelled", "failed" ] + enum: ["in_progress", "completed", "cancelled", "failed"] last_error: type: object description: The last error associated with this vector store file. Will be `null` if there are no errors. @@ -12603,7 +12989,7 @@ components: type: type: string description: Always `other`. - enum: [ "other" ] + enum: ["other"] required: - type @@ -12615,7 +13001,7 @@ components: type: type: string description: Always `static`. - enum: [ "static" ] + enum: ["static"] static: $ref: "#/components/schemas/StaticChunkingStrategy" required: @@ -12650,7 +13036,7 @@ components: type: type: string description: Always `auto`. - enum: [ "auto" ] + enum: ["auto"] required: - type @@ -12662,7 +13048,7 @@ components: type: type: string description: Always `static`. - enum: [ "static" ] + enum: ["static"] static: $ref: "#/components/schemas/StaticChunkingStrategy" required: @@ -12723,7 +13109,7 @@ components: type: boolean object: type: string - enum: [ vector_store.file.deleted ] + enum: [vector_store.file.deleted] required: - id - object @@ -12740,7 +13126,7 @@ components: object: description: The object type, which is always `vector_store.file_batch`. type: string - enum: [ "vector_store.files_batch" ] + enum: ["vector_store.files_batch"] created_at: description: The Unix timestamp (in seconds) for when the vector store files batch was created. type: integer @@ -12750,7 +13136,7 @@ components: status: description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. 
type: string - enum: [ "in_progress", "completed", "cancelled", "failed" ] + enum: ["in_progress", "completed", "cancelled", "failed"] file_counts: type: object properties: @@ -12855,7 +13241,7 @@ components: properties: event: type: string - enum: [ "thread.created" ] + enum: ["thread.created"] data: $ref: "#/components/schemas/ThreadObject" required: @@ -12871,7 +13257,7 @@ components: properties: event: type: string - enum: [ "thread.run.created" ] + enum: ["thread.run.created"] data: $ref: "#/components/schemas/RunObject" required: @@ -12884,7 +13270,7 @@ components: properties: event: type: string - enum: [ "thread.run.queued" ] + enum: ["thread.run.queued"] data: $ref: "#/components/schemas/RunObject" required: @@ -12897,7 +13283,7 @@ components: properties: event: type: string - enum: [ "thread.run.in_progress" ] + enum: ["thread.run.in_progress"] data: $ref: "#/components/schemas/RunObject" required: @@ -12910,7 +13296,7 @@ components: properties: event: type: string - enum: [ "thread.run.requires_action" ] + enum: ["thread.run.requires_action"] data: $ref: "#/components/schemas/RunObject" required: @@ -12923,7 +13309,7 @@ components: properties: event: type: string - enum: [ "thread.run.completed" ] + enum: ["thread.run.completed"] data: $ref: "#/components/schemas/RunObject" required: @@ -12949,7 +13335,7 @@ components: properties: event: type: string - enum: [ "thread.run.failed" ] + enum: ["thread.run.failed"] data: $ref: "#/components/schemas/RunObject" required: @@ -12962,7 +13348,7 @@ components: properties: event: type: string - enum: [ "thread.run.cancelling" ] + enum: ["thread.run.cancelling"] data: $ref: "#/components/schemas/RunObject" required: @@ -12975,7 +13361,7 @@ components: properties: event: type: string - enum: [ "thread.run.cancelled" ] + enum: ["thread.run.cancelled"] data: $ref: "#/components/schemas/RunObject" required: @@ -12988,7 +13374,7 @@ components: properties: event: type: string - enum: [ "thread.run.expired" ] + enum: ["thread.run.expired"] data: $ref: "#/components/schemas/RunObject" required: @@ -13004,7 +13390,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.created" ] + enum: ["thread.run.step.created"] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13017,7 +13403,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.in_progress" ] + enum: ["thread.run.step.in_progress"] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13030,7 +13416,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.delta" ] + enum: ["thread.run.step.delta"] data: $ref: "#/components/schemas/RunStepDeltaObject" required: @@ -13043,7 +13429,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.completed" ] + enum: ["thread.run.step.completed"] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13056,7 +13442,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.failed" ] + enum: ["thread.run.step.failed"] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13069,7 +13455,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.cancelled" ] + enum: ["thread.run.step.cancelled"] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13082,7 +13468,7 @@ components: properties: event: type: string - enum: [ "thread.run.step.expired" ] + enum: ["thread.run.step.expired"] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13098,7 +13484,7 @@ components: properties: event: type: 
string - enum: [ "thread.message.created" ] + enum: ["thread.message.created"] data: $ref: "#/components/schemas/MessageObject" required: @@ -13111,7 +13497,7 @@ components: properties: event: type: string - enum: [ "thread.message.in_progress" ] + enum: ["thread.message.in_progress"] data: $ref: "#/components/schemas/MessageObject" required: @@ -13124,7 +13510,7 @@ components: properties: event: type: string - enum: [ "thread.message.delta" ] + enum: ["thread.message.delta"] data: $ref: "#/components/schemas/MessageDeltaObject" required: @@ -13137,7 +13523,7 @@ components: properties: event: type: string - enum: [ "thread.message.completed" ] + enum: ["thread.message.completed"] data: $ref: "#/components/schemas/MessageObject" required: @@ -13150,7 +13536,7 @@ components: properties: event: type: string - enum: [ "thread.message.incomplete" ] + enum: ["thread.message.incomplete"] data: $ref: "#/components/schemas/MessageObject" required: @@ -13165,7 +13551,7 @@ components: properties: event: type: string - enum: [ "error" ] + enum: ["error"] data: $ref: "#/components/schemas/Error" required: @@ -13180,10 +13566,10 @@ components: properties: event: type: string - enum: [ "done" ] + enum: ["done"] data: type: string - enum: [ "[DONE]" ] + enum: ["[DONE]"] required: - event - data @@ -13198,7 +13584,7 @@ components: type: string object: type: string - enum: [ batch ] + enum: [batch] description: The object type, which is always `batch`. endpoint: type: string @@ -13323,7 +13709,7 @@ components: description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. method: type: string - enum: [ "POST" ] + enum: ["POST"] description: The HTTP method to be used for the request. Currently only `POST` is supported. url: type: string @@ -13389,14 +13775,14 @@ components: type: boolean object: type: string - enum: [ list ] + enum: [list] required: - object - data - has_more security: - - ApiKeyAuth: [ ] + - ApiKeyAuth: [] x-oaiMeta: navigationGroups: @@ -13576,6 +13962,30 @@ x-oaiMeta: - type: object key: OpenAIFile path: object + - id: uploads + title: Uploads + description: | + Allows you to upload large files in multiple parts. 
+ navigationGroup: endpoints + sections: + - type: endpoint + key: createUpload + path: create + - type: endpoint + key: addUploadPart + path: add-part + - type: endpoint + key: completeUpload + path: complete + - type: endpoint + key: cancelUpload + path: cancel + - type: object + key: Upload + path: object + - type: object + key: UploadPart + path: part-object - id: images title: Images description: | From 1b6792349be50d47426af447f9fe1351d8d50909 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 20 Jul 2024 16:33:58 +0200 Subject: [PATCH 074/251] refactor: Remove default model from the language model options (#498) --- .../lib/src/chat_models/chat_anthropic.dart | 11 +- .../lib/src/chat_models/mappers.dart | 8 +- .../lib/src/chat_models/types.dart | 61 ++++++-- packages/langchain_anthropic/pubspec.yaml | 1 + .../lib/src/chat_models/types.dart | 3 +- .../lib/src/language_models/base.dart | 31 ---- .../lib/src/language_models/types.dart | 5 + .../langchain_core/lib/src/llms/types.dart | 1 + .../vertex_ai/chat_firebase_vertex_ai.dart | 14 +- .../lib/src/chat_models/vertex_ai/types.dart | 81 +++++++++- .../google_ai/chat_google_generative_ai.dart | 14 +- .../lib/src/chat_models/google_ai/types.dart | 48 +++++- .../chat_models/vertex_ai/chat_vertex_ai.dart | 33 ++-- .../lib/src/chat_models/vertex_ai/types.dart | 52 +++++-- .../lib/src/llms/vertex_ai/types.dart | 51 +++++-- .../lib/src/llms/vertex_ai/vertex_ai.dart | 32 ++-- .../chat_google_generative_ai_test.dart | 14 +- .../lib/src/chat_models/chat_mistralai.dart | 7 +- .../lib/src/chat_models/types.dart | 35 ++++- .../chat_models/chat_ollama/chat_ollama.dart | 5 +- .../src/chat_models/chat_ollama/types.dart | 93 +++++++++++- .../chat_ollama_tools/chat_ollama_tools.dart | 7 +- .../chat_models/chat_ollama_tools/types.dart | 36 +++++ .../langchain_ollama/lib/src/llms/ollama.dart | 7 +- .../langchain_ollama/lib/src/llms/types.dart | 103 ++++++++++++- .../test/chat_models/chat_ollama_test.dart | 2 +- .../lib/src/chat_models/chat_openai.dart | 18 ++- .../lib/src/chat_models/types.dart | 141 ++++++++++++------ .../langchain_openai/lib/src/llms/openai.dart | 26 +++- .../langchain_openai/lib/src/llms/types.dart | 123 +++++++-------- .../test/chains/qa_with_sources_test.dart | 1 - .../test/chat_models/anyscale_test.dart | 4 - .../test/chat_models/chat_openai_test.dart | 1 - 33 files changed, 767 insertions(+), 302 deletions(-) diff --git a/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart b/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart index 13a687a3..1c8360d4 100644 --- a/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart +++ b/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart @@ -154,7 +154,8 @@ class ChatAnthropic extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatAnthropicOptions( - model: 'claude-3-5-sonnet-20240620', + model: defaultModel, + maxTokens: defaultMaxTokens, ), this.encoding = 'cl100k_base', }) : _client = a.AnthropicClient( @@ -177,6 +178,12 @@ class ChatAnthropic extends BaseChatModel { @override String get modelType => 'anthropic-chat'; + /// The default model to use unless another is specified. + static const defaultModel = 'claude-3-5-sonnet-20240620'; + + /// The default max tokens to use unless another is specified. 
+ static const defaultMaxTokens = 1024; + @override Future invoke( final PromptValue input, { @@ -187,7 +194,6 @@ class ChatAnthropic extends BaseChatModel { input.toChatMessages(), options: options, defaultOptions: defaultOptions, - throwNullModelError: throwNullModelError, ), ); return completion.toChatResult(); @@ -205,7 +211,6 @@ class ChatAnthropic extends BaseChatModel { options: options, defaultOptions: defaultOptions, stream: true, - throwNullModelError: throwNullModelError, ), ) .transform(MessageStreamEventTransformer()); diff --git a/packages/langchain_anthropic/lib/src/chat_models/mappers.dart b/packages/langchain_anthropic/lib/src/chat_models/mappers.dart index 002df82c..020ef844 100644 --- a/packages/langchain_anthropic/lib/src/chat_models/mappers.dart +++ b/packages/langchain_anthropic/lib/src/chat_models/mappers.dart @@ -9,6 +9,7 @@ import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/tools.dart'; import 'package:rxdart/rxdart.dart' show WhereNotNullExtension; +import 'chat_anthropic.dart'; import 'types.dart'; /// Creates a [CreateMessageRequest] from the given input. @@ -17,7 +18,6 @@ a.CreateMessageRequest createMessageRequest( required final ChatAnthropicOptions? options, required final ChatAnthropicOptions defaultOptions, final bool stream = false, - required Never Function() throwNullModelError, }) { final systemMsg = messages.firstOrNull is SystemChatMessage ? messages.firstOrNull?.contentAsString @@ -31,10 +31,12 @@ a.CreateMessageRequest createMessageRequest( return a.CreateMessageRequest( model: a.Model.modelId( - options?.model ?? defaultOptions.model ?? throwNullModelError(), + options?.model ?? defaultOptions.model ?? ChatAnthropic.defaultModel, ), messages: messagesDtos, - maxTokens: options?.maxTokens ?? defaultOptions.maxTokens ?? 1024, + maxTokens: options?.maxTokens ?? + defaultOptions.maxTokens ?? + ChatAnthropic.defaultMaxTokens, stopSequences: options?.stopSequences ?? defaultOptions.stopSequences, system: systemMsg, temperature: options?.temperature ?? defaultOptions.temperature, diff --git a/packages/langchain_anthropic/lib/src/chat_models/types.dart b/packages/langchain_anthropic/lib/src/chat_models/types.dart index 4374c820..98069444 100644 --- a/packages/langchain_anthropic/lib/src/chat_models/types.dart +++ b/packages/langchain_anthropic/lib/src/chat_models/types.dart @@ -1,14 +1,28 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; /// {@template chat_anthropic_options} /// Options to pass into the Anthropic Chat Model. +/// +/// Available models: +/// - `claude-3-5-sonnet-20240620` +/// - `claude-3-haiku-20240307` +/// - `claude-3-opus-20240229` +/// - `claude-3-sonnet-20240229` +/// - `claude-2.0` +/// - `claude-2.1` +/// +/// Mind that the list may be outdated. +/// See https://docs.anthropic.com/en/docs/about-claude/models for the latest list. /// {@endtemplate} +@immutable class ChatAnthropicOptions extends ChatModelOptions { /// {@macro chat_anthropic_options} const ChatAnthropicOptions({ - this.model = 'claude-3-5-sonnet-20240620', - this.maxTokens = 1024, + super.model, + this.maxTokens, this.stopSequences, this.temperature, this.topK, @@ -19,20 +33,6 @@ class ChatAnthropicOptions extends ChatModelOptions { super.concurrencyLimit, }); - /// ID of the model to use (e.g. 'claude-3-5-sonnet-20240620'). 
- /// - /// Available models: - /// - `claude-3-5-sonnet-20240620` - /// - `claude-3-haiku-20240307` - /// - `claude-3-opus-20240229` - /// - `claude-3-sonnet-20240229` - /// - `claude-2.0` - /// - `claude-2.1` - /// - /// Mind that the list may be outdated. - /// See https://docs.anthropic.com/en/docs/about-claude/models for the latest list. - final String? model; - /// The maximum number of tokens to generate before stopping. /// /// Note that our models may stop _before_ reaching this maximum. This parameter @@ -113,4 +113,33 @@ class ChatAnthropicOptions extends ChatModelOptions { concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatAnthropicOptions other) { + return model == other.model && + maxTokens == other.maxTokens && + const ListEquality() + .equals(stopSequences, other.stopSequences) && + temperature == other.temperature && + topK == other.topK && + topP == other.topP && + userId == other.userId && + const ListEquality().equals(tools, other.tools) && + toolChoice == other.toolChoice && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + maxTokens.hashCode ^ + const ListEquality().hash(stopSequences) ^ + temperature.hashCode ^ + topK.hashCode ^ + topP.hashCode ^ + userId.hashCode ^ + const ListEquality().hash(tools) ^ + toolChoice.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 33e625f7..180234ac 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -21,6 +21,7 @@ dependencies: http: ^1.1.0 langchain_core: 0.3.3 langchain_tiktoken: ^1.0.1 + meta: ^1.11.0 rxdart: ^0.27.7 dev_dependencies: diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index fa0bc0fc..e9b788c7 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -10,9 +10,10 @@ import '../tools/base.dart'; class ChatModelOptions extends LanguageModelOptions { /// {@macro chat_model_options} const ChatModelOptions({ - super.concurrencyLimit, + super.model, this.tools, this.toolChoice, + super.concurrencyLimit, }); /// A list of tools the model may call. diff --git a/packages/langchain_core/lib/src/language_models/base.dart b/packages/langchain_core/lib/src/language_models/base.dart index 33d3b002..3156cd74 100644 --- a/packages/langchain_core/lib/src/language_models/base.dart +++ b/packages/langchain_core/lib/src/language_models/base.dart @@ -1,5 +1,3 @@ -import 'package:meta/meta.dart'; - import '../langchain/base.dart'; import '../prompts/types.dart'; import 'types.dart'; @@ -58,33 +56,4 @@ abstract class BaseLanguageModel< @override String toString() => modelType; - - /// Throws an error if the model id is not specified. - @protected - Never throwNullModelError() { - throw ArgumentError(''' -Null model in $runtimeType. - -You need to specify the id of model to use either in `$runtimeType.defaultOptions` -or in the options passed when invoking the model. 
- -Example: -``` -// In defaultOptions -final model = $runtimeType( - defaultOptions: ${runtimeType}Options( - model: 'model-id', - ), -); - -// Or when invoking the model -final res = await model.invoke( - prompt, - options: ${runtimeType}Options( - model: 'model-id', - ), -); -``` -'''); - } } diff --git a/packages/langchain_core/lib/src/language_models/types.dart b/packages/langchain_core/lib/src/language_models/types.dart index f1475ad2..c2e6df11 100644 --- a/packages/langchain_core/lib/src/language_models/types.dart +++ b/packages/langchain_core/lib/src/language_models/types.dart @@ -10,8 +10,13 @@ import '../langchain/types.dart'; abstract class LanguageModelOptions extends BaseLangChainOptions { /// {@macro language_model_options} const LanguageModelOptions({ + this.model, super.concurrencyLimit, }); + + /// ID of the language model to use. + /// Check the provider's documentation for available models. + final String? model; } /// {@template language_model} diff --git a/packages/langchain_core/lib/src/llms/types.dart b/packages/langchain_core/lib/src/llms/types.dart index d6bed6f3..47b98285 100644 --- a/packages/langchain_core/lib/src/llms/types.dart +++ b/packages/langchain_core/lib/src/llms/types.dart @@ -9,6 +9,7 @@ import '../language_models/types.dart'; class LLMOptions extends LanguageModelOptions { /// {@macro llm_options} const LLMOptions({ + super.model, super.concurrencyLimit, }); } diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 77ce67d6..20b2b520 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -154,7 +154,7 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// - [ChatFirebaseVertexAI.location] ChatFirebaseVertexAI({ super.defaultOptions = const ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-flash', + model: defaultModel, ), this.app, this.appCheck, @@ -188,15 +188,18 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// A UUID generator. late final Uuid _uuid = const Uuid(); - @override - String get modelType => 'chat-firebase-vertex-ai'; - /// The current model set in [_firebaseClient]; String _currentModel; /// The current system instruction set in [_firebaseClient]; String? _currentSystemInstruction; + @override + String get modelType => 'chat-firebase-vertex-ai'; + + /// The default model to use unless another is specified. + static const defaultModel = 'gemini-1.5-flash'; + @override Future invoke( final PromptValue input, { @@ -329,8 +332,7 @@ class ChatFirebaseVertexAI extends BaseChatModel { final List messages, final ChatFirebaseVertexAIOptions? options, ) { - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final model = options?.model ?? defaultOptions.model ?? defaultModel; final systemInstruction = messages.firstOrNull is SystemChatMessage ? 
messages.firstOrNull?.contentAsString diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart index d2aee55d..7c92e16c 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart @@ -1,12 +1,19 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; /// {@template chat_firebase_vertex_ai_options} /// Options to pass into the Vertex AI for Firebase model. +/// +/// You can find a list of available models here: +/// https://firebase.google.com/docs/vertex-ai/gemini-models /// {@endtemplate} +@immutable class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// {@macro chat_firebase_vertex_ai_options} const ChatFirebaseVertexAIOptions({ - this.model = 'gemini-1.5-flash', + super.model, this.topP, this.topK, this.candidateCount, @@ -20,12 +27,6 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { super.concurrencyLimit, }); - /// The LLM to use. - /// - /// You can find a list of available models here: - /// https://firebase.google.com/docs/vertex-ai/gemini-models - final String? model; - /// The maximum cumulative probability of tokens to consider when sampling. /// The model uses combined Top-k and nucleus sampling. Tokens are sorted /// based on their assigned probabilities so that only the most likely @@ -99,7 +100,11 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { final int? maxOutputTokens, final double? temperature, final List? stopSequences, + final String? responseMimeType, final List? safetySettings, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, }) { return ChatFirebaseVertexAIOptions( model: model ?? this.model, @@ -109,9 +114,48 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { maxOutputTokens: maxOutputTokens ?? this.maxOutputTokens, temperature: temperature ?? this.temperature, stopSequences: stopSequences ?? this.stopSequences, + responseMimeType: responseMimeType ?? this.responseMimeType, safetySettings: safetySettings ?? this.safetySettings, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, + concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatFirebaseVertexAIOptions other) { + return model == other.model && + topP == other.topP && + topK == other.topK && + candidateCount == other.candidateCount && + maxOutputTokens == other.maxOutputTokens && + temperature == other.temperature && + const ListEquality() + .equals(stopSequences, other.stopSequences) && + responseMimeType == other.responseMimeType && + const ListEquality() + .equals(safetySettings, other.safetySettings) && + const ListEquality().equals(tools, other.tools) && + toolChoice == other.toolChoice && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + topP.hashCode ^ + topK.hashCode ^ + candidateCount.hashCode ^ + maxOutputTokens.hashCode ^ + temperature.hashCode ^ + const ListEquality().hash(stopSequences) ^ + responseMimeType.hashCode ^ + const ListEquality() + .hash(safetySettings) ^ + const ListEquality().hash(tools) ^ + toolChoice.hashCode ^ + concurrencyLimit.hashCode; + } } /// {@template chat_google_generative_ai_safety_setting} @@ -119,6 +163,7 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// Passing a safety setting for a category changes the allowed probability that /// content is blocked. /// {@endtemplate} +@immutable class ChatFirebaseVertexAISafetySetting { /// {@macro chat_google_generative_ai_safety_setting} const ChatFirebaseVertexAISafetySetting({ @@ -131,6 +176,28 @@ class ChatFirebaseVertexAISafetySetting { /// Controls the probability threshold at which harm is blocked. final ChatFirebaseVertexAISafetySettingThreshold threshold; + + /// Creates a copy of this [ChatFirebaseVertexAISafetySetting] object with + /// the given fields replaced with the new values. + ChatFirebaseVertexAISafetySetting copyWith({ + final ChatFirebaseVertexAISafetySettingCategory? category, + final ChatFirebaseVertexAISafetySettingThreshold? threshold, + }) { + return ChatFirebaseVertexAISafetySetting( + category: category ?? this.category, + threshold: threshold ?? this.threshold, + ); + } + + @override + bool operator ==(covariant final ChatFirebaseVertexAISafetySetting other) { + return category == other.category && threshold == other.threshold; + } + + @override + int get hashCode { + return category.hashCode ^ threshold.hashCode; + } } /// Safety settings categorizes. diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart index 58934755..0fde4b9f 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart @@ -211,7 +211,7 @@ class ChatGoogleGenerativeAI final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-flash', + model: defaultModel, ), }) : _currentModel = defaultOptions.model ?? '', _httpClient = createDefaultHttpClient( @@ -247,15 +247,18 @@ class ChatGoogleGenerativeAI /// Get the API key. String get apiKey => _httpClient.headers['x-goog-api-key'] ?? ''; - @override - String get modelType => 'chat-google-generative-ai'; - /// The current model set in [_googleAiClient]; String _currentModel; /// The current system instruction set in [_googleAiClient]; String? 
_currentSystemInstruction; + @override + String get modelType => 'chat-google-generative-ai'; + + /// The default model to use unless another is specified. + static const defaultModel = 'gemini-1.5-flash'; + @override Future invoke( final PromptValue input, { @@ -389,8 +392,7 @@ class ChatGoogleGenerativeAI final List messages, final ChatGoogleGenerativeAIOptions? options, ) { - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final model = options?.model ?? defaultOptions.model ?? defaultModel; final systemInstruction = messages.firstOrNull is SystemChatMessage ? messages.firstOrNull?.contentAsString diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart index c86c80a5..8c4bff41 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart @@ -1,12 +1,17 @@ import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; /// {@template chat_google_generative_ai_options} /// Options to pass into the Google Generative AI Chat Model. +/// +/// You can find a list of available models [here](https://ai.google.dev/models). /// {@endtemplate} +@immutable class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// {@macro chat_google_generative_ai_options} const ChatGoogleGenerativeAIOptions({ - this.model = 'gemini-1.5-flash', + super.model, this.topP, this.topK, this.candidateCount, @@ -21,11 +26,6 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { super.concurrencyLimit, }); - /// The LLM to use. - /// - /// You can find a list of available models here: https://ai.google.dev/models - final String? model; - /// The maximum cumulative probability of tokens to consider when sampling. /// The model uses combined Top-k and nucleus sampling. Tokens are sorted /// based on their assigned probabilities so that only the most likely @@ -126,6 +126,9 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { final double? temperature, final List? stopSequences, final List? safetySettings, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, }) { return ChatGoogleGenerativeAIOptions( model: model ?? this.model, @@ -136,8 +139,41 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { temperature: temperature ?? this.temperature, stopSequences: stopSequences ?? this.stopSequences, safetySettings: safetySettings ?? this.safetySettings, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, + concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatGoogleGenerativeAIOptions other) { + return model == other.model && + topP == other.topP && + topK == other.topK && + candidateCount == other.candidateCount && + maxOutputTokens == other.maxOutputTokens && + temperature == other.temperature && + stopSequences == other.stopSequences && + safetySettings == other.safetySettings && + tools == other.tools && + toolChoice == other.toolChoice && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + topP.hashCode ^ + topK.hashCode ^ + candidateCount.hashCode ^ + maxOutputTokens.hashCode ^ + temperature.hashCode ^ + stopSequences.hashCode ^ + safetySettings.hashCode ^ + tools.hashCode ^ + toolChoice.hashCode ^ + concurrencyLimit.hashCode; + } } /// {@template chat_google_generative_ai_safety_setting} diff --git a/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart b/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart index 4f668b40..e79f00b4 100644 --- a/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart @@ -117,8 +117,8 @@ class ChatVertexAI extends BaseChatModel { final String location = 'us-central1', final String? rootUrl, super.defaultOptions = const ChatVertexAIOptions( - publisher: 'google', - model: 'chat-bison', + publisher: defaultPublisher, + model: defaultModel, ), }) : client = VertexAIGenAIClient( httpClient: httpClient, @@ -139,6 +139,12 @@ class ChatVertexAI extends BaseChatModel { @override String get modelType => 'vertex-ai-chat'; + /// The default publisher to use unless another is specified. + static const defaultPublisher = 'google'; + + /// The default model to use unless another is specified. + static const defaultModel = 'chat-bison'; + @override Future invoke( final PromptValue input, { @@ -158,19 +164,15 @@ class ChatVertexAI extends BaseChatModel { final examples = (options?.examples ?? defaultOptions.examples) ?.map((final e) => e.toVertexAIChatExample()) .toList(growable: false); - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final publisher = + options?.publisher ?? defaultOptions.publisher ?? defaultPublisher; + final model = options?.model ?? defaultOptions.model ?? defaultModel; final result = await client.chat.predict( context: context, examples: examples, messages: vertexMessages, - publisher: options?.publisher ?? - defaultOptions.publisher ?? - ArgumentError.checkNotNull( - defaultOptions.publisher, - 'VertexAIOptions.publisher', - ), + publisher: publisher, model: model, parameters: VertexAITextChatModelRequestParams( maxOutputTokens: @@ -216,18 +218,15 @@ class ChatVertexAI extends BaseChatModel { final examples = (options?.examples ?? defaultOptions.examples) ?.map((final e) => e.toVertexAIChatExample()) .toList(growable: false); - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final publisher = + options?.publisher ?? defaultOptions.publisher ?? defaultPublisher; + final model = options?.model ?? defaultOptions.model ?? defaultModel; final res = await client.chat.countTokens( context: context, examples: examples, messages: vertexMessages, - publisher: options?.publisher ?? 
- ArgumentError.checkNotNull( - defaultOptions.publisher, - 'VertexAIOptions.publisher', - ), + publisher: publisher, model: model, ); return res.totalTokens; diff --git a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart index 49316c4e..c0642867 100644 --- a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart @@ -1,13 +1,19 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; +import 'package:meta/meta.dart'; /// {@template chat_vertex_ai_options} /// Options to pass into the Vertex AI Chat Model. +/// +/// You can find a list of available models here: +/// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models /// {@endtemplate} +@immutable class ChatVertexAIOptions extends ChatModelOptions { /// {@macro chat_vertex_ai_options} const ChatVertexAIOptions({ - this.publisher = 'google', - this.model = 'chat-bison', + this.publisher, + super.model, this.maxOutputTokens, this.temperature, this.topP, @@ -23,17 +29,6 @@ class ChatVertexAIOptions extends ChatModelOptions { /// Use `google` for first-party models. final String? publisher; - /// The text model to use. - /// - /// To use the latest model version, specify the model name without a version - /// number (e.g. `chat-bison`). - /// To use a stable model version, specify the model version number - /// (e.g. `chat-bison@001`). - /// - /// You can find a list of available models here: - /// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models - final String? model; - /// Maximum number of tokens that can be generated in the response. A token /// is approximately four characters. 100 tokens correspond to roughly /// 60-80 words. @@ -114,6 +109,7 @@ class ChatVertexAIOptions extends ChatModelOptions { final List? stopSequences, final int? candidateCount, final List? examples, + final int? concurrencyLimit, }) { return ChatVertexAIOptions( publisher: publisher ?? this.publisher, @@ -125,6 +121,36 @@ class ChatVertexAIOptions extends ChatModelOptions { stopSequences: stopSequences ?? this.stopSequences, candidateCount: candidateCount ?? this.candidateCount, examples: examples ?? this.examples, + concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatVertexAIOptions other) { + return publisher == other.publisher && + model == other.model && + maxOutputTokens == other.maxOutputTokens && + temperature == other.temperature && + topP == other.topP && + topK == other.topK && + const ListEquality() + .equals(stopSequences, other.stopSequences) && + candidateCount == other.candidateCount && + const ListEquality().equals(examples, other.examples) && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return publisher.hashCode ^ + model.hashCode ^ + maxOutputTokens.hashCode ^ + temperature.hashCode ^ + topP.hashCode ^ + topK.hashCode ^ + const ListEquality().hash(stopSequences) ^ + candidateCount.hashCode ^ + const ListEquality().hash(examples) ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_google/lib/src/llms/vertex_ai/types.dart b/packages/langchain_google/lib/src/llms/vertex_ai/types.dart index bf382c44..f9eee704 100644 --- a/packages/langchain_google/lib/src/llms/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/llms/vertex_ai/types.dart @@ -1,13 +1,19 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/llms.dart'; +import 'package:meta/meta.dart'; /// {@template vertex_ai_options} /// Options to pass into the Vertex AI LLM. +/// +/// You can find a list of available models here: +/// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models /// {@endtemplate} +@immutable class VertexAIOptions extends LLMOptions { /// {@macro vertex_ai_options} const VertexAIOptions({ - this.publisher = 'google', - this.model = 'text-bison', + this.publisher, + super.model, this.maxOutputTokens, this.temperature, this.topP, @@ -22,17 +28,6 @@ class VertexAIOptions extends LLMOptions { /// Use `google` for first-party models. final String? publisher; - /// The text model to use. - /// - /// To use the latest model version, specify the model name without a version - /// number (e.g. `text-bison`). - /// To use a stable model version, specify the model version number - /// (e.g. `text-bison@001`). - /// - /// You can find a list of available models here: - /// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models - final String? model; - /// Maximum number of tokens that can be generated in the response. A token /// is approximately four characters. 100 tokens correspond to roughly /// 60-80 words. @@ -109,6 +104,7 @@ class VertexAIOptions extends LLMOptions { final int? topK, final List? stopSequences, final int? candidateCount, + final int? concurrencyLimit, }) { return VertexAIOptions( publisher: publisher ?? this.publisher, @@ -119,6 +115,35 @@ class VertexAIOptions extends LLMOptions { topK: topK ?? this.topK, stopSequences: stopSequences ?? this.stopSequences, candidateCount: candidateCount ?? this.candidateCount, + concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } + + @override + @override + bool operator ==(covariant final VertexAIOptions other) { + return publisher == other.publisher && + model == other.model && + maxOutputTokens == other.maxOutputTokens && + temperature == other.temperature && + topP == other.topP && + topK == other.topK && + const ListEquality() + .equals(stopSequences, other.stopSequences) && + candidateCount == other.candidateCount && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return publisher.hashCode ^ + model.hashCode ^ + maxOutputTokens.hashCode ^ + temperature.hashCode ^ + topP.hashCode ^ + topK.hashCode ^ + const ListEquality().hash(stopSequences) ^ + candidateCount.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart b/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart index a0873fcc..955cc7ca 100644 --- a/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart +++ b/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart @@ -123,8 +123,8 @@ class VertexAI extends BaseLLM { final String location = 'us-central1', final String? rootUrl, super.defaultOptions = const VertexAIOptions( - publisher: 'google', - model: 'text-bison', + publisher: defaultPublisher, + model: defaultModel, ), }) : client = VertexAIGenAIClient( httpClient: httpClient, @@ -145,21 +145,24 @@ class VertexAI extends BaseLLM { @override String get modelType => 'vertex-ai'; + /// The default publisher to use unless another is specified. + static const defaultPublisher = 'google'; + + /// The default model to use unless another is specified. + static const defaultModel = 'text-bison'; + @override Future invoke( final PromptValue input, { final VertexAIOptions? options, }) async { final id = _uuid.v4(); - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final publisher = + options?.publisher ?? defaultOptions.publisher ?? defaultPublisher; + final model = options?.model ?? defaultOptions.model ?? defaultModel; final result = await client.text.predict( prompt: input.toString(), - publisher: options?.publisher ?? - ArgumentError.checkNotNull( - defaultOptions.publisher, - 'VertexAIOptions.publisher', - ), + publisher: publisher, model: model, parameters: VertexAITextModelRequestParams( maxOutputTokens: @@ -191,15 +194,12 @@ class VertexAI extends BaseLLM { final PromptValue promptValue, { final VertexAIOptions? options, }) async { - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final publisher = + options?.publisher ?? defaultOptions.publisher ?? defaultPublisher; + final model = options?.model ?? defaultOptions.model ?? defaultModel; final res = await client.text.countTokens( prompt: promptValue.toString(), - publisher: options?.publisher ?? 
- ArgumentError.checkNotNull( - defaultOptions.publisher, - 'VertexAIOptions.publisher', - ), + publisher: publisher, model: model, ); return res.totalTokens; diff --git a/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart b/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart index f6567f6d..6d692977 100644 --- a/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart +++ b/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart @@ -14,7 +14,7 @@ import 'package:test/test.dart'; void main() { group('ChatGoogleGenerativeAI tests', () { - const defaultModel = 'gemini-1.5-pro-latest'; + const defaultModel = 'gemini-1.5-pro'; late ChatGoogleGenerativeAI chatModel; @@ -73,7 +73,7 @@ void main() { expect(res.output.content, isNotEmpty); }); - test('Text-and-image input with gemini-pro-vision', () async { + test('Text-and-image input', () async { final res = await chatModel.invoke( PromptValue.chat([ ChatMessage.human( @@ -89,9 +89,6 @@ void main() { ]), ), ]), - options: const ChatGoogleGenerativeAIOptions( - model: 'gemini-pro-vision', - ), ); expect(res.output.content.toLowerCase(), contains('apple')); @@ -122,7 +119,8 @@ void main() { ), ); expect(res.output.content.length, lessThan(20)); - expect(res.finishReason, FinishReason.length); + // It seems the gemini-1.5 doesn't return length reason anymore + // expect(res.finishReason, FinishReason.length); }); test('Test Multi-turn conversations with gemini-pro', () async { @@ -177,7 +175,7 @@ void main() { 'properties': { 'location': { 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', + 'description': 'The city and country, e.g. San Francisco, US', }, 'unit': { 'type': 'string', @@ -196,7 +194,7 @@ void main() { ); final humanMessage = ChatMessage.humanText( - 'What’s the weather like in Boston and Madrid right now in celsius?', + 'What’s the weather like in Boston, US and Madrid, Spain in Celsius?', ); final res1 = await model.invoke(PromptValue.chat([humanMessage])); diff --git a/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart b/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart index 31bc53aa..70f6bd4b 100644 --- a/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart +++ b/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart @@ -156,7 +156,7 @@ class ChatMistralAI extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatMistralAIOptions( - model: 'mistral-small', + model: defaultModel, ), this.encoding = 'cl100k_base', }) : _client = MistralAIClient( @@ -179,6 +179,9 @@ class ChatMistralAI extends BaseChatModel { @override String get modelType => 'chat-mistralai'; + /// The default model to use unless another is specified. + static const defaultModel = 'mistral-small'; + @override Future invoke( final PromptValue input, { @@ -216,7 +219,7 @@ class ChatMistralAI extends BaseChatModel { }) { return ChatCompletionRequest( model: ChatCompletionModel.modelId( - options?.model ?? defaultOptions.model ?? throwNullModelError(), + options?.model ?? defaultOptions.model ?? defaultModel, ), messages: messages.toChatCompletionMessages(), temperature: options?.temperature ?? 
defaultOptions.temperature, diff --git a/packages/langchain_mistralai/lib/src/chat_models/types.dart b/packages/langchain_mistralai/lib/src/chat_models/types.dart index 60158ea7..aa2f9537 100644 --- a/packages/langchain_mistralai/lib/src/chat_models/types.dart +++ b/packages/langchain_mistralai/lib/src/chat_models/types.dart @@ -1,12 +1,16 @@ import 'package:langchain_core/chat_models.dart'; +import 'package:meta/meta.dart'; /// {@template chat_mistral_ai_options} /// Options to pass into ChatMistralAI. +/// +/// You can check the list of available models [here](https://docs.mistral.ai/models). /// {@endtemplate} +@immutable class ChatMistralAIOptions extends ChatModelOptions { /// {@macro chat_mistral_ai_options} const ChatMistralAIOptions({ - this.model = 'mistral-small', + super.model, this.temperature, this.topP, this.maxTokens, @@ -15,11 +19,6 @@ class ChatMistralAIOptions extends ChatModelOptions { super.concurrencyLimit, }); - /// ID of the model to use. You can use the [List Available Models](https://docs.mistral.ai/api#operation/listModels) - /// API to see all of your available models, or see our [Model overview](https://docs.mistral.ai/models) - /// for model descriptions. - final String? model; - /// What sampling temperature to use, between 0.0 and 2.0. Higher values like /// 0.8 will make the output more random, while lower values like 0.2 will /// make it more focused and deterministic. @@ -56,6 +55,7 @@ class ChatMistralAIOptions extends ChatModelOptions { final int? maxTokens, final bool? safePrompt, final int? randomSeed, + final int? concurrencyLimit, }) { return ChatMistralAIOptions( model: model ?? this.model, @@ -64,6 +64,29 @@ class ChatMistralAIOptions extends ChatModelOptions { maxTokens: maxTokens ?? this.maxTokens, safePrompt: safePrompt ?? this.safePrompt, randomSeed: randomSeed ?? this.randomSeed, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatMistralAIOptions other) { + return model == other.model && + temperature == other.temperature && + topP == other.topP && + maxTokens == other.maxTokens && + safePrompt == other.safePrompt && + randomSeed == other.randomSeed && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + temperature.hashCode ^ + topP.hashCode ^ + maxTokens.hashCode ^ + safePrompt.hashCode ^ + randomSeed.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart index 7dbed939..2ff391ef 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart @@ -178,6 +178,9 @@ class ChatOllama extends BaseChatModel { @override String get modelType => 'chat-ollama'; + /// The default model to use unless another is specified. + static const defaultModel = 'llama3'; + @override Future invoke( final PromptValue input, { @@ -218,7 +221,7 @@ class ChatOllama extends BaseChatModel { final ChatOllamaOptions? options, }) { return GenerateChatCompletionRequest( - model: options?.model ?? defaultOptions.model ?? throwNullModelError(), + model: options?.model ?? defaultOptions.model ?? defaultModel, messages: messages.toMessages(), format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), keepAlive: options?.keepAlive ?? 
defaultOptions.keepAlive, diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart index 67598acb..971f259c 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart @@ -1,15 +1,21 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; +import 'package:meta/meta.dart'; import '../../../langchain_ollama.dart'; import '../../llms/types.dart'; /// {@template chat_ollama_options} /// Options to pass into ChatOllama. +/// +/// For a complete list of supported models and model variants, see the +/// [Ollama model library](https://ollama.ai/library). /// {@endtemplate} +@immutable class ChatOllamaOptions extends ChatModelOptions { /// {@macro chat_ollama_options} const ChatOllamaOptions({ - this.model = 'llama3', + super.model, this.format, this.keepAlive, this.numKeep, @@ -44,9 +50,6 @@ class ChatOllamaOptions extends ChatModelOptions { super.concurrencyLimit, }); - /// The model used to generate completions - final String? model; - /// The format to return a response in. Currently the only accepted value is /// json. /// @@ -203,6 +206,7 @@ class ChatOllamaOptions extends ChatModelOptions { ChatOllamaOptions copyWith({ final String? model, final OllamaResponseFormat? format, + final int? keepAlive, final int? numKeep, final int? seed, final int? numPredict, @@ -223,7 +227,6 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? numa, final int? numCtx, final int? numBatch, - final int? numGqa, final int? numGpu, final int? mainGpu, final bool? lowVram, @@ -232,14 +235,13 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? vocabOnly, final bool? useMmap, final bool? useMlock, - final bool? embeddingOnly, - final double? ropeFrequencyBase, - final double? ropeFrequencyScale, final int? numThread, + final int? concurrencyLimit, }) { return ChatOllamaOptions( model: model ?? this.model, format: format ?? this.format, + keepAlive: keepAlive ?? this.keepAlive, numKeep: numKeep ?? this.numKeep, seed: seed ?? this.seed, numPredict: numPredict ?? this.numPredict, @@ -269,6 +271,81 @@ class ChatOllamaOptions extends ChatModelOptions { useMmap: useMmap ?? this.useMmap, useMlock: useMlock ?? this.useMlock, numThread: numThread ?? this.numThread, + concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatOllamaOptions other) { + return model == other.model && + format == other.format && + keepAlive == other.keepAlive && + numKeep == other.numKeep && + seed == other.seed && + numPredict == other.numPredict && + topK == other.topK && + topP == other.topP && + tfsZ == other.tfsZ && + typicalP == other.typicalP && + repeatLastN == other.repeatLastN && + temperature == other.temperature && + repeatPenalty == other.repeatPenalty && + presencePenalty == other.presencePenalty && + frequencyPenalty == other.frequencyPenalty && + mirostat == other.mirostat && + mirostatTau == other.mirostatTau && + mirostatEta == other.mirostatEta && + penalizeNewline == other.penalizeNewline && + const ListEquality().equals(stop, other.stop) && + numa == other.numa && + numCtx == other.numCtx && + numBatch == other.numBatch && + numGpu == other.numGpu && + mainGpu == other.mainGpu && + lowVram == other.lowVram && + f16KV == other.f16KV && + logitsAll == other.logitsAll && + vocabOnly == other.vocabOnly && + useMmap == other.useMmap && + useMlock == other.useMlock && + numThread == other.numThread && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + format.hashCode ^ + keepAlive.hashCode ^ + numKeep.hashCode ^ + seed.hashCode ^ + numPredict.hashCode ^ + topK.hashCode ^ + topP.hashCode ^ + tfsZ.hashCode ^ + typicalP.hashCode ^ + repeatLastN.hashCode ^ + temperature.hashCode ^ + repeatPenalty.hashCode ^ + presencePenalty.hashCode ^ + frequencyPenalty.hashCode ^ + mirostat.hashCode ^ + mirostatTau.hashCode ^ + mirostatEta.hashCode ^ + penalizeNewline.hashCode ^ + const ListEquality().hash(stop) ^ + numa.hashCode ^ + numCtx.hashCode ^ + numBatch.hashCode ^ + numGpu.hashCode ^ + mainGpu.hashCode ^ + lowVram.hashCode ^ + f16KV.hashCode ^ + logitsAll.hashCode ^ + vocabOnly.hashCode ^ + useMmap.hashCode ^ + useMlock.hashCode ^ + numThread.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart index 889e7c87..677fd308 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart @@ -83,7 +83,7 @@ class ChatOllamaTools extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatOllamaToolsOptions( - options: ChatOllamaOptions(model: 'llama3'), + options: ChatOllamaOptions(model: defaultModel), ), this.encoding = 'cl100k_base', }) : _client = OllamaClient( @@ -108,6 +108,9 @@ class ChatOllamaTools extends BaseChatModel { @override String get modelType => 'chat-ollama-tools'; + /// The default model to use unless another is specified. + static const defaultModel = 'llama3'; + @override Future invoke( PromptValue input, { @@ -132,7 +135,7 @@ class ChatOllamaTools extends BaseChatModel { final defaultOptions = this.defaultOptions.options ?? const ChatOllamaOptions(); return GenerateChatCompletionRequest( - model: options?.model ?? defaultOptions.model ?? throwNullModelError(), + model: options?.model ?? defaultOptions.model ?? defaultModel, messages: messages.toMessages(), format: ResponseFormat.json, keepAlive: options?.keepAlive ?? 
defaultOptions.keepAlive, diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart index 9447a51f..f10f1186 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart @@ -1,5 +1,7 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; import '../chat_ollama/types.dart'; import 'chat_ollama_tools.dart'; @@ -9,6 +11,7 @@ export '../chat_ollama/types.dart'; /// {@template chat_ollama_tools_options} /// Options to pass into [ChatOllamaTools]. /// {@endtemplate} +@immutable class ChatOllamaToolsOptions extends ChatModelOptions { /// {@macro chat_ollama_tools_options} const ChatOllamaToolsOptions({ @@ -57,6 +60,39 @@ Example response format: Ensure your response is valid JSON and follows this exact format. '''; + + /// Creates a copy of this [ChatOllamaToolsOptions] object with the given + /// fields replaced with the new values. + ChatOllamaToolsOptions copyWith({ + ChatOllamaOptions? options, + List? tools, + ChatToolChoice? toolChoice, + String? toolsSystemPromptTemplate, + }) { + return ChatOllamaToolsOptions( + options: options ?? this.options, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, + toolsSystemPromptTemplate: + toolsSystemPromptTemplate ?? this.toolsSystemPromptTemplate, + ); + } + + @override + bool operator ==(covariant final ChatOllamaToolsOptions other) { + return options == other.options && + const ListEquality().equals(tools, other.tools) && + toolChoice == other.toolChoice && + toolsSystemPromptTemplate == other.toolsSystemPromptTemplate; + } + + @override + int get hashCode { + return options.hashCode ^ + const ListEquality().hash(tools) ^ + toolChoice.hashCode ^ + toolsSystemPromptTemplate.hashCode; + } } /// Default tool called if model decides no other tools should be called diff --git a/packages/langchain_ollama/lib/src/llms/ollama.dart b/packages/langchain_ollama/lib/src/llms/ollama.dart index e61c6e27..fd9a8ed4 100644 --- a/packages/langchain_ollama/lib/src/llms/ollama.dart +++ b/packages/langchain_ollama/lib/src/llms/ollama.dart @@ -152,7 +152,7 @@ class Ollama extends BaseLLM { final Map? queryParams, final http.Client? client, super.defaultOptions = const OllamaOptions( - model: 'llama3', + model: defaultModel, ), this.encoding = 'cl100k_base', }) : _client = OllamaClient( @@ -177,6 +177,9 @@ class Ollama extends BaseLLM { @override String get modelType => 'ollama'; + /// The default model to use unless another is specified. + static const defaultModel = 'llama3'; + @override Future invoke( final PromptValue input, { @@ -210,7 +213,7 @@ class Ollama extends BaseLLM { final OllamaOptions? options, }) { return GenerateCompletionRequest( - model: options?.model ?? defaultOptions.model ?? throwNullModelError(), + model: options?.model ?? defaultOptions.model ?? 
defaultModel, prompt: prompt, system: options?.system, template: options?.template, diff --git a/packages/langchain_ollama/lib/src/llms/types.dart b/packages/langchain_ollama/lib/src/llms/types.dart index dcbe7669..494e759e 100644 --- a/packages/langchain_ollama/lib/src/llms/types.dart +++ b/packages/langchain_ollama/lib/src/llms/types.dart @@ -1,12 +1,18 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/llms.dart'; +import 'package:meta/meta.dart'; /// {@template ollama_options} /// Options to pass into the Ollama LLM. +/// +/// For a complete list of supported models and model variants, see the +/// [Ollama model library](https://ollama.ai/library). /// {@endtemplate} +@immutable class OllamaOptions extends LLMOptions { /// {@macro ollama_options} const OllamaOptions({ - this.model = 'llama3', + super.model, this.system, this.template, this.context, @@ -45,9 +51,6 @@ class OllamaOptions extends LLMOptions { super.concurrencyLimit, }); - /// The model used to generate completions - final String? model; - /// The system prompt (Overrides what is defined in the Modelfile). final String? system; @@ -228,6 +231,7 @@ class OllamaOptions extends LLMOptions { final List? context, final OllamaResponseFormat? format, final bool? raw, + final int? keepAlive, final int? numKeep, final int? seed, final int? numPredict, @@ -248,7 +252,6 @@ class OllamaOptions extends LLMOptions { final bool? numa, final int? numCtx, final int? numBatch, - final int? numGqa, final int? numGpu, final int? mainGpu, final bool? lowVram, @@ -257,10 +260,8 @@ class OllamaOptions extends LLMOptions { final bool? vocabOnly, final bool? useMmap, final bool? useMlock, - final bool? embeddingOnly, - final double? ropeFrequencyBase, - final double? ropeFrequencyScale, final int? numThread, + final int? concurrencyLimit, }) { return OllamaOptions( model: model ?? this.model, @@ -269,6 +270,7 @@ class OllamaOptions extends LLMOptions { context: context ?? this.context, format: format ?? this.format, raw: raw ?? this.raw, + keepAlive: keepAlive ?? this.keepAlive, numKeep: numKeep ?? this.numKeep, seed: seed ?? this.seed, numPredict: numPredict ?? this.numPredict, @@ -298,8 +300,93 @@ class OllamaOptions extends LLMOptions { useMmap: useMmap ?? this.useMmap, useMlock: useMlock ?? this.useMlock, numThread: numThread ?? this.numThread, + concurrencyLimit: concurrencyLimit ?? 
super.concurrencyLimit, ); } + + @override + bool operator ==(covariant final OllamaOptions other) { + return identical(this, other) || + runtimeType == other.runtimeType && + model == other.model && + system == other.system && + template == other.template && + const ListEquality().equals(context, other.context) && + format == other.format && + raw == other.raw && + keepAlive == other.keepAlive && + numKeep == other.numKeep && + seed == other.seed && + numPredict == other.numPredict && + topK == other.topK && + topP == other.topP && + tfsZ == other.tfsZ && + typicalP == other.typicalP && + repeatLastN == other.repeatLastN && + temperature == other.temperature && + repeatPenalty == other.repeatPenalty && + presencePenalty == other.presencePenalty && + frequencyPenalty == other.frequencyPenalty && + mirostat == other.mirostat && + mirostatTau == other.mirostatTau && + mirostatEta == other.mirostatEta && + penalizeNewline == other.penalizeNewline && + const ListEquality().equals(stop, other.stop) && + numa == other.numa && + numCtx == other.numCtx && + numBatch == other.numBatch && + numGpu == other.numGpu && + mainGpu == other.mainGpu && + lowVram == other.lowVram && + f16KV == other.f16KV && + logitsAll == other.logitsAll && + vocabOnly == other.vocabOnly && + useMmap == other.useMmap && + useMlock == other.useMlock && + numThread == other.numThread && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + system.hashCode ^ + template.hashCode ^ + const ListEquality().hash(context) ^ + format.hashCode ^ + raw.hashCode ^ + keepAlive.hashCode ^ + numKeep.hashCode ^ + seed.hashCode ^ + numPredict.hashCode ^ + topK.hashCode ^ + topP.hashCode ^ + tfsZ.hashCode ^ + typicalP.hashCode ^ + repeatLastN.hashCode ^ + temperature.hashCode ^ + repeatPenalty.hashCode ^ + presencePenalty.hashCode ^ + frequencyPenalty.hashCode ^ + mirostat.hashCode ^ + mirostatTau.hashCode ^ + mirostatEta.hashCode ^ + penalizeNewline.hashCode ^ + const ListEquality().hash(stop) ^ + numa.hashCode ^ + numCtx.hashCode ^ + numBatch.hashCode ^ + numGpu.hashCode ^ + mainGpu.hashCode ^ + lowVram.hashCode ^ + f16KV.hashCode ^ + logitsAll.hashCode ^ + vocabOnly.hashCode ^ + useMmap.hashCode ^ + useMlock.hashCode ^ + numThread.hashCode ^ + concurrencyLimit.hashCode; + } } /// The format to return a response in. 
diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart index 0fa46c03..7e001289 100644 --- a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart +++ b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart @@ -107,7 +107,7 @@ void main() { ]), ); expect( - res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), + res.output.content.replaceAll(RegExp(r'[\s\n-]'), ''), contains('123456789'), ); expect(res.finishReason, FinishReason.stop); diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index e218637a..54c955e9 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -25,8 +25,10 @@ import 'types.dart'; /// - [Completions API docs](https://platform.openai.com/docs/api-reference/chat) /// /// You can also use this wrapper to consume OpenAI-compatible APIs like -/// [Anyscale](https://www.anyscale.com), [Together AI](https://www.together.ai), -/// [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), etc. +/// [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), +/// [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), +/// [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), +/// [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc. /// /// ### Call options /// @@ -172,7 +174,7 @@ class ChatOpenAI extends BaseChatModel { /// [OpenAI dashboard](https://platform.openai.com/account/api-keys). /// - `organization`: your OpenAI organization ID (if applicable). /// - [ChatOpenAI.encoding] - /// - [OpenAI.defaultOptions] + /// - [ChatOpenAI.defaultOptions] /// /// Advance configuration options: /// - `baseUrl`: the base URL to use. Defaults to OpenAI's API URL. You can @@ -192,7 +194,7 @@ class ChatOpenAI extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatOpenAIOptions( - model: 'gpt-3.5-turbo', + model: defaultModel, ), this.encoding, }) : _client = OpenAIClient( @@ -236,6 +238,9 @@ class ChatOpenAI extends BaseChatModel { @override String get modelType => 'openai-chat'; + /// The default model to use unless another is specified. + static const defaultModel = 'gpt-3.5-turbo'; + @override Future invoke( final PromptValue input, { @@ -288,7 +293,7 @@ class ChatOpenAI extends BaseChatModel { return CreateChatCompletionRequest( model: ChatCompletionModel.modelId( - options?.model ?? defaultOptions.model ?? throwNullModelError(), + options?.model ?? defaultOptions.model ?? defaultModel, ), messages: messagesDtos, tools: toolsDtos, @@ -334,8 +339,7 @@ class ChatOpenAI extends BaseChatModel { final PromptValue promptValue, { final ChatOpenAIOptions? options, }) async { - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final model = options?.model ?? defaultOptions.model ?? 
defaultModel; final tiktoken = _getTiktoken(); final messages = promptValue.toChatMessages(); diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 299902fe..ed53c65c 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -1,13 +1,39 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; /// {@template chat_openai_options} /// Options to pass into the OpenAI Chat Model. +/// +/// Available [ChatOpenAIOptions.model]s: +/// - `gpt-4` +/// - `gpt-4-32k` +/// - `gpt-4-32k-0314` +/// - `gpt-4-32k-0613` +/// - `gpt-4-0125-preview` +/// - `gpt-4-0314` +/// - `gpt-4-0613` +/// - `gpt-4-1106-preview` +/// - `gpt-4-turbo` +/// - `gpt-4-turbo-2024-04-09` +/// - `gpt-4-turbo-preview` +/// - `gpt-4-vision-preview` +/// - `gpt-4o` +/// - `gpt-4o-2024-05-13` +/// - `gpt-4o-mini` +/// - `gpt-4o-mini-2024-07-18` +/// - `gpt-3.5-turbo` +/// - `gpt-3.5-turbo-16k` +/// +/// Mind that the list may be outdated. +/// See https://platform.openai.com/docs/models for the latest list. /// {@endtemplate} +@immutable class ChatOpenAIOptions extends ChatModelOptions { /// {@macro chat_openai_options} const ChatOpenAIOptions({ - this.model = 'gpt-3.5-turbo', + super.model, this.frequencyPenalty, this.logitBias, this.maxTokens, @@ -18,40 +44,14 @@ class ChatOpenAIOptions extends ChatModelOptions { this.stop, this.temperature, this.topP, + super.tools, + super.toolChoice, this.parallelToolCalls, this.serviceTier, this.user, - super.tools, - super.toolChoice, super.concurrencyLimit, }); - /// ID of the model to use (e.g. 'gpt-3.5-turbo'). - /// - /// Available models: - /// - `gpt-4` - /// - `gpt-4-32k` - /// - `gpt-4-32k-0314` - /// - `gpt-4-32k-0613` - /// - `gpt-4-0125-preview` - /// - `gpt-4-0314` - /// - `gpt-4-0613` - /// - `gpt-4-1106-preview` - /// - `gpt-4-turbo` - /// - `gpt-4-turbo-2024-04-09` - /// - `gpt-4-turbo-preview` - /// - `gpt-4-vision-preview` - /// - `gpt-4o` - /// - `gpt-4o-2024-05-13` - /// - `gpt-4o-mini` - /// - `gpt-4o-mini-2024-07-18` - /// - `gpt-3.5-turbo` - /// - `gpt-3.5-turbo-16k` - /// - /// Mind that the list may be outdated. - /// See https://platform.openai.com/docs/models for the latest list. - final String? model; - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on /// their existing frequency in the text so far, decreasing the model's /// likelihood to repeat the same line verbatim. @@ -145,22 +145,23 @@ class ChatOpenAIOptions extends ChatModelOptions { /// Creates a copy of this [ChatOpenAIOptions] object with the given fields /// replaced with the new values. ChatOpenAIOptions copyWith({ - final String? model, - final double? frequencyPenalty, - final Map? logitBias, - final int? maxTokens, - final int? n, - final double? presencePenalty, - final ChatOpenAIResponseFormat? responseFormat, - final int? seed, - final List? stop, - final double? temperature, - final double? topP, - final bool? parallelToolCalls, - final ChatOpenAIServiceTier? serviceTier, - final String? user, - final List? tools, - final ChatToolChoice? toolChoice, + String? model, + double? frequencyPenalty, + Map? logitBias, + int? maxTokens, + int? n, + double? presencePenalty, + ChatOpenAIResponseFormat? responseFormat, + int? seed, + List? stop, + double? temperature, + double? topP, + List? 
tools, + ChatToolChoice? toolChoice, + bool? parallelToolCalls, + ChatOpenAIServiceTier? serviceTier, + String? user, + int? concurrencyLimit, }) { return ChatOpenAIOptions( model: model ?? this.model, @@ -174,13 +175,59 @@ class ChatOpenAIOptions extends ChatModelOptions { stop: stop ?? this.stop, temperature: temperature ?? this.temperature, topP: topP ?? this.topP, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, parallelToolCalls: parallelToolCalls ?? this.parallelToolCalls, serviceTier: serviceTier ?? this.serviceTier, user: user ?? this.user, - tools: tools ?? this.tools, - toolChoice: toolChoice ?? this.toolChoice, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatOpenAIOptions other) { + return identical(this, other) || + runtimeType == other.runtimeType && + model == other.model && + frequencyPenalty == other.frequencyPenalty && + const MapEquality() + .equals(logitBias, other.logitBias) && + maxTokens == other.maxTokens && + n == other.n && + presencePenalty == other.presencePenalty && + responseFormat == other.responseFormat && + seed == other.seed && + const ListEquality().equals(stop, other.stop) && + temperature == other.temperature && + topP == other.topP && + const ListEquality().equals(tools, other.tools) && + toolChoice == other.toolChoice && + parallelToolCalls == other.parallelToolCalls && + serviceTier == other.serviceTier && + user == other.user && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + frequencyPenalty.hashCode ^ + const MapEquality().hash(logitBias) ^ + maxTokens.hashCode ^ + n.hashCode ^ + presencePenalty.hashCode ^ + responseFormat.hashCode ^ + seed.hashCode ^ + const ListEquality().hash(stop) ^ + temperature.hashCode ^ + topP.hashCode ^ + const ListEquality().hash(tools) ^ + toolChoice.hashCode ^ + parallelToolCalls.hashCode ^ + serviceTier.hashCode ^ + user.hashCode ^ + concurrencyLimit.hashCode; + } } /// {@template chat_openai_response_format} diff --git a/packages/langchain_openai/lib/src/llms/openai.dart b/packages/langchain_openai/lib/src/llms/openai.dart index 9471acfc..aed0e9e9 100644 --- a/packages/langchain_openai/lib/src/llms/openai.dart +++ b/packages/langchain_openai/lib/src/llms/openai.dart @@ -1,3 +1,5 @@ +import 'dart:math'; + import 'package:http/http.dart' as http; import 'package:langchain_core/llms.dart'; import 'package:langchain_core/prompts.dart'; @@ -186,8 +188,9 @@ class OpenAI extends BaseLLM { final Map? queryParams, final http.Client? client, super.defaultOptions = const OpenAIOptions( - model: 'gpt-3.5-turbo-instruct', - maxTokens: 256, + model: defaultModel, + maxTokens: defaultMaxTokens, + concurrencyLimit: defaultConcurrencyLimit, ), this.encoding, }) : _client = OpenAIClient( @@ -228,6 +231,15 @@ class OpenAI extends BaseLLM { @override String get modelType => 'openai'; + /// The default model to use unless another is specified. + static const defaultModel = 'gpt-3.5-turbo-instruct'; + + /// The default max tokens to use unless another is specified. + static const defaultMaxTokens = 256; + + /// The default concurrency limit to use unless another is specified. + static const defaultConcurrencyLimit = 20; + @override Future invoke( final PromptValue input, { @@ -259,7 +271,8 @@ class OpenAI extends BaseLLM { // Otherwise, we can batch the calls to the API final finalOptions = options?.first ?? 
defaultOptions; - final concurrencyLimit = finalOptions.concurrencyLimit; + final concurrencyLimit = + min(finalOptions.concurrencyLimit, defaultConcurrencyLimit); var index = 0; final results = []; @@ -302,7 +315,7 @@ class OpenAI extends BaseLLM { }) { return CreateCompletionRequest( model: CompletionModel.modelId( - options?.model ?? defaultOptions.model ?? throwNullModelError(), + options?.model ?? defaultOptions.model ?? defaultModel, ), prompt: CompletionPrompt.listString(prompts), bestOf: options?.bestOf ?? defaultOptions.bestOf, @@ -310,7 +323,8 @@ class OpenAI extends BaseLLM { options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, logitBias: options?.logitBias ?? defaultOptions.logitBias, logprobs: options?.logprobs ?? defaultOptions.logprobs, - maxTokens: options?.maxTokens ?? defaultOptions.maxTokens, + maxTokens: + options?.maxTokens ?? defaultOptions.maxTokens ?? defaultMaxTokens, n: options?.n ?? defaultOptions.n, presencePenalty: options?.presencePenalty ?? defaultOptions.presencePenalty, @@ -340,7 +354,7 @@ class OpenAI extends BaseLLM { final encoding = this.encoding != null ? getEncoding(this.encoding!) : encodingForModel( - options?.model ?? defaultOptions.model ?? throwNullModelError(), + options?.model ?? defaultOptions.model ?? defaultModel, ); return encoding.encode(promptValue.toString()); } diff --git a/packages/langchain_openai/lib/src/llms/types.dart b/packages/langchain_openai/lib/src/llms/types.dart index 6869a4c4..7f8da471 100644 --- a/packages/langchain_openai/lib/src/llms/types.dart +++ b/packages/langchain_openai/lib/src/llms/types.dart @@ -4,17 +4,24 @@ import 'package:meta/meta.dart'; /// {@template openai_options} /// Options to pass into the OpenAI LLM. +/// +/// Available models: +/// - `gpt-3.5-turbo-instruct` +/// - `davinci-002` +/// - `babbage-002` +/// Mind that the list may be outdated. +/// See https://platform.openai.com/docs/models for the latest list. /// {@endtemplate} @immutable class OpenAIOptions extends LLMOptions { /// {@macro openai_options} const OpenAIOptions({ - this.model = 'gpt-3.5-turbo-instruct', + super.model, this.bestOf, this.frequencyPenalty, this.logitBias, this.logprobs, - this.maxTokens = 256, + this.maxTokens, this.n, this.presencePenalty, this.seed, @@ -23,20 +30,9 @@ class OpenAIOptions extends LLMOptions { this.temperature, this.topP, this.user, - super.concurrencyLimit = 20, + super.concurrencyLimit, }); - /// ID of the model to use (e.g. 'gpt-3.5-turbo-instruct'). - /// - /// Available models: - /// - `gpt-3.5-turbo-instruct` - /// - `davinci-002` - /// - `babbage-002` - /// - /// Mind that the list may be outdated. - /// See https://platform.openai.com/docs/models for the latest list. - final String? model; - /// Generates best_of completions server-side and returns the "best" /// (the one with the highest log probability per token). /// @@ -128,20 +124,21 @@ class OpenAIOptions extends LLMOptions { /// Creates a copy of this [OpenAIOptions] object with the given fields /// replaced with the new values. OpenAIOptions copyWith({ - final String? model, - final int? bestOf, - final double? frequencyPenalty, - final Map? logitBias, - final int? logprobs, - final int? maxTokens, - final int? n, - final double? presencePenalty, - final int? seed, - final List? stop, - final String? suffix, - final double? temperature, - final double? topP, - final String? user, + String? model, + int? bestOf, + double? frequencyPenalty, + Map? logitBias, + int? logprobs, + int? maxTokens, + int? n, + double? 
presencePenalty, + int? seed, + List? stop, + String? suffix, + double? temperature, + double? topP, + String? user, + int? concurrencyLimit, }) { return OpenAIOptions( model: model ?? this.model, @@ -158,42 +155,48 @@ class OpenAIOptions extends LLMOptions { temperature: temperature ?? this.temperature, topP: topP ?? this.topP, user: user ?? this.user, + concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, ); } @override - bool operator ==(covariant final OpenAIOptions other) => - identical(this, other) || - runtimeType == other.runtimeType && - model == other.model && - bestOf == other.bestOf && - frequencyPenalty == other.frequencyPenalty && - const MapEquality().equals(logitBias, other.logitBias) && - logprobs == other.logprobs && - maxTokens == other.maxTokens && - n == other.n && - presencePenalty == other.presencePenalty && - seed == other.seed && - stop == other.stop && - suffix == other.suffix && - temperature == other.temperature && - topP == other.topP && - user == other.user; + bool operator ==(covariant final OpenAIOptions other) { + return identical(this, other) || + runtimeType == other.runtimeType && + model == other.model && + bestOf == other.bestOf && + frequencyPenalty == other.frequencyPenalty && + const MapEquality() + .equals(logitBias, other.logitBias) && + logprobs == other.logprobs && + maxTokens == other.maxTokens && + n == other.n && + presencePenalty == other.presencePenalty && + seed == other.seed && + const ListEquality().equals(stop, other.stop) && + suffix == other.suffix && + temperature == other.temperature && + topP == other.topP && + user == other.user && + concurrencyLimit == other.concurrencyLimit; + } @override - int get hashCode => - model.hashCode ^ - bestOf.hashCode ^ - frequencyPenalty.hashCode ^ - const MapEquality().hash(logitBias) ^ - logprobs.hashCode ^ - maxTokens.hashCode ^ - n.hashCode ^ - presencePenalty.hashCode ^ - seed.hashCode ^ - stop.hashCode ^ - suffix.hashCode ^ - temperature.hashCode ^ - topP.hashCode ^ - user.hashCode; + int get hashCode { + return model.hashCode ^ + bestOf.hashCode ^ + frequencyPenalty.hashCode ^ + const MapEquality().hash(logitBias) ^ + logprobs.hashCode ^ + maxTokens.hashCode ^ + n.hashCode ^ + presencePenalty.hashCode ^ + seed.hashCode ^ + const ListEquality().hash(stop) ^ + suffix.hashCode ^ + temperature.hashCode ^ + topP.hashCode ^ + user.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_openai/test/chains/qa_with_sources_test.dart b/packages/langchain_openai/test/chains/qa_with_sources_test.dart index b1080986..a94ea862 100644 --- a/packages/langchain_openai/test/chains/qa_with_sources_test.dart +++ b/packages/langchain_openai/test/chains/qa_with_sources_test.dart @@ -53,7 +53,6 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-3.5-turbo-0613', temperature: 0, ), ); diff --git a/packages/langchain_openai/test/chat_models/anyscale_test.dart b/packages/langchain_openai/test/chat_models/anyscale_test.dart index 1a2fdef1..f0a99e88 100644 --- a/packages/langchain_openai/test/chat_models/anyscale_test.dart +++ b/packages/langchain_openai/test/chat_models/anyscale_test.dart @@ -30,8 +30,6 @@ void main() { 'codellama/CodeLlama-34b-Instruct-hf', 'mistralai/Mistral-7B-Instruct-v0.1', 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'HuggingFaceH4/zephyr-7b-beta', - 'Open-Orca/Mistral-7B-OpenOrca', ]; for (final model in models) { final res = await chatModel.invoke( @@ -67,8 +65,6 @@ void main() { 
'codellama/CodeLlama-34b-Instruct-hf', 'mistralai/Mistral-7B-Instruct-v0.1', 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'HuggingFaceH4/zephyr-7b-beta', - 'Open-Orca/Mistral-7B-OpenOrca', ]; for (final model in models) { final stream = chatModel.stream( diff --git a/packages/langchain_openai/test/chat_models/chat_openai_test.dart b/packages/langchain_openai/test/chat_models/chat_openai_test.dart index 6268a77b..7c2d95d1 100644 --- a/packages/langchain_openai/test/chat_models/chat_openai_test.dart +++ b/packages/langchain_openai/test/chat_models/chat_openai_test.dart @@ -208,7 +208,6 @@ void main() { test('Test countTokens messages', () async { final models = [ - 'gpt-3.5-turbo-0301', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-4-0314', From ac94b222203c8e7d838a8afa8efe064c9491d113 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 20 Jul 2024 18:21:45 +0200 Subject: [PATCH 075/251] feat: Implement additive options merging for cascade bind calls (#500) --- .../lib/src/chat_models/types.dart | 39 ++-- .../lib/src/tools/tavily/types.dart | 50 +++++ .../langchain_core/lib/src/chains/types.dart | 2 +- .../lib/src/chat_models/fake.dart | 179 ++++++++++++++++-- .../lib/src/chat_models/types.dart | 3 +- .../lib/src/langchain/types.dart | 2 +- .../lib/src/language_models/types.dart | 2 +- .../langchain_core/lib/src/llms/types.dart | 2 +- .../lib/src/output_parsers/types.dart | 4 +- .../lib/src/retrievers/types.dart | 4 +- .../lib/src/runnables/binding.dart | 4 +- .../lib/src/runnables/types.dart | 27 +++ .../langchain_core/lib/src/tools/types.dart | 4 +- .../test/runnables/binding_test.dart | 26 +++ .../lib/src/chat_models/vertex_ai/types.dart | 23 ++- .../lib/src/chat_models/google_ai/types.dart | 22 ++- .../lib/src/chat_models/vertex_ai/types.dart | 19 +- .../lib/src/llms/vertex_ai/types.dart | 17 +- .../lib/src/chat_models/types.dart | 16 +- .../src/chat_models/chat_ollama/types.dart | 42 +++- .../chat_models/chat_ollama_tools/types.dart | 25 ++- .../langchain_ollama/lib/src/llms/types.dart | 46 ++++- .../lib/src/chat_models/types.dart | 60 ++++-- .../langchain_openai/lib/src/llms/types.dart | 54 ++++-- .../langchain_openai/lib/src/tools/types.dart | 59 ++++++ .../test/chat_models/chat_openai_test.dart | 140 ++++++++++---- 26 files changed, 733 insertions(+), 138 deletions(-) diff --git a/packages/langchain_anthropic/lib/src/chat_models/types.dart b/packages/langchain_anthropic/lib/src/chat_models/types.dart index 98069444..f91abdb3 100644 --- a/packages/langchain_anthropic/lib/src/chat_models/types.dart +++ b/packages/langchain_anthropic/lib/src/chat_models/types.dart @@ -86,19 +86,18 @@ class ChatAnthropicOptions extends ChatModelOptions { /// name, email address, or phone number. final String? userId; - /// Creates a copy of this [ChatAnthropicOptions] object with the given fields - /// replaced with the new values. + @override ChatAnthropicOptions copyWith({ - String? model, - int? maxTokens, - List? stopSequences, - double? temperature, - int? topK, - double? topP, - String? userId, - List? tools, - ChatToolChoice? toolChoice, - int? concurrencyLimit, + final String? model, + final int? maxTokens, + final List? stopSequences, + final double? temperature, + final int? topK, + final double? topP, + final String? userId, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, }) { return ChatAnthropicOptions( model: model ?? 
this.model, @@ -114,6 +113,22 @@ class ChatAnthropicOptions extends ChatModelOptions { ); } + @override + ChatAnthropicOptions merge(covariant final ChatAnthropicOptions? other) { + return copyWith( + model: other?.model, + maxTokens: other?.maxTokens, + stopSequences: other?.stopSequences, + temperature: other?.temperature, + topK: other?.topK, + topP: other?.topP, + userId: other?.userId, + tools: other?.tools, + toolChoice: other?.toolChoice, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatAnthropicOptions other) { return model == other.model && diff --git a/packages/langchain_community/lib/src/tools/tavily/types.dart b/packages/langchain_community/lib/src/tools/tavily/types.dart index d6dc9134..872723cf 100644 --- a/packages/langchain_community/lib/src/tools/tavily/types.dart +++ b/packages/langchain_community/lib/src/tools/tavily/types.dart @@ -108,6 +108,7 @@ class TavilySearchResultsToolOptions extends ToolOptions { /// {@template tavily_answer_tool_options} /// Generation options to pass into the [TavilyAnswerTool]. /// {@endtemplate} +@immutable class TavilyAnswerToolOptions extends ToolOptions { /// {@macro tavily_answer_tool_options} const TavilyAnswerToolOptions({ @@ -115,6 +116,7 @@ class TavilyAnswerToolOptions extends ToolOptions { this.searchDepth = TavilySearchDepth.basic, this.includeDomains, this.excludeDomains, + super.concurrencyLimit, }); /// The number of maximum search results to return. @@ -128,4 +130,52 @@ class TavilyAnswerToolOptions extends ToolOptions { /// A list of domains to specifically exclude from the search results. final List? excludeDomains; + + @override + TavilyAnswerToolOptions copyWith({ + final int? maxResults, + final TavilySearchDepth? searchDepth, + final List? includeDomains, + final List? excludeDomains, + final int? concurrencyLimit, + }) { + return TavilyAnswerToolOptions( + maxResults: maxResults ?? this.maxResults, + searchDepth: searchDepth ?? this.searchDepth, + includeDomains: includeDomains ?? this.includeDomains, + excludeDomains: excludeDomains ?? this.excludeDomains, + concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, + ); + } + + @override + TavilyAnswerToolOptions merge( + covariant final TavilyAnswerToolOptions? other, + ) { + return copyWith( + maxResults: other?.maxResults, + searchDepth: other?.searchDepth, + includeDomains: other?.includeDomains, + excludeDomains: other?.excludeDomains, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final TavilyAnswerToolOptions other) { + return maxResults == other.maxResults && + searchDepth == other.searchDepth && + includeDomains == other.includeDomains && + excludeDomains == other.excludeDomains && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return maxResults.hashCode ^ + searchDepth.hashCode ^ + includeDomains.hashCode ^ + excludeDomains.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_core/lib/src/chains/types.dart b/packages/langchain_core/lib/src/chains/types.dart index f677381e..e76d876c 100644 --- a/packages/langchain_core/lib/src/chains/types.dart +++ b/packages/langchain_core/lib/src/chains/types.dart @@ -6,7 +6,7 @@ import '../langchain/types.dart'; typedef ChainValues = Map; /// {@template chain_options} -/// Options to pass to a chain. +/// Options to pass to the chain. 
/// {@endtemplate} @immutable class ChainOptions extends BaseLangChainOptions { diff --git a/packages/langchain_core/lib/src/chat_models/fake.dart b/packages/langchain_core/lib/src/chat_models/fake.dart index f465223d..79c44d1b 100644 --- a/packages/langchain_core/lib/src/chat_models/fake.dart +++ b/packages/langchain_core/lib/src/chat_models/fake.dart @@ -1,3 +1,5 @@ +import 'package:collection/collection.dart'; + import '../../language_models.dart'; import '../prompts/types.dart'; import 'base.dart'; @@ -7,11 +9,12 @@ import 'types.dart'; /// Fake Chat Model for testing. /// You can pass in a list of responses to return in order when called. /// {@endtemplate} -class FakeChatModel extends SimpleChatModel { +class FakeChatModel extends BaseChatModel { /// {@macro fake_list_llm} FakeChatModel({ required this.responses, - }) : super(defaultOptions: const ChatModelOptions()); + super.defaultOptions = const FakeChatModelOptions(), + }); /// Responses to return in order when called. final List responses; @@ -22,17 +25,28 @@ class FakeChatModel extends SimpleChatModel { String get modelType => 'fake-chat-model'; @override - Future callInternal( - final List messages, { - final ChatModelOptions? options, - }) { - return Future.value(responses[_i++ % responses.length]); + Future invoke( + final PromptValue input, { + final FakeChatModelOptions? options, + }) async { + final text = responses[_i++ % responses.length]; + final message = AIChatMessage(content: text); + return ChatResult( + id: '1', + output: message, + finishReason: FinishReason.unspecified, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + }, + usage: const LanguageModelUsage(), + ); } @override Stream stream( final PromptValue input, { - final ChatModelOptions? options, + final FakeChatModelOptions? options, }) { final res = responses[_i++ % responses.length].split(''); return Stream.fromIterable(res).map( @@ -40,7 +54,10 @@ class FakeChatModel extends SimpleChatModel { id: 'fake-chat-model', output: AIChatMessage(content: char), finishReason: FinishReason.stop, - metadata: const {}, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + }, usage: const LanguageModelUsage(), streaming: true, ), @@ -60,30 +77,96 @@ class FakeChatModel extends SimpleChatModel { } } -/// {@template fake_echo_llm} +/// {@template fake_chat_model_options} +/// Fake Chat Model Options for testing. +/// {@endtemplate} +class FakeChatModelOptions extends ChatModelOptions { + /// {@macro fake_chat_model_options} + const FakeChatModelOptions({ + super.model, + this.metadata, + super.concurrencyLimit, + }); + + /// Metadata. + final Map? metadata; + + @override + FakeChatModelOptions copyWith({ + final String? model, + final Map? metadata, + final int? concurrencyLimit, + }) { + return FakeChatModelOptions( + model: model ?? this.model, + metadata: metadata ?? this.metadata, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } + + @override + FakeChatModelOptions merge( + covariant final FakeChatModelOptions? 
other, + ) { + return copyWith( + model: other?.model, + metadata: other?.metadata, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final FakeChatModelOptions other) { + return model == other.model && + const MapEquality().equals(metadata, other.metadata) && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + const MapEquality().hash(metadata) ^ + concurrencyLimit.hashCode; + } +} + +/// {@template fake_echo_chat_model} /// Fake Chat Model for testing. /// It just returns the content of the last message of the prompt /// or streams it char by char. /// {@endtemplate} -class FakeEchoChatModel extends SimpleChatModel { - /// {@macro fake_echo_llm} - const FakeEchoChatModel() : super(defaultOptions: const ChatModelOptions()); +class FakeEchoChatModel extends BaseChatModel { + /// {@macro fake_echo_chat_model} + const FakeEchoChatModel({ + super.defaultOptions = const FakeEchoChatModelOptions(), + }); @override String get modelType => 'fake-echo-chat-model'; @override - Future callInternal( - final List messages, { - final ChatModelOptions? options, - }) { - return Future.value(messages.last.contentAsString); + Future invoke( + final PromptValue input, { + final FakeEchoChatModelOptions? options, + }) async { + final text = input.toChatMessages().last.contentAsString; + final message = AIChatMessage(content: text); + return ChatResult( + id: '1', + output: message, + finishReason: FinishReason.unspecified, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + }, + usage: const LanguageModelUsage(), + ); } @override Stream stream( final PromptValue input, { - final ChatModelOptions? options, + final FakeEchoChatModelOptions? options, }) { final prompt = input.toChatMessages().first.contentAsString.split(''); return Stream.fromIterable(prompt).map( @@ -91,7 +174,10 @@ class FakeEchoChatModel extends SimpleChatModel { id: 'fake-echo-chat-model', output: AIChatMessage(content: char), finishReason: FinishReason.stop, - metadata: const {}, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + }, usage: const LanguageModelUsage(), streaming: true, ), @@ -110,3 +196,56 @@ class FakeEchoChatModel extends SimpleChatModel { .toList(growable: false); } } + +/// {@template fake_chat_model_options} +/// Fake Echo Chat Model Options for testing. +/// {@endtemplate} +class FakeEchoChatModelOptions extends ChatModelOptions { + /// {@macro fake_chat_model_options} + const FakeEchoChatModelOptions({ + super.model, + this.metadata, + super.concurrencyLimit, + }); + + /// Metadata. + final Map? metadata; + + @override + FakeEchoChatModelOptions copyWith({ + final String? model, + final Map? metadata, + final int? concurrencyLimit, + }) { + return FakeEchoChatModelOptions( + model: model ?? this.model, + metadata: metadata ?? this.metadata, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } + + @override + FakeEchoChatModelOptions merge( + covariant final FakeEchoChatModelOptions? 
other, + ) { + return copyWith( + model: other?.model, + metadata: other?.metadata, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final FakeEchoChatModelOptions other) { + return model == other.model && + const MapEquality().equals(metadata, other.metadata) && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + const MapEquality().hash(metadata) ^ + concurrencyLimit.hashCode; + } +} diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index e9b788c7..f9c2aff3 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -7,7 +7,8 @@ import '../tools/base.dart'; /// {@template chat_model_options} /// Generation options to pass into the Chat Model. /// {@endtemplate} -class ChatModelOptions extends LanguageModelOptions { +@immutable +abstract class ChatModelOptions extends LanguageModelOptions { /// {@macro chat_model_options} const ChatModelOptions({ super.model, diff --git a/packages/langchain_core/lib/src/langchain/types.dart b/packages/langchain_core/lib/src/langchain/types.dart index 8dabca52..091429d6 100644 --- a/packages/langchain_core/lib/src/langchain/types.dart +++ b/packages/langchain_core/lib/src/langchain/types.dart @@ -3,7 +3,7 @@ import 'package:meta/meta.dart'; import '../runnables/types.dart'; /// {@template base_lang_chain_options} -/// Base class for LangChain components' options. +/// Base options class for LangChain components. /// {@endtemplate} @immutable class BaseLangChainOptions extends RunnableOptions { diff --git a/packages/langchain_core/lib/src/language_models/types.dart b/packages/langchain_core/lib/src/language_models/types.dart index c2e6df11..3b52ee63 100644 --- a/packages/langchain_core/lib/src/language_models/types.dart +++ b/packages/langchain_core/lib/src/language_models/types.dart @@ -4,7 +4,7 @@ import 'package:meta/meta.dart'; import '../langchain/types.dart'; /// {@template language_model_options} -/// Generation options to pass into the language model. +/// Options to pass into the language model. /// {@endtemplate} @immutable abstract class LanguageModelOptions extends BaseLangChainOptions { diff --git a/packages/langchain_core/lib/src/llms/types.dart b/packages/langchain_core/lib/src/llms/types.dart index 47b98285..02a506de 100644 --- a/packages/langchain_core/lib/src/llms/types.dart +++ b/packages/langchain_core/lib/src/llms/types.dart @@ -3,7 +3,7 @@ import 'package:meta/meta.dart'; import '../language_models/types.dart'; /// {@template llm_options} -/// Generation options to pass into the LLM. +/// Options to pass into the LLM. 
/// {@endtemplate} @immutable class LLMOptions extends LanguageModelOptions { diff --git a/packages/langchain_core/lib/src/output_parsers/types.dart b/packages/langchain_core/lib/src/output_parsers/types.dart index 460840fa..9e8906b7 100644 --- a/packages/langchain_core/lib/src/output_parsers/types.dart +++ b/packages/langchain_core/lib/src/output_parsers/types.dart @@ -60,7 +60,9 @@ class ParsedToolCall { } @override - int get hashCode => id.hashCode ^ name.hashCode ^ arguments.hashCode; + int get hashCode { + return id.hashCode ^ name.hashCode ^ arguments.hashCode; + } @override String toString() { diff --git a/packages/langchain_core/lib/src/retrievers/types.dart b/packages/langchain_core/lib/src/retrievers/types.dart index 4ed82147..a80412e2 100644 --- a/packages/langchain_core/lib/src/retrievers/types.dart +++ b/packages/langchain_core/lib/src/retrievers/types.dart @@ -9,7 +9,9 @@ import '../vector_stores/types.dart'; @immutable class RetrieverOptions extends BaseLangChainOptions { /// {@macro retriever_options} - const RetrieverOptions(); + const RetrieverOptions({ + super.concurrencyLimit, + }); } /// {@template vector_store_retriever_options} diff --git a/packages/langchain_core/lib/src/runnables/binding.dart b/packages/langchain_core/lib/src/runnables/binding.dart index a1b9f907..1bd1bee4 100644 --- a/packages/langchain_core/lib/src/runnables/binding.dart +++ b/packages/langchain_core/lib/src/runnables/binding.dart @@ -60,7 +60,9 @@ class RunnableBinding? safetySettings; - /// Creates a copy of this [ChatFirebaseVertexAIOptions] object with the given fields - /// replaced with the new values. + @override ChatFirebaseVertexAIOptions copyWith({ final String? model, final double? topP, @@ -122,6 +121,26 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { ); } + @override + ChatFirebaseVertexAIOptions merge( + covariant final ChatFirebaseVertexAIOptions? other, + ) { + return copyWith( + model: other?.model, + topP: other?.topP, + topK: other?.topK, + candidateCount: other?.candidateCount, + maxOutputTokens: other?.maxOutputTokens, + temperature: other?.temperature, + stopSequences: other?.stopSequences, + responseMimeType: other?.responseMimeType, + safetySettings: other?.safetySettings, + tools: other?.tools, + toolChoice: other?.toolChoice, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatFirebaseVertexAIOptions other) { return model == other.model && diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart index 8c4bff41..4c2f4063 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart @@ -115,8 +115,7 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// the default safety setting for that category. final List? safetySettings; - /// Creates a copy of this [ChatGoogleGenerativeAIOptions] object with the given fields - /// replaced with the new values. + @override ChatGoogleGenerativeAIOptions copyWith({ final String? model, final double? topP, @@ -145,6 +144,25 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { ); } + @override + ChatGoogleGenerativeAIOptions merge( + covariant final ChatGoogleGenerativeAIOptions? 
other, + ) { + return copyWith( + model: other?.model, + topP: other?.topP, + topK: other?.topK, + candidateCount: other?.candidateCount, + maxOutputTokens: other?.maxOutputTokens, + temperature: other?.temperature, + stopSequences: other?.stopSequences, + safetySettings: other?.safetySettings, + tools: other?.tools, + toolChoice: other?.toolChoice, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatGoogleGenerativeAIOptions other) { return model == other.model && diff --git a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart index c0642867..019ab64e 100644 --- a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart @@ -97,8 +97,7 @@ class ChatVertexAIOptions extends ChatModelOptions { /// List of messages to the model to learn how to respond to the conversation. final List? examples; - /// Creates a copy of this [ChatVertexAIOptions] object with the given fields - /// replaced with the new values. + @override ChatVertexAIOptions copyWith({ final String? publisher, final String? model, @@ -125,6 +124,22 @@ class ChatVertexAIOptions extends ChatModelOptions { ); } + @override + ChatVertexAIOptions merge(covariant ChatVertexAIOptions? other) { + return copyWith( + publisher: other?.publisher, + model: other?.model, + maxOutputTokens: other?.maxOutputTokens, + temperature: other?.temperature, + topP: other?.topP, + topK: other?.topK, + stopSequences: other?.stopSequences, + candidateCount: other?.candidateCount, + examples: other?.examples, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatVertexAIOptions other) { return publisher == other.publisher && diff --git a/packages/langchain_google/lib/src/llms/vertex_ai/types.dart b/packages/langchain_google/lib/src/llms/vertex_ai/types.dart index f9eee704..e11589c4 100644 --- a/packages/langchain_google/lib/src/llms/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/llms/vertex_ai/types.dart @@ -93,8 +93,7 @@ class VertexAIOptions extends LLMOptions { /// Range: `[1–8]` final int? candidateCount; - /// Creates a copy of this [VertexAIOptions] object with the given fields - /// replaced with the new values. + @override VertexAIOptions copyWith({ final String? publisher, final String? model, @@ -120,6 +119,20 @@ class VertexAIOptions extends LLMOptions { } @override + VertexAIOptions merge(covariant final VertexAIOptions? other) { + return copyWith( + publisher: other?.publisher, + model: other?.model, + maxOutputTokens: other?.maxOutputTokens, + temperature: other?.temperature, + topP: other?.topP, + topK: other?.topK, + stopSequences: other?.stopSequences, + candidateCount: other?.candidateCount, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final VertexAIOptions other) { return publisher == other.publisher && diff --git a/packages/langchain_mistralai/lib/src/chat_models/types.dart b/packages/langchain_mistralai/lib/src/chat_models/types.dart index aa2f9537..e6ba07b9 100644 --- a/packages/langchain_mistralai/lib/src/chat_models/types.dart +++ b/packages/langchain_mistralai/lib/src/chat_models/types.dart @@ -46,8 +46,7 @@ class ChatMistralAIOptions extends ChatModelOptions { /// If set, different calls will generate deterministic results. final int? 
randomSeed; - /// Creates a copy of this [ChatMistralAIOptions] object with the given fields - /// replaced with the new values. + @override ChatMistralAIOptions copyWith({ final String? model, final double? temperature, @@ -68,6 +67,19 @@ class ChatMistralAIOptions extends ChatModelOptions { ); } + @override + ChatMistralAIOptions merge(covariant ChatMistralAIOptions? other) { + return copyWith( + model: other?.model, + temperature: other?.temperature, + topP: other?.topP, + maxTokens: other?.maxTokens, + safePrompt: other?.safePrompt, + randomSeed: other?.randomSeed, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatMistralAIOptions other) { return model == other.model && diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart index 971f259c..137d0bdf 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart @@ -201,8 +201,7 @@ class ChatOllamaOptions extends ChatModelOptions { /// the logical number of cores). final int? numThread; - /// Creates a copy of this [ChatOllamaOptions] object with the given fields - /// replaced with the new values. + @override ChatOllamaOptions copyWith({ final String? model, final OllamaResponseFormat? format, @@ -275,6 +274,45 @@ class ChatOllamaOptions extends ChatModelOptions { ); } + @override + ChatOllamaOptions merge(covariant final ChatOllamaOptions? other) { + return copyWith( + model: other?.model, + format: other?.format, + keepAlive: other?.keepAlive, + numKeep: other?.numKeep, + seed: other?.seed, + numPredict: other?.numPredict, + topK: other?.topK, + topP: other?.topP, + tfsZ: other?.tfsZ, + typicalP: other?.typicalP, + repeatLastN: other?.repeatLastN, + temperature: other?.temperature, + repeatPenalty: other?.repeatPenalty, + presencePenalty: other?.presencePenalty, + frequencyPenalty: other?.frequencyPenalty, + mirostat: other?.mirostat, + mirostatTau: other?.mirostatTau, + mirostatEta: other?.mirostatEta, + penalizeNewline: other?.penalizeNewline, + stop: other?.stop, + numa: other?.numa, + numCtx: other?.numCtx, + numBatch: other?.numBatch, + numGpu: other?.numGpu, + mainGpu: other?.mainGpu, + lowVram: other?.lowVram, + f16KV: other?.f16KV, + logitsAll: other?.logitsAll, + vocabOnly: other?.vocabOnly, + useMmap: other?.useMmap, + useMlock: other?.useMlock, + numThread: other?.numThread, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatOllamaOptions other) { return model == other.model && diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart index f10f1186..7ad2615a 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart @@ -19,6 +19,7 @@ class ChatOllamaToolsOptions extends ChatModelOptions { super.tools, super.toolChoice, this.toolsSystemPromptTemplate, + super.concurrencyLimit, }); /// [ChatOllamaOptions] to pass into Ollama. @@ -61,13 +62,13 @@ Example response format: Ensure your response is valid JSON and follows this exact format. '''; - /// Creates a copy of this [ChatOllamaToolsOptions] object with the given - /// fields replaced with the new values. 
+ @override ChatOllamaToolsOptions copyWith({ - ChatOllamaOptions? options, - List? tools, - ChatToolChoice? toolChoice, - String? toolsSystemPromptTemplate, + final ChatOllamaOptions? options, + final List? tools, + final ChatToolChoice? toolChoice, + final String? toolsSystemPromptTemplate, + final int? concurrencyLimit, }) { return ChatOllamaToolsOptions( options: options ?? this.options, @@ -75,6 +76,18 @@ Ensure your response is valid JSON and follows this exact format. toolChoice: toolChoice ?? this.toolChoice, toolsSystemPromptTemplate: toolsSystemPromptTemplate ?? this.toolsSystemPromptTemplate, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } + + @override + ChatOllamaToolsOptions merge(covariant final ChatOllamaToolsOptions? other) { + return copyWith( + options: other?.options, + tools: other?.tools, + toolChoice: other?.toolChoice, + toolsSystemPromptTemplate: other?.toolsSystemPromptTemplate, + concurrencyLimit: other?.concurrencyLimit, ); } diff --git a/packages/langchain_ollama/lib/src/llms/types.dart b/packages/langchain_ollama/lib/src/llms/types.dart index 494e759e..ecad337d 100644 --- a/packages/langchain_ollama/lib/src/llms/types.dart +++ b/packages/langchain_ollama/lib/src/llms/types.dart @@ -222,8 +222,7 @@ class OllamaOptions extends LLMOptions { /// the logical number of cores). final int? numThread; - /// Creates a copy of this [OllamaOptions] object with the given fields - /// replaced with the new values. + @override OllamaOptions copyWith({ final String? model, final String? system, @@ -304,6 +303,49 @@ class OllamaOptions extends LLMOptions { ); } + @override + OllamaOptions merge(covariant final OllamaOptions? other) { + return copyWith( + model: other?.model, + system: other?.system, + template: other?.template, + context: other?.context, + format: other?.format, + raw: other?.raw, + keepAlive: other?.keepAlive, + numKeep: other?.numKeep, + seed: other?.seed, + numPredict: other?.numPredict, + topK: other?.topK, + topP: other?.topP, + tfsZ: other?.tfsZ, + typicalP: other?.typicalP, + repeatLastN: other?.repeatLastN, + temperature: other?.temperature, + repeatPenalty: other?.repeatPenalty, + presencePenalty: other?.presencePenalty, + frequencyPenalty: other?.frequencyPenalty, + mirostat: other?.mirostat, + mirostatTau: other?.mirostatTau, + mirostatEta: other?.mirostatEta, + penalizeNewline: other?.penalizeNewline, + stop: other?.stop, + numa: other?.numa, + numCtx: other?.numCtx, + numBatch: other?.numBatch, + numGpu: other?.numGpu, + mainGpu: other?.mainGpu, + lowVram: other?.lowVram, + f16KV: other?.f16KV, + logitsAll: other?.logitsAll, + vocabOnly: other?.vocabOnly, + useMmap: other?.useMmap, + useMlock: other?.useMlock, + numThread: other?.numThread, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final OllamaOptions other) { return identical(this, other) || diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index ed53c65c..9712ff59 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -142,26 +142,25 @@ class ChatOpenAIOptions extends ChatModelOptions { /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids final String? user; - /// Creates a copy of this [ChatOpenAIOptions] object with the given fields - /// replaced with the new values. + @override ChatOpenAIOptions copyWith({ - String? 
model, - double? frequencyPenalty, - Map? logitBias, - int? maxTokens, - int? n, - double? presencePenalty, - ChatOpenAIResponseFormat? responseFormat, - int? seed, - List? stop, - double? temperature, - double? topP, - List? tools, - ChatToolChoice? toolChoice, - bool? parallelToolCalls, - ChatOpenAIServiceTier? serviceTier, - String? user, - int? concurrencyLimit, + final String? model, + final double? frequencyPenalty, + final Map? logitBias, + final int? maxTokens, + final int? n, + final double? presencePenalty, + final ChatOpenAIResponseFormat? responseFormat, + final int? seed, + final List? stop, + final double? temperature, + final double? topP, + final List? tools, + final ChatToolChoice? toolChoice, + final bool? parallelToolCalls, + final ChatOpenAIServiceTier? serviceTier, + final String? user, + final int? concurrencyLimit, }) { return ChatOpenAIOptions( model: model ?? this.model, @@ -184,6 +183,29 @@ class ChatOpenAIOptions extends ChatModelOptions { ); } + @override + ChatOpenAIOptions merge(covariant final ChatOpenAIOptions? other) { + return copyWith( + model: other?.model, + frequencyPenalty: other?.frequencyPenalty, + logitBias: other?.logitBias, + maxTokens: other?.maxTokens, + n: other?.n, + presencePenalty: other?.presencePenalty, + responseFormat: other?.responseFormat, + seed: other?.seed, + stop: other?.stop, + temperature: other?.temperature, + topP: other?.topP, + tools: other?.tools, + toolChoice: other?.toolChoice, + parallelToolCalls: other?.parallelToolCalls, + serviceTier: other?.serviceTier, + user: other?.user, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final ChatOpenAIOptions other) { return identical(this, other) || diff --git a/packages/langchain_openai/lib/src/llms/types.dart b/packages/langchain_openai/lib/src/llms/types.dart index 7f8da471..a6bc2ee2 100644 --- a/packages/langchain_openai/lib/src/llms/types.dart +++ b/packages/langchain_openai/lib/src/llms/types.dart @@ -121,24 +121,23 @@ class OpenAIOptions extends LLMOptions { /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids final String? user; - /// Creates a copy of this [OpenAIOptions] object with the given fields - /// replaced with the new values. + @override OpenAIOptions copyWith({ - String? model, - int? bestOf, - double? frequencyPenalty, - Map? logitBias, - int? logprobs, - int? maxTokens, - int? n, - double? presencePenalty, - int? seed, - List? stop, - String? suffix, - double? temperature, - double? topP, - String? user, - int? concurrencyLimit, + final String? model, + final int? bestOf, + final double? frequencyPenalty, + final Map? logitBias, + final int? logprobs, + final int? maxTokens, + final int? n, + final double? presencePenalty, + final int? seed, + final List? stop, + final String? suffix, + final double? temperature, + final double? topP, + final String? user, + final int? concurrencyLimit, }) { return OpenAIOptions( model: model ?? this.model, @@ -159,6 +158,27 @@ class OpenAIOptions extends LLMOptions { ); } + @override + OpenAIOptions merge(covariant final OpenAIOptions? 
other) { + return copyWith( + model: other?.model, + bestOf: other?.bestOf, + frequencyPenalty: other?.frequencyPenalty, + logitBias: other?.logitBias, + logprobs: other?.logprobs, + maxTokens: other?.maxTokens, + n: other?.n, + presencePenalty: other?.presencePenalty, + seed: other?.seed, + stop: other?.stop, + suffix: other?.suffix, + temperature: other?.temperature, + topP: other?.topP, + user: other?.user, + concurrencyLimit: other?.concurrencyLimit, + ); + } + @override bool operator ==(covariant final OpenAIOptions other) { return identical(this, other) || diff --git a/packages/langchain_openai/lib/src/tools/types.dart b/packages/langchain_openai/lib/src/tools/types.dart index 3b049dc6..086ba0f5 100644 --- a/packages/langchain_openai/lib/src/tools/types.dart +++ b/packages/langchain_openai/lib/src/tools/types.dart @@ -1,10 +1,12 @@ import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; import 'dall_e.dart'; /// {@template open_ai_dall_e_tool_options} /// Generation options to pass into the [OpenAIDallETool]. /// {@endtemplate} +@immutable class OpenAIDallEToolOptions extends ToolOptions { /// {@macro open_ai_dall_e_tool_options} const OpenAIDallEToolOptions({ @@ -14,6 +16,7 @@ class OpenAIDallEToolOptions extends ToolOptions { this.size = ImageSize.v1024x1024, this.style = ImageStyle.vivid, this.user, + super.concurrencyLimit, }); /// ID of the model to use (e.g. `dall-e-2` or 'dall-e-3'). @@ -63,4 +66,60 @@ class OpenAIDallEToolOptions extends ToolOptions { /// /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids final String? user; + + @override + OpenAIDallEToolOptions copyWith({ + final String? model, + final ImageQuality? quality, + final ImageResponseFormat? responseFormat, + final ImageSize? size, + final ImageStyle? style, + final String? user, + final int? concurrencyLimit, + }) { + return OpenAIDallEToolOptions( + model: model ?? this.model, + quality: quality ?? this.quality, + responseFormat: responseFormat ?? this.responseFormat, + size: size ?? this.size, + style: style ?? this.style, + user: user ?? this.user, + concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, + ); + } + + @override + OpenAIDallEToolOptions merge(covariant final OpenAIDallEToolOptions? 
other) { + return copyWith( + model: other?.model, + quality: other?.quality, + responseFormat: other?.responseFormat, + size: other?.size, + style: other?.style, + user: other?.user, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final OpenAIDallEToolOptions other) { + return model == other.model && + quality == other.quality && + responseFormat == other.responseFormat && + size == other.size && + style == other.style && + user == other.user && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + quality.hashCode ^ + responseFormat.hashCode ^ + size.hashCode ^ + style.hashCode ^ + user.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_openai/test/chat_models/chat_openai_test.dart b/packages/langchain_openai/test/chat_models/chat_openai_test.dart index 7c2d95d1..edb42b2e 100644 --- a/packages/langchain_openai/test/chat_models/chat_openai_test.dart +++ b/packages/langchain_openai/test/chat_models/chat_openai_test.dart @@ -118,36 +118,36 @@ void main() { expect(res.content, isNotEmpty); }); + const getCurrentWeatherTool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + test('Test ChatOpenAI tool calling', timeout: const Timeout(Duration(minutes: 1)), () async { final chat = ChatOpenAI(apiKey: openaiApiKey); - const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. 
San Francisco, CA', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - final humanMessage = ChatMessage.humanText( 'What’s the weather like in Boston right now?', ); final res1 = await chat.invoke( PromptValue.chat([humanMessage]), - options: const ChatOpenAIOptions(tools: [tool]), + options: const ChatOpenAIOptions(tools: [getCurrentWeatherTool]), ); final aiMessage1 = res1.output; @@ -156,7 +156,7 @@ void main() { expect(aiMessage1.toolCalls, isNotEmpty); final toolCall = aiMessage1.toolCalls.first; - expect(toolCall.name, tool.name); + expect(toolCall.name, getCurrentWeatherTool.name); expect(toolCall.arguments.containsKey('location'), isTrue); expect(toolCall.arguments['location'], contains('Boston')); @@ -172,7 +172,7 @@ void main() { final res2 = await chat.invoke( PromptValue.chat([humanMessage, aiMessage1, functionMessage]), - options: const ChatOpenAIOptions(tools: [tool]), + options: const ChatOpenAIOptions(tools: [getCurrentWeatherTool]), ); final aiMessage2 = res2.output; @@ -269,26 +269,26 @@ void main() { expect(result.usage.totalTokens, greaterThan(0)); }); - test('Test ChatOpenAI streaming with functions', () async { - const tool = ToolSpec( - name: 'joke', - description: 'A joke', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'setup': { - 'type': 'string', - 'description': 'The setup for the joke', - }, - 'punchline': { - 'type': 'string', - 'description': 'The punchline to the joke', - }, + const jokeTool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', }, - 'required': ['location', 'punchline'], }, - ); + 'required': ['location', 'punchline'], + }, + ); + test('Test ChatOpenAI streaming with functions', () async { final promptTemplate = ChatPromptTemplate.fromTemplate( 'tell me a long joke about {foo}', ); @@ -300,7 +300,7 @@ void main() { ), ).bind( ChatOpenAIOptions( - tools: const [tool], + tools: const [jokeTool], toolChoice: ChatToolChoice.forced(name: 'joke'), ), ); @@ -433,5 +433,63 @@ void main() { final res = await chatModel.invoke(prompt); expect(res.output.content.toLowerCase(), contains('apple')); }); + + test('Test additive bind calls', () async { + final chatModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions( + model: defaultModel, + temperature: 0, + ), + ); + + final chatModelWithTools = chatModel.bind( + const ChatOpenAIOptions( + tools: [getCurrentWeatherTool, jokeTool], + ), + ); + + final res1 = await chatModelWithTools.invoke( + PromptValue.string( + 'Tell me the weather in Barcelona, Spain and a joke about bears', + ), + ); + expect( + res1.output.toolCalls.map((tc) => tc.name).toSet(), + {getCurrentWeatherTool.name, jokeTool.name}, + ); + + final chatModelForceWeatherTool = chatModelWithTools.bind( + ChatOpenAIOptions( + toolChoice: ChatToolChoice.forced(name: getCurrentWeatherTool.name), + ), + ); + + final res2 = await chatModelForceWeatherTool.invoke( + PromptValue.string( + 'Tell me the weather in Barcelona, Spain and a joke about bears', + ), + ); + expect( + res2.output.toolCalls.map((tc) => tc.name).toSet(), + {getCurrentWeatherTool.name}, + ); + + final chatModelForceJokeTool = chatModelWithTools.bind( + ChatOpenAIOptions( + toolChoice: 
ChatToolChoice.forced(name: jokeTool.name), + ), + ); + + final res3 = await chatModelForceJokeTool.invoke( + PromptValue.string( + 'Tell me the weather in Barcelona, Spain and a joke about bears', + ), + ); + expect( + res3.output.toolCalls.map((tc) => tc.name).toSet(), + {jokeTool.name}, + ); + }); }); } From 8b8c48fd8808f29a65aa2ad004f7238e26f82505 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Tue, 23 Jul 2024 23:12:24 +0200 Subject: [PATCH 076/251] feat: Allow to customize OpenAI-Beta header in openai_dart (#502) --- packages/openai_dart/lib/src/client.dart | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/openai_dart/lib/src/client.dart b/packages/openai_dart/lib/src/client.dart index 098a4cf2..b01a1594 100644 --- a/packages/openai_dart/lib/src/client.dart +++ b/packages/openai_dart/lib/src/client.dart @@ -18,6 +18,8 @@ class OpenAIClient extends g.OpenAIClient { /// - `apiKey`: your OpenAI API key. You can find your API key in the /// [OpenAI dashboard](https://platform.openai.com/account/api-keys). /// - `organization`: your OpenAI organization ID (if applicable). + /// - `beta`: the content to use for the `OpenAI-Beta` header which can be + /// used to enable beta features. /// /// Advance configuration options: /// - `baseUrl`: the base URL to use. Defaults to OpenAI's API URL. You can @@ -32,6 +34,7 @@ class OpenAIClient extends g.OpenAIClient { OpenAIClient({ final String? apiKey, final String? organization, + final String? beta = 'assistants=v2', final String? baseUrl, final Map? headers, final Map? queryParams, @@ -41,7 +44,7 @@ class OpenAIClient extends g.OpenAIClient { baseUrl: baseUrl, headers: { if (organization != null) 'OpenAI-Organization': organization, - 'OpenAI-Beta': 'assistants=v2', + if (beta != null) 'OpenAI-Beta': beta, ...?headers, }, queryParams: queryParams ?? 
const {},

From 2e97d1f17e885b032b48e0a67657c753266a23cb Mon Sep 17 00:00:00 2001
From: Ganesh
Date: Wed, 24 Jul 2024 12:05:50 +0530
Subject: [PATCH 077/251] feat: Add Fallback support for Runnables (#501)

Co-authored-by: David Miguel
---
 docs/_sidebar.md | 1 +
 docs/expression_language/fallbacks.md | 135 +++++++++++++
 .../bin/expression_language/fallbacks.dart | 181 ++++++++++++++++++
 .../lib/src/chat_models/fake.dart | 48 +++--
 .../lib/src/runnables/fallbacks.dart | 112 +++++++++++
 .../lib/src/runnables/runnable.dart | 20 ++
 .../lib/src/runnables/runnables.dart | 1 +
 .../test/runnables/fallbacks_test.dart | 102 ++++++++++
 8 files changed, 589 insertions(+), 11 deletions(-)
 create mode 100644 docs/expression_language/fallbacks.md
 create mode 100644 examples/docs_examples/bin/expression_language/fallbacks.dart
 create mode 100644 packages/langchain_core/lib/src/runnables/fallbacks.dart
 create mode 100644 packages/langchain_core/test/runnables/fallbacks_test.dart

diff --git a/docs/_sidebar.md b/docs/_sidebar.md
index 5296fd96..bfb7aad0 100644
--- a/docs/_sidebar.md
+++ b/docs/_sidebar.md
@@ -14,6 +14,7 @@
 - [Binding: Configuring runnables](/expression_language/primitives/binding.md)
 - [Router: Routing inputs](/expression_language/primitives/router.md)
 - [Streaming](/expression_language/streaming.md)
+ - [Fallbacks](/expression_language/fallbacks.md)
 - Cookbook
 - [Prompt + LLM](/expression_language/cookbook/prompt_llm_parser.md)
 - [Multiple chains](/expression_language/cookbook/multiple_chains.md)
diff --git a/docs/expression_language/fallbacks.md b/docs/expression_language/fallbacks.md
new file mode 100644
index 00000000..5fd0b8a7
--- /dev/null
+++ b/docs/expression_language/fallbacks.md
@@ -0,0 +1,135 @@
+# Fallbacks
+
+When working with language models, you may often encounter issues from the underlying APIs, e.g. rate limits or downtime. Therefore, as you move your LLM applications into production, it becomes more and more important to have contingencies for errors. That's why we've introduced the concept of fallbacks.
+
+Crucially, fallbacks can be applied not only on the LLM level but on the whole runnable level. This is important because different models often require different prompts. So if your call to OpenAI fails, you don't just want to send the same prompt to Anthropic - you probably want to use e.g. a different prompt template.
+
+## Handling LLM API errors with fallbacks
+
+This is perhaps the most common use case for fallbacks. A request to an LLM API can fail for a variety of reasons - the API could be down, you could have hit a rate limit, or any number of things. This situation can be handled using fallbacks.
+
+Fallbacks are created by calling the `withFallbacks()` method on the runnable you are working with, for example `final runnableWithFallbacks = mainRunnable.withFallbacks([fallback1, fallback2])`. This creates a `RunnableWithFallback` that wraps the main runnable and its list of fallbacks. When it is invoked, the `mainRunnable` is called first; if it fails, the fallbacks are invoked sequentially until one of them returns an output. If the `mainRunnable` succeeds and returns an output, the fallbacks are not called.
+ +## Fallback for chat models + +```dart +// fake model will throw error during invoke and fallback model will be called +final fakeOpenAIModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'tomato'), +); + +final latestModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), +); + +final modelWithFallbacks = fakeOpenAIModel.withFallbacks([latestModel]); + +final prompt = PromptValue.string('Explain why sky is blue in 2 lines'); + +final res = await modelWithFallbacks.invoke(prompt); +print(res); +/* +{ + "ChatResult": { + "id": "chatcmpl-9nKBcFNkzo5qUrdNB92b36J0d1meA", + "output": { + "AIChatMessage": { + "content": "The sky appears blue because molecules in the Earth's atmosphere scatter shorter wavelength blue light from the sun more effectively than longer wavelengths like red. This scattering process is known as Rayleigh scattering.", + "toolCalls": [] + } + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721542696, + "system_fingerprint": "fp_400f27fa1f" + }, + "usage": { + "LanguageModelUsage": { + "promptTokens": 16, + "promptBillableCharacters": null, + "responseTokens": 36, + "responseBillableCharacters": null, + "totalTokens": 52 + } + }, + "streaming": false + } +} +*/ +``` + +Note: if the options provided when invoking the runnable with fallbacks are not compatible with some of the fallbacks, they will be ignored. If you want to use different options for different fallbacks, provide them as `defaultOptions` when instantiating the fallbacks or use `bind()`. + +## Fallbacks for RunnableSequences with batch + +```dart +final fakeOpenAIModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'tomato'), +); + +final latestModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), +); + +final promptTemplate = ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + +final badChain = promptTemplate.pipe(fakeOpenAIModel); +final goodChain = promptTemplate.pipe(latestModel); + +final chainWithFallbacks = badChain.withFallbacks([goodChain]); + +final res = await chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], +); +print(res); +/* +[ + { + "id": "chatcmpl-9nKncT4IpAxbUxrEqEKGB0XUeyGRI", + "output": { + "content": "Sure! How about this one?\n\nWhy did the bear bring a suitcase to the forest?\n\nBecause it wanted to pack a lunch! 
🐻🌲", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721545052, + "system_fingerprint": "fp_400f27fa1f" + }, + "usage": { + "promptTokens": 13, + "promptBillableCharacters": null, + "responseTokens": 31, + "responseBillableCharacters": null, + "totalTokens": 44 + }, + "streaming": false + }, + { + "id": "chatcmpl-9nKnc58FpXFTPkzZfm2hHxJ5VSQQh", + "output": { + "content": "Sure, here's a cat joke for you:\n\nWhy was the cat sitting on the computer?\n\nBecause it wanted to keep an eye on the mouse!", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721545052, + "system_fingerprint": "fp_c4e5b6fa31" + }, + "usage": { + "promptTokens": 13, + "promptBillableCharacters": null, + "responseTokens": 29, + "responseBillableCharacters": null, + "totalTokens": 42 + }, + "streaming": false + } +] +*/ +``` diff --git a/examples/docs_examples/bin/expression_language/fallbacks.dart b/examples/docs_examples/bin/expression_language/fallbacks.dart new file mode 100644 index 00000000..8eea7bb2 --- /dev/null +++ b/examples/docs_examples/bin/expression_language/fallbacks.dart @@ -0,0 +1,181 @@ +// ignore_for_file: avoid_print +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_openai/langchain_openai.dart'; + +void main() async { + await _modelWithFallbacks(); + await _modelWithMultipleFallbacks(); + await _chainWithFallbacks(); +} + +Future _modelWithFallbacks() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + + final fakeOpenAIModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'tomato'), + ); + + final latestModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), + ); + + final modelWithFallbacks = fakeOpenAIModel.withFallbacks([latestModel]); + + final prompt = PromptValue.string('Explain why sky is blue in 2 lines'); + + final res = await modelWithFallbacks.invoke(prompt); + print(res); +/* +{ + "ChatResult": { + "id": "chatcmpl-9nKBcFNkzo5qUrdNB92b36J0d1meA", + "output": { + "AIChatMessage": { + "content": "The sky appears blue because molecules in the Earth's atmosphere scatter shorter wavelength blue light from the sun more effectively than longer wavelengths like red. 
This scattering process is known as Rayleigh scattering.", + "toolCalls": [] + } + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721542696, + "system_fingerprint": "fp_400f27fa1f" + }, + "usage": { + "LanguageModelUsage": { + "promptTokens": 16, + "promptBillableCharacters": null, + "responseTokens": 36, + "responseBillableCharacters": null, + "totalTokens": 52 + } + }, + "streaming": false + } +} +*/ +} + +Future _modelWithMultipleFallbacks() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + + final fakeOpenAIModel1 = + ChatOpenAI(defaultOptions: const ChatOpenAIOptions(model: 'tomato')); + + final fakeOpenAIModel2 = + ChatOpenAI(defaultOptions: const ChatOpenAIOptions(model: 'potato')); + + final latestModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), + ); + + final modelWithFallbacks = + fakeOpenAIModel1.withFallbacks([fakeOpenAIModel2, latestModel]); + + final prompt = PromptValue.string('Explain why sky is blue in 2 lines'); + + final res = await modelWithFallbacks.invoke(prompt); + print(res); + /* + { + "id": "chatcmpl-9nLKW345nrh0nzmw18iO35XnoQ2jo", + "output": { + "content": "The sky appears blue due to Rayleigh scattering, where shorter blue wavelengths of sunlight are scattered more than other colors by the molecules in Earth's atmosphere. This scattering disperses blue light in all directions, making the sky look blue.", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721547092, + "system_fingerprint": "fp_c4e5b6fa31" + }, + "usage": { + "promptTokens": 16, + "promptBillableCharacters": null, + "responseTokens": 45, + "responseBillableCharacters": null, + "totalTokens": 61 + }, + "streaming": false +} +*/ +} + +Future _chainWithFallbacks() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + + final fakeOpenAIModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'tomato'), + ); + + final latestModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), + ); + + final promptTemplate = + ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + + final badChain = promptTemplate.pipe(fakeOpenAIModel); + final goodChain = promptTemplate.pipe(latestModel); + + final chainWithFallbacks = badChain.withFallbacks([goodChain]); + + final res = await chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], + ); + print(res); +/* +[ + { + "id": "chatcmpl-9nKncT4IpAxbUxrEqEKGB0XUeyGRI", + "output": { + "content": "Sure! How about this one?\n\nWhy did the bear bring a suitcase to the forest?\n\nBecause it wanted to pack a lunch! 
🐻🌲", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721545052, + "system_fingerprint": "fp_400f27fa1f" + }, + "usage": { + "promptTokens": 13, + "promptBillableCharacters": null, + "responseTokens": 31, + "responseBillableCharacters": null, + "totalTokens": 44 + }, + "streaming": false + }, + { + "id": "chatcmpl-9nKnc58FpXFTPkzZfm2hHxJ5VSQQh", + "output": { + "content": "Sure, here's a cat joke for you:\n\nWhy was the cat sitting on the computer?\n\nBecause it wanted to keep an eye on the mouse!", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721545052, + "system_fingerprint": "fp_c4e5b6fa31" + }, + "usage": { + "promptTokens": 13, + "promptBillableCharacters": null, + "responseTokens": 29, + "responseBillableCharacters": null, + "totalTokens": 42 + }, + "streaming": false + } +] +*/ +} diff --git a/packages/langchain_core/lib/src/chat_models/fake.dart b/packages/langchain_core/lib/src/chat_models/fake.dart index 79c44d1b..b69868c3 100644 --- a/packages/langchain_core/lib/src/chat_models/fake.dart +++ b/packages/langchain_core/lib/src/chat_models/fake.dart @@ -149,6 +149,12 @@ class FakeEchoChatModel extends BaseChatModel { final PromptValue input, { final FakeEchoChatModelOptions? options, }) async { + final throwError = + options?.throwRandomError ?? defaultOptions.throwRandomError; + if (throwError) { + throw Exception('Random error'); + } + final text = input.toChatMessages().last.contentAsString; final message = AIChatMessage(content: text); return ChatResult( @@ -169,18 +175,29 @@ class FakeEchoChatModel extends BaseChatModel { final FakeEchoChatModelOptions? options, }) { final prompt = input.toChatMessages().first.contentAsString.split(''); + final throwError = + options?.throwRandomError ?? defaultOptions.throwRandomError; + + var index = 0; return Stream.fromIterable(prompt).map( - (final char) => ChatResult( - id: 'fake-echo-chat-model', - output: AIChatMessage(content: char), - finishReason: FinishReason.stop, - metadata: { - 'model': options?.model ?? defaultOptions.model, - ...?options?.metadata ?? defaultOptions.metadata, - }, - usage: const LanguageModelUsage(), - streaming: true, - ), + (final char) { + if (throwError && index == prompt.length ~/ 2) { + throw Exception('Random error'); + } + + return ChatResult( + id: 'fake-echo-chat-model', + output: AIChatMessage(content: char), + finishReason: FinishReason.stop, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + 'index': index++, + }, + usage: const LanguageModelUsage(), + streaming: true, + ); + }, ); } @@ -205,21 +222,27 @@ class FakeEchoChatModelOptions extends ChatModelOptions { const FakeEchoChatModelOptions({ super.model, this.metadata, + this.throwRandomError = false, super.concurrencyLimit, }); /// Metadata. final Map? metadata; + /// If true, throws a random error. + final bool throwRandomError; + @override FakeEchoChatModelOptions copyWith({ final String? model, final Map? metadata, + final bool? throwRandomError, final int? concurrencyLimit, }) { return FakeEchoChatModelOptions( model: model ?? this.model, metadata: metadata ?? this.metadata, + throwRandomError: throwRandomError ?? this.throwRandomError, concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } @@ -231,6 +254,7 @@ class FakeEchoChatModelOptions extends ChatModelOptions { return copyWith( model: other?.model, metadata: other?.metadata, + throwRandomError: other?.throwRandomError, concurrencyLimit: other?.concurrencyLimit, ); } @@ -239,6 +263,7 @@ class FakeEchoChatModelOptions extends ChatModelOptions { bool operator ==(covariant final FakeEchoChatModelOptions other) { return model == other.model && const MapEquality().equals(metadata, other.metadata) && + throwRandomError == other.throwRandomError && concurrencyLimit == other.concurrencyLimit; } @@ -246,6 +271,7 @@ class FakeEchoChatModelOptions extends ChatModelOptions { int get hashCode { return model.hashCode ^ const MapEquality().hash(metadata) ^ + throwRandomError.hashCode ^ concurrencyLimit.hashCode; } } diff --git a/packages/langchain_core/lib/src/runnables/fallbacks.dart b/packages/langchain_core/lib/src/runnables/fallbacks.dart new file mode 100644 index 00000000..db4d31a7 --- /dev/null +++ b/packages/langchain_core/lib/src/runnables/fallbacks.dart @@ -0,0 +1,112 @@ +import 'runnable.dart'; +import 'types.dart'; + +/// {@template runnable_with_fallback} +/// A [Runnable] that can fallback to other [Runnable]s if it fails. +/// +/// This class allows for the creation of a [Runnable] chain where a main +/// [Runnable] is attempted first, and if it fails, a sequence of fallback +/// [Runnable]s are tried in order. This process continues until one of the +/// [Runnable]s succeeds or all of them fail, in which case an exception is +/// thrown. +/// +/// You can create a [RunnableWithFallback] using the [Runnable.withFallbacks] +/// method. +/// +/// Example: +/// ```dart +/// final mainChatModel = ChatOpenAI(...); +/// final fallbackChatModel = ChatOpenAI(...); +/// final chatModel = mainChatModel.withFallbacks([fallbackChatModel]); +/// final res = await chatModel.invoke(...); +/// ``` +/// {@endtemplate} +class RunnableWithFallback + extends Runnable { + /// {@macro runnable_fallback} + RunnableWithFallback({ + required this.mainRunnable, + required this.fallbacks, + }) : super(defaultOptions: const RunnableOptions()); + + /// The Runnable to run first. + final Runnable mainRunnable; + + /// A sequence of fallbacks to try if the [mainRunnable] fails. + final List> fallbacks; + + @override + Future invoke(RunInput input, {RunnableOptions? options}) async { + Object? firstError; + for (final runnable in [mainRunnable, ...fallbacks]) { + try { + return await runnable.invoke( + input, + options: firstError == null + ? options + : runnable.getCompatibleOptions(options), + ); + } catch (e) { + firstError ??= e; + } + } + throw Exception('All runnables failed. First error: $firstError'); + } + + @override + Future> batch( + List inputs, { + List? options, + }) async { + Object? firstError; + for (final runnable in [mainRunnable, ...fallbacks]) { + List? currentOptions; + if (firstError == null) { + currentOptions = options; + } else { + final compatibleOptions = + options?.map(runnable.getCompatibleOptions).toList(growable: false); + final hasNullOptions = + compatibleOptions?.any((o) => o == null) ?? false; + if (!hasNullOptions) { + currentOptions = compatibleOptions?.cast(); + } + } + + try { + return await runnable.batch( + inputs, + options: currentOptions, + ); + } catch (e) { + firstError ??= e; + } + } + throw Exception('All runnables failed. First error: $firstError'); + } + + @override + Stream stream( + RunInput input, { + RunnableOptions? options, + }) async* { + Object? 
firstError; + for (final runnable in [mainRunnable, ...fallbacks]) { + try { + final stream = runnable.stream( + input, + options: firstError == null + ? options + : runnable.getCompatibleOptions(options), + ); + await for (final output in stream) { + yield output; + } + return; + } catch (e) { + firstError ??= e; + } + } + throw Exception('All runnables failed. First error: $firstError'); + } +} diff --git a/packages/langchain_core/lib/src/runnables/runnable.dart b/packages/langchain_core/lib/src/runnables/runnable.dart index 71021af6..7c020a50 100644 --- a/packages/langchain_core/lib/src/runnables/runnable.dart +++ b/packages/langchain_core/lib/src/runnables/runnable.dart @@ -2,6 +2,7 @@ import 'dart:async'; import '../../utils.dart'; import 'binding.dart'; +import 'fallbacks.dart'; import 'function.dart'; import 'input_map.dart'; import 'input_stream_map.dart'; @@ -282,6 +283,25 @@ abstract class Runnable withFallbacks( + List> fallbacks, + ) { + return RunnableWithFallback( + mainRunnable: this, + fallbacks: fallbacks, + ); + } + /// Returns the given [options] if they are compatible with the [Runnable], /// otherwise returns `null`. CallOptions? getCompatibleOptions( diff --git a/packages/langchain_core/lib/src/runnables/runnables.dart b/packages/langchain_core/lib/src/runnables/runnables.dart index d909b234..146761d7 100644 --- a/packages/langchain_core/lib/src/runnables/runnables.dart +++ b/packages/langchain_core/lib/src/runnables/runnables.dart @@ -1,4 +1,5 @@ export 'binding.dart'; +export 'fallbacks.dart'; export 'function.dart'; export 'input_map.dart'; export 'input_stream_map.dart'; diff --git a/packages/langchain_core/test/runnables/fallbacks_test.dart b/packages/langchain_core/test/runnables/fallbacks_test.dart new file mode 100644 index 00000000..7bc7a72d --- /dev/null +++ b/packages/langchain_core/test/runnables/fallbacks_test.dart @@ -0,0 +1,102 @@ +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/output_parsers.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:test/test.dart'; + +void main() { + group('RunnableFallback tests', () { + late FakeEchoChatModel model; + late FakeChatModel fallbackModel; + final promptTemplate = + ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + final input = PromptValue.string('why is the sky blue'); + + setUp(() { + model = const FakeEchoChatModel(); + fallbackModel = FakeChatModel(responses: ['fallback response']); + }); + + test('RunnableFallback should return main runnable output', () async { + final modelWithFallback = model.withFallbacks([fallbackModel]); + final res = await modelWithFallback.invoke(input); + expect(res.output.content, 'why is the sky blue'); + }); + + test('Should call fallback runnable if main runnable fails', () async { + final brokenModel = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final modelWithFallback = brokenModel.withFallbacks([fallbackModel]); + final res = await modelWithFallback.invoke(input); + expect(res.output.content, 'fallback response'); + }); + + test('Test batch response of main runnable in RunnableFallback', () async { + const model = FakeEchoChatModel(); + const fallbackModel = FakeEchoChatModel(); + final fallbackChain = promptTemplate.pipe(fallbackModel); + final chainWithFallbacks = + promptTemplate.pipe(model).withFallbacks([fallbackChain]); + final res = await chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], + ); + expect(res[0].output.content, 'tell me a 
joke about bears'); + expect(res[1].output.content, 'tell me a joke about cats'); + }); + + test('Test fallbacks response in batch', () async { + final brokenModel = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final fallbackChain = promptTemplate.pipe(fallbackModel); + final chainWithFallbacks = + promptTemplate.pipe(brokenModel).withFallbacks([fallbackChain]); + final res = await chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + ], + ); + expect(res.first.output.content, 'fallback response'); + }); + + test('Should throw error if none of runnable returned output', () async { + final brokenModel1 = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final brokenModel2 = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final fallbackChain = promptTemplate.pipe(brokenModel2); + final chainWithFallbacks = + promptTemplate.pipe(brokenModel1).withFallbacks([fallbackChain]); + expect( + () async => chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + ], + ), + throwsException, + ); + }); + + test('Test stream response of main runnable in RunnableFallback', () async { + final modelWithFallback = model.withFallbacks([fallbackModel]); + final chain = modelWithFallback.pipe(const StringOutputParser()); + final res = await chain.stream(input).toList(); + expect(res.join('|'), 'w|h|y| |i|s| |t|h|e| |s|k|y| |b|l|u|e'); + }); + + test('Test fallbacks response in stream', () async { + final brokenModel = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final modelWithFallback = brokenModel.withFallbacks([fallbackModel]); + final chain = modelWithFallback.pipe(const StringOutputParser()); + final res = await chain.stream(input).toList(); + expect(res.join('|'), endsWith('f|a|l|l|b|a|c|k| |r|e|s|p|o|n|s|e')); + }); + }); +} From a28f9af5628972a3fb3a5843a6d46b22bebeac1a Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 24 Jul 2024 18:37:42 +0200 Subject: [PATCH 078/251] feat: Add suffix support in Ollama completions API in ollama_dart (#503) --- .../schema/generate_completion_request.dart | 5 +++ .../src/generated/schema/schema.freezed.dart | 32 ++++++++++++++++++- .../lib/src/generated/schema/schema.g.dart | 2 ++ packages/ollama_dart/oas/ollama-curated.yaml | 18 ++++------- packages/ollama_dart/pubspec.yaml | 2 +- 5 files changed, 46 insertions(+), 13 deletions(-) diff --git a/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart b/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart index 1368ac7a..014e2654 100644 --- a/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart +++ b/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart @@ -23,6 +23,9 @@ class GenerateCompletionRequest with _$GenerateCompletionRequest { /// The prompt to generate a response. required String prompt, + /// The text that comes after the inserted text. + @JsonKey(includeIfNull: false) String? suffix, + /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? 
images, @@ -74,6 +77,7 @@ class GenerateCompletionRequest with _$GenerateCompletionRequest { static const List propertyNames = [ 'model', 'prompt', + 'suffix', 'images', 'system', 'template', @@ -95,6 +99,7 @@ class GenerateCompletionRequest with _$GenerateCompletionRequest { return { 'model': model, 'prompt': prompt, + 'suffix': suffix, 'images': images, 'system': system, 'template': template, diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index bf7cf75c..9b6d2f8f 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -29,6 +29,10 @@ mixin _$GenerateCompletionRequest { /// The prompt to generate a response. String get prompt => throw _privateConstructorUsedError; + /// The text that comes after the inserted text. + @JsonKey(includeIfNull: false) + String? get suffix => throw _privateConstructorUsedError; + /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? get images => throw _privateConstructorUsedError; @@ -91,6 +95,7 @@ abstract class $GenerateCompletionRequestCopyWith<$Res> { $Res call( {String model, String prompt, + @JsonKey(includeIfNull: false) String? suffix, @JsonKey(includeIfNull: false) List? images, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) String? template, @@ -123,6 +128,7 @@ class _$GenerateCompletionRequestCopyWithImpl<$Res, $Res call({ Object? model = null, Object? prompt = null, + Object? suffix = freezed, Object? images = freezed, Object? system = freezed, Object? template = freezed, @@ -142,6 +148,10 @@ class _$GenerateCompletionRequestCopyWithImpl<$Res, ? _value.prompt : prompt // ignore: cast_nullable_to_non_nullable as String, + suffix: freezed == suffix + ? _value.suffix + : suffix // ignore: cast_nullable_to_non_nullable + as String?, images: freezed == images ? _value.images : images // ignore: cast_nullable_to_non_nullable @@ -206,6 +216,7 @@ abstract class _$$GenerateCompletionRequestImplCopyWith<$Res> $Res call( {String model, String prompt, + @JsonKey(includeIfNull: false) String? suffix, @JsonKey(includeIfNull: false) List? images, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) String? template, @@ -238,6 +249,7 @@ class __$$GenerateCompletionRequestImplCopyWithImpl<$Res> $Res call({ Object? model = null, Object? prompt = null, + Object? suffix = freezed, Object? images = freezed, Object? system = freezed, Object? template = freezed, @@ -257,6 +269,10 @@ class __$$GenerateCompletionRequestImplCopyWithImpl<$Res> ? _value.prompt : prompt // ignore: cast_nullable_to_non_nullable as String, + suffix: freezed == suffix + ? _value.suffix + : suffix // ignore: cast_nullable_to_non_nullable + as String?, images: freezed == images ? _value._images : images // ignore: cast_nullable_to_non_nullable @@ -303,6 +319,7 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { const _$GenerateCompletionRequestImpl( {required this.model, required this.prompt, + @JsonKey(includeIfNull: false) this.suffix, @JsonKey(includeIfNull: false) final List? 
images, @JsonKey(includeIfNull: false) this.system, @JsonKey(includeIfNull: false) this.template, @@ -332,6 +349,11 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { @override final String prompt; + /// The text that comes after the inserted text. + @override + @JsonKey(includeIfNull: false) + final String? suffix; + /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) final List? _images; @@ -409,7 +431,7 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { @override String toString() { - return 'GenerateCompletionRequest(model: $model, prompt: $prompt, images: $images, system: $system, template: $template, context: $context, options: $options, format: $format, raw: $raw, stream: $stream, keepAlive: $keepAlive)'; + return 'GenerateCompletionRequest(model: $model, prompt: $prompt, suffix: $suffix, images: $images, system: $system, template: $template, context: $context, options: $options, format: $format, raw: $raw, stream: $stream, keepAlive: $keepAlive)'; } @override @@ -419,6 +441,7 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { other is _$GenerateCompletionRequestImpl && (identical(other.model, model) || other.model == model) && (identical(other.prompt, prompt) || other.prompt == prompt) && + (identical(other.suffix, suffix) || other.suffix == suffix) && const DeepCollectionEquality().equals(other._images, _images) && (identical(other.system, system) || other.system == system) && (identical(other.template, template) || @@ -438,6 +461,7 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { runtimeType, model, prompt, + suffix, const DeepCollectionEquality().hash(_images), system, template, @@ -467,6 +491,7 @@ abstract class _GenerateCompletionRequest extends GenerateCompletionRequest { const factory _GenerateCompletionRequest( {required final String model, required final String prompt, + @JsonKey(includeIfNull: false) final String? suffix, @JsonKey(includeIfNull: false) final List? images, @JsonKey(includeIfNull: false) final String? system, @JsonKey(includeIfNull: false) final String? template, @@ -497,6 +522,11 @@ abstract class _GenerateCompletionRequest extends GenerateCompletionRequest { String get prompt; @override + /// The text that comes after the inserted text. + @JsonKey(includeIfNull: false) + String? get suffix; + @override + /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? 
get images; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index a4aee619..e5d46d53 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -13,6 +13,7 @@ _$GenerateCompletionRequestImpl _$$GenerateCompletionRequestImplFromJson( _$GenerateCompletionRequestImpl( model: json['model'] as String, prompt: json['prompt'] as String, + suffix: json['suffix'] as String?, images: (json['images'] as List?)?.map((e) => e as String).toList(), system: json['system'] as String?, @@ -42,6 +43,7 @@ Map _$$GenerateCompletionRequestImplToJson( } } + writeNotNull('suffix', instance.suffix); writeNotNull('images', instance.images); writeNotNull('system', instance.system); writeNotNull('template', instance.template); diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index 9d3a507e..540b7141 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -278,6 +278,9 @@ components: type: string description: The prompt to generate a response. example: Why is the sky blue? + suffix: + type: string + description: The text that comes after the inserted text. images: type: array description: (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @@ -312,10 +315,10 @@ components: description: &stream | If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. default: false - keep_alive: + keep_alive: &keep_alive type: integer nullable: true - description: &keep_alive | + description: | How long (in minutes) to keep the model loaded in memory. - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. @@ -598,10 +601,7 @@ components: type: boolean description: *stream default: false - keep_alive: - type: integer - nullable: true - description: *keep_alive + keep_alive: *keep_alive required: - model - messages @@ -697,10 +697,7 @@ components: example: 'Here is an article about llamas...' options: $ref: '#/components/schemas/RequestOptions' - keep_alive: - type: integer - nullable: true - description: *keep_alive + keep_alive: *keep_alive required: - model - prompt @@ -831,7 +828,6 @@ components: type: integer nullable: true description: The number of parameters in the model. - ProcessResponse: type: object description: Response class for the list running models endpoint. 
diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 954eb772..5da5caa2 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 test: ^1.25.2 From a2ea62f7426ebdb0e0c53e8d608efd47b6506eca Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 24 Jul 2024 23:41:59 +0200 Subject: [PATCH 079/251] feat: Add tool calling support in ollama_dart (#504) --- .../src/chat_models/chat_ollama/mappers.dart | 2 +- packages/ollama_dart/README.md | 99 +- .../example/ollama_dart_example.dart | 80 +- .../generate_chat_completion_request.dart | 7 +- .../generate_chat_completion_response.dart | 8 +- .../lib/src/generated/schema/message.dart | 14 +- .../lib/src/generated/schema/schema.dart | 6 + .../src/generated/schema/schema.freezed.dart | 988 ++++++++++++++++-- .../lib/src/generated/schema/schema.g.dart | 106 +- .../lib/src/generated/schema/tool.dart | 53 + .../lib/src/generated/schema/tool_call.dart | 40 + .../generated/schema/tool_call_function.dart | 44 + .../schema/tool_call_function_args.dart | 12 + .../src/generated/schema/tool_function.dart | 52 + .../schema/tool_function_params.dart | 12 + packages/ollama_dart/oas/ollama-curated.yaml | 72 +- packages/ollama_dart/pubspec.yaml | 2 +- .../test/ollama_dart_chat_test.dart | 69 +- 18 files changed, 1540 insertions(+), 126 deletions(-) create mode 100644 packages/ollama_dart/lib/src/generated/schema/tool.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_call.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_function.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart index 0553fb88..d8b31e61 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart @@ -101,7 +101,7 @@ extension ChatResultMapper on GenerateChatCompletionResponse { return ChatResult( id: id, output: AIChatMessage( - content: message?.content ?? '', + content: message.content, ), finishReason: _mapFinishReason(doneReason), metadata: { diff --git a/packages/ollama_dart/README.md b/packages/ollama_dart/README.md index cf822953..46ad88a3 100644 --- a/packages/ollama_dart/README.md +++ b/packages/ollama_dart/README.md @@ -17,17 +17,24 @@ Unofficial Dart client for [Ollama](https://ollama.ai/) API. 
**Supported endpoints:** - Completions (with streaming support) -- Chat completions +- Chat completions (with streaming and tool calling support) - Embeddings - Models - Blobs +- Version ## Table of contents - [Usage](#usage) * [Completions](#completions) + + [Generate completion](#generate-completion) + + [Stream completion](#stream-completion) * [Chat completions](#chat-completions) + + [Generate chat completion](#generate-chat-completion) + + [Stream chat completion](#stream-chat-completion) + + [Tool calling](#tool-calling) * [Embeddings](#embeddings) + + [Generate embedding](#generate-embedding) * [Models](#models) + [Create model](#create-model) + [List models](#list-models) @@ -54,7 +61,7 @@ Refer to the [documentation](https://github.com/jmorganca/ollama/blob/main/docs/ Given a prompt, the model will generate a response. -**Generate completion:** +#### Generate completion ```dart final generated = await client.generateCompletion( @@ -67,7 +74,7 @@ print(generated.response); // The sky appears blue because of a phenomenon called Rayleigh scattering... ``` -**Stream completion:** +#### Stream completion ```dart final stream = client.generateCompletionStream( @@ -88,7 +95,7 @@ print(text); Given a prompt, the model will generate a response in a chat format. -**Generate chat completion:** +#### Generate chat completion ```dart final res = await client.generateChatCompletion( @@ -111,7 +118,7 @@ print(res); // Message(role: MessageRole.assistant, content: 123456789) ``` -**Stream chat completion:** +#### Stream chat completion ```dart final stream = client.generateChatCompletionStream( @@ -139,11 +146,91 @@ print(text); // 123456789 ``` +#### Tool calling + +Tool calling allows a model to respond to a given prompt by generating output that matches a user-defined schema, that you can then use to call the tools in your code and return the result back to the model to complete the conversation. + +**Notes:** +- Tool calling requires Ollama 0.2.8 or newer. +- Streaming tool calls is not supported at the moment. +- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1)). + +```dart +const tool = Tool( + function: ToolFunction( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + parameters: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ), +); + +const userMsg = Message( + role: MessageRole.user, + content: 'What’s the weather like in Barcelona in celsius?', +); + +final res1 = await client.generateChatCompletion( + request: GenerateChatCompletionRequest( + model: 'llama3.1', + messages: [userMsg], + tools: [tool], + ), +); + +print(res1.message.toolCalls); +// [ +// ToolCall( +// function: +// ToolCallFunction( +// name: get_current_weather, +// arguments: { +// location: Barcelona, ES, +// unit: celsius +// } +// ) +// ) +// ] + +// Call your tool here. For this example, we'll just mock the response. 
+const toolResult = '{"location": "Barcelona, ES", "temperature": 20, "unit": "celsius"}'; + +// Submit the response of the tool call to the model +final res2 = await client.generateChatCompletion( + request: GenerateChatCompletionRequest( + model: 'llama3.1', + messages: [ + userMsg, + res1.message, + Message( + role: MessageRole.tool, + content: toolResult, + ), + ], + ), +); +print(res2.message.content); +// The current weather in Barcelona is 20°C. +``` + ### Embeddings Given a prompt, the model will generate an embedding representing the prompt. -**Generate embedding:** +#### Generate embedding ```dart final generated = await client.generateEmbedding( diff --git a/packages/ollama_dart/example/ollama_dart_example.dart b/packages/ollama_dart/example/ollama_dart_example.dart index fab5f712..e5d11e3c 100644 --- a/packages/ollama_dart/example/ollama_dart_example.dart +++ b/packages/ollama_dart/example/ollama_dart_example.dart @@ -11,6 +11,7 @@ Future main() async { await _generateChatCompletion(client); await _generateChatCompletionWithHistory(client); await _generateChatCompletionStream(client); + await _generateChatToolCalling(client); // Embeddings await _generateEmbedding(client); @@ -86,7 +87,7 @@ Future _generateChatCompletion(final OllamaClient client) async { ], ), ); - print(generated.message?.content); + print(generated.message.content); } Future _generateChatCompletionWithHistory( @@ -111,7 +112,7 @@ Future _generateChatCompletionWithHistory( ], ), ); - print(generated.message?.content); + print(generated.message.content); } Future _generateChatCompletionStream(final OllamaClient client) async { @@ -132,11 +133,84 @@ Future _generateChatCompletionStream(final OllamaClient client) async { ); String text = ''; await for (final res in stream) { - text += (res.message?.content ?? '').trim(); + text += res.message.content.trim(); } print(text); } +Future _generateChatToolCalling(final OllamaClient client) async { + const tool = Tool( + function: ToolFunction( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + parameters: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ), + ); + + const userMsg = Message( + role: MessageRole.user, + content: 'What’s the weather like in Barcelona in celsius?', + ); + + final res1 = await client.generateChatCompletion( + request: const GenerateChatCompletionRequest( + model: 'llama3.1', + messages: [userMsg], + tools: [tool], + keepAlive: 1, + ), + ); + + print(res1.message.toolCalls); + // [ + // ToolCall( + // function: + // ToolCallFunction( + // name: get_current_weather, + // arguments: { + // location: Barcelona, ES, + // unit: celsius + // } + // ) + // ) + // ] + + // Call your tool here. For this example, we'll just mock the response. + const toolResult = + '{"location": "Barcelona, ES", "temperature": 20, "unit": "celsius"}'; + + // Submit the response of the tool call to the model + final res2 = await client.generateChatCompletion( + request: GenerateChatCompletionRequest( + model: 'llama3.1', + messages: [ + userMsg, + res1.message, + const Message( + role: MessageRole.tool, + content: toolResult, + ), + ], + ), + ); + print(res2.message.content); + // The current weather in Barcelona is 20°C. 
+} + Future _generateEmbedding(final OllamaClient client) async { final generated = await client.generateEmbedding( request: const GenerateEmbeddingRequest( diff --git a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart index 491efa66..fe47da47 100644 --- a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart +++ b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart @@ -47,6 +47,9 @@ class GenerateChatCompletionRequest with _$GenerateChatCompletionRequest { /// - If set to 0, the model will be unloaded immediately once finished. /// - If not set, the model will stay loaded for 5 minutes by default @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive, + + /// A list of tools the model may call. + @JsonKey(includeIfNull: false) List? tools, }) = _GenerateChatCompletionRequest; /// Object construction from a JSON representation @@ -60,7 +63,8 @@ class GenerateChatCompletionRequest with _$GenerateChatCompletionRequest { 'format', 'options', 'stream', - 'keep_alive' + 'keep_alive', + 'tools' ]; /// Perform validations on the schema property values @@ -77,6 +81,7 @@ class GenerateChatCompletionRequest with _$GenerateChatCompletionRequest { 'options': options, 'stream': stream, 'keep_alive': keepAlive, + 'tools': tools, }; } } diff --git a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart index faf7462a..d7857fd4 100644 --- a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart +++ b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart @@ -16,18 +16,18 @@ class GenerateChatCompletionResponse with _$GenerateChatCompletionResponse { /// Factory constructor for GenerateChatCompletionResponse const factory GenerateChatCompletionResponse({ /// A message in the chat endpoint - @JsonKey(includeIfNull: false) Message? message, + required Message message, /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) String? model, + required String model, /// Date on which a model was created. - @JsonKey(name: 'created_at', includeIfNull: false) String? createdAt, + @JsonKey(name: 'created_at') required String createdAt, /// Whether the response has completed. - @JsonKey(includeIfNull: false) bool? done, + required bool done, /// Reason why the model is done generating a response. @JsonKey( diff --git a/packages/ollama_dart/lib/src/generated/schema/message.dart b/packages/ollama_dart/lib/src/generated/schema/message.dart index 362e2349..add48dc2 100644 --- a/packages/ollama_dart/lib/src/generated/schema/message.dart +++ b/packages/ollama_dart/lib/src/generated/schema/message.dart @@ -23,6 +23,10 @@ class Message with _$Message { /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? images, + + /// A list of tools the model wants to call. + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? 
toolCalls, }) = _Message; /// Object construction from a JSON representation @@ -30,7 +34,12 @@ class Message with _$Message { _$MessageFromJson(json); /// List of all property names of schema - static const List propertyNames = ['role', 'content', 'images']; + static const List propertyNames = [ + 'role', + 'content', + 'images', + 'tool_calls' + ]; /// Perform validations on the schema property values String? validateSchema() { @@ -43,6 +52,7 @@ class Message with _$Message { 'role': role, 'content': content, 'images': images, + 'tool_calls': toolCalls, }; } } @@ -59,4 +69,6 @@ enum MessageRole { user, @JsonValue('assistant') assistant, + @JsonValue('tool') + tool, } diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.dart b/packages/ollama_dart/lib/src/generated/schema/schema.dart index 5ed7214c..f951912a 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.dart @@ -19,6 +19,12 @@ part 'generate_chat_completion_request.dart'; part 'generate_chat_completion_response.dart'; part 'done_reason.dart'; part 'message.dart'; +part 'tool.dart'; +part 'tool_function.dart'; +part 'tool_function_params.dart'; +part 'tool_call.dart'; +part 'tool_call_function.dart'; +part 'tool_call_function_args.dart'; part 'generate_embedding_request.dart'; part 'generate_embedding_response.dart'; part 'create_model_request.dart'; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index 9b6d2f8f..b9128995 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -2363,6 +2363,10 @@ mixin _$GenerateChatCompletionRequest { @JsonKey(name: 'keep_alive', includeIfNull: false) int? get keepAlive => throw _privateConstructorUsedError; + /// A list of tools the model may call. + @JsonKey(includeIfNull: false) + List? get tools => throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $GenerateChatCompletionRequestCopyWith @@ -2386,7 +2390,8 @@ abstract class $GenerateChatCompletionRequestCopyWith<$Res> { ResponseFormat? format, @JsonKey(includeIfNull: false) RequestOptions? options, bool stream, - @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); + @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive, + @JsonKey(includeIfNull: false) List? tools}); $RequestOptionsCopyWith<$Res>? get options; } @@ -2411,6 +2416,7 @@ class _$GenerateChatCompletionRequestCopyWithImpl<$Res, Object? options = freezed, Object? stream = null, Object? keepAlive = freezed, + Object? tools = freezed, }) { return _then(_value.copyWith( model: null == model @@ -2437,6 +2443,10 @@ class _$GenerateChatCompletionRequestCopyWithImpl<$Res, ? _value.keepAlive : keepAlive // ignore: cast_nullable_to_non_nullable as int?, + tools: freezed == tools + ? _value.tools + : tools // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } @@ -2471,7 +2481,8 @@ abstract class _$$GenerateChatCompletionRequestImplCopyWith<$Res> ResponseFormat? format, @JsonKey(includeIfNull: false) RequestOptions? options, bool stream, - @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); + @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive, + @JsonKey(includeIfNull: false) List? tools}); @override $RequestOptionsCopyWith<$Res>? 
get options; @@ -2496,6 +2507,7 @@ class __$$GenerateChatCompletionRequestImplCopyWithImpl<$Res> Object? options = freezed, Object? stream = null, Object? keepAlive = freezed, + Object? tools = freezed, }) { return _then(_$GenerateChatCompletionRequestImpl( model: null == model @@ -2522,6 +2534,10 @@ class __$$GenerateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.keepAlive : keepAlive // ignore: cast_nullable_to_non_nullable as int?, + tools: freezed == tools + ? _value._tools + : tools // ignore: cast_nullable_to_non_nullable + as List?, )); } } @@ -2539,8 +2555,10 @@ class _$GenerateChatCompletionRequestImpl this.format, @JsonKey(includeIfNull: false) this.options, this.stream = false, - @JsonKey(name: 'keep_alive', includeIfNull: false) this.keepAlive}) + @JsonKey(name: 'keep_alive', includeIfNull: false) this.keepAlive, + @JsonKey(includeIfNull: false) final List? tools}) : _messages = messages, + _tools = tools, super._(); factory _$GenerateChatCompletionRequestImpl.fromJson( @@ -2594,9 +2612,23 @@ class _$GenerateChatCompletionRequestImpl @JsonKey(name: 'keep_alive', includeIfNull: false) final int? keepAlive; + /// A list of tools the model may call. + final List? _tools; + + /// A list of tools the model may call. + @override + @JsonKey(includeIfNull: false) + List? get tools { + final value = _tools; + if (value == null) return null; + if (_tools is EqualUnmodifiableListView) return _tools; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + @override String toString() { - return 'GenerateChatCompletionRequest(model: $model, messages: $messages, format: $format, options: $options, stream: $stream, keepAlive: $keepAlive)'; + return 'GenerateChatCompletionRequest(model: $model, messages: $messages, format: $format, options: $options, stream: $stream, keepAlive: $keepAlive, tools: $tools)'; } @override @@ -2610,7 +2642,8 @@ class _$GenerateChatCompletionRequestImpl (identical(other.options, options) || other.options == options) && (identical(other.stream, stream) || other.stream == stream) && (identical(other.keepAlive, keepAlive) || - other.keepAlive == keepAlive)); + other.keepAlive == keepAlive) && + const DeepCollectionEquality().equals(other._tools, _tools)); } @JsonKey(ignore: true) @@ -2622,7 +2655,8 @@ class _$GenerateChatCompletionRequestImpl format, options, stream, - keepAlive); + keepAlive, + const DeepCollectionEquality().hash(_tools)); @JsonKey(ignore: true) @override @@ -2651,8 +2685,9 @@ abstract class _GenerateChatCompletionRequest final ResponseFormat? format, @JsonKey(includeIfNull: false) final RequestOptions? options, final bool stream, - @JsonKey(name: 'keep_alive', includeIfNull: false) - final int? keepAlive}) = _$GenerateChatCompletionRequestImpl; + @JsonKey(name: 'keep_alive', includeIfNull: false) final int? keepAlive, + @JsonKey(includeIfNull: false) + final List? tools}) = _$GenerateChatCompletionRequestImpl; const _GenerateChatCompletionRequest._() : super._(); factory _GenerateChatCompletionRequest.fromJson(Map json) = @@ -2698,6 +2733,11 @@ abstract class _GenerateChatCompletionRequest @JsonKey(name: 'keep_alive', includeIfNull: false) int? get keepAlive; @override + + /// A list of tools the model may call. + @JsonKey(includeIfNull: false) + List? 
get tools; + @override @JsonKey(ignore: true) _$$GenerateChatCompletionRequestImplCopyWith< _$GenerateChatCompletionRequestImpl> @@ -2712,22 +2752,19 @@ GenerateChatCompletionResponse _$GenerateChatCompletionResponseFromJson( /// @nodoc mixin _$GenerateChatCompletionResponse { /// A message in the chat endpoint - @JsonKey(includeIfNull: false) - Message? get message => throw _privateConstructorUsedError; + Message get message => throw _privateConstructorUsedError; /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) - String? get model => throw _privateConstructorUsedError; + String get model => throw _privateConstructorUsedError; /// Date on which a model was created. - @JsonKey(name: 'created_at', includeIfNull: false) - String? get createdAt => throw _privateConstructorUsedError; + @JsonKey(name: 'created_at') + String get createdAt => throw _privateConstructorUsedError; /// Whether the response has completed. - @JsonKey(includeIfNull: false) - bool? get done => throw _privateConstructorUsedError; + bool get done => throw _privateConstructorUsedError; /// Reason why the model is done generating a response. @JsonKey( @@ -2775,10 +2812,10 @@ abstract class $GenerateChatCompletionResponseCopyWith<$Res> { GenerateChatCompletionResponse>; @useResult $Res call( - {@JsonKey(includeIfNull: false) Message? message, - @JsonKey(includeIfNull: false) String? model, - @JsonKey(name: 'created_at', includeIfNull: false) String? createdAt, - @JsonKey(includeIfNull: false) bool? done, + {Message message, + String model, + @JsonKey(name: 'created_at') String createdAt, + bool done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -2793,7 +2830,7 @@ abstract class $GenerateChatCompletionResponseCopyWith<$Res> { @JsonKey(name: 'eval_count', includeIfNull: false) int? evalCount, @JsonKey(name: 'eval_duration', includeIfNull: false) int? evalDuration}); - $MessageCopyWith<$Res>? get message; + $MessageCopyWith<$Res> get message; } /// @nodoc @@ -2810,10 +2847,10 @@ class _$GenerateChatCompletionResponseCopyWithImpl<$Res, @pragma('vm:prefer-inline') @override $Res call({ - Object? message = freezed, - Object? model = freezed, - Object? createdAt = freezed, - Object? done = freezed, + Object? message = null, + Object? model = null, + Object? createdAt = null, + Object? done = null, Object? doneReason = freezed, Object? totalDuration = freezed, Object? loadDuration = freezed, @@ -2823,22 +2860,22 @@ class _$GenerateChatCompletionResponseCopyWithImpl<$Res, Object? evalDuration = freezed, }) { return _then(_value.copyWith( - message: freezed == message + message: null == message ? _value.message : message // ignore: cast_nullable_to_non_nullable - as Message?, - model: freezed == model + as Message, + model: null == model ? _value.model : model // ignore: cast_nullable_to_non_nullable - as String?, - createdAt: freezed == createdAt + as String, + createdAt: null == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable - as String?, - done: freezed == done + as String, + done: null == done ? _value.done : done // ignore: cast_nullable_to_non_nullable - as bool?, + as bool, doneReason: freezed == doneReason ? 
_value.doneReason : doneReason // ignore: cast_nullable_to_non_nullable @@ -2872,12 +2909,8 @@ class _$GenerateChatCompletionResponseCopyWithImpl<$Res, @override @pragma('vm:prefer-inline') - $MessageCopyWith<$Res>? get message { - if (_value.message == null) { - return null; - } - - return $MessageCopyWith<$Res>(_value.message!, (value) { + $MessageCopyWith<$Res> get message { + return $MessageCopyWith<$Res>(_value.message, (value) { return _then(_value.copyWith(message: value) as $Val); }); } @@ -2893,10 +2926,10 @@ abstract class _$$GenerateChatCompletionResponseImplCopyWith<$Res> @override @useResult $Res call( - {@JsonKey(includeIfNull: false) Message? message, - @JsonKey(includeIfNull: false) String? model, - @JsonKey(name: 'created_at', includeIfNull: false) String? createdAt, - @JsonKey(includeIfNull: false) bool? done, + {Message message, + String model, + @JsonKey(name: 'created_at') String createdAt, + bool done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -2912,7 +2945,7 @@ abstract class _$$GenerateChatCompletionResponseImplCopyWith<$Res> @JsonKey(name: 'eval_duration', includeIfNull: false) int? evalDuration}); @override - $MessageCopyWith<$Res>? get message; + $MessageCopyWith<$Res> get message; } /// @nodoc @@ -2928,10 +2961,10 @@ class __$$GenerateChatCompletionResponseImplCopyWithImpl<$Res> @pragma('vm:prefer-inline') @override $Res call({ - Object? message = freezed, - Object? model = freezed, - Object? createdAt = freezed, - Object? done = freezed, + Object? message = null, + Object? model = null, + Object? createdAt = null, + Object? done = null, Object? doneReason = freezed, Object? totalDuration = freezed, Object? loadDuration = freezed, @@ -2941,22 +2974,22 @@ class __$$GenerateChatCompletionResponseImplCopyWithImpl<$Res> Object? evalDuration = freezed, }) { return _then(_$GenerateChatCompletionResponseImpl( - message: freezed == message + message: null == message ? _value.message : message // ignore: cast_nullable_to_non_nullable - as Message?, - model: freezed == model + as Message, + model: null == model ? _value.model : model // ignore: cast_nullable_to_non_nullable - as String?, - createdAt: freezed == createdAt + as String, + createdAt: null == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable - as String?, - done: freezed == done + as String, + done: null == done ? _value.done : done // ignore: cast_nullable_to_non_nullable - as bool?, + as bool, doneReason: freezed == doneReason ? _value.doneReason : doneReason // ignore: cast_nullable_to_non_nullable @@ -2994,10 +3027,10 @@ class __$$GenerateChatCompletionResponseImplCopyWithImpl<$Res> class _$GenerateChatCompletionResponseImpl extends _GenerateChatCompletionResponse { const _$GenerateChatCompletionResponseImpl( - {@JsonKey(includeIfNull: false) this.message, - @JsonKey(includeIfNull: false) this.model, - @JsonKey(name: 'created_at', includeIfNull: false) this.createdAt, - @JsonKey(includeIfNull: false) this.done, + {required this.message, + required this.model, + @JsonKey(name: 'created_at') required this.createdAt, + required this.done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -3019,25 +3052,22 @@ class _$GenerateChatCompletionResponseImpl /// A message in the chat endpoint @override - @JsonKey(includeIfNull: false) - final Message? message; + final Message message; /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. 
The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. @override - @JsonKey(includeIfNull: false) - final String? model; + final String model; /// Date on which a model was created. @override - @JsonKey(name: 'created_at', includeIfNull: false) - final String? createdAt; + @JsonKey(name: 'created_at') + final String createdAt; /// Whether the response has completed. @override - @JsonKey(includeIfNull: false) - final bool? done; + final bool done; /// Reason why the model is done generating a response. @override @@ -3143,11 +3173,10 @@ class _$GenerateChatCompletionResponseImpl abstract class _GenerateChatCompletionResponse extends GenerateChatCompletionResponse { const factory _GenerateChatCompletionResponse( - {@JsonKey(includeIfNull: false) final Message? message, - @JsonKey(includeIfNull: false) final String? model, - @JsonKey(name: 'created_at', includeIfNull: false) - final String? createdAt, - @JsonKey(includeIfNull: false) final bool? done, + {required final Message message, + required final String model, + @JsonKey(name: 'created_at') required final String createdAt, + required final bool done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -3172,25 +3201,22 @@ abstract class _GenerateChatCompletionResponse @override /// A message in the chat endpoint - @JsonKey(includeIfNull: false) - Message? get message; + Message get message; @override /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) - String? get model; + String get model; @override /// Date on which a model was created. - @JsonKey(name: 'created_at', includeIfNull: false) - String? get createdAt; + @JsonKey(name: 'created_at') + String get createdAt; @override /// Whether the response has completed. - @JsonKey(includeIfNull: false) - bool? get done; + bool get done; @override /// Reason why the model is done generating a response. @@ -3252,6 +3278,10 @@ mixin _$Message { @JsonKey(includeIfNull: false) List? get images => throw _privateConstructorUsedError; + /// A list of tools the model wants to call. + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls => throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $MessageCopyWith get copyWith => throw _privateConstructorUsedError; @@ -3265,7 +3295,9 @@ abstract class $MessageCopyWith<$Res> { $Res call( {MessageRole role, String content, - @JsonKey(includeIfNull: false) List? images}); + @JsonKey(includeIfNull: false) List? images, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls}); } /// @nodoc @@ -3284,6 +3316,7 @@ class _$MessageCopyWithImpl<$Res, $Val extends Message> Object? role = null, Object? content = null, Object? images = freezed, + Object? toolCalls = freezed, }) { return _then(_value.copyWith( role: null == role @@ -3298,6 +3331,10 @@ class _$MessageCopyWithImpl<$Res, $Val extends Message> ? _value.images : images // ignore: cast_nullable_to_non_nullable as List?, + toolCalls: freezed == toolCalls + ? 
_value.toolCalls + : toolCalls // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } } @@ -3312,7 +3349,9 @@ abstract class _$$MessageImplCopyWith<$Res> implements $MessageCopyWith<$Res> { $Res call( {MessageRole role, String content, - @JsonKey(includeIfNull: false) List? images}); + @JsonKey(includeIfNull: false) List? images, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls}); } /// @nodoc @@ -3329,6 +3368,7 @@ class __$$MessageImplCopyWithImpl<$Res> Object? role = null, Object? content = null, Object? images = freezed, + Object? toolCalls = freezed, }) { return _then(_$MessageImpl( role: null == role @@ -3343,6 +3383,10 @@ class __$$MessageImplCopyWithImpl<$Res> ? _value._images : images // ignore: cast_nullable_to_non_nullable as List?, + toolCalls: freezed == toolCalls + ? _value._toolCalls + : toolCalls // ignore: cast_nullable_to_non_nullable + as List?, )); } } @@ -3353,8 +3397,11 @@ class _$MessageImpl extends _Message { const _$MessageImpl( {required this.role, required this.content, - @JsonKey(includeIfNull: false) final List? images}) + @JsonKey(includeIfNull: false) final List? images, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls}) : _images = images, + _toolCalls = toolCalls, super._(); factory _$MessageImpl.fromJson(Map json) => @@ -3382,9 +3429,23 @@ class _$MessageImpl extends _Message { return EqualUnmodifiableListView(value); } + /// A list of tools the model wants to call. + final List? _toolCalls; + + /// A list of tools the model wants to call. + @override + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls { + final value = _toolCalls; + if (value == null) return null; + if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + @override String toString() { - return 'Message(role: $role, content: $content, images: $images)'; + return 'Message(role: $role, content: $content, images: $images, toolCalls: $toolCalls)'; } @override @@ -3394,13 +3455,19 @@ class _$MessageImpl extends _Message { other is _$MessageImpl && (identical(other.role, role) || other.role == role) && (identical(other.content, content) || other.content == content) && - const DeepCollectionEquality().equals(other._images, _images)); + const DeepCollectionEquality().equals(other._images, _images) && + const DeepCollectionEquality() + .equals(other._toolCalls, _toolCalls)); } @JsonKey(ignore: true) @override int get hashCode => Object.hash( - runtimeType, role, content, const DeepCollectionEquality().hash(_images)); + runtimeType, + role, + content, + const DeepCollectionEquality().hash(_images), + const DeepCollectionEquality().hash(_toolCalls)); @JsonKey(ignore: true) @override @@ -3418,10 +3485,11 @@ class _$MessageImpl extends _Message { abstract class _Message extends Message { const factory _Message( - {required final MessageRole role, - required final String content, - @JsonKey(includeIfNull: false) final List? images}) = - _$MessageImpl; + {required final MessageRole role, + required final String content, + @JsonKey(includeIfNull: false) final List? images, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls}) = _$MessageImpl; const _Message._() : super._(); factory _Message.fromJson(Map json) = _$MessageImpl.fromJson; @@ -3440,11 +3508,749 @@ abstract class _Message extends Message { @JsonKey(includeIfNull: false) List? get images; @override + + /// A list of tools the model wants to call. 
+ @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls; + @override @JsonKey(ignore: true) _$$MessageImplCopyWith<_$MessageImpl> get copyWith => throw _privateConstructorUsedError; } +Tool _$ToolFromJson(Map json) { + return _Tool.fromJson(json); +} + +/// @nodoc +mixin _$Tool { + /// The type of tool. + ToolType get type => throw _privateConstructorUsedError; + + /// A function that the model may call. + @JsonKey(includeIfNull: false) + ToolFunction? get function => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolCopyWith<$Res> { + factory $ToolCopyWith(Tool value, $Res Function(Tool) then) = + _$ToolCopyWithImpl<$Res, Tool>; + @useResult + $Res call( + {ToolType type, @JsonKey(includeIfNull: false) ToolFunction? function}); + + $ToolFunctionCopyWith<$Res>? get function; +} + +/// @nodoc +class _$ToolCopyWithImpl<$Res, $Val extends Tool> + implements $ToolCopyWith<$Res> { + _$ToolCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? function = freezed, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ToolType, + function: freezed == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as ToolFunction?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ToolFunctionCopyWith<$Res>? get function { + if (_value.function == null) { + return null; + } + + return $ToolFunctionCopyWith<$Res>(_value.function!, (value) { + return _then(_value.copyWith(function: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$ToolImplCopyWith<$Res> implements $ToolCopyWith<$Res> { + factory _$$ToolImplCopyWith( + _$ToolImpl value, $Res Function(_$ToolImpl) then) = + __$$ToolImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {ToolType type, @JsonKey(includeIfNull: false) ToolFunction? function}); + + @override + $ToolFunctionCopyWith<$Res>? get function; +} + +/// @nodoc +class __$$ToolImplCopyWithImpl<$Res> + extends _$ToolCopyWithImpl<$Res, _$ToolImpl> + implements _$$ToolImplCopyWith<$Res> { + __$$ToolImplCopyWithImpl(_$ToolImpl _value, $Res Function(_$ToolImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? function = freezed, + }) { + return _then(_$ToolImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ToolType, + function: freezed == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as ToolFunction?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolImpl extends _Tool { + const _$ToolImpl( + {this.type = ToolType.function, + @JsonKey(includeIfNull: false) this.function}) + : super._(); + + factory _$ToolImpl.fromJson(Map json) => + _$$ToolImplFromJson(json); + + /// The type of tool. + @override + @JsonKey() + final ToolType type; + + /// A function that the model may call. + @override + @JsonKey(includeIfNull: false) + final ToolFunction? 
function; + + @override + String toString() { + return 'Tool(type: $type, function: $function)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.function, function) || + other.function == function)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type, function); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolImplCopyWith<_$ToolImpl> get copyWith => + __$$ToolImplCopyWithImpl<_$ToolImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ToolImplToJson( + this, + ); + } +} + +abstract class _Tool extends Tool { + const factory _Tool( + {final ToolType type, + @JsonKey(includeIfNull: false) final ToolFunction? function}) = + _$ToolImpl; + const _Tool._() : super._(); + + factory _Tool.fromJson(Map json) = _$ToolImpl.fromJson; + + @override + + /// The type of tool. + ToolType get type; + @override + + /// A function that the model may call. + @JsonKey(includeIfNull: false) + ToolFunction? get function; + @override + @JsonKey(ignore: true) + _$$ToolImplCopyWith<_$ToolImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ToolFunction _$ToolFunctionFromJson(Map json) { + return _ToolFunction.fromJson(json); +} + +/// @nodoc +mixin _$ToolFunction { + /// The name of the function to be called. + String get name => throw _privateConstructorUsedError; + + /// A description of what the function does, used by the model to choose when and how to call the function. + String get description => throw _privateConstructorUsedError; + + /// The parameters the functions accepts, described as a JSON Schema object. + Map get parameters => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolFunctionCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolFunctionCopyWith<$Res> { + factory $ToolFunctionCopyWith( + ToolFunction value, $Res Function(ToolFunction) then) = + _$ToolFunctionCopyWithImpl<$Res, ToolFunction>; + @useResult + $Res call({String name, String description, Map parameters}); +} + +/// @nodoc +class _$ToolFunctionCopyWithImpl<$Res, $Val extends ToolFunction> + implements $ToolFunctionCopyWith<$Res> { + _$ToolFunctionCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = null, + Object? parameters = null, + }) { + return _then(_value.copyWith( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: null == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String, + parameters: null == parameters + ? 
_value.parameters + : parameters // ignore: cast_nullable_to_non_nullable + as Map, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ToolFunctionImplCopyWith<$Res> + implements $ToolFunctionCopyWith<$Res> { + factory _$$ToolFunctionImplCopyWith( + _$ToolFunctionImpl value, $Res Function(_$ToolFunctionImpl) then) = + __$$ToolFunctionImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String name, String description, Map parameters}); +} + +/// @nodoc +class __$$ToolFunctionImplCopyWithImpl<$Res> + extends _$ToolFunctionCopyWithImpl<$Res, _$ToolFunctionImpl> + implements _$$ToolFunctionImplCopyWith<$Res> { + __$$ToolFunctionImplCopyWithImpl( + _$ToolFunctionImpl _value, $Res Function(_$ToolFunctionImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = null, + Object? parameters = null, + }) { + return _then(_$ToolFunctionImpl( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: null == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String, + parameters: null == parameters + ? _value._parameters + : parameters // ignore: cast_nullable_to_non_nullable + as Map, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolFunctionImpl extends _ToolFunction { + const _$ToolFunctionImpl( + {required this.name, + required this.description, + required final Map parameters}) + : _parameters = parameters, + super._(); + + factory _$ToolFunctionImpl.fromJson(Map json) => + _$$ToolFunctionImplFromJson(json); + + /// The name of the function to be called. + @override + final String name; + + /// A description of what the function does, used by the model to choose when and how to call the function. + @override + final String description; + + /// The parameters the functions accepts, described as a JSON Schema object. + final Map _parameters; + + /// The parameters the functions accepts, described as a JSON Schema object. 
+ @override + Map get parameters { + if (_parameters is EqualUnmodifiableMapView) return _parameters; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_parameters); + } + + @override + String toString() { + return 'ToolFunction(name: $name, description: $description, parameters: $parameters)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolFunctionImpl && + (identical(other.name, name) || other.name == name) && + (identical(other.description, description) || + other.description == description) && + const DeepCollectionEquality() + .equals(other._parameters, _parameters)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, name, description, + const DeepCollectionEquality().hash(_parameters)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolFunctionImplCopyWith<_$ToolFunctionImpl> get copyWith => + __$$ToolFunctionImplCopyWithImpl<_$ToolFunctionImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ToolFunctionImplToJson( + this, + ); + } +} + +abstract class _ToolFunction extends ToolFunction { + const factory _ToolFunction( + {required final String name, + required final String description, + required final Map parameters}) = _$ToolFunctionImpl; + const _ToolFunction._() : super._(); + + factory _ToolFunction.fromJson(Map json) = + _$ToolFunctionImpl.fromJson; + + @override + + /// The name of the function to be called. + String get name; + @override + + /// A description of what the function does, used by the model to choose when and how to call the function. + String get description; + @override + + /// The parameters the functions accepts, described as a JSON Schema object. + Map get parameters; + @override + @JsonKey(ignore: true) + _$$ToolFunctionImplCopyWith<_$ToolFunctionImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ToolCall _$ToolCallFromJson(Map json) { + return _ToolCall.fromJson(json); +} + +/// @nodoc +mixin _$ToolCall { + /// The function the model wants to call. + @JsonKey(includeIfNull: false) + ToolCallFunction? get function => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolCallCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolCallCopyWith<$Res> { + factory $ToolCallCopyWith(ToolCall value, $Res Function(ToolCall) then) = + _$ToolCallCopyWithImpl<$Res, ToolCall>; + @useResult + $Res call({@JsonKey(includeIfNull: false) ToolCallFunction? function}); + + $ToolCallFunctionCopyWith<$Res>? get function; +} + +/// @nodoc +class _$ToolCallCopyWithImpl<$Res, $Val extends ToolCall> + implements $ToolCallCopyWith<$Res> { + _$ToolCallCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? function = freezed, + }) { + return _then(_value.copyWith( + function: freezed == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as ToolCallFunction?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ToolCallFunctionCopyWith<$Res>? 
get function { + if (_value.function == null) { + return null; + } + + return $ToolCallFunctionCopyWith<$Res>(_value.function!, (value) { + return _then(_value.copyWith(function: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$ToolCallImplCopyWith<$Res> + implements $ToolCallCopyWith<$Res> { + factory _$$ToolCallImplCopyWith( + _$ToolCallImpl value, $Res Function(_$ToolCallImpl) then) = + __$$ToolCallImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(includeIfNull: false) ToolCallFunction? function}); + + @override + $ToolCallFunctionCopyWith<$Res>? get function; +} + +/// @nodoc +class __$$ToolCallImplCopyWithImpl<$Res> + extends _$ToolCallCopyWithImpl<$Res, _$ToolCallImpl> + implements _$$ToolCallImplCopyWith<$Res> { + __$$ToolCallImplCopyWithImpl( + _$ToolCallImpl _value, $Res Function(_$ToolCallImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? function = freezed, + }) { + return _then(_$ToolCallImpl( + function: freezed == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as ToolCallFunction?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolCallImpl extends _ToolCall { + const _$ToolCallImpl({@JsonKey(includeIfNull: false) this.function}) + : super._(); + + factory _$ToolCallImpl.fromJson(Map json) => + _$$ToolCallImplFromJson(json); + + /// The function the model wants to call. + @override + @JsonKey(includeIfNull: false) + final ToolCallFunction? function; + + @override + String toString() { + return 'ToolCall(function: $function)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolCallImpl && + (identical(other.function, function) || + other.function == function)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, function); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolCallImplCopyWith<_$ToolCallImpl> get copyWith => + __$$ToolCallImplCopyWithImpl<_$ToolCallImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ToolCallImplToJson( + this, + ); + } +} + +abstract class _ToolCall extends ToolCall { + const factory _ToolCall( + {@JsonKey(includeIfNull: false) final ToolCallFunction? function}) = + _$ToolCallImpl; + const _ToolCall._() : super._(); + + factory _ToolCall.fromJson(Map json) = + _$ToolCallImpl.fromJson; + + @override + + /// The function the model wants to call. + @JsonKey(includeIfNull: false) + ToolCallFunction? get function; + @override + @JsonKey(ignore: true) + _$$ToolCallImplCopyWith<_$ToolCallImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ToolCallFunction _$ToolCallFunctionFromJson(Map json) { + return _ToolCallFunction.fromJson(json); +} + +/// @nodoc +mixin _$ToolCallFunction { + /// The name of the function to be called. + String get name => throw _privateConstructorUsedError; + + /// The arguments to pass to the function. 
+ Map get arguments => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolCallFunctionCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolCallFunctionCopyWith<$Res> { + factory $ToolCallFunctionCopyWith( + ToolCallFunction value, $Res Function(ToolCallFunction) then) = + _$ToolCallFunctionCopyWithImpl<$Res, ToolCallFunction>; + @useResult + $Res call({String name, Map arguments}); +} + +/// @nodoc +class _$ToolCallFunctionCopyWithImpl<$Res, $Val extends ToolCallFunction> + implements $ToolCallFunctionCopyWith<$Res> { + _$ToolCallFunctionCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? arguments = null, + }) { + return _then(_value.copyWith( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + arguments: null == arguments + ? _value.arguments + : arguments // ignore: cast_nullable_to_non_nullable + as Map, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ToolCallFunctionImplCopyWith<$Res> + implements $ToolCallFunctionCopyWith<$Res> { + factory _$$ToolCallFunctionImplCopyWith(_$ToolCallFunctionImpl value, + $Res Function(_$ToolCallFunctionImpl) then) = + __$$ToolCallFunctionImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String name, Map arguments}); +} + +/// @nodoc +class __$$ToolCallFunctionImplCopyWithImpl<$Res> + extends _$ToolCallFunctionCopyWithImpl<$Res, _$ToolCallFunctionImpl> + implements _$$ToolCallFunctionImplCopyWith<$Res> { + __$$ToolCallFunctionImplCopyWithImpl(_$ToolCallFunctionImpl _value, + $Res Function(_$ToolCallFunctionImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? arguments = null, + }) { + return _then(_$ToolCallFunctionImpl( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + arguments: null == arguments + ? _value._arguments + : arguments // ignore: cast_nullable_to_non_nullable + as Map, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolCallFunctionImpl extends _ToolCallFunction { + const _$ToolCallFunctionImpl( + {required this.name, required final Map arguments}) + : _arguments = arguments, + super._(); + + factory _$ToolCallFunctionImpl.fromJson(Map json) => + _$$ToolCallFunctionImplFromJson(json); + + /// The name of the function to be called. + @override + final String name; + + /// The arguments to pass to the function. + final Map _arguments; + + /// The arguments to pass to the function. 
+ @override + Map get arguments { + if (_arguments is EqualUnmodifiableMapView) return _arguments; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_arguments); + } + + @override + String toString() { + return 'ToolCallFunction(name: $name, arguments: $arguments)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolCallFunctionImpl && + (identical(other.name, name) || other.name == name) && + const DeepCollectionEquality() + .equals(other._arguments, _arguments)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, name, const DeepCollectionEquality().hash(_arguments)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolCallFunctionImplCopyWith<_$ToolCallFunctionImpl> get copyWith => + __$$ToolCallFunctionImplCopyWithImpl<_$ToolCallFunctionImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ToolCallFunctionImplToJson( + this, + ); + } +} + +abstract class _ToolCallFunction extends ToolCallFunction { + const factory _ToolCallFunction( + {required final String name, + required final Map arguments}) = _$ToolCallFunctionImpl; + const _ToolCallFunction._() : super._(); + + factory _ToolCallFunction.fromJson(Map json) = + _$ToolCallFunctionImpl.fromJson; + + @override + + /// The name of the function to be called. + String get name; + @override + + /// The arguments to pass to the function. + Map get arguments; + @override + @JsonKey(ignore: true) + _$$ToolCallFunctionImplCopyWith<_$ToolCallFunctionImpl> get copyWith => + throw _privateConstructorUsedError; +} + GenerateEmbeddingRequest _$GenerateEmbeddingRequestFromJson( Map json) { return _GenerateEmbeddingRequest.fromJson(json); diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index e5d46d53..fbf45bc0 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -211,6 +211,9 @@ _$GenerateChatCompletionRequestImpl json['options'] as Map), stream: json['stream'] as bool? ?? false, keepAlive: json['keep_alive'] as int?, + tools: (json['tools'] as List?) + ?.map((e) => Tool.fromJson(e as Map)) + .toList(), ); Map _$$GenerateChatCompletionRequestImplToJson( @@ -230,18 +233,17 @@ Map _$$GenerateChatCompletionRequestImplToJson( writeNotNull('options', instance.options?.toJson()); val['stream'] = instance.stream; writeNotNull('keep_alive', instance.keepAlive); + writeNotNull('tools', instance.tools?.map((e) => e.toJson()).toList()); return val; } _$GenerateChatCompletionResponseImpl _$$GenerateChatCompletionResponseImplFromJson(Map json) => _$GenerateChatCompletionResponseImpl( - message: json['message'] == null - ? 
null - : Message.fromJson(json['message'] as Map), - model: json['model'] as String?, - createdAt: json['created_at'] as String?, - done: json['done'] as bool?, + message: Message.fromJson(json['message'] as Map), + model: json['model'] as String, + createdAt: json['created_at'] as String, + done: json['done'] as bool, doneReason: $enumDecodeNullable( _$DoneReasonEnumMap, json['done_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), @@ -255,7 +257,12 @@ _$GenerateChatCompletionResponseImpl Map _$$GenerateChatCompletionResponseImplToJson( _$GenerateChatCompletionResponseImpl instance) { - final val = {}; + final val = { + 'message': instance.message.toJson(), + 'model': instance.model, + 'created_at': instance.createdAt, + 'done': instance.done, + }; void writeNotNull(String key, dynamic value) { if (value != null) { @@ -263,10 +270,6 @@ Map _$$GenerateChatCompletionResponseImplToJson( } } - writeNotNull('message', instance.message?.toJson()); - writeNotNull('model', instance.model); - writeNotNull('created_at', instance.createdAt); - writeNotNull('done', instance.done); writeNotNull('done_reason', _$DoneReasonEnumMap[instance.doneReason]); writeNotNull('total_duration', instance.totalDuration); writeNotNull('load_duration', instance.loadDuration); @@ -289,6 +292,9 @@ _$MessageImpl _$$MessageImplFromJson(Map json) => content: json['content'] as String, images: (json['images'] as List?)?.map((e) => e as String).toList(), + toolCalls: (json['tool_calls'] as List?) + ?.map((e) => ToolCall.fromJson(e as Map)) + .toList(), ); Map _$$MessageImplToJson(_$MessageImpl instance) { @@ -304,6 +310,8 @@ Map _$$MessageImplToJson(_$MessageImpl instance) { } writeNotNull('images', instance.images); + writeNotNull( + 'tool_calls', instance.toolCalls?.map((e) => e.toJson()).toList()); return val; } @@ -311,8 +319,84 @@ const _$MessageRoleEnumMap = { MessageRole.system: 'system', MessageRole.user: 'user', MessageRole.assistant: 'assistant', + MessageRole.tool: 'tool', +}; + +_$ToolImpl _$$ToolImplFromJson(Map json) => _$ToolImpl( + type: $enumDecodeNullable(_$ToolTypeEnumMap, json['type']) ?? + ToolType.function, + function: json['function'] == null + ? null + : ToolFunction.fromJson(json['function'] as Map), + ); + +Map _$$ToolImplToJson(_$ToolImpl instance) { + final val = { + 'type': _$ToolTypeEnumMap[instance.type]!, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('function', instance.function?.toJson()); + return val; +} + +const _$ToolTypeEnumMap = { + ToolType.function: 'function', }; +_$ToolFunctionImpl _$$ToolFunctionImplFromJson(Map json) => + _$ToolFunctionImpl( + name: json['name'] as String, + description: json['description'] as String, + parameters: json['parameters'] as Map, + ); + +Map _$$ToolFunctionImplToJson(_$ToolFunctionImpl instance) => + { + 'name': instance.name, + 'description': instance.description, + 'parameters': instance.parameters, + }; + +_$ToolCallImpl _$$ToolCallImplFromJson(Map json) => + _$ToolCallImpl( + function: json['function'] == null + ? 
null + : ToolCallFunction.fromJson(json['function'] as Map), + ); + +Map _$$ToolCallImplToJson(_$ToolCallImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('function', instance.function?.toJson()); + return val; +} + +_$ToolCallFunctionImpl _$$ToolCallFunctionImplFromJson( + Map json) => + _$ToolCallFunctionImpl( + name: json['name'] as String, + arguments: json['arguments'] as Map, + ); + +Map _$$ToolCallFunctionImplToJson( + _$ToolCallFunctionImpl instance) => + { + 'name': instance.name, + 'arguments': instance.arguments, + }; + _$GenerateEmbeddingRequestImpl _$$GenerateEmbeddingRequestImplFromJson( Map json) => _$GenerateEmbeddingRequestImpl( diff --git a/packages/ollama_dart/lib/src/generated/schema/tool.dart b/packages/ollama_dart/lib/src/generated/schema/tool.dart new file mode 100644 index 00000000..4a225d1a --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool.dart @@ -0,0 +1,53 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: Tool +// ========================================== + +/// A tool the model may call. +@freezed +class Tool with _$Tool { + const Tool._(); + + /// Factory constructor for Tool + const factory Tool({ + /// The type of tool. + @Default(ToolType.function) ToolType type, + + /// A function that the model may call. + @JsonKey(includeIfNull: false) ToolFunction? function, + }) = _Tool; + + /// Object construction from a JSON representation + factory Tool.fromJson(Map json) => _$ToolFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['type', 'function']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'type': type, + 'function': function, + }; + } +} + +// ========================================== +// ENUM: ToolType +// ========================================== + +/// The type of tool. +enum ToolType { + @JsonValue('function') + function, +} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_call.dart b/packages/ollama_dart/lib/src/generated/schema/tool_call.dart new file mode 100644 index 00000000..ec1d82e0 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_call.dart @@ -0,0 +1,40 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ToolCall +// ========================================== + +/// The tool the model wants to call. +@freezed +class ToolCall with _$ToolCall { + const ToolCall._(); + + /// Factory constructor for ToolCall + const factory ToolCall({ + /// The function the model wants to call. + @JsonKey(includeIfNull: false) ToolCallFunction? function, + }) = _ToolCall; + + /// Object construction from a JSON representation + factory ToolCall.fromJson(Map json) => + _$ToolCallFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['function']; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'function': function, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart b/packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart new file mode 100644 index 00000000..4d5e969c --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart @@ -0,0 +1,44 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ToolCallFunction +// ========================================== + +/// The function the model wants to call. +@freezed +class ToolCallFunction with _$ToolCallFunction { + const ToolCallFunction._(); + + /// Factory constructor for ToolCallFunction + const factory ToolCallFunction({ + /// The name of the function to be called. + required String name, + + /// The arguments to pass to the function. + required ToolCallFunctionArgs arguments, + }) = _ToolCallFunction; + + /// Object construction from a JSON representation + factory ToolCallFunction.fromJson(Map json) => + _$ToolCallFunctionFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['name', 'arguments']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'name': name, + 'arguments': arguments, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart b/packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart new file mode 100644 index 00000000..a1d7d7b8 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart @@ -0,0 +1,12 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// TYPE: ToolCallFunctionArgs +// ========================================== + +/// The arguments to pass to the function. +typedef ToolCallFunctionArgs = Map; diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_function.dart b/packages/ollama_dart/lib/src/generated/schema/tool_function.dart new file mode 100644 index 00000000..35d5e8f1 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_function.dart @@ -0,0 +1,52 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ToolFunction +// ========================================== + +/// A function that the model may call. +@freezed +class ToolFunction with _$ToolFunction { + const ToolFunction._(); + + /// Factory constructor for ToolFunction + const factory ToolFunction({ + /// The name of the function to be called. + required String name, + + /// A description of what the function does, used by the model to choose when and how to call the function. + required String description, + + /// The parameters the functions accepts, described as a JSON Schema object. 
+ required ToolFunctionParams parameters, + }) = _ToolFunction; + + /// Object construction from a JSON representation + factory ToolFunction.fromJson(Map json) => + _$ToolFunctionFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'name', + 'description', + 'parameters' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'name': name, + 'description': description, + 'parameters': parameters, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart b/packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart new file mode 100644 index 00000000..89fa74fb --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart @@ -0,0 +1,12 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// TYPE: ToolFunctionParams +// ========================================== + +/// The parameters the functions accepts, described as a JSON Schema object. +typedef ToolFunctionParams = Map; diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index 540b7141..a0a42067 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -602,6 +602,11 @@ components: description: *stream default: false keep_alive: *keep_alive + tools: + type: array + description: A list of tools the model may call. + items: + $ref: '#/components/schemas/Tool' required: - model - messages @@ -654,6 +659,11 @@ components: format: int64 description: Time in nanoseconds spent generating the response. example: 1325948000 + required: + - model + - created_at + - message + - done DoneReason: type: string description: Reason why the model is done generating a response. @@ -668,7 +678,7 @@ components: role: type: string description: The role of the message - enum: [ "system", "user", "assistant" ] + enum: [ "system", "user", "assistant", "tool" ] content: type: string description: The content of the message @@ -680,9 +690,69 @@ components: type: string description: Base64-encoded image (for multimodal models such as llava) example: iVBORw0KGgoAAAANSUhEUgAAAAkAAAANCAIAAAD0YtNRAAAABnRSTlMA/AD+APzoM1ogAAAAWklEQVR4AWP48+8PLkR7uUdzcMvtU8EhdykHKAciEXL3pvw5FQIURaBDJkARoDhY3zEXiCgCHbNBmAlUiyaBkENoxZSDWnOtBmoAQu7TnT+3WuDOA7KBIkAGAGwiNeqjusp/AAAAAElFTkSuQmCC + tool_calls: + type: array + description: A list of tools the model wants to call. + items: + $ref: '#/components/schemas/ToolCall' required: - role - content + Tool: + type: object + description: A tool the model may call. + properties: + type: + type: string + enum: + - function + default: function + description: The type of tool. + function: + $ref: '#/components/schemas/ToolFunction' + ToolFunction: + type: object + description: A function that the model may call. + properties: + name: + type: string + description: The name of the function to be called. + description: + type: string + description: | + A description of what the function does, used by the model to choose when and how to call the function. 
+ parameters: + $ref: '#/components/schemas/ToolFunctionParams' + required: + - name + - description + - parameters + ToolFunctionParams: + type: object + description: The parameters the functions accepts, described as a JSON Schema object. + additionalProperties: true + ToolCall: + type: object + description: The tool the model wants to call. + properties: + function: + $ref: '#/components/schemas/ToolCallFunction' + ToolCallFunction: + type: object + description: The function the model wants to call. + properties: + name: + type: string + description: The name of the function to be called. + arguments: + $ref: '#/components/schemas/ToolCallFunctionArgs' + required: + - name + - arguments + ToolCallFunctionArgs: + type: object + description: The arguments to pass to the function. + additionalProperties: true GenerateEmbeddingRequest: description: Generate embeddings from a model. type: object diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 5da5caa2..c967f29e 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,5 +1,5 @@ name: ollama_dart -description: Dart Client for the Ollama API (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). +description: Dart Client for the Ollama API (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). version: 0.1.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart diff --git a/packages/ollama_dart/test/ollama_dart_chat_test.dart b/packages/ollama_dart/test/ollama_dart_chat_test.dart index 807e1b67..3ed66209 100644 --- a/packages/ollama_dart/test/ollama_dart_chat_test.dart +++ b/packages/ollama_dart/test/ollama_dart_chat_test.dart @@ -7,7 +7,7 @@ void main() { group('Ollama Generate Completions API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'gemma2'; + const defaultModel = 'llama3.1'; const visionModel = 'llava'; setUp(() async { @@ -48,7 +48,7 @@ void main() { expect(response.model, defaultModel); expect(response.createdAt, isNotNull); expect( - response.message?.content, + response.message.content, isNotEmpty, ); expect(response.done, isTrue); @@ -79,7 +79,7 @@ void main() { ); String text = ''; await for (final res in stream) { - text += (res.message?.content ?? 
'').trim(); + text += res.message.content.trim(); } expect(text, contains('123456789')); }); @@ -103,7 +103,7 @@ void main() { format: ResponseFormat.json, ), ); - final generation = res.message?.content.replaceAll(RegExp(r'[\s\n]'), ''); + final generation = res.message.content.replaceAll(RegExp(r'[\s\n]'), ''); expect(generation, contains('[1,2,3,4,5,6,7,8,9]')); }); @@ -125,7 +125,7 @@ void main() { options: RequestOptions(stop: ['4']), ), ); - final generation = res.message?.content.replaceAll(RegExp(r'[\s\n]'), ''); + final generation = res.message.content.replaceAll(RegExp(r'[\s\n]'), ''); expect(generation, contains('123')); expect(generation, isNot(contains('456789'))); expect(res.doneReason, DoneReason.stop); @@ -170,8 +170,65 @@ void main() { ); final res1 = await client.generateChatCompletion(request: request); - final text1 = res1.message?.content; + final text1 = res1.message.content; expect(text1, contains('star')); }); + + test('Test tool calling', () async { + const tool = Tool( + function: ToolFunction( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + parameters: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ), + ); + + final res = await client.generateChatCompletion( + request: const GenerateChatCompletionRequest( + model: defaultModel, + messages: [ + Message( + role: MessageRole.system, + content: 'You are a helpful assistant.', + ), + Message( + role: MessageRole.user, + content: + 'What’s the weather like in Boston and Barcelona in celsius?', + ), + ], + tools: [tool], + keepAlive: 1, + ), + ); + // https://github.com/ollama/ollama/issues/5796 + expect(res.doneReason, DoneReason.stop); + expect(res.message.role, MessageRole.assistant); + expect(res.message.content, isEmpty); + final toolCalls = res.message.toolCalls; + expect(toolCalls, hasLength(2)); + final toolCall1 = toolCalls?.first.function; + expect(toolCall1?.name, tool.function?.name); + expect(toolCall1?.arguments['location'], contains('Boston')); + expect(toolCall1?.arguments['unit'], 'celsius'); + final toolCall2 = toolCalls?.last.function; + expect(toolCall2?.name, tool.function?.name); + expect(toolCall2?.arguments['location'], contains('Barcelona')); + expect(toolCall2?.arguments['unit'], 'celsius'); + }); }); } From c7910de3f6860ff5096d9d173340fc2e05c920a8 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 24 Jul 2024 23:54:12 +0200 Subject: [PATCH 080/251] feat: Add tool calling support in ChatOllama (#505) --- .../models/chat_models/how_to/tools.md | 3 +- .../chat_models/integrations/anthropic.md | 4 +- .../models/chat_models/integrations/ollama.md | 51 +++++ .../chat_models/integrations/ollama.dart | 43 +++++ packages/langchain/README.md | 4 +- .../chat_models/chat_ollama/chat_ollama.dart | 61 +----- .../src/chat_models/chat_ollama/mappers.dart | 178 +++++++++++++++--- .../src/chat_models/chat_ollama/types.dart | 2 + packages/langchain_ollama/pubspec.yaml | 2 +- .../test/chat_models/chat_ollama_test.dart | 87 ++++++++- 10 files changed, 347 insertions(+), 88 deletions(-) diff --git a/docs/modules/model_io/models/chat_models/how_to/tools.md b/docs/modules/model_io/models/chat_models/how_to/tools.md index 0303be9c..11bf5f3e 100644 --- 
a/docs/modules/model_io/models/chat_models/how_to/tools.md +++ b/docs/modules/model_io/models/chat_models/how_to/tools.md @@ -4,9 +4,10 @@ > Tool calling is currently supported by: > - [`ChatAnthropic`](/modules/model_io/models/chat_models/integrations/anthropic.md) -> - [`ChatOpenAI`](/modules/model_io/models/chat_models/integrations/openai.md) > - [`ChatFirebaseVertexAI`](/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md) > - [`ChatGoogleGenerativeAI`](/modules/model_io/models/chat_models/integrations/googleai.md) +> - [`ChatOllama`](/modules/model_io/models/chat_models/integrations/ollama.md) +> - [`ChatOpenAI`](/modules/model_io/models/chat_models/integrations/openai.md) Tool calling allows a model to respond to a given prompt by generating output that matches a user-defined schema. While the name implies that the model is performing some action, this is actually not the case! The model is coming up with the arguments to a tool, and actually running the tool (or not) is up to the user - for example, if you want to extract output matching some schema from unstructured text, you could give the model an “extraction” tool that takes parameters matching the desired schema, then treat the generated output as your final result. diff --git a/docs/modules/model_io/models/chat_models/integrations/anthropic.md b/docs/modules/model_io/models/chat_models/integrations/anthropic.md index b3e99c84..b607ddc7 100644 --- a/docs/modules/model_io/models/chat_models/integrations/anthropic.md +++ b/docs/modules/model_io/models/chat_models/integrations/anthropic.md @@ -112,7 +112,7 @@ await stream.forEach(print); `ChatAnthropic` supports tool calling. -Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. +Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. Example: ```dart @@ -124,7 +124,7 @@ const tool = ToolSpec( 'properties': { 'location': { 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', + 'description': 'The city and country, e.g. San Francisco, US', }, }, 'required': ['location'], diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index 37110289..9c9339e8 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -125,6 +125,57 @@ print(res.output.content); // -> 'An Apple' ``` +## Tool calling + +`ChatOllama` supports tool calling. + +Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. + +**Notes:** +- Tool calling requires Ollama 0.2.8 or newer. +- Streaming tool calls is not supported at the moment. +- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1)). + +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + }, + 'required': ['location'], + }, +); + +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [tool], + ), +); + +final res = await chatModel.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +print(res.output.toolCalls); +// [AIChatMessageToolCall{ +// id: a621064b-03b3-4ca6-8278-f37504901034, +// name: get_current_weather, +// arguments: {location: Boston, US}, +// }, +// AIChatMessageToolCall{ +// id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, +// name: get_current_weather, +// arguments: {location: Madrid, ES}, +// }] +``` + ## RAG (Retrieval-Augmented Generation) pipeline We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `ChatOllama`. diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart index 4e5cf3b5..5fa3bcd0 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart @@ -10,6 +10,7 @@ void main(final List arguments) async { await _chatOllamaStreaming(); await _chatOllamaJsonMode(); await _chatOllamaMultimodal(); + await _chatOllamaToolCalling(); await _rag(); } @@ -94,6 +95,48 @@ Future _chatOllamaJsonMode() async { // {Spain: 46735727, The Netherlands: 17398435, France: 65273538} } +Future _chatOllamaToolCalling() async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + }, + 'required': ['location'], + }, + ); + + final chatModel = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [tool], + ), + ); + + final res = await chatModel.invoke( + PromptValue.string( + 'What’s the weather like in Boston and Madrid right now in celsius?', + ), + ); + print(res.output.toolCalls); + // [AIChatMessageToolCall{ + // id: a621064b-03b3-4ca6-8278-f37504901034, + // name: get_current_weather, + // arguments: {location: Boston, US}, + // }, + // AIChatMessageToolCall{ + // id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, + // name: get_current_weather, + // arguments: {location: Madrid, ES}, + // }] +} + Future _chatOllamaMultimodal() async { final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 51793fa8..fc16ffa9 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -71,7 +71,7 @@ Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/pack | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | | [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) 
| | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3.1, Gemma 2, Phi-3, Mistral nemo, WizardLM-2, CodeGemma, Command R, LLaVA, DBRX, Qwen, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | | [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | | [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | | [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | @@ -109,7 +109,7 @@ The following integrations are available in LangChain.dart: | [ChatFirebaseVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/firebase_vertex_ai) | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | ✔ | ✔ | ✔ | [Vertex AI for Firebase API](https://firebase.google.com/docs/vertex-ai) (aka Gemini API) | | [ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) | | [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://ollama.ai) | -| [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | | [Ollama Chat API](https://ollama.ai) | +| [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) | | [ChatOllamaTools](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama_tools) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) with tool-calling capabilities | | [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | 
[langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) | | [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) | diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart index 2ff391ef..7317dd69 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart @@ -5,7 +5,6 @@ import 'package:langchain_tiktoken/langchain_tiktoken.dart'; import 'package:ollama_dart/ollama_dart.dart'; import 'package:uuid/uuid.dart'; -import '../../llms/mappers.dart'; import 'mappers.dart'; import 'types.dart'; @@ -13,7 +12,7 @@ import 'types.dart'; /// to interact with the LLMs in a chat-like fashion. /// /// Ollama allows you to run open-source large language models, -/// such as Llama 3 or LLaVA, locally. +/// such as Llama 3.1, Gemma 2 or LLaVA, locally. /// /// For a complete list of supported models and model variants, see the /// [Ollama model library](https://ollama.ai/library). @@ -37,7 +36,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for Llama 3: `ollama pull llama3` +/// * e.g., for Llama 3: `ollama pull llama3.1` /// /// ### Ollama base URL /// @@ -188,9 +187,10 @@ class ChatOllama extends BaseChatModel { }) async { final id = _uuid.v4(); final completion = await _client.generateChatCompletion( - request: _generateCompletionRequest( + request: generateChatCompletionRequest( input.toChatMessages(), options: options, + defaultOptions: defaultOptions, ), ); return completion.toChatResult(id); @@ -204,9 +204,11 @@ class ChatOllama extends BaseChatModel { final id = _uuid.v4(); return _client .generateChatCompletionStream( - request: _generateCompletionRequest( + request: generateChatCompletionRequest( input.toChatMessages(), options: options, + defaultOptions: defaultOptions, + stream: true, ), ) .map( @@ -214,55 +216,6 @@ class ChatOllama extends BaseChatModel { ); } - /// Creates a [GenerateChatCompletionRequest] from the given input. - GenerateChatCompletionRequest _generateCompletionRequest( - final List messages, { - final bool stream = false, - final ChatOllamaOptions? options, - }) { - return GenerateChatCompletionRequest( - model: options?.model ?? defaultOptions.model ?? defaultModel, - messages: messages.toMessages(), - format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), - keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, - stream: stream, - options: RequestOptions( - numKeep: options?.numKeep ?? defaultOptions.numKeep, - seed: options?.seed ?? defaultOptions.seed, - numPredict: options?.numPredict ?? defaultOptions.numPredict, - topK: options?.topK ?? defaultOptions.topK, - topP: options?.topP ?? 
defaultOptions.topP, - tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, - typicalP: options?.typicalP ?? defaultOptions.typicalP, - repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, - temperature: options?.temperature ?? defaultOptions.temperature, - repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty, - presencePenalty: - options?.presencePenalty ?? defaultOptions.presencePenalty, - frequencyPenalty: - options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, - mirostat: options?.mirostat ?? defaultOptions.mirostat, - mirostatTau: options?.mirostatTau ?? defaultOptions.mirostatTau, - mirostatEta: options?.mirostatEta ?? defaultOptions.mirostatEta, - penalizeNewline: - options?.penalizeNewline ?? defaultOptions.penalizeNewline, - stop: options?.stop ?? defaultOptions.stop, - numa: options?.numa ?? defaultOptions.numa, - numCtx: options?.numCtx ?? defaultOptions.numCtx, - numBatch: options?.numBatch ?? defaultOptions.numBatch, - numGpu: options?.numGpu ?? defaultOptions.numGpu, - mainGpu: options?.mainGpu ?? defaultOptions.mainGpu, - lowVram: options?.lowVram ?? defaultOptions.lowVram, - f16Kv: options?.f16KV ?? defaultOptions.f16KV, - logitsAll: options?.logitsAll ?? defaultOptions.logitsAll, - vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly, - useMmap: options?.useMmap ?? defaultOptions.useMmap, - useMlock: options?.useMlock ?? defaultOptions.useMlock, - numThread: options?.numThread ?? defaultOptions.numThread, - ), - ); - } - /// Tokenizes the given prompt using tiktoken. /// /// Currently Ollama does not provide a tokenizer for the models it supports. diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart index d8b31e61..0c543a9c 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart @@ -1,47 +1,138 @@ // ignore_for_file: public_member_api_docs +import 'dart:convert'; + import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/language_models.dart'; -import 'package:ollama_dart/ollama_dart.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:ollama_dart/ollama_dart.dart' as o; +import 'package:uuid/uuid.dart'; + +import '../../llms/mappers.dart' show OllamaResponseFormatMapper; +import 'chat_ollama.dart'; +import 'types.dart'; + +/// Creates a [GenerateChatCompletionRequest] from the given input. +o.GenerateChatCompletionRequest generateChatCompletionRequest( + final List messages, { + required final ChatOllamaOptions? options, + required final ChatOllamaOptions defaultOptions, + final bool stream = false, +}) { + return o.GenerateChatCompletionRequest( + model: options?.model ?? defaultOptions.model ?? ChatOllama.defaultModel, + messages: messages.toMessages(), + format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), + keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, + tools: _mapTools( + tools: options?.tools ?? defaultOptions.tools, + toolChoice: options?.toolChoice ?? defaultOptions.toolChoice, + ), + stream: stream, + options: o.RequestOptions( + numKeep: options?.numKeep ?? defaultOptions.numKeep, + seed: options?.seed ?? defaultOptions.seed, + numPredict: options?.numPredict ?? defaultOptions.numPredict, + topK: options?.topK ?? defaultOptions.topK, + topP: options?.topP ?? defaultOptions.topP, + tfsZ: options?.tfsZ ?? 
defaultOptions.tfsZ, + typicalP: options?.typicalP ?? defaultOptions.typicalP, + repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, + temperature: options?.temperature ?? defaultOptions.temperature, + repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty, + presencePenalty: + options?.presencePenalty ?? defaultOptions.presencePenalty, + frequencyPenalty: + options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, + mirostat: options?.mirostat ?? defaultOptions.mirostat, + mirostatTau: options?.mirostatTau ?? defaultOptions.mirostatTau, + mirostatEta: options?.mirostatEta ?? defaultOptions.mirostatEta, + penalizeNewline: + options?.penalizeNewline ?? defaultOptions.penalizeNewline, + stop: options?.stop ?? defaultOptions.stop, + numa: options?.numa ?? defaultOptions.numa, + numCtx: options?.numCtx ?? defaultOptions.numCtx, + numBatch: options?.numBatch ?? defaultOptions.numBatch, + numGpu: options?.numGpu ?? defaultOptions.numGpu, + mainGpu: options?.mainGpu ?? defaultOptions.mainGpu, + lowVram: options?.lowVram ?? defaultOptions.lowVram, + f16Kv: options?.f16KV ?? defaultOptions.f16KV, + logitsAll: options?.logitsAll ?? defaultOptions.logitsAll, + vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly, + useMmap: options?.useMmap ?? defaultOptions.useMmap, + useMlock: options?.useMlock ?? defaultOptions.useMlock, + numThread: options?.numThread ?? defaultOptions.numThread, + ), + ); +} + +List? _mapTools({ + final List? tools, + final ChatToolChoice? toolChoice, +}) { + if (tools == null || tools.isEmpty) { + return null; + } + + return switch (toolChoice) { + ChatToolChoiceNone() => null, + ChatToolChoiceAuto() || + ChatToolChoiceRequired() || + null => + tools.map(_mapTool).toList(growable: false), + final ChatToolChoiceForced f => [ + _mapTool(tools.firstWhere((t) => t.name == f.name)), + ] + }; +} + +o.Tool _mapTool(final ToolSpec tool) { + return o.Tool( + function: o.ToolFunction( + name: tool.name, + description: tool.description, + parameters: tool.inputJsonSchema, + ), + ); +} extension OllamaChatMessagesMapper on List { - List toMessages() { + List toMessages() { return map(_mapMessage).expand((final msg) => msg).toList(growable: false); } - List _mapMessage(final ChatMessage msg) { + List _mapMessage(final ChatMessage msg) { return switch (msg) { final SystemChatMessage msg => [ - Message( - role: MessageRole.system, + o.Message( + role: o.MessageRole.system, content: msg.content, ), ], final HumanChatMessage msg => _mapHumanMessage(msg), - final AIChatMessage msg => [ - Message( - role: MessageRole.assistant, + final AIChatMessage msg => _mapAIMessage(msg), + final ToolChatMessage msg => [ + o.Message( + role: o.MessageRole.tool, content: msg.content, ), ], - ToolChatMessage() => - throw UnsupportedError('Ollama does not support tool calls'), CustomChatMessage() => throw UnsupportedError('Ollama does not support custom messages'), }; } - List _mapHumanMessage(final HumanChatMessage message) { + List _mapHumanMessage(final HumanChatMessage message) { return switch (message.content) { final ChatMessageContentText c => [ - Message( - role: MessageRole.user, + o.Message( + role: o.MessageRole.user, content: c.text, ), ], final ChatMessageContentImage c => [ - Message( - role: MessageRole.user, + o.Message( + role: o.MessageRole.user, content: c.data, ), ], @@ -49,7 +140,7 @@ extension OllamaChatMessagesMapper on List { }; } - List _mapContentMultiModal( + List _mapContentMultiModal( final ChatMessageContentMultiModal content, ) { final parts = 
content.parts.groupListsBy((final p) => p.runtimeType); @@ -63,8 +154,8 @@ extension OllamaChatMessagesMapper on List { // If there's only one text part and the rest are images, then we combine them in one message if ((parts[ChatMessageContentText]?.length ?? 0) == 1) { return [ - Message( - role: MessageRole.user, + o.Message( + role: o.MessageRole.user, content: (parts[ChatMessageContentText]!.first as ChatMessageContentText) .text, @@ -79,12 +170,12 @@ extension OllamaChatMessagesMapper on List { return content.parts .map( (final p) => switch (p) { - final ChatMessageContentText c => Message( - role: MessageRole.user, + final ChatMessageContentText c => o.Message( + role: o.MessageRole.user, content: c.text, ), - final ChatMessageContentImage c => Message( - role: MessageRole.user, + final ChatMessageContentImage c => o.Message( + role: o.MessageRole.user, content: c.data, ), ChatMessageContentMultiModal() => throw UnsupportedError( @@ -94,14 +185,38 @@ extension OllamaChatMessagesMapper on List { ) .toList(growable: false); } + + List _mapAIMessage(final AIChatMessage message) { + return [ + o.Message( + role: o.MessageRole.assistant, + content: message.content, + toolCalls: message.toolCalls.isNotEmpty + ? message.toolCalls.map(_mapToolCall).toList(growable: false) + : null, + ), + ]; + } + + o.ToolCall _mapToolCall(final AIChatMessageToolCall toolCall) { + return o.ToolCall( + function: o.ToolCallFunction( + name: toolCall.name, + arguments: toolCall.arguments, + ), + ); + } } -extension ChatResultMapper on GenerateChatCompletionResponse { +extension ChatResultMapper on o.GenerateChatCompletionResponse { ChatResult toChatResult(final String id, {final bool streaming = false}) { return ChatResult( id: id, output: AIChatMessage( content: message.content, + toolCalls: + message.toolCalls?.map(_mapToolCall).toList(growable: false) ?? + const [], ), finishReason: _mapFinishReason(doneReason), metadata: { @@ -120,6 +235,15 @@ extension ChatResultMapper on GenerateChatCompletionResponse { ); } + AIChatMessageToolCall _mapToolCall(final o.ToolCall toolCall) { + return AIChatMessageToolCall( + id: const Uuid().v4(), + name: toolCall.function?.name ?? '', + argumentsRaw: json.encode(toolCall.function?.arguments ?? const {}), + arguments: toolCall.function?.arguments ?? const {}, + ); + } + LanguageModelUsage _mapUsage() { return LanguageModelUsage( promptTokens: promptEvalCount, @@ -131,12 +255,12 @@ extension ChatResultMapper on GenerateChatCompletionResponse { } FinishReason _mapFinishReason( - final DoneReason? reason, + final o.DoneReason? 
reason, ) => switch (reason) { - DoneReason.stop => FinishReason.stop, - DoneReason.length => FinishReason.length, - DoneReason.load => FinishReason.unspecified, + o.DoneReason.stop => FinishReason.stop, + o.DoneReason.length => FinishReason.length, + o.DoneReason.load => FinishReason.unspecified, null => FinishReason.unspecified, }; } diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart index 137d0bdf..1b3b4d77 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart @@ -47,6 +47,8 @@ class ChatOllamaOptions extends ChatModelOptions { this.useMmap, this.useMlock, this.numThread, + super.tools, + super.toolChoice, super.concurrencyLimit, }); diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 0214a6c7..33a60f44 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,5 +1,5 @@ name: langchain_ollama -description: LangChain.dart integration module for Ollama (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). +description: LangChain.dart integration module for Ollama (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). version: 0.2.2+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart index 7e001289..66167f0f 100644 --- a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart +++ b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart @@ -6,13 +6,14 @@ import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/output_parsers.dart'; import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; import 'package:langchain_ollama/langchain_ollama.dart'; import 'package:test/test.dart'; void main() { group('ChatOllama tests', skip: Platform.environment.containsKey('CI'), () { late ChatOllama chatModel; - const defaultModel = 'llama3:latest'; + const defaultModel = 'llama3.1'; const visionModel = 'llava:latest'; setUp(() async { @@ -251,5 +252,89 @@ void main() { expect(res.output.content.toLowerCase(), contains('apple')); }); + + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + final model = chatModel.bind( + const ChatOllamaOptions( + model: defaultModel, + tools: [tool], + ), + ); + + final humanMessage = ChatMessage.humanText( + "What's the weather like in Boston and Madrid right now in celsius?", + ); + final res1 = await model.invoke(PromptValue.chat([humanMessage])); + + final aiMessage1 = res1.output; + expect(aiMessage1.toolCalls, hasLength(2)); + + final toolCall1 = aiMessage1.toolCalls.first; + expect(toolCall1.name, tool.name); + expect(toolCall1.arguments.containsKey('location'), isTrue); + expect(toolCall1.arguments['location'], contains('Boston')); + expect(toolCall1.arguments['unit'], 'celsius'); + + final toolCall2 = aiMessage1.toolCalls.last; + expect(toolCall2.name, tool.name); + expect(toolCall2.arguments.containsKey('location'), isTrue); + expect(toolCall2.arguments['location'], contains('Madrid')); + expect(toolCall2.arguments['unit'], 'celsius'); + + final functionResult1 = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + final functionMessage1 = ChatMessage.tool( + toolCallId: toolCall1.id, + content: json.encode(functionResult1), + ); + + final functionResult2 = { + 'temperature': '25', + 'unit': 'celsius', + 'description': 'Cloudy', + }; + final functionMessage2 = ChatMessage.tool( + toolCallId: toolCall2.id, + content: json.encode(functionResult2), + ); + + final res2 = await model.invoke( + PromptValue.chat([ + humanMessage, + aiMessage1, + functionMessage1, + functionMessage2, + ]), + ); + + final aiMessage2 = res2.output; + + expect(aiMessage2.toolCalls, isEmpty); + expect(aiMessage2.content, contains('22')); + expect(aiMessage2.content, contains('25')); + }); }); } From 64c844a59f6bccd839b468f967d4b44cc434bb9a Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 25 Jul 2024 09:58:34 +0200 Subject: [PATCH 081/251] feat!: Update Ollama default model to llama-3.1 (#506) --- docs/expression_language/primitives/router.md | 6 +++--- .../models/chat_models/integrations/ollama.md | 12 +++++------ .../models/llms/integrations/ollama.md | 6 +++--- .../text_embedding/integrations/ollama.md | 2 +- .../vector_stores/integrations/objectbox.md | 2 +- .../expression_language/cookbook/routing.dart | 6 +++--- .../chat_models/integrations/ollama.dart | 10 +++++----- .../models/llms/integrations/ollama.dart | 4 ++-- .../vector_stores/integrations/objectbox.dart | 2 +- .../lib/home/bloc/providers.dart | 2 +- examples/wikivoyage_eu/README.md | 6 +++--- examples/wikivoyage_eu/bin/wikivoyage_eu.dart | 2 +- examples/wikivoyage_eu/pubspec.yaml | 2 +- .../vector_stores/objectbox/objectbox.dart | 2 +- .../chat_models/chat_ollama/chat_ollama.dart | 8 ++++---- .../chat_ollama_tools/chat_ollama_tools.dart | 6 +++--- .../lib/src/embeddings/ollama_embeddings.dart | 6 +++--- .../langchain_ollama/lib/src/llms/ollama.dart | 10 +++++----- .../test/embeddings/ollama_test.dart | 2 +- .../test/llms/ollama_test.dart | 2 +- .../example/ollama_dart_example.dart | 6 +++--- packages/ollama_dart/oas/ollama-curated.yaml | 20 +++++++++---------- 22 files changed, 62 insertions(+), 62 deletions(-) diff --git a/docs/expression_language/primitives/router.md b/docs/expression_language/primitives/router.md index 15b6f8ad..effd5f66 100644 --- a/docs/expression_language/primitives/router.md +++ b/docs/expression_language/primitives/router.md @@ -12,7 
+12,7 @@ First, let’s create a chain that will identify incoming questions as being abo ```dart final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3'), + defaultOptions: ChatOllamaOptions(model: 'llama3.1'), ); final classificationChain = PromptTemplate.fromTemplate(''' @@ -131,7 +131,7 @@ Here is a question: {query} '''; -final embeddings = OllamaEmbeddings(model: 'llama3'); +final embeddings = OllamaEmbeddings(model: 'llama3.1'); final promptTemplates = [physicsTemplate, historyTemplate]; final promptEmbeddings = await embeddings.embedDocuments( promptTemplates.map((final pt) => Document(pageContent: pt)).toList(), @@ -146,7 +146,7 @@ final chain = Runnable.fromMap({'query': Runnable.passthrough()}) | return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]); }) | ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), ) | StringOutputParser(); diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index 9c9339e8..1e440c83 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -16,7 +16,7 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull llama3` + * e.g., for Llama 3: `ollama pull llama3.1` ## Usage @@ -28,7 +28,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', temperature: 0, ), ); @@ -53,7 +53,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ ]); final chat = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', temperature: 0, ), ); @@ -79,7 +79,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ ]); final chat = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', temperature: 0, format: OllamaResponseFormat.json, ), @@ -183,7 +183,7 @@ We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `Ch ```dart // 1. Create a vector store and add documents to it final vectorStore = MemoryVectorStore( - embeddings: OllamaEmbeddings(model: 'llama3'), + embeddings: OllamaEmbeddings(model: 'llama3.1'), ); await vectorStore.addDocuments( documents: [ @@ -200,7 +200,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ // 3. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3'), + defaultOptions: ChatOllamaOptions(model: 'llama3.1'), ); final retriever = vectorStore.asRetriever( defaultOptions: VectorStoreRetrieverOptions( diff --git a/docs/modules/model_io/models/llms/integrations/ollama.md b/docs/modules/model_io/models/llms/integrations/ollama.md index 3a90917c..c139e7d9 100644 --- a/docs/modules/model_io/models/llms/integrations/ollama.md +++ b/docs/modules/model_io/models/llms/integrations/ollama.md @@ -16,7 +16,7 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. 
Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull llama3` + * e.g., for Llama 3: `ollama pull llama3.1` ## Usage @@ -26,7 +26,7 @@ final prompt = PromptTemplate.fromTemplate( ); final llm = Ollama( defaultOptions: OllamaOptions( - model: 'llama3', + model: 'llama3.1', ), ); final chain = prompt | llm | StringOutputParser(); @@ -43,7 +43,7 @@ final promptTemplate = PromptTemplate.fromTemplate( ); final llm = Ollama( defaultOptions: OllamaOptions( - model: 'llama3', + model: 'llama3.1', ), ); final chain = promptTemplate | llm | StringOutputParser(); diff --git a/docs/modules/retrieval/text_embedding/integrations/ollama.md b/docs/modules/retrieval/text_embedding/integrations/ollama.md index 395b1203..fc83bbb5 100644 --- a/docs/modules/retrieval/text_embedding/integrations/ollama.md +++ b/docs/modules/retrieval/text_embedding/integrations/ollama.md @@ -1,7 +1,7 @@ # OllamaEmbeddings ```dart -final embeddings = OllamaEmbeddings(model: 'llama3'); +final embeddings = OllamaEmbeddings(model: 'llama3.1'); const text = 'This is a test document.'; final res = await embeddings.embedQuery(text); final res = await embeddings.embedDocuments([text]); diff --git a/docs/modules/retrieval/vector_stores/integrations/objectbox.md b/docs/modules/retrieval/vector_stores/integrations/objectbox.md index 9c165306..0ac3dd9b 100644 --- a/docs/modules/retrieval/vector_stores/integrations/objectbox.md +++ b/docs/modules/retrieval/vector_stores/integrations/objectbox.md @@ -205,7 +205,7 @@ Sources: // 6. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3:8b'), + defaultOptions: ChatOllamaOptions(model: 'llama3.1'), ); final retriever = vectorStore.asRetriever(); diff --git a/examples/docs_examples/bin/expression_language/cookbook/routing.dart b/examples/docs_examples/bin/expression_language/cookbook/routing.dart index d177611d..79bbd348 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/routing.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/routing.dart @@ -9,7 +9,7 @@ void main(final List arguments) async { Future _runnableRouter() async { final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), ); final classificationChain = PromptTemplate.fromTemplate(''' @@ -114,7 +114,7 @@ Here is a question: '''; final embeddings = OllamaEmbeddings( - model: 'llama3', + model: 'llama3.1', ); final promptTemplates = [physicsTemplate, historyTemplate]; final promptEmbeddings = await embeddings.embedDocuments( @@ -132,7 +132,7 @@ Here is a question: return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]); }) | ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), ) | const StringOutputParser(); diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart index 5fa3bcd0..5c47bb0e 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart @@ -25,7 +25,7 @@ Future _chatOllama() async { final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', temperature: 0, ), ); @@ -52,7 +52,7 @@ Future 
_chatOllamaStreaming() async { ]); final chat = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', temperature: 0, ), ); @@ -77,7 +77,7 @@ Future _chatOllamaJsonMode() async { ]); final chat = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', temperature: 0, format: OllamaResponseFormat.json, ), @@ -162,7 +162,7 @@ Future _chatOllamaMultimodal() async { Future _rag() async { // 1. Create a vector store and add documents to it final vectorStore = MemoryVectorStore( - embeddings: OllamaEmbeddings(model: 'llama3'), + embeddings: OllamaEmbeddings(model: 'llama3.1'), ); await vectorStore.addDocuments( documents: [ @@ -184,7 +184,7 @@ Future _rag() async { // 3. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), ); final retriever = vectorStore.asRetriever( defaultOptions: const VectorStoreRetrieverOptions( diff --git a/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart index 2095d341..eb019a6b 100644 --- a/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart @@ -13,7 +13,7 @@ Future _ollama() async { ); final llm = Ollama( defaultOptions: const OllamaOptions( - model: 'llama3', + model: 'llama3.1', ), ); @@ -29,7 +29,7 @@ Future _ollamaStreaming() async { ); final llm = Ollama( defaultOptions: const OllamaOptions( - model: 'llama3', + model: 'llama3.1', ), ); const stringOutputParser = StringOutputParser(); diff --git a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart index cd558d1b..6c66d5dc 100644 --- a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart +++ b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart @@ -66,7 +66,7 @@ Sources: // 6. 
Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), ); final retriever = vectorStore.asRetriever(); diff --git a/examples/hello_world_flutter/lib/home/bloc/providers.dart b/examples/hello_world_flutter/lib/home/bloc/providers.dart index c92b87af..1445bec3 100644 --- a/examples/hello_world_flutter/lib/home/bloc/providers.dart +++ b/examples/hello_world_flutter/lib/home/bloc/providers.dart @@ -21,7 +21,7 @@ enum Provider { ), ollama( name: 'Ollama', - defaultModel: 'llama3', + defaultModel: 'llama3.1', defaultBaseUrl: 'http://localhost:11434/api', isRemote: false, ); diff --git a/examples/wikivoyage_eu/README.md b/examples/wikivoyage_eu/README.md index 07bc5073..cc573899 100644 --- a/examples/wikivoyage_eu/README.md +++ b/examples/wikivoyage_eu/README.md @@ -17,11 +17,11 @@ This example demonstrates how to build a fully local Retrieval Augmented Generat - For this example we will be using the following models: * Embedding model: [`jina/jina-embeddings-v2-small-en`](https://ollama.com/jina/jina-embeddings-v2-small-en) - * LLM: [`llama3:8b`](https://ollama.com/library/llama3) + * LLM: [`llama3.1`](https://ollama.com/library/llama3.1) - Open your terminal and run: ```bash ollama pull jina/jina-embeddings-v2-small-en -ollama run llama3:8b +ollama run llama3.1 ``` ### 3. Setup ObjectBox @@ -73,7 +73,7 @@ The chatbot script implements the RAG pipeline. It does the following: 2. Uses the `jina/jina-embeddings-v2-small-en` model to create an embedding for the query. 3. Retrieves the 5 most similar documents from the ObjectBox database. 4. Builds a prompt using the retrieved documents and the query. -5. Uses the `llama3:8b` model to generate a response to the prompt. +5. Uses the `llama3.1` model to generate a response to the prompt. You can run the script using: ```bash diff --git a/examples/wikivoyage_eu/bin/wikivoyage_eu.dart b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart index b1f82689..8123c262 100644 --- a/examples/wikivoyage_eu/bin/wikivoyage_eu.dart +++ b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart @@ -51,7 +51,7 @@ Do not provide any other suggestion if the question is not about Europe. final model = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3', + model: 'llama3.1', ), ); const outputParser = StringOutputParser(); diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index a591713f..70fc19fb 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -1,5 +1,5 @@ name: wikivoyage_eu -description: Wikivoyage EU chatbot using llama3 and ObjectBox. +description: Wikivoyage EU chatbot using llama3.1 and ObjectBox. version: 1.0.0 publish_to: none diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart index 0a3ac27b..94457e54 100644 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart @@ -20,7 +20,7 @@ import 'types.dart'; /// Vector store for the [ObjectBox](https://objectbox.io/) on-device database. 
/// /// ```dart -/// final embeddings = OllamaEmbeddings(model: 'llama3'); +/// final embeddings = OllamaEmbeddings(model: 'llama3.1'); /// final vectorStore = ObjectBoxVectorStore(embeddings: embeddings); /// ``` /// diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart index 7317dd69..8db88d0d 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart @@ -57,7 +57,7 @@ import 'types.dart'; /// ```dart /// final chatModel = ChatOllama( /// defaultOptions: const ChatOllamaOptions( -/// model: 'llama3', +/// model: 'llama3.1', /// temperature: 0, /// format: 'json', /// ), @@ -89,7 +89,7 @@ import 'types.dart'; /// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ -/// 'q1': prompt1 | chatModel.bind(const ChatOllamaOptions(model: 'llama3')) | outputParser, +/// 'q1': prompt1 | chatModel.bind(const ChatOllamaOptions(model: 'llama3.1')) | outputParser, /// 'q2': prompt2| chatModel.bind(const ChatOllamaOptions(model: 'mistral')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); @@ -152,7 +152,7 @@ class ChatOllama extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatOllamaOptions( - model: 'llama3', + model: defaultModel, ), this.encoding = 'cl100k_base', }) : _client = OllamaClient( @@ -178,7 +178,7 @@ class ChatOllama extends BaseChatModel { String get modelType => 'chat-ollama'; /// The default model to use unless another is specified. - static const defaultModel = 'llama3'; + static const defaultModel = 'llama3.1'; @override Future invoke( diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart index 677fd308..82da6a95 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart @@ -41,7 +41,7 @@ import 'types.dart'; /// ); /// final chatModel = ChatOllamaTools( /// defaultOptions: ChatOllamaToolOptions( -/// options: ChatOllamaOptions(model: 'llama3:8b'), +/// options: ChatOllamaOptions(model: 'llama3.1'), /// tools: [tool], /// ), /// ); @@ -55,7 +55,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for Llama 3: `ollama pull llama3` +/// * e.g., for Llama 3: `ollama pull llama3.1` /// /// ### Ollama base URL /// @@ -109,7 +109,7 @@ class ChatOllamaTools extends BaseChatModel { String get modelType => 'chat-ollama-tools'; /// The default model to use unless another is specified. 
- static const defaultModel = 'llama3'; + static const defaultModel = 'llama3.1'; @override Future invoke( diff --git a/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart b/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart index 66ac2edb..bd40cf60 100644 --- a/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart +++ b/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart @@ -13,7 +13,7 @@ import 'package:ollama_dart/ollama_dart.dart'; /// /// Example: /// ```dart -/// final embeddings = OllamaEmbeddings(model: 'llama3'); +/// final embeddings = OllamaEmbeddings(model: 'llama3.1'); /// final res = await embeddings.embedQuery('Hello world'); /// ``` /// @@ -23,7 +23,7 @@ import 'package:ollama_dart/ollama_dart.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for `Llama-7b`: `ollama pull llama3` +/// * e.g., for `Llama-7b`: `ollama pull llama3.1` /// /// ### Advance /// @@ -76,7 +76,7 @@ class OllamaEmbeddings implements Embeddings { /// - `client`: the HTTP client to use. You can set your own HTTP client if /// you need further customization (e.g. to use a Socks5 proxy). OllamaEmbeddings({ - this.model = 'llama3', + this.model = 'llama3.1', this.keepAlive, final String baseUrl = 'http://localhost:11434/api', final Map? headers, diff --git a/packages/langchain_ollama/lib/src/llms/ollama.dart b/packages/langchain_ollama/lib/src/llms/ollama.dart index fd9a8ed4..b3601f6e 100644 --- a/packages/langchain_ollama/lib/src/llms/ollama.dart +++ b/packages/langchain_ollama/lib/src/llms/ollama.dart @@ -20,7 +20,7 @@ import 'types.dart'; /// ```dart /// final llm = Ollama( /// defaultOption: const OllamaOptions( -/// model: 'llama3', +/// model: 'llama3.1', /// temperature: 1, /// ), /// ); @@ -49,7 +49,7 @@ import 'types.dart'; /// ```dart /// final llm = Ollama( /// defaultOptions: const OllamaOptions( -/// model: 'llama3', +/// model: 'llama3.1', /// temperature: 0, /// format: 'json', /// ), @@ -83,7 +83,7 @@ import 'types.dart'; /// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ -/// 'q1': prompt1 | llm.bind(const OllamaOptions(model: 'llama3')) | outputParser, +/// 'q1': prompt1 | llm.bind(const OllamaOptions(model: 'llama3.1')) | outputParser, /// 'q2': prompt2| llm.bind(const OllamaOptions(model: 'mistral')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); @@ -93,7 +93,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for `Llama-7b`: `ollama pull llama3` +/// * e.g., for `Llama-7b`: `ollama pull llama3.1` /// /// ### Advance /// @@ -178,7 +178,7 @@ class Ollama extends BaseLLM { String get modelType => 'ollama'; /// The default model to use unless another is specified. 
- static const defaultModel = 'llama3'; + static const defaultModel = 'llama3.1'; @override Future invoke( diff --git a/packages/langchain_ollama/test/embeddings/ollama_test.dart b/packages/langchain_ollama/test/embeddings/ollama_test.dart index 0f94ad0d..ac8f999e 100644 --- a/packages/langchain_ollama/test/embeddings/ollama_test.dart +++ b/packages/langchain_ollama/test/embeddings/ollama_test.dart @@ -8,7 +8,7 @@ void main() { group('OllamaEmbeddings tests', skip: Platform.environment.containsKey('CI'), () { late OllamaEmbeddings embeddings; - const defaultModel = 'llama3:latest'; + const defaultModel = 'llama3.1'; setUp(() async { embeddings = OllamaEmbeddings( diff --git a/packages/langchain_ollama/test/llms/ollama_test.dart b/packages/langchain_ollama/test/llms/ollama_test.dart index e9a6ac55..d21d0e56 100644 --- a/packages/langchain_ollama/test/llms/ollama_test.dart +++ b/packages/langchain_ollama/test/llms/ollama_test.dart @@ -10,7 +10,7 @@ import 'package:test/test.dart'; void main() { group('Ollama tests', skip: Platform.environment.containsKey('CI'), () { late Ollama llm; - const defaultModel = 'llama3:latest'; + const defaultModel = 'llama3.1'; setUp(() async { llm = Ollama( diff --git a/packages/ollama_dart/example/ollama_dart_example.dart b/packages/ollama_dart/example/ollama_dart_example.dart index e5d11e3c..b1e9361f 100644 --- a/packages/ollama_dart/example/ollama_dart_example.dart +++ b/packages/ollama_dart/example/ollama_dart_example.dart @@ -70,7 +70,7 @@ Future _generateCompletionStream(final OllamaClient client) async { Future _generateChatCompletion(final OllamaClient client) async { final generated = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3:latest', + model: 'llama3.1', messages: [ Message( role: MessageRole.system, @@ -95,7 +95,7 @@ Future _generateChatCompletionWithHistory( ) async { final generated = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3:latest', + model: 'llama3.1', messages: [ Message( role: MessageRole.user, @@ -118,7 +118,7 @@ Future _generateChatCompletionWithHistory( Future _generateChatCompletionStream(final OllamaClient client) async { final stream = client.generateChatCompletionStream( request: const GenerateChatCompletionRequest( - model: 'llama3:latest', + model: 'llama3.1', messages: [ Message( role: MessageRole.system, diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index a0a42067..b7c04cae 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -273,7 +273,7 @@ components: The model name. Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - example: llama3:8b + example: llama3.1 prompt: type: string description: The prompt to generate a response. 
@@ -530,7 +530,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 created_at: type: string format: date-time @@ -587,7 +587,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 messages: type: array description: The messages of the chat, this can be used to keep a chat memory @@ -619,7 +619,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 created_at: type: string format: date-time @@ -760,7 +760,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 prompt: type: string description: Text to generate embeddings for. @@ -837,7 +837,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 modified_at: type: string format: date-time @@ -914,7 +914,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 size: type: integer format: int64 @@ -942,7 +942,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 required: - model ModelInfo: @@ -990,7 +990,7 @@ components: source: type: string description: Name of the model to copy. - example: llama3:8b + example: llama3.1 destination: type: string description: Name of the new model. @@ -1015,7 +1015,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.1 insecure: type: boolean description: | From b47a439fd204530543b74122acf82732796e80a8 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 26 Jul 2024 18:13:46 +0200 Subject: [PATCH 082/251] feat!: Update ChatOpenAI default model to gpt-4o-mini (#507) --- .../cookbook/prompt_llm_parser.md | 6 +- docs/expression_language/get_started.md | 2 +- docs/expression_language/interface.md | 2 +- .../expression_language/primitives/binding.md | 2 +- .../primitives/function.md | 8 +- docs/expression_language/streaming.md | 4 +- .../modules/agents/agent_types/agent_types.md | 2 +- .../models/chat_models/chat_models.md | 2 +- .../cookbook/prompt_llm_parser.dart | 6 +- .../cookbook/streaming.dart | 4 +- .../bin/expression_language/get_started.dart | 2 +- .../bin/expression_language/interface.dart | 2 +- .../primitives/binding.dart | 2 +- .../primitives/function.dart | 8 +- .../lib/src/chains/qa_with_sources.dart | 2 +- .../lib/src/chat_models/chat_openai.dart | 9 +- .../test/chains/qa_with_sources_test.dart | 2 +- .../test/chat_models/chat_openai_test.dart | 3 +- .../test/chat_models/open_router_test.dart | 6 +- packages/openai_dart/README.md | 8 +- .../create_chat_completion_request.dart | 1 - .../generated/schema/create_run_request.dart | 1 - .../schema/create_thread_and_run_request.dart | 1 - .../src/generated/schema/schema.freezed.dart | 6 +- .../lib/src/generated/schema/schema.g.dart | 6 +- packages/openai_dart/oas/openapi_curated.yaml | 2 +- .../test/openai_client_chat_test.dart | 117 ++---------------- 27 files changed, 57 insertions(+), 159 deletions(-) diff --git a/docs/expression_language/cookbook/prompt_llm_parser.md b/docs/expression_language/cookbook/prompt_llm_parser.md index bb9a1a28..e96bf6c1 100644 --- a/docs/expression_language/cookbook/prompt_llm_parser.md +++ b/docs/expression_language/cookbook/prompt_llm_parser.md @@ -33,7 +33,7 @@ print(res); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714835666, // system_fingerprint: 
fp_3b956da36b // }, @@ -74,7 +74,7 @@ print(res); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714835734, // system_fingerprint: fp_a450710239 // }, @@ -144,7 +144,7 @@ print(res); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714835806, // system_fingerprint: fp_3b956da36b // }, diff --git a/docs/expression_language/get_started.md b/docs/expression_language/get_started.md index 70c12b9a..9b51efe6 100644 --- a/docs/expression_language/get_started.md +++ b/docs/expression_language/get_started.md @@ -120,7 +120,7 @@ print(res2); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714327251, // system_fingerprint: fp_3b956da36b // }, diff --git a/docs/expression_language/interface.md b/docs/expression_language/interface.md index 9b7085d8..30fcf890 100644 --- a/docs/expression_language/interface.md +++ b/docs/expression_language/interface.md @@ -107,7 +107,7 @@ final res = await chain.batch( {'topic': 'cats'}, ], options: [ - const ChatOpenAIOptions(model: 'gpt-3.5-turbo', temperature: 0.5), + const ChatOpenAIOptions(model: 'gpt-4o-mini', temperature: 0.5), const ChatOpenAIOptions(model: 'gpt-4', temperature: 0.7), ], ); diff --git a/docs/expression_language/primitives/binding.md b/docs/expression_language/primitives/binding.md index a04a511f..2aeb9575 100644 --- a/docs/expression_language/primitives/binding.md +++ b/docs/expression_language/primitives/binding.md @@ -57,7 +57,7 @@ final chain = Runnable.fromMap({ chatModel.bind(ChatOpenAIOptions(model: 'gpt-4-turbo')) | outputParser, 'q2': prompt2 | - chatModel.bind(ChatOpenAIOptions(model: 'gpt-3.5-turbo')) | + chatModel.bind(ChatOpenAIOptions(model: 'gpt-4o-mini')) | outputParser, }); diff --git a/docs/expression_language/primitives/function.md b/docs/expression_language/primitives/function.md index e0b621fd..88bf731b 100644 --- a/docs/expression_language/primitives/function.md +++ b/docs/expression_language/primitives/function.md @@ -76,7 +76,7 @@ await chain.invoke('x raised to the third plus seven equals 12'); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714463309, // system_fingerprint: fp_3b956da36b // }, @@ -122,7 +122,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){}); // }, // finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -141,7 +141,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){}); // }, // finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -160,7 +160,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){}); // }, // finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, diff --git a/docs/expression_language/streaming.md b/docs/expression_language/streaming.md index 8b4b720f..25725045 100644 --- a/docs/expression_language/streaming.md +++ b/docs/expression_language/streaming.md @@ -49,7 +49,7 @@ print(chunks.first); // }, // finishReason: FinishReason.unspecified, // 
metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, @@ -71,7 +71,7 @@ print(result); // }, // finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, diff --git a/docs/modules/agents/agent_types/agent_types.md b/docs/modules/agents/agent_types/agent_types.md index 229422ee..d6c79bd0 100644 --- a/docs/modules/agents/agent_types/agent_types.md +++ b/docs/modules/agents/agent_types/agent_types.md @@ -8,7 +8,7 @@ response to the user. Here are the agents available in LangChain. ### OpenAI Functions -Certain OpenAI models (like gpt-3.5-turbo-0613 and gpt-4-0613) have been +Certain OpenAI models (like `gpt-3.5-turbo` and `gpt-4`) have been explicitly fine-tuned to detect when a function should to be called and respond with the inputs that should be passed to the function. The OpenAI Functions Agent is designed to work with these models. diff --git a/docs/modules/model_io/models/chat_models/chat_models.md b/docs/modules/model_io/models/chat_models/chat_models.md index 5aabfd23..e191707b 100644 --- a/docs/modules/model_io/models/chat_models/chat_models.md +++ b/docs/modules/model_io/models/chat_models/chat_models.md @@ -93,5 +93,5 @@ print(chatRes1.generations); print(chatRes1.usage?.totalTokens); // -> 36 print(chatRes1.modelOutput); -// -> {id: chatcmpl-7QHTjpTCELFuGbxRaazFqvYtepXOc, created: 2023-06-11 17:41:11.000, model: gpt-3.5-turbo} +// -> {id: chatcmpl-7QHTjpTCELFuGbxRaazFqvYtepXOc, created: 2023-06-11 17:41:11.000, model: gpt-4o-mini} ``` diff --git a/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart b/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart index f34fab19..21cea3b4 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart @@ -32,7 +32,7 @@ Future _promptTemplateLLM() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714835666, // system_fingerprint: fp_3b956da36b // }, @@ -65,7 +65,7 @@ Future _attachingStopSequences() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714835734, // system_fingerprint: fp_a450710239 // }, @@ -133,7 +133,7 @@ Future _attachingToolCallInformation() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714835806, // system_fingerprint: fp_3b956da36b // }, diff --git a/examples/docs_examples/bin/expression_language/cookbook/streaming.dart b/examples/docs_examples/bin/expression_language/cookbook/streaming.dart index 7af0bb43..66e4a7a6 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/streaming.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/streaming.dart @@ -33,7 +33,7 @@ Future _languageModels() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, @@ -49,7 +49,7 @@ Future _languageModels() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714143945, // 
system_fingerprint: fp_3b956da36b // }, diff --git a/examples/docs_examples/bin/expression_language/get_started.dart b/examples/docs_examples/bin/expression_language/get_started.dart index 5ccc2505..c3ecbd1f 100644 --- a/examples/docs_examples/bin/expression_language/get_started.dart +++ b/examples/docs_examples/bin/expression_language/get_started.dart @@ -82,7 +82,7 @@ Future _promptModelOutputParser() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714327251, // system_fingerprint: fp_3b956da36b // }, diff --git a/examples/docs_examples/bin/expression_language/interface.dart b/examples/docs_examples/bin/expression_language/interface.dart index f678f18a..f2a634b7 100644 --- a/examples/docs_examples/bin/expression_language/interface.dart +++ b/examples/docs_examples/bin/expression_language/interface.dart @@ -96,7 +96,7 @@ Future _runnableInterfaceBatchOptions() async { {'topic': 'cats'}, ], options: [ - const ChatOpenAIOptions(model: 'gpt-3.5-turbo', temperature: 0.5), + const ChatOpenAIOptions(model: 'gpt-4o-mini', temperature: 0.5), const ChatOpenAIOptions(model: 'gpt-4', temperature: 0.7), ], ); diff --git a/examples/docs_examples/bin/expression_language/primitives/binding.dart b/examples/docs_examples/bin/expression_language/primitives/binding.dart index 1c456ef7..d16d81d8 100644 --- a/examples/docs_examples/bin/expression_language/primitives/binding.dart +++ b/examples/docs_examples/bin/expression_language/primitives/binding.dart @@ -63,7 +63,7 @@ Future _differentModels() async { chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4-turbo')) | outputParser, 'q2': prompt2 | - chatModel.bind(const ChatOpenAIOptions(model: 'gpt-3.5-turbo')) | + chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4o-mini')) | outputParser, }); final res = await chain.invoke({'name': 'David'}); diff --git a/examples/docs_examples/bin/expression_language/primitives/function.dart b/examples/docs_examples/bin/expression_language/primitives/function.dart index 8c631877..029322bb 100644 --- a/examples/docs_examples/bin/expression_language/primitives/function.dart +++ b/examples/docs_examples/bin/expression_language/primitives/function.dart @@ -73,7 +73,7 @@ Future _function() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714463309, // system_fingerprint: fp_3b956da36b // }, @@ -116,7 +116,7 @@ Future _function() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -135,7 +135,7 @@ Future _function() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -154,7 +154,7 @@ Future _function() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, diff --git a/packages/langchain_openai/lib/src/chains/qa_with_sources.dart b/packages/langchain_openai/lib/src/chains/qa_with_sources.dart index 207577a1..7c812836 100644 --- a/packages/langchain_openai/lib/src/chains/qa_with_sources.dart +++ b/packages/langchain_openai/lib/src/chains/qa_with_sources.dart @@ -12,7 +12,7 @@ import 'qa_with_structure.dart'; /// ```dart /// final 
llm = ChatOpenAI( /// apiKey: openaiApiKey, -/// model: 'gpt-3.5-turbo-0613', +/// model: 'gpt-4o-mini', /// temperature: 0, /// ); /// final qaChain = OpenAIQAWithSourcesChain(llm: llm); diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index 54c955e9..dbd9c333 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -76,7 +76,7 @@ import 'types.dart'; /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ /// 'q1': prompt1 | chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4')) | outputParser, -/// 'q2': prompt2| chatModel.bind(const ChatOpenAIOptions(model: 'gpt-3.5-turbo')) | outputParser, +/// 'q2': prompt2| chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4o-mini')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); /// ``` @@ -239,7 +239,7 @@ class ChatOpenAI extends BaseChatModel { String get modelType => 'openai-chat'; /// The default model to use unless another is specified. - static const defaultModel = 'gpt-3.5-turbo'; + static const defaultModel = 'gpt-4o-mini'; @override Future invoke( @@ -348,7 +348,6 @@ class ChatOpenAI extends BaseChatModel { final int tokensPerName; switch (model) { - case 'gpt-3.5-turbo-0613': case 'gpt-3.5-turbo-16k-0613': case 'gpt-4-0314': case 'gpt-4-32k-0314': @@ -362,8 +361,8 @@ class ChatOpenAI extends BaseChatModel { // If there's a name, the role is omitted tokensPerName = -1; default: - if (model.startsWith('gpt-3.5-turbo') || model.startsWith('gpt-4')) { - // Returning num tokens assuming gpt-3.5-turbo-0613 + if (model.startsWith('gpt-4o-mini') || model.startsWith('gpt-4')) { + // Returning num tokens assuming gpt-4 tokensPerMessage = 3; tokensPerName = 1; } else { diff --git a/packages/langchain_openai/test/chains/qa_with_sources_test.dart b/packages/langchain_openai/test/chains/qa_with_sources_test.dart index a94ea862..15a80431 100644 --- a/packages/langchain_openai/test/chains/qa_with_sources_test.dart +++ b/packages/langchain_openai/test/chains/qa_with_sources_test.dart @@ -125,7 +125,7 @@ Question: {question} final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-3.5-turbo', + model: 'gpt-4o-mini', temperature: 0, ), ); diff --git a/packages/langchain_openai/test/chat_models/chat_openai_test.dart b/packages/langchain_openai/test/chat_models/chat_openai_test.dart index edb42b2e..7ba681c6 100644 --- a/packages/langchain_openai/test/chat_models/chat_openai_test.dart +++ b/packages/langchain_openai/test/chat_models/chat_openai_test.dart @@ -14,7 +14,7 @@ import 'package:test/test.dart'; void main() { group('ChatOpenAI tests', () { final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - const defaultModel = 'gpt-3.5-turbo'; + const defaultModel = 'gpt-4o-mini'; test('Test ChatOpenAI parameters', () async { final chat = ChatOpenAI( @@ -208,7 +208,6 @@ void main() { test('Test countTokens messages', () async { final models = [ - 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-4-0314', 'gpt-4-0613', diff --git a/packages/langchain_openai/test/chat_models/open_router_test.dart b/packages/langchain_openai/test/chat_models/open_router_test.dart index f108db8b..d7c8fc9c 100644 --- a/packages/langchain_openai/test/chat_models/open_router_test.dart +++ b/packages/langchain_openai/test/chat_models/open_router_test.dart @@ 
-27,7 +27,7 @@ void main() { test('Test invoke OpenRouter API with different models', () async { final models = [ - 'gpt-3.5-turbo', + 'gpt-4o-mini', 'gpt-4', 'google/gemini-pro', 'anthropic/claude-2', @@ -57,7 +57,7 @@ void main() { test('Test stream OpenRouter API with different models', () async { final models = [ - 'gpt-3.5-turbo', + 'gpt-4o-mini', 'gpt-4', // 'google/gemini-pro', // Not supported 'anthropic/claude-2', @@ -88,7 +88,7 @@ void main() { test('Test countTokens', () async { final models = [ - 'gpt-3.5-turbo', + 'gpt-4o-mini', 'gpt-4', 'google/gemini-pro', 'anthropic/claude-2', diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md index 76dcd335..2c003f8d 100644 --- a/packages/openai_dart/README.md +++ b/packages/openai_dart/README.md @@ -257,7 +257,7 @@ const tool = ChatCompletionTool( final res1 = await client.createChatCompletion( request: CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -333,7 +333,7 @@ const function = FunctionObject( final res1 = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-3.5-turbo'), + model: ChatCompletionModel.modelId('gpt-4o-mini'), messages: [ ChatCompletionMessage.system( content: 'You are a helpful assistant.', @@ -355,7 +355,7 @@ final functionResult = getCurrentWeather(arguments['location'], arguments['unit' final res2 = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-3.5-turbo'), + model: ChatCompletionModel.modelId('gpt-4o-mini'), messages: [ ChatCompletionMessage.system( content: 'You are a helpful assistant.', @@ -480,7 +480,7 @@ Related guide: [Fine-tune models](https://platform.openai.com/docs/guides/fine-t ```dart const request = CreateFineTuningJobRequest( - model: FineTuningModel.modelId('gpt-3.5-turbo'), + model: FineTuningModel.modelId('gpt-4o-mini'), trainingFile: 'file-abc123', validationFile: 'file-abc123', hyperparameters: FineTuningJobHyperparameters( diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 6e7e429a..f9213271 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -122,7 +122,6 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - @Default(true) bool? parallelToolCalls, /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). 
diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 83c04dc1..0e395531 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -72,7 +72,6 @@ class CreateRunRequest with _$CreateRunRequest { /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - @Default(true) bool? parallelToolCalls, /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index 67b921cb..ae054a5c 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -71,7 +71,6 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - @Default(true) bool? parallelToolCalls, /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 75973e85..06e93133 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -4045,7 +4045,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - this.parallelToolCalls = true, + this.parallelToolCalls, @JsonKey(includeIfNull: false) this.user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -30225,7 +30225,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - this.parallelToolCalls = true, + this.parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @@ -33563,7 +33563,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) - this.parallelToolCalls = true, + this.parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 95ffa209..191f05e7 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -338,7 +338,7 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( .toList(), toolChoice: const _ChatCompletionToolChoiceOptionConverter() .fromJson(json['tool_choice']), - parallelToolCalls: json['parallel_tool_calls'] as bool? ?? true, + parallelToolCalls: json['parallel_tool_calls'] as bool?, user: json['user'] as String?, functionCall: const _ChatCompletionFunctionCallConverter() .fromJson(json['function_call']), @@ -2867,7 +2867,7 @@ _$CreateRunRequestImpl _$$CreateRunRequestImplFromJson( json['truncation_strategy'] as Map), toolChoice: const _CreateRunRequestToolChoiceConverter() .fromJson(json['tool_choice']), - parallelToolCalls: json['parallel_tool_calls'] as bool? ?? true, + parallelToolCalls: json['parallel_tool_calls'] as bool?, responseFormat: const _CreateRunRequestResponseFormatConverter() .fromJson(json['response_format']), stream: json['stream'] as bool?, @@ -3193,7 +3193,7 @@ _$CreateThreadAndRunRequestImpl _$$CreateThreadAndRunRequestImplFromJson( json['truncation_strategy'] as Map), toolChoice: const _CreateThreadAndRunRequestToolChoiceConverter() .fromJson(json['tool_choice']), - parallelToolCalls: json['parallel_tool_calls'] as bool? ?? 
true, + parallelToolCalls: json['parallel_tool_calls'] as bool?, responseFormat: const _CreateThreadAndRunRequestResponseFormatConverter() .fromJson(json['response_format']), stream: json['stream'] as bool?, diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 420c7cf4..00dbbe54 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1995,7 +1995,7 @@ components: Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. type: boolean - default: true + default: null nullable: true user: *end_user_param_configuration function_call: diff --git a/packages/openai_dart/test/openai_client_chat_test.dart b/packages/openai_dart/test/openai_client_chat_test.dart index 9277c848..fa272bbe 100644 --- a/packages/openai_dart/test/openai_client_chat_test.dart +++ b/packages/openai_dart/test/openai_client_chat_test.dart @@ -23,7 +23,7 @@ void main() { test('Test call chat completion API', () async { final models = [ - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ChatCompletionModels.gpt4, ]; @@ -73,7 +73,7 @@ void main() { test('Test call chat completion API with stop sequence', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -105,7 +105,7 @@ void main() { test('Test call chat completions API with max tokens', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -128,7 +128,7 @@ void main() { test('Test call chat completions API with other parameters', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -154,7 +154,7 @@ void main() { test('Test call chat completions streaming API', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -179,7 +179,7 @@ void main() { await for (final res in stream) { expect(res.id, isNotEmpty); expect(res.created, greaterThan(0)); - expect(res.model, startsWith('gpt-3.5-turbo')); + expect(res.model, startsWith('gpt-4o-mini')); expect(res.object, isNotEmpty); if (res.choices.isNotEmpty) { expect(res.choices, hasLength(1)); @@ -224,7 +224,7 @@ void main() { final request1 = CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ const ChatCompletionMessage.system( @@ -272,7 +272,7 @@ void main() { final request2 = CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ const ChatCompletionMessage.system( @@ -330,7 +330,7 @@ void main() { final request1 = CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ const ChatCompletionMessage.system( @@ -360,7 +360,7 @@ void main() { res.object, isNotEmpty, ); - expect(res.model, startsWith('gpt-3.5-turbo')); + 
expect(res.model, startsWith('gpt-4o-mini')); expect(res.choices, hasLength(1)); final choice = res.choices.first; expect(choice.index, 0); @@ -386,103 +386,6 @@ void main() { expect(count, greaterThan(1)); }); - test('Test call chat completions API functions', () async { - const function = FunctionObject( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - parameters: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - - final request1 = CreateChatCompletionRequest( - model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, - ), - messages: [ - const ChatCompletionMessage.system( - content: 'You are a helpful assistant.', - ), - const ChatCompletionMessage.user( - content: ChatCompletionUserMessageContent.string( - 'What’s the weather like in Boston right now?', - ), - ), - ], - functions: [function], - functionCall: ChatCompletionFunctionCall.function( - ChatCompletionFunctionCallOption(name: function.name), - ), - ); - final res1 = await client.createChatCompletion(request: request1); - expect(res1.choices, hasLength(1)); - - final choice1 = res1.choices.first; - - final aiMessage1 = choice1.message; - expect(aiMessage1.role, ChatCompletionMessageRole.assistant); - expect(aiMessage1.content, isNull); - expect(aiMessage1.functionCall, isNotNull); - - final functionCall = aiMessage1.functionCall!; - expect(functionCall.name, function.name); - expect(functionCall.arguments, isNotEmpty); - final arguments = json.decode( - functionCall.arguments, - ) as Map; - expect(arguments.containsKey('location'), isTrue); - expect(arguments['location'], contains('Boston')); - - final functionResult = { - 'temperature': '22', - 'unit': 'celsius', - 'description': 'Sunny', - }; - - final request2 = CreateChatCompletionRequest( - model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, - ), - messages: [ - const ChatCompletionMessage.system( - content: 'You are a helpful assistant.', - ), - const ChatCompletionMessage.user( - content: ChatCompletionUserMessageContent.string( - 'What’s the weather like in Boston right now?', - ), - ), - ChatCompletionMessage.function( - name: function.name, - content: json.encode(functionResult), - ), - ], - functions: [function], - ); - final res2 = await client.createChatCompletion(request: request2); - expect(res2.choices, hasLength(1)); - - final choice2 = res2.choices.first; - expect(choice2.finishReason, ChatCompletionFinishReason.stop); - - final aiMessage2 = choice2.message; - expect(aiMessage2.role, ChatCompletionMessageRole.assistant); - expect(aiMessage2.content, contains('22')); - expect(aiMessage2.functionCall, isNull); - }); - test('Test jsonObject response format', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( From 037071b70b619cf1c4a92fac65c1d1ed3f76fb00 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 26 Jul 2024 18:20:35 +0200 Subject: [PATCH 083/251] docs: Update Ollama documentation (#508) --- .../models/chat_models/integrations/ollama.md | 321 +++++++++++++++--- .../chat_models/integrations/ollama.dart | 258 ++++++++++++-- 2 files changed, 500 insertions(+), 79 deletions(-) diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md 
b/docs/modules/model_io/models/chat_models/integrations/ollama.md index 1e440c83..2521307a 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -2,13 +2,9 @@ Wrapper around [Ollama](https://ollama.ai) Completions API that enables to interact with the LLMs in a chat-like fashion. -Ollama allows you to run open-source large language models, such as Llama 3, locally. +Ollama allows you to run open-source large language models, such as Llama 3.1 or Gemma 2, locally. -Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. - -It optimizes setup and configuration details, including GPU usage. - -For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library). +Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage. ## Setup @@ -17,6 +13,30 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. Fetch a model via `ollama pull ` * e.g., for Llama 3: `ollama pull llama3.1` +3. Instantiate the `ChatOllama` class with the downloaded model. + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + ), +); +``` + +For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library). + +### Ollama base URL + +By default, `ChatOllama` uses 'http://localhost:11434/api' as base URL (default Ollama API URL). But if you are running Ollama on a different host, you can override it using the `baseUrl` parameter. + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + baseUrl: 'https://your-remote-server-where-ollama-is-running.com', + model: 'llama3.1', + ), +); +``` ## Usage @@ -44,7 +64,9 @@ print(res); // -> 'La traduction est : "J'aime le programming.' ``` -## Streaming +### Streaming + +Ollama supports streaming the output as the model generates it. ```dart final promptTemplate = ChatPromptTemplate.fromTemplates([ @@ -68,36 +90,7 @@ await stream.forEach(print); // 9 ``` -## JSON mode - -You can force the model to produce JSON output that you can easily parse using `JsonOutputParser`, useful for extracting structured data. - -```dart -final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - (ChatMessageType.system, 'You are an assistant that respond question using JSON format.'), - (ChatMessageType.human, '{question}'), -]); -final chat = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.1', - temperature: 0, - format: OllamaResponseFormat.json, - ), -); - -final chain = Runnable.getMapFromInput('question') - .pipe(promptTemplate) - .pipe(chat) - .pipe(JsonOutputParser()); - -final res = await chain.invoke( - 'What is the population of Spain, The Netherlands, and France?', -); -print(res); -// {Spain: 46735727, The Netherlands: 17398435, France: 65273538} -``` - -## Multimodal support +### Multimodal support Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava). @@ -125,14 +118,12 @@ print(res.output.content); // -> 'An Apple' ``` -## Tool calling +### Tool calling -`ChatOllama` supports tool calling. 
- -Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. +`ChatOllama` now offers support for native tool calling. This enables a model to answer a given prompt using tool(s) it knows about, making it possible for models to perform more complex tasks or interact with the outside world. It follows the standard [LangChain.dart tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). **Notes:** -- Tool calling requires Ollama 0.2.8 or newer. +- Tool calling requires [Ollama 0.3.0](https://github.com/ollama/ollama/releases/tag/v0.3.0) or newer. - Streaming tool calls is not supported at the moment. - Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1)). @@ -176,7 +167,251 @@ print(res.output.toolCalls); // }] ``` -## RAG (Retrieval-Augmented Generation) pipeline +As you can see, `ChatOllamaTools` support calling multiple tools in a single request. + +If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter: + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [tool], + toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), + ), +); +``` + +### JSON mode + +You can force the model to produce JSON output that you can easily parse using `JsonOutputParser`, useful for extracting structured data. + +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are an assistant that respond question using JSON format.'), + (ChatMessageType.human, '{question}'), +]); +final chat = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + format: OllamaResponseFormat.json, + ), +); + +final chain = Runnable.getMapFromInput('question') + .pipe(promptTemplate) + .pipe(chat) + .pipe(JsonOutputParser()); + +final res = await chain.invoke( + 'What is the population of Spain, The Netherlands, and France?', +); +print(res); +// {Spain: 46735727, The Netherlands: 17398435, France: 65273538} +``` + +## Examples + +### Answering questions with data from an external API + +Imagine you have an API that provides flight times between two cities: + +```dart +// Simulates an API call to get flight times +// In a real application, this would fetch data from a live database or API +String getFlightTimes(String departure, String arrival) { + final flights = { + 'NYC-LAX': { + 'departure': '08:00 AM', + 'arrival': '11:30 AM', + 'duration': '5h 30m', + }, + 'LAX-NYC': { + 'departure': '02:00 PM', + 'arrival': '10:30 PM', + 'duration': '5h 30m', + }, + 'LHR-JFK': { + 'departure': '10:00 AM', + 'arrival': '01:00 PM', + 'duration': '8h 00m', + }, + 'JFK-LHR': { + 'departure': '09:00 PM', + 'arrival': '09:00 AM', + 'duration': '7h 00m', + }, + 'CDG-DXB': { + 'departure': '11:00 AM', + 'arrival': '08:00 PM', + 'duration': '6h 00m', + }, + 'DXB-CDG': { + 'departure': '03:00 AM', + 'arrival': '07:30 AM', + 'duration': '7h 30m', + }, + }; + + final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}'; + return jsonEncode(flights[key] ?? 
{'error': 'Flight not found'}); +} +``` + +Using the tool calling capabilities of Ollama, we can provide the model with the ability to call this API whenever it needs to get flight times to answer a question. + +```dart +const getFlightTimesTool = ToolSpec( + name: 'get_flight_times', + description: 'Get the flight times between two cities', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'departure': { + 'type': 'string', + 'description': 'The departure city (airport code)', + }, + 'arrival': { + 'type': 'string', + 'description': 'The arrival city (airport code)', + }, + }, + 'required': ['departure', 'arrival'], + }, +); + +final chatModel = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [getFlightTimesTool], + ), +); + +final messages = [ + ChatMessage.humanText( + 'What is the flight time from New York (NYC) to Los Angeles (LAX)?', + ), +]; + +// First API call: Send the query and function description to the model +final response = await chatModel.invoke(PromptValue.chat(messages)); + +messages.add(response.output); + +// Check if the model decided to use the provided function +if (response.output.toolCalls.isEmpty) { + print("The model didn't use the function. Its response was:"); + print(response.output.content); + return; +} + +// Process function calls made by the model +for (final toolCall in response.output.toolCalls) { + final functionResponse = getFlightTimes( + toolCall.arguments['departure'], + toolCall.arguments['arrival'], + ); + // Add function response to the conversation + messages.add( + ChatMessage.tool( + toolCallId: toolCall.id, + content: functionResponse, + ), + ); +} + +// Second API call: Get final response from the model +final finalResponse = await chatModel.invoke(PromptValue.chat(messages)); +print(finalResponse.output.content); +// The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes. +``` + +### Extracting structured data with tools + +A useful application of tool calling is extracting structured data from unstructured text. In the following example, we use a tool to extract the names, heights, and hair colors of people mentioned in a passage. + +```dart +const tool = ToolSpec( + name: 'information_extraction', + description: 'Extracts the relevant information from the passage', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'people': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'description': 'The name of a person', + }, + 'height': { + 'type': 'number', + 'description': 'The height of the person in cm', + }, + 'hair_color': { + 'type': 'string', + 'description': 'The hair color of the person', + 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], + }, + }, + 'required': ['name', 'height', 'hair_color'], + }, + }, + }, + 'required': ['people'], + }, +); + +final model = ChatOllama( + defaultOptions: ChatOllamaOptions( + options: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + ), + tools: [tool], + toolChoice: ChatToolChoice.forced(name: tool.name), + ), +); + +final promptTemplate = ChatPromptTemplate.fromTemplate(''' +Extract and save the relevant entities mentioned in the following passage together with their properties. + +Passage: +{input}'''); + +final chain = Runnable.getMapFromInput() + .pipe(promptTemplate) + .pipe(model) + .pipe(ToolsOutputParser()); + +final res = await chain.invoke( + 'Alex is 5 feet tall. 
' + 'Claudia is 1 foot taller than Alex and jumps higher than him. ' + 'Claudia has orange hair and Alex is blonde.', +); +final extractedData = res.first.arguments; +print(extractedData); +// { +// people: [ +// { +// name: Alex, +// height: 152, +// hair_color: blonde +// }, +// { +// name: Claudia, +// height: 183, +// hair_color: orange +// } +// ] +// } +``` + +### RAG (Retrieval-Augmented Generation) pipeline We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `ChatOllama`. diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart index 5c47bb0e..0682326f 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart @@ -8,9 +8,11 @@ import 'package:langchain_ollama/langchain_ollama.dart'; void main(final List arguments) async { await _chatOllama(); await _chatOllamaStreaming(); - await _chatOllamaJsonMode(); await _chatOllamaMultimodal(); await _chatOllamaToolCalling(); + await _chatOllamaJsonMode(); + await _extraction(); + await _flights(); await _rag(); } @@ -67,32 +69,26 @@ Future _chatOllamaStreaming() async { // 9 } -Future _chatOllamaJsonMode() async { - final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - ( - ChatMessageType.system, - 'You are an assistant that respond question using JSON format.' - ), - (ChatMessageType.human, '{question}'), - ]); - final chat = ChatOllama( +Future _chatOllamaMultimodal() async { + final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llava', temperature: 0, - format: OllamaResponseFormat.json, ), ); - - final chain = Runnable.getMapFromInput('question') - .pipe(promptTemplate) - .pipe(chat) - .pipe(JsonOutputParser()); - - final res = await chain.invoke( - 'What is the population of Spain, The Netherlands, and France?', + final prompt = ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), ); - print(res); - // {Spain: 46735727, The Netherlands: 17398435, France: 65273538} + final res = await chatModel.invoke(PromptValue.chat([prompt])); + print(res.output.content); + // -> 'An Apple' } Future _chatOllamaToolCalling() async { @@ -137,26 +133,216 @@ Future _chatOllamaToolCalling() async { // }] } -Future _chatOllamaMultimodal() async { +Future _chatOllamaJsonMode() async { + final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are an assistant that respond question using JSON format.' 
+ ), + (ChatMessageType.human, '{question}'), + ]); + final chat = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + format: OllamaResponseFormat.json, + ), + ); + + final chain = Runnable.getMapFromInput('question') + .pipe(promptTemplate) + .pipe(chat) + .pipe(JsonOutputParser()); + + final res = await chain.invoke( + 'What is the population of Spain, The Netherlands, and France?', + ); + print(res); + // {Spain: 46735727, The Netherlands: 17398435, France: 65273538} +} + +Future _extraction() async { + const tool = ToolSpec( + name: 'information_extraction', + description: 'Extracts the relevant information from the passage', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'people': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'description': 'The name of a person', + }, + 'height': { + 'type': 'number', + 'description': 'The height of the person in cm', + }, + 'hair_color': { + 'type': 'string', + 'description': 'The hair color of the person', + 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], + }, + }, + 'required': ['name', 'height', 'hair_color'], + }, + }, + }, + 'required': ['people'], + }, + ); + + final model = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: const [tool], + toolChoice: ChatToolChoice.forced(name: tool.name), + ), + ); + + final promptTemplate = ChatPromptTemplate.fromTemplate(''' +Extract and save the relevant entities mentioned in the following passage together with their properties. + +Passage: +{input}'''); + + final chain = Runnable.getMapFromInput() + .pipe(promptTemplate) + .pipe(model) + .pipe(ToolsOutputParser()); + + final res = await chain.invoke( + 'Alex is 5 feet tall. ' + 'Claudia is 1 foot taller than Alex and jumps higher than him. ' + 'Claudia has orange hair and Alex is blonde.', + ); + final extractedData = res.first.arguments; + print(extractedData); + // { + // people: [ + // { + // name: Alex, + // height: 152, + // hair_color: blonde + // }, + // { + // name: Claudia, + // height: 183, + // hair_color: orange + // } + // ] + // } +} + +// Simulates an API call to get flight times +// In a real application, this would fetch data from a live database or API +String getFlightTimes(String departure, String arrival) { + final flights = { + 'NYC-LAX': { + 'departure': '08:00 AM', + 'arrival': '11:30 AM', + 'duration': '5h 30m', + }, + 'LAX-NYC': { + 'departure': '02:00 PM', + 'arrival': '10:30 PM', + 'duration': '5h 30m', + }, + 'LHR-JFK': { + 'departure': '10:00 AM', + 'arrival': '01:00 PM', + 'duration': '8h 00m', + }, + 'JFK-LHR': { + 'departure': '09:00 PM', + 'arrival': '09:00 AM', + 'duration': '7h 00m', + }, + 'CDG-DXB': { + 'departure': '11:00 AM', + 'arrival': '08:00 PM', + 'duration': '6h 00m', + }, + 'DXB-CDG': { + 'departure': '03:00 AM', + 'arrival': '07:30 AM', + 'duration': '7h 30m', + }, + }; + + final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}'; + return jsonEncode(flights[key] ?? 
{'error': 'Flight not found'}); +} + +Future _flights() async { + const getFlightTimesTool = ToolSpec( + name: 'get_flight_times', + description: 'Get the flight times between two cities', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'departure': { + 'type': 'string', + 'description': 'The departure city (airport code)', + }, + 'arrival': { + 'type': 'string', + 'description': 'The arrival city (airport code)', + }, + }, + 'required': ['departure', 'arrival'], + }, + ); + final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llava', + model: 'llama3.1', temperature: 0, + tools: [getFlightTimesTool], ), ); - final prompt = ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - data: base64.encode( - await File('./bin/assets/apple.jpeg').readAsBytes(), - ), + + final messages = [ + ChatMessage.humanText( + 'What is the flight time from New York (NYC) to Los Angeles (LAX)?', + ), + ]; + + // First API call: Send the query and function description to the model + final response = await chatModel.invoke(PromptValue.chat(messages)); + + messages.add(response.output); + + // Check if the model decided to use the provided function + if (response.output.toolCalls.isEmpty) { + print("The model didn't use the function. Its response was:"); + print(response.output.content); + return; + } + + // Process function calls made by the model + for (final toolCall in response.output.toolCalls) { + final functionResponse = getFlightTimes( + toolCall.arguments['departure'], + toolCall.arguments['arrival'], + ); + // Add function response to the conversation + messages.add( + ChatMessage.tool( + toolCallId: toolCall.id, + content: functionResponse, ), - ]), - ); - final res = await chatModel.invoke(PromptValue.chat([prompt])); - print(res.output.content); - // -> 'An Apple' + ); + } + + // Second API call: Get final response from the model + final finalResponse = await chatModel.invoke(PromptValue.chat(messages)); + print(finalResponse.output.content); + // The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes. 
} Future _rag() async { From 7dee2e7887d90f0a003afce39a4dbf52d68bc50d Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 26 Jul 2024 18:23:56 +0200 Subject: [PATCH 084/251] refactor: Remove ChatOllamaTools in favour of ChatOllama (#509) --- docs/_sidebar.md | 1 - .../models/chat_models/integrations/ollama.md | 2 +- .../chat_models/integrations/ollama_tools.md | 273 ---------------- .../integrations/ollama_tools.dart | 226 ------------- packages/langchain/README.md | 1 - packages/langchain_ollama/README.md | 1 - .../lib/src/chat_models/chat_models.dart | 2 - .../chat_models/chat_ollama/chat_ollama.dart | 2 - .../chat_ollama_tools/chat_ollama_tools.dart | 297 ------------------ .../chat_ollama_tools/mappers.dart | 1 - .../chat_models/chat_ollama_tools/types.dart | 119 ------- .../test/chat_models/chat_ollama_test.dart | 119 +++++-- .../chat_models/chat_ollama_tools_test.dart | 207 ------------ 13 files changed, 96 insertions(+), 1155 deletions(-) delete mode 100644 docs/modules/model_io/models/chat_models/integrations/ollama_tools.md delete mode 100644 examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart delete mode 100644 packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart delete mode 100644 packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart delete mode 100644 packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart delete mode 100644 packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart diff --git a/docs/_sidebar.md b/docs/_sidebar.md index bfb7aad0..c51de21b 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -63,7 +63,6 @@ - [GCP Vertex AI](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md) - [Google AI](/modules/model_io/models/chat_models/integrations/googleai.md) - [Ollama](/modules/model_io/models/chat_models/integrations/ollama.md) - - [OllamaTools](/modules/model_io/models/chat_models/integrations/ollama_tools.md) - [Mistral AI](/modules/model_io/models/chat_models/integrations/mistralai.md) - [OpenRouter](/modules/model_io/models/chat_models/integrations/open_router.md) - [Together AI](/modules/model_io/models/chat_models/integrations/together_ai.md) diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index 2521307a..d12b5b93 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -167,7 +167,7 @@ print(res.output.toolCalls); // }] ``` -As you can see, `ChatOllamaTools` support calling multiple tools in a single request. +As you can see, `ChatOllama` support calling multiple tools in a single request. If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter: diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama_tools.md b/docs/modules/model_io/models/chat_models/integrations/ollama_tools.md deleted file mode 100644 index 17334a5b..00000000 --- a/docs/modules/model_io/models/chat_models/integrations/ollama_tools.md +++ /dev/null @@ -1,273 +0,0 @@ -# ChatOllamaTools - -LangChain.dart offers an experimental wrapper around open source models run locally via [Ollama](https://ollama.ai) that enables [tool calling capabilities](/modules/model_io/models/chat_models/how_to/tools.md). 
- -> Warning: This is an experimental wrapper that attempts to add tool calling support to models that do not support it natively. Use with caution. - -More powerful and capable models will perform better with complex schema and/or multiple tools. For a complete list of supported models, see the [Ollama model library](https://ollama.ai/library). The examples below use Google's [Gemma2 9B model](https://ollama.com/library/gemma2) running locally. - -## Setup - -Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance: - -1. Download and install [Ollama](https://ollama.ai) -2. Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull gemma2` - -## Usage - -You can use `ChatOllamaTools` in a similar way to a regular `ChatOllama` wrapper. The main difference is that `ChatOllamaToolsOptions` accepts: -- `options`: the usual `ChatOllamaOptions` options -- `tools`: the list with the definition of the tools the model can call -- `toolChoice`: how the model should respond to tool calls -- `toolsSystemPromptTemplate`: the prompt template used to inform the user about the available tools - -`ChatOllamaTools` follows the standard [LangChain tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). - -```dart -const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, -); -final model = ChatOllamaTools( - defaultOptions: ChatOllamaToolsOptions( - options: ChatOllamaOptions( - model: 'gemma2', - temperature: 0, - ), - tools: [tool], - ), -); -final res = await model.invoke( - PromptValue.string("What's the weather in Barcelona?"), -); -print(res); -// ChatResult{ -// id: 51ede9a1-7ab7-4c81-803e-cfe237eb92ae, -// output: AIChatMessage{ -// content: , -// toolCalls: [ -// AIChatMessageToolCall{ -// id: 42139039-9251-4e1b-9f47-21f24da65be9, -// name: get_current_weather, -// arguments: {location: Barcelona, ES, unit: celsius}, -// } -// ], -// }, -// finishReason: FinishReason.stop, -// metadata: { -// model: gemma2, -// created_at: 2024-07-11T15:44:56.893216Z, -// done: true, -// total_duration: 2900101792, -// load_duration: 41286000, -// prompt_eval_count: 327, -// prompt_eval_duration: 453557000, -// eval_count: 57, -// eval_duration: 2401129000 -// }, -// usage: LanguageModelUsage{ -// promptTokens: 327, -// promptBillableCharacters: null, -// responseTokens: 57, -// responseBillableCharacters: null, -// totalTokens: 384 -// } -// } -``` - -If you want to extract only the tool calls, you can use the `ToolCallOutputParser`: - -```dart -final chain = model.pipe(ToolsOutputParser()); -final res2 = await chain.invoke( - PromptValue.string("What's the weather in Barcelona and Amsterdam?"), -); -print(res2); -// [ -// ParsedToolCall{ -// id: b62a9051-0193-4115-9bac-362005c40c2d, -// name: get_current_weather, -// arguments: {location: Barcelona, ES, unit: celsius}, -// }, -// ParsedToolCall{ -// id: 442ff44c-2a8e-4e16-9fc5-ddaf586a37ce, -// name: get_current_weather, -// arguments: {location: Amsterdam, NL, unit: celsius}, 
-//   }
-// ]
-```
-
-As you can see, `ChatOllamaTools` supports calling multiple tools in a single request.
-
-If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter:
-
-```dart
-final res3 = await chain.invoke(
-  PromptValue.string("What's the weather in Barcelona and Amsterdam?"),
-  options: ChatOllamaToolsOptions(
-    toolChoice: ChatToolChoice.forced(name: 'get_current_weather'),
-  ),
-);
-```
-
-Note: streaming is not supported at the moment.
-
-## Customizing the system prompt template
-
-Behind the scenes, `ChatOllamaTools` uses Ollama's JSON mode to restrict output to JSON, and passes tool schemas to the prompt as JSON schemas.
-
-You can find the default system prompt in `ChatOllamaToolsOptions.defaultToolsSystemPromtTemplate`.
-
-Because different models have different strengths, it may be helpful to pass in your own system prompt. Here's an example of how you can customize the system prompt template:
-
-```dart
-const toolSystemPromptTemplate = '''
-You have access to these tools:
-{tools}
-
-Based on the user input, select {tool_choice} from the available tools.
-
-Respond with a JSON containing a list of tool call objects.
-The tool call objects should have two properties:
-- "tool_name": The name of the selected tool (string)
-- "tool_input": A JSON string with the input for the tool matching the tool's input schema
-
-Example response format:
-```json
-{{
-  "tool_calls": [
-    {{
-      "tool_name": "tool_name",
-      "tool_input": "{{"param1":"value1","param2":"value2"}}"
-    }}
-  ]
-}}
-
-Ensure your response is valid JSON and follows this exact format.''';
-
-final model = ChatOllamaTools(
-  defaultOptions: const ChatOllamaToolsOptions(
-    options: ChatOllamaOptions(
-      model: 'gemma2',
-      temperature: 0,
-    ),
-    tools: [tool],
-    toolsSystemPromptTemplate: toolSystemPromptTemplate,
-  ),
-);
-```
-
-Your prompt template should contain the following placeholders:
-- `{tools}`: where the list of available tools will be inserted
-- `{tool_choice}`: where the instruction to select a certain tool will be inserted
-
-The model should return a JSON object like:
-```json
-{
-  "tool_calls": [
-    {
-      "tool_name": "tool_name",
-      "tool_input": "{\"param1\":\"value1\",\"param2\":\"value2\"}"
-    }
-  ]
-}
-```
-
-## Example: extracting structured data
-
-A useful application of tool calling is extracting structured data from unstructured text. In the following example, we use a tool to extract the names, heights, and hair colors of people mentioned in a passage.
- -```dart -const tool = ToolSpec( - name: 'information_extraction', - description: 'Extracts the relevant information from the passage', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'people': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': { - 'type': 'string', - 'description': 'The name of a person', - }, - 'height': { - 'type': 'number', - 'description': 'The height of the person in cm', - }, - 'hair_color': { - 'type': 'string', - 'description': 'The hair color of the person', - 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], - }, - }, - 'required': ['name', 'height', 'hair_color'], - }, - }, - }, - 'required': ['people'], - }, -); - -final model = ChatOllamaTools( - defaultOptions: ChatOllamaToolsOptions( - options: ChatOllamaOptions( - model: 'gemma2', - temperature: 0, - ), - tools: [tool], - toolChoice: ChatToolChoice.forced(name: tool.name), - ), -); - -final promptTemplate = ChatPromptTemplate.fromTemplate(''' -Extract and save the relevant entities mentioned in the following passage together with their properties. - -Passage: -{input}'''); - -final chain = Runnable.getMapFromInput() - .pipe(promptTemplate) - .pipe(model) - .pipe(ToolsOutputParser()); - -final res = await chain.invoke( - 'Alex is 5 feet tall. ' - 'Claudia is 1 foot taller than Alex and jumps higher than him. ' - 'Claudia has orange hair and Alex is blonde.', -); -final extractedData = res.first.arguments; -print(extractedData); -// { -// people: [ -// { -// name: Alex, -// height: 152, -// hair_color: blonde -// }, -// { -// name: Claudia, -// height: 183, -// hair_color: orange -// } -// ] -// } -``` diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart deleted file mode 100644 index 486b8c1b..00000000 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama_tools.dart +++ /dev/null @@ -1,226 +0,0 @@ -// ignore_for_file: avoid_print, avoid_redundant_argument_values -import 'package:langchain/langchain.dart'; -import 'package:langchain_ollama/langchain_ollama.dart'; - -void main(final List arguments) async { - await _tools(); - await _customizingSystemPrompt(); - await _extraction(); -} - -const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. 
San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, -); - -Future _tools() async { - final model = ChatOllamaTools( - defaultOptions: const ChatOllamaToolsOptions( - options: ChatOllamaOptions( - model: 'gemma2', - temperature: 0, - ), - tools: [tool], - ), - ); - - final res = await model.invoke( - PromptValue.string("What's the weather in Barcelona?"), - ); - print(res); - // ChatResult{ - // id: 51ede9a1-7ab7-4c81-803e-cfe237eb92ae, - // output: AIChatMessage{ - // content: , - // toolCalls: [ - // AIChatMessageToolCall{ - // id: 42139039-9251-4e1b-9f47-21f24da65be9, - // name: get_current_weather, - // arguments: {location: Barcelona, ES, unit: celsius}, - // } - // ], - // }, - // finishReason: FinishReason.stop, - // metadata: { - // model: gemma2, - // created_at: 2024-07-11T15:44:56.893216Z, - // done: true, - // total_duration: 2900101792, - // load_duration: 41286000, - // prompt_eval_count: 327, - // prompt_eval_duration: 453557000, - // eval_count: 57, - // eval_duration: 2401129000 - // }, - // usage: LanguageModelUsage{ - // promptTokens: 327, - // promptBillableCharacters: null, - // responseTokens: 57, - // responseBillableCharacters: null, - // totalTokens: 384 - // } - // } - - final chain = model.pipe(ToolsOutputParser()); - final res2 = await chain.invoke( - PromptValue.string("What's the weather in Barcelona and Amsterdam?"), - ); - print(res2); - // [ - // ParsedToolCall{ - // id: b62a9051-0193-4115-9bac-362005c40c2d, - // name: get_current_weather, - // arguments: {location: Barcelona, ES, unit: celsius}, - // }, - // ParsedToolCall{ - // id: 442ff44c-2a8e-4e16-9fc5-ddaf586a37ce, - // name: get_current_weather, - // arguments: {location: Amsterdam, NL, unit: celsius}, - // } - // ] - - final res3 = await chain.invoke( - PromptValue.string("What's the weather in Barcelona and Amsterdam?"), - options: ChatOllamaToolsOptions( - toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), - ), - ); - print(res3); -} - -Future _customizingSystemPrompt() async { - const toolSystemPromptTemplate = ''' -You have access to these tools: -{tools} - -Based on the user input, select {tool_choice} from the available tools. - -Respond with a JSON containing a list of tool call objects. 
-The tool call objects should have two properties: -- "tool_name": The name of the selected tool (string) -- "tool_input": A JSON string with the input for the tool matching the tool's input schema - -Example response format: -```json -{{ - "tool_calls": [ - {{ - "tool_name": "tool_name", - "tool_input": "{{"param1":"value1","param2":"value2"}}" - }} - ] -}} -``` - -Ensure your response is valid JSON and follows this exact format.'''; - - final model = ChatOllamaTools( - defaultOptions: const ChatOllamaToolsOptions( - options: ChatOllamaOptions( - model: 'gemma2', - temperature: 0, - ), - tools: [tool], - toolsSystemPromptTemplate: toolSystemPromptTemplate, - ), - ); - - final res = await model.invoke( - PromptValue.string("What's the weather in Barcelona?"), - ); - print(res); -} - -Future _extraction() async { - const tool = ToolSpec( - name: 'information_extraction', - description: 'Extracts the relevant information from the passage', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'people': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': { - 'type': 'string', - 'description': 'The name of a person', - }, - 'height': { - 'type': 'number', - 'description': 'The height of the person in cm', - }, - 'hair_color': { - 'type': 'string', - 'description': 'The hair color of the person', - 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], - }, - }, - 'required': ['name', 'height', 'hair_color'], - }, - }, - }, - 'required': ['people'], - }, - ); - - final model = ChatOllamaTools( - defaultOptions: ChatOllamaToolsOptions( - options: const ChatOllamaOptions( - model: 'gemma2', - temperature: 0, - ), - tools: const [tool], - toolChoice: ChatToolChoice.forced(name: tool.name), - ), - ); - - final promptTemplate = ChatPromptTemplate.fromTemplate(''' -Extract and save the relevant entities mentioned in the following passage together with their properties. - -Passage: -{input}'''); - - final chain = Runnable.getMapFromInput() - .pipe(promptTemplate) - .pipe(model) - .pipe(ToolsOutputParser()); - - final res = await chain.invoke( - 'Alex is 5 feet tall. ' - 'Claudia is 1 foot taller than Alex and jumps higher than him. 
' - 'Claudia has orange hair and Alex is blonde.', - ); - final extractedData = res.first.arguments; - print(extractedData); - // { - // people: [ - // { - // name: Alex, - // height: 152, - // hair_color: blonde - // }, - // { - // name: Claudia, - // height: 183, - // hair_color: orange - // } - // ] - // } -} diff --git a/packages/langchain/README.md b/packages/langchain/README.md index fc16ffa9..1e4ad928 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -110,7 +110,6 @@ The following integrations are available in LangChain.dart: | [ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) | | [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://ollama.ai) | | [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) | -| [ChatOllamaTools](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama_tools) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) with tool-calling capabilities | | [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) | | [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) | diff --git a/packages/langchain_ollama/README.md b/packages/langchain_ollama/README.md index 0c37e80f..a0c8c7e4 100644 --- a/packages/langchain_ollama/README.md +++ b/packages/langchain_ollama/README.md @@ -14,7 +14,6 @@ * `Ollama`: wrapper around Ollama Completions API. - Chat models: * `ChatOllama`: wrapper around Ollama Chat API in a chat-like fashion. - * `ChatOllamaTools`: Wrapper around Ollama Chat API that enables tool calling capabilities. - Embeddings: * `OllamaEmbeddings`: wrapper around Ollama Embeddings API. 
diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_models.dart b/packages/langchain_ollama/lib/src/chat_models/chat_models.dart index 4b826ef4..0232e939 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_models.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_models.dart @@ -1,4 +1,2 @@ export 'chat_ollama/chat_ollama.dart'; export 'chat_ollama/types.dart'; -export 'chat_ollama_tools/chat_ollama_tools.dart'; -export 'chat_ollama_tools/types.dart' hide conversationalResponseTool; diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart index 8db88d0d..4b0e9c75 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart @@ -30,8 +30,6 @@ import 'types.dart'; /// /// - [Ollama API docs](https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion) /// -/// If you need to use tools, consider using the [ChatOllamaTools] instead. -/// /// ### Setup /// /// 1. Download and install [Ollama](https://ollama.ai) diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart deleted file mode 100644 index 82da6a95..00000000 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/chat_ollama_tools.dart +++ /dev/null @@ -1,297 +0,0 @@ -import 'dart:convert'; - -import 'package:collection/collection.dart' show IterableExtension; -import 'package:http/http.dart' as http; -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/prompts.dart'; -import 'package:langchain_tiktoken/langchain_tiktoken.dart'; -import 'package:ollama_dart/ollama_dart.dart'; -import 'package:uuid/uuid.dart'; - -import 'mappers.dart'; -import 'types.dart'; - -/// Wrapper around [Ollama](https://ollama.ai) Chat API that enables tool -/// calling capabilities. -/// -/// Warning: This is an experimental wrapper that attempts to add tool calling -/// support to models that do not support it natively. More powerful and -/// capable models will perform better with complex schema and/or multiple -/// tools. Use with caution. -/// -/// Example: -/// ```dart -/// const tool = ToolSpec( -/// name: 'get_current_weather', -/// description: 'Get the current weather in a given location', -/// inputJsonSchema: { -/// 'type': 'object', -/// 'properties': { -/// 'location': { -/// 'type': 'string', -/// 'description': 'The city and state, e.g. San Francisco, CA', -/// }, -/// 'unit': { -/// 'type': 'string', -/// 'enum': ['celsius', 'fahrenheit'], -/// }, -/// }, -/// 'required': ['location'], -/// }, -/// ); -/// final chatModel = ChatOllamaTools( -/// defaultOptions: ChatOllamaToolOptions( -/// options: ChatOllamaOptions(model: 'llama3.1'), -/// tools: [tool], -/// ), -/// ); -/// final prompt = PromptValue.string('What's the weather in Bangalore, India?'); -/// final res = await ollamaTools.invoke(prompt); -/// ``` -/// -/// If you don't need to use tools, use [ChatOllama] instead. -/// -/// ### Setup -/// -/// 1. Download and install [Ollama](https://ollama.ai) -/// 2. Fetch a model via `ollama pull ` -/// * e.g., for Llama 3: `ollama pull llama3.1` -/// -/// ### Ollama base URL -/// -/// By default, [ChatOllama] uses 'http://localhost:11434/api' as base URL -/// (default Ollama API URL). 
But if you are running Ollama on a different -/// one, you can override it using the [baseUrl] parameter. -class ChatOllamaTools extends BaseChatModel { - /// Create a new [ChatOllamaTools] instance. - /// - /// Main configuration options: - /// - `baseUrl`: the base URL of Ollama API. - /// - [ChatOllamaTools.defaultOptions] - /// - /// Advance configuration options: - /// - `headers`: global headers to send with every request. You can use - /// this to set custom headers, or to override the default headers. - /// - `queryParams`: global query parameters to send with every request. You - /// can use this to set custom query parameters. - /// - `client`: the HTTP client to use. You can set your own HTTP client if - /// you need further customization (e.g. to use a Socks5 proxy). - /// - [ChatOllama.encoding] - ChatOllamaTools({ - final String baseUrl = 'http://localhost:11434/api', - final Map? headers, - final Map? queryParams, - final http.Client? client, - super.defaultOptions = const ChatOllamaToolsOptions( - options: ChatOllamaOptions(model: defaultModel), - ), - this.encoding = 'cl100k_base', - }) : _client = OllamaClient( - baseUrl: baseUrl, - headers: headers, - queryParams: queryParams, - client: client, - ); - - /// A client for interacting with Ollama API. - final OllamaClient _client; - - /// The encoding to use by tiktoken when [tokenize] is called. - /// - /// Ollama does not provide any API to count tokens, so we use tiktoken - /// to get an estimation of the number of tokens in a prompt. - String encoding; - - /// A UUID generator. - late final Uuid _uuid = const Uuid(); - - @override - String get modelType => 'chat-ollama-tools'; - - /// The default model to use unless another is specified. - static const defaultModel = 'llama3.1'; - - @override - Future invoke( - PromptValue input, { - ChatOllamaToolsOptions? options, - }) async { - final id = _uuid.v4(); - final completion = await _client.generateChatCompletion( - request: _generateCompletionRequest(input, options), - ); - final result = completion.toChatResult(id); - return _parseResult(result); - } - - /// Creates a [GenerateChatCompletionRequest] from the given input. - GenerateChatCompletionRequest _generateCompletionRequest( - final PromptValue input, - final ChatOllamaToolsOptions? toolsOptions, { - final bool stream = false, - }) { - final messages = _formatPrompt(input, toolsOptions).toChatMessages(); - final options = toolsOptions?.options; - final defaultOptions = - this.defaultOptions.options ?? const ChatOllamaOptions(); - return GenerateChatCompletionRequest( - model: options?.model ?? defaultOptions.model ?? defaultModel, - messages: messages.toMessages(), - format: ResponseFormat.json, - keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, - stream: stream, - options: RequestOptions( - numKeep: options?.numKeep ?? defaultOptions.numKeep, - seed: options?.seed ?? defaultOptions.seed, - numPredict: options?.numPredict ?? defaultOptions.numPredict, - topK: options?.topK ?? defaultOptions.topK, - topP: options?.topP ?? defaultOptions.topP, - tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, - typicalP: options?.typicalP ?? defaultOptions.typicalP, - repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, - temperature: options?.temperature ?? defaultOptions.temperature, - repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty, - presencePenalty: - options?.presencePenalty ?? defaultOptions.presencePenalty, - frequencyPenalty: - options?.frequencyPenalty ?? 
defaultOptions.frequencyPenalty, - mirostat: options?.mirostat ?? defaultOptions.mirostat, - mirostatTau: options?.mirostatTau ?? defaultOptions.mirostatTau, - mirostatEta: options?.mirostatEta ?? defaultOptions.mirostatEta, - penalizeNewline: - options?.penalizeNewline ?? defaultOptions.penalizeNewline, - stop: options?.stop ?? defaultOptions.stop, - numa: options?.numa ?? defaultOptions.numa, - numCtx: options?.numCtx ?? defaultOptions.numCtx, - numBatch: options?.numBatch ?? defaultOptions.numBatch, - numGpu: options?.numGpu ?? defaultOptions.numGpu, - mainGpu: options?.mainGpu ?? defaultOptions.mainGpu, - lowVram: options?.lowVram ?? defaultOptions.lowVram, - f16Kv: options?.f16KV ?? defaultOptions.f16KV, - logitsAll: options?.logitsAll ?? defaultOptions.logitsAll, - vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly, - useMmap: options?.useMmap ?? defaultOptions.useMmap, - useMlock: options?.useMlock ?? defaultOptions.useMlock, - numThread: options?.numThread ?? defaultOptions.numThread, - ), - ); - } - - PromptValue _formatPrompt( - final PromptValue input, - final ChatOllamaToolsOptions? options, - ) { - final toolsSystemPromptTemplate = options?.toolsSystemPromptTemplate ?? - defaultOptions.toolsSystemPromptTemplate ?? - ChatOllamaToolsOptions.defaultToolsSystemPromtTemplate; - final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, toolsSystemPromptTemplate), - (ChatMessageType.messagesPlaceholder, 'input'), - ]); - final toolChoice = options?.toolChoice ?? defaultOptions.toolChoice; - final availableTools = options?.tools ?? defaultOptions.tools; - final tools = switch (toolChoice) { - // If toolChoice is auto, we include all the tools - ChatToolChoiceAuto() || null => [ - ...?availableTools, - conversationalResponseTool, - ], - // If toolChoice is none, we include only the conversational response tool - ChatToolChoiceNone() => [conversationalResponseTool], - // If toolChoice is required, we include only the user specified tools - ChatToolChoiceRequired() => availableTools!, - // If toolChoice is forced, we include only the forced tool - final ChatToolChoiceForced f => [ - availableTools!.firstWhere((t) => t.name == f.name), - ] - }; - final toolsJsonMap = json.encode( - tools.map((tool) => tool.toJson()).toList(growable: false), - ); - final toolChoiceInstructions = switch (toolChoice) { - ChatToolChoiceNone() => '`${conversationalResponseTool.name}` tool', - ChatToolChoiceAuto() || - ChatToolChoiceRequired() || - null => - 'one or more tools', - final ChatToolChoiceForced f => '`${f.name}` tool', - }; - return promptTemplate.formatPrompt({ - 'tools': toolsJsonMap, - 'tool_choice': toolChoiceInstructions, - 'input': input.toChatMessages(), - }); - } - - Future _parseResult(final ChatResult result) async { - try { - final output = result.output.content; - final outputMap = json.decode(output) as Map; - final toolCalls = (outputMap['tool_calls'] as List).map((t) { - final tool = t as Map; - final toolInput = tool['tool_input']; - final toolInputMap = json.decode(toolInput) as Map; - return AIChatMessageToolCall( - id: _uuid.v4(), - name: tool['tool_name'].toString(), - arguments: toolInputMap, - argumentsRaw: toolInput, - ); - }).toList(growable: false); - - final conversationalResponseToolCall = toolCalls - .firstWhereOrNull((t) => t.name == conversationalResponseTool.name); - final content = conversationalResponseToolCall != null - ? 
await conversationalResponseTool.invoke( - conversationalResponseTool.getInputFromJson( - conversationalResponseToolCall.arguments, - ), - ) - : ''; - final otherToolCalls = toolCalls - .where((t) => t.name != conversationalResponseTool.name) - .toList(growable: false); - - return ChatResult( - id: result.id, - output: AIChatMessage( - content: content, - toolCalls: otherToolCalls, - ), - finishReason: result.finishReason, - metadata: result.metadata, - usage: result.usage, - ); - } catch (e) { - throw Exception( - 'Model did not respond in valid json string format, ' - 'try improving your prompt, instruct to "respond in JSON"', - ); - } - } - - /// Tokenizes the given prompt using tiktoken. - /// - /// Currently Ollama does not provide a tokenizer for the models it supports. - /// So we use tiktoken and [encoding] model to get an approximation - /// for counting tokens. Mind that the actual tokens will be totally - /// different from the ones used by the Ollama model. - /// - /// If an encoding model is specified in [encoding] field, that - /// encoding is used instead. - /// - /// - [promptValue] The prompt to tokenize. - @override - Future> tokenize( - PromptValue promptValue, { - ChatOllamaToolsOptions? options, - }) async { - final encoding = getEncoding(this.encoding); - return encoding.encode(promptValue.toString()); - } - - @override - void close() { - _client.endSession(); - } -} diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart deleted file mode 100644 index 3a9ebb5a..00000000 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/mappers.dart +++ /dev/null @@ -1 +0,0 @@ -export '../chat_ollama/mappers.dart'; diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart deleted file mode 100644 index 7ad2615a..00000000 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama_tools/types.dart +++ /dev/null @@ -1,119 +0,0 @@ -import 'package:collection/collection.dart'; -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/tools.dart'; -import 'package:meta/meta.dart'; - -import '../chat_ollama/types.dart'; -import 'chat_ollama_tools.dart'; - -export '../chat_ollama/types.dart'; - -/// {@template chat_ollama_tools_options} -/// Options to pass into [ChatOllamaTools]. -/// {@endtemplate} -@immutable -class ChatOllamaToolsOptions extends ChatModelOptions { - /// {@macro chat_ollama_tools_options} - const ChatOllamaToolsOptions({ - this.options, - super.tools, - super.toolChoice, - this.toolsSystemPromptTemplate, - super.concurrencyLimit, - }); - - /// [ChatOllamaOptions] to pass into Ollama. - final ChatOllamaOptions? options; - - /// Prompt template for the system message where the model is instructed to - /// use the tools. - /// - /// The following placeholders can be used: - /// - `{tools}`: The list of tools available to the model. - /// - `{tool_choice}`: the tool choice the model must always select. - /// - /// If not provided, [defaultToolsSystemPromtTemplate] will be used. - final String? toolsSystemPromptTemplate; - - /// Default [toolsSystemPromptTemplate]. - static const String defaultToolsSystemPromtTemplate = ''' -You have access to these tools: -{tools} - -Based on the user input, select {tool_choice} from the available tools. - -Respond with a JSON containing a list of tool call objects. 
-The tool call objects should have two properties: -- "tool_name": The name of the selected tool (string) -- "tool_input": A JSON string with the input for the tool matching the tool's input schema - -Example response format: -```json -{{ - "tool_calls": [ - {{ - "tool_name": "tool_name", - "tool_input": "{{"param1":"value1","param2":"value2"}}" - }} - ] -}} -``` - -Ensure your response is valid JSON and follows this exact format. -'''; - - @override - ChatOllamaToolsOptions copyWith({ - final ChatOllamaOptions? options, - final List? tools, - final ChatToolChoice? toolChoice, - final String? toolsSystemPromptTemplate, - final int? concurrencyLimit, - }) { - return ChatOllamaToolsOptions( - options: options ?? this.options, - tools: tools ?? this.tools, - toolChoice: toolChoice ?? this.toolChoice, - toolsSystemPromptTemplate: - toolsSystemPromptTemplate ?? this.toolsSystemPromptTemplate, - concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, - ); - } - - @override - ChatOllamaToolsOptions merge(covariant final ChatOllamaToolsOptions? other) { - return copyWith( - options: other?.options, - tools: other?.tools, - toolChoice: other?.toolChoice, - toolsSystemPromptTemplate: other?.toolsSystemPromptTemplate, - concurrencyLimit: other?.concurrencyLimit, - ); - } - - @override - bool operator ==(covariant final ChatOllamaToolsOptions other) { - return options == other.options && - const ListEquality().equals(tools, other.tools) && - toolChoice == other.toolChoice && - toolsSystemPromptTemplate == other.toolsSystemPromptTemplate; - } - - @override - int get hashCode { - return options.hashCode ^ - const ListEquality().hash(tools) ^ - toolChoice.hashCode ^ - toolsSystemPromptTemplate.hashCode; - } -} - -/// Default tool called if model decides no other tools should be called -/// for a given query. -final conversationalResponseTool = StringTool.fromFunction( - name: '_conversational_response', - description: - 'Respond conversationally if no other tools should be called for a given query.', - inputDescription: 'Conversational response to the user', - func: (input) => input, -); diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart index 66167f0f..fcceacdb 100644 --- a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart +++ b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart @@ -210,10 +210,10 @@ void main() { test('Test Multi-turn conversations', () async { final prompt = PromptValue.chat([ - ChatMessage.humanText('List the numbers from 1 to 9 in order. '), + ChatMessage.humanText('List the numbers from 1 to 9 in order.'), ChatMessage.ai('123456789'), ChatMessage.humanText( - 'Remove the number "4" from the list', + 'Remove the number "4" from the list. Output only the remaining numbers in ascending order.', ), ]); final res = await chatModel.invoke( @@ -253,31 +253,49 @@ void main() { expect(res.output.content.toLowerCase(), contains('apple')); }); - test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), - () async { - const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. 
San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, + const tool1 = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'enum': ['celsius', 'fahrenheit'], }, - 'required': ['location'], }, - ); + 'required': ['location'], + }, + ); + const tool2 = ToolSpec( + name: 'get_historic_weather', + description: 'Get the historic weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { final model = chatModel.bind( const ChatOllamaOptions( model: defaultModel, - tools: [tool], + tools: [tool1], ), ); @@ -290,13 +308,13 @@ void main() { expect(aiMessage1.toolCalls, hasLength(2)); final toolCall1 = aiMessage1.toolCalls.first; - expect(toolCall1.name, tool.name); + expect(toolCall1.name, tool1.name); expect(toolCall1.arguments.containsKey('location'), isTrue); expect(toolCall1.arguments['location'], contains('Boston')); expect(toolCall1.arguments['unit'], 'celsius'); final toolCall2 = aiMessage1.toolCalls.last; - expect(toolCall2.name, tool.name); + expect(toolCall2.name, tool1.name); expect(toolCall2.arguments.containsKey('location'), isTrue); expect(toolCall2.arguments['location'], contains('Madrid')); expect(toolCall2.arguments['unit'], 'celsius'); @@ -336,5 +354,58 @@ void main() { expect(aiMessage2.content, contains('22')); expect(aiMessage2.content, contains('25')); }); + + test('Test multi tool call', () async { + final res = await chatModel.invoke( + PromptValue.string( + "What's the weather in Vellore, India and in Barcelona, Spain?", + ), + options: const ChatOllamaOptions( + model: defaultModel, + tools: [tool1, tool2], + ), + ); + expect(res.output.toolCalls, hasLength(2)); + final toolCall1 = res.output.toolCalls.first; + expect(toolCall1.name, 'get_current_weather'); + expect(toolCall1.argumentsRaw, isNotEmpty); + expect(toolCall1.arguments, isNotEmpty); + expect(toolCall1.arguments['location'], 'Vellore, India'); + expect(toolCall1.arguments['unit'], 'celsius'); + final toolCall2 = res.output.toolCalls.last; + expect(toolCall2.name, 'get_current_weather'); + expect(toolCall2.argumentsRaw, isNotEmpty); + expect(toolCall2.arguments, isNotEmpty); + expect(toolCall2.arguments['location'], 'Barcelona, Spain'); + expect(toolCall2.arguments['unit'], 'celsius'); + expect(res.finishReason, FinishReason.stop); + }); + + test('Test ChatToolChoice.none', () async { + final res = await chatModel.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: const ChatOllamaOptions( + model: defaultModel, + tools: [tool1], + toolChoice: ChatToolChoice.none, + ), + ); + expect(res.output.toolCalls, isEmpty); + expect(res.output.content, isNotEmpty); + }); + + test('Test ChatToolChoice.forced', () async { + final res = await chatModel.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: ChatOllamaOptions( + model: defaultModel, + tools: const [tool1, tool2], + toolChoice: 
ChatToolChoice.forced(name: tool2.name), + ), + ); + expect(res.output.toolCalls, hasLength(1)); + final toolCall = res.output.toolCalls.first; + expect(toolCall.name, tool2.name); + }); }); } diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart deleted file mode 100644 index 7204591a..00000000 --- a/packages/langchain_ollama/test/chat_models/chat_ollama_tools_test.dart +++ /dev/null @@ -1,207 +0,0 @@ -import 'dart:io'; - -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/language_models.dart'; -import 'package:langchain_core/prompts.dart'; -import 'package:langchain_core/tools.dart'; -import 'package:langchain_ollama/langchain_ollama.dart'; -import 'package:test/test.dart'; - -void main() { - group( - 'ChatOllamaTools tests', - skip: Platform.environment.containsKey('CI'), - () { - const defaultModel = 'gemma2'; - late ChatOllamaTools model; - const tool1 = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - const tool2 = ToolSpec( - name: 'get_historic_weather', - description: 'Get the historic weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - 'unit': { - 'type': 'string', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - - setUp(() async { - model = ChatOllamaTools( - defaultOptions: const ChatOllamaToolsOptions( - options: ChatOllamaOptions( - model: defaultModel, - keepAlive: 2, - ), - tools: [tool1, tool2], - ), - ); - }); - - tearDown(() { - model.close(); - }); - - test('Test single tool call', () async { - final res = await model.invoke( - PromptValue.string("What's the weather in Vellore, India?"), - ); - expect(res.output.toolCalls, hasLength(1)); - final toolCall = res.output.toolCalls.first; - expect(toolCall.name, 'get_current_weather'); - expect(toolCall.argumentsRaw, isNotEmpty); - expect(toolCall.arguments, isNotEmpty); - expect(toolCall.arguments['location'], contains('Vellore')); - expect(toolCall.arguments['unit'], 'celsius'); - expect(res.finishReason, FinishReason.stop); - }); - - test('Test multi tool call', () async { - final res = await model.invoke( - PromptValue.string( - "What's the weather in Vellore, India and in Barcelona, Spain?", - ), - ); - expect(res.output.toolCalls, hasLength(2)); - final toolCall1 = res.output.toolCalls.first; - expect(toolCall1.name, 'get_current_weather'); - expect(toolCall1.argumentsRaw, isNotEmpty); - expect(toolCall1.arguments, isNotEmpty); - expect(toolCall1.arguments['location'], 'Vellore, India'); - expect(toolCall1.arguments['unit'], 'celsius'); - final toolCall2 = res.output.toolCalls.last; - expect(toolCall2.name, 'get_current_weather'); - expect(toolCall2.argumentsRaw, isNotEmpty); - expect(toolCall2.arguments, isNotEmpty); - expect(toolCall2.arguments['location'], 'Barcelona, Spain'); - expect(toolCall2.arguments['unit'], 'celsius'); - expect(res.finishReason, FinishReason.stop); - }); - - test('Test ChatToolChoice.none', () async { - final res = await model.invoke( - 
PromptValue.string("What's the weather in Vellore, India?"), - options: const ChatOllamaToolsOptions( - options: ChatOllamaOptions(model: defaultModel), - tools: [tool1], - toolChoice: ChatToolChoice.none, - ), - ); - expect(res.output.toolCalls, isEmpty); - expect(res.output.content, isNotEmpty); - }); - - test('Test ChatToolChoice.forced', () async { - final res = await model.invoke( - PromptValue.string("What's the weather in Vellore, India?"), - options: ChatOllamaToolsOptions( - options: const ChatOllamaOptions(model: defaultModel), - tools: const [tool1, tool2], - toolChoice: ChatToolChoice.forced(name: tool2.name), - ), - ); - expect(res.output.toolCalls, hasLength(1)); - final toolCall = res.output.toolCalls.first; - expect(toolCall.name, tool2.name); - }); - - test( - 'Should throw exception if model did not respond in right JSON string format', - () async { - const toolOptions = ChatOllamaToolsOptions( - options: ChatOllamaOptions(model: defaultModel), - tools: [tool1], - toolsSystemPromptTemplate: - 'You have access to the following tools: {tools} ' - 'You must always select one of the above tools ' - 'and respond in plain text.', - ); - - expect( - () async => model.invoke( - PromptValue.string("What's the weather in Vellore, India?"), - options: toolOptions, - ), - throwsException, - ); - }, - ); - - test( - 'Should return content if no other tools should be called for a given query', - () async { - const toolOptions = ChatOllamaToolsOptions( - options: ChatOllamaOptions(model: defaultModel), - tools: [], - ); - final res = await model.invoke( - PromptValue.string('Do you know the weather in Vellore, India?'), - options: toolOptions, - ); - expect(res.output.toolCalls, isEmpty); - expect(res.output.content, isNotEmpty); - }); - - test( - 'Should throw error if toolSystemPromptTemplate not in right format', - () async { - const toolOptions = ChatOllamaToolsOptions( - options: ChatOllamaOptions(model: defaultModel), - toolsSystemPromptTemplate: - 'You have access to the following tools: tools} ' - 'You must always select one of the above tools', - ); - expect( - () async => model.invoke( - PromptValue.string("What's the weather in Vellore, India?"), - options: toolOptions, - ), - throwsException, - ); - }, - ); - - test('Test ChatOllamaToolsOptions', () async { - const toolOptions = ChatOllamaToolsOptions( - options: ChatOllamaOptions( - model: defaultModel, - ), - tools: [tool1], - toolsSystemPromptTemplate: 'toolSystemPromptTemplate', - ); - - expect(toolOptions.options?.model, defaultModel); - expect( - toolOptions.toolsSystemPromptTemplate, - 'toolSystemPromptTemplate', - ); - expect(toolOptions.tools![0], tool1); - }); - }, - ); -} From ea9a693df185afd7bee8ffe223efd690a4c6c853 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Fri, 26 Jul 2024 23:08:53 +0200 Subject: [PATCH 085/251] chore(release): publish packages - langchain@0.7.4 - langchain_anthropic@0.1.1 - langchain_chroma@0.2.1+1 - langchain_community@0.3.0 - langchain_core@0.3.4 - langchain_firebase@0.2.1 - langchain_google@0.6.1 - langchain_mistralai@0.2.2 - langchain_ollama@0.3.0 - langchain_openai@0.7.0 - langchain_pinecone@0.1.0+7 - langchain_supabase@0.1.1+1 - ollama_dart@0.2.0 - openai_dart@0.4.0 --- CHANGELOG.md | 120 ++++++++++++++++++ examples/browser_summarizer/pubspec.lock | 10 +- examples/browser_summarizer/pubspec.yaml | 6 +- examples/docs_examples/pubspec.lock | 22 ++-- examples/docs_examples/pubspec.yaml | 16 +-- examples/hello_world_backend/pubspec.lock | 8 +- examples/hello_world_backend/pubspec.yaml 
| 4 +- examples/hello_world_cli/pubspec.lock | 8 +- examples/hello_world_cli/pubspec.yaml | 4 +- examples/hello_world_flutter/pubspec.lock | 16 +-- examples/hello_world_flutter/pubspec.yaml | 10 +- examples/wikivoyage_eu/pubspec.lock | 10 +- examples/wikivoyage_eu/pubspec.yaml | 6 +- packages/anthropic_sdk_dart/CHANGELOG.md | 2 + packages/chromadb/CHANGELOG.md | 2 + packages/googleai_dart/CHANGELOG.md | 2 + packages/langchain/CHANGELOG.md | 10 ++ packages/langchain/pubspec.yaml | 4 +- packages/langchain_anthropic/CHANGELOG.md | 8 ++ packages/langchain_anthropic/pubspec.yaml | 4 +- packages/langchain_chroma/CHANGELOG.md | 6 + packages/langchain_chroma/pubspec.yaml | 10 +- packages/langchain_community/CHANGELOG.md | 7 + packages/langchain_community/pubspec.yaml | 6 +- packages/langchain_core/CHANGELOG.md | 8 ++ packages/langchain_core/pubspec.yaml | 2 +- packages/langchain_firebase/CHANGELOG.md | 8 ++ .../langchain_firebase/example/pubspec.lock | 6 +- .../langchain_firebase/example/pubspec.yaml | 4 +- packages/langchain_firebase/pubspec.lock | 2 +- packages/langchain_firebase/pubspec.yaml | 4 +- packages/langchain_google/CHANGELOG.md | 8 ++ packages/langchain_google/pubspec.yaml | 4 +- packages/langchain_mistralai/CHANGELOG.md | 8 ++ packages/langchain_mistralai/pubspec.yaml | 4 +- packages/langchain_ollama/CHANGELOG.md | 11 ++ packages/langchain_ollama/pubspec.yaml | 6 +- packages/langchain_openai/CHANGELOG.md | 12 ++ packages/langchain_openai/pubspec.yaml | 10 +- packages/langchain_pinecone/CHANGELOG.md | 6 + packages/langchain_pinecone/pubspec.yaml | 6 +- packages/langchain_supabase/CHANGELOG.md | 6 + packages/langchain_supabase/pubspec.yaml | 10 +- packages/mistralai_dart/CHANGELOG.md | 2 + packages/ollama_dart/CHANGELOG.md | 13 ++ packages/ollama_dart/pubspec.yaml | 4 +- packages/openai_dart/CHANGELOG.md | 11 ++ packages/openai_dart/pubspec.yaml | 4 +- packages/tavily_dart/CHANGELOG.md | 2 + packages/vertex_ai/CHANGELOG.md | 2 + 50 files changed, 359 insertions(+), 105 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aead8730..47d794f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,126 @@ Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. +## 2024-07-26 + +### Changes + +--- + +Packages with breaking changes: + + - [`langchain_community` - `v0.3.0`](#langchain_community---v030) + - [`langchain_ollama` - `v0.3.0`](#langchain_ollama---v030) + - [`langchain_openai` - `v0.7.0`](#langchain_openai---v070) + - [`ollama_dart` - `v0.2.0`](#ollama_dart---v020) + - [`openai_dart` - `v0.4.0`](#openai_dart---v040) + +Packages with other changes: + + - [`langchain` - `v0.7.4`](#langchain---v074) + - [`langchain_anthropic` - `v0.1.1`](#langchain_anthropic---v011) + - [`langchain_chroma` - `v0.2.1+1`](#langchain_chroma---v0211) + - [`langchain_core` - `v0.3.4`](#langchain_core---v034) + - [`langchain_firebase` - `v0.2.1`](#langchain_firebase---v021) + - [`langchain_google` - `v0.6.1`](#langchain_google---v061) + - [`langchain_mistralai` - `v0.2.2`](#langchain_mistralai---v022) + - [`langchain_pinecone` - `v0.1.0+7`](#langchain_pinecone---v0107) + - [`langchain_supabase` - `v0.1.1+1`](#langchain_supabase---v0111) + +--- + +#### `langchain` - `v0.7.4` + + - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). 
([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + - **DOCS**: Update README.md with Ollama tool call support. ([e016b0bd](https://github.com/davidmigloz/langchain_dart/commit/e016b0bd02065971faab2a3a48be625ff33a08cf)) + +#### `langchain_core` - `v0.3.4` + + - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + +#### `langchain_community` - `v0.3.0` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_ollama` - `v0.3.0` + + - **FEAT**: Add tool calling support in ChatOllama ([#505](https://github.com/davidmigloz/langchain_dart/issues/505)). ([6ffde204](https://github.com/davidmigloz/langchain_dart/commit/6ffde2043c1e865411c8b1096063619d6bcd80aa)) + - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). 
([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) + +#### `langchain_openai` - `v0.7.0` + + - **BREAKING** **FEAT**: Update ChatOpenAI default model to gpt-4o-mini ([#507](https://github.com/davidmigloz/langchain_dart/issues/507)). ([c7b8ce91](https://github.com/davidmigloz/langchain_dart/commit/c7b8ce91ac5b4dbe6bed563fae124a9f5ad76a84)) + - **FEAT**: Add support for disabling parallel tool calls in ChatOpenAI ([#493](https://github.com/davidmigloz/langchain_dart/issues/493)). ([c46d676d](https://github.com/davidmigloz/langchain_dart/commit/c46d676dee836f1d17e0d1fd61a8f1f0ba5c2881)) + - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) + - **FEAT**: Add support for service tier in ChatOpenAI ([#495](https://github.com/davidmigloz/langchain_dart/issues/495)). ([af79a4ff](https://github.com/davidmigloz/langchain_dart/commit/af79a4ffcadb207bfc704365462edebfca1ed6c7)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_anthropic` - `v0.1.1` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_firebase` - `v0.2.1` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). 
([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_google` - `v0.6.1` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_mistralai` - `v0.2.2` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_chroma` - `v0.2.1+1` + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_pinecone` - `v0.1.0+7` + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_supabase` - `v0.1.1+1` + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `ollama_dart` - `v0.2.0` + + - **FEAT**: Add tool calling support in ollama_dart ([#504](https://github.com/davidmigloz/langchain_dart/issues/504)). ([1ffdb41b](https://github.com/davidmigloz/langchain_dart/commit/1ffdb41b8f19941336c1cd911c73f0b3d46af975)) + - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) + - **FEAT**: Add support for Ollama version and model info ([#488](https://github.com/davidmigloz/langchain_dart/issues/488)). 
([a110ecb7](https://github.com/davidmigloz/langchain_dart/commit/a110ecb7f10e7975bd2416aa65add98984c6efb8)) + - **FEAT**: Add suffix support in Ollama completions API in ollama_dart ([#503](https://github.com/davidmigloz/langchain_dart/issues/503)). ([30d05a69](https://github.com/davidmigloz/langchain_dart/commit/30d05a69b07f88f803b9abfdf2fded9348a73490)) + - **BREAKING** **REFACTOR**: Change Ollama push model status type from enum to String ([#489](https://github.com/davidmigloz/langchain_dart/issues/489)). ([90c9ccd9](https://github.com/davidmigloz/langchain_dart/commit/90c9ccd986c7b679ed30225d2380120e17dfec41)) + - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) + +#### `openai_dart` - `v0.4.0` + + - **FEAT**: Add support for disabling parallel tool calls in openai_dart ([#492](https://github.com/davidmigloz/langchain_dart/issues/492)). ([a91e0719](https://github.com/davidmigloz/langchain_dart/commit/a91e07196278ae4da5917d52395f3c246fc35bf2)) + - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) + - **FEAT**: Support chunking strategy in file_search tool in openai_dart ([#496](https://github.com/davidmigloz/langchain_dart/issues/496)). ([cfa974a9](https://github.com/davidmigloz/langchain_dart/commit/cfa974a9e2fc4b79e5b66765b22d76710575d5bc)) + - **FEAT**: Add support for overrides in the file search tool in openai_dart ([#491](https://github.com/davidmigloz/langchain_dart/issues/491)). ([89605638](https://github.com/davidmigloz/langchain_dart/commit/89605638c465be37c2738258d840c21d32fe9554)) + - **FEAT**: Allow to customize OpenAI-Beta header in openai_dart ([#502](https://github.com/davidmigloz/langchain_dart/issues/502)). ([5fed8dbb](https://github.com/davidmigloz/langchain_dart/commit/5fed8dbb8205ba7925ca59d6f07a4f5e052b52b1)) + - **FEAT**: Add support for service tier in openai_dart ([#494](https://github.com/davidmigloz/langchain_dart/issues/494)). 
([0838e4b9](https://github.com/davidmigloz/langchain_dart/commit/0838e4b9f5bb25e29fbc163a0ff5cf3e64409d40)) + ## 2024-07-02 ### Changes diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index 621fd8c2..a18a608d 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -233,28 +233,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.2" + version: "0.3.0" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.3" + version: "0.7.0" langchain_tiktoken: dependency: transitive description: @@ -325,7 +325,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3+1" + version: "0.4.0" path: dependency: transitive description: diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 3868b314..922fd87b 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -13,9 +13,9 @@ dependencies: flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 js: ^0.7.1 - langchain: ^0.7.3 - langchain_community: 0.2.2 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_community: 0.3.0 + langchain_openai: ^0.7.0 shared_preferences: ^2.2.2 flutter: diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 806b30f6..f37088f5 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -245,63 +245,63 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_anthropic: dependency: "direct main" description: path: "../../packages/langchain_anthropic" relative: true source: path - version: "0.1.0" + version: "0.1.1" langchain_chroma: dependency: "direct main" description: path: "../../packages/langchain_chroma" relative: true source: path - version: "0.2.1" + version: "0.2.1+1" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.2" + version: "0.3.0" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.0" + version: "0.6.1" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.1" + version: "0.2.2" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.2+1" + version: "0.3.0" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.3" + version: "0.7.0" langchain_tiktoken: dependency: transitive description: @@ -355,14 +355,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.2" + 
version: "0.2.0" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3+1" + version: "0.4.0" path: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 4888329b..50df0b98 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -7,11 +7,11 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.3 - langchain_anthropic: ^0.1.0 - langchain_chroma: ^0.2.1 - langchain_community: 0.2.2 - langchain_google: ^0.6.0 - langchain_mistralai: ^0.2.1 - langchain_ollama: ^0.2.2+1 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_anthropic: ^0.1.1 + langchain_chroma: ^0.2.1+1 + langchain_community: 0.3.0 + langchain_google: ^0.6.1 + langchain_mistralai: ^0.2.2 + langchain_ollama: ^0.3.0 + langchain_openai: ^0.7.0 diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index 1acce35a..86030403 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -119,21 +119,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.3" + version: "0.7.0" langchain_tiktoken: dependency: transitive description: @@ -156,7 +156,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3+1" + version: "0.4.0" path: dependency: transitive description: diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index c091ef7c..171d3039 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -7,7 +7,7 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.3 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_openai: ^0.7.0 shelf: ^1.4.1 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 45f69561..4c5e1daa 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -111,21 +111,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.3" + version: "0.7.0" langchain_tiktoken: dependency: transitive description: @@ -148,7 +148,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3+1" + version: "0.4.0" path: dependency: transitive description: diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index 0e070b1d..531264b7 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -7,5 +7,5 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.3 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_openai: ^0.7.0 diff --git 
a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 9802cb30..74a75767 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -204,42 +204,42 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.0" + version: "0.6.1" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.1" + version: "0.2.2" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.2+1" + version: "0.3.0" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.3" + version: "0.7.0" langchain_tiktoken: dependency: transitive description: @@ -293,14 +293,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.2" + version: "0.2.0" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.3+1" + version: "0.4.0" path: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index 786c1edd..a8419a7d 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -12,11 +12,11 @@ dependencies: equatable: ^2.0.5 flutter_bloc: ^8.1.5 flutter_markdown: ^0.6.22 - langchain: ^0.7.3 - langchain_google: ^0.6.0 - langchain_mistralai: ^0.2.1 - langchain_ollama: ^0.2.2+1 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_google: ^0.6.1 + langchain_mistralai: ^0.2.2 + langchain_ollama: ^0.3.0 + langchain_openai: ^0.7.0 flutter: uses-material-design: true diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index da33efe5..4caa6233 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -167,28 +167,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.2" + version: "0.3.0" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.2+1" + version: "0.3.0" langchain_tiktoken: dependency: transitive description: @@ -235,7 +235,7 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.2" + version: "0.2.0" path: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index 70fc19fb..109cd236 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -7,6 +7,6 @@ environment: sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.3 - 
langchain_ollama: ^0.2.2+1 - langchain_community: 0.2.2 + langchain: ^0.7.4 + langchain_ollama: ^0.3.0 + langchain_community: 0.3.0 diff --git a/packages/anthropic_sdk_dart/CHANGELOG.md b/packages/anthropic_sdk_dart/CHANGELOG.md index 85fb6080..600092c2 100644 --- a/packages/anthropic_sdk_dart/CHANGELOG.md +++ b/packages/anthropic_sdk_dart/CHANGELOG.md @@ -1,3 +1,5 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + ## 0.1.0 - **FEAT**: Add support for tool use in anthropic_sdk_dart client ([#469](https://github.com/davidmigloz/langchain_dart/issues/469)). ([81896cfd](https://github.com/davidmigloz/langchain_dart/commit/81896cfdfce116b010dd51391994251d2a836333)) diff --git a/packages/chromadb/CHANGELOG.md b/packages/chromadb/CHANGELOG.md index 7f7724ef..171047ca 100644 --- a/packages/chromadb/CHANGELOG.md +++ b/packages/chromadb/CHANGELOG.md @@ -1,3 +1,5 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + ## 0.2.0+1 - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) diff --git a/packages/googleai_dart/CHANGELOG.md b/packages/googleai_dart/CHANGELOG.md index e1d53bc8..437f20b2 100644 --- a/packages/googleai_dart/CHANGELOG.md +++ b/packages/googleai_dart/CHANGELOG.md @@ -1,3 +1,5 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + ## 0.1.0+2 - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index 219c9dc5..680dadd0 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -1,3 +1,13 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.7.4 + + - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). 
([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + - **DOCS**: Update README.md with Ollama tool call support. ([e016b0bd](https://github.com/davidmigloz/langchain_dart/commit/e016b0bd02065971faab2a3a48be625ff33a08cf)) + ## 0.7.3 > Note: Anthropic integration (`ChatAnthropic`) is now available in the new [`langchain_anthropic`](https://pub.dev/packages/langchain_anthropic) package. diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 9217303a..2a05e1c7 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain description: Build powerful LLM-based Dart and Flutter applications with LangChain.dart. -version: 0.7.3 +version: 0.7.4 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ dependencies: characters: ^1.3.0 collection: ">=1.17.0 <1.20.0" crypto: ^3.0.3 - langchain_core: 0.3.3 + langchain_core: 0.3.4 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_anthropic/CHANGELOG.md b/packages/langchain_anthropic/CHANGELOG.md index fe3d0a4d..03cf82f7 100644 --- a/packages/langchain_anthropic/CHANGELOG.md +++ b/packages/langchain_anthropic/CHANGELOG.md @@ -1,3 +1,11 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.1.1 + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.1.0 - **FEAT**: Add ChatAnthropic integration ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 180234ac..4a76ab62 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_anthropic description: Anthropic module for LangChain.dart (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.). 
-version: 0.1.0 +version: 0.1.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ dependencies: anthropic_sdk_dart: ^0.1.0 collection: ">=1.17.0 <1.20.0" http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 rxdart: ^0.27.7 diff --git a/packages/langchain_chroma/CHANGELOG.md b/packages/langchain_chroma/CHANGELOG.md index 218a218c..cb464d8e 100644 --- a/packages/langchain_chroma/CHANGELOG.md +++ b/packages/langchain_chroma/CHANGELOG.md @@ -1,3 +1,9 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.2.1+1 + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.2.1 - Update a dependency to the latest release. diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 395c1ca6..9ae0ce5d 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_chroma description: LangChain.dart integration module for Chroma open-source embedding database. -version: 0.2.1 +version: 0.2.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart @@ -19,12 +19,12 @@ environment: dependencies: chromadb: ^0.2.0+1 http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 meta: ^1.11.0 uuid: ^4.3.3 dev_dependencies: test: ^1.25.2 - langchain: ^0.7.3 - langchain_community: 0.2.2 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_community: 0.3.0 + langchain_openai: ^0.7.0 diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 3e7be761..01068b20 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -1,3 +1,10 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.3.0 + +- **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) +- **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.2.2 - **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). 
([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 55b71a63..0415ef78 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. -version: 0.2.2 +version: 0.3.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart @@ -22,7 +22,7 @@ dependencies: flat_buffers: ^23.5.26 http: ^1.1.0 json_path: ^0.7.1 - langchain_core: 0.3.3 + langchain_core: 0.3.4 math_expressions: ^2.4.0 meta: ^1.11.0 objectbox: ^4.0.1 @@ -31,7 +31,7 @@ dependencies: dev_dependencies: build_runner: ^2.4.9 - langchain_openai: ^0.6.3 + langchain_openai: ^0.7.0 objectbox_generator: ^4.0.0 test: ^1.25.2 diff --git a/packages/langchain_core/CHANGELOG.md b/packages/langchain_core/CHANGELOG.md index 7757bae9..9e750ed8 100644 --- a/packages/langchain_core/CHANGELOG.md +++ b/packages/langchain_core/CHANGELOG.md @@ -1,3 +1,11 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.3.4 + + - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + ## 0.3.3 - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index b1f7f159..56b13134 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_core description: Contains core abstractions of LangChain.dart and the LangChain Expression Language (LCEL). 
-version: 0.3.3 +version: 0.3.4 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index a0eb0aa4..d7aedb1f 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,3 +1,11 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.2.1 + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.2.0 > Note: `ChatFirebaseVertexAI` now uses `gemini-1.5-flash` model by default. diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 03e1dab8..e3f16b8b 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -262,21 +262,21 @@ packages: path: "../../langchain" relative: true source: path - version: "0.7.3" + version: "0.7.4" langchain_core: dependency: "direct overridden" description: path: "../../langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" langchain_firebase: dependency: "direct main" description: path: ".." relative: true source: path - version: "0.2.0" + version: "0.2.1" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index f1618ec8..ddb7010d 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -13,8 +13,8 @@ dependencies: flutter: sdk: flutter flutter_markdown: ^0.6.22 - langchain: 0.7.3 - langchain_firebase: 0.2.0 + langchain: 0.7.4 + langchain_firebase: 0.2.1 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 2927c037..b8f43f02 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -230,7 +230,7 @@ packages: path: "../langchain_core" relative: true source: path - version: "0.3.3" + version: "0.3.4" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 34805499..b6298571 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). 
-version: 0.2.0 +version: 0.2.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart @@ -24,7 +24,7 @@ dependencies: firebase_core: ^3.1.0 cloud_firestore: ^4.17.0 firebase_vertexai: ^0.2.2 - langchain_core: 0.3.3 + langchain_core: 0.3.4 meta: ^1.11.0 uuid: ^4.3.3 diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index 9964d000..fb087939 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -1,3 +1,11 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.6.1 + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.6.0 > Note: `ChatGoogleGenerativeAI` now uses `gemini-1.5-flash` model by default. diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index da0082a6..3adcb2ed 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_google description: LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). -version: 0.6.0 +version: 0.6.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart @@ -24,7 +24,7 @@ dependencies: googleapis: ^12.0.0 googleapis_auth: ^1.5.1 http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 meta: ^1.11.0 uuid: ^4.3.3 vertex_ai: ^0.1.0+1 diff --git a/packages/langchain_mistralai/CHANGELOG.md b/packages/langchain_mistralai/CHANGELOG.md index 7b74ab46..acae7e78 100644 --- a/packages/langchain_mistralai/CHANGELOG.md +++ b/packages/langchain_mistralai/CHANGELOG.md @@ -1,3 +1,11 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.2.2 + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). 
([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.2.1 - Update a dependency to the latest release. diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 3c725d01..d237e3f9 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_mistralai description: LangChain.dart integration module for Mistral AI (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.2.1 +version: 0.2.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_mistralai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_mistralai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ environment: dependencies: collection: ">=1.17.0 <1.20.0" http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 mistralai_dart: ^0.0.3+3 diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index 4d475188..e8f7ae0e 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -1,3 +1,14 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.3.0 + + - **FEAT**: Add tool calling support in ChatOllama ([#505](https://github.com/davidmigloz/langchain_dart/issues/505)). ([6ffde204](https://github.com/davidmigloz/langchain_dart/commit/6ffde2043c1e865411c8b1096063619d6bcd80aa)) + - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) + ## 0.2.2+1 - **DOCS**: Update ChatOllama API docs. 
([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 33a60f44..a97ab982 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_ollama description: LangChain.dart integration module for Ollama (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). -version: 0.2.2+1 +version: 0.3.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart @@ -19,10 +19,10 @@ environment: dependencies: collection: ">=1.17.0 <1.20.0" http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.1.2 + ollama_dart: ^0.2.0 uuid: ^4.3.3 dev_dependencies: diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index c1503886..5b2c1ed7 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -1,3 +1,15 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.7.0 + + - **BREAKING** **FEAT**: Update ChatOpenAI default model to gpt-4o-mini ([#507](https://github.com/davidmigloz/langchain_dart/issues/507)). ([c7b8ce91](https://github.com/davidmigloz/langchain_dart/commit/c7b8ce91ac5b4dbe6bed563fae124a9f5ad76a84)) + - **FEAT**: Add support for disabling parallel tool calls in ChatOpenAI ([#493](https://github.com/davidmigloz/langchain_dart/issues/493)). ([c46d676d](https://github.com/davidmigloz/langchain_dart/commit/c46d676dee836f1d17e0d1fd61a8f1f0ba5c2881)) + - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) + - **FEAT**: Add support for service tier in ChatOpenAI ([#495](https://github.com/davidmigloz/langchain_dart/issues/495)). ([af79a4ff](https://github.com/davidmigloz/langchain_dart/commit/af79a4ffcadb207bfc704365462edebfca1ed6c7)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.6.3 - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). 
([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 4b628c54..f2e989a9 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_openai description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). -version: 0.6.3 +version: 0.7.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,13 +19,13 @@ environment: dependencies: collection: ">=1.17.0 <1.20.0" http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.3.3+1 + openai_dart: ^0.4.0 uuid: ^4.3.3 dev_dependencies: - langchain: ^0.7.3 - langchain_community: 0.2.2 + langchain: ^0.7.4 + langchain_community: 0.3.0 test: ^1.25.2 diff --git a/packages/langchain_pinecone/CHANGELOG.md b/packages/langchain_pinecone/CHANGELOG.md index 9faacd04..8925fcd8 100644 --- a/packages/langchain_pinecone/CHANGELOG.md +++ b/packages/langchain_pinecone/CHANGELOG.md @@ -1,3 +1,9 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.1.0+7 + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.1.0+6 - Update a dependency to the latest release. diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index 987d270b..ca636d73 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_pinecone description: LangChain.dart integration module for Pinecone fully-managed vector database. -version: 0.1.0+6 +version: 0.1.0+7 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart @@ -18,11 +18,11 @@ environment: dependencies: http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.3.3 dev_dependencies: test: ^1.25.2 - langchain_openai: ^0.6.3 + langchain_openai: ^0.7.0 diff --git a/packages/langchain_supabase/CHANGELOG.md b/packages/langchain_supabase/CHANGELOG.md index bb20d19b..3cd0af92 100644 --- a/packages/langchain_supabase/CHANGELOG.md +++ b/packages/langchain_supabase/CHANGELOG.md @@ -1,3 +1,9 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.1.1+1 + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + ## 0.1.1 - Update a dependency to the latest release. 
diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index 8650f10c..d1120d45 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_supabase description: LangChain.dart integration module for Supabase (e.g. Supabase Vector). -version: 0.1.1 +version: 0.1.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart @@ -18,12 +18,12 @@ environment: dependencies: http: ^1.1.0 - langchain_core: 0.3.3 + langchain_core: 0.3.4 meta: ^1.11.0 supabase: ^2.0.8 dev_dependencies: test: ^1.25.2 - langchain: ^0.7.3 - langchain_community: 0.2.2 - langchain_openai: ^0.6.3 + langchain: ^0.7.4 + langchain_community: 0.3.0 + langchain_openai: ^0.7.0 diff --git a/packages/mistralai_dart/CHANGELOG.md b/packages/mistralai_dart/CHANGELOG.md index df84e9cc..9a9234bb 100644 --- a/packages/mistralai_dart/CHANGELOG.md +++ b/packages/mistralai_dart/CHANGELOG.md @@ -1,3 +1,5 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + ## 0.0.3+3 - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index 5dfee162..af9e377b 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -1,3 +1,16 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.2.0 + +> Note: This release has breaking changes. + + - **FEAT**: Add tool calling support in ollama_dart ([#504](https://github.com/davidmigloz/langchain_dart/issues/504)). ([1ffdb41b](https://github.com/davidmigloz/langchain_dart/commit/1ffdb41b8f19941336c1cd911c73f0b3d46af975)) + - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) + - **FEAT**: Add support for Ollama version and model info ([#488](https://github.com/davidmigloz/langchain_dart/issues/488)). ([a110ecb7](https://github.com/davidmigloz/langchain_dart/commit/a110ecb7f10e7975bd2416aa65add98984c6efb8)) + - **FEAT**: Add suffix support in Ollama completions API in ollama_dart ([#503](https://github.com/davidmigloz/langchain_dart/issues/503)). ([30d05a69](https://github.com/davidmigloz/langchain_dart/commit/30d05a69b07f88f803b9abfdf2fded9348a73490)) + - **BREAKING** **REFACTOR**: Change Ollama push model status type from enum to String ([#489](https://github.com/davidmigloz/langchain_dart/issues/489)). ([90c9ccd9](https://github.com/davidmigloz/langchain_dart/commit/90c9ccd986c7b679ed30225d2380120e17dfec41)) + - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). 
([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) + ## 0.1.2 - **FEAT**: Add support for listing running Ollama models ([#451](https://github.com/davidmigloz/langchain_dart/issues/451)). ([cfaa31fb](https://github.com/davidmigloz/langchain_dart/commit/cfaa31fb8ce1dc128570c95d403809f71e0199d9)) diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index c967f29e..69b2f8a3 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: ollama_dart description: Dart Client for the Ollama API (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). -version: 0.1.2 +version: 0.2.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index 70e28286..d1fafe5f 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -1,3 +1,14 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 0.4.0 + + - **FEAT**: Add support for disabling parallel tool calls in openai_dart ([#492](https://github.com/davidmigloz/langchain_dart/issues/492)). ([a91e0719](https://github.com/davidmigloz/langchain_dart/commit/a91e07196278ae4da5917d52395f3c246fc35bf2)) + - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) + - **FEAT**: Support chunking strategy in file_search tool in openai_dart ([#496](https://github.com/davidmigloz/langchain_dart/issues/496)). ([cfa974a9](https://github.com/davidmigloz/langchain_dart/commit/cfa974a9e2fc4b79e5b66765b22d76710575d5bc)) + - **FEAT**: Add support for overrides in the file search tool in openai_dart ([#491](https://github.com/davidmigloz/langchain_dart/issues/491)). ([89605638](https://github.com/davidmigloz/langchain_dart/commit/89605638c465be37c2738258d840c21d32fe9554)) + - **FEAT**: Allow to customize OpenAI-Beta header in openai_dart ([#502](https://github.com/davidmigloz/langchain_dart/issues/502)). ([5fed8dbb](https://github.com/davidmigloz/langchain_dart/commit/5fed8dbb8205ba7925ca59d6f07a4f5e052b52b1)) + - **FEAT**: Add support for service tier in openai_dart ([#494](https://github.com/davidmigloz/langchain_dart/issues/494)). ([0838e4b9](https://github.com/davidmigloz/langchain_dart/commit/0838e4b9f5bb25e29fbc163a0ff5cf3e64409d40)) + ## 0.3.3+1 - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). 
([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index 5b2fef22..97f7f230 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: openai_dart description: Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. -version: 0.3.3+1 +version: 0.4.0 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart @@ -30,5 +30,5 @@ dev_dependencies: openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 test: ^1.25.2 diff --git a/packages/tavily_dart/CHANGELOG.md b/packages/tavily_dart/CHANGELOG.md index 897f0558..9abf1cdf 100644 --- a/packages/tavily_dart/CHANGELOG.md +++ b/packages/tavily_dart/CHANGELOG.md @@ -1,3 +1,5 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + ## 0.1.0 - **FEAT**: Implement tavily_dart, a Dart client for Tavily API ([#456](https://github.com/davidmigloz/langchain_dart/issues/456)). ([fbfb79ba](https://github.com/davidmigloz/langchain_dart/commit/fbfb79bad81dbbd5844a90938fda79b201f20047)) diff --git a/packages/vertex_ai/CHANGELOG.md b/packages/vertex_ai/CHANGELOG.md index 18902a6a..217a19e8 100644 --- a/packages/vertex_ai/CHANGELOG.md +++ b/packages/vertex_ai/CHANGELOG.md @@ -1,3 +1,5 @@ +📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + ## 0.1.0+1 - Update a dependency to the latest release. 
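To ground the release above (langchain_ollama 0.3.0 with the llama-3.1 default and langchain_core 0.3.4), here is a minimal Dart sketch of a ChatOllama call using the options surface that the diffs in this patch touch. It is illustrative only and not part of the patch series: the `defaultOptions` constructor parameter, the `PromptValue.string` helper, the `invoke` call and the `'llama3.1'` model tag are assumed from the packages' usual public API rather than taken from these hunks.

// Illustrative sketch, not part of the patch series.
// Assumes the usual langchain_ollama / langchain_core public surface.
import 'package:langchain_core/prompts.dart';
import 'package:langchain_ollama/langchain_ollama.dart';

Future<void> main() async {
  // As of the 0.3.0 release above, ChatOllama targets llama-3.1 by default;
  // the model is pinned here explicitly for clarity (tag assumed).
  final chat = ChatOllama(
    defaultOptions: const ChatOllamaOptions(
      model: 'llama3.1',
      temperature: 0.0,
    ),
  );
  final res = await chat.invoke(PromptValue.string('Why is the sky blue?'));
  print(res.output.content);
}
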
From 30cc964816e127b17a198b3a2e8748161ff8db79 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Sat, 27 Jul 2024 00:43:24 +0200 Subject: [PATCH 086/251] docs: Fix typo in langchain_ollama readme --- packages/langchain_ollama/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/langchain_ollama/README.md b/packages/langchain_ollama/README.md index a0c8c7e4..885dbf9f 100644 --- a/packages/langchain_ollama/README.md +++ b/packages/langchain_ollama/README.md @@ -2,7 +2,7 @@ [![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) [![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment) -[![langchain_ollama](https://img.shields.io/pub/v/langchain_ollam.svg)](https://pub.dev/packages/langchain_ollama) +[![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) [![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) [![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) From 4065aa72b37f4a24bb083d28050b01702ea24fbd Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Mon, 29 Jul 2024 18:33:24 +0200 Subject: [PATCH 087/251] refactor: Don't send OpenAI-Beta header in ChatOpenAI (#511) --- packages/langchain_openai/lib/src/chat_models/chat_openai.dart | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index dbd9c333..0dc31168 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -200,6 +200,7 @@ class ChatOpenAI extends BaseChatModel { }) : _client = OpenAIClient( apiKey: apiKey ?? '', organization: organization, + beta: null, baseUrl: baseUrl, headers: headers, queryParams: queryParams, From a32b48b2c858bc336a05bfe5e161758318cc4de3 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Mon, 29 Jul 2024 21:33:44 +0200 Subject: [PATCH 088/251] feat: Add support for min_p in Ollama (#512) --- .../src/chat_models/chat_ollama/mappers.dart | 1 + .../src/chat_models/chat_ollama/types.dart | 16 ++++++- .../langchain_ollama/lib/src/llms/ollama.dart | 14 +++--- .../langchain_ollama/lib/src/llms/types.dart | 25 +++++++++- .../src/generated/schema/request_options.dart | 10 +++- .../src/generated/schema/schema.freezed.dart | 47 +++++++++++++++++-- .../lib/src/generated/schema/schema.g.dart | 2 + packages/ollama_dart/oas/ollama-curated.yaml | 11 ++++- 8 files changed, 112 insertions(+), 14 deletions(-) diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart index 0c543a9c..ce12e70f 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart @@ -35,6 +35,7 @@ o.GenerateChatCompletionRequest generateChatCompletionRequest( numPredict: options?.numPredict ?? defaultOptions.numPredict, topK: options?.topK ?? defaultOptions.topK, topP: options?.topP ?? 
defaultOptions.topP, + minP: options?.minP ?? defaultOptions.minP, tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, typicalP: options?.typicalP ?? defaultOptions.typicalP, repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart index 1b3b4d77..6e9c0f20 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart @@ -23,6 +23,7 @@ class ChatOllamaOptions extends ChatModelOptions { this.numPredict, this.topK, this.topP, + this.minP, this.tfsZ, this.typicalP, this.repeatLastN, @@ -90,12 +91,20 @@ class ChatOllamaOptions extends ChatModelOptions { /// (Default: 40) final int? topK; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more + /// Works together with [topK]. A higher value (e.g., 0.95) will lead to more /// diverse text, while a lower value (e.g., 0.5) will generate more focused /// and conservative text. /// (Default: 0.9) final double? topP; + /// Alternative to the [topP], and aims to ensure a balance of quality and + /// variety. [minP] represents the minimum probability for a token to be + /// considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability + /// of 0.9, logits with a value less than 0.05*0.9=0.045 are filtered out. + /// (Default: 0.0) + final double? minP; + /// Tail free sampling is used to reduce the impact of less probable tokens /// from the output. A higher value (e.g., 2.0) will reduce the impact more, /// while a value of 1.0 disables this setting. @@ -213,6 +222,7 @@ class ChatOllamaOptions extends ChatModelOptions { final int? numPredict, final int? topK, final double? topP, + final double? minP, final double? tfsZ, final double? typicalP, final int? repeatLastN, @@ -248,6 +258,7 @@ class ChatOllamaOptions extends ChatModelOptions { numPredict: numPredict ?? this.numPredict, topK: topK ?? this.topK, topP: topP ?? this.topP, + minP: minP ?? this.minP, tfsZ: tfsZ ?? this.tfsZ, typicalP: typicalP ?? this.typicalP, repeatLastN: repeatLastN ?? this.repeatLastN, @@ -287,6 +298,7 @@ class ChatOllamaOptions extends ChatModelOptions { numPredict: other?.numPredict, topK: other?.topK, topP: other?.topP, + minP: other?.minP, tfsZ: other?.tfsZ, typicalP: other?.typicalP, repeatLastN: other?.repeatLastN, @@ -325,6 +337,7 @@ class ChatOllamaOptions extends ChatModelOptions { numPredict == other.numPredict && topK == other.topK && topP == other.topP && + minP == other.minP && tfsZ == other.tfsZ && typicalP == other.typicalP && repeatLastN == other.repeatLastN && @@ -362,6 +375,7 @@ class ChatOllamaOptions extends ChatModelOptions { numPredict.hashCode ^ topK.hashCode ^ topP.hashCode ^ + minP.hashCode ^ tfsZ.hashCode ^ typicalP.hashCode ^ repeatLastN.hashCode ^ diff --git a/packages/langchain_ollama/lib/src/llms/ollama.dart b/packages/langchain_ollama/lib/src/llms/ollama.dart index b3601f6e..db352184 100644 --- a/packages/langchain_ollama/lib/src/llms/ollama.dart +++ b/packages/langchain_ollama/lib/src/llms/ollama.dart @@ -215,12 +215,13 @@ class Ollama extends BaseLLM { return GenerateCompletionRequest( model: options?.model ?? defaultOptions.model ?? 
defaultModel, prompt: prompt, - system: options?.system, - template: options?.template, - context: options?.context, - format: options?.format?.toResponseFormat(), - raw: options?.raw, - keepAlive: options?.keepAlive, + system: options?.system ?? defaultOptions.system, + suffix: options?.suffix ?? defaultOptions.suffix, + template: options?.template ?? defaultOptions.template, + context: options?.context ?? defaultOptions.context, + format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), + raw: options?.raw ?? defaultOptions.raw, + keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, stream: stream, options: RequestOptions( numKeep: options?.numKeep ?? defaultOptions.numKeep, @@ -228,6 +229,7 @@ class Ollama extends BaseLLM { numPredict: options?.numPredict ?? defaultOptions.numPredict, topK: options?.topK ?? defaultOptions.topK, topP: options?.topP ?? defaultOptions.topP, + minP: options?.minP ?? defaultOptions.minP, tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, typicalP: options?.typicalP ?? defaultOptions.typicalP, repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, diff --git a/packages/langchain_ollama/lib/src/llms/types.dart b/packages/langchain_ollama/lib/src/llms/types.dart index ecad337d..a8807248 100644 --- a/packages/langchain_ollama/lib/src/llms/types.dart +++ b/packages/langchain_ollama/lib/src/llms/types.dart @@ -14,6 +14,7 @@ class OllamaOptions extends LLMOptions { const OllamaOptions({ super.model, this.system, + this.suffix, this.template, this.context, this.format, @@ -24,6 +25,7 @@ class OllamaOptions extends LLMOptions { this.numPredict, this.topK, this.topP, + this.minP, this.tfsZ, this.typicalP, this.repeatLastN, @@ -54,6 +56,9 @@ class OllamaOptions extends LLMOptions { /// The system prompt (Overrides what is defined in the Modelfile). final String? system; + /// The text that comes after the inserted text. + final String? suffix; + /// The full prompt or prompt template (overrides what is defined in the /// Modelfile). final String? template; @@ -109,12 +114,20 @@ class OllamaOptions extends LLMOptions { /// (Default: 40) final int? topK; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more + /// Works together with [topK]. A higher value (e.g., 0.95) will lead to more /// diverse text, while a lower value (e.g., 0.5) will generate more focused /// and conservative text. /// (Default: 0.9) final double? topP; + /// Alternative to the [topP], and aims to ensure a balance of quality and + /// variety. [minP] represents the minimum probability for a token to be + /// considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability + /// of 0.9, logits with a value less than 0.05*0.9=0.045 are filtered out. + /// (Default: 0.0) + final double? minP; + /// Tail free sampling is used to reduce the impact of less probable tokens /// from the output. A higher value (e.g., 2.0) will reduce the impact more, /// while a value of 1.0 disables this setting. @@ -226,6 +239,7 @@ class OllamaOptions extends LLMOptions { OllamaOptions copyWith({ final String? model, final String? system, + final String? suffix, final String? template, final List? context, final OllamaResponseFormat? format, @@ -236,6 +250,7 @@ class OllamaOptions extends LLMOptions { final int? numPredict, final int? topK, final double? topP, + final double? minP, final double? tfsZ, final double? typicalP, final int? 
repeatLastN, @@ -265,6 +280,7 @@ class OllamaOptions extends LLMOptions { return OllamaOptions( model: model ?? this.model, system: system ?? this.system, + suffix: suffix ?? this.suffix, template: template ?? this.template, context: context ?? this.context, format: format ?? this.format, @@ -275,6 +291,7 @@ class OllamaOptions extends LLMOptions { numPredict: numPredict ?? this.numPredict, topK: topK ?? this.topK, topP: topP ?? this.topP, + minP: minP ?? this.minP, tfsZ: tfsZ ?? this.tfsZ, typicalP: typicalP ?? this.typicalP, repeatLastN: repeatLastN ?? this.repeatLastN, @@ -308,6 +325,7 @@ class OllamaOptions extends LLMOptions { return copyWith( model: other?.model, system: other?.system, + suffix: other?.suffix, template: other?.template, context: other?.context, format: other?.format, @@ -318,6 +336,7 @@ class OllamaOptions extends LLMOptions { numPredict: other?.numPredict, topK: other?.topK, topP: other?.topP, + minP: other?.minP, tfsZ: other?.tfsZ, typicalP: other?.typicalP, repeatLastN: other?.repeatLastN, @@ -352,6 +371,7 @@ class OllamaOptions extends LLMOptions { runtimeType == other.runtimeType && model == other.model && system == other.system && + suffix == other.suffix && template == other.template && const ListEquality().equals(context, other.context) && format == other.format && @@ -362,6 +382,7 @@ class OllamaOptions extends LLMOptions { numPredict == other.numPredict && topK == other.topK && topP == other.topP && + minP == other.minP && tfsZ == other.tfsZ && typicalP == other.typicalP && repeatLastN == other.repeatLastN && @@ -393,6 +414,7 @@ class OllamaOptions extends LLMOptions { int get hashCode { return model.hashCode ^ system.hashCode ^ + suffix.hashCode ^ template.hashCode ^ const ListEquality().hash(context) ^ format.hashCode ^ @@ -403,6 +425,7 @@ class OllamaOptions extends LLMOptions { numPredict.hashCode ^ topK.hashCode ^ topP.hashCode ^ + minP.hashCode ^ tfsZ.hashCode ^ typicalP.hashCode ^ repeatLastN.hashCode ^ diff --git a/packages/ollama_dart/lib/src/generated/schema/request_options.dart b/packages/ollama_dart/lib/src/generated/schema/request_options.dart index b6288d57..940d09d4 100644 --- a/packages/ollama_dart/lib/src/generated/schema/request_options.dart +++ b/packages/ollama_dart/lib/src/generated/schema/request_options.dart @@ -30,10 +30,16 @@ class RequestOptions with _$RequestOptions { /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? topK, - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + /// probability for a token to be considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) + @JsonKey(name: 'min_p', includeIfNull: false) double? minP, + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, @@ -136,6 +142,7 @@ class RequestOptions with _$RequestOptions { 'num_predict', 'top_k', 'top_p', + 'min_p', 'tfs_z', 'typical_p', 'repeat_last_n', @@ -175,6 +182,7 @@ class RequestOptions with _$RequestOptions { 'num_predict': numPredict, 'top_k': topK, 'top_p': topP, + 'min_p': minP, 'tfs_z': tfsZ, 'typical_p': typicalP, 'repeat_last_n': repeatLastN, diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index b9128995..1c77269c 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -612,11 +612,18 @@ mixin _$RequestOptions { @JsonKey(name: 'top_k', includeIfNull: false) int? get topK => throw _privateConstructorUsedError; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; + /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + /// probability for a token to be considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) + @JsonKey(name: 'min_p', includeIfNull: false) + double? get minP => throw _privateConstructorUsedError; + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) @@ -745,6 +752,7 @@ abstract class $RequestOptionsCopyWith<$Res> { @JsonKey(name: 'num_predict', includeIfNull: false) int? numPredict, @JsonKey(name: 'top_k', includeIfNull: false) int? topK, @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @JsonKey(name: 'min_p', includeIfNull: false) double? minP, @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) double? typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? repeatLastN, @@ -793,6 +801,7 @@ class _$RequestOptionsCopyWithImpl<$Res, $Val extends RequestOptions> Object? numPredict = freezed, Object? topK = freezed, Object? topP = freezed, + Object? minP = freezed, Object? tfsZ = freezed, Object? typicalP = freezed, Object? repeatLastN = freezed, @@ -839,6 +848,10 @@ class _$RequestOptionsCopyWithImpl<$Res, $Val extends RequestOptions> ? _value.topP : topP // ignore: cast_nullable_to_non_nullable as double?, + minP: freezed == minP + ? _value.minP + : minP // ignore: cast_nullable_to_non_nullable + as double?, tfsZ: freezed == tfsZ ? _value.tfsZ : tfsZ // ignore: cast_nullable_to_non_nullable @@ -953,6 +966,7 @@ abstract class _$$RequestOptionsImplCopyWith<$Res> @JsonKey(name: 'num_predict', includeIfNull: false) int? numPredict, @JsonKey(name: 'top_k', includeIfNull: false) int? topK, @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @JsonKey(name: 'min_p', includeIfNull: false) double? 
minP, @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) double? typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? repeatLastN, @@ -999,6 +1013,7 @@ class __$$RequestOptionsImplCopyWithImpl<$Res> Object? numPredict = freezed, Object? topK = freezed, Object? topP = freezed, + Object? minP = freezed, Object? tfsZ = freezed, Object? typicalP = freezed, Object? repeatLastN = freezed, @@ -1045,6 +1060,10 @@ class __$$RequestOptionsImplCopyWithImpl<$Res> ? _value.topP : topP // ignore: cast_nullable_to_non_nullable as double?, + minP: freezed == minP + ? _value.minP + : minP // ignore: cast_nullable_to_non_nullable + as double?, tfsZ: freezed == tfsZ ? _value.tfsZ : tfsZ // ignore: cast_nullable_to_non_nullable @@ -1154,6 +1173,7 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'num_predict', includeIfNull: false) this.numPredict, @JsonKey(name: 'top_k', includeIfNull: false) this.topK, @JsonKey(name: 'top_p', includeIfNull: false) this.topP, + @JsonKey(name: 'min_p', includeIfNull: false) this.minP, @JsonKey(name: 'tfs_z', includeIfNull: false) this.tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) this.typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) this.repeatLastN, @@ -1210,12 +1230,20 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'top_k', includeIfNull: false) final int? topK; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; + /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + /// probability for a token to be considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) + @override + @JsonKey(name: 'min_p', includeIfNull: false) + final double? minP; + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1) @override @@ -1362,7 +1390,7 @@ class _$RequestOptionsImpl extends _RequestOptions { @override String toString() { - return 'RequestOptions(numKeep: $numKeep, seed: $seed, numPredict: $numPredict, topK: $topK, topP: $topP, tfsZ: $tfsZ, typicalP: $typicalP, repeatLastN: $repeatLastN, temperature: $temperature, repeatPenalty: $repeatPenalty, presencePenalty: $presencePenalty, frequencyPenalty: $frequencyPenalty, mirostat: $mirostat, mirostatTau: $mirostatTau, mirostatEta: $mirostatEta, penalizeNewline: $penalizeNewline, stop: $stop, numa: $numa, numCtx: $numCtx, numBatch: $numBatch, numGpu: $numGpu, mainGpu: $mainGpu, lowVram: $lowVram, f16Kv: $f16Kv, logitsAll: $logitsAll, vocabOnly: $vocabOnly, useMmap: $useMmap, useMlock: $useMlock, numThread: $numThread)'; + return 'RequestOptions(numKeep: $numKeep, seed: $seed, numPredict: $numPredict, topK: $topK, topP: $topP, minP: $minP, tfsZ: $tfsZ, typicalP: $typicalP, repeatLastN: $repeatLastN, temperature: $temperature, repeatPenalty: $repeatPenalty, presencePenalty: $presencePenalty, frequencyPenalty: $frequencyPenalty, mirostat: $mirostat, mirostatTau: $mirostatTau, mirostatEta: $mirostatEta, penalizeNewline: $penalizeNewline, stop: $stop, numa: $numa, numCtx: $numCtx, numBatch: $numBatch, numGpu: $numGpu, mainGpu: $mainGpu, lowVram: $lowVram, f16Kv: $f16Kv, logitsAll: $logitsAll, vocabOnly: $vocabOnly, useMmap: $useMmap, useMlock: $useMlock, numThread: $numThread)'; } @override @@ -1376,6 +1404,7 @@ class _$RequestOptionsImpl extends _RequestOptions { other.numPredict == numPredict) && (identical(other.topK, topK) || other.topK == topK) && (identical(other.topP, topP) || other.topP == topP) && + (identical(other.minP, minP) || other.minP == minP) && (identical(other.tfsZ, tfsZ) || other.tfsZ == tfsZ) && (identical(other.typicalP, typicalP) || other.typicalP == typicalP) && @@ -1426,6 +1455,7 @@ class _$RequestOptionsImpl extends _RequestOptions { numPredict, topK, topP, + minP, tfsZ, typicalP, repeatLastN, @@ -1474,6 +1504,7 @@ abstract class _RequestOptions extends RequestOptions { @JsonKey(name: 'num_predict', includeIfNull: false) final int? numPredict, @JsonKey(name: 'top_k', includeIfNull: false) final int? topK, @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, + @JsonKey(name: 'min_p', includeIfNull: false) final double? minP, @JsonKey(name: 'tfs_z', includeIfNull: false) final double? tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) final double? typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) @@ -1536,12 +1567,20 @@ abstract class _RequestOptions extends RequestOptions { int? get topK; @override - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; @override + /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + /// probability for a token to be considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) + @JsonKey(name: 'min_p', includeIfNull: false) + double? 
get minP; + @override + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index fbf45bc0..473e7825 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -67,6 +67,7 @@ _$RequestOptionsImpl _$$RequestOptionsImplFromJson(Map json) => numPredict: json['num_predict'] as int?, topK: json['top_k'] as int?, topP: (json['top_p'] as num?)?.toDouble(), + minP: (json['min_p'] as num?)?.toDouble(), tfsZ: (json['tfs_z'] as num?)?.toDouble(), typicalP: (json['typical_p'] as num?)?.toDouble(), repeatLastN: json['repeat_last_n'] as int?, @@ -108,6 +109,7 @@ Map _$$RequestOptionsImplToJson( writeNotNull('num_predict', instance.numPredict); writeNotNull('top_k', instance.topK); writeNotNull('top_p', instance.topP); + writeNotNull('min_p', instance.minP); writeNotNull('tfs_z', instance.tfsZ); writeNotNull('typical_p', instance.typicalP); writeNotNull('repeat_last_n', instance.repeatLastN); diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index b7c04cae..0939dfb3 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -360,8 +360,17 @@ components: format: float nullable: true description: | - Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + min_p: + type: number + format: float + nullable: true + description: | + Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + probability for a token to be considered, relative to the probability of the most likely token. For + example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + than 0.05*0.9=0.045 are filtered out. 
(Default: 0.0) tfs_z: type: number format: float From fca99fc0f2947d30ee96077ad2e8257dbc900842 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 10 Aug 2024 18:31:48 +0200 Subject: [PATCH 089/251] build: Update dependencies (#520) --- examples/browser_summarizer/pubspec.lock | 84 +++++++++---------- examples/browser_summarizer/pubspec.yaml | 8 +- examples/docs_examples/pubspec.lock | 54 +++++------- examples/docs_examples/pubspec.yaml | 2 +- examples/hello_world_backend/pubspec.lock | 50 +++++------ examples/hello_world_backend/pubspec.yaml | 4 +- examples/hello_world_cli/pubspec.lock | 42 ++++------ examples/hello_world_cli/pubspec.yaml | 2 +- examples/hello_world_flutter/pubspec.lock | 58 ++++++------- examples/hello_world_flutter/pubspec.yaml | 6 +- .../pubspec.lock | 18 ++-- .../pubspec.yaml | 6 +- examples/wikivoyage_eu/pubspec.lock | 26 +++--- examples/wikivoyage_eu/pubspec.yaml | 2 +- melos.yaml | 51 +++++------ packages/anthropic_sdk_dart/pubspec.lock | 56 ++++++------- packages/anthropic_sdk_dart/pubspec.yaml | 20 ++--- packages/chromadb/pubspec.yaml | 18 ++-- packages/googleai_dart/pubspec.yaml | 20 ++--- packages/langchain/pubspec.yaml | 8 +- packages/langchain_amazon/pubspec.yaml | 2 +- packages/langchain_anthropic/pubspec.yaml | 10 +-- packages/langchain_chroma/pubspec.yaml | 8 +- packages/langchain_cohere/pubspec.yaml | 2 +- packages/langchain_community/pubspec.yaml | 18 ++-- .../lib/src/runnables/function.dart | 4 +- packages/langchain_core/pubspec.yaml | 10 +-- .../langchain_firebase/example/pubspec.lock | 52 ++++++------ .../langchain_firebase/example/pubspec.yaml | 8 +- packages/langchain_firebase/pubspec.lock | 48 +++++------ packages/langchain_firebase/pubspec.yaml | 11 +-- packages/langchain_google/pubspec.yaml | 16 ++-- packages/langchain_huggingface/pubspec.yaml | 2 +- packages/langchain_microsoft/pubspec.yaml | 2 +- packages/langchain_mistralai/pubspec.yaml | 8 +- packages/langchain_ollama/pubspec.yaml | 10 +-- packages/langchain_openai/pubspec.yaml | 10 +-- packages/langchain_pinecone/pubspec.yaml | 8 +- packages/langchain_supabase/pubspec.yaml | 8 +- packages/langchain_weaviate/pubspec.yaml | 2 +- packages/langchain_wikipedia/pubspec.yaml | 2 +- packages/langchain_wolfram/pubspec.yaml | 2 +- packages/langgraph/pubspec.yaml | 2 +- packages/mistralai_dart/pubspec.yaml | 20 ++--- packages/ollama_dart/pubspec.yaml | 20 ++--- packages/openai_dart/pubspec.yaml | 20 ++--- packages/tavily_dart/pubspec.yaml | 20 ++--- packages/vertex_ai/pubspec.yaml | 10 +-- 48 files changed, 420 insertions(+), 450 deletions(-) diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index a18a608d..21488e9b 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -53,10 +53,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -93,18 +93,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "77f3be8c9acaa64ed37dd49c21c056da71b78053d31131ca26a273884a753f66" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "2.0.0-wasm" + version: "2.2.0" fetch_client: dependency: transitive description: name: 
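Illustrative usage (a minimal sketch, not part of the patch): the new `suffix` and `minP` options added to `OllamaOptions` in the Ollama patch above could be exercised roughly as follows, assuming the public LangChain.dart API surface (`Ollama`, `OllamaOptions`, `PromptValue`, `invoke`); the model name and prompts are placeholders.

import 'package:langchain/langchain.dart';
import 'package:langchain_ollama/langchain_ollama.dart';

Future<void> main() async {
  // Default options for every call; minP drops tokens whose probability is
  // below minP * P(most likely token), e.g. 0.05 * 0.9 = 0.045.
  final llm = Ollama(
    defaultOptions: const OllamaOptions(
      model: 'codellama:7b-code', // placeholder model name
      minP: 0.05,
    ),
  );

  // `suffix` is intended for fill-in-the-middle style completions: the model
  // generates the text that goes between the prompt and the suffix.
  final res = await llm.invoke(
    PromptValue.string('def remove_non_ascii(s: str) -> str:'),
    options: const OllamaOptions(
      suffix: '    return result',
    ),
  );
  print(res);
}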
fetch_client - sha256: e11722d7d8cd21f944b52af780392274f7c34a41156b1c80053fc2a414e09a1b + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.1.0-wasm" + version: "1.1.2" ffi: dependency: transitive description: @@ -146,18 +146,18 @@ packages: dependency: "direct main" description: name: flutter_bloc - sha256: f0ecf6e6eb955193ca60af2d5ca39565a86b8a142452c5b24d96fb477428f4d2 + sha256: b594505eac31a0518bdcb4b5b79573b8d9117b193cc80cc12e17d639b10aa27a url: "https://pub.dev" source: hosted - version: "8.1.5" + version: "8.1.6" flutter_markdown: dependency: "direct main" description: name: flutter_markdown - sha256: "87e11b9df25a42e2db315b8b7a51fae8e66f57a4b2f50ec4b822d0fa155e6b52" + sha256: a23c41ee57573e62fc2190a1f36a0480c4d90bde3a8a8d7126e5d5992fb53fb7 url: "https://pub.dev" source: hosted - version: "0.6.22" + version: "0.7.3+1" flutter_web_plugins: dependency: transitive description: flutter @@ -167,10 +167,10 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" html: dependency: transitive description: @@ -183,10 +183,10 @@ packages: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -215,18 +215,18 @@ packages: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" json_path: dependency: transitive description: name: json_path - sha256: "149d32ceb7dc22422ea6d09e401fd688f54e1343bc9ff8c3cb1900ca3b1ad8b1" + sha256: "7a06bbb1cfad390b20fb7a2ca5e67d9ba59633879c6d71142b80fbf61c3b66f6" url: "https://pub.dev" source: hosted - version: "0.7.1" + version: "0.7.4" langchain: dependency: "direct main" description: @@ -275,18 +275,18 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" + sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec url: "https://pub.dev" source: hosted - version: "0.8.0" + version: "0.11.1" math_expressions: dependency: transitive description: name: math_expressions - sha256: "3576593617c3870d75728a751f6ec6e606706d44e363f088ac394b5a28a98064" + sha256: e32d803d758ace61cc6c4bdfed1226ff60a6a23646b35685670d28b5616139f8 url: "https://pub.dev" source: hosted - version: "2.4.0" + version: "2.6.0" maybe_just_nothing: dependency: transitive description: @@ -299,10 +299,10 @@ packages: dependency: transitive description: name: meta - sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.12.0" + version: "1.15.0" nested: dependency: transitive description: @@ -410,58 +410,58 @@ packages: dependency: "direct main" description: name: shared_preferences - sha256: 
"81429e4481e1ccfb51ede496e916348668fd0921627779233bd24cc3ff6abd02" + sha256: c272f9cabca5a81adc9b0894381e9c1def363e980f960fa903c604c471b22f68 url: "https://pub.dev" source: hosted - version: "2.2.2" + version: "2.3.1" shared_preferences_android: dependency: transitive description: name: shared_preferences_android - sha256: "8568a389334b6e83415b6aae55378e158fbc2314e074983362d20c562780fb06" + sha256: a7e8467e9181cef109f601e3f65765685786c1a738a83d7fbbde377589c0d974 url: "https://pub.dev" source: hosted - version: "2.2.1" + version: "2.3.1" shared_preferences_foundation: dependency: transitive description: name: shared_preferences_foundation - sha256: "7bf53a9f2d007329ee6f3df7268fd498f8373602f943c975598bbb34649b62a7" + sha256: "776786cff96324851b656777648f36ac772d88bc4c669acff97b7fce5de3c849" url: "https://pub.dev" source: hosted - version: "2.3.4" + version: "2.5.1" shared_preferences_linux: dependency: transitive description: name: shared_preferences_linux - sha256: "9f2cbcf46d4270ea8be39fa156d86379077c8a5228d9dfdb1164ae0bb93f1faa" + sha256: "580abfd40f415611503cae30adf626e6656dfb2f0cee8f465ece7b6defb40f2f" url: "https://pub.dev" source: hosted - version: "2.3.2" + version: "2.4.1" shared_preferences_platform_interface: dependency: transitive description: name: shared_preferences_platform_interface - sha256: "22e2ecac9419b4246d7c22bfbbda589e3acf5c0351137d87dd2939d984d37c3b" + sha256: "57cbf196c486bc2cf1f02b85784932c6094376284b3ad5779d1b1c6c6a816b80" url: "https://pub.dev" source: hosted - version: "2.3.2" + version: "2.4.1" shared_preferences_web: dependency: transitive description: name: shared_preferences_web - sha256: "9aee1089b36bd2aafe06582b7d7817fd317ef05fc30e6ba14bff247d0933042a" + sha256: d2ca4132d3946fec2184261726b355836a82c33d7d5b67af32692aff18a4684e url: "https://pub.dev" source: hosted - version: "2.3.0" + version: "2.4.2" shared_preferences_windows: dependency: transitive description: name: shared_preferences_windows - sha256: "841ad54f3c8381c480d0c9b508b89a34036f512482c407e6df7a9c4aa2ef8f59" + sha256: "94ef0f72b2d71bc3e700e025db3710911bd51a71cefb65cc609dd0d9a982e3c1" url: "https://pub.dev" source: hosted - version: "2.3.2" + version: "2.4.1" sky_engine: dependency: transitive description: flutter @@ -518,10 +518,10 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" vector_math: dependency: transitive description: @@ -555,5 +555,5 @@ packages: source: hosted version: "1.0.4" sdks: - dart: ">=3.3.0 <4.0.0" - flutter: ">=3.19.0" + dart: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 922fd87b..326a41a5 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -4,19 +4,19 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: flutter: sdk: flutter equatable: ^2.0.5 - flutter_bloc: ^8.1.5 - flutter_markdown: ^0.6.22 + flutter_bloc: ^8.1.6 + flutter_markdown: ^0.7.3 js: ^0.7.1 langchain: ^0.7.4 langchain_community: 0.3.0 langchain_openai: ^0.7.0 - shared_preferences: ^2.2.2 + shared_preferences: ^2.3.0 flutter: uses-material-design: true diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index f37088f5..8aaa5dcd 100644 --- 
a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -67,10 +67,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -99,18 +99,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "1.0.3" + version: "2.2.0" fetch_client: dependency: transitive description: name: fetch_client - sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.0.2" + version: "1.1.2" ffi: dependency: transitive description: @@ -139,10 +139,10 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" gcloud: dependency: transitive description: @@ -155,10 +155,10 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" + sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be url: "https://pub.dev" source: hosted - version: "0.4.3" + version: "0.4.4" google_identity_services_web: dependency: transitive description: @@ -179,10 +179,10 @@ packages: dependency: transitive description: name: googleapis_auth - sha256: "1401a9e55f9e0f565d3eebb18d990290f53a12d38a5f7f0230b112895778a85b" + sha256: befd71383a955535060acde8792e7efc11d2fccd03dd1d3ec434e85b68775938 url: "https://pub.dev" source: hosted - version: "1.5.1" + version: "1.6.0" html: dependency: transitive description: @@ -195,10 +195,10 @@ packages: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -215,30 +215,22 @@ packages: url: "https://pub.dev" source: hosted version: "0.1.2" - js: - dependency: transitive - description: - name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 - url: "https://pub.dev" - source: hosted - version: "0.6.7" json_annotation: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" json_path: dependency: transitive description: name: json_path - sha256: "149d32ceb7dc22422ea6d09e401fd688f54e1343bc9ff8c3cb1900ca3b1ad8b1" + sha256: "7a06bbb1cfad390b20fb7a2ca5e67d9ba59633879c6d71142b80fbf61c3b66f6" url: "https://pub.dev" source: hosted - version: "0.7.1" + version: "0.7.4" langchain: dependency: "direct main" description: @@ -314,10 +306,10 @@ packages: dependency: transitive description: 
name: math_expressions - sha256: "3576593617c3870d75728a751f6ec6e606706d44e363f088ac394b5a28a98064" + sha256: e32d803d758ace61cc6c4bdfed1226ff60a6a23646b35685670d28b5616139f8 url: "https://pub.dev" source: hosted - version: "2.4.0" + version: "2.6.0" maybe_just_nothing: dependency: transitive description: @@ -454,10 +446,10 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" vector_math: dependency: transitive description: @@ -482,4 +474,4 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 50df0b98..398e7e15 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -4,7 +4,7 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: langchain: ^0.7.4 diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index 86030403..97cf3b4b 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -21,18 +21,18 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" cross_file: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -45,18 +45,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "1.0.3" + version: "2.2.0" fetch_client: dependency: transitive description: name: fetch_client - sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.0.2" + version: "1.1.2" fixnum: dependency: transitive description: @@ -69,18 +69,18 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" http: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_methods: dependency: transitive description: @@ -93,26 +93,18 @@ packages: dependency: transitive description: name: http_parser - sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" - url: "https://pub.dev" - source: hosted - version: "4.0.2" - js: - dependency: 
transitive - description: - name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "0.6.7" + version: "4.1.0" json_annotation: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" langchain: dependency: "direct main" description: @@ -177,10 +169,10 @@ packages: dependency: "direct main" description: name: shelf - sha256: ad29c505aee705f41a4d8963641f91ac4cee3c8fad5947e033390a7bd8180fa4 + sha256: e7dd780a7ffb623c57850b33f43309312fc863fb6aa3d276a754bb299839ef12 url: "https://pub.dev" source: hosted - version: "1.4.1" + version: "1.4.2" shelf_router: dependency: "direct main" description: @@ -249,10 +241,10 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" web: dependency: transitive description: @@ -262,4 +254,4 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index 171d3039..b7b5dd3a 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -4,10 +4,10 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: langchain: ^0.7.4 langchain_openai: ^0.7.0 - shelf: ^1.4.1 + shelf: ^1.4.2 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 4c5e1daa..06a4acdb 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -21,18 +21,18 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" cross_file: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -45,18 +45,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "1.0.3" + version: "2.2.0" fetch_client: dependency: transitive description: name: fetch_client - sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.0.2" + version: "1.1.2" fixnum: dependency: transitive description: @@ -69,18 +69,18 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: 
c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" http: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -89,22 +89,14 @@ packages: url: "https://pub.dev" source: hosted version: "4.0.2" - js: - dependency: transitive - description: - name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 - url: "https://pub.dev" - source: hosted - version: "0.6.7" json_annotation: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" langchain: dependency: "direct main" description: @@ -209,10 +201,10 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" web: dependency: transitive description: @@ -222,4 +214,4 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index 531264b7..d73e4928 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -4,7 +4,7 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: langchain: ^0.7.4 diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 74a75767..94a94e96 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -53,10 +53,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -77,18 +77,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "1.0.3" + version: "2.2.0" fetch_client: dependency: transitive description: name: fetch_client - sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.0.2" + version: "1.1.2" fixnum: dependency: transitive description: @@ -106,26 +106,26 @@ packages: dependency: "direct main" description: name: flutter_bloc - sha256: f0ecf6e6eb955193ca60af2d5ca39565a86b8a142452c5b24d96fb477428f4d2 + sha256: b594505eac31a0518bdcb4b5b79573b8d9117b193cc80cc12e17d639b10aa27a url: "https://pub.dev" source: hosted - version: "8.1.5" + version: "8.1.6" 
flutter_markdown: dependency: "direct main" description: name: flutter_markdown - sha256: "04c4722cc36ec5af38acc38ece70d22d3c2123c61305d555750a091517bbe504" + sha256: a23c41ee57573e62fc2190a1f36a0480c4d90bde3a8a8d7126e5d5992fb53fb7 url: "https://pub.dev" source: hosted - version: "0.6.23" + version: "0.7.3+1" freezed_annotation: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" gcloud: dependency: transitive description: @@ -138,10 +138,10 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" + sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be url: "https://pub.dev" source: hosted - version: "0.4.3" + version: "0.4.4" google_identity_services_web: dependency: transitive description: @@ -170,10 +170,10 @@ packages: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -182,22 +182,14 @@ packages: url: "https://pub.dev" source: hosted version: "4.0.2" - js: - dependency: transitive - description: - name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 - url: "https://pub.dev" - source: hosted - version: "0.6.7" json_annotation: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" langchain: dependency: "direct main" description: @@ -260,18 +252,18 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" + sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec url: "https://pub.dev" source: hosted - version: "0.8.0" + version: "0.11.1" meta: dependency: transitive description: name: meta - sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.12.0" + version: "1.15.0" mistralai_dart: dependency: "direct overridden" description: @@ -382,10 +374,10 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" vector_math: dependency: transitive description: @@ -410,5 +402,5 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" flutter: ">=3.19.0" diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index a8419a7d..fb83f0cc 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -4,14 +4,14 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" 
dependencies: flutter: sdk: flutter equatable: ^2.0.5 - flutter_bloc: ^8.1.5 - flutter_markdown: ^0.6.22 + flutter_bloc: ^8.1.6 + flutter_markdown: ^0.7.3 langchain: ^0.7.4 langchain_google: ^0.6.1 langchain_mistralai: ^0.2.2 diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.lock b/examples/vertex_ai_matching_engine_setup/pubspec.lock index 99209b09..653b4e81 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.lock +++ b/examples/vertex_ai_matching_engine_setup/pubspec.lock @@ -53,10 +53,10 @@ packages: dependency: transitive description: name: google_identity_services_web - sha256: "0c56c2c5d60d6dfaf9725f5ad4699f04749fb196ee5a70487a46ef184837ccf6" + sha256: "5be191523702ba8d7a01ca97c17fca096822ccf246b0a9f11923a6ded06199b6" url: "https://pub.dev" source: hosted - version: "0.3.0+2" + version: "0.3.1+4" googleapis: dependency: transitive description: @@ -69,18 +69,18 @@ packages: dependency: "direct main" description: name: googleapis_auth - sha256: "1401a9e55f9e0f565d3eebb18d990290f53a12d38a5f7f0230b112895778a85b" + sha256: befd71383a955535060acde8792e7efc11d2fccd03dd1d3ec434e85b68775938 url: "https://pub.dev" source: hosted - version: "1.5.1" + version: "1.6.0" http: dependency: "direct main" description: name: http - sha256: d4872660c46d929f6b8a9ef4e7a7eff7e49bbf0c4ec3f385ee32df5119175139 + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.1.2" + version: "1.2.2" http_parser: dependency: transitive description: @@ -156,9 +156,9 @@ packages: dependency: transitive description: name: web - sha256: edc8a9573dd8c5a83a183dae1af2b6fd4131377404706ca4e5420474784906fa + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "1.0.0" sdks: - dart: ">=3.2.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.yaml b/examples/vertex_ai_matching_engine_setup/pubspec.yaml index 9de8254f..e42414a8 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.yaml +++ b/examples/vertex_ai_matching_engine_setup/pubspec.yaml @@ -4,10 +4,10 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: gcloud: ^0.8.12 - googleapis_auth: ^1.5.1 - http: ^1.1.0 + googleapis_auth: ^1.6.0 + http: ^1.2.2 vertex_ai: ^0.1.0+1 diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index 4caa6233..da1458be 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -37,10 +37,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -109,10 +109,10 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" html: dependency: transitive description: @@ -125,10 +125,10 @@ packages: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: 
b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -157,10 +157,10 @@ packages: dependency: transitive description: name: json_path - sha256: "149d32ceb7dc22422ea6d09e401fd688f54e1343bc9ff8c3cb1900ca3b1ad8b1" + sha256: "7a06bbb1cfad390b20fb7a2ca5e67d9ba59633879c6d71142b80fbf61c3b66f6" url: "https://pub.dev" source: hosted - version: "0.7.1" + version: "0.7.4" langchain: dependency: "direct main" description: @@ -201,10 +201,10 @@ packages: dependency: transitive description: name: math_expressions - sha256: db0b72d867491c4e53a1c773e2708d5d6e94bbe06be07080fc9f896766b9cd3d + sha256: e32d803d758ace61cc6c4bdfed1226ff60a6a23646b35685670d28b5616139f8 url: "https://pub.dev" source: hosted - version: "2.5.0" + version: "2.6.0" maybe_just_nothing: dependency: transitive description: @@ -319,10 +319,10 @@ packages: dependency: transitive description: name: uuid - sha256: "814e9e88f21a176ae1359149021870e87f7cddaf633ab678a5d2b0bff7fd1ba8" + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.4.0" + version: "4.4.2" vector_math: dependency: transitive description: @@ -340,4 +340,4 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index 109cd236..ab9a51be 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -4,7 +4,7 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: langchain: ^0.7.4 diff --git a/melos.yaml b/melos.yaml index 164a7618..69c804de 100644 --- a/melos.yaml +++ b/melos.yaml @@ -20,54 +20,55 @@ command: bootstrap: usePubspecOverrides: true environment: - sdk: ">=3.0.0 <4.0.0" - flutter: ">=3.19.0" + sdk: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" dependencies: async: ^2.11.0 beautiful_soup_dart: ^0.3.0 characters: ^1.3.0 - collection: '>=1.17.0 <1.20.0' - cross_file: ^0.3.4+1 + collection: ^1.18.0 + cross_file: ^0.3.4+2 crypto: ^3.0.3 csv: ^6.0.0 equatable: ^2.0.5 - fetch_client: ^1.0.2 + fetch_client: ^1.1.2 firebase_app_check: ^0.3.0 firebase_auth: ^5.1.0 - firebase_core: ^3.1.0 + firebase_core: ^3.3.0 firebase_vertexai: ^0.2.2 flat_buffers: ^23.5.26 - flutter_bloc: ^8.1.5 - flutter_markdown: ^0.6.22 - freezed_annotation: ^2.4.1 + flutter_bloc: ^8.1.6 + flutter_markdown: ^0.7.3 + freezed_annotation: ^2.4.2 gcloud: ^0.8.12 - google_generative_ai: 0.4.3 + google_generative_ai: 0.4.4 googleapis: ^12.0.0 - googleapis_auth: ^1.5.1 - http: ^1.1.0 + googleapis_auth: ^1.6.0 + http: ^1.2.2 js: ^0.7.1 - json_annotation: ^4.8.1 - json_path: ^0.7.1 + json_annotation: ^4.9.0 + json_path: ^0.7.4 langchain_tiktoken: ^1.0.1 - math_expressions: ^2.4.0 + math_expressions: ^2.6.0 meta: ^1.11.0 objectbox: ^4.0.1 pinecone: ^0.7.2 - shared_preferences: ^2.2.2 - shelf: ^1.4.1 + rxdart: ">=0.27.7 <0.29.0" + shared_preferences: ^2.3.0 + shelf: ^1.4.2 shelf_router: ^1.1.4 - supabase: ^2.0.8 - uuid: ^4.3.3 + supabase: ^2.2.7 + uuid: ^4.4.2 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 - objectbox_generator: ^4.0.0 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 + objectbox_generator: ^4.0.1 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - 
test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 scripts: lint: diff --git a/packages/anthropic_sdk_dart/pubspec.lock b/packages/anthropic_sdk_dart/pubspec.lock index 1849898b..ddbf9394 100644 --- a/packages/anthropic_sdk_dart/pubspec.lock +++ b/packages/anthropic_sdk_dart/pubspec.lock @@ -5,23 +5,23 @@ packages: dependency: transitive description: name: _fe_analyzer_shared - sha256: "5aaf60d96c4cd00fe7f21594b5ad6a1b699c80a27420f8a837f4d68473ef09e3" + sha256: f256b0c0ba6c7577c15e2e4e114755640a875e885099367bf6e012b19314c834 url: "https://pub.dev" source: hosted - version: "68.0.0" + version: "72.0.0" _macros: dependency: transitive description: dart source: sdk - version: "0.1.0" + version: "0.3.2" analyzer: dependency: transitive description: name: analyzer - sha256: "21f1d3720fd1c70316399d5e2bccaebb415c434592d778cce8acb967b8578808" + sha256: b652861553cd3990d8ed361f7979dc6d7053a9ac8843fa73820ab68ce5410139 url: "https://pub.dev" source: hosted - version: "6.5.0" + version: "6.7.0" args: dependency: transitive description: @@ -82,10 +82,10 @@ packages: dependency: "direct dev" description: name: build_runner - sha256: "1414d6d733a85d8ad2f1dfcb3ea7945759e35a123cb99ccfac75d0758f75edfa" + sha256: dd09dd4e2b078992f42aac7f1a622f01882a8492fef08486b27ddde929c19f04 url: "https://pub.dev" source: hosted - version: "2.4.10" + version: "2.4.12" build_runner_core: dependency: transitive description: @@ -178,18 +178,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: c0a76bfd84d4bc5a0733ab8b9fcee268d5069228790a6dd71fc2a6d1049223cc + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "2.1.0" + version: "2.2.0" fetch_client: dependency: "direct main" description: name: fetch_client - sha256: "0b935eff9dfa84fb56bddadaf020c9aa61f02cbd6fa8dad914d6d343a838936d" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.1.1" + version: "1.1.2" file: dependency: transitive description: @@ -210,18 +210,18 @@ packages: dependency: "direct dev" description: name: freezed - sha256: "5606fb95d54f3bb241b3e12dcfb65ba7494c775c4cb458334eccceb07334a3d9" + sha256: "44c19278dd9d89292cf46e97dc0c1e52ce03275f40a97c5a348e802a924bf40e" url: "https://pub.dev" source: hosted - version: "2.5.3" + version: "2.5.7" freezed_annotation: dependency: "direct main" description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" frontend_server_client: dependency: transitive description: @@ -250,10 +250,10 @@ packages: dependency: "direct main" description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_multi_server: dependency: transitive description: @@ -322,10 +322,10 @@ packages: dependency: transitive description: name: macros - sha256: "12e8a9842b5a7390de7a781ec63d793527582398d16ea26c60fed58833c9ae79" + sha256: "0acaed5d6b7eab89f63350bccd82119e6c602df0f391260d0e32b5e23db79536" url: "https://pub.dev" source: hosted - version: "0.1.0-main.0" + version: "0.1.2-main.4" matcher: dependency: transitive description: @@ -362,8 +362,8 @@ packages: 
dependency: "direct dev" description: path: "." - ref: "280ae0d41806eda25e923203d67bd6f4992a81e9" - resolved-ref: "280ae0d41806eda25e923203d67bd6f4992a81e9" + ref: "93230a5e346b02789f0f727da8eecea9c7bdf118" + resolved-ref: "93230a5e346b02789f0f727da8eecea9c7bdf118" url: "https://github.com/davidmigloz/openapi_spec.git" source: git version: "0.7.8" @@ -531,26 +531,26 @@ packages: dependency: "direct dev" description: name: test - sha256: d11b55850c68c1f6c0cf00eabded4e66c4043feaf6c0d7ce4a36785137df6331 + sha256: "713a8789d62f3233c46b4a90b174737b2c04cb6ae4500f2aa8b1be8f03f5e67f" url: "https://pub.dev" source: hosted - version: "1.25.5" + version: "1.25.8" test_api: dependency: transitive description: name: test_api - sha256: "2419f20b0c8677b2d67c8ac4d1ac7372d862dc6c460cdbb052b40155408cd794" + sha256: "664d3a9a64782fcdeb83ce9c6b39e78fd2971d4e37827b9b06c3aa1edc5e760c" url: "https://pub.dev" source: hosted - version: "0.7.1" + version: "0.7.3" test_core: dependency: transitive description: name: test_core - sha256: "4d070a6bc36c1c4e89f20d353bfd71dc30cdf2bd0e14349090af360a029ab292" + sha256: "12391302411737c176b0b5d6491f466b0dd56d4763e347b6714efbaa74d7953d" url: "https://pub.dev" source: hosted - version: "0.6.2" + version: "0.6.5" timing: dependency: transitive description: @@ -624,4 +624,4 @@ packages: source: hosted version: "3.1.2" sdks: - dart: ">=3.4.0 <4.0.0" + dart: ">=3.5.0-259.0.dev <4.0.0" diff --git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml index 160596dc..84376d27 100644 --- a/packages/anthropic_sdk_dart/pubspec.yaml +++ b/packages/anthropic_sdk_dart/pubspec.yaml @@ -13,22 +13,22 @@ topics: - anthropic environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/chromadb/pubspec.yaml b/packages/chromadb/pubspec.yaml index f11b20ea..eb6a6f29 100644 --- a/packages/chromadb/pubspec.yaml +++ b/packages/chromadb/pubspec.yaml @@ -12,20 +12,20 @@ topics: - vector-db environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/googleai_dart/pubspec.yaml b/packages/googleai_dart/pubspec.yaml index 22370975..ee294296 100644 --- a/packages/googleai_dart/pubspec.yaml +++ b/packages/googleai_dart/pubspec.yaml @@ -14,22 +14,22 @@ topics: - gemini environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + 
fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 2a05e1c7..70437c8d 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -13,15 +13,15 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: characters: ^1.3.0 - collection: ">=1.17.0 <1.20.0" + collection: ^1.18.0 crypto: ^3.0.3 langchain_core: 0.3.4 meta: ^1.11.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_amazon/pubspec.yaml b/packages/langchain_amazon/pubspec.yaml index abbcb58c..d948eb8c 100644 --- a/packages/langchain_amazon/pubspec.yaml +++ b/packages/langchain_amazon/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 4a76ab62..c25c8e94 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -13,16 +13,16 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: anthropic_sdk_dart: ^0.1.0 - collection: ">=1.17.0 <1.20.0" - http: ^1.1.0 + collection: ^1.18.0 + http: ^1.2.2 langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - rxdart: ^0.27.7 + rxdart: ">=0.27.7 <0.29.0" dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 9ae0ce5d..391b329b 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -14,17 +14,17 @@ topics: - vector-db environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: chromadb: ^0.2.0+1 - http: ^1.1.0 + http: ^1.2.2 langchain_core: 0.3.4 meta: ^1.11.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 langchain: ^0.7.4 langchain_community: 0.3.0 langchain_openai: ^0.7.0 diff --git a/packages/langchain_cohere/pubspec.yaml b/packages/langchain_cohere/pubspec.yaml index 8ace6cf2..ed26abe5 100644 --- a/packages/langchain_cohere/pubspec.yaml +++ b/packages/langchain_cohere/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 0415ef78..ebf10d32 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -13,27 +13,27 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: beautiful_soup_dart: ^0.3.0 - cross_file: ^0.3.4+1 + cross_file: ^0.3.4+2 csv: ^6.0.0 flat_buffers: ^23.5.26 - http: ^1.1.0 - json_path: ^0.7.1 + http: ^1.2.2 + json_path: ^0.7.4 langchain_core: 0.3.4 - math_expressions: ^2.4.0 + math_expressions: ^2.6.0 meta: ^1.11.0 objectbox: ^4.0.1 tavily_dart: ^0.1.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: - build_runner: ^2.4.9 + build_runner: ^2.4.11 
langchain_openai: ^0.7.0 - objectbox_generator: ^4.0.0 - test: ^1.25.2 + objectbox_generator: ^4.0.1 + test: ^1.25.8 objectbox: output_dir: src/vector_stores/objectbox diff --git a/packages/langchain_core/lib/src/runnables/function.dart b/packages/langchain_core/lib/src/runnables/function.dart index 32843641..7af32bcc 100644 --- a/packages/langchain_core/lib/src/runnables/function.dart +++ b/packages/langchain_core/lib/src/runnables/function.dart @@ -89,7 +89,7 @@ class RunnableFunction final RunnableOptions? options, }) async { if (_invokeFunc != null) { - return _invokeFunc!(input, options); + return _invokeFunc(input, options); } else { return stream(input, options: options).first; } @@ -113,7 +113,7 @@ class RunnableFunction final RunnableOptions? options, }) async* { if (_streamFunc != null) { - yield* _streamFunc!(inputStream, options); + yield* _streamFunc(inputStream, options); } else { yield* inputStream.asyncMap((final input) async { return invoke(input, options: options); diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index 56b13134..170363a7 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -13,15 +13,15 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: async: ^2.11.0 - collection: ">=1.17.0 <1.20.0" - cross_file: ^0.3.4+1 + collection: ^1.18.0 + cross_file: ^0.3.4+2 crypto: ^3.0.3 meta: ^1.11.0 - rxdart: ^0.27.7 + rxdart: ">=0.27.7 <0.29.0" dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index e3f16b8b..1896af9c 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -85,10 +85,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -165,26 +165,26 @@ packages: dependency: "direct main" description: name: firebase_core - sha256: fae4ab4317c2a7afb13d44ef1e3f9f28a630e10016bc5cfe761e8e6a0ed7816a + sha256: "3187f4f8e49968573fd7403011dca67ba95aae419bc0d8131500fae160d94f92" url: "https://pub.dev" source: hosted - version: "3.1.0" + version: "3.3.0" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: "1003a5a03a61fc9a22ef49f37cbcb9e46c86313a7b2e7029b9390cf8c6fc32cb" + sha256: "3c3a1e92d6f4916c32deea79c4a7587aa0e9dbbe5889c7a16afcf005a485ee02" url: "https://pub.dev" source: hosted - version: "5.1.0" + version: "5.2.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: "6643fe3dbd021e6ccfb751f7882b39df355708afbdeb4130fc50f9305a9d1a3d" + sha256: e8d1e22de72cb21cdcfc5eed7acddab3e99cd83f3b317f54f7a96c32f25fd11e url: "https://pub.dev" source: hosted - version: "2.17.2" + version: "2.17.4" firebase_vertexai: dependency: transitive description: @@ -218,10 +218,10 @@ packages: dependency: "direct main" description: name: flutter_markdown - sha256: "04c4722cc36ec5af38acc38ece70d22d3c2123c61305d555750a091517bbe504" + sha256: a23c41ee57573e62fc2190a1f36a0480c4d90bde3a8a8d7126e5d5992fb53fb7 url: "https://pub.dev" source: hosted - version: "0.6.23" + version: "0.7.3+1" flutter_test: dependency: 
"direct dev" description: flutter @@ -281,18 +281,18 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" + sha256: "3f87a60e8c63aecc975dda1ceedbc8f24de75f09e4856ea27daf8958f2f0ce05" url: "https://pub.dev" source: hosted - version: "10.0.4" + version: "10.0.5" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" + sha256: "932549fb305594d82d7183ecd9fa93463e9914e1b67cacc34bc40906594a1806" url: "https://pub.dev" source: hosted - version: "3.0.3" + version: "3.0.5" leak_tracker_testing: dependency: transitive description: @@ -329,18 +329,18 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" + sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec url: "https://pub.dev" source: hosted - version: "0.8.0" + version: "0.11.1" meta: dependency: transitive description: name: meta - sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.12.0" + version: "1.15.0" path: dependency: transitive description: @@ -422,10 +422,10 @@ packages: dependency: transitive description: name: test_api - sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" + sha256: "5b8a98dafc4d5c4c9c72d8b31ab2b23fc13422348d2997120294d3bac86b4ddb" url: "https://pub.dev" source: hosted - version: "0.7.0" + version: "0.7.2" typed_data: dependency: transitive description: @@ -438,10 +438,10 @@ packages: dependency: transitive description: name: uuid - sha256: "814e9e88f21a176ae1359149021870e87f7cddaf633ab678a5d2b0bff7fd1ba8" + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.4.0" + version: "4.4.2" vector_math: dependency: transitive description: @@ -454,10 +454,10 @@ packages: dependency: transitive description: name: vm_service - sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" + sha256: f652077d0bdf60abe4c1f6377448e8655008eef28f128bc023f7b5e8dfeb48fc url: "https://pub.dev" source: hosted - version: "14.2.1" + version: "14.2.4" web: dependency: transitive description: @@ -467,5 +467,5 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" - flutter: ">=3.19.0" + dart: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index ddb7010d..d63e1ccd 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -4,15 +4,15 @@ version: 1.0.0+1 publish_to: 'none' environment: - sdk: ">=3.0.0 <4.0.0" - flutter: ">=3.19.0" + sdk: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" dependencies: cupertino_icons: ^1.0.6 - firebase_core: ^3.1.0 + firebase_core: ^3.3.0 flutter: sdk: flutter - flutter_markdown: ^0.6.22 + flutter_markdown: ^0.7.3 langchain: 0.7.4 langchain_firebase: 0.2.1 diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index b8f43f02..6c5ffbb4 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -77,10 +77,10 @@ packages: dependency: transitive description: name: 
cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -149,26 +149,26 @@ packages: dependency: "direct main" description: name: firebase_core - sha256: fae4ab4317c2a7afb13d44ef1e3f9f28a630e10016bc5cfe761e8e6a0ed7816a + sha256: "3187f4f8e49968573fd7403011dca67ba95aae419bc0d8131500fae160d94f92" url: "https://pub.dev" source: hosted - version: "3.1.0" + version: "3.3.0" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: "1003a5a03a61fc9a22ef49f37cbcb9e46c86313a7b2e7029b9390cf8c6fc32cb" + sha256: "3c3a1e92d6f4916c32deea79c4a7587aa0e9dbbe5889c7a16afcf005a485ee02" url: "https://pub.dev" source: hosted - version: "5.1.0" + version: "5.2.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: "6643fe3dbd021e6ccfb751f7882b39df355708afbdeb4130fc50f9305a9d1a3d" + sha256: e8d1e22de72cb21cdcfc5eed7acddab3e99cd83f3b317f54f7a96c32f25fd11e url: "https://pub.dev" source: hosted - version: "2.17.2" + version: "2.17.4" firebase_vertexai: dependency: "direct main" description: @@ -235,18 +235,18 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" + sha256: "3f87a60e8c63aecc975dda1ceedbc8f24de75f09e4856ea27daf8958f2f0ce05" url: "https://pub.dev" source: hosted - version: "10.0.4" + version: "10.0.5" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" + sha256: "932549fb305594d82d7183ecd9fa93463e9914e1b67cacc34bc40906594a1806" url: "https://pub.dev" source: hosted - version: "3.0.3" + version: "3.0.5" leak_tracker_testing: dependency: transitive description: @@ -267,18 +267,18 @@ packages: dependency: transitive description: name: material_color_utilities - sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" + sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec url: "https://pub.dev" source: hosted - version: "0.8.0" + version: "0.11.1" meta: dependency: "direct main" description: name: meta - sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.12.0" + version: "1.15.0" path: dependency: transitive description: @@ -360,10 +360,10 @@ packages: dependency: transitive description: name: test_api - sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" + sha256: "5b8a98dafc4d5c4c9c72d8b31ab2b23fc13422348d2997120294d3bac86b4ddb" url: "https://pub.dev" source: hosted - version: "0.7.0" + version: "0.7.2" typed_data: dependency: transitive description: @@ -376,10 +376,10 @@ packages: dependency: "direct main" description: name: uuid - sha256: "814e9e88f21a176ae1359149021870e87f7cddaf633ab678a5d2b0bff7fd1ba8" + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.4.0" + version: "4.4.2" vector_math: dependency: transitive description: @@ -392,10 +392,10 @@ packages: dependency: transitive description: name: vm_service - sha256: 
"3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" + sha256: f652077d0bdf60abe4c1f6377448e8655008eef28f128bc023f7b5e8dfeb48fc url: "https://pub.dev" source: hosted - version: "14.2.1" + version: "14.2.4" web: dependency: transitive description: @@ -405,5 +405,5 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" - flutter: ">=3.19.0" + dart: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index b6298571..8413d6d1 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -14,19 +14,20 @@ topics: - firebase environment: - sdk: ">=3.0.0 <4.0.0" - flutter: ">=3.19.0" + sdk: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" dependencies: - collection: ">=1.17.0 <1.20.0" + collection: ^1.18.0 firebase_app_check: ^0.3.0 firebase_auth: ^5.1.0 - firebase_core: ^3.1.0 + firebase_core: ^3.3.0 cloud_firestore: ^4.17.0 + firebase_vertexai: ^0.2.2 langchain_core: 0.3.4 meta: ^1.11.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: flutter_test: diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 3adcb2ed..0dda61e6 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -14,24 +14,24 @@ topics: - vertex-ai environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.20.0" - fetch_client: ^1.0.2 + collection: ^1.18.0 + fetch_client: ^1.1.2 gcloud: ^0.8.12 - google_generative_ai: 0.4.3 + google_generative_ai: 0.4.4 googleapis: ^12.0.0 - googleapis_auth: ^1.5.1 - http: ^1.1.0 + googleapis_auth: ^1.6.0 + http: ^1.2.2 langchain_core: 0.3.4 meta: ^1.11.0 - uuid: ^4.3.3 + uuid: ^4.4.2 vertex_ai: ^0.1.0+1 langchain_firebase: ^0.1.0 firebase_core: ^2.31.0 dev_dependencies: - test: ^1.24.9 + test: ^1.25.8 fake_cloud_firestore: ^2.5.1 langchain: ^0.6.0+1 diff --git a/packages/langchain_huggingface/pubspec.yaml b/packages/langchain_huggingface/pubspec.yaml index 7c1f00d4..2f29e62b 100644 --- a/packages/langchain_huggingface/pubspec.yaml +++ b/packages/langchain_huggingface/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_microsoft/pubspec.yaml b/packages/langchain_microsoft/pubspec.yaml index 3bd05e6a..11d0021c 100644 --- a/packages/langchain_microsoft/pubspec.yaml +++ b/packages/langchain_microsoft/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index d237e3f9..5d12387b 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -14,15 +14,15 @@ topics: - mistral environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.20.0" - http: ^1.1.0 + collection: ^1.18.0 + http: ^1.2.2 langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 mistralai_dart: ^0.0.3+3 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index a97ab982..53d659d0 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -14,16 +14,16 @@ topics: - ollama environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.20.0" - http: ^1.1.0 + 
collection: ^1.18.0 + http: ^1.2.2 langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 ollama_dart: ^0.2.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index f2e989a9..2e8e5ff6 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -14,18 +14,18 @@ topics: - gpt environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.20.0" - http: ^1.1.0 + collection: ^1.18.0 + http: ^1.2.2 langchain_core: 0.3.4 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 openai_dart: ^0.4.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: langchain: ^0.7.4 langchain_community: 0.3.0 - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index ca636d73..ffbd4c9a 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -14,15 +14,15 @@ topics: - vector-db environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - http: ^1.1.0 + http: ^1.2.2 langchain_core: 0.3.4 meta: ^1.11.0 pinecone: ^0.7.2 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 langchain_openai: ^0.7.0 diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index d1120d45..af7c072f 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -14,16 +14,16 @@ topics: - vector-db environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - http: ^1.1.0 + http: ^1.2.2 langchain_core: 0.3.4 meta: ^1.11.0 - supabase: ^2.0.8 + supabase: ^2.2.7 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 langchain: ^0.7.4 langchain_community: 0.3.0 langchain_openai: ^0.7.0 diff --git a/packages/langchain_weaviate/pubspec.yaml b/packages/langchain_weaviate/pubspec.yaml index fb6e6ce4..3d9b8cd3 100644 --- a/packages/langchain_weaviate/pubspec.yaml +++ b/packages/langchain_weaviate/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_wikipedia/pubspec.yaml b/packages/langchain_wikipedia/pubspec.yaml index d8f713b5..2dcc9e5c 100644 --- a/packages/langchain_wikipedia/pubspec.yaml +++ b/packages/langchain_wikipedia/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_wolfram/pubspec.yaml b/packages/langchain_wolfram/pubspec.yaml index 950db4e1..14b30014 100644 --- a/packages/langchain_wolfram/pubspec.yaml +++ b/packages/langchain_wolfram/pubspec.yaml @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langgraph/pubspec.yaml b/packages/langgraph/pubspec.yaml index 2b4ebaaf..e6ef9c18 100644 --- a/packages/langgraph/pubspec.yaml +++ b/packages/langgraph/pubspec.yaml @@ -13,4 +13,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/mistralai_dart/pubspec.yaml b/packages/mistralai_dart/pubspec.yaml index 970d1403..406b7170 100644 --- a/packages/mistralai_dart/pubspec.yaml +++ b/packages/mistralai_dart/pubspec.yaml @@ -13,22 +13,22 @@ topics: - mistral environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - 
json_annotation: ^4.8.1 + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 69b2f8a3..3f8d7f75 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -13,22 +13,22 @@ topics: - ollama environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index 97f7f230..4126650f 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -13,22 +13,22 @@ topics: - gpt environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/tavily_dart/pubspec.yaml b/packages/tavily_dart/pubspec.yaml index 5694d98f..29519674 100644 --- a/packages/tavily_dart/pubspec.yaml +++ b/packages/tavily_dart/pubspec.yaml @@ -13,22 +13,22 @@ topics: - rag environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: 280ae0d41806eda25e923203d67bd6f4992a81e9 - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index ccfa07c8..c612870d 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -14,14 +14,14 @@ topics: - matching-engine environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: ">=1.17.0 <1.20.0" + collection: ^1.18.0 googleapis: ^12.0.0 - 
googleapis_auth: ^1.5.1 - http: ^1.1.0 + googleapis_auth: ^1.6.0 + http: ^1.2.2 meta: ^1.11.0 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 From 81aadfa39ba8f233240814dd5050f61ebb9b12ae Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 10 Aug 2024 18:33:14 +0200 Subject: [PATCH 090/251] docs: Suggest using few-shot prompting with Ollama tool calling (#521) --- docs/modules/model_io/models/chat_models/integrations/ollama.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index d12b5b93..9c368d1b 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -182,6 +182,8 @@ final chatModel = ChatOllama( ); ``` +**Pro-tip:** You can improve tool-calling performance of small models by using few-shot prompting. You can find out how to do this [here](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools?id=few-shot-prompting) and in this [blog post](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance). + ### JSON mode You can force the model to produce JSON output that you can easily parse using `JsonOutputParser`, useful for extracting structured data. From 53aaa433cbe297ae08eede1bbc5244f64e34f510 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 10 Aug 2024 18:38:43 +0200 Subject: [PATCH 091/251] feat: Add gpt-4o-2024-08-06 to model catalog in openai_dart (#522) --- .../lib/src/chat_models/types.dart | 6 + .../openai_dart/lib/src/generated/client.dart | 2 +- .../generated/schema/assistant_object.dart | 48 +- .../src/generated/schema/assistant_tools.dart | 2 +- .../lib/src/generated/schema/batch.dart | 4 +- .../schema/create_assistant_request.dart | 50 +- .../create_chat_completion_request.dart | 2 + .../create_fine_tuning_job_request.dart | 4 +- .../schema/create_message_request.dart | 4 +- .../generated/schema/create_run_request.dart | 47 +- .../schema/create_thread_and_run_request.dart | 47 +- .../schema/create_thread_request.dart | 4 +- .../schema/create_vector_store_request.dart | 4 +- .../src/generated/schema/message_object.dart | 4 +- .../schema/modify_assistant_request.dart | 48 +- .../schema/modify_message_request.dart | 4 +- .../generated/schema/modify_run_request.dart | 4 +- .../schema/modify_thread_request.dart | 4 +- .../lib/src/generated/schema/run_object.dart | 38 +- .../src/generated/schema/run_step_object.dart | 4 +- .../src/generated/schema/schema.freezed.dart | 7238 ++++++++++++----- .../lib/src/generated/schema/schema.g.dart | 267 +- .../src/generated/schema/thread_object.dart | 4 +- .../schema/update_vector_store_request.dart | 4 +- .../schema/vector_store_file_object.dart | 14 +- .../generated/schema/vector_store_object.dart | 4 +- packages/openai_dart/oas/main.dart | 5 +- packages/openai_dart/oas/openapi_curated.yaml | 113 +- .../openai_dart/oas/openapi_official.yaml | 796 +- 29 files changed, 6234 insertions(+), 2541 deletions(-) diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 9712ff59..988d27c0 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -21,10 +21,16 @@ import 'package:meta/meta.dart'; /// - `gpt-4-vision-preview` /// - `gpt-4o` /// - `gpt-4o-2024-05-13` +/// - `gpt-4o-2024-08-06` /// - 
`gpt-4o-mini` /// - `gpt-4o-mini-2024-07-18` /// - `gpt-3.5-turbo` /// - `gpt-3.5-turbo-16k` +/// - `gpt-3.5-turbo-16k-0613` +/// - `gpt-3.5-turbo-0125` +/// - `gpt-3.5-turbo-0301` +/// - `gpt-3.5-turbo-0613` +/// - `gpt-3.5-turbo-1106` /// /// Mind that the list may be outdated. /// See https://platform.openai.com/docs/models for the latest list. diff --git a/packages/openai_dart/lib/src/generated/client.dart b/packages/openai_dart/lib/src/generated/client.dart index 66c918d1..828b26be 100644 --- a/packages/openai_dart/lib/src/generated/client.dart +++ b/packages/openai_dart/lib/src/generated/client.dart @@ -58,7 +58,7 @@ class OpenAIClientException implements Exception { // CLASS: OpenAIClient // ========================================== -/// Client for OpenAI API (v.2.1.0) +/// Client for OpenAI API (v.2.3.0) /// /// The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. class OpenAIClient { diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart index 59bac618..5784d1ed 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart @@ -36,29 +36,46 @@ class AssistantObject with _$AssistantObject { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. required String? instructions, - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. required List tools, /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) AssistantObjectResponseFormat? responseFormat, @@ -170,11 +187,22 @@ enum AssistantResponseFormatMode { // CLASS: AssistantObjectResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-4o-mini-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// the model will match your supplied JSON schema. 
Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class AssistantObjectResponseFormat with _$AssistantObjectResponseFormat { diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart index 043a7d9a..5c0c2c47 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart @@ -80,7 +80,7 @@ class AssistantToolsFileSearchFileSearch /// Factory constructor for AssistantToolsFileSearchFileSearch const factory AssistantToolsFileSearchFileSearch({ - /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search diff --git a/packages/openai_dart/lib/src/generated/schema/batch.dart b/packages/openai_dart/lib/src/generated/schema/batch.dart index 94cc6080..471ac112 100644 --- a/packages/openai_dart/lib/src/generated/schema/batch.dart +++ b/packages/openai_dart/lib/src/generated/schema/batch.dart @@ -74,7 +74,9 @@ class Batch with _$Batch { @JsonKey(name: 'request_counts', includeIfNull: false) BatchRequestCounts? requestCounts, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. 
@JsonKey(includeIfNull: false) dynamic metadata, }) = _Batch; diff --git a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart index 3b9086d3..0e849a85 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart @@ -27,29 +27,46 @@ class CreateAssistantRequest with _$CreateAssistantRequest { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. @JsonKey(includeIfNull: false) String? instructions, - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @Default([]) List tools, /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateAssistantRequestResponseFormat? responseFormat, @@ -163,6 +180,8 @@ enum AssistantModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -254,11 +273,22 @@ enum CreateAssistantResponseFormatMode { // CLASS: CreateAssistantRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-4o-mini-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// is valid JSON. 
/// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class CreateAssistantRequestResponseFormat with _$CreateAssistantRequestResponseFormat { diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index f9213271..fd24189a 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -302,6 +302,8 @@ enum ChatCompletionModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') diff --git a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart index 17b649aa..3da0a42e 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart @@ -39,7 +39,7 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { /// A string of up to 18 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @JsonKey(includeIfNull: false) String? suffix, /// The ID of an uploaded file that contains validation data. @@ -127,6 +127,8 @@ enum FineTuningModels { davinci002, @JsonValue('gpt-3.5-turbo') gpt35Turbo, + @JsonValue('gpt-4o-mini') + gpt4oMini, } // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/create_message_request.dart b/packages/openai_dart/lib/src/generated/schema/create_message_request.dart index 7837049f..fc42a4d2 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_message_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_message_request.dart @@ -25,7 +25,9 @@ class CreateMessageRequest with _$CreateMessageRequest { /// A list of files attached to the message, and the tools they were added to. @JsonKey(includeIfNull: false) List? attachments, - /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _CreateMessageRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 0e395531..4c13ec8f 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -37,13 +37,18 @@ class CreateRunRequest with _$CreateRunRequest { /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. @JsonKey(includeIfNull: false) List? tools, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, @@ -74,11 +79,22 @@ class CreateRunRequest with _$CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? parallelToolCalls, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. 
/// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? responseFormat, @@ -202,6 +218,8 @@ enum RunModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -382,11 +400,22 @@ enum CreateRunRequestResponseFormatMode { // CLASS: CreateRunRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-4o-mini-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class CreateRunRequestResponseFormat with _$CreateRunRequestResponseFormat { diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index ae054a5c..e69a2060 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -36,13 +36,18 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, @@ -73,11 +78,22 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? parallelToolCalls, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -201,6 +217,8 @@ enum ThreadAndRunModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -388,11 +406,22 @@ enum CreateThreadAndRunRequestResponseFormatMode { // CLASS: CreateThreadAndRunRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-4o-mini-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// is valid JSON. 
/// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class CreateThreadAndRunRequestResponseFormat with _$CreateThreadAndRunRequestResponseFormat { diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart index 22823647..2cfb4b35 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart @@ -22,7 +22,9 @@ class CreateThreadRequest with _$CreateThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _CreateThreadRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart index 61e87095..b26b786e 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart @@ -30,7 +30,9 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { @JsonKey(name: 'chunking_strategy', includeIfNull: false) ChunkingStrategyRequestParam? chunkingStrategy, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. 
@JsonKey(includeIfNull: false) dynamic metadata, }) = _CreateVectorStoreRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/message_object.dart b/packages/openai_dart/lib/src/generated/schema/message_object.dart index fae9d2ae..9e991a27 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_object.dart @@ -58,7 +58,9 @@ class MessageObject with _$MessageObject { /// A list of files attached to the message, and the tools they were added to. required List? attachments, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, }) = _MessageObject; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart index b02d123e..99c1f887 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart @@ -27,7 +27,8 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. @JsonKey(includeIfNull: false) String? instructions, - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @Default([]) List tools, /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. @@ -37,22 +38,38 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. 
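// Example (not part of the patch): the metadata limits repeated above (at most
// 16 pairs, keys up to 64 characters, values up to 512 characters) are easy to
// violate when metadata is built dynamically. This is a hypothetical helper,
// not part of openai_dart, that checks a map against those limits before it is
// attached to a request.
bool isValidMetadata(Map<String, String> metadata) {
  if (metadata.length > 16) return false;
  for (final entry in metadata.entries) {
    if (entry.key.length > 64 || entry.value.length > 512) return false;
  }
  return true;
}

void main() {
  final metadata = {'user_id': 'u_42', 'source': 'docs_example'};
  print('metadata ok: ${isValidMetadata(metadata)}');
}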
Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) ModifyAssistantRequestResponseFormat? responseFormat, @@ -150,11 +167,22 @@ enum ModifyAssistantResponseFormatMode { // CLASS: ModifyAssistantRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
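// Example (not part of the patch): the sampling fields above recommend tuning
// either `temperature` or `top_p`, but not both at once. A rough sketch of the
// two alternative update payloads as raw JSON maps, since the full generated
// request class is not shown in this hunk.
import 'dart:convert';

void main() {
  // More focused, deterministic output: lower the temperature, leave top_p alone.
  final focused = {'temperature': 0.2};

  // Alternative: restrict nucleus sampling instead, leave temperature alone.
  final nucleus = {'top_p': 0.1};

  print(jsonEncode(focused));
  print(jsonEncode(nucleus));
}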
+/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-4o-mini-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class ModifyAssistantRequestResponseFormat with _$ModifyAssistantRequestResponseFormat { diff --git a/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart index b6e7d119..b7ec05e1 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart @@ -15,7 +15,9 @@ class ModifyMessageRequest with _$ModifyMessageRequest { /// Factory constructor for ModifyMessageRequest const factory ModifyMessageRequest({ - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? 
metadata, }) = _ModifyMessageRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart index 3d113815..973a0b3d 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart @@ -15,7 +15,9 @@ class ModifyRunRequest with _$ModifyRunRequest { /// Factory constructor for ModifyRunRequest const factory ModifyRunRequest({ - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _ModifyRunRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart index a335f1b6..96f4983f 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart @@ -19,7 +19,9 @@ class ModifyThreadRequest with _$ModifyThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _ModifyThreadRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/run_object.dart b/packages/openai_dart/lib/src/generated/schema/run_object.dart index d3e7dcf5..73ffe897 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_object.dart @@ -68,7 +68,9 @@ class RunObject with _$RunObject { /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. required List tools, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -103,11 +105,22 @@ class RunObject with _$RunObject { /// during tool use. @JsonKey(name: 'parallel_tool_calls') required bool? 
parallelToolCalls, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') required RunObjectResponseFormat responseFormat, @@ -448,11 +461,22 @@ enum RunObjectResponseFormatMode { // CLASS: RunObjectResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-4o-mini-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// is valid JSON. 
/// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class RunObjectResponseFormat with _$RunObjectResponseFormat { const RunObjectResponseFormat._(); diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_object.dart b/packages/openai_dart/lib/src/generated/schema/run_step_object.dart index 2e56839e..ede505da 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_step_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_step_object.dart @@ -58,7 +58,9 @@ class RunStepObject with _$RunStepObject { /// The Unix timestamp (in seconds) for when the run step completed. @JsonKey(name: 'completed_at') required int? completedAt, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 06e93133..36ff6d91 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -123,8 +123,12 @@ mixin _$CreateCompletionRequest { @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; + /// Serializes this CreateCompletionRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateCompletionRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -178,6 +182,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. 
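// Example (not part of the patch): as the note above warns, JSON-mode output
// can still be cut off when the generation hits the token limit, in which case
// the message content is not valid JSON. A small defensive-parsing sketch;
// `content` stands in for the text of an assistant message taken from a
// completed run.
import 'dart:convert';

Map<String, dynamic>? tryParseJsonMessage(String content) {
  try {
    final decoded = jsonDecode(content);
    return decoded is Map<String, dynamic> ? decoded : null;
  } on FormatException {
    // Truncated or otherwise malformed output: the caller can retry with a
    // larger token budget or a tighter prompt.
    return null;
  }
}

void main() {
  print(tryParseJsonMessage('{"city": "Paris"}')); // {city: Paris}
  print(tryParseJsonMessage('{"city": "Par'));     // null (truncated)
}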
@pragma('vm:prefer-inline') @override $Res call({ @@ -276,6 +282,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionModelCopyWith<$Res> get model { @@ -284,6 +292,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionPromptCopyWith<$Res>? get prompt { @@ -296,6 +306,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionStopCopyWith<$Res>? get stop { @@ -308,6 +320,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamOptionsCopyWith<$Res>? get streamOptions { @@ -377,6 +391,8 @@ class __$$CreateCompletionRequestImplCopyWithImpl<$Res> $Res Function(_$CreateCompletionRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -676,7 +692,7 @@ class _$CreateCompletionRequestImpl extends _CreateCompletionRequest { (identical(other.user, user) || other.user == user)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -699,7 +715,9 @@ class _$CreateCompletionRequestImpl extends _CreateCompletionRequest { topP, user); - @JsonKey(ignore: true) + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateCompletionRequestImplCopyWith<_$CreateCompletionRequestImpl> @@ -746,127 +764,129 @@ abstract class _CreateCompletionRequest extends CreateCompletionRequest { factory _CreateCompletionRequest.fromJson(Map json) = _$CreateCompletionRequestImpl.fromJson; - @override - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @override @_CompletionModelConverter() CompletionModel get model; - @override /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. /// /// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + @override @_CompletionPromptConverter() CompletionPrompt? get prompt; - @override /// Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. /// /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. 
/// /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + @override @JsonKey(name: 'best_of', includeIfNull: false) int? get bestOf; - @override /// Echo back the prompt in addition to the completion + @override @JsonKey(includeIfNull: false) bool? get echo; - @override /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + @override @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty; - @override /// Modify the likelihood of specified tokens appearing in the completion. /// /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](https://platform.openai.com/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. /// /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + @override @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias; - @override /// Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. /// /// The maximum value for `logprobs` is 5. + @override @JsonKey(includeIfNull: false) int? get logprobs; - @override /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the completion. /// /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @override @JsonKey(name: 'max_tokens', includeIfNull: false) int? get maxTokens; - @override /// How many completions to generate for each prompt. /// /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + @override @JsonKey(includeIfNull: false) int? get n; - @override /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + @override @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; - @override /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. 
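// Example (not part of the patch): a sketch of a legacy completions request
// body exercising a few of the parameters documented above: `logit_bias`
// banning the <|endoftext|> token (token id 50256, as in the doc comment), and
// `best_of` greater than `n` so the server samples three candidates but
// returns only the best one. Raw maps are used instead of
// CreateCompletionRequest, and the model name is a placeholder.
import 'dart:convert';

void main() {
  final body = {
    'model': 'gpt-3.5-turbo-instruct', // placeholder model
    'prompt': 'Write a one-line haiku about Dart.',
    'max_tokens': 32,
    'n': 1,
    'best_of': 3, // must be >= n; consumes roughly 3x the tokens of n = 1
    'logit_bias': {'50256': -100}, // effectively bans <|endoftext|>
  };
  print(jsonEncode(body));
}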
/// /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + @override @JsonKey(includeIfNull: false) int? get seed; - @override /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + @override @_CompletionStopConverter() @JsonKey(includeIfNull: false) CompletionStop? get stop; - @override /// Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @override @JsonKey(includeIfNull: false) bool? get stream; - @override /// Options for streaming response. Only set this when you set `stream: true`. + @override @JsonKey(name: 'stream_options', includeIfNull: false) ChatCompletionStreamOptions? get streamOptions; - @override /// The suffix that comes after a completion of inserted text. /// /// This parameter is only supported for `gpt-3.5-turbo-instruct`. + @override @JsonKey(includeIfNull: false) String? get suffix; - @override /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. /// /// We generally recommend altering this or `top_p` but not both. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or `temperature` but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override @JsonKey(includeIfNull: false) String? get user; + + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateCompletionRequestImplCopyWith<_$CreateCompletionRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -925,6 +945,8 @@ mixin _$CompletionModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CompletionModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -944,6 +966,9 @@ class _$CompletionModelCopyWithImpl<$Res, $Val extends CompletionModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -966,6 +991,8 @@ class __$$CompletionModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$CompletionModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. 
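// Example (not part of the patch): a sketch of the "best effort determinism"
// workflow described above. Pin a `seed`, then compare the
// `system_fingerprint` of successive responses; if the fingerprint changed,
// the backend configuration changed and identical output is no longer
// expected. Responses are modelled as plain maps here instead of
// CreateCompletionResponse.
void main() {
  final request = {
    'model': 'gpt-3.5-turbo-instruct', // placeholder model
    'prompt': 'Say hello.',
    'seed': 1234,
    'temperature': 0,
  };

  // Pretend these came back from two identical requests.
  final first = {'system_fingerprint': 'fp_abc'};
  final second = {'system_fingerprint': 'fp_def'};

  final sameBackend =
      first['system_fingerprint'] == second['system_fingerprint'];
  print('request: $request');
  print(sameBackend
      ? 'same backend config: outputs should match for this seed'
      : 'backend changed: determinism is not expected across these calls');
}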
@pragma('vm:prefer-inline') @override $Res call({ @@ -1010,11 +1037,13 @@ class _$CompletionModelEnumerationImpl extends CompletionModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionModelEnumerationImplCopyWith<_$CompletionModelEnumerationImpl> @@ -1101,7 +1130,10 @@ abstract class CompletionModelEnumeration extends CompletionModel { @override CompletionModels get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionModelEnumerationImplCopyWith<_$CompletionModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1124,6 +1156,8 @@ class __$$CompletionModelStringImplCopyWithImpl<$Res> $Res Function(_$CompletionModelStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1167,11 +1201,13 @@ class _$CompletionModelStringImpl extends CompletionModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionModelStringImplCopyWith<_$CompletionModelStringImpl> @@ -1258,7 +1294,10 @@ abstract class CompletionModelString extends CompletionModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionModelStringImplCopyWith<_$CompletionModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1333,6 +1372,8 @@ mixin _$CompletionPrompt { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CompletionPrompt to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -1352,6 +1393,9 @@ class _$CompletionPromptCopyWithImpl<$Res, $Val extends CompletionPrompt> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -1374,6 +1418,8 @@ class __$$CompletionPromptListListIntImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptListListIntImpl) _then) : super(_value, _then); + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -1425,12 +1471,14 @@ class _$CompletionPromptListListIntImpl extends CompletionPromptListListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionPromptListListIntImplCopyWith<_$CompletionPromptListListIntImpl> @@ -1529,7 +1577,10 @@ abstract class CompletionPromptListListInt extends CompletionPrompt { @override List> get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionPromptListListIntImplCopyWith<_$CompletionPromptListListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1553,6 +1604,8 @@ class __$$CompletionPromptListIntImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptListIntImpl) _then) : super(_value, _then); + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1603,12 +1656,14 @@ class _$CompletionPromptListIntImpl extends CompletionPromptListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionPromptListIntImplCopyWith<_$CompletionPromptListIntImpl> @@ -1707,7 +1762,10 @@ abstract class CompletionPromptListInt extends CompletionPrompt { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionPromptListIntImplCopyWith<_$CompletionPromptListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1732,6 +1790,8 @@ class __$$CompletionPromptListStringImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptListStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1783,12 +1843,14 @@ class _$CompletionPromptListStringImpl extends CompletionPromptListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionPromptListStringImplCopyWith<_$CompletionPromptListStringImpl> @@ -1887,7 +1949,10 @@ abstract class CompletionPromptListString extends CompletionPrompt { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionPromptListStringImplCopyWith<_$CompletionPromptListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1911,6 +1976,8 @@ class __$$CompletionPromptStringImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1954,11 +2021,13 @@ class _$CompletionPromptStringImpl extends CompletionPromptString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionPromptStringImplCopyWith<_$CompletionPromptStringImpl> @@ -2057,7 +2126,10 @@ abstract class CompletionPromptString extends CompletionPrompt { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionPromptStringImplCopyWith<_$CompletionPromptStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2116,6 +2188,8 @@ mixin _$CompletionStop { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CompletionStop to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -2135,6 +2209,9 @@ class _$CompletionStopCopyWithImpl<$Res, $Val extends CompletionStop> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -2156,6 +2233,8 @@ class __$$CompletionStopListStringImplCopyWithImpl<$Res> $Res Function(_$CompletionStopListStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2206,12 +2285,14 @@ class _$CompletionStopListStringImpl extends CompletionStopListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionStopListStringImplCopyWith<_$CompletionStopListStringImpl> @@ -2298,7 +2379,10 @@ abstract class CompletionStopListString extends CompletionStop { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionStopListStringImplCopyWith<_$CompletionStopListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2320,6 +2404,8 @@ class __$$CompletionStopStringImplCopyWithImpl<$Res> $Res Function(_$CompletionStopStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2363,11 +2449,13 @@ class _$CompletionStopStringImpl extends CompletionStopString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionStopStringImplCopyWith<_$CompletionStopStringImpl> @@ -2455,7 +2543,10 @@ abstract class CompletionStopString extends CompletionStop { @override String? get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionStopStringImplCopyWith<_$CompletionStopStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2493,8 +2584,12 @@ mixin _$CreateCompletionResponse { @JsonKey(includeIfNull: false) CompletionUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this CreateCompletionResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateCompletionResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -2529,6 +2624,8 @@ class _$CreateCompletionResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2572,6 +2669,8 @@ class _$CreateCompletionResponseCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionUsageCopyWith<$Res>? get usage { @@ -2618,6 +2717,8 @@ class __$$CreateCompletionResponseImplCopyWithImpl<$Res> $Res Function(_$CreateCompletionResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -2739,7 +2840,7 @@ class _$CreateCompletionResponseImpl extends _CreateCompletionResponse { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -2751,7 +2852,9 @@ class _$CreateCompletionResponseImpl extends _CreateCompletionResponse { object, usage); - @JsonKey(ignore: true) + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateCompletionResponseImplCopyWith<_$CreateCompletionResponseImpl> @@ -2782,40 +2885,42 @@ abstract class _CreateCompletionResponse extends CreateCompletionResponse { factory _CreateCompletionResponse.fromJson(Map json) = _$CreateCompletionResponseImpl.fromJson; - @override - /// A unique identifier for the completion. - String get id; @override + String get id; /// The list of completion choices the model generated for the input prompt. - List get choices; @override + List get choices; /// The Unix timestamp (in seconds) of when the completion was created. - int get created; @override + int get created; /// The model used for completion. - String get model; @override + String get model; /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + @override @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? get systemFingerprint; - @override /// The object type, which is always "text_completion" - CreateCompletionResponseObject get object; @override + CreateCompletionResponseObject get object; /// Usage statistics for the completion request. + @override @JsonKey(includeIfNull: false) CompletionUsage? get usage; + + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateCompletionResponseImplCopyWith<_$CreateCompletionResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2844,8 +2949,12 @@ mixin _$CompletionChoice { /// The text of the completion. String get text => throw _privateConstructorUsedError; + /// Serializes this CompletionChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CompletionChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -2878,6 +2987,8 @@ class _$CompletionChoiceCopyWithImpl<$Res, $Val extends CompletionChoice> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2906,6 +3017,8 @@ class _$CompletionChoiceCopyWithImpl<$Res, $Val extends CompletionChoice> ) as $Val); } + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionLogprobsCopyWith<$Res>? 
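// Example (not part of the patch): a sketch of reading the
// CreateCompletionResponse fields listed above from a raw JSON payload.
// `created` is a Unix timestamp in seconds and `usage` may be absent; plain
// jsonDecode is used here instead of the generated fromJson constructors, and
// the payload values are illustrative.
import 'dart:convert';

void main() {
  const raw = '''
  {
    "id": "cmpl-123",
    "object": "text_completion",
    "created": 1714588800,
    "model": "gpt-3.5-turbo-instruct",
    "choices": [{"index": 0, "text": "Hello!", "finish_reason": "stop"}],
    "usage": {"prompt_tokens": 3, "completion_tokens": 2, "total_tokens": 5}
  }
  ''';

  final response = jsonDecode(raw) as Map<String, dynamic>;
  final created = DateTime.fromMillisecondsSinceEpoch(
      (response['created'] as int) * 1000,
      isUtc: true);
  final usage = response['usage'] as Map<String, dynamic>?;

  print('model: ${response['model']} created: $created');
  print('total tokens: ${usage?['total_tokens'] ?? 'unknown'}');
}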
get logprobs { @@ -2948,6 +3061,8 @@ class __$$CompletionChoiceImplCopyWithImpl<$Res> $Res Function(_$CompletionChoiceImpl) _then) : super(_value, _then); + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3032,12 +3147,14 @@ class _$CompletionChoiceImpl extends _CompletionChoice { (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, finishReason, index, logprobs, text); - @JsonKey(ignore: true) + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionChoiceImplCopyWith<_$CompletionChoiceImpl> get copyWith => @@ -3066,29 +3183,31 @@ abstract class _CompletionChoice extends CompletionChoice { factory _CompletionChoice.fromJson(Map json) = _$CompletionChoiceImpl.fromJson; - @override - /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, /// `length` if the maximum number of tokens specified in the request was reached, /// or `content_filter` if content was omitted due to a flag from our content filters. + @override @JsonKey( name: 'finish_reason', unknownEnumValue: JsonKey.nullForUndefinedEnumValue) CompletionFinishReason? get finishReason; - @override /// The index of the choice in the list of generated choices. - int get index; @override + int get index; /// The probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - CompletionLogprobs? get logprobs; @override + CompletionLogprobs? get logprobs; /// The text of the completion. + @override String get text; + + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionChoiceImplCopyWith<_$CompletionChoiceImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3116,8 +3235,12 @@ mixin _$CompletionLogprobs { List?>? get topLogprobs => throw _privateConstructorUsedError; + /// Serializes this CompletionLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CompletionLogprobsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3148,6 +3271,8 @@ class _$CompletionLogprobsCopyWithImpl<$Res, $Val extends CompletionLogprobs> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3203,6 +3328,8 @@ class __$$CompletionLogprobsImplCopyWithImpl<$Res> $Res Function(_$CompletionLogprobsImpl) _then) : super(_value, _then); + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. 
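// Example (not part of the patch): the finish_reason values listed above
// ("stop", "length", "content_filter") usually need different handling; in
// particular "length" means the text was cut off at max_tokens. A small sketch
// over a raw choice map rather than the generated CompletionChoice class.
void describeChoice(Map<String, dynamic> choice) {
  switch (choice['finish_reason'] as String?) {
    case 'stop':
      print('complete: ${choice['text']}');
      break;
    case 'length':
      print('truncated at max_tokens, consider a retry: ${choice['text']}');
      break;
    case 'content_filter':
      print('content omitted by the content filter');
      break;
    default:
      print('unknown or missing finish_reason');
  }
}

void main() {
  describeChoice({'finish_reason': 'stop', 'text': 'Hello!', 'index': 0});
  describeChoice({'finish_reason': 'length', 'text': 'Hel', 'index': 1});
}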
@pragma('vm:prefer-inline') @override $Res call({ @@ -3327,7 +3454,7 @@ class _$CompletionLogprobsImpl extends _CompletionLogprobs { .equals(other._topLogprobs, _topLogprobs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -3336,7 +3463,9 @@ class _$CompletionLogprobsImpl extends _CompletionLogprobs { const DeepCollectionEquality().hash(_tokens), const DeepCollectionEquality().hash(_topLogprobs)); - @JsonKey(ignore: true) + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionLogprobsImplCopyWith<_$CompletionLogprobsImpl> get copyWith => @@ -3366,28 +3495,30 @@ abstract class _CompletionLogprobs extends CompletionLogprobs { factory _CompletionLogprobs.fromJson(Map json) = _$CompletionLogprobsImpl.fromJson; - @override - /// The offset of the token from the beginning of the prompt. + @override @JsonKey(name: 'text_offset', includeIfNull: false) List? get textOffset; - @override /// The log probabilities of tokens in the completion. + @override @JsonKey(name: 'token_logprobs', includeIfNull: false) List? get tokenLogprobs; - @override /// The tokens generated by the model converted back to text. + @override @JsonKey(includeIfNull: false) List? get tokens; - @override /// The log probabilities of the `logprobs` most likely tokens. + @override @JsonKey(name: 'top_logprobs', includeIfNull: false) List?>? get topLogprobs; + + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionLogprobsImplCopyWith<_$CompletionLogprobsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3543,8 +3674,12 @@ mixin _$CreateChatCompletionRequest { @JsonKey(includeIfNull: false) List? get functions => throw _privateConstructorUsedError; + /// Serializes this CreateChatCompletionRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateChatCompletionRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3617,6 +3752,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3740,6 +3877,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionModelCopyWith<$Res> get model { @@ -3748,6 +3887,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionResponseFormatCopyWith<$Res>? 
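// Example (not part of the patch): the parallel lists on CompletionLogprobs
// described above line up by index, so tokens[i] pairs with token_logprobs[i]
// (and text_offset[i] / top_logprobs[i]). A quick sketch pairing tokens with
// their probabilities from a raw logprobs map; the values are illustrative.
import 'dart:math';

void main() {
  final logprobs = {
    'tokens': ['Hello', ' world'],
    'token_logprobs': [-0.02, -0.31],
    'text_offset': [0, 5],
  };

  final tokens = (logprobs['tokens'] as List).cast<String>();
  final probs = (logprobs['token_logprobs'] as List).cast<double>();

  for (var i = 0; i < tokens.length; i++) {
    // exp(logprob) gives the sampled token's probability.
    print('${tokens[i]}: p = ${exp(probs[i]).toStringAsFixed(3)}');
  }
}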
get responseFormat { @@ -3761,6 +3902,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStopCopyWith<$Res>? get stop { @@ -3773,6 +3916,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamOptionsCopyWith<$Res>? get streamOptions { @@ -3786,6 +3931,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionToolChoiceOptionCopyWith<$Res>? get toolChoice { @@ -3799,6 +3946,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionFunctionCallCopyWith<$Res>? get functionCall { @@ -3887,6 +4036,8 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> $Res Function(_$CreateChatCompletionRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4309,7 +4460,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { .equals(other._functions, _functions)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hashAll([ runtimeType, @@ -4338,7 +4489,9 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { const DeepCollectionEquality().hash(_functions) ]); - @JsonKey(ignore: true) + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateChatCompletionRequestImplCopyWith<_$CreateChatCompletionRequestImpl> @@ -4404,75 +4557,73 @@ abstract class _CreateChatCompletionRequest factory _CreateChatCompletionRequest.fromJson(Map json) = _$CreateChatCompletionRequestImpl.fromJson; - @override - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + @override @_ChatCompletionModelConverter() ChatCompletionModel get model; - @override /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). - List get messages; @override + List get messages; /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + @override @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty; - @override /// Modify the likelihood of specified tokens appearing in the completion. 
/// /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + @override @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias; - @override /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + @override @JsonKey(includeIfNull: false) bool? get logprobs; - @override /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + @override @JsonKey(name: 'top_logprobs', includeIfNull: false) int? get topLogprobs; - @override /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. /// /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @override @JsonKey(name: 'max_tokens', includeIfNull: false) int? get maxTokens; - @override /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + @override @JsonKey(includeIfNull: false) int? get n; - @override /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + @override @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; - @override /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @JsonKey(name: 'response_format', includeIfNull: false) ChatCompletionResponseFormat? get responseFormat; - @override /// This feature is in Beta. /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. 
/// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + @override @JsonKey(includeIfNull: false) int? get seed; - @override /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers /// subscribed to the scale tier service: @@ -4482,47 +4633,47 @@ abstract class _CreateChatCompletionRequest /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. + @override @JsonKey( name: 'service_tier', includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) CreateChatCompletionRequestServiceTier? get serviceTier; - @override /// Up to 4 sequences where the API will stop generating further tokens. + @override @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? get stop; - @override /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @override @JsonKey(includeIfNull: false) bool? get stream; - @override /// Options for streaming response. Only set this when you set `stream: true`. + @override @JsonKey(name: 'stream_options', includeIfNull: false) ChatCompletionStreamOptions? get streamOptions; - @override /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. /// /// We generally recommend altering this or `top_p` but not both. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or `temperature` but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + @override @JsonKey(includeIfNull: false) List? get tools; - @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tool and instead generates a message. @@ -4531,21 +4682,21 @@ abstract class _CreateChatCompletionRequest /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. + @override @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? get toolChoice; - @override /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. + @override @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? 
get parallelToolCalls; - @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override @JsonKey(includeIfNull: false) String? get user; - @override /// Deprecated in favor of `tool_choice`. /// @@ -4555,18 +4706,22 @@ abstract class _CreateChatCompletionRequest /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. + @override @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionFunctionCall? get functionCall; - @override /// Deprecated in favor of `tools`. /// /// A list of functions the model may generate JSON inputs for. + @override @JsonKey(includeIfNull: false) List? get functions; + + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateChatCompletionRequestImplCopyWith<_$CreateChatCompletionRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -4625,6 +4780,8 @@ mixin _$ChatCompletionModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -4644,6 +4801,9 @@ class _$ChatCompletionModelCopyWithImpl<$Res, $Val extends ChatCompletionModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -4666,6 +4826,8 @@ class __$$ChatCompletionModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4711,11 +4873,13 @@ class _$ChatCompletionModelEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionModelEnumerationImplCopyWith< @@ -4803,7 +4967,10 @@ abstract class ChatCompletionModelEnumeration extends ChatCompletionModel { @override ChatCompletionModels get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionModelEnumerationImplCopyWith< _$ChatCompletionModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -4829,6 +4996,8 @@ class __$$ChatCompletionModelStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionModelStringImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -4872,11 +5041,13 @@ class _$ChatCompletionModelStringImpl extends ChatCompletionModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionModelStringImplCopyWith<_$ChatCompletionModelStringImpl> @@ -4963,7 +5134,10 @@ abstract class ChatCompletionModelString extends ChatCompletionModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionModelStringImplCopyWith<_$ChatCompletionModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -4979,8 +5153,12 @@ mixin _$ChatCompletionResponseFormat { ChatCompletionResponseFormatType get type => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionResponseFormatCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -5007,6 +5185,8 @@ class _$ChatCompletionResponseFormatCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5043,6 +5223,8 @@ class __$$ChatCompletionResponseFormatImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionResponseFormatImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5086,11 +5268,13 @@ class _$ChatCompletionResponseFormatImpl extends _ChatCompletionResponseFormat { (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionResponseFormatImplCopyWith< @@ -5116,12 +5300,14 @@ abstract class _ChatCompletionResponseFormat factory _ChatCompletionResponseFormat.fromJson(Map json) = _$ChatCompletionResponseFormatImpl.fromJson; - @override - /// Must be one of `text` or `json_object`. + @override ChatCompletionResponseFormatType get type; + + /// Create a copy of ChatCompletionResponseFormat + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionResponseFormatImplCopyWith< _$ChatCompletionResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -5181,6 +5367,8 @@ mixin _$ChatCompletionStop { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionStop to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -5200,6 +5388,9 @@ class _$ChatCompletionStopCopyWithImpl<$Res, $Val extends ChatCompletionStop> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -5222,6 +5413,8 @@ class __$$ChatCompletionStopListStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStopListStringImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5273,12 +5466,14 @@ class _$ChatCompletionStopListStringImpl extends ChatCompletionStopListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStopListStringImplCopyWith< @@ -5366,7 +5561,10 @@ abstract class ChatCompletionStopListString extends ChatCompletionStop { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStopListStringImplCopyWith< _$ChatCompletionStopListStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -5392,6 +5590,8 @@ class __$$ChatCompletionStopStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStopStringImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5435,11 +5635,13 @@ class _$ChatCompletionStopStringImpl extends ChatCompletionStopString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStopStringImplCopyWith<_$ChatCompletionStopStringImpl> @@ -5526,7 +5728,10 @@ abstract class ChatCompletionStopString extends ChatCompletionStop { @override String? get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStopStringImplCopyWith<_$ChatCompletionStopStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -5597,6 +5802,8 @@ mixin _$ChatCompletionToolChoiceOption { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionToolChoiceOption to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -5619,6 +5826,9 @@ class _$ChatCompletionToolChoiceOptionCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -5641,6 +5851,8 @@ class __$$ChatCompletionToolChoiceOptionEnumerationImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionToolChoiceOptionEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5687,11 +5899,13 @@ class _$ChatCompletionToolChoiceOptionEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolChoiceOptionEnumerationImplCopyWith< @@ -5791,7 +6005,10 @@ abstract class ChatCompletionToolChoiceOptionEnumeration @override ChatCompletionToolChoiceMode get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionToolChoiceOptionEnumerationImplCopyWith< _$ChatCompletionToolChoiceOptionEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -5828,6 +6045,8 @@ class __$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWit _then) : super(_value, _then); + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5842,6 +6061,8 @@ class __$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWit )); } + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionNamedToolChoiceCopyWith<$Res> get value { @@ -5886,11 +6107,13 @@ class _$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWith< @@ -5992,7 +6215,10 @@ abstract class ChatCompletionToolChoiceOptionChatCompletionNamedToolChoice @override ChatCompletionNamedToolChoice get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWith< _$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -6063,6 +6289,8 @@ mixin _$ChatCompletionFunctionCall { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionFunctionCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -6084,6 +6312,9 @@ class _$ChatCompletionFunctionCallCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -6106,6 +6337,8 @@ class __$$ChatCompletionFunctionCallEnumerationImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionFunctionCallEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6152,11 +6385,13 @@ class _$ChatCompletionFunctionCallEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionCallEnumerationImplCopyWith< @@ -6253,7 +6488,10 @@ abstract class ChatCompletionFunctionCallEnumeration @override ChatCompletionFunctionCallMode get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionFunctionCallEnumerationImplCopyWith< _$ChatCompletionFunctionCallEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -6290,6 +6528,8 @@ class __$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith _then) : super(_value, _then); + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6304,6 +6544,8 @@ class __$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith )); } + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $ChatCompletionFunctionCallOptionCopyWith<$Res> get value { @@ -6349,11 +6591,13 @@ class _$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith< @@ -6453,7 +6697,10 @@ abstract class ChatCompletionFunctionCallChatCompletionFunctionCallOption @override ChatCompletionFunctionCallOption get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith< _$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImpl> get copyWith => throw _privateConstructorUsedError; @@ -6472,8 +6719,12 @@ mixin _$ChatCompletionMessageFunctionCall { /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. String get arguments => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionMessageFunctionCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionMessageFunctionCallCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6500,6 +6751,8 @@ class _$ChatCompletionMessageFunctionCallCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6541,6 +6794,8 @@ class __$$ChatCompletionMessageFunctionCallImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageFunctionCallImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6595,11 +6850,13 @@ class _$ChatCompletionMessageFunctionCallImpl other.arguments == arguments)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageFunctionCallImplCopyWith< @@ -6626,16 +6883,18 @@ abstract class _ChatCompletionMessageFunctionCall Map json) = _$ChatCompletionMessageFunctionCallImpl.fromJson; - @override - /// The name of the function to call. 
- String get name; @override + String get name; /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + @override String get arguments; + + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageFunctionCallImplCopyWith< _$ChatCompletionMessageFunctionCallImpl> get copyWith => throw _privateConstructorUsedError; @@ -6651,8 +6910,12 @@ mixin _$ChatCompletionFunctionCallOption { /// The name of the function to call. String get name => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionFunctionCallOption to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionFunctionCallOptionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6679,6 +6942,8 @@ class _$ChatCompletionFunctionCallOptionCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6715,6 +6980,8 @@ class __$$ChatCompletionFunctionCallOptionImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionFunctionCallOptionImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6757,11 +7024,13 @@ class _$ChatCompletionFunctionCallOptionImpl (identical(other.name, name) || other.name == name)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionCallOptionImplCopyWith< @@ -6787,12 +7056,14 @@ abstract class _ChatCompletionFunctionCallOption Map json) = _$ChatCompletionFunctionCallOptionImpl.fromJson; - @override - /// The name of the function to call. + @override String get name; + + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionFunctionCallOptionImplCopyWith< _$ChatCompletionFunctionCallOptionImpl> get copyWith => throw _privateConstructorUsedError; @@ -6817,8 +7088,12 @@ mixin _$FunctionObject { @JsonKey(includeIfNull: false) Map? get parameters => throw _privateConstructorUsedError; + /// Serializes this FunctionObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $FunctionObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6845,6 +7120,8 @@ class _$FunctionObjectCopyWithImpl<$Res, $Val extends FunctionObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6891,6 +7168,8 @@ class __$$FunctionObjectImplCopyWithImpl<$Res> _$FunctionObjectImpl _value, $Res Function(_$FunctionObjectImpl) _then) : super(_value, _then); + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6972,12 +7251,14 @@ class _$FunctionObjectImpl extends _FunctionObject { .equals(other._parameters, _parameters)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, description, const DeepCollectionEquality().hash(_parameters)); - @JsonKey(ignore: true) + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FunctionObjectImplCopyWith<_$FunctionObjectImpl> get copyWith => @@ -7003,24 +7284,26 @@ abstract class _FunctionObject extends FunctionObject { factory _FunctionObject.fromJson(Map json) = _$FunctionObjectImpl.fromJson; - @override - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - String get name; @override + String get name; /// A description of what the function does, used by the model to choose when and how to call the function. + @override @JsonKey(includeIfNull: false) String? get description; - @override /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. + @override @JsonKey(includeIfNull: false) Map? get parameters; + + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FunctionObjectImplCopyWith<_$FunctionObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -7037,8 +7320,12 @@ mixin _$ChatCompletionTool { /// A function that the model may call. FunctionObject get function => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionTool to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionToolCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7064,6 +7351,8 @@ class _$ChatCompletionToolCopyWithImpl<$Res, $Val extends ChatCompletionTool> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -7082,6 +7371,8 @@ class _$ChatCompletionToolCopyWithImpl<$Res, $Val extends ChatCompletionTool> ) as $Val); } + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FunctionObjectCopyWith<$Res> get function { @@ -7113,6 +7404,8 @@ class __$$ChatCompletionToolImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionToolImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7164,11 +7457,13 @@ class _$ChatCompletionToolImpl extends _ChatCompletionTool { other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, function); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolImplCopyWith<_$ChatCompletionToolImpl> get copyWith => @@ -7192,16 +7487,18 @@ abstract class _ChatCompletionTool extends ChatCompletionTool { factory _ChatCompletionTool.fromJson(Map json) = _$ChatCompletionToolImpl.fromJson; - @override - /// The type of the tool. Currently, only `function` is supported. - ChatCompletionToolType get type; @override + ChatCompletionToolType get type; /// A function that the model may call. + @override FunctionObject get function; + + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionToolImplCopyWith<_$ChatCompletionToolImpl> get copyWith => throw _privateConstructorUsedError; } @@ -7221,8 +7518,12 @@ mixin _$ChatCompletionNamedToolChoice { ChatCompletionFunctionCallOption get function => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionNamedToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionNamedToolChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7253,6 +7554,8 @@ class _$ChatCompletionNamedToolChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7271,6 +7574,8 @@ class _$ChatCompletionNamedToolChoiceCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionFunctionCallOptionCopyWith<$Res> get function { @@ -7308,6 +7613,8 @@ class __$$ChatCompletionNamedToolChoiceImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionNamedToolChoiceImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -7362,11 +7669,13 @@ class _$ChatCompletionNamedToolChoiceImpl other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, function); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionNamedToolChoiceImplCopyWith< @@ -7393,16 +7702,18 @@ abstract class _ChatCompletionNamedToolChoice factory _ChatCompletionNamedToolChoice.fromJson(Map json) = _$ChatCompletionNamedToolChoiceImpl.fromJson; - @override - /// The type of the tool. Currently, only `function` is supported. - ChatCompletionNamedToolChoiceType get type; @override + ChatCompletionNamedToolChoiceType get type; /// Forces the model to call the specified function. + @override ChatCompletionFunctionCallOption get function; + + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionNamedToolChoiceImplCopyWith< _$ChatCompletionNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -7426,8 +7737,12 @@ mixin _$ChatCompletionMessageToolCall { ChatCompletionMessageFunctionCall get function => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionMessageToolCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionMessageToolCallCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7459,6 +7774,8 @@ class _$ChatCompletionMessageToolCallCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7482,6 +7799,8 @@ class _$ChatCompletionMessageToolCallCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionMessageFunctionCallCopyWith<$Res> get function { @@ -7520,6 +7839,8 @@ class __$$ChatCompletionMessageToolCallImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageToolCallImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7584,11 +7905,13 @@ class _$ChatCompletionMessageToolCallImpl other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageToolCallImplCopyWith< @@ -7616,20 +7939,22 @@ abstract class _ChatCompletionMessageToolCall factory _ChatCompletionMessageToolCall.fromJson(Map json) = _$ChatCompletionMessageToolCallImpl.fromJson; - @override - /// The ID of the tool call. - String get id; @override + String get id; /// The type of the tool. Currently, only `function` is supported. - ChatCompletionMessageToolCallType get type; @override + ChatCompletionMessageToolCallType get type; /// The name and arguments of a function that should be called, as generated by the model. + @override ChatCompletionMessageFunctionCall get function; + + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageToolCallImplCopyWith< _$ChatCompletionMessageToolCallImpl> get copyWith => throw _privateConstructorUsedError; @@ -7646,8 +7971,12 @@ mixin _$ChatCompletionStreamOptions { @JsonKey(name: 'include_usage', includeIfNull: false) bool? get includeUsage => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamOptions to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamOptionsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7676,6 +8005,8 @@ class _$ChatCompletionStreamOptionsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7714,6 +8045,8 @@ class __$$ChatCompletionStreamOptionsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamOptionsImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7758,11 +8091,13 @@ class _$ChatCompletionStreamOptionsImpl extends _ChatCompletionStreamOptions { other.includeUsage == includeUsage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, includeUsage); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamOptionsImplCopyWith<_$ChatCompletionStreamOptionsImpl> @@ -7787,13 +8122,15 @@ abstract class _ChatCompletionStreamOptions factory _ChatCompletionStreamOptions.fromJson(Map json) = _$ChatCompletionStreamOptionsImpl.fromJson; - @override - /// If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. + @override @JsonKey(name: 'include_usage', includeIfNull: false) bool? 
get includeUsage; + + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamOptionsImplCopyWith<_$ChatCompletionStreamOptionsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -7840,8 +8177,12 @@ mixin _$CreateChatCompletionResponse { @JsonKey(includeIfNull: false) CompletionUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this CreateChatCompletionResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateChatCompletionResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7883,6 +8224,8 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7931,6 +8274,8 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionUsageCopyWith<$Res>? get usage { @@ -7982,6 +8327,8 @@ class __$$CreateChatCompletionResponseImplCopyWithImpl<$Res> $Res Function(_$CreateChatCompletionResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8126,7 +8473,7 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -8139,7 +8486,9 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { object, usage); - @JsonKey(ignore: true) + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateChatCompletionResponseImplCopyWith< @@ -8177,50 +8526,52 @@ abstract class _CreateChatCompletionResponse factory _CreateChatCompletionResponse.fromJson(Map json) = _$CreateChatCompletionResponseImpl.fromJson; - @override - /// A unique identifier for the chat completion. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// A list of chat completion choices. Can be more than one if `n` is greater than 1. - List get choices; @override + List get choices; /// The Unix timestamp (in seconds) of when the chat completion was created. - int get created; @override + int get created; /// The model used for the chat completion. - String get model; @override + String get model; /// The service tier used for processing the request. This field is only included if the `service_tier` parameter /// is specified in the request. + @override @JsonKey( name: 'service_tier', includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ServiceTier? 
get serviceTier; - @override /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + @override @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? get systemFingerprint; - @override /// The object type, which is always `chat.completion`. - String get object; @override + String get object; /// Usage statistics for the completion request. + @override @JsonKey(includeIfNull: false) CompletionUsage? get usage; + + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateChatCompletionResponseImplCopyWith< _$CreateChatCompletionResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -8254,8 +8605,12 @@ mixin _$ChatCompletionResponseChoice { /// Log probability information for the choice. ChatCompletionLogprobs? get logprobs => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionResponseChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionResponseChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8291,6 +8646,8 @@ class _$ChatCompletionResponseChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8319,6 +8676,8 @@ class _$ChatCompletionResponseChoiceCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionLogprobsCopyWith<$Res>? get logprobs { @@ -8364,6 +8723,8 @@ class __$$ChatCompletionResponseChoiceImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionResponseChoiceImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8451,12 +8812,14 @@ class _$ChatCompletionResponseChoiceImpl extends _ChatCompletionResponseChoice { other.logprobs == logprobs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, finishReason, index, const DeepCollectionEquality().hash(message), logprobs); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionResponseChoiceImplCopyWith< @@ -8488,31 +8851,33 @@ abstract class _ChatCompletionResponseChoice factory _ChatCompletionResponseChoice.fromJson(Map json) = _$ChatCompletionResponseChoiceImpl.fromJson; - @override - /// The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, /// `length` if the maximum number of tokens specified in the request was reached, /// `content_filter` if content was omitted due to a flag from our content filters, /// `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + @override @JsonKey( name: 'finish_reason', unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionFinishReason? get finishReason; - @override /// The index of the choice in the list of choices. + @override @JsonKey(includeIfNull: false) int? get index; - @override /// An assistant message in a chat conversation. - ChatCompletionAssistantMessage get message; @override + ChatCompletionAssistantMessage get message; /// Log probability information for the choice. + @override ChatCompletionLogprobs? get logprobs; + + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionResponseChoiceImplCopyWith< _$ChatCompletionResponseChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -8529,8 +8894,12 @@ mixin _$ChatCompletionLogprobs { List? get content => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionLogprobsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8555,6 +8924,8 @@ class _$ChatCompletionLogprobsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8591,6 +8962,8 @@ class __$$ChatCompletionLogprobsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionLogprobsImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8642,12 +9015,14 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { const DeepCollectionEquality().equals(other._content, _content)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_content)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionLogprobsImplCopyWith<_$ChatCompletionLogprobsImpl> @@ -8671,12 +9046,14 @@ abstract class _ChatCompletionLogprobs extends ChatCompletionLogprobs { factory _ChatCompletionLogprobs.fromJson(Map json) = _$ChatCompletionLogprobsImpl.fromJson; - @override - /// A list of message content tokens with log probability information. + @override List? get content; + + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionLogprobsImplCopyWith<_$ChatCompletionLogprobsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -8702,8 +9079,12 @@ mixin _$ChatCompletionTokenLogprob { List get topLogprobs => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionTokenLogprob to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionTokenLogprobCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8734,6 +9115,8 @@ class _$ChatCompletionTokenLogprobCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8790,6 +9173,8 @@ class __$$ChatCompletionTokenLogprobImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionTokenLogprobImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8886,7 +9271,7 @@ class _$ChatCompletionTokenLogprobImpl extends _ChatCompletionTokenLogprob { .equals(other._topLogprobs, _topLogprobs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -8895,7 +9280,9 @@ class _$ChatCompletionTokenLogprobImpl extends _ChatCompletionTokenLogprob { const DeepCollectionEquality().hash(_bytes), const DeepCollectionEquality().hash(_topLogprobs)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionTokenLogprobImplCopyWith<_$ChatCompletionTokenLogprobImpl> @@ -8923,25 +9310,27 @@ abstract class _ChatCompletionTokenLogprob extends ChatCompletionTokenLogprob { factory _ChatCompletionTokenLogprob.fromJson(Map json) = _$ChatCompletionTokenLogprobImpl.fromJson; - @override - /// The token. - String get token; @override + String get token; /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. - double get logprob; @override + double get logprob; /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - List? get bytes; @override + List? get bytes; /// List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. + @override @JsonKey(name: 'top_logprobs') List get topLogprobs; + + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionTokenLogprobImplCopyWith<_$ChatCompletionTokenLogprobImpl> get copyWith => throw _privateConstructorUsedError; } @@ -8962,8 +9351,12 @@ mixin _$ChatCompletionTokenTopLogprob { /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. List? get bytes => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionTokenTopLogprob to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionTokenTopLogprobCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8990,6 +9383,8 @@ class _$ChatCompletionTokenTopLogprobCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9036,6 +9431,8 @@ class __$$ChatCompletionTokenTopLogprobImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionTokenTopLogprobImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9111,12 +9508,14 @@ class _$ChatCompletionTokenTopLogprobImpl const DeepCollectionEquality().equals(other._bytes, _bytes)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, token, logprob, const DeepCollectionEquality().hash(_bytes)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionTokenTopLogprobImplCopyWith< @@ -9143,20 +9542,22 @@ abstract class _ChatCompletionTokenTopLogprob factory _ChatCompletionTokenTopLogprob.fromJson(Map json) = _$ChatCompletionTokenTopLogprobImpl.fromJson; - @override - /// The token. - String get token; @override + String get token; /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. - double get logprob; @override + double get logprob; /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + @override List? get bytes; + + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionTokenTopLogprobImplCopyWith< _$ChatCompletionTokenTopLogprobImpl> get copyWith => throw _privateConstructorUsedError; @@ -9208,8 +9609,12 @@ mixin _$CreateChatCompletionStreamResponse { @JsonKey(includeIfNull: false) CompletionUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this CreateChatCompletionStreamResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateChatCompletionStreamResponseCopyWith< CreateChatCompletionStreamResponse> get copyWith => throw _privateConstructorUsedError; @@ -9252,6 +9657,8 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9300,6 +9707,8 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionUsageCopyWith<$Res>? get usage { @@ -9351,6 +9760,8 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> $Res Function(_$CreateChatCompletionStreamResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9501,7 +9912,7 @@ class _$CreateChatCompletionStreamResponseImpl (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -9514,7 +9925,9 @@ class _$CreateChatCompletionStreamResponseImpl object, usage); - @JsonKey(ignore: true) + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateChatCompletionStreamResponseImplCopyWith< @@ -9553,54 +9966,56 @@ abstract class _CreateChatCompletionStreamResponse Map json) = _$CreateChatCompletionStreamResponseImpl.fromJson; - @override - /// A unique identifier for the chat completion. Each chunk has the same ID. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the /// last chunk if you set `stream_options: {"include_usage": true}`. - List get choices; @override + List get choices; /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + @override @JsonKey(includeIfNull: false) int? get created; - @override /// The model to generate the completion. + @override @JsonKey(includeIfNull: false) String? get model; - @override /// The service tier used for processing the request. This field is only included if the `service_tier` parameter /// is specified in the request. 
+ @override @JsonKey( name: 'service_tier', includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ServiceTier? get serviceTier; - @override /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact + @override @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? get systemFingerprint; - @override /// The object type, which is always `chat.completion.chunk`. + @override @JsonKey(includeIfNull: false) String? get object; - @override /// Usage statistics for the completion request. + @override @JsonKey(includeIfNull: false) CompletionUsage? get usage; + + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateChatCompletionStreamResponseImplCopyWith< _$CreateChatCompletionStreamResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -9636,8 +10051,12 @@ mixin _$ChatCompletionStreamResponseChoice { @JsonKey(includeIfNull: false) int? get index => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamResponseChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamResponseChoiceCopyWith< ChatCompletionStreamResponseChoice> get copyWith => throw _privateConstructorUsedError; @@ -9676,6 +10095,8 @@ class _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9704,6 +10125,8 @@ class _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamResponseDeltaCopyWith<$Res> get delta { @@ -9713,6 +10136,8 @@ class _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, }); } + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamResponseChoiceLogprobsCopyWith<$Res>? get logprobs { @@ -9762,6 +10187,8 @@ class __$$ChatCompletionStreamResponseChoiceImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamResponseChoiceImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9851,12 +10278,14 @@ class _$ChatCompletionStreamResponseChoiceImpl (identical(other.index, index) || other.index == index)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, delta, logprobs, finishReason, index); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamResponseChoiceImplCopyWith< @@ -9890,32 +10319,34 @@ abstract class _ChatCompletionStreamResponseChoice Map json) = _$ChatCompletionStreamResponseChoiceImpl.fromJson; - @override - /// A chat completion delta generated by streamed model responses. - ChatCompletionStreamResponseDelta get delta; @override + ChatCompletionStreamResponseDelta get delta; /// Log probability information for the choice. + @override @JsonKey(includeIfNull: false) ChatCompletionStreamResponseChoiceLogprobs? get logprobs; - @override /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, /// `length` if the maximum number of tokens specified in the request was reached, /// `content_filter` if content was omitted due to a flag from our content filters, /// `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + @override @JsonKey( name: 'finish_reason', unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionFinishReason? get finishReason; - @override /// The index of the choice in the list of choices. + @override @JsonKey(includeIfNull: false) int? get index; + + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamResponseChoiceImplCopyWith< _$ChatCompletionStreamResponseChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -9933,8 +10364,12 @@ mixin _$ChatCompletionStreamResponseChoiceLogprobs { List? get content => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamResponseChoiceLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamResponseChoiceLogprobsCopyWith< ChatCompletionStreamResponseChoiceLogprobs> get copyWith => throw _privateConstructorUsedError; @@ -9963,6 +10398,8 @@ class _$ChatCompletionStreamResponseChoiceLogprobsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10000,6 +10437,8 @@ class __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamResponseChoiceLogprobsImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10053,12 +10492,14 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl const DeepCollectionEquality().equals(other._content, _content)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_content)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith< @@ -10087,12 +10528,14 @@ abstract class _ChatCompletionStreamResponseChoiceLogprobs Map json) = _$ChatCompletionStreamResponseChoiceLogprobsImpl.fromJson; - @override - /// A list of message content tokens with log probability information. + @override List? get content; + + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith< _$ChatCompletionStreamResponseChoiceLogprobsImpl> get copyWith => throw _privateConstructorUsedError; @@ -10124,8 +10567,12 @@ mixin _$ChatCompletionStreamResponseDelta { includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionMessageRole? get role => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamResponseDelta to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamResponseDeltaCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -10163,6 +10610,8 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10191,6 +10640,8 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamMessageFunctionCallCopyWith<$Res>? get functionCall { @@ -10239,6 +10690,8 @@ class __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamResponseDeltaImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10336,12 +10789,14 @@ class _$ChatCompletionStreamResponseDeltaImpl (identical(other.role, role) || other.role == role)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, content, functionCall, const DeepCollectionEquality().hash(_toolCalls), role); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamResponseDeltaImplCopyWith< @@ -10376,29 +10831,31 @@ abstract class _ChatCompletionStreamResponseDelta Map json) = _$ChatCompletionStreamResponseDeltaImpl.fromJson; - @override - /// The contents of the chunk message. + @override @JsonKey(includeIfNull: false) String? get content; - @override /// The name and arguments of a function that should be called, as generated by the model. 
+ @override @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? get functionCall; - @override /// No Description + @override @JsonKey(name: 'tool_calls', includeIfNull: false) List? get toolCalls; - @override /// The role of the messages author. One of `system`, `user`, `assistant`, or `tool` (`function` is deprecated). + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionMessageRole? get role; + + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamResponseDeltaImplCopyWith< _$ChatCompletionStreamResponseDeltaImpl> get copyWith => throw _privateConstructorUsedError; @@ -10420,8 +10877,12 @@ mixin _$ChatCompletionStreamMessageFunctionCall { @JsonKey(includeIfNull: false) String? get arguments => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamMessageFunctionCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamMessageFunctionCallCopyWith< ChatCompletionStreamMessageFunctionCall> get copyWith => throw _privateConstructorUsedError; @@ -10452,6 +10913,8 @@ class _$ChatCompletionStreamMessageFunctionCallCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10495,6 +10958,8 @@ class __$$ChatCompletionStreamMessageFunctionCallImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamMessageFunctionCallImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10552,11 +11017,13 @@ class _$ChatCompletionStreamMessageFunctionCallImpl other.arguments == arguments)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamMessageFunctionCallImplCopyWith< @@ -10585,18 +11052,20 @@ abstract class _ChatCompletionStreamMessageFunctionCall Map json) = _$ChatCompletionStreamMessageFunctionCallImpl.fromJson; - @override - /// The name of the function to call. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + @override @JsonKey(includeIfNull: false) String? 
get arguments; + + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamMessageFunctionCallImplCopyWith< _$ChatCompletionStreamMessageFunctionCallImpl> get copyWith => throw _privateConstructorUsedError; @@ -10628,8 +11097,12 @@ mixin _$ChatCompletionStreamMessageToolCallChunk { ChatCompletionStreamMessageFunctionCall? get function => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamMessageToolCallChunk to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamMessageToolCallChunkCopyWith< ChatCompletionStreamMessageToolCallChunk> get copyWith => throw _privateConstructorUsedError; @@ -10668,6 +11141,8 @@ class _$ChatCompletionStreamMessageToolCallChunkCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10696,6 +11171,8 @@ class _$ChatCompletionStreamMessageToolCallChunkCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamMessageFunctionCallCopyWith<$Res>? get function { @@ -10743,6 +11220,8 @@ class __$$ChatCompletionStreamMessageToolCallChunkImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamMessageToolCallChunkImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10827,11 +11306,13 @@ class _$ChatCompletionStreamMessageToolCallChunkImpl other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamMessageToolCallChunkImplCopyWith< @@ -10866,28 +11347,30 @@ abstract class _ChatCompletionStreamMessageToolCallChunk Map json) = _$ChatCompletionStreamMessageToolCallChunkImpl.fromJson; - @override - /// No Description - int get index; @override + int get index; /// The ID of the tool call. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// The type of the tool. Currently, only `function` is supported. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionStreamMessageToolCallChunkType? get type; - @override /// The name and arguments of a function that should be called, as generated by the model. + @override @JsonKey(includeIfNull: false) ChatCompletionStreamMessageFunctionCall? 
get function; + + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamMessageToolCallChunkImplCopyWith< _$ChatCompletionStreamMessageToolCallChunkImpl> get copyWith => throw _privateConstructorUsedError; @@ -10911,8 +11394,12 @@ mixin _$CompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; + /// Serializes this CompletionUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CompletionUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -10939,6 +11426,8 @@ class _$CompletionUsageCopyWithImpl<$Res, $Val extends CompletionUsage> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10985,6 +11474,8 @@ class __$$CompletionUsageImplCopyWithImpl<$Res> _$CompletionUsageImpl _value, $Res Function(_$CompletionUsageImpl) _then) : super(_value, _then); + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11054,12 +11545,14 @@ class _$CompletionUsageImpl extends _CompletionUsage { other.totalTokens == totalTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); - @JsonKey(ignore: true) + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionUsageImplCopyWith<_$CompletionUsageImpl> get copyWith => @@ -11085,23 +11578,25 @@ abstract class _CompletionUsage extends CompletionUsage { factory _CompletionUsage.fromJson(Map json) = _$CompletionUsageImpl.fromJson; - @override - /// Number of tokens in the generated completion. + @override @JsonKey(name: 'completion_tokens') int? get completionTokens; - @override /// Number of tokens in the prompt. + @override @JsonKey(name: 'prompt_tokens') int get promptTokens; - @override /// Total number of tokens used in the request (prompt + completion). + @override @JsonKey(name: 'total_tokens') int get totalTokens; + + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionUsageImplCopyWith<_$CompletionUsageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -11134,8 +11629,12 @@ mixin _$CreateEmbeddingRequest { @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; + /// Serializes this CreateEmbeddingRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $CreateEmbeddingRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -11168,6 +11667,8 @@ class _$CreateEmbeddingRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11201,6 +11702,8 @@ class _$CreateEmbeddingRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingModelCopyWith<$Res> get model { @@ -11209,6 +11712,8 @@ class _$CreateEmbeddingRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingInputCopyWith<$Res> get input { @@ -11250,6 +11755,8 @@ class __$$CreateEmbeddingRequestImplCopyWithImpl<$Res> $Res Function(_$CreateEmbeddingRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11343,12 +11850,14 @@ class _$CreateEmbeddingRequestImpl extends _CreateEmbeddingRequest { (identical(other.user, user) || other.user == user)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, model, input, encodingFormat, dimensions, user); - @JsonKey(ignore: true) + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateEmbeddingRequestImplCopyWith<_$CreateEmbeddingRequestImpl> @@ -11377,33 +11886,35 @@ abstract class _CreateEmbeddingRequest extends CreateEmbeddingRequest { factory _CreateEmbeddingRequest.fromJson(Map json) = _$CreateEmbeddingRequestImpl.fromJson; - @override - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @override @_EmbeddingModelConverter() EmbeddingModel get model; - @override /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @override @_EmbeddingInputConverter() EmbeddingInput get input; - @override /// The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). + @override @JsonKey(name: 'encoding_format') EmbeddingEncodingFormat get encodingFormat; - @override /// The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. + @override @JsonKey(includeIfNull: false) int? 
get dimensions; - @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override @JsonKey(includeIfNull: false) String? get user; + + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateEmbeddingRequestImplCopyWith<_$CreateEmbeddingRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -11462,6 +11973,8 @@ mixin _$EmbeddingModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this EmbeddingModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -11481,6 +11994,9 @@ class _$EmbeddingModelCopyWithImpl<$Res, $Val extends EmbeddingModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -11502,6 +12018,8 @@ class __$$EmbeddingModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$EmbeddingModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11545,11 +12063,13 @@ class _$EmbeddingModelEnumerationImpl extends EmbeddingModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingModelEnumerationImplCopyWith<_$EmbeddingModelEnumerationImpl> @@ -11636,7 +12156,10 @@ abstract class EmbeddingModelEnumeration extends EmbeddingModel { @override EmbeddingModels get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingModelEnumerationImplCopyWith<_$EmbeddingModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -11658,6 +12181,8 @@ class __$$EmbeddingModelStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingModelStringImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11701,11 +12226,13 @@ class _$EmbeddingModelStringImpl extends EmbeddingModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingModelStringImplCopyWith<_$EmbeddingModelStringImpl> @@ -11793,7 +12320,10 @@ abstract class EmbeddingModelString extends EmbeddingModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingModelStringImplCopyWith<_$EmbeddingModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -11868,6 +12398,8 @@ mixin _$EmbeddingInput { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this EmbeddingInput to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -11887,6 +12419,9 @@ class _$EmbeddingInputCopyWithImpl<$Res, $Val extends EmbeddingInput> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -11908,6 +12443,8 @@ class __$$EmbeddingInputListListIntImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputListListIntImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11958,12 +12495,14 @@ class _$EmbeddingInputListListIntImpl extends EmbeddingInputListListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingInputListListIntImplCopyWith<_$EmbeddingInputListListIntImpl> @@ -12062,7 +12601,10 @@ abstract class EmbeddingInputListListInt extends EmbeddingInput { @override List> get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingInputListListIntImplCopyWith<_$EmbeddingInputListListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12085,6 +12627,8 @@ class __$$EmbeddingInputListIntImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputListIntImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12135,12 +12679,14 @@ class _$EmbeddingInputListIntImpl extends EmbeddingInputListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingInputListIntImplCopyWith<_$EmbeddingInputListIntImpl> @@ -12239,7 +12785,10 @@ abstract class EmbeddingInputListInt extends EmbeddingInput { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingInputListIntImplCopyWith<_$EmbeddingInputListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12263,6 +12812,8 @@ class __$$EmbeddingInputListStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputListStringImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12313,12 +12864,14 @@ class _$EmbeddingInputListStringImpl extends EmbeddingInputListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingInputListStringImplCopyWith<_$EmbeddingInputListStringImpl> @@ -12417,7 +12970,10 @@ abstract class EmbeddingInputListString extends EmbeddingInput { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingInputListStringImplCopyWith<_$EmbeddingInputListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12439,6 +12995,8 @@ class __$$EmbeddingInputStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputStringImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12482,11 +13040,13 @@ class _$EmbeddingInputStringImpl extends EmbeddingInputString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingInputStringImplCopyWith<_$EmbeddingInputStringImpl> @@ -12586,7 +13146,10 @@ abstract class EmbeddingInputString extends EmbeddingInput { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingInputStringImplCopyWith<_$EmbeddingInputStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12612,8 +13175,12 @@ mixin _$CreateEmbeddingResponse { @JsonKey(includeIfNull: false) EmbeddingUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this CreateEmbeddingResponse to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateEmbeddingResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -12644,6 +13211,8 @@ class _$CreateEmbeddingResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12672,6 +13241,8 @@ class _$CreateEmbeddingResponseCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingUsageCopyWith<$Res>? get usage { @@ -12714,6 +13285,8 @@ class __$$CreateEmbeddingResponseImplCopyWithImpl<$Res> $Res Function(_$CreateEmbeddingResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12797,12 +13370,14 @@ class _$CreateEmbeddingResponseImpl extends _CreateEmbeddingResponse { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_data), model, object, usage); - @JsonKey(ignore: true) + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateEmbeddingResponseImplCopyWith<_$CreateEmbeddingResponseImpl> @@ -12829,25 +13404,27 @@ abstract class _CreateEmbeddingResponse extends CreateEmbeddingResponse { factory _CreateEmbeddingResponse.fromJson(Map json) = _$CreateEmbeddingResponseImpl.fromJson; - @override - /// The list of embeddings generated by the model. - List get data; @override + List get data; /// The name of the model used to generate the embedding. - String get model; @override + String get model; /// The object type, which is always "list". - CreateEmbeddingResponseObject get object; @override + CreateEmbeddingResponseObject get object; /// The usage information for the request. + @override @JsonKey(includeIfNull: false) EmbeddingUsage? get usage; + + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateEmbeddingResponseImplCopyWith<_$CreateEmbeddingResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12868,8 +13445,12 @@ mixin _$Embedding { /// The object type, which is always "embedding". EmbeddingObject get object => throw _privateConstructorUsedError; + /// Serializes this Embedding to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $EmbeddingCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -12897,6 +13478,8 @@ class _$EmbeddingCopyWithImpl<$Res, $Val extends Embedding> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12920,6 +13503,8 @@ class _$EmbeddingCopyWithImpl<$Res, $Val extends Embedding> ) as $Val); } + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingVectorCopyWith<$Res> get embedding { @@ -12954,6 +13539,8 @@ class __$$EmbeddingImplCopyWithImpl<$Res> _$EmbeddingImpl _value, $Res Function(_$EmbeddingImpl) _then) : super(_value, _then); + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13019,11 +13606,13 @@ class _$EmbeddingImpl extends _Embedding { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, embedding, object); - @JsonKey(ignore: true) + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingImplCopyWith<_$EmbeddingImpl> get copyWith => @@ -13047,21 +13636,23 @@ abstract class _Embedding extends Embedding { factory _Embedding.fromJson(Map json) = _$EmbeddingImpl.fromJson; - @override - /// The index of the embedding in the list of embeddings. - int get index; @override + int get index; /// The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings). + @override @_EmbeddingVectorConverter() EmbeddingVector get embedding; - @override /// The object type, which is always "embedding". + @override EmbeddingObject get object; + + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingImplCopyWith<_$EmbeddingImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13120,6 +13711,8 @@ mixin _$EmbeddingVector { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this EmbeddingVector to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -13139,6 +13732,9 @@ class _$EmbeddingVectorCopyWithImpl<$Res, $Val extends EmbeddingVector> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -13160,6 +13756,8 @@ class __$$EmbeddingVectorListDoubleImplCopyWithImpl<$Res> $Res Function(_$EmbeddingVectorListDoubleImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -13210,12 +13808,14 @@ class _$EmbeddingVectorListDoubleImpl extends EmbeddingVectorListDouble { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingVectorListDoubleImplCopyWith<_$EmbeddingVectorListDoubleImpl> @@ -13302,7 +13902,10 @@ abstract class EmbeddingVectorListDouble extends EmbeddingVector { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingVectorListDoubleImplCopyWith<_$EmbeddingVectorListDoubleImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13325,6 +13928,8 @@ class __$$EmbeddingVectorStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingVectorStringImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13368,11 +13973,13 @@ class _$EmbeddingVectorStringImpl extends EmbeddingVectorString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingVectorStringImplCopyWith<_$EmbeddingVectorStringImpl> @@ -13459,7 +14066,10 @@ abstract class EmbeddingVectorString extends EmbeddingVector { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingVectorStringImplCopyWith<_$EmbeddingVectorStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13478,8 +14088,12 @@ mixin _$EmbeddingUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; + /// Serializes this EmbeddingUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $EmbeddingUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -13505,6 +14119,8 @@ class _$EmbeddingUsageCopyWithImpl<$Res, $Val extends EmbeddingUsage> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -13545,6 +14161,8 @@ class __$$EmbeddingUsageImplCopyWithImpl<$Res> _$EmbeddingUsageImpl _value, $Res Function(_$EmbeddingUsageImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13601,11 +14219,13 @@ class _$EmbeddingUsageImpl extends _EmbeddingUsage { other.totalTokens == totalTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, promptTokens, totalTokens); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingUsageImplCopyWith<_$EmbeddingUsageImpl> get copyWith => @@ -13630,18 +14250,20 @@ abstract class _EmbeddingUsage extends EmbeddingUsage { factory _EmbeddingUsage.fromJson(Map json) = _$EmbeddingUsageImpl.fromJson; - @override - /// The number of tokens used by the prompt. + @override @JsonKey(name: 'prompt_tokens') int get promptTokens; - @override /// The total number of tokens used by the request. + @override @JsonKey(name: 'total_tokens') int get totalTokens; + + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingUsageImplCopyWith<_$EmbeddingUsageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13680,7 +14302,7 @@ mixin _$CreateFineTuningJobRequest { /// A string of up to 18 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @JsonKey(includeIfNull: false) String? get suffix => throw _privateConstructorUsedError; @@ -13707,8 +14329,12 @@ mixin _$CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; + /// Serializes this CreateFineTuningJobRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateFineTuningJobRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -13746,6 +14372,8 @@ class _$CreateFineTuningJobRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13789,6 +14417,8 @@ class _$CreateFineTuningJobRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningModelCopyWith<$Res> get model { @@ -13797,6 +14427,8 @@ class _$CreateFineTuningJobRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $FineTuningJobHyperparametersCopyWith<$Res>? get hyperparameters { @@ -13847,6 +14479,8 @@ class __$$CreateFineTuningJobRequestImplCopyWithImpl<$Res> $Res Function(_$CreateFineTuningJobRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13940,7 +14574,7 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { /// A string of up to 18 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @override @JsonKey(includeIfNull: false) final String? suffix; @@ -14002,7 +14636,7 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { (identical(other.seed, seed) || other.seed == seed)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -14014,7 +14648,9 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { const DeepCollectionEquality().hash(_integrations), seed); - @JsonKey(ignore: true) + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateFineTuningJobRequestImplCopyWith<_$CreateFineTuningJobRequestImpl> @@ -14047,13 +14683,11 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { factory _CreateFineTuningJobRequest.fromJson(Map json) = _$CreateFineTuningJobRequestImpl.fromJson; - @override - /// The name of the model to fine-tune. You can select one of the /// [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + @override @_FineTuningModelConverter() FineTuningModel get model; - @override /// The ID of an uploaded file that contains training data. /// @@ -14067,21 +14701,21 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @override @JsonKey(name: 'training_file') String get trainingFile; - @override /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @override @JsonKey(includeIfNull: false) FineTuningJobHyperparameters? get hyperparameters; - @override /// A string of up to 18 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + @override @JsonKey(includeIfNull: false) String? get suffix; - @override /// The ID of an uploaded file that contains validation data. 
/// @@ -14093,21 +14727,25 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @override @JsonKey(name: 'validation_file', includeIfNull: false) String? get validationFile; - @override /// A list of integrations to enable for your fine-tuning job. + @override @JsonKey(includeIfNull: false) List? get integrations; - @override /// The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. /// If a seed is not specified, one will be generated for you. + @override @JsonKey(includeIfNull: false) int? get seed; + + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateFineTuningJobRequestImplCopyWith<_$CreateFineTuningJobRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14166,6 +14804,8 @@ mixin _$FineTuningModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this FineTuningModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -14185,6 +14825,9 @@ class _$FineTuningModelCopyWithImpl<$Res, $Val extends FineTuningModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -14207,6 +14850,8 @@ class __$$FineTuningModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$FineTuningModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14251,11 +14896,13 @@ class _$FineTuningModelEnumerationImpl extends FineTuningModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningModelEnumerationImplCopyWith<_$FineTuningModelEnumerationImpl> @@ -14342,7 +14989,10 @@ abstract class FineTuningModelEnumeration extends FineTuningModel { @override FineTuningModels get value; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningModelEnumerationImplCopyWith<_$FineTuningModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14365,6 +15015,8 @@ class __$$FineTuningModelStringImplCopyWithImpl<$Res> $Res Function(_$FineTuningModelStringImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -14408,11 +15060,13 @@ class _$FineTuningModelStringImpl extends FineTuningModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningModelStringImplCopyWith<_$FineTuningModelStringImpl> @@ -14499,7 +15153,10 @@ abstract class FineTuningModelString extends FineTuningModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningModelStringImplCopyWith<_$FineTuningModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14566,8 +15223,12 @@ mixin _$FineTuningJob { List? get integrations => throw _privateConstructorUsedError; + /// Serializes this FineTuningJob to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -14610,6 +15271,8 @@ class _$FineTuningJobCopyWithImpl<$Res, $Val extends FineTuningJob> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14693,6 +15356,8 @@ class _$FineTuningJobCopyWithImpl<$Res, $Val extends FineTuningJob> ) as $Val); } + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningJobErrorCopyWith<$Res>? get error { @@ -14705,6 +15370,8 @@ class _$FineTuningJobCopyWithImpl<$Res, $Val extends FineTuningJob> }); } + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningJobHyperparametersCopyWith<$Res> get hyperparameters { @@ -14755,6 +15422,8 @@ class __$$FineTuningJobImplCopyWithImpl<$Res> _$FineTuningJobImpl _value, $Res Function(_$FineTuningJobImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14988,7 +15657,7 @@ class _$FineTuningJobImpl extends _FineTuningJob { .equals(other._integrations, _integrations)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -15008,7 +15677,9 @@ class _$FineTuningJobImpl extends _FineTuningJob { validationFile, const DeepCollectionEquality().hash(_integrations)); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobImplCopyWith<_$FineTuningJobImpl> get copyWith => @@ -15045,77 +15716,79 @@ abstract class _FineTuningJob extends FineTuningJob { factory _FineTuningJob.fromJson(Map json) = _$FineTuningJobImpl.fromJson; - @override - /// The object identifier, which can be referenced in the API endpoints. - String get id; @override + String get id; /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. - FineTuningJobError? get error; @override + FineTuningJobError? get error; /// The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + @override @JsonKey(name: 'fine_tuned_model') String? get fineTunedModel; - @override /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. + @override @JsonKey(name: 'finished_at') int? get finishedAt; - @override /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. - FineTuningJobHyperparameters get hyperparameters; @override + FineTuningJobHyperparameters get hyperparameters; /// The base model that is being fine-tuned. - String get model; @override + String get model; /// The object type, which is always "fine_tuning.job". - FineTuningJobObject get object; @override + FineTuningJobObject get object; /// The organization that owns the fine-tuning job. + @override @JsonKey(name: 'organization_id') String get organizationId; - @override /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @override @JsonKey(name: 'result_files') List get resultFiles; - @override /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. - FineTuningJobStatus get status; @override + FineTuningJobStatus get status; /// The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + @override @JsonKey(name: 'trained_tokens') int? get trainedTokens; - @override /// The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @override @JsonKey(name: 'training_file') String get trainingFile; - @override /// The file ID used for validation. You can retrieve the validation results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @override @JsonKey(name: 'validation_file') String? get validationFile; - @override /// A list of integrations to enable for this fine-tuning job. + @override @JsonKey(includeIfNull: false) List? get integrations; + + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobImplCopyWith<_$FineTuningJobImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15135,8 +15808,12 @@ mixin _$FineTuningIntegration { /// to your run, and set a default entity (team, username, etc) to be associated with your run. FineTuningIntegrationWandb get wandb => throw _privateConstructorUsedError; + /// Serializes this FineTuningIntegration to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningIntegrationCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -15163,6 +15840,8 @@ class _$FineTuningIntegrationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15181,6 +15860,8 @@ class _$FineTuningIntegrationCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningIntegrationWandbCopyWith<$Res> get wandb { @@ -15214,6 +15895,8 @@ class __$$FineTuningIntegrationImplCopyWithImpl<$Res> $Res Function(_$FineTuningIntegrationImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15266,11 +15949,13 @@ class _$FineTuningIntegrationImpl extends _FineTuningIntegration { (identical(other.wandb, wandb) || other.wandb == wandb)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, wandb); - @JsonKey(ignore: true) + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningIntegrationImplCopyWith<_$FineTuningIntegrationImpl> @@ -15295,18 +15980,20 @@ abstract class _FineTuningIntegration extends FineTuningIntegration { factory _FineTuningIntegration.fromJson(Map json) = _$FineTuningIntegrationImpl.fromJson; - @override - /// The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. - FineTuningIntegrationType get type; @override + FineTuningIntegrationType get type; /// The settings for your integration with Weights and Biases. This payload specifies the project that /// metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags /// to your run, and set a default entity (team, username, etc) to be associated with your run. + @override FineTuningIntegrationWandb get wandb; + + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningIntegrationImplCopyWith<_$FineTuningIntegrationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15335,8 +16022,12 @@ mixin _$FineTuningIntegrationWandb { @JsonKey(includeIfNull: false) List? 
get tags => throw _privateConstructorUsedError; + /// Serializes this FineTuningIntegrationWandb to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningIntegrationWandbCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -15366,6 +16057,8 @@ class _$FineTuningIntegrationWandbCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15421,6 +16114,8 @@ class __$$FineTuningIntegrationWandbImplCopyWithImpl<$Res> $Res Function(_$FineTuningIntegrationWandbImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15512,12 +16207,14 @@ class _$FineTuningIntegrationWandbImpl extends _FineTuningIntegrationWandb { const DeepCollectionEquality().equals(other._tags, _tags)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, project, name, entity, const DeepCollectionEquality().hash(_tags)); - @JsonKey(ignore: true) + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningIntegrationWandbImplCopyWith<_$FineTuningIntegrationWandbImpl> @@ -15544,29 +16241,31 @@ abstract class _FineTuningIntegrationWandb extends FineTuningIntegrationWandb { factory _FineTuningIntegrationWandb.fromJson(Map json) = _$FineTuningIntegrationWandbImpl.fromJson; - @override - /// The name of the project that the new run will be created under. - String get project; @override + String get project; /// A display name to set for the run. If not set, we will use the Job ID as the name. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The entity to use for the run. This allows you to set the team or username of the WandB user that you would /// like associated with the run. If not set, the default entity for the registered WandB API key is used. + @override @JsonKey(includeIfNull: false) String? get entity; - @override /// A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some /// default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + @override @JsonKey(includeIfNull: false) List? get tags; + + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningIntegrationWandbImplCopyWith<_$FineTuningIntegrationWandbImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15586,8 +16285,12 @@ mixin _$FineTuningJobError { /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. String? get param => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobError to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -15611,6 +16314,8 @@ class _$FineTuningJobErrorCopyWithImpl<$Res, $Val extends FineTuningJobError> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15654,6 +16359,8 @@ class __$$FineTuningJobErrorImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobErrorImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15715,11 +16422,13 @@ class _$FineTuningJobErrorImpl extends _FineTuningJobError { (identical(other.param, param) || other.param == param)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message, param); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobErrorImplCopyWith<_$FineTuningJobErrorImpl> get copyWith => @@ -15744,20 +16453,22 @@ abstract class _FineTuningJobError extends FineTuningJobError { factory _FineTuningJobError.fromJson(Map json) = _$FineTuningJobErrorImpl.fromJson; - @override - /// A machine-readable error code. - String get code; @override + String get code; /// A human-readable error message. - String get message; @override + String get message; /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + @override String? get param; + + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobErrorImplCopyWith<_$FineTuningJobErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15777,8 +16488,12 @@ mixin _$FineTuningJobHyperparameters { @JsonKey(name: 'n_epochs') FineTuningNEpochs get nEpochs => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobHyperparameters to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobHyperparametersCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -15810,6 +16525,8 @@ class _$FineTuningJobHyperparametersCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15823,6 +16540,8 @@ class _$FineTuningJobHyperparametersCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $FineTuningNEpochsCopyWith<$Res> get nEpochs { @@ -15860,6 +16579,8 @@ class __$$FineTuningJobHyperparametersImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobHyperparametersImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15909,11 +16630,13 @@ class _$FineTuningJobHyperparametersImpl extends _FineTuningJobHyperparameters { (identical(other.nEpochs, nEpochs) || other.nEpochs == nEpochs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, nEpochs); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobHyperparametersImplCopyWith< @@ -15941,17 +16664,19 @@ abstract class _FineTuningJobHyperparameters factory _FineTuningJobHyperparameters.fromJson(Map json) = _$FineTuningJobHyperparametersImpl.fromJson; - @override - /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. /// /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number /// manually, we support any number between 1 and 50 epochs. + @override @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') FineTuningNEpochs get nEpochs; + + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobHyperparametersImplCopyWith< _$FineTuningJobHyperparametersImpl> get copyWith => throw _privateConstructorUsedError; @@ -16011,6 +16736,8 @@ mixin _$FineTuningNEpochs { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this FineTuningNEpochs to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -16030,6 +16757,9 @@ class _$FineTuningNEpochsCopyWithImpl<$Res, $Val extends FineTuningNEpochs> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -16052,6 +16782,8 @@ class __$$FineTuningNEpochsEnumerationImplCopyWithImpl<$Res> $Res Function(_$FineTuningNEpochsEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16096,11 +16828,13 @@ class _$FineTuningNEpochsEnumerationImpl extends FineTuningNEpochsEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningNEpochsEnumerationImplCopyWith< @@ -16189,7 +16923,10 @@ abstract class FineTuningNEpochsEnumeration extends FineTuningNEpochs { @override FineTuningNEpochsOptions get value; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningNEpochsEnumerationImplCopyWith< _$FineTuningNEpochsEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -16212,6 +16949,8 @@ class __$$FineTuningNEpochsIntImplCopyWithImpl<$Res> $Res Function(_$FineTuningNEpochsIntImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16255,11 +16994,13 @@ class _$FineTuningNEpochsIntImpl extends FineTuningNEpochsInt { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningNEpochsIntImplCopyWith<_$FineTuningNEpochsIntImpl> @@ -16347,7 +17088,10 @@ abstract class FineTuningNEpochsInt extends FineTuningNEpochs { @override int get value; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningNEpochsIntImplCopyWith<_$FineTuningNEpochsIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -16370,8 +17114,12 @@ mixin _$ListPaginatedFineTuningJobsResponse { ListPaginatedFineTuningJobsResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this ListPaginatedFineTuningJobsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListPaginatedFineTuningJobsResponseCopyWith< ListPaginatedFineTuningJobsResponse> get copyWith => throw _privateConstructorUsedError; @@ -16402,6 +17150,8 @@ class _$ListPaginatedFineTuningJobsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16451,6 +17201,8 @@ class __$$ListPaginatedFineTuningJobsResponseImplCopyWithImpl<$Res> $Res Function(_$ListPaginatedFineTuningJobsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -16525,12 +17277,14 @@ class _$ListPaginatedFineTuningJobsResponseImpl (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_data), hasMore, object); - @JsonKey(ignore: true) + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListPaginatedFineTuningJobsResponseImplCopyWith< @@ -16559,21 +17313,23 @@ abstract class _ListPaginatedFineTuningJobsResponse Map json) = _$ListPaginatedFineTuningJobsResponseImpl.fromJson; - @override - /// The list of fine-tuning jobs. - List get data; @override + List get data; /// Whether there are more fine-tuning jobs to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; - @override /// The object type, which is always "list". + @override ListPaginatedFineTuningJobsResponseObject get object; + + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListPaginatedFineTuningJobsResponseImplCopyWith< _$ListPaginatedFineTuningJobsResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -16593,8 +17349,12 @@ mixin _$ListFineTuningJobEventsResponse { ListFineTuningJobEventsResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this ListFineTuningJobEventsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListFineTuningJobEventsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -16623,6 +17383,8 @@ class _$ListFineTuningJobEventsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16666,6 +17428,8 @@ class __$$ListFineTuningJobEventsResponseImplCopyWithImpl<$Res> $Res Function(_$ListFineTuningJobEventsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16727,12 +17491,14 @@ class _$ListFineTuningJobEventsResponseImpl (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_data), object); - @JsonKey(ignore: true) + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListFineTuningJobEventsResponseImplCopyWith< @@ -16759,16 +17525,18 @@ abstract class _ListFineTuningJobEventsResponse factory _ListFineTuningJobEventsResponse.fromJson(Map json) = _$ListFineTuningJobEventsResponseImpl.fromJson; - @override - /// The list of fine-tuning job events. - List get data; @override + List get data; /// The object type, which is always "list". + @override ListFineTuningJobEventsResponseObject get object; + + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListFineTuningJobEventsResponseImplCopyWith< _$ListFineTuningJobEventsResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -16800,8 +17568,12 @@ mixin _$ListFineTuningJobCheckpointsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListFineTuningJobCheckpointsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListFineTuningJobCheckpointsResponseCopyWith< ListFineTuningJobCheckpointsResponse> get copyWith => throw _privateConstructorUsedError; @@ -16834,6 +17606,8 @@ class _$ListFineTuningJobCheckpointsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16895,6 +17669,8 @@ class __$$ListFineTuningJobCheckpointsResponseImplCopyWithImpl<$Res> $Res Function(_$ListFineTuningJobCheckpointsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16993,7 +17769,7 @@ class _$ListFineTuningJobCheckpointsResponseImpl (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -17003,7 +17779,9 @@ class _$ListFineTuningJobCheckpointsResponseImpl lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListFineTuningJobCheckpointsResponseImplCopyWith< @@ -17035,31 +17813,33 @@ abstract class _ListFineTuningJobCheckpointsResponse Map json) = _$ListFineTuningJobCheckpointsResponseImpl.fromJson; - @override - /// The list of fine-tuning job checkpoints. - List get data; @override + List get data; /// The object type, which is always "list". - ListFineTuningJobCheckpointsResponseObject get object; @override + ListFineTuningJobCheckpointsResponseObject get object; /// The ID of the first checkpoint in the list. + @override @JsonKey(name: 'first_id', includeIfNull: false) String? get firstId; - @override /// The ID of the last checkpoint in the list. 
+ @override @JsonKey(name: 'last_id', includeIfNull: false) String? get lastId; - @override /// Whether there are more checkpoints to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListFineTuningJobCheckpointsResponseImplCopyWith< _$ListFineTuningJobCheckpointsResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -17087,8 +17867,12 @@ mixin _$FineTuningJobEvent { /// The object type, which is always "fine_tuning.job.event". FineTuningJobEventObject get object => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobEvent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobEventCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -17117,6 +17901,8 @@ class _$FineTuningJobEventCopyWithImpl<$Res, $Val extends FineTuningJobEvent> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17175,6 +17961,8 @@ class __$$FineTuningJobEventImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobEventImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17262,12 +18050,14 @@ class _$FineTuningJobEventImpl extends _FineTuningJobEvent { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, createdAt, level, message, object); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobEventImplCopyWith<_$FineTuningJobEventImpl> get copyWith => @@ -17295,29 +18085,31 @@ abstract class _FineTuningJobEvent extends FineTuningJobEvent { factory _FineTuningJobEvent.fromJson(Map json) = _$FineTuningJobEventImpl.fromJson; - @override - /// The event identifier, which can be referenced in the API endpoints. - String get id; @override + String get id; /// The Unix timestamp (in seconds) for when the event was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The log level of the event. - FineTuningJobEventLevel get level; @override + FineTuningJobEventLevel get level; /// The message of the event. - String get message; @override + String get message; /// The object type, which is always "fine_tuning.job.event". + @override FineTuningJobEventObject get object; + + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobEventImplCopyWith<_$FineTuningJobEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -17356,8 +18148,12 @@ mixin _$FineTuningJobCheckpoint { FineTuningJobCheckpointObject get object => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobCheckpoint to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobCheckpointCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -17392,6 +18188,8 @@ class _$FineTuningJobCheckpointCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17435,6 +18233,8 @@ class _$FineTuningJobCheckpointCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningJobCheckpointMetricsCopyWith<$Res> get metrics { @@ -17478,6 +18278,8 @@ class __$$FineTuningJobCheckpointImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobCheckpointImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17595,12 +18397,14 @@ class _$FineTuningJobCheckpointImpl extends _FineTuningJobCheckpoint { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, createdAt, fineTunedModelCheckpoint, stepNumber, metrics, fineTuningJobId, object); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobCheckpointImplCopyWith<_$FineTuningJobCheckpointImpl> @@ -17632,40 +18436,42 @@ abstract class _FineTuningJobCheckpoint extends FineTuningJobCheckpoint { factory _FineTuningJobCheckpoint.fromJson(Map json) = _$FineTuningJobCheckpointImpl.fromJson; - @override - /// The checkpoint identifier, which can be referenced in the API endpoints. - String get id; @override + String get id; /// The Unix timestamp (in seconds) for when the checkpoint was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The name of the fine-tuned checkpoint model that is created. + @override @JsonKey(name: 'fine_tuned_model_checkpoint') String get fineTunedModelCheckpoint; - @override /// The step number that the checkpoint was created at. + @override @JsonKey(name: 'step_number') int get stepNumber; - @override /// Metrics at the step number during the fine-tuning job. - FineTuningJobCheckpointMetrics get metrics; @override + FineTuningJobCheckpointMetrics get metrics; /// The name of the fine-tuning job that this checkpoint was created from. + @override @JsonKey(name: 'fine_tuning_job_id') String get fineTuningJobId; - @override /// The object type, which is always "fine_tuning.job.checkpoint". 
+ @override FineTuningJobCheckpointObject get object; + + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobCheckpointImplCopyWith<_$FineTuningJobCheckpointImpl> get copyWith => throw _privateConstructorUsedError; } @@ -17705,8 +18511,12 @@ mixin _$FineTuningJobCheckpointMetrics { @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) double? get fullValidMeanTokenAccuracy => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobCheckpointMetrics to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobCheckpointMetricsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -17744,6 +18554,8 @@ class _$FineTuningJobCheckpointMetricsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17821,6 +18633,8 @@ class __$$FineTuningJobCheckpointMetricsImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobCheckpointMetricsImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17949,7 +18763,7 @@ class _$FineTuningJobCheckpointMetricsImpl fullValidMeanTokenAccuracy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -17961,7 +18775,9 @@ class _$FineTuningJobCheckpointMetricsImpl fullValidLoss, fullValidMeanTokenAccuracy); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobCheckpointMetricsImplCopyWith< @@ -17999,43 +18815,45 @@ abstract class _FineTuningJobCheckpointMetrics factory _FineTuningJobCheckpointMetrics.fromJson(Map json) = _$FineTuningJobCheckpointMetricsImpl.fromJson; - @override - /// The step number that the metrics were recorded at. + @override @JsonKey(includeIfNull: false) double? get step; - @override /// The training loss at the step number. + @override @JsonKey(name: 'train_loss', includeIfNull: false) double? get trainLoss; - @override /// The training mean token accuracy at the step number. + @override @JsonKey(name: 'train_mean_token_accuracy', includeIfNull: false) double? get trainMeanTokenAccuracy; - @override /// The validation loss at the step number. + @override @JsonKey(name: 'valid_loss', includeIfNull: false) double? get validLoss; - @override /// The validation mean token accuracy at the step number. + @override @JsonKey(name: 'valid_mean_token_accuracy', includeIfNull: false) double? get validMeanTokenAccuracy; - @override /// The full validation loss at the step number. + @override @JsonKey(name: 'full_valid_loss', includeIfNull: false) double? get fullValidLoss; - @override /// The full validation mean token accuracy at the step number. 
+ @override @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) double? get fullValidMeanTokenAccuracy; + + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobCheckpointMetricsImplCopyWith< _$FineTuningJobCheckpointMetricsImpl> get copyWith => throw _privateConstructorUsedError; @@ -18083,8 +18901,12 @@ mixin _$CreateImageRequest { @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; + /// Serializes this CreateImageRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateImageRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -18130,6 +18952,8 @@ class _$CreateImageRequestCopyWithImpl<$Res, $Val extends CreateImageRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18178,6 +19002,8 @@ class _$CreateImageRequestCopyWithImpl<$Res, $Val extends CreateImageRequest> ) as $Val); } + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateImageRequestModelCopyWith<$Res>? get model { @@ -18233,6 +19059,8 @@ class __$$CreateImageRequestImplCopyWithImpl<$Res> $Res Function(_$CreateImageRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18377,12 +19205,14 @@ class _$CreateImageRequestImpl extends _CreateImageRequest { (identical(other.user, user) || other.user == user)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, prompt, model, n, quality, responseFormat, size, style, user); - @JsonKey(ignore: true) + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateImageRequestImplCopyWith<_$CreateImageRequestImpl> get copyWith => @@ -18425,52 +19255,54 @@ abstract class _CreateImageRequest extends CreateImageRequest { factory _CreateImageRequest.fromJson(Map json) = _$CreateImageRequestImpl.fromJson; - @override - /// A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. - String get prompt; @override + String get prompt; /// The model to use for image generation. + @override @_CreateImageRequestModelConverter() @JsonKey(includeIfNull: false) CreateImageRequestModel? get model; - @override /// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. + @override @JsonKey(includeIfNull: false) int? get n; - @override /// The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. 
- ImageQuality get quality; @override + ImageQuality get quality; /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. + @override @JsonKey( name: 'response_format', includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ImageResponseFormat? get responseFormat; - @override /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ImageSize? get size; - @override /// The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ImageStyle? get style; - @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override @JsonKey(includeIfNull: false) String? get user; + + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateImageRequestImplCopyWith<_$CreateImageRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -18533,6 +19365,8 @@ mixin _$CreateImageRequestModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateImageRequestModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -18553,6 +19387,9 @@ class _$CreateImageRequestModelCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -18575,6 +19412,8 @@ class __$$CreateImageRequestModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateImageRequestModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18621,11 +19460,13 @@ class _$CreateImageRequestModelEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateImageRequestModelEnumerationImplCopyWith< @@ -18715,7 +19556,10 @@ abstract class CreateImageRequestModelEnumeration @override ImageModels get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateImageRequestModelEnumerationImplCopyWith< _$CreateImageRequestModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -18741,6 +19585,8 @@ class __$$CreateImageRequestModelStringImplCopyWithImpl<$Res> $Res Function(_$CreateImageRequestModelStringImpl) _then) : super(_value, _then); + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18786,11 +19632,13 @@ class _$CreateImageRequestModelStringImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateImageRequestModelStringImplCopyWith< @@ -18878,7 +19726,10 @@ abstract class CreateImageRequestModelString extends CreateImageRequestModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateImageRequestModelStringImplCopyWith< _$CreateImageRequestModelStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -18896,8 +19747,12 @@ mixin _$ImagesResponse { /// The list of images generated by the model. List get data => throw _privateConstructorUsedError; + /// Serializes this ImagesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ImagesResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -18921,6 +19776,8 @@ class _$ImagesResponseCopyWithImpl<$Res, $Val extends ImagesResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18959,6 +19816,8 @@ class __$$ImagesResponseImplCopyWithImpl<$Res> _$ImagesResponseImpl _value, $Res Function(_$ImagesResponseImpl) _then) : super(_value, _then); + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19018,12 +19877,14 @@ class _$ImagesResponseImpl extends _ImagesResponse { const DeepCollectionEquality().equals(other._data, _data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, created, const DeepCollectionEquality().hash(_data)); - @JsonKey(ignore: true) + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ImagesResponseImplCopyWith<_$ImagesResponseImpl> get copyWith => @@ -19047,16 +19908,18 @@ abstract class _ImagesResponse extends ImagesResponse { factory _ImagesResponse.fromJson(Map json) = _$ImagesResponseImpl.fromJson; - @override - /// The Unix timestamp (in seconds) when the image was created. - int get created; @override + int get created; /// The list of images generated by the model. + @override List get data; + + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ImagesResponseImplCopyWith<_$ImagesResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19079,8 +19942,12 @@ mixin _$Image { @JsonKey(name: 'revised_prompt', includeIfNull: false) String? get revisedPrompt => throw _privateConstructorUsedError; + /// Serializes this Image to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ImageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19106,6 +19973,8 @@ class _$ImageCopyWithImpl<$Res, $Val extends Image> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19152,6 +20021,8 @@ class __$$ImageImplCopyWithImpl<$Res> _$ImageImpl _value, $Res Function(_$ImageImpl) _then) : super(_value, _then); + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19220,11 +20091,13 @@ class _$ImageImpl extends _Image { other.revisedPrompt == revisedPrompt)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, b64Json, url, revisedPrompt); - @JsonKey(ignore: true) + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ImageImplCopyWith<_$ImageImpl> get copyWith => @@ -19248,23 +20121,25 @@ abstract class _Image extends Image { factory _Image.fromJson(Map json) = _$ImageImpl.fromJson; - @override - /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + @override @JsonKey(name: 'b64_json', includeIfNull: false) String? get b64Json; - @override /// The URL of the generated image, if `response_format` is `url` (default). + @override @JsonKey(includeIfNull: false) String? get url; - @override /// The prompt that was used to generate the image, if there was any revision to the prompt. + @override @JsonKey(name: 'revised_prompt', includeIfNull: false) String? get revisedPrompt; + + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ImageImplCopyWith<_$ImageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19288,8 +20163,12 @@ mixin _$Model { @JsonKey(name: 'owned_by') String get ownedBy => throw _privateConstructorUsedError; + /// Serializes this Model to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModelCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19315,6 +20194,8 @@ class _$ModelCopyWithImpl<$Res, $Val extends Model> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19366,6 +20247,8 @@ class __$$ModelImplCopyWithImpl<$Res> _$ModelImpl _value, $Res Function(_$ModelImpl) _then) : super(_value, _then); + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19441,11 +20324,13 @@ class _$ModelImpl extends _Model { (identical(other.ownedBy, ownedBy) || other.ownedBy == ownedBy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, created, object, ownedBy); - @JsonKey(ignore: true) + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModelImplCopyWith<_$ModelImpl> get copyWith => @@ -19469,25 +20354,27 @@ abstract class _Model extends Model { factory _Model.fromJson(Map json) = _$ModelImpl.fromJson; - @override - /// The model identifier, which can be referenced in the API endpoints. - String get id; @override + String get id; /// The Unix timestamp (in seconds) when the model was created. - int get created; @override + int get created; /// The object type, which is always "model". - ModelObject get object; @override + ModelObject get object; /// The organization that owns the model. + @override @JsonKey(name: 'owned_by') String get ownedBy; + + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModelImplCopyWith<_$ModelImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19504,8 +20391,12 @@ mixin _$ListModelsResponse { /// The list of models. List get data => throw _privateConstructorUsedError; + /// Serializes this ListModelsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListModelsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19529,6 +20420,8 @@ class _$ListModelsResponseCopyWithImpl<$Res, $Val extends ListModelsResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -19567,6 +20460,8 @@ class __$$ListModelsResponseImplCopyWithImpl<$Res> $Res Function(_$ListModelsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19626,12 +20521,14 @@ class _$ListModelsResponseImpl extends _ListModelsResponse { const DeepCollectionEquality().equals(other._data, _data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, object, const DeepCollectionEquality().hash(_data)); - @JsonKey(ignore: true) + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListModelsResponseImplCopyWith<_$ListModelsResponseImpl> get copyWith => @@ -19655,16 +20552,18 @@ abstract class _ListModelsResponse extends ListModelsResponse { factory _ListModelsResponse.fromJson(Map json) = _$ListModelsResponseImpl.fromJson; - @override - /// The object type, which is always "list". - ListModelsResponseObject get object; @override + ListModelsResponseObject get object; /// The list of models. + @override List get data; + + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListModelsResponseImplCopyWith<_$ListModelsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19684,8 +20583,12 @@ mixin _$DeleteModelResponse { /// The object type, which is always "model". String get object => throw _privateConstructorUsedError; + /// Serializes this DeleteModelResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteModelResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19709,6 +20612,8 @@ class _$DeleteModelResponseCopyWithImpl<$Res, $Val extends DeleteModelResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19752,6 +20657,8 @@ class __$$DeleteModelResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteModelResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19813,11 +20720,13 @@ class _$DeleteModelResponseImpl extends _DeleteModelResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteModelResponseImplCopyWith<_$DeleteModelResponseImpl> get copyWith => @@ -19842,20 +20751,22 @@ abstract class _DeleteModelResponse extends DeleteModelResponse { factory _DeleteModelResponse.fromJson(Map json) = _$DeleteModelResponseImpl.fromJson; - @override - /// The model identifier. - String get id; @override + String get id; /// Whether the model was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always "model". + @override String get object; + + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteModelResponseImplCopyWith<_$DeleteModelResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19878,8 +20789,12 @@ mixin _$CreateModerationRequest { @_ModerationInputConverter() ModerationInput get input => throw _privateConstructorUsedError; + /// Serializes this CreateModerationRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateModerationRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19911,6 +20826,8 @@ class _$CreateModerationRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19929,6 +20846,8 @@ class _$CreateModerationRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationModelCopyWith<$Res>? get model { @@ -19941,6 +20860,8 @@ class _$CreateModerationRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationInputCopyWith<$Res> get input { @@ -19981,6 +20902,8 @@ class __$$CreateModerationRequestImplCopyWithImpl<$Res> $Res Function(_$CreateModerationRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20040,11 +20963,13 @@ class _$CreateModerationRequestImpl extends _CreateModerationRequest { (identical(other.input, input) || other.input == input)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, model, input); - @JsonKey(ignore: true) + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateModerationRequestImplCopyWith<_$CreateModerationRequestImpl> @@ -20071,21 +20996,23 @@ abstract class _CreateModerationRequest extends CreateModerationRequest { factory _CreateModerationRequest.fromJson(Map json) = _$CreateModerationRequestImpl.fromJson; - @override - /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. /// /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + @override @_ModerationModelConverter() @JsonKey(includeIfNull: false) ModerationModel? get model; - @override /// The input text to classify + @override @_ModerationInputConverter() ModerationInput get input; + + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateModerationRequestImplCopyWith<_$CreateModerationRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20144,6 +21071,8 @@ mixin _$ModerationModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ModerationModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -20163,6 +21092,9 @@ class _$ModerationModelCopyWithImpl<$Res, $Val extends ModerationModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -20185,6 +21117,8 @@ class __$$ModerationModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$ModerationModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20229,11 +21163,13 @@ class _$ModerationModelEnumerationImpl extends ModerationModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationModelEnumerationImplCopyWith<_$ModerationModelEnumerationImpl> @@ -20320,7 +21256,10 @@ abstract class ModerationModelEnumeration extends ModerationModel { @override ModerationModels get value; - @JsonKey(ignore: true) + + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationModelEnumerationImplCopyWith<_$ModerationModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20343,6 +21282,8 @@ class __$$ModerationModelStringImplCopyWithImpl<$Res> $Res Function(_$ModerationModelStringImpl) _then) : super(_value, _then); + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -20386,11 +21327,13 @@ class _$ModerationModelStringImpl extends ModerationModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationModelStringImplCopyWith<_$ModerationModelStringImpl> @@ -20477,7 +21420,10 @@ abstract class ModerationModelString extends ModerationModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationModelStringImplCopyWith<_$ModerationModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20536,6 +21482,8 @@ mixin _$ModerationInput { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ModerationInput to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -20555,6 +21503,9 @@ class _$ModerationInputCopyWithImpl<$Res, $Val extends ModerationInput> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -20576,6 +21527,8 @@ class __$$ModerationInputListStringImplCopyWithImpl<$Res> $Res Function(_$ModerationInputListStringImpl) _then) : super(_value, _then); + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20626,12 +21579,14 @@ class _$ModerationInputListStringImpl extends ModerationInputListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationInputListStringImplCopyWith<_$ModerationInputListStringImpl> @@ -20718,7 +21673,10 @@ abstract class ModerationInputListString extends ModerationInput { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationInputListStringImplCopyWith<_$ModerationInputListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20741,6 +21699,8 @@ class __$$ModerationInputStringImplCopyWithImpl<$Res> $Res Function(_$ModerationInputStringImpl) _then) : super(_value, _then); + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -20784,11 +21744,13 @@ class _$ModerationInputStringImpl extends ModerationInputString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationInputStringImplCopyWith<_$ModerationInputStringImpl> @@ -20875,7 +21837,10 @@ abstract class ModerationInputString extends ModerationInput { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationInputStringImplCopyWith<_$ModerationInputStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20896,8 +21861,12 @@ mixin _$CreateModerationResponse { /// A list of moderation objects. List get results => throw _privateConstructorUsedError; + /// Serializes this CreateModerationResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateModerationResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -20922,6 +21891,8 @@ class _$CreateModerationResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20968,6 +21939,8 @@ class __$$CreateModerationResponseImplCopyWithImpl<$Res> $Res Function(_$CreateModerationResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21039,12 +22012,14 @@ class _$CreateModerationResponseImpl extends _CreateModerationResponse { const DeepCollectionEquality().equals(other._results, _results)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, id, model, const DeepCollectionEquality().hash(_results)); - @JsonKey(ignore: true) + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateModerationResponseImplCopyWith<_$CreateModerationResponseImpl> @@ -21070,20 +22045,22 @@ abstract class _CreateModerationResponse extends CreateModerationResponse { factory _CreateModerationResponse.fromJson(Map json) = _$CreateModerationResponseImpl.fromJson; - @override - /// The unique identifier for the moderation request. - String get id; @override + String get id; /// The model used to generate the moderation results. - String get model; @override + String get model; /// A list of moderation objects. + @override List get results; + + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateModerationResponseImplCopyWith<_$CreateModerationResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -21105,8 +22082,12 @@ mixin _$Moderation { ModerationCategoriesScores get categoryScores => throw _privateConstructorUsedError; + /// Serializes this Moderation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModerationCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -21137,6 +22118,8 @@ class _$ModerationCopyWithImpl<$Res, $Val extends Moderation> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21160,6 +22143,8 @@ class _$ModerationCopyWithImpl<$Res, $Val extends Moderation> ) as $Val); } + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationCategoriesCopyWith<$Res> get categories { @@ -21168,6 +22153,8 @@ class _$ModerationCopyWithImpl<$Res, $Val extends Moderation> }); } + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationCategoriesScoresCopyWith<$Res> get categoryScores { @@ -21206,6 +22193,8 @@ class __$$ModerationImplCopyWithImpl<$Res> _$ModerationImpl _value, $Res Function(_$ModerationImpl) _then) : super(_value, _then); + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21272,12 +22261,14 @@ class _$ModerationImpl extends _Moderation { other.categoryScores == categoryScores)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, flagged, categories, categoryScores); - @JsonKey(ignore: true) + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationImplCopyWith<_$ModerationImpl> get copyWith => @@ -21303,21 +22294,23 @@ abstract class _Moderation extends Moderation { factory _Moderation.fromJson(Map json) = _$ModerationImpl.fromJson; - @override - /// Whether any of the below categories are flagged. - bool get flagged; @override + bool get flagged; /// A list of the categories, and whether they are flagged or not. - ModerationCategories get categories; @override + ModerationCategories get categories; /// A list of the categories along with their scores as predicted by model. + @override @JsonKey(name: 'category_scores') ModerationCategoriesScores get categoryScores; + + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationImplCopyWith<_$ModerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -21368,8 +22361,12 @@ mixin _$ModerationCategories { @JsonKey(name: 'violence/graphic') bool get violenceGraphic => throw _privateConstructorUsedError; + /// Serializes this ModerationCategories to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModerationCategoriesCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -21405,6 +22402,8 @@ class _$ModerationCategoriesCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21499,6 +22498,8 @@ class __$$ModerationCategoriesImplCopyWithImpl<$Res> $Res Function(_$ModerationCategoriesImpl) _then) : super(_value, _then); + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21668,7 +22669,7 @@ class _$ModerationCategoriesImpl extends _ModerationCategories { other.violenceGraphic == violenceGraphic)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -21684,7 +22685,9 @@ class _$ModerationCategoriesImpl extends _ModerationCategories { violence, violenceGraphic); - @JsonKey(ignore: true) + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationCategoriesImplCopyWith<_$ModerationCategoriesImpl> @@ -21721,59 +22724,61 @@ abstract class _ModerationCategories extends ModerationCategories { factory _ModerationCategories.fromJson(Map json) = _$ModerationCategoriesImpl.fromJson; - @override - /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. - bool get hate; @override + bool get hate; /// Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + @override @JsonKey(name: 'hate/threatening') bool get hateThreatening; - @override /// Content that expresses, incites, or promotes harassing language towards any target. - bool get harassment; @override + bool get harassment; /// Harassment content that also includes violence or serious harm towards any target. + @override @JsonKey(name: 'harassment/threatening') bool get harassmentThreatening; - @override /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + @override @JsonKey(name: 'self-harm') bool get selfHarm; - @override /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. 
+ @override @JsonKey(name: 'self-harm/intent') bool get selfHarmIntent; - @override /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + @override @JsonKey(name: 'self-harm/instructions') bool get selfHarmInstructions; - @override /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). - bool get sexual; @override + bool get sexual; /// Sexual content that includes an individual who is under 18 years old. + @override @JsonKey(name: 'sexual/minors') bool get sexualMinors; - @override /// Content that depicts death, violence, or physical injury. - bool get violence; @override + bool get violence; /// Content that depicts death, violence, or physical injury in graphic detail. + @override @JsonKey(name: 'violence/graphic') bool get violenceGraphic; + + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationCategoriesImplCopyWith<_$ModerationCategoriesImpl> get copyWith => throw _privateConstructorUsedError; } @@ -21825,8 +22830,12 @@ mixin _$ModerationCategoriesScores { @JsonKey(name: 'violence/graphic') double get violenceGraphic => throw _privateConstructorUsedError; + /// Serializes this ModerationCategoriesScores to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModerationCategoriesScoresCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -21863,6 +22872,8 @@ class _$ModerationCategoriesScoresCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21960,6 +22971,8 @@ class __$$ModerationCategoriesScoresImplCopyWithImpl<$Res> $Res Function(_$ModerationCategoriesScoresImpl) _then) : super(_value, _then); + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22130,7 +23143,7 @@ class _$ModerationCategoriesScoresImpl extends _ModerationCategoriesScores { other.violenceGraphic == violenceGraphic)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -22146,7 +23159,9 @@ class _$ModerationCategoriesScoresImpl extends _ModerationCategoriesScores { violence, violenceGraphic); - @JsonKey(ignore: true) + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationCategoriesScoresImplCopyWith<_$ModerationCategoriesScoresImpl> @@ -22183,59 +23198,61 @@ abstract class _ModerationCategoriesScores extends ModerationCategoriesScores { factory _ModerationCategoriesScores.fromJson(Map json) = _$ModerationCategoriesScoresImpl.fromJson; - @override - /// The score for the category 'hate'. 
- double get hate; @override + double get hate; /// The score for the category 'hate/threatening'. + @override @JsonKey(name: 'hate/threatening') double get hateThreatening; - @override /// The score for the category 'harassment'. - double get harassment; @override + double get harassment; /// The score for the category 'harassment/threatening'. + @override @JsonKey(name: 'harassment/threatening') double get harassmentThreatening; - @override /// The score for the category 'self-harm'. + @override @JsonKey(name: 'self-harm') double get selfHarm; - @override /// The score for the category 'self-harm/intent'. + @override @JsonKey(name: 'self-harm/intent') double get selfHarmIntent; - @override /// The score for the category 'self-harm/instructions'. + @override @JsonKey(name: 'self-harm/instructions') double get selfHarmInstructions; - @override /// The score for the category 'sexual'. - double get sexual; @override + double get sexual; /// The score for the category 'sexual/minors'. + @override @JsonKey(name: 'sexual/minors') double get sexualMinors; - @override /// The score for the category 'violence'. - double get violence; @override + double get violence; /// The score for the category 'violence/graphic'. + @override @JsonKey(name: 'violence/graphic') double get violenceGraphic; + + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationCategoriesScoresImplCopyWith<_$ModerationCategoriesScoresImpl> get copyWith => throw _privateConstructorUsedError; } @@ -22268,38 +23285,59 @@ mixin _$AssistantObject { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. String? get instructions => throw _privateConstructorUsedError; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. List get tools => throw _privateConstructorUsedError; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) AssistantObjectResponseFormat? get responseFormat => throw _privateConstructorUsedError; + /// Serializes this AssistantObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $AssistantObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -22342,6 +23380,8 @@ class _$AssistantObjectCopyWithImpl<$Res, $Val extends AssistantObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22415,6 +23455,8 @@ class _$AssistantObjectCopyWithImpl<$Res, $Val extends AssistantObject> ) as $Val); } + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -22427,6 +23469,8 @@ class _$AssistantObjectCopyWithImpl<$Res, $Val extends AssistantObject> }); } + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantObjectResponseFormatCopyWith<$Res>? get responseFormat { @@ -22481,6 +23525,8 @@ class __$$AssistantObjectImplCopyWithImpl<$Res> _$AssistantObjectImpl _value, $Res Function(_$AssistantObjectImpl) _then) : super(_value, _then); + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22610,10 +23656,12 @@ class _$AssistantObjectImpl extends _AssistantObject { @override final String? instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. final List _tools; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override List get tools { if (_tools is EqualUnmodifiableListView) return _tools; @@ -22626,10 +23674,14 @@ class _$AssistantObjectImpl extends _AssistantObject { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -22639,23 +23691,37 @@ class _$AssistantObjectImpl extends _AssistantObject { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@override @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -22692,7 +23758,7 @@ class _$AssistantObjectImpl extends _AssistantObject { other.responseFormat == responseFormat)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -22710,7 +23776,9 @@ class _$AssistantObjectImpl extends _AssistantObject { topP, responseFormat); - @JsonKey(ignore: true) + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantObjectImplCopyWith<_$AssistantObjectImpl> get copyWith => @@ -22749,72 +23817,91 @@ abstract class _AssistantObject extends AssistantObject { factory _AssistantObject.fromJson(Map json) = _$AssistantObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `assistant`. - AssistantObjectObject get object; @override + AssistantObjectObject get object; /// The Unix timestamp (in seconds) for when the assistant was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The name of the assistant. The maximum length is 256 characters. - String? get name; @override + String? get name; /// The description of the assistant. The maximum length is 512 characters. - String? get description; @override + String? get description; /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. - String get model; @override + String get model; /// The system instructions that the assistant uses. The maximum length is 256,000 characters. - String? get instructions; @override + String? get instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - List get tools; + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override + List get tools; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - Map? get metadata; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override + Map? get metadata; - /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) AssistantObjectResponseFormat? get responseFormat; + + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantObjectImplCopyWith<_$AssistantObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -22888,6 +23975,8 @@ mixin _$AssistantObjectResponseFormat { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this AssistantObjectResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -22910,6 +23999,9 @@ class _$AssistantObjectResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -22932,6 +24024,8 @@ class __$$AssistantObjectResponseFormatEnumerationImplCopyWithImpl<$Res> $Res Function(_$AssistantObjectResponseFormatEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22978,11 +24072,13 @@ class _$AssistantObjectResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantObjectResponseFormatEnumerationImplCopyWith< @@ -23084,7 +24180,10 @@ abstract class AssistantObjectResponseFormatEnumeration @override AssistantResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantObjectResponseFormatEnumerationImplCopyWith< _$AssistantObjectResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -23120,6 +24219,8 @@ class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23133,6 +24234,8 @@ class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< )); } + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsResponseFormatCopyWith<$Res> get value { @@ -23176,11 +24279,13 @@ class _$AssistantObjectResponseFormatAssistantsResponseFormatImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< @@ -23283,7 +24388,10 @@ abstract class AssistantObjectResponseFormatAssistantsResponseFormat @override AssistantsResponseFormat get value; - @JsonKey(ignore: true) + + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -23312,39 +24420,60 @@ mixin _$CreateAssistantRequest { @JsonKey(includeIfNull: false) String? get instructions => throw _privateConstructorUsedError; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. List get tools => throw _privateConstructorUsedError; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateAssistantRequestResponseFormat? get responseFormat => throw _privateConstructorUsedError; + /// Serializes this CreateAssistantRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateAssistantRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -23386,6 +24515,8 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23444,6 +24575,8 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantModelCopyWith<$Res> get model { @@ -23452,6 +24585,8 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? 
get toolResources { @@ -23464,6 +24599,8 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateAssistantRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -23520,6 +24657,8 @@ class __$$CreateAssistantRequestImplCopyWithImpl<$Res> $Res Function(_$CreateAssistantRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23622,10 +24761,12 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { @JsonKey(includeIfNull: false) final String? instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. final List _tools; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override @JsonKey() List get tools { @@ -23639,10 +24780,14 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -23653,23 +24798,37 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? 
temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -23702,7 +24861,7 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { other.responseFormat == responseFormat)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -23717,7 +24876,9 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { topP, responseFormat); - @JsonKey(ignore: true) + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateAssistantRequestImplCopyWith<_$CreateAssistantRequestImpl> @@ -23753,64 +24914,83 @@ abstract class _CreateAssistantRequest extends CreateAssistantRequest { factory _CreateAssistantRequest.fromJson(Map json) = _$CreateAssistantRequestImpl.fromJson; - @override - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @override @_AssistantModelConverter() AssistantModel get model; - @override /// The name of the assistant. The maximum length is 256 characters. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The description of the assistant. The maximum length is 512 characters. + @override @JsonKey(includeIfNull: false) String? get description; - @override /// The system instructions that the assistant uses. The maximum length is 256,000 characters. + @override @JsonKey(includeIfNull: false) String? get instructions; - @override - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - List get tools; + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override + List get tools; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; - @override - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. 
/// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateAssistantRequestResponseFormat? get responseFormat; + + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateAssistantRequestImplCopyWith<_$CreateAssistantRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -23869,6 +25049,8 @@ mixin _$AssistantModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this AssistantModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -23888,6 +25070,9 @@ class _$AssistantModelCopyWithImpl<$Res, $Val extends AssistantModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. 
} /// @nodoc @@ -23909,6 +25094,8 @@ class __$$AssistantModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$AssistantModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23952,11 +25139,13 @@ class _$AssistantModelEnumerationImpl extends AssistantModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantModelEnumerationImplCopyWith<_$AssistantModelEnumerationImpl> @@ -24043,7 +25232,10 @@ abstract class AssistantModelEnumeration extends AssistantModel { @override AssistantModels get value; - @JsonKey(ignore: true) + + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantModelEnumerationImplCopyWith<_$AssistantModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -24065,6 +25257,8 @@ class __$$AssistantModelStringImplCopyWithImpl<$Res> $Res Function(_$AssistantModelStringImpl) _then) : super(_value, _then); + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -24108,11 +25302,13 @@ class _$AssistantModelStringImpl extends AssistantModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantModelStringImplCopyWith<_$AssistantModelStringImpl> @@ -24200,7 +25396,10 @@ abstract class AssistantModelString extends AssistantModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantModelStringImplCopyWith<_$AssistantModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -24274,6 +25473,8 @@ mixin _$CreateAssistantRequestResponseFormat { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateAssistantRequestResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -24296,6 +25497,9 @@ class _$CreateAssistantRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
} /// @nodoc @@ -24322,6 +25526,8 @@ class __$$CreateAssistantRequestResponseFormatEnumerationImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -24368,11 +25574,13 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateAssistantRequestResponseFormatEnumerationImplCopyWith< @@ -24475,7 +25683,10 @@ abstract class CreateAssistantRequestResponseFormatEnumeration @override CreateAssistantResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateAssistantRequestResponseFormatEnumerationImplCopyWith< _$CreateAssistantRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -24512,6 +25723,8 @@ class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi _then) : super(_value, _then); + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -24526,6 +25739,8 @@ class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi )); } + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsResponseFormatCopyWith<$Res> get value { @@ -24570,11 +25785,13 @@ class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< @@ -24679,7 +25896,10 @@ abstract class CreateAssistantRequestResponseFormatAssistantsResponseFormat @override AssistantsResponseFormat get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -24708,7 +25928,8 @@ mixin _$ModifyAssistantRequest { @JsonKey(includeIfNull: false) String? get instructions => throw _privateConstructorUsedError; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. 
Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. List get tools => throw _privateConstructorUsedError; /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. @@ -24719,32 +25940,52 @@ mixin _$ModifyAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
/// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) ModifyAssistantRequestResponseFormat? get responseFormat => throw _privateConstructorUsedError; + /// Serializes this ModifyAssistantRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModifyAssistantRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -24786,6 +26027,8 @@ class _$ModifyAssistantRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -24849,6 +26092,8 @@ class _$ModifyAssistantRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -24861,6 +26106,8 @@ class _$ModifyAssistantRequestCopyWithImpl<$Res, }); } + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModifyAssistantRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -24916,6 +26163,8 @@ class __$$ModifyAssistantRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyAssistantRequestImpl) _then) : super(_value, _then); + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25025,10 +26274,12 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { @JsonKey(includeIfNull: false) final String? instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. 
final List _tools; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override @JsonKey() List get tools { @@ -25054,10 +26305,14 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -25068,23 +26323,37 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. 
/// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -25118,7 +26387,7 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { other.responseFormat == responseFormat)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -25134,7 +26403,9 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { topP, responseFormat); - @JsonKey(ignore: true) + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyAssistantRequestImplCopyWith<_$ModifyAssistantRequestImpl> @@ -25171,69 +26442,88 @@ abstract class _ModifyAssistantRequest extends ModifyAssistantRequest { factory _ModifyAssistantRequest.fromJson(Map json) = _$ModifyAssistantRequestImpl.fromJson; - @override - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @override @JsonKey(includeIfNull: false) String? get model; - @override /// The name of the assistant. The maximum length is 256 characters. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The description of the assistant. The maximum length is 512 characters. + @override @JsonKey(includeIfNull: false) String? get description; - @override /// The system instructions that the assistant uses. The maximum length is 256,000 characters. + @override @JsonKey(includeIfNull: false) String? get instructions; - @override - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. 
Tools can be of types `code_interpreter`, `file_search`, or `function`. - List get tools; + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override + List get tools; /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. + @override @JsonKey(name: 'file_ids') List get fileIds; - @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; - @override - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) ModifyAssistantRequestResponseFormat? get responseFormat; + + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyAssistantRequestImplCopyWith<_$ModifyAssistantRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -25307,6 +26597,8 @@ mixin _$ModifyAssistantRequestResponseFormat { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ModifyAssistantRequestResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -25329,6 +26621,9 @@ class _$ModifyAssistantRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -25355,6 +26650,8 @@ class __$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25401,11 +26698,13 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWith< @@ -25508,7 +26807,10 @@ abstract class ModifyAssistantRequestResponseFormatEnumeration @override ModifyAssistantResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWith< _$ModifyAssistantRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -25545,6 +26847,8 @@ class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi _then) : super(_value, _then); + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25559,6 +26863,8 @@ class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi )); } + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsResponseFormatCopyWith<$Res> get value { @@ -25603,11 +26909,13 @@ class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< @@ -25712,7 +27020,10 @@ abstract class ModifyAssistantRequestResponseFormatAssistantsResponseFormat @override AssistantsResponseFormat get value; - @JsonKey(ignore: true) + + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -25735,8 +27046,12 @@ mixin _$DeleteAssistantResponse { DeleteAssistantResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this DeleteAssistantResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteAssistantResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -25761,6 +27076,8 @@ class _$DeleteAssistantResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -25807,6 +27124,8 @@ class __$$DeleteAssistantResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteAssistantResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25868,11 +27187,13 @@ class _$DeleteAssistantResponseImpl extends _DeleteAssistantResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteAssistantResponseImplCopyWith<_$DeleteAssistantResponseImpl> @@ -25898,20 +27219,22 @@ abstract class _DeleteAssistantResponse extends DeleteAssistantResponse { factory _DeleteAssistantResponse.fromJson(Map json) = _$DeleteAssistantResponseImpl.fromJson; - @override - /// The assistant identifier. - String get id; @override + String get id; /// Whether the assistant was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always `assistant.deleted`. + @override DeleteAssistantResponseObject get object; + + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteAssistantResponseImplCopyWith<_$DeleteAssistantResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -25941,8 +27264,12 @@ mixin _$ListAssistantsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListAssistantsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListAssistantsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -25972,6 +27299,8 @@ class _$ListAssistantsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26033,6 +27362,8 @@ class __$$ListAssistantsResponseImplCopyWithImpl<$Res> $Res Function(_$ListAssistantsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26129,12 +27460,14 @@ class _$ListAssistantsResponseImpl extends _ListAssistantsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListAssistantsResponseImplCopyWith<_$ListAssistantsResponseImpl> @@ -26162,31 +27495,33 @@ abstract class _ListAssistantsResponse extends ListAssistantsResponse { factory _ListAssistantsResponse.fromJson(Map json) = _$ListAssistantsResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// The list of assistants. - List get data; @override + List get data; /// The ID of the first assistant in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last assistant in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more assistants to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListAssistantsResponseImplCopyWith<_$ListAssistantsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -26206,8 +27541,12 @@ mixin _$AssistantsNamedToolChoice { AssistantsFunctionCallOption? get function => throw _privateConstructorUsedError; + /// Serializes this AssistantsNamedToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $AssistantsNamedToolChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -26236,6 +27575,8 @@ class _$AssistantsNamedToolChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26254,6 +27595,8 @@ class _$AssistantsNamedToolChoiceCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsFunctionCallOptionCopyWith<$Res>? get function { @@ -26295,6 +27638,8 @@ class __$$AssistantsNamedToolChoiceImplCopyWithImpl<$Res> $Res Function(_$AssistantsNamedToolChoiceImpl) _then) : super(_value, _then); + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26348,11 +27693,13 @@ class _$AssistantsNamedToolChoiceImpl extends _AssistantsNamedToolChoice { other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, function); - @JsonKey(ignore: true) + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantsNamedToolChoiceImplCopyWith<_$AssistantsNamedToolChoiceImpl> @@ -26378,17 +27725,19 @@ abstract class _AssistantsNamedToolChoice extends AssistantsNamedToolChoice { factory _AssistantsNamedToolChoice.fromJson(Map json) = _$AssistantsNamedToolChoiceImpl.fromJson; - @override - /// The type of the tool. 
If type is `function`, the function name must be set - AssistantsToolType get type; @override + AssistantsToolType get type; /// No Description + @override @JsonKey(includeIfNull: false) AssistantsFunctionCallOption? get function; + + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantsNamedToolChoiceImplCopyWith<_$AssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; } @@ -26403,8 +27752,12 @@ mixin _$AssistantsFunctionCallOption { /// The name of the function to call. String get name => throw _privateConstructorUsedError; + /// Serializes this AssistantsFunctionCallOption to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $AssistantsFunctionCallOptionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -26431,6 +27784,8 @@ class _$AssistantsFunctionCallOptionCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26467,6 +27822,8 @@ class __$$AssistantsFunctionCallOptionImplCopyWithImpl<$Res> $Res Function(_$AssistantsFunctionCallOptionImpl) _then) : super(_value, _then); + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26507,11 +27864,13 @@ class _$AssistantsFunctionCallOptionImpl extends _AssistantsFunctionCallOption { (identical(other.name, name) || other.name == name)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name); - @JsonKey(ignore: true) + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantsFunctionCallOptionImplCopyWith< @@ -26536,12 +27895,14 @@ abstract class _AssistantsFunctionCallOption factory _AssistantsFunctionCallOption.fromJson(Map json) = _$AssistantsFunctionCallOptionImpl.fromJson; - @override - /// The name of the function to call. + @override String get name; + + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantsFunctionCallOptionImplCopyWith< _$AssistantsFunctionCallOptionImpl> get copyWith => throw _privateConstructorUsedError; @@ -26557,8 +27918,12 @@ mixin _$AssistantsResponseFormat { /// Must be one of `text` or `json_object`. AssistantsResponseFormatType get type => throw _privateConstructorUsedError; + /// Serializes this AssistantsResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantsResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $AssistantsResponseFormatCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -26583,6 +27948,8 @@ class _$AssistantsResponseFormatCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantsResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26619,6 +27986,8 @@ class __$$AssistantsResponseFormatImplCopyWithImpl<$Res> $Res Function(_$AssistantsResponseFormatImpl) _then) : super(_value, _then); + /// Create a copy of AssistantsResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26661,11 +28030,13 @@ class _$AssistantsResponseFormatImpl extends _AssistantsResponseFormat { (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of AssistantsResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantsResponseFormatImplCopyWith<_$AssistantsResponseFormatImpl> @@ -26689,12 +28060,14 @@ abstract class _AssistantsResponseFormat extends AssistantsResponseFormat { factory _AssistantsResponseFormat.fromJson(Map json) = _$AssistantsResponseFormatImpl.fromJson; - @override - /// Must be one of `text` or `json_object`. + @override AssistantsResponseFormatType get type; + + /// Create a copy of AssistantsResponseFormat + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantsResponseFormatImplCopyWith<_$AssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -26712,8 +28085,12 @@ mixin _$TruncationObject { @JsonKey(name: 'last_messages', includeIfNull: false) int? get lastMessages => throw _privateConstructorUsedError; + /// Serializes this TruncationObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $TruncationObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -26739,6 +28116,8 @@ class _$TruncationObjectCopyWithImpl<$Res, $Val extends TruncationObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26779,6 +28158,8 @@ class __$$TruncationObjectImplCopyWithImpl<$Res> $Res Function(_$TruncationObjectImpl) _then) : super(_value, _then); + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -26833,11 +28214,13 @@ class _$TruncationObjectImpl extends _TruncationObject { other.lastMessages == lastMessages)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, lastMessages); - @JsonKey(ignore: true) + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$TruncationObjectImplCopyWith<_$TruncationObjectImpl> get copyWith => @@ -26862,17 +28245,19 @@ abstract class _TruncationObject extends TruncationObject { factory _TruncationObject.fromJson(Map json) = _$TruncationObjectImpl.fromJson; - @override - /// The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - TruncationObjectType get type; @override + TruncationObjectType get type; /// The number of most recent messages from the thread when constructing the context for the run. + @override @JsonKey(name: 'last_messages', includeIfNull: false) int? get lastMessages; + + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$TruncationObjectImplCopyWith<_$TruncationObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -26946,7 +28331,9 @@ mixin _$RunObject { /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. List get tools => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -26987,18 +28374,33 @@ mixin _$RunObject { @JsonKey(name: 'parallel_tool_calls') bool? get parallelToolCalls => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
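// --- Editorial sketch, not part of the generated patch above ---
// Building the `TruncationObject` documented in the hunk above: `auto` drops
// messages from the middle of the thread, while `last_messages` keeps only
// the n most recent ones. The constructor shape follows the freezed getters
// shown here; the exact enum member name (`lastMessages`) is an assumption.

import 'package:openai_dart/openai_dart.dart';

/// Truncation strategy that keeps only the [n] most recent thread messages
/// when the run's context is constructed.
TruncationObject keepLastMessages(int n) => TruncationObject(
      type: TruncationObjectType.lastMessages,
      lastMessages: n,
    );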
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat get responseFormat => throw _privateConstructorUsedError; + /// Serializes this RunObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -27062,6 +28464,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -27205,6 +28609,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> ) as $Val); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunRequiredActionCopyWith<$Res>? get requiredAction { @@ -27217,6 +28623,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunLastErrorCopyWith<$Res>? get lastError { @@ -27229,6 +28637,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectIncompleteDetailsCopyWith<$Res>? get incompleteDetails { @@ -27242,6 +28652,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunCompletionUsageCopyWith<$Res>? 
get usage { @@ -27254,6 +28666,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TruncationObjectCopyWith<$Res>? get truncationStrategy { @@ -27266,6 +28680,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectToolChoiceCopyWith<$Res>? get toolChoice { @@ -27278,6 +28694,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectResponseFormatCopyWith<$Res> get responseFormat { @@ -27355,6 +28773,8 @@ class __$$RunObjectImplCopyWithImpl<$Res> _$RunObjectImpl _value, $Res Function(_$RunObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -27627,10 +29047,14 @@ class _$RunObjectImpl extends _RunObject { return EqualUnmodifiableListView(_tools); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -27685,11 +29109,22 @@ class _$RunObjectImpl extends _RunObject { @JsonKey(name: 'parallel_tool_calls') final bool? parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
/// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') @@ -27753,7 +29188,7 @@ class _$RunObjectImpl extends _RunObject { other.responseFormat == responseFormat)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hashAll([ runtimeType, @@ -27786,7 +29221,9 @@ class _$RunObjectImpl extends _RunObject { responseFormat ]); - @JsonKey(ignore: true) + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectImplCopyWith<_$RunObjectImpl> get copyWith => @@ -27843,146 +29280,161 @@ abstract class _RunObject extends RunObject { factory _RunObject.fromJson(Map json) = _$RunObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread.run`. - RunObjectObject get object; @override + RunObjectObject get object; /// The Unix timestamp (in seconds) for when the run was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was executed on as a part of this run. + @override @JsonKey(name: 'thread_id') String get threadId; - @override /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for execution of this run. + @override @JsonKey(name: 'assistant_id') String get assistantId; - @override /// The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. - RunStatus get status; @override + RunStatus get status; /// Details on the action required to continue the run. Will be `null` if no action is required. + @override @JsonKey(name: 'required_action') RunRequiredAction? get requiredAction; - @override /// The last error associated with this run. Will be `null` if there are no errors. + @override @JsonKey(name: 'last_error') RunLastError? get lastError; - @override /// The Unix timestamp (in seconds) for when the run will expire. + @override @JsonKey(name: 'expires_at') int? 
get expiresAt; - @override /// The Unix timestamp (in seconds) for when the run was started. + @override @JsonKey(name: 'started_at') int? get startedAt; - @override /// The Unix timestamp (in seconds) for when the run was cancelled. + @override @JsonKey(name: 'cancelled_at') int? get cancelledAt; - @override /// The Unix timestamp (in seconds) for when the run failed. + @override @JsonKey(name: 'failed_at') int? get failedAt; - @override /// The Unix timestamp (in seconds) for when the run was completed. + @override @JsonKey(name: 'completed_at') int? get completedAt; - @override /// Details on why the run is incomplete. Will be `null` if the run is not incomplete. + @override @JsonKey(name: 'incomplete_details') RunObjectIncompleteDetails? get incompleteDetails; - @override /// The model that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. - String get model; @override + String get model; /// The instructions that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. - String get instructions; @override + String get instructions; /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. - List get tools; @override + List get tools; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - Map? get metadata; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override + Map? get metadata; /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). - RunCompletionUsage? get usage; @override + RunCompletionUsage? get usage; /// The sampling temperature used for this run. If not set, defaults to 1. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override /// The nucleus sampling value used for this run. If not set, defaults to 1. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// The maximum number of prompt tokens specified to have been used over the course of the run. + @override @JsonKey(name: 'max_prompt_tokens') int? get maxPromptTokens; - @override /// The maximum number of completion tokens specified to have been used over the course of the run. + @override @JsonKey(name: 'max_completion_tokens') int? get maxCompletionTokens; - @override /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. + @override @JsonKey(name: 'truncation_strategy') TruncationObject? get truncationStrategy; - @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tools and instead generates a message. /// `auto` is the default value and means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools before responding to the user. /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
+ @override @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') RunObjectToolChoice? get toolChoice; - @override /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. + @override @JsonKey(name: 'parallel_tool_calls') bool? get parallelToolCalls; - @override - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat get responseFormat; + + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectImplCopyWith<_$RunObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -28001,8 +29453,12 @@ mixin _$RunRequiredAction { RunSubmitToolOutputs get submitToolOutputs => throw _privateConstructorUsedError; + /// Serializes this RunRequiredAction to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. 
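// --- Editorial sketch, not part of the generated patch above ---
// The `tool_choice` documentation above allows forcing a specific tool, e.g.
// `{"type": "function", "function": {"name": "my_function"}}`. Expressed with
// the classes in this file (the enum member name is inferred from the freezed
// getters and may differ slightly):

import 'package:openai_dart/openai_dart.dart';

/// Forces the model to call the named function tool on the run.
AssistantsNamedToolChoice forceFunction(String name) =>
    AssistantsNamedToolChoice(
      type: AssistantsToolType.function,
      function: AssistantsFunctionCallOption(name: name),
    );

// Usage: `forceFunction('my_function')` mirrors the JSON example in the doc
// comment above.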
+ @JsonKey(includeFromJson: false, includeToJson: false) $RunRequiredActionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -28031,6 +29487,8 @@ class _$RunRequiredActionCopyWithImpl<$Res, $Val extends RunRequiredAction> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28049,6 +29507,8 @@ class _$RunRequiredActionCopyWithImpl<$Res, $Val extends RunRequiredAction> ) as $Val); } + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunSubmitToolOutputsCopyWith<$Res> get submitToolOutputs { @@ -28084,6 +29544,8 @@ class __$$RunRequiredActionImplCopyWithImpl<$Res> $Res Function(_$RunRequiredActionImpl) _then) : super(_value, _then); + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28138,11 +29600,13 @@ class _$RunRequiredActionImpl extends _RunRequiredAction { other.submitToolOutputs == submitToolOutputs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, submitToolOutputs); - @JsonKey(ignore: true) + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunRequiredActionImplCopyWith<_$RunRequiredActionImpl> get copyWith => @@ -28168,17 +29632,19 @@ abstract class _RunRequiredAction extends RunRequiredAction { factory _RunRequiredAction.fromJson(Map json) = _$RunRequiredActionImpl.fromJson; - @override - /// For now, this is always `submit_tool_outputs`. - RunRequiredActionType get type; @override + RunRequiredActionType get type; /// Details on the tool outputs needed for this run to continue. + @override @JsonKey(name: 'submit_tool_outputs') RunSubmitToolOutputs get submitToolOutputs; + + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunRequiredActionImplCopyWith<_$RunRequiredActionImpl> get copyWith => throw _privateConstructorUsedError; } @@ -28195,8 +29661,12 @@ mixin _$RunLastError { /// A human-readable description of the error. String get message => throw _privateConstructorUsedError; + /// Serializes this RunLastError to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunLastErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -28220,6 +29690,8 @@ class _$RunLastErrorCopyWithImpl<$Res, $Val extends RunLastError> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -28258,6 +29730,8 @@ class __$$RunLastErrorImplCopyWithImpl<$Res> _$RunLastErrorImpl _value, $Res Function(_$RunLastErrorImpl) _then) : super(_value, _then); + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28308,11 +29782,13 @@ class _$RunLastErrorImpl extends _RunLastError { (identical(other.message, message) || other.message == message)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message); - @JsonKey(ignore: true) + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunLastErrorImplCopyWith<_$RunLastErrorImpl> get copyWith => @@ -28335,16 +29811,18 @@ abstract class _RunLastError extends RunLastError { factory _RunLastError.fromJson(Map json) = _$RunLastErrorImpl.fromJson; - @override - /// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - RunLastErrorCode get code; @override + RunLastErrorCode get code; /// A human-readable description of the error. + @override String get message; + + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunLastErrorImplCopyWith<_$RunLastErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -28362,8 +29840,12 @@ mixin _$RunObjectIncompleteDetails { RunObjectIncompleteDetailsReason? get reason => throw _privateConstructorUsedError; + /// Serializes this RunObjectIncompleteDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunObjectIncompleteDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -28393,6 +29875,8 @@ class _$RunObjectIncompleteDetailsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28433,6 +29917,8 @@ class __$$RunObjectIncompleteDetailsImplCopyWithImpl<$Res> $Res Function(_$RunObjectIncompleteDetailsImpl) _then) : super(_value, _then); + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28480,11 +29966,13 @@ class _$RunObjectIncompleteDetailsImpl extends _RunObjectIncompleteDetails { (identical(other.reason, reason) || other.reason == reason)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, reason); - @JsonKey(ignore: true) + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. 
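// --- Editorial sketch, not part of the generated patch above ---
// Surfacing the `last_error` described in the hunk above after a failed run.
// Only getters visible in this file are used (`id`, `lastError`, `code`,
// `message`).

import 'package:openai_dart/openai_dart.dart';

/// Logs why a run failed, if it carries a last error.
void logRunFailure(final RunObject run) {
  final error = run.lastError;
  if (error == null) return; // No error recorded for this run.
  // `code` is one of server_error, rate_limit_exceeded or invalid_prompt.
  print('Run ${run.id} failed: ${error.code} - ${error.message}');
}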
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectIncompleteDetailsImplCopyWith<_$RunObjectIncompleteDetailsImpl> @@ -28511,14 +29999,16 @@ abstract class _RunObjectIncompleteDetails extends RunObjectIncompleteDetails { factory _RunObjectIncompleteDetails.fromJson(Map json) = _$RunObjectIncompleteDetailsImpl.fromJson; - @override - /// The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) RunObjectIncompleteDetailsReason? get reason; + + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectIncompleteDetailsImplCopyWith<_$RunObjectIncompleteDetailsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -28579,6 +30069,8 @@ mixin _$RunObjectToolChoice { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunObjectToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -28598,6 +30090,9 @@ class _$RunObjectToolChoiceCopyWithImpl<$Res, $Val extends RunObjectToolChoice> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -28620,6 +30115,8 @@ class __$$RunObjectToolChoiceEnumerationImplCopyWithImpl<$Res> $Res Function(_$RunObjectToolChoiceEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28665,11 +30162,13 @@ class _$RunObjectToolChoiceEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectToolChoiceEnumerationImplCopyWith< @@ -28760,7 +30259,10 @@ abstract class RunObjectToolChoiceEnumeration extends RunObjectToolChoice { @override RunObjectToolChoiceMode get value; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectToolChoiceEnumerationImplCopyWith< _$RunObjectToolChoiceEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -28791,6 +30293,8 @@ class __$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl<$Res> $Res Function(_$RunObjectToolChoiceAssistantsNamedToolChoiceImpl) _then) : super(_value, _then); + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28804,6 +30308,8 @@ class __$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl<$Res> )); } + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $AssistantsNamedToolChoiceCopyWith<$Res> get value { @@ -28845,11 +30351,13 @@ class _$RunObjectToolChoiceAssistantsNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWith< @@ -28944,7 +30452,10 @@ abstract class RunObjectToolChoiceAssistantsNamedToolChoice @override AssistantsNamedToolChoice get value; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWith< _$RunObjectToolChoiceAssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -29012,6 +30523,8 @@ mixin _$RunObjectResponseFormat { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunObjectResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -29032,6 +30545,9 @@ class _$RunObjectResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -29054,6 +30570,8 @@ class __$$RunObjectResponseFormatEnumerationImplCopyWithImpl<$Res> $Res Function(_$RunObjectResponseFormatEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29100,11 +30618,13 @@ class _$RunObjectResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectResponseFormatEnumerationImplCopyWith< @@ -29199,7 +30719,10 @@ abstract class RunObjectResponseFormatEnumeration @override RunObjectResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectResponseFormatEnumerationImplCopyWith< _$RunObjectResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -29231,6 +30754,8 @@ class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. 
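// --- Editorial sketch, not part of the generated patch above ---
// Reading the `response_format` union on a run. The two concrete variants are
// the classes generated in this file; plain type checks are used here to
// avoid guessing the generated `when`/`map` case names.

import 'package:openai_dart/openai_dart.dart';

String describeResponseFormat(final RunObject run) {
  final format = run.responseFormat;
  if (format is RunObjectResponseFormatEnumeration) {
    return 'mode: ${format.value}'; // e.g. the `auto` mode.
  }
  if (format is RunObjectResponseFormatAssistantsResponseFormat) {
    return 'format: ${format.value.type}'; // `text` or `json_object`.
  }
  return 'unknown response format';
}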
@pragma('vm:prefer-inline') @override $Res call({ @@ -29244,6 +30769,8 @@ class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> )); } + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsResponseFormatCopyWith<$Res> get value { @@ -29285,11 +30812,13 @@ class _$RunObjectResponseFormatAssistantsResponseFormatImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< @@ -29386,7 +30915,10 @@ abstract class RunObjectResponseFormatAssistantsResponseFormat @override AssistantsResponseFormat get value; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< _$RunObjectResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -29402,8 +30934,12 @@ mixin _$RunSubmitToolOutputs { @JsonKey(name: 'tool_calls') List get toolCalls => throw _privateConstructorUsedError; + /// Serializes this RunSubmitToolOutputs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunSubmitToolOutputsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -29428,6 +30964,8 @@ class _$RunSubmitToolOutputsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29461,6 +30999,8 @@ class __$$RunSubmitToolOutputsImplCopyWithImpl<$Res> $Res Function(_$RunSubmitToolOutputsImpl) _then) : super(_value, _then); + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29513,12 +31053,14 @@ class _$RunSubmitToolOutputsImpl extends _RunSubmitToolOutputs { .equals(other._toolCalls, _toolCalls)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_toolCalls)); - @JsonKey(ignore: true) + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunSubmitToolOutputsImplCopyWith<_$RunSubmitToolOutputsImpl> @@ -29544,13 +31086,15 @@ abstract class _RunSubmitToolOutputs extends RunSubmitToolOutputs { factory _RunSubmitToolOutputs.fromJson(Map json) = _$RunSubmitToolOutputsImpl.fromJson; - @override - /// A list of the relevant tool calls. 
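// --- Editorial sketch, not part of the generated patch above ---
// When a run's status is `requires_action`, the pending tool calls are
// reachable through the `RunRequiredAction`/`RunSubmitToolOutputs` pair shown
// above. The element type of `toolCalls` is not visible in this hunk, so the
// loop variable is left untyped here.

import 'package:openai_dart/openai_dart.dart';

void listPendingToolCalls(final RunObject run) {
  final action = run.requiredAction;
  if (action == null) return; // Nothing to submit for this run.
  for (final toolCall in action.submitToolOutputs.toolCalls) {
    print('Pending tool call: $toolCall');
  }
}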
+ @override @JsonKey(name: 'tool_calls') List get toolCalls; + + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunSubmitToolOutputsImplCopyWith<_$RunSubmitToolOutputsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -29573,8 +31117,12 @@ mixin _$RunCompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; + /// Serializes this RunCompletionUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunCompletionUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -29601,6 +31149,8 @@ class _$RunCompletionUsageCopyWithImpl<$Res, $Val extends RunCompletionUsage> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29647,6 +31197,8 @@ class __$$RunCompletionUsageImplCopyWithImpl<$Res> $Res Function(_$RunCompletionUsageImpl) _then) : super(_value, _then); + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29716,12 +31268,14 @@ class _$RunCompletionUsageImpl extends _RunCompletionUsage { other.totalTokens == totalTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); - @JsonKey(ignore: true) + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunCompletionUsageImplCopyWith<_$RunCompletionUsageImpl> get copyWith => @@ -29747,23 +31301,25 @@ abstract class _RunCompletionUsage extends RunCompletionUsage { factory _RunCompletionUsage.fromJson(Map json) = _$RunCompletionUsageImpl.fromJson; - @override - /// Number of completion tokens used over the course of the run. + @override @JsonKey(name: 'completion_tokens') int get completionTokens; - @override /// Number of prompt tokens used over the course of the run. + @override @JsonKey(name: 'prompt_tokens') int get promptTokens; - @override /// Total number of tokens used (prompt + completion). + @override @JsonKey(name: 'total_tokens') int get totalTokens; + + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunCompletionUsageImplCopyWith<_$RunCompletionUsageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -29800,15 +31356,20 @@ mixin _$CreateRunRequest { @JsonKey(includeIfNull: false) List? get tools => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @@ -29842,11 +31403,22 @@ mixin _$CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? get responseFormat => @@ -29856,8 +31428,12 @@ mixin _$CreateRunRequest { @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; + /// Serializes this CreateRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -29914,6 +31490,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30002,6 +31580,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> ) as $Val); } + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateRunRequestModelCopyWith<$Res>? get model { @@ -30014,6 +31594,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> }); } + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TruncationObjectCopyWith<$Res>? get truncationStrategy { @@ -30026,6 +31608,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> }); } + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateRunRequestToolChoiceCopyWith<$Res>? get toolChoice { @@ -30039,6 +31623,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> }); } + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateRunRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -30109,6 +31695,8 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30288,10 +31876,14 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { return EqualUnmodifiableListView(value); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. 
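// --- Editorial sketch, not part of the generated patch above ---
// The JSON-mode response format described in the doc comments above, built
// with the `AssistantsResponseFormat` class from this file. The enum member
// name `jsonObject` is an assumption based on the documented `json_object`
// wire value.

import 'package:openai_dart/openai_dart.dart';

// Guarantees the generated message is valid JSON. As the doc comment warns,
// the model must also be told to produce JSON via a system or user message.
final jsonMode = AssistantsResponseFormat(
  type: AssistantsResponseFormatType.jsonObject,
);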
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -30302,12 +31894,15 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @@ -30345,11 +31940,22 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) final bool? parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. 
+ /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -30399,7 +32005,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { (identical(other.stream, stream) || other.stream == stream)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -30420,7 +32026,9 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { responseFormat, stream); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestImplCopyWith<_$CreateRunRequestImpl> get copyWith => @@ -30471,102 +32079,120 @@ abstract class _CreateRunRequest extends CreateRunRequest { factory _CreateRunRequest.fromJson(Map json) = _$CreateRunRequestImpl.fromJson; - @override - /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. + @override @JsonKey(name: 'assistant_id') String get assistantId; - @override /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + @override @_CreateRunRequestModelConverter() @JsonKey(includeIfNull: false) CreateRunRequestModel? get model; - @override /// Overrides the [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. + @override @JsonKey(includeIfNull: false) String? get instructions; - @override /// Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + @override @JsonKey(name: 'additional_instructions', includeIfNull: false) String? get additionalInstructions; - @override /// Adds additional messages to the thread before creating the run. + @override @JsonKey(name: 'additional_messages', includeIfNull: false) List? get additionalMessages; - @override /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + @override @JsonKey(includeIfNull: false) List? get tools; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; - @override - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + @override @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? get maxPromptTokens; - @override /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + @override @JsonKey(name: 'max_completion_tokens', includeIfNull: false) int? get maxCompletionTokens; - @override /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. + @override @JsonKey(name: 'truncation_strategy', includeIfNull: false) TruncationObject? get truncationStrategy; - @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tools and instead generates a message. /// `auto` is the default value and means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools before responding to the user. /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + @override @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? get toolChoice; - @override /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. + @override @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls; - @override - /// Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? get responseFormat; - @override /// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + @override @JsonKey(includeIfNull: false) bool? get stream; + + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestImplCopyWith<_$CreateRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -30629,6 +32255,8 @@ mixin _$CreateRunRequestModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateRunRequestModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -30649,6 +32277,9 @@ class _$CreateRunRequestModelCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. 
} /// @nodoc @@ -30671,6 +32302,8 @@ class __$$CreateRunRequestModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30717,11 +32350,13 @@ class _$CreateRunRequestModelEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestModelEnumerationImplCopyWith< @@ -30809,7 +32444,10 @@ abstract class CreateRunRequestModelEnumeration extends CreateRunRequestModel { @override RunModels get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestModelEnumerationImplCopyWith< _$CreateRunRequestModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -30835,6 +32473,8 @@ class __$$CreateRunRequestModelStringImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestModelStringImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30879,11 +32519,13 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestModelStringImplCopyWith<_$CreateRunRequestModelStringImpl> @@ -30970,7 +32612,10 @@ abstract class CreateRunRequestModelString extends CreateRunRequestModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestModelStringImplCopyWith<_$CreateRunRequestModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -31038,6 +32683,8 @@ mixin _$CreateRunRequestToolChoice { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateRunRequestToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -31059,6 +32706,9 @@ class _$CreateRunRequestToolChoiceCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. 
} /// @nodoc @@ -31081,6 +32731,8 @@ class __$$CreateRunRequestToolChoiceEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestToolChoiceEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31127,11 +32779,13 @@ class _$CreateRunRequestToolChoiceEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestToolChoiceEnumerationImplCopyWith< @@ -31227,7 +32881,10 @@ abstract class CreateRunRequestToolChoiceEnumeration @override CreateRunRequestToolChoiceMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestToolChoiceEnumerationImplCopyWith< _$CreateRunRequestToolChoiceEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -31263,6 +32920,8 @@ class __$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31276,6 +32935,8 @@ class __$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl< )); } + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsNamedToolChoiceCopyWith<$Res> get value { @@ -31318,11 +32979,13 @@ class _$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< @@ -31420,7 +33083,10 @@ abstract class CreateRunRequestToolChoiceAssistantsNamedToolChoice @override AssistantsNamedToolChoice get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< _$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -31492,6 +33158,8 @@ mixin _$CreateRunRequestResponseFormat { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateRunRequestResponseFormat to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; } @@ -31514,6 +33182,9 @@ class _$CreateRunRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -31536,6 +33207,8 @@ class __$$CreateRunRequestResponseFormatEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestResponseFormatEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31582,11 +33255,13 @@ class _$CreateRunRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestResponseFormatEnumerationImplCopyWith< @@ -31686,7 +33361,10 @@ abstract class CreateRunRequestResponseFormatEnumeration @override CreateRunRequestResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestResponseFormatEnumerationImplCopyWith< _$CreateRunRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -31723,6 +33401,8 @@ class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl _then) : super(_value, _then); + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31736,6 +33416,8 @@ class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl )); } + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsResponseFormatCopyWith<$Res> get value { @@ -31779,11 +33461,13 @@ class _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< @@ -31883,7 +33567,10 @@ abstract class CreateRunRequestResponseFormatAssistantsResponseFormat @override AssistantsResponseFormat get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -31913,8 +33600,12 @@ mixin _$ListRunsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListRunsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListRunsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -31943,6 +33634,8 @@ class _$ListRunsResponseCopyWithImpl<$Res, $Val extends ListRunsResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32001,6 +33694,8 @@ class __$$ListRunsResponseImplCopyWithImpl<$Res> $Res Function(_$ListRunsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32097,12 +33792,14 @@ class _$ListRunsResponseImpl extends _ListRunsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListRunsResponseImplCopyWith<_$ListRunsResponseImpl> get copyWith => @@ -32130,31 +33827,33 @@ abstract class _ListRunsResponse extends ListRunsResponse { factory _ListRunsResponse.fromJson(Map json) = _$ListRunsResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// The list of runs. - List get data; @override + List get data; /// The ID of the first run in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last run in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more runs to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListRunsResponseImplCopyWith<_$ListRunsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32165,12 +33864,18 @@ ModifyRunRequest _$ModifyRunRequestFromJson(Map json) { /// @nodoc mixin _$ModifyRunRequest { - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this ModifyRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModifyRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32194,6 +33899,8 @@ class _$ModifyRunRequestCopyWithImpl<$Res, $Val extends ModifyRunRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32227,6 +33934,8 @@ class __$$ModifyRunRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyRunRequestImpl) _then) : super(_value, _then); + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32252,10 +33961,14 @@ class _$ModifyRunRequestImpl extends _ModifyRunRequest { factory _$ModifyRunRequestImpl.fromJson(Map json) => _$$ModifyRunRequestImplFromJson(json); - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -32279,12 +33992,14 @@ class _$ModifyRunRequestImpl extends _ModifyRunRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyRunRequestImplCopyWith<_$ModifyRunRequestImpl> get copyWith => @@ -32308,13 +34023,17 @@ abstract class _ModifyRunRequest extends ModifyRunRequest { factory _ModifyRunRequest.fromJson(Map json) = _$ModifyRunRequestImpl.fromJson; + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyRunRequestImplCopyWith<_$ModifyRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32335,8 +34054,12 @@ mixin _$SubmitToolOutputsRunRequest { @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; + /// Serializes this SubmitToolOutputsRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $SubmitToolOutputsRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32365,6 +34088,8 @@ class _$SubmitToolOutputsRunRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32408,6 +34133,8 @@ class __$$SubmitToolOutputsRunRequestImplCopyWithImpl<$Res> $Res Function(_$SubmitToolOutputsRunRequestImpl) _then) : super(_value, _then); + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32473,12 +34200,14 @@ class _$SubmitToolOutputsRunRequestImpl extends _SubmitToolOutputsRunRequest { (identical(other.stream, stream) || other.stream == stream)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_toolOutputs), stream); - @JsonKey(ignore: true) + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$SubmitToolOutputsRunRequestImplCopyWith<_$SubmitToolOutputsRunRequestImpl> @@ -32505,18 +34234,20 @@ abstract class _SubmitToolOutputsRunRequest factory _SubmitToolOutputsRunRequest.fromJson(Map json) = _$SubmitToolOutputsRunRequestImpl.fromJson; - @override - /// A list of tools for which the outputs are being submitted. + @override @JsonKey(name: 'tool_outputs') List get toolOutputs; - @override /// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + @override @JsonKey(includeIfNull: false) bool? get stream; + + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$SubmitToolOutputsRunRequestImplCopyWith<_$SubmitToolOutputsRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32535,8 +34266,12 @@ mixin _$RunSubmitToolOutput { @JsonKey(includeIfNull: false) String? get output => throw _privateConstructorUsedError; + /// Serializes this RunSubmitToolOutput to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunSubmitToolOutputCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32562,6 +34297,8 @@ class _$RunSubmitToolOutputCopyWithImpl<$Res, $Val extends RunSubmitToolOutput> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32602,6 +34339,8 @@ class __$$RunSubmitToolOutputImplCopyWithImpl<$Res> $Res Function(_$RunSubmitToolOutputImpl) _then) : super(_value, _then); + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32657,11 +34396,13 @@ class _$RunSubmitToolOutputImpl extends _RunSubmitToolOutput { (identical(other.output, output) || other.output == output)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, toolCallId, output); - @JsonKey(ignore: true) + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunSubmitToolOutputImplCopyWith<_$RunSubmitToolOutputImpl> get copyWith => @@ -32687,18 +34428,20 @@ abstract class _RunSubmitToolOutput extends RunSubmitToolOutput { factory _RunSubmitToolOutput.fromJson(Map json) = _$RunSubmitToolOutputImpl.fromJson; - @override - /// The ID of the tool call in the `required_action` object within the run object the output is being submitted for. + @override @JsonKey(name: 'tool_call_id', includeIfNull: false) String? get toolCallId; - @override /// The output of the tool call to be submitted to continue the run. + @override @JsonKey(includeIfNull: false) String? get output; + + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunSubmitToolOutputImplCopyWith<_$RunSubmitToolOutputImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32718,8 +34461,12 @@ mixin _$RunToolCallObject { /// The function definition. RunToolCallFunction get function => throw _privateConstructorUsedError; + /// Serializes this RunToolCallObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $RunToolCallObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32746,6 +34493,8 @@ class _$RunToolCallObjectCopyWithImpl<$Res, $Val extends RunToolCallObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32769,6 +34518,8 @@ class _$RunToolCallObjectCopyWithImpl<$Res, $Val extends RunToolCallObject> ) as $Val); } + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunToolCallFunctionCopyWith<$Res> get function { @@ -32801,6 +34552,8 @@ class __$$RunToolCallObjectImplCopyWithImpl<$Res> $Res Function(_$RunToolCallObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32863,11 +34616,13 @@ class _$RunToolCallObjectImpl extends _RunToolCallObject { other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunToolCallObjectImplCopyWith<_$RunToolCallObjectImpl> get copyWith => @@ -32892,20 +34647,22 @@ abstract class _RunToolCallObject extends RunToolCallObject { factory _RunToolCallObject.fromJson(Map json) = _$RunToolCallObjectImpl.fromJson; - @override - /// The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) endpoint. - String get id; @override + String get id; /// The type of tool call the output is required for. For now, this is always `function`. - RunToolCallObjectType get type; @override + RunToolCallObjectType get type; /// The function definition. + @override RunToolCallFunction get function; + + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunToolCallObjectImplCopyWith<_$RunToolCallObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32922,8 +34679,12 @@ mixin _$RunToolCallFunction { /// The arguments that the model expects you to pass to the function. String get arguments => throw _privateConstructorUsedError; + /// Serializes this RunToolCallFunction to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunToolCallFunctionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32947,6 +34708,8 @@ class _$RunToolCallFunctionCopyWithImpl<$Res, $Val extends RunToolCallFunction> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -32985,6 +34748,8 @@ class __$$RunToolCallFunctionImplCopyWithImpl<$Res> $Res Function(_$RunToolCallFunctionImpl) _then) : super(_value, _then); + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -33036,11 +34801,13 @@ class _$RunToolCallFunctionImpl extends _RunToolCallFunction { other.arguments == arguments)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments); - @JsonKey(ignore: true) + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunToolCallFunctionImplCopyWith<_$RunToolCallFunctionImpl> get copyWith => @@ -33064,16 +34831,18 @@ abstract class _RunToolCallFunction extends RunToolCallFunction { factory _RunToolCallFunction.fromJson(Map json) = _$RunToolCallFunctionImpl.fromJson; - @override - /// The name of the function. - String get name; @override + String get name; /// The arguments that the model expects you to pass to the function. + @override String get arguments; + + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunToolCallFunctionImplCopyWith<_$RunToolCallFunctionImpl> get copyWith => throw _privateConstructorUsedError; } @@ -33110,15 +34879,20 @@ mixin _$CreateThreadAndRunRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. 
@JsonKey(name: 'top_p', includeIfNull: false) @@ -33152,11 +34926,22 @@ mixin _$CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? get responseFormat => @@ -33166,8 +34951,12 @@ mixin _$CreateThreadAndRunRequest { @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; + /// Serializes this CreateThreadAndRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateThreadAndRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -33226,6 +35015,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -33314,6 +35105,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateThreadRequestCopyWith<$Res>? get thread { @@ -33326,6 +35119,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ThreadAndRunModelCopyWith<$Res>? get model { @@ -33338,6 +35133,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -33350,6 +35147,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TruncationObjectCopyWith<$Res>? get truncationStrategy { @@ -33362,6 +35161,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateThreadAndRunRequestToolChoiceCopyWith<$Res>? get toolChoice { @@ -33375,6 +35176,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateThreadAndRunRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -33451,6 +35254,8 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> $Res Function(_$CreateThreadAndRunRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -33615,10 +35420,14 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? 
get metadata { @@ -33629,12 +35438,15 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @@ -33672,11 +35484,22 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) final bool? parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-4o-mini-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@override @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -33725,7 +35548,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { (identical(other.stream, stream) || other.stream == stream)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -33746,7 +35569,9 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { responseFormat, stream); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestImplCopyWith<_$CreateThreadAndRunRequestImpl> @@ -33796,102 +35621,120 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { factory _CreateThreadAndRunRequest.fromJson(Map json) = _$CreateThreadAndRunRequestImpl.fromJson; - @override - /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. + @override @JsonKey(name: 'assistant_id') String get assistantId; - @override /// If no thread is provided, an empty thread will be created. + @override @JsonKey(includeIfNull: false) CreateThreadRequest? get thread; - @override /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + @override @_ThreadAndRunModelConverter() @JsonKey(includeIfNull: false) ThreadAndRunModel? get model; - @override /// Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. + @override @JsonKey(includeIfNull: false) String? get instructions; - @override /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + @override @JsonKey(includeIfNull: false) List? get tools; - @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; - @override - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + @override @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? get maxPromptTokens; - @override /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + @override @JsonKey(name: 'max_completion_tokens', includeIfNull: false) int? get maxCompletionTokens; - @override /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. + @override @JsonKey(name: 'truncation_strategy', includeIfNull: false) TruncationObject? get truncationStrategy; - @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tools and instead generates a message. /// `auto` is the default value and means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools before responding to the user. /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + @override @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? get toolChoice; - @override /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) /// during tool use. + @override @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls; - @override - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. 
/// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? get responseFormat; - @override /// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + @override @JsonKey(includeIfNull: false) bool? get stream; + + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestImplCopyWith<_$CreateThreadAndRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -33950,6 +35793,8 @@ mixin _$ThreadAndRunModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ThreadAndRunModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -33969,6 +35814,9 @@ class _$ThreadAndRunModelCopyWithImpl<$Res, $Val extends ThreadAndRunModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -33991,6 +35839,8 @@ class __$$ThreadAndRunModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$ThreadAndRunModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. 
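// --- Illustrative sketch (editor's addition, not part of the generated file) ---
// The CreateThreadAndRunRequest getters above document the wire format of the
// "create thread and run" call: the snake_case keys come from the @JsonKey names,
// and `response_format: {"type": "json_object"}` enables the JSON mode described
// in the doc comment. Every concrete value below is a hypothetical placeholder.
import 'dart:convert';

Map<String, dynamic> createThreadAndRunBodySketch() {
  final body = <String, dynamic>{
    'assistant_id': 'asst_abc123', // hypothetical assistant ID
    'model': 'gpt-4o',
    'instructions': 'Reply with a single JSON object.', // required when using JSON mode
    'temperature': 0.2,
    'top_p': 1.0,
    'max_prompt_tokens': 2000,
    'max_completion_tokens': 500,
    'response_format': {'type': 'json_object'}, // JSON mode, per the doc comment above
    'tool_choice': {'type': 'file_search'}, // force a specific tool instead of `auto`
    'parallel_tool_calls': true,
    'stream': false,
  };
  // The generated toJson()/fromJson() pair should round-trip a shape like this.
  print(jsonEncode(body));
  return body;
}
// --- end sketch ---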
@pragma('vm:prefer-inline') @override $Res call({ @@ -34035,11 +35885,13 @@ class _$ThreadAndRunModelEnumerationImpl extends ThreadAndRunModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ThreadAndRunModelEnumerationImplCopyWith< @@ -34127,7 +35979,10 @@ abstract class ThreadAndRunModelEnumeration extends ThreadAndRunModel { @override ThreadAndRunModels get value; - @JsonKey(ignore: true) + + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ThreadAndRunModelEnumerationImplCopyWith< _$ThreadAndRunModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -34152,6 +36007,8 @@ class __$$ThreadAndRunModelStringImplCopyWithImpl<$Res> $Res Function(_$ThreadAndRunModelStringImpl) _then) : super(_value, _then); + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34195,11 +36052,13 @@ class _$ThreadAndRunModelStringImpl extends ThreadAndRunModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ThreadAndRunModelStringImplCopyWith<_$ThreadAndRunModelStringImpl> @@ -34286,7 +36145,10 @@ abstract class ThreadAndRunModelString extends ThreadAndRunModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ThreadAndRunModelStringImplCopyWith<_$ThreadAndRunModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -34361,6 +36223,8 @@ mixin _$CreateThreadAndRunRequestToolChoice { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateThreadAndRunRequestToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -34383,6 +36247,9 @@ class _$CreateThreadAndRunRequestToolChoiceCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -34408,6 +36275,8 @@ class __$$CreateThreadAndRunRequestToolChoiceEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateThreadAndRunRequestToolChoiceEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -34454,11 +36323,13 @@ class _$CreateThreadAndRunRequestToolChoiceEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestToolChoiceEnumerationImplCopyWith< @@ -34562,7 +36433,10 @@ abstract class CreateThreadAndRunRequestToolChoiceEnumeration @override CreateThreadAndRunRequestToolChoiceMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestToolChoiceEnumerationImplCopyWith< _$CreateThreadAndRunRequestToolChoiceEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -34599,6 +36473,8 @@ class __$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWi _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34613,6 +36489,8 @@ class __$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWi )); } + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsNamedToolChoiceCopyWith<$Res> get value { @@ -34657,11 +36535,13 @@ class _$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< @@ -34767,7 +36647,10 @@ abstract class CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoice @override AssistantsNamedToolChoice get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< _$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -34847,6 +36730,8 @@ mixin _$CreateThreadAndRunRequestResponseFormat { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateThreadAndRunRequestResponseFormat to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; } @@ -34870,6 +36755,9 @@ class _$CreateThreadAndRunRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -34900,6 +36788,8 @@ class __$$CreateThreadAndRunRequestResponseFormatEnumerationImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34946,11 +36836,13 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestResponseFormatEnumerationImplCopyWith< @@ -35057,7 +36949,10 @@ abstract class CreateThreadAndRunRequestResponseFormatEnumeration @override CreateThreadAndRunRequestResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestResponseFormatEnumerationImplCopyWith< _$CreateThreadAndRunRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -35096,6 +36991,8 @@ class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCop _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35110,6 +37007,8 @@ class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCop )); } + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsResponseFormatCopyWith<$Res> get value { @@ -35154,11 +37053,13 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< @@ -35267,7 +37168,10 @@ abstract class CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat @override AssistantsResponseFormat get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; @@ -35293,11 +37197,17 @@ mixin _$ThreadObject { @JsonKey(name: 'tool_resources') ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this ThreadObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ThreadObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -35328,6 +37238,8 @@ class _$ThreadObjectCopyWithImpl<$Res, $Val extends ThreadObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35361,6 +37273,8 @@ class _$ThreadObjectCopyWithImpl<$Res, $Val extends ThreadObject> ) as $Val); } + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -35401,6 +37315,8 @@ class __$$ThreadObjectImplCopyWithImpl<$Res> _$ThreadObjectImpl _value, $Res Function(_$ThreadObjectImpl) _then) : super(_value, _then); + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35468,10 +37384,14 @@ class _$ThreadObjectImpl extends _ThreadObject { @JsonKey(name: 'tool_resources') final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? 
get metadata { final value = _metadata; @@ -35500,12 +37420,14 @@ class _$ThreadObjectImpl extends _ThreadObject { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, object, createdAt, toolResources, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ThreadObjectImplCopyWith<_$ThreadObjectImpl> get copyWith => @@ -35532,30 +37454,34 @@ abstract class _ThreadObject extends ThreadObject { factory _ThreadObject.fromJson(Map json) = _$ThreadObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread`. - ThreadObjectObject get object; @override + ThreadObjectObject get object; /// The Unix timestamp (in seconds) for when the thread was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources') ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override Map? get metadata; + + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ThreadObjectImplCopyWith<_$ThreadObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -35575,12 +37501,18 @@ mixin _$CreateThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this CreateThreadRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $CreateThreadRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -35610,6 +37542,8 @@ class _$CreateThreadRequestCopyWithImpl<$Res, $Val extends CreateThreadRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35633,6 +37567,8 @@ class _$CreateThreadRequestCopyWithImpl<$Res, $Val extends CreateThreadRequest> ) as $Val); } + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -35672,6 +37608,8 @@ class __$$CreateThreadRequestImplCopyWithImpl<$Res> $Res Function(_$CreateThreadRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35730,10 +37668,14 @@ class _$CreateThreadRequestImpl extends _CreateThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -35760,7 +37702,7 @@ class _$CreateThreadRequestImpl extends _CreateThreadRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -35768,7 +37710,9 @@ class _$CreateThreadRequestImpl extends _CreateThreadRequest { toolResources, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadRequestImplCopyWith<_$CreateThreadRequestImpl> get copyWith => @@ -35796,23 +37740,27 @@ abstract class _CreateThreadRequest extends CreateThreadRequest { factory _CreateThreadRequest.fromJson(Map json) = _$CreateThreadRequestImpl.fromJson; - @override - /// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with. + @override @JsonKey(includeIfNull: false) List? 
get messages; - @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadRequestImplCopyWith<_$CreateThreadRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -35827,12 +37775,18 @@ mixin _$ModifyThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this ModifyThreadRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModifyThreadRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -35861,6 +37815,8 @@ class _$ModifyThreadRequestCopyWithImpl<$Res, $Val extends ModifyThreadRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35879,6 +37835,8 @@ class _$ModifyThreadRequestCopyWithImpl<$Res, $Val extends ModifyThreadRequest> ) as $Val); } + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -35917,6 +37875,8 @@ class __$$ModifyThreadRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyThreadRequestImpl) _then) : super(_value, _then); + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. 
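// --- Illustrative sketch (editor's addition, not part of the generated file) ---
// A minimal usage sketch of the CreateThreadRequest members documented above,
// assuming the public CreateThreadRequest class in package:openai_dart that this
// generated code backs (fromJson, copyWith and metadata are the members shown in
// the diff; the 'source' metadata entry is hypothetical).
import 'package:openai_dart/openai_dart.dart';

CreateThreadRequest tagThreadRequest(Map<String, dynamic> rawJson) {
  final request = CreateThreadRequest.fromJson(rawJson);
  // copyWith returns a copy with the given fields replaced by the non-null
  // parameter values, as its doc comment above says. Metadata keys may be at
  // most 64 characters and values at most 512 characters.
  return request.copyWith(
    metadata: {
      ...?request.metadata,
      'source': 'docs-example',
    },
  );
}
// --- end sketch ---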
@pragma('vm:prefer-inline') @override $Res call({ @@ -35954,10 +37914,14 @@ class _$ModifyThreadRequestImpl extends _ModifyThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -35983,12 +37947,14 @@ class _$ModifyThreadRequestImpl extends _ModifyThreadRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, toolResources, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyThreadRequestImplCopyWith<_$ModifyThreadRequestImpl> get copyWith => @@ -36014,18 +37980,22 @@ abstract class _ModifyThreadRequest extends ModifyThreadRequest { factory _ModifyThreadRequest.fromJson(Map json) = _$ModifyThreadRequestImpl.fromJson; - @override - /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyThreadRequestImplCopyWith<_$ModifyThreadRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -36045,8 +38015,12 @@ mixin _$ToolResources { @JsonKey(name: 'file_search', includeIfNull: false) ToolResourcesFileSearch? get fileSearch => throw _privateConstructorUsedError; + /// Serializes this ToolResources to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ToolResourcesCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -36077,6 +38051,8 @@ class _$ToolResourcesCopyWithImpl<$Res, $Val extends ToolResources> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36095,6 +38071,8 @@ class _$ToolResourcesCopyWithImpl<$Res, $Val extends ToolResources> ) as $Val); } + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCodeInterpreterCopyWith<$Res>? get codeInterpreter { @@ -36108,6 +38086,8 @@ class _$ToolResourcesCopyWithImpl<$Res, $Val extends ToolResources> }); } + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesFileSearchCopyWith<$Res>? get fileSearch { @@ -36149,6 +38129,8 @@ class __$$ToolResourcesImplCopyWithImpl<$Res> _$ToolResourcesImpl _value, $Res Function(_$ToolResourcesImpl) _then) : super(_value, _then); + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36206,11 +38188,13 @@ class _$ToolResourcesImpl extends _ToolResources { other.fileSearch == fileSearch)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, codeInterpreter, fileSearch); - @JsonKey(ignore: true) + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ToolResourcesImplCopyWith<_$ToolResourcesImpl> get copyWith => @@ -36235,18 +38219,20 @@ abstract class _ToolResources extends ToolResources { factory _ToolResources.fromJson(Map json) = _$ToolResourcesImpl.fromJson; - @override - /// No Description + @override @JsonKey(name: 'code_interpreter', includeIfNull: false) ToolResourcesCodeInterpreter? get codeInterpreter; - @override /// No Description + @override @JsonKey(name: 'file_search', includeIfNull: false) ToolResourcesFileSearch? get fileSearch; + + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ToolResourcesImplCopyWith<_$ToolResourcesImpl> get copyWith => throw _privateConstructorUsedError; } @@ -36262,8 +38248,12 @@ mixin _$ToolResourcesCodeInterpreter { @JsonKey(name: 'file_ids') List get fileIds => throw _privateConstructorUsedError; + /// Serializes this ToolResourcesCodeInterpreter to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ToolResourcesCodeInterpreterCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -36290,6 +38280,8 @@ class _$ToolResourcesCodeInterpreterCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36326,6 +38318,8 @@ class __$$ToolResourcesCodeInterpreterImplCopyWithImpl<$Res> $Res Function(_$ToolResourcesCodeInterpreterImpl) _then) : super(_value, _then); + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36377,12 +38371,14 @@ class _$ToolResourcesCodeInterpreterImpl extends _ToolResourcesCodeInterpreter { const DeepCollectionEquality().equals(other._fileIds, _fileIds)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_fileIds)); - @JsonKey(ignore: true) + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ToolResourcesCodeInterpreterImplCopyWith< @@ -36408,13 +38404,15 @@ abstract class _ToolResourcesCodeInterpreter factory _ToolResourcesCodeInterpreter.fromJson(Map json) = _$ToolResourcesCodeInterpreterImpl.fromJson; - @override - /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + @override @JsonKey(name: 'file_ids') List get fileIds; + + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ToolResourcesCodeInterpreterImplCopyWith< _$ToolResourcesCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -36436,8 +38434,12 @@ mixin _$ToolResourcesFileSearch { List? get vectorStores => throw _privateConstructorUsedError; + /// Serializes this ToolResourcesFileSearch to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ToolResourcesFileSearchCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -36466,6 +38468,8 @@ class _$ToolResourcesFileSearchCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36511,6 +38515,8 @@ class __$$ToolResourcesFileSearchImplCopyWithImpl<$Res> $Res Function(_$ToolResourcesFileSearchImpl) _then) : super(_value, _then); + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -36589,14 +38595,16 @@ class _$ToolResourcesFileSearchImpl extends _ToolResourcesFileSearch { .equals(other._vectorStores, _vectorStores)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_vectorStoreIds), const DeepCollectionEquality().hash(_vectorStores)); - @JsonKey(ignore: true) + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ToolResourcesFileSearchImplCopyWith<_$ToolResourcesFileSearchImpl> @@ -36623,18 +38631,20 @@ abstract class _ToolResourcesFileSearch extends ToolResourcesFileSearch { factory _ToolResourcesFileSearch.fromJson(Map json) = _$ToolResourcesFileSearchImpl.fromJson; - @override - /// The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. + @override @JsonKey(name: 'vector_store_ids', includeIfNull: false) List? get vectorStoreIds; - @override /// A helper to create a [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. + @override @JsonKey(name: 'vector_stores', includeIfNull: false) List? get vectorStores; + + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ToolResourcesFileSearchImplCopyWith<_$ToolResourcesFileSearchImpl> get copyWith => throw _privateConstructorUsedError; } @@ -36660,8 +38670,12 @@ mixin _$ToolResourcesFileSearchVectorStore { @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; + /// Serializes this ToolResourcesFileSearchVectorStore to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ToolResourcesFileSearchVectorStoreCopyWith< ToolResourcesFileSearchVectorStore> get copyWith => throw _privateConstructorUsedError; @@ -36695,6 +38709,8 @@ class _$ToolResourcesFileSearchVectorStoreCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36718,6 +38734,8 @@ class _$ToolResourcesFileSearchVectorStoreCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { @@ -36761,6 +38779,8 @@ class __$$ToolResourcesFileSearchVectorStoreImplCopyWithImpl<$Res> $Res Function(_$ToolResourcesFileSearchVectorStoreImpl) _then) : super(_value, _then); + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. 
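// --- Illustrative sketch (editor's addition, not part of the generated file) ---
// Wire format of the ToolResources family documented above, with keys taken from
// the @JsonKey annotations ('code_interpreter'/'file_ids' and
// 'file_search'/'vector_store_ids'). The IDs are hypothetical placeholders, and the
// public ToolResources.fromJson constructor is assumed to accompany this generated code.
import 'package:openai_dart/openai_dart.dart';

ToolResources toolResourcesSketch() {
  final json = <String, dynamic>{
    'code_interpreter': {
      'file_ids': ['file-abc123'], // at most 20 files for the code_interpreter tool
    },
    'file_search': {
      'vector_store_ids': ['vs_abc123'], // at most 1 vector store per thread
    },
  };
  return ToolResources.fromJson(json);
}
// --- end sketch ---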
@pragma('vm:prefer-inline') @override $Res call({ @@ -36843,7 +38863,7 @@ class _$ToolResourcesFileSearchVectorStoreImpl const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -36851,7 +38871,9 @@ class _$ToolResourcesFileSearchVectorStoreImpl chunkingStrategy, const DeepCollectionEquality().hash(metadata)); - @JsonKey(ignore: true) + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ToolResourcesFileSearchVectorStoreImplCopyWith< @@ -36882,24 +38904,26 @@ abstract class _ToolResourcesFileSearchVectorStore Map json) = _$ToolResourcesFileSearchVectorStoreImpl.fromJson; - @override - /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. + @override @JsonKey(name: 'file_ids', includeIfNull: false) List? get fileIds; - @override /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override @JsonKey(name: 'chunking_strategy', includeIfNull: false) ChunkingStrategyRequestParam? get chunkingStrategy; - @override /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) dynamic get metadata; + + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ToolResourcesFileSearchVectorStoreImplCopyWith< _$ToolResourcesFileSearchVectorStoreImpl> get copyWith => throw _privateConstructorUsedError; @@ -36920,8 +38944,12 @@ mixin _$DeleteThreadResponse { /// The object type, which is always `thread.deleted`. DeleteThreadResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this DeleteThreadResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteThreadResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -36946,6 +38974,8 @@ class _$DeleteThreadResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36989,6 +39019,8 @@ class __$$DeleteThreadResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteThreadResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -37050,11 +39082,13 @@ class _$DeleteThreadResponseImpl extends _DeleteThreadResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteThreadResponseImplCopyWith<_$DeleteThreadResponseImpl> @@ -37081,20 +39115,22 @@ abstract class _DeleteThreadResponse extends DeleteThreadResponse { factory _DeleteThreadResponse.fromJson(Map json) = _$DeleteThreadResponseImpl.fromJson; - @override - /// The thread identifier. - String get id; @override + String get id; /// Whether the thread was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always `thread.deleted`. + @override DeleteThreadResponseObject get object; + + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteThreadResponseImplCopyWith<_$DeleteThreadResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -37123,8 +39159,12 @@ mixin _$ListThreadsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListThreadsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListThreadsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -37153,6 +39193,8 @@ class _$ListThreadsResponseCopyWithImpl<$Res, $Val extends ListThreadsResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37211,6 +39253,8 @@ class __$$ListThreadsResponseImplCopyWithImpl<$Res> $Res Function(_$ListThreadsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37307,12 +39351,14 @@ class _$ListThreadsResponseImpl extends _ListThreadsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListThreadsResponseImplCopyWith<_$ListThreadsResponseImpl> get copyWith => @@ -37340,31 +39386,33 @@ abstract class _ListThreadsResponse extends ListThreadsResponse { factory _ListThreadsResponse.fromJson(Map json) = _$ListThreadsResponseImpl.fromJson; - @override - /// The object type, which is always `list`. 
- String get object; @override + String get object; /// The list of threads. - List get data; @override + List get data; /// The ID of the first thread in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last thread in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more threads to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListThreadsResponseImplCopyWith<_$ListThreadsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -37424,11 +39472,17 @@ mixin _$MessageObject { List? get attachments => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this MessageObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -37470,6 +39524,8 @@ class _$MessageObjectCopyWithImpl<$Res, $Val extends MessageObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37548,6 +39604,8 @@ class _$MessageObjectCopyWithImpl<$Res, $Val extends MessageObject> ) as $Val); } + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageObjectIncompleteDetailsCopyWith<$Res>? get incompleteDetails { @@ -37600,6 +39658,8 @@ class __$$MessageObjectImplCopyWithImpl<$Res> _$MessageObjectImpl _value, $Res Function(_$MessageObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37782,10 +39842,14 @@ class _$MessageObjectImpl extends _MessageObject { return EqualUnmodifiableListView(value); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -37828,7 +39892,7 @@ class _$MessageObjectImpl extends _MessageObject { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -37847,7 +39911,9 @@ class _$MessageObjectImpl extends _MessageObject { const DeepCollectionEquality().hash(_attachments), const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageObjectImplCopyWith<_$MessageObjectImpl> get copyWith => @@ -37884,72 +39950,76 @@ abstract class _MessageObject extends MessageObject { factory _MessageObject.fromJson(Map json) = _$MessageObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread.message`. - MessageObjectObject get object; @override + MessageObjectObject get object; /// The Unix timestamp (in seconds) for when the message was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The [thread](https://platform.openai.com/docs/api-reference/threads) ID that this message belongs to. + @override @JsonKey(name: 'thread_id') String get threadId; - @override /// The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. + @override @JsonKey(unknownEnumValue: JsonKey.nullForUndefinedEnumValue) MessageObjectStatus? get status; - @override /// On an incomplete message, details about why the message is incomplete. + @override @JsonKey(name: 'incomplete_details') MessageObjectIncompleteDetails? get incompleteDetails; - @override /// The Unix timestamp (in seconds) for when the message was completed. + @override @JsonKey(name: 'completed_at') int? get completedAt; - @override /// The Unix timestamp (in seconds) for when the message was marked as incomplete. + @override @JsonKey(name: 'incomplete_at') int? get incompleteAt; - @override /// The entity that produced the message. One of `user` or `assistant`. - MessageRole get role; @override + MessageRole get role; /// The content of the message in array of text and/or images. - List get content; @override + List get content; /// If applicable, the ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) that authored this message. + @override @JsonKey(name: 'assistant_id') String? get assistantId; - @override /// The ID of the [run](https://platform.openai.com/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. + @override @JsonKey(name: 'run_id') String? get runId; - @override /// A list of files attached to the message, and the tools they were added to. - List? 
get attachments; @override + List? get attachments; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override Map? get metadata; + + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageObjectImplCopyWith<_$MessageObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -37965,8 +40035,12 @@ mixin _$MessageObjectIncompleteDetails { MessageObjectIncompleteDetailsReason get reason => throw _privateConstructorUsedError; + /// Serializes this MessageObjectIncompleteDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageObjectIncompleteDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -37993,6 +40067,8 @@ class _$MessageObjectIncompleteDetailsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38029,6 +40105,8 @@ class __$$MessageObjectIncompleteDetailsImplCopyWithImpl<$Res> $Res Function(_$MessageObjectIncompleteDetailsImpl) _then) : super(_value, _then); + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38071,11 +40149,13 @@ class _$MessageObjectIncompleteDetailsImpl (identical(other.reason, reason) || other.reason == reason)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, reason); - @JsonKey(ignore: true) + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageObjectIncompleteDetailsImplCopyWith< @@ -38101,12 +40181,14 @@ abstract class _MessageObjectIncompleteDetails factory _MessageObjectIncompleteDetails.fromJson(Map json) = _$MessageObjectIncompleteDetailsImpl.fromJson; - @override - /// The reason the message is incomplete. + @override MessageObjectIncompleteDetailsReason get reason; + + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageObjectIncompleteDetailsImplCopyWith< _$MessageObjectIncompleteDetailsImpl> get copyWith => throw _privateConstructorUsedError; @@ -38126,8 +40208,12 @@ mixin _$MessageAttachment { @JsonKey(includeIfNull: false) List? 
get tools => throw _privateConstructorUsedError; + /// Serializes this MessageAttachment to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageAttachmentCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38153,6 +40239,8 @@ class _$MessageAttachmentCopyWithImpl<$Res, $Val extends MessageAttachment> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38193,6 +40281,8 @@ class __$$MessageAttachmentImplCopyWithImpl<$Res> $Res Function(_$MessageAttachmentImpl) _then) : super(_value, _then); + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38257,12 +40347,14 @@ class _$MessageAttachmentImpl extends _MessageAttachment { const DeepCollectionEquality().equals(other._tools, _tools)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, fileId, const DeepCollectionEquality().hash(_tools)); - @JsonKey(ignore: true) + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageAttachmentImplCopyWith<_$MessageAttachmentImpl> get copyWith => @@ -38287,18 +40379,20 @@ abstract class _MessageAttachment extends MessageAttachment { factory _MessageAttachment.fromJson(Map json) = _$MessageAttachmentImpl.fromJson; - @override - /// The ID of the file to attach to the message. + @override @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; - @override /// The tools to add this file to. + @override @JsonKey(includeIfNull: false) List? get tools; + + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageAttachmentImplCopyWith<_$MessageAttachmentImpl> get copyWith => throw _privateConstructorUsedError; } @@ -38318,8 +40412,12 @@ mixin _$MessageDeltaObject { /// The delta containing the fields that have changed on the Message. MessageDelta get delta => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38345,6 +40443,8 @@ class _$MessageDeltaObjectCopyWithImpl<$Res, $Val extends MessageDeltaObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -38368,6 +40468,8 @@ class _$MessageDeltaObjectCopyWithImpl<$Res, $Val extends MessageDeltaObject> ) as $Val); } + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageDeltaCopyWith<$Res> get delta { @@ -38399,6 +40501,8 @@ class __$$MessageDeltaObjectImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38460,11 +40564,13 @@ class _$MessageDeltaObjectImpl extends _MessageDeltaObject { (identical(other.delta, delta) || other.delta == delta)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, object, delta); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaObjectImplCopyWith<_$MessageDeltaObjectImpl> get copyWith => @@ -38489,20 +40595,22 @@ abstract class _MessageDeltaObject extends MessageDeltaObject { factory _MessageDeltaObject.fromJson(Map json) = _$MessageDeltaObjectImpl.fromJson; - @override - /// The identifier of the message, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread.message.delta`. - MessageDeltaObjectObject get object; @override + MessageDeltaObjectObject get object; /// The delta containing the fields that have changed on the Message. + @override MessageDelta get delta; + + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaObjectImplCopyWith<_$MessageDeltaObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -38522,8 +40630,12 @@ mixin _$MessageDelta { @JsonKey(includeIfNull: false) List? get content => throw _privateConstructorUsedError; + /// Serializes this MessageDelta to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38552,6 +40664,8 @@ class _$MessageDeltaCopyWithImpl<$Res, $Val extends MessageDelta> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38595,6 +40709,8 @@ class __$$MessageDeltaImplCopyWithImpl<$Res> _$MessageDeltaImpl _value, $Res Function(_$MessageDeltaImpl) _then) : super(_value, _then); + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -38663,12 +40779,14 @@ class _$MessageDeltaImpl extends _MessageDelta { const DeepCollectionEquality().equals(other._content, _content)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, role, const DeepCollectionEquality().hash(_content)); - @JsonKey(ignore: true) + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => @@ -38695,19 +40813,21 @@ abstract class _MessageDelta extends MessageDelta { factory _MessageDelta.fromJson(Map json) = _$MessageDeltaImpl.fromJson; - @override - /// The entity that produced the message. One of `user` or `assistant`. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) MessageRole? get role; - @override /// The content of the message in array of text and/or images. + @override @JsonKey(includeIfNull: false) List? get content; + + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => throw _privateConstructorUsedError; } @@ -38730,12 +40850,18 @@ mixin _$CreateMessageRequest { List? get attachments => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this CreateMessageRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateMessageRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38767,6 +40893,8 @@ class _$CreateMessageRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38795,6 +40923,8 @@ class _$CreateMessageRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateMessageRequestContentCopyWith<$Res> get content { @@ -38831,6 +40961,8 @@ class __$$CreateMessageRequestImplCopyWithImpl<$Res> $Res Function(_$CreateMessageRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -38898,10 +41030,14 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { return EqualUnmodifiableListView(value); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -38929,7 +41065,7 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -38938,7 +41074,9 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { const DeepCollectionEquality().hash(_attachments), const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> @@ -38967,27 +41105,31 @@ abstract class _CreateMessageRequest extends CreateMessageRequest { factory _CreateMessageRequest.fromJson(Map json) = _$CreateMessageRequestImpl.fromJson; - @override - /// The entity that produced the message. One of `user` or `assistant`. - MessageRole get role; @override + MessageRole get role; /// The content of the message. + @override @_CreateMessageRequestContentConverter() CreateMessageRequestContent get content; - @override /// A list of files attached to the message, and the tools they were added to. + @override @JsonKey(includeIfNull: false) List? get attachments; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -39054,6 +41196,8 @@ mixin _$CreateMessageRequestContent { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateMessageRequestContent to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -39076,6 +41220,9 @@ class _$CreateMessageRequestContentCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -39101,6 +41248,8 @@ class __$$CreateMessageRequestContentListMessageContentImplCopyWithImpl<$Res> $Res Function(_$CreateMessageRequestContentListMessageContentImpl) _then) : super(_value, _then); + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39154,12 +41303,14 @@ class _$CreateMessageRequestContentListMessageContentImpl const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateMessageRequestContentListMessageContentImplCopyWith< @@ -39256,7 +41407,10 @@ abstract class CreateMessageRequestContentListMessageContent @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateMessageRequestContentListMessageContentImplCopyWith< _$CreateMessageRequestContentListMessageContentImpl> get copyWith => throw _privateConstructorUsedError; @@ -39282,6 +41436,8 @@ class __$$CreateMessageRequestContentStringImplCopyWithImpl<$Res> $Res Function(_$CreateMessageRequestContentStringImpl) _then) : super(_value, _then); + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39328,11 +41484,13 @@ class _$CreateMessageRequestContentStringImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateMessageRequestContentStringImplCopyWith< @@ -39426,7 +41584,10 @@ abstract class CreateMessageRequestContentString @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateMessageRequestContentStringImplCopyWith< _$CreateMessageRequestContentStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -39438,12 +41599,18 @@ ModifyMessageRequest _$ModifyMessageRequestFromJson(Map json) { /// @nodoc mixin _$ModifyMessageRequest { - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this ModifyMessageRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModifyMessageRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39468,6 +41635,8 @@ class _$ModifyMessageRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39501,6 +41670,8 @@ class __$$ModifyMessageRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyMessageRequestImpl) _then) : super(_value, _then); + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39526,10 +41697,14 @@ class _$ModifyMessageRequestImpl extends _ModifyMessageRequest { factory _$ModifyMessageRequestImpl.fromJson(Map json) => _$$ModifyMessageRequestImplFromJson(json); - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? 
get metadata { @@ -39553,12 +41728,14 @@ class _$ModifyMessageRequestImpl extends _ModifyMessageRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyMessageRequestImplCopyWith<_$ModifyMessageRequestImpl> @@ -39583,13 +41760,17 @@ abstract class _ModifyMessageRequest extends ModifyMessageRequest { factory _ModifyMessageRequest.fromJson(Map json) = _$ModifyMessageRequestImpl.fromJson; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyMessageRequestImplCopyWith<_$ModifyMessageRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -39610,8 +41791,12 @@ mixin _$DeleteMessageResponse { /// The object type, which is always `thread.message.deleted`. DeleteMessageResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this DeleteMessageResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteMessageResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39636,6 +41821,8 @@ class _$DeleteMessageResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39681,6 +41868,8 @@ class __$$DeleteMessageResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteMessageResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39742,11 +41931,13 @@ class _$DeleteMessageResponseImpl extends _DeleteMessageResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteMessageResponseImplCopyWith<_$DeleteMessageResponseImpl> @@ -39772,20 +41963,22 @@ abstract class _DeleteMessageResponse extends DeleteMessageResponse { factory _DeleteMessageResponse.fromJson(Map json) = _$DeleteMessageResponseImpl.fromJson; - @override - /// The message identifier. - String get id; @override + String get id; /// Whether the message was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always `thread.message.deleted`. + @override DeleteMessageResponseObject get object; + + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteMessageResponseImplCopyWith<_$DeleteMessageResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -39814,8 +42007,12 @@ mixin _$ListMessagesResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListMessagesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListMessagesResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39845,6 +42042,8 @@ class _$ListMessagesResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39903,6 +42102,8 @@ class __$$ListMessagesResponseImplCopyWithImpl<$Res> $Res Function(_$ListMessagesResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39999,12 +42200,14 @@ class _$ListMessagesResponseImpl extends _ListMessagesResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListMessagesResponseImplCopyWith<_$ListMessagesResponseImpl> @@ -40033,31 +42236,33 @@ abstract class _ListMessagesResponse extends ListMessagesResponse { factory _ListMessagesResponse.fromJson(Map json) = _$ListMessagesResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// The list of messages. - List get data; @override + List get data; /// The ID of the first message in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last message in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more messages to retrieve. 
+ @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListMessagesResponseImplCopyWith<_$ListMessagesResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -40076,8 +42281,12 @@ mixin _$MessageContentImageFile { /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. MessageContentImageDetail get detail => throw _privateConstructorUsedError; + /// Serializes this MessageContentImageFile to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentImageFileCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40104,6 +42313,8 @@ class _$MessageContentImageFileCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40147,6 +42358,8 @@ class __$$MessageContentImageFileImplCopyWithImpl<$Res> $Res Function(_$MessageContentImageFileImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40201,11 +42414,13 @@ class _$MessageContentImageFileImpl extends _MessageContentImageFile { (identical(other.detail, detail) || other.detail == detail)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId, detail); - @JsonKey(ignore: true) + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentImageFileImplCopyWith<_$MessageContentImageFileImpl> @@ -40229,17 +42444,19 @@ abstract class _MessageContentImageFile extends MessageContentImageFile { factory _MessageContentImageFile.fromJson(Map json) = _$MessageContentImageFileImpl.fromJson; - @override - /// The [File](https://platform.openai.com/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. + @override @JsonKey(name: 'file_id') String get fileId; - @override /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + @override MessageContentImageDetail get detail; + + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentImageFileImplCopyWith<_$MessageContentImageFileImpl> get copyWith => throw _privateConstructorUsedError; } @@ -40257,8 +42474,12 @@ mixin _$MessageContentImageUrl { /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. 
MessageContentImageDetail get detail => throw _privateConstructorUsedError; + /// Serializes this MessageContentImageUrl to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentImageUrlCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40283,6 +42504,8 @@ class _$MessageContentImageUrlCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40324,6 +42547,8 @@ class __$$MessageContentImageUrlImplCopyWithImpl<$Res> $Res Function(_$MessageContentImageUrlImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40376,11 +42601,13 @@ class _$MessageContentImageUrlImpl extends _MessageContentImageUrl { (identical(other.detail, detail) || other.detail == detail)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, url, detail); - @JsonKey(ignore: true) + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentImageUrlImplCopyWith<_$MessageContentImageUrlImpl> @@ -40404,16 +42631,18 @@ abstract class _MessageContentImageUrl extends MessageContentImageUrl { factory _MessageContentImageUrl.fromJson(Map json) = _$MessageContentImageUrlImpl.fromJson; - @override - /// The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp. - String get url; @override + String get url; /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + @override MessageContentImageDetail get detail; + + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentImageUrlImplCopyWith<_$MessageContentImageUrlImpl> get copyWith => throw _privateConstructorUsedError; } @@ -40431,8 +42660,12 @@ mixin _$MessageRequestContentTextObject { /// Text content to be sent to the model String get text => throw _privateConstructorUsedError; + /// Serializes this MessageRequestContentTextObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageRequestContentTextObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40459,6 +42692,8 @@ class _$MessageRequestContentTextObjectCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -40500,6 +42735,8 @@ class __$$MessageRequestContentTextObjectImplCopyWithImpl<$Res> $Res Function(_$MessageRequestContentTextObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40553,11 +42790,13 @@ class _$MessageRequestContentTextObjectImpl (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, text); - @JsonKey(ignore: true) + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageRequestContentTextObjectImplCopyWith< @@ -40583,16 +42822,18 @@ abstract class _MessageRequestContentTextObject factory _MessageRequestContentTextObject.fromJson(Map json) = _$MessageRequestContentTextObjectImpl.fromJson; - @override - /// Always `text`. - String get type; @override + String get type; /// Text content to be sent to the model + @override String get text; + + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageRequestContentTextObjectImplCopyWith< _$MessageRequestContentTextObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -40612,8 +42853,12 @@ mixin _$MessageContentText { List? get annotations => throw _privateConstructorUsedError; + /// Serializes this MessageContentText to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentTextCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40640,6 +42885,8 @@ class _$MessageContentTextCopyWithImpl<$Res, $Val extends MessageContentText> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40681,6 +42928,8 @@ class __$$MessageContentTextImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40746,12 +42995,14 @@ class _$MessageContentTextImpl extends _MessageContentText { .equals(other._annotations, _annotations)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, value, const DeepCollectionEquality().hash(_annotations)); - @JsonKey(ignore: true) + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => @@ -40777,17 +43028,19 @@ abstract class _MessageContentText extends MessageContentText { factory _MessageContentText.fromJson(Map json) = _$MessageContentTextImpl.fromJson; - @override - /// The data that makes up the text. - String get value; @override + String get value; /// A list of annotations that point to specific quotes from specific files. + @override @JsonKey(includeIfNull: false) List? get annotations; + + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => throw _privateConstructorUsedError; } @@ -40804,8 +43057,12 @@ mixin _$MessageContentTextAnnotationsFileCitation { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; + /// Serializes this MessageContentTextAnnotationsFileCitation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentTextAnnotationsFileCitationCopyWith< MessageContentTextAnnotationsFileCitation> get copyWith => throw _privateConstructorUsedError; @@ -40834,6 +43091,8 @@ class _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40870,6 +43129,8 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextAnnotationsFileCitationImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40914,11 +43175,13 @@ class _$MessageContentTextAnnotationsFileCitationImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextAnnotationsFileCitationImplCopyWith< @@ -40947,13 +43210,15 @@ abstract class _MessageContentTextAnnotationsFileCitation Map json) = _$MessageContentTextAnnotationsFileCitationImpl.fromJson; - @override - /// The ID of the specific File the citation is from. + @override @JsonKey(name: 'file_id') String get fileId; + + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextAnnotationsFileCitationImplCopyWith< _$MessageContentTextAnnotationsFileCitationImpl> get copyWith => throw _privateConstructorUsedError; @@ -40978,8 +43243,12 @@ mixin _$MessageDeltaContentImageUrlObject { @JsonKey(name: 'image_url', includeIfNull: false) MessageContentImageUrl? get imageUrl => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaContentImageUrlObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContentImageUrlObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentImageUrlObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -41012,6 +43281,8 @@ class _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentImageUrlObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41035,6 +43306,8 @@ class _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of MessageDeltaContentImageUrlObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageContentImageUrlCopyWith<$Res>? get imageUrl { @@ -41077,6 +43350,8 @@ class __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentImageUrlObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentImageUrlObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41146,11 +43421,13 @@ class _$MessageDeltaContentImageUrlObjectImpl other.imageUrl == imageUrl)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, type, imageUrl); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentImageUrlObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentImageUrlObjectImplCopyWith< @@ -41180,23 +43457,25 @@ abstract class _MessageDeltaContentImageUrlObject Map json) = _$MessageDeltaContentImageUrlObjectImpl.fromJson; - @override - /// The index of the content part in the message. + @override @JsonKey(includeIfNull: false) int? get index; - @override /// Always `image_url`. + @override @JsonKey(includeIfNull: false) String? get type; - @override /// The image URL part of a message. + @override @JsonKey(name: 'image_url', includeIfNull: false) MessageContentImageUrl? get imageUrl; + + /// Create a copy of MessageDeltaContentImageUrlObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentImageUrlObjectImplCopyWith< _$MessageDeltaContentImageUrlObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -41218,8 +43497,12 @@ mixin _$MessageDeltaContentText { List? get annotations => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaContentText to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentTextCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -41247,6 +43530,8 @@ class _$MessageDeltaContentTextCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41291,6 +43576,8 @@ class __$$MessageDeltaContentTextImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentTextImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41357,12 +43644,14 @@ class _$MessageDeltaContentTextImpl extends _MessageDeltaContentText { .equals(other._annotations, _annotations)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, value, const DeepCollectionEquality().hash(_annotations)); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextImplCopyWith<_$MessageDeltaContentTextImpl> @@ -41388,18 +43677,20 @@ abstract class _MessageDeltaContentText extends MessageDeltaContentText { factory _MessageDeltaContentText.fromJson(Map json) = _$MessageDeltaContentTextImpl.fromJson; - @override - /// The data that makes up the text. + @override @JsonKey(includeIfNull: false) String? get value; - @override /// A list of annotations that point to specific quotes from specific files. + @override @JsonKey(includeIfNull: false) List? get annotations; + + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextImplCopyWith<_$MessageDeltaContentTextImpl> get copyWith => throw _privateConstructorUsedError; } @@ -41420,8 +43711,12 @@ mixin _$MessageDeltaContentTextAnnotationsFileCitation { @JsonKey(includeIfNull: false) String? get quote => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaContentTextAnnotationsFileCitation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentTextAnnotationsFileCitationCopyWith< MessageDeltaContentTextAnnotationsFileCitation> get copyWith => throw _privateConstructorUsedError; @@ -41452,6 +43747,8 @@ class _$MessageDeltaContentTextAnnotationsFileCitationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -41498,6 +43795,8 @@ class __$$MessageDeltaContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentTextAnnotationsFileCitationImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41554,11 +43853,13 @@ class _$MessageDeltaContentTextAnnotationsFileCitationImpl (identical(other.quote, quote) || other.quote == quote)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId, quote); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextAnnotationsFileCitationImplCopyWith< @@ -41588,18 +43889,20 @@ abstract class _MessageDeltaContentTextAnnotationsFileCitation Map json) = _$MessageDeltaContentTextAnnotationsFileCitationImpl.fromJson; - @override - /// The ID of the specific File the citation is from. + @override @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; - @override /// The specific quote in the file. + @override @JsonKey(includeIfNull: false) String? get quote; + + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextAnnotationsFileCitationImplCopyWith< _$MessageDeltaContentTextAnnotationsFileCitationImpl> get copyWith => throw _privateConstructorUsedError; @@ -41664,14 +43967,20 @@ mixin _$RunStepObject { @JsonKey(name: 'completed_at') int? get completedAt => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. RunStepCompletionUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this RunStepObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -41715,6 +44024,8 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -41803,6 +44114,8 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> ) as $Val); } + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDetailsCopyWith<$Res> get stepDetails { @@ -41811,6 +44124,8 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> }); } + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepLastErrorCopyWith<$Res>? get lastError { @@ -41823,6 +44138,8 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> }); } + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepCompletionUsageCopyWith<$Res>? get usage { @@ -41878,6 +44195,8 @@ class __$$RunStepObjectImplCopyWithImpl<$Res> _$RunStepObjectImpl _value, $Res Function(_$RunStepObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42060,10 +44379,14 @@ class _$RunStepObjectImpl extends _RunStepObject { @JsonKey(name: 'completed_at') final int? completedAt; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -42114,7 +44437,7 @@ class _$RunStepObjectImpl extends _RunStepObject { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -42135,7 +44458,9 @@ class _$RunStepObjectImpl extends _RunStepObject { const DeepCollectionEquality().hash(_metadata), usage); - @JsonKey(ignore: true) + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepObjectImplCopyWith<_$RunStepObjectImpl> get copyWith => @@ -42172,83 +44497,87 @@ abstract class _RunStepObject extends RunStepObject { factory _RunStepObject.fromJson(Map json) = _$RunStepObjectImpl.fromJson; - @override - /// The identifier of the run step, which can be referenced in API endpoints. 
- String get id; @override + String get id; /// The object type, which is always `thread.run.step`. - RunStepObjectObject get object; @override + RunStepObjectObject get object; /// The Unix timestamp (in seconds) for when the run step was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) associated with the run step. + @override @JsonKey(name: 'assistant_id') String get assistantId; - @override /// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was run. + @override @JsonKey(name: 'thread_id') String get threadId; - @override /// The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that this run step is a part of. + @override @JsonKey(name: 'run_id') String get runId; - @override /// The type of run step, which can be either `message_creation` or `tool_calls`. - RunStepType get type; @override + RunStepType get type; /// The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. - RunStepStatus get status; @override + RunStepStatus get status; /// The details of the run step. /// Any of: [RunStepDetailsMessageCreationObject], [RunStepDetailsToolCallsObject] + @override @JsonKey(name: 'step_details') RunStepDetails get stepDetails; - @override /// The last error associated with this run step. Will be `null` if there are no errors. + @override @JsonKey(name: 'last_error') RunStepLastError? get lastError; - @override /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + @override @JsonKey(name: 'expired_at') int? get expiredAt; - @override /// The Unix timestamp (in seconds) for when the run step was cancelled. + @override @JsonKey(name: 'cancelled_at') int? get cancelledAt; - @override /// The Unix timestamp (in seconds) for when the run step failed. + @override @JsonKey(name: 'failed_at') int? get failedAt; - @override /// The Unix timestamp (in seconds) for when the run step completed. + @override @JsonKey(name: 'completed_at') int? get completedAt; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - Map? get metadata; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override + Map? get metadata; /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. + @override RunStepCompletionUsage? get usage; + + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepObjectImplCopyWith<_$RunStepObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42265,8 +44594,12 @@ mixin _$RunStepLastError { /// A human-readable description of the error. String get message => throw _privateConstructorUsedError; + /// Serializes this RunStepLastError to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepLastErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42290,6 +44623,8 @@ class _$RunStepLastErrorCopyWithImpl<$Res, $Val extends RunStepLastError> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42328,6 +44663,8 @@ class __$$RunStepLastErrorImplCopyWithImpl<$Res> $Res Function(_$RunStepLastErrorImpl) _then) : super(_value, _then); + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42378,11 +44715,13 @@ class _$RunStepLastErrorImpl extends _RunStepLastError { (identical(other.message, message) || other.message == message)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message); - @JsonKey(ignore: true) + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepLastErrorImplCopyWith<_$RunStepLastErrorImpl> get copyWith => @@ -42406,16 +44745,18 @@ abstract class _RunStepLastError extends RunStepLastError { factory _RunStepLastError.fromJson(Map json) = _$RunStepLastErrorImpl.fromJson; - @override - /// One of `server_error` or `rate_limit_exceeded`. - RunStepLastErrorCode get code; @override + RunStepLastErrorCode get code; /// A human-readable description of the error. + @override String get message; + + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepLastErrorImplCopyWith<_$RunStepLastErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42435,8 +44776,12 @@ mixin _$RunStepDeltaObject { /// The delta containing the fields that have changed on the run step. RunStepDelta get delta => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42462,6 +44807,8 @@ class _$RunStepDeltaObjectCopyWithImpl<$Res, $Val extends RunStepDeltaObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42485,6 +44832,8 @@ class _$RunStepDeltaObjectCopyWithImpl<$Res, $Val extends RunStepDeltaObject> ) as $Val); } + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $RunStepDeltaCopyWith<$Res> get delta { @@ -42516,6 +44865,8 @@ class __$$RunStepDeltaObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42577,11 +44928,13 @@ class _$RunStepDeltaObjectImpl extends _RunStepDeltaObject { (identical(other.delta, delta) || other.delta == delta)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, object, delta); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaObjectImplCopyWith<_$RunStepDeltaObjectImpl> get copyWith => @@ -42606,20 +44959,22 @@ abstract class _RunStepDeltaObject extends RunStepDeltaObject { factory _RunStepDeltaObject.fromJson(Map json) = _$RunStepDeltaObjectImpl.fromJson; - @override - /// The identifier of the run step, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread.run.step.delta`. - RunStepDeltaObjectObject get object; @override + RunStepDeltaObjectObject get object; /// The delta containing the fields that have changed on the run step. + @override RunStepDelta get delta; + + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaObjectImplCopyWith<_$RunStepDeltaObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42635,8 +44990,12 @@ mixin _$RunStepDelta { @JsonKey(name: 'step_details', includeIfNull: false) RunStepDeltaDetails? get stepDetails => throw _privateConstructorUsedError; + /// Serializes this RunStepDelta to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42664,6 +45023,8 @@ class _$RunStepDeltaCopyWithImpl<$Res, $Val extends RunStepDelta> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42677,6 +45038,8 @@ class _$RunStepDeltaCopyWithImpl<$Res, $Val extends RunStepDelta> ) as $Val); } + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaDetailsCopyWith<$Res>? get stepDetails { @@ -42714,6 +45077,8 @@ class __$$RunStepDeltaImplCopyWithImpl<$Res> _$RunStepDeltaImpl _value, $Res Function(_$RunStepDeltaImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -42758,11 +45123,13 @@ class _$RunStepDeltaImpl extends _RunStepDelta { other.stepDetails == stepDetails)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, stepDetails); - @JsonKey(ignore: true) + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaImplCopyWith<_$RunStepDeltaImpl> get copyWith => @@ -42785,14 +45152,16 @@ abstract class _RunStepDelta extends RunStepDelta { factory _RunStepDelta.fromJson(Map json) = _$RunStepDeltaImpl.fromJson; - @override - /// The details of the run step /// Any of: [RunStepDeltaStepDetailsMessageCreationObject], [RunStepDeltaStepDetailsToolCallsObject] + @override @JsonKey(name: 'step_details', includeIfNull: false) RunStepDeltaDetails? get stepDetails; + + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaImplCopyWith<_$RunStepDeltaImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42821,8 +45190,12 @@ mixin _$ListRunStepsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListRunStepsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListRunStepsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42852,6 +45225,8 @@ class _$ListRunStepsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42910,6 +45285,8 @@ class __$$ListRunStepsResponseImplCopyWithImpl<$Res> $Res Function(_$ListRunStepsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43006,12 +45383,14 @@ class _$ListRunStepsResponseImpl extends _ListRunStepsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListRunStepsResponseImplCopyWith<_$ListRunStepsResponseImpl> @@ -43040,31 +45419,33 @@ abstract class _ListRunStepsResponse extends ListRunStepsResponse { factory _ListRunStepsResponse.fromJson(Map json) = _$ListRunStepsResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// The list of run steps. - List get data; @override + List get data; /// The ID of the first run step in the list. 
+ @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last run step in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more run steps to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListRunStepsResponseImplCopyWith<_$ListRunStepsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -43080,8 +45461,12 @@ mixin _$RunStepDetailsMessageCreation { @JsonKey(name: 'message_id') String get messageId => throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsMessageCreation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsMessageCreationCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -43108,6 +45493,8 @@ class _$RunStepDetailsMessageCreationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43144,6 +45531,8 @@ class __$$RunStepDetailsMessageCreationImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsMessageCreationImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43189,11 +45578,13 @@ class _$RunStepDetailsMessageCreationImpl other.messageId == messageId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, messageId); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsMessageCreationImplCopyWith< @@ -43219,13 +45610,15 @@ abstract class _RunStepDetailsMessageCreation factory _RunStepDetailsMessageCreation.fromJson(Map json) = _$RunStepDetailsMessageCreationImpl.fromJson; - @override - /// The ID of the message that was created by this run step. + @override @JsonKey(name: 'message_id') String get messageId; + + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsMessageCreationImplCopyWith< _$RunStepDetailsMessageCreationImpl> get copyWith => throw _privateConstructorUsedError; @@ -43243,8 +45636,12 @@ mixin _$RunStepDeltaStepDetailsMessageCreation { @JsonKey(name: 'message_id', includeIfNull: false) String? get messageId => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaStepDetailsMessageCreation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsMessageCreationCopyWith< RunStepDeltaStepDetailsMessageCreation> get copyWith => throw _privateConstructorUsedError; @@ -43273,6 +45670,8 @@ class _$RunStepDeltaStepDetailsMessageCreationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43310,6 +45709,8 @@ class __$$RunStepDeltaStepDetailsMessageCreationImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsMessageCreationImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43355,11 +45756,13 @@ class _$RunStepDeltaStepDetailsMessageCreationImpl other.messageId == messageId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, messageId); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsMessageCreationImplCopyWith< @@ -43387,13 +45790,15 @@ abstract class _RunStepDeltaStepDetailsMessageCreation Map json) = _$RunStepDeltaStepDetailsMessageCreationImpl.fromJson; - @override - /// The ID of the message that was created by this run step. + @override @JsonKey(name: 'message_id', includeIfNull: false) String? get messageId; + + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsMessageCreationImplCopyWith< _$RunStepDeltaStepDetailsMessageCreationImpl> get copyWith => throw _privateConstructorUsedError; @@ -43414,8 +45819,12 @@ mixin _$RunStepDetailsToolCallsCodeObjectCodeInterpreter { List get outputs => throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsToolCallsCodeObjectCodeInterpreter to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith< RunStepDetailsToolCallsCodeObjectCodeInterpreter> get copyWith => throw _privateConstructorUsedError; @@ -43445,6 +45854,8 @@ class _$RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43491,6 +45902,8 @@ class __$$RunStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -43553,12 +45966,14 @@ class _$RunStepDetailsToolCallsCodeObjectCodeInterpreterImpl const DeepCollectionEquality().equals(other._outputs, _outputs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, input, const DeepCollectionEquality().hash(_outputs)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< @@ -43588,16 +46003,18 @@ abstract class _RunStepDetailsToolCallsCodeObjectCodeInterpreter Map json) = _$RunStepDetailsToolCallsCodeObjectCodeInterpreterImpl.fromJson; - @override - /// The input to the Code Interpreter tool call. - String get input; @override + String get input; /// The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + @override List get outputs; + + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< _$RunStepDetailsToolCallsCodeObjectCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -43621,8 +46038,12 @@ mixin _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter { List? get outputs => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith< RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter> get copyWith => throw _privateConstructorUsedError; @@ -43660,6 +46081,8 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWithImpl< // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43715,6 +46138,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWithI _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -43785,12 +46210,14 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImpl const DeepCollectionEquality().equals(other._outputs, _outputs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, input, const DeepCollectionEquality().hash(_outputs)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< @@ -43822,18 +46249,20 @@ abstract class _RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter Map json) = _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImpl.fromJson; - @override - /// The input to the Code Interpreter tool call. + @override @JsonKey(includeIfNull: false) String? get input; - @override /// The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + @override @JsonKey(includeIfNull: false) List? get outputs; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -43851,8 +46280,12 @@ mixin _$RunStepDetailsToolCallsCodeOutputImage { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsToolCallsCodeOutputImage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsToolCallsCodeOutputImageCopyWith< RunStepDetailsToolCallsCodeOutputImage> get copyWith => throw _privateConstructorUsedError; @@ -43880,6 +46313,8 @@ class _$RunStepDetailsToolCallsCodeOutputImageCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43916,6 +46351,8 @@ class __$$RunStepDetailsToolCallsCodeOutputImageImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsCodeOutputImageImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -43960,11 +46397,13 @@ class _$RunStepDetailsToolCallsCodeOutputImageImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeOutputImageImplCopyWith< @@ -43992,13 +46431,15 @@ abstract class _RunStepDetailsToolCallsCodeOutputImage Map json) = _$RunStepDetailsToolCallsCodeOutputImageImpl.fromJson; - @override - /// The [file](https://platform.openai.com/docs/api-reference/files) ID of the image. + @override @JsonKey(name: 'file_id') String get fileId; + + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsCodeOutputImageImplCopyWith< _$RunStepDetailsToolCallsCodeOutputImageImpl> get copyWith => throw _privateConstructorUsedError; @@ -44016,8 +46457,12 @@ mixin _$RunStepDeltaStepDetailsToolCallsCodeOutputImage { @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaStepDetailsToolCallsCodeOutputImage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith< RunStepDeltaStepDetailsToolCallsCodeOutputImage> get copyWith => throw _privateConstructorUsedError; @@ -44046,6 +46491,8 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44086,6 +46533,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44130,11 +46579,13 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageImplCopyWith< @@ -44164,13 +46615,15 @@ abstract class _RunStepDeltaStepDetailsToolCallsCodeOutputImage Map json) = _$RunStepDeltaStepDetailsToolCallsCodeOutputImageImpl.fromJson; - @override - /// The [file](https://platform.openai.com/docs/api-reference/files) ID of the image. + @override @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeOutputImageImpl> get copyWith => throw _privateConstructorUsedError; @@ -44195,8 +46648,12 @@ mixin _$RunStepCompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; + /// Serializes this RunStepCompletionUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepCompletionUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -44224,6 +46681,8 @@ class _$RunStepCompletionUsageCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44273,6 +46732,8 @@ class __$$RunStepCompletionUsageImplCopyWithImpl<$Res> $Res Function(_$RunStepCompletionUsageImpl) _then) : super(_value, _then); + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44342,12 +46803,14 @@ class _$RunStepCompletionUsageImpl extends _RunStepCompletionUsage { other.totalTokens == totalTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); - @JsonKey(ignore: true) + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepCompletionUsageImplCopyWith<_$RunStepCompletionUsageImpl> @@ -44373,23 +46836,25 @@ abstract class _RunStepCompletionUsage extends RunStepCompletionUsage { factory _RunStepCompletionUsage.fromJson(Map json) = _$RunStepCompletionUsageImpl.fromJson; - @override - /// Number of completion tokens used over the course of the run step. + @override @JsonKey(name: 'completion_tokens') int get completionTokens; - @override /// Number of prompt tokens used over the course of the run step. + @override @JsonKey(name: 'prompt_tokens') int get promptTokens; - @override /// Total number of tokens used (prompt + completion). + @override @JsonKey(name: 'total_tokens') int get totalTokens; + + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepCompletionUsageImplCopyWith<_$RunStepCompletionUsageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -44408,8 +46873,12 @@ mixin _$VectorStoreExpirationAfter { /// The number of days after the anchor time that the vector store will expire. int get days => throw _privateConstructorUsedError; + /// Serializes this VectorStoreExpirationAfter to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreExpirationAfterCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -44435,6 +46904,8 @@ class _$VectorStoreExpirationAfterCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44476,6 +46947,8 @@ class __$$VectorStoreExpirationAfterImplCopyWithImpl<$Res> $Res Function(_$VectorStoreExpirationAfterImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44528,11 +47001,13 @@ class _$VectorStoreExpirationAfterImpl extends _VectorStoreExpirationAfter { (identical(other.days, days) || other.days == days)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, anchor, days); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreExpirationAfterImplCopyWith<_$VectorStoreExpirationAfterImpl> @@ -44556,16 +47031,18 @@ abstract class _VectorStoreExpirationAfter extends VectorStoreExpirationAfter { factory _VectorStoreExpirationAfter.fromJson(Map json) = _$VectorStoreExpirationAfterImpl.fromJson; - @override - /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. - VectorStoreExpirationAfterAnchor get anchor; @override + VectorStoreExpirationAfterAnchor get anchor; /// The number of days after the anchor time that the vector store will expire. + @override int get days; + + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreExpirationAfterImplCopyWith<_$VectorStoreExpirationAfterImpl> get copyWith => throw _privateConstructorUsedError; } @@ -44614,11 +47091,17 @@ mixin _$VectorStoreObject { @JsonKey(name: 'last_active_at') int? get lastActiveAt => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. dynamic get metadata => throw _privateConstructorUsedError; + /// Serializes this VectorStoreObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -44657,6 +47140,8 @@ class _$VectorStoreObjectCopyWithImpl<$Res, $Val extends VectorStoreObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44720,6 +47205,8 @@ class _$VectorStoreObjectCopyWithImpl<$Res, $Val extends VectorStoreObject> ) as $Val); } + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts { @@ -44729,6 +47216,8 @@ class _$VectorStoreObjectCopyWithImpl<$Res, $Val extends VectorStoreObject> }); } + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter { @@ -44779,6 +47268,8 @@ class __$$VectorStoreObjectImplCopyWithImpl<$Res> $Res Function(_$VectorStoreObjectImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -44909,7 +47400,9 @@ class _$VectorStoreObjectImpl extends _VectorStoreObject { @JsonKey(name: 'last_active_at') final int? lastActiveAt; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override final dynamic metadata; @@ -44942,7 +47435,7 @@ class _$VectorStoreObjectImpl extends _VectorStoreObject { const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -44958,7 +47451,9 @@ class _$VectorStoreObjectImpl extends _VectorStoreObject { lastActiveAt, const DeepCollectionEquality().hash(metadata)); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreObjectImplCopyWith<_$VectorStoreObjectImpl> get copyWith => @@ -44993,58 +47488,62 @@ abstract class _VectorStoreObject extends VectorStoreObject { factory _VectorStoreObject.fromJson(Map json) = _$VectorStoreObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. 
- String get id; @override + String get id; /// The object type, which is always `vector_store`. - String get object; @override + String get object; /// The Unix timestamp (in seconds) for when the vector store was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The name of the vector store. - String? get name; @override + String? get name; /// The total number of bytes used by the files in the vector store. + @override @JsonKey(name: 'usage_bytes') int get usageBytes; - @override /// The number of files in the vector store. + @override @JsonKey(name: 'file_counts') VectorStoreObjectFileCounts get fileCounts; - @override /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. - VectorStoreObjectStatus get status; @override + VectorStoreObjectStatus get status; /// The expiration policy for a vector store. + @override @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? get expiresAfter; - @override /// The Unix timestamp (in seconds) for when the vector store will expire. + @override @JsonKey(name: 'expires_at', includeIfNull: false) int? get expiresAt; - @override /// The Unix timestamp (in seconds) for when the vector store was last active. + @override @JsonKey(name: 'last_active_at') int? get lastActiveAt; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override dynamic get metadata; + + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreObjectImplCopyWith<_$VectorStoreObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -45072,8 +47571,12 @@ mixin _$VectorStoreObjectFileCounts { /// The total number of files. int get total => throw _privateConstructorUsedError; + /// Serializes this VectorStoreObjectFileCounts to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreObjectFileCountsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -45105,6 +47608,8 @@ class _$VectorStoreObjectFileCountsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45166,6 +47671,8 @@ class __$$VectorStoreObjectFileCountsImplCopyWithImpl<$Res> $Res Function(_$VectorStoreObjectFileCountsImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -45256,12 +47763,14 @@ class _$VectorStoreObjectFileCountsImpl extends _VectorStoreObjectFileCounts { (identical(other.total, total) || other.total == total)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, inProgress, completed, failed, cancelled, total); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreObjectFileCountsImplCopyWith<_$VectorStoreObjectFileCountsImpl> @@ -45289,29 +47798,31 @@ abstract class _VectorStoreObjectFileCounts factory _VectorStoreObjectFileCounts.fromJson(Map json) = _$VectorStoreObjectFileCountsImpl.fromJson; - @override - /// The number of files that are currently being processed. + @override @JsonKey(name: 'in_progress') int get inProgress; - @override /// The number of files that have been successfully processed. - int get completed; @override + int get completed; /// The number of files that have failed to process. - int get failed; @override + int get failed; /// The number of files that were cancelled. - int get cancelled; @override + int get cancelled; /// The total number of files. + @override int get total; + + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreObjectFileCountsImplCopyWith<_$VectorStoreObjectFileCountsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -45342,12 +47853,18 @@ mixin _$CreateVectorStoreRequest { ChunkingStrategyRequestParam? get chunkingStrategy => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; + /// Serializes this CreateVectorStoreRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateVectorStoreRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -45382,6 +47899,8 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45415,6 +47934,8 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VectorStoreExpirationAfterCopyWith<$Res>? 
get expiresAfter { @@ -45428,6 +47949,8 @@ class _$CreateVectorStoreRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { @@ -45476,6 +47999,8 @@ class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> $Res Function(_$CreateVectorStoreRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45557,7 +48082,9 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { @JsonKey(name: 'chunking_strategy', includeIfNull: false) final ChunkingStrategyRequestParam? chunkingStrategy; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) final dynamic metadata; @@ -45581,7 +48108,7 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -45591,7 +48118,9 @@ class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { chunkingStrategy, const DeepCollectionEquality().hash(metadata)); - @JsonKey(ignore: true) + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateVectorStoreRequestImplCopyWith<_$CreateVectorStoreRequestImpl> @@ -45622,34 +48151,38 @@ abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { factory _CreateVectorStoreRequest.fromJson(Map json) = _$CreateVectorStoreRequestImpl.fromJson; - @override - /// The name of the vector store. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + @override @JsonKey(name: 'file_ids', includeIfNull: false) List? get fileIds; - @override /// The expiration policy for a vector store. + @override @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? get expiresAfter; - @override /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override @JsonKey(name: 'chunking_strategy', includeIfNull: false) ChunkingStrategyRequestParam? get chunkingStrategy; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) dynamic get metadata; + + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateVectorStoreRequestImplCopyWith<_$CreateVectorStoreRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -45670,12 +48203,18 @@ mixin _$UpdateVectorStoreRequest { VectorStoreExpirationAfter? get expiresAfter => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; + /// Serializes this UpdateVectorStoreRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $UpdateVectorStoreRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -45706,6 +48245,8 @@ class _$UpdateVectorStoreRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45729,6 +48270,8 @@ class _$UpdateVectorStoreRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter { @@ -45772,6 +48315,8 @@ class __$$UpdateVectorStoreRequestImplCopyWithImpl<$Res> $Res Function(_$UpdateVectorStoreRequestImpl) _then) : super(_value, _then); + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -45818,7 +48363,9 @@ class _$UpdateVectorStoreRequestImpl extends _UpdateVectorStoreRequest { @JsonKey(name: 'expires_after', includeIfNull: false) final VectorStoreExpirationAfter? expiresAfter; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. 
@override @JsonKey(includeIfNull: false) final dynamic metadata; @@ -45839,12 +48386,14 @@ class _$UpdateVectorStoreRequestImpl extends _UpdateVectorStoreRequest { const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, expiresAfter, const DeepCollectionEquality().hash(metadata)); - @JsonKey(ignore: true) + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$UpdateVectorStoreRequestImplCopyWith<_$UpdateVectorStoreRequestImpl> @@ -45871,23 +48420,27 @@ abstract class _UpdateVectorStoreRequest extends UpdateVectorStoreRequest { factory _UpdateVectorStoreRequest.fromJson(Map json) = _$UpdateVectorStoreRequestImpl.fromJson; - @override - /// The name of the vector store. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The expiration policy for a vector store. + @override @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? get expiresAfter; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) dynamic get metadata; + + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$UpdateVectorStoreRequestImplCopyWith<_$UpdateVectorStoreRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -45917,8 +48470,12 @@ mixin _$ListVectorStoresResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListVectorStoresResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListVectorStoresResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -45948,6 +48505,8 @@ class _$ListVectorStoresResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46009,6 +48568,8 @@ class __$$ListVectorStoresResponseImplCopyWithImpl<$Res> $Res Function(_$ListVectorStoresResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -46105,12 +48666,14 @@ class _$ListVectorStoresResponseImpl extends _ListVectorStoresResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListVectorStoresResponseImplCopyWith<_$ListVectorStoresResponseImpl> @@ -46138,31 +48701,33 @@ abstract class _ListVectorStoresResponse extends ListVectorStoresResponse { factory _ListVectorStoresResponse.fromJson(Map json) = _$ListVectorStoresResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// A list of assistant files. - List get data; @override + List get data; /// The ID of the first assistant file in the list. + @override @JsonKey(name: 'first_id') String? get firstId; - @override /// The ID of the last assistant file in the list. + @override @JsonKey(name: 'last_id') String? get lastId; - @override /// Whether there are more assistant files available. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListVectorStoresResponseImplCopyWith<_$ListVectorStoresResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -46183,8 +48748,12 @@ mixin _$DeleteVectorStoreResponse { /// The object type, which is always `vector_store.deleted`. String get object => throw _privateConstructorUsedError; + /// Serializes this DeleteVectorStoreResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteVectorStoreResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -46209,6 +48778,8 @@ class _$DeleteVectorStoreResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46255,6 +48826,8 @@ class __$$DeleteVectorStoreResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteVectorStoreResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46316,11 +48889,13 @@ class _$DeleteVectorStoreResponseImpl extends _DeleteVectorStoreResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteVectorStoreResponseImplCopyWith<_$DeleteVectorStoreResponseImpl> @@ -46345,20 +48920,22 @@ abstract class _DeleteVectorStoreResponse extends DeleteVectorStoreResponse { factory _DeleteVectorStoreResponse.fromJson(Map json) = _$DeleteVectorStoreResponseImpl.fromJson; - @override - /// The ID of the deleted vector store. - String get id; @override + String get id; /// Whether the vector store was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always `vector_store.deleted`. + @override String get object; + + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteVectorStoreResponseImplCopyWith<_$DeleteVectorStoreResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -46402,8 +48979,12 @@ mixin _$VectorStoreFileObject { ChunkingStrategyResponseParam? get chunkingStrategy => throw _privateConstructorUsedError; + /// Serializes this VectorStoreFileObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreFileObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -46440,6 +49021,8 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46488,6 +49071,8 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError { @@ -46501,6 +49086,8 @@ class _$VectorStoreFileObjectCopyWithImpl<$Res, }); } + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy { @@ -46550,6 +49137,8 @@ class __$$VectorStoreFileObjectImplCopyWithImpl<$Res> $Res Function(_$VectorStoreFileObjectImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46680,12 +49269,14 @@ class _$VectorStoreFileObjectImpl extends _VectorStoreFileObject { other.chunkingStrategy == chunkingStrategy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, object, usageBytes, createdAt, vectorStoreId, status, lastError, chunkingStrategy); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> @@ -46718,46 +49309,48 @@ abstract class _VectorStoreFileObject extends VectorStoreFileObject { factory _VectorStoreFileObject.fromJson(Map json) = _$VectorStoreFileObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `vector_store.file`. - String get object; @override + String get object; /// The total vector store usage in bytes. Note that this may be different from the original file size. + @override @JsonKey(name: 'usage_bytes') int get usageBytes; - @override /// The Unix timestamp (in seconds) for when the vector store file was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. + @override @JsonKey(name: 'vector_store_id') String get vectorStoreId; - @override /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. - VectorStoreFileStatus get status; @override + VectorStoreFileStatus get status; /// The last error associated with this vector store file. Will be `null` if there are no errors. + @override @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? get lastError; - @override /// The chunking strategy used to chunk the file(s). /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + @override @JsonKey(name: 'chunking_strategy', includeIfNull: false) ChunkingStrategyResponseParam? get chunkingStrategy; + + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -46776,8 +49369,12 @@ mixin _$VectorStoreFileObjectLastError { /// A human-readable description of the error. String get message => throw _privateConstructorUsedError; + /// Serializes this VectorStoreFileObjectLastError to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreFileObjectLastErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -46804,6 +49401,8 @@ class _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46845,6 +49444,8 @@ class __$$VectorStoreFileObjectLastErrorImplCopyWithImpl<$Res> $Res Function(_$VectorStoreFileObjectLastErrorImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -46898,11 +49499,13 @@ class _$VectorStoreFileObjectLastErrorImpl (identical(other.message, message) || other.message == message)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreFileObjectLastErrorImplCopyWith< @@ -46928,16 +49531,18 @@ abstract class _VectorStoreFileObjectLastError factory _VectorStoreFileObjectLastError.fromJson(Map json) = _$VectorStoreFileObjectLastErrorImpl.fromJson; - @override - /// One of `server_error` or `rate_limit_exceeded`. - VectorStoreFileObjectLastErrorCode get code; @override + VectorStoreFileObjectLastErrorCode get code; /// A human-readable description of the error. + @override String get message; + + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreFileObjectLastErrorImplCopyWith< _$VectorStoreFileObjectLastErrorImpl> get copyWith => throw _privateConstructorUsedError; @@ -46961,8 +49566,12 @@ mixin _$StaticChunkingStrategy { @JsonKey(name: 'chunk_overlap_tokens') int get chunkOverlapTokens => throw _privateConstructorUsedError; + /// Serializes this StaticChunkingStrategy to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $StaticChunkingStrategyCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -46989,6 +49598,8 @@ class _$StaticChunkingStrategyCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47032,6 +49643,8 @@ class __$$StaticChunkingStrategyImplCopyWithImpl<$Res> $Res Function(_$StaticChunkingStrategyImpl) _then) : super(_value, _then); + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47091,12 +49704,14 @@ class _$StaticChunkingStrategyImpl extends _StaticChunkingStrategy { other.chunkOverlapTokens == chunkOverlapTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, maxChunkSizeTokens, chunkOverlapTokens); - @JsonKey(ignore: true) + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$StaticChunkingStrategyImplCopyWith<_$StaticChunkingStrategyImpl> @@ -47122,21 +49737,23 @@ abstract class _StaticChunkingStrategy extends StaticChunkingStrategy { factory _StaticChunkingStrategy.fromJson(Map json) = _$StaticChunkingStrategyImpl.fromJson; - @override - /// The maximum number of tokens in each chunk. The default value is `800`. 
The minimum value is `100` and the /// maximum value is `4096`. + @override @JsonKey(name: 'max_chunk_size_tokens') int get maxChunkSizeTokens; - @override /// The number of tokens that overlap between chunks. The default value is `400`. /// /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. + @override @JsonKey(name: 'chunk_overlap_tokens') int get chunkOverlapTokens; + + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$StaticChunkingStrategyImplCopyWith<_$StaticChunkingStrategyImpl> get copyWith => throw _privateConstructorUsedError; } @@ -47158,8 +49775,12 @@ mixin _$CreateVectorStoreFileRequest { ChunkingStrategyRequestParam? get chunkingStrategy => throw _privateConstructorUsedError; + /// Serializes this CreateVectorStoreFileRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateVectorStoreFileRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -47191,6 +49812,8 @@ class _$CreateVectorStoreFileRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47209,6 +49832,8 @@ class _$CreateVectorStoreFileRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { @@ -47251,6 +49876,8 @@ class __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res> $Res Function(_$CreateVectorStoreFileRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47309,11 +49936,13 @@ class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { other.chunkingStrategy == chunkingStrategy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId, chunkingStrategy); - @JsonKey(ignore: true) + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateVectorStoreFileRequestImplCopyWith< @@ -47341,19 +49970,21 @@ abstract class _CreateVectorStoreFileRequest factory _CreateVectorStoreFileRequest.fromJson(Map json) = _$CreateVectorStoreFileRequestImpl.fromJson; - @override - /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. + @override @JsonKey(name: 'file_id') String get fileId; - @override /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. 
/// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override @JsonKey(name: 'chunking_strategy', includeIfNull: false) ChunkingStrategyRequestParam? get chunkingStrategy; + + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateVectorStoreFileRequestImplCopyWith< _$CreateVectorStoreFileRequestImpl> get copyWith => throw _privateConstructorUsedError; @@ -47384,8 +50015,12 @@ mixin _$ListVectorStoreFilesResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListVectorStoreFilesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListVectorStoreFilesResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -47417,6 +50052,8 @@ class _$ListVectorStoreFilesResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47478,6 +50115,8 @@ class __$$ListVectorStoreFilesResponseImplCopyWithImpl<$Res> $Res Function(_$ListVectorStoreFilesResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47575,12 +50214,14 @@ class _$ListVectorStoreFilesResponseImpl extends _ListVectorStoreFilesResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListVectorStoreFilesResponseImplCopyWith< @@ -47610,31 +50251,33 @@ abstract class _ListVectorStoreFilesResponse factory _ListVectorStoreFilesResponse.fromJson(Map json) = _$ListVectorStoreFilesResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// A list of message files. - List get data; @override + List get data; /// The ID of the first message file in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last message file in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more message files available. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListVectorStoreFilesResponseImplCopyWith< _$ListVectorStoreFilesResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -47656,8 +50299,12 @@ mixin _$DeleteVectorStoreFileResponse { /// The object type, which is always `vector_store.file.deleted`. String get object => throw _privateConstructorUsedError; + /// Serializes this DeleteVectorStoreFileResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteVectorStoreFileResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -47684,6 +50331,8 @@ class _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47730,6 +50379,8 @@ class __$$DeleteVectorStoreFileResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteVectorStoreFileResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47793,11 +50444,13 @@ class _$DeleteVectorStoreFileResponseImpl (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteVectorStoreFileResponseImplCopyWith< @@ -47824,20 +50477,22 @@ abstract class _DeleteVectorStoreFileResponse factory _DeleteVectorStoreFileResponse.fromJson(Map json) = _$DeleteVectorStoreFileResponseImpl.fromJson; - @override - /// The ID of the deleted vector store file. - String get id; @override + String get id; /// Whether the vector store file was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always `vector_store.file.deleted`. + @override String get object; + + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteVectorStoreFileResponseImplCopyWith< _$DeleteVectorStoreFileResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -47873,8 +50528,12 @@ mixin _$VectorStoreFileBatchObject { VectorStoreFileBatchObjectFileCounts get fileCounts => throw _privateConstructorUsedError; + /// Serializes this VectorStoreFileBatchObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreFileBatchObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -47909,6 +50568,8 @@ class _$VectorStoreFileBatchObjectCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -47947,6 +50608,8 @@ class _$VectorStoreFileBatchObjectCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts { @@ -47989,6 +50652,8 @@ class __$$VectorStoreFileBatchObjectImplCopyWithImpl<$Res> $Res Function(_$VectorStoreFileBatchObjectImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48092,12 +50757,14 @@ class _$VectorStoreFileBatchObjectImpl extends _VectorStoreFileBatchObject { other.fileCounts == fileCounts)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, id, object, createdAt, vectorStoreId, status, fileCounts); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreFileBatchObjectImplCopyWith<_$VectorStoreFileBatchObjectImpl> @@ -48127,35 +50794,37 @@ abstract class _VectorStoreFileBatchObject extends VectorStoreFileBatchObject { factory _VectorStoreFileBatchObject.fromJson(Map json) = _$VectorStoreFileBatchObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `vector_store.file_batch`. - String get object; @override + String get object; /// The Unix timestamp (in seconds) for when the vector store files batch was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. + @override @JsonKey(name: 'vector_store_id') String get vectorStoreId; - @override /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. - VectorStoreFileBatchObjectStatus get status; @override + VectorStoreFileBatchObjectStatus get status; /// The number of files per status. + @override @JsonKey(name: 'file_counts') VectorStoreFileBatchObjectFileCounts get fileCounts; + + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreFileBatchObjectImplCopyWith<_$VectorStoreFileBatchObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -48183,8 +50852,12 @@ mixin _$VectorStoreFileBatchObjectFileCounts { /// The total number of files. 
int get total => throw _privateConstructorUsedError; + /// Serializes this VectorStoreFileBatchObjectFileCounts to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VectorStoreFileBatchObjectFileCountsCopyWith< VectorStoreFileBatchObjectFileCounts> get copyWith => throw _privateConstructorUsedError; @@ -48217,6 +50890,8 @@ class _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48278,6 +50953,8 @@ class __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl<$Res> $Res Function(_$VectorStoreFileBatchObjectFileCountsImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48369,12 +51046,14 @@ class _$VectorStoreFileBatchObjectFileCountsImpl (identical(other.total, total) || other.total == total)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, inProgress, completed, failed, cancelled, total); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VectorStoreFileBatchObjectFileCountsImplCopyWith< @@ -48404,29 +51083,31 @@ abstract class _VectorStoreFileBatchObjectFileCounts Map json) = _$VectorStoreFileBatchObjectFileCountsImpl.fromJson; - @override - /// The number of files that are currently being processed. + @override @JsonKey(name: 'in_progress') int get inProgress; - @override /// The number of files that have been processed. - int get completed; @override + int get completed; /// The number of files that have failed to process. - int get failed; @override + int get failed; /// The number of files that where cancelled. - int get cancelled; @override + int get cancelled; /// The total number of files. + @override int get total; + + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VectorStoreFileBatchObjectFileCountsImplCopyWith< _$VectorStoreFileBatchObjectFileCountsImpl> get copyWith => throw _privateConstructorUsedError; @@ -48449,8 +51130,12 @@ mixin _$CreateVectorStoreFileBatchRequest { ChunkingStrategyRequestParam? get chunkingStrategy => throw _privateConstructorUsedError; + /// Serializes this CreateVectorStoreFileBatchRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $CreateVectorStoreFileBatchRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -48482,6 +51167,8 @@ class _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48500,6 +51187,8 @@ class _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { @@ -48542,6 +51231,8 @@ class __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res> $Res Function(_$CreateVectorStoreFileBatchRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48609,12 +51300,14 @@ class _$CreateVectorStoreFileBatchRequestImpl other.chunkingStrategy == chunkingStrategy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_fileIds), chunkingStrategy); - @JsonKey(ignore: true) + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateVectorStoreFileBatchRequestImplCopyWith< @@ -48643,19 +51336,21 @@ abstract class _CreateVectorStoreFileBatchRequest Map json) = _$CreateVectorStoreFileBatchRequestImpl.fromJson; - @override - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + @override @JsonKey(name: 'file_ids') List get fileIds; - @override /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override @JsonKey(name: 'chunking_strategy', includeIfNull: false) ChunkingStrategyRequestParam? get chunkingStrategy; + + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateVectorStoreFileBatchRequestImplCopyWith< _$CreateVectorStoreFileBatchRequestImpl> get copyWith => throw _privateConstructorUsedError; @@ -48679,8 +51374,12 @@ mixin _$Error { /// The type of error. String get type => throw _privateConstructorUsedError; + /// Serializes this Error to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -48702,6 +51401,8 @@ class _$ErrorCopyWithImpl<$Res, $Val extends Error> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -48749,6 +51450,8 @@ class __$$ErrorImplCopyWithImpl<$Res> _$ErrorImpl _value, $Res Function(_$ErrorImpl) _then) : super(_value, _then); + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48823,11 +51526,13 @@ class _$ErrorImpl extends _Error { (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message, param, type); - @JsonKey(ignore: true) + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ErrorImplCopyWith<_$ErrorImpl> get copyWith => @@ -48851,24 +51556,26 @@ abstract class _Error extends Error { factory _Error.fromJson(Map json) = _$ErrorImpl.fromJson; - @override - /// The error code. - String? get code; @override + String? get code; /// A human-readable description of the error. - String get message; @override + String get message; /// The parameter in the request that caused the error. - String? get param; @override + String? get param; /// The type of error. + @override String get type; + + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ErrorImplCopyWith<_$ErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -48899,8 +51606,12 @@ mixin _$CreateBatchRequest { @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this CreateBatchRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateBatchRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -48929,6 +51640,8 @@ class _$CreateBatchRequestCopyWithImpl<$Res, $Val extends CreateBatchRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -48982,6 +51695,8 @@ class __$$CreateBatchRequestImplCopyWithImpl<$Res> $Res Function(_$CreateBatchRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -49076,12 +51791,14 @@ class _$CreateBatchRequestImpl extends _CreateBatchRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, inputFileId, endpoint, completionWindow, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateBatchRequestImplCopyWith<_$CreateBatchRequestImpl> get copyWith => @@ -49109,31 +51826,33 @@ abstract class _CreateBatchRequest extends CreateBatchRequest { factory _CreateBatchRequest.fromJson(Map json) = _$CreateBatchRequestImpl.fromJson; - @override - /// The ID of an uploaded file that contains requests for the new batch. /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + @override @JsonKey(name: 'input_file_id') String get inputFileId; - @override /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. - BatchEndpoint get endpoint; @override + BatchEndpoint get endpoint; /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @override @JsonKey(name: 'completion_window') BatchCompletionWindow get completionWindow; - @override /// Optional custom metadata for the batch. + @override @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateBatchRequestImplCopyWith<_$CreateBatchRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -49217,12 +51936,18 @@ mixin _$Batch { @JsonKey(name: 'request_counts', includeIfNull: false) BatchRequestCounts? get requestCounts => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; + /// Serializes this Batch to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $BatchCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -49270,6 +51995,8 @@ class _$BatchCopyWithImpl<$Res, $Val extends Batch> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -49378,6 +52105,8 @@ class _$BatchCopyWithImpl<$Res, $Val extends Batch> ) as $Val); } + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $BatchErrorsCopyWith<$Res>? 
get errors { @@ -49390,6 +52119,8 @@ class _$BatchCopyWithImpl<$Res, $Val extends Batch> }); } + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $BatchRequestCountsCopyWith<$Res>? get requestCounts { @@ -49449,6 +52180,8 @@ class __$$BatchImplCopyWithImpl<$Res> _$BatchImpl _value, $Res Function(_$BatchImpl) _then) : super(_value, _then); + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -49678,7 +52411,9 @@ class _$BatchImpl extends _Batch { @JsonKey(name: 'request_counts', includeIfNull: false) final BatchRequestCounts? requestCounts; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) final dynamic metadata; @@ -49730,7 +52465,7 @@ class _$BatchImpl extends _Batch { const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hashAll([ runtimeType, @@ -49756,7 +52491,9 @@ class _$BatchImpl extends _Batch { const DeepCollectionEquality().hash(metadata) ]); - @JsonKey(ignore: true) + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$BatchImplCopyWith<_$BatchImpl> get copyWith => @@ -49805,104 +52542,108 @@ abstract class _Batch extends Batch { factory _Batch.fromJson(Map json) = _$BatchImpl.fromJson; - @override - /// No Description - String get id; @override + String get id; /// The object type, which is always `batch`. - BatchObject get object; @override + BatchObject get object; /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. - BatchEndpoint get endpoint; @override + BatchEndpoint get endpoint; /// No Description + @override @JsonKey(includeIfNull: false) BatchErrors? get errors; - @override /// The ID of the input file for the batch. + @override @JsonKey(name: 'input_file_id') String get inputFileId; - @override /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @override @JsonKey(name: 'completion_window') BatchCompletionWindow get completionWindow; - @override /// The current status of the batch. - BatchStatus get status; @override + BatchStatus get status; /// The ID of the file containing the outputs of successfully executed requests. + @override @JsonKey(name: 'output_file_id', includeIfNull: false) String? get outputFileId; - @override /// The ID of the file containing the outputs of requests with errors. + @override @JsonKey(name: 'error_file_id', includeIfNull: false) String? 
get errorFileId; - @override /// The Unix timestamp (in seconds) for when the batch was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The Unix timestamp (in seconds) for when the batch started processing. + @override @JsonKey(name: 'in_progress_at', includeIfNull: false) int? get inProgressAt; - @override /// The Unix timestamp (in seconds) for when the batch will expire. + @override @JsonKey(name: 'expires_at', includeIfNull: false) int? get expiresAt; - @override /// The Unix timestamp (in seconds) for when the batch started finalizing. + @override @JsonKey(name: 'finalizing_at', includeIfNull: false) int? get finalizingAt; - @override /// The Unix timestamp (in seconds) for when the batch was completed. + @override @JsonKey(name: 'completed_at', includeIfNull: false) int? get completedAt; - @override /// The Unix timestamp (in seconds) for when the batch failed. + @override @JsonKey(name: 'failed_at', includeIfNull: false) int? get failedAt; - @override /// The Unix timestamp (in seconds) for when the batch expired. + @override @JsonKey(name: 'expired_at', includeIfNull: false) int? get expiredAt; - @override /// The Unix timestamp (in seconds) for when the batch started cancelling. + @override @JsonKey(name: 'cancelling_at', includeIfNull: false) int? get cancellingAt; - @override /// The Unix timestamp (in seconds) for when the batch was cancelled. + @override @JsonKey(name: 'cancelled_at', includeIfNull: false) int? get cancelledAt; - @override /// The request counts for different statuses within the batch. + @override @JsonKey(name: 'request_counts', includeIfNull: false) BatchRequestCounts? get requestCounts; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) dynamic get metadata; + + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$BatchImplCopyWith<_$BatchImpl> get copyWith => throw _privateConstructorUsedError; } @@ -49921,8 +52662,12 @@ mixin _$BatchErrors { @JsonKey(includeIfNull: false) List? get data => throw _privateConstructorUsedError; + /// Serializes this BatchErrors to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $BatchErrorsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -49948,6 +52693,8 @@ class _$BatchErrorsCopyWithImpl<$Res, $Val extends BatchErrors> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -49988,6 +52735,8 @@ class __$$BatchErrorsImplCopyWithImpl<$Res> _$BatchErrorsImpl _value, $Res Function(_$BatchErrorsImpl) _then) : super(_value, _then); + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50052,12 +52801,14 @@ class _$BatchErrorsImpl extends _BatchErrors { const DeepCollectionEquality().equals(other._data, _data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, object, const DeepCollectionEquality().hash(_data)); - @JsonKey(ignore: true) + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$BatchErrorsImplCopyWith<_$BatchErrorsImpl> get copyWith => @@ -50081,18 +52832,20 @@ abstract class _BatchErrors extends BatchErrors { factory _BatchErrors.fromJson(Map json) = _$BatchErrorsImpl.fromJson; - @override - /// The object type, which is always `list`. + @override @JsonKey(includeIfNull: false) String? get object; - @override /// No Description + @override @JsonKey(includeIfNull: false) List? get data; + + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$BatchErrorsImplCopyWith<_$BatchErrorsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -50112,8 +52865,12 @@ mixin _$BatchRequestCounts { /// Number of requests that have failed. int get failed => throw _privateConstructorUsedError; + /// Serializes this BatchRequestCounts to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $BatchRequestCountsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -50137,6 +52894,8 @@ class _$BatchRequestCountsCopyWithImpl<$Res, $Val extends BatchRequestCounts> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50180,6 +52939,8 @@ class __$$BatchRequestCountsImplCopyWithImpl<$Res> $Res Function(_$BatchRequestCountsImpl) _then) : super(_value, _then); + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50242,11 +53003,13 @@ class _$BatchRequestCountsImpl extends _BatchRequestCounts { (identical(other.failed, failed) || other.failed == failed)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, total, completed, failed); - @JsonKey(ignore: true) + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$BatchRequestCountsImplCopyWith<_$BatchRequestCountsImpl> get copyWith => @@ -50271,20 +53034,22 @@ abstract class _BatchRequestCounts extends BatchRequestCounts { factory _BatchRequestCounts.fromJson(Map json) = _$BatchRequestCountsImpl.fromJson; - @override - /// Total number of requests in the batch. - int get total; @override + int get total; /// Number of requests that have been completed successfully. - int get completed; @override + int get completed; /// Number of requests that have failed. + @override int get failed; + + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$BatchRequestCountsImplCopyWith<_$BatchRequestCountsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -50311,8 +53076,12 @@ mixin _$BatchErrorsDataInner { @JsonKey(includeIfNull: false) int? get line => throw _privateConstructorUsedError; + /// Serializes this BatchErrorsDataInner to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $BatchErrorsDataInnerCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -50341,6 +53110,8 @@ class _$BatchErrorsDataInnerCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50393,6 +53164,8 @@ class __$$BatchErrorsDataInnerImplCopyWithImpl<$Res> $Res Function(_$BatchErrorsDataInnerImpl) _then) : super(_value, _then); + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50471,11 +53244,13 @@ class _$BatchErrorsDataInnerImpl extends _BatchErrorsDataInner { (identical(other.line, line) || other.line == line)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message, param, line); - @JsonKey(ignore: true) + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$BatchErrorsDataInnerImplCopyWith<_$BatchErrorsDataInnerImpl> @@ -50503,28 +53278,30 @@ abstract class _BatchErrorsDataInner extends BatchErrorsDataInner { factory _BatchErrorsDataInner.fromJson(Map json) = _$BatchErrorsDataInnerImpl.fromJson; - @override - /// An error code identifying the error type. + @override @JsonKey(includeIfNull: false) String? get code; - @override /// A human-readable message providing more details about the error. + @override @JsonKey(includeIfNull: false) String? get message; - @override /// The name of the parameter that caused the error, if applicable. + @override @JsonKey(includeIfNull: false) String? get param; - @override /// The line number of the input file where the error occurred, if applicable. + @override @JsonKey(includeIfNull: false) int? 
get line; + + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$BatchErrorsDataInnerImplCopyWith<_$BatchErrorsDataInnerImpl> get copyWith => throw _privateConstructorUsedError; } @@ -50553,8 +53330,12 @@ mixin _$ListBatchesResponse { /// The object type, which is always `list`. ListBatchesResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this ListBatchesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListBatchesResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -50583,6 +53364,8 @@ class _$ListBatchesResponseCopyWithImpl<$Res, $Val extends ListBatchesResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50641,6 +53424,8 @@ class __$$ListBatchesResponseImplCopyWithImpl<$Res> $Res Function(_$ListBatchesResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -50737,7 +53522,7 @@ class _$ListBatchesResponseImpl extends _ListBatchesResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -50747,7 +53532,9 @@ class _$ListBatchesResponseImpl extends _ListBatchesResponse { hasMore, object); - @JsonKey(ignore: true) + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListBatchesResponseImplCopyWith<_$ListBatchesResponseImpl> get copyWith => @@ -50775,31 +53562,33 @@ abstract class _ListBatchesResponse extends ListBatchesResponse { factory _ListBatchesResponse.fromJson(Map json) = _$ListBatchesResponseImpl.fromJson; - @override - /// No Description - List get data; @override + List get data; /// The ID of the first batch in the list. + @override @JsonKey(name: 'first_id', includeIfNull: false) String? get firstId; - @override /// The ID of the last batch in the list. + @override @JsonKey(name: 'last_id', includeIfNull: false) String? get lastId; - @override /// Whether there are more batches available. + @override @JsonKey(name: 'has_more') bool get hasMore; - @override /// The object type, which is always `list`. + @override ListBatchesResponseObject get object; + + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListBatchesResponseImplCopyWith<_$ListBatchesResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -50944,8 +53733,13 @@ mixin _$ChatCompletionMessage { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionMessage to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionMessageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -50970,6 +53764,8 @@ class _$ChatCompletionMessageCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -51009,6 +53805,8 @@ class __$$ChatCompletionSystemMessageImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionSystemMessageImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -51075,11 +53873,13 @@ class _$ChatCompletionSystemMessageImpl extends ChatCompletionSystemMessage { (identical(other.name, name) || other.name == name)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, role, content, name); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionSystemMessageImplCopyWith<_$ChatCompletionSystemMessageImpl> @@ -51242,20 +54042,22 @@ abstract class ChatCompletionSystemMessage extends ChatCompletionMessage { factory ChatCompletionSystemMessage.fromJson(Map json) = _$ChatCompletionSystemMessageImpl.fromJson; - @override - /// The role of the messages author, in this case `system`. - ChatCompletionMessageRole get role; @override + ChatCompletionMessageRole get role; /// The contents of the system message. + @override String get content; /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. @JsonKey(includeIfNull: false) String? get name; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionSystemMessageImplCopyWith<_$ChatCompletionSystemMessageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -51288,6 +54090,8 @@ class __$$ChatCompletionUserMessageImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionUserMessageImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -51311,6 +54115,8 @@ class __$$ChatCompletionUserMessageImplCopyWithImpl<$Res> )); } + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $ChatCompletionUserMessageContentCopyWith<$Res> get content { @@ -51363,11 +54169,13 @@ class _$ChatCompletionUserMessageImpl extends ChatCompletionUserMessage { (identical(other.name, name) || other.name == name)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, role, content, name); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionUserMessageImplCopyWith<_$ChatCompletionUserMessageImpl> @@ -51531,21 +54339,23 @@ abstract class ChatCompletionUserMessage extends ChatCompletionMessage { factory ChatCompletionUserMessage.fromJson(Map json) = _$ChatCompletionUserMessageImpl.fromJson; - @override - /// The role of the messages author, in this case `user`. - ChatCompletionMessageRole get role; @override + ChatCompletionMessageRole get role; /// The contents of the user message. + @override @_ChatCompletionUserMessageContentConverter() ChatCompletionUserMessageContent get content; /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. @JsonKey(includeIfNull: false) String? get name; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionUserMessageImplCopyWith<_$ChatCompletionUserMessageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -51581,6 +54391,8 @@ class __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionAssistantMessageImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -51614,6 +54426,8 @@ class __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res> )); } + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall { @@ -51699,12 +54513,14 @@ class _$ChatCompletionAssistantMessageImpl other.functionCall == functionCall)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, role, content, name, const DeepCollectionEquality().hash(_toolCalls), functionCall); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionAssistantMessageImplCopyWith< @@ -51872,13 +54688,12 @@ abstract class ChatCompletionAssistantMessage extends ChatCompletionMessage { factory ChatCompletionAssistantMessage.fromJson(Map json) = _$ChatCompletionAssistantMessageImpl.fromJson; - @override - /// The role of the messages author, in this case `assistant`. - ChatCompletionMessageRole get role; @override + ChatCompletionMessageRole get role; /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + @override @JsonKey(includeIfNull: false) String? 
get content; @@ -51893,8 +54708,11 @@ abstract class ChatCompletionAssistantMessage extends ChatCompletionMessage { /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionMessageFunctionCall? get functionCall; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionAssistantMessageImplCopyWith< _$ChatCompletionAssistantMessageImpl> get copyWith => throw _privateConstructorUsedError; @@ -51925,6 +54743,8 @@ class __$$ChatCompletionToolMessageImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionToolMessageImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -51991,11 +54811,13 @@ class _$ChatCompletionToolMessageImpl extends ChatCompletionToolMessage { other.toolCallId == toolCallId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, role, content, toolCallId); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolMessageImplCopyWith<_$ChatCompletionToolMessageImpl> @@ -52158,20 +54980,22 @@ abstract class ChatCompletionToolMessage extends ChatCompletionMessage { factory ChatCompletionToolMessage.fromJson(Map json) = _$ChatCompletionToolMessageImpl.fromJson; - @override - /// The role of the messages author, in this case `tool`. - ChatCompletionMessageRole get role; @override + ChatCompletionMessageRole get role; /// The contents of the tool message. + @override String get content; /// Tool call that this message is responding to. @JsonKey(name: 'tool_call_id') String get toolCallId; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionToolMessageImplCopyWith<_$ChatCompletionToolMessageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -52198,6 +55022,8 @@ class __$$ChatCompletionFunctionMessageImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionFunctionMessageImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -52264,11 +55090,13 @@ class _$ChatCompletionFunctionMessageImpl (identical(other.name, name) || other.name == name)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, role, content, name); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionMessageImplCopyWith< @@ -52431,19 +55259,21 @@ abstract class ChatCompletionFunctionMessage extends ChatCompletionMessage { factory ChatCompletionFunctionMessage.fromJson(Map json) = _$ChatCompletionFunctionMessageImpl.fromJson; - @override - /// The role of the messages author, in this case `function`. - ChatCompletionMessageRole get role; @override + ChatCompletionMessageRole get role; /// The contents of the function message. + @override String? get content; /// The name of the function to call. String get name; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionFunctionMessageImplCopyWith< _$ChatCompletionFunctionMessageImpl> get copyWith => throw _privateConstructorUsedError; @@ -52509,6 +55339,8 @@ mixin _$ChatCompletionUserMessageContent { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionUserMessageContent to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -52531,6 +55363,9 @@ class _$ChatCompletionUserMessageContentCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -52553,6 +55388,8 @@ class __$$ChatCompletionMessageContentPartsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageContentPartsImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -52606,12 +55443,14 @@ class _$ChatCompletionMessageContentPartsImpl const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageContentPartsImplCopyWith< @@ -52704,7 +55543,10 @@ abstract class ChatCompletionMessageContentParts @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageContentPartsImplCopyWith< _$ChatCompletionMessageContentPartsImpl> get copyWith => throw _privateConstructorUsedError; @@ -52730,6 +55572,8 @@ class __$$ChatCompletionUserMessageContentStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionUserMessageContentStringImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -52776,11 +55620,13 @@ class _$ChatCompletionUserMessageContentStringImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionUserMessageContentStringImplCopyWith< @@ -52873,7 +55719,10 @@ abstract class ChatCompletionUserMessageContentString @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionUserMessageContentStringImplCopyWith< _$ChatCompletionUserMessageContentStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -52950,8 +55799,13 @@ mixin _$ChatCompletionMessageContentPart { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionMessageContentPart to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionMessageContentPartCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -52978,6 +55832,8 @@ class _$ChatCompletionMessageContentPartCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53014,6 +55870,8 @@ class __$$ChatCompletionMessageContentPartTextImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageContentPartTextImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53069,11 +55927,13 @@ class _$ChatCompletionMessageContentPartTextImpl (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, text); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageContentPartTextImplCopyWith< @@ -53173,15 +56033,17 @@ abstract class ChatCompletionMessageContentPartText Map json) = _$ChatCompletionMessageContentPartTextImpl.fromJson; - @override - /// The type of the content part, in this case `text`. + @override ChatCompletionMessageContentPartType get type; /// The text content. String get text; + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageContentPartTextImplCopyWith< _$ChatCompletionMessageContentPartTextImpl> get copyWith => throw _privateConstructorUsedError; @@ -53213,6 +56075,8 @@ class __$$ChatCompletionMessageContentPartImageImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageContentPartImageImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53231,6 +56095,8 @@ class __$$ChatCompletionMessageContentPartImageImplCopyWithImpl<$Res> )); } + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionMessageImageUrlCopyWith<$Res> get imageUrl { @@ -53279,11 +56145,13 @@ class _$ChatCompletionMessageContentPartImageImpl other.imageUrl == imageUrl)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, imageUrl); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageContentPartImageImplCopyWith< @@ -53385,16 +56253,18 @@ abstract class ChatCompletionMessageContentPartImage Map json) = _$ChatCompletionMessageContentPartImageImpl.fromJson; - @override - /// The type of the content part, in this case `image_url`. + @override ChatCompletionMessageContentPartType get type; /// The URL of the image. @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl get imageUrl; + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageContentPartImageImplCopyWith< _$ChatCompletionMessageContentPartImageImpl> get copyWith => throw _privateConstructorUsedError; @@ -53414,8 +56284,12 @@ mixin _$ChatCompletionMessageImageUrl { ChatCompletionMessageImageDetail get detail => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionMessageImageUrl to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionMessageImageUrlCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -53442,6 +56316,8 @@ class _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53483,6 +56359,8 @@ class __$$ChatCompletionMessageImageUrlImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageImageUrlImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -53537,11 +56415,13 @@ class _$ChatCompletionMessageImageUrlImpl (identical(other.detail, detail) || other.detail == detail)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, url, detail); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageImageUrlImplCopyWith< @@ -53568,16 +56448,18 @@ abstract class _ChatCompletionMessageImageUrl factory _ChatCompletionMessageImageUrl.fromJson(Map json) = _$ChatCompletionMessageImageUrlImpl.fromJson; - @override - /// Either a URL of the image or the base64 encoded image data. - String get url; @override + String get url; /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). + @override ChatCompletionMessageImageDetail get detail; + + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageImageUrlImplCopyWith< _$ChatCompletionMessageImageUrlImpl> get copyWith => throw _privateConstructorUsedError; @@ -53659,8 +56541,13 @@ mixin _$AssistantTools { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this AssistantTools to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $AssistantToolsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -53684,6 +56571,8 @@ class _$AssistantToolsCopyWithImpl<$Res, $Val extends AssistantTools> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53720,6 +56609,8 @@ class __$$AssistantToolsCodeInterpreterImplCopyWithImpl<$Res> $Res Function(_$AssistantToolsCodeInterpreterImpl) _then) : super(_value, _then); + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53763,11 +56654,13 @@ class _$AssistantToolsCodeInterpreterImpl (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantToolsCodeInterpreterImplCopyWith< @@ -53872,12 +56765,14 @@ abstract class AssistantToolsCodeInterpreter extends AssistantTools { factory AssistantToolsCodeInterpreter.fromJson(Map json) = _$AssistantToolsCodeInterpreterImpl.fromJson; - @override - /// The type of tool being defined: `code_interpreter` + @override String get type; + + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantToolsCodeInterpreterImplCopyWith< _$AssistantToolsCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -53909,6 +56804,8 @@ class __$$AssistantToolsFileSearchImplCopyWithImpl<$Res> $Res Function(_$AssistantToolsFileSearchImpl) _then) : super(_value, _then); + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -53927,6 +56824,8 @@ class __$$AssistantToolsFileSearchImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantToolsFileSearchFileSearchCopyWith<$Res>? get fileSearch { @@ -53976,11 +56875,13 @@ class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { other.fileSearch == fileSearch)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, fileSearch); - @JsonKey(ignore: true) + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> @@ -54087,16 +56988,18 @@ abstract class AssistantToolsFileSearch extends AssistantTools { factory AssistantToolsFileSearch.fromJson(Map json) = _$AssistantToolsFileSearchImpl.fromJson; - @override - /// The type of tool being defined: `file_search` + @override String get type; /// Overrides for the file search tool. @JsonKey(name: 'file_search', includeIfNull: false) AssistantToolsFileSearchFileSearch? get fileSearch; + + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> get copyWith => throw _privateConstructorUsedError; } @@ -54124,6 +57027,8 @@ class __$$AssistantToolsFunctionImplCopyWithImpl<$Res> $Res Function(_$AssistantToolsFunctionImpl) _then) : super(_value, _then); + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -54142,6 +57047,8 @@ class __$$AssistantToolsFunctionImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $FunctionObjectCopyWith<$Res> get function { @@ -54185,11 +57092,13 @@ class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, function); - @JsonKey(ignore: true) + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantToolsFunctionImplCopyWith<_$AssistantToolsFunctionImpl> @@ -54294,15 +57203,17 @@ abstract class AssistantToolsFunction extends AssistantTools { factory AssistantToolsFunction.fromJson(Map json) = _$AssistantToolsFunctionImpl.fromJson; - @override - /// The type of tool being defined: `function` + @override String get type; /// A function that the model may call. FunctionObject get function; + + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantToolsFunctionImplCopyWith<_$AssistantToolsFunctionImpl> get copyWith => throw _privateConstructorUsedError; } @@ -54314,7 +57225,7 @@ AssistantToolsFileSearchFileSearch _$AssistantToolsFileSearchFileSearchFromJson( /// @nodoc mixin _$AssistantToolsFileSearchFileSearch { - /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search @@ -54322,8 +57233,12 @@ mixin _$AssistantToolsFileSearchFileSearch { @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults => throw _privateConstructorUsedError; + /// Serializes this AssistantToolsFileSearchFileSearch to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $AssistantToolsFileSearchFileSearchCopyWith< AssistantToolsFileSearchFileSearch> get copyWith => throw _privateConstructorUsedError; @@ -54353,6 +57268,8 @@ class _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -54391,6 +57308,8 @@ class __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl<$Res> $Res Function(_$AssistantToolsFileSearchFileSearchImpl) _then) : super(_value, _then); + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -54418,7 +57337,7 @@ class _$AssistantToolsFileSearchFileSearchImpl Map json) => _$$AssistantToolsFileSearchFileSearchImplFromJson(json); - /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// The maximum number of results the file search tool should output. 
The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search @@ -54441,11 +57360,13 @@ class _$AssistantToolsFileSearchFileSearchImpl other.maxNumResults == maxNumResults)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, maxNumResults); - @JsonKey(ignore: true) + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantToolsFileSearchFileSearchImplCopyWith< @@ -54472,17 +57393,19 @@ abstract class _AssistantToolsFileSearchFileSearch Map json) = _$AssistantToolsFileSearchFileSearchImpl.fromJson; - @override - - /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + @override @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults; + + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantToolsFileSearchFileSearchImplCopyWith< _$AssistantToolsFileSearchFileSearchImpl> get copyWith => throw _privateConstructorUsedError; @@ -54563,8 +57486,13 @@ mixin _$MessageContent { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this MessageContent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -54588,6 +57516,8 @@ class _$MessageContentCopyWithImpl<$Res, $Val extends MessageContent> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -54628,6 +57558,8 @@ class __$$MessageContentImageFileObjectImplCopyWithImpl<$Res> $Res Function(_$MessageContentImageFileObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -54646,6 +57578,8 @@ class __$$MessageContentImageFileObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $MessageContentImageFileCopyWith<$Res> get imageFile { @@ -54693,11 +57627,13 @@ class _$MessageContentImageFileObjectImpl other.imageFile == imageFile)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, imageFile); - @JsonKey(ignore: true) + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentImageFileObjectImplCopyWith< @@ -54804,16 +57740,18 @@ abstract class MessageContentImageFileObject extends MessageContent { factory MessageContentImageFileObject.fromJson(Map json) = _$MessageContentImageFileObjectImpl.fromJson; - @override - /// Always `image_file`. + @override String get type; /// The image file that is part of a message. @JsonKey(name: 'image_file') MessageContentImageFile get imageFile; + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentImageFileObjectImplCopyWith< _$MessageContentImageFileObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -54845,6 +57783,8 @@ class __$$MessageContentImageUrlObjectImplCopyWithImpl<$Res> $Res Function(_$MessageContentImageUrlObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -54863,6 +57803,8 @@ class __$$MessageContentImageUrlObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageContentImageUrlCopyWith<$Res> get imageUrl { @@ -54909,11 +57851,13 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { other.imageUrl == imageUrl)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, imageUrl); - @JsonKey(ignore: true) + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentImageUrlObjectImplCopyWith< @@ -55020,16 +57964,18 @@ abstract class MessageContentImageUrlObject extends MessageContent { factory MessageContentImageUrlObject.fromJson(Map json) = _$MessageContentImageUrlObjectImpl.fromJson; - @override - /// The type of the content part. Always `image_url`. + @override String get type; /// The image URL part of a message. @JsonKey(name: 'image_url') MessageContentImageUrl get imageUrl; + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentImageUrlObjectImplCopyWith< _$MessageContentImageUrlObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -55058,6 +58004,8 @@ class __$$MessageContentTextObjectImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -55076,6 +58024,8 @@ class __$$MessageContentTextObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageContentTextCopyWith<$Res> get text { @@ -55117,11 +58067,13 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, text); - @JsonKey(ignore: true) + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextObjectImplCopyWith<_$MessageContentTextObjectImpl> @@ -55225,15 +58177,17 @@ abstract class MessageContentTextObject extends MessageContent { factory MessageContentTextObject.fromJson(Map json) = _$MessageContentTextObjectImpl.fromJson; - @override - /// Always `text`. + @override String get type; /// The text content that is part of a message. MessageContentText get text; + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextObjectImplCopyWith<_$MessageContentTextObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -55318,8 +58272,13 @@ mixin _$MessageDeltaContent { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this MessageDeltaContent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -55343,6 +58302,8 @@ class _$MessageDeltaContentCopyWithImpl<$Res, $Val extends MessageDeltaContent> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -55390,6 +58351,8 @@ class __$$MessageDeltaContentImageFileObjectImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentImageFileObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -55413,6 +58376,8 @@ class __$$MessageDeltaContentImageFileObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageContentImageFileCopyWith<$Res>? get imageFile { @@ -55469,11 +58434,13 @@ class _$MessageDeltaContentImageFileObjectImpl other.imageFile == imageFile)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, type, imageFile); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentImageFileObjectImplCopyWith< @@ -55586,20 +58553,22 @@ abstract class MessageDeltaContentImageFileObject extends MessageDeltaContent { Map json) = _$MessageDeltaContentImageFileObjectImpl.fromJson; - @override - /// The index of the content part in the message. - int get index; @override + int get index; /// Always `image_file`. + @override String get type; /// The image file that is part of a message. @JsonKey(name: 'image_file', includeIfNull: false) MessageContentImageFile? get imageFile; + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentImageFileObjectImplCopyWith< _$MessageDeltaContentImageFileObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -55632,6 +58601,8 @@ class __$$MessageDeltaContentTextObjectImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentTextObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -55655,6 +58626,8 @@ class __$$MessageDeltaContentTextObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageDeltaContentTextCopyWith<$Res>? get text { @@ -55710,11 +58683,13 @@ class _$MessageDeltaContentTextObjectImpl (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, type, text); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextObjectImplCopyWith< @@ -55825,20 +58800,22 @@ abstract class MessageDeltaContentTextObject extends MessageDeltaContent { factory MessageDeltaContentTextObject.fromJson(Map json) = _$MessageDeltaContentTextObjectImpl.fromJson; - @override - /// The index of the content part in the message. - int get index; @override + int get index; /// Always `text`. + @override String get type; /// The text content that is part of a message. @JsonKey(includeIfNull: false) MessageDeltaContentText? get text; + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextObjectImplCopyWith< _$MessageDeltaContentTextObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -55963,8 +58940,13 @@ mixin _$MessageContentTextAnnotations { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this MessageContentTextAnnotations to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentTextAnnotationsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -55995,6 +58977,8 @@ class _$MessageContentTextAnnotationsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -56057,6 +59041,8 @@ class __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -56090,6 +59076,8 @@ class __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageContentTextAnnotationsFileCitationCopyWith<$Res> get fileCitation { @@ -56159,12 +59147,14 @@ class _$MessageContentTextAnnotationsFileCitationObjectImpl other.endIndex == endIndex)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, text, fileCitation, startIndex, endIndex); - @JsonKey(ignore: true) + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< @@ -56310,30 +59300,32 @@ abstract class MessageContentTextAnnotationsFileCitationObject Map json) = _$MessageContentTextAnnotationsFileCitationObjectImpl.fromJson; - @override - /// Always `file_citation`. - String get type; @override + String get type; /// The text in the message content that needs to be replaced. + @override String get text; /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. @JsonKey(name: 'file_citation') MessageContentTextAnnotationsFileCitation get fileCitation; - @override /// The start index of the text in the message content that needs to be replaced. + @override @JsonKey(name: 'start_index') int get startIndex; - @override /// The end index of the text in the message content that needs to be replaced. + @override @JsonKey(name: 'end_index') int get endIndex; + + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< _$MessageContentTextAnnotationsFileCitationObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -56371,6 +59363,8 @@ class __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextAnnotationsFilePathObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -56404,6 +59398,8 @@ class __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageContentTextAnnotationsFilePathCopyWith<$Res> get filePath { @@ -56473,12 +59469,14 @@ class _$MessageContentTextAnnotationsFilePathObjectImpl other.endIndex == endIndex)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, text, filePath, startIndex, endIndex); - @JsonKey(ignore: true) + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith< @@ -56623,30 +59621,32 @@ abstract class MessageContentTextAnnotationsFilePathObject Map json) = _$MessageContentTextAnnotationsFilePathObjectImpl.fromJson; - @override - /// Always `file_path`. - String get type; @override + String get type; /// The text in the message content that needs to be replaced. + @override String get text; /// No Description @JsonKey(name: 'file_path') MessageContentTextAnnotationsFilePath get filePath; - @override /// No Description + @override @JsonKey(name: 'start_index') int get startIndex; - @override /// No Description + @override @JsonKey(name: 'end_index') int get endIndex; + + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith< _$MessageContentTextAnnotationsFilePathObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -56663,8 +59663,12 @@ mixin _$MessageContentTextAnnotationsFilePath { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; + /// Serializes this MessageContentTextAnnotationsFilePath to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentTextAnnotationsFilePathCopyWith< MessageContentTextAnnotationsFilePath> get copyWith => throw _privateConstructorUsedError; @@ -56692,6 +59696,8 @@ class _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -56728,6 +59734,8 @@ class __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextAnnotationsFilePathImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -56772,11 +59780,13 @@ class _$MessageContentTextAnnotationsFilePathImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextAnnotationsFilePathImplCopyWith< @@ -56803,13 +59813,15 @@ abstract class _MessageContentTextAnnotationsFilePath Map json) = _$MessageContentTextAnnotationsFilePathImpl.fromJson; - @override - /// The ID of the file that was generated. + @override @JsonKey(name: 'file_id') String get fileId; + + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextAnnotationsFilePathImplCopyWith< _$MessageContentTextAnnotationsFilePathImpl> get copyWith => throw _privateConstructorUsedError; @@ -56948,8 +59960,13 @@ mixin _$MessageDeltaContentTextAnnotations { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this MessageDeltaContentTextAnnotations to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentTextAnnotationsCopyWith< MessageDeltaContentTextAnnotations> get copyWith => throw _privateConstructorUsedError; @@ -56982,6 +59999,8 @@ class _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -57055,6 +60074,8 @@ class __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -57093,6 +60114,8 @@ class __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< )); } + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>? @@ -57175,12 +60198,14 @@ class _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl other.endIndex == endIndex)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, index, type, text, fileCitation, startIndex, endIndex); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< @@ -57339,35 +60364,37 @@ abstract class MessageDeltaContentTextAnnotationsFileCitationObject Map json) = _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl.fromJson; - @override - /// The index of the annotation in the text content part. - int get index; @override + int get index; /// Always `file_citation`. - String get type; @override + String get type; /// The text in the message content that needs to be replaced. + @override @JsonKey(includeIfNull: false) String? get text; /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. @JsonKey(name: 'file_citation', includeIfNull: false) MessageDeltaContentTextAnnotationsFileCitation? get fileCitation; - @override /// The start index of the text in the message content that needs to be replaced. + @override @JsonKey(name: 'start_index', includeIfNull: false) int? get startIndex; - @override /// The end index of the text in the message content that needs to be replaced. + @override @JsonKey(name: 'end_index', includeIfNull: false) int? get endIndex; + + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -57409,6 +60436,8 @@ class __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -57447,6 +60476,8 @@ class __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith<$Res>? @@ -57528,12 +60559,14 @@ class _$MessageDeltaContentTextAnnotationsFilePathObjectImpl other.endIndex == endIndex)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, index, type, text, filePath, startIndex, endIndex); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< @@ -57690,35 +60723,37 @@ abstract class MessageDeltaContentTextAnnotationsFilePathObject Map json) = _$MessageDeltaContentTextAnnotationsFilePathObjectImpl.fromJson; - @override - /// The index of the annotation in the text content part. - int get index; @override + int get index; /// Always `file_path`. - String get type; @override + String get type; /// The text in the message content that needs to be replaced. + @override @JsonKey(includeIfNull: false) String? 
get text; /// No Description @JsonKey(name: 'file_path', includeIfNull: false) MessageDeltaContentTextAnnotationsFilePathObjectFilePath? get filePath; - @override /// No Description + @override @JsonKey(name: 'start_index', includeIfNull: false) int? get startIndex; - @override /// No Description + @override @JsonKey(name: 'end_index', includeIfNull: false) int? get endIndex; + + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -57737,8 +60772,12 @@ mixin _$MessageDeltaContentTextAnnotationsFilePathObjectFilePath { @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaContentTextAnnotationsFilePathObjectFilePath to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< MessageDeltaContentTextAnnotationsFilePathObjectFilePath> get copyWith => throw _privateConstructorUsedError; @@ -57773,6 +60812,8 @@ class _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -57820,6 +60861,8 @@ class __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithIm _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -57866,11 +60909,13 @@ class _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< @@ -57901,13 +60946,15 @@ abstract class _MessageDeltaContentTextAnnotationsFilePathObjectFilePath Map json) = _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl.fromJson; - @override - /// The ID of the file that was generated. + @override @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; + + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> get copyWith => throw _privateConstructorUsedError; @@ -57995,8 +61042,13 @@ mixin _$RunStepDetails { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -58020,6 +61072,8 @@ class _$RunStepDetailsCopyWithImpl<$Res, $Val extends RunStepDetails> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -58061,6 +61115,8 @@ class __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsMessageCreationObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -58079,6 +61135,8 @@ class __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDetailsMessageCreationCopyWith<$Res> get messageCreation { @@ -58126,11 +61184,13 @@ class _$RunStepDetailsMessageCreationObjectImpl other.messageCreation == messageCreation)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, messageCreation); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsMessageCreationObjectImplCopyWith< @@ -58247,16 +61307,18 @@ abstract class RunStepDetailsMessageCreationObject extends RunStepDetails { Map json) = _$RunStepDetailsMessageCreationObjectImpl.fromJson; - @override - /// Always `message_creation`. + @override String get type; /// Details of the message creation by the run step. @JsonKey(name: 'message_creation') RunStepDetailsMessageCreation get messageCreation; + + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsMessageCreationObjectImplCopyWith< _$RunStepDetailsMessageCreationObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -58286,6 +61348,8 @@ class __$$RunStepDetailsToolCallsObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -58351,12 +61415,14 @@ class _$RunStepDetailsToolCallsObjectImpl .equals(other._toolCalls, _toolCalls)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, type, const DeepCollectionEquality().hash(_toolCalls)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsObjectImplCopyWith< @@ -58472,16 +61538,18 @@ abstract class RunStepDetailsToolCallsObject extends RunStepDetails { factory RunStepDetailsToolCallsObject.fromJson(Map json) = _$RunStepDetailsToolCallsObjectImpl.fromJson; - @override - /// Always `tool_calls`. + @override String get type; /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. @JsonKey(name: 'tool_calls') List get toolCalls; + + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsObjectImplCopyWith< _$RunStepDetailsToolCallsObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -58571,8 +61639,13 @@ mixin _$RunStepDeltaDetails { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDeltaDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -58596,6 +61669,8 @@ class _$RunStepDeltaDetailsCopyWithImpl<$Res, $Val extends RunStepDeltaDetails> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -58639,6 +61714,8 @@ class __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsMessageCreationObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -58657,6 +61734,8 @@ class __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>? get messageCreation { @@ -58709,11 +61788,13 @@ class _$RunStepDeltaStepDetailsMessageCreationObjectImpl other.messageCreation == messageCreation)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, messageCreation); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith< @@ -58835,16 +61916,18 @@ abstract class RunStepDeltaStepDetailsMessageCreationObject Map json) = _$RunStepDeltaStepDetailsMessageCreationObjectImpl.fromJson; - @override - /// Always `message_creation`. + @override String get type; /// Details of the message creation by the run step. @JsonKey(name: 'message_creation', includeIfNull: false) RunStepDeltaStepDetailsMessageCreation? get messageCreation; + + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith< _$RunStepDeltaStepDetailsMessageCreationObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -58875,6 +61958,8 @@ class __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsToolCallsObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -58942,12 +62027,14 @@ class _$RunStepDeltaStepDetailsToolCallsObjectImpl .equals(other._toolCalls, _toolCalls)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, type, const DeepCollectionEquality().hash(_toolCalls)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith< @@ -59068,16 +62155,18 @@ abstract class RunStepDeltaStepDetailsToolCallsObject Map json) = _$RunStepDeltaStepDetailsToolCallsObjectImpl.fromJson; - @override - /// Always `tool_calls`. + @override String get type; /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. @JsonKey(name: 'tool_calls', includeIfNull: false) List? get toolCalls; + + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith< _$RunStepDeltaStepDetailsToolCallsObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -59181,8 +62270,13 @@ mixin _$RunStepDetailsToolCalls { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCalls to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsToolCallsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -59207,6 +62301,8 @@ class _$RunStepDetailsToolCallsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -59255,6 +62351,8 @@ class __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsCodeObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -59278,6 +62376,8 @@ class __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res> @@ -59332,11 +62432,13 @@ class _$RunStepDetailsToolCallsCodeObjectImpl other.codeInterpreter == codeInterpreter)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, type, codeInterpreter); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeObjectImplCopyWith< @@ -59465,20 +62567,22 @@ abstract class RunStepDetailsToolCallsCodeObject Map json) = _$RunStepDetailsToolCallsCodeObjectImpl.fromJson; - @override - /// The ID of the tool call. - String get id; @override + String get id; /// Always `code_interpreter`. + @override String get type; /// The Code Interpreter tool call definition. @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter get codeInterpreter; + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsCodeObjectImplCopyWith< _$RunStepDetailsToolCallsCodeObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -59509,6 +62613,8 @@ class __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsFileSearchObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -59585,12 +62691,14 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl .equals(other._fileSearch, _fileSearch)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, id, type, const DeepCollectionEquality().hash(_fileSearch)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith< @@ -59720,20 +62828,22 @@ abstract class RunStepDetailsToolCallsFileSearchObject Map json) = _$RunStepDetailsToolCallsFileSearchObjectImpl.fromJson; - @override - /// The ID of the tool call object. - String get id; @override + String get id; /// The type of tool call. This is always going to be `file_search` for this type of tool call. + @override String get type; /// For now, this is always going to be an empty object. 
@JsonKey(name: 'file_search') Map get fileSearch; + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith< _$RunStepDetailsToolCallsFileSearchObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -59763,6 +62873,8 @@ class __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsFunctionObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -59786,6 +62898,8 @@ class __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDetailsToolCallsFunctionCopyWith<$Res> get function { @@ -59836,11 +62950,13 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith< @@ -59968,19 +63084,21 @@ abstract class RunStepDetailsToolCallsFunctionObject Map json) = _$RunStepDetailsToolCallsFunctionObjectImpl.fromJson; - @override - /// The ID of the tool call object. - String get id; @override + String get id; /// Always `function`. + @override String get type; /// The definition of the function that was called. RunStepDetailsToolCallsFunction get function; + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith< _$RunStepDetailsToolCallsFunctionObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -60002,8 +63120,12 @@ mixin _$RunStepDetailsToolCallsFunction { /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. String? get output => throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsToolCallsFunction to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsToolCallsFunctionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -60030,6 +63152,8 @@ class _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -60076,6 +63200,8 @@ class __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsFunctionImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -60140,11 +63266,13 @@ class _$RunStepDetailsToolCallsFunctionImpl (identical(other.output, output) || other.output == output)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments, output); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsFunctionImplCopyWith< @@ -60171,20 +63299,22 @@ abstract class _RunStepDetailsToolCallsFunction factory _RunStepDetailsToolCallsFunction.fromJson(Map json) = _$RunStepDetailsToolCallsFunctionImpl.fromJson; - @override - /// The name of the function. - String get name; @override + String get name; /// The arguments passed to the function. - String get arguments; @override + String get arguments; /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. + @override String? get output; + + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsFunctionImplCopyWith< _$RunStepDetailsToolCallsFunctionImpl> get copyWith => throw _privateConstructorUsedError; @@ -60329,8 +63459,13 @@ mixin _$RunStepDeltaStepDetailsToolCalls { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDeltaStepDetailsToolCalls to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsToolCallsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -60358,6 +63493,8 @@ class _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -60414,6 +63551,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -60442,6 +63581,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>? 
@@ -60509,12 +63650,14 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl other.codeInterpreter == codeInterpreter)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, id, type, codeInterpreter); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith< @@ -60680,26 +63823,28 @@ abstract class RunStepDeltaStepDetailsToolCallsCodeObject Map json) = _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl.fromJson; - @override - /// The index of the tool call in the tool calls array. - int get index; @override + int get index; /// The ID of the tool call. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// Always `code_interpreter`. + @override String get type; /// The Code Interpreter tool call definition. - outputs @JsonKey(name: 'code_interpreter', includeIfNull: false) RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? get codeInterpreter; + + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -60735,6 +63880,8 @@ class __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -60823,12 +63970,14 @@ class _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl .equals(other._fileSearch, _fileSearch)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, id, type, const DeepCollectionEquality().hash(_fileSearch)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< @@ -60994,25 +64143,27 @@ abstract class RunStepDeltaStepDetailsToolCallsFileSearchObject Map json) = _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl.fromJson; - @override - /// The index of the tool call in the tool calls array. - int get index; @override + int get index; /// The ID of the tool call object. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// The type of tool call. This is always going to be `file_search` for this type of tool call. + @override String get type; /// For now, this is always going to be an empty object. @JsonKey(name: 'file_search') Map get fileSearch; + + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -61049,6 +64200,8 @@ class __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -61077,6 +64230,8 @@ class __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>? get function { @@ -61141,11 +64296,13 @@ class _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< @@ -61311,25 +64468,27 @@ abstract class RunStepDeltaStepDetailsToolCallsFunctionObject Map json) = _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl.fromJson; - @override - /// The index of the tool call in the tool calls array. - int get index; @override + int get index; /// The ID of the tool call object. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// Always `function`. + @override String get type; /// The definition of the function that was called. @JsonKey(includeIfNull: false) RunStepDeltaStepDetailsToolCallsFunction? get function; + + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -61355,8 +64514,12 @@ mixin _$RunStepDeltaStepDetailsToolCallsFunction { @JsonKey(includeIfNull: false) String? get output => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaStepDetailsToolCallsFunction to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsToolCallsFunctionCopyWith< RunStepDeltaStepDetailsToolCallsFunction> get copyWith => throw _privateConstructorUsedError; @@ -61388,6 +64551,8 @@ class _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -61437,6 +64602,8 @@ class __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -61506,11 +64673,13 @@ class _$RunStepDeltaStepDetailsToolCallsFunctionImpl (identical(other.output, output) || other.output == output)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments, output); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith< @@ -61540,23 +64709,25 @@ abstract class _RunStepDeltaStepDetailsToolCallsFunction Map json) = _$RunStepDeltaStepDetailsToolCallsFunctionImpl.fromJson; - @override - /// The name of the function. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The arguments passed to the function. + @override @JsonKey(includeIfNull: false) String? get arguments; - @override /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. + @override @JsonKey(includeIfNull: false) String? get output; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith< _$RunStepDeltaStepDetailsToolCallsFunctionImpl> get copyWith => throw _privateConstructorUsedError; @@ -61630,8 +64801,13 @@ mixin _$RunStepDetailsToolCallsCodeOutput { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCallsCodeOutput to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsToolCallsCodeOutputCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -61658,6 +64834,8 @@ class _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -61696,6 +64874,8 @@ class __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -61749,11 +64929,13 @@ class _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl (identical(other.logs, logs) || other.logs == logs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, logs); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< @@ -61853,15 +65035,17 @@ abstract class RunStepDetailsToolCallsCodeOutputLogsObject Map json) = _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson; - @override - /// Always `logs`. + @override String get type; /// The text output from the Code Interpreter tool call. String get logs; + + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -61893,6 +65077,8 @@ class __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -61911,6 +65097,8 @@ class __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl<$Res> )); } + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res> get image { @@ -61955,11 +65143,13 @@ class _$RunStepDetailsToolCallsCodeOutputImageObjectImpl (identical(other.image, image) || other.image == image)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, image); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< @@ -62060,15 +65250,17 @@ abstract class RunStepDetailsToolCallsCodeOutputImageObject Map json) = _$RunStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson; - @override - /// Always `image`. + @override String get type; /// Code interpreter image output. RunStepDetailsToolCallsCodeOutputImage get image; + + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -62172,8 +65364,13 @@ mixin _$RunStepDeltaStepDetailsToolCallsCodeOutput { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDeltaStepDetailsToolCallsCodeOutput to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith< RunStepDeltaStepDetailsToolCallsCodeOutput> get copyWith => throw _privateConstructorUsedError; @@ -62202,6 +65399,8 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -62251,6 +65450,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -62318,11 +65519,13 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl (identical(other.logs, logs) || other.logs == logs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, type, logs); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< @@ -62448,20 +65651,22 @@ abstract class RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject Map json) = _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson; - @override - /// The index of the output in the outputs array. - int get index; @override + int get index; /// Always `logs`. + @override String get type; /// The text output from the Code Interpreter tool call. @JsonKey(includeIfNull: false) String? get logs; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -62502,6 +65707,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -62525,6 +65732,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< )); } + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>? get image { @@ -62583,11 +65792,13 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl (identical(other.image, image) || other.image == image)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, type, image); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< @@ -62714,20 +65925,22 @@ abstract class RunStepDeltaStepDetailsToolCallsCodeOutputImageObject Map json) = _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson; - @override - /// The index of the output in the outputs array. - int get index; @override + int get index; /// Always `image`. + @override String get type; /// Code interpreter image output. @JsonKey(includeIfNull: false) RunStepDeltaStepDetailsToolCallsCodeOutputImage? get image; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -62793,8 +66006,13 @@ mixin _$ChunkingStrategyRequestParam { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChunkingStrategyRequestParam to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChunkingStrategyRequestParamCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -62821,6 +66039,8 @@ class _$ChunkingStrategyRequestParamCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -62857,6 +66077,8 @@ class __$$AutoChunkingStrategyRequestParamImplCopyWithImpl<$Res> $Res Function(_$AutoChunkingStrategyRequestParamImpl) _then) : super(_value, _then); + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -62899,11 +66121,13 @@ class _$AutoChunkingStrategyRequestParamImpl (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AutoChunkingStrategyRequestParamImplCopyWith< @@ -62991,12 +66215,14 @@ abstract class AutoChunkingStrategyRequestParam factory AutoChunkingStrategyRequestParam.fromJson(Map json) = _$AutoChunkingStrategyRequestParamImpl.fromJson; - @override - /// Always `auto`. + @override String get type; + + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AutoChunkingStrategyRequestParamImplCopyWith< _$AutoChunkingStrategyRequestParamImpl> get copyWith => throw _privateConstructorUsedError; @@ -63026,6 +66252,8 @@ class __$$StaticChunkingStrategyRequestParamImplCopyWithImpl<$Res> $Res Function(_$StaticChunkingStrategyRequestParamImpl) _then) : super(_value, _then); + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -63044,6 +66272,8 @@ class __$$StaticChunkingStrategyRequestParamImplCopyWithImpl<$Res> )); } + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $StaticChunkingStrategyCopyWith<$Res> get static { @@ -63087,11 +66317,13 @@ class _$StaticChunkingStrategyRequestParamImpl (identical(other.static, static) || other.static == static)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, static); - @JsonKey(ignore: true) + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$StaticChunkingStrategyRequestParamImplCopyWith< @@ -63182,15 +66414,17 @@ abstract class StaticChunkingStrategyRequestParam Map json) = _$StaticChunkingStrategyRequestParamImpl.fromJson; - @override - /// Always `static`. + @override String get type; /// Static chunking strategy StaticChunkingStrategy get static; + + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$StaticChunkingStrategyRequestParamImplCopyWith< _$StaticChunkingStrategyRequestParamImpl> get copyWith => throw _privateConstructorUsedError; @@ -63256,8 +66490,13 @@ mixin _$ChunkingStrategyResponseParam { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChunkingStrategyResponseParam to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChunkingStrategyResponseParamCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -63284,6 +66523,8 @@ class _$ChunkingStrategyResponseParamCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -63322,6 +66563,8 @@ class __$$StaticChunkingStrategyResponseParamImplCopyWithImpl<$Res> $Res Function(_$StaticChunkingStrategyResponseParamImpl) _then) : super(_value, _then); + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -63340,6 +66583,8 @@ class __$$StaticChunkingStrategyResponseParamImplCopyWithImpl<$Res> )); } + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $StaticChunkingStrategyCopyWith<$Res> get static { @@ -63383,11 +66628,13 @@ class _$StaticChunkingStrategyResponseParamImpl (identical(other.static, static) || other.static == static)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, static); - @JsonKey(ignore: true) + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$StaticChunkingStrategyResponseParamImplCopyWith< @@ -63478,15 +66725,17 @@ abstract class StaticChunkingStrategyResponseParam Map json) = _$StaticChunkingStrategyResponseParamImpl.fromJson; - @override - /// Always `static`. + @override String get type; /// Static chunking strategy StaticChunkingStrategy get static; + + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$StaticChunkingStrategyResponseParamImplCopyWith< _$StaticChunkingStrategyResponseParamImpl> get copyWith => throw _privateConstructorUsedError; @@ -63514,6 +66763,8 @@ class __$$OtherChunkingStrategyResponseParamImplCopyWithImpl<$Res> $Res Function(_$OtherChunkingStrategyResponseParamImpl) _then) : super(_value, _then); + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -63556,11 +66807,13 @@ class _$OtherChunkingStrategyResponseParamImpl (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$OtherChunkingStrategyResponseParamImplCopyWith< @@ -63649,12 +66902,14 @@ abstract class OtherChunkingStrategyResponseParam Map json) = _$OtherChunkingStrategyResponseParamImpl.fromJson; - @override - /// Always `other`. + @override String get type; + + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$OtherChunkingStrategyResponseParamImplCopyWith< _$OtherChunkingStrategyResponseParamImpl> get copyWith => throw _privateConstructorUsedError; @@ -63777,8 +67032,13 @@ mixin _$AssistantStreamEvent { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this AssistantStreamEvent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $AssistantStreamEventCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -63803,6 +67063,8 @@ class _$AssistantStreamEventCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -63838,6 +67100,8 @@ class __$$ThreadStreamEventImplCopyWithImpl<$Res> $Res Function(_$ThreadStreamEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -63856,6 +67120,8 @@ class __$$ThreadStreamEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ThreadObjectCopyWith<$Res> get data { @@ -63896,11 +67162,13 @@ class _$ThreadStreamEventImpl extends ThreadStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ThreadStreamEventImplCopyWith<_$ThreadStreamEventImpl> get copyWith => @@ -64033,16 +67301,18 @@ abstract class ThreadStreamEvent extends AssistantStreamEvent { factory ThreadStreamEvent.fromJson(Map json) = _$ThreadStreamEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a thread that contains [messages](https://platform.openai.com/docs/api-reference/messages). + @override ThreadObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ThreadStreamEventImplCopyWith<_$ThreadStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -64068,6 +67338,8 @@ class __$$RunStreamEventImplCopyWithImpl<$Res> _$RunStreamEventImpl _value, $Res Function(_$RunStreamEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -64086,6 +67358,8 @@ class __$$RunStreamEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $RunObjectCopyWith<$Res> get data { @@ -64126,11 +67400,13 @@ class _$RunStreamEventImpl extends RunStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStreamEventImplCopyWith<_$RunStreamEventImpl> get copyWith => @@ -64263,16 +67539,18 @@ abstract class RunStreamEvent extends AssistantStreamEvent { factory RunStreamEvent.fromJson(Map json) = _$RunStreamEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents an execution run on a [thread](https://platform.openai.com/docs/api-reference/threads). + @override RunObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStreamEventImplCopyWith<_$RunStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -64298,6 +67576,8 @@ class __$$RunStepStreamEventImplCopyWithImpl<$Res> $Res Function(_$RunStepStreamEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -64316,6 +67596,8 @@ class __$$RunStepStreamEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepObjectCopyWith<$Res> get data { @@ -64356,11 +67638,13 @@ class _$RunStepStreamEventImpl extends RunStepStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepStreamEventImplCopyWith<_$RunStepStreamEventImpl> get copyWith => @@ -64493,16 +67777,18 @@ abstract class RunStepStreamEvent extends AssistantStreamEvent { factory RunStepStreamEvent.fromJson(Map json) = _$RunStepStreamEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a step in execution of a run. + @override RunStepObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepStreamEventImplCopyWith<_$RunStepStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -64531,6 +67817,8 @@ class __$$RunStepStreamDeltaEventImplCopyWithImpl<$Res> $Res Function(_$RunStepStreamDeltaEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -64549,6 +67837,8 @@ class __$$RunStepStreamDeltaEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaObjectCopyWith<$Res> get data { @@ -64589,11 +67879,13 @@ class _$RunStepStreamDeltaEventImpl extends RunStepStreamDeltaEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepStreamDeltaEventImplCopyWith<_$RunStepStreamDeltaEventImpl> @@ -64726,16 +68018,18 @@ abstract class RunStepStreamDeltaEvent extends AssistantStreamEvent { factory RunStepStreamDeltaEvent.fromJson(Map json) = _$RunStepStreamDeltaEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a run step delta i.e. any changed fields on a run step during streaming. + @override RunStepDeltaObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepStreamDeltaEventImplCopyWith<_$RunStepStreamDeltaEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -64761,6 +68055,8 @@ class __$$MessageStreamEventImplCopyWithImpl<$Res> $Res Function(_$MessageStreamEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -64779,6 +68075,8 @@ class __$$MessageStreamEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageObjectCopyWith<$Res> get data { @@ -64819,11 +68117,13 @@ class _$MessageStreamEventImpl extends MessageStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageStreamEventImplCopyWith<_$MessageStreamEventImpl> get copyWith => @@ -64956,16 +68256,18 @@ abstract class MessageStreamEvent extends AssistantStreamEvent { factory MessageStreamEvent.fromJson(Map json) = _$MessageStreamEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a message within a [thread](https://platform.openai.com/docs/api-reference/threads). + @override MessageObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageStreamEventImplCopyWith<_$MessageStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -64994,6 +68296,8 @@ class __$$MessageStreamDeltaEventImplCopyWithImpl<$Res> $Res Function(_$MessageStreamDeltaEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -65012,6 +68316,8 @@ class __$$MessageStreamDeltaEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageDeltaObjectCopyWith<$Res> get data { @@ -65052,11 +68358,13 @@ class _$MessageStreamDeltaEventImpl extends MessageStreamDeltaEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageStreamDeltaEventImplCopyWith<_$MessageStreamDeltaEventImpl> @@ -65189,16 +68497,18 @@ abstract class MessageStreamDeltaEvent extends AssistantStreamEvent { factory MessageStreamDeltaEvent.fromJson(Map json) = _$MessageStreamDeltaEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a message delta i.e. any changed fields on a message during streaming. + @override MessageDeltaObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageStreamDeltaEventImplCopyWith<_$MessageStreamDeltaEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -65224,6 +68534,8 @@ class __$$ErrorEventImplCopyWithImpl<$Res> _$ErrorEventImpl _value, $Res Function(_$ErrorEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -65242,6 +68554,8 @@ class __$$ErrorEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ErrorCopyWith<$Res> get data { @@ -65281,11 +68595,13 @@ class _$ErrorEventImpl extends ErrorEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ErrorEventImplCopyWith<_$ErrorEventImpl> get copyWith => @@ -65417,16 +68733,18 @@ abstract class ErrorEvent extends AssistantStreamEvent { factory ErrorEvent.fromJson(Map json) = _$ErrorEventImpl.fromJson; - @override - /// The type of the event. 
- EventType get event; @override + EventType get event; /// Represents an error that occurred during an API request. + @override Error get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ErrorEventImplCopyWith<_$ErrorEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -65450,6 +68768,8 @@ class __$$DoneEventImplCopyWithImpl<$Res> _$DoneEventImpl _value, $Res Function(_$DoneEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -65499,11 +68819,13 @@ class _$DoneEventImpl extends DoneEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DoneEventImplCopyWith<_$DoneEventImpl> get copyWith => @@ -65635,16 +68957,18 @@ abstract class DoneEvent extends AssistantStreamEvent { factory DoneEvent.fromJson(Map json) = _$DoneEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// No Description + @override String get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DoneEventImplCopyWith<_$DoneEventImpl> get copyWith => throw _privateConstructorUsedError; } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 191f05e7..63581c97 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -13,17 +13,17 @@ _$CreateCompletionRequestImpl _$$CreateCompletionRequestImplFromJson( _$CreateCompletionRequestImpl( model: const _CompletionModelConverter().fromJson(json['model']), prompt: const _CompletionPromptConverter().fromJson(json['prompt']), - bestOf: json['best_of'] as int?, + bestOf: (json['best_of'] as num?)?.toInt(), echo: json['echo'] as bool? ?? false, frequencyPenalty: (json['frequency_penalty'] as num?)?.toDouble() ?? 0.0, logitBias: (json['logit_bias'] as Map?)?.map( - (k, e) => MapEntry(k, e as int), + (k, e) => MapEntry(k, (e as num).toInt()), ), - logprobs: json['logprobs'] as int?, - maxTokens: json['max_tokens'] as int? ?? 16, - n: json['n'] as int? ?? 1, + logprobs: (json['logprobs'] as num?)?.toInt(), + maxTokens: (json['max_tokens'] as num?)?.toInt() ?? 16, + n: (json['n'] as num?)?.toInt() ?? 1, presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 0.0, - seed: json['seed'] as int?, + seed: (json['seed'] as num?)?.toInt(), stop: const _CompletionStopConverter().fromJson(json['stop']), stream: json['stream'] as bool? ?? 
false, streamOptions: json['stream_options'] == null @@ -113,7 +113,8 @@ _$CompletionPromptListListIntImpl _$$CompletionPromptListListIntImplFromJson( Map json) => _$CompletionPromptListListIntImpl( (json['value'] as List) - .map((e) => (e as List).map((e) => e as int).toList()) + .map((e) => + (e as List).map((e) => (e as num).toInt()).toList()) .toList(), $type: json['runtimeType'] as String?, ); @@ -128,7 +129,7 @@ Map _$$CompletionPromptListListIntImplToJson( _$CompletionPromptListIntImpl _$$CompletionPromptListIntImplFromJson( Map json) => _$CompletionPromptListIntImpl( - (json['value'] as List).map((e) => e as int).toList(), + (json['value'] as List).map((e) => (e as num).toInt()).toList(), $type: json['runtimeType'] as String?, ); @@ -202,7 +203,7 @@ _$CreateCompletionResponseImpl _$$CreateCompletionResponseImplFromJson( choices: (json['choices'] as List) .map((e) => CompletionChoice.fromJson(e as Map)) .toList(), - created: json['created'] as int, + created: (json['created'] as num).toInt(), model: json['model'] as String, systemFingerprint: json['system_fingerprint'] as String?, object: @@ -243,7 +244,7 @@ _$CompletionChoiceImpl _$$CompletionChoiceImplFromJson( finishReason: $enumDecodeNullable( _$CompletionFinishReasonEnumMap, json['finish_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), - index: json['index'] as int, + index: (json['index'] as num).toInt(), logprobs: json['logprobs'] == null ? null : CompletionLogprobs.fromJson( @@ -270,7 +271,7 @@ _$CompletionLogprobsImpl _$$CompletionLogprobsImplFromJson( Map json) => _$CompletionLogprobsImpl( textOffset: (json['text_offset'] as List?) - ?.map((e) => e as int) + ?.map((e) => (e as num).toInt()) .toList(), tokenLogprobs: (json['token_logprobs'] as List?) ?.map((e) => (e as num?)?.toDouble()) @@ -310,18 +311,18 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( .toList(), frequencyPenalty: (json['frequency_penalty'] as num?)?.toDouble() ?? 0.0, logitBias: (json['logit_bias'] as Map?)?.map( - (k, e) => MapEntry(k, e as int), + (k, e) => MapEntry(k, (e as num).toInt()), ), logprobs: json['logprobs'] as bool?, - topLogprobs: json['top_logprobs'] as int?, - maxTokens: json['max_tokens'] as int?, - n: json['n'] as int? ?? 1, + topLogprobs: (json['top_logprobs'] as num?)?.toInt(), + maxTokens: (json['max_tokens'] as num?)?.toInt(), + n: (json['n'] as num?)?.toInt() ?? 1, presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 0.0, responseFormat: json['response_format'] == null ? 
null : ChatCompletionResponseFormat.fromJson( json['response_format'] as Map), - seed: json['seed'] as int?, + seed: (json['seed'] as num?)?.toInt(), serviceTier: $enumDecodeNullable( _$CreateChatCompletionRequestServiceTierEnumMap, json['service_tier'], unknownValue: JsonKey.nullForUndefinedEnumValue), @@ -427,6 +428,7 @@ const _$ChatCompletionModelsEnumMap = { ChatCompletionModels.gpt4VisionPreview: 'gpt-4-vision-preview', ChatCompletionModels.gpt4o: 'gpt-4o', ChatCompletionModels.gpt4o20240513: 'gpt-4o-2024-05-13', + ChatCompletionModels.gpt4o20240806: 'gpt-4o-2024-08-06', ChatCompletionModels.gpt4oMini: 'gpt-4o-mini', ChatCompletionModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', ChatCompletionModels.gpt35Turbo: 'gpt-3.5-turbo', @@ -717,7 +719,7 @@ _$CreateChatCompletionResponseImpl _$$CreateChatCompletionResponseImplFromJson( .map((e) => ChatCompletionResponseChoice.fromJson(e as Map)) .toList(), - created: json['created'] as int, + created: (json['created'] as num).toInt(), model: json['model'] as String, serviceTier: $enumDecodeNullable( _$ServiceTierEnumMap, json['service_tier'], @@ -761,7 +763,7 @@ _$ChatCompletionResponseChoiceImpl _$$ChatCompletionResponseChoiceImplFromJson( finishReason: $enumDecodeNullable( _$ChatCompletionFinishReasonEnumMap, json['finish_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), - index: json['index'] as int?, + index: (json['index'] as num?)?.toInt(), message: ChatCompletionAssistantMessage.fromJson( json['message'] as Map), logprobs: json['logprobs'] == null @@ -816,7 +818,9 @@ _$ChatCompletionTokenLogprobImpl _$$ChatCompletionTokenLogprobImplFromJson( _$ChatCompletionTokenLogprobImpl( token: json['token'] as String, logprob: (json['logprob'] as num).toDouble(), - bytes: (json['bytes'] as List?)?.map((e) => e as int).toList(), + bytes: (json['bytes'] as List?) + ?.map((e) => (e as num).toInt()) + .toList(), topLogprobs: (json['top_logprobs'] as List) .map((e) => ChatCompletionTokenTopLogprob.fromJson(e as Map)) @@ -837,8 +841,9 @@ _$ChatCompletionTokenTopLogprobImpl _$ChatCompletionTokenTopLogprobImpl( token: json['token'] as String, logprob: (json['logprob'] as num).toDouble(), - bytes: - (json['bytes'] as List?)?.map((e) => e as int).toList(), + bytes: (json['bytes'] as List?) 
+ ?.map((e) => (e as num).toInt()) + .toList(), ); Map _$$ChatCompletionTokenTopLogprobImplToJson( @@ -858,7 +863,7 @@ _$CreateChatCompletionStreamResponseImpl .map((e) => ChatCompletionStreamResponseChoice.fromJson( e as Map)) .toList(), - created: json['created'] as int?, + created: (json['created'] as num?)?.toInt(), model: json['model'] as String?, serviceTier: $enumDecodeNullable( _$ServiceTierEnumMap, json['service_tier'], @@ -904,7 +909,7 @@ _$ChatCompletionStreamResponseChoiceImpl finishReason: $enumDecodeNullable( _$ChatCompletionFinishReasonEnumMap, json['finish_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), - index: json['index'] as int?, + index: (json['index'] as num?)?.toInt(), ); Map _$$ChatCompletionStreamResponseChoiceImplToJson( @@ -1013,7 +1018,7 @@ _$ChatCompletionStreamMessageToolCallChunkImpl _$$ChatCompletionStreamMessageToolCallChunkImplFromJson( Map json) => _$ChatCompletionStreamMessageToolCallChunkImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), id: json['id'] as String?, type: $enumDecodeNullable( _$ChatCompletionStreamMessageToolCallChunkTypeEnumMap, @@ -1051,9 +1056,9 @@ const _$ChatCompletionStreamMessageToolCallChunkTypeEnumMap = { _$CompletionUsageImpl _$$CompletionUsageImplFromJson( Map json) => _$CompletionUsageImpl( - completionTokens: json['completion_tokens'] as int?, - promptTokens: json['prompt_tokens'] as int, - totalTokens: json['total_tokens'] as int, + completionTokens: (json['completion_tokens'] as num?)?.toInt(), + promptTokens: (json['prompt_tokens'] as num).toInt(), + totalTokens: (json['total_tokens'] as num).toInt(), ); Map _$$CompletionUsageImplToJson( @@ -1072,7 +1077,7 @@ _$CreateEmbeddingRequestImpl _$$CreateEmbeddingRequestImplFromJson( encodingFormat: $enumDecodeNullable( _$EmbeddingEncodingFormatEnumMap, json['encoding_format']) ?? 
EmbeddingEncodingFormat.float, - dimensions: json['dimensions'] as int?, + dimensions: (json['dimensions'] as num?)?.toInt(), user: json['user'] as String?, ); @@ -1139,7 +1144,8 @@ _$EmbeddingInputListListIntImpl _$$EmbeddingInputListListIntImplFromJson( Map json) => _$EmbeddingInputListListIntImpl( (json['value'] as List) - .map((e) => (e as List).map((e) => e as int).toList()) + .map((e) => + (e as List).map((e) => (e as num).toInt()).toList()) .toList(), $type: json['runtimeType'] as String?, ); @@ -1154,7 +1160,7 @@ Map _$$EmbeddingInputListListIntImplToJson( _$EmbeddingInputListIntImpl _$$EmbeddingInputListIntImplFromJson( Map json) => _$EmbeddingInputListIntImpl( - (json['value'] as List).map((e) => e as int).toList(), + (json['value'] as List).map((e) => (e as num).toInt()).toList(), $type: json['runtimeType'] as String?, ); @@ -1231,7 +1237,7 @@ const _$CreateEmbeddingResponseObjectEnumMap = { _$EmbeddingImpl _$$EmbeddingImplFromJson(Map json) => _$EmbeddingImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), embedding: const _EmbeddingVectorConverter().fromJson(json['embedding']), object: $enumDecode(_$EmbeddingObjectEnumMap, json['object']), ); @@ -1279,8 +1285,8 @@ Map _$$EmbeddingVectorStringImplToJson( _$EmbeddingUsageImpl _$$EmbeddingUsageImplFromJson(Map json) => _$EmbeddingUsageImpl( - promptTokens: json['prompt_tokens'] as int, - totalTokens: json['total_tokens'] as int, + promptTokens: (json['prompt_tokens'] as num).toInt(), + totalTokens: (json['total_tokens'] as num).toInt(), ); Map _$$EmbeddingUsageImplToJson( @@ -1305,7 +1311,7 @@ _$CreateFineTuningJobRequestImpl _$$CreateFineTuningJobRequestImplFromJson( ?.map( (e) => FineTuningIntegration.fromJson(e as Map)) .toList(), - seed: json['seed'] as int?, + seed: (json['seed'] as num?)?.toInt(), ); Map _$$CreateFineTuningJobRequestImplToJson( @@ -1348,6 +1354,7 @@ const _$FineTuningModelsEnumMap = { FineTuningModels.babbage002: 'babbage-002', FineTuningModels.davinci002: 'davinci-002', FineTuningModels.gpt35Turbo: 'gpt-3.5-turbo', + FineTuningModels.gpt4oMini: 'gpt-4o-mini', }; _$FineTuningModelStringImpl _$$FineTuningModelStringImplFromJson( @@ -1367,12 +1374,12 @@ Map _$$FineTuningModelStringImplToJson( _$FineTuningJobImpl _$$FineTuningJobImplFromJson(Map json) => _$FineTuningJobImpl( id: json['id'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), error: json['error'] == null ? null : FineTuningJobError.fromJson(json['error'] as Map), fineTunedModel: json['fine_tuned_model'] as String?, - finishedAt: json['finished_at'] as int?, + finishedAt: (json['finished_at'] as num?)?.toInt(), hyperparameters: FineTuningJobHyperparameters.fromJson( json['hyperparameters'] as Map), model: json['model'] as String, @@ -1382,7 +1389,7 @@ _$FineTuningJobImpl _$$FineTuningJobImplFromJson(Map json) => .map((e) => e as String) .toList(), status: $enumDecode(_$FineTuningJobStatusEnumMap, json['status']), - trainedTokens: json['trained_tokens'] as int?, + trainedTokens: (json['trained_tokens'] as num?)?.toInt(), trainingFile: json['training_file'] as String, validationFile: json['validation_file'] as String?, integrations: (json['integrations'] as List?) 
@@ -1528,7 +1535,7 @@ const _$FineTuningNEpochsOptionsEnumMap = { _$FineTuningNEpochsIntImpl _$$FineTuningNEpochsIntImplFromJson( Map json) => _$FineTuningNEpochsIntImpl( - json['value'] as int, + (json['value'] as num).toInt(), $type: json['runtimeType'] as String?, ); @@ -1632,7 +1639,7 @@ _$FineTuningJobEventImpl _$$FineTuningJobEventImplFromJson( Map json) => _$FineTuningJobEventImpl( id: json['id'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), level: $enumDecode(_$FineTuningJobEventLevelEnumMap, json['level']), message: json['message'] as String, object: $enumDecode(_$FineTuningJobEventObjectEnumMap, json['object']), @@ -1662,9 +1669,9 @@ _$FineTuningJobCheckpointImpl _$$FineTuningJobCheckpointImplFromJson( Map json) => _$FineTuningJobCheckpointImpl( id: json['id'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), fineTunedModelCheckpoint: json['fine_tuned_model_checkpoint'] as String, - stepNumber: json['step_number'] as int, + stepNumber: (json['step_number'] as num).toInt(), metrics: FineTuningJobCheckpointMetrics.fromJson( json['metrics'] as Map), fineTuningJobId: json['fine_tuning_job_id'] as String, @@ -1732,7 +1739,7 @@ _$CreateImageRequestImpl _$$CreateImageRequestImplFromJson( model: json['model'] == null ? const CreateImageRequestModelString('dall-e-2') : const _CreateImageRequestModelConverter().fromJson(json['model']), - n: json['n'] as int? ?? 1, + n: (json['n'] as num?)?.toInt() ?? 1, quality: $enumDecodeNullable(_$ImageQualityEnumMap, json['quality']) ?? ImageQuality.standard, responseFormat: $enumDecodeNullable( @@ -1831,7 +1838,7 @@ Map _$$CreateImageRequestModelStringImplToJson( _$ImagesResponseImpl _$$ImagesResponseImplFromJson(Map json) => _$ImagesResponseImpl( - created: json['created'] as int, + created: (json['created'] as num).toInt(), data: (json['data'] as List) .map((e) => Image.fromJson(e as Map)) .toList(), @@ -1867,7 +1874,7 @@ Map _$$ImageImplToJson(_$ImageImpl instance) { _$ModelImpl _$$ModelImplFromJson(Map json) => _$ModelImpl( id: json['id'] as String, - created: json['created'] as int, + created: (json['created'] as num).toInt(), object: $enumDecode(_$ModelObjectEnumMap, json['object']), ownedBy: json['owned_by'] as String, ); @@ -2109,7 +2116,7 @@ _$AssistantObjectImpl _$$AssistantObjectImplFromJson( _$AssistantObjectImpl( id: json['id'] as String, object: $enumDecode(_$AssistantObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), name: json['name'] as String?, description: json['description'] as String?, model: json['model'] as String, @@ -2277,6 +2284,7 @@ const _$AssistantModelsEnumMap = { AssistantModels.gpt4VisionPreview: 'gpt-4-vision-preview', AssistantModels.gpt4o: 'gpt-4o', AssistantModels.gpt4o20240513: 'gpt-4o-2024-05-13', + AssistantModels.gpt4o20240806: 'gpt-4o-2024-08-06', AssistantModels.gpt4oMini: 'gpt-4o-mini', AssistantModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', AssistantModels.gpt35Turbo: 'gpt-3.5-turbo', @@ -2545,7 +2553,7 @@ _$TruncationObjectImpl _$$TruncationObjectImplFromJson( Map json) => _$TruncationObjectImpl( type: $enumDecode(_$TruncationObjectTypeEnumMap, json['type']), - lastMessages: json['last_messages'] as int?, + lastMessages: (json['last_messages'] as num?)?.toInt(), ); Map _$$TruncationObjectImplToJson( @@ -2573,7 +2581,7 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => _$RunObjectImpl( id: json['id'] as String, 
object: $enumDecode(_$RunObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), threadId: json['thread_id'] as String, assistantId: json['assistant_id'] as String, status: $enumDecode(_$RunStatusEnumMap, json['status']), @@ -2584,11 +2592,11 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => lastError: json['last_error'] == null ? null : RunLastError.fromJson(json['last_error'] as Map), - expiresAt: json['expires_at'] as int?, - startedAt: json['started_at'] as int?, - cancelledAt: json['cancelled_at'] as int?, - failedAt: json['failed_at'] as int?, - completedAt: json['completed_at'] as int?, + expiresAt: (json['expires_at'] as num?)?.toInt(), + startedAt: (json['started_at'] as num?)?.toInt(), + cancelledAt: (json['cancelled_at'] as num?)?.toInt(), + failedAt: (json['failed_at'] as num?)?.toInt(), + completedAt: (json['completed_at'] as num?)?.toInt(), incompleteDetails: json['incomplete_details'] == null ? null : RunObjectIncompleteDetails.fromJson( @@ -2604,8 +2612,8 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => : RunCompletionUsage.fromJson(json['usage'] as Map), temperature: (json['temperature'] as num?)?.toDouble(), topP: (json['top_p'] as num?)?.toDouble(), - maxPromptTokens: json['max_prompt_tokens'] as int?, - maxCompletionTokens: json['max_completion_tokens'] as int?, + maxPromptTokens: (json['max_prompt_tokens'] as num?)?.toInt(), + maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), truncationStrategy: json['truncation_strategy'] == null ? null : TruncationObject.fromJson( @@ -2830,9 +2838,9 @@ Map _$$RunSubmitToolOutputsImplToJson( _$RunCompletionUsageImpl _$$RunCompletionUsageImplFromJson( Map json) => _$RunCompletionUsageImpl( - completionTokens: json['completion_tokens'] as int, - promptTokens: json['prompt_tokens'] as int, - totalTokens: json['total_tokens'] as int, + completionTokens: (json['completion_tokens'] as num).toInt(), + promptTokens: (json['prompt_tokens'] as num).toInt(), + totalTokens: (json['total_tokens'] as num).toInt(), ); Map _$$RunCompletionUsageImplToJson( @@ -2859,8 +2867,8 @@ _$CreateRunRequestImpl _$$CreateRunRequestImplFromJson( metadata: json['metadata'] as Map?, temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, topP: (json['top_p'] as num?)?.toDouble() ?? 1.0, - maxPromptTokens: json['max_prompt_tokens'] as int?, - maxCompletionTokens: json['max_completion_tokens'] as int?, + maxPromptTokens: (json['max_prompt_tokens'] as num?)?.toInt(), + maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), truncationStrategy: json['truncation_strategy'] == null ? null : TruncationObject.fromJson( @@ -2939,6 +2947,7 @@ const _$RunModelsEnumMap = { RunModels.gpt4VisionPreview: 'gpt-4-vision-preview', RunModels.gpt4o: 'gpt-4o', RunModels.gpt4o20240513: 'gpt-4o-2024-05-13', + RunModels.gpt4o20240806: 'gpt-4o-2024-08-06', RunModels.gpt4oMini: 'gpt-4o-mini', RunModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', RunModels.gpt35Turbo: 'gpt-3.5-turbo', @@ -3185,8 +3194,8 @@ _$CreateThreadAndRunRequestImpl _$$CreateThreadAndRunRequestImplFromJson( metadata: json['metadata'] as Map?, temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, topP: (json['top_p'] as num?)?.toDouble() ?? 
1.0, - maxPromptTokens: json['max_prompt_tokens'] as int?, - maxCompletionTokens: json['max_completion_tokens'] as int?, + maxPromptTokens: (json['max_prompt_tokens'] as num?)?.toInt(), + maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), truncationStrategy: json['truncation_strategy'] == null ? null : TruncationObject.fromJson( @@ -3265,6 +3274,7 @@ const _$ThreadAndRunModelsEnumMap = { ThreadAndRunModels.gpt4VisionPreview: 'gpt-4-vision-preview', ThreadAndRunModels.gpt4o: 'gpt-4o', ThreadAndRunModels.gpt4o20240513: 'gpt-4o-2024-05-13', + ThreadAndRunModels.gpt4o20240806: 'gpt-4o-2024-08-06', ThreadAndRunModels.gpt4oMini: 'gpt-4o-mini', ThreadAndRunModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', ThreadAndRunModels.gpt35Turbo: 'gpt-3.5-turbo', @@ -3377,7 +3387,7 @@ _$ThreadObjectImpl _$$ThreadObjectImplFromJson(Map json) => _$ThreadObjectImpl( id: json['id'] as String, object: $enumDecode(_$ThreadObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), toolResources: json['tool_resources'] == null ? null : ToolResources.fromJson( @@ -3597,7 +3607,7 @@ _$MessageObjectImpl _$$MessageObjectImplFromJson(Map json) => _$MessageObjectImpl( id: json['id'] as String, object: $enumDecode(_$MessageObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), threadId: json['thread_id'] as String, status: $enumDecodeNullable(_$MessageObjectStatusEnumMap, json['status'], unknownValue: JsonKey.nullForUndefinedEnumValue), @@ -3605,8 +3615,8 @@ _$MessageObjectImpl _$$MessageObjectImplFromJson(Map json) => ? null : MessageObjectIncompleteDetails.fromJson( json['incomplete_details'] as Map), - completedAt: json['completed_at'] as int?, - incompleteAt: json['incomplete_at'] as int?, + completedAt: (json['completed_at'] as num?)?.toInt(), + incompleteAt: (json['incomplete_at'] as num?)?.toInt(), role: $enumDecode(_$MessageRoleEnumMap, json['role']), content: (json['content'] as List) .map((e) => MessageContent.fromJson(e as Map)) @@ -3962,7 +3972,7 @@ _$MessageDeltaContentImageUrlObjectImpl _$$MessageDeltaContentImageUrlObjectImplFromJson( Map json) => _$MessageDeltaContentImageUrlObjectImpl( - index: json['index'] as int?, + index: (json['index'] as num?)?.toInt(), type: json['type'] as String?, imageUrl: json['image_url'] == null ? null @@ -4040,7 +4050,7 @@ _$RunStepObjectImpl _$$RunStepObjectImplFromJson(Map json) => _$RunStepObjectImpl( id: json['id'] as String, object: $enumDecode(_$RunStepObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), assistantId: json['assistant_id'] as String, threadId: json['thread_id'] as String, runId: json['run_id'] as String, @@ -4052,10 +4062,10 @@ _$RunStepObjectImpl _$$RunStepObjectImplFromJson(Map json) => ? null : RunStepLastError.fromJson( json['last_error'] as Map), - expiredAt: json['expired_at'] as int?, - cancelledAt: json['cancelled_at'] as int?, - failedAt: json['failed_at'] as int?, - completedAt: json['completed_at'] as int?, + expiredAt: (json['expired_at'] as num?)?.toInt(), + cancelledAt: (json['cancelled_at'] as num?)?.toInt(), + failedAt: (json['failed_at'] as num?)?.toInt(), + completedAt: (json['completed_at'] as num?)?.toInt(), metadata: json['metadata'] as Map?, usage: json['usage'] == null ? 
null @@ -4300,9 +4310,9 @@ Map _$RunStepCompletionUsageImpl _$$RunStepCompletionUsageImplFromJson( Map json) => _$RunStepCompletionUsageImpl( - completionTokens: json['completion_tokens'] as int, - promptTokens: json['prompt_tokens'] as int, - totalTokens: json['total_tokens'] as int, + completionTokens: (json['completion_tokens'] as num).toInt(), + promptTokens: (json['prompt_tokens'] as num).toInt(), + totalTokens: (json['total_tokens'] as num).toInt(), ); Map _$$RunStepCompletionUsageImplToJson( @@ -4318,7 +4328,7 @@ _$VectorStoreExpirationAfterImpl _$$VectorStoreExpirationAfterImplFromJson( _$VectorStoreExpirationAfterImpl( anchor: $enumDecode( _$VectorStoreExpirationAfterAnchorEnumMap, json['anchor']), - days: json['days'] as int, + days: (json['days'] as num).toInt(), ); Map _$$VectorStoreExpirationAfterImplToJson( @@ -4337,9 +4347,9 @@ _$VectorStoreObjectImpl _$$VectorStoreObjectImplFromJson( _$VectorStoreObjectImpl( id: json['id'] as String, object: json['object'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), name: json['name'] as String?, - usageBytes: json['usage_bytes'] as int, + usageBytes: (json['usage_bytes'] as num).toInt(), fileCounts: VectorStoreObjectFileCounts.fromJson( json['file_counts'] as Map), status: $enumDecode(_$VectorStoreObjectStatusEnumMap, json['status']), @@ -4347,8 +4357,8 @@ _$VectorStoreObjectImpl _$$VectorStoreObjectImplFromJson( ? null : VectorStoreExpirationAfter.fromJson( json['expires_after'] as Map), - expiresAt: json['expires_at'] as int?, - lastActiveAt: json['last_active_at'] as int?, + expiresAt: (json['expires_at'] as num?)?.toInt(), + lastActiveAt: (json['last_active_at'] as num?)?.toInt(), metadata: json['metadata'], ); @@ -4386,11 +4396,11 @@ const _$VectorStoreObjectStatusEnumMap = { _$VectorStoreObjectFileCountsImpl _$$VectorStoreObjectFileCountsImplFromJson( Map json) => _$VectorStoreObjectFileCountsImpl( - inProgress: json['in_progress'] as int, - completed: json['completed'] as int, - failed: json['failed'] as int, - cancelled: json['cancelled'] as int, - total: json['total'] as int, + inProgress: (json['in_progress'] as num).toInt(), + completed: (json['completed'] as num).toInt(), + failed: (json['failed'] as num).toInt(), + cancelled: (json['cancelled'] as num).toInt(), + total: (json['total'] as num).toInt(), ); Map _$$VectorStoreObjectFileCountsImplToJson( @@ -4509,8 +4519,8 @@ _$VectorStoreFileObjectImpl _$$VectorStoreFileObjectImplFromJson( _$VectorStoreFileObjectImpl( id: json['id'] as String, object: json['object'] as String, - usageBytes: json['usage_bytes'] as int, - createdAt: json['created_at'] as int, + usageBytes: (json['usage_bytes'] as num).toInt(), + createdAt: (json['created_at'] as num).toInt(), vectorStoreId: json['vector_store_id'] as String, status: $enumDecode(_$VectorStoreFileStatusEnumMap, json['status']), lastError: json['last_error'] == null @@ -4568,17 +4578,16 @@ Map _$$VectorStoreFileObjectLastErrorImplToJson( }; const _$VectorStoreFileObjectLastErrorCodeEnumMap = { - VectorStoreFileObjectLastErrorCode.internalError: 'internal_error', - VectorStoreFileObjectLastErrorCode.fileNotFound: 'file_not_found', - VectorStoreFileObjectLastErrorCode.parsingError: 'parsing_error', - VectorStoreFileObjectLastErrorCode.unhandledMimeType: 'unhandled_mime_type', + VectorStoreFileObjectLastErrorCode.serverError: 'server_error', + VectorStoreFileObjectLastErrorCode.unsupportedFile: 'unsupported_file', + VectorStoreFileObjectLastErrorCode.invalidFile: 'invalid_file', }; 
_$StaticChunkingStrategyImpl _$$StaticChunkingStrategyImplFromJson( Map json) => _$StaticChunkingStrategyImpl( - maxChunkSizeTokens: json['max_chunk_size_tokens'] as int, - chunkOverlapTokens: json['chunk_overlap_tokens'] as int, + maxChunkSizeTokens: (json['max_chunk_size_tokens'] as num).toInt(), + chunkOverlapTokens: (json['chunk_overlap_tokens'] as num).toInt(), ); Map _$$StaticChunkingStrategyImplToJson( @@ -4657,7 +4666,7 @@ _$VectorStoreFileBatchObjectImpl _$$VectorStoreFileBatchObjectImplFromJson( _$VectorStoreFileBatchObjectImpl( id: json['id'] as String, object: json['object'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), vectorStoreId: json['vector_store_id'] as String, status: $enumDecode( _$VectorStoreFileBatchObjectStatusEnumMap, json['status']), @@ -4687,11 +4696,11 @@ _$VectorStoreFileBatchObjectFileCountsImpl _$$VectorStoreFileBatchObjectFileCountsImplFromJson( Map json) => _$VectorStoreFileBatchObjectFileCountsImpl( - inProgress: json['in_progress'] as int, - completed: json['completed'] as int, - failed: json['failed'] as int, - cancelled: json['cancelled'] as int, - total: json['total'] as int, + inProgress: (json['in_progress'] as num).toInt(), + completed: (json['completed'] as num).toInt(), + failed: (json['failed'] as num).toInt(), + cancelled: (json['cancelled'] as num).toInt(), + total: (json['total'] as num).toInt(), ); Map _$$VectorStoreFileBatchObjectFileCountsImplToJson( @@ -4802,15 +4811,15 @@ _$BatchImpl _$$BatchImplFromJson(Map json) => _$BatchImpl( status: $enumDecode(_$BatchStatusEnumMap, json['status']), outputFileId: json['output_file_id'] as String?, errorFileId: json['error_file_id'] as String?, - createdAt: json['created_at'] as int, - inProgressAt: json['in_progress_at'] as int?, - expiresAt: json['expires_at'] as int?, - finalizingAt: json['finalizing_at'] as int?, - completedAt: json['completed_at'] as int?, - failedAt: json['failed_at'] as int?, - expiredAt: json['expired_at'] as int?, - cancellingAt: json['cancelling_at'] as int?, - cancelledAt: json['cancelled_at'] as int?, + createdAt: (json['created_at'] as num).toInt(), + inProgressAt: (json['in_progress_at'] as num?)?.toInt(), + expiresAt: (json['expires_at'] as num?)?.toInt(), + finalizingAt: (json['finalizing_at'] as num?)?.toInt(), + completedAt: (json['completed_at'] as num?)?.toInt(), + failedAt: (json['failed_at'] as num?)?.toInt(), + expiredAt: (json['expired_at'] as num?)?.toInt(), + cancellingAt: (json['cancelling_at'] as num?)?.toInt(), + cancelledAt: (json['cancelled_at'] as num?)?.toInt(), requestCounts: json['request_counts'] == null ? 
null : BatchRequestCounts.fromJson( @@ -4892,9 +4901,9 @@ Map _$$BatchErrorsImplToJson(_$BatchErrorsImpl instance) { _$BatchRequestCountsImpl _$$BatchRequestCountsImplFromJson( Map json) => _$BatchRequestCountsImpl( - total: json['total'] as int, - completed: json['completed'] as int, - failed: json['failed'] as int, + total: (json['total'] as num).toInt(), + completed: (json['completed'] as num).toInt(), + failed: (json['failed'] as num).toInt(), ); Map _$$BatchRequestCountsImplToJson( @@ -4911,7 +4920,7 @@ _$BatchErrorsDataInnerImpl _$$BatchErrorsDataInnerImplFromJson( code: json['code'] as String?, message: json['message'] as String?, param: json['param'] as String?, - line: json['line'] as int?, + line: (json['line'] as num?)?.toInt(), ); Map _$$BatchErrorsDataInnerImplToJson( @@ -5250,7 +5259,7 @@ _$AssistantToolsFileSearchFileSearchImpl _$$AssistantToolsFileSearchFileSearchImplFromJson( Map json) => _$AssistantToolsFileSearchFileSearchImpl( - maxNumResults: json['max_num_results'] as int?, + maxNumResults: (json['max_num_results'] as num?)?.toInt(), ); Map _$$AssistantToolsFileSearchFileSearchImplToJson( @@ -5315,7 +5324,7 @@ _$MessageDeltaContentImageFileObjectImpl _$$MessageDeltaContentImageFileObjectImplFromJson( Map json) => _$MessageDeltaContentImageFileObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, imageFile: json['image_file'] == null ? null @@ -5343,7 +5352,7 @@ Map _$$MessageDeltaContentImageFileObjectImplToJson( _$MessageDeltaContentTextObjectImpl _$$MessageDeltaContentTextObjectImplFromJson(Map json) => _$MessageDeltaContentTextObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, text: json['text'] == null ? null @@ -5376,8 +5385,8 @@ _$MessageContentTextAnnotationsFileCitationObjectImpl text: json['text'] as String, fileCitation: MessageContentTextAnnotationsFileCitation.fromJson( json['file_citation'] as Map), - startIndex: json['start_index'] as int, - endIndex: json['end_index'] as int, + startIndex: (json['start_index'] as num).toInt(), + endIndex: (json['end_index'] as num).toInt(), ); Map @@ -5399,8 +5408,8 @@ _$MessageContentTextAnnotationsFilePathObjectImpl text: json['text'] as String, filePath: MessageContentTextAnnotationsFilePath.fromJson( json['file_path'] as Map), - startIndex: json['start_index'] as int, - endIndex: json['end_index'] as int, + startIndex: (json['start_index'] as num).toInt(), + endIndex: (json['end_index'] as num).toInt(), ); Map _$$MessageContentTextAnnotationsFilePathObjectImplToJson( @@ -5430,15 +5439,15 @@ _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplFromJson( Map json) => _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, text: json['text'] as String?, fileCitation: json['file_citation'] == null ? 
null : MessageDeltaContentTextAnnotationsFileCitation.fromJson( json['file_citation'] as Map), - startIndex: json['start_index'] as int?, - endIndex: json['end_index'] as int?, + startIndex: (json['start_index'] as num?)?.toInt(), + endIndex: (json['end_index'] as num?)?.toInt(), ); Map @@ -5466,15 +5475,15 @@ _$MessageDeltaContentTextAnnotationsFilePathObjectImpl _$$MessageDeltaContentTextAnnotationsFilePathObjectImplFromJson( Map json) => _$MessageDeltaContentTextAnnotationsFilePathObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, text: json['text'] as String?, filePath: json['file_path'] == null ? null : MessageDeltaContentTextAnnotationsFilePathObjectFilePath .fromJson(json['file_path'] as Map), - startIndex: json['start_index'] as int?, - endIndex: json['end_index'] as int?, + startIndex: (json['start_index'] as num?)?.toInt(), + endIndex: (json['end_index'] as num?)?.toInt(), ); Map @@ -5683,7 +5692,7 @@ _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), id: json['id'] as String?, type: json['type'] as String, codeInterpreter: json['code_interpreter'] == null @@ -5714,7 +5723,7 @@ _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), id: json['id'] as String?, type: json['type'] as String, fileSearch: json['file_search'] as Map, @@ -5743,7 +5752,7 @@ _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), id: json['id'] as String?, type: json['type'] as String, function: json['function'] == null @@ -5831,7 +5840,7 @@ _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, logs: json['logs'] as String?, ); @@ -5858,7 +5867,7 @@ _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, image: json['image'] == null ? null diff --git a/packages/openai_dart/lib/src/generated/schema/thread_object.dart b/packages/openai_dart/lib/src/generated/schema/thread_object.dart index a5ae0ea8..20f2e014 100644 --- a/packages/openai_dart/lib/src/generated/schema/thread_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/thread_object.dart @@ -27,7 +27,9 @@ class ThreadObject with _$ThreadObject { /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources') required ToolResources? 
toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, }) = _ThreadObject; diff --git a/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart b/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart index 7105bd0c..d2ef2414 100644 --- a/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart @@ -22,7 +22,9 @@ class UpdateVectorStoreRequest with _$UpdateVectorStoreRequest { @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic metadata, }) = _UpdateVectorStoreRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart b/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart index b6c24133..3664758b 100644 --- a/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart @@ -140,12 +140,10 @@ class VectorStoreFileObjectLastError with _$VectorStoreFileObjectLastError { /// One of `server_error` or `rate_limit_exceeded`. enum VectorStoreFileObjectLastErrorCode { - @JsonValue('internal_error') - internalError, - @JsonValue('file_not_found') - fileNotFound, - @JsonValue('parsing_error') - parsingError, - @JsonValue('unhandled_mime_type') - unhandledMimeType, + @JsonValue('server_error') + serverError, + @JsonValue('unsupported_file') + unsupportedFile, + @JsonValue('invalid_file') + invalidFile, } diff --git a/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart b/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart index 836d8337..a3d49591 100644 --- a/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart @@ -47,7 +47,9 @@ class VectorStoreObject with _$VectorStoreObject { /// The Unix timestamp (in seconds) for when the vector store was last active. @JsonKey(name: 'last_active_at') required int? lastActiveAt, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required dynamic metadata, }) = _VectorStoreObject; diff --git a/packages/openai_dart/oas/main.dart b/packages/openai_dart/oas/main.dart index f1fbee08..5a62f6b1 100644 --- a/packages/openai_dart/oas/main.dart +++ b/packages/openai_dart/oas/main.dart @@ -1,3 +1,4 @@ +// ignore_for_file: avoid_print import 'dart:io'; import 'package:openapi_spec/openapi_spec.dart'; @@ -18,10 +19,12 @@ void main() async { enabled: true, ), ); - await Process.run( + final res = await Process.run( 'dart', ['run', 'build_runner', 'build', 'lib', '--delete-conflicting-outputs'], ); + print(res.stdout); + print(res.stderr); } String? _onSchemaName(final String schemaName) => switch (schemaName) { diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 00dbbe54..c349f64e 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -4,7 +4,7 @@ openapi: 3.0.0 info: title: OpenAI API description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. - version: "2.1.0" + version: "2.3.0" termsOfService: https://openai.com/policies/terms-of-use contact: name: OpenAI Support @@ -310,7 +310,7 @@ paths: schema: type: string # ideally this will be an actual ID, so this will always work from browser - example: gpt-3.5-turbo + example: gpt-4o-mini description: The ID of the model to use for this request responses: "200": @@ -330,7 +330,7 @@ paths: required: true schema: type: string - example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 + example: ft:gpt-4o-mini:acemeco:suffix:abc123 description: The model to delete responses: "200": @@ -1796,7 +1796,7 @@ components: model: title: ChatCompletionModel description: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. - example: "gpt-4-turbo" + example: "gpt-4o" anyOf: - type: string description: The ID of the model to use for this request. @@ -1820,6 +1820,7 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -2688,7 +2689,7 @@ components: description: | The name of the model to fine-tune. You can select one of the [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - example: "gpt-3.5-turbo" + example: "gpt-4o-mini" anyOf: - type: string description: The ID of the model to use for this request. @@ -2696,7 +2697,7 @@ components: title: FineTuningModels description: | Available fine-tuning models. Mind that the list may not be exhaustive nor up-to-date. - enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo" ] + enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] training_file: description: | The ID of an uploaded file that contains training data. @@ -2719,7 +2720,7 @@ components: description: | A string of up to 18 characters that will be added to your fine-tuned model name. - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. 
+ For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. type: string minLength: 1 maxLength: 40 @@ -3438,7 +3439,8 @@ components: nullable: true tools: description: &assistant_tools_param_description | - A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + types `code_interpreter`, `file_search`, or `function`. default: [ ] type: array maxItems: 128 @@ -3448,13 +3450,16 @@ components: $ref: "#/components/schemas/ToolResources" metadata: description: &metadata_description | - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + information about the object in a structured format. Keys can be a maximum of 64 characters long and values + can be a maxium of 512 characters long. type: object additionalProperties: true nullable: true temperature: description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. type: number minimum: 0 maximum: 2 @@ -3469,16 +3474,29 @@ components: example: 1 nullable: true description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + mass are considered. We generally recommend altering this or temperature but not both. response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + description: &assistant_response_format | + Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-4o-mini-1106`. - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
- **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + system or user message. Without this, the model may generate an unending stream of whitespace until the + generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + that the message content may be partially cut off if `finish_reason="length"`, which indicates the + generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: - type: string title: AssistantResponseFormatMode @@ -3528,6 +3546,7 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -3568,8 +3587,7 @@ components: additionalProperties: true nullable: true temperature: - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + description: *run_temperature_description type: number minimum: 0 maximum: 2 @@ -3583,17 +3601,9 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. + description: *run_top_p_description response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: CreateAssistantResponseFormatMode @@ -3663,17 +3673,9 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. + description: *run_top_p_description response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: ModifyAssistantResponseFormatMode @@ -3764,7 +3766,7 @@ components: minimum: 1 maximum: 50 description: | - The maximum number of results the file search tool should output. The default is 20 for gpt-4* models + The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. Note that the file search tool may output fewer than `max_num_results` results. See the [file search @@ -4000,12 +4002,7 @@ components: - $ref: "#/components/schemas/AssistantsNamedToolChoice" parallel_tool_calls: *parallel_tool_calls response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: RunObjectResponseFormatMode @@ -4092,6 +4089,7 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -4143,10 +4141,7 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
+ description: *run_top_p_description max_prompt_tokens: type: integer nullable: true @@ -4181,12 +4176,7 @@ components: - $ref: "#/components/schemas/AssistantsNamedToolChoice" parallel_tool_calls: *parallel_tool_calls response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: CreateRunRequestResponseFormatMode @@ -4336,6 +4326,7 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -4414,12 +4405,7 @@ components: - $ref: "#/components/schemas/AssistantsNamedToolChoice" parallel_tool_calls: *parallel_tool_calls response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: CreateThreadAndRunRequestResponseFormatMode @@ -5852,10 +5838,9 @@ components: description: One of `server_error` or `rate_limit_exceeded`. enum: [ - "internal_error", - "file_not_found", - "parsing_error", - "unhandled_mime_type", + "server_error", + "unsupported_file", + "invalid_file", ] message: type: string diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 1a91af5d..4d45fce2 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -2,7 +2,7 @@ openapi: 3.0.0 info: title: OpenAI API description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. 
- version: "2.1.0" + version: "2.3.0" termsOfService: https://openai.com/policies/terms-of-use contact: name: OpenAI Support @@ -143,7 +143,7 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "model": "gpt-4-turbo", + "model": "gpt-4o", "messages": [ { "role": "user", @@ -169,7 +169,7 @@ paths: client = OpenAI() response = client.chat.completions.create( - model="gpt-4-turbo", + model="gpt-4o", messages=[ { "role": "user", @@ -193,7 +193,7 @@ paths: async function main() { const response = await openai.chat.completions.create({ - model: "gpt-4-turbo", + model: "gpt-4o", messages: [ { role: "user", @@ -305,7 +305,7 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "model": "gpt-4-turbo", + "model": "gpt-4o", "messages": [ { "role": "user", @@ -399,7 +399,7 @@ paths: ]; const response = await openai.chat.completions.create({ - model: "gpt-4-turbo", + model: "gpt-4o", messages: messages, tools: tools, tool_choice: "auto", @@ -1973,7 +1973,7 @@ paths: -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "training_file": "file-BK7bzQj3FfZFXr7DbL6xJwfo", - "model": "gpt-3.5-turbo" + "model": "gpt-4o-mini" }' python: | from openai import OpenAI @@ -1981,7 +1981,7 @@ paths: client.fine_tuning.jobs.create( training_file="file-abc123", - model="gpt-3.5-turbo" + model="gpt-4o-mini" ) node.js: | import OpenAI from "openai"; @@ -2001,8 +2001,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2018,7 +2018,7 @@ paths: -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "training_file": "file-abc123", - "model": "gpt-3.5-turbo", + "model": "gpt-4o-mini", "hyperparameters": { "n_epochs": 2 } @@ -2029,7 +2029,7 @@ paths: client.fine_tuning.jobs.create( training_file="file-abc123", - model="gpt-3.5-turbo", + model="gpt-4o-mini", hyperparameters={ "n_epochs":2 } @@ -2042,7 +2042,7 @@ paths: async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", - model: "gpt-3.5-turbo", + model: "gpt-4o-mini", hyperparameters: { n_epochs: 2 } }); @@ -2054,8 +2054,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2073,7 +2073,7 @@ paths: -d '{ "training_file": "file-abc123", "validation_file": "file-abc123", - "model": "gpt-3.5-turbo" + "model": "gpt-4o-mini" }' python: | from openai import OpenAI @@ -2082,7 +2082,7 @@ paths: client.fine_tuning.jobs.create( training_file="file-abc123", validation_file="file-def456", - model="gpt-3.5-turbo" + model="gpt-4o-mini" ) node.js: | import OpenAI from "openai"; @@ -2103,8 +2103,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2121,7 +2121,7 @@ paths: -d '{ "training_file": "file-abc123", "validation_file": "file-abc123", - "model": "gpt-3.5-turbo", + "model": "gpt-4o-mini", "integrations": [ { "type": "wandb", @@ -2139,8 +2139,8 @@ paths: { "object": "fine_tuning.job", "id": 
"ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2380,7 +2380,7 @@ paths: { "object": "fine_tuning.job.event", "id": "ft-event-ddTJfwuMVpfLXseO0Am0Gqjm", - "created_at": 1692407401, + "created_at": 1721764800, "level": "info", "message": "Fine tuning job successfully completed", "data": null, @@ -2389,9 +2389,9 @@ paths: { "object": "fine_tuning.job.event", "id": "ft-event-tyiGuB72evQncpH87xe505Sv", - "created_at": 1692407400, + "created_at": 1721764800, "level": "info", - "message": "New fine-tuned model created: ft:gpt-3.5-turbo:openai::7p4lURel", + "message": "New fine-tuned model created: ft:gpt-4o-mini:openai::7p4lURel", "data": null, "type": "message" } @@ -2450,8 +2450,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1689376978, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2514,8 +2514,8 @@ paths: { "object": "fine_tuning.job.checkpoint", "id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", - "created_at": 1519129973, - "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:96olL566:ckpt-step-2000", + "created_at": 1721764867, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:96olL566:ckpt-step-2000", "metrics": { "full_valid_loss": 0.134, "full_valid_mean_token_accuracy": 0.874 @@ -2526,8 +2526,8 @@ paths: { "object": "fine_tuning.job.checkpoint", "id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", - "created_at": 1519129833, - "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", + "created_at": 1721764800, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", "metrics": { "full_valid_loss": 0.167, "full_valid_mean_token_accuracy": 0.781 @@ -2619,7 +2619,7 @@ paths: schema: type: string # ideally this will be an actual ID, so this will always work from browser - example: gpt-3.5-turbo + example: gpt-4o-mini description: The ID of the model to use for this request responses: "200": @@ -2672,7 +2672,7 @@ paths: required: true schema: type: string - example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 + example: ft:gpt-4o-mini:acemeco:suffix:abc123 description: The model to delete responses: "200": @@ -2688,28 +2688,28 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/models/ft:gpt-3.5-turbo:acemeco:suffix:abc123 \ + curl https://api.openai.com/v1/models/ft:gpt-4o-mini:acemeco:suffix:abc123 \ -X DELETE \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | from openai import OpenAI client = OpenAI() - client.models.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") + client.models.delete("ft:gpt-4o-mini:acemeco:suffix:abc123") node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); + const model = await openai.models.del("ft:gpt-4o-mini:acemeco:suffix:abc123"); console.log(model); } main(); response: | { - "id": "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "id": "ft:gpt-4o-mini:acemeco:suffix:abc123", "object": "model", "deleted": true } @@ -2822,7 +2822,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: 
&pagination_after_param_description | @@ -2888,7 +2888,7 @@ paths: "created_at": 1698982736, "name": "Coding Tutor", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "tool_resources": {}, @@ -2903,7 +2903,7 @@ paths: "created_at": 1698982718, "name": "My Assistant", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "tool_resources": {}, @@ -2918,7 +2918,7 @@ paths: "created_at": 1698982643, "name": null, "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "tools": [], "tool_resources": {}, @@ -2967,7 +2967,7 @@ paths: "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", "name": "Math Tutor", "tools": [{"type": "code_interpreter"}], - "model": "gpt-4-turbo" + "model": "gpt-4o" }' python: | @@ -2978,7 +2978,7 @@ paths: instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.", name="Math Tutor", tools=[{"type": "code_interpreter"}], - model="gpt-4-turbo", + model="gpt-4o", ) print(my_assistant) node.js: |- @@ -2992,7 +2992,7 @@ paths: "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", name: "Math Tutor", tools: [{ type: "code_interpreter" }], - model: "gpt-4-turbo", + model: "gpt-4o", }); console.log(myAssistant); @@ -3006,7 +3006,7 @@ paths: "created_at": 1698984975, "name": "Math Tutor", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", "tools": [ { @@ -3029,7 +3029,7 @@ paths: "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [{"type": "file_search"}], "tool_resources": {"file_search": {"vector_store_ids": ["vs_123"]}}, - "model": "gpt-4-turbo" + "model": "gpt-4o" }' python: | from openai import OpenAI @@ -3040,7 +3040,7 @@ paths: name="HR Helper", tools=[{"type": "file_search"}], tool_resources={"file_search": {"vector_store_ids": ["vs_123"]}}, - model="gpt-4-turbo" + model="gpt-4o" ) print(my_assistant) node.js: |- @@ -3059,7 +3059,7 @@ paths: vector_store_ids: ["vs_123"] } }, - model: "gpt-4-turbo" + model: "gpt-4o" }); console.log(myAssistant); @@ -3073,7 +3073,7 @@ paths: "created_at": 1699009403, "name": "HR Helper", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [ { @@ -3150,7 +3150,7 @@ paths: "created_at": 1699009709, "name": "HR Helper", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [ { @@ -3202,7 +3202,7 @@ paths: -d '{ "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", "tools": [{"type": "file_search"}], - "model": "gpt-4-turbo" + "model": "gpt-4o" }' python: | from openai import OpenAI @@ -3213,7 +3213,7 @@ paths: instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", name="HR Helper", tools=[{"type": "file_search"}], - model="gpt-4-turbo" + model="gpt-4o" ) print(my_updated_assistant) @@ -3230,7 +3230,7 @@ paths: "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", name: "HR Helper", tools: [{ type: "file_search" }], - model: "gpt-4-turbo" + model: "gpt-4o" } ); @@ -3245,7 +3245,7 @@ paths: "created_at": 1699009709, "name": "HR Helper", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", "tools": [ { @@ -3671,7 +3671,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -4203,7 +4203,7 @@ paths: "completed_at": null, "required_action": null, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a helpful assistant.", "tools": [], "tool_resources": {}, @@ -4282,13 +4282,13 @@ paths: data: {"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -4320,7 +4320,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + 
{"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: done data: [DONE] @@ -4451,13 +4451,13 @@ paths: data: {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} @@ -4483,7 +4483,7 @@ paths: data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} event: thread.run.requires_action - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -4514,7 +4514,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -4584,7 +4584,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4631,7 +4631,7 @@ paths: "failed_at": null, "completed_at": 1699063291, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4750,7 +4750,7 @@ paths: "failed_at": null, "completed_at": 1699063291, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4814,13 +4814,13 @@ paths: main(); response: | event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -4852,7 +4852,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -4969,13 +4969,13 @@ paths: main(); response: | event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -5007,7 +5007,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -5088,7 +5088,7 @@ paths: "failed_at": 
null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -5207,7 +5207,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -5351,7 +5351,7 @@ paths: "failed_at": null, "completed_at": null, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "tools": [ { @@ -5455,10 +5455,10 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} @@ -5496,7 +5496,7 @@ paths: data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -5578,7 +5578,7 @@ paths: "failed_at": null, "completed_at": null, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You summarize books.", "tools": [ { @@ -5631,7 +5631,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -5837,7 +5837,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -6212,7 +6212,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -6228,7 +6228,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -6774,7 +6774,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -6790,7 +6790,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -6890,7 +6890,7 @@ paths: description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. completion_window: type: string - enum: ["24h"] + enum: [ "24h" ] description: The time frame within which the batch should be processed. Currently only `24h` is supported. metadata: type: object @@ -7267,7 +7267,7 @@ components: properties: object: type: string - enum: [list] + enum: [ list ] data: type: array items: @@ -7298,7 +7298,7 @@ components: anyOf: - type: string - type: string - enum: ["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"] + enum: [ "gpt-3.5-turbo-instruct", "davinci-002", "babbage-002" ] x-oaiTypeLabel: string prompt: description: &completions_prompt_description | @@ -7510,7 +7510,7 @@ components: The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, or `content_filter` if content was omitted due to a flag from our content filters. 
- enum: ["stop", "length", "content_filter"] + enum: [ "stop", "length", "content_filter" ] index: type: integer logprobs: @@ -7552,7 +7552,7 @@ components: object: type: string description: The object type, which is always "text_completion" - enum: [text_completion] + enum: [ text_completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -7597,7 +7597,7 @@ components: properties: type: type: string - enum: ["image_url"] + enum: [ "image_url" ] description: The type of the content part. image_url: type: object @@ -7609,7 +7609,7 @@ components: detail: type: string description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - url @@ -7650,7 +7650,7 @@ components: type: string role: type: string - enum: ["system"] + enum: [ "system" ] description: The role of the messages author, in this case `system`. name: type: string @@ -7699,7 +7699,7 @@ components: The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. role: type: string - enum: ["assistant"] + enum: [ "assistant" ] description: The role of the messages author, in this case `assistant`. name: type: string @@ -7732,7 +7732,7 @@ components: properties: weight: type: integer - enum: [0, 1] + enum: [ 0, 1 ] description: "Controls whether the assistant message is trained against (0 or 1)" - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" required: @@ -7744,7 +7744,7 @@ components: properties: role: type: string - enum: ["tool"] + enum: [ "tool" ] description: The role of the messages author, in this case `tool`. content: type: string @@ -7764,7 +7764,7 @@ components: properties: role: type: string - enum: ["function"] + enum: [ "function" ] description: The role of the messages author, in this case `function`. content: nullable: true @@ -7814,7 +7814,7 @@ components: properties: type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: $ref: "#/components/schemas/FunctionObject" @@ -7851,7 +7851,7 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" x-oaiExpandable: true @@ -7861,7 +7861,7 @@ components: properties: type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7895,7 +7895,7 @@ components: description: The ID of the tool call. type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7925,7 +7925,7 @@ components: description: The ID of the tool call. type: type: string - enum: ["function"] + enum: [ "function" ] description: The type of the tool. Currently, only `function` is supported. function: type: object @@ -7974,7 +7974,7 @@ components: $ref: "#/components/schemas/ChatCompletionMessageToolCalls" role: type: string - enum: ["assistant"] + enum: [ "assistant" ] description: The role of the author of this message. 
function_call: type: object @@ -8019,7 +8019,7 @@ components: $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" role: type: string - enum: ["system", "user", "assistant", "tool"] + enum: [ "system", "user", "assistant", "tool" ] description: The role of the author of this message. CreateChatCompletionRequest: @@ -8033,14 +8033,16 @@ components: $ref: "#/components/schemas/ChatCompletionRequestMessage" model: description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. - example: "gpt-4-turbo" + example: "gpt-4o" anyOf: - type: string - type: string enum: [ "gpt-4o", + "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", @@ -8150,7 +8152,7 @@ components: When this parameter is set, the response body will include the `service_tier` utilized. type: string - enum: ["auto", "default"] + enum: [ "auto", "default" ] nullable: true default: null stop: @@ -8218,7 +8220,7 @@ components: description: > `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. - enum: [none, auto] + enum: [ none, auto ] - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" x-oaiExpandable: true functions: @@ -8297,7 +8299,7 @@ components: service_tier: description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. type: string - enum: ["scale", "default"] + enum: [ "scale", "default" ] example: "scale" nullable: true system_fingerprint: @@ -8309,7 +8311,7 @@ components: object: type: string description: The object type, which is always `chat.completion`. - enum: [chat.completion] + enum: [ chat.completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -8347,7 +8349,7 @@ components: &chat_completion_function_finish_reason_description | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. enum: - ["stop", "length", "function_call", "content_filter"] + [ "stop", "length", "function_call", "content_filter" ] index: type: integer description: The index of the choice in the list of choices. @@ -8368,7 +8370,7 @@ components: object: type: string description: The object type, which is always `chat.completion`. - enum: [chat.completion] + enum: [ chat.completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: @@ -8427,7 +8429,7 @@ components: type: boolean object: type: string - enum: [list] + enum: [ list ] required: - object - data @@ -8479,7 +8481,7 @@ components: service_tier: description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. type: string - enum: ["scale", "default"] + enum: [ "scale", "default" ] example: "scale" nullable: true system_fingerprint: @@ -8490,7 +8492,7 @@ components: object: type: string description: The object type, which is always `chat.completion.chunk`. 
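Beyond the whitespace changes, this hunk touches CreateChatCompletionRequest (new gpt-4o snapshots, service_tier) and the chat.completion.chunk object returned when streaming. A minimal consumption sketch, again with the openai Python package as an assumed client; nothing here is mandated by the patch itself:

    from openai import OpenAI

    client = OpenAI()

    stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Write one sentence about lighthouses."}],
        stream=True,  # the server now emits chat.completion.chunk objects
    )
    for chunk in stream:
        delta = chunk.choices[0].delta
        if delta.content:  # role-only and final chunks carry no content
            print(delta.content, end="", flush=True)
    print()
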
- enum: [chat.completion.chunk] + enum: [ chat.completion.chunk ] usage: type: object description: | @@ -8540,7 +8542,7 @@ components: anyOf: - type: string - type: string - enum: ["dall-e-2", "dall-e-3"] + enum: [ "dall-e-2", "dall-e-3" ] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-3" @@ -8556,27 +8558,27 @@ components: description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. quality: type: string - enum: ["standard", "hd"] + enum: [ "standard", "hd" ] default: "standard" example: "standard" description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. response_format: &images_response_format type: string - enum: ["url", "b64_json"] + enum: [ "url", "b64_json" ] default: "url" example: "url" nullable: true description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. size: &images_size type: string - enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] + enum: [ "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792" ] default: "1024x1024" example: "1024x1024" nullable: true description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. style: type: string - enum: ["vivid", "natural"] + enum: [ "vivid", "natural" ] default: "vivid" example: "vivid" nullable: true @@ -8637,7 +8639,7 @@ components: anyOf: - type: string - type: string - enum: ["dall-e-2"] + enum: [ "dall-e-2" ] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-2" @@ -8653,7 +8655,7 @@ components: description: The number of images to generate. Must be between 1 and 10. size: &dalle2_images_size type: string - enum: ["256x256", "512x512", "1024x1024"] + enum: [ "256x256", "512x512", "1024x1024" ] default: "1024x1024" example: "1024x1024" nullable: true @@ -8675,7 +8677,7 @@ components: anyOf: - type: string - type: string - enum: ["dall-e-2"] + enum: [ "dall-e-2" ] x-oaiTypeLabel: string default: "dall-e-2" example: "dall-e-2" @@ -8713,7 +8715,7 @@ components: anyOf: - type: string - type: string - enum: ["text-moderation-latest", "text-moderation-stable"] + enum: [ "text-moderation-latest", "text-moderation-stable" ] x-oaiTypeLabel: string required: - input @@ -8856,7 +8858,7 @@ components: $ref: "#/components/schemas/OpenAIFile" object: type: string - enum: [list] + enum: [ list ] required: - object - data @@ -8876,7 +8878,7 @@ components: Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). type: string - enum: ["assistants", "batch", "fine-tune", "vision"] + enum: [ "assistants", "batch", "fine-tune", "vision" ] required: - file - purpose @@ -8888,7 +8890,7 @@ components: type: string object: type: string - enum: [file] + enum: [ file ] deleted: type: boolean required: @@ -8910,7 +8912,7 @@ components: See the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). 
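The CreateImageRequest changes above are purely cosmetic, but the schema spells out the quality, size, style and response_format knobs for dall-e-3. A hedged sketch of a request that exercises them (Python openai package assumed; the prompt is invented):

    from openai import OpenAI

    client = OpenAI()

    result = client.images.generate(
        model="dall-e-3",
        prompt="A watercolor painting of a lighthouse at dusk",  # illustrative prompt
        n=1,                    # dall-e-3 only supports n=1
        size="1024x1024",       # or 1792x1024 / 1024x1792 for dall-e-3
        quality="hd",           # "standard" (default) or "hd"
        style="vivid",          # "vivid" (default) or "natural"
        response_format="url",  # URLs expire after 60 minutes; "b64_json" embeds the image
    )
    print(result.data[0].url)
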
type: string - enum: ["assistants", "batch", "fine-tune", "vision"] + enum: [ "assistants", "batch", "fine-tune", "vision" ] bytes: description: | The number of bytes in the file you are uploading. @@ -8966,12 +8968,12 @@ components: model: description: | The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - example: "gpt-3.5-turbo" + [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + example: "gpt-4o-mini" anyOf: - type: string - type: string - enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] + enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] x-oaiTypeLabel: string training_file: description: | @@ -8996,7 +8998,7 @@ components: are updated less frequently, but with lower variance. oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: integer minimum: 1 maximum: 256 @@ -9007,7 +9009,7 @@ components: overfitting. oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: number minimum: 0 exclusiveMinimum: true @@ -9018,7 +9020,7 @@ components: through the training dataset. oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: integer minimum: 1 maximum: 50 @@ -9027,7 +9029,7 @@ components: description: | A string of up to 18 characters that will be added to your fine-tuned model name. - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. type: string minLength: 1 maxLength: 40 @@ -9063,7 +9065,7 @@ components: The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. oneOf: - type: string - enum: [wandb] + enum: [ wandb ] wandb: type: object description: | @@ -9120,7 +9122,7 @@ components: $ref: "#/components/schemas/FineTuningJobEvent" object: type: string - enum: [list] + enum: [ list ] required: - object - data @@ -9134,7 +9136,7 @@ components: $ref: "#/components/schemas/FineTuningJobCheckpoint" object: type: string - enum: [list] + enum: [ list ] first_id: type: string nullable: true @@ -9209,7 +9211,7 @@ components: example: "float" default: "float" type: string - enum: ["float", "base64"] + enum: [ "float", "base64" ] dimensions: description: | The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. @@ -9234,7 +9236,7 @@ components: object: type: string description: The object type, which is always "list". - enum: [list] + enum: [ list ] usage: type: object description: The usage information for the request. @@ -9271,7 +9273,7 @@ components: anyOf: - type: string - type: string - enum: ["whisper-1"] + enum: [ "whisper-1" ] x-oaiTypeLabel: string language: description: | @@ -9306,7 +9308,7 @@ components: enum: - word - segment - default: [segment] + default: [ segment ] required: - file - model @@ -9393,7 +9395,7 @@ components: type: number format: float description: End time of the word in seconds. - required: [word, start, end] + required: [ word, start, end ] CreateTranscriptionResponseVerboseJson: type: object @@ -9418,7 +9420,7 @@ components: description: Segments of the transcribed text and their corresponding details. 
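This hunk updates the fine-tuning docs to gpt-4o-mini and reformats the "auto" hyperparameter enums. Putting CreateFineTuningJobRequest to work might look like the following; the file ID is a placeholder and the Python openai client is an assumption of this sketch, not something the patch prescribes:

    from openai import OpenAI

    client = OpenAI()

    job = client.fine_tuning.jobs.create(
        model="gpt-4o-mini",
        training_file="file-abc123",  # placeholder: ID of a JSONL file uploaded with purpose="fine-tune"
        suffix="custom-model-name",   # yields e.g. ft:gpt-4o-mini:my-org:custom-model-name:...
        hyperparameters={
            "n_epochs": "auto",       # or an integer in [1, 50]
            "batch_size": "auto",     # or an integer in [1, 256]
            "learning_rate_multiplier": "auto",
        },
    )
    print(job.id, job.status)
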
items: $ref: "#/components/schemas/TranscriptionSegment" - required: [language, duration, text] + required: [ language, duration, text ] x-oaiMeta: name: The transcription object (Verbose JSON) group: audio @@ -9441,7 +9443,7 @@ components: anyOf: - type: string - type: string - enum: ["whisper-1"] + enum: [ "whisper-1" ] x-oaiTypeLabel: string prompt: description: | @@ -9487,7 +9489,7 @@ components: description: Segments of the translated text and their corresponding details. items: $ref: "#/components/schemas/TranscriptionSegment" - required: [language, duration, text] + required: [ language, duration, text ] CreateSpeechRequest: type: object @@ -9499,7 +9501,7 @@ components: anyOf: - type: string - type: string - enum: ["tts-1", "tts-1-hd"] + enum: [ "tts-1", "tts-1-hd" ] x-oaiTypeLabel: string input: type: string @@ -9508,12 +9510,12 @@ components: voice: description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). type: string - enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] + enum: [ "alloy", "echo", "fable", "onyx", "nova", "shimmer" ] response_format: description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." default: "mp3" type: string - enum: ["mp3", "opus", "aac", "flac", "wav", "pcm"] + enum: [ "mp3", "opus", "aac", "flac", "wav", "pcm" ] speed: description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." type: number @@ -9538,7 +9540,7 @@ components: object: type: string description: The object type, which is always "model". - enum: [model] + enum: [ model ] owned_by: type: string description: The organization that owns the model. @@ -9570,7 +9572,7 @@ components: object: type: string description: The object type, which is always `file`. - enum: ["file"] + enum: [ "file" ] purpose: type: string description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. @@ -9588,7 +9590,7 @@ components: type: string deprecated: true description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. - enum: ["uploaded", "processed", "error"] + enum: [ "uploaded", "processed", "error" ] status_details: type: string deprecated: true @@ -9636,14 +9638,14 @@ components: status: type: string description: The status of the Upload. - enum: ["pending", "completed", "cancelled", "expired"] + enum: [ "pending", "completed", "cancelled", "expired" ] expires_at: type: integer description: The Unix timestamp (in seconds) for when the Upload was created. object: type: string description: The object type, which is always "upload". - enum: [upload] + enum: [ upload ] file: $ref: "#/components/schemas/OpenAIFile" nullable: true @@ -9696,7 +9698,7 @@ components: object: type: string description: The object type, which is always `upload.part`. - enum: ['upload.part'] + enum: [ 'upload.part' ] required: - created_at - id @@ -9728,7 +9730,7 @@ components: object: type: string description: The object type, which is always "embedding". 
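CreateSpeechRequest above documents the available voices, output formats and the 0.25 to 4.0 speed range. A minimal text-to-speech sketch under the same assumptions as the previous examples (Python openai client, invented input text):

    from openai import OpenAI

    client = OpenAI()

    speech = client.audio.speech.create(
        model="tts-1",          # or "tts-1-hd"
        voice="alloy",          # alloy, echo, fable, onyx, nova or shimmer
        input="The quick brown fox jumped over the lazy dog.",
        response_format="mp3",  # mp3, opus, aac, flac, wav or pcm
        speed=1.0,              # 0.25 to 4.0
    )
    with open("speech.mp3", "wb") as f:
        f.write(speech.content)  # the endpoint returns raw audio bytes
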
- enum: [embedding] + enum: [ embedding ] required: - index - object @@ -9793,15 +9795,15 @@ components: n_epochs: oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: integer minimum: 1 maximum: 50 default: auto description: - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. required: - n_epochs model: @@ -9810,7 +9812,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job". - enum: [fine_tuning.job] + enum: [ fine_tuning.job ] organization_id: type: string description: The organization that owns the fine-tuning job. @@ -9889,7 +9891,7 @@ components: type: type: string description: "The type of the integration being enabled for the fine-tuning job" - enum: ["wandb"] + enum: [ "wandb" ] wandb: type: object description: | @@ -9934,12 +9936,12 @@ components: type: integer level: type: string - enum: ["info", "warn", "error"] + enum: [ "info", "warn", "error" ] message: type: string object: type: string - enum: [fine_tuning.job.event] + enum: [ fine_tuning.job.event ] required: - id - object @@ -9999,7 +10001,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job.checkpoint". - enum: [fine_tuning.job.checkpoint] + enum: [ fine_tuning.job.checkpoint ] required: - created_at - fine_tuning_job_id @@ -10015,7 +10017,7 @@ components: "object": "fine_tuning.job.checkpoint", "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", "created_at": 1712211699, - "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom_suffix:9ABel2dg:ckpt-step-88", + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88", "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", "metrics": { "step": 88, @@ -10181,6 +10183,8 @@ components: description: | Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @@ -10215,7 +10219,7 @@ components: object: description: The object type, which is always `assistant`. 
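The substantive change in this hunk is the new response_format paragraph: "json_schema" enables Structured Outputs, while "json_object" keeps the older JSON mode (which still requires telling the model to emit JSON). A hedged sketch of the json_schema variant; the schema and model snapshot are illustrative, and the Python openai client is assumed:

    from openai import OpenAI

    client = OpenAI()

    completion = client.chat.completions.create(
        model="gpt-4o-2024-08-06",  # a snapshot that supports Structured Outputs
        messages=[
            {"role": "system", "content": "Extract the event details."},
            {"role": "user", "content": "Alice and Bob meet for lunch on Friday."},
        ],
        response_format={
            "type": "json_schema",
            "json_schema": {
                "name": "event",
                "strict": True,
                "schema": {
                    "type": "object",
                    "properties": {
                        "name": {"type": "string"},
                        "participants": {"type": "array", "items": {"type": "string"}},
                        "day": {"type": "string"},
                    },
                    "required": ["name", "participants", "day"],
                    "additionalProperties": False,
                },
            },
        },
    )
    print(completion.choices[0].message.content)  # JSON string matching the supplied schema
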
type: string - enum: [assistant] + enum: [ assistant ] created_at: description: The Unix timestamp (in seconds) for when the assistant was created. type: integer @@ -10243,7 +10247,7 @@ components: tools: description: &assistant_tools_param_description | A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - default: [] + default: [ ] type: array maxItems: 128 items: @@ -10264,7 +10268,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10329,14 +10333,16 @@ components: properties: model: description: *model_description - example: "gpt-4-turbo" + example: "gpt-4o" anyOf: - type: string - type: string enum: [ "gpt-4o", + "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", @@ -10376,7 +10382,7 @@ components: maxLength: 256000 tools: description: *assistant_tools_param_description - default: [] + default: [ ] type: array maxItems: 128 items: @@ -10397,7 +10403,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10439,7 +10445,7 @@ components: type: type: string description: Always `auto`. - enum: ["auto"] + enum: [ "auto" ] required: - type - type: object @@ -10449,7 +10455,7 @@ components: type: type: string description: Always `static`. - enum: ["static"] + enum: [ "static" ] static: type: object additionalProperties: false @@ -10478,8 +10484,8 @@ components: Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. x-oaiTypeLabel: map oneOf: - - required: [vector_store_ids] - - required: [vector_stores] + - required: [ vector_store_ids ] + - required: [ vector_stores ] nullable: true metadata: description: *metadata_description @@ -10487,8 +10493,7 @@ components: x-oaiTypeLabel: map nullable: true temperature: - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + description: *run_temperature_description type: number minimum: 0 maximum: 2 @@ -10502,10 +10507,7 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
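This hunk mainly adds the gpt-4o snapshots to CreateAssistantRequest and reformats default values, but the schema also shows how tools and tool_resources fit together. A minimal creation sketch (Python openai client assumed; the vector store ID is a placeholder and the instruction text is borrowed from the run example earlier in this spec):

    from openai import OpenAI

    client = OpenAI()

    assistant = client.beta.assistants.create(
        model="gpt-4o",
        name="Book summarizer",
        instructions="You summarize books.",
        tools=[{"type": "file_search"}],
        tool_resources={
            "file_search": {"vector_store_ids": ["vs_abc123"]},  # placeholder vector store ID
        },
        temperature=0.2,
    )
    print(assistant.id)
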
+ description: *run_top_p_description response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true @@ -10537,7 +10539,7 @@ components: maxLength: 256000 tools: description: *assistant_tools_param_description - default: [] + default: [ ] type: array maxItems: 128 items: @@ -10558,7 +10560,7 @@ components: type: array description: | Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -10593,10 +10595,7 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. + description: *run_top_p_description response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true @@ -10610,7 +10609,7 @@ components: type: boolean object: type: string - enum: [assistant.deleted] + enum: [ assistant.deleted ] required: - id - object @@ -10653,7 +10652,7 @@ components: type: type: string description: "The type of tool being defined: `code_interpreter`" - enum: ["code_interpreter"] + enum: [ "code_interpreter" ] required: - type @@ -10664,7 +10663,7 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - enum: ["file_search"] + enum: [ "file_search" ] file_search: type: object description: Overrides for the file search tool. @@ -10674,7 +10673,7 @@ components: minimum: 1 maximum: 50 description: | - The maximum number of results the file search tool should output. The default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive. Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. required: @@ -10687,7 +10686,7 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - enum: ["file_search"] + enum: [ "file_search" ] required: - type @@ -10698,7 +10697,7 @@ components: type: type: string description: "The type of tool being defined: `function`" - enum: ["function"] + enum: [ "function" ] function: $ref: "#/components/schemas/FunctionObject" required: @@ -10713,7 +10712,7 @@ components: type: type: string description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: ["auto", "last_messages"] + enum: [ "auto", "last_messages" ] last_messages: type: integer description: The number of most recent messages from the thread when constructing the context for the run. @@ -10736,7 +10735,7 @@ components: `none` means the model will not call any tools and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" x-oaiExpandable: true @@ -10746,7 +10745,7 @@ components: properties: type: type: string - enum: ["function", "code_interpreter", "file_search"] + enum: [ "function", "code_interpreter", "file_search" ] description: The type of the tool. If type is `function`, the function name must be set function: type: object @@ -10770,7 +10769,7 @@ components: object: description: The object type, which is always `thread.run`. type: string - enum: ["thread.run"] + enum: [ "thread.run" ] created_at: description: The Unix timestamp (in seconds) for when the run was created. type: integer @@ -10803,7 +10802,7 @@ components: type: description: For now, this is always `submit_tool_outputs`. type: string - enum: ["submit_tool_outputs"] + enum: [ "submit_tool_outputs" ] submit_tool_outputs: type: object description: Details on the tool outputs needed for this run to continue. @@ -10827,7 +10826,7 @@ components: type: string description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. enum: - ["server_error", "rate_limit_exceeded", "invalid_prompt"] + [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] message: type: string description: A human-readable description of the error. @@ -10862,7 +10861,7 @@ components: reason: description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. type: string - enum: ["max_completion_tokens", "max_prompt_tokens"] + enum: [ "max_completion_tokens", "max_prompt_tokens" ] model: description: The model that the [assistant](/docs/api-reference/assistants) used for this run. type: string @@ -10871,7 +10870,7 @@ components: type: string tools: description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. - default: [] + default: [ ] type: array maxItems: 20 items: @@ -10961,7 +10960,7 @@ components: "failed_at": null, "completed_at": 1699073498, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], "metadata": {}, @@ -10992,14 +10991,16 @@ components: type: string model: description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - example: "gpt-4-turbo" + example: "gpt-4o" anyOf: - type: string - type: string enum: [ "gpt-4o", + "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", @@ -11068,10 +11069,7 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. + description: *run_top_p_description stream: type: boolean nullable: true @@ -11171,7 +11169,7 @@ components: type: type: string description: The type of tool call the output is required for. 
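The RunObject and CreateRunRequest hunks above describe the requires_action to submit_tool_outputs handshake for function tools. A sketch of the polling variant, with placeholder IDs, a dummy tool result, and the Python openai client assumed throughout:

    import time
    from openai import OpenAI

    client = OpenAI()

    run = client.beta.threads.runs.create(
        thread_id="thread_abc123",   # placeholder thread ID
        assistant_id="asst_abc123",  # placeholder assistant ID
    )
    while run.status in ("queued", "in_progress"):
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(thread_id="thread_abc123", run_id=run.id)

    if run.status == "requires_action":
        calls = run.required_action.submit_tool_outputs.tool_calls
        run = client.beta.threads.runs.submit_tool_outputs(
            thread_id="thread_abc123",
            run_id=run.id,
            tool_outputs=[{"tool_call_id": c.id, "output": "42"} for c in calls],  # dummy output
        )
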
For now, this is always `function`. - enum: ["function"] + enum: [ "function" ] function: type: object description: The function definition. @@ -11202,14 +11200,16 @@ components: description: If no thread is provided, an empty thread will be created. model: description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - example: "gpt-4-turbo" + example: "gpt-4o" anyOf: - type: string - type: string enum: [ "gpt-4o", + "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", @@ -11259,7 +11259,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -11338,7 +11338,7 @@ components: object: description: The object type, which is always `thread`. type: string - enum: ["thread"] + enum: [ "thread" ] created_at: description: The Unix timestamp (in seconds) for when the thread was created. type: integer @@ -11354,7 +11354,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -11412,7 +11412,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -11454,7 +11454,7 @@ components: type: type: string description: Always `auto`. - enum: ["auto"] + enum: [ "auto" ] required: - type - type: object @@ -11464,7 +11464,7 @@ components: type: type: string description: Always `static`. - enum: ["static"] + enum: [ "static" ] static: type: object additionalProperties: false @@ -11494,8 +11494,8 @@ components: x-oaiTypeLabel: map x-oaiExpandable: true oneOf: - - required: [vector_store_ids] - - required: [vector_stores] + - required: [ vector_store_ids ] + - required: [ vector_stores ] nullable: true metadata: description: *metadata_description @@ -11519,7 +11519,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -11549,7 +11549,7 @@ components: type: boolean object: type: string - enum: [thread.deleted] + enum: [ thread.deleted ] required: - id - object @@ -11591,7 +11591,7 @@ components: object: description: The object type, which is always `thread.message`. type: string - enum: ["thread.message"] + enum: [ "thread.message" ] created_at: description: The Unix timestamp (in seconds) for when the message was created. type: integer @@ -11601,7 +11601,7 @@ components: status: description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. type: string - enum: ["in_progress", "incomplete", "completed"] + enum: [ "in_progress", "incomplete", "completed" ] incomplete_details: description: On an incomplete message, details about why the message is incomplete. 
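CreateThreadAndRunRequest above bundles thread creation and run creation into a single call, and the TruncationObject defined earlier applies to it as well. A compact sketch of that shortcut (placeholder assistant ID; Python openai client assumed):

    from openai import OpenAI

    client = OpenAI()

    run = client.beta.threads.create_and_run(
        assistant_id="asst_abc123",  # placeholder assistant ID
        thread={
            "messages": [
                {"role": "user", "content": "Summarize chapter one in two sentences."},
            ],
        },
        truncation_strategy={"type": "last_messages", "last_messages": 10},
    )
    print(run.thread_id, run.status)
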
type: object @@ -11631,7 +11631,7 @@ components: role: description: The entity that produced the message. One of `user` or `assistant`. type: string - enum: ["user", "assistant"] + enum: [ "user", "assistant" ] content: description: The content of the message in array of text and/or images. type: array @@ -11724,7 +11724,7 @@ components: object: description: The object type, which is always `thread.message.delta`. type: string - enum: ["thread.message.delta"] + enum: [ "thread.message.delta" ] delta: description: The delta containing the fields that have changed on the Message. type: object @@ -11732,7 +11732,7 @@ components: role: description: The entity that produced the message. One of `user` or `assistant`. type: string - enum: ["user", "assistant"] + enum: [ "user", "assistant" ] content: description: The content of the message in array of text and/or images. type: array @@ -11773,7 +11773,7 @@ components: properties: role: type: string - enum: ["user", "assistant"] + enum: [ "user", "assistant" ] description: | The role of the entity that is creating the message. Allowed values include: - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. @@ -11840,7 +11840,7 @@ components: type: boolean object: type: string - enum: [thread.message.deleted] + enum: [ thread.message.deleted ] required: - id - object @@ -11879,7 +11879,7 @@ components: type: description: Always `image_file`. type: string - enum: ["image_file"] + enum: [ "image_file" ] image_file: type: object properties: @@ -11889,7 +11889,7 @@ components: detail: type: string description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - file_id @@ -11908,7 +11908,7 @@ components: type: description: Always `image_file`. type: string - enum: ["image_file"] + enum: [ "image_file" ] image_file: type: object properties: @@ -11918,7 +11918,7 @@ components: detail: type: string description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - index @@ -11931,7 +11931,7 @@ components: properties: type: type: string - enum: ["image_url"] + enum: [ "image_url" ] description: The type of the content part. image_url: type: object @@ -11943,7 +11943,7 @@ components: detail: type: string description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - url @@ -11962,7 +11962,7 @@ components: type: description: Always `image_url`. type: string - enum: ["image_url"] + enum: [ "image_url" ] image_url: type: object properties: @@ -11972,7 +11972,7 @@ components: detail: type: string description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] + enum: [ "auto", "low", "high" ] default: "auto" required: - index @@ -11986,7 +11986,7 @@ components: type: description: Always `text`. type: string - enum: ["text"] + enum: [ "text" ] text: type: object properties: @@ -12015,7 +12015,7 @@ components: type: description: Always `text`. 
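The message hunks above cover CreateMessageRequest and the text and image content parts, including the detail level for images. A sketch of adding a mixed-content user message to an existing thread (placeholder thread ID and image URL; Python openai client assumed):

    from openai import OpenAI

    client = OpenAI()

    message = client.beta.threads.messages.create(
        thread_id="thread_abc123",  # placeholder thread ID
        role="user",
        content=[
            {"type": "text", "text": "What is shown in this image?"},
            {
                "type": "image_url",
                "image_url": {"url": "https://example.com/photo.jpg", "detail": "low"},
            },
        ],
    )
    print(message.id, message.status)
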
type: string - enum: ["text"] + enum: [ "text" ] text: type: string description: Text content to be sent to the model @@ -12031,7 +12031,7 @@ components: type: description: Always `file_citation`. type: string - enum: ["file_citation"] + enum: [ "file_citation" ] text: description: The text in the message content that needs to be replaced. type: string @@ -12064,7 +12064,7 @@ components: type: description: Always `file_path`. type: string - enum: ["file_path"] + enum: [ "file_path" ] text: description: The text in the message content that needs to be replaced. type: string @@ -12100,7 +12100,7 @@ components: type: description: Always `text`. type: string - enum: ["text"] + enum: [ "text" ] text: type: object properties: @@ -12129,7 +12129,7 @@ components: type: description: Always `file_citation`. type: string - enum: ["file_citation"] + enum: [ "file_citation" ] text: description: The text in the message content that needs to be replaced. type: string @@ -12163,7 +12163,7 @@ components: type: description: Always `file_path`. type: string - enum: ["file_path"] + enum: [ "file_path" ] text: description: The text in the message content that needs to be replaced. type: string @@ -12195,7 +12195,7 @@ components: object: description: The object type, which is always `thread.run.step`. type: string - enum: ["thread.run.step"] + enum: [ "thread.run.step" ] created_at: description: The Unix timestamp (in seconds) for when the run step was created. type: integer @@ -12211,11 +12211,11 @@ components: type: description: The type of run step, which can be either `message_creation` or `tool_calls`. type: string - enum: ["message_creation", "tool_calls"] + enum: [ "message_creation", "tool_calls" ] status: description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. type: string - enum: ["in_progress", "cancelled", "failed", "completed", "expired"] + enum: [ "in_progress", "cancelled", "failed", "completed", "expired" ] step_details: type: object description: The details of the run step. @@ -12231,7 +12231,7 @@ components: code: type: string description: One of `server_error` or `rate_limit_exceeded`. - enum: ["server_error", "rate_limit_exceeded"] + enum: [ "server_error", "rate_limit_exceeded" ] message: type: string description: A human-readable description of the error. @@ -12295,7 +12295,7 @@ components: object: description: The object type, which is always `thread.run.step.delta`. type: string - enum: ["thread.run.step.delta"] + enum: [ "thread.run.step.delta" ] delta: description: The delta containing the fields that have changed on the run step. type: object @@ -12366,7 +12366,7 @@ components: type: description: Always `message_creation`. type: string - enum: ["message_creation"] + enum: [ "message_creation" ] message_creation: type: object properties: @@ -12387,7 +12387,7 @@ components: type: description: Always `message_creation`. type: string - enum: ["message_creation"] + enum: [ "message_creation" ] message_creation: type: object properties: @@ -12405,7 +12405,7 @@ components: type: description: Always `tool_calls`. type: string - enum: ["tool_calls"] + enum: [ "tool_calls" ] tool_calls: type: array description: | @@ -12428,7 +12428,7 @@ components: type: description: Always `tool_calls`. type: string - enum: ["tool_calls"] + enum: [ "tool_calls" ] tool_calls: type: array description: | @@ -12453,7 +12453,7 @@ components: type: type: string description: The type of tool call. 
This is always going to be `code_interpreter` for this type of tool call. - enum: ["code_interpreter"] + enum: [ "code_interpreter" ] code_interpreter: type: object description: The Code Interpreter tool call definition. @@ -12492,7 +12492,7 @@ components: type: type: string description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: ["code_interpreter"] + enum: [ "code_interpreter" ] code_interpreter: type: object description: The Code Interpreter tool call definition. @@ -12521,7 +12521,7 @@ components: type: description: Always `logs`. type: string - enum: ["logs"] + enum: [ "logs" ] logs: type: string description: The text output from the Code Interpreter tool call. @@ -12540,7 +12540,7 @@ components: type: description: Always `logs`. type: string - enum: ["logs"] + enum: [ "logs" ] logs: type: string description: The text output from the Code Interpreter tool call. @@ -12555,7 +12555,7 @@ components: type: description: Always `image`. type: string - enum: ["image"] + enum: [ "image" ] image: type: object properties: @@ -12578,7 +12578,7 @@ components: type: description: Always `image`. type: string - enum: ["image"] + enum: [ "image" ] image: type: object properties: @@ -12599,7 +12599,7 @@ components: type: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: ["file_search"] + enum: [ "file_search" ] file_search: type: object description: For now, this is always going to be an empty object. @@ -12622,7 +12622,7 @@ components: type: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: ["file_search"] + enum: [ "file_search" ] file_search: type: object description: For now, this is always going to be an empty object. @@ -12642,7 +12642,7 @@ components: type: type: string description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: ["function"] + enum: [ "function" ] function: type: object description: The definition of the function that was called. @@ -12679,7 +12679,7 @@ components: type: type: string description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: ["function"] + enum: [ "function" ] function: type: object description: The definition of the function that was called. @@ -12706,7 +12706,7 @@ components: anchor: description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." type: string - enum: ["last_active_at"] + enum: [ "last_active_at" ] days: description: The number of days after the anchor time that the vector store will expire. type: integer @@ -12727,7 +12727,7 @@ components: object: description: The object type, which is always `vector_store`. type: string - enum: ["vector_store"] + enum: [ "vector_store" ] created_at: description: The Unix timestamp (in seconds) for when the vector store was created. type: integer @@ -12764,7 +12764,7 @@ components: status: description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. 
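VectorStoreExpirationAfter and VectorStoreObject above define the last_active_at expiration anchor. A minimal sketch of creating a store that expires after a week of inactivity (Python openai client assumed; in SDK versions contemporary with this spec the resource lives under client.beta):

    from openai import OpenAI

    client = OpenAI()

    vector_store = client.beta.vector_stores.create(
        name="Support FAQ",
        expires_after={"anchor": "last_active_at", "days": 7},
    )
    print(vector_store.id, vector_store.status)
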
type: string - enum: ["expired", "in_progress", "completed"] + enum: [ "expired", "in_progress", "completed" ] expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" expires_at: @@ -12892,7 +12892,7 @@ components: type: boolean object: type: string - enum: [vector_store.deleted] + enum: [ vector_store.deleted ] required: - id - object @@ -12909,7 +12909,7 @@ components: object: description: The object type, which is always `vector_store.file`. type: string - enum: ["vector_store.file"] + enum: [ "vector_store.file" ] usage_bytes: description: The total vector store usage in bytes. Note that this may be different from the original file size. type: integer @@ -12922,7 +12922,7 @@ components: status: description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. type: string - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] last_error: type: object description: The last error associated with this vector store file. Will be `null` if there are no errors. @@ -12933,10 +12933,9 @@ components: description: One of `server_error` or `rate_limit_exceeded`. enum: [ - "internal_error", - "file_not_found", - "parsing_error", - "unhandled_mime_type", + "server_error", + "unsupported_file", + "invalid_file", ] message: type: string @@ -12989,7 +12988,7 @@ components: type: type: string description: Always `other`. - enum: ["other"] + enum: [ "other" ] required: - type @@ -13001,7 +13000,7 @@ components: type: type: string description: Always `static`. - enum: ["static"] + enum: [ "static" ] static: $ref: "#/components/schemas/StaticChunkingStrategy" required: @@ -13036,7 +13035,7 @@ components: type: type: string description: Always `auto`. - enum: ["auto"] + enum: [ "auto" ] required: - type @@ -13048,7 +13047,7 @@ components: type: type: string description: Always `static`. - enum: ["static"] + enum: [ "static" ] static: $ref: "#/components/schemas/StaticChunkingStrategy" required: @@ -13109,7 +13108,7 @@ components: type: boolean object: type: string - enum: [vector_store.file.deleted] + enum: [ vector_store.file.deleted ] required: - id - object @@ -13126,7 +13125,7 @@ components: object: description: The object type, which is always `vector_store.file_batch`. type: string - enum: ["vector_store.files_batch"] + enum: [ "vector_store.files_batch" ] created_at: description: The Unix timestamp (in seconds) for when the vector store files batch was created. type: integer @@ -13136,7 +13135,7 @@ components: status: description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. 
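This hunk reworks the vector store file error codes and the auto/static chunking strategy schemas. Attaching a file with an explicit static strategy might look as follows; the field names and token values come from the StaticChunkingStrategy object referenced here by $ref and are assumptions of this sketch, as are the placeholder IDs and the Python openai client:

    from openai import OpenAI

    client = OpenAI()

    vs_file = client.beta.vector_stores.files.create(
        vector_store_id="vs_abc123",  # placeholder vector store ID
        file_id="file-abc123",        # placeholder file ID (uploaded with purpose="assistants")
        chunking_strategy={
            "type": "static",
            "static": {
                "max_chunk_size_tokens": 800,  # assumed values; both fields belong to StaticChunkingStrategy
                "chunk_overlap_tokens": 400,
            },
        },
    )
    print(vs_file.id, vs_file.status)
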
type: string - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] file_counts: type: object properties: @@ -13241,7 +13240,7 @@ components: properties: event: type: string - enum: ["thread.created"] + enum: [ "thread.created" ] data: $ref: "#/components/schemas/ThreadObject" required: @@ -13257,7 +13256,7 @@ components: properties: event: type: string - enum: ["thread.run.created"] + enum: [ "thread.run.created" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13270,7 +13269,7 @@ components: properties: event: type: string - enum: ["thread.run.queued"] + enum: [ "thread.run.queued" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13283,7 +13282,7 @@ components: properties: event: type: string - enum: ["thread.run.in_progress"] + enum: [ "thread.run.in_progress" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13296,7 +13295,7 @@ components: properties: event: type: string - enum: ["thread.run.requires_action"] + enum: [ "thread.run.requires_action" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13309,7 +13308,7 @@ components: properties: event: type: string - enum: ["thread.run.completed"] + enum: [ "thread.run.completed" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13335,7 +13334,7 @@ components: properties: event: type: string - enum: ["thread.run.failed"] + enum: [ "thread.run.failed" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13348,7 +13347,7 @@ components: properties: event: type: string - enum: ["thread.run.cancelling"] + enum: [ "thread.run.cancelling" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13361,7 +13360,7 @@ components: properties: event: type: string - enum: ["thread.run.cancelled"] + enum: [ "thread.run.cancelled" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13374,7 +13373,7 @@ components: properties: event: type: string - enum: ["thread.run.expired"] + enum: [ "thread.run.expired" ] data: $ref: "#/components/schemas/RunObject" required: @@ -13390,7 +13389,7 @@ components: properties: event: type: string - enum: ["thread.run.step.created"] + enum: [ "thread.run.step.created" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13403,7 +13402,7 @@ components: properties: event: type: string - enum: ["thread.run.step.in_progress"] + enum: [ "thread.run.step.in_progress" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13416,7 +13415,7 @@ components: properties: event: type: string - enum: ["thread.run.step.delta"] + enum: [ "thread.run.step.delta" ] data: $ref: "#/components/schemas/RunStepDeltaObject" required: @@ -13429,7 +13428,7 @@ components: properties: event: type: string - enum: ["thread.run.step.completed"] + enum: [ "thread.run.step.completed" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13442,7 +13441,7 @@ components: properties: event: type: string - enum: ["thread.run.step.failed"] + enum: [ "thread.run.step.failed" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13455,7 +13454,7 @@ components: properties: event: type: string - enum: ["thread.run.step.cancelled"] + enum: [ "thread.run.step.cancelled" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13468,7 +13467,7 @@ components: properties: event: type: string - enum: ["thread.run.step.expired"] + enum: [ "thread.run.step.expired" ] data: $ref: "#/components/schemas/RunStepObject" required: @@ -13484,7 +13483,7 @@ components: properties: event: type: 
string - enum: ["thread.message.created"] + enum: [ "thread.message.created" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -13497,7 +13496,7 @@ components: properties: event: type: string - enum: ["thread.message.in_progress"] + enum: [ "thread.message.in_progress" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -13510,7 +13509,7 @@ components: properties: event: type: string - enum: ["thread.message.delta"] + enum: [ "thread.message.delta" ] data: $ref: "#/components/schemas/MessageDeltaObject" required: @@ -13523,7 +13522,7 @@ components: properties: event: type: string - enum: ["thread.message.completed"] + enum: [ "thread.message.completed" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -13536,7 +13535,7 @@ components: properties: event: type: string - enum: ["thread.message.incomplete"] + enum: [ "thread.message.incomplete" ] data: $ref: "#/components/schemas/MessageObject" required: @@ -13551,7 +13550,7 @@ components: properties: event: type: string - enum: ["error"] + enum: [ "error" ] data: $ref: "#/components/schemas/Error" required: @@ -13566,10 +13565,10 @@ components: properties: event: type: string - enum: ["done"] + enum: [ "done" ] data: type: string - enum: ["[DONE]"] + enum: [ "[DONE]" ] required: - event - data @@ -13584,7 +13583,7 @@ components: type: string object: type: string - enum: [batch] + enum: [ batch ] description: The object type, which is always `batch`. endpoint: type: string @@ -13709,7 +13708,7 @@ components: description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. method: type: string - enum: ["POST"] + enum: [ "POST" ] description: The HTTP method to be used for the request. Currently only `POST` is supported. 
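The AssistantStreamEvent variants reformatted above (thread.run.*, thread.message.delta, done) are what a client sees when it creates a run with streaming enabled. A rough consumption sketch (placeholder IDs; Python openai client assumed; the event and delta attribute names follow that SDK's typed stream objects):

    from openai import OpenAI

    client = OpenAI()

    stream = client.beta.threads.runs.create(
        thread_id="thread_abc123",   # placeholder thread ID
        assistant_id="asst_abc123",  # placeholder assistant ID
        stream=True,
    )
    for event in stream:
        # event.event is one of the names above, e.g. "thread.message.delta"
        if event.event == "thread.message.delta":
            for part in event.data.delta.content or []:
                if part.type == "text" and part.text and part.text.value:
                    print(part.text.value, end="", flush=True)
        elif event.event == "thread.run.completed":
            print()
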
url: type: string @@ -13717,7 +13716,7 @@ components: x-oaiMeta: name: The request input object example: | - {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} + {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} BatchRequestOutput: type: object @@ -13756,7 +13755,7 @@ components: x-oaiMeta: name: The request output object example: | - {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-3.5-turbo", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null} + {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-4o-mini", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null} ListBatchesResponse: type: object @@ -13775,14 +13774,14 @@ components: type: boolean object: type: string - enum: [list] + enum: [ list ] required: - object - data - has_more security: - - ApiKeyAuth: [] + - ApiKeyAuth: [ ] x-oaiMeta: navigationGroups: @@ -13790,6 +13789,8 @@ x-oaiMeta: title: Endpoints - id: assistants title: Assistants + - id: administration + title: Administration - id: legacy title: Legacy groups: @@ -14038,6 +14039,8 @@ x-oaiMeta: - type: object key: CreateModerationResponse path: object + + - id: assistants title: Assistants beta: true @@ -14265,6 +14268,175 @@ x-oaiMeta: - type: object key: AssistantStreamEvent path: events + + - id: administration + title: Overview + description: | + Programmatically manage your organization. + + The Audit Logs endpoint provides a log of all actions taken in the + organization for security and monitoring purposes. + + To access these endpoints please generate an Admin API Key through the [API Platform Organization overview](/organization/admin-keys). Admin API keys cannot be used for non-administration endpoints. + + For best practices on setting up your organization, please refer to this [guide](/docs/guides/production-best-practices/setting-up-your-organization) + navigationGroup: administration + + - id: invite + title: Invites + description: Invite and manage invitations for an organization. Invited users are automatically added to the Default project. + navigationGroup: administration + sections: + - type: endpoint + key: list-invites + path: list + - type: endpoint + key: inviteUser + path: create + - type: endpoint + key: retrieve-invite + path: retrieve + - type: endpoint + key: delete-invite + path: delete + - type: object + key: Invite + path: object + + - id: users + title: Users + description: | + Manage users and their role in an organization. Users will be automatically added to the Default project. 
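The batch hunks above update the example request line to gpt-4o-mini and keep the 24h completion window. End to end, the flow is: write one BatchRequestInput object per line, upload the file with purpose "batch", then create the batch. A sketch under the usual assumptions (Python openai client; the input file contents mirror the example in the spec):

    import json
    from openai import OpenAI

    client = OpenAI()

    # One BatchRequestInput object per line, mirroring the example above.
    with open("batch_input.jsonl", "w") as f:
        f.write(json.dumps({
            "custom_id": "request-1",
            "method": "POST",
            "url": "/v1/chat/completions",
            "body": {
                "model": "gpt-4o-mini",
                "messages": [
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": "What is 2+2?"},
                ],
            },
        }) + "\n")

    batch_file = client.files.create(file=open("batch_input.jsonl", "rb"), purpose="batch")
    batch = client.batches.create(
        input_file_id=batch_file.id,
        endpoint="/v1/chat/completions",
        completion_window="24h",  # currently the only supported window
    )
    print(batch.id, batch.status)
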
+ navigationGroup: administration + sections: + - type: endpoint + key: list-users + path: list + - type: endpoint + key: modify-user + path: modify + - type: endpoint + key: retrieve-user + path: retrieve + - type: endpoint + key: delete-user + path: delete + - type: object + key: User + path: object + + - id: projects + title: Projects + description: | + Manage the projects within an orgnanization includes creation, updating, and archiving or projects. + The Default project cannot be modified or archived. + navigationGroup: administration + sections: + - type: endpoint + key: list-projects + path: list + - type: endpoint + key: create-project + path: create + - type: endpoint + key: retrieve-project + path: retrieve + - type: endpoint + key: modify-project + path: modify + - type: endpoint + key: archive-project + path: archive + - type: object + key: Project + path: object + + - id: project-users + title: Project Users + description: | + Manage users within a project, including adding, updating roles, and removing users. + Users cannot be removed from the Default project, unless they are being removed from the organization. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-users + path: list + - type: endpoint + key: create-project-user + path: creeate + - type: endpoint + key: retrieve-project-user + path: retrieve + - type: endpoint + key: modify-project-user + path: modify + - type: endpoint + key: delete-project-user + path: delete + - type: object + key: ProjectUser + path: object + + - id: project-service-accounts + title: Project Service Accounts + description: | + Manage service accounts within a project. A service account is a bot user that is not associated with a user. + If a user leaves an organization, their keys and membership in projects will no longer work. Service accounts + do not have this limitation. However, service accounts can also be deleted from a project. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-service-accounts + path: list + - type: endpoint + key: create-project-service-account + path: create + - type: endpoint + key: retrieve-project-service-account + path: retrieve + - type: endpoint + key: delete-project-service-account + path: delete + - type: object + key: ProjectServiceAccount + path: object + + - id: project-api-keys + title: Project API Keys + description: | + Manage API keys for a given project. Supports listing and deleting keys for users. + This API does not allow issuing keys for users, as users need to authorize themselves to generate keys. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-api-keys + path: list + - type: endpoint + key: retrieve-project-api-key + path: retrieve + - type: endpoint + key: delete-project-api-key + path: delete + - type: object + key: ProjectApiKey + path: object + + - id: audit-logs + title: Audit Logs + description: | + Logs of user actions and configuration changes within this organization. + + To log events, you must activate logging in the [Organization Settings](/settings/organization/general). + Once activated, for security reasons, logging cannot be deactivated. 
+ navigationGroup: administration + sections: + - type: endpoint + key: list-audit-logs + path: list + - type: object + key: AuditLog + path: object + - id: completions title: Completions legacy: true From f73aef7ebd7a8de32f320c3b0c898e12fbd2a665 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 16 Aug 2024 15:51:17 +0200 Subject: [PATCH 092/251] build: Upgrade pubspec.lock files (#523) https://github.com/dart-lang/build/issues/3733#issuecomment-2272082820 --- .gitignore | 1 + examples/browser_summarizer/pubspec.lock | 52 ++++++++--------- .../browser_summarizer/pubspec_overrides.yaml | 2 +- examples/docs_examples/pubspec.lock | 40 ++++++------- examples/docs_examples/pubspec_overrides.yaml | 2 +- examples/hello_world_backend/pubspec.lock | 16 +++--- .../pubspec_overrides.yaml | 2 +- examples/hello_world_cli/pubspec.lock | 20 +++---- .../hello_world_cli/pubspec_overrides.yaml | 2 +- examples/hello_world_flutter/pubspec.lock | 28 +++++----- .../pubspec_overrides.yaml | 2 +- .../pubspec.lock | 24 ++++---- examples/wikivoyage_eu/pubspec.lock | 24 ++++---- examples/wikivoyage_eu/pubspec_overrides.yaml | 2 +- packages/anthropic_sdk_dart/pubspec.lock | 56 +++++++++---------- .../langchain_chroma/pubspec_overrides.yaml | 2 +- .../langchain_firebase/example/pubspec.lock | 44 +++++++-------- .../example/pubspec_overrides.yaml | 2 +- packages/langchain_firebase/pubspec.lock | 44 +++++++-------- .../langchain_google/pubspec_overrides.yaml | 2 +- .../pubspec_overrides.yaml | 2 +- .../langchain_ollama/pubspec_overrides.yaml | 2 +- .../langchain_openai/pubspec_overrides.yaml | 2 +- .../langchain_pinecone/pubspec_overrides.yaml | 2 +- .../langchain_supabase/pubspec_overrides.yaml | 3 +- 25 files changed, 185 insertions(+), 193 deletions(-) diff --git a/.gitignore b/.gitignore index dd509d78..cf493593 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ .dart_tool/ /pubspec.lock .vscode/ +.aider* diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index 21488e9b..fe72f39c 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: args - sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596 + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" url: "https://pub.dev" source: hosted - version: "2.4.2" + version: "2.5.0" async: dependency: transitive description: @@ -29,10 +29,10 @@ packages: dependency: transitive description: name: bloc - sha256: "3820f15f502372d979121de1f6b97bfcf1630ebff8fe1d52fb2b0bfa49be5b49" + sha256: "106842ad6569f0b60297619e9e0b1885c2fb9bf84812935490e6c5275777804e" url: "https://pub.dev" source: hosted - version: "8.1.2" + version: "8.1.4" characters: dependency: transitive description: @@ -109,10 +109,10 @@ packages: dependency: transitive description: name: ffi - sha256: "7bf0adc28a23d395f19f3f1eb21dd7cfd1dd9f8e1c50051c069122e6853bc878" + sha256: "16ed7b077ef01ad6170a3d0c57caa4a112a38d7a2ed5602e0aca9ca6f3d98da6" url: "https://pub.dev" source: hosted - version: "2.1.0" + version: "2.1.3" file: dependency: transitive description: @@ -267,10 +267,10 @@ packages: dependency: transitive description: name: markdown - sha256: acf35edccc0463a9d7384e437c015a3535772e09714cf60e07eeef3a15870dcd + sha256: ef2a1298144e3f985cc736b22e0ccdaf188b5b3970648f2d9dc13efd1d9df051 url: "https://pub.dev" source: hosted - version: "7.1.1" + version: "7.2.2" material_color_utilities: dependency: 
transitive description: @@ -354,10 +354,10 @@ packages: dependency: transitive description: name: path_provider_windows - sha256: "8bc9f22eee8690981c22aa7fc602f5c85b497a6fb2ceb35ee5a5e5ed85ad8170" + sha256: bd6f00dbd873bfb70d0761682da2b3a2c2fccc2b9e84c495821639601d81afe7 url: "https://pub.dev" source: hosted - version: "2.2.1" + version: "2.3.0" petitparser: dependency: transitive description: @@ -370,10 +370,10 @@ packages: dependency: transitive description: name: platform - sha256: "12220bb4b65720483f8fa9450b4332347737cf8213dd2840d8b2c823e47243ec" + sha256: "9b71283fc13df574056616011fb138fd3b793ea47cc509c189a6c3fa5f8a1a65" url: "https://pub.dev" source: hosted - version: "3.1.4" + version: "3.1.5" plugin_platform_interface: dependency: transitive description: @@ -386,10 +386,10 @@ packages: dependency: transitive description: name: provider - sha256: "9a96a0a19b594dbc5bf0f1f27d2bc67d5f95957359b461cd9feb44ed6ae75096" + sha256: c8a055ee5ce3fd98d6fc872478b03823ffdb448699c6ebdbbc71d59b596fd48c url: "https://pub.dev" source: hosted - version: "6.1.1" + version: "6.1.2" rfc_6901: dependency: transitive description: @@ -402,10 +402,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" shared_preferences: dependency: "direct main" description: @@ -426,10 +426,10 @@ packages: dependency: transitive description: name: shared_preferences_foundation - sha256: "776786cff96324851b656777648f36ac772d88bc4c669acff97b7fce5de3c849" + sha256: c4b35f6cb8f63c147312c054ce7c2254c8066745125264f0c88739c417fc9d9f url: "https://pub.dev" source: hosted - version: "2.5.1" + version: "2.5.2" shared_preferences_linux: dependency: transitive description: @@ -487,10 +487,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" tavily_dart: dependency: "direct overridden" description: @@ -534,18 +534,10 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" - url: "https://pub.dev" - source: hosted - version: "0.5.1" - win32: - dependency: transitive - description: - name: win32 - sha256: "464f5674532865248444b4c3daca12bd9bf2d7c47f759ce2617986e7229494a8" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "5.2.0" + version: "1.0.0" xdg_directories: dependency: transitive description: diff --git a/examples/browser_summarizer/pubspec_overrides.yaml b/examples/browser_summarizer/pubspec_overrides.yaml index 49be75a7..808fbc3a 100644 --- a/examples/browser_summarizer/pubspec_overrides.yaml +++ b/examples/browser_summarizer/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_community,tavily_dart +# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,langchain_openai,openai_dart,tavily_dart dependency_overrides: langchain: path: ../../packages/langchain diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 8aaa5dcd..caa950ab 100644 --- 
a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _discoveryapis_commons - sha256: f8bb1fdbd77f3d5c1d62b5b0eca75fbf1e41bf4f6c62628f880582e2182ae45d + sha256: "113c4100b90a5b70a983541782431b82168b3cae166ab130649c36eb3559d498" url: "https://pub.dev" source: hosted - version: "1.0.6" + version: "1.0.7" anthropic_sdk_dart: dependency: "direct overridden" description: @@ -20,10 +20,10 @@ packages: dependency: transitive description: name: args - sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596 + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" url: "https://pub.dev" source: hosted - version: "2.4.2" + version: "2.5.0" async: dependency: transitive description: @@ -59,10 +59,10 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" cross_file: dependency: transitive description: @@ -115,10 +115,10 @@ packages: dependency: transitive description: name: ffi - sha256: "493f37e7df1804778ff3a53bd691d8692ddf69702cf4c1c1096a2e41b4779e21" + sha256: "16ed7b077ef01ad6170a3d0c57caa4a112a38d7a2ed5602e0aca9ca6f3d98da6" url: "https://pub.dev" source: hosted - version: "2.1.2" + version: "2.1.3" fixnum: dependency: transitive description: @@ -163,10 +163,10 @@ packages: dependency: transitive description: name: google_identity_services_web - sha256: "9482364c9f8b7bd36902572ebc3a7c2b5c8ee57a9c93e6eb5099c1a9ec5265d8" + sha256: "5be191523702ba8d7a01ca97c17fca096822ccf246b0a9f11923a6ded06199b6" url: "https://pub.dev" source: hosted - version: "0.3.1+1" + version: "0.3.1+4" googleapis: dependency: transitive description: @@ -203,10 +203,10 @@ packages: dependency: transitive description: name: http_parser - sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "4.0.2" + version: "4.1.0" iregexp: dependency: transitive description: @@ -322,10 +322,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.15.0" mistralai_dart: dependency: "direct overridden" description: @@ -391,10 +391,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" source_span: dependency: transitive description: @@ -415,10 +415,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" tavily_dart: dependency: "direct overridden" description: @@ -469,9 +469,9 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: 
d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: dart: ">=3.4.0 <4.0.0" diff --git a/examples/docs_examples/pubspec_overrides.yaml b/examples/docs_examples/pubspec_overrides.yaml index 1c756a5e..4060d466 100644 --- a/examples/docs_examples/pubspec_overrides.yaml +++ b/examples/docs_examples/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: chromadb,langchain,langchain_chroma,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai,langchain_core,langchain_community,tavily_dart,anthropic_sdk_dart,langchain_anthropic +# melos_managed_dependency_overrides: anthropic_sdk_dart,chromadb,langchain,langchain_anthropic,langchain_chroma,langchain_community,langchain_core,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,tavily_dart,vertex_ai dependency_overrides: anthropic_sdk_dart: path: ../../packages/anthropic_sdk_dart diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index 97cf3b4b..b2934b90 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -138,10 +138,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.15.0" openai_dart: dependency: "direct overridden" description: @@ -161,10 +161,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" shelf: dependency: "direct main" description: @@ -217,10 +217,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: @@ -249,9 +249,9 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: dart: ">=3.4.0 <4.0.0" diff --git a/examples/hello_world_backend/pubspec_overrides.yaml b/examples/hello_world_backend/pubspec_overrides.yaml index 93b5421a..a52f79af 100644 --- a/examples/hello_world_backend/pubspec_overrides.yaml +++ b/examples/hello_world_backend/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_openai,openai_dart dependency_overrides: langchain: path: ../../packages/langchain diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 06a4acdb..40613637 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -85,10 +85,10 @@ packages: dependency: transitive description: name: http_parser - 
sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "4.0.2" + version: "4.1.0" json_annotation: dependency: transitive description: @@ -130,10 +130,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.15.0" openai_dart: dependency: "direct overridden" description: @@ -153,10 +153,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" source_span: dependency: transitive description: @@ -177,10 +177,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: @@ -209,9 +209,9 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: dart: ">=3.4.0 <4.0.0" diff --git a/examples/hello_world_cli/pubspec_overrides.yaml b/examples/hello_world_cli/pubspec_overrides.yaml index 93b5421a..a52f79af 100644 --- a/examples/hello_world_cli/pubspec_overrides.yaml +++ b/examples/hello_world_cli/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_openai,openai_dart dependency_overrides: langchain: path: ../../packages/langchain diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 94a94e96..02e61985 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _discoveryapis_commons - sha256: f8bb1fdbd77f3d5c1d62b5b0eca75fbf1e41bf4f6c62628f880582e2182ae45d + sha256: "113c4100b90a5b70a983541782431b82168b3cae166ab130649c36eb3559d498" url: "https://pub.dev" source: hosted - version: "1.0.6" + version: "1.0.7" args: dependency: transitive description: @@ -29,10 +29,10 @@ packages: dependency: transitive description: name: bloc - sha256: "3820f15f502372d979121de1f6b97bfcf1630ebff8fe1d52fb2b0bfa49be5b49" + sha256: "106842ad6569f0b60297619e9e0b1885c2fb9bf84812935490e6c5275777804e" url: "https://pub.dev" source: hosted - version: "8.1.2" + version: "8.1.4" characters: dependency: transitive description: @@ -146,10 +146,10 @@ packages: dependency: transitive description: name: google_identity_services_web - sha256: "9482364c9f8b7bd36902572ebc3a7c2b5c8ee57a9c93e6eb5099c1a9ec5265d8" + sha256: "5be191523702ba8d7a01ca97c17fca096822ccf246b0a9f11923a6ded06199b6" url: "https://pub.dev" source: hosted - version: "0.3.1+1" + version: "0.3.1+4" googleapis: dependency: transitive description: 
@@ -305,10 +305,10 @@ packages: dependency: transitive description: name: provider - sha256: "9a96a0a19b594dbc5bf0f1f27d2bc67d5f95957359b461cd9feb44ed6ae75096" + sha256: c8a055ee5ce3fd98d6fc872478b03823ffdb448699c6ebdbbc71d59b596fd48c url: "https://pub.dev" source: hosted - version: "6.1.1" + version: "6.1.2" retry: dependency: transitive description: @@ -321,10 +321,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" sky_engine: dependency: transitive description: flutter @@ -350,10 +350,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: @@ -397,10 +397,10 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: dart: ">=3.4.0 <4.0.0" flutter: ">=3.19.0" diff --git a/examples/hello_world_flutter/pubspec_overrides.yaml b/examples/hello_world_flutter/pubspec_overrides.yaml index d5192892..5c8d37f9 100644 --- a/examples/hello_world_flutter/pubspec_overrides.yaml +++ b/examples/hello_world_flutter/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_google,langchain_mistralai,langchain_ollama,mistralai_dart,ollama_dart,vertex_ai +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai dependency_overrides: langchain: path: ../../packages/langchain diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.lock b/examples/vertex_ai_matching_engine_setup/pubspec.lock index 653b4e81..c4bd2136 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.lock +++ b/examples/vertex_ai_matching_engine_setup/pubspec.lock @@ -5,18 +5,18 @@ packages: dependency: transitive description: name: _discoveryapis_commons - sha256: f8bb1fdbd77f3d5c1d62b5b0eca75fbf1e41bf4f6c62628f880582e2182ae45d + sha256: "113c4100b90a5b70a983541782431b82168b3cae166ab130649c36eb3559d498" url: "https://pub.dev" source: hosted - version: "1.0.6" + version: "1.0.7" args: dependency: transitive description: name: args - sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596 + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" url: "https://pub.dev" source: hosted - version: "2.4.2" + version: "2.5.0" async: dependency: transitive description: @@ -29,10 +29,10 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" crypto: dependency: transitive description: @@ -85,18 +85,18 @@ packages: dependency: transitive description: name: http_parser - sha256: 
"2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "4.0.2" + version: "4.1.0" meta: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.15.0" path: dependency: transitive description: @@ -125,10 +125,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index da1458be..2e50c6c0 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -29,10 +29,10 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" cross_file: dependency: transitive description: @@ -85,10 +85,10 @@ packages: dependency: transitive description: name: ffi - sha256: "493f37e7df1804778ff3a53bd691d8692ddf69702cf4c1c1096a2e41b4779e21" + sha256: "16ed7b077ef01ad6170a3d0c57caa4a112a38d7a2ed5602e0aca9ca6f3d98da6" url: "https://pub.dev" source: hosted - version: "2.1.2" + version: "2.1.3" fixnum: dependency: transitive description: @@ -133,10 +133,10 @@ packages: dependency: transitive description: name: http_parser - sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "4.0.2" + version: "4.1.0" iregexp: dependency: transitive description: @@ -264,10 +264,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" source_span: dependency: transitive description: @@ -288,10 +288,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" tavily_dart: dependency: "direct overridden" description: @@ -335,9 +335,9 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: dart: ">=3.4.0 <4.0.0" diff --git a/examples/wikivoyage_eu/pubspec_overrides.yaml b/examples/wikivoyage_eu/pubspec_overrides.yaml index 6f7e46d1..5814891d 100644 --- a/examples/wikivoyage_eu/pubspec_overrides.yaml +++ b/examples/wikivoyage_eu/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: 
langchain,langchain_core,langchain_community,langchain_ollama,ollama_dart,tavily_dart +# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,langchain_ollama,ollama_dart,tavily_dart dependency_overrides: langchain: path: ../../packages/langchain diff --git a/packages/anthropic_sdk_dart/pubspec.lock b/packages/anthropic_sdk_dart/pubspec.lock index ddbf9394..6e950ea1 100644 --- a/packages/anthropic_sdk_dart/pubspec.lock +++ b/packages/anthropic_sdk_dart/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _fe_analyzer_shared - sha256: f256b0c0ba6c7577c15e2e4e114755640a875e885099367bf6e012b19314c834 + sha256: "45cfa8471b89fb6643fe9bf51bd7931a76b8f5ec2d65de4fb176dba8d4f22c77" url: "https://pub.dev" source: hosted - version: "72.0.0" + version: "73.0.0" _macros: dependency: transitive description: dart @@ -18,10 +18,10 @@ packages: dependency: transitive description: name: analyzer - sha256: b652861553cd3990d8ed361f7979dc6d7053a9ac8843fa73820ab68ce5410139 + sha256: "4959fec185fe70cce007c57e9ab6983101dbe593d2bf8bbfb4453aaec0cf470a" url: "https://pub.dev" source: hosted - version: "6.7.0" + version: "6.8.0" args: dependency: transitive description: @@ -90,10 +90,10 @@ packages: dependency: transitive description: name: build_runner_core - sha256: "4ae8ffe5ac758da294ecf1802f2aff01558d8b1b00616aa7538ea9a8a5d50799" + sha256: f8126682b87a7282a339b871298cc12009cb67109cfa1614d6436fb0289193e0 url: "https://pub.dev" source: hosted - version: "7.3.0" + version: "7.3.2" built_collection: dependency: transitive description: @@ -138,10 +138,10 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" convert: dependency: transitive description: @@ -154,10 +154,10 @@ packages: dependency: transitive description: name: coverage - sha256: "3945034e86ea203af7a056d98e98e42a5518fff200d6e8e6647e1886b07e936e" + sha256: "576aaab8b1abdd452e0f656c3e73da9ead9d7880e15bdc494189d9c1a1baf0db" url: "https://pub.dev" source: hosted - version: "1.8.0" + version: "1.9.0" crypto: dependency: transitive description: @@ -242,10 +242,10 @@ packages: dependency: transitive description: name: graphs - sha256: aedc5a15e78fc65a6e23bcd927f24c64dd995062bcd1ca6eda65a3cff92a4d19 + sha256: "741bbf84165310a68ff28fe9e727332eef1407342fca52759cb21ad8177bb8d0" url: "https://pub.dev" source: hosted - version: "2.3.1" + version: "2.3.2" http: dependency: "direct main" description: @@ -266,10 +266,10 @@ packages: dependency: transitive description: name: http_parser - sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "4.0.2" + version: "4.1.0" intl: dependency: transitive description: @@ -403,10 +403,10 @@ packages: dependency: transitive description: name: pubspec_parse - sha256: c63b2876e58e194e4b0828fcb080ad0e06d051cb607a6be51a9e084f47cb9367 + sha256: c799b721d79eb6ee6fa56f00c04b472dcd44a30d258fac2174a6ec57302678f8 url: "https://pub.dev" source: hosted - version: "1.2.3" + version: "1.3.0" recase: dependency: transitive description: @@ -419,10 +419,10 @@ packages: dependency: transitive description: name: shelf - sha256: ad29c505aee705f41a4d8963641f91ac4cee3c8fad5947e033390a7bd8180fa4 + sha256: 
e7dd780a7ffb623c57850b33f43309312fc863fb6aa3d276a754bb299839ef12 url: "https://pub.dev" source: hosted - version: "1.4.1" + version: "1.4.2" shelf_packages_handler: dependency: transitive description: @@ -515,10 +515,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: @@ -571,10 +571,10 @@ packages: dependency: transitive description: name: vm_service - sha256: "7475cb4dd713d57b6f7464c0e13f06da0d535d8b2067e188962a59bac2cf280b" + sha256: f652077d0bdf60abe4c1f6377448e8655008eef28f128bc023f7b5e8dfeb48fc url: "https://pub.dev" source: hosted - version: "14.2.2" + version: "14.2.4" watcher: dependency: transitive description: @@ -587,26 +587,26 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" web_socket: dependency: transitive description: name: web_socket - sha256: "217f49b5213796cb508d6a942a5dc604ce1cb6a0a6b3d8cb3f0c314f0ecea712" + sha256: "3c12d96c0c9a4eec095246debcea7b86c0324f22df69893d538fcc6f1b8cce83" url: "https://pub.dev" source: hosted - version: "0.1.4" + version: "0.1.6" web_socket_channel: dependency: transitive description: name: web_socket_channel - sha256: a2d56211ee4d35d9b344d9d4ce60f362e4f5d1aafb988302906bd732bc731276 + sha256: "9f187088ed104edd8662ca07af4b124465893caf063ba29758f97af57e61da8f" url: "https://pub.dev" source: hosted - version: "3.0.0" + version: "3.0.1" webkit_inspection_protocol: dependency: transitive description: diff --git a/packages/langchain_chroma/pubspec_overrides.yaml b/packages/langchain_chroma/pubspec_overrides.yaml index 3470527c..d53c4efe 100644 --- a/packages/langchain_chroma/pubspec_overrides.yaml +++ b/packages/langchain_chroma/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: chromadb,langchain_openai,openai_dart,langchain_core,langchain_community,langchain,tavily_dart +# melos_managed_dependency_overrides: chromadb,langchain,langchain_community,langchain_core,langchain_openai,openai_dart,tavily_dart dependency_overrides: chromadb: path: ../chromadb diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 1896af9c..b1d7e459 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _flutterfire_internals - sha256: "0816f12bbbd9e21f72ea8592b11bce4a628d4e5cb7a81ff9f1eee4f3dc23206e" + sha256: b1595874fbc8f7a50da90f5d8f327bb0bfd6a95dc906c390efe991540c3b54aa url: "https://pub.dev" source: hosted - version: "1.3.37" + version: "1.3.40" args: dependency: transitive description: @@ -117,50 +117,50 @@ packages: dependency: transitive description: name: firebase_app_check - sha256: "8aedc3b274826f923f2cf2d61cddeb014c113fd8604373e2fe3a2068b3f496e7" + sha256: "8314938830d6b47217e369664567f6d8a1e77603448e1dbdaf4f7d8c2111ff5c" url: "https://pub.dev" source: hosted - version: "0.3.0+1" + version: "0.3.0+4" firebase_app_check_platform_interface: dependency: transitive description: name: 
firebase_app_check_platform_interface - sha256: "93e8aeeb5659c4926682299f175c033fd955fe3a2aa3b2c9a34c55af1ba25f10" + sha256: edefbd312d2f4c52ab6a62d4efca512012bcc580f152c856a5730bfabcf8a924 url: "https://pub.dev" source: hosted - version: "0.1.0+31" + version: "0.1.0+34" firebase_app_check_web: dependency: transitive description: name: firebase_app_check_web - sha256: c2933fc26b73d02b791291df00446a6dbf99d1b59e038bb55cbbec74fcb40c4a + sha256: "2c2377ecf922514c540c2d4a9c06e46830a0706fdfc3d59b7ade9b75843b81c5" url: "https://pub.dev" source: hosted - version: "0.1.2+9" + version: "0.1.2+12" firebase_auth: dependency: transitive description: name: firebase_auth - sha256: "3af60a78e92567af3d9a5e25d3955f0f6a3f7a33b900724c1c4c336ff5e44200" + sha256: "2457ac6cbc152fa464aad3fb35f98039b0c4ab8e9bedf476672508b291bdbc3a" url: "https://pub.dev" source: hosted - version: "5.1.0" + version: "5.1.4" firebase_auth_platform_interface: dependency: transitive description: name: firebase_auth_platform_interface - sha256: "6941c07a1d129a8b834f85b6673d3455f24102b6338346596c26ef3be2c106ce" + sha256: "0408e2ed74b1afa0490a93aa041fe90d7573af7ffc59a641edc6c5b5c1b8d2a4" url: "https://pub.dev" source: hosted - version: "7.4.0" + version: "7.4.3" firebase_auth_web: dependency: transitive description: name: firebase_auth_web - sha256: "5c3f6b45dc141cec858c050d6a6f07bdbfab45ab92a68b32be4b08805bdcadaa" + sha256: "7e0c6d0fa8c5c1b2ae126a78f2d1a206a77a913f78d20f155487bf746162dccc" url: "https://pub.dev" source: hosted - version: "5.12.2" + version: "5.12.5" firebase_core: dependency: "direct main" description: @@ -189,10 +189,10 @@ packages: dependency: transitive description: name: firebase_vertexai - sha256: a96bc9e8a6e1da0c4bbda2dd24f03b74e069449a3fa7940c87ec611bfc96633d + sha256: ad34f7a87d870949e92851f4c73b7e15f808fd4717ed899fa7b4813fffe74831 url: "https://pub.dev" source: hosted - version: "0.2.2" + version: "0.2.2+4" fixnum: dependency: transitive description: @@ -236,18 +236,18 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" + sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be url: "https://pub.dev" source: hosted - version: "0.4.3" + version: "0.4.4" http: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -361,10 +361,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" sky_engine: dependency: transitive description: flutter diff --git a/packages/langchain_firebase/example/pubspec_overrides.yaml b/packages/langchain_firebase/example/pubspec_overrides.yaml index 35cb2da2..fb671352 100644 --- a/packages/langchain_firebase/example/pubspec_overrides.yaml +++ b/packages/langchain_firebase/example/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_core,langchain_firebase,langchain +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_firebase dependency_overrides: langchain: path: ../../langchain diff --git 
a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 6c5ffbb4..593dfe9c 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _flutterfire_internals - sha256: "0816f12bbbd9e21f72ea8592b11bce4a628d4e5cb7a81ff9f1eee4f3dc23206e" + sha256: b1595874fbc8f7a50da90f5d8f327bb0bfd6a95dc906c390efe991540c3b54aa url: "https://pub.dev" source: hosted - version: "1.3.37" + version: "1.3.40" async: dependency: transitive description: @@ -101,50 +101,50 @@ packages: dependency: "direct main" description: name: firebase_app_check - sha256: "8aedc3b274826f923f2cf2d61cddeb014c113fd8604373e2fe3a2068b3f496e7" + sha256: "8314938830d6b47217e369664567f6d8a1e77603448e1dbdaf4f7d8c2111ff5c" url: "https://pub.dev" source: hosted - version: "0.3.0+1" + version: "0.3.0+4" firebase_app_check_platform_interface: dependency: transitive description: name: firebase_app_check_platform_interface - sha256: "93e8aeeb5659c4926682299f175c033fd955fe3a2aa3b2c9a34c55af1ba25f10" + sha256: edefbd312d2f4c52ab6a62d4efca512012bcc580f152c856a5730bfabcf8a924 url: "https://pub.dev" source: hosted - version: "0.1.0+31" + version: "0.1.0+34" firebase_app_check_web: dependency: transitive description: name: firebase_app_check_web - sha256: c2933fc26b73d02b791291df00446a6dbf99d1b59e038bb55cbbec74fcb40c4a + sha256: "2c2377ecf922514c540c2d4a9c06e46830a0706fdfc3d59b7ade9b75843b81c5" url: "https://pub.dev" source: hosted - version: "0.1.2+9" + version: "0.1.2+12" firebase_auth: dependency: "direct main" description: name: firebase_auth - sha256: "3af60a78e92567af3d9a5e25d3955f0f6a3f7a33b900724c1c4c336ff5e44200" + sha256: "2457ac6cbc152fa464aad3fb35f98039b0c4ab8e9bedf476672508b291bdbc3a" url: "https://pub.dev" source: hosted - version: "5.1.0" + version: "5.1.4" firebase_auth_platform_interface: dependency: transitive description: name: firebase_auth_platform_interface - sha256: "6941c07a1d129a8b834f85b6673d3455f24102b6338346596c26ef3be2c106ce" + sha256: "0408e2ed74b1afa0490a93aa041fe90d7573af7ffc59a641edc6c5b5c1b8d2a4" url: "https://pub.dev" source: hosted - version: "7.4.0" + version: "7.4.3" firebase_auth_web: dependency: transitive description: name: firebase_auth_web - sha256: "5c3f6b45dc141cec858c050d6a6f07bdbfab45ab92a68b32be4b08805bdcadaa" + sha256: "7e0c6d0fa8c5c1b2ae126a78f2d1a206a77a913f78d20f155487bf746162dccc" url: "https://pub.dev" source: hosted - version: "5.12.2" + version: "5.12.5" firebase_core: dependency: "direct main" description: @@ -173,10 +173,10 @@ packages: dependency: "direct main" description: name: firebase_vertexai - sha256: a96bc9e8a6e1da0c4bbda2dd24f03b74e069449a3fa7940c87ec611bfc96633d + sha256: ad34f7a87d870949e92851f4c73b7e15f808fd4717ed899fa7b4813fffe74831 url: "https://pub.dev" source: hosted - version: "0.2.2" + version: "0.2.2+4" fixnum: dependency: transitive description: @@ -204,18 +204,18 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: "76e35d93b8c1cd888f69a1a371f8c5dc54cec372b6c74a4c0a5d506e7cf82c1a" + sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be url: "https://pub.dev" source: hosted - version: "0.4.3" + version: "0.4.4" http: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: 
"1.2.2" http_parser: dependency: transitive description: @@ -299,10 +299,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" sky_engine: dependency: transitive description: flutter diff --git a/packages/langchain_google/pubspec_overrides.yaml b/packages/langchain_google/pubspec_overrides.yaml index 1fabfd3c..9844d8a9 100644 --- a/packages/langchain_google/pubspec_overrides.yaml +++ b/packages/langchain_google/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: vertex_ai,langchain_core +# melos_managed_dependency_overrides: langchain_core,vertex_ai dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_mistralai/pubspec_overrides.yaml b/packages/langchain_mistralai/pubspec_overrides.yaml index 4a44a89b..0bb3e94e 100644 --- a/packages/langchain_mistralai/pubspec_overrides.yaml +++ b/packages/langchain_mistralai/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: mistralai_dart,langchain_core +# melos_managed_dependency_overrides: langchain_core,mistralai_dart dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_ollama/pubspec_overrides.yaml b/packages/langchain_ollama/pubspec_overrides.yaml index 9090f50e..1cab36be 100644 --- a/packages/langchain_ollama/pubspec_overrides.yaml +++ b/packages/langchain_ollama/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: ollama_dart,langchain_core +# melos_managed_dependency_overrides: langchain_core,ollama_dart dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_openai/pubspec_overrides.yaml b/packages/langchain_openai/pubspec_overrides.yaml index d4293e4f..92ad1620 100644 --- a/packages/langchain_openai/pubspec_overrides.yaml +++ b/packages/langchain_openai/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: openai_dart,langchain_core,langchain_community,langchain,tavily_dart +# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,openai_dart,tavily_dart dependency_overrides: langchain: path: ../langchain diff --git a/packages/langchain_pinecone/pubspec_overrides.yaml b/packages/langchain_pinecone/pubspec_overrides.yaml index 8dd8d545..de62cfcc 100644 --- a/packages/langchain_pinecone/pubspec_overrides.yaml +++ b/packages/langchain_pinecone/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_openai,openai_dart,langchain_core +# melos_managed_dependency_overrides: langchain_core,langchain_openai,openai_dart dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_supabase/pubspec_overrides.yaml b/packages/langchain_supabase/pubspec_overrides.yaml index d5cb8df4..b03ffbc5 100644 --- a/packages/langchain_supabase/pubspec_overrides.yaml +++ b/packages/langchain_supabase/pubspec_overrides.yaml @@ -1,5 +1,4 @@ -# melos_managed_dependency_overrides: langchain_openai,openai_dart,langchain_core,langchain_community,tavily_dart -# melos_managed_dependency_overrides: langchain +# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,langchain_openai,openai_dart,tavily_dart dependency_overrides: langchain: path: ../langchain From 1f1f592216061b5a5eb72c833be5383667c3c08a Mon Sep 
17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 17 Aug 2024 11:07:28 +0200 Subject: [PATCH 093/251] feat: Add support for Structured Outputs in openai_dart (#525) --- packages/openai_dart/README.md | 51 +- .../generated/schema/assistant_object.dart | 27 +- .../src/generated/schema/assistant_tools.dart | 2 +- .../schema/assistants_response_format.dart | 53 - .../schema/chat_completion_logprobs.dart | 2 +- .../schema/chat_completion_message.dart | 5 +- .../chat_completion_message_content_part.dart | 21 +- ..._completion_message_content_part_type.dart | 2 + ...hat_completion_stream_response_choice.dart | 2 +- ...chat_completion_stream_response_delta.dart | 5 + .../schema/create_assistant_request.dart | 26 +- .../create_chat_completion_request.dart | 59 +- .../generated/schema/create_run_request.dart | 24 +- .../schema/create_thread_and_run_request.dart | 26 +- .../src/generated/schema/function_object.dart | 13 +- .../generated/schema/json_schema_object.dart | 62 + .../src/generated/schema/message_content.dart | 15 + .../schema/message_delta_content.dart | 42 + ...essage_delta_content_image_url_object.dart | 51 - .../schema/modify_assistant_request.dart | 26 +- .../src/generated/schema/response_format.dart | 71 + .../schema/response_format_type.dart | 19 + .../lib/src/generated/schema/run_object.dart | 25 +- .../lib/src/generated/schema/schema.dart | 5 +- .../src/generated/schema/schema.freezed.dart | 4091 ++++++++++++----- .../lib/src/generated/schema/schema.g.dart | 406 +- packages/openai_dart/oas/main.dart | 8 + packages/openai_dart/oas/openapi_curated.yaml | 239 +- .../openai_dart/oas/openapi_official.yaml | 244 +- .../test/openai_client_chat_test.dart | 59 +- 30 files changed, 3911 insertions(+), 1770 deletions(-) delete mode 100644 packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/json_schema_object.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/response_format.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/response_format_type.dart diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md index 2c003f8d..df9cc58b 100644 --- a/packages/openai_dart/README.md +++ b/packages/openai_dart/README.md @@ -165,7 +165,7 @@ await for (final res in stream) { // 789 ``` -**Multi-modal prompt:** +**Multi-modal prompt:** ([docs](https://platform.openai.com/docs/guides/vision)) ```dart final res = await client.createChatCompletion( @@ -198,7 +198,7 @@ print(res.choices.first.message.content); // The fruit in the image is an apple. ``` -**JSON mode:** +**JSON mode:** ([docs](https://platform.openai.com/docs/guides/structured-outputs/json-mode)) ```dart final res = await client.createChatCompletion( @@ -227,7 +227,52 @@ final res = await client.createChatCompletion( // { "names": ["John", "Mary", "Peter"] } ``` -**Tools:** +**Structured output: ([docs](https://platform.openai.com/docs/guides/structured-outputs))** + +```dart +final res = await client.createChatCompletion( + request: CreateChatCompletionRequest( + model: ChatCompletionModel.model( + ChatCompletionModels.gpt4oMini, + ), + messages: [ + ChatCompletionMessage.system( + content: + 'You are a helpful assistant. 
That extracts names from text.', + ), + ChatCompletionMessage.user( + content: ChatCompletionUserMessageContent.string( + 'John, Mary, and Peter.', + ), + ), + ], + temperature: 0, + responseFormat: ResponseFormat.jsonSchema( + jsonSchema: JsonSchemaObject( + name: 'Names', + description: 'A list of names', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'names': { + 'type': 'array', + 'items': { + 'type': 'string', + }, + }, + }, + 'additionalProperties': false, + 'required': ['names'], + }, + ), + ), + ), +); +// {"names":["John","Mary","Peter"]} +``` + +**Tools:** ([docs](https://platform.openai.com/docs/guides/function-calling)) ```dart const function = FunctionObject( diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart index 5784d1ed..f0e1f3b5 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart @@ -60,9 +60,10 @@ class AssistantObject with _$AssistantObject { /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -177,8 +178,6 @@ enum AssistantObjectObject { /// `auto` is the default value enum AssistantResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -187,9 +186,10 @@ enum AssistantResponseFormatMode { // CLASS: AssistantObjectResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-4o-mini-1106`. +/// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. 
Learn more in the @@ -209,14 +209,14 @@ sealed class AssistantObjectResponseFormat const AssistantObjectResponseFormat._(); /// `auto` is the default value - const factory AssistantObjectResponseFormat.enumeration( + const factory AssistantObjectResponseFormat.mode( AssistantResponseFormatMode value, ) = AssistantObjectResponseFormatEnumeration; /// No Description - const factory AssistantObjectResponseFormat.assistantsResponseFormat( - AssistantsResponseFormat value, - ) = AssistantObjectResponseFormatAssistantsResponseFormat; + const factory AssistantObjectResponseFormat.responseFormat( + ResponseFormat value, + ) = AssistantObjectResponseFormatResponseFormat; /// Object construction from a JSON representation factory AssistantObjectResponseFormat.fromJson(Map json) => @@ -243,8 +243,8 @@ class _AssistantObjectResponseFormatConverter } if (data is Map) { try { - return AssistantObjectResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return AssistantObjectResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -258,8 +258,7 @@ class _AssistantObjectResponseFormatConverter return switch (data) { AssistantObjectResponseFormatEnumeration(value: final v) => _$AssistantResponseFormatModeEnumMap[v]!, - AssistantObjectResponseFormatAssistantsResponseFormat(value: final v) => - v.toJson(), + AssistantObjectResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart index 5c0c2c47..e36cd8e6 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart @@ -84,7 +84,7 @@ class AssistantToolsFileSearchFileSearch /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. @JsonKey(name: 'max_num_results', includeIfNull: false) int? maxNumResults, }) = _AssistantToolsFileSearchFileSearch; diff --git a/packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart b/packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart deleted file mode 100644 index bc5f9c8b..00000000 --- a/packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart +++ /dev/null @@ -1,53 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: AssistantsResponseFormat -// ========================================== - -/// An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. -@freezed -class AssistantsResponseFormat with _$AssistantsResponseFormat { - const AssistantsResponseFormat._(); - - /// Factory constructor for AssistantsResponseFormat - const factory AssistantsResponseFormat({ - /// Must be one of `text` or `json_object`. 
- @Default(AssistantsResponseFormatType.text) - AssistantsResponseFormatType type, - }) = _AssistantsResponseFormat; - - /// Object construction from a JSON representation - factory AssistantsResponseFormat.fromJson(Map json) => - _$AssistantsResponseFormatFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['type']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'type': type, - }; - } -} - -// ========================================== -// ENUM: AssistantsResponseFormatType -// ========================================== - -/// Must be one of `text` or `json_object`. -enum AssistantsResponseFormatType { - @JsonValue('text') - text, - @JsonValue('json_object') - jsonObject, -} diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart index 8678903a..36c84a12 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart @@ -16,7 +16,7 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { /// Factory constructor for ChatCompletionLogprobs const factory ChatCompletionLogprobs({ /// A list of message content tokens with log probability information. - required List? content, + @JsonKey(includeIfNull: false) List? content, }) = _ChatCompletionLogprobs; /// Object construction from a JSON representation diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart index e546e524..93afcd9b 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart @@ -59,6 +59,9 @@ sealed class ChatCompletionMessage with _$ChatCompletionMessage { /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. @JsonKey(includeIfNull: false) String? content, + /// The refusal message by the assistant. + @JsonKey(includeIfNull: false) String? refusal, + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. @JsonKey(includeIfNull: false) String? name, @@ -140,7 +143,7 @@ sealed class ChatCompletionUserMessageContent List value, ) = ChatCompletionMessageContentParts; - /// The text contents of the message. + /// The text contents of the user message. const factory ChatCompletionUserMessageContent.string( String value, ) = ChatCompletionUserMessageContentString; diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart index e96bf346..6e38e239 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart @@ -18,7 +18,7 @@ sealed class ChatCompletionMessageContentPart // UNION: ChatCompletionMessageContentPartText // ------------------------------------------ - /// A text content part of a user message. + /// A text content part of a message. 
const factory ChatCompletionMessageContentPart.text({ /// The type of the content part, in this case `text`. @Default(ChatCompletionMessageContentPartType.text) @@ -32,8 +32,7 @@ sealed class ChatCompletionMessageContentPart // UNION: ChatCompletionMessageContentPartImage // ------------------------------------------ - /// Union constructor for [ChatCompletionMessageContentPartImage] - @FreezedUnionValue('image_url') + /// An image content part of a user message. const factory ChatCompletionMessageContentPart.image({ /// The type of the content part, in this case `image_url`. @Default(ChatCompletionMessageContentPartType.imageUrl) @@ -43,6 +42,20 @@ sealed class ChatCompletionMessageContentPart @JsonKey(name: 'image_url') required ChatCompletionMessageImageUrl imageUrl, }) = ChatCompletionMessageContentPartImage; + // ------------------------------------------ + // UNION: ChatCompletionMessageContentPartRefusal + // ------------------------------------------ + + /// A refusal content part of a message. + const factory ChatCompletionMessageContentPart.refusal({ + /// The type of the content part, in this case `refusal`. + @Default(ChatCompletionMessageContentPartType.refusal) + ChatCompletionMessageContentPartType type, + + /// The refusal message generated by the model. + required String refusal, + }) = ChatCompletionMessageContentPartRefusal; + /// Object construction from a JSON representation factory ChatCompletionMessageContentPart.fromJson( Map json) => @@ -58,6 +71,8 @@ enum ChatCompletionMessageContentPartEnumType { text, @JsonValue('image_url') imageUrl, + @JsonValue('refusal') + refusal, } // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart index 0b4409fb..1aeebe14 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart @@ -14,4 +14,6 @@ enum ChatCompletionMessageContentPartType { text, @JsonValue('image_url') imageUrl, + @JsonValue('refusal') + refusal, } diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart index 1b4f0705..39a46139 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart @@ -79,7 +79,7 @@ class ChatCompletionStreamResponseChoiceLogprobs /// Factory constructor for ChatCompletionStreamResponseChoiceLogprobs const factory ChatCompletionStreamResponseChoiceLogprobs({ /// A list of message content tokens with log probability information. - required List? content, + @JsonKey(includeIfNull: false) List? 
content, }) = _ChatCompletionStreamResponseChoiceLogprobs; /// Object construction from a JSON representation diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart index e676c18c..5cc5fa0d 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart @@ -19,6 +19,9 @@ class ChatCompletionStreamResponseDelta /// The contents of the chunk message. @JsonKey(includeIfNull: false) String? content, + /// The refusal message generated by the model. + @JsonKey(includeIfNull: false) String? refusal, + /// The name and arguments of a function that should be called, as generated by the model. @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? functionCall, @@ -43,6 +46,7 @@ class ChatCompletionStreamResponseDelta /// List of all property names of schema static const List propertyNames = [ 'content', + 'refusal', 'function_call', 'tool_calls', 'role' @@ -57,6 +61,7 @@ class ChatCompletionStreamResponseDelta Map toMap() { return { 'content': content, + 'refusal': refusal, 'function_call': functionCall, 'tool_calls': toolCalls, 'role': role, diff --git a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart index 0e849a85..f078f394 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart @@ -51,9 +51,10 @@ class CreateAssistantRequest with _$CreateAssistantRequest { /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -263,8 +264,6 @@ class _AssistantModelConverter /// `auto` is the default value enum CreateAssistantResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -273,9 +272,10 @@ enum CreateAssistantResponseFormatMode { // CLASS: CreateAssistantRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-4o-mini-1106`. +/// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. 
Learn more in the @@ -300,9 +300,9 @@ sealed class CreateAssistantRequestResponseFormat ) = CreateAssistantRequestResponseFormatEnumeration; /// No Description - const factory CreateAssistantRequestResponseFormat.format( - AssistantsResponseFormat value, - ) = CreateAssistantRequestResponseFormatAssistantsResponseFormat; + const factory CreateAssistantRequestResponseFormat.responseFormat( + ResponseFormat value, + ) = CreateAssistantRequestResponseFormatResponseFormat; /// Object construction from a JSON representation factory CreateAssistantRequestResponseFormat.fromJson( @@ -332,8 +332,8 @@ class _CreateAssistantRequestResponseFormatConverter } if (data is Map) { try { - return CreateAssistantRequestResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return CreateAssistantRequestResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -347,9 +347,7 @@ class _CreateAssistantRequestResponseFormatConverter return switch (data) { CreateAssistantRequestResponseFormatEnumeration(value: final v) => _$CreateAssistantResponseFormatModeEnumMap[v]!, - CreateAssistantRequestResponseFormatAssistantsResponseFormat( - value: final v - ) => + CreateAssistantRequestResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index fd24189a..8b6c5c52 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -55,13 +55,16 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { @Default(0.0) double? presencePenalty, - /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? responseFormat, + ResponseFormat? 
responseFormat, /// This feature is in Beta. /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. @@ -380,46 +383,6 @@ class _ChatCompletionModelConverter } } -// ========================================== -// CLASS: ChatCompletionResponseFormat -// ========================================== - -/// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. -/// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. -/// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. -@freezed -class ChatCompletionResponseFormat with _$ChatCompletionResponseFormat { - const ChatCompletionResponseFormat._(); - - /// Factory constructor for ChatCompletionResponseFormat - const factory ChatCompletionResponseFormat({ - /// Must be one of `text` or `json_object`. - @Default(ChatCompletionResponseFormatType.text) - ChatCompletionResponseFormatType type, - }) = _ChatCompletionResponseFormat; - - /// Object construction from a JSON representation - factory ChatCompletionResponseFormat.fromJson(Map json) => - _$ChatCompletionResponseFormatFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['type']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'type': type, - }; - } -} - // ========================================== // ENUM: CreateChatCompletionRequestServiceTier // ========================================== @@ -669,15 +632,3 @@ class _ChatCompletionFunctionCallConverter }; } } - -// ========================================== -// ENUM: ChatCompletionResponseFormatType -// ========================================== - -/// Must be one of `text` or `json_object`. -enum ChatCompletionResponseFormatType { - @JsonValue('text') - text, - @JsonValue('json_object') - jsonObject, -} diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 4c13ec8f..485869d0 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -79,9 +79,10 @@ class CreateRunRequest with _$CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? parallelToolCalls, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. 
Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -390,8 +391,6 @@ class _CreateRunRequestToolChoiceConverter /// `auto` is the default value enum CreateRunRequestResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -400,9 +399,10 @@ enum CreateRunRequestResponseFormatMode { // CLASS: CreateRunRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-4o-mini-1106`. +/// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -427,9 +427,9 @@ sealed class CreateRunRequestResponseFormat ) = CreateRunRequestResponseFormatEnumeration; /// No Description - const factory CreateRunRequestResponseFormat.format( - AssistantsResponseFormat value, - ) = CreateRunRequestResponseFormatAssistantsResponseFormat; + const factory CreateRunRequestResponseFormat.responseFormat( + ResponseFormat value, + ) = CreateRunRequestResponseFormatResponseFormat; /// Object construction from a JSON representation factory CreateRunRequestResponseFormat.fromJson(Map json) => @@ -458,8 +458,8 @@ class _CreateRunRequestResponseFormatConverter } if (data is Map) { try { - return CreateRunRequestResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return CreateRunRequestResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -473,7 +473,7 @@ class _CreateRunRequestResponseFormatConverter return switch (data) { CreateRunRequestResponseFormatEnumeration(value: final v) => _$CreateRunRequestResponseFormatModeEnumMap[v]!, - CreateRunRequestResponseFormatAssistantsResponseFormat(value: final v) => + CreateRunRequestResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index e69a2060..e52be3e1 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -78,9 +78,10 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? parallelToolCalls, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. 
Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -396,8 +397,6 @@ class _CreateThreadAndRunRequestToolChoiceConverter /// `auto` is the default value enum CreateThreadAndRunRequestResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -406,9 +405,10 @@ enum CreateThreadAndRunRequestResponseFormatMode { // CLASS: CreateThreadAndRunRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-4o-mini-1106`. +/// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -433,9 +433,9 @@ sealed class CreateThreadAndRunRequestResponseFormat ) = CreateThreadAndRunRequestResponseFormatEnumeration; /// No Description - const factory CreateThreadAndRunRequestResponseFormat.format( - AssistantsResponseFormat value, - ) = CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat; + const factory CreateThreadAndRunRequestResponseFormat.responseFormat( + ResponseFormat value, + ) = CreateThreadAndRunRequestResponseFormatResponseFormat; /// Object construction from a JSON representation factory CreateThreadAndRunRequestResponseFormat.fromJson( @@ -467,8 +467,8 @@ class _CreateThreadAndRunRequestResponseFormatConverter } if (data is Map) { try { - return CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return CreateThreadAndRunRequestResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -482,9 +482,7 @@ class _CreateThreadAndRunRequestResponseFormatConverter return switch (data) { CreateThreadAndRunRequestResponseFormatEnumeration(value: final v) => _$CreateThreadAndRunRequestResponseFormatModeEnumMap[v]!, - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat( - value: final v - ) => + CreateThreadAndRunRequestResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/function_object.dart b/packages/openai_dart/lib/src/generated/schema/function_object.dart index 647b4e0a..62426de3 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_object.dart @@ -15,7 +15,8 @@ class FunctionObject with _$FunctionObject { /// Factory constructor for FunctionObject const factory FunctionObject({ - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + /// maximum length of 64. 
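Not part of the patch itself, but as an illustrative sketch of the rename above: the `.format(AssistantsResponseFormat ...)` union constructors become `.responseFormat(ResponseFormat ...)`, so a caller would build an assistants-style response format roughly as follows (only constructors introduced or kept by this patch are used; the variable name is invented):

import 'package:openai_dart/openai_dart.dart';

// Sketch: hypothetical usage of the renamed union constructor.
final threadRunFormat = CreateThreadAndRunRequestResponseFormat.responseFormat(
  ResponseFormat.jsonObject(), // `.text()` and `.jsonSchema(...)` are the other variants
);
// The accompanying converter serializes this value to `{"type": "json_object"}`.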
required String name, /// A description of what the function does, used by the model to choose when and how to call the function. @@ -25,6 +26,12 @@ class FunctionObject with _$FunctionObject { /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) FunctionParameters? parameters, + + /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will + /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. Learn more about Structured Outputs in the + /// [function calling guide](docs/guides/function-calling). + @JsonKey(includeIfNull: false) @Default(false) bool? strict, }) = _FunctionObject; /// Object construction from a JSON representation @@ -35,7 +42,8 @@ class FunctionObject with _$FunctionObject { static const List propertyNames = [ 'name', 'description', - 'parameters' + 'parameters', + 'strict' ]; /// Perform validations on the schema property values @@ -49,6 +57,7 @@ class FunctionObject with _$FunctionObject { 'name': name, 'description': description, 'parameters': parameters, + 'strict': strict, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/json_schema_object.dart b/packages/openai_dart/lib/src/generated/schema/json_schema_object.dart new file mode 100644 index 00000000..32f20701 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/json_schema_object.dart @@ -0,0 +1,62 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: JsonSchemaObject +// ========================================== + +/// A JSON Schema object. +@freezed +class JsonSchemaObject with _$JsonSchemaObject { + const JsonSchemaObject._(); + + /// Factory constructor for JsonSchemaObject + const factory JsonSchemaObject({ + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + /// length of 64. + required String name, + + /// A description of what the response format is for, used by the model to determine how to respond in the + /// format. + @JsonKey(includeIfNull: false) String? description, + + /// The schema for the response format, described as a JSON Schema object. + required Map schema, + + /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always + /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + @JsonKey(includeIfNull: false) @Default(false) bool? strict, + }) = _JsonSchemaObject; + + /// Object construction from a JSON representation + factory JsonSchemaObject.fromJson(Map json) => + _$JsonSchemaObjectFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'name', + 'description', + 'schema', + 'strict' + ]; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'name': name, + 'description': description, + 'schema': schema, + 'strict': strict, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/message_content.dart b/packages/openai_dart/lib/src/generated/schema/message_content.dart index 14e23e22..46783eae 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_content.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_content.dart @@ -52,6 +52,19 @@ sealed class MessageContent with _$MessageContent { required MessageContentText text, }) = MessageContentTextObject; + // ------------------------------------------ + // UNION: MessageContentRefusalObject + // ------------------------------------------ + + /// The refusal content generated by the assistant. + const factory MessageContent.refusal({ + /// Always `refusal`. + required String type, + + /// No Description + required String refusal, + }) = MessageContentRefusalObject; + /// Object construction from a JSON representation factory MessageContent.fromJson(Map json) => _$MessageContentFromJson(json); @@ -68,4 +81,6 @@ enum MessageContentEnumType { imageUrl, @JsonValue('text') text, + @JsonValue('refusal') + refusal, } diff --git a/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart b/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart index f53291ee..738ab400 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart @@ -46,7 +46,49 @@ sealed class MessageDeltaContent with _$MessageDeltaContent { @JsonKey(includeIfNull: false) MessageDeltaContentText? text, }) = MessageDeltaContentTextObject; + // ------------------------------------------ + // UNION: MessageDeltaContentRefusalObject + // ------------------------------------------ + + /// The refusal content that is part of a message. + const factory MessageDeltaContent.refusal({ + /// The index of the refusal part in the message. + required int index, + + /// Always `refusal`. + required String type, + + /// The refusal content generated by the assistant. + @JsonKey(includeIfNull: false) String? refusal, + }) = MessageDeltaContentRefusalObject; + + // ------------------------------------------ + // UNION: MessageDeltaContentImageUrlObject + // ------------------------------------------ + + /// References an image URL in the content of a message. + const factory MessageDeltaContent.imageUrl({ + /// The index of the content part in the message. + required int index, + + /// Always `image_url`. + required String type, + + /// The image URL part of a message. + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? 
imageUrl, + }) = MessageDeltaContentImageUrlObject; + /// Object construction from a JSON representation factory MessageDeltaContent.fromJson(Map json) => _$MessageDeltaContentFromJson(json); } + +// ========================================== +// ENUM: MessageDeltaContentEnumType +// ========================================== + +enum MessageDeltaContentEnumType { + @JsonValue('refusal') + refusal, +} diff --git a/packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart b/packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart deleted file mode 100644 index 1008bbb0..00000000 --- a/packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart +++ /dev/null @@ -1,51 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: MessageDeltaContentImageUrlObject -// ========================================== - -/// References an image URL in the content of a message. -@freezed -class MessageDeltaContentImageUrlObject - with _$MessageDeltaContentImageUrlObject { - const MessageDeltaContentImageUrlObject._(); - - /// Factory constructor for MessageDeltaContentImageUrlObject - const factory MessageDeltaContentImageUrlObject({ - /// The index of the content part in the message. - @JsonKey(includeIfNull: false) int? index, - - /// Always `image_url`. - @JsonKey(includeIfNull: false) String? type, - - /// The image URL part of a message. - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl, - }) = _MessageDeltaContentImageUrlObject; - - /// Object construction from a JSON representation - factory MessageDeltaContentImageUrlObject.fromJson( - Map json) => - _$MessageDeltaContentImageUrlObjectFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['index', 'type', 'image_url']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'index': index, - 'type': type, - 'image_url': imageUrl, - }; - } -} diff --git a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart index 99c1f887..2b4d94d1 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart @@ -54,9 +54,10 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. 
Learn more in the @@ -157,8 +158,6 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { /// `auto` is the default value enum ModifyAssistantResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -167,9 +166,10 @@ enum ModifyAssistantResponseFormatMode { // CLASS: ModifyAssistantRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-4o-mini-1106`. +/// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -194,9 +194,9 @@ sealed class ModifyAssistantRequestResponseFormat ) = ModifyAssistantRequestResponseFormatEnumeration; /// No Description - const factory ModifyAssistantRequestResponseFormat.format( - AssistantsResponseFormat value, - ) = ModifyAssistantRequestResponseFormatAssistantsResponseFormat; + const factory ModifyAssistantRequestResponseFormat.responseFormat( + ResponseFormat value, + ) = ModifyAssistantRequestResponseFormatResponseFormat; /// Object construction from a JSON representation factory ModifyAssistantRequestResponseFormat.fromJson( @@ -226,8 +226,8 @@ class _ModifyAssistantRequestResponseFormatConverter } if (data is Map) { try { - return ModifyAssistantRequestResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return ModifyAssistantRequestResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -241,9 +241,7 @@ class _ModifyAssistantRequestResponseFormatConverter return switch (data) { ModifyAssistantRequestResponseFormatEnumeration(value: final v) => _$ModifyAssistantResponseFormatModeEnumMap[v]!, - ModifyAssistantRequestResponseFormatAssistantsResponseFormat( - value: final v - ) => + ModifyAssistantRequestResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/response_format.dart b/packages/openai_dart/lib/src/generated/schema/response_format.dart new file mode 100644 index 00000000..35b1f30d --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/response_format.dart @@ -0,0 +1,71 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ResponseFormat +// ========================================== + +/// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
+/// +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class ResponseFormat with _$ResponseFormat { + const ResponseFormat._(); + + // ------------------------------------------ + // UNION: ResponseFormatText + // ------------------------------------------ + + /// The model should respond with plain text. + const factory ResponseFormat.text({ + /// The type of response format being defined. + @Default(ResponseFormatType.text) ResponseFormatType type, + }) = ResponseFormatText; + + // ------------------------------------------ + // UNION: ResponseFormatJsonObject + // ------------------------------------------ + + /// The model should respond with a JSON object. + const factory ResponseFormat.jsonObject({ + /// The type of response format being defined. + @Default(ResponseFormatType.jsonObject) ResponseFormatType type, + }) = ResponseFormatJsonObject; + + // ------------------------------------------ + // UNION: ResponseFormatJsonSchema + // ------------------------------------------ + + /// The model should respond with a JSON object that adheres to the specified schema. + const factory ResponseFormat.jsonSchema({ + /// The type of response format being defined. + @Default(ResponseFormatType.jsonSchema) ResponseFormatType type, + + /// A JSON Schema object. + @JsonKey(name: 'json_schema') required JsonSchemaObject jsonSchema, + }) = ResponseFormatJsonSchema; + + /// Object construction from a JSON representation + factory ResponseFormat.fromJson(Map json) => + _$ResponseFormatFromJson(json); +} + +// ========================================== +// ENUM: ResponseFormatEnumType +// ========================================== + +enum ResponseFormatEnumType { + @JsonValue('text') + text, + @JsonValue('json_object') + jsonObject, + @JsonValue('json_schema') + jsonSchema, +} diff --git a/packages/openai_dart/lib/src/generated/schema/response_format_type.dart b/packages/openai_dart/lib/src/generated/schema/response_format_type.dart new file mode 100644 index 00000000..da215209 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/response_format_type.dart @@ -0,0 +1,19 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// ENUM: ResponseFormatType +// ========================================== + +/// The type of response format being defined. 
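Purely illustrative (a sketch based only on the classes added above, not shipped with this patch): constructing the `json_schema` variant of the new ResponseFormat union together with a JsonSchemaObject. The schema name, description, and properties below are made up for the example:

import 'package:openai_dart/openai_dart.dart';

// Sketch: a Structured Outputs response format built from the classes above.
final weatherFormat = ResponseFormat.jsonSchema(
  jsonSchema: JsonSchemaObject(
    name: 'weather_report', // hypothetical schema name
    description: 'A short weather summary.',
    schema: {
      'type': 'object',
      'properties': {
        'city': {'type': 'string'},
        'temperature_c': {'type': 'number'},
      },
      'required': ['city', 'temperature_c'],
      'additionalProperties': false,
    },
    strict: true,
  ),
);
// weatherFormat.toJson() yields the `{"type": "json_schema", "json_schema": {...}}`
// shape described in the doc comments above.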
+enum ResponseFormatType { + @JsonValue('text') + text, + @JsonValue('json_object') + jsonObject, + @JsonValue('json_schema') + jsonSchema, +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_object.dart b/packages/openai_dart/lib/src/generated/schema/run_object.dart index 73ffe897..351d140b 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_object.dart @@ -105,9 +105,10 @@ class RunObject with _$RunObject { /// during tool use. @JsonKey(name: 'parallel_tool_calls') required bool? parallelToolCalls, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -451,8 +452,6 @@ class _RunObjectToolChoiceConverter /// `auto` is the default value enum RunObjectResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -461,9 +460,10 @@ enum RunObjectResponseFormatMode { // CLASS: RunObjectResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models -/// since `gpt-4o-mini-1106`. +/// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. 
Learn more in the @@ -487,9 +487,9 @@ sealed class RunObjectResponseFormat with _$RunObjectResponseFormat { ) = RunObjectResponseFormatEnumeration; /// No Description - const factory RunObjectResponseFormat.format( - AssistantsResponseFormat value, - ) = RunObjectResponseFormatAssistantsResponseFormat; + const factory RunObjectResponseFormat.responseFormat( + ResponseFormat value, + ) = RunObjectResponseFormatResponseFormat; /// Object construction from a JSON representation factory RunObjectResponseFormat.fromJson(Map json) => @@ -513,8 +513,8 @@ class _RunObjectResponseFormatConverter } if (data is Map) { try { - return RunObjectResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return RunObjectResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -528,8 +528,7 @@ class _RunObjectResponseFormatConverter return switch (data) { RunObjectResponseFormatEnumeration(value: final v) => _$RunObjectResponseFormatModeEnumMap[v]!, - RunObjectResponseFormatAssistantsResponseFormat(value: final v) => - v.toJson(), + RunObjectResponseFormatResponseFormat(value: final v) => v.toJson(), }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart index e4f6c023..028e108f 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -22,6 +22,8 @@ part 'chat_completion_message_function_call.dart'; part 'chat_completion_function_call_option.dart'; part 'function_object.dart'; part 'function_parameters.dart'; +part 'response_format_type.dart'; +part 'json_schema_object.dart'; part 'chat_completion_tool.dart'; part 'chat_completion_named_tool_choice.dart'; part 'chat_completion_message_tool_calls.dart'; @@ -73,7 +75,6 @@ part 'delete_assistant_response.dart'; part 'list_assistants_response.dart'; part 'assistants_named_tool_choice.dart'; part 'assistants_function_call_option.dart'; -part 'assistants_response_format.dart'; part 'truncation_object.dart'; part 'run_object.dart'; part 'run_completion_usage.dart'; @@ -106,7 +107,6 @@ part 'message_content_image_detail.dart'; part 'message_request_content_text_object.dart'; part 'message_content_text.dart'; part 'message_content_text_annotations_file_citation.dart'; -part 'message_delta_content_image_url_object.dart'; part 'message_delta_content_text.dart'; part 'message_delta_content_text_annotations_file_citation.dart'; part 'run_step_object.dart'; @@ -142,6 +142,7 @@ part 'batch.dart'; part 'list_batches_response.dart'; part 'chat_completion_message.dart'; part 'chat_completion_message_content_part.dart'; +part 'response_format.dart'; part 'assistant_tools.dart'; part 'message_content.dart'; part 'message_delta_content.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 36ff6d91..d3269d89 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -3574,14 +3574,16 @@ mixin _$CreateChatCompletionRequest { @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty => throw _privateConstructorUsedError; - /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. 
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? get responseFormat => - throw _privateConstructorUsedError; + ResponseFormat? get responseFormat => throw _privateConstructorUsedError; /// This feature is in Beta. /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. @@ -3706,7 +3708,7 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? responseFormat, + ResponseFormat? responseFormat, @JsonKey(includeIfNull: false) int? seed, @JsonKey( name: 'service_tier', @@ -3734,7 +3736,7 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @JsonKey(includeIfNull: false) List? functions}); $ChatCompletionModelCopyWith<$Res> get model; - $ChatCompletionResponseFormatCopyWith<$Res>? get responseFormat; + $ResponseFormatCopyWith<$Res>? get responseFormat; $ChatCompletionStopCopyWith<$Res>? get stop; $ChatCompletionStreamOptionsCopyWith<$Res>? get streamOptions; $ChatCompletionToolChoiceOptionCopyWith<$Res>? get toolChoice; @@ -3821,7 +3823,7 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable - as ChatCompletionResponseFormat?, + as ResponseFormat?, seed: freezed == seed ? _value.seed : seed // ignore: cast_nullable_to_non_nullable @@ -3891,13 +3893,12 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $ChatCompletionResponseFormatCopyWith<$Res>? get responseFormat { + $ResponseFormatCopyWith<$Res>? 
get responseFormat { if (_value.responseFormat == null) { return null; } - return $ChatCompletionResponseFormatCopyWith<$Res>(_value.responseFormat!, - (value) { + return $ResponseFormatCopyWith<$Res>(_value.responseFormat!, (value) { return _then(_value.copyWith(responseFormat: value) as $Val); }); } @@ -3985,7 +3986,7 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? responseFormat, + ResponseFormat? responseFormat, @JsonKey(includeIfNull: false) int? seed, @JsonKey( name: 'service_tier', @@ -4015,7 +4016,7 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @override $ChatCompletionModelCopyWith<$Res> get model; @override - $ChatCompletionResponseFormatCopyWith<$Res>? get responseFormat; + $ResponseFormatCopyWith<$Res>? get responseFormat; @override $ChatCompletionStopCopyWith<$Res>? get stop; @override @@ -4105,7 +4106,7 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable - as ChatCompletionResponseFormat?, + as ResponseFormat?, seed: freezed == seed ? _value.seed : seed // ignore: cast_nullable_to_non_nullable @@ -4282,14 +4283,17 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty; - /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @override @JsonKey(name: 'response_format', includeIfNull: false) - final ChatCompletionResponseFormat? responseFormat; + final ResponseFormat? responseFormat; /// This feature is in Beta. /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. 
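One more non-normative sketch to tie the freezed changes together: `response_format` on CreateChatCompletionRequest now accepts the ResponseFormat union directly (the removed ChatCompletionResponseFormat wrapper is gone), and the new `strict` flag on FunctionObject opts a function tool into Structured Outputs. The function name and parameters below are invented for illustration:

import 'package:openai_dart/openai_dart.dart';

// Sketch only: values a caller might now pass via this request schema.
final jsonMode = ResponseFormat.jsonObject(); // replaces ChatCompletionResponseFormat(type: ...)
final strictFunction = FunctionObject(
  name: 'get_weather', // hypothetical function
  description: 'Returns the current weather for a city.',
  parameters: {
    'type': 'object',
    'properties': {
      'city': {'type': 'string'},
    },
    'required': ['city'],
    'additionalProperties': false,
  },
  strict: true, // new field added by this patch
);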
@@ -4524,7 +4528,7 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty, @JsonKey(name: 'response_format', includeIfNull: false) - final ChatCompletionResponseFormat? responseFormat, + final ResponseFormat? responseFormat, @JsonKey(includeIfNull: false) final int? seed, @JsonKey( name: 'service_tier', @@ -4609,14 +4613,17 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; - /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @override @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? get responseFormat; + ResponseFormat? get responseFormat; /// This feature is in Beta. /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. @@ -5142,177 +5149,6 @@ abstract class ChatCompletionModelString extends ChatCompletionModel { get copyWith => throw _privateConstructorUsedError; } -ChatCompletionResponseFormat _$ChatCompletionResponseFormatFromJson( - Map json) { - return _ChatCompletionResponseFormat.fromJson(json); -} - -/// @nodoc -mixin _$ChatCompletionResponseFormat { - /// Must be one of `text` or `json_object`. - ChatCompletionResponseFormatType get type => - throw _privateConstructorUsedError; - - /// Serializes this ChatCompletionResponseFormat to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of ChatCompletionResponseFormat - /// with the given fields replaced by the non-null parameter values. 
- @JsonKey(includeFromJson: false, includeToJson: false) - $ChatCompletionResponseFormatCopyWith - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ChatCompletionResponseFormatCopyWith<$Res> { - factory $ChatCompletionResponseFormatCopyWith( - ChatCompletionResponseFormat value, - $Res Function(ChatCompletionResponseFormat) then) = - _$ChatCompletionResponseFormatCopyWithImpl<$Res, - ChatCompletionResponseFormat>; - @useResult - $Res call({ChatCompletionResponseFormatType type}); -} - -/// @nodoc -class _$ChatCompletionResponseFormatCopyWithImpl<$Res, - $Val extends ChatCompletionResponseFormat> - implements $ChatCompletionResponseFormatCopyWith<$Res> { - _$ChatCompletionResponseFormatCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of ChatCompletionResponseFormat - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ChatCompletionResponseFormatType, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$ChatCompletionResponseFormatImplCopyWith<$Res> - implements $ChatCompletionResponseFormatCopyWith<$Res> { - factory _$$ChatCompletionResponseFormatImplCopyWith( - _$ChatCompletionResponseFormatImpl value, - $Res Function(_$ChatCompletionResponseFormatImpl) then) = - __$$ChatCompletionResponseFormatImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({ChatCompletionResponseFormatType type}); -} - -/// @nodoc -class __$$ChatCompletionResponseFormatImplCopyWithImpl<$Res> - extends _$ChatCompletionResponseFormatCopyWithImpl<$Res, - _$ChatCompletionResponseFormatImpl> - implements _$$ChatCompletionResponseFormatImplCopyWith<$Res> { - __$$ChatCompletionResponseFormatImplCopyWithImpl( - _$ChatCompletionResponseFormatImpl _value, - $Res Function(_$ChatCompletionResponseFormatImpl) _then) - : super(_value, _then); - - /// Create a copy of ChatCompletionResponseFormat - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_$ChatCompletionResponseFormatImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ChatCompletionResponseFormatType, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ChatCompletionResponseFormatImpl extends _ChatCompletionResponseFormat { - const _$ChatCompletionResponseFormatImpl( - {this.type = ChatCompletionResponseFormatType.text}) - : super._(); - - factory _$ChatCompletionResponseFormatImpl.fromJson( - Map json) => - _$$ChatCompletionResponseFormatImplFromJson(json); - - /// Must be one of `text` or `json_object`. 
- @override - @JsonKey() - final ChatCompletionResponseFormatType type; - - @override - String toString() { - return 'ChatCompletionResponseFormat(type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ChatCompletionResponseFormatImpl && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, type); - - /// Create a copy of ChatCompletionResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$ChatCompletionResponseFormatImplCopyWith< - _$ChatCompletionResponseFormatImpl> - get copyWith => __$$ChatCompletionResponseFormatImplCopyWithImpl< - _$ChatCompletionResponseFormatImpl>(this, _$identity); - - @override - Map toJson() { - return _$$ChatCompletionResponseFormatImplToJson( - this, - ); - } -} - -abstract class _ChatCompletionResponseFormat - extends ChatCompletionResponseFormat { - const factory _ChatCompletionResponseFormat( - {final ChatCompletionResponseFormatType type}) = - _$ChatCompletionResponseFormatImpl; - const _ChatCompletionResponseFormat._() : super._(); - - factory _ChatCompletionResponseFormat.fromJson(Map json) = - _$ChatCompletionResponseFormatImpl.fromJson; - - /// Must be one of `text` or `json_object`. - @override - ChatCompletionResponseFormatType get type; - - /// Create a copy of ChatCompletionResponseFormat - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$ChatCompletionResponseFormatImplCopyWith< - _$ChatCompletionResponseFormatImpl> - get copyWith => throw _privateConstructorUsedError; -} - ChatCompletionStop _$ChatCompletionStopFromJson(Map json) { switch (json['runtimeType']) { case 'listString': @@ -7075,7 +6911,8 @@ FunctionObject _$FunctionObjectFromJson(Map json) { /// @nodoc mixin _$FunctionObject { - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + /// maximum length of 64. String get name => throw _privateConstructorUsedError; /// A description of what the function does, used by the model to choose when and how to call the function. @@ -7088,6 +6925,13 @@ mixin _$FunctionObject { @JsonKey(includeIfNull: false) Map? get parameters => throw _privateConstructorUsedError; + /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will + /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. Learn more about Structured Outputs in the + /// [function calling guide](docs/guides/function-calling). + @JsonKey(includeIfNull: false) + bool? get strict => throw _privateConstructorUsedError; + /// Serializes this FunctionObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; @@ -7107,7 +6951,8 @@ abstract class $FunctionObjectCopyWith<$Res> { $Res call( {String name, @JsonKey(includeIfNull: false) String? description, - @JsonKey(includeIfNull: false) Map? parameters}); + @JsonKey(includeIfNull: false) Map? parameters, + @JsonKey(includeIfNull: false) bool? 
strict}); } /// @nodoc @@ -7128,6 +6973,7 @@ class _$FunctionObjectCopyWithImpl<$Res, $Val extends FunctionObject> Object? name = null, Object? description = freezed, Object? parameters = freezed, + Object? strict = freezed, }) { return _then(_value.copyWith( name: null == name @@ -7142,6 +6988,10 @@ class _$FunctionObjectCopyWithImpl<$Res, $Val extends FunctionObject> ? _value.parameters : parameters // ignore: cast_nullable_to_non_nullable as Map?, + strict: freezed == strict + ? _value.strict + : strict // ignore: cast_nullable_to_non_nullable + as bool?, ) as $Val); } } @@ -7157,7 +7007,8 @@ abstract class _$$FunctionObjectImplCopyWith<$Res> $Res call( {String name, @JsonKey(includeIfNull: false) String? description, - @JsonKey(includeIfNull: false) Map? parameters}); + @JsonKey(includeIfNull: false) Map? parameters, + @JsonKey(includeIfNull: false) bool? strict}); } /// @nodoc @@ -7176,6 +7027,7 @@ class __$$FunctionObjectImplCopyWithImpl<$Res> Object? name = null, Object? description = freezed, Object? parameters = freezed, + Object? strict = freezed, }) { return _then(_$FunctionObjectImpl( name: null == name @@ -7190,6 +7042,10 @@ class __$$FunctionObjectImplCopyWithImpl<$Res> ? _value._parameters : parameters // ignore: cast_nullable_to_non_nullable as Map?, + strict: freezed == strict + ? _value.strict + : strict // ignore: cast_nullable_to_non_nullable + as bool?, )); } } @@ -7200,14 +7056,16 @@ class _$FunctionObjectImpl extends _FunctionObject { const _$FunctionObjectImpl( {required this.name, @JsonKey(includeIfNull: false) this.description, - @JsonKey(includeIfNull: false) final Map? parameters}) + @JsonKey(includeIfNull: false) final Map? parameters, + @JsonKey(includeIfNull: false) this.strict = false}) : _parameters = parameters, super._(); factory _$FunctionObjectImpl.fromJson(Map json) => _$$FunctionObjectImplFromJson(json); - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + /// maximum length of 64. @override final String name; @@ -7234,9 +7092,17 @@ class _$FunctionObjectImpl extends _FunctionObject { return EqualUnmodifiableMapView(value); } + /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will + /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. Learn more about Structured Outputs in the + /// [function calling guide](docs/guides/function-calling). + @override + @JsonKey(includeIfNull: false) + final bool? 
strict; + @override String toString() { - return 'FunctionObject(name: $name, description: $description, parameters: $parameters)'; + return 'FunctionObject(name: $name, description: $description, parameters: $parameters, strict: $strict)'; } @override @@ -7248,13 +7114,14 @@ class _$FunctionObjectImpl extends _FunctionObject { (identical(other.description, description) || other.description == description) && const DeepCollectionEquality() - .equals(other._parameters, _parameters)); + .equals(other._parameters, _parameters) && + (identical(other.strict, strict) || other.strict == strict)); } @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, description, - const DeepCollectionEquality().hash(_parameters)); + const DeepCollectionEquality().hash(_parameters), strict); /// Create a copy of FunctionObject /// with the given fields replaced by the non-null parameter values. @@ -7275,16 +7142,18 @@ class _$FunctionObjectImpl extends _FunctionObject { abstract class _FunctionObject extends FunctionObject { const factory _FunctionObject( - {required final String name, - @JsonKey(includeIfNull: false) final String? description, - @JsonKey(includeIfNull: false) - final Map? parameters}) = _$FunctionObjectImpl; + {required final String name, + @JsonKey(includeIfNull: false) final String? description, + @JsonKey(includeIfNull: false) final Map? parameters, + @JsonKey(includeIfNull: false) final bool? strict}) = + _$FunctionObjectImpl; const _FunctionObject._() : super._(); factory _FunctionObject.fromJson(Map json) = _$FunctionObjectImpl.fromJson; - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + /// maximum length of 64. @override String get name; @@ -7300,6 +7169,14 @@ abstract class _FunctionObject extends FunctionObject { @JsonKey(includeIfNull: false) Map? get parameters; + /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will + /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. Learn more about Structured Outputs in the + /// [function calling guide](docs/guides/function-calling). + @override + @JsonKey(includeIfNull: false) + bool? get strict; + /// Create a copy of FunctionObject /// with the given fields replaced by the non-null parameter values. @override @@ -7308,6 +7185,275 @@ abstract class _FunctionObject extends FunctionObject { throw _privateConstructorUsedError; } +JsonSchemaObject _$JsonSchemaObjectFromJson(Map json) { + return _JsonSchemaObject.fromJson(json); +} + +/// @nodoc +mixin _$JsonSchemaObject { + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + /// length of 64. + String get name => throw _privateConstructorUsedError; + + /// A description of what the response format is for, used by the model to determine how to respond in the + /// format. + @JsonKey(includeIfNull: false) + String? get description => throw _privateConstructorUsedError; + + /// The schema for the response format, described as a JSON Schema object. + Map get schema => throw _privateConstructorUsedError; + + /// Whether to enable strict schema adherence when generating the output. 
If set to true, the model will always + /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + @JsonKey(includeIfNull: false) + bool? get strict => throw _privateConstructorUsedError; + + /// Serializes this JsonSchemaObject to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $JsonSchemaObjectCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $JsonSchemaObjectCopyWith<$Res> { + factory $JsonSchemaObjectCopyWith( + JsonSchemaObject value, $Res Function(JsonSchemaObject) then) = + _$JsonSchemaObjectCopyWithImpl<$Res, JsonSchemaObject>; + @useResult + $Res call( + {String name, + @JsonKey(includeIfNull: false) String? description, + Map schema, + @JsonKey(includeIfNull: false) bool? strict}); +} + +/// @nodoc +class _$JsonSchemaObjectCopyWithImpl<$Res, $Val extends JsonSchemaObject> + implements $JsonSchemaObjectCopyWith<$Res> { + _$JsonSchemaObjectCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = freezed, + Object? schema = null, + Object? strict = freezed, + }) { + return _then(_value.copyWith( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: freezed == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String?, + schema: null == schema + ? _value.schema + : schema // ignore: cast_nullable_to_non_nullable + as Map, + strict: freezed == strict + ? _value.strict + : strict // ignore: cast_nullable_to_non_nullable + as bool?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$JsonSchemaObjectImplCopyWith<$Res> + implements $JsonSchemaObjectCopyWith<$Res> { + factory _$$JsonSchemaObjectImplCopyWith(_$JsonSchemaObjectImpl value, + $Res Function(_$JsonSchemaObjectImpl) then) = + __$$JsonSchemaObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String name, + @JsonKey(includeIfNull: false) String? description, + Map schema, + @JsonKey(includeIfNull: false) bool? strict}); +} + +/// @nodoc +class __$$JsonSchemaObjectImplCopyWithImpl<$Res> + extends _$JsonSchemaObjectCopyWithImpl<$Res, _$JsonSchemaObjectImpl> + implements _$$JsonSchemaObjectImplCopyWith<$Res> { + __$$JsonSchemaObjectImplCopyWithImpl(_$JsonSchemaObjectImpl _value, + $Res Function(_$JsonSchemaObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = freezed, + Object? schema = null, + Object? strict = freezed, + }) { + return _then(_$JsonSchemaObjectImpl( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: freezed == description + ? 
_value.description + : description // ignore: cast_nullable_to_non_nullable + as String?, + schema: null == schema + ? _value._schema + : schema // ignore: cast_nullable_to_non_nullable + as Map, + strict: freezed == strict + ? _value.strict + : strict // ignore: cast_nullable_to_non_nullable + as bool?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$JsonSchemaObjectImpl extends _JsonSchemaObject { + const _$JsonSchemaObjectImpl( + {required this.name, + @JsonKey(includeIfNull: false) this.description, + required final Map schema, + @JsonKey(includeIfNull: false) this.strict = false}) + : _schema = schema, + super._(); + + factory _$JsonSchemaObjectImpl.fromJson(Map json) => + _$$JsonSchemaObjectImplFromJson(json); + + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + /// length of 64. + @override + final String name; + + /// A description of what the response format is for, used by the model to determine how to respond in the + /// format. + @override + @JsonKey(includeIfNull: false) + final String? description; + + /// The schema for the response format, described as a JSON Schema object. + final Map _schema; + + /// The schema for the response format, described as a JSON Schema object. + @override + Map get schema { + if (_schema is EqualUnmodifiableMapView) return _schema; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_schema); + } + + /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always + /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + @override + @JsonKey(includeIfNull: false) + final bool? strict; + + @override + String toString() { + return 'JsonSchemaObject(name: $name, description: $description, schema: $schema, strict: $strict)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$JsonSchemaObjectImpl && + (identical(other.name, name) || other.name == name) && + (identical(other.description, description) || + other.description == description) && + const DeepCollectionEquality().equals(other._schema, _schema) && + (identical(other.strict, strict) || other.strict == strict)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, name, description, + const DeepCollectionEquality().hash(_schema), strict); + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$JsonSchemaObjectImplCopyWith<_$JsonSchemaObjectImpl> get copyWith => + __$$JsonSchemaObjectImplCopyWithImpl<_$JsonSchemaObjectImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$JsonSchemaObjectImplToJson( + this, + ); + } +} + +abstract class _JsonSchemaObject extends JsonSchemaObject { + const factory _JsonSchemaObject( + {required final String name, + @JsonKey(includeIfNull: false) final String? description, + required final Map schema, + @JsonKey(includeIfNull: false) final bool? 
strict}) = + _$JsonSchemaObjectImpl; + const _JsonSchemaObject._() : super._(); + + factory _JsonSchemaObject.fromJson(Map json) = + _$JsonSchemaObjectImpl.fromJson; + + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + /// length of 64. + @override + String get name; + + /// A description of what the response format is for, used by the model to determine how to respond in the + /// format. + @override + @JsonKey(includeIfNull: false) + String? get description; + + /// The schema for the response format, described as a JSON Schema object. + @override + Map get schema; + + /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always + /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + @override + @JsonKey(includeIfNull: false) + bool? get strict; + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$JsonSchemaObjectImplCopyWith<_$JsonSchemaObjectImpl> get copyWith => + throw _privateConstructorUsedError; +} + ChatCompletionTool _$ChatCompletionToolFromJson(Map json) { return _ChatCompletionTool.fromJson(json); } @@ -8891,6 +9037,7 @@ ChatCompletionLogprobs _$ChatCompletionLogprobsFromJson( /// @nodoc mixin _$ChatCompletionLogprobs { /// A list of message content tokens with log probability information. + @JsonKey(includeIfNull: false) List? get content => throw _privateConstructorUsedError; @@ -8910,7 +9057,9 @@ abstract class $ChatCompletionLogprobsCopyWith<$Res> { $Res Function(ChatCompletionLogprobs) then) = _$ChatCompletionLogprobsCopyWithImpl<$Res, ChatCompletionLogprobs>; @useResult - $Res call({List? content}); + $Res call( + {@JsonKey(includeIfNull: false) + List? content}); } /// @nodoc @@ -8949,7 +9098,9 @@ abstract class _$$ChatCompletionLogprobsImplCopyWith<$Res> __$$ChatCompletionLogprobsImplCopyWithImpl<$Res>; @override @useResult - $Res call({List? content}); + $Res call( + {@JsonKey(includeIfNull: false) + List? content}); } /// @nodoc @@ -8982,7 +9133,8 @@ class __$$ChatCompletionLogprobsImplCopyWithImpl<$Res> @JsonSerializable() class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { const _$ChatCompletionLogprobsImpl( - {required final List? content}) + {@JsonKey(includeIfNull: false) + final List? content}) : _content = content, super._(); @@ -8994,6 +9146,7 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { /// A list of message content tokens with log probability information. @override + @JsonKey(includeIfNull: false) List? get content { final value = _content; if (value == null) return null; @@ -9039,7 +9192,8 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { abstract class _ChatCompletionLogprobs extends ChatCompletionLogprobs { const factory _ChatCompletionLogprobs( - {required final List? content}) = + {@JsonKey(includeIfNull: false) + final List? content}) = _$ChatCompletionLogprobsImpl; const _ChatCompletionLogprobs._() : super._(); @@ -9048,6 +9202,7 @@ abstract class _ChatCompletionLogprobs extends ChatCompletionLogprobs { /// A list of message content tokens with log probability information. @override + @JsonKey(includeIfNull: false) List? 
get content; /// Create a copy of ChatCompletionLogprobs @@ -10361,6 +10516,7 @@ ChatCompletionStreamResponseChoiceLogprobs /// @nodoc mixin _$ChatCompletionStreamResponseChoiceLogprobs { /// A list of message content tokens with log probability information. + @JsonKey(includeIfNull: false) List? get content => throw _privateConstructorUsedError; @@ -10383,7 +10539,9 @@ abstract class $ChatCompletionStreamResponseChoiceLogprobsCopyWith<$Res> { _$ChatCompletionStreamResponseChoiceLogprobsCopyWithImpl<$Res, ChatCompletionStreamResponseChoiceLogprobs>; @useResult - $Res call({List? content}); + $Res call( + {@JsonKey(includeIfNull: false) + List? content}); } /// @nodoc @@ -10424,7 +10582,9 @@ abstract class _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith<$Res> __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res>; @override @useResult - $Res call({List? content}); + $Res call( + {@JsonKey(includeIfNull: false) + List? content}); } /// @nodoc @@ -10458,7 +10618,8 @@ class __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res> class _$ChatCompletionStreamResponseChoiceLogprobsImpl extends _ChatCompletionStreamResponseChoiceLogprobs { const _$ChatCompletionStreamResponseChoiceLogprobsImpl( - {required final List? content}) + {@JsonKey(includeIfNull: false) + final List? content}) : _content = content, super._(); @@ -10471,6 +10632,7 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl /// A list of message content tokens with log probability information. @override + @JsonKey(includeIfNull: false) List? get content { final value = _content; if (value == null) return null; @@ -10520,7 +10682,8 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl abstract class _ChatCompletionStreamResponseChoiceLogprobs extends ChatCompletionStreamResponseChoiceLogprobs { const factory _ChatCompletionStreamResponseChoiceLogprobs( - {required final List? content}) = + {@JsonKey(includeIfNull: false) + final List? content}) = _$ChatCompletionStreamResponseChoiceLogprobsImpl; const _ChatCompletionStreamResponseChoiceLogprobs._() : super._(); @@ -10530,6 +10693,7 @@ abstract class _ChatCompletionStreamResponseChoiceLogprobs /// A list of message content tokens with log probability information. @override + @JsonKey(includeIfNull: false) List? get content; /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs @@ -10552,6 +10716,10 @@ mixin _$ChatCompletionStreamResponseDelta { @JsonKey(includeIfNull: false) String? get content => throw _privateConstructorUsedError; + /// The refusal message generated by the model. + @JsonKey(includeIfNull: false) + String? get refusal => throw _privateConstructorUsedError; + /// The name and arguments of a function that should be called, as generated by the model. @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? get functionCall => @@ -10587,6 +10755,7 @@ abstract class $ChatCompletionStreamResponseDeltaCopyWith<$Res> { @useResult $Res call( {@JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) @@ -10616,6 +10785,7 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, @override $Res call({ Object? content = freezed, + Object? refusal = freezed, Object? functionCall = freezed, Object? toolCalls = freezed, Object? 
role = freezed, @@ -10625,6 +10795,10 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, ? _value.content : content // ignore: cast_nullable_to_non_nullable as String?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String?, functionCall: freezed == functionCall ? _value.functionCall : functionCall // ignore: cast_nullable_to_non_nullable @@ -10667,6 +10841,7 @@ abstract class _$$ChatCompletionStreamResponseDeltaImplCopyWith<$Res> @useResult $Res call( {@JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) @@ -10696,6 +10871,7 @@ class __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl<$Res> @override $Res call({ Object? content = freezed, + Object? refusal = freezed, Object? functionCall = freezed, Object? toolCalls = freezed, Object? role = freezed, @@ -10705,6 +10881,10 @@ class __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl<$Res> ? _value.content : content // ignore: cast_nullable_to_non_nullable as String?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String?, functionCall: freezed == functionCall ? _value.functionCall : functionCall // ignore: cast_nullable_to_non_nullable @@ -10727,6 +10907,7 @@ class _$ChatCompletionStreamResponseDeltaImpl extends _ChatCompletionStreamResponseDelta { const _$ChatCompletionStreamResponseDeltaImpl( {@JsonKey(includeIfNull: false) this.content, + @JsonKey(includeIfNull: false) this.refusal, @JsonKey(name: 'function_call', includeIfNull: false) this.functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) final List? toolCalls, @@ -10746,6 +10927,11 @@ class _$ChatCompletionStreamResponseDeltaImpl @JsonKey(includeIfNull: false) final String? content; + /// The refusal message generated by the model. + @override + @JsonKey(includeIfNull: false) + final String? refusal; + /// The name and arguments of a function that should be called, as generated by the model. 
@override @JsonKey(name: 'function_call', includeIfNull: false) @@ -10773,7 +10959,7 @@ class _$ChatCompletionStreamResponseDeltaImpl @override String toString() { - return 'ChatCompletionStreamResponseDelta(content: $content, functionCall: $functionCall, toolCalls: $toolCalls, role: $role)'; + return 'ChatCompletionStreamResponseDelta(content: $content, refusal: $refusal, functionCall: $functionCall, toolCalls: $toolCalls, role: $role)'; } @override @@ -10782,6 +10968,7 @@ class _$ChatCompletionStreamResponseDeltaImpl (other.runtimeType == runtimeType && other is _$ChatCompletionStreamResponseDeltaImpl && (identical(other.content, content) || other.content == content) && + (identical(other.refusal, refusal) || other.refusal == refusal) && (identical(other.functionCall, functionCall) || other.functionCall == functionCall) && const DeepCollectionEquality() @@ -10791,7 +10978,7 @@ class _$ChatCompletionStreamResponseDeltaImpl @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, content, functionCall, + int get hashCode => Object.hash(runtimeType, content, refusal, functionCall, const DeepCollectionEquality().hash(_toolCalls), role); /// Create a copy of ChatCompletionStreamResponseDelta @@ -10816,6 +11003,7 @@ abstract class _ChatCompletionStreamResponseDelta extends ChatCompletionStreamResponseDelta { const factory _ChatCompletionStreamResponseDelta( {@JsonKey(includeIfNull: false) final String? content, + @JsonKey(includeIfNull: false) final String? refusal, @JsonKey(name: 'function_call', includeIfNull: false) final ChatCompletionStreamMessageFunctionCall? functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) @@ -10836,6 +11024,11 @@ abstract class _ChatCompletionStreamResponseDelta @JsonKey(includeIfNull: false) String? get content; + /// The refusal message generated by the model. + @override + @JsonKey(includeIfNull: false) + String? get refusal; + /// The name and arguments of a function that should be called, as generated by the model. @override @JsonKey(name: 'function_call', includeIfNull: false) @@ -23311,9 +23504,10 @@ mixin _$AssistantObject { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -23706,9 +23900,10 @@ class _$AssistantObjectImpl extends _AssistantObject { @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. 
/// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -23877,9 +24072,10 @@ abstract class _AssistantObject extends AssistantObject { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -23909,11 +24105,10 @@ abstract class _AssistantObject extends AssistantObject { AssistantObjectResponseFormat _$AssistantObjectResponseFormatFromJson( Map json) { switch (json['runtimeType']) { - case 'enumeration': + case 'mode': return AssistantObjectResponseFormatEnumeration.fromJson(json); - case 'assistantsResponseFormat': - return AssistantObjectResponseFormatAssistantsResponseFormat.fromJson( - json); + case 'responseFormat': + return AssistantObjectResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -23929,49 +24124,43 @@ mixin _$AssistantObjectResponseFormat { Object get value => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function(AssistantResponseFormatMode value) enumeration, - required TResult Function(AssistantsResponseFormat value) - assistantsResponseFormat, + required TResult Function(AssistantResponseFormatMode value) mode, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(AssistantResponseFormatMode value)? enumeration, - TResult? Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult? Function(AssistantResponseFormatMode value)? mode, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(AssistantResponseFormatMode value)? enumeration, - TResult Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult Function(AssistantResponseFormatMode value)? mode, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ required TResult Function(AssistantObjectResponseFormatEnumeration value) - enumeration, - required TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value) - assistantsResponseFormat, + mode, + required TResult Function(AssistantObjectResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult? Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult? Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult? Function(AssistantObjectResponseFormatResponseFormat value)? 
+ responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -24046,7 +24235,7 @@ class _$AssistantObjectResponseFormatEnumerationImpl extends AssistantObjectResponseFormatEnumeration { const _$AssistantObjectResponseFormatEnumerationImpl(this.value, {final String? $type}) - : $type = $type ?? 'enumeration', + : $type = $type ?? 'mode', super._(); factory _$AssistantObjectResponseFormatEnumerationImpl.fromJson( @@ -24061,7 +24250,7 @@ class _$AssistantObjectResponseFormatEnumerationImpl @override String toString() { - return 'AssistantObjectResponseFormat.enumeration(value: $value)'; + return 'AssistantObjectResponseFormat.mode(value: $value)'; } @override @@ -24090,31 +24279,30 @@ class _$AssistantObjectResponseFormatEnumerationImpl @override @optionalTypeArgs TResult when({ - required TResult Function(AssistantResponseFormatMode value) enumeration, - required TResult Function(AssistantsResponseFormat value) - assistantsResponseFormat, + required TResult Function(AssistantResponseFormatMode value) mode, + required TResult Function(ResponseFormat value) responseFormat, }) { - return enumeration(value); + return mode(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(AssistantResponseFormatMode value)? enumeration, - TResult? Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult? Function(AssistantResponseFormatMode value)? mode, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return enumeration?.call(value); + return mode?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(AssistantResponseFormatMode value)? enumeration, - TResult Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult Function(AssistantResponseFormatMode value)? mode, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (enumeration != null) { - return enumeration(value); + if (mode != null) { + return mode(value); } return orElse(); } @@ -24123,38 +24311,33 @@ class _$AssistantObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult map({ required TResult Function(AssistantObjectResponseFormatEnumeration value) - enumeration, - required TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value) - assistantsResponseFormat, + mode, + required TResult Function(AssistantObjectResponseFormatResponseFormat value) + responseFormat, }) { - return enumeration(this); + return mode(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult? Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult? Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult? Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, }) { - return enumeration?.call(this); + return mode?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantObjectResponseFormatEnumeration value)? 
- enumeration, - TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (enumeration != null) { - return enumeration(this); + if (mode != null) { + return mode(this); } return orElse(); } @@ -24190,33 +24373,28 @@ abstract class AssistantObjectResponseFormatEnumeration } /// @nodoc -abstract class _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$AssistantObjectResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith( - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl value, - $Res Function( - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl) + factory _$$AssistantObjectResponseFormatResponseFormatImplCopyWith( + _$AssistantObjectResponseFormatResponseFormatImpl value, + $Res Function(_$AssistantObjectResponseFormatResponseFormatImpl) then) = - __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< - $Res>; + __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl<$Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< - $Res> +class __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl<$Res> extends _$AssistantObjectResponseFormatCopyWithImpl<$Res, - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> + _$AssistantObjectResponseFormatResponseFormatImpl> implements - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< - $Res> { - __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl _value, - $Res Function(_$AssistantObjectResponseFormatAssistantsResponseFormatImpl) - _then) + _$$AssistantObjectResponseFormatResponseFormatImplCopyWith<$Res> { + __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl( + _$AssistantObjectResponseFormatResponseFormatImpl _value, + $Res Function(_$AssistantObjectResponseFormatResponseFormatImpl) _then) : super(_value, _then); /// Create a copy of AssistantObjectResponseFormat @@ -24226,11 +24404,11 @@ class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< $Res call({ Object? value = null, }) { - return _then(_$AssistantObjectResponseFormatAssistantsResponseFormatImpl( + return _then(_$AssistantObjectResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } @@ -24238,8 +24416,8 @@ class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -24247,35 +24425,33 @@ class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< /// @nodoc @JsonSerializable() -class _$AssistantObjectResponseFormatAssistantsResponseFormatImpl - extends AssistantObjectResponseFormatAssistantsResponseFormat { - const _$AssistantObjectResponseFormatAssistantsResponseFormatImpl(this.value, +class _$AssistantObjectResponseFormatResponseFormatImpl + extends AssistantObjectResponseFormatResponseFormat { + const _$AssistantObjectResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'assistantsResponseFormat', + : $type = $type ?? 'responseFormat', super._(); - factory _$AssistantObjectResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$AssistantObjectResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplFromJson( - json); + _$$AssistantObjectResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'AssistantObjectResponseFormat.assistantsResponseFormat(value: $value)'; + return 'AssistantObjectResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$AssistantObjectResponseFormatAssistantsResponseFormatImpl && + other is _$AssistantObjectResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } @@ -24288,41 +24464,40 @@ class _$AssistantObjectResponseFormatAssistantsResponseFormatImpl @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> + _$$AssistantObjectResponseFormatResponseFormatImplCopyWith< + _$AssistantObjectResponseFormatResponseFormatImpl> get copyWith => - __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl>( + __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl< + _$AssistantObjectResponseFormatResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(AssistantResponseFormatMode value) enumeration, - required TResult Function(AssistantsResponseFormat value) - assistantsResponseFormat, + required TResult Function(AssistantResponseFormatMode value) mode, + required TResult Function(ResponseFormat value) responseFormat, }) { - return assistantsResponseFormat(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(AssistantResponseFormatMode value)? enumeration, - TResult? Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult? Function(AssistantResponseFormatMode value)? mode, + TResult? Function(ResponseFormat value)? 
responseFormat, }) { - return assistantsResponseFormat?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(AssistantResponseFormatMode value)? enumeration, - TResult Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult Function(AssistantResponseFormatMode value)? mode, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (assistantsResponseFormat != null) { - return assistantsResponseFormat(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -24331,69 +24506,64 @@ class _$AssistantObjectResponseFormatAssistantsResponseFormatImpl @optionalTypeArgs TResult map({ required TResult Function(AssistantObjectResponseFormatEnumeration value) - enumeration, - required TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value) - assistantsResponseFormat, + mode, + required TResult Function(AssistantObjectResponseFormatResponseFormat value) + responseFormat, }) { - return assistantsResponseFormat(this); + return responseFormat(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult? Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult? Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult? Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, }) { - return assistantsResponseFormat?.call(this); + return responseFormat?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (assistantsResponseFormat != null) { - return assistantsResponseFormat(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$AssistantObjectResponseFormatAssistantsResponseFormatImplToJson( + return _$$AssistantObjectResponseFormatResponseFormatImplToJson( this, ); } } -abstract class AssistantObjectResponseFormatAssistantsResponseFormat +abstract class AssistantObjectResponseFormatResponseFormat extends AssistantObjectResponseFormat { - const factory AssistantObjectResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl; - const AssistantObjectResponseFormatAssistantsResponseFormat._() : super._(); + const factory AssistantObjectResponseFormatResponseFormat( + final ResponseFormat value) = + _$AssistantObjectResponseFormatResponseFormatImpl; + const AssistantObjectResponseFormatResponseFormat._() : super._(); - factory AssistantObjectResponseFormatAssistantsResponseFormat.fromJson( + factory AssistantObjectResponseFormatResponseFormat.fromJson( Map json) = - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl.fromJson; + _$AssistantObjectResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; + ResponseFormat get value; /// Create a copy of AssistantObjectResponseFormat /// with the given fields replaced by the non-null parameter values. 
@JsonKey(includeFromJson: false, includeToJson: false) - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> + _$$AssistantObjectResponseFormatResponseFormatImplCopyWith< + _$AssistantObjectResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -24447,9 +24617,10 @@ mixin _$CreateAssistantRequest { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -24813,9 +24984,10 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -24966,9 +25138,10 @@ abstract class _CreateAssistantRequest extends CreateAssistantRequest { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -25409,9 +25582,8 @@ CreateAssistantRequestResponseFormat switch (json['runtimeType']) { case 'mode': return CreateAssistantRequestResponseFormatEnumeration.fromJson(json); - case 'format': - return CreateAssistantRequestResponseFormatAssistantsResponseFormat - .fromJson(json); + case 'responseFormat': + return CreateAssistantRequestResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -25428,19 +25600,19 @@ mixin _$CreateAssistantRequestResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(CreateAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? 
Function(CreateAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -25450,26 +25622,24 @@ mixin _$CreateAssistantRequestResponseFormat { CreateAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + CreateAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -25594,7 +25764,7 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(CreateAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -25603,7 +25773,7 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -25612,7 +25782,7 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -25628,8 +25798,8 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl CreateAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + CreateAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -25639,9 +25809,8 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl TResult? mapOrNull({ TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateAssistantRequestResponseFormatResponseFormat value)? 
+ responseFormat, }) { return mode?.call(this); } @@ -25651,9 +25820,8 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl TResult maybeMap({ TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -25693,33 +25861,32 @@ abstract class CreateAssistantRequestResponseFormatEnumeration } /// @nodoc -abstract class _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl value, + factory _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith( + _$CreateAssistantRequestResponseFormatResponseFormatImpl value, $Res Function( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl) + _$CreateAssistantRequestResponseFormatResponseFormatImpl) then) = - __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< $Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< +class __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< $Res> extends _$CreateAssistantRequestResponseFormatCopyWithImpl<$Res, - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$CreateAssistantRequestResponseFormatResponseFormatImpl> implements - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< $Res> { - __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl _value, - $Res Function( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl) + __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl( + _$CreateAssistantRequestResponseFormatResponseFormatImpl _value, + $Res Function(_$CreateAssistantRequestResponseFormatResponseFormatImpl) _then) : super(_value, _then); @@ -25730,12 +25897,11 @@ class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi $Res call({ Object? value = null, }) { - return _then( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( + return _then(_$CreateAssistantRequestResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } @@ -25743,8 +25909,8 @@ class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -25752,36 +25918,33 @@ class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi /// @nodoc @JsonSerializable() -class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl - extends CreateAssistantRequestResponseFormatAssistantsResponseFormat { - const _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( - this.value, +class _$CreateAssistantRequestResponseFormatResponseFormatImpl + extends CreateAssistantRequestResponseFormatResponseFormat { + const _$CreateAssistantRequestResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 'responseFormat', super._(); - factory _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$CreateAssistantRequestResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( - json); + _$$CreateAssistantRequestResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'CreateAssistantRequestResponseFormat.format(value: $value)'; + return 'CreateAssistantRequestResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl && + other is _$CreateAssistantRequestResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } @@ -25794,40 +25957,40 @@ class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$CreateAssistantRequestResponseFormatResponseFormatImpl> get copyWith => - __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl>( + __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< + _$CreateAssistantRequestResponseFormatResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(CreateAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateAssistantResponseFormatMode value)? 
mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -25839,10 +26002,10 @@ class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl CreateAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + CreateAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @@ -25850,11 +26013,10 @@ class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl TResult? mapOrNull({ TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @@ -25862,46 +26024,43 @@ class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl TResult maybeMap({ TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( + return _$$CreateAssistantRequestResponseFormatResponseFormatImplToJson( this, ); } } -abstract class CreateAssistantRequestResponseFormatAssistantsResponseFormat +abstract class CreateAssistantRequestResponseFormatResponseFormat extends CreateAssistantRequestResponseFormat { - const factory CreateAssistantRequestResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl; - const CreateAssistantRequestResponseFormatAssistantsResponseFormat._() - : super._(); + const factory CreateAssistantRequestResponseFormatResponseFormat( + final ResponseFormat value) = + _$CreateAssistantRequestResponseFormatResponseFormatImpl; + const CreateAssistantRequestResponseFormatResponseFormat._() : super._(); - factory CreateAssistantRequestResponseFormatAssistantsResponseFormat.fromJson( + factory CreateAssistantRequestResponseFormatResponseFormat.fromJson( Map json) = - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl - .fromJson; + _$CreateAssistantRequestResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; + ResponseFormat get value; /// Create a copy of CreateAssistantRequestResponseFormat /// with the given fields replaced by the non-null parameter values. 
@JsonKey(includeFromJson: false, includeToJson: false) - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$CreateAssistantRequestResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -25959,9 +26118,10 @@ mixin _$ModifyAssistantRequest { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -26338,9 +26498,10 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -26499,9 +26660,10 @@ abstract class _ModifyAssistantRequest extends ModifyAssistantRequest { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -26533,9 +26695,8 @@ ModifyAssistantRequestResponseFormat switch (json['runtimeType']) { case 'mode': return ModifyAssistantRequestResponseFormatEnumeration.fromJson(json); - case 'format': - return ModifyAssistantRequestResponseFormatAssistantsResponseFormat - .fromJson(json); + case 'responseFormat': + return ModifyAssistantRequestResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -26552,19 +26713,19 @@ mixin _$ModifyAssistantRequestResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(ModifyAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? 
whenOrNull({ TResult? Function(ModifyAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(ModifyAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -26574,26 +26735,24 @@ mixin _$ModifyAssistantRequestResponseFormat { ModifyAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + ModifyAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -26718,7 +26877,7 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(ModifyAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -26727,7 +26886,7 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(ModifyAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -26736,7 +26895,7 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(ModifyAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -26752,8 +26911,8 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl ModifyAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + ModifyAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -26763,9 +26922,8 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl TResult? mapOrNull({ TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(ModifyAssistantRequestResponseFormatResponseFormat value)? 
+ responseFormat, }) { return mode?.call(this); } @@ -26775,9 +26933,8 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl TResult maybeMap({ TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -26817,33 +26974,32 @@ abstract class ModifyAssistantRequestResponseFormatEnumeration } /// @nodoc -abstract class _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl value, + factory _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith( + _$ModifyAssistantRequestResponseFormatResponseFormatImpl value, $Res Function( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl) + _$ModifyAssistantRequestResponseFormatResponseFormatImpl) then) = - __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< $Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< +class __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< $Res> extends _$ModifyAssistantRequestResponseFormatCopyWithImpl<$Res, - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$ModifyAssistantRequestResponseFormatResponseFormatImpl> implements - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< $Res> { - __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl _value, - $Res Function( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl) + __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl( + _$ModifyAssistantRequestResponseFormatResponseFormatImpl _value, + $Res Function(_$ModifyAssistantRequestResponseFormatResponseFormatImpl) _then) : super(_value, _then); @@ -26854,12 +27010,11 @@ class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi $Res call({ Object? value = null, }) { - return _then( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( + return _then(_$ModifyAssistantRequestResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } @@ -26867,8 +27022,8 @@ class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -26876,36 +27031,33 @@ class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi /// @nodoc @JsonSerializable() -class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl - extends ModifyAssistantRequestResponseFormatAssistantsResponseFormat { - const _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( - this.value, +class _$ModifyAssistantRequestResponseFormatResponseFormatImpl + extends ModifyAssistantRequestResponseFormatResponseFormat { + const _$ModifyAssistantRequestResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 'responseFormat', super._(); - factory _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$ModifyAssistantRequestResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( - json); + _$$ModifyAssistantRequestResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'ModifyAssistantRequestResponseFormat.format(value: $value)'; + return 'ModifyAssistantRequestResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl && + other is _$ModifyAssistantRequestResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } @@ -26918,40 +27070,40 @@ class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$ModifyAssistantRequestResponseFormatResponseFormatImpl> get copyWith => - __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl>( + __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< + _$ModifyAssistantRequestResponseFormatResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(ModifyAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(ModifyAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(ModifyAssistantResponseFormatMode value)? 
mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -26963,10 +27115,10 @@ class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl ModifyAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + ModifyAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @@ -26974,11 +27126,10 @@ class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl TResult? mapOrNull({ TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @@ -26986,46 +27137,43 @@ class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl TResult maybeMap({ TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( + return _$$ModifyAssistantRequestResponseFormatResponseFormatImplToJson( this, ); } } -abstract class ModifyAssistantRequestResponseFormatAssistantsResponseFormat +abstract class ModifyAssistantRequestResponseFormatResponseFormat extends ModifyAssistantRequestResponseFormat { - const factory ModifyAssistantRequestResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl; - const ModifyAssistantRequestResponseFormatAssistantsResponseFormat._() - : super._(); + const factory ModifyAssistantRequestResponseFormatResponseFormat( + final ResponseFormat value) = + _$ModifyAssistantRequestResponseFormatResponseFormatImpl; + const ModifyAssistantRequestResponseFormatResponseFormat._() : super._(); - factory ModifyAssistantRequestResponseFormatAssistantsResponseFormat.fromJson( + factory ModifyAssistantRequestResponseFormatResponseFormat.fromJson( Map json) = - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl - .fromJson; + _$ModifyAssistantRequestResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; + ResponseFormat get value; /// Create a copy of ModifyAssistantRequestResponseFormat /// with the given fields replaced by the non-null parameter values. 
@JsonKey(includeFromJson: false, includeToJson: false) - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$ModifyAssistantRequestResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -27908,170 +28056,6 @@ abstract class _AssistantsFunctionCallOption get copyWith => throw _privateConstructorUsedError; } -AssistantsResponseFormat _$AssistantsResponseFormatFromJson( - Map json) { - return _AssistantsResponseFormat.fromJson(json); -} - -/// @nodoc -mixin _$AssistantsResponseFormat { - /// Must be one of `text` or `json_object`. - AssistantsResponseFormatType get type => throw _privateConstructorUsedError; - - /// Serializes this AssistantsResponseFormat to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of AssistantsResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $AssistantsResponseFormatCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $AssistantsResponseFormatCopyWith<$Res> { - factory $AssistantsResponseFormatCopyWith(AssistantsResponseFormat value, - $Res Function(AssistantsResponseFormat) then) = - _$AssistantsResponseFormatCopyWithImpl<$Res, AssistantsResponseFormat>; - @useResult - $Res call({AssistantsResponseFormatType type}); -} - -/// @nodoc -class _$AssistantsResponseFormatCopyWithImpl<$Res, - $Val extends AssistantsResponseFormat> - implements $AssistantsResponseFormatCopyWith<$Res> { - _$AssistantsResponseFormatCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of AssistantsResponseFormat - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormatType, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$AssistantsResponseFormatImplCopyWith<$Res> - implements $AssistantsResponseFormatCopyWith<$Res> { - factory _$$AssistantsResponseFormatImplCopyWith( - _$AssistantsResponseFormatImpl value, - $Res Function(_$AssistantsResponseFormatImpl) then) = - __$$AssistantsResponseFormatImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({AssistantsResponseFormatType type}); -} - -/// @nodoc -class __$$AssistantsResponseFormatImplCopyWithImpl<$Res> - extends _$AssistantsResponseFormatCopyWithImpl<$Res, - _$AssistantsResponseFormatImpl> - implements _$$AssistantsResponseFormatImplCopyWith<$Res> { - __$$AssistantsResponseFormatImplCopyWithImpl( - _$AssistantsResponseFormatImpl _value, - $Res Function(_$AssistantsResponseFormatImpl) _then) - : super(_value, _then); - - /// Create a copy of AssistantsResponseFormat - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_$AssistantsResponseFormatImpl( - type: null == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormatType, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$AssistantsResponseFormatImpl extends _AssistantsResponseFormat { - const _$AssistantsResponseFormatImpl( - {this.type = AssistantsResponseFormatType.text}) - : super._(); - - factory _$AssistantsResponseFormatImpl.fromJson(Map json) => - _$$AssistantsResponseFormatImplFromJson(json); - - /// Must be one of `text` or `json_object`. - @override - @JsonKey() - final AssistantsResponseFormatType type; - - @override - String toString() { - return 'AssistantsResponseFormat(type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$AssistantsResponseFormatImpl && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, type); - - /// Create a copy of AssistantsResponseFormat - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$AssistantsResponseFormatImplCopyWith<_$AssistantsResponseFormatImpl> - get copyWith => __$$AssistantsResponseFormatImplCopyWithImpl< - _$AssistantsResponseFormatImpl>(this, _$identity); - - @override - Map toJson() { - return _$$AssistantsResponseFormatImplToJson( - this, - ); - } -} - -abstract class _AssistantsResponseFormat extends AssistantsResponseFormat { - const factory _AssistantsResponseFormat( - {final AssistantsResponseFormatType type}) = - _$AssistantsResponseFormatImpl; - const _AssistantsResponseFormat._() : super._(); - - factory _AssistantsResponseFormat.fromJson(Map json) = - _$AssistantsResponseFormatImpl.fromJson; - - /// Must be one of `text` or `json_object`. - @override - AssistantsResponseFormatType get type; - - /// Create a copy of AssistantsResponseFormat - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$AssistantsResponseFormatImplCopyWith<_$AssistantsResponseFormatImpl> - get copyWith => throw _privateConstructorUsedError; -} - TruncationObject _$TruncationObjectFromJson(Map json) { return _TruncationObject.fromJson(json); } @@ -28374,9 +28358,10 @@ mixin _$RunObject { @JsonKey(name: 'parallel_tool_calls') bool? get parallelToolCalls => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -29109,9 +29094,10 @@ class _$RunObjectImpl extends _RunObject { @JsonKey(name: 'parallel_tool_calls') final bool? parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. 
Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -29410,9 +29396,10 @@ abstract class _RunObject extends RunObject { @JsonKey(name: 'parallel_tool_calls') bool? get parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -30466,8 +30453,8 @@ RunObjectResponseFormat _$RunObjectResponseFormatFromJson( switch (json['runtimeType']) { case 'mode': return RunObjectResponseFormatEnumeration.fromJson(json); - case 'format': - return RunObjectResponseFormatAssistantsResponseFormat.fromJson(json); + case 'responseFormat': + return RunObjectResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -30484,42 +30471,41 @@ mixin _$RunObjectResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(RunObjectResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(RunObjectResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(RunObjectResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ required TResult Function(RunObjectResponseFormatEnumeration value) mode, - required TResult Function( - RunObjectResponseFormatAssistantsResponseFormat value) - format, + required TResult Function(RunObjectResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(RunObjectResponseFormatEnumeration value)? mode, - TResult? Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(RunObjectResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(RunObjectResponseFormatEnumeration value)? mode, - TResult Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(RunObjectResponseFormatResponseFormat value)? 
+ responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -30636,7 +30622,7 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(RunObjectResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -30645,7 +30631,7 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(RunObjectResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -30654,7 +30640,7 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(RunObjectResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -30667,9 +30653,8 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult map({ required TResult Function(RunObjectResponseFormatEnumeration value) mode, - required TResult Function( - RunObjectResponseFormatAssistantsResponseFormat value) - format, + required TResult Function(RunObjectResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -30678,8 +30663,8 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult? mapOrNull({ TResult? Function(RunObjectResponseFormatEnumeration value)? mode, - TResult? Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(RunObjectResponseFormatResponseFormat value)? + responseFormat, }) { return mode?.call(this); } @@ -30688,8 +30673,8 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeMap({ TResult Function(RunObjectResponseFormatEnumeration value)? mode, - TResult Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(RunObjectResponseFormatResponseFormat value)? 
+ responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -30729,29 +30714,25 @@ abstract class RunObjectResponseFormatEnumeration } /// @nodoc -abstract class _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< - $Res> { - factory _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith( - _$RunObjectResponseFormatAssistantsResponseFormatImpl value, - $Res Function(_$RunObjectResponseFormatAssistantsResponseFormatImpl) - then) = - __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res>; +abstract class _$$RunObjectResponseFormatResponseFormatImplCopyWith<$Res> { + factory _$$RunObjectResponseFormatResponseFormatImplCopyWith( + _$RunObjectResponseFormatResponseFormatImpl value, + $Res Function(_$RunObjectResponseFormatResponseFormatImpl) then) = + __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl<$Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> +class __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl<$Res> extends _$RunObjectResponseFormatCopyWithImpl<$Res, - _$RunObjectResponseFormatAssistantsResponseFormatImpl> - implements - _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith<$Res> { - __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$RunObjectResponseFormatAssistantsResponseFormatImpl _value, - $Res Function(_$RunObjectResponseFormatAssistantsResponseFormatImpl) - _then) + _$RunObjectResponseFormatResponseFormatImpl> + implements _$$RunObjectResponseFormatResponseFormatImplCopyWith<$Res> { + __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl( + _$RunObjectResponseFormatResponseFormatImpl _value, + $Res Function(_$RunObjectResponseFormatResponseFormatImpl) _then) : super(_value, _then); /// Create a copy of RunObjectResponseFormat @@ -30761,11 +30742,11 @@ class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> $Res call({ Object? value = null, }) { - return _then(_$RunObjectResponseFormatAssistantsResponseFormatImpl( + return _then(_$RunObjectResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } @@ -30773,8 +30754,8 @@ class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -30782,33 +30763,33 @@ class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$RunObjectResponseFormatAssistantsResponseFormatImpl - extends RunObjectResponseFormatAssistantsResponseFormat { - const _$RunObjectResponseFormatAssistantsResponseFormatImpl(this.value, +class _$RunObjectResponseFormatResponseFormatImpl + extends RunObjectResponseFormatResponseFormat { + const _$RunObjectResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 
'responseFormat', super._(); - factory _$RunObjectResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$RunObjectResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$RunObjectResponseFormatAssistantsResponseFormatImplFromJson(json); + _$$RunObjectResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'RunObjectResponseFormat.format(value: $value)'; + return 'RunObjectResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunObjectResponseFormatAssistantsResponseFormatImpl && + other is _$RunObjectResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } @@ -30821,40 +30802,38 @@ class _$RunObjectResponseFormatAssistantsResponseFormatImpl @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< - _$RunObjectResponseFormatAssistantsResponseFormatImpl> - get copyWith => - __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$RunObjectResponseFormatAssistantsResponseFormatImpl>( - this, _$identity); + _$$RunObjectResponseFormatResponseFormatImplCopyWith< + _$RunObjectResponseFormatResponseFormatImpl> + get copyWith => __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl< + _$RunObjectResponseFormatResponseFormatImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(RunObjectResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(RunObjectResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(RunObjectResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -30863,64 +30842,62 @@ class _$RunObjectResponseFormatAssistantsResponseFormatImpl @optionalTypeArgs TResult map({ required TResult Function(RunObjectResponseFormatEnumeration value) mode, - required TResult Function( - RunObjectResponseFormatAssistantsResponseFormat value) - format, + required TResult Function(RunObjectResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @optionalTypeArgs TResult? mapOrNull({ TResult? Function(RunObjectResponseFormatEnumeration value)? mode, - TResult? Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(RunObjectResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @optionalTypeArgs TResult maybeMap({ TResult Function(RunObjectResponseFormatEnumeration value)? 
mode, - TResult Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(RunObjectResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$RunObjectResponseFormatAssistantsResponseFormatImplToJson( + return _$$RunObjectResponseFormatResponseFormatImplToJson( this, ); } } -abstract class RunObjectResponseFormatAssistantsResponseFormat +abstract class RunObjectResponseFormatResponseFormat extends RunObjectResponseFormat { - const factory RunObjectResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$RunObjectResponseFormatAssistantsResponseFormatImpl; - const RunObjectResponseFormatAssistantsResponseFormat._() : super._(); + const factory RunObjectResponseFormatResponseFormat( + final ResponseFormat value) = _$RunObjectResponseFormatResponseFormatImpl; + const RunObjectResponseFormatResponseFormat._() : super._(); - factory RunObjectResponseFormatAssistantsResponseFormat.fromJson( + factory RunObjectResponseFormatResponseFormat.fromJson( Map json) = - _$RunObjectResponseFormatAssistantsResponseFormatImpl.fromJson; + _$RunObjectResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; + ResponseFormat get value; /// Create a copy of RunObjectResponseFormat /// with the given fields replaced by the non-null parameter values. @JsonKey(includeFromJson: false, includeToJson: false) - _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< - _$RunObjectResponseFormatAssistantsResponseFormatImpl> + _$$RunObjectResponseFormatResponseFormatImplCopyWith< + _$RunObjectResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -31403,9 +31380,10 @@ mixin _$CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -31940,9 +31918,10 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) final bool? parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. 
Learn more in the @@ -32163,9 +32142,10 @@ abstract class _CreateRunRequest extends CreateRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -33097,9 +33077,8 @@ CreateRunRequestResponseFormat _$CreateRunRequestResponseFormatFromJson( switch (json['runtimeType']) { case 'mode': return CreateRunRequestResponseFormatEnumeration.fromJson(json); - case 'format': - return CreateRunRequestResponseFormatAssistantsResponseFormat.fromJson( - json); + case 'responseFormat': + return CreateRunRequestResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -33116,19 +33095,19 @@ mixin _$CreateRunRequestResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(CreateRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -33137,24 +33116,22 @@ mixin _$CreateRunRequestResponseFormat { required TResult Function(CreateRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value) - format, + CreateRunRequestResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateRunRequestResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateRunRequestResponseFormatResponseFormat value)? 
+ responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -33275,7 +33252,7 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(CreateRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -33284,7 +33261,7 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -33293,7 +33270,7 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -33308,8 +33285,8 @@ class _$CreateRunRequestResponseFormatEnumerationImpl required TResult Function(CreateRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value) - format, + CreateRunRequestResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -33318,9 +33295,8 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateRunRequestResponseFormatResponseFormat value)? + responseFormat, }) { return mode?.call(this); } @@ -33329,9 +33305,8 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeMap({ TResult Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateRunRequestResponseFormatResponseFormat value)? 
+ responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -33371,34 +33346,28 @@ abstract class CreateRunRequestResponseFormatEnumeration } /// @nodoc -abstract class _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl value, - $Res Function( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl) + factory _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith( + _$CreateRunRequestResponseFormatResponseFormatImpl value, + $Res Function(_$CreateRunRequestResponseFormatResponseFormatImpl) then) = - __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - $Res>; + __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl<$Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - $Res> +class __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl<$Res> extends _$CreateRunRequestResponseFormatCopyWithImpl<$Res, - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> + _$CreateRunRequestResponseFormatResponseFormatImpl> implements - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - $Res> { - __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl _value, - $Res Function( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl) - _then) + _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith<$Res> { + __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl( + _$CreateRunRequestResponseFormatResponseFormatImpl _value, + $Res Function(_$CreateRunRequestResponseFormatResponseFormatImpl) _then) : super(_value, _then); /// Create a copy of CreateRunRequestResponseFormat @@ -33408,11 +33377,11 @@ class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl $Res call({ Object? value = null, }) { - return _then(_$CreateRunRequestResponseFormatAssistantsResponseFormatImpl( + return _then(_$CreateRunRequestResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } @@ -33420,8 +33389,8 @@ class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -33429,35 +33398,33 @@ class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl /// @nodoc @JsonSerializable() -class _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl - extends CreateRunRequestResponseFormatAssistantsResponseFormat { - const _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl(this.value, +class _$CreateRunRequestResponseFormatResponseFormatImpl + extends CreateRunRequestResponseFormatResponseFormat { + const _$CreateRunRequestResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 'responseFormat', super._(); - factory _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$CreateRunRequestResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplFromJson( - json); + _$$CreateRunRequestResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'CreateRunRequestResponseFormat.format(value: $value)'; + return 'CreateRunRequestResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl && + other is _$CreateRunRequestResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } @@ -33470,40 +33437,40 @@ class _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith< + _$CreateRunRequestResponseFormatResponseFormatImpl> get copyWith => - __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl>( + __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl< + _$CreateRunRequestResponseFormatResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(CreateRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? 
responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -33514,65 +33481,63 @@ class _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl required TResult Function(CreateRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value) - format, + CreateRunRequestResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateRunRequestResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @optionalTypeArgs TResult maybeMap({ TResult Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateRunRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplToJson( + return _$$CreateRunRequestResponseFormatResponseFormatImplToJson( this, ); } } -abstract class CreateRunRequestResponseFormatAssistantsResponseFormat +abstract class CreateRunRequestResponseFormatResponseFormat extends CreateRunRequestResponseFormat { - const factory CreateRunRequestResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl; - const CreateRunRequestResponseFormatAssistantsResponseFormat._() : super._(); + const factory CreateRunRequestResponseFormatResponseFormat( + final ResponseFormat value) = + _$CreateRunRequestResponseFormatResponseFormatImpl; + const CreateRunRequestResponseFormatResponseFormat._() : super._(); - factory CreateRunRequestResponseFormatAssistantsResponseFormat.fromJson( + factory CreateRunRequestResponseFormatResponseFormat.fromJson( Map json) = - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl.fromJson; + _$CreateRunRequestResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; + ResponseFormat get value; /// Create a copy of CreateRunRequestResponseFormat /// with the given fields replaced by the non-null parameter values. @JsonKey(includeFromJson: false, includeToJson: false) - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith< + _$CreateRunRequestResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -34926,9 +34891,10 @@ mixin _$CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. 
Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -35484,9 +35450,10 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) final bool? parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -35705,9 +35672,10 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) bool? get parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - /// since `gpt-4o-mini-1106`. + /// since `gpt-3.5-turbo-1106`. /// /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees /// the model will match your supplied JSON schema. Learn more in the @@ -36662,9 +36630,9 @@ CreateThreadAndRunRequestResponseFormat switch (json['runtimeType']) { case 'mode': return CreateThreadAndRunRequestResponseFormatEnumeration.fromJson(json); - case 'format': - return CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - .fromJson(json); + case 'responseFormat': + return CreateThreadAndRunRequestResponseFormatResponseFormat.fromJson( + json); default: throw CheckedFromJsonException( @@ -36682,19 +36650,19 @@ mixin _$CreateThreadAndRunRequestResponseFormat { TResult when({ required TResult Function(CreateThreadAndRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? 
responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -36704,9 +36672,8 @@ mixin _$CreateThreadAndRunRequestResponseFormat { CreateThreadAndRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value) - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -36714,9 +36681,8 @@ mixin _$CreateThreadAndRunRequestResponseFormat { TResult? Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult? Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -36724,9 +36690,8 @@ mixin _$CreateThreadAndRunRequestResponseFormat { TResult Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -36857,7 +36822,7 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl TResult when({ required TResult Function(CreateThreadAndRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -36866,7 +36831,7 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -36875,7 +36840,7 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -36891,9 +36856,8 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl CreateThreadAndRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value) - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -36904,9 +36868,8 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl TResult? Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult? Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, }) { return mode?.call(this); } @@ -36917,9 +36880,8 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl TResult Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? 
+ responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -36959,35 +36921,32 @@ abstract class CreateThreadAndRunRequestResponseFormatEnumeration } /// @nodoc -abstract class _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - value, + factory _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith( + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl value, $Res Function( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl) + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl) then) = - __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl< $Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< +class __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl< $Res> extends _$CreateThreadAndRunRequestResponseFormatCopyWithImpl<$Res, - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl> implements - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< $Res> { - __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - _value, - $Res Function( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl) + __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl( + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl _value, + $Res Function(_$CreateThreadAndRunRequestResponseFormatResponseFormatImpl) _then) : super(_value, _then); @@ -36998,12 +36957,11 @@ class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCop $Res call({ Object? value = null, }) { - return _then( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl( + return _then(_$CreateThreadAndRunRequestResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } @@ -37011,8 +36969,8 @@ class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCop /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -37020,28 +36978,27 @@ class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCop /// @nodoc @JsonSerializable() -class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - extends CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat { - const _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl( - this.value, +class _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl + extends CreateThreadAndRunRequestResponseFormatResponseFormat { + const _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 'responseFormat', super._(); - factory _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplFromJson( + _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplFromJson( json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'CreateThreadAndRunRequestResponseFormat.format(value: $value)'; + return 'CreateThreadAndRunRequestResponseFormat.responseFormat(value: $value)'; } @override @@ -37049,7 +37006,7 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl return identical(this, other) || (other.runtimeType == runtimeType && other - is _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl && + is _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } @@ -37062,11 +37019,11 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl> get copyWith => - __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl>( + __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl< + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl>( this, _$identity); @override @@ -37074,29 +37031,29 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl TResult when({ required TResult Function(CreateThreadAndRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? 
responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -37108,11 +37065,10 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl CreateThreadAndRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value) - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @@ -37121,11 +37077,10 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl TResult? Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult? Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @@ -37134,46 +37089,43 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl TResult Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplToJson( + return _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplToJson( this, ); } } -abstract class CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat +abstract class CreateThreadAndRunRequestResponseFormatResponseFormat extends CreateThreadAndRunRequestResponseFormat { - const factory CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl; - const CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat._() - : super._(); + const factory CreateThreadAndRunRequestResponseFormatResponseFormat( + final ResponseFormat value) = + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl; + const CreateThreadAndRunRequestResponseFormatResponseFormat._() : super._(); - factory CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat.fromJson( + factory CreateThreadAndRunRequestResponseFormatResponseFormat.fromJson( Map json) = - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - .fromJson; + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; + ResponseFormat get value; /// Create a copy of CreateThreadAndRunRequestResponseFormat /// with the given fields replaced by the non-null parameter values. 
@JsonKey(includeFromJson: false, includeToJson: false) - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -43224,263 +43176,6 @@ abstract class _MessageContentTextAnnotationsFileCitation get copyWith => throw _privateConstructorUsedError; } -MessageDeltaContentImageUrlObject _$MessageDeltaContentImageUrlObjectFromJson( - Map json) { - return _MessageDeltaContentImageUrlObject.fromJson(json); -} - -/// @nodoc -mixin _$MessageDeltaContentImageUrlObject { - /// The index of the content part in the message. - @JsonKey(includeIfNull: false) - int? get index => throw _privateConstructorUsedError; - - /// Always `image_url`. - @JsonKey(includeIfNull: false) - String? get type => throw _privateConstructorUsedError; - - /// The image URL part of a message. - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? get imageUrl => throw _privateConstructorUsedError; - - /// Serializes this MessageDeltaContentImageUrlObject to a JSON map. - Map toJson() => throw _privateConstructorUsedError; - - /// Create a copy of MessageDeltaContentImageUrlObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - $MessageDeltaContentImageUrlObjectCopyWith - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $MessageDeltaContentImageUrlObjectCopyWith<$Res> { - factory $MessageDeltaContentImageUrlObjectCopyWith( - MessageDeltaContentImageUrlObject value, - $Res Function(MessageDeltaContentImageUrlObject) then) = - _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, - MessageDeltaContentImageUrlObject>; - @useResult - $Res call( - {@JsonKey(includeIfNull: false) int? index, - @JsonKey(includeIfNull: false) String? type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl}); - - $MessageContentImageUrlCopyWith<$Res>? get imageUrl; -} - -/// @nodoc -class _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, - $Val extends MessageDeltaContentImageUrlObject> - implements $MessageDeltaContentImageUrlObjectCopyWith<$Res> { - _$MessageDeltaContentImageUrlObjectCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - /// Create a copy of MessageDeltaContentImageUrlObject - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? index = freezed, - Object? type = freezed, - Object? imageUrl = freezed, - }) { - return _then(_value.copyWith( - index: freezed == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int?, - type: freezed == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String?, - imageUrl: freezed == imageUrl - ? _value.imageUrl - : imageUrl // ignore: cast_nullable_to_non_nullable - as MessageContentImageUrl?, - ) as $Val); - } - - /// Create a copy of MessageDeltaContentImageUrlObject - /// with the given fields replaced by the non-null parameter values. - @override - @pragma('vm:prefer-inline') - $MessageContentImageUrlCopyWith<$Res>? 
get imageUrl { - if (_value.imageUrl == null) { - return null; - } - - return $MessageContentImageUrlCopyWith<$Res>(_value.imageUrl!, (value) { - return _then(_value.copyWith(imageUrl: value) as $Val); - }); - } -} - -/// @nodoc -abstract class _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> - implements $MessageDeltaContentImageUrlObjectCopyWith<$Res> { - factory _$$MessageDeltaContentImageUrlObjectImplCopyWith( - _$MessageDeltaContentImageUrlObjectImpl value, - $Res Function(_$MessageDeltaContentImageUrlObjectImpl) then) = - __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(includeIfNull: false) int? index, - @JsonKey(includeIfNull: false) String? type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl}); - - @override - $MessageContentImageUrlCopyWith<$Res>? get imageUrl; -} - -/// @nodoc -class __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res> - extends _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, - _$MessageDeltaContentImageUrlObjectImpl> - implements _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> { - __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl( - _$MessageDeltaContentImageUrlObjectImpl _value, - $Res Function(_$MessageDeltaContentImageUrlObjectImpl) _then) - : super(_value, _then); - - /// Create a copy of MessageDeltaContentImageUrlObject - /// with the given fields replaced by the non-null parameter values. - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? index = freezed, - Object? type = freezed, - Object? imageUrl = freezed, - }) { - return _then(_$MessageDeltaContentImageUrlObjectImpl( - index: freezed == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int?, - type: freezed == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String?, - imageUrl: freezed == imageUrl - ? _value.imageUrl - : imageUrl // ignore: cast_nullable_to_non_nullable - as MessageContentImageUrl?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$MessageDeltaContentImageUrlObjectImpl - extends _MessageDeltaContentImageUrlObject { - const _$MessageDeltaContentImageUrlObjectImpl( - {@JsonKey(includeIfNull: false) this.index, - @JsonKey(includeIfNull: false) this.type, - @JsonKey(name: 'image_url', includeIfNull: false) this.imageUrl}) - : super._(); - - factory _$MessageDeltaContentImageUrlObjectImpl.fromJson( - Map json) => - _$$MessageDeltaContentImageUrlObjectImplFromJson(json); - - /// The index of the content part in the message. - @override - @JsonKey(includeIfNull: false) - final int? index; - - /// Always `image_url`. - @override - @JsonKey(includeIfNull: false) - final String? type; - - /// The image URL part of a message. - @override - @JsonKey(name: 'image_url', includeIfNull: false) - final MessageContentImageUrl? 
imageUrl; - - @override - String toString() { - return 'MessageDeltaContentImageUrlObject(index: $index, type: $type, imageUrl: $imageUrl)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$MessageDeltaContentImageUrlObjectImpl && - (identical(other.index, index) || other.index == index) && - (identical(other.type, type) || other.type == type) && - (identical(other.imageUrl, imageUrl) || - other.imageUrl == imageUrl)); - } - - @JsonKey(includeFromJson: false, includeToJson: false) - @override - int get hashCode => Object.hash(runtimeType, index, type, imageUrl); - - /// Create a copy of MessageDeltaContentImageUrlObject - /// with the given fields replaced by the non-null parameter values. - @JsonKey(includeFromJson: false, includeToJson: false) - @override - @pragma('vm:prefer-inline') - _$$MessageDeltaContentImageUrlObjectImplCopyWith< - _$MessageDeltaContentImageUrlObjectImpl> - get copyWith => __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl< - _$MessageDeltaContentImageUrlObjectImpl>(this, _$identity); - - @override - Map toJson() { - return _$$MessageDeltaContentImageUrlObjectImplToJson( - this, - ); - } -} - -abstract class _MessageDeltaContentImageUrlObject - extends MessageDeltaContentImageUrlObject { - const factory _MessageDeltaContentImageUrlObject( - {@JsonKey(includeIfNull: false) final int? index, - @JsonKey(includeIfNull: false) final String? type, - @JsonKey(name: 'image_url', includeIfNull: false) - final MessageContentImageUrl? imageUrl}) = - _$MessageDeltaContentImageUrlObjectImpl; - const _MessageDeltaContentImageUrlObject._() : super._(); - - factory _MessageDeltaContentImageUrlObject.fromJson( - Map json) = - _$MessageDeltaContentImageUrlObjectImpl.fromJson; - - /// The index of the content part in the message. - @override - @JsonKey(includeIfNull: false) - int? get index; - - /// Always `image_url`. - @override - @JsonKey(includeIfNull: false) - String? get type; - - /// The image URL part of a message. - @override - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? get imageUrl; - - /// Create a copy of MessageDeltaContentImageUrlObject - /// with the given fields replaced by the non-null parameter values. - @override - @JsonKey(includeFromJson: false, includeToJson: false) - _$$MessageDeltaContentImageUrlObjectImplCopyWith< - _$MessageDeltaContentImageUrlObjectImpl> - get copyWith => throw _privateConstructorUsedError; -} - MessageDeltaContentText _$MessageDeltaContentTextFromJson( Map json) { return _MessageDeltaContentText.fromJson(json); @@ -53634,6 +53329,7 @@ mixin _$ChatCompletionMessage { required TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -53662,6 +53358,7 @@ mixin _$ChatCompletionMessage { TResult? Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -53690,6 +53387,7 @@ mixin _$ChatCompletionMessage { TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? 
name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -53901,6 +53599,7 @@ class _$ChatCompletionSystemMessageImpl extends ChatCompletionSystemMessage { required TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -53932,6 +53631,7 @@ class _$ChatCompletionSystemMessageImpl extends ChatCompletionSystemMessage { TResult? Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -53963,6 +53663,7 @@ class _$ChatCompletionSystemMessageImpl extends ChatCompletionSystemMessage { TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54197,6 +53898,7 @@ class _$ChatCompletionUserMessageImpl extends ChatCompletionUserMessage { required TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54228,6 +53930,7 @@ class _$ChatCompletionUserMessageImpl extends ChatCompletionUserMessage { TResult? Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54259,6 +53962,7 @@ class _$ChatCompletionUserMessageImpl extends ChatCompletionUserMessage { TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54372,6 +54076,7 @@ abstract class _$$ChatCompletionAssistantMessageImplCopyWith<$Res> $Res call( {ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54398,6 +54103,7 @@ class __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res> $Res call({ Object? role = null, Object? content = freezed, + Object? refusal = freezed, Object? name = freezed, Object? toolCalls = freezed, Object? functionCall = freezed, @@ -54411,6 +54117,10 @@ class __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res> ? _value.content : content // ignore: cast_nullable_to_non_nullable as String?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String?, name: freezed == name ? 
_value.name : name // ignore: cast_nullable_to_non_nullable @@ -54449,6 +54159,7 @@ class _$ChatCompletionAssistantMessageImpl const _$ChatCompletionAssistantMessageImpl( {this.role = ChatCompletionMessageRole.assistant, @JsonKey(includeIfNull: false) this.content, + @JsonKey(includeIfNull: false) this.refusal, @JsonKey(includeIfNull: false) this.name, @JsonKey(name: 'tool_calls', includeIfNull: false) final List? toolCalls, @@ -54470,6 +54181,11 @@ class _$ChatCompletionAssistantMessageImpl @JsonKey(includeIfNull: false) final String? content; + /// The refusal message by the assistant. + @override + @JsonKey(includeIfNull: false) + final String? refusal; + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. @override @JsonKey(includeIfNull: false) @@ -54496,7 +54212,7 @@ class _$ChatCompletionAssistantMessageImpl @override String toString() { - return 'ChatCompletionMessage.assistant(role: $role, content: $content, name: $name, toolCalls: $toolCalls, functionCall: $functionCall)'; + return 'ChatCompletionMessage.assistant(role: $role, content: $content, refusal: $refusal, name: $name, toolCalls: $toolCalls, functionCall: $functionCall)'; } @override @@ -54506,6 +54222,7 @@ class _$ChatCompletionAssistantMessageImpl other is _$ChatCompletionAssistantMessageImpl && (identical(other.role, role) || other.role == role) && (identical(other.content, content) || other.content == content) && + (identical(other.refusal, refusal) || other.refusal == refusal) && (identical(other.name, name) || other.name == name) && const DeepCollectionEquality() .equals(other._toolCalls, _toolCalls) && @@ -54515,7 +54232,7 @@ class _$ChatCompletionAssistantMessageImpl @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, role, content, name, + int get hashCode => Object.hash(runtimeType, role, content, refusal, name, const DeepCollectionEquality().hash(_toolCalls), functionCall); /// Create a copy of ChatCompletionMessage @@ -54543,6 +54260,7 @@ class _$ChatCompletionAssistantMessageImpl required TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54556,7 +54274,7 @@ class _$ChatCompletionAssistantMessageImpl ChatCompletionMessageRole role, String? content, String name) function, }) { - return assistant(role, content, name, toolCalls, functionCall); + return assistant(role, content, refusal, name, toolCalls, functionCall); } @override @@ -54574,6 +54292,7 @@ class _$ChatCompletionAssistantMessageImpl TResult? Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54587,7 +54306,8 @@ class _$ChatCompletionAssistantMessageImpl ChatCompletionMessageRole role, String? content, String name)? function, }) { - return assistant?.call(role, content, name, toolCalls, functionCall); + return assistant?.call( + role, content, refusal, name, toolCalls, functionCall); } @override @@ -54605,6 +54325,7 @@ class _$ChatCompletionAssistantMessageImpl TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? 
refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54620,7 +54341,7 @@ class _$ChatCompletionAssistantMessageImpl required TResult orElse(), }) { if (assistant != null) { - return assistant(role, content, name, toolCalls, functionCall); + return assistant(role, content, refusal, name, toolCalls, functionCall); } return orElse(); } @@ -54677,6 +54398,7 @@ abstract class ChatCompletionAssistantMessage extends ChatCompletionMessage { const factory ChatCompletionAssistantMessage( {final ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) final String? content, + @JsonKey(includeIfNull: false) final String? refusal, @JsonKey(includeIfNull: false) final String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) final List? toolCalls, @@ -54697,6 +54419,10 @@ abstract class ChatCompletionAssistantMessage extends ChatCompletionMessage { @JsonKey(includeIfNull: false) String? get content; + /// The refusal message by the assistant. + @JsonKey(includeIfNull: false) + String? get refusal; + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. @JsonKey(includeIfNull: false) String? get name; @@ -54839,6 +54565,7 @@ class _$ChatCompletionToolMessageImpl extends ChatCompletionToolMessage { required TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54870,6 +54597,7 @@ class _$ChatCompletionToolMessageImpl extends ChatCompletionToolMessage { TResult? Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -54901,6 +54629,7 @@ class _$ChatCompletionToolMessageImpl extends ChatCompletionToolMessage { TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -55119,6 +54848,7 @@ class _$ChatCompletionFunctionMessageImpl required TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -55150,6 +54880,7 @@ class _$ChatCompletionFunctionMessageImpl TResult? Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? toolCalls, @@ -55181,6 +54912,7 @@ class _$ChatCompletionFunctionMessageImpl TResult Function( ChatCompletionMessageRole role, @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(includeIfNull: false) String? name, @JsonKey(name: 'tool_calls', includeIfNull: false) List? 
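// Sketch of consuming the new `refusal` field added to
// `ChatCompletionAssistantMessage` in this diff; both getters are generated
// above, and only the empty-string fallback is invented for illustration.
String assistantText(ChatCompletionAssistantMessage message) =>
    message.refusal ?? message.content ?? '';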
toolCalls, @@ -55733,8 +55465,10 @@ ChatCompletionMessageContentPart _$ChatCompletionMessageContentPartFromJson( switch (json['type']) { case 'text': return ChatCompletionMessageContentPartText.fromJson(json); - case 'image_url': + case 'image': return ChatCompletionMessageContentPartImage.fromJson(json); + case 'refusal': + return ChatCompletionMessageContentPartRefusal.fromJson(json); default: throw CheckedFromJsonException( @@ -55758,6 +55492,9 @@ mixin _$ChatCompletionMessageContentPart { required TResult Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) image, + required TResult Function( + ChatCompletionMessageContentPartType type, String refusal) + refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -55767,6 +55504,9 @@ mixin _$ChatCompletionMessageContentPart { TResult? Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? image, + TResult? Function( + ChatCompletionMessageContentPartType type, String refusal)? + refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -55776,6 +55516,8 @@ mixin _$ChatCompletionMessageContentPart { TResult Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? image, + TResult Function(ChatCompletionMessageContentPartType type, String refusal)? + refusal, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -55784,18 +55526,22 @@ mixin _$ChatCompletionMessageContentPart { required TResult Function(ChatCompletionMessageContentPartText value) text, required TResult Function(ChatCompletionMessageContentPartImage value) image, + required TResult Function(ChatCompletionMessageContentPartRefusal value) + refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(ChatCompletionMessageContentPartText value)? text, TResult? Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(ChatCompletionMessageContentPartText value)? text, TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -55950,6 +55696,9 @@ class _$ChatCompletionMessageContentPartTextImpl required TResult Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) image, + required TResult Function( + ChatCompletionMessageContentPartType type, String refusal) + refusal, }) { return text(type, this.text); } @@ -55962,6 +55711,9 @@ class _$ChatCompletionMessageContentPartTextImpl TResult? Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? image, + TResult? Function( + ChatCompletionMessageContentPartType type, String refusal)? + refusal, }) { return text?.call(type, this.text); } @@ -55974,6 +55726,8 @@ class _$ChatCompletionMessageContentPartTextImpl TResult Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? image, + TResult Function(ChatCompletionMessageContentPartType type, String refusal)? 
+ refusal, required TResult orElse(), }) { if (text != null) { @@ -55988,6 +55742,8 @@ class _$ChatCompletionMessageContentPartTextImpl required TResult Function(ChatCompletionMessageContentPartText value) text, required TResult Function(ChatCompletionMessageContentPartImage value) image, + required TResult Function(ChatCompletionMessageContentPartRefusal value) + refusal, }) { return text(this); } @@ -55997,6 +55753,7 @@ class _$ChatCompletionMessageContentPartTextImpl TResult? mapOrNull({ TResult? Function(ChatCompletionMessageContentPartText value)? text, TResult? Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, }) { return text?.call(this); } @@ -56006,6 +55763,7 @@ class _$ChatCompletionMessageContentPartTextImpl TResult maybeMap({ TResult Function(ChatCompletionMessageContentPartText value)? text, TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal, required TResult orElse(), }) { if (text != null) { @@ -56168,6 +55926,9 @@ class _$ChatCompletionMessageContentPartImageImpl required TResult Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) image, + required TResult Function( + ChatCompletionMessageContentPartType type, String refusal) + refusal, }) { return image(type, imageUrl); } @@ -56180,6 +55941,9 @@ class _$ChatCompletionMessageContentPartImageImpl TResult? Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? image, + TResult? Function( + ChatCompletionMessageContentPartType type, String refusal)? + refusal, }) { return image?.call(type, imageUrl); } @@ -56192,6 +55956,8 @@ class _$ChatCompletionMessageContentPartImageImpl TResult Function(ChatCompletionMessageContentPartType type, @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? image, + TResult Function(ChatCompletionMessageContentPartType type, String refusal)? + refusal, required TResult orElse(), }) { if (image != null) { @@ -56206,6 +55972,8 @@ class _$ChatCompletionMessageContentPartImageImpl required TResult Function(ChatCompletionMessageContentPartText value) text, required TResult Function(ChatCompletionMessageContentPartImage value) image, + required TResult Function(ChatCompletionMessageContentPartRefusal value) + refusal, }) { return image(this); } @@ -56215,6 +55983,7 @@ class _$ChatCompletionMessageContentPartImageImpl TResult? mapOrNull({ TResult? Function(ChatCompletionMessageContentPartText value)? text, TResult? Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, }) { return image?.call(this); } @@ -56224,6 +55993,7 @@ class _$ChatCompletionMessageContentPartImageImpl TResult maybeMap({ TResult Function(ChatCompletionMessageContentPartText value)? text, TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(ChatCompletionMessageContentPartRefusal value)? 
refusal, required TResult orElse(), }) { if (image != null) { @@ -56270,6 +56040,221 @@ abstract class ChatCompletionMessageContentPartImage get copyWith => throw _privateConstructorUsedError; } +/// @nodoc +abstract class _$$ChatCompletionMessageContentPartRefusalImplCopyWith<$Res> + implements $ChatCompletionMessageContentPartCopyWith<$Res> { + factory _$$ChatCompletionMessageContentPartRefusalImplCopyWith( + _$ChatCompletionMessageContentPartRefusalImpl value, + $Res Function(_$ChatCompletionMessageContentPartRefusalImpl) then) = + __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ChatCompletionMessageContentPartType type, String refusal}); +} + +/// @nodoc +class __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageContentPartCopyWithImpl<$Res, + _$ChatCompletionMessageContentPartRefusalImpl> + implements _$$ChatCompletionMessageContentPartRefusalImplCopyWith<$Res> { + __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl( + _$ChatCompletionMessageContentPartRefusalImpl _value, + $Res Function(_$ChatCompletionMessageContentPartRefusalImpl) _then) + : super(_value, _then); + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? refusal = null, + }) { + return _then(_$ChatCompletionMessageContentPartRefusalImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageContentPartType, + refusal: null == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionMessageContentPartRefusalImpl + extends ChatCompletionMessageContentPartRefusal { + const _$ChatCompletionMessageContentPartRefusalImpl( + {this.type = ChatCompletionMessageContentPartType.refusal, + required this.refusal}) + : super._(); + + factory _$ChatCompletionMessageContentPartRefusalImpl.fromJson( + Map json) => + _$$ChatCompletionMessageContentPartRefusalImplFromJson(json); + + /// The type of the content part, in this case `refusal`. + @override + @JsonKey() + final ChatCompletionMessageContentPartType type; + + /// The refusal message generated by the model. + @override + final String refusal; + + @override + String toString() { + return 'ChatCompletionMessageContentPart.refusal(type: $type, refusal: $refusal)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ChatCompletionMessageContentPartRefusalImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.refusal, refusal) || other.refusal == refusal)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, type, refusal); + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ChatCompletionMessageContentPartRefusalImplCopyWith< + _$ChatCompletionMessageContentPartRefusalImpl> + get copyWith => + __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl< + _$ChatCompletionMessageContentPartRefusalImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function( + ChatCompletionMessageContentPartType type, String text) + text, + required TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) + image, + required TResult Function( + ChatCompletionMessageContentPartType type, String refusal) + refusal, + }) { + return refusal(type, this.refusal); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult? Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, + TResult? Function( + ChatCompletionMessageContentPartType type, String refusal)? + refusal, + }) { + return refusal?.call(type, this.refusal); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, + TResult Function(ChatCompletionMessageContentPartType type, String refusal)? + refusal, + required TResult orElse(), + }) { + if (refusal != null) { + return refusal(type, this.refusal); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionMessageContentPartText value) text, + required TResult Function(ChatCompletionMessageContentPartImage value) + image, + required TResult Function(ChatCompletionMessageContentPartRefusal value) + refusal, + }) { + return refusal(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionMessageContentPartText value)? text, + TResult? Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, + }) { + return refusal?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionMessageContentPartText value)? text, + TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal, + required TResult orElse(), + }) { + if (refusal != null) { + return refusal(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ChatCompletionMessageContentPartRefusalImplToJson( + this, + ); + } +} + +abstract class ChatCompletionMessageContentPartRefusal + extends ChatCompletionMessageContentPart { + const factory ChatCompletionMessageContentPartRefusal( + {final ChatCompletionMessageContentPartType type, + required final String refusal}) = + _$ChatCompletionMessageContentPartRefusalImpl; + const ChatCompletionMessageContentPartRefusal._() : super._(); + + factory ChatCompletionMessageContentPartRefusal.fromJson( + Map json) = + _$ChatCompletionMessageContentPartRefusalImpl.fromJson; + + /// The type of the content part, in this case `refusal`. + @override + ChatCompletionMessageContentPartType get type; + + /// The refusal message generated by the model. 
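// Sketch of handling the new `refusal` union case with the generated `when`
// shown above; the callback names and signatures mirror the generated code,
// and only the returned strings are invented for illustration.
String describeContentPart(ChatCompletionMessageContentPart part) {
  return part.when(
    text: (type, text) => 'text: $text',
    image: (type, imageUrl) => 'image: $imageUrl',
    refusal: (type, refusal) => 'refusal: $refusal',
  );
}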
+ String get refusal; + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionMessageContentPartRefusalImplCopyWith< + _$ChatCompletionMessageContentPartRefusalImpl> + get copyWith => throw _privateConstructorUsedError; +} + ChatCompletionMessageImageUrl _$ChatCompletionMessageImageUrlFromJson( Map json) { return _ChatCompletionMessageImageUrl.fromJson(json); @@ -56465,6 +56450,692 @@ abstract class _ChatCompletionMessageImageUrl get copyWith => throw _privateConstructorUsedError; } +ResponseFormat _$ResponseFormatFromJson(Map json) { + switch (json['type']) { + case 'text': + return ResponseFormatText.fromJson(json); + case 'json_object': + return ResponseFormatJsonObject.fromJson(json); + case 'json_schema': + return ResponseFormatJsonSchema.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'type', 'ResponseFormat', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$ResponseFormat { + /// The type of response format being defined. + ResponseFormatType get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(ResponseFormatType type) text, + required TResult Function(ResponseFormatType type) jsonObject, + required TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) + jsonSchema, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ResponseFormatType type)? text, + TResult? Function(ResponseFormatType type)? jsonObject, + TResult? Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ResponseFormatType type)? text, + TResult Function(ResponseFormatType type)? jsonObject, + TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(ResponseFormatText value) text, + required TResult Function(ResponseFormatJsonObject value) jsonObject, + required TResult Function(ResponseFormatJsonSchema value) jsonSchema, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ResponseFormatText value)? text, + TResult? Function(ResponseFormatJsonObject value)? jsonObject, + TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ResponseFormatText value)? text, + TResult Function(ResponseFormatJsonObject value)? jsonObject, + TResult Function(ResponseFormatJsonSchema value)? jsonSchema, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + + /// Serializes this ResponseFormat to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $ResponseFormatCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ResponseFormatCopyWith<$Res> { + factory $ResponseFormatCopyWith( + ResponseFormat value, $Res Function(ResponseFormat) then) = + _$ResponseFormatCopyWithImpl<$Res, ResponseFormat>; + @useResult + $Res call({ResponseFormatType type}); +} + +/// @nodoc +class _$ResponseFormatCopyWithImpl<$Res, $Val extends ResponseFormat> + implements $ResponseFormatCopyWith<$Res> { + _$ResponseFormatCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ResponseFormatType, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ResponseFormatTextImplCopyWith<$Res> + implements $ResponseFormatCopyWith<$Res> { + factory _$$ResponseFormatTextImplCopyWith(_$ResponseFormatTextImpl value, + $Res Function(_$ResponseFormatTextImpl) then) = + __$$ResponseFormatTextImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ResponseFormatType type}); +} + +/// @nodoc +class __$$ResponseFormatTextImplCopyWithImpl<$Res> + extends _$ResponseFormatCopyWithImpl<$Res, _$ResponseFormatTextImpl> + implements _$$ResponseFormatTextImplCopyWith<$Res> { + __$$ResponseFormatTextImplCopyWithImpl(_$ResponseFormatTextImpl _value, + $Res Function(_$ResponseFormatTextImpl) _then) + : super(_value, _then); + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$ResponseFormatTextImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ResponseFormatType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ResponseFormatTextImpl extends ResponseFormatText { + const _$ResponseFormatTextImpl({this.type = ResponseFormatType.text}) + : super._(); + + factory _$ResponseFormatTextImpl.fromJson(Map json) => + _$$ResponseFormatTextImplFromJson(json); + + /// The type of response format being defined. + @override + @JsonKey() + final ResponseFormatType type; + + @override + String toString() { + return 'ResponseFormat.text(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ResponseFormatTextImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, type); + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ResponseFormatTextImplCopyWith<_$ResponseFormatTextImpl> get copyWith => + __$$ResponseFormatTextImplCopyWithImpl<_$ResponseFormatTextImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(ResponseFormatType type) text, + required TResult Function(ResponseFormatType type) jsonObject, + required TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) + jsonSchema, + }) { + return text(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ResponseFormatType type)? text, + TResult? Function(ResponseFormatType type)? jsonObject, + TResult? Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + }) { + return text?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ResponseFormatType type)? text, + TResult Function(ResponseFormatType type)? jsonObject, + TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + required TResult orElse(), + }) { + if (text != null) { + return text(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ResponseFormatText value) text, + required TResult Function(ResponseFormatJsonObject value) jsonObject, + required TResult Function(ResponseFormatJsonSchema value) jsonSchema, + }) { + return text(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ResponseFormatText value)? text, + TResult? Function(ResponseFormatJsonObject value)? jsonObject, + TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, + }) { + return text?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ResponseFormatText value)? text, + TResult Function(ResponseFormatJsonObject value)? jsonObject, + TResult Function(ResponseFormatJsonSchema value)? jsonSchema, + required TResult orElse(), + }) { + if (text != null) { + return text(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ResponseFormatTextImplToJson( + this, + ); + } +} + +abstract class ResponseFormatText extends ResponseFormat { + const factory ResponseFormatText({final ResponseFormatType type}) = + _$ResponseFormatTextImpl; + const ResponseFormatText._() : super._(); + + factory ResponseFormatText.fromJson(Map json) = + _$ResponseFormatTextImpl.fromJson; + + /// The type of response format being defined. + @override + ResponseFormatType get type; + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ResponseFormatTextImplCopyWith<_$ResponseFormatTextImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ResponseFormatJsonObjectImplCopyWith<$Res> + implements $ResponseFormatCopyWith<$Res> { + factory _$$ResponseFormatJsonObjectImplCopyWith( + _$ResponseFormatJsonObjectImpl value, + $Res Function(_$ResponseFormatJsonObjectImpl) then) = + __$$ResponseFormatJsonObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ResponseFormatType type}); +} + +/// @nodoc +class __$$ResponseFormatJsonObjectImplCopyWithImpl<$Res> + extends _$ResponseFormatCopyWithImpl<$Res, _$ResponseFormatJsonObjectImpl> + implements _$$ResponseFormatJsonObjectImplCopyWith<$Res> { + __$$ResponseFormatJsonObjectImplCopyWithImpl( + _$ResponseFormatJsonObjectImpl _value, + $Res Function(_$ResponseFormatJsonObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$ResponseFormatJsonObjectImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ResponseFormatType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ResponseFormatJsonObjectImpl extends ResponseFormatJsonObject { + const _$ResponseFormatJsonObjectImpl( + {this.type = ResponseFormatType.jsonObject}) + : super._(); + + factory _$ResponseFormatJsonObjectImpl.fromJson(Map json) => + _$$ResponseFormatJsonObjectImplFromJson(json); + + /// The type of response format being defined. + @override + @JsonKey() + final ResponseFormatType type; + + @override + String toString() { + return 'ResponseFormat.jsonObject(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ResponseFormatJsonObjectImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, type); + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ResponseFormatJsonObjectImplCopyWith<_$ResponseFormatJsonObjectImpl> + get copyWith => __$$ResponseFormatJsonObjectImplCopyWithImpl< + _$ResponseFormatJsonObjectImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(ResponseFormatType type) text, + required TResult Function(ResponseFormatType type) jsonObject, + required TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) + jsonSchema, + }) { + return jsonObject(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ResponseFormatType type)? text, + TResult? Function(ResponseFormatType type)? jsonObject, + TResult? Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + }) { + return jsonObject?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ResponseFormatType type)? text, + TResult Function(ResponseFormatType type)? jsonObject, + TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? 
+ jsonSchema, + required TResult orElse(), + }) { + if (jsonObject != null) { + return jsonObject(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ResponseFormatText value) text, + required TResult Function(ResponseFormatJsonObject value) jsonObject, + required TResult Function(ResponseFormatJsonSchema value) jsonSchema, + }) { + return jsonObject(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ResponseFormatText value)? text, + TResult? Function(ResponseFormatJsonObject value)? jsonObject, + TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, + }) { + return jsonObject?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ResponseFormatText value)? text, + TResult Function(ResponseFormatJsonObject value)? jsonObject, + TResult Function(ResponseFormatJsonSchema value)? jsonSchema, + required TResult orElse(), + }) { + if (jsonObject != null) { + return jsonObject(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ResponseFormatJsonObjectImplToJson( + this, + ); + } +} + +abstract class ResponseFormatJsonObject extends ResponseFormat { + const factory ResponseFormatJsonObject({final ResponseFormatType type}) = + _$ResponseFormatJsonObjectImpl; + const ResponseFormatJsonObject._() : super._(); + + factory ResponseFormatJsonObject.fromJson(Map json) = + _$ResponseFormatJsonObjectImpl.fromJson; + + /// The type of response format being defined. + @override + ResponseFormatType get type; + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ResponseFormatJsonObjectImplCopyWith<_$ResponseFormatJsonObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ResponseFormatJsonSchemaImplCopyWith<$Res> + implements $ResponseFormatCopyWith<$Res> { + factory _$$ResponseFormatJsonSchemaImplCopyWith( + _$ResponseFormatJsonSchemaImpl value, + $Res Function(_$ResponseFormatJsonSchemaImpl) then) = + __$$ResponseFormatJsonSchemaImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema}); + + $JsonSchemaObjectCopyWith<$Res> get jsonSchema; +} + +/// @nodoc +class __$$ResponseFormatJsonSchemaImplCopyWithImpl<$Res> + extends _$ResponseFormatCopyWithImpl<$Res, _$ResponseFormatJsonSchemaImpl> + implements _$$ResponseFormatJsonSchemaImplCopyWith<$Res> { + __$$ResponseFormatJsonSchemaImplCopyWithImpl( + _$ResponseFormatJsonSchemaImpl _value, + $Res Function(_$ResponseFormatJsonSchemaImpl) _then) + : super(_value, _then); + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? jsonSchema = null, + }) { + return _then(_$ResponseFormatJsonSchemaImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ResponseFormatType, + jsonSchema: null == jsonSchema + ? _value.jsonSchema + : jsonSchema // ignore: cast_nullable_to_non_nullable + as JsonSchemaObject, + )); + } + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @override + @pragma('vm:prefer-inline') + $JsonSchemaObjectCopyWith<$Res> get jsonSchema { + return $JsonSchemaObjectCopyWith<$Res>(_value.jsonSchema, (value) { + return _then(_value.copyWith(jsonSchema: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ResponseFormatJsonSchemaImpl extends ResponseFormatJsonSchema { + const _$ResponseFormatJsonSchemaImpl( + {this.type = ResponseFormatType.jsonSchema, + @JsonKey(name: 'json_schema') required this.jsonSchema}) + : super._(); + + factory _$ResponseFormatJsonSchemaImpl.fromJson(Map json) => + _$$ResponseFormatJsonSchemaImplFromJson(json); + + /// The type of response format being defined. + @override + @JsonKey() + final ResponseFormatType type; + + /// A JSON Schema object. + @override + @JsonKey(name: 'json_schema') + final JsonSchemaObject jsonSchema; + + @override + String toString() { + return 'ResponseFormat.jsonSchema(type: $type, jsonSchema: $jsonSchema)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ResponseFormatJsonSchemaImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.jsonSchema, jsonSchema) || + other.jsonSchema == jsonSchema)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, type, jsonSchema); + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ResponseFormatJsonSchemaImplCopyWith<_$ResponseFormatJsonSchemaImpl> + get copyWith => __$$ResponseFormatJsonSchemaImplCopyWithImpl< + _$ResponseFormatJsonSchemaImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(ResponseFormatType type) text, + required TResult Function(ResponseFormatType type) jsonObject, + required TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) + jsonSchema, + }) { + return jsonSchema(type, this.jsonSchema); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ResponseFormatType type)? text, + TResult? Function(ResponseFormatType type)? jsonObject, + TResult? Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + }) { + return jsonSchema?.call(type, this.jsonSchema); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ResponseFormatType type)? text, + TResult Function(ResponseFormatType type)? jsonObject, + TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + required TResult orElse(), + }) { + if (jsonSchema != null) { + return jsonSchema(type, this.jsonSchema); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ResponseFormatText value) text, + required TResult Function(ResponseFormatJsonObject value) jsonObject, + required TResult Function(ResponseFormatJsonSchema value) jsonSchema, + }) { + return jsonSchema(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ResponseFormatText value)? text, + TResult? Function(ResponseFormatJsonObject value)? jsonObject, + TResult? Function(ResponseFormatJsonSchema value)? 
jsonSchema, + }) { + return jsonSchema?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ResponseFormatText value)? text, + TResult Function(ResponseFormatJsonObject value)? jsonObject, + TResult Function(ResponseFormatJsonSchema value)? jsonSchema, + required TResult orElse(), + }) { + if (jsonSchema != null) { + return jsonSchema(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ResponseFormatJsonSchemaImplToJson( + this, + ); + } +} + +abstract class ResponseFormatJsonSchema extends ResponseFormat { + const factory ResponseFormatJsonSchema( + {final ResponseFormatType type, + @JsonKey(name: 'json_schema') + required final JsonSchemaObject jsonSchema}) = + _$ResponseFormatJsonSchemaImpl; + const ResponseFormatJsonSchema._() : super._(); + + factory ResponseFormatJsonSchema.fromJson(Map json) = + _$ResponseFormatJsonSchemaImpl.fromJson; + + /// The type of response format being defined. + @override + ResponseFormatType get type; + + /// A JSON Schema object. + @JsonKey(name: 'json_schema') + JsonSchemaObject get jsonSchema; + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ResponseFormatJsonSchemaImplCopyWith<_$ResponseFormatJsonSchemaImpl> + get copyWith => throw _privateConstructorUsedError; +} + AssistantTools _$AssistantToolsFromJson(Map json) { switch (json['type']) { case 'code_interpreter': @@ -57229,7 +57900,7 @@ mixin _$AssistantToolsFileSearchFileSearch { /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults => throw _privateConstructorUsedError; @@ -57341,7 +58012,7 @@ class _$AssistantToolsFileSearchFileSearchImpl /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. @override @JsonKey(name: 'max_num_results', includeIfNull: false) final int? maxNumResults; @@ -57397,7 +58068,7 @@ abstract class _AssistantToolsFileSearchFileSearch /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. @override @JsonKey(name: 'max_num_results', includeIfNull: false) int? 
get maxNumResults; @@ -57419,6 +58090,8 @@ MessageContent _$MessageContentFromJson(Map json) { return MessageContentImageUrlObject.fromJson(json); case 'text': return MessageContentTextObject.fromJson(json); + case 'refusal': + return MessageContentRefusalObject.fromJson(json); default: throw CheckedFromJsonException(json, 'type', 'MessageContent', @@ -57439,6 +58112,7 @@ mixin _$MessageContent { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) imageUrl, required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -57450,6 +58124,7 @@ mixin _$MessageContent { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -57461,6 +58136,7 @@ mixin _$MessageContent { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? refusal, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -57469,6 +58145,7 @@ mixin _$MessageContent { required TResult Function(MessageContentImageFileObject value) imageFile, required TResult Function(MessageContentImageUrlObject value) imageUrl, required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -57476,6 +58153,7 @@ mixin _$MessageContent { TResult? Function(MessageContentImageFileObject value)? imageFile, TResult? Function(MessageContentImageUrlObject value)? imageUrl, TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -57483,6 +58161,7 @@ mixin _$MessageContent { TResult Function(MessageContentImageFileObject value)? imageFile, TResult Function(MessageContentImageUrlObject value)? imageUrl, TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? refusal, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -57651,6 +58330,7 @@ class _$MessageContentImageFileObjectImpl @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) imageUrl, required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, }) { return imageFile(type, this.imageFile); } @@ -57665,6 +58345,7 @@ class _$MessageContentImageFileObjectImpl @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? refusal, }) { return imageFile?.call(type, this.imageFile); } @@ -57679,6 +58360,7 @@ class _$MessageContentImageFileObjectImpl @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? 
refusal, required TResult orElse(), }) { if (imageFile != null) { @@ -57693,6 +58375,7 @@ class _$MessageContentImageFileObjectImpl required TResult Function(MessageContentImageFileObject value) imageFile, required TResult Function(MessageContentImageUrlObject value) imageUrl, required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, }) { return imageFile(this); } @@ -57703,6 +58386,7 @@ class _$MessageContentImageFileObjectImpl TResult? Function(MessageContentImageFileObject value)? imageFile, TResult? Function(MessageContentImageUrlObject value)? imageUrl, TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, }) { return imageFile?.call(this); } @@ -57713,6 +58397,7 @@ class _$MessageContentImageFileObjectImpl TResult Function(MessageContentImageFileObject value)? imageFile, TResult Function(MessageContentImageUrlObject value)? imageUrl, TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? refusal, required TResult orElse(), }) { if (imageFile != null) { @@ -57875,6 +58560,7 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) imageUrl, required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, }) { return imageUrl(type, this.imageUrl); } @@ -57889,6 +58575,7 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? refusal, }) { return imageUrl?.call(type, this.imageUrl); } @@ -57903,6 +58590,7 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? refusal, required TResult orElse(), }) { if (imageUrl != null) { @@ -57917,6 +58605,7 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { required TResult Function(MessageContentImageFileObject value) imageFile, required TResult Function(MessageContentImageUrlObject value) imageUrl, required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, }) { return imageUrl(this); } @@ -57927,6 +58616,7 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { TResult? Function(MessageContentImageFileObject value)? imageFile, TResult? Function(MessageContentImageUrlObject value)? imageUrl, TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, }) { return imageUrl?.call(this); } @@ -57937,6 +58627,7 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { TResult Function(MessageContentImageFileObject value)? imageFile, TResult Function(MessageContentImageUrlObject value)? imageUrl, TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? 
refusal, required TResult orElse(), }) { if (imageUrl != null) { @@ -58090,6 +58781,7 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) imageUrl, required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, }) { return text(type, this.text); } @@ -58104,6 +58796,7 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? refusal, }) { return text?.call(type, this.text); } @@ -58118,6 +58811,7 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? imageUrl, TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? refusal, required TResult orElse(), }) { if (text != null) { @@ -58132,6 +58826,7 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { required TResult Function(MessageContentImageFileObject value) imageFile, required TResult Function(MessageContentImageUrlObject value) imageUrl, required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, }) { return text(this); } @@ -58142,6 +58837,7 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { TResult? Function(MessageContentImageFileObject value)? imageFile, TResult? Function(MessageContentImageUrlObject value)? imageUrl, TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, }) { return text?.call(this); } @@ -58152,6 +58848,7 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { TResult Function(MessageContentImageFileObject value)? imageFile, TResult Function(MessageContentImageUrlObject value)? imageUrl, TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? refusal, required TResult orElse(), }) { if (text != null) { @@ -58192,12 +58889,223 @@ abstract class MessageContentTextObject extends MessageContent { get copyWith => throw _privateConstructorUsedError; } +/// @nodoc +abstract class _$$MessageContentRefusalObjectImplCopyWith<$Res> + implements $MessageContentCopyWith<$Res> { + factory _$$MessageContentRefusalObjectImplCopyWith( + _$MessageContentRefusalObjectImpl value, + $Res Function(_$MessageContentRefusalObjectImpl) then) = + __$$MessageContentRefusalObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String type, String refusal}); +} + +/// @nodoc +class __$$MessageContentRefusalObjectImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, + _$MessageContentRefusalObjectImpl> + implements _$$MessageContentRefusalObjectImplCopyWith<$Res> { + __$$MessageContentRefusalObjectImplCopyWithImpl( + _$MessageContentRefusalObjectImpl _value, + $Res Function(_$MessageContentRefusalObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? refusal = null, + }) { + return _then(_$MessageContentRefusalObjectImpl( + type: null == type + ? 
_value.type
+          : type // ignore: cast_nullable_to_non_nullable
+              as String,
+      refusal: null == refusal
+          ? _value.refusal
+          : refusal // ignore: cast_nullable_to_non_nullable
+              as String,
+    ));
+  }
+}
+
+/// @nodoc
+@JsonSerializable()
+class _$MessageContentRefusalObjectImpl extends MessageContentRefusalObject {
+  const _$MessageContentRefusalObjectImpl(
+      {required this.type, required this.refusal})
+      : super._();
+
+  factory _$MessageContentRefusalObjectImpl.fromJson(
+          Map<String, dynamic> json) =>
+      _$$MessageContentRefusalObjectImplFromJson(json);
+
+  /// Always `refusal`.
+  @override
+  final String type;
+
+  /// The refusal content generated by the assistant.
+  @override
+  final String refusal;
+
+  @override
+  String toString() {
+    return 'MessageContent.refusal(type: $type, refusal: $refusal)';
+  }
+
+  @override
+  bool operator ==(Object other) {
+    return identical(this, other) ||
+        (other.runtimeType == runtimeType &&
+            other is _$MessageContentRefusalObjectImpl &&
+            (identical(other.type, type) || other.type == type) &&
+            (identical(other.refusal, refusal) || other.refusal == refusal));
+  }
+
+  @JsonKey(includeFromJson: false, includeToJson: false)
+  @override
+  int get hashCode => Object.hash(runtimeType, type, refusal);
+
+  /// Create a copy of MessageContent
+  /// with the given fields replaced by the non-null parameter values.
+  @JsonKey(includeFromJson: false, includeToJson: false)
+  @override
+  @pragma('vm:prefer-inline')
+  _$$MessageContentRefusalObjectImplCopyWith<_$MessageContentRefusalObjectImpl>
+      get copyWith => __$$MessageContentRefusalObjectImplCopyWithImpl<
+          _$MessageContentRefusalObjectImpl>(this, _$identity);
+
+  @override
+  @optionalTypeArgs
+  TResult when({
+    required TResult Function(String type,
+            @JsonKey(name: 'image_file') MessageContentImageFile imageFile)
+        imageFile,
+    required TResult Function(String type,
+            @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)
+        imageUrl,
+    required TResult Function(String type, MessageContentText text) text,
+    required TResult Function(String type, String refusal) refusal,
+  }) {
+    return refusal(type, this.refusal);
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult? whenOrNull({
+    TResult? Function(String type,
+            @JsonKey(name: 'image_file') MessageContentImageFile imageFile)?
+        imageFile,
+    TResult? Function(String type,
+            @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)?
+        imageUrl,
+    TResult? Function(String type, MessageContentText text)? text,
+    TResult? Function(String type, String refusal)? refusal,
+  }) {
+    return refusal?.call(type, this.refusal);
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult maybeWhen({
+    TResult Function(String type,
+            @JsonKey(name: 'image_file') MessageContentImageFile imageFile)?
+        imageFile,
+    TResult Function(String type,
+            @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)?
+        imageUrl,
+    TResult Function(String type, MessageContentText text)? text,
+    TResult Function(String type, String refusal)? refusal,
+    required TResult orElse(),
+  }) {
+    if (refusal != null) {
+      return refusal(type, this.refusal);
+    }
+    return orElse();
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult map({
+    required TResult Function(MessageContentImageFileObject value) imageFile,
+    required TResult Function(MessageContentImageUrlObject value) imageUrl,
+    required TResult Function(MessageContentTextObject value) text,
+    required TResult Function(MessageContentRefusalObject value) refusal,
+  }) {
+    return refusal(this);
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult? mapOrNull({
+    TResult? Function(MessageContentImageFileObject value)? imageFile,
+    TResult? Function(MessageContentImageUrlObject value)? imageUrl,
+    TResult? Function(MessageContentTextObject value)? text,
+    TResult? Function(MessageContentRefusalObject value)? refusal,
+  }) {
+    return refusal?.call(this);
+  }
+
+  @override
+  @optionalTypeArgs
+  TResult maybeMap({
+    TResult Function(MessageContentImageFileObject value)? imageFile,
+    TResult Function(MessageContentImageUrlObject value)? imageUrl,
+    TResult Function(MessageContentTextObject value)? text,
+    TResult Function(MessageContentRefusalObject value)? refusal,
+    required TResult orElse(),
+  }) {
+    if (refusal != null) {
+      return refusal(this);
+    }
+    return orElse();
+  }
+
+  @override
+  Map<String, dynamic> toJson() {
+    return _$$MessageContentRefusalObjectImplToJson(
+      this,
+    );
+  }
+}
+
+abstract class MessageContentRefusalObject extends MessageContent {
+  const factory MessageContentRefusalObject(
+      {required final String type,
+      required final String refusal}) = _$MessageContentRefusalObjectImpl;
+  const MessageContentRefusalObject._() : super._();
+
+  factory MessageContentRefusalObject.fromJson(Map<String, dynamic> json) =
+      _$MessageContentRefusalObjectImpl.fromJson;
+
+  /// Always `refusal`.
+  @override
+  String get type;
+
+  /// The refusal content generated by the assistant.
+  String get refusal;
+
+  /// Create a copy of MessageContent
+  /// with the given fields replaced by the non-null parameter values.
+  @override
+  @JsonKey(includeFromJson: false, includeToJson: false)
+  _$$MessageContentRefusalObjectImplCopyWith<_$MessageContentRefusalObjectImpl>
+      get copyWith => throw _privateConstructorUsedError;
+}
+
 MessageDeltaContent _$MessageDeltaContentFromJson(Map<String, dynamic> json) {
   switch (json['type']) {
     case 'image_file':
       return MessageDeltaContentImageFileObject.fromJson(json);
     case 'text':
       return MessageDeltaContentTextObject.fromJson(json);
+    case 'refusal':
+      return MessageDeltaContentRefusalObject.fromJson(json);
+    case 'image_url':
+      return MessageDeltaContentImageUrlObject.fromJson(json);
     default:
       throw CheckedFromJsonException(json, 'type', 'MessageDeltaContent',
@@ -58223,6 +59131,15 @@ mixin _$MessageDeltaContent {
     required TResult Function(int index, String type,
             @JsonKey(includeIfNull: false) MessageDeltaContentText? text)
         text,
+    required TResult Function(int index, String type,
+            @JsonKey(includeIfNull: false) String? refusal)
+        refusal,
+    required TResult Function(
+            int index,
+            String type,
+            @JsonKey(name: 'image_url', includeIfNull: false)
+            MessageContentImageUrl? imageUrl)
+        imageUrl,
   }) =>
       throw _privateConstructorUsedError;
   @optionalTypeArgs
@@ -58236,6 +59153,15 @@ mixin _$MessageDeltaContent {
     TResult? Function(int index, String type,
             @JsonKey(includeIfNull: false) MessageDeltaContentText? text)?
         text,
+    TResult? Function(int index, String type,
+            @JsonKey(includeIfNull: false) String? refusal)?
+        refusal,
+    TResult? Function(
+            int index,
+            String type,
+            @JsonKey(name: 'image_url', includeIfNull: false)
+            MessageContentImageUrl? imageUrl)?
+        imageUrl,
   }) =>
       throw _privateConstructorUsedError;
   @optionalTypeArgs
@@ -58249,6 +59175,15 @@ mixin _$MessageDeltaContent {
     TResult Function(int index, String type,
             @JsonKey(includeIfNull: false) MessageDeltaContentText? text)?
         text,
+    TResult Function(int index, String type,
+            @JsonKey(includeIfNull: false) String? refusal)?
+        refusal,
+    TResult Function(
+            int index,
+            String type,
+            @JsonKey(name: 'image_url', includeIfNull: false)
+            MessageContentImageUrl? imageUrl)?
+ imageUrl, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -58257,18 +59192,24 @@ mixin _$MessageDeltaContent { required TResult Function(MessageDeltaContentImageFileObject value) imageFile, required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(MessageDeltaContentImageFileObject value)? imageFile, TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -58460,6 +59401,15 @@ class _$MessageDeltaContentImageFileObjectImpl required TResult Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text) text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) + imageUrl, }) { return imageFile(index, type, this.imageFile); } @@ -58476,6 +59426,15 @@ class _$MessageDeltaContentImageFileObjectImpl TResult? Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, }) { return imageFile?.call(index, type, this.imageFile); } @@ -58492,6 +59451,15 @@ class _$MessageDeltaContentImageFileObjectImpl TResult Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, required TResult orElse(), }) { if (imageFile != null) { @@ -58506,6 +59474,8 @@ class _$MessageDeltaContentImageFileObjectImpl required TResult Function(MessageDeltaContentImageFileObject value) imageFile, required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, }) { return imageFile(this); } @@ -58515,6 +59485,8 @@ class _$MessageDeltaContentImageFileObjectImpl TResult? mapOrNull({ TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? 
imageUrl, }) { return imageFile?.call(this); } @@ -58524,6 +59496,8 @@ class _$MessageDeltaContentImageFileObjectImpl TResult maybeMap({ TResult Function(MessageDeltaContentImageFileObject value)? imageFile, TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, required TResult orElse(), }) { if (imageFile != null) { @@ -58709,6 +59683,15 @@ class _$MessageDeltaContentTextObjectImpl required TResult Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text) text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) + imageUrl, }) { return text(index, type, this.text); } @@ -58725,6 +59708,15 @@ class _$MessageDeltaContentTextObjectImpl TResult? Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, }) { return text?.call(index, type, this.text); } @@ -58741,6 +59733,15 @@ class _$MessageDeltaContentTextObjectImpl TResult Function(int index, String type, @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, required TResult orElse(), }) { if (text != null) { @@ -58755,6 +59756,8 @@ class _$MessageDeltaContentTextObjectImpl required TResult Function(MessageDeltaContentImageFileObject value) imageFile, required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, }) { return text(this); } @@ -58764,6 +59767,8 @@ class _$MessageDeltaContentTextObjectImpl TResult? mapOrNull({ TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, }) { return text?.call(this); } @@ -58773,6 +59778,8 @@ class _$MessageDeltaContentTextObjectImpl TResult maybeMap({ TResult Function(MessageDeltaContentImageFileObject value)? imageFile, TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? 
imageUrl, required TResult orElse(), }) { if (text != null) { @@ -58821,6 +59828,552 @@ abstract class MessageDeltaContentTextObject extends MessageDeltaContent { get copyWith => throw _privateConstructorUsedError; } +/// @nodoc +abstract class _$$MessageDeltaContentRefusalObjectImplCopyWith<$Res> + implements $MessageDeltaContentCopyWith<$Res> { + factory _$$MessageDeltaContentRefusalObjectImplCopyWith( + _$MessageDeltaContentRefusalObjectImpl value, + $Res Function(_$MessageDeltaContentRefusalObjectImpl) then) = + __$$MessageDeltaContentRefusalObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {int index, String type, @JsonKey(includeIfNull: false) String? refusal}); +} + +/// @nodoc +class __$$MessageDeltaContentRefusalObjectImplCopyWithImpl<$Res> + extends _$MessageDeltaContentCopyWithImpl<$Res, + _$MessageDeltaContentRefusalObjectImpl> + implements _$$MessageDeltaContentRefusalObjectImplCopyWith<$Res> { + __$$MessageDeltaContentRefusalObjectImplCopyWithImpl( + _$MessageDeltaContentRefusalObjectImpl _value, + $Res Function(_$MessageDeltaContentRefusalObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? index = null, + Object? type = null, + Object? refusal = freezed, + }) { + return _then(_$MessageDeltaContentRefusalObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaContentRefusalObjectImpl + extends MessageDeltaContentRefusalObject { + const _$MessageDeltaContentRefusalObjectImpl( + {required this.index, + required this.type, + @JsonKey(includeIfNull: false) this.refusal}) + : super._(); + + factory _$MessageDeltaContentRefusalObjectImpl.fromJson( + Map json) => + _$$MessageDeltaContentRefusalObjectImplFromJson(json); + + /// The index of the refusal part in the message. + @override + final int index; + + /// Always `refusal`. + @override + final String type; + + /// The refusal content generated by the assistant. + @override + @JsonKey(includeIfNull: false) + final String? refusal; + + @override + String toString() { + return 'MessageDeltaContent.refusal(index: $index, type: $type, refusal: $refusal)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaContentRefusalObjectImpl && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type) && + (identical(other.refusal, refusal) || other.refusal == refusal)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, index, type, refusal); + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaContentRefusalObjectImplCopyWith< + _$MessageDeltaContentRefusalObjectImpl> + get copyWith => __$$MessageDeltaContentRefusalObjectImplCopyWithImpl< + _$MessageDeltaContentRefusalObjectImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile) + imageFile, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text) + text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) + imageUrl, + }) { + return refusal(index, type, this.refusal); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, + }) { + return refusal?.call(index, type, this.refusal); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, + required TResult orElse(), + }) { + if (refusal != null) { + return refusal(index, type, this.refusal); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageDeltaContentImageFileObject value) + imageFile, + required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, + }) { + return refusal(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, + }) { + return refusal?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? 
imageUrl, + required TResult orElse(), + }) { + if (refusal != null) { + return refusal(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageDeltaContentRefusalObjectImplToJson( + this, + ); + } +} + +abstract class MessageDeltaContentRefusalObject extends MessageDeltaContent { + const factory MessageDeltaContentRefusalObject( + {required final int index, + required final String type, + @JsonKey(includeIfNull: false) final String? refusal}) = + _$MessageDeltaContentRefusalObjectImpl; + const MessageDeltaContentRefusalObject._() : super._(); + + factory MessageDeltaContentRefusalObject.fromJson(Map json) = + _$MessageDeltaContentRefusalObjectImpl.fromJson; + + /// The index of the refusal part in the message. + @override + int get index; + + /// Always `refusal`. + @override + String get type; + + /// The refusal content generated by the assistant. + @JsonKey(includeIfNull: false) + String? get refusal; + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageDeltaContentRefusalObjectImplCopyWith< + _$MessageDeltaContentRefusalObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> + implements $MessageDeltaContentCopyWith<$Res> { + factory _$$MessageDeltaContentImageUrlObjectImplCopyWith( + _$MessageDeltaContentImageUrlObjectImpl value, + $Res Function(_$MessageDeltaContentImageUrlObjectImpl) then) = + __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl}); + + $MessageContentImageUrlCopyWith<$Res>? get imageUrl; +} + +/// @nodoc +class __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res> + extends _$MessageDeltaContentCopyWithImpl<$Res, + _$MessageDeltaContentImageUrlObjectImpl> + implements _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> { + __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl( + _$MessageDeltaContentImageUrlObjectImpl _value, + $Res Function(_$MessageDeltaContentImageUrlObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? index = null, + Object? type = null, + Object? imageUrl = freezed, + }) { + return _then(_$MessageDeltaContentImageUrlObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + imageUrl: freezed == imageUrl + ? _value.imageUrl + : imageUrl // ignore: cast_nullable_to_non_nullable + as MessageContentImageUrl?, + )); + } + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $MessageContentImageUrlCopyWith<$Res>? 
get imageUrl { + if (_value.imageUrl == null) { + return null; + } + + return $MessageContentImageUrlCopyWith<$Res>(_value.imageUrl!, (value) { + return _then(_value.copyWith(imageUrl: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaContentImageUrlObjectImpl + extends MessageDeltaContentImageUrlObject { + const _$MessageDeltaContentImageUrlObjectImpl( + {required this.index, + required this.type, + @JsonKey(name: 'image_url', includeIfNull: false) this.imageUrl}) + : super._(); + + factory _$MessageDeltaContentImageUrlObjectImpl.fromJson( + Map json) => + _$$MessageDeltaContentImageUrlObjectImplFromJson(json); + + /// The index of the content part in the message. + @override + final int index; + + /// Always `image_url`. + @override + final String type; + + /// The image URL part of a message. + @override + @JsonKey(name: 'image_url', includeIfNull: false) + final MessageContentImageUrl? imageUrl; + + @override + String toString() { + return 'MessageDeltaContent.imageUrl(index: $index, type: $type, imageUrl: $imageUrl)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaContentImageUrlObjectImpl && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type) && + (identical(other.imageUrl, imageUrl) || + other.imageUrl == imageUrl)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, index, type, imageUrl); + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaContentImageUrlObjectImplCopyWith< + _$MessageDeltaContentImageUrlObjectImpl> + get copyWith => __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl< + _$MessageDeltaContentImageUrlObjectImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile) + imageFile, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text) + text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) + imageUrl, + }) { + return imageUrl(index, type, this.imageUrl); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, + }) { + return imageUrl?.call(index, type, this.imageUrl); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? 
+ imageFile, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, + required TResult orElse(), + }) { + if (imageUrl != null) { + return imageUrl(index, type, this.imageUrl); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageDeltaContentImageFileObject value) + imageFile, + required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, + }) { + return imageUrl(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, + }) { + return imageUrl?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, + required TResult orElse(), + }) { + if (imageUrl != null) { + return imageUrl(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageDeltaContentImageUrlObjectImplToJson( + this, + ); + } +} + +abstract class MessageDeltaContentImageUrlObject extends MessageDeltaContent { + const factory MessageDeltaContentImageUrlObject( + {required final int index, + required final String type, + @JsonKey(name: 'image_url', includeIfNull: false) + final MessageContentImageUrl? imageUrl}) = + _$MessageDeltaContentImageUrlObjectImpl; + const MessageDeltaContentImageUrlObject._() : super._(); + + factory MessageDeltaContentImageUrlObject.fromJson( + Map json) = + _$MessageDeltaContentImageUrlObjectImpl.fromJson; + + /// The index of the content part in the message. + @override + int get index; + + /// Always `image_url`. + @override + String get type; + + /// The image URL part of a message. + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? get imageUrl; + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageDeltaContentImageUrlObjectImplCopyWith< + _$MessageDeltaContentImageUrlObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + MessageContentTextAnnotations _$MessageContentTextAnnotationsFromJson( Map json) { switch (json['type']) { diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 63581c97..77e7ee5d 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -320,7 +320,7 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 
0.0, responseFormat: json['response_format'] == null ? null - : ChatCompletionResponseFormat.fromJson( + : ResponseFormat.fromJson( json['response_format'] as Map), seed: (json['seed'] as num?)?.toInt(), serviceTier: $enumDecodeNullable( @@ -454,25 +454,6 @@ Map _$$ChatCompletionModelStringImplToJson( 'runtimeType': instance.$type, }; -_$ChatCompletionResponseFormatImpl _$$ChatCompletionResponseFormatImplFromJson( - Map json) => - _$ChatCompletionResponseFormatImpl( - type: $enumDecodeNullable( - _$ChatCompletionResponseFormatTypeEnumMap, json['type']) ?? - ChatCompletionResponseFormatType.text, - ); - -Map _$$ChatCompletionResponseFormatImplToJson( - _$ChatCompletionResponseFormatImpl instance) => - { - 'type': _$ChatCompletionResponseFormatTypeEnumMap[instance.type]!, - }; - -const _$ChatCompletionResponseFormatTypeEnumMap = { - ChatCompletionResponseFormatType.text: 'text', - ChatCompletionResponseFormatType.jsonObject: 'json_object', -}; - _$ChatCompletionStopListStringImpl _$$ChatCompletionStopListStringImplFromJson( Map json) => _$ChatCompletionStopListStringImpl( @@ -611,6 +592,7 @@ _$FunctionObjectImpl _$$FunctionObjectImplFromJson(Map json) => name: json['name'] as String, description: json['description'] as String?, parameters: json['parameters'] as Map?, + strict: json['strict'] as bool? ?? false, ); Map _$$FunctionObjectImplToJson( @@ -627,6 +609,34 @@ Map _$$FunctionObjectImplToJson( writeNotNull('description', instance.description); writeNotNull('parameters', instance.parameters); + writeNotNull('strict', instance.strict); + return val; +} + +_$JsonSchemaObjectImpl _$$JsonSchemaObjectImplFromJson( + Map json) => + _$JsonSchemaObjectImpl( + name: json['name'] as String, + description: json['description'] as String?, + schema: json['schema'] as Map, + strict: json['strict'] as bool? ?? 
false, + ); + +Map _$$JsonSchemaObjectImplToJson( + _$JsonSchemaObjectImpl instance) { + final val = { + 'name': instance.name, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('description', instance.description); + val['schema'] = instance.schema; + writeNotNull('strict', instance.strict); return val; } @@ -808,10 +818,18 @@ _$ChatCompletionLogprobsImpl _$$ChatCompletionLogprobsImplFromJson( ); Map _$$ChatCompletionLogprobsImplToJson( - _$ChatCompletionLogprobsImpl instance) => - { - 'content': instance.content?.map((e) => e.toJson()).toList(), - }; + _$ChatCompletionLogprobsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); + return val; +} _$ChatCompletionTokenLogprobImpl _$$ChatCompletionTokenLogprobImplFromJson( Map json) => @@ -942,16 +960,25 @@ _$ChatCompletionStreamResponseChoiceLogprobsImpl ); Map _$$ChatCompletionStreamResponseChoiceLogprobsImplToJson( - _$ChatCompletionStreamResponseChoiceLogprobsImpl instance) => - { - 'content': instance.content?.map((e) => e.toJson()).toList(), - }; + _$ChatCompletionStreamResponseChoiceLogprobsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); + return val; +} _$ChatCompletionStreamResponseDeltaImpl _$$ChatCompletionStreamResponseDeltaImplFromJson( Map json) => _$ChatCompletionStreamResponseDeltaImpl( content: json['content'] as String?, + refusal: json['refusal'] as String?, functionCall: json['function_call'] == null ? 
null : ChatCompletionStreamMessageFunctionCall.fromJson( @@ -976,6 +1003,7 @@ Map _$$ChatCompletionStreamResponseDeltaImplToJson( } writeNotNull('content', instance.content); + writeNotNull('refusal', instance.refusal); writeNotNull('function_call', instance.functionCall?.toJson()); writeNotNull( 'tool_calls', instance.toolCalls?.map((e) => e.toJson()).toList()); @@ -2185,22 +2213,19 @@ Map _$$AssistantObjectResponseFormatEnumerationImplToJson( }; const _$AssistantResponseFormatModeEnumMap = { - AssistantResponseFormatMode.none: 'none', AssistantResponseFormatMode.auto: 'auto', }; -_$AssistantObjectResponseFormatAssistantsResponseFormatImpl - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplFromJson( +_$AssistantObjectResponseFormatResponseFormatImpl + _$$AssistantObjectResponseFormatResponseFormatImplFromJson( Map json) => - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$AssistantObjectResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map _$$AssistantObjectResponseFormatAssistantsResponseFormatImplToJson( - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl instance) => +Map _$$AssistantObjectResponseFormatResponseFormatImplToJson( + _$AssistantObjectResponseFormatResponseFormatImpl instance) => { 'value': instance.value.toJson(), 'runtimeType': instance.$type, @@ -2328,27 +2353,24 @@ Map }; const _$CreateAssistantResponseFormatModeEnumMap = { - CreateAssistantResponseFormatMode.none: 'none', CreateAssistantResponseFormatMode.auto: 'auto', }; -_$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( +_$CreateAssistantRequestResponseFormatResponseFormatImpl + _$$CreateAssistantRequestResponseFormatResponseFormatImplFromJson( Map json) => - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$CreateAssistantRequestResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl - instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map _$$CreateAssistantRequestResponseFormatResponseFormatImplToJson( + _$CreateAssistantRequestResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$ModifyAssistantRequestImpl _$$ModifyAssistantRequestImplFromJson( Map json) => @@ -2421,27 +2443,24 @@ Map }; const _$ModifyAssistantResponseFormatModeEnumMap = { - ModifyAssistantResponseFormatMode.none: 'none', ModifyAssistantResponseFormatMode.auto: 'auto', }; -_$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( +_$ModifyAssistantRequestResponseFormatResponseFormatImpl + _$$ModifyAssistantRequestResponseFormatResponseFormatImplFromJson( Map json) => - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$ModifyAssistantRequestResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - 
_$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl - instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map _$$ModifyAssistantRequestResponseFormatResponseFormatImplToJson( + _$ModifyAssistantRequestResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$DeleteAssistantResponseImpl _$$DeleteAssistantResponseImplFromJson( Map json) => @@ -2530,25 +2549,6 @@ Map _$$AssistantsFunctionCallOptionImplToJson( 'name': instance.name, }; -_$AssistantsResponseFormatImpl _$$AssistantsResponseFormatImplFromJson( - Map json) => - _$AssistantsResponseFormatImpl( - type: $enumDecodeNullable( - _$AssistantsResponseFormatTypeEnumMap, json['type']) ?? - AssistantsResponseFormatType.text, - ); - -Map _$$AssistantsResponseFormatImplToJson( - _$AssistantsResponseFormatImpl instance) => - { - 'type': _$AssistantsResponseFormatTypeEnumMap[instance.type]!, - }; - -const _$AssistantsResponseFormatTypeEnumMap = { - AssistantsResponseFormatType.text: 'text', - AssistantsResponseFormatType.jsonObject: 'json_object', -}; - _$TruncationObjectImpl _$$TruncationObjectImplFromJson( Map json) => _$TruncationObjectImpl( @@ -2800,26 +2800,23 @@ Map _$$RunObjectResponseFormatEnumerationImplToJson( }; const _$RunObjectResponseFormatModeEnumMap = { - RunObjectResponseFormatMode.none: 'none', RunObjectResponseFormatMode.auto: 'auto', }; -_$RunObjectResponseFormatAssistantsResponseFormatImpl - _$$RunObjectResponseFormatAssistantsResponseFormatImplFromJson( +_$RunObjectResponseFormatResponseFormatImpl + _$$RunObjectResponseFormatResponseFormatImplFromJson( Map json) => - _$RunObjectResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$RunObjectResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - _$$RunObjectResponseFormatAssistantsResponseFormatImplToJson( - _$RunObjectResponseFormatAssistantsResponseFormatImpl instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map _$$RunObjectResponseFormatResponseFormatImplToJson( + _$RunObjectResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$RunSubmitToolOutputsImpl _$$RunSubmitToolOutputsImplFromJson( Map json) => @@ -3028,27 +3025,23 @@ Map _$$CreateRunRequestResponseFormatEnumerationImplToJson( }; const _$CreateRunRequestResponseFormatModeEnumMap = { - CreateRunRequestResponseFormatMode.none: 'none', CreateRunRequestResponseFormatMode.auto: 'auto', }; -_$CreateRunRequestResponseFormatAssistantsResponseFormatImpl - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplFromJson( +_$CreateRunRequestResponseFormatResponseFormatImpl + _$$CreateRunRequestResponseFormatResponseFormatImplFromJson( Map json) => - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$CreateRunRequestResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplToJson( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl - instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map 
_$$CreateRunRequestResponseFormatResponseFormatImplToJson( + _$CreateRunRequestResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$ListRunsResponseImpl _$$ListRunsResponseImplFromJson( Map json) => @@ -3361,27 +3354,24 @@ Map json) => - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplToJson( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplToJson( + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$ThreadObjectImpl _$$ThreadObjectImplFromJson(Map json) => _$ThreadObjectImpl( @@ -3968,34 +3958,6 @@ Map _$$MessageContentTextAnnotationsFileCitationImplToJson( 'file_id': instance.fileId, }; -_$MessageDeltaContentImageUrlObjectImpl - _$$MessageDeltaContentImageUrlObjectImplFromJson( - Map json) => - _$MessageDeltaContentImageUrlObjectImpl( - index: (json['index'] as num?)?.toInt(), - type: json['type'] as String?, - imageUrl: json['image_url'] == null - ? null - : MessageContentImageUrl.fromJson( - json['image_url'] as Map), - ); - -Map _$$MessageDeltaContentImageUrlObjectImplToJson( - _$MessageDeltaContentImageUrlObjectImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('index', instance.index); - writeNotNull('type', instance.type); - writeNotNull('image_url', instance.imageUrl?.toJson()); - return val; -} - _$MessageDeltaContentTextImpl _$$MessageDeltaContentTextImplFromJson( Map json) => _$MessageDeltaContentTextImpl( @@ -5038,6 +5000,7 @@ _$ChatCompletionAssistantMessageImpl _$ChatCompletionMessageRoleEnumMap, json['role']) ?? ChatCompletionMessageRole.assistant, content: json['content'] as String?, + refusal: json['refusal'] as String?, name: json['name'] as String?, toolCalls: (json['tool_calls'] as List?) ?.map((e) => ChatCompletionMessageToolCall.fromJson( @@ -5062,6 +5025,7 @@ Map _$$ChatCompletionAssistantMessageImplToJson( } writeNotNull('content', instance.content); + writeNotNull('refusal', instance.refusal); writeNotNull('name', instance.name); writeNotNull( 'tool_calls', instance.toolCalls?.map((e) => e.toJson()).toList()); @@ -5159,6 +5123,7 @@ Map _$$ChatCompletionMessageContentPartTextImplToJson( const _$ChatCompletionMessageContentPartTypeEnumMap = { ChatCompletionMessageContentPartType.text: 'text', ChatCompletionMessageContentPartType.imageUrl: 'image_url', + ChatCompletionMessageContentPartType.refusal: 'refusal', }; _$ChatCompletionMessageContentPartImageImpl @@ -5180,6 +5145,24 @@ Map _$$ChatCompletionMessageContentPartImageImplToJson( 'image_url': instance.imageUrl.toJson(), }; +_$ChatCompletionMessageContentPartRefusalImpl + _$$ChatCompletionMessageContentPartRefusalImplFromJson( + Map json) => + _$ChatCompletionMessageContentPartRefusalImpl( + type: $enumDecodeNullable( + _$ChatCompletionMessageContentPartTypeEnumMap, + json['type']) ?? 
+ ChatCompletionMessageContentPartType.refusal, + refusal: json['refusal'] as String, + ); + +Map _$$ChatCompletionMessageContentPartRefusalImplToJson( + _$ChatCompletionMessageContentPartRefusalImpl instance) => + { + 'type': _$ChatCompletionMessageContentPartTypeEnumMap[instance.type]!, + 'refusal': instance.refusal, + }; + _$ChatCompletionMessageImageUrlImpl _$$ChatCompletionMessageImageUrlImplFromJson(Map json) => _$ChatCompletionMessageImageUrlImpl( @@ -5202,6 +5185,54 @@ const _$ChatCompletionMessageImageDetailEnumMap = { ChatCompletionMessageImageDetail.high: 'high', }; +_$ResponseFormatTextImpl _$$ResponseFormatTextImplFromJson( + Map json) => + _$ResponseFormatTextImpl( + type: $enumDecodeNullable(_$ResponseFormatTypeEnumMap, json['type']) ?? + ResponseFormatType.text, + ); + +Map _$$ResponseFormatTextImplToJson( + _$ResponseFormatTextImpl instance) => + { + 'type': _$ResponseFormatTypeEnumMap[instance.type]!, + }; + +const _$ResponseFormatTypeEnumMap = { + ResponseFormatType.text: 'text', + ResponseFormatType.jsonObject: 'json_object', + ResponseFormatType.jsonSchema: 'json_schema', +}; + +_$ResponseFormatJsonObjectImpl _$$ResponseFormatJsonObjectImplFromJson( + Map json) => + _$ResponseFormatJsonObjectImpl( + type: $enumDecodeNullable(_$ResponseFormatTypeEnumMap, json['type']) ?? + ResponseFormatType.jsonObject, + ); + +Map _$$ResponseFormatJsonObjectImplToJson( + _$ResponseFormatJsonObjectImpl instance) => + { + 'type': _$ResponseFormatTypeEnumMap[instance.type]!, + }; + +_$ResponseFormatJsonSchemaImpl _$$ResponseFormatJsonSchemaImplFromJson( + Map json) => + _$ResponseFormatJsonSchemaImpl( + type: $enumDecodeNullable(_$ResponseFormatTypeEnumMap, json['type']) ?? + ResponseFormatType.jsonSchema, + jsonSchema: JsonSchemaObject.fromJson( + json['json_schema'] as Map), + ); + +Map _$$ResponseFormatJsonSchemaImplToJson( + _$ResponseFormatJsonSchemaImpl instance) => + { + 'type': _$ResponseFormatTypeEnumMap[instance.type]!, + 'json_schema': instance.jsonSchema.toJson(), + }; + _$AssistantToolsCodeInterpreterImpl _$$AssistantToolsCodeInterpreterImplFromJson(Map json) => _$AssistantToolsCodeInterpreterImpl( @@ -5320,6 +5351,20 @@ Map _$$MessageContentTextObjectImplToJson( 'text': instance.text.toJson(), }; +_$MessageContentRefusalObjectImpl _$$MessageContentRefusalObjectImplFromJson( + Map json) => + _$MessageContentRefusalObjectImpl( + type: json['type'] as String, + refusal: json['refusal'] as String, + ); + +Map _$$MessageContentRefusalObjectImplToJson( + _$MessageContentRefusalObjectImpl instance) => + { + 'type': instance.type, + 'refusal': instance.refusal, + }; + _$MessageDeltaContentImageFileObjectImpl _$$MessageDeltaContentImageFileObjectImplFromJson( Map json) => @@ -5377,6 +5422,61 @@ Map _$$MessageDeltaContentTextObjectImplToJson( return val; } +_$MessageDeltaContentRefusalObjectImpl + _$$MessageDeltaContentRefusalObjectImplFromJson( + Map json) => + _$MessageDeltaContentRefusalObjectImpl( + index: (json['index'] as num).toInt(), + type: json['type'] as String, + refusal: json['refusal'] as String?, + ); + +Map _$$MessageDeltaContentRefusalObjectImplToJson( + _$MessageDeltaContentRefusalObjectImpl instance) { + final val = { + 'index': instance.index, + 'type': instance.type, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('refusal', instance.refusal); + return val; +} + +_$MessageDeltaContentImageUrlObjectImpl + _$$MessageDeltaContentImageUrlObjectImplFromJson( + Map json) => + 
_$MessageDeltaContentImageUrlObjectImpl( + index: (json['index'] as num).toInt(), + type: json['type'] as String, + imageUrl: json['image_url'] == null + ? null + : MessageContentImageUrl.fromJson( + json['image_url'] as Map), + ); + +Map _$$MessageDeltaContentImageUrlObjectImplToJson( + _$MessageDeltaContentImageUrlObjectImpl instance) { + final val = { + 'index': instance.index, + 'type': instance.type, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('image_url', instance.imageUrl?.toJson()); + return val; +} + _$MessageContentTextAnnotationsFileCitationObjectImpl _$$MessageContentTextAnnotationsFileCitationObjectImplFromJson( Map json) => diff --git a/packages/openai_dart/oas/main.dart b/packages/openai_dart/oas/main.dart index 5a62f6b1..ab870afb 100644 --- a/packages/openai_dart/oas/main.dart +++ b/packages/openai_dart/oas/main.dart @@ -49,11 +49,15 @@ String? _onSchemaUnionFactoryName( 'ChatCompletionMessageContentParts' => 'parts', 'ChatCompletionMessageContentPartText' => 'text', 'ChatCompletionMessageContentPartImage' => 'image', + 'ChatCompletionMessageContentPartRefusal' => 'refusal', 'ChatCompletionToolChoiceOptionEnumeration' => 'mode', 'ChatCompletionToolChoiceOptionChatCompletionNamedToolChoice' => 'tool', 'ChatCompletionFunctionCallEnumeration' => 'mode', 'ChatCompletionFunctionCallChatCompletionFunctionCallOption' => 'function', + 'ResponseFormatText' => 'text', + 'ResponseFormatJsonObject' => 'jsonObject', + 'ResponseFormatJsonSchema' => 'jsonSchema', // Completion 'CompletionModelEnumeration' => 'model', 'CompletionModelString' => 'modelId', @@ -80,6 +84,7 @@ String? _onSchemaUnionFactoryName( // Assistant 'AssistantModelEnumeration' => 'model', 'AssistantModelString' => 'modelId', + 'AssistantObjectResponseFormatEnumeration' => 'mode', 'CreateAssistantRequestResponseFormatAssistantsResponseFormat' => 'format', 'CreateAssistantRequestResponseFormatEnumeration' => 'mode', @@ -101,7 +106,10 @@ String? _onSchemaUnionFactoryName( 'MessageContentTextAnnotationsFileCitationObject' => 'fileCitation', 'MessageContentTextAnnotationsFilePathObject' => 'filePath', 'MessageContentTextObject' => 'text', + 'MessageContentRefusalObject' => 'refusal', 'MessageDeltaContentImageFileObject' => 'imageFile', + 'MessageDeltaContentRefusalObject' => 'refusal', + 'MessageDeltaContentImageUrlObject' => 'imageUrl', 'MessageDeltaContentTextAnnotationsFileCitationObject' => 'fileCitation', 'MessageDeltaContentTextAnnotationsFilePathObject' => 'filePath', 'MessageDeltaContentTextObject' => 'text', diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index c349f64e..46282eab 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1887,21 +1887,7 @@ components: nullable: true description: *completions_presence_penalty_description response_format: - title: ChatCompletionResponseFormat - type: object - description: | - An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. 
Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - properties: - type: - type: string - enum: [ "text", "json_object" ] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. + $ref: "#/components/schemas/ResponseFormat" seed: type: integer # minimum: -9223372036854775808 # The value can't be represented exactly in JavaScript @@ -2069,11 +2055,10 @@ components: default: user description: The role of the messages author, in this case `user`. content: - # TODO extract to ChatCompletionMessageContent once generator bug fixed description: The contents of the user message. oneOf: - type: string - description: The text contents of the message. + description: The text contents of the user message. - type: array description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. items: @@ -2097,6 +2082,10 @@ components: type: string description: | The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + refusal: + nullable: true + type: string + description: The refusal message by the assistant. name: type: string description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. @@ -2148,11 +2137,12 @@ components: oneOf: - $ref: "#/components/schemas/ChatCompletionMessageContentPartText" - $ref: "#/components/schemas/ChatCompletionMessageContentPartImage" + - $ref: "#/components/schemas/ChatCompletionMessageContentPartRefusal" discriminator: propertyName: type ChatCompletionMessageContentPartText: type: object - description: A text content part of a user message. + description: A text content part of a message. properties: type: $ref: "#/components/schemas/ChatCompletionMessageContentPartType" @@ -2165,7 +2155,7 @@ components: - text ChatCompletionMessageContentPartImage: type: object - title: Image content part + description: An image content part of a user message. properties: type: $ref: "#/components/schemas/ChatCompletionMessageContentPartType" @@ -2190,9 +2180,25 @@ components: - url required: - image_url + ChatCompletionMessageContentPartRefusal: + type: object + description: A refusal content part of a message. + properties: + type: + $ref: "#/components/schemas/ChatCompletionMessageContentPartType" + default: refusal + description: The type of the content part, in this case `refusal`. + refusal: + type: string + description: The refusal message generated by the model. + required: + - refusal ChatCompletionMessageContentPartType: type: string - enum: [ "text", "image_url" ] + enum: + - text + - image_url + - refusal description: The type of the content part. ChatCompletionMessageRole: type: string @@ -2226,18 +2232,109 @@ components: properties: name: type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + description: | + The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + maximum length of 64. description: type: string - description: A description of what the function does, used by the model to choose when and how to call the function. + description: | + A description of what the function does, used by the model to choose when and how to call the function. parameters: $ref: "#/components/schemas/FunctionParameters" + strict: + type: boolean + nullable: true + default: false + description: | + Whether to enable strict schema adherence when generating the function call. If set to true, the model will + follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + `strict` is `true`. Learn more about Structured Outputs in the + [function calling guide](docs/guides/function-calling). required: - name FunctionParameters: type: object description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." additionalProperties: true + ResponseFormat: + type: object + description: | + An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - $ref: "#/components/schemas/ResponseFormatText" + - $ref: "#/components/schemas/ResponseFormatJsonObject" + - $ref: "#/components/schemas/ResponseFormatJsonSchema" + discriminator: + propertyName: type + ResponseFormatType: + type: string + enum: + - text + - json_object + - json_schema + description: The type of response format being defined. + ResponseFormatText: + type: object + description: "The model should respond with plain text." + properties: + type: + $ref: "#/components/schemas/ResponseFormatType" + default: "text" + ResponseFormatJsonObject: + type: object + description: "The model should respond with a JSON object." + properties: + type: + $ref: "#/components/schemas/ResponseFormatType" + default: "json_object" + ResponseFormatJsonSchema: + type: object + description: "The model should respond with a JSON object that adheres to the specified schema." 
+ properties: + type: + $ref: "#/components/schemas/ResponseFormatType" + default: "json_schema" + json_schema: + $ref: "#/components/schemas/JsonSchemaObject" + required: + - json_schema + JsonSchemaObject: + type: object + description: "A JSON Schema object." + properties: + name: + type: string + description: | + The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + length of 64. + description: + type: string + description: | + A description of what the response format is for, used by the model to determine how to respond in the + format. + schema: + type: object + description: | + The schema for the response format, described as a JSON Schema object. + additionalProperties: true + strict: + type: boolean + nullable: true + default: false + description: | + Whether to enable strict schema adherence when generating the output. If set to true, the model will always + follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + `strict` is `true`. To learn more, read the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + required: + - name + - schema ChatCompletionTool: type: object description: A tool the model may use. @@ -2387,8 +2484,12 @@ components: items: $ref: "#/components/schemas/ChatCompletionTokenLogprob" nullable: true - required: - - content + refusal: + description: A list of message refusal tokens with log probability information. + type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true ChatCompletionTokenLogprob: type: object description: Log probability information for a token. @@ -2491,6 +2592,10 @@ components: type: string description: The contents of the chunk message. nullable: true + refusal: + type: string + description: The refusal message generated by the model. + nullable: true function_call: $ref: "#/components/schemas/ChatCompletionStreamMessageFunctionCall" tool_calls: @@ -3481,9 +3586,10 @@ components: We generally recommend altering this or temperature but not both. response_format: description: &assistant_response_format | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + Specifies the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-4o-mini-1106`. + since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. 
Learn more in the @@ -3502,8 +3608,9 @@ components: title: AssistantResponseFormatMode description: > `auto` is the default value - enum: [ none, auto ] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" required: - id - object @@ -3609,8 +3716,9 @@ components: title: CreateAssistantResponseFormatMode description: > `auto` is the default value - enum: [ none, auto ] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" required: - model ModifyAssistantRequest: @@ -3681,8 +3789,9 @@ components: title: ModifyAssistantResponseFormatMode description: > `auto` is the default value - enum: [ none, auto ] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" DeleteAssistantResponse: type: object description: Represents a deleted response returned by the Delete assistant endpoint. @@ -3770,7 +3879,7 @@ components: and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. Note that the file search tool may output fewer than `max_num_results` results. See the [file search - tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. required: - type AssistantToolsFunction: @@ -3806,18 +3915,6 @@ components: description: The name of the function to call. required: - name - AssistantsResponseFormat: - type: object - description: | - An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. - properties: - type: - type: string - title: AssistantsResponseFormatType - enum: [ "text", "json_object" ] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. TruncationObject: type: object description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. 
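
The `ResponseFormat` and `JsonSchemaObject` schemas introduced above surface in the generated `openai_dart` client as a `ResponseFormat` union with `text`, `jsonObject`, and `jsonSchema` variants. Below is a minimal sketch of a chat completion request that opts into Structured Outputs via `ResponseFormat.jsonSchema`, mirroring the integration test touched later in this patch; the API key placeholder, model choice, and the concrete schema are illustrative assumptions, not part of the spec change itself.

```dart
import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  // Illustrative client setup; supply a real API key in practice.
  final client = OpenAIClient(apiKey: 'OPENAI_API_KEY');

  const request = CreateChatCompletionRequest(
    model: ChatCompletionModel.model(ChatCompletionModels.gpt4oMini),
    messages: [
      ChatCompletionMessage.system(
        content: 'You are a helpful assistant that extracts names from text.',
      ),
      ChatCompletionMessage.user(
        content: ChatCompletionUserMessageContent.string(
          'John, Mary, and Peter.',
        ),
      ),
    ],
    // Structured Outputs: the response is guaranteed to match this schema.
    responseFormat: ResponseFormat.jsonSchema(
      jsonSchema: JsonSchemaObject(
        name: 'Names',
        description: 'A list of names',
        strict: true,
        schema: {
          'type': 'object',
          'properties': {
            'names': {
              'type': 'array',
              'items': {'type': 'string'},
            },
          },
          'additionalProperties': false,
          'required': ['names'],
        },
      ),
    ),
  );

  final res = await client.createChatCompletion(request: request);
  print(res.choices.first.message.content); // e.g. {"names":["John","Mary","Peter"]}
}
```
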
@@ -4008,8 +4105,9 @@ components: title: RunObjectResponseFormatMode description: > `auto` is the default value - enum: [ none, auto ] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" required: - id - object @@ -4182,8 +4280,9 @@ components: title: CreateRunRequestResponseFormatMode description: > `auto` is the default value - enum: [ none, auto ] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" stream: type: boolean nullable: true @@ -4411,8 +4510,9 @@ components: title: CreateThreadAndRunRequestResponseFormatMode description: > `auto` is the default value - enum: [ none, auto ] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" stream: type: boolean nullable: true @@ -4722,6 +4822,7 @@ components: - $ref: "#/components/schemas/MessageContentImageFileObject" - $ref: "#/components/schemas/MessageContentImageUrlObject" - $ref: "#/components/schemas/MessageContentTextObject" + - $ref: "#/components/schemas/MessageContentRefusalObject" discriminator: propertyName: type MessageDeltaContent: @@ -4730,6 +4831,8 @@ components: oneOf: - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" - $ref: "#/components/schemas/MessageDeltaContentTextObject" + - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" + - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" discriminator: propertyName: type CreateMessageRequest: @@ -4974,6 +5077,20 @@ components: - file_path - start_index - end_index + MessageContentRefusalObject: + type: object + description: The refusal content generated by the assistant. + properties: + type: + description: Always `refusal`. + type: string + default: refusal + refusal: + type: string + nullable: false + required: + - type + - refusal MessageDeltaContentImageFileObject: type: object description: References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message. @@ -5013,6 +5130,9 @@ components: type: string image_url: $ref: "#/components/schemas/MessageContentImageUrl" + required: + - index + - type MessageDeltaContentTextObject: type: object description: The text content that is part of a message. @@ -5114,6 +5234,23 @@ components: required: - index - type + MessageDeltaContentRefusalObject: + type: object + description: The refusal content that is part of a message. + properties: + index: + type: integer + description: The index of the refusal part in the message. + type: + type: string + description: Always `refusal`. + default: refusal + refusal: + type: string + description: The refusal content generated by the assistant. 
+ required: + - index + - type RunStepObject: type: object description: | diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 4d45fce2..615f5614 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -7585,11 +7585,20 @@ components: } } - ChatCompletionRequestMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" - x-oaiExpandable: true + ChatCompletionRequestMessageContentPartText: + type: object + title: Text content part + properties: + type: + type: string + enum: [ "text" ] + description: The type of the content part. + text: + type: string + description: The text content. + required: + - type + - text ChatCompletionRequestMessageContentPartImage: type: object @@ -7617,20 +7626,20 @@ components: - type - image_url - ChatCompletionRequestMessageContentPartText: + ChatCompletionRequestMessageContentPartRefusal: type: object - title: Text content part + title: Refusal content part properties: type: type: string - enum: ["text"] + enum: [ "refusal" ] description: The type of the content part. - text: + refusal: type: string - description: The text content. + description: The refusal message generated by the model. required: - type - - text + - refusal ChatCompletionRequestMessage: oneOf: @@ -7641,13 +7650,44 @@ components: - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" x-oaiExpandable: true + ChatCompletionRequestSystemMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + + ChatCompletionRequestUserMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" + x-oaiExpandable: true + + ChatCompletionRequestAssistantMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartRefusal" + x-oaiExpandable: true + + ChatCompletionRequestToolMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + ChatCompletionRequestSystemMessage: type: object title: System message properties: content: description: The contents of the system message. - type: string + oneOf: + - type: string + description: The contents of the system message. + title: Text content + - type: array + description: An array of content parts with a defined type. For system messages, only type `text` is supported. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestSystemMessageContentPart" + minItems: 1 role: type: string enum: [ "system" ] @@ -7674,12 +7714,12 @@ components: description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. 
title: Array of content parts items: - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPart" + $ref: "#/components/schemas/ChatCompletionRequestUserMessageContentPart" minItems: 1 x-oaiExpandable: true role: type: string - enum: ["user"] + enum: [ "user" ] description: The role of the messages author, in this case `user`. name: type: string @@ -7694,9 +7734,22 @@ components: properties: content: nullable: true - type: string + oneOf: + - type: string + description: The contents of the assistant message. + title: Text content + - type: array + description: An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestAssistantMessageContentPart" + minItems: 1 description: | The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + refusal: + nullable: true + type: string + description: The refusal message by the assistant. role: type: string enum: [ "assistant" ] @@ -7747,7 +7800,16 @@ components: enum: [ "tool" ] description: The role of the messages author, in this case `tool`. content: - type: string + oneOf: + - type: string + description: The contents of the tool message. + title: Text content + - type: array + description: An array of content parts with a defined type. For tool messages, only type `text` is supported. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestToolMessageContentPart" + minItems: 1 description: The contents of the tool message. tool_call_id: type: string @@ -7833,9 +7895,69 @@ components: description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. parameters: $ref: "#/components/schemas/FunctionParameters" + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling). required: - name + ResponseFormatText: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `text`" + enum: [ "text" ] + required: + - type + + ResponseFormatJsonObject: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `json_object`" + enum: [ "json_object" ] + required: + - type + + ResponseFormatJsonSchemaSchema: + type: object + description: "The schema for the response format, described as a JSON Schema object." + additionalProperties: true + + ResponseFormatJsonSchema: + type: object + properties: + type: + type: string + description: 'The type of response format being defined: `json_schema`' + enum: [ 'json_schema' ] + json_schema: + type: object + properties: + description: + type: string + description: A description of what the response format is for, used by the model to determine how to respond in the format. + name: + type: string + description: The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 
+ schema: + $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). + required: + - type + - name + required: + - type + - json_schema + ChatCompletionToolChoiceOption: description: | Controls which (if any) tool is called by the model. @@ -7970,6 +8092,10 @@ components: type: string description: The contents of the message. nullable: true + refusal: + type: string + description: The refusal message generated by the model. + nullable: true tool_calls: $ref: "#/components/schemas/ChatCompletionMessageToolCalls" role: @@ -7993,6 +8119,7 @@ components: required: - role - content + - refusal ChatCompletionStreamResponseDelta: type: object @@ -8021,6 +8148,10 @@ components: type: string enum: [ "system", "user", "assistant", "tool" ] description: The role of the author of this message. + refusal: + type: string + description: The refusal message generated by the model. + nullable: true CreateChatCompletionRequest: type: object @@ -8040,7 +8171,6 @@ components: enum: [ "gpt-4o", - "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-mini", @@ -8118,20 +8248,19 @@ components: nullable: true description: *completions_presence_penalty_description response_format: - type: object description: | - An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - properties: - type: - type: string - enum: ["text", "json_object"] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. + oneOf: + - $ref: "#/components/schemas/ResponseFormatText" + - $ref: "#/components/schemas/ResponseFormatJsonObject" + - $ref: "#/components/schemas/ResponseFormatJsonSchema" + x-oaiExpandable: true seed: type: integer minimum: -9223372036854775808 @@ -8288,8 +8417,16 @@ components: items: $ref: "#/components/schemas/ChatCompletionTokenLogprob" nullable: true + refusal: + description: A list of message refusal tokens with log probability information. 
+ type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true required: - content + - refusal + created: type: integer description: The Unix timestamp (in seconds) of when the chat completion was created. @@ -10192,22 +10329,12 @@ components: - type: string description: > `auto` is the default value - enum: [none, auto] - - $ref: "#/components/schemas/AssistantsApiResponseFormat" + enum: [ auto ] + - $ref: '#/components/schemas/ResponseFormatText' + - $ref: '#/components/schemas/ResponseFormatJsonObject' + - $ref: '#/components/schemas/ResponseFormatJsonSchema' x-oaiExpandable: true - AssistantsApiResponseFormat: - type: object - description: | - An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. - properties: - type: - type: string - enum: ["text", "json_object"] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. - AssistantObject: type: object title: Assistant @@ -11640,6 +11767,7 @@ components: - $ref: "#/components/schemas/MessageContentImageFileObject" - $ref: "#/components/schemas/MessageContentImageUrlObject" - $ref: "#/components/schemas/MessageContentTextObject" + - $ref: "#/components/schemas/MessageContentRefusalObject" x-oaiExpandable: true assistant_id: description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. @@ -11740,6 +11868,7 @@ components: oneOf: - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" - $ref: "#/components/schemas/MessageDeltaContentTextObject" + - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" x-oaiExpandable: true required: @@ -12007,6 +12136,22 @@ components: - type - text + MessageContentRefusalObject: + title: Refusal + type: object + description: The refusal content generated by the assistant. + properties: + type: + description: Always `refusal`. + type: string + enum: [ "refusal" ] + refusal: + type: string + nullable: false + required: + - type + - refusal + MessageRequestContentTextObject: title: Text type: object @@ -12118,6 +12263,25 @@ components: - index - type + MessageDeltaContentRefusalObject: + title: Refusal + type: object + description: The refusal content that is part of a message. + properties: + index: + type: integer + description: The index of the refusal part in the message. + type: + description: Always `refusal`. + type: string + enum: [ "refusal" ] + refusal: + type: string + required: + - index + - type + + MessageDeltaContentTextAnnotationsFileCitationObject: title: File citation type: object diff --git a/packages/openai_dart/test/openai_client_chat_test.dart b/packages/openai_dart/test/openai_client_chat_test.dart index fa272bbe..ebfe8c44 100644 --- a/packages/openai_dart/test/openai_client_chat_test.dart +++ b/packages/openai_dart/test/openai_client_chat_test.dart @@ -389,12 +389,12 @@ void main() { test('Test jsonObject response format', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt41106Preview, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( content: - 'You are a helpful assistant. 
That extracts names from text ' + 'You are a helpful assistant that extracts names from text ' 'and returns them in a JSON array.', ), ChatCompletionMessage.user( @@ -404,8 +404,59 @@ void main() { ), ], temperature: 0, - responseFormat: ChatCompletionResponseFormat( - type: ChatCompletionResponseFormatType.jsonObject, + responseFormat: ResponseFormat.jsonObject(), + ); + final res = await client.createChatCompletion(request: request); + expect(res.choices, hasLength(1)); + final choice = res.choices.first; + final message = choice.message; + expect(message.role, ChatCompletionMessageRole.assistant); + final content = message.content; + final jsonContent = json.decode(content!) as Map; + final jsonName = jsonContent['names'] as List; + expect(jsonName, isList); + expect(jsonName, hasLength(3)); + expect(jsonName, contains('John')); + expect(jsonName, contains('Mary')); + expect(jsonName, contains('Peter')); + }); + + test('Test jsonSchema response format', () async { + const request = CreateChatCompletionRequest( + model: ChatCompletionModel.model( + ChatCompletionModels.gpt4oMini, + ), + messages: [ + ChatCompletionMessage.system( + content: + 'You are a helpful assistant. That extracts names from text.', + ), + ChatCompletionMessage.user( + content: ChatCompletionUserMessageContent.string( + 'John, Mary, and Peter.', + ), + ), + ], + temperature: 0, + responseFormat: ResponseFormat.jsonSchema( + jsonSchema: JsonSchemaObject( + name: 'Names', + description: 'A list of names', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'names': { + 'type': 'array', + 'items': { + 'type': 'string', + }, + }, + }, + 'additionalProperties': false, + 'required': ['names'], + }, + ), ), ); final res = await client.createChatCompletion(request: request); From ef42d5a9d329a096411f71cd89328df4549ec95a Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 17 Aug 2024 18:00:03 +0200 Subject: [PATCH 094/251] feat: Add support for Structured Outputs in ChatOpenAI (#526) --- docs/expression_language/primitives/mapper.md | 4 +- docs/expression_language/streaming.md | 4 +- .../models/chat_models/integrations/openai.md | 260 ++++++++++++++++-- docs/modules/model_io/output_parsers/json.md | 4 +- .../cookbook/streaming.dart | 8 +- .../primitives/mapper.dart | 4 +- .../chat_models/integrations/openai.dart | 4 +- .../modules/model_io/output_parsers/json.dart | 8 +- .../lib/src/chat_models/mappers.dart | 22 +- .../lib/src/chat_models/types.dart | 113 +++++++- .../test/chat_models/chat_openai_test.dart | 63 ++++- 11 files changed, 423 insertions(+), 71 deletions(-) diff --git a/docs/expression_language/primitives/mapper.md b/docs/expression_language/primitives/mapper.md index 2fb57295..bc599cb6 100644 --- a/docs/expression_language/primitives/mapper.md +++ b/docs/expression_language/primitives/mapper.md @@ -54,9 +54,7 @@ In the following example, the model streams the output in chunks and the output final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/docs/expression_language/streaming.md b/docs/expression_language/streaming.md index 25725045..dd04c9c6 100644 --- a/docs/expression_language/streaming.md +++ b/docs/expression_language/streaming.md @@ -124,9 +124,7 @@ Let’s see such a parser in action to understand what this means. 
final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/docs/modules/model_io/models/chat_models/integrations/openai.md b/docs/modules/model_io/models/chat_models/integrations/openai.md index df92b348..6b3ccbbc 100644 --- a/docs/modules/model_io/models/chat_models/integrations/openai.md +++ b/docs/modules/model_io/models/chat_models/integrations/openai.md @@ -1,25 +1,78 @@ # OpenAI -[OpenAI](https://platform.openai.com/docs/introduction) offers a spectrum of -chat models with different levels of power suitable for different tasks. +This notebook provides a quick overview for getting started with [OpenAI](https://platform.openai.com/docs/introduction) chat models. For detailed documentation of all `ChatOpenAI` features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest/langchain_openai/ChatOpenAI-class.html). -This example goes over how to use LangChain to interact with -OpenAI [models](https://platform.openai.com/docs/models) using the Chat API. +OpenAI has several chat models. You can find information about their latest models and their costs, context windows, and supported input types in the [OpenAI docs](https://platform.openai.com/docs/models). + +> Note that certain OpenAI models can also be accessed via the [Microsoft Azure platform](https://azure.microsoft.com/en-us/products/ai-services/openai-service). Check out the API reference for more information on how to use the Azure with `ChatOpenAI`. + +## Setup + +To access OpenAI models you'll need to create an OpenAI account, get an API key, and install the [langchain_openai](https://pub.dev/packages/langchain_openai) integration package. + +### Credentials + +Head to the [OpenAI Platform](https://platform.openai.com), sign up and get your [API key](https://platform.openai.com/account/api-keys). + +### Installation + +The LangChain.dart OpenAI integration lives in the [langchain_openai](https://pub.dev/packages/langchain_openai) package: + +```yaml +dart pub add langchain_openai +``` + +## Usage + +### Instantiation + +Now we can instantiate our model object and generate chat completions: ```dart final openaiApiKey = Platform.environment['OPENAI_API_KEY']; -final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), - (ChatMessageType.human, '{text}'), -]); - final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o' temperature: 0, + // ...other options ), ); +``` + +If you are using a proxy, you can override the base URL, headers, and other options: + +```dart +final client = ChatOpenAI( + baseUrl: 'https://my-proxy.com', + headers: {'x-my-proxy-header': 'value'}, +); +``` + +### Invocation + +Now you can generate completions by calling the `invoke` method: + +```dart +final messages = [ + ChatMessage.system('You are a helpful assistant that translates English to French.'), + ChatMessage.humanText('I love programming.'), +]; +final prompt = PromptValue.chat(messages); +final res = await llm.invoke(prompt); +// -> 'J'adore la programmation.' 
+``` + +### Chaining + +We can chain our model with a prompt template or output parser to create a more complex pipeline: + +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, '{text}'), +]); final chain = promptTemplate | chatModel | StringOutputParser(); @@ -32,15 +85,16 @@ print(res); // -> 'J'adore la programmation.' ``` -## Streaming +### Streaming + +OpenAI models support [streaming](/expression_language/streaming.md) the output of th model as it is generated. ```dart final openaiApiKey = Platform.environment['OPENAI_API_KEY']; final promptTemplate = ChatPromptTemplate.fromTemplates([ - ( - ChatMessageType.system, - 'You are a helpful assistant that replies only with numbers ' + (ChatMessageType.system, + 'You are a helpful assistant that replies only with numbers ' 'in order without any spaces or commas', ), (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), @@ -57,7 +111,91 @@ await stream.forEach(print); // 789 ``` -You can also stream OpenAI tool calls: +### Multimodal support + +OpenAI's models have [vision capabilities](https://platform.openai.com/docs/guides/vision), meaning the models can take in images and answer questions about them. + +You can send the image as a base64-encoded string: + +```dart +final prompt = PromptValue.chat([ + ChatMessage.system('You are a helpful assistant.'), + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: '/9j/4AAQSkZJRgABAQAAAQABAAD...Rdu1j//2Q==', // base64-encoded image + ), + ]), + ), +]); +``` + +Or you can send the URL where the image is hosted: + +```dart +final prompt = PromptValue.chat([ + ChatMessage.system('You are a helpful assistant.'), + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + data: 'https://upload.wikimedia.org/wikipedia/commons/9/92/95apple.jpeg', + ), + ]), + ), +]); +``` + +### Tool calling + +OpenAI has a [tool calling](/modules/model_io/models/chat_models/how_to/tools.md) (we use "tool calling" and "function calling" interchangeably here) API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. tool-calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally. + + +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + }, + 'required': ['location'], + }, +); + +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'gpt-4o' + temperature: 0, + tools: [tool], + ), +); + +final res = await chatModel.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +print(res.output.toolCalls); +// [AIChatMessageToolCall{ +// id: a621064b-03b3-4ca6-8278-f37504901034, +// name: get_current_weather, +// arguments: {location: Boston, US}, +// }, +// AIChatMessageToolCall{ +// id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, +// name: get_current_weather, +// arguments: {location: Madrid, ES}, +// }] +``` + +Notice that the returned `AIChatMessage` has a `toolCalls` field. This contains in a standardized tool call format that is model-provider agnostic. + +You can also stream OpenAI tool calls. `ToolsOutputParser` is a useful tool for this case, as it concatenates the chunks progressively and tries to complete the partial JSON into a valid one: ```dart const tool = ToolSpec( @@ -108,9 +246,76 @@ await for (final chunk in stream) { // {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} ``` -## JSON mode +### Structured Outputs + +[Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) is a feature that ensures the model will always generate responses that adhere to your supplied JSON Schema, so you don't need to worry about the model omitting a required key, or hallucinating an invalid enum value. + +```dart +final prompt = PromptValue.chat([ + ChatMessage.system( + 'Extract the data of any companies mentioned in the ' + 'following statement. Return a JSON list.', + ), + ChatMessage.humanText( + 'Google was founded in the USA, while Deepmind was founded in the UK', + ), +]); +final chatModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o', + temperature: 0, + responseFormat: ChatOpenAIResponseFormat.jsonSchema( + ChatOpenAIJsonSchema( + name: 'Companies', + description: 'A list of companies', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'companies': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'origin': {'type': 'string'}, + }, + 'additionalProperties': false, + 'required': ['name', 'origin'], + }, + }, + }, + 'additionalProperties': false, + 'required': ['companies'], + }, + ), + ), + ), +); + +final res = await chatModel.invoke(prompt); +// { +// "companies": [ +// { +// "name": "Google", +// "origin": "USA" +// }, +// { +// "name": "Deepmind", +// "origin": "UK" +// } +// ] +// } +``` + +When you use `strict: true`, the model outputs will match the supplied schema exactly. Mind that the strict mode only support a [subset of JSON schema](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas) for performance reasons. Under-the-hood, OpenAI uses a technique known as constrained sampling or constrained decoding. For each JSON Schema, they compute a grammar that represents that schema, and pre-process its components to make it easily accessible during model sampling. This is why the first request with a new schema incurs a latency penalty. Typical schemas take under 10 seconds to process on the first request, but more complex schemas may take up to a minute. + +### JSON mode -GPT-4 Turbo supports a new JSON mode, which ensures the model will respond with valid JSON. 
JSON mode is useful for developers generating JSON in the Chat Completions API outside of function calling. You can use it in combination with a `JsonOutputParser` to parse the response into a JSON map. +When [JSON mode](https://platform.openai.com/docs/guides/structured-outputs/json-mode) is turned on, the model's output is ensured to be valid JSON. You can use it in combination with a `JsonOutputParser` to parse the response into a JSON map. + +> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It is recommended that you use Structured Outputs if it is supported for your use case. ```dart final prompt = PromptValue.chat([ @@ -127,9 +332,7 @@ final llm = ChatOpenAI( defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', temperature: 0, - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final chain = llm.pipe(JsonOutputParser()); @@ -148,3 +351,22 @@ print(res); // ] // } ``` + +### Fine-tuning + +You can call [fine-tuned OpenAI models](https://platform.openai.com/docs/guides/fine-tuning) by passing in your corresponding modelName parameter. + +This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. For example: + +```dart +final chatModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: 'ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR' + ), +); +``` + +## API reference + +For detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest). 
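
One pattern the updated docs describe separately but do not show end-to-end is combining the new Structured Outputs response format with the `JsonOutputParser` used in the JSON mode section, so the chain yields an already-decoded map. A minimal sketch, assuming the same `openaiApiKey` variable as the docs snippets and a hypothetical companies schema; the printed output is only indicative.

```dart
final chatModel = ChatOpenAI(
  apiKey: openaiApiKey,
  defaultOptions: ChatOpenAIOptions(
    model: 'gpt-4o',
    temperature: 0,
    responseFormat: ChatOpenAIResponseFormat.jsonSchema(
      ChatOpenAIJsonSchema(
        name: 'Companies',
        description: 'A list of companies',
        strict: true,
        schema: {
          'type': 'object',
          'properties': {
            'companies': {
              'type': 'array',
              'items': {
                'type': 'object',
                'properties': {
                  'name': {'type': 'string'},
                  'origin': {'type': 'string'},
                },
                'additionalProperties': false,
                'required': ['name', 'origin'],
              },
            },
          },
          'additionalProperties': false,
          'required': ['companies'],
        },
      ),
    ),
  ),
);

// The schema guarantees valid JSON, so JsonOutputParser can decode it directly.
final chain = chatModel.pipe(JsonOutputParser());

final res = await chain.invoke(
  PromptValue.string(
    'Google was founded in the USA, while Deepmind was founded in the UK. '
    'List the companies mentioned and their origin.',
  ),
);
print(res);
// e.g. {companies: [{name: Google, origin: USA}, {name: Deepmind, origin: UK}]}
```
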
diff --git a/docs/modules/model_io/output_parsers/json.md b/docs/modules/model_io/output_parsers/json.md index 905b380b..06451f17 100644 --- a/docs/modules/model_io/output_parsers/json.md +++ b/docs/modules/model_io/output_parsers/json.md @@ -21,9 +21,7 @@ final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: ChatOpenAIOptions( model: 'gpt-4-turbo', - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/examples/docs_examples/bin/expression_language/cookbook/streaming.dart b/examples/docs_examples/bin/expression_language/cookbook/streaming.dart index 66e4a7a6..d6b8cdae 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/streaming.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/streaming.dart @@ -79,9 +79,7 @@ Future _inputStreams() async { final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); @@ -125,9 +123,7 @@ Future _inputStreamMapper() async { final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/examples/docs_examples/bin/expression_language/primitives/mapper.dart b/examples/docs_examples/bin/expression_language/primitives/mapper.dart index 818ed0d7..c9d0400a 100644 --- a/examples/docs_examples/bin/expression_language/primitives/mapper.dart +++ b/examples/docs_examples/bin/expression_language/primitives/mapper.dart @@ -63,9 +63,7 @@ Future _mapInputStream() async { final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart index 6d302daf..2b6ea9df 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart @@ -131,9 +131,7 @@ Future _chatOpenAIJsonMode() async { defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', temperature: 0, - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final chain = llm.pipe(JsonOutputParser()); diff --git a/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart b/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart index 8005f8d0..b921ec7d 100644 --- a/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart +++ b/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart @@ -22,9 +22,7 @@ Future _invoke() async { apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), 
+ responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); @@ -51,9 +49,7 @@ Future _streaming() async { apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart index 0c70fd73..5e9000c2 100644 --- a/packages/langchain_openai/lib/src/chat_models/mappers.dart +++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart @@ -248,15 +248,19 @@ extension CreateChatCompletionStreamResponseMapper } extension ChatOpenAIResponseFormatMapper on ChatOpenAIResponseFormat { - ChatCompletionResponseFormat toChatCompletionResponseFormat() { - return ChatCompletionResponseFormat( - type: switch (type) { - ChatOpenAIResponseFormatType.text => - ChatCompletionResponseFormatType.text, - ChatOpenAIResponseFormatType.jsonObject => - ChatCompletionResponseFormatType.jsonObject, - }, - ); + ResponseFormat toChatCompletionResponseFormat() { + return switch (this) { + ChatOpenAIResponseFormatText() => const ResponseFormat.text(), + ChatOpenAIResponseFormatJsonObject() => const ResponseFormat.jsonObject(), + final ChatOpenAIResponseFormatJsonSchema res => ResponseFormat.jsonSchema( + jsonSchema: JsonSchemaObject( + name: res.jsonSchema.name, + description: res.jsonSchema.description, + schema: res.jsonSchema.schema, + strict: res.jsonSchema.strict, + ), + ), + }; } } diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 988d27c0..5db3268f 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -261,24 +261,111 @@ class ChatOpenAIOptions extends ChatModelOptions { /// {@template chat_openai_response_format} /// An object specifying the format that the model must output. /// {@endtemplate} -class ChatOpenAIResponseFormat { - /// {@macro chat_openai_response_format} - const ChatOpenAIResponseFormat({ - required this.type, +sealed class ChatOpenAIResponseFormat { + const ChatOpenAIResponseFormat(); + + /// The model will respond with text. + static const text = ChatOpenAIResponseFormatText(); + + /// The model will respond with a valid JSON object. + static const jsonObject = ChatOpenAIResponseFormatJsonObject(); + + /// The model will respond with a valid JSON object that adheres to the + /// specified schema. + factory ChatOpenAIResponseFormat.jsonSchema( + final ChatOpenAIJsonSchema jsonSchema, + ) => + ChatOpenAIResponseFormatJsonSchema(jsonSchema: jsonSchema); +} + +/// {@template chat_openai_response_format_text} +/// The model will respond with text. +/// {@endtemplate} +class ChatOpenAIResponseFormatText extends ChatOpenAIResponseFormat { + /// {@macro chat_openai_response_format_text} + const ChatOpenAIResponseFormatText(); +} + +/// {@template chat_openai_response_format_json_object} +/// The model will respond with a valid JSON object. +/// {@endtemplate} +class ChatOpenAIResponseFormatJsonObject extends ChatOpenAIResponseFormat { + /// {@macro chat_openai_response_format_json_object} + const ChatOpenAIResponseFormatJsonObject(); +} + +/// {@template chat_openai_response_format_json_schema} +/// The model will respond with a valid JSON object that adheres to the +/// specified schema. 
+/// {@endtemplate} +@immutable +class ChatOpenAIResponseFormatJsonSchema extends ChatOpenAIResponseFormat { + /// {@macro chat_openai_response_format_json_schema} + const ChatOpenAIResponseFormatJsonSchema({ + required this.jsonSchema, }); - /// The format type. - final ChatOpenAIResponseFormatType type; + /// The JSON schema that the model must adhere to. + final ChatOpenAIJsonSchema jsonSchema; + + @override + bool operator ==(covariant ChatOpenAIResponseFormatJsonSchema other) { + return identical(this, other) || + runtimeType == other.runtimeType && jsonSchema == other.jsonSchema; + } + + @override + int get hashCode => jsonSchema.hashCode; } -/// Types of response formats. -enum ChatOpenAIResponseFormatType { - /// Standard text mode. - text, +/// {@template chat_openai_json_schema} +/// Specifies the schema for the response format. +/// {@endtemplate} +@immutable +class ChatOpenAIJsonSchema { + /// {@macro chat_openai_json_schema} + const ChatOpenAIJsonSchema({ + required this.name, + required this.schema, + this.description, + this.strict = false, + }); + + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain + /// underscores and dashes, with a maximum length of 64. + final String name; + + /// A description of what the response format is for, used by the model to + /// determine how to respond in the format. + final String? description; + + /// The schema for the response format, described as a JSON Schema object. + final Map schema; + + /// Whether to enable strict schema adherence when generating the output. + /// If set to true, the model will always follow the exact schema defined in + /// the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + final bool strict; - /// [ChatOpenAIResponseFormatType.jsonObject] enables JSON mode, which - /// guarantees the message the model generates is valid JSON. - jsonObject, + @override + bool operator ==(covariant ChatOpenAIJsonSchema other) { + return identical(this, other) || + runtimeType == other.runtimeType && + name == other.name && + description == other.description && + const MapEquality().equals(schema, other.schema) && + strict == other.strict; + } + + @override + int get hashCode { + return name.hashCode ^ + description.hashCode ^ + const MapEquality().hash(schema) ^ + strict.hashCode; + } } /// Specifies the latency tier to use for processing the request. 
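
For context on the new sealed `ChatOpenAIResponseFormat` API above, here is a minimal usage sketch that mirrors the structured-output test added later in this patch. It is not part of the patch itself: the API key handling, the `gpt-4o` model name, and the example schema and prompt are illustrative assumptions.

```dart
// Minimal sketch of the new response-format API (assumes a valid OpenAI API
// key in the environment and a model that supports Structured Outputs).
import 'dart:io';

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];

  final model = ChatOpenAI(
    apiKey: openAiApiKey,
    defaultOptions: ChatOpenAIOptions(
      model: 'gpt-4o', // assumption: any structured-output-capable model
      // JSON mode would be: responseFormat: ChatOpenAIResponseFormat.jsonObject
      // Structured Outputs: the model must follow the given JSON schema.
      responseFormat: ChatOpenAIResponseFormat.jsonSchema(
        const ChatOpenAIJsonSchema(
          name: 'Companies',
          description: 'A list of companies',
          strict: true,
          schema: {
            'type': 'object',
            'properties': {
              'companies': {
                'type': 'array',
                'items': {
                  'type': 'object',
                  'properties': {
                    'name': {'type': 'string'},
                    'origin': {'type': 'string'},
                  },
                  'additionalProperties': false,
                  'required': ['name', 'origin'],
                },
              },
            },
            'additionalProperties': false,
            'required': ['companies'],
          },
        ),
      ),
    ),
  );

  // Parse the model output straight into a JSON map.
  final chain = model.pipe(JsonOutputParser());
  final res = await chain.invoke(
    PromptValue.string(
      'Google was founded in the USA, while Deepmind was founded in the UK. '
      'Return a JSON object listing the companies and their origin.',
    ),
  );
  print(res);
}
```

The sealed-class design keeps the common cases as the `text` and `jsonObject` constants, while the `jsonSchema` factory carries the extra schema payload, so the mapper's exhaustive `switch` stays compile-time checked.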
diff --git a/packages/langchain_openai/test/chat_models/chat_openai_test.dart b/packages/langchain_openai/test/chat_models/chat_openai_test.dart index 7ba681c6..d56ba1f9 100644 --- a/packages/langchain_openai/test/chat_models/chat_openai_test.dart +++ b/packages/langchain_openai/test/chat_models/chat_openai_test.dart @@ -357,11 +357,68 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-1106-preview', + model: defaultModel, + temperature: 0, + seed: 9999, + responseFormat: ChatOpenAIResponseFormat.jsonObject, + ), + ); + + final res = await llm.invoke(prompt); + final outputMsg = res.output; + final outputJson = json.decode(outputMsg.content) as Map; + expect(outputJson['companies'], isNotNull); + final companies = outputJson['companies'] as List; + expect(companies, hasLength(2)); + final firstCompany = companies.first as Map; + expect(firstCompany['name'], 'Google'); + expect(firstCompany['origin'], 'USA'); + final secondCompany = companies.last as Map; + expect(secondCompany['name'], 'Deepmind'); + expect(secondCompany['origin'], 'UK'); + }); + + test('Test Structured Output', () async { + final prompt = PromptValue.chat([ + ChatMessage.system( + 'Extract the data of any companies mentioned in the ' + 'following statement. Return a JSON list.', + ), + ChatMessage.humanText( + 'Google was founded in the USA, while Deepmind was founded in the UK', + ), + ]); + final llm = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: defaultModel, temperature: 0, seed: 9999, - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, + responseFormat: ChatOpenAIResponseFormat.jsonSchema( + const ChatOpenAIJsonSchema( + name: 'Companies', + description: 'A list of companies', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'companies': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'origin': {'type': 'string'}, + }, + 'additionalProperties': false, + 'required': ['name', 'origin'], + }, + }, + }, + 'additionalProperties': false, + 'required': ['companies'], + }, + ), ), ), ); From 6d5e3db66c474c11b20b189d7f939c0e581af4f0 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 17 Aug 2024 18:14:33 +0200 Subject: [PATCH 095/251] feat: Add chatgpt-4o-latest to model catalog (#527) --- packages/langchain_openai/lib/src/chat_models/types.dart | 1 + .../src/generated/schema/create_chat_completion_request.dart | 2 ++ packages/openai_dart/lib/src/generated/schema/schema.g.dart | 1 + packages/openai_dart/oas/openapi_curated.yaml | 1 + packages/openai_dart/oas/openapi_official.yaml | 1 + 5 files changed, 6 insertions(+) diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 5db3268f..0c80184f 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -7,6 +7,7 @@ import 'package:meta/meta.dart'; /// Options to pass into the OpenAI Chat Model. 
/// /// Available [ChatOpenAIOptions.model]s: +/// - `chatgpt-4o-latest` /// - `gpt-4` /// - `gpt-4-32k` /// - `gpt-4-32k-0314` diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 8b6c5c52..0b6a5920 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -277,6 +277,8 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Available completion models. Mind that the list may not be exhaustive nor up-to-date. enum ChatCompletionModels { + @JsonValue('chatgpt-4o-latest') + chatgpt4oLatest, @JsonValue('gpt-4') gpt4, @JsonValue('gpt-4-32k') diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 77e7ee5d..01851e43 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -414,6 +414,7 @@ Map _$$ChatCompletionModelEnumerationImplToJson( }; const _$ChatCompletionModelsEnumMap = { + ChatCompletionModels.chatgpt4oLatest: 'chatgpt-4o-latest', ChatCompletionModels.gpt4: 'gpt-4', ChatCompletionModels.gpt432k: 'gpt-4-32k', ChatCompletionModels.gpt432k0314: 'gpt-4-32k-0314', diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 46282eab..ba1d409c 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1806,6 +1806,7 @@ components: Available completion models. Mind that the list may not be exhaustive nor up-to-date. 
enum: [ + "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 615f5614..02653404 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -8173,6 +8173,7 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", + "chatgpt-4o-latest", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", From 2156fa3f74738b7197de882660821f96859b7fc6 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 21 Aug 2024 09:24:54 +0200 Subject: [PATCH 096/251] feat: Add copyWith method to all RunnableOptions subclasses (#531) --- .../lib/src/chat_models/fake.dart | 13 +++++++ .../lib/src/chat_models/types.dart | 8 +++++ .../lib/src/language_models/types.dart | 6 ++++ .../langchain_core/lib/src/llms/fake.dart | 34 +++++++++++++++---- .../langchain_core/lib/src/llms/types.dart | 2 +- .../lib/src/retrievers/types.dart | 12 +++++++ .../test/runnables/binding_test.dart | 11 ++++++ .../lib/src/chat_models/vertex_ai/types.dart | 3 ++ .../lib/src/chat_models/types.dart | 3 ++ .../src/chat_models/chat_ollama/types.dart | 5 +++ 10 files changed, 90 insertions(+), 7 deletions(-) diff --git a/packages/langchain_core/lib/src/chat_models/fake.dart b/packages/langchain_core/lib/src/chat_models/fake.dart index b69868c3..bda1d6e3 100644 --- a/packages/langchain_core/lib/src/chat_models/fake.dart +++ b/packages/langchain_core/lib/src/chat_models/fake.dart @@ -2,6 +2,7 @@ import 'package:collection/collection.dart'; import '../../language_models.dart'; import '../prompts/types.dart'; +import '../tools/base.dart'; import 'base.dart'; import 'types.dart'; @@ -85,6 +86,8 @@ class FakeChatModelOptions extends ChatModelOptions { const FakeChatModelOptions({ super.model, this.metadata, + super.tools, + super.toolChoice, super.concurrencyLimit, }); @@ -95,11 +98,15 @@ class FakeChatModelOptions extends ChatModelOptions { FakeChatModelOptions copyWith({ final String? model, final Map? metadata, + final List? tools, + final ChatToolChoice? toolChoice, final int? concurrencyLimit, }) { return FakeChatModelOptions( model: model ?? this.model, metadata: metadata ?? this.metadata, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } @@ -223,6 +230,8 @@ class FakeEchoChatModelOptions extends ChatModelOptions { super.model, this.metadata, this.throwRandomError = false, + super.tools, + super.toolChoice, super.concurrencyLimit, }); @@ -237,12 +246,16 @@ class FakeEchoChatModelOptions extends ChatModelOptions { final String? model, final Map? metadata, final bool? throwRandomError, + final List? tools, + final ChatToolChoice? toolChoice, final int? concurrencyLimit, }) { return FakeEchoChatModelOptions( model: model ?? this.model, metadata: metadata ?? this.metadata, throwRandomError: throwRandomError ?? this.throwRandomError, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index f9c2aff3..f92fe4af 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -22,6 +22,14 @@ abstract class ChatModelOptions extends LanguageModelOptions { /// Controls which (if any) tool is called by the model. final ChatToolChoice? toolChoice; + + @override + ChatModelOptions copyWith({ + final String? model, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, + }); } /// {@template chat_result} diff --git a/packages/langchain_core/lib/src/language_models/types.dart b/packages/langchain_core/lib/src/language_models/types.dart index 3b52ee63..39e071bd 100644 --- a/packages/langchain_core/lib/src/language_models/types.dart +++ b/packages/langchain_core/lib/src/language_models/types.dart @@ -17,6 +17,12 @@ abstract class LanguageModelOptions extends BaseLangChainOptions { /// ID of the language model to use. /// Check the provider's documentation for available models. final String? model; + + @override + LanguageModelOptions copyWith({ + final String? model, + final int? concurrencyLimit, + }); } /// {@template language_model} diff --git a/packages/langchain_core/lib/src/llms/fake.dart b/packages/langchain_core/lib/src/llms/fake.dart index 0781e607..ffb64c00 100644 --- a/packages/langchain_core/lib/src/llms/fake.dart +++ b/packages/langchain_core/lib/src/llms/fake.dart @@ -7,11 +7,11 @@ import 'types.dart'; /// Fake LLM for testing. /// You can pass in a list of responses to return in order when called. /// {@endtemplate} -class FakeLLM extends SimpleLLM { +class FakeLLM extends SimpleLLM { /// {@macro fake_list_llm} FakeLLM({ required this.responses, - }) : super(defaultOptions: const LLMOptions()); + }) : super(defaultOptions: const FakeLLMOptions()); /// Responses to return in order when called. final List responses; @@ -60,13 +60,35 @@ class FakeLLM extends SimpleLLM { } } +/// {@template fake_llm_options} +/// Fake LLM options for testing. +/// {@endtemplate} +class FakeLLMOptions extends LLMOptions { + /// {@macro fake_llm_options} + const FakeLLMOptions({ + super.model, + super.concurrencyLimit, + }); + + @override + FakeLLMOptions copyWith({ + final String? model, + final int? concurrencyLimit, + }) { + return FakeLLMOptions( + model: model ?? this.model, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } +} + /// {@template fake_echo_llm} /// Fake LLM for testing. /// It just returns the prompt or streams it char by char. /// {@endtemplate} -class FakeEchoLLM extends BaseLLM { +class FakeEchoLLM extends BaseLLM { /// {@macro fake_echo_llm} - const FakeEchoLLM() : super(defaultOptions: const LLMOptions()); + const FakeEchoLLM() : super(defaultOptions: const FakeLLMOptions()); @override String get modelType => 'fake-echo'; @@ -122,11 +144,11 @@ class FakeEchoLLM extends BaseLLM { /// Fake LLM for testing. /// It returns the string returned by the [handler] function. /// {@endtemplate} -class FakeHandlerLLM extends SimpleLLM { +class FakeHandlerLLM extends SimpleLLM { /// {@macro fake_handler_llm} FakeHandlerLLM({ required this.handler, - }) : super(defaultOptions: const LLMOptions()); + }) : super(defaultOptions: const FakeLLMOptions()); /// Function called to generate the response. 
final String Function( diff --git a/packages/langchain_core/lib/src/llms/types.dart b/packages/langchain_core/lib/src/llms/types.dart index 02a506de..7a81a0ab 100644 --- a/packages/langchain_core/lib/src/llms/types.dart +++ b/packages/langchain_core/lib/src/llms/types.dart @@ -6,7 +6,7 @@ import '../language_models/types.dart'; /// Options to pass into the LLM. /// {@endtemplate} @immutable -class LLMOptions extends LanguageModelOptions { +abstract class LLMOptions extends LanguageModelOptions { /// {@macro llm_options} const LLMOptions({ super.model, diff --git a/packages/langchain_core/lib/src/retrievers/types.dart b/packages/langchain_core/lib/src/retrievers/types.dart index a80412e2..e3938296 100644 --- a/packages/langchain_core/lib/src/retrievers/types.dart +++ b/packages/langchain_core/lib/src/retrievers/types.dart @@ -21,10 +21,22 @@ class VectorStoreRetrieverOptions extends RetrieverOptions { /// {@macro vector_store_retriever_options} const VectorStoreRetrieverOptions({ this.searchType = const VectorStoreSimilaritySearch(), + super.concurrencyLimit, }); /// The type of search to perform, either: /// - [VectorStoreSearchType.similarity] (default) /// - [VectorStoreSearchType.mmr] final VectorStoreSearchType searchType; + + @override + VectorStoreRetrieverOptions copyWith({ + final VectorStoreSearchType? searchType, + final int? concurrencyLimit, + }) { + return VectorStoreRetrieverOptions( + searchType: searchType ?? this.searchType, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } } diff --git a/packages/langchain_core/test/runnables/binding_test.dart b/packages/langchain_core/test/runnables/binding_test.dart index 882bf164..e64f0042 100644 --- a/packages/langchain_core/test/runnables/binding_test.dart +++ b/packages/langchain_core/test/runnables/binding_test.dart @@ -4,6 +4,7 @@ import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/output_parsers.dart'; import 'package:langchain_core/prompts.dart'; import 'package:langchain_core/runnables.dart'; +import 'package:langchain_core/tools.dart'; import 'package:test/test.dart'; void main() { @@ -123,4 +124,14 @@ class _FakeOptionsChatModelOptions extends ChatModelOptions { const _FakeOptionsChatModelOptions(this.stop); final String stop; + + @override + ChatModelOptions copyWith({ + final String? model, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, + }) { + return _FakeOptionsChatModelOptions(stop); + } } diff --git a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart index 019ab64e..50249bf3 100644 --- a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart @@ -1,5 +1,6 @@ import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; import 'package:meta/meta.dart'; /// {@template chat_vertex_ai_options} @@ -108,6 +109,8 @@ class ChatVertexAIOptions extends ChatModelOptions { final List? stopSequences, final int? candidateCount, final List? examples, + final List? tools, + final ChatToolChoice? toolChoice, final int? 
concurrencyLimit, }) { return ChatVertexAIOptions( diff --git a/packages/langchain_mistralai/lib/src/chat_models/types.dart b/packages/langchain_mistralai/lib/src/chat_models/types.dart index e6ba07b9..d9a75761 100644 --- a/packages/langchain_mistralai/lib/src/chat_models/types.dart +++ b/packages/langchain_mistralai/lib/src/chat_models/types.dart @@ -1,4 +1,5 @@ import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; import 'package:meta/meta.dart'; /// {@template chat_mistral_ai_options} @@ -54,6 +55,8 @@ class ChatMistralAIOptions extends ChatModelOptions { final int? maxTokens, final bool? safePrompt, final int? randomSeed, + final List? tools, + final ChatToolChoice? toolChoice, final int? concurrencyLimit, }) { return ChatMistralAIOptions( diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart index 6e9c0f20..cf02b00c 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart @@ -1,5 +1,6 @@ import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; import 'package:meta/meta.dart'; import '../../../langchain_ollama.dart'; @@ -247,6 +248,8 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? useMmap, final bool? useMlock, final int? numThread, + final List? tools, + final ChatToolChoice? toolChoice, final int? concurrencyLimit, }) { return ChatOllamaOptions( @@ -283,6 +286,8 @@ class ChatOllamaOptions extends ChatModelOptions { useMmap: useMmap ?? this.useMmap, useMlock: useMlock ?? this.useMlock, numThread: numThread ?? this.numThread, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, ); } From d9661a5debe2d72124697ce80406fa5216c8a052 Mon Sep 17 00:00:00 2001 From: Heinrich Date: Wed, 21 Aug 2024 21:54:37 +1000 Subject: [PATCH 097/251] feat: Add ToolsAgent for models with tool-calling support (#530) Co-authored-by: David Miguel --- docs/_sidebar.md | 3 +- .../modules/agents/agent_types/tools_agent.md | 190 +++++ .../models/chat_models/integrations/ollama.md | 15 +- .../agents/agent_types/tools_agent.dart | 160 ++++ .../chat_models/integrations/ollama.dart | 2 +- packages/langchain/lib/src/agents/agents.dart | 1 + packages/langchain/lib/src/agents/tools.dart | 304 ++++++++ packages/langchain/pubspec.yaml | 3 + packages/langchain/pubspec_overrides.yaml | 14 +- .../test/agents/assets/state_of_the_union.txt | 723 ++++++++++++++++++ .../langchain/test/agents/tools_test.dart | 224 ++++++ 11 files changed, 1628 insertions(+), 11 deletions(-) create mode 100644 docs/modules/agents/agent_types/tools_agent.md create mode 100644 examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart create mode 100644 packages/langchain/lib/src/agents/tools.dart create mode 100644 packages/langchain/test/agents/assets/state_of_the_union.txt create mode 100644 packages/langchain/test/agents/tools_test.dart diff --git a/docs/_sidebar.md b/docs/_sidebar.md index c51de21b..92d4f394 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -121,7 +121,8 @@ - [Memory](/modules/memory/memory.md) - [Agents](/modules/agents/agents.md) - [Agent types](/modules/agents/agent_types/agent_types.md) - - [OpenAI functions](/modules/agents/agent_types/openai_tools_agent.md) + - [Tools Agent](/modules/agents/agent_types/tools_agent.md) + - [OpenAI Tools Agent](/modules/agents/agent_types/openai_tools_agent.md) - [Tools](/modules/agents/tools/tools.md) - [Calculator](/modules/agents/tools/calculator.md) - [DALL-E Image Generator](/modules/agents/tools/openai_dall_e.md) diff --git a/docs/modules/agents/agent_types/tools_agent.md b/docs/modules/agents/agent_types/tools_agent.md new file mode 100644 index 00000000..7c0c9de8 --- /dev/null +++ b/docs/modules/agents/agent_types/tools_agent.md @@ -0,0 +1,190 @@ +# Tools Agent + +An agent powered by the [tool calling API](/modules/model_io/models/chat_models/how_to/tools.md). + +This agent is designed to work with any chat model that supports tool calling. It can interpret the model's output and decide when to call specific tools based on that output. + +**Supported models:** +You can use any chat model that supports tool calling, like `ChatOpenAI`, `ChatOllama`, `ChatAnthropic`, `ChatFirebaseVertexAI`, etc. Check the [tool calling docs](/modules/model_io/models/chat_models/how_to/tools.md) for a complete list. + +## Usage + +In the following example, we use `ChatOllama` with the `llama3.1` model and a calculator tool (included in `langchain_community`) to calculate the result of a mathematical expression. + +```dart +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +//... + +final llm = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + ), +); +final tool = CalculatorTool(); +final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); +final executor = AgentExecutor(agent: agent); +final res = await executor.run( + 'What is 40 raised to the power of 0.43? 
' + 'Return the result with 3 decimals.', +); +print(res); +// The result is: 4.885 +``` + +## Custom tools + +You can easily call your own functions by wrapping them in a `Tool`. You can also add memory to the agent by passing it when creating the agent. + +Let's see an example of how to do this. + +First, let's create a class that will be the input for our tool. + +```dart +@immutable +class SearchInput { + const SearchInput({ + required this.query, + required this.n, + }); + + final String query; + final int n; + + SearchInput.fromJson(final Map json) + : this( + query: json['query'] as String, + n: json['n'] as int, + ); +} +``` + +Now let's define the tool: + +```dart +final searchTool = Tool.fromFunction( + name: 'search', + description: 'Tool for searching the web.', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'query': { + 'type': 'string', + 'description': 'The query to search for', + }, + 'n': { + 'type': 'integer', + 'description': 'The number of results to return', + }, + }, + 'required': ['query'], + }, + func: callYourSearchFunction, + getInputFromJson: SearchInput.fromJson, +); +``` + +Notice that we need to provide a function that converts the JSON input that the model will send to our tool into the input class that we defined. + +The tool will call `callYourSearchFunction` function with the parsed input. For simplicity, we will just mock the search function. +```dart +String callYourSearchFunction(final SearchInput input) { + final n = input.n; + final res = List.generate( + n, + (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}', + ); + return 'Results:\n${res.join('\n')}'; +} +``` + +Now we can create the agent and run it: + +```dart +final llm = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3-groq-tool-use', + temperature: 0, + ), +); + +final memory = ConversationBufferMemory(returnMessages: true); +final agent = ToolsAgent.fromLLMAndTools( + llm: llm, + tools: [searchTool], + memory: memory, +); + +final executor = AgentExecutor(agent: agent); + +final res1 = await executor.run( + 'Search for cat names. Return only 3 results.', +); +print(res1); +// Here are the top 3 cat names I found: AAA, BBB, and CCC. +``` + +## Custom agent using LangChain Expression Language (LCEL) + +You can replicate the functionality of the Tools Agent by using the LangChain Expression Language (LCEL) directly. 
+ +```dart +final openAiKey = Platform.environment['OPENAI_API_KEY']; + +final prompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant'), + (ChatMessageType.human, '{input}'), + (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), +]); + +final tool = CalculatorTool(); + +final model = ChatOpenAI( + apiKey: openAiKey, + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o-mini', + temperature: 0, + tools: [tool], + ), +); + +const outputParser = ToolsAgentOutputParser(); + +List buildScratchpad(final List intermediateSteps) { + return intermediateSteps + .map((s) { + return s.action.messageLog + + [ + ChatMessage.tool( + toolCallId: s.action.id, + content: s.observation, + ), + ]; + }) + .expand((m) => m) + .toList(growable: false); +} + +final agent = Agent.fromRunnable( + Runnable.mapInput( + (AgentPlanInput planInput) => { + 'input': planInput.inputs['input'], + 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), + }, + ).pipe(prompt).pipe(model).pipe(outputParser), + tools: [tool], +); +final executor = AgentExecutor(agent: agent); + +final res = await executor.invoke({ + 'input': 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', +}); +print(res['output']); +// The result of 40 raised to the power of 0.43 is approximately 4.885. +``` + +In this way, you can create your own custom agents with full control over their behavior, while still leveraging the flexibility of the Tools Agent to work with various language models and tools. diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index 9c368d1b..e6cc5907 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -83,11 +83,9 @@ final chain = promptTemplate.pipe(chat).pipe(StringOutputParser()); final stream = chain.stream({'max_num': '9'}); await stream.forEach(print); -// 1 -// 2 -// 3 -// .. -// 9 +// 123 +// 456 +// 789 ``` ### Multimodal support @@ -120,12 +118,13 @@ print(res.output.content); ### Tool calling -`ChatOllama` now offers support for native tool calling. This enables a model to answer a given prompt using tool(s) it knows about, making it possible for models to perform more complex tasks or interact with the outside world. It follows the standard [LangChain.dart tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). +`ChatOllama` offers support for native tool calling. This enables a model to answer a given prompt using tool(s) it knows about, making it possible for models to perform more complex tasks or interact with the outside world. It follows the standard [LangChain.dart tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). **Notes:** - Tool calling requires [Ollama 0.3.0](https://github.com/ollama/ollama/releases/tag/v0.3.0) or newer. - Streaming tool calls is not supported at the moment. -- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1)). +- Not all models support tool calls. 
Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1) or [`llama3-groq-tool-use`](https://ollama.com/library/llama3-groq-tool-use)). +- At the moment, small models like `llama3.1` [cannot reliably maintain a conversation alongside tool calling definitions](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#llama-3.1-instruct). They can be used for zero-shot tool calling, but for multi-turn conversations it's recommended to use larger models like `llama3.1:70b` or `llama3.1:405b`. ```dart const tool = ToolSpec( @@ -420,7 +419,7 @@ We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `Ch ```dart // 1. Create a vector store and add documents to it final vectorStore = MemoryVectorStore( - embeddings: OllamaEmbeddings(model: 'llama3.1'), + embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), ); await vectorStore.addDocuments( documents: [ diff --git a/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart b/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart new file mode 100644 index 00000000..7554d8d4 --- /dev/null +++ b/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart @@ -0,0 +1,160 @@ +// ignore_for_file: avoid_print, unreachable_from_main +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; +import 'package:langchain_openai/langchain_openai.dart'; + +void main() async { + await _toolsAgent(); + await _toolsAgentCustomToolsMemory(); + await _toolsAgentLCEL(); +} + +Future _toolsAgent() async { + final llm = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + ), + ); + final tool = CalculatorTool(); + final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); + final executor = AgentExecutor(agent: agent); + final res = await executor.run( + 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + ); + print(res); + // The result is: 4.885 +} + +Future _toolsAgentCustomToolsMemory() async { + final tool = Tool.fromFunction( + name: 'search', + description: 'Tool for searching the web.', + inputJsonSchema: const { + 'type': 'object', + 'properties': { + 'query': { + 'type': 'string', + 'description': 'The query to search for', + }, + 'n': { + 'type': 'integer', + 'description': 'The number of results to return', + }, + }, + 'required': ['query'], + }, + func: callYourSearchFunction, + getInputFromJson: SearchInput.fromJson, + ); + + final llm = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3-groq-tool-use', + temperature: 0, + ), + ); + + final memory = ConversationBufferMemory(returnMessages: true); + final agent = ToolsAgent.fromLLMAndTools( + llm: llm, + tools: [tool], + memory: memory, + ); + + final executor = AgentExecutor(agent: agent); + + final res1 = await executor.run( + 'Search for cat names. Return only 3 results.', + ); + print(res1); + // Here are 3 search results for "cats": + // 1. Result 1 + // 2. Result 2 + // 3. 
Result 3 +} + +class SearchInput { + const SearchInput({ + required this.query, + required this.n, + }); + + final String query; + final int n; + + SearchInput.fromJson(final Map json) + : this( + query: json['query'] as String, + n: json['n'] as int, + ); +} + +String callYourSearchFunction(final SearchInput input) { + final n = input.n; + final res = List.generate( + n, + (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}', + ); + return 'Results:\n${res.join('\n')}'; +} + +Future _toolsAgentLCEL() async { + final openAiKey = Platform.environment['OPENAI_API_KEY']; + + final prompt = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are a helpful assistant'), + (ChatMessageType.human, '{input}'), + (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), + ]); + + final tool = CalculatorTool(); + + final model = ChatOpenAI( + apiKey: openAiKey, + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o-mini', + temperature: 0, + tools: [tool], + ), + ); + + const outputParser = ToolsAgentOutputParser(); + + List buildScratchpad(final List intermediateSteps) { + return intermediateSteps + .map((s) { + return s.action.messageLog + + [ + ChatMessage.tool( + toolCallId: s.action.id, + content: s.observation, + ), + ]; + }) + .expand((m) => m) + .toList(growable: false); + } + + final agent = Agent.fromRunnable( + Runnable.mapInput( + (AgentPlanInput planInput) => { + 'input': planInput.inputs['input'], + 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), + }, + ).pipe(prompt).pipe(model).pipe(outputParser), + tools: [tool], + ); + final executor = AgentExecutor(agent: agent); + + final res = await executor.invoke({ + 'input': 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + }); + print(res['output']); + // The result of 40 raised to the power of 0.43 is approximately 4.885. +} diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart index 0682326f..2d66b367 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart @@ -348,7 +348,7 @@ Future _flights() async { Future _rag() async { // 1. 
Create a vector store and add documents to it final vectorStore = MemoryVectorStore( - embeddings: OllamaEmbeddings(model: 'llama3.1'), + embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), ); await vectorStore.addDocuments( documents: [ diff --git a/packages/langchain/lib/src/agents/agents.dart b/packages/langchain/lib/src/agents/agents.dart index ec89c95c..cc12a558 100644 --- a/packages/langchain/lib/src/agents/agents.dart +++ b/packages/langchain/lib/src/agents/agents.dart @@ -1,3 +1,4 @@ export 'package:langchain_core/agents.dart'; export 'executor.dart'; +export 'tools.dart'; diff --git a/packages/langchain/lib/src/agents/tools.dart b/packages/langchain/lib/src/agents/tools.dart new file mode 100644 index 00000000..02a16284 --- /dev/null +++ b/packages/langchain/lib/src/agents/tools.dart @@ -0,0 +1,304 @@ +import 'package:langchain_core/agents.dart'; +import 'package:langchain_core/chains.dart'; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/exceptions.dart'; +import 'package:langchain_core/memory.dart'; +import 'package:langchain_core/output_parsers.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; + +const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( + prompt: PromptTemplate( + inputVariables: {}, + template: 'You are a helpful AI assistant', + ), +); + +/// {@template tools_agent} +/// An agent powered by the tool calling API. +/// +/// Example: +/// ```dart +/// final llm = ChatOllama( +/// defaultOptions: ChatOllamaOptions( +/// model: 'llama3-groq-tool-use', +/// temperature: 0, +/// ), +/// ); +/// final tools = [CalculatorTool()]; +/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +/// final executor = AgentExecutor(agent: agent); +/// final res = await executor.run('What is 40 raised to the 0.43 power? '); +/// ``` +/// +/// You can use any chat model that supports tools, like `ChatOpenAI`, +/// `ChatOllama`, `ChatAnthropic`, `ChatFirebaseVertexAI`, etc. Check the +/// [documentation](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) +/// for a complete list. +/// +/// You can easily add memory to the agent using the memory parameter from the +/// [ToolsAgent.fromLLMAndTools] constructor. Make sure you enable +/// [BaseChatMemory.returnMessages] on your memory, as the agent works with +/// [ChatMessage]s. The default prompt template already takes care of adding +/// the history to the prompt. For example: +/// ```dart +/// final memory = ConversationBufferMemory(returnMessages: true); +/// final agent = ToolsAgent.fromLLMAndTools( +/// llm: llm, +/// tools: tools, +/// memory: memory, +/// ); +/// ``` +/// +/// If you need to use your own [llmChain] make sure your prompt template +/// includes: +/// - `MessagePlaceholder(variableName: agentInputKey)`: the input to the agent. +/// - If you are using memory: +/// * `MessagesPlaceholder(variableName: '{memoryKey}')`: the history of chat +/// messages. +/// - If you are not using memory: +/// * `MessagesPlaceholder(variableName: BaseActionAgent.agentScratchpadInputKey)`: +/// the intermediary work of the agent (if you are using memory, the agent +/// uses the memory to store the intermediary work). 
+/// Example: +/// ```dart +/// ChatPromptTemplate.fromTemplates([ +/// (ChatMessageType.system, 'You are a helpful AI assistant'), +/// (ChatMessageType.messagesPlaceholder, 'history'), +/// (ChatMessageType.messagePlaceholder, 'input'), +/// ]); +/// ``` +/// +/// You can use [ToolsAgent.createPrompt] to build the prompt +/// template if you only need to customize the system message or add some +/// extra messages. +/// {@endtemplate} +class ToolsAgent extends BaseSingleActionAgent { + /// {@macro tools_agent} + ToolsAgent({ + required this.llmChain, + required super.tools, + }) : _parser = const ToolsAgentOutputParser(), + assert( + llmChain.memory != null || + llmChain.prompt.inputVariables + .contains(BaseActionAgent.agentScratchpadInputKey), + '`${BaseActionAgent.agentScratchpadInputKey}` should be one of the ' + 'variables in the prompt, got ${llmChain.prompt.inputVariables}', + ), + assert( + llmChain.memory == null || llmChain.memory!.returnMessages, + 'The memory must have `returnMessages` set to true', + ); + + /// Chain to use to call the LLM. + /// + /// If the chain does not have a memory, the prompt MUST include a variable + /// called [BaseActionAgent.agentScratchpadInputKey] where the agent can put + /// its intermediary work. + /// + /// If the chain has a memory, the agent will use the memory to store the + /// intermediary work. + /// + /// The memory must have [BaseChatMemory.returnMessages] set to true for + /// the agent to work properly. + final LLMChain llmChain; + + /// Parser to use to parse the output of the LLM. + final ToolsAgentOutputParser _parser; + + /// The key for the input to the agent. + static const agentInputKey = 'input'; + + @override + Set get inputKeys => {agentInputKey}; + + /// Construct an [ToolsAgent] from an [llm] and [tools]. + /// + /// - [llm] - The model to use for the agent. + /// - [tools] - The tools the agent has access to. You can omit this field if + /// you have already configured the tools in the [llm]. + /// - [memory] - The memory to use for the agent. + /// - [systemChatMessage] message to use as the system message that will be + /// the first in the prompt. Default: "You are a helpful AI assistant". + /// - [extraPromptMessages] prompt messages that will be placed between the + /// system message and the input from the agent. + factory ToolsAgent.fromLLMAndTools({ + required final BaseChatModel llm, + final List? tools, + final BaseChatMemory? memory, + final SystemChatMessagePromptTemplate systemChatMessage = + _systemChatMessagePromptTemplate, + final List? extraPromptMessages, + }) { + assert( + tools != null || llm.defaultOptions.tools != null, + 'Tools must be provided or configured in the llm', + ); + assert( + tools != null || llm.defaultOptions.tools!.every((tool) => tool is Tool), + 'All elements in `tools` must be of type `Tool` or its subclasses', + ); + + final actualTools = tools ?? 
llm.defaultOptions.tools!.cast(); + + return ToolsAgent( + llmChain: LLMChain( + llm: llm, + llmOptions: llm.defaultOptions.copyWith( + tools: actualTools, + ), + prompt: createPrompt( + systemChatMessage: systemChatMessage, + extraPromptMessages: extraPromptMessages, + memory: memory, + ), + memory: memory, + ), + tools: actualTools, + ); + } + + @override + Future> plan(final AgentPlanInput input) async { + final llmChainInputs = _constructLlmChainInputs( + input.intermediateSteps, + input.inputs, + ); + final ChainValues output = await llmChain.invoke(llmChainInputs); + final predictedMessage = output[LLMChain.defaultOutputKey] as AIChatMessage; + return _parser.parseChatMessage(predictedMessage); + } + + Map _constructLlmChainInputs( + final List intermediateSteps, + final InputValues inputs, + ) { + final dynamic agentInput; + + // If there is a memory, we pass the last agent step as a function message. + // Otherwise, we pass the input as a human message. + if (llmChain.memory != null && intermediateSteps.isNotEmpty) { + final lastStep = intermediateSteps.last; + final functionMsg = ChatMessage.tool( + toolCallId: lastStep.action.id, + content: lastStep.observation, + ); + agentInput = functionMsg; + } else { + agentInput = switch (inputs[agentInputKey]) { + final String inputStr => ChatMessage.humanText(inputStr), + final ChatMessage inputMsg => inputMsg, + final List inputMsgs => inputMsgs, + _ => throw LangChainException( + message: 'Agent expected a String or ChatMessage as input,' + ' got ${inputs[agentInputKey]}', + ), + }; + } + + return { + ...inputs, + agentInputKey: agentInput, + if (llmChain.memory == null) + BaseActionAgent.agentScratchpadInputKey: + _constructScratchPad(intermediateSteps), + }; + } + + List _constructScratchPad( + final List intermediateSteps, + ) { + return [ + ...intermediateSteps.map((final s) { + return s.action.messageLog + + [ + ChatMessage.tool( + toolCallId: s.action.id, + content: s.observation, + ), + ]; + }).expand((final m) => m), + ]; + } + + @override + String get agentType => 'tool-agent'; + + /// Creates prompt for this agent. + /// + /// It takes care of adding the necessary placeholders to handle the + /// intermediary work of the agent or the memory. + /// + /// - [systemChatMessage] message to use as the system message that will be + /// the first in the prompt. + /// - [extraPromptMessages] prompt messages that will be placed between the + /// system message and the new human input. + /// - [memory] optional memory to use for the agent. + static BasePromptTemplate createPrompt({ + final SystemChatMessagePromptTemplate systemChatMessage = + _systemChatMessagePromptTemplate, + final List? extraPromptMessages, + final BaseChatMemory? memory, + }) { + return ChatPromptTemplate.fromPromptMessages([ + systemChatMessage, + ...?extraPromptMessages, + for (final memoryKey in memory?.memoryKeys ?? {}) + MessagesPlaceholder(variableName: memoryKey), + const MessagePlaceholder(variableName: agentInputKey), + if (memory == null) + const MessagesPlaceholder( + variableName: BaseActionAgent.agentScratchpadInputKey, + ), + ]); + } +} + +/// {@template tools_agent_output_parser} +/// Parser for [ToolsAgent]. +/// +/// It parses the output of the LLM and returns the corresponding +/// [BaseAgentAction] to be executed. 
+/// {@endtemplate} +class ToolsAgentOutputParser extends BaseOutputParser> { + /// {@macro tools_agent_output_parser} + const ToolsAgentOutputParser() + : super(defaultOptions: const OutputParserOptions()); + + @override + Future> invoke( + final ChatResult input, { + final OutputParserOptions? options, + }) { + return parseChatMessage(input.output); + } + + /// Parses the [message] and returns the corresponding [BaseAgentAction]. + Future> parseChatMessage( + final AIChatMessage message, + ) async { + final toolCalls = message.toolCalls; + if (toolCalls.isNotEmpty) { + return toolCalls.map((final toolCall) { + return AgentAction( + id: toolCall.id, + tool: toolCall.name, + toolInput: toolCall.arguments, + log: 'Invoking: `${toolCall.name}` ' + 'with `${toolCall.arguments}`\n' + 'Responded: ${message.content}\n', + messageLog: [message], + ); + }).toList(growable: false); + } else { + return [ + AgentFinish( + returnValues: {'output': message.content}, + log: message.content, + ), + ]; + } + } +} diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 70437c8d..857d9c79 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -25,3 +25,6 @@ dependencies: dev_dependencies: test: ^1.25.8 + langchain_community: ^0.3.0 + langchain_openai: ^0.7.0 + langchain_ollama: ^0.3.0 diff --git a/packages/langchain/pubspec_overrides.yaml b/packages/langchain/pubspec_overrides.yaml index 3508ed77..65792891 100644 --- a/packages/langchain/pubspec_overrides.yaml +++ b/packages/langchain/pubspec_overrides.yaml @@ -1,4 +1,16 @@ -# melos_managed_dependency_overrides: langchain_core +# melos_managed_dependency_overrides: langchain_core,langchain_community,langchain_ollama,langchain_openai,ollama_dart,openai_dart,tavily_dart dependency_overrides: + langchain_community: + path: ../langchain_community langchain_core: path: ../langchain_core + langchain_ollama: + path: ../langchain_ollama + langchain_openai: + path: ../langchain_openai + ollama_dart: + path: ../ollama_dart + openai_dart: + path: ../openai_dart + tavily_dart: + path: ../tavily_dart diff --git a/packages/langchain/test/agents/assets/state_of_the_union.txt b/packages/langchain/test/agents/assets/state_of_the_union.txt new file mode 100644 index 00000000..d50175de --- /dev/null +++ b/packages/langchain/test/agents/assets/state_of_the_union.txt @@ -0,0 +1,723 @@ +Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. + +Last year COVID-19 kept us apart. This year we are finally together again. + +Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. + +With a duty to one another to the American people to the Constitution. + +And with an unwavering resolve that freedom will always triumph over tyranny. + +Six days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. + +He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. + +He met the Ukrainian people. + +From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. + +Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. 
+ +In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight. + +Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. + +Please rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. + +Throughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos. + +They keep moving. + +And the costs and the threats to America and the world keep rising. + +That’s why the NATO Alliance was created to secure peace and stability in Europe after World War 2. + +The United States is a member along with 29 other nations. + +It matters. American diplomacy matters. American resolve matters. + +Putin’s latest attack on Ukraine was premeditated and unprovoked. + +He rejected repeated efforts at diplomacy. + +He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. Putin was wrong. We were ready. Here is what we did. + +We prepared extensively and carefully. + +We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin. + +I spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression. + +We countered Russia’s lies with truth. + +And now that he has acted the free world is holding him accountable. + +Along with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland. + +We are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever. + +Together with our allies –we are right now enforcing powerful economic sanctions. + +We are cutting off Russia’s largest banks from the international financial system. + +Preventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless. + +We are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come. + +Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. + +The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. + +We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. + +And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value. + +The Russian stock market has lost 40% of its value and trading remains suspended. Russia’s economy is reeling and Putin alone is to blame. + +Together with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance. + +We are giving more than $1 Billion in direct assistance to Ukraine. + +And we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering. 
+ +Let me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine. + +Our forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies – in the event that Putin decides to keep moving west. + +For that purpose we’ve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia. + +As I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power. + +And we remain clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them. + +Putin has unleashed violence and chaos. But while he may make gains on the battlefield – he will pay a continuing high price over the long run. + +And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. + +To all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. + +And I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. + +Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. + +America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. + +These steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. + +But I want you to know that we are going to be okay. + +When the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. + +While it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly. + +We see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine. + +In the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. + +This is a real test. It’s going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people. + +To our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you. + +Putin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people. + +He will never extinguish their love of freedom. He will never weaken the resolve of the free world. + +We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. + +The pandemic has been punishing. + +And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. + +I understand. + +I remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. 
+ +That’s why one of the first things I did as President was fight to pass the American Rescue Plan. + +Because people were hurting. We needed to act, and we did. + +Few pieces of legislation have done more in a critical moment in our history to lift us out of crisis. + +It fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans. + +Helped put food on their table, keep a roof over their heads, and cut the cost of health insurance. + +And as my Dad used to say, it gave people a little breathing room. + +And unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind. + +And it worked. It created jobs. Lots of jobs. + +In fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year +than ever before in the history of America. + +Our economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasn’t worked for the working people of this nation for too long. + +For the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else. + +But that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. + +Vice President Harris and I ran for office with a new economic vision for America. + +Invest in America. Educate Americans. Grow the workforce. Build the economy from the bottom up +and the middle out, not from the top down. + +Because we know that when the middle class grows, the poor have a ladder up and the wealthy do very well. + +America used to have the best roads, bridges, and airports on Earth. + +Now our infrastructure is ranked 13th in the world. + +We won’t be able to compete for the jobs of the 21st Century if we don’t fix that. + +That’s why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history. + +This was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen. + +We’re done talking about infrastructure weeks. + +We’re going to have an infrastructure decade. + +It is going to transform America and put us on a path to win the economic competition of the 21st Century that we face with the rest of the world—particularly with China. + +As I’ve told Xi Jinping, it is never a good bet to bet against the American people. + +We’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. + +And we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice. + +We’ll build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities. + +4,000 projects have already been announced. + +And tonight, I’m announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair. + +When we use taxpayer dollars to rebuild America – we are going to Buy American: buy American products to support American jobs. 
+ +The federal government spends about $600 Billion a year to keep the country safe and secure. + +There’s been a law on the books for almost a century +to make sure taxpayers’ dollars support American jobs and businesses. + +Every Administration says they’ll do it, but we are actually doing it. + +We will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America. + +But to compete for the best jobs of the future, we also need to level the playing field with China and other competitors. + +That’s why it is so important to pass the Bipartisan Innovation Act sitting in Congress that will make record investments in emerging technologies and American manufacturing. + +Let me give you one example of why it’s so important to pass it. + +If you travel 20 miles east of Columbus, Ohio, you’ll find 1,000 empty acres of land. + +It won’t look like much, but if you stop and look closely, you’ll see a “Field of dreams,” the ground on which America’s future will be built. + +This is where Intel, the American company that helped build Silicon Valley, is going to build its $20 billion semiconductor “mega site”. + +Up to eight state-of-the-art factories in one place. 10,000 new good-paying jobs. + +Some of the most sophisticated manufacturing in the world to make computer chips the size of a fingertip that power the world and our everyday lives. + +Smartphones. The Internet. Technology we have yet to invent. + +But that’s just the beginning. + +Intel’s CEO, Pat Gelsinger, who is here tonight, told me they are ready to increase their investment from +$20 billion to $100 billion. + +That would be one of the biggest investments in manufacturing in American history. + +And all they’re waiting for is for you to pass this bill. + +So let’s not wait any longer. Send it to my desk. I’ll sign it. + +And we will really take off. + +And Intel is not alone. + +There’s something happening in America. + +Just look around and you’ll see an amazing story. + +The rebirth of the pride that comes from stamping products “Made In America.” The revitalization of American manufacturing. + +Companies are choosing to build new factories here, when just a few years ago, they would have built them overseas. + +That’s what is happening. Ford is investing $11 billion to build electric vehicles, creating 11,000 jobs across the country. + +GM is making the largest investment in its history—$7 billion to build electric vehicles, creating 4,000 jobs in Michigan. + +All told, we created 369,000 new manufacturing jobs in America just last year. + +Powered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight. + +As Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” + +It’s time. + +But with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills. + +Inflation is robbing them of the gains they might otherwise feel. + +I get it. That’s why my top priority is getting prices under control. + +Look, our economy roared back faster than most predicted, but the pandemic meant that businesses had a hard time hiring enough workers to keep up production in their factories. + +The pandemic also disrupted global supply chains. + +When factories close, it takes longer to make goods and get them from the warehouse to the store, and prices go up. + +Look at cars. 
+ +Last year, there weren’t enough semiconductors to make all the cars that people wanted to buy. + +And guess what, prices of automobiles went up. + +So—we have a choice. + +One way to fight inflation is to drive down wages and make Americans poorer. + +I have a better plan to fight inflation. + +Lower your costs, not your wages. + +Make more cars and semiconductors in America. + +More infrastructure and innovation in America. + +More goods moving faster and cheaper in America. + +More jobs where you can earn a good living in America. + +And instead of relying on foreign supply chains, let’s make it in America. + +Economists call it “increasing the productive capacity of our economy.” + +I call it building a better America. + +My plan to fight inflation will lower your costs and lower the deficit. + +17 Nobel laureates in economics say my plan will ease long-term inflationary pressures. Top business leaders and most Americans support my plan. And here’s the plan: + +First – cut the cost of prescription drugs. Just look at insulin. One in ten Americans has diabetes. In Virginia, I met a 13-year-old boy named Joshua Davis. + +He and his Dad both have Type 1 diabetes, which means they need insulin every day. Insulin costs about $10 a vial to make. + +But drug companies charge families like Joshua and his Dad up to 30 times more. I spoke with Joshua’s mom. + +Imagine what it’s like to look at your child who needs insulin and have no idea how you’re going to pay for it. + +What it does to your dignity, your ability to look your child in the eye, to be the parent you expect to be. + +Joshua is here with us tonight. Yesterday was his birthday. Happy birthday, buddy. + +For Joshua, and for the 200,000 other young people with Type 1 diabetes, let’s cap the cost of insulin at $35 a month so everyone can afford it. + +Drug companies will still do very well. And while we’re at it let Medicare negotiate lower prices for prescription drugs, like the VA already does. + +Look, the American Rescue Plan is helping millions of families on Affordable Care Act plans save $2,400 a year on their health care premiums. Let’s close the coverage gap and make those savings permanent. + +Second – cut energy costs for families an average of $500 a year by combatting climate change. + +Let’s provide investments and tax credits to weatherize your homes and businesses to be energy efficient and you get a tax credit; double America’s clean energy production in solar, wind, and so much more; lower the price of electric vehicles, saving you another $80 a month because you’ll never have to pay at the gas pump again. + +Third – cut the cost of child care. Many families pay up to $14,000 a year for child care per child. + +Middle-class and working families shouldn’t have to pay more than 7% of their income for care of young children. + +My plan will cut the cost in half for most families and help parents, including millions of women, who left the workforce during the pandemic because they couldn’t afford child care, to be able to get back to work. + +My plan doesn’t stop there. It also includes home and long-term care. More affordable housing. And Pre-K for every 3- and 4-year-old. + +All of these will lower costs. + +And under my plan, nobody earning less than $400,000 a year will pay an additional penny in new taxes. Nobody. + +The one thing all Americans agree on is that the tax system is not fair. We have to fix it. + +I’m not looking to punish anyone. 
But let’s make sure corporations and the wealthiest Americans start paying their fair share. + +Just last year, 55 Fortune 500 corporations earned $40 billion in profits and paid zero dollars in federal income tax. + +That’s simply not fair. That’s why I’ve proposed a 15% minimum tax rate for corporations. + +We got more than 130 countries to agree on a global minimum tax rate so companies can’t get out of paying their taxes at home by shipping jobs and factories overseas. + +That’s why I’ve proposed closing loopholes so the very wealthy don’t pay a lower tax rate than a teacher or a firefighter. + +So that’s my plan. It will grow the economy and lower costs for families. + +So what are we waiting for? Let’s get this done. And while you’re at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation. + +My plan will not only lower costs to give families a fair shot, it will lower the deficit. + +The previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted. + +But in my administration, the watchdogs have been welcomed back. + +We’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans. + +And tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. + +By the end of this year, the deficit will be down to less than half what it was before I took office. + +The only president ever to cut the deficit by more than one trillion dollars in a single year. + +Lowering your costs also means demanding more competition. + +I’m a capitalist, but capitalism without competition isn’t capitalism. + +It’s exploitation—and it drives up prices. + +When corporations don’t have to compete, their profits go up, your prices go up, and small businesses and family farmers and ranchers go under. + +We see it happening with ocean carriers moving goods in and out of America. + +During the pandemic, these foreign-owned companies raised prices by as much as 1,000% and made record profits. + +Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. + +And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. + +That ends on my watch. + +Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. + +We’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. + +Let’s pass the Paycheck Fairness Act and paid leave. + +Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. + +Let’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges. + +And let’s pass the PRO Act when a majority of workers want to form a union—they shouldn’t be stopped. + +When we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. + +For more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. + +And I know you’re tired, frustrated, and exhausted. 
+ +But I also know this. + +Because of the progress we’ve made, because of your resilience and the tools we have, tonight I can say +we are moving forward safely, back to more normal routines. + +We’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July. + +Just a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines. + +Under these new guidelines, most Americans in most of the country can now be mask free. + +And based on the projections, more of the country will reach that point across the next couple of weeks. + +Thanks to the progress we have made this past year, COVID-19 need no longer control our lives. + +I know some are talking about “living with COVID-19”. Tonight – I say that we will never just accept living with COVID-19. + +We will continue to combat the virus as we do other diseases. And because this is a virus that mutates and spreads, we will stay on guard. + +Here are four common sense steps as we move forward safely. + +First, stay protected with vaccines and treatments. We know how incredibly effective vaccines are. If you’re vaccinated and boosted you have the highest degree of protection. + +We will never give up on vaccinating more Americans. Now, I know parents with kids under 5 are eager to see a vaccine authorized for their children. + +The scientists are working hard to get that done and we’ll be ready with plenty of vaccines when they do. + +We’re also ready with anti-viral treatments. If you get COVID-19, the Pfizer pill reduces your chances of ending up in the hospital by 90%. + +We’ve ordered more of these pills than anyone in the world. And Pfizer is working overtime to get us 1 Million pills this month and more than double that next month. + +And we’re launching the “Test to Treat” initiative so people can get tested at a pharmacy, and if they’re positive, receive antiviral pills on the spot at no cost. + +If you’re immunocompromised or have some other vulnerability, we have treatments and free high-quality masks. + +We’re leaving no one behind or ignoring anyone’s needs as we move forward. + +And on testing, we have made hundreds of millions of tests available for you to order for free. + +Even if you already ordered free tests tonight, I am announcing that you can order more from covidtests.gov starting next week. + +Second – we must prepare for new variants. Over the past year, we’ve gotten much better at detecting new variants. + +If necessary, we’ll be able to deploy new vaccines within 100 days instead of many more months or years. + +And, if Congress provides the funds we need, we’ll have new stockpiles of tests, masks, and pills ready if needed. + +I cannot promise a new variant won’t come. But I can promise you we’ll do everything within our power to be ready if it does. + +Third – we can end the shutdown of schools and businesses. We have the tools we need. + +It’s time for Americans to get back to work and fill our great downtowns again. People working from home can feel safe to begin to return to the office. + +We’re doing that here in the federal government. The vast majority of federal workers will once again work in person. + +Our schools are open. Let’s keep it that way. Our kids need to be in school. + +And with 75% of adult Americans fully vaccinated and hospitalizations down by 77%, most Americans can remove their masks, return to work, stay in the classroom, and move forward safely. 
+ +We achieved this because we provided free vaccines, treatments, tests, and masks. + +Of course, continuing this costs money. + +I will soon send Congress a request. + +The vast majority of Americans have used these tools and may want to again, so I expect Congress to pass it quickly. + +Fourth, we will continue vaccinating the world. + +We’ve sent 475 Million vaccine doses to 112 countries, more than any other nation. + +And we won’t stop. + +We have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. + +Let’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. + +Let’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. + +We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. + +I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. + +They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. + +Officer Mora was 27 years old. + +Officer Rivera was 22. + +Both Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. + +I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. + +I’ve worked on these issues a long time. + +I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. + +So let’s not abandon our streets. Or choose between safety and equal justice. + +Let’s come together to protect our communities, restore trust, and hold law enforcement accountable. + +That’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. + +That’s why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope. + +We should all agree: The answer is not to Defund the police. The answer is to FUND the police with the resources and training they need to protect our communities. + +I ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe. + +And I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and can’t be traced. + +And I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon? + +Ban assault weapons and high-capacity magazines. + +Repeal the liability shield that makes gun manufacturers the only industry in America that can’t be sued. + +These laws don’t infringe on the Second Amendment. They save lives. + +The most fundamental right in America is the right to vote – and to have it counted. And it’s under assault. + +In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. + +We cannot let this happen. + +Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. 
And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. + +Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. + +One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. + +And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. + +A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. + +And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. + +We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. + +We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. + +We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. + +We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders. + +We can do all this while keeping lit the torch of liberty that has led generations of immigrants to this land—my forefathers and so many of yours. + +Provide a pathway to citizenship for Dreamers, those on temporary status, farm workers, and essential workers. + +Revise our laws so businesses have the workers they need and families don’t wait decades to reunite. + +It’s not only the right thing to do—it’s the economically smart thing to do. + +That’s why immigration reform is supported by everyone from labor unions to religious leaders to the U.S. Chamber of Commerce. + +Let’s get it done once and for all. + +Advancing liberty and justice also requires protecting the rights of women. + +The constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before. + +If we want to go forward—not backward—we must protect access to health care. Preserve a woman’s right to choose. And let’s continue to advance maternal health care in America. + +And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. + +As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. + +While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. + +And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. + +So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. + +First, beat the opioid epidemic. 
+ +There is so much we can do. Increase funding for prevention, treatment, harm reduction, and recovery. + +Get rid of outdated rules that stop doctors from prescribing treatments. And stop the flow of illicit drugs by working with state and local law enforcement to go after traffickers. + +If you’re suffering from addiction, know you are not alone. I believe in recovery, and I celebrate the 23 million Americans in recovery. + +Second, let’s take on mental health. Especially among our children, whose lives and education have been turned upside down. + +The American Rescue Plan gave schools money to hire teachers and help students make up for lost learning. + +I urge every parent to make sure your school does just that. And we can all play a part—sign up to be a tutor or a mentor. + +Children were also struggling before the pandemic. Bullying, violence, trauma, and the harms of social media. + +As Frances Haugen, who is here with us tonight, has shown, we must hold social media platforms accountable for the national experiment they’re conducting on our children for profit. + +It’s time to strengthen privacy protections, ban targeted advertising to children, demand tech companies stop collecting personal data on our children. + +And let’s get all Americans the mental health services they need. More people they can turn to for help, and full parity between physical and mental health care. + +Third, support our veterans. + +Veterans are the best of us. + +I’ve always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home. + +My administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free. + +Our troops in Iraq and Afghanistan faced many dangers. + +One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. + +When they came home, many of the world’s fittest and best trained warriors were never the same. + +Headaches. Numbness. Dizziness. + +A cancer that would put them in a flag-draped coffin. + +I know. + +One of those soldiers was my son Major Beau Biden. + +We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. + +But I’m committed to finding out everything we can. + +Committed to military families like Danielle Robinson from Ohio. + +The widow of Sergeant First Class Heath Robinson. + +He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. + +Stationed near Baghdad, just yards from burn pits the size of football fields. + +Heath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter. + +But cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. + +Danielle says Heath was a fighter to the very end. + +He didn’t know how to stop fighting, and neither did she. + +Through her pain she found purpose to demand we do better. + +Tonight, Danielle—we are. + +The VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits. + +And tonight, I’m announcing we’re expanding eligibility to veterans suffering from nine respiratory cancers. + +I’m also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve. 
+ +And fourth, let’s end cancer as we know it. + +This is personal to me and Jill, to Kamala, and to so many of you. + +Cancer is the #2 cause of death in America–second only to heart disease. + +Last month, I announced our plan to supercharge +the Cancer Moonshot that President Obama asked me to lead six years ago. + +Our goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases. + +More support for patients and families. + +To get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. + +It’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. + +ARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. + +A unity agenda for the nation. + +We can do this. + +My fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. + +In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. + +We have fought for freedom, expanded liberty, defeated totalitarianism and terror. + +And built the strongest, freest, and most prosperous nation the world has ever known. + +Now is the hour. + +Our moment of responsibility. + +Our test of resolve and conscience, of history itself. + +It is in this moment that our character is formed. Our purpose is found. Our future is forged. + +Well I know this nation. + +We will meet the test. + +To protect freedom and liberty, to expand fairness and opportunity. + +We will save democracy. + +As hard as these times have been, I am more optimistic about America today than I have been my whole life. + +Because I see the future that is within our grasp. + +Because I know there is simply nothing beyond our capacity. + +We are the only nation on Earth that has always turned every crisis we have faced into an opportunity. + +The only nation that can be defined by a single word: possibilities. + +So on this night, in our 245th year as a nation, I have come to report on the State of the Union. + +And my report is this: the State of the Union is strong—because you, the American people, are strong. + +We are stronger today than we were a year ago. + +And we will be stronger a year from now than we are today. + +Now is our moment to meet and overcome the challenges of our time. + +And we will, as one people. + +One America. + +The United States of America. + +May God bless you all. May God protect our troops. 
\ No newline at end of file diff --git a/packages/langchain/test/agents/tools_test.dart b/packages/langchain/test/agents/tools_test.dart new file mode 100644 index 00000000..8e2d7852 --- /dev/null +++ b/packages/langchain/test/agents/tools_test.dart @@ -0,0 +1,224 @@ +@TestOn('vm') +@Timeout(Duration(minutes: 50)) +library; // Uses dart:io + +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; +import 'package:langchain_openai/langchain_openai.dart'; +import 'package:meta/meta.dart'; +import 'package:test/test.dart'; + +void main() { + late BaseChatModel llm; + const defaultOllamaModel = 'llama3-groq-tool-use'; + const defaultOpenAIModel = 'gpt-4o-mini'; + + group('ChatToolsAgent using Ollama tests', () { + setUp(() async { + llm = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: defaultOllamaModel, + temperature: 0, + tools: [CalculatorTool(), searchTool], + ), + ); + }); + + test('Test ChatToolsAgent with calculator tool', () async { + await testAgentWithCalculator(llm); + }); + + test('Test ToolsAgent with messages memory', () async { + await testMemory(llm, returnMessages: true); + }); + + test('Test ToolsAgent with string memory throws error', () async { + expect( + () async => testMemory(llm, returnMessages: false), + throwsA(isA()), + ); + }); + + test('Test ToolsAgent LCEL equivalent using Ollama', () async { + final res = + await testLCDLEquivalent(llm: llm, tool: CalculatorTool()).invoke({ + 'input': 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + }); + expect(res['output'], contains('4.88')); + }); + }); + + group('ChatToolsAgent using OpenAi tests', () { + setUp(() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + llm = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: defaultOpenAIModel, + tools: [CalculatorTool(), searchTool], + ), + ); + }); + + test('Test ChatToolsAgent with calculator tool', () async { + await testAgentWithCalculator(llm); + }); + + test('Test ToolsAgent with messages memory', () async { + await testMemory(llm, returnMessages: true); + }); + + test('Test ToolsAgent with string memory throws error', () async { + expect( + () async => testMemory(llm, returnMessages: false), + throwsA(isA()), + ); + }); + + test('Test ToolsAgent LCEL equivalent using OpenAi', () async { + final res = + await testLCDLEquivalent(llm: llm, tool: CalculatorTool()).invoke({ + 'input': 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + }); + expect(res['output'], contains('4.88')); + }); + }); +} + +Future testAgentWithCalculator( + BaseChatModel llm, +) async { + final agent = ToolsAgent.fromLLMAndTools( + llm: llm, + ); + final executor = AgentExecutor(agent: agent); + final res = await executor.run( + 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + ); + expect(res, contains('4.885')); +} + +Future testMemory( + BaseChatModel llm, { + required final bool returnMessages, +}) async { + final memory = ConversationBufferMemory(returnMessages: returnMessages); + final agent = ToolsAgent.fromLLMAndTools( + llm: llm, + memory: memory, + ); + + final executor = AgentExecutor(agent: agent); + + final res1 = await executor.run( + 'Search for cat names. 
Return only 3 results.', + ); + + expect(res1, contains('AAA')); + expect(res1, contains('BBB')); + expect(res1, contains('CCC')); + expect(res1, isNot(contains('DDD'))); + + final res2 = await executor.run( + 'How many results did the search return? Respond with a number.', + ); + expect(res2, contains('3')); + expect(res2, isNot(contains('1'))); + expect(res2, isNot(contains('2'))); + expect(res2, isNot(contains('4'))); + + final res3 = await executor.run('What was the last result?'); + expect(res3, contains('CCC')); +} + +AgentExecutor testLCDLEquivalent({ + required BaseChatModel llm, + required Tool tool, +}) { + final prompt = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are a helpful assistant'), + (ChatMessageType.human, '{input}'), + (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), + ]); + + final agent = Agent.fromRunnable( + Runnable.mapInput( + (AgentPlanInput planInput) => { + 'input': planInput.inputs['input'], + 'agent_scratchpad': planInput.intermediateSteps + .map((s) { + return s.action.messageLog + + [ + ChatMessage.tool( + toolCallId: s.action.id, + content: s.observation, + ), + ]; + }) + .expand((m) => m) + .toList(growable: false), + }, + ).pipe(prompt).pipe(llm).pipe(const ToolsAgentOutputParser()), + tools: [tool], + ); + + return AgentExecutor(agent: agent); +} + +@immutable +class _SearchInput { + const _SearchInput({ + required this.query, + required this.n, + }); + + final String query; + final int n; + + _SearchInput.fromJson(final Map json) + : this( + query: json['query'] as String, + n: json['n'] as int, + ); + + @override + bool operator ==(covariant _SearchInput other) => + identical(this, other) || query == other.query && n == other.n; + + @override + int get hashCode => query.hashCode ^ n.hashCode; +} + +final searchTool = Tool.fromFunction<_SearchInput, String>( + name: 'search', + description: 'Tool for searching the web.', + inputJsonSchema: const { + 'type': 'object', + 'properties': { + 'query': { + 'type': 'string', + 'description': 'The query to search for', + }, + 'n': { + 'type': 'integer', + 'description': 'The number of results to return', + }, + }, + 'required': ['query'], + }, + func: (final _SearchInput toolInput) async { + final n = toolInput.n; + final res = List.generate( + n, + (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}', + ); + return 'Results:\n${res.join('\n')}'; + }, + getInputFromJson: _SearchInput.fromJson, +); From d338981046a58e7578c6c306a97f5f89610c7c19 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 21 Aug 2024 20:18:48 +0200 Subject: [PATCH 098/251] feat: Deprecate OpenAIToolsAgent in favour of ToolsAgent (#532) --- docs/_sidebar.md | 1 - .../agents/agent_types/openai_tools_agent.md | 173 ------------------ docs/modules/agents/agents.md | 4 +- docs/modules/agents/tools/calculator.md | 2 +- docs/modules/agents/tools/openai_dall_e.md | 2 +- .../agent_types/openai_tools_agent.dart | 150 --------------- .../bin/modules/agents/tools/calculator.dart | 2 +- .../modules/agents/tools/openai_dalle.dart | 2 +- ...{tools_test.dart => tools_agent_test.dart} | 4 +- .../lib/src/tools/calculator.dart | 2 +- .../langchain_core/lib/src/prompts/types.dart | 2 +- .../test/runnables/map_test.dart | 5 +- .../src/chat_models/google_ai/mappers.dart | 3 +- .../google_ai/google_ai_embeddings.dart | 3 +- packages/langchain_openai/README.md | 2 - .../langchain_openai/lib/fix_data/fix.yaml | 19 ++ .../lib/src/agents/tools.dart | 18 +- .../lib/src/tools/dall_e.dart | 2 +- 
.../test/agents/tools_test.dart | 1 + .../test/tools/dall_e_test.dart | 4 +- 20 files changed, 55 insertions(+), 346 deletions(-) delete mode 100644 docs/modules/agents/agent_types/openai_tools_agent.md delete mode 100644 examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart rename packages/langchain/test/agents/{tools_test.dart => tools_agent_test.dart} (98%) create mode 100644 packages/langchain_openai/lib/fix_data/fix.yaml diff --git a/docs/_sidebar.md b/docs/_sidebar.md index 92d4f394..a5572f4d 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -122,7 +122,6 @@ - [Agents](/modules/agents/agents.md) - [Agent types](/modules/agents/agent_types/agent_types.md) - [Tools Agent](/modules/agents/agent_types/tools_agent.md) - - [OpenAI Tools Agent](/modules/agents/agent_types/openai_tools_agent.md) - [Tools](/modules/agents/tools/tools.md) - [Calculator](/modules/agents/tools/calculator.md) - [DALL-E Image Generator](/modules/agents/tools/openai_dall_e.md) diff --git a/docs/modules/agents/agent_types/openai_tools_agent.md b/docs/modules/agents/agent_types/openai_tools_agent.md deleted file mode 100644 index db68921e..00000000 --- a/docs/modules/agents/agent_types/openai_tools_agent.md +++ /dev/null @@ -1,173 +0,0 @@ -# OpenAI tools - -Certain OpenAI models (like `gpt-3.5-turbo` and `gpt-4`) have been -fine-tuned to detect when a tool should to be called and respond with the -inputs that should be passed to the tool. In an API call, you can describe -tools and have the model intelligently choose to output a JSON object -containing arguments to call those tools. The goal of the OpenAI Function -APIs is to more reliably return valid and useful tool calls than a generic -text completion or chat API. - -The OpenAI Tools Agent is designed to work with these models. - -> **Note**: Must be used with an [OpenAI Tools](https://platform.openai.com/docs/guides/function-calling) model. - -```dart -final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - model: 'gpt-4-turbo', - temperature: 0, - ), -); -final tool = CalculatorTool(); -final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); -final executor = AgentExecutor(agent: agent); -final res = await executor.run('What is 40 raised to the 0.43 power? '); -print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' -``` - -You can easily call your own functions by wrapping them in a `Tool`. You can also add memory to the agent by passing it when creating the agent. - -Let's see an example of how to do this. - -First let's create a class that will be the input for our tool. - -```dart -class SearchInput { - const SearchInput({ - required this.query, - required this.n, - }); - - final String query; - final int n; - - SearchInput.fromJson(final Map json) - : this( - query: json['query'] as String, - n: json['n'] as int, - ); -} -``` - -Now let's define the tool: - -```dart -final tool = Tool.fromFunction( - name: 'search', - description: 'Tool for searching the web.', - inputJsonSchema: const { - 'type': 'object', - 'properties': { - 'query': { - 'type': 'string', - 'description': 'The query to search for', - }, - 'n': { - 'type': 'number', - 'description': 'The number of results to return', - }, - }, - 'required': ['query'], - }, - func: callYourSearchFunction, - getInputFromJson: SearchInput.fromJson, -); -``` - -Notice that we need to provide a function that converts the JSON input that the model will send to our tool into the input class that we defined. 
- -The tool will call `callYourSearchFunction` function with the parsed input. For simplicity, we will just mock the search function. -```dart -String callYourSearchFunction(final SearchInput input) { - return 'Results:\n${List.generate(input.n, (final i) => 'Result ${i + 1}').join('\n')}'; -} -``` - -Now we can create the agent and run it. - -```dart -final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions(temperature: 0), -); - -final memory = ConversationBufferMemory(returnMessages: true); -final agent = OpenAIToolsAgent.fromLLMAndTools( - llm: llm, - tools: [tool], - memory: memory, -); - -final executor = AgentExecutor(agent: agent); - -final res1 = await executor.run( - 'Search for cats. Return only 3 results.', -); -print(res1); -// Here are 3 search results for "cats": -// 1. Result 1 -// 2. Result 2 -// 3. Result 3 -``` - -## Using LangChain Expression Language (LCEL) - -You can replicate the functionality of the OpenAI Functions Agent by using the LangChain Expression Language (LCEL) directly. - -```dart -final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - -final prompt = ChatPromptTemplate.fromTemplates(const [ - (ChatMessageType.system, 'You are a helpful assistant'), - (ChatMessageType.human, '{input}'), - (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), -]); - -final tool = CalculatorTool(); - -final model = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - temperature: 0, - tools: [tool], - ), -); - -const outputParser = OpenAIToolsAgentOutputParser(); - -List buildScratchpad(final List intermediateSteps) { - return intermediateSteps - .map((final s) { - return s.action.messageLog + - [ - ChatMessage.tool( - toolCallId: s.action.id, - content: s.observation, - ), - ]; - }) - .expand((final m) => m) - .toList(growable: false); -} - -final agent = Agent.fromRunnable( - Runnable.mapInput( - (final AgentPlanInput planInput) => { - 'input': planInput.inputs['input'], - 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), - }, - ).pipe(prompt).pipe(model).pipe(outputParser), - tools: [tool], -); -final executor = AgentExecutor(agent: agent); - -final res = await executor.invoke({ - 'input': 'What is 40 raised to the 0.43 power?', -}); -print(res['output']); -// 40 raised to the power of 0.43 is approximately 4.88524. -``` - -In this way, you can create your own custom agents with full control over their behavior. diff --git a/docs/modules/agents/agents.md b/docs/modules/agents/agents.md index ab56353c..78004d19 100644 --- a/docs/modules/agents/agents.md +++ b/docs/modules/agents/agents.md @@ -75,7 +75,7 @@ First, let's load the language model we're going to use to control the agent. ```dart final llm = ChatOpenAI( apiKey: openAiKey, - defaultOptions: const ChatOpenAIOptions(temperature: 0), + defaultOptions: ChatOpenAIOptions(temperature: 0), ); ``` @@ -91,7 +91,7 @@ Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use. ```dart -final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); ``` Now let's create the agent executor and test it out! 
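A minimal sketch of that next step (assuming the `llm`, `tools`, and `agent` variables from the snippets above; the question and printed output are only illustrative, not the guide's actual example):

```dart
// Inside an async function. Assumes `agent` was built with
// ToolsAgent.fromLLMAndTools(llm: llm, tools: tools) as shown above.
final executor = AgentExecutor(agent: agent);

// Replace the question with one that exercises the tools you configured.
final res = await executor.run(
  'What is 40 raised to the 0.43 power? Return the result with 3 decimals.',
);
print(res); // e.g. '40 raised to the power of 0.43 is approximately 4.885'
```

`AgentExecutor.run` keeps invoking the agent and its tools until a final answer is produced, which is the same pattern used by the calculator and DALL·E tool examples elsewhere in these docs.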
diff --git a/docs/modules/agents/tools/calculator.md b/docs/modules/agents/tools/calculator.md index 0847f2eb..fe9f127c 100644 --- a/docs/modules/agents/tools/calculator.md +++ b/docs/modules/agents/tools/calculator.md @@ -14,7 +14,7 @@ final llm = ChatOpenAI( ), ); final tool = CalculatorTool(); -final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); +final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); final executor = AgentExecutor(agent: agent); final res = await executor.run('What is 40 raised to the 0.43 power? '); print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' diff --git a/docs/modules/agents/tools/openai_dall_e.md b/docs/modules/agents/tools/openai_dall_e.md index 9d30914b..426f4d89 100644 --- a/docs/modules/agents/tools/openai_dall_e.md +++ b/docs/modules/agents/tools/openai_dall_e.md @@ -18,7 +18,7 @@ final tools = [ CalculatorTool(), OpenAIDallETool(apiKey: openAiKey), ]; -final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); final executor = AgentExecutor(agent: agent); final res = await executor.run( 'Calculate the result of 40 raised to the power of 0.43 and generate a funny illustration with it. ' diff --git a/examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart b/examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart deleted file mode 100644 index 16d0b44f..00000000 --- a/examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart +++ /dev/null @@ -1,150 +0,0 @@ -// ignore_for_file: avoid_print, unreachable_from_main -import 'dart:io'; - -import 'package:langchain/langchain.dart'; -import 'package:langchain_community/langchain_community.dart'; -import 'package:langchain_openai/langchain_openai.dart'; - -void main() async { - await _openAIToolsAgent(); - await _openAIToolsAgentCustomToolsMemory(); - await _openAIToolsAgentLCEL(); -} - -Future _openAIToolsAgent() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', - temperature: 0, - ), - ); - final tool = CalculatorTool(); - final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); - final executor = AgentExecutor(agent: agent); - final res = await executor.run('What is 40 raised to the 0.43 power? '); - print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' -} - -Future _openAIToolsAgentCustomToolsMemory() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - - final tool = Tool.fromFunction( - name: 'search', - description: 'Tool for searching the web.', - inputJsonSchema: const { - 'type': 'object', - 'properties': { - 'query': { - 'type': 'string', - 'description': 'The query to search for', - }, - 'n': { - 'type': 'number', - 'description': 'The number of results to return', - }, - }, - 'required': ['query'], - }, - func: callYourSearchFunction, - getInputFromJson: SearchInput.fromJson, - ); - - final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions(temperature: 0), - ); - - final memory = ConversationBufferMemory(returnMessages: true); - final agent = OpenAIToolsAgent.fromLLMAndTools( - llm: llm, - tools: [tool], - memory: memory, - ); - - final executor = AgentExecutor(agent: agent); - - final res1 = await executor.run( - 'Search for cats. 
Return only 3 results.', - ); - print(res1); - // Here are 3 search results for "cats": - // 1. Result 1 - // 2. Result 2 - // 3. Result 3 -} - -class SearchInput { - const SearchInput({ - required this.query, - required this.n, - }); - - final String query; - final int n; - - SearchInput.fromJson(final Map json) - : this( - query: json['query'] as String, - n: json['n'] as int, - ); -} - -String callYourSearchFunction(final SearchInput input) { - return 'Results:\n${List.generate(input.n, (final i) => 'Result ${i + 1}').join('\n')}'; -} - -Future _openAIToolsAgentLCEL() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - - final prompt = ChatPromptTemplate.fromTemplates(const [ - (ChatMessageType.system, 'You are a helpful assistant'), - (ChatMessageType.human, '{input}'), - (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), - ]); - - final tool = CalculatorTool(); - - final model = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - temperature: 0, - tools: [tool], - ), - ); - - const outputParser = OpenAIToolsAgentOutputParser(); - - List buildScratchpad(final List intermediateSteps) { - return intermediateSteps - .map((final s) { - return s.action.messageLog + - [ - ChatMessage.tool( - toolCallId: s.action.id, - content: s.observation, - ), - ]; - }) - .expand((final m) => m) - .toList(growable: false); - } - - final agent = Agent.fromRunnable( - Runnable.mapInput( - (final AgentPlanInput planInput) => { - 'input': planInput.inputs['input'], - 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), - }, - ).pipe(prompt).pipe(model).pipe(outputParser), - tools: [tool], - ); - final executor = AgentExecutor(agent: agent); - - final res = await executor.invoke({ - 'input': 'What is 40 raised to the 0.43 power?', - }); - print(res['output']); - // 40 raised to the power of 0.43 is approximately 4.88524. -} diff --git a/examples/docs_examples/bin/modules/agents/tools/calculator.dart b/examples/docs_examples/bin/modules/agents/tools/calculator.dart index 5d92dc27..acab2d65 100644 --- a/examples/docs_examples/bin/modules/agents/tools/calculator.dart +++ b/examples/docs_examples/bin/modules/agents/tools/calculator.dart @@ -15,7 +15,7 @@ void main() async { ), ); final tool = CalculatorTool(); - final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); + final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); final executor = AgentExecutor(agent: agent); final res = await executor.run('What is 40 raised to the 0.43 power? '); print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' diff --git a/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart b/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart index f62d7bde..7144ea82 100644 --- a/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart +++ b/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart @@ -18,7 +18,7 @@ void main() async { CalculatorTool(), OpenAIDallETool(apiKey: openAiKey), ]; - final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); + final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); final executor = AgentExecutor(agent: agent); final res = await executor.run( 'Calculate the result of 40 raised to the power of 0.43 and generate a funny illustration with it. 
' diff --git a/packages/langchain/test/agents/tools_test.dart b/packages/langchain/test/agents/tools_agent_test.dart similarity index 98% rename from packages/langchain/test/agents/tools_test.dart rename to packages/langchain/test/agents/tools_agent_test.dart index 8e2d7852..e879ba88 100644 --- a/packages/langchain/test/agents/tools_test.dart +++ b/packages/langchain/test/agents/tools_agent_test.dart @@ -16,13 +16,15 @@ void main() { const defaultOllamaModel = 'llama3-groq-tool-use'; const defaultOpenAIModel = 'gpt-4o-mini'; - group('ChatToolsAgent using Ollama tests', () { + group('ChatToolsAgent using Ollama tests', + skip: Platform.environment.containsKey('CI'), () { setUp(() async { llm = ChatOllama( defaultOptions: ChatOllamaOptions( model: defaultOllamaModel, temperature: 0, tools: [CalculatorTool(), searchTool], + keepAlive: 1, ), ); }); diff --git a/packages/langchain_community/lib/src/tools/calculator.dart b/packages/langchain_community/lib/src/tools/calculator.dart index 9f41a130..26becb93 100644 --- a/packages/langchain_community/lib/src/tools/calculator.dart +++ b/packages/langchain_community/lib/src/tools/calculator.dart @@ -14,7 +14,7 @@ import 'package:math_expressions/math_expressions.dart'; /// temperature: 0, /// ); /// final tool = CalculatorTool(); -/// final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); +/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); /// final executor = AgentExecutor(agent: agent); /// final res = await executor.run('What is 40 raised to the 0.43 power? '); /// print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' diff --git a/packages/langchain_core/lib/src/prompts/types.dart b/packages/langchain_core/lib/src/prompts/types.dart index c2a9474b..c68f0979 100644 --- a/packages/langchain_core/lib/src/prompts/types.dart +++ b/packages/langchain_core/lib/src/prompts/types.dart @@ -145,7 +145,7 @@ class ChatPromptValue implements PromptValue { return message.concat(otherMessage); } }) - .whereNotNull() + .nonNulls .toList(growable: false), ), }; diff --git a/packages/langchain_core/test/runnables/map_test.dart b/packages/langchain_core/test/runnables/map_test.dart index 98a4a3ff..e65dc73a 100644 --- a/packages/langchain_core/test/runnables/map_test.dart +++ b/packages/langchain_core/test/runnables/map_test.dart @@ -1,5 +1,4 @@ // ignore_for_file: unused_element -import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/llms.dart'; import 'package:langchain_core/output_parsers.dart'; @@ -43,11 +42,11 @@ void main() { final left = streamList .map((final it) => it['left']) // - .whereNotNull() + .nonNulls .join(); final right = streamList .map((final it) => it['right']) // - .whereNotNull() + .nonNulls .join(); expect(left, 'Hello world!'); diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart index 106bf60b..521921ac 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart @@ -1,7 +1,6 @@ // ignore_for_file: public_member_api_docs import 'dart:convert'; -import 'package:collection/collection.dart'; import 'package:google_generative_ai/google_generative_ai.dart' as g; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/language_models.dart'; @@ -96,7 +95,7 @@ extension GenerateContentResponseMapper on 
g.GenerateContentResponse { _ => throw AssertionError('Unknown part type: $p'), }, ) - .whereNotNull() + .nonNulls .join('\n'), toolCalls: candidate.content.parts .whereType() diff --git a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart index b5996abd..385f1088 100644 --- a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart +++ b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart @@ -1,4 +1,3 @@ -import 'package:collection/collection.dart' show IterableNullableExtension; import 'package:google_generative_ai/google_generative_ai.dart' show Content, EmbedContentRequest, GenerativeModel, TaskType; import 'package:http/http.dart' as http; @@ -175,7 +174,7 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { ); return data.embeddings .map((final p) => p.values) - .whereNotNull() + .nonNulls .toList(growable: false); }), ); diff --git a/packages/langchain_openai/README.md b/packages/langchain_openai/README.md index b7a080da..39073b3b 100644 --- a/packages/langchain_openai/README.md +++ b/packages/langchain_openai/README.md @@ -20,8 +20,6 @@ OpenAI module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart * `OpenAIQAWithStructureChain` a chain that answer questions in the specified structure. * `OpenAIQAWithSourcesChain`: a chain that answer questions providing sources. -- Agents: - * `OpenAIToolsAgent`: an agent driven by OpenAIs Tools powered API. - Tools: * `OpenAIDallETool`: a tool that uses DallE to generate images from text. diff --git a/packages/langchain_openai/lib/fix_data/fix.yaml b/packages/langchain_openai/lib/fix_data/fix.yaml new file mode 100644 index 00000000..5db14fd0 --- /dev/null +++ b/packages/langchain_openai/lib/fix_data/fix.yaml @@ -0,0 +1,19 @@ +version: 1 + +transforms: + - title: "Migrate to 'ToolsAgent'" + date: 2024-08-21 + element: + uris: ['langchain_openai.dart', 'src/agents/tools.dart'] + class: 'OpenAIToolsAgent' + changes: + - kind: 'rename' + newName: 'ToolsAgent' + - title: "Migrate to 'ToolsAgentOutputParser'" + date: 2024-08-21 + element: + uris: ['langchain_openai.dart', 'src/agents/tools.dart'] + class: 'OpenAIToolsAgentOutputParser' + changes: + - kind: 'rename' + newName: 'ToolsAgentOutputParser' diff --git a/packages/langchain_openai/lib/src/agents/tools.dart b/packages/langchain_openai/lib/src/agents/tools.dart index 2867427d..a1a13583 100644 --- a/packages/langchain_openai/lib/src/agents/tools.dart +++ b/packages/langchain_openai/lib/src/agents/tools.dart @@ -1,3 +1,4 @@ +// ignore_for_file: deprecated_member_use_from_same_package import 'package:langchain_core/agents.dart'; import 'package:langchain_core/chains.dart'; import 'package:langchain_core/chat_models.dart'; @@ -17,6 +18,11 @@ const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( ); /// {@template openai_tools_agent} +/// > Note: This class is deprecated. Use `ToolsAgent` (from the `langchain` +/// > package instead). It works with the same API as this class, but can be +/// > used with any provider that supports tool calling. +/// > You can run `dart fix --apply` to automatically update your code. +/// /// An Agent driven by OpenAI's Tools powered API. 
/// /// Example: @@ -27,7 +33,7 @@ const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( /// temperature: 0, /// ); /// final tools = [CalculatorTool()]; -/// final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); /// final executor = AgentExecutor(agent: agent); /// final res = await executor.run('What is 40 raised to the 0.43 power? '); /// ``` @@ -69,8 +75,10 @@ const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( /// template if you only need to customize the system message or add some /// extra messages. /// {@endtemplate} +@Deprecated('Use ToolsAgent instead') class OpenAIToolsAgent extends BaseSingleActionAgent { /// {@macro openai_functions_agent} + @Deprecated('Use ToolsAgent instead') OpenAIToolsAgent({ required this.llmChain, required super.tools, @@ -118,6 +126,7 @@ class OpenAIToolsAgent extends BaseSingleActionAgent { /// the first in the prompt. Default: "You are a helpful AI assistant". /// - [extraPromptMessages] prompt messages that will be placed between the /// system message and the input from the agent. + @Deprecated('Use ToolsAgent.fromLLMAndTools() instead') factory OpenAIToolsAgent.fromLLMAndTools({ required final ChatOpenAI llm, required final List tools, @@ -241,14 +250,21 @@ class OpenAIToolsAgent extends BaseSingleActionAgent { } /// {@template openai_tools_agent_output_parser} +/// > Note: This class is deprecated. Use `ToolsAgentOutputParser` (from the +/// > `langchain` package instead). It is equivalent to this class, but +/// > prepared to work with the `ToolsAgent`. +/// > You can run `dart fix --apply` to automatically update your code. +/// /// Parser for [OpenAIToolsAgent]. /// /// It parses the output of the LLM and returns the corresponding /// [BaseAgentAction] to be executed. 
/// {@endtemplate} +@Deprecated('Use ToolsAgentOutputParser instead') class OpenAIToolsAgentOutputParser extends BaseOutputParser> { /// {@macro openai_tools_agent_output_parser} + @Deprecated('Use ToolsAgentOutputParser instead') const OpenAIToolsAgentOutputParser() : super(defaultOptions: const OutputParserOptions()); diff --git a/packages/langchain_openai/lib/src/tools/dall_e.dart b/packages/langchain_openai/lib/src/tools/dall_e.dart index 3ce66fd9..aefba7b9 100644 --- a/packages/langchain_openai/lib/src/tools/dall_e.dart +++ b/packages/langchain_openai/lib/src/tools/dall_e.dart @@ -34,7 +34,7 @@ export 'package:openai_dart/openai_dart.dart' /// ), /// ), /// ]; -/// final agent = OpenAIToolsAgent.fromLLMAndTools( +/// final agent = ToolsAgent.fromLLMAndTools( /// llm: llm, /// tools: tools, /// ); diff --git a/packages/langchain_openai/test/agents/tools_test.dart b/packages/langchain_openai/test/agents/tools_test.dart index 57342631..c08f4979 100644 --- a/packages/langchain_openai/test/agents/tools_test.dart +++ b/packages/langchain_openai/test/agents/tools_test.dart @@ -1,3 +1,4 @@ +// ignore_for_file: deprecated_member_use_from_same_package @TestOn('vm') library; // Uses dart:io diff --git a/packages/langchain_openai/test/tools/dall_e_test.dart b/packages/langchain_openai/test/tools/dall_e_test.dart index 5a9aba09..7a8a8407 100644 --- a/packages/langchain_openai/test/tools/dall_e_test.dart +++ b/packages/langchain_openai/test/tools/dall_e_test.dart @@ -4,7 +4,7 @@ library; // Uses dart:io import 'dart:io'; -import 'package:langchain/langchain.dart' show AgentExecutor; +import 'package:langchain/langchain.dart' show AgentExecutor, ToolsAgent; import 'package:langchain_community/langchain_community.dart'; import 'package:langchain_core/tools.dart'; import 'package:langchain_openai/langchain_openai.dart'; @@ -62,7 +62,7 @@ void main() { ), ]; - final agent = OpenAIToolsAgent.fromLLMAndTools( + final agent = ToolsAgent.fromLLMAndTools( llm: llm, tools: tools, ); From c423b66b581f9040369c4ce5c68c48c4702701a6 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 21 Aug 2024 22:47:09 +0200 Subject: [PATCH 099/251] feat: Handle refusal in OpenAI's Structured Outputs API (#533) --- .../lib/src/chat_models/chat_openai.dart | 53 +------- .../lib/src/chat_models/mappers.dart | 128 ++++++++++++++---- .../lib/src/chat_models/types.dart | 22 +++ 3 files changed, 129 insertions(+), 74 deletions(-) diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index 0dc31168..c8a670f5 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -248,9 +248,10 @@ class ChatOpenAI extends BaseChatModel { final ChatOpenAIOptions? options, }) async { final completion = await _client.createChatCompletion( - request: _createChatCompletionRequest( + request: createChatCompletionRequest( input.toChatMessages(), options: options, + defaultOptions: defaultOptions, ), ); return completion.toChatResult(completion.id ?? 
_uuid.v4()); @@ -263,9 +264,10 @@ class ChatOpenAI extends BaseChatModel { }) { return _client .createChatCompletionStream( - request: _createChatCompletionRequest( + request: createChatCompletionRequest( input.toChatMessages(), options: options, + defaultOptions: defaultOptions, stream: true, ), ) @@ -275,53 +277,6 @@ class ChatOpenAI extends BaseChatModel { ); } - /// Creates a [CreateChatCompletionRequest] from the given input. - CreateChatCompletionRequest _createChatCompletionRequest( - final List messages, { - final ChatOpenAIOptions? options, - final bool stream = false, - }) { - final messagesDtos = messages.toChatCompletionMessages(); - final toolsDtos = - (options?.tools ?? defaultOptions.tools)?.toChatCompletionTool(); - final toolChoice = (options?.toolChoice ?? defaultOptions.toolChoice) - ?.toChatCompletionToolChoice(); - final responseFormatDto = - (options?.responseFormat ?? defaultOptions.responseFormat) - ?.toChatCompletionResponseFormat(); - final serviceTierDto = (options?.serviceTier ?? defaultOptions.serviceTier) - .toCreateChatCompletionRequestServiceTier(); - - return CreateChatCompletionRequest( - model: ChatCompletionModel.modelId( - options?.model ?? defaultOptions.model ?? defaultModel, - ), - messages: messagesDtos, - tools: toolsDtos, - toolChoice: toolChoice, - frequencyPenalty: - options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, - logitBias: options?.logitBias ?? defaultOptions.logitBias, - maxTokens: options?.maxTokens ?? defaultOptions.maxTokens, - n: options?.n ?? defaultOptions.n, - presencePenalty: - options?.presencePenalty ?? defaultOptions.presencePenalty, - responseFormat: responseFormatDto, - seed: options?.seed ?? defaultOptions.seed, - stop: (options?.stop ?? defaultOptions.stop) != null - ? ChatCompletionStop.listString(options?.stop ?? defaultOptions.stop!) - : null, - temperature: options?.temperature ?? defaultOptions.temperature, - topP: options?.topP ?? defaultOptions.topP, - parallelToolCalls: - options?.parallelToolCalls ?? defaultOptions.parallelToolCalls, - serviceTier: serviceTierDto, - user: options?.user ?? defaultOptions.user, - streamOptions: - stream ? const ChatCompletionStreamOptions(includeUsage: true) : null, - ); - } - /// Tokenizes the given prompt using tiktoken with the encoding used by the /// [model]. If an encoding model is specified in [encoding] field, that /// encoding is used instead. diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart index 5e9000c2..a2ea96f4 100644 --- a/packages/langchain_openai/lib/src/chat_models/mappers.dart +++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart @@ -6,8 +6,56 @@ import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/tools.dart'; import 'package:openai_dart/openai_dart.dart'; +import 'chat_openai.dart'; import 'types.dart'; +/// Creates a [CreateChatCompletionRequest] from the given input. +CreateChatCompletionRequest createChatCompletionRequest( + final List messages, { + required final ChatOpenAIOptions? options, + required final ChatOpenAIOptions defaultOptions, + final bool stream = false, +}) { + final messagesDtos = messages.toChatCompletionMessages(); + final toolsDtos = + (options?.tools ?? defaultOptions.tools)?.toChatCompletionTool(); + final toolChoice = (options?.toolChoice ?? defaultOptions.toolChoice) + ?.toChatCompletionToolChoice(); + final responseFormatDto = + (options?.responseFormat ?? 
defaultOptions.responseFormat) + ?.toChatCompletionResponseFormat(); + final serviceTierDto = (options?.serviceTier ?? defaultOptions.serviceTier) + .toCreateChatCompletionRequestServiceTier(); + + return CreateChatCompletionRequest( + model: ChatCompletionModel.modelId( + options?.model ?? defaultOptions.model ?? ChatOpenAI.defaultModel, + ), + messages: messagesDtos, + tools: toolsDtos, + toolChoice: toolChoice, + frequencyPenalty: + options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, + logitBias: options?.logitBias ?? defaultOptions.logitBias, + maxTokens: options?.maxTokens ?? defaultOptions.maxTokens, + n: options?.n ?? defaultOptions.n, + presencePenalty: options?.presencePenalty ?? defaultOptions.presencePenalty, + responseFormat: responseFormatDto, + seed: options?.seed ?? defaultOptions.seed, + stop: (options?.stop ?? defaultOptions.stop) != null + ? ChatCompletionStop.listString(options?.stop ?? defaultOptions.stop!) + : null, + temperature: options?.temperature ?? defaultOptions.temperature, + topP: options?.topP ?? defaultOptions.topP, + parallelToolCalls: + options?.parallelToolCalls ?? defaultOptions.parallelToolCalls, + serviceTier: serviceTierDto, + user: options?.user ?? defaultOptions.user, + streamOptions: + stream ? const ChatCompletionStreamOptions(includeUsage: true) : null, + ); +} + extension ChatMessageListMapper on List { List toChatCompletionMessages() { return map(_mapMessage).toList(growable: false); @@ -15,36 +63,36 @@ extension ChatMessageListMapper on List { ChatCompletionMessage _mapMessage(final ChatMessage msg) { return switch (msg) { - final SystemChatMessage systemChatMessage => ChatCompletionMessage.system( - content: systemChatMessage.content, - ), - final HumanChatMessage humanChatMessage => ChatCompletionMessage.user( - content: switch (humanChatMessage.content) { - final ChatMessageContentText c => _mapMessageContentString(c), - final ChatMessageContentImage c => - ChatCompletionUserMessageContent.parts( - [_mapMessageContentPartImage(c)], - ), - final ChatMessageContentMultiModal c => _mapMessageContentPart(c), - }, - ), - final AIChatMessage aiChatMessage => ChatCompletionMessage.assistant( - content: aiChatMessage.content, - toolCalls: aiChatMessage.toolCalls.isNotEmpty - ? 
aiChatMessage.toolCalls - .map(_mapMessageToolCall) - .toList(growable: false) - : null, - ), - final ToolChatMessage toolChatMessage => ChatCompletionMessage.tool( - toolCallId: toolChatMessage.toolCallId, - content: toolChatMessage.content, - ), + final SystemChatMessage msg => _mapSystemMessage(msg), + final HumanChatMessage msg => _mapHumanMessage(msg), + final AIChatMessage msg => _mapAIMessage(msg), + final ToolChatMessage msg => _mapToolMessage(msg), CustomChatMessage() => throw UnsupportedError('OpenAI does not support custom messages'), }; } + ChatCompletionMessage _mapSystemMessage( + final SystemChatMessage systemChatMessage, + ) { + return ChatCompletionMessage.system(content: systemChatMessage.content); + } + + ChatCompletionMessage _mapHumanMessage( + final HumanChatMessage humanChatMessage, + ) { + return ChatCompletionMessage.user( + content: switch (humanChatMessage.content) { + final ChatMessageContentText c => _mapMessageContentString(c), + final ChatMessageContentImage c => + ChatCompletionUserMessageContent.parts( + [_mapMessageContentPartImage(c)], + ), + final ChatMessageContentMultiModal c => _mapMessageContentPart(c), + }, + ); + } + ChatCompletionUserMessageContentString _mapMessageContentString( final ChatMessageContentText c, ) { @@ -105,6 +153,17 @@ extension ChatMessageListMapper on List { return ChatCompletionMessageContentParts(partsList); } + ChatCompletionMessage _mapAIMessage(final AIChatMessage aiChatMessage) { + return ChatCompletionMessage.assistant( + content: aiChatMessage.content, + toolCalls: aiChatMessage.toolCalls.isNotEmpty + ? aiChatMessage.toolCalls + .map(_mapMessageToolCall) + .toList(growable: false) + : null, + ); + } + ChatCompletionMessageToolCall _mapMessageToolCall( final AIChatMessageToolCall toolCall, ) { @@ -117,12 +176,26 @@ extension ChatMessageListMapper on List { ), ); } + + ChatCompletionMessage _mapToolMessage( + final ToolChatMessage toolChatMessage, + ) { + return ChatCompletionMessage.tool( + toolCallId: toolChatMessage.toolCallId, + content: toolChatMessage.content, + ); + } } extension CreateChatCompletionResponseMapper on CreateChatCompletionResponse { ChatResult toChatResult(final String id) { final choice = choices.first; final msg = choice.message; + + if (msg.refusal != null && msg.refusal!.isNotEmpty) { + throw OpenAIRefusalException(msg.refusal!); + } + return ChatResult( id: id, output: AIChatMessage( @@ -211,6 +284,11 @@ extension CreateChatCompletionStreamResponseMapper ChatResult toChatResult(final String id) { final choice = choices.firstOrNull; final delta = choice?.delta; + + if (delta?.refusal != null && delta!.refusal!.isNotEmpty) { + throw OpenAIRefusalException(delta.refusal!); + } + return ChatResult( id: id, output: AIChatMessage( diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index 0c80184f..6713a56f 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -379,3 +379,25 @@ enum ChatOpenAIServiceTier { /// uptime SLA and no latency guarantee. vDefault, } + +/// {@template openai_refusal_exception} +/// Exception thrown when OpenAI Structured Outputs API returns a refusal. +/// +/// When using OpenAI's Structured Outputs API with user-generated input, the +/// model may occasionally refuse to fulfill the request for safety reasons. 
+/// +/// See here for more on refusals: +/// https://platform.openai.com/docs/guides/structured-outputs/refusals +/// {@endtemplate} +class OpenAIRefusalException implements Exception { + /// {@macro openai_refusal_exception} + const OpenAIRefusalException(this.message); + + /// The refusal message. + final String message; + + @override + String toString() { + return 'OpenAIRefusalException: $message'; + } +} From bb2fc0aff96f69867fc30e5ae45e80be024886e5 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 21 Aug 2024 23:04:48 +0200 Subject: [PATCH 100/251] feat: Add log probabilities for refusal tokens in openai_dart (#534) --- .../schema/chat_completion_logprobs.dart | 6 +- ...hat_completion_stream_response_choice.dart | 6 +- .../src/generated/schema/function_object.dart | 2 +- .../src/generated/schema/schema.freezed.dart | 134 +++++++++++++++--- .../lib/src/generated/schema/schema.g.dart | 10 ++ packages/openai_dart/oas/openapi_curated.yaml | 24 ++-- 6 files changed, 144 insertions(+), 38 deletions(-) diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart index 36c84a12..4b4adc2c 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart @@ -17,6 +17,9 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { const factory ChatCompletionLogprobs({ /// A list of message content tokens with log probability information. @JsonKey(includeIfNull: false) List? content, + + /// A list of message refusal tokens with log probability information. + @JsonKey(includeIfNull: false) List? refusal, }) = _ChatCompletionLogprobs; /// Object construction from a JSON representation @@ -24,7 +27,7 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { _$ChatCompletionLogprobsFromJson(json); /// List of all property names of schema - static const List propertyNames = ['content']; + static const List propertyNames = ['content', 'refusal']; /// Perform validations on the schema property values String? validateSchema() { @@ -35,6 +38,7 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { Map toMap() { return { 'content': content, + 'refusal': refusal, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart index 39a46139..8d81379d 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart @@ -80,6 +80,9 @@ class ChatCompletionStreamResponseChoiceLogprobs const factory ChatCompletionStreamResponseChoiceLogprobs({ /// A list of message content tokens with log probability information. @JsonKey(includeIfNull: false) List? content, + + /// A list of message refusal tokens with log probability information. + @JsonKey(includeIfNull: false) List? 
refusal, }) = _ChatCompletionStreamResponseChoiceLogprobs; /// Object construction from a JSON representation @@ -88,7 +91,7 @@ class ChatCompletionStreamResponseChoiceLogprobs _$ChatCompletionStreamResponseChoiceLogprobsFromJson(json); /// List of all property names of schema - static const List propertyNames = ['content']; + static const List propertyNames = ['content', 'refusal']; /// Perform validations on the schema property values String? validateSchema() { @@ -99,6 +102,7 @@ class ChatCompletionStreamResponseChoiceLogprobs Map toMap() { return { 'content': content, + 'refusal': refusal, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/function_object.dart b/packages/openai_dart/lib/src/generated/schema/function_object.dart index 62426de3..ac87dc02 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_object.dart @@ -30,7 +30,7 @@ class FunctionObject with _$FunctionObject { /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when /// `strict` is `true`. Learn more about Structured Outputs in the - /// [function calling guide](docs/guides/function-calling). + /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). @JsonKey(includeIfNull: false) @Default(false) bool? strict, }) = _FunctionObject; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index d3269d89..76274966 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -6928,7 +6928,7 @@ mixin _$FunctionObject { /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when /// `strict` is `true`. Learn more about Structured Outputs in the - /// [function calling guide](docs/guides/function-calling). + /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). @JsonKey(includeIfNull: false) bool? get strict => throw _privateConstructorUsedError; @@ -7095,7 +7095,7 @@ class _$FunctionObjectImpl extends _FunctionObject { /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when /// `strict` is `true`. Learn more about Structured Outputs in the - /// [function calling guide](docs/guides/function-calling). + /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). @override @JsonKey(includeIfNull: false) final bool? strict; @@ -7172,7 +7172,7 @@ abstract class _FunctionObject extends FunctionObject { /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when /// `strict` is `true`. Learn more about Structured Outputs in the - /// [function calling guide](docs/guides/function-calling). + /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). 
@override @JsonKey(includeIfNull: false) bool? get strict; @@ -9041,6 +9041,11 @@ mixin _$ChatCompletionLogprobs { List? get content => throw _privateConstructorUsedError; + /// A list of message refusal tokens with log probability information. + @JsonKey(includeIfNull: false) + List? get refusal => + throw _privateConstructorUsedError; + /// Serializes this ChatCompletionLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; @@ -9058,8 +9063,9 @@ abstract class $ChatCompletionLogprobsCopyWith<$Res> { _$ChatCompletionLogprobsCopyWithImpl<$Res, ChatCompletionLogprobs>; @useResult $Res call( - {@JsonKey(includeIfNull: false) - List? content}); + {@JsonKey(includeIfNull: false) List? content, + @JsonKey(includeIfNull: false) + List? refusal}); } /// @nodoc @@ -9079,12 +9085,17 @@ class _$ChatCompletionLogprobsCopyWithImpl<$Res, @override $Res call({ Object? content = freezed, + Object? refusal = freezed, }) { return _then(_value.copyWith( content: freezed == content ? _value.content : content // ignore: cast_nullable_to_non_nullable as List?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } } @@ -9099,8 +9110,9 @@ abstract class _$$ChatCompletionLogprobsImplCopyWith<$Res> @override @useResult $Res call( - {@JsonKey(includeIfNull: false) - List? content}); + {@JsonKey(includeIfNull: false) List? content, + @JsonKey(includeIfNull: false) + List? refusal}); } /// @nodoc @@ -9119,12 +9131,17 @@ class __$$ChatCompletionLogprobsImplCopyWithImpl<$Res> @override $Res call({ Object? content = freezed, + Object? refusal = freezed, }) { return _then(_$ChatCompletionLogprobsImpl( content: freezed == content ? _value._content : content // ignore: cast_nullable_to_non_nullable as List?, + refusal: freezed == refusal + ? _value._refusal + : refusal // ignore: cast_nullable_to_non_nullable + as List?, )); } } @@ -9134,8 +9151,11 @@ class __$$ChatCompletionLogprobsImplCopyWithImpl<$Res> class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { const _$ChatCompletionLogprobsImpl( {@JsonKey(includeIfNull: false) - final List? content}) + final List? content, + @JsonKey(includeIfNull: false) + final List? refusal}) : _content = content, + _refusal = refusal, super._(); factory _$ChatCompletionLogprobsImpl.fromJson(Map json) => @@ -9155,9 +9175,23 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { return EqualUnmodifiableListView(value); } + /// A list of message refusal tokens with log probability information. + final List? _refusal; + + /// A list of message refusal tokens with log probability information. + @override + @JsonKey(includeIfNull: false) + List? 
get refusal { + final value = _refusal; + if (value == null) return null; + if (_refusal is EqualUnmodifiableListView) return _refusal; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + @override String toString() { - return 'ChatCompletionLogprobs(content: $content)'; + return 'ChatCompletionLogprobs(content: $content, refusal: $refusal)'; } @override @@ -9165,13 +9199,16 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { return identical(this, other) || (other.runtimeType == runtimeType && other is _$ChatCompletionLogprobsImpl && - const DeepCollectionEquality().equals(other._content, _content)); + const DeepCollectionEquality().equals(other._content, _content) && + const DeepCollectionEquality().equals(other._refusal, _refusal)); } @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_content)); + int get hashCode => Object.hash( + runtimeType, + const DeepCollectionEquality().hash(_content), + const DeepCollectionEquality().hash(_refusal)); /// Create a copy of ChatCompletionLogprobs /// with the given fields replaced by the non-null parameter values. @@ -9193,7 +9230,9 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { abstract class _ChatCompletionLogprobs extends ChatCompletionLogprobs { const factory _ChatCompletionLogprobs( {@JsonKey(includeIfNull: false) - final List? content}) = + final List? content, + @JsonKey(includeIfNull: false) + final List? refusal}) = _$ChatCompletionLogprobsImpl; const _ChatCompletionLogprobs._() : super._(); @@ -9205,6 +9244,11 @@ abstract class _ChatCompletionLogprobs extends ChatCompletionLogprobs { @JsonKey(includeIfNull: false) List? get content; + /// A list of message refusal tokens with log probability information. + @override + @JsonKey(includeIfNull: false) + List? get refusal; + /// Create a copy of ChatCompletionLogprobs /// with the given fields replaced by the non-null parameter values. @override @@ -10520,6 +10564,11 @@ mixin _$ChatCompletionStreamResponseChoiceLogprobs { List? get content => throw _privateConstructorUsedError; + /// A list of message refusal tokens with log probability information. + @JsonKey(includeIfNull: false) + List? get refusal => + throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamResponseChoiceLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; @@ -10540,8 +10589,9 @@ abstract class $ChatCompletionStreamResponseChoiceLogprobsCopyWith<$Res> { ChatCompletionStreamResponseChoiceLogprobs>; @useResult $Res call( - {@JsonKey(includeIfNull: false) - List? content}); + {@JsonKey(includeIfNull: false) List? content, + @JsonKey(includeIfNull: false) + List? refusal}); } /// @nodoc @@ -10562,12 +10612,17 @@ class _$ChatCompletionStreamResponseChoiceLogprobsCopyWithImpl<$Res, @override $Res call({ Object? content = freezed, + Object? refusal = freezed, }) { return _then(_value.copyWith( content: freezed == content ? _value.content : content // ignore: cast_nullable_to_non_nullable as List?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } } @@ -10583,8 +10638,9 @@ abstract class _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith<$Res> @override @useResult $Res call( - {@JsonKey(includeIfNull: false) - List? content}); + {@JsonKey(includeIfNull: false) List? content, + @JsonKey(includeIfNull: false) + List? 
refusal}); } /// @nodoc @@ -10603,12 +10659,17 @@ class __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res> @override $Res call({ Object? content = freezed, + Object? refusal = freezed, }) { return _then(_$ChatCompletionStreamResponseChoiceLogprobsImpl( content: freezed == content ? _value._content : content // ignore: cast_nullable_to_non_nullable as List?, + refusal: freezed == refusal + ? _value._refusal + : refusal // ignore: cast_nullable_to_non_nullable + as List?, )); } } @@ -10619,8 +10680,11 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl extends _ChatCompletionStreamResponseChoiceLogprobs { const _$ChatCompletionStreamResponseChoiceLogprobsImpl( {@JsonKey(includeIfNull: false) - final List? content}) + final List? content, + @JsonKey(includeIfNull: false) + final List? refusal}) : _content = content, + _refusal = refusal, super._(); factory _$ChatCompletionStreamResponseChoiceLogprobsImpl.fromJson( @@ -10641,9 +10705,23 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl return EqualUnmodifiableListView(value); } + /// A list of message refusal tokens with log probability information. + final List? _refusal; + + /// A list of message refusal tokens with log probability information. + @override + @JsonKey(includeIfNull: false) + List? get refusal { + final value = _refusal; + if (value == null) return null; + if (_refusal is EqualUnmodifiableListView) return _refusal; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + @override String toString() { - return 'ChatCompletionStreamResponseChoiceLogprobs(content: $content)'; + return 'ChatCompletionStreamResponseChoiceLogprobs(content: $content, refusal: $refusal)'; } @override @@ -10651,13 +10729,16 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl return identical(this, other) || (other.runtimeType == runtimeType && other is _$ChatCompletionStreamResponseChoiceLogprobsImpl && - const DeepCollectionEquality().equals(other._content, _content)); + const DeepCollectionEquality().equals(other._content, _content) && + const DeepCollectionEquality().equals(other._refusal, _refusal)); } @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_content)); + int get hashCode => Object.hash( + runtimeType, + const DeepCollectionEquality().hash(_content), + const DeepCollectionEquality().hash(_refusal)); /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs /// with the given fields replaced by the non-null parameter values. @@ -10683,7 +10764,9 @@ abstract class _ChatCompletionStreamResponseChoiceLogprobs extends ChatCompletionStreamResponseChoiceLogprobs { const factory _ChatCompletionStreamResponseChoiceLogprobs( {@JsonKey(includeIfNull: false) - final List? content}) = + final List? content, + @JsonKey(includeIfNull: false) + final List? refusal}) = _$ChatCompletionStreamResponseChoiceLogprobsImpl; const _ChatCompletionStreamResponseChoiceLogprobs._() : super._(); @@ -10696,6 +10779,11 @@ abstract class _ChatCompletionStreamResponseChoiceLogprobs @JsonKey(includeIfNull: false) List? get content; + /// A list of message refusal tokens with log probability information. + @override + @JsonKey(includeIfNull: false) + List? get refusal; + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs /// with the given fields replaced by the non-null parameter values. 
@override diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 01851e43..d03e9a18 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -816,6 +816,10 @@ _$ChatCompletionLogprobsImpl _$$ChatCompletionLogprobsImplFromJson( ?.map((e) => ChatCompletionTokenLogprob.fromJson(e as Map)) .toList(), + refusal: (json['refusal'] as List?) + ?.map((e) => + ChatCompletionTokenLogprob.fromJson(e as Map)) + .toList(), ); Map _$$ChatCompletionLogprobsImplToJson( @@ -829,6 +833,7 @@ Map _$$ChatCompletionLogprobsImplToJson( } writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); + writeNotNull('refusal', instance.refusal?.map((e) => e.toJson()).toList()); return val; } @@ -958,6 +963,10 @@ _$ChatCompletionStreamResponseChoiceLogprobsImpl ?.map((e) => ChatCompletionTokenLogprob.fromJson( e as Map)) .toList(), + refusal: (json['refusal'] as List?) + ?.map((e) => ChatCompletionTokenLogprob.fromJson( + e as Map)) + .toList(), ); Map _$$ChatCompletionStreamResponseChoiceLogprobsImplToJson( @@ -971,6 +980,7 @@ Map _$$ChatCompletionStreamResponseChoiceLogprobsImplToJson( } writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); + writeNotNull('refusal', instance.refusal?.map((e) => e.toJson()).toList()); return val; } diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index ba1d409c..9c474cec 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -2233,7 +2233,7 @@ components: properties: name: type: string - description: | + description: | The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. description: @@ -2246,11 +2246,11 @@ components: type: boolean nullable: true default: false - description: | + description: | Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the - [function calling guide](docs/guides/function-calling). + [function calling guide](](https://platform.openai.com/docs/guides/function-calling). required: - name FunctionParameters: @@ -2311,12 +2311,12 @@ components: properties: name: type: string - description: | + description: | The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. description: type: string - description: | + description: | A description of what the response format is for, used by the model to determine how to respond in the format. schema: @@ -2328,7 +2328,7 @@ components: type: boolean nullable: true default: false - description: | + description: | Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the @@ -2485,12 +2485,12 @@ components: items: $ref: "#/components/schemas/ChatCompletionTokenLogprob" nullable: true - refusal: - description: A list of message refusal tokens with log probability information. 
- type: array - items: - $ref: "#/components/schemas/ChatCompletionTokenLogprob" - nullable: true + refusal: + description: A list of message refusal tokens with log probability information. + type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true ChatCompletionTokenLogprob: type: object description: Log probability information for a token. From e15e411e26d3849dd39500937fc68af7baf26783 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 21 Aug 2024 23:09:12 +0200 Subject: [PATCH 101/251] feat: Include logprobs in result metadata from ChatOpenAI (#535) --- packages/langchain_openai/lib/src/chat_models/mappers.dart | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart index a2ea96f4..3b23ee8c 100644 --- a/packages/langchain_openai/lib/src/chat_models/mappers.dart +++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart @@ -209,6 +209,7 @@ extension CreateChatCompletionResponseMapper on CreateChatCompletionResponse { 'model': model, 'created': created, 'system_fingerprint': systemFingerprint, + 'logprobs': choice.logprobs?.toMap(), }, usage: _mapUsage(usage), ); From 035a041c5e29baed74525a557cc41aa6a9e079f1 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 21 Aug 2024 23:15:20 +0200 Subject: [PATCH 102/251] feat: Support OpenAI's strict mode for tool calling in ChatOpenAI (#536) --- .../langchain_core/lib/src/tools/base.dart | 40 ++++++++++++++++--- .../langchain_core/lib/src/tools/string.dart | 6 +++ 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/packages/langchain_core/lib/src/tools/base.dart b/packages/langchain_core/lib/src/tools/base.dart index f6e8dd29..079dbab7 100644 --- a/packages/langchain_core/lib/src/tools/base.dart +++ b/packages/langchain_core/lib/src/tools/base.dart @@ -18,6 +18,7 @@ class ToolSpec { required this.name, required this.description, required this.inputJsonSchema, + this.strict = false, }); /// The unique name of the tool that clearly communicates its purpose. @@ -50,18 +51,31 @@ class ToolSpec { /// ``` final Map inputJsonSchema; + /// Whether to enable strict schema adherence when generating the tool call. + /// If set to true, the model will follow the exact schema defined in the + /// [inputJsonSchema] field. + /// + /// This is only supported by some providers (e.g. OpenAI). Mind that when + /// enabled, only a subset of JSON Schema may be supported. Check out the + /// provider's tool calling documentation for more information. 
+ final bool strict; + @override bool operator ==(covariant final ToolSpec other) { final mapEquals = const DeepCollectionEquality().equals; return identical(this, other) || name == other.name && description == other.description && - mapEquals(inputJsonSchema, other.inputJsonSchema); + mapEquals(inputJsonSchema, other.inputJsonSchema) && + strict == other.strict; } @override int get hashCode => - name.hashCode ^ description.hashCode ^ inputJsonSchema.hashCode; + name.hashCode ^ + description.hashCode ^ + inputJsonSchema.hashCode ^ + strict.hashCode; @override String toString() { @@ -70,6 +84,7 @@ ToolSpec{ name: $name, description: $description, inputJsonSchema: $inputJsonSchema, + strict: $strict, } '''; } @@ -80,6 +95,7 @@ ToolSpec{ 'name': name, 'description': description, 'inputJsonSchema': inputJsonSchema, + 'strict': strict, }; } } @@ -102,6 +118,7 @@ abstract base class Tool inputJsonSchema; + @override + final bool strict; + /// Whether to return the tool's output directly. /// Setting this to true means that after the tool is called, /// the AgentExecutor will stop looping. @@ -132,7 +152,9 @@ abstract base class Tool inputJsonSchema, + final bool strict = false, required final FutureOr Function(Input input) func, Input Function(Map json)? getInputFromJson, final bool returnDirect = false, @@ -157,6 +180,7 @@ abstract base class Tool json['input'] as Input, returnDirect: returnDirect, @@ -217,12 +241,16 @@ abstract base class Tool - name.hashCode ^ description.hashCode ^ inputJsonSchema.hashCode; + name.hashCode ^ + description.hashCode ^ + inputJsonSchema.hashCode ^ + strict.hashCode; @override Map toJson() { @@ -230,6 +258,7 @@ abstract base class Tool required super.name, required super.description, required super.inputJsonSchema, + required super.strict, required FutureOr Function(Input input) function, required Input Function(Map json) getInputFromJson, super.returnDirect = false, diff --git a/packages/langchain_core/lib/src/tools/string.dart b/packages/langchain_core/lib/src/tools/string.dart index 3c9973d5..43e1e2e7 100644 --- a/packages/langchain_core/lib/src/tools/string.dart +++ b/packages/langchain_core/lib/src/tools/string.dart @@ -14,6 +14,7 @@ abstract base class StringTool required super.name, required super.description, final String inputDescription = 'The input to the tool', + super.strict = false, super.returnDirect = false, super.handleToolError, super.defaultOptions, @@ -36,6 +37,8 @@ abstract base class StringTool /// purpose. /// - [description] is used to tell the model how/when/why to use the tool. /// You can provide few-shot examples as a part of the description. + /// - [strict] whether to enable strict schema adherence when generating the + /// tool call (only supported by some providers). /// - [func] is the function that will be called when the tool is run. /// - [returnDirect] whether to return the tool's output directly. /// Setting this to true means that after the tool is called, @@ -46,6 +49,7 @@ abstract base class StringTool required final String name, required final String description, final String inputDescription = 'The input to the tool', + final bool strict = false, required final FutureOr Function(String input) func, final bool returnDirect = false, final String Function(ToolException)? 
handleToolError, @@ -54,6 +58,7 @@ abstract base class StringTool name: name, description: description, inputDescription: inputDescription, + strict: strict, func: func, returnDirect: returnDirect, handleToolError: handleToolError, @@ -84,6 +89,7 @@ final class _StringToolFunc required super.name, required super.description, super.inputDescription, + required super.strict, required FutureOr Function(String) func, super.returnDirect = false, super.handleToolError, From a9d33ec94dc816cab66bcc1780f9930bca0c2210 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 22 Aug 2024 18:38:30 +0200 Subject: [PATCH 103/251] build: Update analysis_options.yaml and fix linter issues (#537) --- analysis_options.yaml | 15 +++++++++++++-- .../browser_summarizer/lib/chrome/chrome_api.dart | 2 +- .../lib/anthropic_sdk_dart.dart | 2 +- packages/googleai_dart/lib/googleai_dart.dart | 2 +- .../lib/src/vector_stores/chroma.dart | 2 +- .../lib/src/document_loaders/csv.dart | 3 +-- packages/langchain_core/lib/agents.dart | 2 +- packages/langchain_core/lib/chains.dart | 2 +- packages/langchain_core/lib/chat_history.dart | 2 +- packages/langchain_core/lib/chat_models.dart | 2 +- packages/langchain_core/lib/document_loaders.dart | 2 +- packages/langchain_core/lib/documents.dart | 2 +- packages/langchain_core/lib/embeddings.dart | 2 +- packages/langchain_core/lib/exceptions.dart | 2 +- packages/langchain_core/lib/langchain.dart | 2 +- packages/langchain_core/lib/language_models.dart | 2 +- packages/langchain_core/lib/llms.dart | 2 +- packages/langchain_core/lib/memory.dart | 2 +- packages/langchain_core/lib/output_parsers.dart | 2 +- packages/langchain_core/lib/prompts.dart | 2 +- packages/langchain_core/lib/retrievers.dart | 2 +- packages/langchain_core/lib/runnables.dart | 2 +- .../langchain_core/lib/src/chat_models/utils.dart | 2 +- .../langchain_core/lib/src/prompts/types.dart | 2 +- packages/langchain_core/lib/stores.dart | 2 +- packages/langchain_core/lib/tools.dart | 2 +- packages/langchain_core/lib/utils.dart | 2 +- packages/langchain_core/lib/vector_stores.dart | 2 +- packages/langchain_firebase/example/pubspec.lock | 4 ++-- packages/langchain_firebase/pubspec.lock | 4 ++-- packages/mistralai_dart/lib/mistralai_dart.dart | 2 +- packages/ollama_dart/lib/ollama_dart.dart | 2 +- 32 files changed, 46 insertions(+), 36 deletions(-) diff --git a/analysis_options.yaml b/analysis_options.yaml index d8f55c71..9a364363 100644 --- a/analysis_options.yaml +++ b/analysis_options.yaml @@ -7,6 +7,11 @@ analyzer: missing_return: error todo: ignore sdk_version_since: ignore # TODO remove when fixed https://github.com/dart-lang/sdk/issues/52327 + exclude: + - "**/generated_plugin_registrant.dart" + - "**/generated/**" + - "**/*.gen.dart" + - "**/*.g.dart" linter: rules: # https://dart-lang.github.io/linter/lints/{rule}.html @@ -30,7 +35,7 @@ linter: - avoid_null_checks_in_equality_operators - avoid_positional_boolean_parameters - avoid_print - # - avoid_redundant_argument_values # Sometimes is useful to be explicit + # - avoid_redundant_argument_values # I prefer to be explicit sometimes - avoid_relative_lib_imports - avoid_renaming_method_parameters - avoid_return_types_on_setters @@ -65,6 +70,7 @@ linter: # - diagnostic_describe_all_properties # Disabled because it's very verbose - directives_ordering - discarded_futures + # - document_ignores # Disabled because it's very verbose - empty_catches - empty_constructor_bodies - empty_statements @@ -76,6 +82,7 @@ linter: - implementation_imports - implicit_call_tearoffs - 
invalid_case_patterns + - invalid_runtime_check_with_js_interop_types - iterable_contains_unrelated_type - join_return_with_assignment - leading_newlines_in_multiline_strings @@ -85,6 +92,7 @@ linter: - library_private_types_in_public_api - list_remove_unrelated_type - matching_super_parameters + - missing_code_block_language_in_doc_comment - missing_whitespace_between_adjacent_strings - no_adjacent_strings_in_list - no_default_cases @@ -94,6 +102,7 @@ linter: - no_literal_bool_comparisons - no_logic_in_create_state - no_runtimeType_toString + - no_wildcard_variable_uses - non_constant_identifier_names - noop_primitive_operations - null_check_on_nullable_type_parameter @@ -116,7 +125,7 @@ linter: - prefer_final_fields - prefer_final_in_for_each - prefer_final_locals - # - prefer_final_parameters # adds too much verbosity + # - prefer_final_parameters # Very verbose - prefer_for_elements_to_map_fromIterable - prefer_foreach - prefer_function_declarations_over_variables @@ -152,6 +161,7 @@ linter: - type_init_formals - type_literal_in_constant_pattern - unawaited_futures + - unintended_html_in_doc_comment - unnecessary_await_in_return - unnecessary_brace_in_string_interps - unnecessary_breaks @@ -161,6 +171,7 @@ linter: - unnecessary_lambdas - unnecessary_late - unnecessary_library_directive + - unnecessary_library_name - unnecessary_new - unnecessary_null_aware_assignments - unnecessary_null_aware_operator_on_extension_on_nullable diff --git a/examples/browser_summarizer/lib/chrome/chrome_api.dart b/examples/browser_summarizer/lib/chrome/chrome_api.dart index d60ac8b7..9ab8b8b4 100644 --- a/examples/browser_summarizer/lib/chrome/chrome_api.dart +++ b/examples/browser_summarizer/lib/chrome/chrome_api.dart @@ -1,6 +1,6 @@ // ignore_for_file: public_member_api_docs @JS('chrome') -library chrome; +library; import 'package:js/js.dart'; diff --git a/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart index 7a853ada..4cc40a27 100644 --- a/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart +++ b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Anthropic API (Claude 3.5 Sonnet, Opus, Haiku, etc.). -library anthropic_sdk_dart; +library; export 'src/client.dart'; export 'src/extensions.dart'; diff --git a/packages/googleai_dart/lib/googleai_dart.dart b/packages/googleai_dart/lib/googleai_dart.dart index e673e9d9..f0807211 100644 --- a/packages/googleai_dart/lib/googleai_dart.dart +++ b/packages/googleai_dart/lib/googleai_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Google AI API (Gemini Pro, Gemini Pro Vision, embeddings, etc.). -library googleai_dart; +library; export 'src/client.dart'; export 'src/generated/client.dart' show GoogleAIClientException; diff --git a/packages/langchain_chroma/lib/src/vector_stores/chroma.dart b/packages/langchain_chroma/lib/src/vector_stores/chroma.dart index 9ed252ba..dc170896 100644 --- a/packages/langchain_chroma/lib/src/vector_stores/chroma.dart +++ b/packages/langchain_chroma/lib/src/vector_stores/chroma.dart @@ -42,7 +42,7 @@ import 'types.dart'; /// If you are interacting with Chroma server from a web browser, /// you may need to configure the CORS policy. 
You can do this by /// passing the following environment variable: -/// ``` +/// ```sh /// docker run -p 8000:8000 -e 'CHROMA_SERVER_CORS_ALLOW_ORIGINS=["*"]' chromadb/chroma /// ``` /// The previous command will allow all origins to access the Chroma server diff --git a/packages/langchain_community/lib/src/document_loaders/csv.dart b/packages/langchain_community/lib/src/document_loaders/csv.dart index 155e520d..2a4a4872 100644 --- a/packages/langchain_community/lib/src/document_loaders/csv.dart +++ b/packages/langchain_community/lib/src/document_loaders/csv.dart @@ -17,7 +17,7 @@ import 'package:langchain_core/documents.dart'; /// and [eol]. /// /// The fields are added to the page content in the following format: -/// ``` +/// ```txt /// {field1Name}: {field1Value} /// {field2Name}: {field2Value} /// ... @@ -56,7 +56,6 @@ class CsvLoader extends BaseDocumentLoader { /// the page content of the document. /// /// If not provided, all row fields are extracted. - /// ``` final List? fields; /// Optional field to override the field names from the CSV file. diff --git a/packages/langchain_core/lib/agents.dart b/packages/langchain_core/lib/agents.dart index e99fdb9f..97382b62 100644 --- a/packages/langchain_core/lib/agents.dart +++ b/packages/langchain_core/lib/agents.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to agents. -library agents; +library; export 'src/agents/agents.dart'; diff --git a/packages/langchain_core/lib/chains.dart b/packages/langchain_core/lib/chains.dart index 3214cef2..a35484cd 100644 --- a/packages/langchain_core/lib/chains.dart +++ b/packages/langchain_core/lib/chains.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to chains. -library chains; +library; export 'src/chains/chains.dart'; diff --git a/packages/langchain_core/lib/chat_history.dart b/packages/langchain_core/lib/chat_history.dart index 316cbccc..726dbd3c 100644 --- a/packages/langchain_core/lib/chat_history.dart +++ b/packages/langchain_core/lib/chat_history.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to chat history. -library chat_history; +library; export 'src/chat_history/chat_history.dart'; diff --git a/packages/langchain_core/lib/chat_models.dart b/packages/langchain_core/lib/chat_models.dart index 803668df..259fa3c3 100644 --- a/packages/langchain_core/lib/chat_models.dart +++ b/packages/langchain_core/lib/chat_models.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to chat models. -library chat_models; +library; export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_core/lib/document_loaders.dart b/packages/langchain_core/lib/document_loaders.dart index 51fdbead..b8340c67 100644 --- a/packages/langchain_core/lib/document_loaders.dart +++ b/packages/langchain_core/lib/document_loaders.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to document loaders. -library document_loaders; +library; export 'src/document_loaders/document_loaders.dart'; diff --git a/packages/langchain_core/lib/documents.dart b/packages/langchain_core/lib/documents.dart index 24d340a4..a0f68ebd 100644 --- a/packages/langchain_core/lib/documents.dart +++ b/packages/langchain_core/lib/documents.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to documents. 
-library documents; +library; export 'src/documents/documents.dart'; diff --git a/packages/langchain_core/lib/embeddings.dart b/packages/langchain_core/lib/embeddings.dart index 829de2c7..b6c2bc82 100644 --- a/packages/langchain_core/lib/embeddings.dart +++ b/packages/langchain_core/lib/embeddings.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to embeddings. -library embeddings; +library; export 'src/embeddings/embeddings.dart'; diff --git a/packages/langchain_core/lib/exceptions.dart b/packages/langchain_core/lib/exceptions.dart index 4371a3a3..1e0d7fa0 100644 --- a/packages/langchain_core/lib/exceptions.dart +++ b/packages/langchain_core/lib/exceptions.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to exceptions. -library exceptions; +library; export 'src/exceptions/exceptions.dart'; diff --git a/packages/langchain_core/lib/langchain.dart b/packages/langchain_core/lib/langchain.dart index b30c4d14..cf5bb742 100644 --- a/packages/langchain_core/lib/langchain.dart +++ b/packages/langchain_core/lib/langchain.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to LangChain. -library langchain; +library; export 'src/langchain/langchain.dart'; diff --git a/packages/langchain_core/lib/language_models.dart b/packages/langchain_core/lib/language_models.dart index 7cabafc7..1fae54b5 100644 --- a/packages/langchain_core/lib/language_models.dart +++ b/packages/langchain_core/lib/language_models.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to language models. -library language_models; +library; export 'src/language_models/language_models.dart'; diff --git a/packages/langchain_core/lib/llms.dart b/packages/langchain_core/lib/llms.dart index 5b98240d..ed130b60 100644 --- a/packages/langchain_core/lib/llms.dart +++ b/packages/langchain_core/lib/llms.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to LLMs. -library llms; +library; export 'src/llms/llms.dart'; diff --git a/packages/langchain_core/lib/memory.dart b/packages/langchain_core/lib/memory.dart index b79467cf..7193923f 100644 --- a/packages/langchain_core/lib/memory.dart +++ b/packages/langchain_core/lib/memory.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to memory. -library memory; +library; export 'src/memory/memory.dart'; diff --git a/packages/langchain_core/lib/output_parsers.dart b/packages/langchain_core/lib/output_parsers.dart index 7f0d0d5f..2915a146 100644 --- a/packages/langchain_core/lib/output_parsers.dart +++ b/packages/langchain_core/lib/output_parsers.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to output parsers. -library output_parsers; +library; export 'src/output_parsers/output_parsers.dart'; diff --git a/packages/langchain_core/lib/prompts.dart b/packages/langchain_core/lib/prompts.dart index dbb7ef5b..b7873da5 100644 --- a/packages/langchain_core/lib/prompts.dart +++ b/packages/langchain_core/lib/prompts.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to prompts. -library prompts; +library; export 'src/prompts/prompts.dart'; diff --git a/packages/langchain_core/lib/retrievers.dart b/packages/langchain_core/lib/retrievers.dart index 5b1ec71e..5d1278bf 100644 --- a/packages/langchain_core/lib/retrievers.dart +++ b/packages/langchain_core/lib/retrievers.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to retrievers. 
-library retrievers; +library; export 'src/retrievers/retrievers.dart'; diff --git a/packages/langchain_core/lib/runnables.dart b/packages/langchain_core/lib/runnables.dart index e111eb58..72b67584 100644 --- a/packages/langchain_core/lib/runnables.dart +++ b/packages/langchain_core/lib/runnables.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to runnables. -library runnables; +library; export 'src/runnables/runnables.dart'; diff --git a/packages/langchain_core/lib/src/chat_models/utils.dart b/packages/langchain_core/lib/src/chat_models/utils.dart index 5c84a142..ebfc011c 100644 --- a/packages/langchain_core/lib/src/chat_models/utils.dart +++ b/packages/langchain_core/lib/src/chat_models/utils.dart @@ -1,6 +1,6 @@ import 'types.dart'; -/// Extensions on [List]. +/// Extensions on `List`. extension ChatMessagesX on List { /// This function is to get a string representation of the chat messages /// based on the message content and role. diff --git a/packages/langchain_core/lib/src/prompts/types.dart b/packages/langchain_core/lib/src/prompts/types.dart index c68f0979..3bd9756b 100644 --- a/packages/langchain_core/lib/src/prompts/types.dart +++ b/packages/langchain_core/lib/src/prompts/types.dart @@ -90,7 +90,7 @@ class StringPromptValue implements PromptValue { /// /// When [toString] is called, it returns the string representation of the /// messages using the following format: -/// ``` +/// ```txt /// System: /// Human: /// AI: diff --git a/packages/langchain_core/lib/stores.dart b/packages/langchain_core/lib/stores.dart index 2a234153..96eb406a 100644 --- a/packages/langchain_core/lib/stores.dart +++ b/packages/langchain_core/lib/stores.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to stores. -library stores; +library; export 'src/stores/stores.dart'; diff --git a/packages/langchain_core/lib/tools.dart b/packages/langchain_core/lib/tools.dart index 9d0b95aa..d829f7d5 100644 --- a/packages/langchain_core/lib/tools.dart +++ b/packages/langchain_core/lib/tools.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to tools. -library tools; +library; export 'src/tools/tools.dart'; diff --git a/packages/langchain_core/lib/utils.dart b/packages/langchain_core/lib/utils.dart index cdcc6670..7ceacd01 100644 --- a/packages/langchain_core/lib/utils.dart +++ b/packages/langchain_core/lib/utils.dart @@ -1,4 +1,4 @@ /// Contains core utilities. -library utils; +library; export 'src/utils/utils.dart'; diff --git a/packages/langchain_core/lib/vector_stores.dart b/packages/langchain_core/lib/vector_stores.dart index 35174345..129d296c 100644 --- a/packages/langchain_core/lib/vector_stores.dart +++ b/packages/langchain_core/lib/vector_stores.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to vector stores. 
-library vector_stores; +library; export 'src/vector_stores/vector_stores.dart'; diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index b1d7e459..eedcc6b5 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -454,10 +454,10 @@ packages: dependency: transitive description: name: vm_service - sha256: f652077d0bdf60abe4c1f6377448e8655008eef28f128bc023f7b5e8dfeb48fc + sha256: "5c5f338a667b4c644744b661f309fb8080bb94b18a7e91ef1dbd343bed00ed6d" url: "https://pub.dev" source: hosted - version: "14.2.4" + version: "14.2.5" web: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 593dfe9c..3f945900 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -392,10 +392,10 @@ packages: dependency: transitive description: name: vm_service - sha256: f652077d0bdf60abe4c1f6377448e8655008eef28f128bc023f7b5e8dfeb48fc + sha256: "5c5f338a667b4c644744b661f309fb8080bb94b18a7e91ef1dbd343bed00ed6d" url: "https://pub.dev" source: hosted - version: "14.2.4" + version: "14.2.5" web: dependency: transitive description: diff --git a/packages/mistralai_dart/lib/mistralai_dart.dart b/packages/mistralai_dart/lib/mistralai_dart.dart index 31efab90..05cfac61 100644 --- a/packages/mistralai_dart/lib/mistralai_dart.dart +++ b/packages/mistralai_dart/lib/mistralai_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Mistral AI API (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -library mistralai_dart; +library; export 'src/client.dart'; export 'src/generated/client.dart' show MistralAIClientException; diff --git a/packages/ollama_dart/lib/ollama_dart.dart b/packages/ollama_dart/lib/ollama_dart.dart index a62c32c4..1195c10c 100644 --- a/packages/ollama_dart/lib/ollama_dart.dart +++ b/packages/ollama_dart/lib/ollama_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Ollama API (run Llama 3, Code Llama, and other models locally). 
-library ollama_dart; +library; export 'src/client.dart'; export 'src/generated/client.dart' show OllamaClientException; From 910aec1b3cc7e4076ce4510353d8515d1e00930f Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 22 Aug 2024 18:47:44 +0200 Subject: [PATCH 104/251] docs: Add Code Assist AI in README and documentation (#538) --- docs/_sidebar.md | 1 + packages/langchain/README.md | 2 ++ 2 files changed, 3 insertions(+) diff --git a/docs/_sidebar.md b/docs/_sidebar.md index a5572f4d..d04533a7 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -1,3 +1,4 @@ +- [![Code Assist AI](https://img.shields.io/badge/AI-Code%20Assist-EB9FDA)](https://app.commanddash.io/agent?github=https://github.com/davidmigloz/langchain_dart) - [Get started](README.md) - [Installation](/get_started/installation.md) - [Quickstart](/get_started/quickstart.md) diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 1e4ad928..2dfacb97 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -5,6 +5,7 @@ [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) [![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) [![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) +[![Code Assist AI](https://img.shields.io/badge/AI-Code%20Assist-EB9FDA)](https://app.commanddash.io/agent?github=https://github.com/davidmigloz/langchain_dart) Build LLM-powered Dart/Flutter applications. @@ -221,6 +222,7 @@ print(res); ## Documentation - [LangChain.dart documentation](https://langchaindart.dev) +- [Code Assist AI](https://app.commanddash.io/agent?github=https://github.com/davidmigloz/langchain_dart) (Chatbot for LangChain.dart documentation) - [Sample apps](https://github.com/davidmigloz/langchain_dart/tree/main/examples) - [LangChain.dart blog](https://blog.langchaindart.dev) - [Project board](https://github.com/users/davidmigloz/projects/2/views/1) From 3739397160381dd91320b3323751203c16fe5fc1 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 22 Aug 2024 18:53:03 +0200 Subject: [PATCH 105/251] test: Update model used in OpenAI tests to gpt-4o-mini (#539) --- .../test/agents/tools_test.dart | 3 - .../test/chains/qa_with_sources_test.dart | 1 - .../test/chat_models/anyscale_test.dart | 111 ------------------ .../test/chat_models/chat_openai_test.dart | 7 +- .../embeddings/anyscale_embeddings_test.dart | 36 ------ 5 files changed, 3 insertions(+), 155 deletions(-) delete mode 100644 packages/langchain_openai/test/chat_models/anyscale_test.dart delete mode 100644 packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart diff --git a/packages/langchain_openai/test/agents/tools_test.dart b/packages/langchain_openai/test/agents/tools_test.dart index c08f4979..03b52dea 100644 --- a/packages/langchain_openai/test/agents/tools_test.dart +++ b/packages/langchain_openai/test/agents/tools_test.dart @@ -24,7 +24,6 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', temperature: 0, ), ); @@ -46,7 +45,6 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', temperature: 0, ), ); @@ -135,7 +133,6 @@ void main() { final model = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', temperature: 0, ), 
).bind(ChatOpenAIOptions(tools: [tool])); diff --git a/packages/langchain_openai/test/chains/qa_with_sources_test.dart b/packages/langchain_openai/test/chains/qa_with_sources_test.dart index 15a80431..c655af98 100644 --- a/packages/langchain_openai/test/chains/qa_with_sources_test.dart +++ b/packages/langchain_openai/test/chains/qa_with_sources_test.dart @@ -125,7 +125,6 @@ Question: {question} final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4o-mini', temperature: 0, ), ); diff --git a/packages/langchain_openai/test/chat_models/anyscale_test.dart b/packages/langchain_openai/test/chat_models/anyscale_test.dart deleted file mode 100644 index f0a99e88..00000000 --- a/packages/langchain_openai/test/chat_models/anyscale_test.dart +++ /dev/null @@ -1,111 +0,0 @@ -@TestOn('vm') -library; // Uses dart:io - -import 'dart:io'; - -import 'package:langchain_core/chat_models.dart'; -import 'package:langchain_core/language_models.dart'; -import 'package:langchain_core/prompts.dart'; -import 'package:langchain_openai/langchain_openai.dart'; -import 'package:test/test.dart'; - -void main() { - group('Anyscale tests', () { - late ChatOpenAI chatModel; - - setUp(() async { - chatModel = ChatOpenAI( - apiKey: Platform.environment['ANYSCALE_API_KEY'], - baseUrl: 'https://api.endpoints.anyscale.com/v1', - ); - }); - - tearDown(() { - chatModel.close(); - }); - - test('Test invoke Anyscale API with different models', () async { - final models = [ - 'meta-llama/Llama-2-70b-chat-hf', - 'codellama/CodeLlama-34b-Instruct-hf', - 'mistralai/Mistral-7B-Instruct-v0.1', - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - ]; - for (final model in models) { - final res = await chatModel.invoke( - PromptValue.string( - 'List the numbers from 1 to 9 in order. ' - 'Output ONLY the numbers in one line without any spaces or commas. ' - 'NUMBERS:', - ), - options: ChatOpenAIOptions(model: model), - ); - - expect(res.id, isNotEmpty); - expect( - res.finishReason, - isNot(FinishReason.unspecified), - reason: model, - ); - expect( - res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), - contains('123456789'), - reason: model, - ); - expect(res.metadata, isNotNull, reason: model); - expect(res.metadata['created'], greaterThan(0), reason: model); - expect(res.metadata['model'], isNotEmpty, reason: model); - await Future.delayed(const Duration(seconds: 1)); // Rate limit - } - }); - - test('Test stream Anyscale API with different models', () async { - final models = [ - 'meta-llama/Llama-2-70b-chat-hf', - 'codellama/CodeLlama-34b-Instruct-hf', - 'mistralai/Mistral-7B-Instruct-v0.1', - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - ]; - for (final model in models) { - final stream = chatModel.stream( - PromptValue.string( - 'List the numbers from 1 to 9 in order. ' - 'Output ONLY the numbers in one line without any spaces or commas. 
' - 'NUMBERS:', - ), - options: ChatOpenAIOptions(model: model), - ); - - String content = ''; - int count = 0; - await for (final res in stream) { - content += res.output.content.replaceAll(RegExp(r'[\s\n,]'), ''); - count++; - } - expect(count, greaterThan(1), reason: model); - expect(content, contains('123456789'), reason: model); - await Future.delayed(const Duration(seconds: 1)); // Rate limit - } - }); - - test('Test countTokens', () async { - final models = [ - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'mistralai/Mistral-7B-Instruct-v0.2', - 'NousResearch/Nous-Hermes-2-Yi-34B', - 'openchat/openchat-3.5-1210', - 'togethercomputer/llama-2-70b-chat', - 'togethercomputer/falcon-40b-instruct', - ]; - for (final model in models) { - const text = 'Hello, how are you?'; - - final numTokens = await chatModel.countTokens( - PromptValue.chat([ChatMessage.humanText(text)]), - options: ChatOpenAIOptions(model: model), - ); - expect(numTokens, 13, reason: model); - } - }); - }); -} diff --git a/packages/langchain_openai/test/chat_models/chat_openai_test.dart b/packages/langchain_openai/test/chat_models/chat_openai_test.dart index d56ba1f9..a0ea44fb 100644 --- a/packages/langchain_openai/test/chat_models/chat_openai_test.dart +++ b/packages/langchain_openai/test/chat_models/chat_openai_test.dart @@ -208,7 +208,6 @@ void main() { test('Test countTokens messages', () async { final models = [ - 'gpt-3.5-turbo-16k-0613', 'gpt-4-0314', 'gpt-4-0613', ]; @@ -328,7 +327,7 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', + model: defaultModel, temperature: 0, seed: 12345, ), @@ -455,7 +454,7 @@ void main() { final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', + model: defaultModel, ), ); @@ -482,7 +481,7 @@ void main() { final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', + model: defaultModel, ), ); diff --git a/packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart b/packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart deleted file mode 100644 index 988c7e4c..00000000 --- a/packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart +++ /dev/null @@ -1,36 +0,0 @@ -@TestOn('vm') -library; // Uses dart:io - -import 'dart:io'; - -import 'package:langchain_openai/langchain_openai.dart'; -import 'package:test/test.dart'; - -void main() { - group('Anyscale AI Embeddings tests', () { - late OpenAIEmbeddings embeddings; - - setUp(() async { - embeddings = OpenAIEmbeddings( - apiKey: Platform.environment['ANYSCALE_API_KEY'], - baseUrl: 'https://api.endpoints.anyscale.com/v1', - ); - }); - - tearDown(() { - embeddings.close(); - }); - - test('Test Anyscale Embeddings models', () async { - final models = [ - 'thenlper/gte-large', - ]; - for (final model in models) { - embeddings.model = model; - final res = await embeddings.embedQuery('Hello world'); - expect(res.length, greaterThan(0)); - await Future.delayed(const Duration(seconds: 1)); // Rate limit - } - }); - }); -} From ee4a1c87a6a9340d35323fdd686e2399dd9ea904 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Thu, 22 Aug 2024 18:55:31 +0200 Subject: [PATCH 106/251] chore(release): publish packages - langchain@0.7.5 - langchain_community@0.3.1 - langchain_core@0.3.5 - langchain_google@0.6.2 - langchain_mistralai@0.2.3 - langchain_ollama@0.3.1 - langchain_openai@0.7.1 - ollama_dart@0.2.1 - openai_dart@0.4.1 - 
langchain_firebase@0.2.1+1 - langchain_supabase@0.1.1+2 - langchain_pinecone@0.1.0+8 - langchain_anthropic@0.1.1+1 - langchain_chroma@0.2.1+2 --- CHANGELOG.md | 92 ++++++++++++++++++- examples/browser_summarizer/pubspec.yaml | 6 +- examples/docs_examples/pubspec.yaml | 16 ++-- examples/hello_world_backend/pubspec.yaml | 4 +- examples/hello_world_cli/pubspec.yaml | 4 +- examples/hello_world_flutter/pubspec.yaml | 10 +- examples/wikivoyage_eu/pubspec.yaml | 6 +- melos.yaml | 2 +- packages/anthropic_sdk_dart/CHANGELOG.md | 4 +- packages/chromadb/CHANGELOG.md | 4 +- packages/googleai_dart/CHANGELOG.md | 4 +- packages/langchain/CHANGELOG.md | 10 +- packages/langchain/pubspec.yaml | 10 +- packages/langchain_anthropic/CHANGELOG.md | 8 +- packages/langchain_anthropic/pubspec.yaml | 4 +- packages/langchain_chroma/CHANGELOG.md | 8 +- packages/langchain_chroma/pubspec.yaml | 10 +- packages/langchain_community/CHANGELOG.md | 8 +- packages/langchain_community/pubspec.yaml | 6 +- packages/langchain_core/CHANGELOG.md | 10 +- packages/langchain_core/pubspec.yaml | 2 +- packages/langchain_firebase/CHANGELOG.md | 8 +- .../langchain_firebase/example/pubspec.yaml | 4 +- packages/langchain_firebase/pubspec.yaml | 4 +- packages/langchain_google/CHANGELOG.md | 8 +- packages/langchain_google/pubspec.yaml | 4 +- packages/langchain_mistralai/CHANGELOG.md | 8 +- packages/langchain_mistralai/pubspec.yaml | 4 +- packages/langchain_ollama/CHANGELOG.md | 9 +- packages/langchain_ollama/pubspec.yaml | 6 +- packages/langchain_openai/CHANGELOG.md | 14 ++- packages/langchain_openai/pubspec.yaml | 10 +- packages/langchain_pinecone/CHANGELOG.md | 8 +- packages/langchain_pinecone/pubspec.yaml | 6 +- packages/langchain_supabase/CHANGELOG.md | 8 +- packages/langchain_supabase/pubspec.yaml | 10 +- packages/mistralai_dart/CHANGELOG.md | 4 +- packages/ollama_dart/CHANGELOG.md | 8 +- packages/ollama_dart/pubspec.yaml | 2 +- packages/openai_dart/CHANGELOG.md | 11 ++- packages/openai_dart/pubspec.yaml | 2 +- packages/tavily_dart/CHANGELOG.md | 4 +- packages/vertex_ai/CHANGELOG.md | 4 +- 43 files changed, 287 insertions(+), 87 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 47d794f4..2bedbee2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,96 @@ # Change Log -Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 2024-08-22 + +### Changes + +--- + +Packages with breaking changes: + + - There are no breaking changes in this release. 
+ +Packages with other changes: + + - [`langchain` - `v0.7.5`](#langchain---v075) + - [`langchain_core` - `v0.3.5`](#langchain_core---v035) + - [`langchain_community` - `v0.3.1`](#langchain_community---v031) + - [`langchain_openai` - `v0.7.1`](#langchain_openai---v071) + - [`langchain_ollama` - `v0.3.1`](#langchain_ollama---v031) + - [`langchain_google` - `v0.6.2`](#langchain_google---v062) + - [`langchain_mistralai` - `v0.2.3`](#langchain_mistralai---v023) + - [`ollama_dart` - `v0.2.1`](#ollama_dart---v021) + - [`openai_dart` - `v0.4.1`](#openai_dart---v041) + - [`langchain_firebase` - `v0.2.1+1`](#langchain_firebase---v0211) + - [`langchain_supabase` - `v0.1.1+2`](#langchain_supabase---v0112) + - [`langchain_pinecone` - `v0.1.0+8`](#langchain_pinecone---v0108) + - [`langchain_anthropic` - `v0.1.1+1`](#langchain_anthropic---v0111) + - [`langchain_chroma` - `v0.2.1+2`](#langchain_chroma---v0212) + +Packages with dependency updates only: + +> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project. + + - `langchain_firebase` - `v0.2.1+1` + - `langchain_supabase` - `v0.1.1+2` + - `langchain_pinecone` - `v0.1.0+8` + - `langchain_anthropic` - `v0.1.1+1` + - `langchain_chroma` - `v0.2.1+2` + +--- + +#### `langchain` - `v0.7.5` + + - **FEAT**: Add ToolsAgent for models with tool-calling support ([#530](https://github.com/davidmigloz/langchain_dart/issues/530)). ([f3ee5b44](https://github.com/davidmigloz/langchain_dart/commit/f3ee5b44c4ffa378343ec4ee1e08d8e594a6cb36)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + - **DOCS**: Add Code Assist AI in README and documentation ([#538](https://github.com/davidmigloz/langchain_dart/issues/538)). ([e752464c](https://github.com/davidmigloz/langchain_dart/commit/e752464c0d2fc7e0ccc878933b0ef934c9527567)) + +#### `langchain_core` - `v0.3.5` + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + - **FEAT**: Support OpenAI's strict mode for tool calling in ChatOpenAI ([#536](https://github.com/davidmigloz/langchain_dart/issues/536)). ([71623f49](https://github.com/davidmigloz/langchain_dart/commit/71623f490289e63252165167305e00038d800be1)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + +#### `langchain_community` - `v0.3.1` + + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + +#### `langchain_openai` - `v0.7.1` + + - **FEAT**: Add support for Structured Outputs in ChatOpenAI ([#526](https://github.com/davidmigloz/langchain_dart/issues/526)). 
([c5387b5d](https://github.com/davidmigloz/langchain_dart/commit/c5387b5dd87fe2aac511c4eca2d4a497065db61f)) + - **FEAT**: Handle refusal in OpenAI's Structured Outputs API ([#533](https://github.com/davidmigloz/langchain_dart/issues/533)). ([f4c4ed99](https://github.com/davidmigloz/langchain_dart/commit/f4c4ed9902177560f13fa9f44b07f0a49c3fdf0a)) + - **FEAT**: Include logprobs in result metadata from ChatOpenAI ([#535](https://github.com/davidmigloz/langchain_dart/issues/535)). ([1834b3ad](https://github.com/davidmigloz/langchain_dart/commit/1834b3adb210b7d190a7e0574a304f069813486b)) + - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) + - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + - **REFACTOR**: Don't send OpenAI-Beta header in ChatOpenAI ([#511](https://github.com/davidmigloz/langchain_dart/issues/511)). ([0e532bab](https://github.com/davidmigloz/langchain_dart/commit/0e532bab84483bf9d77a0d745f1a591eea2ff7c8)) + +#### `langchain_ollama` - `v0.3.1` + + - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + +#### `langchain_google` - `v0.6.2` + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + +#### `langchain_mistralai` - `v0.2.3` + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + +#### `openai_dart` - `v0.4.1` + + - **FEAT**: Add support for Structured Outputs ([#525](https://github.com/davidmigloz/langchain_dart/issues/525)). ([c7574077](https://github.com/davidmigloz/langchain_dart/commit/c7574077195acfc96e9ca9d526cc050788c23c1d)) + - **FEAT**: Add log probabilities for refusal tokens ([#534](https://github.com/davidmigloz/langchain_dart/issues/534)). ([8470a24c](https://github.com/davidmigloz/langchain_dart/commit/8470a24cc42042e20ffffa4b67bc831e03efbc6c)) + - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) + - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). 
([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) + +#### `ollama_dart` - `v0.2.1` + + - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) + ## 2024-07-26 diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 326a41a5..f284843f 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -13,9 +13,9 @@ dependencies: flutter_bloc: ^8.1.6 flutter_markdown: ^0.7.3 js: ^0.7.1 - langchain: ^0.7.4 - langchain_community: 0.3.0 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_community: 0.3.1 + langchain_openai: ^0.7.1 shared_preferences: ^2.3.0 flutter: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 398e7e15..0136cd5a 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -7,11 +7,11 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.4 - langchain_anthropic: ^0.1.1 - langchain_chroma: ^0.2.1+1 - langchain_community: 0.3.0 - langchain_google: ^0.6.1 - langchain_mistralai: ^0.2.2 - langchain_ollama: ^0.3.0 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_anthropic: ^0.1.1+1 + langchain_chroma: ^0.2.1+2 + langchain_community: 0.3.1 + langchain_google: ^0.6.2 + langchain_mistralai: ^0.2.3 + langchain_ollama: ^0.3.1 + langchain_openai: ^0.7.1 diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index b7b5dd3a..55135704 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -7,7 +7,7 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.4 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_openai: ^0.7.1 shelf: ^1.4.2 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index d73e4928..26e63ed8 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -7,5 +7,5 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.4 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_openai: ^0.7.1 diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index fb83f0cc..15fd553e 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -12,11 +12,11 @@ dependencies: equatable: ^2.0.5 flutter_bloc: ^8.1.6 flutter_markdown: ^0.7.3 - langchain: ^0.7.4 - langchain_google: ^0.6.1 - langchain_mistralai: ^0.2.2 - langchain_ollama: ^0.3.0 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_google: ^0.6.2 + langchain_mistralai: ^0.2.3 + langchain_ollama: ^0.3.1 + langchain_openai: ^0.7.1 flutter: uses-material-design: true diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index ab9a51be..4d7ddfc5 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -7,6 +7,6 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.4 - langchain_ollama: ^0.3.0 - langchain_community: 0.3.0 + langchain: ^0.7.5 + langchain_ollama: ^0.3.1 + langchain_community: 0.3.1 diff --git a/melos.yaml b/melos.yaml index 69c804de..3a1c0092 100644 --- a/melos.yaml +++ b/melos.yaml @@ -14,7 +14,7 @@ command: 
branch: main changelogs: - path: CHANGELOG.md - description: "Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release." + description: "📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details." packageFilters: no-private: true bootstrap: diff --git a/packages/anthropic_sdk_dart/CHANGELOG.md b/packages/anthropic_sdk_dart/CHANGELOG.md index 600092c2..c9710913 100644 --- a/packages/anthropic_sdk_dart/CHANGELOG.md +++ b/packages/anthropic_sdk_dart/CHANGELOG.md @@ -1,4 +1,6 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- ## 0.1.0 diff --git a/packages/chromadb/CHANGELOG.md b/packages/chromadb/CHANGELOG.md index 171047ca..70f441bd 100644 --- a/packages/chromadb/CHANGELOG.md +++ b/packages/chromadb/CHANGELOG.md @@ -1,4 +1,6 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- ## 0.2.0+1 diff --git a/packages/googleai_dart/CHANGELOG.md b/packages/googleai_dart/CHANGELOG.md index 437f20b2..7bc6e29d 100644 --- a/packages/googleai_dart/CHANGELOG.md +++ b/packages/googleai_dart/CHANGELOG.md @@ -1,4 +1,6 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- ## 0.1.0+2 diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index 680dadd0..79614782 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -1,4 +1,12 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.7.5 + + - **FEAT**: Add ToolsAgent for models with tool-calling support ([#530](https://github.com/davidmigloz/langchain_dart/issues/530)). 
([f3ee5b44](https://github.com/davidmigloz/langchain_dart/commit/f3ee5b44c4ffa378343ec4ee1e08d8e594a6cb36)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + - **DOCS**: Add Code Assist AI in README and documentation ([#538](https://github.com/davidmigloz/langchain_dart/issues/538)). ([e752464c](https://github.com/davidmigloz/langchain_dart/commit/e752464c0d2fc7e0ccc878933b0ef934c9527567)) ## 0.7.4 diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 857d9c79..06b182e2 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain description: Build powerful LLM-based Dart and Flutter applications with LangChain.dart. -version: 0.7.4 +version: 0.7.5 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart @@ -19,12 +19,12 @@ dependencies: characters: ^1.3.0 collection: ^1.18.0 crypto: ^3.0.3 - langchain_core: 0.3.4 + langchain_core: 0.3.5 meta: ^1.11.0 uuid: ^4.4.2 dev_dependencies: test: ^1.25.8 - langchain_community: ^0.3.0 - langchain_openai: ^0.7.0 - langchain_ollama: ^0.3.0 + langchain_community: ^0.3.1 + langchain_openai: ^0.7.1 + langchain_ollama: ^0.3.1 diff --git a/packages/langchain_anthropic/CHANGELOG.md b/packages/langchain_anthropic/CHANGELOG.md index 03cf82f7..690821d1 100644 --- a/packages/langchain_anthropic/CHANGELOG.md +++ b/packages/langchain_anthropic/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.1.1+1 + + - Update a dependency to the latest release. ## 0.1.1 diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index c25c8e94..6aedbe71 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_anthropic description: Anthropic module for LangChain.dart (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.). 
-version: 0.1.1 +version: 0.1.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ dependencies: anthropic_sdk_dart: ^0.1.0 collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 rxdart: ">=0.27.7 <0.29.0" diff --git a/packages/langchain_chroma/CHANGELOG.md b/packages/langchain_chroma/CHANGELOG.md index cb464d8e..45eaafa2 100644 --- a/packages/langchain_chroma/CHANGELOG.md +++ b/packages/langchain_chroma/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.2.1+2 + + - Update a dependency to the latest release. ## 0.2.1+1 diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 391b329b..59aa28a5 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_chroma description: LangChain.dart integration module for Chroma open-source embedding database. -version: 0.2.1+1 +version: 0.2.1+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart @@ -19,12 +19,12 @@ environment: dependencies: chromadb: ^0.2.0+1 http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 meta: ^1.11.0 uuid: ^4.4.2 dev_dependencies: test: ^1.25.8 - langchain: ^0.7.4 - langchain_community: 0.3.0 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_community: 0.3.1 + langchain_openai: ^0.7.1 diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 01068b20..9add3205 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.3.1 + + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). 
([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) ## 0.3.0 diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index ebf10d32..de530389 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. -version: 0.3.0 +version: 0.3.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart @@ -22,7 +22,7 @@ dependencies: flat_buffers: ^23.5.26 http: ^1.2.2 json_path: ^0.7.4 - langchain_core: 0.3.4 + langchain_core: 0.3.5 math_expressions: ^2.6.0 meta: ^1.11.0 objectbox: ^4.0.1 @@ -31,7 +31,7 @@ dependencies: dev_dependencies: build_runner: ^2.4.11 - langchain_openai: ^0.7.0 + langchain_openai: ^0.7.1 objectbox_generator: ^4.0.1 test: ^1.25.8 diff --git a/packages/langchain_core/CHANGELOG.md b/packages/langchain_core/CHANGELOG.md index 9e750ed8..b7592ca0 100644 --- a/packages/langchain_core/CHANGELOG.md +++ b/packages/langchain_core/CHANGELOG.md @@ -1,4 +1,12 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.3.5 + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + - **FEAT**: Support OpenAI's strict mode for tool calling in ChatOpenAI ([#536](https://github.com/davidmigloz/langchain_dart/issues/536)). ([71623f49](https://github.com/davidmigloz/langchain_dart/commit/71623f490289e63252165167305e00038d800be1)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) ## 0.3.4 diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index 170363a7..d322abdc 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_core description: Contains core abstractions of LangChain.dart and the LangChain Expression Language (LCEL). 
-version: 0.3.4 +version: 0.3.5 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index d7aedb1f..11f4c2ea 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.2.1+1 + + - Update a dependency to the latest release. ## 0.2.1 diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index d63e1ccd..f63d336a 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -13,8 +13,8 @@ dependencies: flutter: sdk: flutter flutter_markdown: ^0.7.3 - langchain: 0.7.4 - langchain_firebase: 0.2.1 + langchain: 0.7.5 + langchain_firebase: 0.2.1+1 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 8413d6d1..d0b53ac3 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). -version: 0.2.1 +version: 0.2.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart @@ -25,7 +25,7 @@ dependencies: cloud_firestore: ^4.17.0 firebase_vertexai: ^0.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 meta: ^1.11.0 uuid: ^4.4.2 diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index fb087939..a4288382 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.6.2 + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). 
([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) ## 0.6.1 diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 0dda61e6..1821b7c1 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_google description: LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). -version: 0.6.1 +version: 0.6.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart @@ -24,7 +24,7 @@ dependencies: googleapis: ^12.0.0 googleapis_auth: ^1.6.0 http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 meta: ^1.11.0 uuid: ^4.4.2 vertex_ai: ^0.1.0+1 diff --git a/packages/langchain_mistralai/CHANGELOG.md b/packages/langchain_mistralai/CHANGELOG.md index acae7e78..a60fe14b 100644 --- a/packages/langchain_mistralai/CHANGELOG.md +++ b/packages/langchain_mistralai/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.2.3 + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) ## 0.2.2 diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 5d12387b..4a3583ed 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_mistralai description: LangChain.dart integration module for Mistral AI (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.2.2 +version: 0.2.3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_mistralai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_mistralai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ environment: dependencies: collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 mistralai_dart: ^0.0.3+3 diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index e8f7ae0e..b9795885 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -1,4 +1,11 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. 
+ +--- + +## 0.3.1 + + - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) ## 0.3.0 diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 53d659d0..51c98cd9 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_ollama description: LangChain.dart integration module for Ollama (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). -version: 0.3.0 +version: 0.3.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart @@ -19,10 +19,10 @@ environment: dependencies: collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.2.0 + ollama_dart: ^0.2.1 uuid: ^4.4.2 dev_dependencies: diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index 5b2c1ed7..ab160770 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -1,4 +1,16 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.7.1 + + - **FEAT**: Add support for Structured Outputs in ChatOpenAI ([#526](https://github.com/davidmigloz/langchain_dart/issues/526)). ([c5387b5d](https://github.com/davidmigloz/langchain_dart/commit/c5387b5dd87fe2aac511c4eca2d4a497065db61f)) + - **FEAT**: Handle refusal in OpenAI's Structured Outputs API ([#533](https://github.com/davidmigloz/langchain_dart/issues/533)). ([f4c4ed99](https://github.com/davidmigloz/langchain_dart/commit/f4c4ed9902177560f13fa9f44b07f0a49c3fdf0a)) + - **FEAT**: Include logprobs in result metadata from ChatOpenAI ([#535](https://github.com/davidmigloz/langchain_dart/issues/535)). ([1834b3ad](https://github.com/davidmigloz/langchain_dart/commit/1834b3adb210b7d190a7e0574a304f069813486b)) + - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) + - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). 
([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + - **REFACTOR**: Don't send OpenAI-Beta header in ChatOpenAI ([#511](https://github.com/davidmigloz/langchain_dart/issues/511)). ([0e532bab](https://github.com/davidmigloz/langchain_dart/commit/0e532bab84483bf9d77a0d745f1a591eea2ff7c8)) ## 0.7.0 diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 2e8e5ff6..bf0db409 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_openai description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). -version: 0.7.0 +version: 0.7.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,13 +19,13 @@ environment: dependencies: collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.4.0 + openai_dart: ^0.4.1 uuid: ^4.4.2 dev_dependencies: - langchain: ^0.7.4 - langchain_community: 0.3.0 + langchain: ^0.7.5 + langchain_community: 0.3.1 test: ^1.25.8 diff --git a/packages/langchain_pinecone/CHANGELOG.md b/packages/langchain_pinecone/CHANGELOG.md index 8925fcd8..e277b4a0 100644 --- a/packages/langchain_pinecone/CHANGELOG.md +++ b/packages/langchain_pinecone/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.1.0+8 + + - Update a dependency to the latest release. ## 0.1.0+7 diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index ffbd4c9a..8cbec927 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_pinecone description: LangChain.dart integration module for Pinecone fully-managed vector database. -version: 0.1.0+7 +version: 0.1.0+8 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart @@ -18,11 +18,11 @@ environment: dependencies: http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.4.2 dev_dependencies: test: ^1.25.8 - langchain_openai: ^0.7.0 + langchain_openai: ^0.7.1 diff --git a/packages/langchain_supabase/CHANGELOG.md b/packages/langchain_supabase/CHANGELOG.md index 3cd0af92..be6b7129 100644 --- a/packages/langchain_supabase/CHANGELOG.md +++ b/packages/langchain_supabase/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. 
+📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.1.1+2 + + - Update a dependency to the latest release. ## 0.1.1+1 diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index af7c072f..da03bb1c 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_supabase description: LangChain.dart integration module for Supabase (e.g. Supabase Vector). -version: 0.1.1+1 +version: 0.1.1+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart @@ -18,12 +18,12 @@ environment: dependencies: http: ^1.2.2 - langchain_core: 0.3.4 + langchain_core: 0.3.5 meta: ^1.11.0 supabase: ^2.2.7 dev_dependencies: test: ^1.25.8 - langchain: ^0.7.4 - langchain_community: 0.3.0 - langchain_openai: ^0.7.0 + langchain: ^0.7.5 + langchain_community: 0.3.1 + langchain_openai: ^0.7.1 diff --git a/packages/mistralai_dart/CHANGELOG.md b/packages/mistralai_dart/CHANGELOG.md index 9a9234bb..ec5979cc 100644 --- a/packages/mistralai_dart/CHANGELOG.md +++ b/packages/mistralai_dart/CHANGELOG.md @@ -1,4 +1,6 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- ## 0.0.3+3 diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index af9e377b..c8b93090 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -1,4 +1,10 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.2.1 + + - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) ## 0.2.0 diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 3f8d7f75..66bea8fc 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: ollama_dart description: Dart Client for the Ollama API (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). 
-version: 0.2.0 +version: 0.2.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index d1fafe5f..789ec5a7 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -1,4 +1,13 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.4.1 + + - **FEAT**: Add support for Structured Outputs ([#525](https://github.com/davidmigloz/langchain_dart/issues/525)). ([c7574077](https://github.com/davidmigloz/langchain_dart/commit/c7574077195acfc96e9ca9d526cc050788c23c1d)) + - **FEAT**: Add log probabilities for refusal tokens ([#534](https://github.com/davidmigloz/langchain_dart/issues/534)). ([8470a24c](https://github.com/davidmigloz/langchain_dart/commit/8470a24cc42042e20ffffa4b67bc831e03efbc6c)) + - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) + - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) ## 0.4.0 diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index 4126650f..91e131b4 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: openai_dart description: Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. -version: 0.4.0 +version: 0.4.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/tavily_dart/CHANGELOG.md b/packages/tavily_dart/CHANGELOG.md index 9abf1cdf..74cd20f8 100644 --- a/packages/tavily_dart/CHANGELOG.md +++ b/packages/tavily_dart/CHANGELOG.md @@ -1,4 +1,6 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. 
+ +--- ## 0.1.0 diff --git a/packages/vertex_ai/CHANGELOG.md b/packages/vertex_ai/CHANGELOG.md index 217a19e8..5c733127 100644 --- a/packages/vertex_ai/CHANGELOG.md +++ b/packages/vertex_ai/CHANGELOG.md @@ -1,4 +1,6 @@ -📣 Check out the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- ## 0.1.0+1 From 18d6c5ad078d1270005c163daa8e2ab5a008dbc3 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Thu, 22 Aug 2024 19:16:42 +0200 Subject: [PATCH 107/251] build: Update pubspec.lock files --- examples/browser_summarizer/pubspec.lock | 10 ++++----- examples/docs_examples/pubspec.lock | 22 +++++++++---------- examples/hello_world_backend/pubspec.lock | 8 +++---- examples/hello_world_cli/pubspec.lock | 8 +++---- examples/hello_world_flutter/pubspec.lock | 16 +++++++------- examples/wikivoyage_eu/pubspec.lock | 10 ++++----- packages/langchain/pubspec_overrides.yaml | 2 +- .../langchain_firebase/example/pubspec.lock | 6 ++--- packages/langchain_firebase/pubspec.lock | 2 +- 9 files changed, 42 insertions(+), 42 deletions(-) diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index fe72f39c..6eada274 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -233,28 +233,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.0" + version: "0.3.1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.0" + version: "0.7.1" langchain_tiktoken: dependency: transitive description: @@ -325,7 +325,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.0" + version: "0.4.1" path: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index caa950ab..78752c5c 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -237,63 +237,63 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_anthropic: dependency: "direct main" description: path: "../../packages/langchain_anthropic" relative: true source: path - version: "0.1.1" + version: "0.1.1+1" langchain_chroma: dependency: "direct main" description: path: "../../packages/langchain_chroma" relative: true source: path - version: "0.2.1+1" + version: "0.2.1+2" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.0" + version: "0.3.1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_google: 
dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.1" + version: "0.6.2" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.2" + version: "0.2.3" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.3.0" + version: "0.3.1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.0" + version: "0.7.1" langchain_tiktoken: dependency: transitive description: @@ -347,14 +347,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.2.0" + version: "0.2.1" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.0" + version: "0.4.1" path: dependency: transitive description: diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index b2934b90..f7ad7603 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -111,21 +111,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.0" + version: "0.7.1" langchain_tiktoken: dependency: transitive description: @@ -148,7 +148,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.0" + version: "0.4.1" path: dependency: transitive description: diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 40613637..94af9a94 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -103,21 +103,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.0" + version: "0.7.1" langchain_tiktoken: dependency: transitive description: @@ -140,7 +140,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.0" + version: "0.4.1" path: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 02e61985..7eb4a4d8 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -196,42 +196,42 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.1" + version: "0.6.2" langchain_mistralai: 
dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.2" + version: "0.2.3" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.3.0" + version: "0.3.1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.0" + version: "0.7.1" langchain_tiktoken: dependency: transitive description: @@ -285,14 +285,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.2.0" + version: "0.2.1" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.0" + version: "0.4.1" path: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index 2e50c6c0..df3386b8 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -167,28 +167,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.0" + version: "0.3.1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.3.0" + version: "0.3.1" langchain_tiktoken: dependency: transitive description: @@ -235,7 +235,7 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.2.0" + version: "0.2.1" path: dependency: transitive description: diff --git a/packages/langchain/pubspec_overrides.yaml b/packages/langchain/pubspec_overrides.yaml index 65792891..d9c6fc7e 100644 --- a/packages/langchain/pubspec_overrides.yaml +++ b/packages/langchain/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_core,langchain_community,langchain_ollama,langchain_openai,ollama_dart,openai_dart,tavily_dart +# melos_managed_dependency_overrides: langchain_community,langchain_core,langchain_ollama,langchain_openai,ollama_dart,openai_dart,tavily_dart dependency_overrides: langchain_community: path: ../langchain_community diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index eedcc6b5..5a3fa013 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -262,21 +262,21 @@ packages: path: "../../langchain" relative: true source: path - version: "0.7.4" + version: "0.7.5" langchain_core: dependency: "direct overridden" description: path: "../../langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" langchain_firebase: dependency: "direct main" description: path: ".." 
relative: true source: path - version: "0.2.1" + version: "0.2.1+1" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 3f945900..de205b64 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -230,7 +230,7 @@ packages: path: "../langchain_core" relative: true source: path - version: "0.3.4" + version: "0.3.5" leak_tracker: dependency: transitive description: From f542a03b253734ee7e3f8c88cd39e0469c3df77e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2024 09:39:54 +0200 Subject: [PATCH 108/251] build(deps): bump bluefireteam/melos-action (#541) Bumps [bluefireteam/melos-action](https://github.com/bluefireteam/melos-action) from 6085791af7036f6366c9a4b9d55105c0ef9c6388 to 7e70fbe34bbd91a75eb505eeb4174b0ac9a1df52. - [Release notes](https://github.com/bluefireteam/melos-action/releases) - [Commits](https://github.com/bluefireteam/melos-action/compare/6085791af7036f6366c9a4b9d55105c0ef9c6388...7e70fbe34bbd91a75eb505eeb4174b0ac9a1df52) --- updated-dependencies: - dependency-name: bluefireteam/melos-action dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 0e6c4e20..b77c2ed8 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -38,7 +38,7 @@ jobs: run: flutter pub cache clean - name: Install Melos - uses: bluefireteam/melos-action@6085791af7036f6366c9a4b9d55105c0ef9c6388 + uses: bluefireteam/melos-action@7e70fbe34bbd91a75eb505eeb4174b0ac9a1df52 with: run-bootstrap: false From b5d16e43db403fb9b4a486218d2dc2034392e3ba Mon Sep 17 00:00:00 2001 From: Ganesh Date: Mon, 26 Aug 2024 15:53:59 +0530 Subject: [PATCH 109/251] feat: Add retry support for Runnables (#540) Co-authored-by: David Miguel --- docs/_sidebar.md | 1 + docs/expression_language/primitives.md | 1 + docs/expression_language/primitives/retry.md | 94 ++++++++++ .../expression_language/primitives/retry.dart | 177 ++++++++++++++++++ packages/langchain/lib/src/utils/utils.dart | 6 +- .../lib/src/runnables/retry.dart | 63 +++++++ .../lib/src/runnables/runnable.dart | 35 +++- .../lib/src/runnables/runnables.dart | 1 + .../lib/src/utils/retry_client.dart | 92 +++++++++ .../langchain_core/lib/src/utils/utils.dart | 1 + .../test/runnables/retry_test.dart | 87 +++++++++ 11 files changed, 556 insertions(+), 2 deletions(-) create mode 100644 docs/expression_language/primitives/retry.md create mode 100644 examples/docs_examples/bin/expression_language/primitives/retry.dart create mode 100644 packages/langchain_core/lib/src/runnables/retry.dart create mode 100644 packages/langchain_core/lib/src/utils/retry_client.dart create mode 100644 packages/langchain_core/test/runnables/retry_test.dart diff --git a/docs/_sidebar.md b/docs/_sidebar.md index d04533a7..ee2c472a 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -14,6 +14,7 @@ - [Function: Run custom logic](/expression_language/primitives/function.md) - [Binding: Configuring runnables](/expression_language/primitives/binding.md) - [Router: Routing inputs](/expression_language/primitives/router.md) + - [Retry: Retrying runnables](/expression_language/primitives/retry.md) - 
[Streaming](/expression_language/streaming.md) - [Fallbacks](/expression_language/fallbacks.md) - Cookbook
diff --git a/docs/expression_language/primitives.md b/docs/expression_language/primitives.md index 89d618e4..aecaa93e 100644 --- a/docs/expression_language/primitives.md +++ b/docs/expression_language/primitives.md @@ -11,3 +11,4 @@ This section goes into greater depth on where and how some of these components a - [Function: Run custom logic](/expression_language/primitives/function.md) - [Binding: Configuring runnables](/expression_language/primitives/binding.md) - [Router: Routing inputs](/expression_language/primitives/router.md) +- [Retry: Retrying runnables](/expression_language/primitives/retry.md)
diff --git a/docs/expression_language/primitives/retry.md b/docs/expression_language/primitives/retry.md new file mode 100644 index 00000000..ef6ae6c9 --- /dev/null +++ b/docs/expression_language/primitives/retry.md @@ -0,0 +1,94 @@
+# RunnableRetry: Retrying Runnables
+
+`RunnableRetry` wraps a `Runnable` and retries it if it fails. It can be created using `runnable.withRetry()`.
+
+By default, the runnable will be retried 3 times with an exponential backoff strategy.
+
+## Usage
+
+## Creating a RunnableRetry
+
+```dart
+final model = ChatOpenAI();
+final input = PromptValue.string('Explain why sky is blue in 2 lines');
+
+final modelWithRetry = model.withRetry();
+final res = await modelWithRetry.invoke(input);
+print(res);
+```
+
+## Retrying a chain
+
+`RunnableRetry` can be used to retry any `Runnable`, including a chain of `Runnable`s.
+
+Example:
+
+```dart
+final promptTemplate = ChatPromptTemplate.fromTemplate('tell me a joke about {topic}');
+final model = ChatOpenAI(
+  defaultOptions: ChatOpenAIOptions(model: 'gpt-4o'),
+);
+final chain = promptTemplate.pipe(model).withRetry();
+
+final res = await chain.batch(
+  [
+    {'topic': 'bears'},
+    {'topic': 'cats'},
+  ],
+);
+print(res);
+```
+
+> In general, it's best to keep the scope of the retry as small as possible.
+
+## Configuring the retry
+
+```dart
+// passing a fake model to cause an Exception
+final input = PromptValue.string('Explain why sky is blue in 2 lines');
+final model = ChatOpenAI(
+  defaultOptions: ChatOpenAIOptions(model: 'fake-model'),
+);
+final modelWithRetry = model.withRetry(
+  maxRetries: 3,
+  addJitter: true,
+);
+final res = await modelWithRetry.invoke(input);
+print(res);
+// retried 3 times and returned Exception:
+// OpenAIClientException({
+//   "uri": "https://api.openai.com/v1/chat/completions",
+//   "method": "POST",
+//   "code": 404,
+//   "message": "Unsuccessful response",
+//   "body": {
+//     "error": {
+//       "message": "The model `fake-model` does not exist or you do not have access to it.",
+//       "type": "invalid_request_error",
+//       "param": null,
+//       "code": "model_not_found"
+//     }
+//   }
+// })
+```
+
+## Passing delay durations
+
+If you want to use custom delay durations for each retry attempt, you can pass a list of `Duration` objects to the `delayDurations` parameter.
+ +```dart +final input = PromptValue.string('Explain why sky is blue in 2 lines'); +final model = ChatOpenAI( + defaultOptions: ChatOpenAIOptions(model: 'fake-model'), +); +final modelWithRetry = model.withRetry( + maxRetries: 3, + delayDurations: [ + Duration(seconds: 1), + Duration(seconds: 2), + Duration(seconds: 3), + ], +); +final res = await modelWithRetry.invoke(input); +print(res); +``` diff --git a/examples/docs_examples/bin/expression_language/primitives/retry.dart b/examples/docs_examples/bin/expression_language/primitives/retry.dart new file mode 100644 index 00000000..917ac501 --- /dev/null +++ b/examples/docs_examples/bin/expression_language/primitives/retry.dart @@ -0,0 +1,177 @@ +// ignore_for_file: avoid_print +import 'dart:io'; +import 'package:langchain/langchain.dart'; +import 'package:langchain_openai/langchain_openai.dart'; + +void main() async { + await _modelWithRetry(); + await _chainWithRetry(); + await _withRetryOptions(); + await _withDelayDurations(); +} + +Future _modelWithRetry() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final model = ChatOpenAI(apiKey: openaiApiKey); + final input = PromptValue.string('Explain why sky is blue in 2 lines'); + + final modelWithRetry = model.withRetry(); + final res = await modelWithRetry.invoke(input); + print(res); + /* + ChatResult{ + id: chatcmpl-9zmFYnu19Pd6ss3zVFHlKN71DILtx, + output: AIChatMessage{ + content: The sky appears blue due to Rayleigh scattering, where shorter wavelengths of sunlight (blue light) are scattered more than longer wavelengths (red light) by the molecules in the Earth's atmosphere. This scattering effect is most prominent when the sun is high in the sky., + toolCalls: [], +}, + finishReason: FinishReason.stop, + metadata: {model: gpt-4o-mini-2024-07-18, created: 1724510508, system_fingerprint: fp_48196bc67a}, + usage: LanguageModelUsage{ + promptTokens: 16, + promptBillableCharacters: null, + responseTokens: 52, + responseBillableCharacters: null, + totalTokens: 68} +, + streaming: false +} +*/ +} + +Future _chainWithRetry() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final promptTemplate = + ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + final model = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), + ); + final chain = promptTemplate.pipe(model).withRetry(); + + final res = await chain.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], + ); + print(res); + /* + [ChatResult{ + id: chatcmpl-9zmjiMfHP2WP3PhM6YXdoHXS02ZAm, + output: AIChatMessage{ + content: Sure, here's a bear-themed joke for you: + +Why did the bear refuse to play cards? + +Because he was afraid he might get spotted—he couldn’t bear the tension! 🐻♠️, + toolCalls: [], +}, + finishReason: FinishReason.stop, + metadata: {model: gpt-4o-2024-05-13, created: 1724512378, system_fingerprint: fp_3aa7262c27}, + usage: LanguageModelUsage{ + promptTokens: 13, + promptBillableCharacters: null, + responseTokens: 41, + responseBillableCharacters: null, + totalTokens: 54} +, + streaming: false +}, ChatResult{ + id: chatcmpl-9zmji1gxCZ4yR3UtX7Af4TBrRhPP1, + output: AIChatMessage{ + content: Sure, here's one for you: + +Why did the cat sit on the computer? + +Because it wanted to keep an eye on the mouse! 
🐱🖱️, + toolCalls: [], +}, + finishReason: FinishReason.stop, + metadata: {model: gpt-4o-2024-05-13, created: 1724512378, system_fingerprint: fp_c9aa9c0491}, + usage: LanguageModelUsage{ + promptTokens: 13, + promptBillableCharacters: null, + responseTokens: 34, + responseBillableCharacters: null, + totalTokens: 47} +, + streaming: false +}] +*/ +} + +Future _withRetryOptions() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final input = PromptValue.string('Explain why sky is blue in 2 lines'); + final model = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'fake-model'), + ); + final modelWithRetry = model.withRetry( + maxRetries: 3, + addJitter: true, + ); + final res = await modelWithRetry.invoke(input); + print(res); + /* + retry attempt 0 with delay duration 0:00:01.082000 + retry attempt 1 with delay duration 0:00:02.073000 + retry attempt 2 with delay duration 0:00:04.074000 + Unhandled exception: + Exception: Function failed to return response and max retries exceeded, Error: OpenAIClientException({ + "uri": "https://api.openai.com/v1/chat/completions", + "method": "POST", + "code": 404, + "message": "Unsuccessful response", + "body": { + "error": { + "message": "The model `fake-model` does not exist or you do not have access to it.", + "type": "invalid_request_error", + "param": null, + "code": "model_not_found" + } + } +})*/ +} + +Future _withDelayDurations() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final input = PromptValue.string('Explain why sky is blue in 2 lines'); + final model = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'fake-model'), + ); + final modelWithRetry = model.withRetry( + maxRetries: 3, + addJitter: false, + delayDurations: const [ + Duration(seconds: 1), + Duration(seconds: 2), + Duration(seconds: 3), + ], + ); + final res = await modelWithRetry.invoke(input); + print(res); + // retried with delays provided in RetryOptions + /* +retry attempt 0 with delay duration 0:00:01.000000 +retry attempt 1 with delay duration 0:00:02.000000 +retry attempt 2 with delay duration 0:00:03.000000 +Unhandled exception: +Exception: Function failed to return response and max retries exceeded, Error: OpenAIClientException({ + "uri": "https://api.openai.com/v1/chat/completions", + "method": "POST", + "code": 401, + "message": "Unsuccessful response", + "body": { + "error": { + "message": "You didn't provide an API key. You need to provide your API key in an Authorization header using Bearer auth (i.e. Authorization: Bearer YOUR_KEY), or as the password field (with blank username) if you're accessing the API from your browser and are prompted for a username and password. 
You can obtain an API key from https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": null, + "code": null + } + } +}) +*/ +} diff --git a/packages/langchain/lib/src/utils/utils.dart b/packages/langchain/lib/src/utils/utils.dart index d41e35b9..28748719 100644 --- a/packages/langchain/lib/src/utils/utils.dart +++ b/packages/langchain/lib/src/utils/utils.dart @@ -1,2 +1,6 @@ export 'package:langchain_core/utils.dart' - show calculateSimilarity, cosineSimilarity, getIndexesMostSimilarEmbeddings; + show + RetryOptions, + calculateSimilarity, + cosineSimilarity, + getIndexesMostSimilarEmbeddings; diff --git a/packages/langchain_core/lib/src/runnables/retry.dart b/packages/langchain_core/lib/src/runnables/retry.dart new file mode 100644 index 00000000..e49c4d22 --- /dev/null +++ b/packages/langchain_core/lib/src/runnables/retry.dart @@ -0,0 +1,63 @@ +import 'dart:async'; +import '../utils/retry_client.dart'; +import 'runnables.dart'; + +/// {@template runnable_retry} +/// A [Runnable] that automatically retries the operation if it fails. +/// +/// You can create a [RunnableRetry] using [Runnable.withRetry], passing in the +/// [RetryOptions]. +/// +/// When [invoke] or [batch] is called on the runnable, if the initial attempt +/// fails, it will be retried according to the specified [RetryOptions]. +/// +/// Example usage: +/// ```dart +/// final model = ChatOpenAI(...); +/// final modelWithRetry = model.withRetry(maxRetries: 2); +/// final res = await modelWithRetry.invoke(...); +/// ``` +/// {@endtemplate} +class RunnableRetry + extends Runnable { + /// {@macro runnable_retry} + RunnableRetry({ + required this.runnable, + required super.defaultOptions, + required this.retryOptions, + }); + + /// Runnable that will be retried on error. + final Runnable runnable; + + /// Options to retry the runnable. + final RetryOptions retryOptions; + + @override + Future invoke( + RunInput input, { + RunnableOptions? options, + }) async { + return retryClient( + options: retryOptions, + fn: () => runnable.invoke( + input, + options: options, + ), + ); + } + + @override + Future> batch( + List inputs, { + List? options, + }) async { + return retryClient( + options: retryOptions, + fn: () => runnable.batch( + inputs, + options: options, + ), + ); + } +} diff --git a/packages/langchain_core/lib/src/runnables/runnable.dart b/packages/langchain_core/lib/src/runnables/runnable.dart index 7c020a50..05f828ca 100644 --- a/packages/langchain_core/lib/src/runnables/runnable.dart +++ b/packages/langchain_core/lib/src/runnables/runnable.dart @@ -8,6 +8,7 @@ import 'input_map.dart'; import 'input_stream_map.dart'; import 'map.dart'; import 'passthrough.dart'; +import 'retry.dart'; import 'router.dart'; import 'sequence.dart'; import 'types.dart'; @@ -292,7 +293,7 @@ abstract class Runnable withFallbacks( List> fallbacks, ) { @@ -302,6 +303,38 @@ abstract class Runnable withRetry({ + final int maxRetries = 3, + final FutureOr Function(Object e)? retryIf, + final List? delayDurations, + final bool addJitter = false, + }) { + return RunnableRetry( + runnable: this, + defaultOptions: defaultOptions, + retryOptions: RetryOptions( + maxRetries: maxRetries, + retryIf: retryIf, + delayDurations: delayDurations, + addJitter: addJitter, + ), + ); + } + /// Returns the given [options] if they are compatible with the [Runnable], /// otherwise returns `null`. CallOptions? 
getCompatibleOptions( diff --git a/packages/langchain_core/lib/src/runnables/runnables.dart b/packages/langchain_core/lib/src/runnables/runnables.dart index 146761d7..3cbec552 100644 --- a/packages/langchain_core/lib/src/runnables/runnables.dart +++ b/packages/langchain_core/lib/src/runnables/runnables.dart @@ -5,6 +5,7 @@ export 'input_map.dart'; export 'input_stream_map.dart'; export 'map.dart'; export 'passthrough.dart'; +export 'retry.dart'; export 'router.dart'; export 'runnable.dart'; export 'runnable_ext.dart'; diff --git a/packages/langchain_core/lib/src/utils/retry_client.dart b/packages/langchain_core/lib/src/utils/retry_client.dart new file mode 100644 index 00000000..9cd15317 --- /dev/null +++ b/packages/langchain_core/lib/src/utils/retry_client.dart @@ -0,0 +1,92 @@ +import 'dart:async'; +import 'dart:math'; + +/// {@template retry_options} +/// Options to pass into [retryClient] to control the retry behavior. +/// {@endtemplate} +class RetryOptions { + /// {@macro retry_options} + RetryOptions({ + required this.maxRetries, + required this.addJitter, + this.retryIf, + this.delayDurations, + }); + + /// The maximum number of attempts to retry. + final int maxRetries; + + /// An evaluator function that can be used to decide if the function should + /// be retried based on the exception it throws. + /// + /// If you decide not to retry on a particular exception, [retryIf] can return + /// `false` and the retry won't happen. By default [retryIf] is `true` and + /// all exceptions are retried. + final FutureOr Function(Object e)? retryIf; + + /// The function will be retried based on an exponential backoff strategy + /// with a base delay of 1 second. + /// + /// But you can override this behavior by providing an optional list of + /// [delayDurations]`. Each entry in the list corresponds to a specific + /// retry attempt, and the corresponding delay from the list will be used + /// instead of the default exponential delay. + /// + /// For example, if you provide a list of `[2, 4, 8]`, the delays between the + /// first three retries will be 2, 4, and 8 seconds, respectively. + final List? delayDurations; + + /// Whether to add jitter to the exponential backoff. + /// + /// Jitter is a random value added to the delay to prevent multiple clients + /// from retrying at the same time. + final bool addJitter; +} + +/// A client that handles retry logic for a given function. +/// +/// This client takes [RetryOptions] and a function to execute. If the +/// function fails, it will be retried according to the specified options. +/// If it succeeds, the result of the function will be returned. +FutureOr retryClient({ + required RetryOptions options, + required FutureOr Function() fn, +}) async { + const defaultDelay = Duration(seconds: 1); + + for (int attempt = 0; attempt < options.maxRetries; attempt++) { + try { + return await fn(); + } catch (e) { + final isLastAttempt = attempt == options.maxRetries - 1; + final shouldRetry = await options.retryIf?.call(e) ?? true; + + if (isLastAttempt || !shouldRetry) { + rethrow; + } + + final duration = + options.delayDurations?[attempt] ?? 
defaultDelay * pow(2, attempt); + await _delay(duration, attempt, options.addJitter); + } + } + + // This line should never be reached + throw StateError('Exhausted all retry attempts'); +} + +Future _delay( + final Duration duration, + final int attempt, + final bool addJitter, +) async { + final Duration delay; + if (addJitter) { + final random = Random(); + final jitter = random.nextInt(100); + delay = Duration(milliseconds: duration.inMilliseconds + jitter); + } else { + delay = duration; + } + await Future.delayed(delay); +} diff --git a/packages/langchain_core/lib/src/utils/utils.dart b/packages/langchain_core/lib/src/utils/utils.dart index d439ed98..57924640 100644 --- a/packages/langchain_core/lib/src/utils/utils.dart +++ b/packages/langchain_core/lib/src/utils/utils.dart @@ -1,3 +1,4 @@ export 'chunk.dart'; export 'reduce.dart'; +export 'retry_client.dart'; export 'similarity.dart'; diff --git a/packages/langchain_core/test/runnables/retry_test.dart b/packages/langchain_core/test/runnables/retry_test.dart new file mode 100644 index 00000000..f1e8f625 --- /dev/null +++ b/packages/langchain_core/test/runnables/retry_test.dart @@ -0,0 +1,87 @@ +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/runnables.dart'; +import 'package:test/test.dart'; + +void main() { + group('Runnable Retry Test', () { + late FakeEchoChatModel model; + final input = PromptValue.string('why is the sky blue'); + final promptTemplate = + ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + + setUp(() { + model = const FakeEchoChatModel(); + }); + + test('Runnable retry should return output for invoke', () async { + final modelWithRetry = model.withRetry(maxRetries: 2); + final res = await modelWithRetry.invoke(input); + expect(res.output.content, 'why is the sky blue'); + }); + + test('Runnable retry should return output for batch', () async { + final chain = promptTemplate.pipe(model); + final chainWithRetry = chain.withRetry(); + final res = await chainWithRetry.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], + ); + expect(res[0].output.content, 'tell me a joke about bears'); + expect(res[1].output.content, 'tell me a joke about cats'); + }); + + test('Should retry based RetryOptions, maxRetries = 2', () async { + final modelWithRetry = model.withRetry(maxRetries: 2); + expect( + () async => modelWithRetry.invoke( + input, + options: const FakeEchoChatModelOptions(throwRandomError: true), + ), + throwsException, + ); + }); + + test('Should return the output after successful retry', () async { + int count = 0; + final modelWithRetry = model.pipe( + Runnable.fromFunction( + invoke: (input, opt) { + if (count++ < 1) { + throw Exception('Random error'); + } + return input; + }, + ), + ).withRetry(maxRetries: 2); + final res = await modelWithRetry.invoke(input); + expect(res.outputAsString, input.toString()); + expect(count, 2); + }); + + test('Should not retry if retryIf returned false', () async { + late String error; + final modelWithRetry = model.withRetry( + maxRetries: 3, + retryIf: (e) { + if (e.toString() == 'Exception: Random error') { + return false; + } else { + return true; + } + }, + ); + try { + await modelWithRetry.invoke( + input, + options: const FakeEchoChatModelOptions(throwRandomError: true), + ); + } catch (e) { + error = e.toString(); + } + expect(error, 'Exception: Random error'); + }); + }); +} From d1cca2242c11e3401e80a4155f3963bac155f560 Mon Sep 17 00:00:00 2001 From: David Miguel 
Lozano Date: Thu, 29 Aug 2024 00:06:31 +0200 Subject: [PATCH 110/251] feat: Option to include file search results in assistants API (#543) --- packages/openai_dart/README.md | 134 +- packages/openai_dart/lib/openai_dart.dart | 2 +- .../openai_dart/lib/src/generated/client.dart | 16 + .../schema/assistant_stream_event.dart | 4 +- .../src/generated/schema/assistant_tools.dart | 18 +- .../generated/schema/file_search_ranker.dart | 17 + .../schema/file_search_ranking_options.dart | 62 + .../schema/run_step_details_tool_calls.dart | 5 +- ...n_step_details_tool_calls_file_search.dart | 48 + ...ls_file_search_ranking_options_object.dart | 56 + ...tool_calls_file_search_result_content.dart | 46 + ..._tool_calls_file_search_result_object.dart | 71 + .../lib/src/generated/schema/schema.dart | 6 + .../src/generated/schema/schema.freezed.dart | 1415 ++++++++++++++++- .../lib/src/generated/schema/schema.g.dart | 141 +- packages/openai_dart/oas/openapi_curated.yaml | 117 +- .../openai_dart/oas/openapi_official.yaml | 140 +- packages/openai_dart/pubspec.yaml | 2 +- 18 files changed, 2171 insertions(+), 129 deletions(-) create mode 100644 packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md index df9cc58b..12f5b51f 100644 --- a/packages/openai_dart/README.md +++ b/packages/openai_dart/README.md @@ -20,7 +20,7 @@ Unofficial Dart client for [OpenAI](https://platform.openai.com/docs/api-referen **Supported endpoints:** -- Chat (with tools and streaming support) +- Chat (with structured outputs, tools and streaming support) - Completions (legacy) - Embeddings - Fine-tuning @@ -28,7 +28,7 @@ Unofficial Dart client for [OpenAI](https://platform.openai.com/docs/api-referen - Images - Models - Moderations -- Assistants v2 (with tools and streaming support) `beta` +- Assistants v2 (with structured outputs, tools and streaming support) `beta` * Threads * Messages * Runs @@ -97,14 +97,14 @@ final client = OpenAIClient( Given a list of messages comprising a conversation, the model will return a response. -Related guide: [Chat Completions](https://platform.openai.com/docs/guides/text-generation) +Related guide: [Chat Completions](https://platform.openai.com/docs/guides/chat-completions) **Create chat completion:** ```dart final res = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-4'), + model: ChatCompletionModel.modelId('gpt-4o'), messages: [ ChatCompletionMessage.system( content: 'You are a helpful assistant.', @@ -121,28 +121,28 @@ print(res.choices.first.message.content); ``` `ChatCompletionModel` is a sealed class that offers two ways to specify the model: -- `ChatCompletionModel.modelId('model-id')`: the model ID as string (e.g. `'gpt-4'` or your fine-tuned model ID). 
-- `ChatCompletionModel.model(ChatCompletionModels.gpt4)`: a value from `ChatCompletionModels` enum which lists all of the available models. +- `ChatCompletionModel.modelId('model-id')`: the model ID as string (e.g. `'gpt-4o'` or your fine-tuned model ID). +- `ChatCompletionModel.model(ChatCompletionModels.gpt4o)`: a value from `ChatCompletionModels` enum which lists all of the available models. `ChatCompletionMessage` is a sealed class that supports the following message types: - `ChatCompletionMessage.system()`: a system message. - `ChatCompletionMessage.user()`: a user message. - `ChatCompletionMessage.assistant()`: an assistant message. - `ChatCompletionMessage.tool()`: a tool message. -- `ChatCompletionMessage.function()`: a function message. +- `ChatCompletionMessage.function()`: a function message (deprecated in favor of tools). `ChatCompletionMessage.user()` takes a `ChatCompletionUserMessageContent` object that supports the following content types: - `ChatCompletionUserMessageContent.string('content')`: string content. - `ChatCompletionUserMessageContent.parts([...])`: multi-modal content (check the 'Multi-modal prompt' section below). * `ChatCompletionMessageContentPart.text('content')`: text content. - * `ChatCompletionMessageContentPart.image(imageUrl: ...)`: image content. + * `ChatCompletionMessageContentPart.image(...)`: image content (URL or base64-encoded image). **Stream chat completion:** ```dart final stream = client.createChatCompletionStream( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-4-turbo'), + model: ChatCompletionModel.modelId('gpt-4o'), messages: [ ChatCompletionMessage.system( content: @@ -167,6 +167,8 @@ await for (final res in stream) { **Multi-modal prompt:** ([docs](https://platform.openai.com/docs/guides/vision)) +You can either provide the image URL: + ```dart final res = await client.createChatCompletion( request: CreateChatCompletionRequest( @@ -198,37 +200,31 @@ print(res.choices.first.message.content); // The fruit in the image is an apple. ``` -**JSON mode:** ([docs](https://platform.openai.com/docs/guides/structured-outputs/json-mode)) - +Or provide the base64-encoded image: ```dart -final res = await client.createChatCompletion( - request: CreateChatCompletionRequest( - model: ChatCompletionModel.model( - ChatCompletionModels.gpt41106Preview, - ), - messages: [ - ChatCompletionMessage.system( - content: - 'You are a helpful assistant. That extracts names from text ' - 'and returns them in a JSON array.', +//... +ChatCompletionMessage.user( + content: ChatCompletionUserMessageContent.parts( + [ + ChatCompletionMessageContentPart.text( + text: 'What fruit is this?', ), - ChatCompletionMessage.user( - content: ChatCompletionUserMessageContent.string( - 'John, Mary, and Peter.', + ChatCompletionMessageContentPart.image( + imageUrl: ChatCompletionMessageImageUrl( + url: '/9j/4AAQSkZJRgABAQAAAQABAAD/2wB...P3s/XHQ8cE/nmiupbL0+fz/r/MjnSbsr69/Rdu1j//2Q==', + detail: ChatCompletionMessageImageDetail.high, ), ), ], - temperature: 0, - responseFormat: ChatCompletionResponseFormat( - type: ChatCompletionResponseFormatType.jsonObject, - ), ), -); -// { "names": ["John", "Mary", "Peter"] } +), +//... ``` **Structured output: ([docs](https://platform.openai.com/docs/guides/structured-outputs))** +Structured Outputs is a feature that ensures the model will always generate responses that adhere to your supplied JSON Schema. 
+ ```dart final res = await client.createChatCompletion( request: CreateChatCompletionRequest( @@ -237,8 +233,7 @@ final res = await client.createChatCompletion( ), messages: [ ChatCompletionMessage.system( - content: - 'You are a helpful assistant. That extracts names from text.', + content: 'You are a helpful assistant. That extracts names from text.', ), ChatCompletionMessage.user( content: ChatCompletionUserMessageContent.string( @@ -272,8 +267,41 @@ final res = await client.createChatCompletion( // {"names":["John","Mary","Peter"]} ``` +**JSON mode:** ([docs](https://platform.openai.com/docs/guides/structured-outputs/json-mode)) + +> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It us recommended to use Structured Outputs if it is supported for your use case. + +```dart +final res = await client.createChatCompletion( + request: CreateChatCompletionRequest( + model: ChatCompletionModel.model( + ChatCompletionModels.gpt41106Preview, + ), + messages: [ + ChatCompletionMessage.system( + content: + 'You are a helpful assistant. That extracts names from text ' + 'and returns them in a JSON array.', + ), + ChatCompletionMessage.user( + content: ChatCompletionUserMessageContent.string( + 'John, Mary, and Peter.', + ), + ), + ], + temperature: 0, + responseFormat: ChatCompletionResponseFormat( + type: ChatCompletionResponseFormatType.jsonObject, + ), + ), +); +// { "names": ["John", "Mary", "Peter"] } +``` + **Tools:** ([docs](https://platform.openai.com/docs/guides/function-calling)) +Tool calling allows you to connect models to external tools and systems. + ```dart const function = FunctionObject( name: 'get_current_weather', @@ -301,7 +329,7 @@ const tool = ChatCompletionTool( final res1 = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: const ChatCompletionModel.model( + model: ChatCompletionModel.model( ChatCompletionModels.gpt4oMini, ), messages: [ @@ -353,6 +381,8 @@ final answer = res2.choices.first.message.content; // The weather in Boston right now is sunny with a temperature of 22°C ``` +You can enable Structured Outputs for your tools by setting `strict: true` in your `FunctionObject` definition. Structured Outputs ensures that the arguments generated by the model for a tool call exactly match the JSON Schema you provided in the tool definition. 
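
As a rough sketch, the weather tool above could be declared strict like this (OpenAI requires a strict schema to set `additionalProperties: false` and to list every property under `required`; the `strictTool` name is just illustrative):

```dart
const strictTool = ChatCompletionTool(
  type: ChatCompletionToolType.function,
  function: FunctionObject(
    name: 'get_current_weather',
    description: 'Get the current weather in a given location',
    strict: true, // enable Structured Outputs for this tool's arguments
    parameters: {
      'type': 'object',
      'properties': {
        'location': {
          'type': 'string',
          'description': 'The city and country, e.g. Paris, FR',
        },
        'unit': {
          'type': 'string',
          'enum': ['celsius', 'fahrenheit'],
        },
      },
      'required': ['location', 'unit'],
      'additionalProperties': false,
    },
  ),
);
```

It can then be passed via `tools` exactly like the non-strict tool shown above.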
+ **Function calling:** (deprecated in favor of tools) ```dart @@ -813,7 +843,7 @@ final res = await client.createThreadMessage( ), MessageContent.imageUrl( imageUrl: MessageContentImageUrl( - url: 'https://example.com/image.jpg', + url: 'https://example.com/image.jpg', // or base64-encoded image ), ), ]), @@ -867,6 +897,42 @@ final res = await client.createThreadRun( ); ``` +You can also use Structured Outputs to ensure that the model-generated responses adhere to a specific JSON schema: + +```dart + +final res = await client.createThreadRun( + threadId: threadId, + request: CreateRunRequest( + assistantId: assistantId, + instructions: 'You are a helpful assistant that extracts names from text.', + model: CreateRunRequestModel.modelId('gpt-4o'), + responseFormat: CreateRunRequestResponseFormat.responseFormat( + ResponseFormat.jsonSchema( + jsonSchema: JsonSchemaObject( + name: 'Names', + description: 'A list of names', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'names': { + 'type': 'array', + 'items': { + 'type': 'string', + }, + }, + }, + 'additionalProperties': false, + 'required': ['names'], + }, + ), + ) + ) + ), +); +``` + **Create run: (streaming)** ```dart diff --git a/packages/openai_dart/lib/openai_dart.dart b/packages/openai_dart/lib/openai_dart.dart index 7600ced2..57003125 100644 --- a/packages/openai_dart/lib/openai_dart.dart +++ b/packages/openai_dart/lib/openai_dart.dart @@ -1,4 +1,4 @@ -/// Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. +/// Dart client for the OpenAI API. Supports chat (GPT-4o, etc.), completions, embeddings, images (DALL·E 3), assistants (threads, runs, vector stores, etc.), batch, fine-tuning, etc. library; export 'src/client.dart'; diff --git a/packages/openai_dart/lib/src/generated/client.dart b/packages/openai_dart/lib/src/generated/client.dart index 828b26be..b58d7e15 100644 --- a/packages/openai_dart/lib/src/generated/client.dart +++ b/packages/openai_dart/lib/src/generated/client.dart @@ -1175,11 +1175,14 @@ class OpenAIClient { /// /// `threadId`: The ID of the thread to run. /// + /// `include`: A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + /// /// `request`: Request object for the Create run endpoint. /// /// `POST` `https://api.openai.com/v1/threads/{thread_id}/runs` Future createThreadRun({ required String threadId, + String? include, required CreateRunRequest request, }) async { final r = await makeRequest( @@ -1190,6 +1193,9 @@ class OpenAIClient { requestType: 'application/json', responseType: 'application/json', body: request, + queryParams: { + if (include != null) 'include': include, + }, ); return RunObject.fromJson(_jsonDecode(r)); } @@ -1324,6 +1330,8 @@ class OpenAIClient { /// /// `before`: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// + /// `include`: A list of additional fields to include in the response. 
Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + /// /// `GET` `https://api.openai.com/v1/threads/{thread_id}/runs/{run_id}/steps` Future listThreadRunSteps({ required String threadId, @@ -1332,6 +1340,7 @@ class OpenAIClient { String order = 'desc', String? after, String? before, + String? include, }) async { final r = await makeRequest( baseUrl: 'https://api.openai.com/v1', @@ -1345,6 +1354,7 @@ class OpenAIClient { 'order': order, if (after != null) 'after': after, if (before != null) 'before': before, + if (include != null) 'include': include, }, ); return ListRunStepsResponse.fromJson(_jsonDecode(r)); @@ -1362,11 +1372,14 @@ class OpenAIClient { /// /// `stepId`: The ID of the run step to retrieve. /// + /// `include`: A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + /// /// `GET` `https://api.openai.com/v1/threads/{thread_id}/runs/{run_id}/steps/{step_id}` Future getThreadRunStep({ required String threadId, required String runId, required String stepId, + String? include, }) async { final r = await makeRequest( baseUrl: 'https://api.openai.com/v1', @@ -1375,6 +1388,9 @@ class OpenAIClient { isMultipart: false, requestType: '', responseType: 'application/json', + queryParams: { + if (include != null) 'include': include, + }, ); return RunStepObject.fromJson(_jsonDecode(r)); } diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart b/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart index 348155db..0686da7b 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart @@ -61,7 +61,7 @@ sealed class AssistantStreamEvent with _$AssistantStreamEvent { // UNION: RunStepStreamEvent // ------------------------------------------ - /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. + /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. const factory AssistantStreamEvent.runStepStreamEvent({ /// The type of the event. required EventType event, @@ -74,7 +74,7 @@ sealed class AssistantStreamEvent with _$AssistantStreamEvent { // UNION: RunStepStreamDeltaEvent // ------------------------------------------ - /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. + /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. const factory AssistantStreamEvent.runStepStreamDeltaEvent({ /// The type of the event. 
required EventType event, diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart index e36cd8e6..920d2301 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart @@ -83,9 +83,17 @@ class AssistantToolsFileSearchFileSearch /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// - /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// Note that the file search tool may output fewer than `max_num_results` results. See the + /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. @JsonKey(name: 'max_num_results', includeIfNull: false) int? maxNumResults, + + /// The ranking options for the file search. + /// + /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? rankingOptions, }) = _AssistantToolsFileSearchFileSearch; /// Object construction from a JSON representation @@ -94,7 +102,10 @@ class AssistantToolsFileSearchFileSearch _$AssistantToolsFileSearchFileSearchFromJson(json); /// List of all property names of schema - static const List propertyNames = ['max_num_results']; + static const List propertyNames = [ + 'max_num_results', + 'ranking_options' + ]; /// Validation constants static const maxNumResultsMinValue = 1; @@ -115,6 +126,7 @@ class AssistantToolsFileSearchFileSearch Map toMap() { return { 'max_num_results': maxNumResults, + 'ranking_options': rankingOptions, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart b/packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart new file mode 100644 index 00000000..6dfc6218 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart @@ -0,0 +1,17 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// ENUM: FileSearchRanker +// ========================================== + +/// The ranker to use for the file search. If not specified will use the `auto` ranker. 
+enum FileSearchRanker { + @JsonValue('auto') + auto, + @JsonValue('default_2024_08_21') + default20240821, +} diff --git a/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart b/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart new file mode 100644 index 00000000..e60070f0 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart @@ -0,0 +1,62 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: FileSearchRankingOptions +// ========================================== + +/// The ranking options for the file search. +/// +/// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) +/// for more information. +@freezed +class FileSearchRankingOptions with _$FileSearchRankingOptions { + const FileSearchRankingOptions._(); + + /// Factory constructor for FileSearchRankingOptions + const factory FileSearchRankingOptions({ + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + FileSearchRanker? ranker, + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @JsonKey(name: 'score_threshold', includeIfNull: false) + double? scoreThreshold, + }) = _FileSearchRankingOptions; + + /// Object construction from a JSON representation + factory FileSearchRankingOptions.fromJson(Map json) => + _$FileSearchRankingOptionsFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['ranker', 'score_threshold']; + + /// Validation constants + static const scoreThresholdMinValue = 0.0; + static const scoreThresholdMaxValue = 1.0; + + /// Perform validations on the schema property values + String? validateSchema() { + if (scoreThreshold != null && scoreThreshold! < scoreThresholdMinValue) { + return "The value of 'scoreThreshold' cannot be < $scoreThresholdMinValue"; + } + if (scoreThreshold != null && scoreThreshold! > scoreThresholdMaxValue) { + return "The value of 'scoreThreshold' cannot be > $scoreThresholdMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'ranker': ranker, + 'score_threshold': scoreThreshold, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart index c4605b7b..327de9f5 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart @@ -42,8 +42,9 @@ sealed class RunStepDetailsToolCalls with _$RunStepDetailsToolCalls { /// The type of tool call. This is always going to be `file_search` for this type of tool call. required String type, - /// For now, this is always going to be an empty object. - @JsonKey(name: 'file_search') required Map fileSearch, + /// The definition of the file search that was called. 
+ @JsonKey(name: 'file_search') + required RunStepDetailsToolCallsFileSearch fileSearch, }) = RunStepDetailsToolCallsFileSearchObject; // ------------------------------------------ diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart new file mode 100644 index 00000000..16f72322 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart @@ -0,0 +1,48 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: RunStepDetailsToolCallsFileSearch +// ========================================== + +/// The definition of the file search that was called. +@freezed +class RunStepDetailsToolCallsFileSearch + with _$RunStepDetailsToolCallsFileSearch { + const RunStepDetailsToolCallsFileSearch._(); + + /// Factory constructor for RunStepDetailsToolCallsFileSearch + const factory RunStepDetailsToolCallsFileSearch({ + /// The ranking options for the file search. + @JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, + + /// The results of the file search. + @JsonKey(includeIfNull: false) + List? results, + }) = _RunStepDetailsToolCallsFileSearch; + + /// Object construction from a JSON representation + factory RunStepDetailsToolCallsFileSearch.fromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['ranking_options', 'results']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'ranking_options': rankingOptions, + 'results': results, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart new file mode 100644 index 00000000..61b2ff06 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart @@ -0,0 +1,56 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: RunStepDetailsToolCallsFileSearchRankingOptionsObject +// ========================================== + +/// The ranking options for the file search. +@freezed +class RunStepDetailsToolCallsFileSearchRankingOptionsObject + with _$RunStepDetailsToolCallsFileSearchRankingOptionsObject { + const RunStepDetailsToolCallsFileSearchRankingOptionsObject._(); + + /// Factory constructor for RunStepDetailsToolCallsFileSearchRankingOptionsObject + const factory RunStepDetailsToolCallsFileSearchRankingOptionsObject({ + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + required FileSearchRanker ranker, + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. 
+ @JsonKey(name: 'score_threshold') required double scoreThreshold, + }) = _RunStepDetailsToolCallsFileSearchRankingOptionsObject; + + /// Object construction from a JSON representation + factory RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['ranker', 'score_threshold']; + + /// Validation constants + static const scoreThresholdMinValue = 0.0; + static const scoreThresholdMaxValue = 1.0; + + /// Perform validations on the schema property values + String? validateSchema() { + if (scoreThreshold < scoreThresholdMinValue) { + return "The value of 'scoreThreshold' cannot be < $scoreThresholdMinValue"; + } + if (scoreThreshold > scoreThresholdMaxValue) { + return "The value of 'scoreThreshold' cannot be > $scoreThresholdMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'ranker': ranker, + 'score_threshold': scoreThreshold, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart new file mode 100644 index 00000000..3ba23a07 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart @@ -0,0 +1,46 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: RunStepDetailsToolCallsFileSearchResultContent +// ========================================== + +/// The content of the result that was found. +@freezed +class RunStepDetailsToolCallsFileSearchResultContent + with _$RunStepDetailsToolCallsFileSearchResultContent { + const RunStepDetailsToolCallsFileSearchResultContent._(); + + /// Factory constructor for RunStepDetailsToolCallsFileSearchResultContent + const factory RunStepDetailsToolCallsFileSearchResultContent({ + /// The type of the content. + @Default('text') String type, + + /// The text content of the file. + @JsonKey(includeIfNull: false) String? text, + }) = _RunStepDetailsToolCallsFileSearchResultContent; + + /// Object construction from a JSON representation + factory RunStepDetailsToolCallsFileSearchResultContent.fromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchResultContentFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['type', 'text']; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'type': type, + 'text': text, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart new file mode 100644 index 00000000..4b1a1de0 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart @@ -0,0 +1,71 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: RunStepDetailsToolCallsFileSearchResultObject +// ========================================== + +/// A result instance of the file search. +@freezed +class RunStepDetailsToolCallsFileSearchResultObject + with _$RunStepDetailsToolCallsFileSearchResultObject { + const RunStepDetailsToolCallsFileSearchResultObject._(); + + /// Factory constructor for RunStepDetailsToolCallsFileSearchResultObject + const factory RunStepDetailsToolCallsFileSearchResultObject({ + /// The ID of the file that result was found in. + @JsonKey(name: 'file_id') required String fileId, + + /// The name of the file that result was found in. + @JsonKey(name: 'file_name') required String fileName, + + /// The score of the result. All values must be a floating point number between 0 and 1. + required double score, + + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. + @JsonKey(includeIfNull: false) + List? content, + }) = _RunStepDetailsToolCallsFileSearchResultObject; + + /// Object construction from a JSON representation + factory RunStepDetailsToolCallsFileSearchResultObject.fromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchResultObjectFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'file_id', + 'file_name', + 'score', + 'content' + ]; + + /// Validation constants + static const scoreMinValue = 0.0; + static const scoreMaxValue = 1.0; + + /// Perform validations on the schema property values + String? 
validateSchema() { + if (score < scoreMinValue) { + return "The value of 'score' cannot be < $scoreMinValue"; + } + if (score > scoreMaxValue) { + return "The value of 'score' cannot be > $scoreMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'file_id': fileId, + 'file_name': fileName, + 'score': score, + 'content': content, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart index 028e108f..a48b094d 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -73,6 +73,8 @@ part 'create_assistant_request.dart'; part 'modify_assistant_request.dart'; part 'delete_assistant_response.dart'; part 'list_assistants_response.dart'; +part 'file_search_ranking_options.dart'; +part 'file_search_ranker.dart'; part 'assistants_named_tool_choice.dart'; part 'assistants_function_call_option.dart'; part 'truncation_object.dart'; @@ -119,6 +121,10 @@ part 'run_step_details_tool_calls_code_object_code_interpreter.dart'; part 'run_step_delta_step_details_tool_calls_code_object_code_interpreter.dart'; part 'run_step_details_tool_calls_code_output_image.dart'; part 'run_step_delta_step_details_tool_calls_code_output_image.dart'; +part 'run_step_details_tool_calls_file_search.dart'; +part 'run_step_details_tool_calls_file_search_ranking_options_object.dart'; +part 'run_step_details_tool_calls_file_search_result_object.dart'; +part 'run_step_details_tool_calls_file_search_result_content.dart'; part 'run_step_completion_usage.dart'; part 'vector_store_expiration_after.dart'; part 'vector_store_object.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 76274966..5753970f 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -27762,6 +27762,222 @@ abstract class _ListAssistantsResponse extends ListAssistantsResponse { get copyWith => throw _privateConstructorUsedError; } +FileSearchRankingOptions _$FileSearchRankingOptionsFromJson( + Map json) { + return _FileSearchRankingOptions.fromJson(json); +} + +/// @nodoc +mixin _$FileSearchRankingOptions { + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + FileSearchRanker? get ranker => throw _privateConstructorUsedError; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @JsonKey(name: 'score_threshold', includeIfNull: false) + double? get scoreThreshold => throw _privateConstructorUsedError; + + /// Serializes this FileSearchRankingOptions to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $FileSearchRankingOptionsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $FileSearchRankingOptionsCopyWith<$Res> { + factory $FileSearchRankingOptionsCopyWith(FileSearchRankingOptions value, + $Res Function(FileSearchRankingOptions) then) = + _$FileSearchRankingOptionsCopyWithImpl<$Res, FileSearchRankingOptions>; + @useResult + $Res call( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + FileSearchRanker? ranker, + @JsonKey(name: 'score_threshold', includeIfNull: false) + double? scoreThreshold}); +} + +/// @nodoc +class _$FileSearchRankingOptionsCopyWithImpl<$Res, + $Val extends FileSearchRankingOptions> + implements $FileSearchRankingOptionsCopyWith<$Res> { + _$FileSearchRankingOptionsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? ranker = freezed, + Object? scoreThreshold = freezed, + }) { + return _then(_value.copyWith( + ranker: freezed == ranker + ? _value.ranker + : ranker // ignore: cast_nullable_to_non_nullable + as FileSearchRanker?, + scoreThreshold: freezed == scoreThreshold + ? _value.scoreThreshold + : scoreThreshold // ignore: cast_nullable_to_non_nullable + as double?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$FileSearchRankingOptionsImplCopyWith<$Res> + implements $FileSearchRankingOptionsCopyWith<$Res> { + factory _$$FileSearchRankingOptionsImplCopyWith( + _$FileSearchRankingOptionsImpl value, + $Res Function(_$FileSearchRankingOptionsImpl) then) = + __$$FileSearchRankingOptionsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + FileSearchRanker? ranker, + @JsonKey(name: 'score_threshold', includeIfNull: false) + double? scoreThreshold}); +} + +/// @nodoc +class __$$FileSearchRankingOptionsImplCopyWithImpl<$Res> + extends _$FileSearchRankingOptionsCopyWithImpl<$Res, + _$FileSearchRankingOptionsImpl> + implements _$$FileSearchRankingOptionsImplCopyWith<$Res> { + __$$FileSearchRankingOptionsImplCopyWithImpl( + _$FileSearchRankingOptionsImpl _value, + $Res Function(_$FileSearchRankingOptionsImpl) _then) + : super(_value, _then); + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? ranker = freezed, + Object? scoreThreshold = freezed, + }) { + return _then(_$FileSearchRankingOptionsImpl( + ranker: freezed == ranker + ? _value.ranker + : ranker // ignore: cast_nullable_to_non_nullable + as FileSearchRanker?, + scoreThreshold: freezed == scoreThreshold + ? 
_value.scoreThreshold + : scoreThreshold // ignore: cast_nullable_to_non_nullable + as double?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$FileSearchRankingOptionsImpl extends _FileSearchRankingOptions { + const _$FileSearchRankingOptionsImpl( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.ranker, + @JsonKey(name: 'score_threshold', includeIfNull: false) + this.scoreThreshold}) + : super._(); + + factory _$FileSearchRankingOptionsImpl.fromJson(Map json) => + _$$FileSearchRankingOptionsImplFromJson(json); + + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @override + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final FileSearchRanker? ranker; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @override + @JsonKey(name: 'score_threshold', includeIfNull: false) + final double? scoreThreshold; + + @override + String toString() { + return 'FileSearchRankingOptions(ranker: $ranker, scoreThreshold: $scoreThreshold)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$FileSearchRankingOptionsImpl && + (identical(other.ranker, ranker) || other.ranker == ranker) && + (identical(other.scoreThreshold, scoreThreshold) || + other.scoreThreshold == scoreThreshold)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, ranker, scoreThreshold); + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$FileSearchRankingOptionsImplCopyWith<_$FileSearchRankingOptionsImpl> + get copyWith => __$$FileSearchRankingOptionsImplCopyWithImpl< + _$FileSearchRankingOptionsImpl>(this, _$identity); + + @override + Map toJson() { + return _$$FileSearchRankingOptionsImplToJson( + this, + ); + } +} + +abstract class _FileSearchRankingOptions extends FileSearchRankingOptions { + const factory _FileSearchRankingOptions( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final FileSearchRanker? ranker, + @JsonKey(name: 'score_threshold', includeIfNull: false) + final double? scoreThreshold}) = _$FileSearchRankingOptionsImpl; + const _FileSearchRankingOptions._() : super._(); + + factory _FileSearchRankingOptions.fromJson(Map json) = + _$FileSearchRankingOptionsImpl.fromJson; + + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @override + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + FileSearchRanker? get ranker; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @override + @JsonKey(name: 'score_threshold', includeIfNull: false) + double? get scoreThreshold; + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$FileSearchRankingOptionsImplCopyWith<_$FileSearchRankingOptionsImpl> + get copyWith => throw _privateConstructorUsedError; +} + AssistantsNamedToolChoice _$AssistantsNamedToolChoiceFromJson( Map json) { return _AssistantsNamedToolChoice.fromJson(json); @@ -46412,6 +46628,975 @@ abstract class _RunStepDeltaStepDetailsToolCallsCodeOutputImage get copyWith => throw _privateConstructorUsedError; } +RunStepDetailsToolCallsFileSearch _$RunStepDetailsToolCallsFileSearchFromJson( + Map json) { + return _RunStepDetailsToolCallsFileSearch.fromJson(json); +} + +/// @nodoc +mixin _$RunStepDetailsToolCallsFileSearch { + /// The ranking options for the file search. + @JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? get rankingOptions => + throw _privateConstructorUsedError; + + /// The results of the file search. + @JsonKey(includeIfNull: false) + List? get results => + throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCallsFileSearch to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsFileSearchCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $RunStepDetailsToolCallsFileSearchCopyWith<$Res> { + factory $RunStepDetailsToolCallsFileSearchCopyWith( + RunStepDetailsToolCallsFileSearch value, + $Res Function(RunStepDetailsToolCallsFileSearch) then) = + _$RunStepDetailsToolCallsFileSearchCopyWithImpl<$Res, + RunStepDetailsToolCallsFileSearch>; + @useResult + $Res call( + {@JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, + @JsonKey(includeIfNull: false) + List? results}); + + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>? + get rankingOptions; +} + +/// @nodoc +class _$RunStepDetailsToolCallsFileSearchCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFileSearch> + implements $RunStepDetailsToolCallsFileSearchCopyWith<$Res> { + _$RunStepDetailsToolCallsFileSearchCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? rankingOptions = freezed, + Object? results = freezed, + }) { + return _then(_value.copyWith( + rankingOptions: freezed == rankingOptions + ? _value.rankingOptions + : rankingOptions // ignore: cast_nullable_to_non_nullable + as RunStepDetailsToolCallsFileSearchRankingOptionsObject?, + results: freezed == results + ? _value.results + : results // ignore: cast_nullable_to_non_nullable + as List?, + ) as $Val); + } + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>? 
+ get rankingOptions { + if (_value.rankingOptions == null) { + return null; + } + + return $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>( + _value.rankingOptions!, (value) { + return _then(_value.copyWith(rankingOptions: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$RunStepDetailsToolCallsFileSearchImplCopyWith<$Res> + implements $RunStepDetailsToolCallsFileSearchCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchImplCopyWith( + _$RunStepDetailsToolCallsFileSearchImpl value, + $Res Function(_$RunStepDetailsToolCallsFileSearchImpl) then) = + __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, + @JsonKey(includeIfNull: false) + List? results}); + + @override + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>? + get rankingOptions; +} + +/// @nodoc +class __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsFileSearchCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFileSearchImpl> + implements _$$RunStepDetailsToolCallsFileSearchImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchImpl) _then) + : super(_value, _then); + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? rankingOptions = freezed, + Object? results = freezed, + }) { + return _then(_$RunStepDetailsToolCallsFileSearchImpl( + rankingOptions: freezed == rankingOptions + ? _value.rankingOptions + : rankingOptions // ignore: cast_nullable_to_non_nullable + as RunStepDetailsToolCallsFileSearchRankingOptionsObject?, + results: freezed == results + ? _value._results + : results // ignore: cast_nullable_to_non_nullable + as List?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$RunStepDetailsToolCallsFileSearchImpl + extends _RunStepDetailsToolCallsFileSearch { + const _$RunStepDetailsToolCallsFileSearchImpl( + {@JsonKey(name: 'ranking_options', includeIfNull: false) + this.rankingOptions, + @JsonKey(includeIfNull: false) + final List? results}) + : _results = results, + super._(); + + factory _$RunStepDetailsToolCallsFileSearchImpl.fromJson( + Map json) => + _$$RunStepDetailsToolCallsFileSearchImplFromJson(json); + + /// The ranking options for the file search. + @override + @JsonKey(name: 'ranking_options', includeIfNull: false) + final RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions; + + /// The results of the file search. + final List? _results; + + /// The results of the file search. + @override + @JsonKey(includeIfNull: false) + List? 
get results { + final value = _results; + if (value == null) return null; + if (_results is EqualUnmodifiableListView) return _results; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + @override + String toString() { + return 'RunStepDetailsToolCallsFileSearch(rankingOptions: $rankingOptions, results: $results)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$RunStepDetailsToolCallsFileSearchImpl && + (identical(other.rankingOptions, rankingOptions) || + other.rankingOptions == rankingOptions) && + const DeepCollectionEquality().equals(other._results, _results)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, rankingOptions, + const DeepCollectionEquality().hash(_results)); + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$RunStepDetailsToolCallsFileSearchImplCopyWith< + _$RunStepDetailsToolCallsFileSearchImpl> + get copyWith => __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchImpl>(this, _$identity); + + @override + Map toJson() { + return _$$RunStepDetailsToolCallsFileSearchImplToJson( + this, + ); + } +} + +abstract class _RunStepDetailsToolCallsFileSearch + extends RunStepDetailsToolCallsFileSearch { + const factory _RunStepDetailsToolCallsFileSearch( + {@JsonKey(name: 'ranking_options', includeIfNull: false) + final RunStepDetailsToolCallsFileSearchRankingOptionsObject? + rankingOptions, + @JsonKey(includeIfNull: false) + final List? results}) = + _$RunStepDetailsToolCallsFileSearchImpl; + const _RunStepDetailsToolCallsFileSearch._() : super._(); + + factory _RunStepDetailsToolCallsFileSearch.fromJson( + Map json) = + _$RunStepDetailsToolCallsFileSearchImpl.fromJson; + + /// The ranking options for the file search. + @override + @JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? get rankingOptions; + + /// The results of the file search. + @override + @JsonKey(includeIfNull: false) + List? get results; + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFileSearchImplCopyWith< + _$RunStepDetailsToolCallsFileSearchImpl> + get copyWith => throw _privateConstructorUsedError; +} + +RunStepDetailsToolCallsFileSearchRankingOptionsObject + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectFromJson( + Map json) { + return _RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson(json); +} + +/// @nodoc +mixin _$RunStepDetailsToolCallsFileSearchRankingOptionsObject { + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + FileSearchRanker get ranker => throw _privateConstructorUsedError; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @JsonKey(name: 'score_threshold') + double get scoreThreshold => throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCallsFileSearchRankingOptionsObject to a JSON map. 
+ Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith< + RunStepDetailsToolCallsFileSearchRankingOptionsObject> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith< + $Res> { + factory $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith( + RunStepDetailsToolCallsFileSearchRankingOptionsObject value, + $Res Function(RunStepDetailsToolCallsFileSearchRankingOptionsObject) + then) = + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl<$Res, + RunStepDetailsToolCallsFileSearchRankingOptionsObject>; + @useResult + $Res call( + {FileSearchRanker ranker, + @JsonKey(name: 'score_threshold') double scoreThreshold}); +} + +/// @nodoc +class _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFileSearchRankingOptionsObject> + implements + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res> { + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl( + this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? ranker = null, + Object? scoreThreshold = null, + }) { + return _then(_value.copyWith( + ranker: null == ranker + ? _value.ranker + : ranker // ignore: cast_nullable_to_non_nullable + as FileSearchRanker, + scoreThreshold: null == scoreThreshold + ? _value.scoreThreshold + : scoreThreshold // ignore: cast_nullable_to_non_nullable + as double, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< + $Res> + implements + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith( + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl value, + $Res Function( + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl) + then) = + __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl< + $Res>; + @override + @useResult + $Res call( + {FileSearchRanker ranker, + @JsonKey(name: 'score_threshold') double scoreThreshold}); +} + +/// @nodoc +class __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl< + $Res> + extends _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl< + $Res, _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl> + implements + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< + $Res> { + __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl) + _then) + : super(_value, _then); + + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? ranker = null, + Object? 
scoreThreshold = null, + }) { + return _then(_$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl( + ranker: null == ranker + ? _value.ranker + : ranker // ignore: cast_nullable_to_non_nullable + as FileSearchRanker, + scoreThreshold: null == scoreThreshold + ? _value.scoreThreshold + : scoreThreshold // ignore: cast_nullable_to_non_nullable + as double, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl + extends _RunStepDetailsToolCallsFileSearchRankingOptionsObject { + const _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl( + {required this.ranker, + @JsonKey(name: 'score_threshold') required this.scoreThreshold}) + : super._(); + + factory _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl.fromJson( + Map json) => + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplFromJson( + json); + + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @override + final FileSearchRanker ranker; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @override + @JsonKey(name: 'score_threshold') + final double scoreThreshold; + + @override + String toString() { + return 'RunStepDetailsToolCallsFileSearchRankingOptionsObject(ranker: $ranker, scoreThreshold: $scoreThreshold)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other + is _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl && + (identical(other.ranker, ranker) || other.ranker == ranker) && + (identical(other.scoreThreshold, scoreThreshold) || + other.scoreThreshold == scoreThreshold)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, ranker, scoreThreshold); + + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl> + get copyWith => + __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplToJson( + this, + ); + } +} + +abstract class _RunStepDetailsToolCallsFileSearchRankingOptionsObject + extends RunStepDetailsToolCallsFileSearchRankingOptionsObject { + const factory _RunStepDetailsToolCallsFileSearchRankingOptionsObject( + {required final FileSearchRanker ranker, + @JsonKey(name: 'score_threshold') + required final double scoreThreshold}) = + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl; + const _RunStepDetailsToolCallsFileSearchRankingOptionsObject._() : super._(); + + factory _RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson( + Map json) = + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl.fromJson; + + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @override + FileSearchRanker get ranker; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. 
+ @override + @JsonKey(name: 'score_threshold') + double get scoreThreshold; + + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +RunStepDetailsToolCallsFileSearchResultObject + _$RunStepDetailsToolCallsFileSearchResultObjectFromJson( + Map json) { + return _RunStepDetailsToolCallsFileSearchResultObject.fromJson(json); +} + +/// @nodoc +mixin _$RunStepDetailsToolCallsFileSearchResultObject { + /// The ID of the file that result was found in. + @JsonKey(name: 'file_id') + String get fileId => throw _privateConstructorUsedError; + + /// The name of the file that result was found in. + @JsonKey(name: 'file_name') + String get fileName => throw _privateConstructorUsedError; + + /// The score of the result. All values must be a floating point number between 0 and 1. + double get score => throw _privateConstructorUsedError; + + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. + @JsonKey(includeIfNull: false) + List? get content => + throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCallsFileSearchResultObject to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsFileSearchResultObjectCopyWith< + RunStepDetailsToolCallsFileSearchResultObject> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $RunStepDetailsToolCallsFileSearchResultObjectCopyWith<$Res> { + factory $RunStepDetailsToolCallsFileSearchResultObjectCopyWith( + RunStepDetailsToolCallsFileSearchResultObject value, + $Res Function(RunStepDetailsToolCallsFileSearchResultObject) then) = + _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl<$Res, + RunStepDetailsToolCallsFileSearchResultObject>; + @useResult + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(name: 'file_name') String fileName, + double score, + @JsonKey(includeIfNull: false) + List? content}); +} + +/// @nodoc +class _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFileSearchResultObject> + implements $RunStepDetailsToolCallsFileSearchResultObjectCopyWith<$Res> { + _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl( + this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? fileId = null, + Object? fileName = null, + Object? score = null, + Object? content = freezed, + }) { + return _then(_value.copyWith( + fileId: null == fileId + ? _value.fileId + : fileId // ignore: cast_nullable_to_non_nullable + as String, + fileName: null == fileName + ? _value.fileName + : fileName // ignore: cast_nullable_to_non_nullable + as String, + score: null == score + ? 
_value.score + : score // ignore: cast_nullable_to_non_nullable + as double, + content: freezed == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as List?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith< + $Res> + implements $RunStepDetailsToolCallsFileSearchResultObjectCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith( + _$RunStepDetailsToolCallsFileSearchResultObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsFileSearchResultObjectImpl) + then) = + __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(name: 'file_name') String fileName, + double score, + @JsonKey(includeIfNull: false) + List? content}); +} + +/// @nodoc +class __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFileSearchResultObjectImpl> + implements + _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchResultObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchResultObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? fileId = null, + Object? fileName = null, + Object? score = null, + Object? content = freezed, + }) { + return _then(_$RunStepDetailsToolCallsFileSearchResultObjectImpl( + fileId: null == fileId + ? _value.fileId + : fileId // ignore: cast_nullable_to_non_nullable + as String, + fileName: null == fileName + ? _value.fileName + : fileName // ignore: cast_nullable_to_non_nullable + as String, + score: null == score + ? _value.score + : score // ignore: cast_nullable_to_non_nullable + as double, + content: freezed == content + ? _value._content + : content // ignore: cast_nullable_to_non_nullable + as List?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$RunStepDetailsToolCallsFileSearchResultObjectImpl + extends _RunStepDetailsToolCallsFileSearchResultObject { + const _$RunStepDetailsToolCallsFileSearchResultObjectImpl( + {@JsonKey(name: 'file_id') required this.fileId, + @JsonKey(name: 'file_name') required this.fileName, + required this.score, + @JsonKey(includeIfNull: false) + final List? content}) + : _content = content, + super._(); + + factory _$RunStepDetailsToolCallsFileSearchResultObjectImpl.fromJson( + Map json) => + _$$RunStepDetailsToolCallsFileSearchResultObjectImplFromJson(json); + + /// The ID of the file that result was found in. + @override + @JsonKey(name: 'file_id') + final String fileId; + + /// The name of the file that result was found in. + @override + @JsonKey(name: 'file_name') + final String fileName; + + /// The score of the result. All values must be a floating point number between 0 and 1. + @override + final double score; + + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. + final List? _content; + + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. + @override + @JsonKey(includeIfNull: false) + List? 
get content { + final value = _content; + if (value == null) return null; + if (_content is EqualUnmodifiableListView) return _content; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + @override + String toString() { + return 'RunStepDetailsToolCallsFileSearchResultObject(fileId: $fileId, fileName: $fileName, score: $score, content: $content)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$RunStepDetailsToolCallsFileSearchResultObjectImpl && + (identical(other.fileId, fileId) || other.fileId == fileId) && + (identical(other.fileName, fileName) || + other.fileName == fileName) && + (identical(other.score, score) || other.score == score) && + const DeepCollectionEquality().equals(other._content, _content)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, fileId, fileName, score, + const DeepCollectionEquality().hash(_content)); + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchResultObjectImpl> + get copyWith => + __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchResultObjectImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$RunStepDetailsToolCallsFileSearchResultObjectImplToJson( + this, + ); + } +} + +abstract class _RunStepDetailsToolCallsFileSearchResultObject + extends RunStepDetailsToolCallsFileSearchResultObject { + const factory _RunStepDetailsToolCallsFileSearchResultObject( + {@JsonKey(name: 'file_id') required final String fileId, + @JsonKey(name: 'file_name') required final String fileName, + required final double score, + @JsonKey(includeIfNull: false) + final List? + content}) = _$RunStepDetailsToolCallsFileSearchResultObjectImpl; + const _RunStepDetailsToolCallsFileSearchResultObject._() : super._(); + + factory _RunStepDetailsToolCallsFileSearchResultObject.fromJson( + Map json) = + _$RunStepDetailsToolCallsFileSearchResultObjectImpl.fromJson; + + /// The ID of the file that result was found in. + @override + @JsonKey(name: 'file_id') + String get fileId; + + /// The name of the file that result was found in. + @override + @JsonKey(name: 'file_name') + String get fileName; + + /// The score of the result. All values must be a floating point number between 0 and 1. + @override + double get score; + + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. + @override + @JsonKey(includeIfNull: false) + List? get content; + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchResultObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +RunStepDetailsToolCallsFileSearchResultContent + _$RunStepDetailsToolCallsFileSearchResultContentFromJson( + Map json) { + return _RunStepDetailsToolCallsFileSearchResultContent.fromJson(json); +} + +/// @nodoc +mixin _$RunStepDetailsToolCallsFileSearchResultContent { + /// The type of the content. + String get type => throw _privateConstructorUsedError; + + /// The text content of the file. + @JsonKey(includeIfNull: false) + String? get text => throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCallsFileSearchResultContent to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsFileSearchResultContentCopyWith< + RunStepDetailsToolCallsFileSearchResultContent> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $RunStepDetailsToolCallsFileSearchResultContentCopyWith<$Res> { + factory $RunStepDetailsToolCallsFileSearchResultContentCopyWith( + RunStepDetailsToolCallsFileSearchResultContent value, + $Res Function(RunStepDetailsToolCallsFileSearchResultContent) then) = + _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl<$Res, + RunStepDetailsToolCallsFileSearchResultContent>; + @useResult + $Res call({String type, @JsonKey(includeIfNull: false) String? text}); +} + +/// @nodoc +class _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFileSearchResultContent> + implements $RunStepDetailsToolCallsFileSearchResultContentCopyWith<$Res> { + _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl( + this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? text = freezed, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + text: freezed == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith< + $Res> + implements $RunStepDetailsToolCallsFileSearchResultContentCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith( + _$RunStepDetailsToolCallsFileSearchResultContentImpl value, + $Res Function(_$RunStepDetailsToolCallsFileSearchResultContentImpl) + then) = + __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String type, @JsonKey(includeIfNull: false) String? 
text}); +} + +/// @nodoc +class __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFileSearchResultContentImpl> + implements + _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchResultContentImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchResultContentImpl) _then) + : super(_value, _then); + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? text = freezed, + }) { + return _then(_$RunStepDetailsToolCallsFileSearchResultContentImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + text: freezed == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$RunStepDetailsToolCallsFileSearchResultContentImpl + extends _RunStepDetailsToolCallsFileSearchResultContent { + const _$RunStepDetailsToolCallsFileSearchResultContentImpl( + {this.type = 'text', @JsonKey(includeIfNull: false) this.text}) + : super._(); + + factory _$RunStepDetailsToolCallsFileSearchResultContentImpl.fromJson( + Map json) => + _$$RunStepDetailsToolCallsFileSearchResultContentImplFromJson(json); + + /// The type of the content. + @override + @JsonKey() + final String type; + + /// The text content of the file. + @override + @JsonKey(includeIfNull: false) + final String? text; + + @override + String toString() { + return 'RunStepDetailsToolCallsFileSearchResultContent(type: $type, text: $text)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$RunStepDetailsToolCallsFileSearchResultContentImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.text, text) || other.text == text)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, type, text); + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith< + _$RunStepDetailsToolCallsFileSearchResultContentImpl> + get copyWith => + __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchResultContentImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$RunStepDetailsToolCallsFileSearchResultContentImplToJson( + this, + ); + } +} + +abstract class _RunStepDetailsToolCallsFileSearchResultContent + extends RunStepDetailsToolCallsFileSearchResultContent { + const factory _RunStepDetailsToolCallsFileSearchResultContent( + {final String type, + @JsonKey(includeIfNull: false) final String? text}) = + _$RunStepDetailsToolCallsFileSearchResultContentImpl; + const _RunStepDetailsToolCallsFileSearchResultContent._() : super._(); + + factory _RunStepDetailsToolCallsFileSearchResultContent.fromJson( + Map json) = + _$RunStepDetailsToolCallsFileSearchResultContentImpl.fromJson; + + /// The type of the content. 
+ @override + String get type; + + /// The text content of the file. + @override + @JsonKey(includeIfNull: false) + String? get text; + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith< + _$RunStepDetailsToolCallsFileSearchResultContentImpl> + get copyWith => throw _privateConstructorUsedError; +} + RunStepCompletionUsage _$RunStepCompletionUsageFromJson( Map json) { return _RunStepCompletionUsage.fromJson(json); @@ -57987,11 +59172,20 @@ mixin _$AssistantToolsFileSearchFileSearch { /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// - /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// Note that the file search tool may output fewer than `max_num_results` results. See the + /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults => throw _privateConstructorUsedError; + /// The ranking options for the file search. + /// + /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? get rankingOptions => + throw _privateConstructorUsedError; + /// Serializes this AssistantToolsFileSearchFileSearch to a JSON map. Map toJson() => throw _privateConstructorUsedError; @@ -58013,7 +59207,11 @@ abstract class $AssistantToolsFileSearchFileSearchCopyWith<$Res> { @useResult $Res call( {@JsonKey(name: 'max_num_results', includeIfNull: false) - int? maxNumResults}); + int? maxNumResults, + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? rankingOptions}); + + $FileSearchRankingOptionsCopyWith<$Res>? get rankingOptions; } /// @nodoc @@ -58033,14 +59231,34 @@ class _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, @override $Res call({ Object? maxNumResults = freezed, + Object? rankingOptions = freezed, }) { return _then(_value.copyWith( maxNumResults: freezed == maxNumResults ? _value.maxNumResults : maxNumResults // ignore: cast_nullable_to_non_nullable as int?, + rankingOptions: freezed == rankingOptions + ? _value.rankingOptions + : rankingOptions // ignore: cast_nullable_to_non_nullable + as FileSearchRankingOptions?, ) as $Val); } + + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $FileSearchRankingOptionsCopyWith<$Res>? 
get rankingOptions { + if (_value.rankingOptions == null) { + return null; + } + + return $FileSearchRankingOptionsCopyWith<$Res>(_value.rankingOptions!, + (value) { + return _then(_value.copyWith(rankingOptions: value) as $Val); + }); + } } /// @nodoc @@ -58054,7 +59272,12 @@ abstract class _$$AssistantToolsFileSearchFileSearchImplCopyWith<$Res> @useResult $Res call( {@JsonKey(name: 'max_num_results', includeIfNull: false) - int? maxNumResults}); + int? maxNumResults, + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? rankingOptions}); + + @override + $FileSearchRankingOptionsCopyWith<$Res>? get rankingOptions; } /// @nodoc @@ -58073,12 +59296,17 @@ class __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl<$Res> @override $Res call({ Object? maxNumResults = freezed, + Object? rankingOptions = freezed, }) { return _then(_$AssistantToolsFileSearchFileSearchImpl( maxNumResults: freezed == maxNumResults ? _value.maxNumResults : maxNumResults // ignore: cast_nullable_to_non_nullable as int?, + rankingOptions: freezed == rankingOptions + ? _value.rankingOptions + : rankingOptions // ignore: cast_nullable_to_non_nullable + as FileSearchRankingOptions?, )); } } @@ -58089,7 +59317,9 @@ class _$AssistantToolsFileSearchFileSearchImpl extends _AssistantToolsFileSearchFileSearch { const _$AssistantToolsFileSearchFileSearchImpl( {@JsonKey(name: 'max_num_results', includeIfNull: false) - this.maxNumResults}) + this.maxNumResults, + @JsonKey(name: 'ranking_options', includeIfNull: false) + this.rankingOptions}) : super._(); factory _$AssistantToolsFileSearchFileSearchImpl.fromJson( @@ -58099,15 +59329,24 @@ class _$AssistantToolsFileSearchFileSearchImpl /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// - /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// Note that the file search tool may output fewer than `max_num_results` results. See the + /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. @override @JsonKey(name: 'max_num_results', includeIfNull: false) final int? maxNumResults; + /// The ranking options for the file search. + /// + /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. + @override + @JsonKey(name: 'ranking_options', includeIfNull: false) + final FileSearchRankingOptions? 
rankingOptions; + @override String toString() { - return 'AssistantToolsFileSearchFileSearch(maxNumResults: $maxNumResults)'; + return 'AssistantToolsFileSearchFileSearch(maxNumResults: $maxNumResults, rankingOptions: $rankingOptions)'; } @override @@ -58116,12 +59355,14 @@ class _$AssistantToolsFileSearchFileSearchImpl (other.runtimeType == runtimeType && other is _$AssistantToolsFileSearchFileSearchImpl && (identical(other.maxNumResults, maxNumResults) || - other.maxNumResults == maxNumResults)); + other.maxNumResults == maxNumResults) && + (identical(other.rankingOptions, rankingOptions) || + other.rankingOptions == rankingOptions)); } @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, maxNumResults); + int get hashCode => Object.hash(runtimeType, maxNumResults, rankingOptions); /// Create a copy of AssistantToolsFileSearchFileSearch /// with the given fields replaced by the non-null parameter values. @@ -58144,8 +59385,11 @@ class _$AssistantToolsFileSearchFileSearchImpl abstract class _AssistantToolsFileSearchFileSearch extends AssistantToolsFileSearchFileSearch { const factory _AssistantToolsFileSearchFileSearch( - {@JsonKey(name: 'max_num_results', includeIfNull: false) - final int? maxNumResults}) = _$AssistantToolsFileSearchFileSearchImpl; + {@JsonKey(name: 'max_num_results', includeIfNull: false) + final int? maxNumResults, + @JsonKey(name: 'ranking_options', includeIfNull: false) + final FileSearchRankingOptions? rankingOptions}) = + _$AssistantToolsFileSearchFileSearchImpl; const _AssistantToolsFileSearchFileSearch._() : super._(); factory _AssistantToolsFileSearchFileSearch.fromJson( @@ -58155,12 +59399,21 @@ abstract class _AssistantToolsFileSearchFileSearch /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. /// - /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search - /// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// Note that the file search tool may output fewer than `max_num_results` results. See the + /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. @override @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults; + /// The ranking options for the file search. + /// + /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. + @override + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? get rankingOptions; + /// Create a copy of AssistantToolsFileSearchFileSearch /// with the given fields replaced by the non-null parameter values. 
@override @@ -63844,8 +65097,11 @@ mixin _$RunStepDetailsToolCalls { @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) codeInterpreter, - required TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch) + required TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch) fileSearch, required TResult Function( String id, String type, RunStepDetailsToolCallsFunction function) @@ -63860,8 +65116,11 @@ mixin _$RunStepDetailsToolCalls { @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult? Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult? Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult? Function( String id, String type, RunStepDetailsToolCallsFunction function)? @@ -63876,8 +65135,11 @@ mixin _$RunStepDetailsToolCalls { @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult Function( String id, String type, RunStepDetailsToolCallsFunction function)? @@ -64096,8 +65358,11 @@ class _$RunStepDetailsToolCallsCodeObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) codeInterpreter, - required TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch) + required TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch) fileSearch, required TResult Function( String id, String type, RunStepDetailsToolCallsFunction function) @@ -64115,8 +65380,11 @@ class _$RunStepDetailsToolCallsCodeObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult? Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult? Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult? Function( String id, String type, RunStepDetailsToolCallsFunction function)? @@ -64134,8 +65402,11 @@ class _$RunStepDetailsToolCallsCodeObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult Function( String id, String type, RunStepDetailsToolCallsFunction function)? 
@@ -64241,7 +65512,10 @@ abstract class _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> $Res call( {String id, String type, - @JsonKey(name: 'file_search') Map fileSearch}); + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch}); + + $RunStepDetailsToolCallsFileSearchCopyWith<$Res> get fileSearch; } /// @nodoc @@ -64273,11 +65547,22 @@ class __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> : type // ignore: cast_nullable_to_non_nullable as String, fileSearch: null == fileSearch - ? _value._fileSearch + ? _value.fileSearch : fileSearch // ignore: cast_nullable_to_non_nullable - as Map, + as RunStepDetailsToolCallsFileSearch, )); } + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $RunStepDetailsToolCallsFileSearchCopyWith<$Res> get fileSearch { + return $RunStepDetailsToolCallsFileSearchCopyWith<$Res>(_value.fileSearch, + (value) { + return _then(_value.copyWith(fileSearch: value)); + }); + } } /// @nodoc @@ -64287,10 +65572,8 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl const _$RunStepDetailsToolCallsFileSearchObjectImpl( {required this.id, required this.type, - @JsonKey(name: 'file_search') - required final Map fileSearch}) - : _fileSearch = fileSearch, - super._(); + @JsonKey(name: 'file_search') required this.fileSearch}) + : super._(); factory _$RunStepDetailsToolCallsFileSearchObjectImpl.fromJson( Map json) => @@ -64304,17 +65587,10 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @override final String type; - /// For now, this is always going to be an empty object. - final Map _fileSearch; - - /// For now, this is always going to be an empty object. + /// The definition of the file search that was called. @override @JsonKey(name: 'file_search') - Map get fileSearch { - if (_fileSearch is EqualUnmodifiableMapView) return _fileSearch; - // ignore: implicit_dynamic_type - return EqualUnmodifiableMapView(_fileSearch); - } + final RunStepDetailsToolCallsFileSearch fileSearch; @override String toString() { @@ -64328,14 +65604,13 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl other is _$RunStepDetailsToolCallsFileSearchObjectImpl && (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && - const DeepCollectionEquality() - .equals(other._fileSearch, _fileSearch)); + (identical(other.fileSearch, fileSearch) || + other.fileSearch == fileSearch)); } @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash( - runtimeType, id, type, const DeepCollectionEquality().hash(_fileSearch)); + int get hashCode => Object.hash(runtimeType, id, type, fileSearch); /// Create a copy of RunStepDetailsToolCalls /// with the given fields replaced by the non-null parameter values. 
@@ -64357,8 +65632,11 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) codeInterpreter, - required TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch) + required TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch) fileSearch, required TResult Function( String id, String type, RunStepDetailsToolCallsFunction function) @@ -64376,8 +65654,11 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult? Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult? Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult? Function( String id, String type, RunStepDetailsToolCallsFunction function)? @@ -64395,8 +65676,11 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult Function( String id, String type, RunStepDetailsToolCallsFunction function)? @@ -64461,7 +65745,7 @@ abstract class RunStepDetailsToolCallsFileSearchObject {required final String id, required final String type, @JsonKey(name: 'file_search') - required final Map fileSearch}) = + required final RunStepDetailsToolCallsFileSearch fileSearch}) = _$RunStepDetailsToolCallsFileSearchObjectImpl; const RunStepDetailsToolCallsFileSearchObject._() : super._(); @@ -64477,9 +65761,9 @@ abstract class RunStepDetailsToolCallsFileSearchObject @override String get type; - /// For now, this is always going to be an empty object. + /// The definition of the file search that was called. @JsonKey(name: 'file_search') - Map get fileSearch; + RunStepDetailsToolCallsFileSearch get fileSearch; /// Create a copy of RunStepDetailsToolCalls /// with the given fields replaced by the non-null parameter values. @@ -64614,8 +65898,11 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) codeInterpreter, - required TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch) + required TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch) fileSearch, required TResult Function( String id, String type, RunStepDetailsToolCallsFunction function) @@ -64633,8 +65920,11 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult? Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult? Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult? Function( String id, String type, RunStepDetailsToolCallsFunction function)? 
@@ -64652,8 +65942,11 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl @JsonKey(name: 'code_interpreter') RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? codeInterpreter, - TResult Function(String id, String type, - @JsonKey(name: 'file_search') Map fileSearch)? + TResult Function( + String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? fileSearch, TResult Function( String id, String type, RunStepDetailsToolCallsFunction function)? diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index d03e9a18..3ffb5c36 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -2516,6 +2516,34 @@ Map _$$ListAssistantsResponseImplToJson( 'has_more': instance.hasMore, }; +_$FileSearchRankingOptionsImpl _$$FileSearchRankingOptionsImplFromJson( + Map json) => + _$FileSearchRankingOptionsImpl( + ranker: $enumDecodeNullable(_$FileSearchRankerEnumMap, json['ranker'], + unknownValue: JsonKey.nullForUndefinedEnumValue), + scoreThreshold: (json['score_threshold'] as num?)?.toDouble(), + ); + +Map _$$FileSearchRankingOptionsImplToJson( + _$FileSearchRankingOptionsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('ranker', _$FileSearchRankerEnumMap[instance.ranker]); + writeNotNull('score_threshold', instance.scoreThreshold); + return val; +} + +const _$FileSearchRankerEnumMap = { + FileSearchRanker.auto: 'auto', + FileSearchRanker.default20240821: 'default_2024_08_21', +}; + _$AssistantsNamedToolChoiceImpl _$$AssistantsNamedToolChoiceImplFromJson( Map json) => _$AssistantsNamedToolChoiceImpl( @@ -4280,6 +4308,109 @@ Map return val; } +_$RunStepDetailsToolCallsFileSearchImpl + _$$RunStepDetailsToolCallsFileSearchImplFromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchImpl( + rankingOptions: json['ranking_options'] == null + ? null + : RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson( + json['ranking_options'] as Map), + results: (json['results'] as List?) 
+ ?.map((e) => + RunStepDetailsToolCallsFileSearchResultObject.fromJson( + e as Map)) + .toList(), + ); + +Map _$$RunStepDetailsToolCallsFileSearchImplToJson( + _$RunStepDetailsToolCallsFileSearchImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('ranking_options', instance.rankingOptions?.toJson()); + writeNotNull('results', instance.results?.map((e) => e.toJson()).toList()); + return val; +} + +_$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplFromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl( + ranker: $enumDecode(_$FileSearchRankerEnumMap, json['ranker']), + scoreThreshold: (json['score_threshold'] as num).toDouble(), + ); + +Map _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplToJson( + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl instance) => + { + 'ranker': _$FileSearchRankerEnumMap[instance.ranker]!, + 'score_threshold': instance.scoreThreshold, + }; + +_$RunStepDetailsToolCallsFileSearchResultObjectImpl + _$$RunStepDetailsToolCallsFileSearchResultObjectImplFromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchResultObjectImpl( + fileId: json['file_id'] as String, + fileName: json['file_name'] as String, + score: (json['score'] as num).toDouble(), + content: (json['content'] as List?) + ?.map((e) => + RunStepDetailsToolCallsFileSearchResultContent.fromJson( + e as Map)) + .toList(), + ); + +Map _$$RunStepDetailsToolCallsFileSearchResultObjectImplToJson( + _$RunStepDetailsToolCallsFileSearchResultObjectImpl instance) { + final val = { + 'file_id': instance.fileId, + 'file_name': instance.fileName, + 'score': instance.score, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); + return val; +} + +_$RunStepDetailsToolCallsFileSearchResultContentImpl + _$$RunStepDetailsToolCallsFileSearchResultContentImplFromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchResultContentImpl( + type: json['type'] as String? ?? 'text', + text: json['text'] as String?, + ); + +Map + _$$RunStepDetailsToolCallsFileSearchResultContentImplToJson( + _$RunStepDetailsToolCallsFileSearchResultContentImpl instance) { + final val = { + 'type': instance.type, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('text', instance.text); + return val; +} + _$RunStepCompletionUsageImpl _$$RunStepCompletionUsageImplFromJson( Map json) => _$RunStepCompletionUsageImpl( @@ -5302,6 +5433,10 @@ _$AssistantToolsFileSearchFileSearchImpl Map json) => _$AssistantToolsFileSearchFileSearchImpl( maxNumResults: (json['max_num_results'] as num?)?.toInt(), + rankingOptions: json['ranking_options'] == null + ? 
null + : FileSearchRankingOptions.fromJson( + json['ranking_options'] as Map), ); Map _$$AssistantToolsFileSearchFileSearchImplToJson( @@ -5315,6 +5450,7 @@ Map _$$AssistantToolsFileSearchFileSearchImplToJson( } writeNotNull('max_num_results', instance.maxNumResults); + writeNotNull('ranking_options', instance.rankingOptions?.toJson()); return val; } @@ -5754,7 +5890,8 @@ _$RunStepDetailsToolCallsFileSearchObjectImpl _$RunStepDetailsToolCallsFileSearchObjectImpl( id: json['id'] as String, type: json['type'] as String, - fileSearch: json['file_search'] as Map, + fileSearch: RunStepDetailsToolCallsFileSearch.fromJson( + json['file_search'] as Map), ); Map _$$RunStepDetailsToolCallsFileSearchObjectImplToJson( @@ -5762,7 +5899,7 @@ Map _$$RunStepDetailsToolCallsFileSearchObjectImplToJson( { 'id': instance.id, 'type': instance.type, - 'file_search': instance.fileSearch, + 'file_search': instance.fileSearch.toJson(), }; _$RunStepDetailsToolCallsFunctionObjectImpl diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 9c474cec..4fc465f5 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -796,6 +796,16 @@ paths: schema: type: string description: The ID of the thread to run. + - name: include + in: query + description: &include_param_description | + A list of additional fields to include in the response. Currently the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. + + See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + schema: + type: string requestBody: required: true content: @@ -968,6 +978,11 @@ paths: description: *pagination_before_param_description schema: type: string + - name: include + in: query + description: *include_param_description + schema: + type: string responses: "200": description: OK @@ -1000,6 +1015,11 @@ paths: schema: type: string description: The ID of the run step to retrieve. + - name: include + in: query + description: *include_param_description + schema: + type: string responses: "200": description: OK @@ -3879,10 +3899,32 @@ components: The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. - Note that the file search tool may output fewer than `max_num_results` results. See the [file search - tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + Note that the file search tool may output fewer than `max_num_results` results. See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + ranking_options: + $ref: "#/components/schemas/FileSearchRankingOptions" required: - type + FileSearchRankingOptions: + type: object + description: | + The ranking options for the file search. + + See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + properties: + ranker: + $ref: "#/components/schemas/FileSearchRanker" + score_threshold: + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. 
+ minimum: 0 + maximum: 1 + FileSearchRanker: + type: string + description: The ranker to use for the file search. If not specified will use the `auto` ranker. + enum: [ "auto", "default_2024_08_21" ] AssistantToolsFunction: type: object description: Function tool @@ -5653,13 +5695,74 @@ components: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. file_search: - type: object - description: For now, this is always going to be an empty object. - additionalProperties: true + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearch" required: - id - type - file_search + RunStepDetailsToolCallsFileSearch: + type: object + description: The definition of the file search that was called. + properties: + ranking_options: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject" + results: + type: array + description: The results of the file search. + items: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" + RunStepDetailsToolCallsFileSearchRankingOptionsObject: + type: object + description: The ranking options for the file search. + properties: + ranker: + $ref: "#/components/schemas/FileSearchRanker" + score_threshold: + type: number + description: | + The score threshold for the file search. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + required: + - ranker + - score_threshold + RunStepDetailsToolCallsFileSearchResultObject: + type: object + description: A result instance of the file search. + properties: + file_id: + type: string + description: The ID of the file that result was found in. + file_name: + type: string + description: The name of the file that result was found in. + score: + type: number + description: The score of the result. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + content: + type: array + description: | + The content of the result that was found. The content is only included if requested via the include + query parameter. + items: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultContent" + required: + - file_id + - file_name + - score + RunStepDetailsToolCallsFileSearchResultContent: + type: object + description: The content of the result that was found. + properties: + type: + type: string + description: The type of the content. + default: text + text: + type: string + description: The text content of the file. RunStepDeltaStepDetailsToolCallsFileSearchObject: type: object description: File search tool call @@ -6298,7 +6401,7 @@ components: - data RunStepStreamEvent: type: object - description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. + description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. properties: event: $ref: "#/components/schemas/EventType" @@ -6309,7 +6412,7 @@ components: - data RunStepStreamDeltaEvent: type: object - description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. + description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. 
properties: event: $ref: "#/components/schemas/EventType" diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 02653404..de7cd98a 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -4682,6 +4682,17 @@ paths: schema: type: string description: The ID of the thread to run. + - name: include[] + in: query + description: &include_param_description | + A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. + + See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + schema: + type: array + items: + type: string + enum: [ "step_details.tool_calls[*].file_search.results[*].content" ] requestBody: required: true content: @@ -5642,6 +5653,14 @@ paths: description: *pagination_before_param_description schema: type: string + - name: include[] + in: query + description: *include_param_description + schema: + type: array + items: + type: string + enum: [ "step_details.tool_calls[*].file_search.results[*].content" ] responses: "200": description: OK @@ -5653,7 +5672,7 @@ paths: name: List run steps group: threads beta: true - returns: A list of [run step](/docs/api-reference/runs/step-object) objects. + returns: A list of [run step](/docs/api-reference/run-steps/step-object) objects. examples: request: curl: | @@ -5745,6 +5764,14 @@ paths: schema: type: string description: The ID of the run step to retrieve. + - name: include[] + in: query + description: *include_param_description + schema: + type: array + items: + type: string + enum: [ "step_details.tool_calls[*].file_search.results[*].content" ] responses: "200": description: OK @@ -5756,7 +5783,7 @@ paths: name: Retrieve run step group: threads beta: true - returns: The [run step](/docs/api-reference/runs/step-object) object matching the specified ID. + returns: The [run step](/docs/api-reference/run-steps/step-object) object matching the specified ID. examples: request: curl: | @@ -10803,10 +10830,30 @@ components: description: | The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive. - Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + ranking_options: + $ref: "#/components/schemas/FileSearchRankingOptions" required: - type + FileSearchRankingOptions: + title: File search tool call ranking options + type: object + description: | + The ranking options for the file search. + + See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + properties: + ranker: + type: string + description: The ranker to use for the file search. If not specified will use the `auto` ranker. + enum: [ "auto", "default_2024_08_21" ] + score_threshold: + type: number + description: The score threshold for the file search. 
All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + AssistantToolsFileSearchTypeOnly: type: object title: FileSearch tool @@ -12769,11 +12816,72 @@ components: type: object description: For now, this is always going to be an empty object. x-oaiTypeLabel: map + properties: + ranking_options: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject" + results: + type: array + description: The results of the file search. + items: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" required: - id - type - file_search + RunStepDetailsToolCallsFileSearchRankingOptionsObject: + title: File search tool call ranking options + type: object + description: The ranking options for the file search. + properties: + ranker: + type: string + description: The ranker used for the file search. + enum: [ "default_2024_08_21" ] + score_threshold: + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + required: + - ranker + - score_threshold + + RunStepDetailsToolCallsFileSearchResultObject: + title: File search tool call result + type: object + description: A result instance of the file search. + x-oaiTypeLabel: map + properties: + file_id: + type: string + description: The ID of the file that result was found in. + file_name: + type: string + description: The name of the file that result was found in. + score: + type: number + description: The score of the result. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + content: + type: array + description: The content of the result that was found. The content is only included if requested via the include query parameter. + items: + type: object + properties: + type: + type: string + description: The type of the content. + enum: [ "text" ] + text: + type: string + description: The text content of the file. + required: + - file_id + - file_name + - score + RunStepDeltaStepDetailsToolCallsFileSearchObject: title: File search tool call type: object @@ -13560,9 +13668,9 @@ components: required: - event - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) is created. + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created. x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - type: object properties: event: @@ -13573,9 +13681,9 @@ components: required: - event - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state. + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - type: object properties: event: @@ -13586,7 +13694,7 @@ components: required: - event - data - description: Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed. + description: Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed. 
x-oaiMeta: dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" - type: object @@ -13599,9 +13707,9 @@ components: required: - event - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) is completed. + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed. x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - type: object properties: event: @@ -13612,9 +13720,9 @@ components: required: - event - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) fails. + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails. x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - type: object properties: event: @@ -13625,9 +13733,9 @@ components: required: - event - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled. + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled. x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - type: object properties: event: @@ -13638,9 +13746,9 @@ components: required: - event - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) expires. + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires. x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" MessageStreamEvent: oneOf: diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index 91e131b4..f98e6d9a 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,5 +1,5 @@ name: openai_dart -description: Dart client for the OpenAI API. Supports completions (GPT-3.5 Turbo), chat (GPT-4o, etc.), embeddings (Embedding v3), images (DALL·E 3), assistants v2 (threads, runs, vector stores, etc.) batch, fine-tuning, etc. +description: Dart client for the OpenAI API. Supports chat (GPT-4o, etc.), completions, embeddings, images (DALL·E 3), assistants (threads, runs, vector stores, etc.), batch, fine-tuning, etc. 
version: 0.4.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart From 5df8554857a90c5e02166bfcf845ca812da77dee Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 31 Aug 2024 11:29:32 +0200 Subject: [PATCH 111/251] feat: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings (#544) --- .../lib/src/embeddings/google_ai/google_ai_embeddings.dart | 5 ++--- .../test/embeddings/google_ai/google_ai_embeddings_test.dart | 3 +-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart index 385f1088..93ec105a 100644 --- a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart +++ b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart @@ -138,7 +138,6 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { /// The number of dimensions the resulting output embeddings should have. /// Only supported in `text-embedding-004` and later models. - /// TODO https://github.com/google-gemini/generative-ai-dart/pull/149 int? dimensions; /// The maximum number of documents to embed in a single request. @@ -168,7 +167,7 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { Content.text(doc.pageContent), taskType: TaskType.retrievalDocument, title: doc.metadata[docTitleKey], - // outputDimensionality: dimensions, TODO + outputDimensionality: dimensions, ); }).toList(growable: false), ); @@ -187,7 +186,7 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { final data = await _googleAiClient.embedContent( Content.text(query), taskType: TaskType.retrievalQuery, - // outputDimensionality: dimensions, TODO + outputDimensionality: dimensions, ); return data.embedding.values; } diff --git a/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart b/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart index bc942e51..a2f88906 100644 --- a/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart +++ b/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart @@ -49,8 +49,7 @@ void main() { expect(res[1].length, 768); }); - // TODO https://github.com/google-gemini/generative-ai-dart/pull/149 - test('Test shortening embeddings', skip: true, () async { + test('Test shortening embeddings', () async { embeddings.dimensions = 256; final res = await embeddings.embedQuery('Hello world'); expect(res.length, 256); From 5cdddb2da9ece1aeedd56fc54180537e529b5249 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 6 Sep 2024 23:47:17 +0200 Subject: [PATCH 112/251] docs: Update READMEs (#545) --- packages/langchain/README.md | 18 +- .../lib/langchain_firebase.dart | 2 +- packages/langchain_firebase/pubspec.yaml | 2 +- .../lib/langchain_google.dart | 2 +- packages/langchain_google/pubspec.yaml | 2 +- .../lib/langchain_openai.dart | 2 +- packages/langchain_openai/pubspec.yaml | 2 +- .../test/chat_models/github_models_test.dart | 181 ++++++++++++++++++ packages/openai_dart/README.md | 15 +- 9 files changed, 203 insertions(+), 23 deletions(-) create mode 100644 packages/langchain_openai/test/chat_models/github_models_test.dart diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 2dfacb97..e93bfdd1 
100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -104,15 +104,15 @@ The following integrations are available in LangChain.dart: ### Chat Models -| Chat model | Package | Streaming | Multi-modal | Tool-call | Description | -|-------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|-----------|-------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [ChatAnthropic](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/anthropic) | [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | ✔ | ✔ | ✔ | [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API) | -| [ChatFirebaseVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/firebase_vertex_ai) | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | ✔ | ✔ | ✔ | [Vertex AI for Firebase API](https://firebase.google.com/docs/vertex-ai) (aka Gemini API) | -| [ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) | -| [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://ollama.ai) | -| [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) | -| [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) 
| -| [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) | +| Chat model | Package | Streaming | Multi-modal | Tool-call | Description | +|-------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|-----------|-------------|-----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [ChatAnthropic](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/anthropic) | [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | ✔ | ✔ | ✔ | [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API) | +| [ChatFirebaseVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/firebase_vertex_ai) | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | ✔ | ✔ | ✔ | [Vertex AI for Firebase API](https://firebase.google.com/docs/vertex-ai) (aka Gemini API) | +| [ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) | +| [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://ollama.ai) | +| [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) | +| [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([GitHub Models](https://github.com/marketplace/models), [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) 
| +| [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) | ### LLMs diff --git a/packages/langchain_firebase/lib/langchain_firebase.dart b/packages/langchain_firebase/lib/langchain_firebase.dart index 0b76e587..45448a85 100644 --- a/packages/langchain_firebase/lib/langchain_firebase.dart +++ b/packages/langchain_firebase/lib/langchain_firebase.dart @@ -1,4 +1,4 @@ -/// LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). +/// LangChain.dart integration module for Firebase (Gemini, VertexAI for Firebase, Firestore, etc.). library; export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index d0b53ac3..e6a64b2c 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,5 +1,5 @@ name: langchain_firebase -description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). +description: LangChain.dart integration module for Firebase (Gemini, VertexAI for Firebase, Firestore, etc.). version: 0.2.1+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase diff --git a/packages/langchain_google/lib/langchain_google.dart b/packages/langchain_google/lib/langchain_google.dart index 371e45ad..a4dd4908 100644 --- a/packages/langchain_google/lib/langchain_google.dart +++ b/packages/langchain_google/lib/langchain_google.dart @@ -1,4 +1,4 @@ -/// LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). +/// LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.). library; export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 1821b7c1..7f318b39 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,5 +1,5 @@ name: langchain_google -description: LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). +description: LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.). version: 0.6.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google diff --git a/packages/langchain_openai/lib/langchain_openai.dart b/packages/langchain_openai/lib/langchain_openai.dart index d2730a6b..77e92aa5 100644 --- a/packages/langchain_openai/lib/langchain_openai.dart +++ b/packages/langchain_openai/lib/langchain_openai.dart @@ -1,4 +1,4 @@ -/// LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). +/// LangChain.dart integration module for OpenAI (GPT-4o, Embeddings, DALL·E, etc.). 
library; export 'package:openai_dart/openai_dart.dart' show OpenAIClientException; diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index bf0db409..c367ac51 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,5 +1,5 @@ name: langchain_openai -description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). +description: LangChain.dart integration module for OpenAI (GPT-4o, Embeddings, DALL·E, etc.). version: 0.7.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai diff --git a/packages/langchain_openai/test/chat_models/github_models_test.dart b/packages/langchain_openai/test/chat_models/github_models_test.dart new file mode 100644 index 00000000..7eac34dd --- /dev/null +++ b/packages/langchain_openai/test/chat_models/github_models_test.dart @@ -0,0 +1,181 @@ +// ignore_for_file: avoid_print +@TestOn('vm') +library; // Uses dart:io + +import 'dart:convert'; +import 'dart:io'; + +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:langchain_openai/langchain_openai.dart'; +import 'package:test/test.dart'; + +void main() { + group('GitHub Models tests', () { + late ChatOpenAI chatModel; + + setUp(() async { + chatModel = ChatOpenAI( + apiKey: Platform.environment['GITHUB_TOKEN'], + baseUrl: 'https://models.inference.ai.azure.com', + ); + }); + + tearDown(() { + chatModel.close(); + }); + + test('Test invoke GitHub Models API with different models', () async { + final models = [ + 'gpt-4o', + 'AI21-Jamba-Instruct', + 'meta-llama-3.1-405b-instruct', + 'Mistral-large', + 'Phi-3.5-mini-instruct', + ]; + for (final model in models) { + print('Testing model: $model'); + final res = await chatModel.invoke( + PromptValue.string( + 'List the numbers from 1 to 9 in order. ' + 'Output ONLY the numbers in one line without any spaces or commas. ' + 'NUMBERS:', + ), + options: ChatOpenAIOptions( + model: model, + temperature: 0, + ), + ); + + expect(res.id, isNotEmpty); + expect( + res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), + contains('123456789'), + ); + expect(res.metadata, isNotEmpty, reason: model); + expect(res.metadata['created'], greaterThan(0), reason: model); + expect(res.metadata['model'], isNotEmpty, reason: model); + } + }); + + test('Test stream GitHub Models API with different models', () async { + final models = [ + 'gpt-4o', + 'AI21-Jamba-Instruct', + 'meta-llama-3.1-405b-instruct', + 'Phi-3.5-mini-instruct', + ]; + for (final model in models) { + print('Testing model: $model'); + final stream = chatModel.stream( + PromptValue.string( + 'List the numbers from 1 to 9 in order. ' + 'Output ONLY the numbers in one line without any spaces or commas. 
' + 'NUMBERS:', + ), + options: ChatOpenAIOptions( + model: model, + temperature: 0, + ), + ); + + String content = ''; + int count = 0; + await for (final res in stream) { + content += res.output.content.replaceAll(RegExp(r'[\s\n]'), ''); + count++; + } + expect(count, greaterThan(1), reason: model); + expect(content, contains('123456789'), reason: model); + } + }); + + test('Test countTokens', () async { + final models = [ + 'gpt-4o', + 'AI21-Jamba-Instruct', + 'meta-llama-3.1-405b-instruct', + 'Mistral-large', + 'Phi-3.5-mini-instruct', + ]; + for (final model in models) { + print('Testing model: $model'); + const text = 'Hello, how are you?'; + + final numTokens = await chatModel.countTokens( + PromptValue.chat([ChatMessage.humanText(text)]), + options: ChatOpenAIOptions(model: model), + ); + expect(numTokens, 13, reason: model); + } + }); + + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + final humanMessage = ChatMessage.humanText( + 'What’s the weather like in Boston right now?', + ); + final res1 = await chatModel.invoke( + PromptValue.chat([humanMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage1 = res1.output; + + expect(aiMessage1.content, isEmpty); + expect(aiMessage1.toolCalls, isNotEmpty); + final toolCall = aiMessage1.toolCalls.first; + + expect(toolCall.name, tool.name); + expect(toolCall.arguments.containsKey('location'), isTrue); + expect(toolCall.arguments['location'], contains('Boston')); + + final functionResult = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + final functionMessage = ChatMessage.tool( + toolCallId: toolCall.id, + content: json.encode(functionResult), + ); + + final res2 = await chatModel.invoke( + PromptValue.chat([humanMessage, aiMessage1, functionMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage2 = res2.output; + + expect(aiMessage2.toolCalls, isEmpty); + expect(aiMessage2.content, contains('22')); + }); + }); +} diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md index 12f5b51f..68a26356 100644 --- a/packages/openai_dart/README.md +++ b/packages/openai_dart/README.md @@ -16,7 +16,7 @@ Unofficial Dart client for [OpenAI](https://platform.openai.com/docs/api-referen - Custom base URL, headers and query params support (e.g. HTTP proxies) - Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) - Partial Azure OpenAI API support -- It can be used to consume OpenAI-compatible APIs like [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc. 
+- It can be used to consume OpenAI-compatible APIs like [GitHub Models](https://github.com/marketplace/models), [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc. **Supported endpoints:** @@ -900,7 +900,6 @@ final res = await client.createThreadRun( You can also use Structured Outputs to ensure that the model-generated responses adhere to a specific JSON schema: ```dart - final res = await client.createThreadRun( threadId: threadId, request: CreateRunRequest( @@ -1198,21 +1197,21 @@ final client = OpenAIClient( This client can be used to consume APIs that are compatible with the OpenAI API spec. -[TogetherAI](https://www.together.ai/): +[GitHub Models](https://github.com/marketplace/models): ```dart final client = OpenAIClient( - baseUrl: 'https://api.together.xyz/v1', - headers: { 'api-key': 'YOUR_TOGETHER_AI_API_KEY' }, + baseUrl: 'https://models.inference.ai.azure.com', + headers: { 'api-key': 'YOUR_GITHUB_TOKEN' }, ); ``` -[Anyscale](https://www.anyscale.com/): +[TogetherAI](https://www.together.ai/): ```dart final client = OpenAIClient( - baseUrl: 'https://api.endpoints.anyscale.com/v1', - headers: { 'api-key': 'YOUR_ANYSCALE_API_KEY' }, + baseUrl: 'https://api.together.xyz/v1', + headers: { 'api-key': 'YOUR_TOGETHER_AI_API_KEY' }, ); ``` From 67b3d750206a053b382c7ac3ea96196a9c67c412 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 6 Sep 2024 23:53:43 +0200 Subject: [PATCH 113/251] build: Update gcloud and googleapis dependencies (#546) --- examples/docs_examples/pubspec.lock | 8 ++++---- examples/hello_world_flutter/pubspec.lock | 8 ++++---- examples/vertex_ai_matching_engine_setup/pubspec.lock | 8 ++++---- examples/vertex_ai_matching_engine_setup/pubspec.yaml | 2 +- melos.yaml | 4 ++-- packages/langchain_google/pubspec.yaml | 4 ++-- packages/vertex_ai/pubspec.yaml | 2 +- 7 files changed, 18 insertions(+), 18 deletions(-) diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 78752c5c..a7a05f06 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -147,10 +147,10 @@ packages: dependency: transitive description: name: gcloud - sha256: e9501083036d5f94027ce5afddd8ddae9b04121cf2fc6036b2cdd5663b52fca7 + sha256: b8fbff52ff1cfdb2bb3c53eb039c0ee3745618632969b60ec25d55b31fbb36dd url: "https://pub.dev" source: hosted - version: "0.8.12" + version: "0.8.13" google_generative_ai: dependency: transitive description: @@ -171,10 +171,10 @@ packages: dependency: transitive description: name: googleapis - sha256: "8a8c311723162af077ca73f94b823b97ff68770d966e29614d20baca9fdb490a" + sha256: "864f222aed3f2ff00b816c675edf00a39e2aaf373d728d8abec30b37bee1a81c" url: "https://pub.dev" source: hosted - version: "12.0.0" + version: "13.2.0" googleapis_auth: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 7eb4a4d8..1fbbc8d3 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -130,10 +130,10 @@ packages: dependency: transitive description: name: gcloud - sha256: e9501083036d5f94027ce5afddd8ddae9b04121cf2fc6036b2cdd5663b52fca7 + sha256: b8fbff52ff1cfdb2bb3c53eb039c0ee3745618632969b60ec25d55b31fbb36dd url: 
"https://pub.dev" source: hosted - version: "0.8.12" + version: "0.8.13" google_generative_ai: dependency: transitive description: @@ -154,10 +154,10 @@ packages: dependency: transitive description: name: googleapis - sha256: "8a8c311723162af077ca73f94b823b97ff68770d966e29614d20baca9fdb490a" + sha256: "864f222aed3f2ff00b816c675edf00a39e2aaf373d728d8abec30b37bee1a81c" url: "https://pub.dev" source: hosted - version: "12.0.0" + version: "13.2.0" googleapis_auth: dependency: transitive description: diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.lock b/examples/vertex_ai_matching_engine_setup/pubspec.lock index c4bd2136..752608b4 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.lock +++ b/examples/vertex_ai_matching_engine_setup/pubspec.lock @@ -45,10 +45,10 @@ packages: dependency: "direct main" description: name: gcloud - sha256: e9501083036d5f94027ce5afddd8ddae9b04121cf2fc6036b2cdd5663b52fca7 + sha256: b8fbff52ff1cfdb2bb3c53eb039c0ee3745618632969b60ec25d55b31fbb36dd url: "https://pub.dev" source: hosted - version: "0.8.12" + version: "0.8.13" google_identity_services_web: dependency: transitive description: @@ -61,10 +61,10 @@ packages: dependency: transitive description: name: googleapis - sha256: "8a8c311723162af077ca73f94b823b97ff68770d966e29614d20baca9fdb490a" + sha256: "864f222aed3f2ff00b816c675edf00a39e2aaf373d728d8abec30b37bee1a81c" url: "https://pub.dev" source: hosted - version: "12.0.0" + version: "13.2.0" googleapis_auth: dependency: "direct main" description: diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.yaml b/examples/vertex_ai_matching_engine_setup/pubspec.yaml index e42414a8..4519fdbb 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.yaml +++ b/examples/vertex_ai_matching_engine_setup/pubspec.yaml @@ -7,7 +7,7 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - gcloud: ^0.8.12 + gcloud: ^0.8.13 googleapis_auth: ^1.6.0 http: ^1.2.2 vertex_ai: ^0.1.0+1 diff --git a/melos.yaml b/melos.yaml index 3a1c0092..b1835a9d 100644 --- a/melos.yaml +++ b/melos.yaml @@ -40,9 +40,9 @@ command: flutter_bloc: ^8.1.6 flutter_markdown: ^0.7.3 freezed_annotation: ^2.4.2 - gcloud: ^0.8.12 + gcloud: ^0.8.13 google_generative_ai: 0.4.4 - googleapis: ^12.0.0 + googleapis: ^13.0.0 googleapis_auth: ^1.6.0 http: ^1.2.2 js: ^0.7.1 diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 7f318b39..502e6b37 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -19,9 +19,9 @@ environment: dependencies: collection: ^1.18.0 fetch_client: ^1.1.2 - gcloud: ^0.8.12 + gcloud: ^0.8.13 google_generative_ai: 0.4.4 - googleapis: ^12.0.0 + googleapis: ^13.0.0 googleapis_auth: ^1.6.0 http: ^1.2.2 langchain_core: 0.3.5 diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index c612870d..5d7612aa 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -18,7 +18,7 @@ environment: dependencies: collection: ^1.18.0 - googleapis: ^12.0.0 + googleapis: ^13.0.0 googleapis_auth: ^1.6.0 http: ^1.2.2 meta: ^1.11.0 From 72a8faa62ac2554e32485d1390832a62739cb790 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 21 Sep 2024 00:51:10 +0200 Subject: [PATCH 114/251] docs: Bootstrap docusaurus docs (#548) Co-authored-by: Douglas Bett --- .github/workflows/firebase-hosting-merge.yml | 21 + .../firebase-hosting-pull-request.yml | 26 + .github/workflows/test.yaml | 3 + docs_v2/.firebaserc | 5 + docs_v2/.gitignore | 21 + 
docs_v2/README.md | 41 + docs_v2/babel.config.js | 3 + docs_v2/docs/01-intro.md | 171 + docs_v2/docs/02-tutorials/01-llm_chain.md | 7 + docs_v2/docs/02-tutorials/index.mdx | 28 + docs_v2/docs/03-how_to/01-installation.md | 77 + .../docs/03-how_to/02-structured_output.md | 14 + docs_v2/docs/03-how_to/index.mdx | 149 + docs_v2/docs/04-concepts.mdx | 468 + docs_v2/docs/05-integrations/anthropic.md | 145 + docs_v2/docs/05-integrations/anyscale.md | 84 + .../05-integrations/firebase_vertex_ai.md | 190 + docs_v2/docs/05-integrations/gcp_vertex_ai.md | 116 + docs_v2/docs/05-integrations/googleai.md | 149 + docs_v2/docs/05-integrations/index.mdx | 56 + docs_v2/docs/05-integrations/mistralai.md | 76 + docs_v2/docs/05-integrations/ollama.md | 462 + docs_v2/docs/05-integrations/open_router.md | 157 + docs_v2/docs/05-integrations/openai.md | 372 + docs_v2/docs/05-integrations/prem.md | 24 + docs_v2/docs/05-integrations/together_ai.md | 84 + docs_v2/docs/05-integrations/tools/index.mdx | 5 + .../05-integrations/tools/tavily_search.md | 13 + docs_v2/docusaurus.config.js | 130 + docs_v2/firebase.json | 16 + docs_v2/package-lock.json | 14683 ++++++++++++++++ docs_v2/package.json | 44 + docs_v2/sidebars.js | 30 + .../src/components/HomepageFeatures/index.js | 64 + .../HomepageFeatures/styles.module.css | 11 + docs_v2/src/css/custom.css | 30 + docs_v2/src/pages/index.js | 7 + docs_v2/src/pages/index.module.css | 23 + docs_v2/src/pages/markdown-page.md | 7 + docs_v2/static/.nojekyll | 0 docs_v2/static/img/favicon.ico | Bin 0 -> 15406 bytes docs_v2/static/img/langchain.dart.png | Bin 0 -> 156015 bytes docs_v2/static/img/logo.svg | 1 + 43 files changed, 18013 insertions(+) create mode 100644 .github/workflows/firebase-hosting-merge.yml create mode 100644 .github/workflows/firebase-hosting-pull-request.yml create mode 100644 docs_v2/.firebaserc create mode 100644 docs_v2/.gitignore create mode 100644 docs_v2/README.md create mode 100644 docs_v2/babel.config.js create mode 100644 docs_v2/docs/01-intro.md create mode 100644 docs_v2/docs/02-tutorials/01-llm_chain.md create mode 100644 docs_v2/docs/02-tutorials/index.mdx create mode 100644 docs_v2/docs/03-how_to/01-installation.md create mode 100644 docs_v2/docs/03-how_to/02-structured_output.md create mode 100644 docs_v2/docs/03-how_to/index.mdx create mode 100644 docs_v2/docs/04-concepts.mdx create mode 100644 docs_v2/docs/05-integrations/anthropic.md create mode 100644 docs_v2/docs/05-integrations/anyscale.md create mode 100644 docs_v2/docs/05-integrations/firebase_vertex_ai.md create mode 100644 docs_v2/docs/05-integrations/gcp_vertex_ai.md create mode 100644 docs_v2/docs/05-integrations/googleai.md create mode 100644 docs_v2/docs/05-integrations/index.mdx create mode 100644 docs_v2/docs/05-integrations/mistralai.md create mode 100644 docs_v2/docs/05-integrations/ollama.md create mode 100644 docs_v2/docs/05-integrations/open_router.md create mode 100644 docs_v2/docs/05-integrations/openai.md create mode 100644 docs_v2/docs/05-integrations/prem.md create mode 100644 docs_v2/docs/05-integrations/together_ai.md create mode 100644 docs_v2/docs/05-integrations/tools/index.mdx create mode 100644 docs_v2/docs/05-integrations/tools/tavily_search.md create mode 100644 docs_v2/docusaurus.config.js create mode 100644 docs_v2/firebase.json create mode 100644 docs_v2/package-lock.json create mode 100644 docs_v2/package.json create mode 100644 docs_v2/sidebars.js create mode 100644 docs_v2/src/components/HomepageFeatures/index.js create mode 100644 
docs_v2/src/components/HomepageFeatures/styles.module.css create mode 100644 docs_v2/src/css/custom.css create mode 100644 docs_v2/src/pages/index.js create mode 100644 docs_v2/src/pages/index.module.css create mode 100644 docs_v2/src/pages/markdown-page.md create mode 100644 docs_v2/static/.nojekyll create mode 100644 docs_v2/static/img/favicon.ico create mode 100644 docs_v2/static/img/langchain.dart.png create mode 100644 docs_v2/static/img/logo.svg diff --git a/.github/workflows/firebase-hosting-merge.yml b/.github/workflows/firebase-hosting-merge.yml new file mode 100644 index 00000000..262b5a52 --- /dev/null +++ b/.github/workflows/firebase-hosting-merge.yml @@ -0,0 +1,21 @@ +name: Deploy docs_v2 + +on: + push: + branches: + - main + +jobs: + build_and_deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - run: npm ci && npm run build + working-directory: docs_v2 + - uses: FirebaseExtended/action-hosting-deploy@0cbcac4740c2bfb00d632f0b863b57713124eb5a + with: + repoToken: ${{ secrets.GITHUB_TOKEN }} + firebaseServiceAccount: ${{ secrets.FIREBASE_SERVICE_ACCOUNT_LANGCHAIN_DART }} + channelId: live + projectId: langchain-dart + entryPoint: docs_v2 diff --git a/.github/workflows/firebase-hosting-pull-request.yml b/.github/workflows/firebase-hosting-pull-request.yml new file mode 100644 index 00000000..2a2d9416 --- /dev/null +++ b/.github/workflows/firebase-hosting-pull-request.yml @@ -0,0 +1,26 @@ +name: Deploy docs_v2 on PR + +on: + pull_request: + paths: + - 'docs_v2/**' + +permissions: + checks: write + contents: read + pull-requests: write + +jobs: + build_and_preview: + if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - run: npm ci && npm run build + working-directory: docs_v2 + - uses: FirebaseExtended/action-hosting-deploy@0cbcac4740c2bfb00d632f0b863b57713124eb5a + with: + repoToken: ${{ secrets.GITHUB_TOKEN }} + firebaseServiceAccount: ${{ secrets.FIREBASE_SERVICE_ACCOUNT_LANGCHAIN_DART }} + projectId: langchain-dart + entryPoint: docs_v2 diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index b77c2ed8..06a8deb6 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -4,6 +4,9 @@ on: # pull_request_target is dangerous! 
Review external PRs code before approving to run the workflow # We need this to be able to access the secrets required by the workflow pull_request_target: + paths-ignore: + - 'docs/**' + - 'docs_v2/**' workflow_dispatch: # Cancel currently running workflow when a new one is triggered diff --git a/docs_v2/.firebaserc b/docs_v2/.firebaserc new file mode 100644 index 00000000..15e3b72b --- /dev/null +++ b/docs_v2/.firebaserc @@ -0,0 +1,5 @@ +{ + "projects": { + "default": "langchain-dart" + } +} diff --git a/docs_v2/.gitignore b/docs_v2/.gitignore new file mode 100644 index 00000000..0f21febf --- /dev/null +++ b/docs_v2/.gitignore @@ -0,0 +1,21 @@ +# Dependencies +/node_modules + +# Production +/build + +# Generated files +.docusaurus +.cache-loader + +# Misc +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local + +npm-debug.log* +yarn-debug.log* +yarn-error.log* +.firebase diff --git a/docs_v2/README.md b/docs_v2/README.md new file mode 100644 index 00000000..0c6c2c27 --- /dev/null +++ b/docs_v2/README.md @@ -0,0 +1,41 @@ +# Website + +This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator. + +### Installation + +``` +$ yarn +``` + +### Local Development + +``` +$ yarn start +``` + +This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. + +### Build + +``` +$ yarn build +``` + +This command generates static content into the `build` directory and can be served using any static contents hosting service. + +### Deployment + +Using SSH: + +``` +$ USE_SSH=true yarn deploy +``` + +Not using SSH: + +``` +$ GIT_USER= yarn deploy +``` + +If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. diff --git a/docs_v2/babel.config.js b/docs_v2/babel.config.js new file mode 100644 index 00000000..e00595da --- /dev/null +++ b/docs_v2/babel.config.js @@ -0,0 +1,3 @@ +module.exports = { + presets: [require.resolve('@docusaurus/core/lib/babel/preset')], +}; diff --git a/docs_v2/docs/01-intro.md b/docs_v2/docs/01-intro.md new file mode 100644 index 00000000..75428706 --- /dev/null +++ b/docs_v2/docs/01-intro.md @@ -0,0 +1,171 @@ +--- +sidebar_position: 0 +sidebar_class_name: hidden +--- + +# Introduction + +Build Dart/Flutter applications powered by Large Language Models. + +## What is LangChain.dart? + +LangChain.dart is an unofficial Dart port of the popular [LangChain](https://github.com/hwchase17/langchain) Python framework created by [Harrison Chase](https://www.linkedin.com/in/harrison-chase-961287118). LangChain is a framework for developing applications that are powered by large language models (LLMs). + +It comes with a set of components that make working with LLMs easy. +The components can be grouped into a few core modules: + +![LangChain.dart](https://raw.githubusercontent.com/davidmigloz/langchain_dart/main/docs/img/langchain.dart.png) + +- 📃 **Model I/O:** LangChain offers a unified API for interacting with various LLM providers (e.g. OpenAI, Google, Mistral, Ollama, etc.), allowing developers to switch between them with ease. Additionally, it provides tools for managing model inputs (prompt templates and example selectors) and parsing the resulting model outputs (output parsers). 
+- 📚 **Retrieval:** assists in loading user data (via document loaders), transforming it (with text splitters), extracting its meaning (using embedding models), storing (in vector stores) and retrieving it (through retrievers) so that it can be used to ground the model's responses (i.e. Retrieval-Augmented Generation or RAG). +- 🤖 **Agents:** "bots" that leverage LLMs to make informed decisions about which available tools (such as web search, calculators, database lookup, etc.) to use to accomplish the designated task. + +The different components can be composed together using the [LangChain Expression Language (LCEL)](https://langchaindart.dev/#/expression_language/get_started). + +## Motivation + +Large Language Models (LLMs) have revolutionized Natural Language Processing (NLP), serving as essential components in a wide range of applications, such as question-answering, summarization, translation, and text generation. + +The adoption of LLMs is creating a new tech stack in its wake. However, emerging libraries and tools are predominantly being developed for the Python and JavaScript ecosystems. As a result, the number of applications leveraging LLMs in these ecosystems has grown exponentially. + +In contrast, the Dart / Flutter ecosystem has not experienced similar growth, which can likely be attributed to the scarcity of Dart and Flutter libraries that streamline the complexities associated with working with LLMs. + +LangChain.dart aims to fill this gap by abstracting the intricacies of working with LLMs in Dart and Flutter, enabling developers to harness their combined potential effectively. + +## Packages + +LangChain.dart has a modular design that allows developers to import only the components they need. The ecosystem consists of several packages: + +### [`langchain_core`](https://pub.dev/packages/langchain_core) + +Contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. + +> Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. + +### [`langchain`](https://pub.dev/packages/langchain) + +Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. + +> Depend on this package to build LLM applications with LangChain.dart. +> +> This package exposes `langchain_core` so you don't need to depend on it explicitly. + +### [`langchain_community`](https://pub.dev/packages/langchain_community) + +Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. + +> Depend on this package if you want to use any of the integrations or components it provides. + +### Integration-specific packages + +Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), [`langchain_ollama`](https://pub.dev/packages/langchain_ollama), etc.) are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package. + +> Depend on an integration-specific package if you want to use the specific integration. + + +## Getting started + +To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. 
Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_community`, `langchain_openai`, `langchain_google`, etc.): + +```yaml +dependencies: + langchain: {version} + langchain_community: {version} + langchain_openai: {version} + langchain_google: {version} + ... +``` + +The most basic building block of LangChain.dart is calling an LLM on some prompt. LangChain.dart provides a unified interface for calling different LLMs. For example, we can use `ChatGoogleGenerativeAI` to call Google's Gemini model: + +```dart +final model = ChatGoogleGenerativeAI(apiKey: googleApiKey); +final prompt = PromptValue.string('Hello world!'); +final result = await model.invoke(prompt); +// Hello everyone! I'm new here and excited to be part of this community. +``` + +But the power of LangChain.dart comes from chaining together multiple components to implement complex use cases. For example, a RAG (Retrieval-Augmented Generation) pipeline that would accept a user query, retrieve relevant documents from a vector store, format them using prompt templates, invoke the model, and parse the output: + +```dart +// 1. Create a vector store and add documents to it +final vectorStore = MemoryVectorStore( + embeddings: OpenAIEmbeddings(apiKey: openaiApiKey), +); +await vectorStore.addDocuments( + documents: [ + Document(pageContent: 'LangChain was created by Harrison'), + Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'), + ], +); + +// 2. Define the retrieval chain +final retriever = vectorStore.asRetriever(); +final setupAndRetrieval = Runnable.fromMap({ + 'context': retriever.pipe( + Runnable.mapInput((docs) => docs.map((d) => d.pageContent).join('\n')), + ), + 'question': Runnable.passthrough(), +}); + +// 3. Construct a RAG prompt template +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'Answer the question based on only the following context:\n{context}'), + (ChatMessageType.human, '{question}'), +]); + +// 4. Define the final chain +final model = ChatOpenAI(apiKey: openaiApiKey); +const outputParser = StringOutputParser(); +final chain = setupAndRetrieval + .pipe(promptTemplate) + .pipe(model) + .pipe(outputParser); + +// 5. Run the pipeline +final res = await chain.invoke('Who created LangChain.dart?'); +print(res); +// David created LangChain.dart +``` + +## Documentation + +- [LangChain.dart documentation](https://langchaindart.dev) +- [Sample apps](https://github.com/davidmigloz/langchain_dart/tree/main/examples) +- [LangChain.dart blog](https://blog.langchaindart.dev) +- [Project board](https://github.com/users/davidmigloz/projects/2/views/1) + +## Community + +Stay up-to-date on the latest news and updates on the field, have great discussions, and get help in the official [LangChain.dart Discord server](https://discord.gg/x4qbhqecVR). + +[![LangChain.dart Discord server](https://invidget.switchblade.xyz/x4qbhqecVR?theme=light)](https://discord.gg/x4qbhqecVR) + +## Contribute + +| 📢 **Call for Collaborators** 📢 | +|-------------------------------------------------------------------------| +| We are looking for collaborators to join the core group of maintainers. | + +New contributors welcome! Check out our [Contributors Guide](https://github.com/davidmigloz/langchain_dart/blob/main/CONTRIBUTING.md) for help getting started. + +Join us on [Discord](https://discord.gg/x4qbhqecVR) to meet other maintainers. We'll help you get your first contribution in no time! 
+ +## Related projects + +- [LangChain](https://github.com/langchain-ai/langchain): The original Python LangChain project. +- [LangChain.js](https://github.com/langchain-ai/langchainjs): A JavaScript port of LangChain. +- [LangChain.go](https://github.com/tmc/langchaingo): A Go port of LangChain. +- [LangChain.rb](https://github.com/andreibondarev/langchainrb): A Ruby port of LangChain. + +## Sponsors + +
    + +## License + +LangChain.dart is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). diff --git a/docs_v2/docs/02-tutorials/01-llm_chain.md b/docs_v2/docs/02-tutorials/01-llm_chain.md new file mode 100644 index 00000000..e40bbb77 --- /dev/null +++ b/docs_v2/docs/02-tutorials/01-llm_chain.md @@ -0,0 +1,7 @@ +--- +sidebar_position: 0 +sidebar_class_name: hidden +--- + + +# Build a Simple LLM Application with LCEL \ No newline at end of file diff --git a/docs_v2/docs/02-tutorials/index.mdx b/docs_v2/docs/02-tutorials/index.mdx new file mode 100644 index 00000000..82e56f9e --- /dev/null +++ b/docs_v2/docs/02-tutorials/index.mdx @@ -0,0 +1,28 @@ +--- +sidebar_position: 0 +sidebar_class_name: hidden +--- +# Tutorials + +New to LangChain or to LLM app development in general? Read this material to quickly get up and running. + +## Basics +- [Build a Simple LLM Application with LCEL](/docs/tutorials/llm_chain) +- [Build a Chatbot](/docs/tutorials/chatbot) +- [Build vector stores and retrievers](/docs/tutorials/retrievers) +- [Build an Agent](/docs/tutorials/agents) + +## Working with external knowledge +- [Build a Retrieval Augmented Generation (RAG) Application](/docs/tutorials/rag) +- [Build a Conversational RAG Application](/docs/tutorials/qa_chat_history) +- [Build a Question/Answering system over SQL data](/docs/tutorials/sql_qa) +- [Build a Query Analysis System](/docs/tutorials/query_analysis) +- [Build a local RAG application](/docs/tutorials/local_rag) +- [Build a Question Answering application over a Graph Database](/docs/tutorials/graph) +- [Build a PDF ingestion and Question/Answering system](/docs/tutorials/pdf_qa/) + +## Specialized tasks +- [Build an Extraction Chain](/docs/tutorials/extraction) +- [Generate synthetic data](/docs/tutorials/data_generation) +- [Classify text into labels](/docs/tutorials/classification) +- [Summarize text](/docs/tutorials/summarization) \ No newline at end of file diff --git a/docs_v2/docs/03-how_to/01-installation.md b/docs_v2/docs/03-how_to/01-installation.md new file mode 100644 index 00000000..44604573 --- /dev/null +++ b/docs_v2/docs/03-how_to/01-installation.md @@ -0,0 +1,77 @@ +# Installation +Langchain as a framework consists of a number of packages. They're split into different packages allowing you to choose exactly what pieces of the framework to install and use. + +## Installing essential Langchain.dart packages + +### [`langchain`](https://pub.dev/packages/langchain) +Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. + +> Depend on this package to build LLM applications with Langchain.dart. +> +> This package exposes `langchain_core` so you don't need to depend on it explicitly. + +```bash +dart pub add langchain +``` + +### [`langchain_core`](https://pub.dev/packages/langchain_core) +This package contains base abstractions of different components and ways to compose them together. +The interfaces for core components like LLMs, vector stores, retrievers and more are defined here. +> Depend on this package to build frameworks on top of Langchain.dart or to interoperate with it. + +To install this package in your Dart or Flutter project +```bash +dart pub add langchain_core +``` + +### [`langchain_community`](https://pub.dev/packages/langchain_community) +Contains third-party integrations and community-contributed components that are not part of the core Langchain.dart API. 
+> Depend on this package if you want to use any of the integrations or components it provides like CSV,JSON,Text or HTML loaders and more. + +```bash +dart pub add langchain langchain_community +``` + +## Integration packages +Certain integrations like OpenAI and Anthropic have their own packages. Any integrations that require their own package will be documented as such in the Integration docs. + + +Let's say you're using [OpenAI](https://platform.openai.com/), install the `langchain_openai` package. +```bash +dart pub add langchain langchain_community langchain_openai +``` + +Let's say you want Google integration to use (GoogleAI, VertexAI, Gemini etc), install the `langchain_google` package. +```bash +dart pub add langchain langchain_community langchain_google +``` +The following table contains the list of existing Langchain.dart integration packages. + +| Package | Version | Description | +|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | +| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | +| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components and utilities | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | +| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) 
| +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). | +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | +## Documentation + +Detailed documentation for various integrations can be found in the `/docs/05-integration/` directory: + +- [Anthropic](/docs/integrations/anthropic) +- [Anyscale](/docs/integrations/anyscale) +- [Firebase VertexAI](/docs/integrations/firebase_vertex_ai) +- [GCP VertexAI](/docs/integrations/gcp_vertex_ai) +- [GoogleAI](/docs/integrations/googleai) +- [MistralAI](/docs/integrations/mistralai) +- [Ollama](/docs/integrations/ollama) +- [OpenRouter](/docs/integrations/open_router) +- [OpenAI](/docs/integrations/openai) +- [PrEM](/docs/integrations/prem) +- [TogetherAI](/docs/integrations/together_ai) \ No newline at end of file diff --git a/docs_v2/docs/03-how_to/02-structured_output.md b/docs_v2/docs/03-how_to/02-structured_output.md new file mode 100644 index 00000000..95983cd6 --- /dev/null +++ b/docs_v2/docs/03-how_to/02-structured_output.md @@ -0,0 +1,14 @@ +--- +sidebar_position: 3 +keywords: [structured output, json, information extraction, with_structured_output] +--- +# How to return structured data from a model + +> This guide assumes familiarity with the following concepts: +> - [Chat models](/docs/concepts/#chat-models) +> - [Function/tool calling](/docs/concepts/#functiontool-calling) + + +It is often useful to have a model return output that matches a specific schema. One common use-case is extracting data from text to insert into a database or use with some other downstream system. This guide covers a few strategies for getting structured outputs from a model. + + diff --git a/docs_v2/docs/03-how_to/index.mdx b/docs_v2/docs/03-how_to/index.mdx new file mode 100644 index 00000000..81ea6bc7 --- /dev/null +++ b/docs_v2/docs/03-how_to/index.mdx @@ -0,0 +1,149 @@ +--- +sidebar_position: 0 +sidebar_class_name: hidden +--- + +# How-to guides + +Here you'll find answers to "How do I...?" types of questions. +These guides are *goal-oriented* and *concrete*; they're meant to help you complete a specific task. +For conceptual explanations see the [Conceptual guide](/docs/concepts/). +For end-to-end walkthroughs see [Tutorials](/docs/tutorials). +For comprehensive descriptions of every class and function see the [API Reference](https://pub.dev/documentation/langchain/latest/index.html). + + +## Installation + +- [How to: install LangChain packages](/docs/how_to/installation/) + +## Key features +This highlights functionality that is core to using LangChain. 
+ +- [How to: return structured data from a model](/docs/how_to/structured_output/) +- [How to: use a model to call tools](/docs/how_to/tool_calling) +- [How to: stream runnables](/docs/how_to/streaming) +- [How to: debug your LLM apps](/docs/how_to/debugging/) + +[LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel) is a way to create arbitrary custom chains. It is built on the [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) protocol. + +[**LCEL cheatsheet**](/docs/how_to/lcel_cheatsheet/): For a quick overview of how to use the main LCEL primitives. + +- [How to: chain runnables](/docs/how_to/sequence) +- [How to: stream runnables](/docs/how_to/streaming) +- [How to: invoke runnables in parallel](/docs/how_to/parallel/) +- [How to: add default invocation args to runnables](/docs/how_to/binding/) +- [How to: turn any function into a runnable](/docs/how_to/functions) +- [How to: pass through inputs from one chain step to the next](/docs/how_to/passthrough) +- [How to: configure runnable behavior at runtime](/docs/how_to/configure) +- [How to: add message history (memory) to a chain](/docs/how_to/message_history) +- [How to: route between sub-chains](/docs/how_to/routing) +- [How to: create a dynamic (self-constructing) chain](/docs/how_to/dynamic_chain/) +- [How to: inspect runnables](/docs/how_to/inspect) +- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks) +- [How to: migrate chains to LCEL](/docs/how_to/migrate_chains) +- [How to: pass runtime secrets to a runnable](/docs/how_to/runnable_runtime_secrets) + + +## Components + +These are the core building blocks you can use when building applications. + +### Prompt templates + +[Prompt Templates](/docs/concepts/#prompt-templates) are responsible for formatting user input into a format that can be passed to a language model. + +- [How to: use few shot examples](/docs/how_to/few_shot_examples) +- [How to: use few shot examples in chat models](/docs/how_to/few_shot_examples_chat/) +- [How to: partially format prompt templates](/docs/how_to/prompts_partial) +- [How to: compose prompts together](/docs/how_to/prompts_composition) + +### Chat models + +[Chat Models](/docs/concepts/#chat-models) are newer forms of language models that take messages in and output a message. 
+ +- [How to: do function/tool calling](/docs/how_to/tool_calling) +- [How to: get models to return structured output](/docs/how_to/structured_output) +- [How to: cache model responses](/docs/how_to/chat_model_caching) +- [How to: get log probabilities](/docs/how_to/logprobs) +- [How to: create a custom chat model class](/docs/how_to/custom_chat_model) +- [How to: stream a response back](/docs/how_to/chat_streaming) +- [How to: track token usage](/docs/how_to/chat_token_usage_tracking) +- [How to: track response metadata across providers](/docs/how_to/response_metadata) +- [How to: let your end users choose their model](/docs/how_to/chat_models_universal_init/) +- [How to: use chat model to call tools](/docs/how_to/tool_calling) +- [How to: stream tool calls](/docs/how_to/tool_streaming) +- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot) +- [How to: bind model-specific formatted tools](/docs/how_to/tools_model_specific) +- [How to: force a specific tool call](/docs/how_to/tool_choice) +- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/) + +### LLMs + +What LangChain calls [LLMs](/docs/concepts/#llms) are older forms of language models that take a string in and output a string. + +- [How to: cache model responses](/docs/how_to/llm_caching) +- [How to: create a custom LLM class](/docs/how_to/custom_llm) +- [How to: stream a response back](/docs/how_to/streaming_llm) +- [How to: track token usage](/docs/how_to/llm_token_usage_tracking) +- [How to: work with local LLMs](/docs/how_to/local_llms) + +### Document loaders + +[Document Loaders](/docs/concepts/#document-loaders) are responsible for loading documents from a variety of sources. + +- [How to: load CSV data](/docs/how_to/document_loader_csv) +- [How to: load data from a directory](/docs/how_to/document_loader_directory) +- [How to: load HTML data](/docs/how_to/document_loader_html) +- [How to: load JSON data](/docs/how_to/document_loader_json) +- [How to: load Markdown data](/docs/how_to/document_loader_markdown) +- [How to: load Microsoft Office data](/docs/how_to/document_loader_office_file) +- [How to: load PDF files](/docs/how_to/document_loader_pdf) +- [How to: write a custom document loader](/docs/how_to/document_loader_custom) + +### Text splitters + +[Text Splitters](/docs/concepts/#text-splitters) take a document and split into chunks that can be used for retrieval. + +- [How to: recursively split text](/docs/how_to/recursive_text_splitter) +- [How to: split by HTML headers](/docs/how_to/HTML_header_metadata_splitter) +- [How to: split by HTML sections](/docs/how_to/HTML_section_aware_splitter) +- [How to: split by character](/docs/how_to/character_text_splitter) +- [How to: split code](/docs/how_to/code_splitter) +- [How to: split Markdown by headers](/docs/how_to/markdown_header_metadata_splitter) +- [How to: recursively split JSON](/docs/how_to/recursive_json_splitter) +- [How to: split text into semantic chunks](/docs/how_to/semantic-chunker) +- [How to: split by tokens](/docs/how_to/split_by_token) + +### Vector stores + +[Vector stores](/docs/concepts/#vector-stores) are databases that can efficiently store and retrieve embeddings. + +- [How to: use a vector store to retrieve data](/docs/how_to/vectorstores) + +### Retrievers + +[Retrievers](/docs/concepts/#retrievers) are responsible for taking a query and returning relevant documents. 
+
+- [How to: use a vector store to retrieve data](/docs/how_to/vectorstore_retriever)
+- [How to: generate multiple queries to retrieve data for](/docs/how_to/MultiQueryRetriever)
+- [How to: use contextual compression to compress the data retrieved](/docs/how_to/contextual_compression)
+- [How to: write a custom retriever class](/docs/how_to/custom_retriever)
+- [How to: add similarity scores to retriever results](/docs/how_to/add_scores_retriever)
+- [How to: combine the results from multiple retrievers](/docs/how_to/ensemble_retriever)
+- [How to: reorder retrieved results to mitigate the "lost in the middle" effect](/docs/how_to/long_context_reorder)
+- [How to: generate multiple embeddings per document](/docs/how_to/multi_vector)
+- [How to: retrieve the whole document for a chunk](/docs/how_to/parent_document_retriever)
+- [How to: generate metadata filters](/docs/how_to/self_query)
+- [How to: create a time-weighted retriever](/docs/how_to/time_weighted_vectorstore)
+- [How to: use hybrid vector and keyword retrieval](/docs/how_to/hybrid)
+
+### Agents
+
+:::note
+
+For in-depth how-to guides for agents, please check out the [LangGraph](https://langchain-ai.github.io/langgraph/) documentation.
+
+:::
+
+- [How to: use legacy LangChain Agents (AgentExecutor)](/docs/how_to/agent_executor)
+- [How to: migrate from legacy LangChain agents to LangGraph](/docs/how_to/migrate_agent)
\ No newline at end of file
diff --git a/docs_v2/docs/04-concepts.mdx b/docs_v2/docs/04-concepts.mdx
new file mode 100644
index 00000000..fcd2335b
--- /dev/null
+++ b/docs_v2/docs/04-concepts.mdx
@@ -0,0 +1,468 @@
+# Conceptual guide
+
+This section contains introductions to key parts of LangChain.dart.
+
+## Architecture
+
+LangChain.dart as a framework consists of a number of packages.
+
+### [`langchain_core`](https://pub.dev/packages/langchain_core)
+This package contains base abstractions of different components and ways to compose them together.
+The interfaces for core components like LLMs, vector stores, retrievers and more are defined here.
+No third party integrations are defined here.
+
+> Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it.
+
+### [`langchain`](https://pub.dev/packages/langchain)
+
+Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture.
+
+> Depend on this package to build LLM applications with LangChain.dart.
+>
+> This package exposes `langchain_core` so you don't need to depend on it explicitly.
+
+### [`langchain_community`](https://pub.dev/packages/langchain_community)
+
+Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API.
+
+> Depend on this package if you want to use any of the integrations or components it provides.
+
+### Integration-specific packages
+
+Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), [`langchain_ollama`](https://pub.dev/packages/langchain_ollama), etc.) are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package.
+
+> Depend on an integration-specific package if you want to use the specific integration.
+
+See [Integrations](/docs/integrations) for how to use each specific integration package.
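+
+In application code, this layering typically shows up as one import for the high-level `langchain` package plus one import per integration package. The following is a minimal sketch, assuming the conventional `package:<name>/<name>.dart` entry points and a placeholder API key:
+
+```dart
+// High-level building blocks (chains, agents, retrieval algorithms).
+import 'package:langchain/langchain.dart';
+// One import per integration the app actually uses.
+import 'package:langchain_openai/langchain_openai.dart';
+
+void main() {
+  // `langchain` re-exports `langchain_core`, so core abstractions such as
+  // `PromptValue` are available without a direct `langchain_core` dependency.
+  final prompt = PromptValue.string('Hello world!');
+
+  // Concrete model wrappers come from the integration package.
+  final model = ChatOpenAI(apiKey: 'your-openai-api-key'); // placeholder key
+  print('$prompt will be sent to ${model.runtimeType}');
+}
+```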
+
+## LangChain Expression Language (LCEL)
+
+LangChain Expression Language, or LCEL, is a declarative way to easily compose chains together. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains (we’ve seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL:
+
+- **First-class streaming support:** When you build your chains with LCEL you get the best possible time-to-first-token (time elapsed until the first chunk of output comes out). For some chains this means e.g. we stream tokens straight from an LLM to a streaming output parser, and you get back parsed, incremental chunks of output at the same rate as the LLM provider outputs the raw tokens.
+- **Optimized concurrent execution:** Whenever your LCEL chains have steps that can be executed concurrently (e.g. if you fetch documents from multiple retrievers) we automatically do it for the smallest possible latency.
+- **Retries and fallbacks:** Configure retries and fallbacks for any part of your LCEL chain. This is a great way to make your chains more reliable at scale.
+- **Access intermediate results:** For more complex chains it’s often very useful to access the results of intermediate steps even before the final output is produced. This can be used to let end-users know something is happening, or even just to debug your chain.
+
+### Runnable interface
+
+To make it as easy as possible to create custom chains, LangChain provides a `Runnable` interface that most components implement, including chat models, LLMs, output parsers, retrievers, prompt templates, and more.
+
+This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way. The standard interface includes:
+
+- `invoke`: call the chain on an input and return the output.
+- `stream`: call the chain on an input and stream the output.
+- `batch`: call the chain on a list of inputs and return a list of outputs.
+
+The type of the input and output varies by component:
+
+| Component                   | Input Type             | Output Type            |
+|-----------------------------|------------------------|------------------------|
+| `PromptTemplate`            | `Map<String, dynamic>` | `PromptValue`          |
+| `ChatMessagePromptTemplate` | `Map<String, dynamic>` | `PromptValue`          |
+| `LLM`                       | `PromptValue`          | `LLMResult`            |
+| `ChatModel`                 | `PromptValue`          | `ChatResult`           |
+| `OutputParser`              | Any object             | Parser output type     |
+| `Retriever`                 | `String`               | `List<Document>`       |
+| `DocumentTransformer`       | `List<Document>`       | `List<Document>`       |
+| `Tool`                      | `Map<String, dynamic>` | `String`               |
+| `Chain`                     | `Map<String, dynamic>` | `Map<String, dynamic>` |
+
+## Components
+
+### Chat models
+Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text).
+These are traditionally newer models (older models are generally `LLMs`, see below).
+Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages.
+
+Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input. This means you can easily use chat models in place of LLMs.
+
+When a string is passed in as input, it is converted to a `HumanChatMessage` and then passed to the underlying model.
+
+LangChain.dart does not host any chat models; rather, we rely on third-party integrations.
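+
+For example, a chat model wrapper from an integration package implements the `Runnable` interface described above, so the same model can be called once with `invoke` or consumed incrementally with `stream`. The snippet below is a minimal sketch rather than a definitive recipe; it assumes `ChatOpenAI` from `langchain_openai` and uses a placeholder API key:
+
+```dart
+import 'package:langchain/langchain.dart';
+import 'package:langchain_openai/langchain_openai.dart';
+
+Future<void> main() async {
+  final chatModel = ChatOpenAI(
+    apiKey: 'your-openai-api-key', // placeholder: use your own key
+    defaultOptions: const ChatOpenAIOptions(temperature: 0),
+  );
+
+  // invoke: one PromptValue in, one ChatResult out.
+  final result = await chatModel.invoke(
+    PromptValue.string('Translate "I love programming" to French.'),
+  );
+  print(result.output.content);
+
+  // stream: the same kind of input, with the output arriving as incremental chunks.
+  final stream = chatModel.stream(
+    PromptValue.string('List the numbers from 1 to 5.'),
+  );
+  await stream.forEach((chunk) => print(chunk.output.content));
+}
+```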
+
+We have some standardized parameters when constructing ChatModels:
+- `model`: the name of the model
+- `temperature`: the sampling temperature
+- `timeout`: request timeout
+- `maxTokens`: max tokens to generate
+- `apiKey`: API key for the model provider
+- `baseUrl`: endpoint to send requests to
+
+Some important things to note:
+- Standard params only apply to model providers that expose parameters with the intended functionality. For example, some providers do not expose a configuration for maximum output tokens, so `maxTokens` can't be supported on these.
+- Standard params are currently only enforced on integrations that have their own integration packages (e.g. `langchain_openai`, `langchain_anthropic`, etc.); they're not enforced on models in `langchain_community`.
+
+ChatModels also accept other parameters that are specific to that integration. To find all the parameters supported by a ChatModel, head to the API reference for that model.
+
+### LLMs
+:::caution
+Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/docs/concepts/#chat-models),
+even for non-chat use cases.
+
+You are probably looking for [the section above instead](/docs/concepts/#chat-models).
+:::
+
+Language models that take a string as input and return a string.
+These are traditionally older models (newer models are generally [Chat Models](/docs/concepts/#chat-models), see above).
+
+Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input.
+This gives them the same interface as [Chat Models](/docs/concepts/#chat-models).
+When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model.
+
+LangChain.dart does not host any LLMs; rather, we rely on third-party integrations. See [Integrations](/docs/integrations).
+
+### Messages
+Some language models take a list of messages as input and return a message.
+
+LangChain provides several objects to easily distinguish between different roles:
+
+#### HumanChatMessage
+This represents a message from the user.
+
+#### AIChatMessage
+This represents a message from the model.
+
+#### SystemChatMessage
+This represents a system message, which tells the model how to behave. Not every model provider supports this.
+
+#### FunctionChatMessage / ToolChatMessage
+These represent a decision from a language model to call a tool. They're subclasses of `AIChatMessage`. `FunctionChatMessage` is a legacy message type corresponding to OpenAI's legacy function-calling API.
+
+### Prompt Templates
+
+Most LLM applications do not pass user input directly into an `LLM`. Usually they will add the user input to a larger piece of text, called a prompt template, that provides additional context on the specific task at hand.
+
+For example, imagine the prompt instructs the model to generate a company name based on a product description. For our application, it would be great if the user only had to provide the description of a company/product, without having to worry about giving the model instructions.
+
+`PromptTemplates` help with exactly this! They bundle up all the logic for going from user input into a fully formatted prompt.
+This can start off very simple. For example, a prompt template for the company-name task above would just be:
+
+```dart
+final prompt = PromptTemplate.fromTemplate(
+  'What is a good name for a company that makes {product}?',
+);
+final res = prompt.format({'product': 'colorful socks'});
+print(res);
+// 'What is a good name for a company that makes colorful socks?'
+```
+
+However, the advantages of using these over raw string formatting are several. You can "partial" out variables - e.g. you can format only some of the variables at a time. You can compose them together, easily combining different templates into a single prompt.
+
+For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates).
+
+`PromptTemplates` can also be used to produce a list of messages. In this case, the prompt not only contains information about the content, but also about each message (its role, its position in the list, etc.). Most often, a `ChatPromptTemplate` is a list of `ChatMessagePromptTemplates`. Each `ChatMessagePromptTemplate` contains instructions for how to format that `ChatMessage` - its role, and then also its content. Let's take a look at this below:
+
+```dart
+const template = 'You are a helpful assistant that translates {input_language} to {output_language}.';
+const humanTemplate = '{text}';
+
+final chatPrompt = ChatPromptTemplate.fromTemplates([
+  (ChatMessageType.system, template),
+  (ChatMessageType.human, humanTemplate),
+]);
+
+final res = chatPrompt.formatMessages({
+  'input_language': 'English',
+  'output_language': 'French',
+  'text': 'I love programming.',
+});
+print(res);
+// [
+// SystemChatMessage(content='You are a helpful assistant that translates English to French.'),
+// HumanChatMessage(content='I love programming.')
+// ]
+```
+
+`ChatPromptTemplates` can also be constructed in other ways; for specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates).
+
+### Output parsers
+
+:::note
+
+The information here refers to parsers that take a text output from a model and try to parse it into a more structured representation.
+More and more models are supporting function (or tool) calling, which handles this automatically.
+It is recommended to use function/tool calling rather than output parsing.
+See documentation for that [here](/docs/concepts/#function-tool-calling).
+
+:::
+
+`OutputParsers` convert the raw output of an LLM into a format that can be used downstream. There are a few main types of `OutputParsers`, including:
+
+- Convert text from an LLM into structured information (e.g. JSON).
+- Convert a `ChatMessage` into just a string.
+- Convert the extra information returned from a call besides the message (like OpenAI function invocation) into a string.
+
+For full information on this, see the section on [output parsers](/docs/how_to/#output-parsers).
+
+### Chat history
+Most LLM applications have a conversational interface. An essential component of a conversation is being able to refer to information introduced earlier in the conversation. At a bare minimum, a conversational system should be able to access some window of past messages directly.
+
+The concept of ChatHistory refers to a class in LangChain which can be used to wrap an arbitrary chain. This ChatHistory will keep track of inputs and outputs of the underlying chain, and append them as messages to a message database.
+Future interactions will then load those messages and pass them into the chain as part of the input.
+
+### Documents
+A `Document` object in LangChain contains information about some data. It has two attributes:
+- `pageContent`: `String` - The content of this document.
+- `metadata`: `Map<String, dynamic>` - Arbitrary metadata associated with this document. Can track the document id, file name, etc.
+
+### Document loaders
+Use document loaders to load data from a source as `Document`s. For example, there are document loaders for loading a simple .txt file, for loading the text contents of any web page, or even for loading a transcript of a YouTube video.
+
+Document loaders expose two methods:
+
+- `lazyLoad()`: returns a `Stream` of `Document`s. This is useful for loading large amounts of data, as it allows you to process each `Document` as it is loaded, rather than waiting for the entire data set to be loaded in memory.
+- `load()`: returns a list of `Document`s. Under the hood, this method calls `lazyLoad()` and collects the results into a list. Use this method only with small data sets.
+
+The simplest loader reads in a file as text and places it all into one `Document`.
+
+```dart
+const filePath = 'example.txt';
+const loader = TextLoader(filePath);
+final docs = await loader.load();
+```
+
+### Text splitters
+Once you've loaded documents, you'll often want to transform them to better suit your application. The simplest example is splitting a long document into smaller chunks that can fit into your model's context window. LangChain has a number of built-in document transformers that make it easy to split, combine, filter, and otherwise manipulate documents.
+
+When you want to deal with long pieces of text, it is necessary to split up that text into chunks. As simple as this sounds, there is a lot of potential complexity here. Ideally, you want to keep the semantically related pieces of text together. What "semantically related" means could depend on the type of text. There are several ways to do that.
+
+At a high level, text splitters work as follows:
+
+1. Split the text up into small, semantically meaningful chunks (often sentences).
+2. Start combining these small chunks into a larger chunk until you reach a certain size (as measured by some function).
+3. Once you reach that size, make that chunk its own piece of text and then start creating a new chunk of text with some overlap (to keep context between chunks).
+
+That means there are two different axes along which you can customize your text splitter:
+
+1. How the text is split.
+2. How the chunk size is measured.
+
+The most basic text splitter is the `CharacterTextSplitter`. This splits based on characters (by default `\n\n`) and measures chunk length by number of characters.
+
+The default recommended text splitter is the `RecursiveCharacterTextSplitter`. This text splitter takes a list of characters. It tries to create chunks based on splitting on the first character, but if any chunks are too large it then moves on to the next character, and so forth. By default the characters it tries to split on are `["\n\n", "\n", " ", ""]`.
+
+In addition to controlling which characters you can split on, you can also control a few other things:
+
+- `lengthFunction`: how the length of chunks is calculated. Defaults to just counting number of characters, but it's pretty common to pass a token counter here.
+- `chunkSize`: the maximum size of your chunks (as measured by the length function).
+- `chunkOverlap`: the maximum overlap between chunks. It can be nice to have some overlap to maintain some continuity between chunks (e.g. using a sliding window).
+- `addStartIndex`: whether to include the starting position of each chunk within the original document in the metadata.
+
+```dart
+const filePath = 'state_of_the_union.txt';
+const loader = TextLoader(filePath);
+final documents = await loader.load();
+const textSplitter = RecursiveCharacterTextSplitter(
+  chunkSize: 800,
+  chunkOverlap: 0,
+);
+final docs = textSplitter.splitDocuments(documents);
+```
+
+### Embedding models
+Embedding models create a vector representation of a piece of text. You can think of a vector as an array of numbers that captures the semantic meaning of the text. By representing the text in this way, you can perform mathematical operations that allow you to do things like search for other pieces of text that are most similar in meaning. These natural language search capabilities underpin many types of context retrieval, where we provide an LLM with the relevant data it needs to respond effectively to a query.
+
+The `Embeddings` class is designed for interfacing with text embedding models. There are lots of embedding model providers (OpenAI, Cohere, Hugging Face, etc.) - this class is designed to provide a standard interface for all of them.
+
+Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.
+
+The base `Embeddings` class in LangChain exposes two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).
+
+For specifics on how to use embedding models, see the [relevant how-to guides here](/docs/how_to/#embedding-models).
+
+### Vector stores
+One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding vectors, and then at query time to embed the unstructured query and retrieve the embedding vectors that are 'most similar' to the embedded query. A vector store takes care of storing embedded data and performing vector search for you.
+
+Most vector stores can also store metadata about embedded vectors and support filtering on that metadata before similarity search, allowing you more control over returned documents.
+
+Vector stores can be converted to the retriever interface by calling `asRetriever()` on them (see the sketch in the Retrievers section below).
+
+For specifics on how to use vector stores, see the [relevant how-to guides here](/docs/how_to/#vector-stores).
+
+### Retrievers
+A retriever is an interface that returns documents given an unstructured query. It is more general than a vector store. A retriever does not need to be able to store documents, only to return (or retrieve) them. Vector stores can be used as the backbone of a retriever, but there are other types of retrievers as well.
+
+Retrievers accept a string query as input and return a list of `Document` objects as output.
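+
+For instance, a vector store can be exposed through the retriever interface and then queried with a plain string. The snippet below is a minimal sketch, reusing the in-memory vector store and OpenAI embeddings from the getting-started example; the API key is a placeholder and the import paths are assumptions:
+
+```dart
+import 'package:langchain/langchain.dart';
+import 'package:langchain_openai/langchain_openai.dart';
+
+Future<void> main() async {
+  // Embed and store a couple of documents in an in-memory vector store.
+  final vectorStore = MemoryVectorStore(
+    embeddings: OpenAIEmbeddings(apiKey: 'your-openai-api-key'), // placeholder
+  );
+  await vectorStore.addDocuments(
+    documents: [
+      Document(pageContent: 'LangChain was created by Harrison'),
+      Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),
+    ],
+  );
+
+  // Expose the vector store through the retriever interface:
+  // a String query in, a list of relevant Documents out.
+  final retriever = vectorStore.asRetriever();
+  final docs = await retriever.getRelevantDocuments('Who created LangChain.dart?');
+  for (final doc in docs) {
+    print(doc.pageContent);
+  }
+}
+```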
+
+The public API of the `BaseRetriever` class in LangChain is as follows:
+
+```dart
+abstract interface class BaseRetriever {
+  Future<List<Document>> getRelevantDocuments(final String query);
+}
+```
+
+For specifics on how to use retrievers, see the [relevant how-to guides here](/docs/how_to/#retrievers).
+
+### Tools
+Tools are utilities designed to be called by a model. Their inputs are designed to be generated by models, and their outputs are designed to be passed back to models. Tools are needed whenever you want a model to control parts of your code or call out to external APIs.
+
+A tool consists of:
+
+1. The name of the tool.
+2. A description of what the tool does.
+3. A JSON schema defining the inputs to the tool.
+4. A function (and, optionally, an async variant of the function).
+
+When a tool is bound to a model, the name, description and JSON schema are provided as context to the model.
+Given a list of tools and a set of instructions, a model can request to call one or more tools with specific inputs.
+
+To define a tool in Dart, we use the `ToolSpec` class:
+```dart
+final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
+final model = ChatOpenAI(apiKey: openaiApiKey);
+
+final promptTemplate = ChatPromptTemplate.fromTemplate(
+  'Tell me a joke about {foo}',
+);
+
+const tool = ToolSpec(
+  name: 'joke',
+  description: 'A joke',
+  inputJsonSchema: {
+    'type': 'object',
+    'properties': {
+      'setup': {
+        'type': 'string',
+        'description': 'The setup for the joke',
+      },
+      'punchline': {
+        'type': 'string',
+        'description': 'The punchline for the joke',
+      },
+    },
+    'required': ['setup', 'punchline'],
+  },
+);
+
+final chain = promptTemplate |
+    model.bind(
+      ChatOpenAIOptions(
+        tools: const [tool],
+        toolChoice: ChatToolChoice.forced(name: tool.name),
+      ),
+    );
+
+final res = await chain.invoke({'foo': 'bears'});
+print(res);
+// ChatResult{
+//   id: chatcmpl-9LBPyaZcFMgjmOvkD0JJKAyA4Cihb,
+//   output: AIChatMessage{
+//     content: ,
+//     toolCalls: [
+//       AIChatMessageToolCall{
+//         id: call_JIhyfu6jdIXaDHfYzbBwCKdb,
+//         name: joke,
+//         argumentsRaw: {"setup":"Why don't bears like fast food?","punchline":"Because they can't catch it!"},
+//         arguments: {
+//           setup: Why don't bears like fast food?,
+//           punchline: Because they can't catch it!
+//         },
+//       }
+//     ],
+//   },
+//   finishReason: FinishReason.stop,
+//   metadata: {
+//     model: gpt-4o-mini,
+//     created: 1714835806,
+//     system_fingerprint: fp_3b956da36b
+//   },
+//   usage: LanguageModelUsage{
+//     promptTokens: 77,
+//     responseTokens: 24,
+//     totalTokens: 101
+//   },
+//   streaming: false
+// }
+```
+
+When designing tools to be used by a model, it is important to keep in mind that:
+
+- Chat models that have explicit [tool-calling APIs](/docs/concepts/#functiontool-calling) will be better at tool calling than non-fine-tuned models.
+- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas. This is another form of prompt engineering.
+- Simple, narrowly scoped tools are easier for models to use than complex tools.
+
+#### Related
+
+For specifics on how to use tools, see the [tools how-to guides](/docs/how_to/#tools).
+
+To use a pre-built tool, see the [tool integration docs](/docs/integrations/tools/).
+
+### Agents
+By themselves, language models can't take actions - they just output text.
+A big use case for LangChain is creating agents. Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be.
+The results of those actions can then be fed back into the agent and it determine whether more actions are needed, or whether it is okay to finish. + +### Callbacks +TODO: + + +### Techniques + +#### Streaming + +#### Function/tool calling + +#### Structured Output +LLMs are capable of generating arbitrary text. This enables the model to respond appropriately to a wide range of inputs, but for some use-cases, it can be useful to constrain the LLM's output to a specific format or structure. This is referred to as structured output. + + +#### Few-shot prompting + +#### Retrieval + +#### Text splitting + +#### Evaluation + +#### Tracing +##### \ No newline at end of file diff --git a/docs_v2/docs/05-integrations/anthropic.md b/docs_v2/docs/05-integrations/anthropic.md new file mode 100644 index 00000000..b607ddc7 --- /dev/null +++ b/docs_v2/docs/05-integrations/anthropic.md @@ -0,0 +1,145 @@ +# ChatAnthropic + +Wrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API). + +## Setup + +The Anthropic API uses API keys for authentication. Visit your [API Keys](https://console.anthropic.com/settings/keys) page to retrieve the API key you'll use in your requests. + +The following models are available: +- `claude-3-5-sonnet-20240620` +- `claude-3-haiku-20240307` +- `claude-3-opus-20240229` +- `claude-3-sonnet-20240229` +- `claude-2.0` +- `claude-2.1` + +Mind that the list may not be up-to-date. See https://docs.anthropic.com/en/docs/about-claude/models for the updated list. + +## Usage + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, 'Text to translate:\n{text}'), +]); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'adore programmer.' +``` + +## Multimodal support + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), +); + +print(res.output.content); +// -> 'The fruit in the image is an apple.' 
+``` + +## Streaming + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser()); + +final stream = chain.stream({'max_num': '30'}); +await stream.forEach(print); +// 123 +// 456789101 +// 112131415161 +// 718192021222 +// 324252627282 +// 930 +``` + +## Tool calling + +`ChatAnthropic` supports tool calling. + +Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. + +Example: +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + }, + 'required': ['location'], + }, +); +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + tools: [tool], + ), +); + +final res = await model.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +``` diff --git a/docs_v2/docs/05-integrations/anyscale.md b/docs_v2/docs/05-integrations/anyscale.md new file mode 100644 index 00000000..9a71c30e --- /dev/null +++ b/docs_v2/docs/05-integrations/anyscale.md @@ -0,0 +1,84 @@ +# Anyscale + +[Anyscale](https://www.anyscale.com/) offers a unified OpenAI-compatible API for a broad range of [models](https://docs.endpoints.anyscale.com/guides/models/#chat-models) running serverless or on your own dedicated instances. + +It also allows to fine-tune models on your own data or train new models from scratch. + +You can consume Anyscale API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API. 
+ +The only difference is that you need to change the base URL to `https://api.endpoints.anyscale.com/v1`: + +```dart +final chatModel = ChatOpenAI( + apiKey: anyscaleApiKey, + baseUrl: 'https://api.endpoints.anyscale.com/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'meta-llama/Llama-2-70b-chat-hf', + ), +); +``` + +## Invoke + +```dart +final anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that translates {input_language} to {output_language}.', + ), + (ChatMessageType.human, '{text}'), +]); + +final chatModel = ChatOpenAI( + apiKey: anyscaleApiKey, + baseUrl: 'https://api.endpoints.anyscale.com/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'meta-llama/Llama-2-70b-chat-hf', + ), +); + +final chain = promptTemplate | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> "I love programming" se traduit en français sous la forme "J'aime passionnément la programmation" +``` + +## Stream + +```dart +final anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces or commas', + ), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatOpenAI( + apiKey: anyscaleApiKey, + baseUrl: 'https://api.endpoints.anyscale.com/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'mistralai/Mixtral-8x7B-Instruct-v0.1', + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '9'}); +await stream.forEach(print); +// 1 +// 2 +// 3 +// ... +// 9 +``` diff --git a/docs_v2/docs/05-integrations/firebase_vertex_ai.md b/docs_v2/docs/05-integrations/firebase_vertex_ai.md new file mode 100644 index 00000000..cd33daa2 --- /dev/null +++ b/docs_v2/docs/05-integrations/firebase_vertex_ai.md @@ -0,0 +1,190 @@ +# Vertex AI for Firebase + +The [Vertex AI Gemini API](https://firebase.google.com/docs/vertex-ai) gives you access to the latest generative AI models from Google: the Gemini models. If you need to call the Vertex AI Gemini API directly from your mobile or web app you can use the `ChatFirebaseVertexAI` class instead of the [`ChatVertexAI`](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md) class which is designed to be used on the server-side. + +`ChatFirebaseVertexAI` is built specifically for use with mobile and web apps, offering security options against unauthorized clients as well as integrations with other Firebase services. + +## Key capabilities + +- **Multimodal input**: The Gemini models are multimodal, so prompts sent to the Gemini API can include text, images (even PDFs), video, and audio. +- **Growing suite of capabilities**: You can call the Gemini API directly from your mobile or web app, build an AI chat experience, use function calling, and more. +- **Security for production apps**: Use Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients. 
+- **Robust infrastructure**: Take advantage of scalable infrastructure that's built for use with mobile and web apps, like managing structured data with Firebase database offerings (like Cloud Firestore) and dynamically setting run-time configurations with Firebase Remote Config. + +## Setup + +### 1. Set up a Firebase project + +Check the [Firebase documentation](https://firebase.google.com/docs/vertex-ai/get-started?platform=flutter) for the latest information on how to set up the Vertex AI for Firebase in your Firebase project. + +In summary, you need to: +1. Upgrade your billing plan to the Blaze pay-as-you-go pricing plan. +2. Enable the required APIs (`aiplatform.googleapis.com` and `firebaseml.googleapis.com`). +3. Integrate the Firebase SDK into your app (if you haven't already). +4. Recommended: Enable Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients. + +### 2. Add the LangChain.dart Google package + +Add the `langchain_google` package to your `pubspec.yaml` file. + +```yaml +dependencies: + langchain: {version} + langchain_google: {version} +``` + +Internally, `langchain_google` uses the [`firebase_vertexai`](https://pub.dev/packages/firebase_vertexai) SDK to interact with the Vertex AI for Firebase API. + +### 3. Initialize your Firebase app + +```yaml +await Firebase.initializeApp(); +``` + +### 4. Call the Vertex AI Gemini API + +```dart +final chatModel = ChatFirebaseVertexAI(); +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, 'Text to translate:\n{text}'), +]); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'adore programmer.' +``` + +> Check out the [sample project](https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase/example) to see a complete project using Vertex AI for Firebase. + +## Available models + +The following models are available: +- `gemini-1.5-flash`: + * text / image / audio -> text model + * Max input token: 1048576 + * Max output tokens: 8192 +- `gemini-1.5-pro`: + * text / image / audio -> text model + * Max input token: 1048576 + * Max output tokens: 8192 +- `gemini-1.0-pro-vision`: + * text / image -> text model + * Max input token: 12288 + * Max output tokens: 4096 +- `gemini-1.0-pro` + * text -> text model + * Max input token: 30720 + * Max output tokens: 2048 + +Mind that this list may not be up-to-date. Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) for the updated list. + +## Multimodal support + +```dart +final chatModel = ChatFirebaseVertexAI( + defaultOptions: ChatFirebaseVertexAIOptions( + model: 'gemini-1.5-pro', + ), +); +final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), +); +print(res.output.content); +// -> 'That is an apple.' 
+```
+
+## Streaming
+
+```dart
+final promptTemplate = ChatPromptTemplate.fromTemplates(const [
+  (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'),
+  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),
+]);
+
+final chatModel = ChatFirebaseVertexAI(
+  defaultOptions: ChatFirebaseVertexAIOptions(
+    model: 'gemini-1.5-pro',
+  ),
+);
+
+final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser());
+
+final stream = chain.stream({'max_num': '30'});
+await stream.forEach(print);
+// 1
+// 2345678910111213
+// 1415161718192021
+// 222324252627282930
+```
+
+## Tool calling
+
+`ChatFirebaseVertexAI` supports tool calling.
+
+Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools.
+
+Example:
+```dart
+const tool = ToolSpec(
+  name: 'get_current_weather',
+  description: 'Get the current weather in a given location',
+  inputJsonSchema: {
+    'type': 'object',
+    'properties': {
+      'location': {
+        'type': 'string',
+        'description': 'The city and state, e.g. San Francisco, CA',
+      },
+    },
+    'required': ['location'],
+  },
+);
+final chatModel = ChatFirebaseVertexAI(
+  defaultOptions: ChatFirebaseVertexAIOptions(
+    model: 'gemini-1.5-pro',
+    temperature: 0,
+    tools: [tool],
+  ),
+);
+final res = await chatModel.invoke(
+  PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'),
+);
+```
+
+## Prevent abuse with Firebase App Check
+
+You can use Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients. Check the [Firebase documentation](https://firebase.google.com/docs/vertex-ai/app-check) for more information.
+
+## Locations
+
+When initializing the Vertex AI service, you can optionally specify a location in which to run the service and access a model. If you don't specify a location, the default is us-central1. See the list of [available locations](https://firebase.google.com/docs/vertex-ai/locations?platform=flutter#available-locations).
+
+```dart
+final chatModel = ChatFirebaseVertexAI(
+  location: 'us-central1',
+);
+```
+
+## Alternatives
+
+- [`ChatVertexAI`](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md): Use this class to call the Vertex AI Gemini API from the server-side.
+- [`ChatGoogleGenerativeAI`](/modules/model_io/models/chat_models/integrations/googleai.md): Use this class to call the "Google AI" version of the Gemini API that provides free-of-charge access (within limits and where available). This API is not intended for production use but for experimentation and prototyping. After you're familiar with how the Gemini API works, migrate to Vertex AI for Firebase, which has many additional features important for mobile and web apps, like protecting the API from abuse using Firebase App Check.
diff --git a/docs_v2/docs/05-integrations/gcp_vertex_ai.md b/docs_v2/docs/05-integrations/gcp_vertex_ai.md
new file mode 100644
index 00000000..5417aab5
--- /dev/null
+++ b/docs_v2/docs/05-integrations/gcp_vertex_ai.md
@@ -0,0 +1,116 @@
+# GCP Chat Vertex AI
+
+Wrapper around [GCP Vertex AI chat models](https://cloud.google.com/vertex-ai/docs/generative-ai/chat/test-chat-prompts) API (aka PaLM API for chat).
+
+## Set up your Google Cloud Platform project
+
+1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager).
+2. 
[Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project). +3. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com). +4. [Configure the Vertex AI location](https://cloud.google.com/vertex-ai/docs/general/locations). + +### Authentication + +To create an instance of `ChatVertexAI` you need to provide an HTTP client that handles authentication. The easiest way to do this is to use [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html) from the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package. + +To create an instance of `VertexAI` you need to provide an [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html) instance. + +There are several ways to obtain an `AuthClient` depending on your use case. Check out the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package documentation for more details. + +Example using a service account JSON: + +```dart +final serviceAccountCredentials = ServiceAccountCredentials.fromJson( + json.decode(serviceAccountJson), +); +final authClient = await clientViaServiceAccount( + serviceAccountCredentials, + [ChatVertexAI.cloudPlatformScope], +); +final chatVertexAi = ChatVertexAI( + httpClient: authClient, + project: 'your-project-id', +); +``` + +The service account should have the following [permission](https://cloud.google.com/vertex-ai/docs/general/iam-permissions): +- `aiplatform.endpoints.predict` + +The required [OAuth2 scope](https://developers.google.com/identity/protocols/oauth2/scopes) is: +- `https://www.googleapis.com/auth/cloud-platform` (you can use the constant `ChatVertexAI.cloudPlatformScope`) + +See: https://cloud.google.com/vertex-ai/docs/generative-ai/access-control + +### Available models + +- `chat-bison` + * Max input token: 4096 + * Max output tokens: 1024 + * Training data: Up to Feb 2023 + * Max turns: 2500 +- `chat-bison-32k` + * Max input and output tokens combined: 32k + * Training data: Up to Aug 2023 + * Max turns: 2500 + +The previous list of models may not be exhaustive or up-to-date. Check out the [Vertex AI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models) for the latest list of available models. + +### Model options + +You can define default options to use when calling the model (e.g. temperature, stop sequences, etc. ) using the `defaultOptions` parameter. + +The default options can be overridden when calling the model using the `options` parameter. + +Example: +```dart +final chatModel = ChatVertexAI( + httpClient: authClient, + project: 'your-project-id', + defaultOptions: ChatVertexAIOptions( + temperature: 0.9, + ), +); +final result = await chatModel( + [ChatMessage.humanText('Hello')], + options: ChatVertexAIOptions( + temperature: 0.5, + ), +); +``` + +### Full example + +```dart +import 'package:langchain/langchain.dart'; +import 'package:langchain_google/langchain_google.dart'; + +void main() async { + final chat = ChatVertexAI( + httpClient: await _getAuthHttpClient(), + project: _getProjectId(), + defaultOptions: const ChatVertexAIOptions( + temperature: 0, + ), + ); + while (true) { + stdout.write('> '); + final usrMsg = ChatMessage.humanText(stdin.readLineSync() ?? 
''); + final aiMsg = await chat([usrMsg]); + print(aiMsg.content); + } +} + +Future _getAuthHttpClient() async { + final serviceAccountCredentials = ServiceAccountCredentials.fromJson( + json.decode(Platform.environment['VERTEX_AI_SERVICE_ACCOUNT']!), + ); + return clientViaServiceAccount( + serviceAccountCredentials, + [VertexAI.cloudPlatformScope], + ); +} + +String _getProjectId() { + return Platform.environment['VERTEX_AI_PROJECT_ID']!; +} +``` diff --git a/docs_v2/docs/05-integrations/googleai.md b/docs_v2/docs/05-integrations/googleai.md new file mode 100644 index 00000000..033c7672 --- /dev/null +++ b/docs_v2/docs/05-integrations/googleai.md @@ -0,0 +1,149 @@ +# ChatGoogleGenerativeAI + +Wrapper around [Google AI for Developers](https://ai.google.dev/) API (aka Gemini API). + +## Setup + +To use `ChatGoogleGenerativeAI` you need to have an API key. You can get one [here](https://aistudio.google.com/app/apikey). + +The following models are available: +- `gemini-1.5-flash`: + * text / image / audio -> text model + * Max input token: 1048576 + * Max output tokens: 8192 +- `gemini-1.5-pro`: text / image -> text model + * text / image / audio -> text model + * Max input token: 1048576 + * Max output tokens: 8192 +- `gemini-pro-vision`: + * text / image -> text model + * Max input token: 12288 + * Max output tokens: 4096 +- `gemini-1.0-pro` (or `gemini-pro`): + * text -> text model + * Max input token: 30720 + * Max output tokens: 2048 + +Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/models) for the updated list. + +## Usage + +```dart +final apiKey = Platform.environment['GOOGLEAI_API_KEY']; + +final chatModel = ChatGoogleGenerativeAI( + apiKey: apiKey, + defaultOptions: ChatGoogleGenerativeAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + ), +); + +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, 'Text to translate:\n{text}'), +]); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'adore programmer.' +``` + +## Multimodal support + +```dart +final apiKey = Platform.environment['GOOGLEAI_API_KEY']; + +final chatModel = ChatGoogleGenerativeAI( + apiKey: apiKey, + defaultOptions: ChatGoogleGenerativeAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + ), +); +final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), +); +print(res.output.content); +// -> 'That is an apple.' 
+``` + +## Streaming + +```dart +final apiKey = Platform.environment['GOOGLEAI_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatGoogleGenerativeAI( + apiKey: apiKey, + defaultOptions: const ChatGoogleGenerativeAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '30'}); +await stream.forEach(print); +// 1 +// 2345678910111213 +// 1415161718192021 +// 222324252627282930 +``` + +## Tool calling + +`ChatGoogleGenerativeAI` supports tool calling. + +Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. + +Example: +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + }, + 'required': ['location'], + }, +); +final chatModel = ChatGoogleGenerativeAI( + defaultOptions: ChatGoogleGenerativeAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + tools: [tool], + ), +); +final res = await model.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +``` diff --git a/docs_v2/docs/05-integrations/index.mdx b/docs_v2/docs/05-integrations/index.mdx new file mode 100644 index 00000000..35f38dfc --- /dev/null +++ b/docs_v2/docs/05-integrations/index.mdx @@ -0,0 +1,56 @@ +--- +sidebar_position: 0 +index: auto +--- +# Integrations + +> If you'd like to write your own integration, see Extending Langchain. + +The following table contains the list of existing Langchain.dart integration packages. To install a specific integration, see [Installing Langchain components](/docs/03-how_to/01-installation.md) + + +
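+As a quick orientation, here is a minimal sketch (assuming the core `langchain` package plus one integration package from the table below, `langchain_openai` in this example, have already been added to your `pubspec.yaml`) of how an integration plugs into the shared LCEL primitives. The model and environment variable names are illustrative:
+
+```dart
+import 'dart:io';
+
+import 'package:langchain/langchain.dart';
+import 'package:langchain_openai/langchain_openai.dart';
+
+Future<void> main() async {
+  // The integration package contributes the model wrapper (ChatOpenAI here);
+  // prompts, output parsers, and piping come from the core package.
+  final chatModel = ChatOpenAI(
+    apiKey: Platform.environment['OPENAI_API_KEY'],
+  );
+  final chain = PromptTemplate.fromTemplate('Tell me a joke about {topic}')
+      .pipe(chatModel)
+      .pipe(const StringOutputParser());
+  print(await chain.invoke({'topic': 'llamas'}));
+}
+```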


    + +| Package | Version | Description | +|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | +| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | +| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components and utilities | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | +| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | + +Functionality provided by each integration package: + +| Package | LLMs | Chat models | Embeddings | Vector stores | Chains | Agents | Tools | +|---------------------------------------------------------------------|------|-------------|------------|---------------|--------|--------|-------| +| [langchain_community](https://pub.dev/packages/langchain_community) | | | | ✔ | | | ✔ | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | | ✔ | ✔ | ✔ | +| [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | ✔ | | | | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | | ✔ | | | | | | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | | | | | +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | | ✔ | ✔ | | | | | +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | | | | ✔ | | | | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | | | | ✔ | | | | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | | | | ✔ | | | | + +The following packages are maintained (and used internally) by LangChain.dart, although they can also be used independently: + +| Package | Version | Description | +|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------| +| [anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | [Anthropic](https://docs.anthropic.com/en/api) API client | +| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | [Chroma DB](https://trychroma.com/) API client | +| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | [Google AI for Developers](https://ai.google.dev/) API client | +| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | [Mistral AI](https://docs.mistral.ai/api) API client | +| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | [Ollama](https://ollama.ai/) API client | +| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | [OpenAI](https://platform.openai.com/docs/api-reference) API 
client | +| [tavily_dart](https://pub.dev/packages/tavily_dart) | [![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) | [Tavily](https://tavily.com) API client | +| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | [GCP Vertex AI](https://cloud.google.com/vertex-ai) API client | + diff --git a/docs_v2/docs/05-integrations/mistralai.md b/docs_v2/docs/05-integrations/mistralai.md new file mode 100644 index 00000000..14f21fb1 --- /dev/null +++ b/docs_v2/docs/05-integrations/mistralai.md @@ -0,0 +1,76 @@ +# ChatMistralAI + +Wrapper around [Mistral AI](https://mistral.ai/) Chat Completions API. + +Mistral AI brings the strongest open generative models to the developers, along with efficient ways to deploy and customise them for production. + +> Note: Mistral AI API is currently in closed beta. You can request access [here](https://console.mistral.ai). + +## Setup + +To use `ChatMistralAI` you need to have a Mistral AI account and an API key. You can get one [here](https://console.mistral.ai/users/). + +The following models are available at the moment: +- `mistral-tiny`: Mistral 7B Instruct v0.2 (a minor release of Mistral 7B Instruct). It only works in English and obtains 7.6 on MT-Bench. +- `mistral-small`: Mixtral 8x7B. It masters English/French/Italian/German/Spanish and code and obtains 8.3 on MT-Bench. +- `mistral-medium`: a prototype model, that is currently among the top serviced models available based on standard benchmarks. It masters English/French/Italian/German/Spanish and code and obtains a score of 8.6 on MT-Bench. + +## Usage + +```dart +final chatModel = ChatMistralAI( + apiKey: 'apiKey', + defaultOptions: ChatMistralAIOptions( + model: 'mistral-small', + temperature: 0, + ), +); + +const template = 'You are a helpful assistant that translates {input_language} to {output_language}.'; +final systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(template); +const humanTemplate = '{text}'; +final humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(humanTemplate); +final chatPrompt = ChatPromptTemplate.fromPromptMessages( + [systemMessagePrompt, humanMessagePrompt], +); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'aime la programmation.' 
+``` + +## Streaming + +```dart +final promptTemplate = ChatPromptTemplate.fromPromptMessages([ + SystemChatMessagePromptTemplate.fromTemplate( + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces or commas', + ), + HumanChatMessagePromptTemplate.fromTemplate( + 'List the numbers from 1 to {max_num}', + ), +]); +final chat = ChatMistralAI( + apiKey: 'apiKey', + defaultOptions: ChatMistralAIOptions( + model: 'mistral-medium', + temperature: 0, + ), +); + +final chain = promptTemplate.pipe(chat).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '9'}); +await stream.forEach(print); +// 12 +// 345 +// 67 +// 89 +``` diff --git a/docs_v2/docs/05-integrations/ollama.md b/docs_v2/docs/05-integrations/ollama.md new file mode 100644 index 00000000..e6cc5907 --- /dev/null +++ b/docs_v2/docs/05-integrations/ollama.md @@ -0,0 +1,462 @@ +# ChatOllama + +Wrapper around [Ollama](https://ollama.ai) Completions API that enables to interact with the LLMs in a chat-like fashion. + +Ollama allows you to run open-source large language models, such as Llama 3.1 or Gemma 2, locally. + +Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage. + +## Setup + +Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance: + +1. Download and install [Ollama](https://ollama.ai) +2. Fetch a model via `ollama pull ` + * e.g., for Llama 3: `ollama pull llama3.1` +3. Instantiate the `ChatOllama` class with the downloaded model. + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + ), +); +``` + +For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library). + +### Ollama base URL + +By default, `ChatOllama` uses 'http://localhost:11434/api' as base URL (default Ollama API URL). But if you are running Ollama on a different host, you can override it using the `baseUrl` parameter. + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + baseUrl: 'https://your-remote-server-where-ollama-is-running.com', + model: 'llama3.1', + ), +); +``` + +## Usage + +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, '{text}'), +]); + +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + ), +); + +final chain = promptTemplate | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'La traduction est : "J'aime le programming.' +``` + +### Streaming + +Ollama supports streaming the output as the model generates it. 
+ +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); +final chat = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + ), +); +final chain = promptTemplate.pipe(chat).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '9'}); +await stream.forEach(print); +// 123 +// 456 +// 789 +``` + +### Multimodal support + +Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava). + +You can provide several base64-encoded `png` or `jpeg` images. Images up to 100MB in size are supported. + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llava', + temperature: 0, + ), +); +final prompt = ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), +); +final res = await chatModel.invoke(PromptValue.chat([prompt])); +print(res.output.content); +// -> 'An Apple' +``` + +### Tool calling + +`ChatOllama` offers support for native tool calling. This enables a model to answer a given prompt using tool(s) it knows about, making it possible for models to perform more complex tasks or interact with the outside world. It follows the standard [LangChain.dart tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). + +**Notes:** +- Tool calling requires [Ollama 0.3.0](https://github.com/ollama/ollama/releases/tag/v0.3.0) or newer. +- Streaming tool calls is not supported at the moment. +- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1) or [`llama3-groq-tool-use`](https://ollama.com/library/llama3-groq-tool-use)). +- At the moment, small models like `llama3.1` [cannot reliably maintain a conversation alongside tool calling definitions](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#llama-3.1-instruct). They can be used for zero-shot tool calling, but for multi-turn conversations it's recommended to use larger models like `llama3.1:70b` or `llama3.1:405b`. + +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + }, + 'required': ['location'], + }, +); + +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [tool], + ), +); + +final res = await chatModel.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +print(res.output.toolCalls); +// [AIChatMessageToolCall{ +// id: a621064b-03b3-4ca6-8278-f37504901034, +// name: get_current_weather, +// arguments: {location: Boston, US}, +// }, +// AIChatMessageToolCall{ +// id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, +// name: get_current_weather, +// arguments: {location: Madrid, ES}, +// }] +``` + +As you can see, `ChatOllama` support calling multiple tools in a single request. + +If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter: + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [tool], + toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), + ), +); +``` + +**Pro-tip:** You can improve tool-calling performance of small models by using few-shot prompting. You can find out how to do this [here](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools?id=few-shot-prompting) and in this [blog post](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance). + +### JSON mode + +You can force the model to produce JSON output that you can easily parse using `JsonOutputParser`, useful for extracting structured data. + +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are an assistant that respond question using JSON format.'), + (ChatMessageType.human, '{question}'), +]); +final chat = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + format: OllamaResponseFormat.json, + ), +); + +final chain = Runnable.getMapFromInput('question') + .pipe(promptTemplate) + .pipe(chat) + .pipe(JsonOutputParser()); + +final res = await chain.invoke( + 'What is the population of Spain, The Netherlands, and France?', +); +print(res); +// {Spain: 46735727, The Netherlands: 17398435, France: 65273538} +``` + +## Examples + +### Answering questions with data from an external API + +Imagine you have an API that provides flight times between two cities: + +```dart +// Simulates an API call to get flight times +// In a real application, this would fetch data from a live database or API +String getFlightTimes(String departure, String arrival) { + final flights = { + 'NYC-LAX': { + 'departure': '08:00 AM', + 'arrival': '11:30 AM', + 'duration': '5h 30m', + }, + 'LAX-NYC': { + 'departure': '02:00 PM', + 'arrival': '10:30 PM', + 'duration': '5h 30m', + }, + 'LHR-JFK': { + 'departure': '10:00 AM', + 'arrival': '01:00 PM', + 'duration': '8h 00m', + }, + 'JFK-LHR': { + 'departure': '09:00 PM', + 'arrival': '09:00 AM', + 'duration': '7h 00m', + }, + 'CDG-DXB': { + 'departure': '11:00 AM', + 'arrival': '08:00 PM', + 'duration': '6h 00m', + }, + 'DXB-CDG': { + 'departure': '03:00 AM', + 'arrival': '07:30 AM', + 'duration': '7h 30m', + }, + }; + + final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}'; + return jsonEncode(flights[key] ?? {'error': 'Flight not found'}); +} +``` + +Using the tool calling capabilities of Ollama, we can provide the model with the ability to call this API whenever it needs to get flight times to answer a question. 
+ +```dart +const getFlightTimesTool = ToolSpec( + name: 'get_flight_times', + description: 'Get the flight times between two cities', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'departure': { + 'type': 'string', + 'description': 'The departure city (airport code)', + }, + 'arrival': { + 'type': 'string', + 'description': 'The arrival city (airport code)', + }, + }, + 'required': ['departure', 'arrival'], + }, +); + +final chatModel = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [getFlightTimesTool], + ), +); + +final messages = [ + ChatMessage.humanText( + 'What is the flight time from New York (NYC) to Los Angeles (LAX)?', + ), +]; + +// First API call: Send the query and function description to the model +final response = await chatModel.invoke(PromptValue.chat(messages)); + +messages.add(response.output); + +// Check if the model decided to use the provided function +if (response.output.toolCalls.isEmpty) { + print("The model didn't use the function. Its response was:"); + print(response.output.content); + return; +} + +// Process function calls made by the model +for (final toolCall in response.output.toolCalls) { + final functionResponse = getFlightTimes( + toolCall.arguments['departure'], + toolCall.arguments['arrival'], + ); + // Add function response to the conversation + messages.add( + ChatMessage.tool( + toolCallId: toolCall.id, + content: functionResponse, + ), + ); +} + +// Second API call: Get final response from the model +final finalResponse = await chatModel.invoke(PromptValue.chat(messages)); +print(finalResponse.output.content); +// The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes. +``` + +### Extracting structured data with tools + +A useful application of tool calling is extracting structured data from unstructured text. In the following example, we use a tool to extract the names, heights, and hair colors of people mentioned in a passage. + +```dart +const tool = ToolSpec( + name: 'information_extraction', + description: 'Extracts the relevant information from the passage', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'people': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'description': 'The name of a person', + }, + 'height': { + 'type': 'number', + 'description': 'The height of the person in cm', + }, + 'hair_color': { + 'type': 'string', + 'description': 'The hair color of the person', + 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], + }, + }, + 'required': ['name', 'height', 'hair_color'], + }, + }, + }, + 'required': ['people'], + }, +); + +final model = ChatOllama( + defaultOptions: ChatOllamaOptions( + options: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + ), + tools: [tool], + toolChoice: ChatToolChoice.forced(name: tool.name), + ), +); + +final promptTemplate = ChatPromptTemplate.fromTemplate(''' +Extract and save the relevant entities mentioned in the following passage together with their properties. + +Passage: +{input}'''); + +final chain = Runnable.getMapFromInput() + .pipe(promptTemplate) + .pipe(model) + .pipe(ToolsOutputParser()); + +final res = await chain.invoke( + 'Alex is 5 feet tall. ' + 'Claudia is 1 foot taller than Alex and jumps higher than him. 
' 'Claudia has orange hair and Alex is blonde.',
+);
+final extractedData = res.first.arguments;
+print(extractedData);
+// {
+//   people: [
+//     {
+//       name: Alex,
+//       height: 152,
+//       hair_color: blonde
+//     },
+//     {
+//       name: Claudia,
+//       height: 183,
+//       hair_color: orange
+//     }
+//   ]
+// }
+```
+
+### RAG (Retrieval-Augmented Generation) pipeline
+
+We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `ChatOllama`.
+
+```dart
+// 1. Create a vector store and add documents to it
+final vectorStore = MemoryVectorStore(
+  embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'),
+);
+await vectorStore.addDocuments(
+  documents: [
+    Document(pageContent: 'LangChain was created by Harrison'),
+    Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),
+  ],
+);
+
+// 2. Construct a RAG prompt template
+final promptTemplate = ChatPromptTemplate.fromTemplates([
+  (ChatMessageType.system, 'Answer the question based on only the following context:\n{context}'),
+  (ChatMessageType.human, '{question}'),
+]);
+
+// 3. Define the model to use and the vector store retriever
+final chatModel = ChatOllama(
+  defaultOptions: ChatOllamaOptions(model: 'llama3.1'),
+);
+final retriever = vectorStore.asRetriever(
+  defaultOptions: VectorStoreRetrieverOptions(
+    searchType: VectorStoreSimilaritySearch(k: 1),
+  ),
+);
+
+// 4. Create a Runnable that combines the retrieved documents into a single string
+final docCombiner = Runnable.mapInput<List<Document>, String>((docs) {
+  return docs.map((final d) => d.pageContent).join('\n');
+});
+
+// 5. Define the RAG pipeline
+final chain = Runnable.fromMap<String>({
+  'context': retriever.pipe(docCombiner),
+  'question': Runnable.passthrough(),
+}).pipe(promptTemplate).pipe(chatModel).pipe(StringOutputParser());
+
+// 6. Run the pipeline
+final res = await chain.invoke('Who created LangChain.dart?');
+print(res);
+// Based on the context provided, David created LangChain.dart.
+```
diff --git a/docs_v2/docs/05-integrations/open_router.md b/docs_v2/docs/05-integrations/open_router.md
new file mode 100644
index 00000000..c2d63555
--- /dev/null
+++ b/docs_v2/docs/05-integrations/open_router.md
@@ -0,0 +1,157 @@
+# OpenRouter
+
+[OpenRouter](https://openrouter.ai/) offers a unified OpenAI-compatible API for a broad range of [models](https://openrouter.ai/models).
+
+You can also let users pay for their own models via their [OAuth PKCE](https://openrouter.ai/docs#oauth) flow.
+
+You can consume the OpenRouter API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API.
+
+The only difference is that you need to change the base URL to `https://openrouter.ai/api/v1`:
+
+```dart
+final chatModel = ChatOpenAI(
+  apiKey: openRouterApiKey,
+  baseUrl: 'https://openrouter.ai/api/v1',
+  defaultOptions: const ChatOpenAIOptions(
+    model: 'mistralai/mistral-small',
+  ),
+);
+```
+
+OpenRouter allows you to specify an optional `HTTP-Referer` header to identify your app and make it discoverable to users on openrouter.ai. You can also include an optional `X-Title` header to set or modify the title of your app.
+ +```dart + final chatModel = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + headers: { + 'HTTP-Referer': 'com.myapp', + 'X-Title': 'OpenRouterTest', + }, + defaultOptions: const ChatOpenAIOptions( + model: 'mistralai/mistral-small', + ), +); +``` + +## Invoke + +```dart +final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that translates {input_language} to {output_language}.', + ), + (ChatMessageType.human, '{text}'), +]); + +final chatModel = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'mistralai/mistral-small', + ), +); + +final chain = promptTemplate | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'aime la programmation.' +``` + +## Stream + +```dart +final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces or commas', + ), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'mistralai/mistral-small', + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '9'}); +await stream.forEach(print); +// 123 +// 456789 +``` + +## Tool calling + +OpenRouter supports [tool calling](https://openrouter.ai/docs#tool-calls). + +Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. + +In the following example we use the `joke` tool to generate jokes. We stream the joke generation using the `ToolsOutputParser' which tries to "auto-complete" the partial json from each chunk into a valid state. 
+ +```dart +final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; +const tool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', + }, + }, + 'required': ['location', 'punchline'], + }, +); +final promptTemplate = ChatPromptTemplate.fromTemplate( + 'tell me a long joke about {foo}', +); +final chat = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + toolChoice: ChatToolChoice.forced(name: 'joke'), + ), +); +final outputParser = ToolsOutputParser(); + +final chain = promptTemplate.pipe(chat).pipe(outputParser); + +final stream = chain.stream({'foo': 'bears'}); +await for (final chunk in stream) { + final args = chunk.first.arguments; + print(args); +} +// {} +// {setup: } +// {setup: Why don't} +// {setup: Why don't bears} +// {setup: Why don't bears like fast food} +// {setup: Why don't bears like fast food?, punchline: } +// {setup: Why don't bears like fast food?, punchline: Because} +// {setup: Why don't bears like fast food?, punchline: Because they can't} +// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} +``` diff --git a/docs_v2/docs/05-integrations/openai.md b/docs_v2/docs/05-integrations/openai.md new file mode 100644 index 00000000..6b3ccbbc --- /dev/null +++ b/docs_v2/docs/05-integrations/openai.md @@ -0,0 +1,372 @@ +# OpenAI + +This notebook provides a quick overview for getting started with [OpenAI](https://platform.openai.com/docs/introduction) chat models. For detailed documentation of all `ChatOpenAI` features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest/langchain_openai/ChatOpenAI-class.html). + +OpenAI has several chat models. You can find information about their latest models and their costs, context windows, and supported input types in the [OpenAI docs](https://platform.openai.com/docs/models). + +> Note that certain OpenAI models can also be accessed via the [Microsoft Azure platform](https://azure.microsoft.com/en-us/products/ai-services/openai-service). Check out the API reference for more information on how to use the Azure with `ChatOpenAI`. + +## Setup + +To access OpenAI models you'll need to create an OpenAI account, get an API key, and install the [langchain_openai](https://pub.dev/packages/langchain_openai) integration package. + +### Credentials + +Head to the [OpenAI Platform](https://platform.openai.com), sign up and get your [API key](https://platform.openai.com/account/api-keys). 
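+
+A common pattern, used in the examples on this page (though just one option), is to expose the key through an environment variable and read it with `Platform.environment` from `dart:io`:
+
+```dart
+import 'dart:io';
+
+void main() {
+  // Assumes OPENAI_API_KEY was exported in the shell before running the app.
+  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
+  if (openaiApiKey == null) {
+    stderr.writeln('Please set the OPENAI_API_KEY environment variable.');
+    exit(1);
+  }
+  // Pass the key to ChatOpenAI(apiKey: openaiApiKey) as shown in the next sections.
+}
+```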
+
+### Installation
+
+The LangChain.dart OpenAI integration lives in the [langchain_openai](https://pub.dev/packages/langchain_openai) package:
+
+```bash
+dart pub add langchain_openai
+```
+
+## Usage
+
+### Instantiation
+
+Now we can instantiate our model object and generate chat completions:
+
+```dart
+final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
+
+final chatModel = ChatOpenAI(
+  apiKey: openaiApiKey,
+  defaultOptions: ChatOpenAIOptions(
+    model: 'gpt-4o',
+    temperature: 0,
+    // ...other options
+  ),
+);
+```
+
+If you are using a proxy, you can override the base URL, headers, and other options:
+
+```dart
+final client = ChatOpenAI(
+  baseUrl: 'https://my-proxy.com',
+  headers: {'x-my-proxy-header': 'value'},
+);
+```
+
+### Invocation
+
+Now you can generate completions by calling the `invoke` method:
+
+```dart
+final messages = [
+  ChatMessage.system('You are a helpful assistant that translates English to French.'),
+  ChatMessage.humanText('I love programming.'),
+];
+final prompt = PromptValue.chat(messages);
+final res = await chatModel.invoke(prompt);
+// -> 'J'adore la programmation.'
+```
+
+### Chaining
+
+We can chain our model with a prompt template or output parser to create a more complex pipeline:
+
+```dart
+final promptTemplate = ChatPromptTemplate.fromTemplates([
+  (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'),
+  (ChatMessageType.human, '{text}'),
+]);
+
+final chain = promptTemplate | chatModel | StringOutputParser();
+
+final res = await chain.invoke({
+  'input_language': 'English',
+  'output_language': 'French',
+  'text': 'I love programming.',
+});
+print(res);
+// -> 'J'adore la programmation.'
+```
+
+### Streaming
+
+OpenAI models support [streaming](/expression_language/streaming.md) the output of the model as it is generated.
+
+```dart
+final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
+
+final promptTemplate = ChatPromptTemplate.fromTemplates([
+  (ChatMessageType.system,
+    'You are a helpful assistant that replies only with numbers '
+    'in order without any spaces or commas',
+  ),
+  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),
+]);
+
+final chat = ChatOpenAI(apiKey: openaiApiKey);
+
+final chain = promptTemplate.pipe(chat).pipe(StringOutputParser());
+
+final stream = chain.stream({'max_num': '9'});
+await stream.forEach(print);
+// 123
+// 456
+// 789
+```
+
+### Multimodal support
+
+OpenAI's models have [vision capabilities](https://platform.openai.com/docs/guides/vision), meaning the models can take in images and answer questions about them.
+
+You can send the image as a base64-encoded string:
+
+```dart
+final prompt = PromptValue.chat([
+  ChatMessage.system('You are a helpful assistant.'),
+  ChatMessage.human(
+    ChatMessageContent.multiModal([
+      ChatMessageContent.text('What fruit is this?'),
+      ChatMessageContent.image(
+        mimeType: 'image/jpeg',
+        data: '/9j/4AAQSkZJRgABAQAAAQABAAD...Rdu1j//2Q==', // base64-encoded image
+      ),
+    ]),
+  ),
+]);
+```
+
+Or you can send the URL where the image is hosted:
+
+```dart
+final prompt = PromptValue.chat([
+  ChatMessage.system('You are a helpful assistant.'),
+  ChatMessage.human(
+    ChatMessageContent.multiModal([
+      ChatMessageContent.text('What fruit is this?'),
+      ChatMessageContent.image(
+        data: 'https://upload.wikimedia.org/wikipedia/commons/9/92/95apple.jpeg',
+      ),
+    ]),
+  ),
+]);
+```
+
+### Tool calling
+
+OpenAI has a [tool calling](/modules/model_io/models/chat_models/how_to/tools.md) API (we use "tool calling" and "function calling" interchangeably here) that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. Tool calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally.
+
+```dart
+const tool = ToolSpec(
+  name: 'get_current_weather',
+  description: 'Get the current weather in a given location',
+  inputJsonSchema: {
+    'type': 'object',
+    'properties': {
+      'location': {
+        'type': 'string',
+        'description': 'The city and country, e.g. San Francisco, US',
+      },
+    },
+    'required': ['location'],
+  },
+);
+
+final chatModel = ChatOpenAI(
+  apiKey: openaiApiKey,
+  defaultOptions: ChatOpenAIOptions(
+    model: 'gpt-4o',
+    temperature: 0,
+    tools: [tool],
+  ),
+);
+
+final res = await chatModel.invoke(
+  PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'),
+);
+print(res.output.toolCalls);
+// [AIChatMessageToolCall{
+//   id: a621064b-03b3-4ca6-8278-f37504901034,
+//   name: get_current_weather,
+//   arguments: {location: Boston, US},
+// },
+// AIChatMessageToolCall{
+//   id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53,
+//   name: get_current_weather,
+//   arguments: {location: Madrid, ES},
+// }]
+```
+
+Notice that the returned `AIChatMessage` has a `toolCalls` field. It contains the tool calls in a standardized format that is model-provider agnostic.
+
+You can also stream OpenAI tool calls.
`ToolsOutputParser` is a useful tool for this case, as it concatenates the chunks progressively and tries to complete the partial JSON into a valid one: + +```dart +const tool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', + }, + }, + 'required': ['location', 'punchline'], + }, +); +final promptTemplate = ChatPromptTemplate.fromTemplate( + 'tell me a long joke about {foo}', +); +final chat = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + tools: [tool], + toolChoice: ChatToolChoice.forced(name: 'joke'), + ), +); +final outputParser = ToolsOutputParser(); + +final chain = promptTemplate.pipe(chat).pipe(outputParser); + +final stream = chain.stream({'foo': 'bears'}); +await for (final chunk in stream) { + final args = chunk.first.arguments; + print(args); +} +// {} +// {setup: } +// {setup: Why don't} +// {setup: Why don't bears} +// {setup: Why don't bears like fast food} +// {setup: Why don't bears like fast food?, punchline: } +// {setup: Why don't bears like fast food?, punchline: Because} +// {setup: Why don't bears like fast food?, punchline: Because they can't} +// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} +``` + +### Structured Outputs + +[Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) is a feature that ensures the model will always generate responses that adhere to your supplied JSON Schema, so you don't need to worry about the model omitting a required key, or hallucinating an invalid enum value. + +```dart +final prompt = PromptValue.chat([ + ChatMessage.system( + 'Extract the data of any companies mentioned in the ' + 'following statement. Return a JSON list.', + ), + ChatMessage.humanText( + 'Google was founded in the USA, while Deepmind was founded in the UK', + ), +]); +final chatModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o', + temperature: 0, + responseFormat: ChatOpenAIResponseFormat.jsonSchema( + ChatOpenAIJsonSchema( + name: 'Companies', + description: 'A list of companies', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'companies': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'origin': {'type': 'string'}, + }, + 'additionalProperties': false, + 'required': ['name', 'origin'], + }, + }, + }, + 'additionalProperties': false, + 'required': ['companies'], + }, + ), + ), + ), +); + +final res = await chatModel.invoke(prompt); +// { +// "companies": [ +// { +// "name": "Google", +// "origin": "USA" +// }, +// { +// "name": "Deepmind", +// "origin": "UK" +// } +// ] +// } +``` + +When you use `strict: true`, the model outputs will match the supplied schema exactly. Mind that the strict mode only support a [subset of JSON schema](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas) for performance reasons. Under-the-hood, OpenAI uses a technique known as constrained sampling or constrained decoding. For each JSON Schema, they compute a grammar that represents that schema, and pre-process its components to make it easily accessible during model sampling. This is why the first request with a new schema incurs a latency penalty. 
+
+### JSON mode
+
+When [JSON mode](https://platform.openai.com/docs/guides/structured-outputs/json-mode) is turned on, the model's output is guaranteed to be valid JSON. You can use it in combination with a `JsonOutputParser` to parse the response into a JSON map.
+
+> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that the model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It is recommended that you use Structured Outputs if it is supported for your use case.
+
+```dart
+final prompt = PromptValue.chat([
+  ChatMessage.system(
+    "Extract the 'name' and 'origin' of any companies mentioned in the "
+    'following statement. Return a JSON list.',
+  ),
+  ChatMessage.humanText(
+    'Google was founded in the USA, while Deepmind was founded in the UK',
+  ),
+]);
+final llm = ChatOpenAI(
+  apiKey: openaiApiKey,
+  defaultOptions: const ChatOpenAIOptions(
+    model: 'gpt-4-turbo',
+    temperature: 0,
+    responseFormat: ChatOpenAIResponseFormat.jsonObject,
+  ),
+);
+final chain = llm.pipe(JsonOutputParser());
+final res = await chain.invoke(prompt);
+print(res);
+// {
+//   "companies": [
+//     {
+//       "name": "Google",
+//       "origin": "USA"
+//     },
+//     {
+//       "name": "Deepmind",
+//       "origin": "UK"
+//     }
+//   ]
+// }
+```
+
+### Fine-tuning
+
+You can call [fine-tuned OpenAI models](https://platform.openai.com/docs/guides/fine-tuning) by passing the name of your fine-tuned model in the `model` parameter.
+
+This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. For example:
+
+```dart
+final chatModel = ChatOpenAI(
+  apiKey: openaiApiKey,
+  defaultOptions: ChatOpenAIOptions(
+    model: 'ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR',
+  ),
+);
+```
+
+## API reference
+
+For detailed documentation of all `ChatOpenAI` features and configurations, head to the [API reference](https://pub.dev/documentation/langchain_openai/latest).
diff --git a/docs_v2/docs/05-integrations/prem.md b/docs_v2/docs/05-integrations/prem.md
new file mode 100644
index 00000000..65258f7c
--- /dev/null
+++ b/docs_v2/docs/05-integrations/prem.md
@@ -0,0 +1,24 @@
+# Prem App
+
+You can easily run local models using [Prem app](https://www.premai.io/#PremApp).
+It creates a local server that exposes a REST API with the same interface as
+the OpenAI API.
+
+```dart
+const localUrl = 'http://localhost:8000'; // Check Prem app for the actual URL
+final chat = ChatOpenAI(baseUrl: localUrl);
+
+const template = 'You are a helpful assistant that translates {input_language} to {output_language}.';
+final systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(template);
+const humanTemplate = '{text}';
+final humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(humanTemplate);
+
+final chatPrompt = ChatPromptTemplate.fromPromptMessages([systemMessagePrompt, humanMessagePrompt]);
+final formattedPrompt = chatPrompt.formatPrompt({
+  'input_language': 'English',
+  'output_language': 'French',
+  'text': 'I love programming.'
+}).toChatMessages();
+
+final output = await chat.predictMessages(formattedPrompt);
+```
diff --git a/docs_v2/docs/05-integrations/together_ai.md b/docs_v2/docs/05-integrations/together_ai.md
new file mode 100644
index 00000000..10567455
--- /dev/null
+++ b/docs_v2/docs/05-integrations/together_ai.md
@@ -0,0 +1,84 @@
+# Together AI
+
+[Together AI](https://www.together.ai) offers a unified OpenAI-compatible API for a broad range of [models](https://api.together.xyz/playground) running serverless or on your own dedicated instances.
+
+It also allows you to fine-tune models on your own data or to train new models from scratch.
+
+You can consume the Together AI API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API.
+
+The only difference is that you need to change the base URL to `https://api.together.xyz/v1`:
+
+```dart
+final chatModel = ChatOpenAI(
+  apiKey: togetherAiApiKey,
+  baseUrl: 'https://api.together.xyz/v1',
+  defaultOptions: const ChatOpenAIOptions(
+    model: 'mistralai/Mistral-7B-Instruct-v0.2',
+  ),
+);
+```
+
+## Invoke
+
+```dart
+final togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY'];
+
+final promptTemplate = ChatPromptTemplate.fromTemplates(const [
+  (
+    ChatMessageType.system,
+    'You are a helpful assistant that translates {input_language} to {output_language}.',
+  ),
+  (ChatMessageType.human, '{text}'),
+]);
+
+final chatModel = ChatOpenAI(
+  apiKey: togetherAiApiKey,
+  baseUrl: 'https://api.together.xyz/v1',
+  defaultOptions: const ChatOpenAIOptions(
+    model: 'mistralai/Mistral-7B-Instruct-v0.2',
+  ),
+);
+
+final chain = promptTemplate | chatModel | StringOutputParser();
+
+final res = await chain.invoke({
+  'input_language': 'English',
+  'output_language': 'French',
+  'text': 'I love programming.',
+});
+print(res);
+// -> 'J'aime programmer'
+```
+
+## Stream
+
+```dart
+final togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY'];
+
+final promptTemplate = ChatPromptTemplate.fromTemplates(const [
+  (
+    ChatMessageType.system,
+    'You are a helpful assistant that replies only with numbers '
+        'in order without any spaces or commas',
+  ),
+  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),
+]);
+
+final chatModel = ChatOpenAI(
+  apiKey: togetherAiApiKey,
+  baseUrl: 'https://api.together.xyz/v1',
+  defaultOptions: const ChatOpenAIOptions(
+    model: 'mistralai/Mistral-7B-Instruct-v0.2',
+  ),
+);
+
+final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser());
+
+final stream = chain.stream({'max_num': '9'});
+await stream.forEach(print);
+// 1
+// 2
+// 3
+// ...
+// 9
+```
diff --git a/docs_v2/docs/05-integrations/tools/index.mdx b/docs_v2/docs/05-integrations/tools/index.mdx
new file mode 100644
index 00000000..211b41de
--- /dev/null
+++ b/docs_v2/docs/05-integrations/tools/index.mdx
@@ -0,0 +1,5 @@
+---
+sidebar_position: 0
+index: auto
+---
+# Tools
\ No newline at end of file
diff --git a/docs_v2/docs/05-integrations/tools/tavily_search.md b/docs_v2/docs/05-integrations/tools/tavily_search.md
new file mode 100644
index 00000000..2f3d461d
--- /dev/null
+++ b/docs_v2/docs/05-integrations/tools/tavily_search.md
@@ -0,0 +1,13 @@
+# Tavily Search
+
+[Tavily's Search API](https://tavily.com) is a search engine built specifically for AI agents (LLMs), delivering real-time, accurate, and factual results at speed.
+
+## Setup
+
+The integration lives in the `langchain_community` package, which you can add to your project together with `langchain`:
+
+```bash
+dart pub add langchain langchain_community
+```
+
+We also need to set our Tavily API key.
\ No newline at end of file
diff --git a/docs_v2/docusaurus.config.js b/docs_v2/docusaurus.config.js
new file mode 100644
index 00000000..1376cddc
--- /dev/null
+++ b/docs_v2/docusaurus.config.js
@@ -0,0 +1,130 @@
+// @ts-check
+// `@type` JSDoc annotations allow editor autocompletion and type checking
+// (when paired with `@ts-check`).
+// There are various equivalent ways to declare your Docusaurus config.
+// See: https://docusaurus.io/docs/api/docusaurus-config
+
+import { themes as prismThemes } from 'prism-react-renderer';
+
+/** @type {import('@docusaurus/types').Config} */
+const config = {
+  title: 'LangChain.dart',
+  tagline: 'LangChain.dart Docs',
+  favicon: 'img/favicon.ico',
+
+  // Set the production url of your site here
+  url: 'https://beta.langchaindart.dev/',
+  // Set the /<baseUrl>/ pathname under which your site is served
+  // For GitHub pages deployment, it is often '/<projectName>/'
+  baseUrl: '/',
+
+  // GitHub pages deployment config.
+  // If you aren't using GitHub pages, you don't need these.
+  organizationName: 'davidmigloz', // Usually your GitHub org/user name.
+  projectName: 'langchain_dart', // Usually your repo name.
+
+  onBrokenLinks: 'warn',
+  onBrokenMarkdownLinks: 'warn',
+
+  // Even if you don't use internationalization, you can use this field to set
+  // useful metadata like html lang. For example, if your site is Chinese, you
+  // may want to replace "en" with "zh-Hans".
+  i18n: {
+    defaultLocale: 'en',
+    locales: ['en'],
+  },
+
+  presets: [
+    [
+      'classic',
+      /** @type {import('@docusaurus/preset-classic').Options} */
+      ({
+        docs: {
+          sidebarPath: './sidebars.js',
+          // Please change this to your repo.
+          // Remove this to remove the "edit this page" links.
+ editUrl: + 'https://github.com/davidmigloz/langchain_dart/tree/main/docs_v2/', + }, + theme: { + customCss: './src/css/custom.css', + }, + }), + ], + ], + + themeConfig: + /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ + ({ + // Replace with your project's social card + image: 'img/langchain.dart.png', + navbar: { + title: 'LangChain.dart', + logo: { + alt: 'LangChain Logo', + src: 'img/favicon.ico', + }, + items: [ + { + to: '/docs/integrations', + position: 'left', + label: 'Integrations', + }, + {to: 'https://blog.langchaindart.dev/blog', label: 'Blog', position: 'left'}, + { + href: 'https://github.com/davidmigloz/langchain_dart/', + label: 'GitHub', + position: 'right', + }, + ], + }, + footer: { + style: 'dark', + links: [ + { + title: 'Docs', + items: [ + { + label: 'Tutorial', + to: '/docs/intro', + }, + ], + }, + { + title: 'Community', + items: [ + { + label: 'Discord', + href: 'https://discord.gg/x4qbhqecVR', + }, + { + label: 'pub.dev', + href: 'https://pub.dev/packages/langchain', + }, + ], + }, + { + title: 'More', + items: [ + { + label: 'Blog', + to: '/blog', + }, + { + label: 'GitHub', + href: 'https://github.com/davidmigloz/langchain_dart', + }, + ], + }, + ], + copyright: `Made with 💙 by the LangChain.dart Community`, + }, + prism: { + theme: prismThemes.vsLight, + darkTheme: prismThemes.vsDark, + additionalLanguages: ['yaml','dart','bash'], + }, + }), +}; + +export default config; diff --git a/docs_v2/firebase.json b/docs_v2/firebase.json new file mode 100644 index 00000000..340ed5b7 --- /dev/null +++ b/docs_v2/firebase.json @@ -0,0 +1,16 @@ +{ + "hosting": { + "public": "build", + "ignore": [ + "firebase.json", + "**/.*", + "**/node_modules/**" + ], + "rewrites": [ + { + "source": "**", + "destination": "/index.html" + } + ] + } +} diff --git a/docs_v2/package-lock.json b/docs_v2/package-lock.json new file mode 100644 index 00000000..21e6a165 --- /dev/null +++ b/docs_v2/package-lock.json @@ -0,0 +1,14683 @@ +{ + "name": "langchain-dart", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "langchain-dart", + "version": "0.0.0", + "dependencies": { + "@docusaurus/core": "^3.5.2", + "@docusaurus/preset-classic": "^3.5.2", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "prism-react-renderer": "^2.3.0", + "react": "^18.0.0", + "react-dom": "^18.0.0" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "^3.5.2", + "@docusaurus/types": "^3.5.2" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@algolia/autocomplete-core": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.9.3.tgz", + "integrity": "sha512-009HdfugtGCdC4JdXUbVJClA0q0zh24yyePn+KUGk3rP7j8FEe/m5Yo/z65gn6nP/cM39PxpzqKrL7A6fP6PPw==", + "dependencies": { + "@algolia/autocomplete-plugin-algolia-insights": "1.9.3", + "@algolia/autocomplete-shared": "1.9.3" + } + }, + "node_modules/@algolia/autocomplete-plugin-algolia-insights": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.9.3.tgz", + "integrity": "sha512-a/yTUkcO/Vyy+JffmAnTWbr4/90cLzw+CC3bRbhnULr/EM0fGNvM13oQQ14f2moLMcVDyAx/leczLlAOovhSZg==", + "dependencies": { + "@algolia/autocomplete-shared": "1.9.3" + }, + "peerDependencies": { + "search-insights": ">= 1 < 3" + } + }, + "node_modules/@algolia/autocomplete-preset-algolia": { + "version": "1.9.3", + "resolved": 
"https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.9.3.tgz", + "integrity": "sha512-d4qlt6YmrLMYy95n5TB52wtNDr6EgAIPH81dvvvW8UmuWRgxEtY0NJiPwl/h95JtG2vmRM804M0DSwMCNZlzRA==", + "dependencies": { + "@algolia/autocomplete-shared": "1.9.3" + }, + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/autocomplete-shared": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.9.3.tgz", + "integrity": "sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ==", + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/cache-browser-local-storage": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.24.0.tgz", + "integrity": "sha512-t63W9BnoXVrGy9iYHBgObNXqYXM3tYXCjDSHeNwnsc324r4o5UiVKUiAB4THQ5z9U5hTj6qUvwg/Ez43ZD85ww==", + "dependencies": { + "@algolia/cache-common": "4.24.0" + } + }, + "node_modules/@algolia/cache-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.24.0.tgz", + "integrity": "sha512-emi+v+DmVLpMGhp0V9q9h5CdkURsNmFC+cOS6uK9ndeJm9J4TiqSvPYVu+THUP8P/S08rxf5x2P+p3CfID0Y4g==" + }, + "node_modules/@algolia/cache-in-memory": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.24.0.tgz", + "integrity": "sha512-gDrt2so19jW26jY3/MkFg5mEypFIPbPoXsQGQWAi6TrCPsNOSEYepBMPlucqWigsmEy/prp5ug2jy/N3PVG/8w==", + "dependencies": { + "@algolia/cache-common": "4.24.0" + } + }, + "node_modules/@algolia/client-account": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.24.0.tgz", + "integrity": "sha512-adcvyJ3KjPZFDybxlqnf+5KgxJtBjwTPTeyG2aOyoJvx0Y8dUQAEOEVOJ/GBxX0WWNbmaSrhDURMhc+QeevDsA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-account/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-account/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-analytics": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.24.0.tgz", + "integrity": "sha512-y8jOZt1OjwWU4N2qr8G4AxXAzaa8DBvyHTWlHzX/7Me1LX8OayfgHexqrsL4vSBcoMmVw2XnVW9MhL+Y2ZDJXg==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + 
"node_modules/@algolia/client-analytics/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-analytics/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-common": { + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.5.3.tgz", + "integrity": "sha512-3rdSdreBL2LGYu4DWmUGlMhaGy1vy36Xp42LdbTFsW/y3bhW5viptMHI5A3PKT0hPEMZUn+te1iM/EWvLUuVGQ==", + "peer": true, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-personalization": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.24.0.tgz", + "integrity": "sha512-l5FRFm/yngztweU0HdUzz1rC4yoWCFo3IF+dVIVTfEPg906eZg5BOd1k0K6rZx5JzyyoP4LdmOikfkfGsKVE9w==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-personalization/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-search": { + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-5.5.3.tgz", + "integrity": "sha512-qrokD+uoNxchbiF9aP8niQd/9SZ6BgYg4WaesFaubHhr9DFvwGm4IePEMha8vQcc3fSsY6uL+gOtKB3J6RF0NQ==", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.5.3", + "@algolia/requester-browser-xhr": "5.5.3", + "@algolia/requester-fetch": "5.5.3", + "@algolia/requester-node-http": "5.5.3" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/events": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz", + "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==" + }, + "node_modules/@algolia/logger-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.24.0.tgz", + "integrity": "sha512-LLUNjkahj9KtKYrQhFKCzMx0BY3RnNP4FEtO+sBybCjJ73E8jNdaKJ/Dd8A/VA4imVHP5tADZ8pn5B8Ga/wTMA==" + }, + "node_modules/@algolia/logger-console": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.24.0.tgz", + "integrity": "sha512-X4C8IoHgHfiUROfoRCV+lzSy+LHMgkoEEU1BbKcsfnV0i0S20zyy0NLww9dwVHUWNfPPxdMU+/wKmLGYf96yTg==", + "dependencies": { + "@algolia/logger-common": "4.24.0" + } + }, + "node_modules/@algolia/recommend": { + "version": "4.24.0", + "resolved": 
"https://registry.npmjs.org/@algolia/recommend/-/recommend-4.24.0.tgz", + "integrity": "sha512-P9kcgerfVBpfYHDfVZDvvdJv0lEoCvzNlOy2nykyt5bK8TyieYyiD0lguIJdRZZYGre03WIAFf14pgE+V+IBlw==", + "dependencies": { + "@algolia/cache-browser-local-storage": "4.24.0", + "@algolia/cache-common": "4.24.0", + "@algolia/cache-in-memory": "4.24.0", + "@algolia/client-common": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/logger-common": "4.24.0", + "@algolia/logger-console": "4.24.0", + "@algolia/requester-browser-xhr": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/requester-node-http": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/requester-browser-xhr": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz", + "integrity": "sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/requester-node-http": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", + "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/@algolia/requester-browser-xhr": { + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.5.3.tgz", + "integrity": "sha512-LsfUPokiXEpvlYF7SwNjyyjkUX7IoW7oIhH6WkDUD4PCfEZkFbVplGQA0UrCiWOAbpb25P7mmP6+ldwjwqW6Kg==", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.5.3" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/requester-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.24.0.tgz", + "integrity": "sha512-k3CXJ2OVnvgE3HMwcojpvY6d9kgKMPRxs/kVohrwF5WMr2fnqojnycZkxPoEg+bXm8fi5BBfFmOqgYztRtHsQA==" + }, + "node_modules/@algolia/requester-fetch": { + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/@algolia/requester-fetch/-/requester-fetch-5.5.3.tgz", + "integrity": "sha512-RKaliEFHtVeD/fMxwrApkcI6ZxR+mU6pZna29r3NwVMpCXTJWWtlMpQmbr1RHzUsaAlpfv9pfGJN4nYPE8XWEg==", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.5.3" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/requester-node-http": { + "version": "5.5.3", + "resolved": 
"https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.5.3.tgz", + "integrity": "sha512-2wU+HlTVrVce7BMW2b3Gd62btk8B0jBbuKYYzu3OFeBD/aZa88eHABtjcjQCdw3x+wvkIPEc56UsZx9eHYLebg==", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.5.3" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/transporter": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.24.0.tgz", + "integrity": "sha512-86nI7w6NzWxd1Zp9q3413dRshDqAzSbsQjhcDhPIatEFiZrL1/TjnHL8S7jVKFePlIMzDsZWXAXwXzcok9c5oA==", + "dependencies": { + "@algolia/cache-common": "4.24.0", + "@algolia/logger-common": "4.24.0", + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.7.tgz", + "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==", + "dependencies": { + "@babel/highlight": "^7.24.7", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.24.7.tgz", + "integrity": "sha512-qJzAIcv03PyaWqxRgO4mSU3lihncDT296vnyuE2O8uA4w3UHWI4S3hgeZd1L8W1Bft40w9JxJ2b412iDUFFRhw==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.24.7.tgz", + "integrity": "sha512-nykK+LEK86ahTkX/3TgauT0ikKoNCfKHEaZYTUVupJdTLzGNvrblu4u6fa7DhZONAltdf8e662t/abY8idrd/g==", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.24.7", + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helpers": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/template": "^7.24.7", + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.24.7.tgz", + "integrity": "sha512-oipXieGC3i45Y1A41t4tAqpnEZWgB/lC6Ehh6+rOviR5XWpTtMmLN+fGjz9vOiNRt0p6RtO6DtD0pdU3vpqdSA==", + "dependencies": { + "@babel/types": "^7.24.7", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + 
"version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.24.7.tgz", + "integrity": "sha512-BaDeOonYvhdKw+JoMVkAixAAJzG2jVPIwWoKBPdYuY9b452e2rPuI9QPYh3KpofZ3pW2akOmwZLOiOsHMiqRAg==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.24.7.tgz", + "integrity": "sha512-xZeCVVdwb4MsDBkkyZ64tReWYrLRHlMN72vP7Bdm3OUOuyFZExhsHUUnuWnm2/XOlAJzR0LfPpB56WXZn0X/lA==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.24.7.tgz", + "integrity": "sha512-ctSdRHBi20qWOfy27RUb4Fhp07KSJ3sXcuSvTrXrc4aG8NSYDo1ici3Vhg9bg69y5bj0Mr1lh0aeEgTvc12rMg==", + "dependencies": { + "@babel/compat-data": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "browserslist": "^4.22.2", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.24.7.tgz", + "integrity": "sha512-kTkaDl7c9vO80zeX1rJxnuRpEsD5tA81yh11X1gQo+PhSti3JS+7qeZo9U4RHobKRiFPKaGK3svUAeb8D0Q7eg==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-member-expression-to-functions": "^7.24.7", + "@babel/helper-optimise-call-expression": "^7.24.7", + "@babel/helper-replace-supers": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.24.7.tgz", + "integrity": "sha512-03TCmXy2FtXJEZfbXDTSqq1fRJArk7lX9DOFC/47VthYcxyIOx+eXQmdo6DOQvrbpIix+KfXwvuXdFDZHxt+rA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "regexpu-core": "^5.3.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + 
"node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.2.tgz", + "integrity": "sha512-LV76g+C502biUK6AyZ3LK10vDpDyCzZnhZFXkH1L75zHPj68+qc8Zfpx2th+gzwA2MzyK+1g/3EPl62yFnVttQ==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.6", + "@babel/helper-plugin-utils": "^7.22.5", + "debug": "^4.1.1", + "lodash.debounce": "^4.0.8", + "resolve": "^1.14.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/helper-environment-visitor": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.24.7.tgz", + "integrity": "sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.24.7.tgz", + "integrity": "sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==", + "dependencies": { + "@babel/template": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-hoist-variables": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.24.7.tgz", + "integrity": "sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.24.7.tgz", + "integrity": "sha512-LGeMaf5JN4hAT471eJdBs/GK1DoYIJ5GCtZN/EsL6KUiiDZOvO/eKE11AMZJa2zP4zk4qe9V2O/hxAmkRc8p6w==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz", + "integrity": "sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.24.7.tgz", + "integrity": "sha512-1fuJEwIrp+97rM4RWdO+qrRsZlAeL1lQJoPqtCYWv0NL115XM93hIH4CSRln2w52SqvmY5hqdtauB6QFCDiZNQ==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-simple-access": "^7.24.7", + "@babel/helper-split-export-declaration": 
"^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.24.7.tgz", + "integrity": "sha512-jKiTsW2xmWwxT1ixIdfXUZp+P5yURx2suzLZr5Hi64rURpDYdMW0pv+Uf17EYk2Rd428Lx4tLsnjGJzYKDM/6A==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.8.tgz", + "integrity": "sha512-FFWx5142D8h2Mgr/iPVGH5G7w6jDn4jUSpZTyDnQO0Yn7Ks2Kuz6Pci8H6MPCoUJegd/UZQ3tAvfLCxQSnWWwg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.24.7.tgz", + "integrity": "sha512-9pKLcTlZ92hNZMQfGCHImUpDOlAgkkpqalWEeftW5FBya75k8Li2ilerxkM/uBEj01iBZXcCIB/bwvDYgWyibA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-wrap-function": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.24.7.tgz", + "integrity": "sha512-qTAxxBM81VEyoAY0TtLrx1oAEJc09ZK67Q9ljQToqCnA+55eNwCORaxlKyu+rNfX86o8OXRUSNUnrtsAZXM9sg==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-member-expression-to-functions": "^7.24.7", + "@babel/helper-optimise-call-expression": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.24.7.tgz", + "integrity": "sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.24.7.tgz", + "integrity": "sha512-IO+DLT3LQUElMbpzlatRASEyQtfhSE0+m465v++3jyyXeBTBUjtVZg28/gHeV5mrTJqvEKhKroBGAvhW+qPHiQ==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.7.tgz", + "integrity": "sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.7.tgz", + "integrity": "sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz", + "integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.24.7.tgz", + "integrity": "sha512-yy1/KvjhV/ZCL+SM7hBrvnZJ3ZuT9OuZgIJAGpPEToANvc3iM6iDvBnRjtElWibHU6n8/LPR/EjX9EtIEYO3pw==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.24.7.tgz", + "integrity": "sha512-N9JIYk3TD+1vq/wn77YnJOqMtfWhNewNE+DJV4puD2X7Ew9J4JvrzrFDfTfyv5EgEXVy9/Wt8QiOErzEmv5Ifw==", + "dependencies": { + "@babel/helper-function-name": "^7.24.7", + "@babel/template": "^7.24.7", + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.24.7.tgz", + "integrity": "sha512-NlmJJtvcw72yRJRcnCmGvSi+3jDEg8qFu3z0AFoymmzLx5ERVWyzd9kVXr7Th9/8yIJi2Zc6av4Tqz3wFs8QWg==", + "dependencies": { + "@babel/template": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.7.tgz", + "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==", + "dependencies": { + "@babel/helper-validator-identifier": "^7.24.7", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/highlight/node_modules/color-name": { + "version": "1.1.3", + "resolved": 
"https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/highlight/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/parser": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.7.tgz", + "integrity": "sha512-9uUYRm6OqQrCqQdG1iCBwBPZgN8ciDBro2nIOFaiRz1/BCxaI7CNvQbDHvsArAC7Tw9Hda/B3U+6ui9u4HWXPw==", + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-firefox-class-in-computed-class-key": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.24.7.tgz", + "integrity": "sha512-TiT1ss81W80eQsN+722OaeQMY/G4yTb4G9JrqeiDADs3N8lbPMGldWi9x8tyqCW5NLx1Jh2AvkE6r6QvEltMMQ==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.24.7.tgz", + "integrity": "sha512-unaQgZ/iRu/By6tsjMZzpeBZjChYfLYry6HrEXPoz3KmfF0sVBQ1l8zKMQ4xRGLWVsjuvB8nQfjNP/DcfEOCsg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.24.7.tgz", + "integrity": "sha512-+izXIbke1T33mY4MSNnrqhPXDz01WYhEf3yF5NbnUtkiNnm+XBZJl3kNfoK6NKmYlz/D07+l2GWVK/QfDkNCuQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/plugin-transform-optional-chaining": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { + "version": "7.24.7", + 
"resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.24.7.tgz", + "integrity": "sha512-utA4HuR6F4Vvcr+o4DnjL8fCOlgRFGbeeBEGNg3ZTrLFw6VWG5XmUrvcQ0FjIYMU2ST4XcR2Wsp7t9qOAPnxMg==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-export-namespace-from": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", + "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.24.7.tgz", + "integrity": 
"sha512-Ec3NRUMoi8gskrkBe3fNmEQfxDvY8bgfQpz6jlk/41kX9eUjvpyqWU7PBP/pLAvMaSQjbMNKJmvX57jP+M6bPg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.24.7.tgz", + "integrity": "sha512-hbX+lKKeUMGihnK8nvKqmXBInriT3GVjzXKFriV3YC6APGxMbP8RZNFwy91+hocLXq90Mta+HshoB31802bb8A==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.24.7.tgz", + "integrity": "sha512-6ddciUPe/mpMnOKv/U+RSd2vvVy+Yw/JfBB0ZHYjEZt9NLHmCUylNYlsbqCCS1Bffjlb0fCwC9Vqz+sBz6PsiQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + 
"version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.24.7.tgz", + "integrity": "sha512-c/+fVeJBB0FeKsFvwytYiUD+LBvhHjGSI0g446PRGdSVGZLRNArBUno2PETbAly3tpiNAQR5XaZ+JslxkotsbA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.24.7.tgz", + 
"integrity": "sha512-Dt9LQs6iEY++gXUwY03DNFat5C2NbO48jj+j/bSAz6b3HgPs39qcPiYt77fDObIcFwj3/C2ICX9YMwGflUoSHQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.24.7.tgz", + "integrity": "sha512-o+iF77e3u7ZS4AoAuJvapz9Fm001PuD2V3Lp6OSE4FYQke+cSewYtnek+THqGRWyQloRCyvWL1OkyfNEl9vr/g==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-remap-async-to-generator": "^7.24.7", + "@babel/plugin-syntax-async-generators": "^7.8.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.24.7.tgz", + "integrity": "sha512-SQY01PcJfmQ+4Ash7NE+rpbLFbmqA2GPIgqzxfFTL4t1FKRq4zTms/7htKpoCUI9OcFYgzqfmCdH53s6/jn5fA==", + "dependencies": { + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-remap-async-to-generator": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.24.7.tgz", + "integrity": "sha512-yO7RAz6EsVQDaBH18IDJcMB1HnrUn2FJ/Jslc/WtPPWcjhpUJXU/rjbwmluzp7v/ZzWcEhTMXELnnsz8djWDwQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.24.7.tgz", + "integrity": "sha512-Nd5CvgMbWc+oWzBsuaMcbwjJWAcp5qzrbg69SZdHSP7AMY0AbWFqFO0WTFCA1jxhMCwodRwvRec8k0QUbZk7RQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.24.7.tgz", + "integrity": "sha512-vKbfawVYayKcSeSR5YYzzyXvsDFWU2mD8U5TFeXtbCPLFUqe7GyCgvO6XDHzje862ODrOwy6WCPmKeWHbCFJ4w==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.24.7.tgz", + "integrity": "sha512-HMXK3WbBPpZQufbMG4B46A90PkuuhN9vBCb5T8+VAHqvAqvcLi+2cKoukcpmUYkszLhScU3l1iudhrks3DggRQ==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.7", + 
"@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-class-static-block": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, + "node_modules/@babel/plugin-transform-classes": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.24.7.tgz", + "integrity": "sha512-CFbbBigp8ln4FU6Bpy6g7sE8B/WmCmzvivzUC6xDAdWVsjYTXijpuuGJmYkAaoWAzcItGKT3IOAbxRItZ5HTjw==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-replace-supers": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.24.7.tgz", + "integrity": "sha512-25cS7v+707Gu6Ds2oY6tCkUwsJ9YIDbggd9+cu9jzzDgiNq7hR/8dkzxWfKWnTic26vsI3EsCXNd4iEB6e8esQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/template": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.24.7.tgz", + "integrity": "sha512-19eJO/8kdCQ9zISOf+SEUJM/bAUIsvY3YDnXZTupUCQ8LgrWnsG/gFB9dvXqdXnRXMAM8fvt7b0CBKQHNGy1mw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.24.7.tgz", + "integrity": "sha512-ZOA3W+1RRTSWvyqcMJDLqbchh7U4NRGqwRfFSVbOLS/ePIP4vHB5e8T8eXcuqyN1QkgKyj5wuW0lcS85v4CrSw==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.24.7.tgz", + "integrity": "sha512-JdYfXyCRihAe46jUIliuL2/s0x0wObgwwiGxw/UbgJBr20gQBThrokO4nYKgWkD7uBaqM7+9x5TU7NkExZJyzw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.24.7.tgz", + "integrity": "sha512-sc3X26PhZQDb3JhORmakcbvkeInvxz+A8oda99lj7J60QRuPZvNAk9wQlTBS1ZynelDrDmTU4pw1tyc5d5ZMUg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-dynamic-import": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + 
"peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.24.7.tgz", + "integrity": "sha512-Rqe/vSc9OYgDajNIK35u7ot+KeCoetqQYFXM4Epf7M7ez3lWlOjrDjrwMei6caCVhfdw+mIKD4cgdGNy5JQotQ==", + "dependencies": { + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.24.7.tgz", + "integrity": "sha512-v0K9uNYsPL3oXZ/7F9NNIbAj2jv1whUEtyA6aujhekLs56R++JDQuzRcP2/z4WX5Vg/c5lE9uWZA0/iUoFhLTA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.24.7.tgz", + "integrity": "sha512-wo9ogrDG1ITTTBsy46oGiN1dS9A7MROBTcYsfS8DtsImMkHk9JXJ3EWQM6X2SUw4x80uGPlwj0o00Uoc6nEE3g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-function-name": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.24.7.tgz", + "integrity": "sha512-U9FcnA821YoILngSmYkW6FjyQe2TyZD5pHt4EVIhmcTkrJw/3KqcrRSxuOo5tFZJi7TE19iDyI1u+weTI7bn2w==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.24.7.tgz", + "integrity": "sha512-2yFnBGDvRuxAaE/f0vfBKvtnvvqU8tGpMHqMNpTN2oWMKIR3NqFkjaAgGwawhqK/pIN2T3XdjGPdaG0vDhOBGw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-json-strings": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.24.7.tgz", + "integrity": "sha512-vcwCbb4HDH+hWi8Pqenwnjy+UiklO4Kt1vfspcQYFhJdpthSnW8XvWGyDZWKNVrVbVViI/S7K9PDJZiUmP2fYQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.24.7.tgz", + "integrity": "sha512-4D2tpwlQ1odXmTEIFWy9ELJcZHqrStlzK/dAOWYyxX3zT0iXQB6banjgeOJQXzEc4S0E0a5A+hahxPaEFYftsw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.24.7.tgz", + "integrity": "sha512-T/hRC1uqrzXMKLQ6UCwMT85S3EvqaBXDGf0FaMf4446Qx9vKwlghvee0+uuZcDUCZU5RuNi4781UQ7R308zzBw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.24.7.tgz", + "integrity": "sha512-9+pB1qxV3vs/8Hdmz/CulFB8w2tuu6EB94JZFsjdqxQokwGa9Unap7Bo2gGBGIvPmDIVvQrom7r5m/TCDMURhg==", + "dependencies": { + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.24.7.tgz", + "integrity": "sha512-iFI8GDxtevHJ/Z22J5xQpVqFLlMNstcLXh994xifFwxxGslr2ZXXLWgtBeLctOD63UFDArdvN6Tg8RFw+aEmjQ==", + "dependencies": { + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-simple-access": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.24.7.tgz", + "integrity": "sha512-GYQE0tW7YoaN13qFh3O1NCY4MPkUiAH3fiF7UcV/I3ajmDKEdG3l+UOcbAm4zUE3gnvUU+Eni7XrVKo9eO9auw==", + "dependencies": { + "@babel/helper-hoist-variables": "^7.24.7", + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.24.7.tgz", + "integrity": "sha512-3aytQvqJ/h9z4g8AsKPLvD4Zqi2qT+L3j7XoFFu1XBlZWEl2/1kWnhmAbxpLgPrHSY0M6UA02jyTiwUVtiKR6A==", + "dependencies": { + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.24.7.tgz", + "integrity": "sha512-/jr7h/EWeJtk1U/uz2jlsCioHkZk1JJZVcc8oQsJ1dUlaJD83f4/6Zeh2aHt9BIFokHIsSeDfhUmju0+1GPd6g==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-new-target": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.24.7.tgz", + "integrity": "sha512-RNKwfRIXg4Ls/8mMTza5oPF5RkOW8Wy/WgMAp1/F1yZ8mMbtwXW+HDoJiOsagWrAhI5f57Vncrmr9XeT4CVapA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.24.7.tgz", + "integrity": "sha512-Ts7xQVk1OEocqzm8rHMXHlxvsfZ0cEF2yomUqpKENHWMF4zKk175Y4q8H5knJes6PgYad50uuRmt3UJuhBw8pQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.24.7.tgz", + "integrity": "sha512-e6q1TiVUzvH9KRvicuxdBTUj4AdKSRwzIyFFnfnezpCfP2/7Qmbb8qbU2j7GODbl4JMkblitCQjKYUaX/qkkwA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.24.7.tgz", + "integrity": "sha512-4QrHAr0aXQCEFni2q4DqKLD31n2DL+RxcwnNjDFkSG0eNQ/xCavnRkfCUjsyqGC2OviNJvZOF/mQqZBw7i2C5Q==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.24.7.tgz", + "integrity": "sha512-A/vVLwN6lBrMFmMDmPPz0jnE6ZGx7Jq7d6sT/Ev4H65RER6pZ+kczlf1DthF5N0qaPHBsI7UXiE8Zy66nmAovg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-replace-supers": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.24.7.tgz", + "integrity": 
"sha512-uLEndKqP5BfBbC/5jTwPxLh9kqPWWgzN/f8w6UwAIirAEqiIVJWWY312X72Eub09g5KF9+Zn7+hT7sDxmhRuKA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.24.7.tgz", + "integrity": "sha512-tK+0N9yd4j+x/4hxF3F0e0fu/VdcxU18y5SevtyM/PCFlQvXbR0Zmlo2eBrKtVipGNFzpq56o8WsIIKcJFUCRQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.24.7.tgz", + "integrity": "sha512-yGWW5Rr+sQOhK0Ot8hjDJuxU3XLRQGflvT4lhlSY0DFvdb3TwKaY26CJzHtYllU0vT9j58hc37ndFPsqT1SrzA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.24.7.tgz", + "integrity": "sha512-COTCOkG2hn4JKGEKBADkA8WNb35TGkkRbI5iT845dB+NyqgO8Hn+ajPbSnIQznneJTa3d30scb6iz/DhH8GsJQ==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.24.7.tgz", + "integrity": "sha512-9z76mxwnwFxMyxZWEgdgECQglF2Q7cFLm0kMf8pGwt+GSJsY0cONKj/UuO4bOH0w/uAel3ekS4ra5CEAyJRmDA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.24.7.tgz", + "integrity": "sha512-EMi4MLQSHfd2nrCqQEWxFdha2gBCqU4ZcCng4WBGZ5CJL4bBRW0ptdqqDdeirGZcpALazVVNJqRmsO8/+oNCBA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-constant-elements": { + "version": "7.25.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.25.1.tgz", + "integrity": "sha512-SLV/giH/V4SmloZ6Dt40HjTGTAIkxn33TVIHxNGNvo8ezMhrxBkzisj4op1KZYPIOHFLqhv60OHvX+YRu4xbmQ==", + "dependencies": { + 
"@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-display-name": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.24.7.tgz", + "integrity": "sha512-H/Snz9PFxKsS1JLI4dJLtnJgCJRoo0AUm3chP6NYr+9En1JMKloheEiLIhlp5MDVznWo+H3AAC1Mc8lmUEpsgg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.24.7.tgz", + "integrity": "sha512-+Dj06GDZEFRYvclU6k4bme55GKBEWUmByM/eoKuqg4zTNQHiApWRhQph5fxQB2wAEFvRzL1tOEj1RJ19wJrhoA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-jsx": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-development": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.24.7.tgz", + "integrity": "sha512-QG9EnzoGn+Qar7rxuW+ZOsbWOt56FvvI93xInqsZDC5fsekx1AlIO4KIJ5M+D0p0SqSH156EpmZyXq630B8OlQ==", + "dependencies": { + "@babel/plugin-transform-react-jsx": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-pure-annotations": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.24.7.tgz", + "integrity": "sha512-PLgBVk3fzbmEjBJ/u8kFzOqS9tUeDjiaWud/rRym/yjCo/M9cASPlnrd2ZmmZpQT40fOOrvR8jh+n8jikrOhNA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.24.7.tgz", + "integrity": "sha512-lq3fvXPdimDrlg6LWBoqj+r/DEWgONuwjuOuQCSYgRroXDH/IdM1C0IZf59fL5cHLpjEH/O6opIRBbqv7ELnuA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "regenerator-transform": "^0.15.2" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.24.7.tgz", + "integrity": "sha512-0DUq0pHcPKbjFZCfTss/pGkYMfy3vFWydkUBd9r0GHpIyfs2eCDENvqadMycRS9wZCXR41wucAfJHJmwA0UmoQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.24.7.tgz", + "integrity": "sha512-YqXjrk4C+a1kZjewqt+Mmu2UuV1s07y8kqcUf4qYLnoqemhR4gRQikhdAhSVJioMjVTu6Mo6pAbaypEA3jY6fw==", + "dependencies": { + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.10.1", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.24.7.tgz", + "integrity": "sha512-KsDsevZMDsigzbA09+vacnLpmPH4aWjcZjXdyFKGzpplxhbeB4wYtury3vglQkg6KM/xEPKt73eCjPPf1PgXBA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-spread": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.24.7.tgz", + "integrity": "sha512-x96oO0I09dgMDxJaANcRyD4ellXFLLiWhuwDxKZX5g2rWP1bTPkBSwCYv96VDXVT1bD9aPj8tppr5ITIh8hBng==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.24.7.tgz", + "integrity": "sha512-kHPSIJc9v24zEml5geKg9Mjx5ULpfncj0wRpYtxbvKyTtHCYDkVE3aHQ03FrpEo4gEe2vrJJS1Y9CJTaThA52g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.24.7.tgz", + "integrity": "sha512-AfDTQmClklHCOLxtGoP7HkeMw56k1/bTQjwsfhL6pppo/M4TOBSq+jjBUBLmV/4oeFg4GWMavIl44ZeCtmmZTw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.24.7.tgz", + "integrity": "sha512-VtR8hDy7YLB7+Pet9IarXjg/zgCMSF+1mNS/EQEiEaUPoFXCVsHG64SIxcaaI2zJgRiv+YmgaQESUfWAdbjzgg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typescript": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.24.7.tgz", + "integrity": "sha512-iLD3UNkgx2n/HrjBesVbYX6j0yqn/sJktvbtKKgcaLIQ4bTTQ8obAypc1VpyHPD2y4Phh9zHOaAt8e/L14wCpw==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-typescript": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.24.7.tgz", + "integrity": "sha512-U3ap1gm5+4edc2Q/P+9VrBNhGkfnf+8ZqppY71Bo/pzZmXhhLdqgaUl6cuB07O1+AQJtCLfaOmswiNbSQ9ivhw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.24.7.tgz", + "integrity": "sha512-uH2O4OV5M9FZYQrwc7NdVmMxQJOCCzFeYudlZSzUAHRFeOujQefa92E74TQDVskNHCzOXoigEuoyzHDhaEaK5w==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.24.7.tgz", + "integrity": "sha512-hlQ96MBZSAXUq7ltkjtu3FJCCSMx/j629ns3hA3pXnBXjanNP0LHi+JpPeA81zaWgVK1VGH95Xuy7u0RyQ8kMg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.24.7.tgz", + "integrity": "sha512-2G8aAvF4wy1w/AGZkemprdGMRg5o6zPNhbHVImRz3lss55TYCBd6xStN19rt8XJHq20sqV0JbyWjOWwQRwV/wg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.24.7.tgz", + "integrity": "sha512-1YZNsc+y6cTvWlDHidMBsQZrZfEFjRIo/BZCT906PMdzOyXtSLTgqGdrpcuTDCXyd11Am5uQULtDIcCfnTc8fQ==", + "dependencies": { + "@babel/compat-data": "^7.24.7", + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "@babel/plugin-bugfix-firefox-class-in-computed-class-key": "^7.24.7", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.24.7", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.24.7", + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.24.7", + 
"@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", + "@babel/plugin-syntax-import-assertions": "^7.24.7", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.24.7", + "@babel/plugin-transform-async-generator-functions": "^7.24.7", + "@babel/plugin-transform-async-to-generator": "^7.24.7", + "@babel/plugin-transform-block-scoped-functions": "^7.24.7", + "@babel/plugin-transform-block-scoping": "^7.24.7", + "@babel/plugin-transform-class-properties": "^7.24.7", + "@babel/plugin-transform-class-static-block": "^7.24.7", + "@babel/plugin-transform-classes": "^7.24.7", + "@babel/plugin-transform-computed-properties": "^7.24.7", + "@babel/plugin-transform-destructuring": "^7.24.7", + "@babel/plugin-transform-dotall-regex": "^7.24.7", + "@babel/plugin-transform-duplicate-keys": "^7.24.7", + "@babel/plugin-transform-dynamic-import": "^7.24.7", + "@babel/plugin-transform-exponentiation-operator": "^7.24.7", + "@babel/plugin-transform-export-namespace-from": "^7.24.7", + "@babel/plugin-transform-for-of": "^7.24.7", + "@babel/plugin-transform-function-name": "^7.24.7", + "@babel/plugin-transform-json-strings": "^7.24.7", + "@babel/plugin-transform-literals": "^7.24.7", + "@babel/plugin-transform-logical-assignment-operators": "^7.24.7", + "@babel/plugin-transform-member-expression-literals": "^7.24.7", + "@babel/plugin-transform-modules-amd": "^7.24.7", + "@babel/plugin-transform-modules-commonjs": "^7.24.7", + "@babel/plugin-transform-modules-systemjs": "^7.24.7", + "@babel/plugin-transform-modules-umd": "^7.24.7", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.24.7", + "@babel/plugin-transform-new-target": "^7.24.7", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.24.7", + "@babel/plugin-transform-numeric-separator": "^7.24.7", + "@babel/plugin-transform-object-rest-spread": "^7.24.7", + "@babel/plugin-transform-object-super": "^7.24.7", + "@babel/plugin-transform-optional-catch-binding": "^7.24.7", + "@babel/plugin-transform-optional-chaining": "^7.24.7", + "@babel/plugin-transform-parameters": "^7.24.7", + "@babel/plugin-transform-private-methods": "^7.24.7", + "@babel/plugin-transform-private-property-in-object": "^7.24.7", + "@babel/plugin-transform-property-literals": "^7.24.7", + "@babel/plugin-transform-regenerator": "^7.24.7", + "@babel/plugin-transform-reserved-words": "^7.24.7", + "@babel/plugin-transform-shorthand-properties": "^7.24.7", + "@babel/plugin-transform-spread": "^7.24.7", + "@babel/plugin-transform-sticky-regex": "^7.24.7", + "@babel/plugin-transform-template-literals": 
"^7.24.7", + "@babel/plugin-transform-typeof-symbol": "^7.24.7", + "@babel/plugin-transform-unicode-escapes": "^7.24.7", + "@babel/plugin-transform-unicode-property-regex": "^7.24.7", + "@babel/plugin-transform-unicode-regex": "^7.24.7", + "@babel/plugin-transform-unicode-sets-regex": "^7.24.7", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.10.4", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "core-js-compat": "^3.31.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-env/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/preset-react": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.24.7.tgz", + "integrity": "sha512-AAH4lEkpmzFWrGVlHaxJB7RLH21uPQ9+He+eFLWHmF9IuFQVugz8eAsamaW0DXRrTfco5zj1wWtpdcXJUOfsag==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "@babel/plugin-transform-react-display-name": "^7.24.7", + "@babel/plugin-transform-react-jsx": "^7.24.7", + "@babel/plugin-transform-react-jsx-development": "^7.24.7", + "@babel/plugin-transform-react-pure-annotations": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-typescript": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.24.7.tgz", + "integrity": "sha512-SyXRe3OdWwIwalxDg5UtJnJQO+YPcTfwiIY2B0Xlddh9o7jpWLvv8X1RthIeDOxQ+O1ML5BLPCONToObyVQVuQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "@babel/plugin-syntax-jsx": "^7.24.7", + "@babel/plugin-transform-modules-commonjs": "^7.24.7", + "@babel/plugin-transform-typescript": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" + }, + "node_modules/@babel/runtime": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.7.tgz", + "integrity": "sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw==", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime-corejs3": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.24.7.tgz", + "integrity": "sha512-eytSX6JLBY6PVAeQa2bFlDx/7Mmln/gaEpsit5a3WEvjGfiIytEsgAwuIXCPM0xvw0v0cJn3ilq0/TvXrW0kgA==", + "dependencies": { + "core-js-pure": "^3.30.2", + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.24.7.tgz", + "integrity": "sha512-jYqfPrU9JTF0PmPy1tLYHW4Mp4KlgxJD9l2nP9fD6yT/ICi554DmrWBAEYpIelzjHf1msDP3PxJIRt/nFNfBig==", + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.24.7.tgz", + "integrity": "sha512-yb65Ed5S/QAcewNPh0nZczy9JdYXkkAbIsEo+P7BE7yO3txAY30Y/oPa3QkQ5It3xVG2kpKMg9MsdxZaO31uKA==", + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-hoist-variables": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/types": "^7.24.7", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.24.7.tgz", + "integrity": "sha512-XEFXSlxiG5td2EJRe8vOmRbaXVgfcBlszKujvVmWIK/UpywWljQCfzAv3RQCGujWQ1RD4YYWEAqDXfuJiy8f5Q==", + "dependencies": { + "@babel/helper-string-parser": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@docsearch/css": { + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.6.1.tgz", + "integrity": "sha512-VtVb5DS+0hRIprU2CO6ZQjK2Zg4QU5HrDM1+ix6rT0umsYvFvatMAnf97NHZlVWDaaLlx7GRfR/7FikANiM2Fg==" + }, + "node_modules/@docsearch/react": { + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.6.1.tgz", + "integrity": "sha512-qXZkEPvybVhSXj0K7U3bXc233tk5e8PfhoZ6MhPOiik/qUQxYC+Dn9DnoS7CxHQQhHfCvTiN0eY9M12oRghEXw==", + "dependencies": { + "@algolia/autocomplete-core": "1.9.3", + "@algolia/autocomplete-preset-algolia": "1.9.3", + "@docsearch/css": "3.6.1", + "algoliasearch": "^4.19.1" + }, + "peerDependencies": { + "@types/react": ">= 16.8.0 < 19.0.0", + "react": ">= 16.8.0 < 19.0.0", + "react-dom": ">= 16.8.0 < 19.0.0", + "search-insights": ">= 1 < 3" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + }, + "search-insights": { + "optional": true + } + } + }, 
+ "node_modules/@docusaurus/core": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.5.2.tgz", + "integrity": "sha512-4Z1WkhCSkX4KO0Fw5m/Vuc7Q3NxBG53NE5u59Rs96fWkMPZVSrzEPP16/Nk6cWb/shK7xXPndTmalJtw7twL/w==", + "dependencies": { + "@babel/core": "^7.23.3", + "@babel/generator": "^7.23.3", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-transform-runtime": "^7.22.9", + "@babel/preset-env": "^7.22.9", + "@babel/preset-react": "^7.22.5", + "@babel/preset-typescript": "^7.22.5", + "@babel/runtime": "^7.22.6", + "@babel/runtime-corejs3": "^7.22.6", + "@babel/traverse": "^7.22.8", + "@docusaurus/cssnano-preset": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "autoprefixer": "^10.4.14", + "babel-loader": "^9.1.3", + "babel-plugin-dynamic-import-node": "^2.3.3", + "boxen": "^6.2.1", + "chalk": "^4.1.2", + "chokidar": "^3.5.3", + "clean-css": "^5.3.2", + "cli-table3": "^0.6.3", + "combine-promises": "^1.1.0", + "commander": "^5.1.0", + "copy-webpack-plugin": "^11.0.0", + "core-js": "^3.31.1", + "css-loader": "^6.8.1", + "css-minimizer-webpack-plugin": "^5.0.1", + "cssnano": "^6.1.2", + "del": "^6.1.1", + "detect-port": "^1.5.1", + "escape-html": "^1.0.3", + "eta": "^2.2.0", + "eval": "^0.1.8", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "html-minifier-terser": "^7.2.0", + "html-tags": "^3.3.1", + "html-webpack-plugin": "^5.5.3", + "leven": "^3.1.0", + "lodash": "^4.17.21", + "mini-css-extract-plugin": "^2.7.6", + "p-map": "^4.0.0", + "postcss": "^8.4.26", + "postcss-loader": "^7.3.3", + "prompts": "^2.4.2", + "react-dev-utils": "^12.0.1", + "react-helmet-async": "^1.3.0", + "react-loadable": "npm:@docusaurus/react-loadable@6.0.0", + "react-loadable-ssr-addon-v5-slorber": "^1.0.1", + "react-router": "^5.3.4", + "react-router-config": "^5.1.1", + "react-router-dom": "^5.3.4", + "rtl-detect": "^1.0.4", + "semver": "^7.5.4", + "serve-handler": "^6.1.5", + "shelljs": "^0.8.5", + "terser-webpack-plugin": "^5.3.9", + "tslib": "^2.6.0", + "update-notifier": "^6.0.2", + "url-loader": "^4.1.1", + "webpack": "^5.88.1", + "webpack-bundle-analyzer": "^4.9.0", + "webpack-dev-server": "^4.15.1", + "webpack-merge": "^5.9.0", + "webpackbar": "^5.0.2" + }, + "bin": { + "docusaurus": "bin/docusaurus.mjs" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@mdx-js/react": "^3.0.0", + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/cssnano-preset": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.5.2.tgz", + "integrity": "sha512-D3KiQXOMA8+O0tqORBrTOEQyQxNIfPm9jEaJoALjjSjc2M/ZAWcUfPQEnwr2JB2TadHw2gqWgpZckQmrVWkytA==", + "dependencies": { + "cssnano-preset-advanced": "^6.1.2", + "postcss": "^8.4.38", + "postcss-sort-media-queries": "^5.2.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/logger": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.5.2.tgz", + "integrity": "sha512-LHC540SGkeLfyT3RHK3gAMK6aS5TRqOD4R72BEU/DE2M/TY8WwEUAMY576UUc/oNJXv8pGhBmQB6N9p3pt8LQw==", + "dependencies": { + "chalk": "^4.1.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/mdx-loader": { + "version": "3.5.2", + "resolved": 
"https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.5.2.tgz", + "integrity": "sha512-ku3xO9vZdwpiMIVd8BzWV0DCqGEbCP5zs1iHfKX50vw6jX8vQo0ylYo1YJMZyz6e+JFJ17HYHT5FzVidz2IflA==", + "dependencies": { + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "@mdx-js/mdx": "^3.0.0", + "@slorber/remark-comment": "^1.0.0", + "escape-html": "^1.0.3", + "estree-util-value-to-estree": "^3.0.1", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "image-size": "^1.0.2", + "mdast-util-mdx": "^3.0.0", + "mdast-util-to-string": "^4.0.0", + "rehype-raw": "^7.0.0", + "remark-directive": "^3.0.0", + "remark-emoji": "^4.0.0", + "remark-frontmatter": "^5.0.0", + "remark-gfm": "^4.0.0", + "stringify-object": "^3.3.0", + "tslib": "^2.6.0", + "unified": "^11.0.3", + "unist-util-visit": "^5.0.0", + "url-loader": "^4.1.1", + "vfile": "^6.0.1", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/module-type-aliases": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.5.2.tgz", + "integrity": "sha512-Z+Xu3+2rvKef/YKTMxZHsEXp1y92ac0ngjDiExRdqGTmEKtCUpkbNYH8v5eXo5Ls+dnW88n6WTa+Q54kLOkwPg==", + "dependencies": { + "@docusaurus/types": "3.5.2", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "@types/react-router-dom": "*", + "react-helmet-async": "*", + "react-loadable": "npm:@docusaurus/react-loadable@6.0.0" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/@docusaurus/plugin-content-blog": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.5.2.tgz", + "integrity": "sha512-R7ghWnMvjSf+aeNDH0K4fjyQnt5L0KzUEnUhmf1e3jZrv3wogeytZNN6n7X8yHcMsuZHPOrctQhXWnmxu+IRRg==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "cheerio": "1.0.0-rc.12", + "feed": "^4.2.2", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "reading-time": "^1.5.0", + "srcset": "^4.0.0", + "tslib": "^2.6.0", + "unist-util-visit": "^5.0.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/plugin-content-docs": "*", + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-docs": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.5.2.tgz", + "integrity": "sha512-Bt+OXn/CPtVqM3Di44vHjE7rPCEsRCB/DMo2qoOuozB9f7+lsdrHvD0QCHdBs0uhz6deYJDppAr2VgqybKPlVQ==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "@types/react-router-config": "^5.0.7", + "combine-promises": "^1.1.0", + "fs-extra": "^11.1.1", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "tslib": "^2.6.0", + "utility-types": "^3.10.0", + 
"webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-pages": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.5.2.tgz", + "integrity": "sha512-WzhHjNpoQAUz/ueO10cnundRz+VUtkjFhhaQ9jApyv1a46FPURO4cef89pyNIOMny1fjDz/NUN2z6Yi+5WUrCw==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "fs-extra": "^11.1.1", + "tslib": "^2.6.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-debug": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.5.2.tgz", + "integrity": "sha512-kBK6GlN0itCkrmHuCS6aX1wmoWc5wpd5KJlqQ1FyrF0cLDnvsYSnh7+ftdwzt7G6lGBho8lrVwkkL9/iQvaSOA==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "fs-extra": "^11.1.1", + "react-json-view-lite": "^1.2.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-analytics": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.5.2.tgz", + "integrity": "sha512-rjEkJH/tJ8OXRE9bwhV2mb/WP93V441rD6XnM6MIluu7rk8qg38iSxS43ga2V2Q/2ib53PcqbDEJDG/yWQRJhQ==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-gtag": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.5.2.tgz", + "integrity": "sha512-lm8XL3xLkTPHFKKjLjEEAHUrW0SZBSHBE1I+i/tmYMBsjCcUB5UJ52geS5PSiOCFVR74tbPGcPHEV/gaaxFeSA==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "@types/gtag.js": "^0.0.12", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-tag-manager": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.5.2.tgz", + "integrity": "sha512-QkpX68PMOMu10Mvgvr5CfZAzZQFx8WLlOiUQ/Qmmcl6mjGK6H21WLT5x7xDmcpCoKA/3CegsqIqBR+nA137lQg==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-sitemap": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.5.2.tgz", + "integrity": "sha512-DnlqYyRAdQ4NHY28TfHuVk414ft2uruP4QWCH//jzpHjqvKyXjj2fmDtI8RPUBh9K8iZKFMHRnLtzJKySPWvFA==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + 
"@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "fs-extra": "^11.1.1", + "sitemap": "^7.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/preset-classic": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.5.2.tgz", + "integrity": "sha512-3ihfXQ95aOHiLB5uCu+9PRy2gZCeSZoDcqpnDvf3B+sTrMvMTr8qRUzBvWkoIqc82yG5prCboRjk1SVILKx6sg==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/plugin-content-blog": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/plugin-content-pages": "3.5.2", + "@docusaurus/plugin-debug": "3.5.2", + "@docusaurus/plugin-google-analytics": "3.5.2", + "@docusaurus/plugin-google-gtag": "3.5.2", + "@docusaurus/plugin-google-tag-manager": "3.5.2", + "@docusaurus/plugin-sitemap": "3.5.2", + "@docusaurus/theme-classic": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-search-algolia": "3.5.2", + "@docusaurus/types": "3.5.2" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-classic": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.5.2.tgz", + "integrity": "sha512-XRpinSix3NBv95Rk7xeMF9k4safMkwnpSgThn0UNQNumKvmcIYjfkwfh2BhwYh/BxMXQHJ/PdmNh22TQFpIaYg==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/plugin-content-blog": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/plugin-content-pages": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-translations": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "copy-text-to-clipboard": "^3.2.0", + "infima": "0.2.0-alpha.44", + "lodash": "^4.17.21", + "nprogress": "^0.2.0", + "postcss": "^8.4.26", + "prism-react-renderer": "^2.3.0", + "prismjs": "^1.29.0", + "react-router-dom": "^5.3.4", + "rtlcss": "^4.1.0", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-common": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.5.2.tgz", + "integrity": "sha512-QXqlm9S6x9Ibwjs7I2yEDgsCocp708DrCrgHgKwg2n2AY0YQ6IjU0gAK35lHRLOvAoJUfCKpQAwUykB0R7+Eew==", + "dependencies": { + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "clsx": "^2.0.0", + "parse-numeric-range": "^1.3.0", + "prism-react-renderer": "^2.3.0", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/plugin-content-docs": "*", + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-search-algolia": { + "version": "3.5.2", + "resolved": 
"https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.5.2.tgz", + "integrity": "sha512-qW53kp3VzMnEqZGjakaV90sst3iN1o32PH+nawv1uepROO8aEGxptcq2R5rsv7aBShSRbZwIobdvSYKsZ5pqvA==", + "dependencies": { + "@docsearch/react": "^3.5.2", + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-translations": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "algoliasearch": "^4.18.0", + "algoliasearch-helper": "^3.13.3", + "clsx": "^2.0.0", + "eta": "^2.2.0", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-translations": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.5.2.tgz", + "integrity": "sha512-GPZLcu4aT1EmqSTmbdpVrDENGR2yObFEX8ssEFYTCiAIVc0EihNSdOIBTazUvgNqwvnoU1A8vIs1xyzc3LITTw==", + "dependencies": { + "fs-extra": "^11.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/types": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.5.2.tgz", + "integrity": "sha512-N6GntLXoLVUwkZw7zCxwy9QiuEXIcTVzA9AkmNw16oc0AP3SXLrMmDMMBIfgqwuKWa6Ox6epHol9kMtJqekACw==", + "dependencies": { + "@mdx-js/mdx": "^3.0.0", + "@types/history": "^4.7.11", + "@types/react": "*", + "commander": "^5.1.0", + "joi": "^17.9.2", + "react-helmet-async": "^1.3.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1", + "webpack-merge": "^5.9.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/utils": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.5.2.tgz", + "integrity": "sha512-33QvcNFh+Gv+C2dP9Y9xWEzMgf3JzrpL2nW9PopidiohS1nDcyknKRx2DWaFvyVTTYIkkABVSr073VTj/NITNA==", + "dependencies": { + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@svgr/webpack": "^8.1.0", + "escape-string-regexp": "^4.0.0", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "github-slugger": "^1.5.0", + "globby": "^11.1.0", + "gray-matter": "^4.0.3", + "jiti": "^1.20.0", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "micromatch": "^4.0.5", + "prompts": "^2.4.2", + "resolve-pathname": "^3.0.0", + "shelljs": "^0.8.5", + "tslib": "^2.6.0", + "url-loader": "^4.1.1", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/types": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } + } + }, + "node_modules/@docusaurus/utils-common": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.5.2.tgz", + "integrity": "sha512-i0AZjHiRgJU6d7faQngIhuHKNrszpL/SHQPgF1zH4H+Ij6E9NBYGy6pkcGWToIv7IVPbs+pQLh1P3whn0gWXVg==", + "dependencies": { + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/types": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } + } + }, + "node_modules/@docusaurus/utils-validation": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.5.2.tgz", + "integrity": 
"sha512-m+Foq7augzXqB6HufdS139PFxDC5d5q2QKZy8q0qYYvGdI6nnlNsGH4cIGsgBnV7smz+mopl3g4asbSDvMV0jA==", + "dependencies": { + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "fs-extra": "^11.2.0", + "joi": "^17.9.2", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==" + }, + "node_modules/@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.6.tgz", + "integrity": "sha512-1ZJTZebgqllO79ue2bm3rIGud/bOe0pP5BjSRCRxxYkEZS8STV7zN84UBbiYu7jy+eCKSnVIUgoWWE/tt+shMQ==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": 
"sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@leichtgewicht/ip-codec": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz", + "integrity": "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==" + }, + "node_modules/@mdx-js/mdx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-3.0.1.tgz", + "integrity": "sha512-eIQ4QTrOWyL3LWEe/bu6Taqzq2HQvHcyTMaOrI95P2/LmJE7AsfPfgJGuFLPVqBUE1BC1rik3VIhU+s9u72arA==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdx": "^2.0.0", + "collapse-white-space": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-build-jsx": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-util-to-js": "^2.0.0", + "estree-walker": "^3.0.0", + "hast-util-to-estree": "^3.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "markdown-extensions": "^2.0.0", + "periscopic": "^3.0.0", + "remark-mdx": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "source-map": "^0.7.0", + "unified": "^11.0.0", + "unist-util-position-from-estree": "^2.0.0", + "unist-util-stringify-position": "^4.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@mdx-js/react": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.0.1.tgz", + "integrity": "sha512-9ZrPIU4MGf6et1m1ov3zKf+q9+deetI51zprKB1D/z3NOb+rUxxtEl3mCjW5wTGh6VhRdwPueh1oRzi6ezkA8A==", + "dependencies": { + "@types/mdx": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=16", + "react": ">=16" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pnpm/config.env-replace": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz", + 
"integrity": "sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==", + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz", + "integrity": "sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==", + "dependencies": { + "graceful-fs": "4.2.10" + }, + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file/node_modules/graceful-fs": { + "version": "4.2.10", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", + "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==" + }, + "node_modules/@pnpm/npm-conf": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/@pnpm/npm-conf/-/npm-conf-2.2.2.tgz", + "integrity": "sha512-UA91GwWPhFExt3IizW6bOeY/pQ0BkuNwKjk9iQW9KqxluGCrg4VenZ0/L+2Y0+ZOtme72EVvg6v0zo3AMQRCeA==", + "dependencies": { + "@pnpm/config.env-replace": "^1.1.0", + "@pnpm/network.ca-file": "^1.0.1", + "config-chain": "^1.1.11" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@polka/url": { + "version": "1.0.0-next.25", + "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.25.tgz", + "integrity": "sha512-j7P6Rgr3mmtdkeDGTe0E/aYyWEWVtc5yFXtHCRHs28/jptDEWfaVOc5T7cblqy1XKPPfCxJc/8DwQ5YgLOZOVQ==" + }, + "node_modules/@sideway/address": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.5.tgz", + "integrity": "sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@sideway/formula": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==" + }, + "node_modules/@sideway/pinpoint": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", + "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==" + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" + }, + "node_modules/@sindresorhus/is": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz", + "integrity": "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/@slorber/remark-comment": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@slorber/remark-comment/-/remark-comment-1.0.0.tgz", + "integrity": "sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA==", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.1.0", + "micromark-util-symbol": "^1.0.1" + } + }, + "node_modules/@svgr/babel-plugin-add-jsx-attribute": { + "version": "8.0.0", + "resolved": 
"https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz", + "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-8.0.0.tgz", + "integrity": "sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-dynamic-title": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-8.0.0.tgz", + "integrity": "sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-em-dimensions": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-8.0.0.tgz", + "integrity": "sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-react-native-svg": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-8.1.0.tgz", + "integrity": "sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@svgr/babel-plugin-transform-svg-component": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-8.0.0.tgz", + "integrity": "sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw==", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-preset": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-8.1.0.tgz", + "integrity": "sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug==", + "dependencies": { + "@svgr/babel-plugin-add-jsx-attribute": "8.0.0", + "@svgr/babel-plugin-remove-jsx-attribute": "8.0.0", + "@svgr/babel-plugin-remove-jsx-empty-expression": "8.0.0", + "@svgr/babel-plugin-replace-jsx-attribute-value": "8.0.0", + "@svgr/babel-plugin-svg-dynamic-title": "8.0.0", + "@svgr/babel-plugin-svg-em-dimensions": "8.0.0", + "@svgr/babel-plugin-transform-react-native-svg": "8.1.0", + "@svgr/babel-plugin-transform-svg-component": "8.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/core": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/core/-/core-8.1.0.tgz", + "integrity": "sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==", + "dependencies": { + "@babel/core": "^7.21.3", + "@svgr/babel-preset": "8.1.0", + "camelcase": "^6.2.0", + "cosmiconfig": "^8.1.3", + "snake-case": "^3.0.4" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/hast-util-to-babel-ast": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-8.0.0.tgz", + "integrity": "sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q==", + "dependencies": { + "@babel/types": "^7.21.3", + "entities": "^4.4.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/plugin-jsx": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-8.1.0.tgz", + "integrity": "sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA==", + "dependencies": { + "@babel/core": "^7.21.3", + "@svgr/babel-preset": "8.1.0", + "@svgr/hast-util-to-babel-ast": "8.0.0", + "svg-parser": "^2.0.4" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/plugin-svgo": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-8.1.0.tgz", + "integrity": "sha512-Ywtl837OGO9pTLIN/onoWLmDQ4zFUycI1g76vuKGEz6evR/ZTJlJuz3G/fIkb6OVBJ2g0o6CGJzaEjfmEo3AHA==", + "dependencies": { + "cosmiconfig": "^8.1.3", + "deepmerge": "^4.3.1", + "svgo": "^3.0.2" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + 
"peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/webpack": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-8.1.0.tgz", + "integrity": "sha512-LnhVjMWyMQV9ZmeEy26maJk+8HTIbd59cH4F2MJ439k9DqejRisfFNGAPvRYlKETuh9LrImlS8aKsBgKjMA8WA==", + "dependencies": { + "@babel/core": "^7.21.3", + "@babel/plugin-transform-react-constant-elements": "^7.21.3", + "@babel/preset-env": "^7.20.2", + "@babel/preset-react": "^7.18.6", + "@babel/preset-typescript": "^7.21.0", + "@svgr/core": "8.1.0", + "@svgr/plugin-jsx": "8.1.0", + "@svgr/plugin-svgo": "8.1.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@szmarczak/http-timer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz", + "integrity": "sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==", + "dependencies": { + "defer-to-connect": "^2.0.1" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/@trysound/sax": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", + "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/@types/acorn": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/@types/acorn/-/acorn-4.0.6.tgz", + "integrity": "sha512-veQTnWP+1D/xbxVrPC3zHnCZRjSrKfhbMUlEA43iMZLu7EsnTtkJklIuwrCPbOi8YkvDQAiW05VQQFvvz9oieQ==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.5", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.5.tgz", + "integrity": "sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==", + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/bonjour": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.13.tgz", + "integrity": "sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.38", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", + "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect-history-api-fallback": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.4.tgz", + "integrity": "sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==", + "dependencies": { + "@types/express-serve-static-core": "*", + "@types/node": "*" + } + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/eslint": { + "version": "8.56.10", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.10.tgz", + "integrity": 
"sha512-Shavhk87gCtY2fhXDctcfS3e6FdxWkCx1iUZ9eEUbh7rTqlZT0/IzOkCOVt0fCjcFuZ9FPYfuezTBImfHCDBGQ==", + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.7", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", + "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/express": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.21.tgz", + "integrity": "sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==", + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.33", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "4.19.5", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.19.5.tgz", + "integrity": "sha512-y6W03tvrACO72aijJ5uF02FRq5cgDR9lUxddQ8vyF+GvmjJQqbzDcJngEjURc+ZsG31VI3hODNZJ2URj86pzmg==", + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/gtag.js": { + "version": "0.0.12", + "resolved": "https://registry.npmjs.org/@types/gtag.js/-/gtag.js-0.0.12.tgz", + "integrity": "sha512-YQV9bUsemkzG81Ea295/nF/5GijnD2Af7QhEofh7xu+kvCN6RdodgNwwGWXB5GMI3NoyvQo0odNctoH/qLMIpg==" + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/history": { + "version": "4.7.11", + "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", + "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==" + }, + "node_modules/@types/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==" + }, + "node_modules/@types/http-cache-semantics": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz", + "integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==" + }, + "node_modules/@types/http-errors": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.4.tgz", + "integrity": 
"sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==" + }, + "node_modules/@types/http-proxy": { + "version": "1.17.14", + "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.14.tgz", + "integrity": "sha512-SSrD0c1OQzlFX7pGu1eXxSEjemej64aaNPRhhVYUGqXh0BtldAAx37MG8btcumvpgKyZp1F5Gn3JkktdxiFv6w==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==" + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdx": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", + "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==" + }, + "node_modules/@types/mime": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz", + "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==" + }, + "node_modules/@types/ms": { + "version": "0.7.34", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz", + "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==" + }, + "node_modules/@types/node": { + "version": "20.14.7", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.7.tgz", + "integrity": "sha512-uTr2m2IbJJucF3KUxgnGOZvYbN0QgkGyWxG6973HCpMYFy2KfcgYuIwkJQMQkt1VbBMlvWRbpshFTLxnxCZjKQ==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@types/node-forge": { + "version": "1.3.11", + "resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-1.3.11.tgz", + "integrity": "sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/parse-json": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", + "integrity": 
"sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==" + }, + "node_modules/@types/prismjs": { + "version": "1.26.4", + "resolved": "https://registry.npmjs.org/@types/prismjs/-/prismjs-1.26.4.tgz", + "integrity": "sha512-rlAnzkW2sZOjbqZ743IHUhFcvzaGbqijwOu8QZnZCjfQzBqFE3s4lOTJEsxikImav9uzz/42I+O7YUs1mWgMlg==" + }, + "node_modules/@types/prop-types": { + "version": "15.7.12", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.12.tgz", + "integrity": "sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q==" + }, + "node_modules/@types/qs": { + "version": "6.9.15", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.15.tgz", + "integrity": "sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg==" + }, + "node_modules/@types/range-parser": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", + "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==" + }, + "node_modules/@types/react": { + "version": "18.3.3", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.3.tgz", + "integrity": "sha512-hti/R0pS0q1/xx+TsI73XIqk26eBsISZ2R0wUijXIngRK9R/e7Xw/cXVxQK7R5JjW+SV4zGcn5hXjudkN/pLIw==", + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-router": { + "version": "5.1.20", + "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", + "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*" + } + }, + "node_modules/@types/react-router-config": { + "version": "5.0.11", + "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.11.tgz", + "integrity": "sha512-WmSAg7WgqW7m4x8Mt4N6ZyKz0BubSj/2tVUMsAHp+Yd2AMwcSbeFq9WympT19p5heCFmF97R9eD5uUR/t4HEqw==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "^5.1.0" + } + }, + "node_modules/@types/react-router-dom": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.3.tgz", + "integrity": "sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "*" + } + }, + "node_modules/@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" + }, + "node_modules/@types/sax": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.7.tgz", + "integrity": "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/send": { + "version": "0.17.4", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.4.tgz", + "integrity": "sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==", + "dependencies": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "node_modules/@types/serve-index": { + "version": "1.9.4", + "resolved": 
"https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.4.tgz", + "integrity": "sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug==", + "dependencies": { + "@types/express": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "1.15.7", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.7.tgz", + "integrity": "sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==", + "dependencies": { + "@types/http-errors": "*", + "@types/node": "*", + "@types/send": "*" + } + }, + "node_modules/@types/sockjs": { + "version": "0.3.36", + "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.36.tgz", + "integrity": "sha512-MK9V6NzAS1+Ud7JV9lJLFqW85VbC9dq3LmwZCuBe4wBDgKC0Kj/jd8Xl+nSviU+Qc3+m7umHHyHg//2KSa0a0Q==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==" + }, + "node_modules/@types/ws": { + "version": "8.5.10", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.10.tgz", + "integrity": "sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/yargs": { + "version": "17.0.32", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.32.tgz", + "integrity": "sha512-xQ67Yc/laOG5uMfX/093MRlGGCIBzZMarVa+gfNKJxWAIgykYpVGkBdbqEzGDDfCrVUj6Hiff4mTZ5BA6TmAog==", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==" + }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==" + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.12.1.tgz", + "integrity": "sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg==", + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", + "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==" + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", + "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==" + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz", + "integrity": 
"sha512-nzJwQw99DNDKr9BVCOZcLuJJUlqkJh+kVzVl6Fmq/tI5ZtEyWT1KZMyOXltXLZJmDtvLCDgwsyrkohEtopTXCw==" + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", + "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", + "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==" + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz", + "integrity": "sha512-Jif4vfB6FJlUlSbgEMHUyk1j234GTNG9dBJ4XJdOySoj518Xj0oGsNi59cUQF4RRMS9ouBUxDDdyBVfPTypa5g==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-buffer": "1.12.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/wasm-gen": "1.12.1" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", + "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "node_modules/@webassemblyjs/leb128": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", + "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", + "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==" + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.12.1.tgz", + "integrity": "sha512-1DuwbVvADvS5mGnXbE+c9NfA8QRcZ6iKquqjjmR10k6o+zzsRVesil54DKexiowcFCPdr/Q0qaMgB01+SQ1u6g==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-buffer": "1.12.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/helper-wasm-section": "1.12.1", + "@webassemblyjs/wasm-gen": "1.12.1", + "@webassemblyjs/wasm-opt": "1.12.1", + "@webassemblyjs/wasm-parser": "1.12.1", + "@webassemblyjs/wast-printer": "1.12.1" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.12.1.tgz", + "integrity": "sha512-TDq4Ojh9fcohAw6OIMXqiIcTq5KUXTGRkVxbSo1hQnSy6lAM5GSdfwWeSxpAo0YzgsgF182E/U0mDNhuA0tW7w==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.12.1", + "resolved": 
"https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.12.1.tgz", + "integrity": "sha512-Jg99j/2gG2iaz3hijw857AVYekZe2SAskcqlWIZXjji5WStnOpVoat3gQfT/Q5tb2djnCjBtMocY/Su1GfxPBg==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-buffer": "1.12.1", + "@webassemblyjs/wasm-gen": "1.12.1", + "@webassemblyjs/wasm-parser": "1.12.1" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.12.1.tgz", + "integrity": "sha512-xikIi7c2FHXysxXe3COrVUPSheuBtpcfhbpFj4gmu7KRLYOzANztwUU0IbsqvMqzuNK2+glRGWCEqZo1WCLyAQ==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.12.1.tgz", + "integrity": "sha512-+X4WAlOisVWQMikjbcvY2e0rwPsKQ9F688lksZhBcPycBBuii3O7m8FACbDMWDojpAqvjIncrG8J0XHKyQfVeA==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.12.0.tgz", + "integrity": "sha512-RTvkC4w+KNXrM39/lWCUaG0IbRkWdCv7W/IOW9oU6SawyxulvkQy5HQPVTKxEjczcUvapcrw3cFx/60VN/NRNw==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-attributes": { + "version": "1.9.5", + "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz", + "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==", + "peerDependencies": { + "acorn": "^8" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": 
"https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.3", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.3.tgz", + "integrity": "sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw==", + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/address": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz", + "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ajv": { + "version": "8.16.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.16.0.tgz", + "integrity": "sha512-F0twR8U1ZU67JIEtekUcLkXkoO5mMMmgGD8sK/xUFzJ805jxHQl92hImFAqqXMyMYjSPOyUPAwHYhB72g5sTXw==", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/algoliasearch": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.24.0.tgz", + "integrity": "sha512-bf0QV/9jVejssFBmz2HQLxUadxk574t4iwjCKp5E7NBzwKkrDEhKPISIIjAU/p6K5qDx3qoeh4+26zWN1jmw3g==", + "dependencies": { + "@algolia/cache-browser-local-storage": "4.24.0", + "@algolia/cache-common": "4.24.0", + "@algolia/cache-in-memory": "4.24.0", + "@algolia/client-account": "4.24.0", + "@algolia/client-analytics": "4.24.0", + "@algolia/client-common": "4.24.0", + "@algolia/client-personalization": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/logger-common": "4.24.0", + "@algolia/logger-console": "4.24.0", + "@algolia/recommend": "4.24.0", + "@algolia/requester-browser-xhr": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/requester-node-http": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/algoliasearch-helper": { + "version": "3.22.5", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.22.5.tgz", + "integrity": 
"sha512-lWvhdnc+aKOKx8jyA3bsdEgHzm/sglC4cYdMG4xSQyRiPLJVJtH/IVYZG3Hp6PkTEhQqhyVYkeP9z2IlcHJsWw==", + "dependencies": { + "@algolia/events": "^4.0.1" + }, + "peerDependencies": { + "algoliasearch": ">= 3.1 < 6" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/requester-browser-xhr": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz", + "integrity": "sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/requester-node-http": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", + "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-align/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/ansi-align/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-html-community": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", + "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", + "engines": [ + "node >= 0.8.0" + ], + "bin": { + "ansi-html": "bin/ansi-html" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": 
"4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/astring": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz", + "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==", + "bin": { + "astring": "bin/astring" + } + }, + "node_modules/at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/autoprefixer": { + "version": "10.4.20", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz", + "integrity": "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "browserslist": "^4.23.3", + "caniuse-lite": "^1.0.30001646", + "fraction.js": "^4.3.7", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/babel-loader": { + "version": "9.1.3", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.1.3.tgz", + "integrity": "sha512-xG3ST4DglodGf8qSwv0MdeWLhrDsw/32QMdTO5T1ZIp9gQur0HkCyFs7Awskr10JKXFXwpAhiCuYX5oGXnRGbw==", + "dependencies": { + "find-cache-dir": "^4.0.0", + "schema-utils": "^4.0.0" + 
}, + "engines": { + "node": ">= 14.15.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0", + "webpack": ">=5" + } + }, + "node_modules/babel-plugin-dynamic-import-node": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", + "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", + "dependencies": { + "object.assign": "^4.1.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.11", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.11.tgz", + "integrity": "sha512-sMEJ27L0gRHShOh5G54uAAPaiCOygY/5ratXuiyb2G46FmlSpc9eFCzYVyDiPxfNbwzA7mYahmjQc5q+CZQ09Q==", + "dependencies": { + "@babel/compat-data": "^7.22.6", + "@babel/helper-define-polyfill-provider": "^0.6.2", + "semver": "^6.3.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.4.tgz", + "integrity": "sha512-25J6I8NGfa5YkCDogHRID3fVCadIR8/pGl1/spvCkzb6lVn6SR3ojpx9nOn9iEBcUsjY24AmdKm5khcfKdylcg==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.1", + "core-js-compat": "^3.36.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.2.tgz", + "integrity": "sha512-2R25rQZWP63nGwaAswvDazbPXfrM3HwVoBXK6HcqeKrSrL/JqcC/rDcf95l4r7LXLyxDXc8uQDa064GubtCABg==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/batch": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", + "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==" + }, + "node_modules/big.js": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", + "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", + "engines": { + "node": "*" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": 
"sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/body-parser": { + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.11.0", + "raw-body": "2.5.2", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/body-parser/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/body-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/body-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/bonjour-service": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.2.1.tgz", + "integrity": "sha512-oSzCS2zV14bh2kji6vNe7vrpJYCHGvcZnlffFQ1MEoX/WOeQ/teD8SYWKR942OI3INjq8OMNJlbPK5LLLUxFDw==", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "multicast-dns": "^7.2.5" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==" + }, + "node_modules/boxen": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz", + "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^6.2.0", + "chalk": "^4.1.2", + "cli-boxes": "^3.0.0", + "string-width": "^5.0.1", + "type-fest": "^2.5.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.0.1" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/browserslist": { + "version": "4.23.3", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz", + "integrity": "sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "caniuse-lite": "^1.0.30001646", + "electron-to-chromium": "^1.5.4", + "node-releases": "^2.0.18", + "update-browserslist-db": "^1.1.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" + }, + "node_modules/bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/cacheable-lookup": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz", + "integrity": "sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==", + "engines": { + "node": ">=14.16" + } + }, + "node_modules/cacheable-request": { + "version": "10.2.14", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-10.2.14.tgz", + "integrity": "sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==", + "dependencies": { + "@types/http-cache-semantics": "^4.0.2", + "get-stream": "^6.0.1", + "http-cache-semantics": "^4.1.1", + "keyv": "^4.5.3", + "mimic-response": "^4.0.0", + "normalize-url": "^8.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/camel-case": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", + "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", + "dependencies": { + "pascal-case": "^3.1.2", + "tslib": "^2.0.3" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": 
"sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caniuse-api": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", + "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "dependencies": { + "browserslist": "^4.0.0", + "caniuse-lite": "^1.0.0", + "lodash.memoize": "^4.1.2", + "lodash.uniq": "^4.5.0" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001662", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001662.tgz", + "integrity": "sha512-sgMUVwLmGseH8ZIrm1d51UbrhqMCH3jvS7gF/M6byuHOnKyLOBL7W8yz5V02OHwgLGA36o/AFhWzzh4uc5aqTA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "engines": { + "node": ">=10" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": 
"sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/cheerio": { + "version": "1.0.0-rc.12", + "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz", + "integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==", + "dependencies": { + "cheerio-select": "^2.1.0", + "dom-serializer": "^2.0.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "htmlparser2": "^8.0.1", + "parse5": "^7.0.0", + "parse5-htmlparser2-tree-adapter": "^7.0.0" + }, + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/cheeriojs/cheerio?sponsor=1" + } + }, + "node_modules/cheerio-select": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz", + "integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==", + "dependencies": { + "boolbase": "^1.0.0", + "css-select": "^5.1.0", + "css-what": "^6.1.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chrome-trace-event": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.4.tgz", + "integrity": "sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==", + "engines": { + "node": ">=6.0" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/clean-css": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.3.tgz", + "integrity": "sha512-D5J+kHaVb/wKSFcyyV75uCn8fiY4sV38XJoe4CUyGQ+mOU/fMVYUdH1hJC+CJQ5uY3EnW27SbJYS4X8BiLrAFg==", + "dependencies": { + "source-map": "~0.6.0" + }, + "engines": { + "node": ">= 10.0" + } + }, + "node_modules/clean-css/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/cli-boxes": { + 
"version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-table3": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz", + "integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==", + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "@colors/colors": "1.5.0" + } + }, + "node_modules/cli-table3/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/cli-table3/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/clone-deep": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "dependencies": { + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/collapse-white-space": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz", + "integrity": "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/colord": { + "version": "2.9.3", + "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==" + }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==" + }, + 
"node_modules/combine-promises": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.2.0.tgz", + "integrity": "sha512-VcQB1ziGD0NXrhKxiwyNbCDmRzs/OShMs2GqW2DlU2A/Sd0nQxE1oWDAE5O0ygSx5mgQOn9eIFh7yKPgFRVkPQ==", + "engines": { + "node": ">=10" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/common-path-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/common-path-prefix/-/common-path-prefix-3.0.0.tgz", + "integrity": "sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w==" + }, + "node_modules/compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "dependencies": { + "mime-db": ">= 1.43.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compressible/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compression": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", + "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", + "dependencies": { + "accepts": "~1.3.5", + "bytes": "3.0.0", + "compressible": "~2.0.16", + "debug": "2.6.9", + "on-headers": "~1.0.2", + "safe-buffer": "5.1.2", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/compression/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/compression/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/compression/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "node_modules/config-chain": { + "version": "1.1.13", + "resolved": 
"https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", + "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", + "dependencies": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "node_modules/configstore": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/configstore/-/configstore-6.0.0.tgz", + "integrity": "sha512-cD31W1v3GqUlQvbBCGcXmd2Nj9SvLDOP1oQ0YFuLETufzSPaKp11rYBsSOm7rCsW3OnIRAFM3OxRhceaXNYHkA==", + "dependencies": { + "dot-prop": "^6.0.1", + "graceful-fs": "^4.2.6", + "unique-string": "^3.0.0", + "write-file-atomic": "^3.0.3", + "xdg-basedir": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/yeoman/configstore?sponsor=1" + } + }, + "node_modules/connect-history-api-fallback": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", + "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/consola": { + "version": "2.15.3", + "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", + "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==" + }, + "node_modules/content-disposition": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", + "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==" + }, + "node_modules/cookie": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" + }, + "node_modules/copy-text-to-clipboard": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz", + "integrity": "sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", + "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", + "dependencies": { + "fast-glob": "^3.2.11", + "glob-parent": "^6.0.1", + "globby": 
"^13.1.1", + "normalize-path": "^3.0.0", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/globby": { + "version": "13.2.2", + "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", + "integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", + "dependencies": { + "dir-glob": "^3.0.1", + "fast-glob": "^3.3.0", + "ignore": "^5.2.4", + "merge2": "^1.4.1", + "slash": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin/node_modules/slash": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", + "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/core-js": { + "version": "3.37.1", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.37.1.tgz", + "integrity": "sha512-Xn6qmxrQZyB0FFY8E3bgRXei3lWDJHhvI+u0q9TKIYM49G8pAr0FgnnrFRAmsbptZL1yxRADVXn+x5AGsbBfyw==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-compat": { + "version": "3.37.1", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.37.1.tgz", + "integrity": "sha512-9TNiImhKvQqSUkOvk/mMRZzOANTiEVC7WaBNhHcKM7x+/5E1l5NvsysR19zuDQScE8k+kfQXWRN3AtS/eOSHpg==", + "dependencies": { + "browserslist": "^4.23.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-pure": { + "version": "3.37.1", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.37.1.tgz", + "integrity": "sha512-J/r5JTHSmzTxbiYYrzXg9w1VpqrYt+gexenBE9pugeyhwPZTAEJddyiReJWsLO6uNQ8xJZFbod6XC7KKwatCiA==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "node_modules/cosmiconfig": { + "version": "8.3.6", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", + "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", + "dependencies": { + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + 
"peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/crypto-random-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-4.0.0.tgz", + "integrity": "sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA==", + "dependencies": { + "type-fest": "^1.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/crypto-random-string/node_modules/type-fest": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", + "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/css-declaration-sorter": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-7.2.0.tgz", + "integrity": "sha512-h70rUM+3PNFuaBDTLe8wF/cdWu+dOZmb7pJt8Z2sedYbAcQVQV/tEchueg3GWxwqS0cxtbxmaHEdkNACqcvsow==", + "engines": { + "node": "^14 || ^16 || >=18" + }, + "peerDependencies": { + "postcss": "^8.0.9" + } + }, + "node_modules/css-loader": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.11.0.tgz", + "integrity": "sha512-CTJ+AEQJjq5NzLga5pE39qdiSV56F8ywCIsqNIRF0r7BDgWsN25aazToqAFg7ZrtA/U016xudB3ffgweORxX7g==", + "dependencies": { + "icss-utils": "^5.1.0", + "postcss": "^8.4.33", + "postcss-modules-extract-imports": "^3.1.0", + "postcss-modules-local-by-default": "^4.0.5", + "postcss-modules-scope": "^3.2.0", + "postcss-modules-values": "^4.0.0", + "postcss-value-parser": "^4.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/css-minimizer-webpack-plugin": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-5.0.1.tgz", + "integrity": "sha512-3caImjKFQkS+ws1TGcFn0V1HyDJFq1Euy589JlD6/3rV2kj+w7r5G9WDMgSHvpvXHNZ2calVypZWuEDQd9wfLg==", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "cssnano": "^6.0.1", + "jest-worker": "^29.4.3", + "postcss": "^8.4.24", + "schema-utils": "^4.0.1", + "serialize-javascript": "^6.0.1" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@parcel/css": { + "optional": true + }, + "@swc/css": { + "optional": true + }, + "clean-css": { + "optional": true + }, + "csso": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "lightningcss": { + "optional": true + } + } + }, + 
"node_modules/css-select": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", + "integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.1.0", + "domhandler": "^5.0.2", + "domutils": "^3.0.1", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/css-tree": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", + "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", + "dependencies": { + "mdn-data": "2.0.30", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "node_modules/css-what": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", + "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==", + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cssnano": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-6.1.2.tgz", + "integrity": "sha512-rYk5UeX7VAM/u0lNqewCdasdtPK81CgX8wJFLEIXHbV2oldWRgJAsZrdhRXkV1NJzA2g850KiFm9mMU2HxNxMA==", + "dependencies": { + "cssnano-preset-default": "^6.1.2", + "lilconfig": "^3.1.1" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/cssnano" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-preset-advanced": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-6.1.2.tgz", + "integrity": "sha512-Nhao7eD8ph2DoHolEzQs5CfRpiEP0xa1HBdnFZ82kvqdmbwVBUr2r1QuQ4t1pi+D1ZpqpcO4T+wy/7RxzJ/WPQ==", + "dependencies": { + "autoprefixer": "^10.4.19", + "browserslist": "^4.23.0", + "cssnano-preset-default": "^6.1.2", + "postcss-discard-unused": "^6.0.5", + "postcss-merge-idents": "^6.0.3", + "postcss-reduce-idents": "^6.0.3", + "postcss-zindex": "^6.0.2" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-preset-default": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-6.1.2.tgz", + "integrity": "sha512-1C0C+eNaeN8OcHQa193aRgYexyJtU8XwbdieEjClw+J9d94E41LwT6ivKH0WT+fYwYWB0Zp3I3IZ7tI/BbUbrg==", + "dependencies": { + "browserslist": "^4.23.0", + "css-declaration-sorter": "^7.2.0", + "cssnano-utils": "^4.0.2", + "postcss-calc": "^9.0.1", + "postcss-colormin": "^6.1.0", + "postcss-convert-values": "^6.1.0", + "postcss-discard-comments": "^6.0.2", + "postcss-discard-duplicates": "^6.0.3", + "postcss-discard-empty": "^6.0.3", + "postcss-discard-overridden": "^6.0.2", + "postcss-merge-longhand": "^6.0.5", + "postcss-merge-rules": "^6.1.1", + "postcss-minify-font-values": "^6.1.0", + "postcss-minify-gradients": "^6.0.3", + "postcss-minify-params": "^6.1.0", + 
"postcss-minify-selectors": "^6.0.4", + "postcss-normalize-charset": "^6.0.2", + "postcss-normalize-display-values": "^6.0.2", + "postcss-normalize-positions": "^6.0.2", + "postcss-normalize-repeat-style": "^6.0.2", + "postcss-normalize-string": "^6.0.2", + "postcss-normalize-timing-functions": "^6.0.2", + "postcss-normalize-unicode": "^6.1.0", + "postcss-normalize-url": "^6.0.2", + "postcss-normalize-whitespace": "^6.0.2", + "postcss-ordered-values": "^6.0.2", + "postcss-reduce-initial": "^6.1.0", + "postcss-reduce-transforms": "^6.0.2", + "postcss-svgo": "^6.0.3", + "postcss-unique-selectors": "^6.0.4" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-utils": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-4.0.2.tgz", + "integrity": "sha512-ZR1jHg+wZ8o4c3zqf1SIUSTIvm/9mU343FMR6Obe/unskbvpGhZOo1J6d/r8D1pzkRQYuwbcH3hToOuoA2G7oQ==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/csso": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/csso/-/csso-5.0.5.tgz", + "integrity": "sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ==", + "dependencies": { + "css-tree": "~2.2.0" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/csso/node_modules/css-tree": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.2.1.tgz", + "integrity": "sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA==", + "dependencies": { + "mdn-data": "2.0.28", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/csso/node_modules/mdn-data": { + "version": "2.0.28", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.28.tgz", + "integrity": "sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g==" + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" + }, + "node_modules/debounce": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz", + "integrity": "sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==" + }, + "node_modules/debug": { + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.5.tgz", + "integrity": "sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", + "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/decompress-response": { + "version": "6.0.0", + 
"resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/decompress-response/node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/default-gateway": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", + "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", + "dependencies": { + "execa": "^5.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "engines": { + "node": ">=10" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-lazy-prop": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "engines": { + "node": ">=8" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/del": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", + "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", + "dependencies": { + 
"globby": "^11.0.1", + "graceful-fs": "^4.2.4", + "is-glob": "^4.0.1", + "is-path-cwd": "^2.2.0", + "is-path-inside": "^3.0.2", + "p-map": "^4.0.0", + "rimraf": "^3.0.2", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/detect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", + "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" + }, + "node_modules/detect-port": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.6.1.tgz", + "integrity": "sha512-CmnVc+Hek2egPx1PeTFVta2W78xy2K/9Rkf6cC4T59S50tVnzKj+tnx5mmx5lwvCkujZ4uRrpRSuV+IVs3f90Q==", + "dependencies": { + "address": "^1.0.1", + "debug": "4" + }, + "bin": { + "detect": "bin/detect-port.js", + "detect-port": "bin/detect-port.js" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/detect-port-alt": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", + "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", + "dependencies": { + "address": "^1.0.1", + "debug": "^2.6.0" + }, + "bin": { + "detect": "bin/detect-port", + "detect-port": "bin/detect-port" + }, + "engines": { + "node": ">= 4.2.1" + } + }, + "node_modules/detect-port-alt/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/detect-port-alt/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + 
"node": ">=8" + } + }, + "node_modules/dns-packet": { + "version": "5.6.1", + "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", + "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", + "dependencies": { + "@leichtgewicht/ip-codec": "^2.0.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/dom-converter": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", + "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "dependencies": { + "utila": "~0.4" + } + }, + "node_modules/dom-serializer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", + "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ] + }, + "node_modules/domhandler": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", + "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "dependencies": { + "domelementtype": "^2.3.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", + "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", + "dependencies": { + "dom-serializer": "^2.0.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/dot-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", + "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/dot-prop": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-6.0.1.tgz", + "integrity": "sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==", + "dependencies": { + "is-obj": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/dot-prop/node_modules/is-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/duplexer": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", + "integrity": 
"sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.26", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.26.tgz", + "integrity": "sha512-Z+OMe9M/V6Ep9n/52+b7lkvYEps26z4Yz3vjWL1V61W0q+VLF1pOHhMY17sa4roz4AWmULSI8E6SAojZA5L0YQ==" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + }, + "node_modules/emojilib": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/emojilib/-/emojilib-2.4.0.tgz", + "integrity": "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==" + }, + "node_modules/emojis-list": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/emoticon": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.1.0.tgz", + "integrity": "sha512-VWZfnxqwNcc51hIy/sbOdEem6D+cVtpPzEEtVAFdaas30+1dgkyaOQ4sQ6Bp0tOMqWO1v+HQfYaoodOkdhK6SQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.17.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.0.tgz", + "integrity": "sha512-dwDPwZL0dmye8Txp2gzFmA6sxALaSvdRDjPH0viLcKrtlOL3tw62nWWweVD1SdILDTJrbrL6tdWVN58Wo6U3eA==", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": 
"sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.5.3.tgz", + "integrity": "sha512-i1gCgmR9dCl6Vil6UKPI/trA69s08g/syhiDK9TG0Nf1RJjjFI+AzoWW7sPufzkgYAn861skuCwJa0pIIHYxvg==" + }, + "node_modules/escalade": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-goat": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-4.0.0.tgz", + "integrity": "sha512-2Sd4ShcWxbx6OY1IHyla/CVNwvg7XwZVoXZHcSu9w9SReNP1EzzD5T8NWKIR38fIqEns9kDWKUQTXXAmlDrdPg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": 
"sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-util-attach-comments": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz", + "integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-build-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz", + "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-walker": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-to-js": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz", + "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-value-to-estree": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.1.2.tgz", + "integrity": "sha512-S0gW2+XZkmsx00tU2uJ4L9hUT7IFabbml9pHh2WQqFmAbxit++YGZne0sKJbNwkj9Wvg9E4uqWl4nCIFQMmfag==", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/remcohaszing" + } + }, + "node_modules/estree-util-visit": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz", + "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eta": { + "version": "2.2.0", + "resolved": 
"https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", + "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==", + "engines": { + "node": ">=6.0.0" + }, + "funding": { + "url": "https://github.com/eta-dev/eta?sponsor=1" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/eval": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", + "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", + "dependencies": { + "@types/node": "*", + "require-like": ">= 0.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/express": { + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", + "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.2", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.6.0", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.2.0", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.7", + "qs": "6.11.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.18.0", + "serve-static": "1.15.0", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/express/node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + 
"node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/express/node_modules/path-to-regexp": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", + "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + }, + "node_modules/express/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, + "node_modules/fast-url-parser": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", + "integrity": "sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==", + "dependencies": { + "punycode": "^1.3.2" + } + }, + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fault": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/fault/-/fault-2.0.1.tgz", + "integrity": 
"sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==", + "dependencies": { + "format": "^0.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/faye-websocket": { + "version": "0.11.4", + "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", + "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", + "dependencies": { + "websocket-driver": ">=0.5.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/feed": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz", + "integrity": "sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==", + "dependencies": { + "xml-js": "^1.6.11" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/file-loader": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", + "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/file-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/file-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/file-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/file-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/filesize": { + "version": "8.0.7", + "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz", + "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": 
"sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", + "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/finalhandler/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/find-cache-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-4.0.0.tgz", + "integrity": "sha512-9ZonPT4ZAK4a+1pUPVPZJapbi7O5qbbJPdYw/NOQWZZbVLdDTYM3A4R9z/DpAM08IDaFGsvPgiGZ82WEwUDWjg==", + "dependencies": { + "common-path-prefix": "^3.0.0", + "pkg-dir": "^7.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/find-up": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz", + "integrity": "sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==", + "dependencies": { + "locate-path": "^7.1.0", + "path-exists": "^5.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.6", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", + "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz", + "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==", + "dependencies": { + "@babel/code-frame": "^7.8.3", + "@types/json-schema": "^7.0.5", + "chalk": "^4.1.0", + "chokidar": "^3.4.2", + "cosmiconfig": "^6.0.0", + "deepmerge": "^4.2.2", + "fs-extra": "^9.0.0", + "glob": "^7.1.6", + "memfs": "^3.1.2", + "minimatch": "^3.0.4", + "schema-utils": "2.7.0", + "semver": "^7.3.2", + "tapable": "^1.0.0" + }, + "engines": 
{ + "node": ">=10", + "yarn": ">=1.0.0" + }, + "peerDependencies": { + "eslint": ">= 6", + "typescript": ">= 2.7", + "vue-template-compiler": "*", + "webpack": ">= 4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + }, + "vue-template-compiler": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", + "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.7.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", + "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", + "dependencies": { + "@types/json-schema": "^7.0.4", + "ajv": "^6.12.2", + "ajv-keywords": "^3.4.1" + }, + "engines": { + "node": ">= 8.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", + "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/form-data-encoder": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-2.1.4.tgz", + "integrity": 
"sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==", + "engines": { + "node": ">= 14.17" + } + }, + "node_modules/format": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", + "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fraction.js": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", + "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fs-extra": { + "version": "11.2.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz", + "integrity": "sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/fs-monkey": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.6.tgz", + "integrity": "sha512-b1FMfwetIKymC0eioW7mTywihSQE4oLzQn1dB6rZB5fx/3NpNEdAWeCSMB+60/AeT0TCXsxzAlcYVEFCTAksWg==" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": 
"^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-own-enumerable-property-symbols": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", + "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/github-slugger": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" + }, + "node_modules/global-dirs": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", + "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", + "dependencies": { + "ini": "2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/global-dirs/node_modules/ini": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", + "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "dependencies": { + "global-prefix": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": 
"sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "dependencies": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix/node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/got": { + "version": "12.6.1", + "resolved": "https://registry.npmjs.org/got/-/got-12.6.1.tgz", + "integrity": "sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==", + "dependencies": { + "@sindresorhus/is": "^5.2.0", + "@szmarczak/http-timer": "^5.0.1", + "cacheable-lookup": "^7.0.0", + "cacheable-request": "^10.2.8", + "decompress-response": "^6.0.0", + "form-data-encoder": "^2.1.2", + "get-stream": "^6.0.1", + "http2-wrapper": "^2.1.10", + "lowercase-keys": "^3.0.0", + "p-cancelable": "^3.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/got?sponsor=1" + } + }, + "node_modules/got/node_modules/@sindresorhus/is": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-5.6.0.tgz", + "integrity": "sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + }, + "node_modules/gray-matter": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", + "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", + "dependencies": { + "js-yaml": "^3.13.1", + "kind-of": "^6.0.2", + "section-matter": "^1.0.0", + "strip-bom-string": "^1.0.0" + }, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/gray-matter/node_modules/argparse": { + 
"version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/gray-matter/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/gzip-size": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", + "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "dependencies": { + "duplexer": "^0.1.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/handle-thing": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", + "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-yarn": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-3.0.0.tgz", + "integrity": "sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hast-util-from-parse5": { + "version": "8.0.1", + "resolved": 
"https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.1.tgz", + "integrity": "sha512-Er/Iixbc7IEa7r/XLtuG52zoqn/b3Xng/w6aZQ0xGVxzhw5xUFxcRqdPzP6yFi/4HBYRaifaI5fQ1RH8n0ZeOQ==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "hastscript": "^8.0.0", + "property-information": "^6.0.0", + "vfile": "^6.0.0", + "vfile-location": "^5.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", + "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw": { + "version": "9.0.4", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.0.4.tgz", + "integrity": "sha512-LHE65TD2YiNsHD3YuXcKPHXPLuYh/gjp12mOfU8jxSrm1f/yJpsb0F/KKljS6U9LJoP0Ux+tCe8iJ2AsPzTdgA==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "@ungap/structured-clone": "^1.0.0", + "hast-util-from-parse5": "^8.0.0", + "hast-util-to-parse5": "^8.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "parse5": "^7.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-estree": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.0.tgz", + "integrity": "sha512-lfX5g6hqVh9kjS/B9E2gSkvHH4SZNiQFiqWS0x9fENzEl+8W12RqdRxX6d/Cwxi30tPQs3bIO+aolQJNp1bIyw==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-attach-comments": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^0.4.0", + "unist-util-position": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.0.tgz", + "integrity": "sha512-H/y0+IWPdsLLS738P8tDnrQ8Z+dj12zQQ6WC11TIM21C8WFVoIxcqWXf2H3hiTVZjF1AWqoimGwrTWecWrnmRQ==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + 
"url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime/node_modules/inline-style-parser": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.4.tgz", + "integrity": "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q==" + }, + "node_modules/hast-util-to-jsx-runtime/node_modules/style-to-object": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.8.tgz", + "integrity": "sha512-xT47I/Eo0rwJmaXC4oilDGDWLohVhR6o/xAQcPQN8q6QBuZVL8qMYL85kLmST5cPjAorwvqIA4qXTRQoYHaL6g==", + "dependencies": { + "inline-style-parser": "0.2.4" + } + }, + "node_modules/hast-util-to-parse5": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz", + "integrity": "sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-8.0.0.tgz", + "integrity": "sha512-dMOtzCEd3ABUeSIISmrETiKuyydk1w0pa+gE/uormcTpSYuaNJPbX1NU3JLyscSLjwAQM8bWMhhIlnCqnRvDTw==", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^4.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "bin": { + "he": "bin/he" + } + }, + "node_modules/history": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", + "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", + "dependencies": { + "@babel/runtime": "^7.1.2", + "loose-envify": "^1.2.0", + "resolve-pathname": "^3.0.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0", + "value-equal": "^1.0.1" + } + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/hpack.js": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", + "integrity": 
"sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", + "dependencies": { + "inherits": "^2.0.1", + "obuf": "^1.0.0", + "readable-stream": "^2.0.1", + "wbuf": "^1.1.0" + } + }, + "node_modules/hpack.js/node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "node_modules/hpack.js/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/hpack.js/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/hpack.js/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/html-entities": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.5.2.tgz", + "integrity": "sha512-K//PSRMQk4FZ78Kyau+mZurHn3FH0Vwr+H36eE0rPbeYkRRi9YxceYPhuN60UwWorxyKHhqoAJl2OFKa4BVtaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/mdevils" + }, + { + "type": "patreon", + "url": "https://patreon.com/mdevils" + } + ] + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==" + }, + "node_modules/html-minifier-terser": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-7.2.0.tgz", + "integrity": "sha512-tXgn3QfqPIpGl9o+K5tpcj3/MN4SfLtsx2GWwBC3SSd0tXQGyF3gsSqad8loJgKZGM3ZxbYDd5yhiBIdWpmvLA==", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "~5.3.2", + "commander": "^10.0.0", + "entities": "^4.4.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.15.1" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": "^14.13.1 || >=16.0.0" + } + }, + "node_modules/html-minifier-terser/node_modules/commander": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "engines": { + "node": ">=14" + } + }, + "node_modules/html-tags": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/html-webpack-plugin": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.6.0.tgz", + "integrity": "sha512-iwaY4wzbe48AfKLZ/Cc8k0L+FKG6oSNRaZ8x5A/T/IVDGyXcbHncM9TdDa93wn0FsSm82FhTKW7f3vS61thXAw==", + "dependencies": { + "@types/html-minifier-terser": "^6.0.0", + "html-minifier-terser": "^6.0.2", + "lodash": "^4.17.21", + "pretty-error": "^4.0.0", + "tapable": "^2.0.0" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/html-webpack-plugin" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.20.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/html-webpack-plugin/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "engines": { + "node": ">= 12" + } + }, + "node_modules/html-webpack-plugin/node_modules/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "^5.2.2", + "commander": "^8.3.0", + "he": "^1.2.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.10.0" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/htmlparser2": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", + "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "entities": "^4.4.0" + } + }, + "node_modules/http-cache-semantics": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" + }, + "node_modules/http-deceiver": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", + "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==" + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" 
+ } + }, + "node_modules/http-parser-js": { + "version": "0.5.8", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz", + "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==" + }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-proxy-middleware": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", + "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", + "dependencies": { + "@types/http-proxy": "^1.17.8", + "http-proxy": "^1.18.1", + "is-glob": "^4.0.1", + "is-plain-obj": "^3.0.0", + "micromatch": "^4.0.2" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "@types/express": "^4.17.13" + }, + "peerDependenciesMeta": { + "@types/express": { + "optional": true + } + } + }, + "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", + "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/http2-wrapper": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz", + "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==", + "dependencies": { + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.2.0" + }, + "engines": { + "node": ">=10.19.0" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/icss-utils": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", + "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/ignore": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/image-size": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.1.1.tgz", + "integrity": 
"sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ==", + "dependencies": { + "queue": "6.0.2" + }, + "bin": { + "image-size": "bin/image-size.js" + }, + "engines": { + "node": ">=16.x" + } + }, + "node_modules/immer": { + "version": "9.0.21", + "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", + "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/immer" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-lazy": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", + "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/infima": { + "version": "0.2.0-alpha.44", + "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.44.tgz", + "integrity": "sha512-tuRkUSO/lB3rEhLJk25atwAjgLuzq070+pOW8XcvpHky/YbENnRRdPd85IBkyeTgttmOy5ah+yHYsK1HhUd4lQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" + }, + "node_modules/inline-style-parser": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz", + "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" + }, + "node_modules/interpret": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", + "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/ipaddr.js": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.2.0.tgz", + "integrity": "sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-ci": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", + "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", + "dependencies": { + "ci-info": "^3.2.0" + }, + "bin": { + "is-ci": "bin.js" + } + }, + 
"node_modules/is-core-module": { + "version": "2.14.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.14.0.tgz", + "integrity": "sha512-a5dFJih5ZLYlRtDc0dZWP7RiKr6xIKzmn/oAYCDvdLThadVgyJwlaoQPmRtMSpz+rk0OGAgIu+TcM9HUF0fk1A==", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-installed-globally": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", + "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", + "dependencies": { + "global-dirs": "^3.0.0", + "is-path-inside": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-npm": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-6.0.0.tgz", + "integrity": "sha512-JEjxbSmtPSt1c8XTkVrlujcXdKV1/tvuQ7GwKcAlyiVLeYFQ2VHat8xfrDJsIkhCdF/tZ7CiIR3sy141c6+gPQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", + "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-path-cwd": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", + "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-reference": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", + "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/is-regexp": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", + "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-root": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", + "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" + }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": 
"sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-yarn-global": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.4.1.tgz", + "integrity": "sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jiti": { + "version": "1.21.6", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.6.tgz", + "integrity": "sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/joi": { + "version": "17.13.3", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.3.tgz", + "integrity": "sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==", + "dependencies": { + "@hapi/hoek": "^9.3.0", + "@hapi/topo": "^5.1.0", + "@sideway/address": "^4.1.5", + "@sideway/formula": "^3.0.1", + "@sideway/pinpoint": "^2.0.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + 
"node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "engines": { + "node": ">=6" + } + }, + "node_modules/latest-version": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-7.0.0.tgz", + "integrity": "sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==", + "dependencies": { + "package-json": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/launch-editor": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.8.0.tgz", + "integrity": 
"sha512-vJranOAJrI/llyWGRQqiDM+adrw+k83fvmmx3+nV47g3+36xM15jE+zyZ6Ffel02+xSvuM0b2GDRosXZkbb6wA==", + "dependencies": { + "picocolors": "^1.0.0", + "shell-quote": "^1.8.1" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz", + "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" + }, + "node_modules/loader-runner": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", + "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "engines": { + "node": ">=6.11.5" + } + }, + "node_modules/loader-utils": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + }, + "engines": { + "node": ">=8.9.0" + } + }, + "node_modules/locate-path": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", + "integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==", + "dependencies": { + "p-locate": "^6.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==" + }, + "node_modules/lodash.uniq": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + 
} + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lower-case": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", + "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "dependencies": { + "tslib": "^2.0.3" + } + }, + "node_modules/lowercase-keys": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", + "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/markdown-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", + "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/markdown-table": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz", + "integrity": "sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mdast-util-directive": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.0.0.tgz", + "integrity": "sha512-JUpYOqKI4mM3sZcNxmF/ox04XYFFkNwr0CFlrQIkCwbvH0xzMCqkMqAde9wRd80VAhaUrwFwKm2nxretdT1h7Q==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz", + "integrity": "sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": 
"sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.1.tgz", + "integrity": "sha512-aJEUyzZ6TzlsX2s5B4Of7lN7EQtAxvtradMMglCQDyaTFgse6CmtmdJ15ElnVRlCg1vpNyVtbem0PWzlNieZsA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-frontmatter": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz", + "integrity": "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "escape-string-regexp": "^5.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-frontmatter/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz", + "integrity": "sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": 
"sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz", + "integrity": "sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + 
"mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", + "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.3.tgz", + "integrity": "sha512-bfOjvNt+1AcbPLTFMFWY149nJz0OjmewJs3LQQ5pIyVGxP4CdOqNVJL6kTaM5c68p8q82Xv3nCyFfUnuEcH3UQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", + "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": 
"^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.0.tgz", + "integrity": "sha512-SR2VnIEdVNCJbP6y7kVTJgPLifdr8WEU440fQec7qHoHOUz/oJ2jmNRqdDQ3rbiStOXb2mCDGTuwsK5OPUgYlQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdn-data": { + "version": "2.0.30", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", + "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==" + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/memfs": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", + "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + "dependencies": { + "fs-monkey": "^1.0.4" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/micromark": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.0.tgz", + "integrity": "sha512-o/sd0nMof8kYff+TqcDx3VSrgBTcZpSvYcAHIfHhv5VAuNmisCxjhx6YmxS8PFEpb9z5WKWKPdzf0jM23ro3RQ==", + "funding": [ + { + 
"type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.1.tgz", + "integrity": "sha512-CUQyKr1e///ZODyD1U3xit6zXwy1a8q2a1S1HKtIlmgvurrEpaw/Y9y6KSIbF8P59cn/NjzHyO+Q2fAyYLQrAA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": 
"sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-directive": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-3.0.2.tgz", + "integrity": "sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "parse-entities": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-frontmatter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-frontmatter/-/micromark-extension-frontmatter-2.0.0.tgz", + "integrity": "sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg==", + "dependencies": { + "fault": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": 
"sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + 
"resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": 
"OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.0.tgz", + "integrity": "sha512-Ub2ncQv+fwD70/l4ou27b4YzfNaCJOvyX4HxXU15m7mpYY+rjuWzsLIPZHJL253Z643RpbcP1oeIJlQ/SKW67g==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-mdx-expression": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.0.tgz", + "integrity": "sha512-sI0nwhUDz97xyzqJAbHQhp5TfaxEvZZZ2JDqUo+7NvyIYG6BZ5CPPqj2ogUoPJlmXHBnyZUzISg9+oUmU6tUjQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": 
"sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-mdx-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.1.tgz", + "integrity": "sha512-vNuFb9czP8QCtAQcEJn0UJQJZA8Dk6DXKBqx+bg/w0WGuSxDxNr7hErW89tHUY31dUW4NqEOWwmEUNhjTFmHkg==", + "dependencies": { + "@types/acorn": "^4.0.0", + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-mdx-md": { + "version": "2.0.0", + 
"resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz", + "integrity": "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz", + "integrity": "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==", + "dependencies": { + "acorn": "^8.0.0", + "acorn-jsx": "^5.0.0", + "micromark-extension-mdx-expression": "^3.0.0", + "micromark-extension-mdx-jsx": "^3.0.0", + "micromark-extension-mdx-md": "^2.0.0", + "micromark-extension-mdxjs-esm": "^3.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz", + "integrity": "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.0.tgz", + "integrity": "sha512-j9DGrQLm/Uhl2tCzcbLhy5kXsgkHUrjJHg4fFAeoMRwJmJerT9aw4FEhIbZStWN8A3qMwOp1uzHr4UL8AInxtA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + 
"micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-destination/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-destination/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-label": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.0.tgz", + "integrity": "sha512-RR3i96ohZGde//4WSe/dJsxOX6vxIg9TimLAS3i4EhBAFx8Sm5SmqVfR8E87DPSR31nEAjZfbt91OMZWcNgdZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-mdx-expression": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.2.tgz", + "integrity": "sha512-5E5I2pFzJyg2CtemqAbcyCktpHXuJbABnsb32wX2U8IQKhhVFBqkcZR5LRm1WVoFqa4kTueZK4abep7wdo9nrw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + 
"micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-space": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz", + "integrity": "sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-space/node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-title": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.0.tgz", + "integrity": "sha512-jY8CSxmpWLOxS+t8W+FG3Xigc0RDQA9bKMY/EwILvsesiRniiVMejYTE4wumNc2f4UbAa4WsHqe3J1QS1sli+A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": 
"https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.0.tgz", + "integrity": "sha512-28kbwaBjc5yAI1XadbdPYHX/eDnqaUFVikLwrO7FDnKG7lpgxnvk/XGRhX/PN0mOZ+dBSZ+LgunHS+6tYQAzhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": 
"sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-character": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.2.0.tgz", + "integrity": "sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-character/node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.0.tgz", + "integrity": "sha512-anK8SWmNphkXdaKgz5hJvGa7l00qmcaUQoMYsBwDlSKFKjc6gjGXPDw3FNL3Nbwq5L8gE+RCbGqTw49FK5Qyvg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.0.tgz", + "integrity": "sha512-S0ze2R9GH+fu41FA7pbSqNWObo/kzwf8rN/+IGlW/4tC6oACOs8B++bh+i9bVyNnwCcuksbFwsBme5OCKXCwIw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + 
"micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.0.tgz", + "integrity": "sha512-vZZio48k7ON0fVS3CUgFatWHoKbbLTK/rT7pzpJ4Bjp5JjkZeasRfrS9wsBdDJK2cJLHMckXZdzPSSr1B8a4oQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.1.tgz", + "integrity": "sha512-bmkNc7z8Wn6kgjZmVHOX3SowGmVdhYS7yBpMnuMnPzDq/6xwVA604DuOXMZTO1lvq01g+Adfa0pE2UKGlxL1XQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.0.tgz", + "integrity": "sha512-r4Sc6leeUTn3P6gk20aFMj2ntPwn6qpDZqWvYmAG6NgvFTIlj4WtrAudLi65qYoaGdXYViXYw2pkmn7QnIFasA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + 
"decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-encode": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.0.tgz", + "integrity": "sha512-pS+ROfCXAGLWCOc8egcBvT0kf27GoWMqtdarNfDcjb6YLuV5cM3ioG45Ys2qOVqeqSbjaKg72vU+Wby3eddPsA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-events-to-acorn": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.2.tgz", + "integrity": "sha512-Fk+xmBrOv9QZnEDguL9OI9/NQQp6Hz4FuQ4YmCb/5V7+9eAh1s6AYSvL20kHkD67YIg7EpE54TiSlcsf3vyZgA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/acorn": "^4.0.0", + "@types/estree": "^1.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-util-events-to-acorn/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.0.tgz", + "integrity": "sha512-xNn4Pqkj2puRhKdKTm8t1YHC/BAjx6CEwRFXntTaRf/x16aqka6ouVoutm+QdkISTlT7e2zU7U4ZdlDLJd2Mcw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": 
"https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.0.tgz", + "integrity": "sha512-2xhYT0sfo85FMrUPtHcPo2rrp1lwbDEEzpx7jiH2xXJLqBuy4H0GgXk5ToU8IEwoROtXuL8ND0ttVa4rNqYK3w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-normalize-identifier/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.0.tgz", + "integrity": "sha512-6KU6qO7DZ7GJkaCgwBNtplXCvGkJToU86ybBAUdavvgsCiG8lSSvYxr9MhwmQ+udpzywHsl4RpGJsYWG1pDOcA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.0.tgz", + "integrity": "sha512-WhYv5UEcZrbAtlsnPuChHUAsu/iBPOVaEVsntLBIdpibO0ddy8OzavZz3iL2xVvBZOpolujSliP65Kq0/7KIYw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.1.tgz", + "integrity": "sha512-jZNtiFl/1aY73yS3UGQkutD0UbhTt68qnRpw2Pifmz5wV9h8gOVsN70v+Lq/f1rKaU/W8pxRe8y8Q9FX1AOe1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-symbol": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz", + "integrity": "sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-types": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.0.tgz", + "integrity": "sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub 
Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromatch": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.7.tgz", + "integrity": "sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "dependencies": { + "mime-db": "~1.33.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/mimic-response": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz", + "integrity": "sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mini-css-extract-plugin": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.9.0.tgz", + "integrity": "sha512-Zs1YsZVfemekSZG+44vBsYTLQORkPMwnlv+aehcxK/NLKC+EGhDB39/YePYYqx/sTk6NnYpuqikhSn7+JIevTA==", + "dependencies": { + "schema-utils": "^4.0.0", + "tapable": "^2.2.1" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + } + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/mrmime": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.0.tgz", + "integrity": "sha512-eu38+hdgojoyq63s+yTpN4XMBdt5l8HhMhc4VKLO9KM5caLIBvUm4thi7fFaxyTmCKeNnXZ5pAlBwCUnhA09uw==", + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/multicast-dns": { + "version": "7.2.5", + "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", + "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", + "dependencies": { + "dns-packet": "^5.2.2", + "thunky": "^1.0.2" + }, + "bin": { + "multicast-dns": "cli.js" + } + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" + }, + "node_modules/no-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", + "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", + "dependencies": { + "lower-case": "^2.0.2", + "tslib": "^2.0.3" + } + }, + "node_modules/node-emoji": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-2.1.3.tgz", + "integrity": "sha512-E2WEOVsgs7O16zsURJ/eH8BqhF029wGpEOnv7Urwdo2wmQanOACwJQh0devF9D9RhoZru0+9JXIS0dBXIAz+lA==", + "dependencies": { + "@sindresorhus/is": "^4.6.0", + "char-regex": "^1.0.2", + "emojilib": "^2.4.0", + "skin-tone": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/node-forge": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", + "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", + "engines": { + "node": ">= 6.13.0" + } + }, + "node_modules/node-releases": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", + "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-range": { + "version": "0.1.2", + "resolved": 
"https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", + "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-url": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-8.0.1.tgz", + "integrity": "sha512-IO9QvjUMWxPQQhs60oOu10CRkWCiZzSUkzbXGGV9pviYl1fXYcvkzQ5jV9z8Y6un8ARoVRl4EtC6v6jNqbaJ/w==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nprogress": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", + "integrity": "sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==" + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz", + "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==", + "dependencies": { + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/obuf": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", + "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + 
"node_modules/on-headers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/open": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", + "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", + "dependencies": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/opener": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", + "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "bin": { + "opener": "bin/opener-bin.js" + } + }, + "node_modules/p-cancelable": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-3.0.0.tgz", + "integrity": "sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==", + "engines": { + "node": ">=12.20" + } + }, + "node_modules/p-limit": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", + "integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", + "dependencies": { + "yocto-queue": "^1.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-6.0.0.tgz", + "integrity": "sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==", + "dependencies": { + "p-limit": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-retry": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", + "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", + "dependencies": { + "@types/retry": "0.12.0", + "retry": "^0.13.1" + }, + "engines": { 
+ "node": ">=8" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/package-json": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/package-json/-/package-json-8.1.1.tgz", + "integrity": "sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA==", + "dependencies": { + "got": "^12.1.0", + "registry-auth-token": "^5.0.1", + "registry-url": "^6.0.0", + "semver": "^7.3.7" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/param-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", + "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-entities": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.1.tgz", + "integrity": "sha512-SWzvYcSJh4d/SGLIOQfZ/CoNv6BTlI6YEQ7Nj82oDVnRpwe/Z/F1EMx42x3JAOwGBlCjeCH0BRJQbQ/opHL17w==", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==" + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse-numeric-range": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz", + "integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==" + }, + "node_modules/parse5": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", + "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==", + "dependencies": { + "entities": "^4.4.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + 
"node_modules/parse5-htmlparser2-tree-adapter": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz", + "integrity": "sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==", + "dependencies": { + "domhandler": "^5.0.2", + "parse5": "^7.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/pascal-case": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", + "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/path-exists": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz", + "integrity": "sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==" + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" + }, + "node_modules/path-to-regexp": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", + "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", + "dependencies": { + "isarray": "0.0.1" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/periscopic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/periscopic/-/periscopic-3.1.0.tgz", + "integrity": "sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^3.0.0", + "is-reference": "^3.0.0" + } + }, + "node_modules/picocolors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", + 
"integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-dir": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-7.0.0.tgz", + "integrity": "sha512-Ie9z/WINcxxLp27BKOCHGde4ITq9UklYKDzVo1nhk5sqGEXU3FpkwP5GM2voTGJkGd9B3Otl+Q4uwSOeSUtOBA==", + "dependencies": { + "find-up": "^6.3.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-up": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz", + "integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==", + "dependencies": { + "find-up": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-up/node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-up/node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss": { + "version": "8.4.38", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz", + "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": 
"^1.0.0", + "source-map-js": "^1.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-calc": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-9.0.1.tgz", + "integrity": "sha512-TipgjGyzP5QzEhsOZUaIkeO5mKeMFpebWzRogWG/ysonUlnHcq5aJe0jOjpfzUU8PeSaBQnrE8ehR0QA5vs8PQ==", + "dependencies": { + "postcss-selector-parser": "^6.0.11", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.2.2" + } + }, + "node_modules/postcss-colormin": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-6.1.0.tgz", + "integrity": "sha512-x9yX7DOxeMAR+BgGVnNSAxmAj98NX/YxEMNFP+SDCEeNLb2r3i6Hh1ksMsnW8Ub5SLCpbescQqn9YEbE9554Sw==", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0", + "colord": "^2.9.3", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-convert-values": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-6.1.0.tgz", + "integrity": "sha512-zx8IwP/ts9WvUM6NkVSkiU902QZL1bwPhaVaLynPtCsOTqp+ZKbNi+s6XJg3rfqpKGA/oc7Oxk5t8pOQJcwl/w==", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-comments": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-6.0.2.tgz", + "integrity": "sha512-65w/uIqhSBBfQmYnG92FO1mWZjJ4GL5b8atm5Yw2UgrwD7HiNiSSNwJor1eCFGzUgYnN/iIknhNRVqjrrpuglw==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-duplicates": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-6.0.3.tgz", + "integrity": "sha512-+JA0DCvc5XvFAxwx6f/e68gQu/7Z9ud584VLmcgto28eB8FqSFZwtrLwB5Kcp70eIoWP/HXqz4wpo8rD8gpsTw==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-empty": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-6.0.3.tgz", + "integrity": "sha512-znyno9cHKQsK6PtxL5D19Fj9uwSzC2mB74cpT66fhgOadEUPyXFkbgwm5tvc3bt3NAy8ltE5MrghxovZRVnOjQ==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-overridden": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-6.0.2.tgz", + "integrity": "sha512-j87xzI4LUggC5zND7KdjsI25APtyMuynXZSujByMaav2roV6OZX+8AaCUcZSWqckZpjAjRyFDdpqybgjFO0HJQ==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-unused": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-6.0.5.tgz", + "integrity": "sha512-wHalBlRHkaNnNwfC8z+ppX57VhvS+HWgjW508esjdaEYr3Mx7Gnn2xA4R/CKf5+Z9S5qsqC+Uzh4ueENWwCVUA==", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { 
+ "postcss": "^8.4.31" + } + }, + "node_modules/postcss-loader": { + "version": "7.3.4", + "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.4.tgz", + "integrity": "sha512-iW5WTTBSC5BfsBJ9daFMPVrLT36MrNiC6fqOZTTaHjBNX6Pfd5p+hSBqe/fEeNd7pc13QiAyGt7VdGMw4eRC4A==", + "dependencies": { + "cosmiconfig": "^8.3.5", + "jiti": "^1.20.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "postcss": "^7.0.0 || ^8.0.1", + "webpack": "^5.0.0" + } + }, + "node_modules/postcss-merge-idents": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-6.0.3.tgz", + "integrity": "sha512-1oIoAsODUs6IHQZkLQGO15uGEbK3EAl5wi9SS8hs45VgsxQfMnxvt+L+zIr7ifZFIH14cfAeVe2uCTa+SPRa3g==", + "dependencies": { + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-merge-longhand": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-6.0.5.tgz", + "integrity": "sha512-5LOiordeTfi64QhICp07nzzuTDjNSO8g5Ksdibt44d+uvIIAE1oZdRn8y/W5ZtYgRH/lnLDlvi9F8btZcVzu3w==", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "stylehacks": "^6.1.1" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-merge-rules": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-6.1.1.tgz", + "integrity": "sha512-KOdWF0gju31AQPZiD+2Ar9Qjowz1LTChSjFFbS+e2sFgc4uHOp3ZvVX4sNeTlk0w2O31ecFGgrFzhO0RSWbWwQ==", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0", + "cssnano-utils": "^4.0.2", + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-font-values": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-6.1.0.tgz", + "integrity": "sha512-gklfI/n+9rTh8nYaSJXlCo3nOKqMNkxuGpTn/Qm0gstL3ywTr9/WRKznE+oy6fvfolH6dF+QM4nCo8yPLdvGJg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-gradients": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-6.0.3.tgz", + "integrity": "sha512-4KXAHrYlzF0Rr7uc4VrfwDJ2ajrtNEpNEuLxFgwkhFZ56/7gaE4Nr49nLsQDZyUe+ds+kEhf+YAUolJiYXF8+Q==", + "dependencies": { + "colord": "^2.9.3", + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-params": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-6.1.0.tgz", + "integrity": "sha512-bmSKnDtyyE8ujHQK0RQJDIKhQ20Jq1LYiez54WiaOoBtcSuflfK3Nm596LvbtlFcpipMjgClQGyGr7GAs+H1uA==", + "dependencies": { + "browserslist": "^4.23.0", + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + 
} + }, + "node_modules/postcss-minify-selectors": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-6.0.4.tgz", + "integrity": "sha512-L8dZSwNLgK7pjTto9PzWRoMbnLq5vsZSTu8+j1P/2GB8qdtGQfn+K1uSvFgYvgh83cbyxT5m43ZZhUMTJDSClQ==", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-modules-extract-imports": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.1.0.tgz", + "integrity": "sha512-k3kNe0aNFQDAZGbin48pL2VNidTF0w4/eASDsxlyspobzU3wZQLOGj7L9gfRe0Jo9/4uud09DsjFNH7winGv8Q==", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-local-by-default": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.5.tgz", + "integrity": "sha512-6MieY7sIfTK0hYfafw1OMEG+2bg8Q1ocHCpoWLqOKj3JXlKu4G7btkmM/B7lFubYkYWmRSPLZi5chid63ZaZYw==", + "dependencies": { + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.1.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-scope": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.2.0.tgz", + "integrity": "sha512-oq+g1ssrsZOsx9M96c5w8laRmvEu9C3adDSjI8oTcbfkrTE8hx/zfyobUoWIxaKPO8bt6S62kxpw5GqypEw1QQ==", + "dependencies": { + "postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-values": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", + "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", + "dependencies": { + "icss-utils": "^5.0.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-normalize-charset": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-6.0.2.tgz", + "integrity": "sha512-a8N9czmdnrjPHa3DeFlwqst5eaL5W8jYu3EBbTTkI5FHkfMhFZh1EGbku6jhHhIzTA6tquI2P42NtZ59M/H/kQ==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-display-values": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-6.0.2.tgz", + "integrity": "sha512-8H04Mxsb82ON/aAkPeq8kcBbAtI5Q2a64X/mnRRfPXBq7XeogoQvReqxEfc0B4WPq1KimjezNC8flUtC3Qz6jg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-positions": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-6.0.2.tgz", + "integrity": "sha512-/JFzI441OAB9O7VnLA+RtSNZvQ0NCFZDOtp6QPFo1iIyawyXg0YI3CYM9HBy1WvwCRHnPep/BvI1+dGPKoXx/Q==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + 
"engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-repeat-style": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-6.0.2.tgz", + "integrity": "sha512-YdCgsfHkJ2jEXwR4RR3Tm/iOxSfdRt7jplS6XRh9Js9PyCR/aka/FCb6TuHT2U8gQubbm/mPmF6L7FY9d79VwQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-string": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-6.0.2.tgz", + "integrity": "sha512-vQZIivlxlfqqMp4L9PZsFE4YUkWniziKjQWUtsxUiVsSSPelQydwS8Wwcuw0+83ZjPWNTl02oxlIvXsmmG+CiQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-timing-functions": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-6.0.2.tgz", + "integrity": "sha512-a+YrtMox4TBtId/AEwbA03VcJgtyW4dGBizPl7e88cTFULYsprgHWTbfyjSLyHeBcK/Q9JhXkt2ZXiwaVHoMzA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-unicode": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-6.1.0.tgz", + "integrity": "sha512-QVC5TQHsVj33otj8/JD869Ndr5Xcc/+fwRh4HAsFsAeygQQXm+0PySrKbr/8tkDKzW+EVT3QkqZMfFrGiossDg==", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-url": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-6.0.2.tgz", + "integrity": "sha512-kVNcWhCeKAzZ8B4pv/DnrU1wNh458zBNp8dh4y5hhxih5RZQ12QWMuQrDgPRw3LRl8mN9vOVfHl7uhvHYMoXsQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-whitespace": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-6.0.2.tgz", + "integrity": "sha512-sXZ2Nj1icbJOKmdjXVT9pnyHQKiSAyuNQHSgRCUgThn2388Y9cGVDR+E9J9iAYbSbLHI+UUwLVl1Wzco/zgv0Q==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-ordered-values": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-6.0.2.tgz", + "integrity": "sha512-VRZSOB+JU32RsEAQrO94QPkClGPKJEL/Z9PCBImXMhIeK5KAYo6slP/hBYlLgrCjFxyqvn5VC81tycFEDBLG1Q==", + "dependencies": { + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-reduce-idents": { + "version": "6.0.3", + "resolved": 
"https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-6.0.3.tgz", + "integrity": "sha512-G3yCqZDpsNPoQgbDUy3T0E6hqOQ5xigUtBQyrmq3tn2GxlyiL0yyl7H+T8ulQR6kOcHJ9t7/9H4/R2tv8tJbMA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-reduce-initial": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-6.1.0.tgz", + "integrity": "sha512-RarLgBK/CrL1qZags04oKbVbrrVK2wcxhvta3GCxrZO4zveibqbRPmm2VI8sSgCXwoUHEliRSbOfpR0b/VIoiw==", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-reduce-transforms": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-6.0.2.tgz", + "integrity": "sha512-sB+Ya++3Xj1WaT9+5LOOdirAxP7dJZms3GRcYheSPi1PiTMigsxHAdkrbItHxwYHr4kt1zL7mmcHstgMYT+aiA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.0.tgz", + "integrity": "sha512-UMz42UD0UY0EApS0ZL9o1XnLhSTtvvvLe5Dc2H2O56fvRZi+KulDyf5ctDhhtYJBGKStV2FL1fy6253cmLgqVQ==", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-sort-media-queries": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-5.2.0.tgz", + "integrity": "sha512-AZ5fDMLD8SldlAYlvi8NIqo0+Z8xnXU2ia0jxmuhxAU+Lqt9K+AlmLNJ/zWEnE9x+Zx3qL3+1K20ATgNOr3fAA==", + "dependencies": { + "sort-css-media-queries": "2.2.0" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.4.23" + } + }, + "node_modules/postcss-svgo": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-6.0.3.tgz", + "integrity": "sha512-dlrahRmxP22bX6iKEjOM+c8/1p+81asjKT+V5lrgOH944ryx/OHpclnIbGsKVd3uWOXFLYJwCVf0eEkJGvO96g==", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "svgo": "^3.2.0" + }, + "engines": { + "node": "^14 || ^16 || >= 18" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-unique-selectors": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-6.0.4.tgz", + "integrity": "sha512-K38OCaIrO8+PzpArzkLKB42dSARtC2tmG6PvD4b1o1Q2E9Os8jzfWFfSy/rixsHwohtsDdFtAWGjFVFUdwYaMg==", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" + }, + "node_modules/postcss-zindex": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-6.0.2.tgz", + "integrity": 
"sha512-5BxW9l1evPB/4ZIc+2GobEBoKC+h8gPGCMi+jxsYvd2x0mjq7wazk6DrP71pStqxE9Foxh5TVnonbWpFZzXaYg==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/pretty-error": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", + "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", + "dependencies": { + "lodash": "^4.17.20", + "renderkid": "^3.0.0" + } + }, + "node_modules/pretty-time": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", + "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/prism-react-renderer": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.3.1.tgz", + "integrity": "sha512-Rdf+HzBLR7KYjzpJ1rSoxT9ioO85nZngQEoFIhL07XhtJHlCU3SOz0GJ6+qvMyQe0Se+BV3qpe6Yd/NmQF5Juw==", + "dependencies": { + "@types/prismjs": "^1.26.0", + "clsx": "^2.0.0" + }, + "peerDependencies": { + "react": ">=16.0.0" + } + }, + "node_modules/prismjs": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", + "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/property-information": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz", + "integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/proto-list": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", + "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==" + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/proxy-addr/node_modules/ipaddr.js": { + "version": "1.9.1", + 
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==" + }, + "node_modules/pupa": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pupa/-/pupa-3.1.0.tgz", + "integrity": "sha512-FLpr4flz5xZTSJxSeaheeMKN/EDzMdK7b8PTOC6a5PYFKTucWbdqjgqaEyH0shFiSJrVB1+Qqi4Tk19ccU6Aug==", + "dependencies": { + "escape-goat": "^4.0.0" + }, + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/qs": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/queue": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz", + "integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==", + "dependencies": { + "inherits": "~2.0.3" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/range-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/raw-body/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": 
"sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dev-utils": { + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz", + "integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==", + "dependencies": { + "@babel/code-frame": "^7.16.0", + "address": "^1.1.2", + "browserslist": "^4.18.1", + "chalk": "^4.1.2", + "cross-spawn": "^7.0.3", + "detect-port-alt": "^1.1.6", + "escape-string-regexp": "^4.0.0", + "filesize": "^8.0.6", + "find-up": "^5.0.0", + "fork-ts-checker-webpack-plugin": "^6.5.0", + "global-modules": "^2.0.0", + "globby": "^11.0.4", + "gzip-size": "^6.0.0", + "immer": "^9.0.7", + "is-root": "^2.1.0", + "loader-utils": "^3.2.0", + "open": "^8.4.0", + "pkg-up": "^3.1.0", + "prompts": "^2.4.2", + "react-error-overlay": "^6.0.11", + "recursive-readdir": "^2.2.2", + "shell-quote": "^1.7.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/react-dev-utils/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/loader-utils": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.3.1.tgz", + "integrity": "sha512-FMJTLMXfCLMLfJxcX9PFqX5qD88Z5MRGaZCVzfuqeZSPsyiBzs+pahDQjbIWz2QIzPZz0NX9Zy4FX3lmK6YHIg==", + "engines": { + "node": ">= 12.13.0" + } + }, + "node_modules/react-dev-utils/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + 
"integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/react-dev-utils/node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/react-error-overlay": { + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz", + "integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg==" + }, + "node_modules/react-fast-compare": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", + "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==" + }, + "node_modules/react-helmet-async": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz", + "integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==", + "dependencies": { + "@babel/runtime": "^7.12.5", + "invariant": "^2.2.4", + "prop-types": "^15.7.2", + "react-fast-compare": "^3.2.0", + "shallowequal": "^1.1.0" + }, + "peerDependencies": { + "react": "^16.6.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + }, + "node_modules/react-json-view-lite": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/react-json-view-lite/-/react-json-view-lite-1.5.0.tgz", + "integrity": "sha512-nWqA1E4jKPklL2jvHWs6s+7Na0qNgw9HCP6xehdQJeg6nPBTFZgGwyko9Q0oj+jQWKTTVRS30u0toM5wiuL3iw==", + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "react": "^16.13.1 || ^17.0.0 || ^18.0.0" + } + }, + 
"node_modules/react-loadable": { + "name": "@docusaurus/react-loadable", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-6.0.0.tgz", + "integrity": "sha512-YMMxTUQV/QFSnbgrP3tjDzLHRg7vsbMn8e9HAa8o/1iXoiomo48b7sk/kkmWEuWNDPJVlKSJRB6Y2fHqdJk+SQ==", + "dependencies": { + "@types/react": "*" + }, + "peerDependencies": { + "react": "*" + } + }, + "node_modules/react-loadable-ssr-addon-v5-slorber": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz", + "integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==", + "dependencies": { + "@babel/runtime": "^7.10.3" + }, + "engines": { + "node": ">=10.13.0" + }, + "peerDependencies": { + "react-loadable": "*", + "webpack": ">=4.41.1 || 5.x" + } + }, + "node_modules/react-router": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", + "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "hoist-non-react-statics": "^3.1.0", + "loose-envify": "^1.3.1", + "path-to-regexp": "^1.7.0", + "prop-types": "^15.6.2", + "react-is": "^16.6.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/react-router-config": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz", + "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==", + "dependencies": { + "@babel/runtime": "^7.1.2" + }, + "peerDependencies": { + "react": ">=15", + "react-router": ">=5" + } + }, + "node_modules/react-router-dom": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz", + "integrity": "sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "loose-envify": "^1.3.1", + "prop-types": "^15.6.2", + "react-router": "5.3.4", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/reading-time": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/reading-time/-/reading-time-1.5.0.tgz", + "integrity": "sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg==" + }, + "node_modules/rechoir": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", + 
"integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==", + "dependencies": { + "resolve": "^1.1.6" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/recursive-readdir": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz", + "integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==", + "dependencies": { + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/regenerate": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" + }, + "node_modules/regenerate-unicode-properties": { + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz", + "integrity": "sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==", + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" + }, + "node_modules/regenerator-transform": { + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", + "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", + "dependencies": { + "@babel/runtime": "^7.8.4" + } + }, + "node_modules/regexpu-core": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", + "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", + "dependencies": { + "@babel/regjsgen": "^0.8.0", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.1.0", + "regjsparser": "^0.9.1", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/registry-auth-token": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-5.0.2.tgz", + "integrity": "sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ==", + "dependencies": { + "@pnpm/npm-conf": "^2.1.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/registry-url": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-6.0.1.tgz", + "integrity": "sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q==", + "dependencies": { + "rc": "1.2.8" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/regjsparser": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", + "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "dependencies": { + "jsesc": "~0.5.0" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/regjsparser/node_modules/jsesc": { + "version": 
"0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", + "bin": { + "jsesc": "bin/jsesc" + } + }, + "node_modules/rehype-raw": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz", + "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-raw": "^9.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/relateurl": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", + "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/remark-directive": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/remark-directive/-/remark-directive-3.0.0.tgz", + "integrity": "sha512-l1UyWJ6Eg1VPU7Hm/9tt0zKtReJQNOA4+iDMAxTyZNWnJnFlbS/7zhiel/rogTLQ2vMYwDzSJa4BiVNqGlqIMA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-directive": "^3.0.0", + "micromark-extension-directive": "^3.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-emoji": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-4.0.1.tgz", + "integrity": "sha512-fHdvsTR1dHkWKev9eNyhTo4EFwbUvJ8ka9SgeWkMPYFX4WoI7ViVBms3PjlQYgw5TLvNQso3GUB/b/8t3yo+dg==", + "dependencies": { + "@types/mdast": "^4.0.2", + "emoticon": "^4.0.1", + "mdast-util-find-and-replace": "^3.0.1", + "node-emoji": "^2.1.0", + "unified": "^11.0.4" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/remark-frontmatter": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/remark-frontmatter/-/remark-frontmatter-5.0.0.tgz", + "integrity": "sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-frontmatter": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-gfm": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.0.tgz", + "integrity": "sha512-U92vJgBPkbw4Zfu/IiW2oTZLSL3Zpv+uI7My2eq8JxKgqraFdU8YUGicEJCEgSbeaG+QDFqIcwwfMTOEelPxuA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-mdx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-3.0.1.tgz", + "integrity": "sha512-3Pz3yPQ5Rht2pM5R+0J2MrGoBSrzf+tJG94N+t/ilfdh8YLyyKYtidAYwTveB20BoHAcwIopOUqhcmh2F7hGYA==", + "dependencies": { + "mdast-util-mdx": "^3.0.0", + "micromark-extension-mdxjs": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + 
"resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.1", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.1.tgz", + "integrity": "sha512-g/osARvjkBXb6Wo0XvAeXQohVta8i84ACbenPpoSsxTOQH/Ae0/RGP4WZgnMH5pMLpsj4FG7OHmcIcXxpza8eQ==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/renderkid": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", + "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", + "dependencies": { + "css-select": "^4.1.3", + "dom-converter": "^0.2.0", + "htmlparser2": "^6.1.0", + "lodash": "^4.17.21", + "strip-ansi": "^6.0.1" + } + }, + "node_modules/renderkid/node_modules/css-select": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", + "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/renderkid/node_modules/dom-serializer": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", + "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domhandler": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", + "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "dependencies": { + "dom-serializer": "^1.0.1", + 
"domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/htmlparser2": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", + "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-like": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz", + "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==", + "engines": { + "node": "*" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + }, + "node_modules/resolve": { + "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-alpn": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", + "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==" + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve-pathname": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", + "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" + }, + "node_modules/responselike": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz", + "integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==", + "dependencies": { + "lowercase-keys": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/retry": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", + "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rtl-detect": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.1.2.tgz", + "integrity": "sha512-PGMBq03+TTG/p/cRB7HCLKJ1MgDIi07+QU1faSjiYRfmY5UsAttV9Hs08jDAHVwcOwmVLcSJkpwyfXszVjWfIQ==" + }, + "node_modules/rtlcss": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-4.3.0.tgz", + "integrity": "sha512-FI+pHEn7Wc4NqKXMXFM+VAYKEj/mRIcW4h24YVwVtyjI+EqGrLc2Hx/Ny0lrZ21cBWU2goLy36eqMcNj3AQJig==", + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0", + "postcss": "^8.4.21", + "strip-json-comments": "^3.1.1" + }, + "bin": { + "rtlcss": "bin/rtlcss.js" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/sax": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.1.tgz", + "integrity": "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==" + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + 
"dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/schema-utils": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/search-insights": { + "version": "2.17.2", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.17.2.tgz", + "integrity": "sha512-zFNpOpUO+tY2D85KrxJ+aqwnIfdEGi06UH2+xEb+Bp9Mwznmauqc9djbnBibJO5mpfUPPa8st6Sx65+vbeO45g==", + "peer": true + }, + "node_modules/section-matter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", + "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", + "dependencies": { + "extend-shallow": "^2.0.1", + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/select-hose": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", + "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==" + }, + "node_modules/selfsigned": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.4.1.tgz", + "integrity": "sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q==", + "dependencies": { + "@types/node-forge": "^1.3.0", + "node-forge": "^1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-4.0.0.tgz", + "integrity": "sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA==", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/send": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", + "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/send/node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/send/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/serve-handler": { + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz", + "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==", + "dependencies": { + "bytes": "3.0.0", + "content-disposition": "0.5.2", + "fast-url-parser": "1.1.3", + "mime-types": "2.1.18", + "minimatch": "3.1.2", + "path-is-inside": "1.0.2", + "path-to-regexp": "2.2.1", + "range-parser": "1.2.0" + } + }, + "node_modules/serve-handler/node_modules/path-to-regexp": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", + "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==" + }, + "node_modules/serve-index": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", + "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==", + "dependencies": { + "accepts": "~1.3.4", + "batch": "0.6.1", + "debug": "2.6.9", + "escape-html": "~1.0.3", + "http-errors": "~1.6.2", + "mime-types": "~2.1.17", + "parseurl": "~1.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/serve-index/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/serve-index/node_modules/depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/http-errors": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", + "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", + "dependencies": { + "depd": "~1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.0", + "statuses": ">= 1.4.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": 
"sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + }, + "node_modules/serve-index/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/serve-index/node_modules/setprototypeof": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", + "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" + }, + "node_modules/serve-index/node_modules/statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-static": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", + "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "dependencies": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.18.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/shallow-clone": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "dependencies": { + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/shell-quote": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz", + "integrity": 
"sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/shelljs": { + "version": "0.8.5", + "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz", + "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==", + "dependencies": { + "glob": "^7.0.0", + "interpret": "^1.0.0", + "rechoir": "^0.6.2" + }, + "bin": { + "shjs": "bin/shjs" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/side-channel": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" + }, + "node_modules/sirv": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/sirv/-/sirv-2.0.4.tgz", + "integrity": "sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ==", + "dependencies": { + "@polka/url": "^1.0.0-next.24", + "mrmime": "^2.0.0", + "totalist": "^3.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + }, + "node_modules/sitemap": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.2.tgz", + "integrity": "sha512-ARCqzHJ0p4gWt+j7NlU5eDlIO9+Rkr/JhPFZKKQ1l5GCus7rJH4UdrlVAh0xC/gDS/Qir2UMxqYNHtsKr2rpCw==", + "dependencies": { + "@types/node": "^17.0.5", + "@types/sax": "^1.2.1", + "arg": "^5.0.0", + "sax": "^1.2.4" + }, + "bin": { + "sitemap": "dist/cli.js" + }, + "engines": { + "node": ">=12.0.0", + "npm": ">=5.6.0" + } + }, + "node_modules/sitemap/node_modules/@types/node": { + "version": "17.0.45", + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", + "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==" + }, + "node_modules/skin-tone": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/skin-tone/-/skin-tone-2.0.0.tgz", + "integrity": "sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA==", + "dependencies": { + "unicode-emoji-modifier-base": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/snake-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/snake-case/-/snake-case-3.0.4.tgz", + "integrity": "sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==", + "dependencies": 
{ + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/sockjs": { + "version": "0.3.24", + "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", + "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", + "dependencies": { + "faye-websocket": "^0.11.3", + "uuid": "^8.3.2", + "websocket-driver": "^0.7.4" + } + }, + "node_modules/sort-css-media-queries": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.2.0.tgz", + "integrity": "sha512-0xtkGhWCC9MGt/EzgnvbbbKhqWjl1+/rncmhTh5qCpbYguXh6S/qwePfv/JQ8jePXXmqingylxoC49pCkSPIbA==", + "engines": { + "node": ">= 6.3.0" + } + }, + "node_modules/source-map": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz", + "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/source-map-js": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/source-map-support/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/spdy": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", + "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", + "dependencies": { + "debug": "^4.1.0", + "handle-thing": "^2.0.0", + "http-deceiver": "^1.2.7", + "select-hose": "^2.0.0", + "spdy-transport": "^3.0.0" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/spdy-transport": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", + "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", + "dependencies": { + "debug": "^4.1.0", + "detect-node": "^2.0.4", + "hpack.js": "^2.1.6", + "obuf": "^1.1.2", + "readable-stream": "^3.0.6", + "wbuf": "^1.7.3" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" + }, + "node_modules/srcset": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/srcset/-/srcset-4.0.0.tgz", + "integrity": "sha512-wvLeHgcVHKO8Sc/H/5lkGreJQVeYMm9rlmt8PuR1xE31rIuXhuzznUUqAt8MqLhB3MqJdFzlNAfpcWnxiFUcPw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/std-env": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz", + "integrity": "sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==" + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/stringify-object": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", + "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", + "dependencies": { + "get-own-enumerable-property-symbols": "^3.0.0", + "is-obj": "^1.0.1", + "is-regexp": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + 
"dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom-string": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", + "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-to-object": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.4.4.tgz", + "integrity": "sha512-HYNoHZa2GorYNyqiCaBgsxvcJIn7OHq6inEga+E6Ke3m5JkoqpQbnFssk4jwe+K7AhGa2fcha4wSOf1Kn01dMg==", + "dependencies": { + "inline-style-parser": "0.1.1" + } + }, + "node_modules/stylehacks": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-6.1.1.tgz", + "integrity": "sha512-gSTTEQ670cJNoaeIp9KX6lZmm8LJ3jPB5yJmX8Zq/wQxOsAFXV3qjWzHas3YYk1qesuVIyYWWUpZ0vSE/dTSGg==", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/svg-parser": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", + "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==" + }, + "node_modules/svgo": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-3.3.2.tgz", + "integrity": "sha512-OoohrmuUlBs8B8o6MB2Aevn+pRIH9zDALSR+6hhqVfa6fRwG/Qw9VUMSMW9VNg2CFc/MTIfabtdOVl9ODIJjpw==", + "dependencies": { + "@trysound/sax": "0.2.0", + "commander": "^7.2.0", + "css-select": "^5.1.0", + "css-tree": "^2.3.1", + "css-what": "^6.1.0", + "csso": "^5.0.5", + "picocolors": "^1.0.0" + }, + "bin": { + "svgo": "bin/svgo" + }, + "engines": { + "node": ">=14.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/svgo" + } + }, + "node_modules/svgo/node_modules/commander": { + "version": "7.2.0", + "resolved": 
"https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/terser": { + "version": "5.31.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.31.1.tgz", + "integrity": "sha512-37upzU1+viGvuFtBo9NPufCb9dwM0+l9hMxYyWfBA+fbwrPqNJAhbZ6W47bBFnZHKHTUBnMvi87434qq+qnxOg==", + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.8.2", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.3.10", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz", + "integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.20", + "jest-worker": "^27.4.5", + "schema-utils": "^3.1.1", + "serialize-javascript": "^6.0.1", + "terser": "^5.26.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/terser-webpack-plugin/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/terser-webpack-plugin/node_modules/jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/terser-webpack-plugin/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/terser-webpack-plugin/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": 
"sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/terser-webpack-plugin/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==" + }, + "node_modules/thunky": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", + "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==" + }, + "node_modules/tiny-warning": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", + "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/totalist": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz", + "integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + 
"funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/tslib": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz", + "integrity": "sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==" + }, + "node_modules/type-fest": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "dependencies": { + "is-typedarray": "^1.0.0" + } + }, + "node_modules/typescript": { + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.2.tgz", + "integrity": "sha512-NcRtPEOsPFFWjobJEtfihkLCZCXZt/os3zf8nTxjVH3RvTSxjrCamJpbExGvYOF+tFHc3pA65qpdwPbzjohhew==", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", + "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-emoji-modifier-base": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz", + 
"integrity": "sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "dependencies": { + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-value-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", + "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unique-string": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-3.0.0.tgz", + "integrity": "sha512-VGXBUVwxKMBUznyffQweQABPRRW1vHZAbadFZud4pLFAqRGvv/96vafgjWFqzourzr8YonlQiPgH0YCJfawoGQ==", + "dependencies": { + "crypto-random-string": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", + "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz", + "integrity": "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + 
"url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", + "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz", + "integrity": "sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.1.2", + "picocolors": "^1.0.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/update-notifier": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-6.0.2.tgz", + "integrity": "sha512-EDxhTEVPZZRLWYcJ4ZXjGFN0oP7qYvbXWzEgRm/Yql4dHX5wDbvh89YHP6PK1lzZJYrMtXUuZZz8XGK+U6U1og==", + "dependencies": { + "boxen": "^7.0.0", + "chalk": "^5.0.1", + "configstore": "^6.0.0", + "has-yarn": "^3.0.0", + "import-lazy": "^4.0.0", + "is-ci": "^3.0.1", + "is-installed-globally": "^0.4.0", + "is-npm": "^6.0.0", + "is-yarn-global": "^0.4.0", + "latest-version": "^7.0.0", + "pupa": "^3.1.0", + "semver": "^7.3.7", + "semver-diff": "^4.0.0", + "xdg-basedir": "^5.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/yeoman/update-notifier?sponsor=1" + } + }, + 
"node_modules/update-notifier/node_modules/boxen": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-7.1.1.tgz", + "integrity": "sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^7.0.1", + "chalk": "^5.2.0", + "cli-boxes": "^3.0.0", + "string-width": "^5.1.2", + "type-fest": "^2.13.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/camelcase": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz", + "integrity": "sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/chalk": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz", + "integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/uri-js/node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/url-loader": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz", + "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==", + "dependencies": { + "loader-utils": "^2.0.0", + "mime-types": "^2.1.27", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "file-loader": "*", + "webpack": "^4.0.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "file-loader": { + "optional": true + } + } + }, + "node_modules/url-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/url-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/url-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": 
"https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/url-loader/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/utila": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", + "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==" + }, + "node_modules/utility-types": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.11.0.tgz", + "integrity": "sha512-6Z7Ma2aVEWisaL6TvBCy7P8rm2LQoPv6dJ7ecIaIixHcwfbJ0x7mWdbcwlIM5IGQxPZSFYeqRCqlOOeKoJYMkw==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/value-equal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", + "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==" + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "dependencies": { + "@types/unist": "^3.0.0", + 
"vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-location": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz", + "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.2.tgz", + "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/watchpack": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.1.tgz", + "integrity": "sha512-8wrBCMtVhqcXP2Sup1ctSkga6uc2Bx0IIvKyT7yTFier5AXHooSI+QyQQAtTb7+E0IUCCKyTFmXqdqgum2XWGg==", + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/wbuf": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", + "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", + "dependencies": { + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/web-namespaces": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", + "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/webpack": { + "version": "5.92.1", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.92.1.tgz", + "integrity": "sha512-JECQ7IwJb+7fgUFBlrJzbyu3GEuNBcdqr1LD7IbSzwkSmIevTm8PF+wej3Oxuz/JFBUZ6O1o43zsPkwm1C4TmA==", + "dependencies": { + "@types/eslint-scope": "^3.7.3", + "@types/estree": "^1.0.5", + "@webassemblyjs/ast": "^1.12.1", + "@webassemblyjs/wasm-edit": "^1.12.1", + "@webassemblyjs/wasm-parser": "^1.12.1", + "acorn": "^8.7.1", + "acorn-import-attributes": "^1.9.5", + "browserslist": "^4.21.10", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.17.0", + "es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.11", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^3.2.0", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.3.10", + "watchpack": "^2.4.1", + "webpack-sources": "^3.2.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-bundle-analyzer": { + "version": "4.10.2", + "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.2.tgz", + "integrity": 
"sha512-vJptkMm9pk5si4Bv922ZbKLV8UTT4zib4FPgXMhgzUny0bfDDkLXAVQs3ly3fS4/TN9ROFtb0NFrm04UXFE/Vw==", + "dependencies": { + "@discoveryjs/json-ext": "0.5.7", + "acorn": "^8.0.4", + "acorn-walk": "^8.0.0", + "commander": "^7.2.0", + "debounce": "^1.2.1", + "escape-string-regexp": "^4.0.0", + "gzip-size": "^6.0.0", + "html-escaper": "^2.0.2", + "opener": "^1.5.2", + "picocolors": "^1.0.0", + "sirv": "^2.0.3", + "ws": "^7.3.1" + }, + "bin": { + "webpack-bundle-analyzer": "lib/bin/analyzer.js" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/webpack-bundle-analyzer/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/webpack-dev-middleware": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", + "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", + "dependencies": { + "colorette": "^2.0.10", + "memfs": "^3.4.3", + "mime-types": "^2.1.31", + "range-parser": "^1.2.1", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-server": { + "version": "4.15.2", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz", + "integrity": "sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==", + "dependencies": { + "@types/bonjour": "^3.5.9", + "@types/connect-history-api-fallback": "^1.3.5", + "@types/express": "^4.17.13", + "@types/serve-index": "^1.9.1", + "@types/serve-static": "^1.13.10", + "@types/sockjs": "^0.3.33", + "@types/ws": "^8.5.5", + "ansi-html-community": "^0.0.8", + "bonjour-service": "^1.0.11", + "chokidar": "^3.5.3", + "colorette": "^2.0.10", + "compression": "^1.7.4", + "connect-history-api-fallback": "^2.0.0", + "default-gateway": "^6.0.3", + "express": "^4.17.3", + "graceful-fs": "^4.2.6", + "html-entities": "^2.3.2", + "http-proxy-middleware": "^2.0.3", + "ipaddr.js": "^2.0.1", + "launch-editor": "^2.6.0", + "open": "^8.0.9", + "p-retry": "^4.5.0", + "rimraf": "^3.0.2", + "schema-utils": "^4.0.0", + "selfsigned": 
"^2.1.1", + "serve-index": "^1.9.1", + "sockjs": "^0.3.24", + "spdy": "^4.0.2", + "webpack-dev-middleware": "^5.3.4", + "ws": "^8.13.0" + }, + "bin": { + "webpack-dev-server": "bin/webpack-dev-server.js" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.37.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "webpack": { + "optional": true + }, + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-dev-server/node_modules/ws": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/webpack-merge": { + "version": "5.10.0", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.10.0.tgz", + "integrity": "sha512-+4zXKdx7UnO+1jaN4l2lHVD+mFvnlZQP/6ljaJVb4SZiwIKeUnrT5l0gkT8z+n4hKpC+jpOv6O9R+gLtag7pSA==", + "dependencies": { + "clone-deep": "^4.0.1", + "flat": "^5.0.2", + "wildcard": "^2.0.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/webpack-sources": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", + "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/webpack/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/webpack/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/webpack/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/webpack/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + 
"node_modules/webpack/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/webpackbar": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz", + "integrity": "sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==", + "dependencies": { + "chalk": "^4.1.0", + "consola": "^2.15.3", + "pretty-time": "^1.1.0", + "std-env": "^3.0.1" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "webpack": "3 || 4 || 5" + } + }, + "node_modules/websocket-driver": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", + "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", + "dependencies": { + "http-parser-js": ">=0.5.1", + "safe-buffer": ">=5.1.0", + "websocket-extensions": ">=0.1.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/websocket-extensions": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", + "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/widest-line": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", + "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", + "dependencies": { + "string-width": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wildcard": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz", + "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==" + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "dependencies": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, + "node_modules/ws": { + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", + "engines": { + "node": ">=8.3.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": "^5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xdg-basedir": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-5.1.0.tgz", + "integrity": "sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/xml-js": { + "version": "1.6.11", + "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz", + "integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==", + "dependencies": { + "sax": "^1.2.4" + }, + "bin": { + "xml-js": "bin/cli.js" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" + }, + "node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/yocto-queue": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", + "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + 
} + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/docs_v2/package.json b/docs_v2/package.json new file mode 100644 index 00000000..64b6fe3b --- /dev/null +++ b/docs_v2/package.json @@ -0,0 +1,44 @@ +{ + "name": "langchain-dart", + "version": "0.0.0", + "private": true, + "scripts": { + "docusaurus": "docusaurus", + "start": "docusaurus start", + "build": "docusaurus build", + "swizzle": "docusaurus swizzle", + "deploy": "docusaurus deploy", + "clear": "docusaurus clear", + "serve": "docusaurus serve", + "write-translations": "docusaurus write-translations", + "write-heading-ids": "docusaurus write-heading-ids" + }, + "dependencies": { + "@docusaurus/core": "^3.5.2", + "@docusaurus/preset-classic": "^3.5.2", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "prism-react-renderer": "^2.3.0", + "react": "^18.0.0", + "react-dom": "^18.0.0" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "^3.5.2", + "@docusaurus/types": "^3.5.2" + }, + "browserslist": { + "production": [ + ">0.5%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 3 chrome version", + "last 3 firefox version", + "last 5 safari version" + ] + }, + "engines": { + "node": ">=18.0" + } +} diff --git a/docs_v2/sidebars.js b/docs_v2/sidebars.js new file mode 100644 index 00000000..72e4f826 --- /dev/null +++ b/docs_v2/sidebars.js @@ -0,0 +1,30 @@ +/** + * Creating a sidebar enables you to: + - create an ordered group of docs + - render a sidebar for each doc of that group + - provide next/previous navigation + + The sidebars can be generated from the filesystem, or explicitly defined here. + + Create as many sidebars as you want. + */ + +// @ts-check + +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const sidebars = { + // By default, Docusaurus generates a sidebar from the docs folder structure + tutorialSidebar: [{type: 'autogenerated', dirName: '.'}], + integrations: [{type: 'autogenerated', dirName: '.'}] + + // tutorialSidebar: [ + // 'intro', + // 'tutorials/index', + // 'how_to/index', + // 'concepts', + // 'integrations/index', + // ], + +}; + +export default sidebars; \ No newline at end of file diff --git a/docs_v2/src/components/HomepageFeatures/index.js b/docs_v2/src/components/HomepageFeatures/index.js new file mode 100644 index 00000000..acc76219 --- /dev/null +++ b/docs_v2/src/components/HomepageFeatures/index.js @@ -0,0 +1,64 @@ +import clsx from 'clsx'; +import Heading from '@theme/Heading'; +import styles from './styles.module.css'; + +const FeatureList = [ + { + title: 'Easy to Use', + Svg: require('@site/static/img/undraw_docusaurus_mountain.svg').default, + description: ( + <> + Docusaurus was designed from the ground up to be easily installed and + used to get your website up and running quickly. + + ), + }, + { + title: 'Focus on What Matters', + Svg: require('@site/static/img/undraw_docusaurus_tree.svg').default, + description: ( + <> + Docusaurus lets you focus on your docs, and we'll do the chores. Go + ahead and move your docs into the docs directory. + + ), + }, + { + title: 'Powered by React', + Svg: require('@site/static/img/undraw_docusaurus_react.svg').default, + description: ( + <> + Extend or customize your website layout by reusing React. 
Docusaurus can
+        be extended while reusing the same header and footer.
+      </>
+    ),
+  },
+];
+
+function Feature({Svg, title, description}) {
+  return (
+    <div className={clsx('col col--4')}>
+      <div className="text--center">
+        <Svg className={styles.featureSvg} role="img" />
+      </div>
+      <div className="text--center padding-horiz--md">
+        <Heading as="h3">{title}</Heading>
+        <p>{description}</p>
+      </div>
+    </div>
+  );
+}
+
+export default function HomepageFeatures() {
+  return (
+    <section className={styles.features}>
+      <div className="container">
+        <div className="row">
+          {FeatureList.map((props, idx) => (
+            <Feature key={idx} {...props} />
+          ))}
+        </div>
+      </div>
+    </section>
    + ); +} diff --git a/docs_v2/src/components/HomepageFeatures/styles.module.css b/docs_v2/src/components/HomepageFeatures/styles.module.css new file mode 100644 index 00000000..b248eb2e --- /dev/null +++ b/docs_v2/src/components/HomepageFeatures/styles.module.css @@ -0,0 +1,11 @@ +.features { + display: flex; + align-items: center; + padding: 2rem 0; + width: 100%; +} + +.featureSvg { + height: 200px; + width: 200px; +} diff --git a/docs_v2/src/css/custom.css b/docs_v2/src/css/custom.css new file mode 100644 index 00000000..2bc6a4cf --- /dev/null +++ b/docs_v2/src/css/custom.css @@ -0,0 +1,30 @@ +/** + * Any CSS included here will be global. The classic template + * bundles Infima by default. Infima is a CSS framework designed to + * work well for content-centric websites. + */ + +/* You can override the default Infima variables here. */ +:root { + --ifm-color-primary: #2e8555; + --ifm-color-primary-dark: #29784c; + --ifm-color-primary-darker: #277148; + --ifm-color-primary-darkest: #205d3b; + --ifm-color-primary-light: #33925d; + --ifm-color-primary-lighter: #359962; + --ifm-color-primary-lightest: #3cad6e; + --ifm-code-font-size: 95%; + --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1); +} + +/* For readability concerns, you should choose a lighter palette in dark mode. */ +[data-theme='dark'] { + --ifm-color-primary: #25c2a0; + --ifm-color-primary-dark: #21af90; + --ifm-color-primary-darker: #1fa588; + --ifm-color-primary-darkest: #1a8870; + --ifm-color-primary-light: #29d5b0; + --ifm-color-primary-lighter: #32d8b4; + --ifm-color-primary-lightest: #4fddbf; + --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3); +} diff --git a/docs_v2/src/pages/index.js b/docs_v2/src/pages/index.js new file mode 100644 index 00000000..176f838c --- /dev/null +++ b/docs_v2/src/pages/index.js @@ -0,0 +1,7 @@ +import { Redirect } from "@docusaurus/router"; +import useBaseUrl from "@docusaurus/useBaseUrl"; +import React from "react"; + +export default function Home() { + return ; +} diff --git a/docs_v2/src/pages/index.module.css b/docs_v2/src/pages/index.module.css new file mode 100644 index 00000000..9f71a5da --- /dev/null +++ b/docs_v2/src/pages/index.module.css @@ -0,0 +1,23 @@ +/** + * CSS files with the .module.css suffix will be treated as CSS modules + * and scoped locally. + */ + +.heroBanner { + padding: 4rem 0; + text-align: center; + position: relative; + overflow: hidden; +} + +@media screen and (max-width: 996px) { + .heroBanner { + padding: 2rem; + } +} + +.buttons { + display: flex; + align-items: center; + justify-content: center; +} diff --git a/docs_v2/src/pages/markdown-page.md b/docs_v2/src/pages/markdown-page.md new file mode 100644 index 00000000..9756c5b6 --- /dev/null +++ b/docs_v2/src/pages/markdown-page.md @@ -0,0 +1,7 @@ +--- +title: Markdown page example +--- + +# Markdown page example + +You don't need React to write simple standalone pages. 
diff --git a/docs_v2/static/.nojekyll b/docs_v2/static/.nojekyll
new file mode 100644
index 00000000..e69de29b
diff --git a/docs_v2/static/img/favicon.ico b/docs_v2/static/img/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..4c29611109064a93f09866b3b89d4486b8db1866
GIT binary patch
literal 15406

[15406-byte base85-encoded binary literal for favicon.ico omitted]
z^h5j;y#k~cZ03mc?b_o|b9RF>cJ<`c?s&m>9XfWKG<)q{;H}t!JJqi`5|83Dp&4ly zAZ7O8bE<<^mBeGQM#Zs&c6kA!WbvF;@@P`50@yPTncIPdL`Oi;o?kQ~YR(A9we8r) z3nUnoB2uc#o`V=IcxbFWUkbi2_iXJxH}Gtxg7v9l$OVOmHlawfBJ5roAcRYw&$CeT zzB12swgW|Uk$_LSS2REG0H}bvf$O&DxXx|6t%B`QzQ~QhMH!p zR<}lZ2GByp2~x*~;2}ymOYPDTx>0cTz_`V~^4Q~hj_8|cYj%(uu-woKYTR(jS z9__XO!eBfC6&ulj#Ag~Pv?CyCCjKa$=fq=%myDxxfcBWtS)R81vb|D_DtZn~HZ3TM$b17p%DTRs zAY*|p2i96}Q2Qj2g(E=qsfKy+v(>AcBg`lBFIP4)9UEg2&FT3GUh9or^wj>DqH9OW zz0NPBgE{D45;DVy(gXz_k*|ZR`N(?LscW&zfMF02N%)}6fP`rIrp;*dhmVd&Opdjh zMpUhZK91nKLZzBs$sZpyX9t4RfJb8m-@B7$f!$a;6ecy9mY_n48sD#7JeF@sfuvasP`!zmdJHx1rMmG5}22j8;WH4*2CobQNg}s zJ95v0TGSNHc^2^qY>eItjHkZc3h_2i5wANGqG*^y7cyq<&|vSXJWL^gr%Nyh5k;1O zN1GS3t^me45Z7qZ^%f=&p<6JzePc+(BW^c^OA?XnZEMc+vj8#Yc05JnSI&9UEUfON zTV3+i6#_#GL(kYP9swip_Fmt1ubl&Y0nQg1qg((A6@5NhX%&UoLOV?#{X2QI{-L$0JxmoI>Pw1Ri(m3pTJ> z?_N5bAsFJbLUM>VeK46k-@Dj&dHyPkLDQz!U3}dy_buU-xw<_maG&Auot$mCodZtN zo3$favf@o2d*guEys7N^Z&$!b&QvGhlDsdWN5z@}V%HHzLG=qsScv}|2uKaX_ZrCr zu&c55U)RFn;08Pba5Ws+Ztze?!-0bD3PpPJfEl~~h}bF~wKX7tOpL>$<^oRR81>4b zc&64{?bb1_P2Y!?+z3c(pKcJ3dj6Vy!#8*#WF$`%U$DCLzx|GEy)l1ib48mhdvi9- ze@D~`1Bqfgj>G7p&b|u|JexONns_RTetp>l4iJxCle6rg($&YK%QMOaj$XaH;BD3$ zoNA6giP=kqkq^CXKxPb(4jqpOnfP)iu3o)HFOP`VgHB78-h7b=NLTRC+xbxRegs5% z#qi77eJ~8VAS$kLNQWj{360j5S2t0Nz}n*KY#tWovd#_WmerYRgL~1cMnHo5u9^)) zxgceAO7`iMFio~oG<@$w_d@Y95gwl~)c3+t$Fpf0AR$(MJKVOLhqlNZfJZHg>$_$= z`k@(mr~)4@_$D6RwF|x*Uh+ehkPxNY8)6#;->rDmpn7X3DD^BFPH&}Bt27$T^{YPz z=r;mV-@wk`#b*n#QBf_85?uvCT_C#AsifYta4`=v#guA}v^(!W4I(y^6fpY-<@cp& zvtI`#@0Q1VsGjX?V@AXym4lZqF;6{h^V$0TEKOeT+%DFIEMy@sOw^DasYNW{r;J1; z9-n}eDtJW5oRg~{%av6o`pU6Ey7n0d_VS1u#c$OYO*~SfGS^72)e_qCxnb^pPI#pv z`QSX{DRHwKLQetT(r@OgJA%dqUmI%_FZ3I73Du~y$*GG26DLIR3x)=M2E-|v`6gFu zvUMq#he){^r;#vkYe$54@*V>1qHB$S^u|BlJtDW`QKP`F4qSCD9H`jvu}H*@D(Pxm z{a8Gz&mR0=4+ltPF5hzSZEA?c#EDjcUBl_9>a@BpGz-4#txoN?svp>1zxo0%xe<`e z1&qEiw68&@yb@O&w}KMR@yHkudTQWoXVncYd9MOhiJ6?Oaz9A%0x;6mO(xsat1;rL zJa^bRSERb|h259dy_&1gnQptgdx0I~v0w_&kv?MCBh!vwbW4N4&Td~GK|`fo0FSR< zI36u-wtB(*`G)<$E#d^3M^GWO_}Fl|;{uA0iZXjiFMgsTlWeI-RNstZAfc!yd>qLvD&NS7Nf9@QHhTq5o9hl1pSdkcYt`2*ud~h_-6AmbQ(zs3upXn!G)liUW*cXH zJUZ++9Wdy$Tj!=nbtK~OjZLI_ZgKRLed|M!jDYWTc6`=DXb|c!@3VsoZ~XC|sLR(* zN&AYqBTSqy7ahRKUJ2ameLO10-c~QeM0R==p^@p-u8iEP3Ln$9d6w7^dpXwtNWInJ zC22QD(f;yW#ixJ}4UiiYU%~}1YJU#K9wMRzOfb8?C-Y=7ICj)ZA>JCN0=px9gfo_+ zpkOS3wA~CyKk4??GQO^%0jWXo4LoXTb8Xd&k7z)`-Rt?nc2~YX;4MEqAl1dub-{PD zBA`xX`mviB_0@=3No|5}EUD>M!||wkK8i@~fJDPEeqs_OygSjIlZD`+IbpPsPbBCC z4`&c214ybJc{-s{fKA;qQpHB4j5{7P1c&I7Vr%Hx-2JFkQE@U>rCkx|(K%*GOeX3b zG;2OO114wKgkE;*=)28?X27s{NsFdQ7M!^t@tEf%oaMKesLxkAV(?6gBgj@Xsi^f49Wf$Xa7%47@r1hIuoa^Dcb>Ovjx*lJ2* zj#KWH70HAq0X+74igt1b`x#IJDvWO+tWcv+GxsbNdG*APCtj7Ux4miW4p)%mH3QPO z%-9i+MhLz^97f<#t^U*?_{NdNEG?rJJUq=)lq=mqq4JZe`2wDm7F;Sr## zouJe)cDHqy?SKZPXRGk2yynO4fW(^bvb2FN&#(}~;307zJG1_@GfEL8L;@B57EI48 zr!Bx(c%o*n(nwNCbbnc}dM+$(Gh;-lp0Mn6vm}Tvlh;qt5;{@YUb4aycKmJ{t;#7` z)V`XlaGI0J!?Gyo2B;Fz0th7Kgbfed2T|cb2f@Yi&>%sZlnU|ex+Dy*dY(y`>@){v zkxg`x16wWbS^V!N_DcEuK}dMm^IC;1>+cMf|ZMqkT>V#`(@`5KZk#BPT6@0iFh=^BO8HwOhYVoH^{g@ zIv%0=HNs>TJAwP(JecY`M$`dL0piH|MOy^lyA8u^hZ>AeR^btF|H&Xg zBBZc7&B6f6JLE}Xv_NbB+mC^HIGu->pf_Rk4KsFI)LcPqXXvKxsaKHU>L9j$&OY$} zG}W-mT1@6i?rb>4>M@dKVqtcZ9XKe!P{+1aq*y>F6)j#vuwjE4Y-S@(7K_isBLu=@ zXGK2c98V}Ir=2DBgSw-tX;pAHjzKdW4GIK68Se6NP{PX+@L4B6OIiJ*WE#=dl6S=zwE zEg<@&IYnz?7OI{mN%JHzQ?;((UJwoNV(MuZYZ8LStvhPtgO1lk(lQOZQ%;4nCG5RX zO$*MWK=KD30c&TT%1im+6%lDqym%tYdGF@;CyA3Zcwwl;jIEvECxquJrH1msK%rZ* zh?JGRKC^CbNVKidT?it*Q^sjb^($Ou&%glf$VqbM>p4ATHA?diSJ&OEdpcho;a!X- znri^0BKqQDrE>|emka7;4?=07C@Y>82gv{lb}*BO_zPL 
zE?{gBAccrNt^o;wdbr>lM>YxUu(KJDS_R*AJbF7u@XeiX9te>7J)65D@o2ZbgQzaB zYgA$H)&XGv9__Z@s<{CPyDR((fb_|*8dAbXC5I;{SQE=a934t7#Hp=0*bI5)ShA_) zG?*~MPJ7MWdOD^+S(PBrOI9TJIHUq&&2vsMmE_eR#hCA7Cla%aiWg(^MG9s zKzAXis?R|}F_lm8SpnR?=s>DSC5T6h=WF;Jd@fQG##md{{tw;&?Qo0SUReMm%Z}*a0L)2)^r_Z;;@-UEjC* z^|b@i#;5C4*;${f5_xEH76f)0ZRfHX0=X%;U^F#y`$#_Fbk2MbAs}s);2Z?sTF4Ti zjk@TY?$Gxa*(Y~7bqj}^QX@w@DNrgaq8FWpuJO%ocsS9LV|>0AAZ4c`b5Y2*Ih~pN zfmy4$PmKtxPtep^4wPDf4Q9&DS-qE|Ga}3tZ*2PD*&08UPvfLV{K1!~KP&60}6 z>9y(fsC0I%S%oPpoV0hy;tnT|UivWCxVlp5+ws+%a&>zh<4hua;1{{PK^A&IWBE^d zL_=5XjB_;LSquMn6|ztA92a!+`u+YTQS?J=Rzi*)?}R%ga;9eMAn1|XlUw5(kU+ZX zWTdgpwNdc>{aC^GU0qMnF8IdIKR!GlA^5j8AT=ujcAExPjBY@xV^@vST-;fRVujmyn_+V z$-?lg-0Sf6j6wAvpwBer651AoD{!W=rpt0l$~trEv$)ukEc2uF$Obka6=g@y(e?6p zrkD_$x<06h;CJ^%;v7I0&%`ZA4_vkfl;6MhRpi;GdyY?12DMBZ_&X}`2F)toJ$u6F zj%lg~9k`tx85q!!Ppn;vEbf+6#VIQ=c5f@J?!Z@flgsMZF}v5XFB4lkBC!l)45@FU8XF9my;IIO|C@*2Z`;;p2481V@A$6x0YjYg?Xuf~6 zwsUg4b+z&G9jmx_oi(dcgK#v{+0i=C1dXyU7EX7xi&PXfpY3ZHH)@Hxxu_b@;J8Zq zuY9E2fkdx;IoRJhS-)Pq?A)-ocEoyn&U*zUrM>5z`nlN|F<7$foQCUW(Y=>(5QV~3 z`_uu+(Q&)n6r^;|sb;4*IXWi*Ol03mSOrOsUiOFX&9UO=lZnLD<+baux)+k?VRcBc z*`8@;|Bo}_=)3*gkPEELbgV1sWi#5Q_30A zT00=E#-n;fZ>tLdV%h|DU=s~(uB~R=?E<@YJZjg#bLYbY(qmRY+63RNcvSDGZNwu0 zUo##xRE2;?t=>khz*U$1@zCLb=8}GsD!8pGGz5_Vg7xhvco&p zw>^mMj)LG14~;jnV5)G-A+T4S+34I{L&fR>eL13AZc@%f<9VmEH|e70vcgxTF$anc za2=WPnWp=DJ46{e&@D)>()Fsp2mSZoP2Cc4>hcvH%B8ouaO2|6r$C6UPp# zv-4tuk&y*dTvV(IgegzNrllE?aLDrx>u9NHAhtFPklGuN>UabPyD<$&mHN{v)^q=O zRJSLuHy{lYd_OQCO#nz=wF~U(gUDNhcclRdbHrOoOS|B^%K5%(71*tMq}@73uZD91 ze=8s*q1#I|@K-omvg=*5gv44vA<<>Q1M_@{E-DyIE{Y^gaZ_m67JJ#|RD6I|dnMw{ z&g0~vlrs(FNm!wBr(RAhaM>0@Xw-ZPkj)P7P%~p6zQ>;ew-ik+@ zmyA=;SZ_TqaFdS>K2nwo0zy|^Nt0BDFJOT;UBYS>h+mSNS3KwGto){m6}i z7jbph2O`cx7Xrbz8X_r1M41fxjHuduKy0{DKU46n#EV3gCEJXI&;#im)Hc{j7hV8Q zkLAZ1DFm@rKtgTf`>_)zuzR%NyP>V9E?pQa_^zM{5{?aMT;CZ_e^5XwkT4F9R)qc*=|InmvdceoMC-E;R}5K%!BalOqHMgVl%--)cpxMZSvY`L^_)ZP`EV2 zuD$v(h6KbWAf3M`NXD0xaHn}%p7Y8SSsAUGFTzuwJnu04#8^lc^ZR*~MGa;(XW<~( zjyoL1-mI0cdHoS)hvZF);a98z*si7Fz>1}jT^zaZZ3mRw@c(x&yQ#DR((2*l?^!O86)Q8^OJZv=@}E#K3I2Bbzj8V*Qo z(4Z8~D*&NKHjDCW6EbZG>?lH}Fi()@Q*j83c|K&CTDD`+$)Xm=bJtbl<$mfMW*Z$~ zC4u2|K?tV)*1DzdZ>+lEX^Eq$46_i?!;6*Z^-4u@&p!OP2)={i#W4$02bJp_=YmI> zet3-6s=7xXRnnQMPE5pHCbqx0EKEf+{qorRG^w((3)57R_-v`xSC`7#Cd;c+FPhD$ z+ZIi!sQ4pgt{@#Uo9Z34fF0VU5|F&9$Uk*BEGh9=j49R#KmH%Y@_C^#c1IflX{By# zbOTaDVvl5Gc$+JatfSQ3&lh~(b$Ikcqu{$ib@#A|M%YJsv8^d4$AU zWj?e>R#ug)eKCvW^Jxq&H=9j>v^9B;NPR%q-b+0tZ<;z(z5mPX^y);j5LS^*+`|FI zpj+3DPhXwktjnw0bwjiqhC!*?xYq6E(7t&)6Cr?a;ba%sO)PdEd-4C~ysz0v$cpCJ zR7AoPD<0PK9tfKcCP!P?0u> z>Oeq3m>uq}GhXlwGSVind;6a#`2L|q@ZF9_=tFuSKw7O>ZlX;;&N}|?-3|UBVoxI; zRY^;;;5!}~1$H9@-@v1WvKF3xBp}hik`uz+x>{3&#EpvGoRVx#D-xRdDBWC!cIjKB)J)0dK zs$@0)@i2A63S^q4qaRy~SLfXl7F*Lj6g|+|nHGh7NLKnyAYvYNXuAFW=c9ak?u|_X z=WJo@%qK57M7$$svqh1_sTe~LsvdJjP<|)tIJ?%4@vKE8?@pU-^ibv&;KnbC*=4ux zVm-GWRyS9nq*7#Qb(0B{if4yXmg1^Q(Z26C7ZqnzN!CQ84{tzX4lg){h=O!W1}3qC z+ozP^mdR7hAGXxtcYAtJ~;S_x=w>E_adK5TQ)t6%%E;>%x-C{_I$?mnY-PQpoPI(=62-<`he|1C80c ztWGr%k>d9VySeS;)C5Qflz;f4z4-qT1mDeg^pP5n?wT|ftCaC=qun1CkeY%thm*G@ z*LZ>7zq`TT{b&PHJ>ND~@cpbR_#TZ%Ewl~0MgbB{6$nc#0Q3i>c|K0uL#%lHB#AEF z6zwT43!l808B~SpiP#oJ6iGGB_SXbSK9y1=z+Z)5;A#!Klf!wr9C(K%a=^;c(s#0` zAi$&Sape>XLrdvq4n>!UsM>>w05Zf|ikYwLdrj-!#r7I_7FkutXz)%5{auD^eqctZ zcz95P(i@5w;DUVXDJ@*fyBq%NT20i04oF0O*tN0N%biSh^fgG+VQBu7#|jp#^BMjpHOW7%e(7;R-G<^ zlbiBwfUW^}^i`wayNZW(h;0*mvs$&CqvQo2P!zQ1S0rS!15ptNBODHcBWf}z@nMokPNDMg8XPfv4 zhC-;2p?;$xhtuoD;(EvFhbtzPJ6(o?eGr~VKqLlv({f8r>^rq2Zi+_mwj>RM^fILB z*v_tZ4oy~<0CU;;zO?)WNWbK^V8&2O1NGHsNLo@;@%s4xkQO5Zn^HzLPjNUH 
z8U$D6MqE<~U2&xxMr8FKOmEeUa4CQg=}?pQnikHX!DBPb8D=B_pH-E0Bs>&~9?FuN zFl|@xB4-mR3m@Os$Kc747a5g%44x80D|KnJLQ#*{FG014ItprYP{X0v!eWZ_rhyP8 zq98^2E6ehigDBQ2h29atqK?3qq~UDCJ}G*cz3X>)e||azDt5156JXBpnJbETZUjoz zbDQp1r;-hQ5{VLC9~3qfTdWZb3Bh+P9yNsNw^hM+8_EDq;zfr~y~WOo#-b6A8sv%p z=K<2n*!!$qam_=&es}%9Z4i7nG{k;Yg~R(7d=G0tdfJ3XUp52MJ4pK9%$?a@8%vVL z^%j1dtOQK&-Pkfd7%YLX*b!I~v(H2H+|Kpv`*PLmQ}jyj-7hj!ei{D^Vd*&Q$o!@? z)9oTvmC7p7@h2j_h(tDroy+8CVvI7nM7XFv^kfL$c0ecJ*G$8{j#k6T*e6r2v=3d; ztY(RB;!d1U{vOY3k^K@Zl>H%E7v<5h1F0^T&BNSgll+%7+_8((B4my!axs_57aQR< z*_hk}eUdn>wY5yqq5P;ujuxIwxly_eFjuZS;(OAX#0vXLP^#IYLdND}c=$x0y`A0K z?3SjdO2xAVd|E8(>x`-**&z*YW6&3P^9(7JM@(jok?G zLWcsR^ZDS$f#WF!-#5FX*~Mr)s476JXj{H4)Af?)Tmz(7ycuhc>;-m_tpI{$Exf>P zu{Wz!wQjVrZOdaxhoJNo#Bz;(bl{1Ni_LhO6|ntHPFo2;kV+1N&yq)ZP7$S`<$@5~ zDR#vEc`g9hx^YCtT0rCUDJrDpp>5y5eA0Xk=gNXfh^R$i=b!jh<7?<3M0p4}PCh>; z1tC|H{ewVsz4Kx?_7_|XZK>0sVw9P$Spc>`Nxviz7l>i==ybP5lrTUvk}LGJne;jz z=|mhDX2)$SC{-|4V!H)D7eL#KKq^2oSIuTilJ!svqz z+xu7jtLw|bQJvuXeCuLcuiKhis%R6t>^XG;QYiaIM59J@0=uG$qm2*|IoieWhM72x z%A#cbfUlzkf>O)n$QeNw=YZkZG^T)X4fh(^z=amC0oy50$aY8QA$XAgB_c(!DU#;+ zYqMIKjI2-k2r7d&VrI14z&&KI0t@}9AFYC$asW-6Coqz0aXC9NEW(T^Pbl*(LN0ll z@lp2An`z^W9+c759((b5+?hZ~>OMJ`oP&Lk!)-f$*wXCT!V#I9d}1i4lT8fpt->(Noqqx(-N_-3#ZkfxiL z*H>G^+uPyR)ph@Jb1*->*```)Ko0bF%!ylWkt(PCPHfXZbMYo^o9TVW6(IJ%7X^p#xM)NVMrtK6C9HKpWx7z>?_ksC9KQD0w4T|!62q< z9j^jfGi1WaeKQ=2T|R>^2w5&jCS6nwKeTP+d;kz7L?lxouZ}D{c#y}Gy(DZaV5La7 ziKaT#O@(}4q)swIc_3E3C8umbF@Pjn3=lhzc@X*{wkXRjFRgJ5bLbM>JeuTE1}~}B z8AW7idTj+|@wL2)1V>kHYixAW?w6&3W=;gj)5Gsv(!g8vqYsEADJDJF{N$&C^|Gt5 zecH*iibr(?NWXl70;FFqPb%F%K>-putvVHX1)+83@3awO)$3-oz3g>yGTNCOk9UU$ z50p9^49@yj!|{&Z>sJ7&f;@HIef;2Qir6iSxb-hs`RKGvd7P@2&NC$@X87M9%ZE94 z^KS1H<3F-mr#J5lU@kTv3BJ!p>EkZTO^JNG=3NBedldypjK1muBpnJPzOtR164qZe z^7LPhE+>-&W+qn#je9dT5f$UO7(@x{mK^M$}+6N70`nnqS1|0|NVV|FT` zg?IlqLrQb$rbT+vSRy^KT$a?ZSYH`n`bd36l!}RUe9!!9bI;v?RAEsobP8Ps-}k?* zFF>jneAgHGE-gR;s1+)SdMRHgAl+_veqB`nsiO3?PVnsqhsjY{Qf!nAG_O7%^dem_ zwl_}o>9629()+RQ7O_~M~H`rYX9U;feg~5_Rw63`OE!@K>b)4heO|${Z?uDlA-_rs5?-A{M@c_OPW&_YBcV!>LcpB4Y9 z8<2b`NoKmjB14}w1mBfBTAozGm8lWfRji4eR^8n%SKX~D-}*@hAf0qUQMHEzPbxj3 zl?pXKxIb&5;=6g1T1rH@x;#AJZGD-kP1*jmb?ZsG_xk-#aeDmqyL&yiLHJ4G(aBbS zesifiL(p(pT(Sd8_Uw<+=_!iW`e37O1xT-}Ht$O(enB7wJIFtX0!r)Czu%?05q&w8 z6kFr?UDtTu(54De9b@;hM?+-~WYfhMRH@T6e{Q~0qI)4MRz;!6a~tQt2AR?9f&CL@ zv8Bs^yciNmitX{Pu4^dUK@}LFo$uw>#Apnl=YLZLn>uU+%2Ev_|Q@EeafNi}||rt%(=CS_ws6Ow2Zuot2cf@S%*&aq)8 z{mGQrrX_5+1s5-0vp?Qp354r+r#q0as1-a~Q}9i)=DubhiTf2nJWX0rxtq?cSQ9y| zdck*f`PNT60O@UK9-T}pfP}QW!V{`gTtDl#PR$a*_gR9=E3!e`Lz{94dah^f#qE@% zZ3m?D;b?d&FHy2MVA9GY5_GnJl&((>Uf*U&KpO1qyn9&Fb;9CJa zBGdFmfJD8E{0FvI^Q9n*60SfoimZypWiH6ZM^8B!&{Te8`m7={55epmO<`Q54RENq zG|M@$4v7}J2m1Mv;MS-f__Sh_Sh<{-XN#$oG5@+v8Xkj5S(j}*9lnp^(C&+q_ zK)92-TcuD|$K}eAmvu{sIA%&VX|C3rVEIHdb($JfG|T+RlJMgp1+6s|Zqd>}4yNQ5 zjkF|8q;oWv^N&rnNWf5wz^#rtvNE-}1jm;02p)PK)e62jvzx#U#Qe64YOhm&^e#Y}?vBT|x8w2g+vk&= zozcn3?BZe-ipacF0cpkFdtC~U7(~*rwL~J9{Fpz{u`qFyeUYJYV~`MX@-XF{ys6Fo z-s$xqab}WYdL!l=$eG%Ez2oAO!q|28j}G;Swr+aoXV=rTfa|=D?a@}huN>8jPJ-{f zszjSk+!QIRfb@;9(C5eZAS=0ZABBs-8G<6Vi*}(#gJDM#Teu|`w17Fsm=LM8`LIg2 zsI)DWg7>ArdR08*$~0wo8&MPgEu?Hru;^3CyV%A+`-YH6OVLKk1_OpTK!J>LR{}e) z35#Nt=E189@{yQR^EX&z=fCK&HrA-9aZ-nfs#^J7U z-Re(k>yWUhHTzK1@Mzvn{+>}@i8==a-<0i9Y-|jVdixg-|7JPB_IN+vWDAy2@)F6It;6RUsmu9$Sn2($S-Fxb5SGi zfR0tTg5{w?Mpg@%LkyBngE@RMMWtG%Gz!$n^T{S1aFh&9uQW*rmU%Ltialy&(YP_1 z^W@lIC`}gfhN%B*fi%h5SZAp%qESIUiD+f?h>_NyvADOwjNf7F5e+;oCd@R7f= z7T)w_%HJ&%QGBu2eCR*M$IA13jFeC1ba-TZbqc0*1k!4YdS8v1dcpUK0;GDu_x%&_ 
zh#2Ch6?|(HAbsN3vkrx^pRL^duAWD4J^;_j64Sj?xN$Qx`GR~A#Umw|nWlC-GJ1ii>Mb~pk8hfl+)%IY?{Nn)0=yK$GXbC+tn3MdCUcUq9j~$`3I>CfkIF$LI*42A z*mefI5FdFmPc6Hbvft8HfGCNtlXZvmnTk$O;cI5`I=teMXR6-W=3pXniF(+~*A^#h zA$XEZiceT%jZJ4Mt-e~v(bv2o`^!l69qNe%dtazX^}MIo2}l(Mu@wbK zs|0q-cR;Ead{^>_Gi$Wj6){A3vL25#DM0#v?9`AfcQudRRJ1LaNSn>6c5w>=abq9l z+nui7kw28)Yy}Q4Ur)R$W1bi?Oz7DbUy`ktCw5o7y)C?+_pi-g8X1qq>5cu%#Ib{! zoq1i$BhXt>fTZD$KCAjmi^!C>JzMFT^BSnUX5Q5qEELDqE5)_=i z7=ay*O?Krd5*AYpKf(yT9As=aycCS7T-g!%BlURjmRDD0T;hag8a6~uV=zyLA}1%d z3kWfSWEk1OBrVq9+LopIaIQ_NQ;=}vk2jdNUo?MJ3@9GMWz)Z!dWSTkyDR_ZgW7;K*CAu}2 zF%Oj65cENbi!0!)4ji!UPIax1qp%f%Z-0Q5UTl%33{B6A_+XI~&N`3m49!DrD)x-~ zcCH`Q)^p#?(ypK|7`g%}AOihGWz+SB){MZ8UJE)``apvYn=_F-bGdnMoFiz%rhDVvt&@^P_V>lHZCLbh?HcvC&lS z5DA(+W`{)G)L`3*`XU#butu+P&^X;PT*(@}?7mjfso^I8>9_R-u|(&p1a{p8-wbvW zd{^x1#NsOib`=Fk6>@o<`?NrsV~ke=34vWr0aERDuBt=5n%}gHIGVSmy0=#k=bg0B zneS)kWOmr6bIwnLbQPM)$$*9NsA2g zx2fOOGf?Cr>ux!DlJ6A2Ak~TuCY9Ma)EUv!Op-U3A0d;S5k{^Ca(h7#Zn7tboU!6V8#yGXPxbC6?tt(rrZQ0<>1cgK{9U+KmqX7;Vp)eJB;jdPwP zj4Q%zZj|SX{dS$YT{kFQSMp*XRPzE1Ap%aj;GDj?P!L1uRMW&zl5)<$GBJ$uQh4M7 zEvkEJu>HJn%TLNm09sIawr(qE@JkgQ596Bt)_kOkF|umkf}DCV4McK0_nbvM=)g_!S|8^q@Vx(VdL6W z_S|2-zTkTq)l}HSaR6f z_&XiD^x@$lF*|i(G`jZ?Go4C{Zt>F8ej<@!Ed}e;#WDK6=TdIgl z>Qy25hEmMX2}oa$#5NYt!=c2>v=^fu2C*GoK+RhbaZKt)k&E6S;Zdrez>Jm=b&&_9 z4kKlr-ug=;AXiJ~S`sV^K55Z3=gH;#0#bVY^SAXR+Cxi`KwB4WRBSKth*DFw#*=|o z7T)*k3946;@s)Me)*H~gkhzhbE;4}LgdM->T@<{_s`ZExB-e6YiwG9J5>hmRfp!a` zE$GrNhj_QHnu$K&0bf{`>#2Lj2D?*}7r1z4w3ZhzQ$6#F;*TO}_Zt~9NJZq*1*&uq zv_%r4(-(@!qAQT@*J{wv>nQlu+fkLzU+weN3mTpt zNIm%pp9dsa)GCYore-@=h_dW8aa|LRr?T-P(nZ@_Zpf4`vz!6=C|RVBhcG*HbsXC5 zsS}Xy_D}_}`O^6VmKPSYBS^?WtKD&eMz58LHpaUF=^M?G{wf^Hx;yQ)%zzmNbD?Ba zY+4_q^T{ADDUWJX8l_c}k_{C=WV0k36*`KxC!))vA6#qjbZv;DlZFsyX|=W;Ye-wP z@%rxH{*w;AW@2G;+Q4*c?*e^GjVUOVy;I^IWVF-r&J!~DL^fM+vXwaS^g%LjQ#GzW zKkUiwIxwNUP6y`dDFR4mJ0vQGxks)r9Xj|^13{cPP-v?(;gV=>uu(FA`Ya8nA>V79 zv*=;V!wZpP<`3ChB!4MLx)!bMq~&Q_Di}Hg2}X1+h{Z~G7kvA{U;h*!E#(nseq8YV zv!DL-ch5TV2u9WjzN>lUfcE202&C!t+nw>%p&ybdzF7?<1a=jB($#F|%HZx|BUm)u zrxT%RU^agLHm8wJ2X1ro;r3{n?rDB`H9Y7Kk}l3Z{$}_v`Dk~Z&fOi&2JMNr!qIiB z8BBL|l?s=Vq~mn+dh5mRyFZzjzTh~yWv-*@&1V04|DyuC*R=&mZicN+IV7i-LLpbu z1;SHef*E%wT%aw37uW@-lF;A%j|_FTjxZ*+R~M-l+ZT8%=b}q$98E`>c4dBN3kd;a z!A+j0ZIRI9p{jfI`kvHU3VsQ7uH&l~C$6!=>@IPpHC^*wh4C^??s^z-wbt*Wr9KT$ zc*-V~Fhp-lgMpwCM~618L@PB6y3Bl=my#f7Sd5XUAEF*>6eHc2h%&x>x9t9&J;}%< z)-R99p#S^Ct1$4${ewiZAV=D5Y_3jHbUOE*nsf6Vc$=;vyLAd;HB>G5US5Ed4EFE7 z@QxFHtljA0Zy2o>*gb*Zn;ZRcmY)6M?iT-?-3j}C6HrVBVd zF_GV?9JRam>FW8`|1o!F*>N1nn$B~yh+K|^h2|c0#M>f3&8m@9Ywo9uu0`;fPd%Zh z1@u06ND3f900aT9qnTmx`)BEhD0&Eat19mx(GlS>fbuWR?YG}Bdf00b*fI1*9xVdW zzsn)mP>h>cyfS;IKHn1t?1zl#0&St4{%l&-7i+sUJGNq)1 zdnwZ#?wI*GKiKr2|94HJ5P`dLS#D-}A0hZ=m&;29Q4txNX}yI*K%~SBft}TYY`Hkqt_{`~OP=cQP9S2$ zW^@#Z$i_YUkJEsp2SWrR=6!84p>`)R52@i5ku_BbFD$);u0X=aRs)_*(=7S?kcT!Or3mpmzPCHk78*PRwesZOFnCh2)Y-uyQ-b!f4>As ziwa^-{sjcz7sG9&!S;?5HDoRWNUefzigcIri0jV?zIP9#od5sm&bC4L&+|aqYb}Uv z5q!g=+f!pV;L+Z9kq&C%QGF@GyDy1VNJJs}`y3wKK=2KEmjmg8;v%i>B}*=b-~wkrG1in2YvriU{*;7t)yk$WhZ>c9>P zwhetd1%L`=2wQ?%u5zFF1I~6`$Uo%Wlh8oHvzrs!<_Obkqm5p=hGdSAmDlk{RFAq3 zI$jjq5cndt53Yt41gR#poz!zvkAy|A?4jr)&&9*3p&dxg1+ncsnvObV3>vP6<@%_h z0O{F0WD<~$XMML@igbe~6J@ULw%_n*w?O)G+f=n`q4ebxkl4&?Gzx9V@@vO!Zft%^ zz%jYZe@z35NqrwYn3DpIy_7yJlK5@i#ogck@uq@r(ED3~^nTr&&sI|%l47C41A!36 zQ%GbUjefMzN2VH&XoL!g@zkhI<)&d0&{4TVN2Zuvy(~r|cvs}#ah`HEs%b6m2r4HH zQ|b@41EhZr1=^^$AgD4!f>uO+S}?a1T;A*HQA`gHZzjicp)Gmc6}cFhFzZXQL|4bb zx@fzd0wjK6fpdxNR1p=Cp$I^$D#9MMctA#?=aNvtoUJxj+AMBGj|6U4?rDB3;nO#m zj2jFch`p*vmw)fGn<9PoE&Hoe7h9LxHj5g5mF&AEqfa!@EckBVkpd)c<9vzW8y*0p 
zC#T0pM`ve8SI6r&GP0RR=W~MZ2}m))&VWRK#HVc9-IJJL8c1)q7sR#+?DnUY?nOL; z2$^Via|&tx^5O73SI_%xh`n{_kLvz+_uY_!1dnbYu;Wpe18Fq?DQ~=Ycm#B?LuKqRAxi&@z>GgNiOc@_z~c1wG& zyQtGVoTQtYzeffOZgr;D$J@Yzh;Yozh zVwRhKvc44CG4yckP|^La&)Inyb9>e^IK6^O_93?eFWYSk< zN29zd?zq(U%DW;M;^LVQfycR#gh5geyN|KpmvvqbB3ux7)#bo?Xe95!F#ONei9!>> z9-P0oyu2BTuqUD%g=n5aG9P|E@|*{%%zyCV=#?ZKfz)KsFxfrtMg>Tl&!gccD2$yX&zpI4a(H}o z`4SfC(cW-R4IP*?+O7g;c&9)bU3R`)eHHaM?{1Uon?@it79ic6N2^&LO_xRe#|&9! zZQAhT_P-;Y)?R=F`&tFxHv&>Bcfs<;oobUhwM~m`K`hYZ+0Km|avw)*t|5Ili}ki$ zfX2a}r+Rz^3nh7CzCu#qiq7ynh=jLaGykDUZNF9~=|k;5jwvXq0V5Sed2VTuN+L`; zFfV_IaF4`VfJ5CioVaIAtBMMiFtt>qgr zvPy8k2|JYT>D2csV(EapdHkS6zx>Y7CZL2!v8;{`zNm5R z_gyI^CExP3elQKB#sZ}Mb{_4|jLkPZCuq7xzVGjSzEYLz+pT)@Yr`LZ`jZqM-?cP# zY7p4PbsDaJJKU1qZUmCd7l?%-F38TyYg|!$q+zjdRlcy|EBmY*YQ;&A#2g=@bKHjdhRlfGNx=JY%U8 zLCXs2QT?ii&P{Q7r-TSN))FgrGC~^V9kIKa-Qx=Aux14qie~%XseHm&0Y}v2fNNF@ z?5h49I7j8~;K3eLeDP*R`FE78R=p``!k#mI@uR3gE+C`IH74RWUI8cT)V15#509F? zSx2S*VjwjXAf4Y(@IBc9>F`&8w7BC0V^1m;U7en+)1i;`^Q+wn>`od4b~^-;F!X8Z z_(Kxuqd6eGb8Vfxy3xL!XqTX%gBwd;7GQW>qtCYR0=ah?apI_o*iQX1cwuSf zHV+__NTO3aTPML|Nf!r$Q6-plH~>laPy#@lXXPU+jXdZKk2%y2B*0SYpC*2$ppQ_9 zWxlXOn05_I`q+8yLBaBGIf8M-GHSw}=b*CJT=8;ZSg{w(evlREvZH`Sjd1*c5G$3mO<1 zn3LKVhrL|4y4~5QONOSM_*m{Eq6&l`hG1fR+_42y*SoH$bz1MbBW9I}EiQPXL;xEz zThdGAs%pbWux9vjU_o9q^MN+W0+|(sT@EByy(n*G$2WdW(GCMD=ZM_1u!T`8o`~6j zB2~{>U)4gH<%Sfy0MbdbOkoMRstAX&qV+}Shr^8?JPGZ> zRp93ALIU#gIo{gLyPOx;5uYvR(UicB({}`<&&eFU`yMUqXb02)r2S=r?@wBQut@N| z2V}VUp;5%=IP%S1el=Tw6b=@zn_g^CeJ{pZ%dgrURd3Hd>N|_J?DR=M%0`@7O{?}m z{Fk3MbW}^rdbaIxmy8Xt6R;wsGs_z5dS}dKc*+VX&pIoDZ|mR!4Gp|^dA^b%3U0Db zWzj5~+&Z zNn$&Lk}CYlt{C~}9--#QVcLS|4FxO34lL8-DYMx^96}5hhX?S~FC;`L>T?t))Xn)O zDqh)|>w3?P0?qk*CI08`#Mj+|ZTXhnLLMQ5zA2BMg&_KLbre7nj}GoXdbr*`GHuDd zI@pZ9#?15J(Qxki>f-YFWOZ^?1L?4wck$%%?CK;7>?XB*Q;*bc`^^MQx+5S(qwZMt zqP*tLmfGQ+1|T&SAd%(Yz2N)WoS&s06W=VmJec^CDvd(+F`WR~TPpZ&5S&M=sb(O3 z_!>y81lY+LBw#zKtT!F?B3ctr>e|>!J6VJ$G4Dt&vP2&9un$#Lo+w%gdH6 zBkEGe^-6H0>!ujNEaY;eVHnJ>*6ZW&k_&W7<%N@=M=xB$oKQIs_0<05>^tk1&ljwPKw?2{_eotKu$$r0 z`Ds+d4nTS&J;2G?;I(=57Bnil(;cB4iYp_Ed(maqB7a%b@4+IWu=vOgaK=j9Cr*LbbzK;T=cT&rX_Kp)w zyIk;{4_yMJe}~BdE2?a~Q$!bw$K!z4AwN@`9U1VD~12 z1>Q-s3Or9hB(*Cb-IGPbt*yeN@9$18hXCmxr2uJvav$x-|5Th5{=D?^?8DtFy(!+g zvEcjmbRbzLz(Cis2&uOuLb4+Gb}DIDlIo^%@=*wlfYw>31g1wcr$yKdxp->2Ml>ep zRQaKqhaa@Y)xM1r1_+22=4j9?Z?AFxL5XrQxiB|Nw3X~3S2Lx{mZdOQtVEdgvX~87 z9E8>JRJ3Z8-aQtyB~fP{qek973(rR|TNc~Fqw)0Mn(Hu+(U29=As8(j(MB$$Rjn0| zE}D-&ia%rES9LS0nv<(Z31Yj*dzFUiC{e8EWF|s9o@%FysRmL8887j-__xWvpEbz1 zAy2d5`?|~_c(fj(m;0*`NW<6z)CreGU^igs;(6JDTR$DZb9q{hJ=5jySH9uv<1s=y zof7>u6d)~9-A%XVxH*u>|3|*%oh^~wm_NDmCLrCOEkN485s#*&2NQwJT;JNg&;KBl zG+zGukG{TF=Y{V4Fxu69|F;r+gRVtDO15((iCDxDALK8mhO?VAYkkB~T`a#rG)KH( zJ&oj?sEgH=D%^ljP^WAlMs%een~;LhAgJV5!g^G4bggiRu>u}Wp;17Hb{k@wAmyWA z&CB)>c{N(z8e%)jL`S%Jd6C3F2oo_6_25{M4!T|iDXD5jwiLd0Py%w6KD?4>^oHQj zMbLO|F*q*HJ{P|io=V0{5@E1OMs0Gu2|`i|ehHVeU}A{KS2Y;vHbO)<(I9GM4Rv%q zMv9(hB6<36mjVgF_hKFqcuWhvr~61;MI0oL?u{PcWKZzczwQc1pHF$&{eq449FWj4H}FVA z+26U~yLJpB!uaX;2-l}B&mao;?>S!){MNy?L zJsv^=G_}R@vuAg!ucxK4`Oq$hX0-7gyhBLs#wMOt^g(rHn6Yiyl(H0AZ!)9P17%z-lhCFUc=gZHCbw2M&g<~8bA*#>{>WOXMX|sG&qf&PRpFRhN7N;#B{JYzfeM4d zB(tQAR*0dv15-I8f5z5Zj#>oo$hfdlkXZ>FMFsE5YT_;mDZSeY!so>*?^1{MiV;PZ2l_ z+QyD8FAlTEnA8!FRLm_q;M5Annj!^;_~v#7p((UDnCHB^@Ui-I?Hf ze{ZgXQN!gK1W2DxXT_EazNZCtP3YCo;s}W{m6VRaDvdm`7KB*1*&1Te(*Yj4UmerZ z*n9wk*ACuztIC{|I^ChALNLsGsuFNqZqi34cWeg@)KRjeb3p0^#fztd04H z(b1h&HR znYYujOE_N)q!u0#buPA45zPud0F49){B^JrH|_+(45DMXqKttk#Z1mKB-}FENMR^`i+L%n^5`SpYi%b1 zLgyW9$Fl&@eNd5gT7}i@wj2HzWf)zXsJkJrLt7}0K!figoyMHUi3PS|N_Sekc7Q5~ 
zw3&tGMc&a|*NKo|xX^$JY1If&!Ly5wsSv=;11XkjX8~R!Qm(o(%htO_Q>maEh%xJV zO@N!=G=}LAg-M7HFM0`(*qLimlRaw}*v%IptuL?f=;(Yk6u)ZnV9x!6ArN|0_;oc* z`8e=0{JNfR9%8)EgUxW!*S+HE#jfGyhF#No?H2T$FXz#&fP{oWJ^JYOGo|?kNb|a} zn+v`hr<~bG+n?sqEozdgO97-?A^5%lkA%6K2);9r-f0Aqp@h>q$9m4zOWk0gchuaZ z-`Rk30xgWbuS<$M<|IX~NX?B#4U{=yf2Vc^WFU}HJH(K&n``q&$3!RBz*2Tz1!%+i z%CmS19!;~v#bZfTwUR9ToO|8Yzp>q!tOdLzi;5)7b(0{b-+qxOp4B;#4KGOyTv?pB zoH~b;0wfD0n@E!Eld}~$Rg2*6USgrQOh|He0!ST z%Wo$2n(ecvI=v|t0BOjqw3NA$R%w%l*E~YTeZIch3XLSHJHm?Ljarn+0~ud9)Bn)Udi69#vT-RJFBOvy z3sXmQ5+ytn^|&tn8AUu{o=5N~!8cM5(|(|MyBXp^L5kiZwXJfq4hKF=`8-Amh2r!w zQTXGKD>7UZk|MivqK9tfLT~TpdKoFubfDL0>Dm)Klb*?EyI=R9LW91!A!6}R zMxqfXxaGHlp->L6h>MXnSSJ}|C_Xc{2$~iG>1oR@+;YKpHHzT7Y{NadI=uhj*;Pmp zlo@(;bfmn}!&R;_CLZnYT=2a&$(}hLMXKztC=WKkx;3>4xVQj`8DL~9qZwKPBte4R z89GW-^{dw799DMQ^euV#k>^|u=9w|7r58zq0OPscXGc(m^Mbk=xRpf+<$w%3wf09F z^qAMw#Ynylo@-~!QYeuUIKxV&=|Yevk%Lu2w`_CckhOPNmih=k5^yfTG%=Ultc%rm zZQAYG{lXDp1byl8szhJV-zJKR0D2N3=#tw0jE%T1qt{Tz^&P=;0*_f8!Vw`E8gwIL zrNEPR8OSG#yi$kIY$AoED|t}ow8$tFSP#R#g07ehKK=6s^_4+qd4H54G7x2x_}GX@Q- ze;FWA1WG07`mX~i>k0TQi+oj8-}}~8zd75?BWqSGjfP^yY;L$p~AR`1+{6p2uM(-fk#iBbw(W%0=i5F(K9deSQ#4HYz-soCv>AaDU*j6$*&xFxW&_C zdA^02sc~)A+l3W@--D^gLJiPKg zeH~;yp%KMP8XX;HoMkt=%cI9fXJ_FVeo*TXZc>0W1vGAS*Fd63_CI|0hxhJ&{jMAf z=7IE2Z3ReZ=^EJ6!XqHj7RxZ}_c&41R&yvjKHl5C;G0LPAvwP_xEM$)#!5#q!}K-&FhSSLSTR)DlOO^M}# z@1;Cq)_Zitw4(XqEewz6>8Q?o{XBikzuN}rVNT<`bj+IboxB?-W7Eeo6m+?#y zPDnc4chFpYF2+83o%bD5Me1d`>OB^+Sx(rfrACAq9R)mTb*_PA#2w_mq%LGnwjYJi zG-c9lDAz@w4$5#t2SKcsA`Xcu#-oeuKgk05H>vXw+v!G~>I116%qHq(AtMY0 z=BNuaKmBF3)0f3|ray=AAP?%gyK8%xLXD0Mv=IzBZ3ruCqhcLoeb)%27S&w4;2SO7 z45Zdqm(OcM(uXgzA$M{3jKYs&|ZvD*)koFq|cC7u=vwkz&_m^aP>hm}L;FVX>@{SX? zZzTA>0g$TN`J5v_H(ONA)P*=DsjhQE@SUU$iY{oO9b`~@B${oLa7ka!MK<~)Na$_7 zPK!W=V@HOk9k67+@A z!eBf_MVi`Fq?oTSa&9Br)*JHtmqGu$w36`RQSG`|H)i!=uYrPcDy+PtThQkj~#QsLxzM>;#Y=|F*7t zUw!}QS^f79^hKKf%&e1pI~9C0ILV_1GX-X)@lz0IrsKp0NIMpMf3jTg-3}x^0um0E zRQdN>OtZc?E#gjxpsg-CRO;kBlDZ!_0xC-VldQt8Z^Nu$RzzHJA0Ezjl@P;}4L63g zP`Q!W0KDd9?d*C55k)j%1ATOgyezy{uvJ23v#4{;K6cU5mcH3m0!9~ND@UH(2cuj9K}jSdNl5}gtf~?ME1foN z=7(#;aJSC;WGq4#l8|gN6)lOewZJE9a+@`HB4_wm3ZPLGbk;PH&vIICn5-1e#h z(*8SqxASN^kjyt?;oLw~z7cNAwjkRRoRaF6-Y>rWjiC9)D>NHd3*q!Ro8}q zVSG3@0wN>`bi+l}9(IWB;ykS)=;eTe6#(qC&LtMSe-%L~8eL?USw{#jH26vHUEiQy zG8r5Yrms^+l=&*!8a31uAIQy;bGuBZccE<|NqxAkse(by`79v4YmQifu9`<}kQ%%E z_hG_y^8H^Gto!rde)HL=-W;IRRl1GOziX{Z0@4+a<^$=1f!zFE9V$%q^x*-g-%bS* zR(7u7o0B6QRd)r#xP0ucgQk65d2+1kgL?30--7Q46AF;#0ZC=9grSHGRXxr)#5^rS zxJCV@+##@~4mLU0gVwIj_|thoA*K`<>;}8%0iI;pw-O9qHw$-Sjr|Q(y5q%mqGI+$ zcz)$KGqv3@K_6x^rDKD>1NG?3yUc7yy%r|&Q!*V^DN+)hr zNgIgH#n+8slCZ0SsWX72oP2}8t^xvDx|uv${E=W{Ir-b)zkmOm-;V3eIoDXFdpny) zBMpP*18F4`RXu_(Pft&ui?(4ZhIv6v6jGryCw0rvcIxX{m}mUktoT z_x%357cZWiw+p_H+C>%(?%m@41R#B-A|FkyLsMIq&JU)Yc{D`t&1^fLL(V^qNX7gK zeqoaIbG~ST$*n|3RSh9!9pX()vSxKTa+v)$SKKW+o(4h!3(MTU&kW0sY)$Tttgu16koh|i>;u;f%2{+}^x3CAuu5sr4Cz%uzHbu+u#sZ8VVyu83@GSTG-s=H{?fiuIG z6JFIhN|myj`-^iF@l?sJ)mR8h$ax0M3~yz~MRLxa5%9_c@$EW?d^s#yJ0ON>ygkck zTg(6wJZiDX)hC;Ilm%|}C;ui*11Iapx2#l{*uAK`83nPoV+|VS1L^ITt$#0`Et-Ku zL2Og<{y}RHZC z>a1f*+mZ!yRgHG>DMPJbiumW+83xjs8^J$hXH05CKOkMUnk9%5DF+b`iG`qQX3fTL zaDt5mDx#sYw-_fx+ijf>l$j%&5czBwvh@O9>?k?4V4fn4o8g46_k*;X-mM)sjVjo> zu@kt&`iq?5+>o>iatzFu0v@1Q&g^j3^O%q{Fp%b?3qD995&t@Gf1rE^cxbjBDhBzE z+#bQVFLw>6P_8W4q8I5->PoB2&SS1~n~_7p%C}hLyPbmXW~ZsX{QckFzgv{ zN3Fih*jT34*0D2rbo=vEAbs|<6-R{YT7h&lV&&TkkfsT~k4C2zJv+a=IDGuWE*_~- z!+FBt`4{W6_0`3-mj8i32SxO8n1YwDTLs?@Jfd=Tlt)}Y1xWvzyL{#G;p<60XY>eE8LY_4Qn^YQKX0%+!8tzq$M^J z)kvP9iVGkkDBrBl1F6h)3RI9Z#PG*Ht0Lzu`M2dGZPDCSNrl)h5W?GsJVszptrH~| 
zG<}!csrZrI0#Tml$$p-`G_>FLkLQqX=Lo)~kZXlhtrzb(^i%OFexA-F9Gs@AsM{F@ zNCYuQl5boNkedG)0n&qcg71+Ep7YWk_Uz=b*fHGo<>}j%mVLt@ z^v72UPrfY~oOPLZe*I#*soy_ZfOOpCNR5Z~B4oE{0@BxpDOi{@4O{7HIylHQ$Pg5;m1^bv(Q zMxUc#WtW(^^vGhB@3+=7=tVPVp3|Gv5FG4i1`7?cE+)BBJV|o#a)rLjJfEbKm^_Z% zc>)q~TeyTPQCy|_{p)lfwYMmk$fHF&58l03SBC$T$c|4omP*y^pekyQ1xP@8EZE8A z|1%v(i|f;i0Ixt$zmtP;Dlsy)9Z2x#6HlC=qw3LO^=uV!wW~+x1rtxdNV`}c29J&x z&ra4ur0$1U_6t2KPk=i6K9+w|CT_r^^}!b}`b%>GQmf#5OkmgE3*z%BKtiP5m1f(8 z9D@waksClBcOaI>>t!u)+#*M0$P8e`QosxyMH$a5hE^Xsq#M7jfpe3hxMk)xiNUACz6!=k;E5F(N#i<9SLL^^B+Qag{P3%-v=1mC}U`QiFkCud)-U-mre(?XX$dRAe)`MV#f zs`DF#As0XN|MBa@BmNHJQT@n`{`JULynI!!w0Yu0b%tHF_9mJFq;JTh^9y||NnYu~ zuv5_(&8dXMnpv6E$DNct(AAo@hY;Aj%TiuxokIeQZ8H>oPDv#!tW<$&r6Z+)7sEx| zCwl+L&!O%J74_2S(yfuYX2#@H^MEQHZ_+wgpOx!#)lV!c5h++)Cwh3pmDrAVD*NBX zPUNMkm7mNr6UB{{WYg{hL2XkWi~khw$>8!Nj`R^L_ zd2Q}teTvKfG!aM@mfoJ8A6{NuJpBC0{nsaJQYNd3K&lIKz)WjWsk(YM<9__|>{myN z!!kYadZ6mMzx+VX&ZE~Q68B?&hsz{me-~Rq-ioF$jINm;FqD3c^rsN*-@7~l`v-2Pi z3Bue6QQMUJXi=)G_-@{)8{p0*NMt8fYqS|uVU_Qxr%eOWpT{9XuH>IqfRsC-{+4jx zh%~(sDEV19ZT~bLE#?AgyXnO0a)YGjFWZ3xkLGCUDl1)oFfKpnU7gjyq36+cd}#Cf zKu*oUvmPk-i`#SbGNO~i&JJG>K#IE_k{o*3AD#9`y$kh(kR9EVX7Xry0aEDNCj%*) zyj=Z~)6+3RLGx(b84;BAQf7{WK|>(-REApONCfmO$fl+W#SEHykW>9y?nISnnoXWv zt>KU{u7wrq1iDN{ypk8vX_a24dK<>zRn!SV!w`Ao$RKRv3h%Xd2jf{ucIJqK-A@M_(%=z7O%vpmE;a5Pg?}vjXZkL!XqZkP6X1oRS;ff zge1}tV|SaDpN4TJSHlcf8RSSh#GN{90+B%m?*)d?_QnacYEo1vRT1kl<${vo%Z+tR z*^w&0j(8dd$@3wFB~!0fYKaF_QVEB=lkoYFiv__tH$7&!hPq&kxa*B#{JE|u9;wu# zLdZ0NwUS;lc`%75(XB`%PwlYA#_IKRR~^xLNFYHwAql-MdaEbZMx3r_V(VAA1^b?Y=bPR z7g=@p;8EQzU~A8vFYn+U(ypQB1|L} zfn9F9*J*88JG$NI(7VY%s;gW%*~Ftd3j(tIAZ3+!s}GNHC%U^|Ga zH@Yt%5y5)cSLU1*8$X^$VuX2na0*CG<54BBtE3n`c-Ey)-FiiHp*Ov2^m%`>Xynez z1^qw1$~wc?y2`XJir4b&WYfjs?MDUQ$1TlQk6R03nJ{}#2yJQi{gwVlyDT9k>`*pm zn1WPBTkECN%%)2>N`VwLKg|L=#lCXTX5bD*{w2R8u@$Kb8tjGtL4lE_GEYy(40TX> zig~aiYCPR!kr9BgP>TPB!P1d6_C>}(_WetRQP+Q)!2l}AT23Xr0s>IYCfue5Z-QUXZ#lY{c>o<&zf zTrTwP>m(w);077LPXkS*BkId1ieP^6cz{&%#0mD)Q0AMj<>YiA4Z82H>X+k`h7$dO z1d7h2=#_b=q#G)VwQ+Do59jco6*?%|q3vr=!P1GVQLcm07%~8aLW<)q)%FZWWSw z+{e|Kr@U?yaLgvG6>MYz3m6oCG4X1ME>FoeDyp|sh#lIKVT71K<8hF$&jCL}RzQGs3p68g+R)VOC&d7&QV+1R0 zPT>W1p5NVa0054ZdMb2bx|~)=ND3IjQGln9h<$0ix+Kb(t8_r1_?M-c1j*a`48|7u zw^`And&Lfpm7R0Vsx|eppq^JSlBMss8dnYNbX_5|33(BaV{&R3e6oB98W?N4L%}8@ zW$ERBxOfgU&E!oobnRavC0HldLs)Bfpf^Rz5 z1(428gA?CR#>9;)tYz5+j-&DYA)|K`Y!Y{~ zC7}&{Vm(q^*RUJCNqJHsk-`1(ZkLvijWWHXfY#8C)2UDOzIjQNJkk0Vj;%lfG7V{& zxdlk?IM?9G%n*FTo=;qW1fs?_`saNC=|<835=!zn>OZN!kcc&i$>s^Z>0lQ?x_BG; zqyF+I5o~-i*zS1zo2V^%u{bE(xVIiu5UWAYeGzp=tC|Q;`?nD1PA?>VKMf>3pC|Z+ zM@Njk#i2@zE zYc4z%z%O#ei!4lN@Ig3@M9O09H%bk)xI zq+BwUL~A;t@@EJzh?kUJMrepKob^Y6ROeOD(#?Ss=c*2N2Z7E3NH=f#Q{mPuUIsQk zzkL1TpvWDr7mtR0oLq&=_W?1d{XfTN4{IV^_n#Wht!_TtR4(E<8u5mS6Jv~SX)t$_dPdoKRrYwyvVLDy> zyjRQy?f77vkdD#GGh%quP04ZD>{eG)Z4q>Rf>Q*eEQq_^?7YLJBRePZR)wUrDauhq zwVGQbwMpf*`L+X7C0hb1oh#ep*%|B5t+*<|=+nb%lJ#fRd0xG3RDcd6=y<-P$~V_L zo8rIZ4MXr9oa@kY}V_>RH{a#RKxic zwUo-6a2{W?lrc@JZ^^&@+**KygHoSs=26S#LCBdrx^3;~y7z)u9=?Adt$)#0^PD%f!&duw}*km`*Er#(OD^Q^#TO zo#@~q^MX_vc+BG?zG3Z=w^`RxnV+UtS4yRBpz8<4aHKT!5b2g1~ zFu5;uP!h26P^E6W9czV-Ko)s3eZ%ox@$Q-TN}qUYnCs;&qXm6ewNZ^Qr1=)Czn%gl z@o0|VdrZdNz@yvUJZctv-^~MI&L+Dbw*CC?1L>%lM@LhF zc*kP`J3aYeH5mCfI(2rrOQ58_f`!a7+rXN`8H~m*69u8 zzmt)?P@CYpZWBAgqX*S{zSj(-O3si|@{2r=A<48=%@Y?>wVA3}xRLTAVa2In(a98h zijNd61&lNxW;}o=ILfOi77DsCKQH_)Dm9sZM88-Tg}{!>gJcmM>f8pvDv`5TdMy~p z2nX9nUj&KjtTydgt8TV4F9N5i5RzUG*?hP^_+jqcAB;#a5F!-PHnD%)jtFk*g@b6w z%Q-o%jJsudS^gK@sNf=-W2tP^@_gN>@iG-&lSs?0Ao!ZK(uOi5fJ8y;ehQEXS+xqj z-_=I`w&C#v{PtUb1mo%(wD73fXaH$1b+L2|LgYZ!WH*pTd34mG<8Bjt*CS$AuU3^S 
z@@%D#zWM%F-+%usH;{eu=-`VVzx$7bo?g%XI`(@W61>Bcn@|}ZfA{>L_=y)dSMk}G z4e*>K_};;z>Vxe-T728b>=02QR8mM@w%Y;_gwVFCc{(&w@c{&E6!}QXO;k_7bHLKk zBf*o-H>K?9NNOEpxr1A=e0T360%x+TmQ(7qMZh2{j+Q_mqo3yvDMdlsgIJzb`86nt zroEKhxY5CUID%)jM6AnWaRX+RE-?&mG`6m!0hBExJ;?0r5Db+2yRm`etlXs zJ?LmBp(^J%fk=cc=c!HE=fhJ?--#$V|nLKJ2eAiVy zK(eI(>24eeEj-+L^<7h!*#;izJ?{lbE4db7{2q^1B4r@WD2P=MuR-vQz^=uW;trz@ z%4Y+E4iw3Ibfuc6vuy|ESxEKN0sU12kB;XQ#KNN)Kq4%n;z@iSCkiHo0uy4p9pgd? z&E;VP1vAOocX?uYuA?PKA~jl1Ji1~#LgXPoL7qR%6W|d+>)2HAl6kdDF9wj%S+boR zJ3%E{o0=x+<+nmas$(O-0{| zoc-r7#}{~gc`R=3->yzS;2gns4U9;1L0Imzp^yjid&Zc>#l|m-2L5&l!%Wqp$ z-!hbQcof#Ru}Zd~00|!5%_u-x%qu_|=h1CLK`am7Cy*eF0#k=CSNDpEe*E`U5D(e~ z-|bG*n82>hm5MvG%rH9sv5v|85(ulummmDAgC9>Gp8n7hnmh(kEj(&bIfQtmRc7~1 zZ_?1yDQD18Z<(7R_%7iYE(p^V)Xq#o-GC4VhxTzPj7Co(*A#MvfquL*QIL+OAWX;G!Vty&P%VdSy4xD{z@9C4 zcB*4|YN@Z3gKv?W!ghP(|TJ1REsW)vXFnP?GwH(U;nW(mIE&6qd=IU9IX z-I0fXA|NG=8h_a-Xfl*zJZN{C#sqc`#$Bm=c9)AXCNp?=v)vQ#@}p&{qbp!j^#>ap zG*Htu14!R?IZ@KpEz*1$qBqb{jqJnBk-LyZX^i)sCxf~2~Hz14Y-xdwdEIAyi zbE;%zm0-fHnxRUNF9hlC5}$2#>Qnj(i(JdJ&0V9AT)pPFwSY#nu@Yf)jt}M~s;7;| zVbT1l3j?{NKun#$+0lWbz!FzwVKiT-Q6aWHzf(Gx^ZhA`%Sdr!k}NQ+rQM5+ORAzm zgw8aQG9!bAQS=c4y}sSqRncYQ$+p2Sl?ZaG{-r_F0FasrklF)g^9A4ID)b2gyL}fR z?cUQ}!^3&cpNL4W7wtfTD0>!sABW((fk%toJUSl5%QpXENkmkc?|KdA^jmJ1ht~XK5*%6%mKo-Pufn z27@C|FsIZN0~@kWR;vWq{q`_d7j(?xM}ItUOV0r=1sH;av>>7h?gI+^YK z?(y9@S=Ook$Aq3`E``b({Q5;Q@YN|s(@zW7i&GWH)qVJ$af;&*U1foqQzn*GocVjG zj@8*U5}Ld`RNnY!?rth|_?HPl`eU2mdyGf7ZDS}JFTeeig6|nTdN)b%{V9R;*)JQ8 z-n?oD(l}9$XYeQlc0{s97>7Zh+%0bP^!)Jr>570^Llli&weYlcNh2EiCLobS`etX9 zt9kCUBTRHvr3g*wFp|ooR1C9Zah;~F5pF9y;#Sle2_%&Ui((tOWBRd7B`(Ffq=LL2 z&X|SYi=~{v^bL)o~H!jRAt9O%(6gysN?EH7Z4-(<@i^Cikw#gy*fJyy@Q4g z_os@sa?{e89CxR{hy2p8-2bEQJe1@}Zmpef@jniL;&69Jc_}uAl=t52Rkh@Q=dOJc zNjn)?A8_k*42IQmbB%g4y0fcFQht4a10XCD2Ym)3OSTB>OEE3dgm#39c1FihOE@7> z%XD^~0cIxt?Hj4Vu`geK#oirACj(}cGJV50`sg%|SUz8Xgcq;LBend=HGo9poZl55 z{blrl#EINk>=?dTtxS+KprdNPTb$BYI*N9ZN3VO3(^0%{_JBkZ>F6mo!`iT?R$`{S z!%HpwFp}?Vgo&)C`D3nO*jw8_kPb7Q=rtGR@C%ES@=u-3BBu3WqA<@3jC|r9<}^@t zYumcR;`;`Bv&4vtI{G>A&5>H6*U|1CKIFW&-(NjFXwyLBsq%97H zul=#2FYh2n`}zm7Ib4Violj0Wwf%z_!Vx&0she^{-&b`f*= ztDO`Q6`LP64d307;j&n#&7+Sn3jC7`kXl#Ivc4j5qXJW(0{*M~{bd#My!W%4AC3-? 
z8a#U46-}B)um8zBV%O1I~<00DWn=Kab z^un+<6x$jBNeu7PUA`idkzmD;!}(z(Rl+Se=1NmH_LNNwi?Jr>$NEgfdK=Ym?C)vl z)JEL)%cPK61Ch=b#5Tqv>5umdkebtqKD&MalJw$T>i(x?6F|Dw-_L*X?0e6i{owbV zUPFBz^{QlLzzoXhFJhILS6P+2O7!)jX!;5?Pt{_BN34FMWBUe3OCp7>QhVFh(Zi7u zb);xXJWxj(&Ile4+lyv%`HXuydvj!LKRp=OYMaf(bd)TRt#7V}ETW41RGleHM!t?HFZfj%0 z*rTnuRQj}H@?7!%ZJ`+v#!g!KED32zo_CIQkme|eBYPWsp(b~$uRu~ku#nEjl zW#4TUwi&Z0`atSWKzF9ot=x_KbP8z-?&AL3&-IjD1@AK;d=`RoUp+BR` zcOOW1r+759O98`ovjC|fMp$essRoZ;Uvi-<$n|*yklH@G6^offvds@#Ajg_Wsn6Fq zlAV?YHmgWC<()nzi+rQSOQeYmYkUykst0mOtK>$VM!;{se7Xa1D(~=!JTRmB!gR); zo}Ce-oMP^L9C#kQuF3h$CWBYcwdJ<(rGGr8x}h;DAD1J`V^_6ds8fuTmU%Ikg=sM> z4BN7rSv_C3Eq`1j`Ybb2wBWX|4gA_ht&ztTg*?bJARKE(iZN5lFxaKs)sFqxtXG9Z zlQmWR*&or`aDg`)EL`HzIBW_f8b~-h0ZCoI2G>Q+_V$`U`sE+?`Kdo_0qL3Gk&r!& z3ZmDS4R%n5k1NP%gP)c?SLG1~yT&4-s~5$)c^Z&bZETKth`Jq!G#QklYf+WTmQpg# zfyt(Zean}F%eQS6B5@uX{y$;1O1sjt)MmBQl2TUZ&gR+dVdg0!S7xH~UB`@nY(DuG zX6if_TEjEtjEH5-@7SYIVWqdv@jlIP-XmEDNMx9i*)RiVBJvpFEOX2|1+%t#aC@od z@mSSm^+u~XAp-P_DDhrjb+*H|YQD=Y301$O*m4vuHn$j-e!=W!JEL^ANVVI33W+9b zS@pDdbbpRV_tz~zV)c9hQsb@nEyFjD^jbh-q-r%Zw1FgnBD*HA*f7|^1od(L;E_1| z#o|dGz3yE9o#v6+G&8nF76DQ}W@p%cw@4}f<-m=-Hq#>oxRyaKL!#)b5G68W*F7Wg%#FQQlLhxCYK=($XKzBRGOV!u);0^v4|z?7KZb3AAO62} zd4Z>6b7s&+ZLlENRvd_oX5aQnj5Pgq?np%*9|Y3vy8`KQL2P^GchB(sk>t_&0wlic z%*2TXj~a$=c=S*gi+SYK-=C_*?&p0Van9-%*DZsc&gy6$oiu#c&vy;qEcbW>qkD#L zO{wQiAYuG|`PKBp*38VXJe#e9&%?k(;H7fFQ=ED!lf3ylU>TYhw8FCqf|&10J0Zh( zb42!?S@w2su^Ne5#LB!Q^Vk$#Ad4&)Nf~}|?wLZQTICw~@bpz+6g;auryQX$KjTp) zX$W~1Mrd>CqOe@G1TQ#+<5J|2s-+!bv5xuL^KN+5vY;}J$`DnH{d7uVHdG8~g{FiZ zI<)8Hh{7D>$z)_OS3dD9lwUlLLHApL)a)JfhJ&_S5c^TnHb<(cVXzzjffFZqYs+9K zS10!2{`r32{Qc|^Tf_5wP`+xn%y|-zq(164(Cf<{9WZ0D&!c_;QkPQibzOGE?7lQ9 zL2vI!&4eN>SH194KPdZCgHW4~Fq<;0qG$!2sKl8e7ATL#l>zlwNrA=;cAC+8y`#w2Y6i zQO^IQ+qW`x3i3Krd}~`gz(zq=rLb+g$#0o1aGOJ+5tjGNE{8*p*gTEN0i2nwYJ-(u zqo?3z0eNC%-wK&VOKC|Wc5NU%R)F;GJR()pC_w5@oY3s}C-F#Vu9m?L%{YNHuG#MU z`wzYbF>FBq>88&kohwpjJgd4vgErW)d@aK_*CYn*H4NV;c!ZLB)&vqUyO~AuTnQQT zL{^Rj{AkHiY{>xUOg{*tmWGJT`iAvomd?@PgnvjXt(1gvWo-Z_a)lAaLRo!y4yPcDNE*cFH>Zl5xs~I453@Y-PKEX&X-XOu zQfuVpHk7r2mytPAn-MKzd5>h%hH>%NFy+w5F@k^ktE-W$6^m( zKF=8QnRWycfBbeoTM(<$GXJ^)q;dTMB+1i{x&}K|-(C$!Kb*4`zW)0$zWx38#B@G2 zGWPjXc_bJ6hVRSz=wt!XUC#wBqmb~l!6V+-qtp$P<@*xp#R`q(26pn-ybllB!BJ-8 zx#MXpUPMhc6j$8pSm2K8TJ0Jr+ks&uJ?g-lQqEg6P7oerDgsR=IGnkpBZcyTyzCMN z6DhVyV!*Z#X(2nzL1*GQgSsQqkcZ%og0nXaM%Gcjbcpk8u){kp#ylu%gB#x!m=-Pn z|``zQ+!B6r?objx}Bi8yn z>ZgTXo#PRP?>>+El)A|SWBU?moeQFWi^Z0v#rZ-;9bX-`g+PySyw(KMI?XrprMfKU z|Czy?`GQSVV{3jdTi`$6i>iEOfgq?EMK1bqHrs-iXhOzFAQZ%KzNkYXz-aBYfXRJ} zveS?^D!P`_aEVQAUX|BEEeGwQYKbcal8hiK^d1A!5T13B_Jip@EEKQYZ=!ahKDTJ^$GUxR># z+EQKEWL7&qmpD=Y6XW;`@w=Oomp~GApH*OlsB*O$vkJ2AMGM);vL!q9bX8HUTH>Ju z-jZswGmC+rBVV2*Z*Y0YTH>d%TzlDA&cmboYaTmG%@=pOyX}XW86FhwIn>1SeL2tO zvk4`)7%+^$uzrwF%^>ZEC04qg;k#2-)Zh_IA8DgNzW_;_;jUkR)OPo!2IFt9X0rQE zO&iSt-`{E3?Bt31q^cwhgnj{1cRVP2s35oRL#TP#vHzUGf z3p%NUHTBqu&4p4i&S|1|vovvufyhY%BV|t3CZ9s1%mg%m5nO?-co_+>66O>7DmV!P zvtLRD;bP{VZ41#B4p~5g-E9ADpC-=2I%Esn_6&(!2yDliU}BCIwKEVM;&9H(iRV1M zNt-ap69W+b0Z9aVH=;e;PT`z7#o4- zyX|V2F}rrdAicmYS;>vVeCXYCSpy`rq>+WrLcn(j$b&E67gccE!&+K45}5q#!D9> zUIwxY&{`m4@O@_|T~r|nMu8tn%4$ndRFQ?L&|XFd$tKWdLHLmg@DcC=N^pIJmw4gU z|L%rBPZTms*q2Vdv`P=bv3R*z82-OKkrqeWz9DoYJe7ZyL0(_!cWKD3hBxqui0ICQ5?VLw7Gd6;YUC&!-r$P_xr{rbYkw zf(mh|QhgH~^W3IMgB`8>lIJ`I&$NE|o8`#*w5pY?gv^bYXO^E_&pvzw6YGY%egAh%ZE zT+P$)>j(b)`u`H@TaPA@zSZWD9z#*rSUp>S)Z!80#)jehuFs=pgN8<#Y>C+Qvq-o9 z_+RDnuFZL}b;@jjA4C<1D$QeZRta8ste9M#`#qCW*>6DJLe5$;!(B}>WDC+?`=xGp z$4d@EOg7!JWgRg_4{FQ=k*-c!^}=(u=$KSu)CWF1Bx&MVCL25s2Ex+i$CIBbBm74QEuni(U&Bk%~v_G0GSXR&&=_vLzS}b>BG< 
z&SOclGgd^3G>#nuW>)T*I#uKPiSjL7U}k~($W^}FlIrqm=pW1*c{oxIXL8SC$RS_r z3)4xNZMzqJ%MBck!B(^u&KC5$Z*ELC8I!URSonqI^XURfd6LQONTBpx;77X)!BRvOm zTppw|I7hOm3NjXJ{<(2FQG;)=C!ffCRqfbj)W!HZ>*qTyYXOs9ILom!20Rhqs)C3r zTxGC>Y`no38W^<26G}1ysS@M3lV#82x*F#q4H*-VH@JOOoaFM5vg?Bp_yJAX_Jb?+A;w%pZ2t6solJ3%*^xjwV?9tV zfqTA5v*TTObl>0+?`&u;xZ9&KxBB7vh#jW7Lu-cLHoFHsZyCNdkGf^9dIHj2gGc8f z1txk*u=_^9t-q)rvC~1jfBf(5>cNIDEK8DddPMB0ldZ^tFv}&M#*s4j1zg8?UTjDe zTf`$J!r_?-wGdd&P7|{!=9##8?UwBsbtXlh7?#A$jbu9DdegKZQ}Qelg`&zco?Dr< zTOII*K2ZicW_A~Yo#m%VbG+j~K6c8N8i=ZO&%W`qCG*UL7siTEjboxN@>nE(a-{zl zD%(D{Ped_wa-F^KUc8Gy!j;_QQBQXC-pSG3*t8ciW3Bb^)>I{KzQssf8t~{kjqK(i zq#yTz#4q@pM)n?{oaYgn%H?xBx;x3EyB>`Jx4x(|mWCCy`y!K+oNmXNZFR`&Dn(7r zd3@UtRfmLj7-GP4Drga@v|{z8Ve&CtfgvuHRr4B=stp@bw`|3W0*ulI7H8L@5JS8z za!&@~PSic97aqv-r^c$8d%-b}`w`6-v6-46ZtMR=)|;{wgfx761Xh9hAoqI?AQ9^F z*pw(-9uJRhggiDjk*V`Ec%GjvhCC0R8@35EBZ2hp66voE!*>%MPvFtX2IHDX?Sk0W zp4@!no@f($Wr4(R@oZwaX#h#gM$p~?$k8#_y&gToH}7l|#GW#IclBMLM;hEUtEAfo zwS#tF5NRBZXZiGxs#n}%y-bIUMU?2tNSmf^*^!H1iRU3z=5dnbnF5CK5@gM)rHE|g zvXKnOfs`=OYtA$9n4pd=&a(fVG{1JR1@FLMwVf!Dz&Iu zs+%TYm>@5X1X~^v_pEbem!AN59#$KKC!EobC^94YxcY((qM8k}Lw1g^q}j9;-u44_ zS|Yt^^Qcj0s^2NNb14pXZ61AgvH+%oi0FXJ*blbJA!t%{E#$&5wVBbvW1I82hldw*v-oL0$(E~Hup#s zJYm^OWh%C#(>4}n5u+Z!w0CnVch<62@A(8CvHbQb0*SJt z`S+{8&^rh|wS#sSkJ-Df!;IyI!A`B}ISxJa@bR+R)pw27MEwUv!0tSe?12|)9%(S; z24FB=PR#7)fS?V4+l<0lpoKzB=Z<>JK}I%R%IDbRnZY=92CEv!y3Vv3=0aL=T=VgW z8j(_>3bD9$O1dC;Up{)-ROLnHbMxw8^WA+08bhYPvRxj!Sd2r}QXSxAYNt9nkWQ-y z=LNrGLfAP>D>Z;@snf`O4pcbZ2r%0Y(t1Wd1@uW@Z{>jM&gpR&2fI+W?WU$M# zmVDjY7C^!kFV)QM&5jxQ!6B=M8Rc_yM9Kx#D4bUZ8v=umD+e2wR||5HZibMIWK|PN zEyU=+)Cv&Ir|lfNA8i6}X|%hZ!FKS%I8n&z+6#o_)JIGApOHv!;8829Gx`Nc_wwxS zH!N!NXuzX}iM+9A-(UxiKDuhaj=%r+z4zAN_n-a#_rIPq`hNEK2XQ7pwe9z4QRdZY z9`X79I~!e)^rk0y^h!L}Q$+e=7{&XkP!kC0YL+}+j;Yhf(~0h%Wd>;--wh=!YD*n* z^la)|X^bXHN*R|*$Ok>eo;A#6>Ei*}@;wurT1PpgEG7(|UZ^VEJmnA}iM1}oZ84o#D1P70czRO=~=M-!w5Ym2Mr%ibqzwIn@-EyiS{? z7iV}hB@#rEhi{iMm&Y?G)zRjWmQwwRfGy3(o;`zIi$@5gD-ST|?}N4-;$}44exn6W z@QAe1iCUvGJerFq4BztO2gCRCaprHDk#4azxXg>#Mr*lP=c26 z>IkGCjy{pdptM}zui8A)kh?mM?)p4>)#Xut;zZ@qv*+)D`Ewd6TN^ybZ+y=!IRC|G zyj*Z_bGG45u0Wy|$WtEOBh|%Nepzz_L-n;AVY)?}+ckXh%-@@Jcs8PFrx)pcdXh)??FqGg)84&W zYxC%{%A@l=8Z=n)-yfdz!4FgvPP@R{sz>wa^@)PmhT*%RWtuMX-oyz4n>^6-JVGSx zQFqQcridDcC&nw$E}GYbe6UexgI(FQoJiWiX0h}Sz~C@~AXvCcS&%f?nFWjU3rc8@ z6Pc_>HrTnm=iJPD1TF-}Kr!tAkKA)upiZHXW12{%P^g=Hw8Kkr&$)ufMQ9HTh?alE z&Onw0fy^xpx5WbC$IHvNUp*p*N8bV%8x!ykk*A6zi8Q=&z!WP53G- z7pJl*K4Q_4hnKXY%Ni%FY{%u%BozkgFc;;Y6OuKw=Y%?srBDtey=uOC&*x+SwQ~NSHU}bzq!JwX+N&lA%3iXV2f2jrZi}4%UWDUJv;o zgQU_1XQO84D08JqRtZV`64H2b93x>FT*?Cqqhwno#`L+vxmQ*(te>!Bu>$Ou7%YTJZhk)F>&IqA6x5GHF5WS zMk9`{#3Mw~L-hT{tpLV29wY!GjMK@eF+Jc$na%J^aLy$MTW})Jf#I%#gr>q=nL@%~ z7vtERr(7QL&yN*680-k`=`h#jdp?A@a_ubIFdkE(aEscE7~ezVxy$kNU<2h@@2us;3*=YF4=pZF}qY zUi}yG2*&EJe8?l7r$+(lMChm?0#E&1@w z^;IY}!-R2Y&cipF>(Cze9I9%GbQ(fZHzd-22{4)+YzEV_N92)S<7_$+R`_6f92TMq z#9G?a*;ENhLrC&GLWeUC#(Rq(ddHn`&tHAO(KX}|L{dm;Vq;U@+2+wVq0xMT2-K?~ zX+h~LU#WqVK^(~g)iNUI2p%a5^4zaGo6U%;=_s7o)M}a^DOAH;#s3$kHVt#aTy`i6 ziru2^yPd;K$uk+|GCpX_FV5w87LsNZ3mOqYQjVasmZOaH`$xcT9 zh?Mp-g-Kk5x{OlF@KDN?bM>$4Ln?`b$@!@;%_(lVQK% zAjRR))Fv5p2hI`XWskt>1bPL9!fs<`tCAk6G=!2Lxo@cb3LGel+l zdp>B8&Zcu%zSFfwFW=I$gu<>IuS+SR&bxvja|VOKJ@cntpx^H|ZXTGv{VS2-aG0#f z*Tc-286y!#LFp!t>ULM~wcEn#l_Ncj6IRsiPE0Ry^hoH$*LLCS1V(=K0wP6+ORwg? 
z5D_}X5XsYtU5Faz#2RVSb+WC|5t<`xl7M-Z{hw~C$W!G1uU}`=62G#aGnb!Pmd%j4 zWwBVyk*&rbp{)FO^8jXsY&oZ?MOc=z&oUhJlydY*+IwZ61blE;5dg^1u~0f~7D%;0 z^}0&&71y)Qu}(oE7vK~S=~(GS*KtnKwOPHGF_s5aO1E1n9iVh%9$%6w0yvo*y?8D9PMH7-(&k-$iJG+k=NW&4TUxnwl7%_bfu%n@U0@}w<%^_XG= z{HrwSCaBS~=Wvtkcd;{xl=Vi%HB%!%DZMNn&6@?*Il#3#brL8UZOKJtBv_+j$~3+e zjEePV$~eI1OtpW@l%e1Q%^Qo|hf0alFvGJ+6~4@8CsCuX$@|YAzc~+B4I{}<3te)$ zB|q1Kpiu($Oq0uSl$3Fd!t(HuDD&Xa1IG9ejl{!*VU$ObEOiEcE%031DNzQ_256VG zOtYc_NQ-Dd0o*Zn&fXp5wh2UW7=O3uffAn6oh4*dBEdEYurb64xN7aL9Q7zwQYAG5 zp=xE%(DeH)k&xJZ3r6!77fzGS(s9V4=hXLf8t~M8dKh%<>VM7YiZR+}t7mPvGgI_p z9>~Rfar!h@w8004E>Xiw6LZ2$lS*`|nd?RZ=X000hb{df|;k^lf`Jv~zi0054(k-kX<002jB zDt(g+000gIyQI-GJFyJ_fZ8{X+2tFt3jlz9;iVR~y5&!_EuwXtfw_*H~hTSJws%!%!4OJ)0oAX#h_as?(Tv2s4S{C-0Q1#5b4rtUK%P3_xq z{7ZXo%~_N1sbczk0rw_+NfW{ff2S%L)CYO$bFI!m9=*ODgLD0)&;1D!A8Pgb>2z@} zq@2MA$HFCOR9!1Ij?*Wj5kWAz_Fs8Qz!6YQQ^N z%;z&;QEopi*=$n8gi0ql){*_1`G=X&f1)pCBhPC!n?C22FAu!CJMYL>*GC0&rh1iD zqtUF{P{;S%#QeUI>NU7~CI#ZOsF!8KVt79Y4883Z=PRGU+c~@4?hQEZv=`V^!oeRN zv+QvIWZ6UUKb`33c}NV8+ODZ2)KR*bFMo!AsyKl|r{qYZU^pCv(vF;nX&#%7$(u^=9N@Gmr>ECeIGlrs!cO( z4uPVk!V*5|R>F|sP+hdG_mL1bTIKcCT>n_dGGf?Aq0o?X!AE*a^yN^N9EL+#C?6>r zy7Q5tw*@7+)KPl+cI=?&{a5wTnw%FsXp8E@*}|=oqVjD-6~ISDN7nXkSqN%N#H3Y^ z9Lkn~Od&Z4fhf9VCDlBzk?~PSryg}kUCpCQZ$1b?a#&R9BlVfg^5r|JexZ*n8`+it zABC6sNbdb=AxLSiFDh*67jtK<+&FRsQ2v{w+`a%HK8b;iXbb$X(*_{r!bM9J%f1rN zWfl-Q>8q8*2kjg7@}Lc;1{_jBzwPeVgGdB3nOOT}^HYiRd;ufikw`lP5cE!;iLLzN zRFvN7!utv*odciC zrBFgCE_;d5g*LKvDXu|D7cXns{M=`YxCsvczixQABkwU&_8Y~q zoed{{^3&$B8EjBvhDoYtfAvU;^4rrr+ujqjYi2JTGhRVxa3`tYm(4?>@f{CPV>dRP zUn10fu2k+Dc9^L|7fwb!_A3cAE6|3-S^510m}y0@PPPM&d?`T(Jw> zdQk#X+N(zQJnk6mON20d-(9E-5v$O;0c|UaUP-+p#;awkzMT4PtrYw0%jx6u-9TE< z7C7jzv&cCvB4HJ69Jvt=k_l(ViXF8m8bX(c&%8G2PrxpZ`u2PV?2{4LkbJw%@%VVK zWeOt4_yDfh=GM4iFj8{`@6ZELm<&)H3jqye9Z>v_FLV_7XSE&DwPgvGK(Y62Wd^#f zYz?K3h5xb41^=#IEz3^h=HwgH0x*4voD?xL zVhS<@b^VGjztc1zj|Crpcc4yD>h|qsDoij4SnHhe|3GWqH-T3?nkGW z8fy#;>zdi~1LmF^LUeQt{)yP5*Afu1go!Ad9Ht@G93{SL5{2!3cy5ZWZAi}zYg;Yf zTeNVlG1hi2|B()JP{kTqf7PWkUPGJ!hF+!D{KlY(-=PJ65O(N6?!8`i216n*S_R3VA* zQ)%YWpc|Wx5|8oaPZQ=n|M0o?IU?Pxm_#vj;NcbAWl@Ot?+ocipetpv*0#F0c0oqTK{>C{b~wRL zxh!2bms11RPT}&oUE;YmWtbF%ROY;dNSV_oSHURpc+b9~VRAR$OjPQZBk4~EW=k(r zd>_)KavX734z&lTAkIvpmMXurk&)%R+B(~#AAt?+VVh~pJbN-qlWzOVKh6L=Vsz3( zvWf8A@K^J;)`Zp!x!<+!hdP1@a5)ro+xd#^ebxa?nTH9VZ}+f>xYExq4On(|9ujwv z3OfkgMWOR0jq&O{w4XZM!<^)!cQkFgS>eiPoSU>+{Ih0aXouXc?*CMJt4pC0m#C00 zH9N%Cnb!QX`)lOownBLN3ioBp#@T*|Hpc4AEW?BS-&6U7o*{cB)yXT6WrG7;{@-yigV^msZ8j!e z$GKfjmh@tDgLE8gIW5~#>?ib@{!Fue&&MLntXYDdt44^~P*2{XBpC&@h6Nn1JJ}CB zBkoo*jLi>lWT4gJw=g%;ru^U0$oAO1ehhB5OhYPmV`QTMK988 zDTPghfIsPxh`)+ae0`4$=Q80epa(;Dbuo+RMFHJ8i!7e+OR?7@z&2L9Mz^6xIA?EB zWbUWQKI7N09=ac#okqq{C7!YKZ7!`f$@UGSaJDr4+$X_Du zo;BdCq)!2s!q||;gA*6lOYDLczxPT%-K0_X-Xk0L7kzKF9NH}ng3VW(AO1R~lSS42 zZCBr}+m2ea5Oh*VFQ^MkD-xFAG~W~GkS!QUWW zAJJ_bDCme>8{~JAqWBK9*XK%DKH|yK2h6yu{_Ca>Qc3K#Z>S7-Z)|uxt-e`N7Xuyc z%8&w!Cu0^r0+EiVxiS^kj0=zykZM$(1Ux(AOWV&w1$@G)S{p6Q5|+5++&hjYHVv^7 z&g+rSZ`Q9M=GVYW#r|{LleC7^xO&|UPOv@bN;X2amv7hS%=j|S zSv88jNpT2bmTk4^uci3Kes77R+6k4YEeOx_fQ>WSTf6AD>vyh|B zkx1DDSQewLDuv=DBmBVPE?!do5O;F~j3+A3_b=&Hm8&4R`y^mbE0OavN+B(4v*r3* zepvPsVb)(asZ<%36KQvDc?5(G6x~9!*n`iGi};9Xi11X>EH9S9)_~j|%(*$zu1@w* zhVit@E`+bB;%zM)cBu-hGPyUr)DA@yH7`p*yr7tXvr7G50)5HSSj|R@@s!u!&Dua( zTzwuE&XhsU&g>?!PR`lxijP2x$0FkHkqW+$r?ULm;oE)cYqm6HWbye+JVGQfF( zmguzxHRZAH_DqCkE+sl_48FG2L|+hKsqdThH-k0kJrK_@wA-#u(8DvjPmmY)qC1x0 zzXKO1+Jo7^Q@`)@tpl&d;t(^Ly_}tzPwKtq&MZ0bfuI*xO*Vk|^u=Kr6f}lGnx}A} zz~vz*d1wOgUXZ@J8`T@L(?(|c$e}d3#oMkvS}{akeGGX23)agT6o9+0``W6Ll>qbF 
zV8oS8+S^QoQMn88I$=PwmjO2z0q`9!{D~8Re}J`DR)UO^>Mg z32RrIF$YsVVlJc47C4C8@Caw6Hl3QA$gtRlypn$}ojT3Njh_s2@;v?7M` z{dJk!wu5Mq1OHxYecbw#Dwi~`bR2E6YySyqmaSm&H3z?Zy)1cgR>6gq{uXEb1pUE#mI%`jiK!2OwfIXR*+wuU4@S*c ze-NDK%6{hu+O#ahjF9w7$34n*2A(|2rzRviTr`57Hc5uBLXYyx^}JFTc@3?QuAMtz zknajGrQ94IC^ET_GzdXELT7Zws|=v0Y9{+l1TGGMVHXyw@<$3e$ZLIh+PI42Tc@O~bD00!Z9V(59E) zT-{ap$TFZ<`xF^qiXkU3Itc&#eD&~`1iZbJPxTzY(`wNIyuAlN9@Ry<%0Khu7tnTm zc|wak2@S0l!Z!A9RNuq2WFhEE_QSMrg`w=V!q?_qwPO4uE>zjCwyS?>+~YH!=l*dA zwJ6GpYq>1Me24N9l`(hcE+El3xc5Z*`8o%9ZKLORhdT?gP^1=CWM z9G9q-0^F#|17>7J6osiU6OMI)-^)*$-T~qbD^^!5C_Yy7X`i5%e$YKD`e$ZIycI-; zLxNw{KvXW>Fqb4DXJ%$-@LeK#X^%-V#$r)U?^xJx-vG$AR5>qC1?bJEp68K5FJ`#_ zaUo`)U8LU|(^0|c%Dx-jKNnVVMi@Ry?_doQyJ^LE+W_trrE+@-b7~p{HoQ-Kdf8Cz zHwczwindC#bg~%lf;_c-M^U%V5BiPi47Qwm^|Qnml<=38P-;(ulu4eC^Nu8o4~fHh zP%+z4_owCrHoI>tk9Xa$zx~PjvJj#heK662XCXBq zQ8@3o3ohULVy_fOrv*=X8_$0RpI`0%Ry3nDd4%Q@P;Dg*bg{YsWGUJUd+dP3y>5lc zr-sut!_E{o#>GvN*&F%`5XfbW5c~r<;f;SpJbpK!fAoF859Z6iJnUDjLs=5CmAK@5 zf-BN_qLDSN^Wm(QY4|`#u{*C7!vrIUO<)-0J#?y(Oq-Ds=|nG1-Nv#Kly=}xEYE7rlXAsp){LSvaT)}-RJXLvdfO2j7HOwA833A!`3H)X)5=fM0T$ZIB-2@`XV7rq|cBfrD5sew(|IWAnlH+{MC zeaLTdbIwdDc*vonK99I`6T`>MJKGKDSA@b;=-tKP+0aJU1{I>#E;C^wCXTLFFUV$v zdocJ3QZ=CHYZQ#MNAkJZ4Pha~T2Rj8^AH>xgB;Kg%hEq3_mEth6b}t;jQ+=ui~Kmn zu*YJ1qWrW-N!&X_RrtL5az-p_Givro0ef{>Ei@oecPsoA;!Q*JP?Z7Y``yO@0Bg!G zbf@nh6+`wy@3&N*Z%s$M|6b(x0?}y+rh+5reNAf>3x21&2x?3n94CUc5ra#$ANIch zH4PFOEd_X6$PHJCT7p;QdY1q^+>16B!dBb?i8C_)--(cR{>xa`AA>qg;SLSKhfI$` z2c-iK(np(#$A(&`VIp5Eh_HFuTOfmwDjkbmDCj8ii9%IC_!eKwH7t1YTFj2sr)8dM zQaD6=>c$^qC&x@Ok`SOrRbMgwoLH8>XA{qv393DMFO|76Cl= zKcscaY$-3}`q2Qx24PVE4>gFk`HG|+XLlHQt7uA==_!}CXdR2LbbMvxr7$wE(E*T1 zPEe0`Qpd&E9!n%5^eHK3`w^gPe=Zg4*Fa!_fSxXhuil#iC=($9H1Gs;p)Nhm*(hK` z(Ov-gECuK*vcx_0p{=W^UmV`66Gz(O?9_P(eo9Al92Uq*SZApJMBVR}YmlSVuq-`RTnv13J!a>4eU4kuB?8iHrxMZubwKS;zMbZoAfQ{MMcQ^Lb8NRN}2UEm9NP z*nTAB{$E#i@?+3id$1apv1YB`Q^?aRTv~2hFF*)ha zWM+a2wv;pS!>R}J(8}(rdIOCiQ)kj2VUc(f{+7f}g*};^q^LX*c>9tz5c)DNTpzw3 z8tX5^{3QENv_%c-s(w6qk2pJ8R6e)a#ss}KBl|DB5p^`MLYXF?R6jb{XHxHFoU8s) z0mSF`77gyfvrF3#U)-i#tj)I)YzcVf-aSQAs|ZRp1|oG6sFbLd9=r(A>*#M?h~b$r zW!V#Yu^(JM&yCry6h}faYF<%!2+Bo|GML_Qt?&agG1lKqm!1zy6%5cA&RGyHOpY5t zWd3ko+!sd$eH$l+AB`VO&AEnuZ5e+f5%B3zK~d0O`)Tv-=ZWbKKc0LF=Vk-b;lyA@ zOjt3vWmbooX?sII4q5-xOhG1-5Wo6l;@%Nnn4V_!iDnE)-GXo#3bTh z%-#=9@a~n-){nqMR)UZIqJn65{)+y7+>!_8B+&#V?T(6rY zr^D#bzO+)ce76Ywn^o!AWL5Zgp{Fss(bd5*#`LtS?G36JEAii-0EzvhUz3cIM6Qjs z745mqH!e`MPUL&VFmt@qOyB4hTxE}`{&h6sGY>FISn-a4Uewhn1eb!tuLX)bQ7*#zJVC-V$VK%%gO5}hfU*V&MVgOB2j0_hUgU1fO25uY z949(GZ3u4#UFu)!!yv53{*vy8KQ#I(@$}BQGE>#3@wFIQO6*R)V%WkhQJmgjDeM>t zgu)H^T9(Rrf3(X|!6@m5Y5IN1xwfS>*=Lf*n+eOJ$7@dsBFpIM1OQu}DYuz54M!20 zS@5kfaCa^6QLMin2Lj;R8|(iJkXU!8Jtb@xn9azu!6g&&+4Ye4$s>kU@oU;|#)Oc( zB8TPrBx$F=+Y9%o9YM z=7md%suF)HwVofFw}?yAJ=$}=bhkB?DrFEgL)#S#T_MSl`j zXg^PP42mJ8+a7;Cu=!ik913=$CapA;N9rS*&*u1fg1%5H1UWG2Cvpqi9O3RXJfX3A z7?sN`1PjnC?yh45Xt(?Xtv~&ad1bhh1G+=QerHl~ro91_;5Br@lHmk|GGfnDUqyC$ zyWGD56&h{Rul&zUvaIVmnmySeGloPUnSt@IwJ>zOS2&8>*#yO{C=!r zzuWF`c3skC8nAip`)1rc%(*gDxn=$0$<{FsY6CNIJv=>05CV`B0|Xz{Y3q>a+{Xbt zNqsc3nfVWaS}}C3_A$vX7T)$tS^&F9LX65)h$9<}^qSqUgBZ z9oDvsVaYJd?uOZ6!Cf4?PgbP1{g(lO(et$2?oFj)h-zT`SD!sf_!HH>_ z>GVxj|2;v2%tf41k@BYotx3bTul#{zN`u4;1K}C*p~KlhPiF(+79D&Xo~z}+wHNf% zhgGQ{(EX84i=@FmA%S~Zu5~Tke*KU5WD;N*-Up~FoX+E?Z)P9o-u9-W{79VUn zYLFCzO#ic$@ofcP<)FvQZsyf}_NVDP033!*<^6VB2k zpcwpE{kKw>DrlgNmKm!?%#r_QtBg$Si&*&GN5ed-raq-{3$^f;5hsI9ARuUwOOPg| zo$~dqzMR3p!^j3pEDiG2-PtH7msjsXhstK9^EdM!ym9}OxUKs8e1FNSDX@|8 zuQVz;Fec4y`VomFykiqNX=6PzjT#ZLg@{cse)IUeA_V?wVGCehdHf&Fsp?~*oWDhy 
zC3GLavqKvWlLA6{aV?b?eUv^;EeA2@)=f_ETBO~~xmGITiGlS4U#Y>O;z+(^PE*wn zUZ{^iyzImQ0BTC1`Y}A(m<)LuS$_uPP0-(Bkfe8Nu$!WM*v&V#AO8i|RqM{)IJEk3 zcjKi><;lXJ`K5if_8R%@+GD^@&xBS-tsdb0hG0t;4Np#qbOdp_9b(`^VkKlC8cBIP z;@o1i>T#UAsoN_;Gx>`)766Yz?+VaD?)I=&5WHCkD~$wIo$k-2sg+y3?`*z zW@6J(q!zHF5m3ZRFa{dVahLCY3H9b_>G2cKL&V$#fw)DAe3nv$k$7Hi5y>3n8P&g~ zxo9{|K3o^)(RE=3FTpSweL%cCGiLv;*l%P!A7^J!7e!{pLg`${?^$JBDtY#wO0c(M zB6M05TYt`(iXASEV7^v~6WX%d@-(=3>MiA*If6Ey=(gebW z*9MFrIHLUPx!M}S>KgnoF$Knqme*ClyJ!4d7f_xtfzM7!E3M~0hICIi4$mL%oPW_Z zoqq9e!KGD=A)oWvzaMonzzKaJ-li@Tt$V~yMmlpajs}+yevp^oeuhshW!K3 z5Z}LLyO4w0oSK@3b6ci9ztE<>ntwI$Jt1@54UB?T^=%yGNHEjP_xaG~iEYOHqKdLKpXie>_29p+95XNLp>Fy2Gg zTnQuBN(Ul76=cPyJz=qZhk^#@x}xd={)$X6NV)l%@`TZX0{aa~1EM%F)FJ?H0%DI9 zM|TRl6BH=h=KkTVAYxHERq|4y6q`Fgp3=>tV-OBpxNi{mHy7y$bwJt4sp{>psfsPo zj<{)c^a57@O%*0_VQpth@N()kd(3ks?1&q{m$SB*cC9H_(ncU&YmW)?Q6B!f;mBSe z|K=Y_>AllAr(Nd!aC+;R7;E>qD|ZN2GHd&mQ=}RWn=7yVt??t!u;%X~Lv^|zJ?wYB z)X3(U)y1n$)RA3&49WT_OR2rf7Ah`rP)wZ>@V$~S?&cH2;pBK*tIG9I^ZiM){ax=i z2B>nXgX=5$YxADCg;ZUf$fIK%jg~WM7KgH$LEB7gozAq}tC-WG$FI+`Bkz}AaFfC8 z)6_1}CSLB8>b}Jy^2l3dahwhNL@{HV;e?kRUqZvhVn5gSZ@HlreR9ywTPv@~^gcri z=R=zYr3768FPVhIu$JF9*DcJZ4dAdHY5f+Bn4m)`3+%h0JP{H!czG1kx1*4bFj?3X z65%3!?>R7WF*QU8tH`8~XZXeQVP8+NF7Dt0XjsVx);pIP{}y;9VX@ta`g>$dMKQ6XDDGR)FUJ!!nX;xnzyQW{x(y=8|Mec`Q`*hNHNyp=|J|b@fWxyzx4p$ ztCA&~PrQ=*AzE2)=?L1i_>XIIg-5S=Z%WnOp&g066`i2W>@hcw$rvbl89(Qk?Hrm9 zEa(udPDwdP`D<-lDa+i#ZH$2?l&hj9Q3-R7bKJN4fbfuWAGkXjYCwOWoCJu8UAX$? zt=v8IGse-%0H!_Io?uuXZjaIO=(jn^R0L_`|E z!yAQIl|HGyu$zDtn18E_TNISzxBm~(_oWuMP3{$C$qWo`_6|$_xj{z|jV>=AukJH% zuk33Zcda)em+FxdigQ(QbRef1nB{?Iq>9Ilg54*|lU8*%dH&uh>g&M7c*%ZGqR}#P zL2c7P?HSp<)cxW2E;Ha47OjW3Kd!~(t$NIP!i|7y%G#|IiwB2upW_U)HO*kp5tWCc z$T@B$%EeE<`3QmF82(${tJD6K$2xlnj3W#OhB=6@bD`RP2=5OmkF2E)Qc;|IA>krj z(MEL8Y{U**TuyD;&ZUgKZ(Y=Hc^C>XG!nh3!vOUfP>P7%07=}1^IW>m3KK~p_83-A zZ{^h)(5AW9KY^qm_RveE8*I?-Kd;1v-Sm(OLhc*cHxUDuVsIAuP*L0~@4BIqN5<}z z%Ro%`ZKq#Yi*2Q+4Jg-7LhA;-r}Jj1Rb@NN$v0AOMr^te&wO@Re6z6AzVz>Ayp6E zQNN5c8|#7L9x)023HrVeaj1iH9>-&cqk7J^Oq-}6bemcmZ^cHhWS;ANKIVU=&tcP?Djywfd-tuaE;8bZU#>|8UYYsW|X}VZc zTSM+!t1Gz82Rfz2Qy&B&Ykh+6S1tPM*M35*&c6TMI~s1YP`KgI#@cd*-tYS3K}+T3 zVNs?YrYm3OBq=vHvxYnV1u1?f=;R>Ukp>0^Anh~VyI|{6dV(jS<*3l$x)3ya2Y`pK zXkPGizkV%f`1fDe$J3gB?Grt@C$2#aN;IQ}93>T~*A-T12K#qPQ!p0W$ZJ6RKB8nAVfTh3TW-}2>&riGFGHl!^=fK=n-px7NDq+gBD zYpn-E;;ro7@X4sv&?_ui^-(}s9vzoG2B+xDtG zNJuz{J>mBQ+=8AdA}H|HRdMyC7fxLAMBYH;SvEf?g6e|(hXUB^*V*7>So`u$8$>kjGZeITP(w(cxKs7bY|-NS z7l0v(-ZH}VM?fug4I`by5LSG;=HvMi=43%w>Cw?;}w z(rqRkf(b&{I~XX*r_QG>xT01xK9Mx{Nf5}?!1?6KOo~t8^%XvHAgap<)~@|(+C zu|}xK#_q^Le6>x zaeT?!r1GoWpXFkIg@dFhpCSaJeMEOT;5=%55TaM&$2qO{qGQRIDx%~q-qHq|;$}P7 zqia>*nik!;?Hn>=-_3NzUzBKea3(E3y}i>J#~rmi^6_ITnDWV8t;h=`s}AO_-lzHl z30}*0Ou3P}iMf))WDWl^$;QV*FMPM2afdsZi$Ka=`I|14+4g+jDf$(5pW0 z5+`2whb+&?+sw7>lG-^SNSgerxJk&c{Mt1rt9- z>UfrmAak*g-d=rX3cyoTitV&6{uqHb8Vcj2a%aQh(5@Bsb?N8hD;>cNN22#YF9g0p z#sdH&=Ob)LnxlKelGD>Ty^-h*uzQMG#)0Lq&-!rPs~eXdu<$G$mfr&kF|-lB!Qwc=j^ zr+f*v>l$2^aR+XunU=8v^Z1e&wBvB_=oN04T&w-qfSTcJ#( zu02wY*_6QY?n~GrYEhnYvw|~e?$$PgX7uA&I@zb!MFs8JcEe+86fqgbnTKoo#^wP0 zE5voa)e<$rtO>2f-zAo0Gx~S-U32r! 
zP53<-0JClFOHacA_#C$+<)CZ{Gq*rXb^8@4gXAQ?90b#Dl>c?&O?E4E8%95sb(p3L zA||g1FvOV9uSEtk;-9BphHrlwiSB#cDJ$otHYaxvquRJ%Uf86uaJiik;QN+_?RUbQ zTc+P+4A8Ku(>1HK1|1Xmb#9uU8rY(vO*4-5ztf?}$1Ib5i!P2WBuT{P!GQcDIeL5q zHI1;!AERb=5kg0#KRopM#T z-C{i3)GHq?Jffq^9)$%rqi zWGf#wpMNG?lPy!jX*Rs&EBnY0r<9JLnF;6HT2XuXTh@fmA}ma_op-uE(%pMnI}>wT zotAnZ$AXW8QXU3G`E$}*Zf*?2%Ld=P7+)D4ect_;(lr>SK9wgZ#NTzeEZ}oJTJU|B zxTe`=pOd@>ZQ6&(u@ES@7RHmr${$8Ypto?1|C~OrsWg8pPL?o?T}c(isRUU)qfP6F z6fLHNm*=>lw*nt&+Bvb_g*#mCc^;wHkzQZ(e4(NKf$DBFovU2tjLAC2Prq&r-IjJ8Z%p|`yrG#?q@@uV>rUpv{UrNl@TNN@4esS5m9z zw3Mh8bT{j!#_Jo-pPOG9XabmXq>C<+`NN_SBBBgR@5}kTRAmNUxN$%H^-b}P-)^+a zgCB^n3)W^01}wzDOQw%6DzhLPJ><#?J=6SpnKGkUai{Fa!fy}vgEnW3a5d{1ddSnH z5+8{d^lN$*dHTWuwCsnUN3}MdUe6KD?R$FLoPL9+Dgaz>ZKQjA>nb=ECK3WLG=w=s z0ehY(v#89%Rx$2afd40x?g?1gSAE=Oo3d^ zkZgqS{uVW6+FHRXTKo-iU9FPkPn;cRn|dF_8i!s~s#!oUGT@(!*Y7EVoX8A#6m#9% z8jI*oU#*BzpKg{Ij`!O&em`Ynygw3@w=zGC0%VA>qpqxNJl@oYH&oJ9SYFT72hxwk zV)BRe=m~ak!V{mnmfE;?54u&Q)fsV`#4dZOJsa!j`XNhvWPt#Dy(FLIZO6C=9U;kc zA170>$PQ)vj~N4Y2ouh><+PT6i>;xhpslmW*?R)jcfA?IlnQH`#xt(&Ja(o!itdI} zAWY1WF(XeP$X?BKFTv+%&L4j1QHoWfbbp5jUIME;OOI5+`p{aw{5cU#V zVXO(4hd2fwSwjtV;8O;lewQw=?gh5*H~wrw(5#u@#x zviSM&#}btsM%SGFxOq&sU#}|!eGI?1>oeZ+MW)$K8l}(F0))Oh)1L+;hFep$M$Vk0 zfG(JS?YffSx2jNoj^&4qaaLdE9jP^@?t4}%>RUZflgg>1vh*VpZCCS6hGaymD}mnv z4aMMLn6bBvIwQO^OW>^1ttgwH|DKE zp8OWT#;_sq&~1s-0#j0V2Zj!GM4MQBXHJ14Ai}zoBn(T#0%>{qta$$MpSZKLWq(l1 z85yZ(YHN7qM1!pS0kr%Rjfh~tOMm#5VrTtL67x-Li+QmpEKHUgilV#Xkx$RFoBb@3 z0UCv1x%?Wjdx0dQttI1M#bH)5;p&wG-3jR|@-qiryNmRfR#3qr+ZuQ-y>RaK&yI5U zhgIG5%SZSaIWbs4#xf!KEU&tct+_E?kxuRjUIc#bEw<)JD7o}60JKGBfXYdckS{;i z$GE-?%KqD26x}=<^6|n!9FCqq*QjFqV5&_BIW)ciF#ICwKF;e;J968SG|&+C)@vpq zz#o{Z@Xe^&m+B(aN^MZph2|bSpWN&c-G%P{JPxBpvpvmRUEf(2B>)eM8VjKcmVZ zMHhC^sfR#$D+9tS~Zh^IbVyxZ;qcPuO z5k`0u0S*{Ch9#&FXm}I*d{B-7$pW88FJ7R$L(!pK-Ug54YeV z)1u=a&n7?u_g0!qt3dJRU&~utS?F+fpyuG4;9adXp&0)%L28bLtY9bE=iV~60r^df z0{G~9z^OM4vLSS~DcSq7Jm$&xlz;uf8y!=?`=}D?6sK^&2*3JmKu>!Su(1DUbaZ4U zLUdD{)UK|#0v1E6)@XPMve0VM*GOjhnim$RamgFqj`RmLt{ywm#qKwYi&VX%np{kS~jh@+K`=f zsB-60{ft|f(>%cP-Xn|g_?xmu47#=L>dO|f`vZA$akPfcqmInr9zW1e$a6$~Ssp?v zj8lf_*gI3i07zs4zmD-gT%Skdy~KPCKFVo(E;LoIZR)g{oeK!Tvmqb_YF;X$O{?j} z77%Klx;)HBN?39LK3uV2rZg4qJGN)1~6hD&>HOUz5Vcbe`8|KV#E??TNc2)~-{#$VB%|PcAb!uU(;bw_+6JeQz{DUv0n#E7mK}bN z?WS&!k=!+Xa+5FY3V?h-TtX&P3e&k$YD9-)0=*|aSV%pIf4Ki5W(SP|7W?lvUub~2l%^0zGMajpyJ8c>41lC%S6WViiUkX{kd43)b;{B&BsaOOynghQN0|RHeexwWsHcAVrRe>eJ)L`SmtSg?AtAf6=neV8$^Kr$yi+&r0+n)jlxdaMYkl z3%^5X5K$`@^uyp;{AFw$o-`n6aF5~~1Xh>?q0LVhdoot|%2|774mtd*dV(BA6+M)dHwwBGD1)nURmYp<0{qce zx#T$ErK~F1yqhSZBf3s=UT1uYA)qz7@%jU$l!zcMr#?VXtG}j z?f?e~W9T|nUn1&A3SLnN2I4e5GvT%)phx%LBBF`G5)QtKq_l+30OIf5T~VqJa@*nv zm(|XHBdzQ!4PTqR7y79UB7Dn9bY{pvR$2VX!(2I0TBL-L6~M5{>k1b#nyvkiTIb=2 zrfU+`!Qid)5s+iUSi~KGFRqJ*6UZ}Xak1^88&}I&oCpiJHqx$Ztolip;wR`TAT1N| z(S?zQnZf;sq8dhRnHD*EAC~x&@)9_`}rOlcrE>yu1Qil%mc&@#C_W?(-LgiZGS6YIe-KWNMiA)uQyF&?rXTE zDq!kxg$->iK%Um?(POYKb1t|FsarnG{3sC)7C3m`#K)`pS$qix4~|p zJWTyH2gf5f16YZArp{0;A3AV0Bv zfMct= znolFv$_~y0gRDydJOT6iYZu^)GfsOcRX>za1#4-4BO=iM5Twa(81@4DAIwvBP)lu7TDibyk4oS)BYOe20L))kBs?-Yi{I6FeEI&X`nzgg z$-dUn)AM?>fMMsz5^fLgXav?)6Gbi%sT71daZ8(lL0()fPC_^g3}e37(aiL#^s)uc zfP;f^1x**@-&v%}<9Vg~0!5Gv` zT}SU3VS^Qz)>2{$%buh`8r6NH+K-7diea~b(Eid{8s__5fU+eLuXI^r23JwrjoS$TXD`Ps%A6q)oeZ{x>{G3$^2D6g6voBzhzMm%{^l&O1eL7XUrv?9Kt-+y!bLQSNDr96?3uZrQ==B?U_7cid`N1dKPwegpEO>D(jx!t{sF}< z4=Za-o|(FXvYZSO|_ zLj)B+eSNVsJ*MJ^wTP5As!Rme2UO&TIL!}*FU9(ILf+4>1yipYj7I)pB|vb^AUldp z?rLy;LMSPGBmbkQfMTJkTv_3 zm|085K6{U5t0-&G09eTDWWm5uVY^d-}CwX{X5rnuIpUa`Qw~tInV1p&+ESL 
zSK*eC%=s(jb(WtmX#SF$$#vlm%eGqxU&*#>IK>@VAtStupMLHsN=+c>$YB#7m&-oK zfUPGVZy*kubp@6j7{B^TK}V4`aa+6+=rPM%49QOi86Zl&5kcg<$a)n4Rn8(b(sTF7 z)tduQ-FW1pJb(z;F2DPYi|+A2`+pxaB1#geDv32`Cxs6!jkhNA|M(F*3czmU%D<2Q zJnkMA+omuFx^vHVA`%j4rZ$uF`Bz)a1M5X-}CpZ1x29hDk9pOjR7edo@+1)2t zAS5hIJaO&QXM1IRf{sE`a53ui4T-X>x;v8{zSq(}Fa!DEIIK=zZHC}e4oM_y7FZ^w zsArn*2_M>{05z$0tW$_$xjz&LrT0B?*}M01LFF`8DH+vx|;#SArBNL=)btjD+UqnxG>IYHSx!~g1H_X2$ z0mMp34_WcNim=|9D-K2stf;ceg zn2*lgB9F~H2aq9^;O~=+2$b~f2sjAN1VmD~Lk<{i?XvU_xAb(G=;JMHD4&`HFx()t zid4(PO5t?of3!Dmw5Kq3-DCjMo@2r^0bX!F$aApn^O$ z(Oz(ljfm7sut64#0_PHk0x#Y`6pg2!@zz99nrR0*s zAmTw3k7U$3-)Qs#ALqv*8|+>8YyO25k^BcTp>uWP=rFea1MV0Bkjo6asTgjp?1=+i z>B1lGopc}fsP}5)$sV394HIJG%35by7xkig6v{)Zl%JYNEoy4JdLJ?#u}Xss>hAK= z^tdB9;TK`LR}nAX!8To%O>%8di*;1svr-iP2jRf~QSHtPf^SyqmFM}xqg2~E%B zg1OaO*+|f}{ZCQax)r7}3y$LcA;_yIrVx}@a*ofGa z$mGohZ(QDhh9Cejjz@WEuHTYs;zC$nam)SP{Od}}2q#XOs3`T4Wv%3)71fu(_GRa+ z3f=?%W&q*N`1L4_&0wkdyB-dfalK(G@+`9HE@DH^dih&>anc&VX1awNUTc4%fwTK> zWAgsQ`EM)2CGt)%6-M*djY<~7D(1@HN5)ZXjOIItC@nRTC{@-TO23;ZG`D}0^~^O! zhA-Hie-j!q|HAaa(8I^fk7sF-RX5;89VR$WSSUD4jLPe&?>7aZND*(ATn4E~kc-7( zyhrV+CV>yo=v4oX$$_BD9knz)J@|^>N-bat)wS8l&AXK!5*qLt9n~(U*i-s1 zLs$hiDyif+DR{XED;u}9_50%JP!ab&L_rBp|44>VU|5|qL>2K*oMT$BZP!6h_d&?n{yqQK{v*(u-2 zyNY8Y-vWkhr|MC85&LZ+R`G2+pH_VsW`g=l{<_VHiRaDr}8RzC9ECL2~0yx zBKDoCe?;iuhMhhg@T)L?NnoQB*aj}MY5gSR2rnOKJz)Ao{yhT+88(R!9rjigqGrXY zt^8F!=-Dqbv3sMOKsZhAlJ)pRi^_}s<&I5BaBbU zpKn^7*zGPy@>}^&79R}@b=Nr0c-%;vSd;{IXb}fOzm0g&eUs+Kxy%V|B1HFT3C}oT zn$7TQsT&v3)Th#(|L3l)SpmvxF+;c*70`)$A}5Pi!i8V;W*gMXwEQAp&Q_JoJY3YZ zh`j~#N?bdvU$!0~h)fT}Po%|uG$N(34|2Db`tjn1$C4fX!^iH=(;^cPuVamw(28A6 z(z}%5=+;b}r|6A6Zx|;!3bq1K!4L7s&0-E~K?g)Zw3r+bP{c(?FGCb|R3;a?Ab>N| zIMR{2V}3$aD$x1*+t8ca_PzGVK;a_4f zRzRoi6xMuvb|J>^ouBT`(1!9|u&u&CT7S#v!6hg?hKc5)rlVO=MonS#BaQO}{Bf<;XW>b;nWkpjpp$P5DrArD z#HwH^L52UXGkOFQ*Zy8?JbTs;mTG%VazL6TwK~~Jzwcu0QfrbL#1{BHSVDbyF-=bK zb{{M}L1nl4BgR=$a+I=fjJNl#K|V2v3m^bvf)XA+$)0 z>MTl>BspT6-?8{5j9T`}76Z~rQ8EQC_SFkme8d<$y4YJv=J#!VPA=AjI7|u&`A0Zp z>G^-rgKHYTCH{vG2CNN&XCMmJdM+~>4|;I)OR?Xa7|m5473yybAGVoa``I-qqS7`{ zEsV3%jW+1XwD30mYecl`SDf>19+nCYj6)3**buY;MZ{qsX}L0Gi-)bTBM7O=2@5St z!-ZGiC}TAawQ6N3SSTkjZ6uvuzK^Rkp&Chl4cN{QPJ*3(+v)U?*>h_0E*~2AlA!|! 
Updates `body-parser` from 1.20.2 to 1.20.3 - [Release notes](https://github.com/expressjs/body-parser/releases) - [Changelog](https://github.com/expressjs/body-parser/blob/master/HISTORY.md) - [Commits](https://github.com/expressjs/body-parser/compare/1.20.2...1.20.3) Updates `express` from 4.19.2 to 4.21.0 - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/4.21.0/History.md) - [Commits](https://github.com/expressjs/express/compare/4.19.2...4.21.0) Updates `express` from 4.19.2 to 4.21.0 - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/4.21.0/History.md) - [Commits](https://github.com/expressjs/express/compare/4.19.2...4.21.0) Updates `path-to-regexp` from 0.1.7 to 0.1.10 - [Release notes](https://github.com/pillarjs/path-to-regexp/releases) - [Changelog](https://github.com/pillarjs/path-to-regexp/blob/master/History.md) - [Commits](https://github.com/pillarjs/path-to-regexp/compare/v0.1.7...v0.1.10) Updates `micromatch` from 4.0.7 to 4.0.8 - [Release notes](https://github.com/micromatch/micromatch/releases) - [Changelog](https://github.com/micromatch/micromatch/blob/master/CHANGELOG.md) - [Commits](https://github.com/micromatch/micromatch/compare/4.0.7...4.0.8) Updates `send` from 0.18.0 to 0.19.0 - [Release notes](https://github.com/pillarjs/send/releases) - [Changelog](https://github.com/pillarjs/send/blob/master/HISTORY.md) - [Commits](https://github.com/pillarjs/send/compare/0.18.0...0.19.0) Updates `serve-static` from 1.15.0 to 1.16.2 - [Release notes](https://github.com/expressjs/serve-static/releases) - [Changelog](https://github.com/expressjs/serve-static/blob/v1.16.2/HISTORY.md) - [Commits](https://github.com/expressjs/serve-static/compare/v1.15.0...v1.16.2) Updates `webpack` from 5.92.1 to 5.94.0 - [Release notes](https://github.com/webpack/webpack/releases) - [Commits](https://github.com/webpack/webpack/compare/v5.92.1...v5.94.0) --- updated-dependencies: - dependency-name: body-parser dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: express dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: express dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: path-to-regexp dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: micromatch dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: send dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: serve-static dependency-type: indirect dependency-group: npm_and_yarn - dependency-name: webpack dependency-type: indirect dependency-group: npm_and_yarn ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs_v2/package-lock.json | 139 ++++++++++++++++++-------------------- 1 file changed, 67 insertions(+), 72 deletions(-) diff --git a/docs_v2/package-lock.json b/docs_v2/package-lock.json index 21e6a165..ebb2ff9e 100644 --- a/docs_v2/package-lock.json +++ b/docs_v2/package-lock.json @@ -3405,24 +3405,6 @@ "@types/ms": "*" } }, - "node_modules/@types/eslint": { - "version": "8.56.10", - "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.10.tgz", - "integrity": "sha512-Shavhk87gCtY2fhXDctcfS3e6FdxWkCx1iUZ9eEUbh7rTqlZT0/IzOkCOVt0fCjcFuZ9FPYfuezTBImfHCDBGQ==", - "dependencies": { - "@types/estree": "*", - "@types/json-schema": "*" - } - }, - "node_modules/@types/eslint-scope": { - "version": "3.7.7", - "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", - "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", - "dependencies": { - "@types/eslint": "*", - "@types/estree": "*" - } - }, "node_modules/@types/estree": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", @@ -4299,9 +4281,9 @@ } }, "node_modules/body-parser": { - "version": "1.20.2", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", - "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", "dependencies": { "bytes": "3.1.2", "content-type": "~1.0.5", @@ -4311,7 +4293,7 @@ "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.11.0", + "qs": "6.13.0", "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" @@ -5820,17 +5802,17 @@ } }, "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", "engines": { "node": ">= 0.8" } }, "node_modules/enhanced-resolve": { - "version": "5.17.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.0.tgz", - "integrity": "sha512-dwDPwZL0dmye8Txp2gzFmA6sxALaSvdRDjPH0viLcKrtlOL3tw62nWWweVD1SdILDTJrbrL6tdWVN58Wo6U3eA==", + "version": "5.17.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz", + "integrity": "sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==", "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" @@ -6125,36 +6107,36 @@ } }, "node_modules/express": { - "version": "4.19.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", - "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.0.tgz", + "integrity": "sha512-VqcNGcj/Id5ZT1LZ/cfihi3ttTn+NJmkli2eZADigjq29qTlWi/hAQ43t/VLPq8+UX06FCEx3ByOYet6ZFblng==", "dependencies": 
{ "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.2", + "body-parser": "1.20.3", "content-disposition": "0.5.4", "content-type": "~1.0.4", "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", - "finalhandler": "1.2.0", + "finalhandler": "1.3.1", "fresh": "0.5.2", "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", + "merge-descriptors": "1.0.3", "methods": "~1.1.2", "on-finished": "2.4.1", "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", + "path-to-regexp": "0.1.10", "proxy-addr": "~2.0.7", - "qs": "6.11.0", + "qs": "6.13.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", + "send": "0.19.0", + "serve-static": "1.16.2", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", @@ -6190,9 +6172,9 @@ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" }, "node_modules/express/node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + "version": "0.1.10", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", + "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==" }, "node_modules/express/node_modules/range-parser": { "version": "1.2.1", @@ -6377,12 +6359,12 @@ } }, "node_modules/finalhandler": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", - "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", "dependencies": { "debug": "2.6.9", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "on-finished": "2.4.1", "parseurl": "~1.3.3", @@ -8645,9 +8627,12 @@ } }, "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, "node_modules/merge-stream": { "version": "2.0.0", @@ -10363,9 +10348,9 @@ ] }, "node_modules/micromatch": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.7.tgz", - "integrity": "sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==", + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" @@ -10620,9 +10605,12 @@ } }, "node_modules/object-inspect": { - "version": "1.13.1", - "resolved": 
"https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", - "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz", + "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==", + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -11758,11 +11746,11 @@ } }, "node_modules/qs": { - "version": "6.11.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", - "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", "dependencies": { - "side-channel": "^1.0.4" + "side-channel": "^1.0.6" }, "engines": { "node": ">=0.6" @@ -12763,9 +12751,9 @@ } }, "node_modules/send": { - "version": "0.18.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", - "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", "dependencies": { "debug": "2.6.9", "depd": "2.0.0", @@ -12798,6 +12786,14 @@ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" }, + "node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/send/node_modules/ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", @@ -12910,14 +12906,14 @@ } }, "node_modules/serve-static": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", "dependencies": { - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "parseurl": "~1.3.3", - "send": "0.18.0" + "send": "0.19.0" }, "engines": { "node": ">= 0.8.0" @@ -14175,11 +14171,10 @@ } }, "node_modules/webpack": { - "version": "5.92.1", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.92.1.tgz", - "integrity": "sha512-JECQ7IwJb+7fgUFBlrJzbyu3GEuNBcdqr1LD7IbSzwkSmIevTm8PF+wej3Oxuz/JFBUZ6O1o43zsPkwm1C4TmA==", + "version": "5.94.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.94.0.tgz", + "integrity": "sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg==", "dependencies": { - "@types/eslint-scope": "^3.7.3", "@types/estree": "^1.0.5", "@webassemblyjs/ast": "^1.12.1", "@webassemblyjs/wasm-edit": "^1.12.1", @@ -14188,7 +14183,7 @@ 
"acorn-import-attributes": "^1.9.5", "browserslist": "^4.21.10", "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.17.0", + "enhanced-resolve": "^5.17.1", "es-module-lexer": "^1.2.1", "eslint-scope": "5.1.1", "events": "^3.2.0", From 8642c64f6249dcdcd03d12319fb4fe807b0460af Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 15:29:31 +0200 Subject: [PATCH 116/251] build(deps): bump bluefireteam/melos-action (#549) Bumps [bluefireteam/melos-action](https://github.com/bluefireteam/melos-action) from 7e70fbe34bbd91a75eb505eeb4174b0ac9a1df52 to c7dcb921b23cc520cace360b95d02b37bf09cdaa. - [Release notes](https://github.com/bluefireteam/melos-action/releases) - [Commits](https://github.com/bluefireteam/melos-action/compare/7e70fbe34bbd91a75eb505eeb4174b0ac9a1df52...c7dcb921b23cc520cace360b95d02b37bf09cdaa) --- updated-dependencies: - dependency-name: bluefireteam/melos-action dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 06a8deb6..c4e0c410 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -41,7 +41,7 @@ jobs: run: flutter pub cache clean - name: Install Melos - uses: bluefireteam/melos-action@7e70fbe34bbd91a75eb505eeb4174b0ac9a1df52 + uses: bluefireteam/melos-action@c7dcb921b23cc520cace360b95d02b37bf09cdaa with: run-bootstrap: false From 887a2d1bf15c21aeaaba5b8e2952ab559736416a Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 25 Sep 2024 20:30:37 +0200 Subject: [PATCH 117/251] docs: Update Google's models in documentation (#551) --- .../integrations/firebase_vertex_ai.md | 12 ++++++------ .../chat_models/integrations/googleai.md | 18 +++++++++--------- .../text_embedding/integrations/google_ai.md | 2 -- .../vertex_ai/chat_firebase_vertex_ai.dart | 12 ++++++------ .../google_ai/chat_google_generative_ai.dart | 19 ++++++++++--------- .../google_ai/google_ai_embeddings.dart | 2 -- 6 files changed, 31 insertions(+), 34 deletions(-) diff --git a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md index cd33daa2..167ffe13 100644 --- a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md +++ b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md @@ -72,18 +72,18 @@ The following models are available: * Max output tokens: 8192 - `gemini-1.5-pro`: * text / image / audio -> text model - * Max input token: 1048576 + * Max input token: 2097152 * Max output tokens: 8192 - `gemini-1.0-pro-vision`: * text / image -> text model - * Max input token: 12288 - * Max output tokens: 4096 + * Max input token: 16384 + * Max output tokens: 2048 - `gemini-1.0-pro` * text -> text model - * Max input token: 30720 - * Max output tokens: 2048 + * Max input token: 32760 + * Max output tokens: 8192 -Mind that this list may not be up-to-date. Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) for the updated list. +Mind that this list may not be up-to-date. Refer to the [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models) for the updated list. 
## Multimodal support diff --git a/docs/modules/model_io/models/chat_models/integrations/googleai.md b/docs/modules/model_io/models/chat_models/integrations/googleai.md index 033c7672..12ff5f2c 100644 --- a/docs/modules/model_io/models/chat_models/integrations/googleai.md +++ b/docs/modules/model_io/models/chat_models/integrations/googleai.md @@ -11,20 +11,20 @@ The following models are available: * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 -- `gemini-1.5-pro`: text / image -> text model +- `gemini-1.5-pro`: * text / image / audio -> text model - * Max input token: 1048576 + * Max input token: 2097152 * Max output tokens: 8192 -- `gemini-pro-vision`: - * text / image -> text model - * Max input token: 12288 - * Max output tokens: 4096 - `gemini-1.0-pro` (or `gemini-pro`): * text -> text model - * Max input token: 30720 - * Max output tokens: 2048 + * Max input token: 32760 + * Max output tokens: 8192 +- `aqa`: + * text -> text model + * Max input token: 7168 + * Max output tokens: 1024 -Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/models) for the updated list. +Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/gemini-api/docs/models/gemini) for the updated list. ## Usage diff --git a/docs/modules/retrieval/text_embedding/integrations/google_ai.md b/docs/modules/retrieval/text_embedding/integrations/google_ai.md index 6d84e8a1..657d7f6d 100644 --- a/docs/modules/retrieval/text_embedding/integrations/google_ai.md +++ b/docs/modules/retrieval/text_embedding/integrations/google_ai.md @@ -6,8 +6,6 @@ The embedding service in the [Gemini API](https://ai.google.dev/docs/embeddings_ - `text-embedding-004` * Dimensions: 768 (with support for reduced dimensionality) -- `embedding-001` - * Dimensions: 768 The previous list of models may not be exhaustive or up-to-date. Check out the [Google AI documentation](https://ai.google.dev/models/gemini) for the latest list of available models. diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index 20b2b520..83ac8d8c 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -42,19 +42,19 @@ import 'types.dart'; /// * Max output tokens: 8192 /// - `gemini-1.5-pro`: /// * text / image / audio -> text model -/// * Max input token: 1048576 +/// * Max input token: 2097152 /// * Max output tokens: 8192 /// - `gemini-1.0-pro-vision`: /// * text / image -> text model -/// * Max input token: 12288 -/// * Max output tokens: 4096 +/// * Max input token: 16384 +/// * Max output tokens: 2048 /// - `gemini-1.0-pro` /// * text -> text model -/// * Max input token: 30720 -/// * Max output tokens: 2048 +/// * Max input token: 32760 +/// * Max output tokens: 8192 /// /// Mind that this list may not be up-to-date. -/// Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) +/// Refer to the [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models) /// for the updated list. 
/// /// ### Call options diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart index 0fde4b9f..5b41f34d 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart @@ -35,21 +35,22 @@ import 'types.dart'; /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 -/// - `gemini-1.5-pro`: text / image -> text model +/// - `gemini-1.5-pro`: /// * text / image / audio -> text model -/// * Max input token: 1048576 +/// * Max input token: 2097152 /// * Max output tokens: 8192 -/// - `gemini-pro-vision`: -/// * text / image -> text model -/// * Max input token: 12288 -/// * Max output tokens: 4096 /// - `gemini-1.0-pro` (or `gemini-pro`): /// * text -> text model -/// * Max input token: 30720 -/// * Max output tokens: 2048 +/// * Max input token: 32760 +/// * Max output tokens: 8192 +/// - `aqa`: +/// * text -> text model +/// * Max input token: 7168 +/// * Max output tokens: 1024 /// /// Mind that this list may not be up-to-date. -/// Refer to the [documentation](https://ai.google.dev/models) for the updated list. +/// Refer to the [documentation](https://ai.google.dev/gemini-api/docs/models/gemini) +/// for the updated list. /// /// #### Tuned models /// diff --git a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart index 93ec105a..263a2c44 100644 --- a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart +++ b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart @@ -24,8 +24,6 @@ import '../../utils/https_client/http_client.dart'; /// /// - `text-embedding-004` /// * Dimensions: 768 (with support for reduced dimensionality) -/// - `embedding-001` -/// * Dimensions: 768 /// /// The previous list of models may not be exhaustive or up-to-date. Check out /// the [Google AI documentation](https://ai.google.dev/models/gemini) From 71143ff2df4181aeed5fccaffa1f55d57cf374b4 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 25 Sep 2024 22:06:55 +0200 Subject: [PATCH 118/251] feat: Add support for deleteWhere in ObjectBoxVectorStore (#552) --- .../vector_stores/integrations/objectbox.md | 87 +++++++++++++- .../objectbox/base_objectbox.dart | 108 +++++++++++++++++- .../vector_stores/objectbox/objectbox.dart | 2 +- .../objectbox/objectbox_test.dart | 31 +++++ .../lib/src/vector_stores/base.dart | 2 - 5 files changed, 221 insertions(+), 9 deletions(-) diff --git a/docs/modules/retrieval/vector_stores/integrations/objectbox.md b/docs/modules/retrieval/vector_stores/integrations/objectbox.md index 0ac3dd9b..ee035833 100644 --- a/docs/modules/retrieval/vector_stores/integrations/objectbox.md +++ b/docs/modules/retrieval/vector_stores/integrations/objectbox.md @@ -119,6 +119,14 @@ To delete documents, you can use the `delete` method passing the ids of the docu await vectorStore.delete(ids: ['9999']); ``` +You can also use `deleteWhere` to delete documents based on a condition. 
+ +```dart +await vectorStore.deleteWhere( + ObjectBoxDocumentProps.metadata.contains('cat'), +); +``` + ## Example: Building a Fully Local RAG App with ObjectBox and Ollama This example demonstrates how to build a fully local RAG (Retrieval-Augmented Generation) app using ObjectBox and Ollama. The app retrieves blog posts, splits them into chunks, and stores them in an ObjectBox vector store. It then uses the stored information to generate responses to user questions. @@ -250,7 +258,7 @@ Check out the [Wikivoyage EU example](https://github.com/davidmigloz/langchain_d ### BaseObjectBoxVectorStore -If you need more control over the entity (e.g. if you need to persist custom fields), you can use the `BaseObjectBoxVectorStore` class instead of `ObjectBoxVectorStore`. +If you need more control over the entity (e.g. if you are using ObjectBox to store other entities, or if you need to customize the Document entity class.), you can use the `BaseObjectBoxVectorStore` class instead of `ObjectBoxVectorStore`. `BaseObjectBoxVectorStore` requires the following parameters: - `embeddings`: The embeddings model to use. @@ -260,4 +268,79 @@ If you need more control over the entity (e.g. if you need to persist custom fie - `getIdProperty`: A function that returns the ID property of the entity. - `getEmbeddingProperty`: A function that returns the embedding property of the entity. -You can check how `ObjectBoxVectorStore` is implemented to see how to use `BaseObjectBoxVectorStore`. +Here is an example of how to use this class: + +First, you can define our own Document entity class instead of using the one provided by the [ObjectBoxVectorStore]. In this way, you can customize the entity to your needs. You will need to define the mapping logic between the entity and the LangChain [Document] model. + +```dart +@Entity() +class MyDocumentEntity { + MyDocumentEntity({ + required this.id, + required this.content, + required this.metadata, + required this.embedding, + }); + @Id() + int internalId = 0; + @Unique(onConflict: ConflictStrategy.replace) + String id; + String content; + String metadata; + @HnswIndex( + dimensions: 768, + distanceType: VectorDistanceType.cosine, + ) + @Property(type: PropertyType.floatVector) + List embedding; + factory MyDocumentEntity.fromModel( + Document doc, List embedding, + ) => MyDocumentEntity( + id: doc.id ?? '', + content: doc.pageContent, + metadata: jsonEncode(doc.metadata), + embedding: embedding, + ); + Document toModel() => Document( + id: id, + pageContent: content, + metadata: jsonDecode(metadata), + ); +} +``` + +After defining the entity class, you will need to run the ObjectBox generator: + +```sh +dart run build_runner build --delete-conflicting-outputs +``` + +Then, you just need to create your custom vector store class that extends [BaseObjectBoxVectorStore] and wire everything up: + +```dart +class MyCustomVectorStore extends BaseObjectBoxVectorStore { + MyCustomVectorStore({ + required super.embeddings, + required Store store, + }) : super( + box: store.box(), + createEntity: ( + String id, + String content, + String metadata, + List embedding, + ) => + MyDocumentEntity( + id: id, + content: content, + metadata: metadata, + embedding: embedding, + ), + createDocument: (MyDocumentEntity docDto) => docDto.toModel(), + getIdProperty: () => MyDocumentEntity_.id, + getEmbeddingProperty: () => MyDocumentEntity_.embedding, + ); +} +``` + +Now you can use the [MyCustomVectorStore] class to store and search documents. 
diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart index 7e065c4a..84658107 100644 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart @@ -14,8 +14,101 @@ import 'package:uuid/uuid.dart'; /// {@template base_object_box_vector_store} /// Base class for ObjectBox vector store. /// -/// Use this class if you need more control over the ObjectBox store. -/// Otherwise, use [ObjectBoxVectorStore] which is a pre-configured version. +/// The [ObjectBoxVectorStore] class is a pre-configured version of this class, +/// but it can only be used if you don't use ObjectBox for anything else. +/// +/// If you need more control over the ObjectBox store, use this class instead. +/// For example, if you are using ObjectBox to store other entities, or if you +/// need to customize the Document entity class. +/// +/// Here is an example of how to use this class: +/// +/// First, you can define our own Document entity class instead of using the +/// one provided by the [ObjectBoxVectorStore]. In this way, you can customize +/// the entity to your needs. You will need to define the mapping logic between +/// the entity and the LangChain [Document] model. +/// +/// ```dart +/// @Entity() +/// class MyDocumentEntity { +/// MyDocumentEntity({ +/// required this.id, +/// required this.content, +/// required this.metadata, +/// required this.embedding, +/// }); +/// +/// @Id() +/// int internalId = 0; +/// +/// @Unique(onConflict: ConflictStrategy.replace) +/// String id; +/// +/// String content; +/// +/// String metadata; +/// +/// @HnswIndex( +/// dimensions: 768, +/// distanceType: VectorDistanceType.cosine, +/// ) +/// @Property(type: PropertyType.floatVector) +/// List embedding; +/// +/// factory MyDocumentEntity.fromModel( +/// Document doc, List embedding, +/// ) => MyDocumentEntity( +/// id: doc.id ?? '', +/// content: doc.pageContent, +/// metadata: jsonEncode(doc.metadata), +/// embedding: embedding, +/// ); +/// +/// Document toModel() => Document( +/// id: id, +/// pageContent: content, +/// metadata: jsonDecode(metadata), +/// ); +/// } +/// ``` +/// +/// After defining the entity class, you will need to run the ObjectBox +/// generator: +/// +/// ```sh +/// dart run build_runner build --delete-conflicting-outputs +/// ``` +/// +/// Then, you just need to create your custom vector store class that +/// extends [BaseObjectBoxVectorStore] and wire everything up: +/// +/// ```dart +/// class MyCustomVectorStore extends BaseObjectBoxVectorStore { +/// MyCustomVectorStore({ +/// required super.embeddings, +/// required Store store, +/// }) : super( +/// box: store.box(), +/// createEntity: ( +/// String id, +/// String content, +/// String metadata, +/// List embedding, +/// ) => +/// MyDocumentEntity( +/// id: id, +/// content: content, +/// metadata: metadata, +/// embedding: embedding, +/// ), +/// createDocument: (MyDocumentEntity docDto) => docDto.toModel(), +/// getIdProperty: () => MyDocumentEntity_.id, +/// getEmbeddingProperty: () => MyDocumentEntity_.embedding, +/// ); +/// } +/// ``` +/// +/// Now you can use the [MyCustomVectorStore] class to store and search documents. 
/// {@endtemplate} class BaseObjectBoxVectorStore extends VectorStore { /// {@macro base_object_box_vector_store} @@ -87,8 +180,15 @@ class BaseObjectBoxVectorStore extends VectorStore { } @override - Future delete({required final List ids}) async { - _box.query(_getIdProperty().oneOf(ids)).build().remove(); + Future delete({required final List ids}) { + return _box.query(_getIdProperty().oneOf(ids)).build().removeAsync(); + } + + /// Delete by condition. + /// + /// - [condition] is the condition to delete by. + Future deleteWhere(final Condition condition) { + return _box.query(condition).build().removeAsync(); } @override diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart index 94457e54..5889b920 100644 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart @@ -26,7 +26,7 @@ import 'types.dart'; /// /// This vector stores creates a [Store] with an [ObjectBoxDocument] entity /// that persists LangChain [Document]s along with their embeddings. If you -/// need more control over the entity, you can use the +/// need more control over the entity or the storeo, you can use the /// [BaseObjectBoxVectorStore] class instead. /// /// See documentation for more details: diff --git a/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart b/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart index 740a06d7..fdce5a1b 100644 --- a/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart +++ b/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart @@ -135,6 +135,37 @@ void main() async { ); expect(res2.length, 0); }); + + test('Test delete where', () async { + await vectorStore.addDocuments( + documents: [ + const Document( + id: '9999', + pageContent: 'This document will be deleted', + metadata: {'cat': 'xxx'}, + ), + ], + ); + final res1 = await vectorStore.similaritySearch( + query: 'Deleted doc', + config: ObjectBoxSimilaritySearch( + filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), + ), + ); + expect(res1.length, 1); + expect(res1.first.id, '9999'); + + await vectorStore.deleteWhere( + ObjectBoxDocumentProps.metadata.contains('xxx'), + ); + final res2 = await vectorStore.similaritySearch( + query: 'Deleted doc', + config: ObjectBoxSimilaritySearch( + filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), + ), + ); + expect(res2.length, 0); + }); }); group('ObjectBoxSimilaritySearch', () { diff --git a/packages/langchain_core/lib/src/vector_stores/base.dart b/packages/langchain_core/lib/src/vector_stores/base.dart index 9ef54df3..3a5ecb51 100644 --- a/packages/langchain_core/lib/src/vector_stores/base.dart +++ b/packages/langchain_core/lib/src/vector_stores/base.dart @@ -45,8 +45,6 @@ abstract class VectorStore { /// Delete by vector ID. /// /// - [ids] is a list of ids to delete. - /// - /// Returns true if the delete was successful. Future delete({required final List ids}); /// Returns docs most similar to query using specified search type. 
From 6a378c814a2720de1dd5da0653d43627f4a4fc42 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 25 Sep 2024 22:46:03 +0200 Subject: [PATCH 119/251] refactor: Add stubs for ObjectBox on web platform (#553) --- .../objectbox/base_objectbox_stub.dart | 40 ++++++++++++++ .../lib/src/vector_stores/objectbox/ob.dart | 7 +++ .../src/vector_stores/objectbox/ob_io.dart | 3 ++ .../src/vector_stores/objectbox/ob_stub.dart | 3 ++ .../objectbox/objectbox_stub.dart | 53 +++++++++++++++++++ .../vector_stores/objectbox/types_stub.dart | 11 ++++ .../lib/src/vector_stores/vector_stores.dart | 5 +- 7 files changed, 118 insertions(+), 4 deletions(-) create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart create mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart new file mode 100644 index 00000000..308e7da0 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart @@ -0,0 +1,40 @@ +// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters +import 'package:langchain_core/documents.dart'; +import 'package:langchain_core/vector_stores.dart'; + +// This is a stub class +class BaseObjectBoxVectorStore extends VectorStore { + BaseObjectBoxVectorStore({ + required super.embeddings, + required final Object? box, + required final Object? createEntity, + required final Object? createDocument, + required final Object? getIdProperty, + required final Object? 
getEmbeddingProperty, + }); + + @override + Future> addVectors({ + required List> vectors, + required List documents, + }) { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } + + @override + Future delete({required List ids}) { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } + + Future deleteWhere(final Object condition) { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } + + @override + Future> similaritySearchByVectorWithScores({ + required List embedding, + VectorStoreSimilaritySearch config = const VectorStoreSimilaritySearch(), + }) { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } +} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart new file mode 100644 index 00000000..63b1f86d --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart @@ -0,0 +1,7 @@ +export 'ob_io.dart' if (dart.library.js_interop) 'ob_stub.dart' + show + BaseObjectBoxVectorStore, + ObjectBoxDocument, + ObjectBoxDocumentProps, + ObjectBoxSimilaritySearch, + ObjectBoxVectorStore; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart new file mode 100644 index 00000000..db6546e3 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart @@ -0,0 +1,3 @@ +export 'base_objectbox.dart'; +export 'objectbox.dart'; +export 'types.dart'; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart new file mode 100644 index 00000000..87329806 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart @@ -0,0 +1,3 @@ +export 'base_objectbox_stub.dart'; +export 'objectbox_stub.dart'; +export 'types_stub.dart'; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart new file mode 100644 index 00000000..7763f9cf --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart @@ -0,0 +1,53 @@ +// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters +import 'base_objectbox_stub.dart'; + +// This is a stub class +class ObjectBoxVectorStore extends BaseObjectBoxVectorStore { + ObjectBoxVectorStore({ + required super.embeddings, + required int dimensions, + final String? directory, + final int? maxDBSizeInKB, + final int? maxDataSizeInKB, + final int? fileMode, + final int? maxReaders, + final bool queriesCaseSensitiveDefault = true, + final String? 
macosApplicationGroup, + }) : super( + box: null, + createEntity: null, + createDocument: null, + getIdProperty: null, + getEmbeddingProperty: null, + ); + + void close() { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } +} + +// This is a stub class +class ObjectBoxDocument { + ObjectBoxDocument( + this.internalId, + this.id, + this.content, + this.metadata, + this.embedding, + ); + + int internalId = 0; + String id; + String content; + String metadata; + List embedding; +} + +// This is a stub class +class ObjectBoxDocumentProps { + static const internalId = null; + static const id = null; + static const content = null; + static const metadata = null; + static const embedding = null; +} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart new file mode 100644 index 00000000..4b1aa144 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart @@ -0,0 +1,11 @@ +// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters +import 'package:langchain_core/vector_stores.dart'; + +// This is a stub class +class ObjectBoxSimilaritySearch extends VectorStoreSimilaritySearch { + ObjectBoxSimilaritySearch({ + super.k = 0, + super.scoreThreshold, + Object? filterCondition, + }) : super(filter: null); +} diff --git a/packages/langchain_community/lib/src/vector_stores/vector_stores.dart b/packages/langchain_community/lib/src/vector_stores/vector_stores.dart index 753d8168..d9da952b 100644 --- a/packages/langchain_community/lib/src/vector_stores/vector_stores.dart +++ b/packages/langchain_community/lib/src/vector_stores/vector_stores.dart @@ -1,4 +1 @@ -export 'objectbox/base_objectbox.dart' show BaseObjectBoxVectorStore; -export 'objectbox/objectbox.dart' - show ObjectBoxDocument, ObjectBoxDocumentProps, ObjectBoxVectorStore; -export 'objectbox/types.dart' show ObjectBoxSimilaritySearch; +export 'objectbox/ob.dart'; From 28c9486fabeeabbb7e6ad834fffeb48528e02207 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 25 Sep 2024 22:50:46 +0200 Subject: [PATCH 120/251] feat: Update Ollama default model to llama-3.2 (#554) --- docs/expression_language/primitives/router.md | 6 ++-- .../modules/agents/agent_types/tools_agent.md | 4 +-- .../models/chat_models/integrations/ollama.md | 28 +++++++++---------- .../models/llms/integrations/ollama.md | 6 ++-- .../text_embedding/integrations/ollama.md | 2 +- .../vector_stores/integrations/objectbox.md | 2 +- .../expression_language/cookbook/routing.dart | 6 ++-- .../agents/agent_types/tools_agent.dart | 2 +- .../chat_models/integrations/ollama.dart | 14 +++++----- .../models/llms/integrations/ollama.dart | 4 +-- .../vector_stores/integrations/objectbox.dart | 2 +- .../lib/home/bloc/providers.dart | 2 +- examples/wikivoyage_eu/README.md | 6 ++-- examples/wikivoyage_eu/bin/wikivoyage_eu.dart | 2 +- examples/wikivoyage_eu/pubspec.yaml | 2 +- packages/langchain/README.md | 2 +- .../vector_stores/objectbox/objectbox.dart | 2 +- .../chat_models/chat_ollama/chat_ollama.dart | 10 +++---- .../lib/src/embeddings/ollama_embeddings.dart | 6 ++-- .../langchain_ollama/lib/src/llms/ollama.dart | 10 +++---- packages/langchain_ollama/pubspec.yaml | 2 +- .../test/chat_models/chat_ollama_test.dart | 2 +- .../test/embeddings/ollama_test.dart | 2 +- .../test/llms/ollama_test.dart | 2 +- packages/ollama_dart/README.md | 6 ++-- .../example/ollama_dart_example.dart | 10 +++---- 
packages/ollama_dart/oas/ollama-curated.yaml | 20 ++++++------- packages/ollama_dart/pubspec.yaml | 2 +- .../test/ollama_dart_chat_test.dart | 2 +- 29 files changed, 83 insertions(+), 83 deletions(-) diff --git a/docs/expression_language/primitives/router.md b/docs/expression_language/primitives/router.md index effd5f66..da5a59c6 100644 --- a/docs/expression_language/primitives/router.md +++ b/docs/expression_language/primitives/router.md @@ -12,7 +12,7 @@ First, let’s create a chain that will identify incoming questions as being abo ```dart final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: ChatOllamaOptions(model: 'llama3.2'), ); final classificationChain = PromptTemplate.fromTemplate(''' @@ -131,7 +131,7 @@ Here is a question: {query} '''; -final embeddings = OllamaEmbeddings(model: 'llama3.1'); +final embeddings = OllamaEmbeddings(model: 'llama3.2'); final promptTemplates = [physicsTemplate, historyTemplate]; final promptEmbeddings = await embeddings.embedDocuments( promptTemplates.map((final pt) => Document(pageContent: pt)).toList(), @@ -146,7 +146,7 @@ final chain = Runnable.fromMap({'query': Runnable.passthrough()}) | return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]); }) | ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ) | StringOutputParser(); diff --git a/docs/modules/agents/agent_types/tools_agent.md b/docs/modules/agents/agent_types/tools_agent.md index 7c0c9de8..45a14352 100644 --- a/docs/modules/agents/agent_types/tools_agent.md +++ b/docs/modules/agents/agent_types/tools_agent.md @@ -9,7 +9,7 @@ You can use any chat model that supports tool calling, like `ChatOpenAI`, `ChatO ## Usage -In the following example, we use `ChatOllama` with the `llama3.1` model and a calculator tool (included in `langchain_community`) to calculate the result of a mathematical expression. +In the following example, we use `ChatOllama` with the `llama3.2` model and a calculator tool (included in `langchain_community`) to calculate the result of a mathematical expression. ```dart import 'package:langchain/langchain.dart'; @@ -20,7 +20,7 @@ import 'package:langchain_ollama/langchain_ollama.dart'; final llm = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), ); diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index e6cc5907..f612616e 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -2,7 +2,7 @@ Wrapper around [Ollama](https://ollama.ai) Completions API that enables to interact with the LLMs in a chat-like fashion. -Ollama allows you to run open-source large language models, such as Llama 3.1 or Gemma 2, locally. +Ollama allows you to run open-source large language models, such as Llama 3.2 or Gemma 2, locally. Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage. @@ -12,13 +12,13 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull llama3.1` + * e.g., for Llama 3: `ollama pull llama3.2` 3. 
Instantiate the `ChatOllama` class with the downloaded model. ```dart final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', ), ); ``` @@ -33,7 +33,7 @@ By default, `ChatOllama` uses 'http://localhost:11434/api' as base URL (default final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( baseUrl: 'https://your-remote-server-where-ollama-is-running.com', - model: 'llama3.1', + model: 'llama3.2', ), ); ``` @@ -48,7 +48,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), ); @@ -75,7 +75,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ ]); final chat = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), ); @@ -123,8 +123,8 @@ print(res.output.content); **Notes:** - Tool calling requires [Ollama 0.3.0](https://github.com/ollama/ollama/releases/tag/v0.3.0) or newer. - Streaming tool calls is not supported at the moment. -- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1) or [`llama3-groq-tool-use`](https://ollama.com/library/llama3-groq-tool-use)). -- At the moment, small models like `llama3.1` [cannot reliably maintain a conversation alongside tool calling definitions](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#llama-3.1-instruct). They can be used for zero-shot tool calling, but for multi-turn conversations it's recommended to use larger models like `llama3.1:70b` or `llama3.1:405b`. +- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.2`](https://ollama.com/library/llama3.2) or [`llama3-groq-tool-use`](https://ollama.com/library/llama3-groq-tool-use)). +- At the moment, small models like `llama3.2` [cannot reliably maintain a conversation alongside tool calling definitions](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#llama-3.1-instruct). They can be used for zero-shot tool calling, but for multi-turn conversations it's recommended to use larger models like `llama3.2:70b` or `llama3.2:405b`. 
```dart const tool = ToolSpec( @@ -144,7 +144,7 @@ const tool = ToolSpec( final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, tools: [tool], ), @@ -173,7 +173,7 @@ If you want to customize how the model should respond to tool calls, you can use ```dart final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, tools: [tool], toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), @@ -194,7 +194,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ ]); final chat = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, format: OllamaResponseFormat.json, ), @@ -284,7 +284,7 @@ const getFlightTimesTool = ToolSpec( final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, tools: [getFlightTimesTool], ), @@ -370,7 +370,7 @@ const tool = ToolSpec( final model = ChatOllama( defaultOptions: ChatOllamaOptions( options: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), tools: [tool], @@ -436,7 +436,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ // 3. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: ChatOllamaOptions(model: 'llama3.2'), ); final retriever = vectorStore.asRetriever( defaultOptions: VectorStoreRetrieverOptions( diff --git a/docs/modules/model_io/models/llms/integrations/ollama.md b/docs/modules/model_io/models/llms/integrations/ollama.md index c139e7d9..25f6806e 100644 --- a/docs/modules/model_io/models/llms/integrations/ollama.md +++ b/docs/modules/model_io/models/llms/integrations/ollama.md @@ -16,7 +16,7 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull llama3.1` + * e.g., for Llama 3: `ollama pull llama3.2` ## Usage @@ -26,7 +26,7 @@ final prompt = PromptTemplate.fromTemplate( ); final llm = Ollama( defaultOptions: OllamaOptions( - model: 'llama3.1', + model: 'llama3.2', ), ); final chain = prompt | llm | StringOutputParser(); @@ -43,7 +43,7 @@ final promptTemplate = PromptTemplate.fromTemplate( ); final llm = Ollama( defaultOptions: OllamaOptions( - model: 'llama3.1', + model: 'llama3.2', ), ); final chain = promptTemplate | llm | StringOutputParser(); diff --git a/docs/modules/retrieval/text_embedding/integrations/ollama.md b/docs/modules/retrieval/text_embedding/integrations/ollama.md index fc83bbb5..b13ddd28 100644 --- a/docs/modules/retrieval/text_embedding/integrations/ollama.md +++ b/docs/modules/retrieval/text_embedding/integrations/ollama.md @@ -1,7 +1,7 @@ # OllamaEmbeddings ```dart -final embeddings = OllamaEmbeddings(model: 'llama3.1'); +final embeddings = OllamaEmbeddings(model: 'llama3.2'); const text = 'This is a test document.'; final res = await embeddings.embedQuery(text); final res = await embeddings.embedDocuments([text]); diff --git a/docs/modules/retrieval/vector_stores/integrations/objectbox.md b/docs/modules/retrieval/vector_stores/integrations/objectbox.md index ee035833..08a07bc2 100644 --- a/docs/modules/retrieval/vector_stores/integrations/objectbox.md +++ b/docs/modules/retrieval/vector_stores/integrations/objectbox.md @@ -213,7 +213,7 @@ Sources: // 6. 
Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: ChatOllamaOptions(model: 'llama3.2'), ); final retriever = vectorStore.asRetriever(); diff --git a/examples/docs_examples/bin/expression_language/cookbook/routing.dart b/examples/docs_examples/bin/expression_language/cookbook/routing.dart index 79bbd348..1f2232a0 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/routing.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/routing.dart @@ -9,7 +9,7 @@ void main(final List arguments) async { Future _runnableRouter() async { final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ); final classificationChain = PromptTemplate.fromTemplate(''' @@ -114,7 +114,7 @@ Here is a question: '''; final embeddings = OllamaEmbeddings( - model: 'llama3.1', + model: 'llama3.2', ); final promptTemplates = [physicsTemplate, historyTemplate]; final promptEmbeddings = await embeddings.embedDocuments( @@ -132,7 +132,7 @@ Here is a question: return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]); }) | ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ) | const StringOutputParser(); diff --git a/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart b/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart index 7554d8d4..b934c0a7 100644 --- a/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart +++ b/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart @@ -15,7 +15,7 @@ void main() async { Future _toolsAgent() async { final llm = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), ); diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart index 2d66b367..3473a738 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart @@ -27,7 +27,7 @@ Future _chatOllama() async { final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), ); @@ -54,7 +54,7 @@ Future _chatOllamaStreaming() async { ]); final chat = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, ), ); @@ -109,7 +109,7 @@ Future _chatOllamaToolCalling() async { final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, tools: [tool], ), @@ -143,7 +143,7 @@ Future _chatOllamaJsonMode() async { ]); final chat = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, format: OllamaResponseFormat.json, ), @@ -197,7 +197,7 @@ Future _extraction() async { final model = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', temperature: 0, tools: const [tool], toolChoice: ChatToolChoice.forced(name: tool.name), @@ -300,7 +300,7 @@ Future _flights() async { final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 
'llama3.2', temperature: 0, tools: [getFlightTimesTool], ), @@ -370,7 +370,7 @@ Future _rag() async { // 3. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ); final retriever = vectorStore.asRetriever( defaultOptions: const VectorStoreRetrieverOptions( diff --git a/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart index eb019a6b..aae53fa7 100644 --- a/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart @@ -13,7 +13,7 @@ Future _ollama() async { ); final llm = Ollama( defaultOptions: const OllamaOptions( - model: 'llama3.1', + model: 'llama3.2', ), ); @@ -29,7 +29,7 @@ Future _ollamaStreaming() async { ); final llm = Ollama( defaultOptions: const OllamaOptions( - model: 'llama3.1', + model: 'llama3.2', ), ); const stringOutputParser = StringOutputParser(); diff --git a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart index 6c66d5dc..92d419c9 100644 --- a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart +++ b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart @@ -66,7 +66,7 @@ Sources: // 6. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.1'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ); final retriever = vectorStore.asRetriever(); diff --git a/examples/hello_world_flutter/lib/home/bloc/providers.dart b/examples/hello_world_flutter/lib/home/bloc/providers.dart index 1445bec3..4d9c364b 100644 --- a/examples/hello_world_flutter/lib/home/bloc/providers.dart +++ b/examples/hello_world_flutter/lib/home/bloc/providers.dart @@ -21,7 +21,7 @@ enum Provider { ), ollama( name: 'Ollama', - defaultModel: 'llama3.1', + defaultModel: 'llama3.2', defaultBaseUrl: 'http://localhost:11434/api', isRemote: false, ); diff --git a/examples/wikivoyage_eu/README.md b/examples/wikivoyage_eu/README.md index cc573899..74bbd8e2 100644 --- a/examples/wikivoyage_eu/README.md +++ b/examples/wikivoyage_eu/README.md @@ -17,11 +17,11 @@ This example demonstrates how to build a fully local Retrieval Augmented Generat - For this example we will be using the following models: * Embedding model: [`jina/jina-embeddings-v2-small-en`](https://ollama.com/jina/jina-embeddings-v2-small-en) - * LLM: [`llama3.1`](https://ollama.com/library/llama3.1) + * LLM: [`llama3.2`](https://ollama.com/library/llama3.2) - Open your terminal and run: ```bash ollama pull jina/jina-embeddings-v2-small-en -ollama run llama3.1 +ollama run llama3.2 ``` ### 3. Setup ObjectBox @@ -73,7 +73,7 @@ The chatbot script implements the RAG pipeline. It does the following: 2. Uses the `jina/jina-embeddings-v2-small-en` model to create an embedding for the query. 3. Retrieves the 5 most similar documents from the ObjectBox database. 4. Builds a prompt using the retrieved documents and the query. -5. Uses the `llama3.1` model to generate a response to the prompt. +5. Uses the `llama3.2` model to generate a response to the prompt. 
You can run the script using: ```bash diff --git a/examples/wikivoyage_eu/bin/wikivoyage_eu.dart b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart index 8123c262..9fb076eb 100644 --- a/examples/wikivoyage_eu/bin/wikivoyage_eu.dart +++ b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart @@ -51,7 +51,7 @@ Do not provide any other suggestion if the question is not about Europe. final model = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', + model: 'llama3.2', ), ); const outputParser = StringOutputParser(); diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index 4d7ddfc5..a95f4820 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -1,5 +1,5 @@ name: wikivoyage_eu -description: Wikivoyage EU chatbot using llama3.1 and ObjectBox. +description: Wikivoyage EU chatbot using llama3.2 and ObjectBox. version: 1.0.0 publish_to: none diff --git a/packages/langchain/README.md b/packages/langchain/README.md index e93bfdd1..7443cae5 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -72,7 +72,7 @@ Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/pack | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | | [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3.1, Gemma 2, Phi-3, Mistral nemo, WizardLM-2, CodeGemma, Command R, LLaVA, DBRX, Qwen, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, WizardLM-2, CodeGemma, Command R, LLaVA, DBRX, Qwen, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | | [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) 
| | [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | | [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart index 5889b920..22ddeee4 100644 --- a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart @@ -20,7 +20,7 @@ import 'types.dart'; /// Vector store for the [ObjectBox](https://objectbox.io/) on-device database. /// /// ```dart -/// final embeddings = OllamaEmbeddings(model: 'llama3.1'); +/// final embeddings = OllamaEmbeddings(model: 'llama3.2'); /// final vectorStore = ObjectBoxVectorStore(embeddings: embeddings); /// ``` /// diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart index 4b0e9c75..190170d6 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart @@ -12,7 +12,7 @@ import 'types.dart'; /// to interact with the LLMs in a chat-like fashion. /// /// Ollama allows you to run open-source large language models, -/// such as Llama 3.1, Gemma 2 or LLaVA, locally. +/// such as Llama 3.2, Gemma 2 or LLaVA, locally. /// /// For a complete list of supported models and model variants, see the /// [Ollama model library](https://ollama.ai/library). @@ -34,7 +34,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for Llama 3: `ollama pull llama3.1` +/// * e.g., for Llama 3: `ollama pull llama3.2` /// /// ### Ollama base URL /// @@ -55,7 +55,7 @@ import 'types.dart'; /// ```dart /// final chatModel = ChatOllama( /// defaultOptions: const ChatOllamaOptions( -/// model: 'llama3.1', +/// model: 'llama3.2', /// temperature: 0, /// format: 'json', /// ), @@ -87,7 +87,7 @@ import 'types.dart'; /// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ -/// 'q1': prompt1 | chatModel.bind(const ChatOllamaOptions(model: 'llama3.1')) | outputParser, +/// 'q1': prompt1 | chatModel.bind(const ChatOllamaOptions(model: 'llama3.2')) | outputParser, /// 'q2': prompt2| chatModel.bind(const ChatOllamaOptions(model: 'mistral')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); @@ -176,7 +176,7 @@ class ChatOllama extends BaseChatModel { String get modelType => 'chat-ollama'; /// The default model to use unless another is specified. 
- static const defaultModel = 'llama3.1'; + static const defaultModel = 'llama3.2'; @override Future invoke( diff --git a/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart b/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart index bd40cf60..ffef2882 100644 --- a/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart +++ b/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart @@ -13,7 +13,7 @@ import 'package:ollama_dart/ollama_dart.dart'; /// /// Example: /// ```dart -/// final embeddings = OllamaEmbeddings(model: 'llama3.1'); +/// final embeddings = OllamaEmbeddings(model: 'llama3.2'); /// final res = await embeddings.embedQuery('Hello world'); /// ``` /// @@ -23,7 +23,7 @@ import 'package:ollama_dart/ollama_dart.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for `Llama-7b`: `ollama pull llama3.1` +/// * e.g., for `Llama-7b`: `ollama pull llama3.2` /// /// ### Advance /// @@ -76,7 +76,7 @@ class OllamaEmbeddings implements Embeddings { /// - `client`: the HTTP client to use. You can set your own HTTP client if /// you need further customization (e.g. to use a Socks5 proxy). OllamaEmbeddings({ - this.model = 'llama3.1', + this.model = 'llama3.2', this.keepAlive, final String baseUrl = 'http://localhost:11434/api', final Map? headers, diff --git a/packages/langchain_ollama/lib/src/llms/ollama.dart b/packages/langchain_ollama/lib/src/llms/ollama.dart index db352184..9be8ed12 100644 --- a/packages/langchain_ollama/lib/src/llms/ollama.dart +++ b/packages/langchain_ollama/lib/src/llms/ollama.dart @@ -20,7 +20,7 @@ import 'types.dart'; /// ```dart /// final llm = Ollama( /// defaultOption: const OllamaOptions( -/// model: 'llama3.1', +/// model: 'llama3.2', /// temperature: 1, /// ), /// ); @@ -49,7 +49,7 @@ import 'types.dart'; /// ```dart /// final llm = Ollama( /// defaultOptions: const OllamaOptions( -/// model: 'llama3.1', +/// model: 'llama3.2', /// temperature: 0, /// format: 'json', /// ), @@ -83,7 +83,7 @@ import 'types.dart'; /// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ -/// 'q1': prompt1 | llm.bind(const OllamaOptions(model: 'llama3.1')) | outputParser, +/// 'q1': prompt1 | llm.bind(const OllamaOptions(model: 'llama3.2')) | outputParser, /// 'q2': prompt2| llm.bind(const OllamaOptions(model: 'mistral')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); @@ -93,7 +93,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for `Llama-7b`: `ollama pull llama3.1` +/// * e.g., for `Llama-7b`: `ollama pull llama3.2` /// /// ### Advance /// @@ -178,7 +178,7 @@ class Ollama extends BaseLLM { String get modelType => 'ollama'; /// The default model to use unless another is specified. - static const defaultModel = 'llama3.1'; + static const defaultModel = 'llama3.2'; @override Future invoke( diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index 51c98cd9..f6e9e066 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,5 +1,5 @@ name: langchain_ollama -description: LangChain.dart integration module for Ollama (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). 
+description: LangChain.dart integration module for Ollama (run Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, Qwen2 and other models locally). version: 0.3.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart index fcceacdb..9aac4640 100644 --- a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart +++ b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart @@ -13,7 +13,7 @@ import 'package:test/test.dart'; void main() { group('ChatOllama tests', skip: Platform.environment.containsKey('CI'), () { late ChatOllama chatModel; - const defaultModel = 'llama3.1'; + const defaultModel = 'llama3.2'; const visionModel = 'llava:latest'; setUp(() async { diff --git a/packages/langchain_ollama/test/embeddings/ollama_test.dart b/packages/langchain_ollama/test/embeddings/ollama_test.dart index ac8f999e..5363d47c 100644 --- a/packages/langchain_ollama/test/embeddings/ollama_test.dart +++ b/packages/langchain_ollama/test/embeddings/ollama_test.dart @@ -8,7 +8,7 @@ void main() { group('OllamaEmbeddings tests', skip: Platform.environment.containsKey('CI'), () { late OllamaEmbeddings embeddings; - const defaultModel = 'llama3.1'; + const defaultModel = 'llama3.2'; setUp(() async { embeddings = OllamaEmbeddings( diff --git a/packages/langchain_ollama/test/llms/ollama_test.dart b/packages/langchain_ollama/test/llms/ollama_test.dart index d21d0e56..7426b0c6 100644 --- a/packages/langchain_ollama/test/llms/ollama_test.dart +++ b/packages/langchain_ollama/test/llms/ollama_test.dart @@ -10,7 +10,7 @@ import 'package:test/test.dart'; void main() { group('Ollama tests', skip: Platform.environment.containsKey('CI'), () { late Ollama llm; - const defaultModel = 'llama3.1'; + const defaultModel = 'llama3.2'; setUp(() async { llm = Ollama( diff --git a/packages/ollama_dart/README.md b/packages/ollama_dart/README.md index 46ad88a3..dc637664 100644 --- a/packages/ollama_dart/README.md +++ b/packages/ollama_dart/README.md @@ -153,7 +153,7 @@ Tool calling allows a model to respond to a given prompt by generating output th **Notes:** - Tool calling requires Ollama 0.2.8 or newer. - Streaming tool calls is not supported at the moment. -- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1)). +- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.2`](https://ollama.com/library/llama3.2)). 
```dart const tool = Tool( @@ -185,7 +185,7 @@ const userMsg = Message( final res1 = await client.generateChatCompletion( request: GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [userMsg], tools: [tool], ), @@ -211,7 +211,7 @@ const toolResult = '{"location": "Barcelona, ES", "temperature": 20, "unit": "ce // Submit the response of the tool call to the model final res2 = await client.generateChatCompletion( request: GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [ userMsg, res1.message, diff --git a/packages/ollama_dart/example/ollama_dart_example.dart b/packages/ollama_dart/example/ollama_dart_example.dart index b1e9361f..53dc2abf 100644 --- a/packages/ollama_dart/example/ollama_dart_example.dart +++ b/packages/ollama_dart/example/ollama_dart_example.dart @@ -70,7 +70,7 @@ Future _generateCompletionStream(final OllamaClient client) async { Future _generateChatCompletion(final OllamaClient client) async { final generated = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [ Message( role: MessageRole.system, @@ -95,7 +95,7 @@ Future _generateChatCompletionWithHistory( ) async { final generated = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [ Message( role: MessageRole.user, @@ -118,7 +118,7 @@ Future _generateChatCompletionWithHistory( Future _generateChatCompletionStream(final OllamaClient client) async { final stream = client.generateChatCompletionStream( request: const GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [ Message( role: MessageRole.system, @@ -168,7 +168,7 @@ Future _generateChatToolCalling(final OllamaClient client) async { final res1 = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [userMsg], tools: [tool], keepAlive: 1, @@ -196,7 +196,7 @@ Future _generateChatToolCalling(final OllamaClient client) async { // Submit the response of the tool call to the model final res2 = await client.generateChatCompletion( request: GenerateChatCompletionRequest( - model: 'llama3.1', + model: 'llama3.2', messages: [ userMsg, res1.message, diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index 0939dfb3..05b3f593 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -273,7 +273,7 @@ components: The model name. Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - example: llama3.1 + example: llama3.2 prompt: type: string description: The prompt to generate a response. 
@@ -539,7 +539,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 created_at: type: string format: date-time @@ -596,7 +596,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 messages: type: array description: The messages of the chat, this can be used to keep a chat memory @@ -628,7 +628,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 created_at: type: string format: date-time @@ -769,7 +769,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 prompt: type: string description: Text to generate embeddings for. @@ -846,7 +846,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 modified_at: type: string format: date-time @@ -923,7 +923,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 size: type: integer format: int64 @@ -951,7 +951,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 required: - model ModelInfo: @@ -999,7 +999,7 @@ components: source: type: string description: Name of the model to copy. - example: llama3.1 + example: llama3.2 destination: type: string description: Name of the new model. @@ -1024,7 +1024,7 @@ components: model: type: string description: *model_name - example: llama3.1 + example: llama3.2 insecure: type: boolean description: | diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 66bea8fc..0bbd9916 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,5 +1,5 @@ name: ollama_dart -description: Dart Client for the Ollama API (run Llama 3.1, Gemma 2, Phi-3, Mistral nemo, Qwen2 and other models locally). +description: Dart Client for the Ollama API (run Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, Qwen2 and other models locally). 
version: 0.2.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart diff --git a/packages/ollama_dart/test/ollama_dart_chat_test.dart b/packages/ollama_dart/test/ollama_dart_chat_test.dart index 3ed66209..3e8afd82 100644 --- a/packages/ollama_dart/test/ollama_dart_chat_test.dart +++ b/packages/ollama_dart/test/ollama_dart_chat_test.dart @@ -7,7 +7,7 @@ void main() { group('Ollama Generate Completions API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3.1'; + const defaultModel = 'llama3.2'; const visionModel = 'llava'; setUp(() async { From 0a582d0ea2446f1fd8f17abe2a58e70bb58d80a4 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 25 Sep 2024 23:24:12 +0200 Subject: [PATCH 121/251] feat: Add OpenAI o1-preview and o1-mini to model catalog (#555) --- packages/langchain/README.md | 22 +- .../lib/src/chat_models/types.dart | 5 + packages/langchain_openai/pubspec.yaml | 2 +- .../schema/create_assistant_request.dart | 12 + .../create_chat_completion_request.dart | 10 + .../generated/schema/create_run_request.dart | 12 + .../schema/create_thread_and_run_request.dart | 12 + .../lib/src/generated/schema/schema.g.dart | 19 + packages/openai_dart/oas/openapi_curated.yaml | 27 +- .../openai_dart/oas/openapi_official.yaml | 13260 +++++++++------- packages/openai_dart/pubspec.yaml | 2 +- 11 files changed, 7983 insertions(+), 5400 deletions(-) diff --git a/packages/langchain/README.md b/packages/langchain/README.md index 7443cae5..d01e9ccd 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -65,17 +65,17 @@ Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/pack > Depend on an integration-specific package if you want to use the specific integration. -| Package | Version | Description | -|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | [![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic) | Anthopic integration (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.) | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | -| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | -| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) 
| -| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, WizardLM-2, CodeGemma, Command R, LLaVA, DBRX, Qwen, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | -| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | -| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | -| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | +| Package | Version | Description | +|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | [![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic) | Anthopic integration (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.) | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | +| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, WizardLM-2, CodeGemma, Command R, LLaVA, DBRX, Qwen, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-4o, o1, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration |
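
For the `langchain_openai` row, picking up one of the newly catalogued o1 models is just a matter of passing its id. A minimal sketch, assuming an `OPENAI_API_KEY` environment variable (the prompt is illustrative):

```dart
import 'dart:io';

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

void main() async {
  // Any model id accepted by the OpenAI Chat Completions API can be used here,
  // e.g. 'o1-preview', 'o1-mini' or 'gpt-4o'.
  final chatModel = ChatOpenAI(
    apiKey: Platform.environment['OPENAI_API_KEY'],
    defaultOptions: const ChatOpenAIOptions(model: 'o1-mini'),
  );

  final res = await chatModel.invoke(
    PromptValue.string('Explain step by step why 13 * 17 = 221.'),
  );
  print(res.output.content);
}
```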

diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart
index 6713a56f..3173e293 100644
--- a/packages/langchain_openai/lib/src/chat_models/types.dart
+++ b/packages/langchain_openai/lib/src/chat_models/types.dart
@@ -32,6 +32,10 @@ import 'package:meta/meta.dart';
 /// - `gpt-3.5-turbo-0301`
 /// - `gpt-3.5-turbo-0613`
 /// - `gpt-3.5-turbo-1106`
+/// - `o1-mini`
+/// - `o1-mini-2024-09-12`
+/// - `o1-preview`
+/// - `o1-preview-2024-09-12`
 ///
 /// Mind that the list may be outdated.
 /// See https://platform.openai.com/docs/models for the latest list.
diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml
index c367ac51..37c0b6ca 100644
--- a/packages/langchain_openai/pubspec.yaml
+++ b/packages/langchain_openai/pubspec.yaml
@@ -1,5 +1,5 @@
 name: langchain_openai
-description: LangChain.dart integration module for OpenAI (GPT-4o, Embeddings, DALL·E, etc.).
+description: LangChain.dart integration module for OpenAI (GPT-4o, o1, Embeddings, DALL·E, etc.).
 version: 0.7.1
 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai
 issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai
diff --git a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart
index f078f394..cb7f5b82 100644
--- a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart
+++ b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart
@@ -153,6 +153,8 @@ class CreateAssistantRequest with _$CreateAssistantRequest {
 
 /// Available assistant models. Mind that the list may not be exhaustive nor up-to-date.
 enum AssistantModels {
+  @JsonValue('chatgpt-4o-latest')
+  chatgpt4oLatest,
   @JsonValue('gpt-4')
   gpt4,
   @JsonValue('gpt-4-32k')
@@ -201,6 +203,14 @@ enum AssistantModels {
   gpt35Turbo0613,
   @JsonValue('gpt-3.5-turbo-1106')
   gpt35Turbo1106,
+  @JsonValue('o1-mini')
+  o1Mini,
+  @JsonValue('o1-mini-2024-09-12')
+  o1Mini20240912,
+  @JsonValue('o1-preview')
+  o1Preview,
+  @JsonValue('o1-preview-2024-09-12')
+  o1Preview20240912,
 }
 
 // ==========================================
diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart
index 0b6a5920..47280735 100644
--- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart
+++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart
@@ -327,6 +327,14 @@ enum ChatCompletionModels {
   gpt35Turbo0613,
   @JsonValue('gpt-3.5-turbo-1106')
   gpt35Turbo1106,
+  @JsonValue('o1-mini')
+  o1Mini,
+  @JsonValue('o1-mini-2024-09-12')
+  o1Mini20240912,
+  @JsonValue('o1-preview')
+  o1Preview,
+  @JsonValue('o1-preview-2024-09-12')
+  o1Preview20240912,
 }
 
 // ==========================================
diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart
index 485869d0..6fe86422 100644
--- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart
+++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart
@@ -191,6 +191,8 @@ class CreateRunRequest with _$CreateRunRequest {
 
 /// Available models. Mind that the list may not be exhaustive nor up-to-date.
 enum RunModels {
+  @JsonValue('chatgpt-4o-latest')
+  chatgpt4oLatest,
   @JsonValue('gpt-4')
   gpt4,
   @JsonValue('gpt-4-32k')
@@ -239,6 +241,14 @@ enum RunModels {
   gpt35Turbo0613,
   @JsonValue('gpt-3.5-turbo-1106')
   gpt35Turbo1106,
+  @JsonValue('o1-mini')
+  o1Mini,
+  @JsonValue('o1-mini-2024-09-12')
+  o1Mini20240912,
+  @JsonValue('o1-preview')
+  o1Preview,
+  @JsonValue('o1-preview-2024-09-12')
+  o1Preview20240912,
 }
 
 // ==========================================
diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart
index e52be3e1..1d9c82ee 100644
--- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart
+++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart
@@ -190,6 +190,8 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest {
 
 /// Available models. Mind that the list may not be exhaustive nor up-to-date.
enum ThreadAndRunModels { + @JsonValue('chatgpt-4o-latest') + chatgpt4oLatest, @JsonValue('gpt-4') gpt4, @JsonValue('gpt-4-32k') @@ -220,6 +222,8 @@ enum ThreadAndRunModels { gpt4o20240513, @JsonValue('gpt-4o-2024-08-06') gpt4o20240806, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -238,6 +242,14 @@ enum ThreadAndRunModels { gpt35Turbo0613, @JsonValue('gpt-3.5-turbo-1106') gpt35Turbo1106, + @JsonValue('o1-mini') + o1Mini, + @JsonValue('o1-mini-2024-09-12') + o1Mini20240912, + @JsonValue('o1-preview') + o1Preview, + @JsonValue('o1-preview-2024-09-12') + o1Preview20240912, } // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 3ffb5c36..092425e5 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -439,6 +439,10 @@ const _$ChatCompletionModelsEnumMap = { ChatCompletionModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301', ChatCompletionModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613', ChatCompletionModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106', + ChatCompletionModels.o1Mini: 'o1-mini', + ChatCompletionModels.o1Mini20240912: 'o1-mini-2024-09-12', + ChatCompletionModels.o1Preview: 'o1-preview', + ChatCompletionModels.o1Preview20240912: 'o1-preview-2024-09-12', }; _$ChatCompletionModelStringImpl _$$ChatCompletionModelStringImplFromJson( @@ -2306,6 +2310,7 @@ Map _$$AssistantModelEnumerationImplToJson( }; const _$AssistantModelsEnumMap = { + AssistantModels.chatgpt4oLatest: 'chatgpt-4o-latest', AssistantModels.gpt4: 'gpt-4', AssistantModels.gpt432k: 'gpt-4-32k', AssistantModels.gpt432k0314: 'gpt-4-32k-0314', @@ -2330,6 +2335,10 @@ const _$AssistantModelsEnumMap = { AssistantModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301', AssistantModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613', AssistantModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106', + AssistantModels.o1Mini: 'o1-mini', + AssistantModels.o1Mini20240912: 'o1-mini-2024-09-12', + AssistantModels.o1Preview: 'o1-preview', + AssistantModels.o1Preview20240912: 'o1-preview-2024-09-12', }; _$AssistantModelStringImpl _$$AssistantModelStringImplFromJson( @@ -2969,6 +2978,7 @@ Map _$$CreateRunRequestModelEnumerationImplToJson( }; const _$RunModelsEnumMap = { + RunModels.chatgpt4oLatest: 'chatgpt-4o-latest', RunModels.gpt4: 'gpt-4', RunModels.gpt432k: 'gpt-4-32k', RunModels.gpt432k0314: 'gpt-4-32k-0314', @@ -2993,6 +3003,10 @@ const _$RunModelsEnumMap = { RunModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301', RunModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613', RunModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106', + RunModels.o1Mini: 'o1-mini', + RunModels.o1Mini20240912: 'o1-mini-2024-09-12', + RunModels.o1Preview: 'o1-preview', + RunModels.o1Preview20240912: 'o1-preview-2024-09-12', }; _$CreateRunRequestModelStringImpl _$$CreateRunRequestModelStringImplFromJson( @@ -3292,6 +3306,7 @@ Map _$$ThreadAndRunModelEnumerationImplToJson( }; const _$ThreadAndRunModelsEnumMap = { + ThreadAndRunModels.chatgpt4oLatest: 'chatgpt-4o-latest', ThreadAndRunModels.gpt4: 'gpt-4', ThreadAndRunModels.gpt432k: 'gpt-4-32k', ThreadAndRunModels.gpt432k0314: 'gpt-4-32k-0314', @@ -3316,6 +3331,10 @@ const _$ThreadAndRunModelsEnumMap = { ThreadAndRunModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301', ThreadAndRunModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613', ThreadAndRunModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106', + 
ThreadAndRunModels.o1Mini: 'o1-mini', + ThreadAndRunModels.o1Mini20240912: 'o1-mini-2024-09-12', + ThreadAndRunModels.o1Preview: 'o1-preview', + ThreadAndRunModels.o1Preview20240912: 'o1-preview-2024-09-12', }; _$ThreadAndRunModelStringImpl _$$ThreadAndRunModelStringImplFromJson( diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 4fc465f5..793e696e 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1842,6 +1842,7 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -1851,6 +1852,10 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "o1-mini", + "o1-mini-2024-09-12", + "o1-preview", + "o1-preview-2024-09-12", ] messages: description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). @@ -3660,6 +3665,7 @@ components: Available assistant models. Mind that the list may not be exhaustive nor up-to-date. enum: [ + "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -3675,6 +3681,7 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -3684,6 +3691,10 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "o1-mini", + "o1-mini-2024-09-12", + "o1-preview", + "o1-preview-2024-09-12", ] name: description: *assistant_name_param_description @@ -4216,6 +4227,7 @@ components: Available models. Mind that the list may not be exhaustive nor up-to-date. enum: [ + "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -4231,6 +4243,7 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -4240,6 +4253,10 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "o1-mini", + "o1-mini-2024-09-12", + "o1-preview", + "o1-preview-2024-09-12", ] instructions: description: Overrides the [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. @@ -4454,6 +4471,7 @@ components: Available models. Mind that the list may not be exhaustive nor up-to-date. enum: [ + "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -4469,6 +4487,7 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -4478,6 +4497,10 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "o1-mini", + "o1-mini-2024-09-12", + "o1-preview", + "o1-preview-2024-09-12", ] instructions: description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. @@ -5719,7 +5742,7 @@ components: $ref: "#/components/schemas/FileSearchRanker" score_threshold: type: number - description: | + description: | The score threshold for the file search. All values must be a floating point number between 0 and 1. minimum: 0 maximum: 1 @@ -5743,7 +5766,7 @@ components: maximum: 1 content: type: array - description: | + description: | The content of the result that was found. The content is only included if requested via the include query parameter. 
items: diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index de7cd98a..96e64e32 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -37,6 +37,8 @@ tags: description: List and describe the various models available in the API. - name: Moderations description: Given a input text, outputs if the model classifies it as potentially harmful. + - name: Audit Logs + description: List user actions and configuration changes within this organization. paths: # Note: When adding an endpoint, make sure you also add it in the `groups` section, in the end of this file, # under the appropriate group @@ -177,7 +179,9 @@ paths: {"type": "text", "text": "What's in this image?"}, { "type": "image_url", - "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + } }, ], } @@ -201,9 +205,10 @@ paths: { type: "text", text: "What's in this image?" }, { type: "image_url", - image_url: - "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", - }, + image_url: { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + }, + } ], }, ], @@ -2766,7 +2771,7 @@ paths: response: &moderation_example | { "id": "modr-XXXXX", - "model": "text-moderation-005", + "model": "text-moderation-007", "results": [ { "flagged": true, @@ -7254,169 +7259,2450 @@ paths: } } -components: - securitySchemes: - ApiKeyAuth: - type: http - scheme: "bearer" + # Organization + # Audit Logs List + /organization/audit_logs: + get: + summary: List user actions and configuration changes within this organization. + operationId: list-audit-logs + tags: + - Audit Logs + parameters: + - name: effective_at + in: query + description: Return only events whose `effective_at` (Unix seconds) is in this range. + required: false + schema: + type: object + properties: + gt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater than this value. + gte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater than or equal to this value. + lt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than this value. + lte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than or equal to this value. + - name: project_ids[] + in: query + description: Return only events for these projects. + required: false + schema: + type: array + items: + type: string + - name: event_types[] + in: query + description: Return only events with a `type` in one of these values. For example, `project.created`. For all options, see the documentation for the [audit log object](/docs/api-reference/audit-logs/object). + required: false + schema: + type: array + items: + $ref: "#/components/schemas/AuditLogEventType" + - name: actor_ids[] + in: query + description: Return only events performed by these actors. Can be a user ID, a service account ID, or an api key tracking ID. 
+ required: false + schema: + type: array + items: + type: string + - name: actor_emails[] + in: query + description: Return only events performed by users with these emails. + required: false + schema: + type: array + items: + type: string + - name: resource_ids[] + in: query + description: Return only events performed on these targets. For example, a project ID updated. + required: false + schema: + type: array + items: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: Audit logs listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ListAuditLogsResponse" + x-oaiMeta: + name: List audit logs + group: audit-logs + returns: A list of paginated [Audit Log](/docs/api-reference/audit-logs/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/audit_logs \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + response: | + { + "object": "list", + "data": [ + { + "id": "audit_log-xxx_yyyymmdd", + "type": "project.archived", + "effective_at": 1722461446, + "actor": { + "type": "api_key", + "api_key": { + "type": "user", + "user": { + "id": "user-xxx", + "email": "user@example.com" + } + } + }, + "project.archived": { + "id": "proj_abc" + }, + }, + { + "id": "audit_log-yyy__20240101", + "type": "api_key.updated", + "effective_at": 1720804190, + "actor": { + "type": "session", + "session": { + "user": { + "id": "user-xxx", + "email": "user@example.com" + }, + "ip_address": "127.0.0.1", + "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + } + }, + "api_key.updated": { + "id": "key_xxxx", + "data": { + "scopes": ["resource_2.operation_2"] + } + }, + } + ], + "first_id": "audit_log-xxx__20240101", + "last_id": "audit_log_yyy__20240101", + "has_more": true + } + /organization/invites: + get: + summary: Returns a list of invites in the organization. + operationId: list-invites + tags: + - Invites + parameters: + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Invites listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/InviteListResponse' + x-oaiMeta: + name: List invites + group: administration + returns: A list of [Invite](/docs/api-reference/invite/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/organization/invites?after=invite-abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + ], + "first_id": "invite-abc", + "last_id": "invite-abc", + "has_more": false + } - schemas: - Error: - type: object - properties: - code: - type: string - nullable: true - message: - type: string - nullable: false - param: - type: string - nullable: true - type: - type: string - nullable: false - required: - - type - - message - - param - - code - ErrorResponse: - type: object - properties: - error: - $ref: "#/components/schemas/Error" - required: - - error + post: + summary: Create an invite for a user to the organization. The invite must be accepted by the user before they have access to the organization. + operationId: inviteUser + tags: + - Invites + requestBody: + description: The invite request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/InviteRequest' + responses: + "200": + description: User invited successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Invite' + x-oaiMeta: + name: Create invite + group: administration + returns: The created [Invite](/docs/api-reference/invite/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/invites \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "email": "user@example.com", + "role": "owner" + }' + response: + content: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": null + } - ListModelsResponse: - type: object - properties: - object: - type: string - enum: [ list ] - data: - type: array - items: - $ref: "#/components/schemas/Model" - required: - - object - - data - DeleteModelResponse: - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - required: - - id - - object - - deleted + /organization/invites/{invite_id}: + get: + summary: Retrieves an invite. + operationId: retrieve-invite + tags: + - Invites + parameters: + - in: path + name: invite_id + required: true + schema: + type: string + description: The ID of the invite to retrieve. + responses: + "200": + description: Invite retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Invite' + x-oaiMeta: + name: Retrieve invite + group: administration + returns: The [Invite](/docs/api-reference/invite/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/invites/invite-abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + delete: + summary: Delete an invite. If the invite has already been accepted, it cannot be deleted. 
+ operationId: delete-invite + tags: + - Invites + parameters: + - in: path + name: invite_id + required: true + schema: + type: string + description: The ID of the invite to delete. + responses: + "200": + description: Invite deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/InviteDeleteResponse' + x-oaiMeta: + name: Delete invite + group: administration + returns: Confirmation that the invite has been deleted + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/invites/invite-abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.invite.deleted", + "id": "invite-abc", + "deleted": true + } - CreateCompletionRequest: - type: object - properties: - model: - description: &model_description | - ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - anyOf: - - type: string - - type: string - enum: [ "gpt-3.5-turbo-instruct", "davinci-002", "babbage-002" ] - x-oaiTypeLabel: string - prompt: - description: &completions_prompt_description | - The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. - - Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. - default: "<|endoftext|>" - nullable: true - oneOf: - - type: string - default: "" - example: "This is a test." + /organization/users: + get: + summary: Lists all of the users in the organization. + operationId: list-users + tags: + - Users + parameters: + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Users listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/UserListResponse' + x-oaiMeta: + name: List users + group: administration + returns: A list of [User](/docs/api-reference/users/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/users?after=user_abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + ], + "first_id": "user-abc", + "last_id": "user-xyz", + "has_more": false + } + + /organization/users/{user_id}: + get: + summary: Retrieves a user by their identifier. + operationId: retrieve-user + tags: + - Users + parameters: + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: User retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/User' + x-oaiMeta: + name: Retrieve user + group: administration + returns: The [User](/docs/api-reference/users/object) object matching the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + post: + summary: Modifies a user's role in the organization. + operationId: modify-user + tags: + - Users + requestBody: + description: The new user role to modify. This must be one of `owner` or `member`. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UserRoleUpdateRequest' + responses: + "200": + description: User role updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/User' + x-oaiMeta: + name: Modify user + group: administration + returns: The updated [User](/docs/api-reference/users/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "role": "owner" + }' + response: + content: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + delete: + summary: Deletes a user from the organization. + operationId: delete-user + tags: + - Users + parameters: + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: User deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/UserDeleteResponse' + x-oaiMeta: + name: Delete user + group: administration + returns: Confirmation of the deleted user + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.user.deleted", + "id": "user_abc", + "deleted": true + } + /organization/projects: + get: + summary: Returns a list of projects. + operationId: list-projects + tags: + - Projects + parameters: + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + - name: include_archived + in: query + schema: + type: boolean + default: false + description: If `true` returns all projects including those that have been `archived`. Archived projects are not included by default. + responses: + "200": + description: Projects listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectListResponse' + x-oaiMeta: + name: List projects + group: administration + returns: A list of [Project](/docs/api-reference/projects/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects?after=proj_abc&limit=20&include_archived=false \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + ], + "first_id": "proj-abc", + "last_id": "proj-xyz", + "has_more": false + } + + post: + summary: Create a new project in the organization. Projects can be created and archived, but cannot be deleted. + operationId: create-project + tags: + - Projects + requestBody: + description: The project create request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectCreateRequest' + responses: + "200": + description: Project created successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + x-oaiMeta: + name: Create project + group: administration + returns: The created [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Project ABC", + "app_use_case": "Your project use case here", + "business_website": "https://example.com" + }' + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project ABC", + "created_at": 1711471533, + "archived_at": null, + "status": "active", + "app_use_case": "Your project use case here", + "business_website": "https://example.com" + } + + /organization/projects/{project_id}: + get: + summary: Retrieves a project. + operationId: retrieve-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + responses: + "200": + description: Project retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + x-oaiMeta: + name: Retrieve project + group: administration + description: Retrieve a project. + returns: The [Project](/docs/api-reference/projects/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + + post: + summary: Modifies a project in the organization. + operationId: modify-project + tags: + - Projects + requestBody: + description: The project update request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUpdateRequest' + responses: + "200": + description: Project updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + "400": + description: Error response when updating the default project. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Modify project + group: administration + returns: The updated [Project](/docs/api-reference/projects/object) object. 
+ examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Project DEF", + "app_use_case": "Your project use case here", + "business_website": "https://example.com" + }' + + /organization/projects/{project_id}/archive: + post: + summary: Archives a project in the organization. Archived projects cannot be used or updated. + operationId: archive-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + responses: + "200": + description: Project archived successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + x-oaiMeta: + name: Archive project + group: administration + returns: The archived [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/archive \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project DEF", + "created_at": 1711471533, + "archived_at": 1711471533, + "status": "archived" + } + + + /organization/projects/{project_id}/users: + get: + summary: Returns a list of users in the project. + operationId: list-project-users + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Project users listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserListResponse' + "400": + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: List project users + group: administration + returns: A list of [ProjectUser](/docs/api-reference/project-users/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/users?after=user_abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + ], + "first_id": "user-abc", + "last_id": "user-xyz", + "has_more": false + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + + post: + summary: Adds a user to the project. Users must already be members of the organization to be added to a project. + operationId: create-project-user + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + tags: + - Projects + requestBody: + description: The project user create request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserCreateRequest' + responses: + "200": + description: User added to project successfully. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUser' + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Create project user + group: administration + returns: The created [ProjectUser](/docs/api-reference/project-users/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "user_id": "user_abc", + "role": "member" + }' + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + + /organization/projects/{project_id}/users/{user_id}: + get: + summary: Retrieves a user in the project. + operationId: retrieve-project-user + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: Project user retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUser' + x-oaiMeta: + name: Retrieve project user + group: administration + returns: The [ProjectUser](/docs/api-reference/project-users/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + post: + summary: Modifies a user's role in the project. + operationId: modify-project-user + tags: + - Projects + requestBody: + description: The project user update request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserUpdateRequest' + responses: + "200": + description: Project user's role updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUser' + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Modify project user + group: administration + returns: The updated [ProjectUser](/docs/api-reference/project-users/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "role": "owner" + }' + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + delete: + summary: Deletes a user from the project. + operationId: delete-project-user + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: user_id + in: path + description: The ID of the user. 
+ required: true + schema: + type: string + responses: + "200": + description: Project user deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserDeleteResponse' + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Delete project user + group: administration + returns: Confirmation of the deleted project user, or an error in case of an archived project, which has no users + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.user.deleted", + "id": "user_abc", + "deleted": true + } + + /organization/projects/{project_id}/service_accounts: + get: + summary: Returns a list of service accounts in the project. + operationId: list-project-service-accounts + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Project service accounts listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountListResponse' + "400": + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: List project service accounts + group: administration + returns: A list of [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts?after=custom_id&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + ], + "first_id": "svc_acct_abc", + "last_id": "svc_acct_xyz", + "has_more": false + } + + post: + summary: Creates a new service account in the project. This also returns an unredacted API key for the service account. + operationId: create-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + requestBody: + description: The project service account create request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountCreateRequest' + responses: + "200": + description: Project service account created successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountCreateResponse' + "400": + description: Error response when project is archived.
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Create project service account + group: administration + returns: The created [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/service_accounts \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Production App" + }' + response: + content: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Production App", + "role": "member", + "created_at": 1711471533, + "api_key": { + "object": "organization.project.service_account.api_key", + "value": "sk-abcdefghijklmnop123", + "name": "Secret Key", + "created_at": 1711471533, + "id": "key_abc" + } + } + + /organization/projects/{project_id}/service_accounts/{service_account_id}: + get: + summary: Retrieves a service account in the project. + operationId: retrieve-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: service_account_id + in: path + description: The ID of the service account. + required: true + schema: + type: string + responses: + "200": + description: Project service account retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccount' + x-oaiMeta: + name: Retrieve project service account + group: administration + returns: The [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + + delete: + summary: Deletes a service account from the project. + operationId: delete-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: service_account_id + in: path + description: The ID of the service account. + required: true + schema: + type: string + responses: + "200": + description: Project service account deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountDeleteResponse' + x-oaiMeta: + name: Delete project service account + group: administration + returns: Confirmation of service account being deleted, or an error in case of an archived project, which has no service accounts + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.service_account.deleted", + "id": "svc_acct_abc", + "deleted": true + } + + /organization/projects/{project_id}/api_keys: + get: + summary: Returns a list of API keys in the project. + operationId: list-project-api-keys + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. 
+ required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Project API keys listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectApiKeyListResponse' + + x-oaiMeta: + name: List project API keys + group: administration + returns: A list of [ProjectApiKey](/docs/api-reference/project-api-keys/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys?after=key_abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + } + } + ], + "first_id": "key_abc", + "last_id": "key_xyz", + "has_more": false + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + + /organization/projects/{project_id}/api_keys/{key_id}: + get: + summary: Retrieves an API key in the project. + operationId: retrieve-project-api-key + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. + required: true + schema: + type: string + responses: + "200": + description: Project API key retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectApiKey' + x-oaiMeta: + name: Retrieve project API key + group: administration + returns: The [ProjectApiKey](/docs/api-reference/project-api-keys/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + } + } + + delete: + summary: Deletes an API key from the project. + operationId: delete-project-api-key + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. + required: true + schema: + type: string + responses: + "200": + description: Project API key deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectApiKeyDeleteResponse' + "400": + description: Error response for various conditions. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Delete project API key + group: administration + returns: Confirmation of the key's deletion or an error if the key belonged to a service account + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.api_key.deleted", + "id": "key_abc", + "deleted": true + } + error_response: + content: | + { + "code": 400, + "message": "API keys cannot be deleted for service accounts, please delete the service account" + } + +components: + securitySchemes: + ApiKeyAuth: + type: http + scheme: "bearer" + + schemas: + Error: + type: object + properties: + code: + type: string + nullable: true + message: + type: string + nullable: false + param: + type: string + nullable: true + type: + type: string + nullable: false + required: + - type + - message + - param + - code + ErrorResponse: + type: object + properties: + error: + $ref: "#/components/schemas/Error" + required: + - error + + ListModelsResponse: + type: object + properties: + object: + type: string + enum: [ list ] + data: + type: array + items: + $ref: "#/components/schemas/Model" + required: + - object + - data + DeleteModelResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + required: + - id + - object + - deleted + + CreateCompletionRequest: + type: object + properties: + model: + description: &model_description | + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + anyOf: + - type: string + - type: string + enum: [ "gpt-3.5-turbo-instruct", "davinci-002", "babbage-002" ] + x-oaiTypeLabel: string + prompt: + description: &completions_prompt_description | + The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + default: "<|endoftext|>" + nullable: true + oneOf: + - type: string + default: "" + example: "This is a test." + - type: array + items: + type: string + default: "" + example: "This is a test." + - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + best_of: + type: integer + default: 1 + minimum: 0 + maximum: 20 + nullable: true + description: &completions_best_of_description | + Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
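+          # Illustration (not part of the upstream spec; prompt and values are hypothetical):
+          # a request combining `best_of` and `n` generates `best_of` candidates server-side
+          # and returns the `n` best of them, e.g.:
+          #
+          #   curl https://api.openai.com/v1/completions \
+          #     -H "Authorization: Bearer $OPENAI_API_KEY" \
+          #     -H "Content-Type: application/json" \
+          #     -d '{
+          #       "model": "gpt-3.5-turbo-instruct",
+          #       "prompt": "Write a tagline for an ice cream shop.",
+          #       "best_of": 5,
+          #       "n": 2,
+          #       "max_tokens": 16
+          #     }'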
+ echo: + type: boolean + default: false + nullable: true + description: &completions_echo_description > + Echo back the prompt in addition to the completion + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_frequency_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + logit_bias: &completions_logit_bias + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: &completions_logit_bias_description | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + logprobs: &completions_logprobs_configuration + type: integer + minimum: 0 + maximum: 5 + default: null + nullable: true + description: &completions_logprobs_description | + Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + + The maximum value for `logprobs` is 5. + max_tokens: + type: integer + minimum: 0 + default: 16 + example: 16 + nullable: true + description: &completions_max_tokens_description | + The maximum number of [tokens](/tokenizer) that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: &completions_completions_description | + How many completions to generate for each prompt. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_presence_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
+ + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + seed: &completions_seed_param + type: integer + minimum: -9223372036854775808 + maximum: 9223372036854775807 + nullable: true + description: | + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + stop: + description: &completions_stop_description > + Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + default: null + nullable: true + oneOf: + - type: string + default: <|endoftext|> + example: "\n" + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + example: '["\n"]' + stream: + description: > + Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean + nullable: true + default: false + stream_options: + $ref: "#/components/schemas/ChatCompletionStreamOptions" + suffix: + description: | + The suffix that comes after a completion of inserted text. + + This parameter is only supported for `gpt-3.5-turbo-instruct`. + default: null + nullable: true + type: string + example: "test." + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: &completions_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &completions_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + user: &end_user_param_configuration + type: string + example: user-1234 + description: | + A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + required: + - model + - prompt + + CreateCompletionResponse: + type: object + description: | + Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + properties: + id: + type: string + description: A unique identifier for the completion. + choices: + type: array + description: The list of completion choices the model generated for the input prompt. + items: + type: object + required: + - finish_reason + - index + - logprobs + - text + properties: + finish_reason: + type: string + description: &completion_finish_reason_description | + The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + or `content_filter` if content was omitted due to a flag from our content filters. + enum: [ "stop", "length", "content_filter" ] + index: + type: integer + logprobs: + type: object + nullable: true + properties: + text_offset: + type: array + items: + type: integer + token_logprobs: + type: array + items: + type: number + tokens: + type: array + items: + type: string + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: number + text: + type: string + created: + type: integer + description: The Unix timestamp (in seconds) of when the completion was created. + model: + type: string + description: The model used for completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always "text_completion" + enum: [ text_completion ] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - id + - object + - created + - model + - choices + x-oaiMeta: + name: The completion object + legacy: true + example: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "gpt-4-turbo", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + + ChatCompletionRequestMessageContentPartText: + type: object + title: Text content part + properties: + type: + type: string + enum: [ "text" ] + description: The type of the content part. + text: + type: string + description: The text content. + required: + - type + - text + + ChatCompletionRequestMessageContentPartImage: + type: object + title: Image content part + properties: + type: + type: string + enum: [ "image_url" ] + description: The type of the content part. + image_url: + type: object + properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + detail: + type: string + description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). + enum: [ "auto", "low", "high" ] + default: "auto" + required: + - url + required: + - type + - image_url + + ChatCompletionRequestMessageContentPartRefusal: + type: object + title: Refusal content part + properties: + type: + type: string + enum: [ "refusal" ] + description: The type of the content part. + refusal: + type: string + description: The refusal message generated by the model. 
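+      # Illustration (not part of the upstream spec; URL is hypothetical): the content-part
+      # form defined above lets a single user message mix text and image parts, e.g.:
+      #
+      #   "content": [
+      #     { "type": "text", "text": "What is in this image?" },
+      #     { "type": "image_url", "image_url": { "url": "https://example.com/photo.png", "detail": "auto" } }
+      #   ]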
+ required: + - type + - refusal + + ChatCompletionRequestMessage: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + + ChatCompletionRequestSystemMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + + ChatCompletionRequestUserMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" + x-oaiExpandable: true + + ChatCompletionRequestAssistantMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartRefusal" + x-oaiExpandable: true + + ChatCompletionRequestToolMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + + ChatCompletionRequestSystemMessage: + type: object + title: System message + properties: + content: + description: The contents of the system message. + oneOf: + - type: string + description: The contents of the system message. + title: Text content - type: array + description: An array of content parts with a defined type. For system messages, only type `text` is supported. + title: Array of content parts items: - type: string - default: "" - example: "This is a test." + $ref: "#/components/schemas/ChatCompletionRequestSystemMessageContentPart" + minItems: 1 + role: + type: string + enum: [ "system" ] + description: The role of the messages author, in this case `system`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + required: + - content + - role + + ChatCompletionRequestUserMessage: + type: object + title: User message + properties: + content: + description: | + The contents of the user message. + oneOf: + - type: string + description: The text contents of the message. + title: Text content + - type: array + description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestUserMessageContentPart" + minItems: 1 + x-oaiExpandable: true + role: + type: string + enum: [ "user" ] + description: The role of the messages author, in this case `user`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + required: + - content + - role + + ChatCompletionRequestAssistantMessage: + type: object + title: Assistant message + properties: + content: + nullable: true + oneOf: + - type: string + description: The contents of the assistant message. + title: Text content + - type: array + description: An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. 
+ title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestAssistantMessageContentPart" + minItems: 1 + description: | + The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + refusal: + nullable: true + type: string + description: The refusal message by the assistant. + role: + type: string + enum: [ "assistant" ] + description: The role of the messages author, in this case `assistant`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + function_call: + type: object + deprecated: true + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + nullable: true + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - arguments + - name + required: + - role + + FineTuneChatCompletionRequestAssistantMessage: + allOf: + - type: object + title: Assistant message + deprecated: false + properties: + weight: + type: integer + enum: [ 0, 1 ] + description: "Controls whether the assistant message is trained against (0 or 1)" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + required: + - role + + ChatCompletionRequestToolMessage: + type: object + title: Tool message + properties: + role: + type: string + enum: [ "tool" ] + description: The role of the messages author, in this case `tool`. + content: + oneOf: + - type: string + description: The contents of the tool message. + title: Text content - type: array - minItems: 1 + description: An array of content parts with a defined type. For tool messages, only type `text` is supported. + title: Array of content parts items: - type: integer - example: "[1212, 318, 257, 1332, 13]" - - type: array + $ref: "#/components/schemas/ChatCompletionRequestToolMessageContentPart" minItems: 1 - items: - type: array - minItems: 1 - items: - type: integer - example: "[[1212, 318, 257, 1332, 13]]" - best_of: + description: The contents of the tool message. + tool_call_id: + type: string + description: Tool call that this message is responding to. + required: + - role + - content + - tool_call_id + + ChatCompletionRequestFunctionMessage: + type: object + title: Function message + deprecated: true + properties: + role: + type: string + enum: [ "function" ] + description: The role of the messages author, in this case `function`. + content: + nullable: true + type: string + description: The contents of the function message. + name: + type: string + description: The name of the function to call. + required: + - role + - content + - name + + FunctionParameters: + type: object + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." 
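+      # Illustration (not part of the upstream spec; property names are hypothetical):
+      # `parameters` is an ordinary JSON Schema object, e.g. a function taking one required string:
+      #
+      #   {
+      #     "type": "object",
+      #     "properties": {
+      #       "location": { "type": "string", "description": "City and country" }
+      #     },
+      #     "required": ["location"]
+      #   }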
+ additionalProperties: true + + ChatCompletionFunctions: + type: object + deprecated: true + properties: + description: + type: string + description: A description of what the function does, used by the model to choose when and how to call the function. + name: + type: string + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + required: + - name + + ChatCompletionFunctionCallOption: + type: object + description: > + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + properties: + name: + type: string + description: The name of the function to call. + required: + - name + + ChatCompletionTool: + type: object + properties: + type: + type: string + enum: [ "function" ] + description: The type of the tool. Currently, only `function` is supported. + function: + $ref: "#/components/schemas/FunctionObject" + required: + - type + - function + + FunctionObject: + type: object + properties: + description: + type: string + description: A description of what the function does, used by the model to choose when and how to call the function. + name: + type: string + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](/docs/guides/function-calling). + required: + - name + + ResponseFormatText: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `text`" + enum: [ "text" ] + required: + - type + + ResponseFormatJsonObject: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `json_object`" + enum: [ "json_object" ] + required: + - type + + ResponseFormatJsonSchemaSchema: + type: object + description: "The schema for the response format, described as a JSON Schema object." + additionalProperties: true + + ResponseFormatJsonSchema: + type: object + properties: + type: + type: string + description: 'The type of response format being defined: `json_schema`' + enum: [ 'json_schema' ] + json_schema: + type: object + properties: + description: + type: string + description: A description of what the response format is for, used by the model to determine how to respond in the format. + name: + type: string + description: The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + schema: + $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs).
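+      # Illustration (not part of the upstream spec; the schema shown is hypothetical):
+      # in a request body, a Structured Outputs `response_format` is shaped roughly like:
+      #
+      #   "response_format": {
+      #     "type": "json_schema",
+      #     "json_schema": {
+      #       "name": "math_answer",
+      #       "strict": true,
+      #       "schema": {
+      #         "type": "object",
+      #         "properties": { "answer": { "type": "number" } },
+      #         "required": ["answer"],
+      #         "additionalProperties": false
+      #       }
+      #     }
+      #   }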
+ required: + - type + - name + required: + - type + - json_schema + + ChatCompletionToolChoiceOption: + description: | + Controls which (if any) tool is called by the model. + `none` means the model will not call any tool and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools. + Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools are present. + oneOf: + - type: string + description: > + `none` means the model will not call any tool and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools. + enum: [ none, auto, required ] + - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" + x-oaiExpandable: true + + ChatCompletionNamedToolChoice: + type: object + description: Specifies a tool the model should use. Use to force the model to call a specific function. + properties: + type: + type: string + enum: [ "function" ] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + required: + - type + - function + + ParallelToolCalls: + description: Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + type: boolean + default: true + + ChatCompletionMessageToolCalls: + type: array + description: The tool calls generated by the model, such as function calls. + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCall" + + ChatCompletionMessageToolCall: + type: object + properties: + # TODO: index included when streaming + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: [ "function" ] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + description: The function that the model called. + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + required: + - name + - arguments + required: + - id + - type + - function + + ChatCompletionMessageToolCallChunk: + type: object + properties: + index: type: integer - default: 1 - minimum: 0 - maximum: 20 - nullable: true - description: &completions_best_of_description | - Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. - - When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. - - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - echo: + id: + type: string + description: The ID of the tool call. 
+ type: + type: string + enum: [ "function" ] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + required: + - index + + # Note, this isn't referenced anywhere, but is kept as a convenience to record all possible roles in one place. + ChatCompletionRole: + type: string + description: The role of the author of a message + enum: + - system + - user + - assistant + - tool + - function + + ChatCompletionStreamOptions: + description: | + Options for streaming response. Only set this when you set `stream: true`. + type: object + nullable: true + default: null + properties: + include_usage: type: boolean - default: false + description: | + If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. + + ChatCompletionResponseMessage: + type: object + description: A chat completion message generated by the model. + properties: + content: + type: string + description: The contents of the message. nullable: true - description: &completions_echo_description > - Echo back the prompt in addition to the completion + refusal: + type: string + description: The refusal message generated by the model. + nullable: true + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + role: + type: string + enum: [ "assistant" ] + description: The role of the author of this message. + function_call: + type: object + deprecated: true + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - name + - arguments + required: + - role + - content + - refusal + + ChatCompletionStreamResponseDelta: + type: object + description: A chat completion delta generated by streamed model responses. + properties: + content: + type: string + description: The contents of the chunk message. + nullable: true + function_call: + deprecated: true + type: object + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. 
+ tool_calls: + type: array + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" + role: + type: string + enum: [ "system", "user", "assistant", "tool" ] + description: The role of the author of this message. + refusal: + type: string + description: The refusal message generated by the model. + nullable: true + + CreateChatCompletionRequest: + type: object + properties: + messages: + description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + type: array + minItems: 1 + items: + $ref: "#/components/schemas/ChatCompletionRequestMessage" + model: + description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + example: "gpt-4o" + anyOf: + - type: string + - type: string + enum: + [ + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "chatgpt-4o-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string frequency_penalty: type: number default: 0 minimum: -2 maximum: 2 nullable: true - description: &completions_frequency_penalty_description | - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - - [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) - logit_bias: &completions_logit_bias + description: *completions_frequency_penalty_description + logit_bias: type: object x-oaiTypeLabel: map default: null nullable: true additionalProperties: type: integer - description: &completions_logit_bias_description | + description: | Modify the likelihood of specified tokens appearing in the completion. - Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - - As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. - logprobs: &completions_logprobs_configuration + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
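+      # Illustration (not part of the upstream spec): `logit_bias` keys are token IDs given
+      # as strings, so a request that effectively bans a single token would include, e.g.:
+      #
+      #   "logit_bias": { "50256": -100 }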
+ logprobs: + description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + type: boolean + default: false + nullable: true + top_logprobs: + description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. type: integer minimum: 0 - maximum: 5 - default: null + maximum: 20 nullable: true - description: &completions_logprobs_description | - Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - - The maximum value for `logprobs` is 5. max_tokens: + description: | + The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. type: integer - minimum: 0 - default: 16 - example: 16 nullable: true - description: &completions_max_tokens_description | - The maximum number of [tokens](/tokenizer) that can be generated in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: type: integer minimum: 1 @@ -7424,63 +9710,72 @@ components: default: 1 example: 1 nullable: true - description: &completions_completions_description | - How many completions to generate for each prompt. - - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. presence_penalty: type: number default: 0 minimum: -2 maximum: 2 nullable: true - description: &completions_presence_penalty_description | - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + description: *completions_presence_penalty_description + response_format: + description: | + An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) - seed: &completions_seed_param + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
+ + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - $ref: "#/components/schemas/ResponseFormatText" + - $ref: "#/components/schemas/ResponseFormatJsonObject" + - $ref: "#/components/schemas/ResponseFormatJsonSchema" + x-oaiExpandable: true + seed: type: integer minimum: -9223372036854775808 maximum: 9223372036854775807 nullable: true description: | + This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + service_tier: + description: | + Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: + - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. + + When this parameter is set, the response body will include the `service_tier` utilized. + type: string + enum: [ "auto", "default" ] + nullable: true + default: null stop: - description: &completions_stop_description > - Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + description: | + Up to 4 sequences where the API will stop generating further tokens. default: null - nullable: true oneOf: - type: string - default: <|endoftext|> - example: "\n" nullable: true - type: array minItems: 1 maxItems: 4 items: type: string - example: '["\n"]' stream: description: > - Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). type: boolean nullable: true default: false stream_options: $ref: "#/components/schemas/ChatCompletionStreamOptions" - suffix: - description: | - The suffix that comes after a completion of inserted text. - - This parameter is only supported for `gpt-3.5-turbo-instruct`. - default: null - nullable: true - type: string - example: "test." temperature: type: number minimum: 0 @@ -7488,10 +9783,7 @@ components: default: 1 example: 1 nullable: true - description: &completions_temperature_description | - What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + description: *completions_temperature_description top_p: type: number minimum: 0 @@ -7499,77 +9791,125 @@ components: default: 1 example: 1 nullable: true - description: &completions_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + description: *completions_top_p_description + tools: + type: array + description: > + A list of tools the model may call. Currently, only functions are supported as a tool. + Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + items: + $ref: "#/components/schemas/ChatCompletionTool" + tool_choice: + $ref: "#/components/schemas/ChatCompletionToolChoiceOption" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + user: *end_user_param_configuration + function_call: + deprecated: true + description: | + Deprecated in favor of `tool_choice`. - We generally recommend altering this or `temperature` but not both. - user: &end_user_param_configuration - type: string - example: user-1234 + Controls which (if any) function is called by the model. + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + + `none` is the default when no functions are present. `auto` is the default if functions are present. + oneOf: + - type: string + description: > + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + enum: [ none, auto ] + - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" + x-oaiExpandable: true + functions: + deprecated: true description: | - A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + required: - model - - prompt + - messages - CreateCompletionResponse: + CreateChatCompletionResponse: type: object - description: | - Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + description: Represents a chat completion response returned by model, based on the provided input. properties: id: type: string - description: A unique identifier for the completion. + description: A unique identifier for the chat completion. choices: type: array - description: The list of completion choices the model generated for the input prompt. + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. 
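The request-side fields added above (`response_format`, `seed`, `stream`, `tools`, `tool_choice`, and the deprecated `function_call`/`functions`) describe the JSON body of `POST /v1/chat/completions`. As a rough, non-normative sketch of how those fields fit together, the Dart snippet below assembles such a body with `dart:convert` only; the model name, prompt, and the weather function are invented for illustration and are not part of this patch or of the generated openai_dart client.

```dart
// Illustrative sketch only: builds a /v1/chat/completions request body using
// the fields defined in the schema above. The system message tells the model
// to produce JSON, as the JSON-mode note above requires; the function
// definition and all literal values are made-up examples.
import 'dart:convert';

void main() {
  final request = {
    'model': 'gpt-4o-mini',
    'messages': [
      {'role': 'system', 'content': 'You are a helpful assistant. Reply in JSON.'},
      {'role': 'user', 'content': 'What is the weather in Copenhagen?'},
    ],
    'response_format': {'type': 'json_object'}, // included only to exercise the field
    'seed': 42, // best-effort determinism, per the `seed` description above
    'tools': [
      {
        'type': 'function',
        'function': {
          'name': 'get_current_weather', // hypothetical function
          'description': 'Get the current weather for a city.',
          'parameters': {
            'type': 'object',
            'properties': {
              'location': {'type': 'string'},
              'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']},
            },
            'required': ['location'],
          },
        },
      },
    ],
    'tool_choice': 'auto', // or {"type": "function", "function": {"name": "..."}}
    'parallel_tool_calls': true,
  };
  print(const JsonEncoder.withIndent('  ').convert(request));
}
```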
items: type: object required: - finish_reason - index + - message - logprobs - - text properties: finish_reason: type: string - description: &completion_finish_reason_description | + description: &chat_completion_finish_reason_description | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, - or `content_filter` if content was omitted due to a flag from our content filters. - enum: [ "stop", "length", "content_filter" ] + `content_filter` if content was omitted due to a flag from our content filters, + `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] index: type: integer - logprobs: + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + logprobs: &chat_completion_response_logprobs + description: Log probability information for the choice. type: object nullable: true properties: - text_offset: - type: array - items: - type: integer - token_logprobs: - type: array - items: - type: number - tokens: + content: + description: A list of message content tokens with log probability information. type: array items: - type: string - top_logprobs: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + refusal: + description: A list of message refusal tokens with log probability information. type: array items: - type: object - additionalProperties: - type: number - text: - type: string + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + required: + - content + - refusal + created: type: integer - description: The Unix timestamp (in seconds) of when the completion was created. + description: The Unix timestamp (in seconds) of when the chat completion was created. model: type: string - description: The model used for completion. + description: The model used for the chat completion. + service_tier: + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + type: string + enum: [ "scale", "default" ] + example: "scale" + nullable: true system_fingerprint: type: string description: | @@ -7578,1611 +9918,1489 @@ components: Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. object: type: string - description: The object type, which is always "text_completion" - enum: [ text_completion ] + description: The object type, which is always `chat.completion`. 
+ enum: [ chat.completion ] usage: $ref: "#/components/schemas/CompletionUsage" required: - - id - - object + - choices - created + - id - model - - choices + - object x-oaiMeta: - name: The completion object - legacy: true - example: | - { - "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", - "object": "text_completion", - "created": 1589478378, - "model": "gpt-4-turbo", - "choices": [ - { - "text": "\n\nThis is indeed a test", - "index": 0, - "logprobs": null, - "finish_reason": "length" - } - ], - "usage": { - "prompt_tokens": 5, - "completion_tokens": 7, - "total_tokens": 12 - } - } + name: The chat completion object + group: chat + example: *chat_completion_example - ChatCompletionRequestMessageContentPartText: + CreateChatCompletionFunctionResponse: type: object - title: Text content part + description: Represents a chat completion response returned by model, based on the provided input. properties: - type: + id: type: string - enum: [ "text" ] - description: The type of the content part. - text: + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: + &chat_completion_function_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. + enum: + [ "stop", "length", "function_call", "content_filter" ] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: type: string - description: The text content. - required: - - type - - text - - ChatCompletionRequestMessageContentPartImage: - type: object - title: Image content part - properties: - type: + description: The model used for the chat completion. + system_fingerprint: type: string - enum: [ "image_url" ] - description: The type of the content part. - image_url: - type: object - properties: - url: - type: string - description: Either a URL of the image or the base64 encoded image data. - format: uri - detail: - type: string - description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). - enum: [ "auto", "low", "high" ] - default: "auto" - required: - - url + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion`. 
+ enum: [ chat.completion ] + usage: + $ref: "#/components/schemas/CompletionUsage" required: - - type - - image_url + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_function_example - ChatCompletionRequestMessageContentPartRefusal: + ChatCompletionTokenLogprob: type: object - title: Refusal content part properties: - type: - type: string - enum: [ "refusal" ] - description: The type of the content part. - refusal: + token: &chat_completion_response_logprobs_token + description: The token. type: string - description: The refusal message generated by the model. + logprob: &chat_completion_response_logprobs_token_logprob + description: The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. + type: number + bytes: &chat_completion_response_logprobs_bytes + description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + type: array + items: + type: integer + nullable: true + top_logprobs: + description: List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. + type: array + items: + type: object + properties: + token: *chat_completion_response_logprobs_token + logprob: *chat_completion_response_logprobs_token_logprob + bytes: *chat_completion_response_logprobs_bytes + required: + - token + - logprob + - bytes required: - - type - - refusal - - ChatCompletionRequestMessage: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" - - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" - - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" - - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" - - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" - x-oaiExpandable: true - - ChatCompletionRequestSystemMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - x-oaiExpandable: true - - ChatCompletionRequestUserMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" - x-oaiExpandable: true - - ChatCompletionRequestAssistantMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartRefusal" - x-oaiExpandable: true - - ChatCompletionRequestToolMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - x-oaiExpandable: true + - token + - logprob + - bytes + - top_logprobs - ChatCompletionRequestSystemMessage: + ListPaginatedFineTuningJobsResponse: type: object - title: System message properties: - content: - description: The contents of the system message. - oneOf: - - type: string - description: The contents of the system message. - title: Text content - - type: array - description: An array of content parts with a defined type. For system messages, only type `text` is supported. 
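`ChatCompletionTokenLogprob` above defines the per-token structure (`token`, `logprob`, `bytes`, `top_logprobs`) that shows up under `choices[].logprobs.content` when `logprobs: true` is requested. A hedged sketch of walking that structure from an already-decoded response follows; the JSON literal is hand-written for illustration and is not real API output.

```dart
// Sketch: reading the structure defined by ChatCompletionTokenLogprob.
// The response literal below is fabricated for illustration only.
import 'dart:convert';

void main() {
  const fakeResponse = '''
  {"choices": [{
     "index": 0,
     "finish_reason": "stop",
     "message": {"role": "assistant", "content": "Hi"},
     "logprobs": {
       "content": [
         {"token": "Hi", "logprob": -0.12, "bytes": [72, 105],
          "top_logprobs": [
            {"token": "Hi",    "logprob": -0.12, "bytes": [72, 105]},
            {"token": "Hello", "logprob": -2.30, "bytes": [72, 101, 108, 108, 111]}
          ]}
       ],
       "refusal": null
     }
  }]}
  ''';

  final choice = (jsonDecode(fakeResponse)['choices'] as List).first;
  for (final entry in choice['logprobs']['content'] as List) {
    final alternatives = (entry['top_logprobs'] as List)
        .map((alt) => '${alt['token']} (${alt['logprob']})')
        .join(', ');
    print('${entry['token']}: ${entry['logprob']}  top: $alternatives');
  }
}
```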
- title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestSystemMessageContentPart" - minItems: 1 - role: - type: string - enum: [ "system" ] - description: The role of the messages author, in this case `system`. - name: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJob" + has_more: + type: boolean + object: type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + enum: [ list ] required: - - content - - role + - object + - data + - has_more - ChatCompletionRequestUserMessage: + CreateChatCompletionStreamResponse: type: object - title: User message + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. properties: - content: + id: + type: string + description: A unique identifier for the chat completion. Each chunk has the same ID. + choices: + type: array description: | - The contents of the user message. - oneOf: - - type: string - description: The text contents of the message. - title: Text content - - type: array - description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. - title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestUserMessageContentPart" - minItems: 1 - x-oaiExpandable: true - role: + A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the + last chunk if you set `stream_options: {"include_usage": true}`. + items: + type: object + required: + - delta + - finish_reason + - index + properties: + delta: + $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" + logprobs: *chat_completion_response_logprobs + finish_reason: + type: string + description: *chat_completion_finish_reason_description + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] + nullable: true + index: + type: integer + description: The index of the choice in the list of choices. + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + model: type: string - enum: [ "user" ] - description: The role of the messages author, in this case `user`. - name: + description: The model to generate the completion. + service_tier: + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - required: - - content - - role - - ChatCompletionRequestAssistantMessage: - type: object - title: Assistant message - properties: - content: - nullable: true - oneOf: - - type: string - description: The contents of the assistant message. - title: Text content - - type: array - description: An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. - title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessageContentPart" - minItems: 1 - description: | - The contents of the assistant message. 
Required unless `tool_calls` or `function_call` is specified. - refusal: + enum: [ "scale", "default" ] + example: "scale" nullable: true + system_fingerprint: type: string - description: The refusal message by the assistant. - role: - type: string - enum: [ "assistant" ] - description: The role of the messages author, in this case `assistant`. - name: + description: | + This fingerprint represents the backend configuration that the model runs with. + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - tool_calls: - $ref: "#/components/schemas/ChatCompletionMessageToolCalls" - function_call: + description: The object type, which is always `chat.completion.chunk`. + enum: [ chat.completion.chunk ] + usage: type: object - deprecated: true - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - nullable: true - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - required: - - arguments - - name - required: - - role - - FineTuneChatCompletionRequestAssistantMessage: - allOf: - - type: object - title: Assistant message - deprecated: false + description: | + An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. + When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request. properties: - weight: + completion_tokens: type: integer - enum: [ 0, 1 ] - description: "Controls whether the assistant message is trained against (0 or 1)" - - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens required: - - role + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: *chat_completion_chunk_example - ChatCompletionRequestToolMessage: + CreateChatCompletionImageResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: *chat_completion_image_example + + CreateImageRequest: type: object - title: Tool message properties: - role: + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. type: string - enum: [ "tool" ] - description: The role of the messages author, in this case `tool`. - content: - oneOf: + example: "A cute baby sea otter" + model: + anyOf: - type: string - description: The contents of the tool message. 
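`CreateChatCompletionStreamResponse` above specifies that chunks carry `delta` objects and that, with `stream_options: {"include_usage": true}`, a final chunk arrives with an empty `choices` array and the aggregate `usage`. Below is a rough sketch of assembling text from such chunks; the chunk maps are invented stand-ins for already-parsed `data:` server-sent events, and real code would also need SSE parsing, which is out of scope here.

```dart
// Sketch: concatenating `delta.content` across stream chunks and picking up
// the usage block from the final chunk, as described by the schema above.
// The chunk maps are hand-written examples, not real API output.
void main() {
  final chunks = <Map<String, dynamic>>[
    {
      'choices': [
        {'index': 0, 'delta': {'role': 'assistant', 'content': 'Hel'}, 'finish_reason': null}
      ],
      'usage': null,
    },
    {
      'choices': [
        {'index': 0, 'delta': {'content': 'lo!'}, 'finish_reason': 'stop'}
      ],
      'usage': null,
    },
    {
      'choices': [], // empty for the last chunk when include_usage is set
      'usage': {'prompt_tokens': 9, 'completion_tokens': 2, 'total_tokens': 11},
    },
  ];

  final text = StringBuffer();
  for (final chunk in chunks) {
    for (final choice in chunk['choices'] as List) {
      text.write(choice['delta']['content'] ?? '');
    }
    if (chunk['usage'] != null) {
      print('usage: ${chunk['usage']}');
    }
  }
  print(text); // Hello!
}
```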
- title: Text content - - type: array - description: An array of content parts with a defined type. For tool messages, only type `text` is supported. - title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestToolMessageContentPart" - minItems: 1 - description: The contents of the tool message. - tool_call_id: + - type: string + enum: [ "dall-e-2", "dall-e-3" ] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-3" + nullable: true + description: The model to use for image generation. + n: &images_n + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. + quality: type: string - description: Tool call that this message is responding to. - required: - - role - - content - - tool_call_id - - ChatCompletionRequestFunctionMessage: - type: object - title: Function message - deprecated: true - properties: - role: + enum: [ "standard", "hd" ] + default: "standard" + example: "standard" + description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. + response_format: &images_response_format type: string - enum: [ "function" ] - description: The role of the messages author, in this case `function`. - content: + enum: [ "url", "b64_json" ] + default: "url" + example: "url" nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. + size: &images_size type: string - description: The contents of the function message. - name: + enum: [ "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792" ] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + style: type: string - description: The name of the function to call. + enum: [ "vivid", "natural" ] + default: "vivid" + example: "vivid" + nullable: true + description: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + user: *end_user_param_configuration required: - - role - - content - - name - - FunctionParameters: - type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." - additionalProperties: true + - prompt - ChatCompletionFunctions: - type: object - deprecated: true + ImagesResponse: properties: - description: - type: string - description: A description of what the function does, used by the model to choose when and how to call the function. - name: - type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 
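`CreateImageRequest` and `ImagesResponse` above describe the `/v1/images/generations` round trip. A minimal, non-normative sketch of the request body is shown below (the prompt and option values are illustrative); the response then carries a list of `Image` objects with either `url` or `b64_json`, depending on `response_format`.

```dart
// Sketch of a /v1/images/generations request body using the fields above.
// Prompt and option values are illustrative only.
import 'dart:convert';

void main() {
  final request = {
    'prompt': 'A cute baby sea otter wearing a beret',
    'model': 'dall-e-3',
    'n': 1, // dall-e-3 only supports n=1, per the schema above
    'quality': 'hd',
    'size': '1024x1024',
    'style': 'natural',
    'response_format': 'b64_json',
  };
  print(jsonEncode(request));
}
```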
- parameters: - $ref: "#/components/schemas/FunctionParameters" + created: + type: integer + data: + type: array + items: + $ref: "#/components/schemas/Image" required: - - name + - created + - data - ChatCompletionFunctionCallOption: + Image: type: object - description: > - Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + description: Represents the url or the content of an image generated by the OpenAI API. properties: - name: + b64_json: type: string - description: The name of the function to call. - required: - - name + description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + url: + type: string + description: The URL of the generated image, if `response_format` is `url` (default). + revised_prompt: + type: string + description: The prompt that was used to generate the image, if there was any revision to the prompt. + x-oaiMeta: + name: The image object + example: | + { + "url": "...", + "revised_prompt": "..." + } - ChatCompletionTool: + CreateImageEditRequest: type: object properties: - type: + image: + description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. type: string - enum: [ "function" ] - description: The type of the tool. Currently, only `function` is supported. - function: - $ref: "#/components/schemas/FunctionObject" + format: binary + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters. + type: string + example: "A cute baby sea otter wearing a beret" + mask: + description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: [ "dall-e-2" ] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. + size: &dalle2_images_size + type: string + enum: [ "256x256", "512x512", "1024x1024" ] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + response_format: *images_response_format + user: *end_user_param_configuration required: - - type - - function + - prompt + - image - FunctionObject: + CreateImageVariationRequest: type: object properties: - description: - type: string - description: A description of what the function does, used by the model to choose when and how to call the function. - name: + image: + description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 
- parameters: - $ref: "#/components/schemas/FunctionParameters" - strict: - type: boolean + format: binary + model: + anyOf: + - type: string + - type: string + enum: [ "dall-e-2" ] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" nullable: true - default: false - description: Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling). + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: *images_n + response_format: *images_response_format + size: *dalle2_images_size + user: *end_user_param_configuration required: - - name + - image - ResponseFormatText: + CreateModerationRequest: type: object properties: - type: - type: string - description: "The type of response format being defined: `text`" - enum: [ "text" ] + input: + description: The input text to classify + oneOf: + - type: string + default: "" + example: "I want to kill them." + - type: array + items: + type: string + default: "" + example: "I want to kill them." + model: + description: | + Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + + The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + nullable: false + default: "text-moderation-latest" + example: "text-moderation-stable" + anyOf: + - type: string + - type: string + enum: [ "text-moderation-latest", "text-moderation-stable" ] + x-oaiTypeLabel: string required: - - type + - input - ResponseFormatJsonObject: + CreateModerationResponse: type: object + description: Represents if a given text input is potentially harmful. properties: - type: + id: type: string - description: "The type of response format being defined: `json_object`" - enum: [ "json_object" ] - required: - - type - - ResponseFormatJsonSchemaSchema: - type: object - description: "The schema for the response format, described as a JSON Schema object." - additionalProperties: true - - ResponseFormatJsonSchema: - type: object - properties: - type: + description: The unique identifier for the moderation request. + model: type: string - description: 'The type of response format being defined: `json_schema`' - enum: [ 'json_schema' ] - json_schema: - type: object - properties: - description: - type: string - description: A description of what the response format is for, used by the model to determine how to respond in the format. - name: - type: string - description: The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - schema: - $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' - strict: - type: boolean - nullable: true - default: false - description: Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). 
- required: - - type - - name + description: The model used to generate the moderation results. + results: + type: array + description: A list of moderation objects. + items: + type: object + properties: + flagged: + type: boolean + description: Whether any of the below categories are flagged. + categories: + type: object + description: A list of the categories, and whether they are flagged or not. + properties: + hate: + type: boolean + description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. + hate/threatening: + type: boolean + description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + harassment: + type: boolean + description: Content that expresses, incites, or promotes harassing language towards any target. + harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm towards any target. + self-harm: + type: boolean + description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/intent: + type: boolean + description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/instructions: + type: boolean + description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + sexual: + type: boolean + description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic detail. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + category_scores: + type: object + description: A list of the categories along with their scores as predicted by model. + properties: + hate: + type: number + description: The score for the category 'hate'. + hate/threatening: + type: number + description: The score for the category 'hate/threatening'. + harassment: + type: number + description: The score for the category 'harassment'. + harassment/threatening: + type: number + description: The score for the category 'harassment/threatening'. + self-harm: + type: number + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + description: The score for the category 'self-harm/instructions'. + sexual: + type: number + description: The score for the category 'sexual'. + sexual/minors: + type: number + description: The score for the category 'sexual/minors'. 
+ violence: + type: number + description: The score for the category 'violence'. + violence/graphic: + type: number + description: The score for the category 'violence/graphic'. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + required: + - flagged + - categories + - category_scores required: - - type - - json_schema - - ChatCompletionToolChoiceOption: - description: | - Controls which (if any) tool is called by the model. - `none` means the model will not call any tool and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools. - Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools are present. - oneOf: - - type: string - description: > - `none` means the model will not call any tool and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools. - enum: [ none, auto, required ] - - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" - x-oaiExpandable: true + - id + - model + - results + x-oaiMeta: + name: The moderation object + example: *moderation_example - ChatCompletionNamedToolChoice: + ListFilesResponse: type: object - description: Specifies a tool the model should use. Use to force the model to call a specific function. properties: - type: + data: + type: array + items: + $ref: "#/components/schemas/OpenAIFile" + object: type: string - enum: [ "function" ] - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - required: - - name + enum: [ list ] required: - - type - - function - - ParallelToolCalls: - description: Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. - type: boolean - default: true - - ChatCompletionMessageToolCalls: - type: array - description: The tool calls generated by the model, such as function calls. - items: - $ref: "#/components/schemas/ChatCompletionMessageToolCall" + - object + - data - ChatCompletionMessageToolCall: + CreateFileRequest: type: object + additionalProperties: false properties: - # TODO: index included when streaming - id: - type: string - description: The ID of the tool call. - type: + file: + description: | + The File object (not file name) to be uploaded. type: string - enum: [ "function" ] - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - description: The function that the model called. - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - required: - - name - - arguments + format: binary + purpose: + description: | + The intended purpose of the uploaded file. 
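The moderation schemas above define a boolean `categories` map and a parallel `category_scores` map for each result. A hedged sketch of building the request and reading flagged categories follows; the response literal is fabricated and truncated to two categories for brevity.

```dart
// Sketch: build a /v1/moderations request and read flagged categories from a
// response shaped like CreateModerationResponse. Response data is fabricated.
import 'dart:convert';

void main() {
  final request = jsonEncode({
    'input': 'I want to kill them.',
    'model': 'text-moderation-latest',
  });
  print(request);

  const fakeResponse = '''
  {"id": "modr-123", "model": "text-moderation-latest",
   "results": [{"flagged": true,
                "categories": {"violence": true, "hate": false},
                "category_scores": {"violence": 0.97, "hate": 0.02}}]}
  ''';
  final result = (jsonDecode(fakeResponse)['results'] as List).first;
  (result['categories'] as Map).forEach((category, flagged) {
    if (flagged == true) {
      print('$category flagged, score: ${result['category_scores'][category]}');
    }
  });
}
```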
+ + Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). + type: string + enum: [ "assistants", "batch", "fine-tune", "vision" ] required: - - id - - type - - function + - file + - purpose - ChatCompletionMessageToolCallChunk: + DeleteFileResponse: type: object properties: - index: - type: integer id: type: string - description: The ID of the tool call. - type: + object: type: string - enum: [ "function" ] - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + enum: [ file ] + deleted: + type: boolean required: - - index - - # Note, this isn't referenced anywhere, but is kept as a convenience to record all possible roles in one place. - ChatCompletionRole: - type: string - description: The role of the author of a message - enum: - - system - - user - - assistant - - tool - - function + - id + - object + - deleted - ChatCompletionStreamOptions: - description: | - Options for streaming response. Only set this when you set `stream: true`. + CreateUploadRequest: type: object - nullable: true - default: null + additionalProperties: false properties: - include_usage: - type: boolean + filename: description: | - If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. - - ChatCompletionResponseMessage: - type: object - description: A chat completion message generated by the model. - properties: - content: + The name of the file to upload. type: string - description: The contents of the message. - nullable: true - refusal: + purpose: + description: | + The intended purpose of the uploaded file. + + See the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). type: string - description: The refusal message generated by the model. - nullable: true - tool_calls: - $ref: "#/components/schemas/ChatCompletionMessageToolCalls" - role: + enum: [ "assistants", "batch", "fine-tune", "vision" ] + bytes: + description: | + The number of bytes in the file you are uploading. + type: integer + mime_type: + description: | + The MIME type of the file. + + This must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision. type: string - enum: [ "assistant" ] - description: The role of the author of this message. - function_call: - type: object - deprecated: true - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - required: - - name - - arguments required: - - role - - content - - refusal + - filename + - purpose + - bytes + - mime_type - ChatCompletionStreamResponseDelta: + AddUploadPartRequest: type: object - description: A chat completion delta generated by streamed model responses. + additionalProperties: false properties: - content: + data: + description: | + The chunk of bytes for this Part. type: string - description: The contents of the chunk message. - nullable: true - function_call: - deprecated: true - type: object - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - tool_calls: + format: binary + required: + - data + + CompleteUploadRequest: + type: object + additionalProperties: false + properties: + part_ids: type: array + description: | + The ordered list of Part IDs. items: - $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" - role: - type: string - enum: [ "system", "user", "assistant", "tool" ] - description: The role of the author of this message. - refusal: + type: string + md5: + description: | + The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect. type: string - description: The refusal message generated by the model. - nullable: true + required: + - part_ids - CreateChatCompletionRequest: + CancelUploadRequest: + type: object + additionalProperties: false + + CreateFineTuningJobRequest: type: object properties: - messages: - description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). - type: array - minItems: 1 - items: - $ref: "#/components/schemas/ChatCompletionRequestMessage" model: - description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. - example: "gpt-4o" + description: | + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned). 
+ example: "gpt-4o-mini" anyOf: - type: string - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "chatgpt-4o-latest", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - frequency_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: *completions_frequency_penalty_description - logit_bias: - type: object - x-oaiTypeLabel: map - default: null - nullable: true - additionalProperties: - type: integer - description: | - Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - logprobs: - description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. - type: boolean - default: false - nullable: true - top_logprobs: - description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. - type: integer - minimum: 0 - maximum: 20 - nullable: true - max_tokens: - description: | - The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. - - The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. - type: integer - nullable: true - n: - type: integer - minimum: 1 - maximum: 128 - default: 1 - example: 1 - nullable: true - description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. - presence_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: *completions_presence_penalty_description - response_format: + enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] + x-oaiTypeLabel: string + training_file: description: | - An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + The ID of an uploaded file that contains training data. - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). 
+ See [upload file](/docs/api-reference/files/create) for how to upload a file. - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - oneOf: - - $ref: "#/components/schemas/ResponseFormatText" - - $ref: "#/components/schemas/ResponseFormatJsonObject" - - $ref: "#/components/schemas/ResponseFormatJsonSchema" - x-oaiExpandable: true - seed: - type: integer - minimum: -9223372036854775808 - maximum: 9223372036854775807 - nullable: true - description: | - This feature is in Beta. - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - x-oaiMeta: - beta: true - service_tier: - description: | - Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - - When not set, the default behavior is 'auto'. + The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format. - When this parameter is set, the response body will include the `service_tier` utilized. + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string - enum: [ "auto", "default" ] - nullable: true - default: null - stop: + example: "file-abc123" + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. + properties: + batch_size: + description: | + Number of examples in each batch. A larger batch size means that model parameters + are updated less frequently, but with lower variance. + oneOf: + - type: string + enum: [ auto ] + - type: integer + minimum: 1 + maximum: 256 + default: auto + learning_rate_multiplier: + description: | + Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + overfitting. + oneOf: + - type: string + enum: [ auto ] + - type: number + minimum: 0 + exclusiveMinimum: true + default: auto + n_epochs: + description: | + The number of epochs to train the model for. An epoch refers to one full cycle + through the training dataset. + oneOf: + - type: string + enum: [ auto ] + - type: integer + minimum: 1 + maximum: 50 + default: auto + suffix: description: | - Up to 4 sequences where the API will stop generating further tokens. + A string of up to 18 characters that will be added to your fine-tuned model name. 
+ + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + type: string + minLength: 1 + maxLength: 40 default: null - oneOf: - - type: string - nullable: true - - type: array - minItems: 1 - maxItems: 4 - items: - type: string - stream: - description: > - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). - type: boolean - nullable: true - default: false - stream_options: - $ref: "#/components/schemas/ChatCompletionStreamOptions" - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *completions_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 nullable: true - description: *completions_top_p_description - tools: - type: array - description: > - A list of tools the model may call. Currently, only functions are supported as a tool. - Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. - items: - $ref: "#/components/schemas/ChatCompletionTool" - tool_choice: - $ref: "#/components/schemas/ChatCompletionToolChoiceOption" - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - user: *end_user_param_configuration - function_call: - deprecated: true + validation_file: description: | - Deprecated in favor of `tool_choice`. + The ID of an uploaded file that contains validation data. - Controls which (if any) function is called by the model. - `none` means the model will not call a function and instead generates a message. - `auto` means the model can pick between generating a message or calling a function. - Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + If you provide this file, the data is used to generate validation + metrics periodically during fine-tuning. These metrics can be viewed in + the fine-tuning results file. + The same data should not be present in both train and validation files. - `none` is the default when no functions are present. `auto` is the default if functions are present. - oneOf: - - type: string - description: > - `none` means the model will not call a function and instead generates a message. - `auto` means the model can pick between generating a message or calling a function. - enum: [ none, auto ] - - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" - x-oaiExpandable: true - functions: - deprecated: true - description: | - Deprecated in favor of `tools`. + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - A list of functions the model may generate JSON inputs for. - type: array - minItems: 1 - maxItems: 128 - items: - $ref: "#/components/schemas/ChatCompletionFunctions" - - required: - - model - - messages - - CreateChatCompletionResponse: - type: object - description: Represents a chat completion response returned by model, based on the provided input. - properties: - id: + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string - description: A unique identifier for the chat completion. 
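`CreateFineTuningJobRequest` above (model, `training_file`, `hyperparameters`, `suffix`, `integrations`, `seed`) maps onto a body like the sketch below. The file IDs, suffix, and W&B project name are placeholders; the training file is assumed to have been uploaded beforehand with purpose `fine-tune`, as the `training_file` description states.

```dart
// Sketch of a /v1/fine_tuning/jobs request body per the schema above.
// File IDs, suffix, and the W&B project name are placeholders.
import 'dart:convert';

void main() {
  final request = {
    'model': 'gpt-4o-mini',
    'training_file': 'file-abc123',
    'validation_file': 'file-def456',
    'hyperparameters': {
      'n_epochs': 'auto', // each hyperparameter accepts "auto" or a number
      'batch_size': 8,
      'learning_rate_multiplier': 0.1,
    },
    'suffix': 'custom-model-name',
    'integrations': [
      {
        'type': 'wandb',
        'wandb': {'project': 'my-wandb-project', 'tags': ['custom-tag']},
      }
    ],
    'seed': 42,
  };
  print(const JsonEncoder.withIndent('  ').convert(request));
}
```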
- choices: + nullable: true + example: "file-abc123" + integrations: type: array - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + description: A list of integrations to enable for your fine-tuning job. + nullable: true items: type: object required: - - finish_reason - - index - - message - - logprobs + - type + - wandb properties: - finish_reason: - type: string - description: &chat_completion_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, - `length` if the maximum number of tokens specified in the request was reached, - `content_filter` if content was omitted due to a flag from our content filters, - `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. - enum: - [ - "stop", - "length", - "tool_calls", - "content_filter", - "function_call", - ] - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: "#/components/schemas/ChatCompletionResponseMessage" - logprobs: &chat_completion_response_logprobs - description: Log probability information for the choice. + type: + description: | + The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. + oneOf: + - type: string + enum: [ wandb ] + wandb: type: object - nullable: true + description: | + The settings for your integration with Weights and Biases. This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + required: + - project properties: - content: - description: A list of message content tokens with log probability information. - type: array - items: - $ref: "#/components/schemas/ChatCompletionTokenLogprob" + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" + name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. nullable: true - refusal: - description: A list of message refusal tokens with log probability information. + type: string + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true + type: string + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". type: array items: - $ref: "#/components/schemas/ChatCompletionTokenLogprob" - nullable: true - required: - - content - - refusal + type: string + example: "custom-tag" - created: + seed: + description: | + The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. + If a seed is not specified, one will be generated for you. type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. - service_tier: - description: The service tier used for processing the request. 
This field is only included if the `service_tier` parameter is specified in the request. - type: string - enum: [ "scale", "default" ] - example: "scale" nullable: true - system_fingerprint: - type: string - description: | - This fingerprint represents the backend configuration that the model runs with. - - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. - object: - type: string - description: The object type, which is always `chat.completion`. - enum: [ chat.completion ] - usage: - $ref: "#/components/schemas/CompletionUsage" + minimum: 0 + maximum: 2147483647 + example: 42 required: - - choices - - created - - id - model - - object - x-oaiMeta: - name: The chat completion object - group: chat - example: *chat_completion_example + - training_file - CreateChatCompletionFunctionResponse: + ListFineTuningJobEventsResponse: type: object - description: Represents a chat completion response returned by model, based on the provided input. properties: - id: - type: string - description: A unique identifier for the chat completion. - choices: + data: type: array - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. items: - type: object - required: - - finish_reason - - index - - message - - logprobs - properties: - finish_reason: - type: string - description: - &chat_completion_function_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. - enum: - [ "stop", "length", "function_call", "content_filter" ] - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: "#/components/schemas/ChatCompletionResponseMessage" - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. - system_fingerprint: - type: string - description: | - This fingerprint represents the backend configuration that the model runs with. - - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + $ref: "#/components/schemas/FineTuningJobEvent" object: type: string - description: The object type, which is always `chat.completion`. - enum: [ chat.completion ] - usage: - $ref: "#/components/schemas/CompletionUsage" + enum: [ list ] required: - - choices - - created - - id - - model - object - x-oaiMeta: - name: The chat completion object - group: chat - example: *chat_completion_function_example - - ChatCompletionTokenLogprob: - type: object - properties: - token: &chat_completion_response_logprobs_token - description: The token. - type: string - logprob: &chat_completion_response_logprobs_token_logprob - description: The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. - type: number - bytes: &chat_completion_response_logprobs_bytes - description: A list of integers representing the UTF-8 bytes representation of the token. 
Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - type: array - items: - type: integer - nullable: true - top_logprobs: - description: List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. - type: array - items: - type: object - properties: - token: *chat_completion_response_logprobs_token - logprob: *chat_completion_response_logprobs_token_logprob - bytes: *chat_completion_response_logprobs_bytes - required: - - token - - logprob - - bytes - required: - - token - - logprob - - bytes - - top_logprobs + - data - ListPaginatedFineTuningJobsResponse: + ListFineTuningJobCheckpointsResponse: type: object properties: data: type: array items: - $ref: "#/components/schemas/FineTuningJob" - has_more: - type: boolean + $ref: "#/components/schemas/FineTuningJobCheckpoint" object: type: string enum: [ list ] + first_id: + type: string + nullable: true + last_id: + type: string + nullable: true + has_more: + type: boolean required: - object - data - has_more - CreateChatCompletionStreamResponse: + CreateEmbeddingRequest: type: object - description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + additionalProperties: false properties: - id: - type: string - description: A unique identifier for the chat completion. Each chunk has the same ID. - choices: - type: array + input: description: | - A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the - last chunk if you set `stream_options: {"include_usage": true}`. - items: - type: object - required: - - delta - - finish_reason - - index - properties: - delta: - $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" - logprobs: *chat_completion_response_logprobs - finish_reason: + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + example: "The quick brown fox jumped over the lazy dog" + oneOf: + - type: string + title: string + description: The string that will be turned into an embedding. + default: "" + example: "This is a test." + - type: array + title: array + description: The array of strings that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: type: string - description: *chat_completion_finish_reason_description - enum: - [ - "stop", - "length", - "tool_calls", - "content_filter", - "function_call", - ] - nullable: true - index: + default: "" + example: "['This is a test.']" + - type: array + title: array + description: The array of integers that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: type: integer - description: The index of the choice in the list of choices. - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. 
+ example: "[1212, 318, 257, 1332, 13]" + - type: array + title: array + description: The array of arrays containing integers that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + x-oaiExpandable: true model: + description: *model_description + example: "text-embedding-3-small" + anyOf: + - type: string + - type: string + enum: + [ + "text-embedding-ada-002", + "text-embedding-3-small", + "text-embedding-3-large", + ] + x-oaiTypeLabel: string + encoding_format: + description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." + example: "float" + default: "float" type: string - description: The model to generate the completion. - service_tier: - description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. - type: string - enum: [ "scale", "default" ] - example: "scale" - nullable: true - system_fingerprint: + enum: [ "float", "base64" ] + dimensions: + description: | + The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. + type: integer + minimum: 1 + user: *end_user_param_configuration + required: + - model + - input + + CreateEmbeddingResponse: + type: object + properties: + data: + type: array + description: The list of embeddings generated by the model. + items: + $ref: "#/components/schemas/Embedding" + model: type: string - description: | - This fingerprint represents the backend configuration that the model runs with. - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + description: The name of the model used to generate the embedding. object: type: string - description: The object type, which is always `chat.completion.chunk`. - enum: [ chat.completion.chunk ] + description: The object type, which is always "list". + enum: [ list ] usage: type: object - description: | - An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. - When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request. + description: The usage information for the request. properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. prompt_tokens: type: integer - description: Number of tokens in the prompt. + description: The number of tokens used by the prompt. total_tokens: type: integer - description: Total number of tokens used in the request (prompt + completion). + description: The total number of tokens used by the request. required: - prompt_tokens - - completion_tokens - total_tokens required: - - choices - - created - - id - - model - object - x-oaiMeta: - name: The chat completion chunk object - group: chat - example: *chat_completion_chunk_example - - CreateChatCompletionImageResponse: - type: object - description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. 
- x-oaiMeta: - name: The chat completion chunk object - group: chat - example: *chat_completion_image_example + - model + - data + - usage - CreateImageRequest: + CreateTranscriptionRequest: type: object + additionalProperties: false properties: - prompt: - description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. + file: + description: | + The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. type: string - example: "A cute baby sea otter" + x-oaiTypeLabel: file + format: binary model: + description: | + ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. + example: whisper-1 anyOf: - type: string - type: string - enum: [ "dall-e-2", "dall-e-3" ] + enum: [ "whisper-1" ] x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-3" - nullable: true - description: The model to use for image generation. - n: &images_n - type: integer - minimum: 1 - maximum: 10 - default: 1 - example: 1 - nullable: true - description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. - quality: + language: + description: | + The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. type: string - enum: [ "standard", "hd" ] - default: "standard" - example: "standard" - description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. - response_format: &images_response_format + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. type: string - enum: [ "url", "b64_json" ] - default: "url" - example: "url" - nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. - size: &images_size + response_format: + description: | + The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. type: string - enum: [ "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792" ] - default: "1024x1024" - example: "1024x1024" - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. - style: + enum: + - json + - text + - srt + - verbose_json + - vtt + default: json + temperature: + description: | + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + type: number + default: 0 + timestamp_granularities[]: + description: | + The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. 
Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. + type: array + items: + type: string + enum: + - word + - segment + default: [ segment ] + required: + - file + - model + + # Note: This does not currently support the non-default response format types. + CreateTranscriptionResponseJson: + type: object + description: Represents a transcription response returned by model, based on the provided input. + properties: + text: type: string - enum: [ "vivid", "natural" ] - default: "vivid" - example: "vivid" - nullable: true - description: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. - user: *end_user_param_configuration + description: The transcribed text. required: - - prompt + - text + x-oaiMeta: + name: The transcription object (JSON) + group: audio + example: *basic_transcription_response_example - ImagesResponse: + TranscriptionSegment: + type: object properties: - created: + id: type: integer - data: + description: Unique identifier of the segment. + seek: + type: integer + description: Seek offset of the segment. + start: + type: number + format: float + description: Start time of the segment in seconds. + end: + type: number + format: float + description: End time of the segment in seconds. + text: + type: string + description: Text content of the segment. + tokens: type: array items: - $ref: "#/components/schemas/Image" + type: integer + description: Array of token IDs for the text content. + temperature: + type: number + format: float + description: Temperature parameter used for generating the segment. + avg_logprob: + type: number + format: float + description: Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. + compression_ratio: + type: number + format: float + description: Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. + no_speech_prob: + type: number + format: float + description: Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. required: - - created - - data + - id + - seek + - start + - end + - text + - tokens + - temperature + - avg_logprob + - compression_ratio + - no_speech_prob - Image: + TranscriptionWord: type: object - description: Represents the url or the content of an image generated by the OpenAI API. properties: - b64_json: - type: string - description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. - url: - type: string - description: The URL of the generated image, if `response_format` is `url` (default). - revised_prompt: + word: type: string - description: The prompt that was used to generate the image, if there was any revision to the prompt. - x-oaiMeta: - name: The image object - example: | - { - "url": "...", - "revised_prompt": "..." - } + description: The text content of the word. + start: + type: number + format: float + description: Start time of the word in seconds. + end: + type: number + format: float + description: End time of the word in seconds. 
+      required: [ word, start, end ]

-    CreateImageEditRequest:
+    CreateTranscriptionResponseVerboseJson:
       type: object
+      description: Represents a verbose json transcription response returned by model, based on the provided input.
       properties:
-        image:
-          description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
+        language:
           type: string
-          format: binary
-        prompt:
-          description: A text description of the desired image(s). The maximum length is 1000 characters.
+          description: The language of the input audio.
+        duration:
           type: string
-          example: "A cute baby sea otter wearing a beret"
-        mask:
-          description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
+          description: The duration of the input audio.
+        text:
+          type: string
+          description: The transcribed text.
+        words:
+          type: array
+          description: Extracted words and their corresponding timestamps.
+          items:
+            $ref: "#/components/schemas/TranscriptionWord"
+        segments:
+          type: array
+          description: Segments of the transcribed text and their corresponding details.
+          items:
+            $ref: "#/components/schemas/TranscriptionSegment"
+      required: [ language, duration, text ]
+      x-oaiMeta:
+        name: The transcription object (Verbose JSON)
+        group: audio
+        example: *verbose_transcription_response_example
+
+    CreateTranslationRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        file:
+          description: |
+            The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
           type: string
+          x-oaiTypeLabel: file
           format: binary
         model:
+          description: |
+            ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available.
+          example: whisper-1
           anyOf:
             - type: string
             - type: string
-              enum: [ "dall-e-2" ]
+              enum: [ "whisper-1" ]
           x-oaiTypeLabel: string
-          default: "dall-e-2"
-          example: "dall-e-2"
-          nullable: true
-          description: The model to use for image generation. Only `dall-e-2` is supported at this time.
-        n:
-          type: integer
-          minimum: 1
-          maximum: 10
-          default: 1
-          example: 1
-          nullable: true
-          description: The number of images to generate. Must be between 1 and 10.
-        size: &dalle2_images_size
+        prompt:
+          description: |
+            An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
           type: string
-          enum: [ "256x256", "512x512", "1024x1024" ]
-          default: "1024x1024"
-          example: "1024x1024"
-          nullable: true
-          description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
-        response_format: *images_response_format
-        user: *end_user_param_configuration
+        response_format:
+          description: |
+            The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+          type: string
+          default: json
+        temperature:
+          description: |
+            The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+ type: number + default: 0 required: - - prompt - - image + - file + - model - CreateImageVariationRequest: + # Note: This does not currently support the non-default response format types. + CreateTranslationResponseJson: type: object properties: - image: - description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. + text: type: string - format: binary - model: - anyOf: - - type: string - - type: string - enum: [ "dall-e-2" ] - x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-2" - nullable: true - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - n: *images_n - response_format: *images_response_format - size: *dalle2_images_size - user: *end_user_param_configuration required: - - image + - text - CreateModerationRequest: + CreateTranslationResponseVerboseJson: type: object properties: - input: - description: The input text to classify - oneOf: - - type: string - default: "" - example: "I want to kill them." - - type: array - items: - type: string - default: "" - example: "I want to kill them." + language: + type: string + description: The language of the output translation (always `english`). + duration: + type: string + description: The duration of the input audio. + text: + type: string + description: The translated text. + segments: + type: array + description: Segments of the translated text and their corresponding details. + items: + $ref: "#/components/schemas/TranscriptionSegment" + required: [ language, duration, text ] + + CreateSpeechRequest: + type: object + additionalProperties: false + properties: model: description: | - Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. - - The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. - nullable: false - default: "text-moderation-latest" - example: "text-moderation-stable" + One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` anyOf: - type: string - type: string - enum: [ "text-moderation-latest", "text-moderation-stable" ] + enum: [ "tts-1", "tts-1-hd" ] x-oaiTypeLabel: string - required: - - input - - CreateModerationResponse: - type: object - description: Represents if a given text input is potentially harmful. - properties: - id: + input: type: string - description: The unique identifier for the moderation request. - model: + description: The text to generate audio for. The maximum length is 4096 characters. + maxLength: 4096 + voice: + description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). type: string - description: The model used to generate the moderation results. - results: - type: array - description: A list of moderation objects. - items: - type: object - properties: - flagged: - type: boolean - description: Whether any of the below categories are flagged. - categories: - type: object - description: A list of the categories, and whether they are flagged or not. 
- properties: - hate: - type: boolean - description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. - hate/threatening: - type: boolean - description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. - harassment: - type: boolean - description: Content that expresses, incites, or promotes harassing language towards any target. - harassment/threatening: - type: boolean - description: Harassment content that also includes violence or serious harm towards any target. - self-harm: - type: boolean - description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. - self-harm/intent: - type: boolean - description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. - self-harm/instructions: - type: boolean - description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. - sexual: - type: boolean - description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). - sexual/minors: - type: boolean - description: Sexual content that includes an individual who is under 18 years old. - violence: - type: boolean - description: Content that depicts death, violence, or physical injury. - violence/graphic: - type: boolean - description: Content that depicts death, violence, or physical injury in graphic detail. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - category_scores: - type: object - description: A list of the categories along with their scores as predicted by model. - properties: - hate: - type: number - description: The score for the category 'hate'. - hate/threatening: - type: number - description: The score for the category 'hate/threatening'. - harassment: - type: number - description: The score for the category 'harassment'. - harassment/threatening: - type: number - description: The score for the category 'harassment/threatening'. - self-harm: - type: number - description: The score for the category 'self-harm'. - self-harm/intent: - type: number - description: The score for the category 'self-harm/intent'. - self-harm/instructions: - type: number - description: The score for the category 'self-harm/instructions'. - sexual: - type: number - description: The score for the category 'sexual'. - sexual/minors: - type: number - description: The score for the category 'sexual/minors'. - violence: - type: number - description: The score for the category 'violence'. - violence/graphic: - type: number - description: The score for the category 'violence/graphic'. 
- required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - required: - - flagged - - categories - - category_scores + enum: [ "alloy", "echo", "fable", "onyx", "nova", "shimmer" ] + response_format: + description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." + default: "mp3" + type: string + enum: [ "mp3", "opus", "aac", "flac", "wav", "pcm" ] + speed: + description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." + type: number + default: 1.0 + minimum: 0.25 + maximum: 4.0 required: - - id - model - - results - x-oaiMeta: - name: The moderation object - example: *moderation_example + - input + - voice - ListFilesResponse: - type: object + Model: + title: Model + description: Describes an OpenAI model offering that can be used with the API. properties: - data: - type: array - items: - $ref: "#/components/schemas/OpenAIFile" + id: + type: string + description: The model identifier, which can be referenced in the API endpoints. + created: + type: integer + description: The Unix timestamp (in seconds) when the model was created. object: type: string - enum: [ list ] + description: The object type, which is always "model". + enum: [ model ] + owned_by: + type: string + description: The organization that owns the model. required: + - id - object - - data + - created + - owned_by + x-oaiMeta: + name: The model object + example: *retrieve_model_response - CreateFileRequest: - type: object - additionalProperties: false + OpenAIFile: + title: OpenAIFile + description: The `File` object represents a document that has been uploaded to OpenAI. properties: - file: - description: | - The File object (not file name) to be uploaded. + id: type: string - format: binary + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + description: The size of the file, in bytes. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the file was created. + filename: + type: string + description: The name of the file. + object: + type: string + description: The object type, which is always `file`. + enum: [ "file" ] purpose: - description: | - The intended purpose of the uploaded file. - - Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). type: string - enum: [ "assistants", "batch", "fine-tune", "vision" ] - required: - - file - - purpose - - DeleteFileResponse: - type: object - properties: - id: + description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. + enum: + [ + "assistants", + "assistants_output", + "batch", + "batch_output", + "fine-tune", + "fine-tune-results", + "vision", + ] + status: type: string - object: + deprecated: true + description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. + enum: [ "uploaded", "processed", "error" ] + status_details: type: string - enum: [ file ] - deleted: - type: boolean + deprecated: true + description: Deprecated. 
For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`.
       required:
         - id
         - object
-        - deleted
+        - bytes
+        - created_at
+        - filename
+        - purpose
+        - status
+      x-oaiMeta:
+        name: The file object
+        example: |
+          {
+            "id": "file-abc123",
+            "object": "file",
+            "bytes": 120000,
+            "created_at": 1677610602,
+            "filename": "salesOverview.pdf",
+            "purpose": "assistants"
+          }
+    Upload:
       type: object
-      additionalProperties: false
+      title: Upload
+      description: |
+        The Upload object can accept byte chunks in the form of Parts.
       properties:
+        id:
+          type: string
+          description: The Upload unique identifier, which can be referenced in API endpoints.
+        created_at:
+          type: integer
+          description: The Unix timestamp (in seconds) for when the Upload was created.
         filename:
-          description: |
-            The name of the file to upload.
           type: string
+          description: The name of the file to be uploaded.
+        bytes:
+          type: integer
+          description: The intended number of bytes to be uploaded.
         purpose:
-          description: |
-            The intended purpose of the uploaded file.
-
-            See the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).
           type: string
-          enum: [ "assistants", "batch", "fine-tune", "vision" ]
-        bytes:
-          description: |
-            The number of bytes in the file you are uploading.
+          description: The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values.
+        status:
+          type: string
+          description: The status of the Upload.
+          enum: [ "pending", "completed", "cancelled", "expired" ]
+        expires_at:
           type: integer
-        mime_type:
-          description: |
-            The MIME type of the file.
-
-            This must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision.
+          description: The Unix timestamp (in seconds) for when the Upload will expire.
+        object:
           type: string
+          description: The object type, which is always "upload".
+          enum: [ upload ]
+        file:
+          $ref: "#/components/schemas/OpenAIFile"
+          nullable: true
+          description: The ready File object after the Upload is completed.
       required:
+        - bytes
+        - created_at
+        - expires_at
         - filename
+        - id
         - purpose
-        - bytes
-        - mime_type
-
-    AddUploadPartRequest:
+        - status
+        - step_number
+      x-oaiMeta:
+        name: The upload object
+        example: |
+          {
+            "id": "upload_abc123",
+            "object": "upload",
+            "bytes": 2147483648,
+            "created_at": 1719184911,
+            "filename": "training_examples.jsonl",
+            "purpose": "fine-tune",
+            "status": "completed",
+            "expires_at": 1719127296,
+            "file": {
+              "id": "file-xyz321",
+              "object": "file",
+              "bytes": 2147483648,
+              "created_at": 1719186911,
+              "filename": "training_examples.jsonl",
+              "purpose": "fine-tune"
+            }
+          }
+    UploadPart:
       type: object
-      additionalProperties: false
+      title: UploadPart
+      description: |
+        The upload Part represents a chunk of bytes we can add to an Upload object.
       properties:
-        data:
+        id:
+          type: string
+          description: The upload Part unique identifier, which can be referenced in API endpoints.
+        created_at:
+          type: integer
+          description: The Unix timestamp (in seconds) for when the Part was created.
+        upload_id:
+          type: string
+          description: The ID of the Upload object that this Part was added to.
+        object:
+          type: string
+          description: The object type, which is always `upload.part`.
+ enum: [ 'upload.part' ] + required: + - created_at + - id + - object + - upload_id + x-oaiMeta: + name: The upload part object + example: | + { + "id": "part_def456", + "object": "upload.part", + "created_at": 1719186911, + "upload_id": "upload_abc123" + } + Embedding: + type: object + description: | + Represents an embedding vector returned by embedding endpoint. + properties: + index: + type: integer + description: The index of the embedding in the list of embeddings. + embedding: + type: array description: | - The chunk of bytes for this Part. + The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). + items: + type: number + object: type: string - format: binary + description: The object type, which is always "embedding". + enum: [ embedding ] required: - - data + - index + - object + - embedding + x-oaiMeta: + name: The embedding object + example: | + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } - CompleteUploadRequest: + FineTuningJob: type: object - additionalProperties: false + title: FineTuningJob + description: | + The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. properties: - part_ids: - type: array - description: | - The ordered list of Part IDs. - items: - type: string - md5: - description: | - The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect. + id: type: string - required: - - part_ids - - CancelUploadRequest: - type: object - additionalProperties: false - - CreateFineTuningJobRequest: - type: object - properties: - model: - description: | - The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned). - example: "gpt-4o-mini" - anyOf: - - type: string - - type: string - enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] - x-oaiTypeLabel: string - training_file: - description: | - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/create) for how to upload a file. - - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. - - The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + description: The object identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + error: + type: object + nullable: true + description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + param: + type: string + description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. 
+ nullable: true + required: + - code + - message + - param + fine_tuned_model: type: string - example: "file-abc123" + nullable: true + description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + finished_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. hyperparameters: type: object - description: The hyperparameters used for the fine-tuning job. + description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. properties: - batch_size: - description: | - Number of examples in each batch. A larger batch size means that model parameters - are updated less frequently, but with lower variance. - oneOf: - - type: string - enum: [ auto ] - - type: integer - minimum: 1 - maximum: 256 - default: auto - learning_rate_multiplier: - description: | - Scaling factor for the learning rate. A smaller learning rate may be useful to avoid - overfitting. - oneOf: - - type: string - enum: [ auto ] - - type: number - minimum: 0 - exclusiveMinimum: true - default: auto n_epochs: - description: | - The number of epochs to train the model for. An epoch refers to one full cycle - through the training dataset. oneOf: - type: string enum: [ auto ] @@ -9190,1227 +11408,1463 @@ components: minimum: 1 maximum: 50 default: auto - suffix: - description: | - A string of up to 18 characters that will be added to your fine-tuned model name. - - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + description: + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + required: + - n_epochs + model: type: string - minLength: 1 - maxLength: 40 - default: null + description: The base model that is being fine-tuned. + object: + type: string + description: The object type, which is always "fine_tuning.job". + enum: [ fine_tuning.job ] + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: + type: array + description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). + items: + type: string + example: file-abc123 + status: + type: string + description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + enum: + [ + "validating_files", + "queued", + "running", + "succeeded", + "failed", + "cancelled", + ] + trained_tokens: + type: integer nullable: true + description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + training_file: + type: string + description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). validation_file: - description: | - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation - metrics periodically during fine-tuning. 
These metrics can be viewed in - the fine-tuning results file. - The same data should not be present in both train and validation files. - - Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string nullable: true - example: "file-abc123" + description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). integrations: type: array - description: A list of integrations to enable for your fine-tuning job. nullable: true + description: A list of integrations to enable for this fine-tuning job. + maxItems: 5 items: - type: object - required: - - type - - wandb - properties: - type: - description: | - The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. - oneOf: - - type: string - enum: [ wandb ] - wandb: - type: object - description: | - The settings for your integration with Weights and Biases. This payload specifies the project that - metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags - to your run, and set a default entity (team, username, etc) to be associated with your run. - required: - - project - properties: - project: - description: | - The name of the project that the new run will be created under. - type: string - example: "my-wandb-project" - name: - description: | - A display name to set for the run. If not set, we will use the Job ID as the name. - nullable: true - type: string - entity: - description: | - The entity to use for the run. This allows you to set the team or username of the WandB user that you would - like associated with the run. If not set, the default entity for the registered WandB API key is used. - nullable: true - type: string - tags: - description: | - A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some - default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". - type: array - items: - type: string - example: "custom-tag" - + oneOf: + - $ref: "#/components/schemas/FineTuningIntegration" + x-oaiExpandable: true seed: - description: | - The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. - If a seed is not specified, one will be generated for you. + type: integer + description: The seed used for the fine-tuning job. + estimated_finish: type: integer nullable: true - minimum: 0 - maximum: 2147483647 - example: 42 + description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. required: + - created_at + - error + - finished_at + - fine_tuned_model + - hyperparameters + - id - model + - object + - organization_id + - result_files + - status + - trained_tokens - training_file + - validation_file + - seed + x-oaiMeta: + name: The fine-tuning job object + example: *fine_tuning_example + + FineTuningIntegration: + type: object + title: Fine-Tuning Job Integration + required: + - type + - wandb + properties: + type: + type: string + description: "The type of the integration being enabled for the fine-tuning job" + enum: [ "wandb" ] + wandb: + type: object + description: | + The settings for your integration with Weights and Biases. 
This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + required: + - project + properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" + name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. + nullable: true + type: string + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true + type: string + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: "custom-tag" - ListFineTuningJobEventsResponse: + FineTuningJobEvent: type: object + description: Fine-tuning job event object properties: - data: - type: array - items: - $ref: "#/components/schemas/FineTuningJobEvent" + id: + type: string + created_at: + type: integer + level: + type: string + enum: [ "info", "warn", "error" ] + message: + type: string object: type: string - enum: [ list ] + enum: [ fine_tuning.job.event ] required: + - id - object - - data + - created_at + - level + - message + x-oaiMeta: + name: The fine-tuning job event object + example: | + { + "object": "fine_tuning.job.event", + "id": "ftevent-abc123" + "created_at": 1677610602, + "level": "info", + "message": "Created fine-tuning job" + } - ListFineTuningJobCheckpointsResponse: + FineTuningJobCheckpoint: type: object + title: FineTuningJobCheckpoint + description: | + The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. properties: - data: - type: array - items: - $ref: "#/components/schemas/FineTuningJobCheckpoint" - object: + id: type: string - enum: [ list ] - first_id: + description: The checkpoint identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the checkpoint was created. + fine_tuned_model_checkpoint: type: string - nullable: true - last_id: + description: The name of the fine-tuned checkpoint model that is created. + step_number: + type: integer + description: The step number that the checkpoint was created at. + metrics: + type: object + description: Metrics at the step number during the fine-tuning job. + properties: + step: + type: number + train_loss: + type: number + train_mean_token_accuracy: + type: number + valid_loss: + type: number + valid_mean_token_accuracy: + type: number + full_valid_loss: + type: number + full_valid_mean_token_accuracy: + type: number + fine_tuning_job_id: type: string - nullable: true - has_more: - type: boolean + description: The name of the fine-tuning job that this checkpoint was created from. + object: + type: string + description: The object type, which is always "fine_tuning.job.checkpoint". 
+ enum: [ fine_tuning.job.checkpoint ] required: + - created_at + - fine_tuning_job_id + - fine_tuned_model_checkpoint + - id + - metrics - object - - data - - has_more + - step_number + x-oaiMeta: + name: The fine-tuning job checkpoint object + example: | + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", + "created_at": 1712211699, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88", + "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", + "metrics": { + "step": 88, + "train_loss": 0.478, + "train_mean_token_accuracy": 0.924, + "valid_loss": 10.112, + "valid_mean_token_accuracy": 0.145, + "full_valid_loss": 0.567, + "full_valid_mean_token_accuracy": 0.944 + }, + "step_number": 88 + } - CreateEmbeddingRequest: + FinetuneChatRequestInput: type: object - additionalProperties: false + description: The per-line training example of a fine-tuning input file for chat models properties: - input: - description: | - Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. - example: "The quick brown fox jumped over the lazy dog" - oneOf: - - type: string - title: string - description: The string that will be turned into an embedding. - default: "" - example: "This is a test." - - type: array - title: array - description: The array of strings that will be turned into an embedding. - minItems: 1 - maxItems: 2048 - items: - type: string - default: "" - example: "['This is a test.']" - - type: array - title: array - description: The array of integers that will be turned into an embedding. - minItems: 1 - maxItems: 2048 - items: - type: integer - example: "[1212, 318, 257, 1332, 13]" - - type: array - title: array - description: The array of arrays containing integers that will be turned into an embedding. - minItems: 1 - maxItems: 2048 - items: - type: array - minItems: 1 - items: - type: integer - example: "[[1212, 318, 257, 1332, 13]]" - x-oaiExpandable: true - model: - description: *model_description - example: "text-embedding-3-small" - anyOf: - - type: string - - type: string - enum: - [ - "text-embedding-ada-002", - "text-embedding-3-small", - "text-embedding-3-large", - ] - x-oaiTypeLabel: string - encoding_format: - description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." - example: "float" - default: "float" - type: string - enum: [ "float", "base64" ] - dimensions: - description: | - The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. 
- type: integer - minimum: 1 - user: *end_user_param_configuration - required: - - model - - input + messages: + type: array + minItems: 1 + items: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/FineTuneChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + tools: + type: array + description: A list of tools the model may generate JSON inputs for. + items: + $ref: "#/components/schemas/ChatCompletionTool" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + functions: + deprecated: true + description: + A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + x-oaiMeta: + name: Training format for chat models + example: | + { + "messages": [ + { "role": "user", "content": "What is the weather in San Francisco?" }, + { + "role": "assistant", + "tool_calls": [ + { + "id": "call_id", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\"location\": \"San Francisco, USA\", \"format\": \"celsius\"}" + } + } + ] + } + ], + "parallel_tool_calls": false, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and country, eg. San Francisco, USA" + }, + "format": { "type": "string", "enum": ["celsius", "fahrenheit"] } + }, + "required": ["location", "format"] + } + } + } + ] + } - CreateEmbeddingResponse: + FinetuneCompletionRequestInput: type: object + description: The per-line training example of a fine-tuning input file for completions models properties: - data: - type: array - description: The list of embeddings generated by the model. - items: - $ref: "#/components/schemas/Embedding" - model: + prompt: type: string - description: The name of the model used to generate the embedding. - object: + description: The input prompt for this training example. + completion: type: string - description: The object type, which is always "list". - enum: [ list ] - usage: - type: object - description: The usage information for the request. - properties: - prompt_tokens: - type: integer - description: The number of tokens used by the prompt. - total_tokens: - type: integer - description: The total number of tokens used by the request. - required: - - prompt_tokens - - total_tokens - required: - - object - - model - - data - - usage + description: The desired completion for this training example. + x-oaiMeta: + name: Training format for completions models + example: | + { + "prompt": "What is the answer to 2+2", + "completion": "4" + } - CreateTranscriptionRequest: + CompletionUsage: type: object - additionalProperties: false + description: Usage statistics for the completion request. properties: - file: - description: | - The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - type: string - x-oaiTypeLabel: file - format: binary - model: - description: | - ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. 
- example: whisper-1 - anyOf: - - type: string - - type: string - enum: [ "whisper-1" ] - x-oaiTypeLabel: string - language: - description: | - The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. - type: string - prompt: - description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - type: string - response_format: - description: | - The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. - type: string - enum: - - json - - text - - srt - - verbose_json - - vtt - default: json - temperature: - description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. - type: number - default: 0 - timestamp_granularities[]: - description: | - The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. - type: array - items: - type: string - enum: - - word - - segment - default: [ segment ] + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). required: - - file - - model + - prompt_tokens + - completion_tokens + - total_tokens - # Note: This does not currently support the non-default response format types. - CreateTranscriptionResponseJson: + RunCompletionUsage: type: object - description: Represents a transcription response returned by model, based on the provided input. + description: Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). properties: - text: - type: string - description: The transcribed text. + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). required: - - text - x-oaiMeta: - name: The transcription object (JSON) - group: audio - example: *basic_transcription_response_example + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true - TranscriptionSegment: + RunStepCompletionUsage: type: object + description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. properties: - id: + completion_tokens: type: integer - description: Unique identifier of the segment. - seek: + description: Number of completion tokens used over the course of the run step. + prompt_tokens: type: integer - description: Seek offset of the segment. 
- start: - type: number - format: float - description: Start time of the segment in seconds. - end: - type: number - format: float - description: End time of the segment in seconds. - text: - type: string - description: Text content of the segment. - tokens: - type: array - items: - type: integer - description: Array of token IDs for the text content. - temperature: - type: number - format: float - description: Temperature parameter used for generating the segment. - avg_logprob: - type: number - format: float - description: Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. - compression_ratio: - type: number - format: float - description: Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. - no_speech_prob: - type: number - format: float - description: Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). required: - - id - - seek - - start - - end - - text - - tokens - - temperature - - avg_logprob - - compression_ratio - - no_speech_prob + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true - TranscriptionWord: - type: object - properties: - word: - type: string - description: The text content of the word. - start: - type: number - format: float - description: Start time of the word in seconds. - end: - type: number - format: float - description: End time of the word in seconds. - required: [ word, start, end ] + AssistantsApiResponseFormatOption: + description: | + Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - type: string + description: > + `auto` is the default value + enum: [ auto ] + - $ref: '#/components/schemas/ResponseFormatText' + - $ref: '#/components/schemas/ResponseFormatJsonObject' + - $ref: '#/components/schemas/ResponseFormatJsonSchema' + x-oaiExpandable: true - CreateTranscriptionResponseVerboseJson: + AssistantObject: type: object - description: Represents a verbose json transcription response returned by model, based on the provided input. + title: Assistant + description: Represents an `assistant` that can call the model and use tools. properties: - language: + id: + description: The identifier, which can be referenced in API endpoints. 
            type: string
-          description: The language of the input audio.
-          duration:
+        object:
+          description: The object type, which is always `assistant`.
            type: string
-          description: The duration of the input audio.
-          text:
+          enum: [ assistant ]
+        created_at:
+          description: The Unix timestamp (in seconds) for when the assistant was created.
+          type: integer
+        name:
+          description: &assistant_name_param_description |
+            The name of the assistant. The maximum length is 256 characters.
            type: string
-          description: The transcribed text.
-          words:
-            type: array
-            description: Extracted words and their corresponding timestamps.
-            items:
-              $ref: "#/components/schemas/TranscriptionWord"
-          segments:
+          maxLength: 256
+          nullable: true
+        description:
+          description: &assistant_description_param_description |
+            The description of the assistant. The maximum length is 512 characters.
+          type: string
+          maxLength: 512
+          nullable: true
+        model:
+          description: *model_description
+          type: string
+        instructions:
+          description: &assistant_instructions_param_description |
+            The system instructions that the assistant uses. The maximum length is 256,000 characters.
+          type: string
+          maxLength: 256000
+          nullable: true
+        tools:
+          description: &assistant_tools_param_description |
+            A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+          default: [ ]
            type: array
-            description: Segments of the transcribed text and their corresponding details.
+          maxItems: 128
            items:
-              $ref: "#/components/schemas/TranscriptionSegment"
-        required: [ language, duration, text ]
+            oneOf:
+              - $ref: "#/components/schemas/AssistantToolsCode"
+              - $ref: "#/components/schemas/AssistantToolsFileSearch"
+              - $ref: "#/components/schemas/AssistantToolsFunction"
+            x-oaiExpandable: true
+        tool_resources:
+          type: object
+          description: |
+            A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
+          properties:
+            code_interpreter:
+              type: object
+              properties:
+                file_ids:
+                  type: array
+                  description: |
+                    A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+                  default: [ ]
+                  maxItems: 20
+                  items:
+                    type: string
+            file_search:
+              type: object
+              properties:
+                vector_store_ids:
+                  type: array
+                  description: |
+                    The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.
+                  maxItems: 1
+                  items:
+                    type: string
+          nullable: true
+        metadata:
+          description: &metadata_description |
+            Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
+          type: object
+          x-oaiTypeLabel: map
+          nullable: true
+        temperature:
+          description: &run_temperature_description |
+            What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+ type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - id + - object + - created_at + - name + - description + - model + - instructions + - tools + - metadata x-oaiMeta: - name: The transcription object (Verbose JSON) - group: audio - example: *verbose_transcription_response_example + name: The assistant object + beta: true + example: *create_assistants_example - CreateTranslationRequest: + CreateAssistantRequest: type: object additionalProperties: false properties: - file: - description: | - The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - type: string - x-oaiTypeLabel: file - format: binary model: - description: | - ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. - example: whisper-1 + description: *model_description + example: "gpt-4o" anyOf: - type: string - type: string - enum: [ "whisper-1" ] + enum: + [ + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] x-oaiTypeLabel: string - prompt: - description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + name: + description: *assistant_name_param_description type: string - response_format: - description: | - The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. + nullable: true + maxLength: 256 + description: + description: *assistant_description_param_description type: string - default: json + nullable: true + maxLength: 512 + instructions: + description: *assistant_instructions_param_description + type: string + nullable: true + maxLength: 256000 + tools: + description: *assistant_tools_param_description + default: [ ] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: + type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. 
+ properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [ ] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + vector_stores: + type: array + description: | + A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. + maxItems: 10000 + items: + type: string + chunking_strategy: + # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: [ "auto" ] + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true + metadata: + type: object + description: | + Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + oneOf: + - required: [ vector_store_ids ] + - required: [ vector_stores ] + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true temperature: - description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. 
+ description: *run_temperature_description type: number - default: 0 + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - - file - model - # Note: This does not currently support the non-default response format types. - CreateTranslationResponseJson: - type: object - properties: - text: - type: string - required: - - text - - CreateTranslationResponseVerboseJson: - type: object - properties: - language: - type: string - description: The language of the output translation (always `english`). - duration: - type: string - description: The duration of the input audio. - text: - type: string - description: The translated text. - segments: - type: array - description: Segments of the translated text and their corresponding details. - items: - $ref: "#/components/schemas/TranscriptionSegment" - required: [ language, duration, text ] - - CreateSpeechRequest: + ModifyAssistantRequest: type: object additionalProperties: false properties: model: - description: | - One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + description: *model_description anyOf: - type: string - - type: string - enum: [ "tts-1", "tts-1-hd" ] - x-oaiTypeLabel: string - input: + name: + description: *assistant_name_param_description type: string - description: The text to generate audio for. The maximum length is 4096 characters. - maxLength: 4096 - voice: - description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + nullable: true + maxLength: 256 + description: + description: *assistant_description_param_description type: string - enum: [ "alloy", "echo", "fable", "onyx", "nova", "shimmer" ] - response_format: - description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." - default: "mp3" + nullable: true + maxLength: 512 + instructions: + description: *assistant_instructions_param_description type: string - enum: [ "mp3", "opus", "aac", "flac", "wav", "pcm" ] - speed: - description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." + nullable: true + maxLength: 256000 + tools: + description: *assistant_tools_param_description + default: [ ] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: + type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. 
+ default: [ ] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: *run_temperature_description type: number - default: 1.0 - minimum: 0.25 - maximum: 4.0 - required: - - model - - input - - voice - - Model: - title: Model - description: Describes an OpenAI model offering that can be used with the API. - properties: - id: - type: string - description: The model identifier, which can be referenced in the API endpoints. - created: - type: integer - description: The Unix timestamp (in seconds) when the model was created. - object: - type: string - description: The object type, which is always "model". - enum: [ model ] - owned_by: - type: string - description: The organization that owns the model. - required: - - id - - object - - created - - owned_by - x-oaiMeta: - name: The model object - example: *retrieve_model_response - - OpenAIFile: - title: OpenAIFile - description: The `File` object represents a document that has been uploaded to OpenAI. - properties: - id: - type: string - description: The file identifier, which can be referenced in the API endpoints. - bytes: - type: integer - description: The size of the file, in bytes. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the file was created. - filename: - type: string - description: The name of the file. - object: - type: string - description: The object type, which is always `file`. - enum: [ "file" ] - purpose: - type: string - description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. - enum: - [ - "assistants", - "assistants_output", - "batch", - "batch_output", - "fine-tune", - "fine-tune-results", - "vision", - ] - status: + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + + DeleteAssistantResponse: + type: object + properties: + id: type: string - deprecated: true - description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. - enum: [ "uploaded", "processed", "error" ] - status_details: + deleted: + type: boolean + object: type: string - deprecated: true - description: Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. + enum: [ assistant.deleted ] required: - id - object - - bytes - - created_at - - filename - - purpose - - status - x-oaiMeta: - name: The file object - example: | - { - "id": "file-abc123", - "object": "file", - "bytes": 120000, - "created_at": 1677610602, - "filename": "salesOverview.pdf", - "purpose": "assistants", - } - Upload: + - deleted + + ListAssistantsResponse: type: object - title: Upload - description: | - The Upload object can accept byte chunks in the form of Parts. 
properties: - id: + object: type: string - description: The Upload unique identifier, which can be referenced in API endpoints. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload was created. - filename: + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/AssistantObject" + first_id: type: string - description: The name of the file to be uploaded. - bytes: - type: integer - description: The intended number of bytes to be uploaded. - purpose: + example: "asst_abc123" + last_id: type: string - description: The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values. - status: + example: "asst_abc456" + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + x-oaiMeta: + name: List assistants response object + group: chat + example: *list_assistants_example + + AssistantToolsCode: + type: object + title: Code interpreter tool + properties: + type: type: string - description: The status of the Upload. - enum: [ "pending", "completed", "cancelled", "expired" ] - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload was created. - object: + description: "The type of tool being defined: `code_interpreter`" + enum: [ "code_interpreter" ] + required: + - type + + AssistantToolsFileSearch: + type: object + title: FileSearch tool + properties: + type: type: string - description: The object type, which is always "upload". - enum: [ upload ] - file: - $ref: "#/components/schemas/OpenAIFile" - nullable: true - description: The ready File object after the Upload is completed. + description: "The type of tool being defined: `file_search`" + enum: [ "file_search" ] + file_search: + type: object + description: Overrides for the file search tool. + properties: + max_num_results: + type: integer + minimum: 1 + maximum: 50 + description: | + The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + ranking_options: + $ref: "#/components/schemas/FileSearchRankingOptions" required: - - bytes - - created_at - - expires_at - - filename - - id - - purpose - - status - - step_number - x-oaiMeta: - name: The upload object - example: | - { - "id": "upload_abc123", - "object": "upload", - "bytes": 2147483648, - "created_at": 1719184911, - "filename": "training_examples.jsonl", - "purpose": "fine-tune", - "status": "completed", - "expires_at": 1719127296, - "file": { - "id": "file-xyz321", - "object": "file", - "bytes": 2147483648, - "created_at": 1719186911, - "filename": "training_examples.jsonl", - "purpose": "fine-tune", - } - } - UploadPart: + - type + + FileSearchRankingOptions: + title: File search tool call ranking options type: object - title: UploadPart description: | - The upload Part represents a chunk of bytes we can add to an Upload object. + The ranking options for the file search. + + See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. 
      properties:
-        id:
+        ranker:
          type: string
-          description: The upload Part unique identifier, which can be referenced in API endpoints.
-        created_at:
-          type: integer
-          description: The Unix timestamp (in seconds) for when the Part was created.
-        upload_id:
+          description: The ranker to use for the file search. If not specified, will use the `auto` ranker.
+          enum: [ "auto", "default_2024_08_21" ]
+        score_threshold:
+          type: number
+          description: The score threshold for the file search. All values must be a floating point number between 0 and 1.
+          minimum: 0
+          maximum: 1
+
+    AssistantToolsFileSearchTypeOnly:
+      type: object
+      title: FileSearch tool
+      properties:
+        type:
          type: string
-          description: The ID of the Upload object that this Part was added to.
-        object:
+          description: "The type of tool being defined: `file_search`"
+          enum: [ "file_search" ]
+      required:
+        - type
+
+    AssistantToolsFunction:
+      type: object
+      title: Function tool
+      properties:
+        type:
          type: string
-          description: The object type, which is always `upload.part`.
-          enum: [ 'upload.part' ]
+          description: "The type of tool being defined: `function`"
+          enum: [ "function" ]
+        function:
+          $ref: "#/components/schemas/FunctionObject"
      required:
-        - created_at
-        - id
-        - object
-        - upload_id
-      x-oaiMeta:
-        name: The upload part object
-        example: |
-          {
-            "id": "part_def456",
-            "object": "upload.part",
-            "created_at": 1719186911,
-            "upload_id": "upload_abc123"
-          }
-    Embedding:
+        - type
+        - function
+
+    TruncationObject:
      type: object
-      description: |
-        Represents an embedding vector returned by embedding endpoint.
+      title: Thread Truncation Controls
+      description: Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.
      properties:
-        index:
-          type: integer
-          description: The index of the embedding in the list of embeddings.
-        embedding:
-          type: array
-          description: |
-            The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings).
-          items:
-            type: number
-        object:
+        type:
          type: string
-          description: The object type, which is always "embedding".
-          enum: [ embedding ]
+          description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.
+          enum: [ "auto", "last_messages" ]
+        last_messages:
+          type: integer
+          description: The number of most recent messages from the thread when constructing the context for the run.
+          minimum: 1
+          nullable: true
      required:
-        - index
-        - object
-        - embedding
-      x-oaiMeta:
-        name: The embedding object
-        example: |
-          {
-            "object": "embedding",
-            "embedding": [
-              0.0023064255,
-              -0.009327292,
-              .... (1536 floats total for ada-002)
-              -0.0028842222,
-            ],
-            "index": 0
-          }
+        - type
+
+    AssistantsApiToolChoiceOption:
+      description: |
+        Controls which (if any) tool is called by the model.
+        `none` means the model will not call any tools and instead generates a message.
+        `auto` is the default value and means the model can pick between generating a message or calling one or more tools.
+        `required` means the model must call one or more tools before responding to the user.
+        Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
- FineTuningJob: + oneOf: + - type: string + description: > + `none` means the model will not call any tools and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools before responding to the user. + enum: [ none, auto, required ] + - $ref: "#/components/schemas/AssistantsNamedToolChoice" + x-oaiExpandable: true + + AssistantsNamedToolChoice: type: object - title: FineTuningJob - description: | - The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. + description: Specifies a tool the model should use. Use to force the model to call a specific tool. properties: - id: + type: type: string - description: The object identifier, which can be referenced in the API endpoints. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - error: + enum: [ "function", "code_interpreter", "file_search" ] + description: The type of the tool. If type is `function`, the function name must be set + function: type: object - nullable: true - description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. properties: - code: - type: string - description: A machine-readable error code. - message: - type: string - description: A human-readable error message. - param: + name: type: string - description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. - nullable: true - required: - - code - - message - - param - fine_tuned_model: - type: string - nullable: true - description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. - finished_at: - type: integer - nullable: true - description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. - hyperparameters: - type: object - description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - properties: - n_epochs: - oneOf: - - type: string - enum: [ auto ] - - type: integer - minimum: 1 - maximum: 50 - default: auto - description: - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + description: The name of the function to call. required: - - n_epochs - model: + - name + required: + - type + + RunObject: + type: object + title: A run on a thread + description: Represents an execution run on a [thread](/docs/api-reference/threads). + properties: + id: + description: The identifier, which can be referenced in API endpoints. type: string - description: The base model that is being fine-tuned. object: + description: The object type, which is always `thread.run`. type: string - description: The object type, which is always "fine_tuning.job". - enum: [ fine_tuning.job ] - organization_id: + enum: [ "thread.run" ] + created_at: + description: The Unix timestamp (in seconds) for when the run was created. + type: integer + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. 
+ type: string + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. type: string - description: The organization that owns the fine-tuning job. - result_files: - type: array - description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). - items: - type: string - example: file-abc123 status: + description: The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. type: string - description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. enum: [ - "validating_files", "queued", - "running", - "succeeded", - "failed", + "in_progress", + "requires_action", + "cancelling", "cancelled", + "failed", + "completed", + "incomplete", + "expired", ] - trained_tokens: - type: integer - nullable: true - description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. - training_file: - type: string - description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). - validation_file: - type: string - nullable: true - description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). - integrations: - type: array - nullable: true - description: A list of integrations to enable for this fine-tuning job. - maxItems: 5 - items: - oneOf: - - $ref: "#/components/schemas/FineTuningIntegration" - x-oaiExpandable: true - seed: - type: integer - description: The seed used for the fine-tuning job. - estimated_finish: - type: integer - nullable: true - description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. - required: - - created_at - - error - - finished_at - - fine_tuned_model - - hyperparameters - - id - - model - - object - - organization_id - - result_files - - status - - trained_tokens - - training_file - - validation_file - - seed - x-oaiMeta: - name: The fine-tuning job object - example: *fine_tuning_example - - FineTuningIntegration: - type: object - title: Fine-Tuning Job Integration - required: - - type - - wandb - properties: - type: - type: string - description: "The type of the integration being enabled for the fine-tuning job" - enum: [ "wandb" ] - wandb: + required_action: type: object - description: | - The settings for your integration with Weights and Biases. This payload specifies the project that - metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags - to your run, and set a default entity (team, username, etc) to be associated with your run. - required: - - project + description: Details on the action required to continue the run. Will be `null` if no action is required. + nullable: true properties: - project: - description: | - The name of the project that the new run will be created under. + type: + description: For now, this is always `submit_tool_outputs`. type: string - example: "my-wandb-project" - name: - description: | - A display name to set for the run. If not set, we will use the Job ID as the name. 
- nullable: true + enum: [ "submit_tool_outputs" ] + submit_tool_outputs: + type: object + description: Details on the tool outputs needed for this run to continue. + properties: + tool_calls: + type: array + description: A list of the relevant tool calls. + items: + $ref: "#/components/schemas/RunToolCallObject" + required: + - tool_calls + required: + - type + - submit_tool_outputs + last_error: + type: object + description: The last error associated with this run. Will be `null` if there are no errors. + nullable: true + properties: + code: type: string - entity: - description: | - The entity to use for the run. This allows you to set the team or username of the WandB user that you would - like associated with the run. If not set, the default entity for the registered WandB API key is used. - nullable: true + description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. + enum: + [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] + message: type: string - tags: - description: | - A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some - default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". - type: array - items: - type: string - example: "custom-tag" - - FineTuningJobEvent: - type: object - description: Fine-tuning job event object - properties: - id: - type: string - created_at: + description: A human-readable description of the error. + required: + - code + - message + expires_at: + description: The Unix timestamp (in seconds) for when the run will expire. + type: integer + nullable: true + started_at: + description: The Unix timestamp (in seconds) for when the run was started. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run was cancelled. type: integer - level: - type: string - enum: [ "info", "warn", "error" ] - message: - type: string - object: - type: string - enum: [ fine_tuning.job.event ] - required: - - id - - object - - created_at - - level - - message - x-oaiMeta: - name: The fine-tuning job event object - example: | - { - "object": "fine_tuning.job.event", - "id": "ftevent-abc123" - "created_at": 1677610602, - "level": "info", - "message": "Created fine-tuning job" - } - - FineTuningJobCheckpoint: - type: object - title: FineTuningJobCheckpoint - description: | - The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. - properties: - id: - type: string - description: The checkpoint identifier, which can be referenced in the API endpoints. - created_at: + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run failed. type: integer - description: The Unix timestamp (in seconds) for when the checkpoint was created. - fine_tuned_model_checkpoint: - type: string - description: The name of the fine-tuned checkpoint model that is created. - step_number: + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run was completed. type: integer - description: The step number that the checkpoint was created at. - metrics: + nullable: true + incomplete_details: + description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. type: object - description: Metrics at the step number during the fine-tuning job. 
+ nullable: true properties: - step: - type: number - train_loss: - type: number - train_mean_token_accuracy: - type: number - valid_loss: - type: number - valid_mean_token_accuracy: - type: number - full_valid_loss: - type: number - full_valid_mean_token_accuracy: - type: number - fine_tuning_job_id: + reason: + description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. + type: string + enum: [ "max_completion_tokens", "max_prompt_tokens" ] + model: + description: The model that the [assistant](/docs/api-reference/assistants) used for this run. type: string - description: The name of the fine-tuning job that this checkpoint was created from. - object: + instructions: + description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. type: string - description: The object type, which is always "fine_tuning.job.checkpoint". - enum: [ fine_tuning.job.checkpoint ] + tools: + description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + default: [ ] + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunCompletionUsage" + temperature: + description: The sampling temperature used for this run. If not set, defaults to 1. + type: number + nullable: true + top_p: + description: The nucleus sampling value used for this run. If not set, defaults to 1. + type: number + nullable: true + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens specified to have been used over the course of the run. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens specified to have been used over the course of the run. 
+ minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - - created_at - - fine_tuning_job_id - - fine_tuned_model_checkpoint - id - - metrics - object - - step_number + - created_at + - thread_id + - assistant_id + - status + - required_action + - last_error + - expires_at + - started_at + - cancelled_at + - failed_at + - completed_at + - model + - instructions + - tools + - metadata + - usage + - incomplete_details + - max_prompt_tokens + - max_completion_tokens + - truncation_strategy + - tool_choice + - parallel_tool_calls + - response_format x-oaiMeta: - name: The fine-tuning job checkpoint object + name: The run object + beta: true example: | { - "object": "fine_tuning.job.checkpoint", - "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", - "created_at": 1712211699, - "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88", - "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", - "metrics": { - "step": 88, - "train_loss": 0.478, - "train_mean_token_accuracy": 0.924, - "valid_loss": 10.112, - "valid_mean_token_accuracy": 0.145, - "full_valid_loss": 0.567, - "full_valid_mean_token_accuracy": 0.944 + "id": "run_abc123", + "object": "thread.run", + "created_at": 1698107661, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699073476, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699073498, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], + "metadata": {}, + "incomplete_details": null, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 }, - "step_number": 88 + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true } - - FinetuneChatRequestInput: + CreateRunRequest: type: object - description: The per-line training example of a fine-tuning input file for chat models + additionalProperties: false properties: - messages: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + type: string + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. 
+ example: "gpt-4o" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + nullable: true + instructions: + description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. + type: string + nullable: true + additional_instructions: + description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + type: string + nullable: true + additional_messages: + description: Adds additional messages to the thread before creating the run. type: array - minItems: 1 items: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" - - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" - - $ref: "#/components/schemas/FineTuneChatCompletionRequestAssistantMessage" - - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" - - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" - x-oaiExpandable: true + $ref: "#/components/schemas/CreateMessageRequest" + nullable: true tools: + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + nullable: true type: array - description: A list of tools the model may generate JSON inputs for. + maxItems: 20 items: - $ref: "#/components/schemas/ChatCompletionTool" + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *run_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. 
If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true parallel_tool_calls: $ref: "#/components/schemas/ParallelToolCalls" - functions: - deprecated: true - description: - A list of functions the model may generate JSON inputs for. - type: array - minItems: 1 - maxItems: 128 - items: - $ref: "#/components/schemas/ChatCompletionFunctions" - x-oaiMeta: - name: Training format for chat models - example: | - { - "messages": [ - { "role": "user", "content": "What is the weather in San Francisco?" }, - { - "role": "assistant", - "tool_calls": [ - { - "id": "call_id", - "type": "function", - "function": { - "name": "get_current_weather", - "arguments": "{\"location\": \"San Francisco, USA\", \"format\": \"celsius\"}" - } - } - ] - } - ], - "parallel_tool_calls": false, - "tools": [ - { - "type": "function", - "function": { - "name": "get_current_weather", - "description": "Get the current weather", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and country, eg. San Francisco, USA" - }, - "format": { "type": "string", "enum": ["celsius", "fahrenheit"] } - }, - "required": ["location", "format"] - } - } - } - ] - } - - FinetuneCompletionRequestInput: + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - thread_id + - assistant_id + ListRunsResponse: type: object - description: The per-line training example of a fine-tuning input file for completions models properties: - prompt: + object: type: string - description: The input prompt for this training example. - completion: + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/RunObject" + first_id: type: string - description: The desired completion for this training example. - x-oaiMeta: - name: Training format for completions models - example: | - { - "prompt": "What is the answer to 2+2", - "completion": "4" - } - - CompletionUsage: - type: object - description: Usage statistics for the completion request. - properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). + example: "run_abc123" + last_id: + type: string + example: "run_abc456" + has_more: + type: boolean + example: false required: - - prompt_tokens - - completion_tokens - - total_tokens - - RunCompletionUsage: + - object + - data + - first_id + - last_id + - has_more + ModifyRunRequest: type: object - description: Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). + additionalProperties: false properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). 
- required: - - prompt_tokens - - completion_tokens - - total_tokens - nullable: true - - RunStepCompletionUsage: + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + SubmitToolOutputsRunRequest: type: object - description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. + additionalProperties: false properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run step. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run step. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). - required: - - prompt_tokens - - completion_tokens - - total_tokens - nullable: true - - AssistantsApiResponseFormatOption: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - oneOf: - - type: string - description: > - `auto` is the default value - enum: [ auto ] - - $ref: '#/components/schemas/ResponseFormatText' - - $ref: '#/components/schemas/ResponseFormatJsonObject' - - $ref: '#/components/schemas/ResponseFormatJsonSchema' - x-oaiExpandable: true + tool_outputs: + description: A list of tools for which the outputs are being submitted. + type: array + items: + type: object + properties: + tool_call_id: + type: string + description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. + output: + type: string + description: The output of the tool call to be submitted to continue the run. + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + required: + - tool_outputs - AssistantObject: + RunToolCallObject: type: object - title: Assistant - description: Represents an `assistant` that can call the model and use tools. + description: Tool call objects properties: id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `assistant`. type: string - enum: [ assistant ] - created_at: - description: The Unix timestamp (in seconds) for when the assistant was created. - type: integer - name: - description: &assistant_name_param_description | - The name of the assistant. The maximum length is 256 characters. 
+ description: The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + type: type: string - maxLength: 256 - nullable: true - description: - description: &assistant_description_param_description | - The description of the assistant. The maximum length is 512 characters. + description: The type of tool call the output is required for. For now, this is always `function`. + enum: [ "function" ] + function: + type: object + description: The function definition. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments that the model expects you to pass to the function. + required: + - name + - arguments + required: + - id + - type + - function + + CreateThreadAndRunRequest: + type: object + additionalProperties: false + properties: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. type: string - maxLength: 512 - nullable: true + thread: + $ref: "#/components/schemas/CreateThreadRequest" + description: If no thread is provided, an empty thread will be created. model: - description: *model_description - type: string + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + example: "gpt-4o" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + nullable: true instructions: - description: &assistant_instructions_param_description | - The system instructions that the assistant uses. The maximum length is 256,000 characters. + description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. type: string - maxLength: 256000 nullable: true tools: - description: &assistant_tools_param_description | - A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - default: [ ] + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + nullable: true type: array - maxItems: 128 + maxItems: 20 items: oneOf: - $ref: "#/components/schemas/AssistantToolsCode" - $ref: "#/components/schemas/AssistantToolsFileSearch" - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true tool_resources: type: object description: | @@ -10422,7 +12876,7 @@ components: file_ids: type: array description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. 
There can be a maximum of 20 files associated with the tool. default: [ ] maxItems: 20 items: @@ -10439,20 +12893,18 @@ components: type: string nullable: true metadata: - description: &metadata_description | - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + description: *metadata_description type: object x-oaiTypeLabel: map nullable: true temperature: - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. type: number minimum: 0 maximum: 2 default: 1 example: 1 nullable: true + description: *run_temperature_description top_p: type: number minimum: 0 @@ -10460,96 +12912,116 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. + description: *run_top_p_description + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true required: - - id - - object - - created_at - - name - - description - - model - - instructions - - tools - - metadata - x-oaiMeta: - name: The assistant object - beta: true - example: *create_assistants_example + - thread_id + - assistant_id - CreateAssistantRequest: + ThreadObject: type: object - additionalProperties: false + title: Thread + description: Represents a thread that contains [messages](/docs/api-reference/messages). 
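# [Editor's note] A hypothetical request body for the CreateThreadAndRunRequest schema added
# earlier in this hunk; every field name comes from that schema, while the assistant ID and
# message text are placeholders:
#
#   {
#     "assistant_id": "asst_abc123",
#     "thread": {
#       "messages": [
#         { "role": "user", "content": "Explain deep learning to a 5 year old." }
#       ]
#     },
#     "model": "gpt-4o",
#     "stream": false
#   }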
properties: - model: - description: *model_description - example: "gpt-4o" - anyOf: - - type: string - - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - name: - description: *assistant_name_param_description + id: + description: The identifier, which can be referenced in API endpoints. type: string - nullable: true - maxLength: 256 - description: - description: *assistant_description_param_description + object: + description: The object type, which is always `thread`. type: string + enum: [ "thread" ] + created_at: + description: The Unix timestamp (in seconds) for when the thread was created. + type: integer + tool_resources: + type: object + description: | + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [ ] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. + maxItems: 1 + items: + type: string nullable: true - maxLength: 512 - instructions: - description: *assistant_instructions_param_description - type: string + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map nullable: true - maxLength: 256000 - tools: - description: *assistant_tools_param_description - default: [ ] + required: + - id + - object + - created_at + - tool_resources + - metadata + x-oaiMeta: + name: The thread object + beta: true + example: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1698107661, + "metadata": {} + } + + CreateThreadRequest: + type: object + additionalProperties: false + properties: + messages: + description: A list of [messages](/docs/api-reference/messages) to start the thread with. type: array - maxItems: 128 items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true + $ref: "#/components/schemas/CreateMessageRequest" tool_resources: type: object description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. 
For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: code_interpreter: type: object @@ -10568,14 +13040,14 @@ components: vector_store_ids: type: array description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. maxItems: 1 items: type: string vector_stores: type: array description: | - A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant. + A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. maxItems: 1 items: type: object @@ -10638,6 +13110,7 @@ components: description: | Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. x-oaiTypeLabel: map + x-oaiExpandable: true oneOf: - required: [ vector_store_ids ] - required: [ vector_stores ] @@ -10647,66 +13120,15 @@ components: type: object x-oaiTypeLabel: map nullable: true - temperature: - description: *run_temperature_description - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *run_top_p_description - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true - required: - - model - ModifyAssistantRequest: + ModifyThreadRequest: type: object additionalProperties: false properties: - model: - description: *model_description - anyOf: - - type: string - name: - description: *assistant_name_param_description - type: string - nullable: true - maxLength: 256 - description: - description: *assistant_description_param_description - type: string - nullable: true - maxLength: 512 - instructions: - description: *assistant_instructions_param_description - type: string - nullable: true - maxLength: 256000 - tools: - description: *assistant_tools_param_description - default: [ ] - type: array - maxItems: 128 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true tool_resources: type: object description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. 
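# [Editor's note] A minimal sketch of a CreateThreadRequest body matching the schema added
# above (the messages use the plain string form of `content`; the vector store ID and
# metadata values are placeholders):
#
#   {
#     "messages": [
#       { "role": "user", "content": "Summarize the attached report." }
#     ],
#     "tool_resources": {
#       "file_search": { "vector_store_ids": ["vs_abc123"] }
#     },
#     "metadata": { "source": "docs_example" }
#   }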
properties: code_interpreter: type: object @@ -10714,7 +13136,7 @@ components: file_ids: type: array description: | - Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. default: [ ] maxItems: 20 items: @@ -10725,7 +13147,7 @@ components: vector_store_ids: type: array description: | - Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. maxItems: 1 items: type: string @@ -10735,27 +13157,8 @@ components: type: object x-oaiTypeLabel: map nullable: true - temperature: - description: *run_temperature_description - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *run_top_p_description - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true - DeleteAssistantResponse: + DeleteThreadResponse: type: object properties: id: @@ -10764,14 +13167,13 @@ components: type: boolean object: type: string - enum: [ assistant.deleted ] + enum: [ thread.deleted ] required: - id - object - deleted - ListAssistantsResponse: - type: object + ListThreadsResponse: properties: object: type: string @@ -10779,505 +13181,292 @@ components: data: type: array items: - $ref: "#/components/schemas/AssistantObject" - first_id: - type: string - example: "asst_abc123" - last_id: - type: string - example: "asst_abc456" - has_more: - type: boolean - example: false - required: - - object - - data - - first_id - - last_id - - has_more - x-oaiMeta: - name: List assistants response object - group: chat - example: *list_assistants_example - - AssistantToolsCode: - type: object - title: Code interpreter tool - properties: - type: - type: string - description: "The type of tool being defined: `code_interpreter`" - enum: [ "code_interpreter" ] - required: - - type - - AssistantToolsFileSearch: - type: object - title: FileSearch tool - properties: - type: - type: string - description: "The type of tool being defined: `file_search`" - enum: [ "file_search" ] - file_search: - type: object - description: Overrides for the file search tool. - properties: - max_num_results: - type: integer - minimum: 1 - maximum: 50 - description: | - The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive. - - Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. - ranking_options: - $ref: "#/components/schemas/FileSearchRankingOptions" - required: - - type - - FileSearchRankingOptions: - title: File search tool call ranking options - type: object - description: | - The ranking options for the file search. - - See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. 
- properties: - ranker: - type: string - description: The ranker to use for the file search. If not specified will use the `auto` ranker. - enum: [ "auto", "default_2024_08_21" ] - score_threshold: - type: number - description: The score threshold for the file search. All values must be a floating point number between 0 and 1. - minimum: 0 - maximum: 1 - - AssistantToolsFileSearchTypeOnly: - type: object - title: FileSearch tool - properties: - type: - type: string - description: "The type of tool being defined: `file_search`" - enum: [ "file_search" ] - required: - - type - - AssistantToolsFunction: - type: object - title: Function tool - properties: - type: - type: string - description: "The type of tool being defined: `function`" - enum: [ "function" ] - function: - $ref: "#/components/schemas/FunctionObject" - required: - - type - - function - - TruncationObject: - type: object - title: Thread Truncation Controls - description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. - properties: - type: - type: string - description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: [ "auto", "last_messages" ] - last_messages: - type: integer - description: The number of most recent messages from the thread when constructing the context for the run. - minimum: 1 - nullable: true - required: - - type - - AssistantsApiToolChoiceOption: - description: | - Controls which (if any) tool is called by the model. - `none` means the model will not call any tools and instead generates a message. - `auto` is the default value and means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools before responding to the user. - Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - - oneOf: - - type: string - description: > - `none` means the model will not call any tools and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools before responding to the user. - enum: [ none, auto, required ] - - $ref: "#/components/schemas/AssistantsNamedToolChoice" - x-oaiExpandable: true - - AssistantsNamedToolChoice: - type: object - description: Specifies a tool the model should use. Use to force the model to call a specific tool. - properties: - type: + $ref: "#/components/schemas/ThreadObject" + first_id: type: string - enum: [ "function", "code_interpreter", "file_search" ] - description: The type of the tool. If type is `function`, the function name must be set - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - required: - - name + example: "asst_abc123" + last_id: + type: string + example: "asst_abc456" + has_more: + type: boolean + example: false required: - - type + - object + - data + - first_id + - last_id + - has_more - RunObject: + MessageObject: type: object - title: A run on a thread - description: Represents an execution run on a [thread](/docs/api-reference/threads). 
+ title: The message object + description: Represents a message within a [thread](/docs/api-reference/threads). properties: id: description: The identifier, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `thread.run`. + description: The object type, which is always `thread.message`. type: string - enum: [ "thread.run" ] + enum: [ "thread.message" ] created_at: - description: The Unix timestamp (in seconds) for when the run was created. + description: The Unix timestamp (in seconds) for when the message was created. type: integer thread_id: - description: The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. - type: string - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + description: The [thread](/docs/api-reference/threads) ID that this message belongs to. type: string status: - description: The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. + description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. type: string - enum: - [ - "queued", - "in_progress", - "requires_action", - "cancelling", - "cancelled", - "failed", - "completed", - "incomplete", - "expired", - ] - required_action: - type: object - description: Details on the action required to continue the run. Will be `null` if no action is required. - nullable: true - properties: - type: - description: For now, this is always `submit_tool_outputs`. - type: string - enum: [ "submit_tool_outputs" ] - submit_tool_outputs: - type: object - description: Details on the tool outputs needed for this run to continue. - properties: - tool_calls: - type: array - description: A list of the relevant tool calls. - items: - $ref: "#/components/schemas/RunToolCallObject" - required: - - tool_calls - required: - - type - - submit_tool_outputs - last_error: - type: object - description: The last error associated with this run. Will be `null` if there are no errors. - nullable: true - properties: - code: - type: string - description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - enum: - [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] - message: - type: string - description: A human-readable description of the error. - required: - - code - - message - expires_at: - description: The Unix timestamp (in seconds) for when the run will expire. - type: integer - nullable: true - started_at: - description: The Unix timestamp (in seconds) for when the run was started. - type: integer - nullable: true - cancelled_at: - description: The Unix timestamp (in seconds) for when the run was cancelled. - type: integer - nullable: true - failed_at: - description: The Unix timestamp (in seconds) for when the run failed. - type: integer - nullable: true - completed_at: - description: The Unix timestamp (in seconds) for when the run was completed. - type: integer - nullable: true + enum: [ "in_progress", "incomplete", "completed" ] incomplete_details: - description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. - type: object - nullable: true - properties: - reason: - description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. 
- type: string - enum: [ "max_completion_tokens", "max_prompt_tokens" ] - model: - description: The model that the [assistant](/docs/api-reference/assistants) used for this run. - type: string - instructions: - description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. - type: string - tools: - description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. - default: [ ] - type: array - maxItems: 20 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true - metadata: - description: *metadata_description + description: On an incomplete message, details about why the message is incomplete. type: object - x-oaiTypeLabel: map - nullable: true - usage: - $ref: "#/components/schemas/RunCompletionUsage" - temperature: - description: The sampling temperature used for this run. If not set, defaults to 1. - type: number - nullable: true - top_p: - description: The nucleus sampling value used for this run. If not set, defaults to 1. - type: number - nullable: true - max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens specified to have been used over the course of the run. - minimum: 256 - max_completion_tokens: - type: integer - nullable: true - description: | - The maximum number of completion tokens specified to have been used over the course of the run. - minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" - nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" - nullable: true - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true - required: - - id - - object - - created_at - - thread_id - - assistant_id - - status - - required_action - - last_error - - expires_at - - started_at - - cancelled_at - - failed_at - - completed_at - - model - - instructions - - tools - - metadata - - usage - - incomplete_details - - max_prompt_tokens - - max_completion_tokens - - truncation_strategy - - tool_choice - - parallel_tool_calls - - response_format - x-oaiMeta: - name: The run object - beta: true - example: | - { - "id": "run_abc123", - "object": "thread.run", - "created_at": 1698107661, - "assistant_id": "asst_abc123", - "thread_id": "thread_abc123", - "status": "completed", - "started_at": 1699073476, - "expires_at": null, - "cancelled_at": null, - "failed_at": null, - "completed_at": 1699073498, - "last_error": null, - "model": "gpt-4o", - "instructions": null, - "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], - "metadata": {}, - "incomplete_details": null, - "usage": { - "prompt_tokens": 123, - "completion_tokens": 456, - "total_tokens": 579 - }, - "temperature": 1.0, - "top_p": 1.0, - "max_prompt_tokens": 1000, - "max_completion_tokens": 1000, - "truncation_strategy": { - "type": "auto", - "last_messages": null - }, - "response_format": "auto", - "tool_choice": "auto", - "parallel_tool_calls": true - } - CreateRunRequest: - type: object - additionalProperties: false - properties: - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. - type: string - model: - description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. 
If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - example: "gpt-4o" - anyOf: - - type: string - - type: string + properties: + reason: + type: string + description: The reason the message is incomplete. enum: [ - "gpt-4o", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", + "content_filter", + "max_tokens", + "run_cancelled", + "run_expired", + "run_failed", ] - x-oaiTypeLabel: string nullable: true - instructions: - description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. - type: string + required: + - reason + completed_at: + description: The Unix timestamp (in seconds) for when the message was completed. + type: integer nullable: true - additional_instructions: - description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. - type: string + incomplete_at: + description: The Unix timestamp (in seconds) for when the message was marked as incomplete. + type: integer nullable: true - additional_messages: - description: Adds additional messages to the thread before creating the run. + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: [ "user", "assistant" ] + content: + description: The content of the message in array of text and/or images. type: array items: - $ref: "#/components/schemas/CreateMessageRequest" + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageContentTextObject" + - $ref: "#/components/schemas/MessageContentRefusalObject" + x-oaiExpandable: true + assistant_id: + description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. + type: string nullable: true - tools: - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + run_id: + description: The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. + type: string nullable: true + attachments: type: array - maxItems: 20 items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. 
+ type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they were added to. + nullable: true metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *run_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *run_top_p_description - stream: - type: boolean - nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. - max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - max_completion_tokens: - type: integer - nullable: true + required: + - id + - object + - created_at + - thread_id + - status + - incomplete_details + - completed_at + - incomplete_at + - role + - content + - assistant_id + - run_id + - attachments + - metadata + x-oaiMeta: + name: The message object + beta: true + example: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1698983503, + "thread_id": "thread_abc123", + "role": "assistant", + "content": [ + { + "type": "text", + "text": { + "value": "Hi! How can I help you today?", + "annotations": [] + } + } + ], + "assistant_id": "asst_abc123", + "run_id": "run_abc123", + "attachments": [], + "metadata": {} + } + + MessageDeltaObject: + type: object + title: Message delta object + description: | + Represents a message delta i.e. any changed fields on a message during streaming. + properties: + id: + description: The identifier of the message, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.message.delta`. + type: string + enum: [ "thread.message.delta" ] + delta: + description: The delta containing the fields that have changed on the Message. + type: object + properties: + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: [ "user", "assistant" ] + content: + description: The content of the message in array of text and/or images. 
+ type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" + - $ref: "#/components/schemas/MessageDeltaContentTextObject" + - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" + - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" + x-oaiExpandable: true + required: + - id + - object + - delta + x-oaiMeta: + name: The message delta object + beta: true + example: | + { + "id": "msg_123", + "object": "thread.message.delta", + "delta": { + "content": [ + { + "index": 0, + "type": "text", + "text": { "value": "Hello", "annotations": [] } + } + ] + } + } + + CreateMessageRequest: + type: object + additionalProperties: false + required: + - role + - content + properties: + role: + type: string + enum: [ "user", "assistant" ] description: | - The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" + The role of the entity that is creating the message. Allowed values include: + - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. + - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation. + content: + oneOf: + - type: string + description: The text contents of the message. + title: Text content + - type: array + description: An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview). + title: Array of content parts + items: + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageRequestContentTextObject" + x-oaiExpandable: true + minItems: 1 + x-oaiExpandable: true + attachments: + type: array + items: + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they should be added to. 
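# [Editor's note] An illustrative CreateMessageRequest body for the schema being added here;
# field names follow the schema, the file ID and metadata values are placeholders, and the
# attachment's tool entry uses the `file_search` type defined by AssistantToolsFileSearchTypeOnly:
#
#   {
#     "role": "user",
#     "content": "What does this document say about pricing?",
#     "attachments": [
#       { "file_id": "file-abc123", "tools": [ { "type": "file_search" } ] }
#     ],
#     "metadata": { "source": "docs_example" }
#   }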
+ required: + - file_id + - tools nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map nullable: true - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + + ModifyMessageRequest: + type: object + additionalProperties: false + properties: + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map nullable: true - required: - - thread_id - - assistant_id - ListRunsResponse: + + DeleteMessageResponse: type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: [ thread.message.deleted ] + required: + - id + - object + - deleted + + ListMessagesResponse: properties: object: type: string @@ -11285,13 +13474,13 @@ components: data: type: array items: - $ref: "#/components/schemas/RunObject" + $ref: "#/components/schemas/MessageObject" first_id: type: string - example: "run_abc123" + example: "msg_abc123" last_id: type: string - example: "run_abc456" + example: "msg_abc123" has_more: type: boolean example: false @@ -11301,729 +13490,505 @@ components: - first_id - last_id - has_more - ModifyRunRequest: - type: object - additionalProperties: false - properties: - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - SubmitToolOutputsRunRequest: - type: object - additionalProperties: false - properties: - tool_outputs: - description: A list of tools for which the outputs are being submitted. - type: array - items: - type: object - properties: - tool_call_id: - type: string - description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. - output: - type: string - description: The output of the tool call to be submitted to continue the run. - stream: - type: boolean - nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. - required: - - tool_outputs - RunToolCallObject: + MessageContentImageFileObject: + title: Image file type: object - description: Tool call objects + description: References an image [File](/docs/api-reference/files) in the content of a message. properties: - id: - type: string - description: The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. type: + description: Always `image_file`. type: string - description: The type of tool call the output is required for. For now, this is always `function`. - enum: [ "function" ] - function: + enum: [ "image_file" ] + image_file: type: object - description: The function definition. properties: - name: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. type: string - description: The name of the function. - arguments: + detail: type: string - description: The arguments that the model expects you to pass to the function. 
- required: - - name - - arguments - required: - - id - - type - - function - - CreateThreadAndRunRequest: - type: object - additionalProperties: false - properties: - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. - type: string - thread: - $ref: "#/components/schemas/CreateThreadRequest" - description: If no thread is provided, an empty thread will be created. - model: - description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - example: "gpt-4o" - anyOf: - - type: string - - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - nullable: true - instructions: - description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. - type: string - nullable: true - tools: - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - nullable: true - type: array - maxItems: 20 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - tool_resources: - type: object - description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *run_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *run_top_p_description - stream: - type: boolean - nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. 
- max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - max_completion_tokens: - type: integer - nullable: true - description: | - The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" - nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" - nullable: true - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true + description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + enum: [ "auto", "low", "high" ] + default: "auto" + required: + - file_id required: - - thread_id - - assistant_id + - type + - image_file - ThreadObject: + MessageDeltaContentImageFileObject: + title: Image file type: object - title: Thread - description: Represents a thread that contains [messages](/docs/api-reference/messages). + description: References an image [File](/docs/api-reference/files) in the content of a message. properties: - id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `thread`. - type: string - enum: [ "thread" ] - created_at: - description: The Unix timestamp (in seconds) for when the thread was created. + index: type: integer - tool_resources: + description: The index of the content part in the message. + type: + description: Always `image_file`. + type: string + enum: [ "image_file" ] + image_file: type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. 
+ type: string + detail: + type: string + description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + enum: [ "auto", "low", "high" ] + default: "auto" required: - - id - - object - - created_at - - tool_resources - - metadata - x-oaiMeta: - name: The thread object - beta: true - example: | - { - "id": "thread_abc123", - "object": "thread", - "created_at": 1698107661, - "metadata": {} - } + - index + - type - CreateThreadRequest: + MessageContentImageUrlObject: + title: Image URL type: object - additionalProperties: false + description: References an image URL in the content of a message. properties: - messages: - description: A list of [messages](/docs/api-reference/messages) to start the thread with. - type: array - items: - $ref: "#/components/schemas/CreateMessageRequest" - tool_resources: + type: + type: string + enum: [ "image_url" ] + description: The type of the content part. + image_url: type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: string - vector_stores: - type: array - description: | - A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. - maxItems: 10000 - items: - type: string - chunking_strategy: - # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly - type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - oneOf: - - type: object - title: Auto Chunking Strategy - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - additionalProperties: false - properties: - type: - type: string - description: Always `auto`. - enum: [ "auto" ] - required: - - type - - type: object - title: Static Chunking Strategy - additionalProperties: false - properties: - type: - type: string - description: Always `static`. - enum: [ "static" ] - static: - type: object - additionalProperties: false - properties: - max_chunk_size_tokens: - type: integer - minimum: 100 - maximum: 4096 - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: | - The number of tokens that overlap between chunks. 
The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - required: - - type - - static - x-oaiExpandable: true - metadata: - type: object - description: | - Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - x-oaiExpandable: true - oneOf: - - required: [ vector_store_ids ] - - required: [ vector_stores ] - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + url: + type: string + description: "The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." + format: uri + detail: + type: string + description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` + enum: [ "auto", "low", "high" ] + default: "auto" + required: + - url + required: + - type + - image_url - ModifyThreadRequest: + MessageDeltaContentImageUrlObject: + title: Image URL type: object - additionalProperties: false + description: References an image URL in the content of a message. properties: - tool_resources: - type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [ ] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `image_url`. + type: string + enum: [ "image_url" ] + image_url: type: object - x-oaiTypeLabel: map - nullable: true + properties: + url: + description: "The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." + type: string + detail: + type: string + description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. + enum: [ "auto", "low", "high" ] + default: "auto" + required: + - index + - type - DeleteThreadResponse: + MessageContentTextObject: + title: Text type: object + description: The text content that is part of a message. properties: - id: - type: string - deleted: - type: boolean - object: + type: + description: Always `text`. type: string - enum: [ thread.deleted ] + enum: [ "text" ] + text: + type: object + properties: + value: + description: The data that makes up the text. 
+ type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - value + - annotations required: - - id - - object - - deleted + - type + - text - ListThreadsResponse: + MessageContentRefusalObject: + title: Refusal + type: object + description: The refusal content generated by the assistant. properties: - object: - type: string - example: "list" - data: - type: array - items: - $ref: "#/components/schemas/ThreadObject" - first_id: + type: + description: Always `refusal`. type: string - example: "asst_abc123" - last_id: + enum: [ "refusal" ] + refusal: type: string - example: "asst_abc456" - has_more: - type: boolean - example: false + nullable: false required: - - object - - data - - first_id - - last_id - - has_more + - type + - refusal - MessageObject: + MessageRequestContentTextObject: + title: Text type: object - title: The message object - description: Represents a message within a [thread](/docs/api-reference/threads). + description: The text content that is part of a message. properties: - id: - description: The identifier, which can be referenced in API endpoints. + type: + description: Always `text`. type: string - object: - description: The object type, which is always `thread.message`. + enum: [ "text" ] + text: type: string - enum: [ "thread.message" ] - created_at: - description: The Unix timestamp (in seconds) for when the message was created. - type: integer - thread_id: - description: The [thread](/docs/api-reference/threads) ID that this message belongs to. + description: Text content to be sent to the model + required: + - type + - text + + MessageContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + properties: + type: + description: Always `file_citation`. type: string - status: - description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. + enum: [ "file_citation" ] + text: + description: The text in the message content that needs to be replaced. type: string - enum: [ "in_progress", "incomplete", "completed" ] - incomplete_details: - description: On an incomplete message, details about why the message is incomplete. + file_citation: type: object properties: - reason: + file_id: + description: The ID of the specific File the citation is from. type: string - description: The reason the message is incomplete. - enum: - [ - "content_filter", - "max_tokens", - "run_cancelled", - "run_expired", - "run_failed", - ] - nullable: true required: - - reason - completed_at: - description: The Unix timestamp (in seconds) for when the message was completed. + - file_id + start_index: type: integer - nullable: true - incomplete_at: - description: The Unix timestamp (in seconds) for when the message was marked as incomplete. + minimum: 0 + end_index: type: integer - nullable: true - role: - description: The entity that produced the message. One of `user` or `assistant`. - type: string - enum: [ "user", "assistant" ] - content: - description: The content of the message in array of text and/or images. 
- type: array - items: - oneOf: - - $ref: "#/components/schemas/MessageContentImageFileObject" - - $ref: "#/components/schemas/MessageContentImageUrlObject" - - $ref: "#/components/schemas/MessageContentTextObject" - - $ref: "#/components/schemas/MessageContentRefusalObject" - x-oaiExpandable: true - assistant_id: - description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. + minimum: 0 + required: + - type + - text + - file_citation + - start_index + - end_index + + MessageContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + properties: + type: + description: Always `file_path`. type: string - nullable: true - run_id: - description: The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. + enum: [ "file_path" ] + text: + description: The text in the message content that needs to be replaced. type: string - nullable: true - attachments: - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - description: The tools to add this file to. - type: array - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" - x-oaiExpandable: true - description: A list of files attached to the message, and the tools they were added to. - nullable: true - metadata: - description: *metadata_description + file_path: type: object - x-oaiTypeLabel: map - nullable: true + properties: + file_id: + description: The ID of the file that was generated. + type: string + required: + - file_id + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 required: - - id - - object - - created_at - - thread_id - - status - - incomplete_details - - completed_at - - incomplete_at - - role - - content - - assistant_id - - run_id - - attachments - - metadata - x-oaiMeta: - name: The message object - beta: true - example: | - { - "id": "msg_abc123", - "object": "thread.message", - "created_at": 1698983503, - "thread_id": "thread_abc123", - "role": "assistant", - "content": [ - { - "type": "text", - "text": { - "value": "Hi! How can I help you today?", - "annotations": [] - } - } - ], - "assistant_id": "asst_abc123", - "run_id": "run_abc123", - "attachments": [], - "metadata": {} - } + - type + - text + - file_path + - start_index + - end_index - MessageDeltaObject: + MessageDeltaContentTextObject: + title: Text type: object - title: Message delta object - description: | - Represents a message delta i.e. any changed fields on a message during streaming. + description: The text content that is part of a message. properties: - id: - description: The identifier of the message, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `thread.message.delta`. + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `text`. type: string - enum: [ "thread.message.delta" ] - delta: - description: The delta containing the fields that have changed on the Message. + enum: [ "text" ] + text: type: object properties: - role: - description: The entity that produced the message. 
One of `user` or `assistant`. + value: + description: The data that makes up the text. type: string - enum: [ "user", "assistant" ] - content: - description: The content of the message in array of text and/or images. + annotations: type: array items: oneOf: - - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" - - $ref: "#/components/schemas/MessageDeltaContentTextObject" - - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" - - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" x-oaiExpandable: true required: - - id - - object - - delta - x-oaiMeta: - name: The message delta object - beta: true - example: | - { - "id": "msg_123", - "object": "thread.message.delta", - "delta": { - "content": [ - { - "index": 0, - "type": "text", - "text": { "value": "Hello", "annotations": [] } - } - ] - } - } + - index + - type - CreateMessageRequest: + MessageDeltaContentRefusalObject: + title: Refusal type: object - additionalProperties: false + description: The refusal content that is part of a message. + properties: + index: + type: integer + description: The index of the refusal part in the message. + type: + description: Always `refusal`. + type: string + enum: [ "refusal" ] + refusal: + type: string required: - - role - - content + - index + - type + + + MessageDeltaContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. properties: - role: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_citation`. type: string - enum: [ "user", "assistant" ] - description: | - The role of the entity that is creating the message. Allowed values include: - - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. - - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation. - content: + enum: [ "file_citation" ] + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. + type: string + quote: + description: The specific quote in the file. + type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + + MessageDeltaContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_path`. + type: string + enum: [ "file_path" ] + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. 
+ type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + + RunStepObject: + type: object + title: Run steps + description: | + Represents a step in execution of a run. + properties: + id: + description: The identifier of the run step, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.run.step`. + type: string + enum: [ "thread.run.step" ] + created_at: + description: The Unix timestamp (in seconds) for when the run step was created. + type: integer + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + type: string + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was run. + type: string + run_id: + description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + type: string + type: + description: The type of run step, which can be either `message_creation` or `tool_calls`. + type: string + enum: [ "message_creation", "tool_calls" ] + status: + description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. + type: string + enum: [ "in_progress", "cancelled", "failed", "completed", "expired" ] + step_details: + type: object + description: The details of the run step. oneOf: - - type: string - description: The text contents of the message. - title: Text content - - type: array - description: An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview). - title: Array of content parts - items: - oneOf: - - $ref: "#/components/schemas/MessageContentImageFileObject" - - $ref: "#/components/schemas/MessageContentImageUrlObject" - - $ref: "#/components/schemas/MessageRequestContentTextObject" - x-oaiExpandable: true - minItems: 1 + - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" x-oaiExpandable: true - attachments: - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - description: The tools to add this file to. - type: array - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" - x-oaiExpandable: true - description: A list of files attached to the message, and the tools they should be added to. + last_error: + type: object + description: The last error associated with this run step. Will be `null` if there are no errors. + nullable: true + properties: + code: + type: string + description: One of `server_error` or `rate_limit_exceeded`. + enum: [ "server_error", "rate_limit_exceeded" ] + message: + type: string + description: A human-readable description of the error. required: - - file_id - - tools + - code + - message + expired_at: + description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + type: integer nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map + cancelled_at: + description: The Unix timestamp (in seconds) for when the run step was cancelled. 
+ type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run step failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run step completed. + type: integer nullable: true - - ModifyMessageRequest: - type: object - additionalProperties: false - properties: metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true + usage: + $ref: "#/components/schemas/RunStepCompletionUsage" + required: + - id + - object + - created_at + - assistant_id + - thread_id + - run_id + - type + - status + - step_details + - last_error + - expired_at + - cancelled_at + - failed_at + - completed_at + - metadata + - usage + x-oaiMeta: + name: The run step object + beta: true + example: *run_step_object_example - DeleteMessageResponse: + RunStepDeltaObject: type: object + title: Run step delta object + description: | + Represents a run step delta i.e. any changed fields on a run step during streaming. properties: id: + description: The identifier of the run step, which can be referenced in API endpoints. type: string - deleted: - type: boolean object: + description: The object type, which is always `thread.run.step.delta`. type: string - enum: [ thread.message.deleted ] + enum: [ "thread.run.step.delta" ] + delta: + description: The delta containing the fields that have changed on the run step. + type: object + properties: + step_details: + type: object + description: The details of the run step. + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" + x-oaiExpandable: true required: - id - object - - deleted + - delta + x-oaiMeta: + name: The run step delta object + beta: true + example: | + { + "id": "step_123", + "object": "thread.run.step.delta", + "delta": { + "step_details": { + "type": "tool_calls", + "tool_calls": [ + { + "index": 0, + "id": "call_123", + "type": "code_interpreter", + "code_interpreter": { "input": "", "outputs": [] } + } + ] + } + } + } - ListMessagesResponse: + ListRunStepsResponse: properties: object: type: string @@ -12031,13 +13996,13 @@ components: data: type: array items: - $ref: "#/components/schemas/MessageObject" + $ref: "#/components/schemas/RunStepObject" first_id: type: string - example: "msg_abc123" + example: "step_abc123" last_id: type: string - example: "msg_abc123" + example: "step_abc456" has_more: type: boolean example: false @@ -12048,504 +14013,784 @@ components: - last_id - has_more - MessageContentImageFileObject: - title: Image file + RunStepDetailsMessageCreationObject: + title: Message creation type: object - description: References an image [File](/docs/api-reference/files) in the content of a message. + description: Details of the message creation by the run step. properties: type: - description: Always `image_file`. + description: Always `message_creation`. type: string - enum: [ "image_file" ] - image_file: + enum: [ "message_creation" ] + message_creation: type: object properties: - file_id: - description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. + message_id: type: string - detail: + description: The ID of the message that was created by this run step. 
+ required: + - message_id + required: + - type + - message_creation + + RunStepDeltaStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. + properties: + type: + description: Always `message_creation`. + type: string + enum: [ "message_creation" ] + message_creation: + type: object + properties: + message_id: type: string - description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] - default: "auto" + description: The ID of the message that was created by this run step. + required: + - type + + RunStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: [ "tool_calls" ] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + - tool_calls + + RunStepDeltaStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: [ "tool_calls" ] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + + RunStepDetailsToolCallsCodeObject: + title: Code Interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + enum: [ "code_interpreter" ] + code_interpreter: + type: object + description: The Code Interpreter tool call definition. required: - - file_id + - input + - outputs + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true required: + - id - type - - image_file + - code_interpreter - MessageDeltaContentImageFileObject: - title: Image file + RunStepDeltaStepDetailsToolCallsCodeObject: + title: Code interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. 
+ properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + enum: [ "code_interpreter" ] + code_interpreter: + type: object + description: The Code Interpreter tool call definition. + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true + required: + - index + - type + + RunStepDetailsToolCallsCodeOutputLogsObject: + title: Code Interpreter log output + type: object + description: Text output from the Code Interpreter tool call as part of a run step. + properties: + type: + description: Always `logs`. + type: string + enum: [ "logs" ] + logs: + type: string + description: The text output from the Code Interpreter tool call. + required: + - type + - logs + + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: + title: Code interpreter log output type: object - description: References an image [File](/docs/api-reference/files) in the content of a message. + description: Text output from the Code Interpreter tool call as part of a run step. properties: index: type: integer - description: The index of the content part in the message. + description: The index of the output in the outputs array. type: - description: Always `image_file`. + description: Always `logs`. type: string - enum: [ "image_file" ] - image_file: - type: object - properties: - file_id: - description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. - type: string - detail: - type: string - description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] - default: "auto" + enum: [ "logs" ] + logs: + type: string + description: The text output from the Code Interpreter tool call. required: - index - type - MessageContentImageUrlObject: - title: Image URL + RunStepDetailsToolCallsCodeOutputImageObject: + title: Code Interpreter image output type: object - description: References an image URL in the content of a message. properties: type: + description: Always `image`. type: string - enum: [ "image_url" ] - description: The type of the content part. - image_url: + enum: [ "image" ] + image: type: object properties: - url: - type: string - description: "The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." - format: uri - detail: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. type: string - description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. 
Default value is `auto` - enum: [ "auto", "low", "high" ] - default: "auto" required: - - url + - file_id required: - type - - image_url + - image - MessageDeltaContentImageUrlObject: - title: Image URL + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: + title: Code interpreter image output type: object - description: References an image URL in the content of a message. properties: index: type: integer - description: The index of the content part in the message. + description: The index of the output in the outputs array. type: - description: Always `image_url`. + description: Always `image`. type: string - enum: [ "image_url" ] - image_url: + enum: [ "image" ] + image: type: object properties: - url: - description: "The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." - type: string - detail: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. type: string - description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: [ "auto", "low", "high" ] - default: "auto" required: - index - type - MessageContentTextObject: - title: Text + RunStepDetailsToolCallsFileSearchObject: + title: File search tool call type: object - description: The text content that is part of a message. properties: + id: + type: string + description: The ID of the tool call object. type: - description: Always `text`. type: string - enum: [ "text" ] - text: + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + enum: [ "file_search" ] + file_search: type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map properties: - value: - description: The data that makes up the text. - type: string - annotations: + ranking_options: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject" + results: type: array + description: The results of the file search. items: - oneOf: - - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" - - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" - x-oaiExpandable: true - required: - - value - - annotations + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" required: + - id - type - - text + - file_search - MessageContentRefusalObject: - title: Refusal + RunStepDetailsToolCallsFileSearchRankingOptionsObject: + title: File search tool call ranking options type: object - description: The refusal content generated by the assistant. + description: The ranking options for the file search. properties: - type: - description: Always `refusal`. - type: string - enum: [ "refusal" ] - refusal: + ranker: type: string - nullable: false + description: The ranker used for the file search. + enum: [ "default_2024_08_21" ] + score_threshold: + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 required: - - type - - refusal + - ranker + - score_threshold - MessageRequestContentTextObject: - title: Text + RunStepDetailsToolCallsFileSearchResultObject: + title: File search tool call result type: object - description: The text content that is part of a message. + description: A result instance of the file search. + x-oaiTypeLabel: map properties: - type: - description: Always `text`. 
+ file_id: type: string - enum: [ "text" ] - text: + description: The ID of the file that result was found in. + file_name: type: string - description: Text content to be sent to the model + description: The name of the file that result was found in. + score: + type: number + description: The score of the result. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + content: + type: array + description: The content of the result that was found. The content is only included if requested via the include query parameter. + items: + type: object + properties: + type: + type: string + description: The type of the content. + enum: [ "text" ] + text: + type: string + description: The text content of the file. required: - - type - - text + - file_id + - file_name + - score - MessageContentTextAnnotationsFileCitationObject: - title: File citation + RunStepDeltaStepDetailsToolCallsFileSearchObject: + title: File search tool call type: object - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. properties: - type: - description: Always `file_citation`. + index: + type: integer + description: The index of the tool call in the tool calls array. + id: type: string - enum: [ "file_citation" ] - text: - description: The text in the message content that needs to be replaced. + description: The ID of the tool call object. + type: type: string - file_citation: + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + enum: [ "file_search" ] + file_search: type: object - properties: - file_id: - description: The ID of the specific File the citation is from. - type: string - required: - - file_id - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map required: + - index - type - - text - - file_citation - - start_index - - end_index + - file_search - MessageContentTextAnnotationsFilePathObject: - title: File path + RunStepDetailsToolCallsFunctionObject: type: object - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + title: Function tool call properties: - type: - description: Always `file_path`. + id: type: string - enum: [ "file_path" ] - text: - description: The text in the message content that needs to be replaced. + description: The ID of the tool call object. + type: type: string - file_path: + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: [ "function" ] + function: type: object + description: The definition of the function that was called. properties: - file_id: - description: The ID of the file that was generated. + name: + type: string + description: The name of the function. + arguments: type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. 
+ nullable: true required: - - file_id - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + - name + - arguments + - output required: + - id - type - - text - - file_path - - start_index - - end_index - - MessageDeltaContentTextObject: - title: Text + - function + + RunStepDeltaStepDetailsToolCallsFunctionObject: type: object - description: The text content that is part of a message. + title: Function tool call properties: index: type: integer - description: The index of the content part in the message. + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. type: - description: Always `text`. type: string - enum: [ "text" ] - text: + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: [ "function" ] + function: type: object + description: The definition of the function that was called. properties: - value: - description: The data that makes up the text. + name: type: string - annotations: - type: array - items: - oneOf: - - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject" - - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" - x-oaiExpandable: true + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + nullable: true required: - index - type - MessageDeltaContentRefusalObject: - title: Refusal + VectorStoreExpirationAfter: type: object - description: The refusal content that is part of a message. + title: Vector store expiration policy + description: The expiration policy for a vector store. properties: - index: - type: integer - description: The index of the refusal part in the message. - type: - description: Always `refusal`. - type: string - enum: [ "refusal" ] - refusal: + anchor: + description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." type: string + enum: [ "last_active_at" ] + days: + description: The number of days after the anchor time that the vector store will expire. + type: integer + minimum: 1 + maximum: 365 required: - - index - - type - + - anchor + - days - MessageDeltaContentTextAnnotationsFileCitationObject: - title: File citation + VectorStoreObject: type: object - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + title: Vector store + description: A vector store is a collection of processed files can be used by the `file_search` tool. properties: - index: - type: integer - description: The index of the annotation in the text content part. - type: - description: Always `file_citation`. + id: + description: The identifier, which can be referenced in API endpoints. type: string - enum: [ "file_citation" ] - text: - description: The text in the message content that needs to be replaced. + object: + description: The object type, which is always `vector_store`. type: string - file_citation: + enum: [ "vector_store" ] + created_at: + description: The Unix timestamp (in seconds) for when the vector store was created. + type: integer + name: + description: The name of the vector store. 
+ type: string + usage_bytes: + description: The total number of bytes used by the files in the vector store. + type: integer + file_counts: type: object properties: - file_id: - description: The ID of the specific File the citation is from. - type: string - quote: - description: The specific quote in the file. - type: string - start_index: + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been successfully processed. + type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that were cancelled. + type: integer + total: + description: The total number of files. + type: integer + required: + - in_progress + - completed + - failed + - cancelled + - total + status: + description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. + type: string + enum: [ "expired", "in_progress", "completed" ] + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + expires_at: + description: The Unix timestamp (in seconds) for when the vector store will expire. type: integer - minimum: 0 - end_index: + nullable: true + last_active_at: + description: The Unix timestamp (in seconds) for when the vector store was last active. type: integer - minimum: 0 + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - - index - - type + - id + - object + - usage_bytes + - created_at + - status + - last_active_at + - name + - file_counts + - metadata + x-oaiMeta: + name: The vector store object + beta: true + example: | + { + "id": "vs_123", + "object": "vector_store", + "created_at": 1698107661, + "usage_bytes": 123456, + "last_active_at": 1698107661, + "name": "my_vector_store", + "status": "completed", + "file_counts": { + "in_progress": 0, + "completed": 100, + "cancelled": 0, + "failed": 0, + "total": 100 + }, + "metadata": {}, + "last_used_at": 1698107661 + } - MessageDeltaContentTextAnnotationsFilePathObject: - title: File path + CreateVectorStoreRequest: type: object - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + additionalProperties: false properties: - index: - type: integer - description: The index of the annotation in the text content part. - type: - description: Always `file_path`. + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + type: array + maxItems: 500 + items: + type: string + name: + description: The name of the vector store. type: string - enum: [ "file_path" ] - text: - description: The text in the message content that needs to be replaced. + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + chunking_strategy: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. 
+ oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + + UpdateVectorStoreRequest: + type: object + additionalProperties: false + properties: + name: + description: The name of the vector store. + type: string + nullable: true + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + + ListVectorStoresResponse: + properties: + object: + type: string + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/VectorStoreObject" + first_id: + type: string + example: "vs_abc123" + last_id: + type: string + example: "vs_abc456" + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + + DeleteVectorStoreResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: type: string - file_path: - type: object - properties: - file_id: - description: The ID of the file that was generated. - type: string - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + enum: [ vector_store.deleted ] required: - - index - - type + - id + - object + - deleted - RunStepObject: + VectorStoreFileObject: type: object - title: Run steps - description: | - Represents a step in execution of a run. + title: Vector store files + description: A list of files attached to a vector store. properties: id: - description: The identifier of the run step, which can be referenced in API endpoints. + description: The identifier, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `thread.run.step`. + description: The object type, which is always `vector_store.file`. type: string - enum: [ "thread.run.step" ] + enum: [ "vector_store.file" ] + usage_bytes: + description: The total vector store usage in bytes. Note that this may be different from the original file size. + type: integer created_at: - description: The Unix timestamp (in seconds) for when the run step was created. + description: The Unix timestamp (in seconds) for when the vector store file was created. type: integer - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. - type: string - thread_id: - description: The ID of the [thread](/docs/api-reference/threads) that was run. - type: string - run_id: - description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. - type: string - type: - description: The type of run step, which can be either `message_creation` or `tool_calls`. + vector_store_id: + description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. type: string - enum: [ "message_creation", "tool_calls" ] status: - description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. + description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. 
type: string - enum: [ "in_progress", "cancelled", "failed", "completed", "expired" ] - step_details: - type: object - description: The details of the run step. - oneOf: - - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" - x-oaiExpandable: true + enum: [ "in_progress", "completed", "cancelled", "failed" ] last_error: type: object - description: The last error associated with this run step. Will be `null` if there are no errors. + description: The last error associated with this vector store file. Will be `null` if there are no errors. nullable: true properties: code: type: string description: One of `server_error` or `rate_limit_exceeded`. - enum: [ "server_error", "rate_limit_exceeded" ] + enum: + [ + "server_error", + "unsupported_file", + "invalid_file", + ] message: type: string description: A human-readable description of the error. required: - code - message - expired_at: - description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. - type: integer - nullable: true - cancelled_at: - description: The Unix timestamp (in seconds) for when the run step was cancelled. - type: integer - nullable: true - failed_at: - description: The Unix timestamp (in seconds) for when the run step failed. - type: integer - nullable: true - completed_at: - description: The Unix timestamp (in seconds) for when the run step completed. - type: integer - nullable: true - metadata: - description: *metadata_description + chunking_strategy: type: object - x-oaiTypeLabel: map - nullable: true - usage: - $ref: "#/components/schemas/RunStepCompletionUsage" + description: The strategy used to chunk the file. + oneOf: + - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" + - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" + x-oaiExpandable: true required: - id - object + - usage_bytes - created_at - - assistant_id - - thread_id - - run_id - - type + - vector_store_id - status - - step_details - last_error - - expired_at - - cancelled_at - - failed_at - - completed_at - - metadata - - usage x-oaiMeta: - name: The run step object + name: The vector store file object beta: true - example: *run_step_object_example + example: | + { + "id": "file-abc123", + "object": "vector_store.file", + "usage_bytes": 1234, + "created_at": 1698107661, + "vector_store_id": "vs_abc123", + "status": "completed", + "last_error": null, + "chunking_strategy": { + "type": "static", + "static": { + "max_chunk_size_tokens": 800, + "chunk_overlap_tokens": 400 + } + } + } - RunStepDeltaObject: + OtherChunkingStrategyResponseParam: type: object - title: Run step delta object - description: | - Represents a run step delta i.e. any changed fields on a run step during streaming. + title: Other Chunking Strategy + description: This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. + additionalProperties: false properties: - id: - description: The identifier of the run step, which can be referenced in API endpoints. + type: type: string - object: - description: The object type, which is always `thread.run.step.delta`. + description: Always `other`. 
+ enum: [ "other" ] + required: + - type + + StaticChunkingStrategyResponseParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: type: string - enum: [ "thread.run.step.delta" ] - delta: - description: The delta containing the fields that have changed on the run step. - type: object - properties: - step_details: - type: object - description: The details of the run step. - oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" - x-oaiExpandable: true + description: Always `static`. + enum: [ "static" ] + static: + $ref: "#/components/schemas/StaticChunkingStrategy" required: - - id - - object - - delta - x-oaiMeta: - name: The run step delta object - beta: true - example: | - { - "id": "step_123", - "object": "thread.run.step.delta", - "delta": { - "step_details": { - "type": "tool_calls", - "tool_calls": [ - { - "index": 0, - "id": "call_123", - "type": "code_interpreter", - "code_interpreter": { "input": "", "outputs": [] } - } - ] - } - } - } + - type + - static + + StaticChunkingStrategy: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + + AutoChunkingStrategyRequestParam: + type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: [ "auto" ] + required: + - type + + StaticChunkingStrategyRequestParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + + ChunkingStrategyRequestParam: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true + + CreateVectorStoreFileRequest: + type: object + additionalProperties: false + properties: + file_id: + description: A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. 
+ type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" + required: + - file_id - ListRunStepsResponse: + ListVectorStoreFilesResponse: properties: object: type: string @@ -12553,13 +14798,13 @@ components: data: type: array items: - $ref: "#/components/schemas/RunStepObject" + $ref: "#/components/schemas/VectorStoreFileObject" first_id: type: string - example: "step_abc123" + example: "file-abc123" last_id: type: string - example: "step_abc456" + example: "file-abc456" has_more: type: boolean example: false @@ -12570,801 +14815,1250 @@ components: - last_id - has_more - RunStepDetailsMessageCreationObject: - title: Message creation + DeleteVectorStoreFileResponse: type: object - description: Details of the message creation by the run step. properties: - type: - description: Always `message_creation`. + id: type: string - enum: [ "message_creation" ] - message_creation: + deleted: + type: boolean + object: + type: string + enum: [ vector_store.file.deleted ] + required: + - id + - object + - deleted + + VectorStoreFileBatchObject: + type: object + title: Vector store file batch + description: A batch of files attached to a vector store. + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `vector_store.file_batch`. + type: string + enum: [ "vector_store.files_batch" ] + created_at: + description: The Unix timestamp (in seconds) for when the vector store files batch was created. + type: integer + vector_store_id: + description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. + type: string + status: + description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. + type: string + enum: [ "in_progress", "completed", "cancelled", "failed" ] + file_counts: type: object properties: - message_id: + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been processed. + type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that where cancelled. + type: integer + total: + description: The total number of files. + type: integer + required: + - in_progress + - completed + - cancelled + - failed + - total + required: + - id + - object + - created_at + - vector_store_id + - status + - file_counts + x-oaiMeta: + name: The vector store files batch object + beta: true + example: | + { + "id": "vsfb_123", + "object": "vector_store.files_batch", + "created_at": 1698107661, + "vector_store_id": "vs_abc123", + "status": "completed", + "file_counts": { + "in_progress": 0, + "completed": 100, + "failed": 0, + "cancelled": 0, + "total": 100 + } + } + + CreateVectorStoreFileBatchRequest: + type: object + additionalProperties: false + properties: + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + type: array + minItems: 1 + maxItems: 500 + items: + type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" + required: + - file_ids + + AssistantStreamEvent: + description: | + Represents an event emitted when streaming a Run. 
+ + Each event in a server-sent events stream has an `event` and `data` property: + + ``` + event: thread.created + data: {"id": "thread_123", "object": "thread", ...} + ``` + + We emit events whenever a new object is created, transitions to a new state, or is being + streamed in parts (deltas). For example, we emit `thread.run.created` when a new run + is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses + to create a message during a run, we emit a `thread.message.created event`, a + `thread.message.in_progress` event, many `thread.message.delta` events, and finally a + `thread.message.completed` event. + + We may add additional events over time, so we recommend handling unknown events gracefully + in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to + integrate the Assistants API with streaming. + oneOf: + - $ref: "#/components/schemas/ThreadStreamEvent" + - $ref: "#/components/schemas/RunStreamEvent" + - $ref: "#/components/schemas/RunStepStreamEvent" + - $ref: "#/components/schemas/MessageStreamEvent" + - $ref: "#/components/schemas/ErrorEvent" + - $ref: "#/components/schemas/DoneEvent" + x-oaiMeta: + name: Assistant stream events + beta: true + + ThreadStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: [ "thread.created" ] + data: + $ref: "#/components/schemas/ThreadObject" + required: + - event + - data + description: Occurs when a new [thread](/docs/api-reference/threads/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" + + RunStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: [ "thread.run.created" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a new [run](/docs/api-reference/runs/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.queued" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.in_progress" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.requires_action" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.completed" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is completed. 
+ x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.incomplete" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.failed" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) fails. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: type: string - description: The ID of the message that was created by this run step. + enum: [ "thread.run.cancelling" ] + data: + $ref: "#/components/schemas/RunObject" required: - - message_id - required: - - type - - message_creation - - RunStepDeltaStepDetailsMessageCreationObject: - title: Message creation - type: object - description: Details of the message creation by the run step. - properties: - type: - description: Always `message_creation`. - type: string - enum: [ "message_creation" ] - message_creation: - type: object + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object properties: - message_id: + event: type: string - description: The ID of the message that was created by this run step. - required: - - type - - RunStepDetailsToolCallsObject: - title: Tool calls - type: object - description: Details of the tool call. - properties: - type: - description: Always `tool_calls`. - type: string - enum: [ "tool_calls" ] - tool_calls: - type: array - description: | - An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - items: - oneOf: - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" - x-oaiExpandable: true - required: - - type - - tool_calls - - RunStepDeltaStepDetailsToolCallsObject: - title: Tool calls - type: object - description: Details of the tool call. - properties: - type: - description: Always `tool_calls`. - type: string - enum: [ "tool_calls" ] - tool_calls: - type: array - description: | - An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - items: - oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" - x-oaiExpandable: true - required: - - type - - RunStepDetailsToolCallsCodeObject: - title: Code Interpreter tool call - type: object - description: Details of the Code Interpreter tool call the run step was involved in. - properties: - id: - type: string - description: The ID of the tool call. - type: - type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. 
- enum: [ "code_interpreter" ] - code_interpreter: - type: object - description: The Code Interpreter tool call definition. + enum: [ "thread.run.cancelled" ] + data: + $ref: "#/components/schemas/RunObject" required: - - input - - outputs + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object properties: - input: + event: type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. - items: - type: object - oneOf: - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" - x-oaiExpandable: true - required: - - id - - type - - code_interpreter + enum: [ "thread.run.expired" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - RunStepDeltaStepDetailsToolCallsCodeObject: - title: Code interpreter tool call - type: object - description: Details of the Code Interpreter tool call the run step was involved in. - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call. - type: - type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: [ "code_interpreter" ] - code_interpreter: - type: object - description: The Code Interpreter tool call definition. + RunStepStreamEvent: + oneOf: + - type: object properties: - input: + event: type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. - items: - type: object - oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject" - x-oaiExpandable: true - required: - - index - - type - - RunStepDetailsToolCallsCodeOutputLogsObject: - title: Code Interpreter log output - type: object - description: Text output from the Code Interpreter tool call as part of a run step. - properties: - type: - description: Always `logs`. - type: string - enum: [ "logs" ] - logs: - type: string - description: The text output from the Code Interpreter tool call. - required: - - type - - logs - - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: - title: Code interpreter log output - type: object - description: Text output from the Code Interpreter tool call as part of a run step. - properties: - index: - type: integer - description: The index of the output in the outputs array. - type: - description: Always `logs`. - type: string - enum: [ "logs" ] - logs: - type: string - description: The text output from the Code Interpreter tool call. 
- required: - - index - - type - - RunStepDetailsToolCallsCodeOutputImageObject: - title: Code Interpreter image output - type: object - properties: - type: - description: Always `image`. - type: string - enum: [ "image" ] - image: - type: object + enum: [ "thread.run.step.created" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.step.in_progress" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.step.delta" ] + data: + $ref: "#/components/schemas/RunStepDeltaObject" + required: + - event + - data + description: Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" + - type: object properties: - file_id: - description: The [file](/docs/api-reference/files) ID of the image. + event: type: string + enum: [ "thread.run.step.completed" ] + data: + $ref: "#/components/schemas/RunStepObject" required: - - file_id - required: - - type - - image - - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: - title: Code interpreter image output - type: object - properties: - index: - type: integer - description: The index of the output in the outputs array. - type: - description: Always `image`. - type: string - enum: [ "image" ] - image: - type: object + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object properties: - file_id: - description: The [file](/docs/api-reference/files) ID of the image. + event: type: string - required: - - index - - type - - RunStepDetailsToolCallsFileSearchObject: - title: File search tool call - type: object - properties: - id: - type: string - description: The ID of the tool call object. - type: - type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: [ "file_search" ] - file_search: - type: object - description: For now, this is always going to be an empty object. - x-oaiTypeLabel: map + enum: [ "thread.run.step.failed" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object properties: - ranking_options: - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject" - results: - type: array - description: The results of the file search. 
- items: - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" - required: - - id - - type - - file_search - - RunStepDetailsToolCallsFileSearchRankingOptionsObject: - title: File search tool call ranking options - type: object - description: The ranking options for the file search. - properties: - ranker: - type: string - description: The ranker used for the file search. - enum: [ "default_2024_08_21" ] - score_threshold: - type: number - description: The score threshold for the file search. All values must be a floating point number between 0 and 1. - minimum: 0 - maximum: 1 - required: - - ranker - - score_threshold - - RunStepDetailsToolCallsFileSearchResultObject: - title: File search tool call result - type: object - description: A result instance of the file search. - x-oaiTypeLabel: map - properties: - file_id: - type: string - description: The ID of the file that result was found in. - file_name: - type: string - description: The name of the file that result was found in. - score: - type: number - description: The score of the result. All values must be a floating point number between 0 and 1. - minimum: 0 - maximum: 1 - content: - type: array - description: The content of the result that was found. The content is only included if requested via the include query parameter. - items: - type: object - properties: - type: - type: string - description: The type of the content. - enum: [ "text" ] - text: - type: string - description: The text content of the file. - required: - - file_id - - file_name - - score - - RunStepDeltaStepDetailsToolCallsFileSearchObject: - title: File search tool call - type: object - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call object. - type: - type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: [ "file_search" ] - file_search: - type: object - description: For now, this is always going to be an empty object. - x-oaiTypeLabel: map - required: - - index - - type - - file_search + event: + type: string + enum: [ "thread.run.step.cancelled" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.step.expired" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - RunStepDetailsToolCallsFunctionObject: - type: object - title: Function tool call - properties: - id: - type: string - description: The ID of the tool call object. - type: - type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: [ "function" ] - function: - type: object - description: The definition of the function that was called. + MessageStreamEvent: + oneOf: + - type: object properties: - name: + event: type: string - description: The name of the function. 
- arguments: + enum: [ "thread.message.created" ] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: type: string - description: The arguments passed to the function. - output: + enum: [ "thread.message.in_progress" ] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: type: string - description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. - nullable: true + enum: [ "thread.message.delta" ] + data: + $ref: "#/components/schemas/MessageDeltaObject" required: - - name - - arguments - - output - required: - - id - - type - - function + - event + - data + description: Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.message.completed" ] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.message.incomplete" ] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - RunStepDeltaStepDetailsToolCallsFunctionObject: + ErrorEvent: type: object - title: Function tool call properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call object. - type: + event: type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: [ "function" ] - function: - type: object - description: The definition of the function that was called. - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. - nullable: true + enum: [ "error" ] + data: + $ref: "#/components/schemas/Error" required: - - index - - type + - event + - data + description: Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + x-oaiMeta: + dataDescription: "`data` is an [error](/docs/guides/error-codes/api-errors)" - VectorStoreExpirationAfter: + DoneEvent: type: object - title: Vector store expiration policy - description: The expiration policy for a vector store. 
properties: - anchor: - description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." + event: type: string - enum: [ "last_active_at" ] - days: - description: The number of days after the anchor time that the vector store will expire. - type: integer - minimum: 1 - maximum: 365 + enum: [ "done" ] + data: + type: string + enum: [ "[DONE]" ] required: - - anchor - - days + - event + - data + description: Occurs when a stream ends. + x-oaiMeta: + dataDescription: "`data` is `[DONE]`" - VectorStoreObject: + Batch: type: object - title: Vector store - description: A vector store is a collection of processed files can be used by the `file_search` tool. properties: id: - description: The identifier, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `vector_store`. type: string - enum: [ "vector_store" ] + enum: [ batch ] + description: The object type, which is always `batch`. + endpoint: + type: string + description: The OpenAI API endpoint used by the batch. + + errors: + type: object + properties: + object: + type: string + description: The object type, which is always `list`. + data: + type: array + items: + type: object + properties: + code: + type: string + description: An error code identifying the error type. + message: + type: string + description: A human-readable message providing more details about the error. + param: + type: string + description: The name of the parameter that caused the error, if applicable. + nullable: true + line: + type: integer + description: The line number of the input file where the error occurred, if applicable. + nullable: true + input_file_id: + type: string + description: The ID of the input file for the batch. + completion_window: + type: string + description: The time frame within which the batch should be processed. + status: + type: string + description: The current status of the batch. + enum: + - validating + - failed + - in_progress + - finalizing + - completed + - expired + - cancelling + - cancelled + output_file_id: + type: string + description: The ID of the file containing the outputs of successfully executed requests. + error_file_id: + type: string + description: The ID of the file containing the outputs of requests with errors. created_at: - description: The Unix timestamp (in seconds) for when the vector store was created. type: integer - name: - description: The name of the vector store. - type: string - usage_bytes: - description: The total number of bytes used by the files in the vector store. + description: The Unix timestamp (in seconds) for when the batch was created. + in_progress_at: type: integer - file_counts: + description: The Unix timestamp (in seconds) for when the batch started processing. + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch will expire. + finalizing_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started finalizing. + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was completed. + failed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch failed. + expired_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch expired. + cancelling_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started cancelling. 
+ cancelled_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was cancelled. + request_counts: type: object properties: - in_progress: - description: The number of files that are currently being processed. + total: type: integer + description: Total number of requests in the batch. completed: - description: The number of files that have been successfully processed. type: integer + description: Number of requests that have been completed successfully. failed: - description: The number of files that have failed to process. - type: integer - cancelled: - description: The number of files that were cancelled. - type: integer - total: - description: The total number of files. type: integer + description: Number of requests that have failed. required: - - in_progress - - completed - - failed - - cancelled - total - status: - description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. - type: string - enum: [ "expired", "in_progress", "completed" ] - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - expires_at: - description: The Unix timestamp (in seconds) for when the vector store will expire. - type: integer - nullable: true - last_active_at: - description: The Unix timestamp (in seconds) for when the vector store was last active. - type: integer - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - required: - - id - - object - - usage_bytes - - created_at - - status - - last_active_at - - name - - file_counts - - metadata - x-oaiMeta: - name: The vector store object - beta: true - example: | - { - "id": "vs_123", - "object": "vector_store", - "created_at": 1698107661, - "usage_bytes": 123456, - "last_active_at": 1698107661, - "name": "my_vector_store", - "status": "completed", - "file_counts": { - "in_progress": 0, - "completed": 100, - "cancelled": 0, - "failed": 0, - "total": 100 - }, - "metadata": {}, - "last_used_at": 1698107661 - } - - CreateVectorStoreRequest: - type: object - additionalProperties: false - properties: - file_ids: - description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - type: array - maxItems: 500 - items: - type: string - name: - description: The name of the vector store. - type: string - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - chunking_strategy: - type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. - oneOf: - - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" - - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" - x-oaiExpandable: true + - completed + - failed + description: The request counts for different statuses within the batch. metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true + required: + - id + - object + - endpoint + - input_file_id + - completion_window + - status + - created_at + x-oaiMeta: + name: The batch object + example: *batch_object - UpdateVectorStoreRequest: + BatchRequestInput: type: object - additionalProperties: false + description: The per-line object of the batch input file properties: - name: - description: The name of the vector store. 
+ custom_id: type: string + description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. + method: + type: string + enum: [ "POST" ] + description: The HTTP method to be used for the request. Currently only `POST` is supported. + url: + type: string + description: The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. + x-oaiMeta: + name: The request input object + example: | + {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} + + BatchRequestOutput: + type: object + description: The per-line object of the batch output and error files + properties: + id: + type: string + custom_id: + type: string + description: A developer-provided per-request id that will be used to match outputs to inputs. + response: + type: object nullable: true - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - nullable: true - metadata: - description: *metadata_description + properties: + status_code: + type: integer + description: The HTTP status code of the response + request_id: + type: string + description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. + body: + type: object + x-oaiTypeLabel: map + description: The JSON body of the response + error: type: object - x-oaiTypeLabel: map nullable: true + description: For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + x-oaiMeta: + name: The request output object + example: | + {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-4o-mini", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null} - ListVectorStoresResponse: + ListBatchesResponse: + type: object properties: - object: - type: string - example: "list" data: type: array items: - $ref: "#/components/schemas/VectorStoreObject" + $ref: "#/components/schemas/Batch" first_id: type: string - example: "vs_abc123" + example: "batch_abc123" last_id: type: string - example: "vs_abc456" + example: "batch_abc456" has_more: type: boolean - example: false + object: + type: string + enum: [ list ] required: - object - data - - first_id - - last_id - has_more - DeleteVectorStoreResponse: + AuditLogActorServiceAccount: type: object + description: The service account that performed the audit logged action. properties: id: type: string - deleted: - type: boolean - object: + description: The service account id. + + AuditLogActorUser: + type: object + description: The user who performed the audit logged action. + properties: + id: type: string - enum: [ vector_store.deleted ] - required: - - id - - object - - deleted + description: The user id. + email: + type: string + description: The user email. 
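+  # Illustrative sample (not defined by the upstream OpenAI spec itself): an
+  # `AuditLogActorUser` payload, referenced by the `AuditLogActorSession` schema
+  # below, carries only the two fields above and would serialise as, e.g.:
+  #   {"id": "user-xxx", "email": "user@example.com"}
+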
- VectorStoreFileObject: + AuditLogActorApiKey: type: object - title: Vector store files - description: A list of files attached to a vector store. + description: The API Key used to perform the audit logged action. properties: id: - description: The identifier, which can be referenced in API endpoints. type: string - object: - description: The object type, which is always `vector_store.file`. + description: The tracking id of the API key. + type: type: string - enum: [ "vector_store.file" ] - usage_bytes: - description: The total vector store usage in bytes. Note that this may be different from the original file size. - type: integer - created_at: - description: The Unix timestamp (in seconds) for when the vector store file was created. - type: integer - vector_store_id: - description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. + description: The type of API key. Can be either `user` or `service_account`. + enum: [ "user", "service_account" ] + user: + $ref: "#/components/schemas/AuditLogActorUser" + service_account: + $ref: "#/components/schemas/AuditLogActorServiceAccount" + + AuditLogActorSession: + type: object + description: The session in which the audit logged action was performed. + properties: + user: + $ref: "#/components/schemas/AuditLogActorUser" + ip_address: type: string - status: - description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. + description: The IP address from which the action was performed. + + AuditLogActor: + type: object + description: The actor who performed the audit logged action. + properties: + type: type: string - enum: [ "in_progress", "completed", "cancelled", "failed" ] - last_error: + description: The type of actor. Is either `session` or `api_key`. + enum: [ "session", "api_key" ] + session: type: object - description: The last error associated with this vector store file. Will be `null` if there are no errors. - nullable: true + $ref: "#/components/schemas/AuditLogActorSession" + api_key: + type: object + $ref: "#/components/schemas/AuditLogActorApiKey" + + + AuditLogEventType: + type: string + description: The event type. + x-oaiExpandable: true + enum: + - api_key.created + - api_key.updated + - api_key.deleted + - invite.sent + - invite.accepted + - invite.deleted + - login.succeeded + - login.failed + - logout.succeeded + - logout.failed + - organization.updated + - project.created + - project.updated + - project.archived + - service_account.created + - service_account.updated + - service_account.deleted + - user.added + - user.updated + - user.deleted + + AuditLog: + type: object + description: A log of a user action or configuration change within this organization. + properties: + id: + type: string + description: The ID of this log. + type: + $ref: "#/components/schemas/AuditLogEventType" + + effective_at: + type: integer + description: The Unix timestamp (in seconds) of the event. + project: + type: object + description: The project that the action was scoped to. Absent for actions not scoped to projects. properties: - code: + id: type: string - description: One of `server_error` or `rate_limit_exceeded`. - enum: - [ - "server_error", - "unsupported_file", - "invalid_file", - ] - message: + description: The project ID. + name: + type: string + description: The project title. 
+ actor: + $ref: "#/components/schemas/AuditLogActor" + api_key.created: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + data: + type: object + description: The payload used to create the API key. + properties: + scopes: + type: array + items: + type: string + description: A list of scopes allowed for the API key, e.g. `["api.model.request"]` + api_key.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + changes_requested: + type: object + description: The payload used to update the API key. + properties: + scopes: + type: array + items: + type: string + description: A list of scopes allowed for the API key, e.g. `["api.model.request"]` + api_key.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + invite.sent: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + data: + type: object + description: The payload used to create the invite. + properties: + email: + type: string + description: The email invited to the organization. + role: + type: string + description: The role the email was invited to be. Is either `owner` or `member`. + invite.accepted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + invite.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + login.failed: + type: object + description: The details for events with this `type`. + properties: + error_code: + type: string + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. + logout.failed: + type: object + description: The details for events with this `type`. + properties: + error_code: + type: string + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. + organization.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The organization ID. + changes_requested: + type: object + description: The payload used to update the organization settings. + properties: + title: + type: string + description: The organization title. + description: + type: string + description: The organization description. + name: + type: string + description: The organization name. + settings: + type: object + properties: + threads_ui_visibility: + type: string + description: Visibility of the threads page which shows messages created with the Assistants API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`. + usage_dashboard_visibility: + type: string + description: Visibility of the usage dashboard which shows activity and costs for your organization. One of `ANY_ROLE` or `OWNERS`. + project.created: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + data: + type: object + description: The payload used to create the project. + properties: + name: + type: string + description: The project name. 
+ title: + type: string + description: The title of the project as seen on the dashboard. + project.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + changes_requested: + type: object + description: The payload used to update the project. + properties: + title: + type: string + description: The title of the project as seen on the dashboard. + project.archived: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + service_account.created: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The service account ID. + data: + type: object + description: The payload used to create the service account. + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + service_account.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The service account ID. + changes_requested: + type: object + description: The payload used to updated the service account. + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + service_account.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The service account ID. + user.added: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The user ID. + data: + type: object + description: The payload used to add the user to the project. + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + user.updated: + type: object + description: The details for events with this `type`. + properties: + id: type: string - description: A human-readable description of the error. - required: - - code - - message - chunking_strategy: + description: The project ID. + changes_requested: + type: object + description: The payload used to update the user. + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + user.deleted: type: object - description: The strategy used to chunk the file. - oneOf: - - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" - - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" - x-oaiExpandable: true + description: The details for events with this `type`. + properties: + id: + type: string + description: The user ID. 
required: - id - - object - - usage_bytes - - created_at - - vector_store_id - - status - - last_error + - type + - effective_at + - actor x-oaiMeta: - name: The vector store file object - beta: true + name: The audit log object example: | { - "id": "file-abc123", - "object": "vector_store.file", - "usage_bytes": 1234, - "created_at": 1698107661, - "vector_store_id": "vs_abc123", - "status": "completed", - "last_error": null, - "chunking_strategy": { - "type": "static", - "static": { - "max_chunk_size_tokens": 800, - "chunk_overlap_tokens": 400 + "id": "req_xxx_20240101", + "type": "api_key.created", + "effective_at": 1720804090, + "actor": { + "type": "session", + "session": { + "user": { + "id": "user-xxx", + "email": "user@example.com" + }, + "ip_address": "127.0.0.1", + "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + } + }, + "api_key.created": { + "id": "key_xxxx", + "data": { + "scopes": ["resource.operation"] + } } - } } - OtherChunkingStrategyResponseParam: + ListAuditLogsResponse: type: object - title: Other Chunking Strategy - description: This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. - additionalProperties: false properties: - type: + object: type: string - description: Always `other`. - enum: [ "other" ] - required: - - type - - StaticChunkingStrategyResponseParam: - type: object - title: Static Chunking Strategy - additionalProperties: false - properties: - type: + enum: [ list ] + data: + type: array + items: + $ref: "#/components/schemas/AuditLog" + first_id: type: string - description: Always `static`. - enum: [ "static" ] - static: - $ref: "#/components/schemas/StaticChunkingStrategy" + example: "audit_log-defb456h8dks" + last_id: + type: string + example: "audit_log-hnbkd8s93s" + has_more: + type: boolean + required: - - type - - static + - object + - data + - first_id + - last_id + - has_more - StaticChunkingStrategy: + Invite: type: object - additionalProperties: false + description: Represents an individual `invite` to the organization. properties: - max_chunk_size_tokens: + object: + type: string + enum: [ organization.invite ] + description: The object type, which is always `organization.invite` + id: + type: string + description: The identifier, which can be referenced in API endpoints + email: + type: string + description: The email address of the individual to whom the invite was sent + role: + type: string + enum: [ owner, reader ] + description: "`owner` or `reader`" + status: + type: string + enum: [ accepted, expired, pending ] + description: "`accepted`,`expired`, or `pending`" + invited_at: type: integer - minimum: 100 - maximum: 4096 - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: + description: The Unix timestamp (in seconds) of when the invite was sent. + expires_at: type: integer - description: | - The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. + description: The Unix timestamp (in seconds) of when the invite expires. + accepted_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was accepted. 
+ required: - - max_chunk_size_tokens - - chunk_overlap_tokens + - object + - id + - email + - role + - status + - invited_at + - expires_at + x-oaiMeta: + name: The invite object + example: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } - AutoChunkingStrategyRequestParam: + InviteListResponse: type: object - title: Auto Chunking Strategy - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - additionalProperties: false properties: - type: + object: type: string - description: Always `auto`. - enum: [ "auto" ] + enum: [ list ] + description: The object type, which is always `list` + data: + type: array + items: + $ref: '#/components/schemas/Invite' + first_id: + type: string + description: The first `invite_id` in the retrieved `list` + last_id: + type: string + description: The last `invite_id` in the retrieved `list` + has_more: + type: boolean + description: The `has_more` property is used for pagination to indicate there are additional results. required: - - type + - object + - data - StaticChunkingStrategyRequestParam: + InviteRequest: type: object - title: Static Chunking Strategy - additionalProperties: false properties: - type: + email: type: string - description: Always `static`. - enum: [ "static" ] - static: - $ref: "#/components/schemas/StaticChunkingStrategy" + description: "Send an email to this address" + role: + type: string + enum: [ reader, owner ] + description: "`owner` or `reader`" required: - - type - - static + - email + - role - ChunkingStrategyRequestParam: + InviteDeleteResponse: type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - oneOf: - - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" - - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" - x-oaiExpandable: true + properties: + object: + type: string + enum: [ organization.invite.deleted ] + description: The object type, which is always `organization.invite.deleted` + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted - CreateVectorStoreFileRequest: + User: type: object - additionalProperties: false + description: Represents an individual `user` within an organization. properties: - file_id: - description: A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. + object: type: string - chunking_strategy: - $ref: "#/components/schemas/ChunkingStrategyRequestParam" + enum: [ organization.user ] + description: The object type, which is always `organization.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: [ owner, reader ] + description: "`owner` or `reader`" + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the user was added. 
required: - - file_id + - object + - id + - name + - email + - role + - added_at + x-oaiMeta: + name: The user object + example: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } - ListVectorStoreFilesResponse: + UserListResponse: + type: object properties: object: type: string - example: "list" + enum: [ list ] data: type: array items: - $ref: "#/components/schemas/VectorStoreFileObject" + $ref: '#/components/schemas/User' first_id: type: string - example: "file-abc123" last_id: type: string - example: "file-abc456" has_more: type: boolean - example: false required: - object - data @@ -13372,687 +16066,483 @@ components: - last_id - has_more - DeleteVectorStoreFileResponse: + UserRoleUpdateRequest: + type: object + properties: + role: + type: string + enum: [ owner,reader ] + description: "`owner` or `reader`" + required: + - role + + UserDeleteResponse: type: object properties: + object: + type: string + enum: [ organization.user.deleted ] id: type: string deleted: type: boolean - object: - type: string - enum: [ vector_store.file.deleted ] required: - - id - object + - id - deleted - VectorStoreFileBatchObject: + Project: type: object - title: Vector store file batch - description: A batch of files attached to a vector store. + description: Represents an individual project. properties: id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `vector_store.file_batch`. - type: string - enum: [ "vector_store.files_batch" ] - created_at: - description: The Unix timestamp (in seconds) for when the vector store files batch was created. - type: integer - vector_store_id: - description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. - type: string - status: - description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. - type: string - enum: [ "in_progress", "completed", "cancelled", "failed" ] - file_counts: - type: object - properties: - in_progress: - description: The number of files that are currently being processed. - type: integer - completed: - description: The number of files that have been processed. - type: integer - failed: - description: The number of files that have failed to process. - type: integer - cancelled: - description: The number of files that where cancelled. - type: integer - total: - description: The total number of files. - type: integer - required: - - in_progress - - completed - - cancelled - - failed - - total + type: string + description: The identifier, which can be referenced in API endpoints + object: + type: string + enum: [ organization.project ] + description: The object type, which is always `organization.project` + name: + type: string + description: The name of the project. This appears in reporting. + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was created. + archived_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) of when the project was archived or `null`. + status: + type: string + enum: [ active, archived ] + description: "`active` or `archived`" + app_use_case: + type: string + description: A description of your business, project, or use case. 
[Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). + business_website: + type: string + description: Your business URL, or if you don't have one yet, a URL to your LinkedIn or other social media. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). required: - id - object + - name - created_at - - vector_store_id - status - - file_counts x-oaiMeta: - name: The vector store files batch object - beta: true + name: The project object example: | { - "id": "vsfb_123", - "object": "vector_store.files_batch", - "created_at": 1698107661, - "vector_store_id": "vs_abc123", - "status": "completed", - "file_counts": { - "in_progress": 0, - "completed": 100, - "failed": 0, - "cancelled": 0, - "total": 100 - } + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active", + "app_use_case": "Your project use case here", + "business_website": "https://example.com" } - CreateVectorStoreFileBatchRequest: + ProjectListResponse: type: object - additionalProperties: false properties: - file_ids: - description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + object: + type: string + enum: [ list ] + data: type: array - minItems: 1 - maxItems: 500 items: - type: string - chunking_strategy: - $ref: "#/components/schemas/ChunkingStrategyRequestParam" + $ref: '#/components/schemas/Project' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean required: - - file_ids + - object + - data + - first_id + - last_id + - has_more - AssistantStreamEvent: - description: | - Represents an event emitted when streaming a Run. - - Each event in a server-sent events stream has an `event` and `data` property: - - ``` - event: thread.created - data: {"id": "thread_123", "object": "thread", ...} - ``` - - We emit events whenever a new object is created, transitions to a new state, or is being - streamed in parts (deltas). For example, we emit `thread.run.created` when a new run - is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses - to create a message during a run, we emit a `thread.message.created event`, a - `thread.message.in_progress` event, many `thread.message.delta` events, and finally a - `thread.message.completed` event. - - We may add additional events over time, so we recommend handling unknown events gracefully - in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to - integrate the Assistants API with streaming. - oneOf: - - $ref: "#/components/schemas/ThreadStreamEvent" - - $ref: "#/components/schemas/RunStreamEvent" - - $ref: "#/components/schemas/RunStepStreamEvent" - - $ref: "#/components/schemas/MessageStreamEvent" - - $ref: "#/components/schemas/ErrorEvent" - - $ref: "#/components/schemas/DoneEvent" - x-oaiMeta: - name: Assistant stream events - beta: true + ProjectCreateRequest: + type: object + properties: + name: + type: string + description: The friendly name of the project, this name appears in reports. + app_use_case: + type: string + description: A description of your business, project, or use case. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). 
+ business_website: + type: string + description: Your business URL, or if you don't have one yet, a URL to your LinkedIn or other social media. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). + required: + - name - ThreadStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: [ "thread.created" ] - data: - $ref: "#/components/schemas/ThreadObject" - required: - - event - - data - description: Occurs when a new [thread](/docs/api-reference/threads/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" + ProjectUpdateRequest: + type: object + properties: + name: + type: string + description: The updated name of the project, this name appears in reports. + app_use_case: + type: string + description: A description of your business, project, or use case. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). + business_website: + type: string + description: Your business URL, or if you don't have one yet, a URL to your LinkedIn or other social media. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). + required: + - name - RunStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: [ "thread.run.created" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a new [run](/docs/api-reference/runs/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.queued" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.in_progress" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.requires_action" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.completed" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) is completed. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.incomplete" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`. 
- x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.failed" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) fails. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.cancelling" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.cancelled" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.expired" ] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) expires. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + DefaultProjectErrorResponse: + type: object + properties: + code: + type: integer + message: + type: string + required: + - code + - message - RunStepStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.created" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.in_progress" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.delta" ] - data: - $ref: "#/components/schemas/RunStepDeltaObject" - required: - - event - - data - description: Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed. - x-oaiMeta: - dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.completed" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.failed" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails. 
- x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.cancelled" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.run.step.expired" ] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + ProjectUser: + type: object + description: Represents an individual user in a project. + properties: + object: + type: string + enum: [ organization.project.user ] + description: The object type, which is always `organization.project.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: [ owner, member ] + description: "`owner` or `member`" + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was added. - MessageStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: [ "thread.message.created" ] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.message.in_progress" ] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.message.delta" ] - data: - $ref: "#/components/schemas/MessageDeltaObject" - required: - - event - - data - description: Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed. - x-oaiMeta: - dataDescription: "`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)" - - type: object - properties: - event: - type: string - enum: [ "thread.message.completed" ] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) is completed. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object - properties: - event: - type: string - enum: [ "thread.message.incomplete" ] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed. 
- x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + required: + - object + - id + - name + - email + - role + - added_at + x-oaiMeta: + name: The project user object + example: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } - ErrorEvent: + ProjectUserListResponse: + type: object + properties: + object: + type: string + data: + type: array + items: + $ref: '#/components/schemas/ProjectUser' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + + ProjectUserCreateRequest: + type: object + properties: + user_id: + type: string + description: The ID of the user. + role: + type: string + enum: [ owner, member ] + description: "`owner` or `member`" + required: + - user_id + - role + + ProjectUserUpdateRequest: + type: object + properties: + role: + type: string + enum: [ owner, member ] + description: "`owner` or `member`" + required: + - role + + ProjectUserDeleteResponse: + type: object + properties: + object: + type: string + enum: [ organization.project.user.deleted ] + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + + ProjectServiceAccount: + type: object + description: Represents an individual service account in a project. + properties: + object: + type: string + enum: [ organization.project.service_account ] + description: The object type, which is always `organization.project.service_account` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the service account + role: + type: string + enum: [ owner, member ] + description: "`owner` or `member`" + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the service account was created + required: + - object + - id + - name + - role + - created_at + x-oaiMeta: + name: The project service account object + example: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + + ProjectServiceAccountListResponse: type: object properties: - event: + object: type: string - enum: [ "error" ] + enum: [ list ] data: - $ref: "#/components/schemas/Error" + type: array + items: + $ref: '#/components/schemas/ProjectServiceAccount' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean required: - - event + - object - data - description: Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. - x-oaiMeta: - dataDescription: "`data` is an [error](/docs/guides/error-codes/api-errors)" + - first_id + - last_id + - has_more - DoneEvent: + ProjectServiceAccountCreateRequest: type: object properties: - event: - type: string - enum: [ "done" ] - data: + name: type: string - enum: [ "[DONE]" ] + description: The name of the service account being created. required: - - event - - data - description: Occurs when a stream ends. - x-oaiMeta: - dataDescription: "`data` is `[DONE]`" + - name - Batch: + ProjectServiceAccountCreateResponse: type: object properties: - id: - type: string object: type: string - enum: [ batch ] - description: The object type, which is always `batch`. 
- endpoint: + enum: [ organization.project.service_account ] + id: type: string - description: The OpenAI API endpoint used by the batch. - - errors: - type: object - properties: - object: - type: string - description: The object type, which is always `list`. - data: - type: array - items: - type: object - properties: - code: - type: string - description: An error code identifying the error type. - message: - type: string - description: A human-readable message providing more details about the error. - param: - type: string - description: The name of the parameter that caused the error, if applicable. - nullable: true - line: - type: integer - description: The line number of the input file where the error occurred, if applicable. - nullable: true - input_file_id: + name: type: string - description: The ID of the input file for the batch. - completion_window: + role: type: string - description: The time frame within which the batch should be processed. - status: + enum: [ member ] + description: Service accounts can only have one role of type `member` + created_at: + type: integer + api_key: + $ref: '#/components/schemas/ProjectServiceAccountApiKey' + required: + - object + - id + - name + - role + - created_at + - api_key + + ProjectServiceAccountApiKey: + type: object + properties: + object: type: string - description: The current status of the batch. - enum: - - validating - - failed - - in_progress - - finalizing - - completed - - expired - - cancelling - - cancelled - output_file_id: + enum: [ organization.project.service_account.api_key ] + description: The object type, which is always `organization.project.service_account.api_key` + + value: type: string - description: The ID of the file containing the outputs of successfully executed requests. - error_file_id: + name: type: string - description: The ID of the file containing the outputs of requests with errors. created_at: type: integer - description: The Unix timestamp (in seconds) for when the batch was created. - in_progress_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started processing. - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch will expire. - finalizing_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started finalizing. - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was completed. - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch failed. - expired_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch expired. - cancelling_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started cancelling. - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was cancelled. - request_counts: - type: object - properties: - total: - type: integer - description: Total number of requests in the batch. - completed: - type: integer - description: Number of requests that have been completed successfully. - failed: - type: integer - description: Number of requests that have failed. - required: - - total - - completed - - failed - description: The request counts for different statuses within the batch. 
- metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + id: + type: string required: - - id - object - - endpoint - - input_file_id - - completion_window - - status + - value + - name - created_at - x-oaiMeta: - name: The batch object - example: *batch_object + - id - BatchRequestInput: + ProjectServiceAccountDeleteResponse: type: object - description: The per-line object of the batch input file properties: - custom_id: - type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. - method: + object: type: string - enum: [ "POST" ] - description: The HTTP method to be used for the request. Currently only `POST` is supported. - url: + enum: [ organization.project.service_account.deleted ] + id: type: string - description: The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - x-oaiMeta: - name: The request input object - example: | - {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} + deleted: + type: boolean + required: + - object + - id + - deleted - BatchRequestOutput: + ProjectApiKey: type: object - description: The per-line object of the batch output and error files + description: Represents an individual API key in a project. properties: - id: + object: type: string - custom_id: + enum: [ organization.project.api_key ] + description: The object type, which is always `organization.project.api_key` + redacted_value: type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. - response: - type: object - nullable: true - properties: - status_code: - type: integer - description: The HTTP status code of the response - request_id: - type: string - description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. - body: - type: object - x-oaiTypeLabel: map - description: The JSON body of the response - error: + description: The redacted value of the API key + name: + type: string + description: The name of the API key + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the API key was created + id: + type: string + description: The identifier, which can be referenced in API endpoints + owner: type: object - nullable: true - description: For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure. properties: - code: - type: string - description: A machine-readable error code. - message: + type: type: string - description: A human-readable error message. 
+              enum: [ user, service_account ]
+              description: "`user` or `service_account`"
+            user:
+              $ref: '#/components/schemas/ProjectUser'
+            service_account:
+              $ref: '#/components/schemas/ProjectServiceAccount'
+      required:
+        - object
+        - redacted_value
+        - name
+        - created_at
+        - id
+        - owner
       x-oaiMeta:
-        name: The request output object
+        name: The project API key object
         example: |
-          {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-4o-mini", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null}
+          {
+            "object": "organization.project.api_key",
+            "redacted_value": "sk-abc...def",
+            "name": "My API Key",
+            "created_at": 1711471533,
+            "id": "key_abc",
+            "owner": {
+              "type": "user",
+              "user": {
+                "object": "organization.project.user",
+                "id": "user_abc",
+                "name": "First Last",
+                "email": "user@example.com",
+                "role": "owner",
+                "created_at": 1711471533
+              }
+            }
+          }

-    ListBatchesResponse:
+    ProjectApiKeyListResponse:
       type: object
       properties:
+        object:
+          type: string
+          enum: [ list ]
         data:
           type: array
           items:
-            $ref: "#/components/schemas/Batch"
+            $ref: '#/components/schemas/ProjectApiKey'
         first_id:
           type: string
-          example: "batch_abc123"
         last_id:
           type: string
-          example: "batch_abc456"
         has_more:
           type: boolean
-        object:
-          type: string
-          enum: [ list ]
       required:
         - object
         - data
+        - first_id
+        - last_id
         - has_more

+    ProjectApiKeyDeleteResponse:
+      type: object
+      properties:
+        object:
+          type: string
+          enum: [ organization.project.api_key.deleted ]
+        id:
+          type: string
+        deleted:
+          type: boolean
+      required:
+        - object
+        - id
+        - deleted
+
 security:
   - ApiKeyAuth: [ ]
diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml
index f98e6d9a..cff9352b 100644
--- a/packages/openai_dart/pubspec.yaml
+++ b/packages/openai_dart/pubspec.yaml
@@ -1,5 +1,5 @@
 name: openai_dart
-description: Dart client for the OpenAI API. Supports chat (GPT-4o, etc.), completions, embeddings, images (DALL·E 3), assistants (threads, runs, vector stores, etc.), batch, fine-tuning, etc.
+description: Dart client for the OpenAI API. Supports chat (GPT-4o, o1, etc.), completions, embeddings, images (DALL·E 3), assistants (threads, runs, vector stores, etc.), batch, fine-tuning, etc.
version: 0.4.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart From 4c3e83c925b0672d0cfad97470e530ac7d10fa9a Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 25 Sep 2024 23:37:44 +0200 Subject: [PATCH 122/251] feat: Add support for maxCompletionTokens and reasoningTokens in openai_dart (#556) --- .../generated/schema/assistant_object.dart | 8 +- .../src/generated/schema/assistant_tools.dart | 3 +- .../schema/completion_tokens_details.dart | 41 ++ .../generated/schema/completion_usage.dart | 8 +- .../schema/create_assistant_request.dart | 10 +- .../create_chat_completion_request.dart | 101 ++- .../create_fine_tuning_job_request.dart | 4 +- .../generated/schema/create_run_request.dart | 10 +- .../schema/create_thread_and_run_request.dart | 10 +- .../schema/file_search_ranking_options.dart | 10 +- .../schema/modify_assistant_request.dart | 8 +- .../src/generated/schema/response_format.dart | 19 +- .../lib/src/generated/schema/run_object.dart | 8 +- .../lib/src/generated/schema/schema.dart | 1 + .../src/generated/schema/schema.freezed.dart | 645 ++++++++++++++---- .../lib/src/generated/schema/schema.g.dart | 53 +- packages/openai_dart/oas/openapi_curated.yaml | 118 +++- .../openai_dart/oas/openapi_official.yaml | 65 +- .../test/openai_client_chat_test.dart | 8 +- 19 files changed, 853 insertions(+), 277 deletions(-) create mode 100644 packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart index f0e1f3b5..4c7ba8df 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart @@ -65,11 +65,11 @@ class AssistantObject with _$AssistantObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -191,11 +191,11 @@ enum AssistantResponseFormatMode { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
 ///
-/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates
+/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates
 /// is valid JSON.
 ///
 /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a
diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart
index 920d2301..30a5cacc 100644
--- a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart
+++ b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart
@@ -88,7 +88,8 @@ class AssistantToolsFileSearchFileSearch
     /// for more information.
     @JsonKey(name: 'max_num_results', includeIfNull: false)
     int? maxNumResults,
-    /// The ranking options for the file search.
+    /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and
+    /// a score_threshold of 0.
     ///
     /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
     /// for more information.
diff --git a/packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart b/packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart
new file mode 100644
index 00000000..14fe08a8
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart
@@ -0,0 +1,41 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of open_a_i_schema;
+
+// ==========================================
+// CLASS: CompletionTokensDetails
+// ==========================================
+
+/// Breakdown of tokens used in a completion.
+@freezed
+class CompletionTokensDetails with _$CompletionTokensDetails {
+  const CompletionTokensDetails._();
+
+  /// Factory constructor for CompletionTokensDetails
+  const factory CompletionTokensDetails({
+    /// Tokens generated by the model for reasoning.
+    @JsonKey(name: 'reasoning_tokens', includeIfNull: false)
+    int? reasoningTokens,
+  }) = _CompletionTokensDetails;
+
+  /// Object construction from a JSON representation
+  factory CompletionTokensDetails.fromJson(Map<String, dynamic> json) =>
+      _$CompletionTokensDetailsFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = ['reasoning_tokens'];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'reasoning_tokens': reasoningTokens,
+    };
+  }
+}
diff --git a/packages/openai_dart/lib/src/generated/schema/completion_usage.dart b/packages/openai_dart/lib/src/generated/schema/completion_usage.dart
index 17826175..86877b8e 100644
--- a/packages/openai_dart/lib/src/generated/schema/completion_usage.dart
+++ b/packages/openai_dart/lib/src/generated/schema/completion_usage.dart
@@ -23,6 +23,10 @@ class CompletionUsage with _$CompletionUsage {

     /// Total number of tokens used in the request (prompt + completion).
     @JsonKey(name: 'total_tokens') required int totalTokens,
+
+    /// Breakdown of tokens used in a completion.
+    @JsonKey(name: 'completion_tokens_details', includeIfNull: false)
+    CompletionTokensDetails?
completionTokensDetails, }) = _CompletionUsage; /// Object construction from a JSON representation @@ -33,7 +37,8 @@ class CompletionUsage with _$CompletionUsage { static const List propertyNames = [ 'completion_tokens', 'prompt_tokens', - 'total_tokens' + 'total_tokens', + 'completion_tokens_details' ]; /// Perform validations on the schema property values @@ -47,6 +52,7 @@ class CompletionUsage with _$CompletionUsage { 'completion_tokens': completionTokens, 'prompt_tokens': promptTokens, 'total_tokens': totalTokens, + 'completion_tokens_details': completionTokensDetails, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart index cb7f5b82..312d8f5c 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart @@ -56,11 +56,11 @@ class CreateAssistantRequest with _$CreateAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -185,8 +185,6 @@ enum AssistantModels { gpt4o20240513, @JsonValue('gpt-4o-2024-08-06') gpt4o20240806, - @JsonValue('gpt-4o-2024-08-06') - gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -289,11 +287,11 @@ enum CreateAssistantResponseFormatMode { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. 
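// ---------------------------------------------------------------------------
// A minimal usage sketch, assuming the CompletionUsage and CompletionTokensDetails
// classes shown in the hunks above. It is not taken from the package itself;
// `usage` would come from a real chat completion response in practice, and the
// function name here is only illustrative.
import 'package:openai_dart/openai_dart.dart';

void logReasoningTokens(CompletionUsage usage) {
  // `completion_tokens_details` is optional, so it may be null for models that
  // do not report a reasoning-token breakdown.
  final reasoning = usage.completionTokensDetails?.reasoningTokens ?? 0;
  print('completion tokens: ${usage.completionTokens}, '
      'reasoning tokens: $reasoning');
}
// ---------------------------------------------------------------------------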
/// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 47280735..3d59ae2b 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -15,10 +15,12 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Factory constructor for CreateChatCompletionRequest const factory CreateChatCompletionRequest({ - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. @_ChatCompletionModelConverter() required ChatCompletionModel model, - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). required List messages, /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @@ -30,22 +32,37 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. @JsonKey(name: 'logit_bias', includeIfNull: false) Map? logitBias, - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of + /// each output token returned in the `content` of `message`. @JsonKey(includeIfNull: false) bool? logprobs, - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
+ /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs, - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + /// via API. /// - /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, - /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// An upper bound for the number of tokens that can be generated for a completion, including visible output + /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? maxCompletionTokens, + + /// How many chat completion choices to generate for each input message. Note that you will be charged based on + /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @JsonKey(includeIfNull: false) @Default(1) int? n, /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. @@ -55,27 +72,43 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { @Default(0.0) double? presencePenalty, - /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + /// than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + /// will match your supplied JSON schema. + /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
/// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + /// valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + /// or user message. Without this, the model may generate an unending stream of whitespace until the generation + /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + /// `max_tokens` or the conversation exceeded the max context length. /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @JsonKey(name: 'response_format', includeIfNull: false) ResponseFormat? responseFormat, /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests + /// with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + /// monitor changes in the backend. @JsonKey(includeIfNull: false) int? seed, /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers /// subscribed to the scale tier service: - /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - /// - If set to 'default', the request will be processed using the default service tier with a lower - /// uptime SLA and no latency guarantee. + /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + /// until they are exhausted. + /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + /// default service tier with a lower uptime SLA and no latency guarantee. + /// - If set to 'default', the request will be processed using the default service tier with a lower uptime + /// SLA and no latency guarantee. /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @@ -91,7 +124,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { @JsonKey(includeIfNull: false) ChatCompletionStop? stop, - /// If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @JsonKey(includeIfNull: false) @Default(false) bool? stream, /// Options for streaming response. Only set this when you set `stream: true`. @@ -108,14 +141,17 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// We generally recommend altering this or `temperature` but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. @JsonKey(includeIfNull: false) List? tools, /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + /// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @_ChatCompletionToolChoiceOptionConverter() @@ -135,7 +171,8 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + /// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. 
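// ---------------------------------------------------------------------------
// An illustrative request sketch for the new `maxCompletionTokens` field added
// above; a rough example rather than the package's documented usage. The model
// id and message text are placeholder values, and the helper function is
// hypothetical; the client call follows the package's createChatCompletion API.
import 'package:openai_dart/openai_dart.dart';

Future<void> reasoningRequest(OpenAIClient client) async {
  final res = await client.createChatCompletion(
    request: CreateChatCompletionRequest(
      model: ChatCompletionModel.modelId('o1-mini'),
      messages: [
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string(
            'Summarize the OpenAI batch API in one sentence.',
          ),
        ),
      ],
      // Upper bound on visible output plus reasoning tokens; preferred over the
      // deprecated `maxTokens` for o1-style models.
      maxCompletionTokens: 256,
    ),
  );
  print(res.choices.first.message.content);
}
// ---------------------------------------------------------------------------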
@_ChatCompletionFunctionCallConverter() @@ -161,6 +198,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'logprobs', 'top_logprobs', 'max_tokens', + 'max_completion_tokens', 'n', 'presence_penalty', 'response_format', @@ -251,6 +289,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'logprobs': logprobs, 'top_logprobs': topLogprobs, 'max_tokens': maxTokens, + 'max_completion_tokens': maxCompletionTokens, 'n': n, 'presence_penalty': presencePenalty, 'response_format': responseFormat, @@ -309,8 +348,6 @@ enum ChatCompletionModels { gpt4o20240513, @JsonValue('gpt-4o-2024-08-06') gpt4o20240806, - @JsonValue('gpt-4o-2024-08-06') - gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -343,7 +380,8 @@ enum ChatCompletionModels { // CLASS: ChatCompletionModel // ========================================== -/// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. +/// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) +/// table for details on which models work with the Chat API. @freezed sealed class ChatCompletionModel with _$ChatCompletionModel { const ChatCompletionModel._(); @@ -401,9 +439,12 @@ class _ChatCompletionModelConverter /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers /// subscribed to the scale tier service: -/// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. -/// - If set to 'default', the request will be processed using the default service tier with a lower -/// uptime SLA and no latency guarantee. +/// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits +/// until they are exhausted. +/// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the +/// default service tier with a lower uptime SLA and no latency guarantee. +/// - If set to 'default', the request will be processed using the default service tier with a lower uptime +/// SLA and no latency guarantee. /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @@ -491,7 +532,8 @@ enum ChatCompletionToolChoiceMode { /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. -/// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. +/// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the +/// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @freezed @@ -579,7 +621,8 @@ enum ChatCompletionFunctionCallMode { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. -/// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. 
+/// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that +/// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. @freezed diff --git a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart index 3da0a42e..863ffb57 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart @@ -37,7 +37,7 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) FineTuningJobHyperparameters? hyperparameters, - /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// A string of up to 64 characters that will be added to your fine-tuned model name. /// /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @JsonKey(includeIfNull: false) String? suffix, @@ -80,7 +80,7 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { /// Validation constants static const suffixMinLengthValue = 1; - static const suffixMaxLengthValue = 40; + static const suffixMaxLengthValue = 64; static const seedMinValue = 0; static const seedMaxValue = 2147483647; diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index 6fe86422..3698ed7c 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -84,11 +84,11 @@ class CreateRunRequest with _$CreateRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -223,8 +223,6 @@ enum RunModels { gpt4o20240513, @JsonValue('gpt-4o-2024-08-06') gpt4o20240806, - @JsonValue('gpt-4o-2024-08-06') - gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -416,11 +414,11 @@ enum CreateRunRequestResponseFormatMode { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. 
Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index 1d9c82ee..d58474f8 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -83,11 +83,11 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -222,8 +222,6 @@ enum ThreadAndRunModels { gpt4o20240513, @JsonValue('gpt-4o-2024-08-06') gpt4o20240806, - @JsonValue('gpt-4o-2024-08-06') - gpt4o20240806, @JsonValue('gpt-4o-mini') gpt4oMini, @JsonValue('gpt-4o-mini-2024-07-18') @@ -422,11 +420,11 @@ enum CreateThreadAndRunRequestResponseFormatMode { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. 
/// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a diff --git a/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart b/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart index e60070f0..03533c56 100644 --- a/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart +++ b/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart @@ -8,7 +8,8 @@ part of open_a_i_schema; // CLASS: FileSearchRankingOptions // ========================================== -/// The ranking options for the file search. +/// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and +/// a score_threshold of 0. /// /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) /// for more information. @@ -26,8 +27,7 @@ class FileSearchRankingOptions with _$FileSearchRankingOptions { FileSearchRanker? ranker, /// The score threshold for the file search. All values must be a floating point number between 0 and 1. - @JsonKey(name: 'score_threshold', includeIfNull: false) - double? scoreThreshold, + @JsonKey(name: 'score_threshold') required double scoreThreshold, }) = _FileSearchRankingOptions; /// Object construction from a JSON representation @@ -43,10 +43,10 @@ class FileSearchRankingOptions with _$FileSearchRankingOptions { /// Perform validations on the schema property values String? validateSchema() { - if (scoreThreshold != null && scoreThreshold! < scoreThresholdMinValue) { + if (scoreThreshold < scoreThresholdMinValue) { return "The value of 'scoreThreshold' cannot be < $scoreThresholdMinValue"; } - if (scoreThreshold != null && scoreThreshold! > scoreThresholdMaxValue) { + if (scoreThreshold > scoreThresholdMaxValue) { return "The value of 'scoreThreshold' cannot be > $scoreThresholdMaxValue"; } return null; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart index 2b4d94d1..5bd7ad65 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart @@ -59,11 +59,11 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -171,11 +171,11 @@ enum ModifyAssistantResponseFormatMode { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. 
/// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a diff --git a/packages/openai_dart/lib/src/generated/schema/response_format.dart b/packages/openai_dart/lib/src/generated/schema/response_format.dart index 35b1f30d..7b975680 100644 --- a/packages/openai_dart/lib/src/generated/schema/response_format.dart +++ b/packages/openai_dart/lib/src/generated/schema/response_format.dart @@ -8,13 +8,24 @@ part of open_a_i_schema; // CLASS: ResponseFormat // ========================================== -/// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. +/// An object specifying the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer +/// than `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model +/// will match your supplied JSON schema. +/// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is +/// valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system +/// or user message. Without this, the model may generate an unending stream of whitespace until the generation +/// reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note that the message +/// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded +/// `max_tokens` or the conversation exceeded the max context length. @Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) sealed class ResponseFormat with _$ResponseFormat { const ResponseFormat._(); diff --git a/packages/openai_dart/lib/src/generated/schema/run_object.dart b/packages/openai_dart/lib/src/generated/schema/run_object.dart index 351d140b..98fd5f0c 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_object.dart @@ -110,11 +110,11 @@ class RunObject with _$RunObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -465,11 +465,11 @@ enum RunObjectResponseFormatMode { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. 
/// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart index a48b094d..265649d4 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -42,6 +42,7 @@ part 'chat_completion_stream_response_delta.dart'; part 'chat_completion_stream_message_function_call.dart'; part 'chat_completion_stream_message_tool_call_chunk.dart'; part 'completion_usage.dart'; +part 'completion_tokens_details.dart'; part 'create_embedding_request.dart'; part 'create_embedding_response.dart'; part 'embedding.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 5753970f..af25caaf 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -3530,11 +3530,13 @@ CreateChatCompletionRequest _$CreateChatCompletionRequestFromJson( /// @nodoc mixin _$CreateChatCompletionRequest { - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. @_ChatCompletionModelConverter() ChatCompletionModel get model => throw _privateConstructorUsedError; - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). List get messages => throw _privateConstructorUsedError; @@ -3546,25 +3548,40 @@ mixin _$CreateChatCompletionRequest { /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias => throw _privateConstructorUsedError; - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. 
If true, returns the log probabilities of + /// each output token returned in the `content` of `message`. @JsonKey(includeIfNull: false) bool? get logprobs => throw _privateConstructorUsedError; - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @JsonKey(name: 'top_logprobs', includeIfNull: false) int? get topLogprobs => throw _privateConstructorUsedError; - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + /// via API. /// - /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). @JsonKey(name: 'max_tokens', includeIfNull: false) int? get maxTokens => throw _privateConstructorUsedError; - /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// An upper bound for the number of tokens that can be generated for a completion, including visible output + /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? get maxCompletionTokens => throw _privateConstructorUsedError; + + /// How many chat completion choices to generate for each input message. Note that you will be charged based on + /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @JsonKey(includeIfNull: false) int? get n => throw _privateConstructorUsedError; @@ -3574,28 +3591,44 @@ mixin _$CreateChatCompletionRequest { @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty => throw _privateConstructorUsedError; - /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + /// than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. 
Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + /// will match your supplied JSON schema. + /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + /// valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + /// or user message. Without this, the model may generate an unending stream of whitespace until the generation + /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + /// `max_tokens` or the conversation exceeded the max context length. /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @JsonKey(name: 'response_format', includeIfNull: false) ResponseFormat? get responseFormat => throw _privateConstructorUsedError; /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests + /// with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + /// monitor changes in the backend. @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers /// subscribed to the scale tier service: - /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - /// - If set to 'default', the request will be processed using the default service tier with a lower - /// uptime SLA and no latency guarantee. + /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + /// until they are exhausted. + /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + /// default service tier with a lower uptime SLA and no latency guarantee. + /// - If set to 'default', the request will be processed using the default service tier with a lower uptime + /// SLA and no latency guarantee. 
/// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @@ -3611,7 +3644,7 @@ mixin _$CreateChatCompletionRequest { @JsonKey(includeIfNull: false) ChatCompletionStop? get stop => throw _privateConstructorUsedError; - /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; @@ -3632,7 +3665,9 @@ mixin _$CreateChatCompletionRequest { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. @JsonKey(includeIfNull: false) List? get tools => throw _privateConstructorUsedError; @@ -3640,7 +3675,8 @@ mixin _$CreateChatCompletionRequest { /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + /// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @_ChatCompletionToolChoiceOptionConverter() @@ -3662,7 +3698,8 @@ mixin _$CreateChatCompletionRequest { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + /// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. @_ChatCompletionFunctionCallConverter() @@ -3704,6 +3741,8 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @JsonKey(includeIfNull: false) bool? logprobs, @JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) int? 
maxTokens, + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? maxCompletionTokens, @JsonKey(includeIfNull: false) int? n, @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, @@ -3766,6 +3805,7 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, Object? logprobs = freezed, Object? topLogprobs = freezed, Object? maxTokens = freezed, + Object? maxCompletionTokens = freezed, Object? n = freezed, Object? presencePenalty = freezed, Object? responseFormat = freezed, @@ -3812,6 +3852,10 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ? _value.maxTokens : maxTokens // ignore: cast_nullable_to_non_nullable as int?, + maxCompletionTokens: freezed == maxCompletionTokens + ? _value.maxCompletionTokens + : maxCompletionTokens // ignore: cast_nullable_to_non_nullable + as int?, n: freezed == n ? _value.n : n // ignore: cast_nullable_to_non_nullable @@ -3982,6 +4026,8 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @JsonKey(includeIfNull: false) bool? logprobs, @JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? maxCompletionTokens, @JsonKey(includeIfNull: false) int? n, @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, @@ -4049,6 +4095,7 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> Object? logprobs = freezed, Object? topLogprobs = freezed, Object? maxTokens = freezed, + Object? maxCompletionTokens = freezed, Object? n = freezed, Object? presencePenalty = freezed, Object? responseFormat = freezed, @@ -4095,6 +4142,10 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.maxTokens : maxTokens // ignore: cast_nullable_to_non_nullable as int?, + maxCompletionTokens: freezed == maxCompletionTokens + ? _value.maxCompletionTokens + : maxCompletionTokens // ignore: cast_nullable_to_non_nullable + as int?, n: freezed == n ? _value.n : n // ignore: cast_nullable_to_non_nullable @@ -4176,6 +4227,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(includeIfNull: false) this.logprobs, @JsonKey(name: 'top_logprobs', includeIfNull: false) this.topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) this.maxTokens, + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + this.maxCompletionTokens, @JsonKey(includeIfNull: false) this.n = 1, @JsonKey(name: 'presence_penalty', includeIfNull: false) this.presencePenalty = 0.0, @@ -4213,15 +4266,18 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { Map json) => _$$CreateChatCompletionRequestImplFromJson(json); - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. @override @_ChatCompletionModelConverter() final ChatCompletionModel model; - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. 
+ /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). final List _messages; - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). @override List get messages { if (_messages is EqualUnmodifiableListView) return _messages; @@ -4238,12 +4294,20 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. final Map? _logitBias; /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. @override @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias { @@ -4254,24 +4318,36 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { return EqualUnmodifiableMapView(value); } - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of + /// each output token returned in the `content` of `message`. @override @JsonKey(includeIfNull: false) final bool? logprobs; - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
+ /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @override @JsonKey(name: 'top_logprobs', includeIfNull: false) final int? topLogprobs; - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + /// via API. /// - /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). @override @JsonKey(name: 'max_tokens', includeIfNull: false) final int? maxTokens; - /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// An upper bound for the number of tokens that can be generated for a completion, including visible output + /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + @override + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + final int? maxCompletionTokens; + + /// How many chat completion choices to generate for each input message. Note that you will be charged based on + /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @override @JsonKey(includeIfNull: false) final int? n; @@ -4283,30 +4359,46 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty; - /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + /// than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + /// will match your supplied JSON schema. + /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + /// valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + /// or user message. Without this, the model may generate an unending stream of whitespace until the generation + /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + /// `max_tokens` or the conversation exceeded the max context length. /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @override @JsonKey(name: 'response_format', includeIfNull: false) final ResponseFormat? responseFormat; /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests + /// with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + /// monitor changes in the backend. @override @JsonKey(includeIfNull: false) final int? seed; /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers /// subscribed to the scale tier service: - /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - /// - If set to 'default', the request will be processed using the default service tier with a lower - /// uptime SLA and no latency guarantee. + /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + /// until they are exhausted. + /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + /// default service tier with a lower uptime SLA and no latency guarantee. + /// - If set to 'default', the request will be processed using the default service tier with a lower uptime + /// SLA and no latency guarantee. /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @@ -4323,7 +4415,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(includeIfNull: false) final ChatCompletionStop? stop; - /// If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @override @JsonKey(includeIfNull: false) final bool? stream; @@ -4347,10 +4439,14 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. final List? _tools; - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. @override @JsonKey(includeIfNull: false) List? get tools { @@ -4365,7 +4461,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + /// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @override @@ -4389,7 +4486,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + /// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. 
@override @@ -4417,7 +4515,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @override String toString() { - return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, serviceTier: $serviceTier, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, user: $user, functionCall: $functionCall, functions: $functions)'; + return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, maxCompletionTokens: $maxCompletionTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, serviceTier: $serviceTier, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, user: $user, functionCall: $functionCall, functions: $functions)'; } @override @@ -4437,6 +4535,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { other.topLogprobs == topLogprobs) && (identical(other.maxTokens, maxTokens) || other.maxTokens == maxTokens) && + (identical(other.maxCompletionTokens, maxCompletionTokens) || + other.maxCompletionTokens == maxCompletionTokens) && (identical(other.n, n) || other.n == n) && (identical(other.presencePenalty, presencePenalty) || other.presencePenalty == presencePenalty) && @@ -4475,6 +4575,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { logprobs, topLogprobs, maxTokens, + maxCompletionTokens, n, presencePenalty, responseFormat, @@ -4524,6 +4625,8 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'top_logprobs', includeIfNull: false) final int? topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) final int? maxTokens, + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + final int? maxCompletionTokens, @JsonKey(includeIfNull: false) final int? n, @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty, @@ -4561,12 +4664,14 @@ abstract class _CreateChatCompletionRequest factory _CreateChatCompletionRequest.fromJson(Map json) = _$CreateChatCompletionRequestImpl.fromJson; - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. @override @_ChatCompletionModelConverter() ChatCompletionModel get model; - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). 
@override List get messages; @@ -4579,29 +4684,45 @@ abstract class _CreateChatCompletionRequest /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. @override @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias; - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of + /// each output token returned in the `content` of `message`. @override @JsonKey(includeIfNull: false) bool? get logprobs; - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @override @JsonKey(name: 'top_logprobs', includeIfNull: false) int? get topLogprobs; - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + /// via API. /// - /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). @override @JsonKey(name: 'max_tokens', includeIfNull: false) int? get maxTokens; - /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// An upper bound for the number of tokens that can be generated for a completion, including visible output + /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + @override + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? get maxCompletionTokens; + + /// How many chat completion choices to generate for each input message. 
Note that you will be charged based on + /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @override @JsonKey(includeIfNull: false) int? get n; @@ -4613,30 +4734,46 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; - /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + /// than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + /// will match your supplied JSON schema. + /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + /// valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + /// or user message. Without this, the model may generate an unending stream of whitespace until the generation + /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + /// `max_tokens` or the conversation exceeded the max context length. /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @override @JsonKey(name: 'response_format', includeIfNull: false) ResponseFormat? get responseFormat; /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. 
+ /// If specified, our system will make a best effort to sample deterministically, such that repeated requests + /// with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + /// monitor changes in the backend. @override @JsonKey(includeIfNull: false) int? get seed; /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers /// subscribed to the scale tier service: - /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - /// - If set to 'default', the request will be processed using the default service tier with a lower - /// uptime SLA and no latency guarantee. + /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + /// until they are exhausted. + /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + /// default service tier with a lower uptime SLA and no latency guarantee. + /// - If set to 'default', the request will be processed using the default service tier with a lower uptime + /// SLA and no latency guarantee. /// - When not set, the default behavior is 'auto'. /// /// When this parameter is set, the response body will include the `service_tier` utilized. @@ -4653,7 +4790,7 @@ abstract class _CreateChatCompletionRequest @JsonKey(includeIfNull: false) ChatCompletionStop? get stop; - /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @override @JsonKey(includeIfNull: false) bool? get stream; @@ -4677,7 +4814,9 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. @override @JsonKey(includeIfNull: false) List? get tools; @@ -4686,7 +4825,8 @@ abstract class _CreateChatCompletionRequest /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
+ /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + /// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @override @@ -4710,7 +4850,8 @@ abstract class _CreateChatCompletionRequest /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + /// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. @override @@ -11675,6 +11816,11 @@ mixin _$CompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; + /// Breakdown of tokens used in a completion. + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? get completionTokensDetails => + throw _privateConstructorUsedError; + /// Serializes this CompletionUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; @@ -11694,7 +11840,11 @@ abstract class $CompletionUsageCopyWith<$Res> { $Res call( {@JsonKey(name: 'completion_tokens') int? completionTokens, @JsonKey(name: 'prompt_tokens') int promptTokens, - @JsonKey(name: 'total_tokens') int totalTokens}); + @JsonKey(name: 'total_tokens') int totalTokens, + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? completionTokensDetails}); + + $CompletionTokensDetailsCopyWith<$Res>? get completionTokensDetails; } /// @nodoc @@ -11715,6 +11865,7 @@ class _$CompletionUsageCopyWithImpl<$Res, $Val extends CompletionUsage> Object? completionTokens = freezed, Object? promptTokens = null, Object? totalTokens = null, + Object? completionTokensDetails = freezed, }) { return _then(_value.copyWith( completionTokens: freezed == completionTokens @@ -11729,8 +11880,27 @@ class _$CompletionUsageCopyWithImpl<$Res, $Val extends CompletionUsage> ? _value.totalTokens : totalTokens // ignore: cast_nullable_to_non_nullable as int, + completionTokensDetails: freezed == completionTokensDetails + ? _value.completionTokensDetails + : completionTokensDetails // ignore: cast_nullable_to_non_nullable + as CompletionTokensDetails?, ) as $Val); } + + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $CompletionTokensDetailsCopyWith<$Res>? get completionTokensDetails { + if (_value.completionTokensDetails == null) { + return null; + } + + return $CompletionTokensDetailsCopyWith<$Res>( + _value.completionTokensDetails!, (value) { + return _then(_value.copyWith(completionTokensDetails: value) as $Val); + }); + } } /// @nodoc @@ -11744,7 +11914,12 @@ abstract class _$$CompletionUsageImplCopyWith<$Res> $Res call( {@JsonKey(name: 'completion_tokens') int? completionTokens, @JsonKey(name: 'prompt_tokens') int promptTokens, - @JsonKey(name: 'total_tokens') int totalTokens}); + @JsonKey(name: 'total_tokens') int totalTokens, + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? completionTokensDetails}); + + @override + $CompletionTokensDetailsCopyWith<$Res>? 
get completionTokensDetails; } /// @nodoc @@ -11763,6 +11938,7 @@ class __$$CompletionUsageImplCopyWithImpl<$Res> Object? completionTokens = freezed, Object? promptTokens = null, Object? totalTokens = null, + Object? completionTokensDetails = freezed, }) { return _then(_$CompletionUsageImpl( completionTokens: freezed == completionTokens @@ -11777,6 +11953,10 @@ class __$$CompletionUsageImplCopyWithImpl<$Res> ? _value.totalTokens : totalTokens // ignore: cast_nullable_to_non_nullable as int, + completionTokensDetails: freezed == completionTokensDetails + ? _value.completionTokensDetails + : completionTokensDetails // ignore: cast_nullable_to_non_nullable + as CompletionTokensDetails?, )); } } @@ -11787,7 +11967,9 @@ class _$CompletionUsageImpl extends _CompletionUsage { const _$CompletionUsageImpl( {@JsonKey(name: 'completion_tokens') required this.completionTokens, @JsonKey(name: 'prompt_tokens') required this.promptTokens, - @JsonKey(name: 'total_tokens') required this.totalTokens}) + @JsonKey(name: 'total_tokens') required this.totalTokens, + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + this.completionTokensDetails}) : super._(); factory _$CompletionUsageImpl.fromJson(Map json) => @@ -11808,9 +11990,14 @@ class _$CompletionUsageImpl extends _CompletionUsage { @JsonKey(name: 'total_tokens') final int totalTokens; + /// Breakdown of tokens used in a completion. + @override + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + final CompletionTokensDetails? completionTokensDetails; + @override String toString() { - return 'CompletionUsage(completionTokens: $completionTokens, promptTokens: $promptTokens, totalTokens: $totalTokens)'; + return 'CompletionUsage(completionTokens: $completionTokens, promptTokens: $promptTokens, totalTokens: $totalTokens, completionTokensDetails: $completionTokensDetails)'; } @override @@ -11823,13 +12010,16 @@ class _$CompletionUsageImpl extends _CompletionUsage { (identical(other.promptTokens, promptTokens) || other.promptTokens == promptTokens) && (identical(other.totalTokens, totalTokens) || - other.totalTokens == totalTokens)); + other.totalTokens == totalTokens) && + (identical( + other.completionTokensDetails, completionTokensDetails) || + other.completionTokensDetails == completionTokensDetails)); } @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); + int get hashCode => Object.hash(runtimeType, completionTokens, promptTokens, + totalTokens, completionTokensDetails); /// Create a copy of CompletionUsage /// with the given fields replaced by the non-null parameter values. @@ -11852,8 +12042,10 @@ abstract class _CompletionUsage extends CompletionUsage { const factory _CompletionUsage( {@JsonKey(name: 'completion_tokens') required final int? completionTokens, @JsonKey(name: 'prompt_tokens') required final int promptTokens, - @JsonKey(name: 'total_tokens') - required final int totalTokens}) = _$CompletionUsageImpl; + @JsonKey(name: 'total_tokens') required final int totalTokens, + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + final CompletionTokensDetails? + completionTokensDetails}) = _$CompletionUsageImpl; const _CompletionUsage._() : super._(); factory _CompletionUsage.fromJson(Map json) = @@ -11874,6 +12066,11 @@ abstract class _CompletionUsage extends CompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens; + /// Breakdown of tokens used in a completion. 
+ @override + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? get completionTokensDetails; + /// Create a copy of CompletionUsage /// with the given fields replaced by the non-null parameter values. @override @@ -11882,6 +12079,178 @@ abstract class _CompletionUsage extends CompletionUsage { throw _privateConstructorUsedError; } +CompletionTokensDetails _$CompletionTokensDetailsFromJson( + Map json) { + return _CompletionTokensDetails.fromJson(json); +} + +/// @nodoc +mixin _$CompletionTokensDetails { + /// Tokens generated by the model for reasoning. + @JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? get reasoningTokens => throw _privateConstructorUsedError; + + /// Serializes this CompletionTokensDetails to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $CompletionTokensDetailsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CompletionTokensDetailsCopyWith<$Res> { + factory $CompletionTokensDetailsCopyWith(CompletionTokensDetails value, + $Res Function(CompletionTokensDetails) then) = + _$CompletionTokensDetailsCopyWithImpl<$Res, CompletionTokensDetails>; + @useResult + $Res call( + {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? reasoningTokens}); +} + +/// @nodoc +class _$CompletionTokensDetailsCopyWithImpl<$Res, + $Val extends CompletionTokensDetails> + implements $CompletionTokensDetailsCopyWith<$Res> { + _$CompletionTokensDetailsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? reasoningTokens = freezed, + }) { + return _then(_value.copyWith( + reasoningTokens: freezed == reasoningTokens + ? _value.reasoningTokens + : reasoningTokens // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CompletionTokensDetailsImplCopyWith<$Res> + implements $CompletionTokensDetailsCopyWith<$Res> { + factory _$$CompletionTokensDetailsImplCopyWith( + _$CompletionTokensDetailsImpl value, + $Res Function(_$CompletionTokensDetailsImpl) then) = + __$$CompletionTokensDetailsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? reasoningTokens}); +} + +/// @nodoc +class __$$CompletionTokensDetailsImplCopyWithImpl<$Res> + extends _$CompletionTokensDetailsCopyWithImpl<$Res, + _$CompletionTokensDetailsImpl> + implements _$$CompletionTokensDetailsImplCopyWith<$Res> { + __$$CompletionTokensDetailsImplCopyWithImpl( + _$CompletionTokensDetailsImpl _value, + $Res Function(_$CompletionTokensDetailsImpl) _then) + : super(_value, _then); + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? reasoningTokens = freezed, + }) { + return _then(_$CompletionTokensDetailsImpl( + reasoningTokens: freezed == reasoningTokens + ? 
_value.reasoningTokens + : reasoningTokens // ignore: cast_nullable_to_non_nullable + as int?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CompletionTokensDetailsImpl extends _CompletionTokensDetails { + const _$CompletionTokensDetailsImpl( + {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) + this.reasoningTokens}) + : super._(); + + factory _$CompletionTokensDetailsImpl.fromJson(Map json) => + _$$CompletionTokensDetailsImplFromJson(json); + + /// Tokens generated by the model for reasoning. + @override + @JsonKey(name: 'reasoning_tokens', includeIfNull: false) + final int? reasoningTokens; + + @override + String toString() { + return 'CompletionTokensDetails(reasoningTokens: $reasoningTokens)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CompletionTokensDetailsImpl && + (identical(other.reasoningTokens, reasoningTokens) || + other.reasoningTokens == reasoningTokens)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, reasoningTokens); + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$CompletionTokensDetailsImplCopyWith<_$CompletionTokensDetailsImpl> + get copyWith => __$$CompletionTokensDetailsImplCopyWithImpl< + _$CompletionTokensDetailsImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CompletionTokensDetailsImplToJson( + this, + ); + } +} + +abstract class _CompletionTokensDetails extends CompletionTokensDetails { + const factory _CompletionTokensDetails( + {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) + final int? reasoningTokens}) = _$CompletionTokensDetailsImpl; + const _CompletionTokensDetails._() : super._(); + + factory _CompletionTokensDetails.fromJson(Map json) = + _$CompletionTokensDetailsImpl.fromJson; + + /// Tokens generated by the model for reasoning. + @override + @JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? get reasoningTokens; + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$CompletionTokensDetailsImplCopyWith<_$CompletionTokensDetailsImpl> + get copyWith => throw _privateConstructorUsedError; +} + CreateEmbeddingRequest _$CreateEmbeddingRequestFromJson( Map json) { return _CreateEmbeddingRequest.fromJson(json); @@ -14581,7 +14950,7 @@ mixin _$CreateFineTuningJobRequest { FineTuningJobHyperparameters? get hyperparameters => throw _privateConstructorUsedError; - /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// A string of up to 64 characters that will be added to your fine-tuned model name. /// /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @JsonKey(includeIfNull: false) @@ -14853,7 +15222,7 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) final FineTuningJobHyperparameters? hyperparameters; - /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// A string of up to 64 characters that will be added to your fine-tuned model name. 
/// /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @override @@ -14991,7 +15360,7 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) FineTuningJobHyperparameters? get hyperparameters; - /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// A string of up to 64 characters that will be added to your fine-tuned model name. /// /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @override @@ -23597,11 +23966,11 @@ mixin _$AssistantObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -23993,11 +24362,11 @@ class _$AssistantObjectImpl extends _AssistantObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -24165,11 +24534,11 @@ abstract class _AssistantObject extends AssistantObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. 
/// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -24710,11 +25079,11 @@ mixin _$CreateAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -25077,11 +25446,11 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -25231,11 +25600,11 @@ abstract class _CreateAssistantRequest extends CreateAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -26211,11 +26580,11 @@ mixin _$ModifyAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. 
Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -26591,11 +26960,11 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -26753,11 +27122,11 @@ abstract class _ModifyAssistantRequest extends ModifyAssistantRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -27775,8 +28144,8 @@ mixin _$FileSearchRankingOptions { FileSearchRanker? get ranker => throw _privateConstructorUsedError; /// The score threshold for the file search. All values must be a floating point number between 0 and 1. - @JsonKey(name: 'score_threshold', includeIfNull: false) - double? get scoreThreshold => throw _privateConstructorUsedError; + @JsonKey(name: 'score_threshold') + double get scoreThreshold => throw _privateConstructorUsedError; /// Serializes this FileSearchRankingOptions to a JSON map. Map toJson() => throw _privateConstructorUsedError; @@ -27799,8 +28168,7 @@ abstract class $FileSearchRankingOptionsCopyWith<$Res> { includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) FileSearchRanker? ranker, - @JsonKey(name: 'score_threshold', includeIfNull: false) - double? scoreThreshold}); + @JsonKey(name: 'score_threshold') double scoreThreshold}); } /// @nodoc @@ -27820,17 +28188,17 @@ class _$FileSearchRankingOptionsCopyWithImpl<$Res, @override $Res call({ Object? ranker = freezed, - Object? scoreThreshold = freezed, + Object? 
scoreThreshold = null, }) { return _then(_value.copyWith( ranker: freezed == ranker ? _value.ranker : ranker // ignore: cast_nullable_to_non_nullable as FileSearchRanker?, - scoreThreshold: freezed == scoreThreshold + scoreThreshold: null == scoreThreshold ? _value.scoreThreshold : scoreThreshold // ignore: cast_nullable_to_non_nullable - as double?, + as double, ) as $Val); } } @@ -27849,8 +28217,7 @@ abstract class _$$FileSearchRankingOptionsImplCopyWith<$Res> includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) FileSearchRanker? ranker, - @JsonKey(name: 'score_threshold', includeIfNull: false) - double? scoreThreshold}); + @JsonKey(name: 'score_threshold') double scoreThreshold}); } /// @nodoc @@ -27869,17 +28236,17 @@ class __$$FileSearchRankingOptionsImplCopyWithImpl<$Res> @override $Res call({ Object? ranker = freezed, - Object? scoreThreshold = freezed, + Object? scoreThreshold = null, }) { return _then(_$FileSearchRankingOptionsImpl( ranker: freezed == ranker ? _value.ranker : ranker // ignore: cast_nullable_to_non_nullable as FileSearchRanker?, - scoreThreshold: freezed == scoreThreshold + scoreThreshold: null == scoreThreshold ? _value.scoreThreshold : scoreThreshold // ignore: cast_nullable_to_non_nullable - as double?, + as double, )); } } @@ -27892,8 +28259,7 @@ class _$FileSearchRankingOptionsImpl extends _FileSearchRankingOptions { includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) this.ranker, - @JsonKey(name: 'score_threshold', includeIfNull: false) - this.scoreThreshold}) + @JsonKey(name: 'score_threshold') required this.scoreThreshold}) : super._(); factory _$FileSearchRankingOptionsImpl.fromJson(Map json) => @@ -27907,8 +28273,8 @@ class _$FileSearchRankingOptionsImpl extends _FileSearchRankingOptions { /// The score threshold for the file search. All values must be a floating point number between 0 and 1. @override - @JsonKey(name: 'score_threshold', includeIfNull: false) - final double? scoreThreshold; + @JsonKey(name: 'score_threshold') + final double scoreThreshold; @override String toString() { @@ -27952,8 +28318,8 @@ abstract class _FileSearchRankingOptions extends FileSearchRankingOptions { includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) final FileSearchRanker? ranker, - @JsonKey(name: 'score_threshold', includeIfNull: false) - final double? scoreThreshold}) = _$FileSearchRankingOptionsImpl; + @JsonKey(name: 'score_threshold') + required final double scoreThreshold}) = _$FileSearchRankingOptionsImpl; const _FileSearchRankingOptions._() : super._(); factory _FileSearchRankingOptions.fromJson(Map json) = @@ -27967,8 +28333,8 @@ abstract class _FileSearchRankingOptions extends FileSearchRankingOptions { /// The score threshold for the file search. All values must be a floating point number between 0 and 1. @override - @JsonKey(name: 'score_threshold', includeIfNull: false) - double? get scoreThreshold; + @JsonKey(name: 'score_threshold') + double get scoreThreshold; /// Create a copy of FileSearchRankingOptions /// with the given fields replaced by the non-null parameter values. @@ -28667,11 +29033,11 @@ mixin _$RunObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. 
/// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -29403,11 +29769,11 @@ class _$RunObjectImpl extends _RunObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -29705,11 +30071,11 @@ abstract class _RunObject extends RunObject { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -31689,11 +32055,11 @@ mixin _$CreateRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. 
/// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -32227,11 +32593,11 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -32451,11 +32817,11 @@ abstract class _CreateRunRequest extends CreateRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -35200,11 +35566,11 @@ mixin _$CreateThreadAndRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -35759,11 +36125,11 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. 
Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -35981,11 +36347,11 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures /// the model will match your supplied JSON schema. Learn more in the /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates /// is valid JSON. /// /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -59178,7 +59544,8 @@ mixin _$AssistantToolsFileSearchFileSearch { @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults => throw _privateConstructorUsedError; - /// The ranking options for the file search. + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + /// a score_threshold of 0. /// /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) /// for more information. @@ -59336,7 +59703,8 @@ class _$AssistantToolsFileSearchFileSearchImpl @JsonKey(name: 'max_num_results', includeIfNull: false) final int? maxNumResults; - /// The ranking options for the file search. + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + /// a score_threshold of 0. /// /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) /// for more information. @@ -59406,7 +59774,8 @@ abstract class _AssistantToolsFileSearchFileSearch @JsonKey(name: 'max_num_results', includeIfNull: false) int? get maxNumResults; - /// The ranking options for the file search. + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + /// a score_threshold of 0. /// /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) /// for more information. 
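The hunks above make `scoreThreshold` a required, non-nullable field of `FileSearchRankingOptions` (previously a nullable `double?` that was dropped from JSON when null). A minimal usage sketch against the generated openai_dart API; the 0.5 threshold and the `FileSearchRanker.auto` member name are illustrative assumptions rather than values taken from the diff:

```dart
import 'package:openai_dart/openai_dart.dart';

void main() {
  // score_threshold is now required, so every set of ranking options must
  // state a threshold between 0 and 1 explicitly.
  final rankingOptions = FileSearchRankingOptions(
    ranker: FileSearchRanker.auto, // still optional; omitted from JSON if null
    scoreThreshold: 0.5,
  );

  // score_threshold is always serialized now instead of being skipped when
  // absent, matching the `required` marker added to the OpenAPI spec below.
  print(rankingOptions.toJson()); // {ranker: auto, score_threshold: 0.5}
}
```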
diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 092425e5..c57effb3 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -316,6 +316,7 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( logprobs: json['logprobs'] as bool?, topLogprobs: (json['top_logprobs'] as num?)?.toInt(), maxTokens: (json['max_tokens'] as num?)?.toInt(), + maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), n: (json['n'] as num?)?.toInt() ?? 1, presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 0.0, responseFormat: json['response_format'] == null @@ -366,6 +367,7 @@ Map _$$CreateChatCompletionRequestImplToJson( writeNotNull('logprobs', instance.logprobs); writeNotNull('top_logprobs', instance.topLogprobs); writeNotNull('max_tokens', instance.maxTokens); + writeNotNull('max_completion_tokens', instance.maxCompletionTokens); writeNotNull('n', instance.n); writeNotNull('presence_penalty', instance.presencePenalty); writeNotNull('response_format', instance.responseFormat?.toJson()); @@ -1102,15 +1104,50 @@ _$CompletionUsageImpl _$$CompletionUsageImplFromJson( completionTokens: (json['completion_tokens'] as num?)?.toInt(), promptTokens: (json['prompt_tokens'] as num).toInt(), totalTokens: (json['total_tokens'] as num).toInt(), + completionTokensDetails: json['completion_tokens_details'] == null + ? null + : CompletionTokensDetails.fromJson( + json['completion_tokens_details'] as Map), ); Map _$$CompletionUsageImplToJson( - _$CompletionUsageImpl instance) => - { - 'completion_tokens': instance.completionTokens, - 'prompt_tokens': instance.promptTokens, - 'total_tokens': instance.totalTokens, - }; + _$CompletionUsageImpl instance) { + final val = { + 'completion_tokens': instance.completionTokens, + 'prompt_tokens': instance.promptTokens, + 'total_tokens': instance.totalTokens, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull( + 'completion_tokens_details', instance.completionTokensDetails?.toJson()); + return val; +} + +_$CompletionTokensDetailsImpl _$$CompletionTokensDetailsImplFromJson( + Map json) => + _$CompletionTokensDetailsImpl( + reasoningTokens: (json['reasoning_tokens'] as num?)?.toInt(), + ); + +Map _$$CompletionTokensDetailsImplToJson( + _$CompletionTokensDetailsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('reasoning_tokens', instance.reasoningTokens); + return val; +} _$CreateEmbeddingRequestImpl _$$CreateEmbeddingRequestImplFromJson( Map json) => @@ -2530,7 +2567,7 @@ _$FileSearchRankingOptionsImpl _$$FileSearchRankingOptionsImplFromJson( _$FileSearchRankingOptionsImpl( ranker: $enumDecodeNullable(_$FileSearchRankerEnumMap, json['ranker'], unknownValue: JsonKey.nullForUndefinedEnumValue), - scoreThreshold: (json['score_threshold'] as num?)?.toDouble(), + scoreThreshold: (json['score_threshold'] as num).toDouble(), ); Map _$$FileSearchRankingOptionsImplToJson( @@ -2544,7 +2581,7 @@ Map _$$FileSearchRankingOptionsImplToJson( } writeNotNull('ranker', _$FileSearchRankerEnumMap[instance.ranker]); - writeNotNull('score_threshold', instance.scoreThreshold); + val['score_threshold'] = instance.scoreThreshold; return val; } diff --git a/packages/openai_dart/oas/openapi_curated.yaml 
b/packages/openai_dart/oas/openapi_curated.yaml index 793e696e..b7333f2c 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -1815,7 +1815,9 @@ components: properties: model: title: ChatCompletionModel - description: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + description: | + ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + table for details on which models work with the Chat API. example: "gpt-4o" anyOf: - type: string @@ -1842,7 +1844,6 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", - "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -1858,7 +1859,9 @@ components: "o1-preview-2024-09-12", ] messages: - description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + description: | + A list of messages comprising the conversation so far. + [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). type: array minItems: 1 items: @@ -1879,22 +1882,39 @@ components: description: | Modify the likelihood of specified tokens appearing in the completion. - Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + relevant token. logprobs: - description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + description: | + Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of + each output token returned in the `content` of `message`. type: boolean nullable: true top_logprobs: - description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + description: | + An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. type: integer minimum: 0 maximum: 20 nullable: true max_tokens: description: | - The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. - - The total length of input tokens and generated tokens is limited by the model's context length. 
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + via API. + + This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). + type: integer + nullable: true + max_completion_tokens: + description: | + An upper bound for the number of tokens that can be generated for a completion, including visible output + tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). type: integer nullable: true n: @@ -1904,7 +1924,9 @@ components: default: 1 example: 1 nullable: true - description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + description: | + How many chat completion choices to generate for each input message. Note that you will be charged based on + the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. presence_penalty: type: number default: 0 @@ -1921,15 +1943,20 @@ components: nullable: true description: | This feature is in Beta. - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + If specified, our system will make a best effort to sample deterministically, such that repeated requests + with the same `seed` and parameters should return the same result. + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + monitor changes in the backend. service_tier: description: | Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - - If set to 'default', the request will be processed using the default service tier with a lower - uptime SLA and no latency guarantee. + - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + default service tier with a lower uptime SLA and no latency guarantee. + - If set to 'default', the request will be processed using the default service tier with a lower uptime + SLA and no latency guarantee. - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. @@ -1954,8 +1981,10 @@ components: type: string stream: description: > - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
+ If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. + [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). type: boolean nullable: true default: false @@ -1979,9 +2008,10 @@ components: description: *completions_top_p_description tools: type: array - description: > + description: | A list of tools the model may call. Currently, only functions are supported as a tool. - Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + supported. items: $ref: "#/components/schemas/ChatCompletionTool" tool_choice: @@ -1991,7 +2021,8 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + model to call that tool. `none` is the default when no tools are present. `auto` is the default if tools are present. oneOf: @@ -2020,7 +2051,8 @@ components: Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. - Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + function. `none` is the default when no functions are present. `auto` is the default if functions are present. oneOf: @@ -2285,13 +2317,24 @@ components: ResponseFormat: type: object description: | - An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + An object specifying the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + than `gpt-3.5-turbo-1106`. - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + will match your supplied JSON schema. + Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
- Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + valid JSON. - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + or user message. Without this, the model may generate an unending stream of whitespace until the generation + reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + `max_tokens` or the conversation exceeded the max context length. oneOf: - $ref: "#/components/schemas/ResponseFormatText" - $ref: "#/components/schemas/ResponseFormatJsonObject" @@ -2671,10 +2714,19 @@ components: total_tokens: type: integer description: Total number of tokens used in the request (prompt + completion). + completion_tokens_details: + $ref: "#/components/schemas/CompletionTokensDetails" required: - prompt_tokens - completion_tokens - total_tokens + CompletionTokensDetails: + type: object + description: Breakdown of tokens used in a completion. + properties: + reasoning_tokens: + type: integer + description: Tokens generated by the model for reasoning. CreateEmbeddingRequest: type: object description: Request object for the Create embedding endpoint. @@ -2849,12 +2901,12 @@ components: $ref: "#/components/schemas/FineTuningJobHyperparameters" suffix: description: | - A string of up to 18 characters that will be added to your fine-tuned model name. + A string of up to 64 characters that will be added to your fine-tuned model name. For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. type: string minLength: 1 - maxLength: 40 + maxLength: 64 default: null nullable: true validation_file: @@ -3617,11 +3669,11 @@ components: [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. 
**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a @@ -3681,7 +3733,6 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", - "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -3920,7 +3971,8 @@ components: FileSearchRankingOptions: type: object description: | - The ranking options for the file search. + The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + a score_threshold of 0. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. @@ -3932,6 +3984,8 @@ components: description: The score threshold for the file search. All values must be a floating point number between 0 and 1. minimum: 0 maximum: 1 + required: + - score_threshold FileSearchRanker: type: string description: The ranker to use for the file search. If not specified will use the `auto` ranker. @@ -4243,7 +4297,6 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", - "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", @@ -4487,7 +4540,6 @@ components: "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", - "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 96e64e32..d9b16b55 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -9698,11 +9698,18 @@ components: nullable: true max_tokens: description: | - The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API. - The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o1 series models](/docs/guides/reasoning). type: integer nullable: true + deprecated: true + max_completion_tokens: + description: | + An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). + type: integer + nullable: true + n: type: integer minimum: 1 @@ -9722,9 +9729,9 @@ components: description: | An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
+ Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: @@ -9746,7 +9753,8 @@ components: service_tier: description: | Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. @@ -10635,12 +10643,12 @@ components: default: auto suffix: description: | - A string of up to 18 characters that will be added to your fine-tuned model name. + A string of up to 64 characters that will be added to your fine-tuned model name. For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. type: string minLength: 1 - maxLength: 40 + maxLength: 64 default: null nullable: true validation_file: @@ -10892,16 +10900,7 @@ components: An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. type: string response_format: - description: | - The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. - type: string - enum: - - json - - text - - srt - - verbose_json - - vtt - default: json + $ref: "#/components/schemas/AudioResponseFormat" temperature: description: | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. @@ -11034,6 +11033,18 @@ components: group: audio example: *verbose_transcription_response_example + AudioResponseFormat: + description: | + The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. + type: string + enum: + - json + - text + - srt + - verbose_json + - vtt + default: json + CreateTranslationRequest: type: object additionalProperties: false @@ -11058,10 +11069,7 @@ components: An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. type: string response_format: - description: | - The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. 
- type: string - default: json + $ref: "#/components/schemas/AudioResponseFormat" temperature: description: | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. @@ -11744,6 +11752,13 @@ components: total_tokens: type: integer description: Total number of tokens used in the request (prompt + completion). + completion_tokens_details: + type: object + description: Breakdown of tokens used in a completion. + properties: + reasoning_tokens: + type: integer + description: Tokens generated by the model for reasoning. required: - prompt_tokens - completion_tokens @@ -11791,9 +11806,9 @@ components: description: | Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: @@ -12283,7 +12298,7 @@ components: title: File search tool call ranking options type: object description: | - The ranking options for the file search. + The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. properties: @@ -12296,6 +12311,8 @@ components: description: The score threshold for the file search. All values must be a floating point number between 0 and 1. 
minimum: 0 maximum: 1 + required: + - score_threshold AssistantToolsFileSearchTypeOnly: type: object diff --git a/packages/openai_dart/test/openai_client_chat_test.dart b/packages/openai_dart/test/openai_client_chat_test.dart index ebfe8c44..96c57c2a 100644 --- a/packages/openai_dart/test/openai_client_chat_test.dart +++ b/packages/openai_dart/test/openai_client_chat_test.dart @@ -115,7 +115,7 @@ void main() { content: ChatCompletionUserMessageContent.string('Tell me a joke'), ), ], - maxTokens: 2, + maxCompletionTokens: 2, ); final res = await client.createChatCompletion(request: request); expect(res.choices, isNotEmpty); @@ -123,6 +123,10 @@ void main() { res.choices.first.finishReason, ChatCompletionFinishReason.length, ); + expect( + res.usage?.completionTokensDetails?.reasoningTokens, + isNotNull, + ); }); test('Test call chat completions API with other parameters', () async { @@ -138,7 +142,7 @@ void main() { content: ChatCompletionUserMessageContent.string('Tell me a joke'), ), ], - maxTokens: 2, + maxCompletionTokens: 2, presencePenalty: 0.6, frequencyPenalty: 0.6, logitBias: {'50256': -100}, From 9650964b78b85dc368f87fbf16431ecf12507730 Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Wed, 25 Sep 2024 23:47:16 +0200 Subject: [PATCH 123/251] refactor: Migrate ChatOpenAI to maxCompletionTokens (#557) --- packages/langchain_openai/lib/src/chat_models/mappers.dart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart index 3b23ee8c..ad8bec0b 100644 --- a/packages/langchain_openai/lib/src/chat_models/mappers.dart +++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart @@ -37,7 +37,7 @@ CreateChatCompletionRequest createChatCompletionRequest( frequencyPenalty: options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, logitBias: options?.logitBias ?? defaultOptions.logitBias, - maxTokens: options?.maxTokens ?? defaultOptions.maxTokens, + maxCompletionTokens: options?.maxTokens ?? defaultOptions.maxTokens, n: options?.n ?? defaultOptions.n, presencePenalty: options?.presencePenalty ??
defaultOptions.presencePenalty, responseFormat: responseFormatDto, From 9464af2d6730e68658d03dc860c54497bd210681 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Wed, 25 Sep 2024 23:47:45 +0200 Subject: [PATCH 124/251] chore(release): publish packages - langchain@0.7.6 - langchain_community@0.3.2 - langchain_core@0.3.6 - langchain_firebase@0.2.1+2 - langchain_google@0.6.3 - langchain_ollama@0.3.2 - langchain_openai@0.7.2 - ollama_dart@0.2.2 - openai_dart@0.4.2 - langchain_supabase@0.1.1+3 - langchain_pinecone@0.1.0+9 - langchain_anthropic@0.1.1+2 - langchain_chroma@0.2.1+3 - langchain_mistralai@0.2.3+1 --- CHANGELOG.md | 81 +++++++++++++++++++ examples/browser_summarizer/pubspec.yaml | 6 +- examples/docs_examples/pubspec.yaml | 16 ++-- examples/hello_world_backend/pubspec.yaml | 4 +- examples/hello_world_cli/pubspec.yaml | 4 +- examples/hello_world_flutter/pubspec.yaml | 10 +-- examples/wikivoyage_eu/pubspec.yaml | 6 +- packages/langchain/CHANGELOG.md | 4 + packages/langchain/pubspec.yaml | 10 +-- packages/langchain_anthropic/CHANGELOG.md | 4 + packages/langchain_anthropic/pubspec.yaml | 4 +- packages/langchain_chroma/CHANGELOG.md | 4 + packages/langchain_chroma/pubspec.yaml | 10 +-- packages/langchain_community/CHANGELOG.md | 5 ++ packages/langchain_community/pubspec.yaml | 6 +- packages/langchain_core/CHANGELOG.md | 4 + packages/langchain_core/pubspec.yaml | 2 +- packages/langchain_firebase/CHANGELOG.md | 4 + .../langchain_firebase/example/pubspec.yaml | 4 +- packages/langchain_firebase/pubspec.yaml | 4 +- packages/langchain_google/CHANGELOG.md | 5 ++ packages/langchain_google/pubspec.yaml | 4 +- packages/langchain_mistralai/CHANGELOG.md | 4 + packages/langchain_mistralai/pubspec.yaml | 4 +- packages/langchain_ollama/CHANGELOG.md | 4 + packages/langchain_ollama/pubspec.yaml | 6 +- packages/langchain_openai/CHANGELOG.md | 5 ++ packages/langchain_openai/pubspec.yaml | 10 +-- packages/langchain_pinecone/CHANGELOG.md | 4 + packages/langchain_pinecone/pubspec.yaml | 6 +- packages/langchain_supabase/CHANGELOG.md | 4 + packages/langchain_supabase/pubspec.yaml | 10 +-- packages/ollama_dart/CHANGELOG.md | 4 + packages/ollama_dart/pubspec.yaml | 2 +- packages/openai_dart/CHANGELOG.md | 6 ++ packages/openai_dart/pubspec.yaml | 2 +- 36 files changed, 207 insertions(+), 65 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2bedbee2..7a91910b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,87 @@ 📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. +## 2024-09-25 + +### Changes + +--- + +Packages with breaking changes: + +- There are no breaking changes in this release. 
+ +Packages with other changes: + +- [`langchain` - `v0.7.6`](#langchain---v076) +- [`langchain_core` - `v0.3.6`](#langchain_core---v036) +- [`langchain_community` - `v0.3.2`](#langchain_community---v032) +- [`langchain_firebase` - `v0.2.1+2`](#langchain_firebase---v0212) +- [`langchain_google` - `v0.6.3`](#langchain_google---v063) +- [`langchain_ollama` - `v0.3.2`](#langchain_ollama---v032) +- [`langchain_openai` - `v0.7.2`](#langchain_openai---v072) +- [`ollama_dart` - `v0.2.2`](#ollama_dart---v022) +- [`openai_dart` - `v0.4.2`](#openai_dart---v042) +- [`langchain_supabase` - `v0.1.1+3`](#langchain_supabase---v0113) +- [`langchain_pinecone` - `v0.1.0+9`](#langchain_pinecone---v0109) +- [`langchain_anthropic` - `v0.1.1+2`](#langchain_anthropic---v0112) +- [`langchain_chroma` - `v0.2.1+3`](#langchain_chroma---v0213) +- [`langchain_mistralai` - `v0.2.3+1`](#langchain_mistralai---v0231) + +Packages with dependency updates only: + +> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project. + +- `langchain_supabase` - `v0.1.1+3` +- `langchain_pinecone` - `v0.1.0+9` +- `langchain_anthropic` - `v0.1.1+2` +- `langchain_chroma` - `v0.2.1+3` +- `langchain_mistralai` - `v0.2.3+1` + +--- + +#### `langchain` - `v0.7.6` + +- **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) + +#### `langchain_core` - `v0.3.6` + +- **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) + +#### `langchain_community` - `v0.3.2` + +- **FEAT**: Add support for deleteWhere in ObjectBoxVectorStore ([#552](https://github.com/davidmigloz/langchain_dart/issues/552)). ([90918bba](https://github.com/davidmigloz/langchain_dart/commit/90918bbac411ccfe4823ae195de6a50a46575573)) +- **REFACTOR**: Add stubs for ObjectBox on web platform ([#553](https://github.com/davidmigloz/langchain_dart/issues/553)). ([41caed92](https://github.com/davidmigloz/langchain_dart/commit/41caed924bf24382567758be4590d5ddff31e839)) + +#### `langchain_firebase` - `v0.2.1+2` + +- **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) + +#### `langchain_google` - `v0.6.3` + +- **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). ([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981)) +- **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) + +#### `langchain_ollama` - `v0.3.2` + +- **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). 
([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) + +#### `langchain_openai` - `v0.7.2` + +- **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) +- **REFACTOR**: Migrate ChatOpenAI to maxCompletionTokens ([#557](https://github.com/davidmigloz/langchain_dart/issues/557)). ([08057a5b](https://github.com/davidmigloz/langchain_dart/commit/08057a5b6e08ee2633c6be6144be1619e902bbc5)) + +#### `ollama_dart` - `v0.2.2` + +- **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). ([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) + +#### `openai_dart` - `v0.4.2` + +- **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) +- **FEAT**: Add support for maxCompletionTokens and reasoningTokens in openai_dart ([#556](https://github.com/davidmigloz/langchain_dart/issues/556)). ([37d75b61](https://github.com/davidmigloz/langchain_dart/commit/37d75b612b0f42bbf8d092bdd81c554278716582)) +- **FEAT**: Option to include file search results in assistants API ([#543](https://github.com/davidmigloz/langchain_dart/issues/543)). ([e916ad3c](https://github.com/davidmigloz/langchain_dart/commit/e916ad3c0c4e322319cedac8b06b5908f1c31935)) + + ## 2024-08-22 ### Changes diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index f284843f..42a5999e 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -13,9 +13,9 @@ dependencies: flutter_bloc: ^8.1.6 flutter_markdown: ^0.7.3 js: ^0.7.1 - langchain: ^0.7.5 - langchain_community: 0.3.1 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_community: 0.3.2 + langchain_openai: ^0.7.2 shared_preferences: ^2.3.0 flutter: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 0136cd5a..eb228cde 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -7,11 +7,11 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.5 - langchain_anthropic: ^0.1.1+1 - langchain_chroma: ^0.2.1+2 - langchain_community: 0.3.1 - langchain_google: ^0.6.2 - langchain_mistralai: ^0.2.3 - langchain_ollama: ^0.3.1 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_anthropic: ^0.1.1+2 + langchain_chroma: ^0.2.1+3 + langchain_community: 0.3.2 + langchain_google: ^0.6.3 + langchain_mistralai: ^0.2.3+1 + langchain_ollama: ^0.3.2 + langchain_openai: ^0.7.2 diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index 55135704..883ecefc 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -7,7 +7,7 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.5 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_openai: ^0.7.2 shelf: ^1.4.2 shelf_router: ^1.1.4 diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index 26e63ed8..55291147 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -7,5 
+7,5 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.5 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_openai: ^0.7.2 diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index 15fd553e..1bb0485e 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -12,11 +12,11 @@ dependencies: equatable: ^2.0.5 flutter_bloc: ^8.1.6 flutter_markdown: ^0.7.3 - langchain: ^0.7.5 - langchain_google: ^0.6.2 - langchain_mistralai: ^0.2.3 - langchain_ollama: ^0.3.1 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_google: ^0.6.3 + langchain_mistralai: ^0.2.3+1 + langchain_ollama: ^0.3.2 + langchain_openai: ^0.7.2 flutter: uses-material-design: true diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml index a95f4820..5782a40f 100644 --- a/examples/wikivoyage_eu/pubspec.yaml +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -7,6 +7,6 @@ environment: sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.5 - langchain_ollama: ^0.3.1 - langchain_community: 0.3.1 + langchain: ^0.7.6 + langchain_ollama: ^0.3.2 + langchain_community: 0.3.2 diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index 79614782..b5ee86a2 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.7.6 + + - **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) + ## 0.7.5 - **FEAT**: Add ToolsAgent for models with tool-calling support ([#530](https://github.com/davidmigloz/langchain_dart/issues/530)). ([f3ee5b44](https://github.com/davidmigloz/langchain_dart/commit/f3ee5b44c4ffa378343ec4ee1e08d8e594a6cb36)) diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 06b182e2..48657423 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain description: Build powerful LLM-based Dart and Flutter applications with LangChain.dart. -version: 0.7.5 +version: 0.7.6 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart @@ -19,12 +19,12 @@ dependencies: characters: ^1.3.0 collection: ^1.18.0 crypto: ^3.0.3 - langchain_core: 0.3.5 + langchain_core: 0.3.6 meta: ^1.11.0 uuid: ^4.4.2 dev_dependencies: test: ^1.25.8 - langchain_community: ^0.3.1 - langchain_openai: ^0.7.1 - langchain_ollama: ^0.3.1 + langchain_community: ^0.3.2 + langchain_openai: ^0.7.2 + langchain_ollama: ^0.3.2 diff --git a/packages/langchain_anthropic/CHANGELOG.md b/packages/langchain_anthropic/CHANGELOG.md index 690821d1..167d8c93 100644 --- a/packages/langchain_anthropic/CHANGELOG.md +++ b/packages/langchain_anthropic/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.1.1+2 + + - Update a dependency to the latest release. + ## 0.1.1+1 - Update a dependency to the latest release. diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index 6aedbe71..7b23e44a 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_anthropic description: Anthropic module for LangChain.dart (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.). 
-version: 0.1.1+1 +version: 0.1.1+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ dependencies: anthropic_sdk_dart: ^0.1.0 collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 rxdart: ">=0.27.7 <0.29.0" diff --git a/packages/langchain_chroma/CHANGELOG.md b/packages/langchain_chroma/CHANGELOG.md index 45eaafa2..7e458f37 100644 --- a/packages/langchain_chroma/CHANGELOG.md +++ b/packages/langchain_chroma/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.2.1+3 + + - Update a dependency to the latest release. + ## 0.2.1+2 - Update a dependency to the latest release. diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 59aa28a5..e216f998 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_chroma description: LangChain.dart integration module for Chroma open-source embedding database. -version: 0.2.1+2 +version: 0.2.1+3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart @@ -19,12 +19,12 @@ environment: dependencies: chromadb: ^0.2.0+1 http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 meta: ^1.11.0 uuid: ^4.4.2 dev_dependencies: test: ^1.25.8 - langchain: ^0.7.5 - langchain_community: 0.3.1 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_community: 0.3.2 + langchain_openai: ^0.7.2 diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 9add3205..0336c13e 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -2,6 +2,11 @@ --- +## 0.3.2 + + - **FEAT**: Add support for deleteWhere in ObjectBoxVectorStore ([#552](https://github.com/davidmigloz/langchain_dart/issues/552)). ([90918bba](https://github.com/davidmigloz/langchain_dart/commit/90918bbac411ccfe4823ae195de6a50a46575573)) + - **REFACTOR**: Add stubs for ObjectBox on web platform ([#553](https://github.com/davidmigloz/langchain_dart/issues/553)). ([41caed92](https://github.com/davidmigloz/langchain_dart/commit/41caed924bf24382567758be4590d5ddff31e839)) + ## 0.3.1 - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index de530389..9fd5f428 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. 
-version: 0.3.1 +version: 0.3.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart @@ -22,7 +22,7 @@ dependencies: flat_buffers: ^23.5.26 http: ^1.2.2 json_path: ^0.7.4 - langchain_core: 0.3.5 + langchain_core: 0.3.6 math_expressions: ^2.6.0 meta: ^1.11.0 objectbox: ^4.0.1 @@ -31,7 +31,7 @@ dependencies: dev_dependencies: build_runner: ^2.4.11 - langchain_openai: ^0.7.1 + langchain_openai: ^0.7.2 objectbox_generator: ^4.0.1 test: ^1.25.8 diff --git a/packages/langchain_core/CHANGELOG.md b/packages/langchain_core/CHANGELOG.md index b7592ca0..382d3dd3 100644 --- a/packages/langchain_core/CHANGELOG.md +++ b/packages/langchain_core/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.3.6 + + - **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) + ## 0.3.5 - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index d322abdc..69e8bac9 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_core description: Contains core abstractions of LangChain.dart and the LangChain Expression Language (LCEL). -version: 0.3.5 +version: 0.3.6 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index 11f4c2ea..ab291e7e 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.2.1+2 + + - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) + ## 0.2.1+1 - Update a dependency to the latest release. diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index f63d336a..2c34d324 100644 --- a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -13,8 +13,8 @@ dependencies: flutter: sdk: flutter flutter_markdown: ^0.7.3 - langchain: 0.7.5 - langchain_firebase: 0.2.1+1 + langchain: 0.7.6 + langchain_firebase: 0.2.1+2 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index e6a64b2c..d71a826d 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_firebase description: LangChain.dart integration module for Firebase (Gemini, VertexAI for Firebase, Firestore, etc.). 
-version: 0.2.1+1 +version: 0.2.1+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart @@ -25,7 +25,7 @@ dependencies: cloud_firestore: ^4.17.0 firebase_vertexai: ^0.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 meta: ^1.11.0 uuid: ^4.4.2 diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index a4288382..a7b8814d 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -2,6 +2,11 @@ --- +## 0.6.3 + + - **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). ([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981)) + - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) + ## 0.6.2 - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index 502e6b37..ca540a77 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_google description: LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.). -version: 0.6.2 +version: 0.6.3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart @@ -24,7 +24,7 @@ dependencies: googleapis: ^13.0.0 googleapis_auth: ^1.6.0 http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 meta: ^1.11.0 uuid: ^4.4.2 vertex_ai: ^0.1.0+1 diff --git a/packages/langchain_mistralai/CHANGELOG.md b/packages/langchain_mistralai/CHANGELOG.md index a60fe14b..99b6c0e2 100644 --- a/packages/langchain_mistralai/CHANGELOG.md +++ b/packages/langchain_mistralai/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.2.3+1 + + - Update a dependency to the latest release. + ## 0.2.3 - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 4a3583ed..4b29f8a0 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_mistralai description: LangChain.dart integration module for Mistral AI (Mistral-7B, Mixtral 8x7B, embeddings, etc.). 
-version: 0.2.3 +version: 0.2.3+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_mistralai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_mistralai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,7 +19,7 @@ environment: dependencies: collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 mistralai_dart: ^0.0.3+3 diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index b9795885..f83459af 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.3.2 + + - **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). ([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) + ## 0.3.1 - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index f6e9e066..eb2c1fc8 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_ollama description: LangChain.dart integration module for Ollama (run Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, Qwen2 and other models locally). -version: 0.3.1 +version: 0.3.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart @@ -19,10 +19,10 @@ environment: dependencies: collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.2.1 + ollama_dart: ^0.2.2 uuid: ^4.4.2 dev_dependencies: diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index ab160770..a7a12549 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -2,6 +2,11 @@ --- +## 0.7.2 + + - **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) + - **REFACTOR**: Migrate ChatOpenAI to maxCompletionTokens ([#557](https://github.com/davidmigloz/langchain_dart/issues/557)). ([08057a5b](https://github.com/davidmigloz/langchain_dart/commit/08057a5b6e08ee2633c6be6144be1619e902bbc5)) + ## 0.7.1 - **FEAT**: Add support for Structured Outputs in ChatOpenAI ([#526](https://github.com/davidmigloz/langchain_dart/issues/526)). ([c5387b5d](https://github.com/davidmigloz/langchain_dart/commit/c5387b5dd87fe2aac511c4eca2d4a497065db61f)) diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 37c0b6ca..1161ee71 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_openai description: LangChain.dart integration module for OpenAI (GPT-4o, o1, Embeddings, DALL·E, etc.). 
-version: 0.7.1 +version: 0.7.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart @@ -19,13 +19,13 @@ environment: dependencies: collection: ^1.18.0 http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.4.1 + openai_dart: ^0.4.2 uuid: ^4.4.2 dev_dependencies: - langchain: ^0.7.5 - langchain_community: 0.3.1 + langchain: ^0.7.6 + langchain_community: 0.3.2 test: ^1.25.8 diff --git a/packages/langchain_pinecone/CHANGELOG.md b/packages/langchain_pinecone/CHANGELOG.md index e277b4a0..f616d549 100644 --- a/packages/langchain_pinecone/CHANGELOG.md +++ b/packages/langchain_pinecone/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.1.0+9 + + - Update a dependency to the latest release. + ## 0.1.0+8 - Update a dependency to the latest release. diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index 8cbec927..82e39fa2 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_pinecone description: LangChain.dart integration module for Pinecone fully-managed vector database. -version: 0.1.0+8 +version: 0.1.0+9 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart @@ -18,11 +18,11 @@ environment: dependencies: http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 meta: ^1.11.0 pinecone: ^0.7.2 uuid: ^4.4.2 dev_dependencies: test: ^1.25.8 - langchain_openai: ^0.7.1 + langchain_openai: ^0.7.2 diff --git a/packages/langchain_supabase/CHANGELOG.md b/packages/langchain_supabase/CHANGELOG.md index be6b7129..bd6956b4 100644 --- a/packages/langchain_supabase/CHANGELOG.md +++ b/packages/langchain_supabase/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.1.1+3 + + - Update a dependency to the latest release. + ## 0.1.1+2 - Update a dependency to the latest release. diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index da03bb1c..9b5530ad 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_supabase description: LangChain.dart integration module for Supabase (e.g. Supabase Vector). -version: 0.1.1+2 +version: 0.1.1+3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart @@ -18,12 +18,12 @@ environment: dependencies: http: ^1.2.2 - langchain_core: 0.3.5 + langchain_core: 0.3.6 meta: ^1.11.0 supabase: ^2.2.7 dev_dependencies: test: ^1.25.8 - langchain: ^0.7.5 - langchain_community: 0.3.1 - langchain_openai: ^0.7.1 + langchain: ^0.7.6 + langchain_community: 0.3.2 + langchain_openai: ^0.7.2 diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index c8b93090..d6f79865 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.2.2 + + - **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). 
([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) + ## 0.2.1 - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 0bbd9916..52b3b896 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: ollama_dart description: Dart Client for the Ollama API (run Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, Qwen2 and other models locally). -version: 0.2.1 +version: 0.2.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index 789ec5a7..aa3ac2cc 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -2,6 +2,12 @@ --- +## 0.4.2 + + - **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) + - **FEAT**: Add support for maxCompletionTokens and reasoningTokens in openai_dart ([#556](https://github.com/davidmigloz/langchain_dart/issues/556)). ([37d75b61](https://github.com/davidmigloz/langchain_dart/commit/37d75b612b0f42bbf8d092bdd81c554278716582)) + - **FEAT**: Option to include file search results in assistants API ([#543](https://github.com/davidmigloz/langchain_dart/issues/543)). ([e916ad3c](https://github.com/davidmigloz/langchain_dart/commit/e916ad3c0c4e322319cedac8b06b5908f1c31935)) + ## 0.4.1 - **FEAT**: Add support for Structured Outputs ([#525](https://github.com/davidmigloz/langchain_dart/issues/525)). ([c7574077](https://github.com/davidmigloz/langchain_dart/commit/c7574077195acfc96e9ca9d526cc050788c23c1d)) diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index cff9352b..afff8726 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,6 +1,6 @@ name: openai_dart description: Dart client for the OpenAI API. Supports chat (GPT-4o, o1, etc.), completions, embeddings, images (DALL·E 3), assistants (threads,, vector stores, etc.), batch, fine-tuning, etc. 
-version: 0.4.1 +version: 0.4.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart From f0b4118c4011e724e4a5c86167d760c4866a434e Mon Sep 17 00:00:00 2001 From: David Miguel Date: Wed, 25 Sep 2024 23:59:13 +0200 Subject: [PATCH 125/251] build: Update pubspec.lock files --- examples/browser_summarizer/pubspec.lock | 10 ++++----- examples/docs_examples/pubspec.lock | 22 +++++++++---------- examples/hello_world_backend/pubspec.lock | 8 +++---- examples/hello_world_cli/pubspec.lock | 8 +++---- examples/hello_world_flutter/pubspec.lock | 16 +++++++------- examples/wikivoyage_eu/pubspec.lock | 10 ++++----- .../langchain_firebase/example/pubspec.lock | 6 ++--- packages/langchain_firebase/pubspec.lock | 2 +- 8 files changed, 41 insertions(+), 41 deletions(-) diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index 6eada274..138544a4 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -233,28 +233,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_tiktoken: dependency: transitive description: @@ -325,7 +325,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.1" + version: "0.4.2" path: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index a7a05f06..40ee1fd3 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -237,63 +237,63 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_anthropic: dependency: "direct main" description: path: "../../packages/langchain_anthropic" relative: true source: path - version: "0.1.1+1" + version: "0.1.1+2" langchain_chroma: dependency: "direct main" description: path: "../../packages/langchain_chroma" relative: true source: path - version: "0.2.1+2" + version: "0.2.1+3" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.2" + version: "0.6.3" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.3" + version: "0.2.3+1" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_openai: 
dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_tiktoken: dependency: transitive description: @@ -347,14 +347,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.2.1" + version: "0.2.2" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.1" + version: "0.4.2" path: dependency: transitive description: diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index f7ad7603..cbfd6954 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -111,21 +111,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_tiktoken: dependency: transitive description: @@ -148,7 +148,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.1" + version: "0.4.2" path: dependency: transitive description: diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 94af9a94..52a95a74 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -103,21 +103,21 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_tiktoken: dependency: transitive description: @@ -140,7 +140,7 @@ packages: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.1" + version: "0.4.2" path: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 1fbbc8d3..7ef29219 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -196,42 +196,42 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.2" + version: "0.6.3" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.3" + version: "0.2.3+1" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.1" + version: "0.7.2" langchain_tiktoken: 
dependency: transitive description: @@ -285,14 +285,14 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.2.1" + version: "0.2.2" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.1" + version: "0.4.2" path: dependency: transitive description: diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock index df3386b8..f242c95d 100644 --- a/examples/wikivoyage_eu/pubspec.lock +++ b/examples/wikivoyage_eu/pubspec.lock @@ -167,28 +167,28 @@ packages: path: "../../packages/langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.3.1" + version: "0.3.2" langchain_tiktoken: dependency: transitive description: @@ -235,7 +235,7 @@ packages: path: "../../packages/ollama_dart" relative: true source: path - version: "0.2.1" + version: "0.2.2" path: dependency: transitive description: diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index 5a3fa013..b08dde36 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -262,21 +262,21 @@ packages: path: "../../langchain" relative: true source: path - version: "0.7.5" + version: "0.7.6" langchain_core: dependency: "direct overridden" description: path: "../../langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" langchain_firebase: dependency: "direct main" description: path: ".." relative: true source: path - version: "0.2.1+1" + version: "0.2.1+2" leak_tracker: dependency: transitive description: diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index de205b64..eb451c0d 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -230,7 +230,7 @@ packages: path: "../langchain_core" relative: true source: path - version: "0.3.5" + version: "0.3.6" leak_tracker: dependency: transitive description: From 46905886aef079b093a47dc5c3dfeb5cc319072b Mon Sep 17 00:00:00 2001 From: David Miguel Date: Thu, 26 Sep 2024 00:04:20 +0200 Subject: [PATCH 126/251] docs: Update README.md --- packages/vertex_ai/pubspec.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index 5d7612aa..e4a35224 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -1,5 +1,5 @@ name: vertex_ai -description: GCP Vertex AI ML platform API client (PaLM, Matching Engine, etc.). +description: GCP Vertex AI ML platform API client (PaLM, Vector Search, etc.). 
version: 0.1.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/vertex_ai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:vertex_ai @@ -11,7 +11,6 @@ topics: - nlp - llms - palm - - matching-engine environment: sdk: ">=3.4.0 <4.0.0" From 62f042a957ac799c5615c1deea17525ee1446c10 Mon Sep 17 00:00:00 2001 From: David Miguel Date: Thu, 26 Sep 2024 00:04:32 +0200 Subject: [PATCH 127/251] chore(release): publish packages - vertex_ai@0.1.0+2 - langchain_google@0.6.3+1 --- CHANGELOG.md | 5 +++-- examples/docs_examples/pubspec.lock | 4 ++-- examples/docs_examples/pubspec.yaml | 2 +- examples/hello_world_flutter/pubspec.lock | 4 ++-- examples/hello_world_flutter/pubspec.yaml | 2 +- examples/vertex_ai_matching_engine_setup/pubspec.lock | 2 +- examples/vertex_ai_matching_engine_setup/pubspec.yaml | 2 +- packages/langchain_google/CHANGELOG.md | 2 +- packages/langchain_google/pubspec.yaml | 4 ++-- packages/vertex_ai/CHANGELOG.md | 4 ++++ packages/vertex_ai/pubspec.yaml | 2 +- 11 files changed, 19 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a91910b..59691103 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,7 +18,7 @@ Packages with other changes: - [`langchain_core` - `v0.3.6`](#langchain_core---v036) - [`langchain_community` - `v0.3.2`](#langchain_community---v032) - [`langchain_firebase` - `v0.2.1+2`](#langchain_firebase---v0212) -- [`langchain_google` - `v0.6.3`](#langchain_google---v063) +- [`langchain_google` - `v0.6.3+1`](#langchain_google---v0631) - [`langchain_ollama` - `v0.3.2`](#langchain_ollama---v032) - [`langchain_openai` - `v0.7.2`](#langchain_openai---v072) - [`ollama_dart` - `v0.2.2`](#ollama_dart---v022) @@ -38,6 +38,7 @@ Packages with dependency updates only: - `langchain_anthropic` - `v0.1.1+2` - `langchain_chroma` - `v0.2.1+3` - `langchain_mistralai` - `v0.2.3+1` +- `vertex_ai` - `v0.1.0+2` --- @@ -58,7 +59,7 @@ Packages with dependency updates only: - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) -#### `langchain_google` - `v0.6.3` +#### `langchain_google` - `v0.6.3+1` - **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). ([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981)) - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). 
([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 40ee1fd3..1a928bf7 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -272,7 +272,7 @@ packages: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.3" + version: "0.6.3+1" langchain_mistralai: dependency: "direct main" description: @@ -464,7 +464,7 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0+1" + version: "0.1.0+2" web: dependency: transitive description: diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index eb228cde..985f1d64 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -11,7 +11,7 @@ dependencies: langchain_anthropic: ^0.1.1+2 langchain_chroma: ^0.2.1+3 langchain_community: 0.3.2 - langchain_google: ^0.6.3 + langchain_google: ^0.6.3+1 langchain_mistralai: ^0.2.3+1 langchain_ollama: ^0.3.2 langchain_openai: ^0.7.2 diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 7ef29219..feb099a5 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -210,7 +210,7 @@ packages: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.3" + version: "0.6.3+1" langchain_mistralai: dependency: "direct main" description: @@ -392,7 +392,7 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0+1" + version: "0.1.0+2" web: dependency: transitive description: diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index 1bb0485e..f9fe1384 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -13,7 +13,7 @@ dependencies: flutter_bloc: ^8.1.6 flutter_markdown: ^0.7.3 langchain: ^0.7.6 - langchain_google: ^0.6.3 + langchain_google: ^0.6.3+1 langchain_mistralai: ^0.2.3+1 langchain_ollama: ^0.3.2 langchain_openai: ^0.7.2 diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.lock b/examples/vertex_ai_matching_engine_setup/pubspec.lock index 752608b4..b3a0f0ae 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.lock +++ b/examples/vertex_ai_matching_engine_setup/pubspec.lock @@ -151,7 +151,7 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0+1" + version: "0.1.0+2" web: dependency: transitive description: diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.yaml b/examples/vertex_ai_matching_engine_setup/pubspec.yaml index 4519fdbb..c37f6c30 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.yaml +++ b/examples/vertex_ai_matching_engine_setup/pubspec.yaml @@ -10,4 +10,4 @@ dependencies: gcloud: ^0.8.13 googleapis_auth: ^1.6.0 http: ^1.2.2 - vertex_ai: ^0.1.0+1 + vertex_ai: ^0.1.0+2 diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index a7b8814d..36d0882e 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -2,7 +2,7 @@ --- -## 0.6.3 +## 0.6.3+1 - **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). 
([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981)) - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index ca540a77..6bbf914b 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,6 +1,6 @@ name: langchain_google description: LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.). -version: 0.6.3 +version: 0.6.3+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart @@ -27,7 +27,7 @@ dependencies: langchain_core: 0.3.6 meta: ^1.11.0 uuid: ^4.4.2 - vertex_ai: ^0.1.0+1 + vertex_ai: ^0.1.0+2 langchain_firebase: ^0.1.0 firebase_core: ^2.31.0 diff --git a/packages/vertex_ai/CHANGELOG.md b/packages/vertex_ai/CHANGELOG.md index 5c733127..372ba2dc 100644 --- a/packages/vertex_ai/CHANGELOG.md +++ b/packages/vertex_ai/CHANGELOG.md @@ -2,6 +2,10 @@ --- +## 0.1.0+2 + + - Update a dependency to the latest release. + ## 0.1.0+1 - Update a dependency to the latest release. diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml index e4a35224..9e25d858 100644 --- a/packages/vertex_ai/pubspec.yaml +++ b/packages/vertex_ai/pubspec.yaml @@ -1,6 +1,6 @@ name: vertex_ai description: GCP Vertex AI ML platform API client (PaLM, Vector Search, etc.). -version: 0.1.0+1 +version: 0.1.0+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/vertex_ai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:vertex_ai homepage: https://github.com/davidmigloz/langchain_dart From 36c28f66f6c02c9e1b4f3b1f13eb4e6a3d04da9e Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Thu, 26 Sep 2024 00:28:05 +0200 Subject: [PATCH 128/251] refactor: Update deprecated UUID constant (#558) --- packages/langchain/lib/src/embeddings/cache.dart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/langchain/lib/src/embeddings/cache.dart b/packages/langchain/lib/src/embeddings/cache.dart index 270ae124..3a67cd39 100644 --- a/packages/langchain/lib/src/embeddings/cache.dart +++ b/packages/langchain/lib/src/embeddings/cache.dart @@ -135,7 +135,7 @@ class EmbeddingsByteStoreEncoder @override String encodeKey(final String key) { final keyHash = sha1.convert(utf8.encode(key)).toString(); - return uuid.v5(Uuid.NAMESPACE_URL, keyHash); + return uuid.v5(Namespace.URL, keyHash); } @override From 4dccc2ba8b342a454df5cb1b8386f2ce23e927b1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 08:53:21 +0200 Subject: [PATCH 129/251] build(deps): bump actions/checkout from 4.1.7 to 4.2.0 (#559) Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.7 to 4.2.0. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/692973e3d937129bcbf40652eb9f2f61becf3332...d632683dd7b4114ad314bca15554477dd762a938) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docs.yaml | 2 +- .github/workflows/firebase-hosting-merge.yml | 2 +- .github/workflows/firebase-hosting-pull-request.yml | 2 +- .github/workflows/test.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 98f80b82..448ca45f 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -27,7 +27,7 @@ jobs: timeout-minutes: 30 steps: - name: Checkout repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 - name: Setup Pages uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b diff --git a/.github/workflows/firebase-hosting-merge.yml b/.github/workflows/firebase-hosting-merge.yml index 262b5a52..35567de1 100644 --- a/.github/workflows/firebase-hosting-merge.yml +++ b/.github/workflows/firebase-hosting-merge.yml @@ -9,7 +9,7 @@ jobs: build_and_deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 - run: npm ci && npm run build working-directory: docs_v2 - uses: FirebaseExtended/action-hosting-deploy@0cbcac4740c2bfb00d632f0b863b57713124eb5a diff --git a/.github/workflows/firebase-hosting-pull-request.yml b/.github/workflows/firebase-hosting-pull-request.yml index 2a2d9416..0086d2e1 100644 --- a/.github/workflows/firebase-hosting-pull-request.yml +++ b/.github/workflows/firebase-hosting-pull-request.yml @@ -15,7 +15,7 @@ jobs: if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 - run: npm ci && npm run build working-directory: docs_v2 - uses: FirebaseExtended/action-hosting-deploy@0cbcac4740c2bfb00d632f0b863b57713124eb5a diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index c4e0c410..1141bb2d 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -22,7 +22,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 with: ref: "${{ github.event.pull_request.base.sha }}" # Required for pull_request_target fetch-depth: 0 From 8b81bea3652c00ef8f69e3676ce99177462f9049 Mon Sep 17 00:00:00 2001 From: Kenneth Knudsen Date: Tue, 1 Oct 2024 20:13:08 +0200 Subject: [PATCH 130/251] soft-reset to upstream main --- .github/workflows/docs.yaml | 2 +- .github/workflows/firebase-hosting-merge.yml | 21 - .../firebase-hosting-pull-request.yml | 26 - .github/workflows/test.yaml | 7 +- .gitignore | 1 - CHANGELOG.md | 536 +- analysis_options.yaml | 15 +- docs/CNAME | 2 +- docs/README.md | 2 +- docs/_sidebar.md | 7 +- .../cookbook/prompt_llm_parser.md | 6 +- docs/expression_language/fallbacks.md | 135 
- docs/expression_language/get_started.md | 2 +- docs/expression_language/interface.md | 2 +- docs/expression_language/primitives.md | 1 - .../expression_language/primitives/binding.md | 2 +- .../primitives/function.md | 8 +- docs/expression_language/primitives/mapper.md | 4 +- docs/expression_language/primitives/retry.md | 94 - docs/expression_language/primitives/router.md | 6 +- docs/expression_language/streaming.md | 8 +- docs/index.html | 13 +- .../modules/agents/agent_types/agent_types.md | 2 +- .../agents/agent_types/openai_tools_agent.md | 173 + .../modules/agents/agent_types/tools_agent.md | 190 - docs/modules/agents/agents.md | 4 +- docs/modules/agents/tools/calculator.md | 2 +- docs/modules/agents/tools/openai_dall_e.md | 2 +- .../models/chat_models/chat_models.md | 2 +- .../models/chat_models/how_to/tools.md | 4 +- .../chat_models/integrations/anthropic.md | 145 - .../integrations/firebase_vertex_ai.md | 32 +- .../chat_models/integrations/googleai.md | 34 +- .../models/chat_models/integrations/ollama.md | 359 +- .../chat_models/integrations/open_router.md | 60 - .../models/chat_models/integrations/openai.md | 260 +- .../models/llms/integrations/ollama.md | 6 +- docs/modules/model_io/output_parsers/json.md | 4 +- .../text_embedding/integrations/google_ai.md | 2 + .../text_embedding/integrations/ollama.md | 2 +- .../integrations/img/objectbox.png | Bin 51968 -> 0 bytes .../vector_stores/integrations/memory.md | 8 +- .../vector_stores/integrations/objectbox.md | 346 - docs_v2/.firebaserc | 5 - docs_v2/.gitignore | 21 - docs_v2/README.md | 41 - docs_v2/babel.config.js | 3 - docs_v2/docs/01-intro.md | 171 - docs_v2/docs/02-tutorials/01-llm_chain.md | 7 - docs_v2/docs/02-tutorials/index.mdx | 28 - docs_v2/docs/03-how_to/01-installation.md | 77 - .../docs/03-how_to/02-structured_output.md | 14 - docs_v2/docs/03-how_to/index.mdx | 149 - docs_v2/docs/04-concepts.mdx | 468 - docs_v2/docs/05-integrations/anthropic.md | 145 - docs_v2/docs/05-integrations/anyscale.md | 84 - .../05-integrations/firebase_vertex_ai.md | 190 - docs_v2/docs/05-integrations/gcp_vertex_ai.md | 116 - docs_v2/docs/05-integrations/googleai.md | 149 - docs_v2/docs/05-integrations/index.mdx | 56 - docs_v2/docs/05-integrations/mistralai.md | 76 - docs_v2/docs/05-integrations/ollama.md | 462 - docs_v2/docs/05-integrations/open_router.md | 157 - docs_v2/docs/05-integrations/openai.md | 372 - docs_v2/docs/05-integrations/prem.md | 24 - docs_v2/docs/05-integrations/together_ai.md | 84 - docs_v2/docs/05-integrations/tools/index.mdx | 5 - .../05-integrations/tools/tavily_search.md | 13 - docs_v2/docusaurus.config.js | 130 - docs_v2/firebase.json | 16 - docs_v2/package-lock.json | 14678 ------- docs_v2/package.json | 44 - docs_v2/sidebars.js | 30 - .../src/components/HomepageFeatures/index.js | 64 - .../HomepageFeatures/styles.module.css | 11 - docs_v2/src/css/custom.css | 30 - docs_v2/src/pages/index.js | 7 - docs_v2/src/pages/index.module.css | 23 - docs_v2/src/pages/markdown-page.md | 7 - docs_v2/static/.nojekyll | 0 docs_v2/static/img/favicon.ico | Bin 15406 -> 0 bytes docs_v2/static/img/langchain.dart.png | Bin 156015 -> 0 bytes docs_v2/static/img/logo.svg | 1 - .../lib/chrome/chrome_api.dart | 2 +- examples/browser_summarizer/pubspec.lock | 165 +- examples/browser_summarizer/pubspec.yaml | 14 +- .../browser_summarizer/pubspec_overrides.yaml | 4 +- examples/docs_examples/README.md | 2 +- .../cookbook/prompt_llm_parser.dart | 6 +- .../expression_language/cookbook/routing.dart | 6 +- .../cookbook/streaming.dart | 12 +- 
.../bin/expression_language/fallbacks.dart | 181 - .../bin/expression_language/get_started.dart | 2 +- .../bin/expression_language/interface.dart | 2 +- .../primitives/binding.dart | 2 +- .../primitives/function.dart | 8 +- .../primitives/mapper.dart | 4 +- .../expression_language/primitives/retry.dart | 177 - ...ols_agent.dart => openai_tools_agent.dart} | 72 +- .../bin/modules/agents/tools/calculator.dart | 2 +- .../modules/agents/tools/openai_dalle.dart | 2 +- .../chat_models/integrations/anthropic.dart | 109 - .../chat_models/integrations/ollama.dart | 269 +- .../chat_models/integrations/open_router.dart | 54 - .../chat_models/integrations/openai.dart | 4 +- .../models/llms/integrations/ollama.dart | 4 +- .../modules/model_io/output_parsers/json.dart | 8 +- .../vector_stores/integrations/objectbox.dart | 108 - examples/docs_examples/pubspec.lock | 169 +- examples/docs_examples/pubspec.yaml | 19 +- examples/docs_examples/pubspec_overrides.yaml | 8 +- examples/hello_world_backend/README.md | 2 +- examples/hello_world_backend/pubspec.lock | 74 +- examples/hello_world_backend/pubspec.yaml | 8 +- .../pubspec_overrides.yaml | 2 +- examples/hello_world_cli/README.md | 2 +- examples/hello_world_cli/pubspec.lock | 70 +- examples/hello_world_cli/pubspec.yaml | 6 +- .../hello_world_cli/pubspec_overrides.yaml | 2 +- examples/hello_world_flutter/README.md | 21 +- .../android/app/build.gradle | 30 +- .../hello_world_flutter/android/build.gradle | 13 + .../android/gradle.properties | 2 +- .../gradle/wrapper/gradle-wrapper.properties | 2 +- .../android/settings.gradle | 30 +- .../hello_world_flutter/devtools_options.yaml | 3 - .../hello_world_flutter.gif | Bin 235564 -> 0 bytes .../hello_world_flutter_local.gif | Bin 0 -> 144881 bytes .../hello_world_flutter_openai.gif | Bin 0 -> 119360 bytes .../ios/Flutter/AppFrameworkInfo.plist | 2 +- .../ios/Runner.xcodeproj/project.pbxproj | 8 +- .../xcshareddata/xcschemes/Runner.xcscheme | 2 +- examples/hello_world_flutter/lib/app.dart | 1 - .../lib/home/bloc/home_screen_cubit.dart | 179 +- .../lib/home/bloc/home_screen_state.dart | 54 +- .../lib/home/bloc/providers.dart | 40 - .../lib/home/home_screen.dart | 355 +- examples/hello_world_flutter/pubspec.lock | 202 +- examples/hello_world_flutter/pubspec.yaml | 12 +- .../pubspec_overrides.yaml | 14 +- .../web/flutter_bootstrap.js | 12 - examples/hello_world_flutter/web/index.html | 62 +- .../hello_world_flutter/web/manifest.json | 4 +- .../pubspec.lock | 52 +- .../pubspec.yaml | 10 +- examples/wikivoyage_eu/.gitignore | 3 - examples/wikivoyage_eu/README.md | 89 - examples/wikivoyage_eu/analysis_options.yaml | 1 - examples/wikivoyage_eu/bin/injestion.dart | 21 - examples/wikivoyage_eu/bin/wikivoyage_eu.dart | 82 - .../bin/wikivoyage_eu_dataset.csv | 161 - examples/wikivoyage_eu/pubspec.lock | 343 - examples/wikivoyage_eu/pubspec.yaml | 12 - examples/wikivoyage_eu/pubspec_overrides.yaml | 14 - examples/wikivoyage_eu/rag.png | Bin 18434 -> 0 bytes examples/wikivoyage_eu/wikivoyage_eu.gif | Bin 171257 -> 0 bytes melos.yaml | 63 +- packages/anthropic_sdk_dart/CHANGELOG.md | 18 - packages/anthropic_sdk_dart/LICENSE | 21 - packages/anthropic_sdk_dart/README.md | 304 - .../anthropic_sdk_dart/analysis_options.yaml | 1 - packages/anthropic_sdk_dart/build.yaml | 13 - .../example/anthropic_sdk_dart_example.dart | 200 - .../lib/anthropic_sdk_dart.dart | 7 - .../anthropic_sdk_dart/lib/src/client.dart | 104 - .../lib/src/extensions.dart | 92 - .../lib/src/generated/client.dart | 395 - .../lib/src/generated/schema/block.dart | 
155 - .../lib/src/generated/schema/block_delta.dart | 56 - .../schema/create_message_request.dart | 380 - .../create_message_request_metadata.dart | 44 - .../generated/schema/image_block_source.dart | 74 - .../lib/src/generated/schema/message.dart | 162 - .../src/generated/schema/message_delta.dart | 61 - .../generated/schema/message_delta_usage.dart | 51 - .../src/generated/schema/message_role.dart | 17 - .../schema/message_stream_event.dart | 126 - .../schema/message_stream_event_type.dart | 27 - .../lib/src/generated/schema/schema.dart | 28 - .../src/generated/schema/schema.freezed.dart | 7758 ---- .../lib/src/generated/schema/schema.g.dart | 558 - .../lib/src/generated/schema/stop_reason.dart | 30 - .../lib/src/generated/schema/tool.dart | 59 - .../lib/src/generated/schema/tool_choice.dart | 54 - .../generated/schema/tool_choice_type.dart | 24 - .../lib/src/generated/schema/usage.dart | 54 - .../lib/src/http_client/http_client.dart | 3 - .../lib/src/http_client/http_client_html.dart | 18 - .../lib/src/http_client/http_client_io.dart | 12 - .../lib/src/http_client/http_client_stub.dart | 10 - .../oas/anthropic_openapi_curated.yaml | 778 - packages/anthropic_sdk_dart/oas/main.dart | 60 - packages/anthropic_sdk_dart/pubspec.lock | 627 - packages/anthropic_sdk_dart/pubspec.yaml | 34 - .../test/messages_test.dart | 320 - packages/chromadb/CHANGELOG.md | 8 - packages/chromadb/pubspec.yaml | 22 +- packages/googleai_dart/CHANGELOG.md | 12 - packages/googleai_dart/lib/googleai_dart.dart | 2 +- .../lib/src/http_client/http_client.dart | 3 +- packages/googleai_dart/pubspec.yaml | 24 +- packages/langchain/CHANGELOG.md | 45 +- packages/langchain/README.md | 166 +- packages/langchain/lib/src/agents/agents.dart | 1 - packages/langchain/lib/src/agents/tools.dart | 304 - .../langchain/lib/src/embeddings/cache.dart | 2 +- packages/langchain/lib/src/utils/utils.dart | 6 +- .../lib/src/vector_stores/memory.dart | 4 +- packages/langchain/pubspec.yaml | 17 +- packages/langchain/pubspec_overrides.yaml | 14 +- .../test/agents/assets/state_of_the_union.txt | 723 - .../test/agents/tools_agent_test.dart | 226 - packages/langchain_amazon/pubspec.yaml | 4 +- packages/langchain_anthropic/CHANGELOG.md | 22 - packages/langchain_anthropic/README.md | 15 +- .../example/langchain_anthropic_example.dart | 42 +- .../lib/langchain_anthropic.dart | 2 - .../lib/src/chat_models/chat_anthropic.dart | 243 - .../lib/src/chat_models/chat_models.dart | 2 - .../lib/src/chat_models/mappers.dart | 433 - .../lib/src/chat_models/types.dart | 160 - packages/langchain_anthropic/pubspec.yaml | 21 +- .../pubspec_overrides.yaml | 6 - .../test/chat_models/assets/apple.jpeg | Bin 66803 -> 0 bytes .../test/chat_models/chat_anthropic_test.dart | 293 - packages/langchain_chroma/CHANGELOG.md | 24 - .../lib/src/vector_stores/chroma.dart | 2 +- packages/langchain_chroma/pubspec.yaml | 22 +- .../langchain_chroma/pubspec_overrides.yaml | 4 +- packages/langchain_cohere/pubspec.yaml | 4 +- packages/langchain_community/CHANGELOG.md | 31 - packages/langchain_community/README.md | 4 - .../lib/langchain_community.dart | 1 - .../lib/src/document_loaders/csv.dart | 3 +- .../lib/src/tools/calculator.dart | 2 +- .../lib/src/tools/tavily/mappers.dart | 21 - .../lib/src/tools/tavily/tavily.dart | 3 - .../lib/src/tools/tavily/tavily_answer.dart | 102 - .../tools/tavily/tavily_search_results.dart | 130 - .../lib/src/tools/tavily/types.dart | 181 - .../lib/src/tools/tools.dart | 1 - .../objectbox/base_objectbox.dart | 220 - .../objectbox/base_objectbox_stub.dart | 
40 - .../lib/src/vector_stores/objectbox/ob.dart | 7 - .../src/vector_stores/objectbox/ob_io.dart | 3 - .../src/vector_stores/objectbox/ob_stub.dart | 3 - .../objectbox/objectbox-model.json | 56 - .../vector_stores/objectbox/objectbox.dart | 196 - .../vector_stores/objectbox/objectbox.g.dart | 193 - .../objectbox/objectbox_stub.dart | 53 - .../src/vector_stores/objectbox/types.dart | 29 - .../vector_stores/objectbox/types_stub.dart | 11 - .../lib/src/vector_stores/vector_stores.dart | 1 - packages/langchain_community/pubspec.yaml | 28 +- .../pubspec_overrides.yaml | 8 +- .../test/tools/tavily_test.dart | 31 - .../objectbox/objectbox_test.dart | 190 - packages/langchain_core/CHANGELOG.md | 31 - packages/langchain_core/lib/agents.dart | 2 +- packages/langchain_core/lib/chains.dart | 2 +- packages/langchain_core/lib/chat_history.dart | 2 +- packages/langchain_core/lib/chat_models.dart | 2 +- .../langchain_core/lib/document_loaders.dart | 2 +- packages/langchain_core/lib/documents.dart | 2 +- packages/langchain_core/lib/embeddings.dart | 2 +- packages/langchain_core/lib/exceptions.dart | 2 +- packages/langchain_core/lib/langchain.dart | 2 +- .../langchain_core/lib/language_models.dart | 2 +- packages/langchain_core/lib/llms.dart | 2 +- packages/langchain_core/lib/memory.dart | 2 +- .../langchain_core/lib/output_parsers.dart | 2 +- packages/langchain_core/lib/prompts.dart | 2 +- packages/langchain_core/lib/retrievers.dart | 2 +- packages/langchain_core/lib/runnables.dart | 2 +- .../langchain_core/lib/src/chains/types.dart | 2 +- .../lib/src/chat_models/fake.dart | 232 +- .../lib/src/chat_models/types.dart | 35 +- .../lib/src/chat_models/utils.dart | 2 +- .../lib/src/langchain/types.dart | 2 +- .../lib/src/language_models/base.dart | 31 + .../lib/src/language_models/types.dart | 23 +- .../langchain_core/lib/src/llms/fake.dart | 34 +- .../langchain_core/lib/src/llms/types.dart | 5 +- .../lib/src/output_parsers/string.dart | 4 +- .../lib/src/output_parsers/types.dart | 4 +- .../langchain_core/lib/src/prompts/types.dart | 4 +- .../lib/src/retrievers/types.dart | 16 +- .../lib/src/runnables/binding.dart | 9 +- .../lib/src/runnables/fallbacks.dart | 112 - .../lib/src/runnables/function.dart | 4 +- .../langchain_core/lib/src/runnables/map.dart | 7 - .../lib/src/runnables/retry.dart | 63 - .../lib/src/runnables/runnable.dart | 63 - .../lib/src/runnables/runnables.dart | 2 - .../lib/src/runnables/sequence.dart | 11 +- .../lib/src/runnables/types.dart | 27 - .../langchain_core/lib/src/tools/base.dart | 59 +- .../langchain_core/lib/src/tools/string.dart | 6 - .../langchain_core/lib/src/tools/types.dart | 4 +- .../lib/src/utils/retry_client.dart | 92 - .../langchain_core/lib/src/utils/utils.dart | 1 - .../lib/src/vector_stores/base.dart | 2 + packages/langchain_core/lib/stores.dart | 2 +- packages/langchain_core/lib/tools.dart | 2 +- packages/langchain_core/lib/utils.dart | 2 +- .../langchain_core/lib/vector_stores.dart | 2 +- packages/langchain_core/pubspec.yaml | 14 +- .../test/runnables/binding_test.dart | 37 - .../test/runnables/fallbacks_test.dart | 102 - .../test/runnables/map_test.dart | 5 +- .../test/runnables/retry_test.dart | 87 - packages/langchain_firebase/CHANGELOG.md | 41 - .../langchain_firebase/example/lib/main.dart | 4 +- .../Flutter/GeneratedPluginRegistrant.swift | 2 - .../langchain_firebase/example/pubspec.lock | 118 +- .../langchain_firebase/example/pubspec.yaml | 12 +- .../example/pubspec_overrides.yaml | 2 +- .../example/web/flutter_bootstrap.js | 12 - 
.../langchain_firebase/example/web/index.html | 54 +- .../lib/langchain_firebase.dart | 2 +- .../vertex_ai/chat_firebase_vertex_ai.dart | 89 +- .../src/chat_models/vertex_ai/mappers.dart | 51 +- .../lib/src/chat_models/vertex_ai/types.dart | 112 +- packages/langchain_firebase/pubspec.lock | 110 +- packages/langchain_firebase/pubspec.yaml | 23 +- packages/langchain_google/CHANGELOG.md | 32 - .../lib/langchain_google.dart | 2 +- .../google_ai/chat_google_generative_ai.dart | 48 +- .../src/chat_models/google_ai/mappers.dart | 22 +- .../lib/src/chat_models/google_ai/types.dart | 105 +- .../chat_models/vertex_ai/chat_vertex_ai.dart | 33 +- .../lib/src/chat_models/vertex_ai/types.dart | 74 +- .../google_ai/google_ai_embeddings.dart | 10 +- .../lib/src/llms/vertex_ai/types.dart | 68 +- .../lib/src/llms/vertex_ai/vertex_ai.dart | 32 +- .../src/utils/https_client/http_client.dart | 3 +- packages/langchain_google/pubspec.yaml | 30 +- .../langchain_google/pubspec_overrides.yaml | 2 +- .../chat_google_generative_ai_test.dart | 14 +- .../google_ai/google_ai_embeddings_test.dart | 3 +- packages/langchain_huggingface/pubspec.yaml | 4 +- packages/langchain_microsoft/pubspec.yaml | 4 +- packages/langchain_mistralai/CHANGELOG.md | 26 - .../lib/src/chat_models/chat_mistralai.dart | 9 +- .../lib/src/chat_models/types.dart | 54 +- packages/langchain_mistralai/pubspec.yaml | 16 +- .../pubspec_overrides.yaml | 2 +- packages/langchain_ollama/CHANGELOG.md | 34 - packages/langchain_ollama/README.md | 4 +- .../lib/src/chat_models/chat_models.dart | 4 +- .../{chat_ollama => }/chat_ollama.dart | 74 +- .../src/chat_models/chat_ollama/mappers.dart | 267 - .../lib/src/chat_models/mappers.dart | 142 + .../chat_models/{chat_ollama => }/types.dart | 165 +- .../lib/src/embeddings/ollama_embeddings.dart | 6 +- .../langchain_ollama/lib/src/llms/ollama.dart | 31 +- .../langchain_ollama/lib/src/llms/types.dart | 174 +- packages/langchain_ollama/pubspec.yaml | 20 +- .../langchain_ollama/pubspec_overrides.yaml | 2 +- .../test/chat_models/chat_ollama_test.dart | 164 +- .../test/embeddings/ollama_test.dart | 2 +- .../test/llms/ollama_test.dart | 2 +- packages/langchain_openai/CHANGELOG.md | 46 +- packages/langchain_openai/README.md | 2 + .../langchain_openai/lib/fix_data/fix.yaml | 19 - .../lib/langchain_openai.dart | 2 +- .../lib/src/agents/tools.dart | 18 +- .../lib/src/chains/qa_with_sources.dart | 2 +- .../lib/src/chat_models/chat_openai.dart | 74 +- .../lib/src/chat_models/mappers.dart | 165 +- .../lib/src/chat_models/types.dart | 308 +- .../langchain_openai/lib/src/llms/openai.dart | 28 +- .../langchain_openai/lib/src/llms/types.dart | 119 +- .../lib/src/tools/dall_e.dart | 4 +- .../langchain_openai/lib/src/tools/types.dart | 59 - packages/langchain_openai/pubspec.yaml | 24 +- .../langchain_openai/pubspec_overrides.yaml | 4 +- .../test/agents/tools_test.dart | 4 +- .../test/chains/qa_with_sources_test.dart | 2 + .../test/chat_models/anyscale_test.dart | 115 + .../test/chat_models/chat_openai_test.dart | 214 +- .../test/chat_models/github_models_test.dart | 181 - .../test/chat_models/open_router_test.dart | 75 +- .../embeddings/anyscale_embeddings_test.dart | 36 + .../test/tools/dall_e_test.dart | 4 +- packages/langchain_pinecone/CHANGELOG.md | 24 - packages/langchain_pinecone/pubspec.yaml | 16 +- .../langchain_pinecone/pubspec_overrides.yaml | 2 +- packages/langchain_supabase/CHANGELOG.md | 24 - .../lib/src/vector_stores/supabase.dart | 2 +- packages/langchain_supabase/pubspec.yaml | 20 +- 
.../langchain_supabase/pubspec_overrides.yaml | 5 +- packages/langchain_weaviate/pubspec.yaml | 4 +- packages/langchain_wikipedia/pubspec.yaml | 4 +- packages/langchain_wolfram/pubspec.yaml | 4 +- packages/langgraph/.gitignore | 7 - packages/langgraph/CHANGELOG.md | 3 - packages/langgraph/LICENSE | 21 - packages/langgraph/README.md | 17 - packages/langgraph/analysis_options.yaml | 1 - .../langgraph/example/langgraph_example.dart | 3 - packages/langgraph/lib/langgraph.dart | 2 - packages/langgraph/pubspec.yaml | 16 - packages/mistralai_dart/CHANGELOG.md | 12 - .../mistralai_dart/lib/mistralai_dart.dart | 2 +- .../lib/src/http_client/http_client.dart | 3 +- packages/mistralai_dart/pubspec.yaml | 24 +- packages/ollama_dart/CHANGELOG.md | 37 - packages/ollama_dart/README.md | 123 +- .../example/ollama_dart_example.dart | 88 +- packages/ollama_dart/lib/ollama_dart.dart | 2 +- packages/ollama_dart/lib/src/client.dart | 53 +- .../ollama_dart/lib/src/generated/client.dart | 66 +- .../generate_chat_completion_request.dart | 7 +- .../generate_chat_completion_response.dart | 8 +- .../schema/generate_completion_request.dart | 5 - .../lib/src/generated/schema/message.dart | 14 +- .../lib/src/generated/schema/model_info.dart | 6 - .../generated/schema/model_information.dart | 61 - .../src/generated/schema/process_model.dart | 69 - .../generated/schema/process_response.dart | 40 - .../generated/schema/push_model_response.dart | 6 +- .../generated/schema/push_model_status.dart | 21 + .../src/generated/schema/request_options.dart | 68 +- .../lib/src/generated/schema/schema.dart | 11 +- .../src/generated/schema/schema.freezed.dart | 4204 +- .../lib/src/generated/schema/schema.g.dart | 226 +- .../lib/src/generated/schema/tool.dart | 53 - .../lib/src/generated/schema/tool_call.dart | 40 - .../generated/schema/tool_call_function.dart | 44 - .../schema/tool_call_function_args.dart | 12 - .../src/generated/schema/tool_function.dart | 52 - .../schema/tool_function_params.dart | 12 - .../generated/schema/version_response.dart | 40 - .../lib/src/http_client/http_client.dart | 3 +- packages/ollama_dart/oas/ollama-curated.yaml | 296 +- packages/ollama_dart/pubspec.yaml | 26 +- .../test/ollama_dart_chat_test.dart | 75 +- .../test/ollama_dart_completions_test.dart | 13 +- .../test/ollama_dart_embeddings_test.dart | 4 +- .../test/ollama_dart_models_test.dart | 45 +- .../test/ollama_dart_version_test.dart | 24 - packages/openai_dart/CHANGELOG.md | 41 - packages/openai_dart/README.md | 160 +- packages/openai_dart/lib/openai_dart.dart | 2 +- packages/openai_dart/lib/src/client.dart | 5 +- .../openai_dart/lib/src/generated/client.dart | 20 +- .../generated/schema/assistant_object.dart | 67 +- .../schema/assistant_stream_event.dart | 4 +- .../src/generated/schema/assistant_tools.dart | 70 +- .../schema/assistants_response_format.dart | 53 + .../lib/src/generated/schema/batch.dart | 4 +- .../schema/chat_completion_logprobs.dart | 8 +- .../schema/chat_completion_message.dart | 15 +- .../chat_completion_message_content_part.dart | 21 +- ..._completion_message_content_part_type.dart | 2 - ...hat_completion_stream_response_choice.dart | 8 +- ...chat_completion_stream_response_delta.dart | 5 - .../chunking_strategy_request_param.dart | 54 - .../chunking_strategy_response_param.dart | 55 - .../schema/completion_tokens_details.dart | 41 - .../generated/schema/completion_usage.dart | 8 +- .../schema/create_assistant_request.dart | 82 +- .../schema/create_batch_request.dart | 2 +- .../create_chat_completion_request.dart | 189 +- 
.../create_chat_completion_response.dart | 11 - ...reate_chat_completion_stream_response.dart | 15 +- .../schema/create_completion_request.dart | 2 +- .../schema/create_embedding_request.dart | 2 +- .../create_fine_tuning_job_request.dart | 15 +- .../schema/create_message_request.dart | 10 +- .../generated/schema/create_run_request.dart | 88 +- .../schema/create_thread_and_run_request.dart | 86 +- .../schema/create_thread_request.dart | 4 +- ...reate_vector_store_file_batch_request.dart | 8 +- .../create_vector_store_file_request.dart | 8 +- .../schema/create_vector_store_request.dart | 21 +- .../generated/schema/file_search_ranker.dart | 17 - .../schema/file_search_ranking_options.dart | 62 - .../fine_tuning_job_hyperparameters.dart | 12 +- .../src/generated/schema/function_object.dart | 15 +- .../generated/schema/function_parameters.dart | 2 +- .../generated/schema/json_schema_object.dart | 62 - .../src/generated/schema/message_content.dart | 15 - ...ontent_text_annotations_file_citation.dart | 6 +- .../schema/message_delta_content.dart | 42 - ...essage_delta_content_image_url_object.dart | 51 + .../src/generated/schema/message_object.dart | 4 +- .../schema/modify_assistant_request.dart | 66 +- .../schema/modify_message_request.dart | 4 +- .../generated/schema/modify_run_request.dart | 4 +- .../schema/modify_thread_request.dart | 4 +- .../src/generated/schema/response_format.dart | 82 - .../schema/response_format_type.dart | 19 - .../lib/src/generated/schema/run_object.dart | 61 +- .../schema/run_step_details_tool_calls.dart | 5 +- ...n_step_details_tool_calls_file_search.dart | 48 - ...ls_file_search_ranking_options_object.dart | 56 - ...tool_calls_file_search_result_content.dart | 46 - ..._tool_calls_file_search_result_object.dart | 71 - .../src/generated/schema/run_step_object.dart | 4 +- .../lib/src/generated/schema/schema.dart | 16 +- .../src/generated/schema/schema.freezed.dart | 35249 ++++++---------- .../lib/src/generated/schema/schema.g.dart | 1141 +- .../src/generated/schema/service_tier.dart | 18 - .../schema/static_chunking_strategy.dart | 60 - .../src/generated/schema/thread_object.dart | 4 +- ...ol_resources_file_search_vector_store.dart | 12 +- .../schema/update_vector_store_request.dart | 4 +- .../schema/vector_store_file_object.dart | 23 +- .../generated/schema/vector_store_object.dart | 4 +- .../lib/src/http_client/http_client.dart | 3 +- packages/openai_dart/oas/main.dart | 87 +- packages/openai_dart/oas/openapi_curated.yaml | 837 +- .../openai_dart/oas/openapi_official.yaml | 14925 +++---- packages/openai_dart/pubspec.yaml | 26 +- .../test/openai_client_assistants_test.dart | 10 +- .../test/openai_client_chat_test.dart | 168 +- packages/tavily_dart/.gitignore | 7 - packages/tavily_dart/CHANGELOG.md | 11 - packages/tavily_dart/LICENSE | 21 - packages/tavily_dart/README.md | 131 - packages/tavily_dart/analysis_options.yaml | 1 - packages/tavily_dart/build.yaml | 13 - .../example/tavily_dart_example.dart | 28 - .../tavily_dart/lib/src/generated/client.dart | 382 - .../lib/src/generated/schema/schema.dart | 15 - .../src/generated/schema/schema.freezed.dart | 1027 - .../lib/src/generated/schema/schema.g.dart | 116 - .../src/generated/schema/search_request.dart | 103 - .../src/generated/schema/search_response.dart | 68 - .../src/generated/schema/search_result.dart | 62 - packages/tavily_dart/lib/tavily_dart.dart | 5 - packages/tavily_dart/oas/main.dart | 23 - packages/tavily_dart/oas/tavily_openapi.yaml | 156 - packages/tavily_dart/pubspec.yaml | 34 - 
packages/tavily_dart/test/tavily_test.dart | 45 - packages/vertex_ai/CHANGELOG.md | 12 - packages/vertex_ai/pubspec.yaml | 19 +- pubspec.yaml | 2 +- 540 files changed, 23812 insertions(+), 85199 deletions(-) delete mode 100644 .github/workflows/firebase-hosting-merge.yml delete mode 100644 .github/workflows/firebase-hosting-pull-request.yml delete mode 100644 docs/expression_language/fallbacks.md delete mode 100644 docs/expression_language/primitives/retry.md create mode 100644 docs/modules/agents/agent_types/openai_tools_agent.md delete mode 100644 docs/modules/agents/agent_types/tools_agent.md delete mode 100644 docs/modules/model_io/models/chat_models/integrations/anthropic.md delete mode 100644 docs/modules/retrieval/vector_stores/integrations/img/objectbox.png delete mode 100644 docs/modules/retrieval/vector_stores/integrations/objectbox.md delete mode 100644 docs_v2/.firebaserc delete mode 100644 docs_v2/.gitignore delete mode 100644 docs_v2/README.md delete mode 100644 docs_v2/babel.config.js delete mode 100644 docs_v2/docs/01-intro.md delete mode 100644 docs_v2/docs/02-tutorials/01-llm_chain.md delete mode 100644 docs_v2/docs/02-tutorials/index.mdx delete mode 100644 docs_v2/docs/03-how_to/01-installation.md delete mode 100644 docs_v2/docs/03-how_to/02-structured_output.md delete mode 100644 docs_v2/docs/03-how_to/index.mdx delete mode 100644 docs_v2/docs/04-concepts.mdx delete mode 100644 docs_v2/docs/05-integrations/anthropic.md delete mode 100644 docs_v2/docs/05-integrations/anyscale.md delete mode 100644 docs_v2/docs/05-integrations/firebase_vertex_ai.md delete mode 100644 docs_v2/docs/05-integrations/gcp_vertex_ai.md delete mode 100644 docs_v2/docs/05-integrations/googleai.md delete mode 100644 docs_v2/docs/05-integrations/index.mdx delete mode 100644 docs_v2/docs/05-integrations/mistralai.md delete mode 100644 docs_v2/docs/05-integrations/ollama.md delete mode 100644 docs_v2/docs/05-integrations/open_router.md delete mode 100644 docs_v2/docs/05-integrations/openai.md delete mode 100644 docs_v2/docs/05-integrations/prem.md delete mode 100644 docs_v2/docs/05-integrations/together_ai.md delete mode 100644 docs_v2/docs/05-integrations/tools/index.mdx delete mode 100644 docs_v2/docs/05-integrations/tools/tavily_search.md delete mode 100644 docs_v2/docusaurus.config.js delete mode 100644 docs_v2/firebase.json delete mode 100644 docs_v2/package-lock.json delete mode 100644 docs_v2/package.json delete mode 100644 docs_v2/sidebars.js delete mode 100644 docs_v2/src/components/HomepageFeatures/index.js delete mode 100644 docs_v2/src/components/HomepageFeatures/styles.module.css delete mode 100644 docs_v2/src/css/custom.css delete mode 100644 docs_v2/src/pages/index.js delete mode 100644 docs_v2/src/pages/index.module.css delete mode 100644 docs_v2/src/pages/markdown-page.md delete mode 100644 docs_v2/static/.nojekyll delete mode 100644 docs_v2/static/img/favicon.ico delete mode 100644 docs_v2/static/img/langchain.dart.png delete mode 100644 docs_v2/static/img/logo.svg delete mode 100644 examples/docs_examples/bin/expression_language/fallbacks.dart delete mode 100644 examples/docs_examples/bin/expression_language/primitives/retry.dart rename examples/docs_examples/bin/modules/agents/agent_types/{tools_agent.dart => openai_tools_agent.dart} (64%) delete mode 100644 examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart delete mode 100644 examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart delete mode 100644 
examples/hello_world_flutter/devtools_options.yaml delete mode 100644 examples/hello_world_flutter/hello_world_flutter.gif create mode 100644 examples/hello_world_flutter/hello_world_flutter_local.gif create mode 100644 examples/hello_world_flutter/hello_world_flutter_openai.gif delete mode 100644 examples/hello_world_flutter/lib/home/bloc/providers.dart delete mode 100644 examples/hello_world_flutter/web/flutter_bootstrap.js delete mode 100644 examples/wikivoyage_eu/.gitignore delete mode 100644 examples/wikivoyage_eu/README.md delete mode 100644 examples/wikivoyage_eu/analysis_options.yaml delete mode 100644 examples/wikivoyage_eu/bin/injestion.dart delete mode 100644 examples/wikivoyage_eu/bin/wikivoyage_eu.dart delete mode 100644 examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv delete mode 100644 examples/wikivoyage_eu/pubspec.lock delete mode 100644 examples/wikivoyage_eu/pubspec.yaml delete mode 100644 examples/wikivoyage_eu/pubspec_overrides.yaml delete mode 100644 examples/wikivoyage_eu/rag.png delete mode 100644 examples/wikivoyage_eu/wikivoyage_eu.gif delete mode 100644 packages/anthropic_sdk_dart/CHANGELOG.md delete mode 100644 packages/anthropic_sdk_dart/LICENSE delete mode 100644 packages/anthropic_sdk_dart/README.md delete mode 100644 packages/anthropic_sdk_dart/analysis_options.yaml delete mode 100644 packages/anthropic_sdk_dart/build.yaml delete mode 100644 packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart delete mode 100644 packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/client.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/extensions.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/client.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/stop_reason.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart delete mode 100644 
packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart delete mode 100644 packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart delete mode 100644 packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml delete mode 100644 packages/anthropic_sdk_dart/oas/main.dart delete mode 100644 packages/anthropic_sdk_dart/pubspec.lock delete mode 100644 packages/anthropic_sdk_dart/pubspec.yaml delete mode 100644 packages/anthropic_sdk_dart/test/messages_test.dart delete mode 100644 packages/langchain/lib/src/agents/tools.dart delete mode 100644 packages/langchain/test/agents/assets/state_of_the_union.txt delete mode 100644 packages/langchain/test/agents/tools_agent_test.dart delete mode 100644 packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart delete mode 100644 packages/langchain_anthropic/lib/src/chat_models/chat_models.dart delete mode 100644 packages/langchain_anthropic/lib/src/chat_models/mappers.dart delete mode 100644 packages/langchain_anthropic/lib/src/chat_models/types.dart delete mode 100644 packages/langchain_anthropic/pubspec_overrides.yaml delete mode 100644 packages/langchain_anthropic/test/chat_models/assets/apple.jpeg delete mode 100644 packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart delete mode 100644 packages/langchain_community/lib/src/tools/tavily/mappers.dart delete mode 100644 packages/langchain_community/lib/src/tools/tavily/tavily.dart delete mode 100644 packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart delete mode 100644 packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart delete mode 100644 packages/langchain_community/lib/src/tools/tavily/types.dart delete mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart delete mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart delete mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart delete mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart delete mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart delete mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json delete mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart delete mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart delete mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart delete mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/types.dart delete mode 100644 packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart delete mode 100644 packages/langchain_community/lib/src/vector_stores/vector_stores.dart delete mode 100644 packages/langchain_community/test/tools/tavily_test.dart delete mode 100644 packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart delete mode 100644 packages/langchain_core/lib/src/runnables/fallbacks.dart delete mode 100644 packages/langchain_core/lib/src/runnables/retry.dart delete mode 100644 packages/langchain_core/lib/src/utils/retry_client.dart delete mode 100644 packages/langchain_core/test/runnables/fallbacks_test.dart delete mode 100644 
packages/langchain_core/test/runnables/retry_test.dart delete mode 100644 packages/langchain_firebase/example/web/flutter_bootstrap.js rename packages/langchain_ollama/lib/src/chat_models/{chat_ollama => }/chat_ollama.dart (70%) delete mode 100644 packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart create mode 100644 packages/langchain_ollama/lib/src/chat_models/mappers.dart rename packages/langchain_ollama/lib/src/chat_models/{chat_ollama => }/types.dart (61%) delete mode 100644 packages/langchain_openai/lib/fix_data/fix.yaml create mode 100644 packages/langchain_openai/test/chat_models/anyscale_test.dart delete mode 100644 packages/langchain_openai/test/chat_models/github_models_test.dart create mode 100644 packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart delete mode 100644 packages/langgraph/.gitignore delete mode 100644 packages/langgraph/CHANGELOG.md delete mode 100644 packages/langgraph/LICENSE delete mode 100644 packages/langgraph/README.md delete mode 100644 packages/langgraph/analysis_options.yaml delete mode 100644 packages/langgraph/example/langgraph_example.dart delete mode 100644 packages/langgraph/lib/langgraph.dart delete mode 100644 packages/langgraph/pubspec.yaml delete mode 100644 packages/ollama_dart/lib/src/generated/schema/model_information.dart delete mode 100644 packages/ollama_dart/lib/src/generated/schema/process_model.dart delete mode 100644 packages/ollama_dart/lib/src/generated/schema/process_response.dart create mode 100644 packages/ollama_dart/lib/src/generated/schema/push_model_status.dart delete mode 100644 packages/ollama_dart/lib/src/generated/schema/tool.dart delete mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_call.dart delete mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart delete mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart delete mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_function.dart delete mode 100644 packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart delete mode 100644 packages/ollama_dart/lib/src/generated/schema/version_response.dart delete mode 100644 packages/ollama_dart/test/ollama_dart_version_test.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/json_schema_object.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/response_format.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/response_format_type.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart delete mode 100644 
packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/service_tier.dart delete mode 100644 packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart delete mode 100644 packages/tavily_dart/.gitignore delete mode 100644 packages/tavily_dart/CHANGELOG.md delete mode 100644 packages/tavily_dart/LICENSE delete mode 100644 packages/tavily_dart/README.md delete mode 100644 packages/tavily_dart/analysis_options.yaml delete mode 100644 packages/tavily_dart/build.yaml delete mode 100644 packages/tavily_dart/example/tavily_dart_example.dart delete mode 100644 packages/tavily_dart/lib/src/generated/client.dart delete mode 100644 packages/tavily_dart/lib/src/generated/schema/schema.dart delete mode 100644 packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart delete mode 100644 packages/tavily_dart/lib/src/generated/schema/schema.g.dart delete mode 100644 packages/tavily_dart/lib/src/generated/schema/search_request.dart delete mode 100644 packages/tavily_dart/lib/src/generated/schema/search_response.dart delete mode 100644 packages/tavily_dart/lib/src/generated/schema/search_result.dart delete mode 100644 packages/tavily_dart/lib/tavily_dart.dart delete mode 100644 packages/tavily_dart/oas/main.dart delete mode 100644 packages/tavily_dart/oas/tavily_openapi.yaml delete mode 100644 packages/tavily_dart/pubspec.yaml delete mode 100644 packages/tavily_dart/test/tavily_test.dart diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 448ca45f..59926dbf 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -27,7 +27,7 @@ jobs: timeout-minutes: 30 steps: - name: Checkout repository - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b - name: Setup Pages uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b diff --git a/.github/workflows/firebase-hosting-merge.yml b/.github/workflows/firebase-hosting-merge.yml deleted file mode 100644 index 35567de1..00000000 --- a/.github/workflows/firebase-hosting-merge.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: Deploy docs_v2 - -on: - push: - branches: - - main - -jobs: - build_and_deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 - - run: npm ci && npm run build - working-directory: docs_v2 - - uses: FirebaseExtended/action-hosting-deploy@0cbcac4740c2bfb00d632f0b863b57713124eb5a - with: - repoToken: ${{ secrets.GITHUB_TOKEN }} - firebaseServiceAccount: ${{ secrets.FIREBASE_SERVICE_ACCOUNT_LANGCHAIN_DART }} - channelId: live - projectId: langchain-dart - entryPoint: docs_v2 diff --git a/.github/workflows/firebase-hosting-pull-request.yml b/.github/workflows/firebase-hosting-pull-request.yml deleted file mode 100644 index 0086d2e1..00000000 --- a/.github/workflows/firebase-hosting-pull-request.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Deploy docs_v2 on PR - -on: - pull_request: - paths: - - 'docs_v2/**' - -permissions: - checks: write - contents: read - pull-requests: write - -jobs: - build_and_preview: - if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 - - run: npm ci && npm run 
build - working-directory: docs_v2 - - uses: FirebaseExtended/action-hosting-deploy@0cbcac4740c2bfb00d632f0b863b57713124eb5a - with: - repoToken: ${{ secrets.GITHUB_TOKEN }} - firebaseServiceAccount: ${{ secrets.FIREBASE_SERVICE_ACCOUNT_LANGCHAIN_DART }} - projectId: langchain-dart - entryPoint: docs_v2 diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 1141bb2d..114e4fab 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -4,9 +4,6 @@ on: # pull_request_target is dangerous! Review external PRs code before approving to run the workflow # We need this to be able to access the secrets required by the workflow pull_request_target: - paths-ignore: - - 'docs/**' - - 'docs_v2/**' workflow_dispatch: # Cancel currently running workflow when a new one is triggered @@ -22,7 +19,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b with: ref: "${{ github.event.pull_request.base.sha }}" # Required for pull_request_target fetch-depth: 0 @@ -41,7 +38,7 @@ jobs: run: flutter pub cache clean - name: Install Melos - uses: bluefireteam/melos-action@c7dcb921b23cc520cace360b95d02b37bf09cdaa + uses: bluefireteam/melos-action@6085791af7036f6366c9a4b9d55105c0ef9c6388 with: run-bootstrap: false diff --git a/.gitignore b/.gitignore index cf493593..dd509d78 100644 --- a/.gitignore +++ b/.gitignore @@ -4,4 +4,3 @@ .dart_tool/ /pubspec.lock .vscode/ -.aider* diff --git a/CHANGELOG.md b/CHANGELOG.md index 59691103..a668f597 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,532 +1,6 @@ # Change Log -📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. - -## 2024-09-25 - -### Changes - ---- - -Packages with breaking changes: - -- There are no breaking changes in this release. - -Packages with other changes: - -- [`langchain` - `v0.7.6`](#langchain---v076) -- [`langchain_core` - `v0.3.6`](#langchain_core---v036) -- [`langchain_community` - `v0.3.2`](#langchain_community---v032) -- [`langchain_firebase` - `v0.2.1+2`](#langchain_firebase---v0212) -- [`langchain_google` - `v0.6.3+1`](#langchain_google---v0631) -- [`langchain_ollama` - `v0.3.2`](#langchain_ollama---v032) -- [`langchain_openai` - `v0.7.2`](#langchain_openai---v072) -- [`ollama_dart` - `v0.2.2`](#ollama_dart---v022) -- [`openai_dart` - `v0.4.2`](#openai_dart---v042) -- [`langchain_supabase` - `v0.1.1+3`](#langchain_supabase---v0113) -- [`langchain_pinecone` - `v0.1.0+9`](#langchain_pinecone---v0109) -- [`langchain_anthropic` - `v0.1.1+2`](#langchain_anthropic---v0112) -- [`langchain_chroma` - `v0.2.1+3`](#langchain_chroma---v0213) -- [`langchain_mistralai` - `v0.2.3+1`](#langchain_mistralai---v0231) - -Packages with dependency updates only: - -> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project. 
- -- `langchain_supabase` - `v0.1.1+3` -- `langchain_pinecone` - `v0.1.0+9` -- `langchain_anthropic` - `v0.1.1+2` -- `langchain_chroma` - `v0.2.1+3` -- `langchain_mistralai` - `v0.2.3+1` -- `vertex_ai` - `v0.1.0+2` - ---- - -#### `langchain` - `v0.7.6` - -- **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) - -#### `langchain_core` - `v0.3.6` - -- **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) - -#### `langchain_community` - `v0.3.2` - -- **FEAT**: Add support for deleteWhere in ObjectBoxVectorStore ([#552](https://github.com/davidmigloz/langchain_dart/issues/552)). ([90918bba](https://github.com/davidmigloz/langchain_dart/commit/90918bbac411ccfe4823ae195de6a50a46575573)) -- **REFACTOR**: Add stubs for ObjectBox on web platform ([#553](https://github.com/davidmigloz/langchain_dart/issues/553)). ([41caed92](https://github.com/davidmigloz/langchain_dart/commit/41caed924bf24382567758be4590d5ddff31e839)) - -#### `langchain_firebase` - `v0.2.1+2` - -- **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) - -#### `langchain_google` - `v0.6.3+1` - -- **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). ([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981)) -- **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) - -#### `langchain_ollama` - `v0.3.2` - -- **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). ([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) - -#### `langchain_openai` - `v0.7.2` - -- **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) -- **REFACTOR**: Migrate ChatOpenAI to maxCompletionTokens ([#557](https://github.com/davidmigloz/langchain_dart/issues/557)). ([08057a5b](https://github.com/davidmigloz/langchain_dart/commit/08057a5b6e08ee2633c6be6144be1619e902bbc5)) - -#### `ollama_dart` - `v0.2.2` - -- **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). ([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) - -#### `openai_dart` - `v0.4.2` - -- **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) -- **FEAT**: Add support for maxCompletionTokens and reasoningTokens in openai_dart ([#556](https://github.com/davidmigloz/langchain_dart/issues/556)). 
([37d75b61](https://github.com/davidmigloz/langchain_dart/commit/37d75b612b0f42bbf8d092bdd81c554278716582)) -- **FEAT**: Option to include file search results in assistants API ([#543](https://github.com/davidmigloz/langchain_dart/issues/543)). ([e916ad3c](https://github.com/davidmigloz/langchain_dart/commit/e916ad3c0c4e322319cedac8b06b5908f1c31935)) - - -## 2024-08-22 - -### Changes - ---- - -Packages with breaking changes: - - - There are no breaking changes in this release. - -Packages with other changes: - - - [`langchain` - `v0.7.5`](#langchain---v075) - - [`langchain_core` - `v0.3.5`](#langchain_core---v035) - - [`langchain_community` - `v0.3.1`](#langchain_community---v031) - - [`langchain_openai` - `v0.7.1`](#langchain_openai---v071) - - [`langchain_ollama` - `v0.3.1`](#langchain_ollama---v031) - - [`langchain_google` - `v0.6.2`](#langchain_google---v062) - - [`langchain_mistralai` - `v0.2.3`](#langchain_mistralai---v023) - - [`ollama_dart` - `v0.2.1`](#ollama_dart---v021) - - [`openai_dart` - `v0.4.1`](#openai_dart---v041) - - [`langchain_firebase` - `v0.2.1+1`](#langchain_firebase---v0211) - - [`langchain_supabase` - `v0.1.1+2`](#langchain_supabase---v0112) - - [`langchain_pinecone` - `v0.1.0+8`](#langchain_pinecone---v0108) - - [`langchain_anthropic` - `v0.1.1+1`](#langchain_anthropic---v0111) - - [`langchain_chroma` - `v0.2.1+2`](#langchain_chroma---v0212) - -Packages with dependency updates only: - -> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project. - - - `langchain_firebase` - `v0.2.1+1` - - `langchain_supabase` - `v0.1.1+2` - - `langchain_pinecone` - `v0.1.0+8` - - `langchain_anthropic` - `v0.1.1+1` - - `langchain_chroma` - `v0.2.1+2` - ---- - -#### `langchain` - `v0.7.5` - - - **FEAT**: Add ToolsAgent for models with tool-calling support ([#530](https://github.com/davidmigloz/langchain_dart/issues/530)). ([f3ee5b44](https://github.com/davidmigloz/langchain_dart/commit/f3ee5b44c4ffa378343ec4ee1e08d8e594a6cb36)) - - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) - - **DOCS**: Add Code Assist AI in README and documentation ([#538](https://github.com/davidmigloz/langchain_dart/issues/538)). ([e752464c](https://github.com/davidmigloz/langchain_dart/commit/e752464c0d2fc7e0ccc878933b0ef934c9527567)) - -#### `langchain_core` - `v0.3.5` - - - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) - - **FEAT**: Support OpenAI's strict mode for tool calling in ChatOpenAI ([#536](https://github.com/davidmigloz/langchain_dart/issues/536)). ([71623f49](https://github.com/davidmigloz/langchain_dart/commit/71623f490289e63252165167305e00038d800be1)) - - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) - -#### `langchain_community` - `v0.3.1` - - - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). 
([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) - -#### `langchain_openai` - `v0.7.1` - - - **FEAT**: Add support for Structured Outputs in ChatOpenAI ([#526](https://github.com/davidmigloz/langchain_dart/issues/526)). ([c5387b5d](https://github.com/davidmigloz/langchain_dart/commit/c5387b5dd87fe2aac511c4eca2d4a497065db61f)) - - **FEAT**: Handle refusal in OpenAI's Structured Outputs API ([#533](https://github.com/davidmigloz/langchain_dart/issues/533)). ([f4c4ed99](https://github.com/davidmigloz/langchain_dart/commit/f4c4ed9902177560f13fa9f44b07f0a49c3fdf0a)) - - **FEAT**: Include logprobs in result metadata from ChatOpenAI ([#535](https://github.com/davidmigloz/langchain_dart/issues/535)). ([1834b3ad](https://github.com/davidmigloz/langchain_dart/commit/1834b3adb210b7d190a7e0574a304f069813486b)) - - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) - - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) - - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) - - **REFACTOR**: Don't send OpenAI-Beta header in ChatOpenAI ([#511](https://github.com/davidmigloz/langchain_dart/issues/511)). ([0e532bab](https://github.com/davidmigloz/langchain_dart/commit/0e532bab84483bf9d77a0d745f1a591eea2ff7c8)) - -#### `langchain_ollama` - `v0.3.1` - - - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) - - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) - -#### `langchain_google` - `v0.6.2` - - - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) - -#### `langchain_mistralai` - `v0.2.3` - - - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) - -#### `openai_dart` - `v0.4.1` - - - **FEAT**: Add support for Structured Outputs ([#525](https://github.com/davidmigloz/langchain_dart/issues/525)). ([c7574077](https://github.com/davidmigloz/langchain_dart/commit/c7574077195acfc96e9ca9d526cc050788c23c1d)) - - **FEAT**: Add log probabilities for refusal tokens ([#534](https://github.com/davidmigloz/langchain_dart/issues/534)). ([8470a24c](https://github.com/davidmigloz/langchain_dart/commit/8470a24cc42042e20ffffa4b67bc831e03efbc6c)) - - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). 
([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) - - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) - -#### `ollama_dart` - `v0.2.1` - - - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) - - -## 2024-07-26 - -### Changes - ---- - -Packages with breaking changes: - - - [`langchain_community` - `v0.3.0`](#langchain_community---v030) - - [`langchain_ollama` - `v0.3.0`](#langchain_ollama---v030) - - [`langchain_openai` - `v0.7.0`](#langchain_openai---v070) - - [`ollama_dart` - `v0.2.0`](#ollama_dart---v020) - - [`openai_dart` - `v0.4.0`](#openai_dart---v040) - -Packages with other changes: - - - [`langchain` - `v0.7.4`](#langchain---v074) - - [`langchain_anthropic` - `v0.1.1`](#langchain_anthropic---v011) - - [`langchain_chroma` - `v0.2.1+1`](#langchain_chroma---v0211) - - [`langchain_core` - `v0.3.4`](#langchain_core---v034) - - [`langchain_firebase` - `v0.2.1`](#langchain_firebase---v021) - - [`langchain_google` - `v0.6.1`](#langchain_google---v061) - - [`langchain_mistralai` - `v0.2.2`](#langchain_mistralai---v022) - - [`langchain_pinecone` - `v0.1.0+7`](#langchain_pinecone---v0107) - - [`langchain_supabase` - `v0.1.1+1`](#langchain_supabase---v0111) - ---- - -#### `langchain` - `v0.7.4` - - - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - - **DOCS**: Update README.md with Ollama tool call support. ([e016b0bd](https://github.com/davidmigloz/langchain_dart/commit/e016b0bd02065971faab2a3a48be625ff33a08cf)) - -#### `langchain_core` - `v0.3.4` - - - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). 
([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - -#### `langchain_community` - `v0.3.0` - - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -#### `langchain_ollama` - `v0.3.0` - - - **FEAT**: Add tool calling support in ChatOllama ([#505](https://github.com/davidmigloz/langchain_dart/issues/505)). ([6ffde204](https://github.com/davidmigloz/langchain_dart/commit/6ffde2043c1e865411c8b1096063619d6bcd80aa)) - - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) - -#### `langchain_openai` - `v0.7.0` - - - **BREAKING** **FEAT**: Update ChatOpenAI default model to gpt-4o-mini ([#507](https://github.com/davidmigloz/langchain_dart/issues/507)). ([c7b8ce91](https://github.com/davidmigloz/langchain_dart/commit/c7b8ce91ac5b4dbe6bed563fae124a9f5ad76a84)) - - **FEAT**: Add support for disabling parallel tool calls in ChatOpenAI ([#493](https://github.com/davidmigloz/langchain_dart/issues/493)). ([c46d676d](https://github.com/davidmigloz/langchain_dart/commit/c46d676dee836f1d17e0d1fd61a8f1f0ba5c2881)) - - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) - - **FEAT**: Add support for service tier in ChatOpenAI ([#495](https://github.com/davidmigloz/langchain_dart/issues/495)). ([af79a4ff](https://github.com/davidmigloz/langchain_dart/commit/af79a4ffcadb207bfc704365462edebfca1ed6c7)) - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). 
([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -#### `langchain_anthropic` - `v0.1.1` - - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -#### `langchain_firebase` - `v0.2.1` - - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -#### `langchain_google` - `v0.6.1` - - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -#### `langchain_mistralai` - `v0.2.2` - - - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) - - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). 
([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -#### `langchain_chroma` - `v0.2.1+1` - - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -#### `langchain_pinecone` - `v0.1.0+7` - - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -#### `langchain_supabase` - `v0.1.1+1` - - - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) - -#### `ollama_dart` - `v0.2.0` - - - **FEAT**: Add tool calling support in ollama_dart ([#504](https://github.com/davidmigloz/langchain_dart/issues/504)). ([1ffdb41b](https://github.com/davidmigloz/langchain_dart/commit/1ffdb41b8f19941336c1cd911c73f0b3d46af975)) - - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) - - **FEAT**: Add support for Ollama version and model info ([#488](https://github.com/davidmigloz/langchain_dart/issues/488)). ([a110ecb7](https://github.com/davidmigloz/langchain_dart/commit/a110ecb7f10e7975bd2416aa65add98984c6efb8)) - - **FEAT**: Add suffix support in Ollama completions API in ollama_dart ([#503](https://github.com/davidmigloz/langchain_dart/issues/503)). ([30d05a69](https://github.com/davidmigloz/langchain_dart/commit/30d05a69b07f88f803b9abfdf2fded9348a73490)) - - **BREAKING** **REFACTOR**: Change Ollama push model status type from enum to String ([#489](https://github.com/davidmigloz/langchain_dart/issues/489)). ([90c9ccd9](https://github.com/davidmigloz/langchain_dart/commit/90c9ccd986c7b679ed30225d2380120e17dfec41)) - - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) - -#### `openai_dart` - `v0.4.0` - - - **FEAT**: Add support for disabling parallel tool calls in openai_dart ([#492](https://github.com/davidmigloz/langchain_dart/issues/492)). ([a91e0719](https://github.com/davidmigloz/langchain_dart/commit/a91e07196278ae4da5917d52395f3c246fc35bf2)) - - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) - - **FEAT**: Support chunking strategy in file_search tool in openai_dart ([#496](https://github.com/davidmigloz/langchain_dart/issues/496)). ([cfa974a9](https://github.com/davidmigloz/langchain_dart/commit/cfa974a9e2fc4b79e5b66765b22d76710575d5bc)) - - **FEAT**: Add support for overrides in the file search tool in openai_dart ([#491](https://github.com/davidmigloz/langchain_dart/issues/491)). 
([89605638](https://github.com/davidmigloz/langchain_dart/commit/89605638c465be37c2738258d840c21d32fe9554)) - - **FEAT**: Allow to customize OpenAI-Beta header in openai_dart ([#502](https://github.com/davidmigloz/langchain_dart/issues/502)). ([5fed8dbb](https://github.com/davidmigloz/langchain_dart/commit/5fed8dbb8205ba7925ca59d6f07a4f5e052b52b1)) - - **FEAT**: Add support for service tier in openai_dart ([#494](https://github.com/davidmigloz/langchain_dart/issues/494)). ([0838e4b9](https://github.com/davidmigloz/langchain_dart/commit/0838e4b9f5bb25e29fbc163a0ff5cf3e64409d40)) - -## 2024-07-02 - -### Changes - ---- - -New packages: - -- [`langchain_anthropic` - `v0.1.0`](#langchain_anthropic---v010) -- [`tavily_dart` - `v0.1.0`](#tavily_dart---v010) - -Packages with breaking changes: - -- [`langchain_firebase` - `v0.2.0`](#langchain_firebase---v020) -- [`langchain_google` - `v0.6.0`](#langchain_google---v060) - -Packages with other changes: - -- [`langchain` - `v0.7.3`](#langchain---v073) -- [`langchain_core` - `v0.3.3`](#langchain_core---v033) -- [`langchain_community` - `v0.2.2`](#langchain_community---v022) -- [`langchain_chroma` - `v0.2.1`](#langchain_chroma---v021) -- [`langchain_mistralai` - `v0.2.1`](#langchain_mistralai---v021) -- [`langchain_ollama` - `v0.2.2+1`](#langchain_ollama---v0221) -- [`langchain_openai` - `v0.6.3`](#langchain_openai---v063) -- [`langchain_pinecone` - `v0.1.0+6`](#langchain_pinecone---v0106) -- [`langchain_supabase` - `v0.1.1`](#langchain_supabase---v011) -- [`anthropic_sdk_dart` - `v0.1.0`](#anthropic_sdk_dart---v010) -- [`googleai_dart` - `v0.1.0+2`](#googleai_dart---v0102) -- [`mistralai_dart` - `v0.0.3+3`](#mistralai_dart---v0033) -- [`ollama_dart` - `v0.1.2`](#ollama_dart---v012) -- [`openai_dart` - `v0.3.3+1`](#openai_dart---v0331) - ---- - -#### `langchain` - `v0.7.3` - -> Note: Anthropic integration (`ChatAnthropic`) is available in the new [`langchain_anthropic`](https://pub.dev/packages/langchain_anthropic) package. - -- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) -- **DOCS**: Document existing integrations in README.md. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) - -#### `langchain_core` - `v0.3.3` - -- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) -- **FEAT**: Update ChatResult.id concat logic ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) - -#### `langchain_community` - `v0.2.2` - -- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) - -#### `langchain_anthropic` - `v0.1.0` - -- **FEAT**: Add ChatAnthropic integration ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). 
([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) - -#### `langchain_firebase` - `v0.2.0` - -> Note: `ChatFirebaseVertexAI` now uses `gemini-1.5-flash` model by default. - -- **BREAKING** **FEAT**: Update ChatFirebaseVertexAI default model to gemini-1.5-flash ([#458](https://github.com/davidmigloz/langchain_dart/issues/458)). ([d3c96c52](https://github.com/davidmigloz/langchain_dart/commit/d3c96c52e95e889ba6955e3de80a83978b27618b)) -- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) -- **FEAT**: Support response MIME type in ChatFirebaseVertexAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)) ([#463](https://github.com/davidmigloz/langchain_dart/issues/463)). ([c3452721](https://github.com/davidmigloz/langchain_dart/commit/c3452721c78ba3071ed2510a243f9c824a291c34)) -- **FEAT**: Add support for Firebase Auth in ChatFirebaseVertexAI ([#460](https://github.com/davidmigloz/langchain_dart/issues/460)). ([6d137290](https://github.com/davidmigloz/langchain_dart/commit/6d137290ca0f56c9fcc725e6211e838a3e3c6d16)) -- **FEAT**: Add support for usage metadata in ChatFirebaseVertexAI ([#457](https://github.com/davidmigloz/langchain_dart/issues/457)). ([2587f9e2](https://github.com/davidmigloz/langchain_dart/commit/2587f9e2bcbcc2bf5e2295dce409e92a89bf3c44)) -- **REFACTOR**: Simplify how tools are passed to the internal Firebase client ([#459](https://github.com/davidmigloz/langchain_dart/issues/459)). ([7f772396](https://github.com/davidmigloz/langchain_dart/commit/7f77239601fb216a01ec9d25680ec4d3dc4b97c7)) - -#### `langchain_google` - `v0.6.0` - -> Note: `ChatGoogleGenerativeAI` now uses `gemini-1.5-flash` model by default. - -- **BREAKING** **FEAT**: Update ChatGoogleGenerativeAI default model to gemini-1.5-flash ([#462](https://github.com/davidmigloz/langchain_dart/issues/462)). ([c8b30c90](https://github.com/davidmigloz/langchain_dart/commit/c8b30c906a17751547cc340f987b6670fbd67e69)) -- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) -- **FEAT**: Support response MIME type and schema in ChatGoogleGenerativeAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)). ([e258399e](https://github.com/davidmigloz/langchain_dart/commit/e258399e03437e8abe25417a14671dfb719cb273)) -- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) - -#### `langchain_openai` - `v0.6.3` - -- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) - -#### `langchain_ollama` - `v0.2.2+1` - -- **DOCS**: Update ChatOllama API docs. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) - -#### `langchain_chroma` - `v0.2.1` - -- Update a dependency to the latest release. - -#### `langchain_mistralai` - `v0.2.1` - -- Update a dependency to the latest release. 
- -#### `langchain_pinecone` - `v0.1.0+6` - -- Update a dependency to the latest release. - -#### `langchain_supabase` - `v0.1.1` - -- Update a dependency to the latest release. - -#### `anthropic_sdk_dart` - `v0.1.0` - -- **FEAT**: Add support for tool use in anthropic_sdk_dart client ([#469](https://github.com/davidmigloz/langchain_dart/issues/469)). ([81896cfd](https://github.com/davidmigloz/langchain_dart/commit/81896cfdfce116b010dd51391994251d2a836333)) -- **FEAT**: Add extensions on ToolResultBlockContent in anthropic_sdk_dart ([#476](https://github.com/davidmigloz/langchain_dart/issues/476)). ([8d92d9b0](https://github.com/davidmigloz/langchain_dart/commit/8d92d9b008755ff9b9ca3545eb26fc49a296a909)) -- **REFACTOR**: Improve schemas names in anthropic_sdk_dart ([#475](https://github.com/davidmigloz/langchain_dart/issues/475)). ([8ebeacde](https://github.com/davidmigloz/langchain_dart/commit/8ebeacded02ab92885354c9447b1a55e024b56d1)) -- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) - -#### `ollama_dart` - `v0.1.2` - -- **FEAT**: Add support for listing running Ollama models ([#451](https://github.com/davidmigloz/langchain_dart/issues/451)). ([cfaa31fb](https://github.com/davidmigloz/langchain_dart/commit/cfaa31fb8ce1dc128570c95d403809f71e0199d9)) -- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) - -#### `tavily_dart` - `v0.1.0` - -- **FEAT**: Implement tavily_dart, a Dart client for Tavily API ([#456](https://github.com/davidmigloz/langchain_dart/issues/456)). ([fbfb79ba](https://github.com/davidmigloz/langchain_dart/commit/fbfb79bad81dbbd5844a90938fda79b201f20047)) - -#### `googleai_dart` - `v0.1.0+2` - -- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) - -#### `mistralai_dart` - `v0.0.3+3` - -- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) - -#### `openai_dart` - `v0.3.3+1` - -- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). 
([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) - -## 2024-06-01 - -### Changes - ---- - -New packages: - - - [`anthropic_sdk_dart` - `v0.0.1`](#anthropic_sdk_dart---v001) - -Packages with other changes: - - - [`langchain` - `v0.7.2`](#langchain---v072) - - [`langchain_core` - `v0.3.2`](#langchain_core---v032) - - [`langchain_community` - `v0.2.1`](#langchain_community---v021) - - [`langchain_chroma` - `v0.2.0+5`](#langchain_chroma---v0205) - - [`langchain_firebase` - `v0.1.0+2`](#langchain_firebase---v0102) - - [`langchain_google` - `v0.5.1`](#langchain_google---v051) - - [`langchain_mistralai` - `v0.2.1`](#langchain_mistralai---v021) - - [`langchain_ollama` - `v0.2.2`](#langchain_ollama---v022) - - [`langchain_openai` - `v0.6.2`](#langchain_openai---v062) - - [`langchain_pinecone` - `v0.1.0+5`](#langchain_pinecone---v0105) - - [`langchain_supabase` - `v0.1.0+5`](#langchain_supabase---v0105) - - [`chromadb` - `v0.2.0+1`](#chromadb---v0201) - - [`googleai_dart` - `v0.1.0+1`](#googleai_dart---v0101) - - [`mistralai_dart` - `v0.0.3+2`](#mistralai_dart---v0032) - - [`ollama_dart` - `v0.1.1`](#ollama_dart---v011) - - [`openai_dart` - `v0.3.3`](#openai_dart---v033) - - [`vertex_ai` - `v0.1.0+1`](#vertex_ai---v0101) - ---- - -#### `langchain` - `v0.7.2` - - - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) - + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) - - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). ([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) - -#### `langchain_core` - `v0.3.2` - - - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) - - **FIX**: Stream errors are not propagated by StringOutputParser ([#440](https://github.com/davidmigloz/langchain_dart/issues/440)). ([496b11cc](https://github.com/davidmigloz/langchain_dart/commit/496b11cca9bbf9892c425e49138562537398bc70)) - -#### `langchain_community` - `v0.2.1` - - - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) - + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) - -#### `langchain_openai` - `v0.6.2` - - - **DOCS**: Document tool calling with OpenRouter ([#437](https://github.com/davidmigloz/langchain_dart/issues/437)). ([47986592](https://github.com/davidmigloz/langchain_dart/commit/47986592a674322fe2f69aff7166a3e594756ace)) - -#### `anthropic_sdk_dart` - `v0.0.1` - - - **FEAT**: Implement anthropic_sdk_dart, a Dart client for Anthropic API ([#433](https://github.com/davidmigloz/langchain_dart/issues/433)). 
([e5412b](https://github.com/davidmigloz/langchain_dart/commit/e5412bdedc7de911f7de88eb51e9d41cd85ab4ae)) - -#### `ollama_dart` - `v0.1.1` - - - **FEAT**: Support buffered stream responses ([#445](https://github.com/davidmigloz/langchain_dart/issues/445)). ([ce2ef30c](https://github.com/davidmigloz/langchain_dart/commit/ce2ef30c9a9a0dfe8f3059988b7007c94c45b9bd)) - -#### `openai_dart` - `v0.3.3` - - - **FEAT**: Support FastChat OpenAI-compatible API ([#444](https://github.com/davidmigloz/langchain_dart/issues/444)). ([ddaf1f69](https://github.com/davidmigloz/langchain_dart/commit/ddaf1f69d8262210637999367690bf362f2dc5c3)) - - **FIX**: Make vector store name optional ([#436](https://github.com/davidmigloz/langchain_dart/issues/436)). ([29a46c7f](https://github.com/davidmigloz/langchain_dart/commit/29a46c7fa645439e8f4acc10a16da904e7cf14ff)) - - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) - - -## 2024-05-20 - -### Changes - ---- - -Packages with breaking changes: - -- There are no breaking changes in this release. - -Packages with other changes: - -- [`langchain_firebase` - `v0.1.0+1`](#langchain_firebase---v0101) -- [`ollama_dart` - `v0.1.0+1`](#ollama_dart---v0101) -- [`openai_dart` - `v0.3.2+1`](#openai_dart---v0321) -- [`langchain_ollama` - `v0.2.1+1`](#langchain_ollama---v0211) -- [`langchain_openai` - `v0.6.1+1`](#langchain_openai---v0611) - -Packages with dependency updates only: - -> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project. - -- `langchain_ollama` - `v0.2.1+1` -- `langchain_openai` - `v0.6.1+1` - ---- - -#### `openai_dart` - `v0.3.2+1` - -- **FIX**: Rename CreateRunRequestModel factories names ([#429](https://github.com/davidmigloz/langchain_dart/issues/429)). ([fd15793b](https://github.com/davidmigloz/langchain_dart/commit/fd15793b3c4ac94dfc90567b4a709e1458f4e0e8)) -- **FIX**: Make quote nullable in MessageContentTextAnnotationsFileCitation ([#428](https://github.com/davidmigloz/langchain_dart/issues/428)). ([75b95645](https://github.com/davidmigloz/langchain_dart/commit/75b95645a58d51b369a01e261393e17f7463e1f5)) - -#### `ollama_dart` - `v0.1.0+1` - -- **FIX**: digest path param in Ollama blob endpoints ([#430](https://github.com/davidmigloz/langchain_dart/issues/430)). ([2e9e935a](https://github.com/davidmigloz/langchain_dart/commit/2e9e935aefd74e5e9e09a23188a6c77ce535661d)) - -#### `langchain_firebase` - `v0.1.0+1` - -- **DOCS**: Fix lint issues in langchain_firebase example. ([f85a6ad7](https://github.com/davidmigloz/langchain_dart/commit/f85a6ad755e00c513bd4349663e33d40be8a696c)) +Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. ## 2024-05-14 @@ -2908,3 +2382,11 @@ Packages with changes: - **FIX**: OpenAIQAWithSourcesChain throws exception. ([45c6cb9d](https://github.com/davidmigloz/langchain_dart/commit/45c6cb9d32be670902dd2fe4cb92597765590d85)) - **FEAT**: Support estimating the number of tokens for a given prompt ([#3](https://github.com/davidmigloz/langchain_dart/issues/3)). 
([e22f22c8](https://github.com/davidmigloz/langchain_dart/commit/e22f22c89f188a019b96a7c0003dbd26471bebb7)) + +## 2023-07-02 + +### Changes + +#### `langchain` - `v0.0.1` + + - Initial public release. diff --git a/analysis_options.yaml b/analysis_options.yaml index 9a364363..d8f55c71 100644 --- a/analysis_options.yaml +++ b/analysis_options.yaml @@ -7,11 +7,6 @@ analyzer: missing_return: error todo: ignore sdk_version_since: ignore # TODO remove when fixed https://github.com/dart-lang/sdk/issues/52327 - exclude: - - "**/generated_plugin_registrant.dart" - - "**/generated/**" - - "**/*.gen.dart" - - "**/*.g.dart" linter: rules: # https://dart-lang.github.io/linter/lints/{rule}.html @@ -35,7 +30,7 @@ linter: - avoid_null_checks_in_equality_operators - avoid_positional_boolean_parameters - avoid_print - # - avoid_redundant_argument_values # I prefer to be explicit sometimes + # - avoid_redundant_argument_values # Sometimes is useful to be explicit - avoid_relative_lib_imports - avoid_renaming_method_parameters - avoid_return_types_on_setters @@ -70,7 +65,6 @@ linter: # - diagnostic_describe_all_properties # Disabled because it's very verbose - directives_ordering - discarded_futures - # - document_ignores # Disabled because it's very verbose - empty_catches - empty_constructor_bodies - empty_statements @@ -82,7 +76,6 @@ linter: - implementation_imports - implicit_call_tearoffs - invalid_case_patterns - - invalid_runtime_check_with_js_interop_types - iterable_contains_unrelated_type - join_return_with_assignment - leading_newlines_in_multiline_strings @@ -92,7 +85,6 @@ linter: - library_private_types_in_public_api - list_remove_unrelated_type - matching_super_parameters - - missing_code_block_language_in_doc_comment - missing_whitespace_between_adjacent_strings - no_adjacent_strings_in_list - no_default_cases @@ -102,7 +94,6 @@ linter: - no_literal_bool_comparisons - no_logic_in_create_state - no_runtimeType_toString - - no_wildcard_variable_uses - non_constant_identifier_names - noop_primitive_operations - null_check_on_nullable_type_parameter @@ -125,7 +116,7 @@ linter: - prefer_final_fields - prefer_final_in_for_each - prefer_final_locals - # - prefer_final_parameters # Very verbose + # - prefer_final_parameters # adds too much verbosity - prefer_for_elements_to_map_fromIterable - prefer_foreach - prefer_function_declarations_over_variables @@ -161,7 +152,6 @@ linter: - type_init_formals - type_literal_in_constant_pattern - unawaited_futures - - unintended_html_in_doc_comment - unnecessary_await_in_return - unnecessary_brace_in_string_interps - unnecessary_breaks @@ -171,7 +161,6 @@ linter: - unnecessary_lambdas - unnecessary_late - unnecessary_library_directive - - unnecessary_library_name - unnecessary_new - unnecessary_null_aware_assignments - unnecessary_null_aware_operator_on_extension_on_nullable diff --git a/docs/CNAME b/docs/CNAME index 6217d1b3..17960576 100644 --- a/docs/CNAME +++ b/docs/CNAME @@ -1 +1 @@ -langchaindart.dev +langchaindart.com \ No newline at end of file diff --git a/docs/README.md b/docs/README.md index 12785c34..8f9a3f2f 100644 --- a/docs/README.md +++ b/docs/README.md @@ -38,7 +38,7 @@ LCEL is a declarative way to compose chains. 
LCEL was designed from day 1 to sup - [Overview](/expression_language/expression_language): LCEL and its benefits - [Interface](/expression_language/interface): The standard interface for LCEL objects -- [Cookbook](https://langchaindart.dev/#/expression_language/cookbook/prompt_llm_parser): Example code for accomplishing common tasks +- [Cookbook](https://langchaindart.com/#/expression_language/cookbook/prompt_llm_parser): Example code for accomplishing common tasks ## Modules diff --git a/docs/_sidebar.md b/docs/_sidebar.md index ee2c472a..532c82b8 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -1,4 +1,3 @@ -- [![Code Assist AI](https://img.shields.io/badge/AI-Code%20Assist-EB9FDA)](https://app.commanddash.io/agent?github=https://github.com/davidmigloz/langchain_dart) - [Get started](README.md) - [Installation](/get_started/installation.md) - [Quickstart](/get_started/quickstart.md) @@ -14,9 +13,7 @@ - [Function: Run custom logic](/expression_language/primitives/function.md) - [Binding: Configuring runnables](/expression_language/primitives/binding.md) - [Router: Routing inputs](/expression_language/primitives/router.md) - - [Retry: Retrying runnables](/expression_language/primitives/retry.md) - [Streaming](/expression_language/streaming.md) - - [Fallbacks](/expression_language/fallbacks.md) - Cookbook - [Prompt + LLM](/expression_language/cookbook/prompt_llm_parser.md) - [Multiple chains](/expression_language/cookbook/multiple_chains.md) @@ -59,7 +56,6 @@ - [Tool calling](/modules/model_io/models/chat_models/how_to/tools.md) - [LLMChain](/modules/model_io/models/chat_models/how_to/llm_chain.md) - Integrations - - [Anthropic](/modules/model_io/models/chat_models/integrations/anthropic.md) - [OpenAI](/modules/model_io/models/chat_models/integrations/openai.md) - [Firebase Vertex AI](/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md) - [GCP Vertex AI](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md) @@ -98,7 +94,6 @@ - [Vector stores](/modules/retrieval/vector_stores/vector_stores.md) - Integrations - [Memory](/modules/retrieval/vector_stores/integrations/memory.md) - - [ObjectBox](/modules/retrieval/vector_stores/integrations/objectbox.md) - [Chroma](/modules/retrieval/vector_stores/integrations/chroma.md) - [Pinecone](/modules/retrieval/vector_stores/integrations/pinecone.md) - [Supabase](/modules/retrieval/vector_stores/integrations/supabase.md) @@ -123,7 +118,7 @@ - [Memory](/modules/memory/memory.md) - [Agents](/modules/agents/agents.md) - [Agent types](/modules/agents/agent_types/agent_types.md) - - [Tools Agent](/modules/agents/agent_types/tools_agent.md) + - [OpenAI functions](/modules/agents/agent_types/openai_tools_agent.md) - [Tools](/modules/agents/tools/tools.md) - [Calculator](/modules/agents/tools/calculator.md) - [DALL-E Image Generator](/modules/agents/tools/openai_dall_e.md) diff --git a/docs/expression_language/cookbook/prompt_llm_parser.md b/docs/expression_language/cookbook/prompt_llm_parser.md index e96bf6c1..bb9a1a28 100644 --- a/docs/expression_language/cookbook/prompt_llm_parser.md +++ b/docs/expression_language/cookbook/prompt_llm_parser.md @@ -33,7 +33,7 @@ print(res); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-4o-mini, +// model: gpt-3.5-turbo-0125, // created: 1714835666, // system_fingerprint: fp_3b956da36b // }, @@ -74,7 +74,7 @@ print(res); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-4o-mini, +// model: gpt-3.5-turbo-0125, // created: 1714835734, // 
system_fingerprint: fp_a450710239 // }, @@ -144,7 +144,7 @@ print(res); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-4o-mini, +// model: gpt-3.5-turbo-0125, // created: 1714835806, // system_fingerprint: fp_3b956da36b // }, diff --git a/docs/expression_language/fallbacks.md b/docs/expression_language/fallbacks.md deleted file mode 100644 index 5fd0b8a7..00000000 --- a/docs/expression_language/fallbacks.md +++ /dev/null @@ -1,135 +0,0 @@ -# Fallbacks - -When working with language models, you may often encounter issues from the underlying APIs, e.g. rate limits or downtime. Therefore, as you move your LLM applications into production it becomes more and more important to have contingencies for errors. That's why we've introduced the concept of fallbacks. - -Crucially, fallbacks can be applied not only on the LLM level but on the whole runnable level. This is important because often times different models require different prompts. So if your call to OpenAI fails, you don't just want to send the same prompt to Anthropic - you probably want to use e.g. a different prompt template. - -## Handling LLM API errors with fallbacks - -This is maybe the most common use case for fallbacks. A request to an LLM API can fail for a variety of reasons - the API could be down, you could have hit a rate limit, or any number of things. This Situation can be handled using Fallbacks. - -Fallbacks can be created using `withFallbacks()` function on the runnable that you are working on, for example `final runnablWithFallbacks = mainRunnable.withFallbacks([fallback1, fallback2])` this would create a `RunnableWithFallback` along with a list of fallbacks. When it is invoked, the `mainRunnable` would be called first, if it fails then fallbacks would be invoked sequentially until one of the fallback in list return output. If the `mainRunnable` succeeds and returns output then the fallbacks won't be called. - -## Fallback for chat models - -```dart -// fake model will throw error during invoke and fallback model will be called -final fakeOpenAIModel = ChatOpenAI( - defaultOptions: const ChatOpenAIOptions(model: 'tomato'), -); - -final latestModel = ChatOpenAI( - defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), -); - -final modelWithFallbacks = fakeOpenAIModel.withFallbacks([latestModel]); - -final prompt = PromptValue.string('Explain why sky is blue in 2 lines'); - -final res = await modelWithFallbacks.invoke(prompt); -print(res); -/* -{ - "ChatResult": { - "id": "chatcmpl-9nKBcFNkzo5qUrdNB92b36J0d1meA", - "output": { - "AIChatMessage": { - "content": "The sky appears blue because molecules in the Earth's atmosphere scatter shorter wavelength blue light from the sun more effectively than longer wavelengths like red. This scattering process is known as Rayleigh scattering.", - "toolCalls": [] - } - }, - "finishReason": "FinishReason.stop", - "metadata": { - "model": "gpt-4o-2024-05-13", - "created": 1721542696, - "system_fingerprint": "fp_400f27fa1f" - }, - "usage": { - "LanguageModelUsage": { - "promptTokens": 16, - "promptBillableCharacters": null, - "responseTokens": 36, - "responseBillableCharacters": null, - "totalTokens": 52 - } - }, - "streaming": false - } -} -*/ -``` - -Note: if the options provided when invoking the runnable with fallbacks are not compatible with some of the fallbacks, they will be ignored. If you want to use different options for different fallbacks, provide them as `defaultOptions` when instantiating the fallbacks or use `bind()`. 
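A minimal sketch of the `bind()` approach, reusing the `ChatOpenAI` and `withFallbacks()` setup from above (the model name and temperature are placeholders): the fallback carries its own options, so it does not depend on the options passed at invocation time.

```dart
final primaryModel = ChatOpenAI(
  defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'),
);

// The fallback gets its own options via bind(), instead of relying on the
// options provided when the runnable with fallbacks is invoked.
final fallbackModel = ChatOpenAI(
  defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'),
).bind(ChatOpenAIOptions(temperature: 0));

final modelWithFallbacks = primaryModel.withFallbacks([fallbackModel]);

final res = await modelWithFallbacks.invoke(
  PromptValue.string('Explain why sky is blue in 2 lines'),
);
print(res);
```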
- -## Fallbacks for RunnableSequences with batch - -```dart -final fakeOpenAIModel = ChatOpenAI( - defaultOptions: const ChatOpenAIOptions(model: 'tomato'), -); - -final latestModel = ChatOpenAI( - defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), -); - -final promptTemplate = ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); - -final badChain = promptTemplate.pipe(fakeOpenAIModel); -final goodChain = promptTemplate.pipe(latestModel); - -final chainWithFallbacks = badChain.withFallbacks([goodChain]); - -final res = await chainWithFallbacks.batch( - [ - {'topic': 'bears'}, - {'topic': 'cats'}, - ], -); -print(res); -/* -[ - { - "id": "chatcmpl-9nKncT4IpAxbUxrEqEKGB0XUeyGRI", - "output": { - "content": "Sure! How about this one?\n\nWhy did the bear bring a suitcase to the forest?\n\nBecause it wanted to pack a lunch! 🐻🌲", - "toolCalls": [] - }, - "finishReason": "FinishReason.stop", - "metadata": { - "model": "gpt-4o-2024-05-13", - "created": 1721545052, - "system_fingerprint": "fp_400f27fa1f" - }, - "usage": { - "promptTokens": 13, - "promptBillableCharacters": null, - "responseTokens": 31, - "responseBillableCharacters": null, - "totalTokens": 44 - }, - "streaming": false - }, - { - "id": "chatcmpl-9nKnc58FpXFTPkzZfm2hHxJ5VSQQh", - "output": { - "content": "Sure, here's a cat joke for you:\n\nWhy was the cat sitting on the computer?\n\nBecause it wanted to keep an eye on the mouse!", - "toolCalls": [] - }, - "finishReason": "FinishReason.stop", - "metadata": { - "model": "gpt-4o-2024-05-13", - "created": 1721545052, - "system_fingerprint": "fp_c4e5b6fa31" - }, - "usage": { - "promptTokens": 13, - "promptBillableCharacters": null, - "responseTokens": 29, - "responseBillableCharacters": null, - "totalTokens": 42 - }, - "streaming": false - } -] -*/ -``` diff --git a/docs/expression_language/get_started.md b/docs/expression_language/get_started.md index 9b51efe6..70c12b9a 100644 --- a/docs/expression_language/get_started.md +++ b/docs/expression_language/get_started.md @@ -120,7 +120,7 @@ print(res2); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-4o-mini, +// model: gpt-3.5-turbo-0125, // created: 1714327251, // system_fingerprint: fp_3b956da36b // }, diff --git a/docs/expression_language/interface.md b/docs/expression_language/interface.md index 30fcf890..9b7085d8 100644 --- a/docs/expression_language/interface.md +++ b/docs/expression_language/interface.md @@ -107,7 +107,7 @@ final res = await chain.batch( {'topic': 'cats'}, ], options: [ - const ChatOpenAIOptions(model: 'gpt-4o-mini', temperature: 0.5), + const ChatOpenAIOptions(model: 'gpt-3.5-turbo', temperature: 0.5), const ChatOpenAIOptions(model: 'gpt-4', temperature: 0.7), ], ); diff --git a/docs/expression_language/primitives.md b/docs/expression_language/primitives.md index aecaa93e..89d618e4 100644 --- a/docs/expression_language/primitives.md +++ b/docs/expression_language/primitives.md @@ -11,4 +11,3 @@ This section goes into greater depth on where and how some of these components a - [Function: Run custom logic](/expression_language/primitives/function.md) - [Binding: Configuring runnables](/expression_language/primitives/binding.md) - [Router: Routing inputs](/expression_language/primitives/router.md) -- [Retry: Retrying Runnable](/expression_language/primitives/retry.md) diff --git a/docs/expression_language/primitives/binding.md b/docs/expression_language/primitives/binding.md index 2aeb9575..a04a511f 100644 --- a/docs/expression_language/primitives/binding.md +++ 
b/docs/expression_language/primitives/binding.md @@ -57,7 +57,7 @@ final chain = Runnable.fromMap({ chatModel.bind(ChatOpenAIOptions(model: 'gpt-4-turbo')) | outputParser, 'q2': prompt2 | - chatModel.bind(ChatOpenAIOptions(model: 'gpt-4o-mini')) | + chatModel.bind(ChatOpenAIOptions(model: 'gpt-3.5-turbo')) | outputParser, }); diff --git a/docs/expression_language/primitives/function.md b/docs/expression_language/primitives/function.md index 88bf731b..e0b621fd 100644 --- a/docs/expression_language/primitives/function.md +++ b/docs/expression_language/primitives/function.md @@ -76,7 +76,7 @@ await chain.invoke('x raised to the third plus seven equals 12'); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-4o-mini, +// model: gpt-3.5-turbo-0125, // created: 1714463309, // system_fingerprint: fp_3b956da36b // }, @@ -122,7 +122,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){}); // }, // finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-4o-mini, +// model: gpt-3.5-turbo-0125, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -141,7 +141,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){}); // }, // finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-4o-mini, +// model: gpt-3.5-turbo-0125, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -160,7 +160,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){}); // }, // finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-4o-mini, +// model: gpt-3.5-turbo-0125, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, diff --git a/docs/expression_language/primitives/mapper.md b/docs/expression_language/primitives/mapper.md index bc599cb6..2fb57295 100644 --- a/docs/expression_language/primitives/mapper.md +++ b/docs/expression_language/primitives/mapper.md @@ -54,7 +54,9 @@ In the following example, the model streams the output in chunks and the output final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat.jsonObject, + responseFormat: ChatOpenAIResponseFormat( + type: ChatOpenAIResponseFormatType.jsonObject, + ), ), ); final parser = JsonOutputParser(); diff --git a/docs/expression_language/primitives/retry.md b/docs/expression_language/primitives/retry.md deleted file mode 100644 index ef6ae6c9..00000000 --- a/docs/expression_language/primitives/retry.md +++ /dev/null @@ -1,94 +0,0 @@ -# RunnableRetry : Retrying Runnables - -`RunnableRetry` wraps a `Runnable` and retries it if it fails. It be created using `runnable.withRetry()`. - -By default, the runnable will be retried 3 times with exponential backoff strategy. - -## Usage - -## Creating a RunnableRetry - -```dart -final model = ChatOpenAI(); -final input = PromptValue.string('Explain why sky is blue in 2 lines'); - -final modelWithRetry = model.withRetry(); -final res = await modelWithRetry.invoke(input); -print(res); -``` - -## Retrying a chain - -`RunnableRetry` can be used to retry any `Runnable`, including a chain of `Runnable`s. 
- -Example - -```dart -final promptTemplate = ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); -final model = ChatOpenAI( - defaultOptions: ChatOpenAIOptions(model: 'gpt-4o'), -); -final chain = promptTemplate.pipe(model).withRetry(); - -final res = await chain.batch( - [ - {'topic': 'bears'}, - {'topic': 'cats'}, - ], -); -print(res); -``` - -> In general, it's best to keep the scope of the retry as small as possible. - -## Configuring the retry - -```dart -// passing a fake model to cause Exception -final input = PromptValue.string('Explain why sky is blue in 2 lines'); -final model = ChatOpenAI( - defaultOptions: ChatOpenAIOptions(model: 'fake-model'), -); -final modelWithRetry = model.withRetry( - maxRetries: 3, - addJitter: true, -); -final res = await modelWithRetry.invoke(input); -print(res); -// retried 3 times and returned Exception: -// OpenAIClientException({ -// "uri": "https://api.openai.com/v1/chat/completions", -// "method": "POST", -// "code": 404, -// "message": "Unsuccessful response", -// "body": { -// "error": { -// "message": "The model `fake-model` does not exist or you do not have access to it.", -// "type": "invalid_request_error", -// "param": null, -// "code": "model_not_found" -// } -// } -// }) -``` - -## Passing delay durations - -If you want to use custom delay durations for each retry attempt, you can pass a list of `Duration` objects to the `delayDurations` parameter. - -```dart -final input = PromptValue.string('Explain why sky is blue in 2 lines'); -final model = ChatOpenAI( - defaultOptions: ChatOpenAIOptions(model: 'fake-model'), -); -final modelWithRetry = model.withRetry( - maxRetries: 3, - delayDurations: [ - Duration(seconds: 1), - Duration(seconds: 2), - Duration(seconds: 3), - ], -); -final res = await modelWithRetry.invoke(input); -print(res); -``` diff --git a/docs/expression_language/primitives/router.md b/docs/expression_language/primitives/router.md index da5a59c6..15b6f8ad 100644 --- a/docs/expression_language/primitives/router.md +++ b/docs/expression_language/primitives/router.md @@ -12,7 +12,7 @@ First, let’s create a chain that will identify incoming questions as being abo ```dart final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3.2'), + defaultOptions: ChatOllamaOptions(model: 'llama3'), ); final classificationChain = PromptTemplate.fromTemplate(''' @@ -131,7 +131,7 @@ Here is a question: {query} '''; -final embeddings = OllamaEmbeddings(model: 'llama3.2'); +final embeddings = OllamaEmbeddings(model: 'llama3'); final promptTemplates = [physicsTemplate, historyTemplate]; final promptEmbeddings = await embeddings.embedDocuments( promptTemplates.map((final pt) => Document(pageContent: pt)).toList(), @@ -146,7 +146,7 @@ final chain = Runnable.fromMap({'query': Runnable.passthrough()}) | return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]); }) | ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), + defaultOptions: const ChatOllamaOptions(model: 'llama3'), ) | StringOutputParser(); diff --git a/docs/expression_language/streaming.md b/docs/expression_language/streaming.md index dd04c9c6..8b4b720f 100644 --- a/docs/expression_language/streaming.md +++ b/docs/expression_language/streaming.md @@ -49,7 +49,7 @@ print(chunks.first); // }, // finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-4o-mini, +// model: gpt-3.5-turbo-0125, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, @@ -71,7 +71,7 @@ print(result); // }, // 
finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-4o-mini, +// model: gpt-3.5-turbo-0125, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, @@ -124,7 +124,9 @@ Let’s see such a parser in action to understand what this means. final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat.jsonObject, + responseFormat: ChatOpenAIResponseFormat( + type: ChatOpenAIResponseFormatType.jsonObject, + ), ), ); final parser = JsonOutputParser(); diff --git a/docs/index.html b/docs/index.html index 6d4f395b..eab7ac39 100644 --- a/docs/index.html +++ b/docs/index.html @@ -2,13 +2,16 @@ - + @@ -38,7 +41,7 @@ - + diff --git a/docs/modules/agents/agent_types/agent_types.md b/docs/modules/agents/agent_types/agent_types.md index d6c79bd0..229422ee 100644 --- a/docs/modules/agents/agent_types/agent_types.md +++ b/docs/modules/agents/agent_types/agent_types.md @@ -8,7 +8,7 @@ response to the user. Here are the agents available in LangChain. ### OpenAI Functions -Certain OpenAI models (like `gpt-3.5-turbo` and `gpt-4`) have been +Certain OpenAI models (like gpt-3.5-turbo-0613 and gpt-4-0613) have been explicitly fine-tuned to detect when a function should to be called and respond with the inputs that should be passed to the function. The OpenAI Functions Agent is designed to work with these models. diff --git a/docs/modules/agents/agent_types/openai_tools_agent.md b/docs/modules/agents/agent_types/openai_tools_agent.md new file mode 100644 index 00000000..db68921e --- /dev/null +++ b/docs/modules/agents/agent_types/openai_tools_agent.md @@ -0,0 +1,173 @@ +# OpenAI tools + +Certain OpenAI models (like `gpt-3.5-turbo` and `gpt-4`) have been +fine-tuned to detect when a tool should to be called and respond with the +inputs that should be passed to the tool. In an API call, you can describe +tools and have the model intelligently choose to output a JSON object +containing arguments to call those tools. The goal of the OpenAI Function +APIs is to more reliably return valid and useful tool calls than a generic +text completion or chat API. + +The OpenAI Tools Agent is designed to work with these models. + +> **Note**: Must be used with an [OpenAI Tools](https://platform.openai.com/docs/guides/function-calling) model. + +```dart +final llm = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4-turbo', + temperature: 0, + ), +); +final tool = CalculatorTool(); +final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); +final executor = AgentExecutor(agent: agent); +final res = await executor.run('What is 40 raised to the 0.43 power? '); +print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' +``` + +You can easily call your own functions by wrapping them in a `Tool`. You can also add memory to the agent by passing it when creating the agent. + +Let's see an example of how to do this. + +First let's create a class that will be the input for our tool. 
+ +```dart +class SearchInput { + const SearchInput({ + required this.query, + required this.n, + }); + + final String query; + final int n; + + SearchInput.fromJson(final Map json) + : this( + query: json['query'] as String, + n: json['n'] as int, + ); +} +``` + +Now let's define the tool: + +```dart +final tool = Tool.fromFunction( + name: 'search', + description: 'Tool for searching the web.', + inputJsonSchema: const { + 'type': 'object', + 'properties': { + 'query': { + 'type': 'string', + 'description': 'The query to search for', + }, + 'n': { + 'type': 'number', + 'description': 'The number of results to return', + }, + }, + 'required': ['query'], + }, + func: callYourSearchFunction, + getInputFromJson: SearchInput.fromJson, +); +``` + +Notice that we need to provide a function that converts the JSON input that the model will send to our tool into the input class that we defined. + +The tool will call `callYourSearchFunction` function with the parsed input. For simplicity, we will just mock the search function. +```dart +String callYourSearchFunction(final SearchInput input) { + return 'Results:\n${List.generate(input.n, (final i) => 'Result ${i + 1}').join('\n')}'; +} +``` + +Now we can create the agent and run it. + +```dart +final llm = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(temperature: 0), +); + +final memory = ConversationBufferMemory(returnMessages: true); +final agent = OpenAIToolsAgent.fromLLMAndTools( + llm: llm, + tools: [tool], + memory: memory, +); + +final executor = AgentExecutor(agent: agent); + +final res1 = await executor.run( + 'Search for cats. Return only 3 results.', +); +print(res1); +// Here are 3 search results for "cats": +// 1. Result 1 +// 2. Result 2 +// 3. Result 3 +``` + +## Using LangChain Expression Language (LCEL) + +You can replicate the functionality of the OpenAI Functions Agent by using the LangChain Expression Language (LCEL) directly. + +```dart +final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + +final prompt = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are a helpful assistant'), + (ChatMessageType.human, '{input}'), + (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), +]); + +final tool = CalculatorTool(); + +final model = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + temperature: 0, + tools: [tool], + ), +); + +const outputParser = OpenAIToolsAgentOutputParser(); + +List buildScratchpad(final List intermediateSteps) { + return intermediateSteps + .map((final s) { + return s.action.messageLog + + [ + ChatMessage.tool( + toolCallId: s.action.id, + content: s.observation, + ), + ]; + }) + .expand((final m) => m) + .toList(growable: false); +} + +final agent = Agent.fromRunnable( + Runnable.mapInput( + (final AgentPlanInput planInput) => { + 'input': planInput.inputs['input'], + 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), + }, + ).pipe(prompt).pipe(model).pipe(outputParser), + tools: [tool], +); +final executor = AgentExecutor(agent: agent); + +final res = await executor.invoke({ + 'input': 'What is 40 raised to the 0.43 power?', +}); +print(res['output']); +// 40 raised to the power of 0.43 is approximately 4.88524. +``` + +In this way, you can create your own custom agents with full control over their behavior. 
diff --git a/docs/modules/agents/agent_types/tools_agent.md b/docs/modules/agents/agent_types/tools_agent.md deleted file mode 100644 index 45a14352..00000000 --- a/docs/modules/agents/agent_types/tools_agent.md +++ /dev/null @@ -1,190 +0,0 @@ -# Tools Agent - -An agent powered by the [tool calling API](/modules/model_io/models/chat_models/how_to/tools.md). - -This agent is designed to work with any chat model that supports tool calling. It can interpret the model's output and decide when to call specific tools based on that output. - -**Supported models:** -You can use any chat model that supports tool calling, like `ChatOpenAI`, `ChatOllama`, `ChatAnthropic`, `ChatFirebaseVertexAI`, etc. Check the [tool calling docs](/modules/model_io/models/chat_models/how_to/tools.md) for a complete list. - -## Usage - -In the following example, we use `ChatOllama` with the `llama3.2` model and a calculator tool (included in `langchain_community`) to calculate the result of a mathematical expression. - -```dart -import 'package:langchain/langchain.dart'; -import 'package:langchain_community/langchain_community.dart'; -import 'package:langchain_ollama/langchain_ollama.dart'; - -//... - -final llm = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.2', - temperature: 0, - ), -); -final tool = CalculatorTool(); -final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); -final executor = AgentExecutor(agent: agent); -final res = await executor.run( - 'What is 40 raised to the power of 0.43? ' - 'Return the result with 3 decimals.', -); -print(res); -// The result is: 4.885 -``` - -## Custom tools - -You can easily call your own functions by wrapping them in a `Tool`. You can also add memory to the agent by passing it when creating the agent. - -Let's see an example of how to do this. - -First, let's create a class that will be the input for our tool. - -```dart -@immutable -class SearchInput { - const SearchInput({ - required this.query, - required this.n, - }); - - final String query; - final int n; - - SearchInput.fromJson(final Map json) - : this( - query: json['query'] as String, - n: json['n'] as int, - ); -} -``` - -Now let's define the tool: - -```dart -final searchTool = Tool.fromFunction( - name: 'search', - description: 'Tool for searching the web.', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'query': { - 'type': 'string', - 'description': 'The query to search for', - }, - 'n': { - 'type': 'integer', - 'description': 'The number of results to return', - }, - }, - 'required': ['query'], - }, - func: callYourSearchFunction, - getInputFromJson: SearchInput.fromJson, -); -``` - -Notice that we need to provide a function that converts the JSON input that the model will send to our tool into the input class that we defined. - -The tool will call `callYourSearchFunction` function with the parsed input. For simplicity, we will just mock the search function. 
-```dart -String callYourSearchFunction(final SearchInput input) { - final n = input.n; - final res = List.generate( - n, - (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}', - ); - return 'Results:\n${res.join('\n')}'; -} -``` - -Now we can create the agent and run it: - -```dart -final llm = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3-groq-tool-use', - temperature: 0, - ), -); - -final memory = ConversationBufferMemory(returnMessages: true); -final agent = ToolsAgent.fromLLMAndTools( - llm: llm, - tools: [searchTool], - memory: memory, -); - -final executor = AgentExecutor(agent: agent); - -final res1 = await executor.run( - 'Search for cat names. Return only 3 results.', -); -print(res1); -// Here are the top 3 cat names I found: AAA, BBB, and CCC. -``` - -## Custom agent using LangChain Expression Language (LCEL) - -You can replicate the functionality of the Tools Agent by using the LangChain Expression Language (LCEL) directly. - -```dart -final openAiKey = Platform.environment['OPENAI_API_KEY']; - -final prompt = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant'), - (ChatMessageType.human, '{input}'), - (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), -]); - -final tool = CalculatorTool(); - -final model = ChatOpenAI( - apiKey: openAiKey, - defaultOptions: ChatOpenAIOptions( - model: 'gpt-4o-mini', - temperature: 0, - tools: [tool], - ), -); - -const outputParser = ToolsAgentOutputParser(); - -List buildScratchpad(final List intermediateSteps) { - return intermediateSteps - .map((s) { - return s.action.messageLog + - [ - ChatMessage.tool( - toolCallId: s.action.id, - content: s.observation, - ), - ]; - }) - .expand((m) => m) - .toList(growable: false); -} - -final agent = Agent.fromRunnable( - Runnable.mapInput( - (AgentPlanInput planInput) => { - 'input': planInput.inputs['input'], - 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), - }, - ).pipe(prompt).pipe(model).pipe(outputParser), - tools: [tool], -); -final executor = AgentExecutor(agent: agent); - -final res = await executor.invoke({ - 'input': 'What is 40 raised to the power of 0.43? ' - 'Return the result with 3 decimals.', -}); -print(res['output']); -// The result of 40 raised to the power of 0.43 is approximately 4.885. -``` - -In this way, you can create your own custom agents with full control over their behavior, while still leveraging the flexibility of the Tools Agent to work with various language models and tools. diff --git a/docs/modules/agents/agents.md b/docs/modules/agents/agents.md index 78004d19..ab56353c 100644 --- a/docs/modules/agents/agents.md +++ b/docs/modules/agents/agents.md @@ -75,7 +75,7 @@ First, let's load the language model we're going to use to control the agent. ```dart final llm = ChatOpenAI( apiKey: openAiKey, - defaultOptions: ChatOpenAIOptions(temperature: 0), + defaultOptions: const ChatOpenAIOptions(temperature: 0), ); ``` @@ -91,7 +91,7 @@ Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use. ```dart -final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); ``` Now let's create the agent executor and test it out! 
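The executor step that follows mirrors the other agent examples in these docs. A minimal sketch (the question is just a placeholder):

```dart
final executor = AgentExecutor(agent: agent);

final res = await executor.run('What is 40 raised to the 0.43 power?');
print(res);
```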
diff --git a/docs/modules/agents/tools/calculator.md b/docs/modules/agents/tools/calculator.md index fe9f127c..0847f2eb 100644 --- a/docs/modules/agents/tools/calculator.md +++ b/docs/modules/agents/tools/calculator.md @@ -14,7 +14,7 @@ final llm = ChatOpenAI( ), ); final tool = CalculatorTool(); -final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); +final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); final executor = AgentExecutor(agent: agent); final res = await executor.run('What is 40 raised to the 0.43 power? '); print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' diff --git a/docs/modules/agents/tools/openai_dall_e.md b/docs/modules/agents/tools/openai_dall_e.md index 426f4d89..9d30914b 100644 --- a/docs/modules/agents/tools/openai_dall_e.md +++ b/docs/modules/agents/tools/openai_dall_e.md @@ -18,7 +18,7 @@ final tools = [ CalculatorTool(), OpenAIDallETool(apiKey: openAiKey), ]; -final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); final executor = AgentExecutor(agent: agent); final res = await executor.run( 'Calculate the result of 40 raised to the power of 0.43 and generate a funny illustration with it. ' diff --git a/docs/modules/model_io/models/chat_models/chat_models.md b/docs/modules/model_io/models/chat_models/chat_models.md index e191707b..5aabfd23 100644 --- a/docs/modules/model_io/models/chat_models/chat_models.md +++ b/docs/modules/model_io/models/chat_models/chat_models.md @@ -93,5 +93,5 @@ print(chatRes1.generations); print(chatRes1.usage?.totalTokens); // -> 36 print(chatRes1.modelOutput); -// -> {id: chatcmpl-7QHTjpTCELFuGbxRaazFqvYtepXOc, created: 2023-06-11 17:41:11.000, model: gpt-4o-mini} +// -> {id: chatcmpl-7QHTjpTCELFuGbxRaazFqvYtepXOc, created: 2023-06-11 17:41:11.000, model: gpt-3.5-turbo} ``` diff --git a/docs/modules/model_io/models/chat_models/how_to/tools.md b/docs/modules/model_io/models/chat_models/how_to/tools.md index 11bf5f3e..16c12081 100644 --- a/docs/modules/model_io/models/chat_models/how_to/tools.md +++ b/docs/modules/model_io/models/chat_models/how_to/tools.md @@ -3,11 +3,9 @@ > We use the term "tool calling" interchangeably with "function calling". Although function calling is sometimes meant to refer to invocations of a single function, we treat all models as though they can return multiple tool or function calls in each message. > Tool calling is currently supported by: -> - [`ChatAnthropic`](/modules/model_io/models/chat_models/integrations/anthropic.md) +> - [`ChatOpenAI`](/modules/model_io/models/chat_models/integrations/openai.md) > - [`ChatFirebaseVertexAI`](/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md) > - [`ChatGoogleGenerativeAI`](/modules/model_io/models/chat_models/integrations/googleai.md) -> - [`ChatOllama`](/modules/model_io/models/chat_models/integrations/ollama.md) -> - [`ChatOpenAI`](/modules/model_io/models/chat_models/integrations/openai.md) Tool calling allows a model to respond to a given prompt by generating output that matches a user-defined schema. While the name implies that the model is performing some action, this is actually not the case! 
The model is coming up with the arguments to a tool, and actually running the tool (or not) is up to the user - for example, if you want to extract output matching some schema from unstructured text, you could give the model an “extraction” tool that takes parameters matching the desired schema, then treat the generated output as your final result. diff --git a/docs/modules/model_io/models/chat_models/integrations/anthropic.md b/docs/modules/model_io/models/chat_models/integrations/anthropic.md deleted file mode 100644 index b607ddc7..00000000 --- a/docs/modules/model_io/models/chat_models/integrations/anthropic.md +++ /dev/null @@ -1,145 +0,0 @@ -# ChatAnthropic - -Wrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API). - -## Setup - -The Anthropic API uses API keys for authentication. Visit your [API Keys](https://console.anthropic.com/settings/keys) page to retrieve the API key you'll use in your requests. - -The following models are available: -- `claude-3-5-sonnet-20240620` -- `claude-3-haiku-20240307` -- `claude-3-opus-20240229` -- `claude-3-sonnet-20240229` -- `claude-2.0` -- `claude-2.1` - -Mind that the list may not be up-to-date. See https://docs.anthropic.com/en/docs/about-claude/models for the updated list. - -## Usage - -```dart -final apiKey = Platform.environment['ANTHROPIC_API_KEY']; - -final chatModel = ChatAnthropic( - apiKey: apiKey, - defaultOptions: ChatAnthropicOptions( - model: 'claude-3-5-sonnet-20240620', - temperature: 0, - ), -); - -final chatPrompt = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), - (ChatMessageType.human, 'Text to translate:\n{text}'), -]); - -final chain = chatPrompt | chatModel | StringOutputParser(); - -final res = await chain.invoke({ - 'input_language': 'English', - 'output_language': 'French', - 'text': 'I love programming.', -}); -print(res); -// -> 'J'adore programmer.' -``` - -## Multimodal support - -```dart -final apiKey = Platform.environment['ANTHROPIC_API_KEY']; - -final chatModel = ChatAnthropic( - apiKey: apiKey, - defaultOptions: ChatAnthropicOptions( - model: 'claude-3-5-sonnet-20240620', - temperature: 0, - ), -); - -final res = await chatModel.invoke( - PromptValue.chat([ - ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - mimeType: 'image/jpeg', - data: base64.encode( - await File('./bin/assets/apple.jpeg').readAsBytes(), - ), - ), - ]), - ), - ]), -); - -print(res.output.content); -// -> 'The fruit in the image is an apple.' -``` - -## Streaming - -```dart -final apiKey = Platform.environment['ANTHROPIC_API_KEY']; - -final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), - (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), -]); - -final chatModel = ChatAnthropic( - apiKey: apiKey, - defaultOptions: ChatAnthropicOptions( - model: 'claude-3-5-sonnet-20240620', - temperature: 0, - ), -); - -final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser()); - -final stream = chain.stream({'max_num': '30'}); -await stream.forEach(print); -// 123 -// 456789101 -// 112131415161 -// 718192021222 -// 324252627282 -// 930 -``` - -## Tool calling - -`ChatAnthropic` supports tool calling. 
- -Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. - -Example: -```dart -const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - }, - 'required': ['location'], - }, -); -final chatModel = ChatAnthropic( - apiKey: apiKey, - defaultOptions: ChatAnthropicOptions( - model: 'claude-3-5-sonnet-20240620', - temperature: 0, - tools: [tool], - ), -); - -final res = await model.invoke( - PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), -); -``` diff --git a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md index 167ffe13..8dc05345 100644 --- a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md +++ b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md @@ -66,31 +66,31 @@ print(res); ## Available models The following models are available: -- `gemini-1.5-flash`: +- `gemini-1.0-pro` + * text -> text model + * Max input token: 30720 + * Max output tokens: 2048 +- `gemini-1.0-pro-vision`: + * text / image -> text model + * Max input token: 12288 + * Max output tokens: 4096 +- `gemini-1.5-pro-preview-0514`: * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 -- `gemini-1.5-pro`: +- `gemini-1.5-flash-preview-0514`: * text / image / audio -> text model - * Max input token: 2097152 - * Max output tokens: 8192 -- `gemini-1.0-pro-vision`: - * text / image -> text model - * Max input token: 16384 - * Max output tokens: 2048 -- `gemini-1.0-pro` - * text -> text model - * Max input token: 32760 + * Max input token: 1048576 * Max output tokens: 8192 -Mind that this list may not be up-to-date. Refer to the [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models) for the updated list. +Mind that this list may not be up-to-date. Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) for the updated list. ## Multimodal support ```dart final chatModel = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro', + model: 'gemini-1.5-pro-preview-0514', ), ); final res = await chatModel.invoke( @@ -122,7 +122,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ final chatModel = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro', + model: 'gemini-1.5-pro-preview-0514', ), ); @@ -140,7 +140,7 @@ await stream.forEach(print); `ChatGoogleGenerativeAI` supports tool calling. -Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. +Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. 
Example: ```dart @@ -160,7 +160,7 @@ const tool = ToolSpec( ); final chatModel = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro', + model: 'gemini-1.5-pro-preview-0514', temperature: 0, tools: [tool], ), diff --git a/docs/modules/model_io/models/chat_models/integrations/googleai.md b/docs/modules/model_io/models/chat_models/integrations/googleai.md index 12ff5f2c..6eca8777 100644 --- a/docs/modules/model_io/models/chat_models/integrations/googleai.md +++ b/docs/modules/model_io/models/chat_models/integrations/googleai.md @@ -7,24 +7,24 @@ Wrapper around [Google AI for Developers](https://ai.google.dev/) API (aka Gemin To use `ChatGoogleGenerativeAI` you need to have an API key. You can get one [here](https://aistudio.google.com/app/apikey). The following models are available: -- `gemini-1.5-flash`: +- `gemini-1.0-pro` (or `gemini-pro`): + * text -> text model + * Max input token: 30720 + * Max output tokens: 2048 +- `gemini-pro-vision`: + * text / image -> text model + * Max input token: 12288 + * Max output tokens: 4096 +- `gemini-1.5-pro-latest`: text / image -> text model * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 -- `gemini-1.5-pro`: +- `gemini-1.5-flash-latest`: * text / image / audio -> text model - * Max input token: 2097152 - * Max output tokens: 8192 -- `gemini-1.0-pro` (or `gemini-pro`): - * text -> text model - * Max input token: 32760 + * Max input token: 1048576 * Max output tokens: 8192 -- `aqa`: - * text -> text model - * Max input token: 7168 - * Max output tokens: 1024 -Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/gemini-api/docs/models/gemini) for the updated list. +Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/models) for the updated list. ## Usage @@ -34,7 +34,7 @@ final apiKey = Platform.environment['GOOGLEAI_API_KEY']; final chatModel = ChatGoogleGenerativeAI( apiKey: apiKey, defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro', + model: 'gemini-1.5-pro-latest', temperature: 0, ), ); @@ -63,7 +63,7 @@ final apiKey = Platform.environment['GOOGLEAI_API_KEY']; final chatModel = ChatGoogleGenerativeAI( apiKey: apiKey, defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro', + model: 'gemini-1.5-pro-latest', temperature: 0, ), ); @@ -99,7 +99,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ final chatModel = ChatGoogleGenerativeAI( apiKey: apiKey, defaultOptions: const ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro', + model: 'gemini-1.5-pro-latest', temperature: 0, ), ); @@ -118,7 +118,7 @@ await stream.forEach(print); `ChatGoogleGenerativeAI` supports tool calling. -Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. +Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. 
Example: ```dart @@ -138,7 +138,7 @@ const tool = ToolSpec( ); final chatModel = ChatGoogleGenerativeAI( defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro', + model: 'gemini-1.5-pro-latest', temperature: 0, tools: [tool], ), diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index f612616e..37110289 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -2,9 +2,13 @@ Wrapper around [Ollama](https://ollama.ai) Completions API that enables to interact with the LLMs in a chat-like fashion. -Ollama allows you to run open-source large language models, such as Llama 3.2 or Gemma 2, locally. +Ollama allows you to run open-source large language models, such as Llama 3, locally. -Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage. +Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. + +It optimizes setup and configuration details, including GPU usage. + +For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library). ## Setup @@ -12,31 +16,7 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull llama3.2` -3. Instantiate the `ChatOllama` class with the downloaded model. - -```dart -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.2', - ), -); -``` - -For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library). - -### Ollama base URL - -By default, `ChatOllama` uses 'http://localhost:11434/api' as base URL (default Ollama API URL). But if you are running Ollama on a different host, you can override it using the `baseUrl` parameter. - -```dart -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions( - baseUrl: 'https://your-remote-server-where-ollama-is-running.com', - model: 'llama3.2', - ), -); -``` + * e.g., for Llama 3: `ollama pull llama3` ## Usage @@ -48,7 +28,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.2', + model: 'llama3', temperature: 0, ), ); @@ -64,9 +44,7 @@ print(res); // -> 'La traduction est : "J'aime le programming.' ``` -### Streaming - -Ollama supports streaming the output as the model generates it. +## Streaming ```dart final promptTemplate = ChatPromptTemplate.fromTemplates([ @@ -75,7 +53,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ ]); final chat = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.2', + model: 'llama3', temperature: 0, ), ); @@ -83,107 +61,14 @@ final chain = promptTemplate.pipe(chat).pipe(StringOutputParser()); final stream = chain.stream({'max_num': '9'}); await stream.forEach(print); -// 123 -// 456 -// 789 -``` - -### Multimodal support - -Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava). - -You can provide several base64-encoded `png` or `jpeg` images. Images up to 100MB in size are supported. 
- -```dart -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llava', - temperature: 0, - ), -); -final prompt = ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - data: base64.encode( - await File('./bin/assets/apple.jpeg').readAsBytes(), - ), - ), - ]), -); -final res = await chatModel.invoke(PromptValue.chat([prompt])); -print(res.output.content); -// -> 'An Apple' -``` - -### Tool calling - -`ChatOllama` offers support for native tool calling. This enables a model to answer a given prompt using tool(s) it knows about, making it possible for models to perform more complex tasks or interact with the outside world. It follows the standard [LangChain.dart tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). - -**Notes:** -- Tool calling requires [Ollama 0.3.0](https://github.com/ollama/ollama/releases/tag/v0.3.0) or newer. -- Streaming tool calls is not supported at the moment. -- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.2`](https://ollama.com/library/llama3.2) or [`llama3-groq-tool-use`](https://ollama.com/library/llama3-groq-tool-use)). -- At the moment, small models like `llama3.2` [cannot reliably maintain a conversation alongside tool calling definitions](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#llama-3.1-instruct). They can be used for zero-shot tool calling, but for multi-turn conversations it's recommended to use larger models like `llama3.2:70b` or `llama3.2:405b`. - -```dart -const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - }, - 'required': ['location'], - }, -); - -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.2', - temperature: 0, - tools: [tool], - ), -); - -final res = await chatModel.invoke( - PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), -); -print(res.output.toolCalls); -// [AIChatMessageToolCall{ -// id: a621064b-03b3-4ca6-8278-f37504901034, -// name: get_current_weather, -// arguments: {location: Boston, US}, -// }, -// AIChatMessageToolCall{ -// id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, -// name: get_current_weather, -// arguments: {location: Madrid, ES}, -// }] +// 1 +// 2 +// 3 +// .. +// 9 ``` -As you can see, `ChatOllama` support calling multiple tools in a single request. - -If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter: - -```dart -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.2', - temperature: 0, - tools: [tool], - toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), - ), -); -``` - -**Pro-tip:** You can improve tool-calling performance of small models by using few-shot prompting. You can find out how to do this [here](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools?id=few-shot-prompting) and in this [blog post](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance). 
- -### JSON mode +## JSON mode You can force the model to produce JSON output that you can easily parse using `JsonOutputParser`, useful for extracting structured data. @@ -194,7 +79,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ ]); final chat = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3.2', + model: 'llama3', temperature: 0, format: OllamaResponseFormat.json, ), @@ -212,214 +97,42 @@ print(res); // {Spain: 46735727, The Netherlands: 17398435, France: 65273538} ``` -## Examples - -### Answering questions with data from an external API - -Imagine you have an API that provides flight times between two cities: +## Multimodal support -```dart -// Simulates an API call to get flight times -// In a real application, this would fetch data from a live database or API -String getFlightTimes(String departure, String arrival) { - final flights = { - 'NYC-LAX': { - 'departure': '08:00 AM', - 'arrival': '11:30 AM', - 'duration': '5h 30m', - }, - 'LAX-NYC': { - 'departure': '02:00 PM', - 'arrival': '10:30 PM', - 'duration': '5h 30m', - }, - 'LHR-JFK': { - 'departure': '10:00 AM', - 'arrival': '01:00 PM', - 'duration': '8h 00m', - }, - 'JFK-LHR': { - 'departure': '09:00 PM', - 'arrival': '09:00 AM', - 'duration': '7h 00m', - }, - 'CDG-DXB': { - 'departure': '11:00 AM', - 'arrival': '08:00 PM', - 'duration': '6h 00m', - }, - 'DXB-CDG': { - 'departure': '03:00 AM', - 'arrival': '07:30 AM', - 'duration': '7h 30m', - }, - }; - - final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}'; - return jsonEncode(flights[key] ?? {'error': 'Flight not found'}); -} -``` +Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava). -Using the tool calling capabilities of Ollama, we can provide the model with the ability to call this API whenever it needs to get flight times to answer a question. +You can provide several base64-encoded `png` or `jpeg` images. Images up to 100MB in size are supported. ```dart -const getFlightTimesTool = ToolSpec( - name: 'get_flight_times', - description: 'Get the flight times between two cities', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'departure': { - 'type': 'string', - 'description': 'The departure city (airport code)', - }, - 'arrival': { - 'type': 'string', - 'description': 'The arrival city (airport code)', - }, - }, - 'required': ['departure', 'arrival'], - }, -); - final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions( - model: 'llama3.2', + defaultOptions: ChatOllamaOptions( + model: 'llava', temperature: 0, - tools: [getFlightTimesTool], - ), -); - -final messages = [ - ChatMessage.humanText( - 'What is the flight time from New York (NYC) to Los Angeles (LAX)?', ), -]; - -// First API call: Send the query and function description to the model -final response = await chatModel.invoke(PromptValue.chat(messages)); - -messages.add(response.output); - -// Check if the model decided to use the provided function -if (response.output.toolCalls.isEmpty) { - print("The model didn't use the function. 
Its response was:"); - print(response.output.content); - return; -} - -// Process function calls made by the model -for (final toolCall in response.output.toolCalls) { - final functionResponse = getFlightTimes( - toolCall.arguments['departure'], - toolCall.arguments['arrival'], - ); - // Add function response to the conversation - messages.add( - ChatMessage.tool( - toolCallId: toolCall.id, - content: functionResponse, - ), - ); -} - -// Second API call: Get final response from the model -final finalResponse = await chatModel.invoke(PromptValue.chat(messages)); -print(finalResponse.output.content); -// The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes. -``` - -### Extracting structured data with tools - -A useful application of tool calling is extracting structured data from unstructured text. In the following example, we use a tool to extract the names, heights, and hair colors of people mentioned in a passage. - -```dart -const tool = ToolSpec( - name: 'information_extraction', - description: 'Extracts the relevant information from the passage', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'people': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': { - 'type': 'string', - 'description': 'The name of a person', - }, - 'height': { - 'type': 'number', - 'description': 'The height of the person in cm', - }, - 'hair_color': { - 'type': 'string', - 'description': 'The hair color of the person', - 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], - }, - }, - 'required': ['name', 'height', 'hair_color'], - }, - }, - }, - 'required': ['people'], - }, ); - -final model = ChatOllama( - defaultOptions: ChatOllamaOptions( - options: ChatOllamaOptions( - model: 'llama3.2', - temperature: 0, +final prompt = ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), ), - tools: [tool], - toolChoice: ChatToolChoice.forced(name: tool.name), - ), -); - -final promptTemplate = ChatPromptTemplate.fromTemplate(''' -Extract and save the relevant entities mentioned in the following passage together with their properties. - -Passage: -{input}'''); - -final chain = Runnable.getMapFromInput() - .pipe(promptTemplate) - .pipe(model) - .pipe(ToolsOutputParser()); - -final res = await chain.invoke( - 'Alex is 5 feet tall. ' - 'Claudia is 1 foot taller than Alex and jumps higher than him. ' - 'Claudia has orange hair and Alex is blonde.', + ]), ); -final extractedData = res.first.arguments; -print(extractedData); -// { -// people: [ -// { -// name: Alex, -// height: 152, -// hair_color: blonde -// }, -// { -// name: Claudia, -// height: 183, -// hair_color: orange -// } -// ] -// } +final res = await chatModel.invoke(PromptValue.chat([prompt])); +print(res.output.content); +// -> 'An Apple' ``` -### RAG (Retrieval-Augmented Generation) pipeline +## RAG (Retrieval-Augmented Generation) pipeline We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `ChatOllama`. ```dart // 1. Create a vector store and add documents to it final vectorStore = MemoryVectorStore( - embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), + embeddings: OllamaEmbeddings(model: 'llama3'), ); await vectorStore.addDocuments( documents: [ @@ -436,7 +149,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ // 3. 
Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3.2'), + defaultOptions: ChatOllamaOptions(model: 'llama3'), ); final retriever = vectorStore.asRetriever( defaultOptions: VectorStoreRetrieverOptions( diff --git a/docs/modules/model_io/models/chat_models/integrations/open_router.md b/docs/modules/model_io/models/chat_models/integrations/open_router.md index c2d63555..e747ca5f 100644 --- a/docs/modules/model_io/models/chat_models/integrations/open_router.md +++ b/docs/modules/model_io/models/chat_models/integrations/open_router.md @@ -95,63 +95,3 @@ await stream.forEach(print); // 123 // 456789 ``` - -## Tool calling - -OpenRouter supports [tool calling](https://openrouter.ai/docs#tool-calls). - -Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. - -In the following example we use the `joke` tool to generate jokes. We stream the joke generation using the `ToolsOutputParser' which tries to "auto-complete" the partial json from each chunk into a valid state. - -```dart -final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; -const tool = ToolSpec( - name: 'joke', - description: 'A joke', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'setup': { - 'type': 'string', - 'description': 'The setup for the joke', - }, - 'punchline': { - 'type': 'string', - 'description': 'The punchline to the joke', - }, - }, - 'required': ['location', 'punchline'], - }, -); -final promptTemplate = ChatPromptTemplate.fromTemplate( - 'tell me a long joke about {foo}', -); -final chat = ChatOpenAI( - apiKey: openRouterApiKey, - baseUrl: 'https://openrouter.ai/api/v1', - defaultOptions: ChatOpenAIOptions( - model: 'gpt-4o', - tools: [tool], - toolChoice: ChatToolChoice.forced(name: 'joke'), - ), -); -final outputParser = ToolsOutputParser(); - -final chain = promptTemplate.pipe(chat).pipe(outputParser); - -final stream = chain.stream({'foo': 'bears'}); -await for (final chunk in stream) { - final args = chunk.first.arguments; - print(args); -} -// {} -// {setup: } -// {setup: Why don't} -// {setup: Why don't bears} -// {setup: Why don't bears like fast food} -// {setup: Why don't bears like fast food?, punchline: } -// {setup: Why don't bears like fast food?, punchline: Because} -// {setup: Why don't bears like fast food?, punchline: Because they can't} -// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} -``` diff --git a/docs/modules/model_io/models/chat_models/integrations/openai.md b/docs/modules/model_io/models/chat_models/integrations/openai.md index 6b3ccbbc..df92b348 100644 --- a/docs/modules/model_io/models/chat_models/integrations/openai.md +++ b/docs/modules/model_io/models/chat_models/integrations/openai.md @@ -1,78 +1,25 @@ # OpenAI -This notebook provides a quick overview for getting started with [OpenAI](https://platform.openai.com/docs/introduction) chat models. For detailed documentation of all `ChatOpenAI` features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest/langchain_openai/ChatOpenAI-class.html). +[OpenAI](https://platform.openai.com/docs/introduction) offers a spectrum of +chat models with different levels of power suitable for different tasks. -OpenAI has several chat models. 
You can find information about their latest models and their costs, context windows, and supported input types in the [OpenAI docs](https://platform.openai.com/docs/models). - -> Note that certain OpenAI models can also be accessed via the [Microsoft Azure platform](https://azure.microsoft.com/en-us/products/ai-services/openai-service). Check out the API reference for more information on how to use the Azure with `ChatOpenAI`. - -## Setup - -To access OpenAI models you'll need to create an OpenAI account, get an API key, and install the [langchain_openai](https://pub.dev/packages/langchain_openai) integration package. - -### Credentials - -Head to the [OpenAI Platform](https://platform.openai.com), sign up and get your [API key](https://platform.openai.com/account/api-keys). - -### Installation - -The LangChain.dart OpenAI integration lives in the [langchain_openai](https://pub.dev/packages/langchain_openai) package: - -```yaml -dart pub add langchain_openai -``` - -## Usage - -### Instantiation - -Now we can instantiate our model object and generate chat completions: +This example goes over how to use LangChain to interact with +OpenAI [models](https://platform.openai.com/docs/models) using the Chat API. ```dart final openaiApiKey = Platform.environment['OPENAI_API_KEY']; +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, '{text}'), +]); + final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: ChatOpenAIOptions( - model: 'gpt-4o' temperature: 0, - // ...other options ), ); -``` - -If you are using a proxy, you can override the base URL, headers, and other options: - -```dart -final client = ChatOpenAI( - baseUrl: 'https://my-proxy.com', - headers: {'x-my-proxy-header': 'value'}, -); -``` - -### Invocation - -Now you can generate completions by calling the `invoke` method: - -```dart -final messages = [ - ChatMessage.system('You are a helpful assistant that translates English to French.'), - ChatMessage.humanText('I love programming.'), -]; -final prompt = PromptValue.chat(messages); -final res = await llm.invoke(prompt); -// -> 'J'adore la programmation.' -``` - -### Chaining - -We can chain our model with a prompt template or output parser to create a more complex pipeline: - -```dart -final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), - (ChatMessageType.human, '{text}'), -]); final chain = promptTemplate | chatModel | StringOutputParser(); @@ -85,16 +32,15 @@ print(res); // -> 'J'adore la programmation.' ``` -### Streaming - -OpenAI models support [streaming](/expression_language/streaming.md) the output of th model as it is generated. +## Streaming ```dart final openaiApiKey = Platform.environment['OPENAI_API_KEY']; final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, - 'You are a helpful assistant that replies only with numbers ' + ( + ChatMessageType.system, + 'You are a helpful assistant that replies only with numbers ' 'in order without any spaces or commas', ), (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), @@ -111,91 +57,7 @@ await stream.forEach(print); // 789 ``` -### Multimodal support - -OpenAI's models have [vision capabilities](https://platform.openai.com/docs/guides/vision), meaning the models can take in images and answer questions about them. 
- -You can send the image as a base64-encoded string: - -```dart -final prompt = PromptValue.chat([ - ChatMessage.system('You are a helpful assistant.'), - ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - mimeType: 'image/jpeg', - data: '/9j/4AAQSkZJRgABAQAAAQABAAD...Rdu1j//2Q==', // base64-encoded image - ), - ]), - ), -]); -``` - -Or you can send the URL where the image is hosted: - -```dart -final prompt = PromptValue.chat([ - ChatMessage.system('You are a helpful assistant.'), - ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - data: 'https://upload.wikimedia.org/wikipedia/commons/9/92/95apple.jpeg', - ), - ]), - ), -]); -``` - -### Tool calling - -OpenAI has a [tool calling](/modules/model_io/models/chat_models/how_to/tools.md) (we use "tool calling" and "function calling" interchangeably here) API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. tool-calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally. - - -```dart -const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - }, - 'required': ['location'], - }, -); - -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'gpt-4o' - temperature: 0, - tools: [tool], - ), -); - -final res = await chatModel.invoke( - PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), -); -print(res.output.toolCalls); -// [AIChatMessageToolCall{ -// id: a621064b-03b3-4ca6-8278-f37504901034, -// name: get_current_weather, -// arguments: {location: Boston, US}, -// }, -// AIChatMessageToolCall{ -// id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, -// name: get_current_weather, -// arguments: {location: Madrid, ES}, -// }] -``` - -Notice that the returned `AIChatMessage` has a `toolCalls` field. This contains in a standardized tool call format that is model-provider agnostic. - -You can also stream OpenAI tool calls. `ToolsOutputParser` is a useful tool for this case, as it concatenates the chunks progressively and tries to complete the partial JSON into a valid one: +You can also stream OpenAI tool calls: ```dart const tool = ToolSpec( @@ -246,76 +108,9 @@ await for (final chunk in stream) { // {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} ``` -### Structured Outputs - -[Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) is a feature that ensures the model will always generate responses that adhere to your supplied JSON Schema, so you don't need to worry about the model omitting a required key, or hallucinating an invalid enum value. - -```dart -final prompt = PromptValue.chat([ - ChatMessage.system( - 'Extract the data of any companies mentioned in the ' - 'following statement. 
Return a JSON list.', - ), - ChatMessage.humanText( - 'Google was founded in the USA, while Deepmind was founded in the UK', - ), -]); -final chatModel = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - model: 'gpt-4o', - temperature: 0, - responseFormat: ChatOpenAIResponseFormat.jsonSchema( - ChatOpenAIJsonSchema( - name: 'Companies', - description: 'A list of companies', - strict: true, - schema: { - 'type': 'object', - 'properties': { - 'companies': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': {'type': 'string'}, - 'origin': {'type': 'string'}, - }, - 'additionalProperties': false, - 'required': ['name', 'origin'], - }, - }, - }, - 'additionalProperties': false, - 'required': ['companies'], - }, - ), - ), - ), -); - -final res = await chatModel.invoke(prompt); -// { -// "companies": [ -// { -// "name": "Google", -// "origin": "USA" -// }, -// { -// "name": "Deepmind", -// "origin": "UK" -// } -// ] -// } -``` - -When you use `strict: true`, the model outputs will match the supplied schema exactly. Mind that the strict mode only support a [subset of JSON schema](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas) for performance reasons. Under-the-hood, OpenAI uses a technique known as constrained sampling or constrained decoding. For each JSON Schema, they compute a grammar that represents that schema, and pre-process its components to make it easily accessible during model sampling. This is why the first request with a new schema incurs a latency penalty. Typical schemas take under 10 seconds to process on the first request, but more complex schemas may take up to a minute. - -### JSON mode +## JSON mode -When [JSON mode](https://platform.openai.com/docs/guides/structured-outputs/json-mode) is turned on, the model's output is ensured to be valid JSON. You can use it in combination with a `JsonOutputParser` to parse the response into a JSON map. - -> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It is recommended that you use Structured Outputs if it is supported for your use case. +GPT-4 Turbo supports a new JSON mode, which ensures the model will respond with valid JSON. JSON mode is useful for developers generating JSON in the Chat Completions API outside of function calling. You can use it in combination with a `JsonOutputParser` to parse the response into a JSON map. ```dart final prompt = PromptValue.chat([ @@ -332,7 +127,9 @@ final llm = ChatOpenAI( defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', temperature: 0, - responseFormat: ChatOpenAIResponseFormat.jsonObject, + responseFormat: ChatOpenAIResponseFormat( + type: ChatOpenAIResponseFormatType.jsonObject, + ), ), ); final chain = llm.pipe(JsonOutputParser()); @@ -351,22 +148,3 @@ print(res); // ] // } ``` - -### Fine-tuning - -You can call [fine-tuned OpenAI models](https://platform.openai.com/docs/guides/fine-tuning) by passing in your corresponding modelName parameter. - -This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. 
For example: - -```dart -final chatModel = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - model: 'ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR' - ), -); -``` - -## API reference - -For detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest). diff --git a/docs/modules/model_io/models/llms/integrations/ollama.md b/docs/modules/model_io/models/llms/integrations/ollama.md index 25f6806e..3a90917c 100644 --- a/docs/modules/model_io/models/llms/integrations/ollama.md +++ b/docs/modules/model_io/models/llms/integrations/ollama.md @@ -16,7 +16,7 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull llama3.2` + * e.g., for Llama 3: `ollama pull llama3` ## Usage @@ -26,7 +26,7 @@ final prompt = PromptTemplate.fromTemplate( ); final llm = Ollama( defaultOptions: OllamaOptions( - model: 'llama3.2', + model: 'llama3', ), ); final chain = prompt | llm | StringOutputParser(); @@ -43,7 +43,7 @@ final promptTemplate = PromptTemplate.fromTemplate( ); final llm = Ollama( defaultOptions: OllamaOptions( - model: 'llama3.2', + model: 'llama3', ), ); final chain = promptTemplate | llm | StringOutputParser(); diff --git a/docs/modules/model_io/output_parsers/json.md b/docs/modules/model_io/output_parsers/json.md index 06451f17..905b380b 100644 --- a/docs/modules/model_io/output_parsers/json.md +++ b/docs/modules/model_io/output_parsers/json.md @@ -21,7 +21,9 @@ final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: ChatOpenAIOptions( model: 'gpt-4-turbo', - responseFormat: ChatOpenAIResponseFormat.jsonObject, + responseFormat: ChatOpenAIResponseFormat( + type: ChatOpenAIResponseFormatType.jsonObject, + ), ), ); final parser = JsonOutputParser(); diff --git a/docs/modules/retrieval/text_embedding/integrations/google_ai.md b/docs/modules/retrieval/text_embedding/integrations/google_ai.md index 657d7f6d..6d84e8a1 100644 --- a/docs/modules/retrieval/text_embedding/integrations/google_ai.md +++ b/docs/modules/retrieval/text_embedding/integrations/google_ai.md @@ -6,6 +6,8 @@ The embedding service in the [Gemini API](https://ai.google.dev/docs/embeddings_ - `text-embedding-004` * Dimensions: 768 (with support for reduced dimensionality) +- `embedding-001` + * Dimensions: 768 The previous list of models may not be exhaustive or up-to-date. Check out the [Google AI documentation](https://ai.google.dev/models/gemini) for the latest list of available models. 
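For reference, a minimal sketch of using these embedding models from LangChain.dart, assuming the `GoogleGenerativeAIEmbeddings` class in `langchain_google` and its `apiKey`/`model` parameters (check the API reference for the exact constructor signature):

```dart
// Hedged sketch: embed a query with the Gemini embedding service.
// Class name and parameters are assumed; adjust to the current API.
import 'dart:io';

import 'package:langchain_google/langchain_google.dart';

Future<void> main() async {
  final apiKey = Platform.environment['GOOGLEAI_API_KEY'];
  final embeddings = GoogleGenerativeAIEmbeddings(
    apiKey: apiKey,
    model: 'text-embedding-004', // or 'embedding-001'
  );
  const text = 'This is a test document.';
  final vector = await embeddings.embedQuery(text);
  print(vector.length); // both models listed above return 768 dimensions
}
```

Since both listed models produce 768-dimensional vectors, they are interchangeable for similarity search as long as queries and documents are embedded with the same model.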
diff --git a/docs/modules/retrieval/text_embedding/integrations/ollama.md b/docs/modules/retrieval/text_embedding/integrations/ollama.md index b13ddd28..395b1203 100644 --- a/docs/modules/retrieval/text_embedding/integrations/ollama.md +++ b/docs/modules/retrieval/text_embedding/integrations/ollama.md @@ -1,7 +1,7 @@ # OllamaEmbeddings ```dart -final embeddings = OllamaEmbeddings(model: 'llama3.2'); +final embeddings = OllamaEmbeddings(model: 'llama3'); const text = 'This is a test document.'; final res = await embeddings.embedQuery(text); final res = await embeddings.embedDocuments([text]); diff --git a/docs/modules/retrieval/vector_stores/integrations/img/objectbox.png b/docs/modules/retrieval/vector_stores/integrations/img/objectbox.png deleted file mode 100644 index 6d88c06fd27d87ec228ae1bdf5a1b803b0af140d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51968 [base85-encoded PNG data omitted]
zXuk@7cL0dz^K)r_t}Y|xlQ%QHj_R>o%WNVn zL*b=W=Rp_F1(P$yAl{1K#=*B%%|M(9;fl>tkP-)FjHt@B37SI>tP}6?bm%Ls635+G zo#a1(=rn?O8*lbZnHY-L+WO8vUxkc`MA(I7p{YwjG@udJNYi>=@?V2P80mH>C70ul z=Odp_o}T7dmdVGx|DhM_(p9a$liSFOOc7?px*38G4QDrF1ZCo%?S(zIit2KIet{G9 zDO{|Q*DQh&_QbTgc{G0YUov&_MBDhgA2>Tl>?c~>->1laWO4F{Jl@;2H+E%3Y? z0YwcPV$X!37sM$o?LB=~1nwJ+LS1W-J*H?s^NCTIrSxl*7(zy2=hK$Rnu*M8C|4Jv z1PF&8SU1Uv>IxDuQ0ldlXH2vtM2F#VNlZ98Olv0{MT+JXYUN@lWGP3bvScYI*Z~ka zOO<{{8rB&#T5SwY<*rX*jXcd}d_E>#$Kz_3oG?#sd#AK1Ti!>jr|HpYSb{og-FEmK zCGHPZduYFGH@K^euC;@lE2`$jxzANb%%VZR*5b^Vwq^`REkv3F1Q%yXM@fX*Gpl~b zMMcYyzCMbyr`nVzDvDVn(lLsmsSsGL#rgYcosp^a*U_$UCB}YdBUmq-^uI}%kv&2F zmp)kSPg8ou8KkzEp?9*aJX~kZ=T)RZ+-@c&Wx*Y2WQFzo>foJlaO%e;srt3aYMRt# zV1)s|dsMaoil_XK{qtGo2*Ojvx#-u`2=>bv8f*X1*CY2@GN?n+@Sc!O>}Y6M)EkCt z6r%K%?*Wlgo>bM`)%(WrKiEc1Rm2E1eo!Y5C8MYJ>W^2}!Mh8tVRcN(oA#$FHEv!0 zLm$WP^6FG#PVk&KH~_kAdAtXpcwZ&tO#6GkFU-ve-MFQ;U+z5pY0TmEg>XMo18M}@ z<)dL1tubdAFr;WyoOmg&?y?Qn+N{IFb;$D8Q!Py_>3Sl}qI;KmDKcGkTA2xw zEETdLW>Mb^A!iitzUktc4^;+(?<6eYGOvvs?CRWK*XVdKbQ?TDjjQ+fku#Ou3f z9&e11bCR^E0GIyMx|xC%+L4z{}MI|8`U?geJ zj948x`A1W22;>vY&0(YL+&`u#_sHemFx#Y4HdZz>cQaeWLMudi!iiryNMY{3*Ygp< z&`pmT^*O6X^AA55c;Z{7BcfzJ_>Pe&UaecD@DDRXJ#%X$Ilxtlkt(;h3S+^)??--C zX1mWppQK%c1+ zznHin_3lhOu&Qy=pYBp3aD%^v%Yu!%VnXS`)IW*BxPLXfbZ+c#@IN^xe%BnBZ5 zBO{ooTCK_3lJV-Xy~O5}zm)17GT&EC(DeP#;~8dt#<8du*OG-6BenrriAba|mFnqQEO+(wacqa0B$HzKt z018FX%_F!w4`IQCz*8CIun=K9XUu&(#qd)AH-X6Er+S5Cgx08n8HT_(TYJ8g7#Lyl zod>yOoZNB5rY{cml;Ry`K2*}PIyTVg))yXu06Pqz(^(~Xly3+mtF|Zig5xPjkTX~h zS)(N|VNmCLr;KK(b9wm$?k^yExUcFW;lGle)sYz81Hq07b|qT9hHR+v*q7jwT;)mC zNG0(gNH|dj9zXqtn224o$fk1KJRrX4c?1^x_+|U4G|l! zI4!G>LhTn*5vLUmS8ul=y4pH#N8N9~79HQcpK}_%H@}}9C*85c*gH}>9E~Yh`P}p- zWCsx^ z*GqpI@pDXfyXUb79WKjayN|~+U-$c}bkA$+__(RDWX-)UHqb#R+d@Kqe zAt3VZ%1AeNqXb$w;7r0;O^5s<&nx{}wzyQsa==2V5e1Ul+8M@L9Vwa>8o)V86y!{q zPeqQMYaosA16na_mMU7?BHHdT8iuBtBDi9)r3(ct14w1C^G`i3Z4v$bku&m50`RO^ z&!|E09J34{aCy!ex)AqG80`Jb=I>8n1GaPO<2*MYLB-tB^><0qJE~F@ zOCnHKOUtv`)A!6=*@df_XoiI&B=FINdfVmVs`!xS`@~$(#mgNd+#uTd^z@fCpYFEX z2Wz9ZsXMPr)U_?o?QdPvCwBcJf}#Q@?rF%uf9CbV4w;%lt(*=_ej>;k%f}{?<~sva zLi4Hk4X{VzY82>we9#|N4{(iX|8haIMl@i+Hjg?&`yC}fFu;XYHdYJm6P<|HKvh)d zSq4NWd2W4k+CH>j+Jn$+l-saA&9vyj&L?dcls{xgVx>R*2wG)jhbBp@nAv0v&qkc9 z*{*E~arS}lp{F)1y2-TK9V5lZ`>c-*&ZCmQ7?x_9L4wT3uB1wjXwdyc|ID$LN!ys< zXque!!XEGS<@Q=hO0MHMMd)d{rmpX`q@fqmKx^qTrM=FuemzsPj1vFaHC?M^<_BT@ zoIwykqIJbkPS-S;6$&#i?Gl1avau`v^tBNmAH$bR+I1)Ont3qB0dF}X>38+0h0+gG z7g5&!tIC<8l%GE=_&_j<;ao)qJsyv4P++=X6EUwad$`$ix%OO?)MoSf3HMe=R^IWa zC^=fA8Q(F8SU7JkfeFi=wpNg{joc%05XO!93mc8*@3@JGd@~H8WZ#VzlRiJ$>@|`0 zl~16N?S)(enjqASZgO|@Yb<^EK0N2IH-XL$r%L0NTj^0&6U|G>s(er5-`y|LP~KiA ztOme*t+vN=+Fg$xN$y8`HPXFrOGHF}-pg`&KY^3-nXz*8!wg0#Zw;ZMKE>tpYkCPF zb118zf44mEk&B$&2}FG6fl?g9q)132IgAnT$X&_LMSir8^E?VKNrrJ&*%l-x$ei@2 z()?-dBNz<@PbQfuQ zspXu#)DJgv4}&zT4i7p|RUVS8CYix6W(GUqiX{J>5yf^3<^W9ootkYz5bmqC2nOOL zz;e%}T|*tUGPjl7BZq$#HDQD&?^UYew1VY(J?ZL|_IdZ4L*d)g8JsQO%?++wT3gqz z5UcwwhoimmnvUzC)0&R@E=Y)`?ZYkg?Pm~JoYp_wT-!d-H9Soc)~{LLZt3gqb;XHb zWQ!#qkSk7A&e;@b$R7s+6(Z~Q@-g~zcuqt$Z3 zRnw9KH77;)GpBtfg+{SrxMCB6SUi+$KVk$qlv6``B~wgaW?f!D^rk3*c+lh3XpE;< z$WG2#x@1K(wL3f+wwbaL*F%h`JSP^o4*=8LTJ~3Qf!mB&*`D7vwclCUdf)Yj+^6zp z^0U0GZ~3FjVqtrP8b9+;3vxvhcc{wU#r}+$!`Xei32=5eFFI)0%88mwwbfGdM$pYA z9EP}T4mqxw3QlXaJzTzI-AH+IY&{)`oxk<>yS!a9^X>fU#4>~90?1($Cc`K%$5qmY zVUmgJ&sk{}5?$dTLwyTG$=#rm`F(D>7)knw)7ZcflbcQ=HNjeW3?0k;xljEV?i^XL-!oI0t_0|-R?`{08I|Hp-G zewTIj9WEmtcma2B0Pm3xaCbp^Tt)YnI zVPhSB0lySjo^JK0dSTsyztL9yyMk@%{%GxVXCpPe->#0+qCB;JG)wt_f~{xSIP9Mb zn5>#Zf<`OENNkXRv+~y#Vi|*etFk7Q#G4jZitP9d=f(Z@O^p$9%%SuFTSHefgaczc 
zR_uE~x*wBw-yYucv^^nLONi6e*Fog~kho1yIvl({Y)F51FiWS&gZ&#=d)2)fhoDjIEDg^C=T<|HP~(ROOe`l#HZHw^Hm*XJUm zFHsW%2>OjM3*U)d> z@2wI)b1*PuNjn=oi(PnLM&LfW9=AqF-P5gf@jsi=^itj!=_(~Z1_;64)tNH=;QQ!> z@i0*S$d;Z#QYZG1*}RE!;<&P*s0p*)3T)jk{R~SjXo$O09wm$P71qhsoG48rZHn6l z``_p8_vkXEGxYU=q<7OJ2D=|(`ApF%&1q@Dv(e-B?Fy4T|0HNO?32^nIN;Pw4ha;* z0b`SP{eI}uxk>Kv-CLjOUcQ|?KC`i!cNez9_y^-gFn&?|QwLs66zj+1@HkbIfHDv1 z)_MAOby7>x-F0)lw>S}7sBP&ufe%l@SX$ues`^JEw&W(c4J!(=z`_s8!=TUUe!5<} zco|D{eHxA-?1X6!Bm;xULnf>zB5gLQM$lj@e&>v!8ac2TW|5n0udMAv=ggg{us$x_4A>2w{iJSZ79M&aDbB|ydE(qvAgp)t`;~TR&D;F z8VbRDxs3#(r6rG=(R|iHH~yyM93sBPLEyNJ08>cd%MSuZ1hWyLV+@o;{Zk+wSK^uI z>OP38&C}$x6$3$w7j{nCV;lMhdKexG6OXG*JkmGqPZpBv?Cf_KE(TKsmjU2y~v*iaTaB@(LjKf3T>cxaWp_w&?Cs6-M z)#lTc{ECLw|1wlvxln#l zpUzG9+3kc6x^Ty*sH|*Um1#AXI5m_+mqGuBLpAszDwX)B^M*905nFg`0FW%{3L*}t z^eNVvA9U)TZ>TKVE8Eh=SkbVng!@ArxrJ0U72!Xr-Vz)opOILSoyaO0yHm6wW-F`{ zI>I8K3khN&ZGT*ND8$7%&ym!Do>OukhThagR6?N4;R-uRxkL}ShVbY|%E86;A)16p z91r~H6>R^3ux&G+9R3X7USD)Pg{<~TOrY0e;N!hYr( zlku3x6$nPm56%rP`vTzS(|-fZGXg^@QS$sOkT z{vr@|L_nqYHl}dgv}RoT?xEh2%MyN*LYvqR0I&2nmP z*hY?!Xrg}_R+iPf@J3G9<@KnG%luq+JSpk8ofY$ELNcw^^q%q6-L#D0T1LvQaMaKM zUhR{1WZjIw@*)G&K#Y+ljBmdT?T)#NUuMC#YpCC;FhPDSRH$O7(#h%mv4|8D9G@fD z|KZ3zXEE4Y=?q?3m11GK}+io=!EZ7tq zbDTuD@!wK~Glc%0w7KtBei8)d<}M0B+XLvs*D*+$X7CjjIKG;%!4enHI{^A^QVuj` zBKF0PpeYxQle33#!Hon#UOSoE>Twmn=Pc&w|Mo3R=~?6$FM|j9az@6SO2zmHr@yB| zJz4>#3Kt`aB)!~ zyC2B(Gj4sKj1-}<3=~4WlYS5jNuiCisz>Ck9f?!V!=H$Ma1Dnpu*RmvauyF z0a;Q(rY$~?SB7cupmz@^9^LGY2L%8P|67ggT6I=|v#inqXC;S1ZQFObnX-cJLg(pa zwpO0p$sQv8ZVjkh-q z-@OIH=Z|=$(JpVgpBzji;=9^WlJgx((^gSQj5kj^QQ^JSgTe?jg^}4n0>)9VI>^!w zTtL!tXDwPy0t5kYlS zQ6+U!C^pR|Rwpv(YmIe_V6RX7(RLhV8~#*`$cY5Q6gQt>7PTDEId}}#IxTI zO0!B5Q27J*Z2;V+r8xY19g_=FBWS4hty3Y{C;xc z#m&$walb6~!l-{lNFj65Tz7)B%ty_x9aS4yyH!z+7_Fd|V3Y7gc{EB8 z#vW&kO$tOODeo*)6b5x3bbjbxtYts$U}je&2@loa;|IqS#uN6>xK-O(ii`?h8Qj?g zA6V+(H|*T{J;-ZfA^S59Qw{m9B{qqVtNUxzX{NTWuKKs<#bjTJ`=wH^Wuox+snAuq z_12fM#j6*$BT&)A`ly2Eu0rj~K0Tw1CJuBDTc(|`k67y|hlI&xfve$JpRs>ii zXoR{2<%^l`W?%k7>q7ki!w;Z|nrurC(Zlm^ilc7u$nXmIBNM>H8-FJbE0c{?&)3u& zSa@sb_tjRK0h>7;MAhhf;pm8KW=QIFe3rYrl#5p z3eIm>_O!<0C|gaxCXF9(&2RZ>_MFxZ>nXR9|Kh8Bpwd7-COENwHJsd9x-cth5JN1{ z0U`ZK4Vqph%j?RltzZE6i_BH)9N>O5q!Sy*n6@!dXD& z+6QbNt6b>K7$VtM3p?t5u zxb)Zq{}dAGdux6P8RTvn_(+DtmB}IKwl&YM#LN=sw1>ez?%LImRM`)@iWA9Nyezs) z9xnDMS$gf$1Ao6|YkAqe1h^i}Y1VXSBI)RIHQF&Meq?xi`F>s+O&o+=Eh_FiVlSHl9C8*asiP2I8>w;`4 zOs|68SOq-CbxZik=(|6vXUIT+&;`${rY-xf{47GrwiG#^YQtWG3s=3j*HXe6C1I(W z#rsg29IKl7K(D)7E{hn3i(USa%P%FokmbrePfzvY9t#E!o)Bk^nR0` zTJhmHm{{$L-2>sY6o4F`HxZRxfj+0wKrC9MGgAf=6&+N(+t8#N}Er& zsLE+9923{Vi7J?-rQB=RosnwimV4o|>T?LNMCQYz@0Qg@}M2rfz_a9@9VIq9>t=CCP(mN5zzGhKLgw()_ke%XkJp{WIRLmHxkyuiS{yxLEst+u);Z26LQT2zWaPk+!B?pw~;T7vJeuOx`_YD0^7~4 z%JZ;T1eCQAQA|D~m?#}nOQcLiWueLm&wrykF=pc9FzsVOd1$NIw&*0*^z`S@{d}b6 z{q8J|dx7g~`E7%eP>{;=JzuMD(pMycl;qae%9&8l#_--qkO19R1Ag#dEebSXJ>7KC z;?ziMYo#;um)+GEg?jr;I;7Zi-NYD!+&(2P3sw2lI{LA>F7Vm(M&~kr^sR+(JHk`V{!59pOcQ0$p9w#BW3OfkxA%m3i zk_-v`6qO$MH{$RK-dJYtT596m@o>_I)Yf{UE{ig|6R`H)*p!jwNb8})B@ zK}!U!6NvNF#NgjvLw(Y;_n$!8U4ML6wU3uysLqvNWan*H33D^1b^cdpddyYYwgq&{-DSOd zeF|d9F<3P_PO8-#kS79b8h8B7Qa5NEh2APx_*$URJ3M!1wqp8=((6+eYg&4x{Y zl|L=JGSpAUzLTMOf3e%f9?AG@*hW_`I{LWemVt2QZ~d{bgKeP(E`F!Yo7%}p zaM&p}ohYLT2W{~^*L@!^mSr{fI0+f_r-jDa=v&PrKkDT_9`*24Jf z>7UB9T_ArGY1x@P!Y0C6uz%3osL2i62MB-XjBYn0bg4rN6KC?9`FaLaOXd7oTR(>+ z{LM1fu*Io||M1JF^rn;=5Qnao6IWLMx9%-EB%sg3Dv@N=w6|K?{8Zv}bU=<*=Pnh- zOTe;Um91Y2G*OsE={x*`kDjNRHX_icxp+0uSRZRi{xBio*4LETpUfyB_Ke2t=wKpNpGdaDABQK4Fz;2VbIV|jn*`)$QBXz1J+8&NA}|4 z1#5n(G_q+a_kjrDR(N^!GRj3kV473>K|T+|d`WmdZ}ut-KAl^S<2mmdf8Si+GB6Ru 
zW*(tZz3h$NuHL)|PR7P0lkog=y{x@T4(^#JqaGvjAi4mKJT)w?nzd7cOWaxhCXkE) zPJ)2I5(E65%z1F%0!W7MV&*`dGp10JS5)v(s~=X&h(Uhsb6O;b{!73&XU#baxi%4n z#7bdl3w>>VupbWmCDMpKmnu+mO4qZda*@FYAorW&=?jOX5ZLE!?X^2)X_?PK<(cnI z{qLJ+BM4JN%&0*Q@_T_1Qztlc`N^t@I>Xw0*Kas))AD{yVaDxHf_VYi%XvnVbH;>O zOEsnN;~ysQ8D4mr6S1Xj{8?(JyhU@)AN<{G{WOREHEtzG%51~BjqN5hvhVjAzbq*C zQ#Yexh3DW9&d9>h3%z}#zSo27gDHEE77GK%hbTtYdl<7`ZCWb`uB~3YYxz`YEEwtE zic4RS`nwlAeCmBnYL>-nqIizmSk~s}KW-L}lG8A-#`9h$;1YEF8Pd3A!(UXIi*XfC7 zgH0-{1^%dC1x-%gmHw_Al)ghsZB<8gMOu;%NmuojG|8z6v@RlyvG`65^$7anJOjW@DC~>y<7K;R=2M)laO+aXA=w3{CKV&-7c@^qh@g&i9I8(2csh^BLV zL^ViA&YZ~pi-NeHysc?eHB1hZ#+Wkgt)*jOTLI{r&%G{7Pz{-k zVu$-bq~u5o6q@Zvhc~K#IThr!-Ph5On1~_>(c*$3-0XhCh0*+GmeK+~`<11c%St3> z_Lg%0`c=cg4=1oBj!~ti{28bEr(NyV}0=R6l?C}ju zGI>$c1ORWhBy%Z)uQ>>Zzx^+cS&yU74IE-I<-iAQo7-lriY1-L1iZ;Si)C{*B#nj5 zv+8++(k21ZO+dSXVW0GMrf7t|iA(^&AYMBd$pMw6C`G@N0MYHbp@DY&3f1s-fA$}o z-s6ZAr+KYnZ4j9rY_eEjeVh2VNHuC$$~$_p<9s_#Vue7$AtV5=2(CC<3buBz{k6E( zk!806z@j_-ZK*B}RO|LDUKXjGCKp^IiFwQ8R0|@IGQ>A*f_R*MWDH;m*L^WXa4xB8 znKG&}9nj|$n*h?k5FK7gEP;o_Ld=>=>%XnZX0p(=+mO*`;G6exI2L8BPLQ0411J}o zr2t%#N2$Tk)>?|v@yX_Va~Kv(L}NJGBt-mqTN#UZ$smZ~Hbl$RF@owon}T7M8afLb z)}jrNh(uTJ5kdOeF|f-?K2|}7?MC)67=1s+XsJ3q;lTqIO7qCazwyB0BAVd0#?N?O ztp4JDKRxpBAGLg7Q}Ev}X=%hgZB0-{mdSX%IxABIO3>QXi@f}l%NXV z&ZBd4Q5#qjrltWV34Lb81ujR3N*5!;c0QJO9twtX6UO?o!2vZlu!x;IBV;Og(*Pb; zV&H44V-$tSv3Q% z@Vver253J4jd&st9!}vdE=iout=!&56*ZMFPmjh}l>YN%-Q^pSdz+X`z<+Ma3hrE`tui>#$!cxPU!D7)oKF+F`wKTj2zP6wv zYO`dZcN`W9EOL~u3>{-Y^QFPMD+Hv`Y>q=**?qcCxDpl!pfJ*qW~p ztI+ncHDXPWjN#MPc-p9dI}_9T6Bw&9MII@zNO5G~dFGE;D(GgJ(?_z8B*^H*In@{psVF3#$XH4|qAf*^ zjsH_&DVL}jx)xlv58|dE5nLA}j4C20X8tO$`}K714p!nPS=fNY9P02L8e^#aw=g9V zDRO&7#@d$6@YCh5b7(Gc)rA9>A58XHi$2Aavel$h_0adcEj+_9wfiXBSC*Z3 zKKr=1_2kWI`}p{HawXBn^X}L~E+^d^>$lNh4Bouds%;s>dwt5esg$7K9wVsf!tGF} zf{1&-#i{_HCBb&jZSlTRR>6%~*>iE1)5{LdA^P#O6|h(H zr2mTjf_^F#(yzDXJr!e9mY2OoWM3x4qW4ne$IMX{V<`KEa!c&rt5Q1^!_kVO@Tnbr)+^j zdiho0l$nse^9Hcga1CkniYx^p8w=2mO zNA=kLXe?O->nWkET3%H7e$k|%J<^vcQ>8$|_9aHg7l{*=9j{2*KvNE=Jl3;ZzC+h@ zY7u7T+}Vym=~-W@?YlX0 z9HdaYW#RKak>v8Q9T~CZ2`(DfT~Pvd*RJgjdN6i-!r45zx)7{*KOT78JJ&!`qPwMl zK933Sm&ql%uE*qRWDVTF_o<(X9yPo1LWx=~+F(-0kqiql%;JVAP_`J@_`zrK023X* z%9&i9E}1M+yi}0&h=I|_OhEZ$G)Yl;=Eh*2c2Sw+taaA4STHodQHy>ll8d^{y#o-@ z5H5ZqYdttdZIr-a5vHvClZ~-GU-H0}T6)GF$y)>5%U3?vQ5b+C^HrTM6mzUOELtx@!@T24QKI z68Lz(e{k>IGv}F^d+vF3z;gVq9a^4FQydHe3F%k}AWBD}1OcR3hf1JZ-=8!EGiij| z`<2PSuYzAbHn5p4o^`}P1#Gcb#}Fziok&crRkhtV(KO!&MvZ1$O{9GCU9Ndz`Q+Koi zG{s`w1QnQQ@Dw`Y3yx+j>XTkgaI2(RLo!9BYLxq+u8oLr0Q|@`w9}T@P;&R$-qq!N zUI}v{{aVc*mSe^^q1#cIHRq>{*sdDX^=_Gk`U?=&TLBCPB{xXsbCOIB1w$#)= zXsg_49Msuh)rHfxiL>R1MlQ-P8+)+BhohTttEL(hz{>absf39jPkt-IZTcMeyIuP| zsj@>*JAvLT2L;&r1GT_lUk%mM^=u@U2HJ|u;pX^X(Aj2x@W0{M1!jkG`jlfW$Ai7G zg_QIPdzqr+VCGYipq?ehq@BjnD&!o#55%vIJvL^sM0zJTrPo4BaUo_DS)D$ z?W{xhYB+n8eei3dYa>H29AFC z=a`F02kFanIWy%IE;Cvie|7<{FE|EH{=VQJwRLr6T^nu`^G={BmyDNJ4-SCjCH<-L zQW8Aj8{1AONKbnj1s7$gF(vjvrtknH*%{@Aw|#pH=(<$-8}-22(;6yno)71?SRXCl zRp;T+Xa}UQx1&3;x75SzUegEoaHOELEc3zm(`i<4jR;c~3WFaPC4xM47J~(Rf|=|3 z{huC>wQQ|!(py$$Hu?h}=1a`_{Pur0@m0voUZ!NP9iI0Ex+{YFgG*s!I!}6j`a+8G z(3I)Fh%EcJT;(lzujf*uul^Cyc@`C@m+RcJz0bcxHz7C*ReAGBUodc+kje3CO{Ori zsAan=9UMbHFdcX{X-Q8RxJKf2%GCYTn%Wm3nkH7wr?gR*x3|Tsz>p~vD`?JTqb%i% z%;HOcumTweQA$lYc0}YIF6Sz#u683~Oy)Hh#@C;+mX`GG#Q`fJyu{!T8-3obR0y6P z>jGeYs3bN6ds`r|6oNOHb9lpcqT2VBiT&LQh%o9%v`ReR`uUjQ(i5z^zZxjqkl9u( z4&(;h?E)o-+}>Vu!u;VVdBKvRGSEAV;9D4TLqTqm+DxvP=f5>}60VIv3eLA%gQ^w> zji}j<;QHf!;Hd2tiYU8* zT5}N{+=#`30Q|7Na}wU*-)m+;HMwjT8$wwIV{Kl`xcPyuC;Exd@TQ7U=e?)yc}a%fIY`n~dNyi9Vhf<@0wJtQDbz}U! 
zj&BC!)8fm58GpeHSJeA7Iot?ZG<9`dLY+O2Vm>v-?A-i98P2sr|)X%th^1#c? zgMx@cUY&xf3W(%DwSnnCMu>zQzujX(o%G(zuhT>!^p{vUkX4wgm?cv8_b%YhTa-oN zqv;~2(Fbg#via2|Y~0j(nf9lY(C4vIa9=}e^{_Q^K zj6cj57YqQ1c>O)aDCl{*Dy?}|g8nvXsDqzmn)(B)3~~^8V3G@Gne~y_fz#Nd^?MzC zCFghfE$ABKZn6=9M7u~Rzl3T{;^E>Gnc!lqU|-fz1kBBkHcD!MMSj-cz+A*OZ8A~E zV)@FO<4#D@%ouIyW%sewuGSWvR}-apdb}>?HbpQzgy&DO{MO#f; zJqw{*BPC6Sg!~*>U;X?vw&Yf!6IRu_6P6vP=qmO%^#s5ruWnliW0=ES_~}J3q&rQg zF@Gt-XuDLdUU|(h=NN0YW)!RkWw6QcdEKwSr$7T`6Nb|d1(-!^og{@D%seZ7 z=1@hz=qZx=^Lr`^yPe%ip{pL@smv@EXM_9PhqK%{B0USV7yG2 z76e{FuV-a`lU)ppbvxA>>Gb`{w(T+zKwoOr$mHdfZ_Dy$PhZj&YBPsos+1cfaO*u0 z5J|>CVTgkYqwTiGik)r^&r-6WGT=Lc+#<+a7oIMLC%Iy2R#ACETnuv%n7-3gx$B(0 zNlPFtO4~CNj>$-YZT}Yo{-3-o@|I$FU01^SF%HBf>Ttjeqjw+VgR8R2(?VHm5rm4E z?|8njK8Q6Zp(1^(*5w^AuF-x}FHJkQg=8dLAPbRfko%lq9L@uPC;daw0jn1yL%X}e|P=t#1w3Ey*4 zUObJ^k_87|Ru+4=416_J@>_W)zF-)@ql**M+2h(A8%9CvmxwO3-@D|P_48A;JgcFo3?zzFJWv!^)P!QAzV4Ryws93)gq)G zOnSzx`|+hc$p8{_pTk6yZ`!W1&`R5Zz^ZM|_c6vvPXHV%Rk>ONUb19(-}Q+>=7-`;E`=tOxX<|>wJQQw2pchUARpanV$@p9*y@! z+!i5zBbv%2FhH0t`Smur6uGbO$^|FFv}o4Ro5w$&-*=?fiAUc`p$`)_QKXgyRJw1k5z{H-wHsT^1t zpSQHBg|Q@EW1v$P%r`OXQ?1Rx8mc)*WK))x?og+) z>=s*l2UZC_0HO=g*P%Y2?b^$Qd7lD_`o=yW(?>gJvcRp~sRtEg6M`)hCMV_W>s`Cu zyAF$~Na?rQl8ON(*aWhE=?lPMfY6hwV#+}#iga*vkeU~yejW0GW z21>DpsY+yVgM}Og&41GySi8L0U+jWkxsrFd%a-13pE@RLCsWrbW=|*3;bM-ud+l!v zrkc)^cRmV|S-<}9uAocRHFQ4{%{-Jz{k5f@0Fe}a=Kp90I9EYC3O_^^gHTseZ6jeJ zvaotww6p)YxY^Nyl2S{-S!?D%oMVSoJZCCJJVQjs#tffu)v6kNyKsIpdJM0JrZl_5 zzD3=xl=)v+72#r^c0=C!!&8QsXVuJPMVZtYi#}m&&H{0=cfA+c6a+=E!-}ndti??y z_v*Y7vL9QgH78-(wKW^HD?~_Y-yq5PiE&v!!hsGSqUo(D9V6Z;d?B#Ly)RU!gG*t= zRHLNK`@(DKMtAZwruXEgcxMWw$r0yyyRGIH68Ap+yG0#vcHq~6kFGw`b8)Pk@^t&F zfQAXYQUb*SfA{^$9ZAp=AgLpYron2_y;7fy_M3I{kNT^?VW6vA#|H&rZge?b zaz<30&o^{T5+;zjGzq-`_Vr;OdQ_&wQ1o;VG6FKiADGc>hp{r=B- zwy=hf%L&WptCa$(Oh`lizqcm!v?}{}VTQ)%Pp}Q=)!+~zSm-Oio+$pnQx*#vhwL_p z>_OY)B1~GwK8`Qk>gvBRDTOKx+1bsSCex8&o|&o%67yWJD6Nf!YtwGu_xw3Wi?Hd| z6>V0BN~aDCmP$9VCv!@JjU6!GWp7CmKm7d?bxQK*u~aB{>9D=O_ahV7QuMh`Xe;pA z#-W1V0sG_r%h%mhNijKqJIlM!Lhg;g^_6;ah&Lc>UjAlKR|y~9XUP369WRv(LGx>AUYXp(16C_p)Q z!M(mE50x75osp-R6Hh9!>R+Pv6Y)C!Q*ABr`rBo1k1yo527c?|_A5_S(Ai``|GS5& zr|Ag${Kx3hJvNNHyr|enNP`28Fhl5SKm`}28Rr3{Y8LeJnAuN$wKy|<-l2GY1h~6? z6AO2-IT?O=y#a7^$}p60jjJ_aOe?J3jjb4F_K zE)w7D#+1rj&Rq zSLSHZg^0uTezTyr%R3&as6TZa_hmTim`dnmi(P|3E+ z4zV))gVl~aJ#KiogE#P$_y9Lz-HZ$m3H1qPT z@C&>1BC$fz|6!;9bv>TzWg`AivGRHA0a=z3#$IuvX} zo=u{-ek98z?90kT>^my>TN3h5y$6@Dhf7>?sPplglg>{!#b?sZJ}#RDzXLO@bxFIu z8r_wnFIfm{DK$^NaO;b#oZWV4T8;p=5CDpL7F3xCcSVdt!Qv#3<~kpULJ7j7`(zXH zj;Q_p10I`8VfnIdH>+P?7y*_XCB*sxBfo|-b~Q<6HJ8^V{#B4!OGD{LmL!57_H@Ch z>kb-J)Ons&YS43`+*`h=77`W{IOy%8*Z15gF$$n<|(u#L*aS(Cf z;=h<;AnIfCC5V1-sXD0nuDSjBDYBq$K4^bup)x3NXYU$tM_3FaI7A0Dcziyk8tUC} zX#K16B-oN@j>P-PdHyL+R?>J?Kt&;G z@&M`K9iz2O?p3@Rxr%1&w(wDWg;f@pOrEFvi*dDy^7CMWP~lyZ{#`1h80d3G*!VxDh7- z2bAWm0&yuxDZOsZ{?f8_G1>F~)e+TJJu~bHh8*!zWzR^=ghQStsyFa)T>|xDyuaA; zs>F{W(S&YS(*D$$of$5iLakGR;yLctIkC~*TPu_9hG4DF-W$9K1EB(cO}sU;22~~*6HH6m?}VxpnuBH+xdSzFLI4~vnjp?{?h;0;$7xG z>3JDe;_X-rIvtLb#jd*eJjQeUuzfnPIyqcswXXwC#(ujpRL2SE3($auNQn9ryI-h9 zF2Z|P*VLRh8jA9*t_uX;bM2ZKzDiE{Bw|{876fFnHErD`Oqsm|S9%8)gZ%FGK$+v? 
zl~&0MoVbBw!$F#wggBM1aCg$U_CoO zGK`UDQM_J$<$xriw!oIYGhet<;N!bBHMvX(Hv|HLmuoRm|{Cu7ru`DAp zaIIG>asMFvkS+yM561Q(QGD6%z_l_?+9!~ZR=QS#@{rmSyjPV|Dxr|CsVp~pSvdtT z<`uC0Mt;cd^Z(ve3`?T14>{K3IyHj4QTzGSNXYZdvX1Di+U?|K-@2Wm#<|aj%9z=X z-@1|4)h5TtUGg2}zw^?rHM=!c`+`_ws5xexKtzqsOwhwwKkwG@cN_UW$h~gv`>GfV zPYuM69PGVkNrmiap}?!Z8RADG%;LuH*yc3L!Rug??wcB)1W>_jm^YOhXjHgR5Ztes zLsVNreV>@^(g9;J&7ewAUh@gj~BoX!=#X4&&>W7(*bTOi1j?Uw0Rg&H7an0P1k(f>7D5{{j_xW0Ts8 z>uc3HXF~pwUL8a_F}UB_zFjc&15QhspU1945*LaM*z4#=qZoS-$AC8fBf^HQAN7V* z0qL=BSn-@J|5vNHXIrKq{l1T#)hNx|9ce>@Z%Sv z5jQ-$i~rJ#V)FqGdUmgT#g=5jhp~Ts*B^RLXQzIGuF6); zHOVCm>!lSeUz@uYEy(wm`fk^90jP2nASLwQqrE1LMYwr`8VHp}P_7fxejTP6ixd9B zPgZ4}!CS)Lk_!Q4O~oa+ zyjCx-`7v~JtWXOz_xxVPTzatkwe&Y91Ca568bkzd!O{3%ijuFyzO1*18wywr4FI;< zW(HVzuxr1$SPY&fQ;R)hAJ|2?x>k>KDb_2v>-~aaC7y?pVAld`fe}AN(>jA{*wF;h zYk}%SPU5uvYbQSC&TB+|dI6h1CNNiKb0@e|>mgS6MycYIA`27MdQYx5NlPzuHB_E% zm{e9*2BC@YHB>ziz1Od&YgzvDJ{;Y2D>L8k?Xu$6w3@}f+21=@t$|A+Cs*t#0kRFE zm7x&H_s(ozL_88LIfQm$Q+03aC_E<3>J2(cP{rV;D28R8B=%=^3;o&q(mXtiW zpC69QV7Se~^*7kZ6q#sMG5Y?=Jy;(0*QelDK|0TIb*RZ~?m`CHho-Tmq$O+%^rWKC zDNsjh28GXH_XZ}i&g;|ZDCaeOZuc(D%d(=Pf*s?2M_Kig%I|q)tZ_C3%I`Iz!aVAFlT=3-zMvA!%E{t)9Le5h|CyPr_E(fwaE>TuBc^K;QrS zZz8?x>dB;UN6_8O_izgwjQ5U$xnVE3qdKg33LHGrq1tZKsL$Ftx3S70VO;MvVgxzr z)WCx*Ae=o1TUkaea!iA6>o*JzZi}{LZ}Q}R)i!~~fTn&TdL69Y9UC~q6mia1_yV0= zWD=wwNL2_I@JJ8StohmgIbOaik~6v~L*$;mM-E9iP?aGAcs=N+)GSj-rW-|`9OT_ZW_J_3G;xhwZFD@VpVdfQ=`o_dvpC_n=_eF#6bplCbJ^q01baE zL`s6V1^7WGvaKJw)@iVK!Ki&z9fv@^el{-wkqwgO1DnZd5^8<9{T8=j zhEp&Ghl_U!J}hda|HS7BxE$>JW;Z-3XNC4iCP<5hDglFAG6KGRy^5 zzD@mV{Dc;Yvy&>?g`5Okh3*^NB#swIcIfQi z--kwNLwdvSlZR0&?eK3TIt&f!Wg&wM{0i`+m9>T8bt(MmUl;~zk5)1y38D-~~ zq&Um(Kj?))a+6);l#_Zo)fA}g^4Rr{{FlB!8ZUS(P`4xwS=H#RG>OM0g3@QJH6Y$e zvsD5cH66av&$%&9;WRt%Zeu+tL^NSVi9ZO0DqyEvm9F=@xdbCyHA3`DfJoap3PCAn z8ll=M(1yRZq>$jyIe}vFO6QV&$GpH2NWtEfGo7Joj-BU|gYA??=pexG?Y1hDcdk~nN*Gsn))qgC%a#s~s zGH`}NsWEqbBU|P<|K50;xBQfXiUM-YK2HS?7lve+<6LWgOA4`FCnrB(NwSRCCLYlo zf}3g}pSGh_tw;>gO(GU7fb5brM+BDG~M?{yMD_#Z5}-%%t*ByHp;q~nnM|kxmIP- zE}v?AK5;~1_@_u`a(8je1Ac$E#OY)!sqbOU-nHLw{x8lJU_@&j^xu3!qHJ&q_Fw}2 zt|;DorF|c)(dLu=#|IO8<+rJinNFjpwj?3_JAN{be6$!XOdkFSx|=c#oAqaJ4w4j3 zyr|d`R3~cjf}MNI!tb-TjkhT^OcWCk_=ni~K7{|EQv)3Ctjb?o2WjrqCj17Vical; zuc%}N$m4L3P3>NHHE+@?^L8F$aNA>)oz-HzSxN1RWfD|m*s(@Bg$Et5&^*6emM(B- zT*$_&6IS6Sm?B%!A(ez+LB3aCpD?UQwSDDbyavY(ON+iK6qm5ko?^SR)sh1~^;&SF zabHzZoqFbse|43dJ*fRYQ~&+p+qLVA95hfZ<_a?}218pAVb^gWRDez^(yUvvRoPO9 zHA@MDm3>rcGnv`NIyB5W%L2dkWQyisWrDMJyM>q9aDK@h@r{)9YpCXg1!$4a7A(e$ zBWz4K+ik$-{Nza)%FtHqDMw4mmHgj_-xQ!~I!X)hN7oodVGvfyn^?`IeEcCnmPcAO zZXU@9Z4sszi2|^Z>aE21w_?EviZ4Byx3(sV-M#D$ z0vdJya5HxuY0DD@1z%`?$popKu9m+?L#PR2tp3$?b<_5$j`&&p>Gf0a)?62(wkQSh zd#Wfr&fY?xAe~$%sZId;2U&GyWQ@G9ccu`*!|1k3+!6x4ACGU1{-G|N6ZI&n_bL(3 zduvi@WC+US8Qb(&y~WX#aPP)|{DK+n-MAb5V`^9&bc%3$d^OndkS(G%VlI9>kI)8ia5! 
zc4R#@Y0kee==DqWl2^(~zCK1`ORbAk53p&CT5&xHr6;oZQ>Hx#0&q)oGY+=v{^u z$ssQV8>+B>FkytiqzOdi$HHqd+TzW-M=4PF5`*BAQSXqbsjvKHWP_7u&SP&pn-bas z3fECNA7pQO@>d52d<#YiEJ=-!4Zj2zISKjBB~TuT*s2wkQ#lKn{XbY3d%d@GAIlIM zFVnB#)$)sE8mNRcuVp}6;baz91xYLXeHOV5VSt%ZGly&)tI}R#3MSU<)4;2-AI@=e zmdSs~K9jnx(#J7C%cq%+3@d?^F#gQ|?>yb7gYmoJG`5CYQ~`6x^11UmzxcZHwUpE; zDe$2{5jux5)#+(Yv8}FmMX%-HMj#ZRzywwrT|gh^S>X*_i&?<&I)%_@!3gH6 z&yoA;CMVg;54cREV)xH;1;|D(w?jA~uTPp`vFUyX85*YMx}T+~^`3-He1Cltf#7&nv}%5(R7&J#af z{7V{z)SirO{7rp>`2J7$x>KmAX|y8h`a4WfqJL_cFTS$MWO`3zYyyB_{j)#x)>}n{ z0|K86-zt4Ky$X9<-Lioy%9(`sU?&Xi_gYXbXxWz8!_?28b*3jL-`o3c@~br@=+;;iW{V-Y9p91x4VK&Alut2|HXmilOz9>hqWZ!+M{q;%a86qmy1sF zmG1zW?-~k0iofBUZFuy$l~Sj9GoCUg31o?%B+77g0|><5@Bs7{D8baPR9{Y+B&j=4 z$!fKP`wtsD?!Y@(CnXbO58Nt$a{N}9`csO?GpE#<*LC!UTy-B!5ff-CNJvQfgPGa5TEs2p} zQY6VNa3^8)a|VLAPC*OA{MVI48L914o_}YmTEUpTZH7#8PJU{Wx~O#*OVa=%`jkl{jt z4*z=7-0xjd3WSxS|Db6p7c5WWak&iTb8Z3AgbAZF;iFnR7o<+akVFTr*M|W}c#4l? zJ2~aRI65lxPh)-s94#xw^j{Oa_>EjT+n_Ux^pN;4;Qw<-nP3MbpeBwvp zea#@Dri>3u=!wGW+E_>|(}L0=V;Kc#=<;eVntuD$TA7=?k=`5LTyASC&ntewi8W-C zn`~ytNA_`)VdQYM`7L#)cpP8VDxQCQMEMUb7DmXmSP^;C#~;<=OjCSvali4ZYk0Yl z@ILT$w>XVqz8>FM_4JC+R6k>eZ}8?$WK7l&uM;xLC?J}h^$}WTVD`xk%NYEsx{P%r zlMWzZ8|INkqW-x{u7Ds;oU*tqGuhh_wdU=-P9v~!qAL5}rn)z$K+v$65uw6coMZ`Q zv=cSvxq29coT6g@yKP=QfAi0qhffjYO*V1xLb-S*^>;oU_KEB8Ldvf|Bs%`6!L7TM{*lAm2=|T8fP36u>w&m~fqr7| zeNNIBma)x!7fxL3qG&iwrS7QzrSt{>>BSUB3L#~c{4vj0sgpL2hn>NJTm33J;hS!N zEh6-fX|zSLKMg@<9V?QG(2qM`^53MEGxFxf*k+sfwgONfB=+v1pwOrUp;Jq-M29OZh~V zK#+&32Y==uB@I%fTTuBzdCeSu|E57!g$@zXe!IT0G>_+G`R!?~tqjvq6kujR%b6@d zEIM&;THwf*Bc;?JD10wXvQ4G-?Cw#vM$T${yTDZKXAD?C%gYbJIo=qSMpiI;Z~#O8 z-T|5v8x;0mteC^~V>l>qfb+pjmh+G)$xj^p5j-W9@-I+|itbHP=@%^M#*g13w9_&l zxEXWMxIWNQLafWz%BB}!PzZ5Ik%h^f4m^HgL zOg!5Am9n;R{}-# z^I0Vu&&lFGcDF)QnNB{2;ye7xAm2})iri@|31~`SSAfxTRVZO6|K;&MeYQV8)VA!< zllks%4dG&a`O*yw&-iRE()!{hO}64}vE=D{eZnXoRcs(zRMc2lT3G0XLmH?Mb2!sR`qAUM z&^#N!#jg9V;L0?_+`@=SBakBQ%m%JcZA6!e)!~6NO(t8V4`NpYkP@Np`VHd|%|IeK z^VQPh#Z^ud4Y%d*k?p#WCz(^_#7}yD?i|<&s>)LVoG08_!tkosN+*N0J^NKC4a9> z##!v}Wrg6!MSx`uM>ajK7w%n?6B?EMe6|Mg zP_N~#cWxkeGWE+^Lh))qw>YZxIlU53S_~auBX8?#{p5n#LsQT(UiEy>7VM$gZJUW$GN_dNePEBoL-#qYUs@DZ%% z{u%4Sb)^d4Z{hZH^hl%a_ihQOqsi6!RQI2?-#YzvJFQc0+(rlWnuEImOCFWb_qvs% zf2)b0`mKft(Bj-omO!JT4)*ti=?h;c>CJ(kb;2mI#jzdFxf{tbN2_pV-Ja|E!B@A@ z{Wo>)Vnc=B#Z(6zl8PMrfFKgyv)A~?@qzmCR~_#Pk6jAar?bsYROs-OqtUnh6mQDq z(VKVD8t$_?EyTpb&qm5~5al|ep>MnI$ykYW$Hfl9RVTcDJj17Zo|uTiZ%K>Eo-;sr zpbJ#{G@UmXqoNMI>oxP1I&-eM1?N;S|7f>KPzm|3huyxBe2VJ}m+}=;`;e_@w)D^2 z(Tb4oXYg0xW9R1=MstvO&zWv-$Jv{Dsq4w7QQP+s!b)0z@OOw$sommxAGVUvfGNm5+kv+}MxgmRTAZ^HF$eHIbUmQw!V7g=7Cdej^;`pjSMebId zNBZj{##1BJhx)tljP!8oL z%=UE0mBqgKwfnK=6PqskxREg-PK)z{vWvAl+?>z51~w0Nyv#{df%sYf26~lVbY}iV zs^UG|0b$~5o@x%_(fa!D^yl(N1Z=`F`AB<5+eY0@;!DJpIu$G-Q{Ms#EjI(Jq)+_sy@x#krSGFY%|*}wnWf0(zNtYAZvMORffE>#ux&cx%@>Ik>Zn|H(%szQaYjhC5Jq~ zBTWItX`{Qo_<5z@8kur|l)MoU5>z=8Ac2iY@f~J|yV711M4O2_?1C|5pPTk)@o;W4 zH#S$1=eY^2gf#f=V+($)*dTzL!P%9mxxAv0k4zq4j*t0sf{c=HL6)U6!--k|QM*B_sjSIH1~!Z{;LkW)cYI7nH1JLErcs z$vdO}7qIj}T;v!V6xegxJUXp_UsJ8~wsBt-B`*jx&faV7?XjFPGm-;_0QFcxfE-+6 z+QeQ)3T{! z^ep9A<(av>D7D{7%|~fI|BG|-o=_ly1BHlI%*(}s<{1q(!U~4JJv(HrmSvbp|4;cq z_--h0j!ZMcS_LCX-Cu^aaohIs`SqAi#IrEG`a zY*c)k5NQ*~5ny6grIkawj9;`z9_xA}rK2=vI$N(iArm$VU`h|mr3Vp1$u|Z$ZG>H3 zZB18F&b7xWvU+__{DeJW1a-+7r*9uAQ-o#^|CLdso<0#2efpC)^q zmgUEOBXgUNb?&FTPT$@ut7EB@5YJk8-;Ic3%G1+pE?}zDAK?5pwq(|xddh!}XGg9j z@rI@k*hT)70xL`Z0>D8ZnrY{}-#Kfb?|dvbPK-xDli4P(9idk}^8FDs5PKHaq(%)D zST58}X=YG3*Hl7KQllPVEd>6BJBJ7LCy@N$;nenV$9hWP9((6zNkKBq4{mWBy?Bns zA)jCkI{hek8qjKT^sqx=cMl_AruncYt$vskT+h)=bMkv3OkYD^-&}MJ9}{Jv=)zc? 
zn+#`Xs{W#bDa)(P@vrrCgBKpp<`3JU@l{)~O*MI&aoT-sd9qCc4IJ85f>2;sVpNOR zVOsI;YId?FuY4*GXuz~7*CkSvoF^I-X=)p3~ZTVFtxFsuMt=q6XtE=C>;1{+X zQw!30S3W!MyUJ|Tj5Li`RaZN&co@`xIf*1xyGHah2#Amkr)($J!saW{o6-_YPJUDk z8?L#)CXHzqK9A?cr0mlDhXDyPL=PvktIup7(DV1lOP(oq#S+b5dS`Y?EB`>qzS%p4 zmDhe*cRw-qi_pa!x%#)>?!E>4_5~;~{e043L#u#y?MTuRo+a^)RCBp0e1kMZZg*|*V3XrGc`1L~+@TEyg z^e6P}DDdKoKeMQPdy${#&5y`b?OmpvhD|g=i@V8ZB&ID4m)Ey{ZeqAFiykpvM`Dmn zV$mc#>n?o%uv%m|DO+7v)Bgq+yGE7jw^%q3I`EPa#}|uBa39AxveX$ki>GVoN_~ic z8KKOm$j|0?h!KAUqCkDo!b5JB5Fi3bv9Q9lkC+|s=TNvps@K1UoC}rrRI@M3Q(kk_ z58RM_{zyD+xEn?yXoo5858*oEV0bTnL^aDno}YELhHb^-gxJxtX({8l3A!m6JeW40 z{7u5I0a!Zsp(qp19BlF0Ci2}xqk25a>CUL0>e19$s?R=#D;{{QcPPZ=iis99r}E|( zk5SC0vzQ!k4f?J99cYEopI*Z32lRs~r@7A7-8UpN{fp~r>VloR$tkgdH!@0S3hVif zzWs|5{yvC9a^Jcr4hS`+^+khp6vqAye1+|gADup3woRvhWeU(=G78u<`X^iN?Zyd}9nlX(F8RpvY~nH`SIpcnY~XVWV8x3Z9tA`~u6F z3Z3+NPh0YMY#s`m_}qF}A?%%|n%jrq$-O!1rIXB+owO8NR6H>JA|mz0 z9YZ-;<~9^Pa$lg4m{{l7jFT~@UVu0R6Ooq|!I~Gx>ghU95}i<1REzvkobyI)wLH3q z!2bH*j~)G|uMeLb7bV=Ae7PQ@+*hH7Bth{Tg8W0yxm~>CyRz36QE{W|?=wwupfh0N z>(fHrBowJOJAdUn$ne+L;GvtNql{^f4g2p6mVYU-+t&P_Rc}{Z+R{-R*=@Rc&HMM< zvpRT}`b#I0&r*waj~u9#U`gU8wbkR*e<7&5yp=J|hPHoZOn6mRe%cD-r4zl>>ZTde z((W>Ov(AmVVEkJ2OwBmjVUGd=?%LmKfCV^+0E6NshxZ+};{4}U9IK9~J!6E|XUm6S zg-Fbb_o{`x+nf9VgCJGD@i0JJp}1gGu{ok+rmZm2EB=}i`m99@GSrzAXsljxuuycx zCKHf>An_9(N~zEV4^4|7MaMRmi5~+P026=hQg3EXR%n;C`X0_Dc~wEp@7yof7alSi zK0<%A>W6plZ=4<6js<*a`3 zZ@Ffha_p~nt2RA%mY8cIuElo)x&IJ+E0Y)nuef+eF)jRN-MIOEyfAB8?ryttvv@So zRmE=k5&H2sWGe8gb*OKVU+5MRF*1u)(peP{e)$5UzvBw|_j@$ShuRutHL6k+#8tbbCnehOCeez8i_i5zGMtfE>KyJY*2hR2KKs%o$`I3l{S%2*6><1e5z7}%rZ4+4)`5rv zYI?+}xR9YxVA?Pb)~qB?%%{qG9^S@lN7Of~cAwMdo9F3n2?vfynDN3S_x+{CsD~8* z-2`Lcgf+cklxNp8qK{XW@6XAHO^byD_f^s?4~~2LDL#dkIZ-hR77J#Z9^aFyL)E;9 zE<-~){kYjaey9N=8;3E`4fue5gp%;`JtE$a8 zkQX7R`nq6E8!+hfWdj+zpeAt1Mqj@b5i9lgnCeY1E73MCr1NWIOSY4-J*vS@TCJK+ zczBN}qeEurVE_EQ6AzDuDOX$Lhlicf4qxQiz8{?ta?^nQ&&}43%mw0RQliZVlo(d1 zoC~1MFgj8bdC}=+es55l?*XJE!%&#gekMm+Mgx%bsLs z9JPc5Gw#E=KF9DCCV@oqtadKW$-&tjinfG=d<(RX3JQvfoTMbo3X?(zjO*u>TrNVPydtt9QDKjktXXJDL3PHB${mBG1g3c!+C8z@Q2Zo zE}I9o3VGBL5JxDd!H{ZK1yfGJhjwS4hCQtq)>-)1T?GQbGo4y zQ3VS*b+DEz6G2y-71QvKR?>7hn~gnpJx|46A}}X`4Ro#1=A4@NQJhhgs%X|?Imp-4 zE5#gj8B-AvDeH0Xw@bgGd?=}$BK@Slm-0g@!!qN?v7=6!ng1z|w5UZu>Gjb}Q1c!u zC3F+t5ogr}<2v;AtjGBd^1)-OBzNqDSFl?{$Q`j&7I)N|&n$cPxctwz`ru8M zg*A?ii#k#t+*G0U_NLzcU5P`_z_x~6EAb?KeJA}n&e+e&6za8C1#<9M^2Xm5g(Ig^ zj-o_lLq-2mFx(=F%h9jx$1-{^mTA59I&)G-ftU0nco7HTFe^}CpD%DrS96x|^I_iI zR*+-^Xv_GJC8}7Z@VWkpv=!@Qb@UT*YNcdo>5J{09c7Vu+M*!(m@-E)kT**YaW9R~ zl4;gO8-<CPie2K z)e|x!rOj=a)q3gH`W!6qT^ogH{uPnzQ{r|J#X%2+-6Cyc@k+nXsP zH;$xf)F`TNQg?~+?(H`A5V73=PeG5sNBQ&1x$OC8+n49=hJP24IO)$f+w`+|wHf4U z8&NN;j(;!G+6PRtZw3;Y&o-ZrQ5vbI~2P6=aIDW}zmFC_~*XDh>O2sqcjdB?S!- z$?!E#F(}ltv1(_eAGdr|5ThU-k=tu#E@dOq970O4qE{xXYyauQ+oW~0@=o2AHzUTV zNqx=}%r16}T`>=b%(jd7Y0hs$L-2Nk={Y?b;t)?f9@yNyU_=OIn#iqVh7;E9-uRS% zX(2Gh^_zR$wR|p;y2b8?H5t0v(5wqO>EWra_w3vTuh6G2{M93Z+o9olssO*Y;`wN8 z9CoNdE!GxL)vA$Abx|k9wIz{0Vow(I*$LnS3GHip$6H2kc#0h@Eo9ehI*IEqw02tl zQ1-~~O(57U;-&V~?rI_exdrRITWs_G9{}V)8^3MOew4|MlQc~c_|H1pt7s1?wJck! zh!sOS{2)j2u3a5a96NxknNymjX}y-h*0+Kk`Swq##gY8B;>Wws;V>T8^THS>uxSVC*pt8NG``L$f`wRdo>!hrEHA*&gE}Fx%KO zJM8nK(Tl#)@-EDN@z&{0w!d_&s}(kzmX_{RL>p%?!lm4>O^x92dZZO{H@ z3jbL~dll{K_M8x0D(2d?Pj+l+>U-zy+izP5g5g<~#0i?isJv(Y@sHns|DXTqZS?Y! 
z1ZwT8p|+y^?%nY{hYRQMJ+PPoNB_COTv!mloxf4|q+aith!Spjqm>Gg%1wP>)QL1H}UoVB=2b~(IKp_v$N@=3wHL2 z#txJD0+QL(_4NZ(O%?6cvr)?ywGRlriT3lb&vcz;_*XywE8o6I(j4)Q3if~fmw$Qd z{eS-Z_f+4$j?sg}W~}}0>cXxkTMwyJKX(o-wU5LeC^G0lY31gRz_HgQMewa1?I*Sc zWnOH5CE%;DyhE>i_rKMhsVDY!wL_S_vT8VVvWR8>+R@H-1BHD=MYJ?^A+8r>{y+B4 zJgTXyZS;Ge`4B)vAOZ<+jvxY}-22Y+NEp6(2;y2VB2-NmSbCAtS3#Ql8BvyjiXl)% z@5uj4z4K`MjS`a3fwJ&>4eh1KkPwK3Bx1+=oDf1loGYd^zdu-e?{jj_euw`0tY<%G z?|h|zBWjPR{qZYg{KAdCd-u~j98tW-sOvi3Npnj<`xpOBr~6-DeF0{d_!GWmZ!`9* zy?)$dsh1bzH7<$0-S&sK0~In4gPb0fHalisV6_86%8s1GwSoKh^5h$?b}8XrGnn+F z@BbL~7JxMS2BXL?xVJLqm)W^lX(W0^Cr76$Z?Gd#<XQ+8+Yp?w{+2qL~+4?TC#lDBb>}Rh2S-@*U||)Q#uHm64j=Tl4efP`tR& zY}=K$w)*kUf4ehpOD0AyC|I_}?7-^{DZ=y3$wbs{hC1v?zu5atCdXR2&!rn2E4O+N zOq-vd+9^eDK<#Oi^cgBkNNH^S@d`=p%`6M~7>T?8>ys~5R#I8*@9KCC)D8r_K4|{v z_tEqqdlbn~IhuUGeHLLyR4p`;f zcs6Y>{a34#F~8cCyFHCUFDy*zJ!S{D*KTIe3VcaRlNuqUZ)wr+kDx4lNlT0BOcZ)@ zB5I$NiG2%_(Ou)B%)VCbPO5KiMeXy{fzjuAHfm$TN?kifEP;SCtp{~FW7QbT1l0Z& zETgV)M-&(?b8Fz2kcfGE(EN^H?7%+-_K*n&!e8wG-j-9E|HBZq%kwUO@I1KjlbZJS z_F<@Q*?6)_SCz>8#m`DhPvg}w;+UzI`$kQ(Zk@2Dm(Uc zRJAr?^u$EeK1Cza=YmE*i8#xH)oyvdY7ZX8q1YrK6i_=JyUhA9IZ#JQ)A)AUosT~HIOz3{KmN$p7*YFUBc?*e{kwk_PV>YR4^Yy6URa7uvcjE!q{OylmV^|)NXT5(42n2WP?K|7>Y3fA=SaHN%Q=Eif<*r83F!8(jXFnzKE)u|Rh2?l=a$&=5(l1&GC> zGW7t-l!o2J2ibEDIGOg#EiB70LKwg=n&e&B|eXv!n-rH5ZSB=&oqISlfp3y();T8;> z1y0}?m@>~U>gT6?&@f8>!=-) zG4Gh$!R#Swk7DPYPOs>1Mw^^YC4ue(Pti2-;EDRIPQbKa7$evDwVqgWyR@@1{moH3 zKyA(`-T9qA{?qU0B;DB9iuSAR?bkfL+7%Y-2itdS+0g38oq2yQh3o~ed@4q^d5m4o z$JB1yeB`y){!I*yU(U84k*%wCRqOeRs=Y5&cU{nzwRM-DJyFqhTmv=C6{pHiBcP|v zc(?^2#2-4ZJFa>|*H#Q_H=b|q1pRlMZRZGBq4V8mn^aZZUCkJ+M?~!l zcGf$WF)(|8v$!nM_c=EBYaQ2HZT@9clc1kpSnv?jQ(>cal9bN0Rug{BaF`S_MNts` z1l%4dlDT`=@Al6`L1f7gwS(0mU=N7Bu{I=Xmy%MGZt01fWmH;uK=(mqvD;_RgHSCO z8cmwqYn9C!?DLDnH(l)r3yC{&vw!sC(x1G)k*lXkgM-7$0qrUVe91%Yg{d1Cxo_H; z^XGr@>!0Q3>`o(51Vuye8&v7A^X)P^d3m5QW?i*69XFmjs_Ln%Y;D<-@Le$>VVeDvDojh25sio4`b-b*&_k?a=QBj+*Lxt8Q zqIQ#0=C+7F^NQN%IiD#bp!WU+k<6T!7$2W^gp1k@6h(Iu;<00TX_zAo^5iWUEW>=Z zoMit50Jnes$BZb7EKP-|9lSo+Ug{6nuO^46{S2IAZL?E>wu0waO_1Lj0UHQ=qUEQa7gdU{! 
zV~34Zr;i;yY&=m^X*^fNBkoYe!B-%3Z{^XtMATkC>(gjdCi6IFnVhE#lW=?-H|?Cz zo0imW5-9uJ+?;c2GHldNVpX$B*3r=-Wk z)c(;s0j~qKyHXB>9(#>sPAC9n5yIn86J%l#v?z@rF8flgku`z%2~`Xn(cu+3vGnRD zX$;_-WCFv9B8Mb8LG2Dt^a1;nbfkuVC4dlx6lL+^|d;`ywE zZ5tD{=a=T>WNj8O0wN@an*DFKuzmkFSmoXjcfVZwsfvn=9fvf{ZBiDd{JiesX;Ayo zfT8Qkb%(Y0CS;1=s(>s^d3iU)?x6ObD8v_?tFEl7hMU(XvW!}9_NwrN%Ur}+%qBr* za#{NryK9>B%s~*}H#arG`5dl(`;n@*h}i*ZS7Jm`?tl5kirPQ> z8tHEBT1W{eaxORoKw0>DUirdgoKcN!G52@ z%H+Ra5q+rIb!|lmVW?|=Q>`vJcKoIDx>K#7_QO?g0LPm{)UG*UtT?vxp?UY|5Vare zGL}nspS@7Gq;}MFw)@mU<4byUA0kov_5MD>ZkE(;oApsLw~TT-%nPD^+%58bb7rsD zIqvS0nZl_;CJINfoq7U7a+a0JOB=J&G0!pewFN)@BB1t9@6aqmV!zv^sJ*nbG5Aet zR=wx;ty`THNh`O?VxrPC7rsoz~BMvq*=PtD$=Iw0K5kGnI>)iu@)@7aMA^xNmC!jEhd*) zYCGYzjD6PZe}#vKgpqU7-}X^qqIMv8NB!2C_Q9>MKFhSs`!~Kb)ZRX%Skyj{UJop> z*2v?Yso2xDz*OGORib4gC>vHjKur;4Ng<{$J%E-u)1f>3mKtnrtUjb^ee-ll)6t6R zvqe?9b7dM$Q&CH2h}xy%$4gC6{0PgbTGb_8`AJ@X0unOK<+`dmji#hnQxZ@+_ezD) z*xgD*)V?0B^w@ql%sIB-X12t5Z8nERCde#vHrVQLQ8?g@wM@ZkV2mKlcwOd&n0Z_H z6hE>QD+nw_@MMNkkxnx-&82|BcmLqCKmPRY-4E_yOfu2$-ogaUg4%=RWG5=eLF{YQ zj%08mo|Qmj*6X$f7NMCb5a}!g*>hr4A6y!ph2!uX9^b+!95AJbQTwKB*C(ax4mZh8 zfv@4eOE-@Im3K+)h?Ne$7S!<9W$ib^WAz>rJjCgZrloUOozii9b`sPs9ltjTukf;Z z{{<&J)~hEVH#TZrn1w`937+YP>;CDCPpUb?<>=kbyZht4s{t%oG#wmc1RVXcGTOZ>w2SgZ(Dcwakc(}uHw?^y$5yY zo1reItFo-DvaPHspmqXmuY*=Z?d#${{&UnU{ec$zoty=O0Jh25N!z3}28x3w+&T#t z!b9yC6M{@e69nNeM2Fy=CNPT4Yp6{WAi2)yc?8_K`^l#)%TPhJ=UhvOyDfQr9kuI0 zu@07)n1!_l5^-LTwr|Yff*6~Eni-c*7>6rfIvO-?=LiA9_t}%8beO40kkoE=dhIeC z8nq*p)DW5+le2bKu!7)O=?gI1JWY&AV;W%ru4^QcX=8-e9~%vJ^kQN3AV z)ai7_^M?p^S-J82xh~_Wlena^!dP8xtk@65kJUl7qvBI=FYrbQ;-xlJvqRL*H}C1z zbyve9N-DdTgbt3^UFt*;wLc>^UhSBm1eqwZ1Wr$lCNmkDr5T392*<`71V-u-cv5g)xTX=HYtJ(lRRI z-TusclT4=JxM|n*$qY&D{V}wvDCU3$es|r=kd(!I-718Q+6%I$wLs5=GG(}C@Y>gF zci2!sK2lSY>@_U*Y9AGtO2V%8p=(L0Uu)>TIq~Yq%^|tMzl@uZy9EV^fN?@#*|b4K zQ4|XZwE?v&$&mXrhfb7VEUzk4Bcv~@g64^1B!c4<_FcTV??RLMt&Wahbvh<`E88xf z>gwKC%pv_N`!1GOzPYEW7paS2_~MDT^qLEMDoZ4_>kn0Q9X%6K`?p5z21=BP9L5w& zMJr_)TVp6JW!h zT6H>tYNrwIb6hu1tJN|K+&l$upOF$WHkZ}w86(j+yg`}{#Qy!6VWak4pmsmKi*DuB zo9%f|*M@4^M^dK8d$f;5&!R_}RdW-jvhC|CiAgMksSMZqW|pRsQksGy z1W9WQm^?N;J^cWX))3hGF&c+jNUhQKv~>E1h)!tg{Y|cEo$!g`H{3%Oa}(o?ke zqOq+PA$3WQRu7jYgX>Frdo(=a^_otQbQIBayrQj+kEs3IqV}A)nPLr(0I)WYu_RTV zV_EUi`6?|Zhyiu{b)-p(U2oPE1c; zpY4NZPe^JPSE*gItacGrO`T(e$6+&1x<;>us9o)Y>Ue2vMeR|N+AXxZKXAc3gBXj$ zpDmm7oSB#kPA9C?ZV0VXQH;P=yMtFR-+2H1r&~Au!IfcnQq5Mwfc6Q1skrM$Zo-D{ z$=g1By5Rl84>u)d*Rb zq6_8S`!y({_HT{am9JFxEXiC*m$4L&BrriSqHv66_(=hpIgS5tF^{e(eS6PwP>Ao#ui5S5;PYx1A&-YX24`Wa^HV5SZs^ z){qd*F*JuU!BI3#@l@IV-)St)J{!Xru#H~^P?~>bHW=IH6oBBsuE|p@Yg0n;rHLp)5KM1Avd$?>N8+fbe{|G?)qkkt?9omXQTw-O8I=c*(+o{< z(QyJIrL~)~qZ-NLNZq8N*=PZ-VXS)phCtICPqL}&t373n+6k}!xT$FZ+7#&AVTsQ4 zSnVt=Woir=G5xktyM92biHQ~jla#5+G|Wj~oZhSU+39JE6+&+>i$Luzi)_Lg%(Z(n zL<5Y?kHJ_#?XKy}7?bU~Bl>^~DrjUgCMnm>I7esg0_y?I8(65BnVrwb^elK~d>EzeKa|7ND|}Dw^JpwOmseSz(vnyEitRXbo5)OUy)gDoMd}**k zhGbZgAu(tj!v#fBeaG1rsQDC<1Xnu*u^LWOn|Ok z4#%v`6-#6|Z8q23EV$Rrvr>(_!_N5N`s|#`W}EVH_p06Inw*6D7BYD>Fbg;KJIs(Y zmm$FfhMzL@xumfv7@N)T*R{i&%uv_Pj7>p%&f(9sb9S@M<+RShYi0aIzx3Lm79eL_n%5c?f1AnbxqvI53%Zq&DAvjNt=|sSHC)mzLEoG7NhkweN;!DG0QJ z-U>=xV3uCz1K9$j3j*Ri;8$SELLmZ5-9hA?v}H;fvpJ?|v_|b-|IJ`xy8usDTV|nS zhNqd+5tKDTN2#B8NjJ=a&SNFDJ0_)DtQox8;BUDg;E%*W7FeZr0Qcf?byx9IUAc4A+3w74F3z?NeF(AyWarubv9bB|AG? 
znn~zX1`87{6N|m~pGxoA3YiE56Iaj>V)lmZF%)`U@M>;-REG|?>O=YjMqxoj?L^wn zVDTe?p-<2ZfeD6HG&UTG1EZ4|JkK&DQBnk188uelx;@6SF@|VneYMxa#sm3js|vJ= z6$)i~V1yi#Hg-`=&{%3)yiTXGkAiXf*n-n)P$Q~O2@OjohY)8`C=L)P0Czwkp*%K5 zVFc*My-w5gxKg1gLQ$O_uXUj~iasEX^(o zc4ao~CX+0p)kH-_g_dVw9PF)AwneP;q6oa|AplzQ)4&Li@$vv(8VkJ{dVyaA6;7fM z;7T~D-SDX8h`l`ni!&2$x5wu*06ve$Jux%0IMCCp-DrBGsf@cl9+(N>^TAYLCXYOo zgnPb5jevQAHKb$}jugt{V`+?@7esSMweC?l-^!tIAfk36E({AAajYOx&o#&1^oCLQ=$tC%Q5X_kSdO*DSyqP)H%JkjA1w)V=@-h z4&<&<`)#*!D?CRs`%_IhqIUUCrS`=mD>n{>B@y_t3f*I1tm;JJKt$~bi<`GgWipLB zuhyr@C?21=b?Xe~SP}G225telOEvF2l8|lpNA3A5YQOEVB<%JJKWF>qr<-v^?Fo4b z?F+}i_S`fSmPFu>KAzfN(x9*)qIQJo*aUx4rZH#N&$h&6NM~*Epmr7-XbFFp3-l-@ z++CY24=kh3#3~cE=jZx$pPiDE z_K4a6LWoP+1#gF(U{53$wU#pAyIqBTR~o_GKN{y9#K1$DPz^al0#C6 zN!eA~;HS3sNPb~*{I=K(fn_-y8_zNnc?S+zy5;|#iDPJ5l*uxrYM3KCkK_c7&}%IC z|Lnb6OdIFE_y2olIQTJyMF{7ZlMCe(LaTk*<}Ub<`|&|4HjaZw%C<*h?fp-TPm?;> zo_ZLtA=uckbJ2ZKX)C#ilE2iepsiQh-7hQ64MJ$k?L~#?VWq3pV$+JtcVDPink)-y^?&r)Zsr`tl z9hWz+4%XF;wVcxK?9xk?wYSs{?4EQgZYJ7$ZfLNfuDSkQ%}nc+8S!M)nNwoNa?4nA zb6s86pj2vqPIp5~XDNX|QTx8B>aE*mPa40T8z=(>rPi*fegDKYkC#LydsfyOt`JXm z@U5tBJb&q5_MlI$xt#l#J)dRYN{$oD45n2wO4cxZn7s0O>ZM8 zD5+gh`@TVBS>?kenOt2bdhrPmrP}t^n&zgVfph2n{LiA~W&TC}5s^wYfBy5i?xCjI zv#pid5)6ezQM>lt^IPmmqq(mcjxI`SzdSk`b&EfvZuj$ndv`Dz6@T2L;_vLCrglZ` zWJg=|*uASenZ2%^fLB0>DC?*^HKVDmt7~X%Y`WJp*fn@>@LpqMV|`uS+0)fkZ52Dc zkq8vE7nJg*o*DLpv0UHx4jfq&wcoP2(lJlWla}#_!#xKgp<9QQ+7-1URaskg?O;!C zWi=<7{qd`2M;%p_Dug6yBTTAFOUtz#ogEdO+D>hSw!Bv|uZIJMuq z)CLL#15=4af~Qcy)IM#*`QbVwwJT~Tv{y9E4_9(44{KXm?=3C$G{2hJi9ZdFY4bl< zS5bRmDLLDx$+9PljlsSOP$&!ppY>(}BLS;x+^9l@Qaj|wp{QL^J1T3_)DQJcK7QC- z-KJHMnn`g6m4KHaT6*d`;E+(%PMq#-UOs8PkMH%6pil^~ZaiR)&dv@G&(2%I8U6m{ zjuo{ZZMCDa%9dKm>qE6Om0J0K@b;D33N;-xHFx9T8y`rmsgFR7f*Iudy zh5bXsqBlk6W|QG?*c_GP`z^CSe*99qqIN`_Y^4X7d2^$N@9;rr?YfI=hM5w^S8v$VLfrYQoEvdRH7B@>)pAWSYNO1%xga7 z@N4ag+Q~D0W9*4ybEvxl6!r_j*rXkIg*1(y-UwzaqT7cTS#uM*Arbs#! 
z2SD_A#FL_c)a&(WJp};q6qEHvygCF4J!9~&m|$a(7%RTZ8rTW(TRh`&nG%4EOwj8} zX9*QEHJb60#jbXQ%dU*wTe~{>xJ%R8Uape7uBg4})DEZn8dL1aVSQk*6AFRS_&Dq( z;3ZTpF}B+nR6MC2GY-zi+0qCF?b5)DbPfw=`G}YQ(}WWQE*hc{BXAaOoR|+H#4rmvwK_K5 zq0tP>+Al}2J{gUqcpqoe)8poFP#w5zPx&&VqY+~E=T7VKuqTX^{S~-=9@Q1P4VJM z?ew%+PY`;O&M1t~1nHR{rv-rp89F4S0D?(Dr%#UPfQSrx5m2sVj10^h5a?}TA7F3P zL%5O#417TROX;McXlw0=EN!cKxH37}^KQ+RG6eesA989})DCrh&FqOG)jd!J`$d7a zi(3rX33wT`(IK`(rm5md?R+HZNHa8{_PYgZCbKx}rG;=1Wa#N|7Jv#zje5??12sJ_ zeapaQ_`tj$fDs%tV6Qtuxhw_@ajzv-ylN*(wUwHNm8IOu;F&5ddUe}fr65y$YOm_+ z+GbA*zg~K`1NMu8ZbVG&p}o}Z6xAN1iYK)rIvq0G7X9&wS&Ku$|BBi}ZgxlQPPw5* zZ~iT{Cp}bgs+}mSs1m(CnOnP8JJUgejH33US9@9a`Cr+S!q}xc684J%FBN3Mi9HH3 zoLG<P5BN`~=~TF7mS4ae6qzBb}8SCtgsyNm6?< zMj_+0z!2$$aa>Gl$EEGnV-2fUdxjd$R(F;_p(t-{ovF=rwY0WgA>pu4)c#Ii-ASp) zZ1wb4fWp4P-i4btFGmP?3Enz)^Olf8M_uiVV0MkiLdi7N1-OhMCI~bi;YLj3BM#q- zYM-~8ybkv`memeEhs8CKvDuC4ms7jj88ZZu)&v^2*iDIIQ9Dsu(JJ+?=gv3ORCSi1 zg6=50w$>vS?$_38RB%WrYKP8&p-uKcc(S>Nksp&rwF0fEB4L8-9l z@URcPbX#u`mn;S7Bfn&c@=pn7ArwmhkqQWcu;?d{FA_GJ9bP_RwbIYjzA){unO(e^ zorB&DaS4~weuBd_+d`u;~*_8@rw<4-=hQ@ink&o|kpY}{#FX5Tj3 zO_%-z1wbJY5GykC5gz^H-KjaT9^<8s{(kKw&G_|3DNqtR#-R5R0ElP(`XB{Zr^CC` zsD8$f<)zoRqZq92g;xTD5(7wai;-`iGb%I$|d@KE)-N3!+z}uWA#fT_3o|b9Wn^3U;5S_ zu69`r1V&H)#vir~mNz%IlZUBxQd`x$Dy~cFu5alqS0Q)?GDfU{m?-N!;jPJ}D*<~_ z*1ROKQV9o3MbpxM{n!7Kz!h`sByD_LeG<+Xa1%b+GEslIc15Sc4i?Du#0<*nPXgDzqE3ulT;sy+7VfL zW$fYArKO$*O~*f$*RkI=JUTk-NMXnizgtMNzwt78s#^~2CGh(oETP_q+W+&5!{PrF zgWAg*E>#~)Y8TbcF5kGgeD~wJ3pdVgG~c+ewf*UX4{l%I+PHJ=?uR!%TVKBa$p@d_ zSbuW&LG9fe8utBb7e2qCS#G@l;oWNmcl(ouzE&s*3Md;(N9aw?bV&}(Cra((-4s$ET#R-6*+Fq65JhL%&J*Act`N?EvA zFdN~fjcPz@>3#V*ic?trfZ_y}@2J%vfjm(!M)C`U`q>-s46XdH{G&=cZ^(#53^W0G zgDHFz)c!wfMWA+6)7!)z1pmLJcJ`ACPcF8s|N7~LWvL+ZBLd`0@ql>7oA%Qfd`!wakxeDkKm=KNk|G<3M#K@%dOarbN*GgF@A$66R5NQ! zlg(wY2Z+G%6nbRXVzY6{=~UisM?sHKN$vSk`Ck9lfu;7;{R`}`AKtxtV{lupwLiLe z_wKd#dkFoSw91#imGsbH10Rd{9 z9`<6Sr1tz!)nBSTs9HO__3``bx9>Ec{kNUezVYC*GiR>X$!f2DQ1k1JyJH_*`zLn2 z=ECg<8qM|FpMU=OMfQoN@%H_iLf2yk2dWSh2n9s?L+-hoFEFeO2uv!@8=j?ucz+%Ev!=aeo z#07Y@-(uwffMV>Ut`zSVoZ~^S;K=g!VY}B)!SmQY)U4gRmd9n{yJ|NxlI*8BJv}ku z31rnegDa3RBmlGC36C{nFnXreH)_P|=d#)#U4O)GU4LKvC>LbTT$^EMuK&~4h3e(4f4k87@rT>nn;YAow}|h4 z#uk=Gy^UHZ913wZBFx{~6}vm+r4byK+DqH2iiDZ2C5e^#>I(8L6=arHCLcent!~q* z&~FQ9VG1OIf}q<~J27SPQ>5M&P))hVF&G^-AI@0D5lFZ!2H-Q?I01abl1|v842Bob zo<2pwUZkSdm()%K77R?7Gl%tjL~xD><}?vcC&Ttd&Sn=D9hMOzVYN6z)0{9K%m}xH zj89Q}o~YjK)f_~v{n~>EAAdZP-FR^0;-}X>+upo?r+V}HC;xH(UmG{>h_#m+w>M_4 z-MMh%U(asddC+|EYoEXW;KKIp`|n@8_90tX8v8W}6bcAVFp+6)es*|x)@>e1 zvs8ZiR-r-`WQq+WPou8X%~Yt~+BE9Y(2m!U8h%gh2t>YlQSBSK)QEAsX<}3H)5`*yl8-^fsF}#j|FI-sgAP zg7L^`fHg)&qb4I`xA}>{Y=#YbxPZ~T0%YYCgO1*{>|C`Q$U?Lf`SO%$QZoow8@{Mc?@}RwL|q5n;##alJffR!`d9O zfKuiV|6J`hcSyvUQORmo>$yPIVx?YGyWkV=GiIx-_CJL1IKt-3(Qr6J@2EYRF_~OW zcZS#70y=GMTiw5^8C)WKah<1^`kEb}C}=0t7iH0aTjvifT^*@Gj1P zUrX&uYDZ`L>YuR3nXQ3yr$C{wAWTpRpV4Ua>3GrZC_lA}Gl;924&sESb|AE+=m?&& zggvtX3c$F?F^IQ?c;H2~qrid*0}du&NA1Wo?-uwM)K0jfUP7w4n-Y7ey<@FsNK;t~ zyC6cPVxobBibOkV=S)VUQAYsyCAH(Jr)qcdlG@QLYPT4$lGUPK4z(IIzW+_dP< zB!F66aOpG2C`wm{P3LI zATzhqA~QEUA9ZHNg9!p;h?&~DJ-D|dlN;1@l1C@CD{9Adz{XidEnXfF6XrOl!~& zy_s`|UH+X@{Z-99x!hy%_pyArDqXJTa=E1)ax0orCAA+NwF6S63!xC_~#ua-4RfGv7~m;*7w!0$Ck~m{#H;Z z1Vk`4y>NM_o&DCF*!MW<77;rjFVEfFxjD>@jME5!I8}4@>J0Qu|rjZFk9PJ6uMxq|P_ZNQ~T-q+%y=SPtrnRC3Fu!ZML^@-H z9ayWE2f)G1v?G)WBEk@00BEa=fVkhv5CL~Ud=T-eAz=+q1(m+`{6T-}={~lxJ*DYw zEC+=GpfXHHcN{)E$_b%>)fGvn)1FAiI^r-}W}nt(I4eT|V3HuD-<=B~Uog=suf(jc z?!*T$jfy&|YpV%(6BM<}5Iz(_eDlJF{u z+6#a$?Fk}yGl;VG*0F}QT+eFbnW_%*b-yi5qwbT&4`9g>F8@=*%H&e+pwzCY9rKiWKVL7a 
zs2XcnS<0<7)X1&<`GYgzpt4{4n?f}7)h!>-(B9osjbPs>;BkwzOS>h*B;x2dUZH4% zeO}7-j?~_-bhMSL)CaK9Kv`?U<4I|Hwz;C?2%{}2R<#!ZHBqXqthpy{8Q(ovbEO19 zz98C1D5?F8sq8;LlRb{1y}3`Rv=;(obj*p)4o~n1eh0PAlay*Qt_17_LI<%uSV>jG z)ukPBJx7_D5*U!i00$DBz0!sTq`IPD74QxSqy&w z@+ZZkQ^P6r_V#Npndy3__Q~84W^%GtTMTMXcw98R9uzM^VaNkqdTQ)pZgNRn%~V-N zq61dDlG>qjwdc|Ci|yMDmugF3|0wVovCWUbAz^MXrA4 z=<=wcNYw6IbOa$ENIC&B$m0lwLe>eIfP9E+XKII5#GaY@>NagDQd0YYLZ0rscl=^| zOV5B76be9ML_8!f%)=j}PH|#ICPuy0vr$VbHC>OFj`lQa+1PPYI~($V3=y~-3M@Kp zRwK&SHx**P`${f1P_Jq0P$4C?A0QQ7y&CqIvC-6b8Wakq_Wb^DYBY<->3LLdFSW~% zm704iN4oj3{K%>uC3HF_h}Be3&*;!owG%$Q-bVm0C2=*civ#vCdX`rMq2qOoK1kuj zgxgNXK_ng+){{JI`kS<%P%yQJDEMR0p{QM?v|Ur*@&;;GQoHISt2LZa>Acf| zU}dqacEVs6gn(b=52OHiugggJN5VqLLj!B|Si|N;Kb;N@TLM`SAw;zUpp+xbs@KZlDkl}S9}wmBeGS{}abolQfU;Vt2-IGv zJ!Eicdj~$kslEBAs+|wdIy2*mm@pEtChZAH?Z|I-e3S`?QhrMYLohVrW5VH&BGWc+ z+_1n+xvaLu)cDA(`J>=-wF93?`Uy5HxIUUT$5hB`4tSN+fhxjH zApJrBWQb>KPkMEnjRv4H7Jt?~MFPzI!^Vze558$_0rUl|*k zT$=1@&~%g_CAAlp%JTy)DfW1<-q6>elw*|Co)1c`T~>S2>euV_3@xjD)Tx&+5{VFJ z;35R}CLQ{e$+CyqcT;;Jz%jn0GXa2IQG;GqyPCjiBH_21-7X&f0HVCA`JuQ#p}W4i zqLf5RYA=s!^{95 zNTvWWwmsEON~s<5|q;~buP3>wd1ACy<+QDcy$Bf2tt5H%rA8;>jEm@faV53?1G-RH z2SE58wne+m?xO>XL6i+yLW|)LgMpZ`m@r^}m#Zr`=)p@Q@b>W4^OmwENZEP<@Md@*Yz9+6%I9 zssG$D*aC8StgovR6bgp=Sg4&5IHyw(M)YxIHR_8{29rvPQ~gJo$hc`D;q#^dFl#a*Lhs6?Km_?cBI7J2jcIQt6Jb;UL@JWWcxePWlgsKF zr`3=@@zVBcaW&Iq&%=66m6lZ0zJE$;FAZ*Gi@xu%zgsEC6cY8(U$T@3dW%EvH(8U5 zLG@vz_9I)60Zf1dit{`n*R9k5K!_jEP900WAd083_z1}*EiB!{6px?&0fLaEGo^)hk^?Rpe`{{j2Z3`+8^a?RVdPC6B)T?z^AAdiw0U zzZ~~zR1wsA?%c0MJ%w7|H>iRlL|L+A_L%Zx@lDi@WVNe}i=3bExUDR+XfwpE^J$|Y z98MX{v#ykXIvI-_g`_DsA%q!Zy(}2AnPi5iZ8p6zA`XmUn<412SQAQWe`_F;*UpR$ z<(4Mb8frR9N;Hp`&R3JKtM>1%RQ#{3@2~&f@q;Y+zx?uz_WPfu(NF)b-Tv_eq;@Dd z+uyyJE#fKEex;+mI8*!0TbJ|X=FK-t?O}t#WFHOqXu+n#@d=wtx0noK%8+4=W~U0U zo*SVf^DzXJN0hvEb^?Gg+kL_0B9G}vI^h|O5rl8V!78c!t%7h_YwfCZ@?L#Q!_v}H zS0(Y9YX2)+_REgizbF5`qxK&#&|iwN{i|O{C>ICEJ++Hy>-q+_igyaNr(4a4SRfRteO z(T-%m8%z+Sb$%+7u?jXnQd0Zd1tC@YO!Mm1++=P^&4m}bEXWA4N7XCn;*Sgpwuo;qaxPnWN>OhU?phEr!)F9W=HC2Y7g6k0e4DD z_(CSJt9IVVv*S~i3>|P!up&m4Qfog95JD9VOFOJ;y7sGfa$`gLCE}-+cBPzsuIB$Gc?fwxzfm7i{V?=ruHwMY#>}xN6Q03~< z4!PB}{i;2_`AGZy*I&0+{Lp!F)K0W^_tzAi+p}9;eNClsKqzW=I~*YqhxGH?gYazO z)PBo0p(2dag;YBVP78XKCpzslWTyS}h{a_{hr^7~JZ~M3IXF|?IL)~X5h0XCyJ}~+ zaLn)GtUA^qK8x91Iz{b=16iG0y4th$u%WhQY-ryFnT=o8EB^Ll>E@S`+K>5?rCmx| z28z$^8;!jeDnQ|&YV9}YZq13m^3U$zm3wnH3#azEX&(Se7f9_)I0FEbDeU1B5wlGY zGDadbVi9akFK;v_LxRm5(E(+6#=j|Cm{`<-BRzq zzx%fO#b-)tKV)88?fefvfBWsXfBE`*;`{H!>$kGn|F%vLGhctHJwd5m1dDFh6p!22 zn=W;?gTldpNPWbWH!&jt1!{hr}F%{`8Ft86tF>5c>Y|r^H50m2?IL5`?V`-|L#e- z%;w+zzB5?<>o3}^@4oo5V*A^#eypfC5gQFWr%cn+UsD|Q_N~U=F2(FcjZv7pnLkl4 zR?bFM)SfS2ef`sa|0MI(cR%g$m!E(7>(^g=B@Vv$_NSk|KHl4dy+WBdm)iWK7|cE> znjJwAqd=L=!?$kc$K0qj0m|8^irVw$zfSm39IsGP{pbF=b@r$;^IqSDb^?kT1>Rr{ zh4)5SxNHf(z_f>kVngX`FC5BI90j22TwlWxcYF3x_oc25D0UREE|^Vam-qN(0;AT{ zbENLY|Hs}PgD8##U>tu*qT`@uooterBylyW$$Lpm`aVti^2)20yTZQM9=5WWV)`DY zhuz}f3JKRN$M%SDU?(D$w{r(q?Si|f&qTi8Ho`LPpO5#VsRyJTX|F0lnNh3Kuhcub z0X6Q(4TIKCzx;Z(n&UtQX~%)g7gE<^dAX`?*%x8R;TORGX~#7x($3);m7n{eQOw<{ zJ}9#@qTm5#N7}L4k#@Wr^?UgajkRqhkJ*(d^e!a%XgKrUCfzPn`3rB+OgX4TzfT1-6;#5RaKshF(c~uBB?Dv+Aj}j$8+tc z@HNV&`^SphpUwvYsVTsL4C&H8F7GYUj?5Bejv2@TN^#)k&fYn|(Ba;@h))y|*Xn?}?u*L>r?+PQzyL~oP&1FUwyYOkRCk(<)c^tSJ~dE75@ z7Mk_{k`my#cEDGbDodKUGa6bRuQT`cB99{T)kq}u2UzWZ=h`bu5TubC6wwDRZ|T_B z38j|O6i7`0&f1Z7q`fxo)P%9jJ6^QzPph3C7c{N^moEUM)&0ag$0)S z*KYfxc|UIUrJaT>p7$RnA_2IOw39~X=ktMHj;}27Bl?PJ#-EiPoNx|az2h;ZU$5Vg!Yw2pwc@h;gGd!g&6M%1} z9RO+PPY~qhsO$EVqF^)%Qe)vA)~jzeeY@$Ynw5nSWd+UjUdL~U1ORCVJp5QiV)M;d 
zPlJduRxlc7sb$!z>v?6lJd|bWx$Z(Ur?Ywd^8bv+S>GA7TLb`U2Yh9z;v~P*Q~xP= zki-#Xl>LXk^gK+0EVXp!L1|;F1JVvy?KL8Z%~3}g^d3}aVcVLf>$;}duN~DL_mu9? z$9*Xv?SR!@V}c|}vfOAiWVt1YC^{hR@Z)L!{W^ZT%xVWf+5y_;$M;wASwY$XkaoC) z=2*KHb%L}5{M}AnR3Q)r003NVS&Zt+ClC8 z!#OZ8-#n-ts88)nc%Z2x;I6t(!UJ&x00000000000000000000000000000000000 n0000000000000000PvHpcVFuk;7*Vv00000NkvXXu0mjfew@iP diff --git a/docs/modules/retrieval/vector_stores/integrations/memory.md b/docs/modules/retrieval/vector_stores/integrations/memory.md index 7acb37cf..58f3aacb 100644 --- a/docs/modules/retrieval/vector_stores/integrations/memory.md +++ b/docs/modules/retrieval/vector_stores/integrations/memory.md @@ -1,8 +1,8 @@ # MemoryVectorStore -`MemoryVectorStore` is an in-memory, ephemeral vector store that stores embeddings in-memory and does an exact, linear search for the most similar embeddings. The default similarity metric is cosine similarity. - -This class is useful for testing and prototyping, but it is not recommended for production use cases. See other vector store integrations for production use cases. +`MemoryVectorStore` is an in-memory, ephemeral vector store that stores +embeddings in-memory and does an exact, linear search for the most similar +embeddings. The default similarity metric is cosine similarity. ```dart const filePath = './test/chains/assets/state_of_the_union.txt'; @@ -30,7 +30,7 @@ final docSearch = await MemoryVectorStore.fromDocuments( ); final llm = ChatOpenAI( apiKey: openAiKey, - defaultOptions: ChatOpenAIOptions(temperature: 0), + defaultOptions: const ChatOpenAIOptions(temperature: 0), ); final qaChain = OpenAIQAWithSourcesChain(llm: llm); final docPrompt = PromptTemplate.fromTemplate( diff --git a/docs/modules/retrieval/vector_stores/integrations/objectbox.md b/docs/modules/retrieval/vector_stores/integrations/objectbox.md deleted file mode 100644 index 08a07bc2..00000000 --- a/docs/modules/retrieval/vector_stores/integrations/objectbox.md +++ /dev/null @@ -1,346 +0,0 @@ -# ObjectBox - -Vector store for the [ObjectBox](https://objectbox.io/) on-device database. - -ObjectBox features: -- Embedded Database that runs inside your application without latency -- Vector search based is state-of-the-art HNSW algorithm that scales very well with growing data volume -- HNSW is tightly integrated within ObjectBox's internal database. Vector Search doesn’t just run “on top of database persistence” -- With this deep integration ObjectBox does not need to keep all vectors in memory -- Multi-layered caching: if a vector is not in-memory, ObjectBox fetches it from disk -- Not just a vector database: you can store any data in ObjectBox, not just vectors. You won’t need a second database -- Low minimum hardware requirements: e.g. an old Raspberry Pi comfortably runs ObjectBox smoothly -- Low memory footprint: ObjectBox itself just takes a few MB of memory. 
The entire binary is only about 3 MB (compressed around 1 MB) -- Scales with hardware: efficient resource usage is also an advantage when running on more capable devices like the latest phones, desktops and servers - -Official ObjectBox resources: -- [ObjectBox Vector Search docs](https://docs.objectbox.io/ann-vector-search) -- [The first On-Device Vector Database: ObjectBox 4.0](https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0) -- [On-device Vector Database for Dart/Flutter](https://objectbox.io/on-device-vector-database-for-dart-flutter) - -## Setup - -LangChain.dart offers two classes for working with ObjectBox: -- `ObjectBoxVectorStore`: This vector stores creates a `Store` with an `ObjectBoxDocument` entity that persists LangChain `Document`s along with their embeddings. -- `BaseObjectBoxVectorStore`: If you need more control over the entity (e.g. if you need to persist custom fields), you can use this class instead. - -### 1. Add ObjectBox to your project - -See the [ObjectBox documentation](https://docs.objectbox.io/getting-started) to learn how to add ObjectBox to your project. - -Note that the integration differs depending on whether you are building a Flutter application or a pure Dart application. - -### 2. Add the LangChain.dart Community package - -Add the `langchain_community` package to your `pubspec.yaml` file. - -```yaml -dependencies: - langchain: {version} - langchain_community: {version} -``` - -### 3. Instantiate the ObjectBox vector store - -```dart -final embeddings = OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'); -final vectorStore = ObjectBoxVectorStore( - embeddings: embeddings, - dimensions: 512, -); -``` - -The dimensions parameter specifies the number of dimensions of the embeddings. It will depend on the embeddings model you are using. In this example, we are using the [jina/jina-embeddings-v2-small-en](https://ollama.com/jina/jina-embeddings-v2-small-en) model, which has 512 dimensions. - -The `ObjectBoxVectorStore` constructor allows you to customize the ObjectBox store that is created under the hood. For example, you can change the directory where the database is stored: - -```dart -final vectorStore = ObjectBoxVectorStore( - embeddings: embeddings, - dimensions: 512, - directory: 'path/to/db', -); -``` - -## Usage - -### Storing vectors - -```dart -final res = await vectorStore.addDocuments( - documents: [ - Document( - pageContent: 'The cat sat on the mat', - metadata: {'cat': 'animal'}, - ), - Document( - pageContent: 'The dog chased the ball.', - metadata: {'cat': 'animal'}, - ), - ], -); -``` - -### Querying vectors - -```dart -final res = await vectorStore.similaritySearch( - query: 'Where is the cat?', - config: const ObjectBoxSimilaritySearch(k: 1), -); -``` - -You can change the minimum similarity score threshold by setting the `scoreThreshold` parameter in the `ObjectBoxSimilaritySearch` config object. - -#### Filtering - -You can use the `ObjectBoxSimilaritySearch` class to pass ObjectBox-specific filtering options. - -`ObjectBoxVectorStore` supports filtering queries by id, content or metadata using ObjectBox's `Condition`. You can define the filter condition in the `ObjectBoxSimilaritySearch.filterCondition` parameter. Use the `ObjectBoxDocumentProps` class to reference the entity fields to use in the query. 
- -For example: -```dart -final res = await vectorStore.similaritySearch( - query: 'What should I feed my cat?', - config: ObjectBoxSimilaritySearch( - k: 5, - scoreThreshold: 0.8, - filterCondition: ObjectBoxDocumentProps.id.equals('my-id') - .or(ObjectBoxDocumentProps.metadata.contains('some-text')), - ), -); -``` - -### Deleting vectors - -To delete documents, you can use the `delete` method passing the ids of the documents you want to delete. - -```dart -await vectorStore.delete(ids: ['9999']); -``` - -You can also use `deleteWhere` to delete documents based on a condition. - -```dart -await vectorStore.deleteWhere( - ObjectBoxDocumentProps.metadata.contains('cat'), -); -``` - -## Example: Building a Fully Local RAG App with ObjectBox and Ollama - -This example demonstrates how to build a fully local RAG (Retrieval-Augmented Generation) app using ObjectBox and Ollama. The app retrieves blog posts, splits them into chunks, and stores them in an ObjectBox vector store. It then uses the stored information to generate responses to user questions. - -![RAG Pipeline](img/objectbox.png) - -#### Prerequisites - -Before running the example, make sure you have the following: - -- Ollama installed (see the [Ollama documentation](https://ollama.com/) for installation instructions). -- [jina/jina-embeddings-v2-small-en](https://ollama.com/jina/jina-embeddings-v2-small-en) and [llama3:8b](https://ollama.com/library/llama3:8b) models downloaded. - -#### Steps - -**Step 1: Retrieving and Storing Documents** - -1. Retrieve several posts from the ObjectBox blog using a `WebBaseLoader` document loader. -2. Split the retrieved posts into chunks using a `RecursiveCharacterTextSplitter`. -3. Create embeddings from the document chunks using the `jina/jina-embeddings-v2-small-en` embeddings model via `OllamaEmbeddings`. -4. Add the document chunks and their corresponding embeddings to the `ObjectBoxVectorStore`. - -> Note: this step only needs to be executed once (unless the documents change). The stored documents can be used for multiple queries. - -**Step 2: Constructing the RAG Pipeline** - -1. Set up a retrieval pipeline that takes a user question as input and retrieves the most relevant documents from the ObjectBox vector store. -2. Format the retrieved documents into a single string containing the source, title, and content of each document. -3. Pass the formatted string to the Llama 3 model to generate a response to the user question. - -```dart -// 1. Instantiate vector store -final vectorStore = ObjectBoxVectorStore( - embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), - dimensions: 512, -); - -// 2. Load documents -const loader = WebBaseLoader([ - 'https://objectbox.io/on-device-vector-databases-and-edge-ai/', - 'https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0/', - 'https://objectbox.io/on-device-vector-database-for-dart-flutter/', - 'https://objectbox.io/evolution-of-search-traditional-vs-vector-search//', -]); -final List docs = await loader.load(); - -// 3. Split docs into chunks -const splitter = RecursiveCharacterTextSplitter( - chunkSize: 500, - chunkOverlap: 0, -); -final List chunkedDocs = await splitter.invoke(docs); - -// 4. Add documents to vector store -await vectorStore.addDocuments(documents: chunkedDocs); - -// 5. Construct a RAG prompt template -final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, - ''' -You are an assistant for question-answering tasks. 
- -Use the following pieces of retrieved context to answer the user question. - -Context: -{context} - -If you don't know the answer, just say that you don't know. -Use three sentences maximum and keep the answer concise. -Cite the source you used to answer the question. - -Example: -""" -One sentence [1]. Another sentence [2]. - -Sources: -[1] https://example.com/1 -[2] https://example.com/2 -""" -''' - ), - (ChatMessageType.human, '{question}'), -]); - -// 6. Define the model to use and the vector store retriever -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3.2'), -); -final retriever = vectorStore.asRetriever(); - -// 7. Create a Runnable that combines the retrieved documents into a single formatted string -final docCombiner = Runnable.mapInput, String>((docs) { - return docs.map((d) => ''' -Source: ${d.metadata['source']} -Title: ${d.metadata['title']} -Content: ${d.pageContent} ---- -''').join('\n'); -}); - -// 8. Define the RAG pipeline -final chain = Runnable.fromMap({ - 'context': retriever.pipe(docCombiner), - 'question': Runnable.passthrough(), -}).pipe(promptTemplate).pipe(chatModel).pipe(StringOutputParser()); - -// 9. Run the pipeline -final stream = chain.stream( - 'Which algorithm does ObjectBox Vector Search use? Can I use it in Flutter apps?', -); -await stream.forEach(stdout.write); -// According to the sources provided, ObjectBox Vector Search uses the HNSW -// (Hierarchical Navigable Small World) algorithm [1]. -// -// And yes, you can use it in Flutter apps. The article specifically mentions -// that ObjectBox 4.0 introduces an on-device vector database for the -// Dart/Flutter platform [2]. -// -// Sources: -// [1] https://objectbox.io/first-on-device-vector-database-objectbox-4-0/ -// [2] https://objectbox.io/on-device-vector-database-for-dart-flutter/ -``` - -## Example: Wikivoyage EU - -Check out the [Wikivoyage EU example](https://github.com/davidmigloz/langchain_dart/tree/main/examples/wikivoyage_eu), to see how to build a fully local chatbot that uses RAG to plan vacation plans in Europe. - -## Advance - -### BaseObjectBoxVectorStore - -If you need more control over the entity (e.g. if you are using ObjectBox to store other entities, or if you need to customize the Document entity class.), you can use the `BaseObjectBoxVectorStore` class instead of `ObjectBoxVectorStore`. - -`BaseObjectBoxVectorStore` requires the following parameters: -- `embeddings`: The embeddings model to use. -- `box`: The ObjectBox `Box` instance to use. -- `createEntity`: A function that creates an entity from the given data. -- `createDocument`: A function that creates a LangChain's `Document` from the given entity. -- `getIdProperty`: A function that returns the ID property of the entity. -- `getEmbeddingProperty`: A function that returns the embedding property of the entity. - -Here is an example of how to use this class: - -First, you can define our own Document entity class instead of using the one provided by the [ObjectBoxVectorStore]. In this way, you can customize the entity to your needs. You will need to define the mapping logic between the entity and the LangChain [Document] model. 
- -```dart -@Entity() -class MyDocumentEntity { - MyDocumentEntity({ - required this.id, - required this.content, - required this.metadata, - required this.embedding, - }); - @Id() - int internalId = 0; - @Unique(onConflict: ConflictStrategy.replace) - String id; - String content; - String metadata; - @HnswIndex( - dimensions: 768, - distanceType: VectorDistanceType.cosine, - ) - @Property(type: PropertyType.floatVector) - List embedding; - factory MyDocumentEntity.fromModel( - Document doc, List embedding, - ) => MyDocumentEntity( - id: doc.id ?? '', - content: doc.pageContent, - metadata: jsonEncode(doc.metadata), - embedding: embedding, - ); - Document toModel() => Document( - id: id, - pageContent: content, - metadata: jsonDecode(metadata), - ); -} -``` - -After defining the entity class, you will need to run the ObjectBox generator: - -```sh -dart run build_runner build --delete-conflicting-outputs -``` - -Then, you just need to create your custom vector store class that extends [BaseObjectBoxVectorStore] and wire everything up: - -```dart -class MyCustomVectorStore extends BaseObjectBoxVectorStore { - MyCustomVectorStore({ - required super.embeddings, - required Store store, - }) : super( - box: store.box(), - createEntity: ( - String id, - String content, - String metadata, - List embedding, - ) => - MyDocumentEntity( - id: id, - content: content, - metadata: metadata, - embedding: embedding, - ), - createDocument: (MyDocumentEntity docDto) => docDto.toModel(), - getIdProperty: () => MyDocumentEntity_.id, - getEmbeddingProperty: () => MyDocumentEntity_.embedding, - ); -} -``` - -Now you can use the [MyCustomVectorStore] class to store and search documents. diff --git a/docs_v2/.firebaserc b/docs_v2/.firebaserc deleted file mode 100644 index 15e3b72b..00000000 --- a/docs_v2/.firebaserc +++ /dev/null @@ -1,5 +0,0 @@ -{ - "projects": { - "default": "langchain-dart" - } -} diff --git a/docs_v2/.gitignore b/docs_v2/.gitignore deleted file mode 100644 index 0f21febf..00000000 --- a/docs_v2/.gitignore +++ /dev/null @@ -1,21 +0,0 @@ -# Dependencies -/node_modules - -# Production -/build - -# Generated files -.docusaurus -.cache-loader - -# Misc -.DS_Store -.env.local -.env.development.local -.env.test.local -.env.production.local - -npm-debug.log* -yarn-debug.log* -yarn-error.log* -.firebase diff --git a/docs_v2/README.md b/docs_v2/README.md deleted file mode 100644 index 0c6c2c27..00000000 --- a/docs_v2/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# Website - -This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator. - -### Installation - -``` -$ yarn -``` - -### Local Development - -``` -$ yarn start -``` - -This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. - -### Build - -``` -$ yarn build -``` - -This command generates static content into the `build` directory and can be served using any static contents hosting service. - -### Deployment - -Using SSH: - -``` -$ USE_SSH=true yarn deploy -``` - -Not using SSH: - -``` -$ GIT_USER= yarn deploy -``` - -If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. 
diff --git a/docs_v2/babel.config.js b/docs_v2/babel.config.js deleted file mode 100644 index e00595da..00000000 --- a/docs_v2/babel.config.js +++ /dev/null @@ -1,3 +0,0 @@ -module.exports = { - presets: [require.resolve('@docusaurus/core/lib/babel/preset')], -}; diff --git a/docs_v2/docs/01-intro.md b/docs_v2/docs/01-intro.md deleted file mode 100644 index 75428706..00000000 --- a/docs_v2/docs/01-intro.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -sidebar_position: 0 -sidebar_class_name: hidden ---- - -# Introduction - -Build Dart/Flutter applications powered by Large Language Models. - -## What is LangChain.dart? - -LangChain.dart is an unofficial Dart port of the popular [LangChain](https://github.com/hwchase17/langchain) Python framework created by [Harrison Chase](https://www.linkedin.com/in/harrison-chase-961287118). LangChain is a framework for developing applications that are powered by large language models (LLMs). - -It comes with a set of components that make working with LLMs easy. -The components can be grouped into a few core modules: - -![LangChain.dart](https://raw.githubusercontent.com/davidmigloz/langchain_dart/main/docs/img/langchain.dart.png) - -- 📃 **Model I/O:** LangChain offers a unified API for interacting with various LLM providers (e.g. OpenAI, Google, Mistral, Ollama, etc.), allowing developers to switch between them with ease. Additionally, it provides tools for managing model inputs (prompt templates and example selectors) and parsing the resulting model outputs (output parsers). -- 📚 **Retrieval:** assists in loading user data (via document loaders), transforming it (with text splitters), extracting its meaning (using embedding models), storing (in vector stores) and retrieving it (through retrievers) so that it can be used to ground the model's responses (i.e. Retrieval-Augmented Generation or RAG). -- 🤖 **Agents:** "bots" that leverage LLMs to make informed decisions about which available tools (such as web search, calculators, database lookup, etc.) to use to accomplish the designated task. - -The different components can be composed together using the [LangChain Expression Language (LCEL)](https://langchaindart.dev/#/expression_language/get_started). - -## Motivation - -Large Language Models (LLMs) have revolutionized Natural Language Processing (NLP), serving as essential components in a wide range of applications, such as question-answering, summarization, translation, and text generation. - -The adoption of LLMs is creating a new tech stack in its wake. However, emerging libraries and tools are predominantly being developed for the Python and JavaScript ecosystems. As a result, the number of applications leveraging LLMs in these ecosystems has grown exponentially. - -In contrast, the Dart / Flutter ecosystem has not experienced similar growth, which can likely be attributed to the scarcity of Dart and Flutter libraries that streamline the complexities associated with working with LLMs. - -LangChain.dart aims to fill this gap by abstracting the intricacies of working with LLMs in Dart and Flutter, enabling developers to harness their combined potential effectively. - -## Packages - -LangChain.dart has a modular design that allows developers to import only the components they need. The ecosystem consists of several packages: - -### [`langchain_core`](https://pub.dev/packages/langchain_core) - -Contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. 
- -> Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. - -### [`langchain`](https://pub.dev/packages/langchain) - -Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. - -> Depend on this package to build LLM applications with LangChain.dart. -> -> This package exposes `langchain_core` so you don't need to depend on it explicitly. - -### [`langchain_community`](https://pub.dev/packages/langchain_community) - -Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. - -> Depend on this package if you want to use any of the integrations or components it provides. - -### Integration-specific packages - -Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), [`langchain_ollama`](https://pub.dev/packages/langchain_ollama), etc.) are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package. - -> Depend on an integration-specific package if you want to use the specific integration. - - -## Getting started - -To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_community`, `langchain_openai`, `langchain_google`, etc.): - -```yaml -dependencies: - langchain: {version} - langchain_community: {version} - langchain_openai: {version} - langchain_google: {version} - ... -``` - -The most basic building block of LangChain.dart is calling an LLM on some prompt. LangChain.dart provides a unified interface for calling different LLMs. For example, we can use `ChatGoogleGenerativeAI` to call Google's Gemini model: - -```dart -final model = ChatGoogleGenerativeAI(apiKey: googleApiKey); -final prompt = PromptValue.string('Hello world!'); -final result = await model.invoke(prompt); -// Hello everyone! I'm new here and excited to be part of this community. -``` - -But the power of LangChain.dart comes from chaining together multiple components to implement complex use cases. For example, a RAG (Retrieval-Augmented Generation) pipeline that would accept a user query, retrieve relevant documents from a vector store, format them using prompt templates, invoke the model, and parse the output: - -```dart -// 1. Create a vector store and add documents to it -final vectorStore = MemoryVectorStore( - embeddings: OpenAIEmbeddings(apiKey: openaiApiKey), -); -await vectorStore.addDocuments( - documents: [ - Document(pageContent: 'LangChain was created by Harrison'), - Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'), - ], -); - -// 2. Define the retrieval chain -final retriever = vectorStore.asRetriever(); -final setupAndRetrieval = Runnable.fromMap({ - 'context': retriever.pipe( - Runnable.mapInput((docs) => docs.map((d) => d.pageContent).join('\n')), - ), - 'question': Runnable.passthrough(), -}); - -// 3. Construct a RAG prompt template -final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'Answer the question based on only the following context:\n{context}'), - (ChatMessageType.human, '{question}'), -]); - -// 4. 
Define the final chain -final model = ChatOpenAI(apiKey: openaiApiKey); -const outputParser = StringOutputParser(); -final chain = setupAndRetrieval - .pipe(promptTemplate) - .pipe(model) - .pipe(outputParser); - -// 5. Run the pipeline -final res = await chain.invoke('Who created LangChain.dart?'); -print(res); -// David created LangChain.dart -``` - -## Documentation - -- [LangChain.dart documentation](https://langchaindart.dev) -- [Sample apps](https://github.com/davidmigloz/langchain_dart/tree/main/examples) -- [LangChain.dart blog](https://blog.langchaindart.dev) -- [Project board](https://github.com/users/davidmigloz/projects/2/views/1) - -## Community - -Stay up-to-date on the latest news and updates on the field, have great discussions, and get help in the official [LangChain.dart Discord server](https://discord.gg/x4qbhqecVR). - -[![LangChain.dart Discord server](https://invidget.switchblade.xyz/x4qbhqecVR?theme=light)](https://discord.gg/x4qbhqecVR) - -## Contribute - -| 📢 **Call for Collaborators** 📢 | -|-------------------------------------------------------------------------| -| We are looking for collaborators to join the core group of maintainers. | - -New contributors welcome! Check out our [Contributors Guide](https://github.com/davidmigloz/langchain_dart/blob/main/CONTRIBUTING.md) for help getting started. - -Join us on [Discord](https://discord.gg/x4qbhqecVR) to meet other maintainers. We'll help you get your first contribution in no time! - -## Related projects - -- [LangChain](https://github.com/langchain-ai/langchain): The original Python LangChain project. -- [LangChain.js](https://github.com/langchain-ai/langchainjs): A JavaScript port of LangChain. -- [LangChain.go](https://github.com/tmc/langchaingo): A Go port of LangChain. -- [LangChain.rb](https://github.com/andreibondarev/langchainrb): A Ruby port of LangChain. - -## Sponsors - -


    - -## License - -LangChain.dart is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). diff --git a/docs_v2/docs/02-tutorials/01-llm_chain.md b/docs_v2/docs/02-tutorials/01-llm_chain.md deleted file mode 100644 index e40bbb77..00000000 --- a/docs_v2/docs/02-tutorials/01-llm_chain.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -sidebar_position: 0 -sidebar_class_name: hidden ---- - - -# Build a Simple LLM Application with LCEL \ No newline at end of file diff --git a/docs_v2/docs/02-tutorials/index.mdx b/docs_v2/docs/02-tutorials/index.mdx deleted file mode 100644 index 82e56f9e..00000000 --- a/docs_v2/docs/02-tutorials/index.mdx +++ /dev/null @@ -1,28 +0,0 @@ ---- -sidebar_position: 0 -sidebar_class_name: hidden ---- -# Tutorials - -New to LangChain or to LLM app development in general? Read this material to quickly get up and running. - -## Basics -- [Build a Simple LLM Application with LCEL](/docs/tutorials/llm_chain) -- [Build a Chatbot](/docs/tutorials/chatbot) -- [Build vector stores and retrievers](/docs/tutorials/retrievers) -- [Build an Agent](/docs/tutorials/agents) - -## Working with external knowledge -- [Build a Retrieval Augmented Generation (RAG) Application](/docs/tutorials/rag) -- [Build a Conversational RAG Application](/docs/tutorials/qa_chat_history) -- [Build a Question/Answering system over SQL data](/docs/tutorials/sql_qa) -- [Build a Query Analysis System](/docs/tutorials/query_analysis) -- [Build a local RAG application](/docs/tutorials/local_rag) -- [Build a Question Answering application over a Graph Database](/docs/tutorials/graph) -- [Build a PDF ingestion and Question/Answering system](/docs/tutorials/pdf_qa/) - -## Specialized tasks -- [Build an Extraction Chain](/docs/tutorials/extraction) -- [Generate synthetic data](/docs/tutorials/data_generation) -- [Classify text into labels](/docs/tutorials/classification) -- [Summarize text](/docs/tutorials/summarization) \ No newline at end of file diff --git a/docs_v2/docs/03-how_to/01-installation.md b/docs_v2/docs/03-how_to/01-installation.md deleted file mode 100644 index 44604573..00000000 --- a/docs_v2/docs/03-how_to/01-installation.md +++ /dev/null @@ -1,77 +0,0 @@ -# Installation -Langchain as a framework consists of a number of packages. They're split into different packages allowing you to choose exactly what pieces of the framework to install and use. - -## Installing essential Langchain.dart packages - -### [`langchain`](https://pub.dev/packages/langchain) -Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. - -> Depend on this package to build LLM applications with Langchain.dart. -> -> This package exposes `langchain_core` so you don't need to depend on it explicitly. - -```bash -dart pub add langchain -``` - -### [`langchain_core`](https://pub.dev/packages/langchain_core) -This package contains base abstractions of different components and ways to compose them together. -The interfaces for core components like LLMs, vector stores, retrievers and more are defined here. -> Depend on this package to build frameworks on top of Langchain.dart or to interoperate with it. - -To install this package in your Dart or Flutter project -```bash -dart pub add langchain_core -``` - -### [`langchain_community`](https://pub.dev/packages/langchain_community) -Contains third-party integrations and community-contributed components that are not part of the core Langchain.dart API. 
-> Depend on this package if you want to use any of the integrations or components it provides like CSV,JSON,Text or HTML loaders and more. - -```bash -dart pub add langchain langchain_community -``` - -## Integration packages -Certain integrations like OpenAI and Anthropic have their own packages. Any integrations that require their own package will be documented as such in the Integration docs. - - -Let's say you're using [OpenAI](https://platform.openai.com/), install the `langchain_openai` package. -```bash -dart pub add langchain langchain_community langchain_openai -``` - -Let's say you want Google integration to use (GoogleAI, VertexAI, Gemini etc), install the `langchain_google` package. -```bash -dart pub add langchain langchain_community langchain_google -``` -The following table contains the list of existing Langchain.dart integration packages. - -| Package | Version | Description | -|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | -| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | -| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components and utilities | -| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | -| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | -| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) 
| -| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). | -| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | -| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | -## Documentation - -Detailed documentation for various integrations can be found in the `/docs/05-integration/` directory: - -- [Anthropic](/docs/integrations/anthropic) -- [Anyscale](/docs/integrations/anyscale) -- [Firebase VertexAI](/docs/integrations/firebase_vertex_ai) -- [GCP VertexAI](/docs/integrations/gcp_vertex_ai) -- [GoogleAI](/docs/integrations/googleai) -- [MistralAI](/docs/integrations/mistralai) -- [Ollama](/docs/integrations/ollama) -- [OpenRouter](/docs/integrations/open_router) -- [OpenAI](/docs/integrations/openai) -- [PrEM](/docs/integrations/prem) -- [TogetherAI](/docs/integrations/together_ai) \ No newline at end of file diff --git a/docs_v2/docs/03-how_to/02-structured_output.md b/docs_v2/docs/03-how_to/02-structured_output.md deleted file mode 100644 index 95983cd6..00000000 --- a/docs_v2/docs/03-how_to/02-structured_output.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -sidebar_position: 3 -keywords: [structured output, json, information extraction, with_structured_output] ---- -# How to return structured data from a model - -> This guide assumes familiarity with the following concepts: -> - [Chat models](/docs/concepts/#chat-models) -> - [Function/tool calling](/docs/concepts/#functiontool-calling) - - -It is often useful to have a model return output that matches a specific schema. One common use-case is extracting data from text to insert into a database or use with some other downstream system. This guide covers a few strategies for getting structured outputs from a model. - - diff --git a/docs_v2/docs/03-how_to/index.mdx b/docs_v2/docs/03-how_to/index.mdx deleted file mode 100644 index 81ea6bc7..00000000 --- a/docs_v2/docs/03-how_to/index.mdx +++ /dev/null @@ -1,149 +0,0 @@ ---- -sidebar_position: 0 -sidebar_class_name: hidden ---- - -# How-to guides - -Here you'll find answers to "How do I...?" types of questions. -These guides are *goal-oriented* and *concrete*; they're meant to help you complete a specific task. -For conceptual explanations see the [Conceptual guide](/docs/concepts/). -For end-to-end walkthroughs see [Tutorials](/docs/tutorials). -For comprehensive descriptions of every class and function see the [API Reference](https://pub.dev/documentation/langchain/latest/index.html). - - -## Installation - -- [How to: install LangChain packages](/docs/how_to/installation/) - -## Key features -This highlights functionality that is core to using LangChain. 
- -- [How to: return structured data from a model](/docs/how_to/structured_output/) -- [How to: use a model to call tools](/docs/how_to/tool_calling) -- [How to: stream runnables](/docs/how_to/streaming) -- [How to: debug your LLM apps](/docs/how_to/debugging/) - -[LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel) is a way to create arbitrary custom chains. It is built on the [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) protocol. - -[**LCEL cheatsheet**](/docs/how_to/lcel_cheatsheet/): For a quick overview of how to use the main LCEL primitives. - -- [How to: chain runnables](/docs/how_to/sequence) -- [How to: stream runnables](/docs/how_to/streaming) -- [How to: invoke runnables in parallel](/docs/how_to/parallel/) -- [How to: add default invocation args to runnables](/docs/how_to/binding/) -- [How to: turn any function into a runnable](/docs/how_to/functions) -- [How to: pass through inputs from one chain step to the next](/docs/how_to/passthrough) -- [How to: configure runnable behavior at runtime](/docs/how_to/configure) -- [How to: add message history (memory) to a chain](/docs/how_to/message_history) -- [How to: route between sub-chains](/docs/how_to/routing) -- [How to: create a dynamic (self-constructing) chain](/docs/how_to/dynamic_chain/) -- [How to: inspect runnables](/docs/how_to/inspect) -- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks) -- [How to: migrate chains to LCEL](/docs/how_to/migrate_chains) -- [How to: pass runtime secrets to a runnable](/docs/how_to/runnable_runtime_secrets) - - -## Components - -These are the core building blocks you can use when building applications. - -### Prompt templates - -[Prompt Templates](/docs/concepts/#prompt-templates) are responsible for formatting user input into a format that can be passed to a language model. - -- [How to: use few shot examples](/docs/how_to/few_shot_examples) -- [How to: use few shot examples in chat models](/docs/how_to/few_shot_examples_chat/) -- [How to: partially format prompt templates](/docs/how_to/prompts_partial) -- [How to: compose prompts together](/docs/how_to/prompts_composition) - -### Chat models - -[Chat Models](/docs/concepts/#chat-models) are newer forms of language models that take messages in and output a message. 
- -- [How to: do function/tool calling](/docs/how_to/tool_calling) -- [How to: get models to return structured output](/docs/how_to/structured_output) -- [How to: cache model responses](/docs/how_to/chat_model_caching) -- [How to: get log probabilities](/docs/how_to/logprobs) -- [How to: create a custom chat model class](/docs/how_to/custom_chat_model) -- [How to: stream a response back](/docs/how_to/chat_streaming) -- [How to: track token usage](/docs/how_to/chat_token_usage_tracking) -- [How to: track response metadata across providers](/docs/how_to/response_metadata) -- [How to: let your end users choose their model](/docs/how_to/chat_models_universal_init/) -- [How to: use chat model to call tools](/docs/how_to/tool_calling) -- [How to: stream tool calls](/docs/how_to/tool_streaming) -- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot) -- [How to: bind model-specific formatted tools](/docs/how_to/tools_model_specific) -- [How to: force a specific tool call](/docs/how_to/tool_choice) -- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/) - -### LLMs - -What LangChain calls [LLMs](/docs/concepts/#llms) are older forms of language models that take a string in and output a string. - -- [How to: cache model responses](/docs/how_to/llm_caching) -- [How to: create a custom LLM class](/docs/how_to/custom_llm) -- [How to: stream a response back](/docs/how_to/streaming_llm) -- [How to: track token usage](/docs/how_to/llm_token_usage_tracking) -- [How to: work with local LLMs](/docs/how_to/local_llms) - -### Document loaders - -[Document Loaders](/docs/concepts/#document-loaders) are responsible for loading documents from a variety of sources. - -- [How to: load CSV data](/docs/how_to/document_loader_csv) -- [How to: load data from a directory](/docs/how_to/document_loader_directory) -- [How to: load HTML data](/docs/how_to/document_loader_html) -- [How to: load JSON data](/docs/how_to/document_loader_json) -- [How to: load Markdown data](/docs/how_to/document_loader_markdown) -- [How to: load Microsoft Office data](/docs/how_to/document_loader_office_file) -- [How to: load PDF files](/docs/how_to/document_loader_pdf) -- [How to: write a custom document loader](/docs/how_to/document_loader_custom) - -### Text splitters - -[Text Splitters](/docs/concepts/#text-splitters) take a document and split into chunks that can be used for retrieval. - -- [How to: recursively split text](/docs/how_to/recursive_text_splitter) -- [How to: split by HTML headers](/docs/how_to/HTML_header_metadata_splitter) -- [How to: split by HTML sections](/docs/how_to/HTML_section_aware_splitter) -- [How to: split by character](/docs/how_to/character_text_splitter) -- [How to: split code](/docs/how_to/code_splitter) -- [How to: split Markdown by headers](/docs/how_to/markdown_header_metadata_splitter) -- [How to: recursively split JSON](/docs/how_to/recursive_json_splitter) -- [How to: split text into semantic chunks](/docs/how_to/semantic-chunker) -- [How to: split by tokens](/docs/how_to/split_by_token) - -### Vector stores - -[Vector stores](/docs/concepts/#vector-stores) are databases that can efficiently store and retrieve embeddings. - -- [How to: use a vector store to retrieve data](/docs/how_to/vectorstores) - -### Retrievers - -[Retrievers](/docs/concepts/#retrievers) are responsible for taking a query and returning relevant documents. 
- -- [How to: use a vector store to retrieve data](/docs/how_to/vectorstore_retriever) -- [How to: generate multiple queries to retrieve data for](/docs/how_to/MultiQueryRetriever) -- [How to: use contextual compression to compress the data retrieved](/docs/how_to/contextual_compression) -- [How to: write a custom retriever class](/docs/how_to/custom_retriever) -- [How to: add similarity scores to retriever results](/docs/how_to/add_scores_retriever) -- [How to: combine the results from multiple retrievers](/docs/how_to/ensemble_retriever) -- [How to: reorder retrieved results to mitigate the "lost in the middle" effect](/docs/how_to/long_context_reorder) -- [How to: generate multiple embeddings per document](/docs/how_to/multi_vector) -- [How to: retrieve the whole document for a chunk](/docs/how_to/parent_document_retriever) -- [How to: generate metadata filters](/docs/how_to/self_query) -- [How to: create a time-weighted retriever](/docs/how_to/time_weighted_vectorstore) -- [How to: use hybrid vector and keyword retrieval](/docs/how_to/hybrid) - -### Agents - -:::note - -For in depth how-to guides for agents, please check out [LangGraph](https://langchain-ai.github.io/langgraph/) documentation. - -::: - -- [How to: use legacy LangChain Agents (AgentExecutor)](/docs/how_to/agent_executor) -- [How to: migrate from legacy LangChain agents to LangGraph](/docs/how_to/migrate_agent) \ No newline at end of file diff --git a/docs_v2/docs/04-concepts.mdx b/docs_v2/docs/04-concepts.mdx deleted file mode 100644 index fcd2335b..00000000 --- a/docs_v2/docs/04-concepts.mdx +++ /dev/null @@ -1,468 +0,0 @@ -# Conceptual guide - -This section contains introductions to key parts of LangChain.dart - -## Architecture - -LangChain.dart as a framework consists of a number of packages. - -### [`langchain_core`](https://pub.dev/packages/langchain_core) -This package contains base abstractions of different components and ways to compose them together. -The interfaces for core components like LLMs, vector stores, retrievers and more are defined here. -No third party integrations are defined here. - -> Depend on this package to build frameworks on top of .dart.dart or to interoperate with it. - -### [`langchain`](https://pub.dev/packages/langchain) - -Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. - -> Depend on this package to build LLM applications with .dart.dart. -> -> This package exposes `langchain_core` so you don't need to depend on it explicitly. - -### [`langchain_community`](https://pub.dev/packages/langchain_community) - -Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. - -> Depend on this package if you want to use any of the integrations or components it provides. - -### Integration-specific packages - -Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), [`langchain_ollama`](https://pub.dev/packages/langchain_ollama), etc.) are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package. - -> Depend on an integration-specific package if you want to use the specific integration. - -See [Integrations](/docs/integrations) to integrate with a specific package. 
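
To make the layering concrete, the following is a minimal sketch of how an application typically wires these packages together. It assumes the `langchain` and `langchain_openai` packages and uses a placeholder API key and prompt; any other integration package (e.g. `langchain_google`, `langchain_ollama`) slots in the same way.

```dart
import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  // `langchain` provides the chain-building blocks (and re-exports
  // `langchain_core`), while the chat model comes from its own
  // integration package (`langchain_openai` here).
  final promptTemplate = ChatPromptTemplate.fromTemplates([
    (ChatMessageType.system, 'You are a helpful assistant.'),
    (ChatMessageType.human, '{question}'),
  ]);
  final model = ChatOpenAI(apiKey: 'your-openai-key'); // placeholder key
  final chain = promptTemplate.pipe(model).pipe(StringOutputParser());

  final answer = await chain.invoke({'question': 'What is LangChain.dart?'});
  print(answer);
}
```

Because the rest of the chain only depends on the core abstractions, swapping `ChatOpenAI` for another provider's chat model is a one-line change.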
- -## LangChain Expression Language (LCEL) - -LangChain Expression Language, or LCEL, is a declarative way to easily compose chains together. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains (we’ve seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL: - -- **First-class streaming support:** When you build your chains with LCEL you get the best possible time-to-first-token (time elapsed until the first chunk of output comes out). For some chains this means eg. we stream tokens straight from an LLM to a streaming output parser, and you get back parsed, incremental chunks of output at the same rate as the LLM provider outputs the raw tokens. -- **Optimized concurrent execution:** Whenever your LCEL chains have steps that can be executed concurrently (eg if you fetch documents from multiple retrievers) we automatically do it for the smallest possible latency. -- **Retries and fallbacks:** Configure retries and fallbacks for any part of your LCEL chain. This is a great way to make your chains more reliable at scale. -- **Access intermediate results:** For more complex chains it’s often very useful to access the results of intermediate steps even before the final output is produced. This can be used to let end-users know something is happening, or even just to debug your chain. - -### Runnable interface - -To make it as easy as possible to create custom chains, LangChain provides a `Runnable` interface that most components implement, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. - -This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way. The standard interface includes: - -- `invoke`: call the chain on an input and return the output. -- `stream`: call the chain on an input and stream the output. -- `batch`: call the chain on a list of inputs and return a list of outputs. - -The type of the input and output varies by component: - -| Component | Input Type | Output Type | -|-----------------------------|------------------------|------------------------| -| `PromptTemplate` | `Map` | `PromptValue` | -| `ChatMessagePromptTemplate` | `Map` | `PromptValue` | -| `LLM` | `PromptValue` | `LLMResult` | -| `ChatModel` | `PromptValue` | `ChatResult` | -| `OutputParser` | Any object | Parser output type | -| `Retriever` | `String` | `List` | -| `DocumentTransformer` | `List` | `List` | -| `Tool` | `Map` | `String` | -| `Chain` | `Map` | `Map` | - -## Components - -### Chat models -Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text). -These are traditionally newer models (older models are generally `LLMs`, see below). -Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages. - -Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input. This means you can easily use chat models in place of LLMs. - -When a string is passed in as input, it is converted to a `HumanMessage` and then passed to the underlying model. - -LangChain does not host any Chat Models, rather we rely on third party integrations. 
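
As a rough illustration of that shared `Runnable` surface, the sketch below calls the same chat model through `invoke` and `stream`. It uses `ChatOpenAI` with a placeholder API key purely as an example, and assumes the `ChatResult.output.content` accessors; any chat model integration exposes the same methods.

```dart
import 'dart:io';

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  final model = ChatOpenAI(apiKey: 'your-openai-key'); // placeholder key
  // A plain string is wrapped into a HumanMessage under the hood.
  final prompt = PromptValue.string('Write a one-line greeting.');

  // invoke: one input in, one result out.
  final result = await model.invoke(prompt);
  print(result.output.content);

  // stream: the same input, but the output arrives as incremental chunks.
  await model
      .stream(prompt)
      .forEach((chunk) => stdout.write(chunk.output.content));
}
```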
- -We have some standardized parameters when constructing ChatModels: -- `model`: the name of the model -- `temperature`: the sampling temperature -- `timeout`: request timeout -- `maxTokens`: max tokens to generate -- `apiKey`: API key for the model provider -- `baseUrl`: endpoint to send requests to - -Some important things to note: -- standard params only apply to model providers that expose parameters with the intended functionality. For example, some providers do not expose a configuration for maximum output tokens, so `maxTokens` can't be supported on these. -- standard params are currently only enforced on integrations that have their own integration packages (e.g. `langchain_openai`, `langchain_anthropic`, etc.); they're not enforced on models in `langchain_community`. - -ChatModels also accept other parameters that are specific to that integration. To find all the parameters supported by a ChatModel, head to the API reference for that model. - -### LLMs -:::caution -Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/docs/concepts/#chat-models), -even for non-chat use cases. - -You are probably looking for [the section above instead](/docs/concepts/#chat-models). -::: - -Language models that take a string as input and return a string. -These are traditionally older models (newer models are generally [Chat Models](/docs/concepts/#chat-models), see above). - -Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input. -This gives them the same interface as [Chat Models](/docs/concepts/#chat-models). -When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model. - -LangChain.dart does not host any LLMs, rather we rely on third-party integrations. See [Integrations](/docs/integrations). - - - -### Messages -Some language models take a list of messages as input and return a message. - -LangChain provides several objects to easily distinguish between different roles: -#### HumanChatMessage -This represents a message from the user. - -#### AIChatMessage -This represents a message from the model. - -#### SystemChatMessage -This represents a system message, which tells the model how to behave. Not every model provider supports this. - -#### FunctionChatMessage / ToolChatMessage -These represent a decision from a language model to call a tool. They're a subclass of `AIChatMessage`. FunctionChatMessage is a legacy message type corresponding to OpenAI's legacy function-calling API. - -### Prompt Templates - -Most LLM applications do not pass user input directly into an `LLM`. Usually they will add the user input to a larger piece of text, called a prompt template, that provides additional context on the specific task at hand. - -Imagine, for example, that the text we pass to the model contains instructions to generate a company name. For our application, it would be great if the user only had to provide the description of a company/product, without having to worry about giving the model instructions. - -`PromptTemplates` help with exactly this! They bundle up all the logic for going from user input into a fully formatted prompt.
This can start off very simple - for example, a prompt to produce the above string would just be: - -```dart -final prompt = PromptTemplate.fromTemplate( - 'What is a good name for a company that makes {product}?', -); -final res = prompt.format({'product': 'colorful socks'}); -print(res); -// 'What is a good name for a company that makes colorful socks?' -``` - -However, the advantages of using these over raw string formatting are several. You can "partial" out variables - e.g. you can format only some of the variables at a time. You can compose them together, easily combining different templates into a single prompt. - -For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates). - -`PromptTemplates` can also be used to produce a list of messages. In this case, the prompt not only contains information about the content, but also each message (its role, its position in the list, etc) Here, what happens most often is a `ChatPromptTemplate` is a list of `ChatMessagePromptTemplates`. Each `ChatMessagePromptTemplate` contains instructions for how to format that `ChatMessage` - its role, and then also its content. Let's take a look at this below: - -```dart -const template = 'You are a helpful assistant that translates {input_language} to {output_language}.'; -const humanTemplate = '{text}'; - -final chatPrompt = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, template), - (ChatMessageType.human, humanTemplate), -]); - -final res = chatPrompt.formatMessages({ - 'input_language': 'English', - 'output_language': 'French', - 'text': 'I love programming.', -}); -print(res); -// [ -// SystemChatMessage(content='You are a helpful assistant that translates English to French.'), -// HumanChatMessage(content='I love programming.') -// ] -``` - -`ChatPromptTemplates` can also be constructed in other ways - For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates). - -### Output parsers - - -:::note - -The information here refers to parsers that take a text output from a model try to parse it into a more structured representation. -More and more models are supporting function (or tool) calling, which handles this automatically. -It is recommended to use function/tool calling rather than output parsing. -See documentation for that [here](/docs/concepts/#function-tool-calling). - -::: -`OutputParsers` convert the raw output of an LLM into a format that can be used downstream. There are few main type of `OutputParsers`, including: - -- Convert text from LLM -> structured information (e.g. JSON). -- Convert a `ChatMessage` into just a string. -- Convert the extra information returned from a call besides the message (like OpenAI function invocation) into a string. - -For full information on this, see the section on [output parsers](/docs/how_to/#output-parsers). - -### Chat history -Most LLM applications have a conversational interface. An essential component of a conversation is being able to refer to information introduced earlier in the conversation. At bare minimum, a conversational system should be able to access some window of past messages directly. - -The concept of ChatHistory refers to a class in LangChain which can be used to wrap an arbitrary chain. This ChatHistory will keep track of inputs and outputs of the underlying chain, and append them as messages to a message database. 
Future interactions will then load those messages and pass them into the chain as part of the input. - -### Documents -A Document object in LangChain contains information about some data. It has two attributes: -- pageContent: `String` - The content of this document. -- metadata: `Map` - Arbitrary metadata associated with this document. Can track the document id, file name, etc. - -### Document loaders -Use document loaders to load data from a source as `Document`'s. For example, there are document loaders -for loading a simple .txt file, for loading the text contents of any web page, -or even for loading a transcript of a YouTube video. - -Document loaders expose two methods: - -- `lazyLoad()`: returns a `Stream` of `Document`'s. This is useful for loading - large amounts of data, as it allows you to process each `Document` as it is - loaded, rather than waiting for the entire data set to be loaded in memory. -- `load()`: returns a list of `Document`'s. Under the hood, this method calls - `lazyLoad()` and collects the results into a list. Use this method only with - small data sets. - -The simplest loader reads in a file as text and places it all into one -`Document`. - -```dart - -const filePath = 'example.txt'; -const loader = TextLoader(filePath); -final docs = await loader.load(); -``` - -### Text splitters -Once you've loaded documents, you'll often want to transform them to better suit -your application. The simplest example is you may want to split a long document -into smaller chunks that can fit into your model's context window. LangChain has -a number of built-in document transformers that make it easy to split, combine, -filter, and otherwise manipulate documents. - -## Text splitters - -When you want to deal with long pieces of text, it is necessary to split up that -text into chunks. As simple as this sounds, there is a lot of potential -complexity here. Ideally, you want to keep the semantically related pieces of -text together. What "semantically related" means could depend on the type of -text. This tutorial showcases several ways to do that. - -At a high level, text splitters work as following: - -1. Split the text up into small, semantically meaningful chunks (often - sentences). -2. Start combining these small chunks into a larger chunk until you reach a - certain size (as measured by some function). -3. Once you reach that size, make that chunk its own piece of text and then - start creating a new chunk of text with some overlap (to keep context between - chunks). - -That means there are two different axes along which you can customize your text -splitter: - -1. How the text is split. -2. How the chunk size is measured. - -The most basic text splitter is the `CharacterTextSplitter`. This splits based -on characters (by default `\n\n`) and measure chunk length by number of -characters. - -The default recommended text splitter is the `RecursiveCharacterTextSplitter`. This text splitter -takes a list of characters. It tries to create chunks based on splitting on the first character, -but if any chunks are too large it then moves onto the next character, and so forth. By default -the characters it tries to split on are `["\n\n", "\n", " ", ""]`. - -In addition to controlling which characters you can split on, you can also -control a few other things: - -- `lengthFunction`: how the length of chunks is calculated. Defaults to just - counting number of characters, but it's pretty common to pass a token counter - here. 
-- `chunkSize`: the maximum size of your chunks (as measured by the length - function). -- `chunkOverlap`: the maximum overlap between chunks. It can be nice to have - some overlap to maintain some continuity between chunks (e.g. a sliding - window). -- `addStartIndex`: whether to include the starting position of each chunk within - the original document in the metadata. - -```dart -const filePath = 'state_of_the_union.txt'; -const loader = TextLoader(filePath); -final documents = await loader.load(); -const textSplitter = RecursiveCharacterTextSplitter( - chunkSize: 800, - chunkOverlap: 0, -); -final docs = textSplitter.splitDocuments(documents); -``` - -### Embedding models -Embedding models create a vector representation of a piece of text. You can think of a vector as an array of numbers that captures the semantic meaning of the text. By representing the text in this way, you can perform mathematical operations that allow you to do things like search for other pieces of text that are most similar in meaning. These natural language search capabilities underpin many forms of context retrieval, where we provide an LLM with the relevant data it needs to respond effectively to a query. - -The `Embeddings` class is designed for interfacing with text embedding -models. There are lots of embedding model providers (OpenAI, Cohere, Hugging -Face, etc.) - this class is designed to provide a standard interface for all of -them. - -Embeddings create a vector representation of a piece of text. This is useful -because it means we can think about text in the vector space, and do things like -semantic search where we look for pieces of text that are most similar in the -vector space. - -The base Embeddings class in LangChain exposes two methods: one for embedding -documents and one for embedding a query. The former takes as input multiple -texts, while the latter takes a single text. The reason for having these as two -separate methods is that some embedding providers have different embedding -methods for documents (to be searched over) vs queries (the search query -itself). - -For specifics on how to use embedding models, see the [relevant how-to guides here](/docs/how_to/#embedding-models). - -### Vector stores -One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding vectors, and then at query time to embed the unstructured query and retrieve the embedding vectors that are 'most similar' to the embedded query. A vector store takes care of storing embedded data and performing vector search for you. - -Most vector stores can also store metadata about embedded vectors and support filtering on that metadata before -similarity search, allowing you more control over returned documents. - -Vector stores can be converted to the retriever interface by calling `asRetriever()` (see the sketch in the Retrievers section below). - -For specifics on how to use vector stores, see the [relevant how-to guides here](/docs/how_to/#vector-stores). - -### Retrievers -A retriever is an interface that returns documents given an unstructured query. -It is more general than a vector store. A retriever does not need to be able to -store documents, only to return (or retrieve) them. Vector stores can be used as -the backbone of a retriever, but there are other types of retrievers as well. - -Retrievers accept a string query as input and return a list of `Document` objects as output.
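For example, a vector store can be exposed through the retriever interface with `asRetriever()`. A minimal sketch (assuming an in-memory `MemoryVectorStore` and the `OpenAIEmbeddings` integration with an `openaiApiKey` variable; any embeddings/vector store combination works the same way):

```dart
final vectorStore = MemoryVectorStore(
  embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),
);
await vectorStore.addDocuments(
  documents: [
    Document(pageContent: 'LangChain was created by Harrison'),
    Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),
  ],
);

// Wrap the vector store in the retriever interface and query it.
final retriever = vectorStore.asRetriever();
final docs = await retriever.getRelevantDocuments('Who created LangChain.dart?');
print(docs.map((d) => d.pageContent).toList());
```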
- -The public API of the `BaseRetriever` class in LangChain is as follows: - -```dart -abstract interface class BaseRetriever { - Future> getRelevantDocuments(final String query); -} -``` - -For specifics on how to use retrievers, see the [relevant how-to guides here](/docs/how_to/#retrievers). - -### Tools -Tools are utilities designed to be called by a model. Their inputs are designed to be generated by models, and their outputs are designed to be passed back to models. Tools are needed whenever you want a model to control parts of your code or call out to external APIs. -A tool consists of: - -1. The name of the tool. -2. A description of what the tool does. -3. A JSON schema defining the inputs to the tool. -4. A function (and, optionally, an async variant of the function). - -When a tool is bound to a model, the name, description and JSON schema are provided as context to the model. -Given a list of tools and a set of instructions, a model can request to call one or more tools with specific inputs. - -To define a tool in dart, we use the `ToolSpec` class. -```dart -final openaiApiKey = Platform.environment['OPENAI_API_KEY']; -final model = ChatOpenAI(apiKey: openaiApiKey); - -final promptTemplate = ChatPromptTemplate.fromTemplate( - 'Tell me a joke about {foo}', -); - -const tool = ToolSpec( - name: 'joke', - description: 'A joke', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'setup': { - 'type': 'string', - 'description': 'The setup for the joke', - }, - 'punchline': { - 'type': 'string', - 'description': 'The punchline for the joke', - }, - }, - 'required': ['setup', 'punchline'], - }, -); - -final chain = promptTemplate | - model.bind( - ChatOpenAIOptions( - tools: const [tool], - toolChoice: ChatToolChoice.forced(name: tool.name), - ), - ); - -final res = await chain.invoke({'foo': 'bears'}); -print(res); -// ChatResult{ -// id: chatcmpl-9LBPyaZcFMgjmOvkD0JJKAyA4Cihb, -// output: AIChatMessage{ -// content: , -// toolCalls: [ -// AIChatMessageToolCall{ -// id: call_JIhyfu6jdIXaDHfYzbBwCKdb, -// name: joke, -// argumentsRaw: {"setup":"Why don't bears like fast food?","punchline":"Because they can't catch it!"}, -// arguments: { -// setup: Why don't bears like fast food?, -// punchline: Because they can't catch it! -// }, -// } -// ], -// }, -// finishReason: FinishReason.stop, -// metadata: { -// model: gpt-4o-mini, -// created: 1714835806, -// system_fingerprint: fp_3b956da36b -// }, -// usage: LanguageModelUsage{ -// promptTokens: 77, -// responseTokens: 24, -// totalTokens: 101 -// }, -// streaming: false -// } -``` - -When designing tools to be used by a model, it is important to keep in mind that: - -- Chat models that have explicit [tool-calling APIs](/docs/concepts/#functiontool-calling) will be better at tool calling than non-fine-tuned models. -- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas. This another form of prompt engineering. -- Simple, narrowly scoped tools are easier for models to use than complex tools. - -#### Related - -For specifics on how to use tools, see the [tools how-to guides](/docs/how_to/#tools). - -To use a pre-built tool, see the [tool integration docs](/docs/integrations/tools/). - -### Agents -By themselves, language models can't take actions - they just output text. -A big use case for LangChain is creating agents. Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be. 
-The results of those actions can then be fed back into the agent and it determine whether more actions are needed, or whether it is okay to finish. - -### Callbacks -TODO: - - -### Techniques - -#### Streaming - -#### Function/tool calling - -#### Structured Output -LLMs are capable of generating arbitrary text. This enables the model to respond appropriately to a wide range of inputs, but for some use-cases, it can be useful to constrain the LLM's output to a specific format or structure. This is referred to as structured output. - - -#### Few-shot prompting - -#### Retrieval - -#### Text splitting - -#### Evaluation - -#### Tracing -##### \ No newline at end of file diff --git a/docs_v2/docs/05-integrations/anthropic.md b/docs_v2/docs/05-integrations/anthropic.md deleted file mode 100644 index b607ddc7..00000000 --- a/docs_v2/docs/05-integrations/anthropic.md +++ /dev/null @@ -1,145 +0,0 @@ -# ChatAnthropic - -Wrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API). - -## Setup - -The Anthropic API uses API keys for authentication. Visit your [API Keys](https://console.anthropic.com/settings/keys) page to retrieve the API key you'll use in your requests. - -The following models are available: -- `claude-3-5-sonnet-20240620` -- `claude-3-haiku-20240307` -- `claude-3-opus-20240229` -- `claude-3-sonnet-20240229` -- `claude-2.0` -- `claude-2.1` - -Mind that the list may not be up-to-date. See https://docs.anthropic.com/en/docs/about-claude/models for the updated list. - -## Usage - -```dart -final apiKey = Platform.environment['ANTHROPIC_API_KEY']; - -final chatModel = ChatAnthropic( - apiKey: apiKey, - defaultOptions: ChatAnthropicOptions( - model: 'claude-3-5-sonnet-20240620', - temperature: 0, - ), -); - -final chatPrompt = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), - (ChatMessageType.human, 'Text to translate:\n{text}'), -]); - -final chain = chatPrompt | chatModel | StringOutputParser(); - -final res = await chain.invoke({ - 'input_language': 'English', - 'output_language': 'French', - 'text': 'I love programming.', -}); -print(res); -// -> 'J'adore programmer.' -``` - -## Multimodal support - -```dart -final apiKey = Platform.environment['ANTHROPIC_API_KEY']; - -final chatModel = ChatAnthropic( - apiKey: apiKey, - defaultOptions: ChatAnthropicOptions( - model: 'claude-3-5-sonnet-20240620', - temperature: 0, - ), -); - -final res = await chatModel.invoke( - PromptValue.chat([ - ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - mimeType: 'image/jpeg', - data: base64.encode( - await File('./bin/assets/apple.jpeg').readAsBytes(), - ), - ), - ]), - ), - ]), -); - -print(res.output.content); -// -> 'The fruit in the image is an apple.' 
-``` - -## Streaming - -```dart -final apiKey = Platform.environment['ANTHROPIC_API_KEY']; - -final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), - (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), -]); - -final chatModel = ChatAnthropic( - apiKey: apiKey, - defaultOptions: ChatAnthropicOptions( - model: 'claude-3-5-sonnet-20240620', - temperature: 0, - ), -); - -final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser()); - -final stream = chain.stream({'max_num': '30'}); -await stream.forEach(print); -// 123 -// 456789101 -// 112131415161 -// 718192021222 -// 324252627282 -// 930 -``` - -## Tool calling - -`ChatAnthropic` supports tool calling. - -Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. - -Example: -```dart -const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - }, - 'required': ['location'], - }, -); -final chatModel = ChatAnthropic( - apiKey: apiKey, - defaultOptions: ChatAnthropicOptions( - model: 'claude-3-5-sonnet-20240620', - temperature: 0, - tools: [tool], - ), -); - -final res = await chatModel.invoke( - PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), -); -``` diff --git a/docs_v2/docs/05-integrations/anyscale.md b/docs_v2/docs/05-integrations/anyscale.md deleted file mode 100644 index 9a71c30e..00000000 --- a/docs_v2/docs/05-integrations/anyscale.md +++ /dev/null @@ -1,84 +0,0 @@ -# Anyscale - -[Anyscale](https://www.anyscale.com/) offers a unified OpenAI-compatible API for a broad range of [models](https://docs.endpoints.anyscale.com/guides/models/#chat-models) running serverless or on your own dedicated instances. - -It also allows you to fine-tune models on your own data or train new models from scratch. - -You can consume the Anyscale API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API.
- -The only difference is that you need to change the base URL to `https://api.endpoints.anyscale.com/v1`: - -```dart -final chatModel = ChatOpenAI( - apiKey: anyscaleApiKey, - baseUrl: 'https://api.endpoints.anyscale.com/v1', - defaultOptions: const ChatOpenAIOptions( - model: 'meta-llama/Llama-2-70b-chat-hf', - ), -); -``` - -## Invoke - -```dart -final anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY']; - -final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - ( - ChatMessageType.system, - 'You are a helpful assistant that translates {input_language} to {output_language}.', - ), - (ChatMessageType.human, '{text}'), -]); - -final chatModel = ChatOpenAI( - apiKey: anyscaleApiKey, - baseUrl: 'https://api.endpoints.anyscale.com/v1', - defaultOptions: const ChatOpenAIOptions( - model: 'meta-llama/Llama-2-70b-chat-hf', - ), -); - -final chain = promptTemplate | chatModel | StringOutputParser(); - -final res = await chain.invoke({ - 'input_language': 'English', - 'output_language': 'French', - 'text': 'I love programming.', -}); -print(res); -// -> "I love programming" se traduit en français sous la forme "J'aime passionnément la programmation" -``` - -## Stream - -```dart -final anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY']; - -final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - ( - ChatMessageType.system, - 'You are a helpful assistant that replies only with numbers ' - 'in order without any spaces or commas', - ), - (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), -]); - -final chatModel = ChatOpenAI( - apiKey: anyscaleApiKey, - baseUrl: 'https://api.endpoints.anyscale.com/v1', - defaultOptions: const ChatOpenAIOptions( - model: 'mistralai/Mixtral-8x7B-Instruct-v0.1', - ), -); - -final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); - -final stream = chain.stream({'max_num': '9'}); -await stream.forEach(print); -// 1 -// 2 -// 3 -// ... -// 9 -``` diff --git a/docs_v2/docs/05-integrations/firebase_vertex_ai.md b/docs_v2/docs/05-integrations/firebase_vertex_ai.md deleted file mode 100644 index cd33daa2..00000000 --- a/docs_v2/docs/05-integrations/firebase_vertex_ai.md +++ /dev/null @@ -1,190 +0,0 @@ -# Vertex AI for Firebase - -The [Vertex AI Gemini API](https://firebase.google.com/docs/vertex-ai) gives you access to the latest generative AI models from Google: the Gemini models. If you need to call the Vertex AI Gemini API directly from your mobile or web app you can use the `ChatFirebaseVertexAI` class instead of the [`ChatVertexAI`](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md) class which is designed to be used on the server-side. - -`ChatFirebaseVertexAI` is built specifically for use with mobile and web apps, offering security options against unauthorized clients as well as integrations with other Firebase services. - -## Key capabilities - -- **Multimodal input**: The Gemini models are multimodal, so prompts sent to the Gemini API can include text, images (even PDFs), video, and audio. -- **Growing suite of capabilities**: You can call the Gemini API directly from your mobile or web app, build an AI chat experience, use function calling, and more. -- **Security for production apps**: Use Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients. 
-- **Robust infrastructure**: Take advantage of scalable infrastructure that's built for use with mobile and web apps, like managing structured data with Firebase database offerings (like Cloud Firestore) and dynamically setting run-time configurations with Firebase Remote Config. - -## Setup - -### 1. Set up a Firebase project - -Check the [Firebase documentation](https://firebase.google.com/docs/vertex-ai/get-started?platform=flutter) for the latest information on how to set up the Vertex AI for Firebase in your Firebase project. - -In summary, you need to: -1. Upgrade your billing plan to the Blaze pay-as-you-go pricing plan. -2. Enable the required APIs (`aiplatform.googleapis.com` and `firebaseml.googleapis.com`). -3. Integrate the Firebase SDK into your app (if you haven't already). -4. Recommended: Enable Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients. - -### 2. Add the LangChain.dart Google package - -Add the `langchain_google` package to your `pubspec.yaml` file. - -```yaml -dependencies: - langchain: {version} - langchain_google: {version} -``` - -Internally, `langchain_google` uses the [`firebase_vertexai`](https://pub.dev/packages/firebase_vertexai) SDK to interact with the Vertex AI for Firebase API. - -### 3. Initialize your Firebase app - -```yaml -await Firebase.initializeApp(); -``` - -### 4. Call the Vertex AI Gemini API - -```dart -final chatModel = ChatFirebaseVertexAI(); -final chatPrompt = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), - (ChatMessageType.human, 'Text to translate:\n{text}'), -]); - -final chain = chatPrompt | chatModel | StringOutputParser(); - -final res = await chain.invoke({ - 'input_language': 'English', - 'output_language': 'French', - 'text': 'I love programming.', -}); -print(res); -// -> 'J'adore programmer.' -``` - -> Check out the [sample project](https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase/example) to see a complete project using Vertex AI for Firebase. - -## Available models - -The following models are available: -- `gemini-1.5-flash`: - * text / image / audio -> text model - * Max input token: 1048576 - * Max output tokens: 8192 -- `gemini-1.5-pro`: - * text / image / audio -> text model - * Max input token: 1048576 - * Max output tokens: 8192 -- `gemini-1.0-pro-vision`: - * text / image -> text model - * Max input token: 12288 - * Max output tokens: 4096 -- `gemini-1.0-pro` - * text -> text model - * Max input token: 30720 - * Max output tokens: 2048 - -Mind that this list may not be up-to-date. Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) for the updated list. - -## Multimodal support - -```dart -final chatModel = ChatFirebaseVertexAI( - defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro', - ), -); -final res = await chatModel.invoke( - PromptValue.chat([ - ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - mimeType: 'image/jpeg', - data: base64.encode( - await File('./bin/assets/apple.jpeg').readAsBytes(), - ), - ), - ]), - ), - ]), -); -print(res.output.content); -// -> 'That is an apple.' 
-``` - -## Streaming - -```dart -final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), - (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), -]); - -final chatModel = ChatFirebaseVertexAI( - defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro', - ), -); - -final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); - -final stream = chain.stream({'max_num': '30'}); -await stream.forEach(print); -// 1 -// 2345678910111213 -// 1415161718192021 -// 222324252627282930 -``` - -## Tool calling - -`ChatGoogleGenerativeAI` supports tool calling. - -Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. - -Example: -```dart -const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', - }, - }, - 'required': ['location'], - }, -); -final chatModel = ChatFirebaseVertexAI( - defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro', - temperature: 0, - tools: [tool], - ), -); -final res = await model.invoke( - PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), -); -``` - -## Prevent abuse with Firebase App Check - -You can use Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients. Check the [Firebase documentation](https://firebase.google.com/docs/vertex-ai/app-check) for more information. - -## Locations - -When initializing the Vertex AI service, you can optionally specify a location in which to run the service and access a model. If you don't specify a location, the default is us-central1. See the list of [available locations](https://firebase.google.com/docs/vertex-ai/locations?platform=flutter#available-locations). - -```dart -final chatModel = ChatFirebaseVertexAI( - location: 'us-central1', -); -``` - -## Alternatives - -- [`ChatVertexAI`](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md): Use this class to call the Vertex AI Gemini API from the server-side. -- [`ChatGoogleGenerativeAI`](/modules/model_io/models/chat_models/integrations/googleai.md): Use this class to call the "Google AI" version of the Gemini API that provides free-of-charge access (within limits and where available). This API is not intended for production use but for experimentation and prototyping. After you're familiar with how a Gemini API works, migrate to the Vertex AI for Firebase, which have many additional features important for mobile and web apps, like protecting the API from abuse using Firebase App Check. diff --git a/docs_v2/docs/05-integrations/gcp_vertex_ai.md b/docs_v2/docs/05-integrations/gcp_vertex_ai.md deleted file mode 100644 index 5417aab5..00000000 --- a/docs_v2/docs/05-integrations/gcp_vertex_ai.md +++ /dev/null @@ -1,116 +0,0 @@ -# GCP Chat Vertex AI - -Wrapper around [GCP Vertex AI chat models](https://cloud.google.com/vertex-ai/docs/generative-ai/chat/test-chat-prompts) API (aka PaLM API for chat). - -## Set up your Google Cloud Platform project - -1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). -2. 
[Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project). -3. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com). -4. [Configure the Vertex AI location](https://cloud.google.com/vertex-ai/docs/general/locations). - -### Authentication - -To create an instance of `ChatVertexAI` you need to provide an HTTP client that handles authentication. The easiest way to do this is to use [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html) from the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package. - -To create an instance of `VertexAI` you need to provide an [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html) instance. - -There are several ways to obtain an `AuthClient` depending on your use case. Check out the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package documentation for more details. - -Example using a service account JSON: - -```dart -final serviceAccountCredentials = ServiceAccountCredentials.fromJson( - json.decode(serviceAccountJson), -); -final authClient = await clientViaServiceAccount( - serviceAccountCredentials, - [ChatVertexAI.cloudPlatformScope], -); -final chatVertexAi = ChatVertexAI( - httpClient: authClient, - project: 'your-project-id', -); -``` - -The service account should have the following [permission](https://cloud.google.com/vertex-ai/docs/general/iam-permissions): -- `aiplatform.endpoints.predict` - -The required [OAuth2 scope](https://developers.google.com/identity/protocols/oauth2/scopes) is: -- `https://www.googleapis.com/auth/cloud-platform` (you can use the constant `ChatVertexAI.cloudPlatformScope`) - -See: https://cloud.google.com/vertex-ai/docs/generative-ai/access-control - -### Available models - -- `chat-bison` - * Max input token: 4096 - * Max output tokens: 1024 - * Training data: Up to Feb 2023 - * Max turns: 2500 -- `chat-bison-32k` - * Max input and output tokens combined: 32k - * Training data: Up to Aug 2023 - * Max turns: 2500 - -The previous list of models may not be exhaustive or up-to-date. Check out the [Vertex AI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models) for the latest list of available models. - -### Model options - -You can define default options to use when calling the model (e.g. temperature, stop sequences, etc. ) using the `defaultOptions` parameter. - -The default options can be overridden when calling the model using the `options` parameter. - -Example: -```dart -final chatModel = ChatVertexAI( - httpClient: authClient, - project: 'your-project-id', - defaultOptions: ChatVertexAIOptions( - temperature: 0.9, - ), -); -final result = await chatModel( - [ChatMessage.humanText('Hello')], - options: ChatVertexAIOptions( - temperature: 0.5, - ), -); -``` - -### Full example - -```dart -import 'package:langchain/langchain.dart'; -import 'package:langchain_google/langchain_google.dart'; - -void main() async { - final chat = ChatVertexAI( - httpClient: await _getAuthHttpClient(), - project: _getProjectId(), - defaultOptions: const ChatVertexAIOptions( - temperature: 0, - ), - ); - while (true) { - stdout.write('> '); - final usrMsg = ChatMessage.humanText(stdin.readLineSync() ?? 
''); - final aiMsg = await chat([usrMsg]); - print(aiMsg.content); - } -} - -Future _getAuthHttpClient() async { - final serviceAccountCredentials = ServiceAccountCredentials.fromJson( - json.decode(Platform.environment['VERTEX_AI_SERVICE_ACCOUNT']!), - ); - return clientViaServiceAccount( - serviceAccountCredentials, - [VertexAI.cloudPlatformScope], - ); -} - -String _getProjectId() { - return Platform.environment['VERTEX_AI_PROJECT_ID']!; -} -``` diff --git a/docs_v2/docs/05-integrations/googleai.md b/docs_v2/docs/05-integrations/googleai.md deleted file mode 100644 index 033c7672..00000000 --- a/docs_v2/docs/05-integrations/googleai.md +++ /dev/null @@ -1,149 +0,0 @@ -# ChatGoogleGenerativeAI - -Wrapper around [Google AI for Developers](https://ai.google.dev/) API (aka Gemini API). - -## Setup - -To use `ChatGoogleGenerativeAI` you need to have an API key. You can get one [here](https://aistudio.google.com/app/apikey). - -The following models are available: -- `gemini-1.5-flash`: - * text / image / audio -> text model - * Max input token: 1048576 - * Max output tokens: 8192 -- `gemini-1.5-pro`: text / image -> text model - * text / image / audio -> text model - * Max input token: 1048576 - * Max output tokens: 8192 -- `gemini-pro-vision`: - * text / image -> text model - * Max input token: 12288 - * Max output tokens: 4096 -- `gemini-1.0-pro` (or `gemini-pro`): - * text -> text model - * Max input token: 30720 - * Max output tokens: 2048 - -Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/models) for the updated list. - -## Usage - -```dart -final apiKey = Platform.environment['GOOGLEAI_API_KEY']; - -final chatModel = ChatGoogleGenerativeAI( - apiKey: apiKey, - defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro', - temperature: 0, - ), -); - -final chatPrompt = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), - (ChatMessageType.human, 'Text to translate:\n{text}'), -]); - -final chain = chatPrompt | chatModel | StringOutputParser(); - -final res = await chain.invoke({ - 'input_language': 'English', - 'output_language': 'French', - 'text': 'I love programming.', -}); -print(res); -// -> 'J'adore programmer.' -``` - -## Multimodal support - -```dart -final apiKey = Platform.environment['GOOGLEAI_API_KEY']; - -final chatModel = ChatGoogleGenerativeAI( - apiKey: apiKey, - defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro', - temperature: 0, - ), -); -final res = await chatModel.invoke( - PromptValue.chat([ - ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - mimeType: 'image/jpeg', - data: base64.encode( - await File('./bin/assets/apple.jpeg').readAsBytes(), - ), - ), - ]), - ), - ]), -); -print(res.output.content); -// -> 'That is an apple.' 
-``` - -## Streaming - -```dart -final apiKey = Platform.environment['GOOGLEAI_API_KEY']; - -final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), - (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), -]); - -final chatModel = ChatGoogleGenerativeAI( - apiKey: apiKey, - defaultOptions: const ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro', - temperature: 0, - ), -); - -final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); - -final stream = chain.stream({'max_num': '30'}); -await stream.forEach(print); -// 1 -// 2345678910111213 -// 1415161718192021 -// 222324252627282930 -``` - -## Tool calling - -`ChatGoogleGenerativeAI` supports tool calling. - -Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. - -Example: -```dart -const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', - }, - }, - 'required': ['location'], - }, -); -final chatModel = ChatGoogleGenerativeAI( - defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro', - temperature: 0, - tools: [tool], - ), -); -final res = await model.invoke( - PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), -); -``` diff --git a/docs_v2/docs/05-integrations/index.mdx b/docs_v2/docs/05-integrations/index.mdx deleted file mode 100644 index 35f38dfc..00000000 --- a/docs_v2/docs/05-integrations/index.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -sidebar_position: 0 -index: auto ---- -# Integrations - -> If you'd like to write your own integration, see Extending Langchain. - -The following table contains the list of existing Langchain.dart integration packages. To install a specific integration, see [Installing Langchain components](/docs/03-how_to/01-installation.md) - - -

    - -| Package | Version | Description | -|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | -| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | -| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components and utilities | -| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | -| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | -| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | -| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| -| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | -| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | - -Functionality provided by each integration package: - -| Package | LLMs | Chat models | Embeddings | Vector stores | Chains | Agents | Tools | -|---------------------------------------------------------------------|------|-------------|------------|---------------|--------|--------|-------| -| [langchain_community](https://pub.dev/packages/langchain_community) | | | | ✔ | | | ✔ | -| [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | | ✔ | ✔ | ✔ | -| [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | ✔ | | | | -| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | | ✔ | | | | | | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | | | | | -| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | | ✔ | ✔ | | | | | -| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | | | | ✔ | | | | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | | | | ✔ | | | | -| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | | | | ✔ | | | | - -The following packages are maintained (and used internally) by LangChain.dart, although they can also be used independently: - -| Package | Version | Description | -|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------| -| [anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | [Anthropic](https://docs.anthropic.com/en/api) API client | -| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | [Chroma DB](https://trychroma.com/) API client | -| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | [Google AI for Developers](https://ai.google.dev/) API client | -| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | [Mistral AI](https://docs.mistral.ai/api) API client | -| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | [Ollama](https://ollama.ai/) API client | -| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | [OpenAI](https://platform.openai.com/docs/api-reference) API 
client | -| [tavily_dart](https://pub.dev/packages/tavily_dart) | [![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) | [Tavily](https://tavily.com) API client | -| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | [GCP Vertex AI](https://cloud.google.com/vertex-ai) API client | - diff --git a/docs_v2/docs/05-integrations/mistralai.md b/docs_v2/docs/05-integrations/mistralai.md deleted file mode 100644 index 14f21fb1..00000000 --- a/docs_v2/docs/05-integrations/mistralai.md +++ /dev/null @@ -1,76 +0,0 @@ -# ChatMistralAI - -Wrapper around [Mistral AI](https://mistral.ai/) Chat Completions API. - -Mistral AI brings the strongest open generative models to the developers, along with efficient ways to deploy and customise them for production. - -> Note: Mistral AI API is currently in closed beta. You can request access [here](https://console.mistral.ai). - -## Setup - -To use `ChatMistralAI` you need to have a Mistral AI account and an API key. You can get one [here](https://console.mistral.ai/users/). - -The following models are available at the moment: -- `mistral-tiny`: Mistral 7B Instruct v0.2 (a minor release of Mistral 7B Instruct). It only works in English and obtains 7.6 on MT-Bench. -- `mistral-small`: Mixtral 8x7B. It masters English/French/Italian/German/Spanish and code and obtains 8.3 on MT-Bench. -- `mistral-medium`: a prototype model, that is currently among the top serviced models available based on standard benchmarks. It masters English/French/Italian/German/Spanish and code and obtains a score of 8.6 on MT-Bench. - -## Usage - -```dart -final chatModel = ChatMistralAI( - apiKey: 'apiKey', - defaultOptions: ChatMistralAIOptions( - model: 'mistral-small', - temperature: 0, - ), -); - -const template = 'You are a helpful assistant that translates {input_language} to {output_language}.'; -final systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(template); -const humanTemplate = '{text}'; -final humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(humanTemplate); -final chatPrompt = ChatPromptTemplate.fromPromptMessages( - [systemMessagePrompt, humanMessagePrompt], -); - -final chain = chatPrompt | chatModel | StringOutputParser(); - -final res = await chain.invoke({ - 'input_language': 'English', - 'output_language': 'French', - 'text': 'I love programming.', -}); -print(res); -// -> 'J'aime la programmation.' 
-``` - -## Streaming - -```dart -final promptTemplate = ChatPromptTemplate.fromPromptMessages([ - SystemChatMessagePromptTemplate.fromTemplate( - 'You are a helpful assistant that replies only with numbers ' - 'in order without any spaces or commas', - ), - HumanChatMessagePromptTemplate.fromTemplate( - 'List the numbers from 1 to {max_num}', - ), -]); -final chat = ChatMistralAI( - apiKey: 'apiKey', - defaultOptions: ChatMistralAIOptions( - model: 'mistral-medium', - temperature: 0, - ), -); - -final chain = promptTemplate.pipe(chat).pipe(StringOutputParser()); - -final stream = chain.stream({'max_num': '9'}); -await stream.forEach(print); -// 12 -// 345 -// 67 -// 89 -``` diff --git a/docs_v2/docs/05-integrations/ollama.md b/docs_v2/docs/05-integrations/ollama.md deleted file mode 100644 index e6cc5907..00000000 --- a/docs_v2/docs/05-integrations/ollama.md +++ /dev/null @@ -1,462 +0,0 @@ -# ChatOllama - -Wrapper around [Ollama](https://ollama.ai) Completions API that enables to interact with the LLMs in a chat-like fashion. - -Ollama allows you to run open-source large language models, such as Llama 3.1 or Gemma 2, locally. - -Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage. - -## Setup - -Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance: - -1. Download and install [Ollama](https://ollama.ai) -2. Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull llama3.1` -3. Instantiate the `ChatOllama` class with the downloaded model. - -```dart -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.1', - ), -); -``` - -For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library). - -### Ollama base URL - -By default, `ChatOllama` uses 'http://localhost:11434/api' as base URL (default Ollama API URL). But if you are running Ollama on a different host, you can override it using the `baseUrl` parameter. - -```dart -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions( - baseUrl: 'https://your-remote-server-where-ollama-is-running.com', - model: 'llama3.1', - ), -); -``` - -## Usage - -```dart -final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), - (ChatMessageType.human, '{text}'), -]); - -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.1', - temperature: 0, - ), -); - -final chain = promptTemplate | chatModel | StringOutputParser(); - -final res = await chain.invoke({ - 'input_language': 'English', - 'output_language': 'French', - 'text': 'I love programming.', -}); -print(res); -// -> 'La traduction est : "J'aime le programming.' -``` - -### Streaming - -Ollama supports streaming the output as the model generates it. 
- -```dart -final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas'), - (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), -]); -final chat = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.1', - temperature: 0, - ), -); -final chain = promptTemplate.pipe(chat).pipe(StringOutputParser()); - -final stream = chain.stream({'max_num': '9'}); -await stream.forEach(print); -// 123 -// 456 -// 789 -``` - -### Multimodal support - -Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava). - -You can provide several base64-encoded `png` or `jpeg` images. Images up to 100MB in size are supported. - -```dart -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llava', - temperature: 0, - ), -); -final prompt = ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - data: base64.encode( - await File('./bin/assets/apple.jpeg').readAsBytes(), - ), - ), - ]), -); -final res = await chatModel.invoke(PromptValue.chat([prompt])); -print(res.output.content); -// -> 'An Apple' -``` - -### Tool calling - -`ChatOllama` offers support for native tool calling. This enables a model to answer a given prompt using tool(s) it knows about, making it possible for models to perform more complex tasks or interact with the outside world. It follows the standard [LangChain.dart tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). - -**Notes:** -- Tool calling requires [Ollama 0.3.0](https://github.com/ollama/ollama/releases/tag/v0.3.0) or newer. -- Streaming tool calls is not supported at the moment. -- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1) or [`llama3-groq-tool-use`](https://ollama.com/library/llama3-groq-tool-use)). -- At the moment, small models like `llama3.1` [cannot reliably maintain a conversation alongside tool calling definitions](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#llama-3.1-instruct). They can be used for zero-shot tool calling, but for multi-turn conversations it's recommended to use larger models like `llama3.1:70b` or `llama3.1:405b`. - -```dart -const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. 
San Francisco, US', - }, - }, - 'required': ['location'], - }, -); - -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.1', - temperature: 0, - tools: [tool], - ), -); - -final res = await chatModel.invoke( - PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), -); -print(res.output.toolCalls); -// [AIChatMessageToolCall{ -// id: a621064b-03b3-4ca6-8278-f37504901034, -// name: get_current_weather, -// arguments: {location: Boston, US}, -// }, -// AIChatMessageToolCall{ -// id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, -// name: get_current_weather, -// arguments: {location: Madrid, ES}, -// }] -``` - -As you can see, `ChatOllama` support calling multiple tools in a single request. - -If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter: - -```dart -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.1', - temperature: 0, - tools: [tool], - toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), - ), -); -``` - -**Pro-tip:** You can improve tool-calling performance of small models by using few-shot prompting. You can find out how to do this [here](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools?id=few-shot-prompting) and in this [blog post](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance). - -### JSON mode - -You can force the model to produce JSON output that you can easily parse using `JsonOutputParser`, useful for extracting structured data. - -```dart -final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - (ChatMessageType.system, 'You are an assistant that respond question using JSON format.'), - (ChatMessageType.human, '{question}'), -]); -final chat = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.1', - temperature: 0, - format: OllamaResponseFormat.json, - ), -); - -final chain = Runnable.getMapFromInput('question') - .pipe(promptTemplate) - .pipe(chat) - .pipe(JsonOutputParser()); - -final res = await chain.invoke( - 'What is the population of Spain, The Netherlands, and France?', -); -print(res); -// {Spain: 46735727, The Netherlands: 17398435, France: 65273538} -``` - -## Examples - -### Answering questions with data from an external API - -Imagine you have an API that provides flight times between two cities: - -```dart -// Simulates an API call to get flight times -// In a real application, this would fetch data from a live database or API -String getFlightTimes(String departure, String arrival) { - final flights = { - 'NYC-LAX': { - 'departure': '08:00 AM', - 'arrival': '11:30 AM', - 'duration': '5h 30m', - }, - 'LAX-NYC': { - 'departure': '02:00 PM', - 'arrival': '10:30 PM', - 'duration': '5h 30m', - }, - 'LHR-JFK': { - 'departure': '10:00 AM', - 'arrival': '01:00 PM', - 'duration': '8h 00m', - }, - 'JFK-LHR': { - 'departure': '09:00 PM', - 'arrival': '09:00 AM', - 'duration': '7h 00m', - }, - 'CDG-DXB': { - 'departure': '11:00 AM', - 'arrival': '08:00 PM', - 'duration': '6h 00m', - }, - 'DXB-CDG': { - 'departure': '03:00 AM', - 'arrival': '07:30 AM', - 'duration': '7h 30m', - }, - }; - - final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}'; - return jsonEncode(flights[key] ?? {'error': 'Flight not found'}); -} -``` - -Using the tool calling capabilities of Ollama, we can provide the model with the ability to call this API whenever it needs to get flight times to answer a question. 
- -```dart -const getFlightTimesTool = ToolSpec( - name: 'get_flight_times', - description: 'Get the flight times between two cities', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'departure': { - 'type': 'string', - 'description': 'The departure city (airport code)', - }, - 'arrival': { - 'type': 'string', - 'description': 'The arrival city (airport code)', - }, - }, - 'required': ['departure', 'arrival'], - }, -); - -final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions( - model: 'llama3.1', - temperature: 0, - tools: [getFlightTimesTool], - ), -); - -final messages = [ - ChatMessage.humanText( - 'What is the flight time from New York (NYC) to Los Angeles (LAX)?', - ), -]; - -// First API call: Send the query and function description to the model -final response = await chatModel.invoke(PromptValue.chat(messages)); - -messages.add(response.output); - -// Check if the model decided to use the provided function -if (response.output.toolCalls.isEmpty) { - print("The model didn't use the function. Its response was:"); - print(response.output.content); - return; -} - -// Process function calls made by the model -for (final toolCall in response.output.toolCalls) { - final functionResponse = getFlightTimes( - toolCall.arguments['departure'], - toolCall.arguments['arrival'], - ); - // Add function response to the conversation - messages.add( - ChatMessage.tool( - toolCallId: toolCall.id, - content: functionResponse, - ), - ); -} - -// Second API call: Get final response from the model -final finalResponse = await chatModel.invoke(PromptValue.chat(messages)); -print(finalResponse.output.content); -// The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes. -``` - -### Extracting structured data with tools - -A useful application of tool calling is extracting structured data from unstructured text. In the following example, we use a tool to extract the names, heights, and hair colors of people mentioned in a passage. - -```dart -const tool = ToolSpec( - name: 'information_extraction', - description: 'Extracts the relevant information from the passage', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'people': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': { - 'type': 'string', - 'description': 'The name of a person', - }, - 'height': { - 'type': 'number', - 'description': 'The height of the person in cm', - }, - 'hair_color': { - 'type': 'string', - 'description': 'The hair color of the person', - 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], - }, - }, - 'required': ['name', 'height', 'hair_color'], - }, - }, - }, - 'required': ['people'], - }, -); - -final model = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.1', - temperature: 0, - tools: [tool], - toolChoice: ChatToolChoice.forced(name: tool.name), - ), -); - -final promptTemplate = ChatPromptTemplate.fromTemplate(''' -Extract and save the relevant entities mentioned in the following passage together with their properties. - -Passage: -{input}'''); - -final chain = Runnable.getMapFromInput() - .pipe(promptTemplate) - .pipe(model) - .pipe(ToolsOutputParser()); - -final res = await chain.invoke( - 'Alex is 5 feet tall. ' - 'Claudia is 1 foot taller than Alex and jumps higher than him. 
' -'Claudia has orange hair and Alex is blonde.', -); -final extractedData = res.first.arguments; -print(extractedData); -// { -// people: [ -// { -// name: Alex, -// height: 152, -// hair_color: blonde -// }, -// { -// name: Claudia, -// height: 183, -// hair_color: orange -// } -// ] -// } -``` - -### RAG (Retrieval-Augmented Generation) pipeline - -We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `ChatOllama`. - -```dart -// 1. Create a vector store and add documents to it -final vectorStore = MemoryVectorStore( - embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), -); -await vectorStore.addDocuments( - documents: [ - Document(pageContent: 'LangChain was created by Harrison'), - Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'), - ], -); - -// 2. Construct a RAG prompt template -final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'Answer the question based only on the following context:\n{context}'), - (ChatMessageType.human, '{question}'), -]); - -// 3. Define the model to use and the vector store retriever -final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3.1'), -); -final retriever = vectorStore.asRetriever( - defaultOptions: VectorStoreRetrieverOptions( - searchType: VectorStoreSimilaritySearch(k: 1), - ), -); - -// 4. Create a Runnable that combines the retrieved documents into a single string -final docCombiner = Runnable.mapInput<List<Document>, String>((docs) { - return docs.map((final d) => d.pageContent).join('\n'); -}); - -// 5. Define the RAG pipeline -final chain = Runnable.fromMap({ - 'context': retriever.pipe(docCombiner), - 'question': Runnable.passthrough(), -}).pipe(promptTemplate).pipe(chatModel).pipe(StringOutputParser()); - -// 6. Run the pipeline -final res = await chain.invoke('Who created LangChain.dart?'); -print(res); -// Based on the context provided, David created LangChain.dart. -``` diff --git a/docs_v2/docs/05-integrations/open_router.md b/docs_v2/docs/05-integrations/open_router.md deleted file mode 100644 index c2d63555..00000000 --- a/docs_v2/docs/05-integrations/open_router.md +++ /dev/null @@ -1,157 +0,0 @@ -# OpenRouter - -[OpenRouter](https://openrouter.ai/) offers a unified OpenAI-compatible API for a broad range of [models](https://openrouter.ai/models). - -You can also let users pay for their own models via their [OAuth PKCE](https://openrouter.ai/docs#oauth) flow. - -You can consume the OpenRouter API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API. - -The only difference is that you need to change the base URL to `https://openrouter.ai/api/v1`: - -```dart -final chatModel = ChatOpenAI( - apiKey: openRouterApiKey, - baseUrl: 'https://openrouter.ai/api/v1', - defaultOptions: const ChatOpenAIOptions( - model: 'mistralai/mistral-small', - ), -); -``` - -OpenRouter allows you to specify an optional `HTTP-Referer` header to identify your app and make it discoverable to users on openrouter.ai. You can also include an optional `X-Title` header to set or modify the title of your app. 
- -```dart - final chatModel = ChatOpenAI( - apiKey: openRouterApiKey, - baseUrl: 'https://openrouter.ai/api/v1', - headers: { - 'HTTP-Referer': 'com.myapp', - 'X-Title': 'OpenRouterTest', - }, - defaultOptions: const ChatOpenAIOptions( - model: 'mistralai/mistral-small', - ), -); -``` - -## Invoke - -```dart -final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; - -final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - ( - ChatMessageType.system, - 'You are a helpful assistant that translates {input_language} to {output_language}.', - ), - (ChatMessageType.human, '{text}'), -]); - -final chatModel = ChatOpenAI( - apiKey: openRouterApiKey, - baseUrl: 'https://openrouter.ai/api/v1', - defaultOptions: const ChatOpenAIOptions( - model: 'mistralai/mistral-small', - ), -); - -final chain = promptTemplate | chatModel | StringOutputParser(); - -final res = await chain.invoke({ - 'input_language': 'English', - 'output_language': 'French', - 'text': 'I love programming.', -}); -print(res); -// -> 'J'aime la programmation.' -``` - -## Stream - -```dart -final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; - -final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - ( - ChatMessageType.system, - 'You are a helpful assistant that replies only with numbers ' - 'in order without any spaces or commas', - ), - (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), -]); - -final chatModel = ChatOpenAI( - apiKey: openRouterApiKey, - baseUrl: 'https://openrouter.ai/api/v1', - defaultOptions: const ChatOpenAIOptions( - model: 'mistralai/mistral-small', - ), -); - -final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); - -final stream = chain.stream({'max_num': '9'}); -await stream.forEach(print); -// 123 -// 456789 -``` - -## Tool calling - -OpenRouter supports [tool calling](https://openrouter.ai/docs#tool-calls). - -Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. - -In the following example we use the `joke` tool to generate jokes. We stream the joke generation using the `ToolsOutputParser' which tries to "auto-complete" the partial json from each chunk into a valid state. 
- -```dart -final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; -const tool = ToolSpec( - name: 'joke', - description: 'A joke', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'setup': { - 'type': 'string', - 'description': 'The setup for the joke', - }, - 'punchline': { - 'type': 'string', - 'description': 'The punchline to the joke', - }, - }, - 'required': ['location', 'punchline'], - }, -); -final promptTemplate = ChatPromptTemplate.fromTemplate( - 'tell me a long joke about {foo}', -); -final chat = ChatOpenAI( - apiKey: openRouterApiKey, - baseUrl: 'https://openrouter.ai/api/v1', - defaultOptions: ChatOpenAIOptions( - model: 'gpt-4o', - tools: [tool], - toolChoice: ChatToolChoice.forced(name: 'joke'), - ), -); -final outputParser = ToolsOutputParser(); - -final chain = promptTemplate.pipe(chat).pipe(outputParser); - -final stream = chain.stream({'foo': 'bears'}); -await for (final chunk in stream) { - final args = chunk.first.arguments; - print(args); -} -// {} -// {setup: } -// {setup: Why don't} -// {setup: Why don't bears} -// {setup: Why don't bears like fast food} -// {setup: Why don't bears like fast food?, punchline: } -// {setup: Why don't bears like fast food?, punchline: Because} -// {setup: Why don't bears like fast food?, punchline: Because they can't} -// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} -``` diff --git a/docs_v2/docs/05-integrations/openai.md b/docs_v2/docs/05-integrations/openai.md deleted file mode 100644 index 6b3ccbbc..00000000 --- a/docs_v2/docs/05-integrations/openai.md +++ /dev/null @@ -1,372 +0,0 @@ -# OpenAI - -This notebook provides a quick overview for getting started with [OpenAI](https://platform.openai.com/docs/introduction) chat models. For detailed documentation of all `ChatOpenAI` features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest/langchain_openai/ChatOpenAI-class.html). - -OpenAI has several chat models. You can find information about their latest models and their costs, context windows, and supported input types in the [OpenAI docs](https://platform.openai.com/docs/models). - -> Note that certain OpenAI models can also be accessed via the [Microsoft Azure platform](https://azure.microsoft.com/en-us/products/ai-services/openai-service). Check out the API reference for more information on how to use the Azure with `ChatOpenAI`. - -## Setup - -To access OpenAI models you'll need to create an OpenAI account, get an API key, and install the [langchain_openai](https://pub.dev/packages/langchain_openai) integration package. - -### Credentials - -Head to the [OpenAI Platform](https://platform.openai.com), sign up and get your [API key](https://platform.openai.com/account/api-keys). 
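Once you have a key, a common approach is to export it as the `OPENAI_API_KEY` environment variable (the same variable used in the examples below) and read it at runtime with `dart:io`. The following is a minimal sketch; the error message and `main` wrapper are illustrative, not part of the package:

```dart
import 'dart:io';

void main() {
  // Read the key exported in your shell, e.g. `export OPENAI_API_KEY="sk-..."`.
  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
  if (openaiApiKey == null) {
    stderr.writeln('Please set the OPENAI_API_KEY environment variable.');
    exit(1);
  }
  print('API key loaded (${openaiApiKey.length} characters).');
}
```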
- -### Installation - -The LangChain.dart OpenAI integration lives in the [langchain_openai](https://pub.dev/packages/langchain_openai) package: - -```bash -dart pub add langchain_openai -``` - -## Usage - -### Instantiation - -Now we can instantiate our model object and generate chat completions: - -```dart -final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - -final chatModel = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - model: 'gpt-4o', - temperature: 0, - // ...other options - ), -); -``` - -If you are using a proxy, you can override the base URL, headers, and other options: - -```dart -final client = ChatOpenAI( - baseUrl: 'https://my-proxy.com', - headers: {'x-my-proxy-header': 'value'}, -); -``` - -### Invocation - -Now you can generate completions by calling the `invoke` method: - -```dart -final messages = [ - ChatMessage.system('You are a helpful assistant that translates English to French.'), - ChatMessage.humanText('I love programming.'), -]; -final prompt = PromptValue.chat(messages); -final res = await chatModel.invoke(prompt); -// -> 'J'adore la programmation.' -``` - -### Chaining - -We can chain our model with a prompt template or output parser to create a more complex pipeline: - -```dart -final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), - (ChatMessageType.human, '{text}'), -]); - -final chain = promptTemplate | chatModel | StringOutputParser(); - -final res = await chain.invoke({ - 'input_language': 'English', - 'output_language': 'French', - 'text': 'I love programming.', -}); -print(res); -// -> 'J'adore la programmation.' -``` - -### Streaming - -OpenAI models support [streaming](/expression_language/streaming.md) the output of the model as it is generated. - -```dart -final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - -final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, - 'You are a helpful assistant that replies only with numbers ' - 'in order without any spaces or commas', - ), - (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), -]); - -final chat = ChatOpenAI(apiKey: openaiApiKey); - -final chain = promptTemplate.pipe(chat).pipe(StringOutputParser()); - -final stream = chain.stream({'max_num': '9'}); -await stream.forEach(print); -// 123 -// 456 -// 789 -``` - -### Multimodal support - -OpenAI's models have [vision capabilities](https://platform.openai.com/docs/guides/vision), meaning the models can take in images and answer questions about them. 
- -You can send the image as a base64-encoded string: - -```dart -final prompt = PromptValue.chat([ - ChatMessage.system('You are a helpful assistant.'), - ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - mimeType: 'image/jpeg', - data: '/9j/4AAQSkZJRgABAQAAAQABAAD...Rdu1j//2Q==', // base64-encoded image - ), - ]), - ), -]); -``` - -Or you can send the URL where the image is hosted: - -```dart -final prompt = PromptValue.chat([ - ChatMessage.system('You are a helpful assistant.'), - ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - data: 'https://upload.wikimedia.org/wikipedia/commons/9/92/95apple.jpeg', - ), - ]), - ), -]); -``` - -### Tool calling - -OpenAI has a [tool calling](/modules/model_io/models/chat_models/how_to/tools.md) (we use "tool calling" and "function calling" interchangeably here) API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. Tool calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally. - - -```dart -const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. San Francisco, US', - }, - }, - 'required': ['location'], - }, -); - -final chatModel = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - model: 'gpt-4o', - temperature: 0, - tools: [tool], - ), -); - -final res = await chatModel.invoke( - PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), -); -print(res.output.toolCalls); -// [AIChatMessageToolCall{ -// id: a621064b-03b3-4ca6-8278-f37504901034, -// name: get_current_weather, -// arguments: {location: Boston, US}, -// }, -// AIChatMessageToolCall{ -// id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, -// name: get_current_weather, -// arguments: {location: Madrid, ES}, -// }] -``` - -Notice that the returned `AIChatMessage` has a `toolCalls` field. This contains the tool calls in a standardized format that is model-provider agnostic. - -You can also stream OpenAI tool calls. 
`ToolsOutputParser` is a useful tool for this case, as it concatenates the chunks progressively and tries to complete the partial JSON into a valid one: - -```dart -const tool = ToolSpec( - name: 'joke', - description: 'A joke', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'setup': { - 'type': 'string', - 'description': 'The setup for the joke', - }, - 'punchline': { - 'type': 'string', - 'description': 'The punchline to the joke', - }, - }, - 'required': ['location', 'punchline'], - }, -); -final promptTemplate = ChatPromptTemplate.fromTemplate( - 'tell me a long joke about {foo}', -); -final chat = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - tools: [tool], - toolChoice: ChatToolChoice.forced(name: 'joke'), - ), -); -final outputParser = ToolsOutputParser(); - -final chain = promptTemplate.pipe(chat).pipe(outputParser); - -final stream = chain.stream({'foo': 'bears'}); -await for (final chunk in stream) { - final args = chunk.first.arguments; - print(args); -} -// {} -// {setup: } -// {setup: Why don't} -// {setup: Why don't bears} -// {setup: Why don't bears like fast food} -// {setup: Why don't bears like fast food?, punchline: } -// {setup: Why don't bears like fast food?, punchline: Because} -// {setup: Why don't bears like fast food?, punchline: Because they can't} -// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} -``` - -### Structured Outputs - -[Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) is a feature that ensures the model will always generate responses that adhere to your supplied JSON Schema, so you don't need to worry about the model omitting a required key, or hallucinating an invalid enum value. - -```dart -final prompt = PromptValue.chat([ - ChatMessage.system( - 'Extract the data of any companies mentioned in the ' - 'following statement. Return a JSON list.', - ), - ChatMessage.humanText( - 'Google was founded in the USA, while Deepmind was founded in the UK', - ), -]); -final chatModel = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - model: 'gpt-4o', - temperature: 0, - responseFormat: ChatOpenAIResponseFormat.jsonSchema( - ChatOpenAIJsonSchema( - name: 'Companies', - description: 'A list of companies', - strict: true, - schema: { - 'type': 'object', - 'properties': { - 'companies': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': {'type': 'string'}, - 'origin': {'type': 'string'}, - }, - 'additionalProperties': false, - 'required': ['name', 'origin'], - }, - }, - }, - 'additionalProperties': false, - 'required': ['companies'], - }, - ), - ), - ), -); - -final res = await chatModel.invoke(prompt); -// { -// "companies": [ -// { -// "name": "Google", -// "origin": "USA" -// }, -// { -// "name": "Deepmind", -// "origin": "UK" -// } -// ] -// } -``` - -When you use `strict: true`, the model outputs will match the supplied schema exactly. Mind that the strict mode only support a [subset of JSON schema](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas) for performance reasons. Under-the-hood, OpenAI uses a technique known as constrained sampling or constrained decoding. For each JSON Schema, they compute a grammar that represents that schema, and pre-process its components to make it easily accessible during model sampling. This is why the first request with a new schema incurs a latency penalty. 
Typical schemas take under 10 seconds to process on the first request, but more complex schemas may take up to a minute. - -### JSON mode - -When [JSON mode](https://platform.openai.com/docs/guides/structured-outputs/json-mode) is turned on, the model's output is ensured to be valid JSON. You can use it in combination with a `JsonOutputParser` to parse the response into a JSON map. - -> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It is recommended that you use Structured Outputs if it is supported for your use case. - -```dart -final prompt = PromptValue.chat([ - ChatMessage.system( - "Extract the 'name' and 'origin' of any companies mentioned in the " - 'following statement. Return a JSON list.', - ), - ChatMessage.humanText( - 'Google was founded in the USA, while Deepmind was founded in the UK', - ), -]); -final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', - temperature: 0, - responseFormat: ChatOpenAIResponseFormat.jsonObject, - ), -); -final chain = llm.pipe(JsonOutputParser()); -final res = await chain.invoke(prompt); -print(res); -// { -// "companies": [ -// { -// "name": "Google", -// "origin": "USA" -// }, -// { -// "name": "Deepmind", -// "origin": "UK" -// } -// ] -// } -``` - -### Fine-tuning - -You can call [fine-tuned OpenAI models](https://platform.openai.com/docs/guides/fine-tuning) by passing in your corresponding modelName parameter. - -This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. For example: - -```dart -final chatModel = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - model: 'ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR' - ), -); -``` - -## API reference - -For detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest). diff --git a/docs_v2/docs/05-integrations/prem.md b/docs_v2/docs/05-integrations/prem.md deleted file mode 100644 index 65258f7c..00000000 --- a/docs_v2/docs/05-integrations/prem.md +++ /dev/null @@ -1,24 +0,0 @@ -# Prem App - -You can easily run local models using [Prem app](https://www.premai.io/#PremApp). -It creates a local server that exposes a REST API with the same interface as -the OpenAI API. - -```dart -const localUrl = 'http://localhost:8000'; // Check Prem app for the actual URL -final chat = ChatOpenAI(baseUrl: localUrl); - -const template = 'You are a helpful assistant that translates {input_language} to {output_language}.'; -final systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(template); -const humanTemplate = '{text}'; -final humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(humanTemplate); - -final chatPrompt = ChatPromptTemplate.fromPromptMessages([systemMessagePrompt, humanMessagePrompt]); -final formattedPrompt = chatPrompt.formatPrompt({ - 'input_language': 'English', - 'output_language': 'French', - 'text': 'I love programming.' 
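  // formatPrompt() substitutes the {input_language}, {output_language} and {text}
  // placeholders defined above; toChatMessages() then converts the resulting prompt
  // value into the list of chat messages that predictMessages() expects.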
-}).toChatMessages(); - -final output = chat.predictMessages(formattedPrompt); -``` diff --git a/docs_v2/docs/05-integrations/together_ai.md b/docs_v2/docs/05-integrations/together_ai.md deleted file mode 100644 index 10567455..00000000 --- a/docs_v2/docs/05-integrations/together_ai.md +++ /dev/null @@ -1,84 +0,0 @@ -# Together AI - -[Together AI](https://www.together.ai) offers a unified OpenAI-compatible API for a broad range of [models](https://api.together.xyz/playground) running serverless or on your own dedicated instances. - -It also allows to fine-tune models on your own data or train new models from scratch. - -You can consume Together AI API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API. - -The only difference is that you need to change the base URL to `https://api.together.xyz/v1`: - -```dart -final chatModel = ChatOpenAI( - apiKey: togetherAiApiKey, - baseUrl: 'https://api.together.xyz/v1', - defaultOptions: const ChatOpenAIOptions( - model: 'mistralai/Mistral-7B-Instruct-v0.2', - ), -); -``` - -## Invoke - -```dart -final togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY']; - -final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - ( - ChatMessageType.system, - 'You are a helpful assistant that translates {input_language} to {output_language}.', - ), - (ChatMessageType.human, '{text}'), -]); - -final chatModel = ChatOpenAI( - apiKey: togetherAiApiKey, - baseUrl: 'https://api.together.xyz/v1', - defaultOptions: const ChatOpenAIOptions( - model: 'mistralai/Mistral-7B-Instruct-v0.2', - ), -); - -final chain = promptTemplate | chatModel | StringOutputParser(); - -final res = await chain.invoke({ - 'input_language': 'English', - 'output_language': 'French', - 'text': 'I love programming.', -}); -print(res); -// -> 'J'aime programmer' -``` - -## Stream - -```dart -final togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY']; - -final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - ( - ChatMessageType.system, - 'You are a helpful assistant that replies only with numbers ' - 'in order without any spaces or commas', - ), - (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), -]); - -final chatModel = ChatOpenAI( - apiKey: togetherAiApiKey, - baseUrl: 'https://api.together.xyz/v1', - defaultOptions: const ChatOpenAIOptions( - model: 'mistralai/Mistral-7B-Instruct-v0.2', - ), -); - -final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); - -final stream = chain.stream({'max_num': '9'}); -await stream.forEach(print); -// 1 -// 2 -// 3 -// ... -// 9 -``` diff --git a/docs_v2/docs/05-integrations/tools/index.mdx b/docs_v2/docs/05-integrations/tools/index.mdx deleted file mode 100644 index 211b41de..00000000 --- a/docs_v2/docs/05-integrations/tools/index.mdx +++ /dev/null @@ -1,5 +0,0 @@ ---- -sidebar_position: 0 -index: auto ---- -# Tools \ No newline at end of file diff --git a/docs_v2/docs/05-integrations/tools/tavily_search.md b/docs_v2/docs/05-integrations/tools/tavily_search.md deleted file mode 100644 index 2f3d461d..00000000 --- a/docs_v2/docs/05-integrations/tools/tavily_search.md +++ /dev/null @@ -1,13 +0,0 @@ -# Tavily Search - -[Tavily's Search API](https://tavily.com) is a search engine built specifically for AI agents (LLMs), delivering real-time, accurate, and factual results at speed. - -## Setup - -The integration lives in the `langchain-community` package. We also need to install the `tavily-python` package itself. 
- -```bash -dart pub add langchain langchain_community -``` - -We also need to set our Tavily API key. \ No newline at end of file diff --git a/docs_v2/docusaurus.config.js b/docs_v2/docusaurus.config.js deleted file mode 100644 index 1376cddc..00000000 --- a/docs_v2/docusaurus.config.js +++ /dev/null @@ -1,130 +0,0 @@ -// @ts-check -// `@type` JSDoc annotations allow editor autocompletion and type checking -// (when paired with `@ts-check`). -// There are various equivalent ways to declare your Docusaurus config. -// See: https://docusaurus.io/docs/api/docusaurus-config - -import { themes as prismThemes } from 'prism-react-renderer'; - -/** @type {import('@docusaurus/types').Config} */ -const config = { - title: 'LangChain.dart', - tagline: 'LangChain.dart Docs', - favicon: 'img/favicon.ico', - - // Set the production url of your site here - url: 'https://beta.langchaindart.dev/', - // Set the // pathname under which your site is served - // For GitHub pages deployment, it is often '//' - baseUrl: '/', - - // GitHub pages deployment config. - // If you aren't using GitHub pages, you don't need these. - organizationName: 'davidmigloz', // Usually your GitHub org/user name. - projectName: 'langchain_dart', // Usually your repo name. - - onBrokenLinks: 'warn', - onBrokenMarkdownLinks: 'warn', - - // Even if you don't use internationalization, you can use this field to set - // useful metadata like html lang. For example, if your site is Chinese, you - // may want to replace "en" with "zh-Hans". - i18n: { - defaultLocale: 'en', - locales: ['en'], - }, - - presets: [ - [ - 'classic', - /** @type {import('@docusaurus/preset-classic').Options} */ - ({ - docs: { - sidebarPath: './sidebars.js', - // Please change this to your repo. - // Remove this to remove the "edit this page" links. 
- editUrl: - 'https://github.com/davidmigloz/langchain_dart/tree/main/docs_v2/', - }, - theme: { - customCss: './src/css/custom.css', - }, - }), - ], - ], - - themeConfig: - /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ - ({ - // Replace with your project's social card - image: 'img/langchain.dart.png', - navbar: { - title: 'LangChain.dart', - logo: { - alt: 'LangChain Logo', - src: 'img/favicon.ico', - }, - items: [ - { - to: '/docs/integrations', - position: 'left', - label: 'Integrations', - }, - {to: 'https://blog.langchaindart.dev/blog', label: 'Blog', position: 'left'}, - { - href: 'https://github.com/davidmigloz/langchain_dart/', - label: 'GitHub', - position: 'right', - }, - ], - }, - footer: { - style: 'dark', - links: [ - { - title: 'Docs', - items: [ - { - label: 'Tutorial', - to: '/docs/intro', - }, - ], - }, - { - title: 'Community', - items: [ - { - label: 'Discord', - href: 'https://discord.gg/x4qbhqecVR', - }, - { - label: 'pub.dev', - href: 'https://pub.dev/packages/langchain', - }, - ], - }, - { - title: 'More', - items: [ - { - label: 'Blog', - to: '/blog', - }, - { - label: 'GitHub', - href: 'https://github.com/davidmigloz/langchain_dart', - }, - ], - }, - ], - copyright: `Made with 💙 by the LangChain.dart Community`, - }, - prism: { - theme: prismThemes.vsLight, - darkTheme: prismThemes.vsDark, - additionalLanguages: ['yaml','dart','bash'], - }, - }), -}; - -export default config; diff --git a/docs_v2/firebase.json b/docs_v2/firebase.json deleted file mode 100644 index 340ed5b7..00000000 --- a/docs_v2/firebase.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "hosting": { - "public": "build", - "ignore": [ - "firebase.json", - "**/.*", - "**/node_modules/**" - ], - "rewrites": [ - { - "source": "**", - "destination": "/index.html" - } - ] - } -} diff --git a/docs_v2/package-lock.json b/docs_v2/package-lock.json deleted file mode 100644 index ebb2ff9e..00000000 --- a/docs_v2/package-lock.json +++ /dev/null @@ -1,14678 +0,0 @@ -{ - "name": "langchain-dart", - "version": "0.0.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "langchain-dart", - "version": "0.0.0", - "dependencies": { - "@docusaurus/core": "^3.5.2", - "@docusaurus/preset-classic": "^3.5.2", - "@mdx-js/react": "^3.0.0", - "clsx": "^2.0.0", - "prism-react-renderer": "^2.3.0", - "react": "^18.0.0", - "react-dom": "^18.0.0" - }, - "devDependencies": { - "@docusaurus/module-type-aliases": "^3.5.2", - "@docusaurus/types": "^3.5.2" - }, - "engines": { - "node": ">=18.0" - } - }, - "node_modules/@algolia/autocomplete-core": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.9.3.tgz", - "integrity": "sha512-009HdfugtGCdC4JdXUbVJClA0q0zh24yyePn+KUGk3rP7j8FEe/m5Yo/z65gn6nP/cM39PxpzqKrL7A6fP6PPw==", - "dependencies": { - "@algolia/autocomplete-plugin-algolia-insights": "1.9.3", - "@algolia/autocomplete-shared": "1.9.3" - } - }, - "node_modules/@algolia/autocomplete-plugin-algolia-insights": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.9.3.tgz", - "integrity": "sha512-a/yTUkcO/Vyy+JffmAnTWbr4/90cLzw+CC3bRbhnULr/EM0fGNvM13oQQ14f2moLMcVDyAx/leczLlAOovhSZg==", - "dependencies": { - "@algolia/autocomplete-shared": "1.9.3" - }, - "peerDependencies": { - "search-insights": ">= 1 < 3" - } - }, - "node_modules/@algolia/autocomplete-preset-algolia": { - "version": "1.9.3", - "resolved": 
"https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.9.3.tgz", - "integrity": "sha512-d4qlt6YmrLMYy95n5TB52wtNDr6EgAIPH81dvvvW8UmuWRgxEtY0NJiPwl/h95JtG2vmRM804M0DSwMCNZlzRA==", - "dependencies": { - "@algolia/autocomplete-shared": "1.9.3" - }, - "peerDependencies": { - "@algolia/client-search": ">= 4.9.1 < 6", - "algoliasearch": ">= 4.9.1 < 6" - } - }, - "node_modules/@algolia/autocomplete-shared": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.9.3.tgz", - "integrity": "sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ==", - "peerDependencies": { - "@algolia/client-search": ">= 4.9.1 < 6", - "algoliasearch": ">= 4.9.1 < 6" - } - }, - "node_modules/@algolia/cache-browser-local-storage": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.24.0.tgz", - "integrity": "sha512-t63W9BnoXVrGy9iYHBgObNXqYXM3tYXCjDSHeNwnsc324r4o5UiVKUiAB4THQ5z9U5hTj6qUvwg/Ez43ZD85ww==", - "dependencies": { - "@algolia/cache-common": "4.24.0" - } - }, - "node_modules/@algolia/cache-common": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.24.0.tgz", - "integrity": "sha512-emi+v+DmVLpMGhp0V9q9h5CdkURsNmFC+cOS6uK9ndeJm9J4TiqSvPYVu+THUP8P/S08rxf5x2P+p3CfID0Y4g==" - }, - "node_modules/@algolia/cache-in-memory": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.24.0.tgz", - "integrity": "sha512-gDrt2so19jW26jY3/MkFg5mEypFIPbPoXsQGQWAi6TrCPsNOSEYepBMPlucqWigsmEy/prp5ug2jy/N3PVG/8w==", - "dependencies": { - "@algolia/cache-common": "4.24.0" - } - }, - "node_modules/@algolia/client-account": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.24.0.tgz", - "integrity": "sha512-adcvyJ3KjPZFDybxlqnf+5KgxJtBjwTPTeyG2aOyoJvx0Y8dUQAEOEVOJ/GBxX0WWNbmaSrhDURMhc+QeevDsA==", - "dependencies": { - "@algolia/client-common": "4.24.0", - "@algolia/client-search": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/client-account/node_modules/@algolia/client-common": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", - "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", - "dependencies": { - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/client-account/node_modules/@algolia/client-search": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", - "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", - "dependencies": { - "@algolia/client-common": "4.24.0", - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/client-analytics": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.24.0.tgz", - "integrity": "sha512-y8jOZt1OjwWU4N2qr8G4AxXAzaa8DBvyHTWlHzX/7Me1LX8OayfgHexqrsL4vSBcoMmVw2XnVW9MhL+Y2ZDJXg==", - "dependencies": { - "@algolia/client-common": "4.24.0", - "@algolia/client-search": "4.24.0", - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - 
"node_modules/@algolia/client-analytics/node_modules/@algolia/client-common": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", - "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", - "dependencies": { - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/client-analytics/node_modules/@algolia/client-search": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", - "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", - "dependencies": { - "@algolia/client-common": "4.24.0", - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/client-common": { - "version": "5.5.3", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.5.3.tgz", - "integrity": "sha512-3rdSdreBL2LGYu4DWmUGlMhaGy1vy36Xp42LdbTFsW/y3bhW5viptMHI5A3PKT0hPEMZUn+te1iM/EWvLUuVGQ==", - "peer": true, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/client-personalization": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.24.0.tgz", - "integrity": "sha512-l5FRFm/yngztweU0HdUzz1rC4yoWCFo3IF+dVIVTfEPg906eZg5BOd1k0K6rZx5JzyyoP4LdmOikfkfGsKVE9w==", - "dependencies": { - "@algolia/client-common": "4.24.0", - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/client-personalization/node_modules/@algolia/client-common": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", - "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", - "dependencies": { - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/client-search": { - "version": "5.5.3", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-5.5.3.tgz", - "integrity": "sha512-qrokD+uoNxchbiF9aP8niQd/9SZ6BgYg4WaesFaubHhr9DFvwGm4IePEMha8vQcc3fSsY6uL+gOtKB3J6RF0NQ==", - "peer": true, - "dependencies": { - "@algolia/client-common": "5.5.3", - "@algolia/requester-browser-xhr": "5.5.3", - "@algolia/requester-fetch": "5.5.3", - "@algolia/requester-node-http": "5.5.3" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/events": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz", - "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==" - }, - "node_modules/@algolia/logger-common": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.24.0.tgz", - "integrity": "sha512-LLUNjkahj9KtKYrQhFKCzMx0BY3RnNP4FEtO+sBybCjJ73E8jNdaKJ/Dd8A/VA4imVHP5tADZ8pn5B8Ga/wTMA==" - }, - "node_modules/@algolia/logger-console": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.24.0.tgz", - "integrity": "sha512-X4C8IoHgHfiUROfoRCV+lzSy+LHMgkoEEU1BbKcsfnV0i0S20zyy0NLww9dwVHUWNfPPxdMU+/wKmLGYf96yTg==", - "dependencies": { - "@algolia/logger-common": "4.24.0" - } - }, - "node_modules/@algolia/recommend": { - "version": "4.24.0", - "resolved": 
"https://registry.npmjs.org/@algolia/recommend/-/recommend-4.24.0.tgz", - "integrity": "sha512-P9kcgerfVBpfYHDfVZDvvdJv0lEoCvzNlOy2nykyt5bK8TyieYyiD0lguIJdRZZYGre03WIAFf14pgE+V+IBlw==", - "dependencies": { - "@algolia/cache-browser-local-storage": "4.24.0", - "@algolia/cache-common": "4.24.0", - "@algolia/cache-in-memory": "4.24.0", - "@algolia/client-common": "4.24.0", - "@algolia/client-search": "4.24.0", - "@algolia/logger-common": "4.24.0", - "@algolia/logger-console": "4.24.0", - "@algolia/requester-browser-xhr": "4.24.0", - "@algolia/requester-common": "4.24.0", - "@algolia/requester-node-http": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/recommend/node_modules/@algolia/client-common": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", - "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", - "dependencies": { - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/recommend/node_modules/@algolia/client-search": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", - "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", - "dependencies": { - "@algolia/client-common": "4.24.0", - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/recommend/node_modules/@algolia/requester-browser-xhr": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz", - "integrity": "sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==", - "dependencies": { - "@algolia/requester-common": "4.24.0" - } - }, - "node_modules/@algolia/recommend/node_modules/@algolia/requester-node-http": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", - "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", - "dependencies": { - "@algolia/requester-common": "4.24.0" - } - }, - "node_modules/@algolia/requester-browser-xhr": { - "version": "5.5.3", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.5.3.tgz", - "integrity": "sha512-LsfUPokiXEpvlYF7SwNjyyjkUX7IoW7oIhH6WkDUD4PCfEZkFbVplGQA0UrCiWOAbpb25P7mmP6+ldwjwqW6Kg==", - "peer": true, - "dependencies": { - "@algolia/client-common": "5.5.3" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/requester-common": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.24.0.tgz", - "integrity": "sha512-k3CXJ2OVnvgE3HMwcojpvY6d9kgKMPRxs/kVohrwF5WMr2fnqojnycZkxPoEg+bXm8fi5BBfFmOqgYztRtHsQA==" - }, - "node_modules/@algolia/requester-fetch": { - "version": "5.5.3", - "resolved": "https://registry.npmjs.org/@algolia/requester-fetch/-/requester-fetch-5.5.3.tgz", - "integrity": "sha512-RKaliEFHtVeD/fMxwrApkcI6ZxR+mU6pZna29r3NwVMpCXTJWWtlMpQmbr1RHzUsaAlpfv9pfGJN4nYPE8XWEg==", - "peer": true, - "dependencies": { - "@algolia/client-common": "5.5.3" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/requester-node-http": { - "version": "5.5.3", - "resolved": 
"https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.5.3.tgz", - "integrity": "sha512-2wU+HlTVrVce7BMW2b3Gd62btk8B0jBbuKYYzu3OFeBD/aZa88eHABtjcjQCdw3x+wvkIPEc56UsZx9eHYLebg==", - "peer": true, - "dependencies": { - "@algolia/client-common": "5.5.3" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/transporter": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.24.0.tgz", - "integrity": "sha512-86nI7w6NzWxd1Zp9q3413dRshDqAzSbsQjhcDhPIatEFiZrL1/TjnHL8S7jVKFePlIMzDsZWXAXwXzcok9c5oA==", - "dependencies": { - "@algolia/cache-common": "4.24.0", - "@algolia/logger-common": "4.24.0", - "@algolia/requester-common": "4.24.0" - } - }, - "node_modules/@ampproject/remapping": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", - "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.5", - "@jridgewell/trace-mapping": "^0.3.24" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/code-frame": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.7.tgz", - "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==", - "dependencies": { - "@babel/highlight": "^7.24.7", - "picocolors": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/compat-data": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.24.7.tgz", - "integrity": "sha512-qJzAIcv03PyaWqxRgO4mSU3lihncDT296vnyuE2O8uA4w3UHWI4S3hgeZd1L8W1Bft40w9JxJ2b412iDUFFRhw==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.24.7.tgz", - "integrity": "sha512-nykK+LEK86ahTkX/3TgauT0ikKoNCfKHEaZYTUVupJdTLzGNvrblu4u6fa7DhZONAltdf8e662t/abY8idrd/g==", - "dependencies": { - "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.24.7", - "@babel/generator": "^7.24.7", - "@babel/helper-compilation-targets": "^7.24.7", - "@babel/helper-module-transforms": "^7.24.7", - "@babel/helpers": "^7.24.7", - "@babel/parser": "^7.24.7", - "@babel/template": "^7.24.7", - "@babel/traverse": "^7.24.7", - "@babel/types": "^7.24.7", - "convert-source-map": "^2.0.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.2.3", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/babel" - } - }, - "node_modules/@babel/core/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/generator": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.24.7.tgz", - "integrity": "sha512-oipXieGC3i45Y1A41t4tAqpnEZWgB/lC6Ehh6+rOviR5XWpTtMmLN+fGjz9vOiNRt0p6RtO6DtD0pdU3vpqdSA==", - "dependencies": { - "@babel/types": "^7.24.7", - "@jridgewell/gen-mapping": "^0.3.5", - "@jridgewell/trace-mapping": "^0.3.25", - "jsesc": "^2.5.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-annotate-as-pure": { - 
"version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.24.7.tgz", - "integrity": "sha512-BaDeOonYvhdKw+JoMVkAixAAJzG2jVPIwWoKBPdYuY9b452e2rPuI9QPYh3KpofZ3pW2akOmwZLOiOsHMiqRAg==", - "dependencies": { - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.24.7.tgz", - "integrity": "sha512-xZeCVVdwb4MsDBkkyZ64tReWYrLRHlMN72vP7Bdm3OUOuyFZExhsHUUnuWnm2/XOlAJzR0LfPpB56WXZn0X/lA==", - "dependencies": { - "@babel/traverse": "^7.24.7", - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-compilation-targets": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.24.7.tgz", - "integrity": "sha512-ctSdRHBi20qWOfy27RUb4Fhp07KSJ3sXcuSvTrXrc4aG8NSYDo1ici3Vhg9bg69y5bj0Mr1lh0aeEgTvc12rMg==", - "dependencies": { - "@babel/compat-data": "^7.24.7", - "@babel/helper-validator-option": "^7.24.7", - "browserslist": "^4.22.2", - "lru-cache": "^5.1.1", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-compilation-targets/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.24.7.tgz", - "integrity": "sha512-kTkaDl7c9vO80zeX1rJxnuRpEsD5tA81yh11X1gQo+PhSti3JS+7qeZo9U4RHobKRiFPKaGK3svUAeb8D0Q7eg==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.24.7", - "@babel/helper-environment-visitor": "^7.24.7", - "@babel/helper-function-name": "^7.24.7", - "@babel/helper-member-expression-to-functions": "^7.24.7", - "@babel/helper-optimise-call-expression": "^7.24.7", - "@babel/helper-replace-supers": "^7.24.7", - "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", - "@babel/helper-split-export-declaration": "^7.24.7", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-create-regexp-features-plugin": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.24.7.tgz", - "integrity": "sha512-03TCmXy2FtXJEZfbXDTSqq1fRJArk7lX9DOFC/47VthYcxyIOx+eXQmdo6DOQvrbpIix+KfXwvuXdFDZHxt+rA==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.24.7", - "regexpu-core": "^5.3.1", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - 
"node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-define-polyfill-provider": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.2.tgz", - "integrity": "sha512-LV76g+C502biUK6AyZ3LK10vDpDyCzZnhZFXkH1L75zHPj68+qc8Zfpx2th+gzwA2MzyK+1g/3EPl62yFnVttQ==", - "dependencies": { - "@babel/helper-compilation-targets": "^7.22.6", - "@babel/helper-plugin-utils": "^7.22.5", - "debug": "^4.1.1", - "lodash.debounce": "^4.0.8", - "resolve": "^1.14.2" - }, - "peerDependencies": { - "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" - } - }, - "node_modules/@babel/helper-environment-visitor": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.24.7.tgz", - "integrity": "sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==", - "dependencies": { - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-function-name": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.24.7.tgz", - "integrity": "sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==", - "dependencies": { - "@babel/template": "^7.24.7", - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-hoist-variables": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.24.7.tgz", - "integrity": "sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==", - "dependencies": { - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-member-expression-to-functions": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.24.7.tgz", - "integrity": "sha512-LGeMaf5JN4hAT471eJdBs/GK1DoYIJ5GCtZN/EsL6KUiiDZOvO/eKE11AMZJa2zP4zk4qe9V2O/hxAmkRc8p6w==", - "dependencies": { - "@babel/traverse": "^7.24.7", - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-module-imports": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz", - "integrity": "sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==", - "dependencies": { - "@babel/traverse": "^7.24.7", - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-module-transforms": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.24.7.tgz", - "integrity": "sha512-1fuJEwIrp+97rM4RWdO+qrRsZlAeL1lQJoPqtCYWv0NL115XM93hIH4CSRln2w52SqvmY5hqdtauB6QFCDiZNQ==", - "dependencies": { - "@babel/helper-environment-visitor": "^7.24.7", - "@babel/helper-module-imports": "^7.24.7", - "@babel/helper-simple-access": "^7.24.7", - "@babel/helper-split-export-declaration": 
"^7.24.7", - "@babel/helper-validator-identifier": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-optimise-call-expression": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.24.7.tgz", - "integrity": "sha512-jKiTsW2xmWwxT1ixIdfXUZp+P5yURx2suzLZr5Hi64rURpDYdMW0pv+Uf17EYk2Rd428Lx4tLsnjGJzYKDM/6A==", - "dependencies": { - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-plugin-utils": { - "version": "7.24.8", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.8.tgz", - "integrity": "sha512-FFWx5142D8h2Mgr/iPVGH5G7w6jDn4jUSpZTyDnQO0Yn7Ks2Kuz6Pci8H6MPCoUJegd/UZQ3tAvfLCxQSnWWwg==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-remap-async-to-generator": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.24.7.tgz", - "integrity": "sha512-9pKLcTlZ92hNZMQfGCHImUpDOlAgkkpqalWEeftW5FBya75k8Li2ilerxkM/uBEj01iBZXcCIB/bwvDYgWyibA==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.24.7", - "@babel/helper-environment-visitor": "^7.24.7", - "@babel/helper-wrap-function": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-replace-supers": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.24.7.tgz", - "integrity": "sha512-qTAxxBM81VEyoAY0TtLrx1oAEJc09ZK67Q9ljQToqCnA+55eNwCORaxlKyu+rNfX86o8OXRUSNUnrtsAZXM9sg==", - "dependencies": { - "@babel/helper-environment-visitor": "^7.24.7", - "@babel/helper-member-expression-to-functions": "^7.24.7", - "@babel/helper-optimise-call-expression": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-simple-access": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.24.7.tgz", - "integrity": "sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==", - "dependencies": { - "@babel/traverse": "^7.24.7", - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.24.7.tgz", - "integrity": "sha512-IO+DLT3LQUElMbpzlatRASEyQtfhSE0+m465v++3jyyXeBTBUjtVZg28/gHeV5mrTJqvEKhKroBGAvhW+qPHiQ==", - "dependencies": { - "@babel/traverse": "^7.24.7", - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-split-export-declaration": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.7.tgz", - "integrity": "sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==", - "dependencies": { - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-string-parser": { - "version": "7.24.7", - "resolved": 
"https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.7.tgz", - "integrity": "sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-validator-identifier": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz", - "integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-validator-option": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.24.7.tgz", - "integrity": "sha512-yy1/KvjhV/ZCL+SM7hBrvnZJ3ZuT9OuZgIJAGpPEToANvc3iM6iDvBnRjtElWibHU6n8/LPR/EjX9EtIEYO3pw==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-wrap-function": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.24.7.tgz", - "integrity": "sha512-N9JIYk3TD+1vq/wn77YnJOqMtfWhNewNE+DJV4puD2X7Ew9J4JvrzrFDfTfyv5EgEXVy9/Wt8QiOErzEmv5Ifw==", - "dependencies": { - "@babel/helper-function-name": "^7.24.7", - "@babel/template": "^7.24.7", - "@babel/traverse": "^7.24.7", - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helpers": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.24.7.tgz", - "integrity": "sha512-NlmJJtvcw72yRJRcnCmGvSi+3jDEg8qFu3z0AFoymmzLx5ERVWyzd9kVXr7Th9/8yIJi2Zc6av4Tqz3wFs8QWg==", - "dependencies": { - "@babel/template": "^7.24.7", - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/highlight": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.7.tgz", - "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==", - "dependencies": { - "@babel/helper-validator-identifier": "^7.24.7", - "chalk": "^2.4.2", - "js-tokens": "^4.0.0", - "picocolors": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/highlight/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/highlight/node_modules/color-name": { - "version": "1.1.3", - "resolved": 
"https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "node_modules/@babel/highlight/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@babel/highlight/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/parser": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.7.tgz", - "integrity": "sha512-9uUYRm6OqQrCqQdG1iCBwBPZgN8ciDBro2nIOFaiRz1/BCxaI7CNvQbDHvsArAC7Tw9Hda/B3U+6ui9u4HWXPw==", - "bin": { - "parser": "bin/babel-parser.js" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-firefox-class-in-computed-class-key": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.24.7.tgz", - "integrity": "sha512-TiT1ss81W80eQsN+722OaeQMY/G4yTb4G9JrqeiDADs3N8lbPMGldWi9x8tyqCW5NLx1Jh2AvkE6r6QvEltMMQ==", - "dependencies": { - "@babel/helper-environment-visitor": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.24.7.tgz", - "integrity": "sha512-unaQgZ/iRu/By6tsjMZzpeBZjChYfLYry6HrEXPoz3KmfF0sVBQ1l8zKMQ4xRGLWVsjuvB8nQfjNP/DcfEOCsg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.24.7.tgz", - "integrity": "sha512-+izXIbke1T33mY4MSNnrqhPXDz01WYhEf3yF5NbnUtkiNnm+XBZJl3kNfoK6NKmYlz/D07+l2GWVK/QfDkNCuQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", - "@babel/plugin-transform-optional-chaining": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.13.0" - } - }, - "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { - "version": "7.24.7", - 
"resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.24.7.tgz", - "integrity": "sha512-utA4HuR6F4Vvcr+o4DnjL8fCOlgRFGbeeBEGNg3ZTrLFw6VWG5XmUrvcQ0FjIYMU2ST4XcR2Wsp7t9qOAPnxMg==", - "dependencies": { - "@babel/helper-environment-visitor": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-proposal-private-property-in-object": { - "version": "7.21.0-placeholder-for-preset-env.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", - "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-async-generators": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", - "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-class-properties": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", - "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-class-static-block": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", - "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-dynamic-import": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", - "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-export-namespace-from": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", - "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.3" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-import-assertions": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.24.7.tgz", - "integrity": 
"sha512-Ec3NRUMoi8gskrkBe3fNmEQfxDvY8bgfQpz6jlk/41kX9eUjvpyqWU7PBP/pLAvMaSQjbMNKJmvX57jP+M6bPg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-import-attributes": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.24.7.tgz", - "integrity": "sha512-hbX+lKKeUMGihnK8nvKqmXBInriT3GVjzXKFriV3YC6APGxMbP8RZNFwy91+hocLXq90Mta+HshoB31802bb8A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-import-meta": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", - "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-json-strings": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", - "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-jsx": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.24.7.tgz", - "integrity": "sha512-6ddciUPe/mpMnOKv/U+RSd2vvVy+Yw/JfBB0ZHYjEZt9NLHmCUylNYlsbqCCS1Bffjlb0fCwC9Vqz+sBz6PsiQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-logical-assignment-operators": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", - "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", - "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-numeric-separator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", - "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-object-rest-spread": { - 
"version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", - "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-optional-catch-binding": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", - "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-optional-chaining": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", - "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-private-property-in-object": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", - "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-top-level-await": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", - "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-typescript": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.24.7.tgz", - "integrity": "sha512-c/+fVeJBB0FeKsFvwytYiUD+LBvhHjGSI0g446PRGdSVGZLRNArBUno2PETbAly3tpiNAQR5XaZ+JslxkotsbA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-unicode-sets-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", - "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-transform-arrow-functions": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.24.7.tgz", - 
"integrity": "sha512-Dt9LQs6iEY++gXUwY03DNFat5C2NbO48jj+j/bSAz6b3HgPs39qcPiYt77fDObIcFwj3/C2ICX9YMwGflUoSHQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-async-generator-functions": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.24.7.tgz", - "integrity": "sha512-o+iF77e3u7ZS4AoAuJvapz9Fm001PuD2V3Lp6OSE4FYQke+cSewYtnek+THqGRWyQloRCyvWL1OkyfNEl9vr/g==", - "dependencies": { - "@babel/helper-environment-visitor": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/helper-remap-async-to-generator": "^7.24.7", - "@babel/plugin-syntax-async-generators": "^7.8.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-async-to-generator": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.24.7.tgz", - "integrity": "sha512-SQY01PcJfmQ+4Ash7NE+rpbLFbmqA2GPIgqzxfFTL4t1FKRq4zTms/7htKpoCUI9OcFYgzqfmCdH53s6/jn5fA==", - "dependencies": { - "@babel/helper-module-imports": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/helper-remap-async-to-generator": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-block-scoped-functions": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.24.7.tgz", - "integrity": "sha512-yO7RAz6EsVQDaBH18IDJcMB1HnrUn2FJ/Jslc/WtPPWcjhpUJXU/rjbwmluzp7v/ZzWcEhTMXELnnsz8djWDwQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-block-scoping": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.24.7.tgz", - "integrity": "sha512-Nd5CvgMbWc+oWzBsuaMcbwjJWAcp5qzrbg69SZdHSP7AMY0AbWFqFO0WTFCA1jxhMCwodRwvRec8k0QUbZk7RQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-class-properties": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.24.7.tgz", - "integrity": "sha512-vKbfawVYayKcSeSR5YYzzyXvsDFWU2mD8U5TFeXtbCPLFUqe7GyCgvO6XDHzje862ODrOwy6WCPmKeWHbCFJ4w==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-class-static-block": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.24.7.tgz", - "integrity": "sha512-HMXK3WbBPpZQufbMG4B46A90PkuuhN9vBCb5T8+VAHqvAqvcLi+2cKoukcpmUYkszLhScU3l1iudhrks3DggRQ==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.24.7", - 
"@babel/helper-plugin-utils": "^7.24.7", - "@babel/plugin-syntax-class-static-block": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.12.0" - } - }, - "node_modules/@babel/plugin-transform-classes": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.24.7.tgz", - "integrity": "sha512-CFbbBigp8ln4FU6Bpy6g7sE8B/WmCmzvivzUC6xDAdWVsjYTXijpuuGJmYkAaoWAzcItGKT3IOAbxRItZ5HTjw==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.24.7", - "@babel/helper-compilation-targets": "^7.24.7", - "@babel/helper-environment-visitor": "^7.24.7", - "@babel/helper-function-name": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/helper-replace-supers": "^7.24.7", - "@babel/helper-split-export-declaration": "^7.24.7", - "globals": "^11.1.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-computed-properties": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.24.7.tgz", - "integrity": "sha512-25cS7v+707Gu6Ds2oY6tCkUwsJ9YIDbggd9+cu9jzzDgiNq7hR/8dkzxWfKWnTic26vsI3EsCXNd4iEB6e8esQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/template": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-destructuring": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.24.7.tgz", - "integrity": "sha512-19eJO/8kdCQ9zISOf+SEUJM/bAUIsvY3YDnXZTupUCQ8LgrWnsG/gFB9dvXqdXnRXMAM8fvt7b0CBKQHNGy1mw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-dotall-regex": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.24.7.tgz", - "integrity": "sha512-ZOA3W+1RRTSWvyqcMJDLqbchh7U4NRGqwRfFSVbOLS/ePIP4vHB5e8T8eXcuqyN1QkgKyj5wuW0lcS85v4CrSw==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-duplicate-keys": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.24.7.tgz", - "integrity": "sha512-JdYfXyCRihAe46jUIliuL2/s0x0wObgwwiGxw/UbgJBr20gQBThrokO4nYKgWkD7uBaqM7+9x5TU7NkExZJyzw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-dynamic-import": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.24.7.tgz", - "integrity": "sha512-sc3X26PhZQDb3JhORmakcbvkeInvxz+A8oda99lj7J60QRuPZvNAk9wQlTBS1ZynelDrDmTU4pw1tyc5d5ZMUg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/plugin-syntax-dynamic-import": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - 
"peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-exponentiation-operator": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.24.7.tgz", - "integrity": "sha512-Rqe/vSc9OYgDajNIK35u7ot+KeCoetqQYFXM4Epf7M7ez3lWlOjrDjrwMei6caCVhfdw+mIKD4cgdGNy5JQotQ==", - "dependencies": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-export-namespace-from": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.24.7.tgz", - "integrity": "sha512-v0K9uNYsPL3oXZ/7F9NNIbAj2jv1whUEtyA6aujhekLs56R++JDQuzRcP2/z4WX5Vg/c5lE9uWZA0/iUoFhLTA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-for-of": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.24.7.tgz", - "integrity": "sha512-wo9ogrDG1ITTTBsy46oGiN1dS9A7MROBTcYsfS8DtsImMkHk9JXJ3EWQM6X2SUw4x80uGPlwj0o00Uoc6nEE3g==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-function-name": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.24.7.tgz", - "integrity": "sha512-U9FcnA821YoILngSmYkW6FjyQe2TyZD5pHt4EVIhmcTkrJw/3KqcrRSxuOo5tFZJi7TE19iDyI1u+weTI7bn2w==", - "dependencies": { - "@babel/helper-compilation-targets": "^7.24.7", - "@babel/helper-function-name": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-json-strings": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.24.7.tgz", - "integrity": "sha512-2yFnBGDvRuxAaE/f0vfBKvtnvvqU8tGpMHqMNpTN2oWMKIR3NqFkjaAgGwawhqK/pIN2T3XdjGPdaG0vDhOBGw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/plugin-syntax-json-strings": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-literals": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.24.7.tgz", - "integrity": "sha512-vcwCbb4HDH+hWi8Pqenwnjy+UiklO4Kt1vfspcQYFhJdpthSnW8XvWGyDZWKNVrVbVViI/S7K9PDJZiUmP2fYQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-logical-assignment-operators": { - "version": "7.24.7", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.24.7.tgz", - "integrity": "sha512-4D2tpwlQ1odXmTEIFWy9ELJcZHqrStlzK/dAOWYyxX3zT0iXQB6banjgeOJQXzEc4S0E0a5A+hahxPaEFYftsw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-member-expression-literals": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.24.7.tgz", - "integrity": "sha512-T/hRC1uqrzXMKLQ6UCwMT85S3EvqaBXDGf0FaMf4446Qx9vKwlghvee0+uuZcDUCZU5RuNi4781UQ7R308zzBw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-amd": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.24.7.tgz", - "integrity": "sha512-9+pB1qxV3vs/8Hdmz/CulFB8w2tuu6EB94JZFsjdqxQokwGa9Unap7Bo2gGBGIvPmDIVvQrom7r5m/TCDMURhg==", - "dependencies": { - "@babel/helper-module-transforms": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-commonjs": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.24.7.tgz", - "integrity": "sha512-iFI8GDxtevHJ/Z22J5xQpVqFLlMNstcLXh994xifFwxxGslr2ZXXLWgtBeLctOD63UFDArdvN6Tg8RFw+aEmjQ==", - "dependencies": { - "@babel/helper-module-transforms": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/helper-simple-access": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-systemjs": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.24.7.tgz", - "integrity": "sha512-GYQE0tW7YoaN13qFh3O1NCY4MPkUiAH3fiF7UcV/I3ajmDKEdG3l+UOcbAm4zUE3gnvUU+Eni7XrVKo9eO9auw==", - "dependencies": { - "@babel/helper-hoist-variables": "^7.24.7", - "@babel/helper-module-transforms": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/helper-validator-identifier": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-umd": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.24.7.tgz", - "integrity": "sha512-3aytQvqJ/h9z4g8AsKPLvD4Zqi2qT+L3j7XoFFu1XBlZWEl2/1kWnhmAbxpLgPrHSY0M6UA02jyTiwUVtiKR6A==", - "dependencies": { - "@babel/helper-module-transforms": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.24.7", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.24.7.tgz", - "integrity": "sha512-/jr7h/EWeJtk1U/uz2jlsCioHkZk1JJZVcc8oQsJ1dUlaJD83f4/6Zeh2aHt9BIFokHIsSeDfhUmju0+1GPd6g==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-transform-new-target": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.24.7.tgz", - "integrity": "sha512-RNKwfRIXg4Ls/8mMTza5oPF5RkOW8Wy/WgMAp1/F1yZ8mMbtwXW+HDoJiOsagWrAhI5f57Vncrmr9XeT4CVapA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.24.7.tgz", - "integrity": "sha512-Ts7xQVk1OEocqzm8rHMXHlxvsfZ0cEF2yomUqpKENHWMF4zKk175Y4q8H5knJes6PgYad50uuRmt3UJuhBw8pQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-numeric-separator": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.24.7.tgz", - "integrity": "sha512-e6q1TiVUzvH9KRvicuxdBTUj4AdKSRwzIyFFnfnezpCfP2/7Qmbb8qbU2j7GODbl4JMkblitCQjKYUaX/qkkwA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/plugin-syntax-numeric-separator": "^7.10.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-object-rest-spread": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.24.7.tgz", - "integrity": "sha512-4QrHAr0aXQCEFni2q4DqKLD31n2DL+RxcwnNjDFkSG0eNQ/xCavnRkfCUjsyqGC2OviNJvZOF/mQqZBw7i2C5Q==", - "dependencies": { - "@babel/helper-compilation-targets": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-object-super": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.24.7.tgz", - "integrity": "sha512-A/vVLwN6lBrMFmMDmPPz0jnE6ZGx7Jq7d6sT/Ev4H65RER6pZ+kczlf1DthF5N0qaPHBsI7UXiE8Zy66nmAovg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/helper-replace-supers": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-optional-catch-binding": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.24.7.tgz", - "integrity": 
"sha512-uLEndKqP5BfBbC/5jTwPxLh9kqPWWgzN/f8w6UwAIirAEqiIVJWWY312X72Eub09g5KF9+Zn7+hT7sDxmhRuKA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-optional-chaining": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.24.7.tgz", - "integrity": "sha512-tK+0N9yd4j+x/4hxF3F0e0fu/VdcxU18y5SevtyM/PCFlQvXbR0Zmlo2eBrKtVipGNFzpq56o8WsIIKcJFUCRQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", - "@babel/plugin-syntax-optional-chaining": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-parameters": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.24.7.tgz", - "integrity": "sha512-yGWW5Rr+sQOhK0Ot8hjDJuxU3XLRQGflvT4lhlSY0DFvdb3TwKaY26CJzHtYllU0vT9j58hc37ndFPsqT1SrzA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-private-methods": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.24.7.tgz", - "integrity": "sha512-COTCOkG2hn4JKGEKBADkA8WNb35TGkkRbI5iT845dB+NyqgO8Hn+ajPbSnIQznneJTa3d30scb6iz/DhH8GsJQ==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-private-property-in-object": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.24.7.tgz", - "integrity": "sha512-9z76mxwnwFxMyxZWEgdgECQglF2Q7cFLm0kMf8pGwt+GSJsY0cONKj/UuO4bOH0w/uAel3ekS4ra5CEAyJRmDA==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.24.7", - "@babel/helper-create-class-features-plugin": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-property-literals": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.24.7.tgz", - "integrity": "sha512-EMi4MLQSHfd2nrCqQEWxFdha2gBCqU4ZcCng4WBGZ5CJL4bBRW0ptdqqDdeirGZcpALazVVNJqRmsO8/+oNCBA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-constant-elements": { - "version": "7.25.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.25.1.tgz", - "integrity": "sha512-SLV/giH/V4SmloZ6Dt40HjTGTAIkxn33TVIHxNGNvo8ezMhrxBkzisj4op1KZYPIOHFLqhv60OHvX+YRu4xbmQ==", - "dependencies": { - 
"@babel/helper-plugin-utils": "^7.24.8" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-display-name": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.24.7.tgz", - "integrity": "sha512-H/Snz9PFxKsS1JLI4dJLtnJgCJRoo0AUm3chP6NYr+9En1JMKloheEiLIhlp5MDVznWo+H3AAC1Mc8lmUEpsgg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.24.7.tgz", - "integrity": "sha512-+Dj06GDZEFRYvclU6k4bme55GKBEWUmByM/eoKuqg4zTNQHiApWRhQph5fxQB2wAEFvRzL1tOEj1RJ19wJrhoA==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.24.7", - "@babel/helper-module-imports": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/plugin-syntax-jsx": "^7.24.7", - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx-development": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.24.7.tgz", - "integrity": "sha512-QG9EnzoGn+Qar7rxuW+ZOsbWOt56FvvI93xInqsZDC5fsekx1AlIO4KIJ5M+D0p0SqSH156EpmZyXq630B8OlQ==", - "dependencies": { - "@babel/plugin-transform-react-jsx": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-pure-annotations": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.24.7.tgz", - "integrity": "sha512-PLgBVk3fzbmEjBJ/u8kFzOqS9tUeDjiaWud/rRym/yjCo/M9cASPlnrd2ZmmZpQT40fOOrvR8jh+n8jikrOhNA==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-regenerator": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.24.7.tgz", - "integrity": "sha512-lq3fvXPdimDrlg6LWBoqj+r/DEWgONuwjuOuQCSYgRroXDH/IdM1C0IZf59fL5cHLpjEH/O6opIRBbqv7ELnuA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "regenerator-transform": "^0.15.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-reserved-words": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.24.7.tgz", - "integrity": "sha512-0DUq0pHcPKbjFZCfTss/pGkYMfy3vFWydkUBd9r0GHpIyfs2eCDENvqadMycRS9wZCXR41wucAfJHJmwA0UmoQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-runtime": { - "version": "7.24.7", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.24.7.tgz", - "integrity": "sha512-YqXjrk4C+a1kZjewqt+Mmu2UuV1s07y8kqcUf4qYLnoqemhR4gRQikhdAhSVJioMjVTu6Mo6pAbaypEA3jY6fw==", - "dependencies": { - "@babel/helper-module-imports": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7", - "babel-plugin-polyfill-corejs2": "^0.4.10", - "babel-plugin-polyfill-corejs3": "^0.10.1", - "babel-plugin-polyfill-regenerator": "^0.6.1", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/plugin-transform-shorthand-properties": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.24.7.tgz", - "integrity": "sha512-KsDsevZMDsigzbA09+vacnLpmPH4aWjcZjXdyFKGzpplxhbeB4wYtury3vglQkg6KM/xEPKt73eCjPPf1PgXBA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-spread": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.24.7.tgz", - "integrity": "sha512-x96oO0I09dgMDxJaANcRyD4ellXFLLiWhuwDxKZX5g2rWP1bTPkBSwCYv96VDXVT1bD9aPj8tppr5ITIh8hBng==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-sticky-regex": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.24.7.tgz", - "integrity": "sha512-kHPSIJc9v24zEml5geKg9Mjx5ULpfncj0wRpYtxbvKyTtHCYDkVE3aHQ03FrpEo4gEe2vrJJS1Y9CJTaThA52g==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-template-literals": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.24.7.tgz", - "integrity": "sha512-AfDTQmClklHCOLxtGoP7HkeMw56k1/bTQjwsfhL6pppo/M4TOBSq+jjBUBLmV/4oeFg4GWMavIl44ZeCtmmZTw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-typeof-symbol": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.24.7.tgz", - "integrity": "sha512-VtR8hDy7YLB7+Pet9IarXjg/zgCMSF+1mNS/EQEiEaUPoFXCVsHG64SIxcaaI2zJgRiv+YmgaQESUfWAdbjzgg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-typescript": { - "version": "7.24.7", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.24.7.tgz", - "integrity": "sha512-iLD3UNkgx2n/HrjBesVbYX6j0yqn/sJktvbtKKgcaLIQ4bTTQ8obAypc1VpyHPD2y4Phh9zHOaAt8e/L14wCpw==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.24.7", - "@babel/helper-create-class-features-plugin": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/plugin-syntax-typescript": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-escapes": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.24.7.tgz", - "integrity": "sha512-U3ap1gm5+4edc2Q/P+9VrBNhGkfnf+8ZqppY71Bo/pzZmXhhLdqgaUl6cuB07O1+AQJtCLfaOmswiNbSQ9ivhw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-property-regex": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.24.7.tgz", - "integrity": "sha512-uH2O4OV5M9FZYQrwc7NdVmMxQJOCCzFeYudlZSzUAHRFeOujQefa92E74TQDVskNHCzOXoigEuoyzHDhaEaK5w==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-regex": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.24.7.tgz", - "integrity": "sha512-hlQ96MBZSAXUq7ltkjtu3FJCCSMx/j629ns3hA3pXnBXjanNP0LHi+JpPeA81zaWgVK1VGH95Xuy7u0RyQ8kMg==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-sets-regex": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.24.7.tgz", - "integrity": "sha512-2G8aAvF4wy1w/AGZkemprdGMRg5o6zPNhbHVImRz3lss55TYCBd6xStN19rt8XJHq20sqV0JbyWjOWwQRwV/wg==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/preset-env": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.24.7.tgz", - "integrity": "sha512-1YZNsc+y6cTvWlDHidMBsQZrZfEFjRIo/BZCT906PMdzOyXtSLTgqGdrpcuTDCXyd11Am5uQULtDIcCfnTc8fQ==", - "dependencies": { - "@babel/compat-data": "^7.24.7", - "@babel/helper-compilation-targets": "^7.24.7", - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/helper-validator-option": "^7.24.7", - "@babel/plugin-bugfix-firefox-class-in-computed-class-key": "^7.24.7", - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.24.7", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.24.7", - "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.24.7", - 
"@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-class-properties": "^7.12.13", - "@babel/plugin-syntax-class-static-block": "^7.14.5", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-import-assertions": "^7.24.7", - "@babel/plugin-syntax-import-attributes": "^7.24.7", - "@babel/plugin-syntax-import-meta": "^7.10.4", - "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5", - "@babel/plugin-syntax-top-level-await": "^7.14.5", - "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", - "@babel/plugin-transform-arrow-functions": "^7.24.7", - "@babel/plugin-transform-async-generator-functions": "^7.24.7", - "@babel/plugin-transform-async-to-generator": "^7.24.7", - "@babel/plugin-transform-block-scoped-functions": "^7.24.7", - "@babel/plugin-transform-block-scoping": "^7.24.7", - "@babel/plugin-transform-class-properties": "^7.24.7", - "@babel/plugin-transform-class-static-block": "^7.24.7", - "@babel/plugin-transform-classes": "^7.24.7", - "@babel/plugin-transform-computed-properties": "^7.24.7", - "@babel/plugin-transform-destructuring": "^7.24.7", - "@babel/plugin-transform-dotall-regex": "^7.24.7", - "@babel/plugin-transform-duplicate-keys": "^7.24.7", - "@babel/plugin-transform-dynamic-import": "^7.24.7", - "@babel/plugin-transform-exponentiation-operator": "^7.24.7", - "@babel/plugin-transform-export-namespace-from": "^7.24.7", - "@babel/plugin-transform-for-of": "^7.24.7", - "@babel/plugin-transform-function-name": "^7.24.7", - "@babel/plugin-transform-json-strings": "^7.24.7", - "@babel/plugin-transform-literals": "^7.24.7", - "@babel/plugin-transform-logical-assignment-operators": "^7.24.7", - "@babel/plugin-transform-member-expression-literals": "^7.24.7", - "@babel/plugin-transform-modules-amd": "^7.24.7", - "@babel/plugin-transform-modules-commonjs": "^7.24.7", - "@babel/plugin-transform-modules-systemjs": "^7.24.7", - "@babel/plugin-transform-modules-umd": "^7.24.7", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.24.7", - "@babel/plugin-transform-new-target": "^7.24.7", - "@babel/plugin-transform-nullish-coalescing-operator": "^7.24.7", - "@babel/plugin-transform-numeric-separator": "^7.24.7", - "@babel/plugin-transform-object-rest-spread": "^7.24.7", - "@babel/plugin-transform-object-super": "^7.24.7", - "@babel/plugin-transform-optional-catch-binding": "^7.24.7", - "@babel/plugin-transform-optional-chaining": "^7.24.7", - "@babel/plugin-transform-parameters": "^7.24.7", - "@babel/plugin-transform-private-methods": "^7.24.7", - "@babel/plugin-transform-private-property-in-object": "^7.24.7", - "@babel/plugin-transform-property-literals": "^7.24.7", - "@babel/plugin-transform-regenerator": "^7.24.7", - "@babel/plugin-transform-reserved-words": "^7.24.7", - "@babel/plugin-transform-shorthand-properties": "^7.24.7", - "@babel/plugin-transform-spread": "^7.24.7", - "@babel/plugin-transform-sticky-regex": "^7.24.7", - "@babel/plugin-transform-template-literals": 
"^7.24.7", - "@babel/plugin-transform-typeof-symbol": "^7.24.7", - "@babel/plugin-transform-unicode-escapes": "^7.24.7", - "@babel/plugin-transform-unicode-property-regex": "^7.24.7", - "@babel/plugin-transform-unicode-regex": "^7.24.7", - "@babel/plugin-transform-unicode-sets-regex": "^7.24.7", - "@babel/preset-modules": "0.1.6-no-external-plugins", - "babel-plugin-polyfill-corejs2": "^0.4.10", - "babel-plugin-polyfill-corejs3": "^0.10.4", - "babel-plugin-polyfill-regenerator": "^0.6.1", - "core-js-compat": "^3.31.0", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-env/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/preset-modules": { - "version": "0.1.6-no-external-plugins", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", - "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/types": "^7.4.4", - "esutils": "^2.0.2" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" - } - }, - "node_modules/@babel/preset-react": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.24.7.tgz", - "integrity": "sha512-AAH4lEkpmzFWrGVlHaxJB7RLH21uPQ9+He+eFLWHmF9IuFQVugz8eAsamaW0DXRrTfco5zj1wWtpdcXJUOfsag==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/helper-validator-option": "^7.24.7", - "@babel/plugin-transform-react-display-name": "^7.24.7", - "@babel/plugin-transform-react-jsx": "^7.24.7", - "@babel/plugin-transform-react-jsx-development": "^7.24.7", - "@babel/plugin-transform-react-pure-annotations": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-typescript": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.24.7.tgz", - "integrity": "sha512-SyXRe3OdWwIwalxDg5UtJnJQO+YPcTfwiIY2B0Xlddh9o7jpWLvv8X1RthIeDOxQ+O1ML5BLPCONToObyVQVuQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7", - "@babel/helper-validator-option": "^7.24.7", - "@babel/plugin-syntax-jsx": "^7.24.7", - "@babel/plugin-transform-modules-commonjs": "^7.24.7", - "@babel/plugin-transform-typescript": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/regjsgen": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", - "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" - }, - "node_modules/@babel/runtime": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.7.tgz", - "integrity": "sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw==", - "dependencies": { - "regenerator-runtime": "^0.14.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/runtime-corejs3": { - "version": "7.24.7", - "resolved": 
"https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.24.7.tgz", - "integrity": "sha512-eytSX6JLBY6PVAeQa2bFlDx/7Mmln/gaEpsit5a3WEvjGfiIytEsgAwuIXCPM0xvw0v0cJn3ilq0/TvXrW0kgA==", - "dependencies": { - "core-js-pure": "^3.30.2", - "regenerator-runtime": "^0.14.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/template": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.24.7.tgz", - "integrity": "sha512-jYqfPrU9JTF0PmPy1tLYHW4Mp4KlgxJD9l2nP9fD6yT/ICi554DmrWBAEYpIelzjHf1msDP3PxJIRt/nFNfBig==", - "dependencies": { - "@babel/code-frame": "^7.24.7", - "@babel/parser": "^7.24.7", - "@babel/types": "^7.24.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/traverse": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.24.7.tgz", - "integrity": "sha512-yb65Ed5S/QAcewNPh0nZczy9JdYXkkAbIsEo+P7BE7yO3txAY30Y/oPa3QkQ5It3xVG2kpKMg9MsdxZaO31uKA==", - "dependencies": { - "@babel/code-frame": "^7.24.7", - "@babel/generator": "^7.24.7", - "@babel/helper-environment-visitor": "^7.24.7", - "@babel/helper-function-name": "^7.24.7", - "@babel/helper-hoist-variables": "^7.24.7", - "@babel/helper-split-export-declaration": "^7.24.7", - "@babel/parser": "^7.24.7", - "@babel/types": "^7.24.7", - "debug": "^4.3.1", - "globals": "^11.1.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/types": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.24.7.tgz", - "integrity": "sha512-XEFXSlxiG5td2EJRe8vOmRbaXVgfcBlszKujvVmWIK/UpywWljQCfzAv3RQCGujWQ1RD4YYWEAqDXfuJiy8f5Q==", - "dependencies": { - "@babel/helper-string-parser": "^7.24.7", - "@babel/helper-validator-identifier": "^7.24.7", - "to-fast-properties": "^2.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@colors/colors": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", - "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", - "optional": true, - "engines": { - "node": ">=0.1.90" - } - }, - "node_modules/@discoveryjs/json-ext": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", - "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/@docsearch/css": { - "version": "3.6.1", - "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.6.1.tgz", - "integrity": "sha512-VtVb5DS+0hRIprU2CO6ZQjK2Zg4QU5HrDM1+ix6rT0umsYvFvatMAnf97NHZlVWDaaLlx7GRfR/7FikANiM2Fg==" - }, - "node_modules/@docsearch/react": { - "version": "3.6.1", - "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.6.1.tgz", - "integrity": "sha512-qXZkEPvybVhSXj0K7U3bXc233tk5e8PfhoZ6MhPOiik/qUQxYC+Dn9DnoS7CxHQQhHfCvTiN0eY9M12oRghEXw==", - "dependencies": { - "@algolia/autocomplete-core": "1.9.3", - "@algolia/autocomplete-preset-algolia": "1.9.3", - "@docsearch/css": "3.6.1", - "algoliasearch": "^4.19.1" - }, - "peerDependencies": { - "@types/react": ">= 16.8.0 < 19.0.0", - "react": ">= 16.8.0 < 19.0.0", - "react-dom": ">= 16.8.0 < 19.0.0", - "search-insights": ">= 1 < 3" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "react": { - "optional": true - }, - "react-dom": { - "optional": true - }, - "search-insights": { - "optional": true - } - } - }, 
- "node_modules/@docusaurus/core": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.5.2.tgz", - "integrity": "sha512-4Z1WkhCSkX4KO0Fw5m/Vuc7Q3NxBG53NE5u59Rs96fWkMPZVSrzEPP16/Nk6cWb/shK7xXPndTmalJtw7twL/w==", - "dependencies": { - "@babel/core": "^7.23.3", - "@babel/generator": "^7.23.3", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-transform-runtime": "^7.22.9", - "@babel/preset-env": "^7.22.9", - "@babel/preset-react": "^7.22.5", - "@babel/preset-typescript": "^7.22.5", - "@babel/runtime": "^7.22.6", - "@babel/runtime-corejs3": "^7.22.6", - "@babel/traverse": "^7.22.8", - "@docusaurus/cssnano-preset": "3.5.2", - "@docusaurus/logger": "3.5.2", - "@docusaurus/mdx-loader": "3.5.2", - "@docusaurus/utils": "3.5.2", - "@docusaurus/utils-common": "3.5.2", - "@docusaurus/utils-validation": "3.5.2", - "autoprefixer": "^10.4.14", - "babel-loader": "^9.1.3", - "babel-plugin-dynamic-import-node": "^2.3.3", - "boxen": "^6.2.1", - "chalk": "^4.1.2", - "chokidar": "^3.5.3", - "clean-css": "^5.3.2", - "cli-table3": "^0.6.3", - "combine-promises": "^1.1.0", - "commander": "^5.1.0", - "copy-webpack-plugin": "^11.0.0", - "core-js": "^3.31.1", - "css-loader": "^6.8.1", - "css-minimizer-webpack-plugin": "^5.0.1", - "cssnano": "^6.1.2", - "del": "^6.1.1", - "detect-port": "^1.5.1", - "escape-html": "^1.0.3", - "eta": "^2.2.0", - "eval": "^0.1.8", - "file-loader": "^6.2.0", - "fs-extra": "^11.1.1", - "html-minifier-terser": "^7.2.0", - "html-tags": "^3.3.1", - "html-webpack-plugin": "^5.5.3", - "leven": "^3.1.0", - "lodash": "^4.17.21", - "mini-css-extract-plugin": "^2.7.6", - "p-map": "^4.0.0", - "postcss": "^8.4.26", - "postcss-loader": "^7.3.3", - "prompts": "^2.4.2", - "react-dev-utils": "^12.0.1", - "react-helmet-async": "^1.3.0", - "react-loadable": "npm:@docusaurus/react-loadable@6.0.0", - "react-loadable-ssr-addon-v5-slorber": "^1.0.1", - "react-router": "^5.3.4", - "react-router-config": "^5.1.1", - "react-router-dom": "^5.3.4", - "rtl-detect": "^1.0.4", - "semver": "^7.5.4", - "serve-handler": "^6.1.5", - "shelljs": "^0.8.5", - "terser-webpack-plugin": "^5.3.9", - "tslib": "^2.6.0", - "update-notifier": "^6.0.2", - "url-loader": "^4.1.1", - "webpack": "^5.88.1", - "webpack-bundle-analyzer": "^4.9.0", - "webpack-dev-server": "^4.15.1", - "webpack-merge": "^5.9.0", - "webpackbar": "^5.0.2" - }, - "bin": { - "docusaurus": "bin/docusaurus.mjs" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "@mdx-js/react": "^3.0.0", - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/cssnano-preset": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.5.2.tgz", - "integrity": "sha512-D3KiQXOMA8+O0tqORBrTOEQyQxNIfPm9jEaJoALjjSjc2M/ZAWcUfPQEnwr2JB2TadHw2gqWgpZckQmrVWkytA==", - "dependencies": { - "cssnano-preset-advanced": "^6.1.2", - "postcss": "^8.4.38", - "postcss-sort-media-queries": "^5.2.0", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - } - }, - "node_modules/@docusaurus/logger": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.5.2.tgz", - "integrity": "sha512-LHC540SGkeLfyT3RHK3gAMK6aS5TRqOD4R72BEU/DE2M/TY8WwEUAMY576UUc/oNJXv8pGhBmQB6N9p3pt8LQw==", - "dependencies": { - "chalk": "^4.1.2", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - } - }, - "node_modules/@docusaurus/mdx-loader": { - "version": "3.5.2", - "resolved": 
"https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.5.2.tgz", - "integrity": "sha512-ku3xO9vZdwpiMIVd8BzWV0DCqGEbCP5zs1iHfKX50vw6jX8vQo0ylYo1YJMZyz6e+JFJ17HYHT5FzVidz2IflA==", - "dependencies": { - "@docusaurus/logger": "3.5.2", - "@docusaurus/utils": "3.5.2", - "@docusaurus/utils-validation": "3.5.2", - "@mdx-js/mdx": "^3.0.0", - "@slorber/remark-comment": "^1.0.0", - "escape-html": "^1.0.3", - "estree-util-value-to-estree": "^3.0.1", - "file-loader": "^6.2.0", - "fs-extra": "^11.1.1", - "image-size": "^1.0.2", - "mdast-util-mdx": "^3.0.0", - "mdast-util-to-string": "^4.0.0", - "rehype-raw": "^7.0.0", - "remark-directive": "^3.0.0", - "remark-emoji": "^4.0.0", - "remark-frontmatter": "^5.0.0", - "remark-gfm": "^4.0.0", - "stringify-object": "^3.3.0", - "tslib": "^2.6.0", - "unified": "^11.0.3", - "unist-util-visit": "^5.0.0", - "url-loader": "^4.1.1", - "vfile": "^6.0.1", - "webpack": "^5.88.1" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/module-type-aliases": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.5.2.tgz", - "integrity": "sha512-Z+Xu3+2rvKef/YKTMxZHsEXp1y92ac0ngjDiExRdqGTmEKtCUpkbNYH8v5eXo5Ls+dnW88n6WTa+Q54kLOkwPg==", - "dependencies": { - "@docusaurus/types": "3.5.2", - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router-config": "*", - "@types/react-router-dom": "*", - "react-helmet-async": "*", - "react-loadable": "npm:@docusaurus/react-loadable@6.0.0" - }, - "peerDependencies": { - "react": "*", - "react-dom": "*" - } - }, - "node_modules/@docusaurus/plugin-content-blog": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.5.2.tgz", - "integrity": "sha512-R7ghWnMvjSf+aeNDH0K4fjyQnt5L0KzUEnUhmf1e3jZrv3wogeytZNN6n7X8yHcMsuZHPOrctQhXWnmxu+IRRg==", - "dependencies": { - "@docusaurus/core": "3.5.2", - "@docusaurus/logger": "3.5.2", - "@docusaurus/mdx-loader": "3.5.2", - "@docusaurus/theme-common": "3.5.2", - "@docusaurus/types": "3.5.2", - "@docusaurus/utils": "3.5.2", - "@docusaurus/utils-common": "3.5.2", - "@docusaurus/utils-validation": "3.5.2", - "cheerio": "1.0.0-rc.12", - "feed": "^4.2.2", - "fs-extra": "^11.1.1", - "lodash": "^4.17.21", - "reading-time": "^1.5.0", - "srcset": "^4.0.0", - "tslib": "^2.6.0", - "unist-util-visit": "^5.0.0", - "utility-types": "^3.10.0", - "webpack": "^5.88.1" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "@docusaurus/plugin-content-docs": "*", - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/plugin-content-docs": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.5.2.tgz", - "integrity": "sha512-Bt+OXn/CPtVqM3Di44vHjE7rPCEsRCB/DMo2qoOuozB9f7+lsdrHvD0QCHdBs0uhz6deYJDppAr2VgqybKPlVQ==", - "dependencies": { - "@docusaurus/core": "3.5.2", - "@docusaurus/logger": "3.5.2", - "@docusaurus/mdx-loader": "3.5.2", - "@docusaurus/module-type-aliases": "3.5.2", - "@docusaurus/theme-common": "3.5.2", - "@docusaurus/types": "3.5.2", - "@docusaurus/utils": "3.5.2", - "@docusaurus/utils-common": "3.5.2", - "@docusaurus/utils-validation": "3.5.2", - "@types/react-router-config": "^5.0.7", - "combine-promises": "^1.1.0", - "fs-extra": "^11.1.1", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "tslib": "^2.6.0", - "utility-types": "^3.10.0", - 
"webpack": "^5.88.1" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/plugin-content-pages": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.5.2.tgz", - "integrity": "sha512-WzhHjNpoQAUz/ueO10cnundRz+VUtkjFhhaQ9jApyv1a46FPURO4cef89pyNIOMny1fjDz/NUN2z6Yi+5WUrCw==", - "dependencies": { - "@docusaurus/core": "3.5.2", - "@docusaurus/mdx-loader": "3.5.2", - "@docusaurus/types": "3.5.2", - "@docusaurus/utils": "3.5.2", - "@docusaurus/utils-validation": "3.5.2", - "fs-extra": "^11.1.1", - "tslib": "^2.6.0", - "webpack": "^5.88.1" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/plugin-debug": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.5.2.tgz", - "integrity": "sha512-kBK6GlN0itCkrmHuCS6aX1wmoWc5wpd5KJlqQ1FyrF0cLDnvsYSnh7+ftdwzt7G6lGBho8lrVwkkL9/iQvaSOA==", - "dependencies": { - "@docusaurus/core": "3.5.2", - "@docusaurus/types": "3.5.2", - "@docusaurus/utils": "3.5.2", - "fs-extra": "^11.1.1", - "react-json-view-lite": "^1.2.0", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/plugin-google-analytics": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.5.2.tgz", - "integrity": "sha512-rjEkJH/tJ8OXRE9bwhV2mb/WP93V441rD6XnM6MIluu7rk8qg38iSxS43ga2V2Q/2ib53PcqbDEJDG/yWQRJhQ==", - "dependencies": { - "@docusaurus/core": "3.5.2", - "@docusaurus/types": "3.5.2", - "@docusaurus/utils-validation": "3.5.2", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/plugin-google-gtag": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.5.2.tgz", - "integrity": "sha512-lm8XL3xLkTPHFKKjLjEEAHUrW0SZBSHBE1I+i/tmYMBsjCcUB5UJ52geS5PSiOCFVR74tbPGcPHEV/gaaxFeSA==", - "dependencies": { - "@docusaurus/core": "3.5.2", - "@docusaurus/types": "3.5.2", - "@docusaurus/utils-validation": "3.5.2", - "@types/gtag.js": "^0.0.12", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/plugin-google-tag-manager": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.5.2.tgz", - "integrity": "sha512-QkpX68PMOMu10Mvgvr5CfZAzZQFx8WLlOiUQ/Qmmcl6mjGK6H21WLT5x7xDmcpCoKA/3CegsqIqBR+nA137lQg==", - "dependencies": { - "@docusaurus/core": "3.5.2", - "@docusaurus/types": "3.5.2", - "@docusaurus/utils-validation": "3.5.2", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/plugin-sitemap": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.5.2.tgz", - "integrity": "sha512-DnlqYyRAdQ4NHY28TfHuVk414ft2uruP4QWCH//jzpHjqvKyXjj2fmDtI8RPUBh9K8iZKFMHRnLtzJKySPWvFA==", - "dependencies": { - "@docusaurus/core": "3.5.2", - "@docusaurus/logger": "3.5.2", - 
"@docusaurus/types": "3.5.2", - "@docusaurus/utils": "3.5.2", - "@docusaurus/utils-common": "3.5.2", - "@docusaurus/utils-validation": "3.5.2", - "fs-extra": "^11.1.1", - "sitemap": "^7.1.1", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/preset-classic": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.5.2.tgz", - "integrity": "sha512-3ihfXQ95aOHiLB5uCu+9PRy2gZCeSZoDcqpnDvf3B+sTrMvMTr8qRUzBvWkoIqc82yG5prCboRjk1SVILKx6sg==", - "dependencies": { - "@docusaurus/core": "3.5.2", - "@docusaurus/plugin-content-blog": "3.5.2", - "@docusaurus/plugin-content-docs": "3.5.2", - "@docusaurus/plugin-content-pages": "3.5.2", - "@docusaurus/plugin-debug": "3.5.2", - "@docusaurus/plugin-google-analytics": "3.5.2", - "@docusaurus/plugin-google-gtag": "3.5.2", - "@docusaurus/plugin-google-tag-manager": "3.5.2", - "@docusaurus/plugin-sitemap": "3.5.2", - "@docusaurus/theme-classic": "3.5.2", - "@docusaurus/theme-common": "3.5.2", - "@docusaurus/theme-search-algolia": "3.5.2", - "@docusaurus/types": "3.5.2" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/theme-classic": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.5.2.tgz", - "integrity": "sha512-XRpinSix3NBv95Rk7xeMF9k4safMkwnpSgThn0UNQNumKvmcIYjfkwfh2BhwYh/BxMXQHJ/PdmNh22TQFpIaYg==", - "dependencies": { - "@docusaurus/core": "3.5.2", - "@docusaurus/mdx-loader": "3.5.2", - "@docusaurus/module-type-aliases": "3.5.2", - "@docusaurus/plugin-content-blog": "3.5.2", - "@docusaurus/plugin-content-docs": "3.5.2", - "@docusaurus/plugin-content-pages": "3.5.2", - "@docusaurus/theme-common": "3.5.2", - "@docusaurus/theme-translations": "3.5.2", - "@docusaurus/types": "3.5.2", - "@docusaurus/utils": "3.5.2", - "@docusaurus/utils-common": "3.5.2", - "@docusaurus/utils-validation": "3.5.2", - "@mdx-js/react": "^3.0.0", - "clsx": "^2.0.0", - "copy-text-to-clipboard": "^3.2.0", - "infima": "0.2.0-alpha.44", - "lodash": "^4.17.21", - "nprogress": "^0.2.0", - "postcss": "^8.4.26", - "prism-react-renderer": "^2.3.0", - "prismjs": "^1.29.0", - "react-router-dom": "^5.3.4", - "rtlcss": "^4.1.0", - "tslib": "^2.6.0", - "utility-types": "^3.10.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/theme-common": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.5.2.tgz", - "integrity": "sha512-QXqlm9S6x9Ibwjs7I2yEDgsCocp708DrCrgHgKwg2n2AY0YQ6IjU0gAK35lHRLOvAoJUfCKpQAwUykB0R7+Eew==", - "dependencies": { - "@docusaurus/mdx-loader": "3.5.2", - "@docusaurus/module-type-aliases": "3.5.2", - "@docusaurus/utils": "3.5.2", - "@docusaurus/utils-common": "3.5.2", - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router-config": "*", - "clsx": "^2.0.0", - "parse-numeric-range": "^1.3.0", - "prism-react-renderer": "^2.3.0", - "tslib": "^2.6.0", - "utility-types": "^3.10.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "@docusaurus/plugin-content-docs": "*", - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia": { - "version": "3.5.2", - "resolved": 
"https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.5.2.tgz", - "integrity": "sha512-qW53kp3VzMnEqZGjakaV90sst3iN1o32PH+nawv1uepROO8aEGxptcq2R5rsv7aBShSRbZwIobdvSYKsZ5pqvA==", - "dependencies": { - "@docsearch/react": "^3.5.2", - "@docusaurus/core": "3.5.2", - "@docusaurus/logger": "3.5.2", - "@docusaurus/plugin-content-docs": "3.5.2", - "@docusaurus/theme-common": "3.5.2", - "@docusaurus/theme-translations": "3.5.2", - "@docusaurus/utils": "3.5.2", - "@docusaurus/utils-validation": "3.5.2", - "algoliasearch": "^4.18.0", - "algoliasearch-helper": "^3.13.3", - "clsx": "^2.0.0", - "eta": "^2.2.0", - "fs-extra": "^11.1.1", - "lodash": "^4.17.21", - "tslib": "^2.6.0", - "utility-types": "^3.10.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/theme-translations": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.5.2.tgz", - "integrity": "sha512-GPZLcu4aT1EmqSTmbdpVrDENGR2yObFEX8ssEFYTCiAIVc0EihNSdOIBTazUvgNqwvnoU1A8vIs1xyzc3LITTw==", - "dependencies": { - "fs-extra": "^11.1.1", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - } - }, - "node_modules/@docusaurus/types": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.5.2.tgz", - "integrity": "sha512-N6GntLXoLVUwkZw7zCxwy9QiuEXIcTVzA9AkmNw16oc0AP3SXLrMmDMMBIfgqwuKWa6Ox6epHol9kMtJqekACw==", - "dependencies": { - "@mdx-js/mdx": "^3.0.0", - "@types/history": "^4.7.11", - "@types/react": "*", - "commander": "^5.1.0", - "joi": "^17.9.2", - "react-helmet-async": "^1.3.0", - "utility-types": "^3.10.0", - "webpack": "^5.88.1", - "webpack-merge": "^5.9.0" - }, - "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/@docusaurus/utils": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.5.2.tgz", - "integrity": "sha512-33QvcNFh+Gv+C2dP9Y9xWEzMgf3JzrpL2nW9PopidiohS1nDcyknKRx2DWaFvyVTTYIkkABVSr073VTj/NITNA==", - "dependencies": { - "@docusaurus/logger": "3.5.2", - "@docusaurus/utils-common": "3.5.2", - "@svgr/webpack": "^8.1.0", - "escape-string-regexp": "^4.0.0", - "file-loader": "^6.2.0", - "fs-extra": "^11.1.1", - "github-slugger": "^1.5.0", - "globby": "^11.1.0", - "gray-matter": "^4.0.3", - "jiti": "^1.20.0", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "micromatch": "^4.0.5", - "prompts": "^2.4.2", - "resolve-pathname": "^3.0.0", - "shelljs": "^0.8.5", - "tslib": "^2.6.0", - "url-loader": "^4.1.1", - "utility-types": "^3.10.0", - "webpack": "^5.88.1" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "@docusaurus/types": "*" - }, - "peerDependenciesMeta": { - "@docusaurus/types": { - "optional": true - } - } - }, - "node_modules/@docusaurus/utils-common": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.5.2.tgz", - "integrity": "sha512-i0AZjHiRgJU6d7faQngIhuHKNrszpL/SHQPgF1zH4H+Ij6E9NBYGy6pkcGWToIv7IVPbs+pQLh1P3whn0gWXVg==", - "dependencies": { - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "@docusaurus/types": "*" - }, - "peerDependenciesMeta": { - "@docusaurus/types": { - "optional": true - } - } - }, - "node_modules/@docusaurus/utils-validation": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.5.2.tgz", - "integrity": 
"sha512-m+Foq7augzXqB6HufdS139PFxDC5d5q2QKZy8q0qYYvGdI6nnlNsGH4cIGsgBnV7smz+mopl3g4asbSDvMV0jA==", - "dependencies": { - "@docusaurus/logger": "3.5.2", - "@docusaurus/utils": "3.5.2", - "@docusaurus/utils-common": "3.5.2", - "fs-extra": "^11.2.0", - "joi": "^17.9.2", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - } - }, - "node_modules/@hapi/hoek": { - "version": "9.3.0", - "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", - "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==" - }, - "node_modules/@hapi/topo": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", - "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", - "dependencies": { - "@hapi/hoek": "^9.0.0" - } - }, - "node_modules/@jest/schemas": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", - "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", - "dependencies": { - "@sinclair/typebox": "^0.27.8" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/types": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", - "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", - "dependencies": { - "@jest/schemas": "^29.6.3", - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^17.0.8", - "chalk": "^4.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", - "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", - "dependencies": { - "@jridgewell/set-array": "^1.2.1", - "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.24" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", - "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/set-array": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", - "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/source-map": { - "version": "0.3.6", - "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.6.tgz", - "integrity": "sha512-1ZJTZebgqllO79ue2bm3rIGud/bOe0pP5BjSRCRxxYkEZS8STV7zN84UBbiYu7jy+eCKSnVIUgoWWE/tt+shMQ==", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.5", - "@jridgewell/trace-mapping": "^0.3.25" - } - }, - "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.15", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", - "integrity": 
"sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" - }, - "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.25", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", - "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", - "dependencies": { - "@jridgewell/resolve-uri": "^3.1.0", - "@jridgewell/sourcemap-codec": "^1.4.14" - } - }, - "node_modules/@leichtgewicht/ip-codec": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz", - "integrity": "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==" - }, - "node_modules/@mdx-js/mdx": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-3.0.1.tgz", - "integrity": "sha512-eIQ4QTrOWyL3LWEe/bu6Taqzq2HQvHcyTMaOrI95P2/LmJE7AsfPfgJGuFLPVqBUE1BC1rik3VIhU+s9u72arA==", - "dependencies": { - "@types/estree": "^1.0.0", - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdx": "^2.0.0", - "collapse-white-space": "^2.0.0", - "devlop": "^1.0.0", - "estree-util-build-jsx": "^3.0.0", - "estree-util-is-identifier-name": "^3.0.0", - "estree-util-to-js": "^2.0.0", - "estree-walker": "^3.0.0", - "hast-util-to-estree": "^3.0.0", - "hast-util-to-jsx-runtime": "^2.0.0", - "markdown-extensions": "^2.0.0", - "periscopic": "^3.0.0", - "remark-mdx": "^3.0.0", - "remark-parse": "^11.0.0", - "remark-rehype": "^11.0.0", - "source-map": "^0.7.0", - "unified": "^11.0.0", - "unist-util-position-from-estree": "^2.0.0", - "unist-util-stringify-position": "^4.0.0", - "unist-util-visit": "^5.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/@mdx-js/react": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.0.1.tgz", - "integrity": "sha512-9ZrPIU4MGf6et1m1ov3zKf+q9+deetI51zprKB1D/z3NOb+rUxxtEl3mCjW5wTGh6VhRdwPueh1oRzi6ezkA8A==", - "dependencies": { - "@types/mdx": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - }, - "peerDependencies": { - "@types/react": ">=16", - "react": ">=16" - } - }, - "node_modules/@nodelib/fs.scandir": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", - "dependencies": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.stat": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.walk": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", - "dependencies": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@pnpm/config.env-replace": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz", - 
"integrity": "sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==", - "engines": { - "node": ">=12.22.0" - } - }, - "node_modules/@pnpm/network.ca-file": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz", - "integrity": "sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==", - "dependencies": { - "graceful-fs": "4.2.10" - }, - "engines": { - "node": ">=12.22.0" - } - }, - "node_modules/@pnpm/network.ca-file/node_modules/graceful-fs": { - "version": "4.2.10", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", - "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==" - }, - "node_modules/@pnpm/npm-conf": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/@pnpm/npm-conf/-/npm-conf-2.2.2.tgz", - "integrity": "sha512-UA91GwWPhFExt3IizW6bOeY/pQ0BkuNwKjk9iQW9KqxluGCrg4VenZ0/L+2Y0+ZOtme72EVvg6v0zo3AMQRCeA==", - "dependencies": { - "@pnpm/config.env-replace": "^1.1.0", - "@pnpm/network.ca-file": "^1.0.1", - "config-chain": "^1.1.11" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@polka/url": { - "version": "1.0.0-next.25", - "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.25.tgz", - "integrity": "sha512-j7P6Rgr3mmtdkeDGTe0E/aYyWEWVtc5yFXtHCRHs28/jptDEWfaVOc5T7cblqy1XKPPfCxJc/8DwQ5YgLOZOVQ==" - }, - "node_modules/@sideway/address": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.5.tgz", - "integrity": "sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q==", - "dependencies": { - "@hapi/hoek": "^9.0.0" - } - }, - "node_modules/@sideway/formula": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", - "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==" - }, - "node_modules/@sideway/pinpoint": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", - "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==" - }, - "node_modules/@sinclair/typebox": { - "version": "0.27.8", - "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", - "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" - }, - "node_modules/@sindresorhus/is": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz", - "integrity": "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sindresorhus/is?sponsor=1" - } - }, - "node_modules/@slorber/remark-comment": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@slorber/remark-comment/-/remark-comment-1.0.0.tgz", - "integrity": "sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA==", - "dependencies": { - "micromark-factory-space": "^1.0.0", - "micromark-util-character": "^1.1.0", - "micromark-util-symbol": "^1.0.1" - } - }, - "node_modules/@svgr/babel-plugin-add-jsx-attribute": { - "version": "8.0.0", - "resolved": 
"https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-8.0.0.tgz", - "integrity": "sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g==", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz", - "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz", - "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-8.0.0.tgz", - "integrity": "sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ==", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-svg-dynamic-title": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-8.0.0.tgz", - "integrity": "sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og==", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-svg-em-dimensions": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-8.0.0.tgz", - "integrity": "sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g==", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-transform-react-native-svg": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-8.1.0.tgz", - "integrity": "sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q==", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - 
"node_modules/@svgr/babel-plugin-transform-svg-component": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-8.0.0.tgz", - "integrity": "sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw==", - "engines": { - "node": ">=12" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-preset": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-8.1.0.tgz", - "integrity": "sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug==", - "dependencies": { - "@svgr/babel-plugin-add-jsx-attribute": "8.0.0", - "@svgr/babel-plugin-remove-jsx-attribute": "8.0.0", - "@svgr/babel-plugin-remove-jsx-empty-expression": "8.0.0", - "@svgr/babel-plugin-replace-jsx-attribute-value": "8.0.0", - "@svgr/babel-plugin-svg-dynamic-title": "8.0.0", - "@svgr/babel-plugin-svg-em-dimensions": "8.0.0", - "@svgr/babel-plugin-transform-react-native-svg": "8.1.0", - "@svgr/babel-plugin-transform-svg-component": "8.0.0" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/core": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@svgr/core/-/core-8.1.0.tgz", - "integrity": "sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==", - "dependencies": { - "@babel/core": "^7.21.3", - "@svgr/babel-preset": "8.1.0", - "camelcase": "^6.2.0", - "cosmiconfig": "^8.1.3", - "snake-case": "^3.0.4" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/hast-util-to-babel-ast": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-8.0.0.tgz", - "integrity": "sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q==", - "dependencies": { - "@babel/types": "^7.21.3", - "entities": "^4.4.0" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/plugin-jsx": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-8.1.0.tgz", - "integrity": "sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA==", - "dependencies": { - "@babel/core": "^7.21.3", - "@svgr/babel-preset": "8.1.0", - "@svgr/hast-util-to-babel-ast": "8.0.0", - "svg-parser": "^2.0.4" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@svgr/core": "*" - } - }, - "node_modules/@svgr/plugin-svgo": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-8.1.0.tgz", - "integrity": "sha512-Ywtl837OGO9pTLIN/onoWLmDQ4zFUycI1g76vuKGEz6evR/ZTJlJuz3G/fIkb6OVBJ2g0o6CGJzaEjfmEo3AHA==", - "dependencies": { - "cosmiconfig": "^8.1.3", - "deepmerge": "^4.3.1", - "svgo": "^3.0.2" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - 
"peerDependencies": { - "@svgr/core": "*" - } - }, - "node_modules/@svgr/webpack": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-8.1.0.tgz", - "integrity": "sha512-LnhVjMWyMQV9ZmeEy26maJk+8HTIbd59cH4F2MJ439k9DqejRisfFNGAPvRYlKETuh9LrImlS8aKsBgKjMA8WA==", - "dependencies": { - "@babel/core": "^7.21.3", - "@babel/plugin-transform-react-constant-elements": "^7.21.3", - "@babel/preset-env": "^7.20.2", - "@babel/preset-react": "^7.18.6", - "@babel/preset-typescript": "^7.21.0", - "@svgr/core": "8.1.0", - "@svgr/plugin-jsx": "8.1.0", - "@svgr/plugin-svgo": "8.1.0" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@szmarczak/http-timer": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz", - "integrity": "sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==", - "dependencies": { - "defer-to-connect": "^2.0.1" - }, - "engines": { - "node": ">=14.16" - } - }, - "node_modules/@trysound/sax": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", - "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/@types/acorn": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/@types/acorn/-/acorn-4.0.6.tgz", - "integrity": "sha512-veQTnWP+1D/xbxVrPC3zHnCZRjSrKfhbMUlEA43iMZLu7EsnTtkJklIuwrCPbOi8YkvDQAiW05VQQFvvz9oieQ==", - "dependencies": { - "@types/estree": "*" - } - }, - "node_modules/@types/body-parser": { - "version": "1.19.5", - "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.5.tgz", - "integrity": "sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==", - "dependencies": { - "@types/connect": "*", - "@types/node": "*" - } - }, - "node_modules/@types/bonjour": { - "version": "3.5.13", - "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.13.tgz", - "integrity": "sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/connect": { - "version": "3.4.38", - "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", - "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/connect-history-api-fallback": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.4.tgz", - "integrity": "sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==", - "dependencies": { - "@types/express-serve-static-core": "*", - "@types/node": "*" - } - }, - "node_modules/@types/debug": { - "version": "4.1.12", - "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", - "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", - "dependencies": { - "@types/ms": "*" - } - }, - "node_modules/@types/estree": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", - "integrity": 
"sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" - }, - "node_modules/@types/estree-jsx": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", - "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", - "dependencies": { - "@types/estree": "*" - } - }, - "node_modules/@types/express": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.21.tgz", - "integrity": "sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==", - "dependencies": { - "@types/body-parser": "*", - "@types/express-serve-static-core": "^4.17.33", - "@types/qs": "*", - "@types/serve-static": "*" - } - }, - "node_modules/@types/express-serve-static-core": { - "version": "4.19.5", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.19.5.tgz", - "integrity": "sha512-y6W03tvrACO72aijJ5uF02FRq5cgDR9lUxddQ8vyF+GvmjJQqbzDcJngEjURc+ZsG31VI3hODNZJ2URj86pzmg==", - "dependencies": { - "@types/node": "*", - "@types/qs": "*", - "@types/range-parser": "*", - "@types/send": "*" - } - }, - "node_modules/@types/gtag.js": { - "version": "0.0.12", - "resolved": "https://registry.npmjs.org/@types/gtag.js/-/gtag.js-0.0.12.tgz", - "integrity": "sha512-YQV9bUsemkzG81Ea295/nF/5GijnD2Af7QhEofh7xu+kvCN6RdodgNwwGWXB5GMI3NoyvQo0odNctoH/qLMIpg==" - }, - "node_modules/@types/hast": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", - "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", - "dependencies": { - "@types/unist": "*" - } - }, - "node_modules/@types/history": { - "version": "4.7.11", - "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", - "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==" - }, - "node_modules/@types/html-minifier-terser": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", - "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==" - }, - "node_modules/@types/http-cache-semantics": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz", - "integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==" - }, - "node_modules/@types/http-errors": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.4.tgz", - "integrity": "sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==" - }, - "node_modules/@types/http-proxy": { - "version": "1.17.14", - "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.14.tgz", - "integrity": "sha512-SSrD0c1OQzlFX7pGu1eXxSEjemej64aaNPRhhVYUGqXh0BtldAAx37MG8btcumvpgKyZp1F5Gn3JkktdxiFv6w==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/istanbul-lib-coverage": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", - "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==" - }, - 
"node_modules/@types/istanbul-lib-report": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", - "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", - "dependencies": { - "@types/istanbul-lib-coverage": "*" - } - }, - "node_modules/@types/istanbul-reports": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", - "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", - "dependencies": { - "@types/istanbul-lib-report": "*" - } - }, - "node_modules/@types/json-schema": { - "version": "7.0.15", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", - "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==" - }, - "node_modules/@types/mdast": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", - "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", - "dependencies": { - "@types/unist": "*" - } - }, - "node_modules/@types/mdx": { - "version": "2.0.13", - "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", - "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==" - }, - "node_modules/@types/mime": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz", - "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==" - }, - "node_modules/@types/ms": { - "version": "0.7.34", - "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz", - "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==" - }, - "node_modules/@types/node": { - "version": "20.14.7", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.7.tgz", - "integrity": "sha512-uTr2m2IbJJucF3KUxgnGOZvYbN0QgkGyWxG6973HCpMYFy2KfcgYuIwkJQMQkt1VbBMlvWRbpshFTLxnxCZjKQ==", - "dependencies": { - "undici-types": "~5.26.4" - } - }, - "node_modules/@types/node-forge": { - "version": "1.3.11", - "resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-1.3.11.tgz", - "integrity": "sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/parse-json": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", - "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==" - }, - "node_modules/@types/prismjs": { - "version": "1.26.4", - "resolved": "https://registry.npmjs.org/@types/prismjs/-/prismjs-1.26.4.tgz", - "integrity": "sha512-rlAnzkW2sZOjbqZ743IHUhFcvzaGbqijwOu8QZnZCjfQzBqFE3s4lOTJEsxikImav9uzz/42I+O7YUs1mWgMlg==" - }, - "node_modules/@types/prop-types": { - "version": "15.7.12", - "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.12.tgz", - "integrity": "sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q==" - }, - "node_modules/@types/qs": { - "version": "6.9.15", - "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.15.tgz", - "integrity": 
"sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg==" - }, - "node_modules/@types/range-parser": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", - "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==" - }, - "node_modules/@types/react": { - "version": "18.3.3", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.3.tgz", - "integrity": "sha512-hti/R0pS0q1/xx+TsI73XIqk26eBsISZ2R0wUijXIngRK9R/e7Xw/cXVxQK7R5JjW+SV4zGcn5hXjudkN/pLIw==", - "dependencies": { - "@types/prop-types": "*", - "csstype": "^3.0.2" - } - }, - "node_modules/@types/react-router": { - "version": "5.1.20", - "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", - "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", - "dependencies": { - "@types/history": "^4.7.11", - "@types/react": "*" - } - }, - "node_modules/@types/react-router-config": { - "version": "5.0.11", - "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.11.tgz", - "integrity": "sha512-WmSAg7WgqW7m4x8Mt4N6ZyKz0BubSj/2tVUMsAHp+Yd2AMwcSbeFq9WympT19p5heCFmF97R9eD5uUR/t4HEqw==", - "dependencies": { - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router": "^5.1.0" - } - }, - "node_modules/@types/react-router-dom": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.3.tgz", - "integrity": "sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==", - "dependencies": { - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router": "*" - } - }, - "node_modules/@types/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", - "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" - }, - "node_modules/@types/sax": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.7.tgz", - "integrity": "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/send": { - "version": "0.17.4", - "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.4.tgz", - "integrity": "sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==", - "dependencies": { - "@types/mime": "^1", - "@types/node": "*" - } - }, - "node_modules/@types/serve-index": { - "version": "1.9.4", - "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.4.tgz", - "integrity": "sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug==", - "dependencies": { - "@types/express": "*" - } - }, - "node_modules/@types/serve-static": { - "version": "1.15.7", - "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.7.tgz", - "integrity": "sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==", - "dependencies": { - "@types/http-errors": "*", - "@types/node": "*", - "@types/send": "*" - } - }, - "node_modules/@types/sockjs": { - "version": "0.3.36", - "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.36.tgz", - "integrity": 
"sha512-MK9V6NzAS1+Ud7JV9lJLFqW85VbC9dq3LmwZCuBe4wBDgKC0Kj/jd8Xl+nSviU+Qc3+m7umHHyHg//2KSa0a0Q==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/unist": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", - "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==" - }, - "node_modules/@types/ws": { - "version": "8.5.10", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.10.tgz", - "integrity": "sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/yargs": { - "version": "17.0.32", - "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.32.tgz", - "integrity": "sha512-xQ67Yc/laOG5uMfX/093MRlGGCIBzZMarVa+gfNKJxWAIgykYpVGkBdbqEzGDDfCrVUj6Hiff4mTZ5BA6TmAog==", - "dependencies": { - "@types/yargs-parser": "*" - } - }, - "node_modules/@types/yargs-parser": { - "version": "21.0.3", - "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", - "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==" - }, - "node_modules/@ungap/structured-clone": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", - "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==" - }, - "node_modules/@webassemblyjs/ast": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.12.1.tgz", - "integrity": "sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg==", - "dependencies": { - "@webassemblyjs/helper-numbers": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6" - } - }, - "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", - "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==" - }, - "node_modules/@webassemblyjs/helper-api-error": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", - "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==" - }, - "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz", - "integrity": "sha512-nzJwQw99DNDKr9BVCOZcLuJJUlqkJh+kVzVl6Fmq/tI5ZtEyWT1KZMyOXltXLZJmDtvLCDgwsyrkohEtopTXCw==" - }, - "node_modules/@webassemblyjs/helper-numbers": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", - "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", - "dependencies": { - "@webassemblyjs/floating-point-hex-parser": "1.11.6", - "@webassemblyjs/helper-api-error": "1.11.6", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", - "integrity": 
"sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==" - }, - "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz", - "integrity": "sha512-Jif4vfB6FJlUlSbgEMHUyk1j234GTNG9dBJ4XJdOySoj518Xj0oGsNi59cUQF4RRMS9ouBUxDDdyBVfPTypa5g==", - "dependencies": { - "@webassemblyjs/ast": "1.12.1", - "@webassemblyjs/helper-buffer": "1.12.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/wasm-gen": "1.12.1" - } - }, - "node_modules/@webassemblyjs/ieee754": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", - "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", - "dependencies": { - "@xtuc/ieee754": "^1.2.0" - } - }, - "node_modules/@webassemblyjs/leb128": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", - "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", - "dependencies": { - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/utf8": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", - "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==" - }, - "node_modules/@webassemblyjs/wasm-edit": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.12.1.tgz", - "integrity": "sha512-1DuwbVvADvS5mGnXbE+c9NfA8QRcZ6iKquqjjmR10k6o+zzsRVesil54DKexiowcFCPdr/Q0qaMgB01+SQ1u6g==", - "dependencies": { - "@webassemblyjs/ast": "1.12.1", - "@webassemblyjs/helper-buffer": "1.12.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/helper-wasm-section": "1.12.1", - "@webassemblyjs/wasm-gen": "1.12.1", - "@webassemblyjs/wasm-opt": "1.12.1", - "@webassemblyjs/wasm-parser": "1.12.1", - "@webassemblyjs/wast-printer": "1.12.1" - } - }, - "node_modules/@webassemblyjs/wasm-gen": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.12.1.tgz", - "integrity": "sha512-TDq4Ojh9fcohAw6OIMXqiIcTq5KUXTGRkVxbSo1hQnSy6lAM5GSdfwWeSxpAo0YzgsgF182E/U0mDNhuA0tW7w==", - "dependencies": { - "@webassemblyjs/ast": "1.12.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/ieee754": "1.11.6", - "@webassemblyjs/leb128": "1.11.6", - "@webassemblyjs/utf8": "1.11.6" - } - }, - "node_modules/@webassemblyjs/wasm-opt": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.12.1.tgz", - "integrity": "sha512-Jg99j/2gG2iaz3hijw857AVYekZe2SAskcqlWIZXjji5WStnOpVoat3gQfT/Q5tb2djnCjBtMocY/Su1GfxPBg==", - "dependencies": { - "@webassemblyjs/ast": "1.12.1", - "@webassemblyjs/helper-buffer": "1.12.1", - "@webassemblyjs/wasm-gen": "1.12.1", - "@webassemblyjs/wasm-parser": "1.12.1" - } - }, - "node_modules/@webassemblyjs/wasm-parser": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.12.1.tgz", - "integrity": "sha512-xikIi7c2FHXysxXe3COrVUPSheuBtpcfhbpFj4gmu7KRLYOzANztwUU0IbsqvMqzuNK2+glRGWCEqZo1WCLyAQ==", - "dependencies": { - "@webassemblyjs/ast": "1.12.1", - "@webassemblyjs/helper-api-error": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - 
"@webassemblyjs/ieee754": "1.11.6", - "@webassemblyjs/leb128": "1.11.6", - "@webassemblyjs/utf8": "1.11.6" - } - }, - "node_modules/@webassemblyjs/wast-printer": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.12.1.tgz", - "integrity": "sha512-+X4WAlOisVWQMikjbcvY2e0rwPsKQ9F688lksZhBcPycBBuii3O7m8FACbDMWDojpAqvjIncrG8J0XHKyQfVeA==", - "dependencies": { - "@webassemblyjs/ast": "1.12.1", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@xtuc/ieee754": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", - "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" - }, - "node_modules/@xtuc/long": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", - "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" - }, - "node_modules/accepts": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", - "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", - "dependencies": { - "mime-types": "~2.1.34", - "negotiator": "0.6.3" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/accepts/node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/accepts/node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/acorn": { - "version": "8.12.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.12.0.tgz", - "integrity": "sha512-RTvkC4w+KNXrM39/lWCUaG0IbRkWdCv7W/IOW9oU6SawyxulvkQy5HQPVTKxEjczcUvapcrw3cFx/60VN/NRNw==", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/acorn-import-attributes": { - "version": "1.9.5", - "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz", - "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==", - "peerDependencies": { - "acorn": "^8" - } - }, - "node_modules/acorn-jsx": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", - "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", - "peerDependencies": { - "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" - } - }, - "node_modules/acorn-walk": { - "version": "8.3.3", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.3.tgz", - "integrity": "sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw==", - "dependencies": { - "acorn": "^8.11.0" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/address": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz", - "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==", - "engines": { - "node": 
">= 10.0.0" - } - }, - "node_modules/aggregate-error": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", - "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", - "dependencies": { - "clean-stack": "^2.0.0", - "indent-string": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ajv": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.16.0.tgz", - "integrity": "sha512-F0twR8U1ZU67JIEtekUcLkXkoO5mMMmgGD8sK/xUFzJ805jxHQl92hImFAqqXMyMYjSPOyUPAwHYhB72g5sTXw==", - "dependencies": { - "fast-deep-equal": "^3.1.3", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.4.1" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ajv-formats": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", - "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", - "dependencies": { - "ajv": "^8.0.0" - }, - "peerDependencies": { - "ajv": "^8.0.0" - }, - "peerDependenciesMeta": { - "ajv": { - "optional": true - } - } - }, - "node_modules/ajv-keywords": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/algoliasearch": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.24.0.tgz", - "integrity": "sha512-bf0QV/9jVejssFBmz2HQLxUadxk574t4iwjCKp5E7NBzwKkrDEhKPISIIjAU/p6K5qDx3qoeh4+26zWN1jmw3g==", - "dependencies": { - "@algolia/cache-browser-local-storage": "4.24.0", - "@algolia/cache-common": "4.24.0", - "@algolia/cache-in-memory": "4.24.0", - "@algolia/client-account": "4.24.0", - "@algolia/client-analytics": "4.24.0", - "@algolia/client-common": "4.24.0", - "@algolia/client-personalization": "4.24.0", - "@algolia/client-search": "4.24.0", - "@algolia/logger-common": "4.24.0", - "@algolia/logger-console": "4.24.0", - "@algolia/recommend": "4.24.0", - "@algolia/requester-browser-xhr": "4.24.0", - "@algolia/requester-common": "4.24.0", - "@algolia/requester-node-http": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/algoliasearch-helper": { - "version": "3.22.5", - "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.22.5.tgz", - "integrity": "sha512-lWvhdnc+aKOKx8jyA3bsdEgHzm/sglC4cYdMG4xSQyRiPLJVJtH/IVYZG3Hp6PkTEhQqhyVYkeP9z2IlcHJsWw==", - "dependencies": { - "@algolia/events": "^4.0.1" - }, - "peerDependencies": { - "algoliasearch": ">= 3.1 < 6" - } - }, - "node_modules/algoliasearch/node_modules/@algolia/client-common": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", - "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", - "dependencies": { - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/algoliasearch/node_modules/@algolia/client-search": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", - "integrity": 
"sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", - "dependencies": { - "@algolia/client-common": "4.24.0", - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/algoliasearch/node_modules/@algolia/requester-browser-xhr": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz", - "integrity": "sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==", - "dependencies": { - "@algolia/requester-common": "4.24.0" - } - }, - "node_modules/algoliasearch/node_modules/@algolia/requester-node-http": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", - "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", - "dependencies": { - "@algolia/requester-common": "4.24.0" - } - }, - "node_modules/ansi-align": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", - "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", - "dependencies": { - "string-width": "^4.1.0" - } - }, - "node_modules/ansi-align/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "node_modules/ansi-align/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-html-community": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", - "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", - "engines": [ - "node >= 0.8.0" - ], - "bin": { - "ansi-html": "bin/ansi-html" - } - }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/anymatch": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", - "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", - "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/arg": { - "version": "5.0.2", - "resolved": 
"https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", - "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==" - }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, - "node_modules/array-flatten": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" - }, - "node_modules/array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "engines": { - "node": ">=8" - } - }, - "node_modules/astring": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz", - "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==", - "bin": { - "astring": "bin/astring" - } - }, - "node_modules/at-least-node": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", - "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/autoprefixer": { - "version": "10.4.20", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz", - "integrity": "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/autoprefixer" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "dependencies": { - "browserslist": "^4.23.3", - "caniuse-lite": "^1.0.30001646", - "fraction.js": "^4.3.7", - "normalize-range": "^0.1.2", - "picocolors": "^1.0.1", - "postcss-value-parser": "^4.2.0" - }, - "bin": { - "autoprefixer": "bin/autoprefixer" - }, - "engines": { - "node": "^10 || ^12 || >=14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/babel-loader": { - "version": "9.1.3", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.1.3.tgz", - "integrity": "sha512-xG3ST4DglodGf8qSwv0MdeWLhrDsw/32QMdTO5T1ZIp9gQur0HkCyFs7Awskr10JKXFXwpAhiCuYX5oGXnRGbw==", - "dependencies": { - "find-cache-dir": "^4.0.0", - "schema-utils": "^4.0.0" - }, - "engines": { - "node": ">= 14.15.0" - }, - "peerDependencies": { - "@babel/core": "^7.12.0", - "webpack": ">=5" - } - }, - "node_modules/babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "dependencies": { - "object.assign": "^4.1.0" - } - }, - "node_modules/babel-plugin-polyfill-corejs2": { - "version": "0.4.11", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.11.tgz", - "integrity": 
"sha512-sMEJ27L0gRHShOh5G54uAAPaiCOygY/5ratXuiyb2G46FmlSpc9eFCzYVyDiPxfNbwzA7mYahmjQc5q+CZQ09Q==", - "dependencies": { - "@babel/compat-data": "^7.22.6", - "@babel/helper-define-polyfill-provider": "^0.6.2", - "semver": "^6.3.1" - }, - "peerDependencies": { - "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" - } - }, - "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/babel-plugin-polyfill-corejs3": { - "version": "0.10.4", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.4.tgz", - "integrity": "sha512-25J6I8NGfa5YkCDogHRID3fVCadIR8/pGl1/spvCkzb6lVn6SR3ojpx9nOn9iEBcUsjY24AmdKm5khcfKdylcg==", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.6.1", - "core-js-compat": "^3.36.1" - }, - "peerDependencies": { - "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" - } - }, - "node_modules/babel-plugin-polyfill-regenerator": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.2.tgz", - "integrity": "sha512-2R25rQZWP63nGwaAswvDazbPXfrM3HwVoBXK6HcqeKrSrL/JqcC/rDcf95l4r7LXLyxDXc8uQDa064GubtCABg==", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.6.2" - }, - "peerDependencies": { - "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" - } - }, - "node_modules/bail": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", - "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" - }, - "node_modules/batch": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", - "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==" - }, - "node_modules/big.js": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", - "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", - "engines": { - "node": "*" - } - }, - "node_modules/binary-extensions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", - "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/body-parser": { - "version": "1.20.3", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", - "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", - "dependencies": { - "bytes": "3.1.2", - "content-type": "~1.0.5", - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "on-finished": "2.4.1", - "qs": "6.13.0", - "raw-body": 
"2.5.2", - "type-is": "~1.6.18", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8", - "npm": "1.2.8000 || >= 1.4.16" - } - }, - "node_modules/body-parser/node_modules/bytes": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/body-parser/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/body-parser/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - }, - "node_modules/bonjour-service": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.2.1.tgz", - "integrity": "sha512-oSzCS2zV14bh2kji6vNe7vrpJYCHGvcZnlffFQ1MEoX/WOeQ/teD8SYWKR942OI3INjq8OMNJlbPK5LLLUxFDw==", - "dependencies": { - "fast-deep-equal": "^3.1.3", - "multicast-dns": "^7.2.5" - } - }, - "node_modules/boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==" - }, - "node_modules/boxen": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz", - "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==", - "dependencies": { - "ansi-align": "^3.0.1", - "camelcase": "^6.2.0", - "chalk": "^4.1.2", - "cli-boxes": "^3.0.0", - "string-width": "^5.0.1", - "type-fest": "^2.5.0", - "widest-line": "^4.0.1", - "wrap-ansi": "^8.0.1" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/braces": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", - "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", - "dependencies": { - "fill-range": "^7.1.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/browserslist": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz", - "integrity": "sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "dependencies": { - "caniuse-lite": "^1.0.30001646", - "electron-to-chromium": "^1.5.4", - "node-releases": "^2.0.18", - "update-browserslist-db": "^1.1.0" - }, - "bin": { - 
"browserslist": "cli.js" - }, - "engines": { - "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" - } - }, - "node_modules/buffer-from": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", - "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" - }, - "node_modules/bytes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", - "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/cacheable-lookup": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz", - "integrity": "sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==", - "engines": { - "node": ">=14.16" - } - }, - "node_modules/cacheable-request": { - "version": "10.2.14", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-10.2.14.tgz", - "integrity": "sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==", - "dependencies": { - "@types/http-cache-semantics": "^4.0.2", - "get-stream": "^6.0.1", - "http-cache-semantics": "^4.1.1", - "keyv": "^4.5.3", - "mimic-response": "^4.0.0", - "normalize-url": "^8.0.0", - "responselike": "^3.0.0" - }, - "engines": { - "node": ">=14.16" - } - }, - "node_modules/call-bind": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", - "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", - "dependencies": { - "es-define-property": "^1.0.0", - "es-errors": "^1.3.0", - "function-bind": "^1.1.2", - "get-intrinsic": "^1.2.4", - "set-function-length": "^1.2.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/camel-case": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", - "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", - "dependencies": { - "pascal-case": "^3.1.2", - "tslib": "^2.0.3" - } - }, - "node_modules/camelcase": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", - "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/caniuse-api": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", - "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", - "dependencies": { - "browserslist": "^4.0.0", - "caniuse-lite": "^1.0.0", - "lodash.memoize": "^4.1.2", - "lodash.uniq": "^4.5.0" - } - }, - "node_modules/caniuse-lite": { - "version": "1.0.30001662", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001662.tgz", - "integrity": 
"sha512-sgMUVwLmGseH8ZIrm1d51UbrhqMCH3jvS7gF/M6byuHOnKyLOBL7W8yz5V02OHwgLGA36o/AFhWzzh4uc5aqTA==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/caniuse-lite" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ] - }, - "node_modules/ccount": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", - "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/char-regex": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", - "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", - "engines": { - "node": ">=10" - } - }, - "node_modules/character-entities": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", - "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/character-entities-html4": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", - "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/character-entities-legacy": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", - "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/character-reference-invalid": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", - "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/cheerio": { - "version": "1.0.0-rc.12", - "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz", - "integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==", - "dependencies": { - "cheerio-select": "^2.1.0", - "dom-serializer": "^2.0.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1", - "htmlparser2": "^8.0.1", - "parse5": "^7.0.0", - "parse5-htmlparser2-tree-adapter": "^7.0.0" - }, - "engines": { - "node": ">= 6" - }, - "funding": { - "url": "https://github.com/cheeriojs/cheerio?sponsor=1" - } - }, - "node_modules/cheerio-select": { - 
"version": "2.1.0", - "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz", - "integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==", - "dependencies": { - "boolbase": "^1.0.0", - "css-select": "^5.1.0", - "css-what": "^6.1.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/chokidar": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", - "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", - "dependencies": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" - }, - "engines": { - "node": ">= 8.10.0" - }, - "funding": { - "url": "https://paulmillr.com/funding/" - }, - "optionalDependencies": { - "fsevents": "~2.3.2" - } - }, - "node_modules/chrome-trace-event": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.4.tgz", - "integrity": "sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==", - "engines": { - "node": ">=6.0" - } - }, - "node_modules/ci-info": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", - "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/sibiraj-s" - } - ], - "engines": { - "node": ">=8" - } - }, - "node_modules/clean-css": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.3.tgz", - "integrity": "sha512-D5J+kHaVb/wKSFcyyV75uCn8fiY4sV38XJoe4CUyGQ+mOU/fMVYUdH1hJC+CJQ5uY3EnW27SbJYS4X8BiLrAFg==", - "dependencies": { - "source-map": "~0.6.0" - }, - "engines": { - "node": ">= 10.0" - } - }, - "node_modules/clean-css/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/clean-stack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", - "engines": { - "node": ">=6" - } - }, - "node_modules/cli-boxes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", - "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cli-table3": { - "version": "0.6.5", - "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz", - "integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==", - "dependencies": { - "string-width": "^4.2.0" - }, - "engines": { - "node": "10.* || >= 12.*" - }, - "optionalDependencies": { - "@colors/colors": "1.5.0" - } - }, - "node_modules/cli-table3/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": 
"https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "node_modules/cli-table3/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/clone-deep": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", - "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", - "dependencies": { - "is-plain-object": "^2.0.4", - "kind-of": "^6.0.2", - "shallow-clone": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/clsx": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", - "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/collapse-white-space": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz", - "integrity": "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "node_modules/colord": { - "version": "2.9.3", - "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", - "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==" - }, - "node_modules/colorette": { - "version": "2.0.20", - "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", - "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==" - }, - "node_modules/combine-promises": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.2.0.tgz", - "integrity": "sha512-VcQB1ziGD0NXrhKxiwyNbCDmRzs/OShMs2GqW2DlU2A/Sd0nQxE1oWDAE5O0ygSx5mgQOn9eIFh7yKPgFRVkPQ==", - "engines": { - "node": ">=10" - } - }, - "node_modules/comma-separated-tokens": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", - "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/commander": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", - "integrity": 
"sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/common-path-prefix": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/common-path-prefix/-/common-path-prefix-3.0.0.tgz", - "integrity": "sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w==" - }, - "node_modules/compressible": { - "version": "2.0.18", - "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", - "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", - "dependencies": { - "mime-db": ">= 1.43.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/compressible/node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/compression": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", - "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", - "dependencies": { - "accepts": "~1.3.5", - "bytes": "3.0.0", - "compressible": "~2.0.16", - "debug": "2.6.9", - "on-headers": "~1.0.2", - "safe-buffer": "5.1.2", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/compression/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/compression/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - }, - "node_modules/compression/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" - }, - "node_modules/config-chain": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", - "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", - "dependencies": { - "ini": "^1.3.4", - "proto-list": "~1.2.1" - } - }, - "node_modules/configstore": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/configstore/-/configstore-6.0.0.tgz", - "integrity": "sha512-cD31W1v3GqUlQvbBCGcXmd2Nj9SvLDOP1oQ0YFuLETufzSPaKp11rYBsSOm7rCsW3OnIRAFM3OxRhceaXNYHkA==", - "dependencies": { - "dot-prop": "^6.0.1", - "graceful-fs": "^4.2.6", - "unique-string": "^3.0.0", - "write-file-atomic": "^3.0.3", - "xdg-basedir": "^5.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/yeoman/configstore?sponsor=1" - } - }, - "node_modules/connect-history-api-fallback": { - "version": 
"2.0.0", - "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", - "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/consola": { - "version": "2.15.3", - "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", - "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==" - }, - "node_modules/content-disposition": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", - "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/content-type": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", - "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/convert-source-map": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", - "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==" - }, - "node_modules/cookie": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", - "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie-signature": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" - }, - "node_modules/copy-text-to-clipboard": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz", - "integrity": "sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q==", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/copy-webpack-plugin": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", - "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", - "dependencies": { - "fast-glob": "^3.2.11", - "glob-parent": "^6.0.1", - "globby": "^13.1.1", - "normalize-path": "^3.0.0", - "schema-utils": "^4.0.0", - "serialize-javascript": "^6.0.0" - }, - "engines": { - "node": ">= 14.15.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.1.0" - } - }, - "node_modules/copy-webpack-plugin/node_modules/glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "dependencies": { - "is-glob": "^4.0.3" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/copy-webpack-plugin/node_modules/globby": { - "version": "13.2.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", - "integrity": 
"sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", - "dependencies": { - "dir-glob": "^3.0.1", - "fast-glob": "^3.3.0", - "ignore": "^5.2.4", - "merge2": "^1.4.1", - "slash": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/copy-webpack-plugin/node_modules/slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/core-js": { - "version": "3.37.1", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.37.1.tgz", - "integrity": "sha512-Xn6qmxrQZyB0FFY8E3bgRXei3lWDJHhvI+u0q9TKIYM49G8pAr0FgnnrFRAmsbptZL1yxRADVXn+x5AGsbBfyw==", - "hasInstallScript": true, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, - "node_modules/core-js-compat": { - "version": "3.37.1", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.37.1.tgz", - "integrity": "sha512-9TNiImhKvQqSUkOvk/mMRZzOANTiEVC7WaBNhHcKM7x+/5E1l5NvsysR19zuDQScE8k+kfQXWRN3AtS/eOSHpg==", - "dependencies": { - "browserslist": "^4.23.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, - "node_modules/core-js-pure": { - "version": "3.37.1", - "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.37.1.tgz", - "integrity": "sha512-J/r5JTHSmzTxbiYYrzXg9w1VpqrYt+gexenBE9pugeyhwPZTAEJddyiReJWsLO6uNQ8xJZFbod6XC7KKwatCiA==", - "hasInstallScript": true, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, - "node_modules/core-util-is": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", - "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" - }, - "node_modules/cosmiconfig": { - "version": "8.3.6", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", - "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", - "dependencies": { - "import-fresh": "^3.3.0", - "js-yaml": "^4.1.0", - "parse-json": "^5.2.0", - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/d-fischer" - }, - "peerDependencies": { - "typescript": ">=4.9.5" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/crypto-random-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-4.0.0.tgz", - "integrity": "sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA==", - "dependencies": { - "type-fest": "^1.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/crypto-random-string/node_modules/type-fest": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", - "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/css-declaration-sorter": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-7.2.0.tgz", - "integrity": "sha512-h70rUM+3PNFuaBDTLe8wF/cdWu+dOZmb7pJt8Z2sedYbAcQVQV/tEchueg3GWxwqS0cxtbxmaHEdkNACqcvsow==", - "engines": { - "node": "^14 || ^16 || >=18" - }, - "peerDependencies": { - "postcss": "^8.0.9" - } - }, - "node_modules/css-loader": { - "version": "6.11.0", - "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.11.0.tgz", - "integrity": "sha512-CTJ+AEQJjq5NzLga5pE39qdiSV56F8ywCIsqNIRF0r7BDgWsN25aazToqAFg7ZrtA/U016xudB3ffgweORxX7g==", - "dependencies": { - "icss-utils": "^5.1.0", - "postcss": "^8.4.33", - "postcss-modules-extract-imports": "^3.1.0", - "postcss-modules-local-by-default": "^4.0.5", - "postcss-modules-scope": "^3.2.0", - "postcss-modules-values": "^4.0.0", - "postcss-value-parser": "^4.2.0", - "semver": "^7.5.4" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "@rspack/core": "0.x || 1.x", - "webpack": "^5.0.0" - }, - "peerDependenciesMeta": { - "@rspack/core": { - "optional": true - }, - "webpack": { - "optional": true - } - } - }, - "node_modules/css-minimizer-webpack-plugin": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-5.0.1.tgz", - "integrity": "sha512-3caImjKFQkS+ws1TGcFn0V1HyDJFq1Euy589JlD6/3rV2kj+w7r5G9WDMgSHvpvXHNZ2calVypZWuEDQd9wfLg==", - "dependencies": { - "@jridgewell/trace-mapping": "^0.3.18", - "cssnano": "^6.0.1", - "jest-worker": "^29.4.3", - "postcss": "^8.4.24", - "schema-utils": "^4.0.1", - "serialize-javascript": "^6.0.1" - }, - "engines": { - "node": ">= 14.15.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - }, - "peerDependenciesMeta": { - "@parcel/css": { - "optional": true - }, - "@swc/css": { - "optional": true - }, - "clean-css": { - "optional": true - }, - "csso": { - "optional": true - }, - "esbuild": { - "optional": true - }, - "lightningcss": { - "optional": true - } - } - }, - "node_modules/css-select": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", - "integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^6.1.0", - "domhandler": "^5.0.2", - "domutils": "^3.0.1", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/css-tree": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", - "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", - "dependencies": { - "mdn-data": "2.0.30", - "source-map-js": "^1.0.1" - }, - "engines": { - "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" - } - }, - 
"node_modules/css-what": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", - "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==", - "engines": { - "node": ">= 6" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", - "bin": { - "cssesc": "bin/cssesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano": { - "version": "6.1.2", - "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-6.1.2.tgz", - "integrity": "sha512-rYk5UeX7VAM/u0lNqewCdasdtPK81CgX8wJFLEIXHbV2oldWRgJAsZrdhRXkV1NJzA2g850KiFm9mMU2HxNxMA==", - "dependencies": { - "cssnano-preset-default": "^6.1.2", - "lilconfig": "^3.1.1" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/cssnano" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/cssnano-preset-advanced": { - "version": "6.1.2", - "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-6.1.2.tgz", - "integrity": "sha512-Nhao7eD8ph2DoHolEzQs5CfRpiEP0xa1HBdnFZ82kvqdmbwVBUr2r1QuQ4t1pi+D1ZpqpcO4T+wy/7RxzJ/WPQ==", - "dependencies": { - "autoprefixer": "^10.4.19", - "browserslist": "^4.23.0", - "cssnano-preset-default": "^6.1.2", - "postcss-discard-unused": "^6.0.5", - "postcss-merge-idents": "^6.0.3", - "postcss-reduce-idents": "^6.0.3", - "postcss-zindex": "^6.0.2" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/cssnano-preset-default": { - "version": "6.1.2", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-6.1.2.tgz", - "integrity": "sha512-1C0C+eNaeN8OcHQa193aRgYexyJtU8XwbdieEjClw+J9d94E41LwT6ivKH0WT+fYwYWB0Zp3I3IZ7tI/BbUbrg==", - "dependencies": { - "browserslist": "^4.23.0", - "css-declaration-sorter": "^7.2.0", - "cssnano-utils": "^4.0.2", - "postcss-calc": "^9.0.1", - "postcss-colormin": "^6.1.0", - "postcss-convert-values": "^6.1.0", - "postcss-discard-comments": "^6.0.2", - "postcss-discard-duplicates": "^6.0.3", - "postcss-discard-empty": "^6.0.3", - "postcss-discard-overridden": "^6.0.2", - "postcss-merge-longhand": "^6.0.5", - "postcss-merge-rules": "^6.1.1", - "postcss-minify-font-values": "^6.1.0", - "postcss-minify-gradients": "^6.0.3", - "postcss-minify-params": "^6.1.0", - "postcss-minify-selectors": "^6.0.4", - "postcss-normalize-charset": "^6.0.2", - "postcss-normalize-display-values": "^6.0.2", - "postcss-normalize-positions": "^6.0.2", - "postcss-normalize-repeat-style": "^6.0.2", - "postcss-normalize-string": "^6.0.2", - "postcss-normalize-timing-functions": "^6.0.2", - "postcss-normalize-unicode": "^6.1.0", - "postcss-normalize-url": "^6.0.2", - "postcss-normalize-whitespace": "^6.0.2", - "postcss-ordered-values": "^6.0.2", - "postcss-reduce-initial": "^6.1.0", - "postcss-reduce-transforms": "^6.0.2", - "postcss-svgo": "^6.0.3", - "postcss-unique-selectors": "^6.0.4" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/cssnano-utils": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-4.0.2.tgz", - 
"integrity": "sha512-ZR1jHg+wZ8o4c3zqf1SIUSTIvm/9mU343FMR6Obe/unskbvpGhZOo1J6d/r8D1pzkRQYuwbcH3hToOuoA2G7oQ==", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/csso": { - "version": "5.0.5", - "resolved": "https://registry.npmjs.org/csso/-/csso-5.0.5.tgz", - "integrity": "sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ==", - "dependencies": { - "css-tree": "~2.2.0" - }, - "engines": { - "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", - "npm": ">=7.0.0" - } - }, - "node_modules/csso/node_modules/css-tree": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.2.1.tgz", - "integrity": "sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA==", - "dependencies": { - "mdn-data": "2.0.28", - "source-map-js": "^1.0.1" - }, - "engines": { - "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", - "npm": ">=7.0.0" - } - }, - "node_modules/csso/node_modules/mdn-data": { - "version": "2.0.28", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.28.tgz", - "integrity": "sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g==" - }, - "node_modules/csstype": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", - "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" - }, - "node_modules/debounce": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz", - "integrity": "sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==" - }, - "node_modules/debug": { - "version": "4.3.5", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.5.tgz", - "integrity": "sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/decode-named-character-reference": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", - "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", - "dependencies": { - "character-entities": "^2.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/decompress-response": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", - "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", - "dependencies": { - "mimic-response": "^3.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/decompress-response/node_modules/mimic-response": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", - "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/deep-extend": { - "version": "0.6.0", - "resolved": 
"https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/deepmerge": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", - "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/default-gateway": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", - "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", - "dependencies": { - "execa": "^5.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/defer-to-connect": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", - "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", - "engines": { - "node": ">=10" - } - }, - "node_modules/define-data-property": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", - "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", - "dependencies": { - "es-define-property": "^1.0.0", - "es-errors": "^1.3.0", - "gopd": "^1.0.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/define-lazy-prop": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", - "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", - "engines": { - "node": ">=8" - } - }, - "node_modules/define-properties": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", - "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", - "dependencies": { - "define-data-property": "^1.0.1", - "has-property-descriptors": "^1.0.0", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/del": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", - "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", - "dependencies": { - "globby": "^11.0.1", - "graceful-fs": "^4.2.4", - "is-glob": "^4.0.1", - "is-path-cwd": "^2.2.0", - "is-path-inside": "^3.0.2", - "p-map": "^4.0.0", - "rimraf": "^3.0.2", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/depd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", - "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/dequal": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", - "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", - "engines": { - "node": ">=6" - } - }, - 
"node_modules/destroy": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", - "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", - "engines": { - "node": ">= 0.8", - "npm": "1.2.8000 || >= 1.4.16" - } - }, - "node_modules/detect-node": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", - "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" - }, - "node_modules/detect-port": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.6.1.tgz", - "integrity": "sha512-CmnVc+Hek2egPx1PeTFVta2W78xy2K/9Rkf6cC4T59S50tVnzKj+tnx5mmx5lwvCkujZ4uRrpRSuV+IVs3f90Q==", - "dependencies": { - "address": "^1.0.1", - "debug": "4" - }, - "bin": { - "detect": "bin/detect-port.js", - "detect-port": "bin/detect-port.js" - }, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/detect-port-alt": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", - "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", - "dependencies": { - "address": "^1.0.1", - "debug": "^2.6.0" - }, - "bin": { - "detect": "bin/detect-port", - "detect-port": "bin/detect-port" - }, - "engines": { - "node": ">= 4.2.1" - } - }, - "node_modules/detect-port-alt/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/detect-port-alt/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - }, - "node_modules/devlop": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", - "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", - "dependencies": { - "dequal": "^2.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dependencies": { - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/dns-packet": { - "version": "5.6.1", - "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", - "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", - "dependencies": { - "@leichtgewicht/ip-codec": "^2.0.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/dom-converter": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", - "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", - "dependencies": { - "utila": "~0.4" - } - }, - "node_modules/dom-serializer": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", - "integrity": 
"sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "entities": "^4.2.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/domelementtype": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", - "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ] - }, - "node_modules/domhandler": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", - "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", - "dependencies": { - "domelementtype": "^2.3.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/domutils": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", - "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", - "dependencies": { - "dom-serializer": "^2.0.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, - "node_modules/dot-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", - "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", - "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/dot-prop": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-6.0.1.tgz", - "integrity": "sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==", - "dependencies": { - "is-obj": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/dot-prop/node_modules/is-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", - "engines": { - "node": ">=8" - } - }, - "node_modules/duplexer": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", - "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" - }, - "node_modules/eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" - }, - "node_modules/ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" - }, - "node_modules/electron-to-chromium": { - "version": "1.5.26", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.26.tgz", - "integrity": "sha512-Z+OMe9M/V6Ep9n/52+b7lkvYEps26z4Yz3vjWL1V61W0q+VLF1pOHhMY17sa4roz4AWmULSI8E6SAojZA5L0YQ==" - }, - "node_modules/emoji-regex": { - 
"version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" - }, - "node_modules/emojilib": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/emojilib/-/emojilib-2.4.0.tgz", - "integrity": "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==" - }, - "node_modules/emojis-list": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", - "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", - "engines": { - "node": ">= 4" - } - }, - "node_modules/emoticon": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.1.0.tgz", - "integrity": "sha512-VWZfnxqwNcc51hIy/sbOdEem6D+cVtpPzEEtVAFdaas30+1dgkyaOQ4sQ6Bp0tOMqWO1v+HQfYaoodOkdhK6SQ==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/encodeurl": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", - "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/enhanced-resolve": { - "version": "5.17.1", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz", - "integrity": "sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==", - "dependencies": { - "graceful-fs": "^4.2.4", - "tapable": "^2.2.0" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/entities": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", - "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", - "engines": { - "node": ">=0.12" - }, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "dependencies": { - "is-arrayish": "^0.2.1" - } - }, - "node_modules/es-define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", - "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", - "dependencies": { - "get-intrinsic": "^1.2.4" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-errors": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", - "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-module-lexer": { - "version": "1.5.3", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.5.3.tgz", - "integrity": "sha512-i1gCgmR9dCl6Vil6UKPI/trA69s08g/syhiDK9TG0Nf1RJjjFI+AzoWW7sPufzkgYAn861skuCwJa0pIIHYxvg==" - }, - "node_modules/escalade": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", - "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", - 
"engines": { - "node": ">=6" - } - }, - "node_modules/escape-goat": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-4.0.0.tgz", - "integrity": "sha512-2Sd4ShcWxbx6OY1IHyla/CVNwvg7XwZVoXZHcSu9w9SReNP1EzzD5T8NWKIR38fIqEns9kDWKUQTXXAmlDrdPg==", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" - }, - "node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint-scope": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", - "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "dependencies": { - "estraverse": "^5.2.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esrecurse/node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estree-util-attach-comments": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz", - "integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", - "dependencies": { - "@types/estree": "^1.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/estree-util-build-jsx": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz", - "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "devlop": "^1.0.0", - "estree-util-is-identifier-name": "^3.0.0", - 
"estree-walker": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/estree-util-is-identifier-name": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", - "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/estree-util-to-js": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz", - "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "astring": "^1.8.0", - "source-map": "^0.7.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/estree-util-value-to-estree": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.1.2.tgz", - "integrity": "sha512-S0gW2+XZkmsx00tU2uJ4L9hUT7IFabbml9pHh2WQqFmAbxit++YGZne0sKJbNwkj9Wvg9E4uqWl4nCIFQMmfag==", - "dependencies": { - "@types/estree": "^1.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/remcohaszing" - } - }, - "node_modules/estree-util-visit": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz", - "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/unist": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/estree-walker": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", - "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", - "dependencies": { - "@types/estree": "^1.0.0" - } - }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/eta": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", - "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==", - "engines": { - "node": ">=6.0.0" - }, - "funding": { - "url": "https://github.com/eta-dev/eta?sponsor=1" - } - }, - "node_modules/etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/eval": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", - "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", - "dependencies": { - "@types/node": "*", - "require-like": ">= 0.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/eventemitter3": { - "version": "4.0.7", - "resolved": 
"https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" - }, - "node_modules/events": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", - "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", - "engines": { - "node": ">=0.8.x" - } - }, - "node_modules/execa": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", - "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", - "dependencies": { - "cross-spawn": "^7.0.3", - "get-stream": "^6.0.0", - "human-signals": "^2.1.0", - "is-stream": "^2.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^4.0.1", - "onetime": "^5.1.2", - "signal-exit": "^3.0.3", - "strip-final-newline": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" - } - }, - "node_modules/express": { - "version": "4.21.0", - "resolved": "https://registry.npmjs.org/express/-/express-4.21.0.tgz", - "integrity": "sha512-VqcNGcj/Id5ZT1LZ/cfihi3ttTn+NJmkli2eZADigjq29qTlWi/hAQ43t/VLPq8+UX06FCEx3ByOYet6ZFblng==", - "dependencies": { - "accepts": "~1.3.8", - "array-flatten": "1.1.1", - "body-parser": "1.20.3", - "content-disposition": "0.5.4", - "content-type": "~1.0.4", - "cookie": "0.6.0", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "2.0.0", - "encodeurl": "~2.0.0", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "1.3.1", - "fresh": "0.5.2", - "http-errors": "2.0.0", - "merge-descriptors": "1.0.3", - "methods": "~1.1.2", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "path-to-regexp": "0.1.10", - "proxy-addr": "~2.0.7", - "qs": "6.13.0", - "range-parser": "~1.2.1", - "safe-buffer": "5.2.1", - "send": "0.19.0", - "serve-static": "1.16.2", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/express/node_modules/content-disposition": { - "version": "0.5.4", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", - "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", - "dependencies": { - "safe-buffer": "5.2.1" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/express/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/express/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - }, - "node_modules/express/node_modules/path-to-regexp": { - "version": "0.1.10", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", - "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==" - }, - "node_modules/express/node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - 
"integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" - }, - "node_modules/extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", - "dependencies": { - "is-extendable": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" - }, - "node_modules/fast-glob": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", - "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", - "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.4" - }, - "engines": { - "node": ">=8.6.0" - } - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" - }, - "node_modules/fast-url-parser": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", - "integrity": "sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==", - "dependencies": { - "punycode": "^1.3.2" - } - }, - "node_modules/fastq": { - "version": "1.17.1", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", - "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", - "dependencies": { - "reusify": "^1.0.4" - } - }, - "node_modules/fault": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/fault/-/fault-2.0.1.tgz", - "integrity": "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==", - "dependencies": { - "format": "^0.2.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/faye-websocket": { - "version": "0.11.4", - "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", - "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", - "dependencies": { - "websocket-driver": ">=0.5.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/feed": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz", - "integrity": "sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==", - "dependencies": { - "xml-js": "^1.6.11" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/file-loader": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", - "integrity": 
"sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", - "dependencies": { - "loader-utils": "^2.0.0", - "schema-utils": "^3.0.0" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.0.0 || ^5.0.0" - } - }, - "node_modules/file-loader/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/file-loader/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/file-loader/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "node_modules/file-loader/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/filesize": { - "version": "8.0.7", - "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz", - "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==", - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/fill-range": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", - "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/finalhandler": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", - "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", - "dependencies": { - "debug": "2.6.9", - "encodeurl": "~2.0.0", - "escape-html": "~1.0.3", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "statuses": "2.0.1", - "unpipe": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/finalhandler/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/finalhandler/node_modules/ms": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - }, - "node_modules/find-cache-dir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-4.0.0.tgz", - "integrity": "sha512-9ZonPT4ZAK4a+1pUPVPZJapbi7O5qbbJPdYw/NOQWZZbVLdDTYM3A4R9z/DpAM08IDaFGsvPgiGZ82WEwUDWjg==", - "dependencies": { - "common-path-prefix": "^3.0.0", - "pkg-dir": "^7.0.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/find-up": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz", - "integrity": "sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==", - "dependencies": { - "locate-path": "^7.1.0", - "path-exists": "^5.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/flat": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", - "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", - "bin": { - "flat": "cli.js" - } - }, - "node_modules/follow-redirects": { - "version": "1.15.6", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", - "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/fork-ts-checker-webpack-plugin": { - "version": "6.5.3", - "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz", - "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==", - "dependencies": { - "@babel/code-frame": "^7.8.3", - "@types/json-schema": "^7.0.5", - "chalk": "^4.1.0", - "chokidar": "^3.4.2", - "cosmiconfig": "^6.0.0", - "deepmerge": "^4.2.2", - "fs-extra": "^9.0.0", - "glob": "^7.1.6", - "memfs": "^3.1.2", - "minimatch": "^3.0.4", - "schema-utils": "2.7.0", - "semver": "^7.3.2", - "tapable": "^1.0.0" - }, - "engines": { - "node": ">=10", - "yarn": ">=1.0.0" - }, - "peerDependencies": { - "eslint": ">= 6", - "typescript": ">= 2.7", - "vue-template-compiler": "*", - "webpack": ">= 4" - }, - "peerDependenciesMeta": { - "eslint": { - "optional": true - }, - "vue-template-compiler": { - "optional": true - } - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": 
"sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", - "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", - "dependencies": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.1.0", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.7.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", - "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", - "dependencies": { - "@types/json-schema": "^7.0.4", - "ajv": "^6.12.2", - "ajv-keywords": "^3.4.1" - }, - "engines": { - "node": ">= 8.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", - "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/form-data-encoder": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-2.1.4.tgz", - "integrity": "sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==", - "engines": { - "node": ">= 14.17" - } - }, - "node_modules/format": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", - "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/forwarded": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", - "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fraction.js": { - "version": "4.3.7", - "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", - "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", - "engines": { - "node": "*" - }, - "funding": { - "type": "patreon", - "url": "https://github.com/sponsors/rawify" - } - }, - "node_modules/fresh": { - 
"version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fs-extra": { - "version": "11.2.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz", - "integrity": "sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=14.14" - } - }, - "node_modules/fs-monkey": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.6.tgz", - "integrity": "sha512-b1FMfwetIKymC0eioW7mTywihSQE4oLzQn1dB6rZB5fx/3NpNEdAWeCSMB+60/AeT0TCXsxzAlcYVEFCTAksWg==" - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" - }, - "node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "hasInstallScript": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "node_modules/function-bind": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/gensync": { - "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/get-intrinsic": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", - "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", - "dependencies": { - "es-errors": "^1.3.0", - "function-bind": "^1.1.2", - "has-proto": "^1.0.1", - "has-symbols": "^1.0.3", - "hasown": "^2.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-own-enumerable-property-symbols": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", - "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" - }, - "node_modules/get-stream": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", - "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/github-slugger": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", - "integrity": 
"sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" - }, - "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "deprecated": "Glob versions prior to v9 are no longer supported", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/glob-to-regexp": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", - "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" - }, - "node_modules/global-dirs": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", - "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", - "dependencies": { - "ini": "2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/global-dirs/node_modules/ini": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", - "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", - "engines": { - "node": ">=10" - } - }, - "node_modules/global-modules": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", - "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", - "dependencies": { - "global-prefix": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/global-prefix": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", - "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", - "dependencies": { - "ini": "^1.3.5", - "kind-of": "^6.0.2", - "which": "^1.3.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/global-prefix/node_modules/which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "which": "bin/which" - } - }, - "node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "engines": { - "node": ">=4" - } - }, - "node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", 
- "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/gopd": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", - "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", - "dependencies": { - "get-intrinsic": "^1.1.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/got": { - "version": "12.6.1", - "resolved": "https://registry.npmjs.org/got/-/got-12.6.1.tgz", - "integrity": "sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==", - "dependencies": { - "@sindresorhus/is": "^5.2.0", - "@szmarczak/http-timer": "^5.0.1", - "cacheable-lookup": "^7.0.0", - "cacheable-request": "^10.2.8", - "decompress-response": "^6.0.0", - "form-data-encoder": "^2.1.2", - "get-stream": "^6.0.1", - "http2-wrapper": "^2.1.10", - "lowercase-keys": "^3.0.0", - "p-cancelable": "^3.0.0", - "responselike": "^3.0.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sindresorhus/got?sponsor=1" - } - }, - "node_modules/got/node_modules/@sindresorhus/is": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-5.6.0.tgz", - "integrity": "sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==", - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sindresorhus/is?sponsor=1" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" - }, - "node_modules/gray-matter": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", - "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", - "dependencies": { - "js-yaml": "^3.13.1", - "kind-of": "^6.0.2", - "section-matter": "^1.0.0", - "strip-bom-string": "^1.0.0" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/gray-matter/node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "node_modules/gray-matter/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/gzip-size": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", - "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", - "dependencies": { - "duplexer": "^0.1.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/handle-thing": { - 
"version": "2.0.1", - "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", - "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/has-property-descriptors": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", - "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", - "dependencies": { - "es-define-property": "^1.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-proto": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", - "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-yarn": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-3.0.0.tgz", - "integrity": "sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA==", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/hasown": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", - "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", - "dependencies": { - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/hast-util-from-parse5": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.1.tgz", - "integrity": "sha512-Er/Iixbc7IEa7r/XLtuG52zoqn/b3Xng/w6aZQ0xGVxzhw5xUFxcRqdPzP6yFi/4HBYRaifaI5fQ1RH8n0ZeOQ==", - "dependencies": { - "@types/hast": "^3.0.0", - "@types/unist": "^3.0.0", - "devlop": "^1.0.0", - "hastscript": "^8.0.0", - "property-information": "^6.0.0", - "vfile": "^6.0.0", - "vfile-location": "^5.0.0", - "web-namespaces": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-parse-selector": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", - "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", - "dependencies": { - "@types/hast": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-raw": { - "version": "9.0.4", - "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.0.4.tgz", - "integrity": 
"sha512-LHE65TD2YiNsHD3YuXcKPHXPLuYh/gjp12mOfU8jxSrm1f/yJpsb0F/KKljS6U9LJoP0Ux+tCe8iJ2AsPzTdgA==", - "dependencies": { - "@types/hast": "^3.0.0", - "@types/unist": "^3.0.0", - "@ungap/structured-clone": "^1.0.0", - "hast-util-from-parse5": "^8.0.0", - "hast-util-to-parse5": "^8.0.0", - "html-void-elements": "^3.0.0", - "mdast-util-to-hast": "^13.0.0", - "parse5": "^7.0.0", - "unist-util-position": "^5.0.0", - "unist-util-visit": "^5.0.0", - "vfile": "^6.0.0", - "web-namespaces": "^2.0.0", - "zwitch": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-to-estree": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.0.tgz", - "integrity": "sha512-lfX5g6hqVh9kjS/B9E2gSkvHH4SZNiQFiqWS0x9fENzEl+8W12RqdRxX6d/Cwxi30tPQs3bIO+aolQJNp1bIyw==", - "dependencies": { - "@types/estree": "^1.0.0", - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "comma-separated-tokens": "^2.0.0", - "devlop": "^1.0.0", - "estree-util-attach-comments": "^3.0.0", - "estree-util-is-identifier-name": "^3.0.0", - "hast-util-whitespace": "^3.0.0", - "mdast-util-mdx-expression": "^2.0.0", - "mdast-util-mdx-jsx": "^3.0.0", - "mdast-util-mdxjs-esm": "^2.0.0", - "property-information": "^6.0.0", - "space-separated-tokens": "^2.0.0", - "style-to-object": "^0.4.0", - "unist-util-position": "^5.0.0", - "zwitch": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-to-jsx-runtime": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.0.tgz", - "integrity": "sha512-H/y0+IWPdsLLS738P8tDnrQ8Z+dj12zQQ6WC11TIM21C8WFVoIxcqWXf2H3hiTVZjF1AWqoimGwrTWecWrnmRQ==", - "dependencies": { - "@types/estree": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/unist": "^3.0.0", - "comma-separated-tokens": "^2.0.0", - "devlop": "^1.0.0", - "estree-util-is-identifier-name": "^3.0.0", - "hast-util-whitespace": "^3.0.0", - "mdast-util-mdx-expression": "^2.0.0", - "mdast-util-mdx-jsx": "^3.0.0", - "mdast-util-mdxjs-esm": "^2.0.0", - "property-information": "^6.0.0", - "space-separated-tokens": "^2.0.0", - "style-to-object": "^1.0.0", - "unist-util-position": "^5.0.0", - "vfile-message": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-to-jsx-runtime/node_modules/inline-style-parser": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.4.tgz", - "integrity": "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q==" - }, - "node_modules/hast-util-to-jsx-runtime/node_modules/style-to-object": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.8.tgz", - "integrity": "sha512-xT47I/Eo0rwJmaXC4oilDGDWLohVhR6o/xAQcPQN8q6QBuZVL8qMYL85kLmST5cPjAorwvqIA4qXTRQoYHaL6g==", - "dependencies": { - "inline-style-parser": "0.2.4" - } - }, - "node_modules/hast-util-to-parse5": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz", - "integrity": "sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==", - "dependencies": { - "@types/hast": "^3.0.0", - "comma-separated-tokens": "^2.0.0", - "devlop": "^1.0.0", - "property-information": 
"^6.0.0", - "space-separated-tokens": "^2.0.0", - "web-namespaces": "^2.0.0", - "zwitch": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-whitespace": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", - "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", - "dependencies": { - "@types/hast": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hastscript": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-8.0.0.tgz", - "integrity": "sha512-dMOtzCEd3ABUeSIISmrETiKuyydk1w0pa+gE/uormcTpSYuaNJPbX1NU3JLyscSLjwAQM8bWMhhIlnCqnRvDTw==", - "dependencies": { - "@types/hast": "^3.0.0", - "comma-separated-tokens": "^2.0.0", - "hast-util-parse-selector": "^4.0.0", - "property-information": "^6.0.0", - "space-separated-tokens": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", - "bin": { - "he": "bin/he" - } - }, - "node_modules/history": { - "version": "4.10.1", - "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", - "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", - "dependencies": { - "@babel/runtime": "^7.1.2", - "loose-envify": "^1.2.0", - "resolve-pathname": "^3.0.0", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0", - "value-equal": "^1.0.1" - } - }, - "node_modules/hoist-non-react-statics": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", - "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", - "dependencies": { - "react-is": "^16.7.0" - } - }, - "node_modules/hpack.js": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", - "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", - "dependencies": { - "inherits": "^2.0.1", - "obuf": "^1.0.0", - "readable-stream": "^2.0.1", - "wbuf": "^1.1.0" - } - }, - "node_modules/hpack.js/node_modules/isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" - }, - "node_modules/hpack.js/node_modules/readable-stream": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", - "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "node_modules/hpack.js/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": 
"sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "node_modules/hpack.js/node_modules/string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dependencies": { - "safe-buffer": "~5.1.0" - } - }, - "node_modules/html-entities": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.5.2.tgz", - "integrity": "sha512-K//PSRMQk4FZ78Kyau+mZurHn3FH0Vwr+H36eE0rPbeYkRRi9YxceYPhuN60UwWorxyKHhqoAJl2OFKa4BVtaA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/mdevils" - }, - { - "type": "patreon", - "url": "https://patreon.com/mdevils" - } - ] - }, - "node_modules/html-escaper": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", - "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==" - }, - "node_modules/html-minifier-terser": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-7.2.0.tgz", - "integrity": "sha512-tXgn3QfqPIpGl9o+K5tpcj3/MN4SfLtsx2GWwBC3SSd0tXQGyF3gsSqad8loJgKZGM3ZxbYDd5yhiBIdWpmvLA==", - "dependencies": { - "camel-case": "^4.1.2", - "clean-css": "~5.3.2", - "commander": "^10.0.0", - "entities": "^4.4.0", - "param-case": "^3.0.4", - "relateurl": "^0.2.7", - "terser": "^5.15.1" - }, - "bin": { - "html-minifier-terser": "cli.js" - }, - "engines": { - "node": "^14.13.1 || >=16.0.0" - } - }, - "node_modules/html-minifier-terser/node_modules/commander": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", - "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", - "engines": { - "node": ">=14" - } - }, - "node_modules/html-tags": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", - "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/html-void-elements": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", - "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/html-webpack-plugin": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.6.0.tgz", - "integrity": "sha512-iwaY4wzbe48AfKLZ/Cc8k0L+FKG6oSNRaZ8x5A/T/IVDGyXcbHncM9TdDa93wn0FsSm82FhTKW7f3vS61thXAw==", - "dependencies": { - "@types/html-minifier-terser": "^6.0.0", - "html-minifier-terser": "^6.0.2", - "lodash": "^4.17.21", - "pretty-error": "^4.0.0", - "tapable": "^2.0.0" - }, - "engines": { - "node": ">=10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/html-webpack-plugin" - }, - "peerDependencies": { - "@rspack/core": "0.x || 1.x", - "webpack": "^5.20.0" - }, - "peerDependenciesMeta": { - "@rspack/core": { - "optional": true - }, - "webpack": { - "optional": true - } - } - }, - 
"node_modules/html-webpack-plugin/node_modules/commander": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", - "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", - "engines": { - "node": ">= 12" - } - }, - "node_modules/html-webpack-plugin/node_modules/html-minifier-terser": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", - "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", - "dependencies": { - "camel-case": "^4.1.2", - "clean-css": "^5.2.2", - "commander": "^8.3.0", - "he": "^1.2.0", - "param-case": "^3.0.4", - "relateurl": "^0.2.7", - "terser": "^5.10.0" - }, - "bin": { - "html-minifier-terser": "cli.js" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/htmlparser2": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", - "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==", - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1", - "entities": "^4.4.0" - } - }, - "node_modules/http-cache-semantics": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", - "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" - }, - "node_modules/http-deceiver": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", - "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==" - }, - "node_modules/http-errors": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", - "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", - "dependencies": { - "depd": "2.0.0", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "toidentifier": "1.0.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/http-parser-js": { - "version": "0.5.8", - "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz", - "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==" - }, - "node_modules/http-proxy": { - "version": "1.18.1", - "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", - "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", - "dependencies": { - "eventemitter3": "^4.0.0", - "follow-redirects": "^1.0.0", - "requires-port": "^1.0.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/http-proxy-middleware": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", - "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", - "dependencies": { - "@types/http-proxy": "^1.17.8", - "http-proxy": "^1.18.1", - "is-glob": "^4.0.1", - "is-plain-obj": "^3.0.0", - "micromatch": "^4.0.2" - }, - "engines": { - "node": ">=12.0.0" - }, - 
"peerDependencies": { - "@types/express": "^4.17.13" - }, - "peerDependenciesMeta": { - "@types/express": { - "optional": true - } - } - }, - "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", - "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/http2-wrapper": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz", - "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==", - "dependencies": { - "quick-lru": "^5.1.1", - "resolve-alpn": "^1.2.0" - }, - "engines": { - "node": ">=10.19.0" - } - }, - "node_modules/human-signals": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", - "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", - "engines": { - "node": ">=10.17.0" - } - }, - "node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/icss-utils": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", - "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/ignore": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", - "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", - "engines": { - "node": ">= 4" - } - }, - "node_modules/image-size": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.1.1.tgz", - "integrity": "sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ==", - "dependencies": { - "queue": "6.0.2" - }, - "bin": { - "image-size": "bin/image-size.js" - }, - "engines": { - "node": ">=16.x" - } - }, - "node_modules/immer": { - "version": "9.0.21", - "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", - "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/immer" - } - }, - "node_modules/import-fresh": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", - "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", - "dependencies": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/import-lazy": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", - "integrity": 
"sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", - "engines": { - "node": ">=8" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/infima": { - "version": "0.2.0-alpha.44", - "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.44.tgz", - "integrity": "sha512-tuRkUSO/lB3rEhLJk25atwAjgLuzq070+pOW8XcvpHky/YbENnRRdPd85IBkyeTgttmOy5ah+yHYsK1HhUd4lQ==", - "engines": { - "node": ">=12" - } - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "node_modules/ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" - }, - "node_modules/inline-style-parser": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz", - "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" - }, - "node_modules/interpret": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", - "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/invariant": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", - "dependencies": { - "loose-envify": "^1.0.0" - } - }, - "node_modules/ipaddr.js": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.2.0.tgz", - "integrity": "sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA==", - "engines": { - "node": ">= 10" - } - }, - "node_modules/is-alphabetical": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", - "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-alphanumerical": { - 
"version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", - "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", - "dependencies": { - "is-alphabetical": "^2.0.0", - "is-decimal": "^2.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" - }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "dependencies": { - "binary-extensions": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-ci": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", - "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", - "dependencies": { - "ci-info": "^3.2.0" - }, - "bin": { - "is-ci": "bin.js" - } - }, - "node_modules/is-core-module": { - "version": "2.14.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.14.0.tgz", - "integrity": "sha512-a5dFJih5ZLYlRtDc0dZWP7RiKr6xIKzmn/oAYCDvdLThadVgyJwlaoQPmRtMSpz+rk0OGAgIu+TcM9HUF0fk1A==", - "dependencies": { - "hasown": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-decimal": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", - "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-docker": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-extendable": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", - "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": 
"sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-hexadecimal": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", - "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-installed-globally": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", - "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", - "dependencies": { - "global-dirs": "^3.0.0", - "is-path-inside": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-npm": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-6.0.0.tgz", - "integrity": "sha512-JEjxbSmtPSt1c8XTkVrlujcXdKV1/tvuQ7GwKcAlyiVLeYFQ2VHat8xfrDJsIkhCdF/tZ7CiIR3sy141c6+gPQ==", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/is-obj": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", - "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-path-cwd": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", - "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-plain-obj": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", - "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-plain-object": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", - "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", - "dependencies": { - "isobject": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-reference": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", - "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", - "dependencies": { - "@types/estree": "*" - } - 
}, - "node_modules/is-regexp": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", - "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-root": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", - "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/is-stream": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", - "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" - }, - "node_modules/is-wsl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", - "dependencies": { - "is-docker": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-yarn-global": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.4.1.tgz", - "integrity": "sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==", - "engines": { - "node": ">=12" - } - }, - "node_modules/isarray": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" - }, - "node_modules/isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/jest-util": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", - "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", - "dependencies": { - "@jest/types": "^29.6.3", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-worker": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", - "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", - "dependencies": { - "@types/node": "*", - "jest-util": "^29.7.0", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-worker/node_modules/supports-color": { - 
"version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "node_modules/jiti": { - "version": "1.21.6", - "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.6.tgz", - "integrity": "sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==", - "bin": { - "jiti": "bin/jiti.js" - } - }, - "node_modules/joi": { - "version": "17.13.3", - "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.3.tgz", - "integrity": "sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==", - "dependencies": { - "@hapi/hoek": "^9.3.0", - "@hapi/topo": "^5.1.0", - "@sideway/address": "^4.1.5", - "@sideway/formula": "^3.0.1", - "@sideway/pinpoint": "^2.0.0" - } - }, - "node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" - }, - "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", - "bin": { - "jsesc": "bin/jsesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/json-buffer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", - "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==" - }, - "node_modules/json-parse-even-better-errors": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", - "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" - }, - "node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - }, - "node_modules/json5": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", - "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", - "bin": { - "json5": "lib/cli.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/keyv": { - "version": "4.5.4", - "resolved": 
"https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", - "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", - "dependencies": { - "json-buffer": "3.0.1" - } - }, - "node_modules/kind-of": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/kleur": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", - "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", - "engines": { - "node": ">=6" - } - }, - "node_modules/latest-version": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-7.0.0.tgz", - "integrity": "sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==", - "dependencies": { - "package-json": "^8.1.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/launch-editor": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.8.0.tgz", - "integrity": "sha512-vJranOAJrI/llyWGRQqiDM+adrw+k83fvmmx3+nV47g3+36xM15jE+zyZ6Ffel02+xSvuM0b2GDRosXZkbb6wA==", - "dependencies": { - "picocolors": "^1.0.0", - "shell-quote": "^1.8.1" - } - }, - "node_modules/leven": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", - "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", - "engines": { - "node": ">=6" - } - }, - "node_modules/lilconfig": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz", - "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/antonk52" - } - }, - "node_modules/lines-and-columns": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", - "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" - }, - "node_modules/loader-runner": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", - "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", - "engines": { - "node": ">=6.11.5" - } - }, - "node_modules/loader-utils": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", - "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", - "dependencies": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^2.1.2" - }, - "engines": { - "node": ">=8.9.0" - } - }, - "node_modules/locate-path": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", - "integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==", - "dependencies": { - "p-locate": "^6.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - 
"node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" - }, - "node_modules/lodash.debounce": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", - "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" - }, - "node_modules/lodash.memoize": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", - "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==" - }, - "node_modules/lodash.uniq": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", - "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" - }, - "node_modules/longest-streak": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", - "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/loose-envify": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", - "dependencies": { - "js-tokens": "^3.0.0 || ^4.0.0" - }, - "bin": { - "loose-envify": "cli.js" - } - }, - "node_modules/lower-case": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", - "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", - "dependencies": { - "tslib": "^2.0.3" - } - }, - "node_modules/lowercase-keys": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", - "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", - "dependencies": { - "yallist": "^3.0.2" - } - }, - "node_modules/markdown-extensions": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", - "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", - "engines": { - "node": ">=16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/markdown-table": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz", - "integrity": "sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/mdast-util-directive": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.0.0.tgz", - "integrity": "sha512-JUpYOqKI4mM3sZcNxmF/ox04XYFFkNwr0CFlrQIkCwbvH0xzMCqkMqAde9wRd80VAhaUrwFwKm2nxretdT1h7Q==", - "dependencies": { - "@types/mdast": "^4.0.0", - "@types/unist": "^3.0.0", - "devlop": "^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "parse-entities": "^4.0.0", - "stringify-entities": "^4.0.0", - "unist-util-visit-parents": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-find-and-replace": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz", - "integrity": "sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA==", - "dependencies": { - "@types/mdast": "^4.0.0", - "escape-string-regexp": "^5.0.0", - "unist-util-is": "^6.0.0", - "unist-util-visit-parents": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", - "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/mdast-util-from-markdown": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.1.tgz", - "integrity": "sha512-aJEUyzZ6TzlsX2s5B4Of7lN7EQtAxvtradMMglCQDyaTFgse6CmtmdJ15ElnVRlCg1vpNyVtbem0PWzlNieZsA==", - "dependencies": { - "@types/mdast": "^4.0.0", - "@types/unist": "^3.0.0", - "decode-named-character-reference": "^1.0.0", - "devlop": "^1.0.0", - "mdast-util-to-string": "^4.0.0", - "micromark": "^4.0.0", - "micromark-util-decode-numeric-character-reference": "^2.0.0", - "micromark-util-decode-string": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - "unist-util-stringify-position": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-from-markdown/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/mdast-util-frontmatter": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz", - "integrity": "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==", - "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "escape-string-regexp": "^5.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "micromark-extension-frontmatter": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - 
}, - "node_modules/mdast-util-frontmatter/node_modules/escape-string-regexp": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", - "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/mdast-util-gfm": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz", - "integrity": "sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw==", - "dependencies": { - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-gfm-autolink-literal": "^2.0.0", - "mdast-util-gfm-footnote": "^2.0.0", - "mdast-util-gfm-strikethrough": "^2.0.0", - "mdast-util-gfm-table": "^2.0.0", - "mdast-util-gfm-task-list-item": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-gfm-autolink-literal": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", - "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", - "dependencies": { - "@types/mdast": "^4.0.0", - "ccount": "^2.0.0", - "devlop": "^1.0.0", - "mdast-util-find-and-replace": "^3.0.0", - "micromark-util-character": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/mdast-util-gfm-footnote": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz", - "integrity": "sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ==", - "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.1.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-gfm-strikethrough": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", - "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", - "dependencies": { - "@types/mdast": "^4.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-gfm-table": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", - "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", - "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "markdown-table": "^3.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-gfm-task-list-item": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", - "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", - "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-mdx": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", - "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", - "dependencies": { - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-mdx-expression": "^2.0.0", - "mdast-util-mdx-jsx": "^3.0.0", - "mdast-util-mdxjs-esm": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-mdx-expression": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", - "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-mdx-jsx": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.3.tgz", - "integrity": "sha512-bfOjvNt+1AcbPLTFMFWY149nJz0OjmewJs3LQQ5pIyVGxP4CdOqNVJL6kTaM5c68p8q82Xv3nCyFfUnuEcH3UQ==", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "@types/unist": "^3.0.0", - "ccount": "^2.0.0", - "devlop": "^1.1.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "parse-entities": "^4.0.0", - "stringify-entities": "^4.0.0", - "unist-util-stringify-position": "^4.0.0", - "vfile-message": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-mdxjs-esm": { - "version": "2.0.1", - 
"resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", - "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-phrasing": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", - "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", - "dependencies": { - "@types/mdast": "^4.0.0", - "unist-util-is": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-to-hast": { - "version": "13.2.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", - "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", - "dependencies": { - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "@ungap/structured-clone": "^1.0.0", - "devlop": "^1.0.0", - "micromark-util-sanitize-uri": "^2.0.0", - "trim-lines": "^3.0.0", - "unist-util-position": "^5.0.0", - "unist-util-visit": "^5.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-to-markdown": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.0.tgz", - "integrity": "sha512-SR2VnIEdVNCJbP6y7kVTJgPLifdr8WEU440fQec7qHoHOUz/oJ2jmNRqdDQ3rbiStOXb2mCDGTuwsK5OPUgYlQ==", - "dependencies": { - "@types/mdast": "^4.0.0", - "@types/unist": "^3.0.0", - "longest-streak": "^3.0.0", - "mdast-util-phrasing": "^4.0.0", - "mdast-util-to-string": "^4.0.0", - "micromark-util-decode-string": "^2.0.0", - "unist-util-visit": "^5.0.0", - "zwitch": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-to-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", - "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", - "dependencies": { - "@types/mdast": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdn-data": { - "version": "2.0.30", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", - "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==" - }, - "node_modules/media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/memfs": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", - "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", - "dependencies": { - "fs-monkey": "^1.0.4" - }, - "engines": { - 
"node": ">= 4.0.0" - } - }, - "node_modules/merge-descriptors": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", - "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/merge-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", - "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" - }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "engines": { - "node": ">= 8" - } - }, - "node_modules/methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/micromark": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.0.tgz", - "integrity": "sha512-o/sd0nMof8kYff+TqcDx3VSrgBTcZpSvYcAHIfHhv5VAuNmisCxjhx6YmxS8PFEpb9z5WKWKPdzf0jM23ro3RQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "@types/debug": "^4.0.0", - "debug": "^4.0.0", - "decode-named-character-reference": "^1.0.0", - "devlop": "^1.0.0", - "micromark-core-commonmark": "^2.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-chunked": "^2.0.0", - "micromark-util-combine-extensions": "^2.0.0", - "micromark-util-decode-numeric-character-reference": "^2.0.0", - "micromark-util-encode": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0", - "micromark-util-resolve-all": "^2.0.0", - "micromark-util-sanitize-uri": "^2.0.0", - "micromark-util-subtokenize": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-core-commonmark": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.1.tgz", - "integrity": "sha512-CUQyKr1e///ZODyD1U3xit6zXwy1a8q2a1S1HKtIlmgvurrEpaw/Y9y6KSIbF8P59cn/NjzHyO+Q2fAyYLQrAA==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "decode-named-character-reference": "^1.0.0", - "devlop": "^1.0.0", - "micromark-factory-destination": "^2.0.0", - "micromark-factory-label": "^2.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-factory-title": "^2.0.0", - "micromark-factory-whitespace": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-chunked": "^2.0.0", - "micromark-util-classify-character": "^2.0.0", - "micromark-util-html-tag-name": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0", - "micromark-util-resolve-all": "^2.0.0", - "micromark-util-subtokenize": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - 
"node_modules/micromark-core-commonmark/node_modules/micromark-factory-space": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", - "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-core-commonmark/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-core-commonmark/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-extension-directive": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-3.0.2.tgz", - "integrity": "sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA==", - "dependencies": { - "devlop": "^1.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-factory-whitespace": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - "parse-entities": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-directive/node_modules/micromark-factory-space": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", - "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-directive/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - 
"dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-directive/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-extension-frontmatter": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-frontmatter/-/micromark-extension-frontmatter-2.0.0.tgz", - "integrity": "sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg==", - "dependencies": { - "fault": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-extension-gfm": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", - "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", - "dependencies": { - "micromark-extension-gfm-autolink-literal": "^2.0.0", - "micromark-extension-gfm-footnote": "^2.0.0", - "micromark-extension-gfm-strikethrough": "^2.0.0", - "micromark-extension-gfm-table": "^2.0.0", - "micromark-extension-gfm-tagfilter": "^2.0.0", - "micromark-extension-gfm-task-list-item": "^2.0.0", - "micromark-util-combine-extensions": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-autolink-literal": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", - "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-sanitize-uri": "^2.0.0", - 
"micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-extension-gfm-footnote": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", - "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", - "dependencies": { - "devlop": "^1.0.0", - "micromark-core-commonmark": "^2.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0", - "micromark-util-sanitize-uri": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-factory-space": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", - "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": 
"sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-extension-gfm-strikethrough": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", - "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", - "dependencies": { - "devlop": "^1.0.0", - "micromark-util-chunked": "^2.0.0", - "micromark-util-classify-character": "^2.0.0", - "micromark-util-resolve-all": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-strikethrough/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-extension-gfm-table": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.0.tgz", - "integrity": "sha512-Ub2ncQv+fwD70/l4ou27b4YzfNaCJOvyX4HxXU15m7mpYY+rjuWzsLIPZHJL253Z643RpbcP1oeIJlQ/SKW67g==", - "dependencies": { - "devlop": "^1.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-table/node_modules/micromark-factory-space": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", - "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": 
"sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-extension-gfm-tagfilter": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", - "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", - "dependencies": { - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-task-list-item": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", - "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", - "dependencies": { - "devlop": "^1.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-factory-space": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", - "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-extension-mdx-expression": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.0.tgz", - "integrity": "sha512-sI0nwhUDz97xyzqJAbHQhp5TfaxEvZZZ2JDqUo+7NvyIYG6BZ5CPPqj2ogUoPJlmXHBnyZUzISg9+oUmU6tUjQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": 
"https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "@types/estree": "^1.0.0", - "devlop": "^1.0.0", - "micromark-factory-mdx-expression": "^2.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-events-to-acorn": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-mdx-expression/node_modules/micromark-factory-space": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", - "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-extension-mdx-jsx": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.1.tgz", - "integrity": "sha512-vNuFb9czP8QCtAQcEJn0UJQJZA8Dk6DXKBqx+bg/w0WGuSxDxNr7hErW89tHUY31dUW4NqEOWwmEUNhjTFmHkg==", - "dependencies": { - "@types/acorn": "^4.0.0", - "@types/estree": "^1.0.0", - "devlop": "^1.0.0", - "estree-util-is-identifier-name": "^3.0.0", - "micromark-factory-mdx-expression": "^2.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-events-to-acorn": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - "vfile-message": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-factory-space": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", - "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - 
], - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-extension-mdx-md": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz", - "integrity": "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==", - "dependencies": { - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-mdxjs": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz", - "integrity": "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==", - "dependencies": { - "acorn": "^8.0.0", - "acorn-jsx": "^5.0.0", - "micromark-extension-mdx-expression": "^3.0.0", - "micromark-extension-mdx-jsx": "^3.0.0", - "micromark-extension-mdx-md": "^2.0.0", - "micromark-extension-mdxjs-esm": "^3.0.0", - "micromark-util-combine-extensions": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-mdxjs-esm": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz", - "integrity": "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==", - "dependencies": { - "@types/estree": "^1.0.0", - "devlop": "^1.0.0", - "micromark-core-commonmark": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-events-to-acorn": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - "unist-util-position-from-estree": "^2.0.0", - "vfile-message": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": 
"https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-factory-destination": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.0.tgz", - "integrity": "sha512-j9DGrQLm/Uhl2tCzcbLhy5kXsgkHUrjJHg4fFAeoMRwJmJerT9aw4FEhIbZStWN8A3qMwOp1uzHr4UL8AInxtA==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-destination/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-destination/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-factory-label": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.0.tgz", - "integrity": "sha512-RR3i96ohZGde//4WSe/dJsxOX6vxIg9TimLAS3i4EhBAFx8Sm5SmqVfR8E87DPSR31nEAjZfbt91OMZWcNgdZw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "devlop": "^1.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-label/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": 
"https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-label/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-factory-mdx-expression": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.2.tgz", - "integrity": "sha512-5E5I2pFzJyg2CtemqAbcyCktpHXuJbABnsb32wX2U8IQKhhVFBqkcZR5LRm1WVoFqa4kTueZK4abep7wdo9nrw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "@types/estree": "^1.0.0", - "devlop": "^1.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-events-to-acorn": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - "unist-util-position-from-estree": "^2.0.0", - "vfile-message": "^4.0.0" - } - }, - "node_modules/micromark-factory-mdx-expression/node_modules/micromark-factory-space": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", - "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-factory-space": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz", - "integrity": 
"sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-character": "^1.0.0", - "micromark-util-types": "^1.0.0" - } - }, - "node_modules/micromark-factory-space/node_modules/micromark-util-types": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", - "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-factory-title": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.0.tgz", - "integrity": "sha512-jY8CSxmpWLOxS+t8W+FG3Xigc0RDQA9bKMY/EwILvsesiRniiVMejYTE4wumNc2f4UbAa4WsHqe3J1QS1sli+A==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-title/node_modules/micromark-factory-space": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", - "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-title/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-title/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-factory-whitespace": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.0.tgz", - "integrity": 
"sha512-28kbwaBjc5yAI1XadbdPYHX/eDnqaUFVikLwrO7FDnKG7lpgxnvk/XGRhX/PN0mOZ+dBSZ+LgunHS+6tYQAzhA==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-whitespace/node_modules/micromark-factory-space": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", - "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-whitespace/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-whitespace/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-util-character": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.2.0.tgz", - "integrity": "sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^1.0.0", - "micromark-util-types": "^1.0.0" - } - }, - "node_modules/micromark-util-character/node_modules/micromark-util-types": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", - "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-util-chunked": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.0.tgz", - "integrity": 
"sha512-anK8SWmNphkXdaKgz5hJvGa7l00qmcaUQoMYsBwDlSKFKjc6gjGXPDw3FNL3Nbwq5L8gE+RCbGqTw49FK5Qyvg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0" - } - }, - "node_modules/micromark-util-chunked/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-util-classify-character": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.0.tgz", - "integrity": "sha512-S0ze2R9GH+fu41FA7pbSqNWObo/kzwf8rN/+IGlW/4tC6oACOs8B++bh+i9bVyNnwCcuksbFwsBme5OCKXCwIw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-classify-character/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-classify-character/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-util-combine-extensions": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.0.tgz", - "integrity": "sha512-vZZio48k7ON0fVS3CUgFatWHoKbbLTK/rT7pzpJ4Bjp5JjkZeasRfrS9wsBdDJK2cJLHMckXZdzPSSr1B8a4oQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-chunked": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-decode-numeric-character-reference": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.1.tgz", - "integrity": 
"sha512-bmkNc7z8Wn6kgjZmVHOX3SowGmVdhYS7yBpMnuMnPzDq/6xwVA604DuOXMZTO1lvq01g+Adfa0pE2UKGlxL1XQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0" - } - }, - "node_modules/micromark-util-decode-numeric-character-reference/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-util-decode-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.0.tgz", - "integrity": "sha512-r4Sc6leeUTn3P6gk20aFMj2ntPwn6qpDZqWvYmAG6NgvFTIlj4WtrAudLi65qYoaGdXYViXYw2pkmn7QnIFasA==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "decode-named-character-reference": "^1.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-decode-numeric-character-reference": "^2.0.0", - "micromark-util-symbol": "^2.0.0" - } - }, - "node_modules/micromark-util-decode-string/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-decode-string/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-util-encode": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.0.tgz", - "integrity": "sha512-pS+ROfCXAGLWCOc8egcBvT0kf27GoWMqtdarNfDcjb6YLuV5cM3ioG45Ys2qOVqeqSbjaKg72vU+Wby3eddPsA==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-util-events-to-acorn": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.2.tgz", - "integrity": "sha512-Fk+xmBrOv9QZnEDguL9OI9/NQQp6Hz4FuQ4YmCb/5V7+9eAh1s6AYSvL20kHkD67YIg7EpE54TiSlcsf3vyZgA==", - "funding": [ - { - "type": "GitHub Sponsors", - 
"url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "@types/acorn": "^4.0.0", - "@types/estree": "^1.0.0", - "@types/unist": "^3.0.0", - "devlop": "^1.0.0", - "estree-util-visit": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - "vfile-message": "^4.0.0" - } - }, - "node_modules/micromark-util-events-to-acorn/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-util-html-tag-name": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.0.tgz", - "integrity": "sha512-xNn4Pqkj2puRhKdKTm8t1YHC/BAjx6CEwRFXntTaRf/x16aqka6ouVoutm+QdkISTlT7e2zU7U4ZdlDLJd2Mcw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-util-normalize-identifier": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.0.tgz", - "integrity": "sha512-2xhYT0sfo85FMrUPtHcPo2rrp1lwbDEEzpx7jiH2xXJLqBuy4H0GgXk5ToU8IEwoROtXuL8ND0ttVa4rNqYK3w==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0" - } - }, - "node_modules/micromark-util-normalize-identifier/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-util-resolve-all": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.0.tgz", - "integrity": "sha512-6KU6qO7DZ7GJkaCgwBNtplXCvGkJToU86ybBAUdavvgsCiG8lSSvYxr9MhwmQ+udpzywHsl4RpGJsYWG1pDOcA==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-sanitize-uri": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.0.tgz", - "integrity": "sha512-WhYv5UEcZrbAtlsnPuChHUAsu/iBPOVaEVsntLBIdpibO0ddy8OzavZz3iL2xVvBZOpolujSliP65Kq0/7KIYw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - 
"dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-encode": "^2.0.0", - "micromark-util-symbol": "^2.0.0" - } - }, - "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-util-subtokenize": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.1.tgz", - "integrity": "sha512-jZNtiFl/1aY73yS3UGQkutD0UbhTt68qnRpw2Pifmz5wV9h8gOVsN70v+Lq/f1rKaU/W8pxRe8y8Q9FX1AOe1Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "devlop": "^1.0.0", - "micromark-util-chunked": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-subtokenize/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-util-symbol": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz", - "integrity": "sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark-util-types": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.0.tgz", - "integrity": "sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromark/node_modules/micromark-factory-space": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", - "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/micromatch": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", - "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", - "dependencies": { - "braces": "^3.0.3", - "picomatch": "^2.3.1" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/mime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/mime-db": { - "version": "1.33.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", - "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.18", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", - "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", - "dependencies": { - "mime-db": "~1.33.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/mimic-response": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz", - "integrity": "sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - 
"node_modules/mini-css-extract-plugin": { - "version": "2.9.0", - "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.9.0.tgz", - "integrity": "sha512-Zs1YsZVfemekSZG+44vBsYTLQORkPMwnlv+aehcxK/NLKC+EGhDB39/YePYYqx/sTk6NnYpuqikhSn7+JIevTA==", - "dependencies": { - "schema-utils": "^4.0.0", - "tapable": "^2.2.1" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - } - }, - "node_modules/minimalistic-assert": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" - }, - "node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/minimist": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", - "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/mrmime": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.0.tgz", - "integrity": "sha512-eu38+hdgojoyq63s+yTpN4XMBdt5l8HhMhc4VKLO9KM5caLIBvUm4thi7fFaxyTmCKeNnXZ5pAlBwCUnhA09uw==", - "engines": { - "node": ">=10" - } - }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/multicast-dns": { - "version": "7.2.5", - "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", - "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", - "dependencies": { - "dns-packet": "^5.2.2", - "thunky": "^1.0.2" - }, - "bin": { - "multicast-dns": "cli.js" - } - }, - "node_modules/nanoid": { - "version": "3.3.7", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", - "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "bin": { - "nanoid": "bin/nanoid.cjs" - }, - "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" - } - }, - "node_modules/negotiator": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", - "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/neo-async": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", - "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" - }, - "node_modules/no-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", - "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", 
- "dependencies": { - "lower-case": "^2.0.2", - "tslib": "^2.0.3" - } - }, - "node_modules/node-emoji": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-2.1.3.tgz", - "integrity": "sha512-E2WEOVsgs7O16zsURJ/eH8BqhF029wGpEOnv7Urwdo2wmQanOACwJQh0devF9D9RhoZru0+9JXIS0dBXIAz+lA==", - "dependencies": { - "@sindresorhus/is": "^4.6.0", - "char-regex": "^1.0.2", - "emojilib": "^2.4.0", - "skin-tone": "^2.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/node-forge": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", - "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", - "engines": { - "node": ">= 6.13.0" - } - }, - "node_modules/node-releases": { - "version": "2.0.18", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", - "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==" - }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-range": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", - "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-url": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-8.0.1.tgz", - "integrity": "sha512-IO9QvjUMWxPQQhs60oOu10CRkWCiZzSUkzbXGGV9pviYl1fXYcvkzQ5jV9z8Y6un8ARoVRl4EtC6v6jNqbaJ/w==", - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/npm-run-path": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", - "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", - "dependencies": { - "path-key": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/nprogress": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", - "integrity": "sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==" - }, - "node_modules/nth-check": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", - "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", - "dependencies": { - "boolbase": "^1.0.0" - }, - "funding": { - "url": "https://github.com/fb55/nth-check?sponsor=1" - } - }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-inspect": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz", - "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==", - "engines": { - 
"node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object.assign": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz", - "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==", - "dependencies": { - "call-bind": "^1.0.5", - "define-properties": "^1.2.1", - "has-symbols": "^1.0.3", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/obuf": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", - "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" - }, - "node_modules/on-finished": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", - "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", - "dependencies": { - "ee-first": "1.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/on-headers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", - "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "dependencies": { - "mimic-fn": "^2.1.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/open": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", - "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", - "dependencies": { - "define-lazy-prop": "^2.0.0", - "is-docker": "^2.1.1", - "is-wsl": "^2.2.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/opener": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", - "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", - "bin": { - "opener": "bin/opener-bin.js" - } - }, - "node_modules/p-cancelable": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-3.0.0.tgz", - "integrity": "sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==", - "engines": { - "node": ">=12.20" - } - }, - "node_modules/p-limit": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", - 
"integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", - "dependencies": { - "yocto-queue": "^1.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-locate": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-6.0.0.tgz", - "integrity": "sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==", - "dependencies": { - "p-limit": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-map": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", - "dependencies": { - "aggregate-error": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-retry": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", - "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", - "dependencies": { - "@types/retry": "0.12.0", - "retry": "^0.13.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/package-json": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/package-json/-/package-json-8.1.1.tgz", - "integrity": "sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA==", - "dependencies": { - "got": "^12.1.0", - "registry-auth-token": "^5.0.1", - "registry-url": "^6.0.0", - "semver": "^7.3.7" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/param-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", - "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", - "dependencies": { - "dot-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "dependencies": { - "callsites": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/parse-entities": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.1.tgz", - "integrity": "sha512-SWzvYcSJh4d/SGLIOQfZ/CoNv6BTlI6YEQ7Nj82oDVnRpwe/Z/F1EMx42x3JAOwGBlCjeCH0BRJQbQ/opHL17w==", - "dependencies": { - "@types/unist": "^2.0.0", - "character-entities": "^2.0.0", - "character-entities-legacy": "^3.0.0", - "character-reference-invalid": "^2.0.0", - "decode-named-character-reference": "^1.0.0", - "is-alphanumerical": "^2.0.0", - "is-decimal": "^2.0.0", - "is-hexadecimal": "^2.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } 
- }, - "node_modules/parse-entities/node_modules/@types/unist": { - "version": "2.0.11", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", - "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==" - }, - "node_modules/parse-json": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", - "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", - "dependencies": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-even-better-errors": "^2.3.0", - "lines-and-columns": "^1.1.6" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/parse-numeric-range": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz", - "integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==" - }, - "node_modules/parse5": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", - "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==", - "dependencies": { - "entities": "^4.4.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, - "node_modules/parse5-htmlparser2-tree-adapter": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz", - "integrity": "sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==", - "dependencies": { - "domhandler": "^5.0.2", - "parse5": "^7.0.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, - "node_modules/parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/pascal-case": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", - "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", - "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/path-exists": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz", - "integrity": "sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - } - }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-is-inside": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", - "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==" - }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": 
"sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" - }, - "node_modules/path-to-regexp": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", - "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", - "dependencies": { - "isarray": "0.0.1" - } - }, - "node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "engines": { - "node": ">=8" - } - }, - "node_modules/periscopic": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/periscopic/-/periscopic-3.1.0.tgz", - "integrity": "sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==", - "dependencies": { - "@types/estree": "^1.0.0", - "estree-walker": "^3.0.0", - "is-reference": "^3.0.0" - } - }, - "node_modules/picocolors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", - "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==" - }, - "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "engines": { - "node": ">=8.6" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/pkg-dir": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-7.0.0.tgz", - "integrity": "sha512-Ie9z/WINcxxLp27BKOCHGde4ITq9UklYKDzVo1nhk5sqGEXU3FpkwP5GM2voTGJkGd9B3Otl+Q4uwSOeSUtOBA==", - "dependencies": { - "find-up": "^6.3.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/pkg-up": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz", - "integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/pkg-up/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-up/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-up/node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": 
"sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/pkg-up/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-up/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss": { - "version": "8.4.38", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz", - "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/postcss" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "dependencies": { - "nanoid": "^3.3.7", - "picocolors": "^1.0.0", - "source-map-js": "^1.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14" - } - }, - "node_modules/postcss-calc": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-9.0.1.tgz", - "integrity": "sha512-TipgjGyzP5QzEhsOZUaIkeO5mKeMFpebWzRogWG/ysonUlnHcq5aJe0jOjpfzUU8PeSaBQnrE8ehR0QA5vs8PQ==", - "dependencies": { - "postcss-selector-parser": "^6.0.11", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.2.2" - } - }, - "node_modules/postcss-colormin": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-6.1.0.tgz", - "integrity": "sha512-x9yX7DOxeMAR+BgGVnNSAxmAj98NX/YxEMNFP+SDCEeNLb2r3i6Hh1ksMsnW8Ub5SLCpbescQqn9YEbE9554Sw==", - "dependencies": { - "browserslist": "^4.23.0", - "caniuse-api": "^3.0.0", - "colord": "^2.9.3", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-convert-values": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-6.1.0.tgz", - "integrity": "sha512-zx8IwP/ts9WvUM6NkVSkiU902QZL1bwPhaVaLynPtCsOTqp+ZKbNi+s6XJg3rfqpKGA/oc7Oxk5t8pOQJcwl/w==", - "dependencies": { - "browserslist": "^4.23.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-discard-comments": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-6.0.2.tgz", - "integrity": "sha512-65w/uIqhSBBfQmYnG92FO1mWZjJ4GL5b8atm5Yw2UgrwD7HiNiSSNwJor1eCFGzUgYnN/iIknhNRVqjrrpuglw==", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-discard-duplicates": { - "version": "6.0.3", - "resolved": 
"https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-6.0.3.tgz", - "integrity": "sha512-+JA0DCvc5XvFAxwx6f/e68gQu/7Z9ud584VLmcgto28eB8FqSFZwtrLwB5Kcp70eIoWP/HXqz4wpo8rD8gpsTw==", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-discard-empty": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-6.0.3.tgz", - "integrity": "sha512-znyno9cHKQsK6PtxL5D19Fj9uwSzC2mB74cpT66fhgOadEUPyXFkbgwm5tvc3bt3NAy8ltE5MrghxovZRVnOjQ==", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-discard-overridden": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-6.0.2.tgz", - "integrity": "sha512-j87xzI4LUggC5zND7KdjsI25APtyMuynXZSujByMaav2roV6OZX+8AaCUcZSWqckZpjAjRyFDdpqybgjFO0HJQ==", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-discard-unused": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-6.0.5.tgz", - "integrity": "sha512-wHalBlRHkaNnNwfC8z+ppX57VhvS+HWgjW508esjdaEYr3Mx7Gnn2xA4R/CKf5+Z9S5qsqC+Uzh4ueENWwCVUA==", - "dependencies": { - "postcss-selector-parser": "^6.0.16" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-loader": { - "version": "7.3.4", - "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.4.tgz", - "integrity": "sha512-iW5WTTBSC5BfsBJ9daFMPVrLT36MrNiC6fqOZTTaHjBNX6Pfd5p+hSBqe/fEeNd7pc13QiAyGt7VdGMw4eRC4A==", - "dependencies": { - "cosmiconfig": "^8.3.5", - "jiti": "^1.20.0", - "semver": "^7.5.4" - }, - "engines": { - "node": ">= 14.15.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "postcss": "^7.0.0 || ^8.0.1", - "webpack": "^5.0.0" - } - }, - "node_modules/postcss-merge-idents": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-6.0.3.tgz", - "integrity": "sha512-1oIoAsODUs6IHQZkLQGO15uGEbK3EAl5wi9SS8hs45VgsxQfMnxvt+L+zIr7ifZFIH14cfAeVe2uCTa+SPRa3g==", - "dependencies": { - "cssnano-utils": "^4.0.2", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-merge-longhand": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-6.0.5.tgz", - "integrity": "sha512-5LOiordeTfi64QhICp07nzzuTDjNSO8g5Ksdibt44d+uvIIAE1oZdRn8y/W5ZtYgRH/lnLDlvi9F8btZcVzu3w==", - "dependencies": { - "postcss-value-parser": "^4.2.0", - "stylehacks": "^6.1.1" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-merge-rules": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-6.1.1.tgz", - "integrity": "sha512-KOdWF0gju31AQPZiD+2Ar9Qjowz1LTChSjFFbS+e2sFgc4uHOp3ZvVX4sNeTlk0w2O31ecFGgrFzhO0RSWbWwQ==", - "dependencies": { - "browserslist": "^4.23.0", - "caniuse-api": "^3.0.0", - "cssnano-utils": "^4.0.2", - "postcss-selector-parser": "^6.0.16" - }, - "engines": { - "node": "^14 || ^16 || 
>=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-minify-font-values": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-6.1.0.tgz", - "integrity": "sha512-gklfI/n+9rTh8nYaSJXlCo3nOKqMNkxuGpTn/Qm0gstL3ywTr9/WRKznE+oy6fvfolH6dF+QM4nCo8yPLdvGJg==", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-minify-gradients": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-6.0.3.tgz", - "integrity": "sha512-4KXAHrYlzF0Rr7uc4VrfwDJ2ajrtNEpNEuLxFgwkhFZ56/7gaE4Nr49nLsQDZyUe+ds+kEhf+YAUolJiYXF8+Q==", - "dependencies": { - "colord": "^2.9.3", - "cssnano-utils": "^4.0.2", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-minify-params": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-6.1.0.tgz", - "integrity": "sha512-bmSKnDtyyE8ujHQK0RQJDIKhQ20Jq1LYiez54WiaOoBtcSuflfK3Nm596LvbtlFcpipMjgClQGyGr7GAs+H1uA==", - "dependencies": { - "browserslist": "^4.23.0", - "cssnano-utils": "^4.0.2", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-minify-selectors": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-6.0.4.tgz", - "integrity": "sha512-L8dZSwNLgK7pjTto9PzWRoMbnLq5vsZSTu8+j1P/2GB8qdtGQfn+K1uSvFgYvgh83cbyxT5m43ZZhUMTJDSClQ==", - "dependencies": { - "postcss-selector-parser": "^6.0.16" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-modules-extract-imports": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.1.0.tgz", - "integrity": "sha512-k3kNe0aNFQDAZGbin48pL2VNidTF0w4/eASDsxlyspobzU3wZQLOGj7L9gfRe0Jo9/4uud09DsjFNH7winGv8Q==", - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-local-by-default": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.5.tgz", - "integrity": "sha512-6MieY7sIfTK0hYfafw1OMEG+2bg8Q1ocHCpoWLqOKj3JXlKu4G7btkmM/B7lFubYkYWmRSPLZi5chid63ZaZYw==", - "dependencies": { - "icss-utils": "^5.0.0", - "postcss-selector-parser": "^6.0.2", - "postcss-value-parser": "^4.1.0" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-scope": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.2.0.tgz", - "integrity": "sha512-oq+g1ssrsZOsx9M96c5w8laRmvEu9C3adDSjI8oTcbfkrTE8hx/zfyobUoWIxaKPO8bt6S62kxpw5GqypEw1QQ==", - "dependencies": { - "postcss-selector-parser": "^6.0.4" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-values": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", - "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", - "dependencies": { - "icss-utils": "^5.0.0" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-normalize-charset": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-6.0.2.tgz", - "integrity": "sha512-a8N9czmdnrjPHa3DeFlwqst5eaL5W8jYu3EBbTTkI5FHkfMhFZh1EGbku6jhHhIzTA6tquI2P42NtZ59M/H/kQ==", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-display-values": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-6.0.2.tgz", - "integrity": "sha512-8H04Mxsb82ON/aAkPeq8kcBbAtI5Q2a64X/mnRRfPXBq7XeogoQvReqxEfc0B4WPq1KimjezNC8flUtC3Qz6jg==", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-positions": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-6.0.2.tgz", - "integrity": "sha512-/JFzI441OAB9O7VnLA+RtSNZvQ0NCFZDOtp6QPFo1iIyawyXg0YI3CYM9HBy1WvwCRHnPep/BvI1+dGPKoXx/Q==", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-repeat-style": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-6.0.2.tgz", - "integrity": "sha512-YdCgsfHkJ2jEXwR4RR3Tm/iOxSfdRt7jplS6XRh9Js9PyCR/aka/FCb6TuHT2U8gQubbm/mPmF6L7FY9d79VwQ==", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-string": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-6.0.2.tgz", - "integrity": "sha512-vQZIivlxlfqqMp4L9PZsFE4YUkWniziKjQWUtsxUiVsSSPelQydwS8Wwcuw0+83ZjPWNTl02oxlIvXsmmG+CiQ==", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-timing-functions": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-6.0.2.tgz", - "integrity": "sha512-a+YrtMox4TBtId/AEwbA03VcJgtyW4dGBizPl7e88cTFULYsprgHWTbfyjSLyHeBcK/Q9JhXkt2ZXiwaVHoMzA==", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-unicode": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-6.1.0.tgz", - "integrity": "sha512-QVC5TQHsVj33otj8/JD869Ndr5Xcc/+fwRh4HAsFsAeygQQXm+0PySrKbr/8tkDKzW+EVT3QkqZMfFrGiossDg==", - "dependencies": { - "browserslist": "^4.23.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - 
"peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-url": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-6.0.2.tgz", - "integrity": "sha512-kVNcWhCeKAzZ8B4pv/DnrU1wNh458zBNp8dh4y5hhxih5RZQ12QWMuQrDgPRw3LRl8mN9vOVfHl7uhvHYMoXsQ==", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-whitespace": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-6.0.2.tgz", - "integrity": "sha512-sXZ2Nj1icbJOKmdjXVT9pnyHQKiSAyuNQHSgRCUgThn2388Y9cGVDR+E9J9iAYbSbLHI+UUwLVl1Wzco/zgv0Q==", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-ordered-values": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-6.0.2.tgz", - "integrity": "sha512-VRZSOB+JU32RsEAQrO94QPkClGPKJEL/Z9PCBImXMhIeK5KAYo6slP/hBYlLgrCjFxyqvn5VC81tycFEDBLG1Q==", - "dependencies": { - "cssnano-utils": "^4.0.2", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-reduce-idents": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-6.0.3.tgz", - "integrity": "sha512-G3yCqZDpsNPoQgbDUy3T0E6hqOQ5xigUtBQyrmq3tn2GxlyiL0yyl7H+T8ulQR6kOcHJ9t7/9H4/R2tv8tJbMA==", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-reduce-initial": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-6.1.0.tgz", - "integrity": "sha512-RarLgBK/CrL1qZags04oKbVbrrVK2wcxhvta3GCxrZO4zveibqbRPmm2VI8sSgCXwoUHEliRSbOfpR0b/VIoiw==", - "dependencies": { - "browserslist": "^4.23.0", - "caniuse-api": "^3.0.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-reduce-transforms": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-6.0.2.tgz", - "integrity": "sha512-sB+Ya++3Xj1WaT9+5LOOdirAxP7dJZms3GRcYheSPi1PiTMigsxHAdkrbItHxwYHr4kt1zL7mmcHstgMYT+aiA==", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-selector-parser": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.0.tgz", - "integrity": "sha512-UMz42UD0UY0EApS0ZL9o1XnLhSTtvvvLe5Dc2H2O56fvRZi+KulDyf5ctDhhtYJBGKStV2FL1fy6253cmLgqVQ==", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-sort-media-queries": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-5.2.0.tgz", - "integrity": "sha512-AZ5fDMLD8SldlAYlvi8NIqo0+Z8xnXU2ia0jxmuhxAU+Lqt9K+AlmLNJ/zWEnE9x+Zx3qL3+1K20ATgNOr3fAA==", - "dependencies": { - 
"sort-css-media-queries": "2.2.0" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "postcss": "^8.4.23" - } - }, - "node_modules/postcss-svgo": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-6.0.3.tgz", - "integrity": "sha512-dlrahRmxP22bX6iKEjOM+c8/1p+81asjKT+V5lrgOH944ryx/OHpclnIbGsKVd3uWOXFLYJwCVf0eEkJGvO96g==", - "dependencies": { - "postcss-value-parser": "^4.2.0", - "svgo": "^3.2.0" - }, - "engines": { - "node": "^14 || ^16 || >= 18" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-unique-selectors": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-6.0.4.tgz", - "integrity": "sha512-K38OCaIrO8+PzpArzkLKB42dSARtC2tmG6PvD4b1o1Q2E9Os8jzfWFfSy/rixsHwohtsDdFtAWGjFVFUdwYaMg==", - "dependencies": { - "postcss-selector-parser": "^6.0.16" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-value-parser": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", - "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" - }, - "node_modules/postcss-zindex": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-6.0.2.tgz", - "integrity": "sha512-5BxW9l1evPB/4ZIc+2GobEBoKC+h8gPGCMi+jxsYvd2x0mjq7wazk6DrP71pStqxE9Foxh5TVnonbWpFZzXaYg==", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/pretty-error": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", - "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", - "dependencies": { - "lodash": "^4.17.20", - "renderkid": "^3.0.0" - } - }, - "node_modules/pretty-time": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", - "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", - "engines": { - "node": ">=4" - } - }, - "node_modules/prism-react-renderer": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.3.1.tgz", - "integrity": "sha512-Rdf+HzBLR7KYjzpJ1rSoxT9ioO85nZngQEoFIhL07XhtJHlCU3SOz0GJ6+qvMyQe0Se+BV3qpe6Yd/NmQF5Juw==", - "dependencies": { - "@types/prismjs": "^1.26.0", - "clsx": "^2.0.0" - }, - "peerDependencies": { - "react": ">=16.0.0" - } - }, - "node_modules/prismjs": { - "version": "1.29.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", - "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", - "engines": { - "node": ">=6" - } - }, - "node_modules/process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" - }, - "node_modules/prompts": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", - "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", - "dependencies": { - "kleur": "^3.0.3", - "sisteransi": 
"^1.0.5" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/prop-types": { - "version": "15.8.1", - "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", - "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", - "dependencies": { - "loose-envify": "^1.4.0", - "object-assign": "^4.1.1", - "react-is": "^16.13.1" - } - }, - "node_modules/property-information": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz", - "integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/proto-list": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", - "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==" - }, - "node_modules/proxy-addr": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", - "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", - "dependencies": { - "forwarded": "0.2.0", - "ipaddr.js": "1.9.1" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/proxy-addr/node_modules/ipaddr.js": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/punycode": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==" - }, - "node_modules/pupa": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/pupa/-/pupa-3.1.0.tgz", - "integrity": "sha512-FLpr4flz5xZTSJxSeaheeMKN/EDzMdK7b8PTOC6a5PYFKTucWbdqjgqaEyH0shFiSJrVB1+Qqi4Tk19ccU6Aug==", - "dependencies": { - "escape-goat": "^4.0.0" - }, - "engines": { - "node": ">=12.20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/qs": { - "version": "6.13.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", - "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", - "dependencies": { - "side-channel": "^1.0.6" - }, - "engines": { - "node": ">=0.6" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/queue": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz", - "integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==", - "dependencies": { - "inherits": "~2.0.3" - } - }, - "node_modules/queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/quick-lru": { - "version": "5.1.1", - 
"resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", - "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "dependencies": { - "safe-buffer": "^5.1.0" - } - }, - "node_modules/range-parser": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", - "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/raw-body": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", - "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", - "dependencies": { - "bytes": "3.1.2", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/raw-body/node_modules/bytes": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "dependencies": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - }, - "bin": { - "rc": "cli.js" - } - }, - "node_modules/rc/node_modules/strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/react": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", - "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", - "dependencies": { - "loose-envify": "^1.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/react-dev-utils": { - "version": "12.0.1", - "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz", - "integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==", - "dependencies": { - "@babel/code-frame": "^7.16.0", - "address": "^1.1.2", - "browserslist": "^4.18.1", - "chalk": "^4.1.2", - "cross-spawn": "^7.0.3", - "detect-port-alt": "^1.1.6", - "escape-string-regexp": "^4.0.0", - "filesize": "^8.0.6", - "find-up": "^5.0.0", - "fork-ts-checker-webpack-plugin": "^6.5.0", - "global-modules": "^2.0.0", - "globby": "^11.0.4", - "gzip-size": "^6.0.0", - "immer": "^9.0.7", - "is-root": "^2.1.0", - "loader-utils": "^3.2.0", - "open": "^8.4.0", - "pkg-up": "^3.1.0", - "prompts": "^2.4.2", - "react-error-overlay": "^6.0.11", - "recursive-readdir": "^2.2.2", - "shell-quote": "^1.7.3", - "strip-ansi": "^6.0.1", 
- "text-table": "^0.2.0" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/react-dev-utils/node_modules/find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", - "dependencies": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/react-dev-utils/node_modules/loader-utils": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.3.1.tgz", - "integrity": "sha512-FMJTLMXfCLMLfJxcX9PFqX5qD88Z5MRGaZCVzfuqeZSPsyiBzs+pahDQjbIWz2QIzPZz0NX9Zy4FX3lmK6YHIg==", - "engines": { - "node": ">= 12.13.0" - } - }, - "node_modules/react-dev-utils/node_modules/locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", - "dependencies": { - "p-locate": "^5.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/react-dev-utils/node_modules/p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dependencies": { - "yocto-queue": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/react-dev-utils/node_modules/p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", - "dependencies": { - "p-limit": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/react-dev-utils/node_modules/path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "engines": { - "node": ">=8" - } - }, - "node_modules/react-dev-utils/node_modules/yocto-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/react-dom": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", - "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", - "dependencies": { - "loose-envify": "^1.1.0", - "scheduler": "^0.23.2" - }, - "peerDependencies": { - "react": "^18.3.1" - } - }, - "node_modules/react-error-overlay": { - "version": "6.0.11", - "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz", - "integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg==" - }, - "node_modules/react-fast-compare": { - 
"version": "3.2.2", - "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", - "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==" - }, - "node_modules/react-helmet-async": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz", - "integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==", - "dependencies": { - "@babel/runtime": "^7.12.5", - "invariant": "^2.2.4", - "prop-types": "^15.7.2", - "react-fast-compare": "^3.2.0", - "shallowequal": "^1.1.0" - }, - "peerDependencies": { - "react": "^16.6.0 || ^17.0.0 || ^18.0.0", - "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0" - } - }, - "node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" - }, - "node_modules/react-json-view-lite": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/react-json-view-lite/-/react-json-view-lite-1.5.0.tgz", - "integrity": "sha512-nWqA1E4jKPklL2jvHWs6s+7Na0qNgw9HCP6xehdQJeg6nPBTFZgGwyko9Q0oj+jQWKTTVRS30u0toM5wiuL3iw==", - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "react": "^16.13.1 || ^17.0.0 || ^18.0.0" - } - }, - "node_modules/react-loadable": { - "name": "@docusaurus/react-loadable", - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-6.0.0.tgz", - "integrity": "sha512-YMMxTUQV/QFSnbgrP3tjDzLHRg7vsbMn8e9HAa8o/1iXoiomo48b7sk/kkmWEuWNDPJVlKSJRB6Y2fHqdJk+SQ==", - "dependencies": { - "@types/react": "*" - }, - "peerDependencies": { - "react": "*" - } - }, - "node_modules/react-loadable-ssr-addon-v5-slorber": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz", - "integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==", - "dependencies": { - "@babel/runtime": "^7.10.3" - }, - "engines": { - "node": ">=10.13.0" - }, - "peerDependencies": { - "react-loadable": "*", - "webpack": ">=4.41.1 || 5.x" - } - }, - "node_modules/react-router": { - "version": "5.3.4", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", - "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", - "dependencies": { - "@babel/runtime": "^7.12.13", - "history": "^4.9.0", - "hoist-non-react-statics": "^3.1.0", - "loose-envify": "^1.3.1", - "path-to-regexp": "^1.7.0", - "prop-types": "^15.6.2", - "react-is": "^16.6.0", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0" - }, - "peerDependencies": { - "react": ">=15" - } - }, - "node_modules/react-router-config": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz", - "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==", - "dependencies": { - "@babel/runtime": "^7.1.2" - }, - "peerDependencies": { - "react": ">=15", - "react-router": ">=5" - } - }, - "node_modules/react-router-dom": { - "version": "5.3.4", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz", - "integrity": 
"sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==", - "dependencies": { - "@babel/runtime": "^7.12.13", - "history": "^4.9.0", - "loose-envify": "^1.3.1", - "prop-types": "^15.6.2", - "react-router": "5.3.4", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0" - }, - "peerDependencies": { - "react": ">=15" - } - }, - "node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "dependencies": { - "picomatch": "^2.2.1" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/reading-time": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/reading-time/-/reading-time-1.5.0.tgz", - "integrity": "sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg==" - }, - "node_modules/rechoir": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", - "integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==", - "dependencies": { - "resolve": "^1.1.6" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/recursive-readdir": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz", - "integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==", - "dependencies": { - "minimatch": "^3.0.5" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/regenerate": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", - "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" - }, - "node_modules/regenerate-unicode-properties": { - "version": "10.1.1", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz", - "integrity": "sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==", - "dependencies": { - "regenerate": "^1.4.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/regenerator-runtime": { - "version": "0.14.1", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", - "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" - }, - "node_modules/regenerator-transform": { - "version": "0.15.2", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", - "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", - "dependencies": { - "@babel/runtime": "^7.8.4" - } - }, - "node_modules/regexpu-core": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", - "integrity": 
"sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", - "dependencies": { - "@babel/regjsgen": "^0.8.0", - "regenerate": "^1.4.2", - "regenerate-unicode-properties": "^10.1.0", - "regjsparser": "^0.9.1", - "unicode-match-property-ecmascript": "^2.0.0", - "unicode-match-property-value-ecmascript": "^2.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/registry-auth-token": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-5.0.2.tgz", - "integrity": "sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ==", - "dependencies": { - "@pnpm/npm-conf": "^2.1.0" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/registry-url": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-6.0.1.tgz", - "integrity": "sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q==", - "dependencies": { - "rc": "1.2.8" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/regjsparser": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", - "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", - "dependencies": { - "jsesc": "~0.5.0" - }, - "bin": { - "regjsparser": "bin/parser" - } - }, - "node_modules/regjsparser/node_modules/jsesc": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", - "bin": { - "jsesc": "bin/jsesc" - } - }, - "node_modules/rehype-raw": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz", - "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==", - "dependencies": { - "@types/hast": "^3.0.0", - "hast-util-raw": "^9.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/relateurl": { - "version": "0.2.7", - "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", - "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/remark-directive": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/remark-directive/-/remark-directive-3.0.0.tgz", - "integrity": "sha512-l1UyWJ6Eg1VPU7Hm/9tt0zKtReJQNOA4+iDMAxTyZNWnJnFlbS/7zhiel/rogTLQ2vMYwDzSJa4BiVNqGlqIMA==", - "dependencies": { - "@types/mdast": "^4.0.0", - "mdast-util-directive": "^3.0.0", - "micromark-extension-directive": "^3.0.0", - "unified": "^11.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-emoji": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-4.0.1.tgz", - "integrity": "sha512-fHdvsTR1dHkWKev9eNyhTo4EFwbUvJ8ka9SgeWkMPYFX4WoI7ViVBms3PjlQYgw5TLvNQso3GUB/b/8t3yo+dg==", - "dependencies": { - "@types/mdast": "^4.0.2", - "emoticon": "^4.0.1", - "mdast-util-find-and-replace": "^3.0.1", - "node-emoji": "^2.1.0", - "unified": "^11.0.4" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - } - }, - 
"node_modules/remark-frontmatter": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/remark-frontmatter/-/remark-frontmatter-5.0.0.tgz", - "integrity": "sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ==", - "dependencies": { - "@types/mdast": "^4.0.0", - "mdast-util-frontmatter": "^2.0.0", - "micromark-extension-frontmatter": "^2.0.0", - "unified": "^11.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-gfm": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.0.tgz", - "integrity": "sha512-U92vJgBPkbw4Zfu/IiW2oTZLSL3Zpv+uI7My2eq8JxKgqraFdU8YUGicEJCEgSbeaG+QDFqIcwwfMTOEelPxuA==", - "dependencies": { - "@types/mdast": "^4.0.0", - "mdast-util-gfm": "^3.0.0", - "micromark-extension-gfm": "^3.0.0", - "remark-parse": "^11.0.0", - "remark-stringify": "^11.0.0", - "unified": "^11.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-mdx": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-3.0.1.tgz", - "integrity": "sha512-3Pz3yPQ5Rht2pM5R+0J2MrGoBSrzf+tJG94N+t/ilfdh8YLyyKYtidAYwTveB20BoHAcwIopOUqhcmh2F7hGYA==", - "dependencies": { - "mdast-util-mdx": "^3.0.0", - "micromark-extension-mdxjs": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-parse": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", - "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", - "dependencies": { - "@types/mdast": "^4.0.0", - "mdast-util-from-markdown": "^2.0.0", - "micromark-util-types": "^2.0.0", - "unified": "^11.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-rehype": { - "version": "11.1.1", - "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.1.tgz", - "integrity": "sha512-g/osARvjkBXb6Wo0XvAeXQohVta8i84ACbenPpoSsxTOQH/Ae0/RGP4WZgnMH5pMLpsj4FG7OHmcIcXxpza8eQ==", - "dependencies": { - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "mdast-util-to-hast": "^13.0.0", - "unified": "^11.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-stringify": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", - "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", - "dependencies": { - "@types/mdast": "^4.0.0", - "mdast-util-to-markdown": "^2.0.0", - "unified": "^11.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/renderkid": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", - "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", - "dependencies": { - "css-select": "^4.1.3", - "dom-converter": "^0.2.0", - "htmlparser2": "^6.1.0", - "lodash": "^4.17.21", - "strip-ansi": "^6.0.1" - } - }, - "node_modules/renderkid/node_modules/css-select": { - "version": "4.3.0", - "resolved": 
"https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", - "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^6.0.1", - "domhandler": "^4.3.1", - "domutils": "^2.8.0", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/renderkid/node_modules/dom-serializer": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", - "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/domhandler": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", - "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", - "dependencies": { - "domelementtype": "^2.2.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/domutils": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", - "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", - "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/entities": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", - "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/htmlparser2": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", - "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - }, - "node_modules/require-from-string": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", - "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/require-like": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz", - "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==", - "engines": { - "node": "*" - } - }, - "node_modules/requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" - }, - "node_modules/resolve": { - "version": 
"1.22.8", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", - "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", - "dependencies": { - "is-core-module": "^2.13.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/resolve-alpn": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", - "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==" - }, - "node_modules/resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "engines": { - "node": ">=4" - } - }, - "node_modules/resolve-pathname": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", - "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" - }, - "node_modules/responselike": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz", - "integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==", - "dependencies": { - "lowercase-keys": "^3.0.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/retry": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", - "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", - "engines": { - "node": ">= 4" - } - }, - "node_modules/reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", - "engines": { - "iojs": ">=1.0.0", - "node": ">=0.10.0" - } - }, - "node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "deprecated": "Rimraf versions prior to v4 are no longer supported", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/rtl-detect": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.1.2.tgz", - "integrity": "sha512-PGMBq03+TTG/p/cRB7HCLKJ1MgDIi07+QU1faSjiYRfmY5UsAttV9Hs08jDAHVwcOwmVLcSJkpwyfXszVjWfIQ==" - }, - "node_modules/rtlcss": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-4.3.0.tgz", - "integrity": "sha512-FI+pHEn7Wc4NqKXMXFM+VAYKEj/mRIcW4h24YVwVtyjI+EqGrLc2Hx/Ny0lrZ21cBWU2goLy36eqMcNj3AQJig==", - "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0", - "postcss": "^8.4.21", - "strip-json-comments": "^3.1.1" - }, - "bin": { - "rtlcss": "bin/rtlcss.js" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - 
"integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "dependencies": { - "queue-microtask": "^1.2.2" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "node_modules/sax": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.1.tgz", - "integrity": "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==" - }, - "node_modules/scheduler": { - "version": "0.23.2", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", - "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", - "dependencies": { - "loose-envify": "^1.1.0" - } - }, - "node_modules/schema-utils": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", - "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.9.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.1.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/search-insights": { - "version": "2.17.2", - "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.17.2.tgz", - "integrity": "sha512-zFNpOpUO+tY2D85KrxJ+aqwnIfdEGi06UH2+xEb+Bp9Mwznmauqc9djbnBibJO5mpfUPPa8st6Sx65+vbeO45g==", - "peer": true - }, - "node_modules/section-matter": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", - "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", - "dependencies": { - "extend-shallow": "^2.0.1", - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/select-hose": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", - "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==" - }, - "node_modules/selfsigned": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.4.1.tgz", - "integrity": "sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q==", - "dependencies": { - "@types/node-forge": "^1.3.0", - "node-forge": "^1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/semver": { - "version": "7.6.2", - "resolved": 
"https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", - "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/semver-diff": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-4.0.0.tgz", - "integrity": "sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA==", - "dependencies": { - "semver": "^7.3.5" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/send": { - "version": "0.19.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", - "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", - "dependencies": { - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "2.0.0", - "mime": "1.6.0", - "ms": "2.1.3", - "on-finished": "2.4.1", - "range-parser": "~1.2.1", - "statuses": "2.0.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/send/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/send/node_modules/debug/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - }, - "node_modules/send/node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/send/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "node_modules/send/node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serialize-javascript": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", - "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", - "dependencies": { - "randombytes": "^2.1.0" - } - }, - "node_modules/serve-handler": { - "version": "6.1.5", - "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz", - "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==", - "dependencies": { - "bytes": "3.0.0", - "content-disposition": "0.5.2", - "fast-url-parser": "1.1.3", - "mime-types": "2.1.18", - "minimatch": "3.1.2", - "path-is-inside": "1.0.2", - "path-to-regexp": "2.2.1", - "range-parser": "1.2.0" - } - }, - 
"node_modules/serve-handler/node_modules/path-to-regexp": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", - "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==" - }, - "node_modules/serve-index": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", - "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==", - "dependencies": { - "accepts": "~1.3.4", - "batch": "0.6.1", - "debug": "2.6.9", - "escape-html": "~1.0.3", - "http-errors": "~1.6.2", - "mime-types": "~2.1.17", - "parseurl": "~1.3.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/serve-index/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/serve-index/node_modules/depd": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-index/node_modules/http-errors": { - "version": "1.6.3", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", - "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.0", - "statuses": ">= 1.4.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-index/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" - }, - "node_modules/serve-index/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - }, - "node_modules/serve-index/node_modules/setprototypeof": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", - "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" - }, - "node_modules/serve-index/node_modules/statuses": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", - "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-static": { - "version": "1.16.2", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", - "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", - "dependencies": { - "encodeurl": "~2.0.0", - "escape-html": "~1.0.3", - "parseurl": "~1.3.3", - "send": "0.19.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/set-function-length": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", - "integrity": 
"sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", - "dependencies": { - "define-data-property": "^1.1.4", - "es-errors": "^1.3.0", - "function-bind": "^1.1.2", - "get-intrinsic": "^1.2.4", - "gopd": "^1.0.1", - "has-property-descriptors": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/setprototypeof": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", - "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" - }, - "node_modules/shallow-clone": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", - "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", - "dependencies": { - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shallowequal": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", - "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "engines": { - "node": ">=8" - } - }, - "node_modules/shell-quote": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz", - "integrity": "sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/shelljs": { - "version": "0.8.5", - "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz", - "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==", - "dependencies": { - "glob": "^7.0.0", - "interpret": "^1.0.0", - "rechoir": "^0.6.2" - }, - "bin": { - "shjs": "bin/shjs" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/side-channel": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", - "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", - "dependencies": { - "call-bind": "^1.0.7", - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.4", - "object-inspect": "^1.13.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" - }, - "node_modules/sirv": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/sirv/-/sirv-2.0.4.tgz", - "integrity": "sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ==", - 
"dependencies": { - "@polka/url": "^1.0.0-next.24", - "mrmime": "^2.0.0", - "totalist": "^3.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/sisteransi": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", - "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" - }, - "node_modules/sitemap": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.2.tgz", - "integrity": "sha512-ARCqzHJ0p4gWt+j7NlU5eDlIO9+Rkr/JhPFZKKQ1l5GCus7rJH4UdrlVAh0xC/gDS/Qir2UMxqYNHtsKr2rpCw==", - "dependencies": { - "@types/node": "^17.0.5", - "@types/sax": "^1.2.1", - "arg": "^5.0.0", - "sax": "^1.2.4" - }, - "bin": { - "sitemap": "dist/cli.js" - }, - "engines": { - "node": ">=12.0.0", - "npm": ">=5.6.0" - } - }, - "node_modules/sitemap/node_modules/@types/node": { - "version": "17.0.45", - "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", - "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==" - }, - "node_modules/skin-tone": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/skin-tone/-/skin-tone-2.0.0.tgz", - "integrity": "sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA==", - "dependencies": { - "unicode-emoji-modifier-base": "^1.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "engines": { - "node": ">=8" - } - }, - "node_modules/snake-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/snake-case/-/snake-case-3.0.4.tgz", - "integrity": "sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==", - "dependencies": { - "dot-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/sockjs": { - "version": "0.3.24", - "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", - "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", - "dependencies": { - "faye-websocket": "^0.11.3", - "uuid": "^8.3.2", - "websocket-driver": "^0.7.4" - } - }, - "node_modules/sort-css-media-queries": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.2.0.tgz", - "integrity": "sha512-0xtkGhWCC9MGt/EzgnvbbbKhqWjl1+/rncmhTh5qCpbYguXh6S/qwePfv/JQ8jePXXmqingylxoC49pCkSPIbA==", - "engines": { - "node": ">= 6.3.0" - } - }, - "node_modules/source-map": { - "version": "0.7.4", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz", - "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", - "engines": { - "node": ">= 8" - } - }, - "node_modules/source-map-js": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", - "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-support": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": 
"sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "node_modules/source-map-support/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/space-separated-tokens": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", - "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/spdy": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", - "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", - "dependencies": { - "debug": "^4.1.0", - "handle-thing": "^2.0.0", - "http-deceiver": "^1.2.7", - "select-hose": "^2.0.0", - "spdy-transport": "^3.0.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/spdy-transport": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", - "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", - "dependencies": { - "debug": "^4.1.0", - "detect-node": "^2.0.4", - "hpack.js": "^2.1.6", - "obuf": "^1.1.2", - "readable-stream": "^3.0.6", - "wbuf": "^1.7.3" - } - }, - "node_modules/sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" - }, - "node_modules/srcset": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/srcset/-/srcset-4.0.0.tgz", - "integrity": "sha512-wvLeHgcVHKO8Sc/H/5lkGreJQVeYMm9rlmt8PuR1xE31rIuXhuzznUUqAt8MqLhB3MqJdFzlNAfpcWnxiFUcPw==", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/statuses": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", - "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/std-env": { - "version": "3.7.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz", - "integrity": "sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==" - }, - "node_modules/string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "dependencies": { - "safe-buffer": "~5.2.0" - } - }, - "node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, 
- "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/string-width/node_modules/ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/string-width/node_modules/strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/stringify-entities": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", - "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", - "dependencies": { - "character-entities-html4": "^2.0.0", - "character-entities-legacy": "^3.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/stringify-object": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", - "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", - "dependencies": { - "get-own-enumerable-property-symbols": "^3.0.0", - "is-obj": "^1.0.1", - "is-regexp": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-bom-string": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", - "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-final-newline": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", - "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/style-to-object": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.4.4.tgz", - "integrity": "sha512-HYNoHZa2GorYNyqiCaBgsxvcJIn7OHq6inEga+E6Ke3m5JkoqpQbnFssk4jwe+K7AhGa2fcha4wSOf1Kn01dMg==", - "dependencies": { - "inline-style-parser": "0.1.1" - } - }, - "node_modules/stylehacks": { - "version": "6.1.1", - "resolved": 
"https://registry.npmjs.org/stylehacks/-/stylehacks-6.1.1.tgz", - "integrity": "sha512-gSTTEQ670cJNoaeIp9KX6lZmm8LJ3jPB5yJmX8Zq/wQxOsAFXV3qjWzHas3YYk1qesuVIyYWWUpZ0vSE/dTSGg==", - "dependencies": { - "browserslist": "^4.23.0", - "postcss-selector-parser": "^6.0.16" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/svg-parser": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", - "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==" - }, - "node_modules/svgo": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-3.3.2.tgz", - "integrity": "sha512-OoohrmuUlBs8B8o6MB2Aevn+pRIH9zDALSR+6hhqVfa6fRwG/Qw9VUMSMW9VNg2CFc/MTIfabtdOVl9ODIJjpw==", - "dependencies": { - "@trysound/sax": "0.2.0", - "commander": "^7.2.0", - "css-select": "^5.1.0", - "css-tree": "^2.3.1", - "css-what": "^6.1.0", - "csso": "^5.0.5", - "picocolors": "^1.0.0" - }, - "bin": { - "svgo": "bin/svgo" - }, - "engines": { - "node": ">=14.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/svgo" - } - }, - "node_modules/svgo/node_modules/commander": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", - "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", - "engines": { - "node": ">= 10" - } - }, - "node_modules/tapable": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", - "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/terser": { - "version": "5.31.1", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.31.1.tgz", - "integrity": "sha512-37upzU1+viGvuFtBo9NPufCb9dwM0+l9hMxYyWfBA+fbwrPqNJAhbZ6W47bBFnZHKHTUBnMvi87434qq+qnxOg==", - "dependencies": { - "@jridgewell/source-map": "^0.3.3", - "acorn": "^8.8.2", - "commander": "^2.20.0", - "source-map-support": "~0.5.20" - }, - "bin": { - "terser": "bin/terser" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/terser-webpack-plugin": { - "version": "5.3.10", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz", - "integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==", - "dependencies": { - "@jridgewell/trace-mapping": "^0.3.20", - "jest-worker": "^27.4.5", - "schema-utils": "^3.1.1", - "serialize-javascript": "^6.0.1", - "terser": "^5.26.0" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - 
"type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.1.0" - }, - "peerDependenciesMeta": { - "@swc/core": { - "optional": true - }, - "esbuild": { - "optional": true - }, - "uglify-js": { - "optional": true - } - } - }, - "node_modules/terser-webpack-plugin/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/terser-webpack-plugin/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/terser-webpack-plugin/node_modules/jest-worker": { - "version": "27.5.1", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", - "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", - "dependencies": { - "@types/node": "*", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": ">= 10.13.0" - } - }, - "node_modules/terser-webpack-plugin/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "node_modules/terser-webpack-plugin/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/terser-webpack-plugin/node_modules/supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "node_modules/terser/node_modules/commander": { - "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" - }, - "node_modules/text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==" - }, - "node_modules/thunky": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", - "integrity": 
"sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" - }, - "node_modules/tiny-invariant": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", - "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==" - }, - "node_modules/tiny-warning": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", - "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" - }, - "node_modules/to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", - "engines": { - "node": ">=4" - } - }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/toidentifier": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", - "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/totalist": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz", - "integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/trim-lines": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", - "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/trough": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", - "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/tslib": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz", - "integrity": "sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==" - }, - "node_modules/type-fest": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", - "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", - "engines": { - "node": ">=12.20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/type-is": { - "version": "1.6.18", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", - "dependencies": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/type-is/node_modules/mime-db": { - "version": "1.52.0", - "resolved": 
"https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/type-is/node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/typedarray-to-buffer": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", - "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", - "dependencies": { - "is-typedarray": "^1.0.0" - } - }, - "node_modules/typescript": { - "version": "5.5.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.2.tgz", - "integrity": "sha512-NcRtPEOsPFFWjobJEtfihkLCZCXZt/os3zf8nTxjVH3RvTSxjrCamJpbExGvYOF+tFHc3pA65qpdwPbzjohhew==", - "peer": true, - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "node_modules/undici-types": { - "version": "5.26.5", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", - "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" - }, - "node_modules/unicode-canonical-property-names-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", - "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-emoji-modifier-base": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz", - "integrity": "sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g==", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", - "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", - "dependencies": { - "unicode-canonical-property-names-ecmascript": "^2.0.0", - "unicode-property-aliases-ecmascript": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-value-ecmascript": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", - "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-property-aliases-ecmascript": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", - "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", - "engines": { - "node": ">=4" - } - }, - "node_modules/unified": { - "version": "11.0.5", - "resolved": 
"https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", - "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", - "dependencies": { - "@types/unist": "^3.0.0", - "bail": "^2.0.0", - "devlop": "^1.0.0", - "extend": "^3.0.0", - "is-plain-obj": "^4.0.0", - "trough": "^2.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unique-string": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-3.0.0.tgz", - "integrity": "sha512-VGXBUVwxKMBUznyffQweQABPRRW1vHZAbadFZud4pLFAqRGvv/96vafgjWFqzourzr8YonlQiPgH0YCJfawoGQ==", - "dependencies": { - "crypto-random-string": "^4.0.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/unist-util-is": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", - "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", - "dependencies": { - "@types/unist": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-position": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", - "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", - "dependencies": { - "@types/unist": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-position-from-estree": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz", - "integrity": "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==", - "dependencies": { - "@types/unist": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-stringify-position": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", - "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", - "dependencies": { - "@types/unist": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-visit": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", - "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", - "dependencies": { - "@types/unist": "^3.0.0", - "unist-util-is": "^6.0.0", - "unist-util-visit-parents": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-visit-parents": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", - "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", - "dependencies": { - "@types/unist": "^3.0.0", - "unist-util-is": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": 
"https://opencollective.com/unified" - } - }, - "node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/update-browserslist-db": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz", - "integrity": "sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "dependencies": { - "escalade": "^3.1.2", - "picocolors": "^1.0.1" - }, - "bin": { - "update-browserslist-db": "cli.js" - }, - "peerDependencies": { - "browserslist": ">= 4.21.0" - } - }, - "node_modules/update-notifier": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-6.0.2.tgz", - "integrity": "sha512-EDxhTEVPZZRLWYcJ4ZXjGFN0oP7qYvbXWzEgRm/Yql4dHX5wDbvh89YHP6PK1lzZJYrMtXUuZZz8XGK+U6U1og==", - "dependencies": { - "boxen": "^7.0.0", - "chalk": "^5.0.1", - "configstore": "^6.0.0", - "has-yarn": "^3.0.0", - "import-lazy": "^4.0.0", - "is-ci": "^3.0.1", - "is-installed-globally": "^0.4.0", - "is-npm": "^6.0.0", - "is-yarn-global": "^0.4.0", - "latest-version": "^7.0.0", - "pupa": "^3.1.0", - "semver": "^7.3.7", - "semver-diff": "^4.0.0", - "xdg-basedir": "^5.1.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/yeoman/update-notifier?sponsor=1" - } - }, - "node_modules/update-notifier/node_modules/boxen": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-7.1.1.tgz", - "integrity": "sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==", - "dependencies": { - "ansi-align": "^3.0.1", - "camelcase": "^7.0.1", - "chalk": "^5.2.0", - "cli-boxes": "^3.0.0", - "string-width": "^5.1.2", - "type-fest": "^2.13.0", - "widest-line": "^4.0.1", - "wrap-ansi": "^8.1.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/update-notifier/node_modules/camelcase": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz", - "integrity": "sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==", - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/update-notifier/node_modules/chalk": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz", - "integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==", - "engines": { - "node": "^12.17.0 || ^14.13 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/uri-js": { - 
"version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/uri-js/node_modules/punycode": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", - "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/url-loader": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz", - "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==", - "dependencies": { - "loader-utils": "^2.0.0", - "mime-types": "^2.1.27", - "schema-utils": "^3.0.0" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "file-loader": "*", - "webpack": "^4.0.0 || ^5.0.0" - }, - "peerDependenciesMeta": { - "file-loader": { - "optional": true - } - } - }, - "node_modules/url-loader/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/url-loader/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/url-loader/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "node_modules/url-loader/node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/url-loader/node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/url-loader/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - 
"node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" - }, - "node_modules/utila": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", - "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==" - }, - "node_modules/utility-types": { - "version": "3.11.0", - "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.11.0.tgz", - "integrity": "sha512-6Z7Ma2aVEWisaL6TvBCy7P8rm2LQoPv6dJ7ecIaIixHcwfbJ0x7mWdbcwlIM5IGQxPZSFYeqRCqlOOeKoJYMkw==", - "engines": { - "node": ">= 4" - } - }, - "node_modules/utils-merge": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/uuid": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", - "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", - "bin": { - "uuid": "dist/bin/uuid" - } - }, - "node_modules/value-equal": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", - "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==" - }, - "node_modules/vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/vfile": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", - "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", - "dependencies": { - "@types/unist": "^3.0.0", - "vfile-message": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/vfile-location": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz", - "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==", - "dependencies": { - "@types/unist": "^3.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/vfile-message": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.2.tgz", - "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", - "dependencies": { - "@types/unist": "^3.0.0", - "unist-util-stringify-position": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/watchpack": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.1.tgz", - "integrity": "sha512-8wrBCMtVhqcXP2Sup1ctSkga6uc2Bx0IIvKyT7yTFier5AXHooSI+QyQQAtTb7+E0IUCCKyTFmXqdqgum2XWGg==", - "dependencies": { - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.1.2" - }, - "engines": { - "node": ">=10.13.0" - } - }, - 
"node_modules/wbuf": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", - "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", - "dependencies": { - "minimalistic-assert": "^1.0.0" - } - }, - "node_modules/web-namespaces": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", - "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/webpack": { - "version": "5.94.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.94.0.tgz", - "integrity": "sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg==", - "dependencies": { - "@types/estree": "^1.0.5", - "@webassemblyjs/ast": "^1.12.1", - "@webassemblyjs/wasm-edit": "^1.12.1", - "@webassemblyjs/wasm-parser": "^1.12.1", - "acorn": "^8.7.1", - "acorn-import-attributes": "^1.9.5", - "browserslist": "^4.21.10", - "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.17.1", - "es-module-lexer": "^1.2.1", - "eslint-scope": "5.1.1", - "events": "^3.2.0", - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.2.11", - "json-parse-even-better-errors": "^2.3.1", - "loader-runner": "^4.2.0", - "mime-types": "^2.1.27", - "neo-async": "^2.6.2", - "schema-utils": "^3.2.0", - "tapable": "^2.1.1", - "terser-webpack-plugin": "^5.3.10", - "watchpack": "^2.4.1", - "webpack-sources": "^3.2.3" - }, - "bin": { - "webpack": "bin/webpack.js" - }, - "engines": { - "node": ">=10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependenciesMeta": { - "webpack-cli": { - "optional": true - } - } - }, - "node_modules/webpack-bundle-analyzer": { - "version": "4.10.2", - "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.2.tgz", - "integrity": "sha512-vJptkMm9pk5si4Bv922ZbKLV8UTT4zib4FPgXMhgzUny0bfDDkLXAVQs3ly3fS4/TN9ROFtb0NFrm04UXFE/Vw==", - "dependencies": { - "@discoveryjs/json-ext": "0.5.7", - "acorn": "^8.0.4", - "acorn-walk": "^8.0.0", - "commander": "^7.2.0", - "debounce": "^1.2.1", - "escape-string-regexp": "^4.0.0", - "gzip-size": "^6.0.0", - "html-escaper": "^2.0.2", - "opener": "^1.5.2", - "picocolors": "^1.0.0", - "sirv": "^2.0.3", - "ws": "^7.3.1" - }, - "bin": { - "webpack-bundle-analyzer": "lib/bin/analyzer.js" - }, - "engines": { - "node": ">= 10.13.0" - } - }, - "node_modules/webpack-bundle-analyzer/node_modules/commander": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", - "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", - "engines": { - "node": ">= 10" - } - }, - "node_modules/webpack-dev-middleware": { - "version": "5.3.4", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", - "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", - "dependencies": { - "colorette": "^2.0.10", - "memfs": "^3.4.3", - "mime-types": "^2.1.31", - "range-parser": "^1.2.1", - "schema-utils": "^4.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.0.0 || ^5.0.0" - 
} - }, - "node_modules/webpack-dev-middleware/node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack-dev-middleware/node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack-dev-middleware/node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack-dev-server": { - "version": "4.15.2", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz", - "integrity": "sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==", - "dependencies": { - "@types/bonjour": "^3.5.9", - "@types/connect-history-api-fallback": "^1.3.5", - "@types/express": "^4.17.13", - "@types/serve-index": "^1.9.1", - "@types/serve-static": "^1.13.10", - "@types/sockjs": "^0.3.33", - "@types/ws": "^8.5.5", - "ansi-html-community": "^0.0.8", - "bonjour-service": "^1.0.11", - "chokidar": "^3.5.3", - "colorette": "^2.0.10", - "compression": "^1.7.4", - "connect-history-api-fallback": "^2.0.0", - "default-gateway": "^6.0.3", - "express": "^4.17.3", - "graceful-fs": "^4.2.6", - "html-entities": "^2.3.2", - "http-proxy-middleware": "^2.0.3", - "ipaddr.js": "^2.0.1", - "launch-editor": "^2.6.0", - "open": "^8.0.9", - "p-retry": "^4.5.0", - "rimraf": "^3.0.2", - "schema-utils": "^4.0.0", - "selfsigned": "^2.1.1", - "serve-index": "^1.9.1", - "sockjs": "^0.3.24", - "spdy": "^4.0.2", - "webpack-dev-middleware": "^5.3.4", - "ws": "^8.13.0" - }, - "bin": { - "webpack-dev-server": "bin/webpack-dev-server.js" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.37.0 || ^5.0.0" - }, - "peerDependenciesMeta": { - "webpack": { - "optional": true - }, - "webpack-cli": { - "optional": true - } - } - }, - "node_modules/webpack-dev-server/node_modules/ws": { - "version": "8.17.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", - "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": ">=5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/webpack-merge": { - "version": "5.10.0", - "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.10.0.tgz", - "integrity": "sha512-+4zXKdx7UnO+1jaN4l2lHVD+mFvnlZQP/6ljaJVb4SZiwIKeUnrT5l0gkT8z+n4hKpC+jpOv6O9R+gLtag7pSA==", - "dependencies": { - "clone-deep": "^4.0.1", - "flat": "^5.0.2", - "wildcard": "^2.0.0" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/webpack-sources": { - "version": "3.2.3", - "resolved": 
"https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", - "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/webpack/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/webpack/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/webpack/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "node_modules/webpack/node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack/node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/webpackbar": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz", - "integrity": "sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==", - "dependencies": { - "chalk": "^4.1.0", - "consola": "^2.15.3", - "pretty-time": "^1.1.0", - "std-env": "^3.0.1" - }, - "engines": { - "node": ">=12" - }, - "peerDependencies": { - "webpack": "3 || 4 || 5" - } - }, - "node_modules/websocket-driver": { - "version": "0.7.4", - "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", - "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", - "dependencies": { - "http-parser-js": ">=0.5.1", - "safe-buffer": ">=5.1.0", - "websocket-extensions": ">=0.1.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/websocket-extensions": { - "version": "0.1.4", - "resolved": 
"https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", - "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/widest-line": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", - "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", - "dependencies": { - "string-width": "^5.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/wildcard": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz", - "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==" - }, - "node_modules/wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" - }, - "node_modules/write-file-atomic": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", - "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", - "dependencies": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - 
"typedarray-to-buffer": "^3.1.5" - } - }, - "node_modules/ws": { - "version": "7.5.10", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", - "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", - "engines": { - "node": ">=8.3.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": "^5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/xdg-basedir": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-5.1.0.tgz", - "integrity": "sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/xml-js": { - "version": "1.6.11", - "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz", - "integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==", - "dependencies": { - "sax": "^1.2.4" - }, - "bin": { - "xml-js": "bin/cli.js" - } - }, - "node_modules/yallist": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" - }, - "node_modules/yaml": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/yocto-queue": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", - "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", - "engines": { - "node": ">=12.20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/zwitch": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", - "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - } - } -} diff --git a/docs_v2/package.json b/docs_v2/package.json deleted file mode 100644 index 64b6fe3b..00000000 --- a/docs_v2/package.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "langchain-dart", - "version": "0.0.0", - "private": true, - "scripts": { - "docusaurus": "docusaurus", - "start": "docusaurus start", - "build": "docusaurus build", - "swizzle": "docusaurus swizzle", - "deploy": "docusaurus deploy", - "clear": "docusaurus clear", - "serve": "docusaurus serve", - "write-translations": "docusaurus write-translations", - "write-heading-ids": "docusaurus write-heading-ids" - }, - "dependencies": { - "@docusaurus/core": "^3.5.2", - "@docusaurus/preset-classic": "^3.5.2", - "@mdx-js/react": "^3.0.0", - "clsx": "^2.0.0", - "prism-react-renderer": "^2.3.0", - "react": "^18.0.0", - "react-dom": "^18.0.0" - }, - "devDependencies": { - "@docusaurus/module-type-aliases": "^3.5.2", - "@docusaurus/types": "^3.5.2" - }, - "browserslist": { - "production": [ - ">0.5%", - "not dead", - "not op_mini all" - ], - "development": [ - "last 3 chrome version", - "last 3 firefox version", - "last 5 safari version" - ] - }, - "engines": { - "node": 
">=18.0" - } -} diff --git a/docs_v2/sidebars.js b/docs_v2/sidebars.js deleted file mode 100644 index 72e4f826..00000000 --- a/docs_v2/sidebars.js +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Creating a sidebar enables you to: - - create an ordered group of docs - - render a sidebar for each doc of that group - - provide next/previous navigation - - The sidebars can be generated from the filesystem, or explicitly defined here. - - Create as many sidebars as you want. - */ - -// @ts-check - -/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ -const sidebars = { - // By default, Docusaurus generates a sidebar from the docs folder structure - tutorialSidebar: [{type: 'autogenerated', dirName: '.'}], - integrations: [{type: 'autogenerated', dirName: '.'}] - - // tutorialSidebar: [ - // 'intro', - // 'tutorials/index', - // 'how_to/index', - // 'concepts', - // 'integrations/index', - // ], - -}; - -export default sidebars; \ No newline at end of file diff --git a/docs_v2/src/components/HomepageFeatures/index.js b/docs_v2/src/components/HomepageFeatures/index.js deleted file mode 100644 index acc76219..00000000 --- a/docs_v2/src/components/HomepageFeatures/index.js +++ /dev/null @@ -1,64 +0,0 @@ -import clsx from 'clsx'; -import Heading from '@theme/Heading'; -import styles from './styles.module.css'; - -const FeatureList = [ - { - title: 'Easy to Use', - Svg: require('@site/static/img/undraw_docusaurus_mountain.svg').default, - description: ( - <> - Docusaurus was designed from the ground up to be easily installed and - used to get your website up and running quickly. - - ), - }, - { - title: 'Focus on What Matters', - Svg: require('@site/static/img/undraw_docusaurus_tree.svg').default, - description: ( - <> - Docusaurus lets you focus on your docs, and we'll do the chores. Go - ahead and move your docs into the docs directory. - - ), - }, - { - title: 'Powered by React', - Svg: require('@site/static/img/undraw_docusaurus_react.svg').default, - description: ( - <> - Extend or customize your website layout by reusing React. Docusaurus can - be extended while reusing the same header and footer. - - ), - }, -]; - -function Feature({Svg, title, description}) { - return ( -
-    <div className={clsx('col col--4')}>
-      <div className="text--center">
-        <Svg className={styles.featureSvg} role="img" />
-      </div>
-      <div className="text--center padding-horiz--md">
-        <Heading as="h3">{title}</Heading>
-        <p>{description}</p>
-      </div>
-    </div>
-  );
-}
-
-export default function HomepageFeatures() {
-  return (
-    <section className={styles.features}>
-      <div className="container">
-        <div className="row">
-          {FeatureList.map((props, idx) => (
-            <Feature key={idx} {...props} />
-          ))}
-        </div>
-      </div>
-    </section>
    - ); -} diff --git a/docs_v2/src/components/HomepageFeatures/styles.module.css b/docs_v2/src/components/HomepageFeatures/styles.module.css deleted file mode 100644 index b248eb2e..00000000 --- a/docs_v2/src/components/HomepageFeatures/styles.module.css +++ /dev/null @@ -1,11 +0,0 @@ -.features { - display: flex; - align-items: center; - padding: 2rem 0; - width: 100%; -} - -.featureSvg { - height: 200px; - width: 200px; -} diff --git a/docs_v2/src/css/custom.css b/docs_v2/src/css/custom.css deleted file mode 100644 index 2bc6a4cf..00000000 --- a/docs_v2/src/css/custom.css +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Any CSS included here will be global. The classic template - * bundles Infima by default. Infima is a CSS framework designed to - * work well for content-centric websites. - */ - -/* You can override the default Infima variables here. */ -:root { - --ifm-color-primary: #2e8555; - --ifm-color-primary-dark: #29784c; - --ifm-color-primary-darker: #277148; - --ifm-color-primary-darkest: #205d3b; - --ifm-color-primary-light: #33925d; - --ifm-color-primary-lighter: #359962; - --ifm-color-primary-lightest: #3cad6e; - --ifm-code-font-size: 95%; - --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1); -} - -/* For readability concerns, you should choose a lighter palette in dark mode. */ -[data-theme='dark'] { - --ifm-color-primary: #25c2a0; - --ifm-color-primary-dark: #21af90; - --ifm-color-primary-darker: #1fa588; - --ifm-color-primary-darkest: #1a8870; - --ifm-color-primary-light: #29d5b0; - --ifm-color-primary-lighter: #32d8b4; - --ifm-color-primary-lightest: #4fddbf; - --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3); -} diff --git a/docs_v2/src/pages/index.js b/docs_v2/src/pages/index.js deleted file mode 100644 index 176f838c..00000000 --- a/docs_v2/src/pages/index.js +++ /dev/null @@ -1,7 +0,0 @@ -import { Redirect } from "@docusaurus/router"; -import useBaseUrl from "@docusaurus/useBaseUrl"; -import React from "react"; - -export default function Home() { - return ; -} diff --git a/docs_v2/src/pages/index.module.css b/docs_v2/src/pages/index.module.css deleted file mode 100644 index 9f71a5da..00000000 --- a/docs_v2/src/pages/index.module.css +++ /dev/null @@ -1,23 +0,0 @@ -/** - * CSS files with the .module.css suffix will be treated as CSS modules - * and scoped locally. - */ - -.heroBanner { - padding: 4rem 0; - text-align: center; - position: relative; - overflow: hidden; -} - -@media screen and (max-width: 996px) { - .heroBanner { - padding: 2rem; - } -} - -.buttons { - display: flex; - align-items: center; - justify-content: center; -} diff --git a/docs_v2/src/pages/markdown-page.md b/docs_v2/src/pages/markdown-page.md deleted file mode 100644 index 9756c5b6..00000000 --- a/docs_v2/src/pages/markdown-page.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Markdown page example ---- - -# Markdown page example - -You don't need React to write simple standalone pages. 
diff --git a/docs_v2/static/.nojekyll b/docs_v2/static/.nojekyll
deleted file mode 100644
index e69de29b..00000000
diff --git a/docs_v2/static/img/favicon.ico b/docs_v2/static/img/favicon.ico
deleted file mode 100644
index 4c29611109064a93f09866b3b89d4486b8db1866..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

[favicon.ico binary patch data (literal 15406) omitted]
zhGeoyNO<`K-}jhDdv`7PhM>X8BfQ@6R6ma#`n(5s2_!Vde&sIpp=94M?NBv*01Zff zp7a-d`+0;L_2dh?1$NFziYKMW&JQGvK%>+u`LEFBM*ttOzl^i0ioVNIp?#GyEEw^m z(sh8GT#Z{HgngBa<(=J<|clNU|aekbOitt&`7D_c|P zqef8;WtVf?HG|ey7tdGIFP>SxR7XeKYbVewN;HUpgvml?uG9y~ppvK0hbFp!xj~U- z<0DiR=DCu{F0^$m$j(|lib4ii3BNw8ys%)v)Dzgdigo{iCsg`U6>2(|gHz_f+sEDK z_d?K1U}C_MUuB07=_6fX19nu8_`uJjj(6(IqleuMNd0-_Z$QE``-trJ{7J){FLeLu zk*_)&gc=}y+B`s9zAWHl{q0E>`J1v;XK$3VsS+emI5*2o#XbJ9+sp3UbuSX=r zkO78wMxdven;43#p@`6t7E`o}#zu5@`Jg3I9VGqu$G@$Q_ui}j3nIPr?^r`q9gHkL zGhLlOGqExO+jJ4w-=uD->2n9iUtT>Es_Ey}-oCJPaB9vX$SO>Xwt=dHOl*#MRe?MW z(}|Q$4K&fUhDoWfIB6r*%uveBWTBN24drjpbKBlj`!A1{4qOo|h#5bh-T+U8zFSI; zk%g{e_$0Q{7|NGk`hd&qba>f$L!=r=t~3^kJ$-oOw1vZl?h)9H__JvK%w!K0I&O;E z`dzEO{{$Ml&f_b|QOJ?AJLG^2YAwE}x_N@9?$%rAP=6k|1>gQDg@i0iNPZw8l0cG- zeH+|JOw5BP;1BS_HiJVTzlMm4CS-~%Uhd-wWsr7(AEd(ooSMG<0AXCqs0PnK*o4kj zBf`KSK{WX$rhssG@7Ufvx#8z6Ruu*GMq`ear#xt(ZjUuYdbcK0%_JbH#?h;Fh?HgI zn&Elm*mHGpX#U`($y^Z0F_%<=C{JZ}a^<-JwysUAT`A_~CP_y5x1H-MiB2Lc?xB8$ zj||@eO)F@kOF2sxjB6gDs4xJ>CkxdbVI9PydYG3c;^NBf5al9h8S z!DAg&$tl$1qmXm@hPRfFj||#2QYEM_%?$cMQ>0Eug+TG zK$BfEdChsubOSX;0_;@k$b)HiW>6%vVnzp?)}p4$)$)xNk$#bpR1@iyCO~@S)tCOi zN=zm+&dt|qg%{_q?abJdB22=y&n-S%KWlMk=Qy{0w!T9CZA68bwXV&)g}|E=Ew^Mx zENX{~Qjx<&fxIOu7|ig5!UaP~e33AL0*}}+gxsHU&be(|1`UmZA5}*)AqzizN~Kdo zFptCt;03pDu1>u`LWuOx2f(9;osOj4+5?AbF0DEHc>yWk?7?4i_B;>X{Ji?kU>ghHE_j@V08)EzO$XUN?=lhzkp>hMs$65pxaAtrzTudtr5m00~aQ)o|NtsCR{-im3fhw})!t2NIOuy#(K#1*pAlft_1$J>nb6)SYg;E06jJzDFF-YCtdDU?{omJU>vSu)smqx?{^fp|U`VV!V96ymll6c-H&OHs2o zN3q(8c9SpP0CQoH=0e>;1*tv!21ji}v9rS#O*5BOs+sL~;UaXsCQ?SyD-DYhlD<;~ z)~dqsfm-3YGfR_$%dl4KW)`YWF8rWY;vKwjX=-pwrc8p`5N}V-gb)>;htuykcSuBY zh^gpqjRg-`OfB^R;kzJHsOIaZ7=~^PHWSBLR`|$FbDLlk2N-xrx6|7QH|ifV=$16_82!{}-HyS+QNYA+jOxVL zAxto6jr_)wa)BH9GK!?|ax1nAYJ}k$h2uQaD!c{QHzSmPqv#_hg%?q{GA7h?_|_-| z9dx53QecQfF>-(%LEwxFYul*gLO` zN)`Um(@O`Wp%ez2CVTW;R$F|!j-rlUy12O}n0RZ#$6y`oj9^@oZXcnoXpTZuICse8 z$O6@e5wQez{Dv0z6dT_Jju5B)fku3FRv5!+i@^dZDuAU3Qt_5)F{twLa@|egKgi1# z*t)Yv%6<@y05g{W11C6fg!xb*k{yfG4VgX$4XFLQ1a{qv-SwM(4M=Vtxm6O-L-Y@j zu-hf&!sDHq2PE_yvId91&YAJLdz)xafn8^S(k+|oirPFpLe%BBQSaE>PwoOp?M5VQ z&Mb6;Bf=1-*`pv^OMhYq!WK6&b5d2afeGGJEC{2~Um;SV{9BO%WZMfg-_|RScGOVF zH9BhyD@XZ6I!j)r?qJ3^1X%ruhI09Wn6j))-gx)jmun>5yj}z8=8ZaSm-2S63|LhE zxBkyRhU?z|NC&l!ke*tc)G7*Yp?OoBb)W$Ht;DV|?dZtCyi^1*DYOk7 zcNg%Ys6>YrND6I~a+rdMnB33BpVnNXz&TQC3rW!Go7g$G@D|S3hqe`+#3fb(zO-@& zA+u6)XD^D8%6TLzRBrM@Gmz^4`a>jcn*F!w$~`AH*+lNPcy#haf`HOfBMblkH9Lgtike#i~eDGCyQeloOxnETw#Ukr%!ZWAI6Vx&R9G(gfT z0~|HSUj$JMY`QWr*UXies#NgST2u=vy64!!_opjey12SIRrY(M9yE0km3*Uuq#ptp zQR+FYBvC~X>S<(DY@2zKY5||Dk;S$264oV%(romHC83>A?T5J_StbXKOFB%P$Hjnu z0BexrORuFuLPAtr4`Ta{0Ze`%Aw&X^JUnuET>EP+{(%=B!y(8i+~Dl)2lhD*?R9lE z@ssT{4yoHj_1o!>d{Nf^-^C-ZmxE`Qc1B_jF9#=&eC02^yeE$i-|^w@8yZN)TFed$ zP$~_Y_yrCHd7D8-3%R)@FA&?OsuU7H;ER}qy|{)TT{Av0>q!lsZ z7Wh3!GslSD+;oA<9j`{Y4gXUO33=w@v%R^&%Dk;W(R}8bgq!f>7SUkbCNQ#3fe|zU zuyGn`MT`JJlyAOg!Bw~W0$?5F1rkCe?3+XIXyhLA2$XQ1vmej!POebS3n_c2Hb8oy zCy=_O3t#cbzL8YIrg^A0kA7EM&hX?r^uh7Skp;v>Ia;N-+ASIJ11So2j1WZ#ogk*~ zNbrvk6_>qDML8|jdZMyelZX@wmLi@=Pf<3f+NLNx1*t4}+r&yJwi7Z^6lcn%_n7{? 
zLxaU3qL4VNHn35+5!msk?QUESQw^mh>iG2=HwH+Wcze)|)PzV0NrSeeOr)3Q3gM@( zEl=03sizmN2o>L2g2EGL7HUDmx29?yZO@o+XaHP`4I2#PdJOx>ShM2`ZNV^GL*)dd zLffUK3W+My~+vTx(+(@Fq1St|!kX&h}iqL!sZyN~6I7mX1=B=3>7M``27$BE%?A1VvK~s=NbUiIYIDS^BOs?gOXWipelapump0yYhKi zr3}70Mo4}l<>RB>(}KJ_+IzPGj6c9|$0t2_gw&opl=Cezi-*XwM-LLya@}(#9)AMs zPOaYlyNc1g4Y7EpGe9}qztrT?VY_&Q#qPIqOVhbZ$w@UdkVLHkHiWdMg~xCtFhvs4 zkPmSd^uMiUo)FnCQnZ`L)Wn>Vlj+041g<+pdXuMf&e zzig27>I-v{&pWd^M=JH4z8_+=-75mTJUZ-Zb;9f2C+%j(E_l@6`*q(VmPc+N z)!B60MQ=3)+EU1+2T2!Y2m2_E!b6_#W@n2AmB23gypsuRI_V%_svc^^ea0=^sKpdJ zCbSc|)w7YF^HOvE3Z6G&;vaK!mBDfGmCo^n62uY^4>mYxN*6c{>LRjKSB$RDMV)D6 z-#6Z_fIc)64(dna z2ahB%fLK8__3i;gLIWwIyQWZ6OokYqDmC&#`L}9RqYpp>5*+^`a3)fohjI69K?4wn z$Ou783jNTDZ0A|f@f-3%wZnX|j8YJ8y-dDpu8+M^qGBNg*&ZRh3m`pUi1c7LdOQ^v ze_N1Wd*Ge>+PMgx^x%6*zhX0y{sDCaLq~v;a{60)0mUiIgU>9Y#tsZH3vQc3?Sxp|-b3 z6jMaa=9PmjE488@D}pvA+2HAetYEq@&0;$NLA42hKqN!kJ03!xoNiP(S{wUfprQ?Y z5E0&HvpG+$-q@ZTJbkT6-aWY|%wCq}*7oc3)H!?c51`3cbRNPrHjyom5=FG*6d?ty4Ff_$*J7~uvI7E!Z=(nb(0*D z-Tyf(AdlxJ(r3Ip@|nFks!9%?`CVR}D)d9$i}y~N4jlQzBMYP;%FG-c;gJ5v22r$k zwu=_?#BINb6|+%nXU9cg@K7!sVZ#VgiUq+E1J4Ng2et@WT}AB#B(?Pel$Wrj)`vYO zt5If-A8HygVu7qOxE$OQlwT>NI&vg$?W3F5PBa&AV(rE+ZoEA`9Z&7qBQffp`fta_ z-@Wn9TUTFy^UXJEA~pG>$<^)2BMp&GE*_neJ#`9VcIM2&>1WPY)y!z!e>A={E4UrF zXdC%AC2RtXz~Jdh4T=_AYGB`&=m@ zxequRl1Bj2fgC}o!fZQ)h{k>+ndLqRgG^Y2G5wXcDLP`am$GfAROg?o zl?^8s=Fc2EHov%V`fQ!9lLHeM*Eh%3rrDm%*-lKiQ301#2$JqjA!dsRDRU%*s84g= zkzzZ=Nh3UwNN`m_2v(w=EFuVPnH_*(epG-rq8w=ju^ix`#+98$hxumk9sns2L*y}? zE2^jqDndbYK|!G5Z;b6$HZXIywG|1X>i$K3?q_nw@9Bxrbj#CW2R_>=c~-<}Y+y3umAFqqUzHvt3>5iy#BXu44@TQ~LMNOO zxfQo)!U>c8K%q<{d0laXJ*S> zE9~Gxw1%jfQ)eQ1R};9(fiP9HDJX@=yEHMXaTrojfs(Ae#BO?USU`A`wjZd=Fd?b< z?WJ?JQ#)-Xw4#@(?2nSJC1`RAN>zO+Q?@6!SJpSiHaBZ3t!|8MY;KN^ZynqoU*B9` zUERL6vUTmiY~GRg?5SoZ2ChO^>pIe#Pp00`*|F8}i34>Y6`ppUlBS>6;(027qH93P@$L*jz+DUhKikqFxC+CCov|N9qL%8ia$+Z3 zQqfg}`ZXa$_dHI25UU6-RRk4WG^+U;@*Iq$6@v^9X|532(n<}qYTp42Xi>N^6 zC;2o&ms7nmC)YN|SJ&6Ku1(EM)$vgcrp>XMP^&M$y!!I$`o`82lx;PUs+p;`uU>!W z&0BB0{`Hh4o7-C5*czRjMZZKiaeR4aY<+!meCG<Dlet|0U+DLZtb@u0CgftG?YW9@^eG^A479qF~)yH%;;q7jBG!Gma!?Y|7vaZP5n z2(DitK(0x>wUJp{^wtzxZ#`ozqot%a2)kE`x!rTet-1C5rJeyNSpy+O}c|A^NRuu zMr$MfrnbkHuN>c#r~h59 z%Sl?DntAfcxz9iOohP#-;?%@yEvDNZU7kF5u5P1U+S%G%U*D+9N3$uwvzD|*{@vVj z5@k>iAiOzHB18f|4wk8jz*en-=n8U3cA+3(fH(PsyscUk;|Z}Gu;(Y&v{raP`1i~VqfgV_06)jqzp?;BtE%>J)G`fX(M`XXug zdm5JhPi6s$QrdEDcY*a zY@=cun9!_`He3jiququyQq^aME}&u|5t$$~-r(XmL>(uB&<}G9Q}t?2Pc%m@y*qpE2kue%QI1`JjxWfy^$Cuddbz8Xwyl8?Q5e>jNH5oNF*_ z%u0=xL_-#Q+fX$W_X_C071M%RgFFxkq2|xLYRDUX&{Q-sl)#w!V0L`;6(^ZIXu~fq zF97Z-;Dx|Kr7}Q*VWP>$1rRtwv`-a28vrzCo3YL}Rw)58JYDmG07$z&dLileM-Lof zGC6u-zf|4xW8ePW21LJS(f-}T$fnYbkPSN^J!M!^j!W&&;Y+2 zKq*_Y%>0MrMlNI_5~s}5R=ipx%k4AgSFGw5?!k zUKY47snTCYlh`h8n}!zF{{{IO2qFSIoj|CVum?n{2&?nFQY#URP;r?H3MayaGG}RYFOGMSz=w>r5d@)0A5xoWYPtOue;`ES7jTK@Xx3;gtxS z;6Wk0FCol%7owRqQqF*6E?%CaPJC#V!g+~xEqvo4B6FJpz!LZpr_nSq!^^Jk@xX&M zmg?j8X!52UA57)5zPNb8Y8Sp;eNabI%eAFvhjw))$LoK807x98XH~bsioeUy85L1Q z6t+hsc4F}&E2g>6b=QD?)|a158sA6 z9vtR)=$;*SdxzVuu6})J=tgHl##kH<`zBe6th#Ux2s~3{vbpgGPtCNnTZ2V-rlC8U#-SEp` zj?8gBR^+{<{7pB9ajmsOcdX=^>~CjOm6nBWS%`G^?Z*?ow^ak!sRu_@(hC`v9pQ`= z4@RMkwUWhfZ5<{c@m=H8aIcrTJ`c?sg7=fGyFyabFlt951<{YA2`^0La(j9Vd^NnV zfrQp!4U`p%)2sWX%%8(0+vx%oHB=wgv9zC6vt2O?BTPLe53%uD%4CZc8+q5x!SzN% zt+Vr#esRGTzrl%A(?OCB*;82PkisKRnqOK@voO`wTbxtrY^xI>$Buyz2C1V7xP86l zwLMdnk*+9sDuqkw{3r?h=H}=zED`XSdE&~lwNZ=_%V%v$L3>T;^1U+r!fj2RMTd5? 
z@Pq!1!r;g`BI7khLQ)D7>UaO@Upye)>q|e{bBtdc_j(dQ`iSVBY+K97LNb)!7&};f z*`6>9cM_>(cbBXQGax47(T@n211G>Um8I$Tz*%m>aO`7`g&I+fNM^eX#T=n=i9)T# zM^Kb$ZFwW$;sxw})TBC4bTOu=Jsl(B7Flt}#;dNbv`sa!PTr%fPP9 zWI@)jHvQY$6DE8R+$C)!bRE`waH=IfV_!3WBjz_0LZWCfsl<4EW_>BLN$t!=^A(tDWZbIKj|% z|F#tV5)+)Os0^&bVE`cw_TZ5OeY)CR-LqvgscWwT!4q?}9}V2D^zZHeGc@w*IspU2 zq^W)?nA<^rlQ8g8nr%za_7@w@+6o{G#1_}2(pERJZ1c25^(uxzOPCxSa6;_mVH$M4X{|w&gs+-$Wn;s{Qoy_r(+Fr> zKGy6*LWGqaMzY*%_I!GC>qRhhSU@9391%h;40wRX>XvQ23P{yP{W)T*mO4tnDI_Cb zJ*`udB;t+cHASr(D{pTDh_2+h&mm;{Xsl+IZS@pxrXj1%Z>*6z!Ahr`(;<|i zBHQKCY~P#?6O)DITKzj|5iBgt=w6GAz*xmNC_JijE!mT(H^8L=F!o7#H`!zmP43*Y zP5i`pKL9Sv+r?1r2qfQRyAGN+bdQ}d)r<2ul}(6qmu?B)brusg5|S_(LPVBh&T>Qe z&RQ!o_?1&JSx*K?_xkEL?@`k8wd|N7;g_bM9$1^pE#U-{oMNYdlTo(EKe{Ccqo5+H z)wv~Et@hqWw0+w6d+ol=AcYudMAX0lN%pMLIL%(YyTP>%*m76$vd`IvoHW9qtoO2Z zsJ6EZi0)LM;+UEbkp_N69n|(&X;L%D(k=y{7aStqLvou+NI@mb#wMH?d_8LF?McLz=T^)%I=v}s@2nB9xY$Z zPuZ8HuE-RaVD9*P*yFW=2;c&49|eauIat#wHe_lV`5_!mnJ~B#kd!cr`$IJRtlf0P zn|jb=_-jc)gAkx?{AqVxLqB}DJ+NJTW^CtnNW%axKC*Jxru>y{t$C9*gztj3UR|ir z(#pGey*}WbM2&UqRsGLQQl=Sbc$Q`$8$1N6Se(Tr5_g-xowJ%?(KayFZxTEhMaZ%)|)V+n>0THE%cQOcum3 zb%_i*#m?<>KLq7v^Gb>{YdzEsHFX_j?4zzHGkTEsW&}HTm37q?W*PJ8NGgNbrTEwIBw3(MdXZ~QhFZ~TDx^SDU`f!n z-6fZN37XTWnu<$7Svec=@XoS0y;2_Y2V*-YA4+-XKy^CUr}MC{DqB8mlE70T<}Q_z2wc8J6zc%(N+GNy^}7QzrV%!m92dk=w@GRO~k6b1pA zD#@$W<%fD@4$c(19JtsE9MvXl{|frjf)qsX$B z%Mr0lujDWFD#4?}gD_x44o(8IDIs)YS7+r`^IXWJ?kmDVva@L?IhErt$o& zq=tKae~prU{{7=+t{afDm8JNv5far_!*^OBq}O&*v(|X7RLXYf;b``@Yn;JX(2*T+~$*770j) z$SG_Cf9;c_fHQU2b89LK+1kx1hwa=z2Rw4dQycQVQAD1zmDdQ1bfzQ;IB9c1q`Y!i zU?WC0Ul_j@>FV=6YjG;e`}*!VSGNP@2Hk5t=wL^iFwUdHwY2XDb6USGOYm5*D0#Yd zukZfkg%?Tp3@LmJ1mPAdfgm|Swv$s_pYCsw7;zrVrn0|lxvy~-3Pggk0EDbC2ragQ zcPO|6~6yD0@KJPK={VIkG5T zsnC0_&Dh(LL3oE-GX8QzkTXZ)P$D5*KK6Bh{ylR(!*OW!)fSgq774rR^qsv9*b38gcGU{f%x88R8xLBO2b;9cZVIE{$N z3>|o~mrsLa&97`QyHdM7}XDn)5iNe zeTEc{spW~HXl3GgcHwOE%s~7}kYuQU7n|K$(=2c+ooa7Ddg}Gk4I_EiNb0LS zsjD*?+;%u;cR_KBJeoe?r&|jslycIsoU^Vo8HI^!<%4lnYU3k139bhSO3i0koBnNO zbBbSEMxtJ1yP6tl$}vpo(yIJ=9;5#;F57E+tS)$n>IpUveX)Yw##^}`$adJyEy6dp za|4ntm}*XfSv$?TQ$FVDGQ!|Mg)3#!Zv!2~N+Cd^MHqd!kqNj4K*Abvkm^xmYo~!X zbU6t1o{M5JSCZ&)g6#~_0+8Aqw>`UHAIM*H+u^#k$+w6;jxg4Q&ey@*J

    (#NMMJTGKRrYwHIZp*G}5>lDxMh+d|HQWDkX`-D1IXW7sj}JCPJ(sSw2gE;8&4tEkI0n z_Lb<-%BP9+qPip=zXpUYB|TB?55DoicL^ZfMba0J7q{*_BnB>Xw4$b)4GBsvkWs#R z?R0}zTC66oQNLTs!mU>H-Q*CAq~sC39N^EA_;*rwRjwYlSJ89=bKEE;&G zmWz=$``KA_#eif?=`xOo?+34Lsv%k3nDAh3W3)==1 zxr=0`adWWys#`j{T?pkPfG0n7wmG3%aQv$glsK^!3)E0oAQ&ef1V~7Ewf&?l zr^d}X4yV=l2qNiRN}UAbmhC<#S~uFw~Y*=2X~?t(!p_d-_ncrZVMx z9g&X5B!fZwf{t1#ay4F=>DFdP~P-X+r|LP?a^sDrL;BW`DR&ON>59C!BY+v z@4T<9-gL^KNu~<-TpxVnts9WuMMcsNkC(GsX z6<;_gd9$Ta%52CIWzyt5@YwGSm}p>#+S`98$%hQt8#v2SVv+5_yw%(8v%}cb2A_jypAKU zh^Dtkjbu}4wobZqSMdMBw7eCDtlWB+c}c2 zU3*M)1HyZ^HjdPC<_33UE4@>XvS6g0$Lh7&ViX`DhosUY!#q?&N?P1{be6jZ;JJ17N~x-4&zuGG8wfk*DHYtXhs$yG~`OpN zGoQ(^3$j`t|L)(xToYOJT^-DU_M@#-*_IQK0J$1`h8kzDP^=g*?b?~VdH4`=*6GP} z*`hIrmsF@_AB`lgtwHV2qCd=#>bTd_udke%q%R*YV%>oB`~TnJHEfd>dx5%mZ1si= z@7c?CGG#t)9h=g9;t9N}^9u*DkEHNaXDY{2McCbt?N&Vlt*i(Rjhwpq0ytB9{VI9v zxPNmMydXrl-=Q_4)KYq_(j#*xook+GNkYX%>mI=LoPUcZ3>QoHs~DJ zGEzO*N__SNzxa74@fU(igW|gOg*R<@Q!1< zjim0_hWG5n5td{Z32nvfIiLEohd1!D@JvilN^Jy8v$>mm*u}+`+=`%r+?`~mV!)Y& zfz0`~X(JI8(8zjJoqnt_*{~g{*gzFj4TeqF*r+)_HO2zfZV$X}rlFiTs6q8*24ryI zLi6sXu1Ic%HvJomJG}7z{r`L8ck567%b^CW=*|Y*@@|x%(|UAQ`bwPC5ChL}P@vf7 z4aIhFju!J|d^8r+4qm&%4}=oS)yk?8tX!-vv?lD%kbLN_Q4ot)8=S(Hce5${mfTN z_W9A?$e^xBd`R}Ag`tl(yTD>@HMnoZjOV|;@rU(S+}icos{n>%}dYRPKt#-0__P7sqnA1b#csa=^Pa^5pBtBZfbHy}Mv1@ws@{P=H^ zOz~pY4M>j?k?Pt^U_nk|UII$E7bmV-vXyxbd$TUmW#L7XT<1Pzn?BG$n5=oWMZk1| z5CCGgTR5i9Nz3L$hcq71>}Jy!p5!(zdb*XrQ+bVhxFG)RZ%-!UNcOy4k2F}U>vfY3`b2io1{2NnNQWW0 z$E2%nKzbf0)t~=0($^n<;fH|L%USmf>47lSY3n^E44Z~FyCoPl8%;{++hzh1j+yF` z_ak!64+a&sJ#%~QNV3zGdR0TuH$G)>`?d+(DqZUVpM{pqN>}QU(+)(hPV_j4`oeX3E zW1n6PTKG>+K>B|Za_f_5!p#ZuxeJ>`SD&8zu2kNc6c&9o4?QqS6_e(tc_(NJWfgg~ z$ijf$v*4>%5>Ds_rXLZf>P*Mb4Mm+?#8lOqoume$Q?0sscV{-Qf_%G*{Cq(NwS@>X96YA5?|Y*Wwn^<86h$zLz`@8 zZ`N6UD_RUG?X+`5Mfg0n5jlFZi8?K0PO8u%?)|n9d?Y{=CUYeKP4M>PQt~6YC9qbsk zXw`rc5Vv3vFh{m>BF{EayS0NMbr-pDv}`h{z+0Jb1|%~wvIvje1h`}>D5{oPajjG3 z8Lw4$tmhH`KxH7&|*$ul~LEE{lgPvrar0#d%Ed1d2KYQ=-!qz=Qx=8zp3!glj z$MA=1o{?q`H~ixj3X8r`4yso5qg83^`dlwtl3MJvuC2_kZPD$WRPFG^yKbYRYjh%; zSImJj*u0h9Ir*@)Mo7>L1A>+HpY52s!!T@9-PD}3y4Dnm?Rg5BFtu*%6clBNT7q1- zdwalzkNCySROH*-AZuOM)<*`A{-3>Hs9txc1augNy|H)VLiE-YE1>7!f8-;-w)EiE zC%xJR%zt`vkuw;rH=20SkK`$&IFjK!Djey%p3gey2Bde0J^Iw|fA*u}g{`|~S0U}! z#lcz}i><0M%*~d%+Y^U?W^)GPJz$UphAR+b7!B@!|7*?9PiOAJfVABOQ#u#`ZIY^! 
zc$T39+9KE1BK=8}9CNB}>q@p0cGx@c!8uZo`j74Zd?3gFw9_pBlk*{1Xh$3NuHAgt zoBk~=usfY~IxUFeJRuY@^)wS+j(u71njm+i)JE!oyz=yPJnmfR%G zp*(a;rh4?blg`w06ChFb!Y|-69|`b)1R=f%Pyr;tudsaEv`!!2Ke7# zs(A{~&ou(2K z0S84IP3<^kRwjXl5s}q@SV*C!41h%BvG0FM)5{-lf}|jq6D!CFNsEZPWk=wG7&|f3 zXxcww6bK`MQGucqe;_+WAiyKCFsCLgh<0tR8cmI=nxIKy6Cm(KqcK_QaKfOKq z>Fsg*HsD1)zE?o8XT6@ot5z{w`j#%Dez%dvMP$^Lm4verwi z#H76_1XfSfOz5AD@^{LLUKb2O2o;_ zjXloq)&$}~(nE)HccWJ5>Qcc6!gMlz1n7)?$Moe^0agmlLxuWp^Gi}Ijy3uCdw^%HivT;^}x|PNIVgl5cd^0hnYbL#h_tJz?fBWvLXzD}aafj>uMtSjQca zNjOB7bbE|d2$w8>mym=It<-%B&(PerrQk!d(~}7iA%oeeIIsix)HqPD<<*`ZSvGsR4pb9zd%-%jXYWAycfTQ7 zTmvV1W`MK@|8si?RCk~S4AKMf>pN*Jv>1;lR=y#>yDj%Pp&gWVVisARX2kpOzXCLA zqNE%n{1OR$Ux(?=u`Y7QO^KLtpI&wy`uqgu{IVIgeFUufvI~X3s{iqKK&$^mH2MXV z?gNk*e_ixz7?m;`4q0V)s}>7R^dIX(NyX}62_a0ygo9W!rx_jGVwu>sya<(n(-`I_0CUdgpglEu zdrARBqYDM`E2bu0j{&)(Y2bXMLc;nTFC2um?{Q5d9ZK}~`7mqbu}kGCanp9n9Nddf zgewpgkROF~0?(8bfV34x*K_y>-A|1JMx|Y$yb3mpc7){zRfk)j-p;-zN-p9E+R?Ia zK+!KaG<DD2)XqAF3J~Pgv0p5;hIeDOQf?yh;;H znj?sx%g<99M}zfH>vgF8j2}=Eb_^CyfHp3Tc4W&MZqR3HNQzm9D^FJVj3ZMem}gEW zAY{KLFOCUE*j@rg@T_U?YOys-r3Z;GuP3$>6M?DeoYj%fgR}}K+p0W&VG?Lk4lne1 z9~f@ndnx~pyGNUP@wquVwF97sB%lHvNiGJgp;M+o=El<@AZWMxWC7u->fyh2A4dS-%dCrMS7u`=^0d!|BLQNR6Kn4*=Nh@gZDch95|L%#D)6W?U_E5LVWe~=`*~H z_m4Opk_brZh~Ynb6JkS?z6vCerev%ZF^WTL zB|a-iJo<=ft|>Va#cv9}{n{ed$)rRf5y{T5&~SV5X=PhaIlEVC;d!@Eg%O1r{Q=S` zYmwHaL`!Z?!lnEhb_{!km0gRNMuEeWT(Kq-i7x3-g{aBDZ9=X^?JyR0vf(Ma0tlrb z1_cWtLCF)@sfLZh4auAxP8%S8%s3#qABe^YS^bH8Y6#7ni58rr`jeSAdtEgB2E>_+ zF)k!>J^>(&S(y7Mi5{*l`szWeS!>^Hanh&#D`_uY3N z$`1bYr$2qT^WRtO{P)#6m+th3>h1YEJpK1SlqY&$WvfBby*R5bhuDAq?7u$w5_HM_ z`georQ4HDd^kdJSKK*L>(E85FS8?y~os*|ecWZarB@S@=^Dn-r>?|PJveNo90cDm~ z`mAC3Po6X(QoFfouvf_`BzmY+$gHDJ3=%Z=+8lvZJXe`dL)hr9jS`7Kzm}kBKwMF_ z8d2NK;6(IDxPAGd0x265e7HL%2NG>*!itln+X8XPih4OB9&`i|k9<3um#=OtwL9n~ zO7GcMkRhaSdH}s5Uy={!#Yxv|I(I-4 zoS|YiPjqm{CkDY2P{qtBg^?LbhW=C!0@5ZCM^y@#_J^l?dlA+jJ6pY{nBu(H3&1;4 z=m^FfQ3&^E4QUh{A^Frr;O~ht?a&i9?i_q1V}IrR`p&l_b^>Et?sT_&eg&1@+>`9P zvVHwu|K_*9`^}Sq^UI%9zzBfQ038U^PnY)%!LwbQJ_02gSGrq8zHxB+?2FF>Q+oWf z84|&R6BSr5h%JiB(g7>UwDv*Ko<>1}b7^pzvI2}a2|9l9G{(coetmPF;r^0JSjG-E(Iu(ZVOIOC53q zB$3b_3$k(Gc%hC6KCnRxeOngTMfrC@>EIP^>XVeRJoiW0;~9Y)DW{~1Iv`2EQpeE_ zsjxmNEG!GzoNlB0))%N@LKcfnY`pc9AHlWMx2%6Dss{;jd_5_t+UeLEt?$%TcH`ja zThajr-@$-@^D8@lXa@wp{dnl{+c!hcfQ$a=PxlTQ+psq{DAT^3NiSG zHqSXAQjV2<6%ppM(5G^4wFimRfz={VGZmrCSuEb-Qap_10>4s8kRiu|nYG>|Yj)Gg zYbP#ez6cn!*xCx!q2>-TMNM`5%u?juISYEf^@k!ZO9)l2dF9?Rj>FPvCOoW$ENk zrk7G4ZAxjkOB2n>WzI>sR9A^Jc+z*A8L^at6}CE@!E019rvf=6+X+>p!}mrw*G`c< z)g)RZYqwe5@YK+Nxl9`llrl1iq?h6nvz$z9=h%0-hrcJ|Ig!l7pM7^B1tOt;2ND!y zxsLqFbiysV#Sus|lp*v-J z2W7kE2_bMPxQ}z~CwLYBX=5lLeOo~z1mEwQ6K;2nfP#H(fN<9ihYiq!og?#oKT=cc zL(*78dR&AHpMCxHKd%I&-!|hB;AlBaH&7Yu-EHF{z_SKGX~&~+MJ=qpvbTLFpXo_N zq~UOJhhXR~5e$ zfQyq;b5`mbK+QtDL=6IwPX!&eb_hcI{u#sRTDSj^oQ6!7=aHsRX5_p)MIBzN^u^Zl zN37PY>nOH^<18=^+Xg9vOFSk%)q{rWqOS86mTGM7VeV-scX|@`2-KnkBxtVP0o@&J zvk$HpTNEF~g_@5RpRU+mVjf9Q=nKG8Y0picB6SR;r8pa{NuayaX-2rjs-V&YL)!eA z={mv@@#yV+6P&w7{q>iB`47MU^?>sF6T(qjK#zFBy*PF_p5e&d z+L7&2yr*OFs9ieo)0x4?RYYoMNP#*;m$JmELGF-kLSnbP%uz=G#RE71hwgct28=+c zyblc)RbG%ln3fI^i7q@;hKpF=4)zoQfdT_!$CQX&N3Ho)MX?T1I4209NTg(eCh)_? 
z`if#gTy5|wS;R|u&Fe~(xMjmUl1-uD8(LCB;(s4^+~E)p6Q(wqJCem`rAjoih!Uv;|E`$rc1!!6UT_PeD&Nyb!fg zh`UgL$fZf-2;h6&n&jZ(m9^oPwdX{7C{6&XGsV5)pE|!H%cJ5X2c&ft_p+}8znQ|s zObLd0%GnEer_6#-%D103Y#V#(B|ighw}oH@GKt`8LzQ6DhZ|PnRHLentk^Pp)YWVa z10?^LZ^WZ|L~dhE*w@HK8VL?=--btbjpZ*QUppSvNYc0C2O|Ay6@6;7SABxuSz|3D z@MzMGNA+}Hdls!q+N$T{nOg8@+|DPUr^5j$3$Y~UVEQ7Jwtf6o4aU}4A-1FT+R8gA z3uz8!yP?(t0%Mi?eIF2zOm5zbvx9o|IG^DUB^`OpA`)_fISZKH6aoydeIb|^Btfg^)jy2Bkz|7$Ch_m0Y?*NFJf1emfYC zM!;bM^t^4)_ubW3-^#Dv+)L69%I+0?v%jtaO;hLuN;Je?99qq|weqWax^J97u3aBk z$D?Os^?jcYMI-={c<2exQ?Y7rl>@TaE7ExDo`+#g$L;f3P!XdTx!M@xDLOxW`P|!U=0DkJn~Jf;NbbPO!7V(gpeK- z$$M;KEyWg*_`cNT-!Awr4~72(JnHeIiS(?Rg!wVC@NIc6cMDWI+DsJ6Et6(3MuivY zfOOMWu#jxgH=L$Lno{I=1w0OqF4nKFuC{KcH-~V`UA-;VcXG;^vVTjZ`eIcf_)fH@ zU3R?-wJe>eXY!D!48!CJs}jdT3R89;ACE{Tn(?Tm_Z4{b{kX2A;RT@ac;v_aX?)U- zRt-PeH~XvMc*HbVD<0L4ZRJ-0l<}-(aKa7yTBYgF2Hn1Uv7ZbdVa_N-JhI#!fl3^v zyzs9APa6DIuV-@xlJ;fPA|73mtE$%}%vvfc1PjJER#l@kUA4bZg)+eiiVD85%c6QT z1dB63%3`TLhG4i(Tp5GN$zP8KLH1PGfi*9CwoQ4VrUWcWOoQnVKk-ni6x$`kDG~Vo zP^ius90gwdMfd^a$K&w@{cMLP8YPO{7+AZ-8oJmAgM zPGXI1dAr=YIGQEq<>DYMw8b(r4DeJ1^a2?{vFY&56f3kGi8Fs$X^X~Ht|S};NW`O- z=-Un`$b+?=UQf+6-o9NA@pgp34yO+=QF?O^NxQ#X&5#H+pS7^8C#@*q`yzDJkA2pt zh5NX8)NrRNm1@VMRnPRq-)uV|#XOCpM*txyGXQLSj{=XZ&du1Mp5+mo?~3qp`qBrh z$~8(aXshJ(02PSsqW@+R8&sew&T;4oo>(&gvboSZ&K=LJgj<=5-9zOmwbW?#tk@x$ z#uEjQ%$9T~XSI{in^a(kh)qWUC>7%?08T>wopSPLg2pZOGD&Ft4Q!9jKWI?&G~XZZf6A`O$D~=Vfmz`Dmo#|YIYRrH?Soe ze`V>x_+Q+-(!q|UFq|aqDLW@ft$2js+XOQ;F3~IvZ@WEqwyu6G9@VMTit5=PM^hxs zDgVp*-{ANC`xb)K7Q5?sw7UwAnhl3`9~h77Qo!;0zE6Ni!x$2kR~q9EGx6+cp`hF6 zw+4|o@@PEFztUOF!1cXnK9}syWy+Fz(sQMLE(pGHW~r|bf9!gE3<9fSh@fn}IzK7j@c{b>*x7h_{^{ifz5!f#mc6xAeZ~{;hO?4b={b^|Xc6V;O<{qvCr)Z2 zdLKC+aXYO8(jM3qPPaFJ*&7n-aT19jBA^3he4v!i^ zt{snXXd`J_t?z@Z7zo zhNr+)6|r{V-|`T8$(%73F`1>bc%de&amX+R#pL?aU$DfoW6THiXWInmq$o*>|{-SBnzCFHA$qFVN6KBk=sB=U;Gj zqvECbR9>Aov~mjF@w?*H<_*d&>#mYZwZX1<^W_QU(b3j)orf4Bi2y6I>lXYB*~gI!v}UAxRkLqD;smls)`Ylj6G}_A|)0 zfX9$1ANRUOq8X3s1f`*V)qqFi^?mIWiGb8M@Tb8HCr! z!A#OJ#?5Gq6Nc`kT)a=|G^(_bnO}WSm>?i|$5WP#A~5LSM?=s&6@@aUbFJ6gKG5l1<_c)vVjF7xtmo_Rg`V0Aai zge>@QfCJL$YGwEJ$(}Vf9wfik!iIBjnJl_Oq9z|ngI2K(H<})=2-z}p^6#u zc}tFrYhADN2!eDs(l{G?>v+`Cd|F3@I!3lZ4=}FL>+bCsJn|=H8ENuRTV16GKxp6s zT}ZDdw;CIe>UdP;B2CBxm}ppkxl`jx+pUdwD3f-d47Cmd#4KA!Q!VStIZ>M-Asn5@ zHcG-43rvdm>divg4L%c;#}}rBhKft1r->F*T{+j`cJ;_IO*pBGn`ebTb?l%QG zI^ryI%yoKUvKURyS~$+66w4H_3}7Xb=ArH^i+nQhD4Sw3^z8-|;=KYOZKMH!)LiFp zU?{+edab3E;NVs*c+{vDA7Su-Yu^1FfAeoYV$QDJdZ?k+gME#H@24XgkeXEn5zZP6 zVQY74pi(3C6re~00V(1Lf75HlJZ~)F2K`fO7W*78ptOd?f$vY7_Kx0Um?|Q+8?3-QwF|~{3pm6@cEn99;&g;!b?#G# zUKt1ik9LTjYaN!kvum|)bZcG&>w+X7hRe>T7du5lCj_KpnM_{1>Jvq!*p7;9is19? 
z^h5j;y#k~cZ03mc?b_o|b9RF>cJ<`c?s&m>9XfWKG<)q{;H}t!JJqi`5|83Dp&4ly zAZ7O8bE<<^mBeGQM#Zs&c6kA!WbvF;@@P`50@yPTncIPdL`Oi;o?kQ~YR(A9we8r) z3nUnoB2uc#o`V=IcxbFWUkbi2_iXJxH}Gtxg7v9l$OVOmHlawfBJ5roAcRYw&$CeT zzB12swgW|Uk$_LSS2REG0H}bvf$O&DxXx|6t%B`QzQ~QhMH!p zR<}lZ2GByp2~x*~;2}ymOYPDTx>0cTz_`V~^4Q~hj_8|cYj%(uu-woKYTR(jS z9__XO!eBfC6&ulj#Ag~Pv?CyCCjKa$=fq=%myDxxfcBWtS)R81vb|D_DtZn~HZ3TM$b17p%DTRs zAY*|p2i96}Q2Qj2g(E=qsfKy+v(>AcBg`lBFIP4)9UEg2&FT3GUh9or^wj>DqH9OW zz0NPBgE{D45;DVy(gXz_k*|ZR`N(?LscW&zfMF02N%)}6fP`rIrp;*dhmVd&Opdjh zMpUhZK91nKLZzBs$sZpyX9t4RfJb8m-@B7$f!$a;6ecy9mY_n48sD#7JeF@sfuvasP`!zmdJHx1rMmG5}22j8;WH4*2CobQNg}s zJ95v0TGSNHc^2^qY>eItjHkZc3h_2i5wANGqG*^y7cyq<&|vSXJWL^gr%Nyh5k;1O zN1GS3t^me45Z7qZ^%f=&p<6JzePc+(BW^c^OA?XnZEMc+vj8#Yc05JnSI&9UEUfON zTV3+i6#_#GL(kYP9swip_Fmt1ubl&Y0nQg1qg((A6@5NhX%&UoLOV?#{X2QI{-L$0JxmoI>Pw1Ri(m3pTJ> z?_N5bAsFJbLUM>VeK46k-@Dj&dHyPkLDQz!U3}dy_buU-xw<_maG&Auot$mCodZtN zo3$favf@o2d*guEys7N^Z&$!b&QvGhlDsdWN5z@}V%HHzLG=qsScv}|2uKaX_ZrCr zu&c55U)RFn;08Pba5Ws+Ztze?!-0bD3PpPJfEl~~h}bF~wKX7tOpL>$<^oRR81>4b zc&64{?bb1_P2Y!?+z3c(pKcJ3dj6Vy!#8*#WF$`%U$DCLzx|GEy)l1ib48mhdvi9- ze@D~`1Bqfgj>G7p&b|u|JexONns_RTetp>l4iJxCle6rg($&YK%QMOaj$XaH;BD3$ zoNA6giP=kqkq^CXKxPb(4jqpOnfP)iu3o)HFOP`VgHB78-h7b=NLTRC+xbxRegs5% z#qi77eJ~8VAS$kLNQWj{360j5S2t0Nz}n*KY#tWovd#_WmerYRgL~1cMnHo5u9^)) zxgceAO7`iMFio~oG<@$w_d@Y95gwl~)c3+t$Fpf0AR$(MJKVOLhqlNZfJZHg>$_$= z`k@(mr~)4@_$D6RwF|x*Uh+ehkPxNY8)6#;->rDmpn7X3DD^BFPH&}Bt27$T^{YPz z=r;mV-@wk`#b*n#QBf_85?uvCT_C#AsifYta4`=v#guA}v^(!W4I(y^6fpY-<@cp& zvtI`#@0Q1VsGjX?V@AXym4lZqF;6{h^V$0TEKOeT+%DFIEMy@sOw^DasYNW{r;J1; z9-n}eDtJW5oRg~{%av6o`pU6Ey7n0d_VS1u#c$OYO*~SfGS^72)e_qCxnb^pPI#pv z`QSX{DRHwKLQetT(r@OgJA%dqUmI%_FZ3I73Du~y$*GG26DLIR3x)=M2E-|v`6gFu zvUMq#he){^r;#vkYe$54@*V>1qHB$S^u|BlJtDW`QKP`F4qSCD9H`jvu}H*@D(Pxm z{a8Gz&mR0=4+ltPF5hzSZEA?c#EDjcUBl_9>a@BpGz-4#txoN?svp>1zxo0%xe<`e z1&qEiw68&@yb@O&w}KMR@yHkudTQWoXVncYd9MOhiJ6?Oaz9A%0x;6mO(xsat1;rL zJa^bRSERb|h259dy_&1gnQptgdx0I~v0w_&kv?MCBh!vwbW4N4&Td~GK|`fo0FSR< zI36u-wtB(*`G)<$E#d^3M^GWO_}Fl|;{uA0iZXjiFMgsTlWeI-RNstZAfc!yd>qLvD&NS7Nf9@QHhTq5o9hl1pSdkcYt`2*ud~h_-6AmbQ(zs3upXn!G)liUW*cXH zJUZ++9Wdy$Tj!=nbtK~OjZLI_ZgKRLed|M!jDYWTc6`=DXb|c!@3VsoZ~XC|sLR(* zN&AYqBTSqy7ahRKUJ2ameLO10-c~QeM0R==p^@p-u8iEP3Ln$9d6w7^dpXwtNWInJ zC22QD(f;yW#ixJ}4UiiYU%~}1YJU#K9wMRzOfb8?C-Y=7ICj)ZA>JCN0=px9gfo_+ zpkOS3wA~CyKk4??GQO^%0jWXo4LoXTb8Xd&k7z)`-Rt?nc2~YX;4MEqAl1dub-{PD zBA`xX`mviB_0@=3No|5}EUD>M!||wkK8i@~fJDPEeqs_OygSjIlZD`+IbpPsPbBCC z4`&c214ybJc{-s{fKA;qQpHB4j5{7P1c&I7Vr%Hx-2JFkQE@U>rCkx|(K%*GOeX3b zG;2OO114wKgkE;*=)28?X27s{NsFdQ7M!^t@tEf%oaMKesLxkAV(?6gBgj@Xsi^f49Wf$Xa7%47@r1hIuoa^Dcb>Ovjx*lJ2* zj#KWH70HAq0X+74igt1b`x#IJDvWO+tWcv+GxsbNdG*APCtj7Ux4miW4p)%mH3QPO z%-9i+MhLz^97f<#t^U*?_{NdNEG?rJJUq=)lq=mqq4JZe`2wDm7F;Sr## zouJe)cDHqy?SKZPXRGk2yynO4fW(^bvb2FN&#(}~;307zJG1_@GfEL8L;@B57EI48 zr!Bx(c%o*n(nwNCbbnc}dM+$(Gh;-lp0Mn6vm}Tvlh;qt5;{@YUb4aycKmJ{t;#7` z)V`XlaGI0J!?Gyo2B;Fz0th7Kgbfed2T|cb2f@Yi&>%sZlnU|ex+Dy*dY(y`>@){v zkxg`x16wWbS^V!N_DcEuK}dMm^IC;1>+cMf|ZMqkT>V#`(@`5KZk#BPT6@0iFh=^BO8HwOhYVoH^{g@ zIv%0=HNs>TJAwP(JecY`M$`dL0piH|MOy^lyA8u^hZ>AeR^btF|H&Xg zBBZc7&B6f6JLE}Xv_NbB+mC^HIGu->pf_Rk4KsFI)LcPqXXvKxsaKHU>L9j$&OY$} zG}W-mT1@6i?rb>4>M@dKVqtcZ9XKe!P{+1aq*y>F6)j#vuwjE4Y-S@(7K_isBLu=@ zXGK2c98V}Ir=2DBgSw-tX;pAHjzKdW4GIK68Se6NP{PX+@L4B6OIiJ*WE#=dl6S=zwE zEg<@&IYnz?7OI{mN%JHzQ?;((UJwoNV(MuZYZ8LStvhPtgO1lk(lQOZQ%;4nCG5RX zO$*MWK=KD30c&TT%1im+6%lDqym%tYdGF@;CyA3Zcwwl;jIEvECxquJrH1msK%rZ* zh?JGRKC^CbNVKidT?it*Q^sjb^($Ou&%glf$VqbM>p4ATHA?diSJ&OEdpcho;a!X- znri^0BKqQDrE>|emka7;4?=07C@Y>82gv{lb}*BO_zPL 
zE?{gBAccrNt^o;wdbr>lM>YxUu(KJDS_R*AJbF7u@XeiX9te>7J)65D@o2ZbgQzaB zYgA$H)&XGv9__Z@s<{CPyDR((fb_|*8dAbXC5I;{SQE=a934t7#Hp=0*bI5)ShA_) zG?*~MPJ7MWdOD^+S(PBrOI9TJIHUq&&2vsMmE_eR#hCA7Cla%aiWg(^MG9s zKzAXis?R|}F_lm8SpnR?=s>DSC5T6h=WF;Jd@fQG##md{{tw;&?Qo0SUReMm%Z}*a0L)2)^r_Z;;@-UEjC* z^|b@i#;5C4*;${f5_xEH76f)0ZRfHX0=X%;U^F#y`$#_Fbk2MbAs}s);2Z?sTF4Ti zjk@TY?$Gxa*(Y~7bqj}^QX@w@DNrgaq8FWpuJO%ocsS9LV|>0AAZ4c`b5Y2*Ih~pN zfmy4$PmKtxPtep^4wPDf4Q9&DS-qE|Ga}3tZ*2PD*&08UPvfLV{K1!~KP&60}6 z>9y(fsC0I%S%oPpoV0hy;tnT|UivWCxVlp5+ws+%a&>zh<4hua;1{{PK^A&IWBE^d zL_=5XjB_;LSquMn6|ztA92a!+`u+YTQS?J=Rzi*)?}R%ga;9eMAn1|XlUw5(kU+ZX zWTdgpwNdc>{aC^GU0qMnF8IdIKR!GlA^5j8AT=ujcAExPjBY@xV^@vST-;fRVujmyn_+V z$-?lg-0Sf6j6wAvpwBer651AoD{!W=rpt0l$~trEv$)ukEc2uF$Obka6=g@y(e?6p zrkD_$x<06h;CJ^%;v7I0&%`ZA4_vkfl;6MhRpi;GdyY?12DMBZ_&X}`2F)toJ$u6F zj%lg~9k`tx85q!!Ppn;vEbf+6#VIQ=c5f@J?!Z@flgsMZF}v5XFB4lkBC!l)45@FU8XF9my;IIO|C@*2Z`;;p2481V@A$6x0YjYg?Xuf~6 zwsUg4b+z&G9jmx_oi(dcgK#v{+0i=C1dXyU7EX7xi&PXfpY3ZHH)@Hxxu_b@;J8Zq zuY9E2fkdx;IoRJhS-)Pq?A)-ocEoyn&U*zUrM>5z`nlN|F<7$foQCUW(Y=>(5QV~3 z`_uu+(Q&)n6r^;|sb;4*IXWi*Ol03mSOrOsUiOFX&9UO=lZnLD<+baux)+k?VRcBc z*`8@;|Bo}_=)3*gkPEELbgV1sWi#5Q_30A zT00=E#-n;fZ>tLdV%h|DU=s~(uB~R=?E<@YJZjg#bLYbY(qmRY+63RNcvSDGZNwu0 zUo##xRE2;?t=>khz*U$1@zCLb=8}GsD!8pGGz5_Vg7xhvco&p zw>^mMj)LG14~;jnV5)G-A+T4S+34I{L&fR>eL13AZc@%f<9VmEH|e70vcgxTF$anc za2=WPnWp=DJ46{e&@D)>()Fsp2mSZoP2Cc4>hcvH%B8ouaO2|6r$C6UPp# zv-4tuk&y*dTvV(IgegzNrllE?aLDrx>u9NHAhtFPklGuN>UabPyD<$&mHN{v)^q=O zRJSLuHy{lYd_OQCO#nz=wF~U(gUDNhcclRdbHrOoOS|B^%K5%(71*tMq}@73uZD91 ze=8s*q1#I|@K-omvg=*5gv44vA<<>Q1M_@{E-DyIE{Y^gaZ_m67JJ#|RD6I|dnMw{ z&g0~vlrs(FNm!wBr(RAhaM>0@Xw-ZPkj)P7P%~p6zQ>;ew-ik+@ zmyA=;SZ_TqaFdS>K2nwo0zy|^Nt0BDFJOT;UBYS>h+mSNS3KwGto){m6}i z7jbph2O`cx7Xrbz8X_r1M41fxjHuduKy0{DKU46n#EV3gCEJXI&;#im)Hc{j7hV8Q zkLAZ1DFm@rKtgTf`>_)zuzR%NyP>V9E?pQa_^zM{5{?aMT;CZ_e^5XwkT4F9R)qc*=|InmvdceoMC-E;R}5K%!BalOqHMgVl%--)cpxMZSvY`L^_)ZP`EV2 zuD$v(h6KbWAf3M`NXD0xaHn}%p7Y8SSsAUGFTzuwJnu04#8^lc^ZR*~MGa;(XW<~( zjyoL1-mI0cdHoS)hvZF);a98z*si7Fz>1}jT^zaZZ3mRw@c(x&yQ#DR((2*l?^!O86)Q8^OJZv=@}E#K3I2Bbzj8V*Qo z(4Z8~D*&NKHjDCW6EbZG>?lH}Fi()@Q*j83c|K&CTDD`+$)Xm=bJtbl<$mfMW*Z$~ zC4u2|K?tV)*1DzdZ>+lEX^Eq$46_i?!;6*Z^-4u@&p!OP2)={i#W4$02bJp_=YmI> zet3-6s=7xXRnnQMPE5pHCbqx0EKEf+{qorRG^w((3)57R_-v`xSC`7#Cd;c+FPhD$ z+ZIi!sQ4pgt{@#Uo9Z34fF0VU5|F&9$Uk*BEGh9=j49R#KmH%Y@_C^#c1IflX{By# zbOTaDVvl5Gc$+JatfSQ3&lh~(b$Ikcqu{$ib@#A|M%YJsv8^d4$AU zWj?e>R#ug)eKCvW^Jxq&H=9j>v^9B;NPR%q-b+0tZ<;z(z5mPX^y);j5LS^*+`|FI zpj+3DPhXwktjnw0bwjiqhC!*?xYq6E(7t&)6Cr?a;ba%sO)PdEd-4C~ysz0v$cpCJ zR7AoPD<0PK9tfKcCP!P?0u> z>Oeq3m>uq}GhXlwGSVind;6a#`2L|q@ZF9_=tFuSKw7O>ZlX;;&N}|?-3|UBVoxI; zRY^;;;5!}~1$H9@-@v1WvKF3xBp}hik`uz+x>{3&#EpvGoRVx#D-xRdDBWC!cIjKB)J)0dK zs$@0)@i2A63S^q4qaRy~SLfXl7F*Lj6g|+|nHGh7NLKnyAYvYNXuAFW=c9ak?u|_X z=WJo@%qK57M7$$svqh1_sTe~LsvdJjP<|)tIJ?%4@vKE8?@pU-^ibv&;KnbC*=4ux zVm-GWRyS9nq*7#Qb(0B{if4yXmg1^Q(Z26C7ZqnzN!CQ84{tzX4lg){h=O!W1}3qC z+ozP^mdR7hAGXxtcYAtJ~;S_x=w>E_adK5TQ)t6%%E;>%x-C{_I$?mnY-PQpoPI(=62-<`he|1C80c ztWGr%k>d9VySeS;)C5Qflz;f4z4-qT1mDeg^pP5n?wT|ftCaC=qun1CkeY%thm*G@ z*LZ>7zq`TT{b&PHJ>ND~@cpbR_#TZ%Ewl~0MgbB{6$nc#0Q3i>c|K0uL#%lHB#AEF z6zwT43!l808B~SpiP#oJ6iGGB_SXbSK9y1=z+Z)5;A#!Klf!wr9C(K%a=^;c(s#0` zAi$&Sape>XLrdvq4n>!UsM>>w05Zf|ikYwLdrj-!#r7I_7FkutXz)%5{auD^eqctZ zcz95P(i@5w;DUVXDJ@*fyBq%NT20i04oF0O*tN0N%biSh^fgG+VQBu7#|jp#^BMjpHOW7%e(7;R-G<^ zlbiBwfUW^}^i`wayNZW(h;0*mvs$&CqvQo2P!zQ1S0rS!15ptNBODHcBWf}z@nMokPNDMg8XPfv4 zhC-;2p?;$xhtuoD;(EvFhbtzPJ6(o?eGr~VKqLlv({f8r>^rq2Zi+_mwj>RM^fILB z*v_tZ4oy~<0CU;;zO?)WNWbK^V8&2O1NGHsNLo@;@%s4xkQO5Zn^HzLPjNUH 
z8U$D6MqE<~U2&xxMr8FKOmEeUa4CQg=}?pQnikHX!DBPb8D=B_pH-E0Bs>&~9?FuN zFl|@xB4-mR3m@Os$Kc747a5g%44x80D|KnJLQ#*{FG014ItprYP{X0v!eWZ_rhyP8 zq98^2E6ehigDBQ2h29atqK?3qq~UDCJ}G*cz3X>)e||azDt5156JXBpnJbETZUjoz zbDQp1r;-hQ5{VLC9~3qfTdWZb3Bh+P9yNsNw^hM+8_EDq;zfr~y~WOo#-b6A8sv%p z=K<2n*!!$qam_=&es}%9Z4i7nG{k;Yg~R(7d=G0tdfJ3XUp52MJ4pK9%$?a@8%vVL z^%j1dtOQK&-Pkfd7%YLX*b!I~v(H2H+|Kpv`*PLmQ}jyj-7hj!ei{D^Vd*&Q$o!@? z)9oTvmC7p7@h2j_h(tDroy+8CVvI7nM7XFv^kfL$c0ecJ*G$8{j#k6T*e6r2v=3d; ztY(RB;!d1U{vOY3k^K@Zl>H%E7v<5h1F0^T&BNSgll+%7+_8((B4my!axs_57aQR< z*_hk}eUdn>wY5yqq5P;ujuxIwxly_eFjuZS;(OAX#0vXLP^#IYLdND}c=$x0y`A0K z?3SjdO2xAVd|E8(>x`-**&z*YW6&3P^9(7JM@(jok?G zLWcsR^ZDS$f#WF!-#5FX*~Mr)s476JXj{H4)Af?)Tmz(7ycuhc>;-m_tpI{$Exf>P zu{Wz!wQjVrZOdaxhoJNo#Bz;(bl{1Ni_LhO6|ntHPFo2;kV+1N&yq)ZP7$S`<$@5~ zDR#vEc`g9hx^YCtT0rCUDJrDpp>5y5eA0Xk=gNXfh^R$i=b!jh<7?<3M0p4}PCh>; z1tC|H{ewVsz4Kx?_7_|XZK>0sVw9P$Spc>`Nxviz7l>i==ybP5lrTUvk}LGJne;jz z=|mhDX2)$SC{-|4V!H)D7eL#KKq^2oSIuTilJ!svqz z+xu7jtLw|bQJvuXeCuLcuiKhis%R6t>^XG;QYiaIM59J@0=uG$qm2*|IoieWhM72x z%A#cbfUlzkf>O)n$QeNw=YZkZG^T)X4fh(^z=amC0oy50$aY8QA$XAgB_c(!DU#;+ zYqMIKjI2-k2r7d&VrI14z&&KI0t@}9AFYC$asW-6Coqz0aXC9NEW(T^Pbl*(LN0ll z@lp2An`z^W9+c759((b5+?hZ~>OMJ`oP&Lk!)-f$*wXCT!V#I9d}1i4lT8fpt->(Noqqx(-N_-3#ZkfxiL z*H>G^+uPyR)ph@Jb1*->*```)Ko0bF%!ylWkt(PCPHfXZbMYo^o9TVW6(IJ%7X^p#xM)NVMrtK6C9HKpWx7z>?_ksC9KQD0w4T|!62q< z9j^jfGi1WaeKQ=2T|R>^2w5&jCS6nwKeTP+d;kz7L?lxouZ}D{c#y}Gy(DZaV5La7 ziKaT#O@(}4q)swIc_3E3C8umbF@Pjn3=lhzc@X*{wkXRjFRgJ5bLbM>JeuTE1}~}B z8AW7idTj+|@wL2)1V>kHYixAW?w6&3W=;gj)5Gsv(!g8vqYsEADJDJF{N$&C^|Gt5 zecH*iibr(?NWXl70;FFqPb%F%K>-putvVHX1)+83@3awO)$3-oz3g>yGTNCOk9UU$ z50p9^49@yj!|{&Z>sJ7&f;@HIef;2Qir6iSxb-hs`RKGvd7P@2&NC$@X87M9%ZE94 z^KS1H<3F-mr#J5lU@kTv3BJ!p>EkZTO^JNG=3NBedldypjK1muBpnJPzOtR164qZe z^7LPhE+>-&W+qn#je9dT5f$UO7(@x{mK^M$}+6N70`nnqS1|0|NVV|FT` zg?IlqLrQb$rbT+vSRy^KT$a?ZSYH`n`bd36l!}RUe9!!9bI;v?RAEsobP8Ps-}k?* zFF>jneAgHGE-gR;s1+)SdMRHgAl+_veqB`nsiO3?PVnsqhsjY{Qf!nAG_O7%^dem_ zwl_}o>9629()+RQ7O_~M~H`rYX9U;feg~5_Rw63`OE!@K>b)4heO|${Z?uDlA-_rs5?-A{M@c_OPW&_YBcV!>LcpB4Y9 z8<2b`NoKmjB14}w1mBfBTAozGm8lWfRji4eR^8n%SKX~D-}*@hAf0qUQMHEzPbxj3 zl?pXKxIb&5;=6g1T1rH@x;#AJZGD-kP1*jmb?ZsG_xk-#aeDmqyL&yiLHJ4G(aBbS zesifiL(p(pT(Sd8_Uw<+=_!iW`e37O1xT-}Ht$O(enB7wJIFtX0!r)Czu%?05q&w8 z6kFr?UDtTu(54De9b@;hM?+-~WYfhMRH@T6e{Q~0qI)4MRz;!6a~tQt2AR?9f&CL@ zv8Bs^yciNmitX{Pu4^dUK@}LFo$uw>#Apnl=YLZLn>uU+%2Ev_|Q@EeafNi}||rt%(=CS_ws6Ow2Zuot2cf@S%*&aq)8 z{mGQrrX_5+1s5-0vp?Qp354r+r#q0as1-a~Q}9i)=DubhiTf2nJWX0rxtq?cSQ9y| zdck*f`PNT60O@UK9-T}pfP}QW!V{`gTtDl#PR$a*_gR9=E3!e`Lz{94dah^f#qE@% zZ3m?D;b?d&FHy2MVA9GY5_GnJl&((>Uf*U&KpO1qyn9&Fb;9CJa zBGdFmfJD8E{0FvI^Q9n*60SfoimZypWiH6ZM^8B!&{Te8`m7={55epmO<`Q54RENq zG|M@$4v7}J2m1Mv;MS-f__Sh_Sh<{-XN#$oG5@+v8Xkj5S(j}*9lnp^(C&+q_ zK)92-TcuD|$K}eAmvu{sIA%&VX|C3rVEIHdb($JfG|T+RlJMgp1+6s|Zqd>}4yNQ5 zjkF|8q;oWv^N&rnNWf5wz^#rtvNE-}1jm;02p)PK)e62jvzx#U#Qe64YOhm&^e#Y}?vBT|x8w2g+vk&= zozcn3?BZe-ipacF0cpkFdtC~U7(~*rwL~J9{Fpz{u`qFyeUYJYV~`MX@-XF{ys6Fo z-s$xqab}WYdL!l=$eG%Ez2oAO!q|28j}G;Swr+aoXV=rTfa|=D?a@}huN>8jPJ-{f zszjSk+!QIRfb@;9(C5eZAS=0ZABBs-8G<6Vi*}(#gJDM#Teu|`w17Fsm=LM8`LIg2 zsI)DWg7>ArdR08*$~0wo8&MPgEu?Hru;^3CyV%A+`-YH6OVLKk1_OpTK!J>LR{}e) z35#Nt=E189@{yQR^EX&z=fCK&HrA-9aZ-nfs#^J7U z-Re(k>yWUhHTzK1@Mzvn{+>}@i8==a-<0i9Y-|jVdixg-|7JPB_IN+vWDAy2@)F6It;6RUsmu9$Sn2($S-Fxb5SGi zfR0tTg5{w?Mpg@%LkyBngE@RMMWtG%Gz!$n^T{S1aFh&9uQW*rmU%Ltialy&(YP_1 z^W@lIC`}gfhN%B*fi%h5SZAp%qESIUiD+f?h>_NyvADOwjNf7F5e+;oCd@R7f= z7T)w_%HJ&%QGBu2eCR*M$IA13jFeC1ba-TZbqc0*1k!4YdS8v1dcpUK0;GDu_x%&_ 
zh#2Ch6?|(HAbsN3vkrx^pRL^duAWD4J^;_j64Sj?xN$Qx`GR~A#Umw|nWlC-GJ1ii>Mb~pk8hfl+)%IY?{Nn)0=yK$GXbC+tn3MdCUcUq9j~$`3I>CfkIF$LI*42A z*mefI5FdFmPc6Hbvft8HfGCNtlXZvmnTk$O;cI5`I=teMXR6-W=3pXniF(+~*A^#h zA$XEZiceT%jZJ4Mt-e~v(bv2o`^!l69qNe%dtazX^}MIo2}l(Mu@wbK zs|0q-cR;Ead{^>_Gi$Wj6){A3vL25#DM0#v?9`AfcQudRRJ1LaNSn>6c5w>=abq9l z+nui7kw28)Yy}Q4Ur)R$W1bi?Oz7DbUy`ktCw5o7y)C?+_pi-g8X1qq>5cu%#Ib{! zoq1i$BhXt>fTZD$KCAjmi^!C>JzMFT^BSnUX5Q5qEELDqE5)_=i z7=ay*O?Krd5*AYpKf(yT9As=aycCS7T-g!%BlURjmRDD0T;hag8a6~uV=zyLA}1%d z3kWfSWEk1OBrVq9+LopIaIQ_NQ;=}vk2jdNUo?MJ3@9GMWz)Z!dWSTkyDR_ZgW7;K*CAu}2 zF%Oj65cENbi!0!)4ji!UPIax1qp%f%Z-0Q5UTl%33{B6A_+XI~&N`3m49!DrD)x-~ zcCH`Q)^p#?(ypK|7`g%}AOihGWz+SB){MZ8UJE)``apvYn=_F-bGdnMoFiz%rhDVvt&@^P_V>lHZCLbh?HcvC&lS z5DA(+W`{)G)L`3*`XU#butu+P&^X;PT*(@}?7mjfso^I8>9_R-u|(&p1a{p8-wbvW zd{^x1#NsOib`=Fk6>@o<`?NrsV~ke=34vWr0aERDuBt=5n%}gHIGVSmy0=#k=bg0B zneS)kWOmr6bIwnLbQPM)$$*9NsA2g zx2fOOGf?Cr>ux!DlJ6A2Ak~TuCY9Ma)EUv!Op-U3A0d;S5k{^Ca(h7#Zn7tboU!6V8#yGXPxbC6?tt(rrZQ0<>1cgK{9U+KmqX7;Vp)eJB;jdPwP zj4Q%zZj|SX{dS$YT{kFQSMp*XRPzE1Ap%aj;GDj?P!L1uRMW&zl5)<$GBJ$uQh4M7 zEvkEJu>HJn%TLNm09sIawr(qE@JkgQ596Bt)_kOkF|umkf}DCV4McK0_nbvM=)g_!S|8^q@Vx(VdL6W z_S|2-zTkTq)l}HSaR6f z_&XiD^x@$lF*|i(G`jZ?Go4C{Zt>F8ej<@!Ed}e;#WDK6=TdIgl z>Qy25hEmMX2}oa$#5NYt!=c2>v=^fu2C*GoK+RhbaZKt)k&E6S;Zdrez>Jm=b&&_9 z4kKlr-ug=;AXiJ~S`sV^K55Z3=gH;#0#bVY^SAXR+Cxi`KwB4WRBSKth*DFw#*=|o z7T)*k3946;@s)Me)*H~gkhzhbE;4}LgdM->T@<{_s`ZExB-e6YiwG9J5>hmRfp!a` zE$GrNhj_QHnu$K&0bf{`>#2Lj2D?*}7r1z4w3ZhzQ$6#F;*TO}_Zt~9NJZq*1*&uq zv_%r4(-(@!qAQT@*J{wv>nQlu+fkLzU+weN3mTpt zNIm%pp9dsa)GCYore-@=h_dW8aa|LRr?T-P(nZ@_Zpf4`vz!6=C|RVBhcG*HbsXC5 zsS}Xy_D}_}`O^6VmKPSYBS^?WtKD&eMz58LHpaUF=^M?G{wf^Hx;yQ)%zzmNbD?Ba zY+4_q^T{ADDUWJX8l_c}k_{C=WV0k36*`KxC!))vA6#qjbZv;DlZFsyX|=W;Ye-wP z@%rxH{*w;AW@2G;+Q4*c?*e^GjVUOVy;I^IWVF-r&J!~DL^fM+vXwaS^g%LjQ#GzW zKkUiwIxwNUP6y`dDFR4mJ0vQGxks)r9Xj|^13{cPP-v?(;gV=>uu(FA`Ya8nA>V79 zv*=;V!wZpP<`3ChB!4MLx)!bMq~&Q_Di}Hg2}X1+h{Z~G7kvA{U;h*!E#(nseq8YV zv!DL-ch5TV2u9WjzN>lUfcE202&C!t+nw>%p&ybdzF7?<1a=jB($#F|%HZx|BUm)u zrxT%RU^agLHm8wJ2X1ro;r3{n?rDB`H9Y7Kk}l3Z{$}_v`Dk~Z&fOi&2JMNr!qIiB z8BBL|l?s=Vq~mn+dh5mRyFZzjzTh~yWv-*@&1V04|DyuC*R=&mZicN+IV7i-LLpbu z1;SHef*E%wT%aw37uW@-lF;A%j|_FTjxZ*+R~M-l+ZT8%=b}q$98E`>c4dBN3kd;a z!A+j0ZIRI9p{jfI`kvHU3VsQ7uH&l~C$6!=>@IPpHC^*wh4C^??s^z-wbt*Wr9KT$ zc*-V~Fhp-lgMpwCM~618L@PB6y3Bl=my#f7Sd5XUAEF*>6eHc2h%&x>x9t9&J;}%< z)-R99p#S^Ct1$4${ewiZAV=D5Y_3jHbUOE*nsf6Vc$=;vyLAd;HB>G5US5Ed4EFE7 z@QxFHtljA0Zy2o>*gb*Zn;ZRcmY)6M?iT-?-3j}C6HrVBVd zF_GV?9JRam>FW8`|1o!F*>N1nn$B~yh+K|^h2|c0#M>f3&8m@9Ywo9uu0`;fPd%Zh z1@u06ND3f900aT9qnTmx`)BEhD0&Eat19mx(GlS>fbuWR?YG}Bdf00b*fI1*9xVdW zzsn)mP>h>cyfS;IKHn1t?1zl#0&St4{%l&-7i+sUJGNq)1 zdnwZ#?wI*GKiKr2|94HJ5P`dLS#D-}A0hZ=m&;29Q4txNX}yI*K%~SBft}TYY`Hkqt_{`~OP=cQP9S2$ zW^@#Z$i_YUkJEsp2SWrR=6!84p>`)R52@i5ku_BbFD$);u0X=aRs)_*(=7S?kcT!Or3mpmzPCHk78*PRwesZOFnCh2)Y-uyQ-b!f4>As ziwa^-{sjcz7sG9&!S;?5HDoRWNUefzigcIri0jV?zIP9#od5sm&bC4L&+|aqYb}Uv z5q!g=+f!pV;L+Z9kq&C%QGF@GyDy1VNJJs}`y3wKK=2KEmjmg8;v%i>B}*=b-~wkrG1in2YvriU{*;7t)yk$WhZ>c9>P zwhetd1%L`=2wQ?%u5zFF1I~6`$Uo%Wlh8oHvzrs!<_Obkqm5p=hGdSAmDlk{RFAq3 zI$jjq5cndt53Yt41gR#poz!zvkAy|A?4jr)&&9*3p&dxg1+ncsnvObV3>vP6<@%_h z0O{F0WD<~$XMML@igbe~6J@ULw%_n*w?O)G+f=n`q4ebxkl4&?Gzx9V@@vO!Zft%^ zz%jYZe@z35NqrwYn3DpIy_7yJlK5@i#ogck@uq@r(ED3~^nTr&&sI|%l47C41A!36 zQ%GbUjefMzN2VH&XoL!g@zkhI<)&d0&{4TVN2Zuvy(~r|cvs}#ah`HEs%b6m2r4HH zQ|b@41EhZr1=^^$AgD4!f>uO+S}?a1T;A*HQA`gHZzjicp)Gmc6}cFhFzZXQL|4bb zx@fzd0wjK6fpdxNR1p=Cp$I^$D#9MMctA#?=aNvtoUJxj+AMBGj|6U4?rDB3;nO#m zj2jFch`p*vmw)fGn<9PoE&Hoe7h9LxHj5g5mF&AEqfa!@EckBVkpd)c<9vzW8y*0p 
zC#T0pM`ve8SI6r&GP0RR=W~MZ2}m))&VWRK#HVc9-IJJL8c1)q7sR#+?DnUY?nOL; z2$^Via|&tx^5O73SI_%xh`n{_kLvz+_uY_!1dnbYu;Wpe18Fq?DQ~=Ycm#B?LuKqRAxi&@z>GgNiOc@_z~c1wG& zyQtGVoTQtYzeffOZgr;D$J@Yzh;Yozh zVwRhKvc44CG4yckP|^La&)Inyb9>e^IK6^O_93?eFWYSk< zN29zd?zq(U%DW;M;^LVQfycR#gh5geyN|KpmvvqbB3ux7)#bo?Xe95!F#ONei9!>> z9-P0oyu2BTuqUD%g=n5aG9P|E@|*{%%zyCV=#?ZKfz)KsFxfrtMg>Tl&!gccD2$yX&zpI4a(H}o z`4SfC(cW-R4IP*?+O7g;c&9)bU3R`)eHHaM?{1Uon?@it79ic6N2^&LO_xRe#|&9! zZQAhT_P-;Y)?R=F`&tFxHv&>Bcfs<;oobUhwM~m`K`hYZ+0Km|avw)*t|5Ili}ki$ zfX2a}r+Rz^3nh7CzCu#qiq7ynh=jLaGykDUZNF9~=|k;5jwvXq0V5Sed2VTuN+L`; zFfV_IaF4`VfJ5CioVaIAtBMMiFtt>qgr zvPy8k2|JYT>D2csV(EapdHkS6zx>Y7CZL2!v8;{`zNm5R z_gyI^CExP3elQKB#sZ}Mb{_4|jLkPZCuq7xzVGjSzEYLz+pT)@Yr`LZ`jZqM-?cP# zY7p4PbsDaJJKU1qZUmCd7l?%-F38TyYg|!$q+zjdRlcy|EBmY*YQ;&A#2g=@bKHjdhRlfGNx=JY%U8 zLCXs2QT?ii&P{Q7r-TSN))FgrGC~^V9kIKa-Qx=Aux14qie~%XseHm&0Y}v2fNNF@ z?5h49I7j8~;K3eLeDP*R`FE78R=p``!k#mI@uR3gE+C`IH74RWUI8cT)V15#509F? zSx2S*VjwjXAf4Y(@IBc9>F`&8w7BC0V^1m;U7en+)1i;`^Q+wn>`od4b~^-;F!X8Z z_(Kxuqd6eGb8Vfxy3xL!XqTX%gBwd;7GQW>qtCYR0=ah?apI_o*iQX1cwuSf zHV+__NTO3aTPML|Nf!r$Q6-plH~>laPy#@lXXPU+jXdZKk2%y2B*0SYpC*2$ppQ_9 zWxlXOn05_I`q+8yLBaBGIf8M-GHSw}=b*CJT=8;ZSg{w(evlREvZH`Sjd1*c5G$3mO<1 zn3LKVhrL|4y4~5QONOSM_*m{Eq6&l`hG1fR+_42y*SoH$bz1MbBW9I}EiQPXL;xEz zThdGAs%pbWux9vjU_o9q^MN+W0+|(sT@EByy(n*G$2WdW(GCMD=ZM_1u!T`8o`~6j zB2~{>U)4gH<%Sfy0MbdbOkoMRstAX&qV+}Shr^8?JPGZ> zRp93ALIU#gIo{gLyPOx;5uYvR(UicB({}`<&&eFU`yMUqXb02)r2S=r?@wBQut@N| z2V}VUp;5%=IP%S1el=Tw6b=@zn_g^CeJ{pZ%dgrURd3Hd>N|_J?DR=M%0`@7O{?}m z{Fk3MbW}^rdbaIxmy8Xt6R;wsGs_z5dS}dKc*+VX&pIoDZ|mR!4Gp|^dA^b%3U0Db zWzj5~+&Z zNn$&Lk}CYlt{C~}9--#QVcLS|4FxO34lL8-DYMx^96}5hhX?S~FC;`L>T?t))Xn)O zDqh)|>w3?P0?qk*CI08`#Mj+|ZTXhnLLMQ5zA2BMg&_KLbre7nj}GoXdbr*`GHuDd zI@pZ9#?15J(Qxki>f-YFWOZ^?1L?4wck$%%?CK;7>?XB*Q;*bc`^^MQx+5S(qwZMt zqP*tLmfGQ+1|T&SAd%(Yz2N)WoS&s06W=VmJec^CDvd(+F`WR~TPpZ&5S&M=sb(O3 z_!>y81lY+LBw#zKtT!F?B3ctr>e|>!J6VJ$G4Dt&vP2&9un$#Lo+w%gdH6 zBkEGe^-6H0>!ujNEaY;eVHnJ>*6ZW&k_&W7<%N@=M=xB$oKQIs_0<05>^tk1&ljwPKw?2{_eotKu$$r0 z`Ds+d4nTS&J;2G?;I(=57Bnil(;cB4iYp_Ed(maqB7a%b@4+IWu=vOgaK=j9Cr*LbbzK;T=cT&rX_Kp)w zyIk;{4_yMJe}~BdE2?a~Q$!bw$K!z4AwN@`9U1VD~12 z1>Q-s3Or9hB(*Cb-IGPbt*yeN@9$18hXCmxr2uJvav$x-|5Th5{=D?^?8DtFy(!+g zvEcjmbRbzLz(Cis2&uOuLb4+Gb}DIDlIo^%@=*wlfYw>31g1wcr$yKdxp->2Ml>ep zRQaKqhaa@Y)xM1r1_+22=4j9?Z?AFxL5XrQxiB|Nw3X~3S2Lx{mZdOQtVEdgvX~87 z9E8>JRJ3Z8-aQtyB~fP{qek973(rR|TNc~Fqw)0Mn(Hu+(U29=As8(j(MB$$Rjn0| zE}D-&ia%rES9LS0nv<(Z31Yj*dzFUiC{e8EWF|s9o@%FysRmL8887j-__xWvpEbz1 zAy2d5`?|~_c(fj(m;0*`NW<6z)CreGU^igs;(6JDTR$DZb9q{hJ=5jySH9uv<1s=y zof7>u6d)~9-A%XVxH*u>|3|*%oh^~wm_NDmCLrCOEkN485s#*&2NQwJT;JNg&;KBl zG+zGukG{TF=Y{V4Fxu69|F;r+gRVtDO15((iCDxDALK8mhO?VAYkkB~T`a#rG)KH( zJ&oj?sEgH=D%^ljP^WAlMs%een~;LhAgJV5!g^G4bggiRu>u}Wp;17Hb{k@wAmyWA z&CB)>c{N(z8e%)jL`S%Jd6C3F2oo_6_25{M4!T|iDXD5jwiLd0Py%w6KD?4>^oHQj zMbLO|F*q*HJ{P|io=V0{5@E1OMs0Gu2|`i|ehHVeU}A{KS2Y;vHbO)<(I9GM4Rv%q zMv9(hB6<36mjVgF_hKFqcuWhvr~61;MI0oL?u{PcWKZzczwQc1pHF$&{eq449FWj4H}FVA z+26U~yLJpB!uaX;2-l}B&mao;?>S!){MNy?L zJsv^=G_}R@vuAg!ucxK4`Oq$hX0-7gyhBLs#wMOt^g(rHn6Yiyl(H0AZ!)9P17%z-lhCFUc=gZHCbw2M&g<~8bA*#>{>WOXMX|sG&qf&PRpFRhN7N;#B{JYzfeM4d zB(tQAR*0dv15-I8f5z5Zj#>oo$hfdlkXZ>FMFsE5YT_;mDZSeY!so>*?^1{MiV;PZ2l_ z+QyD8FAlTEnA8!FRLm_q;M5Annj!^;_~v#7p((UDnCHB^@Ui-I?Hf ze{ZgXQN!gK1W2DxXT_EazNZCtP3YCo;s}W{m6VRaDvdm`7KB*1*&1Te(*Yj4UmerZ z*n9wk*ACuztIC{|I^ChALNLsGsuFNqZqi34cWeg@)KRjeb3p0^#fztd04H z(b1h&HR znYYujOE_N)q!u0#buPA45zPud0F49){B^JrH|_+(45DMXqKttk#Z1mKB-}FENMR^`i+L%n^5`SpYi%b1 zLgyW9$Fl&@eNd5gT7}i@wj2HzWf)zXsJkJrLt7}0K!figoyMHUi3PS|N_Sekc7Q5~ 
zw3&tGMc&a|*NKo|xX^$JY1If&!Ly5wsSv=;11XkjX8~R!Qm(o(%htO_Q>maEh%xJV zO@N!=G=}LAg-M7HFM0`(*qLimlRaw}*v%IptuL?f=;(Yk6u)ZnV9x!6ArN|0_;oc* z`8e=0{JNfR9%8)EgUxW!*S+HE#jfGyhF#No?H2T$FXz#&fP{oWJ^JYOGo|?kNb|a} zn+v`hr<~bG+n?sqEozdgO97-?A^5%lkA%6K2);9r-f0Aqp@h>q$9m4zOWk0gchuaZ z-`Rk30xgWbuS<$M<|IX~NX?B#4U{=yf2Vc^WFU}HJH(K&n``q&$3!RBz*2Tz1!%+i z%CmS19!;~v#bZfTwUR9ToO|8Yzp>q!tOdLzi;5)7b(0{b-+qxOp4B;#4KGOyTv?pB zoH~b;0wfD0n@E!Eld}~$Rg2*6USgrQOh|He0!ST z%Wo$2n(ecvI=v|t0BOjqw3NA$R%w%l*E~YTeZIch3XLSHJHm?Ljarn+0~ud9)Bn)Udi69#vT-RJFBOvy z3sXmQ5+ytn^|&tn8AUu{o=5N~!8cM5(|(|MyBXp^L5kiZwXJfq4hKF=`8-Amh2r!w zQTXGKD>7UZk|MivqK9tfLT~TpdKoFubfDL0>Dm)Klb*?EyI=R9LW91!A!6}R zMxqfXxaGHlp->L6h>MXnSSJ}|C_Xc{2$~iG>1oR@+;YKpHHzT7Y{NadI=uhj*;Pmp zlo@(;bfmn}!&R;_CLZnYT=2a&$(}hLMXKztC=WKkx;3>4xVQj`8DL~9qZwKPBte4R z89GW-^{dw799DMQ^euV#k>^|u=9w|7r58zq0OPscXGc(m^Mbk=xRpf+<$w%3wf09F z^qAMw#Ynylo@-~!QYeuUIKxV&=|Yevk%Lu2w`_CckhOPNmih=k5^yfTG%=Ultc%rm zZQAYG{lXDp1byl8szhJV-zJKR0D2N3=#tw0jE%T1qt{Tz^&P=;0*_f8!Vw`E8gwIL zrNEPR8OSG#yi$kIY$AoED|t}ow8$tFSP#R#g07ehKK=6s^_4+qd4H54G7x2x_}GX@Q- ze;FWA1WG07`mX~i>k0TQi+oj8-}}~8zd75?BWqSGjfP^yY;L$p~AR`1+{6p2uM(-fk#iBbw(W%0=i5F(K9deSQ#4HYz-soCv>AaDU*j6$*&xFxW&_C zdA^02sc~)A+l3W@--D^gLJiPKg zeH~;yp%KMP8XX;HoMkt=%cI9fXJ_FVeo*TXZc>0W1vGAS*Fd63_CI|0hxhJ&{jMAf z=7IE2Z3ReZ=^EJ6!XqHj7RxZ}_c&41R&yvjKHl5C;G0LPAvwP_xEM$)#!5#q!}K-&FhSSLSTR)DlOO^M}# z@1;Cq)_Zitw4(XqEewz6>8Q?o{XBikzuN}rVNT<`bj+IboxB?-W7Eeo6m+?#y zPDnc4chFpYF2+83o%bD5Me1d`>OB^+Sx(rfrACAq9R)mTb*_PA#2w_mq%LGnwjYJi zG-c9lDAz@w4$5#t2SKcsA`Xcu#-oeuKgk05H>vXw+v!G~>I116%qHq(AtMY0 z=BNuaKmBF3)0f3|ray=AAP?%gyK8%xLXD0Mv=IzBZ3ruCqhcLoeb)%27S&w4;2SO7 z45Zdqm(OcM(uXgzA$M{3jKYs&|ZvD*)koFq|cC7u=vwkz&_m^aP>hm}L;FVX>@{SX? zZzTA>0g$TN`J5v_H(ONA)P*=DsjhQE@SUU$iY{oO9b`~@B${oLa7ka!MK<~)Na$_7 zPK!W=V@HOk9k67+@A z!eBf_MVi`Fq?oTSa&9Br)*JHtmqGu$w36`RQSG`|H)i!=uYrPcDy+PtThQkj~#QsLxzM>;#Y=|F*7t zUw!}QS^f79^hKKf%&e1pI~9C0ILV_1GX-X)@lz0IrsKp0NIMpMf3jTg-3}x^0um0E zRQdN>OtZc?E#gjxpsg-CRO;kBlDZ!_0xC-VldQt8Z^Nu$RzzHJA0Ezjl@P;}4L63g zP`Q!W0KDd9?d*C55k)j%1ATOgyezy{uvJ23v#4{;K6cU5mcH3m0!9~ND@UH(2cuj9K}jSdNl5}gtf~?ME1foN z=7(#;aJSC;WGq4#l8|gN6)lOewZJE9a+@`HB4_wm3ZPLGbk;PH&vIICn5-1e#h z(*8SqxASN^kjyt?;oLw~z7cNAwjkRRoRaF6-Y>rWjiC9)D>NHd3*q!Ro8}q zVSG3@0wN>`bi+l}9(IWB;ykS)=;eTe6#(qC&LtMSe-%L~8eL?USw{#jH26vHUEiQy zG8r5Yrms^+l=&*!8a31uAIQy;bGuBZccE<|NqxAkse(by`79v4YmQifu9`<}kQ%%E z_hG_y^8H^Gto!rde)HL=-W;IRRl1GOziX{Z0@4+a<^$=1f!zFE9V$%q^x*-g-%bS* zR(7u7o0B6QRd)r#xP0ucgQk65d2+1kgL?30--7Q46AF;#0ZC=9grSHGRXxr)#5^rS zxJCV@+##@~4mLU0gVwIj_|thoA*K`<>;}8%0iI;pw-O9qHw$-Sjr|Q(y5q%mqGI+$ zcz)$KGqv3@K_6x^rDKD>1NG?3yUc7yy%r|&Q!*V^DN+)hr zNgIgH#n+8slCZ0SsWX72oP2}8t^xvDx|uv${E=W{Ir-b)zkmOm-;V3eIoDXFdpny) zBMpP*18F4`RXu_(Pft&ui?(4ZhIv6v6jGryCw0rvcIxX{m}mUktoT z_x%357cZWiw+p_H+C>%(?%m@41R#B-A|FkyLsMIq&JU)Yc{D`t&1^fLL(V^qNX7gK zeqoaIbG~ST$*n|3RSh9!9pX()vSxKTa+v)$SKKW+o(4h!3(MTU&kW0sY)$Tttgu16koh|i>;u;f%2{+}^x3CAuu5sr4Cz%uzHbu+u#sZ8VVyu83@GSTG-s=H{?fiuIG z6JFIhN|myj`-^iF@l?sJ)mR8h$ax0M3~yz~MRLxa5%9_c@$EW?d^s#yJ0ON>ygkck zTg(6wJZiDX)hC;Ilm%|}C;ui*11Iapx2#l{*uAK`83nPoV+|VS1L^ITt$#0`Et-Ku zL2Og<{y}RHZC z>a1f*+mZ!yRgHG>DMPJbiumW+83xjs8^J$hXH05CKOkMUnk9%5DF+b`iG`qQX3fTL zaDt5mDx#sYw-_fx+ijf>l$j%&5czBwvh@O9>?k?4V4fn4o8g46_k*;X-mM)sjVjo> zu@kt&`iq?5+>o>iatzFu0v@1Q&g^j3^O%q{Fp%b?3qD995&t@Gf1rE^cxbjBDhBzE z+#bQVFLw>6P_8W4q8I5->PoB2&SS1~n~_7p%C}hLyPbmXW~ZsX{QckFzgv{ zN3Fih*jT34*0D2rbo=vEAbs|<6-R{YT7h&lV&&TkkfsT~k4C2zJv+a=IDGuWE*_~- z!+FBt`4{W6_0`3-mj8i32SxO8n1YwDTLs?@Jfd=Tlt)}Y1xWvzyL{#G;p<60XY>eE8LY_4Qn^YQKX0%+!8tzq$M^J z)kvP9iVGkkDBrBl1F6h)3RI9Z#PG*Ht0Lzu`M2dGZPDCSNrl)h5W?GsJVszptrH~| 
zG<}!csrZrI0#Tml$$p-`G_>FLkLQqX=Lo)~kZXlhtrzb(^i%OFexA-F9Gs@AsM{F@ zNCYuQl5boNkedG)0n&qcg71+Ep7YWk_Uz=b*fHGo<>}j%mVLt@ z^v72UPrfY~oOPLZe*I#*soy_ZfOOpCNR5Z~B4oE{0@BxpDOi{@4O{7HIylHQ$Pg5;m1^bv(Q zMxUc#WtW(^^vGhB@3+=7=tVPVp3|Gv5FG4i1`7?cE+)BBJV|o#a)rLjJfEbKm^_Z% zc>)q~TeyTPQCy|_{p)lfwYMmk$fHF&58l03SBC$T$c|4omP*y^pekyQ1xP@8EZE8A z|1%v(i|f;i0Ixt$zmtP;Dlsy)9Z2x#6HlC=qw3LO^=uV!wW~+x1rtxdNV`}c29J&x z&ra4ur0$1U_6t2KPk=i6K9+w|CT_r^^}!b}`b%>GQmf#5OkmgE3*z%BKtiP5m1f(8 z9D@waksClBcOaI>>t!u)+#*M0$P8e`QosxyMH$a5hE^Xsq#M7jfpe3hxMk)xiNUACz6!=k;E5F(N#i<9SLL^^B+Qag{P3%-v=1mC}U`QiFkCud)-U-mre(?XX$dRAe)`MV#f zs`DF#As0XN|MBa@BmNHJQT@n`{`JULynI!!w0Yu0b%tHF_9mJFq;JTh^9y||NnYu~ zuv5_(&8dXMnpv6E$DNct(AAo@hY;Aj%TiuxokIeQZ8H>oPDv#!tW<$&r6Z+)7sEx| zCwl+L&!O%J74_2S(yfuYX2#@H^MEQHZ_+wgpOx!#)lV!c5h++)Cwh3pmDrAVD*NBX zPUNMkm7mNr6UB{{WYg{hL2XkWi~khw$>8!Nj`R^L_ zd2Q}teTvKfG!aM@mfoJ8A6{NuJpBC0{nsaJQYNd3K&lIKz)WjWsk(YM<9__|>{myN z!!kYadZ6mMzx+VX&ZE~Q68B?&hsz{me-~Rq-ioF$jINm;FqD3c^rsN*-@7~l`v-2Pi z3Bue6QQMUJXi=)G_-@{)8{p0*NMt8fYqS|uVU_Qxr%eOWpT{9XuH>IqfRsC-{+4jx zh%~(sDEV19ZT~bLE#?AgyXnO0a)YGjFWZ3xkLGCUDl1)oFfKpnU7gjyq36+cd}#Cf zKu*oUvmPk-i`#SbGNO~i&JJG>K#IE_k{o*3AD#9`y$kh(kR9EVX7Xry0aEDNCj%*) zyj=Z~)6+3RLGx(b84;BAQf7{WK|>(-REApONCfmO$fl+W#SEHykW>9y?nISnnoXWv zt>KU{u7wrq1iDN{ypk8vX_a24dK<>zRn!SV!w`Ao$RKRv3h%Xd2jf{ucIJqK-A@M_(%=z7O%vpmE;a5Pg?}vjXZkL!XqZkP6X1oRS;ff zge1}tV|SaDpN4TJSHlcf8RSSh#GN{90+B%m?*)d?_QnacYEo1vRT1kl<${vo%Z+tR z*^w&0j(8dd$@3wFB~!0fYKaF_QVEB=lkoYFiv__tH$7&!hPq&kxa*B#{JE|u9;wu# zLdZ0NwUS;lc`%75(XB`%PwlYA#_IKRR~^xLNFYHwAql-MdaEbZMx3r_V(VAA1^b?Y=bPR z7g=@p;8EQzU~A8vFYn+U(ypQB1|L} zfn9F9*J*88JG$NI(7VY%s;gW%*~Ftd3j(tIAZ3+!s}GNHC%U^|Ga zH@Yt%5y5)cSLU1*8$X^$VuX2na0*CG<54BBtE3n`c-Ey)-FiiHp*Ov2^m%`>Xynez z1^qw1$~wc?y2`XJir4b&WYfjs?MDUQ$1TlQk6R03nJ{}#2yJQi{gwVlyDT9k>`*pm zn1WPBTkECN%%)2>N`VwLKg|L=#lCXTX5bD*{w2R8u@$Kb8tjGtL4lE_GEYy(40TX> zig~aiYCPR!kr9BgP>TPB!P1d6_C>}(_WetRQP+Q)!2l}AT23Xr0s>IYCfue5Z-QUXZ#lY{c>o<&zf zTrTwP>m(w);077LPXkS*BkId1ieP^6cz{&%#0mD)Q0AMj<>YiA4Z82H>X+k`h7$dO z1d7h2=#_b=q#G)VwQ+Do59jco6*?%|q3vr=!P1GVQLcm07%~8aLW<)q)%FZWWSw z+{e|Kr@U?yaLgvG6>MYz3m6oCG4X1ME>FoeDyp|sh#lIKVT71K<8hF$&jCL}RzQGs3p68g+R)VOC&d7&QV+1R0 zPT>W1p5NVa0054ZdMb2bx|~)=ND3IjQGln9h<$0ix+Kb(t8_r1_?M-c1j*a`48|7u zw^`And&Lfpm7R0Vsx|eppq^JSlBMss8dnYNbX_5|33(BaV{&R3e6oB98W?N4L%}8@ zW$ERBxOfgU&E!oobnRavC0HldLs)Bfpf^Rz5 z1(428gA?CR#>9;)tYz5+j-&DYA)|K`Y!Y{~ zC7}&{Vm(q^*RUJCNqJHsk-`1(ZkLvijWWHXfY#8C)2UDOzIjQNJkk0Vj;%lfG7V{& zxdlk?IM?9G%n*FTo=;qW1fs?_`saNC=|<835=!zn>OZN!kcc&i$>s^Z>0lQ?x_BG; zqyF+I5o~-i*zS1zo2V^%u{bE(xVIiu5UWAYeGzp=tC|Q;`?nD1PA?>VKMf>3pC|Z+ zM@Njk#i2@zE zYc4z%z%O#ei!4lN@Ig3@M9O09H%bk)xI zq+BwUL~A;t@@EJzh?kUJMrepKob^Y6ROeOD(#?Ss=c*2N2Z7E3NH=f#Q{mPuUIsQk zzkL1TpvWDr7mtR0oLq&=_W?1d{XfTN4{IV^_n#Wht!_TtR4(E<8u5mS6Jv~SX)t$_dPdoKRrYwyvVLDy> zyjRQy?f77vkdD#GGh%quP04ZD>{eG)Z4q>Rf>Q*eEQq_^?7YLJBRePZR)wUrDauhq zwVGQbwMpf*`L+X7C0hb1oh#ep*%|B5t+*<|=+nb%lJ#fRd0xG3RDcd6=y<-P$~V_L zo8rIZ4MXr9oa@kY}V_>RH{a#RKxic zwUo-6a2{W?lrc@JZ^^&@+**KygHoSs=26S#LCBdrx^3;~y7z)u9=?Adt$)#0^PD%f!&duw}*km`*Er#(OD^Q^#TO zo#@~q^MX_vc+BG?zG3Z=w^`RxnV+UtS4yRBpz8<4aHKT!5b2g1~ zFu5;uP!h26P^E6W9czV-Ko)s3eZ%ox@$Q-TN}qUYnCs;&qXm6ewNZ^Qr1=)Czn%gl z@o0|VdrZdNz@yvUJZctv-^~MI&L+Dbw*CC?1L>%lM@LhF zc*kP`J3aYeH5mCfI(2rrOQ58_f`!a7+rXN`8H~m*69u8 zzmt)?P@CYpZWBAgqX*S{zSj(-O3si|@{2r=A<48=%@Y?>wVA3}xRLTAVa2In(a98h zijNd61&lNxW;}o=ILfOi77DsCKQH_)Dm9sZM88-Tg}{!>gJcmM>f8pvDv`5TdMy~p z2nX9nUj&KjtTydgt8TV4F9N5i5RzUG*?hP^_+jqcAB;#a5F!-PHnD%)jtFk*g@b6w z%Q-o%jJsudS^gK@sNf=-W2tP^@_gN>@iG-&lSs?0Ao!ZK(uOi5fJ8y;ehQEXS+xqj z-_=I`w&C#v{PtUb1mo%(wD73fXaH$1b+L2|LgYZ!WH*pTd34mG<8Bjt*CS$AuU3^S 
z@@%D#zWM%F-+%usH;{eu=-`VVzx$7bo?g%XI`(@W61>Bcn@|}ZfA{>L_=y)dSMk}G z4e*>K_};;z>Vxe-T728b>=02QR8mM@w%Y;_gwVFCc{(&w@c{&E6!}QXO;k_7bHLKk zBf*o-H>K?9NNOEpxr1A=e0T360%x+TmQ(7qMZh2{j+Q_mqo3yvDMdlsgIJzb`86nt zroEKhxY5CUID%)jM6AnWaRX+RE-?&mG`6m!0hBExJ;?0r5Db+2yRm`etlXs zJ?LmBp(^J%fk=cc=c!HE=fhJ?--#$V|nLKJ2eAiVy zK(eI(>24eeEj-+L^<7h!*#;izJ?{lbE4db7{2q^1B4r@WD2P=MuR-vQz^=uW;trz@ z%4Y+E4iw3Ibfuc6vuy|ESxEKN0sU12kB;XQ#KNN)Kq4%n;z@iSCkiHo0uy4p9pgd? z&E;VP1vAOocX?uYuA?PKA~jl1Ji1~#LgXPoL7qR%6W|d+>)2HAl6kdDF9wj%S+boR zJ3%E{o0=x+<+nmas$(O-0{| zoc-r7#}{~gc`R=3->yzS;2gns4U9;1L0Imzp^yjid&Zc>#l|m-2L5&l!%Wqp$ z-!hbQcof#Ru}Zd~00|!5%_u-x%qu_|=h1CLK`am7Cy*eF0#k=CSNDpEe*E`U5D(e~ z-|bG*n82>hm5MvG%rH9sv5v|85(ulummmDAgC9>Gp8n7hnmh(kEj(&bIfQtmRc7~1 zZ_?1yDQD18Z<(7R_%7iYE(p^V)Xq#o-GC4VhxTzPj7Co(*A#MvfquL*QIL+OAWX;G!Vty&P%VdSy4xD{z@9C4 zcB*4|YN@Z3gKv?W!ghP(|TJ1REsW)vXFnP?GwH(U;nW(mIE&6qd=IU9IX z-I0fXA|NG=8h_a-Xfl*zJZN{C#sqc`#$Bm=c9)AXCNp?=v)vQ#@}p&{qbp!j^#>ap zG*Htu14!R?IZ@KpEz*1$qBqb{jqJnBk-LyZX^i)sCxf~2~Hz14Y-xdwdEIAyi zbE;%zm0-fHnxRUNF9hlC5}$2#>Qnj(i(JdJ&0V9AT)pPFwSY#nu@Yf)jt}M~s;7;| zVbT1l3j?{NKun#$+0lWbz!FzwVKiT-Q6aWHzf(Gx^ZhA`%Sdr!k}NQ+rQM5+ORAzm zgw8aQG9!bAQS=c4y}sSqRncYQ$+p2Sl?ZaG{-r_F0FasrklF)g^9A4ID)b2gyL}fR z?cUQ}!^3&cpNL4W7wtfTD0>!sABW((fk%toJUSl5%QpXENkmkc?|KdA^jmJ1ht~XK5*%6%mKo-Pufn z27@C|FsIZN0~@kWR;vWq{q`_d7j(?xM}ItUOV0r=1sH;av>>7h?gI+^YK z?(y9@S=Ook$Aq3`E``b({Q5;Q@YN|s(@zW7i&GWH)qVJ$af;&*U1foqQzn*GocVjG zj@8*U5}Ld`RNnY!?rth|_?HPl`eU2mdyGf7ZDS}JFTeeig6|nTdN)b%{V9R;*)JQ8 z-n?oD(l}9$XYeQlc0{s97>7Zh+%0bP^!)Jr>570^Llli&weYlcNh2EiCLobS`etX9 zt9kCUBTRHvr3g*wFp|ooR1C9Zah;~F5pF9y;#Sle2_%&Ui((tOWBRd7B`(Ffq=LL2 z&X|SYi=~{v^bL)o~H!jRAt9O%(6gysN?EH7Z4-(<@i^Cikw#gy*fJyy@Q4g z_os@sa?{e89CxR{hy2p8-2bEQJe1@}Zmpef@jniL;&69Jc_}uAl=t52Rkh@Q=dOJc zNjn)?A8_k*42IQmbB%g4y0fcFQht4a10XCD2Ym)3OSTB>OEE3dgm#39c1FihOE@7> z%XD^~0cIxt?Hj4Vu`geK#oirACj(}cGJV50`sg%|SUz8Xgcq;LBend=HGo9poZl55 z{blrl#EINk>=?dTtxS+KprdNPTb$BYI*N9ZN3VO3(^0%{_JBkZ>F6mo!`iT?R$`{S z!%HpwFp}?Vgo&)C`D3nO*jw8_kPb7Q=rtGR@C%ES@=u-3BBu3WqA<@3jC|r9<}^@t zYumcR;`;`Bv&4vtI{G>A&5>H6*U|1CKIFW&-(NjFXwyLBsq%97H zul=#2FYh2n`}zm7Ib4Violj0Wwf%z_!Vx&0she^{-&b`f*= ztDO`Q6`LP64d307;j&n#&7+Sn3jC7`kXl#Ivc4j5qXJW(0{*M~{bd#My!W%4AC3-? 
z8a#U46-}B)um8zBV%O1I~<00DWn=Kab z^un+<6x$jBNeu7PUA`idkzmD;!}(z(Rl+Se=1NmH_LNNwi?Jr>$NEgfdK=Ym?C)vl z)JEL)%cPK61Ch=b#5Tqv>5umdkebtqKD&MalJw$T>i(x?6F|Dw-_L*X?0e6i{owbV zUPFBz^{QlLzzoXhFJhILS6P+2O7!)jX!;5?Pt{_BN34FMWBUe3OCp7>QhVFh(Zi7u zb);xXJWxj(&Ile4+lyv%`HXuydvj!LKRp=OYMaf(bd)TRt#7V}ETW41RGleHM!t?HFZfj%0 z*rTnuRQj}H@?7!%ZJ`+v#!g!KED32zo_CIQkme|eBYPWsp(b~$uRu~ku#nEjl zW#4TUwi&Z0`atSWKzF9ot=x_KbP8z-?&AL3&-IjD1@AK;d=`RoUp+BR` zcOOW1r+759O98`ovjC|fMp$essRoZ;Uvi-<$n|*yklH@G6^offvds@#Ajg_Wsn6Fq zlAV?YHmgWC<()nzi+rQSOQeYmYkUykst0mOtK>$VM!;{se7Xa1D(~=!JTRmB!gR); zo}Ce-oMP^L9C#kQuF3h$CWBYcwdJ<(rGGr8x}h;DAD1J`V^_6ds8fuTmU%Ikg=sM> z4BN7rSv_C3Eq`1j`Ybb2wBWX|4gA_ht&ztTg*?bJARKE(iZN5lFxaKs)sFqxtXG9Z zlQmWR*&or`aDg`)EL`HzIBW_f8b~-h0ZCoI2G>Q+_V$`U`sE+?`Kdo_0qL3Gk&r!& z3ZmDS4R%n5k1NP%gP)c?SLG1~yT&4-s~5$)c^Z&bZETKth`Jq!G#QklYf+WTmQpg# zfyt(Zean}F%eQS6B5@uX{y$;1O1sjt)MmBQl2TUZ&gR+dVdg0!S7xH~UB`@nY(DuG zX6if_TEjEtjEH5-@7SYIVWqdv@jlIP-XmEDNMx9i*)RiVBJvpFEOX2|1+%t#aC@od z@mSSm^+u~XAp-P_DDhrjb+*H|YQD=Y301$O*m4vuHn$j-e!=W!JEL^ANVVI33W+9b zS@pDdbbpRV_tz~zV)c9hQsb@nEyFjD^jbh-q-r%Zw1FgnBD*HA*f7|^1od(L;E_1| z#o|dGz3yE9o#v6+G&8nF76DQ}W@p%cw@4}f<-m=-Hq#>oxRyaKL!#)b5G68W*F7Wg%#FQQlLhxCYK=($XKzBRGOV!u);0^v4|z?7KZb3AAO62} zd4Z>6b7s&+ZLlENRvd_oX5aQnj5Pgq?np%*9|Y3vy8`KQL2P^GchB(sk>t_&0wlic z%*2TXj~a$=c=S*gi+SYK-=C_*?&p0Van9-%*DZsc&gy6$oiu#c&vy;qEcbW>qkD#L zO{wQiAYuG|`PKBp*38VXJe#e9&%?k(;H7fFQ=ED!lf3ylU>TYhw8FCqf|&10J0Zh( zb42!?S@w2su^Ne5#LB!Q^Vk$#Ad4&)Nf~}|?wLZQTICw~@bpz+6g;auryQX$KjTp) zX$W~1Mrd>CqOe@G1TQ#+<5J|2s-+!bv5xuL^KN+5vY;}J$`DnH{d7uVHdG8~g{FiZ zI<)8Hh{7D>$z)_OS3dD9lwUlLLHApL)a)JfhJ&_S5c^TnHb<(cVXzzjffFZqYs+9K zS10!2{`r32{Qc|^Tf_5wP`+xn%y|-zq(164(Cf<{9WZ0D&!c_;QkPQibzOGE?7lQ9 zL2vI!&4eN>SH194KPdZCgHW4~Fq<;0qG$!2sKl8e7ATL#l>zlwNrA=;cAC+8y`#w2Y6i zQO^IQ+qW`x3i3Krd}~`gz(zq=rLb+g$#0o1aGOJ+5tjGNE{8*p*gTEN0i2nwYJ-(u zqo?3z0eNC%-wK&VOKC|Wc5NU%R)F;GJR()pC_w5@oY3s}C-F#Vu9m?L%{YNHuG#MU z`wzYbF>FBq>88&kohwpjJgd4vgErW)d@aK_*CYn*H4NV;c!ZLB)&vqUyO~AuTnQQT zL{^Rj{AkHiY{>xUOg{*tmWGJT`iAvomd?@PgnvjXt(1gvWo-Z_a)lAaLRo!y4yPcDNE*cFH>Zl5xs~I453@Y-PKEX&X-XOu zQfuVpHk7r2mytPAn-MKzd5>h%hH>%NFy+w5F@k^ktE-W$6^m( zKF=8QnRWycfBbeoTM(<$GXJ^)q;dTMB+1i{x&}K|-(C$!Kb*4`zW)0$zWx38#B@G2 zGWPjXc_bJ6hVRSz=wt!XUC#wBqmb~l!6V+-qtp$P<@*xp#R`q(26pn-ybllB!BJ-8 zx#MXpUPMhc6j$8pSm2K8TJ0Jr+ks&uJ?g-lQqEg6P7oerDgsR=IGnkpBZcyTyzCMN z6DhVyV!*Z#X(2nzL1*GQgSsQqkcZ%og0nXaM%Gcjbcpk8u){kp#ylu%gB#x!m=-Pn z|``zQ+!B6r?objx}Bi8yn z>ZgTXo#PRP?>>+El)A|SWBU?moeQFWi^Z0v#rZ-;9bX-`g+PySyw(KMI?XrprMfKU z|Czy?`GQSVV{3jdTi`$6i>iEOfgq?EMK1bqHrs-iXhOzFAQZ%KzNkYXz-aBYfXRJ} zveS?^D!P`_aEVQAUX|BEEeGwQYKbcal8hiK^d1A!5T13B_Jip@EEKQYZ=!ahKDTJ^$GUxR># z+EQKEWL7&qmpD=Y6XW;`@w=Oomp~GApH*OlsB*O$vkJ2AMGM);vL!q9bX8HUTH>Ju z-jZswGmC+rBVV2*Z*Y0YTH>d%TzlDA&cmboYaTmG%@=pOyX}XW86FhwIn>1SeL2tO zvk4`)7%+^$uzrwF%^>ZEC04qg;k#2-)Zh_IA8DgNzW_;_;jUkR)OPo!2IFt9X0rQE zO&iSt-`{E3?Bt31q^cwhgnj{1cRVP2s35oRL#TP#vHzUGf z3p%NUHTBqu&4p4i&S|1|vovvufyhY%BV|t3CZ9s1%mg%m5nO?-co_+>66O>7DmV!P zvtLRD;bP{VZ41#B4p~5g-E9ADpC-=2I%Esn_6&(!2yDliU}BCIwKEVM;&9H(iRV1M zNt-ap69W+b0Z9aVH=;e;PT`z7#o4- zyX|V2F}rrdAicmYS;>vVeCXYCSpy`rq>+WrLcn(j$b&E67gccE!&+K45}5q#!D9> zUIwxY&{`m4@O@_|T~r|nMu8tn%4$ndRFQ?L&|XFd$tKWdLHLmg@DcC=N^pIJmw4gU z|L%rBPZTms*q2Vdv`P=bv3R*z82-OKkrqeWz9DoYJe7ZyL0(_!cWKD3hBxqui0ICQ5?VLw7Gd6;YUC&!-r$P_xr{rbYkw zf(mh|QhgH~^W3IMgB`8>lIJ`I&$NE|o8`#*w5pY?gv^bYXO^E_&pvzw6YGY%egAh%ZE zT+P$)>j(b)`u`H@TaPA@zSZWD9z#*rSUp>S)Z!80#)jehuFs=pgN8<#Y>C+Qvq-o9 z_+RDnuFZL}b;@jjA4C<1D$QeZRta8ste9M#`#qCW*>6DJLe5$;!(B}>WDC+?`=xGp z$4d@EOg7!JWgRg_4{FQ=k*-c!^}=(u=$KSu)CWF1Bx&MVCL25s2Ex+i$CIBbBm74QEuni(U&Bk%~v_G0GSXR&&=_vLzS}b>BG< 
zD&VgVPu`C-P!S%8&Sg%$ir`}T5t$dw^8Y(vY4QW!0zXaF|9A7Eg|+!B(_8WX2jn^A AsQ>@~ diff --git a/docs_v2/static/img/logo.svg b/docs_v2/static/img/logo.svg deleted file mode 100644 index 9db6d0d0..00000000 --- a/docs_v2/static/img/logo.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/examples/browser_summarizer/lib/chrome/chrome_api.dart b/examples/browser_summarizer/lib/chrome/chrome_api.dart index 9ab8b8b4..d60ac8b7 100644 --- a/examples/browser_summarizer/lib/chrome/chrome_api.dart +++ b/examples/browser_summarizer/lib/chrome/chrome_api.dart @@ -1,6 +1,6 @@ // ignore_for_file: public_member_api_docs @JS('chrome') -library; +library chrome; import 'package:js/js.dart'; diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index 138544a4..5050c14b 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: args - sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" + sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596 url: "https://pub.dev" source: hosted - version: "2.5.0" + version: "2.4.2" async: dependency: transitive description: @@ -29,10 +29,10 @@ packages: dependency: transitive description: name: bloc - sha256: "106842ad6569f0b60297619e9e0b1885c2fb9bf84812935490e6c5275777804e" + sha256: "3820f15f502372d979121de1f6b97bfcf1630ebff8fe1d52fb2b0bfa49be5b49" url: "https://pub.dev" source: hosted - version: "8.1.4" + version: "8.1.2" characters: dependency: transitive description: @@ -53,10 +53,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" + sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" url: "https://pub.dev" source: hosted - version: "0.3.4+2" + version: "0.3.4+1" crypto: dependency: transitive description: @@ -93,26 +93,26 @@ packages: dependency: transitive description: name: fetch_api - sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" + sha256: "77f3be8c9acaa64ed37dd49c21c056da71b78053d31131ca26a273884a753f66" url: "https://pub.dev" source: hosted - version: "2.2.0" + version: "2.0.0-wasm" fetch_client: dependency: transitive description: name: fetch_client - sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" + sha256: e11722d7d8cd21f944b52af780392274f7c34a41156b1c80053fc2a414e09a1b url: "https://pub.dev" source: hosted - version: "1.1.2" + version: "1.1.0-wasm" ffi: dependency: transitive description: name: ffi - sha256: "16ed7b077ef01ad6170a3d0c57caa4a112a38d7a2ed5602e0aca9ca6f3d98da6" + sha256: "7bf0adc28a23d395f19f3f1eb21dd7cfd1dd9f8e1c50051c069122e6853bc878" url: "https://pub.dev" source: hosted - version: "2.1.3" + version: "2.1.0" file: dependency: transitive description: @@ -129,14 +129,6 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.0" - flat_buffers: - dependency: transitive - description: - name: flat_buffers - sha256: "380bdcba5664a718bfd4ea20a45d39e13684f5318fcd8883066a55e21f37f4c3" - url: "https://pub.dev" - source: hosted - version: "23.5.26" flutter: dependency: "direct main" description: flutter @@ -146,18 +138,18 @@ packages: dependency: "direct main" description: name: flutter_bloc - sha256: b594505eac31a0518bdcb4b5b79573b8d9117b193cc80cc12e17d639b10aa27a + sha256: f0ecf6e6eb955193ca60af2d5ca39565a86b8a142452c5b24d96fb477428f4d2 url: "https://pub.dev" source: 
hosted - version: "8.1.6" + version: "8.1.5" flutter_markdown: dependency: "direct main" description: name: flutter_markdown - sha256: a23c41ee57573e62fc2190a1f36a0480c4d90bde3a8a8d7126e5d5992fb53fb7 + sha256: "87e11b9df25a42e2db315b8b7a51fae8e66f57a4b2f50ec4b822d0fa155e6b52" url: "https://pub.dev" source: hosted - version: "0.7.3+1" + version: "0.6.22" flutter_web_plugins: dependency: transitive description: flutter @@ -167,10 +159,10 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 + sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d url: "https://pub.dev" source: hosted - version: "2.4.4" + version: "2.4.1" html: dependency: transitive description: @@ -183,10 +175,10 @@ packages: dependency: transitive description: name: http - sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 + sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" url: "https://pub.dev" source: hosted - version: "1.2.2" + version: "1.2.1" http_parser: dependency: transitive description: @@ -215,46 +207,46 @@ packages: dependency: transitive description: name: json_annotation - sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" + sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 url: "https://pub.dev" source: hosted - version: "4.9.0" + version: "4.8.1" json_path: dependency: transitive description: name: json_path - sha256: "7a06bbb1cfad390b20fb7a2ca5e67d9ba59633879c6d71142b80fbf61c3b66f6" + sha256: "149d32ceb7dc22422ea6d09e401fd688f54e1343bc9ff8c3cb1900ca3b1ad8b1" url: "https://pub.dev" source: hosted - version: "0.7.4" + version: "0.7.1" langchain: dependency: "direct main" description: path: "../../packages/langchain" relative: true source: path - version: "0.7.6" + version: "0.7.1" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.2" + version: "0.2.0+1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.6" + version: "0.3.1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.2" + version: "0.6.1" langchain_tiktoken: dependency: transitive description: @@ -267,26 +259,26 @@ packages: dependency: transitive description: name: markdown - sha256: ef2a1298144e3f985cc736b22e0ccdaf188b5b3970648f2d9dc13efd1d9df051 + sha256: acf35edccc0463a9d7384e437c015a3535772e09714cf60e07eeef3a15870dcd url: "https://pub.dev" source: hosted - version: "7.2.2" + version: "7.1.1" material_color_utilities: dependency: transitive description: name: material_color_utilities - sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec + sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" url: "https://pub.dev" source: hosted - version: "0.11.1" + version: "0.8.0" math_expressions: dependency: transitive description: name: math_expressions - sha256: e32d803d758ace61cc6c4bdfed1226ff60a6a23646b35685670d28b5616139f8 + sha256: "3576593617c3870d75728a751f6ec6e606706d44e363f088ac394b5a28a98064" url: "https://pub.dev" source: hosted - version: "2.6.0" + version: "2.4.0" maybe_just_nothing: dependency: transitive description: @@ -299,10 +291,10 @@ packages: dependency: transitive description: name: meta - sha256: 
bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 url: "https://pub.dev" source: hosted - version: "1.15.0" + version: "1.11.0" nested: dependency: transitive description: @@ -311,21 +303,13 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.0" - objectbox: - dependency: transitive - description: - name: objectbox - sha256: "70ff2a7538f6f8bb56136734d574f5bdc1cf29c50cd7207a14ea0c641ecb88ca" - url: "https://pub.dev" - source: hosted - version: "4.0.1" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.2" + version: "0.3.2" path: dependency: transitive description: @@ -354,10 +338,10 @@ packages: dependency: transitive description: name: path_provider_windows - sha256: bd6f00dbd873bfb70d0761682da2b3a2c2fccc2b9e84c495821639601d81afe7 + sha256: "8bc9f22eee8690981c22aa7fc602f5c85b497a6fb2ceb35ee5a5e5ed85ad8170" url: "https://pub.dev" source: hosted - version: "2.3.0" + version: "2.2.1" petitparser: dependency: transitive description: @@ -370,10 +354,10 @@ packages: dependency: transitive description: name: platform - sha256: "9b71283fc13df574056616011fb138fd3b793ea47cc509c189a6c3fa5f8a1a65" + sha256: "12220bb4b65720483f8fa9450b4332347737cf8213dd2840d8b2c823e47243ec" url: "https://pub.dev" source: hosted - version: "3.1.5" + version: "3.1.4" plugin_platform_interface: dependency: transitive description: @@ -386,10 +370,10 @@ packages: dependency: transitive description: name: provider - sha256: c8a055ee5ce3fd98d6fc872478b03823ffdb448699c6ebdbbc71d59b596fd48c + sha256: "9a96a0a19b594dbc5bf0f1f27d2bc67d5f95957359b461cd9feb44ed6ae75096" url: "https://pub.dev" source: hosted - version: "6.1.2" + version: "6.1.1" rfc_6901: dependency: transitive description: @@ -402,66 +386,66 @@ packages: dependency: transitive description: name: rxdart - sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" + sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" url: "https://pub.dev" source: hosted - version: "0.28.0" + version: "0.27.7" shared_preferences: dependency: "direct main" description: name: shared_preferences - sha256: c272f9cabca5a81adc9b0894381e9c1def363e980f960fa903c604c471b22f68 + sha256: "81429e4481e1ccfb51ede496e916348668fd0921627779233bd24cc3ff6abd02" url: "https://pub.dev" source: hosted - version: "2.3.1" + version: "2.2.2" shared_preferences_android: dependency: transitive description: name: shared_preferences_android - sha256: a7e8467e9181cef109f601e3f65765685786c1a738a83d7fbbde377589c0d974 + sha256: "8568a389334b6e83415b6aae55378e158fbc2314e074983362d20c562780fb06" url: "https://pub.dev" source: hosted - version: "2.3.1" + version: "2.2.1" shared_preferences_foundation: dependency: transitive description: name: shared_preferences_foundation - sha256: c4b35f6cb8f63c147312c054ce7c2254c8066745125264f0c88739c417fc9d9f + sha256: "7bf53a9f2d007329ee6f3df7268fd498f8373602f943c975598bbb34649b62a7" url: "https://pub.dev" source: hosted - version: "2.5.2" + version: "2.3.4" shared_preferences_linux: dependency: transitive description: name: shared_preferences_linux - sha256: "580abfd40f415611503cae30adf626e6656dfb2f0cee8f465ece7b6defb40f2f" + sha256: "9f2cbcf46d4270ea8be39fa156d86379077c8a5228d9dfdb1164ae0bb93f1faa" url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.3.2" shared_preferences_platform_interface: dependency: transitive description: name: 
shared_preferences_platform_interface - sha256: "57cbf196c486bc2cf1f02b85784932c6094376284b3ad5779d1b1c6c6a816b80" + sha256: "22e2ecac9419b4246d7c22bfbbda589e3acf5c0351137d87dd2939d984d37c3b" url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.3.2" shared_preferences_web: dependency: transitive description: name: shared_preferences_web - sha256: d2ca4132d3946fec2184261726b355836a82c33d7d5b67af32692aff18a4684e + sha256: "9aee1089b36bd2aafe06582b7d7817fd317ef05fc30e6ba14bff247d0933042a" url: "https://pub.dev" source: hosted - version: "2.4.2" + version: "2.3.0" shared_preferences_windows: dependency: transitive description: name: shared_preferences_windows - sha256: "94ef0f72b2d71bc3e700e025db3710911bd51a71cefb65cc609dd0d9a982e3c1" + sha256: "841ad54f3c8381c480d0c9b508b89a34036f512482c407e6df7a9c4aa2ef8f59" url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.3.2" sky_engine: dependency: transitive description: flutter @@ -487,17 +471,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" + sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" url: "https://pub.dev" source: hosted - version: "1.3.0" - tavily_dart: - dependency: "direct overridden" - description: - path: "../../packages/tavily_dart" - relative: true - source: path - version: "0.1.0" + version: "1.2.0" term_glyph: dependency: transitive description: @@ -518,10 +495,10 @@ packages: dependency: transitive description: name: uuid - sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" + sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 url: "https://pub.dev" source: hosted - version: "4.4.2" + version: "4.3.3" vector_math: dependency: transitive description: @@ -534,10 +511,18 @@ packages: dependency: transitive description: name: web - sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 + sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" url: "https://pub.dev" source: hosted - version: "1.0.0" + version: "0.5.1" + win32: + dependency: transitive + description: + name: win32 + sha256: "464f5674532865248444b4c3daca12bd9bf2d7c47f759ce2617986e7229494a8" + url: "https://pub.dev" + source: hosted + version: "5.2.0" xdg_directories: dependency: transitive description: @@ -547,5 +532,5 @@ packages: source: hosted version: "1.0.4" sdks: - dart: ">=3.4.0 <4.0.0" - flutter: ">=3.22.0" + dart: ">=3.3.0 <4.0.0" + flutter: ">=3.19.0" diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 42a5999e..9a2c4936 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -4,19 +4,19 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: flutter: sdk: flutter equatable: ^2.0.5 - flutter_bloc: ^8.1.6 - flutter_markdown: ^0.7.3 + flutter_bloc: ^8.1.5 + flutter_markdown: ^0.6.22 js: ^0.7.1 - langchain: ^0.7.6 - langchain_community: 0.3.2 - langchain_openai: ^0.7.2 - shared_preferences: ^2.3.0 + langchain: ^0.7.1 + langchain_community: 0.2.0+1 + langchain_openai: ^0.6.1 + shared_preferences: ^2.2.2 flutter: uses-material-design: true diff --git a/examples/browser_summarizer/pubspec_overrides.yaml b/examples/browser_summarizer/pubspec_overrides.yaml index 808fbc3a..3947b2ae 100644 --- a/examples/browser_summarizer/pubspec_overrides.yaml +++ 
b/examples/browser_summarizer/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,langchain_openai,openai_dart,tavily_dart +# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_community dependency_overrides: langchain: path: ../../packages/langchain @@ -10,5 +10,3 @@ dependency_overrides: path: ../../packages/langchain_openai openai_dart: path: ../../packages/openai_dart - tavily_dart: - path: ../../packages/tavily_dart diff --git a/examples/docs_examples/README.md b/examples/docs_examples/README.md index 6ec73e85..a2dc3095 100644 --- a/examples/docs_examples/README.md +++ b/examples/docs_examples/README.md @@ -1,3 +1,3 @@ # Docs examples -Examples used in https://langchaindart.dev documentation. +Examples used in https://langchaindart.com documentation. diff --git a/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart b/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart index 21cea3b4..f34fab19 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart @@ -32,7 +32,7 @@ Future _promptTemplateLLM() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-4o-mini, + // model: gpt-3.5-turbo-0125, // created: 1714835666, // system_fingerprint: fp_3b956da36b // }, @@ -65,7 +65,7 @@ Future _attachingStopSequences() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-4o-mini, + // model: gpt-3.5-turbo-0125, // created: 1714835734, // system_fingerprint: fp_a450710239 // }, @@ -133,7 +133,7 @@ Future _attachingToolCallInformation() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-4o-mini, + // model: gpt-3.5-turbo-0125, // created: 1714835806, // system_fingerprint: fp_3b956da36b // }, diff --git a/examples/docs_examples/bin/expression_language/cookbook/routing.dart b/examples/docs_examples/bin/expression_language/cookbook/routing.dart index 1f2232a0..d177611d 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/routing.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/routing.dart @@ -9,7 +9,7 @@ void main(final List arguments) async { Future _runnableRouter() async { final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), + defaultOptions: const ChatOllamaOptions(model: 'llama3'), ); final classificationChain = PromptTemplate.fromTemplate(''' @@ -114,7 +114,7 @@ Here is a question: '''; final embeddings = OllamaEmbeddings( - model: 'llama3.2', + model: 'llama3', ); final promptTemplates = [physicsTemplate, historyTemplate]; final promptEmbeddings = await embeddings.embedDocuments( @@ -132,7 +132,7 @@ Here is a question: return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]); }) | ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), + defaultOptions: const ChatOllamaOptions(model: 'llama3'), ) | const StringOutputParser(); diff --git a/examples/docs_examples/bin/expression_language/cookbook/streaming.dart b/examples/docs_examples/bin/expression_language/cookbook/streaming.dart index d6b8cdae..7af0bb43 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/streaming.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/streaming.dart @@ -33,7 +33,7 @@ Future _languageModels() async { // }, // 
finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-4o-mini, + // model: gpt-3.5-turbo-0125, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, @@ -49,7 +49,7 @@ Future _languageModels() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-4o-mini, + // model: gpt-3.5-turbo-0125, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, @@ -79,7 +79,9 @@ Future _inputStreams() async { final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat.jsonObject, + responseFormat: ChatOpenAIResponseFormat( + type: ChatOpenAIResponseFormatType.jsonObject, + ), ), ); final parser = JsonOutputParser(); @@ -123,7 +125,9 @@ Future _inputStreamMapper() async { final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat.jsonObject, + responseFormat: ChatOpenAIResponseFormat( + type: ChatOpenAIResponseFormatType.jsonObject, + ), ), ); final parser = JsonOutputParser(); diff --git a/examples/docs_examples/bin/expression_language/fallbacks.dart b/examples/docs_examples/bin/expression_language/fallbacks.dart deleted file mode 100644 index 8eea7bb2..00000000 --- a/examples/docs_examples/bin/expression_language/fallbacks.dart +++ /dev/null @@ -1,181 +0,0 @@ -// ignore_for_file: avoid_print -import 'dart:io'; - -import 'package:langchain/langchain.dart'; -import 'package:langchain_openai/langchain_openai.dart'; - -void main() async { - await _modelWithFallbacks(); - await _modelWithMultipleFallbacks(); - await _chainWithFallbacks(); -} - -Future _modelWithFallbacks() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - - final fakeOpenAIModel = ChatOpenAI( - defaultOptions: const ChatOpenAIOptions(model: 'tomato'), - ); - - final latestModel = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), - ); - - final modelWithFallbacks = fakeOpenAIModel.withFallbacks([latestModel]); - - final prompt = PromptValue.string('Explain why sky is blue in 2 lines'); - - final res = await modelWithFallbacks.invoke(prompt); - print(res); -/* -{ - "ChatResult": { - "id": "chatcmpl-9nKBcFNkzo5qUrdNB92b36J0d1meA", - "output": { - "AIChatMessage": { - "content": "The sky appears blue because molecules in the Earth's atmosphere scatter shorter wavelength blue light from the sun more effectively than longer wavelengths like red. 
This scattering process is known as Rayleigh scattering.", - "toolCalls": [] - } - }, - "finishReason": "FinishReason.stop", - "metadata": { - "model": "gpt-4o-2024-05-13", - "created": 1721542696, - "system_fingerprint": "fp_400f27fa1f" - }, - "usage": { - "LanguageModelUsage": { - "promptTokens": 16, - "promptBillableCharacters": null, - "responseTokens": 36, - "responseBillableCharacters": null, - "totalTokens": 52 - } - }, - "streaming": false - } -} -*/ -} - -Future _modelWithMultipleFallbacks() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - - final fakeOpenAIModel1 = - ChatOpenAI(defaultOptions: const ChatOpenAIOptions(model: 'tomato')); - - final fakeOpenAIModel2 = - ChatOpenAI(defaultOptions: const ChatOpenAIOptions(model: 'potato')); - - final latestModel = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), - ); - - final modelWithFallbacks = - fakeOpenAIModel1.withFallbacks([fakeOpenAIModel2, latestModel]); - - final prompt = PromptValue.string('Explain why sky is blue in 2 lines'); - - final res = await modelWithFallbacks.invoke(prompt); - print(res); - /* - { - "id": "chatcmpl-9nLKW345nrh0nzmw18iO35XnoQ2jo", - "output": { - "content": "The sky appears blue due to Rayleigh scattering, where shorter blue wavelengths of sunlight are scattered more than other colors by the molecules in Earth's atmosphere. This scattering disperses blue light in all directions, making the sky look blue.", - "toolCalls": [] - }, - "finishReason": "FinishReason.stop", - "metadata": { - "model": "gpt-4o-2024-05-13", - "created": 1721547092, - "system_fingerprint": "fp_c4e5b6fa31" - }, - "usage": { - "promptTokens": 16, - "promptBillableCharacters": null, - "responseTokens": 45, - "responseBillableCharacters": null, - "totalTokens": 61 - }, - "streaming": false -} -*/ -} - -Future _chainWithFallbacks() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - - final fakeOpenAIModel = ChatOpenAI( - defaultOptions: const ChatOpenAIOptions(model: 'tomato'), - ); - - final latestModel = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), - ); - - final promptTemplate = - ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); - - final badChain = promptTemplate.pipe(fakeOpenAIModel); - final goodChain = promptTemplate.pipe(latestModel); - - final chainWithFallbacks = badChain.withFallbacks([goodChain]); - - final res = await chainWithFallbacks.batch( - [ - {'topic': 'bears'}, - {'topic': 'cats'}, - ], - ); - print(res); -/* -[ - { - "id": "chatcmpl-9nKncT4IpAxbUxrEqEKGB0XUeyGRI", - "output": { - "content": "Sure! How about this one?\n\nWhy did the bear bring a suitcase to the forest?\n\nBecause it wanted to pack a lunch! 
🐻🌲", - "toolCalls": [] - }, - "finishReason": "FinishReason.stop", - "metadata": { - "model": "gpt-4o-2024-05-13", - "created": 1721545052, - "system_fingerprint": "fp_400f27fa1f" - }, - "usage": { - "promptTokens": 13, - "promptBillableCharacters": null, - "responseTokens": 31, - "responseBillableCharacters": null, - "totalTokens": 44 - }, - "streaming": false - }, - { - "id": "chatcmpl-9nKnc58FpXFTPkzZfm2hHxJ5VSQQh", - "output": { - "content": "Sure, here's a cat joke for you:\n\nWhy was the cat sitting on the computer?\n\nBecause it wanted to keep an eye on the mouse!", - "toolCalls": [] - }, - "finishReason": "FinishReason.stop", - "metadata": { - "model": "gpt-4o-2024-05-13", - "created": 1721545052, - "system_fingerprint": "fp_c4e5b6fa31" - }, - "usage": { - "promptTokens": 13, - "promptBillableCharacters": null, - "responseTokens": 29, - "responseBillableCharacters": null, - "totalTokens": 42 - }, - "streaming": false - } -] -*/ -} diff --git a/examples/docs_examples/bin/expression_language/get_started.dart b/examples/docs_examples/bin/expression_language/get_started.dart index c3ecbd1f..5ccc2505 100644 --- a/examples/docs_examples/bin/expression_language/get_started.dart +++ b/examples/docs_examples/bin/expression_language/get_started.dart @@ -82,7 +82,7 @@ Future _promptModelOutputParser() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-4o-mini, + // model: gpt-3.5-turbo-0125, // created: 1714327251, // system_fingerprint: fp_3b956da36b // }, diff --git a/examples/docs_examples/bin/expression_language/interface.dart b/examples/docs_examples/bin/expression_language/interface.dart index f2a634b7..f678f18a 100644 --- a/examples/docs_examples/bin/expression_language/interface.dart +++ b/examples/docs_examples/bin/expression_language/interface.dart @@ -96,7 +96,7 @@ Future _runnableInterfaceBatchOptions() async { {'topic': 'cats'}, ], options: [ - const ChatOpenAIOptions(model: 'gpt-4o-mini', temperature: 0.5), + const ChatOpenAIOptions(model: 'gpt-3.5-turbo', temperature: 0.5), const ChatOpenAIOptions(model: 'gpt-4', temperature: 0.7), ], ); diff --git a/examples/docs_examples/bin/expression_language/primitives/binding.dart b/examples/docs_examples/bin/expression_language/primitives/binding.dart index d16d81d8..1c456ef7 100644 --- a/examples/docs_examples/bin/expression_language/primitives/binding.dart +++ b/examples/docs_examples/bin/expression_language/primitives/binding.dart @@ -63,7 +63,7 @@ Future _differentModels() async { chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4-turbo')) | outputParser, 'q2': prompt2 | - chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4o-mini')) | + chatModel.bind(const ChatOpenAIOptions(model: 'gpt-3.5-turbo')) | outputParser, }); final res = await chain.invoke({'name': 'David'}); diff --git a/examples/docs_examples/bin/expression_language/primitives/function.dart b/examples/docs_examples/bin/expression_language/primitives/function.dart index 029322bb..8c631877 100644 --- a/examples/docs_examples/bin/expression_language/primitives/function.dart +++ b/examples/docs_examples/bin/expression_language/primitives/function.dart @@ -73,7 +73,7 @@ Future _function() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-4o-mini, + // model: gpt-3.5-turbo-0125, // created: 1714463309, // system_fingerprint: fp_3b956da36b // }, @@ -116,7 +116,7 @@ Future _function() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-4o-mini, + // model: gpt-3.5-turbo-0125, // 
created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -135,7 +135,7 @@ Future _function() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-4o-mini, + // model: gpt-3.5-turbo-0125, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -154,7 +154,7 @@ Future _function() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-4o-mini, + // model: gpt-3.5-turbo-0125, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, diff --git a/examples/docs_examples/bin/expression_language/primitives/mapper.dart b/examples/docs_examples/bin/expression_language/primitives/mapper.dart index c9d0400a..818ed0d7 100644 --- a/examples/docs_examples/bin/expression_language/primitives/mapper.dart +++ b/examples/docs_examples/bin/expression_language/primitives/mapper.dart @@ -63,7 +63,9 @@ Future _mapInputStream() async { final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat.jsonObject, + responseFormat: ChatOpenAIResponseFormat( + type: ChatOpenAIResponseFormatType.jsonObject, + ), ), ); final parser = JsonOutputParser(); diff --git a/examples/docs_examples/bin/expression_language/primitives/retry.dart b/examples/docs_examples/bin/expression_language/primitives/retry.dart deleted file mode 100644 index 917ac501..00000000 --- a/examples/docs_examples/bin/expression_language/primitives/retry.dart +++ /dev/null @@ -1,177 +0,0 @@ -// ignore_for_file: avoid_print -import 'dart:io'; -import 'package:langchain/langchain.dart'; -import 'package:langchain_openai/langchain_openai.dart'; - -void main() async { - await _modelWithRetry(); - await _chainWithRetry(); - await _withRetryOptions(); - await _withDelayDurations(); -} - -Future _modelWithRetry() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - final model = ChatOpenAI(apiKey: openaiApiKey); - final input = PromptValue.string('Explain why sky is blue in 2 lines'); - - final modelWithRetry = model.withRetry(); - final res = await modelWithRetry.invoke(input); - print(res); - /* - ChatResult{ - id: chatcmpl-9zmFYnu19Pd6ss3zVFHlKN71DILtx, - output: AIChatMessage{ - content: The sky appears blue due to Rayleigh scattering, where shorter wavelengths of sunlight (blue light) are scattered more than longer wavelengths (red light) by the molecules in the Earth's atmosphere. This scattering effect is most prominent when the sun is high in the sky., - toolCalls: [], -}, - finishReason: FinishReason.stop, - metadata: {model: gpt-4o-mini-2024-07-18, created: 1724510508, system_fingerprint: fp_48196bc67a}, - usage: LanguageModelUsage{ - promptTokens: 16, - promptBillableCharacters: null, - responseTokens: 52, - responseBillableCharacters: null, - totalTokens: 68} -, - streaming: false -} -*/ -} - -Future _chainWithRetry() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - final promptTemplate = - ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); - final model = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), - ); - final chain = promptTemplate.pipe(model).withRetry(); - - final res = await chain.batch( - [ - {'topic': 'bears'}, - {'topic': 'cats'}, - ], - ); - print(res); - /* - [ChatResult{ - id: chatcmpl-9zmjiMfHP2WP3PhM6YXdoHXS02ZAm, - output: AIChatMessage{ - content: Sure, here's a bear-themed joke for you: - -Why did the bear refuse to play cards? 
- -Because he was afraid he might get spotted—he couldn’t bear the tension! 🐻♠️, - toolCalls: [], -}, - finishReason: FinishReason.stop, - metadata: {model: gpt-4o-2024-05-13, created: 1724512378, system_fingerprint: fp_3aa7262c27}, - usage: LanguageModelUsage{ - promptTokens: 13, - promptBillableCharacters: null, - responseTokens: 41, - responseBillableCharacters: null, - totalTokens: 54} -, - streaming: false -}, ChatResult{ - id: chatcmpl-9zmji1gxCZ4yR3UtX7Af4TBrRhPP1, - output: AIChatMessage{ - content: Sure, here's one for you: - -Why did the cat sit on the computer? - -Because it wanted to keep an eye on the mouse! 🐱🖱️, - toolCalls: [], -}, - finishReason: FinishReason.stop, - metadata: {model: gpt-4o-2024-05-13, created: 1724512378, system_fingerprint: fp_c9aa9c0491}, - usage: LanguageModelUsage{ - promptTokens: 13, - promptBillableCharacters: null, - responseTokens: 34, - responseBillableCharacters: null, - totalTokens: 47} -, - streaming: false -}] -*/ -} - -Future _withRetryOptions() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - final input = PromptValue.string('Explain why sky is blue in 2 lines'); - final model = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions(model: 'fake-model'), - ); - final modelWithRetry = model.withRetry( - maxRetries: 3, - addJitter: true, - ); - final res = await modelWithRetry.invoke(input); - print(res); - /* - retry attempt 0 with delay duration 0:00:01.082000 - retry attempt 1 with delay duration 0:00:02.073000 - retry attempt 2 with delay duration 0:00:04.074000 - Unhandled exception: - Exception: Function failed to return response and max retries exceeded, Error: OpenAIClientException({ - "uri": "https://api.openai.com/v1/chat/completions", - "method": "POST", - "code": 404, - "message": "Unsuccessful response", - "body": { - "error": { - "message": "The model `fake-model` does not exist or you do not have access to it.", - "type": "invalid_request_error", - "param": null, - "code": "model_not_found" - } - } -})*/ -} - -Future _withDelayDurations() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - final input = PromptValue.string('Explain why sky is blue in 2 lines'); - final model = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions(model: 'fake-model'), - ); - final modelWithRetry = model.withRetry( - maxRetries: 3, - addJitter: false, - delayDurations: const [ - Duration(seconds: 1), - Duration(seconds: 2), - Duration(seconds: 3), - ], - ); - final res = await modelWithRetry.invoke(input); - print(res); - // retried with delays provided in RetryOptions - /* -retry attempt 0 with delay duration 0:00:01.000000 -retry attempt 1 with delay duration 0:00:02.000000 -retry attempt 2 with delay duration 0:00:03.000000 -Unhandled exception: -Exception: Function failed to return response and max retries exceeded, Error: OpenAIClientException({ - "uri": "https://api.openai.com/v1/chat/completions", - "method": "POST", - "code": 401, - "message": "Unsuccessful response", - "body": { - "error": { - "message": "You didn't provide an API key. You need to provide your API key in an Authorization header using Bearer auth (i.e. Authorization: Bearer YOUR_KEY), or as the password field (with blank username) if you're accessing the API from your browser and are prompted for a username and password. 
You can obtain an API key from https://platform.openai.com/account/api-keys.", - "type": "invalid_request_error", - "param": null, - "code": null - } - } -}) -*/ -} diff --git a/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart b/examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart similarity index 64% rename from examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart rename to examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart index b934c0a7..16d0b44f 100644 --- a/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart +++ b/examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart @@ -3,34 +3,33 @@ import 'dart:io'; import 'package:langchain/langchain.dart'; import 'package:langchain_community/langchain_community.dart'; -import 'package:langchain_ollama/langchain_ollama.dart'; import 'package:langchain_openai/langchain_openai.dart'; void main() async { - await _toolsAgent(); - await _toolsAgentCustomToolsMemory(); - await _toolsAgentLCEL(); + await _openAIToolsAgent(); + await _openAIToolsAgentCustomToolsMemory(); + await _openAIToolsAgentLCEL(); } -Future _toolsAgent() async { - final llm = ChatOllama( - defaultOptions: const ChatOllamaOptions( - model: 'llama3.2', +Future _openAIToolsAgent() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final llm = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions( + model: 'gpt-4-turbo', temperature: 0, ), ); final tool = CalculatorTool(); - final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); + final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); final executor = AgentExecutor(agent: agent); - final res = await executor.run( - 'What is 40 raised to the power of 0.43? ' - 'Return the result with 3 decimals.', - ); - print(res); - // The result is: 4.885 + final res = await executor.run('What is 40 raised to the 0.43 power? '); + print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' } -Future _toolsAgentCustomToolsMemory() async { +Future _openAIToolsAgentCustomToolsMemory() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final tool = Tool.fromFunction( name: 'search', description: 'Tool for searching the web.', @@ -42,7 +41,7 @@ Future _toolsAgentCustomToolsMemory() async { 'description': 'The query to search for', }, 'n': { - 'type': 'integer', + 'type': 'number', 'description': 'The number of results to return', }, }, @@ -52,15 +51,13 @@ Future _toolsAgentCustomToolsMemory() async { getInputFromJson: SearchInput.fromJson, ); - final llm = ChatOllama( - defaultOptions: const ChatOllamaOptions( - model: 'llama3-groq-tool-use', - temperature: 0, - ), + final llm = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(temperature: 0), ); final memory = ConversationBufferMemory(returnMessages: true); - final agent = ToolsAgent.fromLLMAndTools( + final agent = OpenAIToolsAgent.fromLLMAndTools( llm: llm, tools: [tool], memory: memory, @@ -69,7 +66,7 @@ Future _toolsAgentCustomToolsMemory() async { final executor = AgentExecutor(agent: agent); final res1 = await executor.run( - 'Search for cat names. Return only 3 results.', + 'Search for cats. 
Return only 3 results.', ); print(res1); // Here are 3 search results for "cats": @@ -95,16 +92,11 @@ class SearchInput { } String callYourSearchFunction(final SearchInput input) { - final n = input.n; - final res = List.generate( - n, - (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}', - ); - return 'Results:\n${res.join('\n')}'; + return 'Results:\n${List.generate(input.n, (final i) => 'Result ${i + 1}').join('\n')}'; } -Future _toolsAgentLCEL() async { - final openAiKey = Platform.environment['OPENAI_API_KEY']; +Future _openAIToolsAgentLCEL() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; final prompt = ChatPromptTemplate.fromTemplates(const [ (ChatMessageType.system, 'You are a helpful assistant'), @@ -115,19 +107,18 @@ Future _toolsAgentLCEL() async { final tool = CalculatorTool(); final model = ChatOpenAI( - apiKey: openAiKey, + apiKey: openaiApiKey, defaultOptions: ChatOpenAIOptions( - model: 'gpt-4o-mini', temperature: 0, tools: [tool], ), ); - const outputParser = ToolsAgentOutputParser(); + const outputParser = OpenAIToolsAgentOutputParser(); List buildScratchpad(final List intermediateSteps) { return intermediateSteps - .map((s) { + .map((final s) { return s.action.messageLog + [ ChatMessage.tool( @@ -136,13 +127,13 @@ Future _toolsAgentLCEL() async { ), ]; }) - .expand((m) => m) + .expand((final m) => m) .toList(growable: false); } final agent = Agent.fromRunnable( Runnable.mapInput( - (AgentPlanInput planInput) => { + (final AgentPlanInput planInput) => { 'input': planInput.inputs['input'], 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), }, @@ -152,9 +143,8 @@ Future _toolsAgentLCEL() async { final executor = AgentExecutor(agent: agent); final res = await executor.invoke({ - 'input': 'What is 40 raised to the power of 0.43? ' - 'Return the result with 3 decimals.', + 'input': 'What is 40 raised to the 0.43 power?', }); print(res['output']); - // The result of 40 raised to the power of 0.43 is approximately 4.885. + // 40 raised to the power of 0.43 is approximately 4.88524. } diff --git a/examples/docs_examples/bin/modules/agents/tools/calculator.dart b/examples/docs_examples/bin/modules/agents/tools/calculator.dart index acab2d65..5d92dc27 100644 --- a/examples/docs_examples/bin/modules/agents/tools/calculator.dart +++ b/examples/docs_examples/bin/modules/agents/tools/calculator.dart @@ -15,7 +15,7 @@ void main() async { ), ); final tool = CalculatorTool(); - final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); + final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); final executor = AgentExecutor(agent: agent); final res = await executor.run('What is 40 raised to the 0.43 power? '); print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' diff --git a/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart b/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart index 7144ea82..f62d7bde 100644 --- a/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart +++ b/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart @@ -18,7 +18,7 @@ void main() async { CalculatorTool(), OpenAIDallETool(apiKey: openAiKey), ]; - final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); + final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); final executor = AgentExecutor(agent: agent); final res = await executor.run( 'Calculate the result of 40 raised to the power of 0.43 and generate a funny illustration with it. 
' diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart deleted file mode 100644 index 45c1cd55..00000000 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart +++ /dev/null @@ -1,109 +0,0 @@ -// ignore_for_file: avoid_print -import 'dart:convert'; -import 'dart:io'; - -import 'package:langchain/langchain.dart'; -import 'package:langchain_anthropic/langchain_anthropic.dart'; - -void main(final List arguments) async { - await _invokeModel(); - await _multiModal(); - await _streaming(); -} - -Future _invokeModel() async { - final apiKey = Platform.environment['ANTHROPIC_API_KEY']; - - final chatModel = ChatAnthropic( - apiKey: apiKey, - defaultOptions: const ChatAnthropicOptions( - model: 'claude-3-5-sonnet-20240620', - temperature: 0, - ), - ); - - final chatPrompt = ChatPromptTemplate.fromTemplates(const [ - ( - ChatMessageType.system, - 'You are a helpful assistant that translates {input_language} to {output_language}.' - ), - (ChatMessageType.human, 'Text to translate:\n{text}'), - ]); - - final chain = chatPrompt | chatModel | const StringOutputParser(); - - final res = await chain.invoke({ - 'input_language': 'English', - 'output_language': 'French', - 'text': 'I love programming.', - }); - print(res); - // -> 'J'adore programmer.' - - chatModel.close(); -} - -Future _multiModal() async { - final apiKey = Platform.environment['ANTHROPIC_API_KEY']; - - final chatModel = ChatAnthropic( - apiKey: apiKey, - defaultOptions: const ChatAnthropicOptions( - model: 'claude-3-5-sonnet-20240620', - temperature: 0, - ), - ); - final res = await chatModel.invoke( - PromptValue.chat([ - ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - mimeType: 'image/jpeg', - data: base64.encode( - await File('./bin/assets/apple.jpeg').readAsBytes(), - ), - ), - ]), - ), - ]), - ); - print(res.output.content); - // -> 'The fruit in the image is an apple.' 
- - chatModel.close(); -} - -Future _streaming() async { - final apiKey = Platform.environment['ANTHROPIC_API_KEY']; - - final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - ( - ChatMessageType.system, - 'You are a helpful assistant that replies only with numbers ' - 'in order without any spaces or commas.', - ), - (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), - ]); - - final chatModel = ChatAnthropic( - apiKey: apiKey, - defaultOptions: const ChatAnthropicOptions( - model: 'claude-3-5-sonnet-20240620', - temperature: 0, - ), - ); - - final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser()); - - final stream = chain.stream({'max_num': '30'}); - await stream.forEach(print); - // 123 - // 456789101 - // 112131415161 - // 718192021222 - // 324252627282 - // 930 - - chatModel.close(); -} diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart index 3473a738..4e5cf3b5 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart @@ -8,11 +8,8 @@ import 'package:langchain_ollama/langchain_ollama.dart'; void main(final List arguments) async { await _chatOllama(); await _chatOllamaStreaming(); - await _chatOllamaMultimodal(); - await _chatOllamaToolCalling(); await _chatOllamaJsonMode(); - await _extraction(); - await _flights(); + await _chatOllamaMultimodal(); await _rag(); } @@ -27,7 +24,7 @@ Future _chatOllama() async { final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.2', + model: 'llama3', temperature: 0, ), ); @@ -54,7 +51,7 @@ Future _chatOllamaStreaming() async { ]); final chat = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.2', + model: 'llama3', temperature: 0, ), ); @@ -69,70 +66,6 @@ Future _chatOllamaStreaming() async { // 9 } -Future _chatOllamaMultimodal() async { - final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions( - model: 'llava', - temperature: 0, - ), - ); - final prompt = ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - data: base64.encode( - await File('./bin/assets/apple.jpeg').readAsBytes(), - ), - ), - ]), - ); - final res = await chatModel.invoke(PromptValue.chat([prompt])); - print(res.output.content); - // -> 'An Apple' -} - -Future _chatOllamaToolCalling() async { - const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and country, e.g. 
San Francisco, US', - }, - }, - 'required': ['location'], - }, - ); - - final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions( - model: 'llama3.2', - temperature: 0, - tools: [tool], - ), - ); - - final res = await chatModel.invoke( - PromptValue.string( - 'What’s the weather like in Boston and Madrid right now in celsius?', - ), - ); - print(res.output.toolCalls); - // [AIChatMessageToolCall{ - // id: a621064b-03b3-4ca6-8278-f37504901034, - // name: get_current_weather, - // arguments: {location: Boston, US}, - // }, - // AIChatMessageToolCall{ - // id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, - // name: get_current_weather, - // arguments: {location: Madrid, ES}, - // }] -} - Future _chatOllamaJsonMode() async { final promptTemplate = ChatPromptTemplate.fromTemplates(const [ ( @@ -143,7 +76,7 @@ Future _chatOllamaJsonMode() async { ]); final chat = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.2', + model: 'llama3', temperature: 0, format: OllamaResponseFormat.json, ), @@ -161,194 +94,32 @@ Future _chatOllamaJsonMode() async { // {Spain: 46735727, The Netherlands: 17398435, France: 65273538} } -Future _extraction() async { - const tool = ToolSpec( - name: 'information_extraction', - description: 'Extracts the relevant information from the passage', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'people': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': { - 'type': 'string', - 'description': 'The name of a person', - }, - 'height': { - 'type': 'number', - 'description': 'The height of the person in cm', - }, - 'hair_color': { - 'type': 'string', - 'description': 'The hair color of the person', - 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], - }, - }, - 'required': ['name', 'height', 'hair_color'], - }, - }, - }, - 'required': ['people'], - }, - ); - - final model = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llama3.2', - temperature: 0, - tools: const [tool], - toolChoice: ChatToolChoice.forced(name: tool.name), - ), - ); - - final promptTemplate = ChatPromptTemplate.fromTemplate(''' -Extract and save the relevant entities mentioned in the following passage together with their properties. - -Passage: -{input}'''); - - final chain = Runnable.getMapFromInput() - .pipe(promptTemplate) - .pipe(model) - .pipe(ToolsOutputParser()); - - final res = await chain.invoke( - 'Alex is 5 feet tall. ' - 'Claudia is 1 foot taller than Alex and jumps higher than him. 
' - 'Claudia has orange hair and Alex is blonde.', - ); - final extractedData = res.first.arguments; - print(extractedData); - // { - // people: [ - // { - // name: Alex, - // height: 152, - // hair_color: blonde - // }, - // { - // name: Claudia, - // height: 183, - // hair_color: orange - // } - // ] - // } -} - -// Simulates an API call to get flight times -// In a real application, this would fetch data from a live database or API -String getFlightTimes(String departure, String arrival) { - final flights = { - 'NYC-LAX': { - 'departure': '08:00 AM', - 'arrival': '11:30 AM', - 'duration': '5h 30m', - }, - 'LAX-NYC': { - 'departure': '02:00 PM', - 'arrival': '10:30 PM', - 'duration': '5h 30m', - }, - 'LHR-JFK': { - 'departure': '10:00 AM', - 'arrival': '01:00 PM', - 'duration': '8h 00m', - }, - 'JFK-LHR': { - 'departure': '09:00 PM', - 'arrival': '09:00 AM', - 'duration': '7h 00m', - }, - 'CDG-DXB': { - 'departure': '11:00 AM', - 'arrival': '08:00 PM', - 'duration': '6h 00m', - }, - 'DXB-CDG': { - 'departure': '03:00 AM', - 'arrival': '07:30 AM', - 'duration': '7h 30m', - }, - }; - - final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}'; - return jsonEncode(flights[key] ?? {'error': 'Flight not found'}); -} - -Future _flights() async { - const getFlightTimesTool = ToolSpec( - name: 'get_flight_times', - description: 'Get the flight times between two cities', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'departure': { - 'type': 'string', - 'description': 'The departure city (airport code)', - }, - 'arrival': { - 'type': 'string', - 'description': 'The arrival city (airport code)', - }, - }, - 'required': ['departure', 'arrival'], - }, - ); - +Future _chatOllamaMultimodal() async { final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3.2', + model: 'llava', temperature: 0, - tools: [getFlightTimesTool], ), ); - - final messages = [ - ChatMessage.humanText( - 'What is the flight time from New York (NYC) to Los Angeles (LAX)?', - ), - ]; - - // First API call: Send the query and function description to the model - final response = await chatModel.invoke(PromptValue.chat(messages)); - - messages.add(response.output); - - // Check if the model decided to use the provided function - if (response.output.toolCalls.isEmpty) { - print("The model didn't use the function. Its response was:"); - print(response.output.content); - return; - } - - // Process function calls made by the model - for (final toolCall in response.output.toolCalls) { - final functionResponse = getFlightTimes( - toolCall.arguments['departure'], - toolCall.arguments['arrival'], - ); - // Add function response to the conversation - messages.add( - ChatMessage.tool( - toolCallId: toolCall.id, - content: functionResponse, + final prompt = ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), ), - ); - } - - // Second API call: Get final response from the model - final finalResponse = await chatModel.invoke(PromptValue.chat(messages)); - print(finalResponse.output.content); - // The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes. + ]), + ); + final res = await chatModel.invoke(PromptValue.chat([prompt])); + print(res.output.content); + // -> 'An Apple' } Future _rag() async { // 1. 
Create a vector store and add documents to it final vectorStore = MemoryVectorStore( - embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), + embeddings: OllamaEmbeddings(model: 'llama3'), ); await vectorStore.addDocuments( documents: [ @@ -370,7 +141,7 @@ Future _rag() async { // 3. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), + defaultOptions: const ChatOllamaOptions(model: 'llama3'), ); final retriever = vectorStore.asRetriever( defaultOptions: const VectorStoreRetrieverOptions( diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart index f552e60b..439943c5 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart @@ -7,7 +7,6 @@ import 'package:langchain_openai/langchain_openai.dart'; void main(final List arguments) async { await _openRouter(); await _openRouterStreaming(); - await _openRouterStreamingTools(); } Future _openRouter() async { @@ -67,56 +66,3 @@ Future _openRouterStreaming() async { // 123 // 456789 } - -Future _openRouterStreamingTools() async { - final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; - - const tool = ToolSpec( - name: 'joke', - description: 'A joke', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'setup': { - 'type': 'string', - 'description': 'The setup for the joke', - }, - 'punchline': { - 'type': 'string', - 'description': 'The punchline to the joke', - }, - }, - 'required': ['location', 'punchline'], - }, - ); - final promptTemplate = ChatPromptTemplate.fromTemplate( - 'tell me a long joke about {foo}', - ); - final chat = ChatOpenAI( - apiKey: openRouterApiKey, - baseUrl: 'https://openrouter.ai/api/v1', - defaultOptions: ChatOpenAIOptions( - model: 'gpt-4o', - tools: const [tool], - toolChoice: ChatToolChoice.forced(name: 'joke'), - ), - ); - final outputParser = ToolsOutputParser(); - - final chain = promptTemplate.pipe(chat).pipe(outputParser); - - final stream = chain.stream({'foo': 'bears'}); - await for (final chunk in stream) { - final args = chunk.first.arguments; - print(args); - } - // {} - // {setup: } - // {setup: Why don't} - // {setup: Why don't bears} - // {setup: Why don't bears like fast food} - // {setup: Why don't bears like fast food?, punchline: } - // {setup: Why don't bears like fast food?, punchline: Because} - // {setup: Why don't bears like fast food?, punchline: Because they can't} - // {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} -} diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart index 2b6ea9df..6d302daf 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart @@ -131,7 +131,9 @@ Future _chatOpenAIJsonMode() async { defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', temperature: 0, - responseFormat: ChatOpenAIResponseFormat.jsonObject, + responseFormat: ChatOpenAIResponseFormat( + type: ChatOpenAIResponseFormatType.jsonObject, + ), ), ); final chain = 
llm.pipe(JsonOutputParser()); diff --git a/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart index aae53fa7..2095d341 100644 --- a/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart @@ -13,7 +13,7 @@ Future _ollama() async { ); final llm = Ollama( defaultOptions: const OllamaOptions( - model: 'llama3.2', + model: 'llama3', ), ); @@ -29,7 +29,7 @@ Future _ollamaStreaming() async { ); final llm = Ollama( defaultOptions: const OllamaOptions( - model: 'llama3.2', + model: 'llama3', ), ); const stringOutputParser = StringOutputParser(); diff --git a/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart b/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart index b921ec7d..8005f8d0 100644 --- a/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart +++ b/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart @@ -22,7 +22,9 @@ Future _invoke() async { apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', - responseFormat: ChatOpenAIResponseFormat.jsonObject, + responseFormat: ChatOpenAIResponseFormat( + type: ChatOpenAIResponseFormatType.jsonObject, + ), ), ); final parser = JsonOutputParser(); @@ -49,7 +51,9 @@ Future _streaming() async { apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', - responseFormat: ChatOpenAIResponseFormat.jsonObject, + responseFormat: ChatOpenAIResponseFormat( + type: ChatOpenAIResponseFormatType.jsonObject, + ), ), ); diff --git a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart deleted file mode 100644 index 92d419c9..00000000 --- a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart +++ /dev/null @@ -1,108 +0,0 @@ -import 'dart:io'; - -import 'package:langchain/langchain.dart'; -import 'package:langchain_community/langchain_community.dart'; -import 'package:langchain_ollama/langchain_ollama.dart'; - -void main() async { - await _rag(); -} - -Future _rag() async { - // 1. Instantiate vector store - final vectorStore = ObjectBoxVectorStore( - embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), - dimensions: 512, - directory: 'bin/modules/retrieval/vector_stores/integrations', - ); - - // 2. Load documents - const loader = WebBaseLoader([ - 'https://objectbox.io/on-device-vector-databases-and-edge-ai/', - 'https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0/', - 'https://objectbox.io/on-device-vector-database-for-dart-flutter/', - 'https://objectbox.io/evolution-of-search-traditional-vs-vector-search//', - ]); - final List docs = await loader.load(); - - // 3. Split docs into chunks - const splitter = RecursiveCharacterTextSplitter( - chunkSize: 500, - chunkOverlap: 0, - ); - final List chunkedDocs = await splitter.invoke(docs); - - // 4. Add documents to vector store - await vectorStore.addDocuments(documents: chunkedDocs); - - // 5. Construct a RAG prompt template - final promptTemplate = ChatPromptTemplate.fromTemplates(const [ - ( - ChatMessageType.system, - ''' -You are an assistant for question-answering tasks. - -Use the following pieces of retrieved context to answer the user question. 
- -Context: -{context} - -If you don't know the answer, just say that you don't know. -Use three sentences maximum and keep the answer concise. -Cite the source you used to answer the question. - -Example: -""" -One sentence [1]. Another sentence [2]. - -Sources: -[1] https://example.com/1 -[2] https://example.com/2 -""" -''' - ), - (ChatMessageType.human, '{question}'), - ]); - - // 6. Define the model to use and the vector store retriever - final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), - ); - final retriever = vectorStore.asRetriever(); - - // 7. Create a Runnable that combines the retrieved documents into a single string - final docCombiner = Runnable.mapInput, String>((docs) { - return docs - .map( - (final d) => ''' -Source: ${d.metadata['source']} -Title: ${d.metadata['title']} -Content: ${d.pageContent} ---- -''', - ) - .join('\n'); - }); - - // 8. Define the RAG pipeline - final chain = Runnable.fromMap({ - 'context': retriever.pipe(docCombiner), - 'question': Runnable.passthrough(), - }).pipe(promptTemplate).pipe(chatModel).pipe(const StringOutputParser()); - - // 9. Run the pipeline - final stream = chain.stream( - 'Which algorithm does ObjectBox Vector Search use? Can I use it in Flutter apps?', - ); - await stream.forEach(stdout.write); - // According to the sources provided, ObjectBox Vector Search uses the HNSW - // (Hierarchical Navigable Small World) algorithm [1]. - // - // And yes, you can use it in Flutter apps. The article specifically mentions - // that ObjectBox 4.0 introduces an on-device vector database for the - // Dart/Flutter platform [2]. - // - // Sources: - // [1] https://objectbox.io/first-on-device-vector-database-objectbox-4-0/ - // [2] https://objectbox.io/on-device-vector-database-for-dart-flutter/ -} diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index 1a928bf7..6234c279 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -5,25 +5,18 @@ packages: dependency: transitive description: name: _discoveryapis_commons - sha256: "113c4100b90a5b70a983541782431b82168b3cae166ab130649c36eb3559d498" + sha256: f8bb1fdbd77f3d5c1d62b5b0eca75fbf1e41bf4f6c62628f880582e2182ae45d url: "https://pub.dev" source: hosted - version: "1.0.7" - anthropic_sdk_dart: - dependency: "direct overridden" - description: - path: "../../packages/anthropic_sdk_dart" - relative: true - source: path - version: "0.1.0" + version: "1.0.6" args: dependency: transitive description: name: args - sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" + sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596 url: "https://pub.dev" source: hosted - version: "2.5.0" + version: "2.4.2" async: dependency: transitive description: @@ -54,23 +47,23 @@ packages: path: "../../packages/chromadb" relative: true source: path - version: "0.2.0+1" + version: "0.2.0" collection: dependency: transitive description: name: collection - sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf + sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a url: "https://pub.dev" source: hosted - version: "1.19.0" + version: "1.18.0" cross_file: dependency: transitive description: name: cross_file - sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" + sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" url: "https://pub.dev" source: hosted - version: "0.3.4+2" + version: "0.3.4+1" 
crypto: dependency: transitive description: @@ -99,26 +92,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" + sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" url: "https://pub.dev" source: hosted - version: "2.2.0" + version: "1.0.3" fetch_client: dependency: transitive description: name: fetch_client - sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" - url: "https://pub.dev" - source: hosted - version: "1.1.2" - ffi: - dependency: transitive - description: - name: ffi - sha256: "16ed7b077ef01ad6170a3d0c57caa4a112a38d7a2ed5602e0aca9ca6f3d98da6" + sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" url: "https://pub.dev" source: hosted - version: "2.1.3" + version: "1.0.2" fixnum: dependency: transitive description: @@ -127,62 +112,54 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.0" - flat_buffers: - dependency: transitive - description: - name: flat_buffers - sha256: "380bdcba5664a718bfd4ea20a45d39e13684f5318fcd8883066a55e21f37f4c3" - url: "https://pub.dev" - source: hosted - version: "23.5.26" freezed_annotation: dependency: transitive description: name: freezed_annotation - sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 + sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d url: "https://pub.dev" source: hosted - version: "2.4.4" + version: "2.4.1" gcloud: dependency: transitive description: name: gcloud - sha256: b8fbff52ff1cfdb2bb3c53eb039c0ee3745618632969b60ec25d55b31fbb36dd + sha256: e9501083036d5f94027ce5afddd8ddae9b04121cf2fc6036b2cdd5663b52fca7 url: "https://pub.dev" source: hosted - version: "0.8.13" + version: "0.8.12" google_generative_ai: dependency: transitive description: name: google_generative_ai - sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be + sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be url: "https://pub.dev" source: hosted - version: "0.4.4" + version: "0.4.0" google_identity_services_web: dependency: transitive description: name: google_identity_services_web - sha256: "5be191523702ba8d7a01ca97c17fca096822ccf246b0a9f11923a6ded06199b6" + sha256: "9482364c9f8b7bd36902572ebc3a7c2b5c8ee57a9c93e6eb5099c1a9ec5265d8" url: "https://pub.dev" source: hosted - version: "0.3.1+4" + version: "0.3.1+1" googleapis: dependency: transitive description: name: googleapis - sha256: "864f222aed3f2ff00b816c675edf00a39e2aaf373d728d8abec30b37bee1a81c" + sha256: "8a8c311723162af077ca73f94b823b97ff68770d966e29614d20baca9fdb490a" url: "https://pub.dev" source: hosted - version: "13.2.0" + version: "12.0.0" googleapis_auth: dependency: transitive description: name: googleapis_auth - sha256: befd71383a955535060acde8792e7efc11d2fccd03dd1d3ec434e85b68775938 + sha256: "1401a9e55f9e0f565d3eebb18d990290f53a12d38a5f7f0230b112895778a85b" url: "https://pub.dev" source: hosted - version: "1.6.0" + version: "1.5.1" html: dependency: transitive description: @@ -195,18 +172,18 @@ packages: dependency: transitive description: name: http - sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 + sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" url: "https://pub.dev" source: hosted - version: "1.2.2" + version: "1.2.1" http_parser: dependency: transitive description: name: http_parser - sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" + sha256: 
"2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" url: "https://pub.dev" source: hosted - version: "4.1.0" + version: "4.0.2" iregexp: dependency: transitive description: @@ -215,85 +192,86 @@ packages: url: "https://pub.dev" source: hosted version: "0.1.2" + js: + dependency: transitive + description: + name: js + sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 + url: "https://pub.dev" + source: hosted + version: "0.6.7" json_annotation: dependency: transitive description: name: json_annotation - sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" + sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 url: "https://pub.dev" source: hosted - version: "4.9.0" + version: "4.8.1" json_path: dependency: transitive description: name: json_path - sha256: "7a06bbb1cfad390b20fb7a2ca5e67d9ba59633879c6d71142b80fbf61c3b66f6" + sha256: "149d32ceb7dc22422ea6d09e401fd688f54e1343bc9ff8c3cb1900ca3b1ad8b1" url: "https://pub.dev" source: hosted - version: "0.7.4" + version: "0.7.1" langchain: dependency: "direct main" description: path: "../../packages/langchain" relative: true source: path - version: "0.7.6" - langchain_anthropic: - dependency: "direct main" - description: - path: "../../packages/langchain_anthropic" - relative: true - source: path - version: "0.1.1+2" + version: "0.7.1" langchain_chroma: dependency: "direct main" description: path: "../../packages/langchain_chroma" relative: true source: path - version: "0.2.1+3" + version: "0.2.0+4" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.3.2" + version: "0.2.0+1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.6" + version: "0.3.1" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.6.3+1" + version: "0.5.0" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.3+1" + version: "0.2.0+1" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.3.2" + version: "0.2.1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.2" + version: "0.6.1" langchain_tiktoken: dependency: transitive description: @@ -306,10 +284,10 @@ packages: dependency: transitive description: name: math_expressions - sha256: e32d803d758ace61cc6c4bdfed1226ff60a6a23646b35685670d28b5616139f8 + sha256: "3576593617c3870d75728a751f6ec6e606706d44e363f088ac394b5a28a98064" url: "https://pub.dev" source: hosted - version: "2.6.0" + version: "2.4.0" maybe_just_nothing: dependency: transitive description: @@ -322,39 +300,31 @@ packages: dependency: transitive description: name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 url: "https://pub.dev" source: hosted - version: "1.15.0" + version: "1.11.0" mistralai_dart: dependency: "direct overridden" description: path: "../../packages/mistralai_dart" relative: true source: path - version: "0.0.3+3" - objectbox: - dependency: transitive - description: - name: objectbox - sha256: 
"70ff2a7538f6f8bb56136734d574f5bdc1cf29c50cd7207a14ea0c641ecb88ca" - url: "https://pub.dev" - source: hosted - version: "4.0.1" + version: "0.0.3+1" ollama_dart: dependency: "direct overridden" description: path: "../../packages/ollama_dart" relative: true source: path - version: "0.2.2" + version: "0.1.0" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.2" + version: "0.3.2" path: dependency: transitive description: @@ -391,10 +361,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" + sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" url: "https://pub.dev" source: hosted - version: "0.28.0" + version: "0.27.7" source_span: dependency: transitive description: @@ -415,17 +385,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" + sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" url: "https://pub.dev" source: hosted - version: "1.3.0" - tavily_dart: - dependency: "direct overridden" - description: - path: "../../packages/tavily_dart" - relative: true - source: path - version: "0.1.0" + version: "1.2.0" term_glyph: dependency: transitive description: @@ -446,10 +409,10 @@ packages: dependency: transitive description: name: uuid - sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" + sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 url: "https://pub.dev" source: hosted - version: "4.4.2" + version: "4.3.3" vector_math: dependency: transitive description: @@ -464,14 +427,14 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0+2" + version: "0.1.0" web: dependency: transitive description: name: web - sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 + sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" url: "https://pub.dev" source: hosted - version: "1.0.0" + version: "0.5.1" sdks: - dart: ">=3.4.0 <4.0.0" + dart: ">=3.3.0 <4.0.0" diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 985f1d64..0b57edbc 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -1,17 +1,16 @@ name: docs_examples -description: Examples used in langchaindart.dev documentation. +description: Examples used in langchaindart.com documentation. 
version: 1.0.0 publish_to: none environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.6 - langchain_anthropic: ^0.1.1+2 - langchain_chroma: ^0.2.1+3 - langchain_community: 0.3.2 - langchain_google: ^0.6.3+1 - langchain_mistralai: ^0.2.3+1 - langchain_ollama: ^0.3.2 - langchain_openai: ^0.7.2 + langchain: ^0.7.1 + langchain_chroma: ^0.2.0+4 + langchain_community: 0.2.0+1 + langchain_google: ^0.5.0 + langchain_mistralai: ^0.2.0+1 + langchain_ollama: ^0.2.1 + langchain_openai: ^0.6.1 diff --git a/examples/docs_examples/pubspec_overrides.yaml b/examples/docs_examples/pubspec_overrides.yaml index 4060d466..e02da308 100644 --- a/examples/docs_examples/pubspec_overrides.yaml +++ b/examples/docs_examples/pubspec_overrides.yaml @@ -1,13 +1,9 @@ -# melos_managed_dependency_overrides: anthropic_sdk_dart,chromadb,langchain,langchain_anthropic,langchain_chroma,langchain_community,langchain_core,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,tavily_dart,vertex_ai +# melos_managed_dependency_overrides: chromadb,langchain,langchain_chroma,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai,langchain_core,langchain_community dependency_overrides: - anthropic_sdk_dart: - path: ../../packages/anthropic_sdk_dart chromadb: path: ../../packages/chromadb langchain: path: ../../packages/langchain - langchain_anthropic: - path: ../../packages/langchain_anthropic langchain_chroma: path: ../../packages/langchain_chroma langchain_community: @@ -28,7 +24,5 @@ dependency_overrides: path: ../../packages/ollama_dart openai_dart: path: ../../packages/openai_dart - tavily_dart: - path: ../../packages/tavily_dart vertex_ai: path: ../../packages/vertex_ai diff --git a/examples/hello_world_backend/README.md b/examples/hello_world_backend/README.md index 70208b7a..4f00582c 100644 --- a/examples/hello_world_backend/README.md +++ b/examples/hello_world_backend/README.md @@ -7,7 +7,7 @@ It exposes a REST API that given a list of topics, generates a sonnet about them The HTTP server is implemented using [package:shelf](https://pub.dev/packages/shelf). -You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) +You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.com/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) blog post. 
![Hello world backend](hello_world_backend.gif) diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index cbfd6954..3ef992b7 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -21,18 +21,18 @@ packages: dependency: transitive description: name: collection - sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf + sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a url: "https://pub.dev" source: hosted - version: "1.19.0" + version: "1.18.0" cross_file: dependency: transitive description: name: cross_file - sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" + sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" url: "https://pub.dev" source: hosted - version: "0.3.4+2" + version: "0.3.4+1" crypto: dependency: transitive description: @@ -45,18 +45,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" + sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" url: "https://pub.dev" source: hosted - version: "2.2.0" + version: "1.0.3" fetch_client: dependency: transitive description: name: fetch_client - sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" + sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" url: "https://pub.dev" source: hosted - version: "1.1.2" + version: "1.0.2" fixnum: dependency: transitive description: @@ -69,18 +69,18 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 + sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d url: "https://pub.dev" source: hosted - version: "2.4.4" + version: "2.4.1" http: dependency: transitive description: name: http - sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 + sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" url: "https://pub.dev" source: hosted - version: "1.2.2" + version: "1.2.1" http_methods: dependency: transitive description: @@ -93,39 +93,47 @@ packages: dependency: transitive description: name: http_parser - sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" + sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + url: "https://pub.dev" + source: hosted + version: "4.0.2" + js: + dependency: transitive + description: + name: js + sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 url: "https://pub.dev" source: hosted - version: "4.1.0" + version: "0.6.7" json_annotation: dependency: transitive description: name: json_annotation - sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" + sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 url: "https://pub.dev" source: hosted - version: "4.9.0" + version: "4.8.1" langchain: dependency: "direct main" description: path: "../../packages/langchain" relative: true source: path - version: "0.7.6" + version: "0.7.1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.6" + version: "0.3.1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.2" + version: "0.6.1" langchain_tiktoken: dependency: 
transitive description: @@ -138,17 +146,17 @@ packages: dependency: transitive description: name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 url: "https://pub.dev" source: hosted - version: "1.15.0" + version: "1.11.0" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.2" + version: "0.3.2" path: dependency: transitive description: @@ -161,18 +169,18 @@ packages: dependency: transitive description: name: rxdart - sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" + sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" url: "https://pub.dev" source: hosted - version: "0.28.0" + version: "0.27.7" shelf: dependency: "direct main" description: name: shelf - sha256: e7dd780a7ffb623c57850b33f43309312fc863fb6aa3d276a754bb299839ef12 + sha256: ad29c505aee705f41a4d8963641f91ac4cee3c8fad5947e033390a7bd8180fa4 url: "https://pub.dev" source: hosted - version: "1.4.2" + version: "1.4.1" shelf_router: dependency: "direct main" description: @@ -217,10 +225,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" + sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" url: "https://pub.dev" source: hosted - version: "1.3.0" + version: "1.2.0" term_glyph: dependency: transitive description: @@ -241,17 +249,17 @@ packages: dependency: transitive description: name: uuid - sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" + sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 url: "https://pub.dev" source: hosted - version: "4.4.2" + version: "4.3.3" web: dependency: transitive description: name: web - sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 + sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" url: "https://pub.dev" source: hosted - version: "1.0.0" + version: "0.5.1" sdks: - dart: ">=3.4.0 <4.0.0" + dart: ">=3.3.0 <4.0.0" diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index 883ecefc..fa43a6d8 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -4,10 +4,10 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.6 - langchain_openai: ^0.7.2 - shelf: ^1.4.2 + langchain: ^0.7.1 + langchain_openai: ^0.6.1 + shelf: ^1.4.1 shelf_router: ^1.1.4 diff --git a/examples/hello_world_backend/pubspec_overrides.yaml b/examples/hello_world_backend/pubspec_overrides.yaml index a52f79af..93b5421a 100644 --- a/examples/hello_world_backend/pubspec_overrides.yaml +++ b/examples/hello_world_backend/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_core,langchain_openai,openai_dart +# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core dependency_overrides: langchain: path: ../../packages/langchain diff --git a/examples/hello_world_cli/README.md b/examples/hello_world_cli/README.md index 3ab0ed81..608daeb6 100644 --- a/examples/hello_world_cli/README.md +++ b/examples/hello_world_cli/README.md @@ -2,7 +2,7 @@ This sample app demonstrates how to call an LLM from a CLI application using LangChain.dart. 
-You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) +You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.com/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) blog post. ## Usage diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 52a95a74..42f90c1a 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -21,18 +21,18 @@ packages: dependency: transitive description: name: collection - sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf + sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a url: "https://pub.dev" source: hosted - version: "1.19.0" + version: "1.18.0" cross_file: dependency: transitive description: name: cross_file - sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" + sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" url: "https://pub.dev" source: hosted - version: "0.3.4+2" + version: "0.3.4+1" crypto: dependency: transitive description: @@ -45,18 +45,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" + sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" url: "https://pub.dev" source: hosted - version: "2.2.0" + version: "1.0.3" fetch_client: dependency: transitive description: name: fetch_client - sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" + sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" url: "https://pub.dev" source: hosted - version: "1.1.2" + version: "1.0.2" fixnum: dependency: transitive description: @@ -69,55 +69,63 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 + sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d url: "https://pub.dev" source: hosted - version: "2.4.4" + version: "2.4.1" http: dependency: transitive description: name: http - sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 + sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" url: "https://pub.dev" source: hosted - version: "1.2.2" + version: "1.2.1" http_parser: dependency: transitive description: name: http_parser - sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" + sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + url: "https://pub.dev" + source: hosted + version: "4.0.2" + js: + dependency: transitive + description: + name: js + sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 url: "https://pub.dev" source: hosted - version: "4.1.0" + version: "0.6.7" json_annotation: dependency: transitive description: name: json_annotation - sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" + sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 url: "https://pub.dev" source: hosted - version: "4.9.0" + version: "4.8.1" langchain: dependency: "direct main" description: path: "../../packages/langchain" relative: true source: path - version: "0.7.6" + version: "0.7.1" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" 
relative: true source: path - version: "0.3.6" + version: "0.3.1" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.7.2" + version: "0.6.1" langchain_tiktoken: dependency: transitive description: @@ -130,17 +138,17 @@ packages: dependency: transitive description: name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 url: "https://pub.dev" source: hosted - version: "1.15.0" + version: "1.11.0" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.4.2" + version: "0.3.2" path: dependency: transitive description: @@ -153,10 +161,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" + sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" url: "https://pub.dev" source: hosted - version: "0.28.0" + version: "0.27.7" source_span: dependency: transitive description: @@ -177,10 +185,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" + sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" url: "https://pub.dev" source: hosted - version: "1.3.0" + version: "1.2.0" term_glyph: dependency: transitive description: @@ -201,17 +209,17 @@ packages: dependency: transitive description: name: uuid - sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" + sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 url: "https://pub.dev" source: hosted - version: "4.4.2" + version: "4.3.3" web: dependency: transitive description: name: web - sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 + sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" url: "https://pub.dev" source: hosted - version: "1.0.0" + version: "0.5.1" sdks: - dart: ">=3.4.0 <4.0.0" + dart: ">=3.3.0 <4.0.0" diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index 55291147..581a3927 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -4,8 +4,8 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.4.0 <4.0.0" + sdk: ">=3.0.0 <4.0.0" dependencies: - langchain: ^0.7.6 - langchain_openai: ^0.7.2 + langchain: ^0.7.1 + langchain_openai: ^0.6.1 diff --git a/examples/hello_world_cli/pubspec_overrides.yaml b/examples/hello_world_cli/pubspec_overrides.yaml index a52f79af..93b5421a 100644 --- a/examples/hello_world_cli/pubspec_overrides.yaml +++ b/examples/hello_world_cli/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_core,langchain_openai,openai_dart +# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core dependency_overrides: langchain: path: ../../packages/langchain diff --git a/examples/hello_world_flutter/README.md b/examples/hello_world_flutter/README.md index eb983d97..80a111af 100644 --- a/examples/hello_world_flutter/README.md +++ b/examples/hello_world_flutter/README.md @@ -1,8 +1,9 @@ -# Hello World Flutter +# Hello world Flutter -This sample application demonstrates how to call various remote and local LLMs from a Flutter application using LangChain.dart. 
+This sample app demonstrates how to call an LLM from a Flutter application using LangChain.dart. -![Hello World Flutter](hello_world_flutter.gif) +You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.com/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) +blog post. ## Usage @@ -10,5 +11,15 @@ This sample application demonstrates how to call various remote and local LLMs f flutter run ``` -- To use the remote providers you need to provide your API key. -- To use local models you need to have the [Ollama](https://ollama.ai/) app running and the model downloaded. +### Using OpenAI API + +You can get your OpenAI API key [here](https://platform.openai.com/account/api-keys). + +![OpenAI](hello_world_flutter_openai.gif) + +### Local model + +You can easily run local models using [Prem app](https://www.premai.io/#PremApp). It creates a local +server that exposes a REST API with the same interface as the OpenAI API. + +![Local](hello_world_flutter_local.gif) diff --git a/examples/hello_world_flutter/android/app/build.gradle b/examples/hello_world_flutter/android/app/build.gradle index 2c711c95..48e93274 100644 --- a/examples/hello_world_flutter/android/app/build.gradle +++ b/examples/hello_world_flutter/android/app/build.gradle @@ -1,9 +1,3 @@ -plugins { - id 'com.android.application' - id 'kotlin-android' - id 'dev.flutter.flutter-gradle-plugin' -} - def localProperties = new Properties() def localPropertiesFile = rootProject.file('local.properties') if (localPropertiesFile.exists()) { @@ -12,6 +6,11 @@ if (localPropertiesFile.exists()) { } } +def flutterRoot = localProperties.getProperty('flutter.sdk') +if (flutterRoot == null) { + throw new GradleException("Flutter SDK not found. Define location with flutter.sdk in the local.properties file.") +} + def flutterVersionCode = localProperties.getProperty('flutter.versionCode') if (flutterVersionCode == null) { flutterVersionCode = '1' @@ -22,18 +21,22 @@ if (flutterVersionName == null) { flutterVersionName = '1.0' } +apply plugin: 'com.android.application' +apply plugin: 'kotlin-android' +apply from: "$flutterRoot/packages/flutter_tools/gradle/flutter.gradle" + android { namespace "com.example.hello_world_flutter" compileSdkVersion flutter.compileSdkVersion ndkVersion flutter.ndkVersion compileOptions { - sourceCompatibility JavaVersion.VERSION_17 - targetCompatibility JavaVersion.VERSION_17 + sourceCompatibility JavaVersion.VERSION_1_8 + targetCompatibility JavaVersion.VERSION_1_8 } kotlinOptions { - jvmTarget = '17' + jvmTarget = '1.8' } sourceSets { @@ -41,7 +44,10 @@ android { } defaultConfig { + // TODO: Specify your own unique Application ID (https://developer.android.com/studio/build/application-id.html). applicationId "com.example.hello_world_flutter" + // You can update the following values to match your application needs. + // For more information, see: https://docs.flutter.dev/deployment/android#reviewing-the-gradle-build-configuration. minSdkVersion flutter.minSdkVersion targetSdkVersion flutter.targetSdkVersion versionCode flutterVersionCode.toInteger() @@ -50,6 +56,8 @@ android { buildTypes { release { + // TODO: Add your own signing config for the release build. + // Signing with the debug keys for now, so `flutter run --release` works. signingConfig signingConfigs.debug } } @@ -58,3 +66,7 @@ android { flutter { source '../..' 
} + +dependencies { + implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version" +} diff --git a/examples/hello_world_flutter/android/build.gradle b/examples/hello_world_flutter/android/build.gradle index bc157bd1..f7eb7f63 100644 --- a/examples/hello_world_flutter/android/build.gradle +++ b/examples/hello_world_flutter/android/build.gradle @@ -1,3 +1,16 @@ +buildscript { + ext.kotlin_version = '1.7.10' + repositories { + google() + mavenCentral() + } + + dependencies { + classpath 'com.android.tools.build:gradle:7.3.0' + classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version" + } +} + allprojects { repositories { google() diff --git a/examples/hello_world_flutter/android/gradle.properties b/examples/hello_world_flutter/android/gradle.properties index a199917a..94adc3a3 100644 --- a/examples/hello_world_flutter/android/gradle.properties +++ b/examples/hello_world_flutter/android/gradle.properties @@ -1,3 +1,3 @@ -org.gradle.jvmargs=-Xmx8g -XX:+HeapDumpOnOutOfMemoryError -XX:+UseParallelGC -Dfile.encoding=UTF-8 +org.gradle.jvmargs=-Xmx1536M android.useAndroidX=true android.enableJetifier=true diff --git a/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties b/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties index 11fce01a..3c472b99 100644 --- a/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties +++ b/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties @@ -2,4 +2,4 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-all.zip diff --git a/examples/hello_world_flutter/android/settings.gradle b/examples/hello_world_flutter/android/settings.gradle index fd7c1580..44e62bcf 100644 --- a/examples/hello_world_flutter/android/settings.gradle +++ b/examples/hello_world_flutter/android/settings.gradle @@ -1,25 +1,11 @@ -pluginManagement { - def flutterSdkPath = { - def properties = new Properties() - file("local.properties").withInputStream { properties.load(it) } - def flutterSdkPath = properties.getProperty("flutter.sdk") - assert flutterSdkPath != null, "flutter.sdk not set in local.properties" - return flutterSdkPath - }() +include ':app' - includeBuild("$flutterSdkPath/packages/flutter_tools/gradle") +def localPropertiesFile = new File(rootProject.projectDir, "local.properties") +def properties = new Properties() - repositories { - google() - mavenCentral() - gradlePluginPortal() - } -} +assert localPropertiesFile.exists() +localPropertiesFile.withReader("UTF-8") { reader -> properties.load(reader) } -plugins { - id "dev.flutter.flutter-plugin-loader" version "1.0.0" - id "com.android.application" version "8.2.2" apply false - id "org.jetbrains.kotlin.android" version "1.9.23" apply false -} - -include ":app" +def flutterSdkPath = properties.getProperty("flutter.sdk") +assert flutterSdkPath != null, "flutter.sdk not set in local.properties" +apply from: "$flutterSdkPath/packages/flutter_tools/gradle/app_plugin_loader.gradle" diff --git a/examples/hello_world_flutter/devtools_options.yaml b/examples/hello_world_flutter/devtools_options.yaml deleted file mode 100644 index fa0b357c..00000000 --- a/examples/hello_world_flutter/devtools_options.yaml +++ /dev/null @@ -1,3 +0,0 @@ -description: This file stores settings for Dart & Flutter DevTools. 
-documentation: https://docs.flutter.dev/tools/devtools/extensions#configure-extension-enablement-states -extensions: diff --git a/examples/hello_world_flutter/hello_world_flutter.gif b/examples/hello_world_flutter/hello_world_flutter.gif deleted file mode 100644 index 25058c3853c765f2244c53b6e10bfd69bd67ef44..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 235564 zcmWh!Wmwa17yhj`SkaB6K|(-8q#WHi(vgl71#~JRO1Qx&35O!0gd;^n93dj1qd~%f z2uL{)vB3Wn1C@AvpO4R{=Q`K9&$-WipU2L@*63i+NyrrV7x4dc3jn}^fFK465(Pv= z5z;akh#VldM+Bn?QdAOCQIi(b1omplY3V4+?1RehL*WeoB?H0z25N?fv<{hQ?KQ*8 zm;+|!`udhq7FLE9N3Vk(UJhp7 z1V^&KIkLo2AD22G9b-R?v;RSNf1;nin{}X4K(L!fhiVi+sjpVFHzx|5%9{l*oqoA8vvsL!`D zZ{B7yi<52_=N6Y-*Scd(zvGv6=iD9E4c9X3vWgq=m8lh#MHi}!@zr((cj;H|1>L<@ z+Ix?BueP-0em4CYd*Dw4fDN+j_I5^+9@@>67;Q>mBTY4y%sNrl_YU^PfF?%6U}TeYvZ<Y}IJPgVFTn(V{1#B^{%+ zoY4m(qdk>l&)dh6+Q%yDUevH(WH!7=YkqOP<%R#~i=q46mU?b}8<*P4ZEP6NY#dK* z9#?oV-hY2$@WI5v`iU#86W7Nl23V769h1e~ukKI28qS-FzA;65I+f0xW(-e1c{RQMUnal&_wm24AOHLI?(4VrAAU~# z__?&x2a+|S=%|NFD_>(AQnKVLp?aer*R-`f86ZFg~ZclTfNE9?ro+IV_cS&(e> zb#x)1|6l!v!VthNp!I)n{2wL&au>L;R`%-o)WZ~{$X@?Jc3Dq4Udgf8zM;G?OLG6o z`N4*YfvW_oE3yual|$FnJ!<`j8mop=lWA*iQ?hyH;`pfc)qQf)$%W_REDt~UlPhvgEp@M(B3{-8v(I5w*yQir&8x9jqipF_eU;{JZXCWf@PH@ z@6y(MVvMp-8aUea=<~D?#{KUB6Rowm)^nf7>LNxY&6~1E8@I=&s$H@T8_u@f+WhjK zThSwT&dMeotF^_vEq3jOqBXB;&+f@T+~2Mn2(Mjd?at1>zkGjJvd6&jB8SJWh;6if zG%2@AmkdTKynWPo%4@l$v^GwPq=ifqw8g834l z=Lpb2<#Q_6{3hawJYJ9E}p9N>{=h2eG@lp2UHnQd9{pC4gw~HXYBRrS-nYpHe8#{5gtN6cnDdl?-r}O_$I0+;A=yl?oM9QaF0{ zc;mRvG*Xcsx~uPe2kG`XdEZLs*4yOC!R%IM_R0I}SDX7joJB$hK^SeQ#!Dn*O;zN( z+H=6A=w@k|6G4c52@#y{*Pg$= z=v;@Gn9-J7H%$?({Z074A%uq43WA) zqFw3uNgT2WD7}fDc={(tA$C8%?@@7Ax}-lV$&2i|#lH)+V9-Ekd2$e8CaqMA8b?Gc zhVPzQLfbGDE=HbXsMnssgzHvF*k&O$``Kwhp5TK`&C`=KNj9ES%420!d2;AtQ|^_wtZrlqfP zg&<-k1gAxy2^rXEAb0?7k8b(_v;q_rN5_RX>MIy;+{*6CQ~bt6*pbg=PDj}8Kbx2J z-mLO)c&pXHdUBeb(Xh>xVh`0&i$-1B=+y7AJP;_Ucwrv^Wk# zF(})wyg}qSA4Z6~$ha(E0XWd$ibBa2)$C<3RNX=?unc`rnFBu1Wh02Bq+R-Bi3Z!> z@F2dxNAnVYgW24d0hKYB4A8X*wx{99P8+Yf$=H&`Q0>#tb*_z zys|+o0Wfxc%XCSp_I_L|K~V>b6t=306j@|9u7a<)g=S=a!DpN@U4C#?m(dj1aL}6T zAU+eHXMzKc6b_b&HT_l7SNhs?c(zNzxvURBBnw_#Ut&JjwUSFLL#rSG!3s*4B3+yg zRkoC>r^2-LnSkwVGVUC{@}v}n?z)v*)zVxiu7e=&{nsf3%bHxaf{C<4#8@n}=AG&a zC*$=vSo&N|g+Aq`L&e1Mg`q(ZXr7lb<*S!-#&Y{>Wn0~Tr|eVJUJ5ez ztySm$AbnPbd@kT2v`@Wb5!(*gOCen6_ms39f$Qv6$u-T}*Ro~s6xAXMfC$v1pq~dd z-sp@9&+I9E0m4fphAQi1m!XgFr_p448nk@x1jL}bpuSQk26YR1n-P{El))jS%kCA+KrmTtN4Lso#k{5&bl6I3p| z?arIB3BHSzjIfM2S!ClJJQGfB%jhFyy;$Sy=oLmdqI4q8lp#qihy3kGL2Gb3LCv8J zj;BXs?a@*3P;!s-D^lv-MyarGw;wAk9uf*{7%8{Q@4}xt-z_>j?8?!ACe%SaCAh8W z_A$xTr%u6&-^o5R)_gmrH@Kf$^Oa(SbqCA|#h3oPMU4sM#6H^$QKEePAY;%jl1JfW zJbd@z=ARbAWMRzGUO8~Ino6u$VTqcqL#k~|vUp_Snx?JX3D^nDtGdufy=wJ0f=wY) zuO#osRhWjImR~U$*c+>DtT3%ncMxy0+Zx#5ksFy=LID@%lNgjThz7+ zkNeGcOP>9QV?a(|BU_t}@x z!D0M;aDx*z`i5N*mpn@Hy$AkOKJ+T8hA0k_sj(wA&X5{CC7UD7{6qJrV{-F>c)mc>FS`v@cN8`F z>Z;$&wFC!B$XOOrRU}4=NG^!6H3J0mYyfp)@L4>_Ow|*?w6vW{08`)@u*8>-Q%uRi zf7~uizqg*1Kr>nJi@I9M;3Njym0NDMJAN>ITCMl&F=G|beiGy$6+&dCmREY*Vt}78 z+-Ss$1?k#j#85yK0_Fn;saD^TVcE4+1dtA4pEwcFVW9;NreJP*=%#}6D^twKSE^fu z4o4);+)R{_K%Zp6&(lyn`r2(%YF6FIH$icl2UEkuK~XIcC=Rm40HFkc#JP}vpxVQ6 zR_6d=5qKhj4)h8-FhMvbD3Tg$7%n)(L0VEkI0{Hd7je;BL?R3{bsjI~nKnd$tK)O+ zxF`o*a4g&NT(7-U3~-9+_uCfmBzQ_NAs^BNpG)SFxkwZnvNsf6N`>+^LBV*G4iCao zLZXQ&oD@M%T_<0r-`&kTf|RhY`-K;zYjtFAn;Q5TJmS1=RtKMPizGPREzr8T=etd~ z`|b*=gpV?PeH~#(Y38FI@&wr&%0rImi=X~N-2&^1p+mYRmM9FF>X)aga6~HK>V&^j z#q}3r;&nWfW2RuCQoi+X0X8e&8AUv%;p?<<;{iZyp&gn5i-#P_f5^EWgu3xFG~e+f 
zf_w4=?-Mrnfk<5v|F9-Rod7=+VKc-)+Cv2l3EVG9h`l_pkOg*bUNBr!_$onAHxE1vkrUB$pR9huq_j#HJ{PG}HV5AsgTZU`>=3e^qEAx38SHdyZtG9Ng?%`p&pl7I_} zZP^i`OMpy07O2hxPgMzt<$>EZd9IO8ZOE_O&lB9Qmf+75YG0GSil7g!l4E_McU56&Lnyz;+N!&!?I2K z`)KV_Qz%s{al=?aM!OPfewmk(BAAF47P_qElB^pWzaZKM=J7*Tm_%t2R!Fo{Ld^RY{>j+!HO zM#n@L&=fQNyT8OJu;$CUtWPgWKOH%$5ahaRCiwJHL?SYZAaIape)8#Mqo<$u^8{D8et zumk|pE|zPwjoW1S=POQ!t(Q96BcF6$3Vi}%K$aM23WjM6gWz&xcqOzvZ-m$|is7`| zxpIB$!(7_vby+T_GnP*0Qk+!%oqnxeleBc!QY<$_)dqn@%b7 z_9-`Sjooy!ym`mcB_PG+-k3`Wmn+WFH7dn5cFZ-A%k7D!TWX41=9pV9mwSPwdvS{U z^D*~Iu3I&hx9U=EHI3bB~J&((7xA*Y`26 zbuMq}mZkSziucbkZz`7$$jS#8P(sn+(Ci?{uKa zuQI@+mHU2&)&1_&`@Q4$2e==MSUng|eK0-#V2=CYqSeE%sSm%8KV0Vy*|G}ROAYxs z9zx~Df~>L7G%V8u7QqwBZXL><7Ro;nD$Emh%sNamElheMOqK_C#u|4n4W~MR)8Gl$ zwhpK2rG*rRX7 zoroLYi660!A5V*)o`|30Nm#T__?njReIj9D0{0^jzMI0-KED^o-||8I`=5H8z=b>6uNFnXSB89X46r=~=y#Sp&S;BR1LN>Dkkh z*>k)(i#9o5({sL0=B)GPZrSASrRV;f%%$=YK(+*E27zgcfZ)qxx6R|u$m5^N6Xwf5 zW}7dWkuN=!FUwbO#S%Ga?aYotm zsj^DG=QXy^>oT4?^7=( z*ZC^9Y%BLNDt}H@Qu&A=J0dib$TUqv@K>?hRdHul@lRI?^H(3UtCq~HmY%Mb<*zwo zS9312Ms>PIgTGeWu2wIz)@ZucjQ`~&yO&m(FYTsZI`F@8vU}x{`Rdm6D{ua~J9c#e znRWN3>q7YJad!1lnf0;L^@;oqPwW~}GaE9e8*=#@3+x(;GaH{zH&*gD)z~%FWi~ZU zH?{INci1&|XEygvHxKZ?9MjRR3mGS)l*q#MO7Tr6X}m zTF(r$i^@_pk*#j`RLnEALvdoh|Fo*?9b3s{(1=EAhJg}DISauLoz6vfT80JP`}ixB z+Y!?aqGz|fA4urvqSnUngkQ>v7hhqNwT$DrJ^gp9%5TG#7(A(qCHvMLI%+WeiT4SS z)^owtOvU{#(G}@bF6ZL2yWaD*^y_Q!UMN$~c>&=OfL>OIQ*UtNn1tFZCI7+9T&j>+ zOY@JdrIE~WiN%N*j|KXC{+9~~b~b4WVWbPxseyEJ-i7l@XQ9C-s6_3zj6QfQ*aej1 zqs3)$ND&lzC?3ebQ1a<4CKQ^#1y@NWqw=_?h+7bt%+w%1s|_;BR7NL@sKqg9BMG`9 z8BKm;wM>8f6JCeQ?NbuB+hPZkcpMvx;!M2KJBZ|9+go;?xXl3t??B``1-kQ^?V@=| z&35?}{V*W2r+z+<>b1*QJt=Qj3QpU{gU}z4>VbS@Y7u86!7xH(XM$OT%flf0M4X9~ zQ0-|GFD134bU74~l~J5JNW7vX=Ru~I^fAUlef5jVmdnatEiYf3*r$@|7$u_7bT9?6 z=tmG-ANwREP%QdGj>YUvgyT_vO*AB<4>Wpp#DmhMw$R5O0aqECZ_8P+r_++6`dlwZ zqXth8)1Vy32B#baW_AQG5k4z|-&Db135k+-g7t~zwsRl|e*l23M zO4|7LSxqc6uKL}Ij8lh(o4u&1nu;Sx#&=;blb6|UMR5U<@=#j~YsXp@-j#KFWoMkW zQ?zTtk_fnY$F*PqSj_K@;Z_X^moewlvAUqk7fY;(hE};!SFG#iT0H1qQC)ji#Z`;* zfLE?Oo0stQL;2D%>()J3Zr+K!L&w|n%}?G}%*U&0%s$cc;9E)YivxojOqY-PP=86AD9F=yOT#}9`92_P#iJY2u%2*9rmB-BvNXbz@x!Uq zMZXbazoP+03%iplDuvw52L~&4>kpS%Qc<@lC0LXytZnmUn>^jg8cmLci9~L#JVFXk z_Ot@2EudBtB(&tr%2Tg%^A!;Y{(->KF~3p!NDQ?mi@}54`31@IcwwD9|p!x~n2KHjIEPJ=;FeTEW(}m9mtL=3}j2?Cq zg?sYQdy?^0OU&1iIW~m~#RmQEYvJcr^z}^08!QCpF69kPs9MfWw(p5$GW=>;G2i(X+P9PqE25HY4GjS zE@#h{B=??9^ewq7Gi)9!6cA?aTQq9+_!>M43a3I>n(8X{hV@g46 z!nsu2aq~&$!FDkkzN~$F>S7wll@fiEvWxVFquHl+O3mZTE+d9VOKo?`tU2AU)V%xf zBKR!9?pK+0x8c}c#LIG5ljpX){$np2cPe~NUbI^@9B-Vut8nM_^J{zW#*06_ti;00 zF%DsH-ad3O&ZPW0_pQg?azLifN++BojV9lzgZV#7L6!5JCf^5-)l46jyXyTMmGKA_ zx6$|H((j!boAP6N?uonQVq{kbdZ&o`Cvw4QKE`Gssei`>gnLI9-tiAE=5{` z>$ruKe=OHaAqDftsXCz6UmN*{bHcm|X-!IdC>ACwcUg=B8VuIT4(lK|lKop2sjF~1F zmkaM0Kq-Vfjk9kNouG)V{Tdiz#r}4?yrK}OT`FjD5UOscvym#OLu7x)Fuz~WpEBiv zeWcMHm{|R9*FcUh0jMHdtWf9E4^AfYN={#>s)vup(^kY$yqhk`J{`@=X_`f|FyzUs7kT| z?`-LO$vbzhn}!!UllX!)A%eX+oQ8bz&hyg5i;PBgW!*yGPBy={{Z8pX>d9<$a=(E> zHSYa->Tx-l%rQ`8(V_{~;OUKa^8ud(X-NeQ$rW+$z1P;w$Rbe^HTI32Ol2%)1bQJO zom=7D7E^7-x!CG0ZRfX-Mub`V)r~erFI#!;d{9z})oFQ|@6`RMrvkGgiYlx+iO?vl z+9pa`zaii(uyQ@Bd3a<5H%jW~0GEI~Ra2+gwb)U6;9?kUBg}`4`X#w2)gdi^W=>zv+{F?h%X&iH?@WR z+Qz!Yaca`&w^SN+NTbHiiZZ*TI-3K3hFlmI@dr^$u zP^N?;<1huT1e^yrO7eZ9J>+y$8?(Q;0N-VTIbIHS7>bt^v42E0muh8+q=ZiziGX<1 z(0q#KVM={@ECEVtX1$Im*H8>RPLw0Hr{jd}#v|+S z7(}R)&M}suNWDG*o^N_x$8{|p-Vj*i=1zN~*tikzQ`^IZOVg4y9Et;19ox%+y*Ztq zlz6$*NdRilqjl3a;i|_1AJC>lEwU=^QMj{Mu6~Dx&=5YYuTY{Rsz=3iIKiD`5xwuK zt&!oI7^f!Af9~x&%f^e>v;D4%YXlkSeWWCn8Q(Z@vf-W8!DcGSeiZSI$G|9(?cS7h z4|h>we?H&T|a>`EOjP&e-{b 
z3*+y`Uj4>}_~-Utf8)Z3=b#umwlvas%fZecT=>x#-p;a1jQ))ay%Sw3yH)u3a_8T; zV7ptL)?a?}TrUk5c54Xm3RgX&sbNJf1`D#DS~Dg>rhfM&F}}jX#b|m;c53%keSd{l zKrhXYcDIfMf8i5lG_%0BSKnju;&y89%omlthQat3e#J(!WG#t*!4L!h4=@3x{yT>J zuWj)8-h}?U4SuSe)4ME#nj-Ba(EQ}T+u#=9V)0GhdX-nJziseaV6p;jZY=>!!lupj8;dt@Qrd!UpMZ2 zpQyPt_x^R$-a5G>mMZ+W-r_F|*`qA>=9;y({Q9}S^?h!jwUw4A0FfeN!CbE7P?$ss zIgD9xiHt+)O3jDUZ1CqJcy5-=M+*2a%}1dkr52*aQd}2eq>4%w9?85~T8NeFl3I*Y z7;|0x2Zk&rq9K3pE%YTyKRq^9{PyXIx$g1L$(JqNK0md-S^7D}-v8U@)ZaGvY0fEb zU(#KRO21@yy!!Sf)2HkB*DSv=x3AfOUrN8`+~587H5Usxu|x>xa$m~(JBBP3;B`-Y zD@?X@|Mo2HX4$u*EdS+i#e~Qc%O!;=?#rbmfA1}x6T7GzFJ3)lwPa zg>n*uMxf5=yBybyO*;80hjOq3V_9u2nf-83ht$$OaIMx6H#O@eB-5!OFyNeGy!hmH|JILrCC7gsCLp8XHu3ecnJo2dk}=aymYnK`ubCzEh1%0s9e< z5RH8x!!U^j*cI=R1;|jM)@tIWUgM(+NRcofEKWEJ&9*|q!Bga5DJK`fHnDFQTG|-+ z?-vBy)xV=%kr%yu+E*eui}&k>lCQw&$TRopdi$&zeCmC|=`^7yFy^mwKO{htF*d9( zHNz|pjl383BPD#$kh}`iUE4!l84SKIP%V^s%bHH$U0~PRGYd1XLIFkcLKkot9qGTX zxkD(1`_l9R=Q|hsUGb1NF&K^&R6EyT5)4U_4_vVB;%{et#2F_lWQPtL)W>F6;|Z>D z9Nprk03(z4RZon6I_Gq5gmY2=&~j}k8)P43N?hboMCK?B%c)D(`ed!ZtY>{lpw%{c z4n?9S2cs0VR&uZgu3CeG2NJ>EZKt?;u|UNmk+=|757s5L9tVX~Esn;APxW2uAfAqQ zWA=)E3jFq-uj`B_M%x9@dsFC*Zv)*F{#lH%fQoMEnIKwmKJNK>v4~aSwfK8S&pxS~ z9K!px8<(Ex?i3m_gt5%F$$N(}EcffGTq%6TDruEnW*LzonUl|2>YEfRNsmJ|_^#7F znzSx>H&zyT_LP19lXLIyUat2#TXy|Ibbr{1E6r@q6?Y#zzxtMM-1Owj{~kj!c56#a zD%`DTy+z~SFeElzGWZb0@ZVy{vh7|Y%?97Uu6K4VIG}+_T-7K?f85O2YhE&`3?A?O zxZAk*ni5}m|Etm5!PH*Met+e|z23Q_gS}P&3lR%7Ce!inlVGOAuytuX_Fs59Wm-!^ zn_tl2sOg_n7yAh@94Hd-Ze};mCuMM-`}-#i zUewt*`5re^wm7(gc~ti%eGW~-!{~gSqSvNQ=NKNW&w8cpDGT&X>Rb|0j7zMF0c{0SMEcoh*R*>zF8! zfEJ>A!_b*x?x^kiKPTGg#Zy~iIF9?|ybS>Y|JhQ01~LGd035XM$oa#QuG!Ux(tYh; z>_!M`M^Fd*pUVLgHPqPU`n31FKelQz3CQp`zyFB#JMse#ejdnb2%S4>up_UDw}-6E zi1=7-NvQg(PR<^7d3~h2r{(?SXB0`GHIq#c;DZ_XLMk?~6BTnG&1uS13T-rEyd+`r zoX(=lw)E^gE%2s4C|Tc;P9Vfb!kE}?A)*noa`>ahMSiu_Gm1LN4EyIT|A$Ac2tWW9 zfn30UBToMphnvMW4nqHy@iXkm6ov}@bs#=M?o0Vc#?MLLUj+;vOD37j&``=O+eT4-`xB3e?@vgK7*{=}O~nA9Wzqv$%BJbZ=gx9d3S#82)cQ+@#!EYgj6J zzSg+%>ib#~rAvCfd3)^E`s@8K&nvAIR(f;f=#;S_O|g}4vi^WG&fRUZp(IKG>=4JI z#fkuTbPR0(=(2f2_O!bfY=H8C0*M0IN{JceA3IOn+cW8v>z3K-QyBNy>R0+&zV%*h zZ)Ix$4L!L%sKf2KJ)|#Lu{~^jZgqRaT<_%0Uk$RgOU2HZeZcBp#OZ`{s^{*cYjMTy zltt`Dn=(`wMaB*7g_idZ&JT zO1AR)@j1=q#h(WG$5%qsse`4$R4*EF%B>1-m@hnnt0woL-l|lj9yJPb4qBM4KC8h+ zLHr5Y?|TK<2EKm}ybRD+?GWCN-{97)9NLmJ&_M23I64SzzB}Xn0J}oePTE~+p0wDP zyy1>KSag^h+(T4R({?}n{P`~)M=9WJLff1XcNUy6Q12Wd<2p=}Q{k#(3Rt0EX` zy@2|GQHq3aZK0z@gwK8y6Lnq=djmEUJb=rkE{y|3?>`DWF9TDJ29$_DOcc!H0WfhY zA;e@z4=~L+yJ%P#gE@+cK4?~yi;PvgPKI`4($VcAuT#Z+QCp}mx-V&MI~Kk%RKlF* zTAHS+s0H3l#~D?i-;)X)LWL=av3-LX9QY1|8{QuP<7wX2L>vPE)xm5=q}y;LGi+g^ z4kjGcLrG48gNslWMUuGxgeI^TtEVQnFYjM?QG~Mp8EvYZIM(lUy=i+6CcfJfx<5>_ zmn6_}okj#I_z0PmV9ziNzq@v{c{iDdgg7-H0W1s^GzaGaDTEj!vLhXje)_r7QpGL{ zr!!yZxS;g*sc=69cEN=j7cJ``<1iv9efdt29BcbK=WCUDw{>#SXU`fvB(B`uz(xrG zr`jSQo5F3MZu}@u*Zk=yAF{F!gh>>^Suy16)InWP4_=bdT9f}8N*g@n8!ZkJ57kWpXTGWL{x9LoiNE4#8J17oR}-AI?Jo*->yERQI@e zJD?yiO8HCD1+GRPUN>U9D5RQMR>q0dvgEmPJ8Pn(F{;Svf}~2{Bg>bDaqJw2xJ%IB zn3L2%5e~R9NHQU5#M*)MlE4(DJ>_d8f@CRQ}UHAflbLfMxB z1?duhs+eY7d39^nMW%;fgX?N%vO_)BG+H{J-0leUe%%TF$jjt!-ws%w1wc#zI0h*) z+lDn5VBK>GNW=$Lp|t2FC?LZ=918@i*~W+tU&(jk;GDq1`37Zc6T;~Dh@l{b!Er_z zN1@&{0F1bi$Us8pat8XUI1+$NIV7OsAQ~ipl}dFKg_?x;dn@?N35|x zvgUoBF}bL0UKZ}hBnaG3=AqL;q8kdp0(}+^LZd95P4SdmIF^GIX zAf-_GV7bp6g)I-bs#TX*?wfFi7wFu&PF=Yb^7FA|Ga$ruUR&6Rt{+DN`lam)Lxdhp zT>>yDcR%E=6L#_AX`Q@=w-Vn0Ny_>9VF53=8L1zf1UnsqhBa4GycM9qcZ(d`&F{Hk z0J_drEFaD7!Q+pV3;!$3X8r;eR`RZ zB31VUwne08LNe;?JAHM&U4Q4IR0K#835R1Z2V#Nq=D=%YmWoS2Gfcz^8kE0>ICc)! 
zhy^^}H9v9o^sB;9*GRx)IYb}^;5n)b!-53$kP}!i3(3m~Mh8Oy#=jV6oI{`}U`e4{NN)8*ni;)0 z5uCRlv~F(dy+nH*!Rwd@nx72@Bx2}#SZKNz%YAcWC0~CmKsTHv!{-6>KBD*&OT-P3 zB*xHYNz2~a(_{1zs}7QuIVFkKq+(bUFaUN+Boh{TA~5#ke%OuQx|c^VEOZx(({*M^ zB}5(~>FMgt01;sN!FU!qAcRbha5dU{Vz8Fv!-0qJb-N=YHwGpyK za3+#eFX`#D4>FmJ(Txm%6$f3be>zQs`xf7q{S+oxcroHx&^#q|hzOr2v*@QMjnze- z{iN|u2h2xFvTI1z`jlv>?~w;EGs4EL$axD5Y7~FM2lJYT0L-qDH7mD3ViXC_1{J=b z2kUZ-gWZUhLO7!sQU{Q*MM9v=U5Y;;u9*X9vkrbD5OM}9NeiwZ)7#(;k1+IbD%tV7 zGvbS-+!tg3Z$MZc*{cd0wuR0j2WByj8j9~&rwrfN^EZD?@#~7Yp5uIHblYt;ie*_D zITn+RPrgXMq{@hbdpBUaI#~2+f7B5J#=*x2yFoACvPc5xO1Ehd84n_!!qg~;4H6>N z9I)y}&q#!y;(mPNN1AM)&%&2{IX?AXvQ{Q<-mQ9=%<^s8u8hG`uTn4};w-|S0xS&@ z7D54PdRROGfCo}8#3ijTUG$VW@M)Uvb%uvk6c!(B6TbmM$@%Iy(`U)SlqB?AZvy)K8I0us3DiK}cjw@*{TV_4 zbhduxZW)kUDh>zRSH#MUV@UZ+WG&tjFGT`?7EIzP2aH9=LWn>dlF>*GfcaMP&Y3>= zT`}TwG1eV`v@P0?&^-PLAn(JF3ZUB<^<~EzS~teI7Q3es=>%!+dCmYubh)$~a6~Sj zQVSzC0-3tO32y^sm-2Ba=3|CH9#Vxqxne`Eh}W%X`GdpE0Z5ycxUg4tO%7Oq3{*oh z;wXvx8!Gy8z~gruVZ>rk70?`=se!#wY=9jULw5VnNq0uFAmj3oPw3|94?Y83?jmha zp8S-m?_Bh^^Whxkj;CsMjrAh0E~$1s0_p?MU{*#BDgY`EF!Mu}t)njw)xz))x*zYf ze#@Gk>%!$CAunMD(`b1kv?UhE1E32e;4Cp9N;p*Qd(F?z>c~bj4WH93cdA^Ast}xv zF_AzeN-|v)&=Q@gL?(9LrCT}xjkua;0YE%xpd6Bs{fSoF-o=z^Px?vCm_fkDI%W=| z+A3tdoLqe%`W19LoU0Rjak}no12bo%!{w)1OoPxE0P#m?7JX9I#Gppuv}P9!S|5xo zxp%&(w2`~h+ZV-6fzm@=e~B%3~BL2J7f_Sf13M>|Ca&2-$qI<(VBbPGq53g zTRoP&^jB!VoY@#f@}z*P-+Vhg_`f1L{sRZ{A1?iq?9up5_DH@UqeJOwWLpU_PbrL< zpT*A=>DUg1{Z(u)1BZW2M`_AEfs zv>)tWWNfTO8;^-Jev0K7d?3I@An*kM>&vSMw{m_G&zW1>EjCUQ;sk z4;kAZHinTc#x(GIi#T7PF&V)}ZD+U-av_hMV}<_0Ss0PO+Uatbd`c9L(3%*HIf`b zDQdCb( zXm-KFJqgDc7$!v81~nuwbvl}e>&FKp==vjAoR(a}K59IIAOG@Ou0R_#J>sBeemjKa z3WsbAE^0XY1kA8aiM;JVSdu6xmV$Zgtb*;~RtG=FJk?@SL-|6RptbTz-%pQ*FZ?xX z@`&sHbp{?SJ|)f)T|0C@Tk$VZ8SNKoqAj2&RnU`H7KoqJ?S8W{Og!eA zhJGRz^e?}A*3Bwju58?YA^yJQ@XPo0R)FeVZxT4MbG?oJw~USXT=Qt#FBO_xfy2tf zR|~0kf1^tvU}d9$cm3x^k65b5W}HaP-OYD0ThDIy$_Xdm{)>$<=#Tru#@x|CR_Wp2 zYy4(o)T82PYz(3hJ;HrVu6Ee`w_JglJ^nWvGrN>6e`$4x#>Sj40HK$eswRK4G55Qx zcH$?zf3q={=NFMGyo-O>m}6JgV?e)9461i-3H)YbP7TkDvpB!kfRjR%+7o`WF+4H! zl~cFw(Ab!K1hdMQLYj>2i*4oF!S}#AmY*xNPfp%jt^a!M>}=!S+P$GV>ZwDD9MtS^ z16>($*siJQeXu#Gy)CagYUTZFXTqiO*X~Tf`fs^{?9u+GRPUo7OU1N2J(`T|=qIIH zmU_57?oIu*|Fx2O0A}s(paPf)0H{8Rj*o(c858KZdq@zK-*N>4SW>@@(Uua%(?@`u z>uF;NrqF;r52mNz&Spqd(&9(5;z{jXjg$yET9~bie#ajf+i$r-PlwO}B?`@)j|`5| z?-b?Rh}JjGXG`trlu+4-F^|jV2+k>IWN%Smr_$O|GEERrp87#{ITOS7kE^A6F6$|1|(U*q4D_)qCc?dz}wzM8|pI*BwUIBJDcx{3)lfHP=kHThunr zKtI{>e+IyF7aUHHzqr;^Sa8(*Bg&MWT__1MoOg$Lw(dge63kip@+HyGV)`RiDX-B? z4TCGK3_E6N+7+{(bV@W5J<*_@t99`84ZW^-ufcUiFaZ`3WwsQ5*{wbriRt zK4(ws_Xde+;j*i8nr~G;Ax}Swi6Jfav$hy-(JAcjKR%|QQ+q~;<&*Iu=me$qiH;aW zX9(hCU@o!nx5@WSalyMEesy$etcv|GIPZU`#}c4}$?Fi(n>R?fN1Nz^mE%|@Sy


    4(UD^Vmz}f zRq*gqB99lTfF8ATzkvQm<&^s$a@hNJz^f;4MT6P5Ctgyvt8{Apv-A=KlQj=`Xp*HN z7IFFgi-}xb__H|KC07w$l@CEy*UvlaSPW>0C+KM_CfoR|l!`kzhCnnkFUr=}Ay^JHY-``!r;Kps|?Yp^d%#kmElMr8n zmd-gFL@!nML5>3Ez*oCVBGo^h{7nu&l>O}7P@Fec;I{Pf`SZ*-mt%EKyf|10O6NRS zE=#<5Z>0z-dr0Zj4rtOLNpidwo-FAY-6-H4y{8##=5YAC96cx>lW6@@jviJ#O8gJy z=;&~7z13g^(?eBRNCnc$(JmW=?s#<^J*^zQDno+#> zPyKf}q6a}(QJNGi7NO;cQqb3DGNF~DYvPJ$K9N<0s>S$e<>>SzR+(Bd-ViF_g;TxU zqfl#c21P4JCzLhWO!gBbv}o$7xEMP3`takM;y?tvMy!nK{{7g#E;N8WH&Eyi} zXD6(FtEV=f662o9xES=>PHm>-%P?uSN1tb5z<6}**6W-eChKHmRk0}W9igffr)R!?nZQ{sx%z8VY!Pi^Jw z_Z6$}y`>I3JlM+pqpCHesi(FHFq0B3?%(RE?L1`slKO|YxR6iUsIN1j|BuvDo~0}a z=83UK$AotD%PywU&eeZcPZ@qF`>m?|zf(^+%yz4TA0rA>HeUo*@T>1WN^GX7r&Jbi zIRK3&+WG!#^^|QcrQk&I$qIJ_Z{j~D-!%0UwafoyJ-wK!R%nz=niPW`>|fMVUcS_M zMF;Zfy1{-??PwH^yZjPVWhwNp$@g9p8D1IKstV#>LYWMi$nwa(1ATYrXwr)Qx5;;v zDvk|q`5Lf0&xJvQgh^JgMH6BecOQ9`Q4^|_7RqDJNoGv-VNjv8v7q{91p@awP%Kr^ zdd3TEnfsl}6KlxZn0c-{cm{bnAOCyU1-|M1E+vRBBHSD#(6HalWFH#JYP=}G|D#9W z^mJlq-zQ)@rf14zGbUE%#&M>3NVaQr%9h?NT<=HE!zyyT@ zh#f|8F|A8`KT3T$4`Ivf*nK&nTIDDSBzkY6>Dk9M_!mmz*%<)SJ+#l$3bAjtiR<+0 z_`3=B%yA*xAf)lnZYkIMIMHsx1f8!Y!>96Dr8yMjG)1m9HXEcbIS-;M)r2D-EDCj! z#{LI;?-|x)yRPe|7fRmHJA|TO4}wyKP?RDbTnr;RG*T3<*_3G8wQ`|As2CSob-S#zvL;}uB6if?ZT0r+7xQkfa zS<|t_1kf>6@7PckB>N;MD^a8?yV?hgMy?^?cISg%!*ysX_!E+Hu@)~M@n*^D z4ej10hDo);Z;ibtG^ML&Zdz{$nCZkBshtda=Y5?<_VGqI^*2a)d_u(Qhkcoxwa9<< z`dCxW`ID!ZHh~i>8D<#-q(Obn!~NLP)v--#YgmicRyO&^yT!NAe6X?5kaj!v{17#N zZKK;WLGFFhH{8 zEzXZ$!_Vf18-C<=z`AUE;A?xs1T?F+?9cMs#np>1EKRVG*^Qa6Rqbz^^)v8qKy?j z?_Gr25l^}GMF$K)@<)8jc18_F6Fc?=>qy07j7O)dq#E;i(@ik%iX=cV(wQ1+`0LFS)U0=cj zenP!+Vxwtdvq9orK5&D3thy#qtpW%^?b^Ep8zNpBJA|!Q7g$PL#S38;>g`+h<5r zdbhIJQLQk~MH2&^(sQCVspg9YLpephzYQ`j)WE#D$xgzErZC=T8+AAMY?m!@GMQ?s zBi%83>zTEM7vk`s&5@hGBA6ZGltPM!@X^X(AMVA-PeQQlD&m{xWYg$|91mAgm4aw? 
zaeE>**2eHi!FkO;BbfQY-wtL}NZ3gp*WWLW)p>*qHg90>w>SWcpox^|ok@ho*9B{S z#`wh%Oqz=ccCkKGR8^dum0Vswt}tht90&`sW2k*g)Mo_4PRQ)r@#eAq9}&!x8>!eJ z5!YnEMr)g1lQl`&y`LI|hCPjh2SnO!Pm#_*#VY?1!7Q>bMcc!oI7xvn`=N6R2TvhX zLO8Oi=x$wip)=cZ8MZNFdK*mA z{+(sU2q8?dTi@oLU~c4B1QXY#lFCBC&53!567Kqi|9c{s|0JdG<$+-K2Nxq(^6Ftq z$9WCfgGJI}EM8}A^__Xf)dIshP7q{YKSAPA;#g7e2uZy|3dh+XknNny+ zKh{klZ2C7-3RN%RpLGB2l!EU0sUI=^zoZnzTs(fgTFk}l*41Jz?mf96=Hi@Ec z89xW4!cQ)?MPLTCjIb!{^=*y2yFg@-vMu@t7w=&sU-=ba_HIO|aNUq=D+{oqSjPtK z_bew17o96Qvj6}|rHi>3n$)JYWRHwHsr-P{Wt0v4ba|9%U7@n|hb9eJYRraF8~8B> z)Ob@D-8-?Ugh_g66lsVQ85OF&;1f0p?2?vb3}QoxH0_wJ$@DsR=`jDOU0kHK12$4= z$+9T?EGbPmT_}oTK&kYC^u6j{Iu1@{3iH?~-;N}Wj%mPml){jzvr#){L{sSoG0fUN zNS`D=-d|;y=s^JMo;8A6r1_)PmFXuk7S!peXJa0w>~ro(7jVEHk*yu2YxJ@9a)|vwq!pzUWica4X_~~LmrZ_Lzttm| z9=8l_PK@Q9c4QnAdSgValr!F6InSOlY3VAwaTUm;(lrhV`K8bLc zk$hHt5DrBfNFcvb$=srdxqW&H$rrmW_4pQPygXvO`Su0!J~?Y;`8LUc)TDWh!y8@R zP^CJCJB~Xa#yL5%T>aj^xan%3sVaLEKHX=p!@Q#D&<6%(#@C;3+=Tdq=eUbc@nVFr zWN&wp&}trt2>y0U?g>)mESsh}LXuwar%49lBVS^DkwzRFYuSdLJxkBQub4@#nAn4@ z6BcRB%dLO|30n2pJsak@ty+h%k5w^Lt7|rGdBTEiRFMJgX5T*4{&NiJv#_(0t9_fT z-x8wY5waPjTRw+UV&lzMuSH9&75m(0QtYo&)cdjNUX{91S8OZe%qcNh6+B>NhHi1; z1rp#{@OpN6tkx3eQ!r#!V_oqxq*`tECZ&jseSG8+#q#Y{~dCO zsPQ?89q=_3uuu+)QPMj;?_~;0zU&qOS>#!pE7~VN#?mz>@CVndwAKAwpe37Z7{E3h zLOe4`w`ndO*xMbXd^z5GOw&3(%Wlx$*dj{ zRn+mJ{XNYvqppIvg5IRiU)k;(v)PJQUbs1VF7YY0qd_KPYf6Nr+0$Up_Ur;GoX|M; zOq_hlJ9lQpQf<#FJ`QQRC^)jBIso&wY?|;neAHQKi4FFX4Tj(6Yj^Z+`Z3l<3zCc9 z=A63*3ywtH9#G~wV_TJRMkPTpbbJp0*E*fB!Z`5hgy+J1Z#|}p9lPQ9sYCt0m$u-Re;E8{1A_b7?ZmAD9 zJe6;~xweW8T+MmouT_^tIsGMM$4!kd^am5E?X%A^|JIQ>H!iBUY;@jD&nH;ik&yhO zBhj_}_XOsT&xZuk6@NZr|InZP^dB)xBI!ZQuM3P$^Qz4&kG^gTC`eBJ#|upR&kjlL z*FjwN-(r>o+R67ww;zLjTGi%(3gAUr*d_7jqj34bRgaWeUHY08PQIRM;>6Cn$;NW% zMArVz(^u^ck4t{0DX&@GdKXeKmo_eWyy;(inD{48Bp7iTc^ee~^*mJrNDrv0S7@I+X$f!-x;{GRMubhnVnV~(ZQc9&y zpj%@T)BaRjR`kBE;L8vS-7Mk_bZr3RK;N%lR&A z=_sz-SQZb$ffw{9n$!u@nb22$9Fl_~4rTwNZUdz)73oDnHC*VnuNWk_DT5pABVI|- zC?kn=p=kuN8?2d>DF5p;G+rD@B$4j6I^D8$@c_#NGfV@p1puEPBW0TkSlWX%N9mDD z+=k>OV!c#cQ)~Y~RiZ+(m*ya&!9c{ZfjPZ*TO+DDU>RvDahxKyj3{r^;mSy=kFHY0 za}euEyp)jZP$Zd-lxw4;oa2GWS{>MLjRA>%z6f;43j|;+1DINZH3kU~X*wZFsgkO( zv3Yo`ZwmMl0aY_r9?2= zC)t>CNUDVk4u*UEMB@De>98^qQnrreDw7zg0hVtye9JLe-RLdpGO8*S)%94xKMm
    5p`u>X>?)zqi6RAwKn4m*VC~Qfh&iRTSp+!ah0yNoWZ%bugIoKPj5%N=kv z+1Yw<_-ysrQq#(Ys@A-l0hY=`ubO3&{c|xIeVmvzk5aequ2N7t{-UK{Le}G6)r}kb zZ*UeEK^>1Yx8v@It(+ZDd`e^$-S{H+8nZ5;uVvGhv&?VDy_RM4r&1JwR_26Pd8}pR z^+23v#-wjZY13y^XenCBf$b;N29e>l18?ZvkXHAUKYw%^)niQU_adB5U&f1GV) zx%8VSyO!zkv(JwdREe&_!HXf0=aCirq4{v2`@UO=;tK$A5%RE>Aq-GW>H$*jb(&)G@5Nz;7jBDj2aY?{$6-2`kbvJKis~n0ri2 zu3(U-Qw;EfPdqv+E>?GZSRFRkPQu(~wz`b0&6(@)b-kV4+cC2K-kj*EfAa0zk1ik0 zU(Iy}ciz7Az2l?R?YX=3ep~rwY_zS~JcsUDS*Y}Mbd$w=7bCf{Sbxiy^WORA%+AUZ zlc!^@Ve>Com^)<-TgJEL%y*Z$-nqK#grDsxn|cBL&h;Z(Cic9V@2Tm$b2I3-yV;gW zFSUifHrJ|(%%_uwEf)GY$yJr5TRt7#yD-q!(t5J;>8F!n3$M7C>YCOqpHJs3ydHI} zzT5lsbKt#&H@xKPx{q7FguYsM%kQjy@crqR^9u{_KtN61w2lArq8bOdiCD(i$&0ZN z3}RDin)O|$;`a)Ml(pg;jow}g4i*gKfZ8?(*RN?gx$ohke$&?WGnT7(Qy=gtwH-%X zr?V%vOPW5b?F_mJH>5X6rxG>>>~g(ijc-6YT1SdE6wQ=aERGRV=4H|-(O35_j(gk+ zm94r2Gt_HrvPY)q2?wD!^=-y|-R=!sVSKB)ry=RCb?-^jd5yIh5fhuWBHw=R{Qh8J z@e2hI$dnQI)oO3Yo&2HZBd*`a><2$P19k89-9jG93BNL*)s3tnf4f;tn@T!@vCt&X zw(Szmlzmy?nmn8DxvlZ}7$|JCXgIavy8_pg-HUC;&IHi1-@uZt>GG zO-Too=*)-=Tr>C0{03!#OcZX8jJI9PZ4Gp1MSR{B?9N7f%~aP3keb~U(dqz)O-VT6 zVUIpiL`00dZ|IiNAm`RVhkBHVjdZ{xlZfZwZQ<@9X*5$kjTF}NQBs6O8yd*I%se03 z3l)5nhEUIk#nLtqp=PWg@qp2e5PS@a_ToU-nx6X<6v@-4t#DKq@DQtg!ySl#Sd`;{ zhe8Y^Q;87{9P}|36aq#;sPNEU6vReS1fu5o#~|rZsSL~lULuTePJfgdhK+ty4!5F2 z)aYZq{9)$)Fb@v26B#4opf?b(nj$cHjDbC1Ks$j&4^n_e4#K@R>J^e4ZmK>RAuR7=tCy^$O^uK8zC{8ZeDsizN>-U`P89MmSoy&pB6Lx(7e5 zCKZ^%!YTAHAKw_Aa-OXdTk3|PPmgabF+iw$cI zjZ;V32`VeHttq%UlV|-u_ zC;4-4gq=8~XpnNJ0g&)Zsp*A!VIz(FVXHaO`hc;W z1t0BzMbA;9mYSwI_d<`3rr*()UEY^iR-FRVMjSH1zQX2Y^e51laf1nOv3{+QXh!@Y?w9pD_N{5v-O{XBqw zB_FjsM4={VAv1D;$AXx%;8&qyJ)GiImK_J`0g~=QhssE(&Jh7heX7!)!idcCVJr+* zRYA*HZH@wP*s-=G+R~#1qb89Y4lvkXXq^QtaHUi6$fxKW`#r^*o&=u3qTdpM6;Fy9 zr~s@Q_{6|`;7acdz3h>FdC%R;`vxw1UcLN*f%(K^Z5c0aZa9I?EIE3&nU{O3Y|rH_D zT9&=I@+a~9CYAX?Jd3?gaNT0q{eyV0@9r*f+=5$CNBPm2t6?8ZdH^#|59a>t%x%8( zdYwvxrlK&3^mW^FI?oLcx>y7Y>^ZY3UNZFxODfNSRYR(+VEBKMps;9N<;q9`n ziF*N06n_c{Yd2d&d$gCs8wGAkx5E?i6Y^>ni4Z2Ry!=H^L1n*P(RP}wzh*c{V}m6NX@ zKOsFg4t5Z&h_o$788-m1Ja(h~R(vJlT?K|n2KyPm-;guYopLl{(`5Lqr{CXPONf|LNl*DtQ(8Y_5n?69|AUlP zHZRJ+r9fg=+y5k`C7wqAXDO{x$45VuRT&(FwALB@(~7=w(x zqbS^pG&5>FmBAlu05tpWaiqU$k%KSVu2zu=^nZ?PQh3^K zxJE1r5TjP-ok3}+N|fG!KXxtlJ7n$uWohX2 zX$POX4VD&CVJhLeijM3C9hevNzyJYfOtDeL>0{S&o=d1=9mj+Lk$yt&97lYwHJu?c z#R}}yJSiG=LNHR5a}fQgvG-1)zv)q1BwCM8l|Em8Q_Im+J=Vx$MS3Ptv+jB5OC2RC z-*2>lw?^_J&MV7GFf;=*qeb)RF;iI1kb@;_tRHPE|Js02H+e}@WD4lABNlZ}C=-8dl_a`7a{ry?!R-P2Z1*~{-UIfV2e_7lFiYkkv1=BV4w=hg2_fww-_|w|ZQHRV z@l55*r_$@A?)CJa`>b|MI`4$!^XsRYtAiwkh)BQQd)%Z`xSoYgeAx5aJ4=}x8q|*4 z-CQ?#xug#_ZsC~dHi-*-%4{)RI*@cmOgv`-nylV#)s0*GJ{4rvcG|3hI5Y>GoN>iX z5Gvxm;+{7b*gcjpypr*uq9)GeSUc(BZum^Ce4taOM5g5QF`eTZ-lBa(aK3p=wu${? 
zMdYJUIAK?+!*hxm^6+Kn{xkOK+>ehf2iw;>ow>ayNX9}bdvzIKefhdMAYbCqQwXf?ut(FsLp*(*RaMnu;;~r3 zO`-lR;;Bcv@*>p)T>k1d%QjNdtRB!xkq< zz`ed!*V(dCFlL&obY0LeTXk>olYh#+*B@QK-+i_CIr!PVzY|X#7wN`tR1@;(ZgoRS z&-g7C!YM{d-FxJ%K#!-xKZ(cW*<5FsaGC|&A9Zk>f1V>I9=H4ByTrtEPxy_Ua)0uO z+rq#r;cU&b`=5iJExf%a#Lb}|@M7HrLuw*^o7;o0na>0x7NR*$%7d9wx5e?jqWQjO z4`!>MEq)FYEpUN{B7Uoz@N14p&|7_HzW14M_MT{wm-0|>V{}zG$6JVvM_Tb4$O1`y zK4dULgvHpi;DIdkJ4yt!VTDj%x#>IbHWm=pqnGj^OYxAC^#Gq8A#JMsp_&{)5?=(N ztHsx2v17L_LW8a}(h8j5L5#3aLy+_WB@$8-c^XXls00jhC6n;TPej_XvPda9?GW{R zp#{eV8_*qt>2zkq zF_8zoqm}Lgj=Sy^o)R3LRwkbj9M{+yr_PLXvWaW`7&m~7cPx)iaf~nKQ&P$N*K{kaIi`Ea3tY2Jz=mkp}H;M-N%GdR3g(LaidM*Bi}?ZxJZ;G-l~c37!B_X zj^`*RwVNb$`zCe8CS`jh4fMsmu8}{?PbyMQPIn9+@<<*Tm4A;)o~Vf(;3t1IO_>ot z0R*S;nJM!%DT2NfAwLDA!iefhhV5n`LKvtlhD0qxs-N*<*9weEs;pV6{O(l6kW}R{ z1}rsIy+2iRE)}Perel^?XPdS(B+VczZFz0l%Kk)H0~xQ9zSb<=cz62xkaW}9taS6* zbj$v9tGRT7N`|djhW+l0O=2xYR)%wJhD(2j>s$s=<-#_z3+}rwIPAvqS<-#13w@jm zo^uySDi^)XF8b`gcsS&uZ`Q@5wHJ@~UpzT?(O)I=v{~la-I;+QnZa3^p|zRe{h8=@&}{n+S#6vHHAIBKi@Cv`1aRE)l143PxW`af@LZHgWd~y zjP~DA@0I@UxLEIX?Wx=|>%GBXvEFO`L$o{O4%i{vza>ji{Y2Zpq4!c4nRc_v_8K3{ z%Z-@_tnx2P94;&PS~Ol(QWd)7YVKFIT&H5Apb5Bgb3}E?bw1x)T}?Z=yxU(ugg ziR@puS>X=kPPMJIssoI1dtJUhF}4!8J<++n(kKDwwVZysrbpeV^Vsjjry8&kSOKK{ z$Cp^1dFqG%dU}~)7q-AIMnZCRxG&zzK3pEy(-E7ZRdq@uG&Fh} zXKkW;y1R_pt=|ynaMJ!?g!BWt5p<;0rjB>ALm3%MWHbWQYq zcfU2;+ZvUuTxOT^!J*|*qFN^%f zx^#w@SF`>4$-*hd>ETS^ef8>8W%;n#qX+H=6NN$&BxXuq{PB$W4^Y2h;>q|x^&e2bKjAty zUftJ)d*j|8P(PUR1ZJD$_$Sn_tolOGP!T@*6YB5Hxcf`r@IhGr0rlx}O2;LCL4DAt zKcK$XlGCJggN8;tko{%(6Rlfqy7Z^z@7~4zjD?3E27X%ptZt@JZ|n(K^TQPTeYXP+ zmM{0y@<)q`X3z)+enGu?@&8RwA1Wh;`T=cgG1Qw68vKNM#ewBNpk7tu+K^N+SQheV zc%uAEgD#ORvynP^d-A>=2Q8I~V`%i)Ui%a_Cl&RniaXNt$+k1_{l=Qn(-nL6Bj?7lc+hQBzB5l77a_dH|%6R1z>cp!cHwdnPV>PPLb z-;^szEfcZCA{@DRu-(LJwcz_|+=Cp87yLyx_JmJZaPxDC_sN6__2x;F^j-VtgbP8J z{0-##+4ND&5krnxBQEJ?3flq=oA^Cq_Icq;{m-t0(y6~k%mmXMhc$~c|9182zZuk< z*;X57r~0Is{!zUu-;N0$rm5muF$G1*xaGQ)xe2v>hxK^RS!Ey+kvRmABsOh-8I_Pn`=V#fA1wI#6G(cPeqaeUm$4KDf zZ(RrUQ(bi%$x~dIMWmU_EuNivAQvpY z#f9@k5DorzkUz}`%pncd-$}FV-_rnysheNS- zmz-diZcK-4KD%-dZpAZj`eBNdtwh@T+sFIMWO1t;tgdsvg2Lb z^A)4D_69d#V4>Yt6=$?|KD7b%v11}CpI@;VjMXDaaIR19jwWP>gs*BV^g z{mi^qn^#$QA_=_`slB6cpb|kjwQ+dHkA9)fMiKcD9(eUkD*vl)bf zNgqFWjE%H;O}Y)2K5iQqN>f(9gK#K|-K8Qa=OJN-SW92&bPNaEBkHyjuU?AA93^x& zjj#~oyqKMbEC!T24r(3aBNayo`yNA3x32F;TM1H8vTYYiOo*GTXK@Guw@JyUjOvF} zrK!EISK}x*ke=qy8d^Z`sQX|EZVpXN{UG4Puh*R4>RGgOR_j{Q$<6$(nDfFr$Zzy! z z;wCi_%E97!7v?+&0uSTHD$f#eX`Xq zjFsY;K~WzyjYL&)Kgb5_YFHRFsFfXyR4JyZuQt7@YyhH1G^bzf+j1l6U1FN)v`4WC zr$I9cOfq5NWal{z$#VN5&!UFmYsDt0O@onV3EtvSUXyj9In=kZ2`;89Wz{-BtF5+V z_JmRCjYg=OrpR3f-70oji-v=nn?hXlL&;$#~IQuNa6{?E<{X zs>R2|Tlluxa+T`B3QX{J*3I}xmC;PC4Rrer%9b?PDGRmL{@_UKLCDGUu-=ctbL`kR zbZY7D7TwEJB_B7Wjcsa#+wzZLo@`CoaQz_gO~gOP)GdhK#nYA5!L?fJXvch@H^4vf z5ccOzeD>VD?@d@A^868EcXLnQQ}yrkW4Dg=Y1a*D^7oxSJ-DtohlO|Or>vOG`6+j@4 z#(cs>kz+q$gvr~Rzjz=Mts#VvgkYES8H63D_4SymtcMW%dDF&HyDjq^jW0Db7!89R zS3>6pG2OY}clt^xZyioLBh;PnuihG$9s2T`)$GFq20S_ESEpy1W~;xqwcA+rj^w-? 
zy}IRrlgGK~K@+XvPJLgg4k8e;`Jvu~BSduSwwo}kg(Wd=YW4NuGZ~Nby6B8m4+ZeB zsO4e#iy9vtEZF$Ba|_nDUzK7O{K@>@08kIgmO~afoF94#O=Rsk5pxz^cJ_K9CJYa| zd`|p2@1fpD)#Oug%8@#zk=kWc+7eh8iR_?HmE+4ac4I2pNDn;B*n_q{m}bhPnb*)P z+h`taz_QhS|$FY>-3r*vTJ>pA( z=YNnGXRA=BjdVw?|79%Uwv*CbE&C4c58SJfmnR>uFQ zP!AbPfu;h#Lp^4Wf&Crovr<)S{{r=TyMKrJR7h9aU!Y#}8`R6r0fheu^=7GS<}!Xk z{mzgJ9$6Rm)LxLPrT;5Xk8*du;hX6J2Ev^-PvHRp7g&Bd(+TV@&jv}bKnNB{83*;l zVQ5awO<;EkB1p|2qJx7d<3S(-xEw1ceR;x5@sCLgNkG{Z>GdFxGJ6ZGVL7`ovI?@l zeD_vIWKOvswxD}^tsjjB*wm4I@WBxXmdI1B4B$~aI07np)<7K&eZb(mEu%;#uk zml<}t9y5h?d6Sg~*Je(OD0#Hnymya*Dg6FAhzlPNOwMIf`!9(Id2x=pE&Xezxxi)+ z$djAP*2C;%fu%tZPf*sIHeiB)(BXg->cy_`6?1V&5W&rw0+s_o$-FfJf3!6bjPZw@ zU~icp0&_aD{Z={F1nQArB@P z%N1fJr9mK7928TZJx~u&=A>0YAPf$Q!-1d-1aTbj2Z8jtAcQ|etR@AK;CCTAAUKft zAH}9vH7sVYv)Hc(#j{E@>$BE#;ssa<30Co1e2IiVTF6=>AfO{8AZs}#I8I@V2vr=p z4Ewk?D-FjS!h@lBRDm(D8(%ut2sna3ZXl-GKuH0X>5m6NN$^+3KrjUa#b1#b$jV2% zFN8o|IH3`EkQ%QzY#ao|N_43JIdRAWAHa`r`PEEe!W>J$#pKV!xV!1I%cNS(fEOUB zr#Udo6`P5QiK@;xJgA+Uiz9*txVdv#2n+=&XbK-ogWCg+9_I=C!k0pxS$b2AruEGLl+3~C=dlk z4M9UlP?MZ|9}s9ajyX!mjvTm2G{01I1vJU15b~f9)-6AeSoH*uR{{u*2NAjKAr=S) zg03e)jR{ao{AIUxpqx{z$%SW9Fl#4(b@yN?@oSWhfrbYzbF$so@{({6WG=4q+fm^8 z7_j9Sd{aEKOagiWE6F3|3H+rHY|x2`Dq|8cc&A#=H1yZp9z7RK0)ng`cR}C$IMhX&F zpUrq&4dPZeN&<4YntexaH=eDDdsUaTP?LJ^mf#7dh=Gs;fz~tf>G!~&@t3Of9#U${ zOT+T#%uo<6xKI^PAcBTfHwz@(2Pv3(J*G7XM{K1H;33+^>-gg5ZMOmH%j*A+Qf_hOWqo42khfvGg-Oo#(@GN zNRM%4ZtkWVmbq>LO5WRSO1dS_yGF})6H2g!^);62_tW+^r-kJSc~{2Tfi*eo+4sOZ zinM4gU0oFRXl@z%hKh3^8?<}*)d7D9!vE&?cy`802&n$%K68**hgNtOC4mP;s$RR~ zcgy~2jcsmy=t}X*KTsOZxqu5=s_t9>dg70}Dpvhq>dW4@V=nmB4Yvb**=))c^pA_lJ`qH)yacTSOR_=6*qg5HP5dFa$8yMePKmPr%>+r1F*|3j|3=#8 z+qt#i_1K%c&07ULBm)0*MhY3Xk{(kl{`#z8Hou|O&Q3Rm4Zn^>SVQ1mP(KbHiudEF%Rzg}&TUrvDyB!aD3x6060+C@B= zCI_TUfwjDPMneJ4*9)dra0Ey1bS-n~3&|4lKbltRHypd)8P_Xvl?f#pe z;_}MIYOed_eeS>Msj~TQZO#pPPW|JY`{+93x}55`HQ4bUs7(({yZ5?J51*1) zOGAEY#X~FcZGF0YQKZV%WH45*ZN z8N3q&SwVW!`zAohdh_~eY0Ctl#$Xo^!P8@aKJLNTjklBcFXu8|zgPm@$^~|FL7qIA zCl_@5_`uwKnHP87>Fo!9!7}l<+Rb?IP998I$dc@gp1A);@U%{Lb*(1>@@dbITIXN_ z0sQ(Ydb#G1ka)!nhiR{e)`z~*e~W#w5v}w3N&$OVNP;dt2HJ5@H3CcK{IHfG5D2{- zIGvgI1mH39(~{mR6%+()$T@jp*!>dr_mP$yhzRn89R3$ZT4nejW~dFP|3fl=XYMX+ ze#hVS9R!-Q<%rvRS1HIG`p?A`=6O5WTN=E#{ki~5G2=9;K!4l0!Y%D31K31aD`%W9{wxzyB-&6<; zg^a`F3^UlOt7D~73KHXG3L0X!Gg928`MDJDf5qa8|9;7Q@klG?%JPifeImW1_LIQ_ zvfD)Rp#~Pu%U6}>tcJHdwq{=ZlKDS+#=$oEIlcDL1=I@7^T>jG;RjVOBTGj zViU~2BD>RHkRJW+iQ#I_#fZCm%Pb15mSd&Dhu#ni(t5wU+7(r9jx7LA)GxE~OmKd= z>8dK{a{sd%H#x=wi5g!l8ZFL7epy9*qdo;*Lk+i=D7L2(sG_2})168grgcT5%kC1g zMRLnF21pgx>YH!#ci+jW@N=X^keY0;Ebo7A z6S`NPM6^fQCf5)Do>N)A@}LuODrMD5D$1$tcbgC(io+o+DqUj~$DlI4IH<}bH_n`1 zDF3&d%8?Jg3h&=#TRIVxX?|RI2xIci%F^dI^T zm44_-!nO>kHciUKENX{%q?Mpi+2)fcZ!;46>`cvew`}l1z~|mMMaZ_$aFshB=h3Q-9?%r(?~3!$HqmJ?J9;m zx-K;>-iBsie8J44$?7#XQX>&mWyKnoI?GA1H|SL}eEP+DC&=mkPNj$Qcht!XV2>T+ zr==GOI@Ur$>kiDp3JP~gsG)=;RlJ8Y&^_kE^0{7>KBb5K1muZhqmh*lsHmaKN2KdV zW!yOzu58&7xkea}LJrow<-q((kS&jVq_|wxM=us%Vi8L=*n!miR{r5IWj8@;hlo|l zH?Uwx9ojgMp6op@HGZ4?VfkZot;GXL37({H|$rMx|4rpoc?l`J+JN-)*2ta zEN2EgHL066$A?}mayni!LXlYf2?YBd?0b|PxcUqQey&*gqPt&NW%Y@V)Xly7o89CcPd4eAe~@%H6|Gm(_&{*AfgNL=DKs ziSix3HGj*gWWUNCc`J5yoL@Zll-tGSqyCapiBA6dE1CaKaw-)$i<3W+`MsV0VopUr z%(^@ynEtv37rAZA@ERS1#VuCa?<7Cit|ZSz6?fu-y3_`tc39Baw@Z9&>%_&ui*!0QV8pnqt#^O=dYaVq_#QvRHAR;)FX z5ZQ0^?tAv!1Ch;0*Fp#^PXL{JFn8>E_`A&fxj_f(p1DDQ%(GkA%A#72*oeq|qPZRi z@;4LgtUd*5Lq-^cK@B3leN>sPra*inwzUG{+hSlnW=09VmOw7xA*LzFGM4BJh!YJmgCU zsv?~_-*Z;LK`5}O7`BuU3qL0#DZxRCuYKnRfi&?%gem`mD8Bi_F(mj*D^!94bHjsE zPe&sNFgFTVzn(lzfa+o4@?u{y10~0XIdVY>Y_c)|tOr608Hnu$P&pAP$}Kp6j|WGO 
z0&exuZanA^2Sg^4%Q>JSE?`Z8Er@eG43r;{+|CA#ashi@^bi}A$pv<@!86}67=jqW z;Rj5Clu^Kkfc}<=c=H`lBf)1sCS2gdXj~xG*I(>vR-+(yI+AyC!E%&D!FNDayq%8# z&gLf0jR2D*qz?}!jl~GXTL9T)Ar9Kchpp!(JJv&mASjLtcVoa2oJ1i-+K+))%Z50E zA-CJe&mEIQ4(Dd7QH~&pF*e>)422+wa#_Sq9<-bSe%0nCpolYx@L)EaRg+v?7O|cU zyJ!IYY>*=Khv<{wZammdg1kQk>?y*dEd3$QgW%{kvXINbF2l&8q9<^Pvks8ue&Qv< zDbsD_MmE@(k~kQY@@W(p;)2H8l7SlVDN@|DBN@+0#&hA{dMOW-CB9{%Ea${7WxBvW zL)sDYsqef&DgbW zl7Lt;1{V)rCMie+PD<#HM&VJD{^<3D3^f95$R91o1v{66{eP}w_aVk_4T@|hK*h%9 z-K>b+;=UiBJ`_p*}U<(IyY0P#d^;5&3HA$ca2YzksY z`jM4=$@=ujnIpg~Mf}}PotR_6Jnwpa?foOL4!PPa$uQ{ip{uODy6 zJ^axl-;o8pBblYyPGWqs803iPC4qOE|u@aw(Swhq*La1xN9&y`IB+#cL$m{y)2G z@4qFo{Np+rwaUCS4Q=;51N%23gN{gaEcWUIig!s3z{OVyrL+=utZLI2r0nxCQ6!1#Bi0vyC;BEi-E$qCufK1rj?nPk{^btzv@OaVcsXsD z3Dw<|GE_IK_(3=5w5Ick?tPC#@?Uy9@o@f)U588!R z`n|Ib-`a5f5iRsu7j6}-3qbkMy_D&nmFF&-{f&DIEcss?|2%C~hzRlUld zvf&7|c~wi%z+l2ALA1;hgI4KhiYcMRx@sI=^xu)CP_Z19bi5dz6d2~ht;mj5U~{qq(6Uoj5;e1)%NhqTFPjZYO>&o=?zXHvCj1h{pEPQhlq zp`df`k*GFqO0e=FO(CukEdX~=x!!gUnzgJ+JOr6Jt<{$pDa~B#lFxLo9octywgnyQ z5j-f{KGQ1a%M}5EbvEh(E>a&+?sA@_D=o_SIL$vuL*oX~7>cdo_py{y@;F5~xD7#R zD|l%i1u{Sx8E=_`o>>hAH*!9jKN_SQl0PGfAJvC@9duoh;V!1dRtYXQC4^@O3npH% zZtenQ&NoTCl7d56SooUj1SojFfz$wJd)~!q^omtS4O7QcHkxwLqzH(nfA)*Ksxy!T z>{U}mw$z!E2Xo4O35HMoT;5OFA>>feW|#2bz|r)4Q6EUQPbMDC!LjhB)b--^f~V8& zoD=Y=3JeNWPWfd?Qz9INr$mVzE38RUBZ43lB^4vxBpQ%kteO}t9r3@Vs*E4;Pm(#aqgiT5e}7{?GL>Au7Uoi zb%LS0`ImJix5xux) zP&R2CZy+O@WR1!^S#BQ1c0)*hs#H0=4t1yRZNFpHoXTDUYaTvz^x*4@-LAFM-?aNR zN2^v8Sm9l7`&uT9WQ;OBKHM+1dYow(XftMzMy1 zbp}=@{*~F#Fd${gmwWcmOHI^mV&ipd9fK>vde#hz*VdTY#Gi6c)sVm%N?4P=_=X=cn&H-j-by4r`k3j?hRMpYqOAMCP+1Q!OrH{KJ;0`>yMq9s2Lf@l zm$Dx3CaXY-ls3Rie;LDQ;3!mu^%U57@m8o&TGM=#kBLq$>-dNV1#}-otGM3;ejY~Z z9|b|kE;MxMnffInflaJJZWN5hL&|!Nrfkk&Xvo!EvcArPl55=J)~R!0X~8C_)i)gi zujN^f3<9bi4O%NeRHaJ{ICQ10x}-;bUuC)FdA&#}Pyc!y0XS9;|3wSuIJ9J@6@70` zS6+f{D__TkNDI26l$D1p1XD0|)@K-dK__K(b(G;e6zRrmMZ1!s^(`AD20S)Fd#WwF zsaRwnp+Jv6>W~YoI6gp!Na>7;|CZKglr%SOs&}9ggP%(n*XXIYm=?Dm`g{iG+w`W6 zRa+6^4IVIfWuC^5(Y*K+8deEK8wa&-rWYNL@Fz^Ga_&AI!@$}Fxu^?|T@;sZ1!oeF zDyrBo$xC%e*vT50bZ1MJWIRZ$hy89qmpV>w?mro}9hWVN~#OwPcO;$omv?iQm>*egGANy+N|SC-=$I7_n68 zXL4VAepsoz@KJ)Y#%#kTLoqC1KQ6IkV&7SYL#DPmzF(v19I10qM;)EMw2{2@K#Lyp z4C!$?JV*kzJOgJcnR3?@ zi~k3EZ~l<^gKL!Wyj=%;OlWC+@bmSd*DPsa(Q-j05?8d=xlZM1 z8MVpzae7{3?8C2vCa16N)DW51cMuWi1JQhC77*w3+E7`>vdo}R3(VlWbn>6|cQk|`W8Mo5w~ zXyvy2BQz7Tp+EEmDGP>H`}vvc;3l|5kVpH#mB|DLQ8)b4@;)ienCw>p88Bxwht!cs z*bJwB#$5u2n)=ucqqOaVWTz&fmgT497D56eA~kUE4uU59vz4uvuDWbfDXp0z#JaCE zd!13`PR`msaVr{9qx@ z9Xqt`k-mMP3-2U*XVcbDQrWNN6s?9|9>LbDg|I7_q|5`fKqh&KgDYCNHuJ(>MInM_ zm}9prkHshH7&=5@jXuR~lb-*S=fyD?5j!`TymSPe(-U=Z!C}QDGSa=cPyVt^^Qfr} zvOibB-Syal^A2JfBJOshg(X2uT^oMUb0g5rp>UdafM-R1Dd`DyWjqLiJ)b25H@YHs zA46|NwBg)zmB)v81}>(qxb=kUTTQ4vE@UM z`is@885u;^p2u#hKQ)GgE=WI=fV}tkk>Xg;$^D-#TdMWm&58Rk>9t}7sbGsii^jvR z@=7ewmJk)aXHt2G_yfmP0K$WR1D*Q*i=DKrpijpXzG2WLSJQDO{-u83ez9`_3>x0d zx5&)k?pMD{RqY3CXJxzRVx=w|$I#8XHs7)~nA`VLlR5MqrC7T@;kSO1Eym`r$$nPx3c;g!{x8tWH1)VX_Wgy5dsQCTa|p(tQ}d{KzY+T3F6FVpb*P9a{b| zHRiCMBzcI&mPOJv2(fIaYs&jPYmZoU@0(r96x7Dk>VulKG}gIbYjg_L?k%I@QiDjw5D9c0pTRTF&I~Xz9UnF-})3<5A3nObvMW zlGX1ntKvBvJ<>VJu$u{6o{7b#GXQ9Ke@=2W3>tm9Cw%Rm_yql1@iHXcmtWs}t` zut+fSE+7wH>N&BbMxkP4>uDjMZNHqTJdg!lN0nwlX-Y3ola8*b8%{HzVZjVASpMJ8+~SCdxn9UEvcz^^X^zK@^jySeMw&|8)g<~E4k9GiWu(JAw-Od-xjy^w^ou0 zc5OBx3Fh+j)^sL$dg#b}9^5EP)m8ID;&69d}TzD_v)kY}fGW_7a!_G!s#3RExW*>bPe97EP zJbt6{;svCQTTE@>?i&L$+F@tsV(M0I_14hX@Oc&i?9WY< z1}>yo8^@AF-^qS4bDX`=@>n{{8O?slR!sJlFcn(C=kSK6Y=a{`sKN z|ILXXg6on#k5^th{56KQ0oB)v*FnEhnxOOj5)G_`H0O>^9Tep8)cN^7DtQ!NlKxlK z*UY^ky5j?T2(z_m>WKH+9E~Iod(ihs- 
zx&U-z_Vy!DzP269Xp@CFwmRymiJ++n?jTCflV@1NT5*dVDu2rF^K|gshfH@6f%^?% zMoS~`X#x>;HBv*mw;28aJSUag*K9Gh(Ll?>fsLp%)WGNqGNWKiO%wgR0M)9H7RyBg zt>9*TQeMCsnMquq#rxPV@CO)+GzWXOtD7wvxShg6%Gi34s~nfccL)LtqJtMg4Q7Mj zL4qd81}W}&9u(%1+YGC~U!w&}P=MdkyhRE&1hy<%l9Zs&&WsSt5L&s-h>#Glg~UmB ztpf9qNT@kAaV*3f{cSkO(g4P#jhm)iiF}>Qq)Sakfch^c(I597b*~hiE9PW z4O>x1I@XSog)o2EBjUjxpo>Rr7CEVIXwXGh2(ypiAmAmwwq(hT zqStdYU2UVO)@*-_1BBH!HuHYhAY<LA#`dJ6==qe%Mhm-`1}}BBq%dF2O4Tt9+_sJSN&OQRbiNZj9$5{;{i1~8Gk0#vV>iLQ1-Yy3Te4SzIPfI- zt;yd;63!@&g}ycojv6?}d41l$wv+I~w(t(9uRI2d{_%m{4M#Nx{E*DwAWqev-WGm; zpiLJ)lw_o<{ilzC<=GkBRe;9(M>0Q7+uIX}18L4v4wh$>`yDWTdAecNeLc)9eDvFf zw|)iCT4V9%>1}~I%K{zn7qakwQhnujjd#7i&9_MIV@PM`%`a^wmo87Fu`J3d{UU%Y z{5_Ie8SrnC`QJEOLAM*n6+^ve|7d-*U zM%A8x#GDzdIJWMn5v^6X1?QgRFg1oV>l5_S9g10;*xJ4-FrrL_tZ#FP22BK6tV_}v zimtd@!KSF!KtML?+$f>*DB!Txp^*f+C-#Q{i3(kZ0i2lzpAIc^DaC7Oggdw!(JXc! zYk`F;>yfC77VOqjer*z*J3)M@R*#~bn+xrk@Pea{b+Nbs?U82Ud7!hQ5W@e zHvt!VxJ!Yp?IKj2c0aPzb%7!vW}e>i)9(F^Uy3%k)kjMYPV9>R(O~=waINnJ{{z?h zXR&pr&D%)5)dSzx`9CITJx24NKhTcFMdhtouYBHrKf?GOFe}|B4`ux1|Ggh@d69Qu zH0j#F8Q=#h+FQK}SJ6>7UCUyB#Vjm;L*F+LUcb)t5$68(RMsgR%C@r%!<+jO*9A zQokoRr_tYy|AYo(gEZ(%zPEmyRYtFA?aTcdb|k)Q;8A4E!LabSSBH8h8F;sdY{Q^p zX7;StCDDiD0CdKw@3kzik0srmHCUSR%YOi!QL^#On^UDb-o80qmTmX8rt0wKx3&Mx z=5I-VQ}M@C~Anxs)&F=Ss7*+0}8HYzsEU*zM3+BA~9>frA9Wz8D-@gM(z zT9*I2Bh1h5K0RUPIDQ7|N)tXmjXixHn4p2W(qEHW9sk3%EYo$R|F)JzLBV+hd>!CD zR5^H}XoOL(ulE%|qlkxEDCE*chn5Dvhe_*iyjKQ>j=n#OYq&9LcvC$=RjY)VV^FDx zH+*%fe)0<-XlBuxqvr5wzuwtOrYWs1EwbqUD)Q2O!E$P&jwnP}v*m0^}vV=s?Edi1vE+UkdiNrt@*q$wcjQ|%EQ+B2eMS{2@g2UsPS(>cKT_J`-6pMtV zFhQ_%AyLQf8iyz^Lcl3agv`M??`Bkjj7oDbQ${OLSTieK6-X(yrsn0BJ-$nRg)byR< z4QAu;QwzgvfuRnW1+9pMS_N(crv!>%nN3AH8|5gn(&|GR2)aWaWu)Z?b=wdLbqn8q zWO%h0c!JxNo8gp()Pfy zI+2-LkYhb+gWqAx-);2hrO(aN?LP~;D-5gGzQ3`F{L9m=1y@Y`zBKr{mt17NtP?~;Z0cxU8MPBiR9#GezwMwaX~|;aqf@^pI*tc?Io*4M zrFc8<$WMSh>JLbX=)d;ry>&bC%Ljw)tSLm0Sw$Ao=x*`)pd_TG?vTepN`4QvKC|dXrK_R ztF(M|P~Atc(pdbc=H+&;mqL(>8tKJ36dgXR8e?c`E<5~SB@;|lU$dO4^O_{ZVz-@; zfcIElIXo)Bje z*b}V5WP>ViSl_S4>KU?y5SvmUA?mDTpS#@kriY7lZUc7VH}A=_uXp8Ld*Ar}C^9j{ z;`Nr<`d?%>H*zO8o(}rngZ1AmS^PU6+@E_D63b=JiP9<2)!&xwthyQzl`0OB{G>R5 zoT*9?2Mwqkny(F%R#CoE3Yqa-NelIS->v8(c!a;DQl&x9bAn!| z29|kD1%2rvu46d#5}L#ZVf(uAU+cem!E}~cr|Z+C&)lo53KVlYNxZ05lfF^P(rLCu zW~+&Kl#+}VBu|SI#)CN1*hDim1#Tp#)6>?-bYA#D5l*eR9ew7Qe!kdQtepk#VQQ-- z4Bgw(EJg=6v(FD|BnY|2aoMW9E<&uFQjg|nvV~gcgU0Y)P=;t_Bxpq5YGIuuBE||e zZTdjqd3^Y+Y6etQg24FUH8dlnN&2N`GwkqLdhyKgATmh%1mMVdmNdf{)I^A9vKa!A zCRW)rQw7w)0Tv^Ik^y#N!>k;bc(tJ^O=b?&iDtlDu0>h^u}4>{QM=m#hHC3FyiqVo zTg__LQJG_{#a{jRb)TMh&i6%-e!01;VIo!7U7*;qRTB@ukBPoHfdY6_>zqa^Rw0 z3d+u@gc-Gwr@$2DfJv1u!YMLIY!hA16Rvqh)R$!0uozG)xvy?KzI`1e54N_^9QY~W zb}*TIr$^6ex25d$vPj4}dW+Qg%hGh8QOxCY5<9P2y=-ZVS`QO z!Qk|dT=*WtO!VD{j`mb3DqLBO87hV&D4%Ni1

    + + + +